Diffstat (limited to 'lib')
85 files changed, 11444 insertions, 1595 deletions
diff --git a/lib/Kconfig b/lib/Kconfig index a3928d4438b5..a9e56539bd11 100644 --- a/lib/Kconfig +++ b/lib/Kconfig @@ -10,6 +10,14 @@ menu "Library routines" config RAID6_PQ tristate +config RAID6_PQ_BENCHMARK + bool "Automatically choose fastest RAID6 PQ functions" + depends on RAID6_PQ + default y + help + Benchmark all available RAID6 PQ functions on init and choose the + fastest one. + config BITREVERSE tristate @@ -399,8 +407,11 @@ config INTERVAL_TREE for more information. -config RADIX_TREE_MULTIORDER +config XARRAY_MULTI bool + help + Support entries which occupy multiple consecutive indices in the + XArray. config ASSOCIATIVE_ARRAY bool @@ -574,7 +585,7 @@ config SG_POOL # sg chaining option # -config ARCH_HAS_SG_CHAIN +config ARCH_NO_SG_CHAIN def_bool n config ARCH_HAS_PMEM_API @@ -621,3 +632,6 @@ config GENERIC_LIB_CMPDI2 config GENERIC_LIB_UCMPDI2 bool + +config OBJAGG + tristate "objagg" if COMPILE_TEST diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index 89df6750b365..91ed81250fb3 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -239,7 +239,6 @@ config ENABLE_MUST_CHECK config FRAME_WARN int "Warn for stack frames larger than (needs gcc 4.4)" range 0 8192 - default 3072 if KASAN_EXTRA default 2048 if GCC_PLUGIN_LATENT_ENTROPY default 1280 if (!64BIT && PARISC) default 1024 if (!64BIT && !PARISC) @@ -283,23 +282,6 @@ config UNUSED_SYMBOLS you really need it, and what the merge plan to the mainline kernel for your module is. -config PAGE_OWNER - bool "Track page owner" - depends on DEBUG_KERNEL && STACKTRACE_SUPPORT - select DEBUG_FS - select STACKTRACE - select STACKDEPOT - select PAGE_EXTENSION - help - This keeps track of what call chain is the owner of a page, may - help to find bare alloc_page(s) leaks. Even if you include this - feature on your build, it is disabled in default. You should pass - "page_owner=on" to boot parameter in order to enable it. Eats - a fair amount of memory if enabled. See tools/vm/page_owner_sort.c - for user-space helper. - - If unsure, say N. - config DEBUG_FS bool "Debug Filesystem" help @@ -456,7 +438,7 @@ config DEBUG_KERNEL menu "Memory Debugging" -source mm/Kconfig.debug +source "mm/Kconfig.debug" config DEBUG_OBJECTS bool "Debug object operations" @@ -610,6 +592,21 @@ config DEBUG_KMEMLEAK_DEFAULT_OFF Say Y here to disable kmemleak by default. It can then be enabled on the command line via kmemleak=on. +config DEBUG_KMEMLEAK_AUTO_SCAN + bool "Enable kmemleak auto scan thread on boot up" + default y + depends on DEBUG_KMEMLEAK + help + Depending on the cpu, kmemleak scan may be cpu intensive and can + stall user tasks at times. This option enables/disables automatic + kmemleak scan at boot up. + + Say N here to disable kmemleak auto scan thread to stop automatic + scanning. Disabling this option disables automatic reporting of + memory leaks. + + If unsure, say Y. + config DEBUG_STACK_USAGE bool "Stack utilization instrumentation" depends on DEBUG_KERNEL && !IA64 @@ -1309,7 +1306,7 @@ config DEBUG_KOBJECT depends on DEBUG_KERNEL help If you say Y here, some extra kobject debugging messages will be sent - to the syslog. + to the syslog. config DEBUG_KOBJECT_RELEASE bool "kobject release debugging" @@ -1626,7 +1623,7 @@ config LATENCYTOP Enable this option if you want to use the LatencyTOP tool to find out which userspace is blocking on what kernel operations. 
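A note on the new XARRAY_MULTI option above (the replacement for RADIX_TREE_MULTIORDER): it lets a single entry cover a range of consecutive indices. A minimal sketch of what that looks like from the API side, assuming the xa_store_range() helper the XArray provides when this option is enabled (hypothetical example module, not part of this diff):

/* Multi-index XArray entry sketch (requires CONFIG_XARRAY_MULTI). */
#include <linux/kernel.h>
#include <linux/xarray.h>

static DEFINE_XARRAY(sketch_xa);

static int sketch_multi(void)
{
	static int payload = 42;
	void *entry;

	/* One entry covering indices 64..127 inclusive. */
	entry = xa_store_range(&sketch_xa, 64, 127, &payload, GFP_KERNEL);
	if (xa_is_err(entry))
		return xa_err(entry);

	/* Any index in the range resolves to the same entry. */
	WARN_ON(xa_load(&sketch_xa, 100) != &payload);

	/* Assumption: erasing at any covered index drops the whole entry. */
	xa_erase(&sketch_xa, 64);
	return 0;
}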
-source kernel/trace/Kconfig +source "kernel/trace/Kconfig" config PROVIDE_OHCI1394_DMA_INIT bool "Remote debugging over FireWire early on boot" @@ -1702,7 +1699,6 @@ if RUNTIME_TESTING_MENU config LKDTM tristate "Linux Kernel Dump Test Tool Module" depends on DEBUG_FS - depends on BLOCK help This module enables testing of the different dumping mechanisms by inducing system failures at predefined crash points. @@ -1830,6 +1826,9 @@ config TEST_BITFIELD config TEST_UUID tristate "Test functions located in the uuid module at runtime" +config TEST_XARRAY + tristate "Test the XArray code at runtime" + config TEST_OVERFLOW tristate "Test check_*_overflow() functions at runtime" @@ -1875,6 +1874,19 @@ config TEST_LKM If unsure, say N. +config TEST_VMALLOC + tristate "Test module for stress/performance analysis of vmalloc allocator" + default n + depends on MMU + depends on m + help + This builds the "test_vmalloc" module that should be used for + stress and performance analysis. So, any new change for vmalloc + subsystem can be evaluated from performance and stability point + of view. + + If unsure, say N. + config TEST_USER_COPY tristate "Test user/kernel boundary protections" depends on m @@ -1982,11 +1994,59 @@ config TEST_DEBUG_VIRTUAL If unsure, say N. +config TEST_MEMCAT_P + tristate "Test memcat_p() helper function" + help + Test the memcat_p() helper for correctly merging two + pointer arrays together. + + If unsure, say N. + +config TEST_LIVEPATCH + tristate "Test livepatching" + default n + depends on DYNAMIC_DEBUG + depends on LIVEPATCH + depends on m + help + Test kernel livepatching features for correctness. The tests will + load test modules that will be livepatched in various scenarios. + + To run all the livepatching tests: + + make -C tools/testing/selftests TARGETS=livepatch run_tests + + Alternatively, individual tests may be invoked: + + tools/testing/selftests/livepatch/test-callbacks.sh + tools/testing/selftests/livepatch/test-livepatch.sh + tools/testing/selftests/livepatch/test-shadow-vars.sh + + If unsure, say N. + +config TEST_OBJAGG + tristate "Perform selftest on object aggreration manager" + default n + depends on OBJAGG + help + Enable this option to test object aggregation manager on boot + (or module load). + + +config TEST_STACKINIT + tristate "Test level of stack variable initialization" + help + Test if the kernel is zero-initializing stack variables and + padding. Coverage is controlled by compiler flags, + CONFIG_GCC_PLUGIN_STRUCTLEAK, CONFIG_GCC_PLUGIN_STRUCTLEAK_BYREF, + or CONFIG_GCC_PLUGIN_STRUCTLEAK_BYREF_ALL. + + If unsure, say N. + endif # RUNTIME_TESTING_MENU config MEMTEST bool "Memtest" - depends on HAVE_MEMBLOCK ---help--- This option adds a kernel parameter 'memtest', which allows memtest to be set. diff --git a/lib/Kconfig.kasan b/lib/Kconfig.kasan index d0bad1bd9a2b..9950b660e62d 100644 --- a/lib/Kconfig.kasan +++ b/lib/Kconfig.kasan @@ -1,36 +1,82 @@ +# This config refers to the generic KASAN mode. 
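For the TEST_MEMCAT_P option above: memcat_p() takes two NULL-terminated arrays of pointers and returns a newly allocated, NULL-terminated array holding the elements of both. A minimal sketch of the helper under test, assuming the result is kmalloc'ed and must be kfree'd by the caller:

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/string.h>

static int sketch_memcat_p(void)
{
	static int x = 1, y = 2, z = 3;
	int *a[] = { &x, &y, NULL };
	int *b[] = { &z, NULL };
	int **merged;

	merged = memcat_p(a, b);	/* { &x, &y, &z, NULL } */
	if (!merged)
		return -ENOMEM;

	kfree(merged);
	return 0;
}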
config HAVE_ARCH_KASAN bool -if HAVE_ARCH_KASAN +config HAVE_ARCH_KASAN_SW_TAGS + bool + +config CC_HAS_KASAN_GENERIC + def_bool $(cc-option, -fsanitize=kernel-address) + +config CC_HAS_KASAN_SW_TAGS + def_bool $(cc-option, -fsanitize=kernel-hwaddress) config KASAN - bool "KASan: runtime memory debugger" + bool "KASAN: runtime memory debugger" + depends on (HAVE_ARCH_KASAN && CC_HAS_KASAN_GENERIC) || \ + (HAVE_ARCH_KASAN_SW_TAGS && CC_HAS_KASAN_SW_TAGS) + depends on (SLUB && SYSFS) || (SLAB && !DEBUG_SLAB) + help + Enables KASAN (KernelAddressSANitizer) - runtime memory debugger, + designed to find out-of-bounds accesses and use-after-free bugs. + See Documentation/dev-tools/kasan.rst for details. + +choice + prompt "KASAN mode" + depends on KASAN + default KASAN_GENERIC + help + KASAN has two modes: generic KASAN (similar to userspace ASan, + x86_64/arm64/xtensa, enabled with CONFIG_KASAN_GENERIC) and + software tag-based KASAN (a version based on software memory + tagging, arm64 only, similar to userspace HWASan, enabled with + CONFIG_KASAN_SW_TAGS). + Both generic and tag-based KASAN are strictly debugging features. + +config KASAN_GENERIC + bool "Generic mode" + depends on HAVE_ARCH_KASAN && CC_HAS_KASAN_GENERIC depends on (SLUB && SYSFS) || (SLAB && !DEBUG_SLAB) select SLUB_DEBUG if SLUB select CONSTRUCTORS select STACKDEPOT help - Enables kernel address sanitizer - runtime memory debugger, - designed to find out-of-bounds accesses and use-after-free bugs. - This is strictly a debugging feature and it requires a gcc version - of 4.9.2 or later. Detection of out of bounds accesses to stack or - global variables requires gcc 5.0 or later. - This feature consumes about 1/8 of available memory and brings about - ~x3 performance slowdown. + Enables generic KASAN mode. + Supported in both GCC and Clang. With GCC it requires version 4.9.2 + or later for basic support and version 5.0 or later for detection of + out-of-bounds accesses for stack and global variables and for inline + instrumentation mode (CONFIG_KASAN_INLINE). With Clang it requires + version 3.7.0 or later and it doesn't support detection of + out-of-bounds accesses for global variables yet. + This mode consumes about 1/8th of available memory at kernel start + and introduces an overhead of ~x1.5 for the rest of the allocations. + The performance slowdown is ~x3. For better error detection enable CONFIG_STACKTRACE. - Currently CONFIG_KASAN doesn't work with CONFIG_DEBUG_SLAB + Currently CONFIG_KASAN_GENERIC doesn't work with CONFIG_DEBUG_SLAB (the resulting kernel does not boot). -config KASAN_EXTRA - bool "KAsan: extra checks" - depends on KASAN && DEBUG_KERNEL && !COMPILE_TEST +config KASAN_SW_TAGS + bool "Software tag-based mode" + depends on HAVE_ARCH_KASAN_SW_TAGS && CC_HAS_KASAN_SW_TAGS + depends on (SLUB && SYSFS) || (SLAB && !DEBUG_SLAB) + select SLUB_DEBUG if SLUB + select CONSTRUCTORS + select STACKDEPOT help - This enables further checks in the kernel address sanitizer, for now - it only includes the address-use-after-scope check that can lead - to excessive kernel stack usage, frame size warnings and longer - compile time. - https://gcc.gnu.org/bugzilla/show_bug.cgi?id=81715 has more + Enables software tag-based KASAN mode. + This mode requires Top Byte Ignore support by the CPU and therefore + is only supported for arm64. + This mode requires Clang version 7.0.0 or later. + This mode consumes about 1/16th of available memory at kernel start + and introduces an overhead of ~20% for the rest of the allocations. 
+ This mode may potentially introduce problems relating to pointer + casting and comparison, as it embeds tags into the top byte of each + pointer. + For better error detection enable CONFIG_STACKTRACE. + Currently CONFIG_KASAN_SW_TAGS doesn't work with CONFIG_DEBUG_SLAB + (the resulting kernel does not boot). +endchoice choice prompt "Instrumentation type" @@ -53,10 +99,32 @@ config KASAN_INLINE memory accesses. This is faster than outline (in some workloads it gives about x2 boost over outline instrumentation), but make kernel's .text size much bigger. - This requires a gcc version of 5.0 or later. + For CONFIG_KASAN_GENERIC this requires GCC 5.0 or later. endchoice +config KASAN_STACK_ENABLE + bool "Enable stack instrumentation (unsafe)" if CC_IS_CLANG && !COMPILE_TEST + default !(CLANG_VERSION < 90000) + depends on KASAN + help + The LLVM stack address sanitizer has a know problem that + causes excessive stack usage in a lot of functions, see + https://bugs.llvm.org/show_bug.cgi?id=38809 + Disabling asan-stack makes it safe to run kernels build + with clang-8 with KASAN enabled, though it loses some of + the functionality. + This feature is always disabled when compile-testing with clang-8 + or earlier to avoid cluttering the output in stack overflow + warnings, but clang-8 users can still enable it for builds without + CONFIG_COMPILE_TEST. On gcc and later clang versions it is + assumed to always be safe to use and enabled by default. + +config KASAN_STACK + int + default 1 if KASAN_STACK_ENABLE || CC_IS_GCC + default 0 + config KASAN_S390_4_LEVEL_PAGING bool "KASan: use 4-level paging" depends on KASAN && S390 @@ -67,11 +135,9 @@ config KASAN_S390_4_LEVEL_PAGING 4-level paging instead. config TEST_KASAN - tristate "Module for testing kasan for bug detection" + tristate "Module for testing KASAN for bug detection" depends on m && KASAN help This is a test module doing various nasty things like out of bounds accesses, use after free. It is useful for testing - kernel debugging features like kernel address sanitizer. - -endif + kernel debugging features like KASAN. diff --git a/lib/Kconfig.ubsan b/lib/Kconfig.ubsan index 98fa559ebd80..a2ae4a8e4fa6 100644 --- a/lib/Kconfig.ubsan +++ b/lib/Kconfig.ubsan @@ -27,15 +27,19 @@ config UBSAN_SANITIZE_ALL Enabling this option will get kernel image size increased significantly. -config UBSAN_ALIGNMENT - bool "Enable checking of pointers alignment" +config UBSAN_NO_ALIGNMENT + bool "Disable checking of pointers alignment" depends on UBSAN - default y if !HAVE_EFFICIENT_UNALIGNED_ACCESS + default y if HAVE_EFFICIENT_UNALIGNED_ACCESS help - This option enables detection of unaligned memory accesses. - Enabling this option on architectures that support unaligned + This option disables the check of unaligned memory accesses. + This option should be used when building allmodconfig. + Disabling this option on architectures that support unaligned accesses may produce a lot of false positives. 
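Both KASAN modes above report the same classes of bug; the TEST_KASAN module deliberately provokes them. A sketch of the kind of slab out-of-bounds access it triggers, modeled on (but not copied from) lib/test_kasan.c:

#include <linux/slab.h>

static void sketch_kasan_oob(void)
{
	char *p = kmalloc(123, GFP_KERNEL);

	if (!p)
		return;
	p[123] = 'x';	/* one byte past the allocation: KASAN splats here */
	kfree(p);
}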
+config UBSAN_ALIGNMENT + def_bool !UBSAN_NO_ALIGNMENT + config TEST_UBSAN tristate "Module for testing for undefined behavior detection" depends on m && UBSAN diff --git a/lib/Makefile b/lib/Makefile index 423876446810..647517940b29 100644 --- a/lib/Makefile +++ b/lib/Makefile @@ -18,13 +18,13 @@ KCOV_INSTRUMENT_debugobjects.o := n KCOV_INSTRUMENT_dynamic_debug.o := n lib-y := ctype.o string.o vsprintf.o cmdline.o \ - rbtree.o radix-tree.o timerqueue.o\ + rbtree.o radix-tree.o timerqueue.o xarray.o \ idr.o int_sqrt.o extable.o \ - sha1.o chacha20.o irq_regs.o argv_split.o \ + sha1.o chacha.o irq_regs.o argv_split.o \ flex_proportions.o ratelimit.o show_mem.o \ is_single_threaded.o plist.o decompress.o kobject_uevent.o \ earlycpio.o seq_buf.o siphash.o dec_and_lock.o \ - nmi_backtrace.o nodemask.o win_minmax.o + nmi_backtrace.o nodemask.o win_minmax.o memcat_p.o lib-$(CONFIG_PRINTK) += dump_stack.o lib-$(CONFIG_MMU) += ioremap.o @@ -53,11 +53,14 @@ obj-$(CONFIG_TEST_HASH) += test_hash.o test_siphash.o obj-$(CONFIG_TEST_IDA) += test_ida.o obj-$(CONFIG_TEST_KASAN) += test_kasan.o CFLAGS_test_kasan.o += -fno-builtin +CFLAGS_test_kasan.o += $(call cc-disable-warning, vla) obj-$(CONFIG_TEST_UBSAN) += test_ubsan.o +CFLAGS_test_ubsan.o += $(call cc-disable-warning, vla) UBSAN_SANITIZE_test_ubsan.o := y obj-$(CONFIG_TEST_KSTRTOX) += test-kstrtox.o obj-$(CONFIG_TEST_LIST_SORT) += test_list_sort.o obj-$(CONFIG_TEST_LKM) += test_module.o +obj-$(CONFIG_TEST_VMALLOC) += test_vmalloc.o obj-$(CONFIG_TEST_OVERFLOW) += test_overflow.o obj-$(CONFIG_TEST_RHASHTABLE) += test_rhashtable.o obj-$(CONFIG_TEST_SORT) += test_sort.o @@ -68,9 +71,15 @@ obj-$(CONFIG_TEST_PRINTF) += test_printf.o obj-$(CONFIG_TEST_BITMAP) += test_bitmap.o obj-$(CONFIG_TEST_BITFIELD) += test_bitfield.o obj-$(CONFIG_TEST_UUID) += test_uuid.o +obj-$(CONFIG_TEST_XARRAY) += test_xarray.o obj-$(CONFIG_TEST_PARMAN) += test_parman.o obj-$(CONFIG_TEST_KMOD) += test_kmod.o obj-$(CONFIG_TEST_DEBUG_VIRTUAL) += test_debug_virtual.o +obj-$(CONFIG_TEST_MEMCAT_P) += test_memcat_p.o +obj-$(CONFIG_TEST_OBJAGG) += test_objagg.o +obj-$(CONFIG_TEST_STACKINIT) += test_stackinit.o + +obj-$(CONFIG_TEST_LIVEPATCH) += livepatch/ ifeq ($(CONFIG_DEBUG_KOBJECT),y) CFLAGS_kobject.o += -DDEBUG @@ -270,3 +279,4 @@ obj-$(CONFIG_GENERIC_LIB_LSHRDI3) += lshrdi3.o obj-$(CONFIG_GENERIC_LIB_MULDI3) += muldi3.o obj-$(CONFIG_GENERIC_LIB_CMPDI2) += cmpdi2.o obj-$(CONFIG_GENERIC_LIB_UCMPDI2) += ucmpdi2.o +obj-$(CONFIG_OBJAGG) += objagg.o diff --git a/lib/assoc_array.c b/lib/assoc_array.c index c6659cb37033..edc3c14af41d 100644 --- a/lib/assoc_array.c +++ b/lib/assoc_array.c @@ -768,9 +768,11 @@ all_leaves_cluster_together: new_s0->index_key[i] = ops->get_key_chunk(index_key, i * ASSOC_ARRAY_KEY_CHUNK_SIZE); - blank = ULONG_MAX << (level & ASSOC_ARRAY_KEY_CHUNK_MASK); - pr_devel("blank off [%zu] %d: %lx\n", keylen - 1, level, blank); - new_s0->index_key[keylen - 1] &= ~blank; + if (level & ASSOC_ARRAY_KEY_CHUNK_MASK) { + blank = ULONG_MAX << (level & ASSOC_ARRAY_KEY_CHUNK_MASK); + pr_devel("blank off [%zu] %d: %lx\n", keylen - 1, level, blank); + new_s0->index_key[keylen - 1] &= ~blank; + } /* This now reduces to a node splitting exercise for which we'll need * to regenerate the disparity table. 
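Why the assoc_array hunk above guards the blanking step: when level is an exact multiple of ASSOC_ARRAY_KEY_CHUNK_SIZE, the shift count is zero and the old code wiped a chunk that was entirely valid. A sketch with ASSOC_ARRAY_KEY_CHUNK_SIZE == BITS_PER_LONG == 64 assumed:

#include <linux/kernel.h>

static unsigned long blank_unused_bits(unsigned long chunk, int level)
{
	unsigned long blank;

	if (level & 63) {		/* some bits of the chunk are unused */
		blank = ULONG_MAX << (level & 63);
		chunk &= ~blank;	/* keep only the (level & 63) valid bits */
	}
	/* level a multiple of 64: every bit is valid.  The old code still
	 * computed ULONG_MAX << 0 == ULONG_MAX here and so cleared the
	 * whole chunk; the guard added above skips the blanking instead.
	 */
	return chunk;
}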
@@ -1115,6 +1117,7 @@ struct assoc_array_edit *assoc_array_delete(struct assoc_array *array, index_key)) goto found_leaf; } + /* fall through */ case assoc_array_walk_tree_empty: case assoc_array_walk_found_wrong_shortcut: default: diff --git a/lib/bitmap.c b/lib/bitmap.c index 2fd07f6df0b8..98872e9025da 100644 --- a/lib/bitmap.c +++ b/lib/bitmap.c @@ -13,6 +13,7 @@ #include <linux/bitops.h> #include <linux/bug.h> #include <linux/kernel.h> +#include <linux/mm.h> #include <linux/slab.h> #include <linux/string.h> #include <linux/uaccess.h> @@ -36,11 +37,6 @@ * carefully filter out these unused bits from impacting their * results. * - * These operations actually hold to a slightly stronger rule: - * if you don't input any bitmaps to these ops that have some - * unused bits set, then they won't output any set unused bits - * in output bitmaps. - * * The byte ordering of bitmaps is more natural on little * endian architectures. See the big-endian headers * include/asm-ppc64/bitops.h and include/asm-s390/bitops.h @@ -447,7 +443,7 @@ int bitmap_parse_user(const char __user *ubuf, unsigned int ulen, unsigned long *maskp, int nmaskbits) { - if (!access_ok(VERIFY_READ, ubuf, ulen)) + if (!access_ok(ubuf, ulen)) return -EFAULT; return __bitmap_parse((const char __force *)ubuf, ulen, 1, maskp, nmaskbits); @@ -466,20 +462,18 @@ EXPORT_SYMBOL(bitmap_parse_user); * ranges if list is specified or hex digits grouped into comma-separated * sets of 8 digits/set. Returns the number of characters written to buf. * - * It is assumed that @buf is a pointer into a PAGE_SIZE area and that - * sufficient storage remains at @buf to accommodate the - * bitmap_print_to_pagebuf() output. + * It is assumed that @buf is a pointer into a PAGE_SIZE, page-aligned + * area and that sufficient storage remains at @buf to accommodate the + * bitmap_print_to_pagebuf() output. Returns the number of characters + * actually printed to @buf, excluding terminating '\0'. */ int bitmap_print_to_pagebuf(bool list, char *buf, const unsigned long *maskp, int nmaskbits) { - ptrdiff_t len = PTR_ALIGN(buf + PAGE_SIZE - 1, PAGE_SIZE) - buf; - int n = 0; + ptrdiff_t len = PAGE_SIZE - offset_in_page(buf); - if (len > 1) - n = list ? scnprintf(buf, len, "%*pbl\n", nmaskbits, maskp) : - scnprintf(buf, len, "%*pb\n", nmaskbits, maskp); - return n; + return list ? scnprintf(buf, len, "%*pbl\n", nmaskbits, maskp) : + scnprintf(buf, len, "%*pb\n", nmaskbits, maskp); } EXPORT_SYMBOL(bitmap_print_to_pagebuf); @@ -647,7 +641,7 @@ int bitmap_parselist_user(const char __user *ubuf, unsigned int ulen, unsigned long *maskp, int nmaskbits) { - if (!access_ok(VERIFY_READ, ubuf, ulen)) + if (!access_ok(ubuf, ulen)) return -EFAULT; return __bitmap_parselist((const char __force *)ubuf, ulen, 1, maskp, nmaskbits); diff --git a/lib/bsearch.c b/lib/bsearch.c index 18b445b010c3..82512fe7b33c 100644 --- a/lib/bsearch.c +++ b/lib/bsearch.c @@ -11,6 +11,7 @@ #include <linux/export.h> #include <linux/bsearch.h> +#include <linux/kprobes.h> /* * bsearch - binary search an array of elements @@ -53,3 +54,4 @@ void *bsearch(const void *key, const void *base, size_t num, size_t size, return NULL; } EXPORT_SYMBOL(bsearch); +NOKPROBE_SYMBOL(bsearch); diff --git a/lib/bust_spinlocks.c b/lib/bust_spinlocks.c index ab719495e2cb..8be59f84eaea 100644 --- a/lib/bust_spinlocks.c +++ b/lib/bust_spinlocks.c @@ -2,7 +2,8 @@ /* * lib/bust_spinlocks.c * - * Provides a minimal bust_spinlocks for architectures which don't have one of their own. 
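The NOKPROBE_SYMBOL() added to bsearch() above keeps the function itself off-limits to kprobes, presumably because it can be called on the kprobes path. For reference, a minimal caller of the kernel's bsearch(), using the signature visible in the hunk:

#include <linux/bsearch.h>
#include <linux/types.h>

static int cmp_int(const void *key, const void *elt)
{
	return *(const int *)key - *(const int *)elt;
}

static bool contains(const int *sorted, size_t n, int needle)
{
	return bsearch(&needle, sorted, n, sizeof(*sorted), cmp_int) != NULL;
}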
+ * Provides a minimal bust_spinlocks for architectures which don't + * have one of their own. * * bust_spinlocks() clears any spinlocks which would prevent oops, die(), BUG() * and panic() information from reaching the user. @@ -16,8 +17,7 @@ #include <linux/vt_kern.h> #include <linux/console.h> - -void __attribute__((weak)) bust_spinlocks(int yes) +void bust_spinlocks(int yes) { if (yes) { ++oops_in_progress; diff --git a/lib/chacha20.c b/lib/chacha.c index d907fec6a9ed..a46d2832dbab 100644 --- a/lib/chacha20.c +++ b/lib/chacha.c @@ -1,5 +1,5 @@ /* - * ChaCha20 256-bit cipher algorithm, RFC7539 + * The "hash function" used as the core of the ChaCha stream cipher (RFC7539) * * Copyright (C) 2015 Martin Willi * @@ -14,17 +14,16 @@ #include <linux/bitops.h> #include <linux/cryptohash.h> #include <asm/unaligned.h> -#include <crypto/chacha20.h> +#include <crypto/chacha.h> -void chacha20_block(u32 *state, u8 *stream) +static void chacha_permute(u32 *x, int nrounds) { - u32 x[16]; int i; - for (i = 0; i < ARRAY_SIZE(x); i++) - x[i] = state[i]; + /* whitelist the allowed round counts */ + WARN_ON_ONCE(nrounds != 20 && nrounds != 12); - for (i = 0; i < 20; i += 2) { + for (i = 0; i < nrounds; i += 2) { x[0] += x[4]; x[12] = rol32(x[12] ^ x[0], 16); x[1] += x[5]; x[13] = rol32(x[13] ^ x[1], 16); x[2] += x[6]; x[14] = rol32(x[14] ^ x[2], 16); @@ -65,10 +64,54 @@ void chacha20_block(u32 *state, u8 *stream) x[8] += x[13]; x[7] = rol32(x[7] ^ x[8], 7); x[9] += x[14]; x[4] = rol32(x[4] ^ x[9], 7); } +} + +/** + * chacha_block - generate one keystream block and increment block counter + * @state: input state matrix (16 32-bit words) + * @stream: output keystream block (64 bytes) + * @nrounds: number of rounds (20 or 12; 20 is recommended) + * + * This is the ChaCha core, a function from 64-byte strings to 64-byte strings. + * The caller has already converted the endianness of the input. This function + * also handles incrementing the block counter in the input matrix. + */ +void chacha_block(u32 *state, u8 *stream, int nrounds) +{ + u32 x[16]; + int i; + + memcpy(x, state, 64); + + chacha_permute(x, nrounds); for (i = 0; i < ARRAY_SIZE(x); i++) put_unaligned_le32(x[i] + state[i], &stream[i * sizeof(u32)]); state[12]++; } -EXPORT_SYMBOL(chacha20_block); +EXPORT_SYMBOL(chacha_block); + +/** + * hchacha_block - abbreviated ChaCha core, for XChaCha + * @in: input state matrix (16 32-bit words) + * @out: output (8 32-bit words) + * @nrounds: number of rounds (20 or 12; 20 is recommended) + * + * HChaCha is the ChaCha equivalent of HSalsa and is an intermediate step + * towards XChaCha (see https://cr.yp.to/snuffle/xsalsa-20081128.pdf). HChaCha + * skips the final addition of the initial state, and outputs only certain words + * of the state. It should not be used for streaming directly. + */ +void hchacha_block(const u32 *in, u32 *out, int nrounds) +{ + u32 x[16]; + + memcpy(x, in, 64); + + chacha_permute(x, nrounds); + + memcpy(&out[0], &x[0], 16); + memcpy(&out[4], &x[12], 16); +} +EXPORT_SYMBOL(hchacha_block); diff --git a/lib/cordic.c b/lib/cordic.c index 6cf477839ebd..8ef27c12956f 100644 --- a/lib/cordic.c +++ b/lib/cordic.c @@ -16,15 +16,6 @@ #include <linux/module.h> #include <linux/cordic.h> -#define CORDIC_ANGLE_GEN 39797 -#define CORDIC_PRECISION_SHIFT 16 -#define CORDIC_NUM_ITER (CORDIC_PRECISION_SHIFT + 2) - -#define FIXED(X) ((s32)((X) << CORDIC_PRECISION_SHIFT)) -#define FLOAT(X) (((X) >= 0) \ - ? 
((((X) >> (CORDIC_PRECISION_SHIFT - 1)) + 1) >> 1) \ - : -((((-(X)) >> (CORDIC_PRECISION_SHIFT - 1)) + 1) >> 1)) - static const s32 arctan_table[] = { 2949120, 1740967, @@ -64,16 +55,16 @@ struct cordic_iq cordic_calc_iq(s32 theta) coord.q = 0; angle = 0; - theta = FIXED(theta); + theta = CORDIC_FIXED(theta); signtheta = (theta < 0) ? -1 : 1; - theta = ((theta + FIXED(180) * signtheta) % FIXED(360)) - - FIXED(180) * signtheta; + theta = ((theta + CORDIC_FIXED(180) * signtheta) % CORDIC_FIXED(360)) - + CORDIC_FIXED(180) * signtheta; - if (FLOAT(theta) > 90) { - theta -= FIXED(180); + if (CORDIC_FLOAT(theta) > 90) { + theta -= CORDIC_FIXED(180); signx = -1; - } else if (FLOAT(theta) < -90) { - theta += FIXED(180); + } else if (CORDIC_FLOAT(theta) < -90) { + theta += CORDIC_FIXED(180); signx = -1; } diff --git a/lib/cpumask.c b/lib/cpumask.c index beca6244671a..087a3e9a0202 100644 --- a/lib/cpumask.c +++ b/lib/cpumask.c @@ -4,7 +4,8 @@ #include <linux/bitops.h> #include <linux/cpumask.h> #include <linux/export.h> -#include <linux/bootmem.h> +#include <linux/memblock.h> +#include <linux/numa.h> /** * cpumask_next - get the next cpu in a cpumask @@ -163,7 +164,7 @@ EXPORT_SYMBOL(zalloc_cpumask_var); */ void __init alloc_bootmem_cpumask_var(cpumask_var_t *mask) { - *mask = memblock_virt_alloc(cpumask_size(), 0); + *mask = memblock_alloc(cpumask_size(), SMP_CACHE_BYTES); } /** @@ -206,7 +207,7 @@ unsigned int cpumask_local_spread(unsigned int i, int node) /* Wrap: we always want a cpu. */ i %= num_online_cpus(); - if (node == -1) { + if (node == NUMA_NO_NODE) { for_each_cpu(cpu, cpu_online_mask) if (i-- == 0) return cpu; diff --git a/lib/crc32.c b/lib/crc32.c index 45b1d67a1767..4a20455d1f61 100644 --- a/lib/crc32.c +++ b/lib/crc32.c @@ -206,8 +206,8 @@ u32 __pure __weak __crc32c_le(u32 crc, unsigned char const *p, size_t len) EXPORT_SYMBOL(crc32_le); EXPORT_SYMBOL(__crc32c_le); -u32 crc32_le_base(u32, unsigned char const *, size_t) __alias(crc32_le); -u32 __crc32c_le_base(u32, unsigned char const *, size_t) __alias(__crc32c_le); +u32 __pure crc32_le_base(u32, unsigned char const *, size_t) __alias(crc32_le); +u32 __pure __crc32c_le_base(u32, unsigned char const *, size_t) __alias(__crc32c_le); /* * This multiplies the polynomials x and y modulo the given modulus. diff --git a/lib/debugobjects.c b/lib/debugobjects.c index 70935ed91125..55437fd5128b 100644 --- a/lib/debugobjects.c +++ b/lib/debugobjects.c @@ -135,7 +135,6 @@ static void fill_pool(void) if (!new) return; - kmemleak_ignore(new); raw_spin_lock_irqsave(&pool_lock, flags); hlist_add_head(&new->node, &obj_pool); debug_objects_allocated++; @@ -1128,16 +1127,14 @@ static int __init debug_objects_replace_static_objects(void) obj = kmem_cache_zalloc(obj_cache, GFP_KERNEL); if (!obj) goto free; - kmemleak_ignore(obj); hlist_add_head(&obj->node, &objects); } /* - * When debug_objects_mem_init() is called we know that only - * one CPU is up, so disabling interrupts is enough - * protection. This avoids the lockdep hell of lock ordering. + * debug_objects_mem_init() is now called early that only one CPU is up + * and interrupts have been disabled, so it is safe to replace the + * active object references. 
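Returning to lib/chacha.c above: a minimal sketch of driving the new chacha_block() interface, assuming the standard ChaCha state layout (the "expand 32-byte k" constants in words 0-3, the 256-bit key in words 4-11, the block counter in word 12, the nonce in words 13-15, all already little-endian-converted as the kerneldoc notes):

#include <crypto/chacha.h>
#include <linux/types.h>

static void sketch_one_block(const u32 *key, const u32 *nonce, u8 *out)
{
	u32 state[16] = {
		0x61707865, 0x3320646e, 0x79622d32, 0x6b206574,
		key[0], key[1], key[2], key[3],
		key[4], key[5], key[6], key[7],
		0,		/* block counter, bumped by chacha_block() */
		nonce[0], nonce[1], nonce[2],
	};

	chacha_block(state, out, 20);	/* 20 rounds; 12 is also accepted */
}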
*/ - local_irq_disable(); /* Remove the statically allocated objects from the pool */ hlist_for_each_entry_safe(obj, tmp, &obj_pool, node) @@ -1158,7 +1155,6 @@ static int __init debug_objects_replace_static_objects(void) cnt++; } } - local_irq_enable(); pr_debug("%d of %d active objects replaced\n", cnt, obj_pool_used); @@ -1184,7 +1180,8 @@ void __init debug_objects_mem_init(void) obj_cache = kmem_cache_create("debug_objects_cache", sizeof (struct debug_obj), 0, - SLAB_DEBUG_OBJECTS, NULL); + SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE, + NULL); if (!obj_cache || debug_objects_replace_static_objects()) { debug_objects_enabled = 0; diff --git a/lib/devres.c b/lib/devres.c index faccf1a037d0..69bed2f38306 100644 --- a/lib/devres.c +++ b/lib/devres.c @@ -134,7 +134,6 @@ EXPORT_SYMBOL(devm_iounmap); void __iomem *devm_ioremap_resource(struct device *dev, struct resource *res) { resource_size_t size; - const char *name; void __iomem *dest_ptr; BUG_ON(!dev); @@ -145,9 +144,8 @@ void __iomem *devm_ioremap_resource(struct device *dev, struct resource *res) } size = resource_size(res); - name = res->name ?: dev_name(dev); - if (!devm_request_mem_region(dev, res->start, size, name)) { + if (!devm_request_mem_region(dev, res->start, size, dev_name(dev))) { dev_err(dev, "can't request region for resource %pR\n", res); return IOMEM_ERR_PTR(-EBUSY); } diff --git a/lib/div64.c b/lib/div64.c index 01c8602bb6ff..ee146bb4c558 100644 --- a/lib/div64.c +++ b/lib/div64.c @@ -109,7 +109,7 @@ u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder) quot = div_u64_rem(dividend, divisor, &rem32); *remainder = rem32; } else { - int n = 1 + fls(high); + int n = fls(high); quot = div_u64(dividend >> n, divisor >> n); if (quot != 0) @@ -147,7 +147,7 @@ u64 div64_u64(u64 dividend, u64 divisor) if (high == 0) { quot = div_u64(dividend, divisor); } else { - int n = 1 + fls(high); + int n = fls(high); quot = div_u64(dividend >> n, divisor >> n); if (quot != 0) diff --git a/lib/dynamic_debug.c b/lib/dynamic_debug.c index c7c96bc7654a..7bdf98c37e91 100644 --- a/lib/dynamic_debug.c +++ b/lib/dynamic_debug.c @@ -188,7 +188,7 @@ static int ddebug_change(const struct ddebug_query *query, newflags = (dp->flags & mask) | flags; if (newflags == dp->flags) continue; -#ifdef HAVE_JUMP_LABEL +#ifdef CONFIG_JUMP_LABEL if (dp->flags & _DPRINTK_FLAGS_PRINT) { if (!(flags & _DPRINTK_FLAGS_PRINT)) static_branch_disable(&dp->key.dd_key_true); @@ -847,17 +847,19 @@ int ddebug_add_module(struct _ddebug *tab, unsigned int n, const char *name) { struct ddebug_table *dt; - const char *new_name; dt = kzalloc(sizeof(*dt), GFP_KERNEL); - if (dt == NULL) - return -ENOMEM; - new_name = kstrdup_const(name, GFP_KERNEL); - if (new_name == NULL) { - kfree(dt); + if (dt == NULL) { + pr_err("error adding module: %s\n", name); return -ENOMEM; } - dt->mod_name = new_name; + /* + * For built-in modules, name lives in .rodata and is + * immortal. For loaded modules, name points at the name[] + * member of struct module, which lives at least as long as + * this struct ddebug_table. 
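A worked example for the div64.c change above, where a u64/u64 divide is reduced to a 32-bit divide by shifting both operands right by n:

/* divisor = 0x100000000ULL, so high = divisor >> 32 = 1, fls(high) = 1:
 *
 *   old: n = 1 + fls(high) = 2  ->  divisor >> 2 = 0x40000000
 *   new: n = fls(high)     = 1  ->  divisor >> 1 = 0x80000000
 *
 * Shifting right by fls(high) always lands the divisor's most
 * significant bit at bit 31, so it still fits a 32-bit divide, but the
 * new code keeps one extra significant bit.  The estimated quotient is
 * therefore closer to the true result before the subsequent
 * correction step adjusts it.
 */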
+ */ + dt->mod_name = name; dt->num_ddebugs = n; dt->ddebugs = tab; @@ -868,7 +870,6 @@ int ddebug_add_module(struct _ddebug *tab, unsigned int n, vpr_info("%u debug prints in module %s\n", n, dt->mod_name); return 0; } -EXPORT_SYMBOL_GPL(ddebug_add_module); /* helper for ddebug_dyndbg_(boot|module)_param_cb */ static int ddebug_dyndbg_param_cb(char *param, char *val, @@ -913,7 +914,6 @@ int ddebug_dyndbg_module_param_cb(char *param, char *val, const char *module) static void ddebug_table_free(struct ddebug_table *dt) { list_del_init(&dt->link); - kfree_const(dt->mod_name); kfree(dt); } @@ -930,15 +930,15 @@ int ddebug_remove_module(const char *mod_name) mutex_lock(&ddebug_lock); list_for_each_entry_safe(dt, nextdt, &ddebug_tables, link) { - if (!strcmp(dt->mod_name, mod_name)) { + if (dt->mod_name == mod_name) { ddebug_table_free(dt); ret = 0; + break; } } mutex_unlock(&ddebug_lock); return ret; } -EXPORT_SYMBOL_GPL(ddebug_remove_module); static void ddebug_remove_all_tables(void) { diff --git a/lib/find_bit_benchmark.c b/lib/find_bit_benchmark.c index 5367ffa5c18f..f0e394dd2beb 100644 --- a/lib/find_bit_benchmark.c +++ b/lib/find_bit_benchmark.c @@ -108,14 +108,13 @@ static int __init test_find_next_and_bit(const void *bitmap, const void *bitmap2, unsigned long len) { unsigned long i, cnt; - cycles_t cycles; + ktime_t time; - cycles = get_cycles(); + time = ktime_get(); for (cnt = i = 0; i < BITMAP_LEN; cnt++) - i = find_next_and_bit(bitmap, bitmap2, BITMAP_LEN, i+1); - cycles = get_cycles() - cycles; - pr_err("find_next_and_bit:\t\t%llu cycles, %ld iterations\n", - (u64)cycles, cnt); + i = find_next_and_bit(bitmap, bitmap2, BITMAP_LEN, i + 1); + time = ktime_get() - time; + pr_err("find_next_and_bit: %18llu ns, %6ld iterations\n", time, cnt); return 0; } diff --git a/lib/fonts/Kconfig b/lib/fonts/Kconfig index 8fa0791e8a1e..3ecdd5204ec5 100644 --- a/lib/fonts/Kconfig +++ b/lib/fonts/Kconfig @@ -109,6 +109,15 @@ config FONT_SUN12x22 big letters (like the letters used in the SPARC PROM). If the standard font is unreadable for you, say Y, otherwise say N. +config FONT_TER16x32 + bool "Terminus 16x32 font (not supported by all drivers)" + depends on FRAMEBUFFER_CONSOLE && (!SPARC && FONTS || SPARC) + help + Terminus Font is a clean, fixed width bitmap font, designed + for long (8 and more hours per day) work with computers. + This is the high resolution, large version for use with HiDPI screens. + If the standard font is unreadable for you, say Y, otherwise say N. 
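For orientation in the large fontdata_ter16x32[] table added below: each 16x32 glyph is 32 rows of 16 pixels, two big-endian bytes per row, so 64 bytes per glyph and 256 * 64 == 16384 == FONTDATAMAX in total. A hypothetical indexing helper (not part of the diff) that makes the layout concrete:

static inline const unsigned char *ter16x32_row(unsigned int ch,
						unsigned int row)
{
	/* 64 bytes per glyph, 2 bytes (16 pixels) per row */
	return &fontdata_ter16x32[ch * 64 + row * 2];
}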
+ config FONT_AUTOSELECT def_bool y depends on !FONT_8x8 @@ -121,6 +130,7 @@ config FONT_AUTOSELECT depends on !FONT_SUN8x16 depends on !FONT_SUN12x22 depends on !FONT_10x18 + depends on !FONT_TER16x32 select FONT_8x16 endif # FONT_SUPPORT diff --git a/lib/fonts/Makefile b/lib/fonts/Makefile index d56f02dea83a..ed95070860de 100644 --- a/lib/fonts/Makefile +++ b/lib/fonts/Makefile @@ -14,6 +14,7 @@ font-objs-$(CONFIG_FONT_PEARL_8x8) += font_pearl_8x8.o font-objs-$(CONFIG_FONT_ACORN_8x8) += font_acorn_8x8.o font-objs-$(CONFIG_FONT_MINI_4x6) += font_mini_4x6.o font-objs-$(CONFIG_FONT_6x10) += font_6x10.o +font-objs-$(CONFIG_FONT_TER16x32) += font_ter16x32.o font-objs += $(font-objs-y) diff --git a/lib/fonts/font_ter16x32.c b/lib/fonts/font_ter16x32.c new file mode 100644 index 000000000000..3f0cf1ccdf3a --- /dev/null +++ b/lib/fonts/font_ter16x32.c @@ -0,0 +1,2072 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <linux/font.h> +#include <linux/module.h> + +#define FONTDATAMAX 16384 + +static const unsigned char fontdata_ter16x32[FONTDATAMAX] = { + + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x7f, 0xfc, 0x7f, 0xfc, + 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, + 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, + 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, + 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, + 0x7f, 0xfc, 0x7f, 0xfc, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0 */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x3f, 0xf8, 0x7f, 0xfc, + 0xf0, 0x1e, 0xe0, 0x0e, 0xe0, 0x0e, 0xe0, 0x0e, + 0xee, 0xee, 0xee, 0xee, 0xe0, 0x0e, 0xe0, 0x0e, + 0xe0, 0x0e, 0xe0, 0x0e, 0xef, 0xee, 0xe7, 0xce, + 0xe0, 0x0e, 0xe0, 0x0e, 0xe0, 0x0e, 0xf0, 0x1e, + 0x7f, 0xfc, 0x3f, 0xf8, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 1 */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x3f, 0xf8, 0x7f, 0xfc, + 0xff, 0xfe, 0xff, 0xfe, 0xff, 0xfe, 0xff, 0xfe, + 0xe3, 0x8e, 0xe3, 0x8e, 0xff, 0xfe, 0xff, 0xfe, + 0xff, 0xfe, 0xff, 0xfe, 0xe0, 0x0e, 0xf0, 0x1e, + 0xf8, 0x3e, 0xff, 0xfe, 0xff, 0xfe, 0xff, 0xfe, + 0x7f, 0xfc, 0x3f, 0xf8, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 2 */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x78, 0x3c, 0xfc, 0x7e, 0xfe, 0xfe, 0xff, 0xfe, + 0xff, 0xfe, 0xff, 0xfe, 0xff, 0xfe, 0xff, 0xfe, + 0x7f, 0xfc, 0x7f, 0xfc, 0x3f, 0xf8, 0x1f, 0xf0, + 0x0f, 0xe0, 0x07, 0xc0, 0x03, 0x80, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 3 */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x03, 0x80, 0x07, 0xc0, 0x0f, 0xe0, + 0x1f, 0xf0, 0x3f, 0xf8, 0x7f, 0xfc, 0xff, 0xfe, + 0xff, 0xfe, 0x7f, 0xfc, 0x3f, 0xf8, 0x1f, 0xf0, + 0x0f, 0xe0, 0x07, 0xc0, 0x03, 0x80, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 4 */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x07, 0xc0, 0x0f, 0xe0, + 0x0f, 0xe0, 0x0f, 0xe0, 0x0f, 0xe0, 0x0f, 0xe0, + 0x07, 0xc0, 0x03, 0x80, 0x3b, 0xb8, 0x7f, 0xfc, + 0xff, 0xfe, 0xff, 0xfe, 0xff, 0xfe, 0xff, 0xfe, + 0x7f, 0xfc, 0x3b, 0xb8, 0x03, 0x80, 0x03, 0x80, + 0x0f, 0xe0, 0x0f, 0xe0, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 5 */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 
0x00, + 0x00, 0x00, 0x00, 0x00, 0x03, 0x80, 0x03, 0x80, + 0x07, 0xc0, 0x0f, 0xe0, 0x1f, 0xf0, 0x3f, 0xf8, + 0x7f, 0xfc, 0x7f, 0xfc, 0xff, 0xfe, 0xff, 0xfe, + 0xff, 0xfe, 0xff, 0xfe, 0xff, 0xfe, 0x7b, 0xbc, + 0x3b, 0xb8, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, + 0x0f, 0xe0, 0x0f, 0xe0, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 6 */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x03, 0xc0, 0x07, 0xe0, 0x0f, 0xf0, 0x0f, 0xf0, + 0x0f, 0xf0, 0x0f, 0xf0, 0x07, 0xe0, 0x03, 0xc0, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 7 */ + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xfc, 0x3f, 0xf8, 0x1f, 0xf0, 0x0f, 0xf0, 0x0f, + 0xf0, 0x0f, 0xf0, 0x0f, 0xf8, 0x1f, 0xfc, 0x3f, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 8 */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x03, 0xc0, 0x07, 0xe0, 0x0e, 0x70, 0x0c, 0x30, + 0x0c, 0x30, 0x0e, 0x70, 0x07, 0xe0, 0x03, 0xc0, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 9 */ + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xfc, 0x3f, 0xf8, 0x1f, 0xf1, 0x8f, 0xf3, 0xcf, + 0xf3, 0xcf, 0xf1, 0x8f, 0xf8, 0x1f, 0xfc, 0x3f, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 10 */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x03, 0xfe, 0x03, 0xfe, + 0x00, 0x1e, 0x00, 0x3e, 0x00, 0x76, 0x00, 0xe6, + 0x01, 0xc6, 0x03, 0x86, 0x3f, 0xe0, 0x7f, 0xf0, + 0xf0, 0x78, 0xe0, 0x38, 0xe0, 0x38, 0xe0, 0x38, + 0xe0, 0x38, 0xe0, 0x38, 0xe0, 0x38, 0xf0, 0x78, + 0x7f, 0xf0, 0x3f, 0xe0, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 11 */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x1f, 0xf0, 0x3f, 0xf8, + 0x78, 0x3c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, + 0x70, 0x1c, 0x70, 0x1c, 0x78, 0x3c, 0x3f, 0xf8, + 0x1f, 0xf0, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, + 0x7f, 0xfc, 0x7f, 0xfc, 0x03, 0x80, 0x03, 0x80, + 0x03, 0x80, 0x03, 0x80, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 12 */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x3f, 0xfc, 0x3f, 0xfc, + 0x38, 0x1c, 0x38, 0x1c, 0x38, 0x1c, 0x38, 0x1c, + 0x3f, 0xfc, 0x3f, 0xfc, 0x38, 0x00, 0x38, 0x00, + 0x38, 0x00, 0x38, 0x00, 0x38, 0x00, 0x38, 0x00, + 0x38, 0x00, 0x38, 0x00, 0x38, 0x00, 0x38, 0x00, + 0xf8, 0x00, 0xf0, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 13 */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x7f, 0xfe, 0x7f, 0xfe, + 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, + 0x7f, 0xfe, 0x7f, 0xfe, 0x70, 0x0e, 0x70, 0x0e, + 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, + 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x3e, + 0xf0, 0x3c, 0xe0, 0x00, 
0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 14 */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x80, + 0x03, 0x80, 0x03, 0x80, 0x73, 0x9c, 0x73, 0x9c, + 0x3b, 0xb8, 0x1f, 0xf0, 0x0f, 0xe0, 0x7c, 0x7c, + 0x7c, 0x7c, 0x0f, 0xe0, 0x1f, 0xf0, 0x3b, 0xb8, + 0x73, 0x9c, 0x73, 0x9c, 0x03, 0x80, 0x03, 0x80, + 0x03, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 15 */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0xc0, 0x00, 0xf0, 0x00, 0xfc, 0x00, 0xff, 0x00, + 0xff, 0xc0, 0xff, 0xf0, 0xff, 0xfc, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xfc, 0xff, 0xf0, 0xff, 0xc0, + 0xff, 0x00, 0xfc, 0x00, 0xf0, 0x00, 0xc0, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 16 */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x03, 0x00, 0x0f, 0x00, 0x3f, 0x00, 0xff, + 0x03, 0xff, 0x0f, 0xff, 0x3f, 0xff, 0xff, 0xff, + 0xff, 0xff, 0x3f, 0xff, 0x0f, 0xff, 0x03, 0xff, + 0x00, 0xff, 0x00, 0x3f, 0x00, 0x0f, 0x00, 0x03, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 17 */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x03, 0x80, 0x07, 0xc0, + 0x0f, 0xe0, 0x1f, 0xf0, 0x3b, 0xb8, 0x73, 0x9c, + 0x63, 0x8c, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, + 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x63, 0x8c, + 0x73, 0x9c, 0x3b, 0xb8, 0x1f, 0xf0, 0x0f, 0xe0, + 0x07, 0xc0, 0x03, 0x80, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 18 */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x1c, 0x70, 0x1c, 0x70, + 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, + 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, + 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x1c, 0x70, 0x1c, 0x70, + 0x1c, 0x70, 0x1c, 0x70, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 19 */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x1f, 0xfe, 0x3f, 0xfe, + 0x79, 0xce, 0x71, 0xce, 0x71, 0xce, 0x71, 0xce, + 0x71, 0xce, 0x71, 0xce, 0x79, 0xce, 0x3f, 0xce, + 0x1f, 0xce, 0x01, 0xce, 0x01, 0xce, 0x01, 0xce, + 0x01, 0xce, 0x01, 0xce, 0x01, 0xce, 0x01, 0xce, + 0x01, 0xce, 0x01, 0xce, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 20 */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x07, 0xe0, 0x0f, 0xf0, 0x1e, 0x78, 0x1c, 0x38, + 0x1c, 0x00, 0x1e, 0x00, 0x0f, 0xc0, 0x0f, 0xe0, + 0x1c, 0xf0, 0x1c, 0x78, 0x1c, 0x38, 0x1c, 0x38, + 0x1c, 0x38, 0x1e, 0x38, 0x0f, 0x38, 0x07, 0xf0, + 0x03, 0xf0, 0x00, 0x78, 0x00, 0x38, 0x1c, 0x38, + 0x1e, 0x78, 0x0f, 0xf0, 0x07, 0xe0, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 21 */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x7f, 0xfe, 0x7f, 0xfe, + 0x7f, 0xfe, 0x7f, 0xfe, 0x7f, 0xfe, 0x7f, 0xfe, + 0x7f, 0xfe, 0x7f, 0xfe, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 22 */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x03, 0x80, 0x07, 0xc0, + 0x0f, 0xe0, 0x1f, 0xf0, 0x3b, 0xb8, 0x73, 0x9c, + 0x63, 0x8c, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 
+ 0x03, 0x80, 0x63, 0x8c, 0x73, 0x9c, 0x3b, 0xb8, + 0x1f, 0xf0, 0x0f, 0xe0, 0x07, 0xc0, 0x03, 0x80, + 0x7f, 0xfc, 0x7f, 0xfc, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 23 */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x03, 0x80, 0x07, 0xc0, + 0x0f, 0xe0, 0x1f, 0xf0, 0x3b, 0xb8, 0x73, 0x9c, + 0x63, 0x8c, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, + 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, + 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, + 0x03, 0x80, 0x03, 0x80, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 24 */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x03, 0x80, 0x03, 0x80, + 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, + 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, + 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x63, 0x8c, + 0x73, 0x9c, 0x3b, 0xb8, 0x1f, 0xf0, 0x0f, 0xe0, + 0x07, 0xc0, 0x03, 0x80, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 25 */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0xc0, 0x00, 0xe0, 0x00, 0x70, + 0x00, 0x38, 0x00, 0x1c, 0x7f, 0xfe, 0x7f, 0xfe, + 0x7f, 0xfe, 0x00, 0x1c, 0x00, 0x38, 0x00, 0x70, + 0x00, 0xe0, 0x00, 0xc0, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 26 */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x03, 0x00, 0x07, 0x00, 0x0e, 0x00, + 0x1c, 0x00, 0x38, 0x00, 0x7f, 0xfe, 0x7f, 0xfe, + 0x7f, 0xfe, 0x38, 0x00, 0x1c, 0x00, 0x0e, 0x00, + 0x07, 0x00, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 27 */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x70, 0x00, 0x70, 0x00, + 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, + 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, + 0x7f, 0xfc, 0x7f, 0xfc, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 28 */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x06, 0x60, 0x0e, 0x70, 0x1c, 0x38, + 0x38, 0x1c, 0x70, 0x0e, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0x70, 0x0e, 0x38, 0x1c, 0x1c, 0x38, + 0x0e, 0x70, 0x06, 0x60, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 29 */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x01, 0x80, 0x01, 0x80, 0x03, 0xc0, 0x03, 0xc0, + 0x07, 0xe0, 0x07, 0xe0, 0x0f, 0xf0, 0x0f, 0xf0, + 0x1f, 0xf8, 0x1f, 0xf8, 0x3f, 0xfc, 0x3f, 0xfc, + 0x7f, 0xfe, 0x7f, 0xfe, 0xff, 0xff, 0xff, 0xff, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 30 */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0xff, 0xff, 0xff, 0xff, 0x7f, 0xfe, 0x7f, 0xfe, + 0x3f, 0xfc, 0x3f, 0xfc, 0x1f, 0xf8, 0x1f, 0xf8, + 0x0f, 0xf0, 0x0f, 0xf0, 0x07, 0xe0, 0x07, 0xe0, + 0x03, 0xc0, 0x03, 0xc0, 0x01, 0x80, 0x01, 0x80, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 31 */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 32 */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x03, 0x80, 0x03, 0x80, + 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, + 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, + 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x03, 0x80, 0x03, 0x80, + 0x03, 0x80, 0x03, 0x80, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 33 */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, + 0x1c, 0x70, 0x1c, 0x70, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 34 */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x1c, 0x70, 0x1c, 0x70, + 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x7f, 0xfc, + 0x7f, 0xfc, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, + 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x7f, 0xfc, + 0x7f, 0xfc, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, + 0x1c, 0x70, 0x1c, 0x70, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 35 */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x1f, 0xf0, + 0x3f, 0xf8, 0x7b, 0xbc, 0x73, 0x9c, 0x73, 0x80, + 0x73, 0x80, 0x73, 0x80, 0x7b, 0x80, 0x3f, 0xf0, + 0x1f, 0xf8, 0x03, 0xbc, 0x03, 0x9c, 0x03, 0x9c, + 0x03, 0x9c, 0x73, 0x9c, 0x7b, 0xbc, 0x3f, 0xf8, + 0x1f, 0xf0, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 36 */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x1f, 0x1c, 0x3f, 0x9c, + 0x3b, 0xb8, 0x3b, 0xb8, 0x3f, 0xf0, 0x1f, 0x70, + 0x00, 0xe0, 0x00, 0xe0, 0x01, 0xc0, 0x01, 0xc0, + 0x03, 0x80, 0x03, 0x80, 0x07, 0x00, 0x07, 0x00, + 0x0e, 0xf8, 0x0f, 0xfc, 0x1d, 0xdc, 0x1d, 0xdc, + 0x39, 0xfc, 0x38, 0xf8, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 37 */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x0f, 0xc0, 0x1f, 0xe0, + 0x38, 0x70, 0x38, 0x70, 0x38, 0x70, 0x38, 0x70, + 0x38, 0x70, 0x1c, 0xe0, 0x0f, 0xc0, 0x0f, 0x80, + 0x1f, 0xce, 0x38, 0xee, 0x70, 0x7c, 0x70, 0x38, + 0x70, 0x38, 0x70, 0x38, 0x70, 0x38, 0x78, 0x7c, + 0x3f, 0xee, 0x1f, 0xce, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 38 */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, + 0x03, 0x80, 0x03, 0x80, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 39 */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0xe0, 0x01, 0xc0, + 0x03, 0x80, 0x07, 0x00, 0x07, 0x00, 0x0e, 0x00, + 0x0e, 0x00, 0x0e, 0x00, 0x0e, 0x00, 0x0e, 0x00, + 0x0e, 0x00, 0x0e, 0x00, 0x0e, 0x00, 0x0e, 0x00, + 0x0e, 0x00, 0x07, 0x00, 0x07, 0x00, 0x03, 0x80, + 0x01, 0xc0, 0x00, 0xe0, 0x00, 0x00, 0x00, 0x00, + 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 40 */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x0e, 0x00, 0x07, 0x00, + 0x03, 0x80, 0x01, 0xc0, 0x01, 0xc0, 0x00, 0xe0, + 0x00, 0xe0, 0x00, 0xe0, 0x00, 0xe0, 0x00, 0xe0, + 0x00, 0xe0, 0x00, 0xe0, 0x00, 0xe0, 0x00, 0xe0, + 0x00, 0xe0, 0x01, 0xc0, 0x01, 0xc0, 0x03, 0x80, + 0x07, 0x00, 0x0e, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 41 */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x38, 0x38, 0x1c, 0x70, + 0x0e, 0xe0, 0x07, 0xc0, 0x03, 0x80, 0x7f, 0xfc, + 0x7f, 0xfc, 0x03, 0x80, 0x07, 0xc0, 0x0e, 0xe0, + 0x1c, 0x70, 0x38, 0x38, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 42 */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x03, 0x80, 0x03, 0x80, + 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x7f, 0xfc, + 0x7f, 0xfc, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, + 0x03, 0x80, 0x03, 0x80, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 43 */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x03, 0x80, 0x03, 0x80, + 0x03, 0x80, 0x03, 0x80, 0x07, 0x00, 0x0e, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 44 */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7f, 0xfc, + 0x7f, 0xfc, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 45 */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x03, 0x80, 0x03, 0x80, + 0x03, 0x80, 0x03, 0x80, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 46 */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x1c, 0x00, 0x1c, + 0x00, 0x38, 0x00, 0x38, 0x00, 0x70, 0x00, 0x70, + 0x00, 0xe0, 0x00, 0xe0, 0x01, 0xc0, 0x01, 0xc0, + 0x03, 0x80, 0x03, 0x80, 0x07, 0x00, 0x07, 0x00, + 0x0e, 0x00, 0x0e, 0x00, 0x1c, 0x00, 0x1c, 0x00, + 0x38, 0x00, 0x38, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 47 */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x1f, 0xf0, 0x3f, 0xf8, + 0x78, 0x3c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x3c, + 0x70, 0x7c, 0x70, 0xfc, 0x71, 0xdc, 0x73, 0x9c, + 0x77, 0x1c, 0x7e, 0x1c, 0x7c, 0x1c, 0x78, 0x1c, + 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x78, 0x3c, + 0x3f, 0xf8, 0x1f, 0xf0, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 48 */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x03, 0x80, 0x07, 0x80, + 0x0f, 0x80, 0x1f, 0x80, 0x1f, 0x80, 0x03, 0x80, + 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, + 0x03, 0x80, 0x03, 0x80, 
+ [glyph bitmap rows for character codes 49 through 222, in CP437 order
+  (digits, Latin letters, punctuation, accented variants, shade and
+  box-drawing characters): each glyph is 64 bytes of pixel data, 32 rows
+  of 16 pixels at two bytes per row, with a /* NN */ comment marking
+  each character code]
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 223 */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x1f, 0xee, 0x3f, 0xfe, 0x78, 0x3c, 0x70, 0x38, + 0x70, 0x38, 0x70, 0x38, 0x70, 0x38, 0x70, 0x38, + 0x70, 0x38, 0x70, 0x38, 0x70, 0x38, 0x78, 0x3c, + 0x3f, 0xfe, 0x1f, 0xee, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 224 */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x3f, 0xe0, 0x7f, 0xf0, + 0x70, 0x78, 0x70, 0x38, 0x70, 0x38, 0x70, 0x38, + 0x70, 0x38, 0x70, 0x70, 0x7f, 0xf0, 0x7f, 0xf0, + 0x70, 0x38, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, + 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x3c, + 0x7f, 0xf8, 0x7f, 0xf0, 0x70, 0x00, 0x70, 0x00, + 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x00, 0x00, /* 225 */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x7f, 0xfc, 0x7f, 0xfc, + 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, + 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, + 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, + 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, + 0x70, 0x00, 0x70, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 226 */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x7f, 0xfc, 0x7f, 0xfc, 0x70, 0x1c, 0x70, 0x1c, + 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, + 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, + 0x70, 0x1c, 0x70, 0x1c, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 227 */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x7f, 0xfc, 0x7f, 0xfc, + 0x70, 0x00, 0x38, 0x00, 0x1c, 0x00, 0x0e, 0x00, + 0x07, 0x00, 0x03, 0x80, 0x01, 0xc0, 0x00, 0xe0, + 0x00, 0xe0, 0x01, 0xc0, 0x03, 0x80, 0x07, 0x00, + 0x0e, 0x00, 0x1c, 0x00, 0x38, 0x00, 0x70, 0x00, + 0x7f, 0xfc, 0x7f, 0xfc, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 228 */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x1f, 0xfe, 0x3f, 0xfe, 0x78, 0xf0, 0x70, 0x78, + 0x70, 0x3c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, + 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x78, 0x3c, + 0x3f, 0xf8, 0x1f, 0xf0, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 229 */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, + 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, + 0x70, 0x1c, 0x70, 0x3c, 0x70, 0x7c, 0x70, 0xfc, + 0x7f, 0xdc, 0x7f, 0x9c, 0x70, 0x00, 0x70, 0x00, + 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x00, 0x00, /* 230 */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x7f, 0xfc, 0x7f, 0xfc, 0x03, 0x80, 0x03, 0x80, + 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, + 0x03, 0x80, 0x03, 0x80, 0x03, 
0x80, 0x03, 0xc0, + 0x01, 0xf8, 0x00, 0xf8, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 231 */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x03, 0x80, 0x03, 0x80, + 0x1f, 0xf0, 0x3f, 0xf8, 0x7b, 0xbc, 0x73, 0x9c, + 0x73, 0x9c, 0x73, 0x9c, 0x73, 0x9c, 0x73, 0x9c, + 0x73, 0x9c, 0x73, 0x9c, 0x73, 0x9c, 0x73, 0x9c, + 0x73, 0x9c, 0x7b, 0xbc, 0x3f, 0xf8, 0x1f, 0xf0, + 0x03, 0x80, 0x03, 0x80, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 232 */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x1f, 0xf0, 0x3f, 0xf8, + 0x78, 0x3c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, + 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x77, 0xdc, + 0x77, 0xdc, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, + 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x78, 0x3c, + 0x3f, 0xf8, 0x1f, 0xf0, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 233 */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x1f, 0xf0, 0x3f, 0xf8, + 0x78, 0x3c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, + 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, + 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, + 0x38, 0x38, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, + 0x7c, 0x7c, 0x7c, 0x7c, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 234 */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x1f, 0xf0, 0x1f, 0xf0, + 0x0e, 0x00, 0x07, 0x00, 0x03, 0x80, 0x01, 0xc0, + 0x0f, 0xe0, 0x1f, 0xf0, 0x38, 0x38, 0x70, 0x1c, + 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, + 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x78, 0x3c, + 0x3f, 0xf8, 0x1f, 0xf0, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 235 */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3e, 0xf8, + 0x7f, 0xfc, 0xe7, 0xce, 0xe3, 0x8e, 0xe3, 0x8e, + 0xe3, 0x8e, 0xe3, 0x8e, 0xe7, 0xce, 0x7f, 0xfc, + 0x3e, 0xf8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 236 */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x1c, 0x00, 0x1c, + 0x00, 0x38, 0x00, 0x38, 0x0f, 0xf0, 0x1f, 0xf8, + 0x38, 0xfc, 0x38, 0xfc, 0x39, 0xdc, 0x39, 0xdc, + 0x3b, 0x9c, 0x3b, 0x9c, 0x3f, 0x1c, 0x3f, 0x1c, + 0x1f, 0xf8, 0x0f, 0xf0, 0x1c, 0x00, 0x1c, 0x00, + 0x38, 0x00, 0x38, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 237 */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x07, 0xfc, 0x1f, 0xfc, 0x3c, 0x00, + 0x38, 0x00, 0x70, 0x00, 0x70, 0x00, 0x7f, 0xfc, + 0x7f, 0xfc, 0x70, 0x00, 0x70, 0x00, 0x38, 0x00, + 0x3c, 0x00, 0x1f, 0xfc, 0x07, 0xfc, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 238 */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x07, 0xc0, 0x1f, 0xf0, + 0x3c, 0x78, 0x38, 0x38, 0x70, 0x1c, 0x70, 0x1c, + 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, + 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, + 0x70, 0x1c, 0x70, 0x1c, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 239 */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x7f, 0xfc, 0x7f, 0xfc, 0x00, 
0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7f, 0xfc, + 0x7f, 0xfc, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x7f, 0xfc, 0x7f, 0xfc, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 240 */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x03, 0x80, 0x03, 0x80, + 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x7f, 0xfc, + 0x7f, 0xfc, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, + 0x03, 0x80, 0x03, 0x80, 0x00, 0x00, 0x00, 0x00, + 0x7f, 0xfc, 0x7f, 0xfc, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 241 */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x1c, 0x00, 0x0e, 0x00, + 0x07, 0x00, 0x03, 0x80, 0x01, 0xc0, 0x00, 0xe0, + 0x00, 0x70, 0x00, 0x38, 0x00, 0x38, 0x00, 0x70, + 0x00, 0xe0, 0x01, 0xc0, 0x03, 0x80, 0x07, 0x00, + 0x0e, 0x00, 0x1c, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x3f, 0xfc, 0x3f, 0xfc, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 242 */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x38, 0x00, 0x70, + 0x00, 0xe0, 0x01, 0xc0, 0x03, 0x80, 0x07, 0x00, + 0x0e, 0x00, 0x1c, 0x00, 0x1c, 0x00, 0x0e, 0x00, + 0x07, 0x00, 0x03, 0x80, 0x01, 0xc0, 0x00, 0xe0, + 0x00, 0x70, 0x00, 0x38, 0x00, 0x00, 0x00, 0x00, + 0x3f, 0xfc, 0x3f, 0xfc, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 243 */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x01, 0xf8, 0x03, 0xfc, + 0x03, 0x9c, 0x03, 0x9c, 0x03, 0x80, 0x03, 0x80, + 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, + 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, + 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, + 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, + 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, /* 244 */ + 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, + 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, + 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, + 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, + 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, + 0x03, 0x80, 0x03, 0x80, 0x73, 0x80, 0x73, 0x80, + 0x7f, 0x80, 0x3f, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 245 */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, + 0x03, 0x80, 0x00, 0x00, 0x00, 0x00, 0x7f, 0xfc, + 0x7f, 0xfc, 0x00, 0x00, 0x00, 0x00, 0x03, 0x80, + 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 246 */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3e, 0x1c, + 0x7f, 0xbc, 0x7b, 0xfc, 0x70, 0xf8, 0x00, 0x00, + 0x00, 0x00, 0x3e, 0x1c, 0x7f, 0xbc, 0x7b, 0xfc, + 0x70, 0xf8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 247 */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x0f, 0xe0, 0x1f, 0xf0, 0x1c, 0x70, 0x1c, 0x70, + 0x1c, 0x70, 0x1c, 0x70, 0x1f, 0xf0, 0x0f, 0xe0, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 248 */ + 0x00, 
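[Editorial sketch, not part of the patch] Once the descriptor below is
registered in lib/fonts/fonts.c (also in this diff), a console or framebuffer
driver can fetch the font by name through the existing lookup helper. A
minimal, hypothetical use (render_glyphs() is illustrative only; find_font()
and the font_desc fields are as defined in this file and lib/fonts/fonts.c):

	const struct font_desc *font = find_font("TER16x32");

	if (font)
		render_glyphs(font->data, font->width, font->height);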
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x03, 0xc0, 0x07, 0xe0, 0x07, 0xe0, + 0x07, 0xe0, 0x07, 0xe0, 0x03, 0xc0, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 249 */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x03, 0x80, 0x03, 0x80, + 0x03, 0x80, 0x03, 0x80, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 250 */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3e, + 0x00, 0x3e, 0x00, 0x38, 0x00, 0x38, 0x00, 0x38, + 0x00, 0x38, 0x00, 0x38, 0x00, 0x38, 0x00, 0x38, + 0x00, 0x38, 0x00, 0x38, 0x00, 0x38, 0x70, 0x38, + 0x70, 0x38, 0x70, 0x38, 0x78, 0x38, 0x3c, 0x38, + 0x1e, 0x38, 0x0f, 0x38, 0x07, 0xb8, 0x03, 0xf8, + 0x01, 0xf8, 0x00, 0xf8, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 251 */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x1f, 0xe0, 0x1f, 0xf0, 0x1c, 0x38, 0x1c, 0x38, + 0x1c, 0x38, 0x1c, 0x38, 0x1c, 0x38, 0x1c, 0x38, + 0x1c, 0x38, 0x1c, 0x38, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 252 */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0f, 0xe0, + 0x1f, 0xf0, 0x1c, 0x70, 0x1c, 0x70, 0x00, 0xe0, + 0x01, 0xc0, 0x03, 0x80, 0x07, 0x00, 0x0e, 0x00, + 0x1f, 0xf0, 0x1f, 0xf0, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 253 */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x1f, 0xf8, 0x1f, 0xf8, + 0x1f, 0xf8, 0x1f, 0xf8, 0x1f, 0xf8, 0x1f, 0xf8, + 0x1f, 0xf8, 0x1f, 0xf8, 0x1f, 0xf8, 0x1f, 0xf8, + 0x1f, 0xf8, 0x1f, 0xf8, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 254 */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 255 */ + +}; + + +const struct font_desc font_ter_16x32 = { + .idx = TER16x32_IDX, + .name = "TER16x32", + .width = 16, + .height = 32, + .data = fontdata_ter16x32, +#ifdef __sparc__ + .pref = 5, +#else + .pref = -1, +#endif +}; diff --git a/lib/fonts/fonts.c b/lib/fonts/fonts.c index 823376ca0a8b..9969358a7af5 100644 --- a/lib/fonts/fonts.c +++ b/lib/fonts/fonts.c @@ -67,6 +67,10 @@ static const struct font_desc *fonts[] = { #undef NO_FONTS &font_6x10, #endif +#ifdef CONFIG_FONT_TER16x32 +#undef NO_FONTS + &font_ter_16x32, +#endif }; #define num_fonts ARRAY_SIZE(fonts) diff --git a/lib/gcd.c b/lib/gcd.c index 227dea924425..7948ab27f0a4 100644 --- a/lib/gcd.c +++ b/lib/gcd.c @@ -10,7 +10,7 
@@ * has decent hardware division. */ -#if !defined(CONFIG_CPU_NO_EFFICIENT_FFS) && !defined(CPU_NO_EFFICIENT_FFS) +#if !defined(CONFIG_CPU_NO_EFFICIENT_FFS) /* If __ffs is available, the even/odd algorithm benchmarks slower. */ diff --git a/lib/gen_crc64table.c b/lib/gen_crc64table.c index 9011926e4162..094b43aef8db 100644 --- a/lib/gen_crc64table.c +++ b/lib/gen_crc64table.c @@ -16,8 +16,6 @@ #include <inttypes.h> #include <stdio.h> -#include <linux/swab.h> - #define CRC64_ECMA182_POLY 0x42F0E1EBA9EA3693ULL static uint64_t crc64_table[256] = {0}; diff --git a/lib/genalloc.c b/lib/genalloc.c index ca06adc4f445..7e85d1e37a6e 100644 --- a/lib/genalloc.c +++ b/lib/genalloc.c @@ -35,6 +35,7 @@ #include <linux/interrupt.h> #include <linux/genalloc.h> #include <linux/of_device.h> +#include <linux/vmalloc.h> static inline size_t chunk_size(const struct gen_pool_chunk *chunk) { @@ -187,7 +188,7 @@ int gen_pool_add_virt(struct gen_pool *pool, unsigned long virt, phys_addr_t phy int nbytes = sizeof(struct gen_pool_chunk) + BITS_TO_LONGS(nbits) * sizeof(long); - chunk = kzalloc_node(nbytes, GFP_KERNEL, nid); + chunk = vzalloc_node(nbytes, nid); if (unlikely(chunk == NULL)) return -ENOMEM; @@ -251,7 +252,7 @@ void gen_pool_destroy(struct gen_pool *pool) bit = find_next_bit(chunk->bits, end_bit, 0); BUG_ON(bit < end_bit); - kfree(chunk); + vfree(chunk); } kfree_const(pool->name); kfree(pool); @@ -311,7 +312,7 @@ unsigned long gen_pool_alloc_algo(struct gen_pool *pool, size_t size, end_bit = chunk_size(chunk) >> order; retry: start_bit = algo(chunk->bits, end_bit, start_bit, - nbits, data, pool); + nbits, data, pool, chunk->start_addr); if (start_bit >= end_bit) continue; remain = bitmap_set_ll(chunk->bits, start_bit, nbits); @@ -525,7 +526,7 @@ EXPORT_SYMBOL(gen_pool_set_algo); */ unsigned long gen_pool_first_fit(unsigned long *map, unsigned long size, unsigned long start, unsigned int nr, void *data, - struct gen_pool *pool) + struct gen_pool *pool, unsigned long start_addr) { return bitmap_find_next_zero_area(map, size, start, nr, 0); } @@ -543,16 +544,19 @@ EXPORT_SYMBOL(gen_pool_first_fit); */ unsigned long gen_pool_first_fit_align(unsigned long *map, unsigned long size, unsigned long start, unsigned int nr, void *data, - struct gen_pool *pool) + struct gen_pool *pool, unsigned long start_addr) { struct genpool_data_align *alignment; - unsigned long align_mask; + unsigned long align_mask, align_off; int order; alignment = data; order = pool->min_alloc_order; align_mask = ((alignment->align + (1UL << order) - 1) >> order) - 1; - return bitmap_find_next_zero_area(map, size, start, nr, align_mask); + align_off = (start_addr & (alignment->align - 1)) >> order; + + return bitmap_find_next_zero_area_off(map, size, start, nr, + align_mask, align_off); } EXPORT_SYMBOL(gen_pool_first_fit_align); @@ -567,7 +571,7 @@ EXPORT_SYMBOL(gen_pool_first_fit_align); */ unsigned long gen_pool_fixed_alloc(unsigned long *map, unsigned long size, unsigned long start, unsigned int nr, void *data, - struct gen_pool *pool) + struct gen_pool *pool, unsigned long start_addr) { struct genpool_data_fixed *fixed_data; int order; @@ -601,7 +605,8 @@ EXPORT_SYMBOL(gen_pool_fixed_alloc); */ unsigned long gen_pool_first_fit_order_align(unsigned long *map, unsigned long size, unsigned long start, - unsigned int nr, void *data, struct gen_pool *pool) + unsigned int nr, void *data, struct gen_pool *pool, + unsigned long start_addr) { unsigned long align_mask = roundup_pow_of_two(nr) - 1; @@ -624,7 +629,7 @@ 
EXPORT_SYMBOL(gen_pool_first_fit_order_align); */ unsigned long gen_pool_best_fit(unsigned long *map, unsigned long size, unsigned long start, unsigned int nr, void *data, - struct gen_pool *pool) + struct gen_pool *pool, unsigned long start_addr) { unsigned long start_bit = size; unsigned long len = size + 1; diff --git a/lib/idr.c b/lib/idr.c index fab2fd5bc326..cb1db9b8d3f6 100644 --- a/lib/idr.c +++ b/lib/idr.c @@ -6,8 +6,6 @@ #include <linux/spinlock.h> #include <linux/xarray.h> -DEFINE_PER_CPU(struct ida_bitmap *, ida_bitmap); - /** * idr_alloc_u32() - Allocate an ID. * @idr: IDR handle. @@ -39,10 +37,8 @@ int idr_alloc_u32(struct idr *idr, void *ptr, u32 *nextid, unsigned int base = idr->idr_base; unsigned int id = *nextid; - if (WARN_ON_ONCE(radix_tree_is_internal_node(ptr))) - return -EINVAL; - if (WARN_ON_ONCE(!(idr->idr_rt.gfp_mask & ROOT_IS_IDR))) - idr->idr_rt.gfp_mask |= IDR_RT_MARKER; + if (WARN_ON_ONCE(!(idr->idr_rt.xa_flags & ROOT_IS_IDR))) + idr->idr_rt.xa_flags |= IDR_RT_MARKER; id = (id < base) ? 0 : id - base; radix_tree_iter_init(&iter, id); @@ -295,15 +291,13 @@ void *idr_replace(struct idr *idr, void *ptr, unsigned long id) void __rcu **slot = NULL; void *entry; - if (WARN_ON_ONCE(radix_tree_is_internal_node(ptr))) - return ERR_PTR(-EINVAL); id -= idr->idr_base; entry = __radix_tree_lookup(&idr->idr_rt, id, &node, &slot); if (!slot || radix_tree_tag_get(&idr->idr_rt, id, IDR_FREE)) return ERR_PTR(-ENOENT); - __radix_tree_replace(&idr->idr_rt, node, slot, ptr, NULL); + __radix_tree_replace(&idr->idr_rt, node, slot, ptr); return entry; } @@ -324,6 +318,9 @@ EXPORT_SYMBOL(idr_replace); * free the individual IDs in it. You can use ida_is_empty() to find * out whether the IDA has any IDs currently allocated. * + * The IDA handles its own locking. It is safe to call any of the IDA + * functions without synchronisation in your code. + * * IDs are currently limited to the range [0-INT_MAX]. If this is an awkward * limitation, it should be quite straightforward to raise the maximum. */ @@ -331,161 +328,197 @@ EXPORT_SYMBOL(idr_replace); /* * Developer's notes: * - * The IDA uses the functionality provided by the IDR & radix tree to store - * bitmaps in each entry. The IDR_FREE tag means there is at least one bit - * free, unlike the IDR where it means at least one entry is free. + * The IDA uses the functionality provided by the XArray to store bitmaps in + * each entry. The XA_FREE_MARK is only cleared when all bits in the bitmap + * have been set. * - * I considered telling the radix tree that each slot is an order-10 node - * and storing the bit numbers in the radix tree, but the radix tree can't - * allow a single multiorder entry at index 0, which would significantly - * increase memory consumption for the IDA. So instead we divide the index - * by the number of bits in the leaf bitmap before doing a radix tree lookup. + * I considered telling the XArray that each slot is an order-10 node + * and indexing by bit number, but the XArray can't allow a single multi-index + * entry in the head, which would significantly increase memory consumption + * for the IDA. So instead we divide the index by the number of bits in the + * leaf bitmap before doing a radix tree lookup. * * As an optimisation, if there are only a few low bits set in any given - * leaf, instead of allocating a 128-byte bitmap, we use the 'exceptional - * entry' functionality of the radix tree to store BITS_PER_LONG - 2 bits - * directly in the entry. 
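[Editorial aside, not part of the patch] The replacement notes below keep the
same trick in XArray terms: a sparsely used leaf is stored as a value entry
whose payload is the bit mask itself. A minimal sketch, using only helpers
this patch already calls (xa_mk_value(), xa_is_value(), xa_to_value()):

	void *entry = xa_mk_value(1UL << bit);	/* bit < BITS_PER_XA_VALUE */

	if (xa_is_value(entry)) {
		unsigned long mask = xa_to_value(entry);
		/* ID (leaf_index * IDA_BITMAP_BITS + bit) is in use
		   iff mask & (1UL << bit) is set */
	}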
By being really tricksy, we could store - * BITS_PER_LONG - 1 bits, but there're diminishing returns after optimising - * for 0-3 allocated IDs. - * - * We allow the radix tree 'exceptional' count to get out of date. Nothing - * in the IDA nor the radix tree code checks it. If it becomes important - * to maintain an accurate exceptional count, switch the rcu_assign_pointer() - * calls to radix_tree_iter_replace() which will correct the exceptional - * count. - * - * The IDA always requires a lock to alloc/free. If we add a 'test_bit' + * leaf, instead of allocating a 128-byte bitmap, we store the bits + * as a value entry. Value entries never have the XA_FREE_MARK cleared + * because we can always convert them into a bitmap entry. + * + * It would be possible to optimise further; once we've run out of a + * single 128-byte bitmap, we currently switch to a 576-byte node, put + * the 128-byte bitmap in the first entry and then start allocating extra + * 128-byte entries. We could instead use the 512 bytes of the node's + * data as a bitmap before moving to that scheme. I do not believe this + * is a worthwhile optimisation; Rasmus Villemoes surveyed the current + * users of the IDA and almost none of them use more than 1024 entries. + * Those that do use more than the 8192 IDs that the 512 bytes would + * provide. + * + * The IDA always uses a lock to alloc/free. If we add a 'test_bit' * equivalent, it will still need locking. Going to RCU lookup would require * using RCU to free bitmaps, and that's not trivial without embedding an * RCU head in the bitmap, which adds a 2-pointer overhead to each 128-byte * bitmap, which is excessive. */ -#define IDA_MAX (0x80000000U / IDA_BITMAP_BITS - 1) - -static int ida_get_new_above(struct ida *ida, int start) +/** + * ida_alloc_range() - Allocate an unused ID. + * @ida: IDA handle. + * @min: Lowest ID to allocate. + * @max: Highest ID to allocate. + * @gfp: Memory allocation flags. + * + * Allocate an ID between @min and @max, inclusive. The allocated ID will + * not exceed %INT_MAX, even if @max is larger. + * + * Context: Any context. + * Return: The allocated ID, or %-ENOMEM if memory could not be allocated, + * or %-ENOSPC if there are no free IDs. 
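[Editorial usage sketch, not part of the patch] A typical caller of the
relocated ida_alloc_range()/ida_free() pair, assuming an IDA initialised
elsewhere with DEFINE_IDA(my_ida):

	int id = ida_alloc_range(&my_ida, 1, 99, GFP_KERNEL);

	if (id < 0)
		return id;	/* -ENOMEM or -ENOSPC */
	/* ... use id ... */
	ida_free(&my_ida, id);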
+ */ +int ida_alloc_range(struct ida *ida, unsigned int min, unsigned int max, + gfp_t gfp) { - struct radix_tree_root *root = &ida->ida_rt; - void __rcu **slot; - struct radix_tree_iter iter; - struct ida_bitmap *bitmap; - unsigned long index; - unsigned bit, ebit; - int new; - - index = start / IDA_BITMAP_BITS; - bit = start % IDA_BITMAP_BITS; - ebit = bit + RADIX_TREE_EXCEPTIONAL_SHIFT; - - slot = radix_tree_iter_init(&iter, index); - for (;;) { - if (slot) - slot = radix_tree_next_slot(slot, &iter, - RADIX_TREE_ITER_TAGGED); - if (!slot) { - slot = idr_get_free(root, &iter, GFP_NOWAIT, IDA_MAX); - if (IS_ERR(slot)) { - if (slot == ERR_PTR(-ENOMEM)) - return -EAGAIN; - return PTR_ERR(slot); + XA_STATE(xas, &ida->xa, min / IDA_BITMAP_BITS); + unsigned bit = min % IDA_BITMAP_BITS; + unsigned long flags; + struct ida_bitmap *bitmap, *alloc = NULL; + + if ((int)min < 0) + return -ENOSPC; + + if ((int)max < 0) + max = INT_MAX; + +retry: + xas_lock_irqsave(&xas, flags); +next: + bitmap = xas_find_marked(&xas, max / IDA_BITMAP_BITS, XA_FREE_MARK); + if (xas.xa_index > min / IDA_BITMAP_BITS) + bit = 0; + if (xas.xa_index * IDA_BITMAP_BITS + bit > max) + goto nospc; + + if (xa_is_value(bitmap)) { + unsigned long tmp = xa_to_value(bitmap); + + if (bit < BITS_PER_XA_VALUE) { + bit = find_next_zero_bit(&tmp, BITS_PER_XA_VALUE, bit); + if (xas.xa_index * IDA_BITMAP_BITS + bit > max) + goto nospc; + if (bit < BITS_PER_XA_VALUE) { + tmp |= 1UL << bit; + xas_store(&xas, xa_mk_value(tmp)); + goto out; } } - if (iter.index > index) { - bit = 0; - ebit = RADIX_TREE_EXCEPTIONAL_SHIFT; - } - new = iter.index * IDA_BITMAP_BITS; - bitmap = rcu_dereference_raw(*slot); - if (radix_tree_exception(bitmap)) { - unsigned long tmp = (unsigned long)bitmap; - ebit = find_next_zero_bit(&tmp, BITS_PER_LONG, ebit); - if (ebit < BITS_PER_LONG) { - tmp |= 1UL << ebit; - rcu_assign_pointer(*slot, (void *)tmp); - return new + ebit - - RADIX_TREE_EXCEPTIONAL_SHIFT; - } - bitmap = this_cpu_xchg(ida_bitmap, NULL); - if (!bitmap) - return -EAGAIN; - bitmap->bitmap[0] = tmp >> RADIX_TREE_EXCEPTIONAL_SHIFT; - rcu_assign_pointer(*slot, bitmap); + bitmap = alloc; + if (!bitmap) + bitmap = kzalloc(sizeof(*bitmap), GFP_NOWAIT); + if (!bitmap) + goto alloc; + bitmap->bitmap[0] = tmp; + xas_store(&xas, bitmap); + if (xas_error(&xas)) { + bitmap->bitmap[0] = 0; + goto out; } + } - if (bitmap) { - bit = find_next_zero_bit(bitmap->bitmap, - IDA_BITMAP_BITS, bit); - new += bit; - if (new < 0) - return -ENOSPC; - if (bit == IDA_BITMAP_BITS) - continue; + if (bitmap) { + bit = find_next_zero_bit(bitmap->bitmap, IDA_BITMAP_BITS, bit); + if (xas.xa_index * IDA_BITMAP_BITS + bit > max) + goto nospc; + if (bit == IDA_BITMAP_BITS) + goto next; - __set_bit(bit, bitmap->bitmap); - if (bitmap_full(bitmap->bitmap, IDA_BITMAP_BITS)) - radix_tree_iter_tag_clear(root, &iter, - IDR_FREE); + __set_bit(bit, bitmap->bitmap); + if (bitmap_full(bitmap->bitmap, IDA_BITMAP_BITS)) + xas_clear_mark(&xas, XA_FREE_MARK); + } else { + if (bit < BITS_PER_XA_VALUE) { + bitmap = xa_mk_value(1UL << bit); } else { - new += bit; - if (new < 0) - return -ENOSPC; - if (ebit < BITS_PER_LONG) { - bitmap = (void *)((1UL << ebit) | - RADIX_TREE_EXCEPTIONAL_ENTRY); - radix_tree_iter_replace(root, &iter, slot, - bitmap); - return new; - } - bitmap = this_cpu_xchg(ida_bitmap, NULL); + bitmap = alloc; if (!bitmap) - return -EAGAIN; + bitmap = kzalloc(sizeof(*bitmap), GFP_NOWAIT); + if (!bitmap) + goto alloc; __set_bit(bit, bitmap->bitmap); - radix_tree_iter_replace(root, &iter, 
slot, bitmap); } - - return new; + xas_store(&xas, bitmap); + } +out: + xas_unlock_irqrestore(&xas, flags); + if (xas_nomem(&xas, gfp)) { + xas.xa_index = min / IDA_BITMAP_BITS; + bit = min % IDA_BITMAP_BITS; + goto retry; } + if (bitmap != alloc) + kfree(alloc); + if (xas_error(&xas)) + return xas_error(&xas); + return xas.xa_index * IDA_BITMAP_BITS + bit; +alloc: + xas_unlock_irqrestore(&xas, flags); + alloc = kzalloc(sizeof(*bitmap), gfp); + if (!alloc) + return -ENOMEM; + xas_set(&xas, min / IDA_BITMAP_BITS); + bit = min % IDA_BITMAP_BITS; + goto retry; +nospc: + xas_unlock_irqrestore(&xas, flags); + return -ENOSPC; } +EXPORT_SYMBOL(ida_alloc_range); -static void ida_remove(struct ida *ida, int id) +/** + * ida_free() - Release an allocated ID. + * @ida: IDA handle. + * @id: Previously allocated ID. + * + * Context: Any context. + */ +void ida_free(struct ida *ida, unsigned int id) { - unsigned long index = id / IDA_BITMAP_BITS; - unsigned offset = id % IDA_BITMAP_BITS; + XA_STATE(xas, &ida->xa, id / IDA_BITMAP_BITS); + unsigned bit = id % IDA_BITMAP_BITS; struct ida_bitmap *bitmap; - unsigned long *btmp; - struct radix_tree_iter iter; - void __rcu **slot; + unsigned long flags; - slot = radix_tree_iter_lookup(&ida->ida_rt, &iter, index); - if (!slot) - goto err; + BUG_ON((int)id < 0); + + xas_lock_irqsave(&xas, flags); + bitmap = xas_load(&xas); - bitmap = rcu_dereference_raw(*slot); - if (radix_tree_exception(bitmap)) { - btmp = (unsigned long *)slot; - offset += RADIX_TREE_EXCEPTIONAL_SHIFT; - if (offset >= BITS_PER_LONG) + if (xa_is_value(bitmap)) { + unsigned long v = xa_to_value(bitmap); + if (bit >= BITS_PER_XA_VALUE) goto err; + if (!(v & (1UL << bit))) + goto err; + v &= ~(1UL << bit); + if (!v) + goto delete; + xas_store(&xas, xa_mk_value(v)); } else { - btmp = bitmap->bitmap; - } - if (!test_bit(offset, btmp)) - goto err; - - __clear_bit(offset, btmp); - radix_tree_iter_tag_set(&ida->ida_rt, &iter, IDR_FREE); - if (radix_tree_exception(bitmap)) { - if (rcu_dereference_raw(*slot) == - (void *)RADIX_TREE_EXCEPTIONAL_ENTRY) - radix_tree_iter_delete(&ida->ida_rt, &iter, slot); - } else if (bitmap_empty(btmp, IDA_BITMAP_BITS)) { - kfree(bitmap); - radix_tree_iter_delete(&ida->ida_rt, &iter, slot); + if (!test_bit(bit, bitmap->bitmap)) + goto err; + __clear_bit(bit, bitmap->bitmap); + xas_set_mark(&xas, XA_FREE_MARK); + if (bitmap_empty(bitmap->bitmap, IDA_BITMAP_BITS)) { + kfree(bitmap); +delete: + xas_store(&xas, NULL); + } } + xas_unlock_irqrestore(&xas, flags); return; err: + xas_unlock_irqrestore(&xas, flags); WARN(1, "ida_free called for id=%d which is not allocated.\n", id); } +EXPORT_SYMBOL(ida_free); /** * ida_destroy() - Free all IDs. @@ -500,80 +533,60 @@ static void ida_remove(struct ida *ida, int id) */ void ida_destroy(struct ida *ida) { + XA_STATE(xas, &ida->xa, 0); + struct ida_bitmap *bitmap; unsigned long flags; - struct radix_tree_iter iter; - void __rcu **slot; - xa_lock_irqsave(&ida->ida_rt, flags); - radix_tree_for_each_slot(slot, &ida->ida_rt, &iter, 0) { - struct ida_bitmap *bitmap = rcu_dereference_raw(*slot); - if (!radix_tree_exception(bitmap)) + xas_lock_irqsave(&xas, flags); + xas_for_each(&xas, bitmap, ULONG_MAX) { + if (!xa_is_value(bitmap)) kfree(bitmap); - radix_tree_iter_delete(&ida->ida_rt, &iter, slot); + xas_store(&xas, NULL); } - xa_unlock_irqrestore(&ida->ida_rt, flags); + xas_unlock_irqrestore(&xas, flags); } EXPORT_SYMBOL(ida_destroy); -/** - * ida_alloc_range() - Allocate an unused ID. - * @ida: IDA handle. - * @min: Lowest ID to allocate. 
- * @max: Highest ID to allocate. - * @gfp: Memory allocation flags. - * - * Allocate an ID between @min and @max, inclusive. The allocated ID will - * not exceed %INT_MAX, even if @max is larger. - * - * Context: Any context. - * Return: The allocated ID, or %-ENOMEM if memory could not be allocated, - * or %-ENOSPC if there are no free IDs. - */ -int ida_alloc_range(struct ida *ida, unsigned int min, unsigned int max, - gfp_t gfp) -{ - int id = 0; - unsigned long flags; +#ifndef __KERNEL__ +extern void xa_dump_index(unsigned long index, unsigned int shift); +#define IDA_CHUNK_SHIFT ilog2(IDA_BITMAP_BITS) - if ((int)min < 0) - return -ENOSPC; - - if ((int)max < 0) - max = INT_MAX; - -again: - xa_lock_irqsave(&ida->ida_rt, flags); - id = ida_get_new_above(ida, min); - if (id > (int)max) { - ida_remove(ida, id); - id = -ENOSPC; - } - xa_unlock_irqrestore(&ida->ida_rt, flags); +static void ida_dump_entry(void *entry, unsigned long index) +{ + unsigned long i; + + if (!entry) + return; + + if (xa_is_node(entry)) { + struct xa_node *node = xa_to_node(entry); + unsigned int shift = node->shift + IDA_CHUNK_SHIFT + + XA_CHUNK_SHIFT; + + xa_dump_index(index * IDA_BITMAP_BITS, shift); + xa_dump_node(node); + for (i = 0; i < XA_CHUNK_SIZE; i++) + ida_dump_entry(node->slots[i], + index | (i << node->shift)); + } else if (xa_is_value(entry)) { + xa_dump_index(index * IDA_BITMAP_BITS, ilog2(BITS_PER_LONG)); + pr_cont("value: data %lx [%px]\n", xa_to_value(entry), entry); + } else { + struct ida_bitmap *bitmap = entry; - if (unlikely(id == -EAGAIN)) { - if (!ida_pre_get(ida, gfp)) - return -ENOMEM; - goto again; + xa_dump_index(index * IDA_BITMAP_BITS, IDA_CHUNK_SHIFT); + pr_cont("bitmap: %p data", bitmap); + for (i = 0; i < IDA_BITMAP_LONGS; i++) + pr_cont(" %lx", bitmap->bitmap[i]); + pr_cont("\n"); } - - return id; } -EXPORT_SYMBOL(ida_alloc_range); -/** - * ida_free() - Release an allocated ID. - * @ida: IDA handle. - * @id: Previously allocated ID. - * - * Context: Any context. 
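[Editorial aside, not part of the patch] The rewritten ida_free() above
validates the request: clearing a bit that is not set in a still-present leaf
leaves the structure untouched and fires the "ida_free called for id=%d which
is not allocated." warning. A hypothetical illustration, starting from an
empty IDA:

	int id = ida_alloc_range(&my_ida, 0, 1, GFP_KERNEL);	/* returns 0 */

	ida_free(&my_ida, 1);	/* id 1 was never allocated: trips the WARN */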
- */ -void ida_free(struct ida *ida, unsigned int id) +static void ida_dump(struct ida *ida) { - unsigned long flags; - - BUG_ON((int)id < 0); - xa_lock_irqsave(&ida->ida_rt, flags); - ida_remove(ida, id); - xa_unlock_irqrestore(&ida->ida_rt, flags); + struct xarray *xa = &ida->xa; + pr_debug("ida: %p node %p free %d\n", ida, xa->xa_head, + xa->xa_flags >> ROOT_TAG_SHIFT); + ida_dump_entry(xa->xa_head, 0); } -EXPORT_SYMBOL(ida_free); +#endif diff --git a/lib/int_sqrt.c b/lib/int_sqrt.c index 14436f4ca6bd..30e0f9770f88 100644 --- a/lib/int_sqrt.c +++ b/lib/int_sqrt.c @@ -52,7 +52,7 @@ u32 int_sqrt64(u64 x) if (x <= ULONG_MAX) return int_sqrt((unsigned long) x); - m = 1ULL << (fls64(x) & ~1ULL); + m = 1ULL << ((fls64(x) - 1) & ~1ULL); while (m != 0) { b = y + m; y >>= 1; diff --git a/lib/iomap.c b/lib/iomap.c index 541d926da95e..e909ab71e995 100644 --- a/lib/iomap.c +++ b/lib/iomap.c @@ -65,8 +65,9 @@ static void bad_io_access(unsigned long port, const char *access) #endif #ifndef mmio_read16be -#define mmio_read16be(addr) be16_to_cpu(__raw_readw(addr)) -#define mmio_read32be(addr) be32_to_cpu(__raw_readl(addr)) +#define mmio_read16be(addr) swab16(readw(addr)) +#define mmio_read32be(addr) swab32(readl(addr)) +#define mmio_read64be(addr) swab64(readq(addr)) #endif unsigned int ioread8(void __iomem *addr) @@ -100,14 +101,89 @@ EXPORT_SYMBOL(ioread16be); EXPORT_SYMBOL(ioread32); EXPORT_SYMBOL(ioread32be); +#ifdef readq +static u64 pio_read64_lo_hi(unsigned long port) +{ + u64 lo, hi; + + lo = inl(port); + hi = inl(port + sizeof(u32)); + + return lo | (hi << 32); +} + +static u64 pio_read64_hi_lo(unsigned long port) +{ + u64 lo, hi; + + hi = inl(port + sizeof(u32)); + lo = inl(port); + + return lo | (hi << 32); +} + +static u64 pio_read64be_lo_hi(unsigned long port) +{ + u64 lo, hi; + + lo = pio_read32be(port + sizeof(u32)); + hi = pio_read32be(port); + + return lo | (hi << 32); +} + +static u64 pio_read64be_hi_lo(unsigned long port) +{ + u64 lo, hi; + + hi = pio_read32be(port); + lo = pio_read32be(port + sizeof(u32)); + + return lo | (hi << 32); +} + +u64 ioread64_lo_hi(void __iomem *addr) +{ + IO_COND(addr, return pio_read64_lo_hi(port), return readq(addr)); + return 0xffffffffffffffffULL; +} + +u64 ioread64_hi_lo(void __iomem *addr) +{ + IO_COND(addr, return pio_read64_hi_lo(port), return readq(addr)); + return 0xffffffffffffffffULL; +} + +u64 ioread64be_lo_hi(void __iomem *addr) +{ + IO_COND(addr, return pio_read64be_lo_hi(port), + return mmio_read64be(addr)); + return 0xffffffffffffffffULL; +} + +u64 ioread64be_hi_lo(void __iomem *addr) +{ + IO_COND(addr, return pio_read64be_hi_lo(port), + return mmio_read64be(addr)); + return 0xffffffffffffffffULL; +} + +EXPORT_SYMBOL(ioread64_lo_hi); +EXPORT_SYMBOL(ioread64_hi_lo); +EXPORT_SYMBOL(ioread64be_lo_hi); +EXPORT_SYMBOL(ioread64be_hi_lo); + +#endif /* readq */ + #ifndef pio_write16be #define pio_write16be(val,port) outw(swab16(val),port) #define pio_write32be(val,port) outl(swab32(val),port) #endif #ifndef mmio_write16be -#define mmio_write16be(val,port) __raw_writew(be16_to_cpu(val),port) -#define mmio_write32be(val,port) __raw_writel(be32_to_cpu(val),port) +#define mmio_write16be(val,port) writew(swab16(val),port) +#define mmio_write32be(val,port) writel(swab32(val),port) +#define mmio_write64be(val,port) writeq(swab64(val),port) #endif void iowrite8(u8 val, void __iomem *addr) @@ -136,6 +212,62 @@ EXPORT_SYMBOL(iowrite16be); EXPORT_SYMBOL(iowrite32); EXPORT_SYMBOL(iowrite32be); +#ifdef writeq +static void pio_write64_lo_hi(u64 val, unsigned 
long port) +{ + outl(val, port); + outl(val >> 32, port + sizeof(u32)); +} + +static void pio_write64_hi_lo(u64 val, unsigned long port) +{ + outl(val >> 32, port + sizeof(u32)); + outl(val, port); +} + +static void pio_write64be_lo_hi(u64 val, unsigned long port) +{ + pio_write32be(val, port + sizeof(u32)); + pio_write32be(val >> 32, port); +} + +static void pio_write64be_hi_lo(u64 val, unsigned long port) +{ + pio_write32be(val >> 32, port); + pio_write32be(val, port + sizeof(u32)); +} + +void iowrite64_lo_hi(u64 val, void __iomem *addr) +{ + IO_COND(addr, pio_write64_lo_hi(val, port), + writeq(val, addr)); +} + +void iowrite64_hi_lo(u64 val, void __iomem *addr) +{ + IO_COND(addr, pio_write64_hi_lo(val, port), + writeq(val, addr)); +} + +void iowrite64be_lo_hi(u64 val, void __iomem *addr) +{ + IO_COND(addr, pio_write64be_lo_hi(val, port), + mmio_write64be(val, addr)); +} + +void iowrite64be_hi_lo(u64 val, void __iomem *addr) +{ + IO_COND(addr, pio_write64be_hi_lo(val, port), + mmio_write64be(val, addr)); +} + +EXPORT_SYMBOL(iowrite64_lo_hi); +EXPORT_SYMBOL(iowrite64_hi_lo); +EXPORT_SYMBOL(iowrite64be_lo_hi); +EXPORT_SYMBOL(iowrite64be_hi_lo); + +#endif /* readq */ + /* * These are the "repeat MMIO read/write" functions. * Note the "__raw" accesses, since we don't want to diff --git a/lib/ioremap.c b/lib/ioremap.c index 517f5853ffed..063213685563 100644 --- a/lib/ioremap.c +++ b/lib/ioremap.c @@ -76,83 +76,123 @@ static int ioremap_pte_range(pmd_t *pmd, unsigned long addr, return 0; } +static int ioremap_try_huge_pmd(pmd_t *pmd, unsigned long addr, + unsigned long end, phys_addr_t phys_addr, + pgprot_t prot) +{ + if (!ioremap_pmd_enabled()) + return 0; + + if ((end - addr) != PMD_SIZE) + return 0; + + if (!IS_ALIGNED(phys_addr, PMD_SIZE)) + return 0; + + if (pmd_present(*pmd) && !pmd_free_pte_page(pmd, addr)) + return 0; + + return pmd_set_huge(pmd, phys_addr, prot); +} + static inline int ioremap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end, phys_addr_t phys_addr, pgprot_t prot) { pmd_t *pmd; unsigned long next; - phys_addr -= addr; pmd = pmd_alloc(&init_mm, pud, addr); if (!pmd) return -ENOMEM; do { next = pmd_addr_end(addr, end); - if (ioremap_pmd_enabled() && - ((next - addr) == PMD_SIZE) && - IS_ALIGNED(phys_addr + addr, PMD_SIZE) && - pmd_free_pte_page(pmd, addr)) { - if (pmd_set_huge(pmd, phys_addr + addr, prot)) - continue; - } + if (ioremap_try_huge_pmd(pmd, addr, next, phys_addr, prot)) + continue; - if (ioremap_pte_range(pmd, addr, next, phys_addr + addr, prot)) + if (ioremap_pte_range(pmd, addr, next, phys_addr, prot)) return -ENOMEM; - } while (pmd++, addr = next, addr != end); + } while (pmd++, phys_addr += (next - addr), addr = next, addr != end); return 0; } +static int ioremap_try_huge_pud(pud_t *pud, unsigned long addr, + unsigned long end, phys_addr_t phys_addr, + pgprot_t prot) +{ + if (!ioremap_pud_enabled()) + return 0; + + if ((end - addr) != PUD_SIZE) + return 0; + + if (!IS_ALIGNED(phys_addr, PUD_SIZE)) + return 0; + + if (pud_present(*pud) && !pud_free_pmd_page(pud, addr)) + return 0; + + return pud_set_huge(pud, phys_addr, prot); +} + static inline int ioremap_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end, phys_addr_t phys_addr, pgprot_t prot) { pud_t *pud; unsigned long next; - phys_addr -= addr; pud = pud_alloc(&init_mm, p4d, addr); if (!pud) return -ENOMEM; do { next = pud_addr_end(addr, end); - if (ioremap_pud_enabled() && - ((next - addr) == PUD_SIZE) && - IS_ALIGNED(phys_addr + addr, PUD_SIZE) && - pud_free_pmd_page(pud, addr)) 
{ - if (pud_set_huge(pud, phys_addr + addr, prot)) - continue; - } + if (ioremap_try_huge_pud(pud, addr, next, phys_addr, prot)) + continue; - if (ioremap_pmd_range(pud, addr, next, phys_addr + addr, prot)) + if (ioremap_pmd_range(pud, addr, next, phys_addr, prot)) return -ENOMEM; - } while (pud++, addr = next, addr != end); + } while (pud++, phys_addr += (next - addr), addr = next, addr != end); return 0; } +static int ioremap_try_huge_p4d(p4d_t *p4d, unsigned long addr, + unsigned long end, phys_addr_t phys_addr, + pgprot_t prot) +{ + if (!ioremap_p4d_enabled()) + return 0; + + if ((end - addr) != P4D_SIZE) + return 0; + + if (!IS_ALIGNED(phys_addr, P4D_SIZE)) + return 0; + + if (p4d_present(*p4d) && !p4d_free_pud_page(p4d, addr)) + return 0; + + return p4d_set_huge(p4d, phys_addr, prot); +} + static inline int ioremap_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end, phys_addr_t phys_addr, pgprot_t prot) { p4d_t *p4d; unsigned long next; - phys_addr -= addr; p4d = p4d_alloc(&init_mm, pgd, addr); if (!p4d) return -ENOMEM; do { next = p4d_addr_end(addr, end); - if (ioremap_p4d_enabled() && - ((next - addr) == P4D_SIZE) && - IS_ALIGNED(phys_addr + addr, P4D_SIZE)) { - if (p4d_set_huge(p4d, phys_addr + addr, prot)) - continue; - } + if (ioremap_try_huge_p4d(p4d, addr, next, phys_addr, prot)) + continue; - if (ioremap_pud_range(p4d, addr, next, phys_addr + addr, prot)) + if (ioremap_pud_range(p4d, addr, next, phys_addr, prot)) return -ENOMEM; - } while (p4d++, addr = next, addr != end); + } while (p4d++, phys_addr += (next - addr), addr = next, addr != end); return 0; } @@ -168,14 +208,13 @@ int ioremap_page_range(unsigned long addr, BUG_ON(addr >= end); start = addr; - phys_addr -= addr; pgd = pgd_offset_k(addr); do { next = pgd_addr_end(addr, end); - err = ioremap_p4d_range(pgd, addr, next, phys_addr+addr, prot); + err = ioremap_p4d_range(pgd, addr, next, phys_addr, prot); if (err) break; - } while (pgd++, addr = next, addr != end); + } while (pgd++, phys_addr += (next - addr), addr = next, addr != end); flush_cache_vmap(start, end); diff --git a/lib/iov_iter.c b/lib/iov_iter.c index 8be175df3075..be4bd627caf0 100644 --- a/lib/iov_iter.c +++ b/lib/iov_iter.c @@ -6,6 +6,7 @@ #include <linux/vmalloc.h> #include <linux/splice.h> #include <net/checksum.h> +#include <linux/scatterlist.h> #define PIPE_PARANOIA /* for now */ @@ -83,6 +84,7 @@ const struct kvec *kvec; \ struct kvec v; \ iterate_kvec(i, n, v, kvec, skip, (K)) \ + } else if (unlikely(i->type & ITER_DISCARD)) { \ } else { \ const struct iovec *iov; \ struct iovec v; \ @@ -114,6 +116,8 @@ } \ i->nr_segs -= kvec - i->kvec; \ i->kvec = kvec; \ + } else if (unlikely(i->type & ITER_DISCARD)) { \ + skip += n; \ } else { \ const struct iovec *iov; \ struct iovec v; \ @@ -132,7 +136,7 @@ static int copyout(void __user *to, const void *from, size_t n) { - if (access_ok(VERIFY_WRITE, to, n)) { + if (access_ok(to, n)) { kasan_check_read(from, n); n = raw_copy_to_user(to, from, n); } @@ -141,7 +145,7 @@ static int copyout(void __user *to, const void *from, size_t n) static int copyin(void *to, const void __user *from, size_t n) { - if (access_ok(VERIFY_READ, from, n)) { + if (access_ok(from, n)) { kasan_check_write(to, n); n = raw_copy_from_user(to, from, n); } @@ -428,17 +432,19 @@ int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes) } EXPORT_SYMBOL(iov_iter_fault_in_readable); -void iov_iter_init(struct iov_iter *i, int direction, +void iov_iter_init(struct iov_iter *i, unsigned int direction, const struct iovec *iov, 
unsigned long nr_segs, size_t count) { + WARN_ON(direction & ~(READ | WRITE)); + direction &= READ | WRITE; + /* It will get better. Eventually... */ if (uaccess_kernel()) { - direction |= ITER_KVEC; - i->type = direction; + i->type = ITER_KVEC | direction; i->kvec = (struct kvec *)iov; } else { - i->type = direction; + i->type = ITER_IOVEC | direction; i->iov = iov; } i->nr_segs = nr_segs; @@ -555,10 +561,48 @@ static size_t copy_pipe_to_iter(const void *addr, size_t bytes, return bytes; } +static __wsum csum_and_memcpy(void *to, const void *from, size_t len, + __wsum sum, size_t off) +{ + __wsum next = csum_partial_copy_nocheck(from, to, len, 0); + return csum_block_add(sum, next, off); +} + +static size_t csum_and_copy_to_pipe_iter(const void *addr, size_t bytes, + __wsum *csum, struct iov_iter *i) +{ + struct pipe_inode_info *pipe = i->pipe; + size_t n, r; + size_t off = 0; + __wsum sum = *csum; + int idx; + + if (!sanity(i)) + return 0; + + bytes = n = push_pipe(i, bytes, &idx, &r); + if (unlikely(!n)) + return 0; + for ( ; n; idx = next_idx(idx, pipe), r = 0) { + size_t chunk = min_t(size_t, n, PAGE_SIZE - r); + char *p = kmap_atomic(pipe->bufs[idx].page); + sum = csum_and_memcpy(p + r, addr, chunk, sum, off); + kunmap_atomic(p); + i->idx = idx; + i->iov_offset = r + chunk; + n -= chunk; + off += chunk; + addr += chunk; + } + i->count -= bytes; + *csum = sum; + return bytes; +} + size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i) { const char *from = addr; - if (unlikely(i->type & ITER_PIPE)) + if (unlikely(iov_iter_is_pipe(i))) return copy_pipe_to_iter(addr, bytes, i); if (iter_is_iovec(i)) might_fault(); @@ -576,7 +620,7 @@ EXPORT_SYMBOL(_copy_to_iter); #ifdef CONFIG_ARCH_HAS_UACCESS_MCSAFE static int copyout_mcsafe(void __user *to, const void *from, size_t n) { - if (access_ok(VERIFY_WRITE, to, n)) { + if (access_ok(to, n)) { kasan_check_read(from, n); n = copy_to_user_mcsafe((__force void *) to, from, n); } @@ -658,7 +702,7 @@ size_t _copy_to_iter_mcsafe(const void *addr, size_t bytes, struct iov_iter *i) const char *from = addr; unsigned long rem, curr_addr, s_addr = (unsigned long) addr; - if (unlikely(i->type & ITER_PIPE)) + if (unlikely(iov_iter_is_pipe(i))) return copy_pipe_to_iter_mcsafe(addr, bytes, i); if (iter_is_iovec(i)) might_fault(); @@ -692,7 +736,7 @@ EXPORT_SYMBOL_GPL(_copy_to_iter_mcsafe); size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i) { char *to = addr; - if (unlikely(i->type & ITER_PIPE)) { + if (unlikely(iov_iter_is_pipe(i))) { WARN_ON(1); return 0; } @@ -712,7 +756,7 @@ EXPORT_SYMBOL(_copy_from_iter); bool _copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i) { char *to = addr; - if (unlikely(i->type & ITER_PIPE)) { + if (unlikely(iov_iter_is_pipe(i))) { WARN_ON(1); return false; } @@ -739,7 +783,7 @@ EXPORT_SYMBOL(_copy_from_iter_full); size_t _copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i) { char *to = addr; - if (unlikely(i->type & ITER_PIPE)) { + if (unlikely(iov_iter_is_pipe(i))) { WARN_ON(1); return 0; } @@ -773,7 +817,7 @@ EXPORT_SYMBOL(_copy_from_iter_nocache); size_t _copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i) { char *to = addr; - if (unlikely(i->type & ITER_PIPE)) { + if (unlikely(iov_iter_is_pipe(i))) { WARN_ON(1); return 0; } @@ -794,7 +838,7 @@ EXPORT_SYMBOL_GPL(_copy_from_iter_flushcache); bool _copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i) { char *to = addr; - if (unlikely(i->type & ITER_PIPE)) { + if 
(unlikely(iov_iter_is_pipe(i))) { WARN_ON(1); return false; } @@ -836,7 +880,9 @@ size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes, size_t wanted = copy_to_iter(kaddr + offset, bytes, i); kunmap_atomic(kaddr); return wanted; - } else if (likely(!(i->type & ITER_PIPE))) + } else if (unlikely(iov_iter_is_discard(i))) + return bytes; + else if (likely(!iov_iter_is_pipe(i))) return copy_page_to_iter_iovec(page, offset, bytes, i); else return copy_page_to_iter_pipe(page, offset, bytes, i); @@ -848,7 +894,7 @@ size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes, { if (unlikely(!page_copy_sane(page, offset, bytes))) return 0; - if (unlikely(i->type & ITER_PIPE)) { + if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) { WARN_ON(1); return 0; } @@ -888,7 +934,7 @@ static size_t pipe_zero(size_t bytes, struct iov_iter *i) size_t iov_iter_zero(size_t bytes, struct iov_iter *i) { - if (unlikely(i->type & ITER_PIPE)) + if (unlikely(iov_iter_is_pipe(i))) return pipe_zero(bytes, i); iterate_and_advance(i, bytes, v, clear_user(v.iov_base, v.iov_len), @@ -908,7 +954,7 @@ size_t iov_iter_copy_from_user_atomic(struct page *page, kunmap_atomic(kaddr); return 0; } - if (unlikely(i->type & ITER_PIPE)) { + if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) { kunmap_atomic(kaddr); WARN_ON(1); return 0; @@ -972,10 +1018,14 @@ static void pipe_advance(struct iov_iter *i, size_t size) void iov_iter_advance(struct iov_iter *i, size_t size) { - if (unlikely(i->type & ITER_PIPE)) { + if (unlikely(iov_iter_is_pipe(i))) { pipe_advance(i, size); return; } + if (unlikely(iov_iter_is_discard(i))) { + i->count -= size; + return; + } iterate_and_advance(i, size, v, 0, 0, 0) } EXPORT_SYMBOL(iov_iter_advance); @@ -987,7 +1037,7 @@ void iov_iter_revert(struct iov_iter *i, size_t unroll) if (WARN_ON(unroll > MAX_RW_COUNT)) return; i->count += unroll; - if (unlikely(i->type & ITER_PIPE)) { + if (unlikely(iov_iter_is_pipe(i))) { struct pipe_inode_info *pipe = i->pipe; int idx = i->idx; size_t off = i->iov_offset; @@ -1011,12 +1061,14 @@ void iov_iter_revert(struct iov_iter *i, size_t unroll) pipe_truncate(i); return; } + if (unlikely(iov_iter_is_discard(i))) + return; if (unroll <= i->iov_offset) { i->iov_offset -= unroll; return; } unroll -= i->iov_offset; - if (i->type & ITER_BVEC) { + if (iov_iter_is_bvec(i)) { const struct bio_vec *bvec = i->bvec; while (1) { size_t n = (--bvec)->bv_len; @@ -1049,23 +1101,25 @@ EXPORT_SYMBOL(iov_iter_revert); */ size_t iov_iter_single_seg_count(const struct iov_iter *i) { - if (unlikely(i->type & ITER_PIPE)) + if (unlikely(iov_iter_is_pipe(i))) return i->count; // it is a silly place, anyway if (i->nr_segs == 1) return i->count; - else if (i->type & ITER_BVEC) + if (unlikely(iov_iter_is_discard(i))) + return i->count; + else if (iov_iter_is_bvec(i)) return min(i->count, i->bvec->bv_len - i->iov_offset); else return min(i->count, i->iov->iov_len - i->iov_offset); } EXPORT_SYMBOL(iov_iter_single_seg_count); -void iov_iter_kvec(struct iov_iter *i, int direction, +void iov_iter_kvec(struct iov_iter *i, unsigned int direction, const struct kvec *kvec, unsigned long nr_segs, size_t count) { - BUG_ON(!(direction & ITER_KVEC)); - i->type = direction; + WARN_ON(direction & ~(READ | WRITE)); + i->type = ITER_KVEC | (direction & (READ | WRITE)); i->kvec = kvec; i->nr_segs = nr_segs; i->iov_offset = 0; @@ -1073,12 +1127,12 @@ void iov_iter_kvec(struct iov_iter *i, int direction, } EXPORT_SYMBOL(iov_iter_kvec); -void iov_iter_bvec(struct 
iov_iter *i, int direction, +void iov_iter_bvec(struct iov_iter *i, unsigned int direction, const struct bio_vec *bvec, unsigned long nr_segs, size_t count) { - BUG_ON(!(direction & ITER_BVEC)); - i->type = direction; + WARN_ON(direction & ~(READ | WRITE)); + i->type = ITER_BVEC | (direction & (READ | WRITE)); i->bvec = bvec; i->nr_segs = nr_segs; i->iov_offset = 0; @@ -1086,13 +1140,13 @@ void iov_iter_bvec(struct iov_iter *i, int direction, } EXPORT_SYMBOL(iov_iter_bvec); -void iov_iter_pipe(struct iov_iter *i, int direction, +void iov_iter_pipe(struct iov_iter *i, unsigned int direction, struct pipe_inode_info *pipe, size_t count) { - BUG_ON(direction != ITER_PIPE); + BUG_ON(direction != READ); WARN_ON(pipe->nrbufs == pipe->buffers); - i->type = direction; + i->type = ITER_PIPE | READ; i->pipe = pipe; i->idx = (pipe->curbuf + pipe->nrbufs) & (pipe->buffers - 1); i->iov_offset = 0; @@ -1101,12 +1155,30 @@ void iov_iter_pipe(struct iov_iter *i, int direction, } EXPORT_SYMBOL(iov_iter_pipe); +/** + * iov_iter_discard - Initialise an I/O iterator that discards data + * @i: The iterator to initialise. + * @direction: The direction of the transfer. + * @count: The size of the I/O buffer in bytes. + * + * Set up an I/O iterator that just discards everything that's written to it. + * It's only available as a READ iterator. + */ +void iov_iter_discard(struct iov_iter *i, unsigned int direction, size_t count) +{ + BUG_ON(direction != READ); + i->type = ITER_DISCARD | READ; + i->count = count; + i->iov_offset = 0; +} +EXPORT_SYMBOL(iov_iter_discard); + unsigned long iov_iter_alignment(const struct iov_iter *i) { unsigned long res = 0; size_t size = i->count; - if (unlikely(i->type & ITER_PIPE)) { + if (unlikely(iov_iter_is_pipe(i))) { if (size && i->iov_offset && allocated(&i->pipe->bufs[i->idx])) return size | i->iov_offset; return size; @@ -1125,7 +1197,7 @@ unsigned long iov_iter_gap_alignment(const struct iov_iter *i) unsigned long res = 0; size_t size = i->count; - if (unlikely(i->type & ITER_PIPE)) { + if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) { WARN_ON(1); return ~0U; } @@ -1193,8 +1265,11 @@ ssize_t iov_iter_get_pages(struct iov_iter *i, if (maxsize > i->count) maxsize = i->count; - if (unlikely(i->type & ITER_PIPE)) + if (unlikely(iov_iter_is_pipe(i))) return pipe_get_pages(i, pages, maxsize, maxpages, start); + if (unlikely(iov_iter_is_discard(i))) + return -EFAULT; + iterate_all_kinds(i, maxsize, v, ({ unsigned long addr = (unsigned long)v.iov_base; size_t len = v.iov_len + (*start = addr & (PAGE_SIZE - 1)); @@ -1205,7 +1280,7 @@ ssize_t iov_iter_get_pages(struct iov_iter *i, len = maxpages * PAGE_SIZE; addr &= ~(PAGE_SIZE - 1); n = DIV_ROUND_UP(len, PAGE_SIZE); - res = get_user_pages_fast(addr, n, (i->type & WRITE) != WRITE, pages); + res = get_user_pages_fast(addr, n, iov_iter_rw(i) != WRITE, pages); if (unlikely(res < 0)) return res; return (res == n ? 
len : res * PAGE_SIZE) - *start; @@ -1270,8 +1345,11 @@ ssize_t iov_iter_get_pages_alloc(struct iov_iter *i, if (maxsize > i->count) maxsize = i->count; - if (unlikely(i->type & ITER_PIPE)) + if (unlikely(iov_iter_is_pipe(i))) return pipe_get_pages_alloc(i, pages, maxsize, start); + if (unlikely(iov_iter_is_discard(i))) + return -EFAULT; + iterate_all_kinds(i, maxsize, v, ({ unsigned long addr = (unsigned long)v.iov_base; size_t len = v.iov_len + (*start = addr & (PAGE_SIZE - 1)); @@ -1283,7 +1361,7 @@ ssize_t iov_iter_get_pages_alloc(struct iov_iter *i, p = get_pages_array(n); if (!p) return -ENOMEM; - res = get_user_pages_fast(addr, n, (i->type & WRITE) != WRITE, p); + res = get_user_pages_fast(addr, n, iov_iter_rw(i) != WRITE, p); if (unlikely(res < 0)) { kvfree(p); return res; @@ -1313,7 +1391,7 @@ size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum, __wsum sum, next; size_t off = 0; sum = *csum; - if (unlikely(i->type & ITER_PIPE)) { + if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) { WARN_ON(1); return 0; } @@ -1329,17 +1407,15 @@ size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum, err ? v.iov_len : 0; }), ({ char *p = kmap_atomic(v.bv_page); - next = csum_partial_copy_nocheck(p + v.bv_offset, - (to += v.bv_len) - v.bv_len, - v.bv_len, 0); + sum = csum_and_memcpy((to += v.bv_len) - v.bv_len, + p + v.bv_offset, v.bv_len, + sum, off); kunmap_atomic(p); - sum = csum_block_add(sum, next, off); off += v.bv_len; }),({ - next = csum_partial_copy_nocheck(v.iov_base, - (to += v.iov_len) - v.iov_len, - v.iov_len, 0); - sum = csum_block_add(sum, next, off); + sum = csum_and_memcpy((to += v.iov_len) - v.iov_len, + v.iov_base, v.iov_len, + sum, off); off += v.iov_len; }) ) @@ -1355,7 +1431,7 @@ bool csum_and_copy_from_iter_full(void *addr, size_t bytes, __wsum *csum, __wsum sum, next; size_t off = 0; sum = *csum; - if (unlikely(i->type & ITER_PIPE)) { + if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) { WARN_ON(1); return false; } @@ -1373,17 +1449,15 @@ bool csum_and_copy_from_iter_full(void *addr, size_t bytes, __wsum *csum, 0; }), ({ char *p = kmap_atomic(v.bv_page); - next = csum_partial_copy_nocheck(p + v.bv_offset, - (to += v.bv_len) - v.bv_len, - v.bv_len, 0); + sum = csum_and_memcpy((to += v.bv_len) - v.bv_len, + p + v.bv_offset, v.bv_len, + sum, off); kunmap_atomic(p); - sum = csum_block_add(sum, next, off); off += v.bv_len; }),({ - next = csum_partial_copy_nocheck(v.iov_base, - (to += v.iov_len) - v.iov_len, - v.iov_len, 0); - sum = csum_block_add(sum, next, off); + sum = csum_and_memcpy((to += v.iov_len) - v.iov_len, + v.iov_base, v.iov_len, + sum, off); off += v.iov_len; }) ) @@ -1393,14 +1467,19 @@ bool csum_and_copy_from_iter_full(void *addr, size_t bytes, __wsum *csum, } EXPORT_SYMBOL(csum_and_copy_from_iter_full); -size_t csum_and_copy_to_iter(const void *addr, size_t bytes, __wsum *csum, +size_t csum_and_copy_to_iter(const void *addr, size_t bytes, void *csump, struct iov_iter *i) { const char *from = addr; + __wsum *csum = csump; __wsum sum, next; size_t off = 0; + + if (unlikely(iov_iter_is_pipe(i))) + return csum_and_copy_to_pipe_iter(addr, bytes, csum, i); + sum = *csum; - if (unlikely(i->type & ITER_PIPE)) { + if (unlikely(iov_iter_is_discard(i))) { WARN_ON(1); /* for now */ return 0; } @@ -1416,17 +1495,15 @@ size_t csum_and_copy_to_iter(const void *addr, size_t bytes, __wsum *csum, err ? 
v.iov_len : 0; }), ({ char *p = kmap_atomic(v.bv_page); - next = csum_partial_copy_nocheck((from += v.bv_len) - v.bv_len, - p + v.bv_offset, - v.bv_len, 0); + sum = csum_and_memcpy(p + v.bv_offset, + (from += v.bv_len) - v.bv_len, + v.bv_len, sum, off); kunmap_atomic(p); - sum = csum_block_add(sum, next, off); off += v.bv_len; }),({ - next = csum_partial_copy_nocheck((from += v.iov_len) - v.iov_len, - v.iov_base, - v.iov_len, 0); - sum = csum_block_add(sum, next, off); + sum = csum_and_memcpy(v.iov_base, + (from += v.iov_len) - v.iov_len, + v.iov_len, sum, off); off += v.iov_len; }) ) @@ -1435,6 +1512,21 @@ size_t csum_and_copy_to_iter(const void *addr, size_t bytes, __wsum *csum, } EXPORT_SYMBOL(csum_and_copy_to_iter); +size_t hash_and_copy_to_iter(const void *addr, size_t bytes, void *hashp, + struct iov_iter *i) +{ + struct ahash_request *hash = hashp; + struct scatterlist sg; + size_t copied; + + copied = copy_to_iter(addr, bytes, i); + sg_init_one(&sg, addr, copied); + ahash_request_set_crypt(hash, &sg, NULL, copied); + crypto_ahash_update(hash); + return copied; +} +EXPORT_SYMBOL(hash_and_copy_to_iter); + int iov_iter_npages(const struct iov_iter *i, int maxpages) { size_t size = i->count; @@ -1442,8 +1534,10 @@ int iov_iter_npages(const struct iov_iter *i, int maxpages) if (!size) return 0; + if (unlikely(iov_iter_is_discard(i))) + return 0; - if (unlikely(i->type & ITER_PIPE)) { + if (unlikely(iov_iter_is_pipe(i))) { struct pipe_inode_info *pipe = i->pipe; size_t off; int idx; @@ -1481,11 +1575,13 @@ EXPORT_SYMBOL(iov_iter_npages); const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags) { *new = *old; - if (unlikely(new->type & ITER_PIPE)) { + if (unlikely(iov_iter_is_pipe(new))) { WARN_ON(1); return NULL; } - if (new->type & ITER_BVEC) + if (unlikely(iov_iter_is_discard(new))) + return NULL; + if (iov_iter_is_bvec(new)) return new->bvec = kmemdup(new->bvec, new->nr_segs * sizeof(struct bio_vec), flags); @@ -1567,7 +1663,7 @@ int import_single_range(int rw, void __user *buf, size_t len, { if (len > MAX_RW_COUNT) len = MAX_RW_COUNT; - if (unlikely(!access_ok(!rw, buf, len))) + if (unlikely(!access_ok(buf, len))) return -EFAULT; iov->iov_base = buf; diff --git a/lib/kobject.c b/lib/kobject.c index 97d86dc17c42..aa89edcd2b63 100644 --- a/lib/kobject.c +++ b/lib/kobject.c @@ -639,7 +639,7 @@ static void kobject_cleanup(struct kobject *kobj) kobject_name(kobj), kobj, __func__, kobj->parent); if (t && !t->release) - pr_debug("kobject: '%s' (%p): does not have a release() function, it is broken and must be fixed.\n", + pr_debug("kobject: '%s' (%p): does not have a release() function, it is broken and must be fixed. 
See Documentation/kobject.txt.\n", kobject_name(kobj), kobj); /* send "remove" if the caller did not do it but sent "add" */ @@ -887,7 +887,7 @@ static void kset_release(struct kobject *kobj) kfree(kset); } -void kset_get_ownership(struct kobject *kobj, kuid_t *uid, kgid_t *gid) +static void kset_get_ownership(struct kobject *kobj, kuid_t *uid, kgid_t *gid) { if (kobj->parent) kobject_get_ownership(kobj->parent, uid, gid); diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c index 63d0816ab23b..f05802687ba4 100644 --- a/lib/kobject_uevent.c +++ b/lib/kobject_uevent.c @@ -200,7 +200,7 @@ int kobject_synth_uevent(struct kobject *kobj, const char *buf, size_t count) r = kobject_action_type(buf, count, &action, &action_args); if (r) { - msg = "unknown uevent action string\n"; + msg = "unknown uevent action string"; goto out; } @@ -212,7 +212,7 @@ int kobject_synth_uevent(struct kobject *kobj, const char *buf, size_t count) r = kobject_action_args(action_args, count - (action_args - buf), &env); if (r == -EINVAL) { - msg = "incorrect uevent action arguments\n"; + msg = "incorrect uevent action arguments"; goto out; } @@ -224,7 +224,7 @@ int kobject_synth_uevent(struct kobject *kobj, const char *buf, size_t count) out: if (r) { devpath = kobject_get_path(kobj, GFP_KERNEL); - printk(KERN_WARNING "synth uevent: %s: %s", + pr_warn("synth uevent: %s: %s\n", devpath ?: "unknown device", msg ?: "failed to send uevent"); kfree(devpath); @@ -240,6 +240,7 @@ static int kobj_usermode_filter(struct kobject *kobj) ops = kobj_ns_ops(kobj); if (ops) { const void *init_ns, *ns; + ns = kobj->ktype->namespace(kobj); init_ns = ops->initial_ns(); return ns != init_ns; @@ -390,6 +391,7 @@ static int kobject_uevent_net_broadcast(struct kobject *kobj, ops = kobj_ns_ops(kobj); if (!ops && kobj->kset) { struct kobject *ksobj = &kobj->kset->kobj; + if (ksobj->parent != NULL) ops = kobj_ns_ops(ksobj->parent); } @@ -579,7 +581,7 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action, mutex_lock(&uevent_sock_mutex); /* we will send an event, so request a new sequence number */ - retval = add_uevent_var(env, "SEQNUM=%llu", (unsigned long long)++uevent_seqnum); + retval = add_uevent_var(env, "SEQNUM=%llu", ++uevent_seqnum); if (retval) { mutex_unlock(&uevent_sock_mutex); goto exit; @@ -763,8 +765,7 @@ static int uevent_net_init(struct net *net) ue_sk->sk = netlink_kernel_create(net, NETLINK_KOBJECT_UEVENT, &cfg); if (!ue_sk->sk) { - printk(KERN_ERR - "kobject_uevent: unable to create netlink socket!\n"); + pr_err("kobject_uevent: unable to create netlink socket!\n"); kfree(ue_sk); return -ENODEV; } diff --git a/lib/kstrtox.c b/lib/kstrtox.c index 661a1e807bd1..1006bf70bf74 100644 --- a/lib/kstrtox.c +++ b/lib/kstrtox.c @@ -175,7 +175,7 @@ int _kstrtoul(const char *s, unsigned int base, unsigned long *res) rv = kstrtoull(s, base, &tmp); if (rv < 0) return rv; - if (tmp != (unsigned long long)(unsigned long)tmp) + if (tmp != (unsigned long)tmp) return -ERANGE; *res = tmp; return 0; @@ -191,7 +191,7 @@ int _kstrtol(const char *s, unsigned int base, long *res) rv = kstrtoll(s, base, &tmp); if (rv < 0) return rv; - if (tmp != (long long)(long)tmp) + if (tmp != (long)tmp) return -ERANGE; *res = tmp; return 0; @@ -222,7 +222,7 @@ int kstrtouint(const char *s, unsigned int base, unsigned int *res) rv = kstrtoull(s, base, &tmp); if (rv < 0) return rv; - if (tmp != (unsigned long long)(unsigned int)tmp) + if (tmp != (unsigned int)tmp) return -ERANGE; *res = tmp; return 0; @@ -253,7 +253,7 @@ int kstrtoint(const 
char *s, unsigned int base, int *res) rv = kstrtoll(s, base, &tmp); if (rv < 0) return rv; - if (tmp != (long long)(int)tmp) + if (tmp != (int)tmp) return -ERANGE; *res = tmp; return 0; @@ -268,7 +268,7 @@ int kstrtou16(const char *s, unsigned int base, u16 *res) rv = kstrtoull(s, base, &tmp); if (rv < 0) return rv; - if (tmp != (unsigned long long)(u16)tmp) + if (tmp != (u16)tmp) return -ERANGE; *res = tmp; return 0; @@ -283,7 +283,7 @@ int kstrtos16(const char *s, unsigned int base, s16 *res) rv = kstrtoll(s, base, &tmp); if (rv < 0) return rv; - if (tmp != (long long)(s16)tmp) + if (tmp != (s16)tmp) return -ERANGE; *res = tmp; return 0; @@ -298,7 +298,7 @@ int kstrtou8(const char *s, unsigned int base, u8 *res) rv = kstrtoull(s, base, &tmp); if (rv < 0) return rv; - if (tmp != (unsigned long long)(u8)tmp) + if (tmp != (u8)tmp) return -ERANGE; *res = tmp; return 0; @@ -313,7 +313,7 @@ int kstrtos8(const char *s, unsigned int base, s8 *res) rv = kstrtoll(s, base, &tmp); if (rv < 0) return rv; - if (tmp != (long long)(s8)tmp) + if (tmp != (s8)tmp) return -ERANGE; *res = tmp; return 0; diff --git a/lib/livepatch/Makefile b/lib/livepatch/Makefile new file mode 100644 index 000000000000..26900ddaef82 --- /dev/null +++ b/lib/livepatch/Makefile @@ -0,0 +1,15 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Makefile for livepatch test code. + +obj-$(CONFIG_TEST_LIVEPATCH) += test_klp_atomic_replace.o \ + test_klp_callbacks_demo.o \ + test_klp_callbacks_demo2.o \ + test_klp_callbacks_busy.o \ + test_klp_callbacks_mod.o \ + test_klp_livepatch.o \ + test_klp_shadow_vars.o + +# Target modules to be livepatched require CC_FLAGS_FTRACE +CFLAGS_test_klp_callbacks_busy.o += $(CC_FLAGS_FTRACE) +CFLAGS_test_klp_callbacks_mod.o += $(CC_FLAGS_FTRACE) diff --git a/lib/livepatch/test_klp_atomic_replace.c b/lib/livepatch/test_klp_atomic_replace.c new file mode 100644 index 000000000000..5af7093ca00c --- /dev/null +++ b/lib/livepatch/test_klp_atomic_replace.c @@ -0,0 +1,57 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2018 Joe Lawrence <joe.lawrence@redhat.com> + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/livepatch.h> + +static int replace; +module_param(replace, int, 0644); +MODULE_PARM_DESC(replace, "replace (default=0)"); + +#include <linux/seq_file.h> +static int livepatch_meminfo_proc_show(struct seq_file *m, void *v) +{ + seq_printf(m, "%s: %s\n", THIS_MODULE->name, + "this has been live patched"); + return 0; +} + +static struct klp_func funcs[] = { + { + .old_name = "meminfo_proc_show", + .new_func = livepatch_meminfo_proc_show, + }, {} +}; + +static struct klp_object objs[] = { + { + /* name being NULL means vmlinux */ + .funcs = funcs, + }, {} +}; + +static struct klp_patch patch = { + .mod = THIS_MODULE, + .objs = objs, + /* set .replace in the init function below for demo purposes */ +}; + +static int test_klp_atomic_replace_init(void) +{ + patch.replace = replace; + return klp_enable_patch(&patch); +} + +static void test_klp_atomic_replace_exit(void) +{ +} + +module_init(test_klp_atomic_replace_init); +module_exit(test_klp_atomic_replace_exit); +MODULE_LICENSE("GPL"); +MODULE_INFO(livepatch, "Y"); +MODULE_AUTHOR("Joe Lawrence <joe.lawrence@redhat.com>"); +MODULE_DESCRIPTION("Livepatch test: atomic replace"); diff --git a/lib/livepatch/test_klp_callbacks_busy.c b/lib/livepatch/test_klp_callbacks_busy.c new file mode 100644 index 000000000000..40beddf8a0e2 --- /dev/null +++ b/lib/livepatch/test_klp_callbacks_busy.c @@ 
-0,0 +1,43 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2018 Joe Lawrence <joe.lawrence@redhat.com> + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/workqueue.h> +#include <linux/delay.h> + +static int sleep_secs; +module_param(sleep_secs, int, 0644); +MODULE_PARM_DESC(sleep_secs, "sleep_secs (default=0)"); + +static void busymod_work_func(struct work_struct *work); +static DECLARE_DELAYED_WORK(work, busymod_work_func); + +static void busymod_work_func(struct work_struct *work) +{ + pr_info("%s, sleeping %d seconds ...\n", __func__, sleep_secs); + msleep(sleep_secs * 1000); + pr_info("%s exit\n", __func__); +} + +static int test_klp_callbacks_busy_init(void) +{ + pr_info("%s\n", __func__); + schedule_delayed_work(&work, + msecs_to_jiffies(1000 * 0)); + return 0; +} + +static void test_klp_callbacks_busy_exit(void) +{ + cancel_delayed_work_sync(&work); + pr_info("%s\n", __func__); +} + +module_init(test_klp_callbacks_busy_init); +module_exit(test_klp_callbacks_busy_exit); +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Joe Lawrence <joe.lawrence@redhat.com>"); +MODULE_DESCRIPTION("Livepatch test: busy target module"); diff --git a/lib/livepatch/test_klp_callbacks_demo.c b/lib/livepatch/test_klp_callbacks_demo.c new file mode 100644 index 000000000000..3fd8fe1cd1cc --- /dev/null +++ b/lib/livepatch/test_klp_callbacks_demo.c @@ -0,0 +1,121 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2018 Joe Lawrence <joe.lawrence@redhat.com> + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/livepatch.h> + +static int pre_patch_ret; +module_param(pre_patch_ret, int, 0644); +MODULE_PARM_DESC(pre_patch_ret, "pre_patch_ret (default=0)"); + +static const char *const module_state[] = { + [MODULE_STATE_LIVE] = "[MODULE_STATE_LIVE] Normal state", + [MODULE_STATE_COMING] = "[MODULE_STATE_COMING] Fully formed, running module_init", + [MODULE_STATE_GOING] = "[MODULE_STATE_GOING] Going away", + [MODULE_STATE_UNFORMED] = "[MODULE_STATE_UNFORMED] Still setting it up", +}; + +static void callback_info(const char *callback, struct klp_object *obj) +{ + if (obj->mod) + pr_info("%s: %s -> %s\n", callback, obj->mod->name, + module_state[obj->mod->state]); + else + pr_info("%s: vmlinux\n", callback); +} + +/* Executed on object patching (ie, patch enablement) */ +static int pre_patch_callback(struct klp_object *obj) +{ + callback_info(__func__, obj); + return pre_patch_ret; +} + +/* Executed on object patching (ie, patch enablement) */ +static void post_patch_callback(struct klp_object *obj) +{ + callback_info(__func__, obj); +} + +/* Executed on object unpatching (ie, patch disablement) */ +static void pre_unpatch_callback(struct klp_object *obj) +{ + callback_info(__func__, obj); +} + +/* Executed on object unpatching (ie, patch disablement) */ +static void post_unpatch_callback(struct klp_object *obj) +{ + callback_info(__func__, obj); +} + +static void patched_work_func(struct work_struct *work) +{ + pr_info("%s\n", __func__); +} + +static struct klp_func no_funcs[] = { + {} +}; + +static struct klp_func busymod_funcs[] = { + { + .old_name = "busymod_work_func", + .new_func = patched_work_func, + }, {} +}; + +static struct klp_object objs[] = { + { + .name = NULL, /* vmlinux */ + .funcs = no_funcs, + .callbacks = { + .pre_patch = pre_patch_callback, + .post_patch = post_patch_callback, + .pre_unpatch = pre_unpatch_callback, + .post_unpatch = post_unpatch_callback,
+ }, + }, { + .name = "test_klp_callbacks_mod", + .funcs = no_funcs, + .callbacks = { + .pre_patch = pre_patch_callback, + .post_patch = post_patch_callback, + .pre_unpatch = pre_unpatch_callback, + .post_unpatch = post_unpatch_callback, + }, + }, { + .name = "test_klp_callbacks_busy", + .funcs = busymod_funcs, + .callbacks = { + .pre_patch = pre_patch_callback, + .post_patch = post_patch_callback, + .pre_unpatch = pre_unpatch_callback, + .post_unpatch = post_unpatch_callback, + }, + }, { } +}; + +static struct klp_patch patch = { + .mod = THIS_MODULE, + .objs = objs, +}; + +static int test_klp_callbacks_demo_init(void) +{ + return klp_enable_patch(&patch); +} + +static void test_klp_callbacks_demo_exit(void) +{ +} + +module_init(test_klp_callbacks_demo_init); +module_exit(test_klp_callbacks_demo_exit); +MODULE_LICENSE("GPL"); +MODULE_INFO(livepatch, "Y"); +MODULE_AUTHOR("Joe Lawrence <joe.lawrence@redhat.com>"); +MODULE_DESCRIPTION("Livepatch test: livepatch demo"); diff --git a/lib/livepatch/test_klp_callbacks_demo2.c b/lib/livepatch/test_klp_callbacks_demo2.c new file mode 100644 index 000000000000..5417573e80af --- /dev/null +++ b/lib/livepatch/test_klp_callbacks_demo2.c @@ -0,0 +1,93 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2018 Joe Lawrence <joe.lawrence@redhat.com> + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/livepatch.h> + +static int replace; +module_param(replace, int, 0644); +MODULE_PARM_DESC(replace, "replace (default=0)"); + +static const char *const module_state[] = { + [MODULE_STATE_LIVE] = "[MODULE_STATE_LIVE] Normal state", + [MODULE_STATE_COMING] = "[MODULE_STATE_COMING] Fully formed, running module_init", + [MODULE_STATE_GOING] = "[MODULE_STATE_GOING] Going away", + [MODULE_STATE_UNFORMED] = "[MODULE_STATE_UNFORMED] Still setting it up", +}; + +static void callback_info(const char *callback, struct klp_object *obj) +{ + if (obj->mod) + pr_info("%s: %s -> %s\n", callback, obj->mod->name, + module_state[obj->mod->state]); + else + pr_info("%s: vmlinux\n", callback); +} + +/* Executed on object patching (ie, patch enablement) */ +static int pre_patch_callback(struct klp_object *obj) +{ + callback_info(__func__, obj); + return 0; +} + +/* Executed on object patching (ie, patch enablement) */ +static void post_patch_callback(struct klp_object *obj) +{ + callback_info(__func__, obj); +} + +/* Executed on object unpatching (ie, patch disablement) */ +static void pre_unpatch_callback(struct klp_object *obj) +{ + callback_info(__func__, obj); +} + +/* Executed on object unpatching (ie, patch disablement) */ +static void post_unpatch_callback(struct klp_object *obj) +{ + callback_info(__func__, obj); +} + +static struct klp_func no_funcs[] = { + { } +}; + +static struct klp_object objs[] = { + { + .name = NULL, /* vmlinux */ + .funcs = no_funcs, + .callbacks = { + .pre_patch = pre_patch_callback, + .post_patch = post_patch_callback, + .pre_unpatch = pre_unpatch_callback, + .post_unpatch = post_unpatch_callback, + }, + }, { } +}; + +static struct klp_patch patch = { + .mod = THIS_MODULE, + .objs = objs, + /* set .replace in the init function below for demo purposes */ +}; + +static int test_klp_callbacks_demo2_init(void) +{ + patch.replace = replace; + return klp_enable_patch(&patch); +} + +static void test_klp_callbacks_demo2_exit(void) +{ +} + +module_init(test_klp_callbacks_demo2_init); +module_exit(test_klp_callbacks_demo2_exit); +MODULE_LICENSE("GPL"); +MODULE_INFO(livepatch, "Y");
+MODULE_AUTHOR("Joe Lawrence <joe.lawrence@redhat.com>"); +MODULE_DESCRIPTION("Livepatch test: livepatch demo2"); diff --git a/lib/livepatch/test_klp_callbacks_mod.c b/lib/livepatch/test_klp_callbacks_mod.c new file mode 100644 index 000000000000..8fbe645b1c2c --- /dev/null +++ b/lib/livepatch/test_klp_callbacks_mod.c @@ -0,0 +1,24 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2018 Joe Lawrence <joe.lawrence@redhat.com> + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/module.h> +#include <linux/kernel.h> + +static int test_klp_callbacks_mod_init(void) +{ + pr_info("%s\n", __func__); + return 0; +} + +static void test_klp_callbacks_mod_exit(void) +{ + pr_info("%s\n", __func__); +} + +module_init(test_klp_callbacks_mod_init); +module_exit(test_klp_callbacks_mod_exit); +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Joe Lawrence <joe.lawrence@redhat.com>"); +MODULE_DESCRIPTION("Livepatch test: target module"); diff --git a/lib/livepatch/test_klp_livepatch.c b/lib/livepatch/test_klp_livepatch.c new file mode 100644 index 000000000000..aff08199de71 --- /dev/null +++ b/lib/livepatch/test_klp_livepatch.c @@ -0,0 +1,51 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2014 Seth Jennings <sjenning@redhat.com> + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/livepatch.h> + +#include <linux/seq_file.h> +static int livepatch_cmdline_proc_show(struct seq_file *m, void *v) +{ + seq_printf(m, "%s: %s\n", THIS_MODULE->name, + "this has been live patched"); + return 0; +} + +static struct klp_func funcs[] = { + { + .old_name = "cmdline_proc_show", + .new_func = livepatch_cmdline_proc_show, + }, { } +}; + +static struct klp_object objs[] = { + { + /* name being NULL means vmlinux */ + .funcs = funcs, + }, { } +}; + +static struct klp_patch patch = { + .mod = THIS_MODULE, + .objs = objs, +}; + +static int test_klp_livepatch_init(void) +{ + return klp_enable_patch(&patch); +} + +static void test_klp_livepatch_exit(void) +{ +} + +module_init(test_klp_livepatch_init); +module_exit(test_klp_livepatch_exit); +MODULE_LICENSE("GPL"); +MODULE_INFO(livepatch, "Y"); +MODULE_AUTHOR("Seth Jennings <sjenning@redhat.com>"); +MODULE_DESCRIPTION("Livepatch test: livepatch module"); diff --git a/lib/livepatch/test_klp_shadow_vars.c b/lib/livepatch/test_klp_shadow_vars.c new file mode 100644 index 000000000000..fe5c413efe96 --- /dev/null +++ b/lib/livepatch/test_klp_shadow_vars.c @@ -0,0 +1,258 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2018 Joe Lawrence <joe.lawrence@redhat.com> + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/list.h> +#include <linux/livepatch.h> +#include <linux/slab.h> + +/* + * Keep a small list of pointers so that we can print address-agnostic + * pointer values. Use a rolling integer count to differentiate the values. + * Ironically we could have used the shadow variable API to do this, but + * let's not lean too heavily on the very code we're testing. 
+ */ +static LIST_HEAD(ptr_list); +struct shadow_ptr { + void *ptr; + int id; + struct list_head list; +}; + +static void free_ptr_list(void) +{ + struct shadow_ptr *sp, *tmp_sp; + + list_for_each_entry_safe(sp, tmp_sp, &ptr_list, list) { + list_del(&sp->list); + kfree(sp); + } +} + +static int ptr_id(void *ptr) +{ + struct shadow_ptr *sp; + static int count; + + list_for_each_entry(sp, &ptr_list, list) { + if (sp->ptr == ptr) + return sp->id; + } + + sp = kmalloc(sizeof(*sp), GFP_ATOMIC); + if (!sp) + return -ENOMEM; + sp->ptr = ptr; + sp->id = count++; + + list_add(&sp->list, &ptr_list); + + return sp->id; +} + +/* + * Shadow variable wrapper functions that echo the function and arguments + * to the kernel log for testing verification. Don't display raw pointers, + * but use the ptr_id() value instead. + */ +static void *shadow_get(void *obj, unsigned long id) +{ + void *ret = klp_shadow_get(obj, id); + + pr_info("klp_%s(obj=PTR%d, id=0x%lx) = PTR%d\n", + __func__, ptr_id(obj), id, ptr_id(ret)); + + return ret; +} + +static void *shadow_alloc(void *obj, unsigned long id, size_t size, + gfp_t gfp_flags, klp_shadow_ctor_t ctor, + void *ctor_data) +{ + void *ret = klp_shadow_alloc(obj, id, size, gfp_flags, ctor, + ctor_data); + pr_info("klp_%s(obj=PTR%d, id=0x%lx, size=%zx, gfp_flags=%pGg, ctor=PTR%d, ctor_data=PTR%d) = PTR%d\n", + __func__, ptr_id(obj), id, size, &gfp_flags, ptr_id(ctor), + ptr_id(ctor_data), ptr_id(ret)); + return ret; +} + +static void *shadow_get_or_alloc(void *obj, unsigned long id, size_t size, + gfp_t gfp_flags, klp_shadow_ctor_t ctor, + void *ctor_data) +{ + void *ret = klp_shadow_get_or_alloc(obj, id, size, gfp_flags, ctor, + ctor_data); + pr_info("klp_%s(obj=PTR%d, id=0x%lx, size=%zx, gfp_flags=%pGg, ctor=PTR%d, ctor_data=PTR%d) = PTR%d\n", + __func__, ptr_id(obj), id, size, &gfp_flags, ptr_id(ctor), + ptr_id(ctor_data), ptr_id(ret)); + return ret; +} + +static void shadow_free(void *obj, unsigned long id, klp_shadow_dtor_t dtor) +{ + klp_shadow_free(obj, id, dtor); + pr_info("klp_%s(obj=PTR%d, id=0x%lx, dtor=PTR%d)\n", + __func__, ptr_id(obj), id, ptr_id(dtor)); +} + +static void shadow_free_all(unsigned long id, klp_shadow_dtor_t dtor) +{ + klp_shadow_free_all(id, dtor); + pr_info("klp_%s(id=0x%lx, dtor=PTR%d)\n", + __func__, id, ptr_id(dtor)); +} + + +/* Shadow variable constructor - remember simple pointer data */ +static int shadow_ctor(void *obj, void *shadow_data, void *ctor_data) +{ + int **shadow_int = shadow_data; + *shadow_int = ctor_data; + pr_info("%s: PTR%d -> PTR%d\n", + __func__, ptr_id(shadow_int), ptr_id(ctor_data)); + + return 0; +} + +static void shadow_dtor(void *obj, void *shadow_data) +{ + pr_info("%s(obj=PTR%d, shadow_data=PTR%d)\n", + __func__, ptr_id(obj), ptr_id(shadow_data)); +} + +static int test_klp_shadow_vars_init(void) +{ + void *obj = THIS_MODULE; + int id = 0x1234; + size_t size = sizeof(int *); + gfp_t gfp_flags = GFP_KERNEL; + + int var1, var2, var3, var4; + int **sv1, **sv2, **sv3, **sv4; + + void *ret; + + ptr_id(NULL); + ptr_id(&var1); + ptr_id(&var2); + ptr_id(&var3); + ptr_id(&var4); + + /* + * With an empty shadow variable hash table, expect not to find + * any matches. + */ + ret = shadow_get(obj, id); + if (!ret) + pr_info(" got expected NULL result\n"); + + /* + * Allocate a few shadow variables with different <obj> and <id>.
+ */ + sv1 = shadow_alloc(obj, id, size, gfp_flags, shadow_ctor, &var1); + if (!sv1) + return -ENOMEM; + + sv2 = shadow_alloc(obj + 1, id, size, gfp_flags, shadow_ctor, &var2); + if (!sv2) + return -ENOMEM; + + sv3 = shadow_alloc(obj, id + 1, size, gfp_flags, shadow_ctor, &var3); + if (!sv3) + return -ENOMEM; + + /* + * Verify we can find our new shadow variables and that they point + * to expected data. + */ + ret = shadow_get(obj, id); + if (!ret) + return -EINVAL; + if (ret == sv1 && *sv1 == &var1) + pr_info(" got expected PTR%d -> PTR%d result\n", + ptr_id(sv1), ptr_id(*sv1)); + + ret = shadow_get(obj + 1, id); + if (!ret) + return -EINVAL; + if (ret == sv2 && *sv2 == &var2) + pr_info(" got expected PTR%d -> PTR%d result\n", + ptr_id(sv2), ptr_id(*sv2)); + ret = shadow_get(obj, id + 1); + if (!ret) + return -EINVAL; + if (ret == sv3 && *sv3 == &var3) + pr_info(" got expected PTR%d -> PTR%d result\n", + ptr_id(sv3), ptr_id(*sv3)); + + /* + * Allocate or get a few more, this time with the same <obj>, <id>. + * The second invocation should return the same shadow var. + */ + sv4 = shadow_get_or_alloc(obj + 2, id, size, gfp_flags, shadow_ctor, &var4); + if (!sv4) + return -ENOMEM; + + ret = shadow_get_or_alloc(obj + 2, id, size, gfp_flags, shadow_ctor, &var4); + if (!ret) + return -EINVAL; + if (ret == sv4 && *sv4 == &var4) + pr_info(" got expected PTR%d -> PTR%d result\n", + ptr_id(sv4), ptr_id(*sv4)); + + /* + * Free the <obj=*, id> shadow variables and check that we can no + * longer find them. + */ + shadow_free(obj, id, shadow_dtor); /* sv1 */ + ret = shadow_get(obj, id); + if (!ret) + pr_info(" got expected NULL result\n"); + + shadow_free(obj + 1, id, shadow_dtor); /* sv2 */ + ret = shadow_get(obj + 1, id); + if (!ret) + pr_info(" got expected NULL result\n"); + + shadow_free(obj + 2, id, shadow_dtor); /* sv4 */ + ret = shadow_get(obj + 2, id); + if (!ret) + pr_info(" got expected NULL result\n"); + + /* + * We should still find an <id+1> variable. + */ + ret = shadow_get(obj, id + 1); + if (!ret) + return -EINVAL; + if (ret == sv3 && *sv3 == &var3) + pr_info(" got expected PTR%d -> PTR%d result\n", + ptr_id(sv3), ptr_id(*sv3)); + + /* + * Free all the <id+1> variables, too. 
+ */ + shadow_free_all(id + 1, shadow_dtor); /* sv3 */ + ret = shadow_get(obj, id); + if (!ret) + pr_info(" shadow_get() got expected NULL result\n"); + + + free_ptr_list(); + + return 0; +} + +static void test_klp_shadow_vars_exit(void) +{ +} + +module_init(test_klp_shadow_vars_init); +module_exit(test_klp_shadow_vars_exit); +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Joe Lawrence <joe.lawrence@redhat.com>"); +MODULE_DESCRIPTION("Livepatch test: shadow variables"); diff --git a/lib/locking-selftest.c b/lib/locking-selftest.c index 1e1bbf171eca..a1705545e6ac 100644 --- a/lib/locking-selftest.c +++ b/lib/locking-selftest.c @@ -1989,6 +1989,7 @@ void locking_selftest(void) init_shared_classes(); debug_locks_silent = !debug_locks_verbose; + lockdep_set_selftest_task(current); DO_TESTCASE_6R("A-A deadlock", AA); DO_TESTCASE_6R("A-B-B-A deadlock", ABBA); @@ -2097,5 +2098,6 @@ void locking_selftest(void) printk("---------------------------------\n"); debug_locks = 1; } + lockdep_set_selftest_task(NULL); debug_locks_silent = 0; } diff --git a/lib/lz4/lz4_decompress.c b/lib/lz4/lz4_decompress.c index 141734d255e4..0c9d3ad17e0f 100644 --- a/lib/lz4/lz4_decompress.c +++ b/lib/lz4/lz4_decompress.c @@ -43,30 +43,36 @@ /*-***************************** * Decompression functions *******************************/ -/* LZ4_decompress_generic() : - * This generic decompression function cover all use cases. - * It shall be instantiated several times, using different sets of directives - * Note that it is important this generic function is really inlined, + +#define DEBUGLOG(l, ...) {} /* disabled */ + +#ifndef assert +#define assert(condition) ((void)0) +#endif + +/* + * LZ4_decompress_generic() : + * This generic decompression function covers all use cases. + * It shall be instantiated several times, using different sets of directives. + * Note that it is important for performance that this function really get inlined, * in order to remove useless branches during compilation optimization. */ static FORCE_INLINE int LZ4_decompress_generic( - const char * const source, - char * const dest, - int inputSize, + const char * const src, + char * const dst, + int srcSize, /* * If endOnInput == endOnInputSize, - * this value is the max size of Output Buffer. 
+ * this value is `dstCapacity` */ int outputSize, /* endOnOutputSize, endOnInputSize */ - int endOnInput, + endCondition_directive endOnInput, /* full, partial */ - int partialDecoding, - /* only used if partialDecoding == partial */ - int targetOutputSize, + earlyEnd_directive partialDecoding, /* noDict, withPrefix64k, usingExtDict */ - int dict, - /* == dest when no prefix */ + dict_directive dict, + /* always <= dst, == dst when no prefix */ const BYTE * const lowPrefix, /* only if dict == usingExtDict */ const BYTE * const dictStart, @@ -74,35 +80,43 @@ static FORCE_INLINE int LZ4_decompress_generic( const size_t dictSize ) { - /* Local Variables */ - const BYTE *ip = (const BYTE *) source; - const BYTE * const iend = ip + inputSize; + const BYTE *ip = (const BYTE *) src; + const BYTE * const iend = ip + srcSize; - BYTE *op = (BYTE *) dest; + BYTE *op = (BYTE *) dst; BYTE * const oend = op + outputSize; BYTE *cpy; - BYTE *oexit = op + targetOutputSize; - const BYTE * const lowLimit = lowPrefix - dictSize; const BYTE * const dictEnd = (const BYTE *)dictStart + dictSize; - static const unsigned int dec32table[] = { 0, 1, 2, 1, 4, 4, 4, 4 }; - static const int dec64table[] = { 0, 0, 0, -1, 0, 1, 2, 3 }; + static const unsigned int inc32table[8] = {0, 1, 2, 1, 0, 4, 4, 4}; + static const int dec64table[8] = {0, 0, 0, -1, -4, 1, 2, 3}; const int safeDecode = (endOnInput == endOnInputSize); const int checkOffset = ((safeDecode) && (dictSize < (int)(64 * KB))); + /* Set up the "end" pointers for the shortcut. */ + const BYTE *const shortiend = iend - + (endOnInput ? 14 : 8) /*maxLL*/ - 2 /*offset*/; + const BYTE *const shortoend = oend - + (endOnInput ? 14 : 8) /*maxLL*/ - 18 /*maxML*/; + + DEBUGLOG(5, "%s (srcSize:%i, dstSize:%i)", __func__, + srcSize, outputSize); + /* Special cases */ - /* targetOutputSize too high => decode everything */ - if ((partialDecoding) && (oexit > oend - MFLIMIT)) - oexit = oend - MFLIMIT; + assert(lowPrefix <= op); + assert(src != NULL); /* Empty output buffer */ if ((endOnInput) && (unlikely(outputSize == 0))) - return ((inputSize == 1) && (*ip == 0)) ? 0 : -1; + return ((srcSize == 1) && (*ip == 0)) ? 0 : -1; if ((!endOnInput) && (unlikely(outputSize == 0))) return (*ip == 0 ? 1 : -1); + if ((endOnInput) && unlikely(srcSize == 0)) + return -1; + /* Main Loop : decode sequences */ while (1) { size_t length; @@ -111,12 +125,74 @@ static FORCE_INLINE int LZ4_decompress_generic( /* get literal length */ unsigned int const token = *ip++; - length = token>>ML_BITS; + /* ip < iend before the increment */ + assert(!endOnInput || ip <= iend); + + /* + * A two-stage shortcut for the most common case: + * 1) If the literal length is 0..14, and there is enough + * space, enter the shortcut and copy 16 bytes on behalf + * of the literals (in the fast mode, only 8 bytes can be + * safely copied this way). + * 2) Further if the match length is 4..18, copy 18 bytes + * in a similar manner; but we ensure that there's enough + * space in the output for those 18 bytes earlier, upon + * entering the shortcut (in other words, there is a + * combined check for both stages). + */ + if ((endOnInput ? length != RUN_MASK : length <= 8) + /* + * strictly "less than" on input, to re-enter + * the loop with at least one byte + */ + && likely((endOnInput ? ip < shortiend : 1) & + (op <= shortoend))) { + /* Copy the literals */ + memcpy(op, ip, endOnInput ? 16 : 8); + op += length; ip += length; + + /* + * The second stage: + * prepare for match copying, decode full info. 
+ * If it doesn't work out, the info won't be wasted. + */ + length = token & ML_MASK; /* match length */ + offset = LZ4_readLE16(ip); + ip += 2; + match = op - offset; + assert(match <= op); /* check overflow */ + + /* Do not deal with overlapping matches. */ + if ((length != ML_MASK) && + (offset >= 8) && + (dict == withPrefix64k || match >= lowPrefix)) { + /* Copy the match. */ + memcpy(op + 0, match + 0, 8); + memcpy(op + 8, match + 8, 8); + memcpy(op + 16, match + 16, 2); + op += length + MINMATCH; + /* Both stages worked, load the next token. */ + continue; + } + + /* + * The second stage didn't work out, but the info + * is ready. Propel it right to the point of match + * copying. + */ + goto _copy_match; + } + + /* decode literal length */ if (length == RUN_MASK) { unsigned int s; + if (unlikely(endOnInput ? ip >= iend - RUN_MASK : 0)) { + /* overflow detection */ + goto _output_error; + } do { s = *ip++; length += s; @@ -125,14 +201,14 @@ static FORCE_INLINE int LZ4_decompress_generic( : 1) & (s == 255)); if ((safeDecode) - && unlikely( - (size_t)(op + length) < (size_t)(op))) { + && unlikely((uptrval)(op) + + length < (uptrval)(op))) { /* overflow detection */ goto _output_error; } if ((safeDecode) - && unlikely( - (size_t)(ip + length) < (size_t)(ip))) { + && unlikely((uptrval)(ip) + + length < (uptrval)(ip))) { /* overflow detection */ goto _output_error; } @@ -140,16 +216,19 @@ static FORCE_INLINE int LZ4_decompress_generic( /* copy literals */ cpy = op + length; - if (((endOnInput) && ((cpy > (partialDecoding ? oexit : oend - MFLIMIT)) + LZ4_STATIC_ASSERT(MFLIMIT >= WILDCOPYLENGTH); + + if (((endOnInput) && ((cpy > oend - MFLIMIT) || (ip + length > iend - (2 + 1 + LASTLITERALS)))) || ((!endOnInput) && (cpy > oend - WILDCOPYLENGTH))) { if (partialDecoding) { if (cpy > oend) { /* - * Error : - * write attempt beyond end of output buffer + * Partial decoding : + * stop in the middle of literal segment */ - goto _output_error; + cpy = oend; + length = oend - op; } if ((endOnInput) && (ip + length > iend)) { @@ -184,29 +263,43 @@ static FORCE_INLINE int LZ4_decompress_generic( memcpy(op, ip, length); ip += length; op += length; + /* Necessarily EOF, due to parsing restrictions */ - break; + if (!partialDecoding || (cpy == oend)) + break; + } else { + /* may overwrite up to WILDCOPYLENGTH beyond cpy */ + LZ4_wildCopy(op, ip, cpy); + ip += length; + op = cpy; } - LZ4_wildCopy(op, ip, cpy); - ip += length; - op = cpy; - /* get offset */ offset = LZ4_readLE16(ip); ip += 2; match = op - offset; - if ((checkOffset) && (unlikely(match < lowLimit))) { + /* get matchlength */ + length = token & ML_MASK; + +_copy_match: + if ((checkOffset) && (unlikely(match + dictSize < lowPrefix))) { /* Error : offset outside buffers */ goto _output_error; } /* costs ~1%; silence an msan warning when offset == 0 */ - LZ4_write32(op, (U32)offset); + /* + * note : when partialDecoding, there is no guarantee that + * at least 4 bytes remain available in output buffer + */ + if (!partialDecoding) { + assert(oend > op); + assert(oend - op >= 4); + + LZ4_write32(op, (U32)offset); + } - /* get matchlength */ - length = token & ML_MASK; if (length == ML_MASK) { unsigned int s; @@ -221,7 +314,7 @@ static FORCE_INLINE int LZ4_decompress_generic( if ((safeDecode) && unlikely( - (size_t)(op + length) < (size_t)op)) { + (uptrval)(op) + length < (uptrval)op)) { /* overflow detection */ goto _output_error; } @@ -229,24 +322,26 @@ static FORCE_INLINE int LZ4_decompress_generic( length += MINMATCH; - /* check external 
dictionary */ + /* match starting within external dictionary */ if ((dict == usingExtDict) && (match < lowPrefix)) { if (unlikely(op + length > oend - LASTLITERALS)) { /* doesn't respect parsing restriction */ - goto _output_error; + if (!partialDecoding) + goto _output_error; + length = min(length, (size_t)(oend - op)); } if (length <= (size_t)(lowPrefix - match)) { /* - * match can be copied as a single segment - * from external dictionary + * match fits entirely within external + * dictionary : just copy */ memmove(op, dictEnd - (lowPrefix - match), length); op += length; } else { /* - * match encompass external + * match stretches into both external * dictionary and current block */ size_t const copySize = (size_t)(lowPrefix - match); @@ -254,7 +349,6 @@ static FORCE_INLINE int LZ4_decompress_generic( memcpy(op, dictEnd - copySize, copySize); op += copySize; - if (restSize > (size_t)(op - lowPrefix)) { /* overlap copy */ BYTE * const endOfMatch = op + restSize; @@ -267,23 +361,44 @@ static FORCE_INLINE int LZ4_decompress_generic( op += restSize; } } - continue; } /* copy match within block */ cpy = op + length; - if (unlikely(offset < 8)) { - const int dec64 = dec64table[offset]; + /* + * partialDecoding : + * may not respect endBlock parsing restrictions + */ + assert(op <= oend); + if (partialDecoding && + (cpy > oend - MATCH_SAFEGUARD_DISTANCE)) { + size_t const mlen = min(length, (size_t)(oend - op)); + const BYTE * const matchEnd = match + mlen; + BYTE * const copyEnd = op + mlen; + + if (matchEnd > op) { + /* overlap copy */ + while (op < copyEnd) + *op++ = *match++; + } else { + memcpy(op, match, mlen); + } + op = copyEnd; + if (op == oend) + break; + continue; + } + if (unlikely(offset < 8)) { op[0] = match[0]; op[1] = match[1]; op[2] = match[2]; op[3] = match[3]; - match += dec32table[offset]; + match += inc32table[offset]; memcpy(op + 4, match, 4); - match -= dec64; + match -= dec64table[offset]; } else { LZ4_copy8(op, match); match += 8; @@ -291,7 +406,7 @@ static FORCE_INLINE int LZ4_decompress_generic( op += 8; - if (unlikely(cpy > oend - 12)) { + if (unlikely(cpy > oend - MATCH_SAFEGUARD_DISTANCE)) { BYTE * const oCopyLimit = oend - (WILDCOPYLENGTH - 1); if (cpy > oend - LASTLITERALS) { @@ -307,60 +422,139 @@ static FORCE_INLINE int LZ4_decompress_generic( match += oCopyLimit - op; op = oCopyLimit; } - while (op < cpy) *op++ = *match++; } else { LZ4_copy8(op, match); - if (length > 16) LZ4_wildCopy(op + 8, match + 8, cpy); } - - op = cpy; /* correction */ + op = cpy; /* wildcopy correction */ } /* end of decoding */ if (endOnInput) { /* Nb of output bytes decoded */ - return (int) (((char *)op) - dest); + return (int) (((char *)op) - dst); } else { /* Nb of input bytes read */ - return (int) (((const char *)ip) - source); + return (int) (((const char *)ip) - src); } /* Overflow error detected */ _output_error: - return -1; + return (int) (-(((const char *)ip) - src)) - 1; } int LZ4_decompress_safe(const char *source, char *dest, int compressedSize, int maxDecompressedSize) { - return LZ4_decompress_generic(source, dest, compressedSize, - maxDecompressedSize, endOnInputSize, full, 0, - noDict, (BYTE *)dest, NULL, 0); + return LZ4_decompress_generic(source, dest, + compressedSize, maxDecompressedSize, + endOnInputSize, decode_full_block, + noDict, (BYTE *)dest, NULL, 0); } -int LZ4_decompress_safe_partial(const char *source, char *dest, - int compressedSize, int targetOutputSize, int maxDecompressedSize) +int LZ4_decompress_safe_partial(const char *src, char *dst, + int 
compressedSize, int targetOutputSize, int dstCapacity) { - return LZ4_decompress_generic(source, dest, compressedSize, - maxDecompressedSize, endOnInputSize, partial, - targetOutputSize, noDict, (BYTE *)dest, NULL, 0); + dstCapacity = min(targetOutputSize, dstCapacity); + return LZ4_decompress_generic(src, dst, compressedSize, dstCapacity, + endOnInputSize, partial_decode, + noDict, (BYTE *)dst, NULL, 0); } int LZ4_decompress_fast(const char *source, char *dest, int originalSize) { return LZ4_decompress_generic(source, dest, 0, originalSize, - endOnOutputSize, full, 0, withPrefix64k, - (BYTE *)(dest - 64 * KB), NULL, 64 * KB); + endOnOutputSize, decode_full_block, + withPrefix64k, + (BYTE *)dest - 64 * KB, NULL, 0); +} + +/* ===== Instantiate a few more decoding cases, used more than once. ===== */ + +int LZ4_decompress_safe_withPrefix64k(const char *source, char *dest, + int compressedSize, int maxOutputSize) +{ + return LZ4_decompress_generic(source, dest, + compressedSize, maxOutputSize, + endOnInputSize, decode_full_block, + withPrefix64k, + (BYTE *)dest - 64 * KB, NULL, 0); +} + +static int LZ4_decompress_safe_withSmallPrefix(const char *source, char *dest, + int compressedSize, + int maxOutputSize, + size_t prefixSize) +{ + return LZ4_decompress_generic(source, dest, + compressedSize, maxOutputSize, + endOnInputSize, decode_full_block, + noDict, + (BYTE *)dest - prefixSize, NULL, 0); +} + +int LZ4_decompress_safe_forceExtDict(const char *source, char *dest, + int compressedSize, int maxOutputSize, + const void *dictStart, size_t dictSize) +{ + return LZ4_decompress_generic(source, dest, + compressedSize, maxOutputSize, + endOnInputSize, decode_full_block, + usingExtDict, (BYTE *)dest, + (const BYTE *)dictStart, dictSize); } +static int LZ4_decompress_fast_extDict(const char *source, char *dest, + int originalSize, + const void *dictStart, size_t dictSize) +{ + return LZ4_decompress_generic(source, dest, + 0, originalSize, + endOnOutputSize, decode_full_block, + usingExtDict, (BYTE *)dest, + (const BYTE *)dictStart, dictSize); +} + +/* + * The "double dictionary" mode, for use with e.g. ring buffers: the first part + * of the dictionary is passed as prefix, and the second via dictStart + dictSize. + * These routines are used only once, in LZ4_decompress_*_continue(). 
+ */ +static FORCE_INLINE +int LZ4_decompress_safe_doubleDict(const char *source, char *dest, + int compressedSize, int maxOutputSize, + size_t prefixSize, + const void *dictStart, size_t dictSize) +{ + return LZ4_decompress_generic(source, dest, + compressedSize, maxOutputSize, + endOnInputSize, decode_full_block, + usingExtDict, (BYTE *)dest - prefixSize, + (const BYTE *)dictStart, dictSize); +} + +static FORCE_INLINE +int LZ4_decompress_fast_doubleDict(const char *source, char *dest, + int originalSize, size_t prefixSize, + const void *dictStart, size_t dictSize) +{ + return LZ4_decompress_generic(source, dest, + 0, originalSize, + endOnOutputSize, decode_full_block, + usingExtDict, (BYTE *)dest - prefixSize, + (const BYTE *)dictStart, dictSize); +} + +/* ===== streaming decompression functions ===== */ + int LZ4_setStreamDecode(LZ4_streamDecode_t *LZ4_streamDecode, const char *dictionary, int dictSize) { - LZ4_streamDecode_t_internal *lz4sd = (LZ4_streamDecode_t_internal *) LZ4_streamDecode; + LZ4_streamDecode_t_internal *lz4sd = + &LZ4_streamDecode->internal_donotuse; lz4sd->prefixSize = (size_t) dictSize; lz4sd->prefixEnd = (const BYTE *) dictionary + dictSize; @@ -382,35 +576,51 @@ int LZ4_setStreamDecode(LZ4_streamDecode_t *LZ4_streamDecode, int LZ4_decompress_safe_continue(LZ4_streamDecode_t *LZ4_streamDecode, const char *source, char *dest, int compressedSize, int maxOutputSize) { - LZ4_streamDecode_t_internal *lz4sd = &LZ4_streamDecode->internal_donotuse; + LZ4_streamDecode_t_internal *lz4sd = + &LZ4_streamDecode->internal_donotuse; int result; - if (lz4sd->prefixEnd == (BYTE *)dest) { - result = LZ4_decompress_generic(source, dest, - compressedSize, - maxOutputSize, - endOnInputSize, full, 0, - usingExtDict, lz4sd->prefixEnd - lz4sd->prefixSize, - lz4sd->externalDict, - lz4sd->extDictSize); - + if (lz4sd->prefixSize == 0) { + /* The first call, no dictionary yet. */ + assert(lz4sd->extDictSize == 0); + result = LZ4_decompress_safe(source, dest, + compressedSize, maxOutputSize); + if (result <= 0) + return result; + lz4sd->prefixSize = result; + lz4sd->prefixEnd = (BYTE *)dest + result; + } else if (lz4sd->prefixEnd == (BYTE *)dest) { + /* They're rolling the current segment. */ + if (lz4sd->prefixSize >= 64 * KB - 1) + result = LZ4_decompress_safe_withPrefix64k(source, dest, + compressedSize, maxOutputSize); + else if (lz4sd->extDictSize == 0) + result = LZ4_decompress_safe_withSmallPrefix(source, + dest, compressedSize, maxOutputSize, + lz4sd->prefixSize); + else + result = LZ4_decompress_safe_doubleDict(source, dest, + compressedSize, maxOutputSize, + lz4sd->prefixSize, + lz4sd->externalDict, lz4sd->extDictSize); if (result <= 0) return result; - lz4sd->prefixSize += result; - lz4sd->prefixEnd += result; + lz4sd->prefixEnd += result; } else { + /* + * The buffer wraps around, or they're + * switching to another buffer. 
+ */ lz4sd->extDictSize = lz4sd->prefixSize; lz4sd->externalDict = lz4sd->prefixEnd - lz4sd->extDictSize; - result = LZ4_decompress_generic(source, dest, + result = LZ4_decompress_safe_forceExtDict(source, dest, compressedSize, maxOutputSize, - endOnInputSize, full, 0, - usingExtDict, (BYTE *)dest, lz4sd->externalDict, lz4sd->extDictSize); if (result <= 0) return result; lz4sd->prefixSize = result; - lz4sd->prefixEnd = (BYTE *)dest + result; + lz4sd->prefixEnd = (BYTE *)dest + result; } return result; @@ -422,75 +632,66 @@ int LZ4_decompress_fast_continue(LZ4_streamDecode_t *LZ4_streamDecode, LZ4_streamDecode_t_internal *lz4sd = &LZ4_streamDecode->internal_donotuse; int result; - if (lz4sd->prefixEnd == (BYTE *)dest) { - result = LZ4_decompress_generic(source, dest, 0, originalSize, - endOnOutputSize, full, 0, - usingExtDict, - lz4sd->prefixEnd - lz4sd->prefixSize, - lz4sd->externalDict, lz4sd->extDictSize); - + if (lz4sd->prefixSize == 0) { + assert(lz4sd->extDictSize == 0); + result = LZ4_decompress_fast(source, dest, originalSize); + if (result <= 0) + return result; + lz4sd->prefixSize = originalSize; + lz4sd->prefixEnd = (BYTE *)dest + originalSize; + } else if (lz4sd->prefixEnd == (BYTE *)dest) { + if (lz4sd->prefixSize >= 64 * KB - 1 || + lz4sd->extDictSize == 0) + result = LZ4_decompress_fast(source, dest, + originalSize); + else + result = LZ4_decompress_fast_doubleDict(source, dest, + originalSize, lz4sd->prefixSize, + lz4sd->externalDict, lz4sd->extDictSize); if (result <= 0) return result; - lz4sd->prefixSize += originalSize; - lz4sd->prefixEnd += originalSize; + lz4sd->prefixEnd += originalSize; } else { lz4sd->extDictSize = lz4sd->prefixSize; lz4sd->externalDict = lz4sd->prefixEnd - lz4sd->extDictSize; - result = LZ4_decompress_generic(source, dest, 0, originalSize, - endOnOutputSize, full, 0, - usingExtDict, (BYTE *)dest, - lz4sd->externalDict, lz4sd->extDictSize); + result = LZ4_decompress_fast_extDict(source, dest, + originalSize, lz4sd->externalDict, lz4sd->extDictSize); if (result <= 0) return result; lz4sd->prefixSize = originalSize; - lz4sd->prefixEnd = (BYTE *)dest + originalSize; + lz4sd->prefixEnd = (BYTE *)dest + originalSize; } - return result; } -/* - * Advanced decoding functions : - * *_usingDict() : - * These decoding functions work the same as "_continue" ones, - * the dictionary must be explicitly provided within parameters - */ -static FORCE_INLINE int LZ4_decompress_usingDict_generic(const char *source, - char *dest, int compressedSize, int maxOutputSize, int safe, - const char *dictStart, int dictSize) +int LZ4_decompress_safe_usingDict(const char *source, char *dest, + int compressedSize, int maxOutputSize, + const char *dictStart, int dictSize) { if (dictSize == 0) - return LZ4_decompress_generic(source, dest, - compressedSize, maxOutputSize, safe, full, 0, - noDict, (BYTE *)dest, NULL, 0); - if (dictStart + dictSize == dest) { - if (dictSize >= (int)(64 * KB - 1)) - return LZ4_decompress_generic(source, dest, - compressedSize, maxOutputSize, safe, full, 0, - withPrefix64k, (BYTE *)dest - 64 * KB, NULL, 0); - return LZ4_decompress_generic(source, dest, compressedSize, - maxOutputSize, safe, full, 0, noDict, - (BYTE *)dest - dictSize, NULL, 0); + return LZ4_decompress_safe(source, dest, + compressedSize, maxOutputSize); + if (dictStart+dictSize == dest) { + if (dictSize >= 64 * KB - 1) + return LZ4_decompress_safe_withPrefix64k(source, dest, + compressedSize, maxOutputSize); + return LZ4_decompress_safe_withSmallPrefix(source, dest, + compressedSize, 
maxOutputSize, dictSize); } - return LZ4_decompress_generic(source, dest, compressedSize, - maxOutputSize, safe, full, 0, usingExtDict, - (BYTE *)dest, (const BYTE *)dictStart, dictSize); -} - -int LZ4_decompress_safe_usingDict(const char *source, char *dest, - int compressedSize, int maxOutputSize, - const char *dictStart, int dictSize) -{ - return LZ4_decompress_usingDict_generic(source, dest, - compressedSize, maxOutputSize, 1, dictStart, dictSize); + return LZ4_decompress_safe_forceExtDict(source, dest, + compressedSize, maxOutputSize, dictStart, dictSize); } int LZ4_decompress_fast_usingDict(const char *source, char *dest, - int originalSize, const char *dictStart, int dictSize) + int originalSize, + const char *dictStart, int dictSize) { - return LZ4_decompress_usingDict_generic(source, dest, 0, - originalSize, 0, dictStart, dictSize); + if (dictSize == 0 || dictStart + dictSize == dest) + return LZ4_decompress_fast(source, dest, originalSize); + + return LZ4_decompress_fast_extDict(source, dest, originalSize, + dictStart, dictSize); } #ifndef STATIC diff --git a/lib/lz4/lz4defs.h b/lib/lz4/lz4defs.h index 00a0b58a0871..1a7fa9d9170f 100644 --- a/lib/lz4/lz4defs.h +++ b/lib/lz4/lz4defs.h @@ -75,6 +75,11 @@ typedef uintptr_t uptrval; #define WILDCOPYLENGTH 8 #define LASTLITERALS 5 #define MFLIMIT (WILDCOPYLENGTH + MINMATCH) +/* + * ensure it's possible to write 2 x wildcopyLength + * without overflowing output buffer + */ +#define MATCH_SAFEGUARD_DISTANCE ((2 * WILDCOPYLENGTH) - MINMATCH) /* Increase this value ==> compression run slower on incompressible data */ #define LZ4_SKIPTRIGGER 6 @@ -222,6 +227,8 @@ typedef enum { noDict = 0, withPrefix64k, usingExtDict } dict_directive; typedef enum { noDictIssue = 0, dictSmall } dictIssue_directive; typedef enum { endOnOutputSize = 0, endOnInputSize = 1 } endCondition_directive; -typedef enum { full = 0, partial = 1 } earlyEnd_directive; +typedef enum { decode_full_block = 0, partial_decode = 1 } earlyEnd_directive; + +#define LZ4_STATIC_ASSERT(c) BUILD_BUG_ON(!(c)) #endif diff --git a/lib/lzo/lzo1x_compress.c b/lib/lzo/lzo1x_compress.c index 236eb21167b5..4525fb094844 100644 --- a/lib/lzo/lzo1x_compress.c +++ b/lib/lzo/lzo1x_compress.c @@ -20,7 +20,8 @@ static noinline size_t lzo1x_1_do_compress(const unsigned char *in, size_t in_len, unsigned char *out, size_t *out_len, - size_t ti, void *wrkmem) + size_t ti, void *wrkmem, signed char *state_offset, + const unsigned char bitstream_version) { const unsigned char *ip; unsigned char *op; @@ -35,27 +36,85 @@ lzo1x_1_do_compress(const unsigned char *in, size_t in_len, ip += ti < 4 ? 4 - ti : 0; for (;;) { - const unsigned char *m_pos; + const unsigned char *m_pos = NULL; size_t t, m_len, m_off; u32 dv; + u32 run_length = 0; literal: ip += 1 + ((ip - ii) >> 5); next: if (unlikely(ip >= ip_end)) break; dv = get_unaligned_le32(ip); - t = ((dv * 0x1824429d) >> (32 - D_BITS)) & D_MASK; - m_pos = in + dict[t]; - dict[t] = (lzo_dict_t) (ip - in); - if (unlikely(dv != get_unaligned_le32(m_pos))) - goto literal; + + if (dv == 0 && bitstream_version) { + const unsigned char *ir = ip + 4; + const unsigned char *limit = ip_end + < (ip + MAX_ZERO_RUN_LENGTH + 1) + ? 
ip_end : ip + MAX_ZERO_RUN_LENGTH + 1; +#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && \ + defined(LZO_FAST_64BIT_MEMORY_ACCESS) + u64 dv64; + + for (; (ir + 32) <= limit; ir += 32) { + dv64 = get_unaligned((u64 *)ir); + dv64 |= get_unaligned((u64 *)ir + 1); + dv64 |= get_unaligned((u64 *)ir + 2); + dv64 |= get_unaligned((u64 *)ir + 3); + if (dv64) + break; + } + for (; (ir + 8) <= limit; ir += 8) { + dv64 = get_unaligned((u64 *)ir); + if (dv64) { +# if defined(__LITTLE_ENDIAN) + ir += __builtin_ctzll(dv64) >> 3; +# elif defined(__BIG_ENDIAN) + ir += __builtin_clzll(dv64) >> 3; +# else +# error "missing endian definition" +# endif + break; + } + } +#else + while ((ir < (const unsigned char *) + ALIGN((uintptr_t)ir, 4)) && + (ir < limit) && (*ir == 0)) + ir++; + for (; (ir + 4) <= limit; ir += 4) { + dv = *((u32 *)ir); + if (dv) { +# if defined(__LITTLE_ENDIAN) + ir += __builtin_ctz(dv) >> 3; +# elif defined(__BIG_ENDIAN) + ir += __builtin_clz(dv) >> 3; +# else +# error "missing endian definition" +# endif + break; + } + } +#endif + while (likely(ir < limit) && unlikely(*ir == 0)) + ir++; + run_length = ir - ip; + if (run_length > MAX_ZERO_RUN_LENGTH) + run_length = MAX_ZERO_RUN_LENGTH; + } else { + t = ((dv * 0x1824429d) >> (32 - D_BITS)) & D_MASK; + m_pos = in + dict[t]; + dict[t] = (lzo_dict_t) (ip - in); + if (unlikely(dv != get_unaligned_le32(m_pos))) + goto literal; + } ii -= ti; ti = 0; t = ip - ii; if (t != 0) { if (t <= 3) { - op[-2] |= t; + op[*state_offset] |= t; COPY4(op, ii); op += t; } else if (t <= 16) { @@ -88,6 +147,17 @@ next: } } + if (unlikely(run_length)) { + ip += run_length; + run_length -= MIN_ZERO_RUN_LENGTH; + put_unaligned_le32((run_length << 21) | 0xfffc18 + | (run_length & 0x7), op); + op += 4; + run_length = 0; + *state_offset = -3; + goto finished_writing_instruction; + } + m_len = 4; { #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && defined(LZO_USE_CTZ64) @@ -170,7 +240,6 @@ m_len_done: m_off = ip - m_pos; ip += m_len; - ii = ip; if (m_len <= M2_MAX_LEN && m_off <= M2_MAX_OFFSET) { m_off -= 1; *op++ = (((m_len - 1) << 5) | ((m_off & 7) << 2)); @@ -207,29 +276,45 @@ m_len_done: *op++ = (m_off << 2); *op++ = (m_off >> 6); } + *state_offset = -2; +finished_writing_instruction: + ii = ip; goto next; } *out_len = op - out; return in_end - (ii - ti); } -int lzo1x_1_compress(const unsigned char *in, size_t in_len, +int lzogeneric1x_1_compress(const unsigned char *in, size_t in_len, unsigned char *out, size_t *out_len, - void *wrkmem) + void *wrkmem, const unsigned char bitstream_version) { const unsigned char *ip = in; unsigned char *op = out; size_t l = in_len; size_t t = 0; + signed char state_offset = -2; + unsigned int m4_max_offset; + + // LZO v0 will never write 17 as first byte, + // so this is used to version the bitstream + if (bitstream_version > 0) { + *op++ = 17; + *op++ = bitstream_version; + m4_max_offset = M4_MAX_OFFSET_V1; + } else { + m4_max_offset = M4_MAX_OFFSET_V0; + } while (l > 20) { - size_t ll = l <= (M4_MAX_OFFSET + 1) ? l : (M4_MAX_OFFSET + 1); + size_t ll = l <= (m4_max_offset + 1) ? 
l : (m4_max_offset + 1); uintptr_t ll_end = (uintptr_t) ip + ll; if ((ll_end + ((t + ll) >> 5)) <= ll_end) break; BUILD_BUG_ON(D_SIZE * sizeof(lzo_dict_t) > LZO1X_1_MEM_COMPRESS); memset(wrkmem, 0, D_SIZE * sizeof(lzo_dict_t)); - t = lzo1x_1_do_compress(ip, ll, op, out_len, t, wrkmem); + t = lzo1x_1_do_compress(ip, ll, op, out_len, t, wrkmem, + &state_offset, bitstream_version); ip += ll; op += *out_len; l -= ll; @@ -242,7 +327,7 @@ int lzo1x_1_compress(const unsigned char *in, size_t in_len, if (op == out && t <= 238) { *op++ = (17 + t); } else if (t <= 3) { - op[-2] |= t; + op[state_offset] |= t; } else if (t <= 18) { *op++ = (t - 3); } else { @@ -273,7 +358,24 @@ int lzo1x_1_compress(const unsigned char *in, size_t in_len, *out_len = op - out; return LZO_E_OK; } + +int lzo1x_1_compress(const unsigned char *in, size_t in_len, + unsigned char *out, size_t *out_len, + void *wrkmem) +{ + return lzogeneric1x_1_compress(in, in_len, out, out_len, wrkmem, 0); +} + +int lzorle1x_1_compress(const unsigned char *in, size_t in_len, + unsigned char *out, size_t *out_len, + void *wrkmem) +{ + return lzogeneric1x_1_compress(in, in_len, out, out_len, + wrkmem, LZO_VERSION); +} + EXPORT_SYMBOL_GPL(lzo1x_1_compress); +EXPORT_SYMBOL_GPL(lzorle1x_1_compress); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("LZO1X-1 Compressor"); diff --git a/lib/lzo/lzo1x_decompress_safe.c b/lib/lzo/lzo1x_decompress_safe.c index a1c387f6afba..6d2600ea3b55 100644 --- a/lib/lzo/lzo1x_decompress_safe.c +++ b/lib/lzo/lzo1x_decompress_safe.c @@ -46,11 +46,23 @@ int lzo1x_decompress_safe(const unsigned char *in, size_t in_len, const unsigned char * const ip_end = in + in_len; unsigned char * const op_end = out + *out_len; + unsigned char bitstream_version; + op = out; ip = in; if (unlikely(in_len < 3)) goto input_overrun; + + if (likely(*ip == 17)) { + bitstream_version = ip[1]; + ip += 2; + if (unlikely(in_len < 5)) + goto input_overrun; + } else { + bitstream_version = 0; + } + if (*ip > 17) { t = *ip++ - 17; if (t < 4) { @@ -154,32 +166,49 @@ copy_literal_run: m_pos -= next >> 2; next &= 3; } else { - m_pos = op; - m_pos -= (t & 8) << 11; - t = (t & 7) + (3 - 1); - if (unlikely(t == 2)) { - size_t offset; - const unsigned char *ip_last = ip; + NEED_IP(2); + next = get_unaligned_le16(ip); + if (((next & 0xfffc) == 0xfffc) && + ((t & 0xf8) == 0x18) && + likely(bitstream_version)) { + NEED_IP(3); + t &= 7; + t |= ip[2] << 3; + t += MIN_ZERO_RUN_LENGTH; + NEED_OP(t); + memset(op, 0, t); + op += t; + next &= 3; + ip += 3; + goto match_next; + } else { + m_pos = op; + m_pos -= (t & 8) << 11; + t = (t & 7) + (3 - 1); + if (unlikely(t == 2)) { + size_t offset; + const unsigned char *ip_last = ip; - while (unlikely(*ip == 0)) { - ip++; - NEED_IP(1); - } - offset = ip - ip_last; - if (unlikely(offset > MAX_255_COUNT)) - return LZO_E_ERROR; + while (unlikely(*ip == 0)) { + ip++; + NEED_IP(1); + } + offset = ip - ip_last; + if (unlikely(offset > MAX_255_COUNT)) + return LZO_E_ERROR; - offset = (offset << 8) - offset; - t += offset + 7 + *ip++; - NEED_IP(2); + offset = (offset << 8) - offset; + t += offset + 7 + *ip++; + NEED_IP(2); + next = get_unaligned_le16(ip); + } + ip += 2; + m_pos -= next >> 2; + next &= 3; + if (m_pos == op) + goto eof_found; + m_pos -= 0x4000; } - next = get_unaligned_le16(ip); - ip += 2; - m_pos -= next >> 2; - next &= 3; - if (m_pos == op) - goto eof_found; - m_pos -= 0x4000; } TEST_LB(m_pos); #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) diff --git a/lib/lzo/lzodefs.h b/lib/lzo/lzodefs.h index 
4edefd2f540c..b60851fcf6ce 100644 --- a/lib/lzo/lzodefs.h +++ b/lib/lzo/lzodefs.h @@ -13,9 +13,15 @@ */ +/* Version + * 0: original lzo version + * 1: lzo with support for RLE + */ +#define LZO_VERSION 1 + #define COPY4(dst, src) \ put_unaligned(get_unaligned((const u32 *)(src)), (u32 *)(dst)) -#if defined(__x86_64__) +#if defined(CONFIG_X86_64) || defined(CONFIG_ARM64) #define COPY8(dst, src) \ put_unaligned(get_unaligned((const u64 *)(src)), (u64 *)(dst)) #else @@ -25,19 +31,21 @@ #if defined(__BIG_ENDIAN) && defined(__LITTLE_ENDIAN) #error "conflicting endian definitions" -#elif defined(__x86_64__) +#elif defined(CONFIG_X86_64) || defined(CONFIG_ARM64) #define LZO_USE_CTZ64 1 #define LZO_USE_CTZ32 1 -#elif defined(__i386__) || defined(__powerpc__) +#define LZO_FAST_64BIT_MEMORY_ACCESS +#elif defined(CONFIG_X86) || defined(CONFIG_PPC) #define LZO_USE_CTZ32 1 -#elif defined(__arm__) && (__LINUX_ARM_ARCH__ >= 5) +#elif defined(CONFIG_ARM) && (__LINUX_ARM_ARCH__ >= 5) #define LZO_USE_CTZ32 1 #endif #define M1_MAX_OFFSET 0x0400 #define M2_MAX_OFFSET 0x0800 #define M3_MAX_OFFSET 0x4000 -#define M4_MAX_OFFSET 0xbfff +#define M4_MAX_OFFSET_V0 0xbfff +#define M4_MAX_OFFSET_V1 0xbffe #define M1_MIN_LEN 2 #define M1_MAX_LEN 2 @@ -53,6 +61,9 @@ #define M3_MARKER 32 #define M4_MARKER 16 +#define MIN_ZERO_RUN_LENGTH 4 +#define MAX_ZERO_RUN_LENGTH (2047 + MIN_ZERO_RUN_LENGTH) + #define lzo_dict_t unsigned short #define D_BITS 13 #define D_SIZE (1u << D_BITS) diff --git a/lib/memcat_p.c b/lib/memcat_p.c new file mode 100644 index 000000000000..b810fbc66962 --- /dev/null +++ b/lib/memcat_p.c @@ -0,0 +1,34 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include <linux/slab.h> + +/* + * Merge two NULL-terminated pointer arrays into a newly allocated + * array, which is also NULL-terminated. Nomenclature is inspired by + * memset_p() and memcat() found elsewhere in the kernel source tree. + */ +void **__memcat_p(void **a, void **b) +{ + void **p = a, **new; + int nr; + + /* count the elements in both arrays */ + for (nr = 0, p = a; *p; nr++, p++) + ; + for (p = b; *p; nr++, p++) + ; + /* one for the NULL-terminator */ + nr++; + + new = kmalloc_array(nr, sizeof(void *), GFP_KERNEL); + if (!new) + return NULL; + + /* nr -> last index; p points to NULL in b[] */ + for (nr--; nr >= 0; nr--, p = p == b ? &a[nr] : p - 1) + new[nr] = *p; + + return new; +} +EXPORT_SYMBOL_GPL(__memcat_p); + diff --git a/lib/objagg.c b/lib/objagg.c new file mode 100644 index 000000000000..576be22e86de --- /dev/null +++ b/lib/objagg.c @@ -0,0 +1,1056 @@ +// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 +/* Copyright (c) 2018 Mellanox Technologies. 
All rights reserved */ + +#include <linux/module.h> +#include <linux/slab.h> +#include <linux/rhashtable.h> +#include <linux/idr.h> +#include <linux/list.h> +#include <linux/sort.h> +#include <linux/objagg.h> + +#define CREATE_TRACE_POINTS +#include <trace/events/objagg.h> + +struct objagg_hints { + struct rhashtable node_ht; + struct rhashtable_params ht_params; + struct list_head node_list; + unsigned int node_count; + unsigned int root_count; + unsigned int refcount; + const struct objagg_ops *ops; +}; + +struct objagg_hints_node { + struct rhash_head ht_node; /* member of objagg_hints->node_ht */ + struct list_head list; /* member of objagg_hints->node_list */ + struct objagg_hints_node *parent; + unsigned int root_id; + struct objagg_obj_stats_info stats_info; + unsigned long obj[0]; +}; + +static struct objagg_hints_node * +objagg_hints_lookup(struct objagg_hints *objagg_hints, void *obj) +{ + if (!objagg_hints) + return NULL; + return rhashtable_lookup_fast(&objagg_hints->node_ht, obj, + objagg_hints->ht_params); +} + +struct objagg { + const struct objagg_ops *ops; + void *priv; + struct rhashtable obj_ht; + struct rhashtable_params ht_params; + struct list_head obj_list; + unsigned int obj_count; + struct ida root_ida; + struct objagg_hints *hints; +}; + +struct objagg_obj { + struct rhash_head ht_node; /* member of objagg->obj_ht */ + struct list_head list; /* member of objagg->obj_list */ + struct objagg_obj *parent; /* if the object is nested, this + * holds pointer to parent, otherwise NULL + */ + union { + void *delta_priv; /* user delta private */ + void *root_priv; /* user root private */ + }; + unsigned int root_id; + unsigned int refcount; /* counts number of users of this object + * including nested objects + */ + struct objagg_obj_stats stats; + unsigned long obj[0]; +}; + +static unsigned int objagg_obj_ref_inc(struct objagg_obj *objagg_obj) +{ + return ++objagg_obj->refcount; +} + +static unsigned int objagg_obj_ref_dec(struct objagg_obj *objagg_obj) +{ + return --objagg_obj->refcount; +} + +static void objagg_obj_stats_inc(struct objagg_obj *objagg_obj) +{ + objagg_obj->stats.user_count++; + objagg_obj->stats.delta_user_count++; + if (objagg_obj->parent) + objagg_obj->parent->stats.delta_user_count++; +} + +static void objagg_obj_stats_dec(struct objagg_obj *objagg_obj) +{ + objagg_obj->stats.user_count--; + objagg_obj->stats.delta_user_count--; + if (objagg_obj->parent) + objagg_obj->parent->stats.delta_user_count--; +} + +static bool objagg_obj_is_root(const struct objagg_obj *objagg_obj) +{ + /* Nesting is not supported, so we can use ->parent + * to figure out if the object is root. + */ + return !objagg_obj->parent; +} + +/** + * objagg_obj_root_priv - obtains root private for an object + * @objagg_obj: objagg object instance + * + * Note: all locking must be provided by the caller. + * + * Either the object is root itself when the private is returned + * directly, or the parent is root and its private is returned + * instead. + * + * Returns a user private root pointer. + */ +const void *objagg_obj_root_priv(const struct objagg_obj *objagg_obj) +{ + if (objagg_obj_is_root(objagg_obj)) + return objagg_obj->root_priv; + WARN_ON(!objagg_obj_is_root(objagg_obj->parent)); + return objagg_obj->parent->root_priv; +} +EXPORT_SYMBOL(objagg_obj_root_priv); + +/** + * objagg_obj_delta_priv - obtains delta private for an object + * @objagg_obj: objagg object instance + * + * Note: all locking must be provided by the caller. 
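+ *
+ * A minimal sketch of telling deltas from roots, assuming a held
+ * object "objagg_obj" (use_delta() and use_root() are illustrative
+ * caller helpers, not part of this API):
+ *
+ *	const void *delta = objagg_obj_delta_priv(objagg_obj);
+ *
+ *	if (delta)
+ *		use_delta(delta);
+ *	else
+ *		use_root(objagg_obj_root_priv(objagg_obj));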
+ * + * Returns user private delta pointer or NULL in case the passed + * object is root. + */ +const void *objagg_obj_delta_priv(const struct objagg_obj *objagg_obj) +{ + if (objagg_obj_is_root(objagg_obj)) + return NULL; + return objagg_obj->delta_priv; +} +EXPORT_SYMBOL(objagg_obj_delta_priv); + +/** + * objagg_obj_raw - obtains object user private pointer + * @objagg_obj: objagg object instance + * + * Note: all locking must be provided by the caller. + * + * Returns user private pointer as was passed to objagg_obj_get() by "obj" arg. + */ +const void *objagg_obj_raw(const struct objagg_obj *objagg_obj) +{ + return objagg_obj->obj; +} +EXPORT_SYMBOL(objagg_obj_raw); + +static struct objagg_obj *objagg_obj_lookup(struct objagg *objagg, void *obj) +{ + return rhashtable_lookup_fast(&objagg->obj_ht, obj, objagg->ht_params); +} + +static int objagg_obj_parent_assign(struct objagg *objagg, + struct objagg_obj *objagg_obj, + struct objagg_obj *parent, + bool take_parent_ref) +{ + void *delta_priv; + + delta_priv = objagg->ops->delta_create(objagg->priv, parent->obj, + objagg_obj->obj); + if (IS_ERR(delta_priv)) + return PTR_ERR(delta_priv); + + /* User returned a delta private, that means that + * our object can be aggregated into the parent. + */ + objagg_obj->parent = parent; + objagg_obj->delta_priv = delta_priv; + if (take_parent_ref) + objagg_obj_ref_inc(objagg_obj->parent); + trace_objagg_obj_parent_assign(objagg, objagg_obj, + parent, + parent->refcount); + return 0; +} + +static int objagg_obj_parent_lookup_assign(struct objagg *objagg, + struct objagg_obj *objagg_obj) +{ + struct objagg_obj *objagg_obj_cur; + int err; + + list_for_each_entry(objagg_obj_cur, &objagg->obj_list, list) { + /* Nesting is not supported. In case the object + * is not root, it cannot be assigned as parent. + */ + if (!objagg_obj_is_root(objagg_obj_cur)) + continue; + err = objagg_obj_parent_assign(objagg, objagg_obj, + objagg_obj_cur, true); + if (!err) + return 0; + } + return -ENOENT; +} + +static void __objagg_obj_put(struct objagg *objagg, + struct objagg_obj *objagg_obj); + +static void objagg_obj_parent_unassign(struct objagg *objagg, + struct objagg_obj *objagg_obj) +{ + trace_objagg_obj_parent_unassign(objagg, objagg_obj, + objagg_obj->parent, + objagg_obj->parent->refcount); + objagg->ops->delta_destroy(objagg->priv, objagg_obj->delta_priv); + __objagg_obj_put(objagg, objagg_obj->parent); +} + +static int objagg_obj_root_id_alloc(struct objagg *objagg, + struct objagg_obj *objagg_obj, + struct objagg_hints_node *hnode) +{ + unsigned int min, max; + int root_id; + + /* In case there are no hints available, the root id is invalid. */ + if (!objagg->hints) { + objagg_obj->root_id = OBJAGG_OBJ_ROOT_ID_INVALID; + return 0; + } + + if (hnode) { + min = hnode->root_id; + max = hnode->root_id; + } else { + /* For objects with no hint, start after the last + * hinted root_id. 
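+		 * E.g. when the hints describe four roots, ids 0-3 stay
+		 * reserved for hinted objects and unhinted roots are
+		 * allocated from 4 upwards.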
+ */ + min = objagg->hints->root_count; + max = ~0; + } + + root_id = ida_alloc_range(&objagg->root_ida, min, max, GFP_KERNEL); + + if (root_id < 0) + return root_id; + objagg_obj->root_id = root_id; + return 0; +} + +static void objagg_obj_root_id_free(struct objagg *objagg, + struct objagg_obj *objagg_obj) +{ + if (!objagg->hints) + return; + ida_free(&objagg->root_ida, objagg_obj->root_id); +} + +static int objagg_obj_root_create(struct objagg *objagg, + struct objagg_obj *objagg_obj, + struct objagg_hints_node *hnode) +{ + int err; + + err = objagg_obj_root_id_alloc(objagg, objagg_obj, hnode); + if (err) + return err; + objagg_obj->root_priv = objagg->ops->root_create(objagg->priv, + objagg_obj->obj, + objagg_obj->root_id); + if (IS_ERR(objagg_obj->root_priv)) { + err = PTR_ERR(objagg_obj->root_priv); + goto err_root_create; + } + trace_objagg_obj_root_create(objagg, objagg_obj); + return 0; + +err_root_create: + objagg_obj_root_id_free(objagg, objagg_obj); + return err; +} + +static void objagg_obj_root_destroy(struct objagg *objagg, + struct objagg_obj *objagg_obj) +{ + trace_objagg_obj_root_destroy(objagg, objagg_obj); + objagg->ops->root_destroy(objagg->priv, objagg_obj->root_priv); + objagg_obj_root_id_free(objagg, objagg_obj); +} + +static struct objagg_obj *__objagg_obj_get(struct objagg *objagg, void *obj); + +static int objagg_obj_init_with_hints(struct objagg *objagg, + struct objagg_obj *objagg_obj, + bool *hint_found) +{ + struct objagg_hints_node *hnode; + struct objagg_obj *parent; + int err; + + hnode = objagg_hints_lookup(objagg->hints, objagg_obj->obj); + if (!hnode) { + *hint_found = false; + return 0; + } + *hint_found = true; + + if (!hnode->parent) + return objagg_obj_root_create(objagg, objagg_obj, hnode); + + parent = __objagg_obj_get(objagg, hnode->parent->obj); + if (IS_ERR(parent)) + return PTR_ERR(parent); + + err = objagg_obj_parent_assign(objagg, objagg_obj, parent, false); + if (err) { + *hint_found = false; + err = 0; + goto err_parent_assign; + } + + return 0; + +err_parent_assign: + objagg_obj_put(objagg, parent); + return err; +} + +static int objagg_obj_init(struct objagg *objagg, + struct objagg_obj *objagg_obj) +{ + bool hint_found; + int err; + + /* First, try to use hints if they are available and + * if they provide result. + */ + err = objagg_obj_init_with_hints(objagg, objagg_obj, &hint_found); + if (err) + return err; + + if (hint_found) + return 0; + + /* Try to find if the object can be aggregated under an existing one. */ + err = objagg_obj_parent_lookup_assign(objagg, objagg_obj); + if (!err) + return 0; + /* If aggregation is not possible, make the object a root. 
*/ + return objagg_obj_root_create(objagg, objagg_obj, NULL); +} + +static void objagg_obj_fini(struct objagg *objagg, + struct objagg_obj *objagg_obj) +{ + if (!objagg_obj_is_root(objagg_obj)) + objagg_obj_parent_unassign(objagg, objagg_obj); + else + objagg_obj_root_destroy(objagg, objagg_obj); +} + +static struct objagg_obj *objagg_obj_create(struct objagg *objagg, void *obj) +{ + struct objagg_obj *objagg_obj; + int err; + + objagg_obj = kzalloc(sizeof(*objagg_obj) + objagg->ops->obj_size, + GFP_KERNEL); + if (!objagg_obj) + return ERR_PTR(-ENOMEM); + objagg_obj_ref_inc(objagg_obj); + memcpy(objagg_obj->obj, obj, objagg->ops->obj_size); + + err = objagg_obj_init(objagg, objagg_obj); + if (err) + goto err_obj_init; + + err = rhashtable_insert_fast(&objagg->obj_ht, &objagg_obj->ht_node, + objagg->ht_params); + if (err) + goto err_ht_insert; + list_add(&objagg_obj->list, &objagg->obj_list); + objagg->obj_count++; + trace_objagg_obj_create(objagg, objagg_obj); + + return objagg_obj; + +err_ht_insert: + objagg_obj_fini(objagg, objagg_obj); +err_obj_init: + kfree(objagg_obj); + return ERR_PTR(err); +} + +static struct objagg_obj *__objagg_obj_get(struct objagg *objagg, void *obj) +{ + struct objagg_obj *objagg_obj; + + /* First, try to find the object exactly as user passed it, + * perhaps it is already in use. + */ + objagg_obj = objagg_obj_lookup(objagg, obj); + if (objagg_obj) { + objagg_obj_ref_inc(objagg_obj); + return objagg_obj; + } + + return objagg_obj_create(objagg, obj); +} + +/** + * objagg_obj_get - gets an object within objagg instance + * @objagg: objagg instance + * @obj: user-specific private object pointer + + * Note: all locking must be provided by the caller. + * + * Size of the "obj" memory is specified in "objagg->ops". + * + * There are 3 main cases this function handles: + * 1) The object according to "obj" already exists. In that case + * the reference counter is incremented and the object is returned. + * 2) The object does not exist, but it can be aggregated within + * another object. In that case, user ops->delta_create() is called + * to obtain delta data and a new object is created with returned + * user-delta private pointer. + * 3) The object does not exist and cannot be aggregated into + * any of the existing objects. In that case, user ops->root_create() + * is called to create the root and a new object is created with + * returned user-root private pointer. + * + * Returns a pointer to objagg object instance in case of success, + * otherwise it returns pointer error using ERR_PTR macro.
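+ *
+ * A minimal get/put sketch, assuming an existing "objagg" instance
+ * and a caller-defined object "obj" (error handling illustrative):
+ *
+ *	struct objagg_obj *objagg_obj;
+ *
+ *	objagg_obj = objagg_obj_get(objagg, obj);
+ *	if (IS_ERR(objagg_obj))
+ *		return PTR_ERR(objagg_obj);
+ *	...
+ *	objagg_obj_put(objagg, objagg_obj);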
+ */ +struct objagg_obj *objagg_obj_get(struct objagg *objagg, void *obj) +{ + struct objagg_obj *objagg_obj; + + objagg_obj = __objagg_obj_get(objagg, obj); + if (IS_ERR(objagg_obj)) + return objagg_obj; + objagg_obj_stats_inc(objagg_obj); + trace_objagg_obj_get(objagg, objagg_obj, objagg_obj->refcount); + return objagg_obj; +} +EXPORT_SYMBOL(objagg_obj_get); + +static void objagg_obj_destroy(struct objagg *objagg, + struct objagg_obj *objagg_obj) +{ + trace_objagg_obj_destroy(objagg, objagg_obj); + --objagg->obj_count; + list_del(&objagg_obj->list); + rhashtable_remove_fast(&objagg->obj_ht, &objagg_obj->ht_node, + objagg->ht_params); + objagg_obj_fini(objagg, objagg_obj); + kfree(objagg_obj); +} + +static void __objagg_obj_put(struct objagg *objagg, + struct objagg_obj *objagg_obj) +{ + if (!objagg_obj_ref_dec(objagg_obj)) + objagg_obj_destroy(objagg, objagg_obj); +} + +/** + * objagg_obj_put - puts an object within objagg instance + * @objagg: objagg instance + * @objagg_obj: objagg object instance + * + * Note: all locking must be provided by the caller. + * + * Symmetric to objagg_obj_get(). + */ +void objagg_obj_put(struct objagg *objagg, struct objagg_obj *objagg_obj) +{ + trace_objagg_obj_put(objagg, objagg_obj, objagg_obj->refcount); + objagg_obj_stats_dec(objagg_obj); + __objagg_obj_put(objagg, objagg_obj); +} +EXPORT_SYMBOL(objagg_obj_put); + +/** + * objagg_create - creates a new objagg instance + * @ops: user-specific callbacks + * @objagg_hints: hints, can be NULL + * @priv: pointer to private data passed to the ops + * + * Note: all locking must be provided by the caller. + * + * The purpose of the library is to provide an infrastructure to + * aggregate user-specified objects. The library does not care about the + * type of the object. The user fills up ops which take care of the + * specific user object manipulation. + * + * As a deliberately simple example, consider integer numbers, with + * number 8 as a root object. That can aggregate number 9 with delta 1, + * number 10 with delta 2, etc. This example is implemented as + * a part of a testing module in the test_objagg.c file. + * + * Each objagg instance contains multiple trees. Each tree node is + * represented by "an object". In the current implementation there can be + * only root and leaf nodes. Leaf nodes are called deltas. + * But in general, this can be easily extended for intermediate nodes. + * In that extension, a delta would be associated with all non-root + * nodes. + * + * Returns a pointer to newly created objagg instance in case of success, + * otherwise it returns pointer error using ERR_PTR macro.
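+ *
+ * A minimal sketch of the ops wiring, assuming caller-defined
+ * callbacks and object type (struct my_obj and the my_* callbacks
+ * are illustrative, not part of this API):
+ *
+ *	static const struct objagg_ops my_ops = {
+ *		.obj_size	= sizeof(struct my_obj),
+ *		.delta_check	= my_delta_check,
+ *		.delta_create	= my_delta_create,
+ *		.delta_destroy	= my_delta_destroy,
+ *		.root_create	= my_root_create,
+ *		.root_destroy	= my_root_destroy,
+ *	};
+ *
+ *	objagg = objagg_create(&my_ops, NULL, priv);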
+ */ +struct objagg *objagg_create(const struct objagg_ops *ops, + struct objagg_hints *objagg_hints, void *priv) +{ + struct objagg *objagg; + int err; + + if (WARN_ON(!ops || !ops->root_create || !ops->root_destroy || + !ops->delta_check || !ops->delta_create || + !ops->delta_destroy)) + return ERR_PTR(-EINVAL); + + objagg = kzalloc(sizeof(*objagg), GFP_KERNEL); + if (!objagg) + return ERR_PTR(-ENOMEM); + objagg->ops = ops; + if (objagg_hints) { + objagg->hints = objagg_hints; + objagg_hints->refcount++; + } + objagg->priv = priv; + INIT_LIST_HEAD(&objagg->obj_list); + + objagg->ht_params.key_len = ops->obj_size; + objagg->ht_params.key_offset = offsetof(struct objagg_obj, obj); + objagg->ht_params.head_offset = offsetof(struct objagg_obj, ht_node); + + err = rhashtable_init(&objagg->obj_ht, &objagg->ht_params); + if (err) + goto err_rhashtable_init; + + ida_init(&objagg->root_ida); + + trace_objagg_create(objagg); + return objagg; + +err_rhashtable_init: + kfree(objagg); + return ERR_PTR(err); +} +EXPORT_SYMBOL(objagg_create); + +/** + * objagg_destroy - destroys an objagg instance + * @objagg: objagg instance + * + * Note: all locking must be provided by the caller. + */ +void objagg_destroy(struct objagg *objagg) +{ + trace_objagg_destroy(objagg); + ida_destroy(&objagg->root_ida); + WARN_ON(!list_empty(&objagg->obj_list)); + rhashtable_destroy(&objagg->obj_ht); + if (objagg->hints) + objagg_hints_put(objagg->hints); + kfree(objagg); +} +EXPORT_SYMBOL(objagg_destroy); + +static int objagg_stats_info_sort_cmp_func(const void *a, const void *b) +{ + const struct objagg_obj_stats_info *stats_info1 = a; + const struct objagg_obj_stats_info *stats_info2 = b; + + if (stats_info1->is_root != stats_info2->is_root) + return stats_info2->is_root - stats_info1->is_root; + if (stats_info1->stats.delta_user_count != + stats_info2->stats.delta_user_count) + return stats_info2->stats.delta_user_count - + stats_info1->stats.delta_user_count; + return stats_info2->stats.user_count - stats_info1->stats.user_count; +} + +/** + * objagg_stats_get - obtains stats of the objagg instance + * @objagg: objagg instance + * + * Note: all locking must be provided by the caller. + * + * The returned structure contains statistics of all objects + * currently in use, ordered by the following rules: + * 1) Root objects are always on lower indexes than the rest. + * 2) Objects with higher delta user count are always on lower + * indexes. + * 3) In case multiple objects have the same delta user count, + * the objects are ordered by user count. + * + * Returns a pointer to stats instance in case of success, + * otherwise it returns pointer error using ERR_PTR macro.
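+ *
+ * A minimal sketch of consuming the stats (the pr_debug() body is
+ * illustrative):
+ *
+ *	const struct objagg_stats *stats;
+ *	int i;
+ *
+ *	stats = objagg_stats_get(objagg);
+ *	if (IS_ERR(stats))
+ *		return PTR_ERR(stats);
+ *	for (i = 0; i < stats->stats_info_count; i++)
+ *		pr_debug("users: %u\n",
+ *			 stats->stats_info[i].stats.user_count);
+ *	objagg_stats_put(stats);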
+ */ +const struct objagg_stats *objagg_stats_get(struct objagg *objagg) +{ + struct objagg_stats *objagg_stats; + struct objagg_obj *objagg_obj; + size_t alloc_size; + int i; + + alloc_size = sizeof(*objagg_stats) + + sizeof(objagg_stats->stats_info[0]) * objagg->obj_count; + objagg_stats = kzalloc(alloc_size, GFP_KERNEL); + if (!objagg_stats) + return ERR_PTR(-ENOMEM); + + i = 0; + list_for_each_entry(objagg_obj, &objagg->obj_list, list) { + memcpy(&objagg_stats->stats_info[i].stats, &objagg_obj->stats, + sizeof(objagg_stats->stats_info[0].stats)); + objagg_stats->stats_info[i].objagg_obj = objagg_obj; + objagg_stats->stats_info[i].is_root = + objagg_obj_is_root(objagg_obj); + if (objagg_stats->stats_info[i].is_root) + objagg_stats->root_count++; + i++; + } + objagg_stats->stats_info_count = i; + + sort(objagg_stats->stats_info, objagg_stats->stats_info_count, + sizeof(struct objagg_obj_stats_info), + objagg_stats_info_sort_cmp_func, NULL); + + return objagg_stats; +} +EXPORT_SYMBOL(objagg_stats_get); + +/** + * objagg_stats_put - puts stats of the objagg instance + * @objagg_stats: objagg instance stats + * + * Note: all locking must be provided by the caller. + */ +void objagg_stats_put(const struct objagg_stats *objagg_stats) +{ + kfree(objagg_stats); +} +EXPORT_SYMBOL(objagg_stats_put); + +static struct objagg_hints_node * +objagg_hints_node_create(struct objagg_hints *objagg_hints, + struct objagg_obj *objagg_obj, size_t obj_size, + struct objagg_hints_node *parent_hnode) +{ + unsigned int user_count = objagg_obj->stats.user_count; + struct objagg_hints_node *hnode; + int err; + + hnode = kzalloc(sizeof(*hnode) + obj_size, GFP_KERNEL); + if (!hnode) + return ERR_PTR(-ENOMEM); + memcpy(hnode->obj, &objagg_obj->obj, obj_size); + hnode->stats_info.stats.user_count = user_count; + hnode->stats_info.stats.delta_user_count = user_count; + if (parent_hnode) { + parent_hnode->stats_info.stats.delta_user_count += user_count; + } else { + hnode->root_id = objagg_hints->root_count++; + hnode->stats_info.is_root = true; + } + hnode->stats_info.objagg_obj = objagg_obj; + + err = rhashtable_insert_fast(&objagg_hints->node_ht, &hnode->ht_node, + objagg_hints->ht_params); + if (err) + goto err_ht_insert; + + list_add(&hnode->list, &objagg_hints->node_list); + hnode->parent = parent_hnode; + objagg_hints->node_count++; + + return hnode; + +err_ht_insert: + kfree(hnode); + return ERR_PTR(err); +} + +static void objagg_hints_flush(struct objagg_hints *objagg_hints) +{ + struct objagg_hints_node *hnode, *tmp; + + list_for_each_entry_safe(hnode, tmp, &objagg_hints->node_list, list) { + list_del(&hnode->list); + rhashtable_remove_fast(&objagg_hints->node_ht, &hnode->ht_node, + objagg_hints->ht_params); + kfree(hnode); + } +} + +struct objagg_tmp_node { + struct objagg_obj *objagg_obj; + bool crossed_out; +}; + +struct objagg_tmp_graph { + struct objagg_tmp_node *nodes; + unsigned long nodes_count; + unsigned long *edges; +}; + +static int objagg_tmp_graph_edge_index(struct objagg_tmp_graph *graph, + int parent_index, int index) +{ + return index * graph->nodes_count + parent_index; +} + +static void objagg_tmp_graph_edge_set(struct objagg_tmp_graph *graph, + int parent_index, int index) +{ + int edge_index = objagg_tmp_graph_edge_index(graph, index, + parent_index); + + __set_bit(edge_index, graph->edges); +} + +static bool objagg_tmp_graph_is_edge(struct objagg_tmp_graph *graph, + int parent_index, int index) +{ + int edge_index = objagg_tmp_graph_edge_index(graph, index, + parent_index); + + return 
test_bit(edge_index, graph->edges); +} + +static unsigned int objagg_tmp_graph_node_weight(struct objagg_tmp_graph *graph, + unsigned int index) +{ + struct objagg_tmp_node *node = &graph->nodes[index]; + unsigned int weight = node->objagg_obj->stats.user_count; + int j; + + /* Node weight is sum of node users and all other nodes users + * that this node can represent with delta. + */ + + for (j = 0; j < graph->nodes_count; j++) { + if (!objagg_tmp_graph_is_edge(graph, index, j)) + continue; + node = &graph->nodes[j]; + if (node->crossed_out) + continue; + weight += node->objagg_obj->stats.user_count; + } + return weight; +} + +static int objagg_tmp_graph_node_max_weight(struct objagg_tmp_graph *graph) +{ + struct objagg_tmp_node *node; + unsigned int max_weight = 0; + unsigned int weight; + int max_index = -1; + int i; + + for (i = 0; i < graph->nodes_count; i++) { + node = &graph->nodes[i]; + if (node->crossed_out) + continue; + weight = objagg_tmp_graph_node_weight(graph, i); + if (weight >= max_weight) { + max_weight = weight; + max_index = i; + } + } + return max_index; +} + +static struct objagg_tmp_graph *objagg_tmp_graph_create(struct objagg *objagg) +{ + unsigned int nodes_count = objagg->obj_count; + struct objagg_tmp_graph *graph; + struct objagg_tmp_node *node; + struct objagg_tmp_node *pnode; + struct objagg_obj *objagg_obj; + size_t alloc_size; + int i, j; + + graph = kzalloc(sizeof(*graph), GFP_KERNEL); + if (!graph) + return NULL; + + graph->nodes = kcalloc(nodes_count, sizeof(*graph->nodes), GFP_KERNEL); + if (!graph->nodes) + goto err_nodes_alloc; + graph->nodes_count = nodes_count; + + alloc_size = BITS_TO_LONGS(nodes_count * nodes_count) * + sizeof(unsigned long); + graph->edges = kzalloc(alloc_size, GFP_KERNEL); + if (!graph->edges) + goto err_edges_alloc; + + i = 0; + list_for_each_entry(objagg_obj, &objagg->obj_list, list) { + node = &graph->nodes[i++]; + node->objagg_obj = objagg_obj; + } + + /* Assemble a temporary graph. Insert edge X->Y in case Y can be + * in delta of X. + */ + for (i = 0; i < nodes_count; i++) { + for (j = 0; j < nodes_count; j++) { + if (i == j) + continue; + pnode = &graph->nodes[i]; + node = &graph->nodes[j]; + if (objagg->ops->delta_check(objagg->priv, + pnode->objagg_obj->obj, + node->objagg_obj->obj)) { + objagg_tmp_graph_edge_set(graph, i, j); + + } + } + } + return graph; + +err_edges_alloc: + kfree(graph->nodes); +err_nodes_alloc: + kfree(graph); + return NULL; +} + +static void objagg_tmp_graph_destroy(struct objagg_tmp_graph *graph) +{ + kfree(graph->edges); + kfree(graph->nodes); + kfree(graph); +} + +static int +objagg_opt_simple_greedy_fillup_hints(struct objagg_hints *objagg_hints, + struct objagg *objagg) +{ + struct objagg_hints_node *hnode, *parent_hnode; + struct objagg_tmp_graph *graph; + struct objagg_tmp_node *node; + int index; + int j; + int err; + + graph = objagg_tmp_graph_create(objagg); + if (!graph) + return -ENOMEM; + + /* Find the nodes from the ones that can accommodate most users + * and cross them out of the graph. Save them to the hint list. 
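+	 * E.g. with per-node user counts {A:3, B:1, C:1} and edges
+	 * A->B and A->C, node A weighs 3 + 1 + 1 = 5, so A is picked
+	 * as a root first and B and C are crossed out as its deltas.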
+ */ + while ((index = objagg_tmp_graph_node_max_weight(graph)) != -1) { + node = &graph->nodes[index]; + node->crossed_out = true; + hnode = objagg_hints_node_create(objagg_hints, + node->objagg_obj, + objagg->ops->obj_size, + NULL); + if (IS_ERR(hnode)) { + err = PTR_ERR(hnode); + goto out; + } + parent_hnode = hnode; + for (j = 0; j < graph->nodes_count; j++) { + if (!objagg_tmp_graph_is_edge(graph, index, j)) + continue; + node = &graph->nodes[j]; + if (node->crossed_out) + continue; + node->crossed_out = true; + hnode = objagg_hints_node_create(objagg_hints, + node->objagg_obj, + objagg->ops->obj_size, + parent_hnode); + if (IS_ERR(hnode)) { + err = PTR_ERR(hnode); + goto out; + } + } + } + + err = 0; +out: + objagg_tmp_graph_destroy(graph); + return err; +} + +struct objagg_opt_algo { + int (*fillup_hints)(struct objagg_hints *objagg_hints, + struct objagg *objagg); +}; + +static const struct objagg_opt_algo objagg_opt_simple_greedy = { + .fillup_hints = objagg_opt_simple_greedy_fillup_hints, +}; + +static const struct objagg_opt_algo *objagg_opt_algos[] = { + [OBJAGG_OPT_ALGO_SIMPLE_GREEDY] = &objagg_opt_simple_greedy, +}; + +static int objagg_hints_obj_cmp(struct rhashtable_compare_arg *arg, + const void *obj) +{ + struct rhashtable *ht = arg->ht; + struct objagg_hints *objagg_hints = + container_of(ht, struct objagg_hints, node_ht); + const struct objagg_ops *ops = objagg_hints->ops; + const char *ptr = obj; + + ptr += ht->p.key_offset; + return ops->hints_obj_cmp ? ops->hints_obj_cmp(ptr, arg->key) : + memcmp(ptr, arg->key, ht->p.key_len); +} + +/** + * objagg_hints_get - obtains hints instance + * @objagg: objagg instance + * @opt_algo_type: type of hints finding algorithm + * + * Note: all locking must be provided by the caller. + * + * According to the algo type, the existing objects of the objagg instance + * are walked to assemble an optimal tree. We call this tree hints. + * These hints can later be used for the creation of a new objagg + * instance. There, future object creations are checked against these + * hints in order to find out where exactly each new object should be + * placed, as a root or as a delta. + * + * Returns a pointer to hints instance in case of success, + * otherwise it returns pointer error using ERR_PTR macro.
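+ *
+ * A minimal sketch of the intended two-instance flow ("old_objagg",
+ * "new_objagg" and "priv" are illustrative):
+ *
+ *	hints = objagg_hints_get(old_objagg, OBJAGG_OPT_ALGO_SIMPLE_GREEDY);
+ *	if (IS_ERR(hints))
+ *		return PTR_ERR(hints);
+ *	new_objagg = objagg_create(ops, hints, priv);
+ *	...
+ *	objagg_hints_put(hints);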
+ */ +struct objagg_hints *objagg_hints_get(struct objagg *objagg, + enum objagg_opt_algo_type opt_algo_type) +{ + const struct objagg_opt_algo *algo = objagg_opt_algos[opt_algo_type]; + struct objagg_hints *objagg_hints; + int err; + + objagg_hints = kzalloc(sizeof(*objagg_hints), GFP_KERNEL); + if (!objagg_hints) + return ERR_PTR(-ENOMEM); + + objagg_hints->ops = objagg->ops; + objagg_hints->refcount = 1; + + INIT_LIST_HEAD(&objagg_hints->node_list); + + objagg_hints->ht_params.key_len = objagg->ops->obj_size; + objagg_hints->ht_params.key_offset = + offsetof(struct objagg_hints_node, obj); + objagg_hints->ht_params.head_offset = + offsetof(struct objagg_hints_node, ht_node); + objagg_hints->ht_params.obj_cmpfn = objagg_hints_obj_cmp; + + err = rhashtable_init(&objagg_hints->node_ht, &objagg_hints->ht_params); + if (err) + goto err_rhashtable_init; + + err = algo->fillup_hints(objagg_hints, objagg); + if (err) + goto err_fillup_hints; + + if (WARN_ON(objagg_hints->node_count != objagg->obj_count)) { + err = -EINVAL; + goto err_node_count_check; + } + + return objagg_hints; + +err_node_count_check: +err_fillup_hints: + objagg_hints_flush(objagg_hints); + rhashtable_destroy(&objagg_hints->node_ht); +err_rhashtable_init: + kfree(objagg_hints); + return ERR_PTR(err); +} +EXPORT_SYMBOL(objagg_hints_get); + +/** + * objagg_hints_put - puts hints instance + * @objagg_hints: objagg hints instance + * + * Note: all locking must be provided by the caller. + */ +void objagg_hints_put(struct objagg_hints *objagg_hints) +{ + if (--objagg_hints->refcount) + return; + objagg_hints_flush(objagg_hints); + rhashtable_destroy(&objagg_hints->node_ht); + kfree(objagg_hints); +} +EXPORT_SYMBOL(objagg_hints_put); + +/** + * objagg_hints_stats_get - obtains stats of the hints instance + * @objagg_hints: hints instance + * + * Note: all locking must be provided by the caller. + * + * The returned structure contains statistics of all objects + * currently in use, ordered by following rules: + * 1) Root objects are always on lower indexes than the rest. + * 2) Objects with higher delta user count are always on lower + * indexes. + * 3) In case multiple objects have the same delta user count, + * the objects are ordered by user count. + * + * Returns a pointer to stats instance in case of success, + * otherwise it returns pointer error using ERR_PTR macro. 
+ */ +const struct objagg_stats * +objagg_hints_stats_get(struct objagg_hints *objagg_hints) +{ + struct objagg_stats *objagg_stats; + struct objagg_hints_node *hnode; + int i; + + objagg_stats = kzalloc(struct_size(objagg_stats, stats_info, + objagg_hints->node_count), + GFP_KERNEL); + if (!objagg_stats) + return ERR_PTR(-ENOMEM); + + i = 0; + list_for_each_entry(hnode, &objagg_hints->node_list, list) { + memcpy(&objagg_stats->stats_info[i], &hnode->stats_info, + sizeof(objagg_stats->stats_info[0])); + if (objagg_stats->stats_info[i].is_root) + objagg_stats->root_count++; + i++; + } + objagg_stats->stats_info_count = i; + + sort(objagg_stats->stats_info, objagg_stats->stats_info_count, + sizeof(struct objagg_obj_stats_info), + objagg_stats_info_sort_cmp_func, NULL); + + return objagg_stats; +} +EXPORT_SYMBOL(objagg_hints_stats_get); + +MODULE_LICENSE("Dual BSD/GPL"); +MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>"); +MODULE_DESCRIPTION("Object aggregation manager"); diff --git a/lib/parser.c b/lib/parser.c index 3278958b472a..dd70e5e6c9e2 100644 --- a/lib/parser.c +++ b/lib/parser.c @@ -131,13 +131,10 @@ static int match_number(substring_t *s, int *result, int base) char *buf; int ret; long val; - size_t len = s->to - s->from; - buf = kmalloc(len + 1, GFP_KERNEL); + buf = match_strdup(s); if (!buf) return -ENOMEM; - memcpy(buf, s->from, len); - buf[len] = '\0'; ret = 0; val = simple_strtol(buf, &endp, base); @@ -166,13 +163,10 @@ static int match_u64int(substring_t *s, u64 *result, int base) char *buf; int ret; u64 val; - size_t len = s->to - s->from; - buf = kmalloc(len + 1, GFP_KERNEL); + buf = match_strdup(s); if (!buf) return -ENOMEM; - memcpy(buf, s->from, len); - buf[len] = '\0'; ret = kstrtoull(buf, base, &val); if (!ret) @@ -327,10 +321,6 @@ EXPORT_SYMBOL(match_strlcpy); */ char *match_strdup(const substring_t *s) { - size_t sz = s->to - s->from + 1; - char *p = kmalloc(sz, GFP_KERNEL); - if (p) - match_strlcpy(p, s, sz); - return p; + return kmemdup_nul(s->from, s->to - s->from, GFP_KERNEL); } EXPORT_SYMBOL(match_strdup); diff --git a/lib/percpu-refcount.c b/lib/percpu-refcount.c index de10b8c0bff6..9877682e49c7 100644 --- a/lib/percpu-refcount.c +++ b/lib/percpu-refcount.c @@ -181,7 +181,7 @@ static void __percpu_ref_switch_to_atomic(struct percpu_ref *ref, ref->confirm_switch = confirm_switch ?: percpu_ref_noop_confirm_switch; percpu_ref_get(ref); /* put after confirmation */ - call_rcu_sched(&ref->rcu, percpu_ref_switch_to_atomic_rcu); + call_rcu(&ref->rcu, percpu_ref_switch_to_atomic_rcu); } static void __percpu_ref_switch_to_percpu(struct percpu_ref *ref) diff --git a/lib/radix-tree.c b/lib/radix-tree.c index bc03ecc4dfd2..14d51548bea6 100644 --- a/lib/radix-tree.c +++ b/lib/radix-tree.c @@ -38,15 +38,13 @@ #include <linux/rcupdate.h> #include <linux/slab.h> #include <linux/string.h> +#include <linux/xarray.h> -/* Number of nodes in fully populated tree of given height */ -static unsigned long height_to_maxnodes[RADIX_TREE_MAX_PATH + 1] __read_mostly; - /* * Radix tree node cache. 
*/ -static struct kmem_cache *radix_tree_node_cachep; +struct kmem_cache *radix_tree_node_cachep; /* * The radix tree is variable-height, so an insert operation not only has @@ -98,24 +96,7 @@ static inline void *node_to_entry(void *ptr) return (void *)((unsigned long)ptr | RADIX_TREE_INTERNAL_NODE); } -#define RADIX_TREE_RETRY node_to_entry(NULL) - -#ifdef CONFIG_RADIX_TREE_MULTIORDER -/* Sibling slots point directly to another slot in the same node */ -static inline -bool is_sibling_entry(const struct radix_tree_node *parent, void *node) -{ - void __rcu **ptr = node; - return (parent->slots <= ptr) && - (ptr < parent->slots + RADIX_TREE_MAP_SIZE); -} -#else -static inline -bool is_sibling_entry(const struct radix_tree_node *parent, void *node) -{ - return false; -} -#endif +#define RADIX_TREE_RETRY XA_RETRY_ENTRY static inline unsigned long get_slot_offset(const struct radix_tree_node *parent, void __rcu **slot) @@ -129,24 +110,13 @@ static unsigned int radix_tree_descend(const struct radix_tree_node *parent, unsigned int offset = (index >> parent->shift) & RADIX_TREE_MAP_MASK; void __rcu **entry = rcu_dereference_raw(parent->slots[offset]); -#ifdef CONFIG_RADIX_TREE_MULTIORDER - if (radix_tree_is_internal_node(entry)) { - if (is_sibling_entry(parent, entry)) { - void __rcu **sibentry; - sibentry = (void __rcu **) entry_to_node(entry); - offset = get_slot_offset(parent, sibentry); - entry = rcu_dereference_raw(*sibentry); - } - } -#endif - *nodep = (void *)entry; return offset; } static inline gfp_t root_gfp_mask(const struct radix_tree_root *root) { - return root->gfp_mask & (__GFP_BITS_MASK & ~GFP_ZONEMASK); + return root->xa_flags & (__GFP_BITS_MASK & ~GFP_ZONEMASK); } static inline void tag_set(struct radix_tree_node *node, unsigned int tag, @@ -169,32 +139,32 @@ static inline int tag_get(const struct radix_tree_node *node, unsigned int tag, static inline void root_tag_set(struct radix_tree_root *root, unsigned tag) { - root->gfp_mask |= (__force gfp_t)(1 << (tag + ROOT_TAG_SHIFT)); + root->xa_flags |= (__force gfp_t)(1 << (tag + ROOT_TAG_SHIFT)); } static inline void root_tag_clear(struct radix_tree_root *root, unsigned tag) { - root->gfp_mask &= (__force gfp_t)~(1 << (tag + ROOT_TAG_SHIFT)); + root->xa_flags &= (__force gfp_t)~(1 << (tag + ROOT_TAG_SHIFT)); } static inline void root_tag_clear_all(struct radix_tree_root *root) { - root->gfp_mask &= (1 << ROOT_TAG_SHIFT) - 1; + root->xa_flags &= (__force gfp_t)((1 << ROOT_TAG_SHIFT) - 1); } static inline int root_tag_get(const struct radix_tree_root *root, unsigned tag) { - return (__force int)root->gfp_mask & (1 << (tag + ROOT_TAG_SHIFT)); + return (__force int)root->xa_flags & (1 << (tag + ROOT_TAG_SHIFT)); } static inline unsigned root_tags_get(const struct radix_tree_root *root) { - return (__force unsigned)root->gfp_mask >> ROOT_TAG_SHIFT; + return (__force unsigned)root->xa_flags >> ROOT_TAG_SHIFT; } static inline bool is_idr(const struct radix_tree_root *root) { - return !!(root->gfp_mask & ROOT_IS_IDR); + return !!(root->xa_flags & ROOT_IS_IDR); } /* @@ -254,7 +224,7 @@ radix_tree_find_next_bit(struct radix_tree_node *node, unsigned int tag, static unsigned int iter_offset(const struct radix_tree_iter *iter) { - return (iter->index >> iter_shift(iter)) & RADIX_TREE_MAP_MASK; + return iter->index & RADIX_TREE_MAP_MASK; } /* @@ -277,99 +247,6 @@ static unsigned long next_index(unsigned long index, return (index & ~node_maxindex(node)) + (offset << node->shift); } -#ifndef __KERNEL__ -static void dump_node(struct radix_tree_node 
*node, unsigned long index) -{ - unsigned long i; - - pr_debug("radix node: %p offset %d indices %lu-%lu parent %p tags %lx %lx %lx shift %d count %d exceptional %d\n", - node, node->offset, index, index | node_maxindex(node), - node->parent, - node->tags[0][0], node->tags[1][0], node->tags[2][0], - node->shift, node->count, node->exceptional); - - for (i = 0; i < RADIX_TREE_MAP_SIZE; i++) { - unsigned long first = index | (i << node->shift); - unsigned long last = first | ((1UL << node->shift) - 1); - void *entry = node->slots[i]; - if (!entry) - continue; - if (entry == RADIX_TREE_RETRY) { - pr_debug("radix retry offset %ld indices %lu-%lu parent %p\n", - i, first, last, node); - } else if (!radix_tree_is_internal_node(entry)) { - pr_debug("radix entry %p offset %ld indices %lu-%lu parent %p\n", - entry, i, first, last, node); - } else if (is_sibling_entry(node, entry)) { - pr_debug("radix sblng %p offset %ld indices %lu-%lu parent %p val %p\n", - entry, i, first, last, node, - *(void **)entry_to_node(entry)); - } else { - dump_node(entry_to_node(entry), first); - } - } -} - -/* For debug */ -static void radix_tree_dump(struct radix_tree_root *root) -{ - pr_debug("radix root: %p rnode %p tags %x\n", - root, root->rnode, - root->gfp_mask >> ROOT_TAG_SHIFT); - if (!radix_tree_is_internal_node(root->rnode)) - return; - dump_node(entry_to_node(root->rnode), 0); -} - -static void dump_ida_node(void *entry, unsigned long index) -{ - unsigned long i; - - if (!entry) - return; - - if (radix_tree_is_internal_node(entry)) { - struct radix_tree_node *node = entry_to_node(entry); - - pr_debug("ida node: %p offset %d indices %lu-%lu parent %p free %lx shift %d count %d\n", - node, node->offset, index * IDA_BITMAP_BITS, - ((index | node_maxindex(node)) + 1) * - IDA_BITMAP_BITS - 1, - node->parent, node->tags[0][0], node->shift, - node->count); - for (i = 0; i < RADIX_TREE_MAP_SIZE; i++) - dump_ida_node(node->slots[i], - index | (i << node->shift)); - } else if (radix_tree_exceptional_entry(entry)) { - pr_debug("ida excp: %p offset %d indices %lu-%lu data %lx\n", - entry, (int)(index & RADIX_TREE_MAP_MASK), - index * IDA_BITMAP_BITS, - index * IDA_BITMAP_BITS + BITS_PER_LONG - - RADIX_TREE_EXCEPTIONAL_SHIFT, - (unsigned long)entry >> - RADIX_TREE_EXCEPTIONAL_SHIFT); - } else { - struct ida_bitmap *bitmap = entry; - - pr_debug("ida btmp: %p offset %d indices %lu-%lu data", bitmap, - (int)(index & RADIX_TREE_MAP_MASK), - index * IDA_BITMAP_BITS, - (index + 1) * IDA_BITMAP_BITS - 1); - for (i = 0; i < IDA_BITMAP_LONGS; i++) - pr_cont(" %lx", bitmap->bitmap[i]); - pr_cont("\n"); - } -} - -static void ida_dump(struct ida *ida) -{ - struct radix_tree_root *root = &ida->ida_rt; - pr_debug("ida: %p node %p free %d\n", ida, root->rnode, - root->gfp_mask >> ROOT_TAG_SHIFT); - dump_ida_node(root->rnode, 0); -} -#endif - /* * This assumes that the caller has performed appropriate preallocation, and * that the caller has pinned this thread of control to the current CPU. 
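 *
 * A typical insertion sequence therefore pairs preloading with the
 * tree lock, e.g. (sketch; "lock", "index" and "item" are
 * illustrative):
 *
 *	if (!radix_tree_preload(GFP_KERNEL)) {
 *		spin_lock(&lock);
 *		radix_tree_insert(&tree, index, item);
 *		spin_unlock(&lock);
 *		radix_tree_preload_end();
 *	}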
@@ -378,7 +255,7 @@ static struct radix_tree_node * radix_tree_node_alloc(gfp_t gfp_mask, struct radix_tree_node *parent, struct radix_tree_root *root, unsigned int shift, unsigned int offset, - unsigned int count, unsigned int exceptional) + unsigned int count, unsigned int nr_values) { struct radix_tree_node *ret = NULL; @@ -425,14 +302,14 @@ out: ret->shift = shift; ret->offset = offset; ret->count = count; - ret->exceptional = exceptional; + ret->nr_values = nr_values; ret->parent = parent; - ret->root = root; + ret->array = root; } return ret; } -static void radix_tree_node_rcu_free(struct rcu_head *head) +void radix_tree_node_rcu_free(struct rcu_head *head) { struct radix_tree_node *node = container_of(head, struct radix_tree_node, rcu_head); @@ -530,77 +407,10 @@ int radix_tree_maybe_preload(gfp_t gfp_mask) } EXPORT_SYMBOL(radix_tree_maybe_preload); -#ifdef CONFIG_RADIX_TREE_MULTIORDER -/* - * Preload with enough objects to ensure that we can split a single entry - * of order @old_order into many entries of size @new_order - */ -int radix_tree_split_preload(unsigned int old_order, unsigned int new_order, - gfp_t gfp_mask) -{ - unsigned top = 1 << (old_order % RADIX_TREE_MAP_SHIFT); - unsigned layers = (old_order / RADIX_TREE_MAP_SHIFT) - - (new_order / RADIX_TREE_MAP_SHIFT); - unsigned nr = 0; - - WARN_ON_ONCE(!gfpflags_allow_blocking(gfp_mask)); - BUG_ON(new_order >= old_order); - - while (layers--) - nr = nr * RADIX_TREE_MAP_SIZE + 1; - return __radix_tree_preload(gfp_mask, top * nr); -} -#endif - -/* - * The same as function above, but preload number of nodes required to insert - * (1 << order) continuous naturally-aligned elements. - */ -int radix_tree_maybe_preload_order(gfp_t gfp_mask, int order) -{ - unsigned long nr_subtrees; - int nr_nodes, subtree_height; - - /* Preloading doesn't help anything with this gfp mask, skip it */ - if (!gfpflags_allow_blocking(gfp_mask)) { - preempt_disable(); - return 0; - } - - /* - * Calculate number and height of fully populated subtrees it takes to - * store (1 << order) elements. - */ - nr_subtrees = 1 << order; - for (subtree_height = 0; nr_subtrees > RADIX_TREE_MAP_SIZE; - subtree_height++) - nr_subtrees >>= RADIX_TREE_MAP_SHIFT; - - /* - * The worst case is zero height tree with a single item at index 0 and - * then inserting items starting at ULONG_MAX - (1 << order). - * - * This requires RADIX_TREE_MAX_PATH nodes to build branch from root to - * 0-index item. - */ - nr_nodes = RADIX_TREE_MAX_PATH; - - /* Plus branch to fully populated subtrees. */ - nr_nodes += RADIX_TREE_MAX_PATH - subtree_height; - - /* Root node is shared. */ - nr_nodes--; - - /* Plus nodes required to build subtrees. 
*/ - nr_nodes += nr_subtrees * height_to_maxnodes[subtree_height]; - - return __radix_tree_preload(gfp_mask, nr_nodes); -} - static unsigned radix_tree_load_root(const struct radix_tree_root *root, struct radix_tree_node **nodep, unsigned long *maxindex) { - struct radix_tree_node *node = rcu_dereference_raw(root->rnode); + struct radix_tree_node *node = rcu_dereference_raw(root->xa_head); *nodep = node; @@ -629,7 +439,7 @@ static int radix_tree_extend(struct radix_tree_root *root, gfp_t gfp, while (index > shift_maxindex(maxshift)) maxshift += RADIX_TREE_MAP_SHIFT; - entry = rcu_dereference_raw(root->rnode); + entry = rcu_dereference_raw(root->xa_head); if (!entry && (!is_idr(root) || root_tag_get(root, IDR_FREE))) goto out; @@ -656,9 +466,9 @@ static int radix_tree_extend(struct radix_tree_root *root, gfp_t gfp, BUG_ON(shift > BITS_PER_LONG); if (radix_tree_is_internal_node(entry)) { entry_to_node(entry)->parent = node; - } else if (radix_tree_exceptional_entry(entry)) { - /* Moving an exceptional root->rnode to a node */ - node->exceptional = 1; + } else if (xa_is_value(entry)) { + /* Moving a value entry root->xa_head to a node */ + node->nr_values = 1; } /* * entry was already in the radix tree, so we do not need @@ -666,7 +476,7 @@ static int radix_tree_extend(struct radix_tree_root *root, gfp_t gfp, */ node->slots[0] = (void __rcu *)entry; entry = node_to_entry(node); - rcu_assign_pointer(root->rnode, entry); + rcu_assign_pointer(root->xa_head, entry); shift += RADIX_TREE_MAP_SHIFT; } while (shift <= maxshift); out: @@ -677,13 +487,12 @@ out: * radix_tree_shrink - shrink radix tree to minimum height * @root radix tree root */ -static inline bool radix_tree_shrink(struct radix_tree_root *root, - radix_tree_update_node_t update_node) +static inline bool radix_tree_shrink(struct radix_tree_root *root) { bool shrunk = false; for (;;) { - struct radix_tree_node *node = rcu_dereference_raw(root->rnode); + struct radix_tree_node *node = rcu_dereference_raw(root->xa_head); struct radix_tree_node *child; if (!radix_tree_is_internal_node(node)) @@ -692,15 +501,20 @@ static inline bool radix_tree_shrink(struct radix_tree_root *root, /* * The candidate node has more than one child, or its child - * is not at the leftmost slot, or the child is a multiorder - * entry, we cannot shrink. + * is not at the leftmost slot, we cannot shrink. */ if (node->count != 1) break; child = rcu_dereference_raw(node->slots[0]); if (!child) break; - if (!radix_tree_is_internal_node(child) && node->shift) + + /* + * For an IDR, we must not shrink entry 0 into the root in + * case somebody calls idr_replace() with a pointer that + * appears to be an internal entry + */ + if (!node->shift && is_idr(root)) break; if (radix_tree_is_internal_node(child)) @@ -711,9 +525,9 @@ static inline bool radix_tree_shrink(struct radix_tree_root *root, * moving the node from one part of the tree to another: if it * was safe to dereference the old pointer to it * (node->slots[0]), it will be safe to dereference the new - * one (root->rnode) as far as dependent read barriers go. + * one (root->xa_head) as far as dependent read barriers go. 
*/ - root->rnode = (void __rcu *)child; + root->xa_head = (void __rcu *)child; if (is_idr(root) && !tag_get(node, IDR_FREE, 0)) root_tag_clear(root, IDR_FREE); @@ -738,8 +552,6 @@ static inline bool radix_tree_shrink(struct radix_tree_root *root, node->count = 0; if (!radix_tree_is_internal_node(child)) { node->slots[0] = (void __rcu *)RADIX_TREE_RETRY; - if (update_node) - update_node(node); } WARN_ON_ONCE(!list_empty(&node->private_list)); @@ -751,8 +563,7 @@ static inline bool radix_tree_shrink(struct radix_tree_root *root, } static bool delete_node(struct radix_tree_root *root, - struct radix_tree_node *node, - radix_tree_update_node_t update_node) + struct radix_tree_node *node) { bool deleted = false; @@ -761,9 +572,8 @@ static bool delete_node(struct radix_tree_root *root, if (node->count) { if (node_to_entry(node) == - rcu_dereference_raw(root->rnode)) - deleted |= radix_tree_shrink(root, - update_node); + rcu_dereference_raw(root->xa_head)) + deleted |= radix_tree_shrink(root); return deleted; } @@ -778,7 +588,7 @@ static bool delete_node(struct radix_tree_root *root, */ if (!is_idr(root)) root_tag_clear_all(root); - root->rnode = NULL; + root->xa_head = NULL; } WARN_ON_ONCE(!list_empty(&node->private_list)); @@ -795,7 +605,6 @@ static bool delete_node(struct radix_tree_root *root, * __radix_tree_create - create a slot in a radix tree * @root: radix tree root * @index: index key - * @order: index occupies 2^order aligned slots * @nodep: returns node * @slotp: returns slot * @@ -803,36 +612,34 @@ static bool delete_node(struct radix_tree_root *root, * at position @index in the radix tree @root. * * Until there is more than one item in the tree, no nodes are - * allocated and @root->rnode is used as a direct slot instead of + * allocated and @root->xa_head is used as a direct slot instead of * pointing to a node, in which case *@nodep will be NULL. * * Returns -ENOMEM, or 0 for success. */ -int __radix_tree_create(struct radix_tree_root *root, unsigned long index, - unsigned order, struct radix_tree_node **nodep, - void __rcu ***slotp) +static int __radix_tree_create(struct radix_tree_root *root, + unsigned long index, struct radix_tree_node **nodep, + void __rcu ***slotp) { struct radix_tree_node *node = NULL, *child; - void __rcu **slot = (void __rcu **)&root->rnode; + void __rcu **slot = (void __rcu **)&root->xa_head; unsigned long maxindex; unsigned int shift, offset = 0; - unsigned long max = index | ((1UL << order) - 1); + unsigned long max = index; gfp_t gfp = root_gfp_mask(root); shift = radix_tree_load_root(root, &child, &maxindex); /* Make sure the tree is high enough. */ - if (order > 0 && max == ((1UL << order) - 1)) - max++; if (max > maxindex) { int error = radix_tree_extend(root, gfp, max, shift); if (error < 0) return error; shift = error; - child = rcu_dereference_raw(root->rnode); + child = rcu_dereference_raw(root->xa_head); } - while (shift > order) { + while (shift > 0) { shift -= RADIX_TREE_MAP_SHIFT; if (child == NULL) { /* Have to add a child node. 
*/ @@ -875,8 +682,7 @@ static void radix_tree_free_nodes(struct radix_tree_node *node) for (;;) { void *entry = rcu_dereference_raw(child->slots[offset]); - if (radix_tree_is_internal_node(entry) && - !is_sibling_entry(child, entry)) { + if (xa_is_node(entry) && child->shift) { child = entry_to_node(entry); offset = 0; continue; @@ -894,96 +700,30 @@ static void radix_tree_free_nodes(struct radix_tree_node *node) } } -#ifdef CONFIG_RADIX_TREE_MULTIORDER static inline int insert_entries(struct radix_tree_node *node, - void __rcu **slot, void *item, unsigned order, bool replace) -{ - struct radix_tree_node *child; - unsigned i, n, tag, offset, tags = 0; - - if (node) { - if (order > node->shift) - n = 1 << (order - node->shift); - else - n = 1; - offset = get_slot_offset(node, slot); - } else { - n = 1; - offset = 0; - } - - if (n > 1) { - offset = offset & ~(n - 1); - slot = &node->slots[offset]; - } - child = node_to_entry(slot); - - for (i = 0; i < n; i++) { - if (slot[i]) { - if (replace) { - node->count--; - for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++) - if (tag_get(node, tag, offset + i)) - tags |= 1 << tag; - } else - return -EEXIST; - } - } - - for (i = 0; i < n; i++) { - struct radix_tree_node *old = rcu_dereference_raw(slot[i]); - if (i) { - rcu_assign_pointer(slot[i], child); - for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++) - if (tags & (1 << tag)) - tag_clear(node, tag, offset + i); - } else { - rcu_assign_pointer(slot[i], item); - for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++) - if (tags & (1 << tag)) - tag_set(node, tag, offset); - } - if (radix_tree_is_internal_node(old) && - !is_sibling_entry(node, old) && - (old != RADIX_TREE_RETRY)) - radix_tree_free_nodes(old); - if (radix_tree_exceptional_entry(old)) - node->exceptional--; - } - if (node) { - node->count += n; - if (radix_tree_exceptional_entry(item)) - node->exceptional += n; - } - return n; -} -#else -static inline int insert_entries(struct radix_tree_node *node, - void __rcu **slot, void *item, unsigned order, bool replace) + void __rcu **slot, void *item, bool replace) { if (*slot) return -EEXIST; rcu_assign_pointer(*slot, item); if (node) { node->count++; - if (radix_tree_exceptional_entry(item)) - node->exceptional++; + if (xa_is_value(item)) + node->nr_values++; } return 1; } -#endif /** * __radix_tree_insert - insert into a radix tree * @root: radix tree root * @index: index key - * @order: key covers the 2^order indices around index * @item: item to insert * * Insert an item into the radix tree at position @index. */ -int __radix_tree_insert(struct radix_tree_root *root, unsigned long index, - unsigned order, void *item) +int radix_tree_insert(struct radix_tree_root *root, unsigned long index, + void *item) { struct radix_tree_node *node; void __rcu **slot; @@ -991,11 +731,11 @@ int __radix_tree_insert(struct radix_tree_root *root, unsigned long index, BUG_ON(radix_tree_is_internal_node(item)); - error = __radix_tree_create(root, index, order, &node, &slot); + error = __radix_tree_create(root, index, &node, &slot); if (error) return error; - error = insert_entries(node, slot, item, order, false); + error = insert_entries(node, slot, item, false); if (error < 0) return error; @@ -1010,7 +750,7 @@ int __radix_tree_insert(struct radix_tree_root *root, unsigned long index, return 0; } -EXPORT_SYMBOL(__radix_tree_insert); +EXPORT_SYMBOL(radix_tree_insert); /** * __radix_tree_lookup - lookup an item in a radix tree @@ -1023,7 +763,7 @@ EXPORT_SYMBOL(__radix_tree_insert); * tree @root. 
* * Until there is more than one item in the tree, no nodes are - * allocated and @root->rnode is used as a direct slot instead of + * allocated and @root->xa_head is used as a direct slot instead of * pointing to a node, in which case *@nodep will be NULL. */ void *__radix_tree_lookup(const struct radix_tree_root *root, @@ -1036,7 +776,7 @@ void *__radix_tree_lookup(const struct radix_tree_root *root, restart: parent = NULL; - slot = (void __rcu **)&root->rnode; + slot = (void __rcu **)&root->xa_head; radix_tree_load_root(root, &node, &maxindex); if (index > maxindex) return NULL; @@ -1044,11 +784,13 @@ void *__radix_tree_lookup(const struct radix_tree_root *root, while (radix_tree_is_internal_node(node)) { unsigned offset; - if (node == RADIX_TREE_RETRY) - goto restart; parent = entry_to_node(node); offset = radix_tree_descend(parent, &node, index); slot = parent->slots + offset; + if (node == RADIX_TREE_RETRY) + goto restart; + if (parent->shift == 0) + break; } if (nodep) @@ -1100,36 +842,12 @@ void *radix_tree_lookup(const struct radix_tree_root *root, unsigned long index) } EXPORT_SYMBOL(radix_tree_lookup); -static inline void replace_sibling_entries(struct radix_tree_node *node, - void __rcu **slot, int count, int exceptional) -{ -#ifdef CONFIG_RADIX_TREE_MULTIORDER - void *ptr = node_to_entry(slot); - unsigned offset = get_slot_offset(node, slot) + 1; - - while (offset < RADIX_TREE_MAP_SIZE) { - if (rcu_dereference_raw(node->slots[offset]) != ptr) - break; - if (count < 0) { - node->slots[offset] = NULL; - node->count--; - } - node->exceptional += exceptional; - offset++; - } -#endif -} - static void replace_slot(void __rcu **slot, void *item, - struct radix_tree_node *node, int count, int exceptional) + struct radix_tree_node *node, int count, int values) { - if (WARN_ON_ONCE(radix_tree_is_internal_node(item))) - return; - - if (node && (count || exceptional)) { + if (node && (count || values)) { node->count += count; - node->exceptional += exceptional; - replace_sibling_entries(node, slot, count, exceptional); + node->nr_values += values; } rcu_assign_pointer(*slot, item); @@ -1172,37 +890,31 @@ static int calculate_count(struct radix_tree_root *root, * @node: pointer to tree node * @slot: pointer to slot in @node * @item: new item to store in the slot. - * @update_node: callback for changing leaf nodes * * For use with __radix_tree_lookup(). Caller must hold tree write locked * across slot lookup and replacement. */ void __radix_tree_replace(struct radix_tree_root *root, struct radix_tree_node *node, - void __rcu **slot, void *item, - radix_tree_update_node_t update_node) + void __rcu **slot, void *item) { void *old = rcu_dereference_raw(*slot); - int exceptional = !!radix_tree_exceptional_entry(item) - - !!radix_tree_exceptional_entry(old); + int values = !!xa_is_value(item) - !!xa_is_value(old); int count = calculate_count(root, node, slot, item, old); /* - * This function supports replacing exceptional entries and + * This function supports replacing value entries and * deleting entries, but that needs accounting against the - * node unless the slot is root->rnode. + * node unless the slot is root->xa_head. 
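+ * (E.g. replacing a value entry with a page pointer decreases
+ * node->nr_values by one; replacing an entry with NULL also
+ * decreases node->count.)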
*/ - WARN_ON_ONCE(!node && (slot != (void __rcu **)&root->rnode) && - (count || exceptional)); - replace_slot(slot, item, node, count, exceptional); + WARN_ON_ONCE(!node && (slot != (void __rcu **)&root->xa_head) && + (count || values)); + replace_slot(slot, item, node, count, values); if (!node) return; - if (update_node) - update_node(node); - - delete_node(root, node, update_node); + delete_node(root, node); } /** @@ -1211,12 +923,12 @@ void __radix_tree_replace(struct radix_tree_root *root, * @slot: pointer to slot * @item: new item to store in the slot. * - * For use with radix_tree_lookup_slot(), radix_tree_gang_lookup_slot(), + * For use with radix_tree_lookup_slot() and * radix_tree_gang_lookup_tag_slot(). Caller must hold tree write locked * across slot lookup and replacement. * * NOTE: This cannot be used to switch between non-entries (empty slots), - * regular entries, and exceptional entries, as that requires accounting + * regular entries, and value entries, as that requires accounting * inside the radix tree node. When switching from one type of entry or * deleting, use __radix_tree_lookup() and __radix_tree_replace() or * radix_tree_iter_replace(). @@ -1224,7 +936,7 @@ void __radix_tree_replace(struct radix_tree_root *root, void radix_tree_replace_slot(struct radix_tree_root *root, void __rcu **slot, void *item) { - __radix_tree_replace(root, NULL, slot, item, NULL); + __radix_tree_replace(root, NULL, slot, item); } EXPORT_SYMBOL(radix_tree_replace_slot); @@ -1234,162 +946,16 @@ EXPORT_SYMBOL(radix_tree_replace_slot); * @slot: pointer to slot * @item: new item to store in the slot. * - * For use with radix_tree_split() and radix_tree_for_each_slot(). - * Caller must hold tree write locked across split and replacement. + * For use with radix_tree_for_each_slot(). + * Caller must hold tree write locked. */ void radix_tree_iter_replace(struct radix_tree_root *root, const struct radix_tree_iter *iter, void __rcu **slot, void *item) { - __radix_tree_replace(root, iter->node, slot, item, NULL); -} - -#ifdef CONFIG_RADIX_TREE_MULTIORDER -/** - * radix_tree_join - replace multiple entries with one multiorder entry - * @root: radix tree root - * @index: an index inside the new entry - * @order: order of the new entry - * @item: new entry - * - * Call this function to replace several entries with one larger entry. - * The existing entries are presumed to not need freeing as a result of - * this call. - * - * The replacement entry will have all the tags set on it that were set - * on any of the entries it is replacing. - */ -int radix_tree_join(struct radix_tree_root *root, unsigned long index, - unsigned order, void *item) -{ - struct radix_tree_node *node; - void __rcu **slot; - int error; - - BUG_ON(radix_tree_is_internal_node(item)); - - error = __radix_tree_create(root, index, order, &node, &slot); - if (!error) - error = insert_entries(node, slot, item, order, true); - if (error > 0) - error = 0; - - return error; + __radix_tree_replace(root, iter->node, slot, item); } -/** - * radix_tree_split - Split an entry into smaller entries - * @root: radix tree root - * @index: An index within the large entry - * @order: Order of new entries - * - * Call this function as the first step in replacing a multiorder entry - * with several entries of lower order. After this function returns, - * loop over the relevant portion of the tree using radix_tree_for_each_slot() - * and call radix_tree_iter_replace() to set up each new entry. 
- * - * The tags from this entry are replicated to all the new entries. - * - * The radix tree should be locked against modification during the entire - * replacement operation. Lock-free lookups will see RADIX_TREE_RETRY which - * should prompt RCU walkers to restart the lookup from the root. - */ -int radix_tree_split(struct radix_tree_root *root, unsigned long index, - unsigned order) -{ - struct radix_tree_node *parent, *node, *child; - void __rcu **slot; - unsigned int offset, end; - unsigned n, tag, tags = 0; - gfp_t gfp = root_gfp_mask(root); - - if (!__radix_tree_lookup(root, index, &parent, &slot)) - return -ENOENT; - if (!parent) - return -ENOENT; - - offset = get_slot_offset(parent, slot); - - for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++) - if (tag_get(parent, tag, offset)) - tags |= 1 << tag; - - for (end = offset + 1; end < RADIX_TREE_MAP_SIZE; end++) { - if (!is_sibling_entry(parent, - rcu_dereference_raw(parent->slots[end]))) - break; - for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++) - if (tags & (1 << tag)) - tag_set(parent, tag, end); - /* rcu_assign_pointer ensures tags are set before RETRY */ - rcu_assign_pointer(parent->slots[end], RADIX_TREE_RETRY); - } - rcu_assign_pointer(parent->slots[offset], RADIX_TREE_RETRY); - parent->exceptional -= (end - offset); - - if (order == parent->shift) - return 0; - if (order > parent->shift) { - while (offset < end) - offset += insert_entries(parent, &parent->slots[offset], - RADIX_TREE_RETRY, order, true); - return 0; - } - - node = parent; - - for (;;) { - if (node->shift > order) { - child = radix_tree_node_alloc(gfp, node, root, - node->shift - RADIX_TREE_MAP_SHIFT, - offset, 0, 0); - if (!child) - goto nomem; - if (node != parent) { - node->count++; - rcu_assign_pointer(node->slots[offset], - node_to_entry(child)); - for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++) - if (tags & (1 << tag)) - tag_set(node, tag, offset); - } - - node = child; - offset = 0; - continue; - } - - n = insert_entries(node, &node->slots[offset], - RADIX_TREE_RETRY, order, false); - BUG_ON(n > RADIX_TREE_MAP_SIZE); - - for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++) - if (tags & (1 << tag)) - tag_set(node, tag, offset); - offset += n; - - while (offset == RADIX_TREE_MAP_SIZE) { - if (node == parent) - break; - offset = node->offset; - child = node; - node = node->parent; - rcu_assign_pointer(node->slots[offset], - node_to_entry(child)); - offset++; - } - if ((node == parent) && (offset == end)) - return 0; - } - - nomem: - /* Shouldn't happen; did user forget to preload? 
*/ - /* TODO: free all the allocated nodes */ - WARN_ON(1); - return -ENOMEM; -} -#endif - static void node_tag_set(struct radix_tree_root *root, struct radix_tree_node *node, unsigned int tag, unsigned int offset) @@ -1447,18 +1013,6 @@ void *radix_tree_tag_set(struct radix_tree_root *root, } EXPORT_SYMBOL(radix_tree_tag_set); -/** - * radix_tree_iter_tag_set - set a tag on the current iterator entry - * @root: radix tree root - * @iter: iterator state - * @tag: tag to set - */ -void radix_tree_iter_tag_set(struct radix_tree_root *root, - const struct radix_tree_iter *iter, unsigned int tag) -{ - node_tag_set(root, iter->node, tag, iter_offset(iter)); -} - static void node_tag_clear(struct radix_tree_root *root, struct radix_tree_node *node, unsigned int tag, unsigned int offset) @@ -1574,14 +1128,6 @@ int radix_tree_tag_get(const struct radix_tree_root *root, } EXPORT_SYMBOL(radix_tree_tag_get); -static inline void __set_iter_shift(struct radix_tree_iter *iter, - unsigned int shift) -{ -#ifdef CONFIG_RADIX_TREE_MULTIORDER - iter->shift = shift; -#endif -} - /* Construct iter->tags bit-mask from node->tags[tag] array */ static void set_iter_tags(struct radix_tree_iter *iter, struct radix_tree_node *node, unsigned offset, @@ -1608,92 +1154,11 @@ static void set_iter_tags(struct radix_tree_iter *iter, } } -#ifdef CONFIG_RADIX_TREE_MULTIORDER -static void __rcu **skip_siblings(struct radix_tree_node **nodep, - void __rcu **slot, struct radix_tree_iter *iter) -{ - while (iter->index < iter->next_index) { - *nodep = rcu_dereference_raw(*slot); - if (*nodep && !is_sibling_entry(iter->node, *nodep)) - return slot; - slot++; - iter->index = __radix_tree_iter_add(iter, 1); - iter->tags >>= 1; - } - - *nodep = NULL; - return NULL; -} - -void __rcu **__radix_tree_next_slot(void __rcu **slot, - struct radix_tree_iter *iter, unsigned flags) -{ - unsigned tag = flags & RADIX_TREE_ITER_TAG_MASK; - struct radix_tree_node *node; - - slot = skip_siblings(&node, slot, iter); - - while (radix_tree_is_internal_node(node)) { - unsigned offset; - unsigned long next_index; - - if (node == RADIX_TREE_RETRY) - return slot; - node = entry_to_node(node); - iter->node = node; - iter->shift = node->shift; - - if (flags & RADIX_TREE_ITER_TAGGED) { - offset = radix_tree_find_next_bit(node, tag, 0); - if (offset == RADIX_TREE_MAP_SIZE) - return NULL; - slot = &node->slots[offset]; - iter->index = __radix_tree_iter_add(iter, offset); - set_iter_tags(iter, node, offset, tag); - node = rcu_dereference_raw(*slot); - } else { - offset = 0; - slot = &node->slots[0]; - for (;;) { - node = rcu_dereference_raw(*slot); - if (node) - break; - slot++; - offset++; - if (offset == RADIX_TREE_MAP_SIZE) - return NULL; - } - iter->index = __radix_tree_iter_add(iter, offset); - } - if ((flags & RADIX_TREE_ITER_CONTIG) && (offset > 0)) - goto none; - next_index = (iter->index | shift_maxindex(iter->shift)) + 1; - if (next_index < iter->next_index) - iter->next_index = next_index; - } - - return slot; - none: - iter->next_index = 0; - return NULL; -} -EXPORT_SYMBOL(__radix_tree_next_slot); -#else -static void __rcu **skip_siblings(struct radix_tree_node **nodep, - void __rcu **slot, struct radix_tree_iter *iter) -{ - return slot; -} -#endif - void __rcu **radix_tree_iter_resume(void __rcu **slot, struct radix_tree_iter *iter) { - struct radix_tree_node *node; - slot++; iter->index = __radix_tree_iter_add(iter, 1); - skip_siblings(&node, slot, iter); iter->next_index = iter->index; iter->tags = 0; return NULL; @@ -1744,8 +1209,7 @@ void 
__rcu **radix_tree_next_chunk(const struct radix_tree_root *root, iter->next_index = maxindex + 1; iter->tags = 1; iter->node = NULL; - __set_iter_shift(iter, 0); - return (void __rcu **)&root->rnode; + return (void __rcu **)&root->xa_head; } do { @@ -1765,8 +1229,6 @@ void __rcu **radix_tree_next_chunk(const struct radix_tree_root *root, while (++offset < RADIX_TREE_MAP_SIZE) { void *slot = rcu_dereference_raw( node->slots[offset]); - if (is_sibling_entry(node, slot)) - continue; if (slot) break; } @@ -1784,13 +1246,12 @@ void __rcu **radix_tree_next_chunk(const struct radix_tree_root *root, goto restart; if (child == RADIX_TREE_RETRY) break; - } while (radix_tree_is_internal_node(child)); + } while (node->shift && radix_tree_is_internal_node(child)); /* Update the iterator state */ - iter->index = (index &~ node_maxindex(node)) | (offset << node->shift); + iter->index = (index &~ node_maxindex(node)) | offset; iter->next_index = (index | node_maxindex(node)) + 1; iter->node = node; - __set_iter_shift(iter, node->shift); if (flags & RADIX_TREE_ITER_TAGGED) set_iter_tags(iter, node, offset, tag); @@ -1847,48 +1308,6 @@ radix_tree_gang_lookup(const struct radix_tree_root *root, void **results, EXPORT_SYMBOL(radix_tree_gang_lookup); /** - * radix_tree_gang_lookup_slot - perform multiple slot lookup on radix tree - * @root: radix tree root - * @results: where the results of the lookup are placed - * @indices: where their indices should be placed (but usually NULL) - * @first_index: start the lookup from this key - * @max_items: place up to this many items at *results - * - * Performs an index-ascending scan of the tree for present items. Places - * their slots at *@results and returns the number of items which were - * placed at *@results. - * - * The implementation is naive. - * - * Like radix_tree_gang_lookup as far as RCU and locking goes. Slots must - * be dereferenced with radix_tree_deref_slot, and if using only RCU - * protection, radix_tree_deref_slot may fail requiring a retry. - */ -unsigned int -radix_tree_gang_lookup_slot(const struct radix_tree_root *root, - void __rcu ***results, unsigned long *indices, - unsigned long first_index, unsigned int max_items) -{ - struct radix_tree_iter iter; - void __rcu **slot; - unsigned int ret = 0; - - if (unlikely(!max_items)) - return 0; - - radix_tree_for_each_slot(slot, root, &iter, first_index) { - results[ret] = slot; - if (indices) - indices[ret] = iter.index; - if (++ret == max_items) - break; - } - - return ret; -} -EXPORT_SYMBOL(radix_tree_gang_lookup_slot); - -/** * radix_tree_gang_lookup_tag - perform multiple lookup on a radix tree * based on a tag * @root: radix tree root @@ -1964,28 +1383,11 @@ radix_tree_gang_lookup_tag_slot(const struct radix_tree_root *root, } EXPORT_SYMBOL(radix_tree_gang_lookup_tag_slot); -/** - * __radix_tree_delete_node - try to free node after clearing a slot - * @root: radix tree root - * @node: node containing @index - * @update_node: callback for changing leaf nodes - * - * After clearing the slot at @index in @node from radix tree - * rooted at @root, call this function to attempt freeing the - * node and shrinking the tree. 
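With sibling entries gone, radix_tree_next_chunk() above no longer has to skip anything: every present slot is a real entry and iteration is a plain walk. A hedged sketch of the consumer side of that iterator (my_count is hypothetical; the deref/retry dance is the documented pattern for lock-free readers):

#include <linux/radix-tree.h>
#include <linux/rcupdate.h>

static unsigned long my_count(struct radix_tree_root *root)
{
	struct radix_tree_iter iter;
	void __rcu **slot;
	unsigned long n = 0;

	rcu_read_lock();
	radix_tree_for_each_slot(slot, root, &iter, 0) {
		void *entry = radix_tree_deref_slot(slot);

		/* A concurrent shrink can leave RADIX_TREE_RETRY in the
		 * slot; restart this chunk instead of miscounting. */
		if (radix_tree_deref_retry(entry)) {
			slot = radix_tree_iter_retry(&iter);
			continue;
		}
		n++;
	}
	rcu_read_unlock();
	return n;
}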
- */ -void __radix_tree_delete_node(struct radix_tree_root *root, - struct radix_tree_node *node, - radix_tree_update_node_t update_node) -{ - delete_node(root, node, update_node); -} - static bool __radix_tree_delete(struct radix_tree_root *root, struct radix_tree_node *node, void __rcu **slot) { void *old = rcu_dereference_raw(*slot); - int exceptional = radix_tree_exceptional_entry(old) ? -1 : 0; + int values = xa_is_value(old) ? -1 : 0; unsigned offset = get_slot_offset(node, slot); int tag; @@ -1995,8 +1397,8 @@ static bool __radix_tree_delete(struct radix_tree_root *root, for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++) node_tag_clear(root, node, tag, offset); - replace_slot(slot, NULL, node, -1, exceptional); - return node && delete_node(root, node, NULL); + replace_slot(slot, NULL, node, -1, values); + return node && delete_node(root, node); } /** @@ -2068,19 +1470,6 @@ void *radix_tree_delete(struct radix_tree_root *root, unsigned long index) } EXPORT_SYMBOL(radix_tree_delete); -void radix_tree_clear_tags(struct radix_tree_root *root, - struct radix_tree_node *node, - void __rcu **slot) -{ - if (node) { - unsigned int tag, offset = get_slot_offset(node, slot); - for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++) - node_tag_clear(root, node, tag, offset); - } else { - root_tag_clear_all(root); - } -} - /** * radix_tree_tagged - test whether any items in the tree are tagged * @root: radix tree root @@ -2106,33 +1495,12 @@ void idr_preload(gfp_t gfp_mask) } EXPORT_SYMBOL(idr_preload); -int ida_pre_get(struct ida *ida, gfp_t gfp) -{ - /* - * The IDA API has no preload_end() equivalent. Instead, - * ida_get_new() can return -EAGAIN, prompting the caller - * to return to the ida_pre_get() step. - */ - if (!__radix_tree_preload(gfp, IDA_PRELOAD_SIZE)) - preempt_enable(); - - if (!this_cpu_read(ida_bitmap)) { - struct ida_bitmap *bitmap = kzalloc(sizeof(*bitmap), gfp); - if (!bitmap) - return 0; - if (this_cpu_cmpxchg(ida_bitmap, NULL, bitmap)) - kfree(bitmap); - } - - return 1; -} - void __rcu **idr_get_free(struct radix_tree_root *root, struct radix_tree_iter *iter, gfp_t gfp, unsigned long max) { struct radix_tree_node *node = NULL, *child; - void __rcu **slot = (void __rcu **)&root->rnode; + void __rcu **slot = (void __rcu **)&root->xa_head; unsigned long maxindex, start = iter->next_index; unsigned int shift, offset = 0; @@ -2148,8 +1516,10 @@ void __rcu **idr_get_free(struct radix_tree_root *root, if (error < 0) return ERR_PTR(error); shift = error; - child = rcu_dereference_raw(root->rnode); + child = rcu_dereference_raw(root->xa_head); } + if (start == 0 && shift == 0) + shift = RADIX_TREE_MAP_SHIFT; while (shift) { shift -= RADIX_TREE_MAP_SHIFT; @@ -2192,7 +1562,6 @@ void __rcu **idr_get_free(struct radix_tree_root *root, else iter->next_index = 1; iter->node = node; - __set_iter_shift(iter, shift); set_iter_tags(iter, node, offset, IDR_FREE); return slot; @@ -2211,10 +1580,10 @@ void __rcu **idr_get_free(struct radix_tree_root *root, */ void idr_destroy(struct idr *idr) { - struct radix_tree_node *node = rcu_dereference_raw(idr->idr_rt.rnode); + struct radix_tree_node *node = rcu_dereference_raw(idr->idr_rt.xa_head); if (radix_tree_is_internal_node(node)) radix_tree_free_nodes(node); - idr->idr_rt.rnode = NULL; + idr->idr_rt.xa_head = NULL; root_tag_set(&idr->idr_rt, IDR_FREE); } EXPORT_SYMBOL(idr_destroy); @@ -2228,31 +1597,6 @@ radix_tree_node_ctor(void *arg) INIT_LIST_HEAD(&node->private_list); } -static __init unsigned long __maxindex(unsigned int height) -{ - unsigned int width 
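idr_get_free() above is the allocation engine behind the public IDR API, and idr_preload() is its sleeping half; the caller-visible pattern they serve is preload-then-allocate-atomically. A sketch under those assumptions (my_idr, my_idr_lock and my_obj_register are illustrative names only):

#include <linux/idr.h>
#include <linux/spinlock.h>

static DEFINE_IDR(my_idr);
static DEFINE_SPINLOCK(my_idr_lock);

static int my_obj_register(void *obj)
{
	int id;

	idr_preload(GFP_KERNEL);	/* may sleep; fills the per-cpu pool */
	spin_lock(&my_idr_lock);
	/* end == 0 means "no upper bound"; GFP_NOWAIT: no sleeping here */
	id = idr_alloc(&my_idr, obj, 0, 0, GFP_NOWAIT);
	spin_unlock(&my_idr_lock);
	idr_preload_end();
	return id;	/* >= 0 on success, -ENOMEM or -ENOSPC otherwise */
}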
= height * RADIX_TREE_MAP_SHIFT; - int shift = RADIX_TREE_INDEX_BITS - width; - - if (shift < 0) - return ~0UL; - if (shift >= BITS_PER_LONG) - return 0UL; - return ~0UL >> shift; -} - -static __init void radix_tree_init_maxnodes(void) -{ - unsigned long height_to_maxindex[RADIX_TREE_MAX_PATH + 1]; - unsigned int i, j; - - for (i = 0; i < ARRAY_SIZE(height_to_maxindex); i++) - height_to_maxindex[i] = __maxindex(i); - for (i = 0; i < ARRAY_SIZE(height_to_maxnodes); i++) { - for (j = i; j > 0; j--) - height_to_maxnodes[i] += height_to_maxindex[j - 1] + 1; - } -} - static int radix_tree_cpu_dead(unsigned int cpu) { struct radix_tree_preload *rtp; @@ -2266,8 +1610,6 @@ static int radix_tree_cpu_dead(unsigned int cpu) kmem_cache_free(radix_tree_node_cachep, node); rtp->nr--; } - kfree(per_cpu(ida_bitmap, cpu)); - per_cpu(ida_bitmap, cpu) = NULL; return 0; } @@ -2277,11 +1619,11 @@ void __init radix_tree_init(void) BUILD_BUG_ON(RADIX_TREE_MAX_TAGS + __GFP_BITS_SHIFT > 32); BUILD_BUG_ON(ROOT_IS_IDR & ~GFP_ZONEMASK); + BUILD_BUG_ON(XA_CHUNK_SIZE > 255); radix_tree_node_cachep = kmem_cache_create("radix_tree_node", sizeof(struct radix_tree_node), 0, SLAB_PANIC | SLAB_RECLAIM_ACCOUNT, radix_tree_node_ctor); - radix_tree_init_maxnodes(); ret = cpuhp_setup_state_nocalls(CPUHP_RADIX_DEAD, "lib/radix:dead", NULL, radix_tree_cpu_dead); WARN_ON(ret < 0); diff --git a/lib/raid6/Makefile b/lib/raid6/Makefile index 2f8b61dfd9b0..4e90d443d1b0 100644 --- a/lib/raid6/Makefile +++ b/lib/raid6/Makefile @@ -13,11 +13,25 @@ raid6_pq-$(CONFIG_S390) += s390vx8.o recov_s390xc.o hostprogs-y += mktables quiet_cmd_unroll = UNROLL $@ - cmd_unroll = $(AWK) -f$(srctree)/$(src)/unroll.awk -vN=$(UNROLL) \ - < $< > $@ || ( rm -f $@ && exit 1 ) + cmd_unroll = $(AWK) -f$(srctree)/$(src)/unroll.awk -vN=$(UNROLL) < $< > $@ ifeq ($(CONFIG_ALTIVEC),y) altivec_flags := -maltivec $(call cc-option,-mabi=altivec) + +ifdef CONFIG_CC_IS_CLANG +# clang ppc port does not yet support -maltivec when -msoft-float is +# enabled. 
A future release of clang will resolve this +# https://bugs.llvm.org/show_bug.cgi?id=31177 +CFLAGS_REMOVE_altivec1.o += -msoft-float +CFLAGS_REMOVE_altivec2.o += -msoft-float +CFLAGS_REMOVE_altivec4.o += -msoft-float +CFLAGS_REMOVE_altivec8.o += -msoft-float +CFLAGS_REMOVE_altivec8.o += -msoft-float +CFLAGS_REMOVE_vpermxor1.o += -msoft-float +CFLAGS_REMOVE_vpermxor2.o += -msoft-float +CFLAGS_REMOVE_vpermxor4.o += -msoft-float +CFLAGS_REMOVE_vpermxor8.o += -msoft-float +endif endif # The GCC option -ffreestanding is required in order to compile code containing @@ -145,7 +159,7 @@ $(obj)/s390vx8.c: $(src)/s390vx.uc $(src)/unroll.awk FORCE $(call if_changed,unroll) quiet_cmd_mktable = TABLE $@ - cmd_mktable = $(obj)/mktables > $@ || ( rm -f $@ && exit 1 ) + cmd_mktable = $(obj)/mktables > $@ targets += tables.c $(obj)/tables.c: $(obj)/mktables FORCE diff --git a/lib/raid6/algos.c b/lib/raid6/algos.c index 5065b1e7e327..7e4f7a8ffa8e 100644 --- a/lib/raid6/algos.c +++ b/lib/raid6/algos.c @@ -34,64 +34,64 @@ struct raid6_calls raid6_call; EXPORT_SYMBOL_GPL(raid6_call); const struct raid6_calls * const raid6_algos[] = { -#if defined(__ia64__) - &raid6_intx16, - &raid6_intx32, -#endif #if defined(__i386__) && !defined(__arch_um__) - &raid6_mmxx1, - &raid6_mmxx2, - &raid6_sse1x1, - &raid6_sse1x2, - &raid6_sse2x1, - &raid6_sse2x2, -#ifdef CONFIG_AS_AVX2 - &raid6_avx2x1, - &raid6_avx2x2, -#endif #ifdef CONFIG_AS_AVX512 - &raid6_avx512x1, &raid6_avx512x2, + &raid6_avx512x1, #endif -#endif -#if defined(__x86_64__) && !defined(__arch_um__) - &raid6_sse2x1, - &raid6_sse2x2, - &raid6_sse2x4, #ifdef CONFIG_AS_AVX2 - &raid6_avx2x1, &raid6_avx2x2, - &raid6_avx2x4, + &raid6_avx2x1, +#endif + &raid6_sse2x2, + &raid6_sse2x1, + &raid6_sse1x2, + &raid6_sse1x1, + &raid6_mmxx2, + &raid6_mmxx1, #endif +#if defined(__x86_64__) && !defined(__arch_um__) #ifdef CONFIG_AS_AVX512 - &raid6_avx512x1, - &raid6_avx512x2, &raid6_avx512x4, + &raid6_avx512x2, + &raid6_avx512x1, #endif +#ifdef CONFIG_AS_AVX2 + &raid6_avx2x4, + &raid6_avx2x2, + &raid6_avx2x1, +#endif + &raid6_sse2x4, + &raid6_sse2x2, + &raid6_sse2x1, #endif #ifdef CONFIG_ALTIVEC - &raid6_altivec1, - &raid6_altivec2, - &raid6_altivec4, - &raid6_altivec8, - &raid6_vpermxor1, - &raid6_vpermxor2, - &raid6_vpermxor4, &raid6_vpermxor8, + &raid6_vpermxor4, + &raid6_vpermxor2, + &raid6_vpermxor1, + &raid6_altivec8, + &raid6_altivec4, + &raid6_altivec2, + &raid6_altivec1, #endif #if defined(CONFIG_S390) &raid6_s390vx8, #endif - &raid6_intx1, - &raid6_intx2, - &raid6_intx4, - &raid6_intx8, #ifdef CONFIG_KERNEL_MODE_NEON - &raid6_neonx1, - &raid6_neonx2, - &raid6_neonx4, &raid6_neonx8, + &raid6_neonx4, + &raid6_neonx2, + &raid6_neonx1, #endif +#if defined(__ia64__) + &raid6_intx32, + &raid6_intx16, +#endif + &raid6_intx8, + &raid6_intx4, + &raid6_intx2, + &raid6_intx1, NULL }; @@ -163,6 +163,11 @@ static inline const struct raid6_calls *raid6_choose_gen( if ((*algo)->valid && !(*algo)->valid()) continue; + if (!IS_ENABLED(CONFIG_RAID6_PQ_BENCHMARK)) { + best = *algo; + break; + } + perf = 0; preempt_disable(); diff --git a/lib/raid6/test/Makefile b/lib/raid6/test/Makefile index 5d73f5cb4d8a..3ab8720aa2f8 100644 --- a/lib/raid6/test/Makefile +++ b/lib/raid6/test/Makefile @@ -27,13 +27,16 @@ ifeq ($(ARCH),arm) CFLAGS += -I../../../arch/arm/include -mfpu=neon HAS_NEON = yes endif -ifeq ($(ARCH),arm64) +ifeq ($(ARCH),aarch64) CFLAGS += -I../../../arch/arm64/include HAS_NEON = yes endif ifeq ($(IS_X86),yes) OBJS += mmx.o sse1.o sse2.o avx2.o recov_ssse3.o recov_avx2.o avx512.o 
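The wholesale reordering of raid6_algos[] above is what makes the new CONFIG_RAID6_PQ_BENCHMARK=n path in raid6_choose_gen() work: each architecture's entries now run fastest first, so skipping the benchmark degenerates to taking the first usable entry. A compressed model of that selection step (struct and names simplified from the raid6 code):

#include <stdbool.h>
#include <stddef.h>

struct pq_calls {
	const char *name;
	bool (*valid)(void);	/* NULL means always usable */
};

/* algos[] is NULL-terminated and ordered fastest first per arch. */
static const struct pq_calls *pick_first_valid(const struct pq_calls *const algos[])
{
	for (size_t i = 0; algos[i]; i++) {
		if (algos[i]->valid && !algos[i]->valid())
			continue;	/* CPU lacks the needed feature */
		return algos[i];	/* first valid entry wins */
	}
	return NULL;
}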
recov_avx512.o + CFLAGS += $(shell echo "pshufb %xmm0, %xmm0" | \ + gcc -c -x assembler - >&/dev/null && \ + rm ./-.o && echo -DCONFIG_AS_SSSE3=1) CFLAGS += $(shell echo "vpbroadcastb %xmm0, %ymm1" | \ gcc -c -x assembler - >&/dev/null && \ rm ./-.o && echo -DCONFIG_AS_AVX2=1) @@ -41,7 +44,7 @@ ifeq ($(IS_X86),yes) gcc -c -x assembler - >&/dev/null && \ rm ./-.o && echo -DCONFIG_AS_AVX512=1) else ifeq ($(HAS_NEON),yes) - OBJS += neon.o neon1.o neon2.o neon4.o neon8.o + OBJS += neon.o neon1.o neon2.o neon4.o neon8.o recov_neon.o recov_neon_inner.o CFLAGS += -DCONFIG_KERNEL_MODE_NEON=1 else HAS_ALTIVEC := $(shell printf '\#include <altivec.h>\nvector int a;\n' |\ diff --git a/lib/refcount.c b/lib/refcount.c index ebcf8cd49e05..6e904af0fb3e 100644 --- a/lib/refcount.c +++ b/lib/refcount.c @@ -33,6 +33,9 @@ * Note that the allocator is responsible for ordering things between free() * and alloc(). * + * The decrements dec_and_test() and sub_and_test() also provide acquire + * ordering on success. + * */ #include <linux/mutex.h> @@ -164,8 +167,8 @@ EXPORT_SYMBOL(refcount_inc_checked); * at UINT_MAX. * * Provides release memory ordering, such that prior loads and stores are done - * before, and provides a control dependency such that free() must come after. - * See the comment on top. + * before, and provides an acquire ordering on success such that free() + * must come after. * * Use of this function is not recommended for the normal reference counting * use case in which references are taken and released one at a time. In these @@ -190,7 +193,12 @@ bool refcount_sub_and_test_checked(unsigned int i, refcount_t *r) } while (!atomic_try_cmpxchg_release(&r->refs, &val, new)); - return !new; + if (!new) { + smp_acquire__after_ctrl_dep(); + return true; + } + return false; + } EXPORT_SYMBOL(refcount_sub_and_test_checked); @@ -202,8 +210,8 @@ EXPORT_SYMBOL(refcount_sub_and_test_checked); * decrement when saturated at UINT_MAX. * * Provides release memory ordering, such that prior loads and stores are done - * before, and provides a control dependency such that free() must come after. - * See the comment on top. + * before, and provides an acquire ordering on success such that free() + * must come after. * * Return: true if the resulting refcount is 0, false otherwise */ diff --git a/lib/rhashtable.c b/lib/rhashtable.c index 30526afa8343..0a105d4af166 100644 --- a/lib/rhashtable.c +++ b/lib/rhashtable.c @@ -682,7 +682,7 @@ EXPORT_SYMBOL_GPL(rhashtable_walk_enter); * rhashtable_walk_exit - Free an iterator * @iter: Hash table Iterator * - * This function frees resources allocated by rhashtable_walk_init. + * This function frees resources allocated by rhashtable_walk_enter. 
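The refcount hunks above strengthen the documented contract: dec_and_test() and sub_and_test() now pair their release with a real acquire on the zero transition, via smp_acquire__after_ctrl_dep(), instead of a bare control dependency. A sketch of the free-side pattern this hardens (struct my_obj is hypothetical):

#include <linux/refcount.h>
#include <linux/slab.h>

struct my_obj {
	refcount_t ref;
	int payload;
};

static void my_obj_put(struct my_obj *o)
{
	/*
	 * Release: this thread's earlier stores to *o are visible before
	 * the count can reach zero.  Acquire on success: every other
	 * thread's prior stores are visible to us before kfree(), so the
	 * last holder never tears down a half-updated object.
	 */
	if (refcount_dec_and_test(&o->ref))
		kfree(o);
}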
*/ void rhashtable_walk_exit(struct rhashtable_iter *iter) { @@ -1179,8 +1179,7 @@ struct rhash_head __rcu **rht_bucket_nested(const struct bucket_table *tbl, unsigned int hash) { const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *)); - static struct rhash_head __rcu *rhnull = - (struct rhash_head __rcu *)NULLS_MARKER(0); + static struct rhash_head __rcu *rhnull; unsigned int index = hash & ((1 << tbl->nest) - 1); unsigned int size = tbl->size >> tbl->nest; unsigned int subhash = hash; @@ -1198,8 +1197,11 @@ struct rhash_head __rcu **rht_bucket_nested(const struct bucket_table *tbl, subhash >>= shift; } - if (!ntbl) + if (!ntbl) { + if (!rhnull) + INIT_RHT_NULLS_HEAD(rhnull); return &rhnull; + } return &ntbl[subhash].bucket; diff --git a/lib/sbitmap.c b/lib/sbitmap.c index fdd1b8aa8ac6..5b382c1244ed 100644 --- a/lib/sbitmap.c +++ b/lib/sbitmap.c @@ -20,6 +20,40 @@ #include <linux/sbitmap.h> #include <linux/seq_file.h> +/* + * See if we have deferred clears that we can batch move + */ +static inline bool sbitmap_deferred_clear(struct sbitmap *sb, int index) +{ + unsigned long mask, val; + bool ret = false; + unsigned long flags; + + spin_lock_irqsave(&sb->map[index].swap_lock, flags); + + if (!sb->map[index].cleared) + goto out_unlock; + + /* + * First get a stable cleared mask, setting the old mask to 0. + */ + do { + mask = sb->map[index].cleared; + } while (cmpxchg(&sb->map[index].cleared, mask, 0) != mask); + + /* + * Now clear the masked bits in our free word + */ + do { + val = sb->map[index].word; + } while (cmpxchg(&sb->map[index].word, val, val & ~mask) != val); + + ret = true; +out_unlock: + spin_unlock_irqrestore(&sb->map[index].swap_lock, flags); + return ret; +} + int sbitmap_init_node(struct sbitmap *sb, unsigned int depth, int shift, gfp_t flags, int node) { @@ -59,6 +93,7 @@ int sbitmap_init_node(struct sbitmap *sb, unsigned int depth, int shift, for (i = 0; i < sb->map_nr; i++) { sb->map[i].depth = min(depth, bits_per_word); depth -= sb->map[i].depth; + spin_lock_init(&sb->map[i].swap_lock); } return 0; } @@ -69,6 +104,9 @@ void sbitmap_resize(struct sbitmap *sb, unsigned int depth) unsigned int bits_per_word = 1U << sb->shift; unsigned int i; + for (i = 0; i < sb->map_nr; i++) + sbitmap_deferred_clear(sb, i); + sb->depth = depth; sb->map_nr = DIV_ROUND_UP(sb->depth, bits_per_word); @@ -111,6 +149,24 @@ static int __sbitmap_get_word(unsigned long *word, unsigned long depth, return nr; } +static int sbitmap_find_bit_in_index(struct sbitmap *sb, int index, + unsigned int alloc_hint, bool round_robin) +{ + int nr; + + do { + nr = __sbitmap_get_word(&sb->map[index].word, + sb->map[index].depth, alloc_hint, + !round_robin); + if (nr != -1) + break; + if (!sbitmap_deferred_clear(sb, index)) + break; + } while (1); + + return nr; +} + int sbitmap_get(struct sbitmap *sb, unsigned int alloc_hint, bool round_robin) { unsigned int i, index; @@ -118,24 +174,28 @@ int sbitmap_get(struct sbitmap *sb, unsigned int alloc_hint, bool round_robin) index = SB_NR_TO_INDEX(sb, alloc_hint); + /* + * Unless we're doing round robin tag allocation, just use the + * alloc_hint to find the right word index. No point in looping + * twice in find_next_zero_bit() for that case. 
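sbitmap_deferred_clear() above batches lazily freed bits back into the allocation word with two lock-free swap loops under swap_lock. A user-space model of the same idea using C11 atomics (layout simplified; the kernel version additionally holds the per-word spinlock to serialize concurrent batchers):

#include <stdatomic.h>
#include <stdbool.h>

struct toy_word {
	_Atomic unsigned long word;	/* 1 = allocated */
	_Atomic unsigned long cleared;	/* freed, not yet reusable */
};

static bool toy_deferred_clear(struct toy_word *w)
{
	/* Take ownership of the whole cleared mask in one shot... */
	unsigned long mask = atomic_exchange(&w->cleared, 0);

	if (!mask)
		return false;	/* nothing was deferred */
	/* ...then return those bits to the free pool in one batch. */
	atomic_fetch_and(&w->word, ~mask);
	return true;
}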
+ */ + if (round_robin) + alloc_hint = SB_NR_TO_BIT(sb, alloc_hint); + else + alloc_hint = 0; + for (i = 0; i < sb->map_nr; i++) { - nr = __sbitmap_get_word(&sb->map[index].word, - sb->map[index].depth, - SB_NR_TO_BIT(sb, alloc_hint), - !round_robin); + nr = sbitmap_find_bit_in_index(sb, index, alloc_hint, + round_robin); if (nr != -1) { nr += index << sb->shift; break; } /* Jump to next index. */ - index++; - alloc_hint = index << sb->shift; - - if (index >= sb->map_nr) { + alloc_hint = 0; + if (++index >= sb->map_nr) index = 0; - alloc_hint = 0; - } } return nr; @@ -151,6 +211,7 @@ int sbitmap_get_shallow(struct sbitmap *sb, unsigned int alloc_hint, index = SB_NR_TO_INDEX(sb, alloc_hint); for (i = 0; i < sb->map_nr; i++) { +again: nr = __sbitmap_get_word(&sb->map[index].word, min(sb->map[index].depth, shallow_depth), SB_NR_TO_BIT(sb, alloc_hint), true); @@ -159,6 +220,9 @@ int sbitmap_get_shallow(struct sbitmap *sb, unsigned int alloc_hint, break; } + if (sbitmap_deferred_clear(sb, index)) + goto again; + /* Jump to next index. */ index++; alloc_hint = index << sb->shift; @@ -178,7 +242,7 @@ bool sbitmap_any_bit_set(const struct sbitmap *sb) unsigned int i; for (i = 0; i < sb->map_nr; i++) { - if (sb->map[i].word) + if (sb->map[i].word & ~sb->map[i].cleared) return true; } return false; @@ -191,9 +255,10 @@ bool sbitmap_any_bit_clear(const struct sbitmap *sb) for (i = 0; i < sb->map_nr; i++) { const struct sbitmap_word *word = &sb->map[i]; + unsigned long mask = word->word & ~word->cleared; unsigned long ret; - ret = find_first_zero_bit(&word->word, word->depth); + ret = find_first_zero_bit(&mask, word->depth); if (ret < word->depth) return true; } @@ -201,23 +266,36 @@ bool sbitmap_any_bit_clear(const struct sbitmap *sb) } EXPORT_SYMBOL_GPL(sbitmap_any_bit_clear); -unsigned int sbitmap_weight(const struct sbitmap *sb) +static unsigned int __sbitmap_weight(const struct sbitmap *sb, bool set) { unsigned int i, weight = 0; for (i = 0; i < sb->map_nr; i++) { const struct sbitmap_word *word = &sb->map[i]; - weight += bitmap_weight(&word->word, word->depth); + if (set) + weight += bitmap_weight(&word->word, word->depth); + else + weight += bitmap_weight(&word->cleared, word->depth); } return weight; } -EXPORT_SYMBOL_GPL(sbitmap_weight); + +static unsigned int sbitmap_weight(const struct sbitmap *sb) +{ + return __sbitmap_weight(sb, true); +} + +static unsigned int sbitmap_cleared(const struct sbitmap *sb) +{ + return __sbitmap_weight(sb, false); +} void sbitmap_show(struct sbitmap *sb, struct seq_file *m) { seq_printf(m, "depth=%u\n", sb->depth); - seq_printf(m, "busy=%u\n", sbitmap_weight(sb)); + seq_printf(m, "busy=%u\n", sbitmap_weight(sb) - sbitmap_cleared(sb)); + seq_printf(m, "cleared=%u\n", sbitmap_cleared(sb)); seq_printf(m, "bits_per_word=%u\n", 1U << sb->shift); seq_printf(m, "map_nr=%u\n", sb->map_nr); } @@ -325,6 +403,7 @@ int sbitmap_queue_init_node(struct sbitmap_queue *sbq, unsigned int depth, sbq->min_shallow_depth = UINT_MAX; sbq->wake_batch = sbq_calc_wake_batch(sbq, depth); atomic_set(&sbq->wake_index, 0); + atomic_set(&sbq->ws_active, 0); sbq->ws = kzalloc_node(SBQ_WAIT_QUEUES * sizeof(*sbq->ws), flags, node); if (!sbq->ws) { @@ -440,6 +519,9 @@ static struct sbq_wait_state *sbq_wake_ptr(struct sbitmap_queue *sbq) { int i, wake_index; + if (!atomic_read(&sbq->ws_active)) + return NULL; + wake_index = atomic_read(&sbq->wake_index); for (i = 0; i < SBQ_WAIT_QUEUES; i++) { struct sbq_wait_state *ws = &sbq->ws[wake_index]; @@ -509,7 +591,8 @@ 
EXPORT_SYMBOL_GPL(sbitmap_queue_wake_up); void sbitmap_queue_clear(struct sbitmap_queue *sbq, unsigned int nr, unsigned int cpu) { - sbitmap_clear_bit_unlock(&sbq->sb, nr); + sbitmap_deferred_clear_bit(&sbq->sb, nr); + /* * Pairs with the memory barrier in set_current_state() to ensure the * proper ordering of clear_bit_unlock()/waitqueue_active() in the waker @@ -564,6 +647,7 @@ void sbitmap_queue_show(struct sbitmap_queue *sbq, struct seq_file *m) seq_printf(m, "wake_batch=%u\n", sbq->wake_batch); seq_printf(m, "wake_index=%d\n", atomic_read(&sbq->wake_index)); + seq_printf(m, "ws_active=%d\n", atomic_read(&sbq->ws_active)); seq_puts(m, "ws={\n"); for (i = 0; i < SBQ_WAIT_QUEUES; i++) { @@ -579,3 +663,48 @@ void sbitmap_queue_show(struct sbitmap_queue *sbq, struct seq_file *m) seq_printf(m, "min_shallow_depth=%u\n", sbq->min_shallow_depth); } EXPORT_SYMBOL_GPL(sbitmap_queue_show); + +void sbitmap_add_wait_queue(struct sbitmap_queue *sbq, + struct sbq_wait_state *ws, + struct sbq_wait *sbq_wait) +{ + if (!sbq_wait->sbq) { + sbq_wait->sbq = sbq; + atomic_inc(&sbq->ws_active); + } + add_wait_queue(&ws->wait, &sbq_wait->wait); +} +EXPORT_SYMBOL_GPL(sbitmap_add_wait_queue); + +void sbitmap_del_wait_queue(struct sbq_wait *sbq_wait) +{ + list_del_init(&sbq_wait->wait.entry); + if (sbq_wait->sbq) { + atomic_dec(&sbq_wait->sbq->ws_active); + sbq_wait->sbq = NULL; + } +} +EXPORT_SYMBOL_GPL(sbitmap_del_wait_queue); + +void sbitmap_prepare_to_wait(struct sbitmap_queue *sbq, + struct sbq_wait_state *ws, + struct sbq_wait *sbq_wait, int state) +{ + if (!sbq_wait->sbq) { + atomic_inc(&sbq->ws_active); + sbq_wait->sbq = sbq; + } + prepare_to_wait_exclusive(&ws->wait, &sbq_wait->wait, state); +} +EXPORT_SYMBOL_GPL(sbitmap_prepare_to_wait); + +void sbitmap_finish_wait(struct sbitmap_queue *sbq, struct sbq_wait_state *ws, + struct sbq_wait *sbq_wait) +{ + finish_wait(&ws->wait, &sbq_wait->wait); + if (sbq_wait->sbq) { + atomic_dec(&sbq->ws_active); + sbq_wait->sbq = NULL; + } +} +EXPORT_SYMBOL_GPL(sbitmap_finish_wait); diff --git a/lib/scatterlist.c b/lib/scatterlist.c index 7c6096a71704..9ba349e775ef 100644 --- a/lib/scatterlist.c +++ b/lib/scatterlist.c @@ -271,7 +271,7 @@ int __sg_alloc_table(struct sg_table *table, unsigned int nents, if (nents == 0) return -EINVAL; -#ifndef CONFIG_ARCH_HAS_SG_CHAIN +#ifdef CONFIG_ARCH_NO_SG_CHAIN if (WARN_ON_ONCE(nents > max_ents)) return -EINVAL; #endif diff --git a/lib/seq_buf.c b/lib/seq_buf.c index 11f2ae0f9099..bd807f545a9d 100644 --- a/lib/seq_buf.c +++ b/lib/seq_buf.c @@ -140,13 +140,17 @@ int seq_buf_bprintf(struct seq_buf *s, const char *fmt, const u32 *binary) */ int seq_buf_puts(struct seq_buf *s, const char *str) { - unsigned int len = strlen(str); + size_t len = strlen(str); WARN_ON(s->size == 0); + /* Add 1 to len for the trailing null byte which must be there */ + len += 1; + if (seq_buf_can_fit(s, len)) { memcpy(s->buffer + s->len, str, len); - s->len += len; + /* Don't count the trailing null byte against the capacity */ + s->len += len - 1; return 0; } seq_buf_set_overflow(s); diff --git a/lib/sg_pool.c b/lib/sg_pool.c index 6dd30615a201..d1c1e6388eaa 100644 --- a/lib/sg_pool.c +++ b/lib/sg_pool.c @@ -148,10 +148,9 @@ static __init int sg_pool_init(void) cleanup_sdb: for (i = 0; i < SG_MEMPOOL_NR; i++) { struct sg_pool *sgp = sg_pools + i; - if (sgp->pool) - mempool_destroy(sgp->pool); - if (sgp->slab) - kmem_cache_destroy(sgp->slab); + + mempool_destroy(sgp->pool); + kmem_cache_destroy(sgp->slab); } return -ENOMEM; diff --git a/lib/show_mem.c 
b/lib/show_mem.c index 0beaa1d899aa..6a042f53e7bb 100644 --- a/lib/show_mem.c +++ b/lib/show_mem.c @@ -18,22 +18,19 @@ void show_mem(unsigned int filter, nodemask_t *nodemask) show_free_areas(filter, nodemask); for_each_online_pgdat(pgdat) { - unsigned long flags; int zoneid; - pgdat_resize_lock(pgdat, &flags); for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) { struct zone *zone = &pgdat->node_zones[zoneid]; if (!populated_zone(zone)) continue; total += zone->present_pages; - reserved += zone->present_pages - zone->managed_pages; + reserved += zone->present_pages - zone_managed_pages(zone); if (is_highmem_idx(zoneid)) highmem += zone->present_pages; } - pgdat_resize_unlock(pgdat, &flags); } printk("%lu pages RAM\n", total); diff --git a/lib/smp_processor_id.c b/lib/smp_processor_id.c index 85925aaa4fff..157d9e31f6c2 100644 --- a/lib/smp_processor_id.c +++ b/lib/smp_processor_id.c @@ -5,10 +5,11 @@ * DEBUG_PREEMPT variant of smp_processor_id(). */ #include <linux/export.h> +#include <linux/kprobes.h> #include <linux/sched.h> -notrace static unsigned int check_preemption_disabled(const char *what1, - const char *what2) +notrace static nokprobe_inline +unsigned int check_preemption_disabled(const char *what1, const char *what2) { int this_cpu = raw_smp_processor_id(); @@ -56,9 +57,11 @@ notrace unsigned int debug_smp_processor_id(void) return check_preemption_disabled("smp_processor_id", ""); } EXPORT_SYMBOL(debug_smp_processor_id); +NOKPROBE_SYMBOL(debug_smp_processor_id); notrace void __this_cpu_preempt_check(const char *op) { check_preemption_disabled("__this_cpu_", op); } EXPORT_SYMBOL(__this_cpu_preempt_check); +NOKPROBE_SYMBOL(__this_cpu_preempt_check); diff --git a/lib/string.c b/lib/string.c index 2c0900a5d51a..38e4ca08e757 100644 --- a/lib/string.c +++ b/lib/string.c @@ -27,6 +27,7 @@ #include <linux/export.h> #include <linux/bug.h> #include <linux/errno.h> +#include <linux/slab.h> #include <asm/byteorder.h> #include <asm/word-at-a-time.h> diff --git a/lib/strncpy_from_user.c b/lib/strncpy_from_user.c index b53e1b5d80f4..58eacd41526c 100644 --- a/lib/strncpy_from_user.c +++ b/lib/strncpy_from_user.c @@ -114,10 +114,11 @@ long strncpy_from_user(char *dst, const char __user *src, long count) kasan_check_write(dst, count); check_object_size(dst, count, false); - user_access_begin(); - retval = do_strncpy_from_user(dst, src, count, max); - user_access_end(); - return retval; + if (user_access_begin(src, max)) { + retval = do_strncpy_from_user(dst, src, count, max); + user_access_end(); + return retval; + } } return -EFAULT; } diff --git a/lib/strnlen_user.c b/lib/strnlen_user.c index 60d0bbda8f5e..1c1a1b0e38a5 100644 --- a/lib/strnlen_user.c +++ b/lib/strnlen_user.c @@ -114,10 +114,11 @@ long strnlen_user(const char __user *str, long count) unsigned long max = max_addr - src_addr; long retval; - user_access_begin(); - retval = do_strnlen_user(str, count, max); - user_access_end(); - return retval; + if (user_access_begin(str, max)) { + retval = do_strnlen_user(str, count, max); + user_access_end(); + return retval; + } } return 0; } diff --git a/lib/test_bpf.c b/lib/test_bpf.c index aa22bcaec1dc..0845f635f404 100644 --- a/lib/test_bpf.c +++ b/lib/test_bpf.c @@ -39,6 +39,7 @@ #define SKB_HASH 0x1234aaab #define SKB_QUEUE_MAP 123 #define SKB_VLAN_TCI 0xffff +#define SKB_VLAN_PRESENT 1 #define SKB_DEV_IFINDEX 577 #define SKB_DEV_TYPE 588 @@ -725,8 +726,8 @@ static struct bpf_test tests[] = { CLASSIC, { }, { - { 1, SKB_VLAN_TCI & ~VLAN_TAG_PRESENT }, - { 10, SKB_VLAN_TCI & ~VLAN_TAG_PRESENT 
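The strncpy_from_user() and strnlen_user() hunks above adopt the range-checked form of user_access_begin(ptr, len), which can now fail: the unsafe region only opens once the whole user range validates. A sketch of that calling convention (my_get_word is a hypothetical helper, not part of this patch):

#include <linux/uaccess.h>

static long my_get_word(unsigned long *dst, const unsigned long __user *src)
{
	unsigned long v;

	if (!user_access_begin(src, sizeof(*src)))
		return -EFAULT;			/* range rejected up front */
	unsafe_get_user(v, src, efault);	/* no per-access checking */
	user_access_end();
	*dst = v;
	return 0;

efault:
	user_access_end();
	return -EFAULT;
}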
} + { 1, SKB_VLAN_TCI }, + { 10, SKB_VLAN_TCI } }, }, { @@ -739,8 +740,8 @@ static struct bpf_test tests[] = { CLASSIC, { }, { - { 1, !!(SKB_VLAN_TCI & VLAN_TAG_PRESENT) }, - { 10, !!(SKB_VLAN_TCI & VLAN_TAG_PRESENT) } + { 1, SKB_VLAN_PRESENT }, + { 10, SKB_VLAN_PRESENT } }, }, { @@ -5289,8 +5290,8 @@ static struct bpf_test tests[] = { #endif { }, { - { 1, !!(SKB_VLAN_TCI & VLAN_TAG_PRESENT) }, - { 10, !!(SKB_VLAN_TCI & VLAN_TAG_PRESENT) } + { 1, SKB_VLAN_PRESENT }, + { 10, SKB_VLAN_PRESENT } }, .fill_helper = bpf_fill_maxinsns6, .expected_errcode = -ENOTSUPP, @@ -6493,6 +6494,7 @@ static struct sk_buff *populate_skb(char *buf, int size) skb->hash = SKB_HASH; skb->queue_mapping = SKB_QUEUE_MAP; skb->vlan_tci = SKB_VLAN_TCI; + skb->vlan_present = SKB_VLAN_PRESENT; skb->vlan_proto = htons(ETH_P_IP); dev_net_set(&dev, &init_net); skb->dev = &dev; @@ -6666,12 +6668,14 @@ static int __run_one(const struct bpf_prog *fp, const void *data, u64 start, finish; int ret = 0, i; + preempt_disable(); start = ktime_get_ns(); for (i = 0; i < runs; i++) ret = BPF_PROG_RUN(fp, data); finish = ktime_get_ns(); + preempt_enable(); *duration = finish - start; do_div(*duration, runs); diff --git a/lib/test_debug_virtual.c b/lib/test_debug_virtual.c index d5a06addeb27..bf864c73e462 100644 --- a/lib/test_debug_virtual.c +++ b/lib/test_debug_virtual.c @@ -5,6 +5,7 @@ #include <linux/vmalloc.h> #include <linux/slab.h> #include <linux/sizes.h> +#include <linux/io.h> #include <asm/page.h> #ifdef CONFIG_MIPS diff --git a/lib/test_firmware.c b/lib/test_firmware.c index b984806d7d7b..7222093ee00b 100644 --- a/lib/test_firmware.c +++ b/lib/test_firmware.c @@ -631,11 +631,6 @@ static ssize_t trigger_batched_requests_store(struct device *dev, for (i = 0; i < test_fw_config->num_requests; i++) { req = &test_fw_config->reqs[i]; - if (!req) { - WARN_ON(1); - rc = -ENOMEM; - goto out_bail; - } req->fw = NULL; req->idx = i; req->name = test_fw_config->name; @@ -737,10 +732,6 @@ ssize_t trigger_batched_requests_async_store(struct device *dev, for (i = 0; i < test_fw_config->num_requests; i++) { req = &test_fw_config->reqs[i]; - if (!req) { - WARN_ON(1); - goto out_bail; - } req->name = test_fw_config->name; req->fw = NULL; req->idx = i; @@ -837,6 +828,7 @@ static ssize_t read_firmware_show(struct device *dev, if (req->fw->size > PAGE_SIZE) { pr_err("Testing interface must use PAGE_SIZE firmware for now\n"); rc = -EINVAL; + goto out; } memcpy(buf, req->fw->data, req->fw->size); diff --git a/lib/test_hexdump.c b/lib/test_hexdump.c index 626f580b4ff7..5144899d3c6b 100644 --- a/lib/test_hexdump.c +++ b/lib/test_hexdump.c @@ -99,7 +99,7 @@ static void __init test_hexdump_prepare_test(size_t len, int rowsize, const char *q = *result++; size_t amount = strlen(q); - strncpy(p, q, amount); + memcpy(p, q, amount); p += amount; *p++ = ' '; diff --git a/lib/test_kasan.c b/lib/test_kasan.c index ec657105edbf..7de2702621dc 100644 --- a/lib/test_kasan.c +++ b/lib/test_kasan.c @@ -480,29 +480,6 @@ static noinline void __init copy_user_test(void) kfree(kmem); } -static noinline void __init use_after_scope_test(void) -{ - volatile char *volatile p; - - pr_info("use-after-scope on int\n"); - { - int local = 0; - - p = (char *)&local; - } - p[0] = 1; - p[3] = 1; - - pr_info("use-after-scope on array\n"); - { - char local[1024] = {0}; - - p = local; - } - p[0] = 1; - p[1023] = 1; -} - static noinline void __init kasan_alloca_oob_left(void) { volatile int i = 10; @@ -579,6 +556,73 @@ static noinline void __init kmem_cache_invalid_free(void) 
kmem_cache_destroy(cache); } +static noinline void __init kasan_memchr(void) +{ + char *ptr; + size_t size = 24; + + pr_info("out-of-bounds in memchr\n"); + ptr = kmalloc(size, GFP_KERNEL | __GFP_ZERO); + if (!ptr) + return; + + memchr(ptr, '1', size + 1); + kfree(ptr); +} + +static noinline void __init kasan_memcmp(void) +{ + char *ptr; + size_t size = 24; + int arr[9]; + + pr_info("out-of-bounds in memcmp\n"); + ptr = kmalloc(size, GFP_KERNEL | __GFP_ZERO); + if (!ptr) + return; + + memset(arr, 0, sizeof(arr)); + memcmp(ptr, arr, size+1); + kfree(ptr); +} + +static noinline void __init kasan_strings(void) +{ + char *ptr; + size_t size = 24; + + pr_info("use-after-free in strchr\n"); + ptr = kmalloc(size, GFP_KERNEL | __GFP_ZERO); + if (!ptr) + return; + + kfree(ptr); + + /* + * Try to cause only 1 invalid access (less spam in dmesg). + * For that we need ptr to point to zeroed byte. + * Skip metadata that could be stored in freed object so ptr + * will likely point to zeroed byte. + */ + ptr += 16; + strchr(ptr, '1'); + + pr_info("use-after-free in strrchr\n"); + strrchr(ptr, '1'); + + pr_info("use-after-free in strcmp\n"); + strcmp(ptr, "2"); + + pr_info("use-after-free in strncmp\n"); + strncmp(ptr, "2", 1); + + pr_info("use-after-free in strlen\n"); + strlen(ptr); + + pr_info("use-after-free in strnlen\n"); + strnlen(ptr, 1); +} + static int __init kmalloc_tests_init(void) { /* @@ -615,9 +659,11 @@ static int __init kmalloc_tests_init(void) kasan_alloca_oob_right(); ksize_unpoisons_memory(); copy_user_test(); - use_after_scope_test(); kmem_cache_double_free(); kmem_cache_invalid_free(); + kasan_memchr(); + kasan_memcmp(); + kasan_strings(); kasan_restore_multi_shot(multishot); diff --git a/lib/test_kmod.c b/lib/test_kmod.c index e3ddd836491f..9cf77628fc91 100644 --- a/lib/test_kmod.c +++ b/lib/test_kmod.c @@ -632,7 +632,7 @@ static void __kmod_config_free(struct test_config *config) config->test_driver = NULL; kfree_const(config->test_fs); - config->test_driver = NULL; + config->test_fs = NULL; } static void kmod_config_free(struct kmod_test_device *test_dev) @@ -1214,7 +1214,6 @@ void unregister_test_dev_kmod(struct kmod_test_device *test_dev) dev_info(test_dev->dev, "removing interface\n"); misc_deregister(&test_dev->misc_dev); - kfree(&test_dev->misc_dev.name); mutex_unlock(&test_dev->config_mutex); mutex_unlock(&test_dev->trigger_mutex); diff --git a/lib/test_memcat_p.c b/lib/test_memcat_p.c new file mode 100644 index 000000000000..849c477d49d0 --- /dev/null +++ b/lib/test_memcat_p.c @@ -0,0 +1,115 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Test cases for memcat_p() in lib/memcat_p.c + */ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/string.h> +#include <linux/slab.h> +#include <linux/module.h> + +struct test_struct { + int num; + unsigned int magic; +}; + +#define MAGIC 0xf00ff00f +/* Size of each of the NULL-terminated input arrays */ +#define INPUT_MAX 128 +/* Expected number of non-NULL elements in the output array */ +#define EXPECT (INPUT_MAX * 2 - 2) + +static int __init test_memcat_p_init(void) +{ + struct test_struct **in0, **in1, **out, **p; + int err = -ENOMEM, i, r, total = 0; + + in0 = kcalloc(INPUT_MAX, sizeof(*in0), GFP_KERNEL); + if (!in0) + return err; + + in1 = kcalloc(INPUT_MAX, sizeof(*in1), GFP_KERNEL); + if (!in1) + goto err_free_in0; + + for (i = 0, r = 1; i < INPUT_MAX - 1; i++) { + in0[i] = kmalloc(sizeof(**in0), GFP_KERNEL); + if (!in0[i]) + goto err_free_elements; + + in1[i] = kmalloc(sizeof(**in1), GFP_KERNEL); + if (!in1[i]) { + 
kfree(in0[i]); + goto err_free_elements; + } + + /* lifted from test_sort.c */ + r = (r * 725861) % 6599; + in0[i]->num = r; + in1[i]->num = -r; + in0[i]->magic = MAGIC; + in1[i]->magic = MAGIC; + } + + in0[i] = in1[i] = NULL; + + out = memcat_p(in0, in1); + if (!out) + goto err_free_all_elements; + + err = -EINVAL; + for (i = 0, p = out; *p && (i < INPUT_MAX * 2 - 1); p++, i++) { + total += (*p)->num; + + if ((*p)->magic != MAGIC) { + pr_err("test failed: wrong magic at %d: %u\n", i, + (*p)->magic); + goto err_free_out; + } + } + + if (total) { + pr_err("test failed: expected zero total, got %d\n", total); + goto err_free_out; + } + + if (i != EXPECT) { + pr_err("test failed: expected output size %d, got %d\n", + EXPECT, i); + goto err_free_out; + } + + for (i = 0; i < INPUT_MAX - 1; i++) + if (out[i] != in0[i] || out[i + INPUT_MAX - 1] != in1[i]) { + pr_err("test failed: wrong element order at %d\n", i); + goto err_free_out; + } + + err = 0; + pr_info("test passed\n"); + +err_free_out: + kfree(out); +err_free_all_elements: + i = INPUT_MAX; +err_free_elements: + for (i--; i >= 0; i--) { + kfree(in1[i]); + kfree(in0[i]); + } + + kfree(in1); +err_free_in0: + kfree(in0); + + return err; +} + +static void __exit test_memcat_p_exit(void) +{ +} + +module_init(test_memcat_p_init); +module_exit(test_memcat_p_exit); + +MODULE_LICENSE("GPL"); diff --git a/lib/test_objagg.c b/lib/test_objagg.c new file mode 100644 index 000000000000..72c1abfa154d --- /dev/null +++ b/lib/test_objagg.c @@ -0,0 +1,1021 @@ +// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 +/* Copyright (c) 2018 Mellanox Technologies. All rights reserved */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/slab.h> +#include <linux/random.h> +#include <linux/objagg.h> + +struct tokey { + unsigned int id; +}; + +#define NUM_KEYS 32 + +static int key_id_index(unsigned int key_id) +{ + if (key_id >= NUM_KEYS) { + WARN_ON(1); + return 0; + } + return key_id; +} + +#define BUF_LEN 128 + +struct world { + unsigned int root_count; + unsigned int delta_count; + char next_root_buf[BUF_LEN]; + struct objagg_obj *objagg_objs[NUM_KEYS]; + unsigned int key_refs[NUM_KEYS]; +}; + +struct root { + struct tokey key; + char buf[BUF_LEN]; +}; + +struct delta { + unsigned int key_id_diff; +}; + +static struct objagg_obj *world_obj_get(struct world *world, + struct objagg *objagg, + unsigned int key_id) +{ + struct objagg_obj *objagg_obj; + struct tokey key; + int err; + + key.id = key_id; + objagg_obj = objagg_obj_get(objagg, &key); + if (IS_ERR(objagg_obj)) { + pr_err("Key %u: Failed to get object.\n", key_id); + return objagg_obj; + } + if (!world->key_refs[key_id_index(key_id)]) { + world->objagg_objs[key_id_index(key_id)] = objagg_obj; + } else if (world->objagg_objs[key_id_index(key_id)] != objagg_obj) { + pr_err("Key %u: Got another object for the same key.\n", + key_id); + err = -EINVAL; + goto err_key_id_check; + } + world->key_refs[key_id_index(key_id)]++; + return objagg_obj; + +err_key_id_check: + objagg_obj_put(objagg, objagg_obj); + return ERR_PTR(err); +} + +static void world_obj_put(struct world *world, struct objagg *objagg, + unsigned int key_id) +{ + struct objagg_obj *objagg_obj; + + if (!world->key_refs[key_id_index(key_id)]) + return; + objagg_obj = world->objagg_objs[key_id_index(key_id)]; + objagg_obj_put(objagg, objagg_obj); + world->key_refs[key_id_index(key_id)]--; +} + +#define MAX_KEY_ID_DIFF 5 + +static bool delta_check(void *priv, const void *parent_obj,
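The selftest above pins down memcat_p()'s contract: two NULL-terminated pointer arrays concatenated into one freshly allocated, NULL-terminated array with element pointers preserved in order. A user-space model of that contract (the kernel version lives in lib/memcat_p.c and allocates with kmalloc):

#include <stdlib.h>
#include <string.h>

static void **toy_memcat_p(void **a, void **b)
{
	size_t na = 0, nb = 0;
	void **out;

	while (a[na])
		na++;
	while (b[nb])
		nb++;
	/* na + nb payload pointers plus the terminating NULL */
	out = malloc((na + nb + 1) * sizeof(*out));
	if (!out)
		return NULL;
	memcpy(out, a, na * sizeof(*out));
	memcpy(out + na, b, nb * sizeof(*out));
	out[na + nb] = NULL;
	return out;
}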
const void *obj) +{ + const struct tokey *parent_key = parent_obj; + const struct tokey *key = obj; + int diff = key->id - parent_key->id; + + return diff >= 0 && diff <= MAX_KEY_ID_DIFF; +} + +static void *delta_create(void *priv, void *parent_obj, void *obj) +{ + struct tokey *parent_key = parent_obj; + struct world *world = priv; + struct tokey *key = obj; + int diff = key->id - parent_key->id; + struct delta *delta; + + if (!delta_check(priv, parent_obj, obj)) + return ERR_PTR(-EINVAL); + + delta = kzalloc(sizeof(*delta), GFP_KERNEL); + if (!delta) + return ERR_PTR(-ENOMEM); + delta->key_id_diff = diff; + world->delta_count++; + return delta; +} + +static void delta_destroy(void *priv, void *delta_priv) +{ + struct delta *delta = delta_priv; + struct world *world = priv; + + world->delta_count--; + kfree(delta); +} + +static void *root_create(void *priv, void *obj, unsigned int id) +{ + struct world *world = priv; + struct tokey *key = obj; + struct root *root; + + root = kzalloc(sizeof(*root), GFP_KERNEL); + if (!root) + return ERR_PTR(-ENOMEM); + memcpy(&root->key, key, sizeof(root->key)); + memcpy(root->buf, world->next_root_buf, sizeof(root->buf)); + world->root_count++; + return root; +} + +static void root_destroy(void *priv, void *root_priv) +{ + struct root *root = root_priv; + struct world *world = priv; + + world->root_count--; + kfree(root); +} + +static int test_nodelta_obj_get(struct world *world, struct objagg *objagg, + unsigned int key_id, bool should_create_root) +{ + unsigned int orig_root_count = world->root_count; + struct objagg_obj *objagg_obj; + const struct root *root; + int err; + + if (should_create_root) + prandom_bytes(world->next_root_buf, + sizeof(world->next_root_buf)); + + objagg_obj = world_obj_get(world, objagg, key_id); + if (IS_ERR(objagg_obj)) { + pr_err("Key %u: Failed to get object.\n", key_id); + return PTR_ERR(objagg_obj); + } + if (should_create_root) { + if (world->root_count != orig_root_count + 1) { + pr_err("Key %u: Root was not created\n", key_id); + err = -EINVAL; + goto err_check_root_count; + } + } else { + if (world->root_count != orig_root_count) { + pr_err("Key %u: Root was incorrectly created\n", + key_id); + err = -EINVAL; + goto err_check_root_count; + } + } + root = objagg_obj_root_priv(objagg_obj); + if (root->key.id != key_id) { + pr_err("Key %u: Root has unexpected key id\n", key_id); + err = -EINVAL; + goto err_check_key_id; + } + if (should_create_root && + memcmp(world->next_root_buf, root->buf, sizeof(root->buf))) { + pr_err("Key %u: Buffer does not match the expected content\n", + key_id); + err = -EINVAL; + goto err_check_buf; + } + return 0; + +err_check_buf: +err_check_key_id: +err_check_root_count: + objagg_obj_put(objagg, objagg_obj); + return err; +} + +static int test_nodelta_obj_put(struct world *world, struct objagg *objagg, + unsigned int key_id, bool should_destroy_root) +{ + unsigned int orig_root_count = world->root_count; + + world_obj_put(world, objagg, key_id); + + if (should_destroy_root) { + if (world->root_count != orig_root_count - 1) { + pr_err("Key %u: Root was not destroyed\n", key_id); + return -EINVAL; + } + } else { + if (world->root_count != orig_root_count) { + pr_err("Key %u: Root was incorrectly destroyed\n", + key_id); + return -EINVAL; + } + } + return 0; +} + +static int check_stats_zero(struct objagg *objagg) +{ + const struct objagg_stats *stats; + int err = 0; + + stats = objagg_stats_get(objagg); + if (IS_ERR(stats)) + return PTR_ERR(stats); + + if (stats->stats_info_count != 0) { + 
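The delta_check()/delta_create()/root_create() callbacks above encode this test's aggregation rule: a key at most MAX_KEY_ID_DIFF above an existing root is stored as a delta against it, anything else is promoted to a new root. A toy of that classification step (the real manager also reference-counts roots and deltas, which the stats checks below verify):

#define MAX_KEY_ID_DIFF 5

/* Returns 1 and sets *parent when key_id fits as a delta off an
 * existing root, 0 when it must become a new root of its own. */
static int toy_classify(const unsigned int *roots, int nroots,
			unsigned int key_id, unsigned int *parent)
{
	int i;

	for (i = 0; i < nroots; i++) {
		if (key_id >= roots[i] &&
		    key_id - roots[i] <= MAX_KEY_ID_DIFF) {
			*parent = roots[i];
			return 1;	/* delta */
		}
	}
	return 0;	/* new root */
}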
pr_err("Stats: Object count is not zero while it should be\n"); + err = -EINVAL; + } + + objagg_stats_put(stats); + return err; +} + +static int check_stats_nodelta(struct objagg *objagg) +{ + const struct objagg_stats *stats; + int i; + int err; + + stats = objagg_stats_get(objagg); + if (IS_ERR(stats)) + return PTR_ERR(stats); + + if (stats->stats_info_count != NUM_KEYS) { + pr_err("Stats: Unexpected object count (%u expected, %u returned)\n", + NUM_KEYS, stats->stats_info_count); + err = -EINVAL; + goto stats_put; + } + + for (i = 0; i < stats->stats_info_count; i++) { + if (stats->stats_info[i].stats.user_count != 2) { + pr_err("Stats: incorrect user count\n"); + err = -EINVAL; + goto stats_put; + } + if (stats->stats_info[i].stats.delta_user_count != 2) { + pr_err("Stats: incorrect delta user count\n"); + err = -EINVAL; + goto stats_put; + } + } + err = 0; + +stats_put: + objagg_stats_put(stats); + return err; +} + +static bool delta_check_dummy(void *priv, const void *parent_obj, + const void *obj) +{ + return false; +} + +static void *delta_create_dummy(void *priv, void *parent_obj, void *obj) +{ + return ERR_PTR(-EOPNOTSUPP); +} + +static void delta_destroy_dummy(void *priv, void *delta_priv) +{ +} + +static const struct objagg_ops nodelta_ops = { + .obj_size = sizeof(struct tokey), + .delta_check = delta_check_dummy, + .delta_create = delta_create_dummy, + .delta_destroy = delta_destroy_dummy, + .root_create = root_create, + .root_destroy = root_destroy, +}; + +static int test_nodelta(void) +{ + struct world world = {}; + struct objagg *objagg; + int i; + int err; + + objagg = objagg_create(&nodelta_ops, NULL, &world); + if (IS_ERR(objagg)) + return PTR_ERR(objagg); + + err = check_stats_zero(objagg); + if (err) + goto err_stats_first_zero; + + /* First round of gets, the root objects should be created */ + for (i = 0; i < NUM_KEYS; i++) { + err = test_nodelta_obj_get(&world, objagg, i, true); + if (err) + goto err_obj_first_get; + } + + /* Do the second round of gets, all roots are already created, + * make sure that no new root is created + */ + for (i = 0; i < NUM_KEYS; i++) { + err = test_nodelta_obj_get(&world, objagg, i, false); + if (err) + goto err_obj_second_get; + } + + err = check_stats_nodelta(objagg); + if (err) + goto err_stats_nodelta; + + for (i = NUM_KEYS - 1; i >= 0; i--) { + err = test_nodelta_obj_put(&world, objagg, i, false); + if (err) + goto err_obj_first_put; + } + for (i = NUM_KEYS - 1; i >= 0; i--) { + err = test_nodelta_obj_put(&world, objagg, i, true); + if (err) + goto err_obj_second_put; + } + + err = check_stats_zero(objagg); + if (err) + goto err_stats_second_zero; + + objagg_destroy(objagg); + return 0; + +err_stats_nodelta: +err_obj_first_put: +err_obj_second_get: + for (i--; i >= 0; i--) + world_obj_put(&world, objagg, i); + + i = NUM_KEYS; +err_obj_first_get: +err_obj_second_put: + for (i--; i >= 0; i--) + world_obj_put(&world, objagg, i); +err_stats_first_zero: +err_stats_second_zero: + objagg_destroy(objagg); + return err; +} + +static const struct objagg_ops delta_ops = { + .obj_size = sizeof(struct tokey), + .delta_check = delta_check, + .delta_create = delta_create, + .delta_destroy = delta_destroy, + .root_create = root_create, + .root_destroy = root_destroy, +}; + +enum action { + ACTION_GET, + ACTION_PUT, +}; + +enum expect_delta { + EXPECT_DELTA_SAME, + EXPECT_DELTA_INC, + EXPECT_DELTA_DEC, +}; + +enum expect_root { + EXPECT_ROOT_SAME, + EXPECT_ROOT_INC, + EXPECT_ROOT_DEC, +}; + +struct expect_stats_info { + struct objagg_obj_stats 
stats; + bool is_root; + unsigned int key_id; +}; + +struct expect_stats { + unsigned int info_count; + struct expect_stats_info info[NUM_KEYS]; +}; + +struct action_item { + unsigned int key_id; + enum action action; + enum expect_delta expect_delta; + enum expect_root expect_root; + struct expect_stats expect_stats; +}; + +#define EXPECT_STATS(count, ...) \ +{ \ + .info_count = count, \ + .info = { __VA_ARGS__ } \ +} + +#define ROOT(key_id, user_count, delta_user_count) \ + {{user_count, delta_user_count}, true, key_id} + +#define DELTA(key_id, user_count) \ + {{user_count, user_count}, false, key_id} + +static const struct action_item action_items[] = { + { + 1, ACTION_GET, EXPECT_DELTA_SAME, EXPECT_ROOT_INC, + EXPECT_STATS(1, ROOT(1, 1, 1)), + }, /* r: 1 d: */ + { + 7, ACTION_GET, EXPECT_DELTA_SAME, EXPECT_ROOT_INC, + EXPECT_STATS(2, ROOT(1, 1, 1), ROOT(7, 1, 1)), + }, /* r: 1, 7 d: */ + { + 3, ACTION_GET, EXPECT_DELTA_INC, EXPECT_ROOT_SAME, + EXPECT_STATS(3, ROOT(1, 1, 2), ROOT(7, 1, 1), + DELTA(3, 1)), + }, /* r: 1, 7 d: 3^1 */ + { + 5, ACTION_GET, EXPECT_DELTA_INC, EXPECT_ROOT_SAME, + EXPECT_STATS(4, ROOT(1, 1, 3), ROOT(7, 1, 1), + DELTA(3, 1), DELTA(5, 1)), + }, /* r: 1, 7 d: 3^1, 5^1 */ + { + 3, ACTION_GET, EXPECT_DELTA_SAME, EXPECT_ROOT_SAME, + EXPECT_STATS(4, ROOT(1, 1, 4), ROOT(7, 1, 1), + DELTA(3, 2), DELTA(5, 1)), + }, /* r: 1, 7 d: 3^1, 3^1, 5^1 */ + { + 1, ACTION_GET, EXPECT_DELTA_SAME, EXPECT_ROOT_SAME, + EXPECT_STATS(4, ROOT(1, 2, 5), ROOT(7, 1, 1), + DELTA(3, 2), DELTA(5, 1)), + }, /* r: 1, 1, 7 d: 3^1, 3^1, 5^1 */ + { + 30, ACTION_GET, EXPECT_DELTA_SAME, EXPECT_ROOT_INC, + EXPECT_STATS(5, ROOT(1, 2, 5), ROOT(7, 1, 1), ROOT(30, 1, 1), + DELTA(3, 2), DELTA(5, 1)), + }, /* r: 1, 1, 7, 30 d: 3^1, 3^1, 5^1 */ + { + 8, ACTION_GET, EXPECT_DELTA_INC, EXPECT_ROOT_SAME, + EXPECT_STATS(6, ROOT(1, 2, 5), ROOT(7, 1, 2), ROOT(30, 1, 1), + DELTA(3, 2), DELTA(5, 1), DELTA(8, 1)), + }, /* r: 1, 1, 7, 30 d: 3^1, 3^1, 5^1, 8^7 */ + { + 8, ACTION_GET, EXPECT_DELTA_SAME, EXPECT_ROOT_SAME, + EXPECT_STATS(6, ROOT(1, 2, 5), ROOT(7, 1, 3), ROOT(30, 1, 1), + DELTA(3, 2), DELTA(8, 2), DELTA(5, 1)), + }, /* r: 1, 1, 7, 30 d: 3^1, 3^1, 5^1, 8^7, 8^7 */ + { + 3, ACTION_PUT, EXPECT_DELTA_SAME, EXPECT_ROOT_SAME, + EXPECT_STATS(6, ROOT(1, 2, 4), ROOT(7, 1, 3), ROOT(30, 1, 1), + DELTA(8, 2), DELTA(3, 1), DELTA(5, 1)), + }, /* r: 1, 1, 7, 30 d: 3^1, 5^1, 8^7, 8^7 */ + { + 3, ACTION_PUT, EXPECT_DELTA_DEC, EXPECT_ROOT_SAME, + EXPECT_STATS(5, ROOT(1, 2, 3), ROOT(7, 1, 3), ROOT(30, 1, 1), + DELTA(8, 2), DELTA(5, 1)), + }, /* r: 1, 1, 7, 30 d: 5^1, 8^7, 8^7 */ + { + 1, ACTION_PUT, EXPECT_DELTA_SAME, EXPECT_ROOT_SAME, + EXPECT_STATS(5, ROOT(7, 1, 3), ROOT(1, 1, 2), ROOT(30, 1, 1), + DELTA(8, 2), DELTA(5, 1)), + }, /* r: 1, 7, 30 d: 5^1, 8^7, 8^7 */ + { + 1, ACTION_PUT, EXPECT_DELTA_SAME, EXPECT_ROOT_SAME, + EXPECT_STATS(5, ROOT(7, 1, 3), ROOT(30, 1, 1), ROOT(1, 0, 1), + DELTA(8, 2), DELTA(5, 1)), + }, /* r: 7, 30 d: 5^1, 8^7, 8^7 */ + { + 5, ACTION_PUT, EXPECT_DELTA_DEC, EXPECT_ROOT_DEC, + EXPECT_STATS(3, ROOT(7, 1, 3), ROOT(30, 1, 1), + DELTA(8, 2)), + }, /* r: 7, 30 d: 8^7, 8^7 */ + { + 5, ACTION_GET, EXPECT_DELTA_SAME, EXPECT_ROOT_INC, + EXPECT_STATS(4, ROOT(7, 1, 3), ROOT(30, 1, 1), ROOT(5, 1, 1), + DELTA(8, 2)), + }, /* r: 7, 30, 5 d: 8^7, 8^7 */ + { + 6, ACTION_GET, EXPECT_DELTA_INC, EXPECT_ROOT_SAME, + EXPECT_STATS(5, ROOT(7, 1, 3), ROOT(5, 1, 2), ROOT(30, 1, 1), + DELTA(8, 2), DELTA(6, 1)), + }, /* r: 7, 30, 5 d: 8^7, 8^7, 6^5 */ + { + 8, ACTION_GET, EXPECT_DELTA_SAME, EXPECT_ROOT_SAME, + 
EXPECT_STATS(5, ROOT(7, 1, 4), ROOT(5, 1, 2), ROOT(30, 1, 1), + DELTA(8, 3), DELTA(6, 1)), + }, /* r: 7, 30, 5 d: 8^7, 8^7, 8^7, 6^5 */ + { + 8, ACTION_PUT, EXPECT_DELTA_SAME, EXPECT_ROOT_SAME, + EXPECT_STATS(5, ROOT(7, 1, 3), ROOT(5, 1, 2), ROOT(30, 1, 1), + DELTA(8, 2), DELTA(6, 1)), + }, /* r: 7, 30, 5 d: 8^7, 8^7, 6^5 */ + { + 8, ACTION_PUT, EXPECT_DELTA_SAME, EXPECT_ROOT_SAME, + EXPECT_STATS(5, ROOT(7, 1, 2), ROOT(5, 1, 2), ROOT(30, 1, 1), + DELTA(8, 1), DELTA(6, 1)), + }, /* r: 7, 30, 5 d: 8^7, 6^5 */ + { + 8, ACTION_PUT, EXPECT_DELTA_DEC, EXPECT_ROOT_SAME, + EXPECT_STATS(4, ROOT(5, 1, 2), ROOT(7, 1, 1), ROOT(30, 1, 1), + DELTA(6, 1)), + }, /* r: 7, 30, 5 d: 6^5 */ + { + 8, ACTION_GET, EXPECT_DELTA_INC, EXPECT_ROOT_SAME, + EXPECT_STATS(5, ROOT(5, 1, 3), ROOT(7, 1, 1), ROOT(30, 1, 1), + DELTA(6, 1), DELTA(8, 1)), + }, /* r: 7, 30, 5 d: 6^5, 8^5 */ + { + 7, ACTION_PUT, EXPECT_DELTA_SAME, EXPECT_ROOT_DEC, + EXPECT_STATS(4, ROOT(5, 1, 3), ROOT(30, 1, 1), + DELTA(6, 1), DELTA(8, 1)), + }, /* r: 30, 5 d: 6^5, 8^5 */ + { + 30, ACTION_PUT, EXPECT_DELTA_SAME, EXPECT_ROOT_DEC, + EXPECT_STATS(3, ROOT(5, 1, 3), + DELTA(6, 1), DELTA(8, 1)), + }, /* r: 5 d: 6^5, 8^5 */ + { + 5, ACTION_PUT, EXPECT_DELTA_SAME, EXPECT_ROOT_SAME, + EXPECT_STATS(3, ROOT(5, 0, 2), + DELTA(6, 1), DELTA(8, 1)), + }, /* r: d: 6^5, 8^5 */ + { + 6, ACTION_PUT, EXPECT_DELTA_DEC, EXPECT_ROOT_SAME, + EXPECT_STATS(2, ROOT(5, 0, 1), + DELTA(8, 1)), + }, /* r: d: 6^5 */ + { + 8, ACTION_PUT, EXPECT_DELTA_DEC, EXPECT_ROOT_DEC, + EXPECT_STATS(0, ), + }, /* r: d: */ +}; + +static int check_expect(struct world *world, + const struct action_item *action_item, + unsigned int orig_delta_count, + unsigned int orig_root_count) +{ + unsigned int key_id = action_item->key_id; + + switch (action_item->expect_delta) { + case EXPECT_DELTA_SAME: + if (orig_delta_count != world->delta_count) { + pr_err("Key %u: Delta count changed while expected to remain the same.\n", + key_id); + return -EINVAL; + } + break; + case EXPECT_DELTA_INC: + if (WARN_ON(action_item->action == ACTION_PUT)) + return -EINVAL; + if (orig_delta_count + 1 != world->delta_count) { + pr_err("Key %u: Delta count was not incremented.\n", + key_id); + return -EINVAL; + } + break; + case EXPECT_DELTA_DEC: + if (WARN_ON(action_item->action == ACTION_GET)) + return -EINVAL; + if (orig_delta_count - 1 != world->delta_count) { + pr_err("Key %u: Delta count was not decremented.\n", + key_id); + return -EINVAL; + } + break; + } + + switch (action_item->expect_root) { + case EXPECT_ROOT_SAME: + if (orig_root_count != world->root_count) { + pr_err("Key %u: Root count changed while expected to remain the same.\n", + key_id); + return -EINVAL; + } + break; + case EXPECT_ROOT_INC: + if (WARN_ON(action_item->action == ACTION_PUT)) + return -EINVAL; + if (orig_root_count + 1 != world->root_count) { + pr_err("Key %u: Root count was not incremented.\n", + key_id); + return -EINVAL; + } + break; + case EXPECT_ROOT_DEC: + if (WARN_ON(action_item->action == ACTION_GET)) + return -EINVAL; + if (orig_root_count - 1 != world->root_count) { + pr_err("Key %u: Root count was not decremented.\n", + key_id); + return -EINVAL; + } + } + + return 0; +} + +static unsigned int obj_to_key_id(struct objagg_obj *objagg_obj) +{ + const struct tokey *root_key; + const struct delta *delta; + unsigned int key_id; + + root_key = objagg_obj_root_priv(objagg_obj); + key_id = root_key->id; + delta = objagg_obj_delta_priv(objagg_obj); + if (delta) + key_id += delta->key_id_diff; + return key_id; +} + +static int 
+check_expect_stats_nums(const struct objagg_obj_stats_info *stats_info, + const struct expect_stats_info *expect_stats_info, + const char **errmsg) +{ + if (stats_info->is_root != expect_stats_info->is_root) { + if (errmsg) + *errmsg = "Incorrect root/delta indication"; + return -EINVAL; + } + if (stats_info->stats.user_count != + expect_stats_info->stats.user_count) { + if (errmsg) + *errmsg = "Incorrect user count"; + return -EINVAL; + } + if (stats_info->stats.delta_user_count != + expect_stats_info->stats.delta_user_count) { + if (errmsg) + *errmsg = "Incorrect delta user count"; + return -EINVAL; + } + return 0; +} + +static int +check_expect_stats_key_id(const struct objagg_obj_stats_info *stats_info, + const struct expect_stats_info *expect_stats_info, + const char **errmsg) +{ + if (obj_to_key_id(stats_info->objagg_obj) != + expect_stats_info->key_id) { + if (errmsg) + *errmsg = "incorrect key id"; + return -EINVAL; + } + return 0; +} + +static int check_expect_stats_neigh(const struct objagg_stats *stats, + const struct expect_stats *expect_stats, + int pos) +{ + int i; + int err; + + for (i = pos - 1; i >= 0; i--) { + err = check_expect_stats_nums(&stats->stats_info[i], + &expect_stats->info[pos], NULL); + if (err) + break; + err = check_expect_stats_key_id(&stats->stats_info[i], + &expect_stats->info[pos], NULL); + if (!err) + return 0; + } + for (i = pos + 1; i < stats->stats_info_count; i++) { + err = check_expect_stats_nums(&stats->stats_info[i], + &expect_stats->info[pos], NULL); + if (err) + break; + err = check_expect_stats_key_id(&stats->stats_info[i], + &expect_stats->info[pos], NULL); + if (!err) + return 0; + } + return -EINVAL; +} + +static int __check_expect_stats(const struct objagg_stats *stats, + const struct expect_stats *expect_stats, + const char **errmsg) +{ + int i; + int err; + + if (stats->stats_info_count != expect_stats->info_count) { + *errmsg = "Unexpected object count"; + return -EINVAL; + } + + for (i = 0; i < stats->stats_info_count; i++) { + err = check_expect_stats_nums(&stats->stats_info[i], + &expect_stats->info[i], errmsg); + if (err) + return err; + err = check_expect_stats_key_id(&stats->stats_info[i], + &expect_stats->info[i], errmsg); + if (err) { + /* It is possible that one of the neighbor stats with + * same numbers have the correct key id, so check it + */ + err = check_expect_stats_neigh(stats, expect_stats, i); + if (err) + return err; + } + } + return 0; +} + +static int check_expect_stats(struct objagg *objagg, + const struct expect_stats *expect_stats, + const char **errmsg) +{ + const struct objagg_stats *stats; + int err; + + stats = objagg_stats_get(objagg); + if (IS_ERR(stats)) { + *errmsg = "objagg_stats_get() failed."; + return PTR_ERR(stats); + } + err = __check_expect_stats(stats, expect_stats, errmsg); + objagg_stats_put(stats); + return err; +} + +static int test_delta_action_item(struct world *world, + struct objagg *objagg, + const struct action_item *action_item, + bool inverse) +{ + unsigned int orig_delta_count = world->delta_count; + unsigned int orig_root_count = world->root_count; + unsigned int key_id = action_item->key_id; + enum action action = action_item->action; + struct objagg_obj *objagg_obj; + const char *errmsg; + int err; + + if (inverse) + action = action == ACTION_GET ? 
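/*
 * obj_to_key_id() above inverts the delta encoding: a delta records only
 * the offset of its key from the root's key, so the full key id is
 * recovered as root id plus that stored difference.  Self-contained
 * sketch; the example_* names are illustrative, not from the patch.
 */
struct example_delta {
        unsigned int key_id_diff;       /* offset from the root key id */
};

static unsigned int example_key_of(unsigned int root_id,
                                   const struct example_delta *delta)
{
        /* a root has no delta; a delta adds its offset to the root id */
        return delta ? root_id + delta->key_id_diff : root_id;
}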
ACTION_PUT : ACTION_GET; + + switch (action) { + case ACTION_GET: + objagg_obj = world_obj_get(world, objagg, key_id); + if (IS_ERR(objagg_obj)) + return PTR_ERR(objagg_obj); + break; + case ACTION_PUT: + world_obj_put(world, objagg, key_id); + break; + } + + if (inverse) + return 0; + err = check_expect(world, action_item, + orig_delta_count, orig_root_count); + if (err) + goto errout; + + err = check_expect_stats(objagg, &action_item->expect_stats, &errmsg); + if (err) { + pr_err("Key %u: Stats: %s\n", action_item->key_id, errmsg); + goto errout; + } + + return 0; + +errout: + /* This can only happen when action is not inversed. + * So in case of an error, cleanup by doing inverse action. + */ + test_delta_action_item(world, objagg, action_item, true); + return err; +} + +static int test_delta(void) +{ + struct world world = {}; + struct objagg *objagg; + int i; + int err; + + objagg = objagg_create(&delta_ops, NULL, &world); + if (IS_ERR(objagg)) + return PTR_ERR(objagg); + + for (i = 0; i < ARRAY_SIZE(action_items); i++) { + err = test_delta_action_item(&world, objagg, + &action_items[i], false); + if (err) + goto err_do_action_item; + } + + objagg_destroy(objagg); + return 0; + +err_do_action_item: + for (i--; i >= 0; i--) + test_delta_action_item(&world, objagg, &action_items[i], true); + + objagg_destroy(objagg); + return err; +} + +struct hints_case { + const unsigned int *key_ids; + size_t key_ids_count; + struct expect_stats expect_stats; + struct expect_stats expect_stats_hints; +}; + +static const unsigned int hints_case_key_ids[] = { + 1, 7, 3, 5, 3, 1, 30, 8, 8, 5, 6, 8, +}; + +static const struct hints_case hints_case = { + .key_ids = hints_case_key_ids, + .key_ids_count = ARRAY_SIZE(hints_case_key_ids), + .expect_stats = + EXPECT_STATS(7, ROOT(1, 2, 7), ROOT(7, 1, 4), ROOT(30, 1, 1), + DELTA(8, 3), DELTA(3, 2), + DELTA(5, 2), DELTA(6, 1)), + .expect_stats_hints = + EXPECT_STATS(7, ROOT(3, 2, 9), ROOT(1, 2, 2), ROOT(30, 1, 1), + DELTA(8, 3), DELTA(5, 2), + DELTA(6, 1), DELTA(7, 1)), +}; + +static void __pr_debug_stats(const struct objagg_stats *stats) +{ + int i; + + for (i = 0; i < stats->stats_info_count; i++) + pr_debug("Stat index %d key %u: u %d, d %d, %s\n", i, + obj_to_key_id(stats->stats_info[i].objagg_obj), + stats->stats_info[i].stats.user_count, + stats->stats_info[i].stats.delta_user_count, + stats->stats_info[i].is_root ? 
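/*
 * The expect_stats vs. expect_stats_hints tables above capture why hints
 * exist: the same key sequence aggregates differently when a second
 * objagg instance is seeded with hints computed on the first.  A sketch
 * of that flow, mirroring test_hints_case() below; example_use_hints()
 * is illustrative only.
 */
static int example_use_hints(struct objagg *old, struct world *world2)
{
        struct objagg_hints *hints;
        struct objagg *fresh;

        hints = objagg_hints_get(old, OBJAGG_OPT_ALGO_SIMPLE_GREEDY);
        if (IS_ERR(hints))
                return PTR_ERR(hints);
        /* seed a new instance with the precomputed aggregation layout */
        fresh = objagg_create(&delta_ops, hints, world2);
        if (IS_ERR(fresh)) {
                objagg_hints_put(hints);
                return PTR_ERR(fresh);
        }
        /* ... populate and compare stats ... */
        objagg_destroy(fresh);
        objagg_hints_put(hints);
        return 0;
}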
"root" : "noroot"); +} + +static void pr_debug_stats(struct objagg *objagg) +{ + const struct objagg_stats *stats; + + stats = objagg_stats_get(objagg); + if (IS_ERR(stats)) + return; + __pr_debug_stats(stats); + objagg_stats_put(stats); +} + +static void pr_debug_hints_stats(struct objagg_hints *objagg_hints) +{ + const struct objagg_stats *stats; + + stats = objagg_hints_stats_get(objagg_hints); + if (IS_ERR(stats)) + return; + __pr_debug_stats(stats); + objagg_stats_put(stats); +} + +static int check_expect_hints_stats(struct objagg_hints *objagg_hints, + const struct expect_stats *expect_stats, + const char **errmsg) +{ + const struct objagg_stats *stats; + int err; + + stats = objagg_hints_stats_get(objagg_hints); + if (IS_ERR(stats)) + return PTR_ERR(stats); + err = __check_expect_stats(stats, expect_stats, errmsg); + objagg_stats_put(stats); + return err; +} + +static int test_hints_case(const struct hints_case *hints_case) +{ + struct objagg_obj *objagg_obj; + struct objagg_hints *hints; + struct world world2 = {}; + struct world world = {}; + struct objagg *objagg2; + struct objagg *objagg; + const char *errmsg; + int i; + int err; + + objagg = objagg_create(&delta_ops, NULL, &world); + if (IS_ERR(objagg)) + return PTR_ERR(objagg); + + for (i = 0; i < hints_case->key_ids_count; i++) { + objagg_obj = world_obj_get(&world, objagg, + hints_case->key_ids[i]); + if (IS_ERR(objagg_obj)) { + err = PTR_ERR(objagg_obj); + goto err_world_obj_get; + } + } + + pr_debug_stats(objagg); + err = check_expect_stats(objagg, &hints_case->expect_stats, &errmsg); + if (err) { + pr_err("Stats: %s\n", errmsg); + goto err_check_expect_stats; + } + + hints = objagg_hints_get(objagg, OBJAGG_OPT_ALGO_SIMPLE_GREEDY); + if (IS_ERR(hints)) { + err = PTR_ERR(hints); + goto err_hints_get; + } + + pr_debug_hints_stats(hints); + err = check_expect_hints_stats(hints, &hints_case->expect_stats_hints, + &errmsg); + if (err) { + pr_err("Hints stats: %s\n", errmsg); + goto err_check_expect_hints_stats; + } + + objagg2 = objagg_create(&delta_ops, hints, &world2); + if (IS_ERR(objagg2)) + return PTR_ERR(objagg2); + + for (i = 0; i < hints_case->key_ids_count; i++) { + objagg_obj = world_obj_get(&world2, objagg2, + hints_case->key_ids[i]); + if (IS_ERR(objagg_obj)) { + err = PTR_ERR(objagg_obj); + goto err_world2_obj_get; + } + } + + pr_debug_stats(objagg2); + err = check_expect_stats(objagg2, &hints_case->expect_stats_hints, + &errmsg); + if (err) { + pr_err("Stats2: %s\n", errmsg); + goto err_check_expect_stats2; + } + + err = 0; + +err_check_expect_stats2: +err_world2_obj_get: + for (i--; i >= 0; i--) + world_obj_put(&world2, objagg, hints_case->key_ids[i]); + objagg_hints_put(hints); + objagg_destroy(objagg2); + i = hints_case->key_ids_count; +err_check_expect_hints_stats: +err_hints_get: +err_check_expect_stats: +err_world_obj_get: + for (i--; i >= 0; i--) + world_obj_put(&world, objagg, hints_case->key_ids[i]); + + objagg_destroy(objagg); + return err; +} +static int test_hints(void) +{ + return test_hints_case(&hints_case); +} + +static int __init test_objagg_init(void) +{ + int err; + + err = test_nodelta(); + if (err) + return err; + err = test_delta(); + if (err) + return err; + return test_hints(); +} + +static void __exit test_objagg_exit(void) +{ +} + +module_init(test_objagg_init); +module_exit(test_objagg_exit); +MODULE_LICENSE("Dual BSD/GPL"); +MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>"); +MODULE_DESCRIPTION("Test module for objagg"); diff --git a/lib/test_printf.c b/lib/test_printf.c index 
53527ea822b5..659b6cc0d483 100644 --- a/lib/test_printf.c +++ b/lib/test_printf.c @@ -9,6 +9,7 @@ #include <linux/module.h> #include <linux/printk.h> #include <linux/random.h> +#include <linux/rtc.h> #include <linux/slab.h> #include <linux/string.h> @@ -249,12 +250,11 @@ plain_format(void) #endif /* BITS_PER_LONG == 64 */ static int __init -plain_hash(void) +plain_hash_to_buffer(const void *p, char *buf, size_t len) { - char buf[PLAIN_BUF_SIZE]; int nchars; - nchars = snprintf(buf, PLAIN_BUF_SIZE, "%p", PTR); + nchars = snprintf(buf, len, "%p", p); if (nchars != PTR_WIDTH) return -1; @@ -265,6 +265,20 @@ plain_hash(void) return 0; } + return 0; +} + + +static int __init +plain_hash(void) +{ + char buf[PLAIN_BUF_SIZE]; + int ret; + + ret = plain_hash_to_buffer(PTR, buf, PLAIN_BUF_SIZE); + if (ret) + return ret; + if (strncmp(buf, PTR_STR, PTR_WIDTH) == 0) return -1; @@ -295,6 +309,23 @@ plain(void) } static void __init +test_hashed(const char *fmt, const void *p) +{ + char buf[PLAIN_BUF_SIZE]; + int ret; + + /* + * No need to increase failed test counter since this is assumed + * to be called after plain(). + */ + ret = plain_hash_to_buffer(p, buf, PLAIN_BUF_SIZE); + if (ret) + return; + + test(buf, fmt, p); +} + +static void __init symbol_ptr(void) { } @@ -419,6 +450,29 @@ struct_va_format(void) } static void __init +struct_rtc_time(void) +{ + /* 1543210543 */ + const struct rtc_time tm = { + .tm_sec = 43, + .tm_min = 35, + .tm_hour = 5, + .tm_mday = 26, + .tm_mon = 10, + .tm_year = 118, + }; + + test_hashed("%pt", &tm); + + test("2018-11-26T05:35:43", "%ptR", &tm); + test("0118-10-26T05:35:43", "%ptRr", &tm); + test("05:35:43|2018-11-26", "%ptRt|%ptRd", &tm, &tm); + test("05:35:43|0118-10-26", "%ptRtr|%ptRdr", &tm, &tm); + test("05:35:43|2018-11-26", "%ptRttr|%ptRdtr", &tm, &tm); + test("05:35:43 tr|2018-11-26 tr", "%ptRt tr|%ptRd tr", &tm, &tm); +} + +static void __init struct_clk(void) { } @@ -529,6 +583,7 @@ test_pointer(void) uuid(); dentry(); struct_va_format(); + struct_rtc_time(); struct_clk(); bitmap(); netdev_features(); diff --git a/lib/test_rhashtable.c b/lib/test_rhashtable.c index 82ac39ce5310..3bd2e91bfc29 100644 --- a/lib/test_rhashtable.c +++ b/lib/test_rhashtable.c @@ -20,11 +20,11 @@ #include <linux/module.h> #include <linux/rcupdate.h> #include <linux/rhashtable.h> -#include <linux/semaphore.h> #include <linux/slab.h> #include <linux/sched.h> #include <linux/random.h> #include <linux/vmalloc.h> +#include <linux/wait.h> #define MAX_ENTRIES 1000000 #define TEST_INSERT_FAIL INT_MAX @@ -112,8 +112,8 @@ static struct rhashtable_params test_rht_params_dup = { .automatic_shrinking = false, }; -static struct semaphore prestart_sem; -static struct semaphore startup_sem = __SEMAPHORE_INITIALIZER(startup_sem, 0); +static atomic_t startup_count; +static DECLARE_WAIT_QUEUE_HEAD(startup_wait); static int insert_retry(struct rhashtable *ht, struct test_obj *obj, const struct rhashtable_params params) @@ -177,16 +177,11 @@ static int __init test_rht_lookup(struct rhashtable *ht, struct test_obj *array, static void test_bucket_stats(struct rhashtable *ht, unsigned int entries) { - unsigned int err, total = 0, chain_len = 0; + unsigned int total = 0, chain_len = 0; struct rhashtable_iter hti; struct rhash_head *pos; - err = rhashtable_walk_init(ht, &hti, GFP_KERNEL); - if (err) { - pr_warn("Test failed: allocation error"); - return; - } - + rhashtable_walk_enter(ht, &hti); rhashtable_walk_start(&hti); while ((pos = rhashtable_walk_next(&hti))) { @@ -395,7 +390,7 @@ static int __init 
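/*
 * struct_rtc_time() above pins down the then-new %ptR family of printk
 * specifiers.  Typical caller-side use, in sketch form (the message text
 * is illustrative):
 */
static void example_print_rtc(const struct rtc_time *tm)
{
        pr_info("alarm set for %ptR\n", tm);            /* 2018-11-26T05:35:43 */
        pr_info("on %ptRd at %ptRt\n", tm, tm);         /* date only, time only */
}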
test_rhltable(unsigned int entries) if (WARN(err, "cannot remove element at slot %d", i)) continue; } else { - if (WARN(err != -ENOENT, "removed non-existant element %d, error %d not %d", + if (WARN(err != -ENOENT, "removed non-existent element %d, error %d not %d", i, err, -ENOENT)) continue; } @@ -440,7 +435,7 @@ static int __init test_rhltable(unsigned int entries) if (WARN(err, "cannot remove element at slot %d", i)) continue; } else { - if (WARN(err != -ENOENT, "removed non-existant element, error %d not %d", + if (WARN(err != -ENOENT, "removed non-existent element, error %d not %d", err, -ENOENT)) continue; } @@ -541,38 +536,45 @@ static unsigned int __init print_ht(struct rhltable *rhlt) static int __init test_insert_dup(struct test_obj_rhl *rhl_test_objects, int cnt, bool slow) { - struct rhltable rhlt; + struct rhltable *rhlt; unsigned int i, ret; const char *key; int err = 0; - err = rhltable_init(&rhlt, &test_rht_params_dup); - if (WARN_ON(err)) + rhlt = kmalloc(sizeof(*rhlt), GFP_KERNEL); + if (WARN_ON(!rhlt)) + return -EINVAL; + + err = rhltable_init(rhlt, &test_rht_params_dup); + if (WARN_ON(err)) { + kfree(rhlt); return err; + } for (i = 0; i < cnt; i++) { rhl_test_objects[i].value.tid = i; - key = rht_obj(&rhlt.ht, &rhl_test_objects[i].list_node.rhead); + key = rht_obj(&rhlt->ht, &rhl_test_objects[i].list_node.rhead); key += test_rht_params_dup.key_offset; if (slow) { - err = PTR_ERR(rhashtable_insert_slow(&rhlt.ht, key, + err = PTR_ERR(rhashtable_insert_slow(&rhlt->ht, key, &rhl_test_objects[i].list_node.rhead)); if (err == -EAGAIN) err = 0; } else - err = rhltable_insert(&rhlt, + err = rhltable_insert(rhlt, &rhl_test_objects[i].list_node, test_rht_params_dup); if (WARN(err, "error %d on element %d/%d (%s)\n", err, i, cnt, slow? "slow" : "fast")) goto skip_print; } - ret = print_ht(&rhlt); + ret = print_ht(rhlt); WARN(ret != cnt, "missing rhltable elements (%d != %d, %s)\n", ret, cnt, slow? 
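/*
 * The hunk above moves the rhltable off the stack, presumably because a
 * struct rhltable (which embeds a whole struct rhashtable) makes for an
 * uncomfortably large stack frame.  The resulting init/destroy pairing,
 * in sketch form; example_heap_rhltable() is illustrative only.
 */
static int example_heap_rhltable(void)
{
        struct rhltable *rhlt;
        int err;

        rhlt = kmalloc(sizeof(*rhlt), GFP_KERNEL);
        if (!rhlt)
                return -ENOMEM;
        err = rhltable_init(rhlt, &test_rht_params_dup);
        if (err) {
                kfree(rhlt);            /* init failed; nothing to destroy */
                return err;
        }
        /* ... rhltable_insert() / rhltable_remove() ... */
        rhltable_destroy(rhlt);
        kfree(rhlt);
        return 0;
}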
"slow" : "fast"); skip_print: - rhltable_destroy(&rhlt); + rhltable_destroy(rhlt); + kfree(rhlt); return 0; } @@ -634,9 +636,12 @@ static int threadfunc(void *data) int i, step, err = 0, insert_retries = 0; struct thread_data *tdata = data; - up(&prestart_sem); - if (down_interruptible(&startup_sem)) - pr_err(" thread[%d]: down_interruptible failed\n", tdata->id); + if (atomic_dec_and_test(&startup_count)) + wake_up(&startup_wait); + if (wait_event_interruptible(startup_wait, atomic_read(&startup_count) == -1)) { + pr_err(" thread[%d]: interrupted\n", tdata->id); + goto out; + } for (i = 0; i < tdata->entries; i++) { tdata->objs[i].value.id = i; @@ -755,7 +760,7 @@ static int __init test_rht_init(void) pr_info("Testing concurrent rhashtable access from %d threads\n", tcount); - sema_init(&prestart_sem, 1 - tcount); + atomic_set(&startup_count, tcount); tdata = vzalloc(array_size(tcount, sizeof(struct thread_data))); if (!tdata) return -ENOMEM; @@ -781,15 +786,18 @@ static int __init test_rht_init(void) tdata[i].objs = objs + i * entries; tdata[i].task = kthread_run(threadfunc, &tdata[i], "rhashtable_thrad[%d]", i); - if (IS_ERR(tdata[i].task)) + if (IS_ERR(tdata[i].task)) { pr_err(" kthread_run failed for thread %d\n", i); - else + atomic_dec(&startup_count); + } else { started_threads++; + } } - if (down_interruptible(&prestart_sem)) - pr_err(" down interruptible failed\n"); - for (i = 0; i < tcount; i++) - up(&startup_sem); + if (wait_event_interruptible(startup_wait, atomic_read(&startup_count) == 0)) + pr_err(" wait_event interruptible failed\n"); + /* count is 0 now, set it to -1 and wake up all threads together */ + atomic_dec(&startup_count); + wake_up_all(&startup_wait); for (i = 0; i < tcount; i++) { if (IS_ERR(tdata[i].task)) continue; diff --git a/lib/test_stackinit.c b/lib/test_stackinit.c new file mode 100644 index 000000000000..13115b6f2b88 --- /dev/null +++ b/lib/test_stackinit.c @@ -0,0 +1,378 @@ +// SPDX-Licenses: GPLv2 +/* + * Test cases for compiler-based stack variable zeroing via future + * compiler flags or CONFIG_GCC_PLUGIN_STRUCTLEAK*. + */ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/string.h> + +/* Exfiltration buffer. */ +#define MAX_VAR_SIZE 128 +static char check_buf[MAX_VAR_SIZE]; + +/* Character array to trigger stack protector in all functions. */ +#define VAR_BUFFER 32 + +/* Volatile mask to convince compiler to copy memory with 0xff. */ +static volatile u8 forced_mask = 0xff; + +/* Location and size tracking to validate fill and test are colocated. 
*/ +static void *fill_start, *target_start; +static size_t fill_size, target_size; + +static bool range_contains(char *haystack_start, size_t haystack_size, + char *needle_start, size_t needle_size) +{ + if (needle_start >= haystack_start && + needle_start + needle_size <= haystack_start + haystack_size) + return true; + return false; +} + +#define DO_NOTHING_TYPE_SCALAR(var_type) var_type +#define DO_NOTHING_TYPE_STRING(var_type) void +#define DO_NOTHING_TYPE_STRUCT(var_type) void + +#define DO_NOTHING_RETURN_SCALAR(ptr) *(ptr) +#define DO_NOTHING_RETURN_STRING(ptr) /**/ +#define DO_NOTHING_RETURN_STRUCT(ptr) /**/ + +#define DO_NOTHING_CALL_SCALAR(var, name) \ + (var) = do_nothing_ ## name(&(var)) +#define DO_NOTHING_CALL_STRING(var, name) \ + do_nothing_ ## name(var) +#define DO_NOTHING_CALL_STRUCT(var, name) \ + do_nothing_ ## name(&(var)) + +#define FETCH_ARG_SCALAR(var) &var +#define FETCH_ARG_STRING(var) var +#define FETCH_ARG_STRUCT(var) &var + +#define FILL_SIZE_STRING 16 + +#define INIT_CLONE_SCALAR /**/ +#define INIT_CLONE_STRING [FILL_SIZE_STRING] +#define INIT_CLONE_STRUCT /**/ + +#define INIT_SCALAR_none /**/ +#define INIT_SCALAR_zero = 0 + +#define INIT_STRING_none [FILL_SIZE_STRING] /**/ +#define INIT_STRING_zero [FILL_SIZE_STRING] = { } + +#define INIT_STRUCT_none /**/ +#define INIT_STRUCT_zero = { } +#define INIT_STRUCT_static_partial = { .two = 0, } +#define INIT_STRUCT_static_all = { .one = arg->one, \ + .two = arg->two, \ + .three = arg->three, \ + .four = arg->four, \ + } +#define INIT_STRUCT_dynamic_partial = { .two = arg->two, } +#define INIT_STRUCT_dynamic_all = { .one = arg->one, \ + .two = arg->two, \ + .three = arg->three, \ + .four = arg->four, \ + } +#define INIT_STRUCT_runtime_partial ; \ + var.two = 0 +#define INIT_STRUCT_runtime_all ; \ + var.one = 0; \ + var.two = 0; \ + var.three = 0; \ + memset(&var.four, 0, \ + sizeof(var.four)) + +/* + * @name: unique string name for the test + * @var_type: type to be tested for zeroing initialization + * @which: is this a SCALAR, STRING, or STRUCT type? + * @init_level: what kind of initialization is performed + */ +#define DEFINE_TEST_DRIVER(name, var_type, which) \ +/* Returns 0 on success, 1 on failure. */ \ +static noinline __init int test_ ## name (void) \ +{ \ + var_type zero INIT_CLONE_ ## which; \ + int ignored; \ + u8 sum = 0, i; \ + \ + /* Notice when a new test is larger than expected. */ \ + BUILD_BUG_ON(sizeof(zero) > MAX_VAR_SIZE); \ + \ + /* Fill clone type with zero for per-field init. */ \ + memset(&zero, 0x00, sizeof(zero)); \ + /* Fill stack with 0xFF. */ \ + ignored = leaf_ ##name((unsigned long)&ignored, 1, \ + FETCH_ARG_ ## which(zero)); \ + /* Clear entire check buffer for later bit tests. */ \ + memset(check_buf, 0x00, sizeof(check_buf)); \ + /* Extract stack-defined variable contents. */ \ + ignored = leaf_ ##name((unsigned long)&ignored, 0, \ + FETCH_ARG_ ## which(zero)); \ + \ + /* Validate that compiler lined up fill and target. */ \ + if (!range_contains(fill_start, fill_size, \ + target_start, target_size)) { \ + pr_err(#name ": stack fill missed target!?\n"); \ + pr_err(#name ": fill %zu wide\n", fill_size); \ + pr_err(#name ": target offset by %d\n", \ + (int)((ssize_t)(uintptr_t)fill_start - \ + (ssize_t)(uintptr_t)target_start)); \ + return 1; \ + } \ + \ + /* Look for any set bits in the check region. 
*/ \ + for (i = 0; i < sizeof(check_buf); i++) \ + sum += (check_buf[i] != 0); \ + \ + if (sum == 0) \ + pr_info(#name " ok\n"); \ + else \ + pr_warn(#name " FAIL (uninit bytes: %d)\n", \ + sum); \ + \ + return (sum != 0); \ +} +#define DEFINE_TEST(name, var_type, which, init_level) \ +/* no-op to force compiler into ignoring "uninitialized" vars */\ +static noinline __init DO_NOTHING_TYPE_ ## which(var_type) \ +do_nothing_ ## name(var_type *ptr) \ +{ \ + /* Will always be true, but compiler doesn't know. */ \ + if ((unsigned long)ptr > 0x2) \ + return DO_NOTHING_RETURN_ ## which(ptr); \ + else \ + return DO_NOTHING_RETURN_ ## which(ptr + 1); \ +} \ +static noinline __init int leaf_ ## name(unsigned long sp, \ + bool fill, \ + var_type *arg) \ +{ \ + char buf[VAR_BUFFER]; \ + var_type var INIT_ ## which ## _ ## init_level; \ + \ + target_start = &var; \ + target_size = sizeof(var); \ + /* \ + * Keep this buffer around to make sure we've got a \ + * stack frame of SOME kind... \ + */ \ + memset(buf, (char)(sp && 0xff), sizeof(buf)); \ + /* Fill variable with 0xFF. */ \ + if (fill) { \ + fill_start = &var; \ + fill_size = sizeof(var); \ + memset(fill_start, \ + (char)((sp && 0xff) | forced_mask), \ + fill_size); \ + } \ + \ + /* Silence "never initialized" warnings. */ \ + DO_NOTHING_CALL_ ## which(var, name); \ + \ + /* Exfiltrate "var". */ \ + memcpy(check_buf, target_start, target_size); \ + \ + return (int)buf[0] | (int)buf[sizeof(buf) - 1]; \ +} \ +DEFINE_TEST_DRIVER(name, var_type, which) + +/* Structure with no padding. */ +struct test_packed { + unsigned long one; + unsigned long two; + unsigned long three; + unsigned long four; +}; + +/* Simple structure with padding likely to be covered by compiler. */ +struct test_small_hole { + size_t one; + char two; + /* 3 byte padding hole here. */ + int three; + unsigned long four; +}; + +/* Try to trigger unhandled padding in a structure. */ +struct test_aligned { + u32 internal1; + u64 internal2; +} __aligned(64); + +struct test_big_hole { + u8 one; + u8 two; + u8 three; + /* 61 byte padding hole here. */ + struct test_aligned four; +} __aligned(64); + +struct test_trailing_hole { + char *one; + char *two; + char *three; + char four; + /* "sizeof(unsigned long) - 1" byte padding hole here. */ +}; + +/* Test if STRUCTLEAK is clearing structs with __user fields. */ +struct test_user { + u8 one; + unsigned long two; + char __user *three; + unsigned long four; +}; + +#define DEFINE_SCALAR_TEST(name, init) \ + DEFINE_TEST(name ## _ ## init, name, SCALAR, init) + +#define DEFINE_SCALAR_TESTS(init) \ + DEFINE_SCALAR_TEST(u8, init); \ + DEFINE_SCALAR_TEST(u16, init); \ + DEFINE_SCALAR_TEST(u32, init); \ + DEFINE_SCALAR_TEST(u64, init); \ + DEFINE_TEST(char_array_ ## init, unsigned char, STRING, init) + +#define DEFINE_STRUCT_TEST(name, init) \ + DEFINE_TEST(name ## _ ## init, \ + struct test_ ## name, STRUCT, init) + +#define DEFINE_STRUCT_TESTS(init) \ + DEFINE_STRUCT_TEST(small_hole, init); \ + DEFINE_STRUCT_TEST(big_hole, init); \ + DEFINE_STRUCT_TEST(trailing_hole, init); \ + DEFINE_STRUCT_TEST(packed, init) + +/* These should be fully initialized all the time! */ +DEFINE_SCALAR_TESTS(zero); +DEFINE_STRUCT_TESTS(zero); +/* Static initialization: padding may be left uninitialized. */ +DEFINE_STRUCT_TESTS(static_partial); +DEFINE_STRUCT_TESTS(static_all); +/* Dynamic initialization: padding may be left uninitialized. 
*/ +DEFINE_STRUCT_TESTS(dynamic_partial); +DEFINE_STRUCT_TESTS(dynamic_all); +/* Runtime initialization: padding may be left uninitialized. */ +DEFINE_STRUCT_TESTS(runtime_partial); +DEFINE_STRUCT_TESTS(runtime_all); +/* No initialization without compiler instrumentation. */ +DEFINE_SCALAR_TESTS(none); +DEFINE_STRUCT_TESTS(none); +DEFINE_TEST(user, struct test_user, STRUCT, none); + +/* + * Check two uses through a variable declaration outside either path, + * which was noticed as a special case in porting earlier stack init + * compiler logic. + */ +static int noinline __leaf_switch_none(int path, bool fill) +{ + switch (path) { + uint64_t var; + + case 1: + target_start = &var; + target_size = sizeof(var); + if (fill) { + fill_start = &var; + fill_size = sizeof(var); + + memset(fill_start, forced_mask | 0x55, fill_size); + } + memcpy(check_buf, target_start, target_size); + break; + case 2: + target_start = &var; + target_size = sizeof(var); + if (fill) { + fill_start = &var; + fill_size = sizeof(var); + + memset(fill_start, forced_mask | 0xaa, fill_size); + } + memcpy(check_buf, target_start, target_size); + break; + default: + var = 5; + return var & forced_mask; + } + return 0; +} + +static noinline __init int leaf_switch_1_none(unsigned long sp, bool fill, + uint64_t *arg) +{ + return __leaf_switch_none(1, fill); +} + +static noinline __init int leaf_switch_2_none(unsigned long sp, bool fill, + uint64_t *arg) +{ + return __leaf_switch_none(2, fill); +} + +DEFINE_TEST_DRIVER(switch_1_none, uint64_t, SCALAR); +DEFINE_TEST_DRIVER(switch_2_none, uint64_t, SCALAR); + +static int __init test_stackinit_init(void) +{ + unsigned int failures = 0; + +#define test_scalars(init) do { \ + failures += test_u8_ ## init (); \ + failures += test_u16_ ## init (); \ + failures += test_u32_ ## init (); \ + failures += test_u64_ ## init (); \ + failures += test_char_array_ ## init (); \ + } while (0) + +#define test_structs(init) do { \ + failures += test_small_hole_ ## init (); \ + failures += test_big_hole_ ## init (); \ + failures += test_trailing_hole_ ## init (); \ + failures += test_packed_ ## init (); \ + } while (0) + + /* These are explicitly initialized and should always pass. */ + test_scalars(zero); + test_structs(zero); + /* Padding here appears to be accidentally always initialized? */ + test_structs(dynamic_partial); + /* Padding initialization depends on compiler behaviors. */ + test_structs(static_partial); + test_structs(static_all); + test_structs(dynamic_all); + test_structs(runtime_partial); + test_structs(runtime_all); + + /* STRUCTLEAK_BYREF_ALL should cover everything from here down. */ + test_scalars(none); + failures += test_switch_1_none(); + failures += test_switch_2_none(); + + /* STRUCTLEAK_BYREF should cover from here down. */ + test_structs(none); + + /* STRUCTLEAK will only cover this. */ + failures += test_user(); + + if (failures == 0) + pr_info("all tests passed!\n"); + else + pr_err("failures: %u\n", failures); + + return failures ? 
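/*
 * What the *_hole structures above probe, in sketch form: a designated
 * initializer only has to set the named members, so the compiler may
 * leave padding bytes holding stale stack data unless something like
 * the STRUCTLEAK plugins forces whole-object initialization.
 * example_hole is illustrative only.
 */
struct example_hole {
        char a;         /* typically followed by 3 bytes of padding */
        int b;
};

static void example_padding(void)
{
        struct example_hole partial = { .a = 1, .b = 2 };  /* padding unspecified */
        struct example_hole zeroed;

        memset(&zeroed, 0, sizeof(zeroed));     /* all bytes, padding included */
        (void)partial;
}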
-EINVAL : 0; +} +module_init(test_stackinit_init); + +static void __exit test_stackinit_exit(void) +{ } +module_exit(test_stackinit_exit); + +MODULE_LICENSE("GPL"); diff --git a/lib/test_ubsan.c b/lib/test_ubsan.c index 280f4979d00e..9ea10adf7a66 100644 --- a/lib/test_ubsan.c +++ b/lib/test_ubsan.c @@ -42,14 +42,6 @@ static void test_ubsan_divrem_overflow(void) val /= val2; } -static void test_ubsan_vla_bound_not_positive(void) -{ - volatile int size = -1; - char buf[size]; - - (void)buf; -} - static void test_ubsan_shift_out_of_bounds(void) { volatile int val = -1; @@ -61,7 +53,7 @@ static void test_ubsan_shift_out_of_bounds(void) static void test_ubsan_out_of_bounds(void) { volatile int i = 4, j = 5; - volatile int arr[i]; + volatile int arr[4]; arr[j] = i; } @@ -113,7 +105,6 @@ static const test_ubsan_fp test_ubsan_array[] = { test_ubsan_mul_overflow, test_ubsan_negate_overflow, test_ubsan_divrem_overflow, - test_ubsan_vla_bound_not_positive, test_ubsan_shift_out_of_bounds, test_ubsan_out_of_bounds, test_ubsan_load_invalid_value, diff --git a/lib/test_vmalloc.c b/lib/test_vmalloc.c new file mode 100644 index 000000000000..83cdcaa82bf6 --- /dev/null +++ b/lib/test_vmalloc.c @@ -0,0 +1,551 @@ +// SPDX-License-Identifier: GPL-2.0 + +/* + * Test module for stress and analyze performance of vmalloc allocator. + * (C) 2018 Uladzislau Rezki (Sony) <urezki@gmail.com> + */ +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/vmalloc.h> +#include <linux/random.h> +#include <linux/kthread.h> +#include <linux/moduleparam.h> +#include <linux/completion.h> +#include <linux/delay.h> +#include <linux/rwsem.h> +#include <linux/mm.h> + +#define __param(type, name, init, msg) \ + static type name = init; \ + module_param(name, type, 0444); \ + MODULE_PARM_DESC(name, msg) \ + +__param(bool, single_cpu_test, false, + "Use single first online CPU to run tests"); + +__param(bool, sequential_test_order, false, + "Use sequential stress tests order"); + +__param(int, test_repeat_count, 1, + "Set test repeat counter"); + +__param(int, test_loop_count, 1000000, + "Set test loop counter"); + +__param(int, run_test_mask, INT_MAX, + "Set tests specified in the mask.\n\n" + "\t\tid: 1, name: fix_size_alloc_test\n" + "\t\tid: 2, name: full_fit_alloc_test\n" + "\t\tid: 4, name: long_busy_list_alloc_test\n" + "\t\tid: 8, name: random_size_alloc_test\n" + "\t\tid: 16, name: fix_align_alloc_test\n" + "\t\tid: 32, name: random_size_align_alloc_test\n" + "\t\tid: 64, name: align_shift_alloc_test\n" + "\t\tid: 128, name: pcpu_alloc_test\n" + /* Add a new test case description here. */ +); + +/* + * Depends on single_cpu_test parameter. If it is true, then + * use first online CPU to trigger a test on, otherwise go with + * all online CPUs. + */ +static cpumask_t cpus_run_test_mask = CPU_MASK_NONE; + +/* + * Read write semaphore for synchronization of setup + * phase that is done in main thread and workers. + */ +static DECLARE_RWSEM(prepare_for_test_rwsem); + +/* + * Completion tracking for worker threads. + */ +static DECLARE_COMPLETION(test_all_done_comp); +static atomic_t test_n_undone = ATOMIC_INIT(0); + +static inline void +test_report_one_done(void) +{ + if (atomic_dec_and_test(&test_n_undone)) + complete(&test_all_done_comp); +} + +static int random_size_align_alloc_test(void) +{ + unsigned long size, align, rnd; + void *ptr; + int i; + + for (i = 0; i < test_loop_count; i++) { + get_random_bytes(&rnd, sizeof(rnd)); + + /* + * Maximum 1024 pages, if PAGE_SIZE is 4096. 
+ */ + align = 1 << (rnd % 23); + + /* + * Maximum 10 pages. + */ + size = ((rnd % 10) + 1) * PAGE_SIZE; + + ptr = __vmalloc_node_range(size, align, + VMALLOC_START, VMALLOC_END, + GFP_KERNEL | __GFP_ZERO, + PAGE_KERNEL, + 0, 0, __builtin_return_address(0)); + + if (!ptr) + return -1; + + vfree(ptr); + } + + return 0; +} + +/* + * This test case is supposed to be failed. + */ +static int align_shift_alloc_test(void) +{ + unsigned long align; + void *ptr; + int i; + + for (i = 0; i < BITS_PER_LONG; i++) { + align = ((unsigned long) 1) << i; + + ptr = __vmalloc_node_range(PAGE_SIZE, align, + VMALLOC_START, VMALLOC_END, + GFP_KERNEL | __GFP_ZERO, + PAGE_KERNEL, + 0, 0, __builtin_return_address(0)); + + if (!ptr) + return -1; + + vfree(ptr); + } + + return 0; +} + +static int fix_align_alloc_test(void) +{ + void *ptr; + int i; + + for (i = 0; i < test_loop_count; i++) { + ptr = __vmalloc_node_range(5 * PAGE_SIZE, + THREAD_ALIGN << 1, + VMALLOC_START, VMALLOC_END, + GFP_KERNEL | __GFP_ZERO, + PAGE_KERNEL, + 0, 0, __builtin_return_address(0)); + + if (!ptr) + return -1; + + vfree(ptr); + } + + return 0; +} + +static int random_size_alloc_test(void) +{ + unsigned int n; + void *p; + int i; + + for (i = 0; i < test_loop_count; i++) { + get_random_bytes(&n, sizeof(i)); + n = (n % 100) + 1; + + p = vmalloc(n * PAGE_SIZE); + + if (!p) + return -1; + + *((__u8 *)p) = 1; + vfree(p); + } + + return 0; +} + +static int long_busy_list_alloc_test(void) +{ + void *ptr_1, *ptr_2; + void **ptr; + int rv = -1; + int i; + + ptr = vmalloc(sizeof(void *) * 15000); + if (!ptr) + return rv; + + for (i = 0; i < 15000; i++) + ptr[i] = vmalloc(1 * PAGE_SIZE); + + for (i = 0; i < test_loop_count; i++) { + ptr_1 = vmalloc(100 * PAGE_SIZE); + if (!ptr_1) + goto leave; + + ptr_2 = vmalloc(1 * PAGE_SIZE); + if (!ptr_2) { + vfree(ptr_1); + goto leave; + } + + *((__u8 *)ptr_1) = 0; + *((__u8 *)ptr_2) = 1; + + vfree(ptr_1); + vfree(ptr_2); + } + + /* Success */ + rv = 0; + +leave: + for (i = 0; i < 15000; i++) + vfree(ptr[i]); + + vfree(ptr); + return rv; +} + +static int full_fit_alloc_test(void) +{ + void **ptr, **junk_ptr, *tmp; + int junk_length; + int rv = -1; + int i; + + junk_length = fls(num_online_cpus()); + junk_length *= (32 * 1024 * 1024 / PAGE_SIZE); + + ptr = vmalloc(sizeof(void *) * junk_length); + if (!ptr) + return rv; + + junk_ptr = vmalloc(sizeof(void *) * junk_length); + if (!junk_ptr) { + vfree(ptr); + return rv; + } + + for (i = 0; i < junk_length; i++) { + ptr[i] = vmalloc(1 * PAGE_SIZE); + junk_ptr[i] = vmalloc(1 * PAGE_SIZE); + } + + for (i = 0; i < junk_length; i++) + vfree(junk_ptr[i]); + + for (i = 0; i < test_loop_count; i++) { + tmp = vmalloc(1 * PAGE_SIZE); + + if (!tmp) + goto error; + + *((__u8 *)tmp) = 1; + vfree(tmp); + } + + /* Success */ + rv = 0; + +error: + for (i = 0; i < junk_length; i++) + vfree(ptr[i]); + + vfree(ptr); + vfree(junk_ptr); + + return rv; +} + +static int fix_size_alloc_test(void) +{ + void *ptr; + int i; + + for (i = 0; i < test_loop_count; i++) { + ptr = vmalloc(3 * PAGE_SIZE); + + if (!ptr) + return -1; + + *((__u8 *)ptr) = 0; + + vfree(ptr); + } + + return 0; +} + +static int +pcpu_alloc_test(void) +{ + int rv = 0; +#ifndef CONFIG_NEED_PER_CPU_KM + void __percpu **pcpu; + size_t size, align; + int i; + + pcpu = vmalloc(sizeof(void __percpu *) * 35000); + if (!pcpu) + return -1; + + for (i = 0; i < 35000; i++) { + unsigned int r; + + get_random_bytes(&r, sizeof(i)); + size = (r % (PAGE_SIZE / 4)) + 1; + + /* + * Maximum PAGE_SIZE + */ + get_random_bytes(&r, 
sizeof(i));
+		align = 1 << ((i % 11) + 1);
+
+		pcpu[i] = __alloc_percpu(size, align);
+		if (!pcpu[i])
+			rv = -1;
+	}
+
+	for (i = 0; i < 35000; i++)
+		free_percpu(pcpu[i]);
+
+	vfree(pcpu);
+#endif
+	return rv;
+}
+
+struct test_case_desc {
+	const char *test_name;
+	int (*test_func)(void);
+};
+
+static struct test_case_desc test_case_array[] = {
+	{ "fix_size_alloc_test", fix_size_alloc_test },
+	{ "full_fit_alloc_test", full_fit_alloc_test },
+	{ "long_busy_list_alloc_test", long_busy_list_alloc_test },
+	{ "random_size_alloc_test", random_size_alloc_test },
+	{ "fix_align_alloc_test", fix_align_alloc_test },
+	{ "random_size_align_alloc_test", random_size_align_alloc_test },
+	{ "align_shift_alloc_test", align_shift_alloc_test },
+	{ "pcpu_alloc_test", pcpu_alloc_test },
+	/* Add a new test case here. */
+};
+
+struct test_case_data {
+	int test_failed;
+	int test_passed;
+	u64 time;
+};
+
+/* Split it to get rid of: WARNING: line over 80 characters */
+static struct test_case_data
+	per_cpu_test_data[NR_CPUS][ARRAY_SIZE(test_case_array)];
+
+static struct test_driver {
+	struct task_struct *task;
+	unsigned long start;
+	unsigned long stop;
+	int cpu;
+} per_cpu_test_driver[NR_CPUS];
+
+static void shuffle_array(int *arr, int n)
+{
+	unsigned int rnd;
+	int i, j, x;
+
+	for (i = n - 1; i > 0; i--) {
+		get_random_bytes(&rnd, sizeof(rnd));
+
+		/* Cut the range. */
+		j = rnd % i;
+
+		/* Swap indexes. */
+		x = arr[i];
+		arr[i] = arr[j];
+		arr[j] = x;
+	}
+}
+
+static int test_func(void *private)
+{
+	struct test_driver *t = private;
+	cpumask_t newmask = CPU_MASK_NONE;
+	int random_array[ARRAY_SIZE(test_case_array)];
+	int index, i, j, ret;
+	ktime_t kt;
+	u64 delta;
+
+	cpumask_set_cpu(t->cpu, &newmask);
+	set_cpus_allowed_ptr(current, &newmask);
+
+	for (i = 0; i < ARRAY_SIZE(test_case_array); i++)
+		random_array[i] = i;
+
+	if (!sequential_test_order)
+		shuffle_array(random_array, ARRAY_SIZE(test_case_array));
+
+	/*
+	 * Block until initialization is done.
+	 */
+	down_read(&prepare_for_test_rwsem);
+
+	t->start = get_cycles();
+	for (i = 0; i < ARRAY_SIZE(test_case_array); i++) {
+		index = random_array[i];
+
+		/*
+		 * Skip tests if run_test_mask has been specified.
+		 */
+		if (!((run_test_mask & (1 << index)) >> index))
+			continue;
+
+		kt = ktime_get();
+		for (j = 0; j < test_repeat_count; j++) {
+			ret = test_case_array[index].test_func();
+			if (!ret)
+				per_cpu_test_data[t->cpu][index].test_passed++;
+			else
+				per_cpu_test_data[t->cpu][index].test_failed++;
+		}
+
+		/*
+		 * Take the average time the test took.
+		 */
+		delta = (u64) ktime_us_delta(ktime_get(), kt);
+		do_div(delta, (u32) test_repeat_count);
+
+		per_cpu_test_data[t->cpu][index].time = delta;
+	}
+	t->stop = get_cycles();
+
+	up_read(&prepare_for_test_rwsem);
+	test_report_one_done();
+
+	/*
+	 * Wait for the kthread_stop() call.
+	 */
+	while (!kthread_should_stop())
+		msleep(10);
+
+	return 0;
+}
+
+static void
+init_test_configuration(void)
+{
+	/*
+	 * Reset all data of all CPUs.
+	 */
+	memset(per_cpu_test_data, 0, sizeof(per_cpu_test_data));
+
+	if (single_cpu_test)
+		cpumask_set_cpu(cpumask_first(cpu_online_mask),
+			&cpus_run_test_mask);
+	else
+		cpumask_and(&cpus_run_test_mask, cpu_online_mask,
+			cpu_online_mask);
+
+	if (test_repeat_count <= 0)
+		test_repeat_count = 1;
+
+	if (test_loop_count <= 0)
+		test_loop_count = 1;
+}
+
+static void do_concurrent_test(void)
+{
+	int cpu, ret;
+
+	/*
+	 * Set some basic configurations plus sanity check.
+	 */
+	init_test_configuration();
+
+	/*
+	 * Put on hold all workers.
+ */ + down_write(&prepare_for_test_rwsem); + + for_each_cpu(cpu, &cpus_run_test_mask) { + struct test_driver *t = &per_cpu_test_driver[cpu]; + + t->cpu = cpu; + t->task = kthread_run(test_func, t, "vmalloc_test/%d", cpu); + + if (!IS_ERR(t->task)) + /* Success. */ + atomic_inc(&test_n_undone); + else + pr_err("Failed to start kthread for %d CPU\n", cpu); + } + + /* + * Now let the workers do their job. + */ + up_write(&prepare_for_test_rwsem); + + /* + * Sleep quiet until all workers are done with 1 second + * interval. Since the test can take a lot of time we + * can run into a stack trace of the hung task. That is + * why we go with completion_timeout and HZ value. + */ + do { + ret = wait_for_completion_timeout(&test_all_done_comp, HZ); + } while (!ret); + + for_each_cpu(cpu, &cpus_run_test_mask) { + struct test_driver *t = &per_cpu_test_driver[cpu]; + int i; + + if (!IS_ERR(t->task)) + kthread_stop(t->task); + + for (i = 0; i < ARRAY_SIZE(test_case_array); i++) { + if (!((run_test_mask & (1 << i)) >> i)) + continue; + + pr_info( + "Summary: %s passed: %d failed: %d repeat: %d loops: %d avg: %llu usec\n", + test_case_array[i].test_name, + per_cpu_test_data[cpu][i].test_passed, + per_cpu_test_data[cpu][i].test_failed, + test_repeat_count, test_loop_count, + per_cpu_test_data[cpu][i].time); + } + + pr_info("All test took CPU%d=%lu cycles\n", + cpu, t->stop - t->start); + } +} + +static int vmalloc_test_init(void) +{ + do_concurrent_test(); + return -EAGAIN; /* Fail will directly unload the module */ +} + +static void vmalloc_test_exit(void) +{ +} + +module_init(vmalloc_test_init) +module_exit(vmalloc_test_exit) + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Uladzislau Rezki"); +MODULE_DESCRIPTION("vmalloc test module"); diff --git a/lib/test_xarray.c b/lib/test_xarray.c new file mode 100644 index 000000000000..c596a957f764 --- /dev/null +++ b/lib/test_xarray.c @@ -0,0 +1,1384 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * test_xarray.c: Test the XArray API + * Copyright (c) 2017-2018 Microsoft Corporation + * Author: Matthew Wilcox <willy@infradead.org> + */ + +#include <linux/xarray.h> +#include <linux/module.h> + +static unsigned int tests_run; +static unsigned int tests_passed; + +#ifndef XA_DEBUG +# ifdef __KERNEL__ +void xa_dump(const struct xarray *xa) { } +# endif +#undef XA_BUG_ON +#define XA_BUG_ON(xa, x) do { \ + tests_run++; \ + if (x) { \ + printk("BUG at %s:%d\n", __func__, __LINE__); \ + xa_dump(xa); \ + dump_stack(); \ + } else { \ + tests_passed++; \ + } \ +} while (0) +#endif + +static void *xa_mk_index(unsigned long index) +{ + return xa_mk_value(index & LONG_MAX); +} + +static void *xa_store_index(struct xarray *xa, unsigned long index, gfp_t gfp) +{ + return xa_store(xa, index, xa_mk_index(index), gfp); +} + +static void xa_alloc_index(struct xarray *xa, unsigned long index, gfp_t gfp) +{ + u32 id = 0; + + XA_BUG_ON(xa, xa_alloc(xa, &id, UINT_MAX, xa_mk_index(index), + gfp) != 0); + XA_BUG_ON(xa, id != index); +} + +static void xa_erase_index(struct xarray *xa, unsigned long index) +{ + XA_BUG_ON(xa, xa_erase(xa, index) != xa_mk_index(index)); + XA_BUG_ON(xa, xa_load(xa, index) != NULL); +} + +/* + * If anyone needs this, please move it to xarray.c. We have no current + * users outside the test suite because all current multislot users want + * to use the advanced API. 
+ */ +static void *xa_store_order(struct xarray *xa, unsigned long index, + unsigned order, void *entry, gfp_t gfp) +{ + XA_STATE_ORDER(xas, xa, index, order); + void *curr; + + do { + xas_lock(&xas); + curr = xas_store(&xas, entry); + xas_unlock(&xas); + } while (xas_nomem(&xas, gfp)); + + return curr; +} + +static noinline void check_xa_err(struct xarray *xa) +{ + XA_BUG_ON(xa, xa_err(xa_store_index(xa, 0, GFP_NOWAIT)) != 0); + XA_BUG_ON(xa, xa_err(xa_erase(xa, 0)) != 0); +#ifndef __KERNEL__ + /* The kernel does not fail GFP_NOWAIT allocations */ + XA_BUG_ON(xa, xa_err(xa_store_index(xa, 1, GFP_NOWAIT)) != -ENOMEM); + XA_BUG_ON(xa, xa_err(xa_store_index(xa, 1, GFP_NOWAIT)) != -ENOMEM); +#endif + XA_BUG_ON(xa, xa_err(xa_store_index(xa, 1, GFP_KERNEL)) != 0); + XA_BUG_ON(xa, xa_err(xa_store(xa, 1, xa_mk_value(0), GFP_KERNEL)) != 0); + XA_BUG_ON(xa, xa_err(xa_erase(xa, 1)) != 0); +// kills the test-suite :-( +// XA_BUG_ON(xa, xa_err(xa_store(xa, 0, xa_mk_internal(0), 0)) != -EINVAL); +} + +static noinline void check_xas_retry(struct xarray *xa) +{ + XA_STATE(xas, xa, 0); + void *entry; + + xa_store_index(xa, 0, GFP_KERNEL); + xa_store_index(xa, 1, GFP_KERNEL); + + rcu_read_lock(); + XA_BUG_ON(xa, xas_find(&xas, ULONG_MAX) != xa_mk_value(0)); + xa_erase_index(xa, 1); + XA_BUG_ON(xa, !xa_is_retry(xas_reload(&xas))); + XA_BUG_ON(xa, xas_retry(&xas, NULL)); + XA_BUG_ON(xa, xas_retry(&xas, xa_mk_value(0))); + xas_reset(&xas); + XA_BUG_ON(xa, xas.xa_node != XAS_RESTART); + XA_BUG_ON(xa, xas_next_entry(&xas, ULONG_MAX) != xa_mk_value(0)); + XA_BUG_ON(xa, xas.xa_node != NULL); + + XA_BUG_ON(xa, xa_store_index(xa, 1, GFP_KERNEL) != NULL); + XA_BUG_ON(xa, !xa_is_internal(xas_reload(&xas))); + xas.xa_node = XAS_RESTART; + XA_BUG_ON(xa, xas_next_entry(&xas, ULONG_MAX) != xa_mk_value(0)); + rcu_read_unlock(); + + /* Make sure we can iterate through retry entries */ + xas_lock(&xas); + xas_set(&xas, 0); + xas_store(&xas, XA_RETRY_ENTRY); + xas_set(&xas, 1); + xas_store(&xas, XA_RETRY_ENTRY); + + xas_set(&xas, 0); + xas_for_each(&xas, entry, ULONG_MAX) { + xas_store(&xas, xa_mk_index(xas.xa_index)); + } + xas_unlock(&xas); + + xa_erase_index(xa, 0); + xa_erase_index(xa, 1); +} + +static noinline void check_xa_load(struct xarray *xa) +{ + unsigned long i, j; + + for (i = 0; i < 1024; i++) { + for (j = 0; j < 1024; j++) { + void *entry = xa_load(xa, j); + if (j < i) + XA_BUG_ON(xa, xa_to_value(entry) != j); + else + XA_BUG_ON(xa, entry); + } + XA_BUG_ON(xa, xa_store_index(xa, i, GFP_KERNEL) != NULL); + } + + for (i = 0; i < 1024; i++) { + for (j = 0; j < 1024; j++) { + void *entry = xa_load(xa, j); + if (j >= i) + XA_BUG_ON(xa, xa_to_value(entry) != j); + else + XA_BUG_ON(xa, entry); + } + xa_erase_index(xa, i); + } + XA_BUG_ON(xa, !xa_empty(xa)); +} + +static noinline void check_xa_mark_1(struct xarray *xa, unsigned long index) +{ + unsigned int order; + unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 
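/*
 * These checks lean on a small core API.  Minimal sketch of the normal
 * (non-advanced) XArray calls under test; example_xa_basics() is
 * illustrative only.
 */
static int example_xa_basics(struct xarray *xa)
{
        void *entry;

        /* store a tagged integer at index 7; returns the old entry */
        entry = xa_store(xa, 7, xa_mk_value(7), GFP_KERNEL);
        if (xa_is_err(entry))
                return xa_err(entry);
        entry = xa_load(xa, 7);         /* -> xa_mk_value(7) */
        entry = xa_erase(xa, 7);        /* removes it and returns it */
        return entry == xa_mk_value(7) ? 0 : -EINVAL;
}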
8 : 1; + + /* NULL elements have no marks set */ + XA_BUG_ON(xa, xa_get_mark(xa, index, XA_MARK_0)); + xa_set_mark(xa, index, XA_MARK_0); + XA_BUG_ON(xa, xa_get_mark(xa, index, XA_MARK_0)); + + /* Storing a pointer will not make a mark appear */ + XA_BUG_ON(xa, xa_store_index(xa, index, GFP_KERNEL) != NULL); + XA_BUG_ON(xa, xa_get_mark(xa, index, XA_MARK_0)); + xa_set_mark(xa, index, XA_MARK_0); + XA_BUG_ON(xa, !xa_get_mark(xa, index, XA_MARK_0)); + + /* Setting one mark will not set another mark */ + XA_BUG_ON(xa, xa_get_mark(xa, index + 1, XA_MARK_0)); + XA_BUG_ON(xa, xa_get_mark(xa, index, XA_MARK_1)); + + /* Storing NULL clears marks, and they can't be set again */ + xa_erase_index(xa, index); + XA_BUG_ON(xa, !xa_empty(xa)); + XA_BUG_ON(xa, xa_get_mark(xa, index, XA_MARK_0)); + xa_set_mark(xa, index, XA_MARK_0); + XA_BUG_ON(xa, xa_get_mark(xa, index, XA_MARK_0)); + + /* + * Storing a multi-index entry over entries with marks gives the + * entire entry the union of the marks + */ + BUG_ON((index % 4) != 0); + for (order = 2; order < max_order; order++) { + unsigned long base = round_down(index, 1UL << order); + unsigned long next = base + (1UL << order); + unsigned long i; + + XA_BUG_ON(xa, xa_store_index(xa, index + 1, GFP_KERNEL)); + xa_set_mark(xa, index + 1, XA_MARK_0); + XA_BUG_ON(xa, xa_store_index(xa, index + 2, GFP_KERNEL)); + xa_set_mark(xa, index + 2, XA_MARK_2); + XA_BUG_ON(xa, xa_store_index(xa, next, GFP_KERNEL)); + xa_store_order(xa, index, order, xa_mk_index(index), + GFP_KERNEL); + for (i = base; i < next; i++) { + XA_STATE(xas, xa, i); + unsigned int seen = 0; + void *entry; + + XA_BUG_ON(xa, !xa_get_mark(xa, i, XA_MARK_0)); + XA_BUG_ON(xa, xa_get_mark(xa, i, XA_MARK_1)); + XA_BUG_ON(xa, !xa_get_mark(xa, i, XA_MARK_2)); + + /* We should see two elements in the array */ + rcu_read_lock(); + xas_for_each(&xas, entry, ULONG_MAX) + seen++; + rcu_read_unlock(); + XA_BUG_ON(xa, seen != 2); + + /* One of which is marked */ + xas_set(&xas, 0); + seen = 0; + rcu_read_lock(); + xas_for_each_marked(&xas, entry, ULONG_MAX, XA_MARK_0) + seen++; + rcu_read_unlock(); + XA_BUG_ON(xa, seen != 1); + } + XA_BUG_ON(xa, xa_get_mark(xa, next, XA_MARK_0)); + XA_BUG_ON(xa, xa_get_mark(xa, next, XA_MARK_1)); + XA_BUG_ON(xa, xa_get_mark(xa, next, XA_MARK_2)); + xa_erase_index(xa, index); + xa_erase_index(xa, next); + XA_BUG_ON(xa, !xa_empty(xa)); + } + XA_BUG_ON(xa, !xa_empty(xa)); +} + +static noinline void check_xa_mark_2(struct xarray *xa) +{ + XA_STATE(xas, xa, 0); + unsigned long index; + unsigned int count = 0; + void *entry; + + xa_store_index(xa, 0, GFP_KERNEL); + xa_set_mark(xa, 0, XA_MARK_0); + xas_lock(&xas); + xas_load(&xas); + xas_init_marks(&xas); + xas_unlock(&xas); + XA_BUG_ON(xa, !xa_get_mark(xa, 0, XA_MARK_0) == 0); + + for (index = 3500; index < 4500; index++) { + xa_store_index(xa, index, GFP_KERNEL); + xa_set_mark(xa, index, XA_MARK_0); + } + + xas_reset(&xas); + rcu_read_lock(); + xas_for_each_marked(&xas, entry, ULONG_MAX, XA_MARK_0) + count++; + rcu_read_unlock(); + XA_BUG_ON(xa, count != 1000); + + xas_lock(&xas); + xas_for_each(&xas, entry, ULONG_MAX) { + xas_init_marks(&xas); + XA_BUG_ON(xa, !xa_get_mark(xa, xas.xa_index, XA_MARK_0)); + XA_BUG_ON(xa, !xas_get_mark(&xas, XA_MARK_0)); + } + xas_unlock(&xas); + + xa_destroy(xa); +} + +static noinline void check_xa_mark(struct xarray *xa) +{ + unsigned long index; + + for (index = 0; index < 16384; index += 4) + check_xa_mark_1(xa, index); + + check_xa_mark_2(xa); +} + +static noinline void check_xa_shrink(struct xarray 
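/*
 * Mark semantics pinned down by check_xa_mark_*() above, in sketch form:
 * a mark only sticks to a present entry, and erasing the entry clears
 * its marks.  example_xa_marks() is illustrative only.
 */
static void example_xa_marks(struct xarray *xa)
{
        xa_store(xa, 3, xa_mk_value(3), GFP_KERNEL);
        xa_set_mark(xa, 3, XA_MARK_0);          /* sticks: entry exists */
        WARN_ON(!xa_get_mark(xa, 3, XA_MARK_0));
        xa_erase(xa, 3);                        /* erasing clears marks */
        WARN_ON(xa_get_mark(xa, 3, XA_MARK_0));
}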
*xa) +{ + XA_STATE(xas, xa, 1); + struct xa_node *node; + unsigned int order; + unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 15 : 1; + + XA_BUG_ON(xa, !xa_empty(xa)); + XA_BUG_ON(xa, xa_store_index(xa, 0, GFP_KERNEL) != NULL); + XA_BUG_ON(xa, xa_store_index(xa, 1, GFP_KERNEL) != NULL); + + /* + * Check that erasing the entry at 1 shrinks the tree and properly + * marks the node as being deleted. + */ + xas_lock(&xas); + XA_BUG_ON(xa, xas_load(&xas) != xa_mk_value(1)); + node = xas.xa_node; + XA_BUG_ON(xa, xa_entry_locked(xa, node, 0) != xa_mk_value(0)); + XA_BUG_ON(xa, xas_store(&xas, NULL) != xa_mk_value(1)); + XA_BUG_ON(xa, xa_load(xa, 1) != NULL); + XA_BUG_ON(xa, xas.xa_node != XAS_BOUNDS); + XA_BUG_ON(xa, xa_entry_locked(xa, node, 0) != XA_RETRY_ENTRY); + XA_BUG_ON(xa, xas_load(&xas) != NULL); + xas_unlock(&xas); + XA_BUG_ON(xa, xa_load(xa, 0) != xa_mk_value(0)); + xa_erase_index(xa, 0); + XA_BUG_ON(xa, !xa_empty(xa)); + + for (order = 0; order < max_order; order++) { + unsigned long max = (1UL << order) - 1; + xa_store_order(xa, 0, order, xa_mk_value(0), GFP_KERNEL); + XA_BUG_ON(xa, xa_load(xa, max) != xa_mk_value(0)); + XA_BUG_ON(xa, xa_load(xa, max + 1) != NULL); + rcu_read_lock(); + node = xa_head(xa); + rcu_read_unlock(); + XA_BUG_ON(xa, xa_store_index(xa, ULONG_MAX, GFP_KERNEL) != + NULL); + rcu_read_lock(); + XA_BUG_ON(xa, xa_head(xa) == node); + rcu_read_unlock(); + XA_BUG_ON(xa, xa_load(xa, max + 1) != NULL); + xa_erase_index(xa, ULONG_MAX); + XA_BUG_ON(xa, xa->xa_head != node); + xa_erase_index(xa, 0); + } +} + +static noinline void check_cmpxchg(struct xarray *xa) +{ + void *FIVE = xa_mk_value(5); + void *SIX = xa_mk_value(6); + void *LOTS = xa_mk_value(12345678); + + XA_BUG_ON(xa, !xa_empty(xa)); + XA_BUG_ON(xa, xa_store_index(xa, 12345678, GFP_KERNEL) != NULL); + XA_BUG_ON(xa, xa_insert(xa, 12345678, xa, GFP_KERNEL) != -EEXIST); + XA_BUG_ON(xa, xa_cmpxchg(xa, 12345678, SIX, FIVE, GFP_KERNEL) != LOTS); + XA_BUG_ON(xa, xa_cmpxchg(xa, 12345678, LOTS, FIVE, GFP_KERNEL) != LOTS); + XA_BUG_ON(xa, xa_cmpxchg(xa, 12345678, FIVE, LOTS, GFP_KERNEL) != FIVE); + XA_BUG_ON(xa, xa_cmpxchg(xa, 5, FIVE, NULL, GFP_KERNEL) != NULL); + XA_BUG_ON(xa, xa_cmpxchg(xa, 5, NULL, FIVE, GFP_KERNEL) != NULL); + xa_erase_index(xa, 12345678); + xa_erase_index(xa, 5); + XA_BUG_ON(xa, !xa_empty(xa)); +} + +static noinline void check_reserve(struct xarray *xa) +{ + void *entry; + unsigned long index; + + /* An array with a reserved entry is not empty */ + XA_BUG_ON(xa, !xa_empty(xa)); + xa_reserve(xa, 12345678, GFP_KERNEL); + XA_BUG_ON(xa, xa_empty(xa)); + XA_BUG_ON(xa, xa_load(xa, 12345678)); + xa_release(xa, 12345678); + XA_BUG_ON(xa, !xa_empty(xa)); + + /* Releasing a used entry does nothing */ + xa_reserve(xa, 12345678, GFP_KERNEL); + XA_BUG_ON(xa, xa_store_index(xa, 12345678, GFP_NOWAIT) != NULL); + xa_release(xa, 12345678); + xa_erase_index(xa, 12345678); + XA_BUG_ON(xa, !xa_empty(xa)); + + /* cmpxchg sees a reserved entry as NULL */ + xa_reserve(xa, 12345678, GFP_KERNEL); + XA_BUG_ON(xa, xa_cmpxchg(xa, 12345678, NULL, xa_mk_value(12345678), + GFP_NOWAIT) != NULL); + xa_release(xa, 12345678); + xa_erase_index(xa, 12345678); + XA_BUG_ON(xa, !xa_empty(xa)); + + /* But xa_insert does not */ + xa_reserve(xa, 12345678, GFP_KERNEL); + XA_BUG_ON(xa, xa_insert(xa, 12345678, xa_mk_value(12345678), 0) != + -EEXIST); + XA_BUG_ON(xa, xa_empty(xa)); + XA_BUG_ON(xa, xa_erase(xa, 12345678) != NULL); + XA_BUG_ON(xa, !xa_empty(xa)); + + /* Can iterate through a reserved entry */ + xa_store_index(xa, 5, 
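/*
 * check_cmpxchg() above fixes the compare-exchange contract in place:
 * xa_cmpxchg() always returns the previous entry and only stores when
 * that previous entry matches "old".  Sketch; example_xa_cmpxchg() is
 * illustrative only.
 */
static int example_xa_cmpxchg(struct xarray *xa)
{
        void *old;

        xa_store(xa, 1, xa_mk_value(1), GFP_KERNEL);
        /* mismatch: entry is 1, not 2 -- no store, the entry is returned */
        old = xa_cmpxchg(xa, 1, xa_mk_value(2), xa_mk_value(3), GFP_KERNEL);
        if (old != xa_mk_value(1))
                return -EINVAL;
        /* match: 3 is stored, the old value 1 is returned */
        old = xa_cmpxchg(xa, 1, xa_mk_value(1), xa_mk_value(3), GFP_KERNEL);
        xa_erase(xa, 1);
        return old == xa_mk_value(1) ? 0 : -EINVAL;
}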
GFP_KERNEL); + xa_reserve(xa, 6, GFP_KERNEL); + xa_store_index(xa, 7, GFP_KERNEL); + + xa_for_each(xa, index, entry) { + XA_BUG_ON(xa, index != 5 && index != 7); + } + xa_destroy(xa); +} + +static noinline void check_xas_erase(struct xarray *xa) +{ + XA_STATE(xas, xa, 0); + void *entry; + unsigned long i, j; + + for (i = 0; i < 200; i++) { + for (j = i; j < 2 * i + 17; j++) { + xas_set(&xas, j); + do { + xas_lock(&xas); + xas_store(&xas, xa_mk_index(j)); + xas_unlock(&xas); + } while (xas_nomem(&xas, GFP_KERNEL)); + } + + xas_set(&xas, ULONG_MAX); + do { + xas_lock(&xas); + xas_store(&xas, xa_mk_value(0)); + xas_unlock(&xas); + } while (xas_nomem(&xas, GFP_KERNEL)); + + xas_lock(&xas); + xas_store(&xas, NULL); + + xas_set(&xas, 0); + j = i; + xas_for_each(&xas, entry, ULONG_MAX) { + XA_BUG_ON(xa, entry != xa_mk_index(j)); + xas_store(&xas, NULL); + j++; + } + xas_unlock(&xas); + XA_BUG_ON(xa, !xa_empty(xa)); + } +} + +#ifdef CONFIG_XARRAY_MULTI +static noinline void check_multi_store_1(struct xarray *xa, unsigned long index, + unsigned int order) +{ + XA_STATE(xas, xa, index); + unsigned long min = index & ~((1UL << order) - 1); + unsigned long max = min + (1UL << order); + + xa_store_order(xa, index, order, xa_mk_index(index), GFP_KERNEL); + XA_BUG_ON(xa, xa_load(xa, min) != xa_mk_index(index)); + XA_BUG_ON(xa, xa_load(xa, max - 1) != xa_mk_index(index)); + XA_BUG_ON(xa, xa_load(xa, max) != NULL); + XA_BUG_ON(xa, xa_load(xa, min - 1) != NULL); + + xas_lock(&xas); + XA_BUG_ON(xa, xas_store(&xas, xa_mk_index(min)) != xa_mk_index(index)); + xas_unlock(&xas); + XA_BUG_ON(xa, xa_load(xa, min) != xa_mk_index(min)); + XA_BUG_ON(xa, xa_load(xa, max - 1) != xa_mk_index(min)); + XA_BUG_ON(xa, xa_load(xa, max) != NULL); + XA_BUG_ON(xa, xa_load(xa, min - 1) != NULL); + + xa_erase_index(xa, min); + XA_BUG_ON(xa, !xa_empty(xa)); +} + +static noinline void check_multi_store_2(struct xarray *xa, unsigned long index, + unsigned int order) +{ + XA_STATE(xas, xa, index); + xa_store_order(xa, index, order, xa_mk_value(0), GFP_KERNEL); + + xas_lock(&xas); + XA_BUG_ON(xa, xas_store(&xas, xa_mk_value(1)) != xa_mk_value(0)); + XA_BUG_ON(xa, xas.xa_index != index); + XA_BUG_ON(xa, xas_store(&xas, NULL) != xa_mk_value(1)); + xas_unlock(&xas); + XA_BUG_ON(xa, !xa_empty(xa)); +} + +static noinline void check_multi_store_3(struct xarray *xa, unsigned long index, + unsigned int order) +{ + XA_STATE(xas, xa, 0); + void *entry; + int n = 0; + + xa_store_order(xa, index, order, xa_mk_index(index), GFP_KERNEL); + + xas_lock(&xas); + xas_for_each(&xas, entry, ULONG_MAX) { + XA_BUG_ON(xa, entry != xa_mk_index(index)); + n++; + } + XA_BUG_ON(xa, n != 1); + xas_set(&xas, index + 1); + xas_for_each(&xas, entry, ULONG_MAX) { + XA_BUG_ON(xa, entry != xa_mk_index(index)); + n++; + } + XA_BUG_ON(xa, n != 2); + xas_unlock(&xas); + + xa_destroy(xa); +} +#endif + +static noinline void check_multi_store(struct xarray *xa) +{ +#ifdef CONFIG_XARRAY_MULTI + unsigned long i, j, k; + unsigned int max_order = (sizeof(long) == 4) ? 
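/*
 * A multi-index (order-N) entry occupies 2^N consecutive indices which
 * all load the same value -- the property check_multi_store() verifies
 * below.  Sketch using the local xa_store_order() helper; requires
 * CONFIG_XARRAY_MULTI, and example_multi_order() is illustrative only.
 */
static void example_multi_order(struct xarray *xa)
{
        /* one order-2 entry covering indices 0..3 */
        xa_store_order(xa, 0, 2, xa_mk_value(0), GFP_KERNEL);
        WARN_ON(xa_load(xa, 3) != xa_mk_value(0));      /* same entry */
        WARN_ON(xa_load(xa, 4) != NULL);                /* outside the range */
        xa_erase(xa, 0);        /* drops the whole entry, all four slots */
}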
30 : 60; + + /* Loading from any position returns the same value */ + xa_store_order(xa, 0, 1, xa_mk_value(0), GFP_KERNEL); + XA_BUG_ON(xa, xa_load(xa, 0) != xa_mk_value(0)); + XA_BUG_ON(xa, xa_load(xa, 1) != xa_mk_value(0)); + XA_BUG_ON(xa, xa_load(xa, 2) != NULL); + rcu_read_lock(); + XA_BUG_ON(xa, xa_to_node(xa_head(xa))->count != 2); + XA_BUG_ON(xa, xa_to_node(xa_head(xa))->nr_values != 2); + rcu_read_unlock(); + + /* Storing adjacent to the value does not alter the value */ + xa_store(xa, 3, xa, GFP_KERNEL); + XA_BUG_ON(xa, xa_load(xa, 0) != xa_mk_value(0)); + XA_BUG_ON(xa, xa_load(xa, 1) != xa_mk_value(0)); + XA_BUG_ON(xa, xa_load(xa, 2) != NULL); + rcu_read_lock(); + XA_BUG_ON(xa, xa_to_node(xa_head(xa))->count != 3); + XA_BUG_ON(xa, xa_to_node(xa_head(xa))->nr_values != 2); + rcu_read_unlock(); + + /* Overwriting multiple indexes works */ + xa_store_order(xa, 0, 2, xa_mk_value(1), GFP_KERNEL); + XA_BUG_ON(xa, xa_load(xa, 0) != xa_mk_value(1)); + XA_BUG_ON(xa, xa_load(xa, 1) != xa_mk_value(1)); + XA_BUG_ON(xa, xa_load(xa, 2) != xa_mk_value(1)); + XA_BUG_ON(xa, xa_load(xa, 3) != xa_mk_value(1)); + XA_BUG_ON(xa, xa_load(xa, 4) != NULL); + rcu_read_lock(); + XA_BUG_ON(xa, xa_to_node(xa_head(xa))->count != 4); + XA_BUG_ON(xa, xa_to_node(xa_head(xa))->nr_values != 4); + rcu_read_unlock(); + + /* We can erase multiple values with a single store */ + xa_store_order(xa, 0, BITS_PER_LONG - 1, NULL, GFP_KERNEL); + XA_BUG_ON(xa, !xa_empty(xa)); + + /* Even when the first slot is empty but the others aren't */ + xa_store_index(xa, 1, GFP_KERNEL); + xa_store_index(xa, 2, GFP_KERNEL); + xa_store_order(xa, 0, 2, NULL, GFP_KERNEL); + XA_BUG_ON(xa, !xa_empty(xa)); + + for (i = 0; i < max_order; i++) { + for (j = 0; j < max_order; j++) { + xa_store_order(xa, 0, i, xa_mk_index(i), GFP_KERNEL); + xa_store_order(xa, 0, j, xa_mk_index(j), GFP_KERNEL); + + for (k = 0; k < max_order; k++) { + void *entry = xa_load(xa, (1UL << k) - 1); + if ((i < k) && (j < k)) + XA_BUG_ON(xa, entry != NULL); + else + XA_BUG_ON(xa, entry != xa_mk_index(j)); + } + + xa_erase(xa, 0); + XA_BUG_ON(xa, !xa_empty(xa)); + } + } + + for (i = 0; i < 20; i++) { + check_multi_store_1(xa, 200, i); + check_multi_store_1(xa, 0, i); + check_multi_store_1(xa, (1UL << i) + 1, i); + } + check_multi_store_2(xa, 4095, 9); + + for (i = 1; i < 20; i++) { + check_multi_store_3(xa, 0, i); + check_multi_store_3(xa, 1UL << i, i); + } +#endif +} + +static DEFINE_XARRAY_ALLOC(xa0); + +static noinline void check_xa_alloc(void) +{ + int i; + u32 id; + + /* An empty array should assign 0 to the first alloc */ + xa_alloc_index(&xa0, 0, GFP_KERNEL); + + /* Erasing it should make the array empty again */ + xa_erase_index(&xa0, 0); + XA_BUG_ON(&xa0, !xa_empty(&xa0)); + + /* And it should assign 0 again */ + xa_alloc_index(&xa0, 0, GFP_KERNEL); + + /* The next assigned ID should be 1 */ + xa_alloc_index(&xa0, 1, GFP_KERNEL); + xa_erase_index(&xa0, 1); + + /* Storing a value should mark it used */ + xa_store_index(&xa0, 1, GFP_KERNEL); + xa_alloc_index(&xa0, 2, GFP_KERNEL); + + /* If we then erase 0, it should be free */ + xa_erase_index(&xa0, 0); + xa_alloc_index(&xa0, 0, GFP_KERNEL); + + xa_erase_index(&xa0, 1); + xa_erase_index(&xa0, 2); + + for (i = 1; i < 5000; i++) { + xa_alloc_index(&xa0, i, GFP_KERNEL); + } + + xa_destroy(&xa0); + + id = 0xfffffffeU; + XA_BUG_ON(&xa0, xa_alloc(&xa0, &id, UINT_MAX, xa_mk_index(id), + GFP_KERNEL) != 0); + XA_BUG_ON(&xa0, id != 0xfffffffeU); + XA_BUG_ON(&xa0, xa_alloc(&xa0, &id, UINT_MAX, xa_mk_index(id), + GFP_KERNEL) != 
0); + XA_BUG_ON(&xa0, id != 0xffffffffU); + XA_BUG_ON(&xa0, xa_alloc(&xa0, &id, UINT_MAX, xa_mk_index(id), + GFP_KERNEL) != -ENOSPC); + XA_BUG_ON(&xa0, id != 0xffffffffU); + xa_destroy(&xa0); + + id = 10; + XA_BUG_ON(&xa0, xa_alloc(&xa0, &id, 5, xa_mk_index(id), + GFP_KERNEL) != -ENOSPC); + XA_BUG_ON(&xa0, xa_store_index(&xa0, 3, GFP_KERNEL) != 0); + XA_BUG_ON(&xa0, xa_alloc(&xa0, &id, 5, xa_mk_index(id), + GFP_KERNEL) != -ENOSPC); + xa_erase_index(&xa0, 3); + XA_BUG_ON(&xa0, !xa_empty(&xa0)); +} + +static noinline void __check_store_iter(struct xarray *xa, unsigned long start, + unsigned int order, unsigned int present) +{ + XA_STATE_ORDER(xas, xa, start, order); + void *entry; + unsigned int count = 0; + +retry: + xas_lock(&xas); + xas_for_each_conflict(&xas, entry) { + XA_BUG_ON(xa, !xa_is_value(entry)); + XA_BUG_ON(xa, entry < xa_mk_index(start)); + XA_BUG_ON(xa, entry > xa_mk_index(start + (1UL << order) - 1)); + count++; + } + xas_store(&xas, xa_mk_index(start)); + xas_unlock(&xas); + if (xas_nomem(&xas, GFP_KERNEL)) { + count = 0; + goto retry; + } + XA_BUG_ON(xa, xas_error(&xas)); + XA_BUG_ON(xa, count != present); + XA_BUG_ON(xa, xa_load(xa, start) != xa_mk_index(start)); + XA_BUG_ON(xa, xa_load(xa, start + (1UL << order) - 1) != + xa_mk_index(start)); + xa_erase_index(xa, start); +} + +static noinline void check_store_iter(struct xarray *xa) +{ + unsigned int i, j; + unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 20 : 1; + + for (i = 0; i < max_order; i++) { + unsigned int min = 1 << i; + unsigned int max = (2 << i) - 1; + __check_store_iter(xa, 0, i, 0); + XA_BUG_ON(xa, !xa_empty(xa)); + __check_store_iter(xa, min, i, 0); + XA_BUG_ON(xa, !xa_empty(xa)); + + xa_store_index(xa, min, GFP_KERNEL); + __check_store_iter(xa, min, i, 1); + XA_BUG_ON(xa, !xa_empty(xa)); + xa_store_index(xa, max, GFP_KERNEL); + __check_store_iter(xa, min, i, 1); + XA_BUG_ON(xa, !xa_empty(xa)); + + for (j = 0; j < min; j++) + xa_store_index(xa, j, GFP_KERNEL); + __check_store_iter(xa, 0, i, min); + XA_BUG_ON(xa, !xa_empty(xa)); + for (j = 0; j < min; j++) + xa_store_index(xa, min + j, GFP_KERNEL); + __check_store_iter(xa, min, i, min); + XA_BUG_ON(xa, !xa_empty(xa)); + } +#ifdef CONFIG_XARRAY_MULTI + xa_store_index(xa, 63, GFP_KERNEL); + xa_store_index(xa, 65, GFP_KERNEL); + __check_store_iter(xa, 64, 2, 1); + xa_erase_index(xa, 63); +#endif + XA_BUG_ON(xa, !xa_empty(xa)); +} + +static noinline void check_multi_find(struct xarray *xa) +{ +#ifdef CONFIG_XARRAY_MULTI + unsigned long index; + + xa_store_order(xa, 12, 2, xa_mk_value(12), GFP_KERNEL); + XA_BUG_ON(xa, xa_store_index(xa, 16, GFP_KERNEL) != NULL); + + index = 0; + XA_BUG_ON(xa, xa_find(xa, &index, ULONG_MAX, XA_PRESENT) != + xa_mk_value(12)); + XA_BUG_ON(xa, index != 12); + index = 13; + XA_BUG_ON(xa, xa_find(xa, &index, ULONG_MAX, XA_PRESENT) != + xa_mk_value(12)); + XA_BUG_ON(xa, (index < 12) || (index >= 16)); + XA_BUG_ON(xa, xa_find_after(xa, &index, ULONG_MAX, XA_PRESENT) != + xa_mk_value(16)); + XA_BUG_ON(xa, index != 16); + + xa_erase_index(xa, 12); + xa_erase_index(xa, 16); + XA_BUG_ON(xa, !xa_empty(xa)); +#endif +} + +static noinline void check_multi_find_2(struct xarray *xa) +{ + unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 
10 : 1; + unsigned int i, j; + void *entry; + + for (i = 0; i < max_order; i++) { + unsigned long index = 1UL << i; + for (j = 0; j < index; j++) { + XA_STATE(xas, xa, j + index); + xa_store_index(xa, index - 1, GFP_KERNEL); + xa_store_order(xa, index, i, xa_mk_index(index), + GFP_KERNEL); + rcu_read_lock(); + xas_for_each(&xas, entry, ULONG_MAX) { + xa_erase_index(xa, index); + } + rcu_read_unlock(); + xa_erase_index(xa, index - 1); + XA_BUG_ON(xa, !xa_empty(xa)); + } + } +} + +static noinline void check_find_1(struct xarray *xa) +{ + unsigned long i, j, k; + + XA_BUG_ON(xa, !xa_empty(xa)); + + /* + * Check xa_find with all pairs between 0 and 99 inclusive, + * starting at every index between 0 and 99 + */ + for (i = 0; i < 100; i++) { + XA_BUG_ON(xa, xa_store_index(xa, i, GFP_KERNEL) != NULL); + xa_set_mark(xa, i, XA_MARK_0); + for (j = 0; j < i; j++) { + XA_BUG_ON(xa, xa_store_index(xa, j, GFP_KERNEL) != + NULL); + xa_set_mark(xa, j, XA_MARK_0); + for (k = 0; k < 100; k++) { + unsigned long index = k; + void *entry = xa_find(xa, &index, ULONG_MAX, + XA_PRESENT); + if (k <= j) + XA_BUG_ON(xa, index != j); + else if (k <= i) + XA_BUG_ON(xa, index != i); + else + XA_BUG_ON(xa, entry != NULL); + + index = k; + entry = xa_find(xa, &index, ULONG_MAX, + XA_MARK_0); + if (k <= j) + XA_BUG_ON(xa, index != j); + else if (k <= i) + XA_BUG_ON(xa, index != i); + else + XA_BUG_ON(xa, entry != NULL); + } + xa_erase_index(xa, j); + XA_BUG_ON(xa, xa_get_mark(xa, j, XA_MARK_0)); + XA_BUG_ON(xa, !xa_get_mark(xa, i, XA_MARK_0)); + } + xa_erase_index(xa, i); + XA_BUG_ON(xa, xa_get_mark(xa, i, XA_MARK_0)); + } + XA_BUG_ON(xa, !xa_empty(xa)); +} + +static noinline void check_find_2(struct xarray *xa) +{ + void *entry; + unsigned long i, j, index; + + xa_for_each(xa, index, entry) { + XA_BUG_ON(xa, true); + } + + for (i = 0; i < 1024; i++) { + xa_store_index(xa, index, GFP_KERNEL); + j = 0; + xa_for_each(xa, index, entry) { + XA_BUG_ON(xa, xa_mk_index(index) != entry); + XA_BUG_ON(xa, index != j++); + } + } + + xa_destroy(xa); +} + +static noinline void check_find_3(struct xarray *xa) +{ + XA_STATE(xas, xa, 0); + unsigned long i, j, k; + void *entry; + + for (i = 0; i < 100; i++) { + for (j = 0; j < 100; j++) { + rcu_read_lock(); + for (k = 0; k < 100; k++) { + xas_set(&xas, j); + xas_for_each_marked(&xas, entry, k, XA_MARK_0) + ; + if (j > k) + XA_BUG_ON(xa, + xas.xa_node != XAS_RESTART); + } + rcu_read_unlock(); + } + xa_store_index(xa, i, GFP_KERNEL); + xa_set_mark(xa, i, XA_MARK_0); + } + xa_destroy(xa); +} + +static noinline void check_find(struct xarray *xa) +{ + check_find_1(xa); + check_find_2(xa); + check_find_3(xa); + check_multi_find(xa); + check_multi_find_2(xa); +} + +/* See find_swap_entry() in mm/shmem.c */ +static noinline unsigned long xa_find_entry(struct xarray *xa, void *item) +{ + XA_STATE(xas, xa, 0); + unsigned int checked = 0; + void *entry; + + rcu_read_lock(); + xas_for_each(&xas, entry, ULONG_MAX) { + if (xas_retry(&xas, entry)) + continue; + if (entry == item) + break; + checked++; + if ((checked % 4) != 0) + continue; + xas_pause(&xas); + } + rcu_read_unlock(); + + return entry ? 
xas.xa_index : -1; +} + +static noinline void check_find_entry(struct xarray *xa) +{ +#ifdef CONFIG_XARRAY_MULTI + unsigned int order; + unsigned long offset, index; + + for (order = 0; order < 20; order++) { + for (offset = 0; offset < (1UL << (order + 3)); + offset += (1UL << order)) { + for (index = 0; index < (1UL << (order + 5)); + index += (1UL << order)) { + xa_store_order(xa, index, order, + xa_mk_index(index), GFP_KERNEL); + XA_BUG_ON(xa, xa_load(xa, index) != + xa_mk_index(index)); + XA_BUG_ON(xa, xa_find_entry(xa, + xa_mk_index(index)) != index); + } + XA_BUG_ON(xa, xa_find_entry(xa, xa) != -1); + xa_destroy(xa); + } + } +#endif + + XA_BUG_ON(xa, xa_find_entry(xa, xa) != -1); + xa_store_index(xa, ULONG_MAX, GFP_KERNEL); + XA_BUG_ON(xa, xa_find_entry(xa, xa) != -1); + XA_BUG_ON(xa, xa_find_entry(xa, xa_mk_index(ULONG_MAX)) != -1); + xa_erase_index(xa, ULONG_MAX); + XA_BUG_ON(xa, !xa_empty(xa)); +} + +static noinline void check_move_small(struct xarray *xa, unsigned long idx) +{ + XA_STATE(xas, xa, 0); + unsigned long i; + + xa_store_index(xa, 0, GFP_KERNEL); + xa_store_index(xa, idx, GFP_KERNEL); + + rcu_read_lock(); + for (i = 0; i < idx * 4; i++) { + void *entry = xas_next(&xas); + if (i <= idx) + XA_BUG_ON(xa, xas.xa_node == XAS_RESTART); + XA_BUG_ON(xa, xas.xa_index != i); + if (i == 0 || i == idx) + XA_BUG_ON(xa, entry != xa_mk_index(i)); + else + XA_BUG_ON(xa, entry != NULL); + } + xas_next(&xas); + XA_BUG_ON(xa, xas.xa_index != i); + + do { + void *entry = xas_prev(&xas); + i--; + if (i <= idx) + XA_BUG_ON(xa, xas.xa_node == XAS_RESTART); + XA_BUG_ON(xa, xas.xa_index != i); + if (i == 0 || i == idx) + XA_BUG_ON(xa, entry != xa_mk_index(i)); + else + XA_BUG_ON(xa, entry != NULL); + } while (i > 0); + + xas_set(&xas, ULONG_MAX); + XA_BUG_ON(xa, xas_next(&xas) != NULL); + XA_BUG_ON(xa, xas.xa_index != ULONG_MAX); + XA_BUG_ON(xa, xas_next(&xas) != xa_mk_value(0)); + XA_BUG_ON(xa, xas.xa_index != 0); + XA_BUG_ON(xa, xas_prev(&xas) != NULL); + XA_BUG_ON(xa, xas.xa_index != ULONG_MAX); + rcu_read_unlock(); + + xa_erase_index(xa, 0); + xa_erase_index(xa, idx); + XA_BUG_ON(xa, !xa_empty(xa)); +} + +static noinline void check_move(struct xarray *xa) +{ + XA_STATE(xas, xa, (1 << 16) - 1); + unsigned long i; + + for (i = 0; i < (1 << 16); i++) + XA_BUG_ON(xa, xa_store_index(xa, i, GFP_KERNEL) != NULL); + + rcu_read_lock(); + do { + void *entry = xas_prev(&xas); + i--; + XA_BUG_ON(xa, entry != xa_mk_index(i)); + XA_BUG_ON(xa, i != xas.xa_index); + } while (i != 0); + + XA_BUG_ON(xa, xas_prev(&xas) != NULL); + XA_BUG_ON(xa, xas.xa_index != ULONG_MAX); + + do { + void *entry = xas_next(&xas); + XA_BUG_ON(xa, entry != xa_mk_index(i)); + XA_BUG_ON(xa, i != xas.xa_index); + i++; + } while (i < (1 << 16)); + rcu_read_unlock(); + + for (i = (1 << 8); i < (1 << 15); i++) + xa_erase_index(xa, i); + + i = xas.xa_index; + + rcu_read_lock(); + do { + void *entry = xas_prev(&xas); + i--; + if ((i < (1 << 8)) || (i >= (1 << 15))) + XA_BUG_ON(xa, entry != xa_mk_index(i)); + else + XA_BUG_ON(xa, entry != NULL); + XA_BUG_ON(xa, i != xas.xa_index); + } while (i != 0); + + XA_BUG_ON(xa, xas_prev(&xas) != NULL); + XA_BUG_ON(xa, xas.xa_index != ULONG_MAX); + + do { + void *entry = xas_next(&xas); + if ((i < (1 << 8)) || (i >= (1 << 15))) + XA_BUG_ON(xa, entry != xa_mk_index(i)); + else + XA_BUG_ON(xa, entry != NULL); + XA_BUG_ON(xa, i != xas.xa_index); + i++; + } while (i < (1 << 16)); + rcu_read_unlock(); + + xa_destroy(xa); + + for (i = 0; i < 16; i++) + check_move_small(xa, 1UL << i); + + for (i = 2; i 
< 16; i++) + check_move_small(xa, (1UL << i) - 1); +} + +static noinline void xa_store_many_order(struct xarray *xa, + unsigned long index, unsigned order) +{ + XA_STATE_ORDER(xas, xa, index, order); + unsigned int i = 0; + + do { + xas_lock(&xas); + XA_BUG_ON(xa, xas_find_conflict(&xas)); + xas_create_range(&xas); + if (xas_error(&xas)) + goto unlock; + for (i = 0; i < (1U << order); i++) { + XA_BUG_ON(xa, xas_store(&xas, xa_mk_index(index + i))); + xas_next(&xas); + } +unlock: + xas_unlock(&xas); + } while (xas_nomem(&xas, GFP_KERNEL)); + + XA_BUG_ON(xa, xas_error(&xas)); +} + +static noinline void check_create_range_1(struct xarray *xa, + unsigned long index, unsigned order) +{ + unsigned long i; + + xa_store_many_order(xa, index, order); + for (i = index; i < index + (1UL << order); i++) + xa_erase_index(xa, i); + XA_BUG_ON(xa, !xa_empty(xa)); +} + +static noinline void check_create_range_2(struct xarray *xa, unsigned order) +{ + unsigned long i; + unsigned long nr = 1UL << order; + + for (i = 0; i < nr * nr; i += nr) + xa_store_many_order(xa, i, order); + for (i = 0; i < nr * nr; i++) + xa_erase_index(xa, i); + XA_BUG_ON(xa, !xa_empty(xa)); +} + +static noinline void check_create_range_3(void) +{ + XA_STATE(xas, NULL, 0); + xas_set_err(&xas, -EEXIST); + xas_create_range(&xas); + XA_BUG_ON(NULL, xas_error(&xas) != -EEXIST); +} + +static noinline void check_create_range_4(struct xarray *xa, + unsigned long index, unsigned order) +{ + XA_STATE_ORDER(xas, xa, index, order); + unsigned long base = xas.xa_index; + unsigned long i = 0; + + xa_store_index(xa, index, GFP_KERNEL); + do { + xas_lock(&xas); + xas_create_range(&xas); + if (xas_error(&xas)) + goto unlock; + for (i = 0; i < (1UL << order); i++) { + void *old = xas_store(&xas, xa_mk_index(base + i)); + if (xas.xa_index == index) + XA_BUG_ON(xa, old != xa_mk_index(base + i)); + else + XA_BUG_ON(xa, old != NULL); + xas_next(&xas); + } +unlock: + xas_unlock(&xas); + } while (xas_nomem(&xas, GFP_KERNEL)); + + XA_BUG_ON(xa, xas_error(&xas)); + + for (i = base; i < base + (1UL << order); i++) + xa_erase_index(xa, i); + XA_BUG_ON(xa, !xa_empty(xa)); +} + +static noinline void check_create_range(struct xarray *xa) +{ + unsigned int order; + unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 
12 : 1; + + for (order = 0; order < max_order; order++) { + check_create_range_1(xa, 0, order); + check_create_range_1(xa, 1U << order, order); + check_create_range_1(xa, 2U << order, order); + check_create_range_1(xa, 3U << order, order); + check_create_range_1(xa, 1U << 24, order); + if (order < 10) + check_create_range_2(xa, order); + + check_create_range_4(xa, 0, order); + check_create_range_4(xa, 1U << order, order); + check_create_range_4(xa, 2U << order, order); + check_create_range_4(xa, 3U << order, order); + check_create_range_4(xa, 1U << 24, order); + + check_create_range_4(xa, 1, order); + check_create_range_4(xa, (1U << order) + 1, order); + check_create_range_4(xa, (2U << order) + 1, order); + check_create_range_4(xa, (2U << order) - 1, order); + check_create_range_4(xa, (3U << order) + 1, order); + check_create_range_4(xa, (3U << order) - 1, order); + check_create_range_4(xa, (1U << 24) + 1, order); + } + + check_create_range_3(); +} + +static noinline void __check_store_range(struct xarray *xa, unsigned long first, + unsigned long last) +{ +#ifdef CONFIG_XARRAY_MULTI + xa_store_range(xa, first, last, xa_mk_index(first), GFP_KERNEL); + + XA_BUG_ON(xa, xa_load(xa, first) != xa_mk_index(first)); + XA_BUG_ON(xa, xa_load(xa, last) != xa_mk_index(first)); + XA_BUG_ON(xa, xa_load(xa, first - 1) != NULL); + XA_BUG_ON(xa, xa_load(xa, last + 1) != NULL); + + xa_store_range(xa, first, last, NULL, GFP_KERNEL); +#endif + + XA_BUG_ON(xa, !xa_empty(xa)); +} + +static noinline void check_store_range(struct xarray *xa) +{ + unsigned long i, j; + + for (i = 0; i < 128; i++) { + for (j = i; j < 128; j++) { + __check_store_range(xa, i, j); + __check_store_range(xa, 128 + i, 128 + j); + __check_store_range(xa, 4095 + i, 4095 + j); + __check_store_range(xa, 4096 + i, 4096 + j); + __check_store_range(xa, 123456 + i, 123456 + j); + __check_store_range(xa, (1 << 24) + i, (1 << 24) + j); + } + } +} + +static void check_align_1(struct xarray *xa, char *name) +{ + int i; + unsigned int id; + unsigned long index; + void *entry; + + for (i = 0; i < 8; i++) { + id = 0; + XA_BUG_ON(xa, xa_alloc(xa, &id, UINT_MAX, name + i, GFP_KERNEL) + != 0); + XA_BUG_ON(xa, id != i); + } + xa_for_each(xa, index, entry) + XA_BUG_ON(xa, xa_is_err(entry)); + xa_destroy(xa); +} + +static noinline void check_align(struct xarray *xa) +{ + char name[] = "Motorola 68000"; + + check_align_1(xa, name); + check_align_1(xa, name + 1); + check_align_1(xa, name + 2); + check_align_1(xa, name + 3); +// check_align_2(xa, name); +} + +static LIST_HEAD(shadow_nodes); + +static void test_update_node(struct xa_node *node) +{ + if (node->count && node->count == node->nr_values) { + if (list_empty(&node->private_list)) + list_add(&shadow_nodes, &node->private_list); + } else { + if (!list_empty(&node->private_list)) + list_del_init(&node->private_list); + } +} + +static noinline void shadow_remove(struct xarray *xa) +{ + struct xa_node *node; + + xa_lock(xa); + while ((node = list_first_entry_or_null(&shadow_nodes, + struct xa_node, private_list))) { + XA_STATE(xas, node->array, 0); + XA_BUG_ON(xa, node->array != xa); + list_del_init(&node->private_list); + xas.xa_node = xa_parent_locked(node->array, node); + xas.xa_offset = node->offset; + xas.xa_shift = node->shift + XA_CHUNK_SHIFT; + xas_set_update(&xas, test_update_node); + xas_store(&xas, NULL); + } + xa_unlock(xa); +} + +static noinline void check_workingset(struct xarray *xa, unsigned long index) +{ + XA_STATE(xas, xa, index); + xas_set_update(&xas, test_update_node); + + do { + 
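+ /*
+ * Store two adjacent entries in one locked section; if either
+ * xas_store() runs out of memory, xas_nomem() below allocates
+ * a node and the whole block is retried.
+ */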
xas_lock(&xas); + xas_store(&xas, xa_mk_value(0)); + xas_next(&xas); + xas_store(&xas, xa_mk_value(1)); + xas_unlock(&xas); + } while (xas_nomem(&xas, GFP_KERNEL)); + + XA_BUG_ON(xa, list_empty(&shadow_nodes)); + + xas_lock(&xas); + xas_next(&xas); + xas_store(&xas, &xas); + XA_BUG_ON(xa, !list_empty(&shadow_nodes)); + + xas_store(&xas, xa_mk_value(2)); + xas_unlock(&xas); + XA_BUG_ON(xa, list_empty(&shadow_nodes)); + + shadow_remove(xa); + XA_BUG_ON(xa, !list_empty(&shadow_nodes)); + XA_BUG_ON(xa, !xa_empty(xa)); +} + +/* + * Check that the pointer / value / sibling entries are accounted the + * way we expect them to be. + */ +static noinline void check_account(struct xarray *xa) +{ +#ifdef CONFIG_XARRAY_MULTI + unsigned int order; + + for (order = 1; order < 12; order++) { + XA_STATE(xas, xa, 1 << order); + + xa_store_order(xa, 0, order, xa, GFP_KERNEL); + rcu_read_lock(); + xas_load(&xas); + XA_BUG_ON(xa, xas.xa_node->count == 0); + XA_BUG_ON(xa, xas.xa_node->count > (1 << order)); + XA_BUG_ON(xa, xas.xa_node->nr_values != 0); + rcu_read_unlock(); + + xa_store_order(xa, 1 << order, order, xa_mk_index(1UL << order), + GFP_KERNEL); + XA_BUG_ON(xa, xas.xa_node->count != xas.xa_node->nr_values * 2); + + xa_erase(xa, 1 << order); + XA_BUG_ON(xa, xas.xa_node->nr_values != 0); + + xa_erase(xa, 0); + XA_BUG_ON(xa, !xa_empty(xa)); + } +#endif +} + +static noinline void check_destroy(struct xarray *xa) +{ + unsigned long index; + + XA_BUG_ON(xa, !xa_empty(xa)); + + /* Destroying an empty array is a no-op */ + xa_destroy(xa); + XA_BUG_ON(xa, !xa_empty(xa)); + + /* Destroying an array with a single entry */ + for (index = 0; index < 1000; index++) { + xa_store_index(xa, index, GFP_KERNEL); + XA_BUG_ON(xa, xa_empty(xa)); + xa_destroy(xa); + XA_BUG_ON(xa, !xa_empty(xa)); + } + + /* Destroying an array with a single entry at ULONG_MAX */ + xa_store(xa, ULONG_MAX, xa, GFP_KERNEL); + XA_BUG_ON(xa, xa_empty(xa)); + xa_destroy(xa); + XA_BUG_ON(xa, !xa_empty(xa)); + +#ifdef CONFIG_XARRAY_MULTI + /* Destroying an array with a multi-index entry */ + xa_store_order(xa, 1 << 11, 11, xa, GFP_KERNEL); + XA_BUG_ON(xa, xa_empty(xa)); + xa_destroy(xa); + XA_BUG_ON(xa, !xa_empty(xa)); +#endif +} + +static DEFINE_XARRAY(array); + +static int xarray_checks(void) +{ + check_xa_err(&array); + check_xas_retry(&array); + check_xa_load(&array); + check_xa_mark(&array); + check_xa_shrink(&array); + check_xas_erase(&array); + check_cmpxchg(&array); + check_reserve(&array); + check_multi_store(&array); + check_xa_alloc(); + check_find(&array); + check_find_entry(&array); + check_account(&array); + check_destroy(&array); + check_move(&array); + check_create_range(&array); + check_store_range(&array); + check_store_iter(&array); + check_align(&xa0); + + check_workingset(&array, 0); + check_workingset(&array, 64); + check_workingset(&array, 4096); + + printk("XArray: %u of %u tests passed\n", tests_passed, tests_run); + return (tests_run == tests_passed) ? 
0 : -EINVAL; +} + +static void xarray_exit(void) +{ +} + +module_init(xarray_checks); +module_exit(xarray_exit); +MODULE_AUTHOR("Matthew Wilcox <willy@infradead.org>"); +MODULE_LICENSE("GPL"); diff --git a/lib/ubsan.c b/lib/ubsan.c index 59fee96c29a0..e4162f59a81c 100644 --- a/lib/ubsan.c +++ b/lib/ubsan.c @@ -427,8 +427,7 @@ void __ubsan_handle_shift_out_of_bounds(struct shift_out_of_bounds_data *data, EXPORT_SYMBOL(__ubsan_handle_shift_out_of_bounds); -void __noreturn -__ubsan_handle_builtin_unreachable(struct unreachable_data *data) +void __ubsan_handle_builtin_unreachable(struct unreachable_data *data) { unsigned long flags; diff --git a/lib/usercopy.c b/lib/usercopy.c index 3744b2a8e591..c2bfbcaeb3dc 100644 --- a/lib/usercopy.c +++ b/lib/usercopy.c @@ -8,7 +8,7 @@ unsigned long _copy_from_user(void *to, const void __user *from, unsigned long n { unsigned long res = n; might_fault(); - if (likely(access_ok(VERIFY_READ, from, n))) { + if (likely(access_ok(from, n))) { kasan_check_write(to, n); res = raw_copy_from_user(to, from, n); } @@ -23,7 +23,7 @@ EXPORT_SYMBOL(_copy_from_user); unsigned long _copy_to_user(void __user *to, const void *from, unsigned long n) { might_fault(); - if (likely(access_ok(VERIFY_WRITE, to, n))) { + if (likely(access_ok(to, n))) { kasan_check_read(from, n); n = raw_copy_to_user(to, from, n); } diff --git a/lib/vsprintf.c b/lib/vsprintf.c index 458600f4fbc5..791b6fa36905 100644 --- a/lib/vsprintf.c +++ b/lib/vsprintf.c @@ -17,6 +17,7 @@ */ #include <stdarg.h> +#include <linux/build_bug.h> #include <linux/clk.h> #include <linux/clk-provider.h> #include <linux/module.h> /* for KSYM_SYMBOL_LEN */ @@ -30,6 +31,7 @@ #include <linux/ioport.h> #include <linux/dcache.h> #include <linux/cred.h> +#include <linux/rtc.h> #include <linux/uuid.h> #include <linux/of.h> #include <net/addrconf.h> @@ -404,6 +406,8 @@ struct printf_spec { unsigned int base:8; /* number base, 8, 10 or 16 only */ signed int precision:16; /* # of digits/chars */ } __packed; +static_assert(sizeof(struct printf_spec) == 8); + #define FIELD_WIDTH_MAX ((1 << 23) - 1) #define PRECISION_MAX ((1 << 15) - 1) @@ -421,8 +425,6 @@ char *number(char *buf, char *end, unsigned long long num, int field_width = spec.field_width; int precision = spec.precision; - BUILD_BUG_ON(sizeof(struct printf_spec) != 8); - /* locase = 0 or 0x20. ORing digits or letters with 'locase' * produces same digits or (maybe lowercased) letters */ locase = (spec.flags & SMALL); @@ -822,6 +824,20 @@ static const struct printf_spec default_dec_spec = { .precision = -1, }; +static const struct printf_spec default_dec02_spec = { + .base = 10, + .field_width = 2, + .precision = -1, + .flags = ZEROPAD, +}; + +static const struct printf_spec default_dec04_spec = { + .base = 10, + .field_width = 4, + .precision = -1, + .flags = ZEROPAD, +}; + static noinline_for_stack char *resource_string(char *buf, char *end, struct resource *res, struct printf_spec spec, const char *fmt) @@ -1550,6 +1566,87 @@ char *address_val(char *buf, char *end, const void *addr, const char *fmt) } static noinline_for_stack +char *date_str(char *buf, char *end, const struct rtc_time *tm, bool r) +{ + int year = tm->tm_year + (r ? 0 : 1900); + int mon = tm->tm_mon + (r ? 
0 : 1); + + buf = number(buf, end, year, default_dec04_spec); + if (buf < end) + *buf = '-'; + buf++; + + buf = number(buf, end, mon, default_dec02_spec); + if (buf < end) + *buf = '-'; + buf++; + + return number(buf, end, tm->tm_mday, default_dec02_spec); +} + +static noinline_for_stack +char *time_str(char *buf, char *end, const struct rtc_time *tm, bool r) +{ + buf = number(buf, end, tm->tm_hour, default_dec02_spec); + if (buf < end) + *buf = ':'; + buf++; + + buf = number(buf, end, tm->tm_min, default_dec02_spec); + if (buf < end) + *buf = ':'; + buf++; + + return number(buf, end, tm->tm_sec, default_dec02_spec); +} + +static noinline_for_stack +char *rtc_str(char *buf, char *end, const struct rtc_time *tm, const char *fmt) +{ + bool have_t = true, have_d = true; + bool raw = false; + int count = 2; + + switch (fmt[count]) { + case 'd': + have_t = false; + count++; + break; + case 't': + have_d = false; + count++; + break; + } + + raw = fmt[count] == 'r'; + + if (have_d) + buf = date_str(buf, end, tm, raw); + if (have_d && have_t) { + /* Respect ISO 8601 */ + if (buf < end) + *buf = 'T'; + buf++; + } + if (have_t) + buf = time_str(buf, end, tm, raw); + + return buf; +} + +static noinline_for_stack +char *time_and_date(char *buf, char *end, void *ptr, struct printf_spec spec, + const char *fmt) +{ + switch (fmt[1]) { + case 'R': + return rtc_str(buf, end, (const struct rtc_time *)ptr, fmt); + default: + return ptr_to_id(buf, end, ptr, spec); + } +} + +static noinline_for_stack char *clock(char *buf, char *end, struct clk *clk, struct printf_spec spec, const char *fmt) { @@ -1684,6 +1781,7 @@ char *device_node_string(char *buf, char *end, struct device_node *dn, fmt = "f"; for (pass = false; strspn(fmt,"fnpPFcC"); fmt++, pass = true) { + int precision; if (pass) { if (buf < end) *buf = ':'; @@ -1695,7 +1793,11 @@ char *device_node_string(char *buf, char *end, struct device_node *dn, buf = device_node_gen_full_name(dn, buf, end); break; case 'n': /* name */ - buf = string(buf, end, dn->name, str_spec); + p = kbasename(of_node_full_name(dn)); + precision = str_spec.precision; + str_spec.precision = strchrnul(p, '@') - p; + buf = string(buf, end, p, str_spec); + str_spec.precision = precision; break; case 'p': /* phandle */ buf = number(buf, end, (unsigned int)dn->phandle, num_spec); @@ -1823,6 +1925,8 @@ char *device_node_string(char *buf, char *end, struct device_node *dn, * - 'd[234]' For a dentry name (optionally 2-4 last components) * - 'D[234]' Same as 'd' but for a struct file * - 'g' For block_device name (gendisk + partition number) + * - 't[R][dt][r]' For time and date as represented: + * R struct rtc_time * - 'C' For a clock, it prints the name (Common Clock Framework) or address * (legacy clock framework) of the clock * - 'Cn' For a clock, it prints the name (Common Clock Framework) or address @@ -1946,6 +2050,8 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr, return address_val(buf, end, ptr, fmt); case 'd': return dentry_name(buf, end, ptr, spec, fmt); + case 't': + return time_and_date(buf, end, ptr, spec, fmt); case 'C': return clock(buf, end, ptr, spec, fmt); case 'D': diff --git a/lib/xarray.c b/lib/xarray.c new file mode 100644 index 000000000000..81c3171ddde9 --- /dev/null +++ b/lib/xarray.c @@ -0,0 +1,2031 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * XArray implementation + * Copyright (c) 2017 Microsoft Corporation + * Author: Matthew Wilcox <willy@infradead.org> + */ + +#include <linux/bitmap.h> +#include <linux/export.h> +#include <linux/list.h> 
+#include <linux/slab.h> +#include <linux/xarray.h> + +/* + * Coding conventions in this file: + * + * @xa is used to refer to the entire xarray. + * @xas is the 'xarray operation state'. It may be either a pointer to + * an xa_state, or an xa_state stored on the stack. This is an unfortunate + * ambiguity. + * @index is the index of the entry being operated on + * @mark is an xa_mark_t; a small number indicating one of the mark bits. + * @node refers to an xa_node; usually the primary one being operated on by + * this function. + * @offset is the index into the slots array inside an xa_node. + * @parent refers to the @xa_node closer to the head than @node. + * @entry refers to something stored in a slot in the xarray + */ + +static inline unsigned int xa_lock_type(const struct xarray *xa) +{ + return (__force unsigned int)xa->xa_flags & 3; +} + +static inline void xas_lock_type(struct xa_state *xas, unsigned int lock_type) +{ + if (lock_type == XA_LOCK_IRQ) + xas_lock_irq(xas); + else if (lock_type == XA_LOCK_BH) + xas_lock_bh(xas); + else + xas_lock(xas); +} + +static inline void xas_unlock_type(struct xa_state *xas, unsigned int lock_type) +{ + if (lock_type == XA_LOCK_IRQ) + xas_unlock_irq(xas); + else if (lock_type == XA_LOCK_BH) + xas_unlock_bh(xas); + else + xas_unlock(xas); +} + +static inline bool xa_track_free(const struct xarray *xa) +{ + return xa->xa_flags & XA_FLAGS_TRACK_FREE; +} + +static inline void xa_mark_set(struct xarray *xa, xa_mark_t mark) +{ + if (!(xa->xa_flags & XA_FLAGS_MARK(mark))) + xa->xa_flags |= XA_FLAGS_MARK(mark); +} + +static inline void xa_mark_clear(struct xarray *xa, xa_mark_t mark) +{ + if (xa->xa_flags & XA_FLAGS_MARK(mark)) + xa->xa_flags &= ~(XA_FLAGS_MARK(mark)); +} + +static inline unsigned long *node_marks(struct xa_node *node, xa_mark_t mark) +{ + return node->marks[(__force unsigned)mark]; +} + +static inline bool node_get_mark(struct xa_node *node, + unsigned int offset, xa_mark_t mark) +{ + return test_bit(offset, node_marks(node, mark)); +} + +/* returns true if the bit was set */ +static inline bool node_set_mark(struct xa_node *node, unsigned int offset, + xa_mark_t mark) +{ + return __test_and_set_bit(offset, node_marks(node, mark)); +} + +/* returns true if the bit was set */ +static inline bool node_clear_mark(struct xa_node *node, unsigned int offset, + xa_mark_t mark) +{ + return __test_and_clear_bit(offset, node_marks(node, mark)); +} + +static inline bool node_any_mark(struct xa_node *node, xa_mark_t mark) +{ + return !bitmap_empty(node_marks(node, mark), XA_CHUNK_SIZE); +} + +static inline void node_mark_all(struct xa_node *node, xa_mark_t mark) +{ + bitmap_fill(node_marks(node, mark), XA_CHUNK_SIZE); +} + +#define mark_inc(mark) do { \ + mark = (__force xa_mark_t)((__force unsigned)(mark) + 1); \ +} while (0) + +/* + * xas_squash_marks() - Merge all marks to the first entry + * @xas: Array operation state. + * + * Set a mark on the first entry if any entry has it set. Clear marks on + * all sibling entries. 
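+ *
+ * For example, if a multi-index entry occupies slots 4-7 and only slot 6
+ * has XA_MARK_0 set, squashing sets XA_MARK_0 on slot 4 and clears it on
+ * slots 5-7.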
+ */ +static void xas_squash_marks(const struct xa_state *xas) +{ + unsigned int mark = 0; + unsigned int limit = xas->xa_offset + xas->xa_sibs + 1; + + if (!xas->xa_sibs) + return; + + do { + unsigned long *marks = xas->xa_node->marks[mark]; + if (find_next_bit(marks, limit, xas->xa_offset + 1) == limit) + continue; + __set_bit(xas->xa_offset, marks); + bitmap_clear(marks, xas->xa_offset + 1, xas->xa_sibs); + } while (mark++ != (__force unsigned)XA_MARK_MAX); +} + +/* extracts the offset within this node from the index */ +static unsigned int get_offset(unsigned long index, struct xa_node *node) +{ + return (index >> node->shift) & XA_CHUNK_MASK; +} + +static void xas_set_offset(struct xa_state *xas) +{ + xas->xa_offset = get_offset(xas->xa_index, xas->xa_node); +} + +/* move the index either forwards (find) or backwards (sibling slot) */ +static void xas_move_index(struct xa_state *xas, unsigned long offset) +{ + unsigned int shift = xas->xa_node->shift; + xas->xa_index &= ~XA_CHUNK_MASK << shift; + xas->xa_index += offset << shift; +} + +static void xas_advance(struct xa_state *xas) +{ + xas->xa_offset++; + xas_move_index(xas, xas->xa_offset); +} + +static void *set_bounds(struct xa_state *xas) +{ + xas->xa_node = XAS_BOUNDS; + return NULL; +} + +/* + * Starts a walk. If the @xas is already valid, we assume that it's on + * the right path and just return where we've got to. If we're in an + * error state, return NULL. If the index is outside the current scope + * of the xarray, return NULL without changing @xas->xa_node. Otherwise + * set @xas->xa_node to NULL and return the current head of the array. + */ +static void *xas_start(struct xa_state *xas) +{ + void *entry; + + if (xas_valid(xas)) + return xas_reload(xas); + if (xas_error(xas)) + return NULL; + + entry = xa_head(xas->xa); + if (!xa_is_node(entry)) { + if (xas->xa_index) + return set_bounds(xas); + } else { + if ((xas->xa_index >> xa_to_node(entry)->shift) > XA_CHUNK_MASK) + return set_bounds(xas); + } + + xas->xa_node = NULL; + return entry; +} + +static void *xas_descend(struct xa_state *xas, struct xa_node *node) +{ + unsigned int offset = get_offset(xas->xa_index, node); + void *entry = xa_entry(xas->xa, node, offset); + + xas->xa_node = node; + if (xa_is_sibling(entry)) { + offset = xa_to_sibling(entry); + entry = xa_entry(xas->xa, node, offset); + } + + xas->xa_offset = offset; + return entry; +} + +/** + * xas_load() - Load an entry from the XArray (advanced). + * @xas: XArray operation state. + * + * Usually walks the @xas to the appropriate state to load the entry + * stored at xa_index. However, it will do nothing and return %NULL if + * @xas is in an error state. xas_load() will never expand the tree. + * + * If the xa_state is set up to operate on a multi-index entry, xas_load() + * may return %NULL or an internal entry, even if there are entries + * present within the range specified by @xas. + * + * Context: Any context. The caller should hold the xa_lock or the RCU lock. + * Return: Usually an entry in the XArray, but see description for exceptions. 
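+ *
+ * A typical lookup under RCU might look like this (sketch; assumes xa
+ * points at an initialised XArray and index is the index to look up):
+ *
+ * XA_STATE(xas, xa, index);
+ * void *entry;
+ *
+ * rcu_read_lock();
+ * entry = xas_load(&xas);
+ * rcu_read_unlock();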
+ */ +void *xas_load(struct xa_state *xas) +{ + void *entry = xas_start(xas); + + while (xa_is_node(entry)) { + struct xa_node *node = xa_to_node(entry); + + if (xas->xa_shift > node->shift) + break; + entry = xas_descend(xas, node); + if (node->shift == 0) + break; + } + return entry; +} +EXPORT_SYMBOL_GPL(xas_load); + +/* Move the radix tree node cache here */ +extern struct kmem_cache *radix_tree_node_cachep; +extern void radix_tree_node_rcu_free(struct rcu_head *head); + +#define XA_RCU_FREE ((struct xarray *)1) + +static void xa_node_free(struct xa_node *node) +{ + XA_NODE_BUG_ON(node, !list_empty(&node->private_list)); + node->array = XA_RCU_FREE; + call_rcu(&node->rcu_head, radix_tree_node_rcu_free); +} + +/* + * xas_destroy() - Free any resources allocated during the XArray operation. + * @xas: XArray operation state. + * + * This function is now internal-only. + */ +static void xas_destroy(struct xa_state *xas) +{ + struct xa_node *node = xas->xa_alloc; + + if (!node) + return; + XA_NODE_BUG_ON(node, !list_empty(&node->private_list)); + kmem_cache_free(radix_tree_node_cachep, node); + xas->xa_alloc = NULL; +} + +/** + * xas_nomem() - Allocate memory if needed. + * @xas: XArray operation state. + * @gfp: Memory allocation flags. + * + * If we need to add new nodes to the XArray, we try to allocate memory + * with GFP_NOWAIT while holding the lock, which will usually succeed. + * If it fails, @xas is flagged as needing memory to continue. The caller + * should drop the lock and call xas_nomem(). If xas_nomem() succeeds, + * the caller should retry the operation. + * + * Forward progress is guaranteed as one node is allocated here and + * stored in the xa_state where it will be found by xas_alloc(). More + * nodes will likely be found in the slab allocator, but we do not tie + * them up here. + * + * Return: true if memory was needed, and was successfully allocated. + */ +bool xas_nomem(struct xa_state *xas, gfp_t gfp) +{ + if (xas->xa_node != XA_ERROR(-ENOMEM)) { + xas_destroy(xas); + return false; + } + xas->xa_alloc = kmem_cache_alloc(radix_tree_node_cachep, gfp); + if (!xas->xa_alloc) + return false; + XA_NODE_BUG_ON(xas->xa_alloc, !list_empty(&xas->xa_alloc->private_list)); + xas->xa_node = XAS_RESTART; + return true; +} +EXPORT_SYMBOL_GPL(xas_nomem); + +/* + * __xas_nomem() - Drop locks and allocate memory if needed. + * @xas: XArray operation state. + * @gfp: Memory allocation flags. + * + * Internal variant of xas_nomem(). + * + * Return: true if memory was needed, and was successfully allocated. 
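+ *
+ * Callers use the same retry loop as with xas_nomem(); __xa_store()
+ * below essentially does (mark handling elided):
+ *
+ * do {
+ * curr = xas_store(&xas, entry);
+ * } while (__xas_nomem(&xas, gfp));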
+ */ +static bool __xas_nomem(struct xa_state *xas, gfp_t gfp) + __must_hold(xas->xa->xa_lock) +{ + unsigned int lock_type = xa_lock_type(xas->xa); + + if (xas->xa_node != XA_ERROR(-ENOMEM)) { + xas_destroy(xas); + return false; + } + if (gfpflags_allow_blocking(gfp)) { + xas_unlock_type(xas, lock_type); + xas->xa_alloc = kmem_cache_alloc(radix_tree_node_cachep, gfp); + xas_lock_type(xas, lock_type); + } else { + xas->xa_alloc = kmem_cache_alloc(radix_tree_node_cachep, gfp); + } + if (!xas->xa_alloc) + return false; + XA_NODE_BUG_ON(xas->xa_alloc, !list_empty(&xas->xa_alloc->private_list)); + xas->xa_node = XAS_RESTART; + return true; +} + +static void xas_update(struct xa_state *xas, struct xa_node *node) +{ + if (xas->xa_update) + xas->xa_update(node); + else + XA_NODE_BUG_ON(node, !list_empty(&node->private_list)); +} + +static void *xas_alloc(struct xa_state *xas, unsigned int shift) +{ + struct xa_node *parent = xas->xa_node; + struct xa_node *node = xas->xa_alloc; + + if (xas_invalid(xas)) + return NULL; + + if (node) { + xas->xa_alloc = NULL; + } else { + node = kmem_cache_alloc(radix_tree_node_cachep, + GFP_NOWAIT | __GFP_NOWARN); + if (!node) { + xas_set_err(xas, -ENOMEM); + return NULL; + } + } + + if (parent) { + node->offset = xas->xa_offset; + parent->count++; + XA_NODE_BUG_ON(node, parent->count > XA_CHUNK_SIZE); + xas_update(xas, parent); + } + XA_NODE_BUG_ON(node, shift > BITS_PER_LONG); + XA_NODE_BUG_ON(node, !list_empty(&node->private_list)); + node->shift = shift; + node->count = 0; + node->nr_values = 0; + RCU_INIT_POINTER(node->parent, xas->xa_node); + node->array = xas->xa; + + return node; +} + +#ifdef CONFIG_XARRAY_MULTI +/* Returns the number of indices covered by a given xa_state */ +static unsigned long xas_size(const struct xa_state *xas) +{ + return (xas->xa_sibs + 1UL) << xas->xa_shift; +} +#endif + +/* + * Use this to calculate the maximum index that will need to be created + * in order to add the entry described by @xas. Because we cannot store a + * multiple-index entry at index 0, the calculation is a little more complex + * than you might expect. + */ +static unsigned long xas_max(struct xa_state *xas) +{ + unsigned long max = xas->xa_index; + +#ifdef CONFIG_XARRAY_MULTI + if (xas->xa_shift || xas->xa_sibs) { + unsigned long mask = xas_size(xas) - 1; + max |= mask; + if (mask == max) + max++; + } +#endif + + return max; +} + +/* The maximum index that can be contained in the array without expanding it */ +static unsigned long max_index(void *entry) +{ + if (!xa_is_node(entry)) + return 0; + return (XA_CHUNK_SIZE << xa_to_node(entry)->shift) - 1; +} + +static void xas_shrink(struct xa_state *xas) +{ + struct xarray *xa = xas->xa; + struct xa_node *node = xas->xa_node; + + for (;;) { + void *entry; + + XA_NODE_BUG_ON(node, node->count > XA_CHUNK_SIZE); + if (node->count != 1) + break; + entry = xa_entry_locked(xa, node, 0); + if (!entry) + break; + if (!xa_is_node(entry) && node->shift) + break; + xas->xa_node = XAS_BOUNDS; + + RCU_INIT_POINTER(xa->xa_head, entry); + if (xa_track_free(xa) && !node_get_mark(node, 0, XA_FREE_MARK)) + xa_mark_clear(xa, XA_FREE_MARK); + + node->count = 0; + node->nr_values = 0; + if (!xa_is_node(entry)) + RCU_INIT_POINTER(node->slots[0], XA_RETRY_ENTRY); + xas_update(xas, node); + xa_node_free(node); + if (!xa_is_node(entry)) + break; + node = xa_to_node(entry); + node->parent = NULL; + } +} + +/* + * xas_delete_node() - Attempt to delete an xa_node + * @xas: Array operation state. + * + * Attempts to delete the @xas->xa_node. 
This will fail if xa->node has + * a non-zero reference count. + */ +static void xas_delete_node(struct xa_state *xas) +{ + struct xa_node *node = xas->xa_node; + + for (;;) { + struct xa_node *parent; + + XA_NODE_BUG_ON(node, node->count > XA_CHUNK_SIZE); + if (node->count) + break; + + parent = xa_parent_locked(xas->xa, node); + xas->xa_node = parent; + xas->xa_offset = node->offset; + xa_node_free(node); + + if (!parent) { + xas->xa->xa_head = NULL; + xas->xa_node = XAS_BOUNDS; + return; + } + + parent->slots[xas->xa_offset] = NULL; + parent->count--; + XA_NODE_BUG_ON(parent, parent->count > XA_CHUNK_SIZE); + node = parent; + xas_update(xas, node); + } + + if (!node->parent) + xas_shrink(xas); +} + +/** + * xas_free_nodes() - Free this node and all nodes that it references + * @xas: Array operation state. + * @top: Node to free + * + * This node has been removed from the tree. We must now free it and all + * of its subnodes. There may be RCU walkers with references into the tree, + * so we must replace all entries with retry markers. + */ +static void xas_free_nodes(struct xa_state *xas, struct xa_node *top) +{ + unsigned int offset = 0; + struct xa_node *node = top; + + for (;;) { + void *entry = xa_entry_locked(xas->xa, node, offset); + + if (node->shift && xa_is_node(entry)) { + node = xa_to_node(entry); + offset = 0; + continue; + } + if (entry) + RCU_INIT_POINTER(node->slots[offset], XA_RETRY_ENTRY); + offset++; + while (offset == XA_CHUNK_SIZE) { + struct xa_node *parent; + + parent = xa_parent_locked(xas->xa, node); + offset = node->offset + 1; + node->count = 0; + node->nr_values = 0; + xas_update(xas, node); + xa_node_free(node); + if (node == top) + return; + node = parent; + } + } +} + +/* + * xas_expand adds nodes to the head of the tree until it has reached + * sufficient height to be able to contain @xas->xa_index + */ +static int xas_expand(struct xa_state *xas, void *head) +{ + struct xarray *xa = xas->xa; + struct xa_node *node = NULL; + unsigned int shift = 0; + unsigned long max = xas_max(xas); + + if (!head) { + if (max == 0) + return 0; + while ((max >> shift) >= XA_CHUNK_SIZE) + shift += XA_CHUNK_SHIFT; + return shift + XA_CHUNK_SHIFT; + } else if (xa_is_node(head)) { + node = xa_to_node(head); + shift = node->shift + XA_CHUNK_SHIFT; + } + xas->xa_node = NULL; + + while (max > max_index(head)) { + xa_mark_t mark = 0; + + XA_NODE_BUG_ON(node, shift > BITS_PER_LONG); + node = xas_alloc(xas, shift); + if (!node) + return -ENOMEM; + + node->count = 1; + if (xa_is_value(head)) + node->nr_values = 1; + RCU_INIT_POINTER(node->slots[0], head); + + /* Propagate the aggregated mark info to the new child */ + for (;;) { + if (xa_track_free(xa) && mark == XA_FREE_MARK) { + node_mark_all(node, XA_FREE_MARK); + if (!xa_marked(xa, XA_FREE_MARK)) { + node_clear_mark(node, 0, XA_FREE_MARK); + xa_mark_set(xa, XA_FREE_MARK); + } + } else if (xa_marked(xa, mark)) { + node_set_mark(node, 0, mark); + } + if (mark == XA_MARK_MAX) + break; + mark_inc(mark); + } + + /* + * Now that the new node is fully initialised, we can add + * it to the tree + */ + if (xa_is_node(head)) { + xa_to_node(head)->offset = 0; + rcu_assign_pointer(xa_to_node(head)->parent, node); + } + head = xa_mk_node(node); + rcu_assign_pointer(xa->xa_head, head); + xas_update(xas, node); + + shift += XA_CHUNK_SHIFT; + } + + xas->xa_node = node; + return shift; +} + +/* + * xas_create() - Create a slot to store an entry in. + * @xas: XArray operation state. 
+ * @allow_root: %true if we can store the entry in the root directly + * + * Most users will not need to call this function directly, as it is called + * by xas_store(). It is useful for doing conditional store operations + * (see the xa_cmpxchg() implementation for an example). + * + * Return: If the slot already existed, returns the contents of this slot. + * If the slot was newly created, returns %NULL. If it failed to create the + * slot, returns %NULL and indicates the error in @xas. + */ +static void *xas_create(struct xa_state *xas, bool allow_root) +{ + struct xarray *xa = xas->xa; + void *entry; + void __rcu **slot; + struct xa_node *node = xas->xa_node; + int shift; + unsigned int order = xas->xa_shift; + + if (xas_top(node)) { + entry = xa_head_locked(xa); + xas->xa_node = NULL; + shift = xas_expand(xas, entry); + if (shift < 0) + return NULL; + if (!shift && !allow_root) + shift = XA_CHUNK_SHIFT; + entry = xa_head_locked(xa); + slot = &xa->xa_head; + } else if (xas_error(xas)) { + return NULL; + } else if (node) { + unsigned int offset = xas->xa_offset; + + shift = node->shift; + entry = xa_entry_locked(xa, node, offset); + slot = &node->slots[offset]; + } else { + shift = 0; + entry = xa_head_locked(xa); + slot = &xa->xa_head; + } + + while (shift > order) { + shift -= XA_CHUNK_SHIFT; + if (!entry) { + node = xas_alloc(xas, shift); + if (!node) + break; + if (xa_track_free(xa)) + node_mark_all(node, XA_FREE_MARK); + rcu_assign_pointer(*slot, xa_mk_node(node)); + } else if (xa_is_node(entry)) { + node = xa_to_node(entry); + } else { + break; + } + entry = xas_descend(xas, node); + slot = &node->slots[xas->xa_offset]; + } + + return entry; +} + +/** + * xas_create_range() - Ensure that stores to this range will succeed + * @xas: XArray operation state. + * + * Creates all of the slots in the range covered by @xas. Sets @xas to + * create single-index entries and positions it at the beginning of the + * range. This is for the benefit of users which have not yet been + * converted to use multi-index entries. + */ +void xas_create_range(struct xa_state *xas) +{ + unsigned long index = xas->xa_index; + unsigned char shift = xas->xa_shift; + unsigned char sibs = xas->xa_sibs; + + xas->xa_index |= ((sibs + 1) << shift) - 1; + if (xas_is_node(xas) && xas->xa_node->shift == xas->xa_shift) + xas->xa_offset |= sibs; + xas->xa_shift = 0; + xas->xa_sibs = 0; + + for (;;) { + xas_create(xas, true); + if (xas_error(xas)) + goto restore; + if (xas->xa_index <= (index | XA_CHUNK_MASK)) + goto success; + xas->xa_index -= XA_CHUNK_SIZE; + + for (;;) { + struct xa_node *node = xas->xa_node; + xas->xa_node = xa_parent_locked(xas->xa, node); + xas->xa_offset = node->offset - 1; + if (node->offset != 0) + break; + } + } + +restore: + xas->xa_shift = shift; + xas->xa_sibs = sibs; + xas->xa_index = index; + return; +success: + xas->xa_index = index; + if (xas->xa_node) + xas_set_offset(xas); +} +EXPORT_SYMBOL_GPL(xas_create_range); + +static void update_node(struct xa_state *xas, struct xa_node *node, + int count, int values) +{ + if (!node || (!count && !values)) + return; + + node->count += count; + node->nr_values += values; + XA_NODE_BUG_ON(node, node->count > XA_CHUNK_SIZE); + XA_NODE_BUG_ON(node, node->nr_values > XA_CHUNK_SIZE); + xas_update(xas, node); + if (count < 0) + xas_delete_node(xas); +} + +/** + * xas_store() - Store this entry in the XArray. + * @xas: XArray operation state. + * @entry: New entry. 
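+ *
+ * The canonical store pattern, retrying the locked section whenever
+ * memory needs to be allocated, is (sketch):
+ *
+ * XA_STATE(xas, xa, index);
+ *
+ * do {
+ * xas_lock(&xas);
+ * xas_store(&xas, entry);
+ * xas_unlock(&xas);
+ * } while (xas_nomem(&xas, GFP_KERNEL));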
+ * + * If @xas is operating on a multi-index entry, the entry returned by this + * function is essentially meaningless (it may be an internal entry or it + * may be %NULL, even if there are non-NULL entries at some of the indices + * covered by the range). This is not a problem for any current users, + * and can be changed if needed. + * + * Return: The old entry at this index. + */ +void *xas_store(struct xa_state *xas, void *entry) +{ + struct xa_node *node; + void __rcu **slot = &xas->xa->xa_head; + unsigned int offset, max; + int count = 0; + int values = 0; + void *first, *next; + bool value = xa_is_value(entry); + + if (entry) + first = xas_create(xas, !xa_is_node(entry)); + else + first = xas_load(xas); + + if (xas_invalid(xas)) + return first; + node = xas->xa_node; + if (node && (xas->xa_shift < node->shift)) + xas->xa_sibs = 0; + if ((first == entry) && !xas->xa_sibs) + return first; + + next = first; + offset = xas->xa_offset; + max = xas->xa_offset + xas->xa_sibs; + if (node) { + slot = &node->slots[offset]; + if (xas->xa_sibs) + xas_squash_marks(xas); + } + if (!entry) + xas_init_marks(xas); + + for (;;) { + /* + * Must clear the marks before setting the entry to NULL, + * otherwise xas_for_each_marked may find a NULL entry and + * stop early. rcu_assign_pointer contains a release barrier + * so the mark clearing will appear to happen before the + * entry is set to NULL. + */ + rcu_assign_pointer(*slot, entry); + if (xa_is_node(next)) + xas_free_nodes(xas, xa_to_node(next)); + if (!node) + break; + count += !next - !entry; + values += !xa_is_value(first) - !value; + if (entry) { + if (offset == max) + break; + if (!xa_is_sibling(entry)) + entry = xa_mk_sibling(xas->xa_offset); + } else { + if (offset == XA_CHUNK_MASK) + break; + } + next = xa_entry_locked(xas->xa, node, ++offset); + if (!xa_is_sibling(next)) { + if (!entry && (offset > max)) + break; + first = next; + } + slot++; + } + + update_node(xas, node, count, values); + return first; +} +EXPORT_SYMBOL_GPL(xas_store); + +/** + * xas_get_mark() - Returns the state of this mark. + * @xas: XArray operation state. + * @mark: Mark number. + * + * Return: true if the mark is set, false if the mark is clear or @xas + * is in an error state. + */ +bool xas_get_mark(const struct xa_state *xas, xa_mark_t mark) +{ + if (xas_invalid(xas)) + return false; + if (!xas->xa_node) + return xa_marked(xas->xa, mark); + return node_get_mark(xas->xa_node, xas->xa_offset, mark); +} +EXPORT_SYMBOL_GPL(xas_get_mark); + +/** + * xas_set_mark() - Sets the mark on this entry and its parents. + * @xas: XArray operation state. + * @mark: Mark number. + * + * Sets the specified mark on this entry, and walks up the tree setting it + * on all the ancestor entries. Does nothing if @xas has not been walked to + * an entry, or is in an error state. + */ +void xas_set_mark(const struct xa_state *xas, xa_mark_t mark) +{ + struct xa_node *node = xas->xa_node; + unsigned int offset = xas->xa_offset; + + if (xas_invalid(xas)) + return; + + while (node) { + if (node_set_mark(node, offset, mark)) + return; + offset = node->offset; + node = xa_parent_locked(xas->xa, node); + } + + if (!xa_marked(xas->xa, mark)) + xa_mark_set(xas->xa, mark); +} +EXPORT_SYMBOL_GPL(xas_set_mark); + +/** + * xas_clear_mark() - Clears the mark on this entry and its parents. + * @xas: XArray operation state. + * @mark: Mark number. + * + * Clears the specified mark on this entry, and walks back to the head + * attempting to clear it on all the ancestor entries. 
Does nothing if + * @xas has not been walked to an entry, or is in an error state. + */ +void xas_clear_mark(const struct xa_state *xas, xa_mark_t mark) +{ + struct xa_node *node = xas->xa_node; + unsigned int offset = xas->xa_offset; + + if (xas_invalid(xas)) + return; + + while (node) { + if (!node_clear_mark(node, offset, mark)) + return; + if (node_any_mark(node, mark)) + return; + + offset = node->offset; + node = xa_parent_locked(xas->xa, node); + } + + if (xa_marked(xas->xa, mark)) + xa_mark_clear(xas->xa, mark); +} +EXPORT_SYMBOL_GPL(xas_clear_mark); + +/** + * xas_init_marks() - Initialise all marks for the entry + * @xas: Array operations state. + * + * Initialise all marks for the entry specified by @xas. If we're tracking + * free entries with a mark, we need to set it on all entries. All other + * marks are cleared. + * + * This implementation is not as efficient as it could be; we may walk + * up the tree multiple times. + */ +void xas_init_marks(const struct xa_state *xas) +{ + xa_mark_t mark = 0; + + for (;;) { + if (xa_track_free(xas->xa) && mark == XA_FREE_MARK) + xas_set_mark(xas, mark); + else + xas_clear_mark(xas, mark); + if (mark == XA_MARK_MAX) + break; + mark_inc(mark); + } +} +EXPORT_SYMBOL_GPL(xas_init_marks); + +/** + * xas_pause() - Pause a walk to drop a lock. + * @xas: XArray operation state. + * + * Some users need to pause a walk and drop the lock they're holding in + * order to yield to a higher priority thread or carry out an operation + * on an entry. Those users should call this function before they drop + * the lock. It resets the @xas to be suitable for the next iteration + * of the loop after the user has reacquired the lock. If most entries + * found during a walk require you to call xas_pause(), the xa_for_each() + * iterator may be more appropriate. + * + * Note that xas_pause() only works for forward iteration. If a user needs + * to pause a reverse iteration, we will need a xas_pause_rev(). + */ +void xas_pause(struct xa_state *xas) +{ + struct xa_node *node = xas->xa_node; + + if (xas_invalid(xas)) + return; + + if (node) { + unsigned int offset = xas->xa_offset; + while (++offset < XA_CHUNK_SIZE) { + if (!xa_is_sibling(xa_entry(xas->xa, node, offset))) + break; + } + xas->xa_index += (offset - xas->xa_offset) << node->shift; + } else { + xas->xa_index++; + } + xas->xa_node = XAS_RESTART; +} +EXPORT_SYMBOL_GPL(xas_pause); + +/* + * __xas_prev() - Find the previous entry in the XArray. + * @xas: XArray operation state. + * + * Helper function for xas_prev() which handles all the complex cases + * out of line. + */ +void *__xas_prev(struct xa_state *xas) +{ + void *entry; + + if (!xas_frozen(xas->xa_node)) + xas->xa_index--; + if (xas_not_node(xas->xa_node)) + return xas_load(xas); + + if (xas->xa_offset != get_offset(xas->xa_index, xas->xa_node)) + xas->xa_offset--; + + while (xas->xa_offset == 255) { + xas->xa_offset = xas->xa_node->offset - 1; + xas->xa_node = xa_parent(xas->xa, xas->xa_node); + if (!xas->xa_node) + return set_bounds(xas); + } + + for (;;) { + entry = xa_entry(xas->xa, xas->xa_node, xas->xa_offset); + if (!xa_is_node(entry)) + return entry; + + xas->xa_node = xa_to_node(entry); + xas_set_offset(xas); + } +} +EXPORT_SYMBOL_GPL(__xas_prev); + +/* + * __xas_next() - Find the next entry in the XArray. + * @xas: XArray operation state. + * + * Helper function for xas_next() which handles all the complex cases + * out of line. 
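+ *
+ * Together with __xas_prev(), this lets the inline xas_next() and
+ * xas_prev() helpers step to an adjacent index, only rewalking from the
+ * head of the tree when the state is not currently on a node.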
+ */ +void *__xas_next(struct xa_state *xas) +{ + void *entry; + + if (!xas_frozen(xas->xa_node)) + xas->xa_index++; + if (xas_not_node(xas->xa_node)) + return xas_load(xas); + + if (xas->xa_offset != get_offset(xas->xa_index, xas->xa_node)) + xas->xa_offset++; + + while (xas->xa_offset == XA_CHUNK_SIZE) { + xas->xa_offset = xas->xa_node->offset + 1; + xas->xa_node = xa_parent(xas->xa, xas->xa_node); + if (!xas->xa_node) + return set_bounds(xas); + } + + for (;;) { + entry = xa_entry(xas->xa, xas->xa_node, xas->xa_offset); + if (!xa_is_node(entry)) + return entry; + + xas->xa_node = xa_to_node(entry); + xas_set_offset(xas); + } +} +EXPORT_SYMBOL_GPL(__xas_next); + +/** + * xas_find() - Find the next present entry in the XArray. + * @xas: XArray operation state. + * @max: Highest index to return. + * + * If the @xas has not yet been walked to an entry, return the entry + * which has an index >= xas.xa_index. If it has been walked, the entry + * currently being pointed at has been processed, and so we move to the + * next entry. + * + * If no entry is found and the array is smaller than @max, the iterator + * is set to the smallest index not yet in the array. This allows @xas + * to be immediately passed to xas_store(). + * + * Return: The entry, if found, otherwise %NULL. + */ +void *xas_find(struct xa_state *xas, unsigned long max) +{ + void *entry; + + if (xas_error(xas)) + return NULL; + + if (!xas->xa_node) { + xas->xa_index = 1; + return set_bounds(xas); + } else if (xas_top(xas->xa_node)) { + entry = xas_load(xas); + if (entry || xas_not_node(xas->xa_node)) + return entry; + } else if (!xas->xa_node->shift && + xas->xa_offset != (xas->xa_index & XA_CHUNK_MASK)) { + xas->xa_offset = ((xas->xa_index - 1) & XA_CHUNK_MASK) + 1; + } + + xas_advance(xas); + + while (xas->xa_node && (xas->xa_index <= max)) { + if (unlikely(xas->xa_offset == XA_CHUNK_SIZE)) { + xas->xa_offset = xas->xa_node->offset + 1; + xas->xa_node = xa_parent(xas->xa, xas->xa_node); + continue; + } + + entry = xa_entry(xas->xa, xas->xa_node, xas->xa_offset); + if (xa_is_node(entry)) { + xas->xa_node = xa_to_node(entry); + xas->xa_offset = 0; + continue; + } + if (entry && !xa_is_sibling(entry)) + return entry; + + xas_advance(xas); + } + + if (!xas->xa_node) + xas->xa_node = XAS_BOUNDS; + return NULL; +} +EXPORT_SYMBOL_GPL(xas_find); + +/** + * xas_find_marked() - Find the next marked entry in the XArray. + * @xas: XArray operation state. + * @max: Highest index to return. + * @mark: Mark number to search for. + * + * If the @xas has not yet been walked to an entry, return the marked entry + * which has an index >= xas.xa_index. If it has been walked, the entry + * currently being pointed at has been processed, and so we return the + * first marked entry with an index > xas.xa_index. + * + * If no marked entry is found and the array is smaller than @max, @xas is + * set to the bounds state and xas->xa_index is set to the smallest index + * not yet in the array. This allows @xas to be immediately passed to + * xas_store(). + * + * If no entry is found before @max is reached, @xas is set to the restart + * state. + * + * Return: The entry, if found, otherwise %NULL. 
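+ *
+ * For example, finding the first entry marked XA_MARK_0 (sketch):
+ *
+ * XA_STATE(xas, xa, 0);
+ *
+ * rcu_read_lock();
+ * entry = xas_find_marked(&xas, ULONG_MAX, XA_MARK_0);
+ * rcu_read_unlock();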
+ */ +void *xas_find_marked(struct xa_state *xas, unsigned long max, xa_mark_t mark) +{ + bool advance = true; + unsigned int offset; + void *entry; + + if (xas_error(xas)) + return NULL; + + if (!xas->xa_node) { + xas->xa_index = 1; + goto out; + } else if (xas_top(xas->xa_node)) { + advance = false; + entry = xa_head(xas->xa); + xas->xa_node = NULL; + if (xas->xa_index > max_index(entry)) + goto out; + if (!xa_is_node(entry)) { + if (xa_marked(xas->xa, mark)) + return entry; + xas->xa_index = 1; + goto out; + } + xas->xa_node = xa_to_node(entry); + xas->xa_offset = xas->xa_index >> xas->xa_node->shift; + } + + while (xas->xa_index <= max) { + if (unlikely(xas->xa_offset == XA_CHUNK_SIZE)) { + xas->xa_offset = xas->xa_node->offset + 1; + xas->xa_node = xa_parent(xas->xa, xas->xa_node); + if (!xas->xa_node) + break; + advance = false; + continue; + } + + if (!advance) { + entry = xa_entry(xas->xa, xas->xa_node, xas->xa_offset); + if (xa_is_sibling(entry)) { + xas->xa_offset = xa_to_sibling(entry); + xas_move_index(xas, xas->xa_offset); + } + } + + offset = xas_find_chunk(xas, advance, mark); + if (offset > xas->xa_offset) { + advance = false; + xas_move_index(xas, offset); + /* Mind the wrap */ + if ((xas->xa_index - 1) >= max) + goto max; + xas->xa_offset = offset; + if (offset == XA_CHUNK_SIZE) + continue; + } + + entry = xa_entry(xas->xa, xas->xa_node, xas->xa_offset); + if (!xa_is_node(entry)) + return entry; + xas->xa_node = xa_to_node(entry); + xas_set_offset(xas); + } + +out: + if (xas->xa_index > max) + goto max; + return set_bounds(xas); +max: + xas->xa_node = XAS_RESTART; + return NULL; +} +EXPORT_SYMBOL_GPL(xas_find_marked); + +/** + * xas_find_conflict() - Find the next present entry in a range. + * @xas: XArray operation state. + * + * The @xas describes both a range and a position within that range. + * + * Context: Any context. Expects xa_lock to be held. + * Return: The next entry in the range covered by @xas or %NULL. + */ +void *xas_find_conflict(struct xa_state *xas) +{ + void *curr; + + if (xas_error(xas)) + return NULL; + + if (!xas->xa_node) + return NULL; + + if (xas_top(xas->xa_node)) { + curr = xas_start(xas); + if (!curr) + return NULL; + while (xa_is_node(curr)) { + struct xa_node *node = xa_to_node(curr); + curr = xas_descend(xas, node); + } + if (curr) + return curr; + } + + if (xas->xa_node->shift > xas->xa_shift) + return NULL; + + for (;;) { + if (xas->xa_node->shift == xas->xa_shift) { + if ((xas->xa_offset & xas->xa_sibs) == xas->xa_sibs) + break; + } else if (xas->xa_offset == XA_CHUNK_MASK) { + xas->xa_offset = xas->xa_node->offset; + xas->xa_node = xa_parent_locked(xas->xa, xas->xa_node); + if (!xas->xa_node) + break; + continue; + } + curr = xa_entry_locked(xas->xa, xas->xa_node, ++xas->xa_offset); + if (xa_is_sibling(curr)) + continue; + while (xa_is_node(curr)) { + xas->xa_node = xa_to_node(curr); + xas->xa_offset = 0; + curr = xa_entry_locked(xas->xa, xas->xa_node, 0); + } + if (curr) + return curr; + } + xas->xa_offset -= xas->xa_sibs; + return NULL; +} +EXPORT_SYMBOL_GPL(xas_find_conflict); + +/** + * xa_load() - Load an entry from an XArray. + * @xa: XArray. + * @index: index into array. + * + * Context: Any context. Takes and releases the RCU lock. + * Return: The entry at @index in @xa. 
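
For contrast with the advanced xas_* calls above, the xa_load() entry point that follows manages its own locking; a hedged sketch, assuming a file-scope DEFINE_XARRAY(array) purely for illustration:

	#include <linux/xarray.h>

	static DEFINE_XARRAY(array);	/* assumed; any XArray works */

	static bool index_is_populated(unsigned long index)
	{
		/* xa_load() takes and releases the RCU lock internally. */
		return xa_load(&array, index) != NULL;
	}
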
+ */
+void *xa_load(struct xarray *xa, unsigned long index)
+{
+	XA_STATE(xas, xa, index);
+	void *entry;
+
+	rcu_read_lock();
+	do {
+		entry = xas_load(&xas);
+		if (xa_is_zero(entry))
+			entry = NULL;
+	} while (xas_retry(&xas, entry));
+	rcu_read_unlock();
+
+	return entry;
+}
+EXPORT_SYMBOL(xa_load);
+
+static void *xas_result(struct xa_state *xas, void *curr)
+{
+	if (xa_is_zero(curr))
+		return NULL;
+	if (xas_error(xas))
+		curr = xas->xa_node;
+	return curr;
+}
+
+/**
+ * __xa_erase() - Erase this entry from the XArray while locked.
+ * @xa: XArray.
+ * @index: Index into array.
+ *
+ * If the entry at this index is a multi-index entry then all indices will
+ * be erased, and the entry will no longer be a multi-index entry.
+ *
+ * Context: Any context. Expects xa_lock to be held on entry.
+ * Return: The old entry at this index.
+ */
+void *__xa_erase(struct xarray *xa, unsigned long index)
+{
+	XA_STATE(xas, xa, index);
+	return xas_result(&xas, xas_store(&xas, NULL));
+}
+EXPORT_SYMBOL(__xa_erase);
+
+/**
+ * xa_erase() - Erase this entry from the XArray.
+ * @xa: XArray.
+ * @index: Index of entry.
+ *
+ * This function is the equivalent of calling xa_store() with %NULL as
+ * the third argument. The XArray does not need to allocate memory, so
+ * the user does not need to provide GFP flags.
+ *
+ * Context: Any context. Takes and releases the xa_lock.
+ * Return: The entry which used to be at this index.
+ */
+void *xa_erase(struct xarray *xa, unsigned long index)
+{
+	void *entry;
+
+	xa_lock(xa);
+	entry = __xa_erase(xa, index);
+	xa_unlock(xa);
+
+	return entry;
+}
+EXPORT_SYMBOL(xa_erase);
+
+/**
+ * __xa_store() - Store this entry in the XArray.
+ * @xa: XArray.
+ * @index: Index into array.
+ * @entry: New entry.
+ * @gfp: Memory allocation flags.
+ *
+ * You must already be holding the xa_lock when calling this function.
+ * It will drop the lock if needed to allocate memory, and then reacquire
+ * it afterwards.
+ *
+ * Context: Any context. Expects xa_lock to be held on entry. May
+ * release and reacquire xa_lock if @gfp flags permit.
+ * Return: The old entry at this index or xa_err() if an error happened.
+ */
+void *__xa_store(struct xarray *xa, unsigned long index, void *entry, gfp_t gfp)
+{
+	XA_STATE(xas, xa, index);
+	void *curr;
+
+	if (WARN_ON_ONCE(xa_is_advanced(entry)))
+		return XA_ERROR(-EINVAL);
+	if (xa_track_free(xa) && !entry)
+		entry = XA_ZERO_ENTRY;
+
+	do {
+		curr = xas_store(&xas, entry);
+		if (xa_track_free(xa))
+			xas_clear_mark(&xas, XA_FREE_MARK);
+	} while (__xas_nomem(&xas, gfp));
+
+	return xas_result(&xas, curr);
+}
+EXPORT_SYMBOL(__xa_store);
+
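
A hedged sketch of the store/erase pair above; the GFP choice and the item pointer are illustrative assumptions:

	#include <linux/xarray.h>

	/* Publish 'item' at 'index'; returns 0 or a negative errno. */
	static int publish_item(struct xarray *array, unsigned long index, void *item)
	{
		void *old = xa_store(array, index, item, GFP_KERNEL);

		if (xa_is_err(old))
			return xa_err(old);	/* typically -ENOMEM */
		return 0;			/* 'old' was the displaced entry */
	}

	static void unpublish_item(struct xarray *array, unsigned long index)
	{
		xa_erase(array, index);		/* equivalent to storing NULL */
	}
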
+/**
+ * xa_store() - Store this entry in the XArray.
+ * @xa: XArray.
+ * @index: Index into array.
+ * @entry: New entry.
+ * @gfp: Memory allocation flags.
+ *
+ * After this function returns, loads from this index will return @entry.
+ * Storing into an existing multislot entry updates the entry of every index.
+ * The marks associated with @index are unaffected unless @entry is %NULL.
+ *
+ * Context: Any context. Takes and releases the xa_lock.
+ * May sleep if the @gfp flags permit.
+ * Return: The old entry at this index on success, xa_err(-EINVAL) if @entry
+ * cannot be stored in an XArray, or xa_err(-ENOMEM) if memory allocation
+ * failed.
+ */
+void *xa_store(struct xarray *xa, unsigned long index, void *entry, gfp_t gfp)
+{
+	void *curr;
+
+	xa_lock(xa);
+	curr = __xa_store(xa, index, entry, gfp);
+	xa_unlock(xa);
+
+	return curr;
+}
+EXPORT_SYMBOL(xa_store);
+
+/**
+ * __xa_cmpxchg() - Conditionally replace an entry in the XArray.
+ * @xa: XArray.
+ * @index: Index into array.
+ * @old: Old value to test against.
+ * @entry: New entry.
+ * @gfp: Memory allocation flags.
+ *
+ * You must already be holding the xa_lock when calling this function.
+ * It will drop the lock if needed to allocate memory, and then reacquire
+ * it afterwards.
+ *
+ * Context: Any context. Expects xa_lock to be held on entry. May
+ * release and reacquire xa_lock if @gfp flags permit.
+ * Return: The old entry at this index or xa_err() if an error happened.
+ */
+void *__xa_cmpxchg(struct xarray *xa, unsigned long index,
+			void *old, void *entry, gfp_t gfp)
+{
+	XA_STATE(xas, xa, index);
+	void *curr;
+
+	if (WARN_ON_ONCE(xa_is_advanced(entry)))
+		return XA_ERROR(-EINVAL);
+	if (xa_track_free(xa) && !entry)
+		entry = XA_ZERO_ENTRY;
+
+	do {
+		curr = xas_load(&xas);
+		if (curr == XA_ZERO_ENTRY)
+			curr = NULL;
+		if (curr == old) {
+			xas_store(&xas, entry);
+			if (xa_track_free(xa))
+				xas_clear_mark(&xas, XA_FREE_MARK);
+		}
+	} while (__xas_nomem(&xas, gfp));
+
+	return xas_result(&xas, curr);
+}
+EXPORT_SYMBOL(__xa_cmpxchg);
+
+/**
+ * __xa_insert() - Store this entry in the XArray if no entry is present.
+ * @xa: XArray.
+ * @index: Index into array.
+ * @entry: New entry.
+ * @gfp: Memory allocation flags.
+ *
+ * Inserting a NULL entry will store a reserved entry (like xa_reserve())
+ * if no entry is present. Inserting will fail if a reserved entry is
+ * present, even though loading from this index will return NULL.
+ *
+ * Context: Any context. Expects xa_lock to be held on entry. May
+ * release and reacquire xa_lock if @gfp flags permit.
+ * Return: 0 if the store succeeded. -EEXIST if another entry was present.
+ * -ENOMEM if memory could not be allocated.
+ */
+int __xa_insert(struct xarray *xa, unsigned long index, void *entry, gfp_t gfp)
+{
+	XA_STATE(xas, xa, index);
+	void *curr;
+
+	if (WARN_ON_ONCE(xa_is_advanced(entry)))
+		return -EINVAL;
+	if (!entry)
+		entry = XA_ZERO_ENTRY;
+
+	do {
+		curr = xas_load(&xas);
+		if (!curr) {
+			xas_store(&xas, entry);
+			if (xa_track_free(xa))
+				xas_clear_mark(&xas, XA_FREE_MARK);
+		} else {
+			xas_set_err(&xas, -EEXIST);
+		}
+	} while (__xas_nomem(&xas, gfp));
+
+	return xas_error(&xas);
+}
+EXPORT_SYMBOL(__xa_insert);
+
+/**
+ * __xa_reserve() - Reserve this index in the XArray.
+ * @xa: XArray.
+ * @index: Index into array.
+ * @gfp: Memory allocation flags.
+ *
+ * Ensures there is somewhere to store an entry at @index in the array.
+ * If there is already something stored at @index, this function does
+ * nothing. If there was nothing there, the entry is marked as reserved.
+ * Loading from a reserved entry returns a %NULL pointer.
+ *
+ * If you do not use the entry that you have reserved, call xa_release()
+ * or xa_erase() to free any unnecessary memory.
+ *
+ * Context: Any context. Expects the xa_lock to be held on entry. May
+ * release the lock, sleep and reacquire the lock if the @gfp flags permit.
+ * Return: 0 if the reservation succeeded or -ENOMEM if it failed.
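
(Aside: a hedged sketch composing the locked calls above into insert-if-absent with a conditional replace on a lost race; every name here is illustrative and the -EBUSY result is an arbitrary policy choice.)

	#include <linux/errno.h>
	#include <linux/xarray.h>

	static int install(struct xarray *array, unsigned long index,
			   void *expected, void *new_entry)
	{
		void *curr;
		int err;

		xa_lock(array);
		err = __xa_insert(array, index, new_entry, GFP_KERNEL);
		if (err == -EEXIST) {
			/* Occupied: replace only if it still holds 'expected'. */
			curr = __xa_cmpxchg(array, index, expected, new_entry,
					    GFP_KERNEL);
			if (xa_is_err(curr))
				err = xa_err(curr);
			else
				err = (curr == expected) ? 0 : -EBUSY;
		}
		xa_unlock(array);

		return err;
	}
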
+ */ +int __xa_reserve(struct xarray *xa, unsigned long index, gfp_t gfp) +{ + XA_STATE(xas, xa, index); + void *curr; + + do { + curr = xas_load(&xas); + if (!curr) { + xas_store(&xas, XA_ZERO_ENTRY); + if (xa_track_free(xa)) + xas_clear_mark(&xas, XA_FREE_MARK); + } + } while (__xas_nomem(&xas, gfp)); + + return xas_error(&xas); +} +EXPORT_SYMBOL(__xa_reserve); + +#ifdef CONFIG_XARRAY_MULTI +static void xas_set_range(struct xa_state *xas, unsigned long first, + unsigned long last) +{ + unsigned int shift = 0; + unsigned long sibs = last - first; + unsigned int offset = XA_CHUNK_MASK; + + xas_set(xas, first); + + while ((first & XA_CHUNK_MASK) == 0) { + if (sibs < XA_CHUNK_MASK) + break; + if ((sibs == XA_CHUNK_MASK) && (offset < XA_CHUNK_MASK)) + break; + shift += XA_CHUNK_SHIFT; + if (offset == XA_CHUNK_MASK) + offset = sibs & XA_CHUNK_MASK; + sibs >>= XA_CHUNK_SHIFT; + first >>= XA_CHUNK_SHIFT; + } + + offset = first & XA_CHUNK_MASK; + if (offset + sibs > XA_CHUNK_MASK) + sibs = XA_CHUNK_MASK - offset; + if ((((first + sibs + 1) << shift) - 1) > last) + sibs -= 1; + + xas->xa_shift = shift; + xas->xa_sibs = sibs; +} + +/** + * xa_store_range() - Store this entry at a range of indices in the XArray. + * @xa: XArray. + * @first: First index to affect. + * @last: Last index to affect. + * @entry: New entry. + * @gfp: Memory allocation flags. + * + * After this function returns, loads from any index between @first and @last, + * inclusive will return @entry. + * Storing into an existing multislot entry updates the entry of every index. + * The marks associated with @index are unaffected unless @entry is %NULL. + * + * Context: Process context. Takes and releases the xa_lock. May sleep + * if the @gfp flags permit. + * Return: %NULL on success, xa_err(-EINVAL) if @entry cannot be stored in + * an XArray, or xa_err(-ENOMEM) if memory allocation failed. + */ +void *xa_store_range(struct xarray *xa, unsigned long first, + unsigned long last, void *entry, gfp_t gfp) +{ + XA_STATE(xas, xa, 0); + + if (WARN_ON_ONCE(xa_is_internal(entry))) + return XA_ERROR(-EINVAL); + if (last < first) + return XA_ERROR(-EINVAL); + + do { + xas_lock(&xas); + if (entry) { + unsigned int order = BITS_PER_LONG; + if (last + 1) + order = __ffs(last + 1); + xas_set_order(&xas, last, order); + xas_create(&xas, true); + if (xas_error(&xas)) + goto unlock; + } + do { + xas_set_range(&xas, first, last); + xas_store(&xas, entry); + if (xas_error(&xas)) + goto unlock; + first += xas_size(&xas); + } while (first <= last); +unlock: + xas_unlock(&xas); + } while (xas_nomem(&xas, gfp)); + + return xas_result(&xas, NULL); +} +EXPORT_SYMBOL(xa_store_range); +#endif /* CONFIG_XARRAY_MULTI */ + +/** + * __xa_alloc() - Find somewhere to store this entry in the XArray. + * @xa: XArray. + * @id: Pointer to ID. + * @max: Maximum ID to allocate (inclusive). + * @entry: New entry. + * @gfp: Memory allocation flags. + * + * Allocates an unused ID in the range specified by @id and @max. + * Updates the @id pointer with the index, then stores the entry at that + * index. A concurrent lookup will not see an uninitialised @id. + * + * Context: Any context. Expects xa_lock to be held on entry. May + * release and reacquire xa_lock if @gfp flags permit. + * Return: 0 on success, -ENOMEM if memory allocation fails or -ENOSPC if + * there is no more space in the XArray. 
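
(Aside: a hedged sketch of the xa_store_range() call above. It depends on CONFIG_XARRAY_MULTI; the 64..127 range and the tag pointer are arbitrary.)

	#include <linux/xarray.h>

	/* Make every index in 64..127 resolve to 'tag'. */
	static int cover_range(struct xarray *array, void *tag)
	{
		void *ret = xa_store_range(array, 64, 127, tag, GFP_KERNEL);

		if (xa_is_err(ret))
			return xa_err(ret);
		/* xa_load(array, 100) now returns 'tag'. */
		return 0;
	}
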
+ */ +int __xa_alloc(struct xarray *xa, u32 *id, u32 max, void *entry, gfp_t gfp) +{ + XA_STATE(xas, xa, 0); + int err; + + if (WARN_ON_ONCE(xa_is_advanced(entry))) + return -EINVAL; + if (WARN_ON_ONCE(!xa_track_free(xa))) + return -EINVAL; + + if (!entry) + entry = XA_ZERO_ENTRY; + + do { + xas.xa_index = *id; + xas_find_marked(&xas, max, XA_FREE_MARK); + if (xas.xa_node == XAS_RESTART) + xas_set_err(&xas, -ENOSPC); + xas_store(&xas, entry); + xas_clear_mark(&xas, XA_FREE_MARK); + } while (__xas_nomem(&xas, gfp)); + + err = xas_error(&xas); + if (!err) + *id = xas.xa_index; + return err; +} +EXPORT_SYMBOL(__xa_alloc); + +/** + * __xa_set_mark() - Set this mark on this entry while locked. + * @xa: XArray. + * @index: Index of entry. + * @mark: Mark number. + * + * Attempting to set a mark on a %NULL entry does not succeed. + * + * Context: Any context. Expects xa_lock to be held on entry. + */ +void __xa_set_mark(struct xarray *xa, unsigned long index, xa_mark_t mark) +{ + XA_STATE(xas, xa, index); + void *entry = xas_load(&xas); + + if (entry) + xas_set_mark(&xas, mark); +} +EXPORT_SYMBOL(__xa_set_mark); + +/** + * __xa_clear_mark() - Clear this mark on this entry while locked. + * @xa: XArray. + * @index: Index of entry. + * @mark: Mark number. + * + * Context: Any context. Expects xa_lock to be held on entry. + */ +void __xa_clear_mark(struct xarray *xa, unsigned long index, xa_mark_t mark) +{ + XA_STATE(xas, xa, index); + void *entry = xas_load(&xas); + + if (entry) + xas_clear_mark(&xas, mark); +} +EXPORT_SYMBOL(__xa_clear_mark); + +/** + * xa_get_mark() - Inquire whether this mark is set on this entry. + * @xa: XArray. + * @index: Index of entry. + * @mark: Mark number. + * + * This function uses the RCU read lock, so the result may be out of date + * by the time it returns. If you need the result to be stable, use a lock. + * + * Context: Any context. Takes and releases the RCU lock. + * Return: True if the entry at @index has this mark set, false if it doesn't. + */ +bool xa_get_mark(struct xarray *xa, unsigned long index, xa_mark_t mark) +{ + XA_STATE(xas, xa, index); + void *entry; + + rcu_read_lock(); + entry = xas_start(&xas); + while (xas_get_mark(&xas, mark)) { + if (!xa_is_node(entry)) + goto found; + entry = xas_descend(&xas, xa_to_node(entry)); + } + rcu_read_unlock(); + return false; + found: + rcu_read_unlock(); + return true; +} +EXPORT_SYMBOL(xa_get_mark); + +/** + * xa_set_mark() - Set this mark on this entry. + * @xa: XArray. + * @index: Index of entry. + * @mark: Mark number. + * + * Attempting to set a mark on a %NULL entry does not succeed. + * + * Context: Process context. Takes and releases the xa_lock. + */ +void xa_set_mark(struct xarray *xa, unsigned long index, xa_mark_t mark) +{ + xa_lock(xa); + __xa_set_mark(xa, index, mark); + xa_unlock(xa); +} +EXPORT_SYMBOL(xa_set_mark); + +/** + * xa_clear_mark() - Clear this mark on this entry. + * @xa: XArray. + * @index: Index of entry. + * @mark: Mark number. + * + * Clearing a mark always succeeds. + * + * Context: Process context. Takes and releases the xa_lock. + */ +void xa_clear_mark(struct xarray *xa, unsigned long index, xa_mark_t mark) +{ + xa_lock(xa); + __xa_clear_mark(xa, index, mark); + xa_unlock(xa); +} +EXPORT_SYMBOL(xa_clear_mark); + +/** + * xa_find() - Search the XArray for an entry. + * @xa: XArray. + * @indexp: Pointer to an index. + * @max: Maximum index to search to. + * @filter: Selection criterion. 
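
(Aside: a hedged sketch of the ID allocation above. It assumes an XArray declared with DEFINE_XARRAY_ALLOC(), i.e. one that tracks free entries; all other names are illustrative.)

	#include <linux/xarray.h>

	static DEFINE_XARRAY_ALLOC(ids);	/* assumed: tracks free entries */

	/* Allocate the lowest free index at or above 0 for 'item'. */
	static int new_id(void *item, u32 *out_id)
	{
		u32 id = 0;
		int err;

		xa_lock(&ids);
		err = __xa_alloc(&ids, &id, UINT_MAX, item, GFP_KERNEL);
		xa_unlock(&ids);
		if (!err)
			*out_id = id;	/* concurrent lookups already see 'item' */
		return err;		/* 0, -ENOMEM or -ENOSPC */
	}

The mark helpers in the same hunk compose naturally with this: xa_set_mark(&ids, id, XA_MARK_0) could then tag the new slot for later mark-filtered searches.
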
+ * + * Finds the entry in @xa which matches the @filter, and has the lowest + * index that is at least @indexp and no more than @max. + * If an entry is found, @indexp is updated to be the index of the entry. + * This function is protected by the RCU read lock, so it may not find + * entries which are being simultaneously added. It will not return an + * %XA_RETRY_ENTRY; if you need to see retry entries, use xas_find(). + * + * Context: Any context. Takes and releases the RCU lock. + * Return: The entry, if found, otherwise %NULL. + */ +void *xa_find(struct xarray *xa, unsigned long *indexp, + unsigned long max, xa_mark_t filter) +{ + XA_STATE(xas, xa, *indexp); + void *entry; + + rcu_read_lock(); + do { + if ((__force unsigned int)filter < XA_MAX_MARKS) + entry = xas_find_marked(&xas, max, filter); + else + entry = xas_find(&xas, max); + } while (xas_retry(&xas, entry)); + rcu_read_unlock(); + + if (entry) + *indexp = xas.xa_index; + return entry; +} +EXPORT_SYMBOL(xa_find); + +/** + * xa_find_after() - Search the XArray for a present entry. + * @xa: XArray. + * @indexp: Pointer to an index. + * @max: Maximum index to search to. + * @filter: Selection criterion. + * + * Finds the entry in @xa which matches the @filter and has the lowest + * index that is above @indexp and no more than @max. + * If an entry is found, @indexp is updated to be the index of the entry. + * This function is protected by the RCU read lock, so it may miss entries + * which are being simultaneously added. It will not return an + * %XA_RETRY_ENTRY; if you need to see retry entries, use xas_find(). + * + * Context: Any context. Takes and releases the RCU lock. + * Return: The pointer, if found, otherwise %NULL. + */ +void *xa_find_after(struct xarray *xa, unsigned long *indexp, + unsigned long max, xa_mark_t filter) +{ + XA_STATE(xas, xa, *indexp + 1); + void *entry; + + rcu_read_lock(); + for (;;) { + if ((__force unsigned int)filter < XA_MAX_MARKS) + entry = xas_find_marked(&xas, max, filter); + else + entry = xas_find(&xas, max); + if (xas.xa_node == XAS_BOUNDS) + break; + if (xas.xa_shift) { + if (xas.xa_index & ((1UL << xas.xa_shift) - 1)) + continue; + } else { + if (xas.xa_offset < (xas.xa_index & XA_CHUNK_MASK)) + continue; + } + if (!xas_retry(&xas, entry)) + break; + } + rcu_read_unlock(); + + if (entry) + *indexp = xas.xa_index; + return entry; +} +EXPORT_SYMBOL(xa_find_after); + +static unsigned int xas_extract_present(struct xa_state *xas, void **dst, + unsigned long max, unsigned int n) +{ + void *entry; + unsigned int i = 0; + + rcu_read_lock(); + xas_for_each(xas, entry, max) { + if (xas_retry(xas, entry)) + continue; + dst[i++] = entry; + if (i == n) + break; + } + rcu_read_unlock(); + + return i; +} + +static unsigned int xas_extract_marked(struct xa_state *xas, void **dst, + unsigned long max, unsigned int n, xa_mark_t mark) +{ + void *entry; + unsigned int i = 0; + + rcu_read_lock(); + xas_for_each_marked(xas, entry, max, mark) { + if (xas_retry(xas, entry)) + continue; + dst[i++] = entry; + if (i == n) + break; + } + rcu_read_unlock(); + + return i; +} + +/** + * xa_extract() - Copy selected entries from the XArray into a normal array. + * @xa: The source XArray to copy from. + * @dst: The buffer to copy entries into. + * @start: The first index in the XArray eligible to be selected. + * @max: The last index in the XArray eligible to be selected. + * @n: The maximum number of entries to copy. + * @filter: Selection criterion. + * + * Copies up to @n entries that match @filter from the XArray. 
The + * copied entries will have indices between @start and @max, inclusive. + * + * The @filter may be an XArray mark value, in which case entries which are + * marked with that mark will be copied. It may also be %XA_PRESENT, in + * which case all entries which are not %NULL will be copied. + * + * The entries returned may not represent a snapshot of the XArray at a + * moment in time. For example, if another thread stores to index 5, then + * index 10, calling xa_extract() may return the old contents of index 5 + * and the new contents of index 10. Indices not modified while this + * function is running will not be skipped. + * + * If you need stronger guarantees, holding the xa_lock across calls to this + * function will prevent concurrent modification. + * + * Context: Any context. Takes and releases the RCU lock. + * Return: The number of entries copied. + */ +unsigned int xa_extract(struct xarray *xa, void **dst, unsigned long start, + unsigned long max, unsigned int n, xa_mark_t filter) +{ + XA_STATE(xas, xa, start); + + if (!n) + return 0; + + if ((__force unsigned int)filter < XA_MAX_MARKS) + return xas_extract_marked(&xas, dst, max, n, filter); + return xas_extract_present(&xas, dst, max, n); +} +EXPORT_SYMBOL(xa_extract); + +/** + * xa_destroy() - Free all internal data structures. + * @xa: XArray. + * + * After calling this function, the XArray is empty and has freed all memory + * allocated for its internal data structures. You are responsible for + * freeing the objects referenced by the XArray. + * + * Context: Any context. Takes and releases the xa_lock, interrupt-safe. + */ +void xa_destroy(struct xarray *xa) +{ + XA_STATE(xas, xa, 0); + unsigned long flags; + void *entry; + + xas.xa_node = NULL; + xas_lock_irqsave(&xas, flags); + entry = xa_head_locked(xa); + RCU_INIT_POINTER(xa->xa_head, NULL); + xas_init_marks(&xas); + /* lockdep checks we're still holding the lock in xas_free_nodes() */ + if (xa_is_node(entry)) + xas_free_nodes(&xas, xa_to_node(entry)); + xas_unlock_irqrestore(&xas, flags); +} +EXPORT_SYMBOL(xa_destroy); + +#ifdef XA_DEBUG +void xa_dump_node(const struct xa_node *node) +{ + unsigned i, j; + + if (!node) + return; + if ((unsigned long)node & 3) { + pr_cont("node %px\n", node); + return; + } + + pr_cont("node %px %s %d parent %px shift %d count %d values %d " + "array %px list %px %px marks", + node, node->parent ? 
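
(Aside: a hedged sketch of the two lookup styles documented above: the xa_find()/xa_find_after() stepping pair, which is the shape the xa_for_each() macro expands to, and the batched xa_extract(). The mark, bounds and batch size are arbitrary.)

	#include <linux/printk.h>
	#include <linux/xarray.h>

	/* Visit each present entry up to 'max', one call per step. */
	static void visit_all(struct xarray *array, unsigned long max)
	{
		unsigned long index = 0;
		void *entry;

		for (entry = xa_find(array, &index, max, XA_PRESENT); entry;
		     entry = xa_find_after(array, &index, max, XA_PRESENT))
			pr_info("index %lu: entry %p\n", index, entry);
	}

	/* Copy out up to 16 entries marked XA_MARK_0; returns the count. */
	static unsigned int grab_marked_batch(struct xarray *array, void **dst)
	{
		return xa_extract(array, dst, 0, ULONG_MAX, 16, XA_MARK_0);
	}

As the xa_extract() documentation notes, the batch is not an atomic snapshot; hold the xa_lock across the call if that matters.
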
"offset" : "max", node->offset, + node->parent, node->shift, node->count, node->nr_values, + node->array, node->private_list.prev, node->private_list.next); + for (i = 0; i < XA_MAX_MARKS; i++) + for (j = 0; j < XA_MARK_LONGS; j++) + pr_cont(" %lx", node->marks[i][j]); + pr_cont("\n"); +} + +void xa_dump_index(unsigned long index, unsigned int shift) +{ + if (!shift) + pr_info("%lu: ", index); + else if (shift >= BITS_PER_LONG) + pr_info("0-%lu: ", ~0UL); + else + pr_info("%lu-%lu: ", index, index | ((1UL << shift) - 1)); +} + +void xa_dump_entry(const void *entry, unsigned long index, unsigned long shift) +{ + if (!entry) + return; + + xa_dump_index(index, shift); + + if (xa_is_node(entry)) { + if (shift == 0) { + pr_cont("%px\n", entry); + } else { + unsigned long i; + struct xa_node *node = xa_to_node(entry); + xa_dump_node(node); + for (i = 0; i < XA_CHUNK_SIZE; i++) + xa_dump_entry(node->slots[i], + index + (i << node->shift), node->shift); + } + } else if (xa_is_value(entry)) + pr_cont("value %ld (0x%lx) [%px]\n", xa_to_value(entry), + xa_to_value(entry), entry); + else if (!xa_is_internal(entry)) + pr_cont("%px\n", entry); + else if (xa_is_retry(entry)) + pr_cont("retry (%ld)\n", xa_to_internal(entry)); + else if (xa_is_sibling(entry)) + pr_cont("sibling (slot %ld)\n", xa_to_sibling(entry)); + else if (xa_is_zero(entry)) + pr_cont("zero (%ld)\n", xa_to_internal(entry)); + else + pr_cont("UNKNOWN ENTRY (%px)\n", entry); +} + +void xa_dump(const struct xarray *xa) +{ + void *entry = xa->xa_head; + unsigned int shift = 0; + + pr_info("xarray: %px head %px flags %x marks %d %d %d\n", xa, entry, + xa->xa_flags, xa_marked(xa, XA_MARK_0), + xa_marked(xa, XA_MARK_1), xa_marked(xa, XA_MARK_2)); + if (xa_is_node(entry)) + shift = xa_to_node(entry)->shift + XA_CHUNK_SHIFT; + xa_dump_entry(entry, 0, shift); +} +#endif diff --git a/lib/zlib_inflate/inflate.c b/lib/zlib_inflate/inflate.c index 58a733b10387..48f14cd58c77 100644 --- a/lib/zlib_inflate/inflate.c +++ b/lib/zlib_inflate/inflate.c @@ -382,6 +382,7 @@ int zlib_inflate(z_streamp strm, int flush) strm->adler = state->check = REVERSE(hold); INITBITS(); state->mode = DICT; + /* fall through */ case DICT: if (state->havedict == 0) { RESTORE(); @@ -389,8 +390,10 @@ int zlib_inflate(z_streamp strm, int flush) } strm->adler = state->check = zlib_adler32(0L, NULL, 0); state->mode = TYPE; + /* fall through */ case TYPE: if (flush == Z_BLOCK) goto inf_leave; + /* fall through */ case TYPEDO: if (state->last) { BYTEBITS(); @@ -428,6 +431,7 @@ int zlib_inflate(z_streamp strm, int flush) state->length = (unsigned)hold & 0xffff; INITBITS(); state->mode = COPY; + /* fall through */ case COPY: copy = state->length; if (copy) { @@ -461,6 +465,7 @@ int zlib_inflate(z_streamp strm, int flush) #endif state->have = 0; state->mode = LENLENS; + /* fall through */ case LENLENS: while (state->have < state->ncode) { NEEDBITS(3); @@ -481,6 +486,7 @@ int zlib_inflate(z_streamp strm, int flush) } state->have = 0; state->mode = CODELENS; + /* fall through */ case CODELENS: while (state->have < state->nlen + state->ndist) { for (;;) { @@ -554,6 +560,7 @@ int zlib_inflate(z_streamp strm, int flush) break; } state->mode = LEN; + /* fall through */ case LEN: if (have >= 6 && left >= 258) { RESTORE(); @@ -593,6 +600,7 @@ int zlib_inflate(z_streamp strm, int flush) } state->extra = (unsigned)(this.op) & 15; state->mode = LENEXT; + /* fall through */ case LENEXT: if (state->extra) { NEEDBITS(state->extra); @@ -600,6 +608,7 @@ int zlib_inflate(z_streamp strm, int flush) 
 			DROPBITS(state->extra);
 		}
 		state->mode = DIST;
+		/* fall through */
 	case DIST:
 		for (;;) {
 			this = state->distcode[BITS(state->distbits)];
@@ -625,6 +634,7 @@ int zlib_inflate(z_streamp strm, int flush)
 		state->offset = (unsigned)this.val;
 		state->extra = (unsigned)(this.op) & 15;
 		state->mode = DISTEXT;
+		/* fall through */
 	case DISTEXT:
 		if (state->extra) {
 			NEEDBITS(state->extra);
@@ -644,6 +654,7 @@ int zlib_inflate(z_streamp strm, int flush)
 			break;
 		}
 		state->mode = MATCH;
+		/* fall through */
 	case MATCH:
 		if (left == 0) goto inf_leave;
 		copy = out - left;
@@ -694,6 +705,7 @@ int zlib_inflate(z_streamp strm, int flush)
 			INITBITS();
 		}
 		state->mode = DONE;
+		/* fall through */
 	case DONE:
 		ret = Z_STREAM_END;
 		goto inf_leave;
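
The zlib hunks above add no logic: each "/* fall through */" comment simply documents that a switch case is meant to continue into the next one, so that static checkers and warnings such as GCC's -Wimplicit-fallthrough can distinguish intent from accident. A minimal, hedged illustration of the convention (not zlib code; init() and run() are invented):

	extern void init(void);
	extern void run(void);

	enum step { SETUP, RUN };

	static void advance(enum step step)
	{
		switch (step) {
		case SETUP:
			init();
			/* fall through */	/* SETUP deliberately continues into RUN */
		case RUN:
			run();
			break;
		}
	}
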