Diffstat (limited to 'lib')
35 files changed, 1733 insertions, 362 deletions
diff --git a/lib/Kconfig b/lib/Kconfig index a38cc61256f1..ac3b30697b2b 100644 --- a/lib/Kconfig +++ b/lib/Kconfig @@ -701,3 +701,6 @@ config GENERIC_LIB_DEVMEM_IS_ALLOWED config PLDMFW bool default n + +config ASN1_ENCODER + tristate diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index 2779c29d9981..678c13967580 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -284,8 +284,7 @@ config DEBUG_INFO_DWARF4 config DEBUG_INFO_DWARF5 bool "Generate DWARF Version 5 debuginfo" - depends on GCC_VERSION >= 50000 || CC_IS_CLANG - depends on CC_IS_GCC || $(success,$(srctree)/scripts/test_dwarf5_support.sh $(CC) $(CLANG_FLAGS)) + depends on GCC_VERSION >= 50000 || (CC_IS_CLANG && (AS_IS_LLVM || (AS_IS_GNU && AS_VERSION >= 23502))) depends on !DEBUG_INFO_BTF help Generate DWARF v5 debug info. Requires binutils 2.35.2, gcc 5.0+ (gcc @@ -449,6 +448,16 @@ config VMLINUX_VALIDATION depends on STACK_VALIDATION && DEBUG_ENTRY && !PARAVIRT default y +config VMLINUX_MAP + bool "Generate vmlinux.map file when linking" + depends on EXPERT + help + Selecting this option will pass "-Map=vmlinux.map" to ld + when linking vmlinux. That file can be useful for verifying + and debugging magic section games, and for seeing which + pieces of code get eliminated with + CONFIG_LD_DEAD_CODE_DATA_ELIMINATION. + config DEBUG_FORCE_WEAK_PER_CPU bool "Force weak per-cpu definitions" depends on DEBUG_KERNEL @@ -1363,13 +1372,53 @@ config LOCKDEP bool depends on DEBUG_KERNEL && LOCK_DEBUGGING_SUPPORT select STACKTRACE - select FRAME_POINTER if !MIPS && !PPC && !ARM && !S390 && !MICROBLAZE && !ARC && !X86 + depends on FRAME_POINTER || MIPS || PPC || S390 || MICROBLAZE || ARM || ARC || X86 select KALLSYMS select KALLSYMS_ALL config LOCKDEP_SMALL bool +config LOCKDEP_BITS + int "Bitsize for MAX_LOCKDEP_ENTRIES" + depends on LOCKDEP && !LOCKDEP_SMALL + range 10 30 + default 15 + help + Try increasing this value if you hit "BUG: MAX_LOCKDEP_ENTRIES too low!" message. + +config LOCKDEP_CHAINS_BITS + int "Bitsize for MAX_LOCKDEP_CHAINS" + depends on LOCKDEP && !LOCKDEP_SMALL + range 10 30 + default 16 + help + Try increasing this value if you hit "BUG: MAX_LOCKDEP_CHAINS too low!" message. + +config LOCKDEP_STACK_TRACE_BITS + int "Bitsize for MAX_STACK_TRACE_ENTRIES" + depends on LOCKDEP && !LOCKDEP_SMALL + range 10 30 + default 19 + help + Try increasing this value if you hit "BUG: MAX_STACK_TRACE_ENTRIES too low!" message. + +config LOCKDEP_STACK_TRACE_HASH_BITS + int "Bitsize for STACK_TRACE_HASH_SIZE" + depends on LOCKDEP && !LOCKDEP_SMALL + range 10 30 + default 14 + help + Try increasing this value if you need large MAX_STACK_TRACE_ENTRIES. + +config LOCKDEP_CIRCULAR_QUEUE_BITS + int "Bitsize for elements in circular_queue struct" + depends on LOCKDEP + range 10 30 + default 12 + help + Try increasing this value if you hit "lockdep bfs error:-1" warning due to __cq_enqueue() failure. + config DEBUG_LOCKDEP bool "Lock dependency engine debugging" depends on DEBUG_KERNEL && LOCKDEP @@ -1665,12 +1714,11 @@ config LATENCYTOP depends on DEBUG_KERNEL depends on STACKTRACE_SUPPORT depends on PROC_FS - select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM && !ARC && !X86 + depends on FRAME_POINTER || MIPS || PPC || S390 || MICROBLAZE || ARM || ARC || X86 select KALLSYMS select KALLSYMS_ALL select STACKTRACE select SCHEDSTATS - select SCHED_DEBUG help Enable this option if you want to use the LatencyTOP tool to find out which userspace is blocking on what kernel operations. 
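To make the new LOCKDEP_*_BITS knobs above concrete: lockdep conventionally sizes its internal tables as a power of two of the configured bit width, roughly as sketched below. The macro names and their placement (kernel/locking/lockdep_internals.h) are assumptions for illustration, not part of this diff.

	/* illustrative sizing only, not part of this patch */
	#define MAX_LOCKDEP_ENTRIES	(1UL << CONFIG_LOCKDEP_BITS)		/* default 1 << 15 */
	#define MAX_LOCKDEP_CHAINS	(1UL << CONFIG_LOCKDEP_CHAINS_BITS)	/* default 1 << 16 */
	#define MAX_STACK_TRACE_ENTRIES	(1UL << CONFIG_LOCKDEP_STACK_TRACE_BITS)	/* default 1 << 19 */
	#define STACK_TRACE_HASH_SIZE	(1 << CONFIG_LOCKDEP_STACK_TRACE_HASH_BITS)	/* default 1 << 14 */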
@@ -1918,7 +1966,7 @@ config FAULT_INJECTION_STACKTRACE_FILTER depends on FAULT_INJECTION_DEBUG_FS && STACKTRACE_SUPPORT depends on !X86_64 select STACKTRACE - select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM && !ARC && !X86 + depends on FRAME_POINTER || MIPS || PPC || S390 || MICROBLAZE || ARM || ARC || X86 help Provide stacktrace filter for fault-injection capabilities @@ -2027,6 +2075,16 @@ config TEST_SORT If unsure, say N. +config TEST_DIV64 + tristate "64bit/32bit division and modulo test" + depends on DEBUG_KERNEL || m + help + Enable this to turn on the 'do_div()' function test. This test is + executed only once during system boot (so affects only boot time), + or at module load time. + + If unsure, say N. + config KPROBES_SANITY_TEST bool "Kprobes sanity tests" depends on DEBUG_KERNEL @@ -2515,11 +2573,18 @@ config TEST_FPU endif # RUNTIME_TESTING_MENU +config ARCH_USE_MEMTEST + bool + help + An architecture should select this when it uses early_memtest() + during the boot process. + config MEMTEST bool "Memtest" + depends on ARCH_USE_MEMTEST help This option adds a kernel parameter 'memtest', which allows memtest - to be set. + to be set and executed. memtest=0, means disabled; -- default memtest=1, means do 1 test pattern; ... diff --git a/lib/Kconfig.kasan b/lib/Kconfig.kasan index fba9909e31b7..cffc2ebbf185 100644 --- a/lib/Kconfig.kasan +++ b/lib/Kconfig.kasan @@ -138,9 +138,10 @@ config KASAN_INLINE endchoice -config KASAN_STACK_ENABLE +config KASAN_STACK bool "Enable stack instrumentation (unsafe)" if CC_IS_CLANG && !COMPILE_TEST depends on KASAN_GENERIC || KASAN_SW_TAGS + default y if CC_IS_GCC help The LLVM stack address sanitizer has a known problem that causes excessive stack usage in a lot of functions, see @@ -154,12 +155,6 @@ config KASAN_STACK_ENABLE CONFIG_COMPILE_TEST. On gcc it is assumed to always be safe to use and enabled by default. -config KASAN_STACK - int - depends on KASAN_GENERIC || KASAN_SW_TAGS - default 1 if KASAN_STACK_ENABLE || CC_IS_GCC - default 0 - config KASAN_SW_TAGS_IDENTIFY bool "Enable memory corruption identification" depends on KASAN_SW_TAGS diff --git a/lib/Kconfig.kcsan b/lib/Kconfig.kcsan index f271ff5fbb5a..0440f373248e 100644 --- a/lib/Kconfig.kcsan +++ b/lib/Kconfig.kcsan @@ -69,8 +69,9 @@ config KCSAN_SELFTEST panic. Recommended to be enabled, ensuring critical functionality works as intended. 
-config KCSAN_TEST - tristate "KCSAN test for integrated runtime behaviour" +config KCSAN_KUNIT_TEST + tristate "KCSAN test for integrated runtime behaviour" if !KUNIT_ALL_TESTS + default KUNIT_ALL_TESTS depends on TRACEPOINTS && KUNIT select TORTURE_TEST help diff --git a/lib/Makefile b/lib/Makefile index b5307d3eec1a..e11cfc18b6c0 100644 --- a/lib/Makefile +++ b/lib/Makefile @@ -280,6 +280,7 @@ obj-$(CONFIG_INTERVAL_TREE_TEST) += interval_tree_test.o obj-$(CONFIG_PERCPU_TEST) += percpu_test.o obj-$(CONFIG_ASN1) += asn1_decoder.o +obj-$(CONFIG_ASN1_ENCODER) += asn1_encoder.o obj-$(CONFIG_FONT_SUPPORT) += fonts/ diff --git a/lib/asn1_encoder.c b/lib/asn1_encoder.c new file mode 100644 index 000000000000..41e71aae3ef6 --- /dev/null +++ b/lib/asn1_encoder.c @@ -0,0 +1,454 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Simple encoder primitives for ASN.1 BER/DER/CER + * + * Copyright (C) 2019 James.Bottomley@HansenPartnership.com + */ + +#include <linux/asn1_encoder.h> +#include <linux/bug.h> +#include <linux/string.h> +#include <linux/module.h> + +/** + * asn1_encode_integer() - encode positive integer to ASN.1 + * @data: pointer to the pointer to the data + * @end_data: end of data pointer, points one beyond last usable byte in @data + * @integer: integer to be encoded + * + * This is a simplified encoder: it only currently does + * positive integers, but it should be simple enough to add the + * negative case if a use comes along. + */ +unsigned char * +asn1_encode_integer(unsigned char *data, const unsigned char *end_data, + s64 integer) +{ + int data_len = end_data - data; + unsigned char *d = &data[2]; + bool found = false; + int i; + + if (WARN(integer < 0, + "BUG: integer encode only supports positive integers")) + return ERR_PTR(-EINVAL); + + if (IS_ERR(data)) + return data; + + /* need at least 3 bytes for tag, length and integer encoding */ + if (data_len < 3) + return ERR_PTR(-EINVAL); + + /* remaining length where at d (the start of the integer encoding) */ + data_len -= 2; + + data[0] = _tag(UNIV, PRIM, INT); + if (integer == 0) { + *d++ = 0; + goto out; + } + + for (i = sizeof(integer); i > 0 ; i--) { + int byte = integer >> (8 * (i - 1)); + + if (!found && byte == 0) + continue; + + /* + * for a positive number the first byte must have bit + * 7 clear in two's complement (otherwise it's a + * negative number) so prepend a leading zero if + * that's not the case + */ + if (!found && (byte & 0x80)) { + /* + * no check needed here, we already know we + * have len >= 1 + */ + *d++ = 0; + data_len--; + } + + found = true; + if (data_len == 0) + return ERR_PTR(-EINVAL); + + *d++ = byte; + data_len--; + } + + out: + data[1] = d - data - 2; + + return d; +} +EXPORT_SYMBOL_GPL(asn1_encode_integer); + +/* calculate the base 128 digit values setting the top bit of the first octet */ +static int asn1_encode_oid_digit(unsigned char **_data, int *data_len, u32 oid) +{ + unsigned char *data = *_data; + int start = 7 + 7 + 7 + 7; + int ret = 0; + + if (*data_len < 1) + return -EINVAL; + + /* quick case */ + if (oid == 0) { + *data++ = 0x80; + (*data_len)--; + goto out; + } + + while (oid >> start == 0) + start -= 7; + + while (start > 0 && *data_len > 0) { + u8 byte; + + byte = oid >> start; + oid = oid - (byte << start); + start -= 7; + byte |= 0x80; + *data++ = byte; + (*data_len)--; + } + + if (*data_len > 0) { + *data++ = oid; + (*data_len)--; + } else { + ret = -EINVAL; + } + + out: + *_data = data; + return ret; +} + +/** + * asn1_encode_oid() - encode an oid to ASN.1 + * @data: 
position to begin encoding at + * @end_data: end of data pointer, points one beyond last usable byte in @data + * @oid: array of oids + * @oid_len: length of oid array + * + * this encodes an OID up to ASN.1 when presented as an array of OID values + */ +unsigned char * +asn1_encode_oid(unsigned char *data, const unsigned char *end_data, + u32 oid[], int oid_len) +{ + int data_len = end_data - data; + unsigned char *d = data + 2; + int i, ret; + + if (WARN(oid_len < 2, "OID must have at least two elements")) + return ERR_PTR(-EINVAL); + + if (WARN(oid_len > 32, "OID is too large")) + return ERR_PTR(-EINVAL); + + if (IS_ERR(data)) + return data; + + + /* need at least 3 bytes for tag, length and OID encoding */ + if (data_len < 3) + return ERR_PTR(-EINVAL); + + data[0] = _tag(UNIV, PRIM, OID); + *d++ = oid[0] * 40 + oid[1]; + + data_len -= 3; + + ret = 0; + + for (i = 2; i < oid_len; i++) { + ret = asn1_encode_oid_digit(&d, &data_len, oid[i]); + if (ret < 0) + return ERR_PTR(ret); + } + + data[1] = d - data - 2; + + return d; +} +EXPORT_SYMBOL_GPL(asn1_encode_oid); + +/** + * asn1_encode_length() - encode a length to follow an ASN.1 tag + * @data: pointer to encode at + * @data_len: pointer to remaining length (adjusted by routine) + * @len: length to encode + * + * This routine can encode lengths up to 0xffffff using the ASN.1 rules. + * It will accept a negative length and place a zero length tag + * instead (to keep the ASN.1 valid). This convention allows other + * encoder primitives to accept negative lengths as signalling the + * sequence will be re-encoded when the length is known. + */ +static int asn1_encode_length(unsigned char **data, int *data_len, int len) +{ + if (*data_len < 1) + return -EINVAL; + + if (len < 0) { + *((*data)++) = 0; + (*data_len)--; + return 0; + } + + if (len <= 0x7f) { + *((*data)++) = len; + (*data_len)--; + return 0; + } + + if (*data_len < 2) + return -EINVAL; + + if (len <= 0xff) { + *((*data)++) = 0x81; + *((*data)++) = len & 0xff; + *data_len -= 2; + return 0; + } + + if (*data_len < 3) + return -EINVAL; + + if (len <= 0xffff) { + *((*data)++) = 0x82; + *((*data)++) = (len >> 8) & 0xff; + *((*data)++) = len & 0xff; + *data_len -= 3; + return 0; + } + + if (WARN(len > 0xffffff, "ASN.1 length can't be > 0xffffff")) + return -EINVAL; + + if (*data_len < 4) + return -EINVAL; + *((*data)++) = 0x83; + *((*data)++) = (len >> 16) & 0xff; + *((*data)++) = (len >> 8) & 0xff; + *((*data)++) = len & 0xff; + *data_len -= 4; + + return 0; +} + +/** + * asn1_encode_tag() - add a tag for optional or explicit value + * @data: pointer to place tag at + * @end_data: end of data pointer, points one beyond last usable byte in @data + * @tag: tag to be placed + * @string: the data to be tagged + * @len: the length of the data to be tagged + * + * Note this currently only handles short form tags < 31. + * + * Standard usage is to pass in a @tag, @string and @len and the + * @string will be ASN.1 encoded with @tag and placed into @data. If + * the encoding would put data past @end_data then an error is + * returned, otherwise a pointer to a position one beyond the encoding + * is returned. + * + * To encode in place pass a NULL @string and -1 for @len and the + * maximum allowable beginning and end of the data; all this will do + * is add the current maximum length and update the data pointer; + * the place where the tag contents should be placed is returned. 
The + * data should be copied in by the calling routine which should then + * repeat the prior statement but now with the known length. In order + * to avoid having to keep both before and after pointers, the repeat + * expects to be called with @data pointing to where the first encode + * returned it and still NULL for @string but the real length in @len. + */ +unsigned char * +asn1_encode_tag(unsigned char *data, const unsigned char *end_data, + u32 tag, const unsigned char *string, int len) +{ + int data_len = end_data - data; + int ret; + + if (WARN(tag > 30, "ASN.1 tag can't be > 30")) + return ERR_PTR(-EINVAL); + + if (!string && WARN(len > 127, + "BUG: recode tag is too big (>127)")) + return ERR_PTR(-EINVAL); + + if (IS_ERR(data)) + return data; + + if (!string && len > 0) { + /* + * we're recoding, so move back to the start of the + * tag and install a dummy length because the real + * data_len should be NULL + */ + data -= 2; + data_len = 2; + } + + if (data_len < 2) + return ERR_PTR(-EINVAL); + + *(data++) = _tagn(CONT, CONS, tag); + data_len--; + ret = asn1_encode_length(&data, &data_len, len); + if (ret < 0) + return ERR_PTR(ret); + + if (!string) + return data; + + if (data_len < len) + return ERR_PTR(-EINVAL); + + memcpy(data, string, len); + data += len; + + return data; +} +EXPORT_SYMBOL_GPL(asn1_encode_tag); + +/** + * asn1_encode_octet_string() - encode an ASN.1 OCTET STRING + * @data: pointer to encode at + * @end_data: end of data pointer, points one beyond last usable byte in @data + * @string: string to be encoded + * @len: length of string + * + * Note ASN.1 octet strings may contain zeros, so the length is obligatory. + */ +unsigned char * +asn1_encode_octet_string(unsigned char *data, + const unsigned char *end_data, + const unsigned char *string, u32 len) +{ + int data_len = end_data - data; + int ret; + + if (IS_ERR(data)) + return data; + + /* need minimum of 2 bytes for tag and length of zero length string */ + if (data_len < 2) + return ERR_PTR(-EINVAL); + + *(data++) = _tag(UNIV, PRIM, OTS); + data_len--; + + ret = asn1_encode_length(&data, &data_len, len); + if (ret) + return ERR_PTR(ret); + + if (data_len < len) + return ERR_PTR(-EINVAL); + + memcpy(data, string, len); + data += len; + + return data; +} +EXPORT_SYMBOL_GPL(asn1_encode_octet_string); + +/** + * asn1_encode_sequence() - wrap a byte stream in an ASN.1 SEQUENCE + * @data: pointer to encode at + * @end_data: end of data pointer, points one beyond last usable byte in @data + * @seq: data to be encoded as a sequence + * @len: length of the data to be encoded as a sequence + * + * Fill in a sequence. To encode in place, pass NULL for @seq and -1 + * for @len; then call again once the length is known (still with NULL + * for @seq). In order to avoid having to keep both before and after + * pointers, the repeat expects to be called with @data pointing to + * where the first encode placed it. 
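As a reading aid, here is a minimal caller sketch of the two-pass, encode-in-place convention these docstrings describe. The function and buffer names are hypothetical; only the asn1_encode_* primitives from this file are assumed.

	/* hypothetical caller; first pass reserves tag + dummy length */
	static unsigned char *encode_example(unsigned char *buf,
					     const unsigned char *end)
	{
		unsigned char *seq, *d;

		seq = asn1_encode_sequence(buf, end, NULL, -1);
		d = asn1_encode_integer(seq, end, 42);
		d = asn1_encode_boolean(d, end, true);
		if (IS_ERR(d))
			return d;	/* errors chain through the IS_ERR(data) checks */

		/* second pass: same position, NULL @seq, real length (must be <= 127) */
		seq = asn1_encode_sequence(seq, end, NULL, d - seq);
		if (IS_ERR(seq))
			return seq;
		return d;
	}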
+ */ +unsigned char * +asn1_encode_sequence(unsigned char *data, const unsigned char *end_data, + const unsigned char *seq, int len) +{ + int data_len = end_data - data; + int ret; + + if (!seq && WARN(len > 127, + "BUG: recode sequence is too big (>127)")) + return ERR_PTR(-EINVAL); + + if (IS_ERR(data)) + return data; + + if (!seq && len >= 0) { + /* + * we're recoding, so move back to the start of the + * sequence and install a dummy length because the + * real length should be NULL + */ + data -= 2; + data_len = 2; + } + + if (data_len < 2) + return ERR_PTR(-EINVAL); + + *(data++) = _tag(UNIV, CONS, SEQ); + data_len--; + + ret = asn1_encode_length(&data, &data_len, len); + if (ret) + return ERR_PTR(ret); + + if (!seq) + return data; + + if (data_len < len) + return ERR_PTR(-EINVAL); + + memcpy(data, seq, len); + data += len; + + return data; +} +EXPORT_SYMBOL_GPL(asn1_encode_sequence); + +/** + * asn1_encode_boolean() - encode a boolean value to ASN.1 + * @data: pointer to encode at + * @end_data: end of data pointer, points one beyond last usable byte in @data + * @val: the boolean true/false value + */ +unsigned char * +asn1_encode_boolean(unsigned char *data, const unsigned char *end_data, + bool val) +{ + int data_len = end_data - data; + + if (IS_ERR(data)) + return data; + + /* booleans are 3 bytes: tag, length == 1 and value == 0 or 1 */ + if (data_len < 3) + return ERR_PTR(-EINVAL); + + *(data++) = _tag(UNIV, PRIM, BOOL); + data_len--; + + asn1_encode_length(&data, &data_len, 1); + + if (val) + *(data++) = 1; + else + *(data++) = 0; + + return data; +} +EXPORT_SYMBOL_GPL(asn1_encode_boolean); + +MODULE_LICENSE("GPL"); diff --git a/lib/bitmap.c b/lib/bitmap.c index 75006c4036e9..9f4626a4c95f 100644 --- a/lib/bitmap.c +++ b/lib/bitmap.c @@ -487,30 +487,25 @@ EXPORT_SYMBOL(bitmap_print_to_pagebuf); /* * Region 9-38:4/10 describes the following bitmap structure: - * 0 9 12 18 38 - * .........****......****......****...... - * ^ ^ ^ ^ - * start off group_len end + * 0 9 12 18 38 N + * .........****......****......****.................. 
+ * ^ ^ ^ ^ ^ + * start off group_len end nbits */ struct region { unsigned int start; unsigned int off; unsigned int group_len; unsigned int end; + unsigned int nbits; }; -static int bitmap_set_region(const struct region *r, - unsigned long *bitmap, int nbits) +static void bitmap_set_region(const struct region *r, unsigned long *bitmap) { unsigned int start; - if (r->end >= nbits) - return -ERANGE; - for (start = r->start; start <= r->end; start += r->group_len) bitmap_set(bitmap, start, min(r->end - start + 1, r->off)); - - return 0; } static int bitmap_check_region(const struct region *r) @@ -518,14 +513,23 @@ static int bitmap_check_region(const struct region *r) if (r->start > r->end || r->group_len == 0 || r->off > r->group_len) return -EINVAL; + if (r->end >= r->nbits) + return -ERANGE; + return 0; } -static const char *bitmap_getnum(const char *str, unsigned int *num) +static const char *bitmap_getnum(const char *str, unsigned int *num, + unsigned int lastbit) { unsigned long long n; unsigned int len; + if (str[0] == 'N') { + *num = lastbit; + return str + 1; + } + len = _parse_integer(str, 10, &n); if (!len) return ERR_PTR(-EINVAL); @@ -573,7 +577,9 @@ static const char *bitmap_find_region_reverse(const char *start, const char *end static const char *bitmap_parse_region(const char *str, struct region *r) { - str = bitmap_getnum(str, &r->start); + unsigned int lastbit = r->nbits - 1; + + str = bitmap_getnum(str, &r->start, lastbit); if (IS_ERR(str)) return str; @@ -583,7 +589,7 @@ static const char *bitmap_parse_region(const char *str, struct region *r) if (*str != '-') return ERR_PTR(-EINVAL); - str = bitmap_getnum(str + 1, &r->end); + str = bitmap_getnum(str + 1, &r->end, lastbit); if (IS_ERR(str)) return str; @@ -593,14 +599,14 @@ static const char *bitmap_parse_region(const char *str, struct region *r) if (*str != ':') return ERR_PTR(-EINVAL); - str = bitmap_getnum(str + 1, &r->off); + str = bitmap_getnum(str + 1, &r->off, lastbit); if (IS_ERR(str)) return str; if (*str != '/') return ERR_PTR(-EINVAL); - return bitmap_getnum(str + 1, &r->group_len); + return bitmap_getnum(str + 1, &r->group_len, lastbit); no_end: r->end = r->start; @@ -627,6 +633,10 @@ no_pattern: * From each group will be used only defined amount of bits. * Syntax: range:used_size/group_size * Example: 0-1023:2/256 ==> 0,1,256,257,512,513,768,769 + * The value 'N' can be used as a dynamically substituted token for the + * maximum allowed value; i.e (nmaskbits - 1). Keep in mind that it is + * dynamic, so if system changes cause the bitmap width to change, such + * as more cores in a CPU list, then any ranges using N will also change. * * Returns: 0 on success, -errno on invalid input strings. 
Error values: * @@ -640,7 +650,8 @@ int bitmap_parselist(const char *buf, unsigned long *maskp, int nmaskbits) struct region r; long ret; - bitmap_zero(maskp, nmaskbits); + r.nbits = nmaskbits; + bitmap_zero(maskp, r.nbits); while (buf) { buf = bitmap_find_region(buf); @@ -655,9 +666,7 @@ int bitmap_parselist(const char *buf, unsigned long *maskp, int nmaskbits) if (ret) return ret; - ret = bitmap_set_region(&r, maskp, nmaskbits); - if (ret) - return ret; + bitmap_set_region(&r, maskp); } return 0; diff --git a/lib/crypto/chacha.c b/lib/crypto/chacha.c index 4ccbec442469..b748fd3d256e 100644 --- a/lib/crypto/chacha.c +++ b/lib/crypto/chacha.c @@ -64,7 +64,7 @@ static void chacha_permute(u32 *x, int nrounds) } /** - * chacha_block - generate one keystream block and increment block counter + * chacha_block_generic - generate one keystream block and increment block counter * @state: input state matrix (16 32-bit words) * @stream: output keystream block (64 bytes) * @nrounds: number of rounds (20 or 12; 20 is recommended) @@ -92,7 +92,7 @@ EXPORT_SYMBOL(chacha_block_generic); /** * hchacha_block_generic - abbreviated ChaCha core, for XChaCha * @state: input state matrix (16 32-bit words) - * @out: output (8 32-bit words) + * @stream: output (8 32-bit words) * @nrounds: number of rounds (20 or 12; 20 is recommended) * * HChaCha is the ChaCha equivalent of HSalsa and is an intermediate step diff --git a/lib/crypto/poly1305-donna32.c b/lib/crypto/poly1305-donna32.c index 3cc77d94390b..7fb71845cc84 100644 --- a/lib/crypto/poly1305-donna32.c +++ b/lib/crypto/poly1305-donna32.c @@ -10,7 +10,8 @@ #include <asm/unaligned.h> #include <crypto/internal/poly1305.h> -void poly1305_core_setkey(struct poly1305_core_key *key, const u8 raw_key[16]) +void poly1305_core_setkey(struct poly1305_core_key *key, + const u8 raw_key[POLY1305_BLOCK_SIZE]) { /* r &= 0xffffffc0ffffffc0ffffffc0fffffff */ key->key.r[0] = (get_unaligned_le32(&raw_key[0])) & 0x3ffffff; diff --git a/lib/crypto/poly1305-donna64.c b/lib/crypto/poly1305-donna64.c index 6ae181bb4345..d34cf4053668 100644 --- a/lib/crypto/poly1305-donna64.c +++ b/lib/crypto/poly1305-donna64.c @@ -12,7 +12,8 @@ typedef __uint128_t u128; -void poly1305_core_setkey(struct poly1305_core_key *key, const u8 raw_key[16]) +void poly1305_core_setkey(struct poly1305_core_key *key, + const u8 raw_key[POLY1305_BLOCK_SIZE]) { u64 t0, t1; diff --git a/lib/crypto/poly1305.c b/lib/crypto/poly1305.c index 9d2d14df0fee..26d87fc3823e 100644 --- a/lib/crypto/poly1305.c +++ b/lib/crypto/poly1305.c @@ -12,7 +12,8 @@ #include <linux/module.h> #include <asm/unaligned.h> -void poly1305_init_generic(struct poly1305_desc_ctx *desc, const u8 *key) +void poly1305_init_generic(struct poly1305_desc_ctx *desc, + const u8 key[POLY1305_KEY_SIZE]) { poly1305_core_setkey(&desc->core_r, key); desc->s[0] = get_unaligned_le32(key + 16); diff --git a/lib/devres.c b/lib/devres.c index 2a4ff5d64288..4679dbb1bf5f 100644 --- a/lib/devres.c +++ b/lib/devres.c @@ -10,6 +10,7 @@ enum devm_ioremap_type { DEVM_IOREMAP = 0, DEVM_IOREMAP_UC, DEVM_IOREMAP_WC, + DEVM_IOREMAP_NP, }; void devm_ioremap_release(struct device *dev, void *res) @@ -42,6 +43,9 @@ static void __iomem *__devm_ioremap(struct device *dev, resource_size_t offset, case DEVM_IOREMAP_WC: addr = ioremap_wc(offset, size); break; + case DEVM_IOREMAP_NP: + addr = ioremap_np(offset, size); + break; } if (addr) { @@ -99,6 +103,21 @@ void __iomem *devm_ioremap_wc(struct device *dev, resource_size_t offset, EXPORT_SYMBOL(devm_ioremap_wc); /** + * 
devm_ioremap_np - Managed ioremap_np() + * @dev: Generic device to remap IO address for + * @offset: Resource address to map + * @size: Size of map + * + * Managed ioremap_np(). Map is automatically unmapped on driver detach. + */ +void __iomem *devm_ioremap_np(struct device *dev, resource_size_t offset, + resource_size_t size) +{ + return __devm_ioremap(dev, offset, size, DEVM_IOREMAP_NP); +} +EXPORT_SYMBOL(devm_ioremap_np); + +/** * devm_iounmap - Managed iounmap() * @dev: Generic device to unmap for * @addr: Address to unmap @@ -128,6 +147,9 @@ __devm_ioremap_resource(struct device *dev, const struct resource *res, return IOMEM_ERR_PTR(-EINVAL); } + if (type == DEVM_IOREMAP && res->flags & IORESOURCE_MEM_NONPOSTED) + type = DEVM_IOREMAP_NP; + size = resource_size(res); if (res->name) diff --git a/lib/earlycpio.c b/lib/earlycpio.c index e83628882001..7921193f0424 100644 --- a/lib/earlycpio.c +++ b/lib/earlycpio.c @@ -40,7 +40,7 @@ enum cpio_fields { }; /** - * cpio_data find_cpio_data - Search for files in an uncompressed cpio + * find_cpio_data - Search for files in an uncompressed cpio * @path: The directory to search for, including a slash at the end * @data: Pointer to the cpio archive or a header inside * @len: Remaining length of the cpio based on data pointer @@ -49,7 +49,7 @@ enum cpio_fields { * matching file itself. It can be used to iterate through the cpio * to find all files inside of a directory path. * - * @return: struct cpio_data containing the address, length and + * Return: &struct cpio_data containing the address, length and * filename (with the directory path cut off) of the found file. * If you search for a filename and not for files in a directory, * pass the absolute path of the filename in the cpio and make sure diff --git a/lib/iov_iter.c b/lib/iov_iter.c index f66c62aa7154..61228a6c69f8 100644 --- a/lib/iov_iter.c +++ b/lib/iov_iter.c @@ -76,7 +76,44 @@ } \ } -#define iterate_all_kinds(i, n, v, I, B, K) { \ +#define iterate_xarray(i, n, __v, skip, STEP) { \ + struct page *head = NULL; \ + size_t wanted = n, seg, offset; \ + loff_t start = i->xarray_start + skip; \ + pgoff_t index = start >> PAGE_SHIFT; \ + int j; \ + \ + XA_STATE(xas, i->xarray, index); \ + \ + rcu_read_lock(); \ + xas_for_each(&xas, head, ULONG_MAX) { \ + if (xas_retry(&xas, head)) \ + continue; \ + if (WARN_ON(xa_is_value(head))) \ + break; \ + if (WARN_ON(PageHuge(head))) \ + break; \ + for (j = (head->index < index) ? 
index - head->index : 0; \ + j < thp_nr_pages(head); j++) { \ + __v.bv_page = head + j; \ + offset = (i->xarray_start + skip) & ~PAGE_MASK; \ + seg = PAGE_SIZE - offset; \ + __v.bv_offset = offset; \ + __v.bv_len = min(n, seg); \ + (void)(STEP); \ + n -= __v.bv_len; \ + skip += __v.bv_len; \ + if (n == 0) \ + break; \ + } \ + if (n == 0) \ + break; \ + } \ + rcu_read_unlock(); \ + n = wanted - n; \ +} + +#define iterate_all_kinds(i, n, v, I, B, K, X) { \ if (likely(n)) { \ size_t skip = i->iov_offset; \ if (unlikely(i->type & ITER_BVEC)) { \ @@ -88,6 +125,9 @@ struct kvec v; \ iterate_kvec(i, n, v, kvec, skip, (K)) \ } else if (unlikely(i->type & ITER_DISCARD)) { \ + } else if (unlikely(i->type & ITER_XARRAY)) { \ + struct bio_vec v; \ + iterate_xarray(i, n, v, skip, (X)); \ } else { \ const struct iovec *iov; \ struct iovec v; \ @@ -96,7 +136,7 @@ } \ } -#define iterate_and_advance(i, n, v, I, B, K) { \ +#define iterate_and_advance(i, n, v, I, B, K, X) { \ if (unlikely(i->count < n)) \ n = i->count; \ if (i->count) { \ @@ -121,6 +161,9 @@ i->kvec = kvec; \ } else if (unlikely(i->type & ITER_DISCARD)) { \ skip += n; \ + } else if (unlikely(i->type & ITER_XARRAY)) { \ + struct bio_vec v; \ + iterate_xarray(i, n, v, skip, (X)) \ } else { \ const struct iovec *iov; \ struct iovec v; \ @@ -622,7 +665,9 @@ size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i) copyout(v.iov_base, (from += v.iov_len) - v.iov_len, v.iov_len), memcpy_to_page(v.bv_page, v.bv_offset, (from += v.bv_len) - v.bv_len, v.bv_len), - memcpy(v.iov_base, (from += v.iov_len) - v.iov_len, v.iov_len) + memcpy(v.iov_base, (from += v.iov_len) - v.iov_len, v.iov_len), + memcpy_to_page(v.bv_page, v.bv_offset, + (from += v.bv_len) - v.bv_len, v.bv_len) ) return bytes; @@ -738,6 +783,18 @@ size_t _copy_mc_to_iter(const void *addr, size_t bytes, struct iov_iter *i) bytes = curr_addr - s_addr - rem; return bytes; } + }), + ({ + rem = copy_mc_to_page(v.bv_page, v.bv_offset, + (from += v.bv_len) - v.bv_len, v.bv_len); + if (rem) { + curr_addr = (unsigned long) from; + bytes = curr_addr - s_addr - rem; + rcu_read_unlock(); + i->iov_offset += bytes; + i->count -= bytes; + return bytes; + } }) ) @@ -759,7 +816,9 @@ size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i) copyin((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len), memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page, v.bv_offset, v.bv_len), - memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len) + memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len), + memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page, + v.bv_offset, v.bv_len) ) return bytes; @@ -785,7 +844,9 @@ bool _copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i) 0;}), memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page, v.bv_offset, v.bv_len), - memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len) + memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len), + memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page, + v.bv_offset, v.bv_len) ) iov_iter_advance(i, bytes); @@ -805,7 +866,9 @@ size_t _copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i) v.iov_base, v.iov_len), memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page, v.bv_offset, v.bv_len), - memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len) + memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len), + memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page, + v.bv_offset, v.bv_len) ) return bytes; @@ -840,7 +903,9 @@ size_t 
_copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i) memcpy_page_flushcache((to += v.bv_len) - v.bv_len, v.bv_page, v.bv_offset, v.bv_len), memcpy_flushcache((to += v.iov_len) - v.iov_len, v.iov_base, - v.iov_len) + v.iov_len), + memcpy_page_flushcache((to += v.bv_len) - v.bv_len, v.bv_page, + v.bv_offset, v.bv_len) ) return bytes; @@ -864,7 +929,9 @@ bool _copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i) 0;}), memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page, v.bv_offset, v.bv_len), - memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len) + memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len), + memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page, + v.bv_offset, v.bv_len) ) iov_iter_advance(i, bytes); @@ -901,7 +968,7 @@ size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes, { if (unlikely(!page_copy_sane(page, offset, bytes))) return 0; - if (i->type & (ITER_BVEC|ITER_KVEC)) { + if (i->type & (ITER_BVEC | ITER_KVEC | ITER_XARRAY)) { void *kaddr = kmap_atomic(page); size_t wanted = copy_to_iter(kaddr + offset, bytes, i); kunmap_atomic(kaddr); @@ -924,7 +991,7 @@ size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes, WARN_ON(1); return 0; } - if (i->type & (ITER_BVEC|ITER_KVEC)) { + if (i->type & (ITER_BVEC | ITER_KVEC | ITER_XARRAY)) { void *kaddr = kmap_atomic(page); size_t wanted = _copy_from_iter(kaddr + offset, bytes, i); kunmap_atomic(kaddr); @@ -968,7 +1035,8 @@ size_t iov_iter_zero(size_t bytes, struct iov_iter *i) iterate_and_advance(i, bytes, v, clear_user(v.iov_base, v.iov_len), memzero_page(v.bv_page, v.bv_offset, v.bv_len), - memset(v.iov_base, 0, v.iov_len) + memset(v.iov_base, 0, v.iov_len), + memzero_page(v.bv_page, v.bv_offset, v.bv_len) ) return bytes; @@ -992,7 +1060,9 @@ size_t iov_iter_copy_from_user_atomic(struct page *page, copyin((p += v.iov_len) - v.iov_len, v.iov_base, v.iov_len), memcpy_from_page((p += v.bv_len) - v.bv_len, v.bv_page, v.bv_offset, v.bv_len), - memcpy((p += v.iov_len) - v.iov_len, v.iov_base, v.iov_len) + memcpy((p += v.iov_len) - v.iov_len, v.iov_base, v.iov_len), + memcpy_from_page((p += v.bv_len) - v.bv_len, v.bv_page, + v.bv_offset, v.bv_len) ) kunmap_atomic(kaddr); return bytes; @@ -1078,11 +1148,17 @@ void iov_iter_advance(struct iov_iter *i, size_t size) i->count -= size; return; } + if (unlikely(iov_iter_is_xarray(i))) { + size = min(size, i->count); + i->iov_offset += size; + i->count -= size; + return; + } if (iov_iter_is_bvec(i)) { iov_iter_bvec_advance(i, size); return; } - iterate_and_advance(i, size, v, 0, 0, 0) + iterate_and_advance(i, size, v, 0, 0, 0, 0) } EXPORT_SYMBOL(iov_iter_advance); @@ -1126,7 +1202,12 @@ void iov_iter_revert(struct iov_iter *i, size_t unroll) return; } unroll -= i->iov_offset; - if (iov_iter_is_bvec(i)) { + if (iov_iter_is_xarray(i)) { + BUG(); /* We should never go beyond the start of the specified + * range since we might then be straying into pages that + * aren't pinned. 
+ */ + } else if (iov_iter_is_bvec(i)) { const struct bio_vec *bvec = i->bvec; while (1) { size_t n = (--bvec)->bv_len; @@ -1163,9 +1244,9 @@ size_t iov_iter_single_seg_count(const struct iov_iter *i) return i->count; // it is a silly place, anyway if (i->nr_segs == 1) return i->count; - if (unlikely(iov_iter_is_discard(i))) + if (unlikely(iov_iter_is_discard(i) || iov_iter_is_xarray(i))) return i->count; - else if (iov_iter_is_bvec(i)) + if (iov_iter_is_bvec(i)) return min(i->count, i->bvec->bv_len - i->iov_offset); else return min(i->count, i->iov->iov_len - i->iov_offset); @@ -1214,6 +1295,31 @@ void iov_iter_pipe(struct iov_iter *i, unsigned int direction, EXPORT_SYMBOL(iov_iter_pipe); /** + * iov_iter_xarray - Initialise an I/O iterator to use the pages in an xarray + * @i: The iterator to initialise. + * @direction: The direction of the transfer. + * @xarray: The xarray to access. + * @start: The start file position. + * @count: The size of the I/O buffer in bytes. + * + * Set up an I/O iterator to either draw data out of the pages attached to an + * inode or to inject data into those pages. The pages *must* be prevented + * from evaporation, either by taking a ref on them or locking them by the + * caller. + */ +void iov_iter_xarray(struct iov_iter *i, unsigned int direction, + struct xarray *xarray, loff_t start, size_t count) +{ + BUG_ON(direction & ~1); + i->type = ITER_XARRAY | (direction & (READ | WRITE)); + i->xarray = xarray; + i->xarray_start = start; + i->count = count; + i->iov_offset = 0; +} +EXPORT_SYMBOL(iov_iter_xarray); + +/** * iov_iter_discard - Initialise an I/O iterator that discards data * @i: The iterator to initialise. * @direction: The direction of the transfer. @@ -1243,10 +1349,13 @@ unsigned long iov_iter_alignment(const struct iov_iter *i) return size | i->iov_offset; return size; } + if (unlikely(iov_iter_is_xarray(i))) + return (i->xarray_start + i->iov_offset) | i->count; iterate_all_kinds(i, size, v, (res |= (unsigned long)v.iov_base | v.iov_len, 0), res |= v.bv_offset | v.bv_len, - res |= (unsigned long)v.iov_base | v.iov_len + res |= (unsigned long)v.iov_base | v.iov_len, + res |= v.bv_offset | v.bv_len ) return res; } @@ -1268,7 +1377,9 @@ unsigned long iov_iter_gap_alignment(const struct iov_iter *i) (res |= (!res ? 0 : (unsigned long)v.bv_offset) | (size != v.bv_len ? size : 0)), (res |= (!res ? 0 : (unsigned long)v.iov_base) | - (size != v.iov_len ? size : 0)) + (size != v.iov_len ? size : 0)), + (res |= (!res ? 0 : (unsigned long)v.bv_offset) | + (size != v.bv_len ? size : 0)) ); return res; } @@ -1318,6 +1429,75 @@ static ssize_t pipe_get_pages(struct iov_iter *i, return __pipe_get_pages(i, min(maxsize, capacity), pages, iter_head, start); } +static ssize_t iter_xarray_populate_pages(struct page **pages, struct xarray *xa, + pgoff_t index, unsigned int nr_pages) +{ + XA_STATE(xas, xa, index); + struct page *page; + unsigned int ret = 0; + + rcu_read_lock(); + for (page = xas_load(&xas); page; page = xas_next(&xas)) { + if (xas_retry(&xas, page)) + continue; + + /* Has the page moved or been split? 
*/ + if (unlikely(page != xas_reload(&xas))) { + xas_reset(&xas); + continue; + } + + pages[ret] = find_subpage(page, xas.xa_index); + get_page(pages[ret]); + if (++ret == nr_pages) + break; + } + rcu_read_unlock(); + return ret; +} + +static ssize_t iter_xarray_get_pages(struct iov_iter *i, + struct page **pages, size_t maxsize, + unsigned maxpages, size_t *_start_offset) +{ + unsigned nr, offset; + pgoff_t index, count; + size_t size = maxsize, actual; + loff_t pos; + + if (!size || !maxpages) + return 0; + + pos = i->xarray_start + i->iov_offset; + index = pos >> PAGE_SHIFT; + offset = pos & ~PAGE_MASK; + *_start_offset = offset; + + count = 1; + if (size > PAGE_SIZE - offset) { + size -= PAGE_SIZE - offset; + count += size >> PAGE_SHIFT; + size &= ~PAGE_MASK; + if (size) + count++; + } + + if (count > maxpages) + count = maxpages; + + nr = iter_xarray_populate_pages(pages, i->xarray, index, count); + if (nr == 0) + return 0; + + actual = PAGE_SIZE * nr; + actual -= offset; + if (nr == count && size > 0) { + unsigned last_offset = (nr > 1) ? 0 : offset; + actual -= PAGE_SIZE - (last_offset + size); + } + return actual; +} + ssize_t iov_iter_get_pages(struct iov_iter *i, struct page **pages, size_t maxsize, unsigned maxpages, size_t *start) @@ -1327,6 +1507,8 @@ ssize_t iov_iter_get_pages(struct iov_iter *i, if (unlikely(iov_iter_is_pipe(i))) return pipe_get_pages(i, pages, maxsize, maxpages, start); + if (unlikely(iov_iter_is_xarray(i))) + return iter_xarray_get_pages(i, pages, maxsize, maxpages, start); if (unlikely(iov_iter_is_discard(i))) return -EFAULT; @@ -1353,7 +1535,8 @@ ssize_t iov_iter_get_pages(struct iov_iter *i, return v.bv_len; }),({ return -EFAULT; - }) + }), + 0 ) return 0; } @@ -1397,6 +1580,51 @@ static ssize_t pipe_get_pages_alloc(struct iov_iter *i, return n; } +static ssize_t iter_xarray_get_pages_alloc(struct iov_iter *i, + struct page ***pages, size_t maxsize, + size_t *_start_offset) +{ + struct page **p; + unsigned nr, offset; + pgoff_t index, count; + size_t size = maxsize, actual; + loff_t pos; + + if (!size) + return 0; + + pos = i->xarray_start + i->iov_offset; + index = pos >> PAGE_SHIFT; + offset = pos & ~PAGE_MASK; + *_start_offset = offset; + + count = 1; + if (size > PAGE_SIZE - offset) { + size -= PAGE_SIZE - offset; + count += size >> PAGE_SHIFT; + size &= ~PAGE_MASK; + if (size) + count++; + } + + p = get_pages_array(count); + if (!p) + return -ENOMEM; + *pages = p; + + nr = iter_xarray_populate_pages(p, i->xarray, index, count); + if (nr == 0) + return 0; + + actual = PAGE_SIZE * nr; + actual -= offset; + if (nr == count && size > 0) { + unsigned last_offset = (nr > 1) ? 
0 : offset; + actual -= PAGE_SIZE - (last_offset + size); + } + return actual; +} + ssize_t iov_iter_get_pages_alloc(struct iov_iter *i, struct page ***pages, size_t maxsize, size_t *start) @@ -1408,6 +1636,8 @@ ssize_t iov_iter_get_pages_alloc(struct iov_iter *i, if (unlikely(iov_iter_is_pipe(i))) return pipe_get_pages_alloc(i, pages, maxsize, start); + if (unlikely(iov_iter_is_xarray(i))) + return iter_xarray_get_pages_alloc(i, pages, maxsize, start); if (unlikely(iov_iter_is_discard(i))) return -EFAULT; @@ -1440,7 +1670,7 @@ ssize_t iov_iter_get_pages_alloc(struct iov_iter *i, return v.bv_len; }),({ return -EFAULT; - }) + }), 0 ) return 0; } @@ -1478,6 +1708,13 @@ size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum, v.iov_base, v.iov_len, sum, off); off += v.iov_len; + }), ({ + char *p = kmap_atomic(v.bv_page); + sum = csum_and_memcpy((to += v.bv_len) - v.bv_len, + p + v.bv_offset, v.bv_len, + sum, off); + kunmap_atomic(p); + off += v.bv_len; }) ) *csum = sum; @@ -1519,6 +1756,13 @@ bool csum_and_copy_from_iter_full(void *addr, size_t bytes, __wsum *csum, v.iov_base, v.iov_len, sum, off); off += v.iov_len; + }), ({ + char *p = kmap_atomic(v.bv_page); + sum = csum_and_memcpy((to += v.bv_len) - v.bv_len, + p + v.bv_offset, v.bv_len, + sum, off); + kunmap_atomic(p); + off += v.bv_len; }) ) *csum = sum; @@ -1565,6 +1809,13 @@ size_t csum_and_copy_to_iter(const void *addr, size_t bytes, void *_csstate, (from += v.iov_len) - v.iov_len, v.iov_len, sum, off); off += v.iov_len; + }), ({ + char *p = kmap_atomic(v.bv_page); + sum = csum_and_memcpy(p + v.bv_offset, + (from += v.bv_len) - v.bv_len, + v.bv_len, sum, off); + kunmap_atomic(p); + off += v.bv_len; }) ) csstate->csum = sum; @@ -1615,6 +1866,21 @@ int iov_iter_npages(const struct iov_iter *i, int maxpages) npages = pipe_space_for_user(iter_head, pipe->tail, pipe); if (npages >= maxpages) return maxpages; + } else if (unlikely(iov_iter_is_xarray(i))) { + unsigned offset; + + offset = (i->xarray_start + i->iov_offset) & ~PAGE_MASK; + + npages = 1; + if (size > PAGE_SIZE - offset) { + size -= PAGE_SIZE - offset; + npages += size >> PAGE_SHIFT; + size &= ~PAGE_MASK; + if (size) + npages++; + } + if (npages >= maxpages) + return maxpages; } else iterate_all_kinds(i, size, v, ({ unsigned long p = (unsigned long)v.iov_base; npages += DIV_ROUND_UP(p + v.iov_len, PAGE_SIZE) @@ -1631,7 +1897,8 @@ int iov_iter_npages(const struct iov_iter *i, int maxpages) - p / PAGE_SIZE; if (npages >= maxpages) return maxpages; - }) + }), + 0 ) return npages; } @@ -1644,7 +1911,7 @@ const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags) WARN_ON(1); return NULL; } - if (unlikely(iov_iter_is_discard(new))) + if (unlikely(iov_iter_is_discard(new) || iov_iter_is_xarray(new))) return NULL; if (iov_iter_is_bvec(new)) return new->bvec = kmemdup(new->bvec, @@ -1849,7 +2116,12 @@ int iov_iter_for_each_range(struct iov_iter *i, size_t bytes, kunmap(v.bv_page); err;}), ({ w = v; - err = f(&w, context);}) + err = f(&w, context);}), ({ + w.iov_base = kmap(v.bv_page) + v.bv_offset; + w.iov_len = v.bv_len; + err = f(&w, context); + kunmap(v.bv_page); + err;}) ) return err; } diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c index 7998affa45d4..c87d5b6a8a55 100644 --- a/lib/kobject_uevent.c +++ b/lib/kobject_uevent.c @@ -251,12 +251,13 @@ static int kobj_usermode_filter(struct kobject *kobj) static int init_uevent_argv(struct kobj_uevent_env *env, const char *subsystem) { + int buffer_size = sizeof(env->buf) - env->buflen; int len; - 
len = strlcpy(&env->buf[env->buflen], subsystem, - sizeof(env->buf) - env->buflen); - if (len >= (sizeof(env->buf) - env->buflen)) { - WARN(1, KERN_ERR "init_uevent_argv: buffer size too small\n"); + len = strlcpy(&env->buf[env->buflen], subsystem, buffer_size); + if (len >= buffer_size) { + pr_warn("init_uevent_argv: buffer size of %d too small, needed %d\n", + buffer_size, len); return -ENOMEM; } diff --git a/lib/kunit/.kunitconfig b/lib/kunit/.kunitconfig new file mode 100644 index 000000000000..9235b7d42d38 --- /dev/null +++ b/lib/kunit/.kunitconfig @@ -0,0 +1,3 @@ +CONFIG_KUNIT=y +CONFIG_KUNIT_TEST=y +CONFIG_KUNIT_EXAMPLE_TEST=y diff --git a/lib/kunit/assert.c b/lib/kunit/assert.c index e0ec7d6fed6f..b972bda61c0c 100644 --- a/lib/kunit/assert.c +++ b/lib/kunit/assert.c @@ -25,7 +25,7 @@ void kunit_base_assert_format(const struct kunit_assert *assert, } string_stream_add(stream, "%s FAILED at %s:%d\n", - expect_or_assert, assert->file, assert->line); + expect_or_assert, assert->file, assert->line); } EXPORT_SYMBOL_GPL(kunit_base_assert_format); @@ -48,8 +48,9 @@ EXPORT_SYMBOL_GPL(kunit_fail_assert_format); void kunit_unary_assert_format(const struct kunit_assert *assert, struct string_stream *stream) { - struct kunit_unary_assert *unary_assert = container_of( - assert, struct kunit_unary_assert, assert); + struct kunit_unary_assert *unary_assert; + + unary_assert = container_of(assert, struct kunit_unary_assert, assert); kunit_base_assert_format(assert, stream); if (unary_assert->expected_true) @@ -67,8 +68,10 @@ EXPORT_SYMBOL_GPL(kunit_unary_assert_format); void kunit_ptr_not_err_assert_format(const struct kunit_assert *assert, struct string_stream *stream) { - struct kunit_ptr_not_err_assert *ptr_assert = container_of( - assert, struct kunit_ptr_not_err_assert, assert); + struct kunit_ptr_not_err_assert *ptr_assert; + + ptr_assert = container_of(assert, struct kunit_ptr_not_err_assert, + assert); kunit_base_assert_format(assert, stream); if (!ptr_assert->value) { @@ -111,8 +114,10 @@ static bool is_literal(struct kunit *test, const char *text, long long value, void kunit_binary_assert_format(const struct kunit_assert *assert, struct string_stream *stream) { - struct kunit_binary_assert *binary_assert = container_of( - assert, struct kunit_binary_assert, assert); + struct kunit_binary_assert *binary_assert; + + binary_assert = container_of(assert, struct kunit_binary_assert, + assert); kunit_base_assert_format(assert, stream); string_stream_add(stream, @@ -137,8 +142,10 @@ EXPORT_SYMBOL_GPL(kunit_binary_assert_format); void kunit_binary_ptr_assert_format(const struct kunit_assert *assert, struct string_stream *stream) { - struct kunit_binary_ptr_assert *binary_assert = container_of( - assert, struct kunit_binary_ptr_assert, assert); + struct kunit_binary_ptr_assert *binary_assert; + + binary_assert = container_of(assert, struct kunit_binary_ptr_assert, + assert); kunit_base_assert_format(assert, stream); string_stream_add(stream, @@ -156,11 +163,29 @@ void kunit_binary_ptr_assert_format(const struct kunit_assert *assert, } EXPORT_SYMBOL_GPL(kunit_binary_ptr_assert_format); +/* Checks if KUNIT_EXPECT_STREQ() args were string literals. + * Note: `text` will have ""s whereas `value` will not. 
+ */ +static bool is_str_literal(const char *text, const char *value) +{ + int len; + + len = strlen(text); + if (len < 2) + return false; + if (text[0] != '\"' || text[len - 1] != '\"') + return false; + + return strncmp(text + 1, value, len - 2) == 0; +} + void kunit_binary_str_assert_format(const struct kunit_assert *assert, struct string_stream *stream) { - struct kunit_binary_str_assert *binary_assert = container_of( - assert, struct kunit_binary_str_assert, assert); + struct kunit_binary_str_assert *binary_assert; + + binary_assert = container_of(assert, struct kunit_binary_str_assert, + assert); kunit_base_assert_format(assert, stream); string_stream_add(stream, @@ -168,12 +193,14 @@ void kunit_binary_str_assert_format(const struct kunit_assert *assert, binary_assert->left_text, binary_assert->operation, binary_assert->right_text); - string_stream_add(stream, KUNIT_SUBSUBTEST_INDENT "%s == %s\n", - binary_assert->left_text, - binary_assert->left_value); - string_stream_add(stream, KUNIT_SUBSUBTEST_INDENT "%s == %s", - binary_assert->right_text, - binary_assert->right_value); + if (!is_str_literal(binary_assert->left_text, binary_assert->left_value)) + string_stream_add(stream, KUNIT_SUBSUBTEST_INDENT "%s == \"%s\"\n", + binary_assert->left_text, + binary_assert->left_value); + if (!is_str_literal(binary_assert->right_text, binary_assert->right_value)) + string_stream_add(stream, KUNIT_SUBSUBTEST_INDENT "%s == \"%s\"", + binary_assert->right_text, + binary_assert->right_value); kunit_assert_print_msg(assert, stream); } EXPORT_SYMBOL_GPL(kunit_binary_str_assert_format); diff --git a/lib/kunit/test.c b/lib/kunit/test.c index ec9494e914ef..2f6cc0123232 100644 --- a/lib/kunit/test.c +++ b/lib/kunit/test.c @@ -7,6 +7,7 @@ */ #include <kunit/test.h> +#include <kunit/test-bug.h> #include <linux/kernel.h> #include <linux/kref.h> #include <linux/sched/debug.h> @@ -16,6 +17,40 @@ #include "string-stream.h" #include "try-catch-impl.h" +#if IS_BUILTIN(CONFIG_KUNIT) +/* + * Fail the current test and print an error message to the log. + */ +void __kunit_fail_current_test(const char *file, int line, const char *fmt, ...) +{ + va_list args; + int len; + char *buffer; + + if (!current->kunit_test) + return; + + kunit_set_failure(current->kunit_test); + + /* kunit_err() only accepts literals, so evaluate the args first. */ + va_start(args, fmt); + len = vsnprintf(NULL, 0, fmt, args) + 1; + va_end(args); + + buffer = kunit_kmalloc(current->kunit_test, len, GFP_KERNEL); + if (!buffer) + return; + + va_start(args, fmt); + vsnprintf(buffer, len, fmt, args); + va_end(args); + + kunit_err(current->kunit_test, "%s:%d: %s", file, line, buffer); + kunit_kfree(current->kunit_test, buffer); +} +EXPORT_SYMBOL_GPL(__kunit_fail_current_test); +#endif + /* * Append formatted message to log, size of which is limited to * KUNIT_LOG_SIZE bytes (including null terminating byte). 
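Callers are expected to reach __kunit_fail_current_test() through the kunit_fail_current_test() wrapper in <kunit/test-bug.h>, which supplies __FILE__ and __LINE__. Roughly (the reporting function below is a hypothetical caller, not from this patch):

	#include <kunit/test-bug.h>

	/* sketch: dynamic-analysis code flagging a failure in the current test */
	static void report_leak(size_t nbytes)
	{
		/* silently a no-op when no KUnit test is running */
		kunit_fail_current_test("leaked %zu bytes", nbytes);
	}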
@@ -273,9 +308,7 @@ static void kunit_try_run_case(void *data) struct kunit_suite *suite = ctx->suite; struct kunit_case *test_case = ctx->test_case; -#if (IS_ENABLED(CONFIG_KASAN) && IS_ENABLED(CONFIG_KUNIT)) current->kunit_test = test; -#endif /* IS_ENABLED(CONFIG_KASAN) && IS_ENABLED(CONFIG_KUNIT) */ /* * kunit_run_case_internal may encounter a fatal error; if it does, @@ -624,9 +657,7 @@ void kunit_cleanup(struct kunit *test) spin_unlock(&test->lock); kunit_remove_resource(test, res); } -#if (IS_ENABLED(CONFIG_KASAN) && IS_ENABLED(CONFIG_KUNIT)) current->kunit_test = NULL; -#endif /* IS_ENABLED(CONFIG_KASAN) && IS_ENABLED(CONFIG_KUNIT)*/ } EXPORT_SYMBOL_GPL(kunit_cleanup); diff --git a/lib/list_sort.c b/lib/list_sort.c index 52f0c258c895..a926d96ffd44 100644 --- a/lib/list_sort.c +++ b/lib/list_sort.c @@ -7,16 +7,13 @@ #include <linux/list_sort.h> #include <linux/list.h> -typedef int __attribute__((nonnull(2,3))) (*cmp_func)(void *, - struct list_head const *, struct list_head const *); - /* * Returns a list organized in an intermediate format suited * to chaining of merge() calls: null-terminated, no reserved or * sentinel head node, "prev" links not maintained. */ __attribute__((nonnull(2,3,4))) -static struct list_head *merge(void *priv, cmp_func cmp, +static struct list_head *merge(void *priv, list_cmp_func_t cmp, struct list_head *a, struct list_head *b) { struct list_head *head, **tail = &head; @@ -52,7 +49,7 @@ static struct list_head *merge(void *priv, cmp_func cmp, * throughout. */ __attribute__((nonnull(2,3,4,5))) -static void merge_final(void *priv, cmp_func cmp, struct list_head *head, +static void merge_final(void *priv, list_cmp_func_t cmp, struct list_head *head, struct list_head *a, struct list_head *b) { struct list_head *tail = head; @@ -185,9 +182,7 @@ static void merge_final(void *priv, cmp_func cmp, struct list_head *head, * 2^(k+1) - 1 (second merge of case 5 when x == 2^(k-1) - 1). 
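With the ad-hoc cmp_func casts gone, comparison callbacks now match list_cmp_func_t (with const-qualified list pointers, per the removed typedef) directly. A minimal sketch, assuming a hypothetical struct foo:

	struct foo {
		struct list_head node;
		int key;
	};

	static int foo_cmp(void *priv, const struct list_head *a,
			   const struct list_head *b)
	{
		const struct foo *fa = list_entry(a, struct foo, node);
		const struct foo *fb = list_entry(b, struct foo, node);

		/* return <0, 0 or >0, the usual cmp convention */
		return fa->key - fb->key;
	}

	/* usage: list_sort(NULL, &some_list, foo_cmp); */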
*/ __attribute__((nonnull(2,3))) -void list_sort(void *priv, struct list_head *head, - int (*cmp)(void *priv, struct list_head *a, - struct list_head *b)) +void list_sort(void *priv, struct list_head *head, list_cmp_func_t cmp) { struct list_head *list = head->next, *pending = NULL; size_t count = 0; /* Count of pending */ @@ -227,7 +222,7 @@ void list_sort(void *priv, struct list_head *head, if (likely(bits)) { struct list_head *a = *tail, *b = a->prev; - a = merge(priv, (cmp_func)cmp, b, a); + a = merge(priv, cmp, b, a); /* Install the merged result in place of the inputs */ a->prev = b->prev; *tail = a; @@ -249,10 +244,10 @@ void list_sort(void *priv, struct list_head *head, if (!next) break; - list = merge(priv, (cmp_func)cmp, pending, list); + list = merge(priv, cmp, pending, list); pending = next; } /* The final merge, rebuilding prev links */ - merge_final(priv, (cmp_func)cmp, head, pending, list); + merge_final(priv, cmp, head, pending, list); } EXPORT_SYMBOL(list_sort); diff --git a/lib/lru_cache.c b/lib/lru_cache.c index c69ee53d8dde..52313acbfa62 100644 --- a/lib/lru_cache.c +++ b/lib/lru_cache.c @@ -76,6 +76,7 @@ int lc_try_lock(struct lru_cache *lc) /** * lc_create - prepares to track objects in an active set * @name: descriptive name only used in lc_seq_printf_stats and lc_seq_dump_details + * @cache: cache root pointer * @max_pending_changes: maximum changes to accumulate until a transaction is required * @e_count: number of elements allowed to be active simultaneously * @e_size: size of the tracked objects @@ -627,7 +628,7 @@ void lc_set(struct lru_cache *lc, unsigned int enr, int index) } /** - * lc_dump - Dump a complete LRU cache to seq in textual form. + * lc_seq_dump_details - Dump a complete LRU cache to seq in textual form. * @lc: the lru cache to operate on * @seq: the &struct seq_file pointer to seq_printf into * @utext: user supplied additional "heading" or other info diff --git a/lib/math/Makefile b/lib/math/Makefile index be6909e943bd..7456edb864fc 100644 --- a/lib/math/Makefile +++ b/lib/math/Makefile @@ -4,3 +4,5 @@ obj-y += div64.o gcd.o lcm.o int_pow.o int_sqrt.o reciprocal_div.o obj-$(CONFIG_CORDIC) += cordic.o obj-$(CONFIG_PRIME_NUMBERS) += prime_numbers.o obj-$(CONFIG_RATIONAL) += rational.o + +obj-$(CONFIG_TEST_DIV64) += test_div64.o diff --git a/lib/math/test_div64.c b/lib/math/test_div64.c new file mode 100644 index 000000000000..c15edd688dd2 --- /dev/null +++ b/lib/math/test_div64.c @@ -0,0 +1,249 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2021 Maciej W. 
Rozycki + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/init.h> +#include <linux/ktime.h> +#include <linux/module.h> +#include <linux/printk.h> +#include <linux/time64.h> +#include <linux/types.h> + +#include <asm/div64.h> + +#define TEST_DIV64_N_ITER 1024 + +static const u64 test_div64_dividends[] = { + 0x00000000ab275080, + 0x0000000fe73c1959, + 0x000000e54c0a74b1, + 0x00000d4398ff1ef9, + 0x0000a18c2ee1c097, + 0x00079fb80b072e4a, + 0x0072db27380dd689, + 0x0842f488162e2284, + 0xf66745411d8ab063, +}; +#define SIZE_DIV64_DIVIDENDS ARRAY_SIZE(test_div64_dividends) + +#define TEST_DIV64_DIVISOR_0 0x00000009 +#define TEST_DIV64_DIVISOR_1 0x0000007c +#define TEST_DIV64_DIVISOR_2 0x00000204 +#define TEST_DIV64_DIVISOR_3 0x0000cb5b +#define TEST_DIV64_DIVISOR_4 0x00010000 +#define TEST_DIV64_DIVISOR_5 0x0008a880 +#define TEST_DIV64_DIVISOR_6 0x003fd3ae +#define TEST_DIV64_DIVISOR_7 0x0b658fac +#define TEST_DIV64_DIVISOR_8 0xdc08b349 + +static const u32 test_div64_divisors[] = { + TEST_DIV64_DIVISOR_0, + TEST_DIV64_DIVISOR_1, + TEST_DIV64_DIVISOR_2, + TEST_DIV64_DIVISOR_3, + TEST_DIV64_DIVISOR_4, + TEST_DIV64_DIVISOR_5, + TEST_DIV64_DIVISOR_6, + TEST_DIV64_DIVISOR_7, + TEST_DIV64_DIVISOR_8, +}; +#define SIZE_DIV64_DIVISORS ARRAY_SIZE(test_div64_divisors) + +static const struct { + u64 quotient; + u32 remainder; +} test_div64_results[SIZE_DIV64_DIVISORS][SIZE_DIV64_DIVIDENDS] = { + { + { 0x0000000013045e47, 0x00000001 }, + { 0x000000000161596c, 0x00000030 }, + { 0x000000000054e9d4, 0x00000130 }, + { 0x000000000000d776, 0x0000278e }, + { 0x000000000000ab27, 0x00005080 }, + { 0x00000000000013c4, 0x0004ce80 }, + { 0x00000000000002ae, 0x001e143c }, + { 0x000000000000000f, 0x0033e56c }, + { 0x0000000000000000, 0xab275080 }, + }, { + { 0x00000001c45c02d1, 0x00000000 }, + { 0x0000000020d5213c, 0x00000049 }, + { 0x0000000007e3d65f, 0x000001dd }, + { 0x0000000000140531, 0x000065ee }, + { 0x00000000000fe73c, 0x00001959 }, + { 0x000000000001d637, 0x0004e5d9 }, + { 0x0000000000003fc9, 0x000713bb }, + { 0x0000000000000165, 0x029abe7d }, + { 0x0000000000000012, 0x6e9f7e37 }, + }, { + { 0x000000197a3a0cf7, 0x00000002 }, + { 0x00000001d9632e5c, 0x00000021 }, + { 0x0000000071c28039, 0x000001cd }, + { 0x000000000120a844, 0x0000b885 }, + { 0x0000000000e54c0a, 0x000074b1 }, + { 0x00000000001a7bb3, 0x00072331 }, + { 0x00000000000397ad, 0x0002c61b }, + { 0x000000000000141e, 0x06ea2e89 }, + { 0x000000000000010a, 0xab002ad7 }, + }, { + { 0x0000017949e37538, 0x00000001 }, + { 0x0000001b62441f37, 0x00000055 }, + { 0x0000000694a3391d, 0x00000085 }, + { 0x0000000010b2a5d2, 0x0000a753 }, + { 0x000000000d4398ff, 0x00001ef9 }, + { 0x0000000001882ec6, 0x0005cbf9 }, + { 0x000000000035333b, 0x0017abdf }, + { 0x00000000000129f1, 0x0ab4520d }, + { 0x0000000000000f6e, 0x8ac0ce9b }, + }, { + { 0x000011f321a74e49, 0x00000006 }, + { 0x0000014d8481d211, 0x0000005b }, + { 0x0000005025cbd92d, 0x000001e3 }, + { 0x00000000cb5e71e3, 0x000043e6 }, + { 0x00000000a18c2ee1, 0x0000c097 }, + { 0x0000000012a88828, 0x00036c97 }, + { 0x000000000287f16f, 0x002c2a25 }, + { 0x00000000000e2cc7, 0x02d581e3 }, + { 0x000000000000bbf4, 0x1ba08c03 }, + }, { + { 0x0000d8db8f72935d, 0x00000005 }, + { 0x00000fbd5aed7a2e, 0x00000002 }, + { 0x000003c84b6ea64a, 0x00000122 }, + { 0x0000000998fa8829, 0x000044b7 }, + { 0x000000079fb80b07, 0x00002e4a }, + { 0x00000000e16b20fa, 0x0002a14a }, + { 0x000000001e940d22, 0x00353b2e }, + { 0x0000000000ab40ac, 0x06fba6ba }, + { 0x000000000008debd, 0x72d98365 }, + }, { + { 0x000cc3045b8fc281, 0x00000000 }, + 
{ 0x0000ed1f48b5c9fc, 0x00000079 }, + { 0x000038fb9c63406a, 0x000000e1 }, + { 0x000000909705b825, 0x00000a62 }, + { 0x00000072db27380d, 0x0000d689 }, + { 0x0000000d43fce827, 0x00082b09 }, + { 0x00000001ccaba11a, 0x0037e8dd }, + { 0x000000000a13f729, 0x0566dffd }, + { 0x000000000085a14b, 0x23d36726 }, + }, { + { 0x00eafeb9c993592b, 0x00000001 }, + { 0x00110e5befa9a991, 0x00000048 }, + { 0x00041947b4a1d36a, 0x000000dc }, + { 0x00000a6679327311, 0x0000c079 }, + { 0x00000842f488162e, 0x00002284 }, + { 0x000000f4459740fc, 0x00084484 }, + { 0x0000002122c47bf9, 0x002ca446 }, + { 0x00000000b9936290, 0x004979c4 }, + { 0x00000000099ca89d, 0x9db446bf }, + }, { + { 0x1b60cece589da1d2, 0x00000001 }, + { 0x01fcb42be1453f5b, 0x0000004f }, + { 0x007a3f2457df0749, 0x0000013f }, + { 0x0001363130e3ec7b, 0x000017aa }, + { 0x0000f66745411d8a, 0x0000b063 }, + { 0x00001c757dfab350, 0x00048863 }, + { 0x000003dc4979c652, 0x00224ea7 }, + { 0x000000159edc3144, 0x06409ab3 }, + { 0x000000011eadfee3, 0xa99c48a8 }, + }, +}; + +static inline bool test_div64_verify(u64 quotient, u32 remainder, int i, int j) +{ + return (quotient == test_div64_results[i][j].quotient && + remainder == test_div64_results[i][j].remainder); +} + +/* + * This needs to be a macro, because we don't want to rely on the compiler + * to do constant propagation, and `do_div' may take a different path for + * constants, so we do want to verify that as well. + */ +#define test_div64_one(dividend, divisor, i, j) ({ \ + bool result = true; \ + u64 quotient; \ + u32 remainder; \ + \ + quotient = dividend; \ + remainder = do_div(quotient, divisor); \ + if (!test_div64_verify(quotient, remainder, i, j)) { \ + pr_err("ERROR: %016llx / %08x => %016llx,%08x\n", \ + dividend, divisor, quotient, remainder); \ + pr_err("ERROR: expected value => %016llx,%08x\n",\ + test_div64_results[i][j].quotient, \ + test_div64_results[i][j].remainder); \ + result = false; \ + } \ + result; \ +}) + +/* + * Run calculation for the same divisor value expressed as a constant + * and as a variable, so as to verify the implementation for both cases + * should they be handled by different code execution paths. 
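For reference, the do_div() contract this test exercises: the macro divides its 64-bit dividend in place, leaving the quotient in it, and evaluates to the 32-bit remainder. For example:

	u64 ns = 1000000123;
	u32 rem;

	rem = do_div(ns, NSEC_PER_SEC);	/* do_div() writes the quotient back */
	/* now ns == 1 and rem == 123 */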
+ */ +static bool __init test_div64(void) +{ + u64 dividend; + int i, j; + + for (i = 0; i < SIZE_DIV64_DIVIDENDS; i++) { + dividend = test_div64_dividends[i]; + if (!test_div64_one(dividend, TEST_DIV64_DIVISOR_0, i, 0)) + return false; + if (!test_div64_one(dividend, TEST_DIV64_DIVISOR_1, i, 1)) + return false; + if (!test_div64_one(dividend, TEST_DIV64_DIVISOR_2, i, 2)) + return false; + if (!test_div64_one(dividend, TEST_DIV64_DIVISOR_3, i, 3)) + return false; + if (!test_div64_one(dividend, TEST_DIV64_DIVISOR_4, i, 4)) + return false; + if (!test_div64_one(dividend, TEST_DIV64_DIVISOR_5, i, 5)) + return false; + if (!test_div64_one(dividend, TEST_DIV64_DIVISOR_6, i, 6)) + return false; + if (!test_div64_one(dividend, TEST_DIV64_DIVISOR_7, i, 7)) + return false; + if (!test_div64_one(dividend, TEST_DIV64_DIVISOR_8, i, 8)) + return false; + for (j = 0; j < SIZE_DIV64_DIVISORS; j++) { + if (!test_div64_one(dividend, test_div64_divisors[j], + i, j)) + return false; + } + } + return true; +} + +static int __init test_div64_init(void) +{ + struct timespec64 ts, ts0, ts1; + int i; + + pr_info("Starting 64bit/32bit division and modulo test\n"); + ktime_get_ts64(&ts0); + + for (i = 0; i < TEST_DIV64_N_ITER; i++) + if (!test_div64()) + break; + + ktime_get_ts64(&ts1); + ts = timespec64_sub(ts1, ts0); + pr_info("Completed 64bit/32bit division and modulo test, " + "%llu.%09lus elapsed\n", ts.tv_sec, ts.tv_nsec); + + return 0; +} + +static void __exit test_div64_exit(void) +{ +} + +module_init(test_div64_init); +module_exit(test_div64_exit); + +MODULE_AUTHOR("Maciej W. Rozycki <macro@orcam.me.uk>"); +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("64bit/32bit division and modulo test module"); diff --git a/lib/oid_registry.c b/lib/oid_registry.c index f7ad43f28579..3dfaa836e7c5 100644 --- a/lib/oid_registry.c +++ b/lib/oid_registry.c @@ -11,6 +11,7 @@ #include <linux/kernel.h> #include <linux/errno.h> #include <linux/bug.h> +#include <linux/asn1.h> #include "oid_registry_data.c" MODULE_DESCRIPTION("OID Registry"); @@ -92,6 +93,29 @@ enum OID look_up_OID(const void *data, size_t datasize) } EXPORT_SYMBOL_GPL(look_up_OID); +/** + * parse_OID - Parse an OID from a bytestream + * @data: Binary representation of the header + OID + * @datasize: Size of the binary representation + * @oid: Pointer to the OID to return the result in + * + * Parse an OID from a bytestream that holds the OID in the format + * ASN1_OID | length | oid. The length indicator must equal datasize - 2. + * -EBADMSG is returned if the bytestream is too short or malformed. + */ +int parse_OID(const void *data, size_t datasize, enum OID *oid) +{ + const unsigned char *v = data; + + /* we need 2 bytes of header and at least 1 byte for oid */ + if (datasize < 3 || v[0] != ASN1_OID || v[1] != datasize - 2) + return -EBADMSG; + + *oid = look_up_OID(data + 2, datasize - 2); + return 0; +} +EXPORT_SYMBOL_GPL(parse_OID); + /* * sprint_OID - Print an Object Identifier into a buffer * @data: The encoded OID to print diff --git a/lib/parman.c b/lib/parman.c index a11f2f667639..3f8f8d422e62 100644 --- a/lib/parman.c +++ b/lib/parman.c @@ -297,7 +297,7 @@ EXPORT_SYMBOL(parman_destroy); * parman_prio_init - initializes a parman priority chunk * @parman: parman instance * @prio: parman prio structure to be initialized - * @prority: desired priority of the chunk + * @priority: desired priority of the chunk * * Note: all locking must be provided by the caller.
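Taking stock of the new lib/test_div64.c module above: do_div() divides a 64-bit dividend by a 32-bit divisor in place and returns the 32-bit remainder, and because a compile-time-constant divisor may be lowered to a different (reciprocal-multiply) code path than a variable one, the test drives each divisor both ways. A minimal user-space sketch of the contract being verified, with do_div_emul() as a hypothetical stand-in for the kernel macro, checked against the first entry of the expected-results table:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for the kernel's do_div(): divide a 64-bit
 * dividend by a 32-bit divisor in place and return the remainder. */
static uint32_t do_div_emul(uint64_t *n, uint32_t base)
{
	uint32_t rem = (uint32_t)(*n % base);

	*n /= base;
	return rem;
}

int main(void)
{
	uint64_t q = 0x00000000ab275080ULL;		/* test_div64_dividends[0] */
	uint32_t r = do_div_emul(&q, 0x00000009);	/* TEST_DIV64_DIVISOR_0 */

	/* Expected per the results table: 0x0000000013045e47, 0x00000001 */
	printf("%016llx,%08x\n", (unsigned long long)q, (unsigned)r);
	return 0;
}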
* @@ -356,7 +356,7 @@ int parman_item_add(struct parman *parman, struct parman_prio *prio, EXPORT_SYMBOL(parman_item_add); /** - * parman_item_del - deletes parman item + * parman_item_remove - deletes parman item * @parman: parman instance * @prio: parman prio instance to delete the item from * @item: parman item instance diff --git a/lib/radix-tree.c b/lib/radix-tree.c index 3a4da11b804d..b3afafe46fff 100644 --- a/lib/radix-tree.c +++ b/lib/radix-tree.c @@ -166,9 +166,9 @@ static inline void all_tag_set(struct radix_tree_node *node, unsigned int tag) /** * radix_tree_find_next_bit - find the next set bit in a memory region * - * @addr: The address to base the search on - * @size: The bitmap size in bits - * @offset: The bitnumber to start searching at + * @node: where to begin the search + * @tag: the tag index + * @offset: the bitnumber to start searching at * * Unrollable variant of find_next_bit() for constant size arrays. * Tail bits starting from size to roundup(size, BITS_PER_LONG) must be zero. @@ -461,7 +461,7 @@ out: /** * radix_tree_shrink - shrink radix tree to minimum height - * @root radix tree root + * @root: radix tree root */ static inline bool radix_tree_shrink(struct radix_tree_root *root) { @@ -691,7 +691,7 @@ static inline int insert_entries(struct radix_tree_node *node, } /** - * __radix_tree_insert - insert into a radix tree + * radix_tree_insert - insert into a radix tree * @root: radix tree root * @index: index key * @item: item to insert @@ -919,6 +919,7 @@ EXPORT_SYMBOL(radix_tree_replace_slot); /** * radix_tree_iter_replace - replace item in a slot * @root: radix tree root + * @iter: iterator state * @slot: pointer to slot * @item: new item to store in the slot. * diff --git a/lib/sbitmap.c b/lib/sbitmap.c index d693d9213ceb..47b3691058eb 100644 --- a/lib/sbitmap.c +++ b/lib/sbitmap.c @@ -9,6 +9,54 @@ #include <linux/sbitmap.h> #include <linux/seq_file.h> +static int init_alloc_hint(struct sbitmap *sb, gfp_t flags) +{ + unsigned depth = sb->depth; + + sb->alloc_hint = alloc_percpu_gfp(unsigned int, flags); + if (!sb->alloc_hint) + return -ENOMEM; + + if (depth && !sb->round_robin) { + int i; + + for_each_possible_cpu(i) + *per_cpu_ptr(sb->alloc_hint, i) = prandom_u32() % depth; + } + return 0; +} + +static inline unsigned update_alloc_hint_before_get(struct sbitmap *sb, + unsigned int depth) +{ + unsigned hint; + + hint = this_cpu_read(*sb->alloc_hint); + if (unlikely(hint >= depth)) { + hint = depth ? prandom_u32() % depth : 0; + this_cpu_write(*sb->alloc_hint, hint); + } + + return hint; +} + +static inline void update_alloc_hint_after_get(struct sbitmap *sb, + unsigned int depth, + unsigned int hint, + unsigned int nr) +{ + if (nr == -1) { + /* If the map is full, a hint won't do us much good. */ + this_cpu_write(*sb->alloc_hint, 0); + } else if (nr == hint || unlikely(sb->round_robin)) { + /* Only update the hint if we used it. */ + hint = nr + 1; + if (hint >= depth - 1) + hint = 0; + this_cpu_write(*sb->alloc_hint, hint); + } +} + /* * See if we have deferred clears that we can batch move */ @@ -33,24 +81,15 @@ static inline bool sbitmap_deferred_clear(struct sbitmap_word *map) } int sbitmap_init_node(struct sbitmap *sb, unsigned int depth, int shift, - gfp_t flags, int node) + gfp_t flags, int node, bool round_robin, + bool alloc_hint) { unsigned int bits_per_word; unsigned int i; - if (shift < 0) { - shift = ilog2(BITS_PER_LONG); - /* - * If the bitmap is small, shrink the number of bits per word so - * we spread over a few cachelines, at least. 
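An aside on the per-CPU hint machinery that init_alloc_hint() and the update_alloc_hint_*() helpers above move into struct sbitmap: after a successful allocation the hint advances past the bit just taken and wraps near the end of the map, while a failed allocation resets it, since a full map makes any hint useless. The wrap policy restated as a standalone sketch (next_hint() is illustrative, not a kernel function; the kernel additionally stores the new hint only when the old one was actually used, or in round-robin mode):

/* 'nr' is the bit just allocated, or -1 if the map was full. */
static unsigned int next_hint(unsigned int depth, int nr)
{
	unsigned int hint;

	if (nr == -1)
		return 0;		/* map full: a hint won't help */

	hint = nr + 1;
	if (hint >= depth - 1)
		hint = 0;		/* wrap before the end of the map */
	return hint;
}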
If less than 4 - * bits, just forget about it, it's not going to work optimally - * anyway. - */ - if (depth >= 4) { - while ((4U << shift) > depth) - shift--; - } - } + if (shift < 0) + shift = sbitmap_calculate_shift(depth); + bits_per_word = 1U << shift; if (bits_per_word > BITS_PER_LONG) return -EINVAL; @@ -58,15 +97,25 @@ int sbitmap_init_node(struct sbitmap *sb, unsigned int depth, int shift, sb->shift = shift; sb->depth = depth; sb->map_nr = DIV_ROUND_UP(sb->depth, bits_per_word); + sb->round_robin = round_robin; if (depth == 0) { sb->map = NULL; return 0; } + if (alloc_hint) { + if (init_alloc_hint(sb, flags)) + return -ENOMEM; + } else { + sb->alloc_hint = NULL; + } + sb->map = kcalloc_node(sb->map_nr, sizeof(*sb->map), flags, node); - if (!sb->map) + if (!sb->map) { + free_percpu(sb->alloc_hint); return -ENOMEM; + } for (i = 0; i < sb->map_nr; i++) { sb->map[i].depth = min(depth, bits_per_word); @@ -129,14 +178,14 @@ static int __sbitmap_get_word(unsigned long *word, unsigned long depth, } static int sbitmap_find_bit_in_index(struct sbitmap *sb, int index, - unsigned int alloc_hint, bool round_robin) + unsigned int alloc_hint) { struct sbitmap_word *map = &sb->map[index]; int nr; do { nr = __sbitmap_get_word(&map->word, map->depth, alloc_hint, - !round_robin); + !sb->round_robin); if (nr != -1) break; if (!sbitmap_deferred_clear(map)) @@ -146,7 +195,7 @@ static int sbitmap_find_bit_in_index(struct sbitmap *sb, int index, return nr; } -int sbitmap_get(struct sbitmap *sb, unsigned int alloc_hint, bool round_robin) +static int __sbitmap_get(struct sbitmap *sb, unsigned int alloc_hint) { unsigned int i, index; int nr = -1; @@ -158,14 +207,13 @@ int sbitmap_get(struct sbitmap *sb, unsigned int alloc_hint, bool round_robin) * alloc_hint to find the right word index. No point in looping * twice in find_next_zero_bit() for that case. 
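Also visible above is the reworked initializer signature: round_robin and the allocation hint are now properties of struct sbitmap itself, so a bare sbitmap user gets hint-driven allocation without wrapping everything in an sbitmap_queue. A hypothetical caller under the new API (example_init() is not part of the patch):

#include <linux/sbitmap.h>

static int example_init(void)
{
	struct sbitmap sb;
	int ret, nr;

	/* shift == -1 lets sbitmap_calculate_shift() pick the layout */
	ret = sbitmap_init_node(&sb, 128, -1, GFP_KERNEL, NUMA_NO_NODE,
				false /* round_robin */, true /* alloc_hint */);
	if (ret)
		return ret;

	nr = sbitmap_get(&sb);		/* hint comes from sb.alloc_hint */
	if (nr >= 0)
		sbitmap_clear_bit(&sb, nr);

	sbitmap_free(&sb);
	return 0;
}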
*/ - if (round_robin) + if (sb->round_robin) alloc_hint = SB_NR_TO_BIT(sb, alloc_hint); else alloc_hint = 0; for (i = 0; i < sb->map_nr; i++) { - nr = sbitmap_find_bit_in_index(sb, index, alloc_hint, - round_robin); + nr = sbitmap_find_bit_in_index(sb, index, alloc_hint); if (nr != -1) { nr += index << sb->shift; break; @@ -179,10 +227,27 @@ int sbitmap_get(struct sbitmap *sb, unsigned int alloc_hint, bool round_robin) return nr; } + +int sbitmap_get(struct sbitmap *sb) +{ + int nr; + unsigned int hint, depth; + + if (WARN_ON_ONCE(unlikely(!sb->alloc_hint))) + return -1; + + depth = READ_ONCE(sb->depth); + hint = update_alloc_hint_before_get(sb, depth); + nr = __sbitmap_get(sb, hint); + update_alloc_hint_after_get(sb, depth, hint, nr); + + return nr; +} EXPORT_SYMBOL_GPL(sbitmap_get); -int sbitmap_get_shallow(struct sbitmap *sb, unsigned int alloc_hint, - unsigned long shallow_depth) +static int __sbitmap_get_shallow(struct sbitmap *sb, + unsigned int alloc_hint, + unsigned long shallow_depth) { unsigned int i, index; int nr = -1; @@ -214,6 +279,22 @@ again: return nr; } + +int sbitmap_get_shallow(struct sbitmap *sb, unsigned long shallow_depth) +{ + int nr; + unsigned int hint, depth; + + if (WARN_ON_ONCE(unlikely(!sb->alloc_hint))) + return -1; + + depth = READ_ONCE(sb->depth); + hint = update_alloc_hint_before_get(sb, depth); + nr = __sbitmap_get_shallow(sb, hint, shallow_depth); + update_alloc_hint_after_get(sb, depth, hint, nr); + + return nr; +} EXPORT_SYMBOL_GPL(sbitmap_get_shallow); bool sbitmap_any_bit_set(const struct sbitmap *sb) @@ -243,20 +324,21 @@ static unsigned int __sbitmap_weight(const struct sbitmap *sb, bool set) return weight; } -static unsigned int sbitmap_weight(const struct sbitmap *sb) +static unsigned int sbitmap_cleared(const struct sbitmap *sb) { - return __sbitmap_weight(sb, true); + return __sbitmap_weight(sb, false); } -static unsigned int sbitmap_cleared(const struct sbitmap *sb) +unsigned int sbitmap_weight(const struct sbitmap *sb) { - return __sbitmap_weight(sb, false); + return __sbitmap_weight(sb, true) - sbitmap_cleared(sb); } +EXPORT_SYMBOL_GPL(sbitmap_weight); void sbitmap_show(struct sbitmap *sb, struct seq_file *m) { seq_printf(m, "depth=%u\n", sb->depth); - seq_printf(m, "busy=%u\n", sbitmap_weight(sb) - sbitmap_cleared(sb)); + seq_printf(m, "busy=%u\n", sbitmap_weight(sb)); seq_printf(m, "cleared=%u\n", sbitmap_cleared(sb)); seq_printf(m, "bits_per_word=%u\n", 1U << sb->shift); seq_printf(m, "map_nr=%u\n", sb->map_nr); @@ -350,21 +432,11 @@ int sbitmap_queue_init_node(struct sbitmap_queue *sbq, unsigned int depth, int ret; int i; - ret = sbitmap_init_node(&sbq->sb, depth, shift, flags, node); + ret = sbitmap_init_node(&sbq->sb, depth, shift, flags, node, + round_robin, true); if (ret) return ret; - sbq->alloc_hint = alloc_percpu_gfp(unsigned int, flags); - if (!sbq->alloc_hint) { - sbitmap_free(&sbq->sb); - return -ENOMEM; - } - - if (depth && !round_robin) { - for_each_possible_cpu(i) - *per_cpu_ptr(sbq->alloc_hint, i) = prandom_u32() % depth; - } - sbq->min_shallow_depth = UINT_MAX; sbq->wake_batch = sbq_calc_wake_batch(sbq, depth); atomic_set(&sbq->wake_index, 0); @@ -372,7 +444,6 @@ int sbitmap_queue_init_node(struct sbitmap_queue *sbq, unsigned int depth, sbq->ws = kzalloc_node(SBQ_WAIT_QUEUES * sizeof(*sbq->ws), flags, node); if (!sbq->ws) { - free_percpu(sbq->alloc_hint); sbitmap_free(&sbq->sb); return -ENOMEM; } @@ -382,7 +453,6 @@ int sbitmap_queue_init_node(struct sbitmap_queue *sbq, unsigned int depth, atomic_set(&sbq->ws[i].wait_cnt, 
sbq->wake_batch); } - sbq->round_robin = round_robin; return 0; } EXPORT_SYMBOL_GPL(sbitmap_queue_init_node); @@ -415,60 +485,16 @@ EXPORT_SYMBOL_GPL(sbitmap_queue_resize); int __sbitmap_queue_get(struct sbitmap_queue *sbq) { - unsigned int hint, depth; - int nr; - - hint = this_cpu_read(*sbq->alloc_hint); - depth = READ_ONCE(sbq->sb.depth); - if (unlikely(hint >= depth)) { - hint = depth ? prandom_u32() % depth : 0; - this_cpu_write(*sbq->alloc_hint, hint); - } - nr = sbitmap_get(&sbq->sb, hint, sbq->round_robin); - - if (nr == -1) { - /* If the map is full, a hint won't do us much good. */ - this_cpu_write(*sbq->alloc_hint, 0); - } else if (nr == hint || unlikely(sbq->round_robin)) { - /* Only update the hint if we used it. */ - hint = nr + 1; - if (hint >= depth - 1) - hint = 0; - this_cpu_write(*sbq->alloc_hint, hint); - } - - return nr; + return sbitmap_get(&sbq->sb); } EXPORT_SYMBOL_GPL(__sbitmap_queue_get); int __sbitmap_queue_get_shallow(struct sbitmap_queue *sbq, unsigned int shallow_depth) { - unsigned int hint, depth; - int nr; - WARN_ON_ONCE(shallow_depth < sbq->min_shallow_depth); - hint = this_cpu_read(*sbq->alloc_hint); - depth = READ_ONCE(sbq->sb.depth); - if (unlikely(hint >= depth)) { - hint = depth ? prandom_u32() % depth : 0; - this_cpu_write(*sbq->alloc_hint, hint); - } - nr = sbitmap_get_shallow(&sbq->sb, hint, shallow_depth); - - if (nr == -1) { - /* If the map is full, a hint won't do us much good. */ - this_cpu_write(*sbq->alloc_hint, 0); - } else if (nr == hint || unlikely(sbq->round_robin)) { - /* Only update the hint if we used it. */ - hint = nr + 1; - if (hint >= depth - 1) - hint = 0; - this_cpu_write(*sbq->alloc_hint, hint); - } - - return nr; + return sbitmap_get_shallow(&sbq->sb, shallow_depth); } EXPORT_SYMBOL_GPL(__sbitmap_queue_get_shallow); @@ -576,8 +602,8 @@ void sbitmap_queue_clear(struct sbitmap_queue *sbq, unsigned int nr, smp_mb__after_atomic(); sbitmap_queue_wake_up(sbq); - if (likely(!sbq->round_robin && nr < sbq->sb.depth)) - *per_cpu_ptr(sbq->alloc_hint, cpu) = nr; + if (likely(!sbq->sb.round_robin && nr < sbq->sb.depth)) + *per_cpu_ptr(sbq->sb.alloc_hint, cpu) = nr; } EXPORT_SYMBOL_GPL(sbitmap_queue_clear); @@ -615,7 +641,7 @@ void sbitmap_queue_show(struct sbitmap_queue *sbq, struct seq_file *m) if (!first) seq_puts(m, ", "); first = false; - seq_printf(m, "%u", *per_cpu_ptr(sbq->alloc_hint, i)); + seq_printf(m, "%u", *per_cpu_ptr(sbq->sb.alloc_hint, i)); } seq_puts(m, "}\n"); @@ -633,7 +659,7 @@ void sbitmap_queue_show(struct sbitmap_queue *sbq, struct seq_file *m) } seq_puts(m, "}\n"); - seq_printf(m, "round_robin=%d\n", sbq->round_robin); + seq_printf(m, "round_robin=%d\n", sbq->sb.round_robin); seq_printf(m, "min_shallow_depth=%u\n", sbq->min_shallow_depth); } EXPORT_SYMBOL_GPL(sbitmap_queue_show); diff --git a/lib/test_bitmap.c b/lib/test_bitmap.c index 0ea0e8258f14..9cd575583180 100644 --- a/lib/test_bitmap.c +++ b/lib/test_bitmap.c @@ -34,6 +34,8 @@ static const unsigned long exp1[] __initconst = { BITMAP_FROM_U64(0x3333333311111111ULL), BITMAP_FROM_U64(0xffffffff77777777ULL), BITMAP_FROM_U64(0), + BITMAP_FROM_U64(0x00008000), + BITMAP_FROM_U64(0x80000000), }; static const unsigned long exp2[] __initconst = { @@ -334,15 +336,47 @@ static const struct test_bitmap_parselist parselist_tests[] __initconst = { {0, " , ,, , , ", &exp1[12 * step], 8, 0}, {0, " , ,, , , \n", &exp1[12 * step], 8, 0}, + {0, "0-0", &exp1[0], 32, 0}, + {0, "1-1", &exp1[1 * step], 32, 0}, + {0, "15-15", &exp1[13 * step], 32, 0}, + {0, "31-31", &exp1[14 * step], 
32, 0}, + + {0, "0-0:0/1", &exp1[12 * step], 32, 0}, + {0, "0-0:1/1", &exp1[0], 32, 0}, + {0, "0-0:1/31", &exp1[0], 32, 0}, + {0, "0-0:31/31", &exp1[0], 32, 0}, + {0, "1-1:1/1", &exp1[1 * step], 32, 0}, + {0, "0-15:16/31", &exp1[2 * step], 32, 0}, + {0, "15-15:1/2", &exp1[13 * step], 32, 0}, + {0, "15-15:31/31", &exp1[13 * step], 32, 0}, + {0, "15-31:1/31", &exp1[13 * step], 32, 0}, + {0, "16-31:16/31", &exp1[3 * step], 32, 0}, + {0, "31-31:31/31", &exp1[14 * step], 32, 0}, + + {0, "N-N", &exp1[14 * step], 32, 0}, + {0, "0-0:1/N", &exp1[0], 32, 0}, + {0, "0-0:N/N", &exp1[0], 32, 0}, + {0, "0-15:16/N", &exp1[2 * step], 32, 0}, + {0, "15-15:N/N", &exp1[13 * step], 32, 0}, + {0, "15-N:1/N", &exp1[13 * step], 32, 0}, + {0, "16-N:16/N", &exp1[3 * step], 32, 0}, + {0, "N-N:N/N", &exp1[14 * step], 32, 0}, + + {0, "0-N:1/3,1-N:1/3,2-N:1/3", &exp1[8 * step], 32, 0}, + {0, "0-31:1/3,1-31:1/3,2-31:1/3", &exp1[8 * step], 32, 0}, + {0, "1-10:8/12,8-31:24/29,0-31:0/3", &exp1[9 * step], 32, 0}, + {-EINVAL, "-1", NULL, 8, 0}, {-EINVAL, "-0", NULL, 8, 0}, {-EINVAL, "10-1", NULL, 8, 0}, - {-EINVAL, "0-31:", NULL, 8, 0}, - {-EINVAL, "0-31:0", NULL, 8, 0}, - {-EINVAL, "0-31:0/", NULL, 8, 0}, - {-EINVAL, "0-31:0/0", NULL, 8, 0}, - {-EINVAL, "0-31:1/0", NULL, 8, 0}, - {-EINVAL, "0-31:10/1", NULL, 8, 0}, + {-ERANGE, "8-8", NULL, 8, 0}, + {-ERANGE, "0-31", NULL, 8, 0}, + {-EINVAL, "0-31:", NULL, 32, 0}, + {-EINVAL, "0-31:0", NULL, 32, 0}, + {-EINVAL, "0-31:0/", NULL, 32, 0}, + {-EINVAL, "0-31:0/0", NULL, 32, 0}, + {-EINVAL, "0-31:1/0", NULL, 32, 0}, + {-EINVAL, "0-31:10/1", NULL, 32, 0}, {-EOVERFLOW, "0-98765432123456789:10/1", NULL, 8, 0}, {-EINVAL, "a-31", NULL, 8, 0}, diff --git a/lib/test_kasan.c b/lib/test_kasan.c index e5647d147b35..dc05cfc2d12f 100644 --- a/lib/test_kasan.c +++ b/lib/test_kasan.c @@ -54,6 +54,10 @@ static int kasan_test_init(struct kunit *test) multishot = kasan_save_enable_multi_shot(); kasan_set_tagging_report_once(false); + fail_data.report_found = false; + fail_data.report_expected = false; + kunit_add_named_resource(test, NULL, NULL, &resource, + "kasan_data", &fail_data); return 0; } @@ -61,6 +65,7 @@ static void kasan_test_exit(struct kunit *test) { kasan_set_tagging_report_once(true); kasan_restore_multi_shot(multishot); + KUNIT_EXPECT_FALSE(test, fail_data.report_found); } /** @@ -69,37 +74,40 @@ static void kasan_test_exit(struct kunit *test) * resource named "kasan_data". Do not use this name for KUnit resources * outside of KASAN tests. * - * For hardware tag-based KASAN, when a tag fault happens, tag checking is - * normally auto-disabled. When this happens, this test handler reenables - * tag checking. As tag checking can be only disabled or enabled per CPU, this - * handler disables migration (preemption). + * For hardware tag-based KASAN in sync mode, when a tag fault happens, tag + * checking is auto-disabled. When this happens, this test handler reenables + * tag checking. As tag checking can be only disabled or enabled per CPU, + * this handler disables migration (preemption). * * Since the compiler doesn't see that the expression can change the fail_data * fields, it can reorder or optimize away the accesses to those fields. * Use READ/WRITE_ONCE() for the accesses and compiler barriers around the * expression to prevent that. + * + * In between KUNIT_EXPECT_KASAN_FAIL checks, fail_data.report_found is kept as + * false. 
This allows detecting KASAN reports that happen outside of the checks + * by asserting !fail_data.report_found at the start of KUNIT_EXPECT_KASAN_FAIL + * and in kasan_test_exit. */ -#define KUNIT_EXPECT_KASAN_FAIL(test, expression) do { \ - if (IS_ENABLED(CONFIG_KASAN_HW_TAGS)) \ - migrate_disable(); \ - WRITE_ONCE(fail_data.report_expected, true); \ - WRITE_ONCE(fail_data.report_found, false); \ - kunit_add_named_resource(test, \ - NULL, \ - NULL, \ - &resource, \ - "kasan_data", &fail_data); \ - barrier(); \ - expression; \ - barrier(); \ - KUNIT_EXPECT_EQ(test, \ - READ_ONCE(fail_data.report_expected), \ - READ_ONCE(fail_data.report_found)); \ - if (IS_ENABLED(CONFIG_KASAN_HW_TAGS)) { \ - if (READ_ONCE(fail_data.report_found)) \ - kasan_enable_tagging(); \ - migrate_enable(); \ - } \ +#define KUNIT_EXPECT_KASAN_FAIL(test, expression) do { \ + if (IS_ENABLED(CONFIG_KASAN_HW_TAGS) && \ + !kasan_async_mode_enabled()) \ + migrate_disable(); \ + KUNIT_EXPECT_FALSE(test, READ_ONCE(fail_data.report_found)); \ + WRITE_ONCE(fail_data.report_expected, true); \ + barrier(); \ + expression; \ + barrier(); \ + KUNIT_EXPECT_EQ(test, \ + READ_ONCE(fail_data.report_expected), \ + READ_ONCE(fail_data.report_found)); \ + if (IS_ENABLED(CONFIG_KASAN_HW_TAGS)) { \ + if (READ_ONCE(fail_data.report_found)) \ + kasan_enable_tagging_sync(); \ + migrate_enable(); \ + } \ + WRITE_ONCE(fail_data.report_found, false); \ + WRITE_ONCE(fail_data.report_expected, false); \ } while (0) #define KASAN_TEST_NEEDS_CONFIG_ON(test, config) do { \ @@ -1044,14 +1052,14 @@ static void match_all_mem_tag(struct kunit *test) continue; /* Mark the first memory granule with the chosen memory tag. */ - kasan_poison(ptr, KASAN_GRANULE_SIZE, (u8)tag); + kasan_poison(ptr, KASAN_GRANULE_SIZE, (u8)tag, false); /* This access must cause a KASAN report. */ KUNIT_EXPECT_KASAN_FAIL(test, *ptr = 0); } /* Recover the memory tag and free. 
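The KUNIT_EXPECT_KASAN_FAIL() rework above boils down to an expect-failure window: assert a clean slate going in, require a report inside, then re-arm, so any report firing between windows trips the final check in kasan_test_exit(). A minimal user-space analogue of the pattern (EXPECT_REPORT() and report_found are illustrative, not the kernel macro):

#include <assert.h>
#include <stdbool.h>

static bool report_found;	/* set by the (simulated) fault handler */

/* Clean before, report after, then reset so stray reports between
 * windows are still caught by a final assertion. */
#define EXPECT_REPORT(expression) do {	\
	assert(!report_found);		\
	expression;			\
	assert(report_found);		\
	report_found = false;		\
} while (0)

int main(void)
{
	/* Simulate a faulting access that raises a report. */
	EXPECT_REPORT(report_found = true);
	assert(!report_found);	/* analogue of the check in test exit */
	return 0;
}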
*/ - kasan_poison(ptr, KASAN_GRANULE_SIZE, get_tag(ptr)); + kasan_poison(ptr, KASAN_GRANULE_SIZE, get_tag(ptr), false); kfree(ptr); } diff --git a/lib/test_kasan_module.c b/lib/test_kasan_module.c index eee017ff8980..f1017f345d6c 100644 --- a/lib/test_kasan_module.c +++ b/lib/test_kasan_module.c @@ -22,7 +22,7 @@ static noinline void __init copy_user_test(void) char *kmem; char __user *usermem; size_t size = 10; - int unused; + int __maybe_unused unused; kmem = kmalloc(size, GFP_KERNEL); if (!kmem) diff --git a/lib/test_list_sort.c b/lib/test_list_sort.c index 1f017d3b610e..00daaf23316f 100644 --- a/lib/test_list_sort.c +++ b/lib/test_list_sort.c @@ -56,7 +56,8 @@ static int __init check(struct debug_el *ela, struct debug_el *elb) return 0; } -static int __init cmp(void *priv, struct list_head *a, struct list_head *b) +static int __init cmp(void *priv, const struct list_head *a, + const struct list_head *b) { struct debug_el *ela, *elb; diff --git a/lib/test_printf.c b/lib/test_printf.c index 95a2f82427c7..ec0d5976bb69 100644 --- a/lib/test_printf.c +++ b/lib/test_printf.c @@ -577,24 +577,98 @@ netdev_features(void) { } +struct page_flags_test { + int width; + int shift; + int mask; + unsigned long value; + const char *fmt; + const char *name; +}; + +static struct page_flags_test pft[] = { + {SECTIONS_WIDTH, SECTIONS_PGSHIFT, SECTIONS_MASK, + 0, "%d", "section"}, + {NODES_WIDTH, NODES_PGSHIFT, NODES_MASK, + 0, "%d", "node"}, + {ZONES_WIDTH, ZONES_PGSHIFT, ZONES_MASK, + 0, "%d", "zone"}, + {LAST_CPUPID_WIDTH, LAST_CPUPID_PGSHIFT, LAST_CPUPID_MASK, + 0, "%#x", "lastcpupid"}, + {KASAN_TAG_WIDTH, KASAN_TAG_PGSHIFT, KASAN_TAG_MASK, + 0, "%#x", "kasantag"}, +}; + +static void __init +page_flags_test(int section, int node, int zone, int last_cpupid, + int kasan_tag, int flags, const char *name, char *cmp_buf) +{ + unsigned long values[] = {section, node, zone, last_cpupid, kasan_tag}; + unsigned long page_flags = 0; + unsigned long size = 0; + bool append = false; + int i; + + flags &= BIT(NR_PAGEFLAGS) - 1; + if (flags) { + page_flags |= flags; + snprintf(cmp_buf + size, BUF_SIZE - size, "%s", name); + size = strlen(cmp_buf); +#if SECTIONS_WIDTH || NODES_WIDTH || ZONES_WIDTH || \ + LAST_CPUPID_WIDTH || KASAN_TAG_WIDTH + /* Other information also included in page flags */ + snprintf(cmp_buf + size, BUF_SIZE - size, "|"); + size = strlen(cmp_buf); +#endif + } + + /* Set the test value */ + for (i = 0; i < ARRAY_SIZE(pft); i++) + pft[i].value = values[i]; + + for (i = 0; i < ARRAY_SIZE(pft); i++) { + if (!pft[i].width) + continue; + + if (append) { + snprintf(cmp_buf + size, BUF_SIZE - size, "|"); + size = strlen(cmp_buf); + } + + page_flags |= (pft[i].value & pft[i].mask) << pft[i].shift; + snprintf(cmp_buf + size, BUF_SIZE - size, "%s=", pft[i].name); + size = strlen(cmp_buf); + snprintf(cmp_buf + size, BUF_SIZE - size, pft[i].fmt, + pft[i].value & pft[i].mask); + size = strlen(cmp_buf); + append = true; + } + + test(cmp_buf, "%pGp", &page_flags); +} + static void __init flags(void) { unsigned long flags; - gfp_t gfp; char *cmp_buffer; + gfp_t gfp; + + cmp_buffer = kmalloc(BUF_SIZE, GFP_KERNEL); + if (!cmp_buffer) + return; flags = 0; - test("", "%pGp", &flags); + page_flags_test(0, 0, 0, 0, 0, flags, "", cmp_buffer); - /* Page flags should filter the zone id */ flags = 1UL << NR_PAGEFLAGS; - test("", "%pGp", &flags); + page_flags_test(0, 0, 0, 0, 0, flags, "", cmp_buffer); flags |= 1UL << PG_uptodate | 1UL << PG_dirty | 1UL << PG_lru | 1UL << PG_active | 1UL << PG_swapbacked; - 
test("uptodate|dirty|lru|active|swapbacked", "%pGp", &flags); - + page_flags_test(1, 1, 1, 0x1fffff, 1, flags, + "uptodate|dirty|lru|active|swapbacked", + cmp_buffer); flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC | VM_DENYWRITE; @@ -609,10 +683,6 @@ flags(void) gfp = __GFP_ATOMIC; test("__GFP_ATOMIC", "%pGg", &gfp); - cmp_buffer = kmalloc(BUF_SIZE, GFP_KERNEL); - if (!cmp_buffer) - return; - /* Any flags not translated by the table should remain numeric */ gfp = ~__GFP_BITS_MASK; snprintf(cmp_buffer, BUF_SIZE, "%#lx", (unsigned long) gfp); @@ -655,6 +725,23 @@ static void __init fwnode_pointer(void) software_node_unregister_nodes(softnodes); } +static void __init fourcc_pointer(void) +{ + struct { + u32 code; + char *str; + } const try[] = { + { 0x3231564e, "NV12 little-endian (0x3231564e)", }, + { 0xb231564e, "NV12 big-endian (0xb231564e)", }, + { 0x10111213, ".... little-endian (0x10111213)", }, + { 0x20303159, "Y10 little-endian (0x20303159)", }, + }; + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(try); i++) + test(try[i].str, "%p4cc", &try[i].code); +} + static void __init errptr(void) { @@ -700,6 +787,7 @@ test_pointer(void) flags(); errptr(); fwnode_pointer(); + fourcc_pointer(); } static void __init selftest(void) diff --git a/lib/test_rhashtable.c b/lib/test_rhashtable.c index 76c607ee6db5..5a1dd4736b56 100644 --- a/lib/test_rhashtable.c +++ b/lib/test_rhashtable.c @@ -487,6 +487,7 @@ static unsigned int __init print_ht(struct rhltable *rhlt) struct rhashtable *ht; const struct bucket_table *tbl; char buff[512] = ""; + int offset = 0; unsigned int i, cnt = 0; ht = &rhlt->ht; @@ -501,18 +502,18 @@ static unsigned int __init print_ht(struct rhltable *rhlt) next = !rht_is_a_nulls(pos) ? rht_dereference(pos->next, ht) : NULL; if (!rht_is_a_nulls(pos)) { - sprintf(buff, "%s\nbucket[%d] -> ", buff, i); + offset += sprintf(buff + offset, "\nbucket[%d] -> ", i); } while (!rht_is_a_nulls(pos)) { struct rhlist_head *list = container_of(pos, struct rhlist_head, rhead); - sprintf(buff, "%s[[", buff); + offset += sprintf(buff + offset, "[["); do { pos = &list->rhead; list = rht_dereference(list->next, ht); p = rht_obj(ht, pos); - sprintf(buff, "%s val %d (tid=%d)%s", buff, p->value.id, p->value.tid, + offset += sprintf(buff + offset, " val %d (tid=%d)%s", p->value.id, p->value.tid, list? ", " : " "); cnt++; } while (list); @@ -521,7 +522,7 @@ static unsigned int __init print_ht(struct rhltable *rhlt) next = !rht_is_a_nulls(pos) ? rht_dereference(pos->next, ht) : NULL; - sprintf(buff, "%s]]%s", buff, !rht_is_a_nulls(pos) ? " -> " : ""); + offset += sprintf(buff + offset, "]]%s", !rht_is_a_nulls(pos) ? 
" -> " : ""); } } printk(KERN_ERR "\n---- ht: ----%s\n-------------\n", buff); diff --git a/lib/test_vmalloc.c b/lib/test_vmalloc.c index 5cf2fe9aab9e..01e9543de566 100644 --- a/lib/test_vmalloc.c +++ b/lib/test_vmalloc.c @@ -23,8 +23,8 @@ module_param(name, type, 0444); \ MODULE_PARM_DESC(name, msg) \ -__param(bool, single_cpu_test, false, - "Use single first online CPU to run tests"); +__param(int, nr_threads, 0, + "Number of workers to perform tests(min: 1 max: USHRT_MAX)"); __param(bool, sequential_test_order, false, "Use sequential stress tests order"); @@ -47,19 +47,10 @@ __param(int, run_test_mask, INT_MAX, "\t\tid: 128, name: pcpu_alloc_test\n" "\t\tid: 256, name: kvfree_rcu_1_arg_vmalloc_test\n" "\t\tid: 512, name: kvfree_rcu_2_arg_vmalloc_test\n" - "\t\tid: 1024, name: kvfree_rcu_1_arg_slab_test\n" - "\t\tid: 2048, name: kvfree_rcu_2_arg_slab_test\n" /* Add a new test case description here. */ ); /* - * Depends on single_cpu_test parameter. If it is true, then - * use first online CPU to trigger a test on, otherwise go with - * all online CPUs. - */ -static cpumask_t cpus_run_test_mask = CPU_MASK_NONE; - -/* * Read write semaphore for synchronization of setup * phase that is done in main thread and workers. */ @@ -363,42 +354,6 @@ kvfree_rcu_2_arg_vmalloc_test(void) return 0; } -static int -kvfree_rcu_1_arg_slab_test(void) -{ - struct test_kvfree_rcu *p; - int i; - - for (i = 0; i < test_loop_count; i++) { - p = kmalloc(sizeof(*p), GFP_KERNEL); - if (!p) - return -1; - - p->array[0] = 'a'; - kvfree_rcu(p); - } - - return 0; -} - -static int -kvfree_rcu_2_arg_slab_test(void) -{ - struct test_kvfree_rcu *p; - int i; - - for (i = 0; i < test_loop_count; i++) { - p = kmalloc(sizeof(*p), GFP_KERNEL); - if (!p) - return -1; - - p->array[0] = 'a'; - kvfree_rcu(p, rcu); - } - - return 0; -} - struct test_case_desc { const char *test_name; int (*test_func)(void); @@ -415,8 +370,6 @@ static struct test_case_desc test_case_array[] = { { "pcpu_alloc_test", pcpu_alloc_test }, { "kvfree_rcu_1_arg_vmalloc_test", kvfree_rcu_1_arg_vmalloc_test }, { "kvfree_rcu_2_arg_vmalloc_test", kvfree_rcu_2_arg_vmalloc_test }, - { "kvfree_rcu_1_arg_slab_test", kvfree_rcu_1_arg_slab_test }, - { "kvfree_rcu_2_arg_slab_test", kvfree_rcu_2_arg_slab_test }, /* Add a new test case here. 
*/ }; @@ -426,16 +379,13 @@ struct test_case_data { u64 time; }; -/* Split it to get rid of: WARNING: line over 80 characters */ -static struct test_case_data - per_cpu_test_data[NR_CPUS][ARRAY_SIZE(test_case_array)]; - static struct test_driver { struct task_struct *task; + struct test_case_data data[ARRAY_SIZE(test_case_array)]; + unsigned long start; unsigned long stop; - int cpu; -} per_cpu_test_driver[NR_CPUS]; +} *tdriver; static void shuffle_array(int *arr, int n) { @@ -463,9 +413,6 @@ static int test_func(void *private) ktime_t kt; u64 delta; - if (set_cpus_allowed_ptr(current, cpumask_of(t->cpu)) < 0) - pr_err("Failed to set affinity to %d CPU\n", t->cpu); - for (i = 0; i < ARRAY_SIZE(test_case_array); i++) random_array[i] = i; @@ -490,9 +437,9 @@ static int test_func(void *private) kt = ktime_get(); for (j = 0; j < test_repeat_count; j++) { if (!test_case_array[index].test_func()) - per_cpu_test_data[t->cpu][index].test_passed++; + t->data[index].test_passed++; else - per_cpu_test_data[t->cpu][index].test_failed++; + t->data[index].test_failed++; } /* @@ -501,7 +448,7 @@ static int test_func(void *private) delta = (u64) ktime_us_delta(ktime_get(), kt); do_div(delta, (u32) test_repeat_count); - per_cpu_test_data[t->cpu][index].time = delta; + t->data[index].time = delta; } t->stop = get_cycles(); @@ -517,53 +464,56 @@ static int test_func(void *private) return 0; } -static void +static int init_test_configurtion(void) { /* - * Reset all data of all CPUs. + * The maximum number of workers is hard-coded to USHRT_MAX; + * this generous cap is kept just in case, for potential + * heavy stressing. */ - memset(per_cpu_test_data, 0, sizeof(per_cpu_test_data)); + nr_threads = clamp(nr_threads, 1, (int) USHRT_MAX); - if (single_cpu_test) - cpumask_set_cpu(cpumask_first(cpu_online_mask), - &cpus_run_test_mask); - else - cpumask_and(&cpus_run_test_mask, cpu_online_mask, - cpu_online_mask); + /* Allocate the space for test instances. */ + tdriver = kvcalloc(nr_threads, sizeof(*tdriver), GFP_KERNEL); + if (tdriver == NULL) + return -1; if (test_repeat_count <= 0) test_repeat_count = 1; if (test_loop_count <= 0) test_loop_count = 1; + + return 0; } static void do_concurrent_test(void) { - int cpu, ret; + int i, ret; /* * Set some basic configurations plus sanity check. */ - init_test_configurtion(); + ret = init_test_configurtion(); + if (ret < 0) + return; /* * Put on hold all workers. */ down_write(&prepare_for_test_rwsem); - for_each_cpu(cpu, &cpus_run_test_mask) { - struct test_driver *t = &per_cpu_test_driver[cpu]; + for (i = 0; i < nr_threads; i++) { + struct test_driver *t = &tdriver[i]; - t->cpu = cpu; - t->task = kthread_run(test_func, t, "vmalloc_test/%d", cpu); + t->task = kthread_run(test_func, t, "vmalloc_test/%d", i); if (!IS_ERR(t->task)) /* Success.
*/ atomic_inc(&test_n_undone); else - pr_err("Failed to start kthread for %d CPU\n", cpu); + pr_err("Failed to start %d kthread\n", i); } /* @@ -581,29 +531,31 @@ static void do_concurrent_test(void) ret = wait_for_completion_timeout(&test_all_done_comp, HZ); } while (!ret); - for_each_cpu(cpu, &cpus_run_test_mask) { - struct test_driver *t = &per_cpu_test_driver[cpu]; - int i; + for (i = 0; i < nr_threads; i++) { + struct test_driver *t = &tdriver[i]; + int j; if (!IS_ERR(t->task)) kthread_stop(t->task); - for (i = 0; i < ARRAY_SIZE(test_case_array); i++) { - if (!((run_test_mask & (1 << i)) >> i)) + for (j = 0; j < ARRAY_SIZE(test_case_array); j++) { + if (!((run_test_mask & (1 << j)) >> j)) continue; pr_info( "Summary: %s passed: %d failed: %d repeat: %d loops: %d avg: %llu usec\n", - test_case_array[i].test_name, - per_cpu_test_data[cpu][i].test_passed, - per_cpu_test_data[cpu][i].test_failed, + test_case_array[j].test_name, + t->data[j].test_passed, + t->data[j].test_failed, test_repeat_count, test_loop_count, - per_cpu_test_data[cpu][i].time); + t->data[j].time); } - pr_info("All test took CPU%d=%lu cycles\n", - cpu, t->stop - t->start); + pr_info("All test took worker%d=%lu cycles\n", + i, t->stop - t->start); } + + kvfree(tdriver); } static int vmalloc_test_init(void) diff --git a/lib/vdso/gettimeofday.c b/lib/vdso/gettimeofday.c index 2919f1698140..ce2f69552003 100644 --- a/lib/vdso/gettimeofday.c +++ b/lib/vdso/gettimeofday.c @@ -46,16 +46,18 @@ static inline bool vdso_cycles_ok(u64 cycles) #endif #ifdef CONFIG_TIME_NS -static int do_hres_timens(const struct vdso_data *vdns, clockid_t clk, - struct __kernel_timespec *ts) +static __always_inline int do_hres_timens(const struct vdso_data *vdns, clockid_t clk, + struct __kernel_timespec *ts) { - const struct vdso_data *vd = __arch_get_timens_vdso_data(); + const struct vdso_data *vd; const struct timens_offset *offs = &vdns->offset[clk]; const struct vdso_timestamp *vdso_ts; u64 cycles, last, ns; u32 seq; s64 sec; + vd = vdns - (clk == CLOCK_MONOTONIC_RAW ? 
CS_RAW : CS_HRES_COARSE); + vd = __arch_get_timens_vdso_data(vd); if (clk != CLOCK_MONOTONIC_RAW) vd = &vd[CS_HRES_COARSE]; else @@ -92,13 +94,14 @@ static int do_hres_timens(const struct vdso_data *vdns, clockid_t clk, return 0; } #else -static __always_inline const struct vdso_data *__arch_get_timens_vdso_data(void) +static __always_inline +const struct vdso_data *__arch_get_timens_vdso_data(const struct vdso_data *vd) { return NULL; } -static int do_hres_timens(const struct vdso_data *vdns, clockid_t clk, - struct __kernel_timespec *ts) +static __always_inline int do_hres_timens(const struct vdso_data *vdns, clockid_t clk, + struct __kernel_timespec *ts) { return -EINVAL; } @@ -159,10 +162,10 @@ static __always_inline int do_hres(const struct vdso_data *vd, clockid_t clk, } #ifdef CONFIG_TIME_NS -static int do_coarse_timens(const struct vdso_data *vdns, clockid_t clk, - struct __kernel_timespec *ts) +static __always_inline int do_coarse_timens(const struct vdso_data *vdns, clockid_t clk, + struct __kernel_timespec *ts) { - const struct vdso_data *vd = __arch_get_timens_vdso_data(); + const struct vdso_data *vd = __arch_get_timens_vdso_data(vdns); const struct vdso_timestamp *vdso_ts = &vd->basetime[clk]; const struct timens_offset *offs = &vdns->offset[clk]; u64 nsec; @@ -188,8 +191,8 @@ static int do_coarse_timens(const struct vdso_data *vdns, clockid_t clk, return 0; } #else -static int do_coarse_timens(const struct vdso_data *vdns, clockid_t clk, - struct __kernel_timespec *ts) +static __always_inline int do_coarse_timens(const struct vdso_data *vdns, clockid_t clk, + struct __kernel_timespec *ts) { return -1; } @@ -310,7 +313,7 @@ __cvdso_gettimeofday_data(const struct vdso_data *vd, if (unlikely(tz != NULL)) { if (IS_ENABLED(CONFIG_TIME_NS) && vd->clock_mode == VDSO_CLOCKMODE_TIMENS) - vd = __arch_get_timens_vdso_data(); + vd = __arch_get_timens_vdso_data(vd); tz->tz_minuteswest = vd[CS_HRES_COARSE].tz_minuteswest; tz->tz_dsttime = vd[CS_HRES_COARSE].tz_dsttime; @@ -333,7 +336,7 @@ __cvdso_time_data(const struct vdso_data *vd, __kernel_old_time_t *time) if (IS_ENABLED(CONFIG_TIME_NS) && vd->clock_mode == VDSO_CLOCKMODE_TIMENS) - vd = __arch_get_timens_vdso_data(); + vd = __arch_get_timens_vdso_data(vd); t = READ_ONCE(vd[CS_HRES_COARSE].basetime[CLOCK_REALTIME].sec); @@ -363,7 +366,7 @@ int __cvdso_clock_getres_common(const struct vdso_data *vd, clockid_t clock, if (IS_ENABLED(CONFIG_TIME_NS) && vd->clock_mode == VDSO_CLOCKMODE_TIMENS) - vd = __arch_get_timens_vdso_data(); + vd = __arch_get_timens_vdso_data(vd); /* * Convert the clockid to a bitmask and use it to check which diff --git a/lib/vsprintf.c b/lib/vsprintf.c index 41ddc353ebb8..f0c35d9b65bf 100644 --- a/lib/vsprintf.c +++ b/lib/vsprintf.c @@ -1734,6 +1734,42 @@ char *netdev_bits(char *buf, char *end, const void *addr, } static noinline_for_stack +char *fourcc_string(char *buf, char *end, const u32 *fourcc, + struct printf_spec spec, const char *fmt) +{ + char output[sizeof("0123 little-endian (0x01234567)")]; + char *p = output; + unsigned int i; + u32 val; + + if (fmt[1] != 'c' || fmt[2] != 'c') + return error_string(buf, end, "(%p4?)", spec); + + if (check_pointer(&buf, end, fourcc, spec)) + return buf; + + val = *fourcc & ~BIT(31); + + for (i = 0; i < sizeof(*fourcc); i++) { + unsigned char c = val >> (i * 8); + + /* Print non-control ASCII characters as-is, dot otherwise */ + *p++ = isascii(c) && isprint(c) ? c : '.'; + } + + strcpy(p, *fourcc & BIT(31) ? 
" big-endian" : " little-endian"); + p += strlen(p); + + *p++ = ' '; + *p++ = '('; + p = special_hex_number(p, output + sizeof(output) - 2, *fourcc, sizeof(u32)); + *p++ = ')'; + *p = '\0'; + + return string(buf, end, output, spec); +} + +static noinline_for_stack char *address_val(char *buf, char *end, const void *addr, struct printf_spec spec, const char *fmt) { @@ -1916,6 +1952,66 @@ char *format_flags(char *buf, char *end, unsigned long flags, return buf; } +struct page_flags_fields { + int width; + int shift; + int mask; + const struct printf_spec *spec; + const char *name; +}; + +static const struct page_flags_fields pff[] = { + {SECTIONS_WIDTH, SECTIONS_PGSHIFT, SECTIONS_MASK, + &default_dec_spec, "section"}, + {NODES_WIDTH, NODES_PGSHIFT, NODES_MASK, + &default_dec_spec, "node"}, + {ZONES_WIDTH, ZONES_PGSHIFT, ZONES_MASK, + &default_dec_spec, "zone"}, + {LAST_CPUPID_WIDTH, LAST_CPUPID_PGSHIFT, LAST_CPUPID_MASK, + &default_flag_spec, "lastcpupid"}, + {KASAN_TAG_WIDTH, KASAN_TAG_PGSHIFT, KASAN_TAG_MASK, + &default_flag_spec, "kasantag"}, +}; + +static +char *format_page_flags(char *buf, char *end, unsigned long flags) +{ + unsigned long main_flags = flags & (BIT(NR_PAGEFLAGS) - 1); + bool append = false; + int i; + + /* Page flags from the main area. */ + if (main_flags) { + buf = format_flags(buf, end, main_flags, pageflag_names); + append = true; + } + + /* Page flags from the fields area */ + for (i = 0; i < ARRAY_SIZE(pff); i++) { + /* Skip undefined fields. */ + if (!pff[i].width) + continue; + + /* Format: Flag Name + '=' (equals sign) + Number + '|' (separator) */ + if (append) { + if (buf < end) + *buf = '|'; + buf++; + } + + buf = string(buf, end, pff[i].name, default_str_spec); + if (buf < end) + *buf = '='; + buf++; + buf = number(buf, end, (flags >> pff[i].shift) & pff[i].mask, + *pff[i].spec); + + append = true; + } + + return buf; +} + static noinline_for_stack char *flags_string(char *buf, char *end, void *flags_ptr, struct printf_spec spec, const char *fmt) @@ -1928,11 +2024,7 @@ char *flags_string(char *buf, char *end, void *flags_ptr, switch (fmt[1]) { case 'p': - flags = *(unsigned long *)flags_ptr; - /* Remove zone id */ - flags &= (1UL << NR_PAGEFLAGS) - 1; - names = pageflag_names; - break; + return format_page_flags(buf, end, *(unsigned long *)flags_ptr); case 'v': flags = *(unsigned long *)flags_ptr; names = vmaflag_names; @@ -2096,6 +2188,9 @@ EXPORT_SYMBOL_GPL(no_hash_pointers); static int __init no_hash_pointers_enable(char *str) { + if (no_hash_pointers) + return 0; + no_hash_pointers = true; pr_warn("**********************************************************\n"); @@ -2186,8 +2281,11 @@ early_param("no_hash_pointers", no_hash_pointers_enable); * Implements a "recursive vsnprintf". * Do not use this feature without some mechanism to verify the * correctness of the format string and va_list arguments. - * - 'K' For a kernel pointer that should be hidden from unprivileged users + * - 'K' For a kernel pointer that should be hidden from unprivileged users. + * Use only for procfs, sysfs and similar files, not printk(); please + * read the documentation (path below) first. * - 'NF' For a netdev_features_t + * - '4cc' V4L2 or DRM FourCC code, with endianness and raw numerical value. 
* - 'h[CDN]' For a variable-length buffer, it prints it as a hex string with * a certain separator (' ' by default): * C colon @@ -2225,7 +2323,8 @@ early_param("no_hash_pointers", no_hash_pointers_enable); * Without an option prints the full name of the node * f full name * P node name, including a possible unit address - * - 'x' For printing the address. Equivalent to "%lx". + * - 'x' For printing the address unmodified. Equivalent to "%lx". + * Please read the documentation (path below) before using! * - '[ku]s' For a BPF/tracing related format specifier, e.g. used out of * bpf_trace_printk() where [ku] prefix specifies either kernel (k) * or user (u) memory to probe, and: @@ -2285,6 +2384,8 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr, return restricted_pointer(buf, end, ptr, spec); case 'N': return netdev_bits(buf, end, ptr, spec, fmt); + case '4': + return fourcc_string(buf, end, ptr, spec, fmt); case 'a': return address_val(buf, end, ptr, spec, fmt); case 'd': @@ -3135,8 +3236,6 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf) switch (*fmt) { case 'S': case 's': - case 'F': - case 'f': case 'x': case 'K': case 'e':
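Finally, hypothetical call sites for the two vsprintf extensions above, with output mirroring the expectations encoded in lib/test_printf.c (example_prints() is not part of the patch; kernel context assumed):

static void example_prints(void)
{
	u32 fourcc = 0x3231564e;	/* 'NV12' */
	unsigned long page_flags = (1UL << PG_uptodate) | (1UL << PG_lru);

	/* Prints "format: NV12 little-endian (0x3231564e)" */
	pr_info("format: %p4cc\n", &fourcc);

	/* Prints "flags: uptodate|lru" followed by the configured field
	 * areas, e.g. "|node=0|zone=0", per format_page_flags() above. */
	pr_info("flags: %pGp\n", &page_flags);
}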