Diffstat (limited to 'lib')
-rw-r--r--  lib/Kconfig                    |   3
-rw-r--r--  lib/Kconfig.debug              |  46
-rw-r--r--  lib/Kconfig.kasan              |   9
-rw-r--r--  lib/Makefile                   |   1
-rw-r--r--  lib/asn1_encoder.c             | 454
-rw-r--r--  lib/crypto/chacha.c            |   4
-rw-r--r--  lib/crypto/poly1305-donna32.c  |   3
-rw-r--r--  lib/crypto/poly1305-donna64.c  |   3
-rw-r--r--  lib/crypto/poly1305.c          |   3
-rw-r--r--  lib/devres.c                   |  22
-rw-r--r--  lib/earlycpio.c                |   4
-rw-r--r--  lib/kobject_uevent.c           |   9
-rw-r--r--  lib/lru_cache.c                |   3
-rw-r--r--  lib/oid_registry.c             |  24
-rw-r--r--  lib/parman.c                   |   4
-rw-r--r--  lib/radix-tree.c               |  11
-rw-r--r--  lib/test_kasan.c               |  19
-rw-r--r--  lib/test_kasan_module.c        |   2
18 files changed, 587 insertions(+), 37 deletions(-)
diff --git a/lib/Kconfig b/lib/Kconfig index a38cc61256f1..ac3b30697b2b 100644 --- a/lib/Kconfig +++ b/lib/Kconfig @@ -701,3 +701,6 @@ config GENERIC_LIB_DEVMEM_IS_ALLOWED config PLDMFW bool default n + +config ASN1_ENCODER + tristate diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index 2779c29d9981..2c7f46b366f1 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -1363,13 +1363,53 @@ config LOCKDEP bool depends on DEBUG_KERNEL && LOCK_DEBUGGING_SUPPORT select STACKTRACE - select FRAME_POINTER if !MIPS && !PPC && !ARM && !S390 && !MICROBLAZE && !ARC && !X86 + depends on FRAME_POINTER || MIPS || PPC || S390 || MICROBLAZE || ARM || ARC || X86 select KALLSYMS select KALLSYMS_ALL config LOCKDEP_SMALL bool +config LOCKDEP_BITS + int "Bitsize for MAX_LOCKDEP_ENTRIES" + depends on LOCKDEP && !LOCKDEP_SMALL + range 10 30 + default 15 + help + Try increasing this value if you hit "BUG: MAX_LOCKDEP_ENTRIES too low!" message. + +config LOCKDEP_CHAINS_BITS + int "Bitsize for MAX_LOCKDEP_CHAINS" + depends on LOCKDEP && !LOCKDEP_SMALL + range 10 30 + default 16 + help + Try increasing this value if you hit "BUG: MAX_LOCKDEP_CHAINS too low!" message. + +config LOCKDEP_STACK_TRACE_BITS + int "Bitsize for MAX_STACK_TRACE_ENTRIES" + depends on LOCKDEP && !LOCKDEP_SMALL + range 10 30 + default 19 + help + Try increasing this value if you hit "BUG: MAX_STACK_TRACE_ENTRIES too low!" message. + +config LOCKDEP_STACK_TRACE_HASH_BITS + int "Bitsize for STACK_TRACE_HASH_SIZE" + depends on LOCKDEP && !LOCKDEP_SMALL + range 10 30 + default 14 + help + Try increasing this value if you need large MAX_STACK_TRACE_ENTRIES. + +config LOCKDEP_CIRCULAR_QUEUE_BITS + int "Bitsize for elements in circular_queue struct" + depends on LOCKDEP + range 10 30 + default 12 + help + Try increasing this value if you hit "lockdep bfs error:-1" warning due to __cq_enqueue() failure. + config DEBUG_LOCKDEP bool "Lock dependency engine debugging" depends on DEBUG_KERNEL && LOCKDEP @@ -1665,7 +1705,7 @@ config LATENCYTOP depends on DEBUG_KERNEL depends on STACKTRACE_SUPPORT depends on PROC_FS - select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM && !ARC && !X86 + depends on FRAME_POINTER || MIPS || PPC || S390 || MICROBLAZE || ARM || ARC || X86 select KALLSYMS select KALLSYMS_ALL select STACKTRACE @@ -1918,7 +1958,7 @@ config FAULT_INJECTION_STACKTRACE_FILTER depends on FAULT_INJECTION_DEBUG_FS && STACKTRACE_SUPPORT depends on !X86_64 select STACKTRACE - select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM && !ARC && !X86 + depends on FRAME_POINTER || MIPS || PPC || S390 || MICROBLAZE || ARM || ARC || X86 help Provide stacktrace filter for fault-injection capabilities diff --git a/lib/Kconfig.kasan b/lib/Kconfig.kasan index fba9909e31b7..cffc2ebbf185 100644 --- a/lib/Kconfig.kasan +++ b/lib/Kconfig.kasan @@ -138,9 +138,10 @@ config KASAN_INLINE endchoice -config KASAN_STACK_ENABLE +config KASAN_STACK bool "Enable stack instrumentation (unsafe)" if CC_IS_CLANG && !COMPILE_TEST depends on KASAN_GENERIC || KASAN_SW_TAGS + default y if CC_IS_GCC help The LLVM stack address sanitizer has a know problem that causes excessive stack usage in a lot of functions, see @@ -154,12 +155,6 @@ config KASAN_STACK_ENABLE CONFIG_COMPILE_TEST. On gcc it is assumed to always be safe to use and enabled by default. 
-config KASAN_STACK - int - depends on KASAN_GENERIC || KASAN_SW_TAGS - default 1 if KASAN_STACK_ENABLE || CC_IS_GCC - default 0 - config KASAN_SW_TAGS_IDENTIFY bool "Enable memory corruption identification" depends on KASAN_SW_TAGS diff --git a/lib/Makefile b/lib/Makefile index b5307d3eec1a..e11cfc18b6c0 100644 --- a/lib/Makefile +++ b/lib/Makefile @@ -280,6 +280,7 @@ obj-$(CONFIG_INTERVAL_TREE_TEST) += interval_tree_test.o obj-$(CONFIG_PERCPU_TEST) += percpu_test.o obj-$(CONFIG_ASN1) += asn1_decoder.o +obj-$(CONFIG_ASN1_ENCODER) += asn1_encoder.o obj-$(CONFIG_FONT_SUPPORT) += fonts/ diff --git a/lib/asn1_encoder.c b/lib/asn1_encoder.c new file mode 100644 index 000000000000..41e71aae3ef6 --- /dev/null +++ b/lib/asn1_encoder.c @@ -0,0 +1,454 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Simple encoder primitives for ASN.1 BER/DER/CER + * + * Copyright (C) 2019 James.Bottomley@HansenPartnership.com + */ + +#include <linux/asn1_encoder.h> +#include <linux/bug.h> +#include <linux/string.h> +#include <linux/module.h> + +/** + * asn1_encode_integer() - encode positive integer to ASN.1 + * @data: pointer to the pointer to the data + * @end_data: end of data pointer, points one beyond last usable byte in @data + * @integer: integer to be encoded + * + * This is a simplified encoder: it only currently does + * positive integers, but it should be simple enough to add the + * negative case if a use comes along. + */ +unsigned char * +asn1_encode_integer(unsigned char *data, const unsigned char *end_data, + s64 integer) +{ + int data_len = end_data - data; + unsigned char *d = &data[2]; + bool found = false; + int i; + + if (WARN(integer < 0, + "BUG: integer encode only supports positive integers")) + return ERR_PTR(-EINVAL); + + if (IS_ERR(data)) + return data; + + /* need at least 3 bytes for tag, length and integer encoding */ + if (data_len < 3) + return ERR_PTR(-EINVAL); + + /* remaining length where at d (the start of the integer encoding) */ + data_len -= 2; + + data[0] = _tag(UNIV, PRIM, INT); + if (integer == 0) { + *d++ = 0; + goto out; + } + + for (i = sizeof(integer); i > 0 ; i--) { + int byte = integer >> (8 * (i - 1)); + + if (!found && byte == 0) + continue; + + /* + * for a positive number the first byte must have bit + * 7 clear in two's complement (otherwise it's a + * negative number) so prepend a leading zero if + * that's not the case + */ + if (!found && (byte & 0x80)) { + /* + * no check needed here, we already know we + * have len >= 1 + */ + *d++ = 0; + data_len--; + } + + found = true; + if (data_len == 0) + return ERR_PTR(-EINVAL); + + *d++ = byte; + data_len--; + } + + out: + data[1] = d - data - 2; + + return d; +} +EXPORT_SYMBOL_GPL(asn1_encode_integer); + +/* calculate the base 128 digit values setting the top bit of the first octet */ +static int asn1_encode_oid_digit(unsigned char **_data, int *data_len, u32 oid) +{ + unsigned char *data = *_data; + int start = 7 + 7 + 7 + 7; + int ret = 0; + + if (*data_len < 1) + return -EINVAL; + + /* quick case */ + if (oid == 0) { + *data++ = 0x80; + (*data_len)--; + goto out; + } + + while (oid >> start == 0) + start -= 7; + + while (start > 0 && *data_len > 0) { + u8 byte; + + byte = oid >> start; + oid = oid - (byte << start); + start -= 7; + byte |= 0x80; + *data++ = byte; + (*data_len)--; + } + + if (*data_len > 0) { + *data++ = oid; + (*data_len)--; + } else { + ret = -EINVAL; + } + + out: + *_data = data; + return ret; +} + +/** + * asn1_encode_oid() - encode an oid to ASN.1 + * @data: position to begin encoding at 
+ * @end_data: end of data pointer, points one beyond last usable byte in @data + * @oid: array of oids + * @oid_len: length of oid array + * + * this encodes an OID up to ASN.1 when presented as an array of OID values + */ +unsigned char * +asn1_encode_oid(unsigned char *data, const unsigned char *end_data, + u32 oid[], int oid_len) +{ + int data_len = end_data - data; + unsigned char *d = data + 2; + int i, ret; + + if (WARN(oid_len < 2, "OID must have at least two elements")) + return ERR_PTR(-EINVAL); + + if (WARN(oid_len > 32, "OID is too large")) + return ERR_PTR(-EINVAL); + + if (IS_ERR(data)) + return data; + + + /* need at least 3 bytes for tag, length and OID encoding */ + if (data_len < 3) + return ERR_PTR(-EINVAL); + + data[0] = _tag(UNIV, PRIM, OID); + *d++ = oid[0] * 40 + oid[1]; + + data_len -= 3; + + ret = 0; + + for (i = 2; i < oid_len; i++) { + ret = asn1_encode_oid_digit(&d, &data_len, oid[i]); + if (ret < 0) + return ERR_PTR(ret); + } + + data[1] = d - data - 2; + + return d; +} +EXPORT_SYMBOL_GPL(asn1_encode_oid); + +/** + * asn1_encode_length() - encode a length to follow an ASN.1 tag + * @data: pointer to encode at + * @data_len: pointer to remaning length (adjusted by routine) + * @len: length to encode + * + * This routine can encode lengths up to 65535 using the ASN.1 rules. + * It will accept a negative length and place a zero length tag + * instead (to keep the ASN.1 valid). This convention allows other + * encoder primitives to accept negative lengths as singalling the + * sequence will be re-encoded when the length is known. + */ +static int asn1_encode_length(unsigned char **data, int *data_len, int len) +{ + if (*data_len < 1) + return -EINVAL; + + if (len < 0) { + *((*data)++) = 0; + (*data_len)--; + return 0; + } + + if (len <= 0x7f) { + *((*data)++) = len; + (*data_len)--; + return 0; + } + + if (*data_len < 2) + return -EINVAL; + + if (len <= 0xff) { + *((*data)++) = 0x81; + *((*data)++) = len & 0xff; + *data_len -= 2; + return 0; + } + + if (*data_len < 3) + return -EINVAL; + + if (len <= 0xffff) { + *((*data)++) = 0x82; + *((*data)++) = (len >> 8) & 0xff; + *((*data)++) = len & 0xff; + *data_len -= 3; + return 0; + } + + if (WARN(len > 0xffffff, "ASN.1 length can't be > 0xffffff")) + return -EINVAL; + + if (*data_len < 4) + return -EINVAL; + *((*data)++) = 0x83; + *((*data)++) = (len >> 16) & 0xff; + *((*data)++) = (len >> 8) & 0xff; + *((*data)++) = len & 0xff; + *data_len -= 4; + + return 0; +} + +/** + * asn1_encode_tag() - add a tag for optional or explicit value + * @data: pointer to place tag at + * @end_data: end of data pointer, points one beyond last usable byte in @data + * @tag: tag to be placed + * @string: the data to be tagged + * @len: the length of the data to be tagged + * + * Note this currently only handles short form tags < 31. + * + * Standard usage is to pass in a @tag, @string and @length and the + * @string will be ASN.1 encoded with @tag and placed into @data. If + * the encoding would put data past @end_data then an error is + * returned, otherwise a pointer to a position one beyond the encoding + * is returned. + * + * To encode in place pass a NULL @string and -1 for @len and the + * maximum allowable beginning and end of the data; all this will do + * is add the current maximum length and update the data pointer to + * the place where the tag contents should be placed is returned. The + * data should be copied in by the calling routine which should then + * repeat the prior statement but now with the known length. 
In order + * to avoid having to keep both before and after pointers, the repeat + * expects to be called with @data pointing to where the first encode + * returned it and still NULL for @string but the real length in @len. + */ +unsigned char * +asn1_encode_tag(unsigned char *data, const unsigned char *end_data, + u32 tag, const unsigned char *string, int len) +{ + int data_len = end_data - data; + int ret; + + if (WARN(tag > 30, "ASN.1 tag can't be > 30")) + return ERR_PTR(-EINVAL); + + if (!string && WARN(len > 127, + "BUG: recode tag is too big (>127)")) + return ERR_PTR(-EINVAL); + + if (IS_ERR(data)) + return data; + + if (!string && len > 0) { + /* + * we're recoding, so move back to the start of the + * tag and install a dummy length because the real + * data_len should be NULL + */ + data -= 2; + data_len = 2; + } + + if (data_len < 2) + return ERR_PTR(-EINVAL); + + *(data++) = _tagn(CONT, CONS, tag); + data_len--; + ret = asn1_encode_length(&data, &data_len, len); + if (ret < 0) + return ERR_PTR(ret); + + if (!string) + return data; + + if (data_len < len) + return ERR_PTR(-EINVAL); + + memcpy(data, string, len); + data += len; + + return data; +} +EXPORT_SYMBOL_GPL(asn1_encode_tag); + +/** + * asn1_encode_octet_string() - encode an ASN.1 OCTET STRING + * @data: pointer to encode at + * @end_data: end of data pointer, points one beyond last usable byte in @data + * @string: string to be encoded + * @len: length of string + * + * Note ASN.1 octet strings may contain zeros, so the length is obligatory. + */ +unsigned char * +asn1_encode_octet_string(unsigned char *data, + const unsigned char *end_data, + const unsigned char *string, u32 len) +{ + int data_len = end_data - data; + int ret; + + if (IS_ERR(data)) + return data; + + /* need minimum of 2 bytes for tag and length of zero length string */ + if (data_len < 2) + return ERR_PTR(-EINVAL); + + *(data++) = _tag(UNIV, PRIM, OTS); + data_len--; + + ret = asn1_encode_length(&data, &data_len, len); + if (ret) + return ERR_PTR(ret); + + if (data_len < len) + return ERR_PTR(-EINVAL); + + memcpy(data, string, len); + data += len; + + return data; +} +EXPORT_SYMBOL_GPL(asn1_encode_octet_string); + +/** + * asn1_encode_sequence() - wrap a byte stream in an ASN.1 SEQUENCE + * @data: pointer to encode at + * @end_data: end of data pointer, points one beyond last usable byte in @data + * @seq: data to be encoded as a sequence + * @len: length of the data to be encoded as a sequence + * + * Fill in a sequence. To encode in place, pass NULL for @seq and -1 + * for @len; then call again once the length is known (still with NULL + * for @seq). In order to avoid having to keep both before and after + * pointers, the repeat expects to be called with @data pointing to + * where the first encode placed it. 
+ */ +unsigned char * +asn1_encode_sequence(unsigned char *data, const unsigned char *end_data, + const unsigned char *seq, int len) +{ + int data_len = end_data - data; + int ret; + + if (!seq && WARN(len > 127, + "BUG: recode sequence is too big (>127)")) + return ERR_PTR(-EINVAL); + + if (IS_ERR(data)) + return data; + + if (!seq && len >= 0) { + /* + * we're recoding, so move back to the start of the + * sequence and install a dummy length because the + * real length should be NULL + */ + data -= 2; + data_len = 2; + } + + if (data_len < 2) + return ERR_PTR(-EINVAL); + + *(data++) = _tag(UNIV, CONS, SEQ); + data_len--; + + ret = asn1_encode_length(&data, &data_len, len); + if (ret) + return ERR_PTR(ret); + + if (!seq) + return data; + + if (data_len < len) + return ERR_PTR(-EINVAL); + + memcpy(data, seq, len); + data += len; + + return data; +} +EXPORT_SYMBOL_GPL(asn1_encode_sequence); + +/** + * asn1_encode_boolean() - encode a boolean value to ASN.1 + * @data: pointer to encode at + * @end_data: end of data pointer, points one beyond last usable byte in @data + * @val: the boolean true/false value + */ +unsigned char * +asn1_encode_boolean(unsigned char *data, const unsigned char *end_data, + bool val) +{ + int data_len = end_data - data; + + if (IS_ERR(data)) + return data; + + /* booleans are 3 bytes: tag, length == 1 and value == 0 or 1 */ + if (data_len < 3) + return ERR_PTR(-EINVAL); + + *(data++) = _tag(UNIV, PRIM, BOOL); + data_len--; + + asn1_encode_length(&data, &data_len, 1); + + if (val) + *(data++) = 1; + else + *(data++) = 0; + + return data; +} +EXPORT_SYMBOL_GPL(asn1_encode_boolean); + +MODULE_LICENSE("GPL"); diff --git a/lib/crypto/chacha.c b/lib/crypto/chacha.c index 4ccbec442469..b748fd3d256e 100644 --- a/lib/crypto/chacha.c +++ b/lib/crypto/chacha.c @@ -64,7 +64,7 @@ static void chacha_permute(u32 *x, int nrounds) } /** - * chacha_block - generate one keystream block and increment block counter + * chacha_block_generic - generate one keystream block and increment block counter * @state: input state matrix (16 32-bit words) * @stream: output keystream block (64 bytes) * @nrounds: number of rounds (20 or 12; 20 is recommended) @@ -92,7 +92,7 @@ EXPORT_SYMBOL(chacha_block_generic); /** * hchacha_block_generic - abbreviated ChaCha core, for XChaCha * @state: input state matrix (16 32-bit words) - * @out: output (8 32-bit words) + * @stream: output (8 32-bit words) * @nrounds: number of rounds (20 or 12; 20 is recommended) * * HChaCha is the ChaCha equivalent of HSalsa and is an intermediate step diff --git a/lib/crypto/poly1305-donna32.c b/lib/crypto/poly1305-donna32.c index 3cc77d94390b..7fb71845cc84 100644 --- a/lib/crypto/poly1305-donna32.c +++ b/lib/crypto/poly1305-donna32.c @@ -10,7 +10,8 @@ #include <asm/unaligned.h> #include <crypto/internal/poly1305.h> -void poly1305_core_setkey(struct poly1305_core_key *key, const u8 raw_key[16]) +void poly1305_core_setkey(struct poly1305_core_key *key, + const u8 raw_key[POLY1305_BLOCK_SIZE]) { /* r &= 0xffffffc0ffffffc0ffffffc0fffffff */ key->key.r[0] = (get_unaligned_le32(&raw_key[0])) & 0x3ffffff; diff --git a/lib/crypto/poly1305-donna64.c b/lib/crypto/poly1305-donna64.c index 6ae181bb4345..d34cf4053668 100644 --- a/lib/crypto/poly1305-donna64.c +++ b/lib/crypto/poly1305-donna64.c @@ -12,7 +12,8 @@ typedef __uint128_t u128; -void poly1305_core_setkey(struct poly1305_core_key *key, const u8 raw_key[16]) +void poly1305_core_setkey(struct poly1305_core_key *key, + const u8 raw_key[POLY1305_BLOCK_SIZE]) { u64 t0, t1; diff 
--git a/lib/crypto/poly1305.c b/lib/crypto/poly1305.c index 9d2d14df0fee..26d87fc3823e 100644 --- a/lib/crypto/poly1305.c +++ b/lib/crypto/poly1305.c @@ -12,7 +12,8 @@ #include <linux/module.h> #include <asm/unaligned.h> -void poly1305_init_generic(struct poly1305_desc_ctx *desc, const u8 *key) +void poly1305_init_generic(struct poly1305_desc_ctx *desc, + const u8 key[POLY1305_KEY_SIZE]) { poly1305_core_setkey(&desc->core_r, key); desc->s[0] = get_unaligned_le32(key + 16); diff --git a/lib/devres.c b/lib/devres.c index 2a4ff5d64288..4679dbb1bf5f 100644 --- a/lib/devres.c +++ b/lib/devres.c @@ -10,6 +10,7 @@ enum devm_ioremap_type { DEVM_IOREMAP = 0, DEVM_IOREMAP_UC, DEVM_IOREMAP_WC, + DEVM_IOREMAP_NP, }; void devm_ioremap_release(struct device *dev, void *res) @@ -42,6 +43,9 @@ static void __iomem *__devm_ioremap(struct device *dev, resource_size_t offset, case DEVM_IOREMAP_WC: addr = ioremap_wc(offset, size); break; + case DEVM_IOREMAP_NP: + addr = ioremap_np(offset, size); + break; } if (addr) { @@ -99,6 +103,21 @@ void __iomem *devm_ioremap_wc(struct device *dev, resource_size_t offset, EXPORT_SYMBOL(devm_ioremap_wc); /** + * devm_ioremap_np - Managed ioremap_np() + * @dev: Generic device to remap IO address for + * @offset: Resource address to map + * @size: Size of map + * + * Managed ioremap_np(). Map is automatically unmapped on driver detach. + */ +void __iomem *devm_ioremap_np(struct device *dev, resource_size_t offset, + resource_size_t size) +{ + return __devm_ioremap(dev, offset, size, DEVM_IOREMAP_NP); +} +EXPORT_SYMBOL(devm_ioremap_np); + +/** * devm_iounmap - Managed iounmap() * @dev: Generic device to unmap for * @addr: Address to unmap @@ -128,6 +147,9 @@ __devm_ioremap_resource(struct device *dev, const struct resource *res, return IOMEM_ERR_PTR(-EINVAL); } + if (type == DEVM_IOREMAP && res->flags & IORESOURCE_MEM_NONPOSTED) + type = DEVM_IOREMAP_NP; + size = resource_size(res); if (res->name) diff --git a/lib/earlycpio.c b/lib/earlycpio.c index e83628882001..7921193f0424 100644 --- a/lib/earlycpio.c +++ b/lib/earlycpio.c @@ -40,7 +40,7 @@ enum cpio_fields { }; /** - * cpio_data find_cpio_data - Search for files in an uncompressed cpio + * find_cpio_data - Search for files in an uncompressed cpio * @path: The directory to search for, including a slash at the end * @data: Pointer to the cpio archive or a header inside * @len: Remaining length of the cpio based on data pointer @@ -49,7 +49,7 @@ enum cpio_fields { * matching file itself. It can be used to iterate through the cpio * to find all files inside of a directory path. * - * @return: struct cpio_data containing the address, length and + * Return: &struct cpio_data containing the address, length and * filename (with the directory path cut off) of the found file. 
* If you search for a filename and not for files in a directory, * pass the absolute path of the filename in the cpio and make sure diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c index 7998affa45d4..c87d5b6a8a55 100644 --- a/lib/kobject_uevent.c +++ b/lib/kobject_uevent.c @@ -251,12 +251,13 @@ static int kobj_usermode_filter(struct kobject *kobj) static int init_uevent_argv(struct kobj_uevent_env *env, const char *subsystem) { + int buffer_size = sizeof(env->buf) - env->buflen; int len; - len = strlcpy(&env->buf[env->buflen], subsystem, - sizeof(env->buf) - env->buflen); - if (len >= (sizeof(env->buf) - env->buflen)) { - WARN(1, KERN_ERR "init_uevent_argv: buffer size too small\n"); + len = strlcpy(&env->buf[env->buflen], subsystem, buffer_size); + if (len >= buffer_size) { + pr_warn("init_uevent_argv: buffer size of %d too small, needed %d\n", + buffer_size, len); return -ENOMEM; } diff --git a/lib/lru_cache.c b/lib/lru_cache.c index c69ee53d8dde..52313acbfa62 100644 --- a/lib/lru_cache.c +++ b/lib/lru_cache.c @@ -76,6 +76,7 @@ int lc_try_lock(struct lru_cache *lc) /** * lc_create - prepares to track objects in an active set * @name: descriptive name only used in lc_seq_printf_stats and lc_seq_dump_details + * @cache: cache root pointer * @max_pending_changes: maximum changes to accumulate until a transaction is required * @e_count: number of elements allowed to be active simultaneously * @e_size: size of the tracked objects @@ -627,7 +628,7 @@ void lc_set(struct lru_cache *lc, unsigned int enr, int index) } /** - * lc_dump - Dump a complete LRU cache to seq in textual form. + * lc_seq_dump_details - Dump a complete LRU cache to seq in textual form. * @lc: the lru cache to operate on * @seq: the &struct seq_file pointer to seq_printf into * @utext: user supplied additional "heading" or other info diff --git a/lib/oid_registry.c b/lib/oid_registry.c index f7ad43f28579..3dfaa836e7c5 100644 --- a/lib/oid_registry.c +++ b/lib/oid_registry.c @@ -11,6 +11,7 @@ #include <linux/kernel.h> #include <linux/errno.h> #include <linux/bug.h> +#include <linux/asn1.h> #include "oid_registry_data.c" MODULE_DESCRIPTION("OID Registry"); @@ -92,6 +93,29 @@ enum OID look_up_OID(const void *data, size_t datasize) } EXPORT_SYMBOL_GPL(look_up_OID); +/** + * parse_OID - Parse an OID from a bytestream + * @data: Binary representation of the header + OID + * @datasize: Size of the binary representation + * @oid: Pointer to oid to return result + * + * Parse an OID from a bytestream that holds the OID in the format + * ASN1_OID | length | oid. The length indicator must equal to datasize - 2. + * -EBADMSG is returned if the bytestream is too short. 
+ */ +int parse_OID(const void *data, size_t datasize, enum OID *oid) +{ + const unsigned char *v = data; + + /* we need 2 bytes of header and at least 1 byte for oid */ + if (datasize < 3 || v[0] != ASN1_OID || v[1] != datasize - 2) + return -EBADMSG; + + *oid = look_up_OID(data + 2, datasize - 2); + return 0; +} +EXPORT_SYMBOL_GPL(parse_OID); + /* * sprint_OID - Print an Object Identifier into a buffer * @data: The encoded OID to print diff --git a/lib/parman.c b/lib/parman.c index a11f2f667639..3f8f8d422e62 100644 --- a/lib/parman.c +++ b/lib/parman.c @@ -297,7 +297,7 @@ EXPORT_SYMBOL(parman_destroy); * parman_prio_init - initializes a parman priority chunk * @parman: parman instance * @prio: parman prio structure to be initialized - * @prority: desired priority of the chunk + * @priority: desired priority of the chunk * * Note: all locking must be provided by the caller. * @@ -356,7 +356,7 @@ int parman_item_add(struct parman *parman, struct parman_prio *prio, EXPORT_SYMBOL(parman_item_add); /** - * parman_item_del - deletes parman item + * parman_item_remove - deletes parman item * @parman: parman instance * @prio: parman prio instance to delete the item from * @item: parman item instance diff --git a/lib/radix-tree.c b/lib/radix-tree.c index 3a4da11b804d..b3afafe46fff 100644 --- a/lib/radix-tree.c +++ b/lib/radix-tree.c @@ -166,9 +166,9 @@ static inline void all_tag_set(struct radix_tree_node *node, unsigned int tag) /** * radix_tree_find_next_bit - find the next set bit in a memory region * - * @addr: The address to base the search on - * @size: The bitmap size in bits - * @offset: The bitnumber to start searching at + * @node: where to begin the search + * @tag: the tag index + * @offset: the bitnumber to start searching at * * Unrollable variant of find_next_bit() for constant size arrays. * Tail bits starting from size to roundup(size, BITS_PER_LONG) must be zero. @@ -461,7 +461,7 @@ out: /** * radix_tree_shrink - shrink radix tree to minimum height - * @root radix tree root + * @root: radix tree root */ static inline bool radix_tree_shrink(struct radix_tree_root *root) { @@ -691,7 +691,7 @@ static inline int insert_entries(struct radix_tree_node *node, } /** - * __radix_tree_insert - insert into a radix tree + * radix_tree_insert - insert into a radix tree * @root: radix tree root * @index: index key * @item: item to insert @@ -919,6 +919,7 @@ EXPORT_SYMBOL(radix_tree_replace_slot); /** * radix_tree_iter_replace - replace item in a slot * @root: radix tree root + * @iter: iterator state * @slot: pointer to slot * @item: new item to store in the slot. * diff --git a/lib/test_kasan.c b/lib/test_kasan.c index e5647d147b35..785e724ce0d8 100644 --- a/lib/test_kasan.c +++ b/lib/test_kasan.c @@ -69,10 +69,10 @@ static void kasan_test_exit(struct kunit *test) * resource named "kasan_data". Do not use this name for KUnit resources * outside of KASAN tests. * - * For hardware tag-based KASAN, when a tag fault happens, tag checking is - * normally auto-disabled. When this happens, this test handler reenables - * tag checking. As tag checking can be only disabled or enabled per CPU, this - * handler disables migration (preemption). + * For hardware tag-based KASAN in sync mode, when a tag fault happens, tag + * checking is auto-disabled. When this happens, this test handler reenables + * tag checking. As tag checking can be only disabled or enabled per CPU, + * this handler disables migration (preemption). 
* * Since the compiler doesn't see that the expression can change the fail_data * fields, it can reorder or optimize away the accesses to those fields. @@ -80,7 +80,8 @@ static void kasan_test_exit(struct kunit *test) * expression to prevent that. */ #define KUNIT_EXPECT_KASAN_FAIL(test, expression) do { \ - if (IS_ENABLED(CONFIG_KASAN_HW_TAGS)) \ + if (IS_ENABLED(CONFIG_KASAN_HW_TAGS) && \ + !kasan_async_mode_enabled()) \ migrate_disable(); \ WRITE_ONCE(fail_data.report_expected, true); \ WRITE_ONCE(fail_data.report_found, false); \ @@ -92,12 +93,16 @@ static void kasan_test_exit(struct kunit *test) barrier(); \ expression; \ barrier(); \ + if (kasan_async_mode_enabled()) \ + kasan_force_async_fault(); \ + barrier(); \ KUNIT_EXPECT_EQ(test, \ READ_ONCE(fail_data.report_expected), \ READ_ONCE(fail_data.report_found)); \ - if (IS_ENABLED(CONFIG_KASAN_HW_TAGS)) { \ + if (IS_ENABLED(CONFIG_KASAN_HW_TAGS) && \ + !kasan_async_mode_enabled()) { \ if (READ_ONCE(fail_data.report_found)) \ - kasan_enable_tagging(); \ + kasan_enable_tagging_sync(); \ migrate_enable(); \ } \ } while (0) diff --git a/lib/test_kasan_module.c b/lib/test_kasan_module.c index eee017ff8980..f1017f345d6c 100644 --- a/lib/test_kasan_module.c +++ b/lib/test_kasan_module.c @@ -22,7 +22,7 @@ static noinline void __init copy_user_test(void) char *kmem; char __user *usermem; size_t size = 10; - int unused; + int __maybe_unused unused; kmem = kmalloc(size, GFP_KERNEL); if (!kmem) |
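
The primitives added in lib/asn1_encoder.c are chainable: each call writes at @data, returns a pointer one past what it wrote, and an ERR_PTR is returned (and propagated by later calls) on failure, so a run of encodes needs only one final error check. A minimal usage sketch, not taken from the patch: the helper name, buffer size and the single-level SEQUENCE layout are illustrative only, and the contents are built in a scratch buffer first so the simple wrap-by-copy form of asn1_encode_sequence() can be used instead of the two-pass in-place convention described in its kernel-doc.

#include <linux/asn1_encoder.h>
#include <linux/err.h>
#include <linux/types.h>

/* hypothetical helper: DER-encode  SEQUENCE { INTEGER 1, OCTET STRING blob } */
static int example_wrap_blob(u8 *out, size_t outlen, const u8 *blob, u32 bloblen)
{
	u8 scratch[128];
	u8 *w = scratch;
	u8 *o;

	/* errors propagate through the chain; check once at the end */
	w = asn1_encode_integer(w, scratch + sizeof(scratch), 1);
	w = asn1_encode_octet_string(w, scratch + sizeof(scratch), blob, bloblen);
	if (IS_ERR(w))
		return PTR_ERR(w);

	/* wrap the finished contents in a SEQUENCE in the caller's buffer */
	o = asn1_encode_sequence(out, out + outlen, scratch, w - scratch);
	if (IS_ERR(o))
		return PTR_ERR(o);

	return o - out;		/* number of bytes written */
}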
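
The new parse_OID() in lib/oid_registry.c expects the raw ASN1_OID tag and a one-byte length in front of the OID body, and rejects anything else with -EBADMSG. A small sketch, assuming the matching declaration this series adds to <linux/oid_registry.h>; the DER bytes for 2.16.840.1.101.3.4.2.1 (sha256) are written out by hand purely for illustration.

#include <linux/oid_registry.h>
#include <linux/types.h>

/* DER encoding of OID 2.16.840.1.101.3.4.2.1 (sha256): tag, length, body */
static const u8 sha256_oid_der[] = {
	0x06, 0x09, 0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x01,
};

static bool example_is_sha256_oid(void)
{
	enum OID oid;

	/* fails unless the tag and length match the buffer exactly */
	if (parse_OID(sha256_oid_der, sizeof(sha256_oid_der), &oid))
		return false;

	return oid == OID_sha256;
}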
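
The lib/devres.c hunk adds a managed wrapper for non-posted I/O mappings and teaches __devm_ioremap_resource() to pick it automatically when a resource is flagged IORESOURCE_MEM_NONPOSTED. A driver-side sketch of the explicit call, with a hypothetical device and resource; most drivers would instead keep using devm_ioremap_resource(), which now selects the non-posted variant on its own for such resources.

#include <linux/device.h>
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/ioport.h>

/* hypothetical probe fragment: map registers that need non-posted (nGnRnE) semantics */
static int example_map_regs(struct device *dev, struct resource *res,
			    void __iomem **base)
{
	*base = devm_ioremap_np(dev, res->start, resource_size(res));
	if (!*base)
		return -ENOMEM;	/* unmapped automatically on driver detach */

	return 0;
}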