Diffstat (limited to 'lib')
-rw-r--r--  lib/842/842.h            |   2
-rw-r--r--  lib/842/842_compress.c   |  13
-rw-r--r--  lib/842/842_decompress.c |  17
-rw-r--r--  lib/Kconfig              |   1
-rw-r--r--  lib/Kconfig.debug        |   1
-rw-r--r--  lib/Makefile             |   3
-rw-r--r--  lib/devres.c             |   2
-rw-r--r--  lib/dma-debug.c          |   2
-rw-r--r--  lib/fault-inject.c       |   2
-rw-r--r--  lib/kobject.c            |  12
-rw-r--r--  lib/mpi/mpicoder.c       | 199
-rw-r--r--  lib/nmi_backtrace.c      |  11
-rw-r--r--  lib/once.c               |  62
-rw-r--r--  lib/random32.c           |  37
-rw-r--r--  lib/string.c             |   3
15 files changed, 338 insertions, 29 deletions
diff --git a/lib/842/842.h b/lib/842/842.h
index 7c200030acf7..e0a122bc1cdb 100644
--- a/lib/842/842.h
+++ b/lib/842/842.h
@@ -76,6 +76,7 @@
 #include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/bitops.h>
+#include <linux/crc32.h>
 #include <asm/unaligned.h>
 
 #include <linux/sw842.h>
@@ -98,6 +99,7 @@
 #define I2_BITS (8)
 #define I4_BITS (9)
 #define I8_BITS (8)
+#define CRC_BITS (32)
 
 #define REPEAT_BITS_MAX (0x3f)
 #define SHORT_DATA_BITS_MAX (0x7)
diff --git a/lib/842/842_compress.c b/lib/842/842_compress.c
index 7ce68948e68c..4051339bdfbd 100644
--- a/lib/842/842_compress.c
+++ b/lib/842/842_compress.c
@@ -490,6 +490,7 @@ int sw842_compress(const u8 *in, unsigned int ilen,
 	int ret;
 	u64 last, next, pad, total;
 	u8 repeat_count = 0;
+	u32 crc;
 
 	BUILD_BUG_ON(sizeof(*p) > SW842_MEM_COMPRESS);
 
@@ -580,6 +581,18 @@ skip_comp:
 	if (ret)
 		return ret;
 
+	/*
+	 * crc(0:31) is appended to target data starting with the next
+	 * bit after End of stream template.
+	 * nx842 calculates CRC for data in big-endian format. So doing
+	 * same here so that sw842 decompression can be used for both
+	 * compressed data.
+	 */
+	crc = crc32_be(0, in, ilen);
+	ret = add_bits(p, crc, CRC_BITS);
+	if (ret)
+		return ret;
+
 	if (p->bit) {
 		p->out++;
 		p->olen--;
diff --git a/lib/842/842_decompress.c b/lib/842/842_decompress.c
index 5446ff0c9ba0..8881dad2a6a0 100644
--- a/lib/842/842_decompress.c
+++ b/lib/842/842_decompress.c
@@ -285,6 +285,7 @@ int sw842_decompress(const u8 *in, unsigned int ilen,
 	struct sw842_param p;
 	int ret;
 	u64 op, rep, tmp, bytes, total;
+	u64 crc;
 
 	p.in = (u8 *)in;
 	p.bit = 0;
@@ -375,6 +376,22 @@ int sw842_decompress(const u8 *in, unsigned int ilen,
 		}
 	} while (op != OP_END);
 
+	/*
+	 * crc(0:31) is saved in compressed data starting with the
+	 * next bit after End of stream template.
+	 */
+	ret = next_bits(&p, &crc, CRC_BITS);
+	if (ret)
+		return ret;
+
+	/*
+	 * Validate CRC saved in compressed data.
+	 */
+	if (crc != (u64)crc32_be(0, out, total - p.olen)) {
+		pr_debug("CRC mismatch for decompression\n");
+		return -EINVAL;
+	}
+
 	if (unlikely((total - p.olen) > UINT_MAX))
 		return -ENOSPC;
 
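A minimal round-trip sketch (not part of the patch) of the sw842 API touched above: sw842_compress() now appends crc32_be() of the input after the end-of-stream template, and sw842_decompress() recomputes the CRC over its output and fails with -EINVAL on mismatch. The output sizing and error handling here are illustrative assumptions only.

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/sw842.h>

/* Illustrative only: 2 * slen is just a guess for the compressed size. */
static int sw842_crc_roundtrip(const u8 *src, unsigned int slen)
{
	unsigned int clen = 2 * slen;
	unsigned int dlen = slen;
	void *wmem = kmalloc(SW842_MEM_COMPRESS, GFP_KERNEL);
	u8 *comp = kmalloc(clen, GFP_KERNEL);
	u8 *decomp = kmalloc(dlen, GFP_KERNEL);
	int ret = -ENOMEM;

	if (!wmem || !comp || !decomp)
		goto out;

	/* appends CRC_BITS worth of crc32_be(src) after the end-of-stream template */
	ret = sw842_compress(src, slen, comp, &clen, wmem);
	if (ret)
		goto out;

	/* recomputes the CRC over its output; returns -EINVAL on mismatch */
	ret = sw842_decompress(comp, clen, decomp, &dlen);
out:
	kfree(decomp);
	kfree(comp);
	kfree(wmem);
	return ret;
}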
diff --git a/lib/Kconfig b/lib/Kconfig
index 2e491ac15622..f0df318104e7 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -220,6 +220,7 @@ config ZLIB_INFLATE
 
 config ZLIB_DEFLATE
 	tristate
+	select BITREVERSE
 
 config LZO_COMPRESS
 	tristate
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index ab76b99adc85..1d1521c26302 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -197,6 +197,7 @@ config ENABLE_MUST_CHECK
 config FRAME_WARN
 	int "Warn for stack frames larger than (needs gcc 4.4)"
 	range 0 8192
+	default 0 if KASAN
 	default 1024 if !64BIT
 	default 2048 if 64BIT
 	help
diff --git a/lib/Makefile b/lib/Makefile
index 13a7c6ae3fec..8de3b012eac7 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -26,7 +26,8 @@ obj-y += bcd.o div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \
 	 bust_spinlocks.o kasprintf.o bitmap.o scatterlist.o \
 	 gcd.o lcm.o list_sort.o uuid.o flex_array.o iov_iter.o clz_ctz.o \
 	 bsearch.o find_bit.o llist.o memweight.o kfifo.o \
-	 percpu-refcount.o percpu_ida.o rhashtable.o reciprocal_div.o
+	 percpu-refcount.o percpu_ida.o rhashtable.o reciprocal_div.o \
+	 once.o
 obj-y += string_helpers.o
 obj-$(CONFIG_TEST_STRING_HELPERS) += test-string_helpers.o
 obj-y += hexdump.o
diff --git a/lib/devres.c b/lib/devres.c
index f13a2468ff39..8c85672639d3 100644
--- a/lib/devres.c
+++ b/lib/devres.c
@@ -418,7 +418,7 @@ void pcim_iounmap_regions(struct pci_dev *pdev, int mask)
 	if (!iomap)
 		return;
 
-	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
+	for (i = 0; i < PCIM_IOMAP_MAX; i++) {
 		if (!(mask & (1 << i)))
 			continue;
 
diff --git a/lib/dma-debug.c b/lib/dma-debug.c
index dace71fe41f7..fcb65d2a0b94 100644
--- a/lib/dma-debug.c
+++ b/lib/dma-debug.c
@@ -100,7 +100,7 @@ static LIST_HEAD(free_entries);
 static DEFINE_SPINLOCK(free_entries_lock);
 
 /* Global disable flag - will be set in case of an error */
-static u32 global_disable __read_mostly;
+static bool global_disable __read_mostly;
 
 /* Early initialization disable flag, set at the end of dma_debug_init */
 static bool dma_debug_initialized __read_mostly;
diff --git a/lib/fault-inject.c b/lib/fault-inject.c
index f1cdeb024d17..6a823a53e357 100644
--- a/lib/fault-inject.c
+++ b/lib/fault-inject.c
@@ -44,7 +44,7 @@ static void fail_dump(struct fault_attr *attr)
 		printk(KERN_NOTICE "FAULT_INJECTION: forcing a failure.\n"
 		       "name %pd, interval %lu, probability %lu, "
 		       "space %d, times %d\n", attr->dname,
-		       attr->probability, attr->interval,
+		       attr->interval, attr->probability,
 		       atomic_read(&attr->space), atomic_read(&attr->times));
 	if (attr->verbose > 1)
 		dump_stack();
diff --git a/lib/kobject.c b/lib/kobject.c
index 3e3a5c3cb330..055407746266 100644
--- a/lib/kobject.c
+++ b/lib/kobject.c
@@ -568,6 +568,7 @@ void kobject_del(struct kobject *kobj)
 	kobject_put(kobj->parent);
 	kobj->parent = NULL;
 }
+EXPORT_SYMBOL(kobject_del);
 
 /**
  * kobject_get - increment refcount for object.
@@ -584,6 +585,7 @@ struct kobject *kobject_get(struct kobject *kobj)
 	}
 	return kobj;
 }
+EXPORT_SYMBOL(kobject_get);
 
 static struct kobject * __must_check kobject_get_unless_zero(struct kobject *kobj)
 {
@@ -675,6 +677,7 @@ void kobject_put(struct kobject *kobj)
 		kref_put(&kobj->kref, kobject_release);
 	}
 }
+EXPORT_SYMBOL(kobject_put);
 
 static void dynamic_kobj_release(struct kobject *kobj)
 {
@@ -803,6 +806,7 @@ int kset_register(struct kset *k)
 	kobject_uevent(&k->kobj, KOBJ_ADD);
 	return 0;
 }
+EXPORT_SYMBOL(kset_register);
 
 /**
  * kset_unregister - remove a kset.
@@ -815,6 +819,7 @@ void kset_unregister(struct kset *k)
 	kobject_del(&k->kobj);
 	kobject_put(&k->kobj);
 }
+EXPORT_SYMBOL(kset_unregister);
 
 /**
  * kset_find_obj - search for object in kset.
@@ -1051,10 +1056,3 @@ void kobj_ns_drop(enum kobj_ns_type type, void *ns)
 		kobj_ns_ops_tbl[type]->drop_ns(ns);
 	spin_unlock(&kobj_ns_type_lock);
 }
-
-EXPORT_SYMBOL(kobject_get);
-EXPORT_SYMBOL(kobject_put);
-EXPORT_SYMBOL(kobject_del);
-
-EXPORT_SYMBOL(kset_register);
-EXPORT_SYMBOL(kset_unregister);
diff --git a/lib/mpi/mpicoder.c b/lib/mpi/mpicoder.c
index 95c52a95259e..c7e0a705eecf 100644
--- a/lib/mpi/mpicoder.c
+++ b/lib/mpi/mpicoder.c
@@ -319,3 +319,202 @@ int mpi_set_buffer(MPI a, const void *xbuffer, unsigned nbytes, int sign)
 	return 0;
 }
 EXPORT_SYMBOL_GPL(mpi_set_buffer);
+
+/**
+ * mpi_write_to_sgl() - Funnction exports MPI to an sgl (msb first)
+ *
+ * This function works in the same way as the mpi_read_buffer, but it
+ * takes an sgl instead of u8 * buf.
+ *
+ * @a:		a multi precision integer
+ * @sgl:	scatterlist to write to. Needs to be at least
+ *		mpi_get_size(a) long.
+ * @nbytes:	in/out param - it has the be set to the maximum number of
+ *		bytes that can be written to sgl. This has to be at least
+ *		the size of the integer a. On return it receives the actual
+ *		length of the data written.
+ * @sign:	if not NULL, it will be set to the sign of a.
+ *
+ * Return:	0 on success or error code in case of error
+ */
+int mpi_write_to_sgl(MPI a, struct scatterlist *sgl, unsigned *nbytes,
+		     int *sign)
+{
+	u8 *p, *p2;
+	mpi_limb_t alimb, alimb2;
+	unsigned int n = mpi_get_size(a);
+	int i, x, y = 0, lzeros = 0, buf_len;
+
+	if (!nbytes || *nbytes < n)
+		return -EINVAL;
+
+	if (sign)
+		*sign = a->sign;
+
+	p = (void *)&a->d[a->nlimbs] - 1;
+
+	for (i = a->nlimbs * sizeof(alimb) - 1; i >= 0; i--, p--) {
+		if (!*p)
+			lzeros++;
+		else
+			break;
+	}
+
+	*nbytes = n - lzeros;
+	buf_len = sgl->length;
+	p2 = sg_virt(sgl);
+
+	for (i = a->nlimbs - 1; i >= 0; i--) {
+		alimb = a->d[i];
+		p = (u8 *)&alimb2;
+#if BYTES_PER_MPI_LIMB == 4
+		*p++ = alimb >> 24;
+		*p++ = alimb >> 16;
+		*p++ = alimb >> 8;
+		*p++ = alimb;
+#elif BYTES_PER_MPI_LIMB == 8
+		*p++ = alimb >> 56;
+		*p++ = alimb >> 48;
+		*p++ = alimb >> 40;
+		*p++ = alimb >> 32;
+		*p++ = alimb >> 24;
+		*p++ = alimb >> 16;
+		*p++ = alimb >> 8;
+		*p++ = alimb;
+#else
+#error please implement for this limb size.
+#endif
+		if (lzeros > 0) {
+			if (lzeros >= sizeof(alimb)) {
+				p -= sizeof(alimb);
+				continue;
+			} else {
+				mpi_limb_t *limb1 = (void *)p - sizeof(alimb);
+				mpi_limb_t *limb2 = (void *)p - sizeof(alimb)
+							+ lzeros;
+				*limb1 = *limb2;
+				p -= lzeros;
+				y = lzeros;
+			}
+			lzeros -= sizeof(alimb);
+		}
+
+		p = p - (sizeof(alimb) - y);
+
+		for (x = 0; x < sizeof(alimb) - y; x++) {
+			if (!buf_len) {
+				sgl = sg_next(sgl);
+				if (!sgl)
+					return -EINVAL;
+				buf_len = sgl->length;
+				p2 = sg_virt(sgl);
+			}
+			*p2++ = *p++;
+			buf_len--;
+		}
+		y = 0;
+	}
+	return 0;
+}
+EXPORT_SYMBOL_GPL(mpi_write_to_sgl);
+
+/*
+ * mpi_read_raw_from_sgl() - Function allocates an MPI and populates it with
+ *			     data from the sgl
+ *
+ * This function works in the same way as the mpi_read_raw_data, but it
+ * takes an sgl instead of void * buffer. i.e. it allocates
+ * a new MPI and reads the content of the sgl to the MPI.
+ *
+ * @sgl:	scatterlist to read from
+ * @len:	number of bytes to read
+ *
+ * Return:	Pointer to a new MPI or NULL on error
+ */
+MPI mpi_read_raw_from_sgl(struct scatterlist *sgl, unsigned int len)
+{
+	struct scatterlist *sg;
+	int x, i, j, z, lzeros, ents;
+	unsigned int nbits, nlimbs, nbytes;
+	mpi_limb_t a;
+	MPI val = NULL;
+
+	lzeros = 0;
+	ents = sg_nents(sgl);
+
+	for_each_sg(sgl, sg, ents, i) {
+		const u8 *buff = sg_virt(sg);
+		int len = sg->length;
+
+		while (len && !*buff) {
+			lzeros++;
+			len--;
+			buff++;
+		}
+
+		if (len && *buff)
+			break;
+
+		ents--;
+		lzeros = 0;
+	}
+
+	sgl = sg;
+
+	if (!ents)
+		nbytes = 0;
+	else
+		nbytes = len - lzeros;
+
+	nbits = nbytes * 8;
+	if (nbits > MAX_EXTERN_MPI_BITS) {
+		pr_info("MPI: mpi too large (%u bits)\n", nbits);
+		return NULL;
+	}
+
+	if (nbytes > 0)
+		nbits -= count_leading_zeros(*(u8 *)(sg_virt(sgl) + lzeros));
+	else
+		nbits = 0;
+
+	nlimbs = DIV_ROUND_UP(nbytes, BYTES_PER_MPI_LIMB);
+	val = mpi_alloc(nlimbs);
+	if (!val)
+		return NULL;
+
+	val->nbits = nbits;
+	val->sign = 0;
+	val->nlimbs = nlimbs;
+
+	if (nbytes == 0)
+		return val;
+
+	j = nlimbs - 1;
+	a = 0;
+	z = 0;
+	x = BYTES_PER_MPI_LIMB - nbytes % BYTES_PER_MPI_LIMB;
+	x %= BYTES_PER_MPI_LIMB;
+
+	for_each_sg(sgl, sg, ents, i) {
+		const u8 *buffer = sg_virt(sg) + lzeros;
+		int len = sg->length - lzeros;
+		int buf_shift = x;
+
+		if (sg_is_last(sg) && (len % BYTES_PER_MPI_LIMB))
+			len += BYTES_PER_MPI_LIMB - (len % BYTES_PER_MPI_LIMB);
+
+		for (; x < len + buf_shift; x++) {
+			a <<= 8;
+			a |= *buffer++;
+			if (((z + x + 1) % BYTES_PER_MPI_LIMB) == 0) {
+				val->d[j--] = a;
+				a = 0;
+			}
+		}
+		z += x;
+		x = 0;
+		lzeros = 0;
+	}
+	return val;
+}
+EXPORT_SYMBOL_GPL(mpi_read_raw_from_sgl);
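A hedged usage sketch (not part of the patch) pairing the two new helpers over a single-entry scatterlist. It assumes the helpers are declared in <linux/mpi.h> alongside the existing mpi_* API; the buffer and length are caller-provided and purely illustrative.

#include <linux/errno.h>
#include <linux/mpi.h>
#include <linux/scatterlist.h>

/* Illustrative only: one sg entry over a caller-provided buffer. */
static int mpi_sgl_roundtrip(u8 *buf, unsigned int len)
{
	struct scatterlist sg;
	unsigned int nbytes = len;
	int sign, ret;
	MPI m;

	sg_init_one(&sg, buf, len);

	/* allocate an MPI from the most-significant-byte-first data in the sgl */
	m = mpi_read_raw_from_sgl(&sg, len);
	if (!m)
		return -ENOMEM;

	/* write it back into the same sgl; nbytes returns the length written */
	ret = mpi_write_to_sgl(m, &sg, &nbytes, &sign);

	mpi_free(m);
	return ret;
}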
diff --git a/lib/nmi_backtrace.c b/lib/nmi_backtrace.c
index 88d3d32e5923..6019c53c669e 100644
--- a/lib/nmi_backtrace.c
+++ b/lib/nmi_backtrace.c
@@ -43,6 +43,12 @@ static void print_seq_line(struct nmi_seq_buf *s, int start, int end)
 	printk("%.*s", (end - start) + 1, buf);
 }
 
+/*
+ * When raise() is called it will be is passed a pointer to the
+ * backtrace_mask. Architectures that call nmi_cpu_backtrace()
+ * directly from their raise() functions may rely on the mask
+ * they are passed being updated as a side effect of this call.
+ */
 void nmi_trigger_all_cpu_backtrace(bool include_self,
 				   void (*raise)(cpumask_t *mask))
 {
@@ -149,7 +155,10 @@ bool nmi_cpu_backtrace(struct pt_regs *regs)
 		/* Replace printk to write into the NMI seq */
 		this_cpu_write(printk_func, nmi_vprintk);
 		pr_warn("NMI backtrace for cpu %d\n", cpu);
-		show_regs(regs);
+		if (regs)
+			show_regs(regs);
+		else
+			dump_stack();
 		this_cpu_write(printk_func, printk_func_save);
 
 		cpumask_clear_cpu(cpu, to_cpumask(backtrace_mask));
diff --git a/lib/once.c b/lib/once.c
new file mode 100644
index 000000000000..05c8604627eb
--- /dev/null
+++ b/lib/once.c
@@ -0,0 +1,62 @@
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/once.h>
+#include <linux/random.h>
+
+struct once_work {
+	struct work_struct work;
+	struct static_key *key;
+};
+
+static void once_deferred(struct work_struct *w)
+{
+	struct once_work *work;
+
+	work = container_of(w, struct once_work, work);
+	BUG_ON(!static_key_enabled(work->key));
+	static_key_slow_dec(work->key);
+	kfree(work);
+}
+
+static void once_disable_jump(struct static_key *key)
+{
+	struct once_work *w;
+
+	w = kmalloc(sizeof(*w), GFP_ATOMIC);
+	if (!w)
+		return;
+
+	INIT_WORK(&w->work, once_deferred);
+	w->key = key;
+	schedule_work(&w->work);
+}
+
+static DEFINE_SPINLOCK(once_lock);
+
+bool __do_once_start(bool *done, unsigned long *flags)
+	__acquires(once_lock)
+{
+	spin_lock_irqsave(&once_lock, *flags);
+	if (*done) {
+		spin_unlock_irqrestore(&once_lock, *flags);
+		/* Keep sparse happy by restoring an even lock count on
+		 * this lock. In case we return here, we don't call into
+		 * __do_once_done but return early in the DO_ONCE() macro.
+		 */
+		__acquire(once_lock);
+		return false;
+	}
+
+	return true;
+}
+EXPORT_SYMBOL(__do_once_start);
+
+void __do_once_done(bool *done, struct static_key *once_key,
+		    unsigned long *flags)
+	__releases(once_lock)
+{
+	*done = true;
+	spin_unlock_irqrestore(&once_lock, *flags);
+	once_disable_jump(once_key);
+}
+EXPORT_SYMBOL(__do_once_done);
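A sketch (not part of the patch) of the calling convention the two exported helpers expect. In-tree users are expected to go through the DO_ONCE() macro in <linux/once.h>, which additionally wraps this pattern in a static-key fast path; the names init_done, init_once and init_fn below are made up for illustration.

#include <linux/once.h>
#include <linux/jump_label.h>

static bool init_done;
static struct static_key init_once = STATIC_KEY_INIT_TRUE;

/* hypothetical expensive setup that must run exactly once */
static void init_fn(void)
{
}

static void run_init_once(void)
{
	unsigned long flags;

	/* returns false (with the lock already released) once init_done is set */
	if (!__do_once_start(&init_done, &flags))
		return;

	init_fn();	/* runs under once_lock, exactly once */

	/* sets init_done, drops the lock, defers static_key_slow_dec(&init_once) */
	__do_once_done(&init_done, &init_once, &flags);
}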
diff --git a/lib/random32.c b/lib/random32.c
index 0bee183fa18f..12111910ccd0 100644
--- a/lib/random32.c
+++ b/lib/random32.c
@@ -181,7 +181,7 @@ void prandom_seed(u32 entropy)
 	 * No locking on the CPUs, but then somewhat random results are, well,
 	 * expected.
 	 */
-	for_each_possible_cpu (i) {
+	for_each_possible_cpu(i) {
 		struct rnd_state *state = &per_cpu(net_rand_state, i);
 
 		state->s1 = __seed(state->s1 ^ entropy, 2U);
@@ -201,7 +201,7 @@ static int __init prandom_init(void)
 		prandom_state_selftest();
 
 	for_each_possible_cpu(i) {
-		struct rnd_state *state = &per_cpu(net_rand_state,i);
+		struct rnd_state *state = &per_cpu(net_rand_state, i);
 		u32 weak_seed = (i + jiffies) ^ random_get_entropy();
 
 		prandom_seed_early(state, weak_seed, true);
@@ -238,13 +238,30 @@ static void __init __prandom_start_seed_timer(void)
 	add_timer(&seed_timer);
 }
 
+void prandom_seed_full_state(struct rnd_state __percpu *pcpu_state)
+{
+	int i;
+
+	for_each_possible_cpu(i) {
+		struct rnd_state *state = per_cpu_ptr(pcpu_state, i);
+		u32 seeds[4];
+
+		get_random_bytes(&seeds, sizeof(seeds));
+		state->s1 = __seed(seeds[0], 2U);
+		state->s2 = __seed(seeds[1], 8U);
+		state->s3 = __seed(seeds[2], 16U);
+		state->s4 = __seed(seeds[3], 128U);
+
+		prandom_warmup(state);
+	}
+}
+
 /*
  * Generate better values after random number generator
  * is fully initialized.
  */
 static void __prandom_reseed(bool late)
 {
-	int i;
 	unsigned long flags;
 	static bool latch = false;
 	static DEFINE_SPINLOCK(lock);
@@ -266,19 +283,7 @@ static void __prandom_reseed(bool late)
 		goto out;
 
 	latch = true;
-
-	for_each_possible_cpu(i) {
-		struct rnd_state *state = &per_cpu(net_rand_state,i);
-		u32 seeds[4];
-
-		get_random_bytes(&seeds, sizeof(seeds));
-		state->s1 = __seed(seeds[0], 2U);
-		state->s2 = __seed(seeds[1], 8U);
-		state->s3 = __seed(seeds[2], 16U);
-		state->s4 = __seed(seeds[3], 128U);
-
-		prandom_warmup(state);
-	}
+	prandom_seed_full_state(&net_rand_state);
 out:
 	spin_unlock_irqrestore(&lock, flags);
 }
diff --git a/lib/string.c b/lib/string.c
index 8dbb7b1eab50..84775ba873b9 100644
--- a/lib/string.c
+++ b/lib/string.c
@@ -203,12 +203,13 @@ ssize_t strscpy(char *dest, const char *src, size_t count)
 		unsigned long c, data;
 
 		c = *(unsigned long *)(src+res);
-		*(unsigned long *)(dest+res) = c;
 		if (has_zero(c, &data, &constants)) {
 			data = prep_zero_mask(c, data, &constants);
 			data = create_zero_mask(data);
+			*(unsigned long *)(dest+res) = c & zero_bytemask(data);
 			return res + find_zero(data);
 		}
+		*(unsigned long *)(dest+res) = c;
 		res += sizeof(unsigned long);
 		count -= sizeof(unsigned long);
 		max -= sizeof(unsigned long);
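A small sketch (not from the patch) of what the reordered store in the strscpy() hunk changes: the word containing the NUL is now masked with zero_bytemask() before it is written, so bytes that happen to follow the terminator in src are no longer copied into dest. The buffers and values below are illustrative.

#include <linux/types.h>
#include <linux/string.h>

/* Illustrative only: shows which bytes reach dest on the word-at-a-time path. */
static void strscpy_example(void)
{
	char src[8] = { 'h', 'i', '\0', 'A', 'B', 'C', 'D', 'E' };
	char dest[16];
	ssize_t len;

	/* returns 2, the length of "hi" */
	len = strscpy(dest, src, sizeof(dest));

	/*
	 * On a 64-bit build using the word-at-a-time loop, the first word
	 * of src is "hi\0ABCDE".  Before the fix that whole word was stored
	 * into dest, copying "ABCDE" past the terminator; with the
	 * zero_bytemask() masking only "hi" plus zero padding is written.
	 */
	(void)len;
}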