Diffstat (limited to 'lib')

 -rw-r--r--   lib/Kconfig          |   3
 -rw-r--r--   lib/Kconfig.debug    |  10
 -rw-r--r--   lib/Makefile         |   2
 -rw-r--r--   lib/dma-debug.c      |   5
 -rw-r--r--   lib/nmi_backtrace.c  |   2
 -rw-r--r--   lib/prime_numbers.c  | 315
 -rw-r--r--   lib/rhashtable.c     |   9
 -rw-r--r--   lib/show_mem.c       |   4
 -rw-r--r--   lib/test_firmware.c  |  92
 -rw-r--r--   lib/test_parman.c    |   2
 -rw-r--r--   lib/test_user_copy.c |   3

 11 files changed, 407 insertions, 40 deletions
diff --git a/lib/Kconfig b/lib/Kconfig
index f3552604e47a..44d1a1181fb5 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -551,6 +551,9 @@ config SBITMAP
 	bool
 
 config PARMAN
+	tristate "parman" if COMPILE_TEST
+
+config PRIME_NUMBERS
 	tristate
 
 endmenu
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 7f44429ac820..66fb4389f05c 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -416,6 +416,16 @@ config MAGIC_SYSRQ_DEFAULT_ENABLE
 	  This may be set to 1 or 0 to enable or disable them all, or
 	  to a bitmask as described in Documentation/sysrq.txt.
 
+config MAGIC_SYSRQ_SERIAL
+	bool "Enable magic SysRq key over serial"
+	depends on MAGIC_SYSRQ
+	default y
+	help
+	  Many embedded boards have a disconnected TTL level serial which can
+	  generate some garbage that can lead to spurious false sysrq detects.
+	  This option allows you to decide whether you want to enable the
+	  magic SysRq key.
+
 config DEBUG_KERNEL
 	bool "Kernel debugging"
 	help
diff --git a/lib/Makefile b/lib/Makefile
index 6b768b58a38d..f1a0364af377 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -198,6 +198,8 @@ obj-$(CONFIG_ASN1) += asn1_decoder.o
 
 obj-$(CONFIG_FONT_SUPPORT) += fonts/
 
+obj-$(CONFIG_PRIME_NUMBERS) += prime_numbers.o
+
 hostprogs-y	:= gen_crc32table
 clean-files	:= crc32table.h
 
diff --git a/lib/dma-debug.c b/lib/dma-debug.c
index 8971370bfb16..60c57ec936db 100644
--- a/lib/dma-debug.c
+++ b/lib/dma-debug.c
@@ -1155,6 +1155,11 @@ static void check_unmap(struct dma_debug_entry *ref)
 			   dir2name[ref->direction]);
 	}
 
+	/*
+	 * Drivers should use dma_mapping_error() to check the returned
+	 * addresses of dma_map_single() and dma_map_page().
+	 * If not, print this warning message. See Documentation/DMA-API.txt.
+	 */
 	if (entry->map_err_type == MAP_ERR_NOT_CHECKED) {
 		err_printk(ref->dev, entry,
 			   "DMA-API: device driver failed to check map error"
diff --git a/lib/nmi_backtrace.c b/lib/nmi_backtrace.c
index 75554754eadf..5f7999eacad5 100644
--- a/lib/nmi_backtrace.c
+++ b/lib/nmi_backtrace.c
@@ -77,7 +77,7 @@ void nmi_trigger_cpumask_backtrace(const cpumask_t *mask,
 	 * Force flush any remote buffers that might be stuck in IRQ context
 	 * and therefore could not run their irq_work.
 	 */
-	printk_nmi_flush();
+	printk_safe_flush();
 
 	clear_bit_unlock(0, &backtrace_flag);
 	put_cpu();
diff --git a/lib/prime_numbers.c b/lib/prime_numbers.c
new file mode 100644
index 000000000000..550eec457c2e
--- /dev/null
+++ b/lib/prime_numbers.c
@@ -0,0 +1,315 @@
+#define pr_fmt(fmt) "prime numbers: " fmt "\n"
+
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/prime_numbers.h>
+#include <linux/slab.h>
+
+#define bitmap_size(nbits) (BITS_TO_LONGS(nbits) * sizeof(unsigned long))
+
+struct primes {
+	struct rcu_head rcu;
+	unsigned long last, sz;
+	unsigned long primes[];
+};
+
+#if BITS_PER_LONG == 64
+static const struct primes small_primes = {
+	.last = 61,
+	.sz = 64,
+	.primes = {
+		BIT(2) |
+		BIT(3) |
+		BIT(5) |
+		BIT(7) |
+		BIT(11) |
+		BIT(13) |
+		BIT(17) |
+		BIT(19) |
+		BIT(23) |
+		BIT(29) |
+		BIT(31) |
+		BIT(37) |
+		BIT(41) |
+		BIT(43) |
+		BIT(47) |
+		BIT(53) |
+		BIT(59) |
+		BIT(61)
+	}
+};
+#elif BITS_PER_LONG == 32
+static const struct primes small_primes = {
+	.last = 31,
+	.sz = 32,
+	.primes = {
+		BIT(2) |
+		BIT(3) |
+		BIT(5) |
+		BIT(7) |
+		BIT(11) |
+		BIT(13) |
+		BIT(17) |
+		BIT(19) |
+		BIT(23) |
+		BIT(29) |
+		BIT(31)
+	}
+};
+#else
+#error "unhandled BITS_PER_LONG"
+#endif
+
+static DEFINE_MUTEX(lock);
+static const struct primes __rcu *primes = RCU_INITIALIZER(&small_primes);
+
+static unsigned long selftest_max;
+
+static bool slow_is_prime_number(unsigned long x)
+{
+	unsigned long y = int_sqrt(x);
+
+	while (y > 1) {
+		if ((x % y) == 0)
+			break;
+		y--;
+	}
+
+	return y == 1;
+}
+
+static unsigned long slow_next_prime_number(unsigned long x)
+{
+	while (x < ULONG_MAX && !slow_is_prime_number(++x))
+		;
+
+	return x;
+}
+
+static unsigned long clear_multiples(unsigned long x,
+				     unsigned long *p,
+				     unsigned long start,
+				     unsigned long end)
+{
+	unsigned long m;
+
+	m = 2 * x;
+	if (m < start)
+		m = roundup(start, x);
+
+	while (m < end) {
+		__clear_bit(m, p);
+		m += x;
+	}
+
+	return x;
+}
+
+static bool expand_to_next_prime(unsigned long x)
+{
+	const struct primes *p;
+	struct primes *new;
+	unsigned long sz, y;
+
+	/* Betrand's Postulate (or Chebyshev's theorem) states that if n > 3,
+	 * there is always at least one prime p between n and 2n - 2.
+	 * Equivalently, if n > 1, then there is always at least one prime p
+	 * such that n < p < 2n.
+	 *
+	 * http://mathworld.wolfram.com/BertrandsPostulate.html
+	 * https://en.wikipedia.org/wiki/Bertrand's_postulate
+	 */
+	sz = 2 * x;
+	if (sz < x)
+		return false;
+
+	sz = round_up(sz, BITS_PER_LONG);
+	new = kmalloc(sizeof(*new) + bitmap_size(sz),
+		      GFP_KERNEL | __GFP_NOWARN);
+	if (!new)
+		return false;
+
+	mutex_lock(&lock);
+	p = rcu_dereference_protected(primes, lockdep_is_held(&lock));
+	if (x < p->last) {
+		kfree(new);
+		goto unlock;
+	}
+
+	/* Where memory permits, track the primes using the
+	 * Sieve of Eratosthenes. The sieve is to remove all multiples of known
+	 * primes from the set, what remains in the set is therefore prime.
+	 */
+	bitmap_fill(new->primes, sz);
+	bitmap_copy(new->primes, p->primes, p->sz);
+	for (y = 2UL; y < sz; y = find_next_bit(new->primes, sz, y + 1))
+		new->last = clear_multiples(y, new->primes, p->sz, sz);
+	new->sz = sz;
+
+	BUG_ON(new->last <= x);
+
+	rcu_assign_pointer(primes, new);
+	if (p != &small_primes)
+		kfree_rcu((struct primes *)p, rcu);
+
+unlock:
+	mutex_unlock(&lock);
+	return true;
+}
+
+static void free_primes(void)
+{
+	const struct primes *p;
+
+	mutex_lock(&lock);
+	p = rcu_dereference_protected(primes, lockdep_is_held(&lock));
+	if (p != &small_primes) {
+		rcu_assign_pointer(primes, &small_primes);
+		kfree_rcu((struct primes *)p, rcu);
+	}
+	mutex_unlock(&lock);
+}
+
+/**
+ * next_prime_number - return the next prime number
+ * @x: the starting point for searching to test
+ *
+ * A prime number is an integer greater than 1 that is only divisible by
+ * itself and 1. The set of prime numbers is computed using the Sieve of
+ * Eratoshenes (on finding a prime, all multiples of that prime are removed
+ * from the set) enabling a fast lookup of the next prime number larger than
+ * @x. If the sieve fails (memory limitation), the search falls back to using
+ * slow trial-divison, up to the value of ULONG_MAX (which is reported as the
+ * final prime as a sentinel).
+ *
+ * Returns: the next prime number larger than @x
+ */
+unsigned long next_prime_number(unsigned long x)
+{
+	const struct primes *p;
+
+	rcu_read_lock();
+	p = rcu_dereference(primes);
+	while (x >= p->last) {
+		rcu_read_unlock();
+
+		if (!expand_to_next_prime(x))
+			return slow_next_prime_number(x);
+
+		rcu_read_lock();
+		p = rcu_dereference(primes);
+	}
+	x = find_next_bit(p->primes, p->last, x + 1);
+	rcu_read_unlock();
+
+	return x;
+}
+EXPORT_SYMBOL(next_prime_number);
+
+/**
+ * is_prime_number - test whether the given number is prime
+ * @x: the number to test
+ *
+ * A prime number is an integer greater than 1 that is only divisible by
+ * itself and 1. Internally a cache of prime numbers is kept (to speed up
+ * searching for sequential primes, see next_prime_number()), but if the number
+ * falls outside of that cache, its primality is tested using trial-divison.
+ *
+ * Returns: true if @x is prime, false for composite numbers.
+ */
+bool is_prime_number(unsigned long x)
+{
+	const struct primes *p;
+	bool result;
+
+	rcu_read_lock();
+	p = rcu_dereference(primes);
+	while (x >= p->sz) {
+		rcu_read_unlock();
+
+		if (!expand_to_next_prime(x))
+			return slow_is_prime_number(x);
+
+		rcu_read_lock();
+		p = rcu_dereference(primes);
+	}
+	result = test_bit(x, p->primes);
+	rcu_read_unlock();
+
+	return result;
+}
+EXPORT_SYMBOL(is_prime_number);
+
+static void dump_primes(void)
+{
+	const struct primes *p;
+	char *buf;
+
+	buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
+
+	rcu_read_lock();
+	p = rcu_dereference(primes);
+
+	if (buf)
+		bitmap_print_to_pagebuf(true, buf, p->primes, p->sz);
+	pr_info("primes.{last=%lu, .sz=%lu, .primes[]=...x%lx} = %s",
+		p->last, p->sz, p->primes[BITS_TO_LONGS(p->sz) - 1], buf);
+
+	rcu_read_unlock();
+
+	kfree(buf);
+}
+
+static int selftest(unsigned long max)
+{
+	unsigned long x, last;
+
+	if (!max)
+		return 0;
+
+	for (last = 0, x = 2; x < max; x++) {
+		bool slow = slow_is_prime_number(x);
+		bool fast = is_prime_number(x);
+
+		if (slow != fast) {
+			pr_err("inconsistent result for is-prime(%lu): slow=%s, fast=%s!",
+			       x, slow ? "yes" : "no", fast ? "yes" : "no");
+			goto err;
+		}
+
+		if (!slow)
+			continue;
+
+		if (next_prime_number(last) != x) {
+			pr_err("incorrect result for next-prime(%lu): expected %lu, got %lu",
+			       last, x, next_prime_number(last));
+			goto err;
+		}
+		last = x;
+	}
+
+	pr_info("selftest(%lu) passed, last prime was %lu", x, last);
+	return 0;
+
+err:
+	dump_primes();
+	return -EINVAL;
+}
+
+static int __init primes_init(void)
+{
+	return selftest(selftest_max);
+}
+
+static void __exit primes_exit(void)
+{
+	free_primes();
+}
+
+module_init(primes_init);
+module_exit(primes_exit);
+
+module_param_named(selftest, selftest_max, ulong, 0400);
+
+MODULE_AUTHOR("Intel Corporation");
+MODULE_LICENSE("GPL");
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index 172454e6b979..c5b9b9351cec 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -146,9 +146,7 @@ static void bucket_table_free(const struct bucket_table *tbl)
 	if (tbl->nest)
 		nested_bucket_table_free(tbl);
 
-	if (tbl)
-		kvfree(tbl->locks);
-
+	kvfree(tbl->locks);
 	kvfree(tbl);
 }
 
@@ -1123,12 +1121,13 @@ struct rhash_head __rcu **rht_bucket_nested(const struct bucket_table *tbl,
 	union nested_table *ntbl;
 
 	ntbl = (union nested_table *)rcu_dereference_raw(tbl->buckets[0]);
-	ntbl = rht_dereference_bucket(ntbl[index].table, tbl, hash);
+	ntbl = rht_dereference_bucket_rcu(ntbl[index].table, tbl, hash);
 	subhash >>= tbl->nest;
 	while (ntbl && size > (1 << shift)) {
 		index = subhash & ((1 << shift) - 1);
-		ntbl = rht_dereference_bucket(ntbl[index].table, tbl, hash);
+		ntbl = rht_dereference_bucket_rcu(ntbl[index].table,
+						  tbl, hash);
 		size >>= shift;
 		subhash >>= shift;
 	}
 
diff --git a/lib/show_mem.c b/lib/show_mem.c
index 1feed6a2b12a..0beaa1d899aa 100644
--- a/lib/show_mem.c
+++ b/lib/show_mem.c
@@ -9,13 +9,13 @@
 #include <linux/quicklist.h>
 #include <linux/cma.h>
 
-void show_mem(unsigned int filter)
+void show_mem(unsigned int filter, nodemask_t *nodemask)
 {
 	pg_data_t *pgdat;
 	unsigned long total = 0, reserved = 0, highmem = 0;
 
 	printk("Mem-Info:\n");
-	show_free_areas(filter);
+	show_free_areas(filter, nodemask);
 
 	for_each_online_pgdat(pgdat) {
 		unsigned long flags;
diff --git a/lib/test_firmware.c b/lib/test_firmware.c
index a3e8ec3fb1c5..09371b0a9baf 100644
--- a/lib/test_firmware.c
+++ b/lib/test_firmware.c
@@ -42,12 +42,6 @@ static const struct file_operations test_fw_fops = {
 	.read           = test_fw_misc_read,
 };
 
-static struct miscdevice test_fw_misc_device = {
-	.minor          = MISC_DYNAMIC_MINOR,
-	.name           = "test_firmware",
-	.fops           = &test_fw_fops,
-};
-
 static ssize_t trigger_request_store(struct device *dev,
 				     struct device_attribute *attr,
 				     const char *buf, size_t count)
@@ -132,39 +126,81 @@ out:
 }
 static DEVICE_ATTR_WO(trigger_async_request);
 
-static int __init test_firmware_init(void)
+static ssize_t trigger_custom_fallback_store(struct device *dev,
+					     struct device_attribute *attr,
+					     const char *buf, size_t count)
 {
 	int rc;
+	char *name;
 
-	rc = misc_register(&test_fw_misc_device);
+	name = kstrndup(buf, count, GFP_KERNEL);
+	if (!name)
+		return -ENOSPC;
+
+	pr_info("loading '%s' using custom fallback mechanism\n", name);
+
+	mutex_lock(&test_fw_mutex);
+	release_firmware(test_firmware);
+	test_firmware = NULL;
+	rc = request_firmware_nowait(THIS_MODULE, FW_ACTION_NOHOTPLUG, name,
+				     dev, GFP_KERNEL, NULL,
+				     trigger_async_request_cb);
 	if (rc) {
-		pr_err("could not register misc device: %d\n", rc);
-		return rc;
+		pr_info("async load of '%s' failed: %d\n", name, rc);
+		kfree(name);
+		goto out;
 	}
-	rc = device_create_file(test_fw_misc_device.this_device,
-				&dev_attr_trigger_request);
-	if (rc) {
-		pr_err("could not create sysfs interface: %d\n", rc);
-		goto dereg;
+	/* Free 'name' ASAP, to test for race conditions */
+	kfree(name);
+
+	wait_for_completion(&async_fw_done);
+
+	if (test_firmware) {
+		pr_info("loaded: %zu\n", test_firmware->size);
+		rc = count;
+	} else {
+		pr_err("failed to async load firmware\n");
+		rc = -ENODEV;
 	}
 
-	rc = device_create_file(test_fw_misc_device.this_device,
-				&dev_attr_trigger_async_request);
+out:
+	mutex_unlock(&test_fw_mutex);
+
+	return rc;
+}
+static DEVICE_ATTR_WO(trigger_custom_fallback);
+
+#define TEST_FW_DEV_ATTR(name) &dev_attr_##name.attr
+
+static struct attribute *test_dev_attrs[] = {
+	TEST_FW_DEV_ATTR(trigger_request),
+	TEST_FW_DEV_ATTR(trigger_async_request),
+	TEST_FW_DEV_ATTR(trigger_custom_fallback),
+	NULL,
+};
+
+ATTRIBUTE_GROUPS(test_dev);
+
+static struct miscdevice test_fw_misc_device = {
+	.minor          = MISC_DYNAMIC_MINOR,
+	.name           = "test_firmware",
+	.fops           = &test_fw_fops,
+	.groups         = test_dev_groups,
+};
+
+static int __init test_firmware_init(void)
+{
+	int rc;
+
+	rc = misc_register(&test_fw_misc_device);
 	if (rc) {
-		pr_err("could not create async sysfs interface: %d\n", rc);
-		goto remove_file;
+		pr_err("could not register misc device: %d\n", rc);
+		return rc;
 	}
 
 	pr_warn("interface ready\n");
 
 	return 0;
-
-remove_file:
-	device_remove_file(test_fw_misc_device.this_device,
-			   &dev_attr_trigger_async_request);
-dereg:
-	misc_deregister(&test_fw_misc_device);
-	return rc;
 }
 
 module_init(test_firmware_init);
@@ -172,10 +208,6 @@ module_init(test_firmware_init);
 static void __exit test_firmware_exit(void)
 {
 	release_firmware(test_firmware);
-	device_remove_file(test_fw_misc_device.this_device,
-			   &dev_attr_trigger_async_request);
-	device_remove_file(test_fw_misc_device.this_device,
-			   &dev_attr_trigger_request);
 	misc_deregister(&test_fw_misc_device);
 	pr_warn("removed interface\n");
 }
diff --git a/lib/test_parman.c b/lib/test_parman.c
index fe9f3a785804..35e32243693c 100644
--- a/lib/test_parman.c
+++ b/lib/test_parman.c
@@ -334,7 +334,7 @@ static int test_parman_check_array(struct test_parman *test_parman,
 		last_priority = item->prio->priority;
 
 		if (item->parman_item.index != i) {
-			pr_err("Item has different index in compare to where it actualy is (%lu != %d)\n",
+			pr_err("Item has different index in compare to where it actually is (%lu != %d)\n",
 			       item->parman_item.index, i);
 			return -EINVAL;
 		}
diff --git a/lib/test_user_copy.c b/lib/test_user_copy.c
index 6f335a3d4ae2..1a8d71a68531 100644
--- a/lib/test_user_copy.c
+++ b/lib/test_user_copy.c
@@ -30,7 +30,8 @@
  * As there doesn't appear to be anything that can safely determine
  * their capability at compile-time, we just have to opt-out certain archs.
  */
-#if BITS_PER_LONG == 64 || (!defined(CONFIG_AVR32) &&		\
+#if BITS_PER_LONG == 64 || (!(defined(CONFIG_ARM) && !defined(MMU)) && \
+			    !defined(CONFIG_AVR32) &&		\
 			    !defined(CONFIG_BLACKFIN) &&	\
 			    !defined(CONFIG_M32R) &&		\
 			    !defined(CONFIG_M68K) &&		\
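For readers of the new lib/prime_numbers.c above, a minimal sketch of how a client module might consume the exported interface. It is hypothetical and not part of the patch (the prime_demo name and its output are invented); it only relies on the two exported symbols and on selecting CONFIG_PRIME_NUMBERS as added by this series.

/* Hypothetical consumer of the new prime-number API; not part of this patch. */
#include <linux/module.h>
#include <linux/prime_numbers.h>

static int __init prime_demo_init(void)
{
	unsigned long p;

	/* next_prime_number(1) returns 2, the first prime. */
	for (p = next_prime_number(1); p < 100; p = next_prime_number(p))
		pr_info("prime: %lu\n", p);

	/* is_prime_number() expands the cached sieve on demand. */
	pr_info("97 is %sprime\n", is_prime_number(97) ? "" : "not ");

	return 0;
}

static void __exit prime_demo_exit(void)
{
}

module_init(prime_demo_init);
module_exit(prime_demo_exit);

MODULE_LICENSE("GPL");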
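On the dma-debug hunk: the added comment documents why check_unmap() warns when an entry is still MAP_ERR_NOT_CHECKED. For context, a sketch of the driver-side pattern that warning expects; example_map_buffer() is an invented name, only dma_map_single() and dma_mapping_error() are existing API here.

#include <linux/dma-mapping.h>
#include <linux/errno.h>

/* Illustrative only: map a buffer and perform the required error check. */
static int example_map_buffer(struct device *dev, void *buf, size_t len,
			      dma_addr_t *dma)
{
	dma_addr_t addr;

	addr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, addr))
		return -ENOMEM;	/* checked: dma-debug will not complain */

	*dma = addr;
	return 0;
}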
