author     Greg Kroah-Hartman <gregkh@linuxfoundation.org>  2018-03-28 13:27:35 +0300
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>  2018-03-28 13:27:35 +0300
commit     b24d0d5b12a678b96676348976982686fbe222b4 (patch)
tree       565ce37d2d971cb94436241bc2ac48028b6b66d0 /lib
parent     4ac0d3fb13d5acc138d8be7c45715567c2e2ec47 (diff)
parent     3eb2ce825ea1ad89d20f7a3b5780df850e4be274 (diff)
download   linux-b24d0d5b12a678b96676348976982686fbe222b4.tar.xz
Merge 4.16-rc7 into char-misc-next
We want the hyperv fix in here for merging and testing.
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Diffstat (limited to 'lib')
-rw-r--r--  lib/btree.c           |  10
-rw-r--r--  lib/bug.c             |   4
-rw-r--r--  lib/ioremap.c         |   6
-rw-r--r--  lib/percpu-refcount.c |   2
-rw-r--r--  lib/rhashtable.c      |   4
-rw-r--r--  lib/test_bpf.c        |   6
-rw-r--r--  lib/test_kmod.c       |   2
-rw-r--r--  lib/test_rhashtable.c | 134
8 files changed, 157 insertions, 11 deletions
diff --git a/lib/btree.c b/lib/btree.c
index f93a945274af..590facba2c50 100644
--- a/lib/btree.c
+++ b/lib/btree.c
@@ -3,7 +3,7 @@
  *
  * As should be obvious for Linux kernel code, license is GPLv2
  *
- * Copyright (c) 2007-2008 Joern Engel <joern@logfs.org>
+ * Copyright (c) 2007-2008 Joern Engel <joern@purestorage.com>
  * Bits and pieces stolen from Peter Zijlstra's code, which is
  * Copyright 2007, Red Hat Inc. Peter Zijlstra
  * GPLv2
@@ -76,6 +76,8 @@ struct btree_geo btree_geo128 = {
 };
 EXPORT_SYMBOL_GPL(btree_geo128);
 
+#define MAX_KEYLEN	(2 * LONG_PER_U64)
+
 static struct kmem_cache *btree_cachep;
 
 void *btree_alloc(gfp_t gfp_mask, void *pool_data)
@@ -313,7 +315,7 @@ void *btree_get_prev(struct btree_head *head, struct btree_geo *geo,
 {
 	int i, height;
 	unsigned long *node, *oldnode;
-	unsigned long *retry_key = NULL, key[geo->keylen];
+	unsigned long *retry_key = NULL, key[MAX_KEYLEN];
 
 	if (keyzero(geo, __key))
 		return NULL;
@@ -639,8 +641,8 @@ EXPORT_SYMBOL_GPL(btree_remove);
 int btree_merge(struct btree_head *target, struct btree_head *victim,
 		struct btree_geo *geo, gfp_t gfp)
 {
-	unsigned long key[geo->keylen];
-	unsigned long dup[geo->keylen];
+	unsigned long key[MAX_KEYLEN];
+	unsigned long dup[MAX_KEYLEN];
 	void *val;
 	int err;
 
diff --git a/lib/bug.c b/lib/bug.c
index c1b0fad31b10..1077366f496b 100644
--- a/lib/bug.c
+++ b/lib/bug.c
@@ -150,6 +150,8 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
 		return BUG_TRAP_TYPE_NONE;
 
 	bug = find_bug(bugaddr);
+	if (!bug)
+		return BUG_TRAP_TYPE_NONE;
 
 	file = NULL;
 	line = 0;
@@ -191,7 +193,7 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
 	if (file)
 		pr_crit("kernel BUG at %s:%u!\n", file, line);
 	else
-		pr_crit("Kernel BUG at %p [verbose debug info unavailable]\n",
+		pr_crit("Kernel BUG at %pB [verbose debug info unavailable]\n",
 			(void *)bugaddr);
 
 	return BUG_TRAP_TYPE_BUG;
diff --git a/lib/ioremap.c b/lib/ioremap.c
index b808a390e4c3..54e5bbaa3200 100644
--- a/lib/ioremap.c
+++ b/lib/ioremap.c
@@ -91,7 +91,8 @@ static inline int ioremap_pmd_range(pud_t *pud, unsigned long addr,
 
 		if (ioremap_pmd_enabled() &&
 		    ((next - addr) == PMD_SIZE) &&
-		    IS_ALIGNED(phys_addr + addr, PMD_SIZE)) {
+		    IS_ALIGNED(phys_addr + addr, PMD_SIZE) &&
+		    pmd_free_pte_page(pmd)) {
 			if (pmd_set_huge(pmd, phys_addr + addr, prot))
 				continue;
 		}
@@ -117,7 +118,8 @@ static inline int ioremap_pud_range(p4d_t *p4d, unsigned long addr,
 
 		if (ioremap_pud_enabled() &&
 		    ((next - addr) == PUD_SIZE) &&
-		    IS_ALIGNED(phys_addr + addr, PUD_SIZE)) {
+		    IS_ALIGNED(phys_addr + addr, PUD_SIZE) &&
+		    pud_free_pmd_page(pud)) {
 			if (pud_set_huge(pud, phys_addr + addr, prot))
 				continue;
 		}
diff --git a/lib/percpu-refcount.c b/lib/percpu-refcount.c
index 30e7dd88148b..9f96fa7bc000 100644
--- a/lib/percpu-refcount.c
+++ b/lib/percpu-refcount.c
@@ -322,6 +322,8 @@ EXPORT_SYMBOL_GPL(percpu_ref_switch_to_percpu);
  * This function normally doesn't block and can be called from any context
  * but it may block if @confirm_kill is specified and @ref is in the
  * process of switching to atomic mode by percpu_ref_switch_to_atomic().
+ *
+ * There are no implied RCU grace periods between kill and release.
  */
 void percpu_ref_kill_and_confirm(struct percpu_ref *ref,
 				 percpu_ref_func_t *confirm_kill)
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index 3825c30aaa36..47de025b6245 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -506,8 +506,10 @@ static void *rhashtable_lookup_one(struct rhashtable *ht,
 		if (!key ||
 		    (ht->p.obj_cmpfn ?
 		     ht->p.obj_cmpfn(&arg, rht_obj(ht, head)) :
-		     rhashtable_compare(&arg, rht_obj(ht, head))))
+		     rhashtable_compare(&arg, rht_obj(ht, head)))) {
+			pprev = &head->next;
 			continue;
+		}
 
 		if (!ht->rhlist)
 			return rht_obj(ht, head);
diff --git a/lib/test_bpf.c b/lib/test_bpf.c
index b4e22345963f..3e9335493fe4 100644
--- a/lib/test_bpf.c
+++ b/lib/test_bpf.c
@@ -24,10 +24,11 @@
 #include <linux/if_vlan.h>
 #include <linux/random.h>
 #include <linux/highmem.h>
+#include <linux/sched.h>
 
 /* General test specific settings */
 #define MAX_SUBTESTS	3
-#define MAX_TESTRUNS	10000
+#define MAX_TESTRUNS	1000
 #define MAX_DATA	128
 #define MAX_INSNS	512
 #define MAX_K		0xffffFFFF
@@ -5466,7 +5467,7 @@ static struct bpf_test tests[] = {
 	{
 		"BPF_MAXINSNS: Jump, gap, jump, ...",
 		{ },
-#ifdef CONFIG_BPF_JIT_ALWAYS_ON
+#if defined(CONFIG_BPF_JIT_ALWAYS_ON) && defined(CONFIG_X86)
 		CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL,
 #else
 		CLASSIC | FLAG_NO_DATA,
@@ -6582,6 +6583,7 @@ static __init int test_bpf(void)
 		struct bpf_prog *fp;
 		int err;
 
+		cond_resched();
 		if (exclude_test(i))
 			continue;
 
diff --git a/lib/test_kmod.c b/lib/test_kmod.c
index e372b97eee13..0e5b7a61460b 100644
--- a/lib/test_kmod.c
+++ b/lib/test_kmod.c
@@ -1141,7 +1141,7 @@ static struct kmod_test_device *register_test_dev_kmod(void)
 	mutex_lock(&reg_dev_mutex);
 
 	/* int should suffice for number of devices, test for wrap */
-	if (unlikely(num_test_devs + 1) < 0) {
+	if (num_test_devs + 1 == INT_MAX) {
 		pr_err("reached limit of number of test devices\n");
 		goto out;
 	}
diff --git a/lib/test_rhashtable.c b/lib/test_rhashtable.c
index 76d3667fdea2..f4000c137dbe 100644
--- a/lib/test_rhashtable.c
+++ b/lib/test_rhashtable.c
@@ -79,6 +79,21 @@ struct thread_data {
 	struct test_obj *objs;
 };
 
+static u32 my_hashfn(const void *data, u32 len, u32 seed)
+{
+	const struct test_obj_rhl *obj = data;
+
+	return (obj->value.id % 10) << RHT_HASH_RESERVED_SPACE;
+}
+
+static int my_cmpfn(struct rhashtable_compare_arg *arg, const void *obj)
+{
+	const struct test_obj_rhl *test_obj = obj;
+	const struct test_obj_val *val = arg->key;
+
+	return test_obj->value.id - val->id;
+}
+
 static struct rhashtable_params test_rht_params = {
 	.head_offset = offsetof(struct test_obj, node),
 	.key_offset = offsetof(struct test_obj, value),
@@ -87,6 +102,17 @@ static struct rhashtable_params test_rht_params = {
 	.nulls_base = (3U << RHT_BASE_SHIFT),
 };
 
+static struct rhashtable_params test_rht_params_dup = {
+	.head_offset = offsetof(struct test_obj_rhl, list_node),
+	.key_offset = offsetof(struct test_obj_rhl, value),
+	.key_len = sizeof(struct test_obj_val),
+	.hashfn = jhash,
+	.obj_hashfn = my_hashfn,
+	.obj_cmpfn = my_cmpfn,
+	.nelem_hint = 128,
+	.automatic_shrinking = false,
+};
+
 static struct semaphore prestart_sem;
 static struct semaphore startup_sem = __SEMAPHORE_INITIALIZER(startup_sem, 0);
 
@@ -465,6 +491,112 @@ static int __init test_rhashtable_max(struct test_obj *array,
 	return err;
 }
 
+static unsigned int __init print_ht(struct rhltable *rhlt)
+{
+	struct rhashtable *ht;
+	const struct bucket_table *tbl;
+	char buff[512] = "";
+	unsigned int i, cnt = 0;
+
+	ht = &rhlt->ht;
+	tbl = rht_dereference(ht->tbl, ht);
+	for (i = 0; i < tbl->size; i++) {
+		struct rhash_head *pos, *next;
+		struct test_obj_rhl *p;
+
+		pos = rht_dereference(tbl->buckets[i], ht);
+		next = !rht_is_a_nulls(pos) ? rht_dereference(pos->next, ht) : NULL;
+
+		if (!rht_is_a_nulls(pos)) {
+			sprintf(buff, "%s\nbucket[%d] -> ", buff, i);
+		}
+
+		while (!rht_is_a_nulls(pos)) {
+			struct rhlist_head *list = container_of(pos, struct rhlist_head, rhead);
+			sprintf(buff, "%s[[", buff);
+			do {
+				pos = &list->rhead;
+				list = rht_dereference(list->next, ht);
+				p = rht_obj(ht, pos);
+
+				sprintf(buff, "%s val %d (tid=%d)%s", buff, p->value.id, p->value.tid,
+					list? ", " : " ");
+				cnt++;
+			} while (list);
+
+			pos = next,
+			next = !rht_is_a_nulls(pos) ?
+				rht_dereference(pos->next, ht) : NULL;
+
+			sprintf(buff, "%s]]%s", buff, !rht_is_a_nulls(pos) ? " -> " : "");
+		}
+	}
+	printk(KERN_ERR "\n---- ht: ----%s\n-------------\n", buff);
+
+	return cnt;
+}
+
+static int __init test_insert_dup(struct test_obj_rhl *rhl_test_objects,
+				  int cnt, bool slow)
+{
+	struct rhltable rhlt;
+	unsigned int i, ret;
+	const char *key;
+	int err = 0;
+
+	err = rhltable_init(&rhlt, &test_rht_params_dup);
+	if (WARN_ON(err))
+		return err;
+
+	for (i = 0; i < cnt; i++) {
+		rhl_test_objects[i].value.tid = i;
+		key = rht_obj(&rhlt.ht, &rhl_test_objects[i].list_node.rhead);
+		key += test_rht_params_dup.key_offset;
+
+		if (slow) {
+			err = PTR_ERR(rhashtable_insert_slow(&rhlt.ht, key,
+							     &rhl_test_objects[i].list_node.rhead));
+			if (err == -EAGAIN)
+				err = 0;
+		} else
+			err = rhltable_insert(&rhlt,
+					      &rhl_test_objects[i].list_node,
+					      test_rht_params_dup);
+		if (WARN(err, "error %d on element %d/%d (%s)\n", err, i, cnt, slow? "slow" : "fast"))
+			goto skip_print;
+	}
+
+	ret = print_ht(&rhlt);
+	WARN(ret != cnt, "missing rhltable elements (%d != %d, %s)\n", ret, cnt, slow? "slow" : "fast");
+
+skip_print:
+	rhltable_destroy(&rhlt);
+
+	return 0;
+}
+
+static int __init test_insert_duplicates_run(void)
+{
+	struct test_obj_rhl rhl_test_objects[3] = {};
+
+	pr_info("test inserting duplicates\n");
+
+	/* two different values that map to same bucket */
+	rhl_test_objects[0].value.id = 1;
+	rhl_test_objects[1].value.id = 21;
+
+	/* and another duplicate with same as [0] value
+	 * which will be second on the bucket list */
+	rhl_test_objects[2].value.id = rhl_test_objects[0].value.id;
+
+	test_insert_dup(rhl_test_objects, 2, false);
+	test_insert_dup(rhl_test_objects, 3, false);
+	test_insert_dup(rhl_test_objects, 2, true);
+	test_insert_dup(rhl_test_objects, 3, true);
+
+	return 0;
+}
+
 static int thread_lookup_test(struct thread_data *tdata)
 {
 	unsigned int entries = tdata->entries;
@@ -613,6 +745,8 @@ static int __init test_rht_init(void)
 	do_div(total_time, runs);
 	pr_info("Average test time: %llu\n", total_time);
 
+	test_insert_duplicates_run();
+
 	if (!tcount)
 		return 0;
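
A note on the lib/test_kmod.c hunk above: the old guard `if (unlikely(num_test_devs + 1) < 0)` could never fire, because the kernel's unlikely() expands to __builtin_expect(!!(x), 0) and so collapses its argument to 0 or 1 before the `< 0` comparison is evaluated. The sketch below is a minimal userspace illustration of that trap, not kernel code; unlikely() is redefined locally to mirror the kernel's definition in include/linux/compiler.h.

	#include <limits.h>
	#include <stdio.h>

	/* Mirrors the kernel's definition: folds x to 0 or 1. */
	#define unlikely(x) __builtin_expect(!!(x), 0)

	int main(void)
	{
		int num_test_devs = INT_MAX - 1;	/* one step below the limit */

		/* Old check: unlikely() yields 0 or 1, so "(0 or 1) < 0"
		 * is always false and the wrap is never caught. */
		if (unlikely(num_test_devs + 1) < 0)
			puts("old check fired");	/* unreachable */

		/* New check: error out before the counter reaches INT_MAX,
		 * so a later num_test_devs + 1 can never overflow. */
		if (num_test_devs + 1 == INT_MAX)
			puts("new check fired");	/* fires at the last safe value */

		return 0;
	}

Compiled and run in userspace, this prints only "new check fired", showing that the rewritten comparison catches the wrap the original expression silently missed.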