author | Matthew Wilcox <mawilcox@microsoft.com> | 2016-12-15 02:09:31 +0300
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2016-12-15 03:04:10 +0300
commit | e8de4340767dd002978c285e3adddaeda8ac652c (patch)
tree | 3e14e4a3b7b0874e4e824b7873558a1017d2e368 /tools
parent | bbe9d71f2c545398987a6fea5090a6ca76f4a8dc (diff)
download | linux-e8de4340767dd002978c285e3adddaeda8ac652c.tar.xz
radix-tree: ensure counts are initialised
radix_tree_join() was freeing nodes with a non-zero ->exceptional count,
and radix_tree_split() wasn't zeroing ->exceptional when it allocated
the new node. Fix this by making all callers of radix_tree_node_alloc()
pass in the new counts (and some other always-initialised fields), which
will prevent the problem recurring if in future we decide to do
something similar.
Link: http://lkml.kernel.org/r/1481667692-14500-3-git-send-email-mawilcox@linuxonhyperv.com
Signed-off-by: Matthew Wilcox <mawilcox@microsoft.com>
Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Konstantin Khlebnikov <koct9i@gmail.com>
Cc: Ross Zwisler <ross.zwisler@linux.intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
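
Editorial note: the commit message above describes the shape of the fix — the allocator, rather than its callers, becomes responsible for the bookkeeping fields, so a node can never leave radix_tree_node_alloc() with stale counts. Below is a minimal user-space sketch of that pattern only; the struct, field widths and the node_alloc() signature are illustrative stand-ins, not the kernel's actual code.

#include <stdlib.h>

/* Simplified stand-ins for the kernel's radix-tree node bookkeeping. */
struct node {
	unsigned char shift;		/* height of this node in the tree */
	unsigned char offset;		/* slot index in the parent node */
	unsigned char count;		/* number of populated slots */
	unsigned char exceptional;	/* number of exceptional entries */
	void *slots[64];
};

/*
 * Before the fix (conceptually): callers received a blank node and were
 * expected to fill in count/exceptional afterwards, which radix_tree_split()
 * forgot to do, while radix_tree_join() freed nodes still carrying a
 * non-zero exceptional count.
 *
 * After the fix (conceptually): the allocator takes the bookkeeping values
 * as arguments, so every node starts life with consistent counts.
 */
static struct node *node_alloc(unsigned char shift, unsigned char offset,
			       unsigned char count, unsigned char exceptional)
{
	struct node *node = calloc(1, sizeof(*node));

	if (!node)
		return NULL;
	node->shift = shift;
	node->offset = offset;
	node->count = count;
	node->exceptional = exceptional;
	return node;
}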
Diffstat (limited to 'tools')
-rw-r--r-- | tools/testing/radix-tree/multiorder.c | 45
1 file changed, 41 insertions(+), 4 deletions(-)
diff --git a/tools/testing/radix-tree/multiorder.c b/tools/testing/radix-tree/multiorder.c
index 08b4e16dc86f..f79812a5e070 100644
--- a/tools/testing/radix-tree/multiorder.c
+++ b/tools/testing/radix-tree/multiorder.c
@@ -355,7 +355,7 @@ void multiorder_tagged_iteration(void)
 	item_kill_tree(&tree);
 }
 
-static void __multiorder_join(unsigned long index,
+static void multiorder_join1(unsigned long index,
 				unsigned order1, unsigned order2)
 {
 	unsigned long loc;
@@ -373,7 +373,7 @@ static void __multiorder_join(unsigned long index,
 	item_kill_tree(&tree);
 }
 
-static void __multiorder_join2(unsigned order1, unsigned order2)
+static void multiorder_join2(unsigned order1, unsigned order2)
 {
 	RADIX_TREE(tree, GFP_KERNEL);
 	struct radix_tree_node *node;
@@ -393,6 +393,39 @@ static void __multiorder_join2(unsigned order1, unsigned order2)
 	item_kill_tree(&tree);
 }
 
+/*
+ * This test revealed an accounting bug for exceptional entries at one point.
+ * Nodes were being freed back into the pool with an elevated exception count
+ * by radix_tree_join() and then radix_tree_split() was failing to zero the
+ * count of exceptional entries.
+ */
+static void multiorder_join3(unsigned int order)
+{
+	RADIX_TREE(tree, GFP_KERNEL);
+	struct radix_tree_node *node;
+	void **slot;
+	struct radix_tree_iter iter;
+	unsigned long i;
+
+	for (i = 0; i < (1 << order); i++) {
+		radix_tree_insert(&tree, i, (void *)0x12UL);
+	}
+
+	radix_tree_join(&tree, 0, order, (void *)0x16UL);
+	rcu_barrier();
+
+	radix_tree_split(&tree, 0, 0);
+
+	radix_tree_for_each_slot(slot, &tree, &iter, 0) {
+		radix_tree_iter_replace(&tree, &iter, slot, (void *)0x12UL);
+	}
+
+	__radix_tree_lookup(&tree, 0, &node, NULL);
+	assert(node->exceptional == node->count);
+
+	item_kill_tree(&tree);
+}
+
 static void multiorder_join(void)
 {
 	int i, j, idx;
@@ -400,16 +433,20 @@ static void multiorder_join(void)
 	for (idx = 0; idx < 1024; idx = idx * 2 + 3) {
 		for (i = 1; i < 15; i++) {
 			for (j = 0; j < i; j++) {
-				__multiorder_join(idx, i, j);
+				multiorder_join1(idx, i, j);
 			}
 		}
 	}
 
 	for (i = 1; i < 15; i++) {
 		for (j = 0; j < i; j++) {
-			__multiorder_join2(i, j);
+			multiorder_join2(i, j);
 		}
 	}
+
+	for (i = 3; i < 10; i++) {
+		multiorder_join3(i);
+	}
 }
 
 static void check_mem(unsigned old_order, unsigned new_order, unsigned alloc)
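
Editorial note on the test: values such as (void *)0x12UL and (void *)0x16UL have bit 1 set, which is how the radix tree of this era marks an "exceptional" entry, so every slot the test populates contributes to node->exceptional as well as node->count; the final assert checks that the two counters stay in sync across the join/split cycle. A minimal user-space illustration of that encoding follows; the helper name is made up for this sketch, though the constant mirrors the kernel's radix-tree.h of the time.

#include <assert.h>

/* Bit 1 of an entry marks it as "exceptional" (not a normal kernel pointer). */
#define RADIX_TREE_EXCEPTIONAL_ENTRY	2

static int is_exceptional_entry(void *arg)
{
	return (unsigned long)arg & RADIX_TREE_EXCEPTIONAL_ENTRY;
}

int main(void)
{
	/* Both constants used by multiorder_join3() carry the exceptional bit. */
	assert(is_exceptional_entry((void *)0x12UL));
	assert(is_exceptional_entry((void *)0x16UL));
	return 0;
}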