author | Liam R. Howlett <Liam.Howlett@oracle.com> | 2023-11-01 20:16:28 +0300
committer | Andrew Morton <akpm@linux-foundation.org> | 2023-12-12 21:56:59 +0300
commit | 24662decdd44645e8f027d7912be962dd461d1aa (patch)
tree | 5ed4605a1141585023b316983b682373668a9c91 /lib/test_maple_tree.c
parent | 0de56e38b307b0cb2ac825e8e7cb371a28daf844 (diff)
download | linux-24662decdd44645e8f027d7912be962dd461d1aa.tar.xz
maple_tree: don't find node end in mtree_lookup_walk()
Since the pivots are now reliably set, the optimized loop no longer needs
to find the node end. The redundant dead-node check can also be avoided:
there is no danger of acting on a wrong pivot, because if the node is dead
the result is thrown out by the later check anyway.
This patch also adds a benchmark test for the function to the maple tree
test framework. The benchmark shows an average performance increase of
5.98% over 3 runs with this commit.
Link: https://lkml.kernel.org/r/20231101171629.3612299-12-Liam.Howlett@oracle.com
Signed-off-by: Liam R. Howlett <Liam.Howlett@oracle.com>
Cc: Peng Zhang <zhangpeng.00@bytedance.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
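The mtree_lookup_walk() change itself is not part of this diffstat, which covers only lib/test_maple_tree.c. As a rough illustration of the loop shape the message describes, here is a hypothetical sketch using a made-up, simplified node layout (not the kernel's struct maple_node or its helpers): the walk stops at the first pivot that covers the index instead of first computing the node end.

```c
/*
 * Hypothetical sketch only: a made-up, simplified node layout, not the
 * kernel's struct maple_node or its ma_pivots()/ma_slots() helpers.
 */
#include <stdbool.h>
#include <stddef.h>

#define DEMO_SLOTS 16

struct demo_node {
	/*
	 * pivot[i] is the highest index stored under slot[i]; unused
	 * trailing pivots are assumed to be reliably zeroed, which is
	 * what lets the walk below skip computing the node end.
	 */
	unsigned long pivot[DEMO_SLOTS - 1];
	void *slot[DEMO_SLOTS];
	bool leaf;
};

static void *demo_lookup_walk(struct demo_node *node, unsigned long index)
{
	unsigned char offset;

	while (node) {
		/*
		 * Stop at the first pivot that covers index; no separate
		 * scan for the node end, and no dead-node check inside
		 * the loop (per the commit message, the kernel defers
		 * that to a single later check which throws the result
		 * away if the node turned out to be dead).
		 */
		for (offset = 0; offset < DEMO_SLOTS - 1; offset++)
			if (index <= node->pivot[offset])
				break;

		if (node->leaf)
			return node->slot[offset];

		node = node->slot[offset];
	}

	return NULL;
}
```

The bench_load() hunk below hammers exactly this lookup path by calling mtree_load() repeatedly on a single index.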
Diffstat (limited to 'lib/test_maple_tree.c')
-rw-r--r-- | lib/test_maple_tree.c | 21
1 file changed, 21 insertions, 0 deletions
diff --git a/lib/test_maple_tree.c b/lib/test_maple_tree.c
index e7a5d688c9e0..29185ac5c727 100644
--- a/lib/test_maple_tree.c
+++ b/lib/test_maple_tree.c
@@ -43,6 +43,7 @@ atomic_t maple_tree_tests_passed;
 /* #define BENCH_NODE_STORE */
 /* #define BENCH_AWALK */
 /* #define BENCH_WALK */
+/* #define BENCH_LOAD */
 /* #define BENCH_MT_FOR_EACH */
 /* #define BENCH_FORK */
 /* #define BENCH_MAS_FOR_EACH */
@@ -1754,6 +1755,19 @@ static noinline void __init bench_walk(struct maple_tree *mt)
 }
 #endif
 
+#if defined(BENCH_LOAD)
+static noinline void __init bench_load(struct maple_tree *mt)
+{
+	int i, max = 2500, count = 550000000;
+
+	for (i = 0; i < max; i += 10)
+		mtree_store_range(mt, i, i + 5, xa_mk_value(i), GFP_KERNEL);
+
+	for (i = 0; i < count; i++)
+		mtree_load(mt, 1470);
+}
+#endif
+
 #if defined(BENCH_MT_FOR_EACH)
 static noinline void __init bench_mt_for_each(struct maple_tree *mt)
 {
@@ -3623,6 +3637,13 @@ static int __init maple_tree_seed(void)
 	mtree_destroy(&tree);
 	goto skip;
 #endif
+#if defined(BENCH_LOAD)
+#define BENCH
+	mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
+	bench_load(&tree);
+	mtree_destroy(&tree);
+	goto skip;
+#endif
 #if defined(BENCH_FORK)
 #define BENCH
 	bench_forking();
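Like the other BENCH_* switches visible in the first hunk, the new benchmark is compiled out by default. Presumably it is enabled by uncommenting the define before building the test (normally built via CONFIG_TEST_MAPLE_TREE; the exact build and run steps depend on your configuration), after which maple_tree_seed() runs only the benchmark and skips the functional tests via the goto skip added above.

```c
/* In lib/test_maple_tree.c, turn the commented-out switch into a real define: */
#define BENCH_LOAD

/*
 * With BENCH_LOAD set, the new hunk in maple_tree_seed() builds an
 * MT_FLAGS_ALLOC_RANGE tree, stores 250 small ranges (0-5, 10-15, ...,
 * 2490-2495), then calls mtree_load(mt, 1470) 550 million times before
 * jumping to skip, so only the benchmark runs.
 */
```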