author		Kemeng Shi <shikemeng@huaweicloud.com>	2024-12-13 15:25:20 +0300
committer	Andrew Morton <akpm@linux-foundation.org>	2025-01-25 09:47:27 +0300
commit		c9ba5249ef8b080c6779d3da14a061f6d4d6d5fa (patch)
tree		7b09aa3fa002ad701eda1287aec53d8b2dc4bd2f
parent		7e060df04f562b37bdc101fd06b16f013e2d989b (diff)
download	linux-c9ba5249ef8b080c6779d3da14a061f6d4d6d5fa.tar.xz
Xarray: move forward index correctly in xas_pause()
After xas_load(), xas->index may point into the middle of a found multi-index entry, so the bits of xas->index below node->shift may be non-zero. A subsequent xas_pause() then advances xas->index by a multiple of (1 << node->shift) without masking those low bits, and thus unexpectedly skips some indices.

Consider the following case, assuming XA_CHUNK_SHIFT is 4:

	xa_store_range(xa, 16, 31, ...)
	xa_store(xa, 32, ...)
	XA_STATE(xas, xa, 17);
	xas_for_each(&xas, ...)
		xas_load(&xas)
		/* xas->index = 17, xas->xa_offset = 1, xas->xa_node->shift = 4 */
		xas_pause()
		/* xas->index = 33, xas->xa_offset = 2, xas->xa_node->shift = 4 */

As we can see, index 32 is skipped unexpectedly.

Fix this by masking the bits below node->shift when moving the index forward in xas_pause().

For now, this does not cause serious problems; only minor issues, such as cachestat reporting fewer page statuses than it should, can occur.

Link: https://lkml.kernel.org/r/20241213122523.12764-3-shikemeng@huaweicloud.com
Signed-off-by: Kemeng Shi <shikemeng@huaweicloud.com>
Cc: Matthew Wilcox <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
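As an aside (not part of the patch), the problematic scenario above can be written as a small in-kernel sketch. The function name pause_skip_demo() and the use of xa_mk_value() are illustrative; the sketch assumes CONFIG_XARRAY_MULTI and, as in the example, XA_CHUNK_SHIFT == 4 so that indices 16..31 share one multi-index entry:

	#include <linux/xarray.h>

	static DEFINE_XARRAY(xa);

	static void pause_skip_demo(void)
	{
		XA_STATE(xas, &xa, 17);
		void *entry;

		xa_store_range(&xa, 16, 31, xa_mk_value(16), GFP_KERNEL);
		xa_store(&xa, 32, xa_mk_value(32), GFP_KERNEL);

		rcu_read_lock();
		xas_for_each(&xas, entry, ULONG_MAX) {
			/*
			 * Before the fix, pausing while xas->index pointed
			 * into the middle of the 16..31 entry resumed the
			 * walk at index 33, so the entry stored at index 32
			 * was never returned by this loop.
			 */
			xas_pause(&xas);
		}
		rcu_read_unlock();

		xa_destroy(&xa);
	}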
-rw-r--r--	lib/test_xarray.c	35
-rw-r--r--	lib/xarray.c	1
2 files changed, 36 insertions, 0 deletions
diff --git a/lib/test_xarray.c b/lib/test_xarray.c
index b6cac747ec46..eab5971d0a48 100644
--- a/lib/test_xarray.c
+++ b/lib/test_xarray.c
@@ -1511,6 +1511,41 @@ static noinline void check_pause(struct kunit *test)
 	XA_BUG_ON(xa, count != order_limit);
 
 	xa_destroy(xa);
+
+	index = 0;
+	for (order = XA_CHUNK_SHIFT; order > 0; order--) {
+		XA_BUG_ON(xa, xa_store_order(xa, index, order,
+				xa_mk_index(index), GFP_KERNEL));
+		index += 1UL << order;
+	}
+
+	index = 0;
+	count = 0;
+	xas_set(&xas, 0);
+	rcu_read_lock();
+	xas_for_each(&xas, entry, ULONG_MAX) {
+		XA_BUG_ON(xa, entry != xa_mk_index(index));
+		index += 1UL << (XA_CHUNK_SHIFT - count);
+		count++;
+	}
+	rcu_read_unlock();
+	XA_BUG_ON(xa, count != XA_CHUNK_SHIFT);
+
+	index = 0;
+	count = 0;
+	xas_set(&xas, XA_CHUNK_SIZE / 2 + 1);
+	rcu_read_lock();
+	xas_for_each(&xas, entry, ULONG_MAX) {
+		XA_BUG_ON(xa, entry != xa_mk_index(index));
+		index += 1UL << (XA_CHUNK_SHIFT - count);
+		count++;
+		xas_pause(&xas);
+	}
+	rcu_read_unlock();
+	XA_BUG_ON(xa, count != XA_CHUNK_SHIFT);
+
+	xa_destroy(xa);
+
 }
 
 static noinline void check_move_tiny(struct kunit *test)
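As a side note (not part of the patch), the store loop in the new test lays the entries out back to back, one per order from XA_CHUNK_SHIFT down to 1, which is why the iteration checks expect exactly XA_CHUNK_SHIFT entries. A tiny standalone userspace sketch of that layout, assuming XA_CHUNK_SHIFT == 6 (the value is 4 only with CONFIG_BASE_SMALL; the constant here is hard-coded, not taken from kernel headers):

	#include <stdio.h>

	#define XA_CHUNK_SHIFT 6	/* assumed value for this sketch */

	int main(void)
	{
		unsigned long index = 0;

		/* Mirrors the store loop in check_pause(): orders 6, 5, ..., 1. */
		for (int order = XA_CHUNK_SHIFT; order > 0; order--) {
			printf("order %d entry covers [%lu, %lu]\n",
			       order, index, index + (1UL << order) - 1);
			index += 1UL << order;
		}
		return 0;
	}

	/* Prints [0,63] [64,95] [96,111] [112,119] [120,123] [124,125]. */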
diff --git a/lib/xarray.c b/lib/xarray.c
index 19a9ea183c2d..091e2c927915 100644
--- a/lib/xarray.c
+++ b/lib/xarray.c
@@ -1152,6 +1152,7 @@ void xas_pause(struct xa_state *xas)
 			if (!xa_is_sibling(xa_entry(xas->xa, node, offset)))
 				break;
 		}
+		xas->xa_index &= ~0UL << node->shift;
 		xas->xa_index += (offset - xas->xa_offset) << node->shift;
 		if (xas->xa_index == 0)
 			xas->xa_node = XAS_BOUNDS;
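To make the one-line fix concrete, here is a standalone userspace sketch of the old versus fixed resume-index arithmetic, using the numbers from the example in the commit message (XA_CHUNK_SHIFT assumed to be 4; the variable names only mirror the xa_state fields, this is not kernel code):

	#include <stdio.h>

	int main(void)
	{
		unsigned long index = 17;	/* xas->xa_index after xas_load() */
		unsigned int shift = 4;		/* node->shift */
		unsigned int xa_offset = 1;	/* slot holding the 16..31 entry */
		unsigned int offset = 2;	/* first non-sibling slot after it */

		/* Old advance: the low bits of index stay set, so iteration
		 * resumes at 33 and the entry stored at index 32 is skipped.
		 */
		unsigned long old_next = index +
				((unsigned long)(offset - xa_offset) << shift);

		/* Fixed advance: mask the bits below node->shift first, so
		 * the walk resumes at the next slot boundary, index 32.
		 */
		unsigned long new_next = (index & (~0UL << shift)) +
				((unsigned long)(offset - xa_offset) << shift);

		printf("old resume index: %lu, fixed resume index: %lu\n",
		       old_next, new_next);	/* 33 and 32 */
		return 0;
	}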