author      Matthew Wilcox <willy@infradead.org>    2017-11-14 16:30:11 +0300
committer   Matthew Wilcox <willy@infradead.org>    2018-10-21 17:45:58 +0300
commit      b803b42823d0d9e8b6deccf01ffc2aba5d0738df
tree        a3ea387087a9f64258e0bc9300f92ee63d1313fa /lib
parent      41aec91f55985e7f14ee75fe2f6e7bcfff0d0fae
download    linux-b803b42823d0d9e8b6deccf01ffc2aba5d0738df.tar.xz
xarray: Add XArray iterators
The xa_for_each() iterator allows the user to efficiently walk a range
of the array, executing the loop body once for each entry in that
range that matches the filter. This commit also includes xa_find()
and xa_find_after(), which are helper functions for xa_for_each() but
may also be useful in their own right.
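
As a rough usage sketch (not part of the patch: the xarray 'my_xa', the
stored entries and the printed output are invented for illustration;
only the xa_find()/xa_find_after() signatures and the XA_PRESENT filter
come from the code below):

#include <linux/kernel.h>
#include <linux/printk.h>
#include <linux/xarray.h>

static DEFINE_XARRAY(my_xa);

/* Walk every present entry: xa_find() returns the first match at or
 * after *index, xa_find_after() the next match strictly after it, and
 * both update *index to the index of the entry they return. */
static void walk_all_entries(void)
{
        unsigned long index = 0;
        void *entry;

        entry = xa_find(&my_xa, &index, ULONG_MAX, XA_PRESENT);
        while (entry) {
                pr_info("index %lu -> %p\n", index, entry);
                entry = xa_find_after(&my_xa, &index, ULONG_MAX, XA_PRESENT);
        }
}

This is essentially the pattern that the xa_for_each() macro wraps up
as a single loop.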
In the xas family of functions, we have xas_for_each(), xas_find(),
xas_next_entry(), xas_for_each_tagged(), xas_find_tagged(),
xas_next_tagged() and xas_pause().
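
A minimal sketch of the locked xas_* walk combined with xas_pause(), as
described in the xas_pause() kernel-doc below; 'my_walk' is a made-up
caller and cond_resched() merely stands in for whatever slow or sleeping
work would be done while the lock is dropped:

#include <linux/sched.h>
#include <linux/xarray.h>

/* Iterate under the xa_lock, but drop it periodically: xas_pause()
 * puts the walk state into a form that is safe to resume after the
 * lock has been released and retaken. */
static void my_walk(struct xarray *xa)
{
        XA_STATE(xas, xa, 0);
        void *entry;

        xas_lock(&xas);
        xas_for_each(&xas, entry, ULONG_MAX) {
                /* ... examine 'entry' under the lock ... */
                xas_pause(&xas);
                xas_unlock(&xas);
                cond_resched();         /* stand-in for slow, sleeping work */
                xas_lock(&xas);
        }
        xas_unlock(&xas);
}

As the xas_pause() documentation notes, if nearly every entry needs a
pause like this, the unlocked xa_for_each() iterator is usually the
better fit.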
Signed-off-by: Matthew Wilcox <willy@infradead.org>
Diffstat (limited to 'lib')
-rw-r--r--   lib/test_xarray.c   183
-rw-r--r--   lib/xarray.c        292
2 files changed, 475 insertions, 0 deletions
diff --git a/lib/test_xarray.c b/lib/test_xarray.c
index fb472258b639..e3c2d4d00b15 100644
--- a/lib/test_xarray.c
+++ b/lib/test_xarray.c
@@ -75,6 +75,48 @@ static noinline void check_xa_err(struct xarray *xa)
 //      XA_BUG_ON(xa, xa_err(xa_store(xa, 0, xa_mk_internal(0), 0)) != -EINVAL);
 }
 
+static noinline void check_xas_retry(struct xarray *xa)
+{
+        XA_STATE(xas, xa, 0);
+        void *entry;
+
+        xa_store_index(xa, 0, GFP_KERNEL);
+        xa_store_index(xa, 1, GFP_KERNEL);
+
+        rcu_read_lock();
+        XA_BUG_ON(xa, xas_find(&xas, ULONG_MAX) != xa_mk_value(0));
+        xa_erase_index(xa, 1);
+        XA_BUG_ON(xa, !xa_is_retry(xas_reload(&xas)));
+        XA_BUG_ON(xa, xas_retry(&xas, NULL));
+        XA_BUG_ON(xa, xas_retry(&xas, xa_mk_value(0)));
+        xas_reset(&xas);
+        XA_BUG_ON(xa, xas.xa_node != XAS_RESTART);
+        XA_BUG_ON(xa, xas_next_entry(&xas, ULONG_MAX) != xa_mk_value(0));
+        XA_BUG_ON(xa, xas.xa_node != NULL);
+
+        XA_BUG_ON(xa, xa_store_index(xa, 1, GFP_KERNEL) != NULL);
+        XA_BUG_ON(xa, !xa_is_internal(xas_reload(&xas)));
+        xas.xa_node = XAS_RESTART;
+        XA_BUG_ON(xa, xas_next_entry(&xas, ULONG_MAX) != xa_mk_value(0));
+        rcu_read_unlock();
+
+        /* Make sure we can iterate through retry entries */
+        xas_lock(&xas);
+        xas_set(&xas, 0);
+        xas_store(&xas, XA_RETRY_ENTRY);
+        xas_set(&xas, 1);
+        xas_store(&xas, XA_RETRY_ENTRY);
+
+        xas_set(&xas, 0);
+        xas_for_each(&xas, entry, ULONG_MAX) {
+                xas_store(&xas, xa_mk_value(xas.xa_index));
+        }
+        xas_unlock(&xas);
+
+        xa_erase_index(xa, 0);
+        xa_erase_index(xa, 1);
+}
+
 static noinline void check_xa_load(struct xarray *xa)
 {
         unsigned long i, j;
@@ -217,6 +259,44 @@ static noinline void check_cmpxchg(struct xarray *xa)
         XA_BUG_ON(xa, !xa_empty(xa));
 }
 
+static noinline void check_xas_erase(struct xarray *xa)
+{
+        XA_STATE(xas, xa, 0);
+        void *entry;
+        unsigned long i, j;
+
+        for (i = 0; i < 200; i++) {
+                for (j = i; j < 2 * i + 17; j++) {
+                        xas_set(&xas, j);
+                        do {
+                                xas_lock(&xas);
+                                xas_store(&xas, xa_mk_value(j));
+                                xas_unlock(&xas);
+                        } while (xas_nomem(&xas, GFP_KERNEL));
+                }
+
+                xas_set(&xas, ULONG_MAX);
+                do {
+                        xas_lock(&xas);
+                        xas_store(&xas, xa_mk_value(0));
+                        xas_unlock(&xas);
+                } while (xas_nomem(&xas, GFP_KERNEL));
+
+                xas_lock(&xas);
+                xas_store(&xas, NULL);
+
+                xas_set(&xas, 0);
+                j = i;
+                xas_for_each(&xas, entry, ULONG_MAX) {
+                        XA_BUG_ON(xa, entry != xa_mk_value(j));
+                        xas_store(&xas, NULL);
+                        j++;
+                }
+                xas_unlock(&xas);
+                XA_BUG_ON(xa, !xa_empty(xa));
+        }
+}
+
 static noinline void check_multi_store(struct xarray *xa)
 {
 #ifdef CONFIG_XARRAY_MULTI
@@ -285,16 +365,119 @@ static noinline void check_multi_store(struct xarray *xa)
 #endif
 }
 
+static noinline void check_multi_find(struct xarray *xa)
+{
+#ifdef CONFIG_XARRAY_MULTI
+        unsigned long index;
+
+        xa_store_order(xa, 12, 2, xa_mk_value(12), GFP_KERNEL);
+        XA_BUG_ON(xa, xa_store_index(xa, 16, GFP_KERNEL) != NULL);
+
+        index = 0;
+        XA_BUG_ON(xa, xa_find(xa, &index, ULONG_MAX, XA_PRESENT) !=
+                        xa_mk_value(12));
+        XA_BUG_ON(xa, index != 12);
+        index = 13;
+        XA_BUG_ON(xa, xa_find(xa, &index, ULONG_MAX, XA_PRESENT) !=
+                        xa_mk_value(12));
+        XA_BUG_ON(xa, (index < 12) || (index >= 16));
+        XA_BUG_ON(xa, xa_find_after(xa, &index, ULONG_MAX, XA_PRESENT) !=
+                        xa_mk_value(16));
+        XA_BUG_ON(xa, index != 16);
+
+        xa_erase_index(xa, 12);
+        xa_erase_index(xa, 16);
+        XA_BUG_ON(xa, !xa_empty(xa));
+#endif
+}
+
+static noinline void check_multi_find_2(struct xarray *xa)
+{
+        unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 10 : 1;
+        unsigned int i, j;
+        void *entry;
+
+        for (i = 0; i < max_order; i++) {
+                unsigned long index = 1UL << i;
+                for (j = 0; j < index; j++) {
+                        XA_STATE(xas, xa, j + index);
+                        xa_store_index(xa, index - 1, GFP_KERNEL);
+                        xa_store_order(xa, index, i, xa_mk_value(index),
+                                        GFP_KERNEL);
+                        rcu_read_lock();
+                        xas_for_each(&xas, entry, ULONG_MAX) {
+                                xa_erase_index(xa, index);
+                        }
+                        rcu_read_unlock();
+                        xa_erase_index(xa, index - 1);
+                        XA_BUG_ON(xa, !xa_empty(xa));
+                }
+        }
+}
+
+static noinline void check_find(struct xarray *xa)
+{
+        unsigned long i, j, k;
+
+        XA_BUG_ON(xa, !xa_empty(xa));
+
+        /*
+         * Check xa_find with all pairs between 0 and 99 inclusive,
+         * starting at every index between 0 and 99
+         */
+        for (i = 0; i < 100; i++) {
+                XA_BUG_ON(xa, xa_store_index(xa, i, GFP_KERNEL) != NULL);
+                xa_set_mark(xa, i, XA_MARK_0);
+                for (j = 0; j < i; j++) {
+                        XA_BUG_ON(xa, xa_store_index(xa, j, GFP_KERNEL) !=
+                                        NULL);
+                        xa_set_mark(xa, j, XA_MARK_0);
+                        for (k = 0; k < 100; k++) {
+                                unsigned long index = k;
+                                void *entry = xa_find(xa, &index, ULONG_MAX,
+                                                        XA_PRESENT);
+                                if (k <= j)
+                                        XA_BUG_ON(xa, index != j);
+                                else if (k <= i)
+                                        XA_BUG_ON(xa, index != i);
+                                else
+                                        XA_BUG_ON(xa, entry != NULL);
+
+                                index = k;
+                                entry = xa_find(xa, &index, ULONG_MAX,
+                                                XA_MARK_0);
+                                if (k <= j)
+                                        XA_BUG_ON(xa, index != j);
+                                else if (k <= i)
+                                        XA_BUG_ON(xa, index != i);
+                                else
+                                        XA_BUG_ON(xa, entry != NULL);
+                        }
+                        xa_erase_index(xa, j);
+                        XA_BUG_ON(xa, xa_get_mark(xa, j, XA_MARK_0));
+                        XA_BUG_ON(xa, !xa_get_mark(xa, i, XA_MARK_0));
+                }
+                xa_erase_index(xa, i);
+                XA_BUG_ON(xa, xa_get_mark(xa, i, XA_MARK_0));
+        }
+        XA_BUG_ON(xa, !xa_empty(xa));
+        check_multi_find(xa);
+        check_multi_find_2(xa);
+}
+
 static DEFINE_XARRAY(array);
 
 static int xarray_checks(void)
 {
         check_xa_err(&array);
+        check_xas_retry(&array);
         check_xa_load(&array);
         check_xa_mark(&array);
         check_xa_shrink(&array);
+        check_xas_erase(&array);
         check_cmpxchg(&array);
         check_multi_store(&array);
+        check_find(&array);
 
         printk("XArray: %u of %u tests passed\n", tests_passed, tests_run);
         return (tests_run == tests_passed) ? 0 : -EINVAL;
diff --git a/lib/xarray.c b/lib/xarray.c
index 2ba5a98ec618..24494f42daa6 100644
--- a/lib/xarray.c
+++ b/lib/xarray.c
@@ -128,6 +128,11 @@ static unsigned int get_offset(unsigned long index, struct xa_node *node)
         return (index >> node->shift) & XA_CHUNK_MASK;
 }
 
+static void xas_set_offset(struct xa_state *xas)
+{
+        xas->xa_offset = get_offset(xas->xa_index, xas->xa_node);
+}
+
 /* move the index either forwards (find) or backwards (sibling slot) */
 static void xas_move_index(struct xa_state *xas, unsigned long offset)
 {
@@ -136,6 +141,12 @@ static void xas_move_index(struct xa_state *xas, unsigned long offset)
         xas->xa_index += offset << shift;
 }
 
+static void xas_advance(struct xa_state *xas)
+{
+        xas->xa_offset++;
+        xas_move_index(xas, xas->xa_offset);
+}
+
 static void *set_bounds(struct xa_state *xas)
 {
         xas->xa_node = XAS_BOUNDS;
@@ -830,6 +841,202 @@ void xas_init_marks(const struct xa_state *xas)
 EXPORT_SYMBOL_GPL(xas_init_marks);
 
 /**
+ * xas_pause() - Pause a walk to drop a lock.
+ * @xas: XArray operation state.
+ *
+ * Some users need to pause a walk and drop the lock they're holding in
+ * order to yield to a higher priority thread or carry out an operation
+ * on an entry. Those users should call this function before they drop
+ * the lock. It resets the @xas to be suitable for the next iteration
+ * of the loop after the user has reacquired the lock. If most entries
+ * found during a walk require you to call xas_pause(), the xa_for_each()
+ * iterator may be more appropriate.
+ *
+ * Note that xas_pause() only works for forward iteration. If a user needs
+ * to pause a reverse iteration, we will need a xas_pause_rev().
+ */
+void xas_pause(struct xa_state *xas)
+{
+        struct xa_node *node = xas->xa_node;
+
+        if (xas_invalid(xas))
+                return;
+
+        if (node) {
+                unsigned int offset = xas->xa_offset;
+                while (++offset < XA_CHUNK_SIZE) {
+                        if (!xa_is_sibling(xa_entry(xas->xa, node, offset)))
+                                break;
+                }
+                xas->xa_index += (offset - xas->xa_offset) << node->shift;
+        } else {
+                xas->xa_index++;
+        }
+        xas->xa_node = XAS_RESTART;
+}
+EXPORT_SYMBOL_GPL(xas_pause);
+
+/**
+ * xas_find() - Find the next present entry in the XArray.
+ * @xas: XArray operation state.
+ * @max: Highest index to return.
+ *
+ * If the @xas has not yet been walked to an entry, return the entry
+ * which has an index >= xas.xa_index. If it has been walked, the entry
+ * currently being pointed at has been processed, and so we move to the
+ * next entry.
+ *
+ * If no entry is found and the array is smaller than @max, the iterator
+ * is set to the smallest index not yet in the array. This allows @xas
+ * to be immediately passed to xas_store().
+ *
+ * Return: The entry, if found, otherwise %NULL.
+ */
+void *xas_find(struct xa_state *xas, unsigned long max)
+{
+        void *entry;
+
+        if (xas_error(xas))
+                return NULL;
+
+        if (!xas->xa_node) {
+                xas->xa_index = 1;
+                return set_bounds(xas);
+        } else if (xas_top(xas->xa_node)) {
+                entry = xas_load(xas);
+                if (entry || xas_not_node(xas->xa_node))
+                        return entry;
+        } else if (!xas->xa_node->shift &&
+                    xas->xa_offset != (xas->xa_index & XA_CHUNK_MASK)) {
+                xas->xa_offset = ((xas->xa_index - 1) & XA_CHUNK_MASK) + 1;
+        }
+
+        xas_advance(xas);
+
+        while (xas->xa_node && (xas->xa_index <= max)) {
+                if (unlikely(xas->xa_offset == XA_CHUNK_SIZE)) {
+                        xas->xa_offset = xas->xa_node->offset + 1;
+                        xas->xa_node = xa_parent(xas->xa, xas->xa_node);
+                        continue;
+                }
+
+                entry = xa_entry(xas->xa, xas->xa_node, xas->xa_offset);
+                if (xa_is_node(entry)) {
+                        xas->xa_node = xa_to_node(entry);
+                        xas->xa_offset = 0;
+                        continue;
+                }
+                if (entry && !xa_is_sibling(entry))
+                        return entry;
+
+                xas_advance(xas);
+        }
+
+        if (!xas->xa_node)
+                xas->xa_node = XAS_BOUNDS;
+        return NULL;
+}
+EXPORT_SYMBOL_GPL(xas_find);
+
+/**
+ * xas_find_marked() - Find the next marked entry in the XArray.
+ * @xas: XArray operation state.
+ * @max: Highest index to return.
+ * @mark: Mark number to search for.
+ *
+ * If the @xas has not yet been walked to an entry, return the marked entry
+ * which has an index >= xas.xa_index. If it has been walked, the entry
+ * currently being pointed at has been processed, and so we return the
+ * first marked entry with an index > xas.xa_index.
+ *
+ * If no marked entry is found and the array is smaller than @max, @xas is
+ * set to the bounds state and xas->xa_index is set to the smallest index
+ * not yet in the array. This allows @xas to be immediately passed to
+ * xas_store().
+ *
+ * If no entry is found before @max is reached, @xas is set to the restart
+ * state.
+ *
+ * Return: The entry, if found, otherwise %NULL.
+ */
+void *xas_find_marked(struct xa_state *xas, unsigned long max, xa_mark_t mark)
+{
+        bool advance = true;
+        unsigned int offset;
+        void *entry;
+
+        if (xas_error(xas))
+                return NULL;
+
+        if (!xas->xa_node) {
+                xas->xa_index = 1;
+                goto out;
+        } else if (xas_top(xas->xa_node)) {
+                advance = false;
+                entry = xa_head(xas->xa);
+                xas->xa_node = NULL;
+                if (xas->xa_index > max_index(entry))
+                        goto bounds;
+                if (!xa_is_node(entry)) {
+                        if (xa_marked(xas->xa, mark))
+                                return entry;
+                        xas->xa_index = 1;
+                        goto out;
+                }
+                xas->xa_node = xa_to_node(entry);
+                xas->xa_offset = xas->xa_index >> xas->xa_node->shift;
+        }
+
+        while (xas->xa_index <= max) {
+                if (unlikely(xas->xa_offset == XA_CHUNK_SIZE)) {
+                        xas->xa_offset = xas->xa_node->offset + 1;
+                        xas->xa_node = xa_parent(xas->xa, xas->xa_node);
+                        if (!xas->xa_node)
+                                break;
+                        advance = false;
+                        continue;
+                }
+
+                if (!advance) {
+                        entry = xa_entry(xas->xa, xas->xa_node, xas->xa_offset);
+                        if (xa_is_sibling(entry)) {
+                                xas->xa_offset = xa_to_sibling(entry);
+                                xas_move_index(xas, xas->xa_offset);
+                        }
+                }
+
+                offset = xas_find_chunk(xas, advance, mark);
+                if (offset > xas->xa_offset) {
+                        advance = false;
+                        xas_move_index(xas, offset);
+                        /* Mind the wrap */
+                        if ((xas->xa_index - 1) >= max)
+                                goto max;
+                        xas->xa_offset = offset;
+                        if (offset == XA_CHUNK_SIZE)
+                                continue;
+                }
+
+                entry = xa_entry(xas->xa, xas->xa_node, xas->xa_offset);
+                if (!xa_is_node(entry))
+                        return entry;
+                xas->xa_node = xa_to_node(entry);
+                xas_set_offset(xas);
+        }
+
+out:
+        if (!max)
+                goto max;
+bounds:
+        xas->xa_node = XAS_BOUNDS;
+        return NULL;
+max:
+        xas->xa_node = XAS_RESTART;
+        return NULL;
+}
+EXPORT_SYMBOL_GPL(xas_find_marked);
+
+/**
  * xa_init_flags() - Initialise an empty XArray with flags.
  * @xa: XArray.
  * @flags: XA_FLAG values.
@@ -1152,6 +1359,91 @@ void xa_clear_mark(struct xarray *xa, unsigned long index, xa_mark_t mark)
 }
 EXPORT_SYMBOL(xa_clear_mark);
 
+/**
+ * xa_find() - Search the XArray for an entry.
+ * @xa: XArray.
+ * @indexp: Pointer to an index.
+ * @max: Maximum index to search to.
+ * @filter: Selection criterion.
+ *
+ * Finds the entry in @xa which matches the @filter, and has the lowest
+ * index that is at least @indexp and no more than @max.
+ * If an entry is found, @indexp is updated to be the index of the entry.
+ * This function is protected by the RCU read lock, so it may not find
+ * entries which are being simultaneously added. It will not return an
+ * %XA_RETRY_ENTRY; if you need to see retry entries, use xas_find().
+ *
+ * Context: Any context. Takes and releases the RCU lock.
+ * Return: The entry, if found, otherwise %NULL.
+ */
+void *xa_find(struct xarray *xa, unsigned long *indexp,
+                        unsigned long max, xa_mark_t filter)
+{
+        XA_STATE(xas, xa, *indexp);
+        void *entry;
+
+        rcu_read_lock();
+        do {
+                if ((__force unsigned int)filter < XA_MAX_MARKS)
+                        entry = xas_find_marked(&xas, max, filter);
+                else
+                        entry = xas_find(&xas, max);
+        } while (xas_retry(&xas, entry));
+        rcu_read_unlock();
+
+        if (entry)
+                *indexp = xas.xa_index;
+        return entry;
+}
+EXPORT_SYMBOL(xa_find);
+
+/**
+ * xa_find_after() - Search the XArray for a present entry.
+ * @xa: XArray.
+ * @indexp: Pointer to an index.
+ * @max: Maximum index to search to.
+ * @filter: Selection criterion.
+ *
+ * Finds the entry in @xa which matches the @filter and has the lowest
+ * index that is above @indexp and no more than @max.
+ * If an entry is found, @indexp is updated to be the index of the entry.
+ * This function is protected by the RCU read lock, so it may miss entries
+ * which are being simultaneously added. It will not return an
+ * %XA_RETRY_ENTRY; if you need to see retry entries, use xas_find().
+ *
+ * Context: Any context. Takes and releases the RCU lock.
+ * Return: The pointer, if found, otherwise %NULL.
+ */
+void *xa_find_after(struct xarray *xa, unsigned long *indexp,
+                        unsigned long max, xa_mark_t filter)
+{
+        XA_STATE(xas, xa, *indexp + 1);
+        void *entry;
+
+        rcu_read_lock();
+        for (;;) {
+                if ((__force unsigned int)filter < XA_MAX_MARKS)
+                        entry = xas_find_marked(&xas, max, filter);
+                else
+                        entry = xas_find(&xas, max);
+                if (xas.xa_shift) {
+                        if (xas.xa_index & ((1UL << xas.xa_shift) - 1))
+                                continue;
+                } else {
+                        if (xas.xa_offset < (xas.xa_index & XA_CHUNK_MASK))
+                                continue;
+                }
+                if (!xas_retry(&xas, entry))
+                        break;
+        }
+        rcu_read_unlock();
+
+        if (entry)
+                *indexp = xas.xa_index;
+        return entry;
+}
+EXPORT_SYMBOL(xa_find_after);
+
 #ifdef XA_DEBUG
 void xa_dump_node(const struct xa_node *node)
 {