From 02669b17a433c242a40f01f14b691c9c9d1f8a13 Mon Sep 17 00:00:00 2001 From: Matthew Wilcox Date: Wed, 5 Dec 2018 16:37:03 -0500 Subject: XArray: Turn xa_init_flags into a static inline A regular xa_init_flags() put all dynamically-initialised XArrays into the same locking class. That leads to lockdep believing that taking one XArray lock while holding another is a deadlock. It's possible to work around some of these situations with separate locking classes for irq/bh/regular XArrays, and SINGLE_DEPTH_NESTING, but that's ugly, and it doesn't work for all situations (where we have completely unrelated XArrays). Signed-off-by: Matthew Wilcox --- include/linux/xarray.h | 19 ++++++++++++++++++- 1 file changed, 18 insertions(+), 1 deletion(-) (limited to 'include/linux/xarray.h') diff --git a/include/linux/xarray.h b/include/linux/xarray.h index f492e21c4aa2..4cf3cd128689 100644 --- a/include/linux/xarray.h +++ b/include/linux/xarray.h @@ -286,7 +286,6 @@ struct xarray { */ #define DEFINE_XARRAY_ALLOC(name) DEFINE_XARRAY_FLAGS(name, XA_FLAGS_ALLOC) -void xa_init_flags(struct xarray *, gfp_t flags); void *xa_load(struct xarray *, unsigned long index); void *xa_store(struct xarray *, unsigned long index, void *entry, gfp_t); void *xa_erase(struct xarray *, unsigned long index); @@ -303,6 +302,24 @@ unsigned int xa_extract(struct xarray *, void **dst, unsigned long start, unsigned long max, unsigned int n, xa_mark_t); void xa_destroy(struct xarray *); +/** + * xa_init_flags() - Initialise an empty XArray with flags. + * @xa: XArray. + * @flags: XA_FLAG values. + * + * If you need to initialise an XArray with special flags (eg you need + * to take the lock from interrupt context), use this function instead + * of xa_init(). + * + * Context: Any context. + */ +static inline void xa_init_flags(struct xarray *xa, gfp_t flags) +{ + spin_lock_init(&xa->xa_lock); + xa->xa_flags = flags; + xa->xa_head = NULL; +} + /** * xa_init() - Initialise an empty XArray. * @xa: XArray. -- cgit v1.2.3 From 4a31896c5b5a2715ecf4033426aa0a35066d92d6 Mon Sep 17 00:00:00 2001 From: Matthew Wilcox Date: Mon, 17 Dec 2018 14:45:36 -0500 Subject: XArray: Change xa_for_each iterator There were three problems with this API: 1. It took too many arguments; almost all users wanted to iterate over every element in the array rather than a subset. 2. It required that 'index' be initialised before use, and there's no realistic way to make GCC catch that. 3. 'index' and 'entry' were the opposite way round from every other member of the XArray APIs. So split it into three different APIs: xa_for_each(xa, index, entry) xa_for_each_start(xa, index, entry, start) xa_for_each_marked(xa, index, entry, filter) Signed-off-by: Matthew Wilcox --- include/linux/xarray.h | 78 +++++++++++++++++++++++++++++++++++++++++--------- lib/test_xarray.c | 11 ++++--- 2 files changed, 70 insertions(+), 19 deletions(-) (limited to 'include/linux/xarray.h') diff --git a/include/linux/xarray.h b/include/linux/xarray.h index 4cf3cd128689..3d0ce8b267e3 100644 --- a/include/linux/xarray.h +++ b/include/linux/xarray.h @@ -359,20 +359,45 @@ static inline bool xa_marked(const struct xarray *xa, xa_mark_t mark) } /** - * xa_for_each() - Iterate over a portion of an XArray. + * xa_for_each_start() - Iterate over a portion of an XArray. * @xa: XArray. + * @index: Index of @entry. * @entry: Entry retrieved from array. + * @start: First index to retrieve from array. + * + * During the iteration, @entry will have the value of the entry stored + * in @xa at @index. 
You may modify @index during the iteration if you + * want to skip or reprocess indices. It is safe to modify the array + * during the iteration. At the end of the iteration, @entry will be set + * to NULL and @index will have a value less than or equal to max. + * + * xa_for_each_start() is O(n.log(n)) while xas_for_each() is O(n). You have + * to handle your own locking with xas_for_each(), and if you have to unlock + * after each iteration, it will also end up being O(n.log(n)). + * xa_for_each_start() will spin if it hits a retry entry; if you intend to + * see retry entries, you should use the xas_for_each() iterator instead. + * The xas_for_each() iterator will expand into more inline code than + * xa_for_each_start(). + * + * Context: Any context. Takes and releases the RCU lock. + */ +#define xa_for_each_start(xa, index, entry, start) \ + for (index = start, \ + entry = xa_find(xa, &index, ULONG_MAX, XA_PRESENT); \ + entry; \ + entry = xa_find_after(xa, &index, ULONG_MAX, XA_PRESENT)) + +/** + * xa_for_each() - Iterate over present entries in an XArray. + * @xa: XArray. * @index: Index of @entry. - * @max: Maximum index to retrieve from array. - * @filter: Selection criterion. + * @entry: Entry retrieved from array. * - * Initialise @index to the lowest index you want to retrieve from the - * array. During the iteration, @entry will have the value of the entry - * stored in @xa at @index. The iteration will skip all entries in the - * array which do not match @filter. You may modify @index during the - * iteration if you want to skip or reprocess indices. It is safe to modify - * the array during the iteration. At the end of the iteration, @entry will - * be set to NULL and @index will have a value less than or equal to max. + * During the iteration, @entry will have the value of the entry stored + * in @xa at @index. You may modify @index during the iteration if you want + * to skip or reprocess indices. It is safe to modify the array during the + * iteration. At the end of the iteration, @entry will be set to NULL and + * @index will have a value less than or equal to max. * * xa_for_each() is O(n.log(n)) while xas_for_each() is O(n). You have * to handle your own locking with xas_for_each(), and if you have to unlock @@ -383,9 +408,36 @@ static inline bool xa_marked(const struct xarray *xa, xa_mark_t mark) * * Context: Any context. Takes and releases the RCU lock. */ -#define xa_for_each(xa, entry, index, max, filter) \ - for (entry = xa_find(xa, &index, max, filter); entry; \ - entry = xa_find_after(xa, &index, max, filter)) +#define xa_for_each(xa, index, entry) \ + xa_for_each_start(xa, index, entry, 0) + +/** + * xa_for_each_marked() - Iterate over marked entries in an XArray. + * @xa: XArray. + * @index: Index of @entry. + * @entry: Entry retrieved from array. + * @filter: Selection criterion. + * + * During the iteration, @entry will have the value of the entry stored + * in @xa at @index. The iteration will skip all entries in the array + * which do not match @filter. You may modify @index during the iteration + * if you want to skip or reprocess indices. It is safe to modify the array + * during the iteration. At the end of the iteration, @entry will be set to + * NULL and @index will have a value less than or equal to max. + * + * xa_for_each_marked() is O(n.log(n)) while xas_for_each_marked() is O(n). + * You have to handle your own locking with xas_for_each(), and if you have + * to unlock after each iteration, it will also end up being O(n.log(n)). 
+ * xa_for_each_marked() will spin if it hits a retry entry; if you intend to + * see retry entries, you should use the xas_for_each_marked() iterator + * instead. The xas_for_each_marked() iterator will expand into more inline + * code than xa_for_each_marked(). + * + * Context: Any context. Takes and releases the RCU lock. + */ +#define xa_for_each_marked(xa, index, entry, filter) \ + for (index = 0, entry = xa_find(xa, &index, ULONG_MAX, filter); \ + entry; entry = xa_find_after(xa, &index, ULONG_MAX, filter)) #define xa_trylock(xa) spin_trylock(&(xa)->xa_lock) #define xa_lock(xa) spin_lock(&(xa)->xa_lock) diff --git a/lib/test_xarray.c b/lib/test_xarray.c index a885afde0aef..dc02eff562b8 100644 --- a/lib/test_xarray.c +++ b/lib/test_xarray.c @@ -357,7 +357,7 @@ static noinline void check_cmpxchg(struct xarray *xa) static noinline void check_reserve(struct xarray *xa) { void *entry; - unsigned long index = 0; + unsigned long index; /* An array with a reserved entry is not empty */ XA_BUG_ON(xa, !xa_empty(xa)); @@ -393,7 +393,7 @@ static noinline void check_reserve(struct xarray *xa) xa_reserve(xa, 6, GFP_KERNEL); xa_store_index(xa, 7, GFP_KERNEL); - xa_for_each(xa, entry, index, ULONG_MAX, XA_PRESENT) { + xa_for_each(xa, index, entry) { XA_BUG_ON(xa, index != 5 && index != 7); } xa_destroy(xa); @@ -812,17 +812,16 @@ static noinline void check_find_1(struct xarray *xa) static noinline void check_find_2(struct xarray *xa) { void *entry; - unsigned long i, j, index = 0; + unsigned long i, j, index; - xa_for_each(xa, entry, index, ULONG_MAX, XA_PRESENT) { + xa_for_each(xa, index, entry) { XA_BUG_ON(xa, true); } for (i = 0; i < 1024; i++) { xa_store_index(xa, index, GFP_KERNEL); j = 0; - index = 0; - xa_for_each(xa, entry, index, ULONG_MAX, XA_PRESENT) { + xa_for_each(xa, index, entry) { XA_BUG_ON(xa, xa_mk_index(index) != entry); XA_BUG_ON(xa, index != j++); } -- cgit v1.2.3 From 76b4e52995654af260f14558e0e07b5b039ae202 Mon Sep 17 00:00:00 2001 From: Matthew Wilcox Date: Fri, 28 Dec 2018 23:20:44 -0500 Subject: XArray: Permit storing 2-byte-aligned pointers On m68k, statically allocated pointers may only be two-byte aligned. This clashes with the XArray's method for tagging internal pointers. Permit storing these pointers in single slots (ie not in multislots). Signed-off-by: Matthew Wilcox --- include/linux/xarray.h | 18 +++++++++++++++--- lib/test_xarray.c | 30 ++++++++++++++++++++++++++++++ lib/xarray.c | 22 +++++++++++++--------- 3 files changed, 58 insertions(+), 12 deletions(-) (limited to 'include/linux/xarray.h') diff --git a/include/linux/xarray.h b/include/linux/xarray.h index 3d0ce8b267e3..435c25b29079 100644 --- a/include/linux/xarray.h +++ b/include/linux/xarray.h @@ -176,7 +176,8 @@ static inline bool xa_is_internal(const void *entry) */ static inline bool xa_is_err(const void *entry) { - return unlikely(xa_is_internal(entry)); + return unlikely(xa_is_internal(entry) && + (unsigned long)entry >= -((MAX_ERRNO << 2) + 2)); } /** @@ -1039,8 +1040,8 @@ static inline bool xa_is_sibling(const void *entry) (entry < xa_mk_sibling(XA_CHUNK_SIZE - 1)); } -#define XA_ZERO_ENTRY xa_mk_internal(256) -#define XA_RETRY_ENTRY xa_mk_internal(257) +#define XA_RETRY_ENTRY xa_mk_internal(256) +#define XA_ZERO_ENTRY xa_mk_internal(257) /** * xa_is_zero() - Is the entry a zero entry? @@ -1064,6 +1065,17 @@ static inline bool xa_is_retry(const void *entry) return unlikely(entry == XA_RETRY_ENTRY); } +/** + * xa_is_advanced() - Is the entry only permitted for the advanced API? 
+ * @entry: Entry to be stored in the XArray. + * + * Return: %true if the entry cannot be stored by the normal API. + */ +static inline bool xa_is_advanced(const void *entry) +{ + return xa_is_internal(entry) && (entry <= XA_RETRY_ENTRY); +} + /** * typedef xa_update_node_t - A callback function from the XArray. * @node: The node which is being processed diff --git a/lib/test_xarray.c b/lib/test_xarray.c index dc02eff562b8..6e0212a60b08 100644 --- a/lib/test_xarray.c +++ b/lib/test_xarray.c @@ -1184,6 +1184,35 @@ static noinline void check_store_range(struct xarray *xa) } } +static void check_align_1(struct xarray *xa, char *name) +{ + int i; + unsigned int id; + unsigned long index; + void *entry; + + for (i = 0; i < 8; i++) { + id = 0; + XA_BUG_ON(xa, xa_alloc(xa, &id, UINT_MAX, name + i, GFP_KERNEL) + != 0); + XA_BUG_ON(xa, id != i); + } + xa_for_each(xa, index, entry) + XA_BUG_ON(xa, xa_is_err(entry)); + xa_destroy(xa); +} + +static noinline void check_align(struct xarray *xa) +{ + char name[] = "Motorola 68000"; + + check_align_1(xa, name); + check_align_1(xa, name + 1); + check_align_1(xa, name + 2); + check_align_1(xa, name + 3); +// check_align_2(xa, name); +} + static LIST_HEAD(shadow_nodes); static void test_update_node(struct xa_node *node) @@ -1333,6 +1362,7 @@ static int xarray_checks(void) check_create_range(&array); check_store_range(&array); check_store_iter(&array); + check_align(&xa0); check_workingset(&array, 0); check_workingset(&array, 64); diff --git a/lib/xarray.c b/lib/xarray.c index dda6026d202e..bffa26b1f0d6 100644 --- a/lib/xarray.c +++ b/lib/xarray.c @@ -232,6 +232,8 @@ void *xas_load(struct xa_state *xas) if (xas->xa_shift > node->shift) break; entry = xas_descend(xas, node); + if (node->shift == 0) + break; } return entry; } @@ -506,7 +508,7 @@ static void xas_free_nodes(struct xa_state *xas, struct xa_node *top) for (;;) { void *entry = xa_entry_locked(xas->xa, node, offset); - if (xa_is_node(entry)) { + if (node->shift && xa_is_node(entry)) { node = xa_to_node(entry); offset = 0; continue; @@ -604,6 +606,7 @@ static int xas_expand(struct xa_state *xas, void *head) /* * xas_create() - Create a slot to store an entry in. * @xas: XArray operation state. + * @allow_root: %true if we can store the entry in the root directly * * Most users will not need to call this function directly, as it is called * by xas_store(). It is useful for doing conditional store operations @@ -613,7 +616,7 @@ static int xas_expand(struct xa_state *xas, void *head) * If the slot was newly created, returns %NULL. If it failed to create the * slot, returns %NULL and indicates the error in @xas. 
*/ -static void *xas_create(struct xa_state *xas) +static void *xas_create(struct xa_state *xas, bool allow_root) { struct xarray *xa = xas->xa; void *entry; @@ -628,6 +631,8 @@ static void *xas_create(struct xa_state *xas) shift = xas_expand(xas, entry); if (shift < 0) return NULL; + if (!shift && !allow_root) + shift = XA_CHUNK_SHIFT; entry = xa_head_locked(xa); slot = &xa->xa_head; } else if (xas_error(xas)) { @@ -687,7 +692,7 @@ void xas_create_range(struct xa_state *xas) xas->xa_sibs = 0; for (;;) { - xas_create(xas); + xas_create(xas, true); if (xas_error(xas)) goto restore; if (xas->xa_index <= (index | XA_CHUNK_MASK)) @@ -754,7 +759,7 @@ void *xas_store(struct xa_state *xas, void *entry) bool value = xa_is_value(entry); if (entry) - first = xas_create(xas); + first = xas_create(xas, !xa_is_node(entry)); else first = xas_load(xas); @@ -1279,7 +1284,6 @@ static void *xas_result(struct xa_state *xas, void *curr) { if (xa_is_zero(curr)) return NULL; - XA_NODE_BUG_ON(xas->xa_node, xa_is_internal(curr)); if (xas_error(xas)) curr = xas->xa_node; return curr; @@ -1349,7 +1353,7 @@ void *__xa_store(struct xarray *xa, unsigned long index, void *entry, gfp_t gfp) XA_STATE(xas, xa, index); void *curr; - if (WARN_ON_ONCE(xa_is_internal(entry))) + if (WARN_ON_ONCE(xa_is_advanced(entry))) return XA_ERROR(-EINVAL); if (xa_track_free(xa) && !entry) entry = XA_ZERO_ENTRY; @@ -1415,7 +1419,7 @@ void *__xa_cmpxchg(struct xarray *xa, unsigned long index, XA_STATE(xas, xa, index); void *curr; - if (WARN_ON_ONCE(xa_is_internal(entry))) + if (WARN_ON_ONCE(xa_is_advanced(entry))) return XA_ERROR(-EINVAL); if (xa_track_free(xa) && !entry) entry = XA_ZERO_ENTRY; @@ -1538,7 +1542,7 @@ void *xa_store_range(struct xarray *xa, unsigned long first, if (last + 1) order = __ffs(last + 1); xas_set_order(&xas, last, order); - xas_create(&xas); + xas_create(&xas, true); if (xas_error(&xas)) goto unlock; } @@ -1580,7 +1584,7 @@ int __xa_alloc(struct xarray *xa, u32 *id, u32 max, void *entry, gfp_t gfp) XA_STATE(xas, xa, 0); int err; - if (WARN_ON_ONCE(xa_is_internal(entry))) + if (WARN_ON_ONCE(xa_is_advanced(entry))) return -EINVAL; if (WARN_ON_ONCE(!xa_track_free(xa))) return -EINVAL; -- cgit v1.2.3 From b0606fed6eece16a421034eca0bbea9a08b90e91 Mon Sep 17 00:00:00 2001 From: Matthew Wilcox Date: Wed, 2 Jan 2019 13:57:03 -0500 Subject: XArray: Honour reserved entries in xa_insert xa_insert() should treat reserved entries as occupied, not as available. Also, it should treat requests to insert a NULL pointer as a request to reserve the slot. Add xa_insert_bh() and xa_insert_irq() for completeness. Signed-off-by: Matthew Wilcox --- Documentation/core-api/xarray.rst | 15 +++--- include/linux/xarray.h | 110 ++++++++++++++++++++++++-------------- lib/test_xarray.c | 8 +-- lib/xarray.c | 41 ++++++++++++++ 4 files changed, 126 insertions(+), 48 deletions(-) (limited to 'include/linux/xarray.h') diff --git a/Documentation/core-api/xarray.rst b/Documentation/core-api/xarray.rst index 6a6d67acaf69..5d54b27c6eba 100644 --- a/Documentation/core-api/xarray.rst +++ b/Documentation/core-api/xarray.rst @@ -108,12 +108,13 @@ some, but not all of the other indices changing. Sometimes you need to ensure that a subsequent call to :c:func:`xa_store` will not need to allocate memory. The :c:func:`xa_reserve` function -will store a reserved entry at the indicated index. Users of the normal -API will see this entry as containing ``NULL``. 
If you do not need to -use the reserved entry, you can call :c:func:`xa_release` to remove the -unused entry. If another user has stored to the entry in the meantime, -:c:func:`xa_release` will do nothing; if instead you want the entry to -become ``NULL``, you should use :c:func:`xa_erase`. +will store a reserved entry at the indicated index. Users of the +normal API will see this entry as containing ``NULL``. If you do +not need to use the reserved entry, you can call :c:func:`xa_release` +to remove the unused entry. If another user has stored to the entry +in the meantime, :c:func:`xa_release` will do nothing; if instead you +want the entry to become ``NULL``, you should use :c:func:`xa_erase`. +Using :c:func:`xa_insert` on a reserved entry will fail. If all entries in the array are ``NULL``, the :c:func:`xa_empty` function will return ``true``. @@ -183,6 +184,8 @@ Takes xa_lock internally: * :c:func:`xa_store_bh` * :c:func:`xa_store_irq` * :c:func:`xa_insert` + * :c:func:`xa_insert_bh` + * :c:func:`xa_insert_irq` * :c:func:`xa_erase` * :c:func:`xa_erase_bh` * :c:func:`xa_erase_irq` diff --git a/include/linux/xarray.h b/include/linux/xarray.h index 435c25b29079..12244aa98a69 100644 --- a/include/linux/xarray.h +++ b/include/linux/xarray.h @@ -463,39 +463,12 @@ void *__xa_erase(struct xarray *, unsigned long index); void *__xa_store(struct xarray *, unsigned long index, void *entry, gfp_t); void *__xa_cmpxchg(struct xarray *, unsigned long index, void *old, void *entry, gfp_t); +int __xa_insert(struct xarray *, unsigned long index, void *entry, gfp_t); int __xa_alloc(struct xarray *, u32 *id, u32 max, void *entry, gfp_t); int __xa_reserve(struct xarray *, unsigned long index, gfp_t); void __xa_set_mark(struct xarray *, unsigned long index, xa_mark_t); void __xa_clear_mark(struct xarray *, unsigned long index, xa_mark_t); -/** - * __xa_insert() - Store this entry in the XArray unless another entry is - * already present. - * @xa: XArray. - * @index: Index into array. - * @entry: New entry. - * @gfp: Memory allocation flags. - * - * If you would rather see the existing entry in the array, use __xa_cmpxchg(). - * This function is for users who don't care what the entry is, only that - * one is present. - * - * Context: Any context. Expects xa_lock to be held on entry. May - * release and reacquire xa_lock if the @gfp flags permit. - * Return: 0 if the store succeeded. -EEXIST if another entry was present. - * -ENOMEM if memory could not be allocated. - */ -static inline int __xa_insert(struct xarray *xa, unsigned long index, - void *entry, gfp_t gfp) -{ - void *curr = __xa_cmpxchg(xa, index, NULL, entry, gfp); - if (!curr) - return 0; - if (xa_is_err(curr)) - return xa_err(curr); - return -EEXIST; -} - /** * xa_store_bh() - Store this entry in the XArray. * @xa: XArray. @@ -685,24 +658,83 @@ static inline void *xa_cmpxchg_irq(struct xarray *xa, unsigned long index, * @entry: New entry. * @gfp: Memory allocation flags. * - * If you would rather see the existing entry in the array, use xa_cmpxchg(). - * This function is for users who don't care what the entry is, only that - * one is present. + * Inserting a NULL entry will store a reserved entry (like xa_reserve()) + * if no entry is present. Inserting will fail if a reserved entry is + * present, even though loading from this index will return NULL. * - * Context: Process context. Takes and releases the xa_lock. - * May sleep if the @gfp flags permit. + * Context: Any context. Takes and releases the xa_lock. 
May sleep if + * the @gfp flags permit. * Return: 0 if the store succeeded. -EEXIST if another entry was present. * -ENOMEM if memory could not be allocated. */ static inline int xa_insert(struct xarray *xa, unsigned long index, void *entry, gfp_t gfp) { - void *curr = xa_cmpxchg(xa, index, NULL, entry, gfp); - if (!curr) - return 0; - if (xa_is_err(curr)) - return xa_err(curr); - return -EEXIST; + int err; + + xa_lock(xa); + err = __xa_insert(xa, index, entry, gfp); + xa_unlock(xa); + + return err; +} + +/** + * xa_insert_bh() - Store this entry in the XArray unless another entry is + * already present. + * @xa: XArray. + * @index: Index into array. + * @entry: New entry. + * @gfp: Memory allocation flags. + * + * Inserting a NULL entry will store a reserved entry (like xa_reserve()) + * if no entry is present. Inserting will fail if a reserved entry is + * present, even though loading from this index will return NULL. + * + * Context: Any context. Takes and releases the xa_lock while + * disabling softirqs. May sleep if the @gfp flags permit. + * Return: 0 if the store succeeded. -EEXIST if another entry was present. + * -ENOMEM if memory could not be allocated. + */ +static inline int xa_insert_bh(struct xarray *xa, unsigned long index, + void *entry, gfp_t gfp) +{ + int err; + + xa_lock_bh(xa); + err = __xa_insert(xa, index, entry, gfp); + xa_unlock_bh(xa); + + return err; +} + +/** + * xa_insert_irq() - Store this entry in the XArray unless another entry is + * already present. + * @xa: XArray. + * @index: Index into array. + * @entry: New entry. + * @gfp: Memory allocation flags. + * + * Inserting a NULL entry will store a reserved entry (like xa_reserve()) + * if no entry is present. Inserting will fail if a reserved entry is + * present, even though loading from this index will return NULL. + * + * Context: Process context. Takes and releases the xa_lock while + * disabling interrupts. May sleep if the @gfp flags permit. + * Return: 0 if the store succeeded. -EEXIST if another entry was present. + * -ENOMEM if memory could not be allocated. + */ +static inline int xa_insert_irq(struct xarray *xa, unsigned long index, + void *entry, gfp_t gfp) +{ + int err; + + xa_lock_irq(xa); + err = __xa_insert(xa, index, entry, gfp); + xa_unlock_irq(xa); + + return err; } /** diff --git a/lib/test_xarray.c b/lib/test_xarray.c index 6e0212a60b08..3cf17338b0a4 100644 --- a/lib/test_xarray.c +++ b/lib/test_xarray.c @@ -382,10 +382,12 @@ static noinline void check_reserve(struct xarray *xa) xa_erase_index(xa, 12345678); XA_BUG_ON(xa, !xa_empty(xa)); - /* And so does xa_insert */ + /* But xa_insert does not */ xa_reserve(xa, 12345678, GFP_KERNEL); - XA_BUG_ON(xa, xa_insert(xa, 12345678, xa_mk_value(12345678), 0) != 0); - xa_erase_index(xa, 12345678); + XA_BUG_ON(xa, xa_insert(xa, 12345678, xa_mk_value(12345678), 0) != + -EEXIST); + XA_BUG_ON(xa, xa_empty(xa)); + XA_BUG_ON(xa, xa_erase(xa, 12345678) != NULL); XA_BUG_ON(xa, !xa_empty(xa)); /* Can iterate through a reserved entry */ diff --git a/lib/xarray.c b/lib/xarray.c index bffa26b1f0d6..81c3171ddde9 100644 --- a/lib/xarray.c +++ b/lib/xarray.c @@ -1439,6 +1439,47 @@ void *__xa_cmpxchg(struct xarray *xa, unsigned long index, } EXPORT_SYMBOL(__xa_cmpxchg); +/** + * __xa_insert() - Store this entry in the XArray if no entry is present. + * @xa: XArray. + * @index: Index into array. + * @entry: New entry. + * @gfp: Memory allocation flags. + * + * Inserting a NULL entry will store a reserved entry (like xa_reserve()) + * if no entry is present. 
Inserting will fail if a reserved entry is + * present, even though loading from this index will return NULL. + * + * Context: Any context. Expects xa_lock to be held on entry. May + * release and reacquire xa_lock if @gfp flags permit. + * Return: 0 if the store succeeded. -EEXIST if another entry was present. + * -ENOMEM if memory could not be allocated. + */ +int __xa_insert(struct xarray *xa, unsigned long index, void *entry, gfp_t gfp) +{ + XA_STATE(xas, xa, index); + void *curr; + + if (WARN_ON_ONCE(xa_is_advanced(entry))) + return -EINVAL; + if (!entry) + entry = XA_ZERO_ENTRY; + + do { + curr = xas_load(&xas); + if (!curr) { + xas_store(&xas, entry); + if (xa_track_free(xa)) + xas_clear_mark(&xas, XA_FREE_MARK); + } else { + xas_set_err(&xas, -EEXIST); + } + } while (__xas_nomem(&xas, gfp)); + + return xas_error(&xas); +} +EXPORT_SYMBOL(__xa_insert); + /** * __xa_reserve() - Reserve this index in the XArray. * @xa: XArray. -- cgit v1.2.3 From 19ba9ecf24189bd74d070aa1b1c4bcb9fe4ae849 Mon Sep 17 00:00:00 2001 From: Cyrill Gorcunov Date: Mon, 14 Jan 2019 11:40:47 +0300 Subject: XArray: Fix typo in comment Seems copy and paste typo, not a big deal but still for consistency sake better to fix. Signed-off-by: Cyrill Gorcunov Signed-off-by: Matthew Wilcox --- include/linux/xarray.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include/linux/xarray.h') diff --git a/include/linux/xarray.h b/include/linux/xarray.h index 12244aa98a69..7da665f5cb20 100644 --- a/include/linux/xarray.h +++ b/include/linux/xarray.h @@ -496,7 +496,7 @@ static inline void *xa_store_bh(struct xarray *xa, unsigned long index, } /** - * xa_store_irq() - Erase this entry from the XArray. + * xa_store_irq() - Store this entry in the XArray. * @xa: XArray. * @index: Index into array. * @entry: New entry. -- cgit v1.2.3 From edcddd4c879af48ec922d680b2d56834c085683b Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Thu, 17 Jan 2019 07:15:35 -0500 Subject: XArray: Fix an arithmetic error in xa_is_err There is a math problem here which leads to a lot of static checker warnings for me: net/sunrpc/clnt.c:451 rpc_new_client() error: (-4096) too low for ERR_PTR Error values are from -1 to -4095 or from 0xffffffff to 0xfffff001 in hexadecimal. (I am assuming a 32 bit system for simplicity). We are using the lowest two bits to hold some internal XArray data so the error is shifted two spaces to the left. 0xfffff001 << 2 is 0xffffc004. And finally we want to check that BIT(1) is set so we add 2 which gives us 0xffffc006. In other words, we should be checking that "entry >= 0xffffc006", but the check is actually testing if "entry >= 0xffffc002". 
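(For illustration only — an editorial aside, not part of Dan's patch, and the helper name below is invented.) The fixed boundary follows directly from the entry encoding: an errno is stored as (err << 2) | 2, which is the same bit pattern xa_mk_internal() produces, so the smallest valid error entry is xa_mk_internal(-MAX_ERRNO):

	#include <linux/err.h>		/* MAX_ERRNO */
	#include <linux/xarray.h>

	/* Sketch: round-trip an errno through the error-entry encoding. */
	static void xa_err_boundary_demo(void)
	{
		void *entry = xa_mk_internal((unsigned long)-ENOMEM);

		WARN_ON(!xa_is_err(entry));		/* ((-12) << 2) | 2 is an error entry */
		WARN_ON(xa_err(entry) != -ENOMEM);	/* decodes back to -12 */

		/* 0xffffc006 on a 32-bit system: the lowest error entry */
		WARN_ON(!xa_is_err(xa_mk_internal(-MAX_ERRNO)));
		/* 0xffffc002 is an internal entry but not an error; the old check accepted it */
		WARN_ON(xa_is_err(xa_mk_internal(-MAX_ERRNO - 1)));
	}
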
Fixes: 76b4e5299565 ("XArray: Permit storing 2-byte-aligned pointers") Signed-off-by: Dan Carpenter [Use xa_mk_internal() instead of changing the bracketing] Signed-off-by: Matthew Wilcox --- include/linux/xarray.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include/linux/xarray.h') diff --git a/include/linux/xarray.h b/include/linux/xarray.h index 7da665f5cb20..5d9d318bcf7a 100644 --- a/include/linux/xarray.h +++ b/include/linux/xarray.h @@ -177,7 +177,7 @@ static inline bool xa_is_internal(const void *entry) static inline bool xa_is_err(const void *entry) { return unlikely(xa_is_internal(entry) && - (unsigned long)entry >= -((MAX_ERRNO << 2) + 2)); + entry >= xa_mk_internal(-MAX_ERRNO)); } /** -- cgit v1.2.3 From 809ab9371ca0a96b44d9866ad82849410759a45b Mon Sep 17 00:00:00 2001 From: Matthew Wilcox Date: Sat, 26 Jan 2019 00:52:26 -0500 Subject: XArray: Update xa_erase family descriptions xa_erase does not allocate memory and doesn't have a gfp parameter. Update the descriptions of all four variants to be more useful. Signed-off-by: Matthew Wilcox --- include/linux/xarray.h | 12 ++++++------ lib/xarray.c | 17 ++++++++--------- 2 files changed, 14 insertions(+), 15 deletions(-) (limited to 'include/linux/xarray.h') diff --git a/include/linux/xarray.h b/include/linux/xarray.h index 5d9d318bcf7a..e11841537631 100644 --- a/include/linux/xarray.h +++ b/include/linux/xarray.h @@ -526,9 +526,9 @@ static inline void *xa_store_irq(struct xarray *xa, unsigned long index, * @xa: XArray. * @index: Index of entry. * - * This function is the equivalent of calling xa_store() with %NULL as - * the third argument. The XArray does not need to allocate memory, so - * the user does not need to provide GFP flags. + * After this function returns, loading from @index will return %NULL. + * If the index is part of a multi-index entry, all indices will be erased + * and none of the entries will be part of a multi-index entry. * * Context: Any context. Takes and releases the xa_lock while * disabling softirqs. @@ -550,9 +550,9 @@ static inline void *xa_erase_bh(struct xarray *xa, unsigned long index) * @xa: XArray. * @index: Index of entry. * - * This function is the equivalent of calling xa_store() with %NULL as - * the third argument. The XArray does not need to allocate memory, so - * the user does not need to provide GFP flags. + * After this function returns, loading from @index will return %NULL. + * If the index is part of a multi-index entry, all indices will be erased + * and none of the entries will be part of a multi-index entry. * * Context: Process context. Takes and releases the xa_lock while * disabling interrupts. diff --git a/lib/xarray.c b/lib/xarray.c index 81c3171ddde9..fb783bf2a441 100644 --- a/lib/xarray.c +++ b/lib/xarray.c @@ -1294,13 +1294,12 @@ static void *xas_result(struct xa_state *xas, void *curr) * @xa: XArray. * @index: Index into array. * - * If the entry at this index is a multi-index entry then all indices will - * be erased, and the entry will no longer be a multi-index entry. - * This function expects the xa_lock to be held on entry. + * After this function returns, loading from @index will return %NULL. + * If the index is part of a multi-index entry, all indices will be erased + * and none of the entries will be part of a multi-index entry. * - * Context: Any context. Expects xa_lock to be held on entry. May - * release and reacquire xa_lock if @gfp flags permit. - * Return: The old entry at this index. + * Context: Any context. 
Expects xa_lock to be held on entry. + * Return: The entry which used to be at this index. */ void *__xa_erase(struct xarray *xa, unsigned long index) { @@ -1314,9 +1313,9 @@ EXPORT_SYMBOL(__xa_erase); * @xa: XArray. * @index: Index of entry. * - * This function is the equivalent of calling xa_store() with %NULL as - * the third argument. The XArray does not need to allocate memory, so - * the user does not need to provide GFP flags. + * After this function returns, loading from @index will return %NULL. + * If the index is part of a multi-index entry, all indices will be erased + * and none of the entries will be part of a multi-index entry. * * Context: Any context. Takes and releases the xa_lock. * Return: The entry which used to be at this index. -- cgit v1.2.3 From fd9dc93e36231fb6d520e0edd467058fad4fd12d Mon Sep 17 00:00:00 2001 From: Matthew Wilcox Date: Wed, 6 Feb 2019 13:07:11 -0500 Subject: XArray: Change xa_insert to return -EBUSY Userspace translates EEXIST to "File exists" which isn't a very good error message for the problem. "Device or resource busy" is a better indication of what went wrong. Signed-off-by: Matthew Wilcox --- Documentation/core-api/xarray.rst | 2 +- fs/nilfs2/btnode.c | 2 +- include/linux/xarray.h | 6 +++--- lib/test_xarray.c | 4 ++-- lib/xarray.c | 4 ++-- 5 files changed, 9 insertions(+), 9 deletions(-) (limited to 'include/linux/xarray.h') diff --git a/Documentation/core-api/xarray.rst b/Documentation/core-api/xarray.rst index 5d54b27c6eba..42bb1a62650f 100644 --- a/Documentation/core-api/xarray.rst +++ b/Documentation/core-api/xarray.rst @@ -85,7 +85,7 @@ which was at that index; if it returns the same entry which was passed as If you want to only store a new entry to an index if the current entry at that index is ``NULL``, you can use :c:func:`xa_insert` which -returns ``-EEXIST`` if the entry is not empty. +returns ``-EBUSY`` if the entry is not empty. You can enquire whether a mark is set on an entry by using :c:func:`xa_get_mark`. If the entry is not ``NULL``, you can set a mark diff --git a/fs/nilfs2/btnode.c b/fs/nilfs2/btnode.c index f2129a5d9f23..4391fd3abd8f 100644 --- a/fs/nilfs2/btnode.c +++ b/fs/nilfs2/btnode.c @@ -189,7 +189,7 @@ retry: */ if (!err) return 0; - else if (err != -EEXIST) + else if (err != -EBUSY) goto failed_unlock; err = invalidate_inode_pages2_range(btnc, newkey, newkey); diff --git a/include/linux/xarray.h b/include/linux/xarray.h index e11841537631..57cf35c4d094 100644 --- a/include/linux/xarray.h +++ b/include/linux/xarray.h @@ -664,7 +664,7 @@ static inline void *xa_cmpxchg_irq(struct xarray *xa, unsigned long index, * * Context: Any context. Takes and releases the xa_lock. May sleep if * the @gfp flags permit. - * Return: 0 if the store succeeded. -EEXIST if another entry was present. + * Return: 0 if the store succeeded. -EBUSY if another entry was present. * -ENOMEM if memory could not be allocated. */ static inline int xa_insert(struct xarray *xa, unsigned long index, @@ -693,7 +693,7 @@ static inline int xa_insert(struct xarray *xa, unsigned long index, * * Context: Any context. Takes and releases the xa_lock while * disabling softirqs. May sleep if the @gfp flags permit. - * Return: 0 if the store succeeded. -EEXIST if another entry was present. + * Return: 0 if the store succeeded. -EBUSY if another entry was present. * -ENOMEM if memory could not be allocated. 
*/ static inline int xa_insert_bh(struct xarray *xa, unsigned long index, @@ -722,7 +722,7 @@ static inline int xa_insert_bh(struct xarray *xa, unsigned long index, * * Context: Process context. Takes and releases the xa_lock while * disabling interrupts. May sleep if the @gfp flags permit. - * Return: 0 if the store succeeded. -EEXIST if another entry was present. + * Return: 0 if the store succeeded. -EBUSY if another entry was present. * -ENOMEM if memory could not be allocated. */ static inline int xa_insert_irq(struct xarray *xa, unsigned long index, diff --git a/lib/test_xarray.c b/lib/test_xarray.c index 671a93ee09e6..9d894e93456c 100644 --- a/lib/test_xarray.c +++ b/lib/test_xarray.c @@ -346,7 +346,7 @@ static noinline void check_cmpxchg(struct xarray *xa) XA_BUG_ON(xa, !xa_empty(xa)); XA_BUG_ON(xa, xa_store_index(xa, 12345678, GFP_KERNEL) != NULL); - XA_BUG_ON(xa, xa_insert(xa, 12345678, xa, GFP_KERNEL) != -EEXIST); + XA_BUG_ON(xa, xa_insert(xa, 12345678, xa, GFP_KERNEL) != -EBUSY); XA_BUG_ON(xa, xa_cmpxchg(xa, 12345678, SIX, FIVE, GFP_KERNEL) != LOTS); XA_BUG_ON(xa, xa_cmpxchg(xa, 12345678, LOTS, FIVE, GFP_KERNEL) != LOTS); XA_BUG_ON(xa, xa_cmpxchg(xa, 12345678, FIVE, LOTS, GFP_KERNEL) != FIVE); @@ -388,7 +388,7 @@ static noinline void check_reserve(struct xarray *xa) /* But xa_insert does not */ xa_reserve(xa, 12345678, GFP_KERNEL); XA_BUG_ON(xa, xa_insert(xa, 12345678, xa_mk_value(12345678), 0) != - -EEXIST); + -EBUSY); XA_BUG_ON(xa, xa_empty(xa)); XA_BUG_ON(xa, xa_erase(xa, 12345678) != NULL); XA_BUG_ON(xa, !xa_empty(xa)); diff --git a/lib/xarray.c b/lib/xarray.c index fb783bf2a441..1b97ca58bd15 100644 --- a/lib/xarray.c +++ b/lib/xarray.c @@ -1451,7 +1451,7 @@ EXPORT_SYMBOL(__xa_cmpxchg); * * Context: Any context. Expects xa_lock to be held on entry. May * release and reacquire xa_lock if @gfp flags permit. - * Return: 0 if the store succeeded. -EEXIST if another entry was present. + * Return: 0 if the store succeeded. -EBUSY if another entry was present. * -ENOMEM if memory could not be allocated. */ int __xa_insert(struct xarray *xa, unsigned long index, void *entry, gfp_t gfp) @@ -1471,7 +1471,7 @@ int __xa_insert(struct xarray *xa, unsigned long index, void *entry, gfp_t gfp) if (xa_track_free(xa)) xas_clear_mark(&xas, XA_FREE_MARK); } else { - xas_set_err(&xas, -EEXIST); + xas_set_err(&xas, -EBUSY); } } while (__xas_nomem(&xas, gfp)); -- cgit v1.2.3 From 3ccaf57a6a63ad171a951dcaddffc453b2414c7b Mon Sep 17 00:00:00 2001 From: Matthew Wilcox Date: Fri, 26 Oct 2018 14:43:22 -0400 Subject: XArray: Add support for 1s-based allocation A lot of places want to allocate IDs starting at 1 instead of 0. While the xa_alloc() API supports this, it's not very efficient if lots of IDs are allocated, due to having to walk down to the bottom of the tree to see if ID 1 is available, then all the way over to the next non-allocated ID. This method marks ID 0 as being occupied which wastes one slot in the XArray, but preserves xa_empty() as working. 
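(Editorial usage sketch, not part of this patch: the array and function names are invented, and it uses the xa_alloc() calling convention as it stands at this point in the series, where @id supplies the first index to try.)

	#include <linux/xarray.h>

	static DEFINE_XARRAY_ALLOC1(demo_ids);	/* same as XA_FLAGS_ALLOC1 */

	static int demo_assign_id(void *item)
	{
		u32 id = 1;	/* lowest index we are willing to accept */
		int err;

		err = xa_alloc(&demo_ids, &id, UINT_MAX, item, GFP_KERNEL);
		if (err)
			return err;
		/*
		 * id now holds the lowest free index >= 1.  Index 0 is kept
		 * internally busy so it is never handed out, yet it still
		 * reads back as NULL and xa_empty() keeps working as
		 * described above.
		 */
		return 0;
	}
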
Signed-off-by: Matthew Wilcox --- Documentation/core-api/xarray.rst | 10 +++-- include/linux/xarray.h | 14 ++++++- lib/test_xarray.c | 88 ++++++++++++++++++++++++--------------- lib/xarray.c | 11 +++++ 4 files changed, 86 insertions(+), 37 deletions(-) (limited to 'include/linux/xarray.h') diff --git a/Documentation/core-api/xarray.rst b/Documentation/core-api/xarray.rst index 42bb1a62650f..e90c4925cd37 100644 --- a/Documentation/core-api/xarray.rst +++ b/Documentation/core-api/xarray.rst @@ -131,17 +131,21 @@ If you use :c:func:`DEFINE_XARRAY_ALLOC` to define the XArray, or initialise it by passing ``XA_FLAGS_ALLOC`` to :c:func:`xa_init_flags`, the XArray changes to track whether entries are in use or not. -You can call :c:func:`xa_alloc` to store the entry at any unused index +You can call :c:func:`xa_alloc` to store the entry at an unused index in the XArray. If you need to modify the array from interrupt context, you can use :c:func:`xa_alloc_bh` or :c:func:`xa_alloc_irq` to disable interrupts while allocating the ID. -Using :c:func:`xa_store`, :c:func:`xa_cmpxchg` or :c:func:`xa_insert` -will mark the entry as being allocated. Unlike a normal XArray, storing +Using :c:func:`xa_store`, :c:func:`xa_cmpxchg` or :c:func:`xa_insert` will +also mark the entry as being allocated. Unlike a normal XArray, storing ``NULL`` will mark the entry as being in use, like :c:func:`xa_reserve`. To free an entry, use :c:func:`xa_erase` (or :c:func:`xa_release` if you only want to free the entry if it's ``NULL``). +By default, the lowest free entry is allocated starting from 0. If you +want to allocate entries starting at 1, it is more efficient to use +:c:func:`DEFINE_XARRAY_ALLOC1` or ``XA_FLAGS_ALLOC1``. + You cannot use ``XA_MARK_0`` with an allocating XArray as this mark is used to track whether an entry is free or not. The other marks are available for your use. diff --git a/include/linux/xarray.h b/include/linux/xarray.h index 57cf35c4d094..99dd0838b4ba 100644 --- a/include/linux/xarray.h +++ b/include/linux/xarray.h @@ -220,10 +220,13 @@ enum xa_lock_type { #define XA_FLAGS_LOCK_IRQ ((__force gfp_t)XA_LOCK_IRQ) #define XA_FLAGS_LOCK_BH ((__force gfp_t)XA_LOCK_BH) #define XA_FLAGS_TRACK_FREE ((__force gfp_t)4U) +#define XA_FLAGS_ZERO_BUSY ((__force gfp_t)8U) #define XA_FLAGS_MARK(mark) ((__force gfp_t)((1U << __GFP_BITS_SHIFT) << \ (__force unsigned)(mark))) +/* ALLOC is for a normal 0-based alloc. ALLOC1 is for an 1-based alloc */ #define XA_FLAGS_ALLOC (XA_FLAGS_TRACK_FREE | XA_FLAGS_MARK(XA_FREE_MARK)) +#define XA_FLAGS_ALLOC1 (XA_FLAGS_TRACK_FREE | XA_FLAGS_ZERO_BUSY) /** * struct xarray - The anchor of the XArray. @@ -279,7 +282,7 @@ struct xarray { #define DEFINE_XARRAY(name) DEFINE_XARRAY_FLAGS(name, 0) /** - * DEFINE_XARRAY_ALLOC() - Define an XArray which can allocate IDs. + * DEFINE_XARRAY_ALLOC() - Define an XArray which allocates IDs starting at 0. * @name: A string that names your XArray. * * This is intended for file scope definitions of allocating XArrays. @@ -287,6 +290,15 @@ struct xarray { */ #define DEFINE_XARRAY_ALLOC(name) DEFINE_XARRAY_FLAGS(name, XA_FLAGS_ALLOC) +/** + * DEFINE_XARRAY_ALLOC1() - Define an XArray which allocates IDs starting at 1. + * @name: A string that names your XArray. + * + * This is intended for file scope definitions of allocating XArrays. + * See also DEFINE_XARRAY(). 
+ */ +#define DEFINE_XARRAY_ALLOC1(name) DEFINE_XARRAY_FLAGS(name, XA_FLAGS_ALLOC1) + void *xa_load(struct xarray *, unsigned long index); void *xa_store(struct xarray *, unsigned long index, void *entry, gfp_t); void *xa_erase(struct xarray *, unsigned long index); diff --git a/lib/test_xarray.c b/lib/test_xarray.c index 9d894e93456c..cd74f8f32abe 100644 --- a/lib/test_xarray.c +++ b/lib/test_xarray.c @@ -589,64 +589,86 @@ static noinline void check_multi_store(struct xarray *xa) #endif } -static DEFINE_XARRAY_ALLOC(xa0); - -static noinline void check_xa_alloc(void) +static noinline void check_xa_alloc_1(struct xarray *xa, unsigned int base) { int i; u32 id; - /* An empty array should assign 0 to the first alloc */ - xa_alloc_index(&xa0, 0, GFP_KERNEL); + XA_BUG_ON(xa, !xa_empty(xa)); + /* An empty array should assign %base to the first alloc */ + xa_alloc_index(xa, base, GFP_KERNEL); /* Erasing it should make the array empty again */ - xa_erase_index(&xa0, 0); - XA_BUG_ON(&xa0, !xa_empty(&xa0)); + xa_erase_index(xa, base); + XA_BUG_ON(xa, !xa_empty(xa)); + + /* And it should assign %base again */ + xa_alloc_index(xa, base, GFP_KERNEL); + + /* Allocating and then erasing a lot should not lose base */ + for (i = base + 1; i < 2 * XA_CHUNK_SIZE; i++) + xa_alloc_index(xa, i, GFP_KERNEL); + for (i = base; i < 2 * XA_CHUNK_SIZE; i++) + xa_erase_index(xa, i); + xa_alloc_index(xa, base, GFP_KERNEL); + + /* Destroying the array should do the same as erasing */ + xa_destroy(xa); - /* And it should assign 0 again */ - xa_alloc_index(&xa0, 0, GFP_KERNEL); + /* And it should assign %base again */ + xa_alloc_index(xa, base, GFP_KERNEL); - /* The next assigned ID should be 1 */ - xa_alloc_index(&xa0, 1, GFP_KERNEL); - xa_erase_index(&xa0, 1); + /* The next assigned ID should be base+1 */ + xa_alloc_index(xa, base + 1, GFP_KERNEL); + xa_erase_index(xa, base + 1); /* Storing a value should mark it used */ - xa_store_index(&xa0, 1, GFP_KERNEL); - xa_alloc_index(&xa0, 2, GFP_KERNEL); + xa_store_index(xa, base + 1, GFP_KERNEL); + xa_alloc_index(xa, base + 2, GFP_KERNEL); - /* If we then erase 0, it should be free */ - xa_erase_index(&xa0, 0); - xa_alloc_index(&xa0, 0, GFP_KERNEL); + /* If we then erase base, it should be free */ + xa_erase_index(xa, base); + xa_alloc_index(xa, base, GFP_KERNEL); - xa_erase_index(&xa0, 1); - xa_erase_index(&xa0, 2); + xa_erase_index(xa, base + 1); + xa_erase_index(xa, base + 2); for (i = 1; i < 5000; i++) { - xa_alloc_index(&xa0, i, GFP_KERNEL); + xa_alloc_index(xa, base + i, GFP_KERNEL); } - xa_destroy(&xa0); + xa_destroy(xa); + /* Check that we fail properly at the limit of allocation */ id = 0xfffffffeU; - XA_BUG_ON(&xa0, xa_alloc(&xa0, &id, UINT_MAX, xa_mk_index(id), + XA_BUG_ON(xa, xa_alloc(xa, &id, UINT_MAX, xa_mk_index(id), GFP_KERNEL) != 0); - XA_BUG_ON(&xa0, id != 0xfffffffeU); - XA_BUG_ON(&xa0, xa_alloc(&xa0, &id, UINT_MAX, xa_mk_index(id), + XA_BUG_ON(xa, id != 0xfffffffeU); + XA_BUG_ON(xa, xa_alloc(xa, &id, UINT_MAX, xa_mk_index(id), GFP_KERNEL) != 0); - XA_BUG_ON(&xa0, id != 0xffffffffU); - XA_BUG_ON(&xa0, xa_alloc(&xa0, &id, UINT_MAX, xa_mk_index(id), + XA_BUG_ON(xa, id != 0xffffffffU); + XA_BUG_ON(xa, xa_alloc(xa, &id, UINT_MAX, xa_mk_index(id), GFP_KERNEL) != -ENOSPC); - XA_BUG_ON(&xa0, id != 0xffffffffU); - xa_destroy(&xa0); + XA_BUG_ON(xa, id != 0xffffffffU); + xa_destroy(xa); id = 10; - XA_BUG_ON(&xa0, xa_alloc(&xa0, &id, 5, xa_mk_index(id), + XA_BUG_ON(xa, xa_alloc(xa, &id, 5, xa_mk_index(id), GFP_KERNEL) != -ENOSPC); - XA_BUG_ON(&xa0, 
xa_store_index(&xa0, 3, GFP_KERNEL) != 0); - XA_BUG_ON(&xa0, xa_alloc(&xa0, &id, 5, xa_mk_index(id), + XA_BUG_ON(xa, xa_store_index(xa, 3, GFP_KERNEL) != 0); + XA_BUG_ON(xa, xa_alloc(xa, &id, 5, xa_mk_index(id), GFP_KERNEL) != -ENOSPC); - xa_erase_index(&xa0, 3); - XA_BUG_ON(&xa0, !xa_empty(&xa0)); + xa_erase_index(xa, 3); + XA_BUG_ON(xa, !xa_empty(xa)); +} + +static DEFINE_XARRAY_ALLOC(xa0); +static DEFINE_XARRAY_ALLOC1(xa1); + +static noinline void check_xa_alloc(void) +{ + check_xa_alloc_1(&xa0, 0); + check_xa_alloc_1(&xa1, 1); } static noinline void __check_store_iter(struct xarray *xa, unsigned long start, diff --git a/lib/xarray.c b/lib/xarray.c index 1b97ca58bd15..468fb7b7963f 100644 --- a/lib/xarray.c +++ b/lib/xarray.c @@ -57,6 +57,11 @@ static inline bool xa_track_free(const struct xarray *xa) return xa->xa_flags & XA_FLAGS_TRACK_FREE; } +static inline bool xa_zero_busy(const struct xarray *xa) +{ + return xa->xa_flags & XA_FLAGS_ZERO_BUSY; +} + static inline void xa_mark_set(struct xarray *xa, xa_mark_t mark) { if (!(xa->xa_flags & XA_FLAGS_MARK(mark))) @@ -432,6 +437,8 @@ static void xas_shrink(struct xa_state *xas) break; if (!xa_is_node(entry) && node->shift) break; + if (xa_is_zero(entry) && xa_zero_busy(xa)) + entry = NULL; xas->xa_node = XAS_BOUNDS; RCU_INIT_POINTER(xa->xa_head, entry); @@ -628,6 +635,8 @@ static void *xas_create(struct xa_state *xas, bool allow_root) if (xas_top(node)) { entry = xa_head_locked(xa); xas->xa_node = NULL; + if (!entry && xa_zero_busy(xa)) + entry = XA_ZERO_ENTRY; shift = xas_expand(xas, entry); if (shift < 0) return NULL; @@ -1942,6 +1951,8 @@ void xa_destroy(struct xarray *xa) entry = xa_head_locked(xa); RCU_INIT_POINTER(xa->xa_head, NULL); xas_init_marks(&xas); + if (xa_zero_busy(xa)) + xa_mark_clear(xa, XA_FREE_MARK); /* lockdep checks we're still holding the lock in xas_free_nodes() */ if (xa_is_node(entry)) xas_free_nodes(&xas, xa_to_node(entry)); -- cgit v1.2.3 From a3e4d3f97ec844de005a679585c04c5c03dfbdb6 Mon Sep 17 00:00:00 2001 From: Matthew Wilcox Date: Mon, 31 Dec 2018 10:41:01 -0500 Subject: XArray: Redesign xa_alloc API It was too easy to forget to initialise the start index. Add an xa_limit data structure which can be used to pass min & max, and define a couple of special values for common cases. Also add some more tests cribbed from the IDR test suite. Change the return value from -ENOSPC to -EBUSY to match xa_insert(). Signed-off-by: Matthew Wilcox --- include/linux/xarray.h | 80 +++++++++++++++++++++++++++++----------------- lib/test_xarray.c | 86 ++++++++++++++++++++++++++++++++++++++++---------- lib/xarray.c | 29 ++++++++--------- 3 files changed, 135 insertions(+), 60 deletions(-) (limited to 'include/linux/xarray.h') diff --git a/include/linux/xarray.h b/include/linux/xarray.h index 99dd0838b4ba..883bb958e462 100644 --- a/include/linux/xarray.h +++ b/include/linux/xarray.h @@ -200,6 +200,27 @@ static inline int xa_err(void *entry) return 0; } +/** + * struct xa_limit - Represents a range of IDs. + * @min: The lowest ID to allocate (inclusive). + * @max: The maximum ID to allocate (inclusive). + * + * This structure is used either directly or via the XA_LIMIT() macro + * to communicate the range of IDs that are valid for allocation. 
+ * Two common ranges are predefined for you: + * * xa_limit_32b - [0 - UINT_MAX] + * * xa_limit_31b - [0 - INT_MAX] + */ +struct xa_limit { + u32 max; + u32 min; +}; + +#define XA_LIMIT(_min, _max) (struct xa_limit) { .min = _min, .max = _max } + +#define xa_limit_32b XA_LIMIT(0, UINT_MAX) +#define xa_limit_31b XA_LIMIT(0, INT_MAX) + typedef unsigned __bitwise xa_mark_t; #define XA_MARK_0 ((__force xa_mark_t)0U) #define XA_MARK_1 ((__force xa_mark_t)1U) @@ -476,7 +497,8 @@ void *__xa_store(struct xarray *, unsigned long index, void *entry, gfp_t); void *__xa_cmpxchg(struct xarray *, unsigned long index, void *old, void *entry, gfp_t); int __xa_insert(struct xarray *, unsigned long index, void *entry, gfp_t); -int __xa_alloc(struct xarray *, u32 *id, u32 max, void *entry, gfp_t); +int __must_check __xa_alloc(struct xarray *, u32 *id, void *entry, + struct xa_limit, gfp_t); int __xa_reserve(struct xarray *, unsigned long index, gfp_t); void __xa_set_mark(struct xarray *, unsigned long index, xa_mark_t); void __xa_clear_mark(struct xarray *, unsigned long index, xa_mark_t); @@ -753,26 +775,26 @@ static inline int xa_insert_irq(struct xarray *xa, unsigned long index, * xa_alloc() - Find somewhere to store this entry in the XArray. * @xa: XArray. * @id: Pointer to ID. - * @max: Maximum ID to allocate (inclusive). * @entry: New entry. + * @limit: Range of ID to allocate. * @gfp: Memory allocation flags. * - * Allocates an unused ID in the range specified by @id and @max. - * Updates the @id pointer with the index, then stores the entry at that - * index. A concurrent lookup will not see an uninitialised @id. + * Finds an empty entry in @xa between @limit.min and @limit.max, + * stores the index into the @id pointer, then stores the entry at + * that index. A concurrent lookup will not see an uninitialised @id. * - * Context: Process context. Takes and releases the xa_lock. May sleep if + * Context: Any context. Takes and releases the xa_lock. May sleep if * the @gfp flags permit. - * Return: 0 on success, -ENOMEM if memory allocation fails or -ENOSPC if - * there is no more space in the XArray. + * Return: 0 on success, -ENOMEM if memory could not be allocated or + * -EBUSY if there are no free entries in @limit. */ -static inline int xa_alloc(struct xarray *xa, u32 *id, u32 max, void *entry, - gfp_t gfp) +static inline __must_check int xa_alloc(struct xarray *xa, u32 *id, + void *entry, struct xa_limit limit, gfp_t gfp) { int err; xa_lock(xa); - err = __xa_alloc(xa, id, max, entry, gfp); + err = __xa_alloc(xa, id, entry, limit, gfp); xa_unlock(xa); return err; @@ -782,26 +804,26 @@ static inline int xa_alloc(struct xarray *xa, u32 *id, u32 max, void *entry, * xa_alloc_bh() - Find somewhere to store this entry in the XArray. * @xa: XArray. * @id: Pointer to ID. - * @max: Maximum ID to allocate (inclusive). * @entry: New entry. + * @limit: Range of ID to allocate. * @gfp: Memory allocation flags. * - * Allocates an unused ID in the range specified by @id and @max. - * Updates the @id pointer with the index, then stores the entry at that - * index. A concurrent lookup will not see an uninitialised @id. + * Finds an empty entry in @xa between @limit.min and @limit.max, + * stores the index into the @id pointer, then stores the entry at + * that index. A concurrent lookup will not see an uninitialised @id. * * Context: Any context. Takes and releases the xa_lock while * disabling softirqs. May sleep if the @gfp flags permit. 
- * Return: 0 on success, -ENOMEM if memory allocation fails or -ENOSPC if - * there is no more space in the XArray. + * Return: 0 on success, -ENOMEM if memory could not be allocated or + * -EBUSY if there are no free entries in @limit. */ -static inline int xa_alloc_bh(struct xarray *xa, u32 *id, u32 max, void *entry, - gfp_t gfp) +static inline int __must_check xa_alloc_bh(struct xarray *xa, u32 *id, + void *entry, struct xa_limit limit, gfp_t gfp) { int err; xa_lock_bh(xa); - err = __xa_alloc(xa, id, max, entry, gfp); + err = __xa_alloc(xa, id, entry, limit, gfp); xa_unlock_bh(xa); return err; @@ -811,26 +833,26 @@ static inline int xa_alloc_bh(struct xarray *xa, u32 *id, u32 max, void *entry, * xa_alloc_irq() - Find somewhere to store this entry in the XArray. * @xa: XArray. * @id: Pointer to ID. - * @max: Maximum ID to allocate (inclusive). * @entry: New entry. + * @limit: Range of ID to allocate. * @gfp: Memory allocation flags. * - * Allocates an unused ID in the range specified by @id and @max. - * Updates the @id pointer with the index, then stores the entry at that - * index. A concurrent lookup will not see an uninitialised @id. + * Finds an empty entry in @xa between @limit.min and @limit.max, + * stores the index into the @id pointer, then stores the entry at + * that index. A concurrent lookup will not see an uninitialised @id. * * Context: Process context. Takes and releases the xa_lock while * disabling interrupts. May sleep if the @gfp flags permit. - * Return: 0 on success, -ENOMEM if memory allocation fails or -ENOSPC if - * there is no more space in the XArray. + * Return: 0 on success, -ENOMEM if memory could not be allocated or + * -EBUSY if there are no free entries in @limit. */ -static inline int xa_alloc_irq(struct xarray *xa, u32 *id, u32 max, void *entry, - gfp_t gfp) +static inline int __must_check xa_alloc_irq(struct xarray *xa, u32 *id, + void *entry, struct xa_limit limit, gfp_t gfp) { int err; xa_lock_irq(xa); - err = __xa_alloc(xa, id, max, entry, gfp); + err = __xa_alloc(xa, id, entry, limit, gfp); xa_unlock_irq(xa); return err; diff --git a/lib/test_xarray.c b/lib/test_xarray.c index cd74f8f32abe..b5a6b981454d 100644 --- a/lib/test_xarray.c +++ b/lib/test_xarray.c @@ -40,9 +40,9 @@ static void *xa_store_index(struct xarray *xa, unsigned long index, gfp_t gfp) static void xa_alloc_index(struct xarray *xa, unsigned long index, gfp_t gfp) { - u32 id = 0; + u32 id; - XA_BUG_ON(xa, xa_alloc(xa, &id, UINT_MAX, xa_mk_index(index), + XA_BUG_ON(xa, xa_alloc(xa, &id, xa_mk_index(index), xa_limit_32b, gfp) != 0); XA_BUG_ON(xa, id != index); } @@ -640,28 +640,81 @@ static noinline void check_xa_alloc_1(struct xarray *xa, unsigned int base) xa_destroy(xa); /* Check that we fail properly at the limit of allocation */ - id = 0xfffffffeU; - XA_BUG_ON(xa, xa_alloc(xa, &id, UINT_MAX, xa_mk_index(id), + XA_BUG_ON(xa, xa_alloc(xa, &id, xa_mk_index(UINT_MAX - 1), + XA_LIMIT(UINT_MAX - 1, UINT_MAX), GFP_KERNEL) != 0); XA_BUG_ON(xa, id != 0xfffffffeU); - XA_BUG_ON(xa, xa_alloc(xa, &id, UINT_MAX, xa_mk_index(id), + XA_BUG_ON(xa, xa_alloc(xa, &id, xa_mk_index(UINT_MAX), + XA_LIMIT(UINT_MAX - 1, UINT_MAX), GFP_KERNEL) != 0); XA_BUG_ON(xa, id != 0xffffffffU); - XA_BUG_ON(xa, xa_alloc(xa, &id, UINT_MAX, xa_mk_index(id), - GFP_KERNEL) != -ENOSPC); - XA_BUG_ON(xa, id != 0xffffffffU); + id = 3; + XA_BUG_ON(xa, xa_alloc(xa, &id, xa_mk_index(0), + XA_LIMIT(UINT_MAX - 1, UINT_MAX), + GFP_KERNEL) != -EBUSY); + XA_BUG_ON(xa, id != 3); xa_destroy(xa); - id = 10; - XA_BUG_ON(xa, 
xa_alloc(xa, &id, 5, xa_mk_index(id), - GFP_KERNEL) != -ENOSPC); + XA_BUG_ON(xa, xa_alloc(xa, &id, xa_mk_index(10), XA_LIMIT(10, 5), + GFP_KERNEL) != -EBUSY); XA_BUG_ON(xa, xa_store_index(xa, 3, GFP_KERNEL) != 0); - XA_BUG_ON(xa, xa_alloc(xa, &id, 5, xa_mk_index(id), - GFP_KERNEL) != -ENOSPC); + XA_BUG_ON(xa, xa_alloc(xa, &id, xa_mk_index(10), XA_LIMIT(10, 5), + GFP_KERNEL) != -EBUSY); xa_erase_index(xa, 3); XA_BUG_ON(xa, !xa_empty(xa)); } +static noinline void check_xa_alloc_2(struct xarray *xa, unsigned int base) +{ + unsigned int i, id; + unsigned long index; + void *entry; + + /* Allocate and free a NULL and check xa_empty() behaves */ + XA_BUG_ON(xa, !xa_empty(xa)); + XA_BUG_ON(xa, xa_alloc(xa, &id, NULL, xa_limit_32b, GFP_KERNEL) != 0); + XA_BUG_ON(xa, id != base); + XA_BUG_ON(xa, xa_empty(xa)); + XA_BUG_ON(xa, xa_erase(xa, id) != NULL); + XA_BUG_ON(xa, !xa_empty(xa)); + + /* Ditto, but check destroy instead of erase */ + XA_BUG_ON(xa, !xa_empty(xa)); + XA_BUG_ON(xa, xa_alloc(xa, &id, NULL, xa_limit_32b, GFP_KERNEL) != 0); + XA_BUG_ON(xa, id != base); + XA_BUG_ON(xa, xa_empty(xa)); + xa_destroy(xa); + XA_BUG_ON(xa, !xa_empty(xa)); + + for (i = base; i < base + 10; i++) { + XA_BUG_ON(xa, xa_alloc(xa, &id, NULL, xa_limit_32b, + GFP_KERNEL) != 0); + XA_BUG_ON(xa, id != i); + } + + XA_BUG_ON(xa, xa_store(xa, 3, xa_mk_index(3), GFP_KERNEL) != NULL); + XA_BUG_ON(xa, xa_store(xa, 4, xa_mk_index(4), GFP_KERNEL) != NULL); + XA_BUG_ON(xa, xa_store(xa, 4, NULL, GFP_KERNEL) != xa_mk_index(4)); + XA_BUG_ON(xa, xa_erase(xa, 5) != NULL); + XA_BUG_ON(xa, xa_alloc(xa, &id, NULL, xa_limit_32b, GFP_KERNEL) != 0); + XA_BUG_ON(xa, id != 5); + + xa_for_each(xa, index, entry) { + xa_erase_index(xa, index); + } + + for (i = base; i < base + 9; i++) { + XA_BUG_ON(xa, xa_erase(xa, i) != NULL); + XA_BUG_ON(xa, xa_empty(xa)); + } + XA_BUG_ON(xa, xa_erase(xa, 8) != NULL); + XA_BUG_ON(xa, xa_empty(xa)); + XA_BUG_ON(xa, xa_erase(xa, base + 9) != NULL); + XA_BUG_ON(xa, !xa_empty(xa)); + + xa_destroy(xa); +} + static DEFINE_XARRAY_ALLOC(xa0); static DEFINE_XARRAY_ALLOC1(xa1); @@ -669,6 +722,8 @@ static noinline void check_xa_alloc(void) { check_xa_alloc_1(&xa0, 0); check_xa_alloc_1(&xa1, 1); + check_xa_alloc_2(&xa0, 0); + check_xa_alloc_2(&xa1, 1); } static noinline void __check_store_iter(struct xarray *xa, unsigned long start, @@ -1219,9 +1274,8 @@ static void check_align_1(struct xarray *xa, char *name) void *entry; for (i = 0; i < 8; i++) { - id = 0; - XA_BUG_ON(xa, xa_alloc(xa, &id, UINT_MAX, name + i, GFP_KERNEL) - != 0); + XA_BUG_ON(xa, xa_alloc(xa, &id, name + i, xa_limit_32b, + GFP_KERNEL) != 0); XA_BUG_ON(xa, id != i); } xa_for_each(xa, index, entry) diff --git a/lib/xarray.c b/lib/xarray.c index 468fb7b7963f..c707388fb05e 100644 --- a/lib/xarray.c +++ b/lib/xarray.c @@ -1615,23 +1615,23 @@ EXPORT_SYMBOL(xa_store_range); * __xa_alloc() - Find somewhere to store this entry in the XArray. * @xa: XArray. * @id: Pointer to ID. - * @max: Maximum ID to allocate (inclusive). + * @limit: Range for allocated ID. * @entry: New entry. * @gfp: Memory allocation flags. * - * Allocates an unused ID in the range specified by @id and @max. - * Updates the @id pointer with the index, then stores the entry at that - * index. A concurrent lookup will not see an uninitialised @id. + * Finds an empty entry in @xa between @limit.min and @limit.max, + * stores the index into the @id pointer, then stores the entry at + * that index. A concurrent lookup will not see an uninitialised @id. * * Context: Any context. 
Expects xa_lock to be held on entry. May * release and reacquire xa_lock if @gfp flags permit. - * Return: 0 on success, -ENOMEM if memory allocation fails or -ENOSPC if - * there is no more space in the XArray. + * Return: 0 on success, -ENOMEM if memory could not be allocated or + * -EBUSY if there are no free entries in @limit. */ -int __xa_alloc(struct xarray *xa, u32 *id, u32 max, void *entry, gfp_t gfp) +int __xa_alloc(struct xarray *xa, u32 *id, void *entry, + struct xa_limit limit, gfp_t gfp) { XA_STATE(xas, xa, 0); - int err; if (WARN_ON_ONCE(xa_is_advanced(entry))) return -EINVAL; @@ -1642,18 +1642,17 @@ int __xa_alloc(struct xarray *xa, u32 *id, u32 max, void *entry, gfp_t gfp) entry = XA_ZERO_ENTRY; do { - xas.xa_index = *id; - xas_find_marked(&xas, max, XA_FREE_MARK); + xas.xa_index = limit.min; + xas_find_marked(&xas, limit.max, XA_FREE_MARK); if (xas.xa_node == XAS_RESTART) - xas_set_err(&xas, -ENOSPC); + xas_set_err(&xas, -EBUSY); + else + *id = xas.xa_index; xas_store(&xas, entry); xas_clear_mark(&xas, XA_FREE_MARK); } while (__xas_nomem(&xas, gfp)); - err = xas_error(&xas); - if (!err) - *id = xas.xa_index; - return err; + return xas_error(&xas); } EXPORT_SYMBOL(__xa_alloc); -- cgit v1.2.3 From 2fa044e51a1f35d7b04cbde07ec513b0ba195e38 Mon Sep 17 00:00:00 2001 From: Matthew Wilcox Date: Tue, 6 Nov 2018 14:13:35 -0500 Subject: XArray: Add cyclic allocation This differs slightly from the IDR equivalent in five ways. 1. It can allocate up to UINT_MAX instead of being limited to INT_MAX, like xa_alloc(). Also like xa_alloc(), it will write to the 'id' pointer before placing the entry in the XArray. 2. The 'next' cursor is allocated separately from the XArray instead of being part of the IDR. This saves memory for all the users which do not use the cyclic allocation API and suits some users better. 3. It returns -EBUSY instead of -ENOSPC. 4. It will attempt to wrap back to the minimum value on memory allocation failure as well as on an -EBUSY error, assuming that a user would rather allocate a small ID than suffer an ID allocation failure. 5. It reports whether it has wrapped, which is important to some users. Signed-off-by: Matthew Wilcox --- Documentation/core-api/xarray.rst | 4 +- include/linux/xarray.h | 102 ++++++++++++++++++++++++++++++++++++++ lib/test_xarray.c | 53 ++++++++++++++++++++ lib/xarray.c | 50 +++++++++++++++++++ 4 files changed, 208 insertions(+), 1 deletion(-) (limited to 'include/linux/xarray.h') diff --git a/Documentation/core-api/xarray.rst b/Documentation/core-api/xarray.rst index e90c4925cd37..c7436da5c4ad 100644 --- a/Documentation/core-api/xarray.rst +++ b/Documentation/core-api/xarray.rst @@ -144,7 +144,9 @@ you only want to free the entry if it's ``NULL``). By default, the lowest free entry is allocated starting from 0. If you want to allocate entries starting at 1, it is more efficient to use -:c:func:`DEFINE_XARRAY_ALLOC1` or ``XA_FLAGS_ALLOC1``. +:c:func:`DEFINE_XARRAY_ALLOC1` or ``XA_FLAGS_ALLOC1``. If you want to +allocate IDs up to a maximum, then wrap back around to the lowest free +ID, you can use :c:func:`xa_alloc_cyclic`. You cannot use ``XA_MARK_0`` with an allocating XArray as this mark is used to track whether an entry is free or not. 
The other marks are diff --git a/include/linux/xarray.h b/include/linux/xarray.h index 883bb958e462..5ed6b462e754 100644 --- a/include/linux/xarray.h +++ b/include/linux/xarray.h @@ -242,6 +242,7 @@ enum xa_lock_type { #define XA_FLAGS_LOCK_BH ((__force gfp_t)XA_LOCK_BH) #define XA_FLAGS_TRACK_FREE ((__force gfp_t)4U) #define XA_FLAGS_ZERO_BUSY ((__force gfp_t)8U) +#define XA_FLAGS_ALLOC_WRAPPED ((__force gfp_t)16U) #define XA_FLAGS_MARK(mark) ((__force gfp_t)((1U << __GFP_BITS_SHIFT) << \ (__force unsigned)(mark))) @@ -499,6 +500,8 @@ void *__xa_cmpxchg(struct xarray *, unsigned long index, void *old, int __xa_insert(struct xarray *, unsigned long index, void *entry, gfp_t); int __must_check __xa_alloc(struct xarray *, u32 *id, void *entry, struct xa_limit, gfp_t); +int __must_check __xa_alloc_cyclic(struct xarray *, u32 *id, void *entry, + struct xa_limit, u32 *next, gfp_t); int __xa_reserve(struct xarray *, unsigned long index, gfp_t); void __xa_set_mark(struct xarray *, unsigned long index, xa_mark_t); void __xa_clear_mark(struct xarray *, unsigned long index, xa_mark_t); @@ -858,6 +861,105 @@ static inline int __must_check xa_alloc_irq(struct xarray *xa, u32 *id, return err; } +/** + * xa_alloc_cyclic() - Find somewhere to store this entry in the XArray. + * @xa: XArray. + * @id: Pointer to ID. + * @entry: New entry. + * @limit: Range of allocated ID. + * @next: Pointer to next ID to allocate. + * @gfp: Memory allocation flags. + * + * Finds an empty entry in @xa between @limit.min and @limit.max, + * stores the index into the @id pointer, then stores the entry at + * that index. A concurrent lookup will not see an uninitialised @id. + * The search for an empty entry will start at @next and will wrap + * around if necessary. + * + * Context: Any context. Takes and releases the xa_lock. May sleep if + * the @gfp flags permit. + * Return: 0 if the allocation succeeded without wrapping. 1 if the + * allocation succeeded after wrapping, -ENOMEM if memory could not be + * allocated or -EBUSY if there are no free entries in @limit. + */ +static inline int xa_alloc_cyclic(struct xarray *xa, u32 *id, void *entry, + struct xa_limit limit, u32 *next, gfp_t gfp) +{ + int err; + + xa_lock(xa); + err = __xa_alloc_cyclic(xa, id, entry, limit, next, gfp); + xa_unlock(xa); + + return err; +} + +/** + * xa_alloc_cyclic_bh() - Find somewhere to store this entry in the XArray. + * @xa: XArray. + * @id: Pointer to ID. + * @entry: New entry. + * @limit: Range of allocated ID. + * @next: Pointer to next ID to allocate. + * @gfp: Memory allocation flags. + * + * Finds an empty entry in @xa between @limit.min and @limit.max, + * stores the index into the @id pointer, then stores the entry at + * that index. A concurrent lookup will not see an uninitialised @id. + * The search for an empty entry will start at @next and will wrap + * around if necessary. + * + * Context: Any context. Takes and releases the xa_lock while + * disabling softirqs. May sleep if the @gfp flags permit. + * Return: 0 if the allocation succeeded without wrapping. 1 if the + * allocation succeeded after wrapping, -ENOMEM if memory could not be + * allocated or -EBUSY if there are no free entries in @limit. 
+ */ +static inline int xa_alloc_cyclic_bh(struct xarray *xa, u32 *id, void *entry, + struct xa_limit limit, u32 *next, gfp_t gfp) +{ + int err; + + xa_lock_bh(xa); + err = __xa_alloc_cyclic(xa, id, entry, limit, next, gfp); + xa_unlock_bh(xa); + + return err; +} + +/** + * xa_alloc_cyclic_irq() - Find somewhere to store this entry in the XArray. + * @xa: XArray. + * @id: Pointer to ID. + * @entry: New entry. + * @limit: Range of allocated ID. + * @next: Pointer to next ID to allocate. + * @gfp: Memory allocation flags. + * + * Finds an empty entry in @xa between @limit.min and @limit.max, + * stores the index into the @id pointer, then stores the entry at + * that index. A concurrent lookup will not see an uninitialised @id. + * The search for an empty entry will start at @next and will wrap + * around if necessary. + * + * Context: Process context. Takes and releases the xa_lock while + * disabling interrupts. May sleep if the @gfp flags permit. + * Return: 0 if the allocation succeeded without wrapping. 1 if the + * allocation succeeded after wrapping, -ENOMEM if memory could not be + * allocated or -EBUSY if there are no free entries in @limit. + */ +static inline int xa_alloc_cyclic_irq(struct xarray *xa, u32 *id, void *entry, + struct xa_limit limit, u32 *next, gfp_t gfp) +{ + int err; + + xa_lock_irq(xa); + err = __xa_alloc_cyclic(xa, id, entry, limit, next, gfp); + xa_unlock_irq(xa); + + return err; +} + /** * xa_reserve() - Reserve this index in the XArray. * @xa: XArray. diff --git a/lib/test_xarray.c b/lib/test_xarray.c index b5a6b981454d..eaf53f742c72 100644 --- a/lib/test_xarray.c +++ b/lib/test_xarray.c @@ -715,6 +715,57 @@ static noinline void check_xa_alloc_2(struct xarray *xa, unsigned int base) xa_destroy(xa); } +static noinline void check_xa_alloc_3(struct xarray *xa, unsigned int base) +{ + struct xa_limit limit = XA_LIMIT(1, 0x3fff); + u32 next = 0; + unsigned int i, id; + unsigned long index; + void *entry; + + XA_BUG_ON(xa, xa_alloc_cyclic(xa, &id, xa_mk_index(1), limit, + &next, GFP_KERNEL) != 0); + XA_BUG_ON(xa, id != 1); + + next = 0x3ffd; + XA_BUG_ON(xa, xa_alloc_cyclic(xa, &id, xa_mk_index(0x3ffd), limit, + &next, GFP_KERNEL) != 0); + XA_BUG_ON(xa, id != 0x3ffd); + xa_erase_index(xa, 0x3ffd); + xa_erase_index(xa, 1); + XA_BUG_ON(xa, !xa_empty(xa)); + + for (i = 0x3ffe; i < 0x4003; i++) { + if (i < 0x4000) + entry = xa_mk_index(i); + else + entry = xa_mk_index(i - 0x3fff); + XA_BUG_ON(xa, xa_alloc_cyclic(xa, &id, entry, limit, + &next, GFP_KERNEL) != (id == 1)); + XA_BUG_ON(xa, xa_mk_index(id) != entry); + } + + /* Check wrap-around is handled correctly */ + if (base != 0) + xa_erase_index(xa, base); + xa_erase_index(xa, base + 1); + next = UINT_MAX; + XA_BUG_ON(xa, xa_alloc_cyclic(xa, &id, xa_mk_index(UINT_MAX), + xa_limit_32b, &next, GFP_KERNEL) != 0); + XA_BUG_ON(xa, id != UINT_MAX); + XA_BUG_ON(xa, xa_alloc_cyclic(xa, &id, xa_mk_index(base), + xa_limit_32b, &next, GFP_KERNEL) != 1); + XA_BUG_ON(xa, id != base); + XA_BUG_ON(xa, xa_alloc_cyclic(xa, &id, xa_mk_index(base + 1), + xa_limit_32b, &next, GFP_KERNEL) != 0); + XA_BUG_ON(xa, id != base + 1); + + xa_for_each(xa, index, entry) + xa_erase_index(xa, index); + + XA_BUG_ON(xa, !xa_empty(xa)); +} + static DEFINE_XARRAY_ALLOC(xa0); static DEFINE_XARRAY_ALLOC1(xa1); @@ -724,6 +775,8 @@ static noinline void check_xa_alloc(void) check_xa_alloc_1(&xa1, 1); check_xa_alloc_2(&xa0, 0); check_xa_alloc_2(&xa1, 1); + check_xa_alloc_3(&xa0, 0); + check_xa_alloc_3(&xa1, 1); } static noinline void __check_store_iter(struct 
xarray *xa, unsigned long start, diff --git a/lib/xarray.c b/lib/xarray.c index c707388fb05e..89e37ac50850 100644 --- a/lib/xarray.c +++ b/lib/xarray.c @@ -1656,6 +1656,56 @@ int __xa_alloc(struct xarray *xa, u32 *id, void *entry, } EXPORT_SYMBOL(__xa_alloc); +/** + * __xa_alloc_cyclic() - Find somewhere to store this entry in the XArray. + * @xa: XArray. + * @id: Pointer to ID. + * @entry: New entry. + * @limit: Range of allocated ID. + * @next: Pointer to next ID to allocate. + * @gfp: Memory allocation flags. + * + * Finds an empty entry in @xa between @limit.min and @limit.max, + * stores the index into the @id pointer, then stores the entry at + * that index. A concurrent lookup will not see an uninitialised @id. + * The search for an empty entry will start at @next and will wrap + * around if necessary. + * + * Context: Any context. Expects xa_lock to be held on entry. May + * release and reacquire xa_lock if @gfp flags permit. + * Return: 0 if the allocation succeeded without wrapping. 1 if the + * allocation succeeded after wrapping, -ENOMEM if memory could not be + * allocated or -EBUSY if there are no free entries in @limit. + */ +int __xa_alloc_cyclic(struct xarray *xa, u32 *id, void *entry, + struct xa_limit limit, u32 *next, gfp_t gfp) +{ + u32 min = limit.min; + int ret; + + limit.min = max(min, *next); + ret = __xa_alloc(xa, id, entry, limit, gfp); + if ((xa->xa_flags & XA_FLAGS_ALLOC_WRAPPED) && ret == 0) { + xa->xa_flags &= ~XA_FLAGS_ALLOC_WRAPPED; + ret = 1; + } + + if (ret < 0 && limit.min > min) { + limit.min = min; + ret = __xa_alloc(xa, id, entry, limit, gfp); + if (ret == 0) + ret = 1; + } + + if (ret >= 0) { + *next = *id + 1; + if (*next == 0) + xa->xa_flags |= XA_FLAGS_ALLOC_WRAPPED; + } + return ret; +} +EXPORT_SYMBOL(__xa_alloc_cyclic); + /** * __xa_set_mark() - Set this mark on this entry while locked. * @xa: XArray. -- cgit v1.2.3 From f818b82b80164014d7ee3df89bb110808778c796 Mon Sep 17 00:00:00 2001 From: Matthew Wilcox Date: Fri, 8 Feb 2019 14:02:45 -0500 Subject: XArray: Mark xa_insert and xa_reserve as must_check If the user doesn't care about the return value from xa_insert(), then they should be using xa_store() instead. The point of xa_reserve() is to get the return value early before taking another lock, so this should also be __must_check. 
Signed-off-by: Matthew Wilcox --- include/linux/xarray.h | 23 ++++++++++++----------- lib/test_xarray.c | 10 +++++----- 2 files changed, 17 insertions(+), 16 deletions(-) (limited to 'include/linux/xarray.h') diff --git a/include/linux/xarray.h b/include/linux/xarray.h index 5ed6b462e754..687c150071a5 100644 --- a/include/linux/xarray.h +++ b/include/linux/xarray.h @@ -497,12 +497,13 @@ void *__xa_erase(struct xarray *, unsigned long index); void *__xa_store(struct xarray *, unsigned long index, void *entry, gfp_t); void *__xa_cmpxchg(struct xarray *, unsigned long index, void *old, void *entry, gfp_t); -int __xa_insert(struct xarray *, unsigned long index, void *entry, gfp_t); +int __must_check __xa_insert(struct xarray *, unsigned long index, + void *entry, gfp_t); int __must_check __xa_alloc(struct xarray *, u32 *id, void *entry, struct xa_limit, gfp_t); int __must_check __xa_alloc_cyclic(struct xarray *, u32 *id, void *entry, struct xa_limit, u32 *next, gfp_t); -int __xa_reserve(struct xarray *, unsigned long index, gfp_t); +int __must_check __xa_reserve(struct xarray *, unsigned long index, gfp_t); void __xa_set_mark(struct xarray *, unsigned long index, xa_mark_t); void __xa_clear_mark(struct xarray *, unsigned long index, xa_mark_t); @@ -704,8 +705,8 @@ static inline void *xa_cmpxchg_irq(struct xarray *xa, unsigned long index, * Return: 0 if the store succeeded. -EBUSY if another entry was present. * -ENOMEM if memory could not be allocated. */ -static inline int xa_insert(struct xarray *xa, unsigned long index, - void *entry, gfp_t gfp) +static inline int __must_check xa_insert(struct xarray *xa, + unsigned long index, void *entry, gfp_t gfp) { int err; @@ -733,8 +734,8 @@ static inline int xa_insert(struct xarray *xa, unsigned long index, * Return: 0 if the store succeeded. -EBUSY if another entry was present. * -ENOMEM if memory could not be allocated. */ -static inline int xa_insert_bh(struct xarray *xa, unsigned long index, - void *entry, gfp_t gfp) +static inline int __must_check xa_insert_bh(struct xarray *xa, + unsigned long index, void *entry, gfp_t gfp) { int err; @@ -762,8 +763,8 @@ static inline int xa_insert_bh(struct xarray *xa, unsigned long index, * Return: 0 if the store succeeded. -EBUSY if another entry was present. * -ENOMEM if memory could not be allocated. */ -static inline int xa_insert_irq(struct xarray *xa, unsigned long index, - void *entry, gfp_t gfp) +static inline int __must_check xa_insert_irq(struct xarray *xa, + unsigned long index, void *entry, gfp_t gfp) { int err; @@ -978,7 +979,7 @@ static inline int xa_alloc_cyclic_irq(struct xarray *xa, u32 *id, void *entry, * May sleep if the @gfp flags permit. * Return: 0 if the reservation succeeded or -ENOMEM if it failed. */ -static inline +static inline __must_check int xa_reserve(struct xarray *xa, unsigned long index, gfp_t gfp) { int ret; @@ -1002,7 +1003,7 @@ int xa_reserve(struct xarray *xa, unsigned long index, gfp_t gfp) * disabling softirqs. * Return: 0 if the reservation succeeded or -ENOMEM if it failed. */ -static inline +static inline __must_check int xa_reserve_bh(struct xarray *xa, unsigned long index, gfp_t gfp) { int ret; @@ -1026,7 +1027,7 @@ int xa_reserve_bh(struct xarray *xa, unsigned long index, gfp_t gfp) * disabling interrupts. * Return: 0 if the reservation succeeded or -ENOMEM if it failed. 
*/ -static inline +static inline __must_check int xa_reserve_irq(struct xarray *xa, unsigned long index, gfp_t gfp) { int ret; diff --git a/lib/test_xarray.c b/lib/test_xarray.c index eaf53f742c72..3eaa40ddc390 100644 --- a/lib/test_xarray.c +++ b/lib/test_xarray.c @@ -364,21 +364,21 @@ static noinline void check_reserve(struct xarray *xa) /* An array with a reserved entry is not empty */ XA_BUG_ON(xa, !xa_empty(xa)); - xa_reserve(xa, 12345678, GFP_KERNEL); + XA_BUG_ON(xa, xa_reserve(xa, 12345678, GFP_KERNEL) != 0); XA_BUG_ON(xa, xa_empty(xa)); XA_BUG_ON(xa, xa_load(xa, 12345678)); xa_release(xa, 12345678); XA_BUG_ON(xa, !xa_empty(xa)); /* Releasing a used entry does nothing */ - xa_reserve(xa, 12345678, GFP_KERNEL); + XA_BUG_ON(xa, xa_reserve(xa, 12345678, GFP_KERNEL) != 0); XA_BUG_ON(xa, xa_store_index(xa, 12345678, GFP_NOWAIT) != NULL); xa_release(xa, 12345678); xa_erase_index(xa, 12345678); XA_BUG_ON(xa, !xa_empty(xa)); /* cmpxchg sees a reserved entry as NULL */ - xa_reserve(xa, 12345678, GFP_KERNEL); + XA_BUG_ON(xa, xa_reserve(xa, 12345678, GFP_KERNEL) != 0); XA_BUG_ON(xa, xa_cmpxchg(xa, 12345678, NULL, xa_mk_value(12345678), GFP_NOWAIT) != NULL); xa_release(xa, 12345678); @@ -386,7 +386,7 @@ static noinline void check_reserve(struct xarray *xa) XA_BUG_ON(xa, !xa_empty(xa)); /* But xa_insert does not */ - xa_reserve(xa, 12345678, GFP_KERNEL); + XA_BUG_ON(xa, xa_reserve(xa, 12345678, GFP_KERNEL) != 0); XA_BUG_ON(xa, xa_insert(xa, 12345678, xa_mk_value(12345678), 0) != -EBUSY); XA_BUG_ON(xa, xa_empty(xa)); @@ -395,7 +395,7 @@ static noinline void check_reserve(struct xarray *xa) /* Can iterate through a reserved entry */ xa_store_index(xa, 5, GFP_KERNEL); - xa_reserve(xa, 6, GFP_KERNEL); + XA_BUG_ON(xa, xa_reserve(xa, 6, GFP_KERNEL) != 0); xa_store_index(xa, 7, GFP_KERNEL); xa_for_each(xa, index, entry) { -- cgit v1.2.3 From b38f6c50270683abf35a388f82cafecce971a003 Mon Sep 17 00:00:00 2001 From: Matthew Wilcox Date: Wed, 20 Feb 2019 11:30:49 -0500 Subject: XArray: Fix xa_release in allocating arrays xa_cmpxchg() was a little too magic in turning ZERO entries into NULL, and would leave the entry set to the ZERO entry instead of releasing it for future use. After careful review of existing users of xa_cmpxchg(), change the semantics so that it does not translate either incoming argument from NULL into ZERO entries. Add several tests to the test-suite to make sure this problem doesn't come back. Reported-by: Jason Gunthorpe Signed-off-by: Matthew Wilcox --- include/linux/xarray.h | 36 +++++++++++++++++++++++------------- lib/test_xarray.c | 28 ++++++++++++++++++++++++---- lib/xarray.c | 6 +----- 3 files changed, 48 insertions(+), 22 deletions(-) (limited to 'include/linux/xarray.h') diff --git a/include/linux/xarray.h b/include/linux/xarray.h index 687c150071a5..588733abd19d 100644 --- a/include/linux/xarray.h +++ b/include/linux/xarray.h @@ -131,6 +131,12 @@ static inline unsigned int xa_pointer_tag(void *entry) * xa_mk_internal() - Create an internal entry. * @v: Value to turn into an internal entry. * + * Internal entries are used for a number of purposes. Entries 0-255 are + * used for sibling entries (only 0-62 are used by the current code). 256 + * is used for the retry entry. 257 is used for the reserved / zero entry. + * Negative internal entries are used to represent errnos. Node pointers + * are also tagged as internal entries in some situations. + * * Context: Any context. * Return: An XArray internal entry corresponding to this value. 
*/ @@ -163,6 +169,22 @@ static inline bool xa_is_internal(const void *entry) return ((unsigned long)entry & 3) == 2; } +#define XA_ZERO_ENTRY xa_mk_internal(257) + +/** + * xa_is_zero() - Is the entry a zero entry? + * @entry: Entry retrieved from the XArray + * + * The normal API will return NULL as the contents of a slot containing + * a zero entry. You can only see zero entries by using the advanced API. + * + * Return: %true if the entry is a zero entry. + */ +static inline bool xa_is_zero(const void *entry) +{ + return unlikely(entry == XA_ZERO_ENTRY); +} + /** * xa_is_err() - Report whether an XArray operation returned an error * @entry: Result from calling an XArray function @@ -1050,7 +1072,7 @@ int xa_reserve_irq(struct xarray *xa, unsigned long index, gfp_t gfp) */ static inline void xa_release(struct xarray *xa, unsigned long index) { - xa_cmpxchg(xa, index, NULL, NULL, 0); + xa_cmpxchg(xa, index, XA_ZERO_ENTRY, NULL, 0); } /* Everything below here is the Advanced API. Proceed with caution. */ @@ -1210,18 +1232,6 @@ static inline bool xa_is_sibling(const void *entry) } #define XA_RETRY_ENTRY xa_mk_internal(256) -#define XA_ZERO_ENTRY xa_mk_internal(257) - -/** - * xa_is_zero() - Is the entry a zero entry? - * @entry: Entry retrieved from the XArray - * - * Return: %true if the entry is a zero entry. - */ -static inline bool xa_is_zero(const void *entry) -{ - return unlikely(entry == XA_ZERO_ENTRY); -} /** * xa_is_retry() - Is the entry a retry entry? diff --git a/lib/test_xarray.c b/lib/test_xarray.c index 3eaa40ddc390..52f8ecff8c0c 100644 --- a/lib/test_xarray.c +++ b/lib/test_xarray.c @@ -361,6 +361,7 @@ static noinline void check_reserve(struct xarray *xa) { void *entry; unsigned long index; + int count; /* An array with a reserved entry is not empty */ XA_BUG_ON(xa, !xa_empty(xa)); @@ -377,15 +378,15 @@ static noinline void check_reserve(struct xarray *xa) xa_erase_index(xa, 12345678); XA_BUG_ON(xa, !xa_empty(xa)); - /* cmpxchg sees a reserved entry as NULL */ + /* cmpxchg sees a reserved entry as ZERO */ XA_BUG_ON(xa, xa_reserve(xa, 12345678, GFP_KERNEL) != 0); - XA_BUG_ON(xa, xa_cmpxchg(xa, 12345678, NULL, xa_mk_value(12345678), - GFP_NOWAIT) != NULL); + XA_BUG_ON(xa, xa_cmpxchg(xa, 12345678, XA_ZERO_ENTRY, + xa_mk_value(12345678), GFP_NOWAIT) != NULL); xa_release(xa, 12345678); xa_erase_index(xa, 12345678); XA_BUG_ON(xa, !xa_empty(xa)); - /* But xa_insert does not */ + /* xa_insert treats it as busy */ XA_BUG_ON(xa, xa_reserve(xa, 12345678, GFP_KERNEL) != 0); XA_BUG_ON(xa, xa_insert(xa, 12345678, xa_mk_value(12345678), 0) != -EBUSY); @@ -398,9 +399,27 @@ static noinline void check_reserve(struct xarray *xa) XA_BUG_ON(xa, xa_reserve(xa, 6, GFP_KERNEL) != 0); xa_store_index(xa, 7, GFP_KERNEL); + count = 0; xa_for_each(xa, index, entry) { XA_BUG_ON(xa, index != 5 && index != 7); + count++; + } + XA_BUG_ON(xa, count != 2); + + /* If we free a reserved entry, we should be able to allocate it */ + if (xa->xa_flags & XA_FLAGS_ALLOC) { + u32 id; + + XA_BUG_ON(xa, xa_alloc(xa, &id, xa_mk_value(8), + XA_LIMIT(5, 10), GFP_KERNEL) != 0); + XA_BUG_ON(xa, id != 8); + + xa_release(xa, 6); + XA_BUG_ON(xa, xa_alloc(xa, &id, xa_mk_value(6), + XA_LIMIT(5, 10), GFP_KERNEL) != 0); + XA_BUG_ON(xa, id != 6); } + xa_destroy(xa); } @@ -1486,6 +1505,7 @@ static int xarray_checks(void) check_xas_erase(&array); check_cmpxchg(&array); check_reserve(&array); + check_reserve(&xa0); check_multi_store(&array); check_xa_alloc(); check_find(&array); diff --git a/lib/xarray.c b/lib/xarray.c index 
89e37ac50850..b9a6cf42feee 100644 --- a/lib/xarray.c +++ b/lib/xarray.c @@ -1429,16 +1429,12 @@ void *__xa_cmpxchg(struct xarray *xa, unsigned long index, if (WARN_ON_ONCE(xa_is_advanced(entry))) return XA_ERROR(-EINVAL); - if (xa_track_free(xa) && !entry) - entry = XA_ZERO_ENTRY; do { curr = xas_load(&xas); - if (curr == XA_ZERO_ENTRY) - curr = NULL; if (curr == old) { xas_store(&xas, entry); - if (xa_track_free(xa)) + if (xa_track_free(xa) && entry && !curr) xas_clear_mark(&xas, XA_FREE_MARK); } } while (__xas_nomem(&xas, gfp)); -- cgit v1.2.3 From 962033d55d0761e0716a01a715c6659c8c8dfc41 Mon Sep 17 00:00:00 2001 From: Matthew Wilcox Date: Wed, 20 Feb 2019 11:51:22 -0500 Subject: XArray: Use xa_cmpxchg to implement xa_reserve Jason feels this is clearer, and it saves a function and an exported symbol. Suggested-by: Jason Gunthorpe Signed-off-by: Matthew Wilcox --- Documentation/core-api/xarray.rst | 1 - include/linux/xarray.h | 25 +++---------------------- lib/xarray.c | 36 ------------------------------------ 3 files changed, 3 insertions(+), 59 deletions(-) (limited to 'include/linux/xarray.h') diff --git a/Documentation/core-api/xarray.rst b/Documentation/core-api/xarray.rst index c7436da5c4ad..ef6f9f98f595 100644 --- a/Documentation/core-api/xarray.rst +++ b/Documentation/core-api/xarray.rst @@ -215,7 +215,6 @@ Assumes xa_lock held on entry: * :c:func:`__xa_erase` * :c:func:`__xa_cmpxchg` * :c:func:`__xa_alloc` - * :c:func:`__xa_reserve` * :c:func:`__xa_set_mark` * :c:func:`__xa_clear_mark` diff --git a/include/linux/xarray.h b/include/linux/xarray.h index 588733abd19d..0e01e6129145 100644 --- a/include/linux/xarray.h +++ b/include/linux/xarray.h @@ -525,7 +525,6 @@ int __must_check __xa_alloc(struct xarray *, u32 *id, void *entry, struct xa_limit, gfp_t); int __must_check __xa_alloc_cyclic(struct xarray *, u32 *id, void *entry, struct xa_limit, u32 *next, gfp_t); -int __must_check __xa_reserve(struct xarray *, unsigned long index, gfp_t); void __xa_set_mark(struct xarray *, unsigned long index, xa_mark_t); void __xa_clear_mark(struct xarray *, unsigned long index, xa_mark_t); @@ -1004,13 +1003,7 @@ static inline int xa_alloc_cyclic_irq(struct xarray *xa, u32 *id, void *entry, static inline __must_check int xa_reserve(struct xarray *xa, unsigned long index, gfp_t gfp) { - int ret; - - xa_lock(xa); - ret = __xa_reserve(xa, index, gfp); - xa_unlock(xa); - - return ret; + return xa_err(xa_cmpxchg(xa, index, NULL, XA_ZERO_ENTRY, gfp)); } /** @@ -1028,13 +1021,7 @@ int xa_reserve(struct xarray *xa, unsigned long index, gfp_t gfp) static inline __must_check int xa_reserve_bh(struct xarray *xa, unsigned long index, gfp_t gfp) { - int ret; - - xa_lock_bh(xa); - ret = __xa_reserve(xa, index, gfp); - xa_unlock_bh(xa); - - return ret; + return xa_err(xa_cmpxchg_bh(xa, index, NULL, XA_ZERO_ENTRY, gfp)); } /** @@ -1052,13 +1039,7 @@ int xa_reserve_bh(struct xarray *xa, unsigned long index, gfp_t gfp) static inline __must_check int xa_reserve_irq(struct xarray *xa, unsigned long index, gfp_t gfp) { - int ret; - - xa_lock_irq(xa); - ret = __xa_reserve(xa, index, gfp); - xa_unlock_irq(xa); - - return ret; + return xa_err(xa_cmpxchg_irq(xa, index, NULL, XA_ZERO_ENTRY, gfp)); } /** diff --git a/lib/xarray.c b/lib/xarray.c index b9a6cf42feee..3f10198f00b7 100644 --- a/lib/xarray.c +++ b/lib/xarray.c @@ -1484,42 +1484,6 @@ int __xa_insert(struct xarray *xa, unsigned long index, void *entry, gfp_t gfp) } EXPORT_SYMBOL(__xa_insert); -/** - * __xa_reserve() - Reserve this index in the XArray. 
- * @xa: XArray. - * @index: Index into array. - * @gfp: Memory allocation flags. - * - * Ensures there is somewhere to store an entry at @index in the array. - * If there is already something stored at @index, this function does - * nothing. If there was nothing there, the entry is marked as reserved. - * Loading from a reserved entry returns a %NULL pointer. - * - * If you do not use the entry that you have reserved, call xa_release() - * or xa_erase() to free any unnecessary memory. - * - * Context: Any context. Expects the xa_lock to be held on entry. May - * release the lock, sleep and reacquire the lock if the @gfp flags permit. - * Return: 0 if the reservation succeeded or -ENOMEM if it failed. - */ -int __xa_reserve(struct xarray *xa, unsigned long index, gfp_t gfp) -{ - XA_STATE(xas, xa, index); - void *curr; - - do { - curr = xas_load(&xas); - if (!curr) { - xas_store(&xas, XA_ZERO_ENTRY); - if (xa_track_free(xa)) - xas_clear_mark(&xas, XA_FREE_MARK); - } - } while (__xas_nomem(&xas, gfp)); - - return xas_error(&xas); -} -EXPORT_SYMBOL(__xa_reserve); - #ifdef CONFIG_XARRAY_MULTI static void xas_set_range(struct xa_state *xas, unsigned long first, unsigned long last) -- cgit v1.2.3
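
For reference, below is a minimal caller-side sketch of the allocation API as reworked in this series: xa_alloc() taking a struct xa_limit, and xa_alloc_cyclic() driven by an external cursor. The "session" structure, the function names and the GFP usage are illustrative assumptions and do not come from the patches above; only the XArray calls and their return conventions do.

#include <linux/xarray.h>

static DEFINE_XARRAY_ALLOC(session_ids);	/* free-tracking array, IDs allocated from 0 */
static u32 session_next;			/* cursor for cyclic allocation */

struct session {
	u32 id;
	/* ... caller-specific state ... */
};

/* Bounded allocation: store @s at the first free index in [0, UINT_MAX]. */
static int session_register(struct session *s, gfp_t gfp)
{
	/* 0 on success, -EBUSY if no free entry in the limit, -ENOMEM on allocation failure. */
	return xa_alloc(&session_ids, &s->id, s, xa_limit_32b, gfp);
}

/* Cyclic allocation: the search starts at session_next and wraps back to limit.min. */
static int session_register_cyclic(struct session *s, gfp_t gfp)
{
	int err;

	err = xa_alloc_cyclic(&session_ids, &s->id, s, XA_LIMIT(1, 0x3fff),
			      &session_next, gfp);
	/* A return of 1 means the allocation succeeded after wrapping; treat it as success. */
	return err < 0 ? err : 0;
}

static void session_unregister(struct session *s)
{
	xa_erase(&session_ids, s->id);
}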