From 8a7f97b902f4fb0d94b355b6b3f1fbd7154cafb9 Mon Sep 17 00:00:00 2001 From: Mike Rapoport Date: Mon, 11 Mar 2019 23:30:31 -0700 Subject: treewide: add checks for the return value of memblock_alloc*() Add check for the return value of memblock_alloc*() functions and call panic() in case of error. The panic message repeats the one used by panicing memblock allocators with adjustment of parameters to include only relevant ones. The replacement was mostly automated with semantic patches like the one below with manual massaging of format strings. @@ expression ptr, size, align; @@ ptr = memblock_alloc(size, align); + if (!ptr) + panic("%s: Failed to allocate %lu bytes align=0x%lx\n", __func__, size, align); [anders.roxell@linaro.org: use '%pa' with 'phys_addr_t' type] Link: http://lkml.kernel.org/r/20190131161046.21886-1-anders.roxell@linaro.org [rppt@linux.ibm.com: fix format strings for panics after memblock_alloc] Link: http://lkml.kernel.org/r/1548950940-15145-1-git-send-email-rppt@linux.ibm.com [rppt@linux.ibm.com: don't panic if the allocation in sparse_buffer_init fails] Link: http://lkml.kernel.org/r/20190131074018.GD28876@rapoport-lnx [akpm@linux-foundation.org: fix xtensa printk warning] Link: http://lkml.kernel.org/r/1548057848-15136-20-git-send-email-rppt@linux.ibm.com Signed-off-by: Mike Rapoport Signed-off-by: Anders Roxell Reviewed-by: Guo Ren [c-sky] Acked-by: Paul Burton [MIPS] Acked-by: Heiko Carstens [s390] Reviewed-by: Juergen Gross [Xen] Reviewed-by: Geert Uytterhoeven [m68k] Acked-by: Max Filippov [xtensa] Cc: Catalin Marinas Cc: Christophe Leroy Cc: Christoph Hellwig Cc: "David S. Miller" Cc: Dennis Zhou Cc: Greentime Hu Cc: Greg Kroah-Hartman Cc: Guan Xuetao Cc: Guo Ren Cc: Mark Salter Cc: Matt Turner Cc: Michael Ellerman Cc: Michal Simek Cc: Petr Mladek Cc: Richard Weinberger Cc: Rich Felker Cc: Rob Herring Cc: Rob Herring Cc: Russell King Cc: Stafford Horne Cc: Tony Luck Cc: Vineet Gupta Cc: Yoshinori Sato Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- lib/cpumask.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'lib') diff --git a/lib/cpumask.c b/lib/cpumask.c index 087a3e9a0202..0cb672eb107c 100644 --- a/lib/cpumask.c +++ b/lib/cpumask.c @@ -165,6 +165,9 @@ EXPORT_SYMBOL(zalloc_cpumask_var); void __init alloc_bootmem_cpumask_var(cpumask_var_t *mask) { *mask = memblock_alloc(cpumask_size(), SMP_CACHE_BYTES); + if (!*mask) + panic("%s: Failed to allocate %u bytes\n", __func__, + cpumask_size()); } /** -- cgit v1.2.3 From ba20ba2e3743bac786dff777954c11930256075e Mon Sep 17 00:00:00 2001 From: Kent Overstreet Date: Mon, 11 Mar 2019 23:31:14 -0700 Subject: generic radix trees Very simple radix tree implementation that supports storing arbitrary size entries, up to PAGE_SIZE - upcoming patches will convert existing flex_array users to genradixes. The new genradix code has a much simpler API and implementation, and doesn't have a hard limit on the number of elements like flex_array does. 
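As a usage sketch (illustrative only - "struct foo" and the functions around it
are hypothetical and not part of this patch; the calls follow the API added
below), the new interface looks like:

    #include <linux/generic-radix-tree.h>

    struct foo {                    /* any type up to PAGE_SIZE in size */
            u64 key;
    };

    static GENRADIX(struct foo) foo_genradix;

    static int store_foo(size_t idx, const struct foo *src, gfp_t gfp)
    {
            struct foo *p;

            /* allocates the backing page for @idx if it does not exist yet */
            p = genradix_ptr_alloc(&foo_genradix, idx, gfp);
            if (!p)
                    return -ENOMEM;

            *p = *src;
            return 0;
    }

    static void show_foos(void)
    {
            struct genradix_iter iter;
            struct foo *p;

            /* visits every entry that has backing storage, in index order */
            genradix_for_each(&foo_genradix, iter, p)
                    pr_info("foo at index %zu\n", iter.pos);

            /* frees all pages and leaves the genradix reinitialized and empty */
            genradix_free(&foo_genradix);
    }
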
Link: http://lkml.kernel.org/r/20181217131929.11727-5-kent.overstreet@gmail.com Signed-off-by: Kent Overstreet Cc: Alexey Dobriyan Cc: Al Viro Cc: Dave Hansen Cc: Eric Paris Cc: Marcelo Ricardo Leitner Cc: Matthew Wilcox Cc: Neil Horman Cc: Paul Moore Cc: Pravin B Shelar Cc: Shaohua Li Cc: Stephen Smalley Cc: Vlad Yasevich Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- Documentation/core-api/generic-radix-tree.rst | 12 ++ Documentation/core-api/index.rst | 1 + include/linux/generic-radix-tree.h | 231 ++++++++++++++++++++++++++ lib/Makefile | 3 +- lib/generic-radix-tree.c | 217 ++++++++++++++++++++++++ 5 files changed, 463 insertions(+), 1 deletion(-) create mode 100644 Documentation/core-api/generic-radix-tree.rst create mode 100644 include/linux/generic-radix-tree.h create mode 100644 lib/generic-radix-tree.c (limited to 'lib') diff --git a/Documentation/core-api/generic-radix-tree.rst b/Documentation/core-api/generic-radix-tree.rst new file mode 100644 index 000000000000..ed42839ae42f --- /dev/null +++ b/Documentation/core-api/generic-radix-tree.rst @@ -0,0 +1,12 @@ +================================= +Generic radix trees/sparse arrays +================================= + +.. kernel-doc:: include/linux/generic-radix-tree.h + :doc: Generic radix trees/sparse arrays + +generic radix tree functions +---------------------------- + +.. kernel-doc:: include/linux/generic-radix-tree.h + :functions: diff --git a/Documentation/core-api/index.rst b/Documentation/core-api/index.rst index 3adee82be311..6870baffef82 100644 --- a/Documentation/core-api/index.rst +++ b/Documentation/core-api/index.rst @@ -28,6 +28,7 @@ Core utilities errseq printk-formats circular-buffers + generic-radix-tree memory-allocation mm-api gfp_mask-from-fs-io diff --git a/include/linux/generic-radix-tree.h b/include/linux/generic-radix-tree.h new file mode 100644 index 000000000000..3a91130a4fbd --- /dev/null +++ b/include/linux/generic-radix-tree.h @@ -0,0 +1,231 @@ +#ifndef _LINUX_GENERIC_RADIX_TREE_H +#define _LINUX_GENERIC_RADIX_TREE_H + +/** + * DOC: Generic radix trees/sparse arrays: + * + * Very simple and minimalistic, supporting arbitrary size entries up to + * PAGE_SIZE. + * + * A genradix is defined with the type it will store, like so: + * + * static GENRADIX(struct foo) foo_genradix; + * + * The main operations are: + * + * - genradix_init(radix) - initialize an empty genradix + * + * - genradix_free(radix) - free all memory owned by the genradix and + * reinitialize it + * + * - genradix_ptr(radix, idx) - gets a pointer to the entry at idx, returning + * NULL if that entry does not exist + * + * - genradix_ptr_alloc(radix, idx, gfp) - gets a pointer to an entry, + * allocating it if necessary + * + * - genradix_for_each(radix, iter, p) - iterate over each entry in a genradix + * + * The radix tree allocates one page of entries at a time, so entries may exist + * that were never explicitly allocated - they will be initialized to all + * zeroes. + * + * Internally, a genradix is just a radix tree of pages, and indexing works in + * terms of byte offsets. The wrappers in this header file use sizeof on the + * type the radix contains to calculate a byte offset from the index - see + * __idx_to_offset. 
+ */ + +#include +#include +#include +#include + +struct genradix_root; + +struct __genradix { + struct genradix_root __rcu *root; +}; + +/* + * NOTE: currently, sizeof(_type) must not be larger than PAGE_SIZE: + */ + +#define __GENRADIX_INITIALIZER \ + { \ + .tree = { \ + .root = NULL, \ + } \ + } + +/* + * We use a 0 size array to stash the type we're storing without taking any + * space at runtime - then the various accessor macros can use typeof() to get + * to it for casts/sizeof - we also force the alignment so that storing a type + * with a ridiculous alignment doesn't blow up the alignment or size of the + * genradix. + */ + +#define GENRADIX(_type) \ +struct { \ + struct __genradix tree; \ + _type type[0] __aligned(1); \ +} + +#define DEFINE_GENRADIX(_name, _type) \ + GENRADIX(_type) _name = __GENRADIX_INITIALIZER + +/** + * genradix_init - initialize a genradix + * @_radix: genradix to initialize + * + * Does not fail + */ +#define genradix_init(_radix) \ +do { \ + *(_radix) = (typeof(*_radix)) __GENRADIX_INITIALIZER; \ +} while (0) + +void __genradix_free(struct __genradix *); + +/** + * genradix_free: free all memory owned by a genradix + * @_radix: the genradix to free + * + * After freeing, @_radix will be reinitialized and empty + */ +#define genradix_free(_radix) __genradix_free(&(_radix)->tree) + +static inline size_t __idx_to_offset(size_t idx, size_t obj_size) +{ + if (__builtin_constant_p(obj_size)) + BUILD_BUG_ON(obj_size > PAGE_SIZE); + else + BUG_ON(obj_size > PAGE_SIZE); + + if (!is_power_of_2(obj_size)) { + size_t objs_per_page = PAGE_SIZE / obj_size; + + return (idx / objs_per_page) * PAGE_SIZE + + (idx % objs_per_page) * obj_size; + } else { + return idx * obj_size; + } +} + +#define __genradix_cast(_radix) (typeof((_radix)->type[0]) *) +#define __genradix_obj_size(_radix) sizeof((_radix)->type[0]) +#define __genradix_idx_to_offset(_radix, _idx) \ + __idx_to_offset(_idx, __genradix_obj_size(_radix)) + +void *__genradix_ptr(struct __genradix *, size_t); + +/** + * genradix_ptr - get a pointer to a genradix entry + * @_radix: genradix to access + * @_idx: index to fetch + * + * Returns a pointer to entry at @_idx, or NULL if that entry does not exist. 
+ */ +#define genradix_ptr(_radix, _idx) \ + (__genradix_cast(_radix) \ + __genradix_ptr(&(_radix)->tree, \ + __genradix_idx_to_offset(_radix, _idx))) + +void *__genradix_ptr_alloc(struct __genradix *, size_t, gfp_t); + +/** + * genradix_ptr_alloc - get a pointer to a genradix entry, allocating it + * if necessary + * @_radix: genradix to access + * @_idx: index to fetch + * @_gfp: gfp mask + * + * Returns a pointer to entry at @_idx, or NULL on allocation failure + */ +#define genradix_ptr_alloc(_radix, _idx, _gfp) \ + (__genradix_cast(_radix) \ + __genradix_ptr_alloc(&(_radix)->tree, \ + __genradix_idx_to_offset(_radix, _idx), \ + _gfp)) + +struct genradix_iter { + size_t offset; + size_t pos; +}; + +/** + * genradix_iter_init - initialize a genradix_iter + * @_radix: genradix that will be iterated over + * @_idx: index to start iterating from + */ +#define genradix_iter_init(_radix, _idx) \ + ((struct genradix_iter) { \ + .pos = (_idx), \ + .offset = __genradix_idx_to_offset((_radix), (_idx)),\ + }) + +void *__genradix_iter_peek(struct genradix_iter *, struct __genradix *, size_t); + +/** + * genradix_iter_peek - get first entry at or above iterator's current + * position + * @_iter: a genradix_iter + * @_radix: genradix being iterated over + * + * If no more entries exist at or above @_iter's current position, returns NULL + */ +#define genradix_iter_peek(_iter, _radix) \ + (__genradix_cast(_radix) \ + __genradix_iter_peek(_iter, &(_radix)->tree, \ + PAGE_SIZE / __genradix_obj_size(_radix))) + +static inline void __genradix_iter_advance(struct genradix_iter *iter, + size_t obj_size) +{ + iter->offset += obj_size; + + if (!is_power_of_2(obj_size) && + (iter->offset & (PAGE_SIZE - 1)) + obj_size > PAGE_SIZE) + iter->offset = round_up(iter->offset, PAGE_SIZE); + + iter->pos++; +} + +#define genradix_iter_advance(_iter, _radix) \ + __genradix_iter_advance(_iter, __genradix_obj_size(_radix)) + +#define genradix_for_each_from(_radix, _iter, _p, _start) \ + for (_iter = genradix_iter_init(_radix, _start); \ + (_p = genradix_iter_peek(&_iter, _radix)) != NULL; \ + genradix_iter_advance(&_iter, _radix)) + +/** + * genradix_for_each - iterate over entry in a genradix + * @_radix: genradix to iterate over + * @_iter: a genradix_iter to track current position + * @_p: pointer to genradix entry type + * + * On every iteration, @_p will point to the current entry, and @_iter.pos + * will be the current entry's index. 
+ */ +#define genradix_for_each(_radix, _iter, _p) \ + genradix_for_each_from(_radix, _iter, _p, 0) + +int __genradix_prealloc(struct __genradix *, size_t, gfp_t); + +/** + * genradix_prealloc - preallocate entries in a generic radix tree + * @_radix: genradix to preallocate + * @_nr: number of entries to preallocate + * @_gfp: gfp mask + * + * Returns 0 on success, -ENOMEM on failure + */ +#define genradix_prealloc(_radix, _nr, _gfp) \ + __genradix_prealloc(&(_radix)->tree, \ + __genradix_idx_to_offset(_radix, _nr + 1),\ + _gfp) + + +#endif /* _LINUX_GENERIC_RADIX_TREE_H */ diff --git a/lib/Makefile b/lib/Makefile index 647517940b29..b798b41d01ae 100644 --- a/lib/Makefile +++ b/lib/Makefile @@ -38,7 +38,8 @@ obj-y += bcd.o div64.o sort.o parser.o debug_locks.o random32.o \ gcd.o lcm.o list_sort.o uuid.o flex_array.o iov_iter.o clz_ctz.o \ bsearch.o find_bit.o llist.o memweight.o kfifo.o \ percpu-refcount.o rhashtable.o reciprocal_div.o \ - once.o refcount.o usercopy.o errseq.o bucket_locks.o + once.o refcount.o usercopy.o errseq.o bucket_locks.o \ + generic-radix-tree.o obj-$(CONFIG_STRING_SELFTEST) += test_string.o obj-y += string_helpers.o obj-$(CONFIG_TEST_STRING_HELPERS) += test-string_helpers.o diff --git a/lib/generic-radix-tree.c b/lib/generic-radix-tree.c new file mode 100644 index 000000000000..a7bafc413730 --- /dev/null +++ b/lib/generic-radix-tree.c @@ -0,0 +1,217 @@ + +#include +#include +#include + +#define GENRADIX_ARY (PAGE_SIZE / sizeof(struct genradix_node *)) +#define GENRADIX_ARY_SHIFT ilog2(GENRADIX_ARY) + +struct genradix_node { + union { + /* Interior node: */ + struct genradix_node *children[GENRADIX_ARY]; + + /* Leaf: */ + u8 data[PAGE_SIZE]; + }; +}; + +static inline int genradix_depth_shift(unsigned depth) +{ + return PAGE_SHIFT + GENRADIX_ARY_SHIFT * depth; +} + +/* + * Returns size (of data, in bytes) that a tree of a given depth holds: + */ +static inline size_t genradix_depth_size(unsigned depth) +{ + return 1UL << genradix_depth_shift(depth); +} + +/* depth that's needed for a genradix that can address up to ULONG_MAX: */ +#define GENRADIX_MAX_DEPTH \ + DIV_ROUND_UP(BITS_PER_LONG - PAGE_SHIFT, GENRADIX_ARY_SHIFT) + +#define GENRADIX_DEPTH_MASK \ + ((unsigned long) (roundup_pow_of_two(GENRADIX_MAX_DEPTH + 1) - 1)) + +unsigned genradix_root_to_depth(struct genradix_root *r) +{ + return (unsigned long) r & GENRADIX_DEPTH_MASK; +} + +struct genradix_node *genradix_root_to_node(struct genradix_root *r) +{ + return (void *) ((unsigned long) r & ~GENRADIX_DEPTH_MASK); +} + +/* + * Returns pointer to the specified byte @offset within @radix, or NULL if not + * allocated + */ +void *__genradix_ptr(struct __genradix *radix, size_t offset) +{ + struct genradix_root *r = READ_ONCE(radix->root); + struct genradix_node *n = genradix_root_to_node(r); + unsigned level = genradix_root_to_depth(r); + + if (ilog2(offset) >= genradix_depth_shift(level)) + return NULL; + + while (1) { + if (!n) + return NULL; + if (!level) + break; + + level--; + + n = n->children[offset >> genradix_depth_shift(level)]; + offset &= genradix_depth_size(level) - 1; + } + + return &n->data[offset]; +} +EXPORT_SYMBOL(__genradix_ptr); + +/* + * Returns pointer to the specified byte @offset within @radix, allocating it if + * necessary - newly allocated slots are always zeroed out: + */ +void *__genradix_ptr_alloc(struct __genradix *radix, size_t offset, + gfp_t gfp_mask) +{ + struct genradix_root *v = READ_ONCE(radix->root); + struct genradix_node *n, *new_node = NULL; + unsigned level; + + /* 
Increase tree depth if necessary: */ + while (1) { + struct genradix_root *r = v, *new_root; + + n = genradix_root_to_node(r); + level = genradix_root_to_depth(r); + + if (n && ilog2(offset) < genradix_depth_shift(level)) + break; + + if (!new_node) { + new_node = (void *) + __get_free_page(gfp_mask|__GFP_ZERO); + if (!new_node) + return NULL; + } + + new_node->children[0] = n; + new_root = ((struct genradix_root *) + ((unsigned long) new_node | (n ? level + 1 : 0))); + + if ((v = cmpxchg_release(&radix->root, r, new_root)) == r) { + v = new_root; + new_node = NULL; + } + } + + while (level--) { + struct genradix_node **p = + &n->children[offset >> genradix_depth_shift(level)]; + offset &= genradix_depth_size(level) - 1; + + n = READ_ONCE(*p); + if (!n) { + if (!new_node) { + new_node = (void *) + __get_free_page(gfp_mask|__GFP_ZERO); + if (!new_node) + return NULL; + } + + if (!(n = cmpxchg_release(p, NULL, new_node))) + swap(n, new_node); + } + } + + if (new_node) + free_page((unsigned long) new_node); + + return &n->data[offset]; +} +EXPORT_SYMBOL(__genradix_ptr_alloc); + +void *__genradix_iter_peek(struct genradix_iter *iter, + struct __genradix *radix, + size_t objs_per_page) +{ + struct genradix_root *r; + struct genradix_node *n; + unsigned level, i; +restart: + r = READ_ONCE(radix->root); + if (!r) + return NULL; + + n = genradix_root_to_node(r); + level = genradix_root_to_depth(r); + + if (ilog2(iter->offset) >= genradix_depth_shift(level)) + return NULL; + + while (level) { + level--; + + i = (iter->offset >> genradix_depth_shift(level)) & + (GENRADIX_ARY - 1); + + while (!n->children[i]) { + i++; + iter->offset = round_down(iter->offset + + genradix_depth_size(level), + genradix_depth_size(level)); + iter->pos = (iter->offset >> PAGE_SHIFT) * + objs_per_page; + if (i == GENRADIX_ARY) + goto restart; + } + + n = n->children[i]; + } + + return &n->data[iter->offset & (PAGE_SIZE - 1)]; +} +EXPORT_SYMBOL(__genradix_iter_peek); + +static void genradix_free_recurse(struct genradix_node *n, unsigned level) +{ + if (level) { + unsigned i; + + for (i = 0; i < GENRADIX_ARY; i++) + if (n->children[i]) + genradix_free_recurse(n->children[i], level - 1); + } + + free_page((unsigned long) n); +} + +int __genradix_prealloc(struct __genradix *radix, size_t size, + gfp_t gfp_mask) +{ + size_t offset; + + for (offset = 0; offset < size; offset += PAGE_SIZE) + if (!__genradix_ptr_alloc(radix, offset, gfp_mask)) + return -ENOMEM; + + return 0; +} +EXPORT_SYMBOL(__genradix_prealloc); + +void __genradix_free(struct __genradix *radix) +{ + struct genradix_root *r = xchg(&radix->root, NULL); + + genradix_free_recurse(genradix_root_to_node(r), + genradix_root_to_depth(r)); +} +EXPORT_SYMBOL(__genradix_free); -- cgit v1.2.3 From 586187d7de71b4da7956ba588ae42253b9ff6482 Mon Sep 17 00:00:00 2001 From: Kent Overstreet Date: Mon, 11 Mar 2019 23:31:26 -0700 Subject: Drop flex_arrays All existing users have been converted to generic radix trees Link: http://lkml.kernel.org/r/20181217131929.11727-8-kent.overstreet@gmail.com Signed-off-by: Kent Overstreet Acked-by: Dave Hansen Cc: Alexey Dobriyan Cc: Al Viro Cc: Eric Paris Cc: Marcelo Ricardo Leitner Cc: Matthew Wilcox Cc: Neil Horman Cc: Paul Moore Cc: Pravin B Shelar Cc: Shaohua Li Cc: Stephen Smalley Cc: Vlad Yasevich Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- Documentation/core-api/flexible-arrays.rst | 130 ---------- Documentation/flexible-arrays.txt | 123 --------- include/linux/flex_array.h | 149 ----------- include/linux/poison.h | 3 
- lib/Makefile | 2 +- lib/flex_array.c | 398 ----------------------------- tools/include/linux/poison.h | 3 - 7 files changed, 1 insertion(+), 807 deletions(-) delete mode 100644 Documentation/core-api/flexible-arrays.rst delete mode 100644 Documentation/flexible-arrays.txt delete mode 100644 include/linux/flex_array.h delete mode 100644 lib/flex_array.c (limited to 'lib') diff --git a/Documentation/core-api/flexible-arrays.rst b/Documentation/core-api/flexible-arrays.rst deleted file mode 100644 index b6b85a1b518e..000000000000 --- a/Documentation/core-api/flexible-arrays.rst +++ /dev/null @@ -1,130 +0,0 @@ - -=================================== -Using flexible arrays in the kernel -=================================== - -Large contiguous memory allocations can be unreliable in the Linux kernel. -Kernel programmers will sometimes respond to this problem by allocating -pages with :c:func:`vmalloc()`. This solution not ideal, though. On 32-bit -systems, memory from vmalloc() must be mapped into a relatively small address -space; it's easy to run out. On SMP systems, the page table changes required -by vmalloc() allocations can require expensive cross-processor interrupts on -all CPUs. And, on all systems, use of space in the vmalloc() range increases -pressure on the translation lookaside buffer (TLB), reducing the performance -of the system. - -In many cases, the need for memory from vmalloc() can be eliminated by piecing -together an array from smaller parts; the flexible array library exists to make -this task easier. - -A flexible array holds an arbitrary (within limits) number of fixed-sized -objects, accessed via an integer index. Sparse arrays are handled -reasonably well. Only single-page allocations are made, so memory -allocation failures should be relatively rare. The down sides are that the -arrays cannot be indexed directly, individual object size cannot exceed the -system page size, and putting data into a flexible array requires a copy -operation. It's also worth noting that flexible arrays do no internal -locking at all; if concurrent access to an array is possible, then the -caller must arrange for appropriate mutual exclusion. - -The creation of a flexible array is done with :c:func:`flex_array_alloc()`:: - - #include - - struct flex_array *flex_array_alloc(int element_size, - unsigned int total, - gfp_t flags); - -The individual object size is provided by ``element_size``, while total is the -maximum number of objects which can be stored in the array. The flags -argument is passed directly to the internal memory allocation calls. With -the current code, using flags to ask for high memory is likely to lead to -notably unpleasant side effects. - -It is also possible to define flexible arrays at compile time with:: - - DEFINE_FLEX_ARRAY(name, element_size, total); - -This macro will result in a definition of an array with the given name; the -element size and total will be checked for validity at compile time. - -Storing data into a flexible array is accomplished with a call to -:c:func:`flex_array_put()`:: - - int flex_array_put(struct flex_array *array, unsigned int element_nr, - void *src, gfp_t flags); - -This call will copy the data from src into the array, in the position -indicated by ``element_nr`` (which must be less than the maximum specified when -the array was created). If any memory allocations must be performed, flags -will be used. The return value is zero on success, a negative error code -otherwise. 
- -There might possibly be a need to store data into a flexible array while -running in some sort of atomic context; in this situation, sleeping in the -memory allocator would be a bad thing. That can be avoided by using -``GFP_ATOMIC`` for the flags value, but, often, there is a better way. The -trick is to ensure that any needed memory allocations are done before -entering atomic context, using :c:func:`flex_array_prealloc()`:: - - int flex_array_prealloc(struct flex_array *array, unsigned int start, - unsigned int nr_elements, gfp_t flags); - -This function will ensure that memory for the elements indexed in the range -defined by ``start`` and ``nr_elements`` has been allocated. Thereafter, a -``flex_array_put()`` call on an element in that range is guaranteed not to -block. - -Getting data back out of the array is done with :c:func:`flex_array_get()`:: - - void *flex_array_get(struct flex_array *fa, unsigned int element_nr); - -The return value is a pointer to the data element, or NULL if that -particular element has never been allocated. - -Note that it is possible to get back a valid pointer for an element which -has never been stored in the array. Memory for array elements is allocated -one page at a time; a single allocation could provide memory for several -adjacent elements. Flexible array elements are normally initialized to the -value ``FLEX_ARRAY_FREE`` (defined as 0x6c in ), so errors -involving that number probably result from use of unstored array entries. -Note that, if array elements are allocated with ``__GFP_ZERO``, they will be -initialized to zero and this poisoning will not happen. - -Individual elements in the array can be cleared with -:c:func:`flex_array_clear()`:: - - int flex_array_clear(struct flex_array *array, unsigned int element_nr); - -This function will set the given element to ``FLEX_ARRAY_FREE`` and return -zero. If storage for the indicated element is not allocated for the array, -``flex_array_clear()`` will return ``-EINVAL`` instead. Note that clearing an -element does not release the storage associated with it; to reduce the -allocated size of an array, call :c:func:`flex_array_shrink()`:: - - int flex_array_shrink(struct flex_array *array); - -The return value will be the number of pages of memory actually freed. -This function works by scanning the array for pages containing nothing but -``FLEX_ARRAY_FREE`` bytes, so (1) it can be expensive, and (2) it will not work -if the array's pages are allocated with ``__GFP_ZERO``. - -It is possible to remove all elements of an array with a call to -:c:func:`flex_array_free_parts()`:: - - void flex_array_free_parts(struct flex_array *array); - -This call frees all elements, but leaves the array itself in place. -Freeing the entire array is done with :c:func:`flex_array_free()`:: - - void flex_array_free(struct flex_array *array); - -As of this writing, there are no users of flexible arrays in the mainline -kernel. The functions described here are also not exported to modules; -that will probably be fixed when somebody comes up with a need for it. - - -Flexible array functions ------------------------- - -.. 
kernel-doc:: include/linux/flex_array.h diff --git a/Documentation/flexible-arrays.txt b/Documentation/flexible-arrays.txt deleted file mode 100644 index a0f2989dd804..000000000000 --- a/Documentation/flexible-arrays.txt +++ /dev/null @@ -1,123 +0,0 @@ -=================================== -Using flexible arrays in the kernel -=================================== - -:Updated: Last updated for 2.6.32 -:Author: Jonathan Corbet - -Large contiguous memory allocations can be unreliable in the Linux kernel. -Kernel programmers will sometimes respond to this problem by allocating -pages with vmalloc(). This solution not ideal, though. On 32-bit systems, -memory from vmalloc() must be mapped into a relatively small address space; -it's easy to run out. On SMP systems, the page table changes required by -vmalloc() allocations can require expensive cross-processor interrupts on -all CPUs. And, on all systems, use of space in the vmalloc() range -increases pressure on the translation lookaside buffer (TLB), reducing the -performance of the system. - -In many cases, the need for memory from vmalloc() can be eliminated by -piecing together an array from smaller parts; the flexible array library -exists to make this task easier. - -A flexible array holds an arbitrary (within limits) number of fixed-sized -objects, accessed via an integer index. Sparse arrays are handled -reasonably well. Only single-page allocations are made, so memory -allocation failures should be relatively rare. The down sides are that the -arrays cannot be indexed directly, individual object size cannot exceed the -system page size, and putting data into a flexible array requires a copy -operation. It's also worth noting that flexible arrays do no internal -locking at all; if concurrent access to an array is possible, then the -caller must arrange for appropriate mutual exclusion. - -The creation of a flexible array is done with:: - - #include - - struct flex_array *flex_array_alloc(int element_size, - unsigned int total, - gfp_t flags); - -The individual object size is provided by element_size, while total is the -maximum number of objects which can be stored in the array. The flags -argument is passed directly to the internal memory allocation calls. With -the current code, using flags to ask for high memory is likely to lead to -notably unpleasant side effects. - -It is also possible to define flexible arrays at compile time with:: - - DEFINE_FLEX_ARRAY(name, element_size, total); - -This macro will result in a definition of an array with the given name; the -element size and total will be checked for validity at compile time. - -Storing data into a flexible array is accomplished with a call to:: - - int flex_array_put(struct flex_array *array, unsigned int element_nr, - void *src, gfp_t flags); - -This call will copy the data from src into the array, in the position -indicated by element_nr (which must be less than the maximum specified when -the array was created). If any memory allocations must be performed, flags -will be used. The return value is zero on success, a negative error code -otherwise. - -There might possibly be a need to store data into a flexible array while -running in some sort of atomic context; in this situation, sleeping in the -memory allocator would be a bad thing. That can be avoided by using -GFP_ATOMIC for the flags value, but, often, there is a better way. 
The -trick is to ensure that any needed memory allocations are done before -entering atomic context, using:: - - int flex_array_prealloc(struct flex_array *array, unsigned int start, - unsigned int nr_elements, gfp_t flags); - -This function will ensure that memory for the elements indexed in the range -defined by start and nr_elements has been allocated. Thereafter, a -flex_array_put() call on an element in that range is guaranteed not to -block. - -Getting data back out of the array is done with:: - - void *flex_array_get(struct flex_array *fa, unsigned int element_nr); - -The return value is a pointer to the data element, or NULL if that -particular element has never been allocated. - -Note that it is possible to get back a valid pointer for an element which -has never been stored in the array. Memory for array elements is allocated -one page at a time; a single allocation could provide memory for several -adjacent elements. Flexible array elements are normally initialized to the -value FLEX_ARRAY_FREE (defined as 0x6c in ), so errors -involving that number probably result from use of unstored array entries. -Note that, if array elements are allocated with __GFP_ZERO, they will be -initialized to zero and this poisoning will not happen. - -Individual elements in the array can be cleared with:: - - int flex_array_clear(struct flex_array *array, unsigned int element_nr); - -This function will set the given element to FLEX_ARRAY_FREE and return -zero. If storage for the indicated element is not allocated for the array, -flex_array_clear() will return -EINVAL instead. Note that clearing an -element does not release the storage associated with it; to reduce the -allocated size of an array, call:: - - int flex_array_shrink(struct flex_array *array); - -The return value will be the number of pages of memory actually freed. -This function works by scanning the array for pages containing nothing but -FLEX_ARRAY_FREE bytes, so (1) it can be expensive, and (2) it will not work -if the array's pages are allocated with __GFP_ZERO. - -It is possible to remove all elements of an array with a call to:: - - void flex_array_free_parts(struct flex_array *array); - -This call frees all elements, but leaves the array itself in place. -Freeing the entire array is done with:: - - void flex_array_free(struct flex_array *array); - -As of this writing, there are no users of flexible arrays in the mainline -kernel. The functions described here are also not exported to modules; -that will probably be fixed when somebody comes up with a need for it. diff --git a/include/linux/flex_array.h b/include/linux/flex_array.h deleted file mode 100644 index b94fa61b51fb..000000000000 --- a/include/linux/flex_array.h +++ /dev/null @@ -1,149 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _FLEX_ARRAY_H -#define _FLEX_ARRAY_H - -#include -#include -#include - -#define FLEX_ARRAY_PART_SIZE PAGE_SIZE -#define FLEX_ARRAY_BASE_SIZE PAGE_SIZE - -struct flex_array_part; - -/* - * This is meant to replace cases where an array-like - * structure has gotten too big to fit into kmalloc() - * and the developer is getting tempted to use - * vmalloc(). 
- */ - -struct flex_array { - union { - struct { - int element_size; - int total_nr_elements; - int elems_per_part; - struct reciprocal_value reciprocal_elems; - struct flex_array_part *parts[]; - }; - /* - * This little trick makes sure that - * sizeof(flex_array) == PAGE_SIZE - */ - char padding[FLEX_ARRAY_BASE_SIZE]; - }; -}; - -/* Number of bytes left in base struct flex_array, excluding metadata */ -#define FLEX_ARRAY_BASE_BYTES_LEFT \ - (FLEX_ARRAY_BASE_SIZE - offsetof(struct flex_array, parts)) - -/* Number of pointers in base to struct flex_array_part pages */ -#define FLEX_ARRAY_NR_BASE_PTRS \ - (FLEX_ARRAY_BASE_BYTES_LEFT / sizeof(struct flex_array_part *)) - -/* Number of elements of size that fit in struct flex_array_part */ -#define FLEX_ARRAY_ELEMENTS_PER_PART(size) \ - (FLEX_ARRAY_PART_SIZE / size) - -/* - * Defines a statically allocated flex array and ensures its parameters are - * valid. - */ -#define DEFINE_FLEX_ARRAY(__arrayname, __element_size, __total) \ - struct flex_array __arrayname = { { { \ - .element_size = (__element_size), \ - .total_nr_elements = (__total), \ - } } }; \ - static inline void __arrayname##_invalid_parameter(void) \ - { \ - BUILD_BUG_ON((__total) > FLEX_ARRAY_NR_BASE_PTRS * \ - FLEX_ARRAY_ELEMENTS_PER_PART(__element_size)); \ - } - -/** - * flex_array_alloc() - Creates a flexible array. - * @element_size: individual object size. - * @total: maximum number of objects which can be stored. - * @flags: GFP flags - * - * Return: Returns an object of structure flex_array. - */ -struct flex_array *flex_array_alloc(int element_size, unsigned int total, - gfp_t flags); - -/** - * flex_array_prealloc() - Ensures that memory for the elements indexed in the - * range defined by start and nr_elements has been allocated. - * @fa: array to allocate memory to. - * @start: start address - * @nr_elements: number of elements to be allocated. - * @flags: GFP flags - * - */ -int flex_array_prealloc(struct flex_array *fa, unsigned int start, - unsigned int nr_elements, gfp_t flags); - -/** - * flex_array_free() - Removes all elements of a flexible array. - * @fa: array to be freed. - */ -void flex_array_free(struct flex_array *fa); - -/** - * flex_array_free_parts() - Removes all elements of a flexible array, but - * leaves the array itself in place. - * @fa: array to be emptied. - */ -void flex_array_free_parts(struct flex_array *fa); - -/** - * flex_array_put() - Stores data into a flexible array. - * @fa: array where element is to be stored. - * @element_nr: position to copy, must be less than the maximum specified when - * the array was created. - * @src: data source to be copied into the array. - * @flags: GFP flags - * - * Return: Returns zero on success, a negative error code otherwise. - */ -int flex_array_put(struct flex_array *fa, unsigned int element_nr, void *src, - gfp_t flags); - -/** - * flex_array_clear() - Clears an individual element in the array, sets the - * given element to FLEX_ARRAY_FREE. - * @element_nr: element position to clear. - * @fa: array to which element to be cleared belongs. - * - * Return: Returns zero on success, -EINVAL otherwise. - */ -int flex_array_clear(struct flex_array *fa, unsigned int element_nr); - -/** - * flex_array_get() - Retrieves data into a flexible array. - * - * @element_nr: Element position to retrieve data from. - * @fa: array from which data is to be retrieved. - * - * Return: Returns a pointer to the data element, or NULL if that - * particular element has never been allocated. 
- */ -void *flex_array_get(struct flex_array *fa, unsigned int element_nr); - -/** - * flex_array_shrink() - Reduces the allocated size of an array. - * @fa: array to shrink. - * - * Return: Returns number of pages of memory actually freed. - * - */ -int flex_array_shrink(struct flex_array *fa); - -#define flex_array_put_ptr(fa, nr, src, gfp) \ - flex_array_put(fa, nr, (void *)&(src), gfp) - -void *flex_array_get_ptr(struct flex_array *fa, unsigned int element_nr); - -#endif /* _FLEX_ARRAY_H */ diff --git a/include/linux/poison.h b/include/linux/poison.h index 5046bad0c1c5..d6d980a681c7 100644 --- a/include/linux/poison.h +++ b/include/linux/poison.h @@ -83,9 +83,6 @@ #define MUTEX_DEBUG_FREE 0x22 #define MUTEX_POISON_WW_CTX ((void *) 0x500 + POISON_POINTER_DELTA) -/********** lib/flex_array.c **********/ -#define FLEX_ARRAY_FREE 0x6c /* for use-after-free poisoning */ - /********** security/ **********/ #define KEY_DESTROY 0xbd diff --git a/lib/Makefile b/lib/Makefile index b798b41d01ae..4e066120a0d6 100644 --- a/lib/Makefile +++ b/lib/Makefile @@ -35,7 +35,7 @@ obj-y += lockref.o obj-y += bcd.o div64.o sort.o parser.o debug_locks.o random32.o \ bust_spinlocks.o kasprintf.o bitmap.o scatterlist.o \ - gcd.o lcm.o list_sort.o uuid.o flex_array.o iov_iter.o clz_ctz.o \ + gcd.o lcm.o list_sort.o uuid.o iov_iter.o clz_ctz.o \ bsearch.o find_bit.o llist.o memweight.o kfifo.o \ percpu-refcount.o rhashtable.o reciprocal_div.o \ once.o refcount.o usercopy.o errseq.o bucket_locks.o \ diff --git a/lib/flex_array.c b/lib/flex_array.c deleted file mode 100644 index 2eed22fa507c..000000000000 --- a/lib/flex_array.c +++ /dev/null @@ -1,398 +0,0 @@ -/* - * Flexible array managed in PAGE_SIZE parts - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. - * - * Copyright IBM Corporation, 2009 - * - * Author: Dave Hansen - */ - -#include -#include -#include -#include -#include - -struct flex_array_part { - char elements[FLEX_ARRAY_PART_SIZE]; -}; - -/* - * If a user requests an allocation which is small - * enough, we may simply use the space in the - * flex_array->parts[] array to store the user - * data. - */ -static inline int elements_fit_in_base(struct flex_array *fa) -{ - int data_size = fa->element_size * fa->total_nr_elements; - if (data_size <= FLEX_ARRAY_BASE_BYTES_LEFT) - return 1; - return 0; -} - -/** - * flex_array_alloc - allocate a new flexible array - * @element_size: the size of individual elements in the array - * @total: total number of elements that this should hold - * @flags: page allocation flags to use for base array - * - * Note: all locking must be provided by the caller. - * - * @total is used to size internal structures. If the user ever - * accesses any array indexes >=@total, it will produce errors. 
- * - * The maximum number of elements is defined as: the number of - * elements that can be stored in a page times the number of - * page pointers that we can fit in the base structure or (using - * integer math): - * - * (PAGE_SIZE/element_size) * (PAGE_SIZE-8)/sizeof(void *) - * - * Here's a table showing example capacities. Note that the maximum - * index that the get/put() functions is just nr_objects-1. This - * basically means that you get 4MB of storage on 32-bit and 2MB on - * 64-bit. - * - * - * Element size | Objects | Objects | - * PAGE_SIZE=4k | 32-bit | 64-bit | - * ---------------------------------| - * 1 bytes | 4177920 | 2088960 | - * 2 bytes | 2088960 | 1044480 | - * 3 bytes | 1392300 | 696150 | - * 4 bytes | 1044480 | 522240 | - * 32 bytes | 130560 | 65408 | - * 33 bytes | 126480 | 63240 | - * 2048 bytes | 2040 | 1020 | - * 2049 bytes | 1020 | 510 | - * void * | 1044480 | 261120 | - * - * Since 64-bit pointers are twice the size, we lose half the - * capacity in the base structure. Also note that no effort is made - * to efficiently pack objects across page boundaries. - */ -struct flex_array *flex_array_alloc(int element_size, unsigned int total, - gfp_t flags) -{ - struct flex_array *ret; - int elems_per_part = 0; - int max_size = 0; - struct reciprocal_value reciprocal_elems = { 0 }; - - if (element_size) { - elems_per_part = FLEX_ARRAY_ELEMENTS_PER_PART(element_size); - reciprocal_elems = reciprocal_value(elems_per_part); - max_size = FLEX_ARRAY_NR_BASE_PTRS * elems_per_part; - } - - /* max_size will end up 0 if element_size > PAGE_SIZE */ - if (total > max_size) - return NULL; - ret = kzalloc(sizeof(struct flex_array), flags); - if (!ret) - return NULL; - ret->element_size = element_size; - ret->total_nr_elements = total; - ret->elems_per_part = elems_per_part; - ret->reciprocal_elems = reciprocal_elems; - if (elements_fit_in_base(ret) && !(flags & __GFP_ZERO)) - memset(&ret->parts[0], FLEX_ARRAY_FREE, - FLEX_ARRAY_BASE_BYTES_LEFT); - return ret; -} -EXPORT_SYMBOL(flex_array_alloc); - -static int fa_element_to_part_nr(struct flex_array *fa, - unsigned int element_nr) -{ - /* - * if element_size == 0 we don't get here, so we never touch - * the zeroed fa->reciprocal_elems, which would yield invalid - * results - */ - return reciprocal_divide(element_nr, fa->reciprocal_elems); -} - -/** - * flex_array_free_parts - just free the second-level pages - * @fa: the flex array from which to free parts - * - * This is to be used in cases where the base 'struct flex_array' - * has been statically allocated and should not be free. 
- */ -void flex_array_free_parts(struct flex_array *fa) -{ - int part_nr; - - if (elements_fit_in_base(fa)) - return; - for (part_nr = 0; part_nr < FLEX_ARRAY_NR_BASE_PTRS; part_nr++) - kfree(fa->parts[part_nr]); -} -EXPORT_SYMBOL(flex_array_free_parts); - -void flex_array_free(struct flex_array *fa) -{ - flex_array_free_parts(fa); - kfree(fa); -} -EXPORT_SYMBOL(flex_array_free); - -static unsigned int index_inside_part(struct flex_array *fa, - unsigned int element_nr, - unsigned int part_nr) -{ - unsigned int part_offset; - - part_offset = element_nr - part_nr * fa->elems_per_part; - return part_offset * fa->element_size; -} - -static struct flex_array_part * -__fa_get_part(struct flex_array *fa, int part_nr, gfp_t flags) -{ - struct flex_array_part *part = fa->parts[part_nr]; - if (!part) { - part = kmalloc(sizeof(struct flex_array_part), flags); - if (!part) - return NULL; - if (!(flags & __GFP_ZERO)) - memset(part, FLEX_ARRAY_FREE, - sizeof(struct flex_array_part)); - fa->parts[part_nr] = part; - } - return part; -} - -/** - * flex_array_put - copy data into the array at @element_nr - * @fa: the flex array to copy data into - * @element_nr: index of the position in which to insert - * the new element. - * @src: address of data to copy into the array - * @flags: page allocation flags to use for array expansion - * - * - * Note that this *copies* the contents of @src into - * the array. If you are trying to store an array of - * pointers, make sure to pass in &ptr instead of ptr. - * You may instead wish to use the flex_array_put_ptr() - * helper function. - * - * Locking must be provided by the caller. - */ -int flex_array_put(struct flex_array *fa, unsigned int element_nr, void *src, - gfp_t flags) -{ - int part_nr = 0; - struct flex_array_part *part; - void *dst; - - if (element_nr >= fa->total_nr_elements) - return -ENOSPC; - if (!fa->element_size) - return 0; - if (elements_fit_in_base(fa)) - part = (struct flex_array_part *)&fa->parts[0]; - else { - part_nr = fa_element_to_part_nr(fa, element_nr); - part = __fa_get_part(fa, part_nr, flags); - if (!part) - return -ENOMEM; - } - dst = &part->elements[index_inside_part(fa, element_nr, part_nr)]; - memcpy(dst, src, fa->element_size); - return 0; -} -EXPORT_SYMBOL(flex_array_put); - -/** - * flex_array_clear - clear element in array at @element_nr - * @fa: the flex array of the element. - * @element_nr: index of the position to clear. - * - * Locking must be provided by the caller. - */ -int flex_array_clear(struct flex_array *fa, unsigned int element_nr) -{ - int part_nr = 0; - struct flex_array_part *part; - void *dst; - - if (element_nr >= fa->total_nr_elements) - return -ENOSPC; - if (!fa->element_size) - return 0; - if (elements_fit_in_base(fa)) - part = (struct flex_array_part *)&fa->parts[0]; - else { - part_nr = fa_element_to_part_nr(fa, element_nr); - part = fa->parts[part_nr]; - if (!part) - return -EINVAL; - } - dst = &part->elements[index_inside_part(fa, element_nr, part_nr)]; - memset(dst, FLEX_ARRAY_FREE, fa->element_size); - return 0; -} -EXPORT_SYMBOL(flex_array_clear); - -/** - * flex_array_prealloc - guarantee that array space exists - * @fa: the flex array for which to preallocate parts - * @start: index of first array element for which space is allocated - * @nr_elements: number of elements for which space is allocated - * @flags: page allocation flags - * - * This will guarantee that no future calls to flex_array_put() - * will allocate memory. 
It can be used if you are expecting to - * be holding a lock or in some atomic context while writing - * data into the array. - * - * Locking must be provided by the caller. - */ -int flex_array_prealloc(struct flex_array *fa, unsigned int start, - unsigned int nr_elements, gfp_t flags) -{ - int start_part; - int end_part; - int part_nr; - unsigned int end; - struct flex_array_part *part; - - if (!start && !nr_elements) - return 0; - if (start >= fa->total_nr_elements) - return -ENOSPC; - if (!nr_elements) - return 0; - - end = start + nr_elements - 1; - - if (end >= fa->total_nr_elements) - return -ENOSPC; - if (!fa->element_size) - return 0; - if (elements_fit_in_base(fa)) - return 0; - start_part = fa_element_to_part_nr(fa, start); - end_part = fa_element_to_part_nr(fa, end); - for (part_nr = start_part; part_nr <= end_part; part_nr++) { - part = __fa_get_part(fa, part_nr, flags); - if (!part) - return -ENOMEM; - } - return 0; -} -EXPORT_SYMBOL(flex_array_prealloc); - -/** - * flex_array_get - pull data back out of the array - * @fa: the flex array from which to extract data - * @element_nr: index of the element to fetch from the array - * - * Returns a pointer to the data at index @element_nr. Note - * that this is a copy of the data that was passed in. If you - * are using this to store pointers, you'll get back &ptr. You - * may instead wish to use the flex_array_get_ptr helper. - * - * Locking must be provided by the caller. - */ -void *flex_array_get(struct flex_array *fa, unsigned int element_nr) -{ - int part_nr = 0; - struct flex_array_part *part; - - if (!fa->element_size) - return NULL; - if (element_nr >= fa->total_nr_elements) - return NULL; - if (elements_fit_in_base(fa)) - part = (struct flex_array_part *)&fa->parts[0]; - else { - part_nr = fa_element_to_part_nr(fa, element_nr); - part = fa->parts[part_nr]; - if (!part) - return NULL; - } - return &part->elements[index_inside_part(fa, element_nr, part_nr)]; -} -EXPORT_SYMBOL(flex_array_get); - -/** - * flex_array_get_ptr - pull a ptr back out of the array - * @fa: the flex array from which to extract data - * @element_nr: index of the element to fetch from the array - * - * Returns the pointer placed in the flex array at element_nr using - * flex_array_put_ptr(). This function should not be called if the - * element in question was not set using the _put_ptr() helper. - */ -void *flex_array_get_ptr(struct flex_array *fa, unsigned int element_nr) -{ - void **tmp; - - tmp = flex_array_get(fa, element_nr); - if (!tmp) - return NULL; - - return *tmp; -} -EXPORT_SYMBOL(flex_array_get_ptr); - -static int part_is_free(struct flex_array_part *part) -{ - int i; - - for (i = 0; i < sizeof(struct flex_array_part); i++) - if (part->elements[i] != FLEX_ARRAY_FREE) - return 0; - return 1; -} - -/** - * flex_array_shrink - free unused second-level pages - * @fa: the flex array to shrink - * - * Frees all second-level pages that consist solely of unused - * elements. Returns the number of pages freed. - * - * Locking must be provided by the caller. 
- */ -int flex_array_shrink(struct flex_array *fa) -{ - struct flex_array_part *part; - int part_nr; - int ret = 0; - - if (!fa->total_nr_elements || !fa->element_size) - return 0; - if (elements_fit_in_base(fa)) - return ret; - for (part_nr = 0; part_nr < FLEX_ARRAY_NR_BASE_PTRS; part_nr++) { - part = fa->parts[part_nr]; - if (!part) - continue; - if (part_is_free(part)) { - fa->parts[part_nr] = NULL; - kfree(part); - ret++; - } - } - return ret; -} -EXPORT_SYMBOL(flex_array_shrink); diff --git a/tools/include/linux/poison.h b/tools/include/linux/poison.h index 9fdcd3eaac3b..d29725769107 100644 --- a/tools/include/linux/poison.h +++ b/tools/include/linux/poison.h @@ -87,9 +87,6 @@ #define MUTEX_DEBUG_INIT 0x11 #define MUTEX_DEBUG_FREE 0x22 -/********** lib/flex_array.c **********/ -#define FLEX_ARRAY_FREE 0x6c /* for use-after-free poisoning */ - /********** security/ **********/ #define KEY_DESTROY 0xbd -- cgit v1.2.3
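For out-of-tree code still carrying flex_array users, the conversion follows
roughly the same pattern used in this series: flex_array_alloc() plus
flex_array_prealloc() become genradix_init() plus genradix_prealloc(),
flex_array_put()/flex_array_get() become genradix_ptr_alloc()/genradix_ptr(),
and flex_array_free() becomes genradix_free(). A sketch of the
preallocate-before-atomic-context idiom (illustrative only; "struct item",
"item_lock" and the sizing are hypothetical):

    #include <linux/generic-radix-tree.h>
    #include <linux/spinlock.h>

    struct item {
            u64 key;
    };

    static GENRADIX(struct item) item_table;
    static DEFINE_SPINLOCK(item_lock);

    static int item_table_setup(size_t nr_items)
    {
            genradix_init(&item_table);

            /*
             * Allocate the backing pages for indexes 0..nr_items up front,
             * so the store below can run under a spinlock without allocating.
             */
            return genradix_prealloc(&item_table, nr_items, GFP_KERNEL);
    }

    static void item_store(size_t idx, const struct item *src)
    {
            struct item *p;

            spin_lock(&item_lock);
            /* non-NULL for any index covered by the preallocation above */
            p = genradix_ptr(&item_table, idx);
            *p = *src;
            spin_unlock(&item_lock);
    }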