Diffstat (limited to 'lib')
-rw-r--r-- | lib/cpu_rmap.c | 2
-rw-r--r-- | lib/debugobjects.c | 26
-rw-r--r-- | lib/dim/dim.c | 5
-rw-r--r-- | lib/dim/net_dim.c | 3
-rw-r--r-- | lib/dim/rdma_dim.c | 3
-rw-r--r-- | lib/iov_iter.c | 466
-rw-r--r-- | lib/kunit/executor_test.c | 11
-rw-r--r-- | lib/kunit/kunit-example-test.c | 56
-rw-r--r-- | lib/kunit/kunit-test.c | 88
-rw-r--r-- | lib/kunit/resource.c | 99
-rw-r--r-- | lib/kunit/test.c | 157
-rw-r--r-- | lib/maple_tree.c | 12
-rw-r--r-- | lib/radix-tree.c | 2
-rw-r--r-- | lib/radix-tree.h | 8
-rw-r--r-- | lib/raid6/neon.h | 22
-rw-r--r-- | lib/raid6/neon.uc | 1
-rw-r--r-- | lib/raid6/recov_neon.c | 8
-rw-r--r-- | lib/raid6/recov_neon_inner.c | 1
-rw-r--r-- | lib/test_firmware.c | 81
-rw-r--r-- | lib/test_vmalloc.c | 2
-rw-r--r-- | lib/xarray.c | 6
21 files changed, 480 insertions(+), 579 deletions(-)
diff --git a/lib/cpu_rmap.c b/lib/cpu_rmap.c index 73c1636b927b..4c348670da31 100644 --- a/lib/cpu_rmap.c +++ b/lib/cpu_rmap.c @@ -280,8 +280,8 @@ static void irq_cpu_rmap_release(struct kref *ref) struct irq_glue *glue = container_of(ref, struct irq_glue, notify.kref); - cpu_rmap_put(glue->rmap); glue->rmap->obj[glue->index] = NULL; + cpu_rmap_put(glue->rmap); kfree(glue); } diff --git a/lib/debugobjects.c b/lib/debugobjects.c index 003edc5ebd67..a517256a270b 100644 --- a/lib/debugobjects.c +++ b/lib/debugobjects.c @@ -126,7 +126,7 @@ static const char *obj_states[ODEBUG_STATE_MAX] = { static void fill_pool(void) { - gfp_t gfp = GFP_ATOMIC | __GFP_NORETRY | __GFP_NOWARN; + gfp_t gfp = __GFP_HIGH | __GFP_NOWARN; struct debug_obj *obj; unsigned long flags; @@ -498,6 +498,15 @@ static void debug_print_object(struct debug_obj *obj, char *msg) const struct debug_obj_descr *descr = obj->descr; static int limit; + /* + * Don't report if lookup_object_or_alloc() by the current thread + * failed because lookup_object_or_alloc()/debug_objects_oom() by a + * concurrent thread turned off debug_objects_enabled and cleared + * the hash buckets. + */ + if (!debug_objects_enabled) + return; + if (limit < 5 && descr != descr_test) { void *hint = descr->debug_hint ? descr->debug_hint(obj->object) : NULL; @@ -591,10 +600,21 @@ static void debug_objects_fill_pool(void) { /* * On RT enabled kernels the pool refill must happen in preemptible - * context: + * context -- for !RT kernels we rely on the fact that spinlock_t and + * raw_spinlock_t are basically the same type and this lock-type + * inversion works just fine. */ - if (!IS_ENABLED(CONFIG_PREEMPT_RT) || preemptible()) + if (!IS_ENABLED(CONFIG_PREEMPT_RT) || preemptible()) { + /* + * Annotate away the spinlock_t inside raw_spinlock_t warning + * by temporarily raising the wait-type to WAIT_SLEEP, matching + * the preemptible() condition above. 
+ */ + static DEFINE_WAIT_OVERRIDE_MAP(fill_pool_map, LD_WAIT_SLEEP); + lock_map_acquire_try(&fill_pool_map); fill_pool(); + lock_map_release(&fill_pool_map); + } } static void diff --git a/lib/dim/dim.c b/lib/dim/dim.c index 38045d6d0538..e89aaf07bde5 100644 --- a/lib/dim/dim.c +++ b/lib/dim/dim.c @@ -54,7 +54,7 @@ void dim_park_tired(struct dim *dim) } EXPORT_SYMBOL(dim_park_tired); -void dim_calc_stats(struct dim_sample *start, struct dim_sample *end, +bool dim_calc_stats(struct dim_sample *start, struct dim_sample *end, struct dim_stats *curr_stats) { /* u32 holds up to 71 minutes, should be enough */ @@ -66,7 +66,7 @@ void dim_calc_stats(struct dim_sample *start, struct dim_sample *end, start->comp_ctr); if (!delta_us) - return; + return false; curr_stats->ppms = DIV_ROUND_UP(npkts * USEC_PER_MSEC, delta_us); curr_stats->bpms = DIV_ROUND_UP(nbytes * USEC_PER_MSEC, delta_us); @@ -79,5 +79,6 @@ void dim_calc_stats(struct dim_sample *start, struct dim_sample *end, else curr_stats->cpe_ratio = 0; + return true; } EXPORT_SYMBOL(dim_calc_stats); diff --git a/lib/dim/net_dim.c b/lib/dim/net_dim.c index 53f6b9c6e936..4e32f7aaac86 100644 --- a/lib/dim/net_dim.c +++ b/lib/dim/net_dim.c @@ -227,7 +227,8 @@ void net_dim(struct dim *dim, struct dim_sample end_sample) dim->start_sample.event_ctr); if (nevents < DIM_NEVENTS) break; - dim_calc_stats(&dim->start_sample, &end_sample, &curr_stats); + if (!dim_calc_stats(&dim->start_sample, &end_sample, &curr_stats)) + break; if (net_dim_decision(&curr_stats, dim)) { dim->state = DIM_APPLY_NEW_PROFILE; schedule_work(&dim->work); diff --git a/lib/dim/rdma_dim.c b/lib/dim/rdma_dim.c index 15462d54758d..88f779486707 100644 --- a/lib/dim/rdma_dim.c +++ b/lib/dim/rdma_dim.c @@ -88,7 +88,8 @@ void rdma_dim(struct dim *dim, u64 completions) nevents = curr_sample->event_ctr - dim->start_sample.event_ctr; if (nevents < DIM_NEVENTS) break; - dim_calc_stats(&dim->start_sample, curr_sample, &curr_stats); + if (!dim_calc_stats(&dim->start_sample, curr_sample, &curr_stats)) + break; if (rdma_dim_decision(&curr_stats, dim)) { dim->state = DIM_APPLY_NEW_PROFILE; schedule_work(&dim->work); diff --git a/lib/iov_iter.c b/lib/iov_iter.c index 960223ed9199..b667b1e2f688 100644 --- a/lib/iov_iter.c +++ b/lib/iov_iter.c @@ -14,8 +14,6 @@ #include <linux/scatterlist.h> #include <linux/instrumented.h> -#define PIPE_PARANOIA /* for now */ - /* covers ubuf and kbuf alike */ #define iterate_buf(i, n, base, len, off, __p, STEP) { \ size_t __maybe_unused off = 0; \ @@ -198,150 +196,6 @@ static int copyin(void *to, const void __user *from, size_t n) return res; } -#ifdef PIPE_PARANOIA -static bool sanity(const struct iov_iter *i) -{ - struct pipe_inode_info *pipe = i->pipe; - unsigned int p_head = pipe->head; - unsigned int p_tail = pipe->tail; - unsigned int p_occupancy = pipe_occupancy(p_head, p_tail); - unsigned int i_head = i->head; - unsigned int idx; - - if (i->last_offset) { - struct pipe_buffer *p; - if (unlikely(p_occupancy == 0)) - goto Bad; // pipe must be non-empty - if (unlikely(i_head != p_head - 1)) - goto Bad; // must be at the last buffer... - - p = pipe_buf(pipe, i_head); - if (unlikely(p->offset + p->len != abs(i->last_offset))) - goto Bad; // ... 
at the end of segment - } else { - if (i_head != p_head) - goto Bad; // must be right after the last buffer - } - return true; -Bad: - printk(KERN_ERR "idx = %d, offset = %d\n", i_head, i->last_offset); - printk(KERN_ERR "head = %d, tail = %d, buffers = %d\n", - p_head, p_tail, pipe->ring_size); - for (idx = 0; idx < pipe->ring_size; idx++) - printk(KERN_ERR "[%p %p %d %d]\n", - pipe->bufs[idx].ops, - pipe->bufs[idx].page, - pipe->bufs[idx].offset, - pipe->bufs[idx].len); - WARN_ON(1); - return false; -} -#else -#define sanity(i) true -#endif - -static struct page *push_anon(struct pipe_inode_info *pipe, unsigned size) -{ - struct page *page = alloc_page(GFP_USER); - if (page) { - struct pipe_buffer *buf = pipe_buf(pipe, pipe->head++); - *buf = (struct pipe_buffer) { - .ops = &default_pipe_buf_ops, - .page = page, - .offset = 0, - .len = size - }; - } - return page; -} - -static void push_page(struct pipe_inode_info *pipe, struct page *page, - unsigned int offset, unsigned int size) -{ - struct pipe_buffer *buf = pipe_buf(pipe, pipe->head++); - *buf = (struct pipe_buffer) { - .ops = &page_cache_pipe_buf_ops, - .page = page, - .offset = offset, - .len = size - }; - get_page(page); -} - -static inline int last_offset(const struct pipe_buffer *buf) -{ - if (buf->ops == &default_pipe_buf_ops) - return buf->len; // buf->offset is 0 for those - else - return -(buf->offset + buf->len); -} - -static struct page *append_pipe(struct iov_iter *i, size_t size, - unsigned int *off) -{ - struct pipe_inode_info *pipe = i->pipe; - int offset = i->last_offset; - struct pipe_buffer *buf; - struct page *page; - - if (offset > 0 && offset < PAGE_SIZE) { - // some space in the last buffer; add to it - buf = pipe_buf(pipe, pipe->head - 1); - size = min_t(size_t, size, PAGE_SIZE - offset); - buf->len += size; - i->last_offset += size; - i->count -= size; - *off = offset; - return buf->page; - } - // OK, we need a new buffer - *off = 0; - size = min_t(size_t, size, PAGE_SIZE); - if (pipe_full(pipe->head, pipe->tail, pipe->max_usage)) - return NULL; - page = push_anon(pipe, size); - if (!page) - return NULL; - i->head = pipe->head - 1; - i->last_offset = size; - i->count -= size; - return page; -} - -static size_t copy_page_to_iter_pipe(struct page *page, size_t offset, size_t bytes, - struct iov_iter *i) -{ - struct pipe_inode_info *pipe = i->pipe; - unsigned int head = pipe->head; - - if (unlikely(bytes > i->count)) - bytes = i->count; - - if (unlikely(!bytes)) - return 0; - - if (!sanity(i)) - return 0; - - if (offset && i->last_offset == -offset) { // could we merge it? 
- struct pipe_buffer *buf = pipe_buf(pipe, head - 1); - if (buf->page == page) { - buf->len += bytes; - i->last_offset -= bytes; - i->count -= bytes; - return bytes; - } - } - if (pipe_full(pipe->head, pipe->tail, pipe->max_usage)) - return 0; - - push_page(pipe, page, offset, bytes); - i->last_offset = -(offset + bytes); - i->head = head; - i->count -= bytes; - return bytes; -} - /* * fault_in_iov_iter_readable - fault in iov iterator for reading * @i: iterator @@ -446,46 +300,6 @@ void iov_iter_init(struct iov_iter *i, unsigned int direction, } EXPORT_SYMBOL(iov_iter_init); -// returns the offset in partial buffer (if any) -static inline unsigned int pipe_npages(const struct iov_iter *i, int *npages) -{ - struct pipe_inode_info *pipe = i->pipe; - int used = pipe->head - pipe->tail; - int off = i->last_offset; - - *npages = max((int)pipe->max_usage - used, 0); - - if (off > 0 && off < PAGE_SIZE) { // anon and not full - (*npages)++; - return off; - } - return 0; -} - -static size_t copy_pipe_to_iter(const void *addr, size_t bytes, - struct iov_iter *i) -{ - unsigned int off, chunk; - - if (unlikely(bytes > i->count)) - bytes = i->count; - if (unlikely(!bytes)) - return 0; - - if (!sanity(i)) - return 0; - - for (size_t n = bytes; n; n -= chunk) { - struct page *page = append_pipe(i, n, &off); - chunk = min_t(size_t, n, PAGE_SIZE - off); - if (!page) - return bytes - n; - memcpy_to_page(page, off, addr, chunk); - addr += chunk; - } - return bytes; -} - static __wsum csum_and_memcpy(void *to, const void *from, size_t len, __wsum sum, size_t off) { @@ -493,44 +307,10 @@ static __wsum csum_and_memcpy(void *to, const void *from, size_t len, return csum_block_add(sum, next, off); } -static size_t csum_and_copy_to_pipe_iter(const void *addr, size_t bytes, - struct iov_iter *i, __wsum *sump) -{ - __wsum sum = *sump; - size_t off = 0; - unsigned int chunk, r; - - if (unlikely(bytes > i->count)) - bytes = i->count; - if (unlikely(!bytes)) - return 0; - - if (!sanity(i)) - return 0; - - while (bytes) { - struct page *page = append_pipe(i, bytes, &r); - char *p; - - if (!page) - break; - chunk = min_t(size_t, bytes, PAGE_SIZE - r); - p = kmap_local_page(page); - sum = csum_and_memcpy(p + r, addr + off, chunk, sum, off); - kunmap_local(p); - off += chunk; - bytes -= chunk; - } - *sump = sum; - return off; -} - size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i) { if (WARN_ON_ONCE(i->data_source)) return 0; - if (unlikely(iov_iter_is_pipe(i))) - return copy_pipe_to_iter(addr, bytes, i); if (user_backed_iter(i)) might_fault(); iterate_and_advance(i, bytes, base, len, off, @@ -552,42 +332,6 @@ static int copyout_mc(void __user *to, const void *from, size_t n) return n; } -static size_t copy_mc_pipe_to_iter(const void *addr, size_t bytes, - struct iov_iter *i) -{ - size_t xfer = 0; - unsigned int off, chunk; - - if (unlikely(bytes > i->count)) - bytes = i->count; - if (unlikely(!bytes)) - return 0; - - if (!sanity(i)) - return 0; - - while (bytes) { - struct page *page = append_pipe(i, bytes, &off); - unsigned long rem; - char *p; - - if (!page) - break; - chunk = min_t(size_t, bytes, PAGE_SIZE - off); - p = kmap_local_page(page); - rem = copy_mc_to_kernel(p + off, addr + xfer, chunk); - chunk -= rem; - kunmap_local(p); - xfer += chunk; - bytes -= chunk; - if (rem) { - iov_iter_revert(i, rem); - break; - } - } - return xfer; -} - /** * _copy_mc_to_iter - copy to iter with source memory error exception handling * @addr: source kernel address @@ -607,9 +351,8 @@ static size_t 
copy_mc_pipe_to_iter(const void *addr, size_t bytes, * alignment and poison alignment assumptions to avoid re-triggering * hardware exceptions. * - * * ITER_KVEC, ITER_PIPE, and ITER_BVEC can return short copies. - * Compare to copy_to_iter() where only ITER_IOVEC attempts might return - * a short copy. + * * ITER_KVEC and ITER_BVEC can return short copies. Compare to + * copy_to_iter() where only ITER_IOVEC attempts might return a short copy. * * Return: number of bytes copied (may be %0) */ @@ -617,8 +360,6 @@ size_t _copy_mc_to_iter(const void *addr, size_t bytes, struct iov_iter *i) { if (WARN_ON_ONCE(i->data_source)) return 0; - if (unlikely(iov_iter_is_pipe(i))) - return copy_mc_pipe_to_iter(addr, bytes, i); if (user_backed_iter(i)) might_fault(); __iterate_and_advance(i, bytes, base, len, off, @@ -732,8 +473,6 @@ size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes, return 0; if (WARN_ON_ONCE(i->data_source)) return 0; - if (unlikely(iov_iter_is_pipe(i))) - return copy_page_to_iter_pipe(page, offset, bytes, i); page += offset / PAGE_SIZE; // first subpage offset %= PAGE_SIZE; while (1) { @@ -764,8 +503,6 @@ size_t copy_page_to_iter_nofault(struct page *page, unsigned offset, size_t byte return 0; if (WARN_ON_ONCE(i->data_source)) return 0; - if (unlikely(iov_iter_is_pipe(i))) - return copy_page_to_iter_pipe(page, offset, bytes, i); page += offset / PAGE_SIZE; // first subpage offset %= PAGE_SIZE; while (1) { @@ -818,36 +555,8 @@ size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes, } EXPORT_SYMBOL(copy_page_from_iter); -static size_t pipe_zero(size_t bytes, struct iov_iter *i) -{ - unsigned int chunk, off; - - if (unlikely(bytes > i->count)) - bytes = i->count; - if (unlikely(!bytes)) - return 0; - - if (!sanity(i)) - return 0; - - for (size_t n = bytes; n; n -= chunk) { - struct page *page = append_pipe(i, n, &off); - char *p; - - if (!page) - return bytes - n; - chunk = min_t(size_t, n, PAGE_SIZE - off); - p = kmap_local_page(page); - memset(p + off, 0, chunk); - kunmap_local(p); - } - return bytes; -} - size_t iov_iter_zero(size_t bytes, struct iov_iter *i) { - if (unlikely(iov_iter_is_pipe(i))) - return pipe_zero(bytes, i); iterate_and_advance(i, bytes, base, len, count, clear_user(base, len), memset(base, 0, len) @@ -878,32 +587,6 @@ size_t copy_page_from_iter_atomic(struct page *page, unsigned offset, size_t byt } EXPORT_SYMBOL(copy_page_from_iter_atomic); -static void pipe_advance(struct iov_iter *i, size_t size) -{ - struct pipe_inode_info *pipe = i->pipe; - int off = i->last_offset; - - if (!off && !size) { - pipe_discard_from(pipe, i->start_head); // discard everything - return; - } - i->count -= size; - while (1) { - struct pipe_buffer *buf = pipe_buf(pipe, i->head); - if (off) /* make it relative to the beginning of buffer */ - size += abs(off) - buf->offset; - if (size <= buf->len) { - buf->len = size; - i->last_offset = last_offset(buf); - break; - } - size -= buf->len; - i->head++; - off = 0; - } - pipe_discard_from(pipe, i->head + 1); // discard everything past this one -} - static void iov_iter_bvec_advance(struct iov_iter *i, size_t size) { const struct bio_vec *bvec, *end; @@ -955,8 +638,6 @@ void iov_iter_advance(struct iov_iter *i, size_t size) iov_iter_iovec_advance(i, size); } else if (iov_iter_is_bvec(i)) { iov_iter_bvec_advance(i, size); - } else if (iov_iter_is_pipe(i)) { - pipe_advance(i, size); } else if (iov_iter_is_discard(i)) { i->count -= size; } @@ -970,26 +651,6 @@ void iov_iter_revert(struct iov_iter *i, 
size_t unroll) if (WARN_ON(unroll > MAX_RW_COUNT)) return; i->count += unroll; - if (unlikely(iov_iter_is_pipe(i))) { - struct pipe_inode_info *pipe = i->pipe; - unsigned int head = pipe->head; - - while (head > i->start_head) { - struct pipe_buffer *b = pipe_buf(pipe, --head); - if (unroll < b->len) { - b->len -= unroll; - i->last_offset = last_offset(b); - i->head = head; - return; - } - unroll -= b->len; - pipe_buf_release(pipe, b); - pipe->head--; - } - i->last_offset = 0; - i->head = head; - return; - } if (unlikely(iov_iter_is_discard(i))) return; if (unroll <= i->iov_offset) { @@ -1079,24 +740,6 @@ void iov_iter_bvec(struct iov_iter *i, unsigned int direction, } EXPORT_SYMBOL(iov_iter_bvec); -void iov_iter_pipe(struct iov_iter *i, unsigned int direction, - struct pipe_inode_info *pipe, - size_t count) -{ - BUG_ON(direction != READ); - WARN_ON(pipe_full(pipe->head, pipe->tail, pipe->ring_size)); - *i = (struct iov_iter){ - .iter_type = ITER_PIPE, - .data_source = false, - .pipe = pipe, - .head = pipe->head, - .start_head = pipe->head, - .last_offset = 0, - .count = count - }; -} -EXPORT_SYMBOL(iov_iter_pipe); - /** * iov_iter_xarray - Initialise an I/O iterator to use the pages in an xarray * @i: The iterator to initialise. @@ -1224,19 +867,6 @@ bool iov_iter_is_aligned(const struct iov_iter *i, unsigned addr_mask, if (iov_iter_is_bvec(i)) return iov_iter_aligned_bvec(i, addr_mask, len_mask); - if (iov_iter_is_pipe(i)) { - size_t size = i->count; - - if (size & len_mask) - return false; - if (size && i->last_offset > 0) { - if (i->last_offset & addr_mask) - return false; - } - - return true; - } - if (iov_iter_is_xarray(i)) { if (i->count & len_mask) return false; @@ -1307,14 +937,6 @@ unsigned long iov_iter_alignment(const struct iov_iter *i) if (iov_iter_is_bvec(i)) return iov_iter_alignment_bvec(i); - if (iov_iter_is_pipe(i)) { - size_t size = i->count; - - if (size && i->last_offset > 0) - return size | i->last_offset; - return size; - } - if (iov_iter_is_xarray(i)) return (i->xarray_start + i->iov_offset) | i->count; @@ -1367,36 +989,6 @@ static int want_pages_array(struct page ***res, size_t size, return count; } -static ssize_t pipe_get_pages(struct iov_iter *i, - struct page ***pages, size_t maxsize, unsigned maxpages, - size_t *start) -{ - unsigned int npages, count, off, chunk; - struct page **p; - size_t left; - - if (!sanity(i)) - return -EFAULT; - - *start = off = pipe_npages(i, &npages); - if (!npages) - return -EFAULT; - count = want_pages_array(pages, maxsize, off, min(npages, maxpages)); - if (!count) - return -ENOMEM; - p = *pages; - for (npages = 0, left = maxsize ; npages < count; npages++, left -= chunk) { - struct page *page = append_pipe(i, left, &off); - if (!page) - break; - chunk = min_t(size_t, left, PAGE_SIZE - off); - get_page(*p++ = page); - } - if (!npages) - return -EFAULT; - return maxsize - left; -} - static ssize_t iter_xarray_populate_pages(struct page **pages, struct xarray *xa, pgoff_t index, unsigned int nr_pages) { @@ -1490,8 +1082,7 @@ static struct page *first_bvec_segment(const struct iov_iter *i, static ssize_t __iov_iter_get_pages_alloc(struct iov_iter *i, struct page ***pages, size_t maxsize, - unsigned int maxpages, size_t *start, - iov_iter_extraction_t extraction_flags) + unsigned int maxpages, size_t *start) { unsigned int n, gup_flags = 0; @@ -1501,8 +1092,6 @@ static ssize_t __iov_iter_get_pages_alloc(struct iov_iter *i, return 0; if (maxsize > MAX_RW_COUNT) maxsize = MAX_RW_COUNT; - if (extraction_flags & ITER_ALLOW_P2PDMA) - 
gup_flags |= FOLL_PCI_P2PDMA; if (likely(user_backed_iter(i))) { unsigned long addr; @@ -1547,56 +1136,36 @@ static ssize_t __iov_iter_get_pages_alloc(struct iov_iter *i, } return maxsize; } - if (iov_iter_is_pipe(i)) - return pipe_get_pages(i, pages, maxsize, maxpages, start); if (iov_iter_is_xarray(i)) return iter_xarray_get_pages(i, pages, maxsize, maxpages, start); return -EFAULT; } -ssize_t iov_iter_get_pages(struct iov_iter *i, - struct page **pages, size_t maxsize, unsigned maxpages, - size_t *start, iov_iter_extraction_t extraction_flags) +ssize_t iov_iter_get_pages2(struct iov_iter *i, struct page **pages, + size_t maxsize, unsigned maxpages, size_t *start) { if (!maxpages) return 0; BUG_ON(!pages); - return __iov_iter_get_pages_alloc(i, &pages, maxsize, maxpages, - start, extraction_flags); -} -EXPORT_SYMBOL_GPL(iov_iter_get_pages); - -ssize_t iov_iter_get_pages2(struct iov_iter *i, struct page **pages, - size_t maxsize, unsigned maxpages, size_t *start) -{ - return iov_iter_get_pages(i, pages, maxsize, maxpages, start, 0); + return __iov_iter_get_pages_alloc(i, &pages, maxsize, maxpages, start); } EXPORT_SYMBOL(iov_iter_get_pages2); -ssize_t iov_iter_get_pages_alloc(struct iov_iter *i, - struct page ***pages, size_t maxsize, - size_t *start, iov_iter_extraction_t extraction_flags) +ssize_t iov_iter_get_pages_alloc2(struct iov_iter *i, + struct page ***pages, size_t maxsize, size_t *start) { ssize_t len; *pages = NULL; - len = __iov_iter_get_pages_alloc(i, pages, maxsize, ~0U, start, - extraction_flags); + len = __iov_iter_get_pages_alloc(i, pages, maxsize, ~0U, start); if (len <= 0) { kvfree(*pages); *pages = NULL; } return len; } -EXPORT_SYMBOL_GPL(iov_iter_get_pages_alloc); - -ssize_t iov_iter_get_pages_alloc2(struct iov_iter *i, - struct page ***pages, size_t maxsize, size_t *start) -{ - return iov_iter_get_pages_alloc(i, pages, maxsize, start, 0); -} EXPORT_SYMBOL(iov_iter_get_pages_alloc2); size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum, @@ -1638,9 +1207,7 @@ size_t csum_and_copy_to_iter(const void *addr, size_t bytes, void *_csstate, } sum = csum_shift(csstate->csum, csstate->off); - if (unlikely(iov_iter_is_pipe(i))) - bytes = csum_and_copy_to_pipe_iter(addr, bytes, i, &sum); - else iterate_and_advance(i, bytes, base, len, off, ({ + iterate_and_advance(i, bytes, base, len, off, ({ next = csum_and_copy_to_user(addr + off, base, len); sum = csum_block_add(sum, next, off); next ? 
0 : len; @@ -1725,15 +1292,6 @@ int iov_iter_npages(const struct iov_iter *i, int maxpages) return iov_npages(i, maxpages); if (iov_iter_is_bvec(i)) return bvec_npages(i, maxpages); - if (iov_iter_is_pipe(i)) { - int npages; - - if (!sanity(i)) - return 0; - - pipe_npages(i, &npages); - return min(npages, maxpages); - } if (iov_iter_is_xarray(i)) { unsigned offset = (i->xarray_start + i->iov_offset) % PAGE_SIZE; int npages = DIV_ROUND_UP(offset + i->count, PAGE_SIZE); @@ -1746,10 +1304,6 @@ EXPORT_SYMBOL(iov_iter_npages); const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags) { *new = *old; - if (unlikely(iov_iter_is_pipe(new))) { - WARN_ON(1); - return NULL; - } if (iov_iter_is_bvec(new)) return new->bvec = kmemdup(new->bvec, new->nr_segs * sizeof(struct bio_vec), diff --git a/lib/kunit/executor_test.c b/lib/kunit/executor_test.c index 0cea31c27b23..ce6749af374d 100644 --- a/lib/kunit/executor_test.c +++ b/lib/kunit/executor_test.c @@ -125,11 +125,6 @@ kunit_test_suites(&executor_test_suite); /* Test helpers */ -static void kfree_res_free(struct kunit_resource *res) -{ - kfree(res->data); -} - /* Use the resource API to register a call to kfree(to_free). * Since we never actually use the resource, it's safe to use on const data. */ @@ -138,8 +133,10 @@ static void kfree_at_end(struct kunit *test, const void *to_free) /* kfree() handles NULL already, but avoid allocating a no-op cleanup. */ if (IS_ERR_OR_NULL(to_free)) return; - kunit_alloc_resource(test, NULL, kfree_res_free, GFP_KERNEL, - (void *)to_free); + + kunit_add_action(test, + (kunit_action_t *)kfree, + (void *)to_free); } static struct kunit_suite *alloc_fake_suite(struct kunit *test, diff --git a/lib/kunit/kunit-example-test.c b/lib/kunit/kunit-example-test.c index cd8b7e51d02b..b69b689ea850 100644 --- a/lib/kunit/kunit-example-test.c +++ b/lib/kunit/kunit-example-test.c @@ -42,6 +42,16 @@ static int example_test_init(struct kunit *test) } /* + * This is run once after each test case, see the comment on + * example_test_suite for more information. + */ +static void example_test_exit(struct kunit *test) +{ + kunit_info(test, "cleaning up\n"); +} + + +/* * This is run once before all test cases in the suite. * See the comment on example_test_suite for more information. */ @@ -53,6 +63,16 @@ static int example_test_init_suite(struct kunit_suite *suite) } /* + * This is run once after all test cases in the suite. + * See the comment on example_test_suite for more information. + */ +static void example_test_exit_suite(struct kunit_suite *suite) +{ + kunit_info(suite, "exiting suite\n"); +} + + +/* * This test should always be skipped. */ static void example_skip_test(struct kunit *test) @@ -167,6 +187,39 @@ static void example_static_stub_test(struct kunit *test) KUNIT_EXPECT_EQ(test, add_one(1), 2); } +static const struct example_param { + int value; +} example_params_array[] = { + { .value = 2, }, + { .value = 1, }, + { .value = 0, }, +}; + +static void example_param_get_desc(const struct example_param *p, char *desc) +{ + snprintf(desc, KUNIT_PARAM_DESC_SIZE, "example value %d", p->value); +} + +KUNIT_ARRAY_PARAM(example, example_params_array, example_param_get_desc); + +/* + * This test shows the use of params. 
+ */ +static void example_params_test(struct kunit *test) +{ + const struct example_param *param = test->param_value; + + /* By design, param pointer will not be NULL */ + KUNIT_ASSERT_NOT_NULL(test, param); + + /* Test can be skipped on unsupported param values */ + if (!param->value) + kunit_skip(test, "unsupported param value"); + + /* You can use param values for parameterized testing */ + KUNIT_EXPECT_EQ(test, param->value % param->value, 0); +} + /* * Here we make a list of all the test cases we want to add to the test suite * below. @@ -183,6 +236,7 @@ static struct kunit_case example_test_cases[] = { KUNIT_CASE(example_mark_skipped_test), KUNIT_CASE(example_all_expect_macros_test), KUNIT_CASE(example_static_stub_test), + KUNIT_CASE_PARAM(example_params_test, example_gen_params), {} }; @@ -211,7 +265,9 @@ static struct kunit_case example_test_cases[] = { static struct kunit_suite example_test_suite = { .name = "example", .init = example_test_init, + .exit = example_test_exit, .suite_init = example_test_init_suite, + .suite_exit = example_test_exit_suite, .test_cases = example_test_cases, }; diff --git a/lib/kunit/kunit-test.c b/lib/kunit/kunit-test.c index 42e44caa1bdd..83d8e90ca7a2 100644 --- a/lib/kunit/kunit-test.c +++ b/lib/kunit/kunit-test.c @@ -112,7 +112,7 @@ struct kunit_test_resource_context { struct kunit test; bool is_resource_initialized; int allocate_order[2]; - int free_order[2]; + int free_order[4]; }; static int fake_resource_init(struct kunit_resource *res, void *context) @@ -403,6 +403,88 @@ static void kunit_resource_test_named(struct kunit *test) KUNIT_EXPECT_TRUE(test, list_empty(&test->resources)); } +static void increment_int(void *ctx) +{ + int *i = (int *)ctx; + (*i)++; +} + +static void kunit_resource_test_action(struct kunit *test) +{ + int num_actions = 0; + + kunit_add_action(test, increment_int, &num_actions); + KUNIT_EXPECT_EQ(test, num_actions, 0); + kunit_cleanup(test); + KUNIT_EXPECT_EQ(test, num_actions, 1); + + /* Once we've cleaned up, the action queue is empty. */ + kunit_cleanup(test); + KUNIT_EXPECT_EQ(test, num_actions, 1); + + /* Check the same function can be deferred multiple times. */ + kunit_add_action(test, increment_int, &num_actions); + kunit_add_action(test, increment_int, &num_actions); + kunit_cleanup(test); + KUNIT_EXPECT_EQ(test, num_actions, 3); +} +static void kunit_resource_test_remove_action(struct kunit *test) +{ + int num_actions = 0; + + kunit_add_action(test, increment_int, &num_actions); + KUNIT_EXPECT_EQ(test, num_actions, 0); + + kunit_remove_action(test, increment_int, &num_actions); + kunit_cleanup(test); + KUNIT_EXPECT_EQ(test, num_actions, 0); +} +static void kunit_resource_test_release_action(struct kunit *test) +{ + int num_actions = 0; + + kunit_add_action(test, increment_int, &num_actions); + KUNIT_EXPECT_EQ(test, num_actions, 0); + /* Runs immediately on trigger. */ + kunit_release_action(test, increment_int, &num_actions); + KUNIT_EXPECT_EQ(test, num_actions, 1); + + /* Doesn't run again on test exit. 
*/ + kunit_cleanup(test); + KUNIT_EXPECT_EQ(test, num_actions, 1); +} +static void action_order_1(void *ctx) +{ + struct kunit_test_resource_context *res_ctx = (struct kunit_test_resource_context *)ctx; + + KUNIT_RESOURCE_TEST_MARK_ORDER(res_ctx, free_order, 1); + kunit_log(KERN_INFO, current->kunit_test, "action_order_1"); +} +static void action_order_2(void *ctx) +{ + struct kunit_test_resource_context *res_ctx = (struct kunit_test_resource_context *)ctx; + + KUNIT_RESOURCE_TEST_MARK_ORDER(res_ctx, free_order, 2); + kunit_log(KERN_INFO, current->kunit_test, "action_order_2"); +} +static void kunit_resource_test_action_ordering(struct kunit *test) +{ + struct kunit_test_resource_context *ctx = test->priv; + + kunit_add_action(test, action_order_1, ctx); + kunit_add_action(test, action_order_2, ctx); + kunit_add_action(test, action_order_1, ctx); + kunit_add_action(test, action_order_2, ctx); + kunit_remove_action(test, action_order_1, ctx); + kunit_release_action(test, action_order_2, ctx); + kunit_cleanup(test); + + /* [2 is triggered] [2], [(1 is cancelled)] [1] */ + KUNIT_EXPECT_EQ(test, ctx->free_order[0], 2); + KUNIT_EXPECT_EQ(test, ctx->free_order[1], 2); + KUNIT_EXPECT_EQ(test, ctx->free_order[2], 1); +} + static int kunit_resource_test_init(struct kunit *test) { struct kunit_test_resource_context *ctx = @@ -434,6 +516,10 @@ static struct kunit_case kunit_resource_test_cases[] = { KUNIT_CASE(kunit_resource_test_proper_free_ordering), KUNIT_CASE(kunit_resource_test_static), KUNIT_CASE(kunit_resource_test_named), + KUNIT_CASE(kunit_resource_test_action), + KUNIT_CASE(kunit_resource_test_remove_action), + KUNIT_CASE(kunit_resource_test_release_action), + KUNIT_CASE(kunit_resource_test_action_ordering), {} }; diff --git a/lib/kunit/resource.c b/lib/kunit/resource.c index c414df922f34..f0209252b179 100644 --- a/lib/kunit/resource.c +++ b/lib/kunit/resource.c @@ -77,3 +77,102 @@ int kunit_destroy_resource(struct kunit *test, kunit_resource_match_t match, return 0; } EXPORT_SYMBOL_GPL(kunit_destroy_resource); + +struct kunit_action_ctx { + struct kunit_resource res; + kunit_action_t *func; + void *ctx; +}; + +static void __kunit_action_free(struct kunit_resource *res) +{ + struct kunit_action_ctx *action_ctx = container_of(res, struct kunit_action_ctx, res); + + action_ctx->func(action_ctx->ctx); +} + + +int kunit_add_action(struct kunit *test, void (*action)(void *), void *ctx) +{ + struct kunit_action_ctx *action_ctx; + + KUNIT_ASSERT_NOT_NULL_MSG(test, action, "Tried to action a NULL function!"); + + action_ctx = kzalloc(sizeof(*action_ctx), GFP_KERNEL); + if (!action_ctx) + return -ENOMEM; + + action_ctx->func = action; + action_ctx->ctx = ctx; + + action_ctx->res.should_kfree = true; + /* As init is NULL, this cannot fail. */ + __kunit_add_resource(test, NULL, __kunit_action_free, &action_ctx->res, action_ctx); + + return 0; +} +EXPORT_SYMBOL_GPL(kunit_add_action); + +int kunit_add_action_or_reset(struct kunit *test, void (*action)(void *), + void *ctx) +{ + int res = kunit_add_action(test, action, ctx); + + if (res) + action(ctx); + return res; +} +EXPORT_SYMBOL_GPL(kunit_add_action_or_reset); + +static bool __kunit_action_match(struct kunit *test, + struct kunit_resource *res, void *match_data) +{ + struct kunit_action_ctx *match_ctx = (struct kunit_action_ctx *)match_data; + struct kunit_action_ctx *res_ctx = container_of(res, struct kunit_action_ctx, res); + + /* Make sure this is a free function. 
*/ + if (res->free != __kunit_action_free) + return false; + + /* Both the function and context data should match. */ + return (match_ctx->func == res_ctx->func) && (match_ctx->ctx == res_ctx->ctx); +} + +void kunit_remove_action(struct kunit *test, + kunit_action_t *action, + void *ctx) +{ + struct kunit_action_ctx match_ctx; + struct kunit_resource *res; + + match_ctx.func = action; + match_ctx.ctx = ctx; + + res = kunit_find_resource(test, __kunit_action_match, &match_ctx); + if (res) { + /* Remove the free function so we don't run the action. */ + res->free = NULL; + kunit_remove_resource(test, res); + kunit_put_resource(res); + } +} +EXPORT_SYMBOL_GPL(kunit_remove_action); + +void kunit_release_action(struct kunit *test, + kunit_action_t *action, + void *ctx) +{ + struct kunit_action_ctx match_ctx; + struct kunit_resource *res; + + match_ctx.func = action; + match_ctx.ctx = ctx; + + res = kunit_find_resource(test, __kunit_action_match, &match_ctx); + if (res) { + kunit_remove_resource(test, res); + /* We have to put() this here, else free won't be called. */ + kunit_put_resource(res); + } +} +EXPORT_SYMBOL_GPL(kunit_release_action); diff --git a/lib/kunit/test.c b/lib/kunit/test.c index e2910b261112..84e4666555c9 100644 --- a/lib/kunit/test.c +++ b/lib/kunit/test.c @@ -185,16 +185,28 @@ static void kunit_print_suite_start(struct kunit_suite *suite) kunit_suite_num_test_cases(suite)); } -static void kunit_print_ok_not_ok(void *test_or_suite, - bool is_test, +/* Currently supported test levels */ +enum { + KUNIT_LEVEL_SUITE = 0, + KUNIT_LEVEL_CASE, + KUNIT_LEVEL_CASE_PARAM, +}; + +static void kunit_print_ok_not_ok(struct kunit *test, + unsigned int test_level, enum kunit_status status, size_t test_number, const char *description, const char *directive) { - struct kunit_suite *suite = is_test ? NULL : test_or_suite; - struct kunit *test = is_test ? test_or_suite : NULL; const char *directive_header = (status == KUNIT_SKIPPED) ? " # SKIP " : ""; + const char *directive_body = (status == KUNIT_SKIPPED) ? directive : ""; + + /* + * When test is NULL assume that results are from the suite + * and today suite results are expected at level 0 only. + */ + WARN(!test && test_level, "suite test level can't be %u!\n", test_level); /* * We do not log the test suite results as doing so would @@ -203,17 +215,18 @@ static void kunit_print_ok_not_ok(void *test_or_suite, * separately seq_printf() the suite results for the debugfs * representation. */ - if (suite) + if (!test) pr_info("%s %zd %s%s%s\n", kunit_status_to_ok_not_ok(status), test_number, description, directive_header, - (status == KUNIT_SKIPPED) ? directive : ""); + directive_body); else kunit_log(KERN_INFO, test, - KUNIT_SUBTEST_INDENT "%s %zd %s%s%s", + "%*s%s %zd %s%s%s", + KUNIT_INDENT_LEN * test_level, "", kunit_status_to_ok_not_ok(status), test_number, description, directive_header, - (status == KUNIT_SKIPPED) ? 
directive : ""); + directive_body); } enum kunit_status kunit_suite_has_succeeded(struct kunit_suite *suite) @@ -239,7 +252,7 @@ static size_t kunit_suite_counter = 1; static void kunit_print_suite_end(struct kunit_suite *suite) { - kunit_print_ok_not_ok((void *)suite, false, + kunit_print_ok_not_ok(NULL, KUNIT_LEVEL_SUITE, kunit_suite_has_succeeded(suite), kunit_suite_counter++, suite->name, @@ -310,7 +323,7 @@ static void kunit_fail(struct kunit *test, const struct kunit_loc *loc, string_stream_destroy(stream); } -static void __noreturn kunit_abort(struct kunit *test) +void __noreturn __kunit_abort(struct kunit *test) { kunit_try_catch_throw(&test->try_catch); /* Does not return. */ @@ -322,8 +335,9 @@ static void __noreturn kunit_abort(struct kunit *test) */ WARN_ONCE(true, "Throw could not abort from test!\n"); } +EXPORT_SYMBOL_GPL(__kunit_abort); -void kunit_do_failed_assertion(struct kunit *test, +void __kunit_do_failed_assertion(struct kunit *test, const struct kunit_loc *loc, enum kunit_assert_type type, const struct kunit_assert *assert, @@ -340,11 +354,8 @@ void kunit_do_failed_assertion(struct kunit *test, kunit_fail(test, loc, type, assert, assert_format, &message); va_end(args); - - if (type == KUNIT_ASSERTION) - kunit_abort(test); } -EXPORT_SYMBOL_GPL(kunit_do_failed_assertion); +EXPORT_SYMBOL_GPL(__kunit_do_failed_assertion); void kunit_init_test(struct kunit *test, const char *name, char *log) { @@ -419,15 +430,54 @@ static void kunit_try_run_case(void *data) * thread will resume control and handle any necessary clean up. */ kunit_run_case_internal(test, suite, test_case); - /* This line may never be reached. */ +} + +static void kunit_try_run_case_cleanup(void *data) +{ + struct kunit_try_catch_context *ctx = data; + struct kunit *test = ctx->test; + struct kunit_suite *suite = ctx->suite; + + current->kunit_test = test; + kunit_run_case_cleanup(test, suite); } +static void kunit_catch_run_case_cleanup(void *data) +{ + struct kunit_try_catch_context *ctx = data; + struct kunit *test = ctx->test; + int try_exit_code = kunit_try_catch_get_result(&test->try_catch); + + /* It is always a failure if cleanup aborts. */ + kunit_set_failure(test); + + if (try_exit_code) { + /* + * Test case could not finish, we have no idea what state it is + * in, so don't do clean up. + */ + if (try_exit_code == -ETIMEDOUT) { + kunit_err(test, "test case cleanup timed out\n"); + /* + * Unknown internal error occurred preventing test case from + * running, so there is nothing to clean up. + */ + } else { + kunit_err(test, "internal error occurred during test case cleanup: %d\n", + try_exit_code); + } + return; + } + + kunit_err(test, "test aborted during cleanup. continuing without cleaning up\n"); +} + + static void kunit_catch_run_case(void *data) { struct kunit_try_catch_context *ctx = data; struct kunit *test = ctx->test; - struct kunit_suite *suite = ctx->suite; int try_exit_code = kunit_try_catch_get_result(&test->try_catch); if (try_exit_code) { @@ -448,12 +498,6 @@ static void kunit_catch_run_case(void *data) } return; } - - /* - * Test case was run, but aborted. It is the test case's business as to - * whether it failed or not, we just need to clean up. 
- */ - kunit_run_case_cleanup(test, suite); } /* @@ -478,6 +522,13 @@ static void kunit_run_case_catch_errors(struct kunit_suite *suite, context.test_case = test_case; kunit_try_catch_run(try_catch, &context); + /* Now run the cleanup */ + kunit_try_catch_init(try_catch, + test, + kunit_try_run_case_cleanup, + kunit_catch_run_case_cleanup); + kunit_try_catch_run(try_catch, &context); + /* Propagate the parameter result to the test case. */ if (test->status == KUNIT_FAILURE) test_case->status = KUNIT_FAILURE; @@ -585,11 +636,11 @@ int kunit_run_tests(struct kunit_suite *suite) "param-%d", test.param_index); } - kunit_log(KERN_INFO, &test, - KUNIT_SUBTEST_INDENT KUNIT_SUBTEST_INDENT - "%s %d %s", - kunit_status_to_ok_not_ok(test.status), - test.param_index + 1, param_desc); + kunit_print_ok_not_ok(&test, KUNIT_LEVEL_CASE_PARAM, + test.status, + test.param_index + 1, + param_desc, + test.status_comment); /* Get next param. */ param_desc[0] = '\0'; @@ -603,7 +654,7 @@ int kunit_run_tests(struct kunit_suite *suite) kunit_print_test_stats(&test, param_stats); - kunit_print_ok_not_ok(&test, true, test_case->status, + kunit_print_ok_not_ok(&test, KUNIT_LEVEL_CASE, test_case->status, kunit_test_case_num(suite, test_case), test_case->name, test.status_comment); @@ -712,58 +763,28 @@ static struct notifier_block kunit_mod_nb = { }; #endif -struct kunit_kmalloc_array_params { - size_t n; - size_t size; - gfp_t gfp; -}; - -static int kunit_kmalloc_array_init(struct kunit_resource *res, void *context) +void *kunit_kmalloc_array(struct kunit *test, size_t n, size_t size, gfp_t gfp) { - struct kunit_kmalloc_array_params *params = context; + void *data; - res->data = kmalloc_array(params->n, params->size, params->gfp); - if (!res->data) - return -ENOMEM; + data = kmalloc_array(n, size, gfp); - return 0; -} + if (!data) + return NULL; -static void kunit_kmalloc_array_free(struct kunit_resource *res) -{ - kfree(res->data); -} - -void *kunit_kmalloc_array(struct kunit *test, size_t n, size_t size, gfp_t gfp) -{ - struct kunit_kmalloc_array_params params = { - .size = size, - .n = n, - .gfp = gfp - }; + if (kunit_add_action_or_reset(test, (kunit_action_t *)kfree, data) != 0) + return NULL; - return kunit_alloc_resource(test, - kunit_kmalloc_array_init, - kunit_kmalloc_array_free, - gfp, - ¶ms); + return data; } EXPORT_SYMBOL_GPL(kunit_kmalloc_array); -static inline bool kunit_kfree_match(struct kunit *test, - struct kunit_resource *res, void *match_data) -{ - /* Only match resources allocated with kunit_kmalloc() and friends. 
*/ - return res->free == kunit_kmalloc_array_free && res->data == match_data; -} - void kunit_kfree(struct kunit *test, const void *ptr) { if (!ptr) return; - if (kunit_destroy_resource(test, kunit_kfree_match, (void *)ptr)) - KUNIT_FAIL(test, "kunit_kfree: %px already freed or not allocated by kunit", ptr); + kunit_release_action(test, (kunit_action_t *)kfree, (void *)ptr); } EXPORT_SYMBOL_GPL(kunit_kfree); diff --git a/lib/maple_tree.c b/lib/maple_tree.c index 110a36479dce..8ebc43d4cc8c 100644 --- a/lib/maple_tree.c +++ b/lib/maple_tree.c @@ -5317,15 +5317,9 @@ int mas_empty_area(struct ma_state *mas, unsigned long min, mt = mte_node_type(mas->node); pivots = ma_pivots(mas_mn(mas), mt); - if (offset) - mas->min = pivots[offset - 1] + 1; - - if (offset < mt_pivots[mt]) - mas->max = pivots[offset]; - - if (mas->index < mas->min) - mas->index = mas->min; - + min = mas_safe_min(mas, pivots, offset); + if (mas->index < min) + mas->index = min; mas->last = mas->index + size - 1; return 0; } diff --git a/lib/radix-tree.c b/lib/radix-tree.c index 049ba132f7ef..1a31065b2036 100644 --- a/lib/radix-tree.c +++ b/lib/radix-tree.c @@ -27,6 +27,8 @@ #include <linux/string.h> #include <linux/xarray.h> +#include "radix-tree.h" + /* * Radix tree node cache. */ diff --git a/lib/radix-tree.h b/lib/radix-tree.h new file mode 100644 index 000000000000..40d5c03e2b09 --- /dev/null +++ b/lib/radix-tree.h @@ -0,0 +1,8 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* radix-tree helpers that are only shared with xarray */ + +struct kmem_cache; +struct rcu_head; + +extern struct kmem_cache *radix_tree_node_cachep; +extern void radix_tree_node_rcu_free(struct rcu_head *head); diff --git a/lib/raid6/neon.h b/lib/raid6/neon.h new file mode 100644 index 000000000000..2ca41ee9b499 --- /dev/null +++ b/lib/raid6/neon.h @@ -0,0 +1,22 @@ +// SPDX-License-Identifier: GPL-2.0-only + +void raid6_neon1_gen_syndrome_real(int disks, unsigned long bytes, void **ptrs); +void raid6_neon1_xor_syndrome_real(int disks, int start, int stop, + unsigned long bytes, void **ptrs); +void raid6_neon2_gen_syndrome_real(int disks, unsigned long bytes, void **ptrs); +void raid6_neon2_xor_syndrome_real(int disks, int start, int stop, + unsigned long bytes, void **ptrs); +void raid6_neon4_gen_syndrome_real(int disks, unsigned long bytes, void **ptrs); +void raid6_neon4_xor_syndrome_real(int disks, int start, int stop, + unsigned long bytes, void **ptrs); +void raid6_neon8_gen_syndrome_real(int disks, unsigned long bytes, void **ptrs); +void raid6_neon8_xor_syndrome_real(int disks, int start, int stop, + unsigned long bytes, void **ptrs); +void __raid6_2data_recov_neon(int bytes, uint8_t *p, uint8_t *q, uint8_t *dp, + uint8_t *dq, const uint8_t *pbmul, + const uint8_t *qmul); + +void __raid6_datap_recov_neon(int bytes, uint8_t *p, uint8_t *q, uint8_t *dq, + const uint8_t *qmul); + + diff --git a/lib/raid6/neon.uc b/lib/raid6/neon.uc index b7c68030da4f..355270af0cd6 100644 --- a/lib/raid6/neon.uc +++ b/lib/raid6/neon.uc @@ -25,6 +25,7 @@ */ #include <arm_neon.h> +#include "neon.h" typedef uint8x16_t unative_t; diff --git a/lib/raid6/recov_neon.c b/lib/raid6/recov_neon.c index d6fba8bf8c0a..1bfc14174d4d 100644 --- a/lib/raid6/recov_neon.c +++ b/lib/raid6/recov_neon.c @@ -8,6 +8,7 @@ #ifdef __KERNEL__ #include <asm/neon.h> +#include "neon.h" #else #define kernel_neon_begin() #define kernel_neon_end() @@ -19,13 +20,6 @@ static int raid6_has_neon(void) return cpu_has_neon(); } -void __raid6_2data_recov_neon(int bytes, uint8_t *p, uint8_t *q, uint8_t *dp, - 
uint8_t *dq, const uint8_t *pbmul, - const uint8_t *qmul); - -void __raid6_datap_recov_neon(int bytes, uint8_t *p, uint8_t *q, uint8_t *dq, - const uint8_t *qmul); - static void raid6_2data_recov_neon(int disks, size_t bytes, int faila, int failb, void **ptrs) { diff --git a/lib/raid6/recov_neon_inner.c b/lib/raid6/recov_neon_inner.c index 90eb80d43790..f9e7e8f5a151 100644 --- a/lib/raid6/recov_neon_inner.c +++ b/lib/raid6/recov_neon_inner.c @@ -5,6 +5,7 @@ */ #include <arm_neon.h> +#include "neon.h" #ifdef CONFIG_ARM /* diff --git a/lib/test_firmware.c b/lib/test_firmware.c index 05ed84c2fc4c..1d7d480b8eeb 100644 --- a/lib/test_firmware.c +++ b/lib/test_firmware.c @@ -45,6 +45,7 @@ struct test_batched_req { bool sent; const struct firmware *fw; const char *name; + const char *fw_buf; struct completion completion; struct task_struct *task; struct device *dev; @@ -175,8 +176,14 @@ static void __test_release_all_firmware(void) for (i = 0; i < test_fw_config->num_requests; i++) { req = &test_fw_config->reqs[i]; - if (req->fw) + if (req->fw) { + if (req->fw_buf) { + kfree_const(req->fw_buf); + req->fw_buf = NULL; + } release_firmware(req->fw); + req->fw = NULL; + } } vfree(test_fw_config->reqs); @@ -353,16 +360,26 @@ static ssize_t config_test_show_str(char *dst, return len; } -static int test_dev_config_update_bool(const char *buf, size_t size, +static inline int __test_dev_config_update_bool(const char *buf, size_t size, bool *cfg) { int ret; - mutex_lock(&test_fw_mutex); if (kstrtobool(buf, cfg) < 0) ret = -EINVAL; else ret = size; + + return ret; +} + +static int test_dev_config_update_bool(const char *buf, size_t size, + bool *cfg) +{ + int ret; + + mutex_lock(&test_fw_mutex); + ret = __test_dev_config_update_bool(buf, size, cfg); mutex_unlock(&test_fw_mutex); return ret; @@ -373,7 +390,8 @@ static ssize_t test_dev_config_show_bool(char *buf, bool val) return snprintf(buf, PAGE_SIZE, "%d\n", val); } -static int test_dev_config_update_size_t(const char *buf, +static int __test_dev_config_update_size_t( + const char *buf, size_t size, size_t *cfg) { @@ -384,9 +402,7 @@ static int test_dev_config_update_size_t(const char *buf, if (ret) return ret; - mutex_lock(&test_fw_mutex); *(size_t *)cfg = new; - mutex_unlock(&test_fw_mutex); /* Always return full write size even if we didn't consume all */ return size; @@ -402,7 +418,7 @@ static ssize_t test_dev_config_show_int(char *buf, int val) return snprintf(buf, PAGE_SIZE, "%d\n", val); } -static int test_dev_config_update_u8(const char *buf, size_t size, u8 *cfg) +static int __test_dev_config_update_u8(const char *buf, size_t size, u8 *cfg) { u8 val; int ret; @@ -411,14 +427,23 @@ static int test_dev_config_update_u8(const char *buf, size_t size, u8 *cfg) if (ret) return ret; - mutex_lock(&test_fw_mutex); *(u8 *)cfg = val; - mutex_unlock(&test_fw_mutex); /* Always return full write size even if we didn't consume all */ return size; } +static int test_dev_config_update_u8(const char *buf, size_t size, u8 *cfg) +{ + int ret; + + mutex_lock(&test_fw_mutex); + ret = __test_dev_config_update_u8(buf, size, cfg); + mutex_unlock(&test_fw_mutex); + + return ret; +} + static ssize_t test_dev_config_show_u8(char *buf, u8 val) { return snprintf(buf, PAGE_SIZE, "%u\n", val); @@ -471,10 +496,10 @@ static ssize_t config_num_requests_store(struct device *dev, mutex_unlock(&test_fw_mutex); goto out; } - mutex_unlock(&test_fw_mutex); - rc = test_dev_config_update_u8(buf, count, - &test_fw_config->num_requests); + rc = __test_dev_config_update_u8(buf, count, + 
&test_fw_config->num_requests); + mutex_unlock(&test_fw_mutex); out: return rc; @@ -518,10 +543,10 @@ static ssize_t config_buf_size_store(struct device *dev, mutex_unlock(&test_fw_mutex); goto out; } - mutex_unlock(&test_fw_mutex); - rc = test_dev_config_update_size_t(buf, count, - &test_fw_config->buf_size); + rc = __test_dev_config_update_size_t(buf, count, + &test_fw_config->buf_size); + mutex_unlock(&test_fw_mutex); out: return rc; @@ -548,10 +573,10 @@ static ssize_t config_file_offset_store(struct device *dev, mutex_unlock(&test_fw_mutex); goto out; } - mutex_unlock(&test_fw_mutex); - rc = test_dev_config_update_size_t(buf, count, - &test_fw_config->file_offset); + rc = __test_dev_config_update_size_t(buf, count, + &test_fw_config->file_offset); + mutex_unlock(&test_fw_mutex); out: return rc; @@ -652,6 +677,8 @@ static ssize_t trigger_request_store(struct device *dev, mutex_lock(&test_fw_mutex); release_firmware(test_firmware); + if (test_fw_config->reqs) + __test_release_all_firmware(); test_firmware = NULL; rc = request_firmware(&test_firmware, name, dev); if (rc) { @@ -752,6 +779,8 @@ static ssize_t trigger_async_request_store(struct device *dev, mutex_lock(&test_fw_mutex); release_firmware(test_firmware); test_firmware = NULL; + if (test_fw_config->reqs) + __test_release_all_firmware(); rc = request_firmware_nowait(THIS_MODULE, 1, name, dev, GFP_KERNEL, NULL, trigger_async_request_cb); if (rc) { @@ -794,6 +823,8 @@ static ssize_t trigger_custom_fallback_store(struct device *dev, mutex_lock(&test_fw_mutex); release_firmware(test_firmware); + if (test_fw_config->reqs) + __test_release_all_firmware(); test_firmware = NULL; rc = request_firmware_nowait(THIS_MODULE, FW_ACTION_NOUEVENT, name, dev, GFP_KERNEL, NULL, @@ -856,6 +887,8 @@ static int test_fw_run_batch_request(void *data) test_fw_config->buf_size); if (!req->fw) kfree(test_buf); + else + req->fw_buf = test_buf; } else { req->rc = test_fw_config->req_firmware(&req->fw, req->name, @@ -895,6 +928,11 @@ static ssize_t trigger_batched_requests_store(struct device *dev, mutex_lock(&test_fw_mutex); + if (test_fw_config->reqs) { + rc = -EBUSY; + goto out_bail; + } + test_fw_config->reqs = vzalloc(array3_size(sizeof(struct test_batched_req), test_fw_config->num_requests, 2)); @@ -911,6 +949,7 @@ static ssize_t trigger_batched_requests_store(struct device *dev, req->fw = NULL; req->idx = i; req->name = test_fw_config->name; + req->fw_buf = NULL; req->dev = dev; init_completion(&req->completion); req->task = kthread_run(test_fw_run_batch_request, req, @@ -993,6 +1032,11 @@ ssize_t trigger_batched_requests_async_store(struct device *dev, mutex_lock(&test_fw_mutex); + if (test_fw_config->reqs) { + rc = -EBUSY; + goto out_bail; + } + test_fw_config->reqs = vzalloc(array3_size(sizeof(struct test_batched_req), test_fw_config->num_requests, 2)); @@ -1010,6 +1054,7 @@ ssize_t trigger_batched_requests_async_store(struct device *dev, for (i = 0; i < test_fw_config->num_requests; i++) { req = &test_fw_config->reqs[i]; req->name = test_fw_config->name; + req->fw_buf = NULL; req->fw = NULL; req->idx = i; init_completion(&req->completion); diff --git a/lib/test_vmalloc.c b/lib/test_vmalloc.c index 9dd9745d365f..3718d9886407 100644 --- a/lib/test_vmalloc.c +++ b/lib/test_vmalloc.c @@ -369,7 +369,7 @@ vm_map_ram_test(void) int i; map_nr_pages = nr_pages > 0 ? 
nr_pages:1; - pages = kmalloc(map_nr_pages * sizeof(struct page), GFP_KERNEL); + pages = kcalloc(map_nr_pages, sizeof(struct page *), GFP_KERNEL); if (!pages) return -1; diff --git a/lib/xarray.c b/lib/xarray.c index ea9ce1f0b386..2071a3718f4e 100644 --- a/lib/xarray.c +++ b/lib/xarray.c @@ -12,6 +12,8 @@ #include <linux/slab.h> #include <linux/xarray.h> +#include "radix-tree.h" + /* * Coding conventions in this file: * @@ -247,10 +249,6 @@ void *xas_load(struct xa_state *xas) } EXPORT_SYMBOL_GPL(xas_load); -/* Move the radix tree node cache here */ -extern struct kmem_cache *radix_tree_node_cachep; -extern void radix_tree_node_rcu_free(struct rcu_head *head); - #define XA_RCU_FREE ((struct xarray *)1) static void xa_node_free(struct xa_node *node) |
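
The lib/kunit/resource.c and lib/kunit/test.c hunks above add a deferred-action API (kunit_add_action(), kunit_add_action_or_reset(), kunit_remove_action(), kunit_release_action()) and rework kunit_kmalloc_array()/kunit_kfree() on top of it. A rough consumer-side sketch follows; it is not part of the diff itself, the test function and buffer are hypothetical, and the declarations are assumed to be reachable through <kunit/test.h> as in the KUnit tests above.

#include <kunit/test.h>
#include <linux/slab.h>

static void example_deferred_cleanup_test(struct kunit *test)
{
	/* Hypothetical buffer; kfree() is deferred to test teardown. */
	char *buf = kmalloc(32, GFP_KERNEL);

	KUNIT_ASSERT_NOT_NULL(test, buf);

	/*
	 * Register kfree(buf) to run when the test finishes. If the
	 * registration itself fails, kunit_add_action_or_reset() calls
	 * the action immediately, so buf is never leaked.
	 */
	KUNIT_ASSERT_EQ(test, 0,
			kunit_add_action_or_reset(test, (kunit_action_t *)kfree, buf));

	/* ... exercise buf ... */

	/* Optionally run the deferred kfree() now and drop it from the queue. */
	kunit_release_action(test, (kunit_action_t *)kfree, buf);
}

As the test.c hunk shows, kunit_kfree() now resolves to this same kunit_release_action(test, kfree, ptr) pattern, so existing kunit_kmalloc()/kunit_kfree() users keep their semantics while the bookkeeping moves to the action queue.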