Diffstat (limited to 'lib')
-rw-r--r--  lib/Kconfig.debug      |  10
-rw-r--r--  lib/genalloc.c         |   3
-rw-r--r--  lib/iov_iter.c         |  22
-rw-r--r--  lib/irq_poll.c         |   2
-rw-r--r--  lib/percpu-refcount.c  | 169
-rw-r--r--  lib/random32.c         |   2
-rw-r--r--  lib/stackdepot.c       |   2
-rw-r--r--  lib/test_bpf.c         |   2
8 files changed, 124 insertions, 88 deletions
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 39d07e754822..b01e547d4d04 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -198,6 +198,7 @@ config FRAME_WARN
 	int "Warn for stack frames larger than (needs gcc 4.4)"
 	range 0 8192
 	default 0 if KASAN
+	default 2048 if GCC_PLUGIN_LATENT_ENTROPY
 	default 1024 if !64BIT
 	default 2048 if 64BIT
 	help
@@ -1857,15 +1858,6 @@ config PROVIDE_OHCI1394_DMA_INIT
 
 	  See Documentation/debugging-via-ohci1394.txt for more information.
 
-config BUILD_DOCSRC
-	bool "Build targets in Documentation/ tree"
-	depends on HEADERS_CHECK
-	help
-	  This option attempts to build objects from the source files in the
-	  kernel Documentation/ tree.
-
-	  Say N if you are unsure.
-
 config DMA_API_DEBUG
 	bool "Enable debugging of DMA-API usage"
 	depends on HAVE_DMA_API_DEBUG
diff --git a/lib/genalloc.c b/lib/genalloc.c
index 0a1139644d32..144fe6b1a03e 100644
--- a/lib/genalloc.c
+++ b/lib/genalloc.c
@@ -292,7 +292,7 @@ unsigned long gen_pool_alloc_algo(struct gen_pool *pool, size_t size,
 	struct gen_pool_chunk *chunk;
 	unsigned long addr = 0;
 	int order = pool->min_alloc_order;
-	int nbits, start_bit = 0, end_bit, remain;
+	int nbits, start_bit, end_bit, remain;
 
 #ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
 	BUG_ON(in_nmi());
@@ -307,6 +307,7 @@ unsigned long gen_pool_alloc_algo(struct gen_pool *pool, size_t size,
 		if (size > atomic_read(&chunk->avail))
 			continue;
 
+		start_bit = 0;
 		end_bit = chunk_size(chunk) >> order;
retry:
 		start_bit = algo(chunk->bits, end_bit, start_bit,
diff --git a/lib/iov_iter.c b/lib/iov_iter.c
index 7312e7784611..f0c7f1481bae 100644
--- a/lib/iov_iter.c
+++ b/lib/iov_iter.c
@@ -1139,6 +1139,28 @@ const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags)
 }
 EXPORT_SYMBOL(dup_iter);
 
+/**
+ * import_iovec() - Copy an array of &struct iovec from userspace
+ *     into the kernel, check that it is valid, and initialize a new
+ *     &struct iov_iter iterator to access it.
+ *
+ * @type: One of %READ or %WRITE.
+ * @uvector: Pointer to the userspace array.
+ * @nr_segs: Number of elements in userspace array.
+ * @fast_segs: Number of elements in @iov.
+ * @iov: (input and output parameter) Pointer to pointer to (usually small
+ *     on-stack) kernel array.
+ * @i: Pointer to iterator that will be initialized on success.
+ *
+ * If the array pointed to by *@iov is large enough to hold all @nr_segs,
+ * then this function places %NULL in *@iov on return. Otherwise, a new
+ * array will be allocated and the result placed in *@iov. This means that
+ * the caller may call kfree() on *@iov regardless of whether the small
+ * on-stack array was used or not (and regardless of whether this function
+ * returns an error or not).
+ *
+ * Return: 0 on success or negative error code on error.
+ */
 int import_iovec(int type, const struct iovec __user * uvector,
 		 unsigned nr_segs, unsigned fast_segs,
 		 struct iovec **iov, struct iov_iter *i)
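As a rough illustration of the calling convention described by the new kernel-doc above, a caller might look like the sketch below. This is not part of the diff; the function and variable names are hypothetical, only import_iovec() and the UIO_FASTIOV/kfree() pattern come from the documented API.

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/uio.h>

/* Hypothetical caller: import user iovecs, consume them, then clean up. */
static int example_import(int type, const struct iovec __user *uvec,
			  unsigned nr_segs)
{
	struct iovec iovstack[UIO_FASTIOV];
	struct iovec *iov = iovstack;
	struct iov_iter iter;
	int ret;

	ret = import_iovec(type, uvec, nr_segs, ARRAY_SIZE(iovstack),
			   &iov, &iter);
	if (ret < 0)
		return ret;

	/* ... walk the data with the iov_iter helpers, e.g. copy_to_iter() ... */

	/*
	 * Safe whether or not the on-stack array was used: per the kernel-doc
	 * above, import_iovec() stores NULL in iov when iovstack sufficed.
	 */
	kfree(iov);
	return 0;
}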
diff --git a/lib/irq_poll.c b/lib/irq_poll.c
index 2be55692aa43..1d6565e81030 100644
--- a/lib/irq_poll.c
+++ b/lib/irq_poll.c
@@ -74,7 +74,7 @@ void irq_poll_complete(struct irq_poll *iop)
 }
 EXPORT_SYMBOL(irq_poll_complete);
 
-static void irq_poll_softirq(struct softirq_action *h)
+static void __latent_entropy irq_poll_softirq(struct softirq_action *h)
 {
 	struct list_head *list = this_cpu_ptr(&blk_cpu_iopoll);
 	int rearm = 0, budget = irq_poll_budget;
diff --git a/lib/percpu-refcount.c b/lib/percpu-refcount.c
index 27fe74948882..9ac959ef4cae 100644
--- a/lib/percpu-refcount.c
+++ b/lib/percpu-refcount.c
@@ -33,6 +33,7 @@
 
 #define PERCPU_COUNT_BIAS	(1LU << (BITS_PER_LONG - 1))
 
+static DEFINE_SPINLOCK(percpu_ref_switch_lock);
 static DECLARE_WAIT_QUEUE_HEAD(percpu_ref_switch_waitq);
 
 static unsigned long __percpu *percpu_count_ptr(struct percpu_ref *ref)
@@ -82,6 +83,7 @@ int percpu_ref_init(struct percpu_ref *ref, percpu_ref_func_t *release,
 	atomic_long_set(&ref->count, start_count);
 
 	ref->release = release;
+	ref->confirm_switch = NULL;
 	return 0;
 }
 EXPORT_SYMBOL_GPL(percpu_ref_init);
@@ -101,6 +103,8 @@ void percpu_ref_exit(struct percpu_ref *ref)
 	unsigned long __percpu *percpu_count = percpu_count_ptr(ref);
 
 	if (percpu_count) {
+		/* non-NULL confirm_switch indicates switching in progress */
+		WARN_ON_ONCE(ref->confirm_switch);
 		free_percpu(percpu_count);
 		ref->percpu_count_ptr = __PERCPU_REF_ATOMIC_DEAD;
 	}
@@ -161,66 +165,23 @@ static void percpu_ref_noop_confirm_switch(struct percpu_ref *ref)
 static void __percpu_ref_switch_to_atomic(struct percpu_ref *ref,
 					  percpu_ref_func_t *confirm_switch)
 {
-	if (!(ref->percpu_count_ptr & __PERCPU_REF_ATOMIC)) {
-		/* switching from percpu to atomic */
-		ref->percpu_count_ptr |= __PERCPU_REF_ATOMIC;
-
-		/*
-		 * Non-NULL ->confirm_switch is used to indicate that
-		 * switching is in progress. Use noop one if unspecified.
-		 */
-		WARN_ON_ONCE(ref->confirm_switch);
-		ref->confirm_switch =
-			confirm_switch ?: percpu_ref_noop_confirm_switch;
-
-		percpu_ref_get(ref);	/* put after confirmation */
-		call_rcu_sched(&ref->rcu, percpu_ref_switch_to_atomic_rcu);
-	} else if (confirm_switch) {
-		/*
-		 * Somebody already set ATOMIC. Switching may still be in
-		 * progress. @confirm_switch must be invoked after the
-		 * switching is complete and a full sched RCU grace period
-		 * has passed. Wait synchronously for the previous
-		 * switching and schedule @confirm_switch invocation.
-		 */
-		wait_event(percpu_ref_switch_waitq, !ref->confirm_switch);
-		ref->confirm_switch = confirm_switch;
-
-		percpu_ref_get(ref);	/* put after confirmation */
-		call_rcu_sched(&ref->rcu, percpu_ref_call_confirm_rcu);
+	if (ref->percpu_count_ptr & __PERCPU_REF_ATOMIC) {
+		if (confirm_switch)
+			confirm_switch(ref);
+		return;
 	}
-}
 
-/**
- * percpu_ref_switch_to_atomic - switch a percpu_ref to atomic mode
- * @ref: percpu_ref to switch to atomic mode
- * @confirm_switch: optional confirmation callback
- *
- * There's no reason to use this function for the usual reference counting.
- * Use percpu_ref_kill[_and_confirm]().
- *
- * Schedule switching of @ref to atomic mode. All its percpu counts will
- * be collected to the main atomic counter. On completion, when all CPUs
- * are guaraneed to be in atomic mode, @confirm_switch, which may not
- * block, is invoked. This function may be invoked concurrently with all
- * the get/put operations and can safely be mixed with kill and reinit
- * operations. Note that @ref will stay in atomic mode across kill/reinit
- * cycles until percpu_ref_switch_to_percpu() is called.
- *
- * This function normally doesn't block and can be called from any context
- * but it may block if @confirm_kill is specified and @ref is already in
- * the process of switching to atomic mode. In such cases, @confirm_switch
- * will be invoked after the switching is complete.
- *
- * Due to the way percpu_ref is implemented, @confirm_switch will be called
- * after at least one full sched RCU grace period has passed but this is an
- * implementation detail and must not be depended upon.
- */
-void percpu_ref_switch_to_atomic(struct percpu_ref *ref,
-				 percpu_ref_func_t *confirm_switch)
-{
-	ref->force_atomic = true;
-	__percpu_ref_switch_to_atomic(ref, confirm_switch);
+	/* switching from percpu to atomic */
+	ref->percpu_count_ptr |= __PERCPU_REF_ATOMIC;
+
+	/*
+	 * Non-NULL ->confirm_switch is used to indicate that switching is
+	 * in progress. Use noop one if unspecified.
+	 */
+	ref->confirm_switch = confirm_switch ?: percpu_ref_noop_confirm_switch;
+
+	percpu_ref_get(ref);	/* put after confirmation */
+	call_rcu_sched(&ref->rcu, percpu_ref_switch_to_atomic_rcu);
 }
 
 static void __percpu_ref_switch_to_percpu(struct percpu_ref *ref)
@@ -233,8 +194,6 @@ static void __percpu_ref_switch_to_percpu(struct percpu_ref *ref)
 	if (!(ref->percpu_count_ptr & __PERCPU_REF_ATOMIC))
 		return;
 
-	wait_event(percpu_ref_switch_waitq, !ref->confirm_switch);
-
 	atomic_long_add(PERCPU_COUNT_BIAS, &ref->count);
 
 	/*
@@ -250,6 +209,58 @@ static void __percpu_ref_switch_to_percpu(struct percpu_ref *ref)
 		  ref->percpu_count_ptr & ~__PERCPU_REF_ATOMIC);
 }
 
+static void __percpu_ref_switch_mode(struct percpu_ref *ref,
+				     percpu_ref_func_t *confirm_switch)
+{
+	lockdep_assert_held(&percpu_ref_switch_lock);
+
+	/*
+	 * If the previous ATOMIC switching hasn't finished yet, wait for
+	 * its completion. If the caller ensures that ATOMIC switching
+	 * isn't in progress, this function can be called from any context.
+	 */
+	wait_event_lock_irq(percpu_ref_switch_waitq, !ref->confirm_switch,
+			    percpu_ref_switch_lock);
+
+	if (ref->force_atomic || (ref->percpu_count_ptr & __PERCPU_REF_DEAD))
+		__percpu_ref_switch_to_atomic(ref, confirm_switch);
+	else
+		__percpu_ref_switch_to_percpu(ref);
+}
+
+/**
+ * percpu_ref_switch_to_atomic - switch a percpu_ref to atomic mode
+ * @ref: percpu_ref to switch to atomic mode
+ * @confirm_switch: optional confirmation callback
+ *
+ * There's no reason to use this function for the usual reference counting.
+ * Use percpu_ref_kill[_and_confirm]().
+ *
+ * Schedule switching of @ref to atomic mode. All its percpu counts will
+ * be collected to the main atomic counter. On completion, when all CPUs
+ * are guaraneed to be in atomic mode, @confirm_switch, which may not
+ * block, is invoked. This function may be invoked concurrently with all
+ * the get/put operations and can safely be mixed with kill and reinit
+ * operations. Note that @ref will stay in atomic mode across kill/reinit
+ * cycles until percpu_ref_switch_to_percpu() is called.
+ *
+ * This function may block if @ref is in the process of switching to atomic
+ * mode. If the caller ensures that @ref is not in the process of
+ * switching to atomic mode, this function can be called from any context.
+ */
+void percpu_ref_switch_to_atomic(struct percpu_ref *ref,
+				 percpu_ref_func_t *confirm_switch)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&percpu_ref_switch_lock, flags);
+
+	ref->force_atomic = true;
+	__percpu_ref_switch_mode(ref, confirm_switch);
+
+	spin_unlock_irqrestore(&percpu_ref_switch_lock, flags);
+}
+
 /**
  * percpu_ref_switch_to_percpu - switch a percpu_ref to percpu mode
  * @ref: percpu_ref to switch to percpu mode
@@ -264,17 +275,20 @@ static void __percpu_ref_switch_to_percpu(struct percpu_ref *ref)
  * dying or dead, the actual switching takes place on the following
  * percpu_ref_reinit().
  *
- * This function normally doesn't block and can be called from any context
- * but it may block if @ref is in the process of switching to atomic mode
- * by percpu_ref_switch_atomic().
+ * This function may block if @ref is in the process of switching to atomic
+ * mode. If the caller ensures that @ref is not in the process of
+ * switching to atomic mode, this function can be called from any context.
  */
 void percpu_ref_switch_to_percpu(struct percpu_ref *ref)
 {
+	unsigned long flags;
+
+	spin_lock_irqsave(&percpu_ref_switch_lock, flags);
+
 	ref->force_atomic = false;
+	__percpu_ref_switch_mode(ref, NULL);
 
-	/* a dying or dead ref can't be switched to percpu mode w/o reinit */
-	if (!(ref->percpu_count_ptr & __PERCPU_REF_DEAD))
-		__percpu_ref_switch_to_percpu(ref);
+	spin_unlock_irqrestore(&percpu_ref_switch_lock, flags);
 }
 
 /**
@@ -290,21 +304,23 @@ void percpu_ref_switch_to_percpu(struct percpu_ref *ref)
  *
 * This function normally doesn't block and can be called from any context
 * but it may block if @confirm_kill is specified and @ref is in the
- * process of switching to atomic mode by percpu_ref_switch_atomic().
- *
- * Due to the way percpu_ref is implemented, @confirm_switch will be called
- * after at least one full sched RCU grace period has passed but this is an
- * implementation detail and must not be depended upon.
+ * process of switching to atomic mode by percpu_ref_switch_to_atomic().
 */
 void percpu_ref_kill_and_confirm(struct percpu_ref *ref,
 				 percpu_ref_func_t *confirm_kill)
 {
+	unsigned long flags;
+
+	spin_lock_irqsave(&percpu_ref_switch_lock, flags);
+
 	WARN_ONCE(ref->percpu_count_ptr & __PERCPU_REF_DEAD,
 		  "%s called more than once on %pf!", __func__, ref->release);
 
 	ref->percpu_count_ptr |= __PERCPU_REF_DEAD;
-	__percpu_ref_switch_to_atomic(ref, confirm_kill);
+	__percpu_ref_switch_mode(ref, confirm_kill);
 	percpu_ref_put(ref);
+
+	spin_unlock_irqrestore(&percpu_ref_switch_lock, flags);
 }
 EXPORT_SYMBOL_GPL(percpu_ref_kill_and_confirm);
 
@@ -321,11 +337,16 @@ EXPORT_SYMBOL_GPL(percpu_ref_kill_and_confirm);
  */
 void percpu_ref_reinit(struct percpu_ref *ref)
 {
+	unsigned long flags;
+
+	spin_lock_irqsave(&percpu_ref_switch_lock, flags);
+
 	WARN_ON_ONCE(!percpu_ref_is_zero(ref));
 
 	ref->percpu_count_ptr &= ~__PERCPU_REF_DEAD;
 	percpu_ref_get(ref);
-	if (!ref->force_atomic)
-		__percpu_ref_switch_to_percpu(ref);
+	__percpu_ref_switch_mode(ref, NULL);
+
+	spin_unlock_irqrestore(&percpu_ref_switch_lock, flags);
 }
 EXPORT_SYMBOL_GPL(percpu_ref_reinit);
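To put the percpu_ref changes above in context, here is a rough sketch of how the init/kill-and-confirm/reinit entry points touched by this diff are typically used. The struct foo object and its helpers are invented for illustration and are not part of the patch; only the percpu_ref_* calls come from the API documented above.

#include <linux/completion.h>
#include <linux/percpu-refcount.h>

/* Hypothetical object whose lifetime is managed by a percpu_ref. */
struct foo {
	struct percpu_ref ref;
	struct completion confirmed;
};

static void foo_release(struct percpu_ref *ref)
{
	/* Last reference gone: the object may be torn down from here. */
}

static void foo_confirm_kill(struct percpu_ref *ref)
{
	struct foo *foo = container_of(ref, struct foo, ref);

	/* All CPUs now see the ref as dead; percpu_ref_tryget_live() fails. */
	complete(&foo->confirmed);
}

static int foo_setup(struct foo *foo)
{
	init_completion(&foo->confirmed);
	/* starts in percpu mode unless PERCPU_REF_INIT_ATOMIC is passed */
	return percpu_ref_init(&foo->ref, foo_release, 0, GFP_KERNEL);
}

static void foo_shutdown(struct foo *foo)
{
	/* Marks the ref dead, switches it to atomic mode, drops the base ref. */
	percpu_ref_kill_and_confirm(&foo->ref, foo_confirm_kill);
	wait_for_completion(&foo->confirmed);
}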
diff --git a/lib/random32.c b/lib/random32.c
index 915982b304bb..fa594b1140e6 100644
--- a/lib/random32.c
+++ b/lib/random32.c
@@ -47,7 +47,7 @@ static inline void prandom_state_selftest(void)
 }
 #endif
 
-static DEFINE_PER_CPU(struct rnd_state, net_rand_state);
+static DEFINE_PER_CPU(struct rnd_state, net_rand_state) __latent_entropy;
 
 /**
  * prandom_u32_state - seeded pseudo-random number generator.
diff --git a/lib/stackdepot.c b/lib/stackdepot.c
index 60f77f1d470a..4d830e299989 100644
--- a/lib/stackdepot.c
+++ b/lib/stackdepot.c
@@ -50,7 +50,7 @@
 		STACK_ALLOC_ALIGN)
 #define STACK_ALLOC_INDEX_BITS (DEPOT_STACK_BITS - \
 		STACK_ALLOC_NULL_PROTECTION_BITS - STACK_ALLOC_OFFSET_BITS)
-#define STACK_ALLOC_SLABS_CAP 1024
+#define STACK_ALLOC_SLABS_CAP 8192
 #define STACK_ALLOC_MAX_SLABS \
 	(((1LL << (STACK_ALLOC_INDEX_BITS)) < STACK_ALLOC_SLABS_CAP) ? \
 	 (1LL << (STACK_ALLOC_INDEX_BITS)) : STACK_ALLOC_SLABS_CAP)
diff --git a/lib/test_bpf.c b/lib/test_bpf.c
index 94346b4d8984..0362da0b66c3 100644
--- a/lib/test_bpf.c
+++ b/lib/test_bpf.c
@@ -4831,7 +4831,7 @@ static struct bpf_test tests[] = {
 		{ },
 		INTERNAL,
 		{ 0x34 },
-		{ { 1, 0xbef } },
+		{ { ETH_HLEN, 0xbef } },
 		.fill_helper = bpf_fill_ld_abs_vlan_push_pop,
 	},
 	/*
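Finally, a brief sketch of the explicit mode-switching API whose documentation is reworded in the percpu-refcount hunks above. The bar_* helpers are hypothetical; only percpu_ref_switch_to_atomic() and percpu_ref_switch_to_percpu() and their documented blocking behavior come from the diff.

#include <linux/percpu-refcount.h>

static void bar_quiesce_done(struct percpu_ref *ref)
{
	/* Invoked once all CPUs observe atomic mode; must not block. */
}

static void bar_enter_slow_mode(struct percpu_ref *ref)
{
	/* May block if a previous mode switch is still in flight. */
	percpu_ref_switch_to_atomic(ref, bar_quiesce_done);
}

static void bar_leave_slow_mode(struct percpu_ref *ref)
{
	/* Stays atomic if the ref was killed; percpu_ref_reinit() undoes that. */
	percpu_ref_switch_to_percpu(ref);
}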