Diffstat (limited to 'include')
-rw-r--r--  include/asm-generic/qspinlock.h  |  14
-rw-r--r--  include/linux/init_task.h        |   8
-rw-r--r--  include/linux/rcupdate.h         |  14
-rw-r--r--  include/linux/rcutiny.h          |   8
-rw-r--r--  include/linux/sched.h            |   5
-rw-r--r--  include/linux/spinlock.h         |  31
-rw-r--r--  include/linux/spinlock_up.h      |   6
-rw-r--r--  include/linux/srcutiny.h         |  13
-rw-r--r--  include/linux/srcutree.h         |   3
-rw-r--r--  include/linux/swait.h            |  55
-rw-r--r--  include/trace/events/rcu.h       |   7
-rw-r--r--  include/uapi/linux/membarrier.h  |  23
12 files changed, 111 insertions, 76 deletions
diff --git a/include/asm-generic/qspinlock.h b/include/asm-generic/qspinlock.h
index 9f0681bf1e87..66260777d644 100644
--- a/include/asm-generic/qspinlock.h
+++ b/include/asm-generic/qspinlock.h
@@ -22,17 +22,6 @@
 #include <asm-generic/qspinlock_types.h>
 
 /**
- * queued_spin_unlock_wait - wait until the _current_ lock holder releases the lock
- * @lock : Pointer to queued spinlock structure
- *
- * There is a very slight possibility of live-lock if the lockers keep coming
- * and the waiter is just unfortunate enough to not see any unlock state.
- */
-#ifndef queued_spin_unlock_wait
-extern void queued_spin_unlock_wait(struct qspinlock *lock);
-#endif
-
-/**
  * queued_spin_is_locked - is the spinlock locked?
  * @lock: Pointer to queued spinlock structure
  * Return: 1 if it is locked, 0 otherwise
@@ -41,8 +30,6 @@ extern void queued_spin_unlock_wait(struct qspinlock *lock);
 static __always_inline int queued_spin_is_locked(struct qspinlock *lock)
 {
 	/*
-	 * See queued_spin_unlock_wait().
-	 *
 	 * Any !0 state indicates it is locked, even if _Q_LOCKED_VAL
 	 * isn't immediately observable.
 	 */
@@ -135,6 +122,5 @@ static __always_inline bool virt_spin_lock(struct qspinlock *lock)
 #define arch_spin_trylock(l)		queued_spin_trylock(l)
 #define arch_spin_unlock(l)		queued_spin_unlock(l)
 #define arch_spin_lock_flags(l, f)	queued_spin_lock(l)
-#define arch_spin_unlock_wait(l)	queued_spin_unlock_wait(l)
 
 #endif /* __ASM_GENERIC_QSPINLOCK_H */
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index a2f6707e9fc0..0e849715e5be 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -126,17 +126,11 @@ extern struct group_info init_groups;
 #endif
 
 #ifdef CONFIG_PREEMPT_RCU
-#define INIT_TASK_RCU_TREE_PREEMPT()					\
-	.rcu_blocked_node = NULL,
-#else
-#define INIT_TASK_RCU_TREE_PREEMPT(tsk)
-#endif
-#ifdef CONFIG_PREEMPT_RCU
 #define INIT_TASK_RCU_PREEMPT(tsk)					\
 	.rcu_read_lock_nesting = 0,					\
 	.rcu_read_unlock_special.s = 0,					\
 	.rcu_node_entry = LIST_HEAD_INIT(tsk.rcu_node_entry),		\
-	INIT_TASK_RCU_TREE_PREEMPT()
+	.rcu_blocked_node = NULL,
 #else
 #define INIT_TASK_RCU_PREEMPT(tsk)
 #endif
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index cf307ebf345d..96f1baf62ab8 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -58,8 +58,6 @@ void call_rcu(struct rcu_head *head, rcu_callback_t func);
 void call_rcu_bh(struct rcu_head *head, rcu_callback_t func);
 void call_rcu_sched(struct rcu_head *head, rcu_callback_t func);
 void synchronize_sched(void);
-void call_rcu_tasks(struct rcu_head *head, rcu_callback_t func);
-void synchronize_rcu_tasks(void);
 void rcu_barrier_tasks(void);
 
 #ifdef CONFIG_PREEMPT_RCU
@@ -105,6 +103,7 @@ static inline int rcu_preempt_depth(void)
 
 /* Internal to kernel */
 void rcu_init(void);
+extern int rcu_scheduler_active __read_mostly;
 void rcu_sched_qs(void);
 void rcu_bh_qs(void);
 void rcu_check_callbacks(int user);
@@ -165,8 +164,6 @@ static inline void rcu_init_nohz(void) { }
  * macro rather than an inline function to avoid #include hell.
  */
 #ifdef CONFIG_TASKS_RCU
-#define TASKS_RCU(x) x
-extern struct srcu_struct tasks_rcu_exit_srcu;
 #define rcu_note_voluntary_context_switch_lite(t) \
 	do { \
 		if (READ_ONCE((t)->rcu_tasks_holdout)) \
@@ -177,10 +174,17 @@ extern struct srcu_struct tasks_rcu_exit_srcu;
 		rcu_all_qs(); \
 		rcu_note_voluntary_context_switch_lite(t); \
 	} while (0)
+void call_rcu_tasks(struct rcu_head *head, rcu_callback_t func);
+void synchronize_rcu_tasks(void);
+void exit_tasks_rcu_start(void);
+void exit_tasks_rcu_finish(void);
 #else /* #ifdef CONFIG_TASKS_RCU */
-#define TASKS_RCU(x) do { } while (0)
 #define rcu_note_voluntary_context_switch_lite(t)	do { } while (0)
 #define rcu_note_voluntary_context_switch(t)		rcu_all_qs()
+#define call_rcu_tasks call_rcu_sched
+#define synchronize_rcu_tasks synchronize_sched
+static inline void exit_tasks_rcu_start(void) { }
+static inline void exit_tasks_rcu_finish(void) { }
 #endif /* #else #ifdef CONFIG_TASKS_RCU */
 
 /**
diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h
index 5becbbccb998..b3dbf9502fd0 100644
--- a/include/linux/rcutiny.h
+++ b/include/linux/rcutiny.h
@@ -116,13 +116,11 @@ static inline void rcu_irq_exit_irqson(void) { }
 static inline void rcu_irq_enter_irqson(void) { }
 static inline void rcu_irq_exit(void) { }
 static inline void exit_rcu(void) { }
-
-#if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_SRCU)
-extern int rcu_scheduler_active __read_mostly;
+#ifdef CONFIG_SRCU
 void rcu_scheduler_starting(void);
-#else /* #if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_SRCU) */
+#else /* #ifndef CONFIG_SRCU */
 static inline void rcu_scheduler_starting(void) { }
-#endif /* #else #if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_SRCU) */
+#endif /* #else #ifndef CONFIG_SRCU */
 
 static inline void rcu_end_inkernel_boot(void) { }
 static inline bool rcu_is_watching(void) { return true; }
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 8337e2db0bb2..e4c38809a09e 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -589,9 +589,10 @@ struct task_struct {
 
 #ifdef CONFIG_TASKS_RCU
 	unsigned long			rcu_tasks_nvcsw;
-	bool				rcu_tasks_holdout;
-	struct list_head		rcu_tasks_holdout_list;
+	u8				rcu_tasks_holdout;
+	u8				rcu_tasks_idx;
 	int				rcu_tasks_idle_cpu;
+	struct list_head		rcu_tasks_holdout_list;
 #endif /* #ifdef CONFIG_TASKS_RCU */
 
 	struct sched_info		sched_info;
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index d9510e8522d4..ef018a6e4985 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -130,12 +130,6 @@ do { \
 #define smp_mb__before_spinlock()	smp_wmb()
 #endif
 
-/**
- * raw_spin_unlock_wait - wait until the spinlock gets unlocked
- * @lock: the spinlock in question.
- */
-#define raw_spin_unlock_wait(lock)	arch_spin_unlock_wait(&(lock)->raw_lock)
-
 #ifdef CONFIG_DEBUG_SPINLOCK
  extern void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock);
 #define do_raw_spin_lock_flags(lock, flags) do_raw_spin_lock(lock)
@@ -369,31 +363,6 @@ static __always_inline int spin_trylock_irq(spinlock_t *lock)
 	raw_spin_trylock_irqsave(spinlock_check(lock), flags); \
 })
 
-/**
- * spin_unlock_wait - Interpose between successive critical sections
- * @lock: the spinlock whose critical sections are to be interposed.
- *
- * Semantically this is equivalent to a spin_lock() immediately
- * followed by a spin_unlock().  However, most architectures have
- * more efficient implementations in which the spin_unlock_wait()
- * cannot block concurrent lock acquisition, and in some cases
- * where spin_unlock_wait() does not write to the lock variable.
- * Nevertheless, spin_unlock_wait() can have high overhead, so if
- * you feel the need to use it, please check to see if there is
- * a better way to get your job done.
- *
- * The ordering guarantees provided by spin_unlock_wait() are:
- *
- * 1. All accesses preceding the spin_unlock_wait() happen before
- *    any accesses in later critical sections for this same lock.
- * 2. All accesses following the spin_unlock_wait() happen after
- *    any accesses in earlier critical sections for this same lock.
- */
-static __always_inline void spin_unlock_wait(spinlock_t *lock)
-{
-	raw_spin_unlock_wait(&lock->rlock);
-}
-
 static __always_inline int spin_is_locked(spinlock_t *lock)
 {
 	return raw_spin_is_locked(&lock->rlock);
diff --git a/include/linux/spinlock_up.h b/include/linux/spinlock_up.h
index 0d9848de677d..612fb530af41 100644
--- a/include/linux/spinlock_up.h
+++ b/include/linux/spinlock_up.h
@@ -26,11 +26,6 @@
 #ifdef CONFIG_DEBUG_SPINLOCK
 #define arch_spin_is_locked(x)		((x)->slock == 0)
 
-static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
-{
-	smp_cond_load_acquire(&lock->slock, VAL);
-}
-
 static inline void arch_spin_lock(arch_spinlock_t *lock)
 {
 	lock->slock = 0;
@@ -73,7 +68,6 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock)
 
 #else /* DEBUG_SPINLOCK */
 #define arch_spin_is_locked(lock)	((void)(lock), 0)
-#define arch_spin_unlock_wait(lock)	do { barrier(); (void)(lock); } while (0)
 /* for sched/core.c and kernel_lock.c: */
 # define arch_spin_lock(lock)		do { barrier(); (void)(lock); } while (0)
 # define arch_spin_lock_flags(lock, flags)	do { barrier(); (void)(lock); } while (0)
diff --git a/include/linux/srcutiny.h b/include/linux/srcutiny.h
index cfbfc540cafc..261471f407a5 100644
--- a/include/linux/srcutiny.h
+++ b/include/linux/srcutiny.h
@@ -87,4 +87,17 @@ static inline void srcu_barrier(struct srcu_struct *sp)
 	synchronize_srcu(sp);
 }
 
+/* Defined here to avoid size increase for non-torture kernels. */
+static inline void srcu_torture_stats_print(struct srcu_struct *sp,
+					    char *tt, char *tf)
+{
+	int idx;
+
+	idx = READ_ONCE(sp->srcu_idx) & 0x1;
+	pr_alert("%s%s Tiny SRCU per-CPU(idx=%d): (%hd,%hd)\n",
+		 tt, tf, idx,
+		 READ_ONCE(sp->srcu_lock_nesting[!idx]),
+		 READ_ONCE(sp->srcu_lock_nesting[idx]));
+}
+
 #endif
diff --git a/include/linux/srcutree.h b/include/linux/srcutree.h
index 42973f787e7e..a949f4f9e4d7 100644
--- a/include/linux/srcutree.h
+++ b/include/linux/srcutree.h
@@ -104,8 +104,6 @@ struct srcu_struct {
 #define SRCU_STATE_SCAN1	1
 #define SRCU_STATE_SCAN2	2
 
-void process_srcu(struct work_struct *work);
-
 #define __SRCU_STRUCT_INIT(name)					\
 	{								\
 		.sda = &name##_srcu_data,				\
@@ -141,5 +139,6 @@ void process_srcu(struct work_struct *work);
 
 void synchronize_srcu_expedited(struct srcu_struct *sp);
 void srcu_barrier(struct srcu_struct *sp);
+void srcu_torture_stats_print(struct srcu_struct *sp, char *tt, char *tf);
 
 #endif
diff --git a/include/linux/swait.h b/include/linux/swait.h
index c1f9c62a8a50..4a4e180d0a35 100644
--- a/include/linux/swait.h
+++ b/include/linux/swait.h
@@ -169,4 +169,59 @@ do { \
 	__ret; \
 })
 
+#define __swait_event_idle(wq, condition)				\
+	(void)___swait_event(wq, condition, TASK_IDLE, 0, schedule())
+
+/**
+ * swait_event_idle - wait without system load contribution
+ * @wq: the waitqueue to wait on
+ * @condition: a C expression for the event to wait for
+ *
+ * The process is put to sleep (TASK_IDLE) until the @condition evaluates to
+ * true. The @condition is checked each time the waitqueue @wq is woken up.
+ *
+ * This function is mostly used when a kthread or workqueue waits for some
+ * condition and doesn't want to contribute to system load. Signals are
+ * ignored.
+ */
+#define swait_event_idle(wq, condition)					\
+do {									\
+	if (condition)							\
+		break;							\
+	__swait_event_idle(wq, condition);				\
+} while (0)
+
+#define __swait_event_idle_timeout(wq, condition, timeout)		\
+	___swait_event(wq, ___wait_cond_timeout(condition),		\
+		       TASK_IDLE, timeout,				\
+		       __ret = schedule_timeout(__ret))
+
+/**
+ * swait_event_idle_timeout - wait up to timeout without load contribution
+ * @wq: the waitqueue to wait on
+ * @condition: a C expression for the event to wait for
+ * @timeout: timeout at which we'll give up in jiffies
+ *
+ * The process is put to sleep (TASK_IDLE) until the @condition evaluates to
+ * true. The @condition is checked each time the waitqueue @wq is woken up.
+ *
+ * This function is mostly used when a kthread or workqueue waits for some
+ * condition and doesn't want to contribute to system load. Signals are
+ * ignored.
+ *
+ * Returns:
+ * 0 if the @condition evaluated to %false after the @timeout elapsed,
+ * 1 if the @condition evaluated to %true after the @timeout elapsed,
+ * or the remaining jiffies (at least 1) if the @condition evaluated
+ * to %true before the @timeout elapsed.
+ */
+#define swait_event_idle_timeout(wq, condition, timeout)		\
+({									\
+	long __ret = timeout;						\
+	if (!___wait_cond_timeout(condition))				\
+		__ret = __swait_event_idle_timeout(wq,			\
+						   condition, timeout);	\
+	__ret;								\
+})
+
 #endif /* _LINUX_SWAIT_H */
diff --git a/include/trace/events/rcu.h b/include/trace/events/rcu.h
index 91dc089d65b7..e91ae1f2290d 100644
--- a/include/trace/events/rcu.h
+++ b/include/trace/events/rcu.h
@@ -703,6 +703,7 @@ TRACE_EVENT(rcu_batch_end,
  * at the beginning and end of the read, respectively.  Note that the
  * callback address can be NULL.
  */
+#define RCUTORTURENAME_LEN 8
 TRACE_EVENT(rcu_torture_read,
 
 	TP_PROTO(const char *rcutorturename, struct rcu_head *rhp,
@@ -711,7 +712,7 @@ TRACE_EVENT(rcu_torture_read,
 	TP_ARGS(rcutorturename, rhp, secs, c_old, c),
 
 	TP_STRUCT__entry(
-		__field(const char *, rcutorturename)
+		__field(char, rcutorturename[RCUTORTURENAME_LEN])
 		__field(struct rcu_head *, rhp)
 		__field(unsigned long, secs)
 		__field(unsigned long, c_old)
@@ -719,7 +720,9 @@ TRACE_EVENT(rcu_torture_read,
 	),
 
 	TP_fast_assign(
-		__entry->rcutorturename = rcutorturename;
+		strncpy(__entry->rcutorturename, rcutorturename,
+			RCUTORTURENAME_LEN);
+		__entry->rcutorturename[RCUTORTURENAME_LEN - 1] = 0;
 		__entry->rhp = rhp;
 		__entry->secs = secs;
 		__entry->c_old = c_old;
diff --git a/include/uapi/linux/membarrier.h b/include/uapi/linux/membarrier.h
index e0b108bd2624..6d47b3249d8a 100644
--- a/include/uapi/linux/membarrier.h
+++ b/include/uapi/linux/membarrier.h
@@ -40,14 +40,33 @@
  *                          (non-running threads are de facto in such a
  *                          state). This covers threads from all processes
  *                          running on the system. This command returns 0.
+ * @MEMBARRIER_CMD_PRIVATE_EXPEDITED:
+ *                          Execute a memory barrier on each running
+ *                          thread belonging to the same process as the current
+ *                          thread. Upon return from system call, the
+ *                          caller thread is ensured that all its running
+ *                          threads siblings have passed through a state
+ *                          where all memory accesses to user-space
+ *                          addresses match program order between entry
+ *                          to and return from the system call
+ *                          (non-running threads are de facto in such a
+ *                          state). This only covers threads from the
+ *                          same processes as the caller thread. This
+ *                          command returns 0. The "expedited" commands
+ *                          complete faster than the non-expedited ones,
+ *                          they never block, but have the downside of
+ *                          causing extra overhead.
  *
  * Command to be passed to the membarrier system call. The commands need to
  * be a single bit each, except for MEMBARRIER_CMD_QUERY which is assigned to
  * the value 0.
  */
 enum membarrier_cmd {
-	MEMBARRIER_CMD_QUERY = 0,
-	MEMBARRIER_CMD_SHARED = (1 << 0),
+	MEMBARRIER_CMD_QUERY			= 0,
+	MEMBARRIER_CMD_SHARED			= (1 << 0),
+	/* reserved for MEMBARRIER_CMD_SHARED_EXPEDITED (1 << 1) */
+	/* reserved for MEMBARRIER_CMD_PRIVATE (1 << 2) */
+	MEMBARRIER_CMD_PRIVATE_EXPEDITED	= (1 << 3),
 };
 
 #endif /* _UAPI_LINUX_MEMBARRIER_H */
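
The qspinlock.h, spinlock.h, and spinlock_up.h hunks above delete spin_unlock_wait() and its architecture back ends. As the removed kernel-doc notes, the call was semantically a spin_lock() immediately followed by a spin_unlock(), so that pair is the straightforward substitute where the interposition behavior is still wanted. A minimal sketch under that assumption; the lock and helper names are invented for illustration and are not part of this pull:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_lock);	/* hypothetical lock */

/* Wait for any critical section currently holding demo_lock to finish. */
static void demo_interpose(void)
{
	spin_lock(&demo_lock);		/* blocks until the current holder unlocks */
	spin_unlock(&demo_lock);	/* release immediately; no data is touched */
}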
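
The rcupdate.h hunks move the call_rcu_tasks() and synchronize_rcu_tasks() declarations under CONFIG_TASKS_RCU and map them to the sched flavor otherwise, so callers build either way. A hedged usage sketch, not taken from this pull; struct demo_probe and its helpers are invented:

#include <linux/rcupdate.h>
#include <linux/slab.h>

struct demo_probe {			/* hypothetical dynamically patched hook */
	struct rcu_head rh;
	void (*handler)(void);
};

static void demo_probe_free(struct rcu_head *rhp)
{
	kfree(container_of(rhp, struct demo_probe, rh));
}

static void demo_probe_retire(struct demo_probe *p)
{
	/*
	 * Defer the free until every task has passed through a voluntary
	 * context switch; with CONFIG_TASKS_RCU=n this falls back to
	 * call_rcu_sched() via the #define added above.
	 */
	call_rcu_tasks(&p->rh, demo_probe_free);
}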
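
The new swait_event_idle() and swait_event_idle_timeout() macros in swait.h sleep in TASK_IDLE, so a waiting kthread does not inflate the load average. A hedged sketch of how such a kthread loop might look; demo_swq, demo_stop, and the HZ period are invented for illustration:

#include <linux/kthread.h>
#include <linux/swait.h>

static DECLARE_SWAIT_QUEUE_HEAD(demo_swq);
static bool demo_stop;

static int demo_kthread(void *unused)
{
	while (!kthread_should_stop()) {
		/* Sleep in TASK_IDLE; wake on demo_swq or after one second. */
		swait_event_idle_timeout(demo_swq, READ_ONCE(demo_stop), HZ);
		if (READ_ONCE(demo_stop))
			break;
	}
	return 0;
}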
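
The membarrier.h hunk adds MEMBARRIER_CMD_PRIVATE_EXPEDITED, which issues a memory barrier on the running threads of the calling process only. A hedged userspace sketch against the UAPI exactly as added here (this patch defines no registration step); the raw syscall is used because no libc wrapper is assumed:

#include <linux/membarrier.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <stdio.h>

int main(void)
{
	/* MEMBARRIER_CMD_QUERY returns a bitmask of supported commands. */
	long mask = syscall(__NR_membarrier, MEMBARRIER_CMD_QUERY, 0);

	if (mask < 0 || !(mask & MEMBARRIER_CMD_PRIVATE_EXPEDITED)) {
		fprintf(stderr, "private expedited membarrier unsupported\n");
		return 1;
	}
	/* Barrier covering all currently running threads of this process. */
	if (syscall(__NR_membarrier, MEMBARRIER_CMD_PRIVATE_EXPEDITED, 0)) {
		perror("membarrier");
		return 1;
	}
	return 0;
}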