author		Linus Torvalds <torvalds@linux-foundation.org>	2019-11-27 02:42:43 +0300
committer	Linus Torvalds <torvalds@linux-foundation.org>	2019-11-27 02:42:43 +0300
commit		1ae78780eda54023a0fb49ee743dbba39da148e0 (patch)
tree		615313e65083cc30aca62cba3c717331f53002a6 /include
parent		77a05940eee7e9891cd6add7a690a3e762ee21b0 (diff)
parent		43e0ae7ae0f567a3f8c10ec7a4078bc482660921 (diff)
download	linux-1ae78780eda54023a0fb49ee743dbba39da148e0.tar.xz
Merge branch 'core-rcu-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull RCU updates from Ingo Molnar:
"The main changes in this cycle were:
- Dynamic tick (nohz) updates, perhaps most notably changes to force
the tick on when needed due to lengthy in-kernel execution on CPUs
on which RCU is waiting.
- Linux-kernel memory consistency model updates.
- Replace rcu_swap_protected() with rcu_replace_pointer() (a before/after
sketch follows this list).
- Torture-test updates.
- Documentation updates.
- Miscellaneous fixes"
* 'core-rcu-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (51 commits)
security/safesetid: Replace rcu_swap_protected() with rcu_replace_pointer()
net/sched: Replace rcu_swap_protected() with rcu_replace_pointer()
net/netfilter: Replace rcu_swap_protected() with rcu_replace_pointer()
net/core: Replace rcu_swap_protected() with rcu_replace_pointer()
bpf/cgroup: Replace rcu_swap_protected() with rcu_replace_pointer()
fs/afs: Replace rcu_swap_protected() with rcu_replace_pointer()
drivers/scsi: Replace rcu_swap_protected() with rcu_replace_pointer()
drm/i915: Replace rcu_swap_protected() with rcu_replace_pointer()
x86/kvm/pmu: Replace rcu_swap_protected() with rcu_replace_pointer()
rcu: Upgrade rcu_swap_protected() to rcu_replace_pointer()
rcu: Suppress levelspread uninitialized messages
rcu: Fix uninitialized variable in nocb_gp_wait()
rcu: Update descriptions for rcu_future_grace_period tracepoint
rcu: Update descriptions for rcu_nocb_wake tracepoint
rcu: Remove obsolete descriptions for rcu_barrier tracepoint
rcu: Ensure that ->rcu_urgent_qs is set before resched IPI
workqueue: Convert for_each_wq to use built-in list check
rcu: Several rcu_segcblist functions can be static
rcu: Remove unused function hlist_bl_del_init_rcu()
Documentation: Rename rcu_node_context_switch() to rcu_note_context_switch()
...
Diffstat (limited to 'include')
-rw-r--r--	include/linux/rculist_bl.h	28
-rw-r--r--	include/linux/rcupdate.h	18
-rw-r--r--	include/linux/rcutiny.h		1
-rw-r--r--	include/linux/rcutree.h		1
-rw-r--r--	include/linux/tick.h		7
-rw-r--r--	include/trace/events/rcu.h	47
-rw-r--r--	include/trace/events/timer.h	3
7 files changed, 54 insertions, 51 deletions
diff --git a/include/linux/rculist_bl.h b/include/linux/rculist_bl.h
index 66e73ec1aa99..0b952d06eb0b 100644
--- a/include/linux/rculist_bl.h
+++ b/include/linux/rculist_bl.h
@@ -25,34 +25,6 @@ static inline struct hlist_bl_node *hlist_bl_first_rcu(struct hlist_bl_head *h)
 }
 
 /**
- * hlist_bl_del_init_rcu - deletes entry from hash list with re-initialization
- * @n: the element to delete from the hash list.
- *
- * Note: hlist_bl_unhashed() on the node returns true after this. It is
- * useful for RCU based read lockfree traversal if the writer side
- * must know if the list entry is still hashed or already unhashed.
- *
- * In particular, it means that we can not poison the forward pointers
- * that may still be used for walking the hash list and we can only
- * zero the pprev pointer so list_unhashed() will return true after
- * this.
- *
- * The caller must take whatever precautions are necessary (such as
- * holding appropriate locks) to avoid racing with another
- * list-mutation primitive, such as hlist_bl_add_head_rcu() or
- * hlist_bl_del_rcu(), running on this same list. However, it is
- * perfectly legal to run concurrently with the _rcu list-traversal
- * primitives, such as hlist_bl_for_each_entry_rcu().
- */
-static inline void hlist_bl_del_init_rcu(struct hlist_bl_node *n)
-{
-	if (!hlist_bl_unhashed(n)) {
-		__hlist_bl_del(n);
-		n->pprev = NULL;
-	}
-}
-
-/**
  * hlist_bl_del_rcu - deletes entry from hash list without re-initialization
  * @n: the element to delete from the hash list.
  *
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 75a2eded7aa2..185dd9736863 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -383,6 +383,24 @@ do { \
 } while (0)
 
 /**
+ * rcu_replace_pointer() - replace an RCU pointer, returning its old value
+ * @rcu_ptr: RCU pointer, whose old value is returned
+ * @ptr: regular pointer
+ * @c: the lockdep conditions under which the dereference will take place
+ *
+ * Perform a replacement, where @rcu_ptr is an RCU-annotated
+ * pointer and @c is the lockdep argument that is passed to the
+ * rcu_dereference_protected() call used to read that pointer.  The old
+ * value of @rcu_ptr is returned, and @rcu_ptr is set to @ptr.
+ */
+#define rcu_replace_pointer(rcu_ptr, ptr, c)				\
+({									\
+	typeof(ptr) __tmp = rcu_dereference_protected((rcu_ptr), (c));	\
+	rcu_assign_pointer((rcu_ptr), (ptr));				\
+	__tmp;								\
+})
+
+/**
  * rcu_swap_protected() - swap an RCU and a regular pointer
  * @rcu_ptr: RCU pointer
  * @ptr: regular pointer
diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h
index 9bf1dfe7781f..37b6f0c2b79d 100644
--- a/include/linux/rcutiny.h
+++ b/include/linux/rcutiny.h
@@ -84,6 +84,7 @@ static inline void rcu_scheduler_starting(void) { }
 #endif /* #else #ifndef CONFIG_SRCU */
 static inline void rcu_end_inkernel_boot(void) { }
 static inline bool rcu_is_watching(void) { return true; }
+static inline void rcu_momentary_dyntick_idle(void) { }
 
 /* Avoid RCU read-side critical sections leaking across. */
 static inline void rcu_all_qs(void) { barrier(); }
diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h
index 18b1ed9864b0..c5147de885ec 100644
--- a/include/linux/rcutree.h
+++ b/include/linux/rcutree.h
@@ -37,6 +37,7 @@ void kfree_call_rcu(struct rcu_head *head, rcu_callback_t func);
 
 void rcu_barrier(void);
 bool rcu_eqs_special_set(int cpu);
+void rcu_momentary_dyntick_idle(void);
 unsigned long get_state_synchronize_rcu(void);
 void cond_synchronize_rcu(unsigned long oldstate);
 
diff --git a/include/linux/tick.h b/include/linux/tick.h
index 7e050a356cc5..7896f792d3b0 100644
--- a/include/linux/tick.h
+++ b/include/linux/tick.h
@@ -108,7 +108,8 @@ enum tick_dep_bits {
 	TICK_DEP_BIT_POSIX_TIMER	= 0,
 	TICK_DEP_BIT_PERF_EVENTS	= 1,
 	TICK_DEP_BIT_SCHED		= 2,
-	TICK_DEP_BIT_CLOCK_UNSTABLE	= 3
+	TICK_DEP_BIT_CLOCK_UNSTABLE	= 3,
+	TICK_DEP_BIT_RCU		= 4
 };
 
 #define TICK_DEP_MASK_NONE		0
@@ -116,6 +117,7 @@ enum tick_dep_bits {
 #define TICK_DEP_MASK_PERF_EVENTS	(1 << TICK_DEP_BIT_PERF_EVENTS)
 #define TICK_DEP_MASK_SCHED		(1 << TICK_DEP_BIT_SCHED)
 #define TICK_DEP_MASK_CLOCK_UNSTABLE	(1 << TICK_DEP_BIT_CLOCK_UNSTABLE)
+#define TICK_DEP_MASK_RCU		(1 << TICK_DEP_BIT_RCU)
 
 #ifdef CONFIG_NO_HZ_COMMON
 extern bool tick_nohz_enabled;
@@ -268,6 +270,9 @@ static inline bool tick_nohz_full_enabled(void) { return false; }
 static inline bool tick_nohz_full_cpu(int cpu) { return false; }
 static inline void tick_nohz_full_add_cpus_to(struct cpumask *mask) { }
 
+static inline void tick_nohz_dep_set_cpu(int cpu, enum tick_dep_bits bit) { }
+static inline void tick_nohz_dep_clear_cpu(int cpu, enum tick_dep_bits bit) { }
+
 static inline void tick_dep_set(enum tick_dep_bits bit) { }
 static inline void tick_dep_clear(enum tick_dep_bits bit) { }
 static inline void tick_dep_set_cpu(int cpu, enum tick_dep_bits bit) { }
diff --git a/include/trace/events/rcu.h b/include/trace/events/rcu.h
index 694bd040cf51..66122602bd08 100644
--- a/include/trace/events/rcu.h
+++ b/include/trace/events/rcu.h
@@ -93,16 +93,16 @@ TRACE_EVENT_RCU(rcu_grace_period,
  * the data from the rcu_node structure, other than rcuname, which comes
  * from the rcu_state structure, and event, which is one of the following:
  *
- * "Startleaf": Request a grace period based on leaf-node data.
+ * "Cleanup": Clean up rcu_node structure after previous GP.
+ * "CleanupMore": Clean up, and another GP is needed.
+ * "EndWait": Complete wait.
+ * "NoGPkthread": The RCU grace-period kthread has not yet started.
  * "Prestarted": Someone beat us to the request
  * "Startedleaf": Leaf node marked for future GP.
  * "Startedleafroot": All nodes from leaf to root marked for future GP.
  * "Startedroot": Requested a nocb grace period based on root-node data.
- * "NoGPkthread": The RCU grace-period kthread has not yet started.
+ * "Startleaf": Request a grace period based on leaf-node data.
  * "StartWait": Start waiting for the requested grace period.
- * "EndWait": Complete wait.
- * "Cleanup": Clean up rcu_node structure after previous GP.
- * "CleanupMore": Clean up, and another GP is needed.
  */
 TRACE_EVENT_RCU(rcu_future_grace_period,
 
@@ -258,20 +258,27 @@ TRACE_EVENT_RCU(rcu_exp_funnel_lock,
  * the number of the offloaded CPU are extracted. The third and final
  * argument is a string as follows:
  *
- * "WakeEmpty": Wake rcuo kthread, first CB to empty list.
- * "WakeEmptyIsDeferred": Wake rcuo kthread later, first CB to empty list.
- * "WakeOvf": Wake rcuo kthread, CB list is huge.
- * "WakeOvfIsDeferred": Wake rcuo kthread later, CB list is huge.
- * "WakeNot": Don't wake rcuo kthread.
- * "WakeNotPoll": Don't wake rcuo kthread because it is polling.
- * "DeferredWake": Carried out the "IsDeferred" wakeup.
- * "Poll": Start of new polling cycle for rcu_nocb_poll.
- * "Sleep": Sleep waiting for GP for !rcu_nocb_poll.
- * "CBSleep": Sleep waiting for CBs for !rcu_nocb_poll.
- * "WokeEmpty": rcuo kthread woke to find empty list.
- * "WokeNonEmpty": rcuo kthread woke to find non-empty list.
- * "WaitQueue": Enqueue partially done, timed wait for it to complete.
- * "WokeQueue": Partial enqueue now complete.
+ * "AlreadyAwake": The to-be-awakened rcuo kthread is already awake.
+ * "Bypass": rcuo GP kthread sees non-empty ->nocb_bypass.
+ * "CBSleep": rcuo CB kthread sleeping waiting for CBs.
+ * "Check": rcuo GP kthread checking specified CPU for work.
+ * "DeferredWake": Timer expired or polled check, time to wake.
+ * "DoWake": The to-be-awakened rcuo kthread needs to be awakened.
+ * "EndSleep": Done waiting for GP for !rcu_nocb_poll.
+ * "FirstBQ": New CB to empty ->nocb_bypass (->cblist maybe non-empty).
+ * "FirstBQnoWake": FirstBQ plus rcuo kthread need not be awakened.
+ * "FirstBQwake": FirstBQ plus rcuo kthread must be awakened.
+ * "FirstQ": New CB to empty ->cblist (->nocb_bypass maybe non-empty).
+ * "NeedWaitGP": rcuo GP kthread must wait on a grace period.
+ * "Poll": Start of new polling cycle for rcu_nocb_poll.
+ * "Sleep": Sleep waiting for GP for !rcu_nocb_poll.
+ * "Timer": Deferred-wake timer expired.
+ * "WakeEmptyIsDeferred": Wake rcuo kthread later, first CB to empty list.
+ * "WakeEmpty": Wake rcuo kthread, first CB to empty list.
+ * "WakeNot": Don't wake rcuo kthread.
+ * "WakeNotPoll": Don't wake rcuo kthread because it is polling.
+ * "WakeOvfIsDeferred": Wake rcuo kthread later, CB list is huge.
+ * "WokeEmpty": rcuo CB kthread woke to find empty list.
  */
 TRACE_EVENT_RCU(rcu_nocb_wake,
 
@@ -713,8 +720,6 @@ TRACE_EVENT_RCU(rcu_torture_read,
  * "Begin": rcu_barrier() started.
  * "EarlyExit": rcu_barrier() piggybacked, thus early exit.
  * "Inc1": rcu_barrier() piggyback check counter incremented.
- * "OfflineNoCB": rcu_barrier() found callback on never-online CPU
- * "OnlineNoCB": rcu_barrier() found online no-CBs CPU.
 * "OnlineQ": rcu_barrier() found online CPU with callbacks.
 * "OnlineNQ": rcu_barrier() found online CPU, no callbacks.
 * "IRQ": An rcu_barrier_callback() callback posted on remote CPU.
diff --git a/include/trace/events/timer.h b/include/trace/events/timer.h
index b7a904825e7d..295517f109d7 100644
--- a/include/trace/events/timer.h
+++ b/include/trace/events/timer.h
@@ -367,7 +367,8 @@ TRACE_EVENT(itimer_expire,
 		tick_dep_name(POSIX_TIMER)		\
 		tick_dep_name(PERF_EVENTS)		\
 		tick_dep_name(SCHED)			\
-		tick_dep_name_end(CLOCK_UNSTABLE)
+		tick_dep_name(CLOCK_UNSTABLE)		\
+		tick_dep_name_end(RCU)
 
 #undef tick_dep_name
 #undef tick_dep_mask_name