Diffstat (limited to 'include')
-rw-r--r--  include/linux/context_tracking.h        | 32
-rw-r--r--  include/linux/context_tracking_state.h  | 60
-rw-r--r--  include/linux/entry-common.h            |  2
-rw-r--r--  include/linux/rcu_segcblist.h           |  6
-rw-r--r--  include/linux/rculist.h                 |  9
-rw-r--r--  include/linux/rcupdate.h                | 15
-rw-r--r--  include/linux/rcutiny.h                 |  2
-rw-r--r--  include/linux/rcutree.h                 |  2
-rw-r--r--  include/linux/smp.h                     |  6
-rw-r--r--  include/linux/srcutree.h                | 15
-rw-r--r--  include/trace/events/rcu.h              | 20
11 files changed, 102 insertions(+), 67 deletions(-)
diff --git a/include/linux/context_tracking.h b/include/linux/context_tracking.h
index 6e76b9dba00e..d53092ffa9db 100644
--- a/include/linux/context_tracking.h
+++ b/include/linux/context_tracking.h
@@ -26,26 +26,26 @@ extern void user_exit_callable(void);
static inline void user_enter(void)
{
if (context_tracking_enabled())
- ct_user_enter(CONTEXT_USER);
+ ct_user_enter(CT_STATE_USER);
}
static inline void user_exit(void)
{
if (context_tracking_enabled())
- ct_user_exit(CONTEXT_USER);
+ ct_user_exit(CT_STATE_USER);
}
/* Called with interrupts disabled. */
static __always_inline void user_enter_irqoff(void)
{
if (context_tracking_enabled())
- __ct_user_enter(CONTEXT_USER);
+ __ct_user_enter(CT_STATE_USER);
}
static __always_inline void user_exit_irqoff(void)
{
if (context_tracking_enabled())
- __ct_user_exit(CONTEXT_USER);
+ __ct_user_exit(CT_STATE_USER);
}
static inline enum ctx_state exception_enter(void)
@@ -57,7 +57,7 @@ static inline enum ctx_state exception_enter(void)
return 0;
prev_ctx = __ct_state();
- if (prev_ctx != CONTEXT_KERNEL)
+ if (prev_ctx != CT_STATE_KERNEL)
ct_user_exit(prev_ctx);
return prev_ctx;
@@ -67,7 +67,7 @@ static inline void exception_exit(enum ctx_state prev_ctx)
{
if (!IS_ENABLED(CONFIG_HAVE_CONTEXT_TRACKING_USER_OFFSTACK) &&
context_tracking_enabled()) {
- if (prev_ctx != CONTEXT_KERNEL)
+ if (prev_ctx != CT_STATE_KERNEL)
ct_user_enter(prev_ctx);
}
}
@@ -75,7 +75,7 @@ static inline void exception_exit(enum ctx_state prev_ctx)
static __always_inline bool context_tracking_guest_enter(void)
{
if (context_tracking_enabled())
- __ct_user_enter(CONTEXT_GUEST);
+ __ct_user_enter(CT_STATE_GUEST);
return context_tracking_enabled_this_cpu();
}
@@ -83,7 +83,7 @@ static __always_inline bool context_tracking_guest_enter(void)
static __always_inline void context_tracking_guest_exit(void)
{
if (context_tracking_enabled())
- __ct_user_exit(CONTEXT_GUEST);
+ __ct_user_exit(CT_STATE_GUEST);
}
#define CT_WARN_ON(cond) WARN_ON(context_tracking_enabled() && (cond))
@@ -113,13 +113,17 @@ extern void ct_idle_enter(void);
extern void ct_idle_exit(void);
/*
- * Is the current CPU in an extended quiescent state?
+ * Is RCU watching the current CPU (IOW, it is not in an extended quiescent state)?
+ *
+ * Note that this returns the actual boolean data (watching / not watching),
+ * whereas ct_rcu_watching() returns the RCU_WATCHING subvariable of
+ * context_tracking.state.
*
* No ordering, as we are sampling CPU-local information.
*/
-static __always_inline bool rcu_dynticks_curr_cpu_in_eqs(void)
+static __always_inline bool rcu_is_watching_curr_cpu(void)
{
- return !(raw_atomic_read(this_cpu_ptr(&context_tracking.state)) & RCU_DYNTICKS_IDX);
+ return raw_atomic_read(this_cpu_ptr(&context_tracking.state)) & CT_RCU_WATCHING;
}
/*
@@ -140,9 +144,9 @@ static __always_inline bool warn_rcu_enter(void)
* lots of the actual reporting also relies on RCU.
*/
preempt_disable_notrace();
- if (rcu_dynticks_curr_cpu_in_eqs()) {
+ if (!rcu_is_watching_curr_cpu()) {
ret = true;
- ct_state_inc(RCU_DYNTICKS_IDX);
+ ct_state_inc(CT_RCU_WATCHING);
}
return ret;
@@ -151,7 +155,7 @@ static __always_inline bool warn_rcu_enter(void)
static __always_inline void warn_rcu_exit(bool rcu)
{
if (rcu)
- ct_state_inc(RCU_DYNTICKS_IDX);
+ ct_state_inc(CT_RCU_WATCHING);
preempt_enable_notrace();
}
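
To make the rename above concrete, here is a minimal caller-side sketch using the new CT_STATE_* names; assert_kernel_context() is a hypothetical helper, while ct_state(), CT_WARN_ON() and the CT_STATE_* values all come from the headers touched by this diff:

/* Hypothetical helper: warn if we are not running in kernel context. */
static inline void assert_kernel_context(void)
{
        int state = ct_state();

        /* ct_state() returns CT_STATE_DISABLED when tracking is off. */
        CT_WARN_ON(state != CT_STATE_DISABLED && state != CT_STATE_KERNEL);
}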
diff --git a/include/linux/context_tracking_state.h b/include/linux/context_tracking_state.h
index bbff5f7f8803..7b8433d5a8ef 100644
--- a/include/linux/context_tracking_state.h
+++ b/include/linux/context_tracking_state.h
@@ -7,22 +7,22 @@
#include <linux/context_tracking_irq.h>
/* Offset to allow distinguishing irq vs. task-based idle entry/exit. */
-#define DYNTICK_IRQ_NONIDLE ((LONG_MAX / 2) + 1)
+#define CT_NESTING_IRQ_NONIDLE ((LONG_MAX / 2) + 1)
enum ctx_state {
- CONTEXT_DISABLED = -1, /* returned by ct_state() if unknown */
- CONTEXT_KERNEL = 0,
- CONTEXT_IDLE = 1,
- CONTEXT_USER = 2,
- CONTEXT_GUEST = 3,
- CONTEXT_MAX = 4,
+ CT_STATE_DISABLED = -1, /* returned by ct_state() if unknown */
+ CT_STATE_KERNEL = 0,
+ CT_STATE_IDLE = 1,
+ CT_STATE_USER = 2,
+ CT_STATE_GUEST = 3,
+ CT_STATE_MAX = 4,
};
-/* Even value for idle, else odd. */
-#define RCU_DYNTICKS_IDX CONTEXT_MAX
+/* Odd value for watching, else even. */
+#define CT_RCU_WATCHING CT_STATE_MAX
-#define CT_STATE_MASK (CONTEXT_MAX - 1)
-#define CT_DYNTICKS_MASK (~CT_STATE_MASK)
+#define CT_STATE_MASK (CT_STATE_MAX - 1)
+#define CT_RCU_WATCHING_MASK (~CT_STATE_MASK)
struct context_tracking {
#ifdef CONFIG_CONTEXT_TRACKING_USER
@@ -39,8 +39,8 @@ struct context_tracking {
atomic_t state;
#endif
#ifdef CONFIG_CONTEXT_TRACKING_IDLE
- long dynticks_nesting; /* Track process nesting level. */
- long dynticks_nmi_nesting; /* Track irq/NMI nesting level. */
+ long nesting; /* Track process nesting level. */
+ long nmi_nesting; /* Track irq/NMI nesting level. */
#endif
};
@@ -56,47 +56,47 @@ static __always_inline int __ct_state(void)
#endif
#ifdef CONFIG_CONTEXT_TRACKING_IDLE
-static __always_inline int ct_dynticks(void)
+static __always_inline int ct_rcu_watching(void)
{
- return atomic_read(this_cpu_ptr(&context_tracking.state)) & CT_DYNTICKS_MASK;
+ return atomic_read(this_cpu_ptr(&context_tracking.state)) & CT_RCU_WATCHING_MASK;
}
-static __always_inline int ct_dynticks_cpu(int cpu)
+static __always_inline int ct_rcu_watching_cpu(int cpu)
{
struct context_tracking *ct = per_cpu_ptr(&context_tracking, cpu);
- return atomic_read(&ct->state) & CT_DYNTICKS_MASK;
+ return atomic_read(&ct->state) & CT_RCU_WATCHING_MASK;
}
-static __always_inline int ct_dynticks_cpu_acquire(int cpu)
+static __always_inline int ct_rcu_watching_cpu_acquire(int cpu)
{
struct context_tracking *ct = per_cpu_ptr(&context_tracking, cpu);
- return atomic_read_acquire(&ct->state) & CT_DYNTICKS_MASK;
+ return atomic_read_acquire(&ct->state) & CT_RCU_WATCHING_MASK;
}
-static __always_inline long ct_dynticks_nesting(void)
+static __always_inline long ct_nesting(void)
{
- return __this_cpu_read(context_tracking.dynticks_nesting);
+ return __this_cpu_read(context_tracking.nesting);
}
-static __always_inline long ct_dynticks_nesting_cpu(int cpu)
+static __always_inline long ct_nesting_cpu(int cpu)
{
struct context_tracking *ct = per_cpu_ptr(&context_tracking, cpu);
- return ct->dynticks_nesting;
+ return ct->nesting;
}
-static __always_inline long ct_dynticks_nmi_nesting(void)
+static __always_inline long ct_nmi_nesting(void)
{
- return __this_cpu_read(context_tracking.dynticks_nmi_nesting);
+ return __this_cpu_read(context_tracking.nmi_nesting);
}
-static __always_inline long ct_dynticks_nmi_nesting_cpu(int cpu)
+static __always_inline long ct_nmi_nesting_cpu(int cpu)
{
struct context_tracking *ct = per_cpu_ptr(&context_tracking, cpu);
- return ct->dynticks_nmi_nesting;
+ return ct->nmi_nesting;
}
#endif /* #ifdef CONFIG_CONTEXT_TRACKING_IDLE */
@@ -113,7 +113,7 @@ static __always_inline bool context_tracking_enabled_cpu(int cpu)
return context_tracking_enabled() && per_cpu(context_tracking.active, cpu);
}
-static inline bool context_tracking_enabled_this_cpu(void)
+static __always_inline bool context_tracking_enabled_this_cpu(void)
{
return context_tracking_enabled() && __this_cpu_read(context_tracking.active);
}
@@ -123,14 +123,14 @@ static inline bool context_tracking_enabled_this_cpu(void)
*
* Returns the current cpu's context tracking state if context tracking
* is enabled. If context tracking is disabled, returns
- * CONTEXT_DISABLED. This should be used primarily for debugging.
+ * CT_STATE_DISABLED. This should be used primarily for debugging.
*/
static __always_inline int ct_state(void)
{
int ret;
if (!context_tracking_enabled())
- return CONTEXT_DISABLED;
+ return CT_STATE_DISABLED;
preempt_disable();
ret = __ct_state();
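
Putting the masks above together: the low bits (CT_STATE_MASK) carry the CT_STATE_* context, the CT_RCU_WATCHING bit says whether RCU is watching, and CT_RCU_WATCHING_MASK selects the full transition counter. A hypothetical decoder, for illustration only:

/* Illustration only: decode a raw context_tracking.state value. */
static void decode_ct_state(int state)
{
        int ctx = state & CT_STATE_MASK;            /* CT_STATE_KERNEL..CT_STATE_GUEST */
        bool watching = state & CT_RCU_WATCHING;    /* is RCU watching this CPU? */
        int counter = state & CT_RCU_WATCHING_MASK; /* watching transition counter */

        pr_info("ctx=%d watching=%d counter=%#x\n", ctx, watching, counter);
}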
diff --git a/include/linux/entry-common.h b/include/linux/entry-common.h
index b0fb775a600d..1e50cdb83ae5 100644
--- a/include/linux/entry-common.h
+++ b/include/linux/entry-common.h
@@ -108,7 +108,7 @@ static __always_inline void enter_from_user_mode(struct pt_regs *regs)
arch_enter_from_user_mode(regs);
lockdep_hardirqs_off(CALLER_ADDR0);
- CT_WARN_ON(__ct_state() != CONTEXT_USER);
+ CT_WARN_ON(__ct_state() != CT_STATE_USER);
user_exit_irqoff();
instrumentation_begin();
diff --git a/include/linux/rcu_segcblist.h b/include/linux/rcu_segcblist.h
index ba95c06675e1..2fdc2208f1ca 100644
--- a/include/linux/rcu_segcblist.h
+++ b/include/linux/rcu_segcblist.h
@@ -185,11 +185,7 @@ struct rcu_cblist {
* ----------------------------------------------------------------------------
*/
#define SEGCBLIST_ENABLED BIT(0)
-#define SEGCBLIST_RCU_CORE BIT(1)
-#define SEGCBLIST_LOCKING BIT(2)
-#define SEGCBLIST_KTHREAD_CB BIT(3)
-#define SEGCBLIST_KTHREAD_GP BIT(4)
-#define SEGCBLIST_OFFLOADED BIT(5)
+#define SEGCBLIST_OFFLOADED BIT(1)
struct rcu_segcblist {
struct rcu_head *head;
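
With the intermediate flags gone, only SEGCBLIST_ENABLED and SEGCBLIST_OFFLOADED remain, so offload state is a single bit rather than a state machine. A sketch of the resulting test, assuming the existing ->flags field of struct rcu_segcblist:

/* Sketch: is this segmented callback list offloaded to a kthread? */
static inline bool my_segcblist_offloaded(struct rcu_segcblist *rsclp)
{
        return READ_ONCE(rsclp->flags) & SEGCBLIST_OFFLOADED;
}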
diff --git a/include/linux/rculist.h b/include/linux/rculist.h
index 3dc1e58865f7..14dfa6008467 100644
--- a/include/linux/rculist.h
+++ b/include/linux/rculist.h
@@ -191,7 +191,10 @@ static inline void hlist_del_init_rcu(struct hlist_node *n)
* @old : the element to be replaced
* @new : the new element to insert
*
- * The @old entry will be replaced with the @new entry atomically.
+ * The @old entry will be replaced with the @new entry atomically from
+ * the perspective of concurrent readers. It is the caller's responsibility
+ * to synchronize with concurrent updaters, if any.
+ *
* Note: @old should not be empty.
*/
static inline void list_replace_rcu(struct list_head *old,
@@ -519,7 +522,9 @@ static inline void hlist_del_rcu(struct hlist_node *n)
* @old : the element to be replaced
* @new : the new element to insert
*
- * The @old entry will be replaced with the @new entry atomically.
+ * The @old entry will be replaced with the @new entry atomically from
+ * the perspective of concurrent readers. It is the caller's responsibility
+ * to synchronize with concurrent updaters, if any.
*/
static inline void hlist_replace_rcu(struct hlist_node *old,
struct hlist_node *new)
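
The clarified comment deserves an example: the replacement is atomic only from the readers' point of view, so updaters still need their own mutual exclusion, and @old may be freed only after a grace period. A minimal sketch, assuming a hypothetical struct foo and an updater-side my_lock:

struct foo {
        struct list_head list;
        int data;
};

static void replace_foo(struct foo *old, struct foo *new)
{
        spin_lock(&my_lock);                      /* serialize updaters */
        list_replace_rcu(&old->list, &new->list); /* atomic for readers */
        spin_unlock(&my_lock);

        synchronize_rcu();                        /* wait for readers still seeing @old */
        kfree(old);
}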
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 13f6f00aecf9..58d84c59f3dd 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -34,10 +34,12 @@
#define ULONG_CMP_GE(a, b) (ULONG_MAX / 2 >= (a) - (b))
#define ULONG_CMP_LT(a, b) (ULONG_MAX / 2 < (a) - (b))
+#define RCU_SEQ_CTR_SHIFT 2
+#define RCU_SEQ_STATE_MASK ((1 << RCU_SEQ_CTR_SHIFT) - 1)
+
/* Exported common interfaces */
void call_rcu(struct rcu_head *head, rcu_callback_t func);
void rcu_barrier_tasks(void);
-void rcu_barrier_tasks_rude(void);
void synchronize_rcu(void);
struct rcu_gp_oldstate;
@@ -144,11 +146,18 @@ void rcu_init_nohz(void);
int rcu_nocb_cpu_offload(int cpu);
int rcu_nocb_cpu_deoffload(int cpu);
void rcu_nocb_flush_deferred_wakeup(void);
+
+#define RCU_NOCB_LOCKDEP_WARN(c, s) RCU_LOCKDEP_WARN(c, s)
+
#else /* #ifdef CONFIG_RCU_NOCB_CPU */
+
static inline void rcu_init_nohz(void) { }
static inline int rcu_nocb_cpu_offload(int cpu) { return -EINVAL; }
static inline int rcu_nocb_cpu_deoffload(int cpu) { return 0; }
static inline void rcu_nocb_flush_deferred_wakeup(void) { }
+
+#define RCU_NOCB_LOCKDEP_WARN(c, s)
+
#endif /* #else #ifdef CONFIG_RCU_NOCB_CPU */
/*
@@ -165,6 +174,7 @@ static inline void rcu_nocb_flush_deferred_wakeup(void) { }
} while (0)
void call_rcu_tasks(struct rcu_head *head, rcu_callback_t func);
void synchronize_rcu_tasks(void);
+void rcu_tasks_torture_stats_print(char *tt, char *tf);
# else
# define rcu_tasks_classic_qs(t, preempt) do { } while (0)
# define call_rcu_tasks call_rcu
@@ -191,6 +201,7 @@ void rcu_tasks_trace_qs_blkd(struct task_struct *t);
rcu_tasks_trace_qs_blkd(t); \
} \
} while (0)
+void rcu_tasks_trace_torture_stats_print(char *tt, char *tf);
# else
# define rcu_tasks_trace_qs(t) do { } while (0)
# endif
@@ -202,8 +213,8 @@ do { \
} while (0)
# ifdef CONFIG_TASKS_RUDE_RCU
-void call_rcu_tasks_rude(struct rcu_head *head, rcu_callback_t func);
void synchronize_rcu_tasks_rude(void);
+void rcu_tasks_rude_torture_stats_print(char *tt, char *tf);
# endif
#define rcu_note_voluntary_context_switch(t) rcu_tasks_qs(t, false)
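
The newly exported RCU_SEQ_CTR_SHIFT and RCU_SEQ_STATE_MASK describe how a grace-period sequence number packs a completed-GP counter above a small phase field. Hypothetical decoding helpers (the kernel keeps equivalents of these internally in kernel/rcu/rcu.h):

/* Sketch: number of grace periods encoded in sequence number s. */
static inline unsigned long my_rcu_seq_ctr(unsigned long s)
{
        return s >> RCU_SEQ_CTR_SHIFT;
}

/* Sketch: phase (state) of the current grace period encoded in s. */
static inline int my_rcu_seq_state(unsigned long s)
{
        return s & RCU_SEQ_STATE_MASK;
}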
diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h
index d9ac7b136aea..cf2b5a188f78 100644
--- a/include/linux/rcutiny.h
+++ b/include/linux/rcutiny.h
@@ -158,7 +158,7 @@ void rcu_scheduler_starting(void);
static inline void rcu_end_inkernel_boot(void) { }
static inline bool rcu_inkernel_boot_has_ended(void) { return true; }
static inline bool rcu_is_watching(void) { return true; }
-static inline void rcu_momentary_dyntick_idle(void) { }
+static inline void rcu_momentary_eqs(void) { }
static inline void kfree_rcu_scheduler_running(void) { }
static inline bool rcu_gp_might_be_stalled(void) { return false; }
diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h
index 254244202ea9..7dbde2b6f714 100644
--- a/include/linux/rcutree.h
+++ b/include/linux/rcutree.h
@@ -37,7 +37,7 @@ void synchronize_rcu_expedited(void);
void kvfree_call_rcu(struct rcu_head *head, void *ptr);
void rcu_barrier(void);
-void rcu_momentary_dyntick_idle(void);
+void rcu_momentary_eqs(void);
void kfree_rcu_scheduler_running(void);
bool rcu_gp_might_be_stalled(void);
diff --git a/include/linux/smp.h b/include/linux/smp.h
index fcd61dfe2af3..3871bd32018f 100644
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
@@ -294,4 +294,10 @@ int smpcfd_prepare_cpu(unsigned int cpu);
int smpcfd_dead_cpu(unsigned int cpu);
int smpcfd_dying_cpu(unsigned int cpu);
+#ifdef CONFIG_CSD_LOCK_WAIT_DEBUG
+bool csd_lock_is_stuck(void);
+#else
+static inline bool csd_lock_is_stuck(void) { return false; }
+#endif
+
#endif /* __LINUX_SMP_H */
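
csd_lock_is_stuck() gives other subsystems a safe way to ask whether a CSD lock has been waited on for too long, and it compiles away to false when CONFIG_CSD_LOCK_WAIT_DEBUG is off. A hedged call-site sketch (the surrounding function is hypothetical):

/* Hypothetical diagnostic hook, e.g. called from a stall-warning path. */
static void my_report_stall_hints(void)
{
        if (csd_lock_is_stuck())
                pr_warn("smp_call_function() CSD lock appears to be stuck\n");
}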
diff --git a/include/linux/srcutree.h b/include/linux/srcutree.h
index 8f3f72480e78..ed57598394de 100644
--- a/include/linux/srcutree.h
+++ b/include/linux/srcutree.h
@@ -129,10 +129,23 @@ struct srcu_struct {
#define SRCU_STATE_SCAN1 1
#define SRCU_STATE_SCAN2 2
+/*
+ * Values for initializing gp sequence fields. Higher values allow wraparounds
+ * to occur earlier.
+ * The second value, which includes state, is useful for static initialization
+ * of srcu_usage, where srcu_gp_seq_needed is expected to have some state value
+ * in its lower bits (or else it will appear to be already initialized within
+ * check_init_srcu_struct()).
+ */
+#define SRCU_GP_SEQ_INITIAL_VAL ((0UL - 100UL) << RCU_SEQ_CTR_SHIFT)
+#define SRCU_GP_SEQ_INITIAL_VAL_WITH_STATE (SRCU_GP_SEQ_INITIAL_VAL - 1)
+
#define __SRCU_USAGE_INIT(name) \
{ \
.lock = __SPIN_LOCK_UNLOCKED(name.lock), \
- .srcu_gp_seq_needed = -1UL, \
+ .srcu_gp_seq = SRCU_GP_SEQ_INITIAL_VAL, \
+ .srcu_gp_seq_needed = SRCU_GP_SEQ_INITIAL_VAL_WITH_STATE, \
+ .srcu_gp_seq_needed_exp = SRCU_GP_SEQ_INITIAL_VAL, \
.work = __DELAYED_WORK_INITIALIZER(name.work, NULL, 0), \
}
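
Using the RCU_SEQ_* definitions added to rcupdate.h above, the two initializers decode to a counter of -100 (so the sequence wraps past zero after roughly 100 grace periods) and, for the _WITH_STATE variant, a counter of -101 with nonzero state bits. An illustrative decoder, not kernel code (assumes RCU_SEQ_CTR_SHIFT == 2):

/* Illustration: decode the two SRCU gp sequence initializers. */
static void show_srcu_initial_vals(void)
{
        long v  = (long)SRCU_GP_SEQ_INITIAL_VAL;
        long vs = (long)SRCU_GP_SEQ_INITIAL_VAL_WITH_STATE;

        /* v:  counter == -100, state == 0 */
        pr_info("%ld %ld\n", v >> RCU_SEQ_CTR_SHIFT, v & RCU_SEQ_STATE_MASK);
        /* vs: counter == -101, state == 3, so check_init_srcu_struct()
         * does not mistake the field for an already-initialized one. */
        pr_info("%ld %ld\n", vs >> RCU_SEQ_CTR_SHIFT, vs & RCU_SEQ_STATE_MASK);
}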
diff --git a/include/trace/events/rcu.h b/include/trace/events/rcu.h
index 31b3e0d3e65f..e81431deaa50 100644
--- a/include/trace/events/rcu.h
+++ b/include/trace/events/rcu.h
@@ -466,40 +466,40 @@ TRACE_EVENT(rcu_stall_warning,
/*
* Tracepoint for dyntick-idle entry/exit events. These take 2 strings
* as argument:
- * polarity: "Start", "End", "StillNonIdle" for entering, exiting or still not
- * being in dyntick-idle mode.
+ * polarity: "Start", "End", "StillWatching" for entering, exiting or still not
+ * being in EQS mode.
* context: "USER" or "IDLE" or "IRQ".
- * NMIs nested in IRQs are inferred with dynticks_nesting > 1 in IRQ context.
+ * NMIs nested in IRQs are inferred with nesting > 1 in IRQ context.
*
* These events also take a pair of numbers, which indicate the nesting
* depth before and after the event of interest, and a third number that is
- * the ->dynticks counter. Note that task-related and interrupt-related
+ * the RCU_WATCHING counter. Note that task-related and interrupt-related
* events use two separate counters, and that the "++=" and "--=" events
* for irq/NMI will change the counter by two, otherwise by one.
*/
-TRACE_EVENT_RCU(rcu_dyntick,
+TRACE_EVENT_RCU(rcu_watching,
- TP_PROTO(const char *polarity, long oldnesting, long newnesting, int dynticks),
+ TP_PROTO(const char *polarity, long oldnesting, long newnesting, int counter),
- TP_ARGS(polarity, oldnesting, newnesting, dynticks),
+ TP_ARGS(polarity, oldnesting, newnesting, counter),
TP_STRUCT__entry(
__field(const char *, polarity)
__field(long, oldnesting)
__field(long, newnesting)
- __field(int, dynticks)
+ __field(int, counter)
),
TP_fast_assign(
__entry->polarity = polarity;
__entry->oldnesting = oldnesting;
__entry->newnesting = newnesting;
- __entry->dynticks = dynticks;
+ __entry->counter = counter;
),
TP_printk("%s %lx %lx %#3x", __entry->polarity,
__entry->oldnesting, __entry->newnesting,
- __entry->dynticks & 0xfff)
+ __entry->counter & 0xfff)
);
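
For reference, a hedged sketch of how the renamed tracepoint might be emitted from context-tracking code, using accessors renamed earlier in this series (the real call sites live in kernel/context_tracking.c):

/* Sketch: record that this CPU just started being watched by RCU. */
static void my_note_watching_start(long oldnesting, long newnesting)
{
        trace_rcu_watching(TPS("Start"), oldnesting, newnesting,
                           ct_rcu_watching());
}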
/*