Diffstat (limited to 'include/linux/sched.h')
-rw-r--r--	include/linux/sched.h	| 46
1 file changed, 29 insertions(+), 17 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 6e5c38718ff5..aa9c5be7a632 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -44,8 +44,8 @@
#include <linux/seqlock_types.h>
#include <linux/kcsan.h>
#include <linux/rv.h>
-#include <linux/livepatch_sched.h>
#include <linux/uidgid_types.h>
+#include <linux/tracepoint-defs.h>
#include <asm/kmap_size.h>
/* task_struct member predeclarations (sorted alphabetically): */
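
The include swap above is self-contained: <linux/livepatch_sched.h> goes away because its only users in this file, the klp_sched_try_switch() calls in _cond_resched(), are removed at the bottom of this patch, while <linux/tracepoint-defs.h> lets the header declare and test a tracepoint without dragging in the full <linux/tracepoint.h> machinery. For reference, the two macros it supplies look roughly like this (paraphrased, CONFIG_TRACEPOINTS=y case; with tracepoints off, tracepoint_enabled() is simply false):

	#define DECLARE_TRACEPOINT(tp) \
		extern struct tracepoint __tracepoint_##tp

	#define tracepoint_enabled(tp) \
		static_key_false(&(__tracepoint_##tp).key)
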
@@ -187,6 +187,12 @@ struct user_event_mm;
# define debug_rtlock_wait_restore_state() do { } while (0)
#endif
+#define trace_set_current_state(state_value) \
+ do { \
+ if (tracepoint_enabled(sched_set_state_tp)) \
+ __trace_set_current_state(state_value); \
+ } while (0)
+
/*
* set_current_state() includes a barrier so that the write of current->__state
* is correctly serialised wrt the caller's subsequent test of whether to
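
The new wrapper keeps state changes cheap when tracing is off: tracepoint_enabled() compiles down to a static-branch test, so the out-of-line helper only runs once the sched_set_state_tp tracepoint is actually armed. A minimal sketch of the matching definition, which would live in kernel/sched/ (the body and the tracepoint's argument list are assumptions inferred from the declarations in this header):

	/* sketch only; assumes the tracepoint takes (task, state) */
	void __trace_set_current_state(int state_value)
	{
		trace_sched_set_state_tp(current, state_value);
	}
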
@@ -227,12 +233,14 @@ struct user_event_mm;
#define __set_current_state(state_value) \
do { \
debug_normal_state_change((state_value)); \
+ trace_set_current_state(state_value); \
WRITE_ONCE(current->__state, (state_value)); \
} while (0)
#define set_current_state(state_value) \
do { \
debug_normal_state_change((state_value)); \
+ trace_set_current_state(state_value); \
smp_store_mb(current->__state, (state_value)); \
} while (0)
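
Both macros now emit the tracepoint before publishing the new state, and they keep their ordering difference: __set_current_state() is a plain WRITE_ONCE(), while set_current_state() uses smp_store_mb() so the store is ordered before the caller's subsequent condition check. That ordering is what makes the canonical wait loop safe (standard kernel idiom, shown for context):

	for (;;) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (CONDITION)	/* set by the waker before wake_up() */
			break;
		schedule();
	}
	__set_current_state(TASK_RUNNING);
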
@@ -248,6 +256,7 @@ struct user_event_mm;
\
raw_spin_lock_irqsave(&current->pi_lock, flags); \
debug_special_state_change((state_value)); \
+ trace_set_current_state(state_value); \
WRITE_ONCE(current->__state, (state_value)); \
raw_spin_unlock_irqrestore(&current->pi_lock, flags); \
} while (0)
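
set_special_state() gets the same hook. Unlike the variants above it takes current->pi_lock with interrupts disabled, so state values that must not be clobbered by a concurrent wakeup (TASK_STOPPED, TASK_TRACED and similar) are serialised against try_to_wake_up(). A caller uses it as a single statement, e.g.:

	set_special_state(TASK_STOPPED);	/* e.g. job-control stop */
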
@@ -283,6 +292,7 @@ struct user_event_mm;
raw_spin_lock(&current->pi_lock); \
current->saved_state = current->__state; \
debug_rtlock_wait_set_state(); \
+ trace_set_current_state(TASK_RTLOCK_WAIT); \
WRITE_ONCE(current->__state, TASK_RTLOCK_WAIT); \
raw_spin_unlock(&current->pi_lock); \
} while (0);
@@ -292,6 +302,7 @@ struct user_event_mm;
lockdep_assert_irqs_disabled(); \
raw_spin_lock(&current->pi_lock); \
debug_rtlock_wait_restore_state(); \
+ trace_set_current_state(current->saved_state); \
WRITE_ONCE(current->__state, current->saved_state); \
current->saved_state = TASK_RUNNING; \
raw_spin_unlock(&current->pi_lock); \
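
These two helpers are a matched pair used by the PREEMPT_RT sleeping-spinlock slowpath: the first saves the task's real state and substitutes TASK_RTLOCK_WAIT, the second restores it once the lock is taken, and both now emit the tracepoint so a tracer sees the substitution as well as the restore. Roughly how they pair up (simplified from the rtlock slowpath in kernel/locking/rtmutex.c; the try-acquire helper name here is a placeholder):

	current_save_and_set_rtlock_wait_state();
	for (;;) {
		if (rtlock_try_acquire(lock))	/* placeholder name */
			break;
		schedule_rtlock();
		set_current_state(TASK_RTLOCK_WAIT);
	}
	current_restore_rtlock_saved_state();
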
@@ -328,6 +339,10 @@ extern void io_schedule_finish(int token);
extern long io_schedule_timeout(long timeout);
extern void io_schedule(void);
+/* wrapper function to trace from this header file */
+DECLARE_TRACEPOINT(sched_set_state_tp);
+extern void __trace_set_current_state(int state_value);
+
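
With the declaration above, consumers can attach to the raw tracepoint through the generated register/unregister API. A sketch of a module-side probe (the prototype is an assumption, matching the (current, state) arguments the wrapper passes):

	static void probe_set_state(void *data, struct task_struct *tsk, int state)
	{
		/* e.g. feed a runtime-verification monitor */
	}

	/* in module init: */
	register_trace_sched_set_state_tp(probe_set_state, NULL);
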
/**
* struct prev_cputime - snapshot of system and user cputime
* @utime: time spent in user mode
@@ -1028,6 +1043,7 @@ struct task_struct {
/* delay due to memory thrashing */
unsigned in_thrashing:1;
#endif
+ unsigned in_nf_duplicate:1;
#ifdef CONFIG_PREEMPT_RT
struct netdev_xmit net_xmit;
#endif
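
The new in_nf_duplicate bit joins the other one-bit per-task flags. Judging by the name, it gives netfilter's packet-duplication paths a per-task "already duplicating" marker; a guard like this is classically used as follows (illustrative sketch, not the netfilter code):

	if (current->in_nf_duplicate)
		return;				/* already duplicating: avoid recursion */
	current->in_nf_duplicate = true;
	nf_do_duplicate(skb);			/* hypothetical helper */
	current->in_nf_duplicate = false;
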
@@ -1223,6 +1239,14 @@ struct task_struct {
struct mutex_waiter *blocked_on;
#endif
+#ifdef CONFIG_DETECT_HUNG_TASK_BLOCKER
+ /*
+ * Encoded lock address causing task block (lower 2 bits = type from
+ * <linux/hung_task.h>). Accessed via hung_task_*() helpers.
+ */
+ unsigned long blocker;
+#endif
+
#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
int non_block_count;
#endif
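
Per the comment, ->blocker packs a lock address and a two-bit type into a single word, relying on lock structures being at least 4-byte aligned so the low bits are free. The encoding works like this (illustrative; the real helpers and mask live in <linux/hung_task.h>):

	#define BLOCKER_TYPE_MASK	0x3UL		/* illustrative mask */

	/* pack */
	t->blocker = (unsigned long)lock | type;

	/* unpack */
	lock = (void *)(t->blocker & ~BLOCKER_TYPE_MASK);
	type = t->blocker & BLOCKER_TYPE_MASK;
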
@@ -1626,22 +1650,15 @@ struct task_struct {
struct user_event_mm *user_event_mm;
#endif
- /*
- * New fields for task_struct should be added above here, so that
- * they are included in the randomized portion of task_struct.
- */
- randomized_struct_fields_end
-
/* CPU-specific state of this task: */
struct thread_struct thread;
/*
- * WARNING: on x86, 'thread_struct' contains a variable-sized
- * structure. It *MUST* be at the end of 'task_struct'.
- *
- * Do not put anything below here!
+ * New fields for task_struct should be added above here, so that
+ * they are included in the randomized portion of task_struct.
*/
-};
+ randomized_struct_fields_end
+} __attribute__ ((aligned (64)));
#define TASK_REPORT_IDLE (TASK_REPORT + 1)
#define TASK_REPORT_MAX (TASK_REPORT_IDLE << 1)
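
Two things change at the tail of task_struct in the hunk above: thread_struct evidently no longer ends in a variable-sized structure on x86 (hence the deleted WARNING), so it can be pulled inside the randomized region with randomized_struct_fields_end moving below it, and the whole struct now carries an explicit 64-byte (cache-line) alignment. The alignment is checkable at build time, e.g.:

	/* illustrative build-time check */
	static_assert(__alignof__(struct task_struct) >= 64);
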
@@ -2069,9 +2086,6 @@ extern int __cond_resched(void);
#if defined(CONFIG_PREEMPT_DYNAMIC) && defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
-void sched_dynamic_klp_enable(void);
-void sched_dynamic_klp_disable(void);
-
DECLARE_STATIC_CALL(cond_resched, __cond_resched);
static __always_inline int _cond_resched(void)
@@ -2092,7 +2106,6 @@ static __always_inline int _cond_resched(void)
static inline int _cond_resched(void)
{
- klp_sched_try_switch();
return __cond_resched();
}
@@ -2102,7 +2115,6 @@ static inline int _cond_resched(void)
static inline int _cond_resched(void)
{
- klp_sched_try_switch();
return 0;
}
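
The last three hunks are the flip side of the include change at the top: with klp_sched_try_switch() gone from every _cond_resched() variant, livepatch no longer hooks task transitions through cond_resched(), so the sched_dynamic_klp_enable()/disable() entry points can be dropped as well. For context, retargeting the PREEMPT_DYNAMIC static call looks roughly like this in kernel/sched/core.c (shown as illustration, not part of this patch):

	/* voluntary preemption: cond_resched() does real work */
	static_call_update(cond_resched, __cond_resched);

	/* full preemption: cond_resched() becomes a no-op returning 0 */
	static_call_update(cond_resched, (void *)&__static_call_return0);
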