Diffstat (limited to 'include/linux/sched.h')
-rw-r--r-- | include/linux/sched.h | 61
1 file changed, 47 insertions, 14 deletions
diff --git a/include/linux/sched.h b/include/linux/sched.h
index cbe5c899599c..c46f3a63b758 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -939,6 +939,9 @@ struct task_struct {
 #ifdef CONFIG_IOMMU_SVA
 	unsigned			pasid_activated:1;
 #endif
+#ifdef CONFIG_CPU_SUP_INTEL
+	unsigned			reported_split_lock:1;
+#endif
 
 	unsigned long			atomic_flags; /* Flags requiring atomic access. */
 
@@ -1441,6 +1444,7 @@ struct task_struct {
 	int				pagefault_disabled;
 #ifdef CONFIG_MMU
 	struct task_struct		*oom_reaper_list;
+	struct timer_list		oom_reaper_timer;
 #endif
 #ifdef CONFIG_VMAP_STACK
 	struct vm_struct		*stack_vm_area;
@@ -2115,6 +2119,47 @@ static inline void cond_resched_rcu(void)
 #endif
 }
 
+#ifdef CONFIG_PREEMPT_DYNAMIC
+
+extern bool preempt_model_none(void);
+extern bool preempt_model_voluntary(void);
+extern bool preempt_model_full(void);
+
+#else
+
+static inline bool preempt_model_none(void)
+{
+	return IS_ENABLED(CONFIG_PREEMPT_NONE);
+}
+static inline bool preempt_model_voluntary(void)
+{
+	return IS_ENABLED(CONFIG_PREEMPT_VOLUNTARY);
+}
+static inline bool preempt_model_full(void)
+{
+	return IS_ENABLED(CONFIG_PREEMPT);
+}
+
+#endif
+
+static inline bool preempt_model_rt(void)
+{
+	return IS_ENABLED(CONFIG_PREEMPT_RT);
+}
+
+/*
+ * Does the preemption model allow non-cooperative preemption?
+ *
+ * For !CONFIG_PREEMPT_DYNAMIC kernels this is an exact match with
+ * CONFIG_PREEMPTION; for CONFIG_PREEMPT_DYNAMIC this doesn't work as the
+ * kernel is *built* with CONFIG_PREEMPTION=y but may run with e.g. the
+ * PREEMPT_NONE model.
+ */
+static inline bool preempt_model_preemptible(void)
+{
+	return preempt_model_full() || preempt_model_rt();
+}
+
 /*
  * Does a critical section need to be broken due to another
  * task waiting?: (technically does not depend on CONFIG_PREEMPTION,
@@ -2335,20 +2380,6 @@ static inline void rseq_syscall(struct pt_regs *regs)
 
 #endif
 
-const struct sched_avg *sched_trace_cfs_rq_avg(struct cfs_rq *cfs_rq);
-char *sched_trace_cfs_rq_path(struct cfs_rq *cfs_rq, char *str, int len);
-int sched_trace_cfs_rq_cpu(struct cfs_rq *cfs_rq);
-
-const struct sched_avg *sched_trace_rq_avg_rt(struct rq *rq);
-const struct sched_avg *sched_trace_rq_avg_dl(struct rq *rq);
-const struct sched_avg *sched_trace_rq_avg_irq(struct rq *rq);
-
-int sched_trace_rq_cpu(struct rq *rq);
-int sched_trace_rq_cpu_capacity(struct rq *rq);
-int sched_trace_rq_nr_running(struct rq *rq);
-
-const struct cpumask *sched_trace_rd_span(struct root_domain *rd);
-
 #ifdef CONFIG_SCHED_CORE
 extern void sched_core_free(struct task_struct *tsk);
 extern void sched_core_fork(struct task_struct *p);
@@ -2359,4 +2390,6 @@ static inline void sched_core_free(struct task_struct *tsk) { }
 static inline void sched_core_fork(struct task_struct *p) { }
 #endif
 
+extern void sched_set_stop_task(int cpu, struct task_struct *stop);
+
 #endif
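
The fallback accessors added in the third hunk lean entirely on the kernel's IS_ENABLED() trick from <linux/kconfig.h>, which evaluates to 1 for a Kconfig symbol defined to 1 and to 0 for anything else, including a symbol that is not defined at all. The following is a self-contained userspace sketch of that mechanism, simplified from the kernel macro; the CONFIG_* definitions here are stand-ins for a .config, made up for the demo:

#include <stdbool.h>
#include <stdio.h>

/*
 * Simplified from the kernel's <linux/kconfig.h>: an option defined
 * to 1 makes __ARG_PLACEHOLDER_##val expand to "0,", shifting the
 * literal 1 into the second-argument slot; anything else (including
 * an undefined symbol) leaves junk in the first slot and selects 0.
 */
#define __ARG_PLACEHOLDER_1 0,
#define __take_second_arg(__ignored, val, ...) val
#define __is_defined(x)			___is_defined(x)
#define ___is_defined(val)		____is_defined(__ARG_PLACEHOLDER_##val)
#define ____is_defined(arg1_or_junk)	__take_second_arg(arg1_or_junk 1, 0)
#define IS_ENABLED(option)		__is_defined(option)

/* Pretend .config: full preemption selected, CONFIG_PREEMPT_RT unset. */
#define CONFIG_PREEMPT 1

static bool preempt_model_full(void)
{
	return IS_ENABLED(CONFIG_PREEMPT);
}

static bool preempt_model_rt(void)
{
	return IS_ENABLED(CONFIG_PREEMPT_RT);
}

static bool preempt_model_preemptible(void)
{
	return preempt_model_full() || preempt_model_rt();
}

int main(void)
{
	printf("preemptible: %d\n", preempt_model_preemptible()); /* prints 1 */
	return 0;
}

Under CONFIG_PREEMPT_DYNAMIC no such build-time answer exists, because the model can be overridden at boot (e.g. preempt=none); that is why the first branch of the hunk only declares the accessors extern and leaves the decision to runtime state in the scheduler, exactly as the comment on preempt_model_preemptible() explains.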