-rw-r--r-- | include/linux/sched.h | 2
-rw-r--r-- | kernel/sched/core.c   | 8
2 files changed, 2 insertions, 8 deletions
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 77f01ac385f7..dc37ae787e33 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -750,10 +750,8 @@ struct task_struct {
 #endif
 	unsigned int			__state;
 
-#ifdef CONFIG_PREEMPT_RT
 	/* saved state for "spinlock sleepers" */
 	unsigned int			saved_state;
-#endif
 
 	/*
 	 * This begins the randomizable portion of task_struct. Only
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index f39482d6a6e6..49541e3c1295 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2232,23 +2232,20 @@ int __task_state_match(struct task_struct *p, unsigned int state)
 	if (READ_ONCE(p->__state) & state)
 		return 1;
 
-#ifdef CONFIG_PREEMPT_RT
 	if (READ_ONCE(p->saved_state) & state)
 		return -1;
-#endif
+
 	return 0;
 }
 
 static __always_inline
 int task_state_match(struct task_struct *p, unsigned int state)
 {
-#ifdef CONFIG_PREEMPT_RT
 	/*
 	 * Serialize against current_save_and_set_rtlock_wait_state() and
 	 * current_restore_rtlock_saved_state().
 	 */
 	guard(raw_spinlock_irq)(&p->pi_lock);
-#endif
 	return __task_state_match(p, state);
 }
 
@@ -4038,7 +4035,6 @@ bool ttwu_state_match(struct task_struct *p, unsigned int state, int *success)
 
 	*success = !!(match = __task_state_match(p, state));
 
-#ifdef CONFIG_PREEMPT_RT
 	/*
 	 * Saved state preserves the task state across blocking on
 	 * an RT lock. If the state matches, set p::saved_state to
@@ -4054,7 +4050,7 @@ bool ttwu_state_match(struct task_struct *p, unsigned int state, int *success)
 	 */
 	if (match < 0)
 		p->saved_state = TASK_RUNNING;
-#endif
+
 	return match > 0;
 }
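To make the return-value contract easier to follow, here is a minimal user-space sketch of the tri-state matching logic that the hunks above leave in place. It is an illustration only: mock_task and mock_task_state_match() are hypothetical stand-ins, and the sketch drops the READ_ONCE() accesses and the p->pi_lock serialization that the real kernel helpers depend on.

/*
 * Hypothetical user-space mock, not kernel code. Return values mirror
 * __task_state_match(): 1 = live __state matched, -1 = the match came
 * from saved_state (task is blocked on an RT lock), 0 = no match.
 */
#include <stdio.h>

#define TASK_RUNNING		0x0000
#define TASK_INTERRUPTIBLE	0x0001
#define TASK_UNINTERRUPTIBLE	0x0002

struct mock_task {
	unsigned int __state;		/* live scheduler state */
	unsigned int saved_state;	/* state saved across an rtlock sleep */
};

static int mock_task_state_match(struct mock_task *p, unsigned int state)
{
	if (p->__state & state)
		return 1;
	if (p->saved_state & state)
		return -1;
	return 0;
}

int main(void)
{
	/* A task that slept INTERRUPTIBLE and then blocked on an RT lock. */
	struct mock_task t = {
		.__state	= TASK_UNINTERRUPTIBLE,
		.saved_state	= TASK_INTERRUPTIBLE,
	};
	int match = mock_task_state_match(&t, TASK_INTERRUPTIBLE);
	int success = !!match;

	/* As in the ttwu_state_match() hunk: consume the saved-state hit. */
	if (match < 0)
		t.saved_state = TASK_RUNNING;

	printf("match=%d success=%d wake_now=%d\n", match, success, match > 0);
	return 0;
}

Run against these assumed values, the sketch prints match=-1 success=1 wake_now=0: a saved_state hit is reported to the waker as success, but the task is not woken immediately, and resetting saved_state to TASK_RUNNING records that the wakeup has been delivered. This is the branch the last hunk keeps for all preemption models once the CONFIG_PREEMPT_RT guard is gone.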