author     Frederic Weisbecker <frederic@kernel.org>    2020-11-17 18:16:35 +0300
committer  Peter Zijlstra <peterz@infradead.org>        2020-11-19 13:25:42 +0300
commit     9f68b5b74c48761bcbd7d90cf1426049bdbaabb7 (patch)
tree       d121e17b162a5fdd759f0768e5462c1fab11c394 /kernel/sched
parent     179a9cf79212bb3b96fb69a314583189cd863c5b (diff)
sched: Detect call to schedule from critical entry code
Detect calls to schedule() between user_enter() and user_exit(). Those
are symptoms of early entry code that either forgot to protect a call
to schedule() inside exception_enter()/exception_exit() or, in the case
of HAVE_CONTEXT_TRACKING_OFFSTACK, enabled interrupts or preemption in
a wrong spot.

Signed-off-by: Frederic Weisbecker <frederic@kernel.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lkml.kernel.org/r/20201117151637.259084-4-frederic@kernel.org
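
As an illustration of what the new warning is meant to police (not part of
this commit), early entry code on architectures without
HAVE_CONTEXT_TRACKING_OFFSTACK is expected to bracket any preemptible
section with exception_enter()/exception_exit(), which save and restore
the context tracking state. A minimal sketch, where example_entry_slowpath()
is a hypothetical helper used only for illustration:

/*
 * Sketch only: the pattern the SCHED_WARN_ON() below is meant to
 * enforce. Scheduling while ct_state() still reports CONTEXT_USER
 * would trigger the new warning.
 */
static void example_entry_slowpath(void)	/* hypothetical */
{
	enum ctx_state prev_state;

	prev_state = exception_enter();		/* leave CONTEXT_USER */

	/* Preemptible work; calling schedule() is safe in here. */
	cond_resched();

	exception_exit(prev_state);		/* restore tracked state */
}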
Diffstat (limited to 'kernel/sched')
-rw-r--r--   kernel/sched/core.c   1
1 file changed, 1 insertion(+), 0 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index d2003a7d5ab5..c23d7cb5aee3 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -4291,6 +4291,7 @@ static inline void schedule_debug(struct task_struct *prev, bool preempt)
 		preempt_count_set(PREEMPT_DISABLED);
 	}
 	rcu_sleep_check();
+	SCHED_WARN_ON(ct_state() == CONTEXT_USER);
 
 	profile_hit(SCHED_PROFILING, __builtin_return_address(0));
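
For context, the two helpers used in the added line correspond roughly to
the following. This is a simplified sketch of the definitions around this
kernel release (kernel/sched/sched.h and the context tracking headers),
not part of the diff, and the exact wording may differ:

/*
 * Sketch: SCHED_WARN_ON() is a warn-once helper that compiles away
 * when CONFIG_SCHED_DEBUG is disabled, so the check costs nothing on
 * production configs.
 */
#ifdef CONFIG_SCHED_DEBUG
# define SCHED_WARN_ON(x)	WARN_ONCE(x, #x)
#else
# define SCHED_WARN_ON(x)	({ (void)(x), 0; })
#endif

/*
 * Sketch: ct_state() reads the per-CPU context tracking state. It
 * reports CONTEXT_USER while the CPU is considered to be running user
 * code, so seeing it from inside schedule() means the entry path
 * rescheduled without leaving user context first.
 */
static __always_inline enum ctx_state ct_state(void)
{
	return context_tracking_enabled() ?
		this_cpu_read(context_tracking.state) : CONTEXT_DISABLED;
}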