author    Ingo Molnar <mingo@kernel.org>    2019-09-16 15:04:28 +0300
committer Ingo Molnar <mingo@kernel.org>    2019-09-16 15:05:04 +0300
commit    563c4f85f9f0d63b712081d5b4522152cdcb8b6b (patch)
tree      92351ab2d60d5fd7ce5745d21c9a60ce6d46c5dd /include/linux/sched.h
parent    4adcdcea717cb2d8436bef00dd689aa5bc76f11b (diff)
parent    09c7e8b21d67c3c78ab9701dbc0fb1e9f14a0ba5 (diff)
Merge branch 'sched/rt' into sched/core, to pick up -rt changes
Pick up the first couple of patches working towards PREEMPT_RT.
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'include/linux/sched.h')
-rw-r--r--   include/linux/sched.h | 6
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index b94ad92dfbe6..f0edee94834a 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1772,7 +1772,7 @@ static inline int test_tsk_need_resched(struct task_struct *tsk)
  * value indicates whether a reschedule was done in fact.
  * cond_resched_lock() will drop the spinlock before scheduling,
  */
-#ifndef CONFIG_PREEMPT
+#ifndef CONFIG_PREEMPTION
 extern int _cond_resched(void);
 #else
 static inline int _cond_resched(void) { return 0; }
@@ -1801,12 +1801,12 @@ static inline void cond_resched_rcu(void)
 
 /*
  * Does a critical section need to be broken due to another
- * task waiting?: (technically does not depend on CONFIG_PREEMPT,
+ * task waiting?: (technically does not depend on CONFIG_PREEMPTION,
  * but a general need for low latency)
  */
 static inline int spin_needbreak(spinlock_t *lock)
 {
-#ifdef CONFIG_PREEMPT
+#ifdef CONFIG_PREEMPTION
 	return spin_is_contended(lock);
 #else
 	return 0;
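For context on what these hunks affect: spin_needbreak() reports whether another task is spinning on a held lock, and cond_resched_lock() (named in the first hunk's comment) combines it with the need-resched check to voluntarily break long critical sections. Below is a minimal, hypothetical sketch of that lock-breaking pattern; drain_work_list(), handle_one(), work_lock and work_list are invented for illustration and are not part of this commit. After the rename, the break fires on any preemptible kernel, since both CONFIG_PREEMPT and CONFIG_PREEMPT_RT select CONFIG_PREEMPTION.

#include <linux/list.h>
#include <linux/sched.h>
#include <linux/spinlock.h>

void handle_one(struct list_head *item);	/* hypothetical per-item handler */

static DEFINE_SPINLOCK(work_lock);		/* hypothetical lock/list pair */
static LIST_HEAD(work_list);

static void drain_work_list(void)
{
	spin_lock(&work_lock);
	while (!list_empty(&work_list)) {
		struct list_head *item = work_list.next;

		list_del(item);
		handle_one(item);

		/*
		 * cond_resched_lock() checks spin_needbreak() and the
		 * need-resched flag; if another task is waiting for
		 * work_lock, it drops the lock, reschedules, and
		 * re-takes it. Re-reading work_list.next on each
		 * iteration makes dropping the lock mid-loop safe here.
		 */
		cond_resched_lock(&work_lock);
	}
	spin_unlock(&work_lock);
}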