author | Peter Zijlstra <peterz@infradead.org> | 2013-09-23 21:04:26 +0400
committer | Ingo Molnar <mingo@kernel.org> | 2013-09-25 16:07:55 +0400
commit | a233f1120c37724938f7201fe2353b2577adaaf9 (patch)
tree | 9d7b0887b3398a1fc699ea557822c891bf9c450d /include/linux/sched.h
parent | bdb43806589096ac4272fe1307e789846ac08d7c (diff)
download | linux-a233f1120c37724938f7201fe2353b2577adaaf9.tar.xz
sched: Prepare for per-cpu preempt_count
When using per-cpu preempt_count variables we need to save/restore the
preempt_count on context switch (into per-task storage; for instance
the old thread_info::preempt_count variable) because of
PREEMPT_ACTIVE.
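As a minimal sketch (not part of this patch), the switch path could save
and restore the count as below; the __preempt_count per-cpu variable and
the saved_preempt_count field mirror the later x86 implementation and
are assumptions here:

/*
 * Illustrative only: stash the outgoing task's per-cpu preempt_count
 * (which may still carry PREEMPT_ACTIVE) and install the incoming
 * task's previously saved value.
 */
static inline void preempt_count_switch(struct task_struct *prev,
					struct task_struct *next)
{
	task_thread_info(prev)->saved_preempt_count =
		this_cpu_read(__preempt_count);
	this_cpu_write(__preempt_count,
		       task_thread_info(next)->saved_preempt_count);
}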
However, this means that on fork() the preempt_count value of the last
context switch gets copied; if we had a PREEMPT_ACTIVE switch right
before cloning a child task, the child too will have PREEMPT_ACTIVE
set and start its life with an extra PREEMPT_ACTIVE count.
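The copy happens because fork() duplicates the parent's thread_info
wholesale; the helper below is quoted from the include/linux/sched.h of
that era for illustration:

static inline void setup_thread_stack(struct task_struct *p, struct task_struct *org)
{
	*task_thread_info(p) = *task_thread_info(org);	/* preempt_count comes along */
	task_thread_info(p)->task = p;
}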
Therefore we need to make init_task_preempt_count() unconditional;
this resets whatever preempt_count we inherited from our parent
process.
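For the generic (!per-cpu) case, where the count still lives in
thread_info, the unconditional reset can be as simple as the sketch
below; the exact body is illustrative, only the requirement that
sched_fork() invokes it for every new task comes from this patch:

#define init_task_preempt_count(p) do { \
	task_thread_info(p)->preempt_count = PREEMPT_DISABLED; \
} while (0)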
Doing so for !per-cpu implementations is harmless.
For !PREEMPT_COUNT kernels we need to be careful not to start life
with an increased preempt_count.
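Expanding the new macros shows how both configurations work out (the
values follow directly from the hunk below):

  CONFIG_PREEMPT_COUNT=y:
    PREEMPT_DISABLED   = 1 + PREEMPT_NEED_RESCHED
    INIT_PREEMPT_COUNT = 1 + PREEMPT_NEED_RESCHED + PREEMPT_ACTIVE

  CONFIG_PREEMPT_COUNT=n:
    PREEMPT_DISABLED   = PREEMPT_NEED_RESCHED	/* no extra count */
    INIT_PREEMPT_COUNT = PREEMPT_NEED_RESCHED + PREEMPT_ACTIVE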
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/n/tip-4k0b7oy1rcdyzochwiixuwi9@git.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'include/linux/sched.h')
-rw-r--r-- | include/linux/sched.h | 12
1 file changed, 9 insertions(+), 3 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 06ac17c7e639..b09798b672f3 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -428,6 +428,14 @@ struct task_cputime {
 		.sum_exec_runtime = 0,					\
 	}
 
+#define PREEMPT_ENABLED	(PREEMPT_NEED_RESCHED)
+
+#ifdef CONFIG_PREEMPT_COUNT
+#define PREEMPT_DISABLED	(1 + PREEMPT_ENABLED)
+#else
+#define PREEMPT_DISABLED	PREEMPT_ENABLED
+#endif
+
 /*
  * Disable preemption until the scheduler is running.
  * Reset by start_kernel()->sched_init()->init_idle().
@@ -435,9 +443,7 @@ struct task_cputime {
  * We include PREEMPT_ACTIVE to avoid cond_resched() from working
  * before the scheduler is active -- see should_resched().
  */
-#define INIT_PREEMPT_COUNT	(1 + PREEMPT_ACTIVE + PREEMPT_NEED_RESCHED)
-#define PREEMPT_ENABLED	(PREEMPT_NEED_RESCHED)
-#define PREEMPT_DISABLED	(1 + PREEMPT_NEED_RESCHED)
+#define INIT_PREEMPT_COUNT	(PREEMPT_DISABLED + PREEMPT_ACTIVE)
 
 /**
  * struct thread_group_cputimer - thread group interval timer counts