author    | Dmitry Torokhov <dmitry.torokhov@gmail.com> | 2012-03-20 04:02:01 +0400
committer | Dmitry Torokhov <dmitry.torokhov@gmail.com> | 2012-03-20 04:02:01 +0400
commit    | 10ce3cc919f50c2043b41ca968b43c26a3672600 (patch)
tree      | ea409366a5208aced495bc0516a08b81fd43222e /kernel/sched/idle_task.c
parent    | 24e3e5ae1e4c2a3a32f5b1f96b4e3fd721806acd (diff)
parent    | 5c6a7a62c130afef3d61c1dee153012231ff5cd9 (diff)
download  | linux-10ce3cc919f50c2043b41ca968b43c26a3672600.tar.xz
Merge branch 'next' into for-linus
Diffstat (limited to 'kernel/sched/idle_task.c')
-rw-r--r-- | kernel/sched/idle_task.c | 99
1 file changed, 99 insertions, 0 deletions
diff --git a/kernel/sched/idle_task.c b/kernel/sched/idle_task.c
new file mode 100644
index 000000000000..91b4c957f289
--- /dev/null
+++ b/kernel/sched/idle_task.c
@@ -0,0 +1,99 @@
+#include "sched.h"
+
+/*
+ * idle-task scheduling class.
+ *
+ * (NOTE: these are not related to SCHED_IDLE tasks which are
+ * handled in sched_fair.c)
+ */
+
+#ifdef CONFIG_SMP
+static int
+select_task_rq_idle(struct task_struct *p, int sd_flag, int flags)
+{
+	return task_cpu(p); /* IDLE tasks as never migrated */
+}
+#endif /* CONFIG_SMP */
+/*
+ * Idle tasks are unconditionally rescheduled:
+ */
+static void check_preempt_curr_idle(struct rq *rq, struct task_struct *p, int flags)
+{
+	resched_task(rq->idle);
+}
+
+static struct task_struct *pick_next_task_idle(struct rq *rq)
+{
+	schedstat_inc(rq, sched_goidle);
+	calc_load_account_idle(rq);
+	return rq->idle;
+}
+
+/*
+ * It is not legal to sleep in the idle task - print a warning
+ * message if some code attempts to do it:
+ */
+static void
+dequeue_task_idle(struct rq *rq, struct task_struct *p, int flags)
+{
+	raw_spin_unlock_irq(&rq->lock);
+	printk(KERN_ERR "bad: scheduling from the idle thread!\n");
+	dump_stack();
+	raw_spin_lock_irq(&rq->lock);
+}
+
+static void put_prev_task_idle(struct rq *rq, struct task_struct *prev)
+{
+}
+
+static void task_tick_idle(struct rq *rq, struct task_struct *curr, int queued)
+{
+}
+
+static void set_curr_task_idle(struct rq *rq)
+{
+}
+
+static void switched_to_idle(struct rq *rq, struct task_struct *p)
+{
+	BUG();
+}
+
+static void
+prio_changed_idle(struct rq *rq, struct task_struct *p, int oldprio)
+{
+	BUG();
+}
+
+static unsigned int get_rr_interval_idle(struct rq *rq, struct task_struct *task)
+{
+	return 0;
+}
+
+/*
+ * Simple, special scheduling class for the per-CPU idle tasks:
+ */
+const struct sched_class idle_sched_class = {
+	/* .next is NULL */
+	/* no enqueue/yield_task for idle tasks */
+
+	/* dequeue is not valid, we print a debug message there: */
+	.dequeue_task		= dequeue_task_idle,
+
+	.check_preempt_curr	= check_preempt_curr_idle,
+
+	.pick_next_task		= pick_next_task_idle,
+	.put_prev_task		= put_prev_task_idle,
+
+#ifdef CONFIG_SMP
+	.select_task_rq		= select_task_rq_idle,
+#endif
+
+	.set_curr_task		= set_curr_task_idle,
+	.task_tick		= task_tick_idle,
+
+	.get_rr_interval	= get_rr_interval_idle,
+
+	.prio_changed		= prio_changed_idle,
+	.switched_to		= switched_to_idle,
+};
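
For context, here is a minimal sketch (not part of this commit) of how the scheduler core of that era ends up calling into idle_sched_class: the core walks the scheduling classes from highest to lowest priority and takes the first task offered, and since pick_next_task_idle() above always returns rq->idle, the idle class acts as the guaranteed fallback. The loop is paraphrased from kernel/sched/core.c of the same period rather than quoted from it, so treat the exact body as an approximation.

/*
 * Sketch only: simplified version of the core pick_next_task() loop.
 * for_each_class() iterates stop -> rt -> fair -> idle; the idle class
 * always returns a task, so the BUG() below should be unreachable.
 */
static inline struct task_struct *pick_next_task(struct rq *rq)
{
	const struct sched_class *class;
	struct task_struct *p;

	for_each_class(class) {
		p = class->pick_next_task(rq);
		if (p)
			return p;	/* first class with a runnable task wins */
	}

	BUG();	/* the idle class should always have returned rq->idle */
}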