Diffstat (limited to 'include/linux/sched.h')
 include/linux/sched.h | 64 +++++++++++++++++++++++++++++++++++++++++++++-------------------
 1 file changed, 45 insertions(+), 19 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index a1707583de49..693f0e6c54d4 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -88,6 +88,7 @@ struct sched_param {
struct exec_domain;
struct futex_pi_state;
+struct bio;
/*
* List of flags we want to share for kernel threads,
@@ -194,6 +195,14 @@ extern void sched_init_smp(void);
extern void init_idle(struct task_struct *idle, int cpu);
extern cpumask_t nohz_cpu_mask;
+#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ)
+extern int select_nohz_load_balancer(int cpu);
+#else
+static inline int select_nohz_load_balancer(int cpu)
+{
+ return 0;
+}
+#endif
/*
* Only dump TASK_* tasks. (0 for all tasks)
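Note: because the !CONFIG_SMP or !CONFIG_NO_HZ build gets a static inline stub that returns 0, call sites can use select_nohz_load_balancer() unconditionally and the compiler discards the dead branch. A hypothetical caller sketch, not part of this patch (the real call sites live in the tick/scheduler code):

        static int example_enter_nohz(int cpu)
        {
                /* nonzero: this CPU has been nominated to do the idle load balancing */
                return select_nohz_load_balancer(cpu);
        }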
@@ -226,6 +235,7 @@ extern void scheduler_tick(void);
extern void softlockup_tick(void);
extern void spawn_softlockup_task(void);
extern void touch_softlockup_watchdog(void);
+extern void touch_all_softlockup_watchdogs(void);
#else
static inline void softlockup_tick(void)
{
@@ -236,6 +246,9 @@ static inline void spawn_softlockup_task(void)
static inline void touch_softlockup_watchdog(void)
{
}
+static inline void touch_all_softlockup_watchdogs(void)
+{
+}
#endif
@@ -379,6 +392,7 @@ struct sighand_struct {
atomic_t count;
struct k_sigaction action[_NSIG];
spinlock_t siglock;
+ struct list_head signalfd_list;
};
struct pacct_struct {
@@ -457,6 +471,7 @@ struct signal_struct {
cputime_t utime, stime, cutime, cstime;
unsigned long nvcsw, nivcsw, cnvcsw, cnivcsw;
unsigned long min_flt, maj_flt, cmin_flt, cmaj_flt;
+ unsigned long inblock, oublock, cinblock, coublock;
/*
* Cumulative ns of scheduled CPU time for dead threads in the
@@ -668,8 +683,14 @@ struct sched_group {
/*
* CPU power of this group, SCHED_LOAD_SCALE being max power for a
* single CPU. This is read only (except for setup, hotplug CPU).
+ * Note: Never change cpu_power without recomputing its reciprocal
*/
- unsigned long cpu_power;
+ unsigned int __cpu_power;
+ /*
+ * reciprocal value of cpu_power to avoid expensive divides
+ * (see include/linux/reciprocal_div.h)
+ */
+ u32 reciprocal_cpu_power;
};
struct sched_domain {
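Note: the comment above is the whole contract, __cpu_power and reciprocal_cpu_power must change together. A sketch of how that pairing can be kept consistent, assuming the reciprocal_value()/reciprocal_divide() helpers from include/linux/reciprocal_div.h (the sg_* helper names here are illustrative, not part of this patch):

        #include <linux/reciprocal_div.h>

        /* Illustrative helper: always refresh the reciprocal with the power. */
        static inline void sg_set_cpu_power(struct sched_group *sg, u32 power)
        {
                sg->__cpu_power = power;
                sg->reciprocal_cpu_power = reciprocal_value(power);
        }

        /* "load / cpu_power" without a hardware divide on the hot path. */
        static inline u32 sg_div_cpu_power(const struct sched_group *sg, u32 load)
        {
                return reciprocal_divide(load, sg->reciprocal_cpu_power);
        }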
@@ -799,10 +820,10 @@ struct prio_array;
struct task_struct {
volatile long state; /* -1 unrunnable, 0 runnable, >0 stopped */
- struct thread_info *thread_info;
+ void *stack;
atomic_t usage;
- unsigned long flags; /* per process flags, defined below */
- unsigned long ptrace;
+ unsigned int flags; /* per process flags, defined below */
+ unsigned int ptrace;
int lock_depth; /* BKL lock depth */
@@ -825,7 +846,7 @@ struct task_struct {
unsigned long long sched_time; /* sched_clock time spent running */
enum sleep_type sleep_type;
- unsigned long policy;
+ unsigned int policy;
cpumask_t cpus_allowed;
unsigned int time_slice, first_time_slice;
@@ -845,11 +866,11 @@ struct task_struct {
/* task state */
struct linux_binfmt *binfmt;
- long exit_state;
+ int exit_state;
int exit_code, exit_signal;
int pdeath_signal; /* The signal sent when the parent dies */
/* ??? */
- unsigned long personality;
+ unsigned int personality;
unsigned did_exec:1;
pid_t pid;
pid_t tgid;
@@ -881,7 +902,7 @@ struct task_struct {
int __user *set_child_tid; /* CLONE_CHILD_SETTID */
int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
- unsigned long rt_priority;
+ unsigned int rt_priority;
cputime_t utime, stime;
unsigned long nvcsw, nivcsw; /* context switch counts */
struct timespec start_time;
@@ -996,6 +1017,9 @@ struct task_struct {
/* journalling filesystem info */
void *journal_info;
+/* stacked block device info */
+ struct bio *bio_list, **bio_tail;
+
/* VM state */
struct reclaim_state *reclaim_state;
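Note: bio_list/bio_tail give each task a singly linked FIFO of bios plus a pointer to the last ->bi_next slot, so a stacked block driver (md/dm) that re-enters the submission path can queue work instead of recursing on the kernel stack. A sketch of the tail-pointer append idiom (the function here is hypothetical; the real consumer is the block layer's submission path):

        #include <linux/bio.h>

        static void example_defer_bio(struct bio *bio)
        {
                if (current->bio_tail) {
                        /* Submission already active on this task: append and return. */
                        bio->bi_next = NULL;
                        *current->bio_tail = bio;
                        current->bio_tail = &bio->bi_next;
                        return;
                }
                /* Otherwise this caller owns current->bio_list and must drain it. */
        }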
@@ -1138,6 +1162,7 @@ static inline void put_task_struct(struct task_struct *t)
/* Not implemented yet, only for 486*/
#define PF_STARTING 0x00000002 /* being created */
#define PF_EXITING 0x00000004 /* getting shut down */
+#define PF_EXITPIDONE 0x00000008 /* pi exit done on shut down */
#define PF_FORKNOEXEC 0x00000040 /* forked but didn't exec */
#define PF_SUPERPRIV 0x00000100 /* used super-user privileges */
#define PF_DUMPCORE 0x00000200 /* dumped core */
@@ -1158,6 +1183,7 @@ static inline void put_task_struct(struct task_struct *t)
#define PF_SPREAD_SLAB 0x02000000 /* Spread some slab caches over cpuset */
#define PF_MEMPOLICY 0x10000000 /* Non-default NUMA mempolicy */
#define PF_MUTEX_TESTER 0x20000000 /* Thread belongs to the rt mutex tester */
+#define PF_FREEZER_SKIP 0x40000000 /* Freezer should not count it as freezeable */
/*
* Only the _current_ task can read/write to tsk->flags, but other
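Note: PF_FREEZER_SKIP lets a task that is about to block on another, possibly already frozen, task tell the freezer not to wait for it. A hypothetical sketch of the set/clear discipline around such a wait (helper name and completion are illustrative):

        #include <linux/completion.h>

        static void example_wait_without_blocking_freezer(struct completion *done)
        {
                current->flags |= PF_FREEZER_SKIP;      /* freezer: don't count me */
                wait_for_completion(done);
                current->flags &= ~PF_FREEZER_SKIP;
        }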
@@ -1299,6 +1325,7 @@ extern int in_egroup_p(gid_t);
extern void proc_caches_init(void);
extern void flush_signals(struct task_struct *);
+extern void ignore_signals(struct task_struct *);
extern void flush_signal_handlers(struct task_struct *, int force_default);
extern int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info);
@@ -1494,8 +1521,8 @@ static inline void unlock_task_sighand(struct task_struct *tsk,
#ifndef __HAVE_THREAD_FUNCTIONS
-#define task_thread_info(task) (task)->thread_info
-#define task_stack_page(task) ((void*)((task)->thread_info))
+#define task_thread_info(task) ((struct thread_info *)(task)->stack)
+#define task_stack_page(task) ((task)->stack)
static inline void setup_thread_stack(struct task_struct *p, struct task_struct *org)
{
@@ -1505,7 +1532,7 @@ static inline void setup_thread_stack(struct task_struct *p, struct task_struct
static inline unsigned long *end_of_stack(struct task_struct *p)
{
- return (unsigned long *)(p->thread_info + 1);
+ return (unsigned long *)(task_thread_info(p) + 1);
}
#endif
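Note: with ->thread_info replaced by the opaque ->stack pointer, task_stack_page() and task_thread_info() are now two views of the same allocation: struct thread_info sits at the base of the kernel stack and end_of_stack() points just past it. A small illustrative helper built only on those accessors, assuming the common stack-grows-down layout (hypothetical, not part of the patch):

        /* Bytes of stack still free between 'sp' and the thread_info area. */
        static inline unsigned long example_stack_headroom(struct task_struct *p,
                                                           unsigned long sp)
        {
                return sp - (unsigned long)end_of_stack(p);
        }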
@@ -1590,11 +1617,13 @@ static inline int lock_need_resched(spinlock_t *lock)
return 0;
}
-/* Reevaluate whether the task has signals pending delivery.
- This is required every time the blocked sigset_t changes.
- callers must hold sighand->siglock. */
-
-extern FASTCALL(void recalc_sigpending_tsk(struct task_struct *t));
+/*
+ * Reevaluate whether the task has signals pending delivery.
+ * Wake the task if so.
+ * This is required every time the blocked sigset_t changes.
+ * callers must hold sighand->siglock.
+ */
+extern void recalc_sigpending_and_wake(struct task_struct *t);
extern void recalc_sigpending(void);
extern void signal_wake_up(struct task_struct *t, int resume_stopped);
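Note: the renamed helper both re-evaluates the pending-signal state and wakes the target, which matters when a change to ->blocked makes a queued signal deliverable to a sleeping task. A hypothetical caller observing the locking rule from the comment above:

        static void example_unblock_one(struct task_struct *t, int sig)
        {
                spin_lock_irq(&t->sighand->siglock);
                sigdelset(&t->blocked, sig);    /* blocked set changed ... */
                recalc_sigpending_and_wake(t);  /* ... so recheck, and wake if needed */
                spin_unlock_irq(&t->sighand->siglock);
        }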
@@ -1641,10 +1670,7 @@ static inline void arch_pick_mmap_layout(struct mm_struct *mm)
extern long sched_setaffinity(pid_t pid, cpumask_t new_mask);
extern long sched_getaffinity(pid_t pid, cpumask_t *mask);
-#include <linux/sysdev.h>
extern int sched_mc_power_savings, sched_smt_power_savings;
-extern struct sysdev_attribute attr_sched_mc_power_savings, attr_sched_smt_power_savings;
-extern int sched_create_sysfs_power_savings_entries(struct sysdev_class *cls);
extern void normalize_rt_tasks(void);