path: root/include/linux/sched.h
author		Jens Axboe <axboe@suse.de>	2005-06-27 12:55:12 +0400
committer	Linus Torvalds <torvalds@ppc970.osdl.org>	2005-06-28 01:33:29 +0400
commit		22e2c507c301c3dbbcf91b4948b88f78842ee6c9 (patch)
tree		9a97c91d1362e69703aa286021daffb8a5456f4c /include/linux/sched.h
parent		020f46a39eb7b99a575b9f4d105fce2b142acdf1 (diff)
download	linux-22e2c507c301c3dbbcf91b4948b88f78842ee6c9.tar.xz
[PATCH] Update cfq io scheduler to time sliced design
This updates the CFQ io scheduler to the new time sliced design (cfq v3). It provides full process fairness, while giving excellent aggregate system throughput even for many competing processes. It supports io priorities, either inherited from the cpu nice value, or set directly with the ioprio_get/set syscalls. The latter closely mimic set/getpriority.

This import is based on my latest from -mm.

Signed-off-by: Jens Axboe <axboe@suse.de>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
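As a minimal userspace sketch of the new syscall pair in use: glibc provides no wrappers, so this goes through syscall(2) and assumes your libc headers define SYS_ioprio_set/SYS_ioprio_get (otherwise substitute the raw __NR_ numbers for your architecture). The IOPRIO_* constants are restated here to keep the example self-contained and mirror the kernel's encoding of class and level:

	#include <stdio.h>
	#include <sys/syscall.h>
	#include <unistd.h>

	/* ioprio encoding: class in the top 3 bits, priority level below */
	#define IOPRIO_CLASS_SHIFT	13
	#define IOPRIO_PRIO_VALUE(class, data) \
		(((class) << IOPRIO_CLASS_SHIFT) | (data))

	enum { IOPRIO_CLASS_NONE, IOPRIO_CLASS_RT,
	       IOPRIO_CLASS_BE, IOPRIO_CLASS_IDLE };
	enum { IOPRIO_WHO_PROCESS = 1, IOPRIO_WHO_PGRP, IOPRIO_WHO_USER };

	int main(void)
	{
		/* best-effort class, level 4 (0 is highest, 7 lowest) */
		if (syscall(SYS_ioprio_set, IOPRIO_WHO_PROCESS, 0,
			    IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, 4)) < 0) {
			perror("ioprio_set");
			return 1;
		}
		printf("ioprio is now %ld\n",
		       syscall(SYS_ioprio_get, IOPRIO_WHO_PROCESS, 0));
		return 0;
	}

As with set/getpriority, a "who" of 0 means the calling process.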
Diffstat (limited to 'include/linux/sched.h')
-rw-r--r--	include/linux/sched.h	6
1 file changed, 5 insertions(+), 1 deletion(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 9530b1903160..ff48815bd3a2 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -608,6 +608,8 @@ struct task_struct {
 	struct list_head run_list;
 	prio_array_t *array;
 
+	unsigned short ioprio;
+
 	unsigned long sleep_avg;
 	unsigned long long timestamp, last_ran;
 	unsigned long long sched_time;	/* sched_clock time spent running */
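The single unsigned short encodes both the scheduling class and the priority level within it. When a task never sets one explicitly (class IOPRIO_CLASS_NONE), cfq derives an effective best-effort level from the cpu nice value; a sketch of that fallback, assuming nice's -20..19 range folded onto the eight best-effort levels (the helper name here is illustrative):

	static int nice_to_ioprio_level(int nice)
	{
		/* nice -20 (most favoured) -> level 0, nice +19 -> level 7 */
		return (nice + 20) / 5;
	}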
@@ -763,6 +765,7 @@ struct task_struct {
 	nodemask_t mems_allowed;
 	int cpuset_mems_generation;
 #endif
+	atomic_t fs_excl;	/* holding fs exclusive resources */
 };
 
 static inline pid_t process_group(struct task_struct *tsk)
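fs_excl counts filesystem-exclusive resources the task currently holds; a non-zero count lets the io scheduler boost the task so it releases them quickly. The companion accessors this series adds to include/linux/fs.h look roughly like this sketch:

	static inline void get_fs_excl(void)
	{
		atomic_inc(&current->fs_excl);
	}

	static inline void put_fs_excl(void)
	{
		atomic_dec(&current->fs_excl);
	}

	static inline int has_fs_excl(void)
	{
		return atomic_read(&current->fs_excl) > 0;
	}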
@@ -1112,7 +1115,8 @@ extern void unhash_process(struct task_struct *p);
 
 /*
  * Protects ->fs, ->files, ->mm, ->ptrace, ->group_info, ->comm, keyring
- * subscriptions and synchronises with wait4(). Also used in procfs.
+ * subscriptions and synchronises with wait4(). Also used in procfs. Also
+ * pins the final release of task.io_context.
  *
  * Nests both inside and outside of read_lock(&tasklist_lock).
  * It must not be nested with write_lock_irq(&tasklist_lock),