author     Tejun Heo <tj@kernel.org>    2010-07-02 12:03:51 +0400
committer  Tejun Heo <tj@kernel.org>    2010-07-02 13:00:02 +0400
commit     f34217977d717385a3e9fd7018ac39fade3964c0 (patch)
tree       7e05645e911eea15b33a368b91ac82ae12884e6d /include/linux/workqueue.h
parent     bdbc5dd7de5d07d6c9d3536e598956165a031d4c (diff)
workqueue: implement unbound workqueue
This patch implements unbound workqueues, which can be requested with the
WQ_UNBOUND flag at creation time. An unbound workqueue has the following
properties.
* It uses a dedicated gcwq with a pseudo CPU number WORK_CPU_UNBOUND.
This gcwq is always online and disassociated.
* Workers are not bound to any CPU and are not concurrency managed. Work
  items are dispatched to workers as soon as possible and the only applied
  limitation is @max_active. IOW, all unbound workqueues are
  implicitly high priority.
Unbound workqueues can be used as a simple execution context provider.
Contexts that are not bound to any CPU are served as soon as possible.
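For illustration only (not part of this patch), here is a minimal sketch of how a caller could create and use such a workqueue with the new flag, assuming the alloc_workqueue() interface introduced earlier in the same series; the workqueue name, work function, and init hook are invented for the example:

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/workqueue.h>

static struct workqueue_struct *my_unbound_wq;	/* hypothetical example wq */

static void my_work_fn(struct work_struct *work)
{
	/* runs in a worker that is not bound to any CPU */
	pr_info("unbound work executed\n");
}

static DECLARE_WORK(my_work, my_work_fn);

static int __init my_example_init(void)
{
	/* WQ_UNBOUND: dispatch as soon as possible, limited only by max_active */
	my_unbound_wq = alloc_workqueue("my_unbound", WQ_UNBOUND,
					WQ_UNBOUND_MAX_ACTIVE);
	if (!my_unbound_wq)
		return -ENOMEM;

	queue_work(my_unbound_wq, &my_work);
	return 0;
}

Here WQ_UNBOUND_MAX_ACTIVE is passed as @max_active to request the largest limit an unbound workqueue allows.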
Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Arjan van de Ven <arjan@linux.intel.com>
Cc: David Howells <dhowells@redhat.com>
Diffstat (limited to 'include/linux/workqueue.h')
-rw-r--r--  include/linux/workqueue.h  15
1 files changed, 14 insertions, 1 deletions
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index 139069a6286c..67ce734747f6 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -51,7 +51,8 @@ enum {
 	WORK_NO_COLOR		= WORK_NR_COLORS,
 
 	/* special cpu IDs */
-	WORK_CPU_NONE		= NR_CPUS,
+	WORK_CPU_UNBOUND	= NR_CPUS,
+	WORK_CPU_NONE		= NR_CPUS + 1,
 	WORK_CPU_LAST		= WORK_CPU_NONE,
 
 	/*
@@ -237,11 +238,17 @@ enum {
 	WQ_RESCUER		= 1 << 3, /* has an rescue worker */
 	WQ_HIGHPRI		= 1 << 4, /* high priority */
 	WQ_CPU_INTENSIVE	= 1 << 5, /* cpu instensive workqueue */
+	WQ_UNBOUND		= 1 << 6, /* not bound to any cpu */
 
 	WQ_MAX_ACTIVE		= 512,	  /* I like 512, better ideas? */
+	WQ_MAX_UNBOUND_PER_CPU	= 4,	  /* 4 * #cpus for unbound wq */
 	WQ_DFL_ACTIVE		= WQ_MAX_ACTIVE / 2,
 };
 
+/* unbound wq's aren't per-cpu, scale max_active according to #cpus */
+#define WQ_UNBOUND_MAX_ACTIVE	\
+	max_t(int, WQ_MAX_ACTIVE, num_possible_cpus() * WQ_MAX_UNBOUND_PER_CPU)
+
 /*
  * System-wide workqueues which are always present.
  *
@@ -256,10 +263,16 @@ enum {
  * system_nrt_wq is non-reentrant and guarantees that any given work
  * item is never executed in parallel by multiple CPUs. Queue
  * flushing might take relatively long.
+ *
+ * system_unbound_wq is unbound workqueue. Workers are not bound to
+ * any specific CPU, not concurrency managed, and all queued works are
+ * executed immediately as long as max_active limit is not reached and
+ * resources are available.
  */
 extern struct workqueue_struct *system_wq;
 extern struct workqueue_struct *system_long_wq;
 extern struct workqueue_struct *system_nrt_wq;
+extern struct workqueue_struct *system_unbound_wq;
 
 extern struct workqueue_struct *
 __alloc_workqueue_key(const char *name, unsigned int flags, int max_active,
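As a further usage sketch (not part of this commit), queueing onto the new system_unbound_wq declared above works like any other workqueue; the work item and function names here are hypothetical:

#include <linux/workqueue.h>

static void heavy_fn(struct work_struct *work)
{
	/* long-running work; an unbound worker does not tie up the
	 * per-cpu concurrency management */
}

static DECLARE_WORK(heavy_work, heavy_fn);

static void kick_off_heavy_work(void)
{
	/* served by any available worker as soon as possible,
	 * subject only to the max_active limit */
	queue_work(system_unbound_wq, &heavy_work);
}

Note that WQ_UNBOUND_MAX_ACTIVE evaluates to at least WQ_MAX_ACTIVE (512); the num_possible_cpus() * WQ_MAX_UNBOUND_PER_CPU term only raises the limit on systems with more than 128 possible CPUs.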