diff options
| author | David S. Miller <davem@davemloft.net> | 2010-09-10 09:27:33 +0400 | 
|---|---|---|
| committer | David S. Miller <davem@davemloft.net> | 2010-09-10 09:27:33 +0400 | 
| commit | e548833df83c3554229eff0672900bfe958b45fd (patch) | |
| tree | 85efc4a76dc356593d6d394776aeb845dc580fb6 /kernel/workqueue.c | |
| parent | cbd9da7be869f676afc204e1a664163778c770bd (diff) | |
| parent | 053d8f6622701f849fda2ca2c9ae596c13599ba9 (diff) | |
| download | linux-e548833df83c3554229eff0672900bfe958b45fd.tar.xz | |
Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6
Conflicts:
	net/mac80211/main.c
Diffstat (limited to 'kernel/workqueue.c')
| -rw-r--r-- | kernel/workqueue.c | 62 | 
1 file changed, 47 insertions(+), 15 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 2994a0e3a61c..727f24e563ae 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -35,6 +35,9 @@  #include <linux/lockdep.h>  #include <linux/idr.h> +#define CREATE_TRACE_POINTS +#include <trace/events/workqueue.h> +  #include "workqueue_sched.h"  enum { @@ -87,7 +90,8 @@ enum {  /*   * Structure fields follow one of the following exclusion rules.   * - * I: Set during initialization and read-only afterwards. + * I: Modifiable by initialization/destruction paths and read-only for + *    everyone else.   *   * P: Preemption protected.  Disabling preemption is enough and should   *    only be modified and accessed from the local cpu. @@ -195,7 +199,7 @@ typedef cpumask_var_t mayday_mask_t;  	cpumask_test_and_set_cpu((cpu), (mask))  #define mayday_clear_cpu(cpu, mask)		cpumask_clear_cpu((cpu), (mask))  #define for_each_mayday_cpu(cpu, mask)		for_each_cpu((cpu), (mask)) -#define alloc_mayday_mask(maskp, gfp)		alloc_cpumask_var((maskp), (gfp)) +#define alloc_mayday_mask(maskp, gfp)		zalloc_cpumask_var((maskp), (gfp))  #define free_mayday_mask(mask)			free_cpumask_var((mask))  #else  typedef unsigned long mayday_mask_t; @@ -940,10 +944,14 @@ static void __queue_work(unsigned int cpu, struct workqueue_struct *wq,  	struct global_cwq *gcwq;  	struct cpu_workqueue_struct *cwq;  	struct list_head *worklist; +	unsigned int work_flags;  	unsigned long flags;  	debug_work_activate(work); +	if (WARN_ON_ONCE(wq->flags & WQ_DYING)) +		return; +  	/* determine gcwq to use */  	if (!(wq->flags & WQ_UNBOUND)) {  		struct global_cwq *last_gcwq; @@ -986,14 +994,17 @@ static void __queue_work(unsigned int cpu, struct workqueue_struct *wq,  	BUG_ON(!list_empty(&work->entry));  	cwq->nr_in_flight[cwq->work_color]++; +	work_flags = work_color_to_flags(cwq->work_color);  	if (likely(cwq->nr_active < cwq->max_active)) {  		cwq->nr_active++;  		worklist = gcwq_determine_ins_pos(gcwq, cwq); -	} else +	} else { +		work_flags |= 
WORK_STRUCT_DELAYED;  		worklist = &cwq->delayed_works; +	} -	insert_work(cwq, work, worklist, work_color_to_flags(cwq->work_color)); +	insert_work(cwq, work, worklist, work_flags);  	spin_unlock_irqrestore(&gcwq->lock, flags);  } @@ -1212,6 +1223,7 @@ static void worker_leave_idle(struct worker *worker)   * bound), %false if offline.   */  static bool worker_maybe_bind_and_lock(struct worker *worker) +__acquires(&gcwq->lock)  {  	struct global_cwq *gcwq = worker->gcwq;  	struct task_struct *task = worker->task; @@ -1485,6 +1497,8 @@ static void gcwq_mayday_timeout(unsigned long __gcwq)   * otherwise.   */  static bool maybe_create_worker(struct global_cwq *gcwq) +__releases(&gcwq->lock) +__acquires(&gcwq->lock)  {  	if (!need_to_create_worker(gcwq))  		return false; @@ -1659,6 +1673,7 @@ static void cwq_activate_first_delayed(struct cpu_workqueue_struct *cwq)  	struct list_head *pos = gcwq_determine_ins_pos(cwq->gcwq, cwq);  	move_linked_works(work, pos, NULL); +	__clear_bit(WORK_STRUCT_DELAYED_BIT, work_data_bits(work));  	cwq->nr_active++;  } @@ -1666,6 +1681,7 @@ static void cwq_activate_first_delayed(struct cpu_workqueue_struct *cwq)   * cwq_dec_nr_in_flight - decrement cwq's nr_in_flight   * @cwq: cwq of interest   * @color: color of work which left the queue + * @delayed: for a delayed work   *   * A work either has completed or is removed from pending queue,   * decrement nr_in_flight of its cwq and handle workqueue flushing. @@ -1673,19 +1689,22 @@ static void cwq_activate_first_delayed(struct cpu_workqueue_struct *cwq)   * CONTEXT:   * spin_lock_irq(gcwq->lock).   
*/ -static void cwq_dec_nr_in_flight(struct cpu_workqueue_struct *cwq, int color) +static void cwq_dec_nr_in_flight(struct cpu_workqueue_struct *cwq, int color, +				 bool delayed)  {  	/* ignore uncolored works */  	if (color == WORK_NO_COLOR)  		return;  	cwq->nr_in_flight[color]--; -	cwq->nr_active--; -	if (!list_empty(&cwq->delayed_works)) { -		/* one down, submit a delayed one */ -		if (cwq->nr_active < cwq->max_active) -			cwq_activate_first_delayed(cwq); +	if (!delayed) { +		cwq->nr_active--; +		if (!list_empty(&cwq->delayed_works)) { +			/* one down, submit a delayed one */ +			if (cwq->nr_active < cwq->max_active) +				cwq_activate_first_delayed(cwq); +		}  	}  	/* is flush in progress and are we at the flushing tip? */ @@ -1722,6 +1741,8 @@ static void cwq_dec_nr_in_flight(struct cpu_workqueue_struct *cwq, int color)   * spin_lock_irq(gcwq->lock) which is released and regrabbed.   */  static void process_one_work(struct worker *worker, struct work_struct *work) +__releases(&gcwq->lock) +__acquires(&gcwq->lock)  {  	struct cpu_workqueue_struct *cwq = get_work_cwq(work);  	struct global_cwq *gcwq = cwq->gcwq; @@ -1790,7 +1811,13 @@ static void process_one_work(struct worker *worker, struct work_struct *work)  	work_clear_pending(work);  	lock_map_acquire(&cwq->wq->lockdep_map);  	lock_map_acquire(&lockdep_map); +	trace_workqueue_execute_start(work);  	f(work); +	/* +	 * While we must be careful to not use "work" after this, the trace +	 * point will only record its address. 
+	 */ +	trace_workqueue_execute_end(work);  	lock_map_release(&lockdep_map);  	lock_map_release(&cwq->wq->lockdep_map); @@ -1814,7 +1841,7 @@ static void process_one_work(struct worker *worker, struct work_struct *work)  	hlist_del_init(&worker->hentry);  	worker->current_work = NULL;  	worker->current_cwq = NULL; -	cwq_dec_nr_in_flight(cwq, work_color); +	cwq_dec_nr_in_flight(cwq, work_color, false);  }  /** @@ -2379,7 +2406,8 @@ static int try_to_grab_pending(struct work_struct *work)  			debug_work_deactivate(work);  			list_del_init(&work->entry);  			cwq_dec_nr_in_flight(get_work_cwq(work), -					     get_work_color(work)); +				get_work_color(work), +				*work_data_bits(work) & WORK_STRUCT_DELAYED);  			ret = 1;  		}  	} @@ -2782,7 +2810,6 @@ struct workqueue_struct *__alloc_workqueue_key(const char *name,  		if (IS_ERR(rescuer->task))  			goto err; -		wq->rescuer = rescuer;  		rescuer->task->flags |= PF_THREAD_BOUND;  		wake_up_process(rescuer->task);  	} @@ -2824,6 +2851,7 @@ void destroy_workqueue(struct workqueue_struct *wq)  {  	unsigned int cpu; +	wq->flags |= WQ_DYING;  	flush_workqueue(wq);  	/* @@ -2848,6 +2876,7 @@ void destroy_workqueue(struct workqueue_struct *wq)  	if (wq->flags & WQ_RESCUER) {  		kthread_stop(wq->rescuer->task);  		free_mayday_mask(wq->mayday_mask); +		kfree(wq->rescuer);  	}  	free_cwqs(wq); @@ -3230,6 +3259,8 @@ static int __cpuinit trustee_thread(void *__gcwq)   * multiple times.  To be used by cpu_callback.   
*/  static void __cpuinit wait_trustee_state(struct global_cwq *gcwq, int state) +__releases(&gcwq->lock) +__acquires(&gcwq->lock)  {  	if (!(gcwq->trustee_state == state ||  	      gcwq->trustee_state == TRUSTEE_DONE)) { @@ -3536,8 +3567,7 @@ static int __init init_workqueues(void)  		spin_lock_init(&gcwq->lock);  		INIT_LIST_HEAD(&gcwq->worklist);  		gcwq->cpu = cpu; -		if (cpu == WORK_CPU_UNBOUND) -			gcwq->flags |= GCWQ_DISASSOCIATED; +		gcwq->flags |= GCWQ_DISASSOCIATED;  		INIT_LIST_HEAD(&gcwq->idle_list);  		for (i = 0; i < BUSY_WORKER_HASH_SIZE; i++) @@ -3561,6 +3591,8 @@ static int __init init_workqueues(void)  		struct global_cwq *gcwq = get_gcwq(cpu);  		struct worker *worker; +		if (cpu != WORK_CPU_UNBOUND) +			gcwq->flags &= ~GCWQ_DISASSOCIATED;  		worker = create_worker(gcwq, true);  		BUG_ON(!worker);  		spin_lock_irq(&gcwq->lock);  | 
