author    Daniel Vetter <daniel.vetter@ffwll.ch>  2013-11-04 19:28:47 +0400
committer Daniel Vetter <daniel.vetter@ffwll.ch>  2013-11-04 19:28:52 +0400
commit    7f16e5c1416070dc590dd333a2d677700046a4ab (patch)
tree      55718bbef64431e70d5ed282be516cd45b3f75e6 /fs/btrfs/async-thread.c
parent    9d1cb9147dbe45f6e94dc796518ecf67cb64b359 (diff)
parent    5e01dc7b26d9f24f39abace5da98ccbd6a5ceb52 (diff)
download  linux-7f16e5c1416070dc590dd333a2d677700046a4ab.tar.xz
Merge tag 'v3.12' into drm-intel-next
I want to merge in the new Broadwell support as a late hw enabling pull request. But since the internal branch was based upon our drm-intel-nightly integration branch, I need to resolve all the outstanding conflicts in drm/i915 with a backmerge to make the 60+ patches apply properly. We'll probably have some fun because Linus will come up with a slightly different merge solution.

Conflicts:
	drivers/gpu/drm/i915/i915_dma.c
	drivers/gpu/drm/i915/i915_drv.c
	drivers/gpu/drm/i915/intel_crt.c
	drivers/gpu/drm/i915/intel_ddi.c
	drivers/gpu/drm/i915/intel_display.c
	drivers/gpu/drm/i915/intel_dp.c
	drivers/gpu/drm/i915/intel_drv.h

All rather simple adjacent-line changes or partial backports from -next to -fixes, with the exception of the thaw code in i915_dma.c. That one needed a bit of shuffling to restore the intent. Oh, and the massive header file reordering in intel_drv.h is a bit of trouble, but not much.

v2: Also don't forget the fixup for the silent conflict that results in a compile fail ...

Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Diffstat (limited to 'fs/btrfs/async-thread.c')
-rw-r--r--  fs/btrfs/async-thread.c  25
1 file changed, 19 insertions(+), 6 deletions(-)
diff --git a/fs/btrfs/async-thread.c b/fs/btrfs/async-thread.c
index 58b7d14b08ee..08cc08f037a6 100644
--- a/fs/btrfs/async-thread.c
+++ b/fs/btrfs/async-thread.c
@@ -107,7 +107,8 @@ static void check_idle_worker(struct btrfs_worker_thread *worker)
worker->idle = 1;
/* the list may be empty if the worker is just starting */
- if (!list_empty(&worker->worker_list)) {
+ if (!list_empty(&worker->worker_list) &&
+ !worker->workers->stopping) {
list_move(&worker->worker_list,
&worker->workers->idle_list);
}
@@ -127,7 +128,8 @@ static void check_busy_worker(struct btrfs_worker_thread *worker)
spin_lock_irqsave(&worker->workers->lock, flags);
worker->idle = 0;
- if (!list_empty(&worker->worker_list)) {
+ if (!list_empty(&worker->worker_list) &&
+ !worker->workers->stopping) {
list_move_tail(&worker->worker_list,
&worker->workers->worker_list);
}
@@ -412,6 +414,7 @@ void btrfs_stop_workers(struct btrfs_workers *workers)
int can_stop;
spin_lock_irq(&workers->lock);
+ workers->stopping = 1;
list_splice_init(&workers->idle_list, &workers->worker_list);
while (!list_empty(&workers->worker_list)) {
cur = workers->worker_list.next;
@@ -455,6 +458,7 @@ void btrfs_init_workers(struct btrfs_workers *workers, char *name, int max,
workers->ordered = 0;
workers->atomic_start_pending = 0;
workers->atomic_worker_start = async_helper;
+ workers->stopping = 0;
}
/*
@@ -480,15 +484,19 @@ static int __btrfs_start_workers(struct btrfs_workers *workers)
atomic_set(&worker->num_pending, 0);
atomic_set(&worker->refs, 1);
worker->workers = workers;
- worker->task = kthread_run(worker_loop, worker,
- "btrfs-%s-%d", workers->name,
- workers->num_workers + 1);
+ worker->task = kthread_create(worker_loop, worker,
+ "btrfs-%s-%d", workers->name,
+ workers->num_workers + 1);
if (IS_ERR(worker->task)) {
ret = PTR_ERR(worker->task);
- kfree(worker);
goto fail;
}
+
spin_lock_irq(&workers->lock);
+ if (workers->stopping) {
+ spin_unlock_irq(&workers->lock);
+ goto fail_kthread;
+ }
list_add_tail(&worker->worker_list, &workers->idle_list);
worker->idle = 1;
workers->num_workers++;
@@ -496,8 +504,13 @@ static int __btrfs_start_workers(struct btrfs_workers *workers)
WARN_ON(workers->num_workers_starting < 0);
spin_unlock_irq(&workers->lock);
+ wake_up_process(worker->task);
return 0;
+
+fail_kthread:
+ kthread_stop(worker->task);
fail:
+ kfree(worker);
spin_lock_irq(&workers->lock);
workers->num_workers_starting--;
spin_unlock_irq(&workers->lock);
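
For readers skimming the btrfs side of this backmerge, the hunks above close a race between starting a worker and btrfs_stop_workers(): the worker thread is now created with kthread_create() instead of kthread_run(), the pool's new stopping flag is checked under workers->lock before the worker is added to the idle list, and only then is the thread woken with wake_up_process(). What follows is a minimal, hedged sketch of that ordering, not the btrfs code itself: struct pool, struct worker, start_worker() and worker_fn() are hypothetical stand-ins, and only the kthread, list and spinlock calls are real kernel APIs.

/*
 * Minimal sketch of the start/stop ordering shown in the diff above.
 * struct pool / struct worker are hypothetical stand-ins for
 * btrfs_workers / btrfs_worker_thread; everything else is illustrative.
 */
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct pool {				/* hypothetical stand-in for btrfs_workers */
	spinlock_t lock;
	struct list_head idle_list;
	int stopping;
};

struct worker {				/* hypothetical stand-in for btrfs_worker_thread */
	struct list_head list;
	struct task_struct *task;
};

static int worker_fn(void *data)
{
	/* Placeholder loop: sleep until someone asks the thread to stop. */
	set_current_state(TASK_INTERRUPTIBLE);
	while (!kthread_should_stop()) {
		schedule();
		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);
	return 0;
}

static int start_worker(struct pool *pool)
{
	struct worker *w = kzalloc(sizeof(*w), GFP_NOFS);
	int ret;

	if (!w)
		return -ENOMEM;

	/* Create the thread stopped so it cannot touch the pool yet. */
	w->task = kthread_create(worker_fn, w, "example-worker");
	if (IS_ERR(w->task)) {
		ret = PTR_ERR(w->task);
		kfree(w);
		return ret;
	}

	spin_lock_irq(&pool->lock);
	if (pool->stopping) {
		/* Lost the race against shutdown: never publish the worker. */
		spin_unlock_irq(&pool->lock);
		kthread_stop(w->task);	/* wakes the thread and waits for it to exit */
		kfree(w);
		return -EBUSY;		/* arbitrary error code for this sketch */
	}
	list_add_tail(&w->list, &pool->idle_list);
	spin_unlock_irq(&pool->lock);

	/* Only wake the thread once it is safely on a pool that is not stopping. */
	wake_up_process(w->task);
	return 0;
}

The point of the ordering is that a racing shutdown can no longer miss a worker: either the stopper sees the worker on the list and stops it itself, or the starter sees stopping set under the lock and tears the thread down before it ever runs.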