author     Linus Torvalds <torvalds@linux-foundation.org>   2010-08-07 23:42:58 +0400
committer  Linus Torvalds <torvalds@linux-foundation.org>   2010-08-07 23:42:58 +0400
commit     3b7433b8a8a83c87972065b1852b7dcae691e464 (patch)
tree       93fa2c003f8baef5ab0733b53bac77961ed5240c /drivers
parent     4a386c3e177ca2fbc70c9283d0b46537844763a0 (diff)
parent     6ee0578b4daaea01c96b172c6aacca43fd9807a6 (diff)
download   linux-3b7433b8a8a83c87972065b1852b7dcae691e464.tar.xz
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/wq
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/wq: (55 commits)
  workqueue: mark init_workqueues() as early_initcall()
  workqueue: explain for_each_*cwq_cpu() iterators
  fscache: fix build on !CONFIG_SYSCTL
  slow-work: kill it
  gfs2: use workqueue instead of slow-work
  drm: use workqueue instead of slow-work
  cifs: use workqueue instead of slow-work
  fscache: drop references to slow-work
  fscache: convert operation to use workqueue instead of slow-work
  fscache: convert object to use workqueue instead of slow-work
  workqueue: fix how cpu number is stored in work->data
  workqueue: fix mayday_mask handling on UP
  workqueue: fix build problem on !CONFIG_SMP
  workqueue: fix locking in retry path of maybe_create_worker()
  async: use workqueue for worker pool
  workqueue: remove WQ_SINGLE_CPU and use WQ_UNBOUND instead
  workqueue: implement unbound workqueue
  workqueue: prepare for WQ_UNBOUND implementation
  libata: take advantage of cmwq and remove concurrency limitations
  workqueue: fix worker management invocation without pending works
  ...

Fixed up conflicts in fs/cifs/* as per Tejun.  Other trivial conflicts in
include/linux/workqueue.h, kernel/trace/Kconfig and kernel/workqueue.c
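For context, the drivers/ hunks below all move away from private (often single-threaded) workqueues and onto the shared system workqueues that cmwq introduces. A minimal sketch of how those shared queues are used, assuming the 2.6.36-era API; the work items and handlers here are illustrative, not code from this merge:

#include <linux/workqueue.h>

static struct work_struct housekeeping_work;
static struct delayed_work poll_work;

static void housekeeping_fn(struct work_struct *work)
{
	/* short-lived deferred work; runs on the shared system_wq pool */
}

static void poll_fn(struct work_struct *work)
{
	/* periodic work; may requeue itself */
}

static void cmwq_example(void)
{
	INIT_WORK(&housekeeping_work, housekeeping_fn);
	INIT_DELAYED_WORK(&poll_work, poll_fn);

	/* no private workqueue needed: queue onto the shared pools */
	schedule_work(&housekeeping_work);		/* targets system_wq */
	queue_delayed_work(system_nrt_wq,		/* non-reentrant queue */
			   &poll_work, 10 * HZ);
}

schedule_work() targets system_wq; system_long_wq and system_nrt_wq are the shared queues for long-running and non-reentrant items, both of which show up in the conversions below.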
Diffstat (limited to 'drivers')
-rw-r--r--   drivers/acpi/osl.c                        40
-rw-r--r--   drivers/ata/libata-core.c                 20
-rw-r--r--   drivers/ata/libata-eh.c                    4
-rw-r--r--   drivers/ata/libata-scsi.c                 10
-rw-r--r--   drivers/ata/libata-sff.c                   9
-rw-r--r--   drivers/ata/libata.h                       1
-rw-r--r--   drivers/gpu/drm/drm_crtc_helper.c         29
-rw-r--r--   drivers/media/video/ivtv/ivtv-driver.c    26
-rw-r--r--   drivers/media/video/ivtv/ivtv-driver.h     8
-rw-r--r--   drivers/media/video/ivtv/ivtv-irq.c       15
-rw-r--r--   drivers/media/video/ivtv/ivtv-irq.h        2
11 files changed, 59 insertions(+), 105 deletions(-)
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index 78418ce4fc78..46cce391fa46 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -191,36 +191,11 @@ acpi_status __init acpi_os_initialize(void)
return AE_OK;
}
-static void bind_to_cpu0(struct work_struct *work)
-{
- set_cpus_allowed_ptr(current, cpumask_of(0));
- kfree(work);
-}
-
-static void bind_workqueue(struct workqueue_struct *wq)
-{
- struct work_struct *work;
-
- work = kzalloc(sizeof(struct work_struct), GFP_KERNEL);
- INIT_WORK(work, bind_to_cpu0);
- queue_work(wq, work);
-}
-
acpi_status acpi_os_initialize1(void)
{
- /*
- * On some machines, a software-initiated SMI causes corruption unless
- * the SMI runs on CPU 0. An SMI can be initiated by any AML, but
- * typically it's done in GPE-related methods that are run via
- * workqueues, so we can avoid the known corruption cases by binding
- * the workqueues to CPU 0.
- */
- kacpid_wq = create_singlethread_workqueue("kacpid");
- bind_workqueue(kacpid_wq);
- kacpi_notify_wq = create_singlethread_workqueue("kacpi_notify");
- bind_workqueue(kacpi_notify_wq);
- kacpi_hotplug_wq = create_singlethread_workqueue("kacpi_hotplug");
- bind_workqueue(kacpi_hotplug_wq);
+ kacpid_wq = create_workqueue("kacpid");
+ kacpi_notify_wq = create_workqueue("kacpi_notify");
+ kacpi_hotplug_wq = create_workqueue("kacpi_hotplug");
BUG_ON(!kacpid_wq);
BUG_ON(!kacpi_notify_wq);
BUG_ON(!kacpi_hotplug_wq);
@@ -766,7 +741,14 @@ static acpi_status __acpi_os_execute(acpi_execute_type type,
else
INIT_WORK(&dpc->work, acpi_os_execute_deferred);
- ret = queue_work(queue, &dpc->work);
+ /*
+ * On some machines, a software-initiated SMI causes corruption unless
+ * the SMI runs on CPU 0. An SMI can be initiated by any AML, but
+ * typically it's done in GPE-related methods that are run via
+ * workqueues, so we can avoid the known corruption cases by always
+ * queueing on CPU 0.
+ */
+ ret = queue_work_on(0, queue, &dpc->work);
if (!ret) {
printk(KERN_ERR PREFIX
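The hunk above drops the per-queue CPU-binding hack; with cmwq the CPU-0 requirement is expressed per work item at queueing time. A minimal sketch of that pattern, assuming the 2.6.36 workqueue API; the function and structure names are placeholders, not the actual osl.c code:

#include <linux/workqueue.h>

static void smi_sensitive_fn(struct work_struct *work)
{
	/* any software-initiated SMI triggered from here now always
	 * runs on CPU 0, avoiding the known corruption cases */
}

static int queue_smi_sensitive(struct workqueue_struct *wq,
			       struct work_struct *work)
{
	INIT_WORK(work, smi_sensitive_fn);
	/* queue_work_on() queues on the given CPU; returns 0 if the
	 * work item was already pending */
	return queue_work_on(0, wq, work);
}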
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index a0a4d6968400..4972fdf4bd31 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -98,8 +98,6 @@ static unsigned long ata_dev_blacklisted(const struct ata_device *dev);
unsigned int ata_print_id = 1;
-struct workqueue_struct *ata_aux_wq;
-
struct ata_force_param {
const char *name;
unsigned int cbl;
@@ -5594,6 +5592,7 @@ struct ata_port *ata_port_alloc(struct ata_host *host)
ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN;
#endif
+ mutex_init(&ap->scsi_scan_mutex);
INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug);
INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan);
INIT_LIST_HEAD(&ap->eh_done_q);
@@ -6532,29 +6531,20 @@ static int __init ata_init(void)
ata_parse_force_param();
- ata_aux_wq = create_singlethread_workqueue("ata_aux");
- if (!ata_aux_wq)
- goto fail;
-
rc = ata_sff_init();
- if (rc)
- goto fail;
+ if (rc) {
+ kfree(ata_force_tbl);
+ return rc;
+ }
printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
return 0;
-
-fail:
- kfree(ata_force_tbl);
- if (ata_aux_wq)
- destroy_workqueue(ata_aux_wq);
- return rc;
}
static void __exit ata_exit(void)
{
ata_sff_exit();
kfree(ata_force_tbl);
- destroy_workqueue(ata_aux_wq);
}
subsys_initcall(ata_init);
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index 697474b625b7..c9ae299b8342 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -727,7 +727,7 @@ void ata_scsi_error(struct Scsi_Host *host)
if (ap->pflags & ATA_PFLAG_LOADING)
ap->pflags &= ~ATA_PFLAG_LOADING;
else if (ap->pflags & ATA_PFLAG_SCSI_HOTPLUG)
- queue_delayed_work(ata_aux_wq, &ap->hotplug_task, 0);
+ schedule_delayed_work(&ap->hotplug_task, 0);
if (ap->pflags & ATA_PFLAG_RECOVERED)
ata_port_printk(ap, KERN_INFO, "EH complete\n");
@@ -2945,7 +2945,7 @@ static int ata_eh_revalidate_and_attach(struct ata_link *link,
ehc->i.flags |= ATA_EHI_SETMODE;
/* schedule the scsi_rescan_device() here */
- queue_work(ata_aux_wq, &(ap->scsi_rescan_task));
+ schedule_work(&(ap->scsi_rescan_task));
} else if (dev->class == ATA_DEV_UNKNOWN &&
ehc->tries[dev->devno] &&
ata_class_enabled(ehc->classes[dev->devno])) {
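With ata_aux_wq gone, EH simply hands hotplug and rescan work to the shared system workqueue; schedule_delayed_work() and schedule_work() need no dedicated thread. A minimal sketch under the same assumption, with placeholder names rather than the real libata fields:

#include <linux/workqueue.h>

static void kick_hotplug(struct delayed_work *hotplug_task,
			 struct work_struct *rescan_task)
{
	schedule_delayed_work(hotplug_task, 0);	/* run asap on system_wq */
	schedule_work(rescan_task);		/* likewise */
}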
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index a54273d2c3c6..d75c9c479d1a 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -3435,7 +3435,7 @@ void ata_scsi_scan_host(struct ata_port *ap, int sync)
" switching to async\n");
}
- queue_delayed_work(ata_aux_wq, &ap->hotplug_task,
+ queue_delayed_work(system_long_wq, &ap->hotplug_task,
round_jiffies_relative(HZ));
}
@@ -3582,6 +3582,7 @@ void ata_scsi_hotplug(struct work_struct *work)
}
DPRINTK("ENTER\n");
+ mutex_lock(&ap->scsi_scan_mutex);
/* Unplug detached devices. We cannot use link iterator here
* because PMP links have to be scanned even if PMP is
@@ -3595,6 +3596,7 @@ void ata_scsi_hotplug(struct work_struct *work)
/* scan for new ones */
ata_scsi_scan_host(ap, 0);
+ mutex_unlock(&ap->scsi_scan_mutex);
DPRINTK("EXIT\n");
}
@@ -3673,9 +3675,7 @@ static int ata_scsi_user_scan(struct Scsi_Host *shost, unsigned int channel,
* @work: Pointer to ATA port to perform scsi_rescan_device()
*
* After ATA pass thru (SAT) commands are executed successfully,
- * libata need to propagate the changes to SCSI layer. This
- * function must be executed from ata_aux_wq such that sdev
- * attach/detach don't race with rescan.
+ * libata need to propagate the changes to SCSI layer.
*
* LOCKING:
* Kernel thread context (may sleep).
@@ -3688,6 +3688,7 @@ void ata_scsi_dev_rescan(struct work_struct *work)
struct ata_device *dev;
unsigned long flags;
+ mutex_lock(&ap->scsi_scan_mutex);
spin_lock_irqsave(ap->lock, flags);
ata_for_each_link(link, ap, EDGE) {
@@ -3707,6 +3708,7 @@ void ata_scsi_dev_rescan(struct work_struct *work)
}
spin_unlock_irqrestore(ap->lock, flags);
+ mutex_unlock(&ap->scsi_scan_mutex);
}
/**
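Previously, ordering between the hotplug scan and scsi_rescan_device() relied on ata_aux_wq being single-threaded; with the work now spread across shared queues, the serialization becomes an explicit mutex. A minimal sketch of the pattern these hunks add; the structure and function names are illustrative, not the full libata code:

#include <linux/mutex.h>
#include <linux/workqueue.h>

struct my_port {
	struct mutex		scan_mutex;	/* serializes hotplug vs. rescan */
	struct delayed_work	hotplug_task;
	struct work_struct	rescan_task;
};

static void my_hotplug_fn(struct work_struct *work)
{
	struct my_port *p = container_of(to_delayed_work(work),
					 struct my_port, hotplug_task);

	mutex_lock(&p->scan_mutex);
	/* ... unplug detached devices, scan for new ones ... */
	mutex_unlock(&p->scan_mutex);
}

static void my_rescan_fn(struct work_struct *work)
{
	struct my_port *p = container_of(work, struct my_port, rescan_task);

	mutex_lock(&p->scan_mutex);
	/* ... propagate changes to the SCSI layer ... */
	mutex_unlock(&p->scan_mutex);
}

The explicit lock also documents the dependency, which the old "must run on ata_aux_wq" comment only implied.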
diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
index efa4a18cfb9d..674c1436491f 100644
--- a/drivers/ata/libata-sff.c
+++ b/drivers/ata/libata-sff.c
@@ -3318,14 +3318,7 @@ void ata_sff_port_init(struct ata_port *ap)
int __init ata_sff_init(void)
{
- /*
- * FIXME: In UP case, there is only one workqueue thread and if you
- * have more than one PIO device, latency is bloody awful, with
- * occasional multi-second "hiccups" as one PIO device waits for
- * another. It's an ugly wart that users DO occasionally complain
- * about; luckily most users have at most one PIO polled device.
- */
- ata_sff_wq = create_workqueue("ata_sff");
+ ata_sff_wq = alloc_workqueue("ata_sff", WQ_RESCUER, WQ_MAX_ACTIVE);
if (!ata_sff_wq)
return -ENOMEM;
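The SFF polling path keeps a dedicated workqueue, but allocates it through the new interface: WQ_RESCUER guarantees a rescuer thread so PIO completion can make progress under memory pressure, and WQ_MAX_ACTIVE lifts the one-thread-per-CPU limit behind the multi-second PIO "hiccups" the removed comment described. A minimal usage sketch, assuming only the flag names visible in this hunk:

#include <linux/workqueue.h>

static struct workqueue_struct *pio_wq;

static int __init pio_wq_init(void)
{
	/* rescuer for forward progress, full concurrency for multiple
	 * polled devices */
	pio_wq = alloc_workqueue("pio_wq", WQ_RESCUER, WQ_MAX_ACTIVE);
	if (!pio_wq)
		return -ENOMEM;
	return 0;
}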
diff --git a/drivers/ata/libata.h b/drivers/ata/libata.h
index 4b84ed60324a..9ce1ecc63e39 100644
--- a/drivers/ata/libata.h
+++ b/drivers/ata/libata.h
@@ -54,7 +54,6 @@ enum {
};
extern unsigned int ata_print_id;
-extern struct workqueue_struct *ata_aux_wq;
extern int atapi_passthru16;
extern int libata_fua;
extern int libata_noacpi;
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index 11fe9c870d17..45981304feb8 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -831,13 +831,11 @@ int drm_helper_resume_force_mode(struct drm_device *dev)
}
EXPORT_SYMBOL(drm_helper_resume_force_mode);
-static struct slow_work_ops output_poll_ops;
-
#define DRM_OUTPUT_POLL_PERIOD (10*HZ)
-static void output_poll_execute(struct slow_work *work)
+static void output_poll_execute(struct work_struct *work)
{
- struct delayed_slow_work *delayed_work = container_of(work, struct delayed_slow_work, work);
- struct drm_device *dev = container_of(delayed_work, struct drm_device, mode_config.output_poll_slow_work);
+ struct delayed_work *delayed_work = to_delayed_work(work);
+ struct drm_device *dev = container_of(delayed_work, struct drm_device, mode_config.output_poll_work);
struct drm_connector *connector;
enum drm_connector_status old_status, status;
bool repoll = false, changed = false;
@@ -877,7 +875,7 @@ static void output_poll_execute(struct slow_work *work)
}
if (repoll) {
- ret = delayed_slow_work_enqueue(delayed_work, DRM_OUTPUT_POLL_PERIOD);
+ ret = queue_delayed_work(system_nrt_wq, delayed_work, DRM_OUTPUT_POLL_PERIOD);
if (ret)
DRM_ERROR("delayed enqueue failed %d\n", ret);
}
@@ -887,7 +885,7 @@ void drm_kms_helper_poll_disable(struct drm_device *dev)
{
if (!dev->mode_config.poll_enabled)
return;
- delayed_slow_work_cancel(&dev->mode_config.output_poll_slow_work);
+ cancel_delayed_work_sync(&dev->mode_config.output_poll_work);
}
EXPORT_SYMBOL(drm_kms_helper_poll_disable);
@@ -903,7 +901,7 @@ void drm_kms_helper_poll_enable(struct drm_device *dev)
}
if (poll) {
- ret = delayed_slow_work_enqueue(&dev->mode_config.output_poll_slow_work, DRM_OUTPUT_POLL_PERIOD);
+ ret = queue_delayed_work(system_nrt_wq, &dev->mode_config.output_poll_work, DRM_OUTPUT_POLL_PERIOD);
if (ret)
DRM_ERROR("delayed enqueue failed %d\n", ret);
}
@@ -912,9 +910,7 @@ EXPORT_SYMBOL(drm_kms_helper_poll_enable);
void drm_kms_helper_poll_init(struct drm_device *dev)
{
- slow_work_register_user(THIS_MODULE);
- delayed_slow_work_init(&dev->mode_config.output_poll_slow_work,
- &output_poll_ops);
+ INIT_DELAYED_WORK(&dev->mode_config.output_poll_work, output_poll_execute);
dev->mode_config.poll_enabled = true;
drm_kms_helper_poll_enable(dev);
@@ -924,7 +920,6 @@ EXPORT_SYMBOL(drm_kms_helper_poll_init);
void drm_kms_helper_poll_fini(struct drm_device *dev)
{
drm_kms_helper_poll_disable(dev);
- slow_work_unregister_user(THIS_MODULE);
}
EXPORT_SYMBOL(drm_kms_helper_poll_fini);
@@ -932,12 +927,8 @@ void drm_helper_hpd_irq_event(struct drm_device *dev)
{
if (!dev->mode_config.poll_enabled)
return;
- delayed_slow_work_cancel(&dev->mode_config.output_poll_slow_work);
- /* schedule a slow work asap */
- delayed_slow_work_enqueue(&dev->mode_config.output_poll_slow_work, 0);
+ /* kill timer and schedule immediate execution, this doesn't block */
+ cancel_delayed_work(&dev->mode_config.output_poll_work);
+ queue_delayed_work(system_nrt_wq, &dev->mode_config.output_poll_work, 0);
}
EXPORT_SYMBOL(drm_helper_hpd_irq_event);
-
-static struct slow_work_ops output_poll_ops = {
- .execute = output_poll_execute,
-};
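The DRM output-polling conversion replaces delayed slow-work with an ordinary delayed_work queued on system_nrt_wq, so re-arming, cancelling, and forcing an immediate run become the standard delayed-work calls. A minimal self-rearming poller in the same shape; the names are illustrative, not the drm_crtc_helper code:

#include <linux/workqueue.h>

#define POLL_PERIOD (10 * HZ)

static struct delayed_work poll_work;

static void poll_execute(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	bool repoll = true;

	/* ... probe connector status, decide whether to repoll ... */

	if (repoll)
		queue_delayed_work(system_nrt_wq, dwork, POLL_PERIOD);
}

static void poll_init(void)
{
	INIT_DELAYED_WORK(&poll_work, poll_execute);
	queue_delayed_work(system_nrt_wq, &poll_work, POLL_PERIOD);
}

static void poll_force_now(void)
{
	/* non-blocking: kill the pending timer, then run asap */
	cancel_delayed_work(&poll_work);
	queue_delayed_work(system_nrt_wq, &poll_work, 0);
}

static void poll_disable(void)
{
	cancel_delayed_work_sync(&poll_work);	/* waits for a running instance */
}

Using the non-reentrant system queue preserves the slow-work property that at most one instance of the poller runs at a time.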
diff --git a/drivers/media/video/ivtv/ivtv-driver.c b/drivers/media/video/ivtv/ivtv-driver.c
index 90daa6e751d8..07c5c18a25cb 100644
--- a/drivers/media/video/ivtv/ivtv-driver.c
+++ b/drivers/media/video/ivtv/ivtv-driver.c
@@ -705,6 +705,8 @@ done:
*/
static int __devinit ivtv_init_struct1(struct ivtv *itv)
{
+ struct sched_param param = { .sched_priority = 99 };
+
itv->base_addr = pci_resource_start(itv->pdev, 0);
itv->enc_mbox.max_mbox = 2; /* the encoder has 3 mailboxes (0-2) */
itv->dec_mbox.max_mbox = 1; /* the decoder has 2 mailboxes (0-1) */
@@ -716,13 +718,17 @@ static int __devinit ivtv_init_struct1(struct ivtv *itv)
spin_lock_init(&itv->lock);
spin_lock_init(&itv->dma_reg_lock);
- itv->irq_work_queues = create_singlethread_workqueue(itv->v4l2_dev.name);
- if (itv->irq_work_queues == NULL) {
- IVTV_ERR("Could not create ivtv workqueue\n");
+ init_kthread_worker(&itv->irq_worker);
+ itv->irq_worker_task = kthread_run(kthread_worker_fn, &itv->irq_worker,
+ itv->v4l2_dev.name);
+ if (IS_ERR(itv->irq_worker_task)) {
+ IVTV_ERR("Could not create ivtv task\n");
return -1;
}
+ /* must use the FIFO scheduler as it is realtime sensitive */
+ sched_setscheduler(itv->irq_worker_task, SCHED_FIFO, &param);
- INIT_WORK(&itv->irq_work_queue, ivtv_irq_work_handler);
+ init_kthread_work(&itv->irq_work, ivtv_irq_work_handler);
/* start counting open_id at 1 */
itv->open_id = 1;
@@ -1006,7 +1012,7 @@ static int __devinit ivtv_probe(struct pci_dev *pdev,
/* PCI Device Setup */
retval = ivtv_setup_pci(itv, pdev, pci_id);
if (retval == -EIO)
- goto free_workqueue;
+ goto free_worker;
if (retval == -ENXIO)
goto free_mem;
@@ -1218,8 +1224,8 @@ free_mem:
release_mem_region(itv->base_addr + IVTV_REG_OFFSET, IVTV_REG_SIZE);
if (itv->has_cx23415)
release_mem_region(itv->base_addr + IVTV_DECODER_OFFSET, IVTV_DECODER_SIZE);
-free_workqueue:
- destroy_workqueue(itv->irq_work_queues);
+free_worker:
+ kthread_stop(itv->irq_worker_task);
err:
if (retval == 0)
retval = -ENODEV;
@@ -1363,9 +1369,9 @@ static void ivtv_remove(struct pci_dev *pdev)
ivtv_set_irq_mask(itv, 0xffffffff);
del_timer_sync(&itv->dma_timer);
- /* Stop all Work Queues */
- flush_workqueue(itv->irq_work_queues);
- destroy_workqueue(itv->irq_work_queues);
+ /* Kill irq worker */
+ flush_kthread_worker(&itv->irq_worker);
+ kthread_stop(itv->irq_worker_task);
ivtv_streams_cleanup(itv, 1);
ivtv_udma_free(itv);
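ivtv needs its deferred IRQ handling to run SCHED_FIFO, which shared workqueue workers cannot provide, so it moves to the then-new kthread_worker facility: the driver owns the task and can set its scheduling class. A minimal sketch with the 2.6.36 helper names used in this hunk; the driver-specific names are placeholders:

#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/err.h>

static struct kthread_worker irq_worker;
static struct task_struct *irq_worker_task;
static struct kthread_work irq_work;

static void irq_work_fn(struct kthread_work *work)
{
	/* deferred PIO/YUV/VBI handling runs here, in the RT task */
}

static int worker_start(const char *name)
{
	struct sched_param param = { .sched_priority = 99 };

	init_kthread_worker(&irq_worker);
	irq_worker_task = kthread_run(kthread_worker_fn, &irq_worker,
				      "%s", name);
	if (IS_ERR(irq_worker_task))
		return PTR_ERR(irq_worker_task);

	/* a workqueue worker is shared and cannot be made realtime;
	 * an owned kthread can */
	sched_setscheduler(irq_worker_task, SCHED_FIFO, &param);
	init_kthread_work(&irq_work, irq_work_fn);
	return 0;
}

static void worker_stop(void)
{
	flush_kthread_worker(&irq_worker);	/* drain queued work */
	kthread_stop(irq_worker_task);
}

Setting the priority once at probe time also lets the driver drop the IVTV_F_I_WORK_INITED "first run" hack removed further down.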
diff --git a/drivers/media/video/ivtv/ivtv-driver.h b/drivers/media/video/ivtv/ivtv-driver.h
index bd084df4448a..102071246218 100644
--- a/drivers/media/video/ivtv/ivtv-driver.h
+++ b/drivers/media/video/ivtv/ivtv-driver.h
@@ -51,7 +51,7 @@
#include <linux/unistd.h>
#include <linux/pagemap.h>
#include <linux/scatterlist.h>
-#include <linux/workqueue.h>
+#include <linux/kthread.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <asm/uaccess.h>
@@ -260,7 +260,6 @@ struct ivtv_mailbox_data {
#define IVTV_F_I_DEC_PAUSED 20 /* the decoder is paused */
#define IVTV_F_I_INITED 21 /* set after first open */
#define IVTV_F_I_FAILED 22 /* set if first open failed */
-#define IVTV_F_I_WORK_INITED 23 /* worker thread was initialized */
/* Event notifications */
#define IVTV_F_I_EV_DEC_STOPPED 28 /* decoder stopped event */
@@ -666,8 +665,9 @@ struct ivtv {
/* Interrupts & DMA */
u32 irqmask; /* active interrupts */
u32 irq_rr_idx; /* round-robin stream index */
- struct workqueue_struct *irq_work_queues; /* workqueue for PIO/YUV/VBI actions */
- struct work_struct irq_work_queue; /* work entry */
+ struct kthread_worker irq_worker; /* kthread worker for PIO/YUV/VBI actions */
+ struct task_struct *irq_worker_task; /* task for irq_worker */
+ struct kthread_work irq_work; /* kthread work entry */
spinlock_t dma_reg_lock; /* lock access to DMA engine registers */
int cur_dma_stream; /* index of current stream doing DMA (-1 if none) */
int cur_pio_stream; /* index of current stream doing PIO (-1 if none) */
diff --git a/drivers/media/video/ivtv/ivtv-irq.c b/drivers/media/video/ivtv/ivtv-irq.c
index fea1ec33b0df..9b4faf009196 100644
--- a/drivers/media/video/ivtv/ivtv-irq.c
+++ b/drivers/media/video/ivtv/ivtv-irq.c
@@ -71,19 +71,10 @@ static void ivtv_pio_work_handler(struct ivtv *itv)
write_reg(IVTV_IRQ_ENC_PIO_COMPLETE, 0x44);
}
-void ivtv_irq_work_handler(struct work_struct *work)
+void ivtv_irq_work_handler(struct kthread_work *work)
{
- struct ivtv *itv = container_of(work, struct ivtv, irq_work_queue);
+ struct ivtv *itv = container_of(work, struct ivtv, irq_work);
- DEFINE_WAIT(wait);
-
- if (test_and_clear_bit(IVTV_F_I_WORK_INITED, &itv->i_flags)) {
- struct sched_param param = { .sched_priority = 99 };
-
- /* This thread must use the FIFO scheduler as it
- is realtime sensitive. */
- sched_setscheduler(current, SCHED_FIFO, &param);
- }
if (test_and_clear_bit(IVTV_F_I_WORK_HANDLER_PIO, &itv->i_flags))
ivtv_pio_work_handler(itv);
@@ -975,7 +966,7 @@ irqreturn_t ivtv_irq_handler(int irq, void *dev_id)
}
if (test_and_clear_bit(IVTV_F_I_HAVE_WORK, &itv->i_flags)) {
- queue_work(itv->irq_work_queues, &itv->irq_work_queue);
+ queue_kthread_work(&itv->irq_worker, &itv->irq_work);
}
spin_unlock(&itv->dma_reg_lock);
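Queueing from the hard IRQ path stays symmetrical with the old workqueue call: queue_kthread_work() only links the item and wakes the worker, so it is safe with interrupts disabled, and the handler later runs on the RT worker task. A minimal sketch, assuming the worker set up at probe time as above:

#include <linux/interrupt.h>
#include <linux/kthread.h>

extern struct kthread_worker irq_worker;	/* initialized at probe time */
extern struct kthread_work irq_work;

static irqreturn_t my_irq_handler(int irq, void *dev_id)
{
	/* ... ack hardware, collect status, set work flags ... */

	queue_kthread_work(&irq_worker, &irq_work);
	return IRQ_HANDLED;
}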
diff --git a/drivers/media/video/ivtv/ivtv-irq.h b/drivers/media/video/ivtv/ivtv-irq.h
index f879a5822e71..1e84433737cc 100644
--- a/drivers/media/video/ivtv/ivtv-irq.h
+++ b/drivers/media/video/ivtv/ivtv-irq.h
@@ -46,7 +46,7 @@
irqreturn_t ivtv_irq_handler(int irq, void *dev_id);
-void ivtv_irq_work_handler(struct work_struct *work);
+void ivtv_irq_work_handler(struct kthread_work *work);
void ivtv_dma_stream_dec_prepare(struct ivtv_stream *s, u32 offset, int lock);
void ivtv_unfinished_dma(unsigned long arg);