author      Dan Williams <dan.j.williams@intel.com>       2011-12-20 04:42:34 +0400
committer   James Bottomley <JBottomley@Parallels.com>    2012-02-19 23:48:51 +0400
commit      b1124cd3ec97406c767b90bf7e93ecd2d2915592 (patch)
tree        d0936775aacff4492177c14a73175738dfb51ee0 /drivers/scsi/libsas/sas_event.c
parent      f8daa6e6d83f60a721752cb53433bfdc1503b45f (diff)
download    linux-b1124cd3ec97406c767b90bf7e93ecd2d2915592.tar.xz
[SCSI] libsas: introduce sas_drain_work()
When an lldd invokes ->notify_port_event(), it can trigger a chain of libsas
events to:
1/ form the port and find the direct attached device
2/ if the attached device is an expander, perform domain discovery
A call to flush_workqueue() will only flush the initial port formation work.
Currently libsas users need to call scsi_flush_work() up to the max depth of
the chain (which will grow from 2 to 3 when ata discovery is moved to its own
discovery event). Instead of open coding multiple calls, switch to
drain_workqueue() to flush sas work.
drain_workqueue() does not handle new work submitted during the drain, so
libsas needs a bit of infrastructure to hold off unchained work submissions
while a drain is in flight. An lldd ->notify() event is considered 'unchained',
while a sas_discover_event() is 'chained'. As Tejun notes:
"For now, I think it would be best to add private wrapper in libsas to
support deferring unchained work items while draining."
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
Signed-off-by: James Bottomley <JBottomley@Parallels.com>
Diffstat (limited to 'drivers/scsi/libsas/sas_event.c')
-rw-r--r--    drivers/scsi/libsas/sas_event.c    55
1 file changed, 55 insertions, 0 deletions
diff --git a/drivers/scsi/libsas/sas_event.c b/drivers/scsi/libsas/sas_event.c
index 9c084bc09bbd..e5035aa4c2a6 100644
--- a/drivers/scsi/libsas/sas_event.c
+++ b/drivers/scsi/libsas/sas_event.c
@@ -22,10 +22,65 @@
  *
  */
 
+#include <linux/export.h>
 #include <scsi/scsi_host.h>
 #include "sas_internal.h"
 #include "sas_dump.h"
 
+static void sas_queue_work(struct sas_ha_struct *ha, struct work_struct *work)
+{
+	if (!test_bit(SAS_HA_REGISTERED, &ha->state))
+		return;
+
+	if (test_bit(SAS_HA_DRAINING, &ha->state))
+		list_add(&work->entry, &ha->defer_q);
+	else
+		scsi_queue_work(ha->core.shost, work);
+}
+
+static void sas_queue_event(int event, unsigned long *pending,
+			    struct work_struct *work,
+			    struct sas_ha_struct *ha)
+{
+	if (!test_and_set_bit(event, pending)) {
+		unsigned long flags;
+
+		spin_lock_irqsave(&ha->state_lock, flags);
+		sas_queue_work(ha, work);
+		spin_unlock_irqrestore(&ha->state_lock, flags);
+	}
+}
+
+int sas_drain_work(struct sas_ha_struct *ha)
+{
+	struct workqueue_struct *wq = ha->core.shost->work_q;
+	struct work_struct *w, *_w;
+	int err;
+
+	err = mutex_lock_interruptible(&ha->drain_mutex);
+	if (err)
+		return err;
+
+	set_bit(SAS_HA_DRAINING, &ha->state);
+	/* flush submitters */
+	spin_lock_irq(&ha->state_lock);
+	spin_unlock_irq(&ha->state_lock);
+
+	drain_workqueue(wq);
+
+	spin_lock_irq(&ha->state_lock);
+	clear_bit(SAS_HA_DRAINING, &ha->state);
+	list_for_each_entry_safe(w, _w, &ha->defer_q, entry) {
+		list_del_init(&w->entry);
+		sas_queue_work(ha, w);
+	}
+	spin_unlock_irq(&ha->state_lock);
+	mutex_unlock(&ha->drain_mutex);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(sas_drain_work);
+
 static void notify_ha_event(struct sas_ha_struct *sas_ha, enum ha_event event)
 {
 	BUG_ON(event >= HA_NUM_EVENTS);
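
The subtle step in sas_drain_work() is the empty lock/unlock pair under the
"/* flush submitters */" comment. Submitters in sas_queue_event() test
SAS_HA_DRAINING while holding ha->state_lock, but the drainer sets the bit
outside the lock, so a submitter may have observed the bit clear and still be
mid-queue. Acquiring and releasing state_lock once waits out any such
straggler, guaranteeing its work item is on the queue before drain_workqueue()
runs; every later submission sees the bit and lands on defer_q instead. Below
is a self-contained sketch of this defer-while-draining idiom: drain_ctx,
CTX_DRAINING, submit() and drain() are made-up names for illustration, while
the locking pattern mirrors sas_queue_work()/sas_drain_work() above.

#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/list.h>
#include <linux/bitops.h>

#define CTX_DRAINING 0

/* Hypothetical context mirroring the patch's sas_ha_struct fields. */
struct drain_ctx {
	unsigned long state;		/* CTX_DRAINING lives here */
	spinlock_t lock;		/* held by every submitter */
	struct list_head defer_q;	/* work held off during a drain */
	struct workqueue_struct *wq;
};

/* Unchained submission path: defer if a drain is in flight. */
static void submit(struct drain_ctx *ctx, struct work_struct *w)
{
	unsigned long flags;

	spin_lock_irqsave(&ctx->lock, flags);
	if (test_bit(CTX_DRAINING, &ctx->state))
		list_add(&w->entry, &ctx->defer_q);
	else
		queue_work(ctx->wq, w);
	spin_unlock_irqrestore(&ctx->lock, flags);
}

static void drain(struct drain_ctx *ctx)
{
	struct work_struct *w, *_w;

	set_bit(CTX_DRAINING, &ctx->state);
	/*
	 * "flush submitters": a submitter that saw CTX_DRAINING clear is
	 * still inside ctx->lock, so acquiring the lock once waits for its
	 * queue_work() to land before the drain starts.
	 */
	spin_lock_irq(&ctx->lock);
	spin_unlock_irq(&ctx->lock);

	drain_workqueue(ctx->wq);	/* also flushes chained resubmissions */

	/* re-queue everything that was held off while draining */
	spin_lock_irq(&ctx->lock);
	clear_bit(CTX_DRAINING, &ctx->state);
	list_for_each_entry_safe(w, _w, &ctx->defer_q, entry) {
		list_del_init(&w->entry);
		queue_work(ctx->wq, w);
	}
	spin_unlock_irq(&ctx->lock);
}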
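
For callers, the patch replaces depth-counting flushes with a single drain.
The sketch below shows the intended migration; struct example_lldd and
example_lldd_shutdown() are hypothetical names for illustration, while
sas_ha_struct, scsi_flush_work() and sas_drain_work() are the real interfaces
involved.

#include <scsi/libsas.h>
#include <scsi/scsi_host.h>

/* Hypothetical lldd wrapper; only the embedded sas_ha matters here. */
struct example_lldd {
	struct sas_ha_struct sas_ha;
	/* ... hardware state ... */
};

static int example_lldd_shutdown(struct example_lldd *lldd)
{
	struct sas_ha_struct *ha = &lldd->sas_ha;
	int err;

	/*
	 * Old scheme: one flush per link in the event chain, because each
	 * work item may queue the next one:
	 *
	 *	scsi_flush_work(ha->core.shost);	// port formation
	 *	scsi_flush_work(ha->core.shost);	// domain discovery
	 *
	 * New scheme: a single call that keeps flushing until the queue
	 * stays empty, covering chained resubmissions at any depth.
	 */
	err = sas_drain_work(ha);
	if (err)	/* interrupted while waiting for drain_mutex */
		return err;

	/* no libsas event work is in flight past this point */
	return 0;
}

Note that because sas_drain_work() takes drain_mutex with
mutex_lock_interruptible(), callers must be prepared for an early error return
when a signal arrives before the drain begins.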