| author | Dan Williams <dan.j.williams@intel.com> | 2009-01-06 21:38:14 +0300 |
|---|---|---|
| committer | Dan Williams <dan.j.williams@intel.com> | 2009-01-06 21:38:14 +0300 |
| commit | 2ba05622b8b143b0c95968ba59bddfbd6d2f2559 (patch) | |
| tree | b7b72d02a993ff2ba731d6608f4ab8ce87482bcb /drivers/dma/dmaengine.c | |
| parent | bec085134e446577a983f17f57d642a88d1af53b (diff) | |
| download | linux-2ba05622b8b143b0c95968ba59bddfbd6d2f2559.tar.xz | |
dmaengine: provide a common 'issue_pending_all' implementation
async_tx and net_dma each have an open-coded version of issue_pending_all,
so provide a common routine in dmaengine.

The implementation needs to walk the global device list, so use RCU to
allow dma_issue_pending_all() to run locklessly. Clients protect
themselves from channel-removal events by holding a dmaengine reference
(a usage sketch follows below).
Reviewed-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
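For orientation, the following sketch (not part of the patch) shows the call pattern the message describes: a client that already holds a dmaengine reference submits a prepared descriptor and then asks the core to flush every active channel, rather than open-coding its own walk of dma_device_list the way async_tx and net_dma did. dma_issue_pending_all() and the tx_submit() hook are real dmaengine interfaces of this era; the wrapper function itself is hypothetical.

```c
/* Hypothetical client code, sketched against the dmaengine API of this era. */
#include <linux/dmaengine.h>

static dma_cookie_t example_submit_and_flush(struct dma_async_tx_descriptor *tx)
{
	/* Queue the prepared descriptor on its channel's pending list. */
	dma_cookie_t cookie = tx->tx_submit(tx);

	/*
	 * Kick the hardware on every channel that has an active client.
	 * The caller must hold a dmaengine reference (see the WARN_ONCE()
	 * added below); the core walks dma_device_list under
	 * rcu_read_lock(), so no global lock is taken on this path.
	 */
	dma_issue_pending_all();

	return cookie;
}
```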
Diffstat (limited to 'drivers/dma/dmaengine.c')
-rw-r--r-- | drivers/dma/dmaengine.c | 27 |
1 file changed, 24 insertions(+), 3 deletions(-)
```diff
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index 87a8cd4791ed..418eca28d472 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -70,6 +70,7 @@
 #include <linux/rcupdate.h>
 #include <linux/mutex.h>
 #include <linux/jiffies.h>
+#include <linux/rculist.h>
 
 static DEFINE_MUTEX(dma_list_mutex);
 static LIST_HEAD(dma_device_list);
@@ -366,6 +367,26 @@ struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type)
 EXPORT_SYMBOL(dma_find_channel);
 
 /**
+ * dma_issue_pending_all - flush all pending operations across all channels
+ */
+void dma_issue_pending_all(void)
+{
+        struct dma_device *device;
+        struct dma_chan *chan;
+
+        WARN_ONCE(dmaengine_ref_count == 0,
+                  "client called %s without a reference", __func__);
+
+        rcu_read_lock();
+        list_for_each_entry_rcu(device, &dma_device_list, global_node)
+                list_for_each_entry(chan, &device->channels, device_node)
+                        if (chan->client_count)
+                                device->device_issue_pending(chan);
+        rcu_read_unlock();
+}
+EXPORT_SYMBOL(dma_issue_pending_all);
+
+/**
  * nth_chan - returns the nth channel of the given capability
  * @cap: capability to match
  * @n: nth channel desired
@@ -490,7 +511,7 @@ void dma_async_client_register(struct dma_client *client)
                         err = dma_chan_get(chan);
                         if (err == -ENODEV) {
                                 /* module removed before we could use it */
-                                list_del_init(&device->global_node);
+                                list_del_rcu(&device->global_node);
                                 break;
                         } else if (err)
                                 pr_err("dmaengine: failed to get %s: (%d)\n",
@@ -635,7 +656,7 @@ int dma_async_device_register(struct dma_device *device)
                         goto err_out;
                 }
         }
-        list_add_tail(&device->global_node, &dma_device_list);
+        list_add_tail_rcu(&device->global_node, &dma_device_list);
         dma_channel_rebalance();
         mutex_unlock(&dma_list_mutex);
@@ -677,7 +698,7 @@ void dma_async_device_unregister(struct dma_device *device)
         struct dma_chan *chan;
 
         mutex_lock(&dma_list_mutex);
-        list_del(&device->global_node);
+        list_del_rcu(&device->global_node);
         dma_channel_rebalance();
         mutex_unlock(&dma_list_mutex);
```
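The list_add_tail_rcu()/list_del_rcu() conversions above form the writer side of the RCU scheme that lets dma_issue_pending_all() traverse dma_device_list without taking dma_list_mutex. As a self-contained illustration (generic names, not dmaengine's), the pattern might be sketched as below; the explicit synchronize_rcu() before freeing is shown only to complete the generic pattern, since in dmaengine the commit message instead relies on clients holding a reference to guard against channel removal.

```c
/* Hypothetical, condensed illustration of the RCU-protected list pattern. */
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct example_dev {
	struct list_head node;
	void (*kick)(struct example_dev *dev);
};

static LIST_HEAD(example_list);
static DEFINE_MUTEX(example_mutex);	/* serializes writers only */

/* Writer: publish a new entry; concurrent readers may see it immediately. */
static void example_register(struct example_dev *dev)
{
	mutex_lock(&example_mutex);
	list_add_tail_rcu(&dev->node, &example_list);
	mutex_unlock(&example_mutex);
}

/* Writer: unpublish, then wait out a grace period before freeing. */
static void example_unregister(struct example_dev *dev)
{
	mutex_lock(&example_mutex);
	list_del_rcu(&dev->node);
	mutex_unlock(&example_mutex);

	synchronize_rcu();	/* no reader can still hold a pointer to dev */
	kfree(dev);
}

/* Reader: lockless traversal, mirroring dma_issue_pending_all(). */
static void example_kick_all(void)
{
	struct example_dev *dev;

	rcu_read_lock();
	list_for_each_entry_rcu(dev, &example_list, node)
		dev->kick(dev);
	rcu_read_unlock();
}
```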