author    Sinan Kaya <okaya@codeaurora.org>    2017-06-30 17:43:05 +0300
committer Vinod Koul <vinod.koul@intel.com>    2017-07-03 08:09:51 +0300
commit    99efdb3e48fb2fa84addb3102946d3eca341192b (patch)
tree      5d9d7116af71e62aeff4a8e2d52d4b1f88376395 /drivers/dma/qcom/hidma.c
parent    82474dade7f4c60206609e7274591d159db105bf (diff)
download  linux-99efdb3e48fb2fa84addb3102946d3eca341192b.tar.xz
dmaengine: qcom_hidma: correct API violation for submit
The current code violates the DMA Engine API by putting submitted requests directly into the HW queue. This causes queued transactions to be started by another thread as soon as the first one finishes, even though the DMA Engine documentation clearly states that "dmaengine_submit() will not start the DMA operation".

Move HW queuing of the requests into the issue_pending() routine to comply with the API requirements, and create a new "queued" state for temporarily holding the requests. A descriptor now goes through the transitions

    free -> prepared -> queued -> active -> completed -> free

as opposed to

    free -> prepared -> active -> completed -> free

Signed-off-by: Sinan Kaya <okaya@codeaurora.org>
Signed-off-by: Vinod Koul <vinod.koul@intel.com>
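For context, the client-side contract this patch restores looks roughly as follows. This is a minimal sketch, not taken from the patch; the function name and the memcpy transfer type are illustrative.

/* Minimal dmaengine client sketch: dmaengine_submit() must not start
 * the transfer; only dma_async_issue_pending() may touch the HW.
 */
static int example_memcpy(struct dma_chan *chan, dma_addr_t dst,
			  dma_addr_t src, size_t len)
{
	struct dma_async_tx_descriptor *tx;
	dma_cookie_t cookie;

	tx = dmaengine_prep_dma_memcpy(chan, dst, src, len,
				       DMA_PREP_INTERRUPT);
	if (!tx)
		return -ENOMEM;

	cookie = dmaengine_submit(tx);	/* prepared -> queued, HW untouched */
	if (dma_submit_error(cookie))
		return -EINVAL;

	dma_async_issue_pending(chan);	/* queued -> active, HW starts */
	return 0;
}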
Diffstat (limited to 'drivers/dma/qcom/hidma.c')
-rw-r--r--  drivers/dma/qcom/hidma.c | 15 ++++++++++++---
1 file changed, 12 insertions(+), 3 deletions(-)
diff --git a/drivers/dma/qcom/hidma.c b/drivers/dma/qcom/hidma.c
index 84e3699a19bd..34fb6afd229b 100644
--- a/drivers/dma/qcom/hidma.c
+++ b/drivers/dma/qcom/hidma.c
@@ -210,6 +210,7 @@ static int hidma_chan_init(struct hidma_dev *dmadev, u32 dma_sig)
INIT_LIST_HEAD(&mchan->prepared);
INIT_LIST_HEAD(&mchan->active);
INIT_LIST_HEAD(&mchan->completed);
+ INIT_LIST_HEAD(&mchan->queued);
spin_lock_init(&mchan->lock);
list_add_tail(&mchan->chan.device_node, &ddev->channels);
@@ -230,9 +231,15 @@ static void hidma_issue_pending(struct dma_chan *dmach)
struct hidma_chan *mchan = to_hidma_chan(dmach);
struct hidma_dev *dmadev = mchan->dmadev;
unsigned long flags;
+ struct hidma_desc *qdesc, *next;
int status;
spin_lock_irqsave(&mchan->lock, flags);
+ list_for_each_entry_safe(qdesc, next, &mchan->queued, node) {
+ hidma_ll_queue_request(dmadev->lldev, qdesc->tre_ch);
+ list_move_tail(&qdesc->node, &mchan->active);
+ }
+
if (!mchan->running) {
struct hidma_desc *desc = list_first_entry(&mchan->active,
struct hidma_desc,
node);
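The drain loop added above must use the _safe list iterator, because list_move_tail() unlinks the node currently being visited. A standalone sketch of the same pattern (the function name is illustrative; the loop body mirrors what hidma_issue_pending() now runs under mchan->lock):

/* Drain every entry from @queued onto @active. The _safe iterator
 * caches the next node, so unlinking the current one mid-iteration
 * via list_move_tail() is safe.
 */
static void drain_queued(struct list_head *queued, struct list_head *active)
{
	struct hidma_desc *qdesc, *next;

	list_for_each_entry_safe(qdesc, next, queued, node)
		list_move_tail(&qdesc->node, active);
}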
@@ -315,17 +322,18 @@ static dma_cookie_t hidma_tx_submit(struct dma_async_tx_descriptor *txd)
pm_runtime_put_autosuspend(dmadev->ddev.dev);
return -ENODEV;
}
+ pm_runtime_mark_last_busy(dmadev->ddev.dev);
+ pm_runtime_put_autosuspend(dmadev->ddev.dev);
mdesc = container_of(txd, struct hidma_desc, desc);
spin_lock_irqsave(&mchan->lock, irqflags);
- /* Move descriptor to active */
- list_move_tail(&mdesc->node, &mchan->active);
+ /* Move descriptor to queued */
+ list_move_tail(&mdesc->node, &mchan->queued);
/* Update cookie */
cookie = dma_cookie_assign(txd);
- hidma_ll_queue_request(dmadev->lldev, mdesc->tre_ch);
spin_unlock_irqrestore(&mchan->lock, irqflags);
return cookie;
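The runtime-PM change in this hunk balances the pm_runtime_get_sync() taken earlier in hidma_tx_submit() (outside this excerpt) on the success path as well, not only on the -ENODEV error path. The usual pairing looks roughly like this (a sketch; `dev` stands in for dmadev->ddev.dev):

/* Every pm_runtime_get_sync() must be balanced by a put; marking
 * last_busy first restarts the autosuspend timer from "now".
 */
pm_runtime_get_sync(dev);

/* ... check/update driver state, touch the device if needed ... */

pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);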
@@ -431,6 +439,7 @@ static int hidma_terminate_channel(struct dma_chan *chan)
list_splice_init(&mchan->active, &list);
list_splice_init(&mchan->prepared, &list);
list_splice_init(&mchan->completed, &list);
+ list_splice_init(&mchan->queued, &list);
spin_unlock_irqrestore(&mchan->lock, irqflags);
/* this suspends the existing transfer */
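For reference, the overall teardown shape that this hunk extends: all per-state lists, now including queued, are spliced onto a private list under the channel lock so the descriptors can be reclaimed after the lock is released. A sketch, with the reclaim step paraphrased:

/* Collect descriptors from every state list while holding the lock,
 * then complete/free them lock-free afterwards.
 */
LIST_HEAD(list);
unsigned long irqflags;

spin_lock_irqsave(&mchan->lock, irqflags);
list_splice_init(&mchan->active, &list);
list_splice_init(&mchan->prepared, &list);
list_splice_init(&mchan->completed, &list);
list_splice_init(&mchan->queued, &list);
spin_unlock_irqrestore(&mchan->lock, irqflags);

/* entries on @list can now be reported and freed without the lock */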