author     Nicolas Ferre <nicolas.ferre@atmel.com>    2011-05-06 21:56:52 +0400
committer  Vinod Koul <vinod.koul@intel.com>          2011-05-09 10:12:54 +0400
commit     23b5e3ad68a3c26a6a36039ea907997664aedcab
tree       4aeeb9c081c05255615eb3fdd97268f58e6cc653 /drivers/dma/at_hdmac.c
parent     543aabc7d295bfe2489f184259395e3467520d48
download   linux-23b5e3ad68a3c26a6a36039ea907997664aedcab.tar.xz
dmaengine: at_hdmac: implement pause and resume in atc_control
Pause and resume controls are useful for audio devices. This also makes
atc_tx_status() report the correct status while the channel is paused.
Idea taken from the dw_dmac patch by Linus Walleij.
Signed-off-by: Nicolas Ferre <nicolas.ferre@atmel.com>
Acked-by: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Vinod Koul <vinod.koul@intel.com>
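
For context, a client driver (audio being the motivating case) would reach these controls through the generic dmaengine wrappers rather than calling atc_control() directly. A minimal sketch, assuming a slave channel already obtained elsewhere; dmaengine_pause()/dmaengine_resume() are the stock <linux/dmaengine.h> wrappers of this era around device_control(), and the two helper functions are hypothetical:

#include <linux/dmaengine.h>

/* Illustrative sketch only: pause a running slave channel while an
 * audio stream is stopped, then resume it later. The helper names
 * are hypothetical, not part of this driver. */
static int audio_stream_pause(struct dma_chan *chan)
{
	/* issues DMA_PAUSE via chan->device->device_control() */
	return dmaengine_pause(chan);
}

static int audio_stream_resume(struct dma_chan *chan)
{
	/* issues DMA_RESUME; atc_control() returns 0 if not paused */
	return dmaengine_resume(chan);
}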
Diffstat (limited to 'drivers/dma/at_hdmac.c')
-rw-r--r-- | drivers/dma/at_hdmac.c | 97
1 file changed, 70 insertions, 27 deletions
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
index ba0b5ec4e4c2..5968245e1e6a 100644
--- a/drivers/dma/at_hdmac.c
+++ b/drivers/dma/at_hdmac.c
@@ -508,7 +508,8 @@ static irqreturn_t at_dma_interrupt(int irq, void *dev_id)
 			if (pending & (AT_DMA_BTC(i) | AT_DMA_ERR(i))) {
 				if (pending & AT_DMA_ERR(i)) {
 					/* Disable channel on AHB error */
-					dma_writel(atdma, CHDR, atchan->mask);
+					dma_writel(atdma, CHDR,
+						AT_DMA_RES(i) | atchan->mask);
 					/* Give information to tasklet */
 					set_bit(ATC_IS_ERROR, &atchan->status);
 				}
@@ -952,39 +953,78 @@ static int atc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
 {
 	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
 	struct at_dma		*atdma = to_at_dma(chan->device);
-	struct at_desc		*desc, *_desc;
+	int			chan_id = atchan->chan_common.chan_id;
+
 	LIST_HEAD(list);
 
-	/* Only supports DMA_TERMINATE_ALL */
-	if (cmd != DMA_TERMINATE_ALL)
-		return -ENXIO;
+	dev_vdbg(chan2dev(chan), "atc_control (%d)\n", cmd);
 
-	/*
-	 * This is only called when something went wrong elsewhere, so
-	 * we don't really care about the data. Just disable the
-	 * channel. We still have to poll the channel enable bit due
-	 * to AHB/HSB limitations.
-	 */
-	spin_lock_bh(&atchan->lock);
+	if (cmd == DMA_PAUSE) {
+		int pause_timeout = 1000;
 
-	dma_writel(atdma, CHDR, atchan->mask);
+		spin_lock_bh(&atchan->lock);
 
-	/* confirm that this channel is disabled */
-	while (dma_readl(atdma, CHSR) & atchan->mask)
-		cpu_relax();
+		dma_writel(atdma, CHER, AT_DMA_SUSP(chan_id));
+
+		/* wait for FIFO to be empty */
+		while (!(dma_readl(atdma, CHSR) & AT_DMA_EMPT(chan_id))) {
+			if (pause_timeout-- > 0) {
+				/* the FIFO can only drain if the peripheral
+				 * is still requesting data:
+				 * -> timeout if it is not the case. */
+				dma_writel(atdma, CHDR, AT_DMA_RES(chan_id));
+				spin_unlock_bh(&atchan->lock);
+				return -ETIMEDOUT;
+			}
+			cpu_relax();
+		}
 
-	/* active_list entries will end up before queued entries */
-	list_splice_init(&atchan->queue, &list);
-	list_splice_init(&atchan->active_list, &list);
+		set_bit(ATC_IS_PAUSED, &atchan->status);
 
-	/* Flush all pending and queued descriptors */
-	list_for_each_entry_safe(desc, _desc, &list, desc_node)
-		atc_chain_complete(atchan, desc);
+		spin_unlock_bh(&atchan->lock);
+	} else if (cmd == DMA_RESUME) {
+		if (!test_bit(ATC_IS_PAUSED, &atchan->status))
+			return 0;
 
-	/* if channel dedicated to cyclic operations, free it */
-	clear_bit(ATC_IS_CYCLIC, &atchan->status);
+		spin_lock_bh(&atchan->lock);
 
-	spin_unlock_bh(&atchan->lock);
+		dma_writel(atdma, CHDR, AT_DMA_RES(chan_id));
+		clear_bit(ATC_IS_PAUSED, &atchan->status);
+
+		spin_unlock_bh(&atchan->lock);
+	} else if (cmd == DMA_TERMINATE_ALL) {
+		struct at_desc	*desc, *_desc;
+		/*
+		 * This is only called when something went wrong elsewhere, so
+		 * we don't really care about the data. Just disable the
+		 * channel. We still have to poll the channel enable bit due
+		 * to AHB/HSB limitations.
+		 */
+		spin_lock_bh(&atchan->lock);
+
+		/* disabling channel: must also remove suspend state */
+		dma_writel(atdma, CHDR, AT_DMA_RES(chan_id) | atchan->mask);
+
+		/* confirm that this channel is disabled */
+		while (dma_readl(atdma, CHSR) & atchan->mask)
+			cpu_relax();
+
+		/* active_list entries will end up before queued entries */
+		list_splice_init(&atchan->queue, &list);
+		list_splice_init(&atchan->active_list, &list);
+
+		/* Flush all pending and queued descriptors */
+		list_for_each_entry_safe(desc, _desc, &list, desc_node)
+			atc_chain_complete(atchan, desc);
+
+		clear_bit(ATC_IS_PAUSED, &atchan->status);
+		/* if channel dedicated to cyclic operations, free it */
+		clear_bit(ATC_IS_CYCLIC, &atchan->status);
+
+		spin_unlock_bh(&atchan->lock);
+	} else {
+		return -ENXIO;
+	}
 
 	return 0;
 }
@@ -1032,8 +1072,11 @@ atc_tx_status(struct dma_chan *chan,
 	else
 		dma_set_tx_state(txstate, last_complete, last_used, 0);
 
-	dev_vdbg(chan2dev(chan), "tx_status: %d (d%d, u%d)\n",
-		 cookie, last_complete ? last_complete : 0,
+	if (test_bit(ATC_IS_PAUSED, &atchan->status))
+		ret = DMA_PAUSED;
+
+	dev_vdbg(chan2dev(chan), "tx_status %d: cookie = %d (d%d, u%d)\n",
+		 ret, cookie, last_complete ? last_complete : 0,
 		 last_used ? last_used : 0);
 
 	return ret;
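
The atc_tx_status() change above is what lets a client observe the paused state through the standard status query. A hedged sketch of the caller side, using the stock dmaengine_tx_status() helper; check_transfer_state() is a hypothetical function, and DMA_SUCCESS is the completion value of this kernel era (later renamed DMA_COMPLETE):

#include <linux/dmaengine.h>
#include <linux/kernel.h>

/* Illustrative sketch only: query a previously submitted transfer
 * and distinguish paused from running and complete. The cookie comes
 * from the client's own submit path. */
static void check_transfer_state(struct dma_chan *chan, dma_cookie_t cookie)
{
	struct dma_tx_state state;
	enum dma_status status;

	status = dmaengine_tx_status(chan, cookie, &state);

	if (status == DMA_PAUSED)
		pr_info("transfer paused, residue %u bytes\n", state.residue);
	else if (status == DMA_SUCCESS)
		pr_info("transfer complete\n");
	else
		pr_info("transfer in progress\n");
}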