author      Linus Torvalds <torvalds@linux-foundation.org>   2016-03-17 22:34:54 +0300
committer   Linus Torvalds <torvalds@linux-foundation.org>   2016-03-17 22:34:54 +0300
commit      b5b131c7473e17275debcdf1c226f452dc3876ed (patch)
tree        a272e947c38213d4ee989bb3f863a8091d50426b /drivers/dma/edma.c
parent      c7eec380e85a427983782df744f0fb745d867170 (diff)
parent      896e041e8e8efb34520d033a693ef25391f9c9f0 (diff)
download    linux-b5b131c7473e17275debcdf1c226f452dc3876ed.tar.xz
Merge tag 'dmaengine-4.6-rc1' of git://git.infradead.org/users/vkoul/slave-dma
Pull dmaengine updates from Vinod Koul:
"This is smallish update with minor changes to core and new driver and
usual updates. Nothing super exciting here..
- We have made slave address as physical to enable driver to do the
mapping.
- We now expose the maxburst for slave dma as new capability so
clients can know this and program accordingly
- addition of device synchronize callbacks on omap and edma.
- pl330 updates to support DMAFLUSHP for Rockchip platforms.
- Updates and improved sg handling in Xilinx VDMA driver.
- New hidma qualcomm dma driver, though some bits are still in
progress"
* tag 'dmaengine-4.6-rc1' of git://git.infradead.org/users/vkoul/slave-dma: (40 commits)
dmaengine: IOATDMA: revise channel reset workaround on CB3.3 platforms
dmaengine: add Qualcomm Technologies HIDMA channel driver
dmaengine: add Qualcomm Technologies HIDMA management driver
dmaengine: hidma: Add Device Tree binding
dmaengine: qcom_bam_dma: move to qcom directory
dmaengine: tegra: Move of_device_id table near to its user
dmaengine: xilinx_vdma: Remove unnecessary variable initializations
dmaengine: sirf: use __maybe_unused to hide pm functions
dmaengine: rcar-dmac: clear pertinence number of channels
dmaengine: sh: shdmac: don't open code of_device_get_match_data()
dmaengine: tegra: don't open code of_device_get_match_data()
dmaengine: qcom_bam_dma: Make driver work for BE
dmaengine: sun4i: support module autoloading
dma/mic_x100_dma: IS_ERR() vs PTR_ERR() typo
dmaengine: xilinx_vdma: Use readl_poll_timeout instead of do while loop's
dmaengine: xilinx_vdma: Simplify spin lock handling
dmaengine: xilinx_vdma: Fix issues with non-parking mode
dmaengine: xilinx_vdma: Improve SG engine handling
dmaengine: pl330: fix to support the burst mode
dmaengine: make slave address physical
...
Diffstat (limited to 'drivers/dma/edma.c')
-rw-r--r--   drivers/dma/edma.c   63
1 file changed, 36 insertions(+), 27 deletions(-)
diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c
index e3d7fcb69b4c..ee3463e774f8 100644
--- a/drivers/dma/edma.c
+++ b/drivers/dma/edma.c
@@ -869,6 +869,13 @@ static int edma_terminate_all(struct dma_chan *chan)
         return 0;
 }
 
+static void edma_synchronize(struct dma_chan *chan)
+{
+        struct edma_chan *echan = to_edma_chan(chan);
+
+        vchan_synchronize(&echan->vchan);
+}
+
 static int edma_slave_config(struct dma_chan *chan,
         struct dma_slave_config *cfg)
 {
@@ -1365,36 +1372,36 @@ static struct dma_async_tx_descriptor *edma_prep_dma_cyclic(
 static void edma_completion_handler(struct edma_chan *echan)
 {
         struct device *dev = echan->vchan.chan.device->dev;
-        struct edma_desc *edesc = echan->edesc;
-
-        if (!edesc)
-                return;
+        struct edma_desc *edesc;
 
         spin_lock(&echan->vchan.lock);
-        if (edesc->cyclic) {
-                vchan_cyclic_callback(&edesc->vdesc);
-                spin_unlock(&echan->vchan.lock);
-                return;
-        } else if (edesc->processed == edesc->pset_nr) {
-                edesc->residue = 0;
-                edma_stop(echan);
-                vchan_cookie_complete(&edesc->vdesc);
-                echan->edesc = NULL;
-
-                dev_dbg(dev, "Transfer completed on channel %d\n",
-                        echan->ch_num);
-        } else {
-                dev_dbg(dev, "Sub transfer completed on channel %d\n",
-                        echan->ch_num);
-
-                edma_pause(echan);
-
-                /* Update statistics for tx_status */
-                edesc->residue -= edesc->sg_len;
-                edesc->residue_stat = edesc->residue;
-                edesc->processed_stat = edesc->processed;
+        edesc = echan->edesc;
+        if (edesc) {
+                if (edesc->cyclic) {
+                        vchan_cyclic_callback(&edesc->vdesc);
+                        spin_unlock(&echan->vchan.lock);
+                        return;
+                } else if (edesc->processed == edesc->pset_nr) {
+                        edesc->residue = 0;
+                        edma_stop(echan);
+                        vchan_cookie_complete(&edesc->vdesc);
+                        echan->edesc = NULL;
+
+                        dev_dbg(dev, "Transfer completed on channel %d\n",
+                                echan->ch_num);
+                } else {
+                        dev_dbg(dev, "Sub transfer completed on channel %d\n",
+                                echan->ch_num);
+
+                        edma_pause(echan);
+
+                        /* Update statistics for tx_status */
+                        edesc->residue -= edesc->sg_len;
+                        edesc->residue_stat = edesc->residue;
+                        edesc->processed_stat = edesc->processed;
+                }
+                edma_execute(echan);
         }
-        edma_execute(echan);
 
         spin_unlock(&echan->vchan.lock);
 }
@@ -1837,6 +1844,7 @@ static void edma_dma_init(struct edma_cc *ecc, bool legacy_mode)
         s_ddev->device_pause = edma_dma_pause;
         s_ddev->device_resume = edma_dma_resume;
         s_ddev->device_terminate_all = edma_terminate_all;
+        s_ddev->device_synchronize = edma_synchronize;
 
         s_ddev->src_addr_widths = EDMA_DMA_BUSWIDTHS;
         s_ddev->dst_addr_widths = EDMA_DMA_BUSWIDTHS;
@@ -1862,6 +1870,7 @@ static void edma_dma_init(struct edma_cc *ecc, bool legacy_mode)
         m_ddev->device_pause = edma_dma_pause;
         m_ddev->device_resume = edma_dma_resume;
         m_ddev->device_terminate_all = edma_terminate_all;
+        m_ddev->device_synchronize = edma_synchronize;
 
         m_ddev->src_addr_widths = EDMA_DMA_BUSWIDTHS;
         m_ddev->dst_addr_widths = EDMA_DMA_BUSWIDTHS;
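
[Editorial sketch] For context on the edma_synchronize()/device_synchronize addition in the diff above: a minimal sketch of the client-side teardown sequence this callback supports, assuming a channel the client owns. my_teardown() is a hypothetical helper; dmaengine_terminate_async() and dmaengine_synchronize() are existing dmaengine wrappers that end up invoking the driver's device_terminate_all and device_synchronize callbacks.

#include <linux/dmaengine.h>

/* Illustrative only: teardown of a client-owned channel. */
static void my_teardown(struct dma_chan *chan)
{
        /* Stop the channel without sleeping (usable from atomic
         * context); completion callbacks may still be running. */
        dmaengine_terminate_async(chan);

        /* Wait (sleeping) until all in-flight completion callbacks
         * have finished; for edma this reaches edma_synchronize(),
         * which calls vchan_synchronize() on the virt-dma channel. */
        dmaengine_synchronize(chan);

        /* Only now is it safe to free buffers used by the transfers. */
}

For callers that are allowed to sleep throughout, dmaengine_terminate_sync() combines both steps.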