author		Linus Torvalds <torvalds@linux-foundation.org>	2016-07-29 01:45:17 +0300
committer	Linus Torvalds <torvalds@linux-foundation.org>	2016-07-29 01:45:17 +0300
commit		6039b80eb50a893476fea7d56e86ed2d19290054 (patch)
tree		9cb535ca7604f4e1859dfac221ada671368b149b /drivers/dma/qcom
parent		c9b011a87dd49bac1632311811c974bb7cd33c25 (diff)
parent		4bb0439626983fdde5af3ce970bd0ba2070f5378 (diff)
download	linux-6039b80eb50a893476fea7d56e86ed2d19290054.tar.xz
Merge tag 'dmaengine-4.8-rc1' of git://git.infradead.org/users/vkoul/slave-dma
Pull dmaengine updates from Vinod Koul:
"This time we have bit of largish changes: two new drivers, bunch of
updates and cleanups to existing set. Nothing super exciting though.
New drivers:
- Xilinx zynqmp dma engine driver
- Marvell xor2 driver
Updates:
- dmatest sg support
- updates and enhancements to Xilinx drivers, adding of cyclic mode
- clock handling fixes across drivers
- removal of OOM messages on kzalloc failure across the subsystem
- interleaved transfers support in omap driver
- runtime pm support in qcom bam dma (see the sketch after this list)
- tasklet kill on exit across drivers
- irq cleanup on remove across drivers"
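As a rough illustration of the runtime pm item above, here is a minimal
sketch of the get/put bracket that the bam_dma.c hunks further down wrap
around hardware access. my_chan_op is a hypothetical helper, not a function
from the driver:

#include <linux/pm_runtime.h>

/* Hypothetical channel operation showing the get_sync()/put_autosuspend()
 * bracket the bam_dma patches apply around register accesses. */
static int my_chan_op(struct device *dev)
{
	int ret;

	ret = pm_runtime_get_sync(dev);	/* resume the hardware if suspended */
	if (ret < 0)
		return ret;

	/* ... program channel registers here ... */

	pm_runtime_mark_last_busy(dev);		/* restart the autosuspend timer */
	pm_runtime_put_autosuspend(dev);	/* drop ref; suspend after the delay */

	return 0;
}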
* tag 'dmaengine-4.8-rc1' of git://git.infradead.org/users/vkoul/slave-dma: (94 commits)
dmaengine: k3dma: add missing clk_disable_unprepare() on error in k3_dma_probe()
dmaengine: zynqmp_dma: add missing MODULE_LICENSE
dmaengine: qcom_hidma: use for_each_matching_node() macro
dmaengine: zynqmp_dma: Fix static checker warning
dmaengine: omap-dma: Support for interleaved transfer
dmaengine: ioat: statify symbol
dmaengine: pxa_dma: implement device_synchronize
dmaengine: imx-sdma: remove assignment never used
dmaengine: imx-sdma: remove dummy assignment
dmaengine: cppi: remove unused and bogus check
dmaengine: qcom_hidma_lli: kill the tasklets upon exit
dmaengine: pxa_dma: remove owner assignment
dmaengine: fsl_raid: remove owner assignment
dmaengine: coh901318: remove owner assignment
dmaengine: qcom_hidma: kill the tasklets upon exit
dmaengine: txx9dmac: explicitly freeup irq
dmaengine: sirf-dma: kill the tasklets upon exit
dmaengine: s3c24xx: kill the tasklets upon exit
dmaengine: s3c24xx: explicitly freeup irq
dmaengine: pl330: explicitly freeup irq
...
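The "kill the tasklets upon exit" and "explicitly freeup irq" commits above
share one teardown pattern. A minimal sketch, assuming a hypothetical
my_dma_dev structure (the real drivers differ in field names and ordering
details):

#include <linux/interrupt.h>
#include <linux/platform_device.h>

struct my_dma_dev {
	struct tasklet_struct task;
	int irq;
};

static int my_dma_remove(struct platform_device *pdev)
{
	struct my_dma_dev *mdev = platform_get_drvdata(pdev);

	/* Free the irq first so the handler can no longer run and
	 * reschedule the tasklet behind our back... */
	devm_free_irq(&pdev->dev, mdev->irq, mdev);

	/* ...then make sure no tasklet instance is still running or
	 * pending before device state is torn down. */
	tasklet_kill(&mdev->task);

	return 0;
}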
Diffstat (limited to 'drivers/dma/qcom')
-rw-r--r--	drivers/dma/qcom/bam_dma.c	109
-rw-r--r--	drivers/dma/qcom/hidma.c	1
-rw-r--r--	drivers/dma/qcom/hidma_ll.c	1
-rw-r--r--	drivers/dma/qcom/hidma_mgmt.c	7
4 files changed, 113 insertions, 5 deletions
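One design point worth noting before reading the diff: bam_dma_probe() marks
the device pm_runtime_irq_safe(), so the runtime PM callbacks may be invoked
in atomic context. That is why the runtime hooks below call only the
non-sleeping clk_enable()/clk_disable() half of the clock API and defer the
sleeping clk_prepare()/clk_unprepare() half to the system sleep hooks. A
sketch of that split, with hypothetical my_* names in place of the driver's
own:

#include <linux/clk.h>
#include <linux/pm_runtime.h>

struct my_dev {
	struct clk *clk;
};

/* Runtime PM: atomic context is possible after pm_runtime_irq_safe(),
 * so only the non-sleeping clk_disable() may be called here. */
static int my_runtime_suspend(struct device *dev)
{
	struct my_dev *mdev = dev_get_drvdata(dev);

	clk_disable(mdev->clk);
	return 0;
}

/* System sleep: process context, so the sleeping clk_unprepare() is fine.
 * pm_runtime_force_suspend() runs the runtime hook first if needed. */
static int my_suspend(struct device *dev)
{
	struct my_dev *mdev = dev_get_drvdata(dev);

	pm_runtime_force_suspend(dev);
	clk_unprepare(mdev->clk);
	return 0;
}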
diff --git a/drivers/dma/qcom/bam_dma.c b/drivers/dma/qcom/bam_dma.c
index 969b48176745..03c4eb3fd314 100644
--- a/drivers/dma/qcom/bam_dma.c
+++ b/drivers/dma/qcom/bam_dma.c
@@ -48,6 +48,7 @@
 #include <linux/of_dma.h>
 #include <linux/clk.h>
 #include <linux/dmaengine.h>
+#include <linux/pm_runtime.h>
 
 #include "../dmaengine.h"
 #include "../virt-dma.h"
@@ -58,6 +59,8 @@ struct bam_desc_hw {
 	__le16 flags;
 };
 
+#define BAM_DMA_AUTOSUSPEND_DELAY 100
+
 #define DESC_FLAG_INT BIT(15)
 #define DESC_FLAG_EOT BIT(14)
 #define DESC_FLAG_EOB BIT(13)
@@ -527,12 +530,17 @@ static void bam_free_chan(struct dma_chan *chan)
 	struct bam_device *bdev = bchan->bdev;
 	u32 val;
 	unsigned long flags;
+	int ret;
+
+	ret = pm_runtime_get_sync(bdev->dev);
+	if (ret < 0)
+		return;
 
 	vchan_free_chan_resources(to_virt_chan(chan));
 
 	if (bchan->curr_txd) {
 		dev_err(bchan->bdev->dev, "Cannot free busy channel\n");
-		return;
+		goto err;
 	}
 
 	spin_lock_irqsave(&bchan->vc.lock, flags);
@@ -550,6 +558,10 @@ static void bam_free_chan(struct dma_chan *chan)
 
 	/* disable irq */
 	writel_relaxed(0, bam_addr(bdev, bchan->id, BAM_P_IRQ_EN));
+
+err:
+	pm_runtime_mark_last_busy(bdev->dev);
+	pm_runtime_put_autosuspend(bdev->dev);
 }
 
 /**
@@ -696,11 +708,18 @@ static int bam_pause(struct dma_chan *chan)
 	struct bam_chan *bchan = to_bam_chan(chan);
 	struct bam_device *bdev = bchan->bdev;
 	unsigned long flag;
+	int ret;
+
+	ret = pm_runtime_get_sync(bdev->dev);
+	if (ret < 0)
+		return ret;
 
 	spin_lock_irqsave(&bchan->vc.lock, flag);
 	writel_relaxed(1, bam_addr(bdev, bchan->id, BAM_P_HALT));
 	bchan->paused = 1;
 	spin_unlock_irqrestore(&bchan->vc.lock, flag);
+	pm_runtime_mark_last_busy(bdev->dev);
+	pm_runtime_put_autosuspend(bdev->dev);
 
 	return 0;
 }
@@ -715,11 +734,18 @@ static int bam_resume(struct dma_chan *chan)
 	struct bam_chan *bchan = to_bam_chan(chan);
 	struct bam_device *bdev = bchan->bdev;
 	unsigned long flag;
+	int ret;
+
+	ret = pm_runtime_get_sync(bdev->dev);
+	if (ret < 0)
+		return ret;
 
 	spin_lock_irqsave(&bchan->vc.lock, flag);
 	writel_relaxed(0, bam_addr(bdev, bchan->id, BAM_P_HALT));
 	bchan->paused = 0;
 	spin_unlock_irqrestore(&bchan->vc.lock, flag);
+	pm_runtime_mark_last_busy(bdev->dev);
+	pm_runtime_put_autosuspend(bdev->dev);
 
 	return 0;
 }
@@ -795,6 +821,7 @@ static irqreturn_t bam_dma_irq(int irq, void *data)
 {
 	struct bam_device *bdev = data;
 	u32 clr_mask = 0, srcs = 0;
+	int ret;
 
 	srcs |= process_channel_irqs(bdev);
 
@@ -802,6 +829,10 @@ static irqreturn_t bam_dma_irq(int irq, void *data)
 	if (srcs & P_IRQ)
 		tasklet_schedule(&bdev->task);
 
+	ret = pm_runtime_get_sync(bdev->dev);
+	if (ret < 0)
+		return ret;
+
 	if (srcs & BAM_IRQ) {
 		clr_mask = readl_relaxed(bam_addr(bdev, 0, BAM_IRQ_STTS));
 
@@ -814,6 +845,9 @@ static irqreturn_t bam_dma_irq(int irq, void *data)
 		writel_relaxed(clr_mask, bam_addr(bdev, 0, BAM_IRQ_CLR));
 	}
 
+	pm_runtime_mark_last_busy(bdev->dev);
+	pm_runtime_put_autosuspend(bdev->dev);
+
 	return IRQ_HANDLED;
 }
 
@@ -893,6 +927,7 @@ static void bam_start_dma(struct bam_chan *bchan)
 	struct bam_desc_hw *desc;
 	struct bam_desc_hw *fifo = PTR_ALIGN(bchan->fifo_virt,
 					sizeof(struct bam_desc_hw));
+	int ret;
 
 	lockdep_assert_held(&bchan->vc.lock);
 
@@ -904,6 +939,10 @@ static void bam_start_dma(struct bam_chan *bchan)
 	async_desc = container_of(vd, struct bam_async_desc, vd);
 	bchan->curr_txd = async_desc;
 
+	ret = pm_runtime_get_sync(bdev->dev);
+	if (ret < 0)
+		return;
+
 	/* on first use, initialize the channel hardware */
 	if (!bchan->initialized)
 		bam_chan_init_hw(bchan, async_desc->dir);
@@ -946,6 +985,9 @@ static void bam_start_dma(struct bam_chan *bchan)
 	wmb();
 	writel_relaxed(bchan->tail * sizeof(struct bam_desc_hw),
 			bam_addr(bdev, bchan->id, BAM_P_EVNT_REG));
+
+	pm_runtime_mark_last_busy(bdev->dev);
+	pm_runtime_put_autosuspend(bdev->dev);
 }
 
 /**
@@ -970,6 +1012,7 @@ static void dma_tasklet(unsigned long data)
 		bam_start_dma(bchan);
 		spin_unlock_irqrestore(&bchan->vc.lock, flags);
 	}
+
 }
 
 /**
@@ -1213,6 +1256,13 @@ static int bam_dma_probe(struct platform_device *pdev)
 	if (ret)
 		goto err_unregister_dma;
 
+	pm_runtime_irq_safe(&pdev->dev);
+	pm_runtime_set_autosuspend_delay(&pdev->dev, BAM_DMA_AUTOSUSPEND_DELAY);
+	pm_runtime_use_autosuspend(&pdev->dev);
+	pm_runtime_mark_last_busy(&pdev->dev);
+	pm_runtime_set_active(&pdev->dev);
+	pm_runtime_enable(&pdev->dev);
+
 	return 0;
 
 err_unregister_dma:
@@ -1233,6 +1283,8 @@ static int bam_dma_remove(struct platform_device *pdev)
 	struct bam_device *bdev = platform_get_drvdata(pdev);
 	u32 i;
 
+	pm_runtime_force_suspend(&pdev->dev);
+
 	of_dma_controller_free(pdev->dev.of_node);
 	dma_async_device_unregister(&bdev->common);
 
@@ -1260,11 +1312,66 @@ static int bam_dma_remove(struct platform_device *pdev)
 	return 0;
 }
 
+static int __maybe_unused bam_dma_runtime_suspend(struct device *dev)
+{
+	struct bam_device *bdev = dev_get_drvdata(dev);
+
+	clk_disable(bdev->bamclk);
+
+	return 0;
+}
+
+static int __maybe_unused bam_dma_runtime_resume(struct device *dev)
+{
+	struct bam_device *bdev = dev_get_drvdata(dev);
+	int ret;
+
+	ret = clk_enable(bdev->bamclk);
+	if (ret < 0) {
+		dev_err(dev, "clk_enable failed: %d\n", ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+static int __maybe_unused bam_dma_suspend(struct device *dev)
+{
+	struct bam_device *bdev = dev_get_drvdata(dev);
+
+	pm_runtime_force_suspend(dev);
+
+	clk_unprepare(bdev->bamclk);
+
+	return 0;
+}
+
+static int __maybe_unused bam_dma_resume(struct device *dev)
+{
+	struct bam_device *bdev = dev_get_drvdata(dev);
+	int ret;
+
+	ret = clk_prepare(bdev->bamclk);
+	if (ret)
+		return ret;
+
+	pm_runtime_force_resume(dev);
+
+	return 0;
+}
+
+static const struct dev_pm_ops bam_dma_pm_ops = {
+	SET_LATE_SYSTEM_SLEEP_PM_OPS(bam_dma_suspend, bam_dma_resume)
+	SET_RUNTIME_PM_OPS(bam_dma_runtime_suspend, bam_dma_runtime_resume,
+			   NULL)
+};
+
 static struct platform_driver bam_dma_driver = {
 	.probe = bam_dma_probe,
 	.remove = bam_dma_remove,
 	.driver = {
 		.name = "bam-dma-engine",
+		.pm = &bam_dma_pm_ops,
 		.of_match_table = bam_of_match,
 	},
 };
diff --git a/drivers/dma/qcom/hidma.c b/drivers/dma/qcom/hidma.c
index 41b5c6dee713..b2374cd91e45 100644
--- a/drivers/dma/qcom/hidma.c
+++ b/drivers/dma/qcom/hidma.c
@@ -708,6 +708,7 @@ static int hidma_remove(struct platform_device *pdev)
 	pm_runtime_get_sync(dmadev->ddev.dev);
 	dma_async_device_unregister(&dmadev->ddev);
 	devm_free_irq(dmadev->ddev.dev, dmadev->irq, dmadev->lldev);
+	tasklet_kill(&dmadev->task);
 	hidma_debug_uninit(dmadev);
 	hidma_ll_uninit(dmadev->lldev);
 	hidma_free(dmadev);
diff --git a/drivers/dma/qcom/hidma_ll.c b/drivers/dma/qcom/hidma_ll.c
index f3929001539b..ad20dfb64c71 100644
--- a/drivers/dma/qcom/hidma_ll.c
+++ b/drivers/dma/qcom/hidma_ll.c
@@ -831,6 +831,7 @@ int hidma_ll_uninit(struct hidma_lldev *lldev)
 	required_bytes = sizeof(struct hidma_tre) * lldev->nr_tres;
 
 	tasklet_kill(&lldev->task);
+	tasklet_kill(&lldev->rst_task);
 	memset(lldev->trepool, 0, required_bytes);
 	lldev->trepool = NULL;
 	lldev->pending_tre_count = 0;
diff --git a/drivers/dma/qcom/hidma_mgmt.c b/drivers/dma/qcom/hidma_mgmt.c
index c0e365321310..82f36e466083 100644
--- a/drivers/dma/qcom/hidma_mgmt.c
+++ b/drivers/dma/qcom/hidma_mgmt.c
@@ -371,8 +371,8 @@ static int __init hidma_mgmt_of_populate_channels(struct device_node *np)
 		pdevinfo.size_data = 0;
 		pdevinfo.dma_mask = DMA_BIT_MASK(64);
 		new_pdev = platform_device_register_full(&pdevinfo);
-		if (!new_pdev) {
-			ret = -ENODEV;
+		if (IS_ERR(new_pdev)) {
+			ret = PTR_ERR(new_pdev);
 			goto out;
 		}
 		of_dma_configure(&new_pdev->dev, child);
@@ -392,8 +392,7 @@ static int __init hidma_mgmt_init(void)
 #if defined(CONFIG_OF) && defined(CONFIG_OF_IRQ)
 	struct device_node *child;
 
-	for (child = of_find_matching_node(NULL, hidma_mgmt_match); child;
-	     child = of_find_matching_node(child, hidma_mgmt_match)) {
+	for_each_matching_node(child, hidma_mgmt_match) {
 		/* device tree based firmware here */
 		hidma_mgmt_of_populate_channels(child);
 		of_node_put(child);
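A note on the final hidma_mgmt.c hunk: platform_device_register_full()
reports failure as an ERR_PTR()-encoded pointer and never returns NULL, so
the old !new_pdev test could never fire and the hard-coded -ENODEV hid the
real error. A minimal sketch of the corrected check (register_child is a
hypothetical wrapper, not a function from the driver):

#include <linux/err.h>
#include <linux/platform_device.h>

static int register_child(const struct platform_device_info *pdevinfo)
{
	struct platform_device *pdev;

	pdev = platform_device_register_full(pdevinfo);
	if (IS_ERR(pdev))		/* failure is ERR_PTR(), not NULL */
		return PTR_ERR(pdev);	/* propagate the real error code */

	return 0;
}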