From 5637abaab994a933d7f00c95bc1456c7a58c83f4 Mon Sep 17 00:00:00 2001 From: Michael Tretter Date: Thu, 26 Aug 2021 11:47:36 +0200 Subject: dmaengine: zynqmp_dma: simplify with dev_err_probe The clocks are provided by the ZynqMP firmware driver and are deferred until the firmware driver has probed. This leads to misleading error messages during probe of the zynqmp_dma driver. Use dev_err_probe for printing errors during probe to avoid error messages for -EPROBE_DEFER. Signed-off-by: Michael Tretter Link: https://lore.kernel.org/r/20210826094742.1302009-2-m.tretter@pengutronix.de Signed-off-by: Vinod Koul --- drivers/dma/xilinx/zynqmp_dma.c | 18 ++++++++---------- 1 file changed, 8 insertions(+), 10 deletions(-) diff --git a/drivers/dma/xilinx/zynqmp_dma.c b/drivers/dma/xilinx/zynqmp_dma.c index 97f02f8eb03a..1d077a85f32b 100644 --- a/drivers/dma/xilinx/zynqmp_dma.c +++ b/drivers/dma/xilinx/zynqmp_dma.c @@ -1061,16 +1061,14 @@ static int zynqmp_dma_probe(struct platform_device *pdev) p->dev = &pdev->dev; zdev->clk_main = devm_clk_get(&pdev->dev, "clk_main"); - if (IS_ERR(zdev->clk_main)) { - dev_err(&pdev->dev, "main clock not found.\n"); - return PTR_ERR(zdev->clk_main); - } + if (IS_ERR(zdev->clk_main)) + return dev_err_probe(&pdev->dev, PTR_ERR(zdev->clk_main), + "main clock not found.\n"); zdev->clk_apb = devm_clk_get(&pdev->dev, "clk_apb"); - if (IS_ERR(zdev->clk_apb)) { - dev_err(&pdev->dev, "apb clock not found.\n"); - return PTR_ERR(zdev->clk_apb); - } + if (IS_ERR(zdev->clk_apb)) + return dev_err_probe(&pdev->dev, PTR_ERR(zdev->clk_apb), + "apb clock not found.\n"); platform_set_drvdata(pdev, zdev); pm_runtime_set_autosuspend_delay(zdev->dev, ZDMA_PM_TIMEOUT); @@ -1085,7 +1083,7 @@ static int zynqmp_dma_probe(struct platform_device *pdev) ret = zynqmp_dma_chan_probe(zdev, pdev); if (ret) { - dev_err(&pdev->dev, "Probing channel failed\n"); + dev_err_probe(&pdev->dev, ret, "Probing channel failed\n"); goto err_disable_pm; } @@ -1097,7 +1095,7 @@ static int zynqmp_dma_probe(struct platform_device *pdev) ret = of_dma_controller_register(pdev->dev.of_node, of_zynqmp_dma_xlate, zdev); if (ret) { - dev_err(&pdev->dev, "Unable to register DMA to DT\n"); + dev_err_probe(&pdev->dev, ret, "Unable to register DMA to DT\n"); dma_async_device_unregister(&zdev->common); goto free_chan_resources; } -- cgit v1.2.3 From 4c0f93eb80fbaafcb2cefa09ed1d6c5099dc1939 Mon Sep 17 00:00:00 2001 From: Michael Tretter Date: Thu, 26 Aug 2021 11:47:37 +0200 Subject: dmaengine: zynqmp_dma: drop message on probe success There is no need to print a message that the ZynqMP DMA driver probed successfully, since it carries no additional information. Drop the message. 
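Both patches above trim probe-time messaging: the first folds the print-and-return pattern into dev_err_probe(), the second drops a success print that carries no information. As background, dev_err_probe() returns the error code it is given, prints with dev_err() for real errors, and stays quiet (debug level plus a recorded deferral reason) for -EPROBE_DEFER. A minimal sketch of the calling pattern, using a made-up foo_probe() and clock name rather than the zynqmp_dma code:

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/platform_device.h>

static int foo_probe(struct platform_device *pdev)
{
	struct clk *clk = devm_clk_get(&pdev->dev, "main");

	if (IS_ERR(clk))
		/* No error print for -EPROBE_DEFER; the error code is
		 * returned unchanged either way. */
		return dev_err_probe(&pdev->dev, PTR_ERR(clk),
				     "main clock not found\n");
	return 0;
}
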
Signed-off-by: Michael Tretter Link: https://lore.kernel.org/r/20210826094742.1302009-3-m.tretter@pengutronix.de Signed-off-by: Vinod Koul --- drivers/dma/xilinx/zynqmp_dma.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/drivers/dma/xilinx/zynqmp_dma.c b/drivers/dma/xilinx/zynqmp_dma.c index 1d077a85f32b..bdeae8a34123 100644 --- a/drivers/dma/xilinx/zynqmp_dma.c +++ b/drivers/dma/xilinx/zynqmp_dma.c @@ -1103,8 +1103,6 @@ static int zynqmp_dma_probe(struct platform_device *pdev) pm_runtime_mark_last_busy(zdev->dev); pm_runtime_put_sync_autosuspend(zdev->dev); - dev_info(&pdev->dev, "ZynqMP DMA driver Probe success\n"); - return 0; free_chan_resources: -- cgit v1.2.3 From 7073b5a8bd6ea641b41568e2899b0977f79134e3 Mon Sep 17 00:00:00 2001 From: Michael Tretter Date: Thu, 26 Aug 2021 11:47:38 +0200 Subject: dmaengine: zynqmp_dma: enable COMPILE_TEST The driver doesn't use anything architecture specific. Allow the compilation on other architectures as well. Signed-off-by: Michael Tretter Link: https://lore.kernel.org/r/20210826094742.1302009-4-m.tretter@pengutronix.de Signed-off-by: Vinod Koul --- drivers/dma/Kconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig index 80c2c03cb014..6bcdb4e6a0d1 100644 --- a/drivers/dma/Kconfig +++ b/drivers/dma/Kconfig @@ -717,7 +717,7 @@ config XILINX_DMA config XILINX_ZYNQMP_DMA tristate "Xilinx ZynqMP DMA Engine" - depends on (ARCH_ZYNQ || MICROBLAZE || ARM64) + depends on ARCH_ZYNQ || MICROBLAZE || ARM64 || COMPILE_TEST select DMA_ENGINE help Enable support for Xilinx ZynqMP DMA controller. -- cgit v1.2.3 From 85997fdfd15916dddee2458b7382934b6c857f87 Mon Sep 17 00:00:00 2001 From: Michael Tretter Date: Thu, 26 Aug 2021 11:47:39 +0200 Subject: dmaengine: zynqmp_dma: cleanup includes The driver includes a few headers that are not actually used, but are probably copy paste errors. Remove them. Signed-off-by: Michael Tretter Link: https://lore.kernel.org/r/20210826094742.1302009-5-m.tretter@pengutronix.de Signed-off-by: Vinod Koul --- drivers/dma/xilinx/zynqmp_dma.c | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/drivers/dma/xilinx/zynqmp_dma.c b/drivers/dma/xilinx/zynqmp_dma.c index bdeae8a34123..2a57d7c38d35 100644 --- a/drivers/dma/xilinx/zynqmp_dma.c +++ b/drivers/dma/xilinx/zynqmp_dma.c @@ -6,15 +6,12 @@ */ #include -#include -#include +#include #include #include #include #include -#include #include -#include #include #include #include -- cgit v1.2.3 From 16ed0ef3e931f49b06d73afbe1fb41737bee86b6 Mon Sep 17 00:00:00 2001 From: Michael Tretter Date: Thu, 26 Aug 2021 11:47:40 +0200 Subject: dmaengine: zynqmp_dma: cleanup after completing all descriptors The current implementation iterates the entire done list for each completed dma descriptor even if there are multiple completed descriptors. Avoid this by first moving all completed descriptors to the done list and afterwards iterating the done list and finishing the descriptors. 
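Seen side by side, the change turns a nested walk into two sequential ones; a condensed before/after sketch of the tasklet body (paraphrasing the hunk below, not complete driver code):

	/* Before: the whole done_list is walked once per completed descriptor. */
	while (count--) {
		zynqmp_dma_complete_descriptor(chan);	/* move one descriptor to done_list */
		zynqmp_dma_chan_desc_cleanup(chan);	/* walk the entire done_list */
	}

	/* After: move every completed descriptor first, then clean up once. */
	while (count--)
		zynqmp_dma_complete_descriptor(chan);
	zynqmp_dma_chan_desc_cleanup(chan);
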
Signed-off-by: Michael Tretter Link: https://lore.kernel.org/r/20210826094742.1302009-6-m.tretter@pengutronix.de Signed-off-by: Vinod Koul --- drivers/dma/xilinx/zynqmp_dma.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/dma/xilinx/zynqmp_dma.c b/drivers/dma/xilinx/zynqmp_dma.c index 2a57d7c38d35..aa1374c24498 100644 --- a/drivers/dma/xilinx/zynqmp_dma.c +++ b/drivers/dma/xilinx/zynqmp_dma.c @@ -758,10 +758,11 @@ static void zynqmp_dma_do_tasklet(struct tasklet_struct *t) while (count) { zynqmp_dma_complete_descriptor(chan); - zynqmp_dma_chan_desc_cleanup(chan); count--; } + zynqmp_dma_chan_desc_cleanup(chan); + if (chan->idle) zynqmp_dma_start_transfer(chan); -- cgit v1.2.3 From 193a750df59580ddda6779290fa3898ba3216d3c Mon Sep 17 00:00:00 2001 From: Michael Tretter Date: Thu, 26 Aug 2021 11:47:41 +0200 Subject: dmaengine: zynqmp_dma: refine dma descriptor locking The descriptor lists are locked for the entire tasklet that completes the descriptors. This is not necessary, because the lock actually only protects the descriptor lists. Make the spin lock more fine-grained and only protect functions that actually operate on the descriptor lists. This decreases the time when the lock is held. Signed-off-by: Michael Tretter Link: https://lore.kernel.org/r/20210826094742.1302009-7-m.tretter@pengutronix.de Signed-off-by: Vinod Koul --- drivers/dma/xilinx/zynqmp_dma.c | 35 +++++++++++++++++++++-------------- 1 file changed, 21 insertions(+), 14 deletions(-) diff --git a/drivers/dma/xilinx/zynqmp_dma.c b/drivers/dma/xilinx/zynqmp_dma.c index aa1374c24498..b4220cb88da6 100644 --- a/drivers/dma/xilinx/zynqmp_dma.c +++ b/drivers/dma/xilinx/zynqmp_dma.c @@ -600,6 +600,9 @@ static void zynqmp_dma_start_transfer(struct zynqmp_dma_chan *chan) static void zynqmp_dma_chan_desc_cleanup(struct zynqmp_dma_chan *chan) { struct zynqmp_dma_desc_sw *desc, *next; + unsigned long irqflags; + + spin_lock_irqsave(&chan->lock, irqflags); list_for_each_entry_safe(desc, next, &chan->done_list, node) { dma_async_tx_callback callback; @@ -616,6 +619,8 @@ static void zynqmp_dma_chan_desc_cleanup(struct zynqmp_dma_chan *chan) /* Run any dependencies, then free the descriptor */ zynqmp_dma_free_descriptor(chan, desc); } + + spin_unlock_irqrestore(&chan->lock, irqflags); } /** @@ -655,9 +660,13 @@ static void zynqmp_dma_issue_pending(struct dma_chan *dchan) */ static void zynqmp_dma_free_descriptors(struct zynqmp_dma_chan *chan) { + unsigned long irqflags; + + spin_lock_irqsave(&chan->lock, irqflags); zynqmp_dma_free_desc_list(chan, &chan->active_list); zynqmp_dma_free_desc_list(chan, &chan->pending_list); zynqmp_dma_free_desc_list(chan, &chan->done_list); + spin_unlock_irqrestore(&chan->lock, irqflags); } /** @@ -667,11 +676,8 @@ static void zynqmp_dma_free_descriptors(struct zynqmp_dma_chan *chan) static void zynqmp_dma_free_chan_resources(struct dma_chan *dchan) { struct zynqmp_dma_chan *chan = to_chan(dchan); - unsigned long irqflags; - spin_lock_irqsave(&chan->lock, irqflags); zynqmp_dma_free_descriptors(chan); - spin_unlock_irqrestore(&chan->lock, irqflags); dma_free_coherent(chan->dev, (2 * ZYNQMP_DMA_DESC_SIZE(chan) * ZYNQMP_DMA_NUM_DESCS), chan->desc_pool_v, chan->desc_pool_p); @@ -686,11 +692,16 @@ static void zynqmp_dma_free_chan_resources(struct dma_chan *dchan) */ static void zynqmp_dma_reset(struct zynqmp_dma_chan *chan) { + unsigned long irqflags; + writel(ZYNQMP_DMA_IDS_DEFAULT_MASK, chan->regs + ZYNQMP_DMA_IDS); + spin_lock_irqsave(&chan->lock, irqflags); 
zynqmp_dma_complete_descriptor(chan); + spin_unlock_irqrestore(&chan->lock, irqflags); zynqmp_dma_chan_desc_cleanup(chan); zynqmp_dma_free_descriptors(chan); + zynqmp_dma_init(chan); } @@ -746,28 +757,27 @@ static void zynqmp_dma_do_tasklet(struct tasklet_struct *t) u32 count; unsigned long irqflags; - spin_lock_irqsave(&chan->lock, irqflags); - if (chan->err) { zynqmp_dma_reset(chan); chan->err = false; - goto unlock; + return; } + spin_lock_irqsave(&chan->lock, irqflags); count = readl(chan->regs + ZYNQMP_DMA_IRQ_DST_ACCT); - while (count) { zynqmp_dma_complete_descriptor(chan); count--; } + spin_unlock_irqrestore(&chan->lock, irqflags); zynqmp_dma_chan_desc_cleanup(chan); - if (chan->idle) + if (chan->idle) { + spin_lock_irqsave(&chan->lock, irqflags); zynqmp_dma_start_transfer(chan); - -unlock: - spin_unlock_irqrestore(&chan->lock, irqflags); + spin_unlock_irqrestore(&chan->lock, irqflags); + } } /** @@ -779,12 +789,9 @@ unlock: static int zynqmp_dma_device_terminate_all(struct dma_chan *dchan) { struct zynqmp_dma_chan *chan = to_chan(dchan); - unsigned long irqflags; - spin_lock_irqsave(&chan->lock, irqflags); writel(ZYNQMP_DMA_IDS_DEFAULT_MASK, chan->regs + ZYNQMP_DMA_IDS); zynqmp_dma_free_descriptors(chan); - spin_unlock_irqrestore(&chan->lock, irqflags); return 0; } -- cgit v1.2.3 From 9558cf4ad07e8913c14e83959212ae8fdf60cfea Mon Sep 17 00:00:00 2001 From: Michael Tretter Date: Thu, 26 Aug 2021 11:47:42 +0200 Subject: dmaengine: zynqmp_dma: fix lockdep warning in tasklet The tasklet that handles the completed dma transfers uses spin_unlock for unlocking a spin lock that was previously locked with spin_lock_irqsave. This caused the following lockdep warning about an inconsistent lock state: inconsistent {HARDIRQ-ON-W} -> {IN-HARDIRQ-W} usage. We must use spin_lock_irqsave, because it is possible to queue DMA transfers from an irq handler. Replace the spin_unlock and spin_lock by spin_unlock_irqrestore and spin_lock_irqsave. Signed-off-by: Michael Tretter Link: https://lore.kernel.org/r/20210826094742.1302009-8-m.tretter@pengutronix.de Signed-off-by: Vinod Koul --- drivers/dma/xilinx/zynqmp_dma.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/dma/xilinx/zynqmp_dma.c b/drivers/dma/xilinx/zynqmp_dma.c index b4220cb88da6..54adac6391ef 100644 --- a/drivers/dma/xilinx/zynqmp_dma.c +++ b/drivers/dma/xilinx/zynqmp_dma.c @@ -611,9 +611,9 @@ static void zynqmp_dma_chan_desc_cleanup(struct zynqmp_dma_chan *chan) callback = desc->async_tx.callback; callback_param = desc->async_tx.callback_param; if (callback) { - spin_unlock(&chan->lock); + spin_unlock_irqrestore(&chan->lock, irqflags); callback(callback_param); - spin_lock(&chan->lock); + spin_lock_irqsave(&chan->lock, irqflags); } /* Run any dependencies, then free the descriptor */ -- cgit v1.2.3 From ae8f13f0a6fdd7562e420b756daa9b807e05f775 Mon Sep 17 00:00:00 2001 From: "Gustavo A. R. Silva" Date: Wed, 29 Sep 2021 17:29:22 -0500 Subject: dmaengine: stm32-mdma: Use struct_size() helper in devm_kzalloc() Make use of the struct_size() helper instead of an open-coded version, in order to avoid any potential type mistakes or integer overflows that, in the worse scenario, could lead to heap overflows. Link: https://github.com/KSPP/linux/issues/160 Signed-off-by: Gustavo A. R. 
Silva Reviewed-by: Amelie Delaunay Reviewed-by: Kees Cook Link: https://lore.kernel.org/r/20210929222922.GA357509@embeddedor Signed-off-by: Vinod Koul --- drivers/dma/stm32-mdma.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/dma/stm32-mdma.c b/drivers/dma/stm32-mdma.c index 18cbd1e43c2e..d30a4a28d3bf 100644 --- a/drivers/dma/stm32-mdma.c +++ b/drivers/dma/stm32-mdma.c @@ -1566,7 +1566,8 @@ static int stm32_mdma_probe(struct platform_device *pdev) if (count < 0) count = 0; - dmadev = devm_kzalloc(&pdev->dev, sizeof(*dmadev) + sizeof(u32) * count, + dmadev = devm_kzalloc(&pdev->dev, + struct_size(dmadev, ahb_addr_masks, count), GFP_KERNEL); if (!dmadev) return -ENOMEM; -- cgit v1.2.3 From 85f604af9c83a4656b1d07bec73298c3ba7d7c1e Mon Sep 17 00:00:00 2001 From: Dave Jiang Date: Wed, 29 Sep 2021 12:15:38 -0700 Subject: dmaengine: idxd: move out percpu_ref_exit() to ensure it's outside submission percpu_ref_tryget_live() is safe to call as long as ref is between init and exit according to the function comment. Move percpu_ref_exit() so it is called after the dma channel is no longer valid to ensure this holds true. Fixes: 93a40a6d7428 ("dmaengine: idxd: add percpu_ref to descriptor submission path") Suggested-by: Kevin Tian Signed-off-by: Dave Jiang Link: https://lore.kernel.org/r/163294293832.914350.10326422026738506152.stgit@djiang5-desk3.ch.intel.com Signed-off-by: Vinod Koul --- drivers/dma/idxd/device.c | 1 - drivers/dma/idxd/dma.c | 2 ++ 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/dma/idxd/device.c b/drivers/dma/idxd/device.c index 83a5ff2ecf2a..cbbfa17d8d11 100644 --- a/drivers/dma/idxd/device.c +++ b/drivers/dma/idxd/device.c @@ -427,7 +427,6 @@ void idxd_wq_quiesce(struct idxd_wq *wq) { percpu_ref_kill(&wq->wq_active); wait_for_completion(&wq->wq_dead); - percpu_ref_exit(&wq->wq_active); } /* Device control bits */ diff --git a/drivers/dma/idxd/dma.c b/drivers/dma/idxd/dma.c index e0f056c1d1f5..b90b085d18cf 100644 --- a/drivers/dma/idxd/dma.c +++ b/drivers/dma/idxd/dma.c @@ -311,6 +311,7 @@ static int idxd_dmaengine_drv_probe(struct idxd_dev *idxd_dev) err_dma: idxd_wq_quiesce(wq); + percpu_ref_exit(&wq->wq_active); err_ref: idxd_wq_free_resources(wq); err_res_alloc: @@ -329,6 +330,7 @@ static void idxd_dmaengine_drv_remove(struct idxd_dev *idxd_dev) idxd_wq_quiesce(wq); idxd_unregister_dma_channel(wq); __drv_disable_wq(wq); + percpu_ref_exit(&wq->wq_active); idxd_wq_free_resources(wq); wq->type = IDXD_WQT_NONE; mutex_unlock(&wq->wq_lock); -- cgit v1.2.3 From 35696789cc7dbfcb6e9bcd5c52319db9445d3616 Mon Sep 17 00:00:00 2001 From: Sameer Pujar Date: Wed, 15 Sep 2021 21:37:03 +0530 Subject: dmaengine: tegra210-adma: Re-order 'has_outstanding_reqs' member The 'has_outstanding_reqs' member description order in structure 'tegra_adma_chip_data' does not match with the corresponding member declaration. The same is true for member assignment in chip data structures declared for Tegra210 and Tegra186. This is a trivial fix to re-order the mentioned member for a better readability. 
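The convention being restored is that kernel-doc member descriptions and designated initializers mirror the declaration order of the structure; a small illustration with a hypothetical structure (not the Tegra code):

/**
 * struct foo_cfg - example chip data (hypothetical)
 * @depth: FIFO depth in words
 * @nr_channels: number of channels
 * @has_irq: set if the block raises an interrupt
 */
struct foo_cfg {
	unsigned int depth;
	unsigned int nr_channels;
	bool has_irq;			/* declared last, so described last */
};

static const struct foo_cfg foo_data = {
	.depth = 16,
	.nr_channels = 4,
	.has_irq = true,		/* initializers follow the same order */
};
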
Signed-off-by: Sameer Pujar Reviewed-by: Jon Hunter Link: https://lore.kernel.org/r/1631722025-19873-2-git-send-email-spujar@nvidia.com Signed-off-by: Vinod Koul --- drivers/dma/tegra210-adma.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/dma/tegra210-adma.c b/drivers/dma/tegra210-adma.c index b1115a6d1935..caf200e53a6b 100644 --- a/drivers/dma/tegra210-adma.c +++ b/drivers/dma/tegra210-adma.c @@ -78,12 +78,12 @@ struct tegra_adma; * @ch_req_tx_shift: Register offset for AHUB transmit channel select. * @ch_req_rx_shift: Register offset for AHUB receive channel select. * @ch_base_offset: Register offset of DMA channel registers. - * @has_outstanding_reqs: If DMA channel can have outstanding requests. * @ch_fifo_ctrl: Default value for channel FIFO CTRL register. * @ch_req_mask: Mask for Tx or Rx channel select. * @ch_req_max: Maximum number of Tx or Rx channels available. * @ch_reg_size: Size of DMA channel register space. * @nr_channels: Number of DMA channels available. + * @has_outstanding_reqs: If DMA channel can have outstanding requests. */ struct tegra_adma_chip_data { unsigned int (*adma_get_burst_config)(unsigned int burst_size); @@ -782,12 +782,12 @@ static const struct tegra_adma_chip_data tegra210_chip_data = { .ch_req_tx_shift = 28, .ch_req_rx_shift = 24, .ch_base_offset = 0, - .has_outstanding_reqs = false, .ch_fifo_ctrl = TEGRA210_FIFO_CTRL_DEFAULT, .ch_req_mask = 0xf, .ch_req_max = 10, .ch_reg_size = 0x80, .nr_channels = 22, + .has_outstanding_reqs = false, }; static const struct tegra_adma_chip_data tegra186_chip_data = { @@ -797,12 +797,12 @@ static const struct tegra_adma_chip_data tegra186_chip_data = { .ch_req_tx_shift = 27, .ch_req_rx_shift = 22, .ch_base_offset = 0x10000, - .has_outstanding_reqs = true, .ch_fifo_ctrl = TEGRA186_FIFO_CTRL_DEFAULT, .ch_req_mask = 0x1f, .ch_req_max = 20, .ch_reg_size = 0x100, .nr_channels = 32, + .has_outstanding_reqs = true, }; static const struct of_device_id tegra_adma_of_match[] = { -- cgit v1.2.3 From c7f9c67ffb7be8aeafae7a4faee6738ac38a64bb Mon Sep 17 00:00:00 2001 From: Sameer Pujar Date: Wed, 15 Sep 2021 21:37:04 +0530 Subject: dmaengine: tegra210-adma: Add description for 'adma_get_burst_config' Trivial change to add description for 'adma_get_burst_config' in chip data structure. Signed-off-by: Sameer Pujar Reviewed-by: Jon Hunter Link: https://lore.kernel.org/r/1631722025-19873-3-git-send-email-spujar@nvidia.com Signed-off-by: Vinod Koul --- drivers/dma/tegra210-adma.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/dma/tegra210-adma.c b/drivers/dma/tegra210-adma.c index caf200e53a6b..03f9776896fa 100644 --- a/drivers/dma/tegra210-adma.c +++ b/drivers/dma/tegra210-adma.c @@ -73,6 +73,7 @@ struct tegra_adma; /* * struct tegra_adma_chip_data - Tegra chip specific data + * @adma_get_burst_config: Function callback used to set DMA burst size. * @global_reg_offset: Register offset of DMA global register. * @global_int_clear: Register offset of DMA global interrupt clear. * @ch_req_tx_shift: Register offset for AHUB transmit channel select. -- cgit v1.2.3 From 32de4745e20a639376735f198cccd0477f9aa396 Mon Sep 17 00:00:00 2001 From: Sameer Pujar Date: Wed, 15 Sep 2021 21:37:05 +0530 Subject: dmaengine: tegra210-adma: Override ADMA FIFO size ADMAIF FIFO uses a ring buffer and it is divided amongst the available channels. 
The default FIFO size (in multiples of 16 words) of ADMAIF TX/RX channels is as below: * On Tegra210, channel 1 to 2 : size = 3 channel 3 to 10: size = 2 * On Tegra186 and later, channel 1 to 4 : size = 3 channel 5 to 20: size = 2 As per recommendation from HW, FIFO size of ADMA channel should be same as the corresponding ADMAIF channel it maps to. FIFO corruption is observed if the sizes do not match. We are using the default FIFO sizes for ADMAIF and there is no plan to support any custom values. Thus at runtime, override the ADMA channel FIFO size value depending on the corresponding ADMAIF channel. Signed-off-by: Sameer Pujar Reviewed-by: Jon Hunter Link: https://lore.kernel.org/r/1631722025-19873-4-git-send-email-spujar@nvidia.com Signed-off-by: Vinod Koul --- drivers/dma/tegra210-adma.c | 48 ++++++++++++++++++++++++++++++++------------- 1 file changed, 34 insertions(+), 14 deletions(-) diff --git a/drivers/dma/tegra210-adma.c b/drivers/dma/tegra210-adma.c index 03f9776896fa..911533cc3133 100644 --- a/drivers/dma/tegra210-adma.c +++ b/drivers/dma/tegra210-adma.c @@ -43,10 +43,8 @@ #define TEGRA186_ADMA_CH_CONFIG_OUTSTANDING_REQS(reqs) (reqs << 4) #define ADMA_CH_FIFO_CTRL 0x2c -#define TEGRA210_ADMA_CH_FIFO_CTRL_TXSIZE(val) (((val) & 0xf) << 8) -#define TEGRA210_ADMA_CH_FIFO_CTRL_RXSIZE(val) ((val) & 0xf) -#define TEGRA186_ADMA_CH_FIFO_CTRL_TXSIZE(val) (((val) & 0x1f) << 8) -#define TEGRA186_ADMA_CH_FIFO_CTRL_RXSIZE(val) ((val) & 0x1f) +#define ADMA_CH_TX_FIFO_SIZE_SHIFT 8 +#define ADMA_CH_RX_FIFO_SIZE_SHIFT 0 #define ADMA_CH_LOWER_SRC_ADDR 0x34 #define ADMA_CH_LOWER_TRG_ADDR 0x3c @@ -61,12 +59,6 @@ #define TEGRA_ADMA_BURST_COMPLETE_TIME 20 -#define TEGRA210_FIFO_CTRL_DEFAULT (TEGRA210_ADMA_CH_FIFO_CTRL_TXSIZE(3) | \ - TEGRA210_ADMA_CH_FIFO_CTRL_RXSIZE(3)) - -#define TEGRA186_FIFO_CTRL_DEFAULT (TEGRA186_ADMA_CH_FIFO_CTRL_TXSIZE(3) | \ - TEGRA186_ADMA_CH_FIFO_CTRL_RXSIZE(3)) - #define ADMA_CH_REG_FIELD_VAL(val, mask, shift) (((val) & mask) << shift) struct tegra_adma; @@ -84,6 +76,8 @@ struct tegra_adma; * @ch_req_max: Maximum number of Tx or Rx channels available. * @ch_reg_size: Size of DMA channel register space. * @nr_channels: Number of DMA channels available. + * @ch_fifo_size_mask: Mask for FIFO size field. + * @sreq_index_offset: Slave channel index offset. * @has_outstanding_reqs: If DMA channel can have outstanding requests. 
*/ struct tegra_adma_chip_data { @@ -98,6 +92,8 @@ struct tegra_adma_chip_data { unsigned int ch_req_max; unsigned int ch_reg_size; unsigned int nr_channels; + unsigned int ch_fifo_size_mask; + unsigned int sreq_index_offset; bool has_outstanding_reqs; }; @@ -561,13 +557,14 @@ static int tegra_adma_set_xfer_params(struct tegra_adma_chan *tdc, { struct tegra_adma_chan_regs *ch_regs = &desc->ch_regs; const struct tegra_adma_chip_data *cdata = tdc->tdma->cdata; - unsigned int burst_size, adma_dir; + unsigned int burst_size, adma_dir, fifo_size_shift; if (desc->num_periods > ADMA_CH_CONFIG_MAX_BUFS) return -EINVAL; switch (direction) { case DMA_MEM_TO_DEV: + fifo_size_shift = ADMA_CH_TX_FIFO_SIZE_SHIFT; adma_dir = ADMA_CH_CTRL_DIR_MEM2AHUB; burst_size = tdc->sconfig.dst_maxburst; ch_regs->config = ADMA_CH_CONFIG_SRC_BUF(desc->num_periods - 1); @@ -578,6 +575,7 @@ static int tegra_adma_set_xfer_params(struct tegra_adma_chan *tdc, break; case DMA_DEV_TO_MEM: + fifo_size_shift = ADMA_CH_RX_FIFO_SIZE_SHIFT; adma_dir = ADMA_CH_CTRL_DIR_AHUB2MEM; burst_size = tdc->sconfig.src_maxburst; ch_regs->config = ADMA_CH_CONFIG_TRG_BUF(desc->num_periods - 1); @@ -599,7 +597,27 @@ static int tegra_adma_set_xfer_params(struct tegra_adma_chan *tdc, ch_regs->config |= ADMA_CH_CONFIG_WEIGHT_FOR_WRR(1); if (cdata->has_outstanding_reqs) ch_regs->config |= TEGRA186_ADMA_CH_CONFIG_OUTSTANDING_REQS(8); - ch_regs->fifo_ctrl = cdata->ch_fifo_ctrl; + + /* + * 'sreq_index' represents the current ADMAIF channel number and as per + * HW recommendation its FIFO size should match with the corresponding + * ADMA channel. + * + * ADMA FIFO size is set as per below (based on default ADMAIF channel + * FIFO sizes): + * fifo_size = 0x2 (sreq_index > sreq_index_offset) + * fifo_size = 0x3 (sreq_index <= sreq_index_offset) + * + */ + if (tdc->sreq_index > cdata->sreq_index_offset) + ch_regs->fifo_ctrl = + ADMA_CH_REG_FIELD_VAL(2, cdata->ch_fifo_size_mask, + fifo_size_shift); + else + ch_regs->fifo_ctrl = + ADMA_CH_REG_FIELD_VAL(3, cdata->ch_fifo_size_mask, + fifo_size_shift); + ch_regs->tc = desc->period_len & ADMA_CH_TC_COUNT_MASK; return tegra_adma_request_alloc(tdc, direction); @@ -783,11 +801,12 @@ static const struct tegra_adma_chip_data tegra210_chip_data = { .ch_req_tx_shift = 28, .ch_req_rx_shift = 24, .ch_base_offset = 0, - .ch_fifo_ctrl = TEGRA210_FIFO_CTRL_DEFAULT, .ch_req_mask = 0xf, .ch_req_max = 10, .ch_reg_size = 0x80, .nr_channels = 22, + .ch_fifo_size_mask = 0xf, + .sreq_index_offset = 2, .has_outstanding_reqs = false, }; @@ -798,11 +817,12 @@ static const struct tegra_adma_chip_data tegra186_chip_data = { .ch_req_tx_shift = 27, .ch_req_rx_shift = 22, .ch_base_offset = 0x10000, - .ch_fifo_ctrl = TEGRA186_FIFO_CTRL_DEFAULT, .ch_req_mask = 0x1f, .ch_req_max = 20, .ch_reg_size = 0x100, .nr_channels = 32, + .ch_fifo_size_mask = 0x1f, + .sreq_index_offset = 4, .has_outstanding_reqs = true, }; -- cgit v1.2.3 From 1f6a89efbf9981a637bf472a5662e4d3746af530 Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Fri, 15 Oct 2021 13:34:47 +0100 Subject: dmaengine: Remove redundant initialization of variable err The variable err is being initialized with a value that is never read, it is being updated later on. The assignment is redundant and can be removed and move the declaration into the local scope. 
Addresses-Coverity: ("Unused value") Signed-off-by: Colin Ian King Link: https://lore.kernel.org/r/20211015123447.27560-1-colin.king@canonical.com Signed-off-by: Vinod Koul --- drivers/dma/dmaengine.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c index af3ee288bc11..d9f7c097cfd6 100644 --- a/drivers/dma/dmaengine.c +++ b/drivers/dma/dmaengine.c @@ -695,13 +695,12 @@ static struct dma_chan *find_candidate(struct dma_device *device, */ struct dma_chan *dma_get_slave_channel(struct dma_chan *chan) { - int err = -EBUSY; - /* lock against __dma_request_channel */ mutex_lock(&dma_list_mutex); if (chan->client_count == 0) { struct dma_device *device = chan->device; + int err; dma_cap_set(DMA_PRIVATE, device->cap_mask); device->privatecnt++; -- cgit v1.2.3 From fa5270ec2f2688d98a82895be7039b81c87d856c Mon Sep 17 00:00:00 2001 From: Claudiu Beznea Date: Thu, 7 Oct 2021 14:12:27 +0300 Subject: dmaengine: at_xdmac: call at_xdmac_axi_config() on resume path at_xdmac could be used on SoCs which supports backup mode (where most of the SoC power, including power to DMA controller, is closed at suspend time). Thus, on resume, the settings which were previously done need to be restored. Do the same for axi configuration. Fixes: f40566f220a1 ("dmaengine: at_xdmac: add AXI priority support and recommended settings") Signed-off-by: Claudiu Beznea Reviewed-by: Tudor Ambarus Link: https://lore.kernel.org/r/20211007111230.2331837-2-claudiu.beznea@microchip.com Signed-off-by: Vinod Koul --- drivers/dma/at_xdmac.c | 51 ++++++++++++++++++++++++++------------------------ 1 file changed, 27 insertions(+), 24 deletions(-) diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c index ab78e0f6afd7..c66ad5706cb5 100644 --- a/drivers/dma/at_xdmac.c +++ b/drivers/dma/at_xdmac.c @@ -1926,6 +1926,30 @@ static void at_xdmac_free_chan_resources(struct dma_chan *chan) return; } +static void at_xdmac_axi_config(struct platform_device *pdev) +{ + struct at_xdmac *atxdmac = (struct at_xdmac *)platform_get_drvdata(pdev); + bool dev_m2m = false; + u32 dma_requests; + + if (!atxdmac->layout->axi_config) + return; /* Not supported */ + + if (!of_property_read_u32(pdev->dev.of_node, "dma-requests", + &dma_requests)) { + dev_info(&pdev->dev, "controller in mem2mem mode.\n"); + dev_m2m = true; + } + + if (dev_m2m) { + at_xdmac_write(atxdmac, AT_XDMAC_GCFG, AT_XDMAC_GCFG_M2M); + at_xdmac_write(atxdmac, AT_XDMAC_GWAC, AT_XDMAC_GWAC_M2M); + } else { + at_xdmac_write(atxdmac, AT_XDMAC_GCFG, AT_XDMAC_GCFG_P2M); + at_xdmac_write(atxdmac, AT_XDMAC_GWAC, AT_XDMAC_GWAC_P2M); + } +} + #ifdef CONFIG_PM static int atmel_xdmac_prepare(struct device *dev) { @@ -1975,6 +1999,7 @@ static int atmel_xdmac_resume(struct device *dev) struct at_xdmac *atxdmac = dev_get_drvdata(dev); struct at_xdmac_chan *atchan; struct dma_chan *chan, *_chan; + struct platform_device *pdev = container_of(dev, struct platform_device, dev); int i; int ret; @@ -1982,6 +2007,8 @@ static int atmel_xdmac_resume(struct device *dev) if (ret) return ret; + at_xdmac_axi_config(pdev); + /* Clear pending interrupts. 
*/ for (i = 0; i < atxdmac->dma.chancnt; i++) { atchan = &atxdmac->chan[i]; @@ -2007,30 +2034,6 @@ static int atmel_xdmac_resume(struct device *dev) } #endif /* CONFIG_PM_SLEEP */ -static void at_xdmac_axi_config(struct platform_device *pdev) -{ - struct at_xdmac *atxdmac = (struct at_xdmac *)platform_get_drvdata(pdev); - bool dev_m2m = false; - u32 dma_requests; - - if (!atxdmac->layout->axi_config) - return; /* Not supported */ - - if (!of_property_read_u32(pdev->dev.of_node, "dma-requests", - &dma_requests)) { - dev_info(&pdev->dev, "controller in mem2mem mode.\n"); - dev_m2m = true; - } - - if (dev_m2m) { - at_xdmac_write(atxdmac, AT_XDMAC_GCFG, AT_XDMAC_GCFG_M2M); - at_xdmac_write(atxdmac, AT_XDMAC_GWAC, AT_XDMAC_GWAC_M2M); - } else { - at_xdmac_write(atxdmac, AT_XDMAC_GCFG, AT_XDMAC_GCFG_P2M); - at_xdmac_write(atxdmac, AT_XDMAC_GWAC, AT_XDMAC_GWAC_P2M); - } -} - static int at_xdmac_probe(struct platform_device *pdev) { struct at_xdmac *atxdmac; -- cgit v1.2.3 From 320c88a3104dc955f928a1eecebd551ff89530c0 Mon Sep 17 00:00:00 2001 From: Claudiu Beznea Date: Thu, 7 Oct 2021 14:12:28 +0300 Subject: dmaengine: at_xdmac: fix AT_XDMAC_CC_PERID() macro AT_XDMAC_CC_PERID() should be used to setup bits 24..30 of XDMAC_CC register. Using it without parenthesis around 0x7f & (i) will lead to setting all the time zero for bits 24..30 of XDMAC_CC as the << operator has higher precedence over bitwise &. Thus, add paranthesis around 0x7f & (i). Fixes: 15a03850ab8f ("dmaengine: at_xdmac: fix macro typo") Signed-off-by: Claudiu Beznea Reviewed-by: Tudor Ambarus Link: https://lore.kernel.org/r/20211007111230.2331837-3-claudiu.beznea@microchip.com Signed-off-by: Vinod Koul --- drivers/dma/at_xdmac.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c index c66ad5706cb5..e18abbd56fb5 100644 --- a/drivers/dma/at_xdmac.c +++ b/drivers/dma/at_xdmac.c @@ -155,7 +155,7 @@ #define AT_XDMAC_CC_WRIP (0x1 << 23) /* Write in Progress (read only) */ #define AT_XDMAC_CC_WRIP_DONE (0x0 << 23) #define AT_XDMAC_CC_WRIP_IN_PROGRESS (0x1 << 23) -#define AT_XDMAC_CC_PERID(i) (0x7f & (i) << 24) /* Channel Peripheral Identifier */ +#define AT_XDMAC_CC_PERID(i) ((0x7f & (i)) << 24) /* Channel Peripheral Identifier */ #define AT_XDMAC_CDS_MSP 0x2C /* Channel Data Stride Memory Set Pattern */ #define AT_XDMAC_CSUS 0x30 /* Channel Source Microblock Stride */ #define AT_XDMAC_CDUS 0x34 /* Channel Destination Microblock Stride */ -- cgit v1.2.3 From b183d41a340b224ed99dc0b967de590a0be8ef48 Mon Sep 17 00:00:00 2001 From: Claudiu Beznea Date: Thu, 7 Oct 2021 14:12:29 +0300 Subject: dmaengine: at_xdmac: use __maybe_unused for pm functions Use __maybe_unused for pm functions. 
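With __maybe_unused the callbacks are always compiled and type-checked, and the attribute only suppresses the "defined but not used" warning in configurations where nothing references them, which is what the #ifdef CONFIG_PM/CONFIG_PM_SLEEP guards used to paper over. A minimal sketch of the pattern with generic names (not the at_xdmac callbacks, which populate their own dev_pm_ops):

#include <linux/device.h>
#include <linux/pm.h>

static int __maybe_unused foo_suspend(struct device *dev)
{
	/* save context, gate clocks, ... */
	return 0;
}

static int __maybe_unused foo_resume(struct device *dev)
{
	/* restore context */
	return 0;
}

/* SIMPLE_DEV_PM_OPS() only references the callbacks when CONFIG_PM_SLEEP is set. */
static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);
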
Signed-off-by: Claudiu Beznea Reviewed-by: Tudor Ambarus Link: https://lore.kernel.org/r/20211007111230.2331837-4-claudiu.beznea@microchip.com Signed-off-by: Vinod Koul --- drivers/dma/at_xdmac.c | 12 +++--------- 1 file changed, 3 insertions(+), 9 deletions(-) diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c index e18abbd56fb5..12371396fcc0 100644 --- a/drivers/dma/at_xdmac.c +++ b/drivers/dma/at_xdmac.c @@ -1950,8 +1950,7 @@ static void at_xdmac_axi_config(struct platform_device *pdev) } } -#ifdef CONFIG_PM -static int atmel_xdmac_prepare(struct device *dev) +static int __maybe_unused atmel_xdmac_prepare(struct device *dev) { struct at_xdmac *atxdmac = dev_get_drvdata(dev); struct dma_chan *chan, *_chan; @@ -1965,12 +1964,8 @@ static int atmel_xdmac_prepare(struct device *dev) } return 0; } -#else -# define atmel_xdmac_prepare NULL -#endif -#ifdef CONFIG_PM_SLEEP -static int atmel_xdmac_suspend(struct device *dev) +static int __maybe_unused atmel_xdmac_suspend(struct device *dev) { struct at_xdmac *atxdmac = dev_get_drvdata(dev); struct dma_chan *chan, *_chan; @@ -1994,7 +1989,7 @@ static int atmel_xdmac_suspend(struct device *dev) return 0; } -static int atmel_xdmac_resume(struct device *dev) +static int __maybe_unused atmel_xdmac_resume(struct device *dev) { struct at_xdmac *atxdmac = dev_get_drvdata(dev); struct at_xdmac_chan *atchan; @@ -2032,7 +2027,6 @@ static int atmel_xdmac_resume(struct device *dev) } return 0; } -#endif /* CONFIG_PM_SLEEP */ static int at_xdmac_probe(struct platform_device *pdev) { -- cgit v1.2.3 From 8e0c7e486014f8e924f5dc8f7a8719adb48f9d92 Mon Sep 17 00:00:00 2001 From: Claudiu Beznea Date: Thu, 7 Oct 2021 14:12:30 +0300 Subject: dmaengine: at_xdmac: use pm_ptr() Use pm_ptr() macro to fill at_xdmac_driver.driver.pm. Signed-off-by: Claudiu Beznea Reviewed-by: Tudor Ambarus Link: https://lore.kernel.org/r/20211007111230.2331837-5-claudiu.beznea@microchip.com Signed-off-by: Vinod Koul --- drivers/dma/at_xdmac.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c index 12371396fcc0..7fb19bd18ac3 100644 --- a/drivers/dma/at_xdmac.c +++ b/drivers/dma/at_xdmac.c @@ -2231,7 +2231,7 @@ static struct platform_driver at_xdmac_driver = { .driver = { .name = "at_xdmac", .of_match_table = of_match_ptr(atmel_xdmac_dt_ids), - .pm = &atmel_xdmac_dev_pm_ops, + .pm = pm_ptr(&atmel_xdmac_dev_pm_ops), } }; -- cgit v1.2.3 From adec566b05288f2787a1f88dbaf77ed8b0c644fa Mon Sep 17 00:00:00 2001 From: Anatolij Gustschin Date: Thu, 14 Oct 2021 11:40:12 +0200 Subject: dmaengine: bestcomm: fix system boot lockups memset() and memcpy() on an MMIO region like here results in a lockup at startup on mpc5200 platform (since this first happens during probing of the ATA and Ethernet drivers). Use memset_io() and memcpy_toio() instead. 
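The rule being applied: the bestcomm descriptors and task memory sit behind an __iomem mapping, and plain memset()/memcpy() are only defined for normal memory, so they may use cache or string operations that misbehave on such a mapping, while memset_io()/memcpy_toio() go through the proper I/O accessors. A minimal sketch of the accessor choice, with placeholder names and an assumed ioremap()ed region:

#include <linux/io.h>

/* bd points into device SRAM (a void __iomem * obtained from ioremap()). */
static void foo_reset_bd(void __iomem *bd, size_t len,
			 const void *image, size_t image_len)
{
	memset_io(bd, 0x00, len);		/* never plain memset() on __iomem */
	memcpy_toio(bd, image, image_len);	/* copy a CPU buffer into device memory */
}
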
Fixes: 2f9ea1bde0d1 ("bestcomm: core bestcomm support for Freescale MPC5200") Cc: stable@vger.kernel.org # v5.14+ Signed-off-by: Anatolij Gustschin Link: https://lore.kernel.org/r/20211014094012.21286-1-agust@denx.de Signed-off-by: Vinod Koul --- drivers/dma/bestcomm/ata.c | 2 +- drivers/dma/bestcomm/bestcomm.c | 22 +++++++++++----------- drivers/dma/bestcomm/fec.c | 4 ++-- drivers/dma/bestcomm/gen_bd.c | 4 ++-- 4 files changed, 16 insertions(+), 16 deletions(-) diff --git a/drivers/dma/bestcomm/ata.c b/drivers/dma/bestcomm/ata.c index 2fd87f83cf90..e169f18da551 100644 --- a/drivers/dma/bestcomm/ata.c +++ b/drivers/dma/bestcomm/ata.c @@ -133,7 +133,7 @@ void bcom_ata_reset_bd(struct bcom_task *tsk) struct bcom_ata_var *var; /* Reset all BD */ - memset(tsk->bd, 0x00, tsk->num_bd * tsk->bd_size); + memset_io(tsk->bd, 0x00, tsk->num_bd * tsk->bd_size); tsk->index = 0; tsk->outdex = 0; diff --git a/drivers/dma/bestcomm/bestcomm.c b/drivers/dma/bestcomm/bestcomm.c index d91cbbe7a48f..8c42e5ca00a9 100644 --- a/drivers/dma/bestcomm/bestcomm.c +++ b/drivers/dma/bestcomm/bestcomm.c @@ -95,7 +95,7 @@ bcom_task_alloc(int bd_count, int bd_size, int priv_size) tsk->bd = bcom_sram_alloc(bd_count * bd_size, 4, &tsk->bd_pa); if (!tsk->bd) goto error; - memset(tsk->bd, 0x00, bd_count * bd_size); + memset_io(tsk->bd, 0x00, bd_count * bd_size); tsk->num_bd = bd_count; tsk->bd_size = bd_size; @@ -186,16 +186,16 @@ bcom_load_image(int task, u32 *task_image) inc = bcom_task_inc(task); /* Clear & copy */ - memset(var, 0x00, BCOM_VAR_SIZE); - memset(inc, 0x00, BCOM_INC_SIZE); + memset_io(var, 0x00, BCOM_VAR_SIZE); + memset_io(inc, 0x00, BCOM_INC_SIZE); desc_src = (u32 *)(hdr + 1); var_src = desc_src + hdr->desc_size; inc_src = var_src + hdr->var_size; - memcpy(desc, desc_src, hdr->desc_size * sizeof(u32)); - memcpy(var + hdr->first_var, var_src, hdr->var_size * sizeof(u32)); - memcpy(inc, inc_src, hdr->inc_size * sizeof(u32)); + memcpy_toio(desc, desc_src, hdr->desc_size * sizeof(u32)); + memcpy_toio(var + hdr->first_var, var_src, hdr->var_size * sizeof(u32)); + memcpy_toio(inc, inc_src, hdr->inc_size * sizeof(u32)); return 0; } @@ -302,13 +302,13 @@ static int bcom_engine_init(void) return -ENOMEM; } - memset(bcom_eng->tdt, 0x00, tdt_size); - memset(bcom_eng->ctx, 0x00, ctx_size); - memset(bcom_eng->var, 0x00, var_size); - memset(bcom_eng->fdt, 0x00, fdt_size); + memset_io(bcom_eng->tdt, 0x00, tdt_size); + memset_io(bcom_eng->ctx, 0x00, ctx_size); + memset_io(bcom_eng->var, 0x00, var_size); + memset_io(bcom_eng->fdt, 0x00, fdt_size); /* Copy the FDT for the EU#3 */ - memcpy(&bcom_eng->fdt[48], fdt_ops, sizeof(fdt_ops)); + memcpy_toio(&bcom_eng->fdt[48], fdt_ops, sizeof(fdt_ops)); /* Initialize Task base structure */ for (task=0; taskindex = 0; tsk->outdex = 0; - memset(tsk->bd, 0x00, tsk->num_bd * tsk->bd_size); + memset_io(tsk->bd, 0x00, tsk->num_bd * tsk->bd_size); /* Configure some stuff */ bcom_set_task_pragma(tsk->tasknum, BCOM_FEC_RX_BD_PRAGMA); @@ -241,7 +241,7 @@ bcom_fec_tx_reset(struct bcom_task *tsk) tsk->index = 0; tsk->outdex = 0; - memset(tsk->bd, 0x00, tsk->num_bd * tsk->bd_size); + memset_io(tsk->bd, 0x00, tsk->num_bd * tsk->bd_size); /* Configure some stuff */ bcom_set_task_pragma(tsk->tasknum, BCOM_FEC_TX_BD_PRAGMA); diff --git a/drivers/dma/bestcomm/gen_bd.c b/drivers/dma/bestcomm/gen_bd.c index 906ddba6a6f5..8a24a5cbc263 100644 --- a/drivers/dma/bestcomm/gen_bd.c +++ b/drivers/dma/bestcomm/gen_bd.c @@ -142,7 +142,7 @@ bcom_gen_bd_rx_reset(struct bcom_task *tsk) tsk->index = 0; tsk->outdex = 
0; - memset(tsk->bd, 0x00, tsk->num_bd * tsk->bd_size); + memset_io(tsk->bd, 0x00, tsk->num_bd * tsk->bd_size); /* Configure some stuff */ bcom_set_task_pragma(tsk->tasknum, BCOM_GEN_RX_BD_PRAGMA); @@ -226,7 +226,7 @@ bcom_gen_bd_tx_reset(struct bcom_task *tsk) tsk->index = 0; tsk->outdex = 0; - memset(tsk->bd, 0x00, tsk->num_bd * tsk->bd_size); + memset_io(tsk->bd, 0x00, tsk->num_bd * tsk->bd_size); /* Configure some stuff */ bcom_set_task_pragma(tsk->tasknum, BCOM_GEN_TX_BD_PRAGMA); -- cgit v1.2.3 From 79c4c3db7d86b9bec94562275efc82e58f3d0132 Mon Sep 17 00:00:00 2001 From: Dave Jiang Date: Tue, 12 Oct 2021 11:01:59 -0700 Subject: dmaengine: idxd: check GENCAP config support for gencfg register DSA spec 1.2 has moved the GENCFG register under the GENCAP configuration support with respect to writability. Add check in driver before writing to GENCFG register. Signed-off-by: Dave Jiang Link: https://lore.kernel.org/r/163406171896.1303830.11217958011385656998.stgit@djiang5-desk3.ch.intel.com Signed-off-by: Vinod Koul --- drivers/dma/idxd/device.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/dma/idxd/device.c b/drivers/dma/idxd/device.c index cbbfa17d8d11..27612329f510 100644 --- a/drivers/dma/idxd/device.c +++ b/drivers/dma/idxd/device.c @@ -791,7 +791,7 @@ static int idxd_groups_config_write(struct idxd_device *idxd) struct device *dev = &idxd->pdev->dev; /* Setup bandwidth token limit */ - if (idxd->token_limit) { + if (idxd->hw.gen_cap.config_en && idxd->token_limit) { reg.bits = ioread32(idxd->reg_base + IDXD_GENCFG_OFFSET); reg.token_limit = idxd->token_limit; iowrite32(reg.bits, idxd->reg_base + IDXD_GENCFG_OFFSET); -- cgit v1.2.3 From c5b64b6826e020041ef4f68a281dee2f33c827d6 Mon Sep 17 00:00:00 2001 From: Dave Jiang Date: Tue, 12 Oct 2021 11:01:19 -0700 Subject: dmaengine: idxd: remove gen cap field per spec 1.2 update Remove max_descs_per_engine field. The recently released DSA spec 1.2 [1] has removed this field and made it reserved. [1]: https://software.intel.com/content/dam/develop/external/us/en/documents-tps/341204-intel-data-streaming-accelerator-spec.pdf Signed-off-by: Dave Jiang Link: https://lore.kernel.org/r/163406167978.1303649.1798682437841822837.stgit@djiang5-desk3.ch.intel.com Signed-off-by: Vinod Koul --- drivers/dma/idxd/registers.h | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/drivers/dma/idxd/registers.h b/drivers/dma/idxd/registers.h index ffc7550a77ee..eeb11e6eb25b 100644 --- a/drivers/dma/idxd/registers.h +++ b/drivers/dma/idxd/registers.h @@ -36,8 +36,7 @@ union gen_cap_reg { u64 max_batch_shift:4; u64 max_ims_mult:6; u64 config_en:1; - u64 max_descs_per_engine:8; - u64 rsvd3:24; + u64 rsvd3:32; }; u64 bits; } __packed; -- cgit v1.2.3 From 161596fd776a54df922158175ed844804a861c37 Mon Sep 17 00:00:00 2001 From: Biju Das Date: Thu, 23 Sep 2021 11:24:51 +0100 Subject: dmaengine: sh: rz-dmac: Add DMA clock handling Currently, DMA clocks are turned on by the bootloader. This patch adds support for DMA clock handling so that the driver manages the DMA clocks. 
Fixes: 5000d37042a6 ("dmaengine: sh: Add DMAC driver for RZ/G2L SoC") Signed-off-by: Biju Das Reviewed-by: Geert Uytterhoeven Link: https://lore.kernel.org/r/20210923102451.11403-1-biju.das.jz@bp.renesas.com Signed-off-by: Vinod Koul --- drivers/dma/sh/rz-dmac.c | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/drivers/dma/sh/rz-dmac.c b/drivers/dma/sh/rz-dmac.c index f9f30cbeccbe..d9f2cfef878e 100644 --- a/drivers/dma/sh/rz-dmac.c +++ b/drivers/dma/sh/rz-dmac.c @@ -18,6 +18,7 @@ #include #include #include +#include #include #include @@ -872,6 +873,13 @@ static int rz_dmac_probe(struct platform_device *pdev) /* Initialize the channels. */ INIT_LIST_HEAD(&dmac->engine.channels); + pm_runtime_enable(&pdev->dev); + ret = pm_runtime_resume_and_get(&pdev->dev); + if (ret < 0) { + dev_err(&pdev->dev, "pm_runtime_resume_and_get failed\n"); + goto err_pm_disable; + } + for (i = 0; i < dmac->n_channels; i++) { ret = rz_dmac_chan_probe(dmac, &dmac->channels[i], i); if (ret < 0) @@ -925,6 +933,10 @@ err: channel->lmdesc.base_dma); } + pm_runtime_put(&pdev->dev); +err_pm_disable: + pm_runtime_disable(&pdev->dev); + return ret; } @@ -943,6 +955,8 @@ static int rz_dmac_remove(struct platform_device *pdev) } of_dma_controller_free(pdev->dev.of_node); dma_async_device_unregister(&dmac->engine); + pm_runtime_put(&pdev->dev); + pm_runtime_disable(&pdev->dev); return 0; } -- cgit v1.2.3 From d59f7037cec6b9388f658dfd4a56590a582467f0 Mon Sep 17 00:00:00 2001 From: Artur Rojek Date: Sun, 29 Aug 2021 21:58:05 +0200 Subject: dmaengine: jz4780: Set max number of SGs per burst Total amount of SG list entries executed in a single burst is limited by the number of available DMA descriptors. This information is useful for device drivers utilizing this DMA engine. Signed-off-by: Artur Rojek Acked-by: Paul Cercueil Link: https://lore.kernel.org/r/20210829195805.148964-1-contact@artur-rojek.eu Signed-off-by: Vinod Koul --- drivers/dma/dma-jz4780.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/dma/dma-jz4780.c b/drivers/dma/dma-jz4780.c index ebee94dbd630..96701dedcac8 100644 --- a/drivers/dma/dma-jz4780.c +++ b/drivers/dma/dma-jz4780.c @@ -915,6 +915,7 @@ static int jz4780_dma_probe(struct platform_device *pdev) dd->dst_addr_widths = JZ_DMA_BUSWIDTHS; dd->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV); dd->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST; + dd->max_sg_burst = JZ_DMA_MAX_DESC; /* * Enable DMA controller, mark all channels as not programmable. -- cgit v1.2.3 From 981703aae3b1965896caaef95f25886ec8007744 Mon Sep 17 00:00:00 2001 From: Christophe JAILLET Date: Sun, 10 Oct 2021 08:29:29 +0200 Subject: dmaengine: dw-edma: Remove an unused variable 'head' is unused, remove it. 
Signed-off-by: Christophe JAILLET Link: https://lore.kernel.org/r/46e071be21fbc5ac5c35d4796a7e4249e94c3a77.1633847306.git.christophe.jaillet@wanadoo.fr Signed-off-by: Vinod Koul --- drivers/dma/dw-edma/dw-edma-core.c | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/dma/dw-edma/dw-edma-core.c b/drivers/dma/dw-edma/dw-edma-core.c index 53289927dd0d..468d1097a1ec 100644 --- a/drivers/dma/dw-edma/dw-edma-core.c +++ b/drivers/dma/dw-edma/dw-edma-core.c @@ -249,7 +249,6 @@ static int dw_edma_device_terminate_all(struct dma_chan *dchan) { struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan); int err = 0; - LIST_HEAD(head); if (!chan->configured) { /* Do nothing */ -- cgit v1.2.3 From 79e40b06a4ebfc8e0a48ed6164345f8e0a96b699 Mon Sep 17 00:00:00 2001 From: Amelie Delaunay Date: Mon, 11 Oct 2021 11:42:57 +0200 Subject: dmaengine: stm32-dma: mark pending descriptor complete in terminate_all To prevent accidental repeated completion, mark pending descriptor complete in terminate_all. It can be the case when terminate_all is called while no end of transfer interrupt occurs. Signed-off-by: Amelie Delaunay Link: https://lore.kernel.org/r/20211011094259.315023-2-amelie.delaunay@foss.st.com Signed-off-by: Vinod Koul --- drivers/dma/stm32-dma.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/dma/stm32-dma.c b/drivers/dma/stm32-dma.c index 9063c727962e..a5ccf3fa95e0 100644 --- a/drivers/dma/stm32-dma.c +++ b/drivers/dma/stm32-dma.c @@ -497,6 +497,7 @@ static int stm32_dma_terminate_all(struct dma_chan *c) spin_lock_irqsave(&chan->vchan.lock, flags); if (chan->desc) { + dma_cookie_complete(&chan->desc->vdesc.tx); vchan_terminate_vdesc(&chan->desc->vdesc); if (chan->busy) stm32_dma_stop(chan); -- cgit v1.2.3 From b20fd5fa310cbf7ec367f263a34382a24c4cee73 Mon Sep 17 00:00:00 2001 From: Amelie Delaunay Date: Mon, 11 Oct 2021 11:42:58 +0200 Subject: dmaengine: stm32-dma: fix stm32_dma_get_max_width buf_addr parameter of stm32_dma_set_xfer_param function is a dma_addr_t. We only need to check the remainder of buf_addr/max_width, so, no need to use do_div and extra u64 addr. Use '%' instead. Fixes: e0ebdbdcb42a ("dmaengine: stm32-dma: take address into account when computing max width") Signed-off-by: Amelie Delaunay Link: https://lore.kernel.org/r/20211011094259.315023-3-amelie.delaunay@foss.st.com Signed-off-by: Vinod Koul --- drivers/dma/stm32-dma.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/drivers/dma/stm32-dma.c b/drivers/dma/stm32-dma.c index a5ccf3fa95e0..6e4ef44941ef 100644 --- a/drivers/dma/stm32-dma.c +++ b/drivers/dma/stm32-dma.c @@ -270,7 +270,6 @@ static enum dma_slave_buswidth stm32_dma_get_max_width(u32 buf_len, u32 threshold) { enum dma_slave_buswidth max_width; - u64 addr = buf_addr; if (threshold == STM32_DMA_FIFO_THRESHOLD_FULL) max_width = DMA_SLAVE_BUSWIDTH_4_BYTES; @@ -281,7 +280,7 @@ static enum dma_slave_buswidth stm32_dma_get_max_width(u32 buf_len, max_width > DMA_SLAVE_BUSWIDTH_1_BYTE) max_width = max_width >> 1; - if (do_div(addr, max_width)) + if (buf_addr % max_width) max_width = DMA_SLAVE_BUSWIDTH_1_BYTE; return max_width; -- cgit v1.2.3 From af229d2c2557b5cf2a3b1eb39847ec1de7446873 Mon Sep 17 00:00:00 2001 From: Amelie Delaunay Date: Mon, 11 Oct 2021 11:42:59 +0200 Subject: dmaengine: stm32-dma: fix burst in case of unaligned memory address Theorically, address pointers used by STM32 DMA must be chosen so as to ensure that all transfers within a burst block are aligned on the address boundary equal to the size of the transfer. 
If this is always the case for peripheral addresses on STM32, it is not for memory addresses if the user doesn't respect this alignment constraint. To avoid a weird behavior of the DMA controller in this case (no error triggered but data are not transferred as expected), force no burst. Signed-off-by: Amelie Delaunay Link: https://lore.kernel.org/r/20211011094259.315023-4-amelie.delaunay@foss.st.com Signed-off-by: Vinod Koul --- drivers/dma/stm32-dma.c | 20 ++++++++++++++++---- 1 file changed, 16 insertions(+), 4 deletions(-) diff --git a/drivers/dma/stm32-dma.c b/drivers/dma/stm32-dma.c index 6e4ef44941ef..2283c500f4ce 100644 --- a/drivers/dma/stm32-dma.c +++ b/drivers/dma/stm32-dma.c @@ -753,8 +753,14 @@ static int stm32_dma_set_xfer_param(struct stm32_dma_chan *chan, if (src_bus_width < 0) return src_bus_width; - /* Set memory burst size */ - src_maxburst = STM32_DMA_MAX_BURST; + /* + * Set memory burst size - burst not possible if address is not aligned on + * the address boundary equal to the size of the transfer + */ + if (buf_addr % buf_len) + src_maxburst = 1; + else + src_maxburst = STM32_DMA_MAX_BURST; src_best_burst = stm32_dma_get_best_burst(buf_len, src_maxburst, fifoth, @@ -803,8 +809,14 @@ static int stm32_dma_set_xfer_param(struct stm32_dma_chan *chan, if (dst_bus_width < 0) return dst_bus_width; - /* Set memory burst size */ - dst_maxburst = STM32_DMA_MAX_BURST; + /* + * Set memory burst size - burst not possible if address is not aligned on + * the address boundary equal to the size of the transfer + */ + if (buf_addr % buf_len) + dst_maxburst = 1; + else + dst_maxburst = STM32_DMA_MAX_BURST; dst_best_burst = stm32_dma_get_best_burst(buf_len, dst_maxburst, fifoth, -- cgit v1.2.3 From 824351668a413af7d6d88e4ee2c9bee7c60daad2 Mon Sep 17 00:00:00 2001 From: Pandith N Date: Fri, 1 Oct 2021 19:38:10 +0530 Subject: dmaengine: dw-axi-dmac: support DMAX_NUM_CHANNELS > 8 Added support for DMA controller with more than 8 channels. DMAC register map changes based on number of channels. 
Enabling DMAC channel: DMAC_CHENREG has to be used when number of channels <= 8 DMAC_CHENREG2 has to be used when number of channels > 8 Configuring DMA channel: CHx_CFG has to be used when number of channels <= 8 CHx_CFG2 has to be used when number of channels > 8 Suspending and resuming channel: DMAC_CHENREG has to be used when number of channels <= 8 DMAC_CHSUSPREG has to be used for suspending a channel > 8 Signed-off-by: Pandith N Reviewed-by: Andy Shevchenko Link: https://lore.kernel.org/r/20211001140812.24977-2-pandith.n@intel.com Signed-off-by: Vinod Koul --- drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c | 105 ++++++++++++++++++------- drivers/dma/dw-axi-dmac/dw-axi-dmac.h | 35 ++++++++- 2 files changed, 109 insertions(+), 31 deletions(-) diff --git a/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c b/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c index 35993ab92154..9a8231244c42 100644 --- a/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c +++ b/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c @@ -79,6 +79,32 @@ axi_chan_iowrite64(struct axi_dma_chan *chan, u32 reg, u64 val) iowrite32(upper_32_bits(val), chan->chan_regs + reg + 4); } +static inline void axi_chan_config_write(struct axi_dma_chan *chan, + struct axi_dma_chan_config *config) +{ + u32 cfg_lo, cfg_hi; + + cfg_lo = (config->dst_multblk_type << CH_CFG_L_DST_MULTBLK_TYPE_POS | + config->src_multblk_type << CH_CFG_L_SRC_MULTBLK_TYPE_POS); + if (chan->chip->dw->hdata->reg_map_8_channels) { + cfg_hi = config->tt_fc << CH_CFG_H_TT_FC_POS | + config->hs_sel_src << CH_CFG_H_HS_SEL_SRC_POS | + config->hs_sel_dst << CH_CFG_H_HS_SEL_DST_POS | + config->src_per << CH_CFG_H_SRC_PER_POS | + config->dst_per << CH_CFG_H_DST_PER_POS | + config->prior << CH_CFG_H_PRIORITY_POS; + } else { + cfg_lo |= config->src_per << CH_CFG2_L_SRC_PER_POS | + config->dst_per << CH_CFG2_L_DST_PER_POS; + cfg_hi = config->tt_fc << CH_CFG2_H_TT_FC_POS | + config->hs_sel_src << CH_CFG2_H_HS_SEL_SRC_POS | + config->hs_sel_dst << CH_CFG2_H_HS_SEL_DST_POS | + config->prior << CH_CFG2_H_PRIORITY_POS; + } + axi_chan_iowrite32(chan, CH_CFG_L, cfg_lo); + axi_chan_iowrite32(chan, CH_CFG_H, cfg_hi); +} + static inline void axi_dma_disable(struct axi_dma_chip *chip) { u32 val; @@ -154,7 +180,10 @@ static inline void axi_chan_disable(struct axi_dma_chan *chan) val = axi_dma_ioread32(chan->chip, DMAC_CHEN); val &= ~(BIT(chan->id) << DMAC_CHAN_EN_SHIFT); - val |= BIT(chan->id) << DMAC_CHAN_EN_WE_SHIFT; + if (chan->chip->dw->hdata->reg_map_8_channels) + val |= BIT(chan->id) << DMAC_CHAN_EN_WE_SHIFT; + else + val |= BIT(chan->id) << DMAC_CHAN_EN2_WE_SHIFT; axi_dma_iowrite32(chan->chip, DMAC_CHEN, val); } @@ -163,8 +192,12 @@ static inline void axi_chan_enable(struct axi_dma_chan *chan) u32 val; val = axi_dma_ioread32(chan->chip, DMAC_CHEN); - val |= BIT(chan->id) << DMAC_CHAN_EN_SHIFT | - BIT(chan->id) << DMAC_CHAN_EN_WE_SHIFT; + if (chan->chip->dw->hdata->reg_map_8_channels) + val |= BIT(chan->id) << DMAC_CHAN_EN_SHIFT | + BIT(chan->id) << DMAC_CHAN_EN_WE_SHIFT; + else + val |= BIT(chan->id) << DMAC_CHAN_EN_SHIFT | + BIT(chan->id) << DMAC_CHAN_EN2_WE_SHIFT; axi_dma_iowrite32(chan->chip, DMAC_CHEN, val); } @@ -336,7 +369,8 @@ static void axi_chan_block_xfer_start(struct axi_dma_chan *chan, struct axi_dma_desc *first) { u32 priority = chan->chip->dw->hdata->priority[chan->id]; - u32 reg, irq_mask; + struct axi_dma_chan_config config; + u32 irq_mask; u8 lms = 0; /* Select AXI0 master for LLI fetching */ if (unlikely(axi_chan_is_hw_enable(chan))) { @@ -348,36 +382,32 @@ static 
void axi_chan_block_xfer_start(struct axi_dma_chan *chan, axi_dma_enable(chan->chip); - reg = (DWAXIDMAC_MBLK_TYPE_LL << CH_CFG_L_DST_MULTBLK_TYPE_POS | - DWAXIDMAC_MBLK_TYPE_LL << CH_CFG_L_SRC_MULTBLK_TYPE_POS); - axi_chan_iowrite32(chan, CH_CFG_L, reg); - - reg = (DWAXIDMAC_TT_FC_MEM_TO_MEM_DMAC << CH_CFG_H_TT_FC_POS | - priority << CH_CFG_H_PRIORITY_POS | - DWAXIDMAC_HS_SEL_HW << CH_CFG_H_HS_SEL_DST_POS | - DWAXIDMAC_HS_SEL_HW << CH_CFG_H_HS_SEL_SRC_POS); + config.dst_multblk_type = DWAXIDMAC_MBLK_TYPE_LL; + config.src_multblk_type = DWAXIDMAC_MBLK_TYPE_LL; + config.tt_fc = DWAXIDMAC_TT_FC_MEM_TO_MEM_DMAC; + config.prior = priority; + config.hs_sel_dst = DWAXIDMAC_HS_SEL_HW; + config.hs_sel_dst = DWAXIDMAC_HS_SEL_HW; switch (chan->direction) { case DMA_MEM_TO_DEV: dw_axi_dma_set_byte_halfword(chan, true); - reg |= (chan->config.device_fc ? - DWAXIDMAC_TT_FC_MEM_TO_PER_DST : - DWAXIDMAC_TT_FC_MEM_TO_PER_DMAC) - << CH_CFG_H_TT_FC_POS; + config.tt_fc = chan->config.device_fc ? + DWAXIDMAC_TT_FC_MEM_TO_PER_DST : + DWAXIDMAC_TT_FC_MEM_TO_PER_DMAC; if (chan->chip->apb_regs) - reg |= (chan->id << CH_CFG_H_DST_PER_POS); + config.dst_per = chan->id; break; case DMA_DEV_TO_MEM: - reg |= (chan->config.device_fc ? - DWAXIDMAC_TT_FC_PER_TO_MEM_SRC : - DWAXIDMAC_TT_FC_PER_TO_MEM_DMAC) - << CH_CFG_H_TT_FC_POS; + config.tt_fc = chan->config.device_fc ? + DWAXIDMAC_TT_FC_PER_TO_MEM_SRC : + DWAXIDMAC_TT_FC_PER_TO_MEM_DMAC; if (chan->chip->apb_regs) - reg |= (chan->id << CH_CFG_H_SRC_PER_POS); + config.src_per = chan->id; break; default: break; } - axi_chan_iowrite32(chan, CH_CFG_H, reg); + axi_chan_config_write(chan, &config); write_chan_llp(chan, first->hw_desc[0].llp | lms); @@ -1120,10 +1150,17 @@ static int dma_chan_pause(struct dma_chan *dchan) spin_lock_irqsave(&chan->vc.lock, flags); - val = axi_dma_ioread32(chan->chip, DMAC_CHEN); - val |= BIT(chan->id) << DMAC_CHAN_SUSP_SHIFT | - BIT(chan->id) << DMAC_CHAN_SUSP_WE_SHIFT; - axi_dma_iowrite32(chan->chip, DMAC_CHEN, val); + if (chan->chip->dw->hdata->reg_map_8_channels) { + val = axi_dma_ioread32(chan->chip, DMAC_CHEN); + val |= BIT(chan->id) << DMAC_CHAN_SUSP_SHIFT | + BIT(chan->id) << DMAC_CHAN_SUSP_WE_SHIFT; + axi_dma_iowrite32(chan->chip, DMAC_CHEN, val); + } else { + val = 0; + val |= BIT(chan->id) << DMAC_CHAN_SUSP2_SHIFT | + BIT(chan->id) << DMAC_CHAN_SUSP2_WE_SHIFT; + axi_dma_iowrite32(chan->chip, DMAC_CHSUSPREG, val); + } do { if (axi_chan_irq_read(chan) & DWAXIDMAC_IRQ_SUSPENDED) @@ -1147,9 +1184,15 @@ static inline void axi_chan_resume(struct axi_dma_chan *chan) u32 val; val = axi_dma_ioread32(chan->chip, DMAC_CHEN); - val &= ~(BIT(chan->id) << DMAC_CHAN_SUSP_SHIFT); - val |= (BIT(chan->id) << DMAC_CHAN_SUSP_WE_SHIFT); - axi_dma_iowrite32(chan->chip, DMAC_CHEN, val); + if (chan->chip->dw->hdata->reg_map_8_channels) { + val &= ~(BIT(chan->id) << DMAC_CHAN_SUSP_SHIFT); + val |= (BIT(chan->id) << DMAC_CHAN_SUSP_WE_SHIFT); + axi_dma_iowrite32(chan->chip, DMAC_CHEN, val); + } else { + val &= ~(BIT(chan->id) << DMAC_CHAN_SUSP2_SHIFT); + val |= (BIT(chan->id) << DMAC_CHAN_SUSP2_WE_SHIFT); + axi_dma_iowrite32(chan->chip, DMAC_CHSUSPREG, val); + } chan->is_paused = false; } @@ -1241,6 +1284,8 @@ static int parse_device_properties(struct axi_dma_chip *chip) return -EINVAL; chip->dw->hdata->nr_channels = tmp; + if (tmp <= DMA_REG_MAP_CH_REF) + chip->dw->hdata->reg_map_8_channels = true; ret = device_property_read_u32(dev, "snps,dma-masters", &tmp); if (ret) diff --git a/drivers/dma/dw-axi-dmac/dw-axi-dmac.h b/drivers/dma/dw-axi-dmac/dw-axi-dmac.h 
index 380005afde16..be69a0b76860 100644 --- a/drivers/dma/dw-axi-dmac/dw-axi-dmac.h +++ b/drivers/dma/dw-axi-dmac/dw-axi-dmac.h @@ -18,7 +18,7 @@ #include "../virt-dma.h" -#define DMAC_MAX_CHANNELS 8 +#define DMAC_MAX_CHANNELS 16 #define DMAC_MAX_MASTERS 2 #define DMAC_MAX_BLK_SIZE 0x200000 @@ -30,6 +30,8 @@ struct dw_axi_dma_hcfg { u32 priority[DMAC_MAX_CHANNELS]; /* maximum supported axi burst length */ u32 axi_rw_burst_len; + /* Register map for DMAX_NUM_CHANNELS <= 8 */ + bool reg_map_8_channels; bool restrict_axi_burst_len; }; @@ -103,6 +105,17 @@ struct axi_dma_desc { u32 period_len; }; +struct axi_dma_chan_config { + u8 dst_multblk_type; + u8 src_multblk_type; + u8 dst_per; + u8 src_per; + u8 tt_fc; + u8 prior; + u8 hs_sel_dst; + u8 hs_sel_src; +}; + static inline struct device *dchan2dev(struct dma_chan *dchan) { return &dchan->dev->device; @@ -139,6 +152,8 @@ static inline struct axi_dma_chan *dchan_to_axi_dma_chan(struct dma_chan *dchan) #define DMAC_CHEN 0x018 /* R/W DMAC Channel Enable */ #define DMAC_CHEN_L 0x018 /* R/W DMAC Channel Enable 00-31 */ #define DMAC_CHEN_H 0x01C /* R/W DMAC Channel Enable 32-63 */ +#define DMAC_CHSUSPREG 0x020 /* R/W DMAC Channel Suspend */ +#define DMAC_CHABORTREG 0x028 /* R/W DMAC Channel Abort */ #define DMAC_INTSTATUS 0x030 /* R DMAC Interrupt Status */ #define DMAC_COMMON_INTCLEAR 0x038 /* W DMAC Interrupt Clear */ #define DMAC_COMMON_INTSTATUS_ENA 0x040 /* R DMAC Interrupt Status Enable */ @@ -187,6 +202,7 @@ static inline struct axi_dma_chan *dchan_to_axi_dma_chan(struct dma_chan *dchan) #define DMA_APB_HS_SEL_BIT_SIZE 0x08 /* HW handshake bits per channel */ #define DMA_APB_HS_SEL_MASK 0xFF /* HW handshake select masks */ #define MAX_BLOCK_SIZE 0x1000 /* 1024 blocks * 4 bytes data width */ +#define DMA_REG_MAP_CH_REF 0x08 /* Channel count to choose register map */ /* DMAC_CFG */ #define DMAC_EN_POS 0 @@ -195,12 +211,20 @@ static inline struct axi_dma_chan *dchan_to_axi_dma_chan(struct dma_chan *dchan) #define INT_EN_POS 1 #define INT_EN_MASK BIT(INT_EN_POS) +/* DMAC_CHEN */ #define DMAC_CHAN_EN_SHIFT 0 #define DMAC_CHAN_EN_WE_SHIFT 8 #define DMAC_CHAN_SUSP_SHIFT 16 #define DMAC_CHAN_SUSP_WE_SHIFT 24 +/* DMAC_CHEN2 */ +#define DMAC_CHAN_EN2_WE_SHIFT 16 + +/* DMAC_CHSUSP */ +#define DMAC_CHAN_SUSP2_SHIFT 0 +#define DMAC_CHAN_SUSP2_WE_SHIFT 16 + /* CH_CTL_H */ #define CH_CTL_H_ARLEN_EN BIT(6) #define CH_CTL_H_ARLEN_POS 7 @@ -289,6 +313,15 @@ enum { DWAXIDMAC_MBLK_TYPE_LL }; +/* CH_CFG2 */ +#define CH_CFG2_L_SRC_PER_POS 4 +#define CH_CFG2_L_DST_PER_POS 11 + +#define CH_CFG2_H_TT_FC_POS 0 +#define CH_CFG2_H_HS_SEL_SRC_POS 3 +#define CH_CFG2_H_HS_SEL_DST_POS 4 +#define CH_CFG2_H_PRIORITY_POS 20 + /** * DW AXI DMA channel interrupts * -- cgit v1.2.3 From 93a7d32e9f4b8bad722a8c8c83c579a2f6a5aec3 Mon Sep 17 00:00:00 2001 From: Pandith N Date: Fri, 1 Oct 2021 19:38:11 +0530 Subject: dmaengine: dw-axi-dmac: Hardware handshake configuration Added hardware handshake selection in channel config, for mem2per and per2mem case. The peripheral specific handshake interface needs to be programmed in src_per, dst_per bits of CHx_CFG register. 
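Concretely, the slave's handshake interface number is written into the direction-dependent per field of the channel configuration on platforms without the APB handshake mux; condensed from the hunk below, where chan->hw_handshake_num holds the handshake index requested by the peripheral:

	switch (chan->direction) {
	case DMA_MEM_TO_DEV:
		config.dst_per = chan->hw_handshake_num;	/* destination handshake */
		break;
	case DMA_DEV_TO_MEM:
		config.src_per = chan->hw_handshake_num;	/* source handshake */
		break;
	default:
		break;
	}
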
Signed-off-by: Pandith N Reviewed-by: Andy Shevchenko Link: https://lore.kernel.org/r/20211001140812.24977-3-pandith.n@intel.com Signed-off-by: Vinod Koul --- drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c b/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c index 9a8231244c42..f46fd9895a13 100644 --- a/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c +++ b/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c @@ -396,6 +396,8 @@ static void axi_chan_block_xfer_start(struct axi_dma_chan *chan, DWAXIDMAC_TT_FC_MEM_TO_PER_DMAC; if (chan->chip->apb_regs) config.dst_per = chan->id; + else + config.dst_per = chan->hw_handshake_num; break; case DMA_DEV_TO_MEM: config.tt_fc = chan->config.device_fc ? @@ -403,6 +405,8 @@ static void axi_chan_block_xfer_start(struct axi_dma_chan *chan, DWAXIDMAC_TT_FC_PER_TO_MEM_DMAC; if (chan->chip->apb_regs) config.src_per = chan->id; + else + config.src_per = chan->hw_handshake_num; break; default: break; -- cgit v1.2.3 From 2d0f07f888f52532588730aae0241af5c5df393d Mon Sep 17 00:00:00 2001 From: Pandith N Date: Fri, 1 Oct 2021 19:38:12 +0530 Subject: dmaengine: dw-axi-dmac: set coherent mask Add support for setting dma coherent mask, dma mask is set to 64 bit Signed-off-by: Pandith N Reviewed-by: Andy Shevchenko Link: https://lore.kernel.org/r/20211001140812.24977-4-pandith.n@intel.com Signed-off-by: Vinod Koul --- drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c b/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c index f46fd9895a13..79572ec532ef 100644 --- a/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c +++ b/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c @@ -212,12 +212,16 @@ static inline bool axi_chan_is_hw_enable(struct axi_dma_chan *chan) static void axi_dma_hw_init(struct axi_dma_chip *chip) { + int ret; u32 i; for (i = 0; i < chip->dw->hdata->nr_channels; i++) { axi_chan_irq_disable(&chip->dw->chan[i], DWAXIDMAC_IRQ_ALL); axi_chan_disable(&chip->dw->chan[i]); } + ret = dma_set_mask_and_coherent(chip->dev, DMA_BIT_MASK(64)); + if (ret) + dev_warn(chip->dev, "Unable to set coherent mask\n"); } static u32 axi_chan_get_xfer_width(struct axi_dma_chan *chan, dma_addr_t src, -- cgit v1.2.3 From ef6c1dadc2a255965c6b47c365dec60b05e19ea6 Mon Sep 17 00:00:00 2001 From: Flavio Suligoi Date: Tue, 28 Sep 2021 17:18:30 +0200 Subject: dmaengine: imx-sdma: remove useless braces Braces {} are not necessary for single statement blocks. Signed-off-by: Flavio Suligoi Link: https://lore.kernel.org/r/20210928151833.589843-1-f.suligoi@asem.it Signed-off-by: Vinod Koul --- drivers/dma/imx-sdma.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c index cacc725ca545..a58798fc3ff8 100644 --- a/drivers/dma/imx-sdma.c +++ b/drivers/dma/imx-sdma.c @@ -741,9 +741,8 @@ static int sdma_load_script(struct sdma_engine *sdma, void *buf, int size, unsigned long flags; buf_virt = dma_alloc_coherent(sdma->dev, size, &buf_phys, GFP_KERNEL); - if (!buf_virt) { + if (!buf_virt) return -ENOMEM; - } spin_lock_irqsave(&sdma->channel_0_lock, flags); -- cgit v1.2.3 From 1f8595efae8dbc457fe98ca0d6b8f81a7c5477ce Mon Sep 17 00:00:00 2001 From: Flavio Suligoi Date: Tue, 28 Sep 2021 17:18:31 +0200 Subject: dmaengine: imx-sdma: add missed braces The "if" conditional statement is not a single statement, so both branches require braces. 
Signed-off-by: Flavio Suligoi Link: https://lore.kernel.org/r/20210928151833.589843-2-f.suligoi@asem.it Signed-off-by: Vinod Koul --- drivers/dma/imx-sdma.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c index a58798fc3ff8..726076683400 100644 --- a/drivers/dma/imx-sdma.c +++ b/drivers/dma/imx-sdma.c @@ -1226,8 +1226,9 @@ static int sdma_config_channel(struct dma_chan *chan) if (sdmac->peripheral_type == IMX_DMATYPE_ASRC_SP || sdmac->peripheral_type == IMX_DMATYPE_ASRC) sdma_set_watermarklevel_for_p2p(sdmac); - } else + } else { __set_bit(sdmac->event_id0, sdmac->event_mask); + } /* Address */ sdmac->shp_addr = sdmac->per_address; -- cgit v1.2.3 From df7cc2aa399304593ab806d147168150b0a878dd Mon Sep 17 00:00:00 2001 From: Flavio Suligoi Date: Tue, 28 Sep 2021 17:18:32 +0200 Subject: dmaengine: imx-sdma: align statement to open parenthesis Alignment should match open parenthesis. Signed-off-by: Flavio Suligoi Link: https://lore.kernel.org/r/20210928151833.589843-3-f.suligoi@asem.it Signed-off-by: Vinod Koul --- drivers/dma/imx-sdma.c | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c index 726076683400..7b3bd3608651 100644 --- a/drivers/dma/imx-sdma.c +++ b/drivers/dma/imx-sdma.c @@ -1241,7 +1241,7 @@ static int sdma_config_channel(struct dma_chan *chan) } static int sdma_set_channel_priority(struct sdma_channel *sdmac, - unsigned int priority) + unsigned int priority) { struct sdma_engine *sdma = sdmac->sdma; int channel = sdmac->channel; @@ -1261,7 +1261,7 @@ static int sdma_request_channel0(struct sdma_engine *sdma) int ret = -EBUSY; sdma->bd0 = dma_alloc_coherent(sdma->dev, PAGE_SIZE, &sdma->bd0_phys, - GFP_NOWAIT); + GFP_NOWAIT); if (!sdma->bd0) { ret = -ENOMEM; goto out; @@ -1284,7 +1284,7 @@ static int sdma_alloc_bd(struct sdma_desc *desc) int ret = 0; desc->bd = dma_alloc_coherent(desc->sdmac->sdma->dev, bd_size, - &desc->bd_phys, GFP_NOWAIT); + &desc->bd_phys, GFP_NOWAIT); if (!desc->bd) { ret = -ENOMEM; goto out; @@ -1757,7 +1757,7 @@ static void sdma_issue_pending(struct dma_chan *chan) #define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V4 46 static void sdma_add_scripts(struct sdma_engine *sdma, - const struct sdma_script_start_addrs *addr) + const struct sdma_script_start_addrs *addr) { s32 *addr_arr = (u32 *)addr; s32 *saddr_arr = (u32 *)sdma->script_addrs; @@ -1840,8 +1840,8 @@ static void sdma_load_firmware(const struct firmware *fw, void *context) clk_enable(sdma->clk_ahb); /* download the RAM image for SDMA */ sdma_load_script(sdma, ram_code, - header->ram_code_size, - addr->ram_code_start_addr); + header->ram_code_size, + addr->ram_code_start_addr); clk_disable(sdma->clk_ipg); clk_disable(sdma->clk_ahb); @@ -1850,8 +1850,8 @@ static void sdma_load_firmware(const struct firmware *fw, void *context) sdma->fw_loaded = true; dev_info(sdma->dev, "loaded firmware %d.%d\n", - header->version_major, - header->version_minor); + header->version_major, + header->version_minor); err_firmware: release_firmware(fw); -- cgit v1.2.3 From 635156d94b644a4000ff19c4fff68a60afff279f Mon Sep 17 00:00:00 2001 From: Flavio Suligoi Date: Tue, 28 Sep 2021 17:18:33 +0200 Subject: dmaengine: imx-sdma: remove space after sizeof Space prohibited between function name and open parenthesis '(' Signed-off-by: Flavio Suligoi Link: https://lore.kernel.org/r/20210928151833.589843-4-f.suligoi@asem.it Signed-off-by: Vinod Koul --- drivers/dma/imx-sdma.c | 6 +++--- 1 file 
changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c index 7b3bd3608651..75ec0754d4ad 100644 --- a/drivers/dma/imx-sdma.c +++ b/drivers/dma/imx-sdma.c @@ -1955,7 +1955,7 @@ static int sdma_init(struct sdma_engine *sdma) writel_relaxed(0, sdma->regs + SDMA_H_C0PTR); sdma->channel_control = dma_alloc_coherent(sdma->dev, - MAX_DMA_CHANNELS * sizeof (struct sdma_channel_control) + + MAX_DMA_CHANNELS * sizeof(struct sdma_channel_control) + sizeof(struct sdma_context_data), &ccb_phys, GFP_KERNEL); @@ -1965,9 +1965,9 @@ static int sdma_init(struct sdma_engine *sdma) } sdma->context = (void *)sdma->channel_control + - MAX_DMA_CHANNELS * sizeof (struct sdma_channel_control); + MAX_DMA_CHANNELS * sizeof(struct sdma_channel_control); sdma->context_phys = ccb_phys + - MAX_DMA_CHANNELS * sizeof (struct sdma_channel_control); + MAX_DMA_CHANNELS * sizeof(struct sdma_channel_control); /* disable all channels */ for (i = 0; i < sdma->drvdata->num_events; i++) -- cgit v1.2.3 From e7e1e880b114ca640a2f280b0d5d38aed98f98c6 Mon Sep 17 00:00:00 2001 From: Lars-Peter Clausen Date: Sat, 23 Oct 2021 15:41:01 +0200 Subject: dmaengine: dmaengine_desc_callback_valid(): Check for `callback_result` Before the `callback_result` callback was introduced drivers coded their invocation to the callback in a similar way to: if (cb->callback) { spin_unlock(&dma->lock); cb->callback(cb->callback_param); spin_lock(&dma->lock); } With the introduction of `callback_result` two helpers where introduced to transparently handle both types of callbacks. And drivers where updated to look like this: if (dmaengine_desc_callback_valid(cb)) { spin_unlock(&dma->lock); dmaengine_desc_callback_invoke(cb, ...); spin_lock(&dma->lock); } dmaengine_desc_callback_invoke() correctly handles both `callback_result` and `callback`. But we forgot to update the dmaengine_desc_callback_valid() function to check for `callback_result`. As a result DMA descriptors that use the `callback_result` rather than `callback` don't have their callback invoked by drivers that follow the pattern above. Fix this by checking for both `callback` and `callback_result` in dmaengine_desc_callback_valid(). Fixes: f067025bc676 ("dmaengine: add support to provide error result from a DMA transation") Signed-off-by: Lars-Peter Clausen Acked-by: Dave Jiang Link: https://lore.kernel.org/r/20211023134101.28042-1-lars@metafoo.de Signed-off-by: Vinod Koul --- drivers/dma/dmaengine.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/dma/dmaengine.h b/drivers/dma/dmaengine.h index 1bfbd64b1371..53f16d3f0029 100644 --- a/drivers/dma/dmaengine.h +++ b/drivers/dma/dmaengine.h @@ -176,7 +176,7 @@ dmaengine_desc_get_callback_invoke(struct dma_async_tx_descriptor *tx, static inline bool dmaengine_desc_callback_valid(struct dmaengine_desc_callback *cb) { - return (cb->callback) ? true : false; + return cb->callback || cb->callback_result; } struct dma_chan *dma_get_slave_channel(struct dma_chan *chan); -- cgit v1.2.3 From 05f4fae9a2f5785d180b48ee93d7fa75425091c3 Mon Sep 17 00:00:00 2001 From: Dongliang Mu Date: Wed, 20 Oct 2021 22:35:33 +0800 Subject: dmaengine: rcar-dmac: refactor the error handling code of rcar_dmac_probe In rcar_dmac_probe, if pm_runtime_resume_and_get fails, it forgets to disable runtime PM. And of_dma_controller_free should only be invoked after the success of of_dma_controller_register. Fix this by refactoring the error handling code. 
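The general shape behind this kind of refactor: each error label undoes only what was successfully set up before the failing step, in reverse order, and a teardown call such as of_dma_controller_free() is reached only after the matching registration succeeded. A minimal, hedged probe() sketch for a hypothetical driver (example_setup_hw() is a placeholder, not rcar-dmac code):

        static int example_probe(struct platform_device *pdev)
        {
                int ret;

                pm_runtime_enable(&pdev->dev);

                ret = pm_runtime_resume_and_get(&pdev->dev);
                if (ret < 0)
                        goto err_pm_disable;    /* only the enable needs undoing */

                ret = example_setup_hw(pdev);   /* hypothetical: channels, DT registration, ... */
                if (ret)
                        goto err_pm_put;

                return 0;

        err_pm_put:
                pm_runtime_put(&pdev->dev);
        err_pm_disable:
                pm_runtime_disable(&pdev->dev);
                return ret;
        }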
Signed-off-by: Dongliang Mu Reviewed-by: Laurent Pinchart Reviewed-by: Geert Uytterhoeven Link: https://lore.kernel.org/r/20211020143546.3436205-1-mudongliangabcd@gmail.com Signed-off-by: Vinod Koul --- drivers/dma/sh/rcar-dmac.c | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/drivers/dma/sh/rcar-dmac.c b/drivers/dma/sh/rcar-dmac.c index 6885b3dcd7a9..5c7716fd6bc5 100644 --- a/drivers/dma/sh/rcar-dmac.c +++ b/drivers/dma/sh/rcar-dmac.c @@ -1916,7 +1916,7 @@ static int rcar_dmac_probe(struct platform_device *pdev) ret = pm_runtime_resume_and_get(&pdev->dev); if (ret < 0) { dev_err(&pdev->dev, "runtime PM get sync failed (%d)\n", ret); - return ret; + goto err_pm_disable; } ret = rcar_dmac_init(dmac); @@ -1924,7 +1924,7 @@ static int rcar_dmac_probe(struct platform_device *pdev) if (ret) { dev_err(&pdev->dev, "failed to reset device\n"); - goto error; + goto err_pm_disable; } /* Initialize engine */ @@ -1958,14 +1958,14 @@ static int rcar_dmac_probe(struct platform_device *pdev) for_each_rcar_dmac_chan(i, dmac, chan) { ret = rcar_dmac_chan_probe(dmac, chan); if (ret < 0) - goto error; + goto err_pm_disable; } /* Register the DMAC as a DMA provider for DT. */ ret = of_dma_controller_register(pdev->dev.of_node, rcar_dmac_of_xlate, NULL); if (ret < 0) - goto error; + goto err_pm_disable; /* * Register the DMA engine device. @@ -1974,12 +1974,13 @@ static int rcar_dmac_probe(struct platform_device *pdev) */ ret = dma_async_device_register(engine); if (ret < 0) - goto error; + goto err_dma_free; return 0; -error: +err_dma_free: of_dma_controller_free(pdev->dev.of_node); +err_pm_disable: pm_runtime_disable(&pdev->dev); return ret; } -- cgit v1.2.3 From c5a51fc89c0103c03b8a54cf12dac7d014b3a2bf Mon Sep 17 00:00:00 2001 From: Dongliang Mu Date: Thu, 21 Oct 2021 11:05:38 +0800 Subject: dmaengine: tegra210-adma: fix pm runtime unbalance The previous commit 059e969c2a7d ("dmaengine: tegra210-adma: Using pm_runtime_resume_and_get to replace open coding") forgets to replace the pm_runtime_get_sync in the tegra_adma_probe, but removes the pm_runtime_put_noidle. Fix this by continuing to replace pm_runtime_get_sync with pm_runtime_resume_and_get in tegra_adma_probe. Fixes: 059e969c2a7d ("dmaengine: tegra210-adma: Using pm_runtime_resume_and_get to replace open coding") Signed-off-by: Dongliang Mu Reviewed-by: Jon Hunter Link: https://lore.kernel.org/r/20211021030538.3465287-1-mudongliangabcd@gmail.com Signed-off-by: Vinod Koul --- drivers/dma/tegra210-adma.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/dma/tegra210-adma.c b/drivers/dma/tegra210-adma.c index 911533cc3133..ebbcff567c81 100644 --- a/drivers/dma/tegra210-adma.c +++ b/drivers/dma/tegra210-adma.c @@ -888,7 +888,7 @@ static int tegra_adma_probe(struct platform_device *pdev) pm_runtime_enable(&pdev->dev); - ret = pm_runtime_get_sync(&pdev->dev); + ret = pm_runtime_resume_and_get(&pdev->dev); if (ret < 0) goto rpm_disable; -- cgit v1.2.3 From 2f802d0af7ab5a629a8b4dc9d7be2e11bf477612 Mon Sep 17 00:00:00 2001 From: Dongliang Mu Date: Thu, 21 Oct 2021 11:14:31 +0800 Subject: dmaengine: tegra210-adma: fix pm runtime unbalance in tegra_adma_remove Since pm_runtime_put is done when tegra_adma_probe is successful, we cannot do pm_runtime_put_sync again in tegra_adma_remove. Fix this by removing the pm_runtime_put_sync in tegra_adma_remove. 
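The rule behind both tegra210-adma fixes is that every successful runtime-PM "get" must be balanced by exactly one "put". A short illustrative fragment (not driver code) of the difference that makes pm_runtime_resume_and_get() the safer helper:

        /* pm_runtime_get_sync() raises the usage count even when the resume
         * fails, so the caller must drop it on the error path: */
        ret = pm_runtime_get_sync(dev);
        if (ret < 0) {
                pm_runtime_put_noidle(dev);
                return ret;
        }

        /* pm_runtime_resume_and_get() rebalances the count itself on failure,
         * so no extra put is needed here: */
        ret = pm_runtime_resume_and_get(dev);
        if (ret < 0)
                return ret;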
Signed-off-by: Dongliang Mu Reviewed-by: Jon Hunter Link: https://lore.kernel.org/r/20211021031432.3466261-1-mudongliangabcd@gmail.com Signed-off-by: Vinod Koul --- drivers/dma/tegra210-adma.c | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/dma/tegra210-adma.c b/drivers/dma/tegra210-adma.c index ebbcff567c81..ae39b52012b2 100644 --- a/drivers/dma/tegra210-adma.c +++ b/drivers/dma/tegra210-adma.c @@ -961,7 +961,6 @@ static int tegra_adma_remove(struct platform_device *pdev) for (i = 0; i < tdma->nr_channels; ++i) irq_dispose_mapping(tdma->channels[i].irq); - pm_runtime_put_sync(&pdev->dev); pm_runtime_disable(&pdev->dev); return 0; -- cgit v1.2.3 From 15af840831f69baa9efb0d50007459d2015397a5 Mon Sep 17 00:00:00 2001 From: Dave Jiang Date: Wed, 20 Oct 2021 09:27:25 -0700 Subject: dmaengine: idxd: remove kernel wq type set when load configuration Remove setting of wq type on guest kernel during configuration load on RO device config. The user will set the kernel wq type and this setting based on config is not necessary. Signed-off-by: Dave Jiang Link: https://lore.kernel.org/r/163474724511.2607444.1876715711451990426.stgit@djiang5-desk3.ch.intel.com Signed-off-by: Vinod Koul --- drivers/dma/idxd/device.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/drivers/dma/idxd/device.c b/drivers/dma/idxd/device.c index 27612329f510..ef3281fc3f8b 100644 --- a/drivers/dma/idxd/device.c +++ b/drivers/dma/idxd/device.c @@ -1050,8 +1050,6 @@ static int idxd_wq_load_config(struct idxd_wq *wq) wq->size = wq->wqcfg->wq_size; wq->threshold = wq->wqcfg->wq_thresh; - if (wq->wqcfg->priv) - wq->type = IDXD_WQT_KERNEL; /* The driver does not support shared WQ mode in read-only config yet */ if (wq->wqcfg->mode == 0 || wq->wqcfg->pasid_en) -- cgit v1.2.3 From 98da0106aac0d3c5d4a3c95d238f1ff88957bbfc Mon Sep 17 00:00:00 2001 From: Dave Jiang Date: Tue, 21 Sep 2021 13:15:58 -0700 Subject: dmanegine: idxd: fix resource free ordering on driver removal Fault triggers on ioread32() when pci driver unbind is envoked. The placement of idxd sub-driver removal causes the probing of the device mmio region after the mmio mapping being torn down. The driver needs the sub-drivers to be unbound but not release the idxd context until all shutdown activities has been done. Move the sub-driver unregistering up before the remove() calls shutdown(). But take a device ref on the idxd->conf_dev so that the memory does not get freed in ->release(). When all cleanup activities has been done, release the ref to allow the idxd memory to be freed. 
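In outline, the ordering described above looks like the following hedged sketch (names are illustrative; the exact idxd calls appear in the diff further below):

        get_device(conf_dev);           /* hold a ref so ->release() cannot free the context yet */
        device_unregister(conf_dev);    /* unbind the sub-drivers first                          */
        example_shutdown(pdev);         /* hypothetical: teardown still touches the context      */
        put_device(conf_dev);           /* drop the ref; the memory may now be released          */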
[57159.542766] RIP: 0010:ioread32+0x27/0x60 [57159.547097] Code: 00 66 90 48 81 ff ff ff 03 00 77 1e 48 81 ff 00 00 01 00 76 05 0f b7 d7 ed c3 8b 15 03 50 41 01 b8 ff ff ff ff 85 d2 75 04 c3 <8b> 07 c3 55 83 ea 01 48 89 fe 48 c7 c7 00 70 5f 82 48 89 e5 48 83 [57159.566647] RSP: 0018:ffffc900011abb60 EFLAGS: 00010292 [57159.572295] RAX: ffffc900011e0000 RBX: ffff888107d39800 RCX: 0000000000000000 [57159.579842] RDX: 0000000000000000 RSI: ffffffff82b1e448 RDI: ffffc900011e0090 [57159.587421] RBP: ffffc900011abb88 R08: 0000000000000000 R09: 0000000000000001 [57159.594972] R10: 0000000000000001 R11: 0000000000000000 R12: ffff8881019840d0 [57159.602533] R13: ffff8881097e9000 R14: ffffffffa08542a0 R15: 00000000000003a8 [57159.610093] FS: 00007f991e0a8740(0000) GS:ffff888459900000(0000) knlGS:00000000000 00000 [57159.618614] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 [57159.624814] CR2: ffffc900011e0090 CR3: 000000010862a002 CR4: 00000000003706e0 [57159.632397] DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000 [57159.639973] DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400 [57159.647601] Call Trace: [57159.650502] ? idxd_device_disable+0x41/0x110 [idxd] [57159.655948] idxd_device_drv_remove+0x2b/0x80 [idxd] [57159.661374] idxd_config_bus_remove+0x16/0x20 [57159.666191] __device_release_driver+0x163/0x240 [57159.671320] device_release_driver+0x2b/0x40 [57159.676052] bus_remove_device+0xf5/0x160 [57159.680524] device_del+0x19c/0x400 [57159.684440] device_unregister+0x18/0x60 [57159.688792] idxd_remove+0x140/0x1c0 [idxd] [57159.693406] pci_device_remove+0x3e/0xb0 [57159.697758] __device_release_driver+0x163/0x240 [57159.702788] device_driver_detach+0x43/0xb0 [57159.707424] unbind_store+0x11e/0x130 [57159.711537] drv_attr_store+0x24/0x30 [57159.715646] sysfs_kf_write+0x4b/0x60 [57159.719710] kernfs_fop_write_iter+0x153/0x1e0 [57159.724563] new_sync_write+0x120/0x1b0 [57159.728812] vfs_write+0x23e/0x350 [57159.732624] ksys_write+0x70/0xf0 [57159.736335] __x64_sys_write+0x1a/0x20 [57159.740492] do_syscall_64+0x3b/0x90 [57159.744465] entry_SYSCALL_64_after_hwframe+0x44/0xae [57159.749908] RIP: 0033:0x7f991e19c387 [57159.753898] Code: 0d 00 f7 d8 64 89 02 48 c7 c0 ff ff ff ff eb b7 0f 1f 00 f3 0f 1e fa 64 8b 04 25 18 00 00 00 85 c0 75 10 b8 01 00 00 00 0f 05 <48> 3d 00 f0 ff ff 77 51 c3 48 83 ec 28 48 89 54 24 18 48 89 74 24 [57159.773564] RSP: 002b:00007ffc2ce2d6a8 EFLAGS: 00000246 ORIG_RAX: 0000000000000001 [57159.781550] RAX: ffffffffffffffda RBX: 000000000000000c RCX: 00007f991e19c387 [57159.789133] RDX: 000000000000000c RSI: 000055ee2630e140 RDI: 0000000000000001 [57159.796695] RBP: 000055ee2630e140 R08: 0000000000000000 R09: 00007f991e2324e0 [57159.804246] R10: 00007f991e2323e0 R11: 0000000000000246 R12: 000000000000000c [57159.811800] R13: 00007f991e26f520 R14: 000000000000000c R15: 00007f991e26f700 [57159.819373] Modules linked in: idxd bridge stp llc bnep sunrpc nls_iso8859_1 intel_ rapl_msr intel_rapl_common x86_pkg_temp_thermal intel_powerclamp coretemp snd_hda_code c_realtek iTCO_wdt 8250_dw snd_hda_codec_generic kvm_intel ledtrig_audio iTCO_vendor_s upport snd_hda_intel snd_intel_dspcfg ppdev kvm snd_hda_codec intel_wmi_thunderbolt sn d_hwdep irqbypass iwlwifi btusb snd_hda_core rapl btrtl intel_cstate snd_seq btbcm snd _seq_device btintel snd_pcm cfg80211 bluetooth pcspkr psmouse input_leds snd_timer int el_lpss_pci mei_me intel_lpss snd ecdh_generic ecc mei ucsi_acpi i2c_i801 idma64 i2c_s mbus virt_dma soundcore typec_ucsi typec wmi parport_pc parport 
video mac_hid acpi_pad sch_fq_codel drm ip_tables x_tables crct10dif_pclmul crc32_pclmul ghash_clmulni_intel usbkbd hid_generic usbmouse aesni_intel usbhid crypto_simd cryptd e1000e hid serio_ra w ahci libahci pinctrl_sunrisepoint fuse msr autofs4 [last unloaded: idxd] [57159.904082] CR2: ffffc900011e0090 [57159.907877] ---[ end trace b4e32f49ce9176a4 ]--- Fixes: 49c4959f04b5 ("dmaengine: idxd: fix sequence for pci driver remove() and shutdown()") Reported-by: Ziye Yang Signed-off-by: Dave Jiang Link: https://lore.kernel.org/r/163225535868.4152687.9318737776682088722.stgit@djiang5-desk3.ch.intel.com Signed-off-by: Vinod Koul --- drivers/dma/idxd/init.c | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) diff --git a/drivers/dma/idxd/init.c b/drivers/dma/idxd/init.c index eb09bc591c31..7bf03f371ce1 100644 --- a/drivers/dma/idxd/init.c +++ b/drivers/dma/idxd/init.c @@ -797,11 +797,19 @@ static void idxd_remove(struct pci_dev *pdev) int msixcnt = pci_msix_vec_count(pdev); int i; - dev_dbg(&pdev->dev, "%s called\n", __func__); + idxd_unregister_devices(idxd); + /* + * When ->release() is called for the idxd->conf_dev, it frees all the memory related + * to the idxd context. The driver still needs those bits in order to do the rest of + * the cleanup. However, we do need to unbound the idxd sub-driver. So take a ref + * on the device here to hold off the freeing while allowing the idxd sub-driver + * to unbind. + */ + get_device(idxd_confdev(idxd)); + device_unregister(idxd_confdev(idxd)); idxd_shutdown(pdev); if (device_pasid_enabled(idxd)) idxd_disable_system_pasid(idxd); - idxd_unregister_devices(idxd); for (i = 0; i < msixcnt; i++) { irq_entry = &idxd->irq_entries[i]; @@ -815,7 +823,7 @@ static void idxd_remove(struct pci_dev *pdev) pci_disable_device(pdev); destroy_workqueue(idxd->wq); perfmon_pmu_remove(idxd); - device_unregister(idxd_confdev(idxd)); + put_device(idxd_confdev(idxd)); } static struct pci_driver idxd_pci_driver = { -- cgit v1.2.3 From b3b180e735409ca0c76642014304b59482e0e653 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Mon, 20 Sep 2021 14:20:07 +0200 Subject: dmaengine: remove debugfs #ifdef The ptdma driver has added debugfs support, but this fails to build when debugfs is disabled: drivers/dma/ptdma/ptdma-debugfs.c: In function 'ptdma_debugfs_setup': drivers/dma/ptdma/ptdma-debugfs.c:93:54: error: 'struct dma_device' has no member named 'dbg_dev_root' 93 | debugfs_create_file("info", 0400, pt->dma_dev.dbg_dev_root, pt, | ^ drivers/dma/ptdma/ptdma-debugfs.c:96:55: error: 'struct dma_device' has no member named 'dbg_dev_root' 96 | debugfs_create_file("stats", 0400, pt->dma_dev.dbg_dev_root, pt, | ^ drivers/dma/ptdma/ptdma-debugfs.c:102:52: error: 'struct dma_device' has no member named 'dbg_dev_root' 102 | debugfs_create_dir("q", pt->dma_dev.dbg_dev_root); | ^ Remove the #ifdef in the header, as this only saves a few bytes, but would require ugly #ifdefs in each driver using it. Simplify the other user while we're at it. 
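The replacement pattern, as a brief hedged sketch: IS_ENABLED() keeps the code compiled (and therefore type-checked) in every configuration while still letting the compiler discard the branch when the option is off, so no stub functions or #ifdef blocks are needed:

        static void example_debugfs_init(struct device *dev)
        {
                if (!IS_ENABLED(CONFIG_DEBUG_FS))
                        return;         /* folded away at compile time when debugfs is off */

                /* return value deliberately ignored here; a real driver would
                 * keep the dentry if it needs to create files under it */
                debugfs_create_dir(dev_name(dev), NULL);
        }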
Fixes: e2fb2e2a33fa ("dmaengine: ptdma: Add debugfs entries for PTDMA") Fixes: 26cf132de6f7 ("dmaengine: Create debug directories for DMA devices") Signed-off-by: Arnd Bergmann Reviewed-by: Laurent Pinchart Link: https://lore.kernel.org/r/20210920122017.205975-1-arnd@kernel.org Signed-off-by: Vinod Koul --- drivers/dma/xilinx/xilinx_dpdma.c | 15 +-------------- include/linux/dmaengine.h | 2 -- 2 files changed, 1 insertion(+), 16 deletions(-) diff --git a/drivers/dma/xilinx/xilinx_dpdma.c b/drivers/dma/xilinx/xilinx_dpdma.c index b280a53e8570..ce5c66e6897d 100644 --- a/drivers/dma/xilinx/xilinx_dpdma.c +++ b/drivers/dma/xilinx/xilinx_dpdma.c @@ -271,9 +271,6 @@ struct xilinx_dpdma_device { /* ----------------------------------------------------------------------------- * DebugFS */ - -#ifdef CONFIG_DEBUG_FS - #define XILINX_DPDMA_DEBUGFS_READ_MAX_SIZE 32 #define XILINX_DPDMA_DEBUGFS_UINT16_MAX_STR "65535" @@ -299,7 +296,7 @@ struct xilinx_dpdma_debugfs_request { static void xilinx_dpdma_debugfs_desc_done_irq(struct xilinx_dpdma_chan *chan) { - if (chan->id == dpdma_debugfs.chan_id) + if (IS_ENABLED(CONFIG_DEBUG_FS) && chan->id == dpdma_debugfs.chan_id) dpdma_debugfs.xilinx_dpdma_irq_done_count++; } @@ -462,16 +459,6 @@ static void xilinx_dpdma_debugfs_init(struct xilinx_dpdma_device *xdev) dev_err(xdev->dev, "Failed to create debugfs testcase file\n"); } -#else -static void xilinx_dpdma_debugfs_init(struct xilinx_dpdma_device *xdev) -{ -} - -static void xilinx_dpdma_debugfs_desc_done_irq(struct xilinx_dpdma_chan *chan) -{ -} -#endif /* CONFIG_DEBUG_FS */ - /* ----------------------------------------------------------------------------- * I/O Accessors */ diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h index e5c2c9e71bf1..9000f3ffce8b 100644 --- a/include/linux/dmaengine.h +++ b/include/linux/dmaengine.h @@ -944,10 +944,8 @@ struct dma_device { void (*device_issue_pending)(struct dma_chan *chan); void (*device_release)(struct dma_device *dev); /* debugfs support */ -#ifdef CONFIG_DEBUG_FS void (*dbg_summary_show)(struct seq_file *s, struct dma_device *dev); struct dentry *dbg_dev_root; -#endif }; static inline int dmaengine_slave_config(struct dma_chan *chan, -- cgit v1.2.3 From ee5c6f0ca219b65f5085043d481d9b6f045693d5 Mon Sep 17 00:00:00 2001 From: Bixuan Cui Date: Wed, 8 Sep 2021 17:28:26 +0800 Subject: dmaengine: idxd: Use list_move_tail instead of list_del/list_add_tail Using list_move_tail() instead of list_del() + list_add_tail() Reported-by: Hulk Robot Signed-off-by: Bixuan Cui Acked-by: Dave Jiang Link: https://lore.kernel.org/r/20210908092826.67765-1-cuibixuan@huawei.com Signed-off-by: Vinod Koul --- drivers/dma/idxd/irq.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/drivers/dma/idxd/irq.c b/drivers/dma/idxd/irq.c index ca88fa7a328e..79fcfc4883e4 100644 --- a/drivers/dma/idxd/irq.c +++ b/drivers/dma/idxd/irq.c @@ -221,8 +221,7 @@ static void irq_process_work_list(struct idxd_irq_entry *irq_entry) list_for_each_entry_safe(desc, n, &irq_entry->work_list, list) { if (desc->completion->status) { - list_del(&desc->list); - list_add_tail(&desc->list, &flist); + list_move_tail(&desc->list, &flist); } } -- cgit v1.2.3 From 5b5b5aa50d1b90392f13afd15089e191d57316f5 Mon Sep 17 00:00:00 2001 From: Angelo Dureghello Date: Wed, 1 Sep 2021 23:16:10 +0200 Subject: dmaengine: fsl-edma: fix for missing dmamux module Fix following panic on system halt: Requesting system halt [ 10.600000] spi spi0.1: spi_device 0.1 cleanup [ 10.630000] fsl_edma_chan_mux() 
fsl_chan->edma->n_chans 64 dmamux_nr 0 [ 10.630000] *** ZERO DIVIDE *** FORMAT=4 [ 10.630000] Current process id is 38 [ 10.630000] BAD KERNEL TRAP: 00000000 [ 10.630000] PC: [<402f09ba>] fsl_edma_chan_mux+0x7c/0x12e ... Some architecture as mcf5441x (ColdFire) may not have a dmamux, so dmamux_nr is set to 0. This patch considers this case. Signed-off-by: Angelo Dureghello Link: https://lore.kernel.org/r/20210901211610.662077-1-angelo.dureghello@timesys.com Signed-off-by: Vinod Koul --- drivers/dma/fsl-edma-common.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/dma/fsl-edma-common.c b/drivers/dma/fsl-edma-common.c index 930ae268c497..009c75ff1320 100644 --- a/drivers/dma/fsl-edma-common.c +++ b/drivers/dma/fsl-edma-common.c @@ -638,12 +638,14 @@ EXPORT_SYMBOL_GPL(fsl_edma_alloc_chan_resources); void fsl_edma_free_chan_resources(struct dma_chan *chan) { struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan); + struct fsl_edma_engine *edma = fsl_chan->edma; unsigned long flags; LIST_HEAD(head); spin_lock_irqsave(&fsl_chan->vchan.lock, flags); fsl_edma_disable_request(fsl_chan); - fsl_edma_chan_mux(fsl_chan, 0, false); + if (edma->drvdata->dmamuxs) + fsl_edma_chan_mux(fsl_chan, 0, false); fsl_chan->edesc = NULL; vchan_get_all_descriptors(&fsl_chan->vchan, &head); fsl_edma_unprep_slave_dma(fsl_chan); -- cgit v1.2.3 From 88d97ea82cbe352851a8654ee952d3a694c8c2c6 Mon Sep 17 00:00:00 2001 From: Dave Jiang Date: Wed, 8 Sep 2021 16:04:03 -0700 Subject: dmaengine: idxd: add halt interrupt support Add halt interrupt support. Given that the misc interrupt handler already check halt state, the driver just need to run the halt handling code when receiving the halt interrupt. Signed-off-by: Dave Jiang Link: https://lore.kernel.org/r/163114224352.846654.14334468363464318828.stgit@djiang5-desk3.ch.intel.com Signed-off-by: Vinod Koul --- drivers/dma/idxd/irq.c | 5 +++++ drivers/dma/idxd/registers.h | 1 + 2 files changed, 6 insertions(+) diff --git a/drivers/dma/idxd/irq.c b/drivers/dma/idxd/irq.c index 79fcfc4883e4..17f2f8a31b63 100644 --- a/drivers/dma/idxd/irq.c +++ b/drivers/dma/idxd/irq.c @@ -63,6 +63,9 @@ static int process_misc_interrupts(struct idxd_device *idxd, u32 cause) int i; bool err = false; + if (cause & IDXD_INTC_HALT_STATE) + goto halt; + if (cause & IDXD_INTC_ERR) { spin_lock(&idxd->dev_lock); for (i = 0; i < 4; i++) @@ -121,6 +124,7 @@ static int process_misc_interrupts(struct idxd_device *idxd, u32 cause) if (!err) return 0; +halt: gensts.bits = ioread32(idxd->reg_base + IDXD_GENSTATS_OFFSET); if (gensts.state == IDXD_DEVICE_STATE_HALT) { idxd->state = IDXD_DEV_HALTED; @@ -134,6 +138,7 @@ static int process_misc_interrupts(struct idxd_device *idxd, u32 cause) queue_work(idxd->wq, &idxd->work); } else { spin_lock(&idxd->dev_lock); + idxd->state = IDXD_DEV_HALTED; idxd_wqs_quiesce(idxd); idxd_wqs_unmap_portal(idxd); idxd_device_clear_state(idxd); diff --git a/drivers/dma/idxd/registers.h b/drivers/dma/idxd/registers.h index eeb11e6eb25b..262c8220adbd 100644 --- a/drivers/dma/idxd/registers.h +++ b/drivers/dma/idxd/registers.h @@ -157,6 +157,7 @@ enum idxd_device_reset_type { #define IDXD_INTC_CMD 0x02 #define IDXD_INTC_OCCUPY 0x04 #define IDXD_INTC_PERFMON_OVFL 0x08 +#define IDXD_INTC_HALT_STATE 0x10 #define IDXD_CMD_OFFSET 0xa0 union idxd_command_reg { -- cgit v1.2.3 From e530a9f3db4188d1f4e3704b0948ef69c04d5ca6 Mon Sep 17 00:00:00 2001 From: Dave Jiang Date: Wed, 1 Sep 2021 17:18:05 -0700 Subject: dmaengine: idxd: reconfig device after device reset command 
Device reset clears the MSIXPERM table and the device registers. Re-program the MSIXPERM table and re-enable the error interrupts post reset. Fixes: 745e92a6d816 ("dmaengine: idxd: idxd: move remove() bits for idxd 'struct device' to device.c") Reported-by: Sanjay Kumar Signed-off-by: Dave Jiang Link: https://lore.kernel.org/r/163054188513.2853562.12077053294595278181.stgit@djiang5-desk3.ch.intel.com Signed-off-by: Vinod Koul --- drivers/dma/idxd/device.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/dma/idxd/device.c b/drivers/dma/idxd/device.c index ef3281fc3f8b..b1407465d5c4 100644 --- a/drivers/dma/idxd/device.c +++ b/drivers/dma/idxd/device.c @@ -583,6 +583,8 @@ void idxd_device_reset(struct idxd_device *idxd) spin_lock(&idxd->dev_lock); idxd_device_clear_state(idxd); idxd->state = IDXD_DEV_DISABLED; + idxd_unmask_error_interrupts(idxd); + idxd_msix_perm_setup(idxd); spin_unlock(&idxd->dev_lock); } -- cgit v1.2.3 From 7789e3464cb610cb8925cd8605bc0aa9d088280d Mon Sep 17 00:00:00 2001 From: Cai Huoqing Date: Sat, 28 Aug 2021 17:01:17 +0800 Subject: dmaengine: sa11x0: Make use of the helper macro SET_NOIRQ_SYSTEM_SLEEP_PM_OPS() Use the helper macro SET_NOIRQ_SYSTEM_SLEEP_PM_OPS() instead of the verbose operators ".suspend_noirq /.resume_noirq/.freeze_noirq/ .thaw_noirq/.poweroff_noirq/.restore_noirq", because the SET_NOIRQ_SYSTEM_SLEEP_PM_OPS() is a nice helper macro that could be brought in to make code a little clearer, a little more concise. Signed-off-by: Cai Huoqing Link: https://lore.kernel.org/r/20210828090117.1814-1-caihuoqing@baidu.com Signed-off-by: Vinod Koul --- drivers/dma/sa11x0-dma.c | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/drivers/dma/sa11x0-dma.c b/drivers/dma/sa11x0-dma.c index 1e918e284fc0..38f318b2f80d 100644 --- a/drivers/dma/sa11x0-dma.c +++ b/drivers/dma/sa11x0-dma.c @@ -1072,12 +1072,7 @@ static int sa11x0_dma_resume(struct device *dev) } static const struct dev_pm_ops sa11x0_dma_pm_ops = { - .suspend_noirq = sa11x0_dma_suspend, - .resume_noirq = sa11x0_dma_resume, - .freeze_noirq = sa11x0_dma_suspend, - .thaw_noirq = sa11x0_dma_resume, - .poweroff_noirq = sa11x0_dma_suspend, - .restore_noirq = sa11x0_dma_resume, + SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(sa11x0_dma_suspend, sa11x0_dma_resume) }; static struct platform_driver sa11x0_dma_driver = { -- cgit v1.2.3 From dbe3c54e71051b50a4aa863502368000d3e7701f Mon Sep 17 00:00:00 2001 From: Shravya Kumbham Date: Mon, 13 Sep 2021 14:58:36 +0530 Subject: dmaengine: xilinx_dma: Fix kernel-doc warnings Modify the prototype from xilinx_dma_tx_descriptor to xilinx_dma_alloc_tx_descriptor and xilinx_dma_channel_set_config to xilinx_vdma_channel_set_config in API description to fix below linux kernel-doc warnings. drivers/dma/xilinx/xilinx_dma.c:800: warning: expecting prototype for xilinx_dma_tx_descriptor(). Prototype was for xilinx_dma_alloc_tx_descriptor() instead. drivers/dma/xilinx/xilinx_dma.c:2471: warning: expecting prototype for xilinx_dma_channel_set_config(). Prototype was for xilinx_vdma_channel_set_config() instead. 
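For reference, a minimal hypothetical example of the convention scripts/kernel-doc enforces: the name after the comment opener must match the function that immediately follows (all types and names below are placeholders):

        /**
         * example_alloc_tx_descriptor - Allocate transaction descriptor
         * @chan: Driver specific DMA channel
         *
         * Return: The allocated descriptor on success and NULL on failure.
         */
        static struct example_tx_descriptor *
        example_alloc_tx_descriptor(struct example_chan *chan)
        {
                return kzalloc(sizeof(struct example_tx_descriptor), GFP_NOWAIT);
        }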
Signed-off-by: Shravya Kumbham Signed-off-by: Radhey Shyam Pandey Link: https://lore.kernel.org/r/1631525316-2323-1-git-send-email-radhey.shyam.pandey@xilinx.com Signed-off-by: Vinod Koul --- drivers/dma/xilinx/xilinx_dma.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c index a4450bc95466..60cea7d997ee 100644 --- a/drivers/dma/xilinx/xilinx_dma.c +++ b/drivers/dma/xilinx/xilinx_dma.c @@ -792,7 +792,7 @@ static void xilinx_vdma_free_tx_segment(struct xilinx_dma_chan *chan, } /** - * xilinx_dma_tx_descriptor - Allocate transaction descriptor + * xilinx_dma_alloc_tx_descriptor - Allocate transaction descriptor * @chan: Driver specific DMA channel * * Return: The allocated descriptor on success and NULL on failure. @@ -2483,7 +2483,7 @@ static void xilinx_dma_synchronize(struct dma_chan *dchan) } /** - * xilinx_dma_channel_set_config - Configure VDMA channel + * xilinx_vdma_channel_set_config - Configure VDMA channel * Run-time configuration for Axi VDMA, supports: * . halt the channel * . configure interrupt coalescing and inter-packet delay threshold -- cgit v1.2.3 From fe14c67267886e3af3c377a7bee4e6f915778636 Mon Sep 17 00:00:00 2001 From: Len Baker Date: Sat, 4 Sep 2021 16:58:13 +0200 Subject: dmaengine: milbeaut-hdmac: Prefer kcalloc over open coded arithmetic As noted in the "Deprecated Interfaces, Language Features, Attributes, and Conventions" documentation [1], size calculations (especially multiplication) should not be performed in memory allocator (or similar) function arguments due to the risk of them overflowing. This could lead to values wrapping around and a smaller allocation being made than the caller was expecting. Using those allocations could lead to linear overflows of heap memory and other misbehaviors. So, use the purpose specific kcalloc() function instead of the argument size * count in the kzalloc() function. [1] https://www.kernel.org/doc/html/v5.14/process/deprecated.html#open-coded-arithmetic-in-allocator-arguments Signed-off-by: Len Baker Link: https://lore.kernel.org/r/20210904145813.5161-1-len.baker@gmx.com Signed-off-by: Vinod Koul --- drivers/dma/milbeaut-hdmac.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/dma/milbeaut-hdmac.c b/drivers/dma/milbeaut-hdmac.c index a8cfb59f6efe..1b0a95892627 100644 --- a/drivers/dma/milbeaut-hdmac.c +++ b/drivers/dma/milbeaut-hdmac.c @@ -269,7 +269,7 @@ milbeaut_hdmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, if (!md) return NULL; - md->sgl = kzalloc(sizeof(*sgl) * sg_len, GFP_NOWAIT); + md->sgl = kcalloc(sg_len, sizeof(*sgl), GFP_NOWAIT); if (!md->sgl) { kfree(md); return NULL; -- cgit v1.2.3 From 9bf9e0b44104d05b21761441227ae566c732ecb9 Mon Sep 17 00:00:00 2001 From: Xin Xiong Date: Sat, 11 Sep 2021 15:05:33 +0800 Subject: dmaengine: mmp_pdma: fix reference count leaks in mmp_pdma_probe The issue happens in an error handling path. If of_dma_controller_register() fails, the function simply prints error messages and returns error code, without decrementing the reference count of pdev->device incremented earlier by dma_async_device_register(), which may result in refcount leaks. Fix it by invoking dma_async_device_unregister() before returning the error code. 
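The rule being applied, shown as a hedged fragment (ddev, np and example_xlate are placeholders): once dma_async_device_register() has succeeded, any later failure in probe must undo that registration before returning:

        ret = dma_async_device_register(ddev);
        if (ret)
                return ret;

        ret = of_dma_controller_register(np, example_xlate, ddev);
        if (ret < 0) {
                /* undo the earlier registration before bailing out */
                dma_async_device_unregister(ddev);
                return ret;
        }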
Signed-off-by: Xin Xiong Signed-off-by: Xiyu Yang Signed-off-by: Xin Tan Link: https://lore.kernel.org/r/20210911070533.3114-1-xiongx18@fudan.edu.cn Signed-off-by: Vinod Koul --- drivers/dma/mmp_pdma.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/dma/mmp_pdma.c b/drivers/dma/mmp_pdma.c index 89f1814ff27a..a23563cd118b 100644 --- a/drivers/dma/mmp_pdma.c +++ b/drivers/dma/mmp_pdma.c @@ -1123,6 +1123,7 @@ static int mmp_pdma_probe(struct platform_device *op) mmp_pdma_dma_xlate, pdev); if (ret < 0) { dev_err(&op->dev, "of_dma_controller_register failed\n"); + dma_async_device_unregister(&pdev->device); return ret; } } -- cgit v1.2.3 From 4c0eee50658746b0333d35a75d3db6e0aac08ef9 Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Wed, 15 Sep 2021 12:20:38 +0100 Subject: dmaengine: sh: make array ds_lut static Don't populate the read-only array ds_lut on the stack but instead it static. Also makes the object code smaller by 163 bytes: Before: text data bss dec hex filename 23508 4796 0 28304 6e90 ./drivers/dma/sh/rz-dmac.o After: text data bss dec hex filename 23281 4860 0 28141 6ded ./drivers/dma/sh/rz-dmac.o (gcc version 11.2.0) Signed-off-by: Colin Ian King Link: https://lore.kernel.org/r/20210915112038.12407-1-colin.king@canonical.com Signed-off-by: Vinod Koul --- drivers/dma/sh/rz-dmac.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/dma/sh/rz-dmac.c b/drivers/dma/sh/rz-dmac.c index d9f2cfef878e..ee2872e7d64c 100644 --- a/drivers/dma/sh/rz-dmac.c +++ b/drivers/dma/sh/rz-dmac.c @@ -574,7 +574,7 @@ static void rz_dmac_issue_pending(struct dma_chan *chan) static u8 rz_dmac_ds_to_val_mapping(enum dma_slave_buswidth ds) { u8 i; - const enum dma_slave_buswidth ds_lut[] = { + static const enum dma_slave_buswidth ds_lut[] = { DMA_SLAVE_BUSWIDTH_1_BYTE, DMA_SLAVE_BUSWIDTH_2_BYTES, DMA_SLAVE_BUSWIDTH_4_BYTES, -- cgit v1.2.3 From ecb8c88bd31cce374396358f395795dcfe3479f8 Mon Sep 17 00:00:00 2001 From: Wang Qing Date: Thu, 7 Oct 2021 20:28:27 -0700 Subject: dmaengine: dw-edma-pcie: switch from 'pci_' to 'dma_' API The wrappers in include/linux/pci-dma-compat.h should go away. pci_set_dma_mask()/pci_set_consistent_dma_mask() should be replaced with dma_set_mask()/dma_set_coherent_mask(), and use dma_set_mask_and_coherent() for both. 
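A note on these conversions: dma_set_mask_and_coherent() returns 0 on success and a negative errno on failure, so the error branch must trigger on a non-zero return. The usual 64-bit mask with 32-bit fallback looks like this (sketch only):

        err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
        if (err) {
                dev_warn(&pdev->dev, "falling back to 32-bit DMA mask\n");
                err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
        }
        if (err)
                return err;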
Signed-off-by: Wang Qing Link: https://lore.kernel.org/r/1633663733-47199-2-git-send-email-wangqing@vivo.com Signed-off-by: Vinod Koul --- drivers/dma/dw-edma/dw-edma-pcie.c | 17 ++++------------- 1 file changed, 4 insertions(+), 13 deletions(-) diff --git a/drivers/dma/dw-edma/dw-edma-pcie.c b/drivers/dma/dw-edma/dw-edma-pcie.c index 44f6e09bdb53..198f6cd8ac1b 100644 --- a/drivers/dma/dw-edma/dw-edma-pcie.c +++ b/drivers/dma/dw-edma/dw-edma-pcie.c @@ -186,27 +186,18 @@ static int dw_edma_pcie_probe(struct pci_dev *pdev, pci_set_master(pdev); /* DMA configuration */ - err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); if (!err) { - err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); - if (err) { - pci_err(pdev, "consistent DMA mask 64 set failed\n"); - return err; - } + pci_err(pdev, "DMA mask 64 set failed\n"); + return err; } else { pci_err(pdev, "DMA mask 64 set failed\n"); - err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); if (err) { pci_err(pdev, "DMA mask 32 set failed\n"); return err; } - - err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); - if (err) { - pci_err(pdev, "consistent DMA mask 32 set failed\n"); - return err; - } } /* Data structure allocation */ -- cgit v1.2.3 From 1365e117bf5eca14c7397c404770ffbc5e3ad99c Mon Sep 17 00:00:00 2001 From: Qing Wang Date: Thu, 7 Oct 2021 20:28:31 -0700 Subject: dmaengine: dw: switch from 'pci_' to 'dma_' API The wrappers in include/linux/pci-dma-compat.h should go away. pci_set_dma_mask()/pci_set_consistent_dma_mask() should be replaced with dma_set_mask()/dma_set_coherent_mask(), and use dma_set_mask_and_coherent() for both. Signed-off-by: Qing Wang Reviewed-by: Andy Shevchenko Link: https://lore.kernel.org/r/1633663733-47199-6-git-send-email-wangqing@vivo.com Signed-off-by: Vinod Koul --- drivers/dma/dw/pci.c | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/drivers/dma/dw/pci.c b/drivers/dma/dw/pci.c index 26a3f926da02..ad2d4d012cf7 100644 --- a/drivers/dma/dw/pci.c +++ b/drivers/dma/dw/pci.c @@ -32,11 +32,7 @@ static int dw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pid) pci_set_master(pdev); pci_try_set_mwi(pdev); - ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); - if (ret) - return ret; - - ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); + ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); if (ret) return ret; -- cgit v1.2.3 From d77143dd248e7d909975941f3097d7e36b7876cf Mon Sep 17 00:00:00 2001 From: Qing Wang Date: Thu, 7 Oct 2021 20:28:30 -0700 Subject: dmaengine: hisi_dma: switch from 'pci_' to 'dma_' API The wrappers in include/linux/pci-dma-compat.h should go away. pci_set_dma_mask()/pci_set_consistent_dma_mask() should be replaced with dma_set_mask()/dma_set_coherent_mask(), and use dma_set_mask_and_coherent() for both. 
Signed-off-by: Qing Wang Link: https://lore.kernel.org/r/1633663733-47199-5-git-send-email-wangqing@vivo.com Signed-off-by: Vinod Koul --- drivers/dma/hisi_dma.c | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/drivers/dma/hisi_dma.c b/drivers/dma/hisi_dma.c index c855a0e4f9ff..97c87a7cba87 100644 --- a/drivers/dma/hisi_dma.c +++ b/drivers/dma/hisi_dma.c @@ -519,11 +519,7 @@ static int hisi_dma_probe(struct pci_dev *pdev, const struct pci_device_id *id) return ret; } - ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); - if (ret) - return ret; - - ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); + ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); if (ret) return ret; -- cgit v1.2.3 From bec897e0a796e3eb5116a2340505ca2da4cecb1b Mon Sep 17 00:00:00 2001 From: Qing Wang Date: Thu, 7 Oct 2021 20:28:29 -0700 Subject: dmaengine: hsu: switch from 'pci_' to 'dma_' API The wrappers in include/linux/pci-dma-compat.h should go away. pci_set_dma_mask()/pci_set_consistent_dma_mask() should be replaced with dma_set_mask()/dma_set_coherent_mask(), and use dma_set_mask_and_coherent() for both. Signed-off-by: Qing Wang Link: https://lore.kernel.org/r/1633663733-47199-4-git-send-email-wangqing@vivo.com Signed-off-by: Vinod Koul --- drivers/dma/hsu/pci.c | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/drivers/dma/hsu/pci.c b/drivers/dma/hsu/pci.c index 9045a6f7f589..6a2df3dd78d0 100644 --- a/drivers/dma/hsu/pci.c +++ b/drivers/dma/hsu/pci.c @@ -65,11 +65,7 @@ static int hsu_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) pci_set_master(pdev); pci_try_set_mwi(pdev); - ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); - if (ret) - return ret; - - ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); + ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); if (ret) return ret; -- cgit v1.2.3 From 0c5afef7bf1fbda7e7883dc4b93f64f90003706f Mon Sep 17 00:00:00 2001 From: Qing Wang Date: Thu, 7 Oct 2021 20:28:28 -0700 Subject: dmaengine: ioat: switch from 'pci_' to 'dma_' API The wrappers in include/linux/pci-dma-compat.h should go away. pci_set_dma_mask()/pci_set_consistent_dma_mask() should be replaced with dma_set_mask()/dma_set_coherent_mask(), and use dma_set_mask_and_coherent() for both. Signed-off-by: Qing Wang Link: https://lore.kernel.org/r/1633663733-47199-3-git-send-email-wangqing@vivo.com Signed-off-by: Vinod Koul --- drivers/dma/ioat/init.c | 10 ++-------- 1 file changed, 2 insertions(+), 8 deletions(-) diff --git a/drivers/dma/ioat/init.c b/drivers/dma/ioat/init.c index 191b59279007..373b8dac6c9b 100644 --- a/drivers/dma/ioat/init.c +++ b/drivers/dma/ioat/init.c @@ -1363,15 +1363,9 @@ static int ioat_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) if (!iomap) return -ENOMEM; - err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); if (err) - err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); - if (err) - return err; - - err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); - if (err) - err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); if (err) return err; -- cgit v1.2.3 From c726c62db857d375800af7e82eb1c6f639e87631 Mon Sep 17 00:00:00 2001 From: Qing Wang Date: Thu, 7 Oct 2021 20:28:32 -0700 Subject: dmaengine: switch from 'pci_' to 'dma_' API The wrappers in include/linux/pci-dma-compat.h should go away. 
pci_set_dma_mask()/pci_set_consistent_dma_mask() should be replaced with dma_set_mask()/dma_set_coherent_mask(), and use dma_set_mask_and_coherent() for both. Signed-off-by: Qing Wang Reviewed-by: Logan Gunthorpe Link: https://lore.kernel.org/r/1633663733-47199-7-git-send-email-wangqing@vivo.com Signed-off-by: Vinod Koul --- drivers/dma/plx_dma.c | 10 ++-------- 1 file changed, 2 insertions(+), 8 deletions(-) diff --git a/drivers/dma/plx_dma.c b/drivers/dma/plx_dma.c index 166934544161..1ffcb5ca9788 100644 --- a/drivers/dma/plx_dma.c +++ b/drivers/dma/plx_dma.c @@ -563,15 +563,9 @@ static int plx_dma_probe(struct pci_dev *pdev, if (rc) return rc; - rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(48)); + rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(48)); if (rc) - rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); - if (rc) - return rc; - - rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(48)); - if (rc) - rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); + rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); if (rc) return rc; -- cgit v1.2.3 From 07c609cc987710a2874d41cf54802777b44b523b Mon Sep 17 00:00:00 2001 From: Cai Huoqing Date: Tue, 26 Oct 2021 10:05:07 +0800 Subject: dmaengine: sa11x0: Mark PM functions as __maybe_unused Without CONFIG_PM_SLEEP, the runtime suspend/resume functions are unused, producing a warning: ../drivers/dma/sa11x0-dma.c:1042:12: error: 'sa11x0_dma_resume' defined but not used ../drivers/dma/sa11x0-dma.c:1004:12: error: 'sa11x0_dma_suspend' defined but not used Signed-off-by: Cai Huoqing Link: https://lore.kernel.org/r/20211026020508.550-1-caihuoqing@baidu.com Signed-off-by: Vinod Koul --- drivers/dma/sa11x0-dma.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/dma/sa11x0-dma.c b/drivers/dma/sa11x0-dma.c index 38f318b2f80d..a29c13cae716 100644 --- a/drivers/dma/sa11x0-dma.c +++ b/drivers/dma/sa11x0-dma.c @@ -1001,7 +1001,7 @@ static int sa11x0_dma_remove(struct platform_device *pdev) return 0; } -static int sa11x0_dma_suspend(struct device *dev) +static __maybe_unused int sa11x0_dma_suspend(struct device *dev) { struct sa11x0_dma_dev *d = dev_get_drvdata(dev); unsigned pch; @@ -1039,7 +1039,7 @@ static int sa11x0_dma_suspend(struct device *dev) return 0; } -static int sa11x0_dma_resume(struct device *dev) +static __maybe_unused int sa11x0_dma_resume(struct device *dev) { struct sa11x0_dma_dev *d = dev_get_drvdata(dev); unsigned pch; -- cgit v1.2.3 From 37aef53f5ccf789a87006e0dcbce187ce3427f63 Mon Sep 17 00:00:00 2001 From: Stephan Gerhold Date: Mon, 18 Oct 2021 12:24:20 +0200 Subject: dt-bindings: dmaengine: bam_dma: Add "powered remotely" mode In some configurations, the BAM DMA controller is set up by a remote processor and the local processor can simply start making use of it without setting up the BAM. This is already supported using the "qcom,controlled-remotely" property. However, for some reason another possible configuration is that the remote processor is responsible for powering up the BAM, but we are still responsible for initializing it (e.g. resetting it etc). Add a "qcom,powered-remotely" property to describe that configuration. 
Signed-off-by: Stephan Gerhold Acked-by: Rob Herring Reviewed-by: Bhupesh Sharma Link: https://lore.kernel.org/r/20211018102421.19848-2-stephan@gerhold.net Signed-off-by: Vinod Koul --- Documentation/devicetree/bindings/dma/qcom_bam_dma.txt | 2 ++ 1 file changed, 2 insertions(+) diff --git a/Documentation/devicetree/bindings/dma/qcom_bam_dma.txt b/Documentation/devicetree/bindings/dma/qcom_bam_dma.txt index cf5b9e44432c..6e9a5497b3f2 100644 --- a/Documentation/devicetree/bindings/dma/qcom_bam_dma.txt +++ b/Documentation/devicetree/bindings/dma/qcom_bam_dma.txt @@ -15,6 +15,8 @@ Required properties: the secure world. - qcom,controlled-remotely : optional, indicates that the bam is controlled by remote proccessor i.e. execution environment. +- qcom,powered-remotely : optional, indicates that the bam is powered up by + a remote processor but must be initialized by the local processor. - num-channels : optional, indicates supported number of DMA channels in a remotely controlled bam. - qcom,num-ees : optional, indicates supported number of Execution Environments -- cgit v1.2.3 From 9502ffcda0491c2079e2f6e3278b3c7377fd81fb Mon Sep 17 00:00:00 2001 From: Stephan Gerhold Date: Mon, 18 Oct 2021 12:24:21 +0200 Subject: dmaengine: qcom: bam_dma: Add "powered remotely" mode In some configurations, the BAM DMA controller is set up by a remote processor and the local processor can simply start making use of it without setting up the BAM. This is already supported using the "qcom,controlled-remotely" property. However, for some reason another possible configuration is that the remote processor is responsible for powering up the BAM, but we are still responsible for initializing it (e.g. resetting it etc). This configuration is quite challenging to handle properly because the power control is handled through separate channels (e.g. device-specific SMSM interrupts / smem-states). Great care must be taken to ensure the BAM registers are not accessed while the BAM is powered off since this results in a bus stall. Attempt to support this configuration with minimal device-specific code in the bam_dma driver by tracking the number of requested channels. Consumers of DMA channels are responsible to only request DMA channels when the BAM was powered on by the remote processor, and to release them before the BAM is powered off. When the first channel is requested the BAM is initialized (reset) and it is also put into reset when the last channel was released. 
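In outline, the gating reduces to two fragments, shown here as a simplified sketch of the alloc/free paths in the diff below (example_bam_sw_reset() is a hypothetical stand-in for the software-reset write):

        /* alloc path: first consumer after the remote powered the BAM up */
        if (bdev->active_channels++ == 0 && bdev->powered_remotely)
                bam_reset(bdev);

        /* free path: last consumer puts the BAM back into reset so no further
         * register access can race with the remote powering it down */
        if (--bdev->active_channels == 0 && bdev->powered_remotely)
                example_bam_sw_reset(bdev);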
Signed-off-by: Stephan Gerhold Reviewed-by: Bhupesh Sharma Link: https://lore.kernel.org/r/20211018102421.19848-3-stephan@gerhold.net Signed-off-by: Vinod Koul --- drivers/dma/qcom/bam_dma.c | 90 +++++++++++++++++++++++++++++----------------- 1 file changed, 57 insertions(+), 33 deletions(-) diff --git a/drivers/dma/qcom/bam_dma.c b/drivers/dma/qcom/bam_dma.c index c8a77b428b52..87f6ca1541cf 100644 --- a/drivers/dma/qcom/bam_dma.c +++ b/drivers/dma/qcom/bam_dma.c @@ -388,6 +388,8 @@ struct bam_device { /* execution environment ID, from DT */ u32 ee; bool controlled_remotely; + bool powered_remotely; + u32 active_channels; const struct reg_offset_data *layout; @@ -415,6 +417,44 @@ static inline void __iomem *bam_addr(struct bam_device *bdev, u32 pipe, r.ee_mult * bdev->ee; } +/** + * bam_reset() - reset and initialize BAM registers + * @bdev: bam device + */ +static void bam_reset(struct bam_device *bdev) +{ + u32 val; + + /* s/w reset bam */ + /* after reset all pipes are disabled and idle */ + val = readl_relaxed(bam_addr(bdev, 0, BAM_CTRL)); + val |= BAM_SW_RST; + writel_relaxed(val, bam_addr(bdev, 0, BAM_CTRL)); + val &= ~BAM_SW_RST; + writel_relaxed(val, bam_addr(bdev, 0, BAM_CTRL)); + + /* make sure previous stores are visible before enabling BAM */ + wmb(); + + /* enable bam */ + val |= BAM_EN; + writel_relaxed(val, bam_addr(bdev, 0, BAM_CTRL)); + + /* set descriptor threshhold, start with 4 bytes */ + writel_relaxed(DEFAULT_CNT_THRSHLD, + bam_addr(bdev, 0, BAM_DESC_CNT_TRSHLD)); + + /* Enable default set of h/w workarounds, ie all except BAM_FULL_PIPE */ + writel_relaxed(BAM_CNFG_BITS_DEFAULT, bam_addr(bdev, 0, BAM_CNFG_BITS)); + + /* enable irqs for errors */ + writel_relaxed(BAM_ERROR_EN | BAM_HRESP_ERR_EN, + bam_addr(bdev, 0, BAM_IRQ_EN)); + + /* unmask global bam interrupt */ + writel_relaxed(BAM_IRQ_MSK, bam_addr(bdev, 0, BAM_IRQ_SRCS_MSK_EE)); +} + /** * bam_reset_channel - Reset individual BAM DMA channel * @bchan: bam channel @@ -512,6 +552,9 @@ static int bam_alloc_chan(struct dma_chan *chan) return -ENOMEM; } + if (bdev->active_channels++ == 0 && bdev->powered_remotely) + bam_reset(bdev); + return 0; } @@ -565,6 +608,13 @@ static void bam_free_chan(struct dma_chan *chan) /* disable irq */ writel_relaxed(0, bam_addr(bdev, bchan->id, BAM_P_IRQ_EN)); + if (--bdev->active_channels == 0 && bdev->powered_remotely) { + /* s/w reset bam */ + val = readl_relaxed(bam_addr(bdev, 0, BAM_CTRL)); + val |= BAM_SW_RST; + writel_relaxed(val, bam_addr(bdev, 0, BAM_CTRL)); + } + err: pm_runtime_mark_last_busy(bdev->dev); pm_runtime_put_autosuspend(bdev->dev); @@ -1164,37 +1214,9 @@ static int bam_init(struct bam_device *bdev) bdev->num_channels = val & BAM_NUM_PIPES_MASK; } - if (bdev->controlled_remotely) - return 0; - - /* s/w reset bam */ - /* after reset all pipes are disabled and idle */ - val = readl_relaxed(bam_addr(bdev, 0, BAM_CTRL)); - val |= BAM_SW_RST; - writel_relaxed(val, bam_addr(bdev, 0, BAM_CTRL)); - val &= ~BAM_SW_RST; - writel_relaxed(val, bam_addr(bdev, 0, BAM_CTRL)); - - /* make sure previous stores are visible before enabling BAM */ - wmb(); - - /* enable bam */ - val |= BAM_EN; - writel_relaxed(val, bam_addr(bdev, 0, BAM_CTRL)); - - /* set descriptor threshhold, start with 4 bytes */ - writel_relaxed(DEFAULT_CNT_THRSHLD, - bam_addr(bdev, 0, BAM_DESC_CNT_TRSHLD)); - - /* Enable default set of h/w workarounds, ie all except BAM_FULL_PIPE */ - writel_relaxed(BAM_CNFG_BITS_DEFAULT, bam_addr(bdev, 0, BAM_CNFG_BITS)); - - /* enable irqs for errors */ - 
writel_relaxed(BAM_ERROR_EN | BAM_HRESP_ERR_EN, - bam_addr(bdev, 0, BAM_IRQ_EN)); - - /* unmask global bam interrupt */ - writel_relaxed(BAM_IRQ_MSK, bam_addr(bdev, 0, BAM_IRQ_SRCS_MSK_EE)); + /* Reset BAM now if fully controlled locally */ + if (!bdev->controlled_remotely && !bdev->powered_remotely) + bam_reset(bdev); return 0; } @@ -1257,8 +1279,10 @@ static int bam_dma_probe(struct platform_device *pdev) bdev->controlled_remotely = of_property_read_bool(pdev->dev.of_node, "qcom,controlled-remotely"); + bdev->powered_remotely = of_property_read_bool(pdev->dev.of_node, + "qcom,powered-remotely"); - if (bdev->controlled_remotely) { + if (bdev->controlled_remotely || bdev->powered_remotely) { ret = of_property_read_u32(pdev->dev.of_node, "num-channels", &bdev->num_channels); if (ret) @@ -1270,7 +1294,7 @@ static int bam_dma_probe(struct platform_device *pdev) dev_err(bdev->dev, "num-ees unspecified in dt\n"); } - if (bdev->controlled_remotely) + if (bdev->controlled_remotely || bdev->powered_remotely) bdev->bamclk = devm_clk_get_optional(bdev->dev, "bam_clk"); else bdev->bamclk = devm_clk_get(bdev->dev, "bam_clk"); -- cgit v1.2.3 From 2f23355e96b4a5896de2032176197fa0c5c444dd Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Tue, 26 Oct 2021 17:56:07 +0200 Subject: dmaengine: dw-axi-dmac: Simplify assignment in dma_chan_pause() Simplify assigning zero and performing a logical OR to a single assignment. Signed-off-by: Geert Uytterhoeven Link: https://lore.kernel.org/r/2abd0da35608c14689a919d47dd45898a8ab4297.1635263478.git.geert@linux-m68k.org Signed-off-by: Vinod Koul --- drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c b/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c index 79572ec532ef..cd0d745eb071 100644 --- a/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c +++ b/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c @@ -1164,9 +1164,8 @@ static int dma_chan_pause(struct dma_chan *dchan) BIT(chan->id) << DMAC_CHAN_SUSP_WE_SHIFT; axi_dma_iowrite32(chan->chip, DMAC_CHEN, val); } else { - val = 0; - val |= BIT(chan->id) << DMAC_CHAN_SUSP2_SHIFT | - BIT(chan->id) << DMAC_CHAN_SUSP2_WE_SHIFT; + val = BIT(chan->id) << DMAC_CHAN_SUSP2_SHIFT | + BIT(chan->id) << DMAC_CHAN_SUSP2_WE_SHIFT; axi_dma_iowrite32(chan->chip, DMAC_CHSUSPREG, val); } -- cgit v1.2.3 From d191a9abc02f1f59bfb3b2349d30cb5534dc0fd9 Mon Sep 17 00:00:00 2001 From: Claudiu Beznea Date: Mon, 25 Oct 2021 10:40:02 +0300 Subject: dmaengine: at_xdmac: fix compilation warning Fixed "unused variable 'atmel_xdmac_dev_pm_ops'" compilation warning when CONFIG_PM is not defined. 
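A hedged sketch of the combination in play (hypothetical names): with CONFIG_PM disabled, pm_ptr() resolves to NULL, leaving the dev_pm_ops structure unreferenced, and with CONFIG_PM_SLEEP disabled the SET_LATE_SYSTEM_SLEEP_PM_OPS() entries expand to nothing, leaving the callbacks unreferenced; __maybe_unused silences both warnings without #ifdefs:

        static int __maybe_unused example_suspend(struct device *dev)
        {
                return 0;
        }

        static int __maybe_unused example_resume(struct device *dev)
        {
                return 0;
        }

        static const struct dev_pm_ops __maybe_unused example_pm_ops = {
                SET_LATE_SYSTEM_SLEEP_PM_OPS(example_suspend, example_resume)
        };

        /* referenced from the driver as:  .driver.pm = pm_ptr(&example_pm_ops) */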
Fixes: 8e0c7e486014 ("dmaengine: at_xdmac: use pm_ptr()") Reported-by: kernel test robot Signed-off-by: Claudiu Beznea Reviewed-by: Tudor Ambarus Link: https://lore.kernel.org/r/20211025074002.722504-1-claudiu.beznea@microchip.com Signed-off-by: Vinod Koul --- drivers/dma/at_xdmac.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c index 7fb19bd18ac3..275a76f188ae 100644 --- a/drivers/dma/at_xdmac.c +++ b/drivers/dma/at_xdmac.c @@ -2207,7 +2207,7 @@ static int at_xdmac_remove(struct platform_device *pdev) return 0; } -static const struct dev_pm_ops atmel_xdmac_dev_pm_ops = { +static const struct dev_pm_ops __maybe_unused atmel_xdmac_dev_pm_ops = { .prepare = atmel_xdmac_prepare, SET_LATE_SYSTEM_SLEEP_PM_OPS(atmel_xdmac_suspend, atmel_xdmac_resume) }; -- cgit v1.2.3 From a34da7ef9a8c2b89ddb84689562f3d2b48a4e588 Mon Sep 17 00:00:00 2001 From: Lars-Peter Clausen Date: Mon, 25 Oct 2021 09:54:26 +0200 Subject: dmaengine: altera-msgdma: Correctly handle descriptor callbacks DMA clients can provide one of two types of callbacks. For this reason dmaengine drivers should not directly invoke `callback`, but always use dmaengine_desc_callback_invoke(). This makes sure that both types of callbacks are handled correctly. The altera-msgdma driver currently doesn't do this and only handles the `callback` type callback. If the client used the `callback_result` type callback it will not be called. Fix this by switching to `dmaengine_desc_callback_valid()` and `dmaengine_desc_callback_invoke()`. Signed-off-by: Lars-Peter Clausen Link: https://lore.kernel.org/r/20211025075428.2094-1-lars@metafoo.de Signed-off-by: Vinod Koul --- drivers/dma/altera-msgdma.c | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/drivers/dma/altera-msgdma.c b/drivers/dma/altera-msgdma.c index 5a2c7573b692..f5b885d69cd3 100644 --- a/drivers/dma/altera-msgdma.c +++ b/drivers/dma/altera-msgdma.c @@ -585,16 +585,14 @@ static void msgdma_chan_desc_cleanup(struct msgdma_device *mdev) struct msgdma_sw_desc *desc, *next; list_for_each_entry_safe(desc, next, &mdev->done_list, node) { - dma_async_tx_callback callback; - void *callback_param; + struct dmaengine_desc_callback cb; list_del(&desc->node); - callback = desc->async_tx.callback; - callback_param = desc->async_tx.callback_param; - if (callback) { + dmaengine_desc_get_callback(&desc->async_tx, &cb); + if (dmaengine_desc_callback_valid(&cb)) { spin_unlock(&mdev->lock); - callback(callback_param); + dmaengine_desc_callback_invoke(&cb, NULL); spin_lock(&mdev->lock); } -- cgit v1.2.3 From a63ddc38571ef32eb396eb81f50dca1c8d9b2432 Mon Sep 17 00:00:00 2001 From: Lars-Peter Clausen Date: Mon, 25 Oct 2021 09:54:27 +0200 Subject: dmaengine: xilinx_dma: Correctly handle cyclic descriptor callbacks DMA clients can provide one of two types of callbacks. For this reason dmaengine drivers should not directly invoke `callback`, but always use `dmaengine_desc_callback_invoke()`. This makes sure that both types of callbacks are handled correctly. The xilinx_dma driver currently doesn't do this for cyclic descriptors and only handles the `callback` type callback. If the client used the `callback_result` type callback it will not be called. Fix this by switching to `dmaengine_desc_callback_valid()` and `dmaengine_desc_callback_invoke()`. 
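To make the two client-side callback flavours concrete, here is a hedged sketch of a hypothetical dmaengine client (the foo_* names are invented): a client fills in either callback or callback_result on the descriptor, which is why drivers must go through the dmaengine_desc_callback_*() helpers instead of calling callback directly.

#include <linux/completion.h>
#include <linux/dmaengine.h>
#include <linux/printk.h>

/* Legacy flavour: no status information is passed back. */
static void __maybe_unused foo_xfer_done(void *param)
{
	complete(param);
}

/* Newer flavour: error code and residue arrive in struct dmaengine_result. */
static void foo_xfer_done_result(void *param,
				 const struct dmaengine_result *result)
{
	if (result->result != DMA_TRANS_NOERROR)
		pr_warn("foo: transfer failed, residue %u\n", result->residue);
	complete(param);
}

static dma_cookie_t foo_submit(struct dma_async_tx_descriptor *tx,
			       struct completion *done)
{
	/* A client fills in exactly one of callback/callback_result;
	 * foo_xfer_done() above would go into tx->callback instead. */
	tx->callback_result = foo_xfer_done_result;
	tx->callback_param = done;
	return dmaengine_submit(tx);
}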
Signed-off-by: Lars-Peter Clausen Link: https://lore.kernel.org/r/20211025075428.2094-2-lars@metafoo.de Signed-off-by: Vinod Koul --- drivers/dma/xilinx/xilinx_dma.c | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c index 60cea7d997ee..4677ce08ed40 100644 --- a/drivers/dma/xilinx/xilinx_dma.c +++ b/drivers/dma/xilinx/xilinx_dma.c @@ -998,14 +998,12 @@ static void xilinx_dma_chan_handle_cyclic(struct xilinx_dma_chan *chan, struct xilinx_dma_tx_descriptor *desc, unsigned long *flags) { - dma_async_tx_callback callback; - void *callback_param; + struct dmaengine_desc_callback cb; - callback = desc->async_tx.callback; - callback_param = desc->async_tx.callback_param; - if (callback) { + dmaengine_desc_get_callback(&desc->async_tx, &cb); + if (dmaengine_desc_callback_valid(&cb)) { spin_unlock_irqrestore(&chan->lock, *flags); - callback(callback_param); + dmaengine_desc_callback_invoke(&cb, NULL); spin_lock_irqsave(&chan->lock, *flags); } } -- cgit v1.2.3 From 1825ecc908d4e77735a993a72406e04e9dfbe5eb Mon Sep 17 00:00:00 2001 From: Lars-Peter Clausen Date: Mon, 25 Oct 2021 09:54:28 +0200 Subject: dmaengine: zynqmp_dma: Correctly handle descriptor callbacks DMA clients can provide one of two types of callbacks. For this reason dmaengine drivers should not directly invoke `callback`, but always use `dmaengine_desc_callback_invoke()`. This makes sure that both types of callbacks are handled correctly. The zynqmp_dma driver currently doesn't do this and only handles the `callback` type callback. If the client used the `callback_result` type callback it will not be called. Fix this by switching to `dmaengine_desc_callback_valid()` and `dmaengine_desc_callback_invoke()`. Signed-off-by: Lars-Peter Clausen Link: https://lore.kernel.org/r/20211025075428.2094-3-lars@metafoo.de Signed-off-by: Vinod Koul --- drivers/dma/xilinx/zynqmp_dma.c | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/drivers/dma/xilinx/zynqmp_dma.c b/drivers/dma/xilinx/zynqmp_dma.c index 54adac6391ef..7aa63b652027 100644 --- a/drivers/dma/xilinx/zynqmp_dma.c +++ b/drivers/dma/xilinx/zynqmp_dma.c @@ -605,14 +605,12 @@ static void zynqmp_dma_chan_desc_cleanup(struct zynqmp_dma_chan *chan) spin_lock_irqsave(&chan->lock, irqflags); list_for_each_entry_safe(desc, next, &chan->done_list, node) { - dma_async_tx_callback callback; - void *callback_param; + struct dmaengine_desc_callback cb; - callback = desc->async_tx.callback; - callback_param = desc->async_tx.callback_param; - if (callback) { + dmaengine_desc_get_callback(&desc->async_tx, &cb); + if (dmaengine_desc_callback_valid(&cb)) { spin_unlock_irqrestore(&chan->lock, irqflags); - callback(callback_param); + dmaengine_desc_callback_invoke(&cb, NULL); spin_lock_irqsave(&chan->lock, irqflags); } -- cgit v1.2.3 From 2efe58cfaad4ad94eab888d287c174de5209a0c2 Mon Sep 17 00:00:00 2001 From: Dave Jiang Date: Mon, 25 Oct 2021 07:59:49 -0700 Subject: dmaengine: idxd: cleanup completion record allocation According to core-api/dma-api-howto.rst, the address from dma_alloc_coherent is gauranteed to align to the smallest PAGE_SIZE order. That supercedes the 64B/32B alignment requirement of the completion record. Remove alignment adjustment code. 
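As a hedged illustration of the guarantee the patch relies on (hypothetical foo_* names, not the idxd code): dma_alloc_coherent() returns memory aligned to at least the smallest page order covering the request, so a ring of 32- or 64-byte completion records needs no round-up slack or manual address adjustment.

#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/gfp.h>

struct foo_comp_ring {
	void		*vaddr;
	dma_addr_t	daddr;
	size_t		size;
};

static int foo_comp_ring_alloc(struct device *dev, struct foo_comp_ring *ring,
			       unsigned int num_descs, size_t compl_size)
{
	/* No "+ align" slack and no rounding of the returned address:
	 * the coherent allocation is already page-order aligned. */
	ring->size = num_descs * compl_size;
	ring->vaddr = dma_alloc_coherent(dev, ring->size, &ring->daddr,
					 GFP_KERNEL);
	return ring->vaddr ? 0 : -ENOMEM;
}

static void foo_comp_ring_free(struct device *dev, struct foo_comp_ring *ring)
{
	dma_free_coherent(dev, ring->size, ring->vaddr, ring->daddr);
}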
Tested-by: Jacob Pan Signed-off-by: Dave Jiang Link: https://lore.kernel.org/r/163517396063.3484297.7494385225280705372.stgit@djiang5-desk3.ch.intel.com Signed-off-by: Vinod Koul --- drivers/dma/idxd/device.c | 22 +++++----------------- drivers/dma/idxd/idxd.h | 2 -- 2 files changed, 5 insertions(+), 19 deletions(-) diff --git a/drivers/dma/idxd/device.c b/drivers/dma/idxd/device.c index b1407465d5c4..fab412349f7f 100644 --- a/drivers/dma/idxd/device.c +++ b/drivers/dma/idxd/device.c @@ -135,8 +135,6 @@ int idxd_wq_alloc_resources(struct idxd_wq *wq) struct idxd_device *idxd = wq->idxd; struct device *dev = &idxd->pdev->dev; int rc, num_descs, i; - int align; - u64 tmp; if (wq->type != IDXD_WQT_KERNEL) return 0; @@ -148,21 +146,13 @@ int idxd_wq_alloc_resources(struct idxd_wq *wq) if (rc < 0) return rc; - align = idxd->data->align; - wq->compls_size = num_descs * idxd->data->compl_size + align; - wq->compls_raw = dma_alloc_coherent(dev, wq->compls_size, - &wq->compls_addr_raw, GFP_KERNEL); - if (!wq->compls_raw) { + wq->compls_size = num_descs * idxd->data->compl_size; + wq->compls = dma_alloc_coherent(dev, wq->compls_size, &wq->compls_addr, GFP_KERNEL); + if (!wq->compls) { rc = -ENOMEM; goto fail_alloc_compls; } - /* Adjust alignment */ - wq->compls_addr = (wq->compls_addr_raw + (align - 1)) & ~(align - 1); - tmp = (u64)wq->compls_raw; - tmp = (tmp + (align - 1)) & ~(align - 1); - wq->compls = (struct dsa_completion_record *)tmp; - rc = alloc_descs(wq, num_descs); if (rc < 0) goto fail_alloc_descs; @@ -191,8 +181,7 @@ int idxd_wq_alloc_resources(struct idxd_wq *wq) fail_sbitmap_init: free_descs(wq); fail_alloc_descs: - dma_free_coherent(dev, wq->compls_size, wq->compls_raw, - wq->compls_addr_raw); + dma_free_coherent(dev, wq->compls_size, wq->compls, wq->compls_addr); fail_alloc_compls: free_hw_descs(wq); return rc; @@ -207,8 +196,7 @@ void idxd_wq_free_resources(struct idxd_wq *wq) free_hw_descs(wq); free_descs(wq); - dma_free_coherent(dev, wq->compls_size, wq->compls_raw, - wq->compls_addr_raw); + dma_free_coherent(dev, wq->compls_size, wq->compls, wq->compls_addr); sbitmap_queue_free(&wq->sbq); } diff --git a/drivers/dma/idxd/idxd.h b/drivers/dma/idxd/idxd.h index bfcb03329f77..0cf8d3145870 100644 --- a/drivers/dma/idxd/idxd.h +++ b/drivers/dma/idxd/idxd.h @@ -187,9 +187,7 @@ struct idxd_wq { struct dsa_completion_record *compls; struct iax_completion_record *iax_compls; }; - void *compls_raw; dma_addr_t compls_addr; - dma_addr_t compls_addr_raw; int compls_size; struct idxd_desc **descs; struct sbitmap_queue sbq; -- cgit v1.2.3 From a3e340c1574b6679f5b333221284d0959095da52 Mon Sep 17 00:00:00 2001 From: Dave Jiang Date: Mon, 25 Oct 2021 08:01:04 -0700 Subject: dmaengine: idxd: fix resource leak on dmaengine driver disable The wq resources needs to be released before the kernel type is reset by __drv_disable_wq(). With dma channels unregistered and wq quiesced, all the wq resources for dmaengine can be freed. There is no need to wait until wq is disabled. With the wq->type being reset to "unknown", the driver is skipping the freeing of the resources. 
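The ordering constraint is easier to see in a stripped-down sketch (hypothetical foo_* helpers, not the idxd functions themselves): anything that is freed based on the wq type must be released before the disable step that resets that type, otherwise the later free silently does nothing and the buffers leak.

#include <linux/mutex.h>
#include <linux/slab.h>

struct foo_wq {
	struct mutex	lock;
	int		type;		/* 0 means "unknown" */
	void		*resources;	/* freed only while type != 0 */
};

static void foo_wq_quiesce(struct foo_wq *wq) { }
static void foo_wq_unregister_channel(struct foo_wq *wq) { }

static void foo_wq_free_resources(struct foo_wq *wq)
{
	if (!wq->type)		/* type already reset: free is skipped, leak */
		return;
	kfree(wq->resources);
	wq->resources = NULL;
}

static void foo_wq_disable(struct foo_wq *wq)
{
	wq->type = 0;		/* resets the type, like __drv_disable_wq() */
}

static void foo_drv_remove(struct foo_wq *wq)
{
	mutex_lock(&wq->lock);
	foo_wq_quiesce(wq);		/* stop and drain outstanding work */
	foo_wq_unregister_channel(wq);	/* no dmaengine users left */
	foo_wq_free_resources(wq);	/* must run while the type is known */
	foo_wq_disable(wq);		/* only now reset the type */
	mutex_unlock(&wq->lock);
}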
Fixes: 0cda4f6986a3 ("dmaengine: idxd: create dmaengine driver for wq 'device'") Reported-by: Jacob Pan Tested-by: Jacob Pan Signed-off-by: Dave Jiang Link: https://lore.kernel.org/r/163517405099.3484556.12521975053711345244.stgit@djiang5-desk3.ch.intel.com Signed-off-by: Vinod Koul --- drivers/dma/idxd/dma.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/drivers/dma/idxd/dma.c b/drivers/dma/idxd/dma.c index b90b085d18cf..c39e9483206a 100644 --- a/drivers/dma/idxd/dma.c +++ b/drivers/dma/idxd/dma.c @@ -329,10 +329,9 @@ static void idxd_dmaengine_drv_remove(struct idxd_dev *idxd_dev) mutex_lock(&wq->wq_lock); idxd_wq_quiesce(wq); idxd_unregister_dma_channel(wq); + idxd_wq_free_resources(wq); __drv_disable_wq(wq); percpu_ref_exit(&wq->wq_active); - idxd_wq_free_resources(wq); - wq->type = IDXD_WQT_NONE; mutex_unlock(&wq->wq_lock); } -- cgit v1.2.3 From e0674853943287669a82d1ffe09a700944615978 Mon Sep 17 00:00:00 2001 From: Joy Zou Date: Tue, 26 Oct 2021 17:00:25 +0800 Subject: dmaengine: fsl-edma: support edma memcpy Add memcpy in edma. The edma has the capability to transfer data by software trigger so that it could be used for memory copy. Enable MEMCPY for edma driver and it could be test directly by dmatest. Signed-off-by: Joy Zou Link: https://lore.kernel.org/r/20211026090025.2777292-1-joy.zou@nxp.com Signed-off-by: Vinod Koul --- drivers/dma/fsl-edma-common.c | 31 +++++++++++++++++++++++++++++++ drivers/dma/fsl-edma-common.h | 4 ++++ drivers/dma/fsl-edma.c | 7 +++++++ 3 files changed, 42 insertions(+) diff --git a/drivers/dma/fsl-edma-common.c b/drivers/dma/fsl-edma-common.c index 009c75ff1320..3ae05d1446a5 100644 --- a/drivers/dma/fsl-edma-common.c +++ b/drivers/dma/fsl-edma-common.c @@ -348,6 +348,7 @@ static void fsl_edma_set_tcd_regs(struct fsl_edma_chan *fsl_chan, struct fsl_edma_engine *edma = fsl_chan->edma; struct edma_regs *regs = &fsl_chan->edma->regs; u32 ch = fsl_chan->vchan.chan.chan_id; + u16 csr = 0; /* * TCD parameters are stored in struct fsl_edma_hw_tcd in little @@ -373,6 +374,12 @@ static void fsl_edma_set_tcd_regs(struct fsl_edma_chan *fsl_chan, edma_writel(edma, (s32)tcd->dlast_sga, ®s->tcd[ch].dlast_sga); + if (fsl_chan->is_sw) { + csr = le16_to_cpu(tcd->csr); + csr |= EDMA_TCD_CSR_START; + tcd->csr = cpu_to_le16(csr); + } + edma_writew(edma, (s16)tcd->csr, ®s->tcd[ch].csr); } @@ -587,6 +594,29 @@ struct dma_async_tx_descriptor *fsl_edma_prep_slave_sg( } EXPORT_SYMBOL_GPL(fsl_edma_prep_slave_sg); +struct dma_async_tx_descriptor *fsl_edma_prep_memcpy(struct dma_chan *chan, + dma_addr_t dma_dst, dma_addr_t dma_src, + size_t len, unsigned long flags) +{ + struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan); + struct fsl_edma_desc *fsl_desc; + + fsl_desc = fsl_edma_alloc_desc(fsl_chan, 1); + if (!fsl_desc) + return NULL; + fsl_desc->iscyclic = false; + + fsl_chan->is_sw = true; + + /* To match with copy_align and max_seg_size so 1 tcd is enough */ + fsl_edma_fill_tcd(fsl_desc->tcd[0].vtcd, dma_src, dma_dst, + EDMA_TCD_ATTR_SSIZE_32BYTE | EDMA_TCD_ATTR_DSIZE_32BYTE, + 32, len, 0, 1, 1, 32, 0, true, true, false); + + return vchan_tx_prep(&fsl_chan->vchan, &fsl_desc->vdesc, flags); +} +EXPORT_SYMBOL_GPL(fsl_edma_prep_memcpy); + void fsl_edma_xfer_desc(struct fsl_edma_chan *fsl_chan) { struct virt_dma_desc *vdesc; @@ -654,6 +684,7 @@ void fsl_edma_free_chan_resources(struct dma_chan *chan) vchan_dma_desc_free_list(&fsl_chan->vchan, &head); dma_pool_destroy(fsl_chan->tcd_pool); fsl_chan->tcd_pool = NULL; + fsl_chan->is_sw = false; } 
EXPORT_SYMBOL_GPL(fsl_edma_free_chan_resources); diff --git a/drivers/dma/fsl-edma-common.h b/drivers/dma/fsl-edma-common.h index ec1169741de1..004ec4a6bc86 100644 --- a/drivers/dma/fsl-edma-common.h +++ b/drivers/dma/fsl-edma-common.h @@ -121,6 +121,7 @@ struct fsl_edma_chan { struct fsl_edma_desc *edesc; struct dma_slave_config cfg; u32 attr; + bool is_sw; struct dma_pool *tcd_pool; dma_addr_t dma_dev_addr; u32 dma_dev_size; @@ -240,6 +241,9 @@ struct dma_async_tx_descriptor *fsl_edma_prep_slave_sg( struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len, enum dma_transfer_direction direction, unsigned long flags, void *context); +struct dma_async_tx_descriptor *fsl_edma_prep_memcpy( + struct dma_chan *chan, dma_addr_t dma_dst, dma_addr_t dma_src, + size_t len, unsigned long flags); void fsl_edma_xfer_desc(struct fsl_edma_chan *fsl_chan); void fsl_edma_issue_pending(struct dma_chan *chan); int fsl_edma_alloc_chan_resources(struct dma_chan *chan); diff --git a/drivers/dma/fsl-edma.c b/drivers/dma/fsl-edma.c index 90bb72af306c..76cbf54aec58 100644 --- a/drivers/dma/fsl-edma.c +++ b/drivers/dma/fsl-edma.c @@ -17,6 +17,7 @@ #include #include #include +#include #include "fsl-edma-common.h" @@ -372,6 +373,7 @@ static int fsl_edma_probe(struct platform_device *pdev) dma_cap_set(DMA_PRIVATE, fsl_edma->dma_dev.cap_mask); dma_cap_set(DMA_SLAVE, fsl_edma->dma_dev.cap_mask); dma_cap_set(DMA_CYCLIC, fsl_edma->dma_dev.cap_mask); + dma_cap_set(DMA_MEMCPY, fsl_edma->dma_dev.cap_mask); fsl_edma->dma_dev.dev = &pdev->dev; fsl_edma->dma_dev.device_alloc_chan_resources @@ -381,6 +383,7 @@ static int fsl_edma_probe(struct platform_device *pdev) fsl_edma->dma_dev.device_tx_status = fsl_edma_tx_status; fsl_edma->dma_dev.device_prep_slave_sg = fsl_edma_prep_slave_sg; fsl_edma->dma_dev.device_prep_dma_cyclic = fsl_edma_prep_dma_cyclic; + fsl_edma->dma_dev.device_prep_dma_memcpy = fsl_edma_prep_memcpy; fsl_edma->dma_dev.device_config = fsl_edma_slave_config; fsl_edma->dma_dev.device_pause = fsl_edma_pause; fsl_edma->dma_dev.device_resume = fsl_edma_resume; @@ -392,6 +395,10 @@ static int fsl_edma_probe(struct platform_device *pdev) fsl_edma->dma_dev.dst_addr_widths = FSL_EDMA_BUSWIDTHS; fsl_edma->dma_dev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV); + fsl_edma->dma_dev.copy_align = DMAENGINE_ALIGN_32_BYTES; + /* Per worst case 'nbytes = 1' take CITER as the max_seg_size */ + dma_set_max_seg_size(fsl_edma->dma_dev.dev, 0x3fff); + platform_set_drvdata(pdev, fsl_edma); ret = dma_async_device_register(&fsl_edma->dma_dev); -- cgit v1.2.3 From 2498363310e9b5e5de0e104709adc35c9f3ff7d9 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Wed, 3 Nov 2021 16:33:12 +0100 Subject: dmaengine: stm32-dma: avoid 64-bit division in stm32_dma_get_max_width Using the % operator on a 64-bit variable is expensive and can cause a link failure: arm-linux-gnueabi-ld: drivers/dma/stm32-dma.o: in function `stm32_dma_get_max_width': stm32-dma.c:(.text+0x170): undefined reference to `__aeabi_uldivmod' arm-linux-gnueabi-ld: drivers/dma/stm32-dma.o: in function `stm32_dma_set_xfer_param': stm32-dma.c:(.text+0x1cd4): undefined reference to `__aeabi_uldivmod' As we know that we just want to check the alignment in stm32_dma_get_max_width(), there is no need for a full division, and using a simple mask is a faster replacement. Same in stm32_dma_set_xfer_param(), change this to only allow burst transfers if the address is a multiple of the length. 
stm32_dma_get_best_burst just after will take buf_len into account to fix burst in case of misalignment. Fixes: b20fd5fa310c ("dmaengine: stm32-dma: fix stm32_dma_get_max_width") Reported-by: kernel test robot Signed-off-by: Arnd Bergmann Signed-off-by: Amelie Delaunay Link: https://lore.kernel.org/r/20211103153312.41483-1-amelie.delaunay@foss.st.com Signed-off-by: Vinod Koul --- drivers/dma/stm32-dma.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/dma/stm32-dma.c b/drivers/dma/stm32-dma.c index 2283c500f4ce..83a37a6955a3 100644 --- a/drivers/dma/stm32-dma.c +++ b/drivers/dma/stm32-dma.c @@ -280,7 +280,7 @@ static enum dma_slave_buswidth stm32_dma_get_max_width(u32 buf_len, max_width > DMA_SLAVE_BUSWIDTH_1_BYTE) max_width = max_width >> 1; - if (buf_addr % max_width) + if (buf_addr & (max_width - 1)) max_width = DMA_SLAVE_BUSWIDTH_1_BYTE; return max_width; @@ -757,7 +757,7 @@ static int stm32_dma_set_xfer_param(struct stm32_dma_chan *chan, * Set memory burst size - burst not possible if address is not aligned on * the address boundary equal to the size of the transfer */ - if (buf_addr % buf_len) + if (buf_addr & (buf_len - 1)) src_maxburst = 1; else src_maxburst = STM32_DMA_MAX_BURST; @@ -813,7 +813,7 @@ static int stm32_dma_set_xfer_param(struct stm32_dma_chan *chan, * Set memory burst size - burst not possible if address is not aligned on * the address boundary equal to the size of the transfer */ - if (buf_addr % buf_len) + if (buf_addr & (buf_len - 1)) dst_maxburst = 1; else dst_maxburst = STM32_DMA_MAX_BURST; -- cgit v1.2.3 From 5c6c6d60e4b489308ae4da8424c869f7cc53cd12 Mon Sep 17 00:00:00 2001 From: Kishon Vijay Abraham I Date: Sun, 31 Oct 2021 08:54:10 +0530 Subject: dmaengine: ti: k3-udma: Set bchan to NULL if a channel request fail bcdma_get_*() checks if bchan is already allocated by checking if it has a NON NULL value. For the error cases, bchan will have error value and bcdma_get_*() considers this as already allocated (PASS) since the error values are NON NULL. This results in NULL pointer dereference error while de-referencing bchan. Reset the value of bchan to NULL if a channel request fails. CC: stable@vger.kernel.org Acked-by: Peter Ujfalusi Signed-off-by: Kishon Vijay Abraham I Link: https://lore.kernel.org/r/20211031032411.27235-2-kishon@ti.com Signed-off-by: Vinod Koul --- drivers/dma/ti/k3-udma.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/drivers/dma/ti/k3-udma.c b/drivers/dma/ti/k3-udma.c index a35858610780..14ae28830871 100644 --- a/drivers/dma/ti/k3-udma.c +++ b/drivers/dma/ti/k3-udma.c @@ -1348,6 +1348,7 @@ static int bcdma_get_bchan(struct udma_chan *uc) { struct udma_dev *ud = uc->ud; enum udma_tp_level tpl; + int ret; if (uc->bchan) { dev_dbg(ud->dev, "chan%d: already have bchan%d allocated\n", @@ -1365,8 +1366,11 @@ static int bcdma_get_bchan(struct udma_chan *uc) tpl = ud->bchan_tpl.levels - 1; uc->bchan = __udma_reserve_bchan(ud, tpl, -1); - if (IS_ERR(uc->bchan)) - return PTR_ERR(uc->bchan); + if (IS_ERR(uc->bchan)) { + ret = PTR_ERR(uc->bchan); + uc->bchan = NULL; + return ret; + } uc->tchan = uc->bchan; -- cgit v1.2.3 From eb91224e47ec33a0a32c9be0ec0fcb3433e555fd Mon Sep 17 00:00:00 2001 From: Kishon Vijay Abraham I Date: Sun, 31 Oct 2021 08:54:11 +0530 Subject: dmaengine: ti: k3-udma: Set r/tchan or rflow to NULL if request fail udma_get_*() checks if rchan/tchan/rflow is already allocated by checking if it has a NON NULL value. 
For the error cases, rchan/tchan/rflow will have error value and udma_get_*() considers this as already allocated (PASS) since the error values are NON NULL. This results in NULL pointer dereference error while de-referencing rchan/tchan/rflow. Reset the value of rchan/tchan/rflow to NULL if a channel request fails. CC: stable@vger.kernel.org Acked-by: Peter Ujfalusi Signed-off-by: Kishon Vijay Abraham I Link: https://lore.kernel.org/r/20211031032411.27235-3-kishon@ti.com Signed-off-by: Vinod Koul --- drivers/dma/ti/k3-udma.c | 24 ++++++++++++++++++++---- 1 file changed, 20 insertions(+), 4 deletions(-) diff --git a/drivers/dma/ti/k3-udma.c b/drivers/dma/ti/k3-udma.c index 14ae28830871..041d8e32d630 100644 --- a/drivers/dma/ti/k3-udma.c +++ b/drivers/dma/ti/k3-udma.c @@ -1380,6 +1380,7 @@ static int bcdma_get_bchan(struct udma_chan *uc) static int udma_get_tchan(struct udma_chan *uc) { struct udma_dev *ud = uc->ud; + int ret; if (uc->tchan) { dev_dbg(ud->dev, "chan%d: already have tchan%d allocated\n", @@ -1394,8 +1395,11 @@ static int udma_get_tchan(struct udma_chan *uc) */ uc->tchan = __udma_reserve_tchan(ud, uc->config.channel_tpl, uc->config.mapped_channel_id); - if (IS_ERR(uc->tchan)) - return PTR_ERR(uc->tchan); + if (IS_ERR(uc->tchan)) { + ret = PTR_ERR(uc->tchan); + uc->tchan = NULL; + return ret; + } if (ud->tflow_cnt) { int tflow_id; @@ -1425,6 +1429,7 @@ static int udma_get_tchan(struct udma_chan *uc) static int udma_get_rchan(struct udma_chan *uc) { struct udma_dev *ud = uc->ud; + int ret; if (uc->rchan) { dev_dbg(ud->dev, "chan%d: already have rchan%d allocated\n", @@ -1439,8 +1444,13 @@ static int udma_get_rchan(struct udma_chan *uc) */ uc->rchan = __udma_reserve_rchan(ud, uc->config.channel_tpl, uc->config.mapped_channel_id); + if (IS_ERR(uc->rchan)) { + ret = PTR_ERR(uc->rchan); + uc->rchan = NULL; + return ret; + } - return PTR_ERR_OR_ZERO(uc->rchan); + return 0; } static int udma_get_chan_pair(struct udma_chan *uc) @@ -1494,6 +1504,7 @@ static int udma_get_chan_pair(struct udma_chan *uc) static int udma_get_rflow(struct udma_chan *uc, int flow_id) { struct udma_dev *ud = uc->ud; + int ret; if (!uc->rchan) { dev_err(ud->dev, "chan%d: does not have rchan??\n", uc->id); @@ -1507,8 +1518,13 @@ static int udma_get_rflow(struct udma_chan *uc, int flow_id) } uc->rflow = __udma_get_rflow(ud, flow_id); + if (IS_ERR(uc->rflow)) { + ret = PTR_ERR(uc->rflow); + uc->rflow = NULL; + return ret; + } - return PTR_ERR_OR_ZERO(uc->rflow); + return 0; } static void bcdma_put_bchan(struct udma_chan *uc) -- cgit v1.2.3
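Both k3-udma fixes above apply the same pattern, sketched here with invented foo_* names: an ERR_PTR() value is non-NULL, so if it is left in the cached channel pointer, the next request mistakes the stale error for a successful allocation and later dereferences it; clearing the pointer before returning keeps the "NULL means not allocated" invariant intact.

#include <linux/err.h>
#include <linux/errno.h>

struct foo_chan {
	int	id;
};

struct foo_ctx {
	struct foo_chan *chan;	/* NULL means "not allocated yet" */
};

/* Stub reservation helper: pretend the hardware is out of channels. */
static struct foo_chan *foo_reserve_chan(void)
{
	return ERR_PTR(-EBUSY);
}

static int foo_get_chan(struct foo_ctx *ctx)
{
	int ret;

	if (ctx->chan)			/* only valid if errors never leak in */
		return 0;

	ctx->chan = foo_reserve_chan();
	if (IS_ERR(ctx->chan)) {
		ret = PTR_ERR(ctx->chan);
		ctx->chan = NULL;	/* restore the invariant on failure */
		return ret;
	}

	return 0;
}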