author     Mauro Carvalho Chehab <mchehab+samsung@kernel.org>   2019-02-18 18:12:12 +0300
committer  Mauro Carvalho Chehab <mchehab+samsung@kernel.org>   2019-02-18 18:12:12 +0300
commit     5f09bc8cc4010a3ea17c5881a62fc45192ebe7b0 (patch)
tree       c8e30ba4eaf357699ef97a786bf56661f4591be8 /drivers/dma
parent     6fd369dd1cb65a032f1ab9227033ecb7b759656d (diff)
parent     a3b22b9f11d9fbc48b0291ea92259a5a810e9438 (diff)
download   linux-5f09bc8cc4010a3ea17c5881a62fc45192ebe7b0.tar.xz
Merge tag 'v5.0-rc7' into patchwork
Linux 5.0-rc7
* tag 'v5.0-rc7': (1667 commits)
Linux 5.0-rc7
Input: elan_i2c - add ACPI ID for touchpad in Lenovo V330-15ISK
Input: st-keyscan - fix potential zalloc NULL dereference
Input: apanel - switch to using brightness_set_blocking()
powerpc/64s: Fix possible corruption on big endian due to pgd/pud_present()
efi/arm: Revert "Defer persistent reservations until after paging_init()"
arm64, mm, efi: Account for GICv3 LPI tables in static memblock reserve table
sunrpc: fix 4 more call sites that were using stack memory with a scatterlist
include/linux/module.h: copy __init/__exit attrs to init/cleanup_module
Compiler Attributes: add support for __copy (gcc >= 9)
lib/crc32.c: mark crc32_le_base/__crc32c_le_base aliases as __pure
auxdisplay: ht16k33: fix potential user-after-free on module unload
x86/platform/UV: Use efi_runtime_lock to serialise BIOS calls
i2c: bcm2835: Clear current buffer pointers and counts after a transfer
i2c: cadence: Fix the hold bit setting
drm: Use array_size() when creating lease
dm thin: fix bug where bio that overwrites thin block ignores FUA
Revert "exec: load_script: don't blindly truncate shebang string"
Revert "gfs2: read journal in large chunks to locate the head"
net: ethernet: freescale: set FEC ethtool regs version
...
Signed-off-by: Mauro Carvalho Chehab <mchehab+samsung@kernel.org>
Diffstat (limited to 'drivers/dma')
-rw-r--r--   drivers/dma/at_xdmac.c             19
-rw-r--r--   drivers/dma/bcm2835-dma.c          70
-rw-r--r--   drivers/dma/dmatest.c              32
-rw-r--r--   drivers/dma/imx-dma.c               8
-rw-r--r--   drivers/dma/imx-sdma.c              8
-rw-r--r--   drivers/dma/mediatek/mtk-hsdma.c    4
-rw-r--r--   drivers/dma/mxs-dma.c               6
-rw-r--r--   drivers/dma/xgene-dma.c             4
-rw-r--r--   drivers/dma/xilinx/xilinx_dma.c    14
-rw-r--r--   drivers/dma/xilinx/zynqmp_dma.c     6
10 files changed, 74 insertions, 97 deletions
diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c
index 4e557684f792..fe69dccfa0c0 100644
--- a/drivers/dma/at_xdmac.c
+++ b/drivers/dma/at_xdmac.c
@@ -203,6 +203,7 @@ struct at_xdmac_chan {
         u32 save_cim;
         u32 save_cnda;
         u32 save_cndc;
+        u32 irq_status;
         unsigned long status;
         struct tasklet_struct tasklet;
         struct dma_slave_config sconfig;
@@ -1580,8 +1581,8 @@ static void at_xdmac_tasklet(unsigned long data)
         struct at_xdmac_desc *desc;
         u32 error_mask;
-        dev_dbg(chan2dev(&atchan->chan), "%s: status=0x%08lx\n",
-                __func__, atchan->status);
+        dev_dbg(chan2dev(&atchan->chan), "%s: status=0x%08x\n",
+                __func__, atchan->irq_status);
         error_mask = AT_XDMAC_CIS_RBEIS
                      | AT_XDMAC_CIS_WBEIS
@@ -1589,15 +1590,15 @@ static void at_xdmac_tasklet(unsigned long data)
         if (at_xdmac_chan_is_cyclic(atchan)) {
                 at_xdmac_handle_cyclic(atchan);
-        } else if ((atchan->status & AT_XDMAC_CIS_LIS)
-                   || (atchan->status & error_mask)) {
+        } else if ((atchan->irq_status & AT_XDMAC_CIS_LIS)
+                   || (atchan->irq_status & error_mask)) {
                 struct dma_async_tx_descriptor *txd;
-                if (atchan->status & AT_XDMAC_CIS_RBEIS)
+                if (atchan->irq_status & AT_XDMAC_CIS_RBEIS)
                         dev_err(chan2dev(&atchan->chan), "read bus error!!!");
-                if (atchan->status & AT_XDMAC_CIS_WBEIS)
+                if (atchan->irq_status & AT_XDMAC_CIS_WBEIS)
                         dev_err(chan2dev(&atchan->chan), "write bus error!!!");
-                if (atchan->status & AT_XDMAC_CIS_ROIS)
+                if (atchan->irq_status & AT_XDMAC_CIS_ROIS)
                         dev_err(chan2dev(&atchan->chan), "request overflow error!!!");
                 spin_lock(&atchan->lock);
@@ -1652,7 +1653,7 @@ static irqreturn_t at_xdmac_interrupt(int irq, void *dev_id)
                         atchan = &atxdmac->chan[i];
                         chan_imr = at_xdmac_chan_read(atchan, AT_XDMAC_CIM);
                         chan_status = at_xdmac_chan_read(atchan, AT_XDMAC_CIS);
-                        atchan->status = chan_status & chan_imr;
+                        atchan->irq_status = chan_status & chan_imr;
                         dev_vdbg(atxdmac->dma.dev,
                                  "%s: chan%d: imr=0x%x, status=0x%x\n",
                                  __func__, i, chan_imr, chan_status);
@@ -1666,7 +1667,7 @@ static irqreturn_t at_xdmac_interrupt(int irq, void *dev_id)
                                  at_xdmac_chan_read(atchan, AT_XDMAC_CDA),
                                  at_xdmac_chan_read(atchan, AT_XDMAC_CUBC));
-                        if (atchan->status & (AT_XDMAC_CIS_RBEIS | AT_XDMAC_CIS_WBEIS))
+                        if (atchan->irq_status & (AT_XDMAC_CIS_RBEIS | AT_XDMAC_CIS_WBEIS))
                                 at_xdmac_write(atxdmac, AT_XDMAC_GD, atchan->mask);
                         tasklet_schedule(&atchan->tasklet);
diff --git a/drivers/dma/bcm2835-dma.c b/drivers/dma/bcm2835-dma.c
index 1a44c8086d77..ae10f5614f95 100644
--- a/drivers/dma/bcm2835-dma.c
+++ b/drivers/dma/bcm2835-dma.c
@@ -406,38 +406,32 @@ static void bcm2835_dma_fill_cb_chain_with_sg(
         }
 }
-static int bcm2835_dma_abort(void __iomem *chan_base)
+static int bcm2835_dma_abort(struct bcm2835_chan *c)
 {
-        unsigned long cs;
+        void __iomem *chan_base = c->chan_base;
         long int timeout = 10000;
-        cs = readl(chan_base + BCM2835_DMA_CS);
-        if (!(cs & BCM2835_DMA_ACTIVE))
+        /*
+         * A zero control block address means the channel is idle.
+         * (The ACTIVE flag in the CS register is not a reliable indicator.)
+         */
+        if (!readl(chan_base + BCM2835_DMA_ADDR))
                 return 0;
         /* Write 0 to the active bit - Pause the DMA */
         writel(0, chan_base + BCM2835_DMA_CS);
         /* Wait for any current AXI transfer to complete */
-        while ((cs & BCM2835_DMA_ISPAUSED) && --timeout) {
+        while ((readl(chan_base + BCM2835_DMA_CS) &
+                BCM2835_DMA_WAITING_FOR_WRITES) && --timeout)
                 cpu_relax();
-                cs = readl(chan_base + BCM2835_DMA_CS);
-        }
-        /* We'll un-pause when we set of our next DMA */
+        /* Peripheral might be stuck and fail to signal AXI write responses */
         if (!timeout)
-                return -ETIMEDOUT;
-
-        if (!(cs & BCM2835_DMA_ACTIVE))
-                return 0;
-
-        /* Terminate the control block chain */
-        writel(0, chan_base + BCM2835_DMA_NEXTCB);
-
-        /* Abort the whole DMA */
-        writel(BCM2835_DMA_ABORT | BCM2835_DMA_ACTIVE,
-               chan_base + BCM2835_DMA_CS);
+                dev_err(c->vc.chan.device->dev,
+                        "failed to complete outstanding writes\n");
+        writel(BCM2835_DMA_RESET, chan_base + BCM2835_DMA_CS);
         return 0;
 }
@@ -476,8 +470,15 @@ static irqreturn_t bcm2835_dma_callback(int irq, void *data)
         spin_lock_irqsave(&c->vc.lock, flags);
-        /* Acknowledge interrupt */
-        writel(BCM2835_DMA_INT, c->chan_base + BCM2835_DMA_CS);
+        /*
+         * Clear the INT flag to receive further interrupts. Keep the channel
+         * active in case the descriptor is cyclic or in case the client has
+         * already terminated the descriptor and issued a new one. (May happen
+         * if this IRQ handler is threaded.) If the channel is finished, it
+         * will remain idle despite the ACTIVE flag being set.
+         */
+        writel(BCM2835_DMA_INT | BCM2835_DMA_ACTIVE,
+               c->chan_base + BCM2835_DMA_CS);
         d = c->desc;
@@ -485,11 +486,7 @@ static irqreturn_t bcm2835_dma_callback(int irq, void *data)
                 if (d->cyclic) {
                         /* call the cyclic callback */
                         vchan_cyclic_callback(&d->vd);
-
-                        /* Keep the DMA engine running */
-                        writel(BCM2835_DMA_ACTIVE,
-                               c->chan_base + BCM2835_DMA_CS);
-                } else {
+                } else if (!readl(c->chan_base + BCM2835_DMA_ADDR)) {
                         vchan_cookie_complete(&c->desc->vd);
                         bcm2835_dma_start_desc(c);
                 }
@@ -779,7 +776,6 @@ static int bcm2835_dma_terminate_all(struct dma_chan *chan)
         struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);
         struct bcm2835_dmadev *d = to_bcm2835_dma_dev(c->vc.chan.device);
         unsigned long flags;
-        int timeout = 10000;
         LIST_HEAD(head);
         spin_lock_irqsave(&c->vc.lock, flags);
@@ -789,27 +785,11 @@ static int bcm2835_dma_terminate_all(struct dma_chan *chan)
         list_del_init(&c->node);
         spin_unlock(&d->lock);
-        /*
-         * Stop DMA activity: we assume the callback will not be called
-         * after bcm_dma_abort() returns (even if it does, it will see
-         * c->desc is NULL and exit.)
-         */
+        /* stop DMA activity */
         if (c->desc) {
                 vchan_terminate_vdesc(&c->desc->vd);
                 c->desc = NULL;
-                bcm2835_dma_abort(c->chan_base);
-
-                /* Wait for stopping */
-                while (--timeout) {
-                        if (!(readl(c->chan_base + BCM2835_DMA_CS) &
-                              BCM2835_DMA_ACTIVE))
-                                break;
-
-                        cpu_relax();
-                }
-
-                if (!timeout)
-                        dev_err(d->ddev.dev, "DMA transfer could not be terminated\n");
+                bcm2835_dma_abort(c);
         }
         vchan_get_all_descriptors(&c->vc, &head);
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
index 2eea4ef72915..6511928b4cdf 100644
--- a/drivers/dma/dmatest.c
+++ b/drivers/dma/dmatest.c
@@ -711,11 +711,9 @@ static int dmatest_func(void *data)
                         srcs[i] = um->addr[i] + src_off;
                         ret = dma_mapping_error(dev->dev, um->addr[i]);
                         if (ret) {
-                                dmaengine_unmap_put(um);
                                 result("src mapping error", total_tests,
                                        src_off, dst_off, len, ret);
-                                failed_tests++;
-                                continue;
+                                goto error_unmap_continue;
                         }
                         um->to_cnt++;
                 }
@@ -730,11 +728,9 @@ static int dmatest_func(void *data)
                                                DMA_BIDIRECTIONAL);
                         ret = dma_mapping_error(dev->dev, dsts[i]);
                         if (ret) {
-                                dmaengine_unmap_put(um);
                                 result("dst mapping error", total_tests,
                                        src_off, dst_off, len, ret);
-                                failed_tests++;
-                                continue;
+                                goto error_unmap_continue;
                         }
                         um->bidi_cnt++;
                 }
@@ -762,12 +758,10 @@ static int dmatest_func(void *data)
                 }
                 if (!tx) {
-                        dmaengine_unmap_put(um);
                         result("prep error", total_tests, src_off,
                                dst_off, len, ret);
                         msleep(100);
-                        failed_tests++;
-                        continue;
+                        goto error_unmap_continue;
                 }
                 done->done = false;
@@ -776,12 +770,10 @@ static int dmatest_func(void *data)
                 cookie = tx->tx_submit(tx);
                 if (dma_submit_error(cookie)) {
-                        dmaengine_unmap_put(um);
                         result("submit error", total_tests, src_off,
                                dst_off, len, ret);
                         msleep(100);
-                        failed_tests++;
-                        continue;
+                        goto error_unmap_continue;
                 }
                 dma_async_issue_pending(chan);
@@ -790,22 +782,20 @@ static int dmatest_func(void *data)
                 status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
-                dmaengine_unmap_put(um);
-
                 if (!done->done) {
                         result("test timed out", total_tests, src_off, dst_off,
                                len, 0);
-                        failed_tests++;
-                        continue;
+                        goto error_unmap_continue;
                 } else if (status != DMA_COMPLETE) {
                         result(status == DMA_ERROR ?
                                "completion error status" :
                                "completion busy status", total_tests, src_off,
                                dst_off, len, ret);
-                        failed_tests++;
-                        continue;
+                        goto error_unmap_continue;
                 }
+                dmaengine_unmap_put(um);
+
                 if (params->noverify) {
                         verbose_result("test passed", total_tests, src_off,
                                        dst_off, len, 0);
@@ -846,6 +836,12 @@ static int dmatest_func(void *data)
                         verbose_result("test passed", total_tests, src_off,
                                        dst_off, len, 0);
                 }
+
+                continue;
+
+error_unmap_continue:
+                dmaengine_unmap_put(um);
+                failed_tests++;
         }
         ktime = ktime_sub(ktime_get(), ktime);
         ktime = ktime_sub(ktime, comparetime);
diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c
index c2fff3f6c9ca..4a09af3cd546 100644
--- a/drivers/dma/imx-dma.c
+++ b/drivers/dma/imx-dma.c
@@ -618,7 +618,7 @@ static void imxdma_tasklet(unsigned long data)
 {
         struct imxdma_channel *imxdmac = (void *)data;
         struct imxdma_engine *imxdma = imxdmac->imxdma;
-        struct imxdma_desc *desc;
+        struct imxdma_desc *desc, *next_desc;
         unsigned long flags;
         spin_lock_irqsave(&imxdma->lock, flags);
@@ -648,10 +648,10 @@ static void imxdma_tasklet(unsigned long data)
         list_move_tail(imxdmac->ld_active.next, &imxdmac->ld_free);
         if (!list_empty(&imxdmac->ld_queue)) {
-                desc = list_first_entry(&imxdmac->ld_queue, struct imxdma_desc,
-                                        node);
+                next_desc = list_first_entry(&imxdmac->ld_queue,
+                                             struct imxdma_desc, node);
                 list_move_tail(imxdmac->ld_queue.next, &imxdmac->ld_active);
-                if (imxdma_xfer_desc(desc) < 0)
+                if (imxdma_xfer_desc(next_desc) < 0)
                         dev_warn(imxdma->dev, "%s: channel: %d couldn't xfer desc\n",
                                  __func__, imxdmac->channel);
         }
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index a2b0a0e71168..86708fb9bda1 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -1182,8 +1182,8 @@ static int sdma_request_channel0(struct sdma_engine *sdma)
 {
         int ret = -EBUSY;
-        sdma->bd0 = dma_zalloc_coherent(NULL, PAGE_SIZE, &sdma->bd0_phys,
-                                        GFP_NOWAIT);
+        sdma->bd0 = dma_alloc_coherent(NULL, PAGE_SIZE, &sdma->bd0_phys,
+                                       GFP_NOWAIT);
         if (!sdma->bd0) {
                 ret = -ENOMEM;
                 goto out;
@@ -1205,8 +1205,8 @@ static int sdma_alloc_bd(struct sdma_desc *desc)
         u32 bd_size = desc->num_bd * sizeof(struct sdma_buffer_descriptor);
         int ret = 0;
-        desc->bd = dma_zalloc_coherent(NULL, bd_size, &desc->bd_phys,
-                                       GFP_NOWAIT);
+        desc->bd = dma_alloc_coherent(NULL, bd_size, &desc->bd_phys,
+                                      GFP_NOWAIT);
         if (!desc->bd) {
                 ret = -ENOMEM;
                 goto out;
diff --git a/drivers/dma/mediatek/mtk-hsdma.c b/drivers/dma/mediatek/mtk-hsdma.c
index b7ec56ae02a6..1a2028e1c29e 100644
--- a/drivers/dma/mediatek/mtk-hsdma.c
+++ b/drivers/dma/mediatek/mtk-hsdma.c
@@ -325,8 +325,8 @@ static int mtk_hsdma_alloc_pchan(struct mtk_hsdma_device *hsdma,
          * and [MTK_DMA_SIZE ... 2 * MTK_DMA_SIZE - 1] is for RX ring.
          */
         pc->sz_ring = 2 * MTK_DMA_SIZE * sizeof(*ring->txd);
-        ring->txd = dma_zalloc_coherent(hsdma2dev(hsdma), pc->sz_ring,
-                                        &ring->tphys, GFP_NOWAIT);
+        ring->txd = dma_alloc_coherent(hsdma2dev(hsdma), pc->sz_ring,
+                                       &ring->tphys, GFP_NOWAIT);
         if (!ring->txd)
                 return -ENOMEM;
diff --git a/drivers/dma/mxs-dma.c b/drivers/dma/mxs-dma.c
index 35193b31a9e0..22cc7f68ef6e 100644
--- a/drivers/dma/mxs-dma.c
+++ b/drivers/dma/mxs-dma.c
@@ -416,9 +416,9 @@ static int mxs_dma_alloc_chan_resources(struct dma_chan *chan)
         struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
         int ret;
-        mxs_chan->ccw = dma_zalloc_coherent(mxs_dma->dma_device.dev,
-                                            CCW_BLOCK_SIZE,
-                                            &mxs_chan->ccw_phys, GFP_KERNEL);
+        mxs_chan->ccw = dma_alloc_coherent(mxs_dma->dma_device.dev,
+                                           CCW_BLOCK_SIZE,
+                                           &mxs_chan->ccw_phys, GFP_KERNEL);
         if (!mxs_chan->ccw) {
                 ret = -ENOMEM;
                 goto err_alloc;
diff --git a/drivers/dma/xgene-dma.c b/drivers/dma/xgene-dma.c
index 1d5988849aa6..eafd6c4b90fe 100644
--- a/drivers/dma/xgene-dma.c
+++ b/drivers/dma/xgene-dma.c
@@ -1208,8 +1208,8 @@ static int xgene_dma_create_ring_one(struct xgene_dma_chan *chan,
         ring->size = ret;
         /* Allocate memory for DMA ring descriptor */
-        ring->desc_vaddr = dma_zalloc_coherent(chan->dev, ring->size,
-                                               &ring->desc_paddr, GFP_KERNEL);
+        ring->desc_vaddr = dma_alloc_coherent(chan->dev, ring->size,
+                                              &ring->desc_paddr, GFP_KERNEL);
         if (!ring->desc_vaddr) {
                 chan_err(chan, "Failed to allocate ring desc\n");
                 return -ENOMEM;
diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c
index 02880963092f..cb20b411493e 100644
--- a/drivers/dma/xilinx/xilinx_dma.c
+++ b/drivers/dma/xilinx/xilinx_dma.c
@@ -879,10 +879,9 @@ static int xilinx_dma_alloc_chan_resources(struct dma_chan *dchan)
          */
         if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
                 /* Allocate the buffer descriptors. */
-                chan->seg_v = dma_zalloc_coherent(chan->dev,
-                                                  sizeof(*chan->seg_v) *
-                                                  XILINX_DMA_NUM_DESCS,
-                                                  &chan->seg_p, GFP_KERNEL);
+                chan->seg_v = dma_alloc_coherent(chan->dev,
+                                                 sizeof(*chan->seg_v) * XILINX_DMA_NUM_DESCS,
+                                                 &chan->seg_p, GFP_KERNEL);
                 if (!chan->seg_v) {
                         dev_err(chan->dev,
                                 "unable to allocate channel %d descriptors\n",
@@ -895,9 +894,10 @@ static int xilinx_dma_alloc_chan_resources(struct dma_chan *dchan)
                  * so allocating a desc segment during channel allocation for
                  * programming tail descriptor.
                  */
-                chan->cyclic_seg_v = dma_zalloc_coherent(chan->dev,
-                                        sizeof(*chan->cyclic_seg_v),
-                                        &chan->cyclic_seg_p, GFP_KERNEL);
+                chan->cyclic_seg_v = dma_alloc_coherent(chan->dev,
+                                                        sizeof(*chan->cyclic_seg_v),
+                                                        &chan->cyclic_seg_p,
+                                                        GFP_KERNEL);
                 if (!chan->cyclic_seg_v) {
                         dev_err(chan->dev,
                                 "unable to allocate desc segment for cyclic DMA\n");
diff --git a/drivers/dma/xilinx/zynqmp_dma.c b/drivers/dma/xilinx/zynqmp_dma.c
index 8db51750ce93..4478787a247f 100644
--- a/drivers/dma/xilinx/zynqmp_dma.c
+++ b/drivers/dma/xilinx/zynqmp_dma.c
@@ -490,9 +490,9 @@ static int zynqmp_dma_alloc_chan_resources(struct dma_chan *dchan)
                 list_add_tail(&desc->node, &chan->free_list);
         }
-        chan->desc_pool_v = dma_zalloc_coherent(chan->dev,
-                                (2 * chan->desc_size * ZYNQMP_DMA_NUM_DESCS),
-                                &chan->desc_pool_p, GFP_KERNEL);
+        chan->desc_pool_v = dma_alloc_coherent(chan->dev,
+                                               (2 * chan->desc_size * ZYNQMP_DMA_NUM_DESCS),
+                                               &chan->desc_pool_p, GFP_KERNEL);
         if (!chan->desc_pool_v)
                 return -ENOMEM;
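Note: most of the allocation hunks above are mechanical conversions from the removed dma_zalloc_coherent() wrapper to dma_alloc_coherent(), which returns zeroed memory in this kernel, so the calls convert one-for-one with no added memset(). A minimal sketch of that pattern follows; the structure and function names (foo_ring, foo_alloc_ring) are hypothetical and not taken from the drivers in this merge.

#include <linux/device.h>
#include <linux/dma-mapping.h>

/* Hypothetical descriptor ring, for illustration only. */
struct foo_ring {
        void *desc_vaddr;       /* CPU virtual address of the ring */
        dma_addr_t desc_paddr;  /* bus address handed to the device */
        size_t size;            /* ring size in bytes */
};

static int foo_alloc_ring(struct device *dev, struct foo_ring *ring)
{
        /*
         * dma_alloc_coherent() hands back zero-initialized memory, so a
         * former dma_zalloc_coherent() call site needs no extra clearing.
         */
        ring->desc_vaddr = dma_alloc_coherent(dev, ring->size,
                                              &ring->desc_paddr, GFP_KERNEL);
        if (!ring->desc_vaddr)
                return -ENOMEM;

        return 0;
}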