Diffstat (limited to 'drivers/dma')
-rw-r--r--   drivers/dma/Kconfig        |  1
-rw-r--r--   drivers/dma/dmatest.c      | 11
-rw-r--r--   drivers/dma/imx-sdma.c     | 19
-rw-r--r--   drivers/dma/ioat/init.c    |  4
-rw-r--r--   drivers/dma/mv_xor.c       |  9
-rw-r--r--   drivers/dma/sh/rcar-dmac.c | 52
-rw-r--r--   drivers/dma/stm32-dma.c    |  2
-rw-r--r--   drivers/dma/sun4i-dma.c    |  2
-rw-r--r--   drivers/dma/virt-dma.c     | 11
9 files changed, 69 insertions, 42 deletions
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index fc3435c5240b..24e8597b2c3e 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -608,6 +608,7 @@ config ASYNC_TX_DMA
 config DMATEST
         tristate "DMA Test client"
         depends on DMA_ENGINE
+        select DMA_ENGINE_RAID
         help
           Simple DMA test client. Say N unless you're debugging a
           DMA Device driver.
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
index 54d581d407aa..a07ef3d6b3ec 100644
--- a/drivers/dma/dmatest.c
+++ b/drivers/dma/dmatest.c
@@ -535,6 +535,13 @@ static int dmatest_func(void *data)

                 total_tests++;

+                /* Check if buffer count fits into map count variable (u8) */
+                if ((src_cnt + dst_cnt) >= 255) {
+                        pr_err("too many buffers (%d of 255 supported)\n",
+                               src_cnt + dst_cnt);
+                        break;
+                }
+
                 if (1 << align > params->buf_size) {
                         pr_err("%u-byte buffer too small for %d-byte alignment\n",
                                params->buf_size, 1 << align);
@@ -585,7 +592,7 @@ static int dmatest_func(void *data)
                 for (i = 0; i < src_cnt; i++) {
                         void *buf = thread->srcs[i];
                         struct page *pg = virt_to_page(buf);
-                        unsigned pg_off = (unsigned long) buf & ~PAGE_MASK;
+                        unsigned long pg_off = offset_in_page(buf);

                         um->addr[i] = dma_map_page(dev->dev, pg, pg_off,
                                                    um->len, DMA_TO_DEVICE);
@@ -605,7 +612,7 @@ static int dmatest_func(void *data)
                 for (i = 0; i < dst_cnt; i++) {
                         void *buf = thread->dsts[i];
                         struct page *pg = virt_to_page(buf);
-                        unsigned pg_off = (unsigned long) buf & ~PAGE_MASK;
+                        unsigned long pg_off = offset_in_page(buf);

                         dsts[i] = dma_map_page(dev->dev, pg, pg_off, um->len,
                                                DMA_BIDIRECTIONAL);
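A note on the dmatest.c hunks above (the same conversion appears in mv_xor.c further down): offset_in_page() is the kernel's named form of the open-coded (unsigned long)buf & ~PAGE_MASK expression, and the dmatest change also widens the variable from unsigned to unsigned long. A standalone sketch of the equivalence, not kernel code; PAGE_SIZE is assumed to be 4 KiB and the buffer address is made up:

#include <assert.h>
#include <stdint.h>

#define PAGE_SIZE 4096UL
#define PAGE_MASK (~(PAGE_SIZE - 1))
/* mirrors the kernel's offset_in_page() from include/linux/mm.h */
#define offset_in_page(p) ((unsigned long)(p) & ~PAGE_MASK)

int main(void)
{
        void *buf = (void *)(uintptr_t)0x12345678;      /* made-up address */

        /* both expressions yield the byte offset of buf within its page */
        assert(offset_in_page(buf) == ((unsigned long)buf & ~PAGE_MASK));
        assert(offset_in_page(buf) == 0x678);
        return 0;
}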
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index d1651a50c349..085993cb2ccc 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -937,6 +937,21 @@ static int sdma_disable_channel(struct dma_chan *chan)
         return 0;
 }

+static int sdma_disable_channel_with_delay(struct dma_chan *chan)
+{
+        sdma_disable_channel(chan);
+
+        /*
+         * According to NXP R&D team a delay of one BD SDMA cost time
+         * (maximum is 1ms) should be added after disable of the channel
+         * bit, to ensure SDMA core has really been stopped after SDMA
+         * clients call .device_terminate_all.
+         */
+        mdelay(1);
+
+        return 0;
+}
+
 static void sdma_set_watermarklevel_for_p2p(struct sdma_channel *sdmac)
 {
         struct sdma_engine *sdma = sdmac->sdma;
@@ -1828,11 +1843,11 @@ static int sdma_probe(struct platform_device *pdev)
         sdma->dma_device.device_prep_slave_sg = sdma_prep_slave_sg;
         sdma->dma_device.device_prep_dma_cyclic = sdma_prep_dma_cyclic;
         sdma->dma_device.device_config = sdma_config;
-        sdma->dma_device.device_terminate_all = sdma_disable_channel;
+        sdma->dma_device.device_terminate_all = sdma_disable_channel_with_delay;
         sdma->dma_device.src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
         sdma->dma_device.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
         sdma->dma_device.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
-        sdma->dma_device.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
+        sdma->dma_device.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
         sdma->dma_device.device_issue_pending = sdma_issue_pending;
         sdma->dma_device.dev->dma_parms = &sdma->dma_parms;
         dma_set_max_seg_size(sdma->dma_device.dev, 65535);
diff --git a/drivers/dma/ioat/init.c b/drivers/dma/ioat/init.c
index cc5259b881d4..6ad4384b3fa8 100644
--- a/drivers/dma/ioat/init.c
+++ b/drivers/dma/ioat/init.c
@@ -760,9 +760,7 @@ ioat_init_channel(struct ioatdma_device *ioat_dma,
         dma_cookie_init(&ioat_chan->dma_chan);
         list_add_tail(&ioat_chan->dma_chan.device_node, &dma->channels);
         ioat_dma->idx[idx] = ioat_chan;
-        init_timer(&ioat_chan->timer);
-        ioat_chan->timer.function = ioat_timer_event;
-        ioat_chan->timer.data = data;
+        setup_timer(&ioat_chan->timer, ioat_timer_event, data);
         tasklet_init(&ioat_chan->cleanup_task, ioat_cleanup_event, data);
 }
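The ioat/init.c hunk is a mechanical cleanup: setup_timer() bundles init_timer() plus the manual .function/.data assignments into a single call, with no behavioural change. A toy userspace sketch of what the helper collapses; toy_timer and toy_setup_timer are stand-ins invented here, not the kernel's struct timer_list API:

#include <stdio.h>

/* stand-in for the two timer_list fields the old code set by hand */
struct toy_timer {
        void (*function)(unsigned long);
        unsigned long data;
};

/* what setup_timer() effectively does: initialize, set callback, set data */
static void toy_setup_timer(struct toy_timer *t,
                            void (*fn)(unsigned long), unsigned long data)
{
        t->function = fn;
        t->data = data;
}

static void timer_event(unsigned long data)
{
        printf("timer fired, data=%lu\n", data);
}

int main(void)
{
        struct toy_timer t;

        toy_setup_timer(&t, timer_event, 42);   /* one call instead of three lines */
        t.function(t.data);                     /* simulate expiry */
        return 0;
}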
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
index 0cb951b743a6..25bc5b103aa2 100644
--- a/drivers/dma/mv_xor.c
+++ b/drivers/dma/mv_xor.c
@@ -960,7 +960,7 @@ static int mv_chan_memcpy_self_test(struct mv_xor_chan *mv_chan)
         }

         src_dma = dma_map_page(dma_chan->device->dev, virt_to_page(src),
-                               (size_t)src & ~PAGE_MASK, PAGE_SIZE,
+                               offset_in_page(src), PAGE_SIZE,
                                DMA_TO_DEVICE);
         unmap->addr[0] = src_dma;

@@ -972,7 +972,7 @@ static int mv_chan_memcpy_self_test(struct mv_xor_chan *mv_chan)
         unmap->to_cnt = 1;

         dest_dma = dma_map_page(dma_chan->device->dev, virt_to_page(dest),
-                               (size_t)dest & ~PAGE_MASK, PAGE_SIZE,
+                               offset_in_page(dest), PAGE_SIZE,
                                DMA_FROM_DEVICE);
         unmap->addr[1] = dest_dma;

@@ -1580,11 +1580,6 @@ static int mv_xor_probe(struct platform_device *pdev)
                         int irq;

                         cd = &pdata->channels[i];
-                        if (!cd) {
-                                ret = -ENODEV;
-                                goto err_channel_add;
-                        }
-
                         irq = platform_get_irq(pdev, i);
                         if (irq < 0) {
                                 ret = irq;
diff --git a/drivers/dma/sh/rcar-dmac.c b/drivers/dma/sh/rcar-dmac.c
index 48b22d5c8602..db41795fe42a 100644
--- a/drivers/dma/sh/rcar-dmac.c
+++ b/drivers/dma/sh/rcar-dmac.c
@@ -344,13 +344,19 @@ static void rcar_dmac_chan_start_xfer(struct rcar_dmac_chan *chan)
                 rcar_dmac_chan_write(chan, RCAR_DMARS, chan->mid_rid);

         if (desc->hwdescs.use) {
-                struct rcar_dmac_xfer_chunk *chunk;
+                struct rcar_dmac_xfer_chunk *chunk =
+                        list_first_entry(&desc->chunks,
+                                         struct rcar_dmac_xfer_chunk, node);

                 dev_dbg(chan->chan.device->dev,
                         "chan%u: queue desc %p: %u@%pad\n",
                         chan->index, desc, desc->nchunks, &desc->hwdescs.dma);

 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+                rcar_dmac_chan_write(chan, RCAR_DMAFIXSAR,
+                                     chunk->src_addr >> 32);
+                rcar_dmac_chan_write(chan, RCAR_DMAFIXDAR,
+                                     chunk->dst_addr >> 32);
                 rcar_dmac_chan_write(chan, RCAR_DMAFIXDPBASE,
                                      desc->hwdescs.dma >> 32);
 #endif
@@ -368,8 +374,6 @@ static void rcar_dmac_chan_start_xfer(struct rcar_dmac_chan *chan)
                  * should. Initialize it manually with the destination address
                  * of the first chunk.
                  */
-                chunk = list_first_entry(&desc->chunks,
-                                         struct rcar_dmac_xfer_chunk, node);
                 rcar_dmac_chan_write(chan, RCAR_DMADAR,
                                      chunk->dst_addr & 0xffffffff);

@@ -855,8 +859,12 @@ rcar_dmac_chan_prep_sg(struct rcar_dmac_chan *chan, struct scatterlist *sgl,
         unsigned int nchunks = 0;
         unsigned int max_chunk_size;
         unsigned int full_size = 0;
-        bool highmem = false;
+        bool cross_boundary = false;
         unsigned int i;
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+        u32 high_dev_addr;
+        u32 high_mem_addr;
+#endif

         desc = rcar_dmac_desc_get(chan);
         if (!desc)
@@ -882,6 +890,16 @@ rcar_dmac_chan_prep_sg(struct rcar_dmac_chan *chan, struct scatterlist *sgl,

                 full_size += len;

+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+                if (i == 0) {
+                        high_dev_addr = dev_addr >> 32;
+                        high_mem_addr = mem_addr >> 32;
+                }
+
+                if ((dev_addr >> 32 != high_dev_addr) ||
+                    (mem_addr >> 32 != high_mem_addr))
+                        cross_boundary = true;
+#endif
                 while (len) {
                         unsigned int size = min(len, max_chunk_size);

@@ -890,18 +908,14 @@ rcar_dmac_chan_prep_sg(struct rcar_dmac_chan *chan, struct scatterlist *sgl,
                          * Prevent individual transfers from crossing 4GB
                          * boundaries.
                          */
-                        if (dev_addr >> 32 != (dev_addr + size - 1) >> 32)
+                        if (dev_addr >> 32 != (dev_addr + size - 1) >> 32) {
                                 size = ALIGN(dev_addr, 1ULL << 32) - dev_addr;
-                        if (mem_addr >> 32 != (mem_addr + size - 1) >> 32)
+                                cross_boundary = true;
+                        }
+                        if (mem_addr >> 32 != (mem_addr + size - 1) >> 32) {
                                 size = ALIGN(mem_addr, 1ULL << 32) - mem_addr;
-
-                        /*
-                         * Check if either of the source or destination address
-                         * can't be expressed in 32 bits. If so we can't use
-                         * hardware descriptor lists.
-                         */
-                        if (dev_addr >> 32 || mem_addr >> 32)
-                                highmem = true;
+                                cross_boundary = true;
+                        }
 #endif

                         chunk = rcar_dmac_xfer_chunk_get(chan);
@@ -943,13 +957,11 @@ rcar_dmac_chan_prep_sg(struct rcar_dmac_chan *chan, struct scatterlist *sgl,
          * Use hardware descriptor lists if possible when more than one chunk
          * needs to be transferred (otherwise they don't make much sense).
          *
-         * The highmem check currently covers the whole transfer. As an
-         * optimization we could use descriptor lists for consecutive lowmem
-         * chunks and direct manual mode for highmem chunks. Whether the
-         * performance improvement would be significant enough compared to the
-         * additional complexity remains to be investigated.
+         * Source/Destination address should be located in same 4GiB region
+         * in the 40bit address space when it uses Hardware descriptor,
+         * and cross_boundary is checking it.
          */
-        desc->hwdescs.use = !highmem && nchunks > 1;
+        desc->hwdescs.use = !cross_boundary && nchunks > 1;
         if (desc->hwdescs.use) {
                 if (rcar_dmac_fill_hwdesc(chan, desc) < 0)
                         desc->hwdescs.use = false;
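The rcar-dmac hunks above replace the old highmem flag with cross_boundary: each chunk is clamped so it never spans a 4 GiB boundary, and hardware descriptor lists are used only when source and destination stay inside a single 4 GiB region of the 40-bit address space. A standalone sketch of the clamping arithmetic; ALIGN_UP mirrors the kernel's ALIGN(), and the addresses below are made up:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

/* round x up to the next multiple of a (a must be a power of two) */
#define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((uint64_t)(a) - 1))

/* clamp size so [addr, addr + size) does not cross a 4 GiB boundary */
static uint64_t clamp_to_4g(uint64_t addr, uint64_t size, bool *crossed)
{
        if (addr >> 32 != (addr + size - 1) >> 32) {
                size = ALIGN_UP(addr, 1ULL << 32) - addr;
                *crossed = true;
        }
        return size;
}

int main(void)
{
        bool crossed = false;
        /* made-up address 256 MiB below a 4 GiB boundary, 512 MiB transfer */
        uint64_t addr = (1ULL << 32) - (256ULL << 20);
        uint64_t size = clamp_to_4g(addr, 512ULL << 20, &crossed);

        assert(crossed);
        assert(size == 256ULL << 20);           /* chunk stops at the boundary */
        assert(addr + size == 1ULL << 32);      /* next chunk starts right there */
        return 0;
}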
diff --git a/drivers/dma/stm32-dma.c b/drivers/dma/stm32-dma.c
index 49f86cabcfec..786fc8fcc38e 100644
--- a/drivers/dma/stm32-dma.c
+++ b/drivers/dma/stm32-dma.c
@@ -1008,7 +1008,7 @@ static struct dma_chan *stm32_dma_of_xlate(struct of_phandle_args *dma_spec,

         c = dma_get_slave_channel(&chan->vchan.chan);
         if (!c) {
-                dev_err(dev, "No more channel avalaible\n");
+                dev_err(dev, "No more channels available\n");
                 return NULL;
         }

diff --git a/drivers/dma/sun4i-dma.c b/drivers/dma/sun4i-dma.c
index 57aa227bfadb..f4ed3f17607c 100644
--- a/drivers/dma/sun4i-dma.c
+++ b/drivers/dma/sun4i-dma.c
@@ -238,7 +238,7 @@ static struct sun4i_dma_pchan *find_and_use_pchan(struct sun4i_dma_dev *priv,
         }

         spin_lock_irqsave(&priv->lock, flags);
-        for_each_clear_bit_from(i, &priv->pchans_used, max) {
+        for_each_clear_bit_from(i, priv->pchans_used, max) {
                 pchan = &pchans[i];
                 pchan->vchan = vchan;
                 set_bit(i, priv->pchans_used);
diff --git a/drivers/dma/virt-dma.c b/drivers/dma/virt-dma.c
index e47fc9b0944f..545e97279083 100644
--- a/drivers/dma/virt-dma.c
+++ b/drivers/dma/virt-dma.c
@@ -86,7 +86,7 @@ EXPORT_SYMBOL_GPL(vchan_find_desc);
 static void vchan_complete(unsigned long arg)
 {
         struct virt_dma_chan *vc = (struct virt_dma_chan *)arg;
-        struct virt_dma_desc *vd;
+        struct virt_dma_desc *vd, *_vd;
         struct dmaengine_desc_callback cb;
         LIST_HEAD(head);

@@ -103,8 +103,7 @@ static void vchan_complete(unsigned long arg)

         dmaengine_desc_callback_invoke(&cb, NULL);

-        while (!list_empty(&head)) {
-                vd = list_first_entry(&head, struct virt_dma_desc, node);
+        list_for_each_entry_safe(vd, _vd, &head, node) {
                 dmaengine_desc_get_callback(&vd->tx, &cb);

                 list_del(&vd->node);
@@ -119,9 +118,9 @@ void vchan_dma_desc_free_list(struct virt_dma_chan *vc, struct list_head *head)
 {
-        while (!list_empty(head)) {
-                struct virt_dma_desc *vd = list_first_entry(head,
-                        struct virt_dma_desc, node);
+        struct virt_dma_desc *vd, *_vd;
+
+        list_for_each_entry_safe(vd, _vd, head, node) {
                 if (dmaengine_desc_test_reuse(&vd->tx)) {
                         list_move_tail(&vd->node, &vc->desc_allocated);
                 } else {
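Finally, the virt-dma.c hunks convert open-coded "while (!list_empty(...)) { vd = list_first_entry(...); ... }" loops to list_for_each_entry_safe(), whose second cursor (_vd) remembers the next element so the current one can be deleted or freed mid-walk. A minimal userspace sketch of the same "save next before freeing" idea with a plain singly-linked list, not the kernel list API; for_each_node_safe is invented for this example:

#include <stdio.h>
#include <stdlib.h>

struct node {
        int val;
        struct node *next;
};

/* iterate while freeing: 'n' may be freed in the body, so 'tmp' saves the
 * next pointer up front -- the same role _vd plays in
 * list_for_each_entry_safe() */
#define for_each_node_safe(n, tmp, head) \
        for ((n) = (head); (n) && ((tmp) = (n)->next, 1); (n) = (tmp))

int main(void)
{
        struct node *head = NULL, *n, *tmp;
        int i;

        for (i = 0; i < 3; i++) {
                n = malloc(sizeof(*n));
                n->val = i;
                n->next = head;
                head = n;
        }

        for_each_node_safe(n, tmp, head) {
                printf("freeing %d\n", n->val);
                free(n);        /* safe: next pointer already saved in tmp */
        }
        return 0;
}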