Diffstat (limited to 'drivers/dma')
-rw-r--r--  drivers/dma/Kconfig            |    7
-rw-r--r--  drivers/dma/amba-pl08x.c       |   10
-rw-r--r--  drivers/dma/at_hdmac.c         |   10
-rw-r--r--  drivers/dma/at_hdmac_regs.h    |    8
-rw-r--r--  drivers/dma/dmaengine.c        |   21
-rw-r--r--  drivers/dma/dmatest.c          |   22
-rw-r--r--  drivers/dma/dw_dmac.c          |  432
-rw-r--r--  drivers/dma/dw_dmac_regs.h     |   24
-rw-r--r--  drivers/dma/edma.c             |   55
-rw-r--r--  drivers/dma/ep93xx_dma.c       |    3
-rw-r--r--  drivers/dma/ioat/dma.c         |   11
-rw-r--r--  drivers/dma/ioat/dma_v3.c      |  105
-rw-r--r--  drivers/dma/ioat/hw.h          |   11
-rw-r--r--  drivers/dma/ioat/pci.c         |   11
-rw-r--r--  drivers/dma/iop-adma.c         |   45
-rw-r--r--  drivers/dma/ipu/ipu_idmac.c    |    2
-rw-r--r--  drivers/dma/ipu/ipu_irq.c      |    1
-rw-r--r--  drivers/dma/mmp_pdma.c         |    6
-rw-r--r--  drivers/dma/mv_xor.c           |   40
-rw-r--r--  drivers/dma/mxs-dma.c          |    8
-rw-r--r--  drivers/dma/pch_dma.c          |   13
-rw-r--r--  drivers/dma/pl330.c            |   37
-rw-r--r--  drivers/dma/sh/shdma-base.c    |    3
-rw-r--r--  drivers/dma/sh/shdma.c         |    2
-rw-r--r--  drivers/dma/sirf-dma.c         |   25
-rw-r--r--  drivers/dma/ste_dma40.c        |    2
-rw-r--r--  drivers/dma/tegra20-apb-dma.c  |   56
27 files changed, 606 insertions(+), 364 deletions(-)
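Among the changes below, dmatest.c gains a min_odd() helper so the XOR/PQ source counts requested via module parameters are clamped to what the device supports while staying odd (the test relies on an odd source count so that dst = src). A standalone sketch of that logic, in plain C purely for illustration:

#include <stdio.h>

/* Mirrors the min_odd() helper added to dmatest.c below: take the
 * smaller of the two values, then round down to an odd number. */
static unsigned int min_odd(unsigned int x, unsigned int y)
{
	unsigned int val = x < y ? x : y;

	return val % 2 ? val : val - 1;
}

int main(void)
{
	/* e.g. (xor_sources | 1) clamped by a device limit of 8 */
	printf("%u\n", min_odd(16 | 1, 8));	/* prints 7 */
	printf("%u\n", min_odd(3 | 1, 8));	/* prints 3 */
	return 0;
}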
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig index d4c12180c654..0b408bbb6a17 100644 --- a/drivers/dma/Kconfig +++ b/drivers/dma/Kconfig @@ -51,7 +51,7 @@ config ASYNC_TX_ENABLE_CHANNEL_SWITCH config AMBA_PL08X bool "ARM PrimeCell PL080 or PL081 support" - depends on ARM_AMBA && EXPERIMENTAL + depends on ARM_AMBA select DMA_ENGINE select DMA_VIRTUAL_CHANNELS help @@ -83,7 +83,6 @@ config INTEL_IOP_ADMA config DW_DMAC tristate "Synopsys DesignWare AHB DMA support" - depends on HAVE_CLK select DMA_ENGINE default y if CPU_AT32AP7000 help @@ -213,8 +212,8 @@ config TIMB_DMA Enable support for the Timberdale FPGA DMA engine. config SIRF_DMA - tristate "CSR SiRFprimaII DMA support" - depends on ARCH_PRIMA2 + tristate "CSR SiRFprimaII/SiRFmarco DMA support" + depends on ARCH_SIRF select DMA_ENGINE help Enable support for the CSR SiRFprimaII DMA engine. diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c index d1cc5791476b..8bad254a498d 100644 --- a/drivers/dma/amba-pl08x.c +++ b/drivers/dma/amba-pl08x.c @@ -83,7 +83,7 @@ #include <linux/pm_runtime.h> #include <linux/seq_file.h> #include <linux/slab.h> -#include <asm/hardware/pl080.h> +#include <linux/amba/pl080.h> #include "dmaengine.h" #include "virt-dma.h" @@ -1096,15 +1096,9 @@ static void pl08x_free_txd_list(struct pl08x_driver_data *pl08x, struct pl08x_dma_chan *plchan) { LIST_HEAD(head); - struct pl08x_txd *txd; vchan_get_all_descriptors(&plchan->vc, &head); - - while (!list_empty(&head)) { - txd = list_first_entry(&head, struct pl08x_txd, vd.node); - list_del(&txd->vd.node); - pl08x_desc_free(&txd->vd); - } + vchan_dma_desc_free_list(&plchan->vc, &head); } /* diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c index 13a02f4425b0..6e13f262139a 100644 --- a/drivers/dma/at_hdmac.c +++ b/drivers/dma/at_hdmac.c @@ -778,7 +778,7 @@ err: */ static int atc_dma_cyclic_check_values(unsigned int reg_width, dma_addr_t buf_addr, - size_t period_len, enum dma_transfer_direction direction) + size_t period_len) { if (period_len > (ATC_BTSIZE_MAX << reg_width)) goto err_out; @@ -786,8 +786,6 @@ atc_dma_cyclic_check_values(unsigned int reg_width, dma_addr_t buf_addr, goto err_out; if (unlikely(buf_addr & ((1 << reg_width) - 1))) goto err_out; - if (unlikely(!(direction & (DMA_DEV_TO_MEM | DMA_MEM_TO_DEV)))) - goto err_out; return 0; @@ -886,14 +884,16 @@ atc_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len, return NULL; } + if (unlikely(!is_slave_direction(direction))) + goto err_out; + if (sconfig->direction == DMA_MEM_TO_DEV) reg_width = convert_buswidth(sconfig->dst_addr_width); else reg_width = convert_buswidth(sconfig->src_addr_width); /* Check for too big/unaligned periods and unaligned DMA buffer */ - if (atc_dma_cyclic_check_values(reg_width, buf_addr, - period_len, direction)) + if (atc_dma_cyclic_check_values(reg_width, buf_addr, period_len)) goto err_out; /* build cyclic linked list */ diff --git a/drivers/dma/at_hdmac_regs.h b/drivers/dma/at_hdmac_regs.h index 116e4adffb08..0eb3c1388667 100644 --- a/drivers/dma/at_hdmac_regs.h +++ b/drivers/dma/at_hdmac_regs.h @@ -369,10 +369,10 @@ static void vdbg_dump_regs(struct at_dma_chan *atchan) {} static void atc_dump_lli(struct at_dma_chan *atchan, struct at_lli *lli) { - dev_printk(KERN_CRIT, chan2dev(&atchan->chan_common), - " desc: s0x%x d0x%x ctrl0x%x:0x%x l0x%x\n", - lli->saddr, lli->daddr, - lli->ctrla, lli->ctrlb, lli->dscr); + dev_crit(chan2dev(&atchan->chan_common), + " desc: s0x%x d0x%x ctrl0x%x:0x%x l0x%x\n", + lli->saddr, lli->daddr, 
+ lli->ctrla, lli->ctrlb, lli->dscr); } diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c index a815d44c70a4..242b8c0a3de8 100644 --- a/drivers/dma/dmaengine.c +++ b/drivers/dma/dmaengine.c @@ -62,6 +62,7 @@ #include <linux/rculist.h> #include <linux/idr.h> #include <linux/slab.h> +#include <linux/of_dma.h> static DEFINE_MUTEX(dma_list_mutex); static DEFINE_IDR(dma_idr); @@ -266,7 +267,10 @@ enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie) pr_err("%s: timeout!\n", __func__); return DMA_ERROR; } - } while (status == DMA_IN_PROGRESS); + if (status != DMA_IN_PROGRESS) + break; + cpu_relax(); + } while (1); return status; } @@ -546,6 +550,21 @@ struct dma_chan *__dma_request_channel(dma_cap_mask_t *mask, dma_filter_fn fn, v } EXPORT_SYMBOL_GPL(__dma_request_channel); +/** + * dma_request_slave_channel - try to allocate an exclusive slave channel + * @dev: pointer to client device structure + * @name: slave channel name + */ +struct dma_chan *dma_request_slave_channel(struct device *dev, char *name) +{ + /* If device-tree is present get slave info from here */ + if (dev->of_node) + return of_dma_request_slave_channel(dev->of_node, name); + + return NULL; +} +EXPORT_SYMBOL_GPL(dma_request_slave_channel); + void dma_release_channel(struct dma_chan *chan) { mutex_lock(&dma_list_mutex); diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c index 64b048d7fba7..a2c8904b63ea 100644 --- a/drivers/dma/dmatest.c +++ b/drivers/dma/dmatest.c @@ -242,6 +242,13 @@ static inline void unmap_dst(struct device *dev, dma_addr_t *addr, size_t len, dma_unmap_single(dev, addr[count], len, DMA_BIDIRECTIONAL); } +static unsigned int min_odd(unsigned int x, unsigned int y) +{ + unsigned int val = min(x, y); + + return val % 2 ? val : val - 1; +} + /* * This function repeatedly tests DMA transfers of various lengths and * offsets for a given operation type until it is told to exit by @@ -262,6 +269,7 @@ static int dmatest_func(void *data) struct dmatest_thread *thread = data; struct dmatest_done done = { .wait = &done_wait }; struct dma_chan *chan; + struct dma_device *dev; const char *thread_name; unsigned int src_off, dst_off, len; unsigned int error_count; @@ -283,13 +291,16 @@ static int dmatest_func(void *data) smp_rmb(); chan = thread->chan; + dev = chan->device; if (thread->type == DMA_MEMCPY) src_cnt = dst_cnt = 1; else if (thread->type == DMA_XOR) { - src_cnt = xor_sources | 1; /* force odd to ensure dst = src */ + /* force odd to ensure dst = src */ + src_cnt = min_odd(xor_sources | 1, dev->max_xor); dst_cnt = 1; } else if (thread->type == DMA_PQ) { - src_cnt = pq_sources | 1; /* force odd to ensure dst = src */ + /* force odd to ensure dst = src */ + src_cnt = min_odd(pq_sources | 1, dma_maxpq(dev, 0)); dst_cnt = 2; for (i = 0; i < src_cnt; i++) pq_coefs[i] = 1; @@ -327,7 +338,6 @@ static int dmatest_func(void *data) while (!kthread_should_stop() && !(iterations && total_tests >= iterations)) { - struct dma_device *dev = chan->device; struct dma_async_tx_descriptor *tx = NULL; dma_addr_t dma_srcs[src_cnt]; dma_addr_t dma_dsts[dst_cnt]; @@ -526,7 +536,9 @@ err_srcs: thread_name, total_tests, failed_tests, ret); /* terminate all transfers on specified channels */ - chan->device->device_control(chan, DMA_TERMINATE_ALL, 0); + if (ret) + dmaengine_terminate_all(chan); + if (iterations > 0) while (!kthread_should_stop()) { DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wait_dmatest_exit); @@ -551,7 +563,7 @@ static void dmatest_cleanup_channel(struct dmatest_chan *dtc) } /* terminate all 
transfers on specified channels */ - dtc->chan->device->device_control(dtc->chan, DMA_TERMINATE_ALL, 0); + dmaengine_terminate_all(dtc->chan); kfree(dtc); } diff --git a/drivers/dma/dw_dmac.c b/drivers/dma/dw_dmac.c index 8f0b111af4de..dc3b9558a25c 100644 --- a/drivers/dma/dw_dmac.c +++ b/drivers/dma/dw_dmac.c @@ -1,6 +1,5 @@ /* - * Driver for the Synopsys DesignWare DMA Controller (aka DMACA on - * AVR32 systems.) + * Core driver for the Synopsys DesignWare DMA Controller * * Copyright (C) 2007-2008 Atmel Corporation * Copyright (C) 2010-2011 ST Microelectronics @@ -9,11 +8,13 @@ * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ + #include <linux/bitops.h> #include <linux/clk.h> #include <linux/delay.h> #include <linux/dmaengine.h> #include <linux/dma-mapping.h> +#include <linux/dmapool.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/io.h> @@ -46,15 +47,32 @@ static inline unsigned int dwc_get_sms(struct dw_dma_slave *slave) return slave ? slave->src_master : 1; } +#define SRC_MASTER 0 +#define DST_MASTER 1 + +static inline unsigned int dwc_get_master(struct dma_chan *chan, int master) +{ + struct dw_dma *dw = to_dw_dma(chan->device); + struct dw_dma_slave *dws = chan->private; + unsigned int m; + + if (master == SRC_MASTER) + m = dwc_get_sms(dws); + else + m = dwc_get_dms(dws); + + return min_t(unsigned int, dw->nr_masters - 1, m); +} + #define DWC_DEFAULT_CTLLO(_chan) ({ \ - struct dw_dma_slave *__slave = (_chan->private); \ struct dw_dma_chan *_dwc = to_dw_dma_chan(_chan); \ struct dma_slave_config *_sconfig = &_dwc->dma_sconfig; \ - int _dms = dwc_get_dms(__slave); \ - int _sms = dwc_get_sms(__slave); \ - u8 _smsize = __slave ? _sconfig->src_maxburst : \ + bool _is_slave = is_slave_direction(_dwc->direction); \ + int _dms = dwc_get_master(_chan, DST_MASTER); \ + int _sms = dwc_get_master(_chan, SRC_MASTER); \ + u8 _smsize = _is_slave ? _sconfig->src_maxburst : \ DW_DMA_MSIZE_16; \ - u8 _dmsize = __slave ? _sconfig->dst_maxburst : \ + u8 _dmsize = _is_slave ? _sconfig->dst_maxburst : \ DW_DMA_MSIZE_16; \ \ (DWC_CTLL_DST_MSIZE(_dmsize) \ @@ -72,15 +90,14 @@ static inline unsigned int dwc_get_sms(struct dw_dma_slave *slave) */ #define NR_DESCS_PER_CHANNEL 64 -/*----------------------------------------------------------------------*/ +static inline unsigned int dwc_get_data_width(struct dma_chan *chan, int master) +{ + struct dw_dma *dw = to_dw_dma(chan->device); -/* - * Because we're not relying on writeback from the controller (it may not - * even be configured into the core!) we don't need to use dma_pool. These - * descriptors -- and associated data -- are cacheable. We do need to make - * sure their dcache entries are written back before handing them off to - * the controller, though. 
- */ + return dw->data_width[dwc_get_master(chan, master)]; +} + +/*----------------------------------------------------------------------*/ static struct device *chan2dev(struct dma_chan *chan) { @@ -93,7 +110,7 @@ static struct device *chan2parent(struct dma_chan *chan) static struct dw_desc *dwc_first_active(struct dw_dma_chan *dwc) { - return list_entry(dwc->active_list.next, struct dw_desc, desc_node); + return to_dw_desc(dwc->active_list.next); } static struct dw_desc *dwc_desc_get(struct dw_dma_chan *dwc) @@ -120,19 +137,6 @@ static struct dw_desc *dwc_desc_get(struct dw_dma_chan *dwc) return ret; } -static void dwc_sync_desc_for_cpu(struct dw_dma_chan *dwc, struct dw_desc *desc) -{ - struct dw_desc *child; - - list_for_each_entry(child, &desc->tx_list, desc_node) - dma_sync_single_for_cpu(chan2parent(&dwc->chan), - child->txd.phys, sizeof(child->lli), - DMA_TO_DEVICE); - dma_sync_single_for_cpu(chan2parent(&dwc->chan), - desc->txd.phys, sizeof(desc->lli), - DMA_TO_DEVICE); -} - /* * Move a descriptor, including any children, to the free list. * `desc' must not be on any lists. @@ -144,8 +148,6 @@ static void dwc_desc_put(struct dw_dma_chan *dwc, struct dw_desc *desc) if (desc) { struct dw_desc *child; - dwc_sync_desc_for_cpu(dwc, desc); - spin_lock_irqsave(&dwc->lock, flags); list_for_each_entry(child, &desc->tx_list, desc_node) dev_vdbg(chan2dev(&dwc->chan), @@ -178,9 +180,9 @@ static void dwc_initialize(struct dw_dma_chan *dwc) cfghi = dws->cfg_hi; cfglo |= dws->cfg_lo & ~DWC_CFGL_CH_PRIOR_MASK; } else { - if (dwc->dma_sconfig.direction == DMA_MEM_TO_DEV) + if (dwc->direction == DMA_MEM_TO_DEV) cfghi = DWC_CFGH_DST_PER(dwc->dma_sconfig.slave_id); - else if (dwc->dma_sconfig.direction == DMA_DEV_TO_MEM) + else if (dwc->direction == DMA_DEV_TO_MEM) cfghi = DWC_CFGH_SRC_PER(dwc->dma_sconfig.slave_id); } @@ -222,7 +224,6 @@ static inline void dwc_dump_chan_regs(struct dw_dma_chan *dwc) channel_readl(dwc, CTL_LO)); } - static inline void dwc_chan_disable(struct dw_dma *dw, struct dw_dma_chan *dwc) { channel_clear_bit(dw, CH_EN, dwc->mask); @@ -248,6 +249,9 @@ static inline void dwc_do_single_block(struct dw_dma_chan *dwc, channel_writel(dwc, CTL_LO, ctllo); channel_writel(dwc, CTL_HI, desc->lli.ctlhi); channel_set_bit(dw, CH_EN, dwc->mask); + + /* Move pointer to next descriptor */ + dwc->tx_node_active = dwc->tx_node_active->next; } /* Called with dwc->lock held and bh disabled */ @@ -279,7 +283,7 @@ static void dwc_dostart(struct dw_dma_chan *dwc, struct dw_desc *first) dwc_initialize(dwc); dwc->tx_list = &first->tx_list; - dwc->tx_node_active = first->tx_list.next; + dwc->tx_node_active = &first->tx_list; dwc_do_single_block(dwc, first); @@ -316,8 +320,6 @@ dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc, param = txd->callback_param; } - dwc_sync_desc_for_cpu(dwc, desc); - /* async_tx_ack */ list_for_each_entry(child, &desc->tx_list, desc_node) async_tx_ack(&child->txd); @@ -326,7 +328,7 @@ dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc, list_splice_init(&desc->tx_list, &dwc->free_list); list_move(&desc->desc_node, &dwc->free_list); - if (!dwc->chan.private) { + if (!is_slave_direction(dwc->direction)) { struct device *parent = chan2parent(&dwc->chan); if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) { if (txd->flags & DMA_COMPL_DEST_UNMAP_SINGLE) @@ -348,7 +350,7 @@ dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc, spin_unlock_irqrestore(&dwc->lock, flags); - if (callback_required && callback) + if (callback) 
callback(param); } @@ -398,6 +400,20 @@ static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc) if (status_xfer & dwc->mask) { /* Everything we've submitted is done */ dma_writel(dw, CLEAR.XFER, dwc->mask); + + if (test_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags)) { + if (dwc->tx_node_active != dwc->tx_list) { + desc = to_dw_desc(dwc->tx_node_active); + + /* Submit next block */ + dwc_do_single_block(dwc, desc); + spin_unlock_irqrestore(&dwc->lock, flags); + + return; + } + /* We are done here */ + clear_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags); + } spin_unlock_irqrestore(&dwc->lock, flags); dwc_complete_all(dw, dwc); @@ -409,6 +425,12 @@ static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc) return; } + if (test_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags)) { + dev_vdbg(chan2dev(&dwc->chan), "%s: soft LLP mode\n", __func__); + spin_unlock_irqrestore(&dwc->lock, flags); + return; + } + dev_vdbg(chan2dev(&dwc->chan), "%s: llp=0x%llx\n", __func__, (unsigned long long)llp); @@ -457,9 +479,8 @@ static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc) static inline void dwc_dump_lli(struct dw_dma_chan *dwc, struct dw_lli *lli) { - dev_printk(KERN_CRIT, chan2dev(&dwc->chan), - " desc: s0x%x d0x%x l0x%x c0x%x:%x\n", - lli->sar, lli->dar, lli->llp, lli->ctlhi, lli->ctllo); + dev_crit(chan2dev(&dwc->chan), " desc: s0x%x d0x%x l0x%x c0x%x:%x\n", + lli->sar, lli->dar, lli->llp, lli->ctlhi, lli->ctllo); } static void dwc_handle_error(struct dw_dma *dw, struct dw_dma_chan *dwc) @@ -487,16 +508,14 @@ static void dwc_handle_error(struct dw_dma *dw, struct dw_dma_chan *dwc) dwc_dostart(dwc, dwc_first_active(dwc)); /* - * KERN_CRITICAL may seem harsh, but since this only happens + * WARN may seem harsh, but since this only happens * when someone submits a bad physical address in a * descriptor, we should consider ourselves lucky that the * controller flagged an error instead of scribbling over * random memory locations. 
*/ - dev_printk(KERN_CRIT, chan2dev(&dwc->chan), - "Bad descriptor submitted for DMA!\n"); - dev_printk(KERN_CRIT, chan2dev(&dwc->chan), - " cookie: %d\n", bad_desc->txd.cookie); + dev_WARN(chan2dev(&dwc->chan), "Bad descriptor submitted for DMA!\n" + " cookie: %d\n", bad_desc->txd.cookie); dwc_dump_lli(dwc, &bad_desc->lli); list_for_each_entry(child, &bad_desc->tx_list, desc_node) dwc_dump_lli(dwc, &child->lli); @@ -597,36 +616,8 @@ static void dw_dma_tasklet(unsigned long data) dwc_handle_cyclic(dw, dwc, status_err, status_xfer); else if (status_err & (1 << i)) dwc_handle_error(dw, dwc); - else if (status_xfer & (1 << i)) { - unsigned long flags; - - spin_lock_irqsave(&dwc->lock, flags); - if (test_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags)) { - if (dwc->tx_node_active != dwc->tx_list) { - struct dw_desc *desc = - list_entry(dwc->tx_node_active, - struct dw_desc, - desc_node); - - dma_writel(dw, CLEAR.XFER, dwc->mask); - - /* move pointer to next descriptor */ - dwc->tx_node_active = - dwc->tx_node_active->next; - - dwc_do_single_block(dwc, desc); - - spin_unlock_irqrestore(&dwc->lock, flags); - continue; - } else { - /* we are done here */ - clear_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags); - } - } - spin_unlock_irqrestore(&dwc->lock, flags); - + else if (status_xfer & (1 << i)) dwc_scan_descriptors(dw, dwc); - } } /* @@ -708,7 +699,6 @@ dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, size_t len, unsigned long flags) { struct dw_dma_chan *dwc = to_dw_dma_chan(chan); - struct dw_dma_slave *dws = chan->private; struct dw_desc *desc; struct dw_desc *first; struct dw_desc *prev; @@ -729,8 +719,10 @@ dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, return NULL; } - data_width = min_t(unsigned int, dwc->dw->data_width[dwc_get_sms(dws)], - dwc->dw->data_width[dwc_get_dms(dws)]); + dwc->direction = DMA_MEM_TO_MEM; + + data_width = min_t(unsigned int, dwc_get_data_width(chan, SRC_MASTER), + dwc_get_data_width(chan, DST_MASTER)); src_width = dst_width = min_t(unsigned int, data_width, dwc_fast_fls(src | dest | len)); @@ -760,25 +752,17 @@ dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, first = desc; } else { prev->lli.llp = desc->txd.phys; - dma_sync_single_for_device(chan2parent(chan), - prev->txd.phys, sizeof(prev->lli), - DMA_TO_DEVICE); list_add_tail(&desc->desc_node, &first->tx_list); } prev = desc; } - if (flags & DMA_PREP_INTERRUPT) /* Trigger interrupt after last block */ prev->lli.ctllo |= DWC_CTLL_INT_EN; prev->lli.llp = 0; - dma_sync_single_for_device(chan2parent(chan), - prev->txd.phys, sizeof(prev->lli), - DMA_TO_DEVICE); - first->txd.flags = flags; first->len = len; @@ -795,7 +779,6 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, unsigned long flags, void *context) { struct dw_dma_chan *dwc = to_dw_dma_chan(chan); - struct dw_dma_slave *dws = chan->private; struct dma_slave_config *sconfig = &dwc->dma_sconfig; struct dw_desc *prev; struct dw_desc *first; @@ -810,9 +793,11 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, dev_vdbg(chan2dev(chan), "%s\n", __func__); - if (unlikely(!dws || !sg_len)) + if (unlikely(!is_slave_direction(direction) || !sg_len)) return NULL; + dwc->direction = direction; + prev = first = NULL; switch (direction) { @@ -827,7 +812,7 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, ctllo |= sconfig->device_fc ? 
DWC_CTLL_FC(DW_DMA_FC_P_M2P) : DWC_CTLL_FC(DW_DMA_FC_D_M2P); - data_width = dwc->dw->data_width[dwc_get_sms(dws)]; + data_width = dwc_get_data_width(chan, SRC_MASTER); for_each_sg(sgl, sg, sg_len, i) { struct dw_desc *desc; @@ -865,10 +850,6 @@ slave_sg_todev_fill_desc: first = desc; } else { prev->lli.llp = desc->txd.phys; - dma_sync_single_for_device(chan2parent(chan), - prev->txd.phys, - sizeof(prev->lli), - DMA_TO_DEVICE); list_add_tail(&desc->desc_node, &first->tx_list); } @@ -890,7 +871,7 @@ slave_sg_todev_fill_desc: ctllo |= sconfig->device_fc ? DWC_CTLL_FC(DW_DMA_FC_P_P2M) : DWC_CTLL_FC(DW_DMA_FC_D_P2M); - data_width = dwc->dw->data_width[dwc_get_dms(dws)]; + data_width = dwc_get_data_width(chan, DST_MASTER); for_each_sg(sgl, sg, sg_len, i) { struct dw_desc *desc; @@ -927,10 +908,6 @@ slave_sg_fromdev_fill_desc: first = desc; } else { prev->lli.llp = desc->txd.phys; - dma_sync_single_for_device(chan2parent(chan), - prev->txd.phys, - sizeof(prev->lli), - DMA_TO_DEVICE); list_add_tail(&desc->desc_node, &first->tx_list); } @@ -950,10 +927,6 @@ slave_sg_fromdev_fill_desc: prev->lli.ctllo |= DWC_CTLL_INT_EN; prev->lli.llp = 0; - dma_sync_single_for_device(chan2parent(chan), - prev->txd.phys, sizeof(prev->lli), - DMA_TO_DEVICE); - first->len = total_len; return &first->txd; @@ -984,11 +957,12 @@ set_runtime_config(struct dma_chan *chan, struct dma_slave_config *sconfig) { struct dw_dma_chan *dwc = to_dw_dma_chan(chan); - /* Check if it is chan is configured for slave transfers */ - if (!chan->private) + /* Check if chan will be configured for slave transfers */ + if (!is_slave_direction(sconfig->direction)) return -EINVAL; memcpy(&dwc->dma_sconfig, sconfig, sizeof(*sconfig)); + dwc->direction = sconfig->direction; convert_burst(&dwc->dma_sconfig.src_maxburst); convert_burst(&dwc->dma_sconfig.dst_maxburst); @@ -996,6 +970,26 @@ set_runtime_config(struct dma_chan *chan, struct dma_slave_config *sconfig) return 0; } +static inline void dwc_chan_pause(struct dw_dma_chan *dwc) +{ + u32 cfglo = channel_readl(dwc, CFG_LO); + + channel_writel(dwc, CFG_LO, cfglo | DWC_CFGL_CH_SUSP); + while (!(channel_readl(dwc, CFG_LO) & DWC_CFGL_FIFO_EMPTY)) + cpu_relax(); + + dwc->paused = true; +} + +static inline void dwc_chan_resume(struct dw_dma_chan *dwc) +{ + u32 cfglo = channel_readl(dwc, CFG_LO); + + channel_writel(dwc, CFG_LO, cfglo & ~DWC_CFGL_CH_SUSP); + + dwc->paused = false; +} + static int dwc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, unsigned long arg) { @@ -1003,18 +997,13 @@ static int dwc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, struct dw_dma *dw = to_dw_dma(chan->device); struct dw_desc *desc, *_desc; unsigned long flags; - u32 cfglo; LIST_HEAD(list); if (cmd == DMA_PAUSE) { spin_lock_irqsave(&dwc->lock, flags); - cfglo = channel_readl(dwc, CFG_LO); - channel_writel(dwc, CFG_LO, cfglo | DWC_CFGL_CH_SUSP); - while (!(channel_readl(dwc, CFG_LO) & DWC_CFGL_FIFO_EMPTY)) - cpu_relax(); + dwc_chan_pause(dwc); - dwc->paused = true; spin_unlock_irqrestore(&dwc->lock, flags); } else if (cmd == DMA_RESUME) { if (!dwc->paused) @@ -1022,9 +1011,7 @@ static int dwc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, spin_lock_irqsave(&dwc->lock, flags); - cfglo = channel_readl(dwc, CFG_LO); - channel_writel(dwc, CFG_LO, cfglo & ~DWC_CFGL_CH_SUSP); - dwc->paused = false; + dwc_chan_resume(dwc); spin_unlock_irqrestore(&dwc->lock, flags); } else if (cmd == DMA_TERMINATE_ALL) { @@ -1034,7 +1021,7 @@ static int dwc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, 
dwc_chan_disable(dw, dwc); - dwc->paused = false; + dwc_chan_resume(dwc); /* active_list entries will end up before queued entries */ list_splice_init(&dwc->queue, &list); @@ -1113,22 +1100,22 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan) spin_lock_irqsave(&dwc->lock, flags); i = dwc->descs_allocated; while (dwc->descs_allocated < NR_DESCS_PER_CHANNEL) { + dma_addr_t phys; + spin_unlock_irqrestore(&dwc->lock, flags); - desc = kzalloc(sizeof(struct dw_desc), GFP_KERNEL); - if (!desc) { - dev_info(chan2dev(chan), - "only allocated %d descriptors\n", i); - spin_lock_irqsave(&dwc->lock, flags); - break; - } + desc = dma_pool_alloc(dw->desc_pool, GFP_ATOMIC, &phys); + if (!desc) + goto err_desc_alloc; + + memset(desc, 0, sizeof(struct dw_desc)); INIT_LIST_HEAD(&desc->tx_list); dma_async_tx_descriptor_init(&desc->txd, chan); desc->txd.tx_submit = dwc_tx_submit; desc->txd.flags = DMA_CTRL_ACK; - desc->txd.phys = dma_map_single(chan2parent(chan), &desc->lli, - sizeof(desc->lli), DMA_TO_DEVICE); + desc->txd.phys = phys; + dwc_desc_put(dwc, desc); spin_lock_irqsave(&dwc->lock, flags); @@ -1140,6 +1127,11 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan) dev_dbg(chan2dev(chan), "%s: allocated %d descriptors\n", __func__, i); return i; + +err_desc_alloc: + dev_info(chan2dev(chan), "only allocated %d descriptors\n", i); + + return i; } static void dwc_free_chan_resources(struct dma_chan *chan) @@ -1171,14 +1163,56 @@ static void dwc_free_chan_resources(struct dma_chan *chan) list_for_each_entry_safe(desc, _desc, &list, desc_node) { dev_vdbg(chan2dev(chan), " freeing descriptor %p\n", desc); - dma_unmap_single(chan2parent(chan), desc->txd.phys, - sizeof(desc->lli), DMA_TO_DEVICE); - kfree(desc); + dma_pool_free(dw->desc_pool, desc, desc->txd.phys); } dev_vdbg(chan2dev(chan), "%s: done\n", __func__); } +bool dw_dma_generic_filter(struct dma_chan *chan, void *param) +{ + struct dw_dma *dw = to_dw_dma(chan->device); + static struct dw_dma *last_dw; + static char *last_bus_id; + int i = -1; + + /* + * dmaengine framework calls this routine for all channels of all dma + * controller, until true is returned. If 'param' bus_id is not + * registered with a dma controller (dw), then there is no need of + * running below function for all channels of dw. + * + * This block of code does this by saving the parameters of last + * failure. If dw and param are same, i.e. trying on same dw with + * different channel, return false. + */ + if ((last_dw == dw) && (last_bus_id == param)) + return false; + /* + * Return true: + * - If dw_dma's platform data is not filled with slave info, then all + * dma controllers are fine for transfer. 
+ * - Or if param is NULL + */ + if (!dw->sd || !param) + return true; + + while (++i < dw->sd_count) { + if (!strcmp(dw->sd[i].bus_id, param)) { + chan->private = &dw->sd[i]; + last_dw = NULL; + last_bus_id = NULL; + + return true; + } + } + + last_dw = dw; + last_bus_id = param; + return false; +} +EXPORT_SYMBOL(dw_dma_generic_filter); + /* --------------------- Cyclic DMA API extensions -------------------- */ /** @@ -1298,6 +1332,11 @@ struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan, retval = ERR_PTR(-EINVAL); + if (unlikely(!is_slave_direction(direction))) + goto out_err; + + dwc->direction = direction; + if (direction == DMA_MEM_TO_DEV) reg_width = __ffs(sconfig->dst_addr_width); else @@ -1312,8 +1351,6 @@ struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan, goto out_err; if (unlikely(buf_addr & ((1 << reg_width) - 1))) goto out_err; - if (unlikely(!(direction & (DMA_MEM_TO_DEV | DMA_DEV_TO_MEM)))) - goto out_err; retval = ERR_PTR(-ENOMEM); @@ -1371,20 +1408,14 @@ struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan, desc->lli.ctlhi = (period_len >> reg_width); cdesc->desc[i] = desc; - if (last) { + if (last) last->lli.llp = desc->txd.phys; - dma_sync_single_for_device(chan2parent(chan), - last->txd.phys, sizeof(last->lli), - DMA_TO_DEVICE); - } last = desc; } /* lets make a cyclic list */ last->lli.llp = cdesc->desc[0]->txd.phys; - dma_sync_single_for_device(chan2parent(chan), last->txd.phys, - sizeof(last->lli), DMA_TO_DEVICE); dev_dbg(chan2dev(&dwc->chan), "cyclic prepared buf 0x%llx len %zu " "period %zu periods %d\n", (unsigned long long)buf_addr, @@ -1462,6 +1493,91 @@ static void dw_dma_off(struct dw_dma *dw) dw->chan[i].initialized = false; } +#ifdef CONFIG_OF +static struct dw_dma_platform_data * +dw_dma_parse_dt(struct platform_device *pdev) +{ + struct device_node *sn, *cn, *np = pdev->dev.of_node; + struct dw_dma_platform_data *pdata; + struct dw_dma_slave *sd; + u32 tmp, arr[4]; + + if (!np) { + dev_err(&pdev->dev, "Missing DT data\n"); + return NULL; + } + + pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL); + if (!pdata) + return NULL; + + if (of_property_read_u32(np, "nr_channels", &pdata->nr_channels)) + return NULL; + + if (of_property_read_bool(np, "is_private")) + pdata->is_private = true; + + if (!of_property_read_u32(np, "chan_allocation_order", &tmp)) + pdata->chan_allocation_order = (unsigned char)tmp; + + if (!of_property_read_u32(np, "chan_priority", &tmp)) + pdata->chan_priority = tmp; + + if (!of_property_read_u32(np, "block_size", &tmp)) + pdata->block_size = tmp; + + if (!of_property_read_u32(np, "nr_masters", &tmp)) { + if (tmp > 4) + return NULL; + + pdata->nr_masters = tmp; + } + + if (!of_property_read_u32_array(np, "data_width", arr, + pdata->nr_masters)) + for (tmp = 0; tmp < pdata->nr_masters; tmp++) + pdata->data_width[tmp] = arr[tmp]; + + /* parse slave data */ + sn = of_find_node_by_name(np, "slave_info"); + if (!sn) + return pdata; + + /* calculate number of slaves */ + tmp = of_get_child_count(sn); + if (!tmp) + return NULL; + + sd = devm_kzalloc(&pdev->dev, sizeof(*sd) * tmp, GFP_KERNEL); + if (!sd) + return NULL; + + pdata->sd = sd; + pdata->sd_count = tmp; + + for_each_child_of_node(sn, cn) { + sd->dma_dev = &pdev->dev; + of_property_read_string(cn, "bus_id", &sd->bus_id); + of_property_read_u32(cn, "cfg_hi", &sd->cfg_hi); + of_property_read_u32(cn, "cfg_lo", &sd->cfg_lo); + if (!of_property_read_u32(cn, "src_master", &tmp)) + sd->src_master = tmp; + + if (!of_property_read_u32(cn, 
"dst_master", &tmp)) + sd->dst_master = tmp; + sd++; + } + + return pdata; +} +#else +static inline struct dw_dma_platform_data * +dw_dma_parse_dt(struct platform_device *pdev) +{ + return NULL; +} +#endif + static int dw_probe(struct platform_device *pdev) { struct dw_dma_platform_data *pdata; @@ -1477,10 +1593,6 @@ static int dw_probe(struct platform_device *pdev) int err; int i; - pdata = dev_get_platdata(&pdev->dev); - if (!pdata || pdata->nr_channels > DW_DMA_MAX_NR_CHANNELS) - return -EINVAL; - io = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!io) return -EINVAL; @@ -1496,6 +1608,22 @@ static int dw_probe(struct platform_device *pdev) dw_params = dma_read_byaddr(regs, DW_PARAMS); autocfg = dw_params >> DW_PARAMS_EN & 0x1; + pdata = dev_get_platdata(&pdev->dev); + if (!pdata) + pdata = dw_dma_parse_dt(pdev); + + if (!pdata && autocfg) { + pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL); + if (!pdata) + return -ENOMEM; + + /* Fill platform data with the default values */ + pdata->is_private = true; + pdata->chan_allocation_order = CHAN_ALLOCATION_ASCENDING; + pdata->chan_priority = CHAN_PRIORITY_ASCENDING; + } else if (!pdata || pdata->nr_channels > DW_DMA_MAX_NR_CHANNELS) + return -EINVAL; + if (autocfg) nr_channels = (dw_params >> DW_PARAMS_NR_CHAN & 0x7) + 1; else @@ -1512,6 +1640,8 @@ static int dw_probe(struct platform_device *pdev) clk_prepare_enable(dw->clk); dw->regs = regs; + dw->sd = pdata->sd; + dw->sd_count = pdata->sd_count; /* get hardware configuration parameters */ if (autocfg) { @@ -1543,6 +1673,14 @@ static int dw_probe(struct platform_device *pdev) platform_set_drvdata(pdev, dw); + /* create a pool of consistent memory blocks for hardware descriptors */ + dw->desc_pool = dmam_pool_create("dw_dmac_desc_pool", &pdev->dev, + sizeof(struct dw_desc), 4, 0); + if (!dw->desc_pool) { + dev_err(&pdev->dev, "No memory for descriptors dma pool\n"); + return -ENOMEM; + } + tasklet_init(&dw->tasklet, dw_dma_tasklet, (unsigned long)dw); INIT_LIST_HEAD(&dw->dma.channels); @@ -1574,7 +1712,7 @@ static int dw_probe(struct platform_device *pdev) channel_clear_bit(dw, CH_EN, dwc->mask); - dwc->dw = dw; + dwc->direction = DMA_TRANS_NONE; /* hardware configuration */ if (autocfg) { @@ -1626,8 +1764,8 @@ static int dw_probe(struct platform_device *pdev) dma_writel(dw, CFG, DW_CFG_DMA_EN); - printk(KERN_INFO "%s: DesignWare DMA Controller, %d channels\n", - dev_name(&pdev->dev), nr_channels); + dev_info(&pdev->dev, "DesignWare DMA Controller, %d channels\n", + nr_channels); dma_async_device_register(&dw->dma); @@ -1657,7 +1795,7 @@ static void dw_shutdown(struct platform_device *pdev) { struct dw_dma *dw = platform_get_drvdata(pdev); - dw_dma_off(platform_get_drvdata(pdev)); + dw_dma_off(dw); clk_disable_unprepare(dw->clk); } @@ -1666,7 +1804,7 @@ static int dw_suspend_noirq(struct device *dev) struct platform_device *pdev = to_platform_device(dev); struct dw_dma *dw = platform_get_drvdata(pdev); - dw_dma_off(platform_get_drvdata(pdev)); + dw_dma_off(dw); clk_disable_unprepare(dw->clk); return 0; @@ -1679,6 +1817,7 @@ static int dw_resume_noirq(struct device *dev) clk_prepare_enable(dw->clk); dma_writel(dw, CFG, DW_CFG_DMA_EN); + return 0; } @@ -1700,6 +1839,7 @@ MODULE_DEVICE_TABLE(of, dw_dma_id_table); #endif static struct platform_driver dw_driver = { + .probe = dw_probe, .remove = dw_remove, .shutdown = dw_shutdown, .driver = { @@ -1711,7 +1851,7 @@ static struct platform_driver dw_driver = { static int __init dw_init(void) { - return 
platform_driver_probe(&dw_driver, dw_probe); + return platform_driver_register(&dw_driver); } subsys_initcall(dw_init); diff --git a/drivers/dma/dw_dmac_regs.h b/drivers/dma/dw_dmac_regs.h index 88965597b7d0..fef296de4af1 100644 --- a/drivers/dma/dw_dmac_regs.h +++ b/drivers/dma/dw_dmac_regs.h @@ -9,6 +9,7 @@ * published by the Free Software Foundation. */ +#include <linux/dmaengine.h> #include <linux/dw_dmac.h> #define DW_DMA_MAX_NR_CHANNELS 8 @@ -184,12 +185,13 @@ enum dw_dmac_flags { }; struct dw_dma_chan { - struct dma_chan chan; - void __iomem *ch_regs; - u8 mask; - u8 priority; - bool paused; - bool initialized; + struct dma_chan chan; + void __iomem *ch_regs; + u8 mask; + u8 priority; + enum dma_transfer_direction direction; + bool paused; + bool initialized; /* software emulation of the LLP transfers */ struct list_head *tx_list; @@ -212,9 +214,6 @@ struct dw_dma_chan { /* configuration passed via DMA_SLAVE_CONFIG */ struct dma_slave_config dma_sconfig; - - /* backlink to dw_dma */ - struct dw_dma *dw; }; static inline struct dw_dma_chan_regs __iomem * @@ -236,9 +235,14 @@ static inline struct dw_dma_chan *to_dw_dma_chan(struct dma_chan *chan) struct dw_dma { struct dma_device dma; void __iomem *regs; + struct dma_pool *desc_pool; struct tasklet_struct tasklet; struct clk *clk; + /* slave information */ + struct dw_dma_slave *sd; + unsigned int sd_count; + u8 all_chan_mask; /* hardware configuration */ @@ -295,6 +299,8 @@ struct dw_desc { size_t len; }; +#define to_dw_desc(h) list_entry(h, struct dw_desc, desc_node) + static inline struct dw_desc * txd_to_dw_desc(struct dma_async_tx_descriptor *txd) { diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c index 232b4583ae93..82c8672f26e8 100644 --- a/drivers/dma/edma.c +++ b/drivers/dma/edma.c @@ -69,9 +69,7 @@ struct edma_chan { int ch_num; bool alloced; int slot[EDMA_MAX_SLOTS]; - dma_addr_t addr; - int addr_width; - int maxburst; + struct dma_slave_config cfg; }; struct edma_cc { @@ -178,29 +176,14 @@ static int edma_terminate_all(struct edma_chan *echan) return 0; } - static int edma_slave_config(struct edma_chan *echan, - struct dma_slave_config *config) + struct dma_slave_config *cfg) { - if ((config->src_addr_width > DMA_SLAVE_BUSWIDTH_4_BYTES) || - (config->dst_addr_width > DMA_SLAVE_BUSWIDTH_4_BYTES)) + if (cfg->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES || + cfg->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES) return -EINVAL; - if (config->direction == DMA_MEM_TO_DEV) { - if (config->dst_addr) - echan->addr = config->dst_addr; - if (config->dst_addr_width) - echan->addr_width = config->dst_addr_width; - if (config->dst_maxburst) - echan->maxburst = config->dst_maxburst; - } else if (config->direction == DMA_DEV_TO_MEM) { - if (config->src_addr) - echan->addr = config->src_addr; - if (config->src_addr_width) - echan->addr_width = config->src_addr_width; - if (config->src_maxburst) - echan->maxburst = config->src_maxburst; - } + memcpy(&echan->cfg, cfg, sizeof(echan->cfg)); return 0; } @@ -235,6 +218,9 @@ static struct dma_async_tx_descriptor *edma_prep_slave_sg( struct edma_chan *echan = to_edma_chan(chan); struct device *dev = chan->device->dev; struct edma_desc *edesc; + dma_addr_t dev_addr; + enum dma_slave_buswidth dev_width; + u32 burst; struct scatterlist *sg; int i; int acnt, bcnt, ccnt, src, dst, cidx; @@ -243,7 +229,20 @@ static struct dma_async_tx_descriptor *edma_prep_slave_sg( if (unlikely(!echan || !sgl || !sg_len)) return NULL; - if (echan->addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) { + if (direction == 
DMA_DEV_TO_MEM) { + dev_addr = echan->cfg.src_addr; + dev_width = echan->cfg.src_addr_width; + burst = echan->cfg.src_maxburst; + } else if (direction == DMA_MEM_TO_DEV) { + dev_addr = echan->cfg.dst_addr; + dev_width = echan->cfg.dst_addr_width; + burst = echan->cfg.dst_maxburst; + } else { + dev_err(dev, "%s: bad direction?\n", __func__); + return NULL; + } + + if (dev_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) { dev_err(dev, "Undefined slave buswidth\n"); return NULL; } @@ -275,14 +274,14 @@ static struct dma_async_tx_descriptor *edma_prep_slave_sg( } } - acnt = echan->addr_width; + acnt = dev_width; /* * If the maxburst is equal to the fifo width, use * A-synced transfers. This allows for large contiguous * buffer transfers using only one PaRAM set. */ - if (echan->maxburst == 1) { + if (burst == 1) { edesc->absync = false; ccnt = sg_dma_len(sg) / acnt / (SZ_64K - 1); bcnt = sg_dma_len(sg) / acnt - ccnt * (SZ_64K - 1); @@ -302,7 +301,7 @@ static struct dma_async_tx_descriptor *edma_prep_slave_sg( */ } else { edesc->absync = true; - bcnt = echan->maxburst; + bcnt = burst; ccnt = sg_dma_len(sg) / (acnt * bcnt); if (ccnt > (SZ_64K - 1)) { dev_err(dev, "Exceeded max SG segment size\n"); @@ -313,13 +312,13 @@ static struct dma_async_tx_descriptor *edma_prep_slave_sg( if (direction == DMA_MEM_TO_DEV) { src = sg_dma_address(sg); - dst = echan->addr; + dst = dev_addr; src_bidx = acnt; src_cidx = cidx; dst_bidx = 0; dst_cidx = 0; } else { - src = echan->addr; + src = dev_addr; dst = sg_dma_address(sg); src_bidx = 0; src_cidx = 0; diff --git a/drivers/dma/ep93xx_dma.c b/drivers/dma/ep93xx_dma.c index bcfde400904f..f2bf8c0c4675 100644 --- a/drivers/dma/ep93xx_dma.c +++ b/drivers/dma/ep93xx_dma.c @@ -903,8 +903,7 @@ static int ep93xx_dma_alloc_chan_resources(struct dma_chan *chan) switch (data->port) { case EP93XX_DMA_SSP: case EP93XX_DMA_IDE: - if (data->direction != DMA_MEM_TO_DEV && - data->direction != DMA_DEV_TO_MEM) + if (!is_slave_direction(data->direction)) return -EINVAL; break; default: diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c index 73b2b65cb1de..464138a8a782 100644 --- a/drivers/dma/ioat/dma.c +++ b/drivers/dma/ioat/dma.c @@ -833,14 +833,14 @@ int __devinit ioat_dma_self_test(struct ioatdma_device *device) dma_src = dma_map_single(dev, src, IOAT_TEST_SIZE, DMA_TO_DEVICE); dma_dest = dma_map_single(dev, dest, IOAT_TEST_SIZE, DMA_FROM_DEVICE); - flags = DMA_COMPL_SRC_UNMAP_SINGLE | DMA_COMPL_DEST_UNMAP_SINGLE | + flags = DMA_COMPL_SKIP_SRC_UNMAP | DMA_COMPL_SKIP_DEST_UNMAP | DMA_PREP_INTERRUPT; tx = device->common.device_prep_dma_memcpy(dma_chan, dma_dest, dma_src, IOAT_TEST_SIZE, flags); if (!tx) { dev_err(dev, "Self-test prep failed, disabling\n"); err = -ENODEV; - goto free_resources; + goto unmap_dma; } async_tx_ack(tx); @@ -851,7 +851,7 @@ int __devinit ioat_dma_self_test(struct ioatdma_device *device) if (cookie < 0) { dev_err(dev, "Self-test setup failed, disabling\n"); err = -ENODEV; - goto free_resources; + goto unmap_dma; } dma->device_issue_pending(dma_chan); @@ -862,7 +862,7 @@ int __devinit ioat_dma_self_test(struct ioatdma_device *device) != DMA_SUCCESS) { dev_err(dev, "Self-test copy timed out, disabling\n"); err = -ENODEV; - goto free_resources; + goto unmap_dma; } if (memcmp(src, dest, IOAT_TEST_SIZE)) { dev_err(dev, "Self-test copy failed compare, disabling\n"); @@ -870,6 +870,9 @@ int __devinit ioat_dma_self_test(struct ioatdma_device *device) goto free_resources; } +unmap_dma: + dma_unmap_single(dev, dma_src, IOAT_TEST_SIZE, DMA_TO_DEVICE); + 
dma_unmap_single(dev, dma_dest, IOAT_TEST_SIZE, DMA_FROM_DEVICE); free_resources: dma->device_free_chan_resources(dma_chan); out: diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c index f7f1dc62c15c..e52cf1eb6839 100644 --- a/drivers/dma/ioat/dma_v3.c +++ b/drivers/dma/ioat/dma_v3.c @@ -863,6 +863,7 @@ static int __devinit ioat_xor_val_self_test(struct ioatdma_device *device) unsigned long tmo; struct device *dev = &device->pdev->dev; struct dma_device *dma = &device->common; + u8 op = 0; dev_dbg(dev, "%s\n", __func__); @@ -908,18 +909,22 @@ static int __devinit ioat_xor_val_self_test(struct ioatdma_device *device) } /* test xor */ + op = IOAT_OP_XOR; + dest_dma = dma_map_page(dev, dest, 0, PAGE_SIZE, DMA_FROM_DEVICE); for (i = 0; i < IOAT_NUM_SRC_TEST; i++) dma_srcs[i] = dma_map_page(dev, xor_srcs[i], 0, PAGE_SIZE, DMA_TO_DEVICE); tx = dma->device_prep_dma_xor(dma_chan, dest_dma, dma_srcs, IOAT_NUM_SRC_TEST, PAGE_SIZE, - DMA_PREP_INTERRUPT); + DMA_PREP_INTERRUPT | + DMA_COMPL_SKIP_SRC_UNMAP | + DMA_COMPL_SKIP_DEST_UNMAP); if (!tx) { dev_err(dev, "Self-test xor prep failed\n"); err = -ENODEV; - goto free_resources; + goto dma_unmap; } async_tx_ack(tx); @@ -930,7 +935,7 @@ static int __devinit ioat_xor_val_self_test(struct ioatdma_device *device) if (cookie < 0) { dev_err(dev, "Self-test xor setup failed\n"); err = -ENODEV; - goto free_resources; + goto dma_unmap; } dma->device_issue_pending(dma_chan); @@ -939,9 +944,13 @@ static int __devinit ioat_xor_val_self_test(struct ioatdma_device *device) if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_SUCCESS) { dev_err(dev, "Self-test xor timed out\n"); err = -ENODEV; - goto free_resources; + goto dma_unmap; } + dma_unmap_page(dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE); + for (i = 0; i < IOAT_NUM_SRC_TEST; i++) + dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE, DMA_TO_DEVICE); + dma_sync_single_for_cpu(dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE); for (i = 0; i < (PAGE_SIZE / sizeof(u32)); i++) { u32 *ptr = page_address(dest); @@ -957,6 +966,8 @@ static int __devinit ioat_xor_val_self_test(struct ioatdma_device *device) if (!dma_has_cap(DMA_XOR_VAL, dma_chan->device->cap_mask)) goto free_resources; + op = IOAT_OP_XOR_VAL; + /* validate the sources with the destintation page */ for (i = 0; i < IOAT_NUM_SRC_TEST; i++) xor_val_srcs[i] = xor_srcs[i]; @@ -969,11 +980,13 @@ static int __devinit ioat_xor_val_self_test(struct ioatdma_device *device) DMA_TO_DEVICE); tx = dma->device_prep_dma_xor_val(dma_chan, dma_srcs, IOAT_NUM_SRC_TEST + 1, PAGE_SIZE, - &xor_val_result, DMA_PREP_INTERRUPT); + &xor_val_result, DMA_PREP_INTERRUPT | + DMA_COMPL_SKIP_SRC_UNMAP | + DMA_COMPL_SKIP_DEST_UNMAP); if (!tx) { dev_err(dev, "Self-test zero prep failed\n"); err = -ENODEV; - goto free_resources; + goto dma_unmap; } async_tx_ack(tx); @@ -984,7 +997,7 @@ static int __devinit ioat_xor_val_self_test(struct ioatdma_device *device) if (cookie < 0) { dev_err(dev, "Self-test zero setup failed\n"); err = -ENODEV; - goto free_resources; + goto dma_unmap; } dma->device_issue_pending(dma_chan); @@ -993,9 +1006,12 @@ static int __devinit ioat_xor_val_self_test(struct ioatdma_device *device) if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_SUCCESS) { dev_err(dev, "Self-test validate timed out\n"); err = -ENODEV; - goto free_resources; + goto dma_unmap; } + for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++) + dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE, DMA_TO_DEVICE); + if (xor_val_result != 0) { dev_err(dev, "Self-test validate failed compare\n"); err = -ENODEV; 
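The self-test hunks around this point move the ioat driver to explicit unmapping: each descriptor is prepared with DMA_COMPL_SKIP_SRC_UNMAP and DMA_COMPL_SKIP_DEST_UNMAP so the completion path no longer unmaps the buffers, and the test unmaps them itself once the status check passes (or via the op-keyed dma_unmap error path). A minimal sketch of that pattern from a client's point of view — the helper name is hypothetical, and error handling is trimmed:

#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>

/* Hypothetical helper: memcpy one page with completion-time unmap
 * suppressed, leaving the caller responsible for dma_unmap_page()
 * after it has confirmed completion via the returned cookie. */
static dma_cookie_t copy_page_skip_unmap(struct dma_chan *chan,
					 struct page *dst, struct page *src)
{
	struct device *dev = chan->device->dev;
	struct dma_async_tx_descriptor *tx;
	dma_addr_t dma_src, dma_dst;
	dma_cookie_t cookie;
	unsigned long flags = DMA_PREP_INTERRUPT |
			      DMA_COMPL_SKIP_SRC_UNMAP |
			      DMA_COMPL_SKIP_DEST_UNMAP;

	dma_src = dma_map_page(dev, src, 0, PAGE_SIZE, DMA_TO_DEVICE);
	dma_dst = dma_map_page(dev, dst, 0, PAGE_SIZE, DMA_FROM_DEVICE);

	tx = chan->device->device_prep_dma_memcpy(chan, dma_dst, dma_src,
						  PAGE_SIZE, flags);
	cookie = dmaengine_submit(tx);
	dma_async_issue_pending(chan);

	return cookie;
}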
@@ -1007,14 +1023,18 @@ static int __devinit ioat_xor_val_self_test(struct ioatdma_device *device) goto free_resources; /* test memset */ + op = IOAT_OP_FILL; + dma_addr = dma_map_page(dev, dest, 0, PAGE_SIZE, DMA_FROM_DEVICE); tx = dma->device_prep_dma_memset(dma_chan, dma_addr, 0, PAGE_SIZE, - DMA_PREP_INTERRUPT); + DMA_PREP_INTERRUPT | + DMA_COMPL_SKIP_SRC_UNMAP | + DMA_COMPL_SKIP_DEST_UNMAP); if (!tx) { dev_err(dev, "Self-test memset prep failed\n"); err = -ENODEV; - goto free_resources; + goto dma_unmap; } async_tx_ack(tx); @@ -1025,7 +1045,7 @@ static int __devinit ioat_xor_val_self_test(struct ioatdma_device *device) if (cookie < 0) { dev_err(dev, "Self-test memset setup failed\n"); err = -ENODEV; - goto free_resources; + goto dma_unmap; } dma->device_issue_pending(dma_chan); @@ -1034,9 +1054,11 @@ static int __devinit ioat_xor_val_self_test(struct ioatdma_device *device) if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_SUCCESS) { dev_err(dev, "Self-test memset timed out\n"); err = -ENODEV; - goto free_resources; + goto dma_unmap; } + dma_unmap_page(dev, dma_addr, PAGE_SIZE, DMA_FROM_DEVICE); + for (i = 0; i < PAGE_SIZE/sizeof(u32); i++) { u32 *ptr = page_address(dest); if (ptr[i]) { @@ -1047,17 +1069,21 @@ static int __devinit ioat_xor_val_self_test(struct ioatdma_device *device) } /* test for non-zero parity sum */ + op = IOAT_OP_XOR_VAL; + xor_val_result = 0; for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++) dma_srcs[i] = dma_map_page(dev, xor_val_srcs[i], 0, PAGE_SIZE, DMA_TO_DEVICE); tx = dma->device_prep_dma_xor_val(dma_chan, dma_srcs, IOAT_NUM_SRC_TEST + 1, PAGE_SIZE, - &xor_val_result, DMA_PREP_INTERRUPT); + &xor_val_result, DMA_PREP_INTERRUPT | + DMA_COMPL_SKIP_SRC_UNMAP | + DMA_COMPL_SKIP_DEST_UNMAP); if (!tx) { dev_err(dev, "Self-test 2nd zero prep failed\n"); err = -ENODEV; - goto free_resources; + goto dma_unmap; } async_tx_ack(tx); @@ -1068,7 +1094,7 @@ static int __devinit ioat_xor_val_self_test(struct ioatdma_device *device) if (cookie < 0) { dev_err(dev, "Self-test 2nd zero setup failed\n"); err = -ENODEV; - goto free_resources; + goto dma_unmap; } dma->device_issue_pending(dma_chan); @@ -1077,15 +1103,31 @@ static int __devinit ioat_xor_val_self_test(struct ioatdma_device *device) if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_SUCCESS) { dev_err(dev, "Self-test 2nd validate timed out\n"); err = -ENODEV; - goto free_resources; + goto dma_unmap; } if (xor_val_result != SUM_CHECK_P_RESULT) { dev_err(dev, "Self-test validate failed compare\n"); err = -ENODEV; - goto free_resources; + goto dma_unmap; } + for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++) + dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE, DMA_TO_DEVICE); + + goto free_resources; +dma_unmap: + if (op == IOAT_OP_XOR) { + dma_unmap_page(dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE); + for (i = 0; i < IOAT_NUM_SRC_TEST; i++) + dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE, + DMA_TO_DEVICE); + } else if (op == IOAT_OP_XOR_VAL) { + for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++) + dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE, + DMA_TO_DEVICE); + } else if (op == IOAT_OP_FILL) + dma_unmap_page(dev, dma_addr, PAGE_SIZE, DMA_FROM_DEVICE); free_resources: dma->device_free_chan_resources(dma_chan); out: @@ -1126,12 +1168,7 @@ static int ioat3_reset_hw(struct ioat_chan_common *chan) chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET); writel(chanerr, chan->reg_base + IOAT_CHANERR_OFFSET); - /* -= IOAT ver.3 workarounds =- */ - /* Write CHANERRMSK_INT with 3E07h to mask out the errors - * that can cause stability issues for 
IOAT ver.3, and clear any - * pending errors - */ - pci_write_config_dword(pdev, IOAT_PCI_CHANERRMASK_INT_OFFSET, 0x3e07); + /* clear any pending errors */ err = pci_read_config_dword(pdev, IOAT_PCI_CHANERR_INT_OFFSET, &chanerr); if (err) { dev_err(&pdev->dev, "channel error register unreachable\n"); @@ -1187,6 +1224,26 @@ static bool is_snb_ioat(struct pci_dev *pdev) } } +static bool is_ivb_ioat(struct pci_dev *pdev) +{ + switch (pdev->device) { + case PCI_DEVICE_ID_INTEL_IOAT_IVB0: + case PCI_DEVICE_ID_INTEL_IOAT_IVB1: + case PCI_DEVICE_ID_INTEL_IOAT_IVB2: + case PCI_DEVICE_ID_INTEL_IOAT_IVB3: + case PCI_DEVICE_ID_INTEL_IOAT_IVB4: + case PCI_DEVICE_ID_INTEL_IOAT_IVB5: + case PCI_DEVICE_ID_INTEL_IOAT_IVB6: + case PCI_DEVICE_ID_INTEL_IOAT_IVB7: + case PCI_DEVICE_ID_INTEL_IOAT_IVB8: + case PCI_DEVICE_ID_INTEL_IOAT_IVB9: + return true; + default: + return false; + } + +} + int __devinit ioat3_dma_probe(struct ioatdma_device *device, int dca) { struct pci_dev *pdev = device->pdev; @@ -1207,7 +1264,7 @@ int __devinit ioat3_dma_probe(struct ioatdma_device *device, int dca) dma->device_alloc_chan_resources = ioat2_alloc_chan_resources; dma->device_free_chan_resources = ioat2_free_chan_resources; - if (is_jf_ioat(pdev) || is_snb_ioat(pdev)) + if (is_jf_ioat(pdev) || is_snb_ioat(pdev) || is_ivb_ioat(pdev)) dma->copy_align = 6; dma_cap_set(DMA_INTERRUPT, dma->cap_mask); diff --git a/drivers/dma/ioat/hw.h b/drivers/dma/ioat/hw.h index d2ff3fda0b18..7cb74c62c719 100644 --- a/drivers/dma/ioat/hw.h +++ b/drivers/dma/ioat/hw.h @@ -35,6 +35,17 @@ #define IOAT_VER_3_0 0x30 /* Version 3.0 */ #define IOAT_VER_3_2 0x32 /* Version 3.2 */ +#define PCI_DEVICE_ID_INTEL_IOAT_IVB0 0x0e20 +#define PCI_DEVICE_ID_INTEL_IOAT_IVB1 0x0e21 +#define PCI_DEVICE_ID_INTEL_IOAT_IVB2 0x0e22 +#define PCI_DEVICE_ID_INTEL_IOAT_IVB3 0x0e23 +#define PCI_DEVICE_ID_INTEL_IOAT_IVB4 0x0e24 +#define PCI_DEVICE_ID_INTEL_IOAT_IVB5 0x0e25 +#define PCI_DEVICE_ID_INTEL_IOAT_IVB6 0x0e26 +#define PCI_DEVICE_ID_INTEL_IOAT_IVB7 0x0e27 +#define PCI_DEVICE_ID_INTEL_IOAT_IVB8 0x0e2e +#define PCI_DEVICE_ID_INTEL_IOAT_IVB9 0x0e2f + int system_has_dca_enabled(struct pci_dev *pdev); struct ioat_dma_descriptor { diff --git a/drivers/dma/ioat/pci.c b/drivers/dma/ioat/pci.c index bfa9a3536e09..f4e6163a1e73 100644 --- a/drivers/dma/ioat/pci.c +++ b/drivers/dma/ioat/pci.c @@ -40,17 +40,6 @@ MODULE_VERSION(IOAT_DMA_VERSION); MODULE_LICENSE("Dual BSD/GPL"); MODULE_AUTHOR("Intel Corporation"); -#define PCI_DEVICE_ID_INTEL_IOAT_IVB0 0x0e20 -#define PCI_DEVICE_ID_INTEL_IOAT_IVB1 0x0e21 -#define PCI_DEVICE_ID_INTEL_IOAT_IVB2 0x0e22 -#define PCI_DEVICE_ID_INTEL_IOAT_IVB3 0x0e23 -#define PCI_DEVICE_ID_INTEL_IOAT_IVB4 0x0e24 -#define PCI_DEVICE_ID_INTEL_IOAT_IVB5 0x0e25 -#define PCI_DEVICE_ID_INTEL_IOAT_IVB6 0x0e26 -#define PCI_DEVICE_ID_INTEL_IOAT_IVB7 0x0e27 -#define PCI_DEVICE_ID_INTEL_IOAT_IVB8 0x0e2e -#define PCI_DEVICE_ID_INTEL_IOAT_IVB9 0x0e2f - static struct pci_device_id ioat_pci_tbl[] = { /* I/OAT v1 platforms */ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT) }, diff --git a/drivers/dma/iop-adma.c b/drivers/dma/iop-adma.c index 9072e173b860..62a59fd65723 100644 --- a/drivers/dma/iop-adma.c +++ b/drivers/dma/iop-adma.c @@ -936,7 +936,7 @@ static irqreturn_t iop_adma_err_handler(int irq, void *data) struct iop_adma_chan *chan = data; unsigned long status = iop_chan_get_status(chan); - dev_printk(KERN_ERR, chan->device->common.dev, + dev_err(chan->device->common.dev, "error ( %s%s%s%s%s%s%s)\n", iop_is_err_int_parity(status, chan) ? 
"int_parity " : "", iop_is_err_mcu_abort(status, chan) ? "mcu_abort " : "", @@ -1017,7 +1017,7 @@ static int iop_adma_memcpy_self_test(struct iop_adma_device *device) if (iop_adma_status(dma_chan, cookie, NULL) != DMA_SUCCESS) { - dev_printk(KERN_ERR, dma_chan->device->dev, + dev_err(dma_chan->device->dev, "Self-test copy timed out, disabling\n"); err = -ENODEV; goto free_resources; @@ -1027,7 +1027,7 @@ static int iop_adma_memcpy_self_test(struct iop_adma_device *device) dma_sync_single_for_cpu(&iop_chan->device->pdev->dev, dest_dma, IOP_ADMA_TEST_SIZE, DMA_FROM_DEVICE); if (memcmp(src, dest, IOP_ADMA_TEST_SIZE)) { - dev_printk(KERN_ERR, dma_chan->device->dev, + dev_err(dma_chan->device->dev, "Self-test copy failed compare, disabling\n"); err = -ENODEV; goto free_resources; @@ -1117,7 +1117,7 @@ iop_adma_xor_val_self_test(struct iop_adma_device *device) if (iop_adma_status(dma_chan, cookie, NULL) != DMA_SUCCESS) { - dev_printk(KERN_ERR, dma_chan->device->dev, + dev_err(dma_chan->device->dev, "Self-test xor timed out, disabling\n"); err = -ENODEV; goto free_resources; @@ -1129,7 +1129,7 @@ iop_adma_xor_val_self_test(struct iop_adma_device *device) for (i = 0; i < (PAGE_SIZE / sizeof(u32)); i++) { u32 *ptr = page_address(dest); if (ptr[i] != cmp_word) { - dev_printk(KERN_ERR, dma_chan->device->dev, + dev_err(dma_chan->device->dev, "Self-test xor failed compare, disabling\n"); err = -ENODEV; goto free_resources; @@ -1163,14 +1163,14 @@ iop_adma_xor_val_self_test(struct iop_adma_device *device) msleep(8); if (iop_adma_status(dma_chan, cookie, NULL) != DMA_SUCCESS) { - dev_printk(KERN_ERR, dma_chan->device->dev, + dev_err(dma_chan->device->dev, "Self-test zero sum timed out, disabling\n"); err = -ENODEV; goto free_resources; } if (zero_sum_result != 0) { - dev_printk(KERN_ERR, dma_chan->device->dev, + dev_err(dma_chan->device->dev, "Self-test zero sum failed compare, disabling\n"); err = -ENODEV; goto free_resources; @@ -1187,7 +1187,7 @@ iop_adma_xor_val_self_test(struct iop_adma_device *device) msleep(8); if (iop_adma_status(dma_chan, cookie, NULL) != DMA_SUCCESS) { - dev_printk(KERN_ERR, dma_chan->device->dev, + dev_err(dma_chan->device->dev, "Self-test memset timed out, disabling\n"); err = -ENODEV; goto free_resources; @@ -1196,7 +1196,7 @@ iop_adma_xor_val_self_test(struct iop_adma_device *device) for (i = 0; i < PAGE_SIZE/sizeof(u32); i++) { u32 *ptr = page_address(dest); if (ptr[i]) { - dev_printk(KERN_ERR, dma_chan->device->dev, + dev_err(dma_chan->device->dev, "Self-test memset failed compare, disabling\n"); err = -ENODEV; goto free_resources; @@ -1219,14 +1219,14 @@ iop_adma_xor_val_self_test(struct iop_adma_device *device) msleep(8); if (iop_adma_status(dma_chan, cookie, NULL) != DMA_SUCCESS) { - dev_printk(KERN_ERR, dma_chan->device->dev, + dev_err(dma_chan->device->dev, "Self-test non-zero sum timed out, disabling\n"); err = -ENODEV; goto free_resources; } if (zero_sum_result != 1) { - dev_printk(KERN_ERR, dma_chan->device->dev, + dev_err(dma_chan->device->dev, "Self-test non-zero sum failed compare, disabling\n"); err = -ENODEV; goto free_resources; @@ -1579,15 +1579,14 @@ static int iop_adma_probe(struct platform_device *pdev) goto err_free_iop_chan; } - dev_printk(KERN_INFO, &pdev->dev, "Intel(R) IOP: " - "( %s%s%s%s%s%s%s)\n", - dma_has_cap(DMA_PQ, dma_dev->cap_mask) ? "pq " : "", - dma_has_cap(DMA_PQ_VAL, dma_dev->cap_mask) ? "pq_val " : "", - dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "xor " : "", - dma_has_cap(DMA_XOR_VAL, dma_dev->cap_mask) ? 
"xor_val " : "", - dma_has_cap(DMA_MEMSET, dma_dev->cap_mask) ? "fill " : "", - dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "cpy " : "", - dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask) ? "intr " : ""); + dev_info(&pdev->dev, "Intel(R) IOP: ( %s%s%s%s%s%s%s)\n", + dma_has_cap(DMA_PQ, dma_dev->cap_mask) ? "pq " : "", + dma_has_cap(DMA_PQ_VAL, dma_dev->cap_mask) ? "pq_val " : "", + dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "xor " : "", + dma_has_cap(DMA_XOR_VAL, dma_dev->cap_mask) ? "xor_val " : "", + dma_has_cap(DMA_MEMSET, dma_dev->cap_mask) ? "fill " : "", + dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "cpy " : "", + dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask) ? "intr " : ""); dma_async_device_register(dma_dev); goto out; @@ -1651,8 +1650,8 @@ static void iop_chan_start_null_memcpy(struct iop_adma_chan *iop_chan) /* run the descriptor */ iop_chan_enable(iop_chan); } else - dev_printk(KERN_ERR, iop_chan->device->common.dev, - "failed to allocate null descriptor\n"); + dev_err(iop_chan->device->common.dev, + "failed to allocate null descriptor\n"); spin_unlock_bh(&iop_chan->lock); } @@ -1704,7 +1703,7 @@ static void iop_chan_start_null_xor(struct iop_adma_chan *iop_chan) /* run the descriptor */ iop_chan_enable(iop_chan); } else - dev_printk(KERN_ERR, iop_chan->device->common.dev, + dev_err(iop_chan->device->common.dev, "failed to allocate null descriptor\n"); spin_unlock_bh(&iop_chan->lock); } diff --git a/drivers/dma/ipu/ipu_idmac.c b/drivers/dma/ipu/ipu_idmac.c index 65855373cee6..8c61d17a86bf 100644 --- a/drivers/dma/ipu/ipu_idmac.c +++ b/drivers/dma/ipu/ipu_idmac.c @@ -1347,7 +1347,7 @@ static struct dma_async_tx_descriptor *idmac_prep_slave_sg(struct dma_chan *chan chan->chan_id != IDMAC_IC_7) return NULL; - if (direction != DMA_DEV_TO_MEM && direction != DMA_MEM_TO_DEV) { + if (!is_slave_direction(direction)) { dev_err(chan->device->dev, "Invalid DMA direction %d!\n", direction); return NULL; } diff --git a/drivers/dma/ipu/ipu_irq.c b/drivers/dma/ipu/ipu_irq.c index a5ee37d5320f..2e284a4438bc 100644 --- a/drivers/dma/ipu/ipu_irq.c +++ b/drivers/dma/ipu/ipu_irq.c @@ -44,7 +44,6 @@ static void ipu_write_reg(struct ipu *ipu, u32 value, unsigned long reg) struct ipu_irq_bank { unsigned int control; unsigned int status; - spinlock_t lock; struct ipu *ipu; }; diff --git a/drivers/dma/mmp_pdma.c b/drivers/dma/mmp_pdma.c index 13bdf4a7e1ec..41ad6a62f838 100644 --- a/drivers/dma/mmp_pdma.c +++ b/drivers/dma/mmp_pdma.c @@ -617,10 +617,8 @@ static int mmp_pdma_control(struct dma_chan *dchan, enum dma_ctrl_cmd cmd, else if (maxburst == 32) chan->dcmd |= DCMD_BURST32; - if (cfg) { - chan->dir = cfg->direction; - chan->drcmr = cfg->slave_id; - } + chan->dir = cfg->direction; + chan->drcmr = cfg->slave_id; chan->dev_addr = addr; break; default: diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c index ac71f555dd72..e26de4f680e1 100644 --- a/drivers/dma/mv_xor.c +++ b/drivers/dma/mv_xor.c @@ -210,7 +210,7 @@ static void mv_set_mode(struct mv_xor_chan *chan, break; default: dev_err(mv_chan_to_devp(chan), - "error: unsupported operation %d.\n", + "error: unsupported operation %d\n", type); BUG(); return; @@ -828,28 +828,22 @@ static void mv_dump_xor_regs(struct mv_xor_chan *chan) u32 val; val = __raw_readl(XOR_CONFIG(chan)); - dev_err(mv_chan_to_devp(chan), - "config 0x%08x.\n", val); + dev_err(mv_chan_to_devp(chan), "config 0x%08x\n", val); val = __raw_readl(XOR_ACTIVATION(chan)); - dev_err(mv_chan_to_devp(chan), - "activation 0x%08x.\n", val); + dev_err(mv_chan_to_devp(chan), "activation 
0x%08x\n", val); val = __raw_readl(XOR_INTR_CAUSE(chan)); - dev_err(mv_chan_to_devp(chan), - "intr cause 0x%08x.\n", val); + dev_err(mv_chan_to_devp(chan), "intr cause 0x%08x\n", val); val = __raw_readl(XOR_INTR_MASK(chan)); - dev_err(mv_chan_to_devp(chan), - "intr mask 0x%08x.\n", val); + dev_err(mv_chan_to_devp(chan), "intr mask 0x%08x\n", val); val = __raw_readl(XOR_ERROR_CAUSE(chan)); - dev_err(mv_chan_to_devp(chan), - "error cause 0x%08x.\n", val); + dev_err(mv_chan_to_devp(chan), "error cause 0x%08x\n", val); val = __raw_readl(XOR_ERROR_ADDR(chan)); - dev_err(mv_chan_to_devp(chan), - "error addr 0x%08x.\n", val); + dev_err(mv_chan_to_devp(chan), "error addr 0x%08x\n", val); } static void mv_xor_err_interrupt_handler(struct mv_xor_chan *chan, @@ -862,7 +856,7 @@ static void mv_xor_err_interrupt_handler(struct mv_xor_chan *chan, } dev_err(mv_chan_to_devp(chan), - "error on chan %d. intr cause 0x%08x.\n", + "error on chan %d. intr cause 0x%08x\n", chan->idx, intr_cause); mv_dump_xor_regs(chan); @@ -1052,9 +1046,8 @@ mv_xor_xor_self_test(struct mv_xor_chan *mv_chan) u32 *ptr = page_address(dest); if (ptr[i] != cmp_word) { dev_err(dma_chan->device->dev, - "Self-test xor failed compare, disabling." - " index %d, data %x, expected %x\n", i, - ptr[i], cmp_word); + "Self-test xor failed compare, disabling. index %d, data %x, expected %x\n", + i, ptr[i], cmp_word); err = -ENODEV; goto free_resources; } @@ -1194,12 +1187,11 @@ mv_xor_channel_add(struct mv_xor_device *xordev, goto err_free_irq; } - dev_info(&pdev->dev, "Marvell XOR: " - "( %s%s%s%s)\n", - dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "xor " : "", - dma_has_cap(DMA_MEMSET, dma_dev->cap_mask) ? "fill " : "", - dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "cpy " : "", - dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask) ? "intr " : ""); + dev_info(&pdev->dev, "Marvell XOR: ( %s%s%s%s)\n", + dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "xor " : "", + dma_has_cap(DMA_MEMSET, dma_dev->cap_mask) ? "fill " : "", + dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "cpy " : "", + dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask) ? "intr " : ""); dma_async_device_register(dma_dev); return mv_chan; @@ -1253,7 +1245,7 @@ static int mv_xor_probe(struct platform_device *pdev) struct resource *res; int i, ret; - dev_notice(&pdev->dev, "Marvell XOR driver\n"); + dev_notice(&pdev->dev, "Marvell shared XOR driver\n"); xordev = devm_kzalloc(&pdev->dev, sizeof(*xordev), GFP_KERNEL); if (!xordev) diff --git a/drivers/dma/mxs-dma.c b/drivers/dma/mxs-dma.c index 9f02e794b12b..8f6d30d37c45 100644 --- a/drivers/dma/mxs-dma.c +++ b/drivers/dma/mxs-dma.c @@ -109,7 +109,7 @@ struct mxs_dma_chan { struct dma_chan chan; struct dma_async_tx_descriptor desc; struct tasklet_struct tasklet; - int chan_irq; + unsigned int chan_irq; struct mxs_dma_ccw *ccw; dma_addr_t ccw_phys; int desc_count; @@ -441,7 +441,7 @@ static struct dma_async_tx_descriptor *mxs_dma_prep_slave_sg( struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma; struct mxs_dma_ccw *ccw; struct scatterlist *sg; - int i, j; + u32 i, j; u32 *pio; bool append = flags & DMA_PREP_INTERRUPT; int idx = append ? 
mxs_chan->desc_count : 0; @@ -537,8 +537,8 @@ static struct dma_async_tx_descriptor *mxs_dma_prep_dma_cyclic( { struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan); struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma; - int num_periods = buf_len / period_len; - int i = 0, buf = 0; + u32 num_periods = buf_len / period_len; + u32 i = 0, buf = 0; if (mxs_chan->status == DMA_IN_PROGRESS) return NULL; diff --git a/drivers/dma/pch_dma.c b/drivers/dma/pch_dma.c index eca1c4ddf039..aed6e0c117ed 100644 --- a/drivers/dma/pch_dma.c +++ b/drivers/dma/pch_dma.c @@ -1029,18 +1029,7 @@ static struct pci_driver pch_dma_driver = { #endif }; -static int __init pch_dma_init(void) -{ - return pci_register_driver(&pch_dma_driver); -} - -static void __exit pch_dma_exit(void) -{ - pci_unregister_driver(&pch_dma_driver); -} - -module_init(pch_dma_init); -module_exit(pch_dma_exit); +module_pci_driver(pch_dma_driver); MODULE_DESCRIPTION("Intel EG20T PCH / LAPIS Semicon ML7213/ML7223/ML7831 IOH " "DMA controller driver"); diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c index 95555f37ea6d..f7edb6f0ee87 100644 --- a/drivers/dma/pl330.c +++ b/drivers/dma/pl330.c @@ -2866,7 +2866,7 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id) pdat = adev->dev.platform_data; /* Allocate a new DMAC and its Channels */ - pdmac = kzalloc(sizeof(*pdmac), GFP_KERNEL); + pdmac = devm_kzalloc(&adev->dev, sizeof(*pdmac), GFP_KERNEL); if (!pdmac) { dev_err(&adev->dev, "unable to allocate mem\n"); return -ENOMEM; @@ -2878,13 +2878,9 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id) pi->mcbufsz = pdat ? pdat->mcbuf_sz : 0; res = &adev->res; - request_mem_region(res->start, resource_size(res), "dma-pl330"); - - pi->base = ioremap(res->start, resource_size(res)); - if (!pi->base) { - ret = -ENXIO; - goto probe_err1; - } + pi->base = devm_request_and_ioremap(&adev->dev, res); + if (!pi->base) + return -ENXIO; amba_set_drvdata(adev, pdmac); @@ -2892,11 +2888,11 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id) ret = request_irq(irq, pl330_irq_handler, 0, dev_name(&adev->dev), pi); if (ret) - goto probe_err2; + return ret; ret = pl330_add(pi); if (ret) - goto probe_err3; + goto probe_err1; INIT_LIST_HEAD(&pdmac->desc_pool); spin_lock_init(&pdmac->pool_lock); @@ -2918,7 +2914,7 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id) if (!pdmac->peripherals) { ret = -ENOMEM; dev_err(&adev->dev, "unable to allocate pdmac->peripherals\n"); - goto probe_err4; + goto probe_err2; } for (i = 0; i < num_chan; i++) { @@ -2962,7 +2958,7 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id) ret = dma_async_device_register(pd); if (ret) { dev_err(&adev->dev, "unable to register DMAC\n"); - goto probe_err4; + goto probe_err2; } dev_info(&adev->dev, @@ -2975,15 +2971,10 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id) return 0; -probe_err4: - pl330_del(pi); -probe_err3: - free_irq(irq, pi); probe_err2: - iounmap(pi->base); + pl330_del(pi); probe_err1: - release_mem_region(res->start, resource_size(res)); - kfree(pdmac); + free_irq(irq, pi); return ret; } @@ -2993,7 +2984,6 @@ static int __devexit pl330_remove(struct amba_device *adev) struct dma_pl330_dmac *pdmac = amba_get_drvdata(adev); struct dma_pl330_chan *pch, *_p; struct pl330_info *pi; - struct resource *res; int irq; if (!pdmac) @@ -3020,13 +3010,6 @@ static int __devexit pl330_remove(struct amba_device *adev) irq = adev->irq[0]; free_irq(irq, pi); - iounmap(pi->base); - - res = &adev->res; - 
release_mem_region(res->start, resource_size(res)); - - kfree(pdmac); - return 0; } diff --git a/drivers/dma/sh/shdma-base.c b/drivers/dma/sh/shdma-base.c index f4cd946d259d..4acb85a10250 100644 --- a/drivers/dma/sh/shdma-base.c +++ b/drivers/dma/sh/shdma-base.c @@ -638,9 +638,6 @@ static int shdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, unsigned long flags; int ret; - if (!chan) - return -EINVAL; - switch (cmd) { case DMA_TERMINATE_ALL: spin_lock_irqsave(&schan->chan_lock, flags); diff --git a/drivers/dma/sh/shdma.c b/drivers/dma/sh/shdma.c index 8201bb4e0cd7..62eff4800521 100644 --- a/drivers/dma/sh/shdma.c +++ b/drivers/dma/sh/shdma.c @@ -326,7 +326,7 @@ static int sh_dmae_set_slave(struct shdma_chan *schan, shdma_chan); const struct sh_dmae_slave_config *cfg = dmae_find_slave(sh_chan, slave_id); if (!cfg) - return -ENODEV; + return -ENXIO; if (!try) sh_chan->config = cfg; diff --git a/drivers/dma/sirf-dma.c b/drivers/dma/sirf-dma.c index c3de6edb9651..3c210ba9f938 100644 --- a/drivers/dma/sirf-dma.c +++ b/drivers/dma/sirf-dma.c @@ -32,7 +32,9 @@ #define SIRFSOC_DMA_CH_VALID 0x140 #define SIRFSOC_DMA_CH_INT 0x144 #define SIRFSOC_DMA_INT_EN 0x148 +#define SIRFSOC_DMA_INT_EN_CLR 0x14C #define SIRFSOC_DMA_CH_LOOP_CTRL 0x150 +#define SIRFSOC_DMA_CH_LOOP_CTRL_CLR 0x15C #define SIRFSOC_DMA_MODE_CTRL_BIT 4 #define SIRFSOC_DMA_DIR_CTRL_BIT 5 @@ -76,6 +78,7 @@ struct sirfsoc_dma { struct sirfsoc_dma_chan channels[SIRFSOC_DMA_CHANNELS]; void __iomem *base; int irq; + bool is_marco; }; #define DRV_NAME "sirfsoc_dma" @@ -288,13 +291,19 @@ static int sirfsoc_dma_terminate_all(struct sirfsoc_dma_chan *schan) int cid = schan->chan.chan_id; unsigned long flags; - writel_relaxed(readl_relaxed(sdma->base + SIRFSOC_DMA_INT_EN) & - ~(1 << cid), sdma->base + SIRFSOC_DMA_INT_EN); - writel_relaxed(1 << cid, sdma->base + SIRFSOC_DMA_CH_VALID); - - writel_relaxed(readl_relaxed(sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL) - & ~((1 << cid) | 1 << (cid + 16)), + if (!sdma->is_marco) { + writel_relaxed(readl_relaxed(sdma->base + SIRFSOC_DMA_INT_EN) & + ~(1 << cid), sdma->base + SIRFSOC_DMA_INT_EN); + writel_relaxed(readl_relaxed(sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL) + & ~((1 << cid) | 1 << (cid + 16)), sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL); + } else { + writel_relaxed(1 << cid, sdma->base + SIRFSOC_DMA_INT_EN_CLR); + writel_relaxed((1 << cid) | 1 << (cid + 16), + sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL_CLR); + } + + writel_relaxed(1 << cid, sdma->base + SIRFSOC_DMA_CH_VALID); spin_lock_irqsave(&schan->lock, flags); list_splice_tail_init(&schan->active, &schan->free); @@ -568,6 +577,9 @@ static int sirfsoc_dma_probe(struct platform_device *op) return -ENOMEM; } + if (of_device_is_compatible(dn, "sirf,marco-dmac")) + sdma->is_marco = true; + if (of_property_read_u32(dn, "cell-index", &id)) { dev_err(dev, "Fail to get DMAC index\n"); return -ENODEV; @@ -668,6 +680,7 @@ static int __devexit sirfsoc_dma_remove(struct platform_device *op) static struct of_device_id sirfsoc_dma_match[] = { { .compatible = "sirf,prima2-dmac", }, + { .compatible = "sirf,marco-dmac", }, {}, }; diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c index d77d41d7c463..ad860a221c33 100644 --- a/drivers/dma/ste_dma40.c +++ b/drivers/dma/ste_dma40.c @@ -2531,7 +2531,7 @@ d40_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len, enum dma_transfer_direction direction, unsigned long dma_flags, void *context) { - if (direction != DMA_DEV_TO_MEM && direction != DMA_MEM_TO_DEV) + if 
(!is_slave_direction(direction)) return NULL; return d40_prep_sg(chan, sgl, sgl, sg_len, direction, dma_flags); diff --git a/drivers/dma/tegra20-apb-dma.c b/drivers/dma/tegra20-apb-dma.c index efdfffa13349..6c144814a896 100644 --- a/drivers/dma/tegra20-apb-dma.c +++ b/drivers/dma/tegra20-apb-dma.c @@ -62,6 +62,9 @@ #define TEGRA_APBDMA_STATUS_COUNT_SHIFT 2 #define TEGRA_APBDMA_STATUS_COUNT_MASK 0xFFFC +#define TEGRA_APBDMA_CHAN_CSRE 0x00C +#define TEGRA_APBDMA_CHAN_CSRE_PAUSE (1 << 31) + /* AHB memory address */ #define TEGRA_APBDMA_CHAN_AHBPTR 0x010 @@ -112,10 +115,12 @@ struct tegra_dma; * tegra_dma_chip_data Tegra chip specific DMA data * @nr_channels: Number of channels available in the controller. * @max_dma_count: Maximum DMA transfer count supported by DMA controller. + * @support_channel_pause: Support channel wise pause of dma. */ struct tegra_dma_chip_data { int nr_channels; int max_dma_count; + bool support_channel_pause; }; /* DMA channel registers */ @@ -353,6 +358,32 @@ static void tegra_dma_global_resume(struct tegra_dma_channel *tdc) spin_unlock(&tdma->global_lock); } +static void tegra_dma_pause(struct tegra_dma_channel *tdc, + bool wait_for_burst_complete) +{ + struct tegra_dma *tdma = tdc->tdma; + + if (tdma->chip_data->support_channel_pause) { + tdc_write(tdc, TEGRA_APBDMA_CHAN_CSRE, + TEGRA_APBDMA_CHAN_CSRE_PAUSE); + if (wait_for_burst_complete) + udelay(TEGRA_APBDMA_BURST_COMPLETE_TIME); + } else { + tegra_dma_global_pause(tdc, wait_for_burst_complete); + } +} + +static void tegra_dma_resume(struct tegra_dma_channel *tdc) +{ + struct tegra_dma *tdma = tdc->tdma; + + if (tdma->chip_data->support_channel_pause) { + tdc_write(tdc, TEGRA_APBDMA_CHAN_CSRE, 0); + } else { + tegra_dma_global_resume(tdc); + } +} + static void tegra_dma_stop(struct tegra_dma_channel *tdc) { u32 csr; @@ -408,7 +439,7 @@ static void tegra_dma_configure_for_next(struct tegra_dma_channel *tdc, * If there is already IEC status then interrupt handler need to * load new configuration. 
*/ - tegra_dma_global_pause(tdc, false); + tegra_dma_pause(tdc, false); status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS); /* @@ -418,7 +449,7 @@ static void tegra_dma_configure_for_next(struct tegra_dma_channel *tdc, if (status & TEGRA_APBDMA_STATUS_ISE_EOC) { dev_err(tdc2dev(tdc), "Skipping new configuration as interrupt is pending\n"); - tegra_dma_global_resume(tdc); + tegra_dma_resume(tdc); return; } @@ -429,7 +460,7 @@ static void tegra_dma_configure_for_next(struct tegra_dma_channel *tdc, nsg_req->ch_regs.csr | TEGRA_APBDMA_CSR_ENB); nsg_req->configured = true; - tegra_dma_global_resume(tdc); + tegra_dma_resume(tdc); } static void tdc_start_head_req(struct tegra_dma_channel *tdc) @@ -690,7 +721,7 @@ static void tegra_dma_terminate_all(struct dma_chan *dc) goto skip_dma_stop; /* Pause DMA before checking the queue status */ - tegra_dma_global_pause(tdc, true); + tegra_dma_pause(tdc, true); status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS); if (status & TEGRA_APBDMA_STATUS_ISE_EOC) { @@ -708,7 +739,7 @@ static void tegra_dma_terminate_all(struct dma_chan *dc) sgreq->dma_desc->bytes_transferred += get_current_xferred_count(tdc, sgreq, status); } - tegra_dma_global_resume(tdc); + tegra_dma_resume(tdc); skip_dma_stop: tegra_dma_abort_all(tdc); @@ -1175,6 +1206,7 @@ static void tegra_dma_free_chan_resources(struct dma_chan *dc) static const struct tegra_dma_chip_data tegra20_dma_chip_data = { .nr_channels = 16, .max_dma_count = 1024UL * 64, + .support_channel_pause = false, }; #if defined(CONFIG_OF) @@ -1182,10 +1214,22 @@ static const struct tegra_dma_chip_data tegra20_dma_chip_data = { static const struct tegra_dma_chip_data tegra30_dma_chip_data = { .nr_channels = 32, .max_dma_count = 1024UL * 64, + .support_channel_pause = false, }; -static const struct of_device_id tegra_dma_of_match[] __devinitconst = { +/* Tegra114 specific DMA controller information */ +static const struct tegra_dma_chip_data tegra114_dma_chip_data = { + .nr_channels = 32, + .max_dma_count = 1024UL * 64, + .support_channel_pause = true, +}; + + +static const struct of_device_id tegra_dma_of_match[] = { { + .compatible = "nvidia,tegra114-apbdma", + .data = &tegra114_dma_chip_data, + }, { .compatible = "nvidia,tegra30-apbdma", .data = &tegra30_dma_chip_data, }, { |