Diffstat (limited to 'drivers/dma')
-rw-r--r--  drivers/dma/at_xdmac.c    |  4
-rw-r--r--  drivers/dma/dw/core.c     | 39
-rw-r--r--  drivers/dma/dw/regs.h     |  5
-rw-r--r--  drivers/dma/fsl_raid.c    |  1
-rw-r--r--  drivers/dma/hsu/hsu.c     |  9
-rw-r--r--  drivers/dma/hsu/pci.c     |  6
-rw-r--r--  drivers/dma/img-mdc-dma.c |  4
-rw-r--r--  drivers/dma/imx-sdma.c    | 56
-rw-r--r--  drivers/dma/pxa_dma.c     | 11
-rw-r--r--  drivers/dma/sh/usb-dmac.c | 19
10 files changed, 79 insertions(+), 75 deletions(-)
diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c
index e434ffe7bc5c..832cbd647145 100644
--- a/drivers/dma/at_xdmac.c
+++ b/drivers/dma/at_xdmac.c
@@ -2067,7 +2067,7 @@ err_dma_unregister:
 err_clk_disable:
         clk_disable_unprepare(atxdmac->clk);
 err_free_irq:
-        free_irq(atxdmac->irq, atxdmac->dma.dev);
+        free_irq(atxdmac->irq, atxdmac);
         return ret;
 }
 
@@ -2081,7 +2081,7 @@ static int at_xdmac_remove(struct platform_device *pdev)
         dma_async_device_unregister(&atxdmac->dma);
         clk_disable_unprepare(atxdmac->clk);
 
-        free_irq(atxdmac->irq, atxdmac->dma.dev);
+        free_irq(atxdmac->irq, atxdmac);
 
         for (i = 0; i < atxdmac->dma.chancnt; i++) {
                 struct at_xdmac_chan *atchan = &atxdmac->chan[i];
diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c
index edf053f73a49..da18b18561c4 100644
--- a/drivers/dma/dw/core.c
+++ b/drivers/dma/dw/core.c
@@ -46,9 +46,9 @@
                 u8 _dmsize = _is_slave ? _sconfig->dst_maxburst :      \
                         DW_DMA_MSIZE_16;                        \
                 u8 _dms = (_dwc->direction == DMA_MEM_TO_DEV) ?         \
-                        _dwc->p_master : _dwc->m_master;                \
+                        _dwc->dws.p_master : _dwc->dws.m_master;        \
                 u8 _sms = (_dwc->direction == DMA_DEV_TO_MEM) ?         \
-                        _dwc->p_master : _dwc->m_master;                \
+                        _dwc->dws.p_master : _dwc->dws.m_master;        \
                                                                 \
                 (DWC_CTLL_DST_MSIZE(_dmsize)                    \
                  | DWC_CTLL_SRC_MSIZE(_smsize)                  \
@@ -143,12 +143,16 @@ static void dwc_initialize(struct dw_dma_chan *dwc)
         struct dw_dma *dw = to_dw_dma(dwc->chan.device);
         u32 cfghi = DWC_CFGH_FIFO_MODE;
         u32 cfglo = DWC_CFGL_CH_PRIOR(dwc->priority);
+        bool hs_polarity = dwc->dws.hs_polarity;
 
         if (test_bit(DW_DMA_IS_INITIALIZED, &dwc->flags))
                 return;
 
-        cfghi |= DWC_CFGH_DST_PER(dwc->dst_id);
-        cfghi |= DWC_CFGH_SRC_PER(dwc->src_id);
+        cfghi |= DWC_CFGH_DST_PER(dwc->dws.dst_id);
+        cfghi |= DWC_CFGH_SRC_PER(dwc->dws.src_id);
+
+        /* Set polarity of handshake interface */
+        cfglo |= hs_polarity ? DWC_CFGL_HS_DST_POL | DWC_CFGL_HS_SRC_POL : 0;
 
         channel_writel(dwc, CFG_LO, cfglo);
         channel_writel(dwc, CFG_HI, cfghi);
@@ -209,7 +213,7 @@ static inline void dwc_do_single_block(struct dw_dma_chan *dwc,
 static void dwc_dostart(struct dw_dma_chan *dwc, struct dw_desc *first)
 {
         struct dw_dma *dw = to_dw_dma(dwc->chan.device);
-        u8 lms = DWC_LLP_LMS(dwc->m_master);
+        u8 lms = DWC_LLP_LMS(dwc->dws.m_master);
         unsigned long was_soft_llp;
 
         /* ASSERT:  channel is idle */
@@ -662,7 +666,7 @@ dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
         struct dw_desc *prev;
         size_t xfer_count;
         size_t offset;
-        u8 m_master = dwc->m_master;
+        u8 m_master = dwc->dws.m_master;
         unsigned int src_width;
         unsigned int dst_width;
         unsigned int data_width = dw->pdata->data_width[m_master];
@@ -740,7 +744,7 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
         struct dw_desc *prev;
         struct dw_desc *first;
         u32 ctllo;
-        u8 m_master = dwc->m_master;
+        u8 m_master = dwc->dws.m_master;
         u8 lms = DWC_LLP_LMS(m_master);
         dma_addr_t reg;
         unsigned int reg_width;
@@ -895,12 +899,7 @@ bool dw_dma_filter(struct dma_chan *chan, void *param)
                 return false;
 
         /* We have to copy data since dws can be temporary storage */
-
-        dwc->src_id = dws->src_id;
-        dwc->dst_id = dws->dst_id;
-
-        dwc->m_master = dws->m_master;
-        dwc->p_master = dws->p_master;
+        memcpy(&dwc->dws, dws, sizeof(struct dw_dma_slave));
 
         return true;
 }
@@ -1167,11 +1166,7 @@ static void dwc_free_chan_resources(struct dma_chan *chan)
         spin_lock_irqsave(&dwc->lock, flags);
 
         /* Clear custom channel configuration */
-        dwc->src_id = 0;
-        dwc->dst_id = 0;
-
-        dwc->m_master = 0;
-        dwc->p_master = 0;
+        memset(&dwc->dws, 0, sizeof(struct dw_dma_slave));
 
         clear_bit(DW_DMA_IS_INITIALIZED, &dwc->flags);
 
@@ -1264,7 +1259,7 @@ struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan,
         struct dw_cyclic_desc *retval = NULL;
         struct dw_desc *desc;
         struct dw_desc *last = NULL;
-        u8 lms = DWC_LLP_LMS(dwc->m_master);
+        u8 lms = DWC_LLP_LMS(dwc->dws.m_master);
         unsigned long was_cyclic;
         unsigned int reg_width;
         unsigned int periods;
@@ -1576,11 +1571,7 @@ int dw_dma_probe(struct dw_dma_chip *chip)
                                 (dwc_params >> DWC_PARAMS_MBLK_EN & 0x1) == 0;
                 } else {
                         dwc->block_size = pdata->block_size;
-
-                        /* Check if channel supports multi block transfer */
-                        channel_writel(dwc, LLP, DWC_LLP_LOC(0xffffffff));
-                        dwc->nollp = DWC_LLP_LOC(channel_readl(dwc, LLP)) == 0;
-                        channel_writel(dwc, LLP, 0);
+                        dwc->nollp = pdata->is_nollp;
                 }
         }
 
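The dw/core.c hunks above replace the four loose per-channel fields (src_id, dst_id, m_master, p_master) with an embedded struct dw_dma_slave, so dw_dma_filter() takes the whole client-supplied configuration in one memcpy() and dwc_free_chan_resources() drops it with one memset(); a newly added field such as hs_polarity is then picked up without any extra copy statement. Below is a minimal standalone C sketch of that pattern; the structures are trimmed stand-ins for illustration, not the full kernel definitions.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Trimmed stand-ins for the kernel structures (field list reduced). */
struct dw_dma_slave {
        uint8_t src_id;
        uint8_t dst_id;
        uint8_t m_master;
        uint8_t p_master;
        bool hs_polarity;
};

struct dw_dma_chan {
        struct dw_dma_slave dws;        /* replaces the four loose u8 fields */
};

/* Mirrors the new dw_dma_filter() body: one copy covers every field. */
static void chan_take_slave_config(struct dw_dma_chan *dwc,
                                   const struct dw_dma_slave *dws)
{
        memcpy(&dwc->dws, dws, sizeof(*dws));
}

/* Mirrors dwc_free_chan_resources(): one memset clears the custom config. */
static void chan_clear_slave_config(struct dw_dma_chan *dwc)
{
        memset(&dwc->dws, 0, sizeof(dwc->dws));
}

int main(void)
{
        struct dw_dma_chan dwc = { { 0 } };
        struct dw_dma_slave cfg = {
                .src_id = 1, .dst_id = 2, .m_master = 0, .p_master = 1,
                .hs_polarity = true,
        };

        chan_take_slave_config(&dwc, &cfg);
        printf("dst_id=%d hs_polarity=%d\n", dwc.dws.dst_id, dwc.dws.hs_polarity);

        chan_clear_slave_config(&dwc);
        printf("after clear: dst_id=%d hs_polarity=%d\n",
               dwc.dws.dst_id, dwc.dws.hs_polarity);
        return 0;
}
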
diff --git a/drivers/dma/dw/regs.h b/drivers/dma/dw/regs.h
index 4b7bd7834046..f65dd104479f 100644
--- a/drivers/dma/dw/regs.h
+++ b/drivers/dma/dw/regs.h
@@ -245,10 +245,7 @@ struct dw_dma_chan {
         bool nollp;
 
         /* custom slave configuration */
-        u8 src_id;
-        u8 dst_id;
-        u8 m_master;
-        u8 p_master;
+        struct dw_dma_slave dws;
 
         /* configuration passed via .device_config */
         struct dma_slave_config dma_sconfig;
diff --git a/drivers/dma/fsl_raid.c b/drivers/dma/fsl_raid.c
index aad167eaaee8..de2a2a2b1d75 100644
--- a/drivers/dma/fsl_raid.c
+++ b/drivers/dma/fsl_raid.c
@@ -836,6 +836,7 @@ static int fsl_re_probe(struct platform_device *ofdev)
                 rc = of_property_read_u32(np, "reg", &off);
                 if (rc) {
                         dev_err(dev, "Reg property not found in JQ node\n");
+                        of_node_put(np);
                         return -ENODEV;
                 }
                 /* Find out the Job Rings present under each JQ */
diff --git a/drivers/dma/hsu/hsu.c b/drivers/dma/hsu/hsu.c
index c5f21efd6090..29d04ca71d52 100644
--- a/drivers/dma/hsu/hsu.c
+++ b/drivers/dma/hsu/hsu.c
@@ -200,10 +200,9 @@ EXPORT_SYMBOL_GPL(hsu_dma_get_status);
  * is not a normal timeout interrupt, ie. hsu_dma_get_status() returned 0.
  *
  * Return:
- * IRQ_NONE for invalid channel number, IRQ_HANDLED otherwise.
+ * 0 for invalid channel number, 1 otherwise.
  */
-irqreturn_t hsu_dma_do_irq(struct hsu_dma_chip *chip, unsigned short nr,
-                           u32 status)
+int hsu_dma_do_irq(struct hsu_dma_chip *chip, unsigned short nr, u32 status)
 {
         struct hsu_dma_chan *hsuc;
         struct hsu_dma_desc *desc;
@@ -211,7 +210,7 @@ irqreturn_t hsu_dma_do_irq(struct hsu_dma_chip *chip, unsigned short nr,
 
         /* Sanity check */
         if (nr >= chip->hsu->nr_channels)
-                return IRQ_NONE;
+                return 0;
 
         hsuc = &chip->hsu->chan[nr];
 
@@ -230,7 +229,7 @@ irqreturn_t hsu_dma_do_irq(struct hsu_dma_chip *chip, unsigned short nr,
         }
         spin_unlock_irqrestore(&hsuc->vchan.lock, flags);
 
-        return IRQ_HANDLED;
+        return 1;
 }
 EXPORT_SYMBOL_GPL(hsu_dma_do_irq);
 
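The hsu.c hunk above makes hsu_dma_do_irq() return a plain int (0 = not handled, 1 = handled) instead of an irqreturn_t, so a caller that polls several channels can OR the per-channel results together and convert to an irqreturn_t once at the end, as the hsu/pci.c hunk below does with IRQ_RETVAL(). A standalone sketch of that accumulate-then-collapse idea, with IRQ_RETVAL() modelled on its definition in include/linux/irqreturn.h:

#include <stdio.h>

/* Stand-ins for the kernel's irqreturn values; IRQ_RETVAL(x) is modelled
 * on include/linux/irqreturn.h: non-zero x means "handled". */
enum irqreturn { IRQ_NONE = 0, IRQ_HANDLED = 1 };
#define IRQ_RETVAL(x) ((x) ? IRQ_HANDLED : IRQ_NONE)

/* Pretend per-channel handler in the new style: 1 = handled, 0 = not ours. */
static int fake_do_irq(unsigned short nr)
{
        return nr == 2;         /* only channel 2 has something pending */
}

int main(void)
{
        int ret = 0;
        unsigned short i;

        for (i = 0; i < 8; i++)
                ret |= fake_do_irq(i);

        printf("combined = %d -> irqreturn = %d\n", ret, IRQ_RETVAL(ret));
        return 0;
}
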
diff --git a/drivers/dma/hsu/pci.c b/drivers/dma/hsu/pci.c
index 9916058531d9..b51639f045ed 100644
--- a/drivers/dma/hsu/pci.c
+++ b/drivers/dma/hsu/pci.c
@@ -29,7 +29,7 @@ static irqreturn_t hsu_pci_irq(int irq, void *dev)
         u32 dmaisr;
         u32 status;
         unsigned short i;
-        irqreturn_t ret = IRQ_NONE;
+        int ret = 0;
         int err;
 
         dmaisr = readl(chip->regs + HSU_PCI_DMAISR);
@@ -37,14 +37,14 @@ static irqreturn_t hsu_pci_irq(int irq, void *dev)
                 if (dmaisr & 0x1) {
                         err = hsu_dma_get_status(chip, i, &status);
                         if (err > 0)
-                                ret |= IRQ_HANDLED;
+                                ret |= 1;
                         else if (err == 0)
                                 ret |= hsu_dma_do_irq(chip, i, status);
                 }
                 dmaisr >>= 1;
         }
 
-        return ret;
+        return IRQ_RETVAL(ret);
 }
 
 static int hsu_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
diff --git a/drivers/dma/img-mdc-dma.c b/drivers/dma/img-mdc-dma.c
index a4c53be482cf..624f1e1e9c55 100644
--- a/drivers/dma/img-mdc-dma.c
+++ b/drivers/dma/img-mdc-dma.c
@@ -861,7 +861,6 @@ static int mdc_dma_probe(struct platform_device *pdev)
 {
         struct mdc_dma *mdma;
         struct resource *res;
-        const struct of_device_id *match;
         unsigned int i;
         u32 val;
         int ret;
@@ -871,8 +870,7 @@ static int mdc_dma_probe(struct platform_device *pdev)
                 return -ENOMEM;
         platform_set_drvdata(pdev, mdma);
 
-        match = of_match_device(mdc_dma_of_match, &pdev->dev);
-        mdma->soc = match->data;
+        mdma->soc = of_device_get_match_data(&pdev->dev);
 
         res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
         mdma->regs = devm_ioremap_resource(&pdev->dev, res);
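The img-mdc-dma.c hunk above folds of_match_device() plus the ->data dereference into a single of_device_get_match_data() call. A minimal sketch of the shortened probe path follows; the struct mdc_dma_soc_data name is assumed from the driver's context, and the NULL check is an extra precaution rather than part of the patch.

#include <linux/of_device.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
        const struct mdc_dma_soc_data *soc;

        /* One call replaces of_match_device() + match->data. */
        soc = of_device_get_match_data(&pdev->dev);
        if (!soc)
                return -ENODEV;

        /* ... use soc as before ... */
        return 0;
}
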
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index 03ec76fc22ff..3cb47386fbb9 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -648,15 +648,11 @@ static void sdma_event_disable(struct sdma_channel *sdmac, unsigned int event)
         writel_relaxed(val, sdma->regs + chnenbl);
 }
 
-static void sdma_handle_channel_loop(struct sdma_channel *sdmac)
-{
-        if (sdmac->desc.callback)
-                sdmac->desc.callback(sdmac->desc.callback_param);
-}
-
 static void sdma_update_channel_loop(struct sdma_channel *sdmac)
 {
         struct sdma_buffer_descriptor *bd;
+        int error = 0;
+        enum dma_status old_status = sdmac->status;
 
         /*
          * loop mode. Iterate over descriptors, re-setup them and
@@ -668,17 +664,42 @@ static void sdma_update_channel_loop(struct sdma_channel *sdmac)
                 if (bd->mode.status & BD_DONE)
                         break;
 
-                if (bd->mode.status & BD_RROR)
+                if (bd->mode.status & BD_RROR) {
+                        bd->mode.status &= ~BD_RROR;
                         sdmac->status = DMA_ERROR;
+                        error = -EIO;
+                }
 
+                /*
+                 * We use bd->mode.count to calculate the residue, since contains
+                 * the number of bytes present in the current buffer descriptor.
+                 */
+
+                sdmac->chn_real_count = bd->mode.count;
                 bd->mode.status |= BD_DONE;
+                bd->mode.count = sdmac->period_len;
+
+                /*
+                 * The callback is called from the interrupt context in order
+                 * to reduce latency and to avoid the risk of altering the
+                 * SDMA transaction status by the time the client tasklet is
+                 * executed.
+                 */
+
+                if (sdmac->desc.callback)
+                        sdmac->desc.callback(sdmac->desc.callback_param);
+
                 sdmac->buf_tail++;
                 sdmac->buf_tail %= sdmac->num_bd;
+
+                if (error)
+                        sdmac->status = old_status;
         }
 }
 
-static void mxc_sdma_handle_channel_normal(struct sdma_channel *sdmac)
+static void mxc_sdma_handle_channel_normal(unsigned long data)
 {
+        struct sdma_channel *sdmac = (struct sdma_channel *) data;
         struct sdma_buffer_descriptor *bd;
         int i, error = 0;
 
@@ -705,16 +726,6 @@ static void mxc_sdma_handle_channel_normal(struct sdma_channel *sdmac)
                 sdmac->desc.callback(sdmac->desc.callback_param);
 }
 
-static void sdma_tasklet(unsigned long data)
-{
-        struct sdma_channel *sdmac = (struct sdma_channel *) data;
-
-        if (sdmac->flags & IMX_DMA_SG_LOOP)
-                sdma_handle_channel_loop(sdmac);
-        else
-                mxc_sdma_handle_channel_normal(sdmac);
-}
-
 static irqreturn_t sdma_int_handler(int irq, void *dev_id)
 {
         struct sdma_engine *sdma = dev_id;
@@ -731,8 +742,8 @@ static irqreturn_t sdma_int_handler(int irq, void *dev_id)
 
                 if (sdmac->flags & IMX_DMA_SG_LOOP)
                         sdma_update_channel_loop(sdmac);
-
-                tasklet_schedule(&sdmac->tasklet);
+                else
+                        tasklet_schedule(&sdmac->tasklet);
 
                 __clear_bit(channel, &stat);
         }
@@ -1353,7 +1364,8 @@ static enum dma_status sdma_tx_status(struct dma_chan *chan,
         u32 residue;
 
         if (sdmac->flags & IMX_DMA_SG_LOOP)
-                residue = (sdmac->num_bd - sdmac->buf_tail) * sdmac->period_len;
+                residue = (sdmac->num_bd - sdmac->buf_tail) *
+                           sdmac->period_len - sdmac->chn_real_count;
         else
                 residue = sdmac->chn_count - sdmac->chn_real_count;
 
@@ -1732,7 +1744,7 @@ static int sdma_probe(struct platform_device *pdev)
                 dma_cookie_init(&sdmac->chan);
                 sdmac->channel = i;
 
-                tasklet_init(&sdmac->tasklet, sdma_tasklet,
+                tasklet_init(&sdmac->tasklet, mxc_sdma_handle_channel_normal,
                              (unsigned long) sdmac);
                 /*
                  * Add the channel to the DMAC list. Do not add channel 0 though
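The imx-sdma.c hunks above store bd->mode.count into chn_real_count each time a cyclic descriptor is serviced and subtract it in sdma_tx_status(), so the reported residue is no longer rounded to whole periods. A standalone arithmetic sketch of the old and new formulas, using made-up example values:

#include <stdio.h>

int main(void)
{
        /* Example values only: 4 buffer descriptors of 4096 bytes, loop mode. */
        unsigned int num_bd = 4, period_len = 4096;
        unsigned int buf_tail = 1;              /* ring index after servicing */
        unsigned int chn_real_count = 512;      /* bd->mode.count of the last serviced bd */

        unsigned int old_residue = (num_bd - buf_tail) * period_len;
        unsigned int new_residue = (num_bd - buf_tail) * period_len
                                   - chn_real_count;

        printf("old formula: %u bytes\n", old_residue); /* 12288 */
        printf("new formula: %u bytes\n", new_residue); /* 11776 */
        return 0;
}
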
diff --git a/drivers/dma/pxa_dma.c b/drivers/dma/pxa_dma.c
index dc7850a422b8..3f56f9ca4482 100644
--- a/drivers/dma/pxa_dma.c
+++ b/drivers/dma/pxa_dma.c
@@ -638,7 +638,7 @@ static bool pxad_try_hotchain(struct virt_dma_chan *vc,
                 vd_last_issued = list_entry(vc->desc_issued.prev,
                                             struct virt_dma_desc, node);
                 pxad_desc_chain(vd_last_issued, vd);
-                if (is_chan_running(chan) || is_desc_completed(vd_last_issued))
+                if (is_chan_running(chan) || is_desc_completed(vd))
                         return true;
         }
 
@@ -671,6 +671,7 @@ static irqreturn_t pxad_chan_handler(int irq, void *dev_id)
         struct virt_dma_desc *vd, *tmp;
         unsigned int dcsr;
         unsigned long flags;
+        bool vd_completed;
         dma_cookie_t last_started = 0;
 
         BUG_ON(!chan);
@@ -681,15 +682,17 @@ static irqreturn_t pxad_chan_handler(int irq, void *dev_id)
 
         spin_lock_irqsave(&chan->vc.lock, flags);
         list_for_each_entry_safe(vd, tmp, &chan->vc.desc_issued, node) {
+                vd_completed = is_desc_completed(vd);
                 dev_dbg(&chan->vc.chan.dev->device,
-                        "%s(): checking txd %p[%x]: completed=%d\n",
-                        __func__, vd, vd->tx.cookie, is_desc_completed(vd));
+                        "%s(): checking txd %p[%x]: completed=%d dcsr=0x%x\n",
+                        __func__, vd, vd->tx.cookie, vd_completed,
+                        dcsr);
                 last_started = vd->tx.cookie;
                 if (to_pxad_sw_desc(vd)->cyclic) {
                         vchan_cyclic_callback(vd);
                         break;
                 }
-                if (is_desc_completed(vd)) {
+                if (vd_completed) {
                         list_del(&vd->node);
                         vchan_cookie_complete(vd);
                 } else {
diff --git a/drivers/dma/sh/usb-dmac.c b/drivers/dma/sh/usb-dmac.c
index 749f1bd5d65d..06ecdc38cee0 100644
--- a/drivers/dma/sh/usb-dmac.c
+++ b/drivers/dma/sh/usb-dmac.c
@@ -600,27 +600,30 @@ static irqreturn_t usb_dmac_isr_channel(int irq, void *dev)
 {
         struct usb_dmac_chan *chan = dev;
         irqreturn_t ret = IRQ_NONE;
-        u32 mask = USB_DMACHCR_TE;
-        u32 check_bits = USB_DMACHCR_TE | USB_DMACHCR_SP;
+        u32 mask = 0;
         u32 chcr;
+        bool xfer_end = false;
 
         spin_lock(&chan->vc.lock);
 
         chcr = usb_dmac_chan_read(chan, USB_DMACHCR);
-        if (chcr & check_bits)
-                mask |= USB_DMACHCR_DE | check_bits;
+        if (chcr & (USB_DMACHCR_TE | USB_DMACHCR_SP)) {
+                mask |= USB_DMACHCR_DE | USB_DMACHCR_TE | USB_DMACHCR_SP;
+                if (chcr & USB_DMACHCR_DE)
+                        xfer_end = true;
+                ret |= IRQ_HANDLED;
+        }
         if (chcr & USB_DMACHCR_NULL) {
                 /* An interruption of TE will happen after we set FTE */
                 mask |= USB_DMACHCR_NULL;
                 chcr |= USB_DMACHCR_FTE;
                 ret |= IRQ_HANDLED;
         }
-        usb_dmac_chan_write(chan, USB_DMACHCR, chcr & ~mask);
+        if (mask)
+                usb_dmac_chan_write(chan, USB_DMACHCR, chcr & ~mask);
 
-        if (chcr & check_bits) {
+        if (xfer_end)
                 usb_dmac_isr_transfer_end(chan);
-                ret |= IRQ_HANDLED;
-        }
 
         spin_unlock(&chan->vc.lock);
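The sh/usb-dmac.c hunk above (truncated in this listing) acknowledges TE/SP in any case but only treats the transfer as finished when DE was still set, and it skips the CHCR write entirely when there is nothing to clear. A standalone sketch of that decision with stand-in bit values (the real USB_DMACHCR_* masks live in drivers/dma/sh/usb-dmac.c):

#include <stdbool.h>
#include <stdio.h>

/* Stand-in bit values for illustration only; not the hardware definitions. */
#define CHCR_DE 0x01
#define CHCR_TE 0x02
#define CHCR_SP 0x04

static bool xfer_end(unsigned int chcr)
{
        /* New logic: TE/SP alone is acknowledged, but the transfer is only
         * treated as finished if DE had not been cleared yet. */
        return (chcr & (CHCR_TE | CHCR_SP)) && (chcr & CHCR_DE);
}

int main(void)
{
        unsigned int cases[] = { CHCR_TE, CHCR_TE | CHCR_DE, CHCR_SP, 0 };

        for (unsigned int i = 0; i < sizeof(cases) / sizeof(cases[0]); i++)
                printf("chcr=0x%02x -> transfer end: %d\n",
                       cases[i], xfer_end(cases[i]));
        return 0;
}
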