Diffstat (limited to 'drivers/dma')
-rw-r--r--  drivers/dma/at_hdmac.c          |  11
-rw-r--r--  drivers/dma/at_xdmac.c          |   8
-rw-r--r--  drivers/dma/coh901318.c         |   9
-rw-r--r--  drivers/dma/cppi41.c            |   2
-rw-r--r--  drivers/dma/dmaengine.h         |  84
-rw-r--r--  drivers/dma/dw/core.c           |  14
-rw-r--r--  drivers/dma/ep93xx_dma.c        |  10
-rw-r--r--  drivers/dma/fsl_raid.c          |  10
-rw-r--r--  drivers/dma/fsldma.c            |   6
-rw-r--r--  drivers/dma/imx-dma.c           |   4
-rw-r--r--  drivers/dma/imx-sdma.c          |   7
-rw-r--r--  drivers/dma/ioat/dma.c          | 213
-rw-r--r--  drivers/dma/ioat/registers.h    |   2
-rw-r--r--  drivers/dma/iop-adma.c          |   3
-rw-r--r--  drivers/dma/ipu/ipu_idmac.c     |  18
-rw-r--r--  drivers/dma/mic_x100_dma.c      |   6
-rw-r--r--  drivers/dma/mmp_pdma.c          |  14
-rw-r--r--  drivers/dma/mmp_tdma.c          |   4
-rw-r--r--  drivers/dma/mpc512x_dma.c       |   3
-rw-r--r--  drivers/dma/mv_xor.c            |   5
-rw-r--r--  drivers/dma/mxs-dma.c           |   3
-rw-r--r--  drivers/dma/nbpfaxi.c           |   9
-rw-r--r--  drivers/dma/pch_dma.c           |   7
-rw-r--r--  drivers/dma/pl330.c             |  10
-rw-r--r--  drivers/dma/ppc4xx/adma.c       |   5
-rw-r--r--  drivers/dma/qcom/hidma.c        |  57
-rw-r--r--  drivers/dma/qcom/hidma.h        |   2
-rw-r--r--  drivers/dma/qcom/hidma_ll.c     |  32
-rw-r--r--  drivers/dma/sh/rcar-dmac.c      |  16
-rw-r--r--  drivers/dma/sh/shdma-base.c     |  12
-rw-r--r--  drivers/dma/sirf-dma.c          |   7
-rw-r--r--  drivers/dma/ste_dma40.c         |  10
-rw-r--r--  drivers/dma/tegra20-apb-dma.c   |  10
-rw-r--r--  drivers/dma/timb_dma.c          |   9
-rw-r--r--  drivers/dma/txx9dmac.c          |   9
-rw-r--r--  drivers/dma/virt-dma.c          |  17
-rw-r--r--  drivers/dma/xgene-dma.c         |   3
-rw-r--r--  drivers/dma/xilinx/xilinx_dma.c |  10
38 files changed, 419 insertions, 242 deletions
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
index 53d22eb73b56..a4c8f80db29d 100644
--- a/drivers/dma/at_hdmac.c
+++ b/drivers/dma/at_hdmac.c
@@ -473,15 +473,11 @@ atc_chain_complete(struct at_dma_chan *atchan, struct at_desc *desc)
/* for cyclic transfers,
* no need to replay callback function while stopping */
if (!atc_chan_is_cyclic(atchan)) {
- dma_async_tx_callback callback = txd->callback;
- void *param = txd->callback_param;
-
/*
* The API requires that no submissions are done from a
* callback, so we don't need to drop the lock here
*/
- if (callback)
- callback(param);
+ dmaengine_desc_get_callback_invoke(txd, NULL);
}
dma_run_dependencies(txd);
@@ -598,15 +594,12 @@ static void atc_handle_cyclic(struct at_dma_chan *atchan)
{
struct at_desc *first = atc_first_active(atchan);
struct dma_async_tx_descriptor *txd = &first->txd;
- dma_async_tx_callback callback = txd->callback;
- void *param = txd->callback_param;
dev_vdbg(chan2dev(&atchan->chan_common),
"new cyclic period llp 0x%08x\n",
channel_readl(atchan, DSCR));
- if (callback)
- callback(param);
+ dmaengine_desc_get_callback_invoke(txd, NULL);
}
/*-- IRQ & Tasklet ---------------------------------------------------*/
diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c
index e434ffe7bc5c..2badc57a7f31 100644
--- a/drivers/dma/at_xdmac.c
+++ b/drivers/dma/at_xdmac.c
@@ -1572,8 +1572,8 @@ static void at_xdmac_handle_cyclic(struct at_xdmac_chan *atchan)
desc = list_first_entry(&atchan->xfers_list, struct at_xdmac_desc, xfer_node);
txd = &desc->tx_dma_desc;
- if (txd->callback && (txd->flags & DMA_PREP_INTERRUPT))
- txd->callback(txd->callback_param);
+ if (txd->flags & DMA_PREP_INTERRUPT)
+ dmaengine_desc_get_callback_invoke(txd, NULL);
}
static void at_xdmac_tasklet(unsigned long data)
@@ -1616,8 +1616,8 @@ static void at_xdmac_tasklet(unsigned long data)
if (!at_xdmac_chan_is_cyclic(atchan)) {
dma_cookie_complete(txd);
- if (txd->callback && (txd->flags & DMA_PREP_INTERRUPT))
- txd->callback(txd->callback_param);
+ if (txd->flags & DMA_PREP_INTERRUPT)
+ dmaengine_desc_get_callback_invoke(txd, NULL);
}
dma_run_dependencies(txd);
diff --git a/drivers/dma/coh901318.c b/drivers/dma/coh901318.c
index 472be1d09586..74794c9859f6 100644
--- a/drivers/dma/coh901318.c
+++ b/drivers/dma/coh901318.c
@@ -1875,8 +1875,7 @@ static void dma_tasklet(unsigned long data)
struct coh901318_chan *cohc = (struct coh901318_chan *) data;
struct coh901318_desc *cohd_fin;
unsigned long flags;
- dma_async_tx_callback callback;
- void *callback_param;
+ struct dmaengine_desc_callback cb;
dev_vdbg(COHC_2_DEV(cohc), "[%s] chan_id %d"
" nbr_active_done %ld\n", __func__,
@@ -1891,8 +1890,7 @@ static void dma_tasklet(unsigned long data)
goto err;
/* locate callback to client */
- callback = cohd_fin->desc.callback;
- callback_param = cohd_fin->desc.callback_param;
+ dmaengine_desc_get_callback(&cohd_fin->desc, &cb);
/* sign this job as completed on the channel */
dma_cookie_complete(&cohd_fin->desc);
@@ -1907,8 +1905,7 @@ static void dma_tasklet(unsigned long data)
spin_unlock_irqrestore(&cohc->lock, flags);
/* Call the callback when we're done */
- if (callback)
- callback(callback_param);
+ dmaengine_desc_callback_invoke(&cb, NULL);
spin_lock_irqsave(&cohc->lock, flags);
diff --git a/drivers/dma/cppi41.c b/drivers/dma/cppi41.c
index 97f4d6c1b6b9..bac5f023013b 100644
--- a/drivers/dma/cppi41.c
+++ b/drivers/dma/cppi41.c
@@ -336,7 +336,7 @@ static irqreturn_t cppi41_irq(int irq, void *data)
c->residue = pd_trans_len(c->desc->pd6) - len;
dma_cookie_complete(&c->txd);
- c->txd.callback(c->txd.callback_param);
+ dmaengine_desc_get_callback_invoke(&c->txd, NULL);
/* Paired with cppi41_dma_issue_pending */
pm_runtime_mark_last_busy(cdd->ddev.dev);
diff --git a/drivers/dma/dmaengine.h b/drivers/dma/dmaengine.h
index 17f983a4e9ba..882ff9448c3b 100644
--- a/drivers/dma/dmaengine.h
+++ b/drivers/dma/dmaengine.h
@@ -86,4 +86,88 @@ static inline void dma_set_residue(struct dma_tx_state *state, u32 residue)
state->residue = residue;
}
+struct dmaengine_desc_callback {
+ dma_async_tx_callback callback;
+ dma_async_tx_callback_result callback_result;
+ void *callback_param;
+};
+
+/**
+ * dmaengine_desc_get_callback - get the passed in callback function
+ * @tx: tx descriptor
+ * @cb: temp struct to hold the callback info
+ *
+ * Fill the passed-in cb struct with what's available in the passed-in
+ * tx descriptor struct.
+ * No locking is required.
+ */
+static inline void
+dmaengine_desc_get_callback(struct dma_async_tx_descriptor *tx,
+ struct dmaengine_desc_callback *cb)
+{
+ cb->callback = tx->callback;
+ cb->callback_result = tx->callback_result;
+ cb->callback_param = tx->callback_param;
+}
+
+/**
+ * dmaengine_desc_callback_invoke - call the callback function in cb struct
+ * @cb: temp struct that is holding the callback info
+ * @result: transaction result
+ *
+ * Call the callback function provided in the cb struct with the parameter
+ * in the cb struct.
+ * Locking is dependent on the driver.
+ */
+static inline void
+dmaengine_desc_callback_invoke(struct dmaengine_desc_callback *cb,
+ const struct dmaengine_result *result)
+{
+ struct dmaengine_result dummy_result = {
+ .result = DMA_TRANS_NOERROR,
+ .residue = 0
+ };
+
+ if (cb->callback_result) {
+ if (!result)
+ result = &dummy_result;
+ cb->callback_result(cb->callback_param, result);
+ } else if (cb->callback) {
+ cb->callback(cb->callback_param);
+ }
+}
+
+/**
+ * dmaengine_desc_get_callback_invoke - get the callback in tx descriptor and
+ * then immediately call the callback.
+ * @tx: dma async tx descriptor
+ * @result: transaction result
+ *
+ * Call dmaengine_desc_get_callback() and dmaengine_desc_callback_invoke()
+ * in a single function since no work is necessary in between for the driver.
+ * Locking is dependent on the driver.
+ */
+static inline void
+dmaengine_desc_get_callback_invoke(struct dma_async_tx_descriptor *tx,
+ const struct dmaengine_result *result)
+{
+ struct dmaengine_desc_callback cb;
+
+ dmaengine_desc_get_callback(tx, &cb);
+ dmaengine_desc_callback_invoke(&cb, result);
+}
+
+/**
+ * dmaengine_desc_callback_valid - verify the callback is valid in cb
+ * @cb: callback info struct
+ *
+ * Return true if the callback in cb is set, false otherwise.
+ * No locking is required.
+ */
+static inline bool
+dmaengine_desc_callback_valid(struct dmaengine_desc_callback *cb)
+{
+ return (cb->callback) ? true : false;
+}
+
#endif
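
For reference, a minimal sketch of the completion-path pattern these helpers enable, mirroring the pl330 and xilinx_dma conversions later in this diff. The my_chan/my_desc types and my_chan_tasklet() are hypothetical and not part of this patch; only the dmaengine_desc_* helpers and dma_cookie_complete() come from the headers above:

#include <linux/dmaengine.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include "dmaengine.h"

/* hypothetical driver types, for illustration only */
struct my_desc {
	struct dma_async_tx_descriptor txd;
	struct list_head node;
};

struct my_chan {
	spinlock_t lock;
	struct list_head done_list;
};

static void my_chan_tasklet(unsigned long data)
{
	struct my_chan *chan = (struct my_chan *)data;
	struct dmaengine_desc_callback cb;
	struct my_desc *desc;
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);
	while (!list_empty(&chan->done_list)) {
		desc = list_first_entry(&chan->done_list, struct my_desc, node);
		list_del(&desc->node);
		dma_cookie_complete(&desc->txd);

		/* snapshot callback/param while the descriptor is still owned */
		dmaengine_desc_get_callback(&desc->txd, &cb);

		/* never invoke a client callback with the channel lock held */
		spin_unlock_irqrestore(&chan->lock, flags);
		if (dmaengine_desc_callback_valid(&cb))
			dmaengine_desc_callback_invoke(&cb, NULL); /* NULL => DMA_TRANS_NOERROR */
		spin_lock_irqsave(&chan->lock, flags);
	}
	spin_unlock_irqrestore(&chan->lock, flags);
}
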
diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c
index edf053f73a49..12eedd457193 100644
--- a/drivers/dma/dw/core.c
+++ b/drivers/dma/dw/core.c
@@ -270,20 +270,19 @@ static void
dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc,
bool callback_required)
{
- dma_async_tx_callback callback = NULL;
- void *param = NULL;
struct dma_async_tx_descriptor *txd = &desc->txd;
struct dw_desc *child;
unsigned long flags;
+ struct dmaengine_desc_callback cb;
dev_vdbg(chan2dev(&dwc->chan), "descriptor %u complete\n", txd->cookie);
spin_lock_irqsave(&dwc->lock, flags);
dma_cookie_complete(txd);
- if (callback_required) {
- callback = txd->callback;
- param = txd->callback_param;
- }
+ if (callback_required)
+ dmaengine_desc_get_callback(txd, &cb);
+ else
+ memset(&cb, 0, sizeof(cb));
/* async_tx_ack */
list_for_each_entry(child, &desc->tx_list, desc_node)
@@ -292,8 +291,7 @@ dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc,
dwc_desc_put(dwc, desc);
spin_unlock_irqrestore(&dwc->lock, flags);
- if (callback)
- callback(param);
+ dmaengine_desc_callback_invoke(&cb, NULL);
}
static void dwc_complete_all(struct dw_dma *dw, struct dw_dma_chan *dwc)
diff --git a/drivers/dma/ep93xx_dma.c b/drivers/dma/ep93xx_dma.c
index ca17e8751af2..d37e8dda8079 100644
--- a/drivers/dma/ep93xx_dma.c
+++ b/drivers/dma/ep93xx_dma.c
@@ -737,10 +737,10 @@ static void ep93xx_dma_tasklet(unsigned long data)
{
struct ep93xx_dma_chan *edmac = (struct ep93xx_dma_chan *)data;
struct ep93xx_dma_desc *desc, *d;
- dma_async_tx_callback callback = NULL;
- void *callback_param = NULL;
+ struct dmaengine_desc_callback cb;
LIST_HEAD(list);
+ memset(&cb, 0, sizeof(cb));
spin_lock_irq(&edmac->lock);
/*
* If dma_terminate_all() was called before we get to run, the active
@@ -755,8 +755,7 @@ static void ep93xx_dma_tasklet(unsigned long data)
dma_cookie_complete(&desc->txd);
list_splice_init(&edmac->active, &list);
}
- callback = desc->txd.callback;
- callback_param = desc->txd.callback_param;
+ dmaengine_desc_get_callback(&desc->txd, &cb);
}
spin_unlock_irq(&edmac->lock);
@@ -769,8 +768,7 @@ static void ep93xx_dma_tasklet(unsigned long data)
ep93xx_dma_desc_put(edmac, desc);
}
- if (callback)
- callback(callback_param);
+ dmaengine_desc_callback_invoke(&cb, NULL);
}
static irqreturn_t ep93xx_dma_interrupt(int irq, void *dev_id)
diff --git a/drivers/dma/fsl_raid.c b/drivers/dma/fsl_raid.c
index 496ff8e7d7f9..40c58ae80660 100644
--- a/drivers/dma/fsl_raid.c
+++ b/drivers/dma/fsl_raid.c
@@ -134,16 +134,8 @@ static void fsl_re_issue_pending(struct dma_chan *chan)
static void fsl_re_desc_done(struct fsl_re_desc *desc)
{
- dma_async_tx_callback callback;
- void *callback_param;
-
dma_cookie_complete(&desc->async_tx);
-
- callback = desc->async_tx.callback;
- callback_param = desc->async_tx.callback_param;
- if (callback)
- callback(callback_param);
-
+ dmaengine_desc_get_callback_invoke(&desc->async_tx, NULL);
dma_descriptor_unmap(&desc->async_tx);
}
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c
index 6ccb787ba56d..87f6ab222d8c 100644
--- a/drivers/dma/fsldma.c
+++ b/drivers/dma/fsldma.c
@@ -517,11 +517,7 @@ static dma_cookie_t fsldma_run_tx_complete_actions(struct fsldma_chan *chan,
ret = txd->cookie;
/* Run the link descriptor callback function */
- if (txd->callback) {
- chan_dbg(chan, "LD %p callback\n", desc);
- txd->callback(txd->callback_param);
- }
-
+ dmaengine_desc_get_callback_invoke(txd, NULL);
dma_descriptor_unmap(txd);
}
diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c
index a960608c0a4d..ab0fb804fb1e 100644
--- a/drivers/dma/imx-dma.c
+++ b/drivers/dma/imx-dma.c
@@ -663,9 +663,7 @@ static void imxdma_tasklet(unsigned long data)
out:
spin_unlock_irqrestore(&imxdma->lock, flags);
- if (desc->desc.callback)
- desc->desc.callback(desc->desc.callback_param);
-
+ dmaengine_desc_get_callback_invoke(&desc->desc, NULL);
}
static int imxdma_terminate_all(struct dma_chan *chan)
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index 03ec76fc22ff..624facb6c8f4 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -650,8 +650,7 @@ static void sdma_event_disable(struct sdma_channel *sdmac, unsigned int event)
static void sdma_handle_channel_loop(struct sdma_channel *sdmac)
{
- if (sdmac->desc.callback)
- sdmac->desc.callback(sdmac->desc.callback_param);
+ dmaengine_desc_get_callback_invoke(&sdmac->desc, NULL);
}
static void sdma_update_channel_loop(struct sdma_channel *sdmac)
@@ -701,8 +700,8 @@ static void mxc_sdma_handle_channel_normal(struct sdma_channel *sdmac)
sdmac->status = DMA_COMPLETE;
dma_cookie_complete(&sdmac->desc);
- if (sdmac->desc.callback)
- sdmac->desc.callback(sdmac->desc.callback_param);
+
+ dmaengine_desc_get_callback_invoke(&sdmac->desc, NULL);
}
static void sdma_tasklet(unsigned long data)
diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c
index bd09961443b1..49386ce04bf5 100644
--- a/drivers/dma/ioat/dma.c
+++ b/drivers/dma/ioat/dma.c
@@ -38,8 +38,54 @@
#include "../dmaengine.h"
+static char *chanerr_str[] = {
+ "DMA Transfer Destination Address Error",
+ "Next Descriptor Address Error",
+ "Descriptor Error",
+ "Chan Address Value Error",
+ "CHANCMD Error",
+ "Chipset Uncorrectable Data Integrity Error",
+ "DMA Uncorrectable Data Integrity Error",
+ "Read Data Error",
+ "Write Data Error",
+ "Descriptor Control Error",
+ "Descriptor Transfer Size Error",
+ "Completion Address Error",
+ "Interrupt Configuration Error",
+ "Super extended descriptor Address Error",
+ "Unaffiliated Error",
+ "CRC or XOR P Error",
+ "XOR Q Error",
+ "Descriptor Count Error",
+ "DIF All F detect Error",
+ "Guard Tag verification Error",
+ "Application Tag verification Error",
+ "Reference Tag verification Error",
+ "Bundle Bit Error",
+ "Result DIF All F detect Error",
+ "Result Guard Tag verification Error",
+ "Result Application Tag verification Error",
+ "Result Reference Tag verification Error",
+ NULL
+};
+
static void ioat_eh(struct ioatdma_chan *ioat_chan);
+static void ioat_print_chanerrs(struct ioatdma_chan *ioat_chan, u32 chanerr)
+{
+ int i;
+
+ for (i = 0; i < 32; i++) {
+ if ((chanerr >> i) & 1) {
+ if (chanerr_str[i]) {
+ dev_err(to_dev(ioat_chan), "Err(%d): %s\n",
+ i, chanerr_str[i]);
+ } else
+ break;
+ }
+ }
+}
+
/**
* ioat_dma_do_interrupt - handler used for single vector interrupt mode
* @irq: interrupt id
@@ -568,12 +614,14 @@ static void __cleanup(struct ioatdma_chan *ioat_chan, dma_addr_t phys_complete)
tx = &desc->txd;
if (tx->cookie) {
+ struct dmaengine_result res;
+
dma_cookie_complete(tx);
dma_descriptor_unmap(tx);
- if (tx->callback) {
- tx->callback(tx->callback_param);
- tx->callback = NULL;
- }
+ res.result = DMA_TRANS_NOERROR;
+ dmaengine_desc_get_callback_invoke(tx, &res);
+ tx->callback = NULL;
+ tx->callback_result = NULL;
}
if (tx->phys == phys_complete)
@@ -622,7 +670,8 @@ static void ioat_cleanup(struct ioatdma_chan *ioat_chan)
if (is_ioat_halted(*ioat_chan->completion)) {
u32 chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
- if (chanerr & IOAT_CHANERR_HANDLE_MASK) {
+ if (chanerr &
+ (IOAT_CHANERR_HANDLE_MASK | IOAT_CHANERR_RECOVER_MASK)) {
mod_timer(&ioat_chan->timer, jiffies + IDLE_TIMEOUT);
ioat_eh(ioat_chan);
}
@@ -652,6 +701,61 @@ static void ioat_restart_channel(struct ioatdma_chan *ioat_chan)
__ioat_restart_chan(ioat_chan);
}
+
+static void ioat_abort_descs(struct ioatdma_chan *ioat_chan)
+{
+ struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma;
+ struct ioat_ring_ent *desc;
+ u16 active;
+ int idx = ioat_chan->tail, i;
+
+ /*
+ * We assume that the failed descriptor has been processed.
+ * Now we are just returning all the remaining submitted
+ * descriptors to abort.
+ */
+ active = ioat_ring_active(ioat_chan);
+
+ /* we skip the failed descriptor that tail points to */
+ for (i = 1; i < active; i++) {
+ struct dma_async_tx_descriptor *tx;
+
+ smp_read_barrier_depends();
+ prefetch(ioat_get_ring_ent(ioat_chan, idx + i + 1));
+ desc = ioat_get_ring_ent(ioat_chan, idx + i);
+
+ tx = &desc->txd;
+ if (tx->cookie) {
+ struct dmaengine_result res;
+
+ dma_cookie_complete(tx);
+ dma_descriptor_unmap(tx);
+ res.result = DMA_TRANS_ABORTED;
+ dmaengine_desc_get_callback_invoke(tx, &res);
+ tx->callback = NULL;
+ tx->callback_result = NULL;
+ }
+
+ /* skip extended descriptors */
+ if (desc_has_ext(desc)) {
+ WARN_ON(i + 1 >= active);
+ i++;
+ }
+
+ /* cleanup super extended descriptors */
+ if (desc->sed) {
+ ioat_free_sed(ioat_dma, desc->sed);
+ desc->sed = NULL;
+ }
+ }
+
+ smp_mb(); /* finish all descriptor reads before incrementing tail */
+ ioat_chan->tail = idx + active;
+
+ desc = ioat_get_ring_ent(ioat_chan, ioat_chan->tail);
+ ioat_chan->last_completion = *ioat_chan->completion = desc->txd.phys;
+}
+
static void ioat_eh(struct ioatdma_chan *ioat_chan)
{
struct pci_dev *pdev = to_pdev(ioat_chan);
@@ -662,6 +766,8 @@ static void ioat_eh(struct ioatdma_chan *ioat_chan)
u32 err_handled = 0;
u32 chanerr_int;
u32 chanerr;
+ bool abort = false;
+ struct dmaengine_result res;
/* cleanup so tail points to descriptor that caused the error */
if (ioat_cleanup_preamble(ioat_chan, &phys_complete))
@@ -697,30 +803,55 @@ static void ioat_eh(struct ioatdma_chan *ioat_chan)
break;
}
+ if (chanerr & IOAT_CHANERR_RECOVER_MASK) {
+ if (chanerr & IOAT_CHANERR_READ_DATA_ERR) {
+ res.result = DMA_TRANS_READ_FAILED;
+ err_handled |= IOAT_CHANERR_READ_DATA_ERR;
+ } else if (chanerr & IOAT_CHANERR_WRITE_DATA_ERR) {
+ res.result = DMA_TRANS_WRITE_FAILED;
+ err_handled |= IOAT_CHANERR_WRITE_DATA_ERR;
+ }
+
+ abort = true;
+ } else
+ res.result = DMA_TRANS_NOERROR;
+
/* fault on unhandled error or spurious halt */
if (chanerr ^ err_handled || chanerr == 0) {
dev_err(to_dev(ioat_chan), "%s: fatal error (%x:%x)\n",
__func__, chanerr, err_handled);
+ dev_err(to_dev(ioat_chan), "Errors handled:\n");
+ ioat_print_chanerrs(ioat_chan, err_handled);
+ dev_err(to_dev(ioat_chan), "Errors not handled:\n");
+ ioat_print_chanerrs(ioat_chan, (chanerr & ~err_handled));
+
BUG();
- } else { /* cleanup the faulty descriptor */
- tx = &desc->txd;
- if (tx->cookie) {
- dma_cookie_complete(tx);
- dma_descriptor_unmap(tx);
- if (tx->callback) {
- tx->callback(tx->callback_param);
- tx->callback = NULL;
- }
- }
}
- writel(chanerr, ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
- pci_write_config_dword(pdev, IOAT_PCI_CHANERR_INT_OFFSET, chanerr_int);
+ /* cleanup the faulty descriptor since we are continuing */
+ tx = &desc->txd;
+ if (tx->cookie) {
+ dma_cookie_complete(tx);
+ dma_descriptor_unmap(tx);
+ dmaengine_desc_get_callback_invoke(tx, &res);
+ tx->callback = NULL;
+ tx->callback_result = NULL;
+ }
/* mark faulting descriptor as complete */
*ioat_chan->completion = desc->txd.phys;
spin_lock_bh(&ioat_chan->prep_lock);
+ /* we need to abort all descriptors */
+ if (abort) {
+ ioat_abort_descs(ioat_chan);
+ /* clean up the channel, we could be in a weird state */
+ ioat_reset_hw(ioat_chan);
+ }
+
+ writel(chanerr, ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
+ pci_write_config_dword(pdev, IOAT_PCI_CHANERR_INT_OFFSET, chanerr_int);
+
ioat_restart_channel(ioat_chan);
spin_unlock_bh(&ioat_chan->prep_lock);
}
@@ -753,10 +884,28 @@ void ioat_timer_event(unsigned long data)
chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
dev_err(to_dev(ioat_chan), "%s: Channel halted (%x)\n",
__func__, chanerr);
- if (test_bit(IOAT_RUN, &ioat_chan->state))
- BUG_ON(is_ioat_bug(chanerr));
- else /* we never got off the ground */
- return;
+ dev_err(to_dev(ioat_chan), "Errors:\n");
+ ioat_print_chanerrs(ioat_chan, chanerr);
+
+ if (test_bit(IOAT_RUN, &ioat_chan->state)) {
+ spin_lock_bh(&ioat_chan->cleanup_lock);
+ spin_lock_bh(&ioat_chan->prep_lock);
+ set_bit(IOAT_CHAN_DOWN, &ioat_chan->state);
+ spin_unlock_bh(&ioat_chan->prep_lock);
+
+ ioat_abort_descs(ioat_chan);
+ dev_warn(to_dev(ioat_chan), "Reset channel...\n");
+ ioat_reset_hw(ioat_chan);
+ dev_warn(to_dev(ioat_chan), "Restart channel...\n");
+ ioat_restart_channel(ioat_chan);
+
+ spin_lock_bh(&ioat_chan->prep_lock);
+ clear_bit(IOAT_CHAN_DOWN, &ioat_chan->state);
+ spin_unlock_bh(&ioat_chan->prep_lock);
+ spin_unlock_bh(&ioat_chan->cleanup_lock);
+ }
+
+ return;
}
spin_lock_bh(&ioat_chan->cleanup_lock);
@@ -780,14 +929,26 @@ void ioat_timer_event(unsigned long data)
u32 chanerr;
chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
- dev_warn(to_dev(ioat_chan), "Restarting channel...\n");
- dev_warn(to_dev(ioat_chan), "CHANSTS: %#Lx CHANERR: %#x\n",
- status, chanerr);
- dev_warn(to_dev(ioat_chan), "Active descriptors: %d\n",
- ioat_ring_active(ioat_chan));
+ dev_err(to_dev(ioat_chan), "CHANSTS: %#Lx CHANERR: %#x\n",
+ status, chanerr);
+ dev_err(to_dev(ioat_chan), "Errors:\n");
+ ioat_print_chanerrs(ioat_chan, chanerr);
+
+ dev_dbg(to_dev(ioat_chan), "Active descriptors: %d\n",
+ ioat_ring_active(ioat_chan));
spin_lock_bh(&ioat_chan->prep_lock);
+ set_bit(IOAT_CHAN_DOWN, &ioat_chan->state);
+ spin_unlock_bh(&ioat_chan->prep_lock);
+
+ ioat_abort_descs(ioat_chan);
+ dev_warn(to_dev(ioat_chan), "Resetting channel...\n");
+ ioat_reset_hw(ioat_chan);
+ dev_warn(to_dev(ioat_chan), "Restarting channel...\n");
ioat_restart_channel(ioat_chan);
+
+ spin_lock_bh(&ioat_chan->prep_lock);
+ clear_bit(IOAT_CHAN_DOWN, &ioat_chan->state);
spin_unlock_bh(&ioat_chan->prep_lock);
spin_unlock_bh(&ioat_chan->cleanup_lock);
return;
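
With these ioatdma changes the faulting descriptor is completed with DMA_TRANS_READ_FAILED or DMA_TRANS_WRITE_FAILED and everything queued behind it is flushed with DMA_TRANS_ABORTED. A hedged sketch of a client-side handler consuming these results through the new callback_result hook; my_copy_done() and the completion param are hypothetical, the enum values and struct dmaengine_result fields are the ones used above:

#include <linux/completion.h>
#include <linux/dmaengine.h>

/* hypothetical client handler; installed via tx->callback_result
 * (instead of tx->callback) before dmaengine_submit() */
static void my_copy_done(void *param, const struct dmaengine_result *result)
{
	struct completion *done = param;

	switch (result->result) {
	case DMA_TRANS_NOERROR:
		break;
	case DMA_TRANS_ABORTED:
		/* flushed because an earlier descriptor faulted */
		pr_warn("copy aborted before it ran\n");
		break;
	case DMA_TRANS_READ_FAILED:
	case DMA_TRANS_WRITE_FAILED:
		pr_err("copy faulted, residue %u\n", result->residue);
		break;
	default:
		break;
	}
	complete(done);
}
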
diff --git a/drivers/dma/ioat/registers.h b/drivers/dma/ioat/registers.h
index 70534981a49b..48fa4cf9f64a 100644
--- a/drivers/dma/ioat/registers.h
+++ b/drivers/dma/ioat/registers.h
@@ -240,6 +240,8 @@
#define IOAT_CHANERR_DESCRIPTOR_COUNT_ERR 0x40000
#define IOAT_CHANERR_HANDLE_MASK (IOAT_CHANERR_XOR_P_OR_CRC_ERR | IOAT_CHANERR_XOR_Q_ERR)
+#define IOAT_CHANERR_RECOVER_MASK (IOAT_CHANERR_READ_DATA_ERR | \
+ IOAT_CHANERR_WRITE_DATA_ERR)
#define IOAT_CHANERR_MASK_OFFSET 0x2C /* 32-bit Channel Error Register */
diff --git a/drivers/dma/iop-adma.c b/drivers/dma/iop-adma.c
index f039cfadf17b..a410657f7bcd 100644
--- a/drivers/dma/iop-adma.c
+++ b/drivers/dma/iop-adma.c
@@ -71,8 +71,7 @@ iop_adma_run_tx_complete_actions(struct iop_adma_desc_slot *desc,
/* call the callback (must not sleep or submit new
* operations to this channel)
*/
- if (tx->callback)
- tx->callback(tx->callback_param);
+ dmaengine_desc_get_callback_invoke(tx, NULL);
dma_descriptor_unmap(tx);
if (desc->group_head)
diff --git a/drivers/dma/ipu/ipu_idmac.c b/drivers/dma/ipu/ipu_idmac.c
index b54f62de9232..ed76044ce4b9 100644
--- a/drivers/dma/ipu/ipu_idmac.c
+++ b/drivers/dma/ipu/ipu_idmac.c
@@ -1160,11 +1160,10 @@ static irqreturn_t idmac_interrupt(int irq, void *dev_id)
struct scatterlist **sg, *sgnext, *sgnew = NULL;
/* Next transfer descriptor */
struct idmac_tx_desc *desc, *descnew;
- dma_async_tx_callback callback;
- void *callback_param;
bool done = false;
u32 ready0, ready1, curbuf, err;
unsigned long flags;
+ struct dmaengine_desc_callback cb;
/* IDMAC has cleared the respective BUFx_RDY bit, we manage the buffer */
@@ -1278,12 +1277,12 @@ static irqreturn_t idmac_interrupt(int irq, void *dev_id)
if (likely(sgnew) &&
ipu_submit_buffer(ichan, descnew, sgnew, ichan->active_buffer) < 0) {
- callback = descnew->txd.callback;
- callback_param = descnew->txd.callback_param;
+ dmaengine_desc_get_callback(&descnew->txd, &cb);
+
list_del_init(&descnew->list);
spin_unlock(&ichan->lock);
- if (callback)
- callback(callback_param);
+
+ dmaengine_desc_callback_invoke(&cb, NULL);
spin_lock(&ichan->lock);
}
@@ -1292,13 +1291,12 @@ static irqreturn_t idmac_interrupt(int irq, void *dev_id)
if (done)
dma_cookie_complete(&desc->txd);
- callback = desc->txd.callback;
- callback_param = desc->txd.callback_param;
+ dmaengine_desc_get_callback(&desc->txd, &cb);
spin_unlock(&ichan->lock);
- if (done && (desc->txd.flags & DMA_PREP_INTERRUPT) && callback)
- callback(callback_param);
+ if (done && (desc->txd.flags & DMA_PREP_INTERRUPT))
+ dmaengine_desc_callback_invoke(&cb, NULL);
return IRQ_HANDLED;
}
diff --git a/drivers/dma/mic_x100_dma.c b/drivers/dma/mic_x100_dma.c
index 1502b24b7c7d..818255844a3c 100644
--- a/drivers/dma/mic_x100_dma.c
+++ b/drivers/dma/mic_x100_dma.c
@@ -104,10 +104,8 @@ static void mic_dma_cleanup(struct mic_dma_chan *ch)
tx = &ch->tx_array[last_tail];
if (tx->cookie) {
dma_cookie_complete(tx);
- if (tx->callback) {
- tx->callback(tx->callback_param);
- tx->callback = NULL;
- }
+ dmaengine_desc_get_callback_invoke(tx, NULL);
+ tx->callback = NULL;
}
last_tail = mic_dma_hw_ring_inc(last_tail);
}
diff --git a/drivers/dma/mmp_pdma.c b/drivers/dma/mmp_pdma.c
index f4b25fb0d040..eb3a1f42ab06 100644
--- a/drivers/dma/mmp_pdma.c
+++ b/drivers/dma/mmp_pdma.c
@@ -864,19 +864,15 @@ static void dma_do_tasklet(unsigned long data)
struct mmp_pdma_desc_sw *desc, *_desc;
LIST_HEAD(chain_cleanup);
unsigned long flags;
+ struct dmaengine_desc_callback cb;
if (chan->cyclic_first) {
- dma_async_tx_callback cb = NULL;
- void *cb_data = NULL;
-
spin_lock_irqsave(&chan->desc_lock, flags);
desc = chan->cyclic_first;
- cb = desc->async_tx.callback;
- cb_data = desc->async_tx.callback_param;
+ dmaengine_desc_get_callback(&desc->async_tx, &cb);
spin_unlock_irqrestore(&chan->desc_lock, flags);
- if (cb)
- cb(cb_data);
+ dmaengine_desc_callback_invoke(&cb, NULL);
return;
}
@@ -921,8 +917,8 @@ static void dma_do_tasklet(unsigned long data)
/* Remove from the list of transactions */
list_del(&desc->node);
/* Run the link descriptor callback function */
- if (txd->callback)
- txd->callback(txd->callback_param);
+ dmaengine_desc_get_callback(txd, &cb);
+ dmaengine_desc_callback_invoke(&cb, NULL);
dma_pool_free(chan->desc_pool, desc, txd->phys);
}
diff --git a/drivers/dma/mmp_tdma.c b/drivers/dma/mmp_tdma.c
index d7422b1bf406..13c68b6434ce 100644
--- a/drivers/dma/mmp_tdma.c
+++ b/drivers/dma/mmp_tdma.c
@@ -349,9 +349,7 @@ static void dma_do_tasklet(unsigned long data)
{
struct mmp_tdma_chan *tdmac = (struct mmp_tdma_chan *)data;
- if (tdmac->desc.callback)
- tdmac->desc.callback(tdmac->desc.callback_param);
-
+ dmaengine_desc_get_callback_invoke(&tdmac->desc, NULL);
}
static void mmp_tdma_free_descriptor(struct mmp_tdma_chan *tdmac)
diff --git a/drivers/dma/mpc512x_dma.c b/drivers/dma/mpc512x_dma.c
index 9dd99ba18fce..dde713461a95 100644
--- a/drivers/dma/mpc512x_dma.c
+++ b/drivers/dma/mpc512x_dma.c
@@ -411,8 +411,7 @@ static void mpc_dma_process_completed(struct mpc_dma *mdma)
list_for_each_entry(mdesc, &list, node) {
desc = &mdesc->desc;
- if (desc->callback)
- desc->callback(desc->callback_param);
+ dmaengine_desc_get_callback_invoke(desc, NULL);
last_cookie = desc->cookie;
dma_run_dependencies(desc);
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
index f4c9f98ec35e..f8b5e7424b3a 100644
--- a/drivers/dma/mv_xor.c
+++ b/drivers/dma/mv_xor.c
@@ -209,10 +209,7 @@ mv_desc_run_tx_complete_actions(struct mv_xor_desc_slot *desc,
/* call the callback (must not sleep or submit new
* operations to this channel)
*/
- if (desc->async_tx.callback)
- desc->async_tx.callback(
- desc->async_tx.callback_param);
-
+ dmaengine_desc_get_callback_invoke(&desc->async_tx, NULL);
dma_descriptor_unmap(&desc->async_tx);
}
diff --git a/drivers/dma/mxs-dma.c b/drivers/dma/mxs-dma.c
index 60de35251da5..50e64e113ffb 100644
--- a/drivers/dma/mxs-dma.c
+++ b/drivers/dma/mxs-dma.c
@@ -326,8 +326,7 @@ static void mxs_dma_tasklet(unsigned long data)
{
struct mxs_dma_chan *mxs_chan = (struct mxs_dma_chan *) data;
- if (mxs_chan->desc.callback)
- mxs_chan->desc.callback(mxs_chan->desc.callback_param);
+ dmaengine_desc_get_callback_invoke(&mxs_chan->desc, NULL);
}
static int mxs_dma_irq_to_chan(struct mxs_dma_engine *mxs_dma, int irq)
diff --git a/drivers/dma/nbpfaxi.c b/drivers/dma/nbpfaxi.c
index 08c45c185549..09de71519d37 100644
--- a/drivers/dma/nbpfaxi.c
+++ b/drivers/dma/nbpfaxi.c
@@ -1102,8 +1102,7 @@ static void nbpf_chan_tasklet(unsigned long data)
{
struct nbpf_channel *chan = (struct nbpf_channel *)data;
struct nbpf_desc *desc, *tmp;
- dma_async_tx_callback callback;
- void *param;
+ struct dmaengine_desc_callback cb;
while (!list_empty(&chan->done)) {
bool found = false, must_put, recycling = false;
@@ -1151,14 +1150,12 @@ static void nbpf_chan_tasklet(unsigned long data)
must_put = false;
}
- callback = desc->async_tx.callback;
- param = desc->async_tx.callback_param;
+ dmaengine_desc_get_callback(&desc->async_tx, &cb);
/* ack and callback completed descriptor */
spin_unlock_irq(&chan->lock);
- if (callback)
- callback(param);
+ dmaengine_desc_callback_invoke(&cb, NULL);
if (must_put)
nbpf_desc_put(desc);
diff --git a/drivers/dma/pch_dma.c b/drivers/dma/pch_dma.c
index 113605f6fe20..df95727dc2fb 100644
--- a/drivers/dma/pch_dma.c
+++ b/drivers/dma/pch_dma.c
@@ -357,14 +357,13 @@ static void pdc_chain_complete(struct pch_dma_chan *pd_chan,
struct pch_dma_desc *desc)
{
struct dma_async_tx_descriptor *txd = &desc->txd;
- dma_async_tx_callback callback = txd->callback;
- void *param = txd->callback_param;
+ struct dmaengine_desc_callback cb;
+ dmaengine_desc_get_callback(txd, &cb);
list_splice_init(&desc->tx_list, &pd_chan->free_list);
list_move(&desc->desc_node, &pd_chan->free_list);
- if (callback)
- callback(param);
+ dmaengine_desc_callback_invoke(&cb, NULL);
}
static void pdc_complete_all(struct pch_dma_chan *pd_chan)
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index 4fc3ffbd5ca0..1ecd4674aa23 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -2039,14 +2039,12 @@ static void pl330_tasklet(unsigned long data)
}
while (!list_empty(&pch->completed_list)) {
- dma_async_tx_callback callback;
- void *callback_param;
+ struct dmaengine_desc_callback cb;
desc = list_first_entry(&pch->completed_list,
struct dma_pl330_desc, node);
- callback = desc->txd.callback;
- callback_param = desc->txd.callback_param;
+ dmaengine_desc_get_callback(&desc->txd, &cb);
if (pch->cyclic) {
desc->status = PREP;
@@ -2064,9 +2062,9 @@ static void pl330_tasklet(unsigned long data)
dma_descriptor_unmap(&desc->txd);
- if (callback) {
+ if (dmaengine_desc_callback_valid(&cb)) {
spin_unlock_irqrestore(&pch->lock, flags);
- callback(callback_param);
+ dmaengine_desc_callback_invoke(&cb, NULL);
spin_lock_irqsave(&pch->lock, flags);
}
}
diff --git a/drivers/dma/ppc4xx/adma.c b/drivers/dma/ppc4xx/adma.c
index 66bd96724b2f..d45da34a0568 100644
--- a/drivers/dma/ppc4xx/adma.c
+++ b/drivers/dma/ppc4xx/adma.c
@@ -1485,10 +1485,7 @@ static dma_cookie_t ppc440spe_adma_run_tx_complete_actions(
/* call the callback (must not sleep or submit new
* operations to this channel)
*/
- if (desc->async_tx.callback)
- desc->async_tx.callback(
- desc->async_tx.callback_param);
-
+ dmaengine_desc_get_callback_invoke(&desc->async_tx, NULL);
dma_descriptor_unmap(&desc->async_tx);
}
diff --git a/drivers/dma/qcom/hidma.c b/drivers/dma/qcom/hidma.c
index b2374cd91e45..e244e10a94b5 100644
--- a/drivers/dma/qcom/hidma.c
+++ b/drivers/dma/qcom/hidma.c
@@ -111,6 +111,7 @@ static void hidma_process_completed(struct hidma_chan *mchan)
struct dma_async_tx_descriptor *desc;
dma_cookie_t last_cookie;
struct hidma_desc *mdesc;
+ struct hidma_desc *next;
unsigned long irqflags;
struct list_head list;
@@ -122,28 +123,36 @@ static void hidma_process_completed(struct hidma_chan *mchan)
spin_unlock_irqrestore(&mchan->lock, irqflags);
/* Execute callbacks and run dependencies */
- list_for_each_entry(mdesc, &list, node) {
+ list_for_each_entry_safe(mdesc, next, &list, node) {
enum dma_status llstat;
+ struct dmaengine_desc_callback cb;
+ struct dmaengine_result result;
desc = &mdesc->desc;
+ last_cookie = desc->cookie;
spin_lock_irqsave(&mchan->lock, irqflags);
dma_cookie_complete(desc);
spin_unlock_irqrestore(&mchan->lock, irqflags);
llstat = hidma_ll_status(mdma->lldev, mdesc->tre_ch);
- if (desc->callback && (llstat == DMA_COMPLETE))
- desc->callback(desc->callback_param);
+ dmaengine_desc_get_callback(desc, &cb);
- last_cookie = desc->cookie;
dma_run_dependencies(desc);
- }
- /* Free descriptors */
- spin_lock_irqsave(&mchan->lock, irqflags);
- list_splice_tail_init(&list, &mchan->free);
- spin_unlock_irqrestore(&mchan->lock, irqflags);
+ spin_lock_irqsave(&mchan->lock, irqflags);
+ list_move(&mdesc->node, &mchan->free);
+
+ if (llstat == DMA_COMPLETE) {
+ mchan->last_success = last_cookie;
+ result.result = DMA_TRANS_NOERROR;
+ } else
+ result.result = DMA_TRANS_ABORTED;
+
+ spin_unlock_irqrestore(&mchan->lock, irqflags);
+ dmaengine_desc_callback_invoke(&cb, &result);
+ }
}
/*
@@ -238,6 +247,19 @@ static void hidma_issue_pending(struct dma_chan *dmach)
hidma_ll_start(dmadev->lldev);
}
+static inline bool hidma_txn_is_success(dma_cookie_t cookie,
+ dma_cookie_t last_success, dma_cookie_t last_used)
+{
+ if (last_success <= last_used) {
+ if ((cookie <= last_success) || (cookie > last_used))
+ return true;
+ } else {
+ if ((cookie <= last_success) && (cookie > last_used))
+ return true;
+ }
+ return false;
+}
+
static enum dma_status hidma_tx_status(struct dma_chan *dmach,
dma_cookie_t cookie,
struct dma_tx_state *txstate)
@@ -246,8 +268,13 @@ static enum dma_status hidma_tx_status(struct dma_chan *dmach,
enum dma_status ret;
ret = dma_cookie_status(dmach, cookie, txstate);
- if (ret == DMA_COMPLETE)
- return ret;
+ if (ret == DMA_COMPLETE) {
+ bool is_success;
+
+ is_success = hidma_txn_is_success(cookie, mchan->last_success,
+ dmach->cookie);
+ return is_success ? ret : DMA_ERROR;
+ }
if (mchan->paused && (ret == DMA_IN_PROGRESS)) {
unsigned long flags;
@@ -398,6 +425,7 @@ static int hidma_terminate_channel(struct dma_chan *chan)
hidma_process_completed(mchan);
spin_lock_irqsave(&mchan->lock, irqflags);
+ mchan->last_success = 0;
list_splice_init(&mchan->active, &list);
list_splice_init(&mchan->prepared, &list);
list_splice_init(&mchan->completed, &list);
@@ -413,14 +441,9 @@ static int hidma_terminate_channel(struct dma_chan *chan)
/* return all user requests */
list_for_each_entry_safe(mdesc, tmp, &list, node) {
struct dma_async_tx_descriptor *txd = &mdesc->desc;
- dma_async_tx_callback callback = mdesc->desc.callback;
- void *param = mdesc->desc.callback_param;
dma_descriptor_unmap(txd);
-
- if (callback)
- callback(param);
-
+ dmaengine_desc_get_callback_invoke(txd, NULL);
dma_run_dependencies(txd);
/* move myself to free_list */
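
The hidma_txn_is_success() check above treats the wrapping cookie sequence as circular: cookies inside the window (last_success, last_used] belong to transactions that failed or were flushed, everything else reports DMA_COMPLETE. A hypothetical harness illustrating the unwrapped and wrapped cases; it assumes hidma_txn_is_success() is visible, and WARN_ON() is used only to assert the expected outcomes:

static void hidma_window_example(void)
{
	/* no wrap: cookies 1..9 issued, 3 is the last known-good cookie */
	WARN_ON(!hidma_txn_is_success(2, 3, 9));  /* at/before last_success: ok */
	WARN_ON(hidma_txn_is_success(7, 3, 9));   /* inside (3, 9]: error */

	/* wrapped: counter restarted, last_used (50) is behind last_success (200) */
	WARN_ON(!hidma_txn_is_success(100, 200, 50)); /* inside (50, 200]: ok */
	WARN_ON(hidma_txn_is_success(30, 200, 50));   /* outside the window: error */
}
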
diff --git a/drivers/dma/qcom/hidma.h b/drivers/dma/qcom/hidma.h
index db413a5efc4e..e52e20716303 100644
--- a/drivers/dma/qcom/hidma.h
+++ b/drivers/dma/qcom/hidma.h
@@ -72,7 +72,6 @@ struct hidma_lldev {
u32 tre_write_offset; /* TRE write location */
struct tasklet_struct task; /* task delivering notifications */
- struct tasklet_struct rst_task; /* task to reset HW */
DECLARE_KFIFO_PTR(handoff_fifo,
struct hidma_tre *); /* pending TREs FIFO */
};
@@ -89,6 +88,7 @@ struct hidma_chan {
bool allocated;
char dbg_name[16];
u32 dma_sig;
+ dma_cookie_t last_success;
/*
* active descriptor on this channel
diff --git a/drivers/dma/qcom/hidma_ll.c b/drivers/dma/qcom/hidma_ll.c
index ad20dfb64c71..3224f24c577b 100644
--- a/drivers/dma/qcom/hidma_ll.c
+++ b/drivers/dma/qcom/hidma_ll.c
@@ -381,27 +381,6 @@ static int hidma_ll_reset(struct hidma_lldev *lldev)
}
/*
- * Abort all transactions and perform a reset.
- */
-static void hidma_ll_abort(unsigned long arg)
-{
- struct hidma_lldev *lldev = (struct hidma_lldev *)arg;
- u8 err_code = HIDMA_EVRE_STATUS_ERROR;
- u8 err_info = 0xFF;
- int rc;
-
- hidma_cleanup_pending_tre(lldev, err_info, err_code);
-
- /* reset the channel for recovery */
- rc = hidma_ll_setup(lldev);
- if (rc) {
- dev_err(lldev->dev, "channel reinitialize failed after error\n");
- return;
- }
- writel(ENABLE_IRQS, lldev->evca + HIDMA_EVCA_IRQ_EN_REG);
-}
-
-/*
* The interrupt handler for HIDMA will try to consume as many pending
* EVRE from the event queue as possible. Each EVRE has an associated
* TRE that holds the user interface parameters. EVRE reports the
@@ -454,13 +433,18 @@ irqreturn_t hidma_ll_inthandler(int chirq, void *arg)
while (cause) {
if (cause & HIDMA_ERR_INT_MASK) {
- dev_err(lldev->dev, "error 0x%x, resetting...\n",
+ dev_err(lldev->dev, "error 0x%x, disabling...\n",
cause);
/* Clear out pending interrupts */
writel(cause, lldev->evca + HIDMA_EVCA_IRQ_CLR_REG);
- tasklet_schedule(&lldev->rst_task);
+ /* No further submissions. */
+ hidma_ll_disable(lldev);
+
+ /* Driver completes the txn and notifies the client. */
+ hidma_cleanup_pending_tre(lldev, 0xFF,
+ HIDMA_EVRE_STATUS_ERROR);
goto out;
}
@@ -808,7 +792,6 @@ struct hidma_lldev *hidma_ll_init(struct device *dev, u32 nr_tres,
return NULL;
spin_lock_init(&lldev->lock);
- tasklet_init(&lldev->rst_task, hidma_ll_abort, (unsigned long)lldev);
tasklet_init(&lldev->task, hidma_ll_tre_complete, (unsigned long)lldev);
lldev->initialized = 1;
writel(ENABLE_IRQS, lldev->evca + HIDMA_EVCA_IRQ_EN_REG);
@@ -831,7 +814,6 @@ int hidma_ll_uninit(struct hidma_lldev *lldev)
required_bytes = sizeof(struct hidma_tre) * lldev->nr_tres;
tasklet_kill(&lldev->task);
- tasklet_kill(&lldev->rst_task);
memset(lldev->trepool, 0, required_bytes);
lldev->trepool = NULL;
lldev->pending_tre_count = 0;
diff --git a/drivers/dma/sh/rcar-dmac.c b/drivers/dma/sh/rcar-dmac.c
index 0dd953884d1d..d1defa4646ba 100644
--- a/drivers/dma/sh/rcar-dmac.c
+++ b/drivers/dma/sh/rcar-dmac.c
@@ -1389,21 +1389,18 @@ static irqreturn_t rcar_dmac_isr_channel_thread(int irq, void *dev)
{
struct rcar_dmac_chan *chan = dev;
struct rcar_dmac_desc *desc;
+ struct dmaengine_desc_callback cb;
spin_lock_irq(&chan->lock);
/* For cyclic transfers notify the user after every chunk. */
if (chan->desc.running && chan->desc.running->cyclic) {
- dma_async_tx_callback callback;
- void *callback_param;
-
desc = chan->desc.running;
- callback = desc->async_tx.callback;
- callback_param = desc->async_tx.callback_param;
+ dmaengine_desc_get_callback(&desc->async_tx, &cb);
- if (callback) {
+ if (dmaengine_desc_callback_valid(&cb)) {
spin_unlock_irq(&chan->lock);
- callback(callback_param);
+ dmaengine_desc_callback_invoke(&cb, NULL);
spin_lock_irq(&chan->lock);
}
}
@@ -1418,14 +1415,15 @@ static irqreturn_t rcar_dmac_isr_channel_thread(int irq, void *dev)
dma_cookie_complete(&desc->async_tx);
list_del(&desc->node);
- if (desc->async_tx.callback) {
+ dmaengine_desc_get_callback(&desc->async_tx, &cb);
+ if (dmaengine_desc_callback_valid(&cb)) {
spin_unlock_irq(&chan->lock);
/*
* We own the only reference to this descriptor, we can
* safely dereference it without holding the channel
* lock.
*/
- desc->async_tx.callback(desc->async_tx.callback_param);
+ dmaengine_desc_callback_invoke(&cb, NULL);
spin_lock_irq(&chan->lock);
}
diff --git a/drivers/dma/sh/shdma-base.c b/drivers/dma/sh/shdma-base.c
index 10fcabad80f3..12fa48e380cf 100644
--- a/drivers/dma/sh/shdma-base.c
+++ b/drivers/dma/sh/shdma-base.c
@@ -330,10 +330,11 @@ static dma_async_tx_callback __ld_cleanup(struct shdma_chan *schan, bool all)
bool head_acked = false;
dma_cookie_t cookie = 0;
dma_async_tx_callback callback = NULL;
- void *param = NULL;
+ struct dmaengine_desc_callback cb;
unsigned long flags;
LIST_HEAD(cyclic_list);
+ memset(&cb, 0, sizeof(cb));
spin_lock_irqsave(&schan->chan_lock, flags);
list_for_each_entry_safe(desc, _desc, &schan->ld_queue, node) {
struct dma_async_tx_descriptor *tx = &desc->async_tx;
@@ -367,8 +368,8 @@ static dma_async_tx_callback __ld_cleanup(struct shdma_chan *schan, bool all)
/* Call callback on the last chunk */
if (desc->mark == DESC_COMPLETED && tx->callback) {
desc->mark = DESC_WAITING;
+ dmaengine_desc_get_callback(tx, &cb);
callback = tx->callback;
- param = tx->callback_param;
dev_dbg(schan->dev, "descriptor #%d@%p on %d callback\n",
tx->cookie, tx, schan->id);
BUG_ON(desc->chunks != 1);
@@ -430,8 +431,7 @@ static dma_async_tx_callback __ld_cleanup(struct shdma_chan *schan, bool all)
spin_unlock_irqrestore(&schan->chan_lock, flags);
- if (callback)
- callback(param);
+ dmaengine_desc_callback_invoke(&cb, NULL);
return callback;
}
@@ -885,9 +885,9 @@ bool shdma_reset(struct shdma_dev *sdev)
/* Complete all */
list_for_each_entry(sdesc, &dl, node) {
struct dma_async_tx_descriptor *tx = &sdesc->async_tx;
+
sdesc->mark = DESC_IDLE;
- if (tx->callback)
- tx->callback(tx->callback_param);
+ dmaengine_desc_get_callback_invoke(tx, NULL);
}
spin_lock(&schan->chan_lock);
diff --git a/drivers/dma/sirf-dma.c b/drivers/dma/sirf-dma.c
index d8bc3f2a71db..a96e4a480de5 100644
--- a/drivers/dma/sirf-dma.c
+++ b/drivers/dma/sirf-dma.c
@@ -360,9 +360,7 @@ static void sirfsoc_dma_process_completed(struct sirfsoc_dma *sdma)
list_for_each_entry(sdesc, &list, node) {
desc = &sdesc->desc;
- if (desc->callback)
- desc->callback(desc->callback_param);
-
+ dmaengine_desc_get_callback_invoke(desc, NULL);
last_cookie = desc->cookie;
dma_run_dependencies(desc);
}
@@ -388,8 +386,7 @@ static void sirfsoc_dma_process_completed(struct sirfsoc_dma *sdma)
desc = &sdesc->desc;
while (happened_cyclic != schan->completed_cyclic) {
- if (desc->callback)
- desc->callback(desc->callback_param);
+ dmaengine_desc_get_callback_invoke(desc, NULL);
schan->completed_cyclic++;
}
}
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
index e43d2bbfd122..08f3d7be2df0 100644
--- a/drivers/dma/ste_dma40.c
+++ b/drivers/dma/ste_dma40.c
@@ -1570,8 +1570,7 @@ static void dma_tasklet(unsigned long data)
struct d40_desc *d40d;
unsigned long flags;
bool callback_active;
- dma_async_tx_callback callback;
- void *callback_param;
+ struct dmaengine_desc_callback cb;
spin_lock_irqsave(&d40c->lock, flags);
@@ -1598,8 +1597,7 @@ static void dma_tasklet(unsigned long data)
/* Callback to client */
callback_active = !!(d40d->txd.flags & DMA_PREP_INTERRUPT);
- callback = d40d->txd.callback;
- callback_param = d40d->txd.callback_param;
+ dmaengine_desc_get_callback(&d40d->txd, &cb);
if (!d40d->cyclic) {
if (async_tx_test_ack(&d40d->txd)) {
@@ -1620,8 +1618,8 @@ static void dma_tasklet(unsigned long data)
spin_unlock_irqrestore(&d40c->lock, flags);
- if (callback_active && callback)
- callback(callback_param);
+ if (callback_active)
+ dmaengine_desc_callback_invoke(&cb, NULL);
return;
diff --git a/drivers/dma/tegra20-apb-dma.c b/drivers/dma/tegra20-apb-dma.c
index 6ab9eb98588a..3722b9d8d9fe 100644
--- a/drivers/dma/tegra20-apb-dma.c
+++ b/drivers/dma/tegra20-apb-dma.c
@@ -655,8 +655,7 @@ static void handle_cont_sngl_cycle_dma_done(struct tegra_dma_channel *tdc,
static void tegra_dma_tasklet(unsigned long data)
{
struct tegra_dma_channel *tdc = (struct tegra_dma_channel *)data;
- dma_async_tx_callback callback = NULL;
- void *callback_param = NULL;
+ struct dmaengine_desc_callback cb;
struct tegra_dma_desc *dma_desc;
unsigned long flags;
int cb_count;
@@ -666,13 +665,12 @@ static void tegra_dma_tasklet(unsigned long data)
dma_desc = list_first_entry(&tdc->cb_desc,
typeof(*dma_desc), cb_node);
list_del(&dma_desc->cb_node);
- callback = dma_desc->txd.callback;
- callback_param = dma_desc->txd.callback_param;
+ dmaengine_desc_get_callback(&dma_desc->txd, &cb);
cb_count = dma_desc->cb_count;
dma_desc->cb_count = 0;
spin_unlock_irqrestore(&tdc->lock, flags);
- while (cb_count-- && callback)
- callback(callback_param);
+ while (cb_count--)
+ dmaengine_desc_callback_invoke(&cb, NULL);
spin_lock_irqsave(&tdc->lock, flags);
}
spin_unlock_irqrestore(&tdc->lock, flags);
diff --git a/drivers/dma/timb_dma.c b/drivers/dma/timb_dma.c
index e82745aa42a8..896bafb7a532 100644
--- a/drivers/dma/timb_dma.c
+++ b/drivers/dma/timb_dma.c
@@ -226,8 +226,7 @@ static void __td_start_dma(struct timb_dma_chan *td_chan)
static void __td_finish(struct timb_dma_chan *td_chan)
{
- dma_async_tx_callback callback;
- void *param;
+ struct dmaengine_desc_callback cb;
struct dma_async_tx_descriptor *txd;
struct timb_dma_desc *td_desc;
@@ -252,8 +251,7 @@ static void __td_finish(struct timb_dma_chan *td_chan)
dma_cookie_complete(txd);
td_chan->ongoing = false;
- callback = txd->callback;
- param = txd->callback_param;
+ dmaengine_desc_get_callback(txd, &cb);
list_move(&td_desc->desc_node, &td_chan->free_list);
@@ -262,8 +260,7 @@ static void __td_finish(struct timb_dma_chan *td_chan)
* The API requires that no submissions are done from a
* callback, so we don't need to drop the lock here
*/
- if (callback)
- callback(param);
+ dmaengine_desc_callback_invoke(&cb, NULL);
}
static u32 __td_ier_mask(struct timb_dma *td)
diff --git a/drivers/dma/txx9dmac.c b/drivers/dma/txx9dmac.c
index 7632290e7c14..4d8c7b9078fd 100644
--- a/drivers/dma/txx9dmac.c
+++ b/drivers/dma/txx9dmac.c
@@ -403,16 +403,14 @@ static void
txx9dmac_descriptor_complete(struct txx9dmac_chan *dc,
struct txx9dmac_desc *desc)
{
- dma_async_tx_callback callback;
- void *param;
+ struct dmaengine_desc_callback cb;
struct dma_async_tx_descriptor *txd = &desc->txd;
dev_vdbg(chan2dev(&dc->chan), "descriptor %u %p complete\n",
txd->cookie, desc);
dma_cookie_complete(txd);
- callback = txd->callback;
- param = txd->callback_param;
+ dmaengine_desc_get_callback(txd, &cb);
txx9dmac_sync_desc_for_cpu(dc, desc);
list_splice_init(&desc->tx_list, &dc->free_list);
@@ -423,8 +421,7 @@ txx9dmac_descriptor_complete(struct txx9dmac_chan *dc,
* The API requires that no submissions are done from a
* callback, so we don't need to drop the lock here
*/
- if (callback)
- callback(param);
+ dmaengine_desc_callback_invoke(&cb, NULL);
dma_run_dependencies(txd);
}
diff --git a/drivers/dma/virt-dma.c b/drivers/dma/virt-dma.c
index a35c211857dd..e47fc9b0944f 100644
--- a/drivers/dma/virt-dma.c
+++ b/drivers/dma/virt-dma.c
@@ -87,8 +87,7 @@ static void vchan_complete(unsigned long arg)
{
struct virt_dma_chan *vc = (struct virt_dma_chan *)arg;
struct virt_dma_desc *vd;
- dma_async_tx_callback cb = NULL;
- void *cb_data = NULL;
+ struct dmaengine_desc_callback cb;
LIST_HEAD(head);
spin_lock_irq(&vc->lock);
@@ -96,18 +95,17 @@ static void vchan_complete(unsigned long arg)
vd = vc->cyclic;
if (vd) {
vc->cyclic = NULL;
- cb = vd->tx.callback;
- cb_data = vd->tx.callback_param;
+ dmaengine_desc_get_callback(&vd->tx, &cb);
+ } else {
+ memset(&cb, 0, sizeof(cb));
}
spin_unlock_irq(&vc->lock);
- if (cb)
- cb(cb_data);
+ dmaengine_desc_callback_invoke(&cb, NULL);
while (!list_empty(&head)) {
vd = list_first_entry(&head, struct virt_dma_desc, node);
- cb = vd->tx.callback;
- cb_data = vd->tx.callback_param;
+ dmaengine_desc_get_callback(&vd->tx, &cb);
list_del(&vd->node);
if (dmaengine_desc_test_reuse(&vd->tx))
@@ -115,8 +113,7 @@ static void vchan_complete(unsigned long arg)
else
vc->desc_free(vd);
- if (cb)
- cb(cb_data);
+ dmaengine_desc_callback_invoke(&cb, NULL);
}
}
diff --git a/drivers/dma/xgene-dma.c b/drivers/dma/xgene-dma.c
index 9cb93c5b655d..d66ed11baaec 100644
--- a/drivers/dma/xgene-dma.c
+++ b/drivers/dma/xgene-dma.c
@@ -608,8 +608,7 @@ static void xgene_dma_run_tx_complete_actions(struct xgene_dma_chan *chan,
dma_cookie_complete(tx);
/* Run the link descriptor callback function */
- if (tx->callback)
- tx->callback(tx->callback_param);
+ dmaengine_desc_get_callback_invoke(tx, NULL);
dma_descriptor_unmap(tx);
diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c
index 4e223d094433..8288fe4d17c3 100644
--- a/drivers/dma/xilinx/xilinx_dma.c
+++ b/drivers/dma/xilinx/xilinx_dma.c
@@ -755,8 +755,7 @@ static void xilinx_dma_chan_desc_cleanup(struct xilinx_dma_chan *chan)
spin_lock_irqsave(&chan->lock, flags);
list_for_each_entry_safe(desc, next, &chan->done_list, node) {
- dma_async_tx_callback callback;
- void *callback_param;
+ struct dmaengine_desc_callback cb;
if (desc->cyclic) {
xilinx_dma_chan_handle_cyclic(chan, desc, &flags);
@@ -767,11 +766,10 @@ static void xilinx_dma_chan_desc_cleanup(struct xilinx_dma_chan *chan)
list_del(&desc->node);
/* Run the link descriptor callback function */
- callback = desc->async_tx.callback;
- callback_param = desc->async_tx.callback_param;
- if (callback) {
+ dmaengine_desc_get_callback(&desc->async_tx, &cb);
+ if (dmaengine_desc_callback_valid(&cb)) {
spin_unlock_irqrestore(&chan->lock, flags);
- callback(callback_param);
+ dmaengine_desc_callback_invoke(&cb, NULL);
spin_lock_irqsave(&chan->lock, flags);
}