From d88b1397c674178e595319fab4a3cd434c915639 Mon Sep 17 00:00:00 2001
From: Peter Ujfalusi
Date: Wed, 25 Apr 2018 11:45:03 +0300
Subject: dmaengine: ti: New directory for Texas Instruments DMA drivers

Collect the Texas Instruments DMA drivers under drivers/dma/ti/

Signed-off-by: Peter Ujfalusi
Signed-off-by: Vinod Koul
---
 drivers/dma/ti/Kconfig        |   37 +
 drivers/dma/ti/Makefile       |    5 +
 drivers/dma/ti/cppi41.c       | 1223 ++++++++++++++++++++
 drivers/dma/ti/dma-crossbar.c |  478 ++++++++
 drivers/dma/ti/edma.c         | 2568 +++++++++++++++++++++++++++++++++++++++++
 drivers/dma/ti/omap-dma.c     | 1668 ++++++++++++++++++++++++++
 6 files changed, 5979 insertions(+)
 create mode 100644 drivers/dma/ti/Kconfig
 create mode 100644 drivers/dma/ti/Makefile
 create mode 100644 drivers/dma/ti/cppi41.c
 create mode 100644 drivers/dma/ti/dma-crossbar.c
 create mode 100644 drivers/dma/ti/edma.c
 create mode 100644 drivers/dma/ti/omap-dma.c

diff --git a/drivers/dma/ti/Kconfig b/drivers/dma/ti/Kconfig
new file mode 100644
index 000000000000..e5e74e1361dc
--- /dev/null
+++ b/drivers/dma/ti/Kconfig
@@ -0,0 +1,37 @@
+#
+# Texas Instruments DMA drivers
+#
+
+config TI_CPPI41
+	tristate "Texas Instruments CPPI 4.1 DMA support"
+	depends on (ARCH_OMAP || ARCH_DAVINCI_DA8XX)
+	select DMA_ENGINE
+	help
+	  The Communications Port Programming Interface (CPPI) 4.1 DMA engine
+	  is currently used by the USB driver on AM335x and DA8xx platforms.
+
+config TI_EDMA
+	tristate "Texas Instruments EDMA support"
+	depends on ARCH_DAVINCI || ARCH_OMAP || ARCH_KEYSTONE || COMPILE_TEST
+	select DMA_ENGINE
+	select DMA_VIRTUAL_CHANNELS
+	select TI_DMA_CROSSBAR if (ARCH_OMAP || COMPILE_TEST)
+	default y
+	help
+	  Enable support for the TI EDMA (Enhanced DMA) controller. This DMA
+	  engine is found on TI DaVinci, AM33xx, AM43xx, DRA7xx and Keystone 2
+	  parts.
+
+config DMA_OMAP
+	tristate "Texas Instruments sDMA (omap-dma) support"
+	depends on ARCH_OMAP || COMPILE_TEST
+	select DMA_ENGINE
+	select DMA_VIRTUAL_CHANNELS
+	select TI_DMA_CROSSBAR if (SOC_DRA7XX || COMPILE_TEST)
+	default y
+	help
+	  Enable support for the TI sDMA (System DMA or DMA4) controller. This
+	  DMA engine is found on OMAP and DRA7xx parts.
+ +config TI_DMA_CROSSBAR + bool diff --git a/drivers/dma/ti/Makefile b/drivers/dma/ti/Makefile new file mode 100644 index 000000000000..113e59ec9c32 --- /dev/null +++ b/drivers/dma/ti/Makefile @@ -0,0 +1,5 @@ +# SPDX-License-Identifier: GPL-2.0 +obj-$(CONFIG_TI_CPPI41) += cppi41.o +obj-$(CONFIG_TI_EDMA) += edma.o +obj-$(CONFIG_DMA_OMAP) += omap-dma.o +obj-$(CONFIG_TI_DMA_CROSSBAR) += dma-crossbar.o diff --git a/drivers/dma/ti/cppi41.c b/drivers/dma/ti/cppi41.c new file mode 100644 index 000000000000..1497da367710 --- /dev/null +++ b/drivers/dma/ti/cppi41.c @@ -0,0 +1,1223 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "../dmaengine.h" + +#define DESC_TYPE 27 +#define DESC_TYPE_HOST 0x10 +#define DESC_TYPE_TEARD 0x13 + +#define TD_DESC_IS_RX (1 << 16) +#define TD_DESC_DMA_NUM 10 + +#define DESC_LENGTH_BITS_NUM 21 + +#define DESC_TYPE_USB (5 << 26) +#define DESC_PD_COMPLETE (1 << 31) + +/* DMA engine */ +#define DMA_TDFDQ 4 +#define DMA_TXGCR(x) (0x800 + (x) * 0x20) +#define DMA_RXGCR(x) (0x808 + (x) * 0x20) +#define RXHPCRA0 4 + +#define GCR_CHAN_ENABLE (1 << 31) +#define GCR_TEARDOWN (1 << 30) +#define GCR_STARV_RETRY (1 << 24) +#define GCR_DESC_TYPE_HOST (1 << 14) + +/* DMA scheduler */ +#define DMA_SCHED_CTRL 0 +#define DMA_SCHED_CTRL_EN (1 << 31) +#define DMA_SCHED_WORD(x) ((x) * 4 + 0x800) + +#define SCHED_ENTRY0_CHAN(x) ((x) << 0) +#define SCHED_ENTRY0_IS_RX (1 << 7) + +#define SCHED_ENTRY1_CHAN(x) ((x) << 8) +#define SCHED_ENTRY1_IS_RX (1 << 15) + +#define SCHED_ENTRY2_CHAN(x) ((x) << 16) +#define SCHED_ENTRY2_IS_RX (1 << 23) + +#define SCHED_ENTRY3_CHAN(x) ((x) << 24) +#define SCHED_ENTRY3_IS_RX (1 << 31) + +/* Queue manager */ +/* 4 KiB of memory for descriptors, 2 for each endpoint */ +#define ALLOC_DECS_NUM 128 +#define DESCS_AREAS 1 +#define TOTAL_DESCS_NUM (ALLOC_DECS_NUM * DESCS_AREAS) +#define QMGR_SCRATCH_SIZE (TOTAL_DESCS_NUM * 4) + +#define QMGR_LRAM0_BASE 0x80 +#define QMGR_LRAM_SIZE 0x84 +#define QMGR_LRAM1_BASE 0x88 +#define QMGR_MEMBASE(x) (0x1000 + (x) * 0x10) +#define QMGR_MEMCTRL(x) (0x1004 + (x) * 0x10) +#define QMGR_MEMCTRL_IDX_SH 16 +#define QMGR_MEMCTRL_DESC_SH 8 + +#define QMGR_PEND(x) (0x90 + (x) * 4) + +#define QMGR_PENDING_SLOT_Q(x) (x / 32) +#define QMGR_PENDING_BIT_Q(x) (x % 32) + +#define QMGR_QUEUE_A(n) (0x2000 + (n) * 0x10) +#define QMGR_QUEUE_B(n) (0x2004 + (n) * 0x10) +#define QMGR_QUEUE_C(n) (0x2008 + (n) * 0x10) +#define QMGR_QUEUE_D(n) (0x200c + (n) * 0x10) + +/* Packet Descriptor */ +#define PD2_ZERO_LENGTH (1 << 19) + +struct cppi41_channel { + struct dma_chan chan; + struct dma_async_tx_descriptor txd; + struct cppi41_dd *cdd; + struct cppi41_desc *desc; + dma_addr_t desc_phys; + void __iomem *gcr_reg; + int is_tx; + u32 residue; + + unsigned int q_num; + unsigned int q_comp_num; + unsigned int port_num; + + unsigned td_retry; + unsigned td_queued:1; + unsigned td_seen:1; + unsigned td_desc_seen:1; + + struct list_head node; /* Node for pending list */ +}; + +struct cppi41_desc { + u32 pd0; + u32 pd1; + u32 pd2; + u32 pd3; + u32 pd4; + u32 pd5; + u32 pd6; + u32 pd7; +} __aligned(32); + +struct chan_queues { + u16 submit; + u16 complete; +}; + +struct cppi41_dd { + struct dma_device ddev; + + void *qmgr_scratch; + dma_addr_t scratch_phys; + + struct cppi41_desc *cd; + dma_addr_t descs_phys; + u32 first_td_desc; + struct cppi41_channel *chan_busy[ALLOC_DECS_NUM]; + + void __iomem *ctrl_mem; + void __iomem *sched_mem; + void __iomem *qmgr_mem; + 
unsigned int irq; + const struct chan_queues *queues_rx; + const struct chan_queues *queues_tx; + struct chan_queues td_queue; + u16 first_completion_queue; + u16 qmgr_num_pend; + u32 n_chans; + u8 platform; + + struct list_head pending; /* Pending queued transfers */ + spinlock_t lock; /* Lock for pending list */ + + /* context for suspend/resume */ + unsigned int dma_tdfdq; + + bool is_suspended; +}; + +static struct chan_queues am335x_usb_queues_tx[] = { + /* USB0 ENDP 1 */ + [ 0] = { .submit = 32, .complete = 93}, + [ 1] = { .submit = 34, .complete = 94}, + [ 2] = { .submit = 36, .complete = 95}, + [ 3] = { .submit = 38, .complete = 96}, + [ 4] = { .submit = 40, .complete = 97}, + [ 5] = { .submit = 42, .complete = 98}, + [ 6] = { .submit = 44, .complete = 99}, + [ 7] = { .submit = 46, .complete = 100}, + [ 8] = { .submit = 48, .complete = 101}, + [ 9] = { .submit = 50, .complete = 102}, + [10] = { .submit = 52, .complete = 103}, + [11] = { .submit = 54, .complete = 104}, + [12] = { .submit = 56, .complete = 105}, + [13] = { .submit = 58, .complete = 106}, + [14] = { .submit = 60, .complete = 107}, + + /* USB1 ENDP1 */ + [15] = { .submit = 62, .complete = 125}, + [16] = { .submit = 64, .complete = 126}, + [17] = { .submit = 66, .complete = 127}, + [18] = { .submit = 68, .complete = 128}, + [19] = { .submit = 70, .complete = 129}, + [20] = { .submit = 72, .complete = 130}, + [21] = { .submit = 74, .complete = 131}, + [22] = { .submit = 76, .complete = 132}, + [23] = { .submit = 78, .complete = 133}, + [24] = { .submit = 80, .complete = 134}, + [25] = { .submit = 82, .complete = 135}, + [26] = { .submit = 84, .complete = 136}, + [27] = { .submit = 86, .complete = 137}, + [28] = { .submit = 88, .complete = 138}, + [29] = { .submit = 90, .complete = 139}, +}; + +static const struct chan_queues am335x_usb_queues_rx[] = { + /* USB0 ENDP 1 */ + [ 0] = { .submit = 1, .complete = 109}, + [ 1] = { .submit = 2, .complete = 110}, + [ 2] = { .submit = 3, .complete = 111}, + [ 3] = { .submit = 4, .complete = 112}, + [ 4] = { .submit = 5, .complete = 113}, + [ 5] = { .submit = 6, .complete = 114}, + [ 6] = { .submit = 7, .complete = 115}, + [ 7] = { .submit = 8, .complete = 116}, + [ 8] = { .submit = 9, .complete = 117}, + [ 9] = { .submit = 10, .complete = 118}, + [10] = { .submit = 11, .complete = 119}, + [11] = { .submit = 12, .complete = 120}, + [12] = { .submit = 13, .complete = 121}, + [13] = { .submit = 14, .complete = 122}, + [14] = { .submit = 15, .complete = 123}, + + /* USB1 ENDP 1 */ + [15] = { .submit = 16, .complete = 141}, + [16] = { .submit = 17, .complete = 142}, + [17] = { .submit = 18, .complete = 143}, + [18] = { .submit = 19, .complete = 144}, + [19] = { .submit = 20, .complete = 145}, + [20] = { .submit = 21, .complete = 146}, + [21] = { .submit = 22, .complete = 147}, + [22] = { .submit = 23, .complete = 148}, + [23] = { .submit = 24, .complete = 149}, + [24] = { .submit = 25, .complete = 150}, + [25] = { .submit = 26, .complete = 151}, + [26] = { .submit = 27, .complete = 152}, + [27] = { .submit = 28, .complete = 153}, + [28] = { .submit = 29, .complete = 154}, + [29] = { .submit = 30, .complete = 155}, +}; + +static const struct chan_queues da8xx_usb_queues_tx[] = { + [0] = { .submit = 16, .complete = 24}, + [1] = { .submit = 18, .complete = 24}, + [2] = { .submit = 20, .complete = 24}, + [3] = { .submit = 22, .complete = 24}, +}; + +static const struct chan_queues da8xx_usb_queues_rx[] = { + [0] = { .submit = 1, .complete = 26}, + [1] = { .submit = 3, .complete = 26}, + [2] 
= { .submit = 5, .complete = 26}, + [3] = { .submit = 7, .complete = 26}, +}; + +struct cppi_glue_infos { + const struct chan_queues *queues_rx; + const struct chan_queues *queues_tx; + struct chan_queues td_queue; + u16 first_completion_queue; + u16 qmgr_num_pend; +}; + +static struct cppi41_channel *to_cpp41_chan(struct dma_chan *c) +{ + return container_of(c, struct cppi41_channel, chan); +} + +static struct cppi41_channel *desc_to_chan(struct cppi41_dd *cdd, u32 desc) +{ + struct cppi41_channel *c; + u32 descs_size; + u32 desc_num; + + descs_size = sizeof(struct cppi41_desc) * ALLOC_DECS_NUM; + + if (!((desc >= cdd->descs_phys) && + (desc < (cdd->descs_phys + descs_size)))) { + return NULL; + } + + desc_num = (desc - cdd->descs_phys) / sizeof(struct cppi41_desc); + BUG_ON(desc_num >= ALLOC_DECS_NUM); + c = cdd->chan_busy[desc_num]; + cdd->chan_busy[desc_num] = NULL; + + /* Usecount for chan_busy[], paired with push_desc_queue() */ + pm_runtime_put(cdd->ddev.dev); + + return c; +} + +static void cppi_writel(u32 val, void *__iomem *mem) +{ + __raw_writel(val, mem); +} + +static u32 cppi_readl(void *__iomem *mem) +{ + return __raw_readl(mem); +} + +static u32 pd_trans_len(u32 val) +{ + return val & ((1 << (DESC_LENGTH_BITS_NUM + 1)) - 1); +} + +static u32 cppi41_pop_desc(struct cppi41_dd *cdd, unsigned queue_num) +{ + u32 desc; + + desc = cppi_readl(cdd->qmgr_mem + QMGR_QUEUE_D(queue_num)); + desc &= ~0x1f; + return desc; +} + +static irqreturn_t cppi41_irq(int irq, void *data) +{ + struct cppi41_dd *cdd = data; + u16 first_completion_queue = cdd->first_completion_queue; + u16 qmgr_num_pend = cdd->qmgr_num_pend; + struct cppi41_channel *c; + int i; + + for (i = QMGR_PENDING_SLOT_Q(first_completion_queue); i < qmgr_num_pend; + i++) { + u32 val; + u32 q_num; + + val = cppi_readl(cdd->qmgr_mem + QMGR_PEND(i)); + if (i == QMGR_PENDING_SLOT_Q(first_completion_queue) && val) { + u32 mask; + /* set corresponding bit for completetion Q 93 */ + mask = 1 << QMGR_PENDING_BIT_Q(first_completion_queue); + /* not set all bits for queues less than Q 93 */ + mask--; + /* now invert and keep only Q 93+ set */ + val &= ~mask; + } + + if (val) + __iormb(); + + while (val) { + u32 desc, len; + + /* + * This should never trigger, see the comments in + * push_desc_queue() + */ + WARN_ON(cdd->is_suspended); + + q_num = __fls(val); + val &= ~(1 << q_num); + q_num += 32 * i; + desc = cppi41_pop_desc(cdd, q_num); + c = desc_to_chan(cdd, desc); + if (WARN_ON(!c)) { + pr_err("%s() q %d desc %08x\n", __func__, + q_num, desc); + continue; + } + + if (c->desc->pd2 & PD2_ZERO_LENGTH) + len = 0; + else + len = pd_trans_len(c->desc->pd0); + + c->residue = pd_trans_len(c->desc->pd6) - len; + dma_cookie_complete(&c->txd); + dmaengine_desc_get_callback_invoke(&c->txd, NULL); + } + } + return IRQ_HANDLED; +} + +static dma_cookie_t cppi41_tx_submit(struct dma_async_tx_descriptor *tx) +{ + dma_cookie_t cookie; + + cookie = dma_cookie_assign(tx); + + return cookie; +} + +static int cppi41_dma_alloc_chan_resources(struct dma_chan *chan) +{ + struct cppi41_channel *c = to_cpp41_chan(chan); + struct cppi41_dd *cdd = c->cdd; + int error; + + error = pm_runtime_get_sync(cdd->ddev.dev); + if (error < 0) { + dev_err(cdd->ddev.dev, "%s pm runtime get: %i\n", + __func__, error); + pm_runtime_put_noidle(cdd->ddev.dev); + + return error; + } + + dma_cookie_init(chan); + dma_async_tx_descriptor_init(&c->txd, chan); + c->txd.tx_submit = cppi41_tx_submit; + + if (!c->is_tx) + cppi_writel(c->q_num, c->gcr_reg + RXHPCRA0); + + 
pm_runtime_mark_last_busy(cdd->ddev.dev); + pm_runtime_put_autosuspend(cdd->ddev.dev); + + return 0; +} + +static void cppi41_dma_free_chan_resources(struct dma_chan *chan) +{ + struct cppi41_channel *c = to_cpp41_chan(chan); + struct cppi41_dd *cdd = c->cdd; + int error; + + error = pm_runtime_get_sync(cdd->ddev.dev); + if (error < 0) { + pm_runtime_put_noidle(cdd->ddev.dev); + + return; + } + + WARN_ON(!list_empty(&cdd->pending)); + + pm_runtime_mark_last_busy(cdd->ddev.dev); + pm_runtime_put_autosuspend(cdd->ddev.dev); +} + +static enum dma_status cppi41_dma_tx_status(struct dma_chan *chan, + dma_cookie_t cookie, struct dma_tx_state *txstate) +{ + struct cppi41_channel *c = to_cpp41_chan(chan); + enum dma_status ret; + + ret = dma_cookie_status(chan, cookie, txstate); + + dma_set_residue(txstate, c->residue); + + return ret; +} + +static void push_desc_queue(struct cppi41_channel *c) +{ + struct cppi41_dd *cdd = c->cdd; + u32 desc_num; + u32 desc_phys; + u32 reg; + + c->residue = 0; + + reg = GCR_CHAN_ENABLE; + if (!c->is_tx) { + reg |= GCR_STARV_RETRY; + reg |= GCR_DESC_TYPE_HOST; + reg |= c->q_comp_num; + } + + cppi_writel(reg, c->gcr_reg); + + /* + * We don't use writel() but __raw_writel() so we have to make sure + * that the DMA descriptor in coherent memory made to the main memory + * before starting the dma engine. + */ + __iowmb(); + + /* + * DMA transfers can take at least 200ms to complete with USB mass + * storage connected. To prevent autosuspend timeouts, we must use + * pm_runtime_get/put() when chan_busy[] is modified. This will get + * cleared in desc_to_chan() or cppi41_stop_chan() depending on the + * outcome of the transfer. + */ + pm_runtime_get(cdd->ddev.dev); + + desc_phys = lower_32_bits(c->desc_phys); + desc_num = (desc_phys - cdd->descs_phys) / sizeof(struct cppi41_desc); + WARN_ON(cdd->chan_busy[desc_num]); + cdd->chan_busy[desc_num] = c; + + reg = (sizeof(struct cppi41_desc) - 24) / 4; + reg |= desc_phys; + cppi_writel(reg, cdd->qmgr_mem + QMGR_QUEUE_D(c->q_num)); +} + +/* + * Caller must hold cdd->lock to prevent push_desc_queue() + * getting called out of order. We have both cppi41_dma_issue_pending() + * and cppi41_runtime_resume() call this function. 
+ */ +static void cppi41_run_queue(struct cppi41_dd *cdd) +{ + struct cppi41_channel *c, *_c; + + list_for_each_entry_safe(c, _c, &cdd->pending, node) { + push_desc_queue(c); + list_del(&c->node); + } +} + +static void cppi41_dma_issue_pending(struct dma_chan *chan) +{ + struct cppi41_channel *c = to_cpp41_chan(chan); + struct cppi41_dd *cdd = c->cdd; + unsigned long flags; + int error; + + error = pm_runtime_get(cdd->ddev.dev); + if ((error != -EINPROGRESS) && error < 0) { + pm_runtime_put_noidle(cdd->ddev.dev); + dev_err(cdd->ddev.dev, "Failed to pm_runtime_get: %i\n", + error); + + return; + } + + spin_lock_irqsave(&cdd->lock, flags); + list_add_tail(&c->node, &cdd->pending); + if (!cdd->is_suspended) + cppi41_run_queue(cdd); + spin_unlock_irqrestore(&cdd->lock, flags); + + pm_runtime_mark_last_busy(cdd->ddev.dev); + pm_runtime_put_autosuspend(cdd->ddev.dev); +} + +static u32 get_host_pd0(u32 length) +{ + u32 reg; + + reg = DESC_TYPE_HOST << DESC_TYPE; + reg |= length; + + return reg; +} + +static u32 get_host_pd1(struct cppi41_channel *c) +{ + u32 reg; + + reg = 0; + + return reg; +} + +static u32 get_host_pd2(struct cppi41_channel *c) +{ + u32 reg; + + reg = DESC_TYPE_USB; + reg |= c->q_comp_num; + + return reg; +} + +static u32 get_host_pd3(u32 length) +{ + u32 reg; + + /* PD3 = packet size */ + reg = length; + + return reg; +} + +static u32 get_host_pd6(u32 length) +{ + u32 reg; + + /* PD6 buffer size */ + reg = DESC_PD_COMPLETE; + reg |= length; + + return reg; +} + +static u32 get_host_pd4_or_7(u32 addr) +{ + u32 reg; + + reg = addr; + + return reg; +} + +static u32 get_host_pd5(void) +{ + u32 reg; + + reg = 0; + + return reg; +} + +static struct dma_async_tx_descriptor *cppi41_dma_prep_slave_sg( + struct dma_chan *chan, struct scatterlist *sgl, unsigned sg_len, + enum dma_transfer_direction dir, unsigned long tx_flags, void *context) +{ + struct cppi41_channel *c = to_cpp41_chan(chan); + struct cppi41_desc *d; + struct scatterlist *sg; + unsigned int i; + + d = c->desc; + for_each_sg(sgl, sg, sg_len, i) { + u32 addr; + u32 len; + + /* We need to use more than one desc once musb supports sg */ + addr = lower_32_bits(sg_dma_address(sg)); + len = sg_dma_len(sg); + + d->pd0 = get_host_pd0(len); + d->pd1 = get_host_pd1(c); + d->pd2 = get_host_pd2(c); + d->pd3 = get_host_pd3(len); + d->pd4 = get_host_pd4_or_7(addr); + d->pd5 = get_host_pd5(); + d->pd6 = get_host_pd6(len); + d->pd7 = get_host_pd4_or_7(addr); + + d++; + } + + return &c->txd; +} + +static void cppi41_compute_td_desc(struct cppi41_desc *d) +{ + d->pd0 = DESC_TYPE_TEARD << DESC_TYPE; +} + +static int cppi41_tear_down_chan(struct cppi41_channel *c) +{ + struct dmaengine_result abort_result; + struct cppi41_dd *cdd = c->cdd; + struct cppi41_desc *td; + u32 reg; + u32 desc_phys; + u32 td_desc_phys; + + td = cdd->cd; + td += cdd->first_td_desc; + + td_desc_phys = cdd->descs_phys; + td_desc_phys += cdd->first_td_desc * sizeof(struct cppi41_desc); + + if (!c->td_queued) { + cppi41_compute_td_desc(td); + __iowmb(); + + reg = (sizeof(struct cppi41_desc) - 24) / 4; + reg |= td_desc_phys; + cppi_writel(reg, cdd->qmgr_mem + + QMGR_QUEUE_D(cdd->td_queue.submit)); + + reg = GCR_CHAN_ENABLE; + if (!c->is_tx) { + reg |= GCR_STARV_RETRY; + reg |= GCR_DESC_TYPE_HOST; + reg |= cdd->td_queue.complete; + } + reg |= GCR_TEARDOWN; + cppi_writel(reg, c->gcr_reg); + c->td_queued = 1; + c->td_retry = 500; + } + + if (!c->td_seen || !c->td_desc_seen) { + + desc_phys = cppi41_pop_desc(cdd, cdd->td_queue.complete); + if (!desc_phys && c->is_tx) + 
desc_phys = cppi41_pop_desc(cdd, c->q_comp_num); + + if (desc_phys == c->desc_phys) { + c->td_desc_seen = 1; + + } else if (desc_phys == td_desc_phys) { + u32 pd0; + + __iormb(); + pd0 = td->pd0; + WARN_ON((pd0 >> DESC_TYPE) != DESC_TYPE_TEARD); + WARN_ON(!c->is_tx && !(pd0 & TD_DESC_IS_RX)); + WARN_ON((pd0 & 0x1f) != c->port_num); + c->td_seen = 1; + } else if (desc_phys) { + WARN_ON_ONCE(1); + } + } + c->td_retry--; + /* + * If the TX descriptor / channel is in use, the caller needs to poke + * his TD bit multiple times. After that he hardware releases the + * transfer descriptor followed by TD descriptor. Waiting seems not to + * cause any difference. + * RX seems to be thrown out right away. However once the TearDown + * descriptor gets through we are done. If we have seens the transfer + * descriptor before the TD we fetch it from enqueue, it has to be + * there waiting for us. + */ + if (!c->td_seen && c->td_retry) { + udelay(1); + return -EAGAIN; + } + WARN_ON(!c->td_retry); + + if (!c->td_desc_seen) { + desc_phys = cppi41_pop_desc(cdd, c->q_num); + if (!desc_phys) + desc_phys = cppi41_pop_desc(cdd, c->q_comp_num); + WARN_ON(!desc_phys); + } + + c->td_queued = 0; + c->td_seen = 0; + c->td_desc_seen = 0; + cppi_writel(0, c->gcr_reg); + + /* Invoke the callback to do the necessary clean-up */ + abort_result.result = DMA_TRANS_ABORTED; + dma_cookie_complete(&c->txd); + dmaengine_desc_get_callback_invoke(&c->txd, &abort_result); + + return 0; +} + +static int cppi41_stop_chan(struct dma_chan *chan) +{ + struct cppi41_channel *c = to_cpp41_chan(chan); + struct cppi41_dd *cdd = c->cdd; + u32 desc_num; + u32 desc_phys; + int ret; + + desc_phys = lower_32_bits(c->desc_phys); + desc_num = (desc_phys - cdd->descs_phys) / sizeof(struct cppi41_desc); + if (!cdd->chan_busy[desc_num]) + return 0; + + ret = cppi41_tear_down_chan(c); + if (ret) + return ret; + + WARN_ON(!cdd->chan_busy[desc_num]); + cdd->chan_busy[desc_num] = NULL; + + /* Usecount for chan_busy[], paired with push_desc_queue() */ + pm_runtime_put(cdd->ddev.dev); + + return 0; +} + +static int cppi41_add_chans(struct device *dev, struct cppi41_dd *cdd) +{ + struct cppi41_channel *cchan, *chans; + int i; + u32 n_chans = cdd->n_chans; + + /* + * The channels can only be used as TX or as RX. So we add twice + * that much dma channels because USB can only do RX or TX. 
+ */ + n_chans *= 2; + + chans = devm_kcalloc(dev, n_chans, sizeof(*chans), GFP_KERNEL); + if (!chans) + return -ENOMEM; + + for (i = 0; i < n_chans; i++) { + cchan = &chans[i]; + + cchan->cdd = cdd; + if (i & 1) { + cchan->gcr_reg = cdd->ctrl_mem + DMA_TXGCR(i >> 1); + cchan->is_tx = 1; + } else { + cchan->gcr_reg = cdd->ctrl_mem + DMA_RXGCR(i >> 1); + cchan->is_tx = 0; + } + cchan->port_num = i >> 1; + cchan->desc = &cdd->cd[i]; + cchan->desc_phys = cdd->descs_phys; + cchan->desc_phys += i * sizeof(struct cppi41_desc); + cchan->chan.device = &cdd->ddev; + list_add_tail(&cchan->chan.device_node, &cdd->ddev.channels); + } + cdd->first_td_desc = n_chans; + + return 0; +} + +static void purge_descs(struct device *dev, struct cppi41_dd *cdd) +{ + unsigned int mem_decs; + int i; + + mem_decs = ALLOC_DECS_NUM * sizeof(struct cppi41_desc); + + for (i = 0; i < DESCS_AREAS; i++) { + + cppi_writel(0, cdd->qmgr_mem + QMGR_MEMBASE(i)); + cppi_writel(0, cdd->qmgr_mem + QMGR_MEMCTRL(i)); + + dma_free_coherent(dev, mem_decs, cdd->cd, + cdd->descs_phys); + } +} + +static void disable_sched(struct cppi41_dd *cdd) +{ + cppi_writel(0, cdd->sched_mem + DMA_SCHED_CTRL); +} + +static void deinit_cppi41(struct device *dev, struct cppi41_dd *cdd) +{ + disable_sched(cdd); + + purge_descs(dev, cdd); + + cppi_writel(0, cdd->qmgr_mem + QMGR_LRAM0_BASE); + cppi_writel(0, cdd->qmgr_mem + QMGR_LRAM0_BASE); + dma_free_coherent(dev, QMGR_SCRATCH_SIZE, cdd->qmgr_scratch, + cdd->scratch_phys); +} + +static int init_descs(struct device *dev, struct cppi41_dd *cdd) +{ + unsigned int desc_size; + unsigned int mem_decs; + int i; + u32 reg; + u32 idx; + + BUILD_BUG_ON(sizeof(struct cppi41_desc) & + (sizeof(struct cppi41_desc) - 1)); + BUILD_BUG_ON(sizeof(struct cppi41_desc) < 32); + BUILD_BUG_ON(ALLOC_DECS_NUM < 32); + + desc_size = sizeof(struct cppi41_desc); + mem_decs = ALLOC_DECS_NUM * desc_size; + + idx = 0; + for (i = 0; i < DESCS_AREAS; i++) { + + reg = idx << QMGR_MEMCTRL_IDX_SH; + reg |= (ilog2(desc_size) - 5) << QMGR_MEMCTRL_DESC_SH; + reg |= ilog2(ALLOC_DECS_NUM) - 5; + + BUILD_BUG_ON(DESCS_AREAS != 1); + cdd->cd = dma_alloc_coherent(dev, mem_decs, + &cdd->descs_phys, GFP_KERNEL); + if (!cdd->cd) + return -ENOMEM; + + cppi_writel(cdd->descs_phys, cdd->qmgr_mem + QMGR_MEMBASE(i)); + cppi_writel(reg, cdd->qmgr_mem + QMGR_MEMCTRL(i)); + + idx += ALLOC_DECS_NUM; + } + return 0; +} + +static void init_sched(struct cppi41_dd *cdd) +{ + unsigned ch; + unsigned word; + u32 reg; + + word = 0; + cppi_writel(0, cdd->sched_mem + DMA_SCHED_CTRL); + for (ch = 0; ch < cdd->n_chans; ch += 2) { + + reg = SCHED_ENTRY0_CHAN(ch); + reg |= SCHED_ENTRY1_CHAN(ch) | SCHED_ENTRY1_IS_RX; + + reg |= SCHED_ENTRY2_CHAN(ch + 1); + reg |= SCHED_ENTRY3_CHAN(ch + 1) | SCHED_ENTRY3_IS_RX; + cppi_writel(reg, cdd->sched_mem + DMA_SCHED_WORD(word)); + word++; + } + reg = cdd->n_chans * 2 - 1; + reg |= DMA_SCHED_CTRL_EN; + cppi_writel(reg, cdd->sched_mem + DMA_SCHED_CTRL); +} + +static int init_cppi41(struct device *dev, struct cppi41_dd *cdd) +{ + int ret; + + BUILD_BUG_ON(QMGR_SCRATCH_SIZE > ((1 << 14) - 1)); + cdd->qmgr_scratch = dma_alloc_coherent(dev, QMGR_SCRATCH_SIZE, + &cdd->scratch_phys, GFP_KERNEL); + if (!cdd->qmgr_scratch) + return -ENOMEM; + + cppi_writel(cdd->scratch_phys, cdd->qmgr_mem + QMGR_LRAM0_BASE); + cppi_writel(TOTAL_DESCS_NUM, cdd->qmgr_mem + QMGR_LRAM_SIZE); + cppi_writel(0, cdd->qmgr_mem + QMGR_LRAM1_BASE); + + ret = init_descs(dev, cdd); + if (ret) + goto err_td; + + cppi_writel(cdd->td_queue.submit, cdd->ctrl_mem + DMA_TDFDQ); 
+ init_sched(cdd); + + return 0; +err_td: + deinit_cppi41(dev, cdd); + return ret; +} + +static struct platform_driver cpp41_dma_driver; +/* + * The param format is: + * X Y + * X: Port + * Y: 0 = RX else TX + */ +#define INFO_PORT 0 +#define INFO_IS_TX 1 + +static bool cpp41_dma_filter_fn(struct dma_chan *chan, void *param) +{ + struct cppi41_channel *cchan; + struct cppi41_dd *cdd; + const struct chan_queues *queues; + u32 *num = param; + + if (chan->device->dev->driver != &cpp41_dma_driver.driver) + return false; + + cchan = to_cpp41_chan(chan); + + if (cchan->port_num != num[INFO_PORT]) + return false; + + if (cchan->is_tx && !num[INFO_IS_TX]) + return false; + cdd = cchan->cdd; + if (cchan->is_tx) + queues = cdd->queues_tx; + else + queues = cdd->queues_rx; + + BUILD_BUG_ON(ARRAY_SIZE(am335x_usb_queues_rx) != + ARRAY_SIZE(am335x_usb_queues_tx)); + if (WARN_ON(cchan->port_num >= ARRAY_SIZE(am335x_usb_queues_rx))) + return false; + + cchan->q_num = queues[cchan->port_num].submit; + cchan->q_comp_num = queues[cchan->port_num].complete; + return true; +} + +static struct of_dma_filter_info cpp41_dma_info = { + .filter_fn = cpp41_dma_filter_fn, +}; + +static struct dma_chan *cppi41_dma_xlate(struct of_phandle_args *dma_spec, + struct of_dma *ofdma) +{ + int count = dma_spec->args_count; + struct of_dma_filter_info *info = ofdma->of_dma_data; + + if (!info || !info->filter_fn) + return NULL; + + if (count != 2) + return NULL; + + return dma_request_channel(info->dma_cap, info->filter_fn, + &dma_spec->args[0]); +} + +static const struct cppi_glue_infos am335x_usb_infos = { + .queues_rx = am335x_usb_queues_rx, + .queues_tx = am335x_usb_queues_tx, + .td_queue = { .submit = 31, .complete = 0 }, + .first_completion_queue = 93, + .qmgr_num_pend = 5, +}; + +static const struct cppi_glue_infos da8xx_usb_infos = { + .queues_rx = da8xx_usb_queues_rx, + .queues_tx = da8xx_usb_queues_tx, + .td_queue = { .submit = 31, .complete = 0 }, + .first_completion_queue = 24, + .qmgr_num_pend = 2, +}; + +static const struct of_device_id cppi41_dma_ids[] = { + { .compatible = "ti,am3359-cppi41", .data = &am335x_usb_infos}, + { .compatible = "ti,da830-cppi41", .data = &da8xx_usb_infos}, + {}, +}; +MODULE_DEVICE_TABLE(of, cppi41_dma_ids); + +static const struct cppi_glue_infos *get_glue_info(struct device *dev) +{ + const struct of_device_id *of_id; + + of_id = of_match_node(cppi41_dma_ids, dev->of_node); + if (!of_id) + return NULL; + return of_id->data; +} + +#define CPPI41_DMA_BUSWIDTHS (BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \ + BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \ + BIT(DMA_SLAVE_BUSWIDTH_3_BYTES) | \ + BIT(DMA_SLAVE_BUSWIDTH_4_BYTES)) + +static int cppi41_dma_probe(struct platform_device *pdev) +{ + struct cppi41_dd *cdd; + struct device *dev = &pdev->dev; + const struct cppi_glue_infos *glue_info; + struct resource *mem; + int index; + int irq; + int ret; + + glue_info = get_glue_info(dev); + if (!glue_info) + return -EINVAL; + + cdd = devm_kzalloc(&pdev->dev, sizeof(*cdd), GFP_KERNEL); + if (!cdd) + return -ENOMEM; + + dma_cap_set(DMA_SLAVE, cdd->ddev.cap_mask); + cdd->ddev.device_alloc_chan_resources = cppi41_dma_alloc_chan_resources; + cdd->ddev.device_free_chan_resources = cppi41_dma_free_chan_resources; + cdd->ddev.device_tx_status = cppi41_dma_tx_status; + cdd->ddev.device_issue_pending = cppi41_dma_issue_pending; + cdd->ddev.device_prep_slave_sg = cppi41_dma_prep_slave_sg; + cdd->ddev.device_terminate_all = cppi41_stop_chan; + cdd->ddev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV); + 
cdd->ddev.src_addr_widths = CPPI41_DMA_BUSWIDTHS; + cdd->ddev.dst_addr_widths = CPPI41_DMA_BUSWIDTHS; + cdd->ddev.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST; + cdd->ddev.dev = dev; + INIT_LIST_HEAD(&cdd->ddev.channels); + cpp41_dma_info.dma_cap = cdd->ddev.cap_mask; + + index = of_property_match_string(dev->of_node, + "reg-names", "controller"); + if (index < 0) + return index; + + mem = platform_get_resource(pdev, IORESOURCE_MEM, index); + cdd->ctrl_mem = devm_ioremap_resource(dev, mem); + if (IS_ERR(cdd->ctrl_mem)) + return PTR_ERR(cdd->ctrl_mem); + + mem = platform_get_resource(pdev, IORESOURCE_MEM, index + 1); + cdd->sched_mem = devm_ioremap_resource(dev, mem); + if (IS_ERR(cdd->sched_mem)) + return PTR_ERR(cdd->sched_mem); + + mem = platform_get_resource(pdev, IORESOURCE_MEM, index + 2); + cdd->qmgr_mem = devm_ioremap_resource(dev, mem); + if (IS_ERR(cdd->qmgr_mem)) + return PTR_ERR(cdd->qmgr_mem); + + spin_lock_init(&cdd->lock); + INIT_LIST_HEAD(&cdd->pending); + + platform_set_drvdata(pdev, cdd); + + pm_runtime_enable(dev); + pm_runtime_set_autosuspend_delay(dev, 100); + pm_runtime_use_autosuspend(dev); + ret = pm_runtime_get_sync(dev); + if (ret < 0) + goto err_get_sync; + + cdd->queues_rx = glue_info->queues_rx; + cdd->queues_tx = glue_info->queues_tx; + cdd->td_queue = glue_info->td_queue; + cdd->qmgr_num_pend = glue_info->qmgr_num_pend; + cdd->first_completion_queue = glue_info->first_completion_queue; + + ret = of_property_read_u32(dev->of_node, + "#dma-channels", &cdd->n_chans); + if (ret) + goto err_get_n_chans; + + ret = init_cppi41(dev, cdd); + if (ret) + goto err_init_cppi; + + ret = cppi41_add_chans(dev, cdd); + if (ret) + goto err_chans; + + irq = irq_of_parse_and_map(dev->of_node, 0); + if (!irq) { + ret = -EINVAL; + goto err_chans; + } + + ret = devm_request_irq(&pdev->dev, irq, cppi41_irq, IRQF_SHARED, + dev_name(dev), cdd); + if (ret) + goto err_chans; + cdd->irq = irq; + + ret = dma_async_device_register(&cdd->ddev); + if (ret) + goto err_chans; + + ret = of_dma_controller_register(dev->of_node, + cppi41_dma_xlate, &cpp41_dma_info); + if (ret) + goto err_of; + + pm_runtime_mark_last_busy(dev); + pm_runtime_put_autosuspend(dev); + + return 0; +err_of: + dma_async_device_unregister(&cdd->ddev); +err_chans: + deinit_cppi41(dev, cdd); +err_init_cppi: + pm_runtime_dont_use_autosuspend(dev); +err_get_n_chans: +err_get_sync: + pm_runtime_put_sync(dev); + pm_runtime_disable(dev); + return ret; +} + +static int cppi41_dma_remove(struct platform_device *pdev) +{ + struct cppi41_dd *cdd = platform_get_drvdata(pdev); + int error; + + error = pm_runtime_get_sync(&pdev->dev); + if (error < 0) + dev_err(&pdev->dev, "%s could not pm_runtime_get: %i\n", + __func__, error); + of_dma_controller_free(pdev->dev.of_node); + dma_async_device_unregister(&cdd->ddev); + + devm_free_irq(&pdev->dev, cdd->irq, cdd); + deinit_cppi41(&pdev->dev, cdd); + pm_runtime_dont_use_autosuspend(&pdev->dev); + pm_runtime_put_sync(&pdev->dev); + pm_runtime_disable(&pdev->dev); + return 0; +} + +static int __maybe_unused cppi41_suspend(struct device *dev) +{ + struct cppi41_dd *cdd = dev_get_drvdata(dev); + + cdd->dma_tdfdq = cppi_readl(cdd->ctrl_mem + DMA_TDFDQ); + disable_sched(cdd); + + return 0; +} + +static int __maybe_unused cppi41_resume(struct device *dev) +{ + struct cppi41_dd *cdd = dev_get_drvdata(dev); + struct cppi41_channel *c; + int i; + + for (i = 0; i < DESCS_AREAS; i++) + cppi_writel(cdd->descs_phys, cdd->qmgr_mem + QMGR_MEMBASE(i)); + + list_for_each_entry(c, &cdd->ddev.channels, 
chan.device_node) + if (!c->is_tx) + cppi_writel(c->q_num, c->gcr_reg + RXHPCRA0); + + init_sched(cdd); + + cppi_writel(cdd->dma_tdfdq, cdd->ctrl_mem + DMA_TDFDQ); + cppi_writel(cdd->scratch_phys, cdd->qmgr_mem + QMGR_LRAM0_BASE); + cppi_writel(QMGR_SCRATCH_SIZE, cdd->qmgr_mem + QMGR_LRAM_SIZE); + cppi_writel(0, cdd->qmgr_mem + QMGR_LRAM1_BASE); + + return 0; +} + +static int __maybe_unused cppi41_runtime_suspend(struct device *dev) +{ + struct cppi41_dd *cdd = dev_get_drvdata(dev); + unsigned long flags; + + spin_lock_irqsave(&cdd->lock, flags); + cdd->is_suspended = true; + WARN_ON(!list_empty(&cdd->pending)); + spin_unlock_irqrestore(&cdd->lock, flags); + + return 0; +} + +static int __maybe_unused cppi41_runtime_resume(struct device *dev) +{ + struct cppi41_dd *cdd = dev_get_drvdata(dev); + unsigned long flags; + + spin_lock_irqsave(&cdd->lock, flags); + cdd->is_suspended = false; + cppi41_run_queue(cdd); + spin_unlock_irqrestore(&cdd->lock, flags); + + return 0; +} + +static const struct dev_pm_ops cppi41_pm_ops = { + SET_LATE_SYSTEM_SLEEP_PM_OPS(cppi41_suspend, cppi41_resume) + SET_RUNTIME_PM_OPS(cppi41_runtime_suspend, + cppi41_runtime_resume, + NULL) +}; + +static struct platform_driver cpp41_dma_driver = { + .probe = cppi41_dma_probe, + .remove = cppi41_dma_remove, + .driver = { + .name = "cppi41-dma-engine", + .pm = &cppi41_pm_ops, + .of_match_table = of_match_ptr(cppi41_dma_ids), + }, +}; + +module_platform_driver(cpp41_dma_driver); +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Sebastian Andrzej Siewior "); diff --git a/drivers/dma/ti/dma-crossbar.c b/drivers/dma/ti/dma-crossbar.c new file mode 100644 index 000000000000..9272b173c746 --- /dev/null +++ b/drivers/dma/ti/dma-crossbar.c @@ -0,0 +1,478 @@ +/* + * Copyright (C) 2015 Texas Instruments Incorporated - http://www.ti.com + * Author: Peter Ujfalusi + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + */ +#include +#include +#include +#include +#include +#include +#include +#include + +#define TI_XBAR_DRA7 0 +#define TI_XBAR_AM335X 1 +static const u32 ti_xbar_type[] = { + [TI_XBAR_DRA7] = TI_XBAR_DRA7, + [TI_XBAR_AM335X] = TI_XBAR_AM335X, +}; + +static const struct of_device_id ti_dma_xbar_match[] = { + { + .compatible = "ti,dra7-dma-crossbar", + .data = &ti_xbar_type[TI_XBAR_DRA7], + }, + { + .compatible = "ti,am335x-edma-crossbar", + .data = &ti_xbar_type[TI_XBAR_AM335X], + }, + {}, +}; + +/* Crossbar on AM335x/AM437x family */ +#define TI_AM335X_XBAR_LINES 64 + +struct ti_am335x_xbar_data { + void __iomem *iomem; + + struct dma_router dmarouter; + + u32 xbar_events; /* maximum number of events to select in xbar */ + u32 dma_requests; /* number of DMA requests on eDMA */ +}; + +struct ti_am335x_xbar_map { + u16 dma_line; + u8 mux_val; +}; + +static inline void ti_am335x_xbar_write(void __iomem *iomem, int event, u8 val) +{ + /* + * TPCC_EVT_MUX_60_63 register layout is different than the + * rest, in the sense, that event 63 is mapped to lowest byte + * and event 60 is mapped to highest, handle it separately. 
+ */ + if (event >= 60 && event <= 63) + writeb_relaxed(val, iomem + (63 - event % 4)); + else + writeb_relaxed(val, iomem + event); +} + +static void ti_am335x_xbar_free(struct device *dev, void *route_data) +{ + struct ti_am335x_xbar_data *xbar = dev_get_drvdata(dev); + struct ti_am335x_xbar_map *map = route_data; + + dev_dbg(dev, "Unmapping XBAR event %u on channel %u\n", + map->mux_val, map->dma_line); + + ti_am335x_xbar_write(xbar->iomem, map->dma_line, 0); + kfree(map); +} + +static void *ti_am335x_xbar_route_allocate(struct of_phandle_args *dma_spec, + struct of_dma *ofdma) +{ + struct platform_device *pdev = of_find_device_by_node(ofdma->of_node); + struct ti_am335x_xbar_data *xbar = platform_get_drvdata(pdev); + struct ti_am335x_xbar_map *map; + + if (dma_spec->args_count != 3) + return ERR_PTR(-EINVAL); + + if (dma_spec->args[2] >= xbar->xbar_events) { + dev_err(&pdev->dev, "Invalid XBAR event number: %d\n", + dma_spec->args[2]); + return ERR_PTR(-EINVAL); + } + + if (dma_spec->args[0] >= xbar->dma_requests) { + dev_err(&pdev->dev, "Invalid DMA request line number: %d\n", + dma_spec->args[0]); + return ERR_PTR(-EINVAL); + } + + /* The of_node_put() will be done in the core for the node */ + dma_spec->np = of_parse_phandle(ofdma->of_node, "dma-masters", 0); + if (!dma_spec->np) { + dev_err(&pdev->dev, "Can't get DMA master\n"); + return ERR_PTR(-EINVAL); + } + + map = kzalloc(sizeof(*map), GFP_KERNEL); + if (!map) { + of_node_put(dma_spec->np); + return ERR_PTR(-ENOMEM); + } + + map->dma_line = (u16)dma_spec->args[0]; + map->mux_val = (u8)dma_spec->args[2]; + + dma_spec->args[2] = 0; + dma_spec->args_count = 2; + + dev_dbg(&pdev->dev, "Mapping XBAR event%u to DMA%u\n", + map->mux_val, map->dma_line); + + ti_am335x_xbar_write(xbar->iomem, map->dma_line, map->mux_val); + + return map; +} + +static const struct of_device_id ti_am335x_master_match[] = { + { .compatible = "ti,edma3-tpcc", }, + {}, +}; + +static int ti_am335x_xbar_probe(struct platform_device *pdev) +{ + struct device_node *node = pdev->dev.of_node; + const struct of_device_id *match; + struct device_node *dma_node; + struct ti_am335x_xbar_data *xbar; + struct resource *res; + void __iomem *iomem; + int i, ret; + + if (!node) + return -ENODEV; + + xbar = devm_kzalloc(&pdev->dev, sizeof(*xbar), GFP_KERNEL); + if (!xbar) + return -ENOMEM; + + dma_node = of_parse_phandle(node, "dma-masters", 0); + if (!dma_node) { + dev_err(&pdev->dev, "Can't get DMA master node\n"); + return -ENODEV; + } + + match = of_match_node(ti_am335x_master_match, dma_node); + if (!match) { + dev_err(&pdev->dev, "DMA master is not supported\n"); + of_node_put(dma_node); + return -EINVAL; + } + + if (of_property_read_u32(dma_node, "dma-requests", + &xbar->dma_requests)) { + dev_info(&pdev->dev, + "Missing XBAR output information, using %u.\n", + TI_AM335X_XBAR_LINES); + xbar->dma_requests = TI_AM335X_XBAR_LINES; + } + of_node_put(dma_node); + + if (of_property_read_u32(node, "dma-requests", &xbar->xbar_events)) { + dev_info(&pdev->dev, + "Missing XBAR input information, using %u.\n", + TI_AM335X_XBAR_LINES); + xbar->xbar_events = TI_AM335X_XBAR_LINES; + } + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + iomem = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(iomem)) + return PTR_ERR(iomem); + + xbar->iomem = iomem; + + xbar->dmarouter.dev = &pdev->dev; + xbar->dmarouter.route_free = ti_am335x_xbar_free; + + platform_set_drvdata(pdev, xbar); + + /* Reset the crossbar */ + for (i = 0; i < xbar->dma_requests; i++) + 
ti_am335x_xbar_write(xbar->iomem, i, 0); + + ret = of_dma_router_register(node, ti_am335x_xbar_route_allocate, + &xbar->dmarouter); + + return ret; +} + +/* Crossbar on DRA7xx family */ +#define TI_DRA7_XBAR_OUTPUTS 127 +#define TI_DRA7_XBAR_INPUTS 256 + +struct ti_dra7_xbar_data { + void __iomem *iomem; + + struct dma_router dmarouter; + struct mutex mutex; + unsigned long *dma_inuse; + + u16 safe_val; /* Value to rest the crossbar lines */ + u32 xbar_requests; /* number of DMA requests connected to XBAR */ + u32 dma_requests; /* number of DMA requests forwarded to DMA */ + u32 dma_offset; +}; + +struct ti_dra7_xbar_map { + u16 xbar_in; + int xbar_out; +}; + +static inline void ti_dra7_xbar_write(void __iomem *iomem, int xbar, u16 val) +{ + writew_relaxed(val, iomem + (xbar * 2)); +} + +static void ti_dra7_xbar_free(struct device *dev, void *route_data) +{ + struct ti_dra7_xbar_data *xbar = dev_get_drvdata(dev); + struct ti_dra7_xbar_map *map = route_data; + + dev_dbg(dev, "Unmapping XBAR%u (was routed to %d)\n", + map->xbar_in, map->xbar_out); + + ti_dra7_xbar_write(xbar->iomem, map->xbar_out, xbar->safe_val); + mutex_lock(&xbar->mutex); + clear_bit(map->xbar_out, xbar->dma_inuse); + mutex_unlock(&xbar->mutex); + kfree(map); +} + +static void *ti_dra7_xbar_route_allocate(struct of_phandle_args *dma_spec, + struct of_dma *ofdma) +{ + struct platform_device *pdev = of_find_device_by_node(ofdma->of_node); + struct ti_dra7_xbar_data *xbar = platform_get_drvdata(pdev); + struct ti_dra7_xbar_map *map; + + if (dma_spec->args[0] >= xbar->xbar_requests) { + dev_err(&pdev->dev, "Invalid XBAR request number: %d\n", + dma_spec->args[0]); + return ERR_PTR(-EINVAL); + } + + /* The of_node_put() will be done in the core for the node */ + dma_spec->np = of_parse_phandle(ofdma->of_node, "dma-masters", 0); + if (!dma_spec->np) { + dev_err(&pdev->dev, "Can't get DMA master\n"); + return ERR_PTR(-EINVAL); + } + + map = kzalloc(sizeof(*map), GFP_KERNEL); + if (!map) { + of_node_put(dma_spec->np); + return ERR_PTR(-ENOMEM); + } + + mutex_lock(&xbar->mutex); + map->xbar_out = find_first_zero_bit(xbar->dma_inuse, + xbar->dma_requests); + if (map->xbar_out == xbar->dma_requests) { + mutex_unlock(&xbar->mutex); + dev_err(&pdev->dev, "Run out of free DMA requests\n"); + kfree(map); + return ERR_PTR(-ENOMEM); + } + set_bit(map->xbar_out, xbar->dma_inuse); + mutex_unlock(&xbar->mutex); + + map->xbar_in = (u16)dma_spec->args[0]; + + dma_spec->args[0] = map->xbar_out + xbar->dma_offset; + + dev_dbg(&pdev->dev, "Mapping XBAR%u to DMA%d\n", + map->xbar_in, map->xbar_out); + + ti_dra7_xbar_write(xbar->iomem, map->xbar_out, map->xbar_in); + + return map; +} + +#define TI_XBAR_EDMA_OFFSET 0 +#define TI_XBAR_SDMA_OFFSET 1 +static const u32 ti_dma_offset[] = { + [TI_XBAR_EDMA_OFFSET] = 0, + [TI_XBAR_SDMA_OFFSET] = 1, +}; + +static const struct of_device_id ti_dra7_master_match[] = { + { + .compatible = "ti,omap4430-sdma", + .data = &ti_dma_offset[TI_XBAR_SDMA_OFFSET], + }, + { + .compatible = "ti,edma3", + .data = &ti_dma_offset[TI_XBAR_EDMA_OFFSET], + }, + { + .compatible = "ti,edma3-tpcc", + .data = &ti_dma_offset[TI_XBAR_EDMA_OFFSET], + }, + {}, +}; + +static inline void ti_dra7_xbar_reserve(int offset, int len, unsigned long *p) +{ + for (; len > 0; len--) + set_bit(offset + (len - 1), p); +} + +static int ti_dra7_xbar_probe(struct platform_device *pdev) +{ + struct device_node *node = pdev->dev.of_node; + const struct of_device_id *match; + struct device_node *dma_node; + struct ti_dra7_xbar_data *xbar; + struct 
property *prop; + struct resource *res; + u32 safe_val; + int sz; + void __iomem *iomem; + int i, ret; + + if (!node) + return -ENODEV; + + xbar = devm_kzalloc(&pdev->dev, sizeof(*xbar), GFP_KERNEL); + if (!xbar) + return -ENOMEM; + + dma_node = of_parse_phandle(node, "dma-masters", 0); + if (!dma_node) { + dev_err(&pdev->dev, "Can't get DMA master node\n"); + return -ENODEV; + } + + match = of_match_node(ti_dra7_master_match, dma_node); + if (!match) { + dev_err(&pdev->dev, "DMA master is not supported\n"); + of_node_put(dma_node); + return -EINVAL; + } + + if (of_property_read_u32(dma_node, "dma-requests", + &xbar->dma_requests)) { + dev_info(&pdev->dev, + "Missing XBAR output information, using %u.\n", + TI_DRA7_XBAR_OUTPUTS); + xbar->dma_requests = TI_DRA7_XBAR_OUTPUTS; + } + of_node_put(dma_node); + + xbar->dma_inuse = devm_kcalloc(&pdev->dev, + BITS_TO_LONGS(xbar->dma_requests), + sizeof(unsigned long), GFP_KERNEL); + if (!xbar->dma_inuse) + return -ENOMEM; + + if (of_property_read_u32(node, "dma-requests", &xbar->xbar_requests)) { + dev_info(&pdev->dev, + "Missing XBAR input information, using %u.\n", + TI_DRA7_XBAR_INPUTS); + xbar->xbar_requests = TI_DRA7_XBAR_INPUTS; + } + + if (!of_property_read_u32(node, "ti,dma-safe-map", &safe_val)) + xbar->safe_val = (u16)safe_val; + + + prop = of_find_property(node, "ti,reserved-dma-request-ranges", &sz); + if (prop) { + const char pname[] = "ti,reserved-dma-request-ranges"; + u32 (*rsv_events)[2]; + size_t nelm = sz / sizeof(*rsv_events); + int i; + + if (!nelm) + return -EINVAL; + + rsv_events = kcalloc(nelm, sizeof(*rsv_events), GFP_KERNEL); + if (!rsv_events) + return -ENOMEM; + + ret = of_property_read_u32_array(node, pname, (u32 *)rsv_events, + nelm * 2); + if (ret) + return ret; + + for (i = 0; i < nelm; i++) { + ti_dra7_xbar_reserve(rsv_events[i][0], rsv_events[i][1], + xbar->dma_inuse); + } + kfree(rsv_events); + } + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + iomem = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(iomem)) + return PTR_ERR(iomem); + + xbar->iomem = iomem; + + xbar->dmarouter.dev = &pdev->dev; + xbar->dmarouter.route_free = ti_dra7_xbar_free; + xbar->dma_offset = *(u32 *)match->data; + + mutex_init(&xbar->mutex); + platform_set_drvdata(pdev, xbar); + + /* Reset the crossbar */ + for (i = 0; i < xbar->dma_requests; i++) { + if (!test_bit(i, xbar->dma_inuse)) + ti_dra7_xbar_write(xbar->iomem, i, xbar->safe_val); + } + + ret = of_dma_router_register(node, ti_dra7_xbar_route_allocate, + &xbar->dmarouter); + if (ret) { + /* Restore the defaults for the crossbar */ + for (i = 0; i < xbar->dma_requests; i++) { + if (!test_bit(i, xbar->dma_inuse)) + ti_dra7_xbar_write(xbar->iomem, i, i); + } + } + + return ret; +} + +static int ti_dma_xbar_probe(struct platform_device *pdev) +{ + const struct of_device_id *match; + int ret; + + match = of_match_node(ti_dma_xbar_match, pdev->dev.of_node); + if (unlikely(!match)) + return -EINVAL; + + switch (*(u32 *)match->data) { + case TI_XBAR_DRA7: + ret = ti_dra7_xbar_probe(pdev); + break; + case TI_XBAR_AM335X: + ret = ti_am335x_xbar_probe(pdev); + break; + default: + dev_err(&pdev->dev, "Unsupported crossbar\n"); + ret = -ENODEV; + break; + } + + return ret; +} + +static struct platform_driver ti_dma_xbar_driver = { + .driver = { + .name = "ti-dma-crossbar", + .of_match_table = of_match_ptr(ti_dma_xbar_match), + }, + .probe = ti_dma_xbar_probe, +}; + +static int omap_dmaxbar_init(void) +{ + return platform_driver_register(&ti_dma_xbar_driver); +} 
+arch_initcall(omap_dmaxbar_init); diff --git a/drivers/dma/ti/edma.c b/drivers/dma/ti/edma.c new file mode 100644 index 000000000000..93a5cbf13319 --- /dev/null +++ b/drivers/dma/ti/edma.c @@ -0,0 +1,2568 @@ +/* + * TI EDMA DMA engine driver + * + * Copyright 2012 Texas Instruments + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation version 2. + * + * This program is distributed "as is" WITHOUT ANY WARRANTY of any + * kind, whether express or implied; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "../dmaengine.h" +#include "../virt-dma.h" + +/* Offsets matching "struct edmacc_param" */ +#define PARM_OPT 0x00 +#define PARM_SRC 0x04 +#define PARM_A_B_CNT 0x08 +#define PARM_DST 0x0c +#define PARM_SRC_DST_BIDX 0x10 +#define PARM_LINK_BCNTRLD 0x14 +#define PARM_SRC_DST_CIDX 0x18 +#define PARM_CCNT 0x1c + +#define PARM_SIZE 0x20 + +/* Offsets for EDMA CC global channel registers and their shadows */ +#define SH_ER 0x00 /* 64 bits */ +#define SH_ECR 0x08 /* 64 bits */ +#define SH_ESR 0x10 /* 64 bits */ +#define SH_CER 0x18 /* 64 bits */ +#define SH_EER 0x20 /* 64 bits */ +#define SH_EECR 0x28 /* 64 bits */ +#define SH_EESR 0x30 /* 64 bits */ +#define SH_SER 0x38 /* 64 bits */ +#define SH_SECR 0x40 /* 64 bits */ +#define SH_IER 0x50 /* 64 bits */ +#define SH_IECR 0x58 /* 64 bits */ +#define SH_IESR 0x60 /* 64 bits */ +#define SH_IPR 0x68 /* 64 bits */ +#define SH_ICR 0x70 /* 64 bits */ +#define SH_IEVAL 0x78 +#define SH_QER 0x80 +#define SH_QEER 0x84 +#define SH_QEECR 0x88 +#define SH_QEESR 0x8c +#define SH_QSER 0x90 +#define SH_QSECR 0x94 +#define SH_SIZE 0x200 + +/* Offsets for EDMA CC global registers */ +#define EDMA_REV 0x0000 +#define EDMA_CCCFG 0x0004 +#define EDMA_QCHMAP 0x0200 /* 8 registers */ +#define EDMA_DMAQNUM 0x0240 /* 8 registers (4 on OMAP-L1xx) */ +#define EDMA_QDMAQNUM 0x0260 +#define EDMA_QUETCMAP 0x0280 +#define EDMA_QUEPRI 0x0284 +#define EDMA_EMR 0x0300 /* 64 bits */ +#define EDMA_EMCR 0x0308 /* 64 bits */ +#define EDMA_QEMR 0x0310 +#define EDMA_QEMCR 0x0314 +#define EDMA_CCERR 0x0318 +#define EDMA_CCERRCLR 0x031c +#define EDMA_EEVAL 0x0320 +#define EDMA_DRAE 0x0340 /* 4 x 64 bits*/ +#define EDMA_QRAE 0x0380 /* 4 registers */ +#define EDMA_QUEEVTENTRY 0x0400 /* 2 x 16 registers */ +#define EDMA_QSTAT 0x0600 /* 2 registers */ +#define EDMA_QWMTHRA 0x0620 +#define EDMA_QWMTHRB 0x0624 +#define EDMA_CCSTAT 0x0640 + +#define EDMA_M 0x1000 /* global channel registers */ +#define EDMA_ECR 0x1008 +#define EDMA_ECRH 0x100C +#define EDMA_SHADOW0 0x2000 /* 4 shadow regions */ +#define EDMA_PARM 0x4000 /* PaRAM entries */ + +#define PARM_OFFSET(param_no) (EDMA_PARM + ((param_no) << 5)) + +#define EDMA_DCHMAP 0x0100 /* 64 registers */ + +/* CCCFG register */ +#define GET_NUM_DMACH(x) (x & 0x7) /* bits 0-2 */ +#define GET_NUM_QDMACH(x) ((x & 0x70) >> 4) /* bits 4-6 */ +#define GET_NUM_PAENTRY(x) ((x & 0x7000) >> 12) /* bits 12-14 */ +#define GET_NUM_EVQUE(x) ((x & 0x70000) >> 16) /* bits 16-18 */ +#define GET_NUM_REGN(x) ((x & 0x300000) >> 20) /* bits 20-21 */ +#define CHMAP_EXIST BIT(24) + +/* CCSTAT register */ +#define EDMA_CCSTAT_ACTV BIT(4) + +/* + * 
Max of 20 segments per channel to conserve PaRAM slots + * Also note that MAX_NR_SG should be atleast the no.of periods + * that are required for ASoC, otherwise DMA prep calls will + * fail. Today davinci-pcm is the only user of this driver and + * requires atleast 17 slots, so we setup the default to 20. + */ +#define MAX_NR_SG 20 +#define EDMA_MAX_SLOTS MAX_NR_SG +#define EDMA_DESCRIPTORS 16 + +#define EDMA_CHANNEL_ANY -1 /* for edma_alloc_channel() */ +#define EDMA_SLOT_ANY -1 /* for edma_alloc_slot() */ +#define EDMA_CONT_PARAMS_ANY 1001 +#define EDMA_CONT_PARAMS_FIXED_EXACT 1002 +#define EDMA_CONT_PARAMS_FIXED_NOT_EXACT 1003 + +/* PaRAM slots are laid out like this */ +struct edmacc_param { + u32 opt; + u32 src; + u32 a_b_cnt; + u32 dst; + u32 src_dst_bidx; + u32 link_bcntrld; + u32 src_dst_cidx; + u32 ccnt; +} __packed; + +/* fields in edmacc_param.opt */ +#define SAM BIT(0) +#define DAM BIT(1) +#define SYNCDIM BIT(2) +#define STATIC BIT(3) +#define EDMA_FWID (0x07 << 8) +#define TCCMODE BIT(11) +#define EDMA_TCC(t) ((t) << 12) +#define TCINTEN BIT(20) +#define ITCINTEN BIT(21) +#define TCCHEN BIT(22) +#define ITCCHEN BIT(23) + +struct edma_pset { + u32 len; + dma_addr_t addr; + struct edmacc_param param; +}; + +struct edma_desc { + struct virt_dma_desc vdesc; + struct list_head node; + enum dma_transfer_direction direction; + int cyclic; + int absync; + int pset_nr; + struct edma_chan *echan; + int processed; + + /* + * The following 4 elements are used for residue accounting. + * + * - processed_stat: the number of SG elements we have traversed + * so far to cover accounting. This is updated directly to processed + * during edma_callback and is always <= processed, because processed + * refers to the number of pending transfer (programmed to EDMA + * controller), where as processed_stat tracks number of transfers + * accounted for so far. + * + * - residue: The amount of bytes we have left to transfer for this desc + * + * - residue_stat: The residue in bytes of data we have covered + * so far for accounting. This is updated directly to residue + * during callbacks to keep it current. + * + * - sg_len: Tracks the length of the current intermediate transfer, + * this is required to update the residue during intermediate transfer + * completion callback. + */ + int processed_stat; + u32 sg_len; + u32 residue; + u32 residue_stat; + + struct edma_pset pset[0]; +}; + +struct edma_cc; + +struct edma_tc { + struct device_node *node; + u16 id; +}; + +struct edma_chan { + struct virt_dma_chan vchan; + struct list_head node; + struct edma_desc *edesc; + struct edma_cc *ecc; + struct edma_tc *tc; + int ch_num; + bool alloced; + bool hw_triggered; + int slot[EDMA_MAX_SLOTS]; + int missed; + struct dma_slave_config cfg; +}; + +struct edma_cc { + struct device *dev; + struct edma_soc_info *info; + void __iomem *base; + int id; + bool legacy_mode; + + /* eDMA3 resource information */ + unsigned num_channels; + unsigned num_qchannels; + unsigned num_region; + unsigned num_slots; + unsigned num_tc; + bool chmap_exist; + enum dma_event_q default_queue; + + unsigned int ccint; + unsigned int ccerrint; + + /* + * The slot_inuse bit for each PaRAM slot is clear unless the slot is + * in use by Linux or if it is allocated to be used by DSP. 
+ */ + unsigned long *slot_inuse; + + struct dma_device dma_slave; + struct dma_device *dma_memcpy; + struct edma_chan *slave_chans; + struct edma_tc *tc_list; + int dummy_slot; +}; + +/* dummy param set used to (re)initialize parameter RAM slots */ +static const struct edmacc_param dummy_paramset = { + .link_bcntrld = 0xffff, + .ccnt = 1, +}; + +#define EDMA_BINDING_LEGACY 0 +#define EDMA_BINDING_TPCC 1 +static const u32 edma_binding_type[] = { + [EDMA_BINDING_LEGACY] = EDMA_BINDING_LEGACY, + [EDMA_BINDING_TPCC] = EDMA_BINDING_TPCC, +}; + +static const struct of_device_id edma_of_ids[] = { + { + .compatible = "ti,edma3", + .data = &edma_binding_type[EDMA_BINDING_LEGACY], + }, + { + .compatible = "ti,edma3-tpcc", + .data = &edma_binding_type[EDMA_BINDING_TPCC], + }, + {} +}; +MODULE_DEVICE_TABLE(of, edma_of_ids); + +static const struct of_device_id edma_tptc_of_ids[] = { + { .compatible = "ti,edma3-tptc", }, + {} +}; +MODULE_DEVICE_TABLE(of, edma_tptc_of_ids); + +static inline unsigned int edma_read(struct edma_cc *ecc, int offset) +{ + return (unsigned int)__raw_readl(ecc->base + offset); +} + +static inline void edma_write(struct edma_cc *ecc, int offset, int val) +{ + __raw_writel(val, ecc->base + offset); +} + +static inline void edma_modify(struct edma_cc *ecc, int offset, unsigned and, + unsigned or) +{ + unsigned val = edma_read(ecc, offset); + + val &= and; + val |= or; + edma_write(ecc, offset, val); +} + +static inline void edma_and(struct edma_cc *ecc, int offset, unsigned and) +{ + unsigned val = edma_read(ecc, offset); + + val &= and; + edma_write(ecc, offset, val); +} + +static inline void edma_or(struct edma_cc *ecc, int offset, unsigned or) +{ + unsigned val = edma_read(ecc, offset); + + val |= or; + edma_write(ecc, offset, val); +} + +static inline unsigned int edma_read_array(struct edma_cc *ecc, int offset, + int i) +{ + return edma_read(ecc, offset + (i << 2)); +} + +static inline void edma_write_array(struct edma_cc *ecc, int offset, int i, + unsigned val) +{ + edma_write(ecc, offset + (i << 2), val); +} + +static inline void edma_modify_array(struct edma_cc *ecc, int offset, int i, + unsigned and, unsigned or) +{ + edma_modify(ecc, offset + (i << 2), and, or); +} + +static inline void edma_or_array(struct edma_cc *ecc, int offset, int i, + unsigned or) +{ + edma_or(ecc, offset + (i << 2), or); +} + +static inline void edma_or_array2(struct edma_cc *ecc, int offset, int i, int j, + unsigned or) +{ + edma_or(ecc, offset + ((i * 2 + j) << 2), or); +} + +static inline void edma_write_array2(struct edma_cc *ecc, int offset, int i, + int j, unsigned val) +{ + edma_write(ecc, offset + ((i * 2 + j) << 2), val); +} + +static inline unsigned int edma_shadow0_read(struct edma_cc *ecc, int offset) +{ + return edma_read(ecc, EDMA_SHADOW0 + offset); +} + +static inline unsigned int edma_shadow0_read_array(struct edma_cc *ecc, + int offset, int i) +{ + return edma_read(ecc, EDMA_SHADOW0 + offset + (i << 2)); +} + +static inline void edma_shadow0_write(struct edma_cc *ecc, int offset, + unsigned val) +{ + edma_write(ecc, EDMA_SHADOW0 + offset, val); +} + +static inline void edma_shadow0_write_array(struct edma_cc *ecc, int offset, + int i, unsigned val) +{ + edma_write(ecc, EDMA_SHADOW0 + offset + (i << 2), val); +} + +static inline unsigned int edma_param_read(struct edma_cc *ecc, int offset, + int param_no) +{ + return edma_read(ecc, EDMA_PARM + offset + (param_no << 5)); +} + +static inline void edma_param_write(struct edma_cc *ecc, int offset, + int param_no, unsigned val) +{ + 
edma_write(ecc, EDMA_PARM + offset + (param_no << 5), val); +} + +static inline void edma_param_modify(struct edma_cc *ecc, int offset, + int param_no, unsigned and, unsigned or) +{ + edma_modify(ecc, EDMA_PARM + offset + (param_no << 5), and, or); +} + +static inline void edma_param_and(struct edma_cc *ecc, int offset, int param_no, + unsigned and) +{ + edma_and(ecc, EDMA_PARM + offset + (param_no << 5), and); +} + +static inline void edma_param_or(struct edma_cc *ecc, int offset, int param_no, + unsigned or) +{ + edma_or(ecc, EDMA_PARM + offset + (param_no << 5), or); +} + +static inline void edma_set_bits(int offset, int len, unsigned long *p) +{ + for (; len > 0; len--) + set_bit(offset + (len - 1), p); +} + +static void edma_assign_priority_to_queue(struct edma_cc *ecc, int queue_no, + int priority) +{ + int bit = queue_no * 4; + + edma_modify(ecc, EDMA_QUEPRI, ~(0x7 << bit), ((priority & 0x7) << bit)); +} + +static void edma_set_chmap(struct edma_chan *echan, int slot) +{ + struct edma_cc *ecc = echan->ecc; + int channel = EDMA_CHAN_SLOT(echan->ch_num); + + if (ecc->chmap_exist) { + slot = EDMA_CHAN_SLOT(slot); + edma_write_array(ecc, EDMA_DCHMAP, channel, (slot << 5)); + } +} + +static void edma_setup_interrupt(struct edma_chan *echan, bool enable) +{ + struct edma_cc *ecc = echan->ecc; + int channel = EDMA_CHAN_SLOT(echan->ch_num); + + if (enable) { + edma_shadow0_write_array(ecc, SH_ICR, channel >> 5, + BIT(channel & 0x1f)); + edma_shadow0_write_array(ecc, SH_IESR, channel >> 5, + BIT(channel & 0x1f)); + } else { + edma_shadow0_write_array(ecc, SH_IECR, channel >> 5, + BIT(channel & 0x1f)); + } +} + +/* + * paRAM slot management functions + */ +static void edma_write_slot(struct edma_cc *ecc, unsigned slot, + const struct edmacc_param *param) +{ + slot = EDMA_CHAN_SLOT(slot); + if (slot >= ecc->num_slots) + return; + memcpy_toio(ecc->base + PARM_OFFSET(slot), param, PARM_SIZE); +} + +static int edma_read_slot(struct edma_cc *ecc, unsigned slot, + struct edmacc_param *param) +{ + slot = EDMA_CHAN_SLOT(slot); + if (slot >= ecc->num_slots) + return -EINVAL; + memcpy_fromio(param, ecc->base + PARM_OFFSET(slot), PARM_SIZE); + + return 0; +} + +/** + * edma_alloc_slot - allocate DMA parameter RAM + * @ecc: pointer to edma_cc struct + * @slot: specific slot to allocate; negative for "any unused slot" + * + * This allocates a parameter RAM slot, initializing it to hold a + * dummy transfer. Slots allocated using this routine have not been + * mapped to a hardware DMA channel, and will normally be used by + * linking to them from a slot associated with a DMA channel. + * + * Normal use is to pass EDMA_SLOT_ANY as the @slot, but specific + * slots may be allocated on behalf of DSP firmware. + * + * Returns the number of the slot, else negative errno. + */ +static int edma_alloc_slot(struct edma_cc *ecc, int slot) +{ + if (slot >= 0) { + slot = EDMA_CHAN_SLOT(slot); + /* Requesting entry paRAM slot for a HW triggered channel. 
*/ + if (ecc->chmap_exist && slot < ecc->num_channels) + slot = EDMA_SLOT_ANY; + } + + if (slot < 0) { + if (ecc->chmap_exist) + slot = 0; + else + slot = ecc->num_channels; + for (;;) { + slot = find_next_zero_bit(ecc->slot_inuse, + ecc->num_slots, + slot); + if (slot == ecc->num_slots) + return -ENOMEM; + if (!test_and_set_bit(slot, ecc->slot_inuse)) + break; + } + } else if (slot >= ecc->num_slots) { + return -EINVAL; + } else if (test_and_set_bit(slot, ecc->slot_inuse)) { + return -EBUSY; + } + + edma_write_slot(ecc, slot, &dummy_paramset); + + return EDMA_CTLR_CHAN(ecc->id, slot); +} + +static void edma_free_slot(struct edma_cc *ecc, unsigned slot) +{ + slot = EDMA_CHAN_SLOT(slot); + if (slot >= ecc->num_slots) + return; + + edma_write_slot(ecc, slot, &dummy_paramset); + clear_bit(slot, ecc->slot_inuse); +} + +/** + * edma_link - link one parameter RAM slot to another + * @ecc: pointer to edma_cc struct + * @from: parameter RAM slot originating the link + * @to: parameter RAM slot which is the link target + * + * The originating slot should not be part of any active DMA transfer. + */ +static void edma_link(struct edma_cc *ecc, unsigned from, unsigned to) +{ + if (unlikely(EDMA_CTLR(from) != EDMA_CTLR(to))) + dev_warn(ecc->dev, "Ignoring eDMA instance for linking\n"); + + from = EDMA_CHAN_SLOT(from); + to = EDMA_CHAN_SLOT(to); + if (from >= ecc->num_slots || to >= ecc->num_slots) + return; + + edma_param_modify(ecc, PARM_LINK_BCNTRLD, from, 0xffff0000, + PARM_OFFSET(to)); +} + +/** + * edma_get_position - returns the current transfer point + * @ecc: pointer to edma_cc struct + * @slot: parameter RAM slot being examined + * @dst: true selects the dest position, false the source + * + * Returns the position of the current active slot + */ +static dma_addr_t edma_get_position(struct edma_cc *ecc, unsigned slot, + bool dst) +{ + u32 offs; + + slot = EDMA_CHAN_SLOT(slot); + offs = PARM_OFFSET(slot); + offs += dst ? PARM_DST : PARM_SRC; + + return edma_read(ecc, offs); +} + +/* + * Channels with event associations will be triggered by their hardware + * events, and channels without such associations will be triggered by + * software. (At this writing there is no interface for using software + * triggers except with channels that don't support hardware triggers.) 
+ */ +static void edma_start(struct edma_chan *echan) +{ + struct edma_cc *ecc = echan->ecc; + int channel = EDMA_CHAN_SLOT(echan->ch_num); + int j = (channel >> 5); + unsigned int mask = BIT(channel & 0x1f); + + if (!echan->hw_triggered) { + /* EDMA channels without event association */ + dev_dbg(ecc->dev, "ESR%d %08x\n", j, + edma_shadow0_read_array(ecc, SH_ESR, j)); + edma_shadow0_write_array(ecc, SH_ESR, j, mask); + } else { + /* EDMA channel with event association */ + dev_dbg(ecc->dev, "ER%d %08x\n", j, + edma_shadow0_read_array(ecc, SH_ER, j)); + /* Clear any pending event or error */ + edma_write_array(ecc, EDMA_ECR, j, mask); + edma_write_array(ecc, EDMA_EMCR, j, mask); + /* Clear any SER */ + edma_shadow0_write_array(ecc, SH_SECR, j, mask); + edma_shadow0_write_array(ecc, SH_EESR, j, mask); + dev_dbg(ecc->dev, "EER%d %08x\n", j, + edma_shadow0_read_array(ecc, SH_EER, j)); + } +} + +static void edma_stop(struct edma_chan *echan) +{ + struct edma_cc *ecc = echan->ecc; + int channel = EDMA_CHAN_SLOT(echan->ch_num); + int j = (channel >> 5); + unsigned int mask = BIT(channel & 0x1f); + + edma_shadow0_write_array(ecc, SH_EECR, j, mask); + edma_shadow0_write_array(ecc, SH_ECR, j, mask); + edma_shadow0_write_array(ecc, SH_SECR, j, mask); + edma_write_array(ecc, EDMA_EMCR, j, mask); + + /* clear possibly pending completion interrupt */ + edma_shadow0_write_array(ecc, SH_ICR, j, mask); + + dev_dbg(ecc->dev, "EER%d %08x\n", j, + edma_shadow0_read_array(ecc, SH_EER, j)); + + /* REVISIT: consider guarding against inappropriate event + * chaining by overwriting with dummy_paramset. + */ +} + +/* + * Temporarily disable EDMA hardware events on the specified channel, + * preventing them from triggering new transfers + */ +static void edma_pause(struct edma_chan *echan) +{ + int channel = EDMA_CHAN_SLOT(echan->ch_num); + unsigned int mask = BIT(channel & 0x1f); + + edma_shadow0_write_array(echan->ecc, SH_EECR, channel >> 5, mask); +} + +/* Re-enable EDMA hardware events on the specified channel. 
*/ +static void edma_resume(struct edma_chan *echan) +{ + int channel = EDMA_CHAN_SLOT(echan->ch_num); + unsigned int mask = BIT(channel & 0x1f); + + edma_shadow0_write_array(echan->ecc, SH_EESR, channel >> 5, mask); +} + +static void edma_trigger_channel(struct edma_chan *echan) +{ + struct edma_cc *ecc = echan->ecc; + int channel = EDMA_CHAN_SLOT(echan->ch_num); + unsigned int mask = BIT(channel & 0x1f); + + edma_shadow0_write_array(ecc, SH_ESR, (channel >> 5), mask); + + dev_dbg(ecc->dev, "ESR%d %08x\n", (channel >> 5), + edma_shadow0_read_array(ecc, SH_ESR, (channel >> 5))); +} + +static void edma_clean_channel(struct edma_chan *echan) +{ + struct edma_cc *ecc = echan->ecc; + int channel = EDMA_CHAN_SLOT(echan->ch_num); + int j = (channel >> 5); + unsigned int mask = BIT(channel & 0x1f); + + dev_dbg(ecc->dev, "EMR%d %08x\n", j, edma_read_array(ecc, EDMA_EMR, j)); + edma_shadow0_write_array(ecc, SH_ECR, j, mask); + /* Clear the corresponding EMR bits */ + edma_write_array(ecc, EDMA_EMCR, j, mask); + /* Clear any SER */ + edma_shadow0_write_array(ecc, SH_SECR, j, mask); + edma_write(ecc, EDMA_CCERRCLR, BIT(16) | BIT(1) | BIT(0)); +} + +/* Move channel to a specific event queue */ +static void edma_assign_channel_eventq(struct edma_chan *echan, + enum dma_event_q eventq_no) +{ + struct edma_cc *ecc = echan->ecc; + int channel = EDMA_CHAN_SLOT(echan->ch_num); + int bit = (channel & 0x7) * 4; + + /* default to low priority queue */ + if (eventq_no == EVENTQ_DEFAULT) + eventq_no = ecc->default_queue; + if (eventq_no >= ecc->num_tc) + return; + + eventq_no &= 7; + edma_modify_array(ecc, EDMA_DMAQNUM, (channel >> 3), ~(0x7 << bit), + eventq_no << bit); +} + +static int edma_alloc_channel(struct edma_chan *echan, + enum dma_event_q eventq_no) +{ + struct edma_cc *ecc = echan->ecc; + int channel = EDMA_CHAN_SLOT(echan->ch_num); + + /* ensure access through shadow region 0 */ + edma_or_array2(ecc, EDMA_DRAE, 0, channel >> 5, BIT(channel & 0x1f)); + + /* ensure no events are pending */ + edma_stop(echan); + + edma_setup_interrupt(echan, true); + + edma_assign_channel_eventq(echan, eventq_no); + + return 0; +} + +static void edma_free_channel(struct edma_chan *echan) +{ + /* ensure no events are pending */ + edma_stop(echan); + /* REVISIT should probably take out of shadow region 0 */ + edma_setup_interrupt(echan, false); +} + +static inline struct edma_cc *to_edma_cc(struct dma_device *d) +{ + return container_of(d, struct edma_cc, dma_slave); +} + +static inline struct edma_chan *to_edma_chan(struct dma_chan *c) +{ + return container_of(c, struct edma_chan, vchan.chan); +} + +static inline struct edma_desc *to_edma_desc(struct dma_async_tx_descriptor *tx) +{ + return container_of(tx, struct edma_desc, vdesc.tx); +} + +static void edma_desc_free(struct virt_dma_desc *vdesc) +{ + kfree(container_of(vdesc, struct edma_desc, vdesc)); +} + +/* Dispatch a queued descriptor to the controller (caller holds lock) */ +static void edma_execute(struct edma_chan *echan) +{ + struct edma_cc *ecc = echan->ecc; + struct virt_dma_desc *vdesc; + struct edma_desc *edesc; + struct device *dev = echan->vchan.chan.device->dev; + int i, j, left, nslots; + + if (!echan->edesc) { + /* Setup is needed for the first transfer */ + vdesc = vchan_next_desc(&echan->vchan); + if (!vdesc) + return; + list_del(&vdesc->node); + echan->edesc = to_edma_desc(&vdesc->tx); + } + + edesc = echan->edesc; + + /* Find out how many left */ + left = edesc->pset_nr - edesc->processed; + nslots = min(MAX_NR_SG, left); + edesc->sg_len = 0; + + 
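	/*
	 * Illustration (values assumed, not from the patch): with MAX_NR_SG
	 * taken as 10 for the example, a 25-element SG list is issued in
	 * three rounds of 10, 10 and 5 PaRAM sets; "left" and "processed"
	 * above track that progress.  Within a round the slots are chained
	 * with edma_link(), and the final round is linked to the dummy slot
	 * so that trailing events are harmlessly absorbed (see below).
	 */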
/* Write descriptor PaRAM set(s) */ + for (i = 0; i < nslots; i++) { + j = i + edesc->processed; + edma_write_slot(ecc, echan->slot[i], &edesc->pset[j].param); + edesc->sg_len += edesc->pset[j].len; + dev_vdbg(dev, + "\n pset[%d]:\n" + " chnum\t%d\n" + " slot\t%d\n" + " opt\t%08x\n" + " src\t%08x\n" + " dst\t%08x\n" + " abcnt\t%08x\n" + " ccnt\t%08x\n" + " bidx\t%08x\n" + " cidx\t%08x\n" + " lkrld\t%08x\n", + j, echan->ch_num, echan->slot[i], + edesc->pset[j].param.opt, + edesc->pset[j].param.src, + edesc->pset[j].param.dst, + edesc->pset[j].param.a_b_cnt, + edesc->pset[j].param.ccnt, + edesc->pset[j].param.src_dst_bidx, + edesc->pset[j].param.src_dst_cidx, + edesc->pset[j].param.link_bcntrld); + /* Link to the previous slot if not the last set */ + if (i != (nslots - 1)) + edma_link(ecc, echan->slot[i], echan->slot[i + 1]); + } + + edesc->processed += nslots; + + /* + * If this is either the last set in a set of SG-list transactions + * then setup a link to the dummy slot, this results in all future + * events being absorbed and that's OK because we're done + */ + if (edesc->processed == edesc->pset_nr) { + if (edesc->cyclic) + edma_link(ecc, echan->slot[nslots - 1], echan->slot[1]); + else + edma_link(ecc, echan->slot[nslots - 1], + echan->ecc->dummy_slot); + } + + if (echan->missed) { + /* + * This happens due to setup times between intermediate + * transfers in long SG lists which have to be broken up into + * transfers of MAX_NR_SG + */ + dev_dbg(dev, "missed event on channel %d\n", echan->ch_num); + edma_clean_channel(echan); + edma_stop(echan); + edma_start(echan); + edma_trigger_channel(echan); + echan->missed = 0; + } else if (edesc->processed <= MAX_NR_SG) { + dev_dbg(dev, "first transfer starting on channel %d\n", + echan->ch_num); + edma_start(echan); + } else { + dev_dbg(dev, "chan: %d: completed %d elements, resuming\n", + echan->ch_num, edesc->processed); + edma_resume(echan); + } +} + +static int edma_terminate_all(struct dma_chan *chan) +{ + struct edma_chan *echan = to_edma_chan(chan); + unsigned long flags; + LIST_HEAD(head); + + spin_lock_irqsave(&echan->vchan.lock, flags); + + /* + * Stop DMA activity: we assume the callback will not be called + * after edma_dma() returns (even if it does, it will see + * echan->edesc is NULL and exit.) 
+ */ + if (echan->edesc) { + edma_stop(echan); + /* Move the cyclic channel back to default queue */ + if (!echan->tc && echan->edesc->cyclic) + edma_assign_channel_eventq(echan, EVENTQ_DEFAULT); + + vchan_terminate_vdesc(&echan->edesc->vdesc); + echan->edesc = NULL; + } + + vchan_get_all_descriptors(&echan->vchan, &head); + spin_unlock_irqrestore(&echan->vchan.lock, flags); + vchan_dma_desc_free_list(&echan->vchan, &head); + + return 0; +} + +static void edma_synchronize(struct dma_chan *chan) +{ + struct edma_chan *echan = to_edma_chan(chan); + + vchan_synchronize(&echan->vchan); +} + +static int edma_slave_config(struct dma_chan *chan, + struct dma_slave_config *cfg) +{ + struct edma_chan *echan = to_edma_chan(chan); + + if (cfg->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES || + cfg->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES) + return -EINVAL; + + if (cfg->src_maxburst > chan->device->max_burst || + cfg->dst_maxburst > chan->device->max_burst) + return -EINVAL; + + memcpy(&echan->cfg, cfg, sizeof(echan->cfg)); + + return 0; +} + +static int edma_dma_pause(struct dma_chan *chan) +{ + struct edma_chan *echan = to_edma_chan(chan); + + if (!echan->edesc) + return -EINVAL; + + edma_pause(echan); + return 0; +} + +static int edma_dma_resume(struct dma_chan *chan) +{ + struct edma_chan *echan = to_edma_chan(chan); + + edma_resume(echan); + return 0; +} + +/* + * A PaRAM set configuration abstraction used by other modes + * @chan: Channel who's PaRAM set we're configuring + * @pset: PaRAM set to initialize and setup. + * @src_addr: Source address of the DMA + * @dst_addr: Destination address of the DMA + * @burst: In units of dev_width, how much to send + * @dev_width: How much is the dev_width + * @dma_length: Total length of the DMA transfer + * @direction: Direction of the transfer + */ +static int edma_config_pset(struct dma_chan *chan, struct edma_pset *epset, + dma_addr_t src_addr, dma_addr_t dst_addr, u32 burst, + unsigned int acnt, unsigned int dma_length, + enum dma_transfer_direction direction) +{ + struct edma_chan *echan = to_edma_chan(chan); + struct device *dev = chan->device->dev; + struct edmacc_param *param = &epset->param; + int bcnt, ccnt, cidx; + int src_bidx, dst_bidx, src_cidx, dst_cidx; + int absync; + + /* src/dst_maxburst == 0 is the same case as src/dst_maxburst == 1 */ + if (!burst) + burst = 1; + /* + * If the maxburst is equal to the fifo width, use + * A-synced transfers. This allows for large contiguous + * buffer transfers using only one PaRAM set. + */ + if (burst == 1) { + /* + * For the A-sync case, bcnt and ccnt are the remainder + * and quotient respectively of the division of: + * (dma_length / acnt) by (SZ_64K -1). This is so + * that in case bcnt over flows, we have ccnt to use. + * Note: In A-sync tranfer only, bcntrld is used, but it + * only applies for sg_dma_len(sg) >= SZ_64K. + * In this case, the best way adopted is- bccnt for the + * first frame will be the remainder below. Then for + * every successive frame, bcnt will be SZ_64K-1. This + * is assured as bcntrld = 0xffff in end of function. + */ + absync = false; + ccnt = dma_length / acnt / (SZ_64K - 1); + bcnt = dma_length / acnt - ccnt * (SZ_64K - 1); + /* + * If bcnt is non-zero, we have a remainder and hence an + * extra frame to transfer, so increment ccnt. 
+ */ + if (bcnt) + ccnt++; + else + bcnt = SZ_64K - 1; + cidx = acnt; + } else { + /* + * If maxburst is greater than the fifo address_width, + * use AB-synced transfers where A count is the fifo + * address_width and B count is the maxburst. In this + * case, we are limited to transfers of C count frames + * of (address_width * maxburst) where C count is limited + * to SZ_64K-1. This places an upper bound on the length + * of an SG segment that can be handled. + */ + absync = true; + bcnt = burst; + ccnt = dma_length / (acnt * bcnt); + if (ccnt > (SZ_64K - 1)) { + dev_err(dev, "Exceeded max SG segment size\n"); + return -EINVAL; + } + cidx = acnt * bcnt; + } + + epset->len = dma_length; + + if (direction == DMA_MEM_TO_DEV) { + src_bidx = acnt; + src_cidx = cidx; + dst_bidx = 0; + dst_cidx = 0; + epset->addr = src_addr; + } else if (direction == DMA_DEV_TO_MEM) { + src_bidx = 0; + src_cidx = 0; + dst_bidx = acnt; + dst_cidx = cidx; + epset->addr = dst_addr; + } else if (direction == DMA_MEM_TO_MEM) { + src_bidx = acnt; + src_cidx = cidx; + dst_bidx = acnt; + dst_cidx = cidx; + } else { + dev_err(dev, "%s: direction not implemented yet\n", __func__); + return -EINVAL; + } + + param->opt = EDMA_TCC(EDMA_CHAN_SLOT(echan->ch_num)); + /* Configure A or AB synchronized transfers */ + if (absync) + param->opt |= SYNCDIM; + + param->src = src_addr; + param->dst = dst_addr; + + param->src_dst_bidx = (dst_bidx << 16) | src_bidx; + param->src_dst_cidx = (dst_cidx << 16) | src_cidx; + + param->a_b_cnt = bcnt << 16 | acnt; + param->ccnt = ccnt; + /* + * Only time when (bcntrld) auto reload is required is for + * A-sync case, and in this case, a requirement of reload value + * of SZ_64K-1 only is assured. 'link' is initially set to NULL + * and then later will be populated by edma_execute. 
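 *
 * Illustrative arithmetic (values assumed, not from the patch): for an
 * A-synchronized transfer with acnt = 1 and dma_length = 1 MiB,
 * dma_length / acnt = 1048576, so ccnt = 1048576 / 65535 = 16 and
 * bcnt = 1048576 - 16 * 65535 = 16; bcnt is non-zero, so ccnt becomes 17.
 * The first frame then moves 16 bytes and each reloaded frame
 * (bcntrld = 0xffff) moves 65535 bytes, with cidx = acnt = 1.  For an
 * AB-synchronized transfer with acnt = 4 and burst = 8, every frame moves
 * 32 bytes, so a 64 KiB segment needs ccnt = 65536 / 32 = 2048 frames
 * with cidx = 32.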
+ */ + param->link_bcntrld = 0xffffffff; + return absync; +} + +static struct dma_async_tx_descriptor *edma_prep_slave_sg( + struct dma_chan *chan, struct scatterlist *sgl, + unsigned int sg_len, enum dma_transfer_direction direction, + unsigned long tx_flags, void *context) +{ + struct edma_chan *echan = to_edma_chan(chan); + struct device *dev = chan->device->dev; + struct edma_desc *edesc; + dma_addr_t src_addr = 0, dst_addr = 0; + enum dma_slave_buswidth dev_width; + u32 burst; + struct scatterlist *sg; + int i, nslots, ret; + + if (unlikely(!echan || !sgl || !sg_len)) + return NULL; + + if (direction == DMA_DEV_TO_MEM) { + src_addr = echan->cfg.src_addr; + dev_width = echan->cfg.src_addr_width; + burst = echan->cfg.src_maxburst; + } else if (direction == DMA_MEM_TO_DEV) { + dst_addr = echan->cfg.dst_addr; + dev_width = echan->cfg.dst_addr_width; + burst = echan->cfg.dst_maxburst; + } else { + dev_err(dev, "%s: bad direction: %d\n", __func__, direction); + return NULL; + } + + if (dev_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) { + dev_err(dev, "%s: Undefined slave buswidth\n", __func__); + return NULL; + } + + edesc = kzalloc(sizeof(*edesc) + sg_len * sizeof(edesc->pset[0]), + GFP_ATOMIC); + if (!edesc) + return NULL; + + edesc->pset_nr = sg_len; + edesc->residue = 0; + edesc->direction = direction; + edesc->echan = echan; + + /* Allocate a PaRAM slot, if needed */ + nslots = min_t(unsigned, MAX_NR_SG, sg_len); + + for (i = 0; i < nslots; i++) { + if (echan->slot[i] < 0) { + echan->slot[i] = + edma_alloc_slot(echan->ecc, EDMA_SLOT_ANY); + if (echan->slot[i] < 0) { + kfree(edesc); + dev_err(dev, "%s: Failed to allocate slot\n", + __func__); + return NULL; + } + } + } + + /* Configure PaRAM sets for each SG */ + for_each_sg(sgl, sg, sg_len, i) { + /* Get address for each SG */ + if (direction == DMA_DEV_TO_MEM) + dst_addr = sg_dma_address(sg); + else + src_addr = sg_dma_address(sg); + + ret = edma_config_pset(chan, &edesc->pset[i], src_addr, + dst_addr, burst, dev_width, + sg_dma_len(sg), direction); + if (ret < 0) { + kfree(edesc); + return NULL; + } + + edesc->absync = ret; + edesc->residue += sg_dma_len(sg); + + if (i == sg_len - 1) + /* Enable completion interrupt */ + edesc->pset[i].param.opt |= TCINTEN; + else if (!((i+1) % MAX_NR_SG)) + /* + * Enable early completion interrupt for the + * intermediateset. In this case the driver will be + * notified when the paRAM set is submitted to TC. This + * will allow more time to set up the next set of slots. + */ + edesc->pset[i].param.opt |= (TCINTEN | TCCMODE); + } + edesc->residue_stat = edesc->residue; + + return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags); +} + +static struct dma_async_tx_descriptor *edma_prep_dma_memcpy( + struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, + size_t len, unsigned long tx_flags) +{ + int ret, nslots; + struct edma_desc *edesc; + struct device *dev = chan->device->dev; + struct edma_chan *echan = to_edma_chan(chan); + unsigned int width, pset_len, array_size; + + if (unlikely(!echan || !len)) + return NULL; + + /* Align the array size (acnt block) with the transfer properties */ + switch (__ffs((src | dest | len))) { + case 0: + array_size = SZ_32K - 1; + break; + case 1: + array_size = SZ_32K - 2; + break; + default: + array_size = SZ_32K - 4; + break; + } + + if (len < SZ_64K) { + /* + * Transfer size less than 64K can be handled with one paRAM + * slot and with one burst. 
+ * ACNT = length + */ + width = len; + pset_len = len; + nslots = 1; + } else { + /* + * Transfer size bigger than 64K will be handled with maximum of + * two paRAM slots. + * slot1: (full_length / 32767) times 32767 bytes bursts. + * ACNT = 32767, length1: (full_length / 32767) * 32767 + * slot2: the remaining amount of data after slot1. + * ACNT = full_length - length1, length2 = ACNT + * + * When the full_length is multibple of 32767 one slot can be + * used to complete the transfer. + */ + width = array_size; + pset_len = rounddown(len, width); + /* One slot is enough for lengths multiple of (SZ_32K -1) */ + if (unlikely(pset_len == len)) + nslots = 1; + else + nslots = 2; + } + + edesc = kzalloc(sizeof(*edesc) + nslots * sizeof(edesc->pset[0]), + GFP_ATOMIC); + if (!edesc) + return NULL; + + edesc->pset_nr = nslots; + edesc->residue = edesc->residue_stat = len; + edesc->direction = DMA_MEM_TO_MEM; + edesc->echan = echan; + + ret = edma_config_pset(chan, &edesc->pset[0], src, dest, 1, + width, pset_len, DMA_MEM_TO_MEM); + if (ret < 0) { + kfree(edesc); + return NULL; + } + + edesc->absync = ret; + + edesc->pset[0].param.opt |= ITCCHEN; + if (nslots == 1) { + /* Enable transfer complete interrupt */ + edesc->pset[0].param.opt |= TCINTEN; + } else { + /* Enable transfer complete chaining for the first slot */ + edesc->pset[0].param.opt |= TCCHEN; + + if (echan->slot[1] < 0) { + echan->slot[1] = edma_alloc_slot(echan->ecc, + EDMA_SLOT_ANY); + if (echan->slot[1] < 0) { + kfree(edesc); + dev_err(dev, "%s: Failed to allocate slot\n", + __func__); + return NULL; + } + } + dest += pset_len; + src += pset_len; + pset_len = width = len % array_size; + + ret = edma_config_pset(chan, &edesc->pset[1], src, dest, 1, + width, pset_len, DMA_MEM_TO_MEM); + if (ret < 0) { + kfree(edesc); + return NULL; + } + + edesc->pset[1].param.opt |= ITCCHEN; + edesc->pset[1].param.opt |= TCINTEN; + } + + return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags); +} + +static struct dma_async_tx_descriptor *edma_prep_dma_cyclic( + struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len, + size_t period_len, enum dma_transfer_direction direction, + unsigned long tx_flags) +{ + struct edma_chan *echan = to_edma_chan(chan); + struct device *dev = chan->device->dev; + struct edma_desc *edesc; + dma_addr_t src_addr, dst_addr; + enum dma_slave_buswidth dev_width; + bool use_intermediate = false; + u32 burst; + int i, ret, nslots; + + if (unlikely(!echan || !buf_len || !period_len)) + return NULL; + + if (direction == DMA_DEV_TO_MEM) { + src_addr = echan->cfg.src_addr; + dst_addr = buf_addr; + dev_width = echan->cfg.src_addr_width; + burst = echan->cfg.src_maxburst; + } else if (direction == DMA_MEM_TO_DEV) { + src_addr = buf_addr; + dst_addr = echan->cfg.dst_addr; + dev_width = echan->cfg.dst_addr_width; + burst = echan->cfg.dst_maxburst; + } else { + dev_err(dev, "%s: bad direction: %d\n", __func__, direction); + return NULL; + } + + if (dev_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) { + dev_err(dev, "%s: Undefined slave buswidth\n", __func__); + return NULL; + } + + if (unlikely(buf_len % period_len)) { + dev_err(dev, "Period should be multiple of Buffer length\n"); + return NULL; + } + + nslots = (buf_len / period_len) + 1; + + /* + * Cyclic DMA users such as audio cannot tolerate delays introduced + * by cases where the number of periods is more than the maximum + * number of SGs the EDMA driver can handle at a time. 
For DMA types + * such as Slave SGs, such delays are tolerable and synchronized, + * but the synchronization is difficult to achieve with Cyclic and + * cannot be guaranteed, so we error out early. + */ + if (nslots > MAX_NR_SG) { + /* + * If the burst and period sizes are the same, we can put + * the full buffer into a single period and activate + * intermediate interrupts. This will produce interrupts + * after each burst, which is also after each desired period. + */ + if (burst == period_len) { + period_len = buf_len; + nslots = 2; + use_intermediate = true; + } else { + return NULL; + } + } + + edesc = kzalloc(sizeof(*edesc) + nslots * sizeof(edesc->pset[0]), + GFP_ATOMIC); + if (!edesc) + return NULL; + + edesc->cyclic = 1; + edesc->pset_nr = nslots; + edesc->residue = edesc->residue_stat = buf_len; + edesc->direction = direction; + edesc->echan = echan; + + dev_dbg(dev, "%s: channel=%d nslots=%d period_len=%zu buf_len=%zu\n", + __func__, echan->ch_num, nslots, period_len, buf_len); + + for (i = 0; i < nslots; i++) { + /* Allocate a PaRAM slot, if needed */ + if (echan->slot[i] < 0) { + echan->slot[i] = + edma_alloc_slot(echan->ecc, EDMA_SLOT_ANY); + if (echan->slot[i] < 0) { + kfree(edesc); + dev_err(dev, "%s: Failed to allocate slot\n", + __func__); + return NULL; + } + } + + if (i == nslots - 1) { + memcpy(&edesc->pset[i], &edesc->pset[0], + sizeof(edesc->pset[0])); + break; + } + + ret = edma_config_pset(chan, &edesc->pset[i], src_addr, + dst_addr, burst, dev_width, period_len, + direction); + if (ret < 0) { + kfree(edesc); + return NULL; + } + + if (direction == DMA_DEV_TO_MEM) + dst_addr += period_len; + else + src_addr += period_len; + + dev_vdbg(dev, "%s: Configure period %d of buf:\n", __func__, i); + dev_vdbg(dev, + "\n pset[%d]:\n" + " chnum\t%d\n" + " slot\t%d\n" + " opt\t%08x\n" + " src\t%08x\n" + " dst\t%08x\n" + " abcnt\t%08x\n" + " ccnt\t%08x\n" + " bidx\t%08x\n" + " cidx\t%08x\n" + " lkrld\t%08x\n", + i, echan->ch_num, echan->slot[i], + edesc->pset[i].param.opt, + edesc->pset[i].param.src, + edesc->pset[i].param.dst, + edesc->pset[i].param.a_b_cnt, + edesc->pset[i].param.ccnt, + edesc->pset[i].param.src_dst_bidx, + edesc->pset[i].param.src_dst_cidx, + edesc->pset[i].param.link_bcntrld); + + edesc->absync = ret; + + /* + * Enable period interrupt only if it is requested + */ + if (tx_flags & DMA_PREP_INTERRUPT) { + edesc->pset[i].param.opt |= TCINTEN; + + /* Also enable intermediate interrupts if necessary */ + if (use_intermediate) + edesc->pset[i].param.opt |= ITCINTEN; + } + } + + /* Place the cyclic channel to highest priority queue */ + if (!echan->tc) + edma_assign_channel_eventq(echan, EVENTQ_0); + + return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags); +} + +static void edma_completion_handler(struct edma_chan *echan) +{ + struct device *dev = echan->vchan.chan.device->dev; + struct edma_desc *edesc; + + spin_lock(&echan->vchan.lock); + edesc = echan->edesc; + if (edesc) { + if (edesc->cyclic) { + vchan_cyclic_callback(&edesc->vdesc); + spin_unlock(&echan->vchan.lock); + return; + } else if (edesc->processed == edesc->pset_nr) { + edesc->residue = 0; + edma_stop(echan); + vchan_cookie_complete(&edesc->vdesc); + echan->edesc = NULL; + + dev_dbg(dev, "Transfer completed on channel %d\n", + echan->ch_num); + } else { + dev_dbg(dev, "Sub transfer completed on channel %d\n", + echan->ch_num); + + edma_pause(echan); + + /* Update statistics for tx_status */ + edesc->residue -= edesc->sg_len; + edesc->residue_stat = edesc->residue; + edesc->processed_stat = 
edesc->processed; + } + edma_execute(echan); + } + + spin_unlock(&echan->vchan.lock); +} + +/* eDMA interrupt handler */ +static irqreturn_t dma_irq_handler(int irq, void *data) +{ + struct edma_cc *ecc = data; + int ctlr; + u32 sh_ier; + u32 sh_ipr; + u32 bank; + + ctlr = ecc->id; + if (ctlr < 0) + return IRQ_NONE; + + dev_vdbg(ecc->dev, "dma_irq_handler\n"); + + sh_ipr = edma_shadow0_read_array(ecc, SH_IPR, 0); + if (!sh_ipr) { + sh_ipr = edma_shadow0_read_array(ecc, SH_IPR, 1); + if (!sh_ipr) + return IRQ_NONE; + sh_ier = edma_shadow0_read_array(ecc, SH_IER, 1); + bank = 1; + } else { + sh_ier = edma_shadow0_read_array(ecc, SH_IER, 0); + bank = 0; + } + + do { + u32 slot; + u32 channel; + + slot = __ffs(sh_ipr); + sh_ipr &= ~(BIT(slot)); + + if (sh_ier & BIT(slot)) { + channel = (bank << 5) | slot; + /* Clear the corresponding IPR bits */ + edma_shadow0_write_array(ecc, SH_ICR, bank, BIT(slot)); + edma_completion_handler(&ecc->slave_chans[channel]); + } + } while (sh_ipr); + + edma_shadow0_write(ecc, SH_IEVAL, 1); + return IRQ_HANDLED; +} + +static void edma_error_handler(struct edma_chan *echan) +{ + struct edma_cc *ecc = echan->ecc; + struct device *dev = echan->vchan.chan.device->dev; + struct edmacc_param p; + int err; + + if (!echan->edesc) + return; + + spin_lock(&echan->vchan.lock); + + err = edma_read_slot(ecc, echan->slot[0], &p); + + /* + * Issue later based on missed flag which will be sure + * to happen as: + * (1) we finished transmitting an intermediate slot and + * edma_execute is coming up. + * (2) or we finished current transfer and issue will + * call edma_execute. + * + * Important note: issuing can be dangerous here and + * lead to some nasty recursion when we are in a NULL + * slot. So we avoid doing so and set the missed flag. + */ + if (err || (p.a_b_cnt == 0 && p.ccnt == 0)) { + dev_dbg(dev, "Error on null slot, setting miss\n"); + echan->missed = 1; + } else { + /* + * The slot is already programmed but the event got + * missed, so its safe to issue it here. + */ + dev_dbg(dev, "Missed event, TRIGGERING\n"); + edma_clean_channel(echan); + edma_stop(echan); + edma_start(echan); + edma_trigger_channel(echan); + } + spin_unlock(&echan->vchan.lock); +} + +static inline bool edma_error_pending(struct edma_cc *ecc) +{ + if (edma_read_array(ecc, EDMA_EMR, 0) || + edma_read_array(ecc, EDMA_EMR, 1) || + edma_read(ecc, EDMA_QEMR) || edma_read(ecc, EDMA_CCERR)) + return true; + + return false; +} + +/* eDMA error interrupt handler */ +static irqreturn_t dma_ccerr_handler(int irq, void *data) +{ + struct edma_cc *ecc = data; + int i, j; + int ctlr; + unsigned int cnt = 0; + unsigned int val; + + ctlr = ecc->id; + if (ctlr < 0) + return IRQ_NONE; + + dev_vdbg(ecc->dev, "dma_ccerr_handler\n"); + + if (!edma_error_pending(ecc)) { + /* + * The registers indicate no pending error event but the irq + * handler has been called. + * Ask eDMA to re-evaluate the error registers. 
+ */ + dev_err(ecc->dev, "%s: Error interrupt without error event!\n", + __func__); + edma_write(ecc, EDMA_EEVAL, 1); + return IRQ_NONE; + } + + while (1) { + /* Event missed register(s) */ + for (j = 0; j < 2; j++) { + unsigned long emr; + + val = edma_read_array(ecc, EDMA_EMR, j); + if (!val) + continue; + + dev_dbg(ecc->dev, "EMR%d 0x%08x\n", j, val); + emr = val; + for (i = find_next_bit(&emr, 32, 0); i < 32; + i = find_next_bit(&emr, 32, i + 1)) { + int k = (j << 5) + i; + + /* Clear the corresponding EMR bits */ + edma_write_array(ecc, EDMA_EMCR, j, BIT(i)); + /* Clear any SER */ + edma_shadow0_write_array(ecc, SH_SECR, j, + BIT(i)); + edma_error_handler(&ecc->slave_chans[k]); + } + } + + val = edma_read(ecc, EDMA_QEMR); + if (val) { + dev_dbg(ecc->dev, "QEMR 0x%02x\n", val); + /* Not reported, just clear the interrupt reason. */ + edma_write(ecc, EDMA_QEMCR, val); + edma_shadow0_write(ecc, SH_QSECR, val); + } + + val = edma_read(ecc, EDMA_CCERR); + if (val) { + dev_warn(ecc->dev, "CCERR 0x%08x\n", val); + /* Not reported, just clear the interrupt reason. */ + edma_write(ecc, EDMA_CCERRCLR, val); + } + + if (!edma_error_pending(ecc)) + break; + cnt++; + if (cnt > 10) + break; + } + edma_write(ecc, EDMA_EEVAL, 1); + return IRQ_HANDLED; +} + +/* Alloc channel resources */ +static int edma_alloc_chan_resources(struct dma_chan *chan) +{ + struct edma_chan *echan = to_edma_chan(chan); + struct edma_cc *ecc = echan->ecc; + struct device *dev = ecc->dev; + enum dma_event_q eventq_no = EVENTQ_DEFAULT; + int ret; + + if (echan->tc) { + eventq_no = echan->tc->id; + } else if (ecc->tc_list) { + /* memcpy channel */ + echan->tc = &ecc->tc_list[ecc->info->default_queue]; + eventq_no = echan->tc->id; + } + + ret = edma_alloc_channel(echan, eventq_no); + if (ret) + return ret; + + echan->slot[0] = edma_alloc_slot(ecc, echan->ch_num); + if (echan->slot[0] < 0) { + dev_err(dev, "Entry slot allocation failed for channel %u\n", + EDMA_CHAN_SLOT(echan->ch_num)); + ret = echan->slot[0]; + goto err_slot; + } + + /* Set up channel -> slot mapping for the entry slot */ + edma_set_chmap(echan, echan->slot[0]); + echan->alloced = true; + + dev_dbg(dev, "Got eDMA channel %d for virt channel %d (%s trigger)\n", + EDMA_CHAN_SLOT(echan->ch_num), chan->chan_id, + echan->hw_triggered ? 
"HW" : "SW"); + + return 0; + +err_slot: + edma_free_channel(echan); + return ret; +} + +/* Free channel resources */ +static void edma_free_chan_resources(struct dma_chan *chan) +{ + struct edma_chan *echan = to_edma_chan(chan); + struct device *dev = echan->ecc->dev; + int i; + + /* Terminate transfers */ + edma_stop(echan); + + vchan_free_chan_resources(&echan->vchan); + + /* Free EDMA PaRAM slots */ + for (i = 0; i < EDMA_MAX_SLOTS; i++) { + if (echan->slot[i] >= 0) { + edma_free_slot(echan->ecc, echan->slot[i]); + echan->slot[i] = -1; + } + } + + /* Set entry slot to the dummy slot */ + edma_set_chmap(echan, echan->ecc->dummy_slot); + + /* Free EDMA channel */ + if (echan->alloced) { + edma_free_channel(echan); + echan->alloced = false; + } + + echan->tc = NULL; + echan->hw_triggered = false; + + dev_dbg(dev, "Free eDMA channel %d for virt channel %d\n", + EDMA_CHAN_SLOT(echan->ch_num), chan->chan_id); +} + +/* Send pending descriptor to hardware */ +static void edma_issue_pending(struct dma_chan *chan) +{ + struct edma_chan *echan = to_edma_chan(chan); + unsigned long flags; + + spin_lock_irqsave(&echan->vchan.lock, flags); + if (vchan_issue_pending(&echan->vchan) && !echan->edesc) + edma_execute(echan); + spin_unlock_irqrestore(&echan->vchan.lock, flags); +} + +/* + * This limit exists to avoid a possible infinite loop when waiting for proof + * that a particular transfer is completed. This limit can be hit if there + * are large bursts to/from slow devices or the CPU is never able to catch + * the DMA hardware idle. On an AM335x transfering 48 bytes from the UART + * RX-FIFO, as many as 55 loops have been seen. + */ +#define EDMA_MAX_TR_WAIT_LOOPS 1000 + +static u32 edma_residue(struct edma_desc *edesc) +{ + bool dst = edesc->direction == DMA_DEV_TO_MEM; + int loop_count = EDMA_MAX_TR_WAIT_LOOPS; + struct edma_chan *echan = edesc->echan; + struct edma_pset *pset = edesc->pset; + dma_addr_t done, pos; + int i; + + /* + * We always read the dst/src position from the first RamPar + * pset. That's the one which is active now. + */ + pos = edma_get_position(echan->ecc, echan->slot[0], dst); + + /* + * "pos" may represent a transfer request that is still being + * processed by the EDMACC or EDMATC. We will busy wait until + * any one of the situations occurs: + * 1. the DMA hardware is idle + * 2. a new transfer request is setup + * 3. we hit the loop limit + */ + while (edma_read(echan->ecc, EDMA_CCSTAT) & EDMA_CCSTAT_ACTV) { + /* check if a new transfer request is setup */ + if (edma_get_position(echan->ecc, + echan->slot[0], dst) != pos) { + break; + } + + if (!--loop_count) { + dev_dbg_ratelimited(echan->vchan.chan.device->dev, + "%s: timeout waiting for PaRAM update\n", + __func__); + break; + } + + cpu_relax(); + } + + /* + * Cyclic is simple. Just subtract pset[0].addr from pos. + * + * We never update edesc->residue in the cyclic case, so we + * can tell the remaining room to the end of the circular + * buffer. + */ + if (edesc->cyclic) { + done = pos - pset->addr; + edesc->residue_stat = edesc->residue - done; + return edesc->residue_stat; + } + + /* + * For SG operation we catch up with the last processed + * status. + */ + pset += edesc->processed_stat; + + for (i = edesc->processed_stat; i < edesc->processed; i++, pset++) { + /* + * If we are inside this pset address range, we know + * this is the active one. Get the current delta and + * stop walking the psets. 
+ */ + if (pos >= pset->addr && pos < pset->addr + pset->len) + return edesc->residue_stat - (pos - pset->addr); + + /* Otherwise mark it done and update residue_stat. */ + edesc->processed_stat++; + edesc->residue_stat -= pset->len; + } + return edesc->residue_stat; +} + +/* Check request completion status */ +static enum dma_status edma_tx_status(struct dma_chan *chan, + dma_cookie_t cookie, + struct dma_tx_state *txstate) +{ + struct edma_chan *echan = to_edma_chan(chan); + struct virt_dma_desc *vdesc; + enum dma_status ret; + unsigned long flags; + + ret = dma_cookie_status(chan, cookie, txstate); + if (ret == DMA_COMPLETE || !txstate) + return ret; + + spin_lock_irqsave(&echan->vchan.lock, flags); + if (echan->edesc && echan->edesc->vdesc.tx.cookie == cookie) + txstate->residue = edma_residue(echan->edesc); + else if ((vdesc = vchan_find_desc(&echan->vchan, cookie))) + txstate->residue = to_edma_desc(&vdesc->tx)->residue; + spin_unlock_irqrestore(&echan->vchan.lock, flags); + + return ret; +} + +static bool edma_is_memcpy_channel(int ch_num, s32 *memcpy_channels) +{ + if (!memcpy_channels) + return false; + while (*memcpy_channels != -1) { + if (*memcpy_channels == ch_num) + return true; + memcpy_channels++; + } + return false; +} + +#define EDMA_DMA_BUSWIDTHS (BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \ + BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \ + BIT(DMA_SLAVE_BUSWIDTH_3_BYTES) | \ + BIT(DMA_SLAVE_BUSWIDTH_4_BYTES)) + +static void edma_dma_init(struct edma_cc *ecc, bool legacy_mode) +{ + struct dma_device *s_ddev = &ecc->dma_slave; + struct dma_device *m_ddev = NULL; + s32 *memcpy_channels = ecc->info->memcpy_channels; + int i, j; + + dma_cap_zero(s_ddev->cap_mask); + dma_cap_set(DMA_SLAVE, s_ddev->cap_mask); + dma_cap_set(DMA_CYCLIC, s_ddev->cap_mask); + if (ecc->legacy_mode && !memcpy_channels) { + dev_warn(ecc->dev, + "Legacy memcpy is enabled, things might not work\n"); + + dma_cap_set(DMA_MEMCPY, s_ddev->cap_mask); + s_ddev->device_prep_dma_memcpy = edma_prep_dma_memcpy; + s_ddev->directions = BIT(DMA_MEM_TO_MEM); + } + + s_ddev->device_prep_slave_sg = edma_prep_slave_sg; + s_ddev->device_prep_dma_cyclic = edma_prep_dma_cyclic; + s_ddev->device_alloc_chan_resources = edma_alloc_chan_resources; + s_ddev->device_free_chan_resources = edma_free_chan_resources; + s_ddev->device_issue_pending = edma_issue_pending; + s_ddev->device_tx_status = edma_tx_status; + s_ddev->device_config = edma_slave_config; + s_ddev->device_pause = edma_dma_pause; + s_ddev->device_resume = edma_dma_resume; + s_ddev->device_terminate_all = edma_terminate_all; + s_ddev->device_synchronize = edma_synchronize; + + s_ddev->src_addr_widths = EDMA_DMA_BUSWIDTHS; + s_ddev->dst_addr_widths = EDMA_DMA_BUSWIDTHS; + s_ddev->directions |= (BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV)); + s_ddev->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST; + s_ddev->max_burst = SZ_32K - 1; /* CIDX: 16bit signed */ + + s_ddev->dev = ecc->dev; + INIT_LIST_HEAD(&s_ddev->channels); + + if (memcpy_channels) { + m_ddev = devm_kzalloc(ecc->dev, sizeof(*m_ddev), GFP_KERNEL); + if (!m_ddev) { + dev_warn(ecc->dev, "memcpy is disabled due to OoM\n"); + memcpy_channels = NULL; + goto ch_setup; + } + ecc->dma_memcpy = m_ddev; + + dma_cap_zero(m_ddev->cap_mask); + dma_cap_set(DMA_MEMCPY, m_ddev->cap_mask); + + m_ddev->device_prep_dma_memcpy = edma_prep_dma_memcpy; + m_ddev->device_alloc_chan_resources = edma_alloc_chan_resources; + m_ddev->device_free_chan_resources = edma_free_chan_resources; + m_ddev->device_issue_pending = edma_issue_pending; + 
m_ddev->device_tx_status = edma_tx_status; + m_ddev->device_config = edma_slave_config; + m_ddev->device_pause = edma_dma_pause; + m_ddev->device_resume = edma_dma_resume; + m_ddev->device_terminate_all = edma_terminate_all; + m_ddev->device_synchronize = edma_synchronize; + + m_ddev->src_addr_widths = EDMA_DMA_BUSWIDTHS; + m_ddev->dst_addr_widths = EDMA_DMA_BUSWIDTHS; + m_ddev->directions = BIT(DMA_MEM_TO_MEM); + m_ddev->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST; + + m_ddev->dev = ecc->dev; + INIT_LIST_HEAD(&m_ddev->channels); + } else if (!ecc->legacy_mode) { + dev_info(ecc->dev, "memcpy is disabled\n"); + } + +ch_setup: + for (i = 0; i < ecc->num_channels; i++) { + struct edma_chan *echan = &ecc->slave_chans[i]; + echan->ch_num = EDMA_CTLR_CHAN(ecc->id, i); + echan->ecc = ecc; + echan->vchan.desc_free = edma_desc_free; + + if (m_ddev && edma_is_memcpy_channel(i, memcpy_channels)) + vchan_init(&echan->vchan, m_ddev); + else + vchan_init(&echan->vchan, s_ddev); + + INIT_LIST_HEAD(&echan->node); + for (j = 0; j < EDMA_MAX_SLOTS; j++) + echan->slot[j] = -1; + } +} + +static int edma_setup_from_hw(struct device *dev, struct edma_soc_info *pdata, + struct edma_cc *ecc) +{ + int i; + u32 value, cccfg; + s8 (*queue_priority_map)[2]; + + /* Decode the eDMA3 configuration from CCCFG register */ + cccfg = edma_read(ecc, EDMA_CCCFG); + + value = GET_NUM_REGN(cccfg); + ecc->num_region = BIT(value); + + value = GET_NUM_DMACH(cccfg); + ecc->num_channels = BIT(value + 1); + + value = GET_NUM_QDMACH(cccfg); + ecc->num_qchannels = value * 2; + + value = GET_NUM_PAENTRY(cccfg); + ecc->num_slots = BIT(value + 4); + + value = GET_NUM_EVQUE(cccfg); + ecc->num_tc = value + 1; + + ecc->chmap_exist = (cccfg & CHMAP_EXIST) ? true : false; + + dev_dbg(dev, "eDMA3 CC HW configuration (cccfg: 0x%08x):\n", cccfg); + dev_dbg(dev, "num_region: %u\n", ecc->num_region); + dev_dbg(dev, "num_channels: %u\n", ecc->num_channels); + dev_dbg(dev, "num_qchannels: %u\n", ecc->num_qchannels); + dev_dbg(dev, "num_slots: %u\n", ecc->num_slots); + dev_dbg(dev, "num_tc: %u\n", ecc->num_tc); + dev_dbg(dev, "chmap_exist: %s\n", ecc->chmap_exist ? "yes" : "no"); + + /* Nothing need to be done if queue priority is provided */ + if (pdata->queue_priority_mapping) + return 0; + + /* + * Configure TC/queue priority as follows: + * Q0 - priority 0 + * Q1 - priority 1 + * Q2 - priority 2 + * ... + * The meaning of priority numbers: 0 highest priority, 7 lowest + * priority. So Q0 is the highest priority queue and the last queue has + * the lowest priority. 
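 *
 * For example (illustrative, assuming num_tc = 3) the generated map is
 * { {0, 0}, {1, 1}, {2, 2}, {-1, -1} } and default_queue becomes 2,
 * i.e. the lowest-priority queue Q2.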
+ */ + queue_priority_map = devm_kcalloc(dev, ecc->num_tc + 1, sizeof(s8), + GFP_KERNEL); + if (!queue_priority_map) + return -ENOMEM; + + for (i = 0; i < ecc->num_tc; i++) { + queue_priority_map[i][0] = i; + queue_priority_map[i][1] = i; + } + queue_priority_map[i][0] = -1; + queue_priority_map[i][1] = -1; + + pdata->queue_priority_mapping = queue_priority_map; + /* Default queue has the lowest priority */ + pdata->default_queue = i - 1; + + return 0; +} + +#if IS_ENABLED(CONFIG_OF) +static int edma_xbar_event_map(struct device *dev, struct edma_soc_info *pdata, + size_t sz) +{ + const char pname[] = "ti,edma-xbar-event-map"; + struct resource res; + void __iomem *xbar; + s16 (*xbar_chans)[2]; + size_t nelm = sz / sizeof(s16); + u32 shift, offset, mux; + int ret, i; + + xbar_chans = devm_kcalloc(dev, nelm + 2, sizeof(s16), GFP_KERNEL); + if (!xbar_chans) + return -ENOMEM; + + ret = of_address_to_resource(dev->of_node, 1, &res); + if (ret) + return -ENOMEM; + + xbar = devm_ioremap(dev, res.start, resource_size(&res)); + if (!xbar) + return -ENOMEM; + + ret = of_property_read_u16_array(dev->of_node, pname, (u16 *)xbar_chans, + nelm); + if (ret) + return -EIO; + + /* Invalidate last entry for the other user of this mess */ + nelm >>= 1; + xbar_chans[nelm][0] = -1; + xbar_chans[nelm][1] = -1; + + for (i = 0; i < nelm; i++) { + shift = (xbar_chans[i][1] & 0x03) << 3; + offset = xbar_chans[i][1] & 0xfffffffc; + mux = readl(xbar + offset); + mux &= ~(0xff << shift); + mux |= xbar_chans[i][0] << shift; + writel(mux, (xbar + offset)); + } + + pdata->xbar_chans = (const s16 (*)[2]) xbar_chans; + return 0; +} + +static struct edma_soc_info *edma_setup_info_from_dt(struct device *dev, + bool legacy_mode) +{ + struct edma_soc_info *info; + struct property *prop; + int sz, ret; + + info = devm_kzalloc(dev, sizeof(struct edma_soc_info), GFP_KERNEL); + if (!info) + return ERR_PTR(-ENOMEM); + + if (legacy_mode) { + prop = of_find_property(dev->of_node, "ti,edma-xbar-event-map", + &sz); + if (prop) { + ret = edma_xbar_event_map(dev, info, sz); + if (ret) + return ERR_PTR(ret); + } + return info; + } + + /* Get the list of channels allocated to be used for memcpy */ + prop = of_find_property(dev->of_node, "ti,edma-memcpy-channels", &sz); + if (prop) { + const char pname[] = "ti,edma-memcpy-channels"; + size_t nelm = sz / sizeof(s32); + s32 *memcpy_ch; + + memcpy_ch = devm_kcalloc(dev, nelm + 1, sizeof(s32), + GFP_KERNEL); + if (!memcpy_ch) + return ERR_PTR(-ENOMEM); + + ret = of_property_read_u32_array(dev->of_node, pname, + (u32 *)memcpy_ch, nelm); + if (ret) + return ERR_PTR(ret); + + memcpy_ch[nelm] = -1; + info->memcpy_channels = memcpy_ch; + } + + prop = of_find_property(dev->of_node, "ti,edma-reserved-slot-ranges", + &sz); + if (prop) { + const char pname[] = "ti,edma-reserved-slot-ranges"; + u32 (*tmp)[2]; + s16 (*rsv_slots)[2]; + size_t nelm = sz / sizeof(*tmp); + struct edma_rsv_info *rsv_info; + int i; + + if (!nelm) + return info; + + tmp = kcalloc(nelm, sizeof(*tmp), GFP_KERNEL); + if (!tmp) + return ERR_PTR(-ENOMEM); + + rsv_info = devm_kzalloc(dev, sizeof(*rsv_info), GFP_KERNEL); + if (!rsv_info) { + kfree(tmp); + return ERR_PTR(-ENOMEM); + } + + rsv_slots = devm_kcalloc(dev, nelm + 1, sizeof(*rsv_slots), + GFP_KERNEL); + if (!rsv_slots) { + kfree(tmp); + return ERR_PTR(-ENOMEM); + } + + ret = of_property_read_u32_array(dev->of_node, pname, + (u32 *)tmp, nelm * 2); + if (ret) { + kfree(tmp); + return ERR_PTR(ret); + } + + for (i = 0; i < nelm; i++) { + rsv_slots[i][0] = tmp[i][0]; + 
rsv_slots[i][1] = tmp[i][1]; + } + rsv_slots[nelm][0] = -1; + rsv_slots[nelm][1] = -1; + + info->rsv = rsv_info; + info->rsv->rsv_slots = (const s16 (*)[2])rsv_slots; + + kfree(tmp); + } + + return info; +} + +static struct dma_chan *of_edma_xlate(struct of_phandle_args *dma_spec, + struct of_dma *ofdma) +{ + struct edma_cc *ecc = ofdma->of_dma_data; + struct dma_chan *chan = NULL; + struct edma_chan *echan; + int i; + + if (!ecc || dma_spec->args_count < 1) + return NULL; + + for (i = 0; i < ecc->num_channels; i++) { + echan = &ecc->slave_chans[i]; + if (echan->ch_num == dma_spec->args[0]) { + chan = &echan->vchan.chan; + break; + } + } + + if (!chan) + return NULL; + + if (echan->ecc->legacy_mode && dma_spec->args_count == 1) + goto out; + + if (!echan->ecc->legacy_mode && dma_spec->args_count == 2 && + dma_spec->args[1] < echan->ecc->num_tc) { + echan->tc = &echan->ecc->tc_list[dma_spec->args[1]]; + goto out; + } + + return NULL; +out: + /* The channel is going to be used as HW synchronized */ + echan->hw_triggered = true; + return dma_get_slave_channel(chan); +} +#else +static struct edma_soc_info *edma_setup_info_from_dt(struct device *dev, + bool legacy_mode) +{ + return ERR_PTR(-EINVAL); +} + +static struct dma_chan *of_edma_xlate(struct of_phandle_args *dma_spec, + struct of_dma *ofdma) +{ + return NULL; +} +#endif + +static int edma_probe(struct platform_device *pdev) +{ + struct edma_soc_info *info = pdev->dev.platform_data; + s8 (*queue_priority_mapping)[2]; + int i, off, ln; + const s16 (*rsv_slots)[2]; + const s16 (*xbar_chans)[2]; + int irq; + char *irq_name; + struct resource *mem; + struct device_node *node = pdev->dev.of_node; + struct device *dev = &pdev->dev; + struct edma_cc *ecc; + bool legacy_mode = true; + int ret; + + if (node) { + const struct of_device_id *match; + + match = of_match_node(edma_of_ids, node); + if (match && (*(u32 *)match->data) == EDMA_BINDING_TPCC) + legacy_mode = false; + + info = edma_setup_info_from_dt(dev, legacy_mode); + if (IS_ERR(info)) { + dev_err(dev, "failed to get DT data\n"); + return PTR_ERR(info); + } + } + + if (!info) + return -ENODEV; + + pm_runtime_enable(dev); + ret = pm_runtime_get_sync(dev); + if (ret < 0) { + dev_err(dev, "pm_runtime_get_sync() failed\n"); + return ret; + } + + ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)); + if (ret) + return ret; + + ecc = devm_kzalloc(dev, sizeof(*ecc), GFP_KERNEL); + if (!ecc) + return -ENOMEM; + + ecc->dev = dev; + ecc->id = pdev->id; + ecc->legacy_mode = legacy_mode; + /* When booting with DT the pdev->id is -1 */ + if (ecc->id < 0) + ecc->id = 0; + + mem = platform_get_resource_byname(pdev, IORESOURCE_MEM, "edma3_cc"); + if (!mem) { + dev_dbg(dev, "mem resource not found, using index 0\n"); + mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!mem) { + dev_err(dev, "no mem resource?\n"); + return -ENODEV; + } + } + ecc->base = devm_ioremap_resource(dev, mem); + if (IS_ERR(ecc->base)) + return PTR_ERR(ecc->base); + + platform_set_drvdata(pdev, ecc); + + /* Get eDMA3 configuration from IP */ + ret = edma_setup_from_hw(dev, info, ecc); + if (ret) + return ret; + + /* Allocate memory based on the information we got from the IP */ + ecc->slave_chans = devm_kcalloc(dev, ecc->num_channels, + sizeof(*ecc->slave_chans), GFP_KERNEL); + if (!ecc->slave_chans) + return -ENOMEM; + + ecc->slot_inuse = devm_kcalloc(dev, BITS_TO_LONGS(ecc->num_slots), + sizeof(unsigned long), GFP_KERNEL); + if (!ecc->slot_inuse) + return -ENOMEM; + + ecc->default_queue = info->default_queue; + + for 
(i = 0; i < ecc->num_slots; i++) + edma_write_slot(ecc, i, &dummy_paramset); + + if (info->rsv) { + /* Set the reserved slots in inuse list */ + rsv_slots = info->rsv->rsv_slots; + if (rsv_slots) { + for (i = 0; rsv_slots[i][0] != -1; i++) { + off = rsv_slots[i][0]; + ln = rsv_slots[i][1]; + edma_set_bits(off, ln, ecc->slot_inuse); + } + } + } + + /* Clear the xbar mapped channels in unused list */ + xbar_chans = info->xbar_chans; + if (xbar_chans) { + for (i = 0; xbar_chans[i][1] != -1; i++) { + off = xbar_chans[i][1]; + } + } + + irq = platform_get_irq_byname(pdev, "edma3_ccint"); + if (irq < 0 && node) + irq = irq_of_parse_and_map(node, 0); + + if (irq >= 0) { + irq_name = devm_kasprintf(dev, GFP_KERNEL, "%s_ccint", + dev_name(dev)); + ret = devm_request_irq(dev, irq, dma_irq_handler, 0, irq_name, + ecc); + if (ret) { + dev_err(dev, "CCINT (%d) failed --> %d\n", irq, ret); + return ret; + } + ecc->ccint = irq; + } + + irq = platform_get_irq_byname(pdev, "edma3_ccerrint"); + if (irq < 0 && node) + irq = irq_of_parse_and_map(node, 2); + + if (irq >= 0) { + irq_name = devm_kasprintf(dev, GFP_KERNEL, "%s_ccerrint", + dev_name(dev)); + ret = devm_request_irq(dev, irq, dma_ccerr_handler, 0, irq_name, + ecc); + if (ret) { + dev_err(dev, "CCERRINT (%d) failed --> %d\n", irq, ret); + return ret; + } + ecc->ccerrint = irq; + } + + ecc->dummy_slot = edma_alloc_slot(ecc, EDMA_SLOT_ANY); + if (ecc->dummy_slot < 0) { + dev_err(dev, "Can't allocate PaRAM dummy slot\n"); + return ecc->dummy_slot; + } + + queue_priority_mapping = info->queue_priority_mapping; + + if (!ecc->legacy_mode) { + int lowest_priority = 0; + struct of_phandle_args tc_args; + + ecc->tc_list = devm_kcalloc(dev, ecc->num_tc, + sizeof(*ecc->tc_list), GFP_KERNEL); + if (!ecc->tc_list) + return -ENOMEM; + + for (i = 0;; i++) { + ret = of_parse_phandle_with_fixed_args(node, "ti,tptcs", + 1, i, &tc_args); + if (ret || i == ecc->num_tc) + break; + + ecc->tc_list[i].node = tc_args.np; + ecc->tc_list[i].id = i; + queue_priority_mapping[i][1] = tc_args.args[0]; + if (queue_priority_mapping[i][1] > lowest_priority) { + lowest_priority = queue_priority_mapping[i][1]; + info->default_queue = i; + } + } + } + + /* Event queue priority mapping */ + for (i = 0; queue_priority_mapping[i][0] != -1; i++) + edma_assign_priority_to_queue(ecc, queue_priority_mapping[i][0], + queue_priority_mapping[i][1]); + + for (i = 0; i < ecc->num_region; i++) { + edma_write_array2(ecc, EDMA_DRAE, i, 0, 0x0); + edma_write_array2(ecc, EDMA_DRAE, i, 1, 0x0); + edma_write_array(ecc, EDMA_QRAE, i, 0x0); + } + ecc->info = info; + + /* Init the dma device and channels */ + edma_dma_init(ecc, legacy_mode); + + for (i = 0; i < ecc->num_channels; i++) { + /* Assign all channels to the default queue */ + edma_assign_channel_eventq(&ecc->slave_chans[i], + info->default_queue); + /* Set entry slot to the dummy slot */ + edma_set_chmap(&ecc->slave_chans[i], ecc->dummy_slot); + } + + ecc->dma_slave.filter.map = info->slave_map; + ecc->dma_slave.filter.mapcnt = info->slavecnt; + ecc->dma_slave.filter.fn = edma_filter_fn; + + ret = dma_async_device_register(&ecc->dma_slave); + if (ret) { + dev_err(dev, "slave ddev registration failed (%d)\n", ret); + goto err_reg1; + } + + if (ecc->dma_memcpy) { + ret = dma_async_device_register(ecc->dma_memcpy); + if (ret) { + dev_err(dev, "memcpy ddev registration failed (%d)\n", + ret); + dma_async_device_unregister(&ecc->dma_slave); + goto err_reg1; + } + } + + if (node) + of_dma_controller_register(node, of_edma_xlate, ecc); + + dev_info(dev, 
"TI EDMA DMA engine driver\n"); + + return 0; + +err_reg1: + edma_free_slot(ecc, ecc->dummy_slot); + return ret; +} + +static void edma_cleanupp_vchan(struct dma_device *dmadev) +{ + struct edma_chan *echan, *_echan; + + list_for_each_entry_safe(echan, _echan, + &dmadev->channels, vchan.chan.device_node) { + list_del(&echan->vchan.chan.device_node); + tasklet_kill(&echan->vchan.task); + } +} + +static int edma_remove(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct edma_cc *ecc = dev_get_drvdata(dev); + + devm_free_irq(dev, ecc->ccint, ecc); + devm_free_irq(dev, ecc->ccerrint, ecc); + + edma_cleanupp_vchan(&ecc->dma_slave); + + if (dev->of_node) + of_dma_controller_free(dev->of_node); + dma_async_device_unregister(&ecc->dma_slave); + if (ecc->dma_memcpy) + dma_async_device_unregister(ecc->dma_memcpy); + edma_free_slot(ecc, ecc->dummy_slot); + + return 0; +} + +#ifdef CONFIG_PM_SLEEP +static int edma_pm_suspend(struct device *dev) +{ + struct edma_cc *ecc = dev_get_drvdata(dev); + struct edma_chan *echan = ecc->slave_chans; + int i; + + for (i = 0; i < ecc->num_channels; i++) { + if (echan[i].alloced) + edma_setup_interrupt(&echan[i], false); + } + + return 0; +} + +static int edma_pm_resume(struct device *dev) +{ + struct edma_cc *ecc = dev_get_drvdata(dev); + struct edma_chan *echan = ecc->slave_chans; + int i; + s8 (*queue_priority_mapping)[2]; + + /* re initialize dummy slot to dummy param set */ + edma_write_slot(ecc, ecc->dummy_slot, &dummy_paramset); + + queue_priority_mapping = ecc->info->queue_priority_mapping; + + /* Event queue priority mapping */ + for (i = 0; queue_priority_mapping[i][0] != -1; i++) + edma_assign_priority_to_queue(ecc, queue_priority_mapping[i][0], + queue_priority_mapping[i][1]); + + for (i = 0; i < ecc->num_channels; i++) { + if (echan[i].alloced) { + /* ensure access through shadow region 0 */ + edma_or_array2(ecc, EDMA_DRAE, 0, i >> 5, + BIT(i & 0x1f)); + + edma_setup_interrupt(&echan[i], true); + + /* Set up channel -> slot mapping for the entry slot */ + edma_set_chmap(&echan[i], echan[i].slot[0]); + } + } + + return 0; +} +#endif + +static const struct dev_pm_ops edma_pm_ops = { + SET_LATE_SYSTEM_SLEEP_PM_OPS(edma_pm_suspend, edma_pm_resume) +}; + +static struct platform_driver edma_driver = { + .probe = edma_probe, + .remove = edma_remove, + .driver = { + .name = "edma", + .pm = &edma_pm_ops, + .of_match_table = edma_of_ids, + }, +}; + +static int edma_tptc_probe(struct platform_device *pdev) +{ + pm_runtime_enable(&pdev->dev); + return pm_runtime_get_sync(&pdev->dev); +} + +static struct platform_driver edma_tptc_driver = { + .probe = edma_tptc_probe, + .driver = { + .name = "edma3-tptc", + .of_match_table = edma_tptc_of_ids, + }, +}; + +bool edma_filter_fn(struct dma_chan *chan, void *param) +{ + bool match = false; + + if (chan->device->dev->driver == &edma_driver.driver) { + struct edma_chan *echan = to_edma_chan(chan); + unsigned ch_req = *(unsigned *)param; + if (ch_req == echan->ch_num) { + /* The channel is going to be used as HW synchronized */ + echan->hw_triggered = true; + match = true; + } + } + return match; +} +EXPORT_SYMBOL(edma_filter_fn); + +static int edma_init(void) +{ + int ret; + + ret = platform_driver_register(&edma_tptc_driver); + if (ret) + return ret; + + return platform_driver_register(&edma_driver); +} +subsys_initcall(edma_init); + +static void __exit edma_exit(void) +{ + platform_driver_unregister(&edma_driver); + platform_driver_unregister(&edma_tptc_driver); +} +module_exit(edma_exit); + 
+MODULE_AUTHOR("Matt Porter "); +MODULE_DESCRIPTION("TI EDMA DMA engine driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/dma/ti/omap-dma.c b/drivers/dma/ti/omap-dma.c new file mode 100644 index 000000000000..b73fb51fbc81 --- /dev/null +++ b/drivers/dma/ti/omap-dma.c @@ -0,0 +1,1668 @@ +/* + * OMAP DMAengine support + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "../virt-dma.h" + +#define OMAP_SDMA_REQUESTS 127 +#define OMAP_SDMA_CHANNELS 32 + +struct omap_dmadev { + struct dma_device ddev; + spinlock_t lock; + void __iomem *base; + const struct omap_dma_reg *reg_map; + struct omap_system_dma_plat_info *plat; + bool legacy; + bool ll123_supported; + struct dma_pool *desc_pool; + unsigned dma_requests; + spinlock_t irq_lock; + uint32_t irq_enable_mask; + struct omap_chan **lch_map; +}; + +struct omap_chan { + struct virt_dma_chan vc; + void __iomem *channel_base; + const struct omap_dma_reg *reg_map; + uint32_t ccr; + + struct dma_slave_config cfg; + unsigned dma_sig; + bool cyclic; + bool paused; + bool running; + + int dma_ch; + struct omap_desc *desc; + unsigned sgidx; +}; + +#define DESC_NXT_SV_REFRESH (0x1 << 24) +#define DESC_NXT_SV_REUSE (0x2 << 24) +#define DESC_NXT_DV_REFRESH (0x1 << 26) +#define DESC_NXT_DV_REUSE (0x2 << 26) +#define DESC_NTYPE_TYPE2 (0x2 << 29) + +/* Type 2 descriptor with Source or Destination address update */ +struct omap_type2_desc { + uint32_t next_desc; + uint32_t en; + uint32_t addr; /* src or dst */ + uint16_t fn; + uint16_t cicr; + int16_t cdei; + int16_t csei; + int32_t cdfi; + int32_t csfi; +} __packed; + +struct omap_sg { + dma_addr_t addr; + uint32_t en; /* number of elements (24-bit) */ + uint32_t fn; /* number of frames (16-bit) */ + int32_t fi; /* for double indexing */ + int16_t ei; /* for double indexing */ + + /* Linked list */ + struct omap_type2_desc *t2_desc; + dma_addr_t t2_desc_paddr; +}; + +struct omap_desc { + struct virt_dma_desc vd; + bool using_ll; + enum dma_transfer_direction dir; + dma_addr_t dev_addr; + + int32_t fi; /* for OMAP_DMA_SYNC_PACKET / double indexing */ + int16_t ei; /* for double indexing */ + uint8_t es; /* CSDP_DATA_TYPE_xxx */ + uint32_t ccr; /* CCR value */ + uint16_t clnk_ctrl; /* CLNK_CTRL value */ + uint16_t cicr; /* CICR value */ + uint32_t csdp; /* CSDP value */ + + unsigned sglen; + struct omap_sg sg[0]; +}; + +enum { + CAPS_0_SUPPORT_LL123 = BIT(20), /* Linked List type1/2/3 */ + CAPS_0_SUPPORT_LL4 = BIT(21), /* Linked List type4 */ + + CCR_FS = BIT(5), + CCR_READ_PRIORITY = BIT(6), + CCR_ENABLE = BIT(7), + CCR_AUTO_INIT = BIT(8), /* OMAP1 only */ + CCR_REPEAT = BIT(9), /* OMAP1 only */ + CCR_OMAP31_DISABLE = BIT(10), /* OMAP1 only */ + CCR_SUSPEND_SENSITIVE = BIT(8), /* OMAP2+ only */ + CCR_RD_ACTIVE = BIT(9), /* OMAP2+ only */ + CCR_WR_ACTIVE = BIT(10), /* OMAP2+ only */ + CCR_SRC_AMODE_CONSTANT = 0 << 12, + CCR_SRC_AMODE_POSTINC = 1 << 12, + CCR_SRC_AMODE_SGLIDX = 2 << 12, + CCR_SRC_AMODE_DBLIDX = 3 << 12, + CCR_DST_AMODE_CONSTANT = 0 << 14, + CCR_DST_AMODE_POSTINC = 1 << 14, + CCR_DST_AMODE_SGLIDX = 2 << 14, + CCR_DST_AMODE_DBLIDX = 3 << 14, + CCR_CONSTANT_FILL = BIT(16), + CCR_TRANSPARENT_COPY = BIT(17), + CCR_BS = BIT(18), + CCR_SUPERVISOR = BIT(22), + CCR_PREFETCH = BIT(23), + 
CCR_TRIGGER_SRC = BIT(24), + CCR_BUFFERING_DISABLE = BIT(25), + CCR_WRITE_PRIORITY = BIT(26), + CCR_SYNC_ELEMENT = 0, + CCR_SYNC_FRAME = CCR_FS, + CCR_SYNC_BLOCK = CCR_BS, + CCR_SYNC_PACKET = CCR_BS | CCR_FS, + + CSDP_DATA_TYPE_8 = 0, + CSDP_DATA_TYPE_16 = 1, + CSDP_DATA_TYPE_32 = 2, + CSDP_SRC_PORT_EMIFF = 0 << 2, /* OMAP1 only */ + CSDP_SRC_PORT_EMIFS = 1 << 2, /* OMAP1 only */ + CSDP_SRC_PORT_OCP_T1 = 2 << 2, /* OMAP1 only */ + CSDP_SRC_PORT_TIPB = 3 << 2, /* OMAP1 only */ + CSDP_SRC_PORT_OCP_T2 = 4 << 2, /* OMAP1 only */ + CSDP_SRC_PORT_MPUI = 5 << 2, /* OMAP1 only */ + CSDP_SRC_PACKED = BIT(6), + CSDP_SRC_BURST_1 = 0 << 7, + CSDP_SRC_BURST_16 = 1 << 7, + CSDP_SRC_BURST_32 = 2 << 7, + CSDP_SRC_BURST_64 = 3 << 7, + CSDP_DST_PORT_EMIFF = 0 << 9, /* OMAP1 only */ + CSDP_DST_PORT_EMIFS = 1 << 9, /* OMAP1 only */ + CSDP_DST_PORT_OCP_T1 = 2 << 9, /* OMAP1 only */ + CSDP_DST_PORT_TIPB = 3 << 9, /* OMAP1 only */ + CSDP_DST_PORT_OCP_T2 = 4 << 9, /* OMAP1 only */ + CSDP_DST_PORT_MPUI = 5 << 9, /* OMAP1 only */ + CSDP_DST_PACKED = BIT(13), + CSDP_DST_BURST_1 = 0 << 14, + CSDP_DST_BURST_16 = 1 << 14, + CSDP_DST_BURST_32 = 2 << 14, + CSDP_DST_BURST_64 = 3 << 14, + CSDP_WRITE_NON_POSTED = 0 << 16, + CSDP_WRITE_POSTED = 1 << 16, + CSDP_WRITE_LAST_NON_POSTED = 2 << 16, + + CICR_TOUT_IE = BIT(0), /* OMAP1 only */ + CICR_DROP_IE = BIT(1), + CICR_HALF_IE = BIT(2), + CICR_FRAME_IE = BIT(3), + CICR_LAST_IE = BIT(4), + CICR_BLOCK_IE = BIT(5), + CICR_PKT_IE = BIT(7), /* OMAP2+ only */ + CICR_TRANS_ERR_IE = BIT(8), /* OMAP2+ only */ + CICR_SUPERVISOR_ERR_IE = BIT(10), /* OMAP2+ only */ + CICR_MISALIGNED_ERR_IE = BIT(11), /* OMAP2+ only */ + CICR_DRAIN_IE = BIT(12), /* OMAP2+ only */ + CICR_SUPER_BLOCK_IE = BIT(14), /* OMAP2+ only */ + + CLNK_CTRL_ENABLE_LNK = BIT(15), + + CDP_DST_VALID_INC = 0 << 0, + CDP_DST_VALID_RELOAD = 1 << 0, + CDP_DST_VALID_REUSE = 2 << 0, + CDP_SRC_VALID_INC = 0 << 2, + CDP_SRC_VALID_RELOAD = 1 << 2, + CDP_SRC_VALID_REUSE = 2 << 2, + CDP_NTYPE_TYPE1 = 1 << 4, + CDP_NTYPE_TYPE2 = 2 << 4, + CDP_NTYPE_TYPE3 = 3 << 4, + CDP_TMODE_NORMAL = 0 << 8, + CDP_TMODE_LLIST = 1 << 8, + CDP_FAST = BIT(10), +}; + +static const unsigned es_bytes[] = { + [CSDP_DATA_TYPE_8] = 1, + [CSDP_DATA_TYPE_16] = 2, + [CSDP_DATA_TYPE_32] = 4, +}; + +static struct of_dma_filter_info omap_dma_info = { + .filter_fn = omap_dma_filter_fn, +}; + +static inline struct omap_dmadev *to_omap_dma_dev(struct dma_device *d) +{ + return container_of(d, struct omap_dmadev, ddev); +} + +static inline struct omap_chan *to_omap_dma_chan(struct dma_chan *c) +{ + return container_of(c, struct omap_chan, vc.chan); +} + +static inline struct omap_desc *to_omap_dma_desc(struct dma_async_tx_descriptor *t) +{ + return container_of(t, struct omap_desc, vd.tx); +} + +static void omap_dma_desc_free(struct virt_dma_desc *vd) +{ + struct omap_desc *d = to_omap_dma_desc(&vd->tx); + + if (d->using_ll) { + struct omap_dmadev *od = to_omap_dma_dev(vd->tx.chan->device); + int i; + + for (i = 0; i < d->sglen; i++) { + if (d->sg[i].t2_desc) + dma_pool_free(od->desc_pool, d->sg[i].t2_desc, + d->sg[i].t2_desc_paddr); + } + } + + kfree(d); +} + +static void omap_dma_fill_type2_desc(struct omap_desc *d, int idx, + enum dma_transfer_direction dir, bool last) +{ + struct omap_sg *sg = &d->sg[idx]; + struct omap_type2_desc *t2_desc = sg->t2_desc; + + if (idx) + d->sg[idx - 1].t2_desc->next_desc = sg->t2_desc_paddr; + if (last) + t2_desc->next_desc = 0xfffffffc; + + t2_desc->en = sg->en; + t2_desc->addr = sg->addr; + t2_desc->fn = sg->fn & 0xffff; + 
t2_desc->cicr = d->cicr; + if (!last) + t2_desc->cicr &= ~CICR_BLOCK_IE; + + switch (dir) { + case DMA_DEV_TO_MEM: + t2_desc->cdei = sg->ei; + t2_desc->csei = d->ei; + t2_desc->cdfi = sg->fi; + t2_desc->csfi = d->fi; + + t2_desc->en |= DESC_NXT_DV_REFRESH; + t2_desc->en |= DESC_NXT_SV_REUSE; + break; + case DMA_MEM_TO_DEV: + t2_desc->cdei = d->ei; + t2_desc->csei = sg->ei; + t2_desc->cdfi = d->fi; + t2_desc->csfi = sg->fi; + + t2_desc->en |= DESC_NXT_SV_REFRESH; + t2_desc->en |= DESC_NXT_DV_REUSE; + break; + default: + return; + } + + t2_desc->en |= DESC_NTYPE_TYPE2; +} + +static void omap_dma_write(uint32_t val, unsigned type, void __iomem *addr) +{ + switch (type) { + case OMAP_DMA_REG_16BIT: + writew_relaxed(val, addr); + break; + case OMAP_DMA_REG_2X16BIT: + writew_relaxed(val, addr); + writew_relaxed(val >> 16, addr + 2); + break; + case OMAP_DMA_REG_32BIT: + writel_relaxed(val, addr); + break; + default: + WARN_ON(1); + } +} + +static unsigned omap_dma_read(unsigned type, void __iomem *addr) +{ + unsigned val; + + switch (type) { + case OMAP_DMA_REG_16BIT: + val = readw_relaxed(addr); + break; + case OMAP_DMA_REG_2X16BIT: + val = readw_relaxed(addr); + val |= readw_relaxed(addr + 2) << 16; + break; + case OMAP_DMA_REG_32BIT: + val = readl_relaxed(addr); + break; + default: + WARN_ON(1); + val = 0; + } + + return val; +} + +static void omap_dma_glbl_write(struct omap_dmadev *od, unsigned reg, unsigned val) +{ + const struct omap_dma_reg *r = od->reg_map + reg; + + WARN_ON(r->stride); + + omap_dma_write(val, r->type, od->base + r->offset); +} + +static unsigned omap_dma_glbl_read(struct omap_dmadev *od, unsigned reg) +{ + const struct omap_dma_reg *r = od->reg_map + reg; + + WARN_ON(r->stride); + + return omap_dma_read(r->type, od->base + r->offset); +} + +static void omap_dma_chan_write(struct omap_chan *c, unsigned reg, unsigned val) +{ + const struct omap_dma_reg *r = c->reg_map + reg; + + omap_dma_write(val, r->type, c->channel_base + r->offset); +} + +static unsigned omap_dma_chan_read(struct omap_chan *c, unsigned reg) +{ + const struct omap_dma_reg *r = c->reg_map + reg; + + return omap_dma_read(r->type, c->channel_base + r->offset); +} + +static void omap_dma_clear_csr(struct omap_chan *c) +{ + if (dma_omap1()) + omap_dma_chan_read(c, CSR); + else + omap_dma_chan_write(c, CSR, ~0); +} + +static unsigned omap_dma_get_csr(struct omap_chan *c) +{ + unsigned val = omap_dma_chan_read(c, CSR); + + if (!dma_omap1()) + omap_dma_chan_write(c, CSR, val); + + return val; +} + +static void omap_dma_assign(struct omap_dmadev *od, struct omap_chan *c, + unsigned lch) +{ + c->channel_base = od->base + od->plat->channel_stride * lch; + + od->lch_map[lch] = c; +} + +static void omap_dma_start(struct omap_chan *c, struct omap_desc *d) +{ + struct omap_dmadev *od = to_omap_dma_dev(c->vc.chan.device); + uint16_t cicr = d->cicr; + + if (__dma_omap15xx(od->plat->dma_attr)) + omap_dma_chan_write(c, CPC, 0); + else + omap_dma_chan_write(c, CDAC, 0); + + omap_dma_clear_csr(c); + + if (d->using_ll) { + uint32_t cdp = CDP_TMODE_LLIST | CDP_NTYPE_TYPE2 | CDP_FAST; + + if (d->dir == DMA_DEV_TO_MEM) + cdp |= (CDP_DST_VALID_RELOAD | CDP_SRC_VALID_REUSE); + else + cdp |= (CDP_DST_VALID_REUSE | CDP_SRC_VALID_RELOAD); + omap_dma_chan_write(c, CDP, cdp); + + omap_dma_chan_write(c, CNDP, d->sg[0].t2_desc_paddr); + omap_dma_chan_write(c, CCDN, 0); + omap_dma_chan_write(c, CCFN, 0xffff); + omap_dma_chan_write(c, CCEN, 0xffffff); + + cicr &= ~CICR_BLOCK_IE; + } else if (od->ll123_supported) { + 
omap_dma_chan_write(c, CDP, 0); + } + + /* Enable interrupts */ + omap_dma_chan_write(c, CICR, cicr); + + /* Enable channel */ + omap_dma_chan_write(c, CCR, d->ccr | CCR_ENABLE); + + c->running = true; +} + +static void omap_dma_drain_chan(struct omap_chan *c) +{ + int i; + u32 val; + + /* Wait for sDMA FIFO to drain */ + for (i = 0; ; i++) { + val = omap_dma_chan_read(c, CCR); + if (!(val & (CCR_RD_ACTIVE | CCR_WR_ACTIVE))) + break; + + if (i > 100) + break; + + udelay(5); + } + + if (val & (CCR_RD_ACTIVE | CCR_WR_ACTIVE)) + dev_err(c->vc.chan.device->dev, + "DMA drain did not complete on lch %d\n", + c->dma_ch); +} + +static int omap_dma_stop(struct omap_chan *c) +{ + struct omap_dmadev *od = to_omap_dma_dev(c->vc.chan.device); + uint32_t val; + + /* disable irq */ + omap_dma_chan_write(c, CICR, 0); + + omap_dma_clear_csr(c); + + val = omap_dma_chan_read(c, CCR); + if (od->plat->errata & DMA_ERRATA_i541 && val & CCR_TRIGGER_SRC) { + uint32_t sysconfig; + + sysconfig = omap_dma_glbl_read(od, OCP_SYSCONFIG); + val = sysconfig & ~DMA_SYSCONFIG_MIDLEMODE_MASK; + val |= DMA_SYSCONFIG_MIDLEMODE(DMA_IDLEMODE_NO_IDLE); + omap_dma_glbl_write(od, OCP_SYSCONFIG, val); + + val = omap_dma_chan_read(c, CCR); + val &= ~CCR_ENABLE; + omap_dma_chan_write(c, CCR, val); + + if (!(c->ccr & CCR_BUFFERING_DISABLE)) + omap_dma_drain_chan(c); + + omap_dma_glbl_write(od, OCP_SYSCONFIG, sysconfig); + } else { + if (!(val & CCR_ENABLE)) + return -EINVAL; + + val &= ~CCR_ENABLE; + omap_dma_chan_write(c, CCR, val); + + if (!(c->ccr & CCR_BUFFERING_DISABLE)) + omap_dma_drain_chan(c); + } + + mb(); + + if (!__dma_omap15xx(od->plat->dma_attr) && c->cyclic) { + val = omap_dma_chan_read(c, CLNK_CTRL); + + if (dma_omap1()) + val |= 1 << 14; /* set the STOP_LNK bit */ + else + val &= ~CLNK_CTRL_ENABLE_LNK; + + omap_dma_chan_write(c, CLNK_CTRL, val); + } + c->running = false; + return 0; +} + +static void omap_dma_start_sg(struct omap_chan *c, struct omap_desc *d) +{ + struct omap_sg *sg = d->sg + c->sgidx; + unsigned cxsa, cxei, cxfi; + + if (d->dir == DMA_DEV_TO_MEM || d->dir == DMA_MEM_TO_MEM) { + cxsa = CDSA; + cxei = CDEI; + cxfi = CDFI; + } else { + cxsa = CSSA; + cxei = CSEI; + cxfi = CSFI; + } + + omap_dma_chan_write(c, cxsa, sg->addr); + omap_dma_chan_write(c, cxei, sg->ei); + omap_dma_chan_write(c, cxfi, sg->fi); + omap_dma_chan_write(c, CEN, sg->en); + omap_dma_chan_write(c, CFN, sg->fn); + + omap_dma_start(c, d); + c->sgidx++; +} + +static void omap_dma_start_desc(struct omap_chan *c) +{ + struct virt_dma_desc *vd = vchan_next_desc(&c->vc); + struct omap_desc *d; + unsigned cxsa, cxei, cxfi; + + if (!vd) { + c->desc = NULL; + return; + } + + list_del(&vd->node); + + c->desc = d = to_omap_dma_desc(&vd->tx); + c->sgidx = 0; + + /* + * This provides the necessary barrier to ensure data held in + * DMA coherent memory is visible to the DMA engine prior to + * the transfer starting. 
+ */ + mb(); + + omap_dma_chan_write(c, CCR, d->ccr); + if (dma_omap1()) + omap_dma_chan_write(c, CCR2, d->ccr >> 16); + + if (d->dir == DMA_DEV_TO_MEM || d->dir == DMA_MEM_TO_MEM) { + cxsa = CSSA; + cxei = CSEI; + cxfi = CSFI; + } else { + cxsa = CDSA; + cxei = CDEI; + cxfi = CDFI; + } + + omap_dma_chan_write(c, cxsa, d->dev_addr); + omap_dma_chan_write(c, cxei, d->ei); + omap_dma_chan_write(c, cxfi, d->fi); + omap_dma_chan_write(c, CSDP, d->csdp); + omap_dma_chan_write(c, CLNK_CTRL, d->clnk_ctrl); + + omap_dma_start_sg(c, d); +} + +static void omap_dma_callback(int ch, u16 status, void *data) +{ + struct omap_chan *c = data; + struct omap_desc *d; + unsigned long flags; + + spin_lock_irqsave(&c->vc.lock, flags); + d = c->desc; + if (d) { + if (c->cyclic) { + vchan_cyclic_callback(&d->vd); + } else if (d->using_ll || c->sgidx == d->sglen) { + omap_dma_start_desc(c); + vchan_cookie_complete(&d->vd); + } else { + omap_dma_start_sg(c, d); + } + } + spin_unlock_irqrestore(&c->vc.lock, flags); +} + +static irqreturn_t omap_dma_irq(int irq, void *devid) +{ + struct omap_dmadev *od = devid; + unsigned status, channel; + + spin_lock(&od->irq_lock); + + status = omap_dma_glbl_read(od, IRQSTATUS_L1); + status &= od->irq_enable_mask; + if (status == 0) { + spin_unlock(&od->irq_lock); + return IRQ_NONE; + } + + while ((channel = ffs(status)) != 0) { + unsigned mask, csr; + struct omap_chan *c; + + channel -= 1; + mask = BIT(channel); + status &= ~mask; + + c = od->lch_map[channel]; + if (c == NULL) { + /* This should never happen */ + dev_err(od->ddev.dev, "invalid channel %u\n", channel); + continue; + } + + csr = omap_dma_get_csr(c); + omap_dma_glbl_write(od, IRQSTATUS_L1, mask); + + omap_dma_callback(channel, csr, c); + } + + spin_unlock(&od->irq_lock); + + return IRQ_HANDLED; +} + +static int omap_dma_alloc_chan_resources(struct dma_chan *chan) +{ + struct omap_dmadev *od = to_omap_dma_dev(chan->device); + struct omap_chan *c = to_omap_dma_chan(chan); + struct device *dev = od->ddev.dev; + int ret; + + if (od->legacy) { + ret = omap_request_dma(c->dma_sig, "DMA engine", + omap_dma_callback, c, &c->dma_ch); + } else { + ret = omap_request_dma(c->dma_sig, "DMA engine", NULL, NULL, + &c->dma_ch); + } + + dev_dbg(dev, "allocating channel %u for %u\n", c->dma_ch, c->dma_sig); + + if (ret >= 0) { + omap_dma_assign(od, c, c->dma_ch); + + if (!od->legacy) { + unsigned val; + + spin_lock_irq(&od->irq_lock); + val = BIT(c->dma_ch); + omap_dma_glbl_write(od, IRQSTATUS_L1, val); + od->irq_enable_mask |= val; + omap_dma_glbl_write(od, IRQENABLE_L1, od->irq_enable_mask); + + val = omap_dma_glbl_read(od, IRQENABLE_L0); + val &= ~BIT(c->dma_ch); + omap_dma_glbl_write(od, IRQENABLE_L0, val); + spin_unlock_irq(&od->irq_lock); + } + } + + if (dma_omap1()) { + if (__dma_omap16xx(od->plat->dma_attr)) { + c->ccr = CCR_OMAP31_DISABLE; + /* Duplicate what plat-omap/dma.c does */ + c->ccr |= c->dma_ch + 1; + } else { + c->ccr = c->dma_sig & 0x1f; + } + } else { + c->ccr = c->dma_sig & 0x1f; + c->ccr |= (c->dma_sig & ~0x1f) << 14; + } + if (od->plat->errata & DMA_ERRATA_IFRAME_BUFFERING) + c->ccr |= CCR_BUFFERING_DISABLE; + + return ret; +} + +static void omap_dma_free_chan_resources(struct dma_chan *chan) +{ + struct omap_dmadev *od = to_omap_dma_dev(chan->device); + struct omap_chan *c = to_omap_dma_chan(chan); + + if (!od->legacy) { + spin_lock_irq(&od->irq_lock); + od->irq_enable_mask &= ~BIT(c->dma_ch); + omap_dma_glbl_write(od, IRQENABLE_L1, od->irq_enable_mask); + spin_unlock_irq(&od->irq_lock); + } + + 
c->channel_base = NULL; + od->lch_map[c->dma_ch] = NULL; + vchan_free_chan_resources(&c->vc); + omap_free_dma(c->dma_ch); + + dev_dbg(od->ddev.dev, "freeing channel %u used for %u\n", c->dma_ch, + c->dma_sig); + c->dma_sig = 0; +} + +static size_t omap_dma_sg_size(struct omap_sg *sg) +{ + return sg->en * sg->fn; +} + +static size_t omap_dma_desc_size(struct omap_desc *d) +{ + unsigned i; + size_t size; + + for (size = i = 0; i < d->sglen; i++) + size += omap_dma_sg_size(&d->sg[i]); + + return size * es_bytes[d->es]; +} + +static size_t omap_dma_desc_size_pos(struct omap_desc *d, dma_addr_t addr) +{ + unsigned i; + size_t size, es_size = es_bytes[d->es]; + + for (size = i = 0; i < d->sglen; i++) { + size_t this_size = omap_dma_sg_size(&d->sg[i]) * es_size; + + if (size) + size += this_size; + else if (addr >= d->sg[i].addr && + addr < d->sg[i].addr + this_size) + size += d->sg[i].addr + this_size - addr; + } + return size; +} + +/* + * OMAP 3.2/3.3 erratum: sometimes 0 is returned if CSAC/CDAC is + * read before the DMA controller finished disabling the channel. + */ +static uint32_t omap_dma_chan_read_3_3(struct omap_chan *c, unsigned reg) +{ + struct omap_dmadev *od = to_omap_dma_dev(c->vc.chan.device); + uint32_t val; + + val = omap_dma_chan_read(c, reg); + if (val == 0 && od->plat->errata & DMA_ERRATA_3_3) + val = omap_dma_chan_read(c, reg); + + return val; +} + +static dma_addr_t omap_dma_get_src_pos(struct omap_chan *c) +{ + struct omap_dmadev *od = to_omap_dma_dev(c->vc.chan.device); + dma_addr_t addr, cdac; + + if (__dma_omap15xx(od->plat->dma_attr)) { + addr = omap_dma_chan_read(c, CPC); + } else { + addr = omap_dma_chan_read_3_3(c, CSAC); + cdac = omap_dma_chan_read_3_3(c, CDAC); + + /* + * CDAC == 0 indicates that the DMA transfer on the channel has + * not been started (no data has been transferred so far). + * Return the programmed source start address in this case. + */ + if (cdac == 0) + addr = omap_dma_chan_read(c, CSSA); + } + + if (dma_omap1()) + addr |= omap_dma_chan_read(c, CSSA) & 0xffff0000; + + return addr; +} + +static dma_addr_t omap_dma_get_dst_pos(struct omap_chan *c) +{ + struct omap_dmadev *od = to_omap_dma_dev(c->vc.chan.device); + dma_addr_t addr; + + if (__dma_omap15xx(od->plat->dma_attr)) { + addr = omap_dma_chan_read(c, CPC); + } else { + addr = omap_dma_chan_read_3_3(c, CDAC); + + /* + * CDAC == 0 indicates that the DMA transfer on the channel + * has not been started (no data has been transferred so + * far). Return the programmed destination start address in + * this case. 
+ */ + if (addr == 0) + addr = omap_dma_chan_read(c, CDSA); + } + + if (dma_omap1()) + addr |= omap_dma_chan_read(c, CDSA) & 0xffff0000; + + return addr; +} + +static enum dma_status omap_dma_tx_status(struct dma_chan *chan, + dma_cookie_t cookie, struct dma_tx_state *txstate) +{ + struct omap_chan *c = to_omap_dma_chan(chan); + struct virt_dma_desc *vd; + enum dma_status ret; + unsigned long flags; + + ret = dma_cookie_status(chan, cookie, txstate); + + if (!c->paused && c->running) { + uint32_t ccr = omap_dma_chan_read(c, CCR); + /* + * The channel is no longer active, set the return value + * accordingly + */ + if (!(ccr & CCR_ENABLE)) + ret = DMA_COMPLETE; + } + + if (ret == DMA_COMPLETE || !txstate) + return ret; + + spin_lock_irqsave(&c->vc.lock, flags); + vd = vchan_find_desc(&c->vc, cookie); + if (vd) { + txstate->residue = omap_dma_desc_size(to_omap_dma_desc(&vd->tx)); + } else if (c->desc && c->desc->vd.tx.cookie == cookie) { + struct omap_desc *d = c->desc; + dma_addr_t pos; + + if (d->dir == DMA_MEM_TO_DEV) + pos = omap_dma_get_src_pos(c); + else if (d->dir == DMA_DEV_TO_MEM || d->dir == DMA_MEM_TO_MEM) + pos = omap_dma_get_dst_pos(c); + else + pos = 0; + + txstate->residue = omap_dma_desc_size_pos(d, pos); + } else { + txstate->residue = 0; + } + if (ret == DMA_IN_PROGRESS && c->paused) + ret = DMA_PAUSED; + spin_unlock_irqrestore(&c->vc.lock, flags); + + return ret; +} + +static void omap_dma_issue_pending(struct dma_chan *chan) +{ + struct omap_chan *c = to_omap_dma_chan(chan); + unsigned long flags; + + spin_lock_irqsave(&c->vc.lock, flags); + if (vchan_issue_pending(&c->vc) && !c->desc) + omap_dma_start_desc(c); + spin_unlock_irqrestore(&c->vc.lock, flags); +} + +static struct dma_async_tx_descriptor *omap_dma_prep_slave_sg( + struct dma_chan *chan, struct scatterlist *sgl, unsigned sglen, + enum dma_transfer_direction dir, unsigned long tx_flags, void *context) +{ + struct omap_dmadev *od = to_omap_dma_dev(chan->device); + struct omap_chan *c = to_omap_dma_chan(chan); + enum dma_slave_buswidth dev_width; + struct scatterlist *sgent; + struct omap_desc *d; + dma_addr_t dev_addr; + unsigned i, es, en, frame_bytes; + bool ll_failed = false; + u32 burst; + u32 port_window, port_window_bytes; + + if (dir == DMA_DEV_TO_MEM) { + dev_addr = c->cfg.src_addr; + dev_width = c->cfg.src_addr_width; + burst = c->cfg.src_maxburst; + port_window = c->cfg.src_port_window_size; + } else if (dir == DMA_MEM_TO_DEV) { + dev_addr = c->cfg.dst_addr; + dev_width = c->cfg.dst_addr_width; + burst = c->cfg.dst_maxburst; + port_window = c->cfg.dst_port_window_size; + } else { + dev_err(chan->device->dev, "%s: bad direction?\n", __func__); + return NULL; + } + + /* Bus width translates to the element size (ES) */ + switch (dev_width) { + case DMA_SLAVE_BUSWIDTH_1_BYTE: + es = CSDP_DATA_TYPE_8; + break; + case DMA_SLAVE_BUSWIDTH_2_BYTES: + es = CSDP_DATA_TYPE_16; + break; + case DMA_SLAVE_BUSWIDTH_4_BYTES: + es = CSDP_DATA_TYPE_32; + break; + default: /* not reached */ + return NULL; + } + + /* Now allocate and setup the descriptor. 
*/ + d = kzalloc(sizeof(*d) + sglen * sizeof(d->sg[0]), GFP_ATOMIC); + if (!d) + return NULL; + + d->dir = dir; + d->dev_addr = dev_addr; + d->es = es; + + /* When the port_window is used, one frame must cover the window */ + if (port_window) { + burst = port_window; + port_window_bytes = port_window * es_bytes[es]; + + d->ei = 1; + /* + * One frame covers the port_window and by configuring + * the source frame index to be -1 * (port_window - 1) + * we instruct the sDMA that after a frame is processed + * it should move back to the start of the window. + */ + d->fi = -(port_window_bytes - 1); + } + + d->ccr = c->ccr | CCR_SYNC_FRAME; + if (dir == DMA_DEV_TO_MEM) { + d->csdp = CSDP_DST_BURST_64 | CSDP_DST_PACKED; + + d->ccr |= CCR_DST_AMODE_POSTINC; + if (port_window) { + d->ccr |= CCR_SRC_AMODE_DBLIDX; + + if (port_window_bytes >= 64) + d->csdp |= CSDP_SRC_BURST_64; + else if (port_window_bytes >= 32) + d->csdp |= CSDP_SRC_BURST_32; + else if (port_window_bytes >= 16) + d->csdp |= CSDP_SRC_BURST_16; + + } else { + d->ccr |= CCR_SRC_AMODE_CONSTANT; + } + } else { + d->csdp = CSDP_SRC_BURST_64 | CSDP_SRC_PACKED; + + d->ccr |= CCR_SRC_AMODE_POSTINC; + if (port_window) { + d->ccr |= CCR_DST_AMODE_DBLIDX; + + if (port_window_bytes >= 64) + d->csdp |= CSDP_DST_BURST_64; + else if (port_window_bytes >= 32) + d->csdp |= CSDP_DST_BURST_32; + else if (port_window_bytes >= 16) + d->csdp |= CSDP_DST_BURST_16; + } else { + d->ccr |= CCR_DST_AMODE_CONSTANT; + } + } + + d->cicr = CICR_DROP_IE | CICR_BLOCK_IE; + d->csdp |= es; + + if (dma_omap1()) { + d->cicr |= CICR_TOUT_IE; + + if (dir == DMA_DEV_TO_MEM) + d->csdp |= CSDP_DST_PORT_EMIFF | CSDP_SRC_PORT_TIPB; + else + d->csdp |= CSDP_DST_PORT_TIPB | CSDP_SRC_PORT_EMIFF; + } else { + if (dir == DMA_DEV_TO_MEM) + d->ccr |= CCR_TRIGGER_SRC; + + d->cicr |= CICR_MISALIGNED_ERR_IE | CICR_TRANS_ERR_IE; + + if (port_window) + d->csdp |= CSDP_WRITE_LAST_NON_POSTED; + } + if (od->plat->errata & DMA_ERRATA_PARALLEL_CHANNELS) + d->clnk_ctrl = c->dma_ch; + + /* + * Build our scatterlist entries: each contains the address, + * the number of elements (EN) in each frame, and the number of + * frames (FN). Number of bytes for this entry = ES * EN * FN. + * + * Burst size translates to number of elements with frame sync. + * Note: DMA engine defines burst to be the number of dev-width + * transfers. 
+ */ + en = burst; + frame_bytes = es_bytes[es] * en; + + if (sglen >= 2) + d->using_ll = od->ll123_supported; + + for_each_sg(sgl, sgent, sglen, i) { + struct omap_sg *osg = &d->sg[i]; + + osg->addr = sg_dma_address(sgent); + osg->en = en; + osg->fn = sg_dma_len(sgent) / frame_bytes; + + if (d->using_ll) { + osg->t2_desc = dma_pool_alloc(od->desc_pool, GFP_ATOMIC, + &osg->t2_desc_paddr); + if (!osg->t2_desc) { + dev_err(chan->device->dev, + "t2_desc[%d] allocation failed\n", i); + ll_failed = true; + d->using_ll = false; + continue; + } + + omap_dma_fill_type2_desc(d, i, dir, (i == sglen - 1)); + } + } + + d->sglen = sglen; + + /* Release the dma_pool entries if one allocation failed */ + if (ll_failed) { + for (i = 0; i < d->sglen; i++) { + struct omap_sg *osg = &d->sg[i]; + + if (osg->t2_desc) { + dma_pool_free(od->desc_pool, osg->t2_desc, + osg->t2_desc_paddr); + osg->t2_desc = NULL; + } + } + } + + return vchan_tx_prep(&c->vc, &d->vd, tx_flags); +} + +static struct dma_async_tx_descriptor *omap_dma_prep_dma_cyclic( + struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len, + size_t period_len, enum dma_transfer_direction dir, unsigned long flags) +{ + struct omap_dmadev *od = to_omap_dma_dev(chan->device); + struct omap_chan *c = to_omap_dma_chan(chan); + enum dma_slave_buswidth dev_width; + struct omap_desc *d; + dma_addr_t dev_addr; + unsigned es; + u32 burst; + + if (dir == DMA_DEV_TO_MEM) { + dev_addr = c->cfg.src_addr; + dev_width = c->cfg.src_addr_width; + burst = c->cfg.src_maxburst; + } else if (dir == DMA_MEM_TO_DEV) { + dev_addr = c->cfg.dst_addr; + dev_width = c->cfg.dst_addr_width; + burst = c->cfg.dst_maxburst; + } else { + dev_err(chan->device->dev, "%s: bad direction?\n", __func__); + return NULL; + } + + /* Bus width translates to the element size (ES) */ + switch (dev_width) { + case DMA_SLAVE_BUSWIDTH_1_BYTE: + es = CSDP_DATA_TYPE_8; + break; + case DMA_SLAVE_BUSWIDTH_2_BYTES: + es = CSDP_DATA_TYPE_16; + break; + case DMA_SLAVE_BUSWIDTH_4_BYTES: + es = CSDP_DATA_TYPE_32; + break; + default: /* not reached */ + return NULL; + } + + /* Now allocate and setup the descriptor. 
*/ + d = kzalloc(sizeof(*d) + sizeof(d->sg[0]), GFP_ATOMIC); + if (!d) + return NULL; + + d->dir = dir; + d->dev_addr = dev_addr; + d->fi = burst; + d->es = es; + d->sg[0].addr = buf_addr; + d->sg[0].en = period_len / es_bytes[es]; + d->sg[0].fn = buf_len / period_len; + d->sglen = 1; + + d->ccr = c->ccr; + if (dir == DMA_DEV_TO_MEM) + d->ccr |= CCR_DST_AMODE_POSTINC | CCR_SRC_AMODE_CONSTANT; + else + d->ccr |= CCR_DST_AMODE_CONSTANT | CCR_SRC_AMODE_POSTINC; + + d->cicr = CICR_DROP_IE; + if (flags & DMA_PREP_INTERRUPT) + d->cicr |= CICR_FRAME_IE; + + d->csdp = es; + + if (dma_omap1()) { + d->cicr |= CICR_TOUT_IE; + + if (dir == DMA_DEV_TO_MEM) + d->csdp |= CSDP_DST_PORT_EMIFF | CSDP_SRC_PORT_MPUI; + else + d->csdp |= CSDP_DST_PORT_MPUI | CSDP_SRC_PORT_EMIFF; + } else { + if (burst) + d->ccr |= CCR_SYNC_PACKET; + else + d->ccr |= CCR_SYNC_ELEMENT; + + if (dir == DMA_DEV_TO_MEM) { + d->ccr |= CCR_TRIGGER_SRC; + d->csdp |= CSDP_DST_PACKED; + } else { + d->csdp |= CSDP_SRC_PACKED; + } + + d->cicr |= CICR_MISALIGNED_ERR_IE | CICR_TRANS_ERR_IE; + + d->csdp |= CSDP_DST_BURST_64 | CSDP_SRC_BURST_64; + } + + if (__dma_omap15xx(od->plat->dma_attr)) + d->ccr |= CCR_AUTO_INIT | CCR_REPEAT; + else + d->clnk_ctrl = c->dma_ch | CLNK_CTRL_ENABLE_LNK; + + c->cyclic = true; + + return vchan_tx_prep(&c->vc, &d->vd, flags); +} + +static struct dma_async_tx_descriptor *omap_dma_prep_dma_memcpy( + struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, + size_t len, unsigned long tx_flags) +{ + struct omap_chan *c = to_omap_dma_chan(chan); + struct omap_desc *d; + uint8_t data_type; + + d = kzalloc(sizeof(*d) + sizeof(d->sg[0]), GFP_ATOMIC); + if (!d) + return NULL; + + data_type = __ffs((src | dest | len)); + if (data_type > CSDP_DATA_TYPE_32) + data_type = CSDP_DATA_TYPE_32; + + d->dir = DMA_MEM_TO_MEM; + d->dev_addr = src; + d->fi = 0; + d->es = data_type; + d->sg[0].en = len / BIT(data_type); + d->sg[0].fn = 1; + d->sg[0].addr = dest; + d->sglen = 1; + d->ccr = c->ccr; + d->ccr |= CCR_DST_AMODE_POSTINC | CCR_SRC_AMODE_POSTINC; + + d->cicr = CICR_DROP_IE | CICR_FRAME_IE; + + d->csdp = data_type; + + if (dma_omap1()) { + d->cicr |= CICR_TOUT_IE; + d->csdp |= CSDP_DST_PORT_EMIFF | CSDP_SRC_PORT_EMIFF; + } else { + d->csdp |= CSDP_DST_PACKED | CSDP_SRC_PACKED; + d->cicr |= CICR_MISALIGNED_ERR_IE | CICR_TRANS_ERR_IE; + d->csdp |= CSDP_DST_BURST_64 | CSDP_SRC_BURST_64; + } + + return vchan_tx_prep(&c->vc, &d->vd, tx_flags); +} + +static struct dma_async_tx_descriptor *omap_dma_prep_dma_interleaved( + struct dma_chan *chan, struct dma_interleaved_template *xt, + unsigned long flags) +{ + struct omap_chan *c = to_omap_dma_chan(chan); + struct omap_desc *d; + struct omap_sg *sg; + uint8_t data_type; + size_t src_icg, dst_icg; + + /* Slave mode is not supported */ + if (is_slave_direction(xt->dir)) + return NULL; + + if (xt->frame_size != 1 || xt->numf == 0) + return NULL; + + d = kzalloc(sizeof(*d) + sizeof(d->sg[0]), GFP_ATOMIC); + if (!d) + return NULL; + + data_type = __ffs((xt->src_start | xt->dst_start | xt->sgl[0].size)); + if (data_type > CSDP_DATA_TYPE_32) + data_type = CSDP_DATA_TYPE_32; + + sg = &d->sg[0]; + d->dir = DMA_MEM_TO_MEM; + d->dev_addr = xt->src_start; + d->es = data_type; + sg->en = xt->sgl[0].size / BIT(data_type); + sg->fn = xt->numf; + sg->addr = xt->dst_start; + d->sglen = 1; + d->ccr = c->ccr; + + src_icg = dmaengine_get_src_icg(xt, &xt->sgl[0]); + dst_icg = dmaengine_get_dst_icg(xt, &xt->sgl[0]); + if (src_icg) { + d->ccr |= CCR_SRC_AMODE_DBLIDX; + d->ei = 1; + d->fi = src_icg; + } else 
if (xt->src_inc) { + d->ccr |= CCR_SRC_AMODE_POSTINC; + d->fi = 0; + } else { + dev_err(chan->device->dev, + "%s: SRC constant addressing is not supported\n", + __func__); + kfree(d); + return NULL; + } + + if (dst_icg) { + d->ccr |= CCR_DST_AMODE_DBLIDX; + sg->ei = 1; + sg->fi = dst_icg; + } else if (xt->dst_inc) { + d->ccr |= CCR_DST_AMODE_POSTINC; + sg->fi = 0; + } else { + dev_err(chan->device->dev, + "%s: DST constant addressing is not supported\n", + __func__); + kfree(d); + return NULL; + } + + d->cicr = CICR_DROP_IE | CICR_FRAME_IE; + + d->csdp = data_type; + + if (dma_omap1()) { + d->cicr |= CICR_TOUT_IE; + d->csdp |= CSDP_DST_PORT_EMIFF | CSDP_SRC_PORT_EMIFF; + } else { + d->csdp |= CSDP_DST_PACKED | CSDP_SRC_PACKED; + d->cicr |= CICR_MISALIGNED_ERR_IE | CICR_TRANS_ERR_IE; + d->csdp |= CSDP_DST_BURST_64 | CSDP_SRC_BURST_64; + } + + return vchan_tx_prep(&c->vc, &d->vd, flags); +} + +static int omap_dma_slave_config(struct dma_chan *chan, struct dma_slave_config *cfg) +{ + struct omap_chan *c = to_omap_dma_chan(chan); + + if (cfg->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES || + cfg->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES) + return -EINVAL; + + if (cfg->src_maxburst > chan->device->max_burst || + cfg->dst_maxburst > chan->device->max_burst) + return -EINVAL; + + memcpy(&c->cfg, cfg, sizeof(c->cfg)); + + return 0; +} + +static int omap_dma_terminate_all(struct dma_chan *chan) +{ + struct omap_chan *c = to_omap_dma_chan(chan); + unsigned long flags; + LIST_HEAD(head); + + spin_lock_irqsave(&c->vc.lock, flags); + + /* + * Stop DMA activity: we assume the callback will not be called + * after omap_dma_stop() returns (even if it does, it will see + * c->desc is NULL and exit.) + */ + if (c->desc) { + vchan_terminate_vdesc(&c->desc->vd); + c->desc = NULL; + /* Avoid stopping the dma twice */ + if (!c->paused) + omap_dma_stop(c); + } + + c->cyclic = false; + c->paused = false; + + vchan_get_all_descriptors(&c->vc, &head); + spin_unlock_irqrestore(&c->vc.lock, flags); + vchan_dma_desc_free_list(&c->vc, &head); + + return 0; +} + +static void omap_dma_synchronize(struct dma_chan *chan) +{ + struct omap_chan *c = to_omap_dma_chan(chan); + + vchan_synchronize(&c->vc); +} + +static int omap_dma_pause(struct dma_chan *chan) +{ + struct omap_chan *c = to_omap_dma_chan(chan); + struct omap_dmadev *od = to_omap_dma_dev(chan->device); + unsigned long flags; + int ret = -EINVAL; + bool can_pause = false; + + spin_lock_irqsave(&od->irq_lock, flags); + + if (!c->desc) + goto out; + + if (c->cyclic) + can_pause = true; + + /* + * We do not allow DMA_MEM_TO_DEV transfers to be paused. + * From the AM572x TRM, 16.1.4.18 Disabling a Channel During Transfer: + * "When a channel is disabled during a transfer, the channel undergoes + * an abort, unless it is hardware-source-synchronized …". + * A source-synchronised channel is one where the fetching of data is + * under control of the device. In other words, a device-to-memory + * transfer. So, a destination-synchronised channel (which would be a + * memory-to-device transfer) undergoes an abort if the CCR_ENABLE + * bit is cleared. + * From 16.1.4.20.4.6.2 Abort: "If an abort trigger occurs, the channel + * aborts immediately after completion of current read/write + * transactions and then the FIFO is cleaned up." The term "cleaned up" + * is not defined. TI recommends checking that RD_ACTIVE and WR_ACTIVE + * are both clear _before_ disabling the channel, otherwise data loss + * will occur. 
+ * The problem is that if the channel is active, then device activity + * can result in DMA activity starting between reading those as both + * clear and the write to DMA_CCR to clear the enable bit hitting the + * hardware. If the DMA hardware can't drain the data in its FIFO to the + * destination, then data loss "might" occur (say if we write to a UART + * and the UART is not accepting any further data). + */ + else if (c->desc->dir == DMA_DEV_TO_MEM) + can_pause = true; + + if (can_pause && !c->paused) { + ret = omap_dma_stop(c); + if (!ret) + c->paused = true; + } +out: + spin_unlock_irqrestore(&od->irq_lock, flags); + + return ret; +} + +static int omap_dma_resume(struct dma_chan *chan) +{ + struct omap_chan *c = to_omap_dma_chan(chan); + struct omap_dmadev *od = to_omap_dma_dev(chan->device); + unsigned long flags; + int ret = -EINVAL; + + spin_lock_irqsave(&od->irq_lock, flags); + + if (c->paused && c->desc) { + mb(); + + /* Restore channel link register */ + omap_dma_chan_write(c, CLNK_CTRL, c->desc->clnk_ctrl); + + omap_dma_start(c, c->desc); + c->paused = false; + ret = 0; + } + spin_unlock_irqrestore(&od->irq_lock, flags); + + return ret; +} + +static int omap_dma_chan_init(struct omap_dmadev *od) +{ + struct omap_chan *c; + + c = kzalloc(sizeof(*c), GFP_KERNEL); + if (!c) + return -ENOMEM; + + c->reg_map = od->reg_map; + c->vc.desc_free = omap_dma_desc_free; + vchan_init(&c->vc, &od->ddev); + + return 0; +} + +static void omap_dma_free(struct omap_dmadev *od) +{ + while (!list_empty(&od->ddev.channels)) { + struct omap_chan *c = list_first_entry(&od->ddev.channels, + struct omap_chan, vc.chan.device_node); + + list_del(&c->vc.chan.device_node); + tasklet_kill(&c->vc.task); + kfree(c); + } +} + +#define OMAP_DMA_BUSWIDTHS (BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \ + BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \ + BIT(DMA_SLAVE_BUSWIDTH_4_BYTES)) + +static int omap_dma_probe(struct platform_device *pdev) +{ + struct omap_dmadev *od; + struct resource *res; + int rc, i, irq; + u32 lch_count; + + od = devm_kzalloc(&pdev->dev, sizeof(*od), GFP_KERNEL); + if (!od) + return -ENOMEM; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + od->base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(od->base)) + return PTR_ERR(od->base); + + od->plat = omap_get_plat_info(); + if (!od->plat) + return -EPROBE_DEFER; + + od->reg_map = od->plat->reg_map; + + dma_cap_set(DMA_SLAVE, od->ddev.cap_mask); + dma_cap_set(DMA_CYCLIC, od->ddev.cap_mask); + dma_cap_set(DMA_MEMCPY, od->ddev.cap_mask); + dma_cap_set(DMA_INTERLEAVE, od->ddev.cap_mask); + od->ddev.device_alloc_chan_resources = omap_dma_alloc_chan_resources; + od->ddev.device_free_chan_resources = omap_dma_free_chan_resources; + od->ddev.device_tx_status = omap_dma_tx_status; + od->ddev.device_issue_pending = omap_dma_issue_pending; + od->ddev.device_prep_slave_sg = omap_dma_prep_slave_sg; + od->ddev.device_prep_dma_cyclic = omap_dma_prep_dma_cyclic; + od->ddev.device_prep_dma_memcpy = omap_dma_prep_dma_memcpy; + od->ddev.device_prep_interleaved_dma = omap_dma_prep_dma_interleaved; + od->ddev.device_config = omap_dma_slave_config; + od->ddev.device_pause = omap_dma_pause; + od->ddev.device_resume = omap_dma_resume; + od->ddev.device_terminate_all = omap_dma_terminate_all; + od->ddev.device_synchronize = omap_dma_synchronize; + od->ddev.src_addr_widths = OMAP_DMA_BUSWIDTHS; + od->ddev.dst_addr_widths = OMAP_DMA_BUSWIDTHS; + od->ddev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV); + od->ddev.residue_granularity = 
DMA_RESIDUE_GRANULARITY_BURST; + od->ddev.max_burst = SZ_16M - 1; /* CCEN: 24bit unsigned */ + od->ddev.dev = &pdev->dev; + INIT_LIST_HEAD(&od->ddev.channels); + spin_lock_init(&od->lock); + spin_lock_init(&od->irq_lock); + + /* Number of DMA requests */ + od->dma_requests = OMAP_SDMA_REQUESTS; + if (pdev->dev.of_node && of_property_read_u32(pdev->dev.of_node, + "dma-requests", + &od->dma_requests)) { + dev_info(&pdev->dev, + "Missing dma-requests property, using %u.\n", + OMAP_SDMA_REQUESTS); + } + + /* Number of available logical channels */ + if (!pdev->dev.of_node) { + lch_count = od->plat->dma_attr->lch_count; + if (unlikely(!lch_count)) + lch_count = OMAP_SDMA_CHANNELS; + } else if (of_property_read_u32(pdev->dev.of_node, "dma-channels", + &lch_count)) { + dev_info(&pdev->dev, + "Missing dma-channels property, using %u.\n", + OMAP_SDMA_CHANNELS); + lch_count = OMAP_SDMA_CHANNELS; + } + + od->lch_map = devm_kcalloc(&pdev->dev, lch_count, sizeof(*od->lch_map), + GFP_KERNEL); + if (!od->lch_map) + return -ENOMEM; + + for (i = 0; i < od->dma_requests; i++) { + rc = omap_dma_chan_init(od); + if (rc) { + omap_dma_free(od); + return rc; + } + } + + irq = platform_get_irq(pdev, 1); + if (irq <= 0) { + dev_info(&pdev->dev, "failed to get L1 IRQ: %d\n", irq); + od->legacy = true; + } else { + /* Disable all interrupts */ + od->irq_enable_mask = 0; + omap_dma_glbl_write(od, IRQENABLE_L1, 0); + + rc = devm_request_irq(&pdev->dev, irq, omap_dma_irq, + IRQF_SHARED, "omap-dma-engine", od); + if (rc) + return rc; + } + + if (omap_dma_glbl_read(od, CAPS_0) & CAPS_0_SUPPORT_LL123) + od->ll123_supported = true; + + od->ddev.filter.map = od->plat->slave_map; + od->ddev.filter.mapcnt = od->plat->slavecnt; + od->ddev.filter.fn = omap_dma_filter_fn; + + if (od->ll123_supported) { + od->desc_pool = dma_pool_create(dev_name(&pdev->dev), + &pdev->dev, + sizeof(struct omap_type2_desc), + 4, 0); + if (!od->desc_pool) { + dev_err(&pdev->dev, + "unable to allocate descriptor pool\n"); + od->ll123_supported = false; + } + } + + rc = dma_async_device_register(&od->ddev); + if (rc) { + pr_warn("OMAP-DMA: failed to register slave DMA engine device: %d\n", + rc); + omap_dma_free(od); + return rc; + } + + platform_set_drvdata(pdev, od); + + if (pdev->dev.of_node) { + omap_dma_info.dma_cap = od->ddev.cap_mask; + + /* Device-tree DMA controller registration */ + rc = of_dma_controller_register(pdev->dev.of_node, + of_dma_simple_xlate, &omap_dma_info); + if (rc) { + pr_warn("OMAP-DMA: failed to register DMA controller\n"); + dma_async_device_unregister(&od->ddev); + omap_dma_free(od); + } + } + + dev_info(&pdev->dev, "OMAP DMA engine driver%s\n", + od->ll123_supported ? 
" (LinkedList1/2/3 supported)" : ""); + + return rc; +} + +static int omap_dma_remove(struct platform_device *pdev) +{ + struct omap_dmadev *od = platform_get_drvdata(pdev); + int irq; + + if (pdev->dev.of_node) + of_dma_controller_free(pdev->dev.of_node); + + irq = platform_get_irq(pdev, 1); + devm_free_irq(&pdev->dev, irq, od); + + dma_async_device_unregister(&od->ddev); + + if (!od->legacy) { + /* Disable all interrupts */ + omap_dma_glbl_write(od, IRQENABLE_L0, 0); + } + + if (od->ll123_supported) + dma_pool_destroy(od->desc_pool); + + omap_dma_free(od); + + return 0; +} + +static const struct of_device_id omap_dma_match[] = { + { .compatible = "ti,omap2420-sdma", }, + { .compatible = "ti,omap2430-sdma", }, + { .compatible = "ti,omap3430-sdma", }, + { .compatible = "ti,omap3630-sdma", }, + { .compatible = "ti,omap4430-sdma", }, + {}, +}; +MODULE_DEVICE_TABLE(of, omap_dma_match); + +static struct platform_driver omap_dma_driver = { + .probe = omap_dma_probe, + .remove = omap_dma_remove, + .driver = { + .name = "omap-dma-engine", + .of_match_table = of_match_ptr(omap_dma_match), + }, +}; + +bool omap_dma_filter_fn(struct dma_chan *chan, void *param) +{ + if (chan->device->dev->driver == &omap_dma_driver.driver) { + struct omap_dmadev *od = to_omap_dma_dev(chan->device); + struct omap_chan *c = to_omap_dma_chan(chan); + unsigned req = *(unsigned *)param; + + if (req <= od->dma_requests) { + c->dma_sig = req; + return true; + } + } + return false; +} +EXPORT_SYMBOL_GPL(omap_dma_filter_fn); + +static int omap_dma_init(void) +{ + return platform_driver_register(&omap_dma_driver); +} +subsys_initcall(omap_dma_init); + +static void __exit omap_dma_exit(void) +{ + platform_driver_unregister(&omap_dma_driver); +} +module_exit(omap_dma_exit); + +MODULE_AUTHOR("Russell King"); +MODULE_LICENSE("GPL"); -- cgit v1.2.3