From 04ce9ab385dc97eb55299d533cd3af79b8fc7529 Mon Sep 17 00:00:00 2001
From: Dan Williams
Date: Wed, 3 Jun 2009 14:22:28 -0700
Subject: async_xor: permit callers to pass in a 'dma/page scribble' region

async_xor() needs space to perform dma and page address conversions.  In
most cases the code can simply reuse the struct page * array because the
size of the native pointer matches the size of a dma/page address.  In
order to support archs where sizeof(dma_addr_t) is larger than
sizeof(struct page *), or to preserve the input parameters, we utilize a
memory region passed in by the caller.

Since the code is now prepared to handle the case where it cannot
perform address conversions on the stack, we no longer need the
!HIGHMEM64G dependency in drivers/dma/Kconfig.

[ Impact: don't clobber input buffers for address conversions ]

Reviewed-by: Andre Noll
Acked-by: Maciej Sosnowski
Signed-off-by: Dan Williams
---
 drivers/dma/Kconfig | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'drivers/dma/Kconfig')

diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 3b3c01b6f1ee..912a51b5cbd3 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -4,7 +4,7 @@
 menuconfig DMADEVICES
 	bool "DMA Engine support"
-	depends on !HIGHMEM64G && HAS_DMA
+	depends on HAS_DMA
 	help
 	  DMA engines can do asynchronous data transfers without involving
 	  the host CPU. Currently, this framework can be
-- 
cgit v1.2.3
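
The conversion problem described above can be sketched in a few lines of C. This is an illustrative fragment, not kernel code: the helper name map_page_stub and the 64-bit dma_addr_t are assumptions chosen to expose the pointer-width issue; only the reasoning is taken from the commit message.

#include <stdint.h>

struct page;				/* opaque, as in the kernel */
typedef uint64_t dma_addr_t;		/* e.g. 64-bit DMA addresses on a 32-bit arch */

extern dma_addr_t map_page_stub(struct page *pg);	/* hypothetical mapping helper */

/*
 * When sizeof(dma_addr_t) <= sizeof(struct page *) the conversion could be
 * done in place, reusing src_list as the output array.  With a wider
 * dma_addr_t each converted address would overwrite the next unconverted
 * pointer, so the caller passes a separate 'scribble' region instead.
 */
static void map_srcs(struct page **src_list, int src_cnt, dma_addr_t *scribble)
{
	int i;

	for (i = 0; i < src_cnt; i++)
		scribble[i] = map_page_stub(src_list[i]);
}
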
From 138f4c359d23d2ec38d18bd70dd9613ae515fe93 Mon Sep 17 00:00:00 2001
From: Dan Williams
Date: Tue, 8 Sep 2009 17:42:51 -0700
Subject: dmaengine, async_tx: add a "no channel switch" allocator

Channel switching is problematic for some dmaengine drivers as the
architecture precludes separating the ->prep from ->submit.  In these
cases the driver can select ASYNC_TX_DISABLE_CHANNEL_SWITCH to modify
the async_tx allocator to only return channels that support all of the
required asynchronous operations.

For example MD_RAID456=y selects support for asynchronous xor, xor
validate, pq, pq validate, and memcpy.  When
ASYNC_TX_DISABLE_CHANNEL_SWITCH=y any channel with all these
capabilities is marked DMA_ASYNC_TX allowing async_tx_find_channel() to
quickly locate compatible channels with the guarantee that dependency
chains will remain on one channel.  When
ASYNC_TX_DISABLE_CHANNEL_SWITCH=n async_tx_find_channel() may select
channels that lead to operation chains that need to cross channel
boundaries using the async_tx channel switch capability.

Signed-off-by: Dan Williams
---
 crypto/async_tx/async_tx.c |  4 ++++
 drivers/dma/Kconfig        |  4 ++++
 drivers/dma/dmaengine.c    | 40 ++++++++++++++++++++++++++++++++++++++++
 include/linux/dmaengine.h  | 10 +++++++++-
 4 files changed, 57 insertions(+), 1 deletion(-)

(limited to 'drivers/dma/Kconfig')

diff --git a/crypto/async_tx/async_tx.c b/crypto/async_tx/async_tx.c
index 60615fedcf5e..f9cdf04fe7c0 100644
--- a/crypto/async_tx/async_tx.c
+++ b/crypto/async_tx/async_tx.c
@@ -81,6 +81,10 @@ async_tx_channel_switch(struct dma_async_tx_descriptor *depend_tx,
 	struct dma_device *device = chan->device;
 	struct dma_async_tx_descriptor *intr_tx = (void *) ~0;
 
+	#ifdef CONFIG_ASYNC_TX_DISABLE_CHANNEL_SWITCH
+	BUG();
+	#endif
+
 	/* first check to see if we can still append to depend_tx */
 	spin_lock_bh(&depend_tx->lock);
 	if (depend_tx->parent && depend_tx->chan == tx->chan) {
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 912a51b5cbd3..ddcd9793b25c 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -17,11 +17,15 @@ if DMADEVICES
 
 comment "DMA Devices"
 
+config ASYNC_TX_DISABLE_CHANNEL_SWITCH
+	bool
+
 config INTEL_IOATDMA
 	tristate "Intel I/OAT DMA support"
 	depends on PCI && X86
 	select DMA_ENGINE
 	select DCA
+	select ASYNC_TX_DISABLE_CHANNEL_SWITCH
 	help
 	  Enable support for the Intel(R) I/OAT DMA engine present
 	  in recent Intel Xeon chipsets.
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index 96598479eece..d5bc628d207c 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -608,6 +608,40 @@ void dmaengine_put(void)
 }
 EXPORT_SYMBOL(dmaengine_put);
 
+static bool device_has_all_tx_types(struct dma_device *device)
+{
+	/* A device that satisfies this test has channels that will never cause
+	 * an async_tx channel switch event as all possible operation types can
+	 * be handled.
+	 */
+	#ifdef CONFIG_ASYNC_TX_DMA
+	if (!dma_has_cap(DMA_INTERRUPT, device->cap_mask))
+		return false;
+	#endif
+
+	#if defined(CONFIG_ASYNC_MEMCPY) || defined(CONFIG_ASYNC_MEMCPY_MODULE)
+	if (!dma_has_cap(DMA_MEMCPY, device->cap_mask))
+		return false;
+	#endif
+
+	#if defined(CONFIG_ASYNC_MEMSET) || defined(CONFIG_ASYNC_MEMSET_MODULE)
+	if (!dma_has_cap(DMA_MEMSET, device->cap_mask))
+		return false;
+	#endif
+
+	#if defined(CONFIG_ASYNC_XOR) || defined(CONFIG_ASYNC_XOR_MODULE)
+	if (!dma_has_cap(DMA_XOR, device->cap_mask))
+		return false;
+	#endif
+
+	#if defined(CONFIG_ASYNC_PQ) || defined(CONFIG_ASYNC_PQ_MODULE)
+	if (!dma_has_cap(DMA_PQ, device->cap_mask))
+		return false;
+	#endif
+
+	return true;
+}
+
 static int get_dma_id(struct dma_device *device)
 {
 	int rc;
@@ -665,6 +699,12 @@ int dma_async_device_register(struct dma_device *device)
 	BUG_ON(!device->device_issue_pending);
 	BUG_ON(!device->dev);
 
+	/* note: this only matters in the
+	 * CONFIG_ASYNC_TX_DISABLE_CHANNEL_SWITCH=y case
+	 */
+	if (device_has_all_tx_types(device))
+		dma_cap_set(DMA_ASYNC_TX, device->cap_mask);
+
 	idr_ref = kmalloc(sizeof(*idr_ref), GFP_KERNEL);
 	if (!idr_ref)
 		return -ENOMEM;
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index 4d6c1c925fd4..86853ed7970b 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -48,6 +48,9 @@ enum dma_status {
 
 /**
  * enum dma_transaction_type - DMA transaction types/indexes
+ *
+ * Note: The DMA_ASYNC_TX capability is not to be set by drivers.  It is
+ * automatically set as dma devices are registered.
  */
 enum dma_transaction_type {
 	DMA_MEMCPY,
@@ -61,6 +64,7 @@ enum dma_transaction_type {
 	DMA_MEMCPY_CRC32C,
 	DMA_INTERRUPT,
 	DMA_PRIVATE,
+	DMA_ASYNC_TX,
 	DMA_SLAVE,
 };
 
@@ -396,7 +400,11 @@ static inline void net_dmaengine_put(void)
 #ifdef CONFIG_ASYNC_TX_DMA
 #define async_dmaengine_get()	dmaengine_get()
 #define async_dmaengine_put()	dmaengine_put()
+#ifdef CONFIG_ASYNC_TX_DISABLE_CHANNEL_SWITCH
+#define async_dma_find_channel(type) dma_find_channel(DMA_ASYNC_TX)
+#else
 #define async_dma_find_channel(type) dma_find_channel(type)
+#endif /* CONFIG_ASYNC_TX_DISABLE_CHANNEL_SWITCH */
 #else
 static inline void async_dmaengine_get(void)
 {
@@ -409,7 +417,7 @@ async_dma_find_channel(enum dma_transaction_type type)
 {
 	return NULL;
 }
-#endif
+#endif /* CONFIG_ASYNC_TX_DMA */
 
 dma_cookie_t dma_async_memcpy_buf_to_buf(struct dma_chan *chan, void *dest,
 	void *src, size_t len);
-- 
cgit v1.2.3
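
To see how a channel earns the DMA_ASYNC_TX mark, consider a hypothetical driver registration. The sketch below uses only the dmaengine calls visible in this patch (dma_cap_set() and dma_async_device_register()); the function itself is invented for illustration and omits the rest of a real driver's setup.

static int example_register(struct dma_device *dma)
{
	/* advertise every operation type async_tx may request ... */
	dma_cap_set(DMA_MEMCPY, dma->cap_mask);
	dma_cap_set(DMA_XOR, dma->cap_mask);
	dma_cap_set(DMA_PQ, dma->cap_mask);
	dma_cap_set(DMA_INTERRUPT, dma->cap_mask);

	/*
	 * ... so device_has_all_tx_types() succeeds and
	 * dma_async_device_register() tags the device with DMA_ASYNC_TX.
	 * With CONFIG_ASYNC_TX_DISABLE_CHANNEL_SWITCH=y,
	 * async_dma_find_channel(type) then resolves every operation type
	 * to dma_find_channel(DMA_ASYNC_TX), keeping dependency chains on
	 * one channel.
	 */
	return dma_async_device_register(dma);
}
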
From d8902adcc1a9fd484c8cb5e575152e32192c1ff8 Mon Sep 17 00:00:00 2001
From: Nobuhiro Iwamatsu
Date: Mon, 7 Sep 2009 03:26:23 +0000
Subject: dmaengine: sh: Add Support SuperH DMA Engine driver

This supports all of the DMA channels, and it has been tested on
SH7722, SH7780, SH7785 and SH7763.  This driver cannot be used at the
same time as the SH DMA API.

Signed-off-by: Nobuhiro Iwamatsu
Reviewed-by: Matt Fleming
Acked-by: Maciej Sosnowski
Acked-by: Paul Mundt
Signed-off-by: Dan Williams
---
 arch/sh/drivers/dma/Kconfig  |  12 +-
 arch/sh/drivers/dma/Makefile |   3 +-
 arch/sh/include/asm/dma-sh.h |  13 +
 drivers/dma/Kconfig          |   8 +
 drivers/dma/Makefile         |   1 +
 drivers/dma/shdma.c          | 786 +++++++++++++++++++++++++++++++++++++++++++
 drivers/dma/shdma.h          |  64 ++++
 7 files changed, 882 insertions(+), 5 deletions(-)
 create mode 100644 drivers/dma/shdma.c
 create mode 100644 drivers/dma/shdma.h

(limited to 'drivers/dma/Kconfig')

diff --git a/arch/sh/drivers/dma/Kconfig b/arch/sh/drivers/dma/Kconfig
index 63e9dd30b41c..f935a7e5cbe5 100644
--- a/arch/sh/drivers/dma/Kconfig
+++ b/arch/sh/drivers/dma/Kconfig
@@ -1,12 +1,9 @@
 menu "DMA support"
 
-config SH_DMA_API
-	bool
 
 config SH_DMA
 	bool "SuperH on-chip DMA controller (DMAC) support"
 	depends on CPU_SH3 || CPU_SH4
-	select SH_DMA_API
 	default n
 
 config SH_DMA_IRQ_MULTI
@@ -19,6 +16,15 @@
 			   CPU_SUBTYPE_SH7780 || CPU_SUBTYPE_SH7785 || \
 			   CPU_SUBTYPE_SH7760
 
+config SH_DMA_API
+	depends on SH_DMA
+	bool "SuperH DMA API support"
+	default n
+	help
+	  This enables the traditional SuperH DMA API.  It cannot be used
+	  together with the DMA engine framework; if you want the DMA
+	  engine, enable DMA_ENGINE and SH_DMAE instead of this option.
+
 config NR_ONCHIP_DMA_CHANNELS
 	int
 	depends on SH_DMA
diff --git a/arch/sh/drivers/dma/Makefile b/arch/sh/drivers/dma/Makefile
index c6068137b46f..d88c9484762c 100644
--- a/arch/sh/drivers/dma/Makefile
+++ b/arch/sh/drivers/dma/Makefile
@@ -2,8 +2,7 @@
 # Makefile for the SuperH DMA specific kernel interface routines under Linux.
 #
-obj-$(CONFIG_SH_DMA_API)	+= dma-api.o dma-sysfs.o
-obj-$(CONFIG_SH_DMA)		+= dma-sh.o
+obj-$(CONFIG_SH_DMA_API)	+= dma-sh.o dma-api.o dma-sysfs.o
 obj-$(CONFIG_PVR2_DMA)		+= dma-pvr2.o
 obj-$(CONFIG_G2_DMA)		+= dma-g2.o
 obj-$(CONFIG_SH_DMABRG)		+= dmabrg.o
diff --git a/arch/sh/include/asm/dma-sh.h b/arch/sh/include/asm/dma-sh.h
index 0c8f8e14622a..89bdac558dbc 100644
--- a/arch/sh/include/asm/dma-sh.h
+++ b/arch/sh/include/asm/dma-sh.h
@@ -115,4 +115,17 @@ static u32 dma_base_addr[] __maybe_unused = {
 #define CHCR	0x0C
 #define DMAOR	0x40
 
+/*
+ * for dma engine
+ *
+ * SuperH DMA mode
+ */
+#define SHDMA_MIX_IRQ	(1 << 1)
+#define SHDMA_DMAOR1	(1 << 2)
+#define SHDMA_DMAE1	(1 << 3)
+
+struct sh_dmae_pdata {
+	unsigned int mode;
+};
+
 #endif /* __DMA_SH_H */
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index fe1f3717b1ff..3230a780c3de 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -101,6 +101,14 @@ config TXX9_DMAC
 	  Support the TXx9 SoC internal DMA controller.  This can be
 	  integrated in chips such as the Toshiba TX4927/38/39.
 
+config SH_DMAE
+	tristate "Renesas SuperH DMAC support"
+	depends on SUPERH && SH_DMA
+	depends on !SH_DMA_API
+	select DMA_ENGINE
+	help
+	  Enable support for the Renesas SuperH DMA controllers.
+
 config DMA_ENGINE
 	bool
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index 8f115e93b4a1..eca71ba78ae9 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -9,3 +9,4 @@ obj-$(CONFIG_DW_DMAC) += dw_dmac.o
 obj-$(CONFIG_AT_HDMAC) += at_hdmac.o
 obj-$(CONFIG_MX3_IPU) += ipu/
 obj-$(CONFIG_TXX9_DMAC) += txx9dmac.o
+obj-$(CONFIG_SH_DMAE) += shdma.o
diff --git a/drivers/dma/shdma.c b/drivers/dma/shdma.c
new file mode 100644
index 000000000000..b3b065c4e5c1
--- /dev/null
+++ b/drivers/dma/shdma.c
@@ -0,0 +1,786 @@
+/*
+ * Renesas SuperH DMA Engine support
+ *
+ * base is drivers/dma/fsldma.c
+ *
+ * Copyright (C) 2009 Nobuhiro Iwamatsu
+ * Copyright (C) 2009 Renesas Solutions, Inc. All rights reserved.
+ * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved.
+ *
+ * This is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * - DMA of SuperH does not have Hardware DMA chain mode.
+ * - MAX DMA size is 16MB.
+ *
+ */
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include "shdma.h"
+
+/* DMA descriptor control */
+#define DESC_LAST	(-1)
+#define DESC_COMP	(1)
+#define DESC_NCOMP	(0)
+
+#define NR_DESCS_PER_CHANNEL 32
+/*
+ * Define the default configuration for dual address memory-memory transfer.
+ * The 0x400 value represents auto-request, external->external.
+ *
+ * This driver sets 4-byte burst mode by default.
+ * If you want to change the mode, change the value of RS_DEFAULT.
+ * (e.g. 1-byte burst mode -> (RS_DUAL & ~TS_32))
+ */
+#define RS_DEFAULT	(RS_DUAL)
+
+#define SH_DMAC_CHAN_BASE(id) (dma_base_addr[id])
+static void sh_dmae_writel(struct sh_dmae_chan *sh_dc, u32 data, u32 reg)
+{
+	ctrl_outl(data, (SH_DMAC_CHAN_BASE(sh_dc->id) + reg));
+}
+
+static u32 sh_dmae_readl(struct sh_dmae_chan *sh_dc, u32 reg)
+{
+	return ctrl_inl((SH_DMAC_CHAN_BASE(sh_dc->id) + reg));
+}
+
+static void dmae_init(struct sh_dmae_chan *sh_chan)
+{
+	u32 chcr = RS_DEFAULT; /* default is DUAL mode */
+	sh_dmae_writel(sh_chan, chcr, CHCR);
+}
+
+/*
+ * Reset DMA controller
+ *
+ * SH7780 has two DMAOR register
+ */
+static void sh_dmae_ctl_stop(int id)
+{
+	unsigned short dmaor = dmaor_read_reg(id);
+
+	dmaor &= ~(DMAOR_NMIF | DMAOR_AE);
+	dmaor_write_reg(id, dmaor);
+}
+
+static int sh_dmae_rst(int id)
+{
+	unsigned short dmaor;
+
+	sh_dmae_ctl_stop(id);
+	dmaor = (dmaor_read_reg(id)|DMAOR_INIT);
+
+	dmaor_write_reg(id, dmaor);
+	if ((dmaor_read_reg(id) & (DMAOR_AE | DMAOR_NMIF))) {
+		pr_warning("dma-sh: Can't initialize DMAOR.\n");
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static int dmae_is_idle(struct sh_dmae_chan *sh_chan)
+{
+	u32 chcr = sh_dmae_readl(sh_chan, CHCR);
+	if (chcr & CHCR_DE) {
+		if (!(chcr & CHCR_TE))
+			return -EBUSY; /* working */
+	}
+	return 0; /* waiting */
+}
+
+static inline unsigned int calc_xmit_shift(struct sh_dmae_chan *sh_chan)
+{
+	u32 chcr = sh_dmae_readl(sh_chan, CHCR);
+	return ts_shift[(chcr & CHCR_TS_MASK) >> CHCR_TS_SHIFT];
+}
+
+static void dmae_set_reg(struct sh_dmae_chan *sh_chan, struct sh_dmae_regs hw)
+{
+	sh_dmae_writel(sh_chan, hw.sar, SAR);
+	sh_dmae_writel(sh_chan, hw.dar, DAR);
+	sh_dmae_writel(sh_chan,
+		(hw.tcr >> calc_xmit_shift(sh_chan)), TCR);
+}
+
+static void dmae_start(struct sh_dmae_chan *sh_chan)
+{
+	u32 chcr = sh_dmae_readl(sh_chan, CHCR);
+
+	chcr |= (CHCR_DE|CHCR_IE);
+	sh_dmae_writel(sh_chan, chcr, CHCR);
+}
+
+static void dmae_halt(struct sh_dmae_chan *sh_chan)
+{
+	u32 chcr = sh_dmae_readl(sh_chan, CHCR);
+
+	chcr &= ~(CHCR_DE | CHCR_TE | CHCR_IE);
+	sh_dmae_writel(sh_chan, chcr, CHCR);
+}
+
+static int dmae_set_chcr(struct sh_dmae_chan *sh_chan, u32 val)
+{
+	int ret = dmae_is_idle(sh_chan);
+	/* CHCR cannot be written while the DMA engine is working */
+	if (ret)
+		return ret;
+
+	sh_dmae_writel(sh_chan, val, CHCR);
+	return 0;
+}
+
+#define DMARS1_ADDR	0x04
+#define DMARS2_ADDR	0x08
+#define DMARS_SHIFT	8
+#define DMARS_CHAN_MSK	0x01
+static int dmae_set_dmars(struct sh_dmae_chan *sh_chan, u16 val)
+{
+	u32 addr;
+	int shift = 0;
+	int ret = dmae_is_idle(sh_chan);
+	if (ret)
+		return ret;
+
+	if (sh_chan->id & DMARS_CHAN_MSK)
+		shift = DMARS_SHIFT;
+
+	switch (sh_chan->id) {
+	/* DMARS0 */
+	case 0:
+	case 1:
+		addr = SH_DMARS_BASE;
+		break;
+	/* DMARS1 */
+	case 2:
+	case 3:
+		addr = (SH_DMARS_BASE + DMARS1_ADDR);
+		break;
+	/* DMARS2 */
+	case 4:
+	case 5:
+		addr = (SH_DMARS_BASE + DMARS2_ADDR);
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	ctrl_outw((val << shift) |
+		(ctrl_inw(addr) & (shift ? 0xFF00 : 0x00FF)),
+		addr);
+
+	return 0;
+}
+
+static dma_cookie_t sh_dmae_tx_submit(struct dma_async_tx_descriptor *tx)
+{
+	struct sh_desc *desc = tx_to_sh_desc(tx);
+	struct sh_dmae_chan *sh_chan = to_sh_chan(tx->chan);
+	dma_cookie_t cookie;
+
+	spin_lock_bh(&sh_chan->desc_lock);
+
+	cookie = sh_chan->common.cookie;
+	cookie++;
+	if (cookie < 0)
+		cookie = 1;
+
+	/* the cookie is -EBUSY only when this is the sole descriptor */
+	if (desc->async_tx.cookie != -EBUSY)
+		desc->async_tx.cookie = cookie;
+	sh_chan->common.cookie = desc->async_tx.cookie;
+
+	list_splice_init(&desc->tx_list, sh_chan->ld_queue.prev);
+
+	spin_unlock_bh(&sh_chan->desc_lock);
+
+	return cookie;
+}
+
+static struct sh_desc *sh_dmae_get_desc(struct sh_dmae_chan *sh_chan)
+{
+	struct sh_desc *desc, *_desc, *ret = NULL;
+
+	spin_lock_bh(&sh_chan->desc_lock);
+	list_for_each_entry_safe(desc, _desc, &sh_chan->ld_free, node) {
+		if (async_tx_test_ack(&desc->async_tx)) {
+			list_del(&desc->node);
+			ret = desc;
+			break;
+		}
+	}
+	spin_unlock_bh(&sh_chan->desc_lock);
+
+	return ret;
+}
+
+static void sh_dmae_put_desc(struct sh_dmae_chan *sh_chan, struct sh_desc *desc)
+{
+	if (desc) {
+		spin_lock_bh(&sh_chan->desc_lock);
+
+		list_splice_init(&desc->tx_list, &sh_chan->ld_free);
+		list_add(&desc->node, &sh_chan->ld_free);
+
+		spin_unlock_bh(&sh_chan->desc_lock);
+	}
+}
+
+static int sh_dmae_alloc_chan_resources(struct dma_chan *chan)
+{
+	struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
+	struct sh_desc *desc;
+
+	spin_lock_bh(&sh_chan->desc_lock);
+	while (sh_chan->descs_allocated < NR_DESCS_PER_CHANNEL) {
+		spin_unlock_bh(&sh_chan->desc_lock);
+		desc = kzalloc(sizeof(struct sh_desc), GFP_KERNEL);
+		if (!desc) {
+			spin_lock_bh(&sh_chan->desc_lock);
+			break;
+		}
+		dma_async_tx_descriptor_init(&desc->async_tx,
+					&sh_chan->common);
+		desc->async_tx.tx_submit = sh_dmae_tx_submit;
+		desc->async_tx.flags = DMA_CTRL_ACK;
+		INIT_LIST_HEAD(&desc->tx_list);
+		sh_dmae_put_desc(sh_chan, desc);
+
+		spin_lock_bh(&sh_chan->desc_lock);
+		sh_chan->descs_allocated++;
+	}
+	spin_unlock_bh(&sh_chan->desc_lock);
+
+	return sh_chan->descs_allocated;
+}
+
+/*
+ * sh_dmae_free_chan_resources - Free all resources of the channel.
+ */
+static void sh_dmae_free_chan_resources(struct dma_chan *chan)
+{
+	struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
+	struct sh_desc *desc, *_desc;
+	LIST_HEAD(list);
+
+	BUG_ON(!list_empty(&sh_chan->ld_queue));
+	spin_lock_bh(&sh_chan->desc_lock);
+
+	list_splice_init(&sh_chan->ld_free, &list);
+	sh_chan->descs_allocated = 0;
+
+	spin_unlock_bh(&sh_chan->desc_lock);
+
+	list_for_each_entry_safe(desc, _desc, &list, node)
+		kfree(desc);
+}
+
+static struct dma_async_tx_descriptor *sh_dmae_prep_memcpy(
+	struct dma_chan *chan, dma_addr_t dma_dest, dma_addr_t dma_src,
+	size_t len, unsigned long flags)
+{
+	struct sh_dmae_chan *sh_chan;
+	struct sh_desc *first = NULL, *prev = NULL, *new;
+	size_t copy_size;
+
+	if (!chan)
+		return NULL;
+
+	if (!len)
+		return NULL;
+
+	sh_chan = to_sh_chan(chan);
+
+	do {
+		/* Fetch a link descriptor from the free list */
+		new = sh_dmae_get_desc(sh_chan);
+		if (!new) {
+			dev_err(sh_chan->dev,
+				"No free memory for link descriptor\n");
+			goto err_get_desc;
+		}
+
+		copy_size = min(len, (size_t)SH_DMA_TCR_MAX);
+
+		new->hw.sar = dma_src;
+		new->hw.dar = dma_dest;
+		new->hw.tcr = copy_size;
+		if (!first)
+			first = new;
+
+		new->mark = DESC_NCOMP;
+		async_tx_ack(&new->async_tx);
+
+		prev = new;
+		len -= copy_size;
+		dma_src += copy_size;
+		dma_dest += copy_size;
+		/* Insert the link descriptor to the LD ring */
+		list_add_tail(&new->node, &first->tx_list);
+	} while (len);
+
+	new->async_tx.flags = flags; /* client is in control of this ack */
+	new->async_tx.cookie = -EBUSY; /* Last desc */
+
+	return &first->async_tx;
+
+err_get_desc:
+	sh_dmae_put_desc(sh_chan, first);
+	return NULL;
+
+}
+
+/*
+ * sh_chan_ld_cleanup - Clean up link descriptors
+ *
+ * This function cleans up the ld_queue of the DMA channel.
+ */
+static void sh_dmae_chan_ld_cleanup(struct sh_dmae_chan *sh_chan)
+{
+	struct sh_desc *desc, *_desc;
+
+	spin_lock_bh(&sh_chan->desc_lock);
+	list_for_each_entry_safe(desc, _desc, &sh_chan->ld_queue, node) {
+		/* descriptor data not sent yet: stop cleaning up here */
+		dma_async_tx_callback callback;
+		void *callback_param;
+
+		if (desc->mark == DESC_NCOMP)
+			break;
+
+		/* descriptor whose data has been sent */
+		callback = desc->async_tx.callback;
+		callback_param = desc->async_tx.callback_param;
+
+		/* Remove from ld_queue list */
+		list_splice_init(&desc->tx_list, &sh_chan->ld_free);
+
+		dev_dbg(sh_chan->dev, "link descriptor %p will be recycled.\n",
+				desc);
+
+		list_move(&desc->node, &sh_chan->ld_free);
+		/* Run the link descriptor callback function */
+		if (callback) {
+			spin_unlock_bh(&sh_chan->desc_lock);
+			dev_dbg(sh_chan->dev, "link descriptor %p callback\n",
+					desc);
+			callback(callback_param);
+			spin_lock_bh(&sh_chan->desc_lock);
+		}
+	}
+	spin_unlock_bh(&sh_chan->desc_lock);
+}
+
+static void sh_chan_xfer_ld_queue(struct sh_dmae_chan *sh_chan)
+{
+	struct list_head *ld_node;
+	struct sh_dmae_regs hw;
+
+	/* bail out if the DMA engine is still working */
+	if (dmae_is_idle(sh_chan))
+		return;
+
+	/* Find the first untransferred descriptor */
+	for (ld_node = sh_chan->ld_queue.next;
+		(ld_node != &sh_chan->ld_queue)
+			&& (to_sh_desc(ld_node)->mark == DESC_COMP);
+		ld_node = ld_node->next)
+		cpu_relax();
+
+	if (ld_node != &sh_chan->ld_queue) {
+		/* Get the ld start address from ld_queue */
+		hw = to_sh_desc(ld_node)->hw;
+		dmae_set_reg(sh_chan, hw);
+		dmae_start(sh_chan);
+	}
+}
+
+static void sh_dmae_memcpy_issue_pending(struct dma_chan *chan)
+{
+	struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
+	sh_chan_xfer_ld_queue(sh_chan);
+}
+
+static enum dma_status sh_dmae_is_complete(struct dma_chan *chan,
+					dma_cookie_t cookie,
+					dma_cookie_t *done,
+					dma_cookie_t *used)
+{
+	struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
+	dma_cookie_t last_used;
+	dma_cookie_t last_complete;
+
+	sh_dmae_chan_ld_cleanup(sh_chan);
+
+	last_used = chan->cookie;
+	last_complete = sh_chan->completed_cookie;
+	if (last_complete == -EBUSY)
+		last_complete = last_used;
+
+	if (done)
+		*done = last_complete;
+
+	if (used)
+		*used = last_used;
+
+	return dma_async_is_complete(cookie, last_complete, last_used);
+}
+
+static irqreturn_t sh_dmae_interrupt(int irq, void *data)
+{
+	irqreturn_t ret = IRQ_NONE;
+	struct sh_dmae_chan *sh_chan = (struct sh_dmae_chan *)data;
+	u32 chcr = sh_dmae_readl(sh_chan, CHCR);
+
+	if (chcr & CHCR_TE) {
+		/* DMA stop */
+		dmae_halt(sh_chan);
+
+		ret = IRQ_HANDLED;
+		tasklet_schedule(&sh_chan->tasklet);
+	}
+
+	return ret;
+}
+
+#if defined(CONFIG_CPU_SH4)
+static irqreturn_t sh_dmae_err(int irq, void *data)
+{
+	int err = 0;
+	struct sh_dmae_device *shdev = (struct sh_dmae_device *)data;
+
+	/* IRQ Multi */
+	if (shdev->pdata.mode & SHDMA_MIX_IRQ) {
+		int cnt = 0;
+		switch (irq) {
+#if defined(DMTE6_IRQ) && defined(DMAE1_IRQ)
+		case DMTE6_IRQ:
+			cnt++;
+			/* fall through */
+#endif
+		case DMTE0_IRQ:
+			if (dmaor_read_reg(cnt) & (DMAOR_NMIF | DMAOR_AE)) {
+				disable_irq(irq);
+				return IRQ_HANDLED;
+			}
+			/* fall through */
+		default:
+			return IRQ_NONE;
+		}
+	} else {
+		/* reset dma controller */
+		err = sh_dmae_rst(0);
+		if (err)
+			return IRQ_NONE;
+		if (shdev->pdata.mode & SHDMA_DMAOR1) {
+			err = sh_dmae_rst(1);
+			if (err)
+				return IRQ_NONE;
+		}
+		disable_irq(irq);
+		return IRQ_HANDLED;
+	}
+}
+#endif
+
+static void dmae_do_tasklet(unsigned long data)
+{
+	struct sh_dmae_chan *sh_chan = (struct sh_dmae_chan *)data;
+	struct sh_desc *desc, *_desc, *cur_desc = NULL;
+	u32 sar_buf = sh_dmae_readl(sh_chan, SAR);
+	list_for_each_entry_safe(desc, _desc,
+			&sh_chan->ld_queue, node) {
+		if ((desc->hw.sar + desc->hw.tcr) == sar_buf) {
+			cur_desc = desc;
+			break;
+		}
+	}
+
+	if (cur_desc) {
+		switch (cur_desc->async_tx.cookie) {
+		case 0: /* other desc data */
+			break;
+		case -EBUSY: /* last desc */
+			sh_chan->completed_cookie =
+				cur_desc->async_tx.cookie;
+			break;
+		default: /* first desc (cookie > 0) */
+			sh_chan->completed_cookie =
+				cur_desc->async_tx.cookie - 1;
+			break;
+		}
+		cur_desc->mark = DESC_COMP;
+	}
+	/* Next desc */
+	sh_chan_xfer_ld_queue(sh_chan);
+	sh_dmae_chan_ld_cleanup(sh_chan);
+}
+
+static unsigned int get_dmae_irq(unsigned int id)
+{
+	unsigned int irq = 0;
+	if (id < ARRAY_SIZE(dmte_irq_map))
+		irq = dmte_irq_map[id];
+	return irq;
+}
+
+static int __devinit sh_dmae_chan_probe(struct sh_dmae_device *shdev, int id)
+{
+	int err;
+	unsigned int irq = get_dmae_irq(id);
+	unsigned long irqflags = IRQF_DISABLED;
+	struct sh_dmae_chan *new_sh_chan;
+
+	/* alloc channel */
+	new_sh_chan = kzalloc(sizeof(struct sh_dmae_chan), GFP_KERNEL);
+	if (!new_sh_chan) {
+		dev_err(shdev->common.dev, "No free memory for allocating "
+				"dma channels!\n");
+		return -ENOMEM;
+	}
+
+	new_sh_chan->dev = shdev->common.dev;
+	new_sh_chan->id = id;
+
+	/* Init DMA tasklet */
+	tasklet_init(&new_sh_chan->tasklet, dmae_do_tasklet,
+			(unsigned long)new_sh_chan);
+
+	/* Init the channel */
+	dmae_init(new_sh_chan);
+
+	spin_lock_init(&new_sh_chan->desc_lock);
+
+	/* Init descriptor management lists */
+	INIT_LIST_HEAD(&new_sh_chan->ld_queue);
+	INIT_LIST_HEAD(&new_sh_chan->ld_free);
+
+	/* reference the shared struct dma_device */
+	new_sh_chan->common.device = &shdev->common;
+
+	/* Add the channel to DMA device channel list */
+	list_add_tail(&new_sh_chan->common.device_node,
+			&shdev->common.channels);
+	shdev->common.chancnt++;
+
+	if (shdev->pdata.mode & SHDMA_MIX_IRQ) {
+		irqflags = IRQF_SHARED;
+#if defined(DMTE6_IRQ)
+		if (irq >= DMTE6_IRQ)
+			irq = DMTE6_IRQ;
+		else
+#endif
+			irq = DMTE0_IRQ;
+	}
+
+	snprintf(new_sh_chan->dev_id, sizeof(new_sh_chan->dev_id),
+			"sh-dmae%d", new_sh_chan->id);
+
+	/* set up channel irq */
+	err = request_irq(irq, &sh_dmae_interrupt,
+		irqflags, new_sh_chan->dev_id, new_sh_chan);
+	if (err) {
+		dev_err(shdev->common.dev, "DMA channel %d request_irq error "
+			"with return %d\n", id, err);
+		goto err_no_irq;
+	}
+
+	/* CHCR register control function */
+	new_sh_chan->set_chcr = dmae_set_chcr;
+	/* DMARS register control function */
+	new_sh_chan->set_dmars = dmae_set_dmars;
+
+	shdev->chan[id] = new_sh_chan;
+	return 0;
+
+err_no_irq:
+	/* remove from dmaengine device node */
+	list_del(&new_sh_chan->common.device_node);
+	kfree(new_sh_chan);
+	return err;
+}
+
+static void sh_dmae_chan_remove(struct sh_dmae_device *shdev)
+{
+	int i;
+
+	for (i = shdev->common.chancnt - 1 ; i >= 0 ; i--) {
+		if (shdev->chan[i]) {
+			struct sh_dmae_chan *shchan = shdev->chan[i];
+			if (!(shdev->pdata.mode & SHDMA_MIX_IRQ))
+				free_irq(dmte_irq_map[i], shchan);
+
+			list_del(&shchan->common.device_node);
+			kfree(shchan);
+			shdev->chan[i] = NULL;
+		}
+	}
+	shdev->common.chancnt = 0;
+}
+
+static int __init sh_dmae_probe(struct platform_device *pdev)
+{
+	int err = 0, cnt, ecnt;
+	unsigned long irqflags = IRQF_DISABLED;
+#if defined(CONFIG_CPU_SH4)
+	int eirq[] = { DMAE0_IRQ,
+#if defined(DMAE1_IRQ)
+			DMAE1_IRQ
+#endif
+		};
+#endif
+	struct sh_dmae_device *shdev;
+
+	shdev = kzalloc(sizeof(struct sh_dmae_device), GFP_KERNEL);
+	if (!shdev) {
+		dev_err(&pdev->dev, "Not enough memory\n");
+		err = -ENOMEM;
+		goto shdev_err;
+	}
+
+	/* get platform data */
+	if (!pdev->dev.platform_data) {
+		err = -ENODEV;
+		goto rst_err;
+	}
+
+	/* platform data */
+	memcpy(&shdev->pdata, pdev->dev.platform_data,
+			sizeof(struct sh_dmae_pdata));
+
+	/* reset dma controller */
+	err = sh_dmae_rst(0);
+	if (err)
+		goto rst_err;
+
+	/* SH7780/85/23 has DMAOR1 */
+	if (shdev->pdata.mode & SHDMA_DMAOR1) {
+		err = sh_dmae_rst(1);
+		if (err)
+			goto rst_err;
+	}
+
+	INIT_LIST_HEAD(&shdev->common.channels);
+
+	dma_cap_set(DMA_MEMCPY, shdev->common.cap_mask);
+	shdev->common.device_alloc_chan_resources
+		= sh_dmae_alloc_chan_resources;
+	shdev->common.device_free_chan_resources = sh_dmae_free_chan_resources;
+	shdev->common.device_prep_dma_memcpy = sh_dmae_prep_memcpy;
+	shdev->common.device_is_tx_complete = sh_dmae_is_complete;
+	shdev->common.device_issue_pending = sh_dmae_memcpy_issue_pending;
+	shdev->common.dev = &pdev->dev;
+
+#if defined(CONFIG_CPU_SH4)
+	/* Mix IRQ mode (SH7722/SH7730 etc.): the error IRQ is shared */
+	if (shdev->pdata.mode & SHDMA_MIX_IRQ) {
+		irqflags = IRQF_SHARED;
+		eirq[0] = DMTE0_IRQ;
+#if defined(DMTE6_IRQ) && defined(DMAE1_IRQ)
+		eirq[1] = DMTE6_IRQ;
+#endif
+	}
+
+	for (ecnt = 0 ; ecnt < ARRAY_SIZE(eirq); ecnt++) {
+		err = request_irq(eirq[ecnt], sh_dmae_err,
+			irqflags, "DMAC Address Error", shdev);
+		if (err) {
+			dev_err(&pdev->dev, "DMA device request_irq "
+				"error (irq %d) with return %d\n",
+				eirq[ecnt], err);
+			goto eirq_err;
+		}
+	}
+#endif /* CONFIG_CPU_SH4 */
+
+	/* Create DMA Channel */
+	for (cnt = 0 ; cnt < MAX_DMA_CHANNELS ; cnt++) {
+		err = sh_dmae_chan_probe(shdev, cnt);
+		if (err)
+			goto chan_probe_err;
+	}
+
+	platform_set_drvdata(pdev, shdev);
+	dma_async_device_register(&shdev->common);
+
+	return err;
+
+chan_probe_err:
+	sh_dmae_chan_remove(shdev);
+
+eirq_err:
+	for (ecnt-- ; ecnt >= 0; ecnt--)
+		free_irq(eirq[ecnt], shdev);
+
+rst_err:
+	kfree(shdev);
+
+shdev_err:
+	return err;
+}
+
+static int __exit sh_dmae_remove(struct platform_device *pdev)
+{
+	struct sh_dmae_device *shdev = platform_get_drvdata(pdev);
+
+	dma_async_device_unregister(&shdev->common);
+
+	if (shdev->pdata.mode & SHDMA_MIX_IRQ) {
+		free_irq(DMTE0_IRQ, shdev);
+#if defined(DMTE6_IRQ)
+		free_irq(DMTE6_IRQ, shdev);
+#endif
+	}
+
+	/* channel data remove */
+	sh_dmae_chan_remove(shdev);
+
+	if (!(shdev->pdata.mode & SHDMA_MIX_IRQ)) {
+		free_irq(DMAE0_IRQ, shdev);
+#if defined(DMAE1_IRQ)
+		free_irq(DMAE1_IRQ, shdev);
+#endif
+	}
+	kfree(shdev);
+
+	return 0;
+}
+
+static void sh_dmae_shutdown(struct platform_device *pdev)
+{
+	struct sh_dmae_device *shdev = platform_get_drvdata(pdev);
+	sh_dmae_ctl_stop(0);
+	if (shdev->pdata.mode & SHDMA_DMAOR1)
+		sh_dmae_ctl_stop(1);
+}
+
+static struct platform_driver sh_dmae_driver = {
+	.remove		= __exit_p(sh_dmae_remove),
+	.shutdown	= sh_dmae_shutdown,
+	.driver = {
+		.name	= "sh-dma-engine",
+	},
+};
+
+static int __init sh_dmae_init(void)
+{
+	return platform_driver_probe(&sh_dmae_driver, sh_dmae_probe);
+}
+module_init(sh_dmae_init);
+
+static void __exit sh_dmae_exit(void)
+{
+	platform_driver_unregister(&sh_dmae_driver);
+}
+module_exit(sh_dmae_exit);
+
+MODULE_AUTHOR("Nobuhiro Iwamatsu ");
+MODULE_DESCRIPTION("Renesas SH DMA Engine driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/dma/shdma.h b/drivers/dma/shdma.h
new file mode 100644
index 000000000000..2b4bc15a2c0a
--- /dev/null
+++ b/drivers/dma/shdma.h
@@ -0,0 +1,64 @@
+/*
+ * Renesas SuperH DMA Engine support
+ *
+ * Copyright (C) 2009 Nobuhiro Iwamatsu
+ * Copyright (C) 2009 Renesas Solutions, Inc. All rights reserved.
+ *
+ * This is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ */
+#ifndef __DMA_SHDMA_H
+#define __DMA_SHDMA_H
+
+#include
+#include
+#include
+
+#define SH_DMA_TCR_MAX 0x00FFFFFF	/* 16MB */
+
+struct sh_dmae_regs {
+	u32 sar; /* SAR / source address */
+	u32 dar; /* DAR / destination address */
+	u32 tcr; /* TCR / transfer count */
+};
+
+struct sh_desc {
+	struct list_head tx_list;
+	struct sh_dmae_regs hw;
+	struct list_head node;
+	struct dma_async_tx_descriptor async_tx;
+	int mark;
+};
+
+struct sh_dmae_chan {
+	dma_cookie_t completed_cookie;	/* The maximum cookie completed */
+	spinlock_t desc_lock;		/* Descriptor operation lock */
+	struct list_head ld_queue;	/* Link descriptors queue */
+	struct list_head ld_free;	/* Link descriptors free */
+	struct dma_chan common;		/* DMA common channel */
+	struct device *dev;		/* Channel device */
+	struct tasklet_struct tasklet;	/* Tasklet */
+	int descs_allocated;		/* desc count */
+	int id;				/* Raw id of this channel */
+	char dev_id[16];		/* unique name per DMAC of channel */
+
+	/* Set chcr */
+	int (*set_chcr)(struct sh_dmae_chan *sh_chan, u32 regs);
+	/* Set DMA resource */
+	int (*set_dmars)(struct sh_dmae_chan *sh_chan, u16 res);
+};
+
+struct sh_dmae_device {
+	struct dma_device common;
+	struct sh_dmae_chan *chan[MAX_DMA_CHANNELS];
+	struct sh_dmae_pdata pdata;
+};
+
+#define to_sh_chan(chan) container_of(chan, struct sh_dmae_chan, common)
+#define to_sh_desc(lh) container_of(lh, struct sh_desc, node)
+#define tx_to_sh_desc(tx) container_of(tx, struct sh_desc, async_tx)
+
+#endif /* __DMA_SHDMA_H */
-- 
cgit v1.2.3
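
Board code is expected to provide the platform device that the driver above binds to. A minimal sketch, assuming only the interfaces added by this patch (the "sh-dma-engine" device name and struct sh_dmae_pdata); the choice of mode flags and the initcall level are illustrative, not taken from a real board file.

#include <linux/platform_device.h>
#include <asm/dma-sh.h>

static struct sh_dmae_pdata dma_platform_data = {
	.mode = SHDMA_MIX_IRQ,	/* e.g. a SoC whose DMAC shares one IRQ */
};

static struct platform_device dma_device = {
	.name	= "sh-dma-engine",
	.id	= -1,
	.dev	= {
		.platform_data = &dma_platform_data,
	},
};

static int __init board_dma_init(void)
{
	/* registering the device triggers sh_dmae_probe() */
	return platform_device_register(&dma_device);
}
arch_initcall(board_dma_init);
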
From 9a8de639f35ca3951b910d5e3a2f92f4cf3afc8f Mon Sep 17 00:00:00 2001
From: Dan Williams
Date: Tue, 8 Sep 2009 15:06:10 -0700
Subject: async_tx: remove HIGHMEM64G restriction

This restriction prevented ASYNC_TX_DMA from being enabled on platform
configurations where DMA address conversion could not be performed in
place on the stack.  Since commit 04ce9ab3 ("async_xor: permit callers
to pass in a 'dma/page scribble' region") the async_tx api now either
uses a caller provided 'scribble' buffer, or performs the conversion in
place when sizeof(dma_addr_t) <= sizeof(struct page *).

Signed-off-by: Dan Williams
---
 drivers/dma/Kconfig | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'drivers/dma/Kconfig')

diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 3230a780c3de..5903a88351bf 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -128,7 +128,7 @@ config NET_DMA
 
 config ASYNC_TX_DMA
 	bool "Async_tx: Offload support for the async_tx api"
-	depends on DMA_ENGINE && !HIGHMEM64G
+	depends on DMA_ENGINE
 	help
 	  This allows the async_tx api to take advantage of offload engines for
 	  memcpy, memset, xor, and raid6 p+q operations. If your platform has
-- 
cgit v1.2.3
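
Tying the series together, here is a minimal client-side sketch of driving a DMA_MEMCPY-capable channel such as the one registered above, using only helpers visible in these patches (dmaengine_get()/dmaengine_put(), dma_find_channel(), dma_async_memcpy_buf_to_buf(), dma_async_issue_pending()); the busy-wait completion loop is for brevity only.

#include <linux/dmaengine.h>

static int example_copy(void *dest, void *src, size_t len)
{
	struct dma_chan *chan;
	dma_cookie_t cookie;

	dmaengine_get();			/* take a reference on the dmaengine core */
	chan = dma_find_channel(DMA_MEMCPY);	/* per-cpu channel table lookup */
	if (!chan) {
		dmaengine_put();
		return -ENODEV;
	}

	cookie = dma_async_memcpy_buf_to_buf(chan, dest, src, len);
	if (cookie < 0) {
		dmaengine_put();
		return -EIO;
	}

	dma_async_issue_pending(chan);
	while (dma_async_is_tx_complete(chan, cookie, NULL, NULL) ==
			DMA_IN_PROGRESS)
		cpu_relax();			/* poll; real clients use callbacks */

	dmaengine_put();
	return 0;
}
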