From e3ad1c23ba72214669b364c6fa304531dc768c3e Mon Sep 17 00:00:00 2001 From: Pete Popov Date: Tue, 1 Mar 2005 06:33:16 +0000 Subject: Base Au1200 2.6 support. Signed-off-by: Ralf Baechle --- arch/mips/au1000/common/au1xxx_irqmap.c | 32 ++-- arch/mips/au1000/common/cputable.c | 3 +- arch/mips/au1000/common/dbdma.c | 298 +++++++++++++++++++++++--------- arch/mips/au1000/common/irq.c | 4 +- arch/mips/au1000/common/usbdev.c | 12 +- 5 files changed, 244 insertions(+), 105 deletions(-) (limited to 'arch/mips/au1000/common') diff --git a/arch/mips/au1000/common/au1xxx_irqmap.c b/arch/mips/au1000/common/au1xxx_irqmap.c index 8a0f39f67c59..0b2c03c52319 100644 --- a/arch/mips/au1000/common/au1xxx_irqmap.c +++ b/arch/mips/au1000/common/au1xxx_irqmap.c @@ -173,14 +173,14 @@ au1xxx_irq_map_t au1xxx_ic0_map[] = { { AU1550_PSC1_INT, INTC_INT_HIGH_LEVEL, 0}, { AU1550_PSC2_INT, INTC_INT_HIGH_LEVEL, 0}, { AU1550_PSC3_INT, INTC_INT_HIGH_LEVEL, 0}, - { AU1550_TOY_INT, INTC_INT_RISE_EDGE, 0 }, - { AU1550_TOY_MATCH0_INT, INTC_INT_RISE_EDGE, 0 }, - { AU1550_TOY_MATCH1_INT, INTC_INT_RISE_EDGE, 0 }, - { AU1550_TOY_MATCH2_INT, INTC_INT_RISE_EDGE, 1 }, - { AU1550_RTC_INT, INTC_INT_RISE_EDGE, 0 }, - { AU1550_RTC_MATCH0_INT, INTC_INT_RISE_EDGE, 0 }, - { AU1550_RTC_MATCH1_INT, INTC_INT_RISE_EDGE, 0 }, - { AU1550_RTC_MATCH2_INT, INTC_INT_RISE_EDGE, 0 }, + { AU1000_TOY_INT, INTC_INT_RISE_EDGE, 0 }, + { AU1000_TOY_MATCH0_INT, INTC_INT_RISE_EDGE, 0 }, + { AU1000_TOY_MATCH1_INT, INTC_INT_RISE_EDGE, 0 }, + { AU1000_TOY_MATCH2_INT, INTC_INT_RISE_EDGE, 1 }, + { AU1000_RTC_INT, INTC_INT_RISE_EDGE, 0 }, + { AU1000_RTC_MATCH0_INT, INTC_INT_RISE_EDGE, 0 }, + { AU1000_RTC_MATCH1_INT, INTC_INT_RISE_EDGE, 0 }, + { AU1000_RTC_MATCH2_INT, INTC_INT_RISE_EDGE, 0 }, { AU1550_NAND_INT, INTC_INT_RISE_EDGE, 0}, { AU1550_USB_DEV_REQ_INT, INTC_INT_HIGH_LEVEL, 0 }, { AU1550_USB_DEV_SUS_INT, INTC_INT_RISE_EDGE, 0 }, @@ -201,14 +201,14 @@ au1xxx_irq_map_t au1xxx_ic0_map[] = { { AU1200_PSC1_INT, INTC_INT_HIGH_LEVEL, 0}, { AU1200_AES_INT, INTC_INT_HIGH_LEVEL, 0}, { AU1200_CAMERA_INT, INTC_INT_HIGH_LEVEL, 0}, - { AU1200_TOY_INT, INTC_INT_RISE_EDGE, 0 }, - { AU1200_TOY_MATCH0_INT, INTC_INT_RISE_EDGE, 0 }, - { AU1200_TOY_MATCH1_INT, INTC_INT_RISE_EDGE, 0 }, - { AU1200_TOY_MATCH2_INT, INTC_INT_RISE_EDGE, 1 }, - { AU1200_RTC_INT, INTC_INT_RISE_EDGE, 0 }, - { AU1200_RTC_MATCH0_INT, INTC_INT_RISE_EDGE, 0 }, - { AU1200_RTC_MATCH1_INT, INTC_INT_RISE_EDGE, 0 }, - { AU1200_RTC_MATCH2_INT, INTC_INT_RISE_EDGE, 0 }, + { AU1000_TOY_INT, INTC_INT_RISE_EDGE, 0 }, + { AU1000_TOY_MATCH0_INT, INTC_INT_RISE_EDGE, 0 }, + { AU1000_TOY_MATCH1_INT, INTC_INT_RISE_EDGE, 0 }, + { AU1000_TOY_MATCH2_INT, INTC_INT_RISE_EDGE, 1 }, + { AU1000_RTC_INT, INTC_INT_RISE_EDGE, 0 }, + { AU1000_RTC_MATCH0_INT, INTC_INT_RISE_EDGE, 0 }, + { AU1000_RTC_MATCH1_INT, INTC_INT_RISE_EDGE, 0 }, + { AU1000_RTC_MATCH2_INT, INTC_INT_RISE_EDGE, 0 }, { AU1200_NAND_INT, INTC_INT_RISE_EDGE, 0}, { AU1200_USB_INT, INTC_INT_HIGH_LEVEL, 0 }, { AU1200_LCD_INT, INTC_INT_HIGH_LEVEL, 0}, diff --git a/arch/mips/au1000/common/cputable.c b/arch/mips/au1000/common/cputable.c index f5521dfccfd6..4dbde82c8215 100644 --- a/arch/mips/au1000/common/cputable.c +++ b/arch/mips/au1000/common/cputable.c @@ -37,7 +37,8 @@ struct cpu_spec cpu_specs[] = { { 0xffffffff, 0x02030203, "Au1100 BD", 0, 1 }, { 0xffffffff, 0x02030204, "Au1100 BE", 0, 1 }, { 0xffffffff, 0x03030200, "Au1550 AA", 0, 1 }, - { 0xffffffff, 0x04030200, "Au1200 AA", 0, 1 }, + { 0xffffffff, 0x04030200, "Au1200 AB", 0, 0 }, + { 0xffffffff, 0x04030201, "Au1200 
AC", 0, 1 }, { 0x00000000, 0x00000000, "Unknown Au1xxx", 1, 0 }, }; diff --git a/arch/mips/au1000/common/dbdma.c b/arch/mips/au1000/common/dbdma.c index adfc3172aace..cf10dc246f82 100644 --- a/arch/mips/au1000/common/dbdma.c +++ b/arch/mips/au1000/common/dbdma.c @@ -29,6 +29,7 @@ * 675 Mass Ave, Cambridge, MA 02139, USA. * */ + #include #include #include @@ -42,6 +43,8 @@ #include #include +/* #include */ + #if defined(CONFIG_SOC_AU1550) || defined(CONFIG_SOC_AU1200) /* @@ -55,43 +58,16 @@ * functions. The drivers allocate the data buffers and assign them * to the descriptors. */ -static DEFINE_SPINLOCK(au1xxx_dbdma_spin_lock); +static spinlock_t au1xxx_dbdma_spin_lock = SPIN_LOCK_UNLOCKED; /* I couldn't find a macro that did this...... */ #define ALIGN_ADDR(x, a) ((((u32)(x)) + (a-1)) & ~(a-1)) -static volatile dbdma_global_t *dbdma_gptr = (dbdma_global_t *)DDMA_GLOBAL_BASE; -static int dbdma_initialized; +static dbdma_global_t *dbdma_gptr = (dbdma_global_t *)DDMA_GLOBAL_BASE; +static int dbdma_initialized=0; static void au1xxx_dbdma_init(void); -typedef struct dbdma_device_table { - u32 dev_id; - u32 dev_flags; - u32 dev_tsize; - u32 dev_devwidth; - u32 dev_physaddr; /* If FIFO */ - u32 dev_intlevel; - u32 dev_intpolarity; -} dbdev_tab_t; - -typedef struct dbdma_chan_config { - u32 chan_flags; - u32 chan_index; - dbdev_tab_t *chan_src; - dbdev_tab_t *chan_dest; - au1x_dma_chan_t *chan_ptr; - au1x_ddma_desc_t *chan_desc_base; - au1x_ddma_desc_t *get_ptr, *put_ptr, *cur_ptr; - void *chan_callparam; - void (*chan_callback)(int, void *, struct pt_regs *); -} chan_tab_t; - -#define DEV_FLAGS_INUSE (1 << 0) -#define DEV_FLAGS_ANYUSE (1 << 1) -#define DEV_FLAGS_OUT (1 << 2) -#define DEV_FLAGS_IN (1 << 3) - static dbdev_tab_t dbdev_tab[] = { #ifdef CONFIG_SOC_AU1550 /* UARTS */ @@ -157,13 +133,13 @@ static dbdev_tab_t dbdev_tab[] = { { DSCR_CMD0_MAE_BOTH, DEV_FLAGS_ANYUSE, 0, 0, 0x00000000, 0, 0 }, { DSCR_CMD0_LCD, DEV_FLAGS_ANYUSE, 0, 0, 0x00000000, 0, 0 }, - { DSCR_CMD0_SDMS_TX0, DEV_FLAGS_OUT, 0, 0, 0x00000000, 0, 0 }, - { DSCR_CMD0_SDMS_RX0, DEV_FLAGS_IN, 0, 0, 0x00000000, 0, 0 }, - { DSCR_CMD0_SDMS_TX1, DEV_FLAGS_OUT, 0, 0, 0x00000000, 0, 0 }, - { DSCR_CMD0_SDMS_RX1, DEV_FLAGS_IN, 0, 0, 0x00000000, 0, 0 }, + { DSCR_CMD0_SDMS_TX0, DEV_FLAGS_OUT, 4, 8, 0x10600000, 0, 0 }, + { DSCR_CMD0_SDMS_RX0, DEV_FLAGS_IN, 4, 8, 0x10600004, 0, 0 }, + { DSCR_CMD0_SDMS_TX1, DEV_FLAGS_OUT, 4, 8, 0x10680000, 0, 0 }, + { DSCR_CMD0_SDMS_RX1, DEV_FLAGS_IN, 4, 8, 0x10680004, 0, 0 }, - { DSCR_CMD0_AES_TX, DEV_FLAGS_OUT, 0, 0, 0x00000000, 0, 0 }, - { DSCR_CMD0_AES_RX, DEV_FLAGS_IN, 0, 0, 0x00000000, 0, 0 }, + { DSCR_CMD0_AES_RX, DEV_FLAGS_IN , 4, 32, 0x10300008, 0, 0 }, + { DSCR_CMD0_AES_TX, DEV_FLAGS_OUT, 4, 32, 0x10300004, 0, 0 }, { DSCR_CMD0_PSC0_TX, DEV_FLAGS_OUT, 0, 0, 0x11a0001c, 0, 0 }, { DSCR_CMD0_PSC0_RX, DEV_FLAGS_IN, 0, 0, 0x11a0001c, 0, 0 }, @@ -173,9 +149,9 @@ static dbdev_tab_t dbdev_tab[] = { { DSCR_CMD0_PSC1_RX, DEV_FLAGS_IN, 0, 0, 0x11b0001c, 0, 0 }, { DSCR_CMD0_PSC1_SYNC, DEV_FLAGS_ANYUSE, 0, 0, 0x00000000, 0, 0 }, - { DSCR_CMD0_CIM_RXA, DEV_FLAGS_IN, 0, 0, 0x00000000, 0, 0 }, - { DSCR_CMD0_CIM_RXB, DEV_FLAGS_IN, 0, 0, 0x00000000, 0, 0 }, - { DSCR_CMD0_CIM_RXC, DEV_FLAGS_IN, 0, 0, 0x00000000, 0, 0 }, + { DSCR_CMD0_CIM_RXA, DEV_FLAGS_IN, 0, 32, 0x14004020, 0, 0 }, + { DSCR_CMD0_CIM_RXB, DEV_FLAGS_IN, 0, 32, 0x14004040, 0, 0 }, + { DSCR_CMD0_CIM_RXC, DEV_FLAGS_IN, 0, 32, 0x14004060, 0, 0 }, { DSCR_CMD0_CIM_SYNC, DEV_FLAGS_ANYUSE, 0, 0, 0x00000000, 0, 0 }, { DSCR_CMD0_NAND_FLASH, DEV_FLAGS_IN, 0, 0, 
0x00000000, 0, 0 }, @@ -184,6 +160,24 @@ static dbdev_tab_t dbdev_tab[] = { { DSCR_CMD0_THROTTLE, DEV_FLAGS_ANYUSE, 0, 0, 0x00000000, 0, 0 }, { DSCR_CMD0_ALWAYS, DEV_FLAGS_ANYUSE, 0, 0, 0x00000000, 0, 0 }, + + /* Provide 16 user definable device types */ + { 0, 0, 0, 0, 0, 0, 0 }, + { 0, 0, 0, 0, 0, 0, 0 }, + { 0, 0, 0, 0, 0, 0, 0 }, + { 0, 0, 0, 0, 0, 0, 0 }, + { 0, 0, 0, 0, 0, 0, 0 }, + { 0, 0, 0, 0, 0, 0, 0 }, + { 0, 0, 0, 0, 0, 0, 0 }, + { 0, 0, 0, 0, 0, 0, 0 }, + { 0, 0, 0, 0, 0, 0, 0 }, + { 0, 0, 0, 0, 0, 0, 0 }, + { 0, 0, 0, 0, 0, 0, 0 }, + { 0, 0, 0, 0, 0, 0, 0 }, + { 0, 0, 0, 0, 0, 0, 0 }, + { 0, 0, 0, 0, 0, 0, 0 }, + { 0, 0, 0, 0, 0, 0, 0 }, + { 0, 0, 0, 0, 0, 0, 0 }, }; #define DBDEV_TAB_SIZE (sizeof(dbdev_tab) / sizeof(dbdev_tab_t)) @@ -203,6 +197,30 @@ find_dbdev_id (u32 id) return NULL; } +u32 +au1xxx_ddma_add_device(dbdev_tab_t *dev) +{ + u32 ret = 0; + dbdev_tab_t *p=NULL; + static u16 new_id=0x1000; + + p = find_dbdev_id(0); + if ( NULL != p ) + { + memcpy(p, dev, sizeof(dbdev_tab_t)); + p->dev_id = DSCR_DEV2CUSTOM_ID(new_id,dev->dev_id); + ret = p->dev_id; + new_id++; +#if 0 + printk("add_device: id:%x flags:%x padd:%x\n", + p->dev_id, p->dev_flags, p->dev_physaddr ); +#endif + } + + return ret; +} +EXPORT_SYMBOL(au1xxx_ddma_add_device); + /* Allocate a channel and return a non-zero descriptor if successful. */ u32 @@ -215,7 +233,7 @@ au1xxx_dbdma_chan_alloc(u32 srcid, u32 destid, int i; dbdev_tab_t *stp, *dtp; chan_tab_t *ctp; - volatile au1x_dma_chan_t *cp; + au1x_dma_chan_t *cp; /* We do the intialization on the first channel allocation. * We have to wait because of the interrupt handler initialization @@ -225,9 +243,6 @@ au1xxx_dbdma_chan_alloc(u32 srcid, u32 destid, au1xxx_dbdma_init(); dbdma_initialized = 1; - if ((srcid > DSCR_NDEV_IDS) || (destid > DSCR_NDEV_IDS)) - return 0; - if ((stp = find_dbdev_id(srcid)) == NULL) return 0; if ((dtp = find_dbdev_id(destid)) == NULL) return 0; @@ -269,9 +284,9 @@ au1xxx_dbdma_chan_alloc(u32 srcid, u32 destid, /* If kmalloc fails, it is caught below same * as a channel not available. */ - ctp = kmalloc(sizeof(chan_tab_t), GFP_KERNEL); + ctp = (chan_tab_t *) + kmalloc(sizeof(chan_tab_t), GFP_KERNEL); chan_tab_ptr[i] = ctp; - ctp->chan_index = chan = i; break; } } @@ -279,10 +294,11 @@ au1xxx_dbdma_chan_alloc(u32 srcid, u32 destid, if (ctp != NULL) { memset(ctp, 0, sizeof(chan_tab_t)); + ctp->chan_index = chan = i; dcp = DDMA_CHANNEL_BASE; dcp += (0x0100 * chan); ctp->chan_ptr = (au1x_dma_chan_t *)dcp; - cp = (volatile au1x_dma_chan_t *)dcp; + cp = (au1x_dma_chan_t *)dcp; ctp->chan_src = stp; ctp->chan_dest = dtp; ctp->chan_callback = callback; @@ -299,6 +315,9 @@ au1xxx_dbdma_chan_alloc(u32 srcid, u32 destid, i |= DDMA_CFG_DED; if (dtp->dev_intpolarity) i |= DDMA_CFG_DP; + if ((stp->dev_flags & DEV_FLAGS_SYNC) || + (dtp->dev_flags & DEV_FLAGS_SYNC)) + i |= DDMA_CFG_SYNC; cp->ddma_cfg = i; au_sync(); @@ -309,14 +328,14 @@ au1xxx_dbdma_chan_alloc(u32 srcid, u32 destid, rv = (u32)(&chan_tab_ptr[chan]); } else { - /* Release devices. - */ + /* Release devices */ stp->dev_flags &= ~DEV_FLAGS_INUSE; dtp->dev_flags &= ~DEV_FLAGS_INUSE; } } return rv; } +EXPORT_SYMBOL(au1xxx_dbdma_chan_alloc); /* Set the device width if source or destination is a FIFO. * Should be 8, 16, or 32 bits. @@ -344,6 +363,7 @@ au1xxx_dbdma_set_devwidth(u32 chanid, int bits) return rv; } +EXPORT_SYMBOL(au1xxx_dbdma_set_devwidth); /* Allocate a descriptor ring, initializing as much as possible. 
*/ @@ -370,7 +390,8 @@ au1xxx_dbdma_ring_alloc(u32 chanid, int entries) * and if we try that first we are likely to not waste larger * slabs of memory. */ - desc_base = (u32)kmalloc(entries * sizeof(au1x_ddma_desc_t), GFP_KERNEL); + desc_base = (u32)kmalloc(entries * sizeof(au1x_ddma_desc_t), + GFP_KERNEL|GFP_DMA); if (desc_base == 0) return 0; @@ -381,7 +402,7 @@ au1xxx_dbdma_ring_alloc(u32 chanid, int entries) kfree((const void *)desc_base); i = entries * sizeof(au1x_ddma_desc_t); i += (sizeof(au1x_ddma_desc_t) - 1); - if ((desc_base = (u32)kmalloc(i, GFP_KERNEL)) == 0) + if ((desc_base = (u32)kmalloc(i, GFP_KERNEL|GFP_DMA)) == 0) return 0; desc_base = ALIGN_ADDR(desc_base, sizeof(au1x_ddma_desc_t)); @@ -461,9 +482,14 @@ au1xxx_dbdma_ring_alloc(u32 chanid, int entries) /* If source input is fifo, set static address. */ if (stp->dev_flags & DEV_FLAGS_IN) { - src0 = stp->dev_physaddr; + if ( stp->dev_flags & DEV_FLAGS_BURSTABLE ) + src1 |= DSCR_SRC1_SAM(DSCR_xAM_BURST); + else src1 |= DSCR_SRC1_SAM(DSCR_xAM_STATIC); + } + if (stp->dev_physaddr) + src0 = stp->dev_physaddr; /* Set up dest1. For now, assume no stride and increment. * A channel attribute update can change this later. @@ -487,10 +513,18 @@ au1xxx_dbdma_ring_alloc(u32 chanid, int entries) /* If destination output is fifo, set static address. */ if (dtp->dev_flags & DEV_FLAGS_OUT) { - dest0 = dtp->dev_physaddr; + if ( dtp->dev_flags & DEV_FLAGS_BURSTABLE ) + dest1 |= DSCR_DEST1_DAM(DSCR_xAM_BURST); + else dest1 |= DSCR_DEST1_DAM(DSCR_xAM_STATIC); } + if (dtp->dev_physaddr) + dest0 = dtp->dev_physaddr; +#if 0 + printk("did:%x sid:%x cmd0:%x cmd1:%x source0:%x source1:%x dest0:%x dest1:%x\n", + dtp->dev_id, stp->dev_id, cmd0, cmd1, src0, src1, dest0, dest1 ); +#endif for (i=0; idscr_cmd0 = cmd0; dp->dscr_cmd1 = cmd1; @@ -499,6 +533,7 @@ au1xxx_dbdma_ring_alloc(u32 chanid, int entries) dp->dscr_dest0 = dest0; dp->dscr_dest1 = dest1; dp->dscr_stat = 0; + dp->sw_context = dp->sw_status = 0; dp->dscr_nxtptr = DSCR_NXTPTR(virt_to_phys(dp + 1)); dp++; } @@ -511,13 +546,14 @@ au1xxx_dbdma_ring_alloc(u32 chanid, int entries) return (u32)(ctp->chan_desc_base); } +EXPORT_SYMBOL(au1xxx_dbdma_ring_alloc); /* Put a source buffer into the DMA ring. * This updates the source pointer and byte count. Normally used * for memory to fifo transfers. */ u32 -au1xxx_dbdma_put_source(u32 chanid, void *buf, int nbytes) +_au1xxx_dbdma_put_source(u32 chanid, void *buf, int nbytes, u32 flags) { chan_tab_t *ctp; au1x_ddma_desc_t *dp; @@ -544,24 +580,40 @@ au1xxx_dbdma_put_source(u32 chanid, void *buf, int nbytes) */ dp->dscr_source0 = virt_to_phys(buf); dp->dscr_cmd1 = nbytes; - dp->dscr_cmd0 |= DSCR_CMD0_V; /* Let it rip */ - ctp->chan_ptr->ddma_dbell = 0xffffffff; /* Make it go */ - + /* Check flags */ + if (flags & DDMA_FLAGS_IE) + dp->dscr_cmd0 |= DSCR_CMD0_IE; + if (flags & DDMA_FLAGS_NOIE) + dp->dscr_cmd0 &= ~DSCR_CMD0_IE; /* Get next descriptor pointer. */ ctp->put_ptr = phys_to_virt(DSCR_GET_NXTPTR(dp->dscr_nxtptr)); + /* + * There is an errata on the Au1200/Au1550 parts that could result + * in "stale" data being DMA'd. It has to do with the snoop logic on + * the dache eviction buffer. NONCOHERENT_IO is on by default for + * these parts. If it is fixedin the future, these dma_cache_inv will + * just be nothing more than empty macros. See io.h. + * */ + dma_cache_wback_inv(buf,nbytes); + dp->dscr_cmd0 |= DSCR_CMD0_V; /* Let it rip */ + au_sync(); + dma_cache_wback_inv(dp, sizeof(dp)); + ctp->chan_ptr->ddma_dbell = 0; + /* return something not zero. 
*/ return nbytes; } +EXPORT_SYMBOL(_au1xxx_dbdma_put_source); /* Put a destination buffer into the DMA ring. * This updates the destination pointer and byte count. Normally used * to place an empty buffer into the ring for fifo to memory transfers. */ u32 -au1xxx_dbdma_put_dest(u32 chanid, void *buf, int nbytes) +_au1xxx_dbdma_put_dest(u32 chanid, void *buf, int nbytes, u32 flags) { chan_tab_t *ctp; au1x_ddma_desc_t *dp; @@ -583,11 +635,33 @@ au1xxx_dbdma_put_dest(u32 chanid, void *buf, int nbytes) if (dp->dscr_cmd0 & DSCR_CMD0_V) return 0; - /* Load up buffer address and byte count. - */ + /* Load up buffer address and byte count */ + + /* Check flags */ + if (flags & DDMA_FLAGS_IE) + dp->dscr_cmd0 |= DSCR_CMD0_IE; + if (flags & DDMA_FLAGS_NOIE) + dp->dscr_cmd0 &= ~DSCR_CMD0_IE; + dp->dscr_dest0 = virt_to_phys(buf); dp->dscr_cmd1 = nbytes; +#if 0 + printk("cmd0:%x cmd1:%x source0:%x source1:%x dest0:%x dest1:%x\n", + dp->dscr_cmd0, dp->dscr_cmd1, dp->dscr_source0, + dp->dscr_source1, dp->dscr_dest0, dp->dscr_dest1 ); +#endif + /* + * There is an errata on the Au1200/Au1550 parts that could result in + * "stale" data being DMA'd. It has to do with the snoop logic on the + * dache eviction buffer. NONCOHERENT_IO is on by default for these + * parts. If it is fixedin the future, these dma_cache_inv will just + * be nothing more than empty macros. See io.h. + * */ + dma_cache_inv(buf,nbytes); dp->dscr_cmd0 |= DSCR_CMD0_V; /* Let it rip */ + au_sync(); + dma_cache_wback_inv(dp, sizeof(dp)); + ctp->chan_ptr->ddma_dbell = 0; /* Get next descriptor pointer. */ @@ -597,6 +671,7 @@ au1xxx_dbdma_put_dest(u32 chanid, void *buf, int nbytes) */ return nbytes; } +EXPORT_SYMBOL(_au1xxx_dbdma_put_dest); /* Get a destination buffer into the DMA ring. * Normally used to get a full buffer from the ring during fifo @@ -646,7 +721,7 @@ void au1xxx_dbdma_stop(u32 chanid) { chan_tab_t *ctp; - volatile au1x_dma_chan_t *cp; + au1x_dma_chan_t *cp; int halt_timeout = 0; ctp = *((chan_tab_t **)chanid); @@ -666,6 +741,7 @@ au1xxx_dbdma_stop(u32 chanid) cp->ddma_stat |= (DDMA_STAT_DB | DDMA_STAT_V); au_sync(); } +EXPORT_SYMBOL(au1xxx_dbdma_stop); /* Start using the current descriptor pointer. If the dbdma encounters * a not valid descriptor, it will stop. In this case, we can just @@ -675,17 +751,17 @@ void au1xxx_dbdma_start(u32 chanid) { chan_tab_t *ctp; - volatile au1x_dma_chan_t *cp; + au1x_dma_chan_t *cp; ctp = *((chan_tab_t **)chanid); - cp = ctp->chan_ptr; cp->ddma_desptr = virt_to_phys(ctp->cur_ptr); cp->ddma_cfg |= DDMA_CFG_EN; /* Enable channel */ au_sync(); - cp->ddma_dbell = 0xffffffff; /* Make it go */ + cp->ddma_dbell = 0; au_sync(); } +EXPORT_SYMBOL(au1xxx_dbdma_start); void au1xxx_dbdma_reset(u32 chanid) @@ -704,15 +780,21 @@ au1xxx_dbdma_reset(u32 chanid) do { dp->dscr_cmd0 &= ~DSCR_CMD0_V; + /* reset our SW status -- this is used to determine + * if a descriptor is in use by upper level SW. Since + * posting can reset 'V' bit. 
+ */ + dp->sw_status = 0; dp = phys_to_virt(DSCR_GET_NXTPTR(dp->dscr_nxtptr)); } while (dp != ctp->chan_desc_base); } +EXPORT_SYMBOL(au1xxx_dbdma_reset); u32 au1xxx_get_dma_residue(u32 chanid) { chan_tab_t *ctp; - volatile au1x_dma_chan_t *cp; + au1x_dma_chan_t *cp; u32 rv; ctp = *((chan_tab_t **)chanid); @@ -747,15 +829,16 @@ au1xxx_dbdma_chan_free(u32 chanid) kfree(ctp); } +EXPORT_SYMBOL(au1xxx_dbdma_chan_free); -static irqreturn_t +static void dbdma_interrupt(int irq, void *dev_id, struct pt_regs *regs) { - u32 intstat; + u32 intstat, flags; u32 chan_index; chan_tab_t *ctp; au1x_ddma_desc_t *dp; - volatile au1x_dma_chan_t *cp; + au1x_dma_chan_t *cp; intstat = dbdma_gptr->ddma_intstat; au_sync(); @@ -774,19 +857,26 @@ dbdma_interrupt(int irq, void *dev_id, struct pt_regs *regs) (ctp->chan_callback)(irq, ctp->chan_callparam, regs); ctp->cur_ptr = phys_to_virt(DSCR_GET_NXTPTR(dp->dscr_nxtptr)); - - return IRQ_HANDLED; } -static void -au1xxx_dbdma_init(void) +static void au1xxx_dbdma_init(void) { + int irq_nr; + dbdma_gptr->ddma_config = 0; dbdma_gptr->ddma_throttle = 0; dbdma_gptr->ddma_inten = 0xffff; au_sync(); - if (request_irq(AU1550_DDMA_INT, dbdma_interrupt, SA_INTERRUPT, +#if defined(CONFIG_SOC_AU1550) + irq_nr = AU1550_DDMA_INT; +#elif defined(CONFIG_SOC_AU1200) + irq_nr = AU1200_DDMA_INT; +#else + #error Unknown Au1x00 SOC +#endif + + if (request_irq(irq_nr, dbdma_interrupt, SA_INTERRUPT, "Au1xxx dbdma", (void *)dbdma_gptr)) printk("Can't get 1550 dbdma irq"); } @@ -797,7 +887,8 @@ au1xxx_dbdma_dump(u32 chanid) chan_tab_t *ctp; au1x_ddma_desc_t *dp; dbdev_tab_t *stp, *dtp; - volatile au1x_dma_chan_t *cp; + au1x_dma_chan_t *cp; + u32 i = 0; ctp = *((chan_tab_t **)chanid); stp = ctp->chan_src; @@ -822,15 +913,64 @@ au1xxx_dbdma_dump(u32 chanid) dp = ctp->chan_desc_base; do { - printk("dp %08x, cmd0 %08x, cmd1 %08x\n", - (u32)dp, dp->dscr_cmd0, dp->dscr_cmd1); - printk("src0 %08x, src1 %08x, dest0 %08x\n", - dp->dscr_source0, dp->dscr_source1, dp->dscr_dest0); - printk("dest1 %08x, stat %08x, nxtptr %08x\n", - dp->dscr_dest1, dp->dscr_stat, dp->dscr_nxtptr); + printk("Dp[%d]= %08x, cmd0 %08x, cmd1 %08x\n", + i++, (u32)dp, dp->dscr_cmd0, dp->dscr_cmd1); + printk("src0 %08x, src1 %08x, dest0 %08x, dest1 %08x\n", + dp->dscr_source0, dp->dscr_source1, dp->dscr_dest0, dp->dscr_dest1); + printk("stat %08x, nxtptr %08x\n", + dp->dscr_stat, dp->dscr_nxtptr); dp = phys_to_virt(DSCR_GET_NXTPTR(dp->dscr_nxtptr)); } while (dp != ctp->chan_desc_base); } +/* Put a descriptor into the DMA ring. + * This updates the source/destination pointers and byte count. + */ +u32 +au1xxx_dbdma_put_dscr(u32 chanid, au1x_ddma_desc_t *dscr ) +{ + chan_tab_t *ctp; + au1x_ddma_desc_t *dp; + u32 nbytes=0; + + /* I guess we could check this to be within the + * range of the table...... + */ + ctp = *((chan_tab_t **)chanid); + + /* We should have multiple callers for a particular channel, + * an interrupt doesn't affect this pointer nor the descriptor, + * so no locking should be needed. + */ + dp = ctp->put_ptr; + + /* If the descriptor is valid, we are way ahead of the DMA + * engine, so just return an error condition. + */ + if (dp->dscr_cmd0 & DSCR_CMD0_V) + return 0; + + /* Load up buffer addresses and byte count. 
+	 */
+	dp->dscr_dest0 = dscr->dscr_dest0;
+	dp->dscr_source0 = dscr->dscr_source0;
+	dp->dscr_dest1 = dscr->dscr_dest1;
+	dp->dscr_source1 = dscr->dscr_source1;
+	dp->dscr_cmd1 = dscr->dscr_cmd1;
+	nbytes = dscr->dscr_cmd1;
+	/* Allow the caller to specifiy if an interrupt is generated */
+	dp->dscr_cmd0 &= ~DSCR_CMD0_IE;
+	dp->dscr_cmd0 |= dscr->dscr_cmd0 | DSCR_CMD0_V;
+	ctp->chan_ptr->ddma_dbell = 0;
+
+	/* Get next descriptor pointer.
+	 */
+	ctp->put_ptr = phys_to_virt(DSCR_GET_NXTPTR(dp->dscr_nxtptr));
+
+	/* return something not zero.
+	 */
+	return nbytes;
+}
+
 #endif /* defined(CONFIG_SOC_AU1550) || defined(CONFIG_SOC_AU1200) */
diff --git a/arch/mips/au1000/common/irq.c b/arch/mips/au1000/common/irq.c
index 0b912f739feb..ebf93bdbad14 100644
--- a/arch/mips/au1000/common/irq.c
+++ b/arch/mips/au1000/common/irq.c
@@ -488,7 +488,7 @@ void intc0_req0_irqdispatch(struct pt_regs *regs)
 	intc0_req0 |= au_readl(IC0_REQ0INT);
 
 	if (!intc0_req0) return;
-
+#ifdef AU1000_USB_DEV_REQ_INT
 	/*
	 * Because of the tight timing of SETUP token to reply
	 * transactions, the USB devices-side packet complete
@@ -499,7 +499,7 @@
 		do_IRQ(AU1000_USB_DEV_REQ_INT, regs);
 		return;
 	}
-
+#endif
 	irq = au_ffs(intc0_req0) - 1;
 	intc0_req0 &= ~(1<