author    | Krzysztof Hałasa <khc@pm.waw.pl> | 2009-02-20 03:01:33 +0300
committer | Krzysztof Hałasa <khc@pm.waw.pl> | 2009-05-20 17:27:07 +0400
commit    | a6a9fb857b5599b3cefef5576967389c1c43eb4c (patch)
tree      | 3a6d3d8e9d7560c1c496fc6c38698687196fab55 /arch/arm/mach-ixp4xx/ixp4xx_qmgr.c
parent    | 1406de8e11eb043681297adf86d6892ff8efc27a (diff)
download  | linux-a6a9fb857b5599b3cefef5576967389c1c43eb4c.tar.xz
IXP4xx: Add support for the second half of the 64 hardware queues.
Signed-off-by: Krzysztof Hałasa <khc@pm.waw.pl>
Diffstat (limited to 'arch/arm/mach-ixp4xx/ixp4xx_qmgr.c')
-rw-r--r-- | arch/arm/mach-ixp4xx/ixp4xx_qmgr.c | 71
1 file changed, 49 insertions, 22 deletions
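
As an aid to reading the diff below, here is a small standalone sketch (not part of the commit) of the index arithmetic the patch relies on: each of the 64 queues maps to one of two irqstat/irqen register banks plus a bit within that bank, while the per-queue IRQ source field (irqsrc, 4 bits per queue, 8 queues per word) only exists for the lower 32 queues. The constant values and the printout are illustrative assumptions based on the commit subject and the code in the diff.

#include <stdio.h>

#define HALF_QUEUES 32
#define QUEUES      64

int main(void)
{
	unsigned int queue;

	for (queue = 0; queue < QUEUES; queue++) {
		/* Which irqstat/irqen register bank and bit this queue maps to. */
		int half = queue / 32;                              /* 0 for queues 0-31, 1 for 32-63 */
		unsigned int mask = 1u << (queue & (HALF_QUEUES - 1));

		if (queue < HALF_QUEUES)
			/* Lower half: selectable IRQ source, 4 bits per queue
			 * (3 used + 1 reserved) in irqsrc[queue >> 3]. */
			printf("queue %2u: irqsrc[%u] bits %2u-%2u, irqen[%d] mask 0x%08x\n",
			       queue, queue >> 3, (queue % 8) * 4,
			       (queue % 8) * 4 + 2, half, mask);
		else
			/* Upper half: IRQ source is fixed (not-nearly-empty). */
			printf("queue %2u: fixed IRQ source, irqen[%d] mask 0x%08x\n",
			       queue, half, mask);
	}
	return 0;
}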
diff --git a/arch/arm/mach-ixp4xx/ixp4xx_qmgr.c b/arch/arm/mach-ixp4xx/ixp4xx_qmgr.c
index bfddc73d0a20..7531bfdb7b4e 100644
--- a/arch/arm/mach-ixp4xx/ixp4xx_qmgr.c
+++ b/arch/arm/mach-ixp4xx/ixp4xx_qmgr.c
@@ -18,8 +18,8 @@ struct qmgr_regs __iomem *qmgr_regs;
 static struct resource *mem_res;
 static spinlock_t qmgr_lock;
 static u32 used_sram_bitmap[4]; /* 128 16-dword pages */
-static void (*irq_handlers[HALF_QUEUES])(void *pdev);
-static void *irq_pdevs[HALF_QUEUES];
+static void (*irq_handlers[QUEUES])(void *pdev);
+static void *irq_pdevs[QUEUES];
 
 #if DEBUG_QMGR
 char qmgr_queue_descs[QUEUES][32];
@@ -28,29 +28,38 @@ char qmgr_queue_descs[QUEUES][32];
 void qmgr_set_irq(unsigned int queue, int src,
 		  void (*handler)(void *pdev), void *pdev)
 {
-	u32 __iomem *reg = &qmgr_regs->irqsrc[queue / 8]; /* 8 queues / u32 */
-	int bit = (queue % 8) * 4; /* 3 bits + 1 reserved bit per queue */
 	unsigned long flags;
 
-	src &= 7;
 	spin_lock_irqsave(&qmgr_lock, flags);
-	__raw_writel((__raw_readl(reg) & ~(7 << bit)) | (src << bit), reg);
+	if (queue < HALF_QUEUES) {
+		u32 __iomem *reg;
+		int bit;
+		BUG_ON(src > QUEUE_IRQ_SRC_NOT_FULL);
+		reg = &qmgr_regs->irqsrc[queue >> 3]; /* 8 queues per u32 */
+		bit = (queue % 8) * 4; /* 3 bits + 1 reserved bit per queue */
+		__raw_writel((__raw_readl(reg) & ~(7 << bit)) | (src << bit),
+			     reg);
+	} else
+		/* IRQ source for queues 32-63 is fixed */
+		BUG_ON(src != QUEUE_IRQ_SRC_NOT_NEARLY_EMPTY);
+
 	irq_handlers[queue] = handler;
 	irq_pdevs[queue] = pdev;
 	spin_unlock_irqrestore(&qmgr_lock, flags);
 }
 
-static irqreturn_t qmgr_irq1(int irq, void *pdev)
+static irqreturn_t qmgr_irq(int irq, void *pdev)
 {
-	int i;
-	u32 val = __raw_readl(&qmgr_regs->irqstat[0]);
-	__raw_writel(val, &qmgr_regs->irqstat[0]); /* ACK */
+	int i, half = (irq == IRQ_IXP4XX_QM1 ? 0 : 1);
+	u32 val = __raw_readl(&qmgr_regs->irqstat[half]);
+	__raw_writel(val, &qmgr_regs->irqstat[half]); /* ACK */
 
 	for (i = 0; i < HALF_QUEUES; i++)
-		if (val & (1 << i))
-			irq_handlers[i](irq_pdevs[i]);
-
+		if (val & (1 << i)) {
+			int irq = half * HALF_QUEUES + i;
+			irq_handlers[irq](irq_pdevs[irq]);
+		}
 	return val ? IRQ_HANDLED : 0;
 }
 
@@ -58,21 +67,25 @@ static irqreturn_t qmgr_irq1(int irq, void *pdev)
 void qmgr_enable_irq(unsigned int queue)
 {
 	unsigned long flags;
+	int half = queue / 32;
+	u32 mask = 1 << (queue & (HALF_QUEUES - 1));
 
 	spin_lock_irqsave(&qmgr_lock, flags);
-	__raw_writel(__raw_readl(&qmgr_regs->irqen[0]) | (1 << queue),
-		     &qmgr_regs->irqen[0]);
+	__raw_writel(__raw_readl(&qmgr_regs->irqen[half]) | mask,
+		     &qmgr_regs->irqen[half]);
 	spin_unlock_irqrestore(&qmgr_lock, flags);
 }
 
 void qmgr_disable_irq(unsigned int queue)
 {
 	unsigned long flags;
+	int half = queue / 32;
+	u32 mask = 1 << (queue & (HALF_QUEUES - 1));
 
 	spin_lock_irqsave(&qmgr_lock, flags);
-	__raw_writel(__raw_readl(&qmgr_regs->irqen[0]) & ~(1 << queue),
-		     &qmgr_regs->irqen[0]);
-	__raw_writel(1 << queue, &qmgr_regs->irqstat[0]); /* clear */
+	__raw_writel(__raw_readl(&qmgr_regs->irqen[half]) & ~mask,
+		     &qmgr_regs->irqen[half]);
+	__raw_writel(mask, &qmgr_regs->irqstat[half]); /* clear */
 	spin_unlock_irqrestore(&qmgr_lock, flags);
 }
 
@@ -98,8 +111,7 @@ int __qmgr_request_queue(unsigned int queue, unsigned int len /* dwords */,
 	u32 cfg, addr = 0, mask[4]; /* in 16-dwords */
 	int err;
 
-	if (queue >= HALF_QUEUES)
-		return -ERANGE;
+	BUG_ON(queue >= QUEUES);
 
 	if ((nearly_empty_watermark | nearly_full_watermark) & ~7)
 		return -EINVAL;
@@ -180,7 +192,7 @@ void qmgr_release_queue(unsigned int queue)
 {
 	u32 cfg, addr, mask[4];
 
-	BUG_ON(queue >= HALF_QUEUES); /* not in valid range */
+	BUG_ON(queue >= QUEUES); /* not in valid range */
 
 	spin_lock_irq(&qmgr_lock);
 	cfg = __raw_readl(&qmgr_regs->sram[queue]);
@@ -247,10 +259,13 @@ static int qmgr_init(void)
 		__raw_writel(0, &qmgr_regs->irqen[i]);
 	}
 
+	__raw_writel(0xFFFFFFFF, &qmgr_regs->statne_h);
+	__raw_writel(0, &qmgr_regs->statf_h);
+
 	for (i = 0; i < QUEUES; i++)
 		__raw_writel(0, &qmgr_regs->sram[i]);
 
-	err = request_irq(IRQ_IXP4XX_QM1, qmgr_irq1, 0,
+	err = request_irq(IRQ_IXP4XX_QM1, qmgr_irq, 0,
 			  "IXP4xx Queue Manager", NULL);
 	if (err) {
 		printk(KERN_ERR "qmgr: failed to request IRQ%i\n",
@@ -258,12 +273,22 @@ static int qmgr_init(void)
 		goto error_irq;
 	}
 
+	err = request_irq(IRQ_IXP4XX_QM2, qmgr_irq, 0,
+			  "IXP4xx Queue Manager", NULL);
+	if (err) {
+		printk(KERN_ERR "qmgr: failed to request IRQ%i\n",
+		       IRQ_IXP4XX_QM2);
+		goto error_irq2;
+	}
+
 	used_sram_bitmap[0] = 0xF; /* 4 first pages reserved for config */
 	spin_lock_init(&qmgr_lock);
 	printk(KERN_INFO "IXP4xx Queue Manager initialized.\n");
 	return 0;
 
+error_irq2:
+	free_irq(IRQ_IXP4XX_QM1, NULL);
 error_irq:
 	iounmap(qmgr_regs);
 error_map:
@@ -274,7 +299,9 @@ error_map:
 static void qmgr_remove(void)
 {
 	free_irq(IRQ_IXP4XX_QM1, NULL);
+	free_irq(IRQ_IXP4XX_QM2, NULL);
 	synchronize_irq(IRQ_IXP4XX_QM1);
+	synchronize_irq(IRQ_IXP4XX_QM2);
 	iounmap(qmgr_regs);
 	release_mem_region(IXP4XX_QMGR_BASE_PHYS, IXP4XX_QMGR_REGION_SIZE);
 }
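
For context, a hedged usage sketch of how a platform driver might hook a queue from the newly supported upper half once this patch is applied. The queue number and the my_-prefixed names are made-up placeholders; qmgr_set_irq(), qmgr_enable_irq() and QUEUE_IRQ_SRC_NOT_NEARLY_EMPTY come from the driver code above, and <mach/qmgr.h> is assumed to be the IXP4xx queue manager header of that era.

#include <mach/qmgr.h>

#define MY_TX_DONE_QUEUE 40	/* hypothetical queue in the 32-63 range */

static void my_txdone_handler(void *pdev)
{
	/* drain completed descriptors here */
}

static void my_hook_upper_queue(void *pdev)
{
	/* The queue is assumed to have been requested elsewhere.
	 * Queues 32-63 only support the fixed not-nearly-empty IRQ source;
	 * any other src would trip the BUG_ON() in qmgr_set_irq(). */
	qmgr_set_irq(MY_TX_DONE_QUEUE, QUEUE_IRQ_SRC_NOT_NEARLY_EMPTY,
		     my_txdone_handler, pdev);
	qmgr_enable_irq(MY_TX_DONE_QUEUE);
}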