Diffstat (limited to 'arch/powerpc/platforms')
80 files changed, 1776 insertions, 912 deletions
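
Most hunks below apply the same mechanical genirq conversion: irq_chip callbacks move from the legacy unsigned-int virq signatures (.mask, .unmask, .ack, .set_type, ...) to the struct irq_data based ones (.irq_mask, .irq_unmask, .irq_ack, .irq_set_type, ...), and the old set_irq_*() helpers become irq_set_*(). The sketch below shows the target form of that pattern for a hypothetical foo_pic controller; it is an illustration only, not a driver in this series, and the register layout, offsets, and names are made up.

#include <linux/irq.h>
#include <linux/io.h>
#include <asm/irq.h>			/* irq_map[] on powerpc in this kernel generation */

static void __iomem *foo_pic_regs;	/* hypothetical; a real driver gets this from of_iomap() */

/* The virq now arrives wrapped in struct irq_data instead of as a bare int. */
static void foo_pic_mask(struct irq_data *d)
{
	unsigned int hwirq = (unsigned int)irq_map[d->irq].hwirq;

	setbits32(foo_pic_regs, 1 << hwirq);	/* set the (made-up) mask bit */
}

static void foo_pic_unmask(struct irq_data *d)
{
	unsigned int hwirq = (unsigned int)irq_map[d->irq].hwirq;

	clrbits32(foo_pic_regs, 1 << hwirq);	/* clear the (made-up) mask bit */
}

static struct irq_chip foo_pic_chip = {
	.name		= "FOO-PIC",
	.irq_mask	= foo_pic_mask,		/* was .mask   */
	.irq_unmask	= foo_pic_unmask,	/* was .unmask */
};

static int foo_pic_host_map(struct irq_host *h, unsigned int virq,
			    irq_hw_number_t hw)
{
	/* Replaces poking irq_to_desc(virq)->status and set_irq_chip_and_handler(). */
	irq_set_status_flags(virq, IRQ_LEVEL);
	irq_set_chip_and_handler(virq, &foo_pic_chip, handle_level_irq);
	return 0;
}

The same series also converts the bus glue in these platforms from struct of_platform_driver registered with of_register_platform_driver() to struct platform_driver registered with platform_driver_register(), with the probe() callbacks losing their const struct of_device_id *match argument, as the per-file diffs below show.
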
diff --git a/arch/powerpc/platforms/44x/44x.h b/arch/powerpc/platforms/44x/44x.h index dbc4d2b4301a..63f703ecd23c 100644 --- a/arch/powerpc/platforms/44x/44x.h +++ b/arch/powerpc/platforms/44x/44x.h @@ -4,4 +4,8 @@ extern u8 as1_readb(volatile u8 __iomem *addr); extern void as1_writeb(u8 data, volatile u8 __iomem *addr); +#define GPIO0_OSRH 0xC +#define GPIO0_TSRH 0x14 +#define GPIO0_ISR1H 0x34 + #endif /* __POWERPC_PLATFORMS_44X_44X_H */ diff --git a/arch/powerpc/platforms/44x/Kconfig b/arch/powerpc/platforms/44x/Kconfig index 0f979c5c756b..f485fc5f6d5e 100644 --- a/arch/powerpc/platforms/44x/Kconfig +++ b/arch/powerpc/platforms/44x/Kconfig @@ -115,7 +115,6 @@ config CANYONLANDS bool "Canyonlands" depends on 44x default n - select PPC44x_SIMPLE select 460EX select PCI select PPC4xx_PCI_EXPRESS diff --git a/arch/powerpc/platforms/44x/Makefile b/arch/powerpc/platforms/44x/Makefile index c04d16df8488..553db6007217 100644 --- a/arch/powerpc/platforms/44x/Makefile +++ b/arch/powerpc/platforms/44x/Makefile @@ -9,3 +9,4 @@ obj-$(CONFIG_WARP) += warp.o obj-$(CONFIG_XILINX_VIRTEX_5_FXT) += virtex.o obj-$(CONFIG_XILINX_ML510) += virtex_ml510.o obj-$(CONFIG_ISS4xx) += iss4xx.o +obj-$(CONFIG_CANYONLANDS)+= canyonlands.o diff --git a/arch/powerpc/platforms/44x/canyonlands.c b/arch/powerpc/platforms/44x/canyonlands.c new file mode 100644 index 000000000000..afc5e8ea3775 --- /dev/null +++ b/arch/powerpc/platforms/44x/canyonlands.c @@ -0,0 +1,134 @@ +/* + * This contain platform specific code for APM PPC460EX based Canyonlands + * board. + * + * Copyright (c) 2010, Applied Micro Circuits Corporation + * Author: Rupjyoti Sarmah <rsarmah@apm.com> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of + * the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, + * MA 02111-1307 USA + * + */ +#include <linux/kernel.h> +#include <linux/init.h> +#include <asm/pci-bridge.h> +#include <asm/ppc4xx.h> +#include <asm/udbg.h> +#include <asm/uic.h> +#include <linux/of_platform.h> +#include <linux/delay.h> +#include "44x.h" + +#define BCSR_USB_EN 0x11 + +static __initdata struct of_device_id ppc460ex_of_bus[] = { + { .compatible = "ibm,plb4", }, + { .compatible = "ibm,opb", }, + { .compatible = "ibm,ebc", }, + { .compatible = "simple-bus", }, + {}, +}; + +static int __init ppc460ex_device_probe(void) +{ + of_platform_bus_probe(NULL, ppc460ex_of_bus, NULL); + + return 0; +} +machine_device_initcall(canyonlands, ppc460ex_device_probe); + +/* Using this code only for the Canyonlands board. */ + +static int __init ppc460ex_probe(void) +{ + unsigned long root = of_get_flat_dt_root(); + if (of_flat_dt_is_compatible(root, "amcc,canyonlands")) { + ppc_pci_set_flags(PPC_PCI_REASSIGN_ALL_RSRC); + return 1; + } + return 0; +} + +/* USB PHY fixup code on Canyonlands kit. 
*/ + +static int __init ppc460ex_canyonlands_fixup(void) +{ + u8 __iomem *bcsr ; + void __iomem *vaddr; + struct device_node *np; + int ret = 0; + + np = of_find_compatible_node(NULL, NULL, "amcc,ppc460ex-bcsr"); + if (!np) { + printk(KERN_ERR "failed did not find amcc, ppc460ex bcsr node\n"); + return -ENODEV; + } + + bcsr = of_iomap(np, 0); + of_node_put(np); + + if (!bcsr) { + printk(KERN_CRIT "Could not remap bcsr\n"); + ret = -ENODEV; + goto err_bcsr; + } + + np = of_find_compatible_node(NULL, NULL, "ibm,ppc4xx-gpio"); + if (!np) { + printk(KERN_ERR "failed did not find ibm,ppc4xx-gpio node\n"); + return -ENODEV; + } + + vaddr = of_iomap(np, 0); + of_node_put(np); + + if (!vaddr) { + printk(KERN_CRIT "Could not get gpio node address\n"); + ret = -ENODEV; + goto err_gpio; + } + /* Disable USB, through the BCSR7 bits */ + setbits8(&bcsr[7], BCSR_USB_EN); + + /* Wait for a while after reset */ + msleep(100); + + /* Enable USB here */ + clrbits8(&bcsr[7], BCSR_USB_EN); + + /* + * Configure multiplexed gpio16 and gpio19 as alternate1 output + * source after USB reset. In this configuration gpio16 will be + * USB2HStop and gpio19 will be USB2DStop. For more details refer to + * table 34-7 of PPC460EX user manual. + */ + setbits32((vaddr + GPIO0_OSRH), 0x42000000); + setbits32((vaddr + GPIO0_TSRH), 0x42000000); +err_gpio: + iounmap(vaddr); +err_bcsr: + iounmap(bcsr); + return ret; +} +machine_device_initcall(canyonlands, ppc460ex_canyonlands_fixup); +define_machine(canyonlands) { + .name = "Canyonlands", + .probe = ppc460ex_probe, + .progress = udbg_progress, + .init_IRQ = uic_init_tree, + .get_irq = uic_get_irq, + .restart = ppc4xx_reset_system, + .calibrate_decr = generic_calibrate_decr, +}; diff --git a/arch/powerpc/platforms/44x/ppc44x_simple.c b/arch/powerpc/platforms/44x/ppc44x_simple.c index 7ddcba3b9397..c81c19c0b3d4 100644 --- a/arch/powerpc/platforms/44x/ppc44x_simple.c +++ b/arch/powerpc/platforms/44x/ppc44x_simple.c @@ -53,7 +53,6 @@ static char *board[] __initdata = { "amcc,arches", "amcc,bamboo", "amcc,bluestone", - "amcc,canyonlands", "amcc,glacier", "ibm,ebony", "amcc,eiger", diff --git a/arch/powerpc/platforms/512x/mpc5121_ads_cpld.c b/arch/powerpc/platforms/512x/mpc5121_ads_cpld.c index 4ecf4cf9a51b..cfc4b2009982 100644 --- a/arch/powerpc/platforms/512x/mpc5121_ads_cpld.c +++ b/arch/powerpc/platforms/512x/mpc5121_ads_cpld.c @@ -59,9 +59,9 @@ irq_to_pic_bit(unsigned int irq) } static void -cpld_mask_irq(unsigned int irq) +cpld_mask_irq(struct irq_data *d) { - unsigned int cpld_irq = (unsigned int)irq_map[irq].hwirq; + unsigned int cpld_irq = (unsigned int)irq_map[d->irq].hwirq; void __iomem *pic_mask = irq_to_pic_mask(cpld_irq); out_8(pic_mask, @@ -69,9 +69,9 @@ cpld_mask_irq(unsigned int irq) } static void -cpld_unmask_irq(unsigned int irq) +cpld_unmask_irq(struct irq_data *d) { - unsigned int cpld_irq = (unsigned int)irq_map[irq].hwirq; + unsigned int cpld_irq = (unsigned int)irq_map[d->irq].hwirq; void __iomem *pic_mask = irq_to_pic_mask(cpld_irq); out_8(pic_mask, @@ -80,9 +80,9 @@ cpld_unmask_irq(unsigned int irq) static struct irq_chip cpld_pic = { .name = "CPLD PIC", - .mask = cpld_mask_irq, - .ack = cpld_mask_irq, - .unmask = cpld_unmask_irq, + .irq_mask = cpld_mask_irq, + .irq_ack = cpld_mask_irq, + .irq_unmask = cpld_unmask_irq, }; static int @@ -132,8 +132,8 @@ static int cpld_pic_host_map(struct irq_host *h, unsigned int virq, irq_hw_number_t hw) { - irq_to_desc(virq)->status |= IRQ_LEVEL; - set_irq_chip_and_handler(virq, &cpld_pic, handle_level_irq); + 
irq_set_status_flags(virq, IRQ_LEVEL); + irq_set_chip_and_handler(virq, &cpld_pic, handle_level_irq); return 0; } @@ -198,7 +198,7 @@ mpc5121_ads_cpld_pic_init(void) goto end; } - set_irq_chained_handler(cascade_irq, cpld_pic_cascade); + irq_set_chained_handler(cascade_irq, cpld_pic_cascade); end: of_node_put(np); } diff --git a/arch/powerpc/platforms/52xx/media5200.c b/arch/powerpc/platforms/52xx/media5200.c index 2c7780cb68e5..57a6a349e932 100644 --- a/arch/powerpc/platforms/52xx/media5200.c +++ b/arch/powerpc/platforms/52xx/media5200.c @@ -49,45 +49,46 @@ struct media5200_irq { }; struct media5200_irq media5200_irq; -static void media5200_irq_unmask(unsigned int virq) +static void media5200_irq_unmask(struct irq_data *d) { unsigned long flags; u32 val; spin_lock_irqsave(&media5200_irq.lock, flags); val = in_be32(media5200_irq.regs + MEDIA5200_IRQ_ENABLE); - val |= 1 << (MEDIA5200_IRQ_SHIFT + irq_map[virq].hwirq); + val |= 1 << (MEDIA5200_IRQ_SHIFT + irq_map[d->irq].hwirq); out_be32(media5200_irq.regs + MEDIA5200_IRQ_ENABLE, val); spin_unlock_irqrestore(&media5200_irq.lock, flags); } -static void media5200_irq_mask(unsigned int virq) +static void media5200_irq_mask(struct irq_data *d) { unsigned long flags; u32 val; spin_lock_irqsave(&media5200_irq.lock, flags); val = in_be32(media5200_irq.regs + MEDIA5200_IRQ_ENABLE); - val &= ~(1 << (MEDIA5200_IRQ_SHIFT + irq_map[virq].hwirq)); + val &= ~(1 << (MEDIA5200_IRQ_SHIFT + irq_map[d->irq].hwirq)); out_be32(media5200_irq.regs + MEDIA5200_IRQ_ENABLE, val); spin_unlock_irqrestore(&media5200_irq.lock, flags); } static struct irq_chip media5200_irq_chip = { .name = "Media5200 FPGA", - .unmask = media5200_irq_unmask, - .mask = media5200_irq_mask, - .mask_ack = media5200_irq_mask, + .irq_unmask = media5200_irq_unmask, + .irq_mask = media5200_irq_mask, + .irq_mask_ack = media5200_irq_mask, }; void media5200_irq_cascade(unsigned int virq, struct irq_desc *desc) { + struct irq_chip *chip = irq_desc_get_chip(desc); int sub_virq, val; u32 status, enable; /* Mask off the cascaded IRQ */ raw_spin_lock(&desc->lock); - desc->chip->mask(virq); + chip->irq_mask(&desc->irq_data); raw_spin_unlock(&desc->lock); /* Ask the FPGA for IRQ status. 
If 'val' is 0, then no irqs @@ -105,24 +106,19 @@ void media5200_irq_cascade(unsigned int virq, struct irq_desc *desc) /* Processing done; can reenable the cascade now */ raw_spin_lock(&desc->lock); - desc->chip->ack(virq); - if (!(desc->status & IRQ_DISABLED)) - desc->chip->unmask(virq); + chip->irq_ack(&desc->irq_data); + if (!irqd_irq_disabled(&desc->irq_data)) + chip->irq_unmask(&desc->irq_data); raw_spin_unlock(&desc->lock); } static int media5200_irq_map(struct irq_host *h, unsigned int virq, irq_hw_number_t hw) { - struct irq_desc *desc = irq_to_desc(virq); - pr_debug("%s: h=%p, virq=%i, hwirq=%i\n", __func__, h, virq, (int)hw); - set_irq_chip_data(virq, &media5200_irq); - set_irq_chip_and_handler(virq, &media5200_irq_chip, handle_level_irq); - set_irq_type(virq, IRQ_TYPE_LEVEL_LOW); - desc->status &= ~(IRQ_TYPE_SENSE_MASK | IRQ_LEVEL); - desc->status |= IRQ_TYPE_LEVEL_LOW | IRQ_LEVEL; - + irq_set_chip_data(virq, &media5200_irq); + irq_set_chip_and_handler(virq, &media5200_irq_chip, handle_level_irq); + irq_set_status_flags(virq, IRQ_LEVEL); return 0; } @@ -186,8 +182,8 @@ static void __init media5200_init_irq(void) media5200_irq.irqhost->host_data = &media5200_irq; - set_irq_data(cascade_virq, &media5200_irq); - set_irq_chained_handler(cascade_virq, media5200_irq_cascade); + irq_set_handler_data(cascade_virq, &media5200_irq); + irq_set_chained_handler(cascade_virq, media5200_irq_cascade); return; diff --git a/arch/powerpc/platforms/52xx/mpc52xx_gpio.c b/arch/powerpc/platforms/52xx/mpc52xx_gpio.c index 0dad9a935eb5..1757d1db4b51 100644 --- a/arch/powerpc/platforms/52xx/mpc52xx_gpio.c +++ b/arch/powerpc/platforms/52xx/mpc52xx_gpio.c @@ -147,8 +147,7 @@ mpc52xx_wkup_gpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val) return 0; } -static int __devinit mpc52xx_wkup_gpiochip_probe(struct platform_device *ofdev, - const struct of_device_id *match) +static int __devinit mpc52xx_wkup_gpiochip_probe(struct platform_device *ofdev) { struct mpc52xx_gpiochip *chip; struct mpc52xx_gpio_wkup __iomem *regs; @@ -191,7 +190,7 @@ static const struct of_device_id mpc52xx_wkup_gpiochip_match[] = { {} }; -static struct of_platform_driver mpc52xx_wkup_gpiochip_driver = { +static struct platform_driver mpc52xx_wkup_gpiochip_driver = { .driver = { .name = "gpio_wkup", .owner = THIS_MODULE, @@ -310,8 +309,7 @@ mpc52xx_simple_gpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val) return 0; } -static int __devinit mpc52xx_simple_gpiochip_probe(struct platform_device *ofdev, - const struct of_device_id *match) +static int __devinit mpc52xx_simple_gpiochip_probe(struct platform_device *ofdev) { struct mpc52xx_gpiochip *chip; struct gpio_chip *gc; @@ -349,7 +347,7 @@ static const struct of_device_id mpc52xx_simple_gpiochip_match[] = { {} }; -static struct of_platform_driver mpc52xx_simple_gpiochip_driver = { +static struct platform_driver mpc52xx_simple_gpiochip_driver = { .driver = { .name = "gpio", .owner = THIS_MODULE, @@ -361,10 +359,10 @@ static struct of_platform_driver mpc52xx_simple_gpiochip_driver = { static int __init mpc52xx_gpio_init(void) { - if (of_register_platform_driver(&mpc52xx_wkup_gpiochip_driver)) + if (platform_driver_register(&mpc52xx_wkup_gpiochip_driver)) printk(KERN_ERR "Unable to register wakeup GPIO driver\n"); - if (of_register_platform_driver(&mpc52xx_simple_gpiochip_driver)) + if (platform_driver_register(&mpc52xx_simple_gpiochip_driver)) printk(KERN_ERR "Unable to register simple GPIO driver\n"); return 0; diff --git a/arch/powerpc/platforms/52xx/mpc52xx_gpt.c 
b/arch/powerpc/platforms/52xx/mpc52xx_gpt.c index e0d703c7fdf7..6c39b9cc2fa3 100644 --- a/arch/powerpc/platforms/52xx/mpc52xx_gpt.c +++ b/arch/powerpc/platforms/52xx/mpc52xx_gpt.c @@ -135,9 +135,9 @@ DEFINE_MUTEX(mpc52xx_gpt_list_mutex); * Cascaded interrupt controller hooks */ -static void mpc52xx_gpt_irq_unmask(unsigned int virq) +static void mpc52xx_gpt_irq_unmask(struct irq_data *d) { - struct mpc52xx_gpt_priv *gpt = get_irq_chip_data(virq); + struct mpc52xx_gpt_priv *gpt = irq_data_get_irq_chip_data(d); unsigned long flags; spin_lock_irqsave(&gpt->lock, flags); @@ -145,9 +145,9 @@ static void mpc52xx_gpt_irq_unmask(unsigned int virq) spin_unlock_irqrestore(&gpt->lock, flags); } -static void mpc52xx_gpt_irq_mask(unsigned int virq) +static void mpc52xx_gpt_irq_mask(struct irq_data *d) { - struct mpc52xx_gpt_priv *gpt = get_irq_chip_data(virq); + struct mpc52xx_gpt_priv *gpt = irq_data_get_irq_chip_data(d); unsigned long flags; spin_lock_irqsave(&gpt->lock, flags); @@ -155,20 +155,20 @@ static void mpc52xx_gpt_irq_mask(unsigned int virq) spin_unlock_irqrestore(&gpt->lock, flags); } -static void mpc52xx_gpt_irq_ack(unsigned int virq) +static void mpc52xx_gpt_irq_ack(struct irq_data *d) { - struct mpc52xx_gpt_priv *gpt = get_irq_chip_data(virq); + struct mpc52xx_gpt_priv *gpt = irq_data_get_irq_chip_data(d); out_be32(&gpt->regs->status, MPC52xx_GPT_STATUS_IRQMASK); } -static int mpc52xx_gpt_irq_set_type(unsigned int virq, unsigned int flow_type) +static int mpc52xx_gpt_irq_set_type(struct irq_data *d, unsigned int flow_type) { - struct mpc52xx_gpt_priv *gpt = get_irq_chip_data(virq); + struct mpc52xx_gpt_priv *gpt = irq_data_get_irq_chip_data(d); unsigned long flags; u32 reg; - dev_dbg(gpt->dev, "%s: virq=%i type=%x\n", __func__, virq, flow_type); + dev_dbg(gpt->dev, "%s: virq=%i type=%x\n", __func__, d->irq, flow_type); spin_lock_irqsave(&gpt->lock, flags); reg = in_be32(&gpt->regs->mode) & ~MPC52xx_GPT_MODE_ICT_MASK; @@ -184,15 +184,15 @@ static int mpc52xx_gpt_irq_set_type(unsigned int virq, unsigned int flow_type) static struct irq_chip mpc52xx_gpt_irq_chip = { .name = "MPC52xx GPT", - .unmask = mpc52xx_gpt_irq_unmask, - .mask = mpc52xx_gpt_irq_mask, - .ack = mpc52xx_gpt_irq_ack, - .set_type = mpc52xx_gpt_irq_set_type, + .irq_unmask = mpc52xx_gpt_irq_unmask, + .irq_mask = mpc52xx_gpt_irq_mask, + .irq_ack = mpc52xx_gpt_irq_ack, + .irq_set_type = mpc52xx_gpt_irq_set_type, }; void mpc52xx_gpt_irq_cascade(unsigned int virq, struct irq_desc *desc) { - struct mpc52xx_gpt_priv *gpt = get_irq_data(virq); + struct mpc52xx_gpt_priv *gpt = irq_get_handler_data(virq); int sub_virq; u32 status; @@ -209,8 +209,8 @@ static int mpc52xx_gpt_irq_map(struct irq_host *h, unsigned int virq, struct mpc52xx_gpt_priv *gpt = h->host_data; dev_dbg(gpt->dev, "%s: h=%p, virq=%i\n", __func__, h, virq); - set_irq_chip_data(virq, gpt); - set_irq_chip_and_handler(virq, &mpc52xx_gpt_irq_chip, handle_edge_irq); + irq_set_chip_data(virq, gpt); + irq_set_chip_and_handler(virq, &mpc52xx_gpt_irq_chip, handle_edge_irq); return 0; } @@ -259,8 +259,8 @@ mpc52xx_gpt_irq_setup(struct mpc52xx_gpt_priv *gpt, struct device_node *node) } gpt->irqhost->host_data = gpt; - set_irq_data(cascade_virq, gpt); - set_irq_chained_handler(cascade_virq, mpc52xx_gpt_irq_cascade); + irq_set_handler_data(cascade_virq, gpt); + irq_set_chained_handler(cascade_virq, mpc52xx_gpt_irq_cascade); /* If the GPT is currently disabled, then change it to be in Input * Capture mode. 
If the mode is non-zero, then the pin could be @@ -721,8 +721,7 @@ static inline int mpc52xx_gpt_wdt_setup(struct mpc52xx_gpt_priv *gpt, /* --------------------------------------------------------------------- * of_platform bus binding code */ -static int __devinit mpc52xx_gpt_probe(struct platform_device *ofdev, - const struct of_device_id *match) +static int __devinit mpc52xx_gpt_probe(struct platform_device *ofdev) { struct mpc52xx_gpt_priv *gpt; @@ -781,7 +780,7 @@ static const struct of_device_id mpc52xx_gpt_match[] = { {} }; -static struct of_platform_driver mpc52xx_gpt_driver = { +static struct platform_driver mpc52xx_gpt_driver = { .driver = { .name = "mpc52xx-gpt", .owner = THIS_MODULE, @@ -793,10 +792,7 @@ static struct of_platform_driver mpc52xx_gpt_driver = { static int __init mpc52xx_gpt_init(void) { - if (of_register_platform_driver(&mpc52xx_gpt_driver)) - pr_err("error registering MPC52xx GPT driver\n"); - - return 0; + return platform_driver_register(&mpc52xx_gpt_driver); } /* Make sure GPIOs and IRQs get set up before anyone tries to use them */ diff --git a/arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c b/arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c index f4ac213c89c0..9940ce8a2d4e 100644 --- a/arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c +++ b/arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c @@ -57,7 +57,7 @@ struct mpc52xx_lpbfifo { static struct mpc52xx_lpbfifo lpbfifo; /** - * mpc52xx_lpbfifo_kick - Trigger the next block of data to be transfered + * mpc52xx_lpbfifo_kick - Trigger the next block of data to be transferred */ static void mpc52xx_lpbfifo_kick(struct mpc52xx_lpbfifo_request *req) { @@ -179,7 +179,7 @@ static void mpc52xx_lpbfifo_kick(struct mpc52xx_lpbfifo_request *req) * * On transmit, the dma completion irq triggers before the fifo completion * triggers. Handle the dma completion here instead of the LPB FIFO Bestcomm - * task completion irq becuase everyting is not really done until the LPB FIFO + * task completion irq because everything is not really done until the LPB FIFO * completion irq triggers. * * In other words: @@ -195,7 +195,7 @@ static void mpc52xx_lpbfifo_kick(struct mpc52xx_lpbfifo_request *req) * Exit conditions: * 1) Transfer aborted * 2) FIFO complete without DMA; more data to do - * 3) FIFO complete without DMA; all data transfered + * 3) FIFO complete without DMA; all data transferred * 4) FIFO complete using DMA * * Condition 1 can occur regardless of whether or not DMA is used. 
@@ -436,8 +436,7 @@ void mpc52xx_lpbfifo_abort(struct mpc52xx_lpbfifo_request *req) } EXPORT_SYMBOL(mpc52xx_lpbfifo_abort); -static int __devinit mpc52xx_lpbfifo_probe(struct platform_device *op, - const struct of_device_id *match) +static int __devinit mpc52xx_lpbfifo_probe(struct platform_device *op) { struct resource res; int rc = -ENOMEM; @@ -536,7 +535,7 @@ static struct of_device_id mpc52xx_lpbfifo_match[] __devinitconst = { {}, }; -static struct of_platform_driver mpc52xx_lpbfifo_driver = { +static struct platform_driver mpc52xx_lpbfifo_driver = { .driver = { .name = "mpc52xx-lpbfifo", .owner = THIS_MODULE, @@ -551,14 +550,12 @@ static struct of_platform_driver mpc52xx_lpbfifo_driver = { */ static int __init mpc52xx_lpbfifo_init(void) { - pr_debug("Registering LocalPlus bus FIFO driver\n"); - return of_register_platform_driver(&mpc52xx_lpbfifo_driver); + return platform_driver_register(&mpc52xx_lpbfifo_driver); } module_init(mpc52xx_lpbfifo_init); static void __exit mpc52xx_lpbfifo_exit(void) { - pr_debug("Unregistering LocalPlus bus FIFO driver\n"); - of_unregister_platform_driver(&mpc52xx_lpbfifo_driver); + platform_driver_unregister(&mpc52xx_lpbfifo_driver); } module_exit(mpc52xx_lpbfifo_exit); diff --git a/arch/powerpc/platforms/52xx/mpc52xx_pic.c b/arch/powerpc/platforms/52xx/mpc52xx_pic.c index 4bf4bf7b063e..1dd15400f6f0 100644 --- a/arch/powerpc/platforms/52xx/mpc52xx_pic.c +++ b/arch/powerpc/platforms/52xx/mpc52xx_pic.c @@ -155,47 +155,47 @@ static inline void io_be_clrbit(u32 __iomem *addr, int bitno) /* * IRQ[0-3] interrupt irq_chip */ -static void mpc52xx_extirq_mask(unsigned int virq) +static void mpc52xx_extirq_mask(struct irq_data *d) { int irq; int l2irq; - irq = irq_map[virq].hwirq; + irq = irq_map[d->irq].hwirq; l2irq = irq & MPC52xx_IRQ_L2_MASK; io_be_clrbit(&intr->ctrl, 11 - l2irq); } -static void mpc52xx_extirq_unmask(unsigned int virq) +static void mpc52xx_extirq_unmask(struct irq_data *d) { int irq; int l2irq; - irq = irq_map[virq].hwirq; + irq = irq_map[d->irq].hwirq; l2irq = irq & MPC52xx_IRQ_L2_MASK; io_be_setbit(&intr->ctrl, 11 - l2irq); } -static void mpc52xx_extirq_ack(unsigned int virq) +static void mpc52xx_extirq_ack(struct irq_data *d) { int irq; int l2irq; - irq = irq_map[virq].hwirq; + irq = irq_map[d->irq].hwirq; l2irq = irq & MPC52xx_IRQ_L2_MASK; io_be_setbit(&intr->ctrl, 27-l2irq); } -static int mpc52xx_extirq_set_type(unsigned int virq, unsigned int flow_type) +static int mpc52xx_extirq_set_type(struct irq_data *d, unsigned int flow_type) { u32 ctrl_reg, type; int irq; int l2irq; void *handler = handle_level_irq; - irq = irq_map[virq].hwirq; + irq = irq_map[d->irq].hwirq; l2irq = irq & MPC52xx_IRQ_L2_MASK; pr_debug("%s: irq=%x. 
l2=%d flow_type=%d\n", __func__, irq, l2irq, flow_type); @@ -214,44 +214,44 @@ static int mpc52xx_extirq_set_type(unsigned int virq, unsigned int flow_type) ctrl_reg |= (type << (22 - (l2irq * 2))); out_be32(&intr->ctrl, ctrl_reg); - __set_irq_handler_unlocked(virq, handler); + __irq_set_handler_locked(d->irq, handler); return 0; } static struct irq_chip mpc52xx_extirq_irqchip = { .name = "MPC52xx External", - .mask = mpc52xx_extirq_mask, - .unmask = mpc52xx_extirq_unmask, - .ack = mpc52xx_extirq_ack, - .set_type = mpc52xx_extirq_set_type, + .irq_mask = mpc52xx_extirq_mask, + .irq_unmask = mpc52xx_extirq_unmask, + .irq_ack = mpc52xx_extirq_ack, + .irq_set_type = mpc52xx_extirq_set_type, }; /* * Main interrupt irq_chip */ -static int mpc52xx_null_set_type(unsigned int virq, unsigned int flow_type) +static int mpc52xx_null_set_type(struct irq_data *d, unsigned int flow_type) { return 0; /* Do nothing so that the sense mask will get updated */ } -static void mpc52xx_main_mask(unsigned int virq) +static void mpc52xx_main_mask(struct irq_data *d) { int irq; int l2irq; - irq = irq_map[virq].hwirq; + irq = irq_map[d->irq].hwirq; l2irq = irq & MPC52xx_IRQ_L2_MASK; io_be_setbit(&intr->main_mask, 16 - l2irq); } -static void mpc52xx_main_unmask(unsigned int virq) +static void mpc52xx_main_unmask(struct irq_data *d) { int irq; int l2irq; - irq = irq_map[virq].hwirq; + irq = irq_map[d->irq].hwirq; l2irq = irq & MPC52xx_IRQ_L2_MASK; io_be_clrbit(&intr->main_mask, 16 - l2irq); @@ -259,32 +259,32 @@ static void mpc52xx_main_unmask(unsigned int virq) static struct irq_chip mpc52xx_main_irqchip = { .name = "MPC52xx Main", - .mask = mpc52xx_main_mask, - .mask_ack = mpc52xx_main_mask, - .unmask = mpc52xx_main_unmask, - .set_type = mpc52xx_null_set_type, + .irq_mask = mpc52xx_main_mask, + .irq_mask_ack = mpc52xx_main_mask, + .irq_unmask = mpc52xx_main_unmask, + .irq_set_type = mpc52xx_null_set_type, }; /* * Peripherals interrupt irq_chip */ -static void mpc52xx_periph_mask(unsigned int virq) +static void mpc52xx_periph_mask(struct irq_data *d) { int irq; int l2irq; - irq = irq_map[virq].hwirq; + irq = irq_map[d->irq].hwirq; l2irq = irq & MPC52xx_IRQ_L2_MASK; io_be_setbit(&intr->per_mask, 31 - l2irq); } -static void mpc52xx_periph_unmask(unsigned int virq) +static void mpc52xx_periph_unmask(struct irq_data *d) { int irq; int l2irq; - irq = irq_map[virq].hwirq; + irq = irq_map[d->irq].hwirq; l2irq = irq & MPC52xx_IRQ_L2_MASK; io_be_clrbit(&intr->per_mask, 31 - l2irq); @@ -292,43 +292,43 @@ static void mpc52xx_periph_unmask(unsigned int virq) static struct irq_chip mpc52xx_periph_irqchip = { .name = "MPC52xx Peripherals", - .mask = mpc52xx_periph_mask, - .mask_ack = mpc52xx_periph_mask, - .unmask = mpc52xx_periph_unmask, - .set_type = mpc52xx_null_set_type, + .irq_mask = mpc52xx_periph_mask, + .irq_mask_ack = mpc52xx_periph_mask, + .irq_unmask = mpc52xx_periph_unmask, + .irq_set_type = mpc52xx_null_set_type, }; /* * SDMA interrupt irq_chip */ -static void mpc52xx_sdma_mask(unsigned int virq) +static void mpc52xx_sdma_mask(struct irq_data *d) { int irq; int l2irq; - irq = irq_map[virq].hwirq; + irq = irq_map[d->irq].hwirq; l2irq = irq & MPC52xx_IRQ_L2_MASK; io_be_setbit(&sdma->IntMask, l2irq); } -static void mpc52xx_sdma_unmask(unsigned int virq) +static void mpc52xx_sdma_unmask(struct irq_data *d) { int irq; int l2irq; - irq = irq_map[virq].hwirq; + irq = irq_map[d->irq].hwirq; l2irq = irq & MPC52xx_IRQ_L2_MASK; io_be_clrbit(&sdma->IntMask, l2irq); } -static void mpc52xx_sdma_ack(unsigned int virq) +static void 
mpc52xx_sdma_ack(struct irq_data *d) { int irq; int l2irq; - irq = irq_map[virq].hwirq; + irq = irq_map[d->irq].hwirq; l2irq = irq & MPC52xx_IRQ_L2_MASK; out_be32(&sdma->IntPend, 1 << l2irq); @@ -336,10 +336,10 @@ static void mpc52xx_sdma_ack(unsigned int virq) static struct irq_chip mpc52xx_sdma_irqchip = { .name = "MPC52xx SDMA", - .mask = mpc52xx_sdma_mask, - .unmask = mpc52xx_sdma_unmask, - .ack = mpc52xx_sdma_ack, - .set_type = mpc52xx_null_set_type, + .irq_mask = mpc52xx_sdma_mask, + .irq_unmask = mpc52xx_sdma_unmask, + .irq_ack = mpc52xx_sdma_ack, + .irq_set_type = mpc52xx_null_set_type, }; /** @@ -414,7 +414,7 @@ static int mpc52xx_irqhost_map(struct irq_host *h, unsigned int virq, else hndlr = handle_level_irq; - set_irq_chip_and_handler(virq, &mpc52xx_extirq_irqchip, hndlr); + irq_set_chip_and_handler(virq, &mpc52xx_extirq_irqchip, hndlr); pr_debug("%s: External IRQ%i virq=%x, hw=%x. type=%x\n", __func__, l2irq, virq, (int)irq, type); return 0; @@ -431,7 +431,7 @@ static int mpc52xx_irqhost_map(struct irq_host *h, unsigned int virq, return -EINVAL; } - set_irq_chip_and_handler(virq, irqchip, handle_level_irq); + irq_set_chip_and_handler(virq, irqchip, handle_level_irq); pr_debug("%s: virq=%x, l1=%i, l2=%i\n", __func__, virq, l1irq, l2irq); return 0; @@ -512,7 +512,7 @@ void __init mpc52xx_init_irq(void) /** * mpc52xx_get_irq - Get pending interrupt number hook function * - * Called by the interupt handler to determine what IRQ handler needs to be + * Called by the interrupt handler to determine what IRQ handler needs to be * executed. * * Status of pending interrupts is determined by reading the encoded status diff --git a/arch/powerpc/platforms/82xx/Makefile b/arch/powerpc/platforms/82xx/Makefile index d982793f4dbd..455fe21e37c4 100644 --- a/arch/powerpc/platforms/82xx/Makefile +++ b/arch/powerpc/platforms/82xx/Makefile @@ -6,4 +6,4 @@ obj-$(CONFIG_CPM2) += pq2.o obj-$(CONFIG_PQ2_ADS_PCI_PIC) += pq2ads-pci-pic.o obj-$(CONFIG_PQ2FADS) += pq2fads.o obj-$(CONFIG_EP8248E) += ep8248e.o -obj-$(CONFIG_MGCOGE) += mgcoge.o +obj-$(CONFIG_MGCOGE) += km82xx.o diff --git a/arch/powerpc/platforms/82xx/ep8248e.c b/arch/powerpc/platforms/82xx/ep8248e.c index 1565e0446dc8..10ff526cd046 100644 --- a/arch/powerpc/platforms/82xx/ep8248e.c +++ b/arch/powerpc/platforms/82xx/ep8248e.c @@ -111,8 +111,7 @@ static struct mdiobb_ctrl ep8248e_mdio_ctrl = { .ops = &ep8248e_mdio_ops, }; -static int __devinit ep8248e_mdio_probe(struct platform_device *ofdev, - const struct of_device_id *match) +static int __devinit ep8248e_mdio_probe(struct platform_device *ofdev) { struct mii_bus *bus; struct resource res; @@ -167,7 +166,7 @@ static const struct of_device_id ep8248e_mdio_match[] = { {}, }; -static struct of_platform_driver ep8248e_mdio_driver = { +static struct platform_driver ep8248e_mdio_driver = { .driver = { .name = "ep8248e-mdio-bitbang", .owner = THIS_MODULE, @@ -308,7 +307,7 @@ static __initdata struct of_device_id of_bus_ids[] = { static int __init declare_of_platform_devices(void) { of_platform_bus_probe(NULL, of_bus_ids, NULL); - of_register_platform_driver(&ep8248e_mdio_driver); + platform_driver_register(&ep8248e_mdio_driver); return 0; } diff --git a/arch/powerpc/platforms/82xx/mgcoge.c b/arch/powerpc/platforms/82xx/km82xx.c index 7a5de9eb3c73..428c5e0a0e75 100644 --- a/arch/powerpc/platforms/82xx/mgcoge.c +++ b/arch/powerpc/platforms/82xx/km82xx.c @@ -1,6 +1,6 @@ /* - * Keymile mgcoge support - * Copyright 2008 DENX Software Engineering GmbH + * Keymile km82xx support + * Copyright 2008-2011 
DENX Software Engineering GmbH * Author: Heiko Schocher <hs@denx.de> * * based on code from: @@ -31,9 +31,10 @@ #include "pq2.h" -static void __init mgcoge_pic_init(void) +static void __init km82xx_pic_init(void) { - struct device_node *np = of_find_compatible_node(NULL, NULL, "fsl,pq2-pic"); + struct device_node *np = of_find_compatible_node(NULL, NULL, + "fsl,pq2-pic"); if (!np) { printk(KERN_ERR "PIC init: can not find cpm-pic node\n"); return; @@ -47,12 +48,18 @@ struct cpm_pin { int port, pin, flags; }; -static __initdata struct cpm_pin mgcoge_pins[] = { +static __initdata struct cpm_pin km82xx_pins[] = { /* SMC2 */ {0, 8, CPM_PIN_INPUT | CPM_PIN_PRIMARY}, {0, 9, CPM_PIN_OUTPUT | CPM_PIN_PRIMARY}, + /* SCC1 */ + {2, 21, CPM_PIN_INPUT | CPM_PIN_PRIMARY}, + {2, 15, CPM_PIN_INPUT | CPM_PIN_PRIMARY}, + {3, 31, CPM_PIN_INPUT | CPM_PIN_PRIMARY}, + {3, 30, CPM_PIN_OUTPUT | CPM_PIN_SECONDARY}, + /* SCC4 */ {2, 25, CPM_PIN_INPUT | CPM_PIN_PRIMARY}, {2, 24, CPM_PIN_INPUT | CPM_PIN_PRIMARY}, @@ -107,30 +114,49 @@ static __initdata struct cpm_pin mgcoge_pins[] = { {3, 14, CPM_PIN_INPUT | CPM_PIN_SECONDARY | CPM_PIN_OPENDRAIN}, {3, 15, CPM_PIN_INPUT | CPM_PIN_SECONDARY | CPM_PIN_OPENDRAIN}, #endif + + /* USB */ + {0, 10, CPM_PIN_OUTPUT | CPM_PIN_GPIO}, /* FULL_SPEED */ + {0, 11, CPM_PIN_OUTPUT | CPM_PIN_GPIO}, /*/SLAVE */ + {2, 10, CPM_PIN_INPUT | CPM_PIN_PRIMARY}, /* RXN */ + {2, 11, CPM_PIN_INPUT | CPM_PIN_PRIMARY}, /* RXP */ + {2, 20, CPM_PIN_OUTPUT | CPM_PIN_PRIMARY}, /* /OE */ + {2, 27, CPM_PIN_INPUT | CPM_PIN_PRIMARY}, /* RXCLK */ + {3, 23, CPM_PIN_OUTPUT | CPM_PIN_PRIMARY}, /* TXP */ + {3, 24, CPM_PIN_OUTPUT | CPM_PIN_PRIMARY}, /* TXN */ + {3, 25, CPM_PIN_INPUT | CPM_PIN_PRIMARY}, /* RXD */ }; static void __init init_ioports(void) { int i; - for (i = 0; i < ARRAY_SIZE(mgcoge_pins); i++) { - const struct cpm_pin *pin = &mgcoge_pins[i]; + for (i = 0; i < ARRAY_SIZE(km82xx_pins); i++) { + const struct cpm_pin *pin = &km82xx_pins[i]; cpm2_set_pin(pin->port, pin->pin, pin->flags); } cpm2_smc_clk_setup(CPM_CLK_SMC2, CPM_BRG8); + cpm2_clk_setup(CPM_CLK_SCC1, CPM_CLK11, CPM_CLK_RX); + cpm2_clk_setup(CPM_CLK_SCC1, CPM_CLK11, CPM_CLK_TX); + cpm2_clk_setup(CPM_CLK_SCC3, CPM_CLK5, CPM_CLK_RTX); cpm2_clk_setup(CPM_CLK_SCC4, CPM_CLK7, CPM_CLK_RX); cpm2_clk_setup(CPM_CLK_SCC4, CPM_CLK8, CPM_CLK_TX); cpm2_clk_setup(CPM_CLK_FCC1, CPM_CLK10, CPM_CLK_RX); cpm2_clk_setup(CPM_CLK_FCC1, CPM_CLK9, CPM_CLK_TX); cpm2_clk_setup(CPM_CLK_FCC2, CPM_CLK13, CPM_CLK_RX); cpm2_clk_setup(CPM_CLK_FCC2, CPM_CLK14, CPM_CLK_TX); + + /* Force USB FULL SPEED bit to '1' */ + setbits32(&cpm2_immr->im_ioport.iop_pdata, 1 << (31 - 10)); + /* clear USB_SLAVE */ + clrbits32(&cpm2_immr->im_ioport.iop_pdata, 1 << (31 - 11)); } -static void __init mgcoge_setup_arch(void) +static void __init km82xx_setup_arch(void) { if (ppc_md.progress) - ppc_md.progress("mgcoge_setup_arch()", 0); + ppc_md.progress("km82xx_setup_arch()", 0); cpm2_reset(); @@ -142,7 +168,7 @@ static void __init mgcoge_setup_arch(void) init_ioports(); if (ppc_md.progress) - ppc_md.progress("mgcoge_setup_arch(), finish", 0); + ppc_md.progress("km82xx_setup_arch(), finish", 0); } static __initdata struct of_device_id of_bus_ids[] = { @@ -156,23 +182,23 @@ static int __init declare_of_platform_devices(void) return 0; } -machine_device_initcall(mgcoge, declare_of_platform_devices); +machine_device_initcall(km82xx, declare_of_platform_devices); /* * Called very early, device-tree isn't unflattened */ -static int __init mgcoge_probe(void) +static int __init km82xx_probe(void) { 
unsigned long root = of_get_flat_dt_root(); - return of_flat_dt_is_compatible(root, "keymile,mgcoge"); + return of_flat_dt_is_compatible(root, "keymile,km82xx"); } -define_machine(mgcoge) +define_machine(km82xx) { - .name = "Keymile MGCOGE", - .probe = mgcoge_probe, - .setup_arch = mgcoge_setup_arch, - .init_IRQ = mgcoge_pic_init, + .name = "Keymile km82xx", + .probe = km82xx_probe, + .setup_arch = km82xx_setup_arch, + .init_IRQ = km82xx_pic_init, .get_irq = cpm2_get_irq, .calibrate_decr = generic_calibrate_decr, .restart = pq2_restart, diff --git a/arch/powerpc/platforms/82xx/pq2ads-pci-pic.c b/arch/powerpc/platforms/82xx/pq2ads-pci-pic.c index 5a55d87d6bd6..4a4eb6ffa12f 100644 --- a/arch/powerpc/platforms/82xx/pq2ads-pci-pic.c +++ b/arch/powerpc/platforms/82xx/pq2ads-pci-pic.c @@ -39,10 +39,10 @@ struct pq2ads_pci_pic { #define NUM_IRQS 32 -static void pq2ads_pci_mask_irq(unsigned int virq) +static void pq2ads_pci_mask_irq(struct irq_data *d) { - struct pq2ads_pci_pic *priv = get_irq_chip_data(virq); - int irq = NUM_IRQS - virq_to_hw(virq) - 1; + struct pq2ads_pci_pic *priv = irq_data_get_irq_chip_data(d); + int irq = NUM_IRQS - virq_to_hw(d->irq) - 1; if (irq != -1) { unsigned long flags; @@ -55,10 +55,10 @@ static void pq2ads_pci_mask_irq(unsigned int virq) } } -static void pq2ads_pci_unmask_irq(unsigned int virq) +static void pq2ads_pci_unmask_irq(struct irq_data *d) { - struct pq2ads_pci_pic *priv = get_irq_chip_data(virq); - int irq = NUM_IRQS - virq_to_hw(virq) - 1; + struct pq2ads_pci_pic *priv = irq_data_get_irq_chip_data(d); + int irq = NUM_IRQS - virq_to_hw(d->irq) - 1; if (irq != -1) { unsigned long flags; @@ -71,18 +71,17 @@ static void pq2ads_pci_unmask_irq(unsigned int virq) static struct irq_chip pq2ads_pci_ic = { .name = "PQ2 ADS PCI", - .end = pq2ads_pci_unmask_irq, - .mask = pq2ads_pci_mask_irq, - .mask_ack = pq2ads_pci_mask_irq, - .ack = pq2ads_pci_mask_irq, - .unmask = pq2ads_pci_unmask_irq, - .enable = pq2ads_pci_unmask_irq, - .disable = pq2ads_pci_mask_irq + .irq_mask = pq2ads_pci_mask_irq, + .irq_mask_ack = pq2ads_pci_mask_irq, + .irq_ack = pq2ads_pci_mask_irq, + .irq_unmask = pq2ads_pci_unmask_irq, + .irq_enable = pq2ads_pci_unmask_irq, + .irq_disable = pq2ads_pci_mask_irq }; static void pq2ads_pci_irq_demux(unsigned int irq, struct irq_desc *desc) { - struct pq2ads_pci_pic *priv = desc->handler_data; + struct pq2ads_pci_pic *priv = irq_desc_get_handler_data(desc); u32 stat, mask, pend; int bit; @@ -107,17 +106,17 @@ static void pq2ads_pci_irq_demux(unsigned int irq, struct irq_desc *desc) static int pci_pic_host_map(struct irq_host *h, unsigned int virq, irq_hw_number_t hw) { - irq_to_desc(virq)->status |= IRQ_LEVEL; - set_irq_chip_data(virq, h->host_data); - set_irq_chip_and_handler(virq, &pq2ads_pci_ic, handle_level_irq); + irq_set_status_flags(virq, IRQ_LEVEL); + irq_set_chip_data(virq, h->host_data); + irq_set_chip_and_handler(virq, &pq2ads_pci_ic, handle_level_irq); return 0; } static void pci_host_unmap(struct irq_host *h, unsigned int virq) { /* remove chip and handler */ - set_irq_chip_data(virq, NULL); - set_irq_chip(virq, NULL); + irq_set_chip_data(virq, NULL); + irq_set_chip(virq, NULL); } static struct irq_host_ops pci_pic_host_ops = { @@ -176,8 +175,8 @@ int __init pq2ads_pci_init_irq(void) priv->host = host; host->host_data = priv; - set_irq_data(irq, priv); - set_irq_chained_handler(irq, pq2ads_pci_irq_demux); + irq_set_handler_data(irq, priv); + irq_set_chained_handler(irq, pq2ads_pci_irq_demux); of_node_put(np); return 0; diff --git 
a/arch/powerpc/platforms/83xx/Makefile b/arch/powerpc/platforms/83xx/Makefile index 6e8bbbbcfdf8..ed95bfcbcbff 100644 --- a/arch/powerpc/platforms/83xx/Makefile +++ b/arch/powerpc/platforms/83xx/Makefile @@ -16,4 +16,4 @@ obj-$(CONFIG_MPC837x_MDS) += mpc837x_mds.o obj-$(CONFIG_SBC834x) += sbc834x.o obj-$(CONFIG_MPC837x_RDB) += mpc837x_rdb.o obj-$(CONFIG_ASP834x) += asp834x.o -obj-$(CONFIG_KMETER1) += kmeter1.o +obj-$(CONFIG_KMETER1) += km83xx.o diff --git a/arch/powerpc/platforms/83xx/kmeter1.c b/arch/powerpc/platforms/83xx/km83xx.c index 903acfd851ac..a2b9b9ef1240 100644 --- a/arch/powerpc/platforms/83xx/kmeter1.c +++ b/arch/powerpc/platforms/83xx/km83xx.c @@ -1,5 +1,5 @@ /* - * Copyright 2008 DENX Software Engineering GmbH + * Copyright 2008-2011 DENX Software Engineering GmbH * Author: Heiko Schocher <hs@denx.de> * * Description: @@ -49,12 +49,12 @@ * Setup the architecture * */ -static void __init kmeter1_setup_arch(void) +static void __init mpc83xx_km_setup_arch(void) { struct device_node *np; if (ppc_md.progress) - ppc_md.progress("kmeter1_setup_arch()", 0); + ppc_md.progress("kmpbec83xx_setup_arch()", 0); #ifdef CONFIG_PCI for_each_compatible_node(np, "pci", "fsl,mpc8349-pci") @@ -69,6 +69,9 @@ static void __init kmeter1_setup_arch(void) par_io_init(np); of_node_put(np); + for_each_node_by_name(np, "spi") + par_io_of_config(np); + for (np = NULL; (np = of_find_node_by_name(np, "ucc")) != NULL;) par_io_of_config(np); } @@ -119,7 +122,7 @@ static void __init kmeter1_setup_arch(void) #endif /* CONFIG_QUICC_ENGINE */ } -static struct of_device_id kmeter_ids[] = { +static struct of_device_id kmpbec83xx_ids[] = { { .type = "soc", }, { .compatible = "soc", }, { .compatible = "simple-bus", }, @@ -131,13 +134,13 @@ static struct of_device_id kmeter_ids[] = { static int __init kmeter_declare_of_platform_devices(void) { /* Publish the QE devices */ - of_platform_bus_probe(NULL, kmeter_ids, NULL); + of_platform_bus_probe(NULL, kmpbec83xx_ids, NULL); return 0; } -machine_device_initcall(kmeter1, kmeter_declare_of_platform_devices); +machine_device_initcall(mpc83xx_km, kmeter_declare_of_platform_devices); -static void __init kmeter1_init_IRQ(void) +static void __init mpc83xx_km_init_IRQ(void) { struct device_node *np; @@ -168,21 +171,34 @@ static void __init kmeter1_init_IRQ(void) #endif /* CONFIG_QUICC_ENGINE */ } +/* list of the supported boards */ +static char *board[] __initdata = { + "Keymile,KMETER1", + "Keymile,kmpbec8321", + NULL +}; + /* * Called very early, MMU is off, device-tree isn't unflattened */ -static int __init kmeter1_probe(void) +static int __init mpc83xx_km_probe(void) { - unsigned long root = of_get_flat_dt_root(); + unsigned long node = of_get_flat_dt_root(); + int i = 0; - return of_flat_dt_is_compatible(root, "keymile,KMETER1"); + while (board[i]) { + if (of_flat_dt_is_compatible(node, board[i])) + break; + i++; + } + return (board[i] != NULL); } -define_machine(kmeter1) { - .name = "KMETER1", - .probe = kmeter1_probe, - .setup_arch = kmeter1_setup_arch, - .init_IRQ = kmeter1_init_IRQ, +define_machine(mpc83xx_km) { + .name = "mpc83xx-km-platform", + .probe = mpc83xx_km_probe, + .setup_arch = mpc83xx_km_setup_arch, + .init_IRQ = mpc83xx_km_init_IRQ, .get_irq = ipic_get_irq, .restart = mpc83xx_restart, .time_init = mpc83xx_time_init, diff --git a/arch/powerpc/platforms/83xx/suspend.c b/arch/powerpc/platforms/83xx/suspend.c index fd4f2f2f19e6..188272934cfb 100644 --- a/arch/powerpc/platforms/83xx/suspend.c +++ b/arch/powerpc/platforms/83xx/suspend.c @@ -318,14 +318,18 @@ 
static const struct platform_suspend_ops mpc83xx_suspend_ops = { .end = mpc83xx_suspend_end, }; -static int pmc_probe(struct platform_device *ofdev, - const struct of_device_id *match) +static int pmc_probe(struct platform_device *ofdev) { struct device_node *np = ofdev->dev.of_node; struct resource res; - struct pmc_type *type = match->data; + struct pmc_type *type; int ret = 0; + if (!ofdev->dev.of_match) + return -EINVAL; + + type = ofdev->dev.of_match->data; + if (!of_device_is_available(np)) return -ENODEV; @@ -422,7 +426,7 @@ static struct of_device_id pmc_match[] = { {} }; -static struct of_platform_driver pmc_driver = { +static struct platform_driver pmc_driver = { .driver = { .name = "mpc83xx-pmc", .owner = THIS_MODULE, @@ -434,7 +438,7 @@ static struct of_platform_driver pmc_driver = { static int pmc_init(void) { - return of_register_platform_driver(&pmc_driver); + return platform_driver_register(&pmc_driver); } module_init(pmc_init); diff --git a/arch/powerpc/platforms/85xx/ksi8560.c b/arch/powerpc/platforms/85xx/ksi8560.c index f4d36b5a2e00..c46f9359be15 100644 --- a/arch/powerpc/platforms/85xx/ksi8560.c +++ b/arch/powerpc/platforms/85xx/ksi8560.c @@ -56,12 +56,13 @@ static void machine_restart(char *cmd) static void cpm2_cascade(unsigned int irq, struct irq_desc *desc) { + struct irq_chip *chip = irq_desc_get_chip(desc); int cascade_irq; while ((cascade_irq = cpm2_get_irq()) >= 0) generic_handle_irq(cascade_irq); - desc->chip->eoi(irq); + chip->irq_eoi(&desc->irq_data); } static void __init ksi8560_pic_init(void) @@ -105,7 +106,7 @@ static void __init ksi8560_pic_init(void) cpm2_pic_init(np); of_node_put(np); - set_irq_chained_handler(irq, cpm2_cascade); + irq_set_chained_handler(irq, cpm2_cascade); #endif } diff --git a/arch/powerpc/platforms/85xx/mpc85xx_ads.c b/arch/powerpc/platforms/85xx/mpc85xx_ads.c index 9438a892afc4..3b2c9bb66199 100644 --- a/arch/powerpc/platforms/85xx/mpc85xx_ads.c +++ b/arch/powerpc/platforms/85xx/mpc85xx_ads.c @@ -50,12 +50,13 @@ static int mpc85xx_exclude_device(struct pci_controller *hose, static void cpm2_cascade(unsigned int irq, struct irq_desc *desc) { + struct irq_chip *chip = irq_desc_get_chip(desc); int cascade_irq; while ((cascade_irq = cpm2_get_irq()) >= 0) generic_handle_irq(cascade_irq); - desc->chip->eoi(irq); + chip->irq_eoi(&desc->irq_data); } #endif /* CONFIG_CPM2 */ @@ -100,7 +101,7 @@ static void __init mpc85xx_ads_pic_init(void) cpm2_pic_init(np); of_node_put(np); - set_irq_chained_handler(irq, cpm2_cascade); + irq_set_chained_handler(irq, cpm2_cascade); #endif } diff --git a/arch/powerpc/platforms/85xx/mpc85xx_cds.c b/arch/powerpc/platforms/85xx/mpc85xx_cds.c index 458d91fba91d..6299a2a51ae8 100644 --- a/arch/powerpc/platforms/85xx/mpc85xx_cds.c +++ b/arch/powerpc/platforms/85xx/mpc85xx_cds.c @@ -255,7 +255,7 @@ static int mpc85xx_cds_8259_attach(void) } /* Success. Connect our low-level cascade handler. 
*/ - set_irq_handler(cascade_irq, mpc85xx_8259_cascade_handler); + irq_set_handler(cascade_irq, mpc85xx_8259_cascade_handler); return 0; } diff --git a/arch/powerpc/platforms/85xx/mpc85xx_ds.c b/arch/powerpc/platforms/85xx/mpc85xx_ds.c index 8190bc25bf27..c7b97f70312e 100644 --- a/arch/powerpc/platforms/85xx/mpc85xx_ds.c +++ b/arch/powerpc/platforms/85xx/mpc85xx_ds.c @@ -47,12 +47,13 @@ #ifdef CONFIG_PPC_I8259 static void mpc85xx_8259_cascade(unsigned int irq, struct irq_desc *desc) { + struct irq_chip *chip = irq_desc_get_chip(desc); unsigned int cascade_irq = i8259_irq(); if (cascade_irq != NO_IRQ) { generic_handle_irq(cascade_irq); } - desc->chip->eoi(irq); + chip->irq_eoi(&desc->irq_data); } #endif /* CONFIG_PPC_I8259 */ @@ -121,7 +122,7 @@ void __init mpc85xx_ds_pic_init(void) i8259_init(cascade_node, 0); of_node_put(cascade_node); - set_irq_chained_handler(cascade_irq, mpc85xx_8259_cascade); + irq_set_chained_handler(cascade_irq, mpc85xx_8259_cascade); #endif /* CONFIG_PPC_I8259 */ } diff --git a/arch/powerpc/platforms/85xx/sbc8560.c b/arch/powerpc/platforms/85xx/sbc8560.c index a5ad1c7794bf..d2dfd465fbf6 100644 --- a/arch/powerpc/platforms/85xx/sbc8560.c +++ b/arch/powerpc/platforms/85xx/sbc8560.c @@ -41,12 +41,13 @@ static void cpm2_cascade(unsigned int irq, struct irq_desc *desc) { + struct irq_chip *chip = irq_desc_get_chip(desc); int cascade_irq; while ((cascade_irq = cpm2_get_irq()) >= 0) generic_handle_irq(cascade_irq); - desc->chip->eoi(irq); + chip->irq_eoi(&desc->irq_data); } #endif /* CONFIG_CPM2 */ @@ -91,7 +92,7 @@ static void __init sbc8560_pic_init(void) cpm2_pic_init(np); of_node_put(np); - set_irq_chained_handler(irq, cpm2_cascade); + irq_set_chained_handler(irq, cpm2_cascade); #endif } diff --git a/arch/powerpc/platforms/85xx/smp.c b/arch/powerpc/platforms/85xx/smp.c index 5c91a992f02b..0d00ff9d05a0 100644 --- a/arch/powerpc/platforms/85xx/smp.c +++ b/arch/powerpc/platforms/85xx/smp.c @@ -91,10 +91,14 @@ smp_85xx_kick_cpu(int nr) while ((__secondary_hold_acknowledge != nr) && (++n < 1000)) mdelay(1); #else + smp_generic_kick_cpu(nr); + out_be64((u64 *)(bptr_vaddr + BOOT_ENTRY_ADDR_UPPER), __pa((u64)*((unsigned long long *) generic_secondary_smp_init))); - smp_generic_kick_cpu(nr); + if (!ioremappable) + flush_dcache_range((ulong)bptr_vaddr, + (ulong)(bptr_vaddr + SIZE_BOOT_ENTRY)); #endif local_irq_restore(flags); diff --git a/arch/powerpc/platforms/85xx/socrates_fpga_pic.c b/arch/powerpc/platforms/85xx/socrates_fpga_pic.c index d48527ffc425..db864623b4ae 100644 --- a/arch/powerpc/platforms/85xx/socrates_fpga_pic.c +++ b/arch/powerpc/platforms/85xx/socrates_fpga_pic.c @@ -93,6 +93,7 @@ static inline unsigned int socrates_fpga_pic_get_irq(unsigned int irq) void socrates_fpga_pic_cascade(unsigned int irq, struct irq_desc *desc) { + struct irq_chip *chip = irq_desc_get_chip(desc); unsigned int cascade_irq; /* @@ -103,17 +104,16 @@ void socrates_fpga_pic_cascade(unsigned int irq, struct irq_desc *desc) if (cascade_irq != NO_IRQ) generic_handle_irq(cascade_irq); - desc->chip->eoi(irq); - + chip->irq_eoi(&desc->irq_data); } -static void socrates_fpga_pic_ack(unsigned int virq) +static void socrates_fpga_pic_ack(struct irq_data *d) { unsigned long flags; unsigned int hwirq, irq_line; uint32_t mask; - hwirq = socrates_fpga_irq_to_hw(virq); + hwirq = socrates_fpga_irq_to_hw(d->irq); irq_line = fpga_irqs[hwirq].irq_line; raw_spin_lock_irqsave(&socrates_fpga_pic_lock, flags); @@ -124,14 +124,14 @@ static void socrates_fpga_pic_ack(unsigned int virq) 
raw_spin_unlock_irqrestore(&socrates_fpga_pic_lock, flags); } -static void socrates_fpga_pic_mask(unsigned int virq) +static void socrates_fpga_pic_mask(struct irq_data *d) { unsigned long flags; unsigned int hwirq; int irq_line; u32 mask; - hwirq = socrates_fpga_irq_to_hw(virq); + hwirq = socrates_fpga_irq_to_hw(d->irq); irq_line = fpga_irqs[hwirq].irq_line; raw_spin_lock_irqsave(&socrates_fpga_pic_lock, flags); @@ -142,14 +142,14 @@ static void socrates_fpga_pic_mask(unsigned int virq) raw_spin_unlock_irqrestore(&socrates_fpga_pic_lock, flags); } -static void socrates_fpga_pic_mask_ack(unsigned int virq) +static void socrates_fpga_pic_mask_ack(struct irq_data *d) { unsigned long flags; unsigned int hwirq; int irq_line; u32 mask; - hwirq = socrates_fpga_irq_to_hw(virq); + hwirq = socrates_fpga_irq_to_hw(d->irq); irq_line = fpga_irqs[hwirq].irq_line; raw_spin_lock_irqsave(&socrates_fpga_pic_lock, flags); @@ -161,14 +161,14 @@ static void socrates_fpga_pic_mask_ack(unsigned int virq) raw_spin_unlock_irqrestore(&socrates_fpga_pic_lock, flags); } -static void socrates_fpga_pic_unmask(unsigned int virq) +static void socrates_fpga_pic_unmask(struct irq_data *d) { unsigned long flags; unsigned int hwirq; int irq_line; u32 mask; - hwirq = socrates_fpga_irq_to_hw(virq); + hwirq = socrates_fpga_irq_to_hw(d->irq); irq_line = fpga_irqs[hwirq].irq_line; raw_spin_lock_irqsave(&socrates_fpga_pic_lock, flags); @@ -179,14 +179,14 @@ static void socrates_fpga_pic_unmask(unsigned int virq) raw_spin_unlock_irqrestore(&socrates_fpga_pic_lock, flags); } -static void socrates_fpga_pic_eoi(unsigned int virq) +static void socrates_fpga_pic_eoi(struct irq_data *d) { unsigned long flags; unsigned int hwirq; int irq_line; u32 mask; - hwirq = socrates_fpga_irq_to_hw(virq); + hwirq = socrates_fpga_irq_to_hw(d->irq); irq_line = fpga_irqs[hwirq].irq_line; raw_spin_lock_irqsave(&socrates_fpga_pic_lock, flags); @@ -197,7 +197,7 @@ static void socrates_fpga_pic_eoi(unsigned int virq) raw_spin_unlock_irqrestore(&socrates_fpga_pic_lock, flags); } -static int socrates_fpga_pic_set_type(unsigned int virq, +static int socrates_fpga_pic_set_type(struct irq_data *d, unsigned int flow_type) { unsigned long flags; @@ -205,7 +205,7 @@ static int socrates_fpga_pic_set_type(unsigned int virq, int polarity; u32 mask; - hwirq = socrates_fpga_irq_to_hw(virq); + hwirq = socrates_fpga_irq_to_hw(d->irq); if (fpga_irqs[hwirq].type != IRQ_TYPE_NONE) return -EINVAL; @@ -233,21 +233,21 @@ static int socrates_fpga_pic_set_type(unsigned int virq, static struct irq_chip socrates_fpga_pic_chip = { .name = "FPGA-PIC", - .ack = socrates_fpga_pic_ack, - .mask = socrates_fpga_pic_mask, - .mask_ack = socrates_fpga_pic_mask_ack, - .unmask = socrates_fpga_pic_unmask, - .eoi = socrates_fpga_pic_eoi, - .set_type = socrates_fpga_pic_set_type, + .irq_ack = socrates_fpga_pic_ack, + .irq_mask = socrates_fpga_pic_mask, + .irq_mask_ack = socrates_fpga_pic_mask_ack, + .irq_unmask = socrates_fpga_pic_unmask, + .irq_eoi = socrates_fpga_pic_eoi, + .irq_set_type = socrates_fpga_pic_set_type, }; static int socrates_fpga_pic_host_map(struct irq_host *h, unsigned int virq, irq_hw_number_t hwirq) { /* All interrupts are LEVEL sensitive */ - irq_to_desc(virq)->status |= IRQ_LEVEL; - set_irq_chip_and_handler(virq, &socrates_fpga_pic_chip, - handle_fasteoi_irq); + irq_set_status_flags(virq, IRQ_LEVEL); + irq_set_chip_and_handler(virq, &socrates_fpga_pic_chip, + handle_fasteoi_irq); return 0; } @@ -308,8 +308,8 @@ void socrates_fpga_pic_init(struct device_node *pic) 
pr_warning("FPGA PIC: can't get irq%d.\n", i); continue; } - set_irq_chained_handler(socrates_fpga_irqs[i], - socrates_fpga_pic_cascade); + irq_set_chained_handler(socrates_fpga_irqs[i], + socrates_fpga_pic_cascade); } socrates_fpga_pic_iobase = of_iomap(pic, 0); diff --git a/arch/powerpc/platforms/85xx/stx_gp3.c b/arch/powerpc/platforms/85xx/stx_gp3.c index bc33d1859ae7..5387e9f06bdb 100644 --- a/arch/powerpc/platforms/85xx/stx_gp3.c +++ b/arch/powerpc/platforms/85xx/stx_gp3.c @@ -46,12 +46,13 @@ static void cpm2_cascade(unsigned int irq, struct irq_desc *desc) { + struct irq_chip *chip = irq_desc_get_chip(desc); int cascade_irq; while ((cascade_irq = cpm2_get_irq()) >= 0) generic_handle_irq(cascade_irq); - desc->chip->eoi(irq); + chip->irq_eoi(&desc->irq_data); } #endif /* CONFIG_CPM2 */ @@ -101,7 +102,7 @@ static void __init stx_gp3_pic_init(void) cpm2_pic_init(np); of_node_put(np); - set_irq_chained_handler(irq, cpm2_cascade); + irq_set_chained_handler(irq, cpm2_cascade); #endif } diff --git a/arch/powerpc/platforms/85xx/tqm85xx.c b/arch/powerpc/platforms/85xx/tqm85xx.c index 5e847d0b47c8..325de772725a 100644 --- a/arch/powerpc/platforms/85xx/tqm85xx.c +++ b/arch/powerpc/platforms/85xx/tqm85xx.c @@ -44,12 +44,13 @@ static void cpm2_cascade(unsigned int irq, struct irq_desc *desc) { + struct irq_chip *chip = irq_desc_get_chip(desc); int cascade_irq; while ((cascade_irq = cpm2_get_irq()) >= 0) generic_handle_irq(cascade_irq); - desc->chip->eoi(irq); + chip->irq_eoi(&desc->irq_data); } #endif /* CONFIG_CPM2 */ @@ -99,7 +100,7 @@ static void __init tqm85xx_pic_init(void) cpm2_pic_init(np); of_node_put(np); - set_irq_chained_handler(irq, cpm2_cascade); + irq_set_chained_handler(irq, cpm2_cascade); #endif } diff --git a/arch/powerpc/platforms/86xx/gef_pic.c b/arch/powerpc/platforms/86xx/gef_pic.c index 6df9e2561c06..0beec7d5566b 100644 --- a/arch/powerpc/platforms/86xx/gef_pic.c +++ b/arch/powerpc/platforms/86xx/gef_pic.c @@ -95,6 +95,7 @@ static int gef_pic_cascade_irq; void gef_pic_cascade(unsigned int irq, struct irq_desc *desc) { + struct irq_chip *chip = irq_desc_get_chip(desc); unsigned int cascade_irq; /* @@ -106,17 +107,16 @@ void gef_pic_cascade(unsigned int irq, struct irq_desc *desc) if (cascade_irq != NO_IRQ) generic_handle_irq(cascade_irq); - desc->chip->eoi(irq); - + chip->irq_eoi(&desc->irq_data); } -static void gef_pic_mask(unsigned int virq) +static void gef_pic_mask(struct irq_data *d) { unsigned long flags; unsigned int hwirq; u32 mask; - hwirq = gef_irq_to_hw(virq); + hwirq = gef_irq_to_hw(d->irq); raw_spin_lock_irqsave(&gef_pic_lock, flags); mask = in_be32(gef_pic_irq_reg_base + GEF_PIC_INTR_MASK(0)); @@ -125,21 +125,21 @@ static void gef_pic_mask(unsigned int virq) raw_spin_unlock_irqrestore(&gef_pic_lock, flags); } -static void gef_pic_mask_ack(unsigned int virq) +static void gef_pic_mask_ack(struct irq_data *d) { /* Don't think we actually have to do anything to ack an interrupt, * we just need to clear down the devices interrupt and it will go away */ - gef_pic_mask(virq); + gef_pic_mask(d); } -static void gef_pic_unmask(unsigned int virq) +static void gef_pic_unmask(struct irq_data *d) { unsigned long flags; unsigned int hwirq; u32 mask; - hwirq = gef_irq_to_hw(virq); + hwirq = gef_irq_to_hw(d->irq); raw_spin_lock_irqsave(&gef_pic_lock, flags); mask = in_be32(gef_pic_irq_reg_base + GEF_PIC_INTR_MASK(0)); @@ -150,9 +150,9 @@ static void gef_pic_unmask(unsigned int virq) static struct irq_chip gef_pic_chip = { .name = "gefp", - .mask = gef_pic_mask, - .mask_ack = 
gef_pic_mask_ack, - .unmask = gef_pic_unmask, + .irq_mask = gef_pic_mask, + .irq_mask_ack = gef_pic_mask_ack, + .irq_unmask = gef_pic_unmask, }; @@ -163,8 +163,8 @@ static int gef_pic_host_map(struct irq_host *h, unsigned int virq, irq_hw_number_t hwirq) { /* All interrupts are LEVEL sensitive */ - irq_to_desc(virq)->status |= IRQ_LEVEL; - set_irq_chip_and_handler(virq, &gef_pic_chip, handle_level_irq); + irq_set_status_flags(virq, IRQ_LEVEL); + irq_set_chip_and_handler(virq, &gef_pic_chip, handle_level_irq); return 0; } @@ -225,7 +225,7 @@ void __init gef_pic_init(struct device_node *np) return; /* Chain with parent controller */ - set_irq_chained_handler(gef_pic_cascade_irq, gef_pic_cascade); + irq_set_chained_handler(gef_pic_cascade_irq, gef_pic_cascade); } /* diff --git a/arch/powerpc/platforms/86xx/pic.c b/arch/powerpc/platforms/86xx/pic.c index 668275d9e668..8ef8960abda6 100644 --- a/arch/powerpc/platforms/86xx/pic.c +++ b/arch/powerpc/platforms/86xx/pic.c @@ -19,10 +19,13 @@ #ifdef CONFIG_PPC_I8259 static void mpc86xx_8259_cascade(unsigned int irq, struct irq_desc *desc) { + struct irq_chip *chip = irq_desc_get_chip(desc); unsigned int cascade_irq = i8259_irq(); + if (cascade_irq != NO_IRQ) generic_handle_irq(cascade_irq); - desc->chip->eoi(irq); + + chip->irq_eoi(&desc->irq_data); } #endif /* CONFIG_PPC_I8259 */ @@ -74,6 +77,6 @@ void __init mpc86xx_init_irq(void) i8259_init(cascade_node, 0); of_node_put(cascade_node); - set_irq_chained_handler(cascade_irq, mpc86xx_8259_cascade); + irq_set_chained_handler(cascade_irq, mpc86xx_8259_cascade); #endif } diff --git a/arch/powerpc/platforms/8xx/Kconfig b/arch/powerpc/platforms/8xx/Kconfig index dd35ce081cff..ee56a9ea6a79 100644 --- a/arch/powerpc/platforms/8xx/Kconfig +++ b/arch/powerpc/platforms/8xx/Kconfig @@ -49,12 +49,6 @@ config PPC_ADDER875 This enables support for the Analogue & Micro Adder 875 board. -config PPC_MGSUVD - bool "MGSUVD" - select CPM1 - help - This enables support for the Keymile MGSUVD board. - config TQM8XX bool "TQM8XX" select CPM1 diff --git a/arch/powerpc/platforms/8xx/Makefile b/arch/powerpc/platforms/8xx/Makefile index a491fe6b94fc..76a81c3350a8 100644 --- a/arch/powerpc/platforms/8xx/Makefile +++ b/arch/powerpc/platforms/8xx/Makefile @@ -6,5 +6,4 @@ obj-$(CONFIG_MPC885ADS) += mpc885ads_setup.o obj-$(CONFIG_MPC86XADS) += mpc86xads_setup.o obj-$(CONFIG_PPC_EP88XC) += ep88xc.o obj-$(CONFIG_PPC_ADDER875) += adder875.o -obj-$(CONFIG_PPC_MGSUVD) += mgsuvd.o obj-$(CONFIG_TQM8XX) += tqm8xx_setup.o diff --git a/arch/powerpc/platforms/8xx/m8xx_setup.c b/arch/powerpc/platforms/8xx/m8xx_setup.c index 60168c1f98fe..9ecce995dd4b 100644 --- a/arch/powerpc/platforms/8xx/m8xx_setup.c +++ b/arch/powerpc/platforms/8xx/m8xx_setup.c @@ -218,15 +218,20 @@ void mpc8xx_restart(char *cmd) static void cpm_cascade(unsigned int irq, struct irq_desc *desc) { + struct irq_chip *chip; int cascade_irq; if ((cascade_irq = cpm_get_irq()) >= 0) { struct irq_desc *cdesc = irq_to_desc(cascade_irq); generic_handle_irq(cascade_irq); - cdesc->chip->eoi(cascade_irq); + + chip = irq_desc_get_chip(cdesc); + chip->irq_eoi(&cdesc->irq_data); } - desc->chip->eoi(irq); + + chip = irq_desc_get_chip(desc); + chip->irq_eoi(&desc->irq_data); } /* Initialize the internal interrupt controllers. 
The number of @@ -246,5 +251,5 @@ void __init mpc8xx_pics_init(void) irq = cpm_pic_init(); if (irq != NO_IRQ) - set_irq_chained_handler(irq, cpm_cascade); + irq_set_chained_handler(irq, cpm_cascade); } diff --git a/arch/powerpc/platforms/8xx/mgsuvd.c b/arch/powerpc/platforms/8xx/mgsuvd.c deleted file mode 100644 index ca3cb071772c..000000000000 --- a/arch/powerpc/platforms/8xx/mgsuvd.c +++ /dev/null @@ -1,92 +0,0 @@ -/* - * - * Platform setup for the Keymile mgsuvd board - * - * Heiko Schocher <hs@denx.de> - * - * Copyright 2008 DENX Software Engineering GmbH - * - * This file is licensed under the terms of the GNU General Public License - * version 2. This program is licensed "as is" without any warranty of any - * kind, whether express or implied. - */ - -#include <linux/ioport.h> -#include <linux/of_platform.h> - -#include <asm/io.h> -#include <asm/machdep.h> -#include <asm/processor.h> -#include <asm/cpm1.h> -#include <asm/prom.h> -#include <asm/fs_pd.h> - -#include "mpc8xx.h" - -struct cpm_pin { - int port, pin, flags; -}; - -static __initdata struct cpm_pin mgsuvd_pins[] = { - /* SMC1 */ - {CPM_PORTB, 24, CPM_PIN_INPUT}, /* RX */ - {CPM_PORTB, 25, CPM_PIN_INPUT | CPM_PIN_SECONDARY}, /* TX */ - - /* SCC3 */ - {CPM_PORTA, 10, CPM_PIN_INPUT}, - {CPM_PORTA, 11, CPM_PIN_INPUT}, - {CPM_PORTA, 3, CPM_PIN_INPUT}, - {CPM_PORTA, 2, CPM_PIN_INPUT}, - {CPM_PORTC, 13, CPM_PIN_INPUT}, -}; - -static void __init init_ioports(void) -{ - int i; - - for (i = 0; i < ARRAY_SIZE(mgsuvd_pins); i++) { - struct cpm_pin *pin = &mgsuvd_pins[i]; - cpm1_set_pin(pin->port, pin->pin, pin->flags); - } - - setbits16(&mpc8xx_immr->im_ioport.iop_pcso, 0x300); - cpm1_clk_setup(CPM_CLK_SCC3, CPM_CLK5, CPM_CLK_RX); - cpm1_clk_setup(CPM_CLK_SCC3, CPM_CLK6, CPM_CLK_TX); - cpm1_clk_setup(CPM_CLK_SMC1, CPM_BRG1, CPM_CLK_RTX); -} - -static void __init mgsuvd_setup_arch(void) -{ - cpm_reset(); - init_ioports(); -} - -static __initdata struct of_device_id of_bus_ids[] = { - { .compatible = "simple-bus" }, - {}, -}; - -static int __init declare_of_platform_devices(void) -{ - of_platform_bus_probe(NULL, of_bus_ids, NULL); - return 0; -} -machine_device_initcall(mgsuvd, declare_of_platform_devices); - -static int __init mgsuvd_probe(void) -{ - unsigned long root = of_get_flat_dt_root(); - return of_flat_dt_is_compatible(root, "keymile,mgsuvd"); -} - -define_machine(mgsuvd) { - .name = "MGSUVD", - .probe = mgsuvd_probe, - .setup_arch = mgsuvd_setup_arch, - .init_IRQ = mpc8xx_pics_init, - .get_irq = mpc8xx_get_irq, - .restart = mpc8xx_restart, - .calibrate_decr = mpc8xx_calibrate_decr, - .set_rtc_time = mpc8xx_set_rtc_time, - .get_rtc_time = mpc8xx_get_rtc_time, -}; diff --git a/arch/powerpc/platforms/Kconfig b/arch/powerpc/platforms/Kconfig index 20576829eca5..f7b07720aa30 100644 --- a/arch/powerpc/platforms/Kconfig +++ b/arch/powerpc/platforms/Kconfig @@ -46,7 +46,7 @@ config PPC_OF_BOOT_TRAMPOLINE help Support from booting from Open Firmware or yaboot using an Open Firmware client interface. This enables the kernel to - communicate with open firmware to retrieve system informations + communicate with open firmware to retrieve system information such as the device tree. 
In case of doubt, say Y diff --git a/arch/powerpc/platforms/cell/Kconfig b/arch/powerpc/platforms/cell/Kconfig index 48cd7d2e1b75..81239ebed83f 100644 --- a/arch/powerpc/platforms/cell/Kconfig +++ b/arch/powerpc/platforms/cell/Kconfig @@ -9,6 +9,7 @@ config PPC_CELL_COMMON select PPC_INDIRECT_IO select PPC_NATIVE select PPC_RTAS + select IRQ_EDGE_EOI_HANDLER config PPC_CELL_NATIVE bool diff --git a/arch/powerpc/platforms/cell/axon_msi.c b/arch/powerpc/platforms/cell/axon_msi.c index e3e379c6caa7..bb5ebf8fa80b 100644 --- a/arch/powerpc/platforms/cell/axon_msi.c +++ b/arch/powerpc/platforms/cell/axon_msi.c @@ -93,7 +93,8 @@ static void msic_dcr_write(struct axon_msic *msic, unsigned int dcr_n, u32 val) static void axon_msi_cascade(unsigned int irq, struct irq_desc *desc) { - struct axon_msic *msic = get_irq_data(irq); + struct irq_chip *chip = irq_desc_get_chip(desc); + struct axon_msic *msic = irq_get_handler_data(irq); u32 write_offset, msi; int idx; int retry = 0; @@ -145,7 +146,7 @@ static void axon_msi_cascade(unsigned int irq, struct irq_desc *desc) msic->read_offset &= MSIC_FIFO_SIZE_MASK; } - desc->chip->eoi(irq); + chip->irq_eoi(&desc->irq_data); } static struct axon_msic *find_msi_translator(struct pci_dev *dev) @@ -286,7 +287,7 @@ static int axon_msi_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) } dev_dbg(&dev->dev, "axon_msi: allocated virq 0x%x\n", virq); - set_irq_msi(virq, entry); + irq_set_msi_desc(virq, entry); msg.data = virq; write_msi_msg(virq, &msg); } @@ -304,7 +305,7 @@ static void axon_msi_teardown_msi_irqs(struct pci_dev *dev) if (entry->irq == NO_IRQ) continue; - set_irq_msi(entry->irq, NULL); + irq_set_msi_desc(entry->irq, NULL); irq_dispose_mapping(entry->irq); } } @@ -319,7 +320,7 @@ static struct irq_chip msic_irq_chip = { static int msic_host_map(struct irq_host *h, unsigned int virq, irq_hw_number_t hw) { - set_irq_chip_and_handler(virq, &msic_irq_chip, handle_simple_irq); + irq_set_chip_and_handler(virq, &msic_irq_chip, handle_simple_irq); return 0; } @@ -328,7 +329,7 @@ static struct irq_host_ops msic_host_ops = { .map = msic_host_map, }; -static int axon_msi_shutdown(struct platform_device *device) +static void axon_msi_shutdown(struct platform_device *device) { struct axon_msic *msic = dev_get_drvdata(&device->dev); u32 tmp; @@ -338,12 +339,9 @@ static int axon_msi_shutdown(struct platform_device *device) tmp = dcr_read(msic->dcr_host, MSIC_CTRL_REG); tmp &= ~MSIC_CTRL_ENABLE & ~MSIC_CTRL_IRQ_ENABLE; msic_dcr_write(msic, MSIC_CTRL_REG, tmp); - - return 0; } -static int axon_msi_probe(struct platform_device *device, - const struct of_device_id *device_id) +static int axon_msi_probe(struct platform_device *device) { struct device_node *dn = device->dev.of_node; struct axon_msic *msic; @@ -402,8 +400,8 @@ static int axon_msi_probe(struct platform_device *device, msic->irq_host->host_data = msic; - set_irq_data(virq, msic); - set_irq_chained_handler(virq, axon_msi_cascade); + irq_set_handler_data(virq, msic); + irq_set_chained_handler(virq, axon_msi_cascade); pr_devel("axon_msi: irq 0x%x setup for axon_msi\n", virq); /* Enable the MSIC hardware */ @@ -446,7 +444,7 @@ static const struct of_device_id axon_msi_device_id[] = { {} }; -static struct of_platform_driver axon_msi_driver = { +static struct platform_driver axon_msi_driver = { .probe = axon_msi_probe, .shutdown = axon_msi_shutdown, .driver = { @@ -458,7 +456,7 @@ static struct of_platform_driver axon_msi_driver = { static int __init axon_msi_init(void) { - return 
of_register_platform_driver(&axon_msi_driver); + return platform_driver_register(&axon_msi_driver); } subsys_initcall(axon_msi_init); diff --git a/arch/powerpc/platforms/cell/beat_interrupt.c b/arch/powerpc/platforms/cell/beat_interrupt.c index 682af97321a8..4cb9e147c307 100644 --- a/arch/powerpc/platforms/cell/beat_interrupt.c +++ b/arch/powerpc/platforms/cell/beat_interrupt.c @@ -61,59 +61,59 @@ static inline void beatic_update_irq_mask(unsigned int irq_plug) panic("Failed to set mask IRQ!"); } -static void beatic_mask_irq(unsigned int irq_plug) +static void beatic_mask_irq(struct irq_data *d) { unsigned long flags; raw_spin_lock_irqsave(&beatic_irq_mask_lock, flags); - beatic_irq_mask_enable[irq_plug/64] &= ~(1UL << (63 - (irq_plug%64))); - beatic_update_irq_mask(irq_plug); + beatic_irq_mask_enable[d->irq/64] &= ~(1UL << (63 - (d->irq%64))); + beatic_update_irq_mask(d->irq); raw_spin_unlock_irqrestore(&beatic_irq_mask_lock, flags); } -static void beatic_unmask_irq(unsigned int irq_plug) +static void beatic_unmask_irq(struct irq_data *d) { unsigned long flags; raw_spin_lock_irqsave(&beatic_irq_mask_lock, flags); - beatic_irq_mask_enable[irq_plug/64] |= 1UL << (63 - (irq_plug%64)); - beatic_update_irq_mask(irq_plug); + beatic_irq_mask_enable[d->irq/64] |= 1UL << (63 - (d->irq%64)); + beatic_update_irq_mask(d->irq); raw_spin_unlock_irqrestore(&beatic_irq_mask_lock, flags); } -static void beatic_ack_irq(unsigned int irq_plug) +static void beatic_ack_irq(struct irq_data *d) { unsigned long flags; raw_spin_lock_irqsave(&beatic_irq_mask_lock, flags); - beatic_irq_mask_ack[irq_plug/64] &= ~(1UL << (63 - (irq_plug%64))); - beatic_update_irq_mask(irq_plug); + beatic_irq_mask_ack[d->irq/64] &= ~(1UL << (63 - (d->irq%64))); + beatic_update_irq_mask(d->irq); raw_spin_unlock_irqrestore(&beatic_irq_mask_lock, flags); } -static void beatic_end_irq(unsigned int irq_plug) +static void beatic_end_irq(struct irq_data *d) { s64 err; unsigned long flags; - err = beat_downcount_of_interrupt(irq_plug); + err = beat_downcount_of_interrupt(d->irq); if (err != 0) { if ((err & 0xFFFFFFFF) != 0xFFFFFFF5) /* -11: wrong state */ panic("Failed to downcount IRQ! 
Error = %16llx", err); - printk(KERN_ERR "IRQ over-downcounted, plug %d\n", irq_plug); + printk(KERN_ERR "IRQ over-downcounted, plug %d\n", d->irq); } raw_spin_lock_irqsave(&beatic_irq_mask_lock, flags); - beatic_irq_mask_ack[irq_plug/64] |= 1UL << (63 - (irq_plug%64)); - beatic_update_irq_mask(irq_plug); + beatic_irq_mask_ack[d->irq/64] |= 1UL << (63 - (d->irq%64)); + beatic_update_irq_mask(d->irq); raw_spin_unlock_irqrestore(&beatic_irq_mask_lock, flags); } static struct irq_chip beatic_pic = { .name = "CELL-BEAT", - .unmask = beatic_unmask_irq, - .mask = beatic_mask_irq, - .eoi = beatic_end_irq, + .irq_unmask = beatic_unmask_irq, + .irq_mask = beatic_mask_irq, + .irq_eoi = beatic_end_irq, }; /* @@ -136,15 +136,14 @@ static void beatic_pic_host_unmap(struct irq_host *h, unsigned int virq) static int beatic_pic_host_map(struct irq_host *h, unsigned int virq, irq_hw_number_t hw) { - struct irq_desc *desc = irq_to_desc(virq); int64_t err; err = beat_construct_and_connect_irq_plug(virq, hw); if (err < 0) return -EIO; - desc->status |= IRQ_LEVEL; - set_irq_chip_and_handler(virq, &beatic_pic, handle_fasteoi_irq); + irq_set_status_flags(virq, IRQ_LEVEL); + irq_set_chip_and_handler(virq, &beatic_pic, handle_fasteoi_irq); return 0; } @@ -232,7 +231,7 @@ unsigned int beatic_get_irq(void) ret = beatic_get_irq_plug(); if (ret != NO_IRQ) - beatic_ack_irq(ret); + beatic_ack_irq(irq_get_irq_data(ret)); return ret; } diff --git a/arch/powerpc/platforms/cell/interrupt.c b/arch/powerpc/platforms/cell/interrupt.c index 10eb1a443626..44cfd1bef89b 100644 --- a/arch/powerpc/platforms/cell/interrupt.c +++ b/arch/powerpc/platforms/cell/interrupt.c @@ -72,15 +72,15 @@ static irq_hw_number_t iic_pending_to_hwnum(struct cbe_iic_pending_bits bits) return (node << IIC_IRQ_NODE_SHIFT) | (class << 4) | unit; } -static void iic_mask(unsigned int irq) +static void iic_mask(struct irq_data *d) { } -static void iic_unmask(unsigned int irq) +static void iic_unmask(struct irq_data *d) { } -static void iic_eoi(unsigned int irq) +static void iic_eoi(struct irq_data *d) { struct iic *iic = &__get_cpu_var(cpu_iic); out_be64(&iic->regs->prio, iic->eoi_stack[--iic->eoi_ptr]); @@ -89,19 +89,21 @@ static void iic_eoi(unsigned int irq) static struct irq_chip iic_chip = { .name = "CELL-IIC", - .mask = iic_mask, - .unmask = iic_unmask, - .eoi = iic_eoi, + .irq_mask = iic_mask, + .irq_unmask = iic_unmask, + .irq_eoi = iic_eoi, }; -static void iic_ioexc_eoi(unsigned int irq) +static void iic_ioexc_eoi(struct irq_data *d) { } static void iic_ioexc_cascade(unsigned int irq, struct irq_desc *desc) { - struct cbe_iic_regs __iomem *node_iic = (void __iomem *)desc->handler_data; + struct irq_chip *chip = irq_desc_get_chip(desc); + struct cbe_iic_regs __iomem *node_iic = + (void __iomem *)irq_desc_get_handler_data(desc); unsigned int base = (irq & 0xffffff00) | IIC_IRQ_TYPE_IOEXC; unsigned long bits, ack; int cascade; @@ -128,15 +130,15 @@ static void iic_ioexc_cascade(unsigned int irq, struct irq_desc *desc) if (ack) out_be64(&node_iic->iic_is, ack); } - desc->chip->eoi(irq); + chip->irq_eoi(&desc->irq_data); } static struct irq_chip iic_ioexc_chip = { .name = "CELL-IOEX", - .mask = iic_mask, - .unmask = iic_unmask, - .eoi = iic_ioexc_eoi, + .irq_mask = iic_mask, + .irq_unmask = iic_unmask, + .irq_eoi = iic_ioexc_eoi, }; /* Get an IRQ number from the pending state register of the IIC */ @@ -233,65 +235,19 @@ static int iic_host_match(struct irq_host *h, struct device_node *node) "IBM,CBEA-Internal-Interrupt-Controller"); } -extern int 
noirqdebug; - -static void handle_iic_irq(unsigned int irq, struct irq_desc *desc) -{ - raw_spin_lock(&desc->lock); - - desc->status &= ~(IRQ_REPLAY | IRQ_WAITING); - - /* - * If we're currently running this IRQ, or its disabled, - * we shouldn't process the IRQ. Mark it pending, handle - * the necessary masking and go out - */ - if (unlikely((desc->status & (IRQ_INPROGRESS | IRQ_DISABLED)) || - !desc->action)) { - desc->status |= IRQ_PENDING; - goto out_eoi; - } - - kstat_incr_irqs_this_cpu(irq, desc); - - /* Mark the IRQ currently in progress.*/ - desc->status |= IRQ_INPROGRESS; - - do { - struct irqaction *action = desc->action; - irqreturn_t action_ret; - - if (unlikely(!action)) - goto out_eoi; - - desc->status &= ~IRQ_PENDING; - raw_spin_unlock(&desc->lock); - action_ret = handle_IRQ_event(irq, action); - if (!noirqdebug) - note_interrupt(irq, desc, action_ret); - raw_spin_lock(&desc->lock); - - } while ((desc->status & (IRQ_PENDING | IRQ_DISABLED)) == IRQ_PENDING); - - desc->status &= ~IRQ_INPROGRESS; -out_eoi: - desc->chip->eoi(irq); - raw_spin_unlock(&desc->lock); -} - static int iic_host_map(struct irq_host *h, unsigned int virq, irq_hw_number_t hw) { switch (hw & IIC_IRQ_TYPE_MASK) { case IIC_IRQ_TYPE_IPI: - set_irq_chip_and_handler(virq, &iic_chip, handle_percpu_irq); + irq_set_chip_and_handler(virq, &iic_chip, handle_percpu_irq); break; case IIC_IRQ_TYPE_IOEXC: - set_irq_chip_and_handler(virq, &iic_ioexc_chip, - handle_iic_irq); + irq_set_chip_and_handler(virq, &iic_ioexc_chip, + handle_edge_eoi_irq); break; default: - set_irq_chip_and_handler(virq, &iic_chip, handle_iic_irq); + irq_set_chip_and_handler(virq, &iic_chip, handle_edge_eoi_irq); } return 0; } @@ -408,8 +364,8 @@ static int __init setup_iic(void) * irq_data is a generic pointer that gets passed back * to us later, so the forced cast is fine. 
*/ - set_irq_data(cascade, (void __force *)node_iic); - set_irq_chained_handler(cascade , iic_ioexc_cascade); + irq_set_handler_data(cascade, (void __force *)node_iic); + irq_set_chained_handler(cascade, iic_ioexc_cascade); out_be64(&node_iic->iic_ir, (1 << 12) /* priority */ | (node << 4) /* dest node */ | diff --git a/arch/powerpc/platforms/cell/setup.c b/arch/powerpc/platforms/cell/setup.c index 691995761b3d..fd57bfe00edf 100644 --- a/arch/powerpc/platforms/cell/setup.c +++ b/arch/powerpc/platforms/cell/setup.c @@ -187,13 +187,15 @@ machine_subsys_initcall(cell, cell_publish_devices); static void cell_mpic_cascade(unsigned int irq, struct irq_desc *desc) { - struct mpic *mpic = desc->handler_data; + struct irq_chip *chip = irq_desc_get_chip(desc); + struct mpic *mpic = irq_desc_get_handler_data(desc); unsigned int virq; virq = mpic_get_one_irq(mpic); if (virq != NO_IRQ) generic_handle_irq(virq); - desc->chip->eoi(irq); + + chip->irq_eoi(&desc->irq_data); } static void __init mpic_init_IRQ(void) @@ -221,8 +223,8 @@ static void __init mpic_init_IRQ(void) printk(KERN_INFO "%s : hooking up to IRQ %d\n", dn->full_name, virq); - set_irq_data(virq, mpic); - set_irq_chained_handler(virq, cell_mpic_cascade); + irq_set_handler_data(virq, mpic); + irq_set_chained_handler(virq, cell_mpic_cascade); } } diff --git a/arch/powerpc/platforms/cell/spider-pic.c b/arch/powerpc/platforms/cell/spider-pic.c index 3f2e557344a3..c5cf50e6b45a 100644 --- a/arch/powerpc/platforms/cell/spider-pic.c +++ b/arch/powerpc/platforms/cell/spider-pic.c @@ -79,30 +79,30 @@ static void __iomem *spider_get_irq_config(struct spider_pic *pic, return pic->regs + TIR_CFGA + 8 * src; } -static void spider_unmask_irq(unsigned int virq) +static void spider_unmask_irq(struct irq_data *d) { - struct spider_pic *pic = spider_virq_to_pic(virq); - void __iomem *cfg = spider_get_irq_config(pic, irq_map[virq].hwirq); + struct spider_pic *pic = spider_virq_to_pic(d->irq); + void __iomem *cfg = spider_get_irq_config(pic, irq_map[d->irq].hwirq); out_be32(cfg, in_be32(cfg) | 0x30000000u); } -static void spider_mask_irq(unsigned int virq) +static void spider_mask_irq(struct irq_data *d) { - struct spider_pic *pic = spider_virq_to_pic(virq); - void __iomem *cfg = spider_get_irq_config(pic, irq_map[virq].hwirq); + struct spider_pic *pic = spider_virq_to_pic(d->irq); + void __iomem *cfg = spider_get_irq_config(pic, irq_map[d->irq].hwirq); out_be32(cfg, in_be32(cfg) & ~0x30000000u); } -static void spider_ack_irq(unsigned int virq) +static void spider_ack_irq(struct irq_data *d) { - struct spider_pic *pic = spider_virq_to_pic(virq); - unsigned int src = irq_map[virq].hwirq; + struct spider_pic *pic = spider_virq_to_pic(d->irq); + unsigned int src = irq_map[d->irq].hwirq; /* Reset edge detection logic if necessary */ - if (irq_to_desc(virq)->status & IRQ_LEVEL) + if (irqd_is_level_type(d)) return; /* Only interrupts 47 to 50 can be set to edge */ @@ -113,13 +113,12 @@ static void spider_ack_irq(unsigned int virq) out_be32(pic->regs + TIR_EDC, 0x100 | (src & 0xf)); } -static int spider_set_irq_type(unsigned int virq, unsigned int type) +static int spider_set_irq_type(struct irq_data *d, unsigned int type) { unsigned int sense = type & IRQ_TYPE_SENSE_MASK; - struct spider_pic *pic = spider_virq_to_pic(virq); - unsigned int hw = irq_map[virq].hwirq; + struct spider_pic *pic = spider_virq_to_pic(d->irq); + unsigned int hw = irq_map[d->irq].hwirq; void __iomem *cfg = spider_get_irq_config(pic, hw); - struct irq_desc *desc = irq_to_desc(virq); u32 
old_mask; u32 ic; @@ -147,12 +146,6 @@ static int spider_set_irq_type(unsigned int virq, unsigned int type) return -EINVAL; } - /* Update irq_desc */ - desc->status &= ~(IRQ_TYPE_SENSE_MASK | IRQ_LEVEL); - desc->status |= type & IRQ_TYPE_SENSE_MASK; - if (type & (IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW)) - desc->status |= IRQ_LEVEL; - /* Configure the source. One gross hack that was there before and * that I've kept around is the priority to the BE which I set to * be the same as the interrupt source number. I don't know wether @@ -169,19 +162,19 @@ static int spider_set_irq_type(unsigned int virq, unsigned int type) static struct irq_chip spider_pic = { .name = "SPIDER", - .unmask = spider_unmask_irq, - .mask = spider_mask_irq, - .ack = spider_ack_irq, - .set_type = spider_set_irq_type, + .irq_unmask = spider_unmask_irq, + .irq_mask = spider_mask_irq, + .irq_ack = spider_ack_irq, + .irq_set_type = spider_set_irq_type, }; static int spider_host_map(struct irq_host *h, unsigned int virq, irq_hw_number_t hw) { - set_irq_chip_and_handler(virq, &spider_pic, handle_level_irq); + irq_set_chip_and_handler(virq, &spider_pic, handle_level_irq); /* Set default irq type */ - set_irq_type(virq, IRQ_TYPE_NONE); + irq_set_irq_type(virq, IRQ_TYPE_NONE); return 0; } @@ -207,7 +200,8 @@ static struct irq_host_ops spider_host_ops = { static void spider_irq_cascade(unsigned int irq, struct irq_desc *desc) { - struct spider_pic *pic = desc->handler_data; + struct irq_chip *chip = irq_desc_get_chip(desc); + struct spider_pic *pic = irq_desc_get_handler_data(desc); unsigned int cs, virq; cs = in_be32(pic->regs + TIR_CS) >> 24; @@ -215,9 +209,11 @@ static void spider_irq_cascade(unsigned int irq, struct irq_desc *desc) virq = NO_IRQ; else virq = irq_linear_revmap(pic->host, cs); + if (virq != NO_IRQ) generic_handle_irq(virq); - desc->chip->eoi(irq); + + chip->irq_eoi(&desc->irq_data); } /* For hooking up the cascace we have a problem. Our device-tree is @@ -325,8 +321,8 @@ static void __init spider_init_one(struct device_node *of_node, int chip, virq = spider_find_cascade_and_node(pic); if (virq == NO_IRQ) return; - set_irq_data(virq, pic); - set_irq_chained_handler(virq, spider_irq_cascade); + irq_set_handler_data(virq, pic); + irq_set_chained_handler(virq, spider_irq_cascade); printk(KERN_INFO "spider_pic: node %d, addr: 0x%lx %s\n", pic->node_id, addr, of_node->full_name); diff --git a/arch/powerpc/platforms/cell/spufs/lscsa_alloc.c b/arch/powerpc/platforms/cell/spufs/lscsa_alloc.c index 3b894f585280..147069938cfe 100644 --- a/arch/powerpc/platforms/cell/spufs/lscsa_alloc.c +++ b/arch/powerpc/platforms/cell/spufs/lscsa_alloc.c @@ -90,7 +90,7 @@ int spu_alloc_lscsa(struct spu_state *csa) */ for (i = 0; i < SPU_LSCSA_NUM_BIG_PAGES; i++) { /* XXX This is likely to fail, we should use a special pool - * similiar to what hugetlbfs does. + * similar to what hugetlbfs does. 
*/ csa->lscsa_pages[i] = alloc_pages(GFP_KERNEL, SPU_64K_PAGE_ORDER); diff --git a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c index 0b0466284932..65203857b0ce 100644 --- a/arch/powerpc/platforms/cell/spufs/sched.c +++ b/arch/powerpc/platforms/cell/spufs/sched.c @@ -846,7 +846,7 @@ static struct spu_context *grab_runnable_context(int prio, int node) struct list_head *rq = &spu_prio->runq[best]; list_for_each_entry(ctx, rq, rq) { - /* XXX(hch): check for affinity here aswell */ + /* XXX(hch): check for affinity here as well */ if (__node_allowed(ctx, node)) { __spu_del_from_rq(ctx); goto found; diff --git a/arch/powerpc/platforms/cell/spufs/spu_restore.c b/arch/powerpc/platforms/cell/spufs/spu_restore.c index 21a9c952d88b..72c905f1ee7a 100644 --- a/arch/powerpc/platforms/cell/spufs/spu_restore.c +++ b/arch/powerpc/platforms/cell/spufs/spu_restore.c @@ -284,7 +284,7 @@ static inline void restore_complete(void) exit_instrs[3] = BR_INSTR; break; default: - /* SPU_Status[R]=1. No additonal instructions. */ + /* SPU_Status[R]=1. No additional instructions. */ break; } spu_sync(); diff --git a/arch/powerpc/platforms/cell/spufs/syscalls.c b/arch/powerpc/platforms/cell/spufs/syscalls.c index 187a7d32f86a..a3d2ce54ea2e 100644 --- a/arch/powerpc/platforms/cell/spufs/syscalls.c +++ b/arch/powerpc/platforms/cell/spufs/syscalls.c @@ -70,7 +70,7 @@ static long do_spu_create(const char __user *pathname, unsigned int flags, if (!IS_ERR(tmp)) { struct nameidata nd; - ret = path_lookup(tmp, LOOKUP_PARENT, &nd); + ret = kern_path_parent(tmp, &nd); if (!ret) { nd.flags |= LOOKUP_OPEN | LOOKUP_CREATE; ret = spufs_create(&nd, flags, mode, neighbor); diff --git a/arch/powerpc/platforms/chrp/setup.c b/arch/powerpc/platforms/chrp/setup.c index 8553cc49e0d6..122786498419 100644 --- a/arch/powerpc/platforms/chrp/setup.c +++ b/arch/powerpc/platforms/chrp/setup.c @@ -365,10 +365,13 @@ void __init chrp_setup_arch(void) static void chrp_8259_cascade(unsigned int irq, struct irq_desc *desc) { + struct irq_chip *chip = irq_desc_get_chip(desc); unsigned int cascade_irq = i8259_irq(); + if (cascade_irq != NO_IRQ) generic_handle_irq(cascade_irq); - desc->chip->eoi(irq); + + chip->irq_eoi(&desc->irq_data); } /* @@ -514,7 +517,7 @@ static void __init chrp_find_8259(void) if (cascade_irq == NO_IRQ) printk(KERN_ERR "i8259: failed to map cascade irq\n"); else - set_irq_chained_handler(cascade_irq, + irq_set_chained_handler(cascade_irq, chrp_8259_cascade); } } diff --git a/arch/powerpc/platforms/embedded6xx/flipper-pic.c b/arch/powerpc/platforms/embedded6xx/flipper-pic.c index c278bd3a8fec..12aa62b6f227 100644 --- a/arch/powerpc/platforms/embedded6xx/flipper-pic.c +++ b/arch/powerpc/platforms/embedded6xx/flipper-pic.c @@ -46,10 +46,10 @@ * */ -static void flipper_pic_mask_and_ack(unsigned int virq) +static void flipper_pic_mask_and_ack(struct irq_data *d) { - int irq = virq_to_hw(virq); - void __iomem *io_base = get_irq_chip_data(virq); + int irq = virq_to_hw(d->irq); + void __iomem *io_base = irq_data_get_irq_chip_data(d); u32 mask = 1 << irq; clrbits32(io_base + FLIPPER_IMR, mask); @@ -57,27 +57,27 @@ static void flipper_pic_mask_and_ack(unsigned int virq) out_be32(io_base + FLIPPER_ICR, mask); } -static void flipper_pic_ack(unsigned int virq) +static void flipper_pic_ack(struct irq_data *d) { - int irq = virq_to_hw(virq); - void __iomem *io_base = get_irq_chip_data(virq); + int irq = virq_to_hw(d->irq); + void __iomem *io_base = irq_data_get_irq_chip_data(d); /* this is at least needed 
for RSW */ out_be32(io_base + FLIPPER_ICR, 1 << irq); } -static void flipper_pic_mask(unsigned int virq) +static void flipper_pic_mask(struct irq_data *d) { - int irq = virq_to_hw(virq); - void __iomem *io_base = get_irq_chip_data(virq); + int irq = virq_to_hw(d->irq); + void __iomem *io_base = irq_data_get_irq_chip_data(d); clrbits32(io_base + FLIPPER_IMR, 1 << irq); } -static void flipper_pic_unmask(unsigned int virq) +static void flipper_pic_unmask(struct irq_data *d) { - int irq = virq_to_hw(virq); - void __iomem *io_base = get_irq_chip_data(virq); + int irq = virq_to_hw(d->irq); + void __iomem *io_base = irq_data_get_irq_chip_data(d); setbits32(io_base + FLIPPER_IMR, 1 << irq); } @@ -85,10 +85,10 @@ static void flipper_pic_unmask(unsigned int virq) static struct irq_chip flipper_pic = { .name = "flipper-pic", - .ack = flipper_pic_ack, - .mask_ack = flipper_pic_mask_and_ack, - .mask = flipper_pic_mask, - .unmask = flipper_pic_unmask, + .irq_ack = flipper_pic_ack, + .irq_mask_ack = flipper_pic_mask_and_ack, + .irq_mask = flipper_pic_mask, + .irq_unmask = flipper_pic_unmask, }; /* @@ -101,16 +101,16 @@ static struct irq_host *flipper_irq_host; static int flipper_pic_map(struct irq_host *h, unsigned int virq, irq_hw_number_t hwirq) { - set_irq_chip_data(virq, h->host_data); - irq_to_desc(virq)->status |= IRQ_LEVEL; - set_irq_chip_and_handler(virq, &flipper_pic, handle_level_irq); + irq_set_chip_data(virq, h->host_data); + irq_set_status_flags(virq, IRQ_LEVEL); + irq_set_chip_and_handler(virq, &flipper_pic, handle_level_irq); return 0; } static void flipper_pic_unmap(struct irq_host *h, unsigned int irq) { - set_irq_chip_data(irq, NULL); - set_irq_chip(irq, NULL); + irq_set_chip_data(irq, NULL); + irq_set_chip(irq, NULL); } static int flipper_pic_match(struct irq_host *h, struct device_node *np) diff --git a/arch/powerpc/platforms/embedded6xx/hlwd-pic.c b/arch/powerpc/platforms/embedded6xx/hlwd-pic.c index a771f91e215b..2bdddfc9d520 100644 --- a/arch/powerpc/platforms/embedded6xx/hlwd-pic.c +++ b/arch/powerpc/platforms/embedded6xx/hlwd-pic.c @@ -41,36 +41,36 @@ * */ -static void hlwd_pic_mask_and_ack(unsigned int virq) +static void hlwd_pic_mask_and_ack(struct irq_data *d) { - int irq = virq_to_hw(virq); - void __iomem *io_base = get_irq_chip_data(virq); + int irq = virq_to_hw(d->irq); + void __iomem *io_base = irq_data_get_irq_chip_data(d); u32 mask = 1 << irq; clrbits32(io_base + HW_BROADWAY_IMR, mask); out_be32(io_base + HW_BROADWAY_ICR, mask); } -static void hlwd_pic_ack(unsigned int virq) +static void hlwd_pic_ack(struct irq_data *d) { - int irq = virq_to_hw(virq); - void __iomem *io_base = get_irq_chip_data(virq); + int irq = virq_to_hw(d->irq); + void __iomem *io_base = irq_data_get_irq_chip_data(d); out_be32(io_base + HW_BROADWAY_ICR, 1 << irq); } -static void hlwd_pic_mask(unsigned int virq) +static void hlwd_pic_mask(struct irq_data *d) { - int irq = virq_to_hw(virq); - void __iomem *io_base = get_irq_chip_data(virq); + int irq = virq_to_hw(d->irq); + void __iomem *io_base = irq_data_get_irq_chip_data(d); clrbits32(io_base + HW_BROADWAY_IMR, 1 << irq); } -static void hlwd_pic_unmask(unsigned int virq) +static void hlwd_pic_unmask(struct irq_data *d) { - int irq = virq_to_hw(virq); - void __iomem *io_base = get_irq_chip_data(virq); + int irq = virq_to_hw(d->irq); + void __iomem *io_base = irq_data_get_irq_chip_data(d); setbits32(io_base + HW_BROADWAY_IMR, 1 << irq); } @@ -78,10 +78,10 @@ static void hlwd_pic_unmask(unsigned int virq) static struct irq_chip hlwd_pic = { .name = 
"hlwd-pic", - .ack = hlwd_pic_ack, - .mask_ack = hlwd_pic_mask_and_ack, - .mask = hlwd_pic_mask, - .unmask = hlwd_pic_unmask, + .irq_ack = hlwd_pic_ack, + .irq_mask_ack = hlwd_pic_mask_and_ack, + .irq_mask = hlwd_pic_mask, + .irq_unmask = hlwd_pic_unmask, }; /* @@ -94,16 +94,16 @@ static struct irq_host *hlwd_irq_host; static int hlwd_pic_map(struct irq_host *h, unsigned int virq, irq_hw_number_t hwirq) { - set_irq_chip_data(virq, h->host_data); - irq_to_desc(virq)->status |= IRQ_LEVEL; - set_irq_chip_and_handler(virq, &hlwd_pic, handle_level_irq); + irq_set_chip_data(virq, h->host_data); + irq_set_status_flags(virq, IRQ_LEVEL); + irq_set_chip_and_handler(virq, &hlwd_pic, handle_level_irq); return 0; } static void hlwd_pic_unmap(struct irq_host *h, unsigned int irq) { - set_irq_chip_data(irq, NULL); - set_irq_chip(irq, NULL); + irq_set_chip_data(irq, NULL); + irq_set_chip(irq, NULL); } static struct irq_host_ops hlwd_irq_host_ops = { @@ -129,11 +129,12 @@ static unsigned int __hlwd_pic_get_irq(struct irq_host *h) static void hlwd_pic_irq_cascade(unsigned int cascade_virq, struct irq_desc *desc) { - struct irq_host *irq_host = get_irq_data(cascade_virq); + struct irq_chip *chip = irq_desc_get_chip(desc); + struct irq_host *irq_host = irq_get_handler_data(cascade_virq); unsigned int virq; raw_spin_lock(&desc->lock); - desc->chip->mask(cascade_virq); /* IRQ_LEVEL */ + chip->irq_mask(&desc->irq_data); /* IRQ_LEVEL */ raw_spin_unlock(&desc->lock); virq = __hlwd_pic_get_irq(irq_host); @@ -143,9 +144,9 @@ static void hlwd_pic_irq_cascade(unsigned int cascade_virq, pr_err("spurious interrupt!\n"); raw_spin_lock(&desc->lock); - desc->chip->ack(cascade_virq); /* IRQ_LEVEL */ - if (!(desc->status & IRQ_DISABLED) && desc->chip->unmask) - desc->chip->unmask(cascade_virq); + chip->irq_ack(&desc->irq_data); /* IRQ_LEVEL */ + if (!irqd_irq_disabled(&desc->irq_data) && chip->irq_unmask) + chip->irq_unmask(&desc->irq_data); raw_spin_unlock(&desc->lock); } @@ -217,8 +218,8 @@ void hlwd_pic_probe(void) host = hlwd_pic_init(np); BUG_ON(!host); cascade_virq = irq_of_parse_and_map(np, 0); - set_irq_data(cascade_virq, host); - set_irq_chained_handler(cascade_virq, + irq_set_handler_data(cascade_virq, host); + irq_set_chained_handler(cascade_virq, hlwd_pic_irq_cascade); hlwd_irq_host = host; break; diff --git a/arch/powerpc/platforms/embedded6xx/holly.c b/arch/powerpc/platforms/embedded6xx/holly.c index b21fde589ca7..487bda0d18d8 100644 --- a/arch/powerpc/platforms/embedded6xx/holly.c +++ b/arch/powerpc/platforms/embedded6xx/holly.c @@ -198,8 +198,8 @@ static void __init holly_init_IRQ(void) cascade_pci_irq = irq_of_parse_and_map(tsi_pci, 0); pr_debug("%s: tsi108 cascade_pci_irq = 0x%x\n", __func__, (u32) cascade_pci_irq); tsi108_pci_int_init(cascade_node); - set_irq_data(cascade_pci_irq, mpic); - set_irq_chained_handler(cascade_pci_irq, tsi108_irq_cascade); + irq_set_handler_data(cascade_pci_irq, mpic); + irq_set_chained_handler(cascade_pci_irq, tsi108_irq_cascade); #endif /* Configure MPIC outputs to CPU0 */ tsi108_write_reg(TSI108_MPIC_OFFSET + 0x30c, 0); diff --git a/arch/powerpc/platforms/embedded6xx/mpc7448_hpc2.c b/arch/powerpc/platforms/embedded6xx/mpc7448_hpc2.c index 7a2ba39d7811..1cb907c94359 100644 --- a/arch/powerpc/platforms/embedded6xx/mpc7448_hpc2.c +++ b/arch/powerpc/platforms/embedded6xx/mpc7448_hpc2.c @@ -153,8 +153,8 @@ static void __init mpc7448_hpc2_init_IRQ(void) DBG("%s: tsi108 cascade_pci_irq = 0x%x\n", __func__, (u32) cascade_pci_irq); tsi108_pci_int_init(cascade_node); - 
set_irq_data(cascade_pci_irq, mpic); - set_irq_chained_handler(cascade_pci_irq, tsi108_irq_cascade); + irq_set_handler_data(cascade_pci_irq, mpic); + irq_set_chained_handler(cascade_pci_irq, tsi108_irq_cascade); #endif /* Configure MPIC outputs to CPU0 */ tsi108_write_reg(TSI108_MPIC_OFFSET + 0x30c, 0); diff --git a/arch/powerpc/platforms/iseries/dt.c b/arch/powerpc/platforms/iseries/dt.c index fdb7384c0c4f..f0491cc28900 100644 --- a/arch/powerpc/platforms/iseries/dt.c +++ b/arch/powerpc/platforms/iseries/dt.c @@ -242,8 +242,8 @@ static void __init dt_cpus(struct iseries_flat_dt *dt) pft_size[0] = 0; /* NUMA CEC cookie, 0 for non NUMA */ pft_size[1] = __ilog2(HvCallHpt_getHptPages() * HW_PAGE_SIZE); - for (i = 0; i < NR_CPUS; i++) { - if (lppaca_of(i).dyn_proc_status >= 2) + for (i = 0; i < NR_LPPACAS; i++) { + if (lppaca[i].dyn_proc_status >= 2) continue; snprintf(p, 32 - (p - buf), "@%d", i); @@ -251,7 +251,7 @@ static void __init dt_cpus(struct iseries_flat_dt *dt) dt_prop_str(dt, "device_type", device_type_cpu); - index = lppaca_of(i).dyn_hv_phys_proc_index; + index = lppaca[i].dyn_hv_phys_proc_index; d = &xIoHriProcessorVpd[index]; dt_prop_u32(dt, "i-cache-size", d->xInstCacheSize * 1024); diff --git a/arch/powerpc/platforms/iseries/irq.c b/arch/powerpc/platforms/iseries/irq.c index ba446bf355a9..52a6889832c7 100644 --- a/arch/powerpc/platforms/iseries/irq.c +++ b/arch/powerpc/platforms/iseries/irq.c @@ -167,11 +167,11 @@ static void pci_event_handler(struct HvLpEvent *event) * This will be called by device drivers (via enable_IRQ) * to enable INTA in the bridge interrupt status register. */ -static void iseries_enable_IRQ(unsigned int irq) +static void iseries_enable_IRQ(struct irq_data *d) { u32 bus, dev_id, function, mask; const u32 sub_bus = 0; - unsigned int rirq = (unsigned int)irq_map[irq].hwirq; + unsigned int rirq = (unsigned int)irq_map[d->irq].hwirq; /* The IRQ has already been locked by the caller */ bus = REAL_IRQ_TO_BUS(rirq); @@ -184,23 +184,23 @@ static void iseries_enable_IRQ(unsigned int irq) } /* This is called by iseries_activate_IRQs */ -static unsigned int iseries_startup_IRQ(unsigned int irq) +static unsigned int iseries_startup_IRQ(struct irq_data *d) { u32 bus, dev_id, function, mask; const u32 sub_bus = 0; - unsigned int rirq = (unsigned int)irq_map[irq].hwirq; + unsigned int rirq = (unsigned int)irq_map[d->irq].hwirq; bus = REAL_IRQ_TO_BUS(rirq); function = REAL_IRQ_TO_FUNC(rirq); dev_id = (REAL_IRQ_TO_IDSEL(rirq) << 4) + function; /* Link the IRQ number to the bridge */ - HvCallXm_connectBusUnit(bus, sub_bus, dev_id, irq); + HvCallXm_connectBusUnit(bus, sub_bus, dev_id, d->irq); /* Unmask bridge interrupts in the FISR */ mask = 0x01010000 << function; HvCallPci_unmaskFisr(bus, sub_bus, dev_id, mask); - iseries_enable_IRQ(irq); + iseries_enable_IRQ(d); return 0; } @@ -215,21 +215,26 @@ void __init iSeries_activate_IRQs() for_each_irq (irq) { struct irq_desc *desc = irq_to_desc(irq); + struct irq_chip *chip; - if (desc && desc->chip && desc->chip->startup) { + if (!desc) + continue; + + chip = irq_desc_get_chip(desc); + if (chip && chip->irq_startup) { raw_spin_lock_irqsave(&desc->lock, flags); - desc->chip->startup(irq); + chip->irq_startup(&desc->irq_data); raw_spin_unlock_irqrestore(&desc->lock, flags); } } } /* this is not called anywhere currently */ -static void iseries_shutdown_IRQ(unsigned int irq) +static void iseries_shutdown_IRQ(struct irq_data *d) { u32 bus, dev_id, function, mask; const u32 sub_bus = 0; - unsigned int rirq = (unsigned 
int)irq_map[irq].hwirq; + unsigned int rirq = (unsigned int)irq_map[d->irq].hwirq; /* irq should be locked by the caller */ bus = REAL_IRQ_TO_BUS(rirq); @@ -248,11 +253,11 @@ static void iseries_shutdown_IRQ(unsigned int irq) * This will be called by device drivers (via disable_IRQ) * to disable INTA in the bridge interrupt status register. */ -static void iseries_disable_IRQ(unsigned int irq) +static void iseries_disable_IRQ(struct irq_data *d) { u32 bus, dev_id, function, mask; const u32 sub_bus = 0; - unsigned int rirq = (unsigned int)irq_map[irq].hwirq; + unsigned int rirq = (unsigned int)irq_map[d->irq].hwirq; /* The IRQ has already been locked by the caller */ bus = REAL_IRQ_TO_BUS(rirq); @@ -264,9 +269,9 @@ static void iseries_disable_IRQ(unsigned int irq) HvCallPci_maskInterrupts(bus, sub_bus, dev_id, mask); } -static void iseries_end_IRQ(unsigned int irq) +static void iseries_end_IRQ(struct irq_data *d) { - unsigned int rirq = (unsigned int)irq_map[irq].hwirq; + unsigned int rirq = (unsigned int)irq_map[d->irq].hwirq; HvCallPci_eoi(REAL_IRQ_TO_BUS(rirq), REAL_IRQ_TO_SUBBUS(rirq), (REAL_IRQ_TO_IDSEL(rirq) << 4) + REAL_IRQ_TO_FUNC(rirq)); @@ -274,11 +279,11 @@ static void iseries_end_IRQ(unsigned int irq) static struct irq_chip iseries_pic = { .name = "iSeries", - .startup = iseries_startup_IRQ, - .shutdown = iseries_shutdown_IRQ, - .unmask = iseries_enable_IRQ, - .mask = iseries_disable_IRQ, - .eoi = iseries_end_IRQ + .irq_startup = iseries_startup_IRQ, + .irq_shutdown = iseries_shutdown_IRQ, + .irq_unmask = iseries_enable_IRQ, + .irq_mask = iseries_disable_IRQ, + .irq_eoi = iseries_end_IRQ }; /* @@ -341,7 +346,7 @@ unsigned int iSeries_get_irq(void) static int iseries_irq_host_map(struct irq_host *h, unsigned int virq, irq_hw_number_t hw) { - set_irq_chip_and_handler(virq, &iseries_pic, handle_fasteoi_irq); + irq_set_chip_and_handler(virq, &iseries_pic, handle_fasteoi_irq); return 0; } diff --git a/arch/powerpc/platforms/iseries/mf.c b/arch/powerpc/platforms/iseries/mf.c index b5e026bdca21..62dabe3c2bfa 100644 --- a/arch/powerpc/platforms/iseries/mf.c +++ b/arch/powerpc/platforms/iseries/mf.c @@ -51,7 +51,7 @@ static int mf_initialized; /* - * This is the structure layout for the Machine Facilites LPAR event + * This is the structure layout for the Machine Facilities LPAR event * flows. */ struct vsp_cmd_data { diff --git a/arch/powerpc/platforms/iseries/setup.c b/arch/powerpc/platforms/iseries/setup.c index b0863410517f..2946ae10fbfd 100644 --- a/arch/powerpc/platforms/iseries/setup.c +++ b/arch/powerpc/platforms/iseries/setup.c @@ -680,6 +680,7 @@ void * __init iSeries_early_setup(void) * on but calling this function multiple times is fine. */ identify_cpu(0, mfspr(SPRN_PVR)); + initialise_paca(&boot_paca, 0); powerpc_firmware_features |= FW_FEATURE_ISERIES; powerpc_firmware_features |= FW_FEATURE_LPAR; diff --git a/arch/powerpc/platforms/iseries/viopath.c b/arch/powerpc/platforms/iseries/viopath.c index b5f05d943a90..2376069cdc14 100644 --- a/arch/powerpc/platforms/iseries/viopath.c +++ b/arch/powerpc/platforms/iseries/viopath.c @@ -396,7 +396,7 @@ static void vio_handleEvent(struct HvLpEvent *event) viopathStatus[remoteLp].mTargetInst)) { printk(VIOPATH_KERN_WARN "message from invalid partition. 
" - "int msg rcvd, source inst (%d) doesnt match (%d)\n", + "int msg rcvd, source inst (%d) doesn't match (%d)\n", viopathStatus[remoteLp].mTargetInst, event->xSourceInstanceId); return; @@ -407,7 +407,7 @@ static void vio_handleEvent(struct HvLpEvent *event) viopathStatus[remoteLp].mSourceInst)) { printk(VIOPATH_KERN_WARN "message from invalid partition. " - "int msg rcvd, target inst (%d) doesnt match (%d)\n", + "int msg rcvd, target inst (%d) doesn't match (%d)\n", viopathStatus[remoteLp].mSourceInst, event->xTargetInstanceId); return; @@ -418,7 +418,7 @@ static void vio_handleEvent(struct HvLpEvent *event) viopathStatus[remoteLp].mSourceInst) { printk(VIOPATH_KERN_WARN "message from invalid partition. " - "ack msg rcvd, source inst (%d) doesnt match (%d)\n", + "ack msg rcvd, source inst (%d) doesn't match (%d)\n", viopathStatus[remoteLp].mSourceInst, event->xSourceInstanceId); return; @@ -428,7 +428,7 @@ static void vio_handleEvent(struct HvLpEvent *event) viopathStatus[remoteLp].mTargetInst) { printk(VIOPATH_KERN_WARN "message from invalid partition. " - "viopath: ack msg rcvd, target inst (%d) doesnt match (%d)\n", + "viopath: ack msg rcvd, target inst (%d) doesn't match (%d)\n", viopathStatus[remoteLp].mTargetInst, event->xTargetInstanceId); return; diff --git a/arch/powerpc/platforms/maple/pci.c b/arch/powerpc/platforms/maple/pci.c index 04296ffff8bf..dd2e48b28508 100644 --- a/arch/powerpc/platforms/maple/pci.c +++ b/arch/powerpc/platforms/maple/pci.c @@ -498,7 +498,7 @@ void __devinit maple_pci_irq_fixup(struct pci_dev *dev) printk(KERN_DEBUG "Fixup U4 PCIe IRQ\n"); dev->irq = irq_create_mapping(NULL, 1); if (dev->irq != NO_IRQ) - set_irq_type(dev->irq, IRQ_TYPE_LEVEL_LOW); + irq_set_irq_type(dev->irq, IRQ_TYPE_LEVEL_LOW); } /* Hide AMD8111 IDE interrupt when in legacy mode so diff --git a/arch/powerpc/platforms/pasemi/dma_lib.c b/arch/powerpc/platforms/pasemi/dma_lib.c index 09695ae50f91..321a9b3a2d00 100644 --- a/arch/powerpc/platforms/pasemi/dma_lib.c +++ b/arch/powerpc/platforms/pasemi/dma_lib.c @@ -379,9 +379,9 @@ void pasemi_dma_free_buf(struct pasemi_dmachan *chan, int size, } EXPORT_SYMBOL(pasemi_dma_free_buf); -/* pasemi_dma_alloc_flag - Allocate a flag (event) for channel syncronization +/* pasemi_dma_alloc_flag - Allocate a flag (event) for channel synchronization * - * Allocates a flag for use with channel syncronization (event descriptors). + * Allocates a flag for use with channel synchronization (event descriptors). * Returns allocated flag (0-63), < 0 on error. 
*/ int pasemi_dma_alloc_flag(void) diff --git a/arch/powerpc/platforms/pasemi/gpio_mdio.c b/arch/powerpc/platforms/pasemi/gpio_mdio.c index a5d907b5a4c2..9886296e08da 100644 --- a/arch/powerpc/platforms/pasemi/gpio_mdio.c +++ b/arch/powerpc/platforms/pasemi/gpio_mdio.c @@ -216,8 +216,7 @@ static int gpio_mdio_reset(struct mii_bus *bus) } -static int __devinit gpio_mdio_probe(struct platform_device *ofdev, - const struct of_device_id *match) +static int __devinit gpio_mdio_probe(struct platform_device *ofdev) { struct device *dev = &ofdev->dev; struct device_node *np = ofdev->dev.of_node; @@ -299,7 +298,7 @@ static struct of_device_id gpio_mdio_match[] = }; MODULE_DEVICE_TABLE(of, gpio_mdio_match); -static struct of_platform_driver gpio_mdio_driver = +static struct platform_driver gpio_mdio_driver = { .probe = gpio_mdio_probe, .remove = gpio_mdio_remove, @@ -326,13 +325,13 @@ int gpio_mdio_init(void) if (!gpio_regs) return -ENODEV; - return of_register_platform_driver(&gpio_mdio_driver); + return platform_driver_register(&gpio_mdio_driver); } module_init(gpio_mdio_init); void gpio_mdio_exit(void) { - of_unregister_platform_driver(&gpio_mdio_driver); + platform_driver_unregister(&gpio_mdio_driver); if (gpio_regs) iounmap(gpio_regs); } diff --git a/arch/powerpc/platforms/pasemi/setup.c b/arch/powerpc/platforms/pasemi/setup.c index f372ec1691a3..7c858e6f843c 100644 --- a/arch/powerpc/platforms/pasemi/setup.c +++ b/arch/powerpc/platforms/pasemi/setup.c @@ -239,8 +239,8 @@ static __init void pas_init_IRQ(void) if (nmiprop) { nmi_virq = irq_create_mapping(NULL, *nmiprop); mpic_irq_set_priority(nmi_virq, 15); - set_irq_type(nmi_virq, IRQ_TYPE_EDGE_RISING); - mpic_unmask_irq(nmi_virq); + irq_set_irq_type(nmi_virq, IRQ_TYPE_EDGE_RISING); + mpic_unmask_irq(irq_get_irq_data(nmi_virq)); } of_node_put(mpic_node); @@ -266,7 +266,7 @@ static int pas_machine_check_handler(struct pt_regs *regs) if (nmi_virq != NO_IRQ && mpic_get_mcirq() == nmi_virq) { printk(KERN_ERR "NMI delivered\n"); debugger(regs); - mpic_end_irq(nmi_virq); + mpic_end_irq(irq_get_irq_data(nmi_virq)); goto out; } diff --git a/arch/powerpc/platforms/powermac/Makefile b/arch/powerpc/platforms/powermac/Makefile index 50f169392551..ea47df66fee5 100644 --- a/arch/powerpc/platforms/powermac/Makefile +++ b/arch/powerpc/platforms/powermac/Makefile @@ -11,7 +11,7 @@ obj-y += pic.o setup.o time.o feature.o pci.o \ obj-$(CONFIG_PMAC_BACKLIGHT) += backlight.o obj-$(CONFIG_CPU_FREQ_PMAC) += cpufreq_32.o obj-$(CONFIG_CPU_FREQ_PMAC64) += cpufreq_64.o -# CONFIG_NVRAM is an arch. independant tristate symbol, for pmac32 we really +# CONFIG_NVRAM is an arch. independent tristate symbol, for pmac32 we really # need this to be a bool. Cheat here and pretend CONFIG_NVRAM=m is really # CONFIG_NVRAM=y obj-$(CONFIG_NVRAM:m=y) += nvram.o diff --git a/arch/powerpc/platforms/powermac/cpufreq_32.c b/arch/powerpc/platforms/powermac/cpufreq_32.c index 415ca6d6b273..04af5f48b4eb 100644 --- a/arch/powerpc/platforms/powermac/cpufreq_32.c +++ b/arch/powerpc/platforms/powermac/cpufreq_32.c @@ -429,7 +429,7 @@ static u32 read_gpio(struct device_node *np) return offset; } -static int pmac_cpufreq_suspend(struct cpufreq_policy *policy, pm_message_t pmsg) +static int pmac_cpufreq_suspend(struct cpufreq_policy *policy) { /* Ok, this could be made a bit smarter, but let's be robust for now. 
We * always force a speed change to high speed before sleep, to make sure diff --git a/arch/powerpc/platforms/powermac/low_i2c.c b/arch/powerpc/platforms/powermac/low_i2c.c index 480567e5fa9a..e9c8a607268e 100644 --- a/arch/powerpc/platforms/powermac/low_i2c.c +++ b/arch/powerpc/platforms/powermac/low_i2c.c @@ -904,7 +904,7 @@ static void __init smu_i2c_probe(void) printk(KERN_INFO "SMU i2c %s\n", controller->full_name); /* Look for childs, note that they might not be of the right - * type as older device trees mix i2c busses and other thigns + * type as older device trees mix i2c busses and other things * at the same level */ for (busnode = NULL; diff --git a/arch/powerpc/platforms/powermac/pci.c b/arch/powerpc/platforms/powermac/pci.c index 3bc075c788ef..f33e08d573ce 100644 --- a/arch/powerpc/platforms/powermac/pci.c +++ b/arch/powerpc/platforms/powermac/pci.c @@ -299,7 +299,7 @@ static void __init setup_chaos(struct pci_controller *hose, * This function deals with some "special cases" devices. * * 0 -> No special case - * 1 -> Skip the device but act as if the access was successfull + * 1 -> Skip the device but act as if the access was successful * (return 0xff's on reads, eventually, cache config space * accesses in a later version) * -1 -> Hide the device (unsuccessful access) @@ -988,7 +988,7 @@ void __devinit pmac_pci_irq_fixup(struct pci_dev *dev) dev->vendor == PCI_VENDOR_ID_DEC && dev->device == PCI_DEVICE_ID_DEC_TULIP_PLUS) { dev->irq = irq_create_mapping(NULL, 60); - set_irq_type(dev->irq, IRQ_TYPE_LEVEL_LOW); + irq_set_irq_type(dev->irq, IRQ_TYPE_LEVEL_LOW); } #endif /* CONFIG_PPC32 */ } diff --git a/arch/powerpc/platforms/powermac/pic.c b/arch/powerpc/platforms/powermac/pic.c index 890d5f72b198..023f24086a0a 100644 --- a/arch/powerpc/platforms/powermac/pic.c +++ b/arch/powerpc/platforms/powermac/pic.c @@ -82,9 +82,9 @@ static void __pmac_retrigger(unsigned int irq_nr) } } -static void pmac_mask_and_ack_irq(unsigned int virq) +static void pmac_mask_and_ack_irq(struct irq_data *d) { - unsigned int src = irq_map[virq].hwirq; + unsigned int src = irq_map[d->irq].hwirq; unsigned long bit = 1UL << (src & 0x1f); int i = src >> 5; unsigned long flags; @@ -104,9 +104,9 @@ static void pmac_mask_and_ack_irq(unsigned int virq) raw_spin_unlock_irqrestore(&pmac_pic_lock, flags); } -static void pmac_ack_irq(unsigned int virq) +static void pmac_ack_irq(struct irq_data *d) { - unsigned int src = irq_map[virq].hwirq; + unsigned int src = irq_map[d->irq].hwirq; unsigned long bit = 1UL << (src & 0x1f); int i = src >> 5; unsigned long flags; @@ -149,15 +149,15 @@ static void __pmac_set_irq_mask(unsigned int irq_nr, int nokicklost) /* When an irq gets requested for the first client, if it's an * edge interrupt, we clear any previous one on the controller */ -static unsigned int pmac_startup_irq(unsigned int virq) +static unsigned int pmac_startup_irq(struct irq_data *d) { unsigned long flags; - unsigned int src = irq_map[virq].hwirq; + unsigned int src = irq_map[d->irq].hwirq; unsigned long bit = 1UL << (src & 0x1f); int i = src >> 5; raw_spin_lock_irqsave(&pmac_pic_lock, flags); - if ((irq_to_desc(virq)->status & IRQ_LEVEL) == 0) + if (!irqd_is_level_type(d)) out_le32(&pmac_irq_hw[i]->ack, bit); __set_bit(src, ppc_cached_irq_mask); __pmac_set_irq_mask(src, 0); @@ -166,10 +166,10 @@ static unsigned int pmac_startup_irq(unsigned int virq) return 0; } -static void pmac_mask_irq(unsigned int virq) +static void pmac_mask_irq(struct irq_data *d) { unsigned long flags; - unsigned int src = 
irq_map[virq].hwirq; + unsigned int src = irq_map[d->irq].hwirq; raw_spin_lock_irqsave(&pmac_pic_lock, flags); __clear_bit(src, ppc_cached_irq_mask); @@ -177,10 +177,10 @@ static void pmac_mask_irq(unsigned int virq) raw_spin_unlock_irqrestore(&pmac_pic_lock, flags); } -static void pmac_unmask_irq(unsigned int virq) +static void pmac_unmask_irq(struct irq_data *d) { unsigned long flags; - unsigned int src = irq_map[virq].hwirq; + unsigned int src = irq_map[d->irq].hwirq; raw_spin_lock_irqsave(&pmac_pic_lock, flags); __set_bit(src, ppc_cached_irq_mask); @@ -188,24 +188,24 @@ static void pmac_unmask_irq(unsigned int virq) raw_spin_unlock_irqrestore(&pmac_pic_lock, flags); } -static int pmac_retrigger(unsigned int virq) +static int pmac_retrigger(struct irq_data *d) { unsigned long flags; raw_spin_lock_irqsave(&pmac_pic_lock, flags); - __pmac_retrigger(irq_map[virq].hwirq); + __pmac_retrigger(irq_map[d->irq].hwirq); raw_spin_unlock_irqrestore(&pmac_pic_lock, flags); return 1; } static struct irq_chip pmac_pic = { .name = "PMAC-PIC", - .startup = pmac_startup_irq, - .mask = pmac_mask_irq, - .ack = pmac_ack_irq, - .mask_ack = pmac_mask_and_ack_irq, - .unmask = pmac_unmask_irq, - .retrigger = pmac_retrigger, + .irq_startup = pmac_startup_irq, + .irq_mask = pmac_mask_irq, + .irq_ack = pmac_ack_irq, + .irq_mask_ack = pmac_mask_and_ack_irq, + .irq_unmask = pmac_unmask_irq, + .irq_retrigger = pmac_retrigger, }; static irqreturn_t gatwick_action(int cpl, void *dev_id) @@ -289,7 +289,6 @@ static int pmac_pic_host_match(struct irq_host *h, struct device_node *node) static int pmac_pic_host_map(struct irq_host *h, unsigned int virq, irq_hw_number_t hw) { - struct irq_desc *desc = irq_to_desc(virq); int level; if (hw >= max_irqs) @@ -300,9 +299,9 @@ static int pmac_pic_host_map(struct irq_host *h, unsigned int virq, */ level = !!(level_mask[hw >> 5] & (1UL << (hw & 0x1f))); if (level) - desc->status |= IRQ_LEVEL; - set_irq_chip_and_handler(virq, &pmac_pic, level ? - handle_level_irq : handle_edge_irq); + irq_set_status_flags(virq, IRQ_LEVEL); + irq_set_chip_and_handler(virq, &pmac_pic, + level ? 
handle_level_irq : handle_edge_irq); return 0; } @@ -472,12 +471,14 @@ int of_irq_map_oldworld(struct device_node *device, int index, static void pmac_u3_cascade(unsigned int irq, struct irq_desc *desc) { - struct mpic *mpic = desc->handler_data; - + struct irq_chip *chip = irq_desc_get_chip(desc); + struct mpic *mpic = irq_desc_get_handler_data(desc); unsigned int cascade_irq = mpic_get_one_irq(mpic); + if (cascade_irq != NO_IRQ) generic_handle_irq(cascade_irq); - desc->chip->eoi(irq); + + chip->irq_eoi(&desc->irq_data); } static void __init pmac_pic_setup_mpic_nmi(struct mpic *mpic) @@ -589,8 +590,8 @@ static int __init pmac_pic_probe_mpic(void) of_node_put(slave); return 0; } - set_irq_data(cascade, mpic2); - set_irq_chained_handler(cascade, pmac_u3_cascade); + irq_set_handler_data(cascade, mpic2); + irq_set_chained_handler(cascade, pmac_u3_cascade); of_node_put(slave); return 0; @@ -707,7 +708,7 @@ static int pmacpic_resume(struct sys_device *sysdev) mb(); for (i = 0; i < max_real_irqs; ++i) if (test_bit(i, sleep_save_mask)) - pmac_unmask_irq(i); + pmac_unmask_irq(irq_get_irq_data(i)); return 0; } diff --git a/arch/powerpc/platforms/powermac/pmac.h b/arch/powerpc/platforms/powermac/pmac.h index f0bc08f6c1f0..20468f49aec0 100644 --- a/arch/powerpc/platforms/powermac/pmac.h +++ b/arch/powerpc/platforms/powermac/pmac.h @@ -33,7 +33,6 @@ extern void pmac_setup_pci_dma(void); extern void pmac_check_ht_link(void); extern void pmac_setup_smp(void); -extern void pmac32_cpu_die(void); extern void low_cpu_die(void) __attribute__((noreturn)); extern int pmac_nvram_init(void); diff --git a/arch/powerpc/platforms/powermac/setup.c b/arch/powerpc/platforms/powermac/setup.c index d5aceb7fb125..aa45281bd296 100644 --- a/arch/powerpc/platforms/powermac/setup.c +++ b/arch/powerpc/platforms/powermac/setup.c @@ -650,51 +650,6 @@ static int pmac_pci_probe_mode(struct pci_bus *bus) return PCI_PROBE_NORMAL; return PCI_PROBE_DEVTREE; } - -#ifdef CONFIG_HOTPLUG_CPU -/* access per cpu vars from generic smp.c */ -DECLARE_PER_CPU(int, cpu_state); - -static void pmac64_cpu_die(void) -{ - /* - * turn off as much as possible, we'll be - * kicked out as this will only be invoked - * on core99 platforms for now ... - */ - - printk(KERN_INFO "CPU#%d offline\n", smp_processor_id()); - __get_cpu_var(cpu_state) = CPU_DEAD; - smp_wmb(); - - /* - * during the path that leads here preemption is disabled, - * reenable it now so that when coming up preempt count is - * zero correctly - */ - preempt_enable(); - - /* - * hard-disable interrupts for the non-NAP case, the NAP code - * needs to re-enable interrupts (but soft-disables them) - */ - hard_irq_disable(); - - while (1) { - /* let's not take timer interrupts too often ... 
*/ - set_dec(0x7fffffff); - - /* should always be true at this point */ - if (cpu_has_feature(CPU_FTR_CAN_NAP)) - power4_cpu_offline_powersave(); - else { - HMT_low(); - HMT_very_low(); - } - } -} -#endif /* CONFIG_HOTPLUG_CPU */ - #endif /* CONFIG_PPC64 */ define_machine(powermac) { @@ -726,15 +681,4 @@ define_machine(powermac) { .pcibios_after_init = pmac_pcibios_after_init, .phys_mem_access_prot = pci_phys_mem_access_prot, #endif -#ifdef CONFIG_HOTPLUG_CPU -#ifdef CONFIG_PPC64 - .cpu_die = pmac64_cpu_die, -#endif -#ifdef CONFIG_PPC32 - .cpu_die = pmac32_cpu_die, -#endif -#endif -#if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PPC32) - .cpu_die = generic_mach_cpu_die, -#endif }; diff --git a/arch/powerpc/platforms/powermac/smp.c b/arch/powerpc/platforms/powermac/smp.c index c95215f4f8b6..bc5f0dc6ae1e 100644 --- a/arch/powerpc/platforms/powermac/smp.c +++ b/arch/powerpc/platforms/powermac/smp.c @@ -840,92 +840,151 @@ static void __devinit smp_core99_setup_cpu(int cpu_nr) /* Setup openpic */ mpic_setup_this_cpu(); +} - if (cpu_nr == 0) { #ifdef CONFIG_PPC64 - extern void g5_phy_disable_cpu1(void); +#ifdef CONFIG_HOTPLUG_CPU +static int smp_core99_cpu_notify(struct notifier_block *self, + unsigned long action, void *hcpu) +{ + int rc; - /* Close i2c bus if it was used for tb sync */ + switch(action) { + case CPU_UP_PREPARE: + case CPU_UP_PREPARE_FROZEN: + /* Open i2c bus if it was used for tb sync */ if (pmac_tb_clock_chip_host) { - pmac_i2c_close(pmac_tb_clock_chip_host); - pmac_tb_clock_chip_host = NULL; + rc = pmac_i2c_open(pmac_tb_clock_chip_host, 1); + if (rc) { + pr_err("Failed to open i2c bus for time sync\n"); + return notifier_from_errno(rc); + } } + break; + case CPU_ONLINE: + case CPU_UP_CANCELED: + /* Close i2c bus if it was used for tb sync */ + if (pmac_tb_clock_chip_host) + pmac_i2c_close(pmac_tb_clock_chip_host); + break; + default: + break; + } + return NOTIFY_OK; +} - /* If we didn't start the second CPU, we must take - * it off the bus - */ - if (of_machine_is_compatible("MacRISC4") && - num_online_cpus() < 2) - g5_phy_disable_cpu1(); -#endif /* CONFIG_PPC64 */ +static struct notifier_block __cpuinitdata smp_core99_cpu_nb = { + .notifier_call = smp_core99_cpu_notify, +}; +#endif /* CONFIG_HOTPLUG_CPU */ + +static void __init smp_core99_bringup_done(void) +{ + extern void g5_phy_disable_cpu1(void); - if (ppc_md.progress) - ppc_md.progress("core99_setup_cpu 0 done", 0x349); + /* Close i2c bus if it was used for tb sync */ + if (pmac_tb_clock_chip_host) + pmac_i2c_close(pmac_tb_clock_chip_host); + + /* If we didn't start the second CPU, we must take + * it off the bus. 
+ */ + if (of_machine_is_compatible("MacRISC4") && + num_online_cpus() < 2) { + set_cpu_present(1, false); + g5_phy_disable_cpu1(); } -} +#ifdef CONFIG_HOTPLUG_CPU + register_cpu_notifier(&smp_core99_cpu_nb); +#endif + if (ppc_md.progress) + ppc_md.progress("smp_core99_bringup_done", 0x349); +} +#endif /* CONFIG_PPC64 */ -#if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PPC32) +#ifdef CONFIG_HOTPLUG_CPU -int smp_core99_cpu_disable(void) +static int smp_core99_cpu_disable(void) { - set_cpu_online(smp_processor_id(), false); + int rc = generic_cpu_disable(); + if (rc) + return rc; - /* XXX reset cpu affinity here */ mpic_cpu_set_priority(0xf); - asm volatile("mtdec %0" : : "r" (0x7fffffff)); - mb(); - udelay(20); - asm volatile("mtdec %0" : : "r" (0x7fffffff)); + return 0; } -static int cpu_dead[NR_CPUS]; +#ifdef CONFIG_PPC32 -void pmac32_cpu_die(void) +static void pmac_cpu_die(void) { + int cpu = smp_processor_id(); + local_irq_disable(); - cpu_dead[smp_processor_id()] = 1; + idle_task_exit(); + pr_debug("CPU%d offline\n", cpu); + generic_set_cpu_dead(cpu); + smp_wmb(); mb(); low_cpu_die(); } -void smp_core99_cpu_die(unsigned int cpu) +#else /* CONFIG_PPC32 */ + +static void pmac_cpu_die(void) { - int timeout; + int cpu = smp_processor_id(); - timeout = 1000; - while (!cpu_dead[cpu]) { - if (--timeout == 0) { - printk("CPU %u refused to die!\n", cpu); - break; - } - msleep(1); + local_irq_disable(); + idle_task_exit(); + + /* + * turn off as much as possible, we'll be + * kicked out as this will only be invoked + * on core99 platforms for now ... + */ + + printk(KERN_INFO "CPU#%d offline\n", cpu); + generic_set_cpu_dead(cpu); + smp_wmb(); + + /* + * Re-enable interrupts. The NAP code needs to enable them + * anyways, do it now so we deal with the case where one already + * happened while soft-disabled. + * We shouldn't get any external interrupts, only decrementer, and the + * decrementer handler is safe for use on offline CPUs + */ + local_irq_enable(); + + while (1) { + /* let's not take timer interrupts too often ... */ + set_dec(0x7fffffff); + + /* Enter NAP mode */ + power4_idle(); } - cpu_dead[cpu] = 0; } -#endif /* CONFIG_HOTPLUG_CPU && CONFIG_PP32 */ +#endif /* else CONFIG_PPC32 */ +#endif /* CONFIG_HOTPLUG_CPU */ /* Core99 Macs (dual G4s and G5s) */ struct smp_ops_t core99_smp_ops = { .message_pass = smp_mpic_message_pass, .probe = smp_core99_probe, +#ifdef CONFIG_PPC64 + .bringup_done = smp_core99_bringup_done, +#endif .kick_cpu = smp_core99_kick_cpu, .setup_cpu = smp_core99_setup_cpu, .give_timebase = smp_core99_give_timebase, .take_timebase = smp_core99_take_timebase, #if defined(CONFIG_HOTPLUG_CPU) -# if defined(CONFIG_PPC32) .cpu_disable = smp_core99_cpu_disable, - .cpu_die = smp_core99_cpu_die, -# endif -# if defined(CONFIG_PPC64) - .cpu_disable = generic_cpu_disable, .cpu_die = generic_cpu_die, - /* intentionally do *NOT* assign cpu_enable, - * the generic code will use kick_cpu then! */ -# endif #endif }; @@ -957,5 +1016,10 @@ void __init pmac_setup_smp(void) smp_ops = &psurge_smp_ops; } #endif /* CONFIG_PPC32 */ + +#ifdef CONFIG_HOTPLUG_CPU + ppc_md.cpu_die = pmac_cpu_die; +#endif } + diff --git a/arch/powerpc/platforms/ps3/interrupt.c b/arch/powerpc/platforms/ps3/interrupt.c index 92290ff4761a..f2f6413b81d3 100644 --- a/arch/powerpc/platforms/ps3/interrupt.c +++ b/arch/powerpc/platforms/ps3/interrupt.c @@ -99,16 +99,16 @@ static DEFINE_PER_CPU(struct ps3_private, ps3_private); * Sets ps3_bmp.mask and calls lv1_did_update_interrupt_mask(). 
*/ -static void ps3_chip_mask(unsigned int virq) +static void ps3_chip_mask(struct irq_data *d) { - struct ps3_private *pd = get_irq_chip_data(virq); + struct ps3_private *pd = irq_data_get_irq_chip_data(d); unsigned long flags; pr_debug("%s:%d: thread_id %llu, virq %d\n", __func__, __LINE__, - pd->thread_id, virq); + pd->thread_id, d->irq); local_irq_save(flags); - clear_bit(63 - virq, &pd->bmp.mask); + clear_bit(63 - d->irq, &pd->bmp.mask); lv1_did_update_interrupt_mask(pd->ppe_id, pd->thread_id); local_irq_restore(flags); } @@ -120,16 +120,16 @@ static void ps3_chip_mask(unsigned int virq) * Clears ps3_bmp.mask and calls lv1_did_update_interrupt_mask(). */ -static void ps3_chip_unmask(unsigned int virq) +static void ps3_chip_unmask(struct irq_data *d) { - struct ps3_private *pd = get_irq_chip_data(virq); + struct ps3_private *pd = irq_data_get_irq_chip_data(d); unsigned long flags; pr_debug("%s:%d: thread_id %llu, virq %d\n", __func__, __LINE__, - pd->thread_id, virq); + pd->thread_id, d->irq); local_irq_save(flags); - set_bit(63 - virq, &pd->bmp.mask); + set_bit(63 - d->irq, &pd->bmp.mask); lv1_did_update_interrupt_mask(pd->ppe_id, pd->thread_id); local_irq_restore(flags); } @@ -141,10 +141,10 @@ static void ps3_chip_unmask(unsigned int virq) * Calls lv1_end_of_interrupt_ext(). */ -static void ps3_chip_eoi(unsigned int virq) +static void ps3_chip_eoi(struct irq_data *d) { - const struct ps3_private *pd = get_irq_chip_data(virq); - lv1_end_of_interrupt_ext(pd->ppe_id, pd->thread_id, virq); + const struct ps3_private *pd = irq_data_get_irq_chip_data(d); + lv1_end_of_interrupt_ext(pd->ppe_id, pd->thread_id, d->irq); } /** @@ -153,9 +153,9 @@ static void ps3_chip_eoi(unsigned int virq) static struct irq_chip ps3_irq_chip = { .name = "ps3", - .mask = ps3_chip_mask, - .unmask = ps3_chip_unmask, - .eoi = ps3_chip_eoi, + .irq_mask = ps3_chip_mask, + .irq_unmask = ps3_chip_unmask, + .irq_eoi = ps3_chip_eoi, }; /** @@ -194,7 +194,7 @@ static int ps3_virq_setup(enum ps3_cpu_binding cpu, unsigned long outlet, pr_debug("%s:%d: outlet %lu => cpu %u, virq %u\n", __func__, __LINE__, outlet, cpu, *virq); - result = set_irq_chip_data(*virq, pd); + result = irq_set_chip_data(*virq, pd); if (result) { pr_debug("%s:%d: set_irq_chip_data failed\n", @@ -202,7 +202,7 @@ static int ps3_virq_setup(enum ps3_cpu_binding cpu, unsigned long outlet, goto fail_set; } - ps3_chip_mask(*virq); + ps3_chip_mask(irq_get_irq_data(*virq)); return result; @@ -221,12 +221,12 @@ fail_create: static int ps3_virq_destroy(unsigned int virq) { - const struct ps3_private *pd = get_irq_chip_data(virq); + const struct ps3_private *pd = irq_get_chip_data(virq); pr_debug("%s:%d: ppe_id %llu, thread_id %llu, virq %u\n", __func__, __LINE__, pd->ppe_id, pd->thread_id, virq); - set_irq_chip_data(virq, NULL); + irq_set_chip_data(virq, NULL); irq_dispose_mapping(virq); pr_debug("%s:%d <-\n", __func__, __LINE__); @@ -256,7 +256,7 @@ int ps3_irq_plug_setup(enum ps3_cpu_binding cpu, unsigned long outlet, goto fail_setup; } - pd = get_irq_chip_data(*virq); + pd = irq_get_chip_data(*virq); /* Binds outlet to cpu + virq. 
*/ @@ -291,12 +291,12 @@ EXPORT_SYMBOL_GPL(ps3_irq_plug_setup); int ps3_irq_plug_destroy(unsigned int virq) { int result; - const struct ps3_private *pd = get_irq_chip_data(virq); + const struct ps3_private *pd = irq_get_chip_data(virq); pr_debug("%s:%d: ppe_id %llu, thread_id %llu, virq %u\n", __func__, __LINE__, pd->ppe_id, pd->thread_id, virq); - ps3_chip_mask(virq); + ps3_chip_mask(irq_get_irq_data(virq)); result = lv1_disconnect_irq_plug_ext(pd->ppe_id, pd->thread_id, virq); @@ -357,7 +357,7 @@ int ps3_event_receive_port_destroy(unsigned int virq) pr_debug(" -> %s:%d virq %u\n", __func__, __LINE__, virq); - ps3_chip_mask(virq); + ps3_chip_mask(irq_get_irq_data(virq)); result = lv1_destruct_event_receive_port(virq_to_hw(virq)); @@ -492,7 +492,7 @@ int ps3_io_irq_destroy(unsigned int virq) int result; unsigned long outlet = virq_to_hw(virq); - ps3_chip_mask(virq); + ps3_chip_mask(irq_get_irq_data(virq)); /* * lv1_destruct_io_irq_outlet() will destroy the IRQ plug, @@ -553,7 +553,7 @@ int ps3_vuart_irq_destroy(unsigned int virq) { int result; - ps3_chip_mask(virq); + ps3_chip_mask(irq_get_irq_data(virq)); result = lv1_deconfigure_virtual_uart_irq(); if (result) { @@ -605,7 +605,7 @@ int ps3_spe_irq_destroy(unsigned int virq) { int result; - ps3_chip_mask(virq); + ps3_chip_mask(irq_get_irq_data(virq)); result = ps3_irq_plug_destroy(virq); BUG_ON(result); @@ -661,7 +661,7 @@ static void dump_bmp(struct ps3_private* pd) {}; static void ps3_host_unmap(struct irq_host *h, unsigned int virq) { - set_irq_chip_data(virq, NULL); + irq_set_chip_data(virq, NULL); } static int ps3_host_map(struct irq_host *h, unsigned int virq, @@ -670,7 +670,7 @@ static int ps3_host_map(struct irq_host *h, unsigned int virq, pr_debug("%s:%d: hwirq %lu, virq %u\n", __func__, __LINE__, hwirq, virq); - set_irq_chip_and_handler(virq, &ps3_irq_chip, handle_fasteoi_irq); + irq_set_chip_and_handler(virq, &ps3_irq_chip, handle_fasteoi_irq); return 0; } diff --git a/arch/powerpc/platforms/pseries/cmm.c b/arch/powerpc/platforms/pseries/cmm.c index f4803868642c..3cafc306b971 100644 --- a/arch/powerpc/platforms/pseries/cmm.c +++ b/arch/powerpc/platforms/pseries/cmm.c @@ -508,12 +508,7 @@ static int cmm_memory_isolate_cb(struct notifier_block *self, if (action == MEM_ISOLATE_COUNT) ret = cmm_count_pages(arg); - if (ret) - ret = notifier_from_errno(ret); - else - ret = NOTIFY_OK; - - return ret; + return notifier_from_errno(ret); } static struct notifier_block cmm_mem_isolate_nb = { @@ -635,12 +630,7 @@ static int cmm_memory_cb(struct notifier_block *self, break; } - if (ret) - ret = notifier_from_errno(ret); - else - ret = NOTIFY_OK; - - return ret; + return notifier_from_errno(ret); } static struct notifier_block cmm_mem_nb = { diff --git a/arch/powerpc/platforms/pseries/dlpar.c b/arch/powerpc/platforms/pseries/dlpar.c index b74a9230edc9..57ceb92b2288 100644 --- a/arch/powerpc/platforms/pseries/dlpar.c +++ b/arch/powerpc/platforms/pseries/dlpar.c @@ -74,7 +74,7 @@ static struct device_node *dlpar_parse_cc_node(struct cc_workarea *ccwa) return NULL; /* The configure connector reported name does not contain a - * preceeding '/', so we allocate a buffer large enough to + * preceding '/', so we allocate a buffer large enough to * prepend this to the full_name. 
*/ name = (char *)ccwa + ccwa->name_offset; diff --git a/arch/powerpc/platforms/pseries/eeh.c b/arch/powerpc/platforms/pseries/eeh.c index 17a11c82e6f8..89649173d3a3 100644 --- a/arch/powerpc/platforms/pseries/eeh.c +++ b/arch/powerpc/platforms/pseries/eeh.c @@ -65,7 +65,7 @@ * with EEH. * * Ideally, a PCI device driver, when suspecting that an isolation - * event has occured (e.g. by reading 0xff's), will then ask EEH + * event has occurred (e.g. by reading 0xff's), will then ask EEH * whether this is the case, and then take appropriate steps to * reset the PCI slot, the PCI device, and then resume operations. * However, until that day, the checking is done here, with the @@ -876,7 +876,7 @@ void eeh_restore_bars(struct pci_dn *pdn) * * Save the values of the device bars. Unlike the restore * routine, this routine is *not* recursive. This is because - * PCI devices are added individuallly; but, for the restore, + * PCI devices are added individually; but, for the restore, * an entire slot is reset at a time. */ static void eeh_save_bars(struct pci_dn *pdn) diff --git a/arch/powerpc/platforms/pseries/hotplug-cpu.c b/arch/powerpc/platforms/pseries/hotplug-cpu.c index fd50ccd4bac1..ef8c45489e20 100644 --- a/arch/powerpc/platforms/pseries/hotplug-cpu.c +++ b/arch/powerpc/platforms/pseries/hotplug-cpu.c @@ -216,7 +216,7 @@ static void pseries_cpu_die(unsigned int cpu) cpu, pcpu, cpu_status); } - /* Isolation and deallocation are definatly done by + /* Isolation and deallocation are definitely done by * drslot_chrp_cpu. If they were not they would be * done here. Change isolate state to Isolate and * change allocation-state to Unusable. diff --git a/arch/powerpc/platforms/pseries/hotplug-memory.c b/arch/powerpc/platforms/pseries/hotplug-memory.c index bc8803664140..33867ec4a234 100644 --- a/arch/powerpc/platforms/pseries/hotplug-memory.c +++ b/arch/powerpc/platforms/pseries/hotplug-memory.c @@ -17,6 +17,54 @@ #include <asm/pSeries_reconfig.h> #include <asm/sparsemem.h> +static unsigned long get_memblock_size(void) +{ + struct device_node *np; + unsigned int memblock_size = 0; + + np = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory"); + if (np) { + const unsigned long *size; + + size = of_get_property(np, "ibm,lmb-size", NULL); + memblock_size = size ? *size : 0; + + of_node_put(np); + } else { + unsigned int memzero_size = 0; + const unsigned int *regs; + + np = of_find_node_by_path("/memory@0"); + if (np) { + regs = of_get_property(np, "reg", NULL); + memzero_size = regs ? regs[3] : 0; + of_node_put(np); + } + + if (memzero_size) { + /* We now know the size of memory@0, use this to find + * the first memoryblock and get its size. + */ + char buf[64]; + + sprintf(buf, "/memory@%x", memzero_size); + np = of_find_node_by_path(buf); + if (np) { + regs = of_get_property(np, "reg", NULL); + memblock_size = regs ? 
regs[3] : 0; + of_node_put(np); + } + } + } + + return memblock_size; +} + +unsigned long memory_block_size_bytes(void) +{ + return get_memblock_size(); +} + static int pseries_remove_memblock(unsigned long base, unsigned int memblock_size) { unsigned long start, start_pfn; @@ -127,30 +175,22 @@ static int pseries_add_memory(struct device_node *np) static int pseries_drconf_memory(unsigned long *base, unsigned int action) { - struct device_node *np; - const unsigned long *lmb_size; + unsigned long memblock_size; int rc; - np = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory"); - if (!np) + memblock_size = get_memblock_size(); + if (!memblock_size) return -EINVAL; - lmb_size = of_get_property(np, "ibm,lmb-size", NULL); - if (!lmb_size) { - of_node_put(np); - return -EINVAL; - } - if (action == PSERIES_DRCONF_MEM_ADD) { - rc = memblock_add(*base, *lmb_size); + rc = memblock_add(*base, memblock_size); rc = (rc < 0) ? -EINVAL : 0; } else if (action == PSERIES_DRCONF_MEM_REMOVE) { - rc = pseries_remove_memblock(*base, *lmb_size); + rc = pseries_remove_memblock(*base, memblock_size); } else { rc = -EINVAL; } - of_node_put(np); return rc; } diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c index edea60b7ee90..6d5412a18b26 100644 --- a/arch/powerpc/platforms/pseries/iommu.c +++ b/arch/powerpc/platforms/pseries/iommu.c @@ -33,6 +33,7 @@ #include <linux/pci.h> #include <linux/dma-mapping.h> #include <linux/crash_dump.h> +#include <linux/memory.h> #include <asm/io.h> #include <asm/prom.h> #include <asm/rtas.h> @@ -45,6 +46,7 @@ #include <asm/tce.h> #include <asm/ppc-pci.h> #include <asm/udbg.h> +#include <asm/mmzone.h> #include "plpar_wrappers.h" @@ -270,6 +272,152 @@ static unsigned long tce_get_pSeriesLP(struct iommu_table *tbl, long tcenum) return tce_ret; } +/* this is compatible with cells for the device tree property */ +struct dynamic_dma_window_prop { + __be32 liobn; /* tce table number */ + __be64 dma_base; /* address hi,lo */ + __be32 tce_shift; /* ilog2(tce_page_size) */ + __be32 window_shift; /* ilog2(tce_window_size) */ +}; + +struct direct_window { + struct device_node *device; + const struct dynamic_dma_window_prop *prop; + struct list_head list; +}; + +/* Dynamic DMA Window support */ +struct ddw_query_response { + u32 windows_available; + u32 largest_available_block; + u32 page_size; + u32 migration_capable; +}; + +struct ddw_create_response { + u32 liobn; + u32 addr_hi; + u32 addr_lo; +}; + +static LIST_HEAD(direct_window_list); +/* prevents races between memory on/offline and window creation */ +static DEFINE_SPINLOCK(direct_window_list_lock); +/* protects initializing window twice for same device */ +static DEFINE_MUTEX(direct_window_init_mutex); +#define DIRECT64_PROPNAME "linux,direct64-ddr-window-info" + +static int tce_clearrange_multi_pSeriesLP(unsigned long start_pfn, + unsigned long num_pfn, const void *arg) +{ + const struct dynamic_dma_window_prop *maprange = arg; + int rc; + u64 tce_size, num_tce, dma_offset, next; + u32 tce_shift; + long limit; + + tce_shift = be32_to_cpu(maprange->tce_shift); + tce_size = 1ULL << tce_shift; + next = start_pfn << PAGE_SHIFT; + num_tce = num_pfn << PAGE_SHIFT; + + /* round back to the beginning of the tce page size */ + num_tce += next & (tce_size - 1); + next &= ~(tce_size - 1); + + /* covert to number of tces */ + num_tce |= tce_size - 1; + num_tce >>= tce_shift; + + do { + /* + * Set up the page with TCE data, looping through and setting + * the values. 
+ */ + limit = min_t(long, num_tce, 512); + dma_offset = next + be64_to_cpu(maprange->dma_base); + + rc = plpar_tce_stuff((u64)be32_to_cpu(maprange->liobn), + dma_offset, + 0, limit); + num_tce -= limit; + } while (num_tce > 0 && !rc); + + return rc; +} + +static int tce_setrange_multi_pSeriesLP(unsigned long start_pfn, + unsigned long num_pfn, const void *arg) +{ + const struct dynamic_dma_window_prop *maprange = arg; + u64 *tcep, tce_size, num_tce, dma_offset, next, proto_tce, liobn; + u32 tce_shift; + u64 rc = 0; + long l, limit; + + local_irq_disable(); /* to protect tcep and the page behind it */ + tcep = __get_cpu_var(tce_page); + + if (!tcep) { + tcep = (u64 *)__get_free_page(GFP_ATOMIC); + if (!tcep) { + local_irq_enable(); + return -ENOMEM; + } + __get_cpu_var(tce_page) = tcep; + } + + proto_tce = TCE_PCI_READ | TCE_PCI_WRITE; + + liobn = (u64)be32_to_cpu(maprange->liobn); + tce_shift = be32_to_cpu(maprange->tce_shift); + tce_size = 1ULL << tce_shift; + next = start_pfn << PAGE_SHIFT; + num_tce = num_pfn << PAGE_SHIFT; + + /* round back to the beginning of the tce page size */ + num_tce += next & (tce_size - 1); + next &= ~(tce_size - 1); + + /* covert to number of tces */ + num_tce |= tce_size - 1; + num_tce >>= tce_shift; + + /* We can map max one pageful of TCEs at a time */ + do { + /* + * Set up the page with TCE data, looping through and setting + * the values. + */ + limit = min_t(long, num_tce, 4096/TCE_ENTRY_SIZE); + dma_offset = next + be64_to_cpu(maprange->dma_base); + + for (l = 0; l < limit; l++) { + tcep[l] = proto_tce | next; + next += tce_size; + } + + rc = plpar_tce_put_indirect(liobn, + dma_offset, + (u64)virt_to_abs(tcep), + limit); + + num_tce -= limit; + } while (num_tce > 0 && !rc); + + /* error cleanup: caller will clear whole range */ + + local_irq_enable(); + return rc; +} + +static int tce_setrange_multi_pSeriesLP_walk(unsigned long start_pfn, + unsigned long num_pfn, void *arg) +{ + return tce_setrange_multi_pSeriesLP(start_pfn, num_pfn, arg); +} + + #ifdef CONFIG_PCI static void iommu_table_setparms(struct pci_controller *phb, struct device_node *dn, @@ -495,6 +643,329 @@ static void pci_dma_dev_setup_pSeries(struct pci_dev *dev) pci_name(dev)); } +static int __read_mostly disable_ddw; + +static int __init disable_ddw_setup(char *str) +{ + disable_ddw = 1; + printk(KERN_INFO "ppc iommu: disabling ddw.\n"); + + return 0; +} + +early_param("disable_ddw", disable_ddw_setup); + +static void remove_ddw(struct device_node *np) +{ + struct dynamic_dma_window_prop *dwp; + struct property *win64; + const u32 *ddr_avail; + u64 liobn; + int len, ret; + + ddr_avail = of_get_property(np, "ibm,ddw-applicable", &len); + win64 = of_find_property(np, DIRECT64_PROPNAME, NULL); + if (!win64 || !ddr_avail || len < 3 * sizeof(u32)) + return; + + dwp = win64->value; + liobn = (u64)be32_to_cpu(dwp->liobn); + + /* clear the whole window, note the arg is in kernel pages */ + ret = tce_clearrange_multi_pSeriesLP(0, + 1ULL << (be32_to_cpu(dwp->window_shift) - PAGE_SHIFT), dwp); + if (ret) + pr_warning("%s failed to clear tces in window.\n", + np->full_name); + else + pr_debug("%s successfully cleared tces in window.\n", + np->full_name); + + ret = rtas_call(ddr_avail[2], 1, 1, NULL, liobn); + if (ret) + pr_warning("%s: failed to remove direct window: rtas returned " + "%d to ibm,remove-pe-dma-window(%x) %llx\n", + np->full_name, ret, ddr_avail[2], liobn); + else + pr_debug("%s: successfully removed direct window: rtas returned " + "%d to ibm,remove-pe-dma-window(%x) %llx\n", + 
np->full_name, ret, ddr_avail[2], liobn); +} + + +static int dupe_ddw_if_already_created(struct pci_dev *dev, struct device_node *pdn) +{ + struct device_node *dn; + struct pci_dn *pcidn; + struct direct_window *window; + const struct dynamic_dma_window_prop *direct64; + u64 dma_addr = 0; + + dn = pci_device_to_OF_node(dev); + pcidn = PCI_DN(dn); + spin_lock(&direct_window_list_lock); + /* check if we already created a window and dupe that config if so */ + list_for_each_entry(window, &direct_window_list, list) { + if (window->device == pdn) { + direct64 = window->prop; + dma_addr = direct64->dma_base; + break; + } + } + spin_unlock(&direct_window_list_lock); + + return dma_addr; +} + +static u64 dupe_ddw_if_kexec(struct pci_dev *dev, struct device_node *pdn) +{ + struct device_node *dn; + struct pci_dn *pcidn; + int len; + struct direct_window *window; + const struct dynamic_dma_window_prop *direct64; + u64 dma_addr = 0; + + dn = pci_device_to_OF_node(dev); + pcidn = PCI_DN(dn); + direct64 = of_get_property(pdn, DIRECT64_PROPNAME, &len); + if (direct64) { + window = kzalloc(sizeof(*window), GFP_KERNEL); + if (!window) { + remove_ddw(pdn); + } else { + window->device = pdn; + window->prop = direct64; + spin_lock(&direct_window_list_lock); + list_add(&window->list, &direct_window_list); + spin_unlock(&direct_window_list_lock); + dma_addr = direct64->dma_base; + } + } + + return dma_addr; +} + +static int query_ddw(struct pci_dev *dev, const u32 *ddr_avail, + struct ddw_query_response *query) +{ + struct device_node *dn; + struct pci_dn *pcidn; + u32 cfg_addr; + u64 buid; + int ret; + + /* + * Get the config address and phb buid of the PE window. + * Rely on eeh to retrieve this for us. + * Retrieve them from the pci device, not the node with the + * dma-window property + */ + dn = pci_device_to_OF_node(dev); + pcidn = PCI_DN(dn); + cfg_addr = pcidn->eeh_config_addr; + if (pcidn->eeh_pe_config_addr) + cfg_addr = pcidn->eeh_pe_config_addr; + buid = pcidn->phb->buid; + ret = rtas_call(ddr_avail[0], 3, 5, (u32 *)query, + cfg_addr, BUID_HI(buid), BUID_LO(buid)); + dev_info(&dev->dev, "ibm,query-pe-dma-windows(%x) %x %x %x" + " returned %d\n", ddr_avail[0], cfg_addr, BUID_HI(buid), + BUID_LO(buid), ret); + return ret; +} + +static int create_ddw(struct pci_dev *dev, const u32 *ddr_avail, + struct ddw_create_response *create, int page_shift, + int window_shift) +{ + struct device_node *dn; + struct pci_dn *pcidn; + u32 cfg_addr; + u64 buid; + int ret; + + /* + * Get the config address and phb buid of the PE window. + * Rely on eeh to retrieve this for us. 
+ * Retrieve them from the pci device, not the node with the + * dma-window property + */ + dn = pci_device_to_OF_node(dev); + pcidn = PCI_DN(dn); + cfg_addr = pcidn->eeh_config_addr; + if (pcidn->eeh_pe_config_addr) + cfg_addr = pcidn->eeh_pe_config_addr; + buid = pcidn->phb->buid; + + do { + /* extra outputs are LIOBN and dma-addr (hi, lo) */ + ret = rtas_call(ddr_avail[1], 5, 4, (u32 *)create, cfg_addr, + BUID_HI(buid), BUID_LO(buid), page_shift, window_shift); + } while (rtas_busy_delay(ret)); + dev_info(&dev->dev, + "ibm,create-pe-dma-window(%x) %x %x %x %x %x returned %d " + "(liobn = 0x%x starting addr = %x %x)\n", ddr_avail[1], + cfg_addr, BUID_HI(buid), BUID_LO(buid), page_shift, + window_shift, ret, create->liobn, create->addr_hi, create->addr_lo); + + return ret; +} + +/* + * If the PE supports dynamic dma windows, and there is space for a table + * that can map all pages in a linear offset, then setup such a table, + * and record the dma-offset in the struct device. + * + * dev: the pci device we are checking + * pdn: the parent pe node with the ibm,dma_window property + * Future: also check if we can remap the base window for our base page size + * + * returns the dma offset for use by dma_set_mask + */ +static u64 enable_ddw(struct pci_dev *dev, struct device_node *pdn) +{ + int len, ret; + struct ddw_query_response query; + struct ddw_create_response create; + int page_shift; + u64 dma_addr, max_addr; + struct device_node *dn; + const u32 *uninitialized_var(ddr_avail); + struct direct_window *window; + struct property *uninitialized_var(win64); + struct dynamic_dma_window_prop *ddwprop; + + mutex_lock(&direct_window_init_mutex); + + dma_addr = dupe_ddw_if_already_created(dev, pdn); + if (dma_addr != 0) + goto out_unlock; + + dma_addr = dupe_ddw_if_kexec(dev, pdn); + if (dma_addr != 0) + goto out_unlock; + + /* + * the ibm,ddw-applicable property holds the tokens for: + * ibm,query-pe-dma-window + * ibm,create-pe-dma-window + * ibm,remove-pe-dma-window + * for the given node in that order. + * the property is actually in the parent, not the PE + */ + ddr_avail = of_get_property(pdn, "ibm,ddw-applicable", &len); + if (!ddr_avail || len < 3 * sizeof(u32)) + goto out_unlock; + + /* + * Query if there is a second window of size to map the + * whole partition. Query returns number of windows, largest + * block assigned to PE (partition endpoint), and two bitmasks + * of page sizes: supported and supported for migrate-dma. + */ + dn = pci_device_to_OF_node(dev); + ret = query_ddw(dev, ddr_avail, &query); + if (ret != 0) + goto out_unlock; + + if (query.windows_available == 0) { + /* + * no additional windows are available for this device. + * We might be able to reallocate the existing window, + * trading in for a larger page size. 
+ */ + dev_dbg(&dev->dev, "no free dynamic windows"); + goto out_unlock; + } + if (query.page_size & 4) { + page_shift = 24; /* 16MB */ + } else if (query.page_size & 2) { + page_shift = 16; /* 64kB */ + } else if (query.page_size & 1) { + page_shift = 12; /* 4kB */ + } else { + dev_dbg(&dev->dev, "no supported direct page size in mask %x", + query.page_size); + goto out_unlock; + } + /* verify the window * number of ptes will map the partition */ + /* check largest block * page size > max memory hotplug addr */ + max_addr = memory_hotplug_max(); + if (query.largest_available_block < (max_addr >> page_shift)) { + dev_dbg(&dev->dev, "can't map partiton max 0x%llx with %u " + "%llu-sized pages\n", max_addr, query.largest_available_block, + 1ULL << page_shift); + goto out_unlock; + } + len = order_base_2(max_addr); + win64 = kzalloc(sizeof(struct property), GFP_KERNEL); + if (!win64) { + dev_info(&dev->dev, + "couldn't allocate property for 64bit dma window\n"); + goto out_unlock; + } + win64->name = kstrdup(DIRECT64_PROPNAME, GFP_KERNEL); + win64->value = ddwprop = kmalloc(sizeof(*ddwprop), GFP_KERNEL); + if (!win64->name || !win64->value) { + dev_info(&dev->dev, + "couldn't allocate property name and value\n"); + goto out_free_prop; + } + + ret = create_ddw(dev, ddr_avail, &create, page_shift, len); + if (ret != 0) + goto out_free_prop; + + ddwprop->liobn = cpu_to_be32(create.liobn); + ddwprop->dma_base = cpu_to_be64(of_read_number(&create.addr_hi, 2)); + ddwprop->tce_shift = cpu_to_be32(page_shift); + ddwprop->window_shift = cpu_to_be32(len); + + dev_dbg(&dev->dev, "created tce table LIOBN 0x%x for %s\n", + create.liobn, dn->full_name); + + window = kzalloc(sizeof(*window), GFP_KERNEL); + if (!window) + goto out_clear_window; + + ret = walk_system_ram_range(0, memblock_end_of_DRAM() >> PAGE_SHIFT, + win64->value, tce_setrange_multi_pSeriesLP_walk); + if (ret) { + dev_info(&dev->dev, "failed to map direct window for %s: %d\n", + dn->full_name, ret); + goto out_clear_window; + } + + ret = prom_add_property(pdn, win64); + if (ret) { + dev_err(&dev->dev, "unable to add dma window property for %s: %d", + pdn->full_name, ret); + goto out_clear_window; + } + + window->device = pdn; + window->prop = ddwprop; + spin_lock(&direct_window_list_lock); + list_add(&window->list, &direct_window_list); + spin_unlock(&direct_window_list_lock); + + dma_addr = of_read_number(&create.addr_hi, 2); + goto out_unlock; + +out_clear_window: + remove_ddw(pdn); + +out_free_prop: + kfree(win64->name); + kfree(win64->value); + kfree(win64); + +out_unlock: + mutex_unlock(&direct_window_init_mutex); + return dma_addr; +} + static void pci_dma_dev_setup_pSeriesLP(struct pci_dev *dev) { struct device_node *pdn, *dn; @@ -505,7 +976,7 @@ static void pci_dma_dev_setup_pSeriesLP(struct pci_dev *dev) pr_debug("pci_dma_dev_setup_pSeriesLP: %s\n", pci_name(dev)); /* dev setup for LPAR is a little tricky, since the device tree might - * contain the dma-window properties per-device and not neccesarily + * contain the dma-window properties per-device and not necessarily * for the bus. So we need to search upwards in the tree until we * either hit a dma-window property, OR find a parent with a table * already allocated. 
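
For reference, a minimal sketch (not part of the patch) of the sizing decision enable_ddw() makes from the ibm,query-pe-dma-windows results above: the page_size bitmask is decoded into a TCE page shift, and the new window is only worth creating if largest_available_block TCEs of that size reach memory_hotplug_max(); the length handed to create_ddw() is then order_base_2() of that maximum address. The helper name ddw_page_shift() is hypothetical and exists only for this illustration; the field names are the ddw_query_response members defined earlier in this file.

#include <linux/types.h>

/* hypothetical helper, mirroring the page-size selection in enable_ddw() */
static int ddw_page_shift(u32 page_size_mask)
{
	if (page_size_mask & 4)
		return 24;	/* 16MB TCE pages */
	if (page_size_mask & 2)
		return 16;	/* 64kB TCE pages */
	if (page_size_mask & 1)
		return 12;	/* 4kB TCE pages */
	return -1;		/* no direct-mapping page size supported */
}

/*
 * A window of query.largest_available_block entries, each mapping
 * (1 << page_shift) bytes, must cover memory_hotplug_max() for the
 * 64-bit direct window to be usable.
 */
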
@@ -541,23 +1012,137 @@ static void pci_dma_dev_setup_pSeriesLP(struct pci_dev *dev) set_iommu_table_base(&dev->dev, pci->iommu_table); } + +static int dma_set_mask_pSeriesLP(struct device *dev, u64 dma_mask) +{ + bool ddw_enabled = false; + struct device_node *pdn, *dn; + struct pci_dev *pdev; + const void *dma_window = NULL; + u64 dma_offset; + + if (!dev->dma_mask || !dma_supported(dev, dma_mask)) + return -EIO; + + /* only attempt to use a new window if 64-bit DMA is requested */ + if (!disable_ddw && dma_mask == DMA_BIT_MASK(64)) { + pdev = to_pci_dev(dev); + + dn = pci_device_to_OF_node(pdev); + dev_dbg(dev, "node is %s\n", dn->full_name); + + /* + * the device tree might contain the dma-window properties + * per-device and not necessarily for the bus. So we need to + * search upwards in the tree until we either hit a dma-window + * property, OR find a parent with a table already allocated. + */ + for (pdn = dn; pdn && PCI_DN(pdn) && !PCI_DN(pdn)->iommu_table; + pdn = pdn->parent) { + dma_window = of_get_property(pdn, "ibm,dma-window", NULL); + if (dma_window) + break; + } + if (pdn && PCI_DN(pdn)) { + dma_offset = enable_ddw(pdev, pdn); + if (dma_offset != 0) { + dev_info(dev, "Using 64-bit direct DMA at offset %llx\n", dma_offset); + set_dma_offset(dev, dma_offset); + set_dma_ops(dev, &dma_direct_ops); + ddw_enabled = true; + } + } + } + + /* fall-through to iommu ops */ + if (!ddw_enabled) { + dev_info(dev, "Using 32-bit DMA via iommu\n"); + set_dma_ops(dev, &dma_iommu_ops); + } + + *dev->dma_mask = dma_mask; + return 0; +} + #else /* CONFIG_PCI */ #define pci_dma_bus_setup_pSeries NULL #define pci_dma_dev_setup_pSeries NULL #define pci_dma_bus_setup_pSeriesLP NULL #define pci_dma_dev_setup_pSeriesLP NULL +#define dma_set_mask_pSeriesLP NULL #endif /* !CONFIG_PCI */ +static int iommu_mem_notifier(struct notifier_block *nb, unsigned long action, + void *data) +{ + struct direct_window *window; + struct memory_notify *arg = data; + int ret = 0; + + switch (action) { + case MEM_GOING_ONLINE: + spin_lock(&direct_window_list_lock); + list_for_each_entry(window, &direct_window_list, list) { + ret |= tce_setrange_multi_pSeriesLP(arg->start_pfn, + arg->nr_pages, window->prop); + /* XXX log error */ + } + spin_unlock(&direct_window_list_lock); + break; + case MEM_CANCEL_ONLINE: + case MEM_OFFLINE: + spin_lock(&direct_window_list_lock); + list_for_each_entry(window, &direct_window_list, list) { + ret |= tce_clearrange_multi_pSeriesLP(arg->start_pfn, + arg->nr_pages, window->prop); + /* XXX log error */ + } + spin_unlock(&direct_window_list_lock); + break; + default: + break; + } + if (ret && action != MEM_CANCEL_ONLINE) + return NOTIFY_BAD; + + return NOTIFY_OK; +} + +static struct notifier_block iommu_mem_nb = { + .notifier_call = iommu_mem_notifier, +}; + static int iommu_reconfig_notifier(struct notifier_block *nb, unsigned long action, void *node) { int err = NOTIFY_OK; struct device_node *np = node; struct pci_dn *pci = PCI_DN(np); + struct direct_window *window; switch (action) { case PSERIES_RECONFIG_REMOVE: if (pci && pci->iommu_table) iommu_free_table(pci->iommu_table, np->full_name); + + spin_lock(&direct_window_list_lock); + list_for_each_entry(window, &direct_window_list, list) { + if (window->device == np) { + list_del(&window->list); + kfree(window); + break; + } + } + spin_unlock(&direct_window_list_lock); + + /* + * Because the notifier runs after isolation of the + * slot, we are guaranteed any DMA window has already + * been revoked and the TCEs have been marked invalid, + 
* so we don't need a call to remove_ddw(np). However, + * if an additional notifier action is added before the + * isolate call, we should update this code for + * completeness with such a call. + */ break; default: err = NOTIFY_DONE; @@ -587,6 +1172,7 @@ void iommu_init_early_pSeries(void) ppc_md.tce_get = tce_get_pSeriesLP; ppc_md.pci_dma_bus_setup = pci_dma_bus_setup_pSeriesLP; ppc_md.pci_dma_dev_setup = pci_dma_dev_setup_pSeriesLP; + ppc_md.dma_set_mask = dma_set_mask_pSeriesLP; } else { ppc_md.tce_build = tce_build_pSeries; ppc_md.tce_free = tce_free_pSeries; @@ -597,6 +1183,7 @@ void iommu_init_early_pSeries(void) pSeries_reconfig_notifier_register(&iommu_reconfig_nb); + register_memory_notifier(&iommu_mem_nb); set_pci_dma_ops(&dma_iommu_ops); } diff --git a/arch/powerpc/platforms/pseries/msi.c b/arch/powerpc/platforms/pseries/msi.c index 1164c3430f2c..38d24e7e7bb1 100644 --- a/arch/powerpc/platforms/pseries/msi.c +++ b/arch/powerpc/platforms/pseries/msi.c @@ -93,8 +93,18 @@ static void rtas_disable_msi(struct pci_dev *pdev) if (!pdn) return; - if (rtas_change_msi(pdn, RTAS_CHANGE_FN, 0) != 0) - pr_debug("rtas_msi: Setting MSIs to 0 failed!\n"); + /* + * disabling MSI with the explicit interface also disables MSI-X + */ + if (rtas_change_msi(pdn, RTAS_CHANGE_MSI_FN, 0) != 0) { + /* + * may have failed because explicit interface is not + * present + */ + if (rtas_change_msi(pdn, RTAS_CHANGE_FN, 0) != 0) { + pr_debug("rtas_msi: Setting MSIs to 0 failed!\n"); + } + } } static int rtas_query_irq_number(struct pci_dn *pdn, int offset) @@ -127,7 +137,7 @@ static void rtas_teardown_msi_irqs(struct pci_dev *pdev) if (entry->irq == NO_IRQ) continue; - set_irq_msi(entry->irq, NULL); + irq_set_msi_desc(entry->irq, NULL); irq_dispose_mapping(entry->irq); } @@ -427,7 +437,7 @@ static int rtas_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type) } dev_dbg(&pdev->dev, "rtas_msi: allocated virq %d\n", virq); - set_irq_msi(virq, entry); + irq_set_msi_desc(virq, entry); /* Read config space back so we can restore after reset */ read_msi_msg(virq, &msg); diff --git a/arch/powerpc/platforms/pseries/nvram.c b/arch/powerpc/platforms/pseries/nvram.c index 7e828ba29bc3..00cc3a094885 100644 --- a/arch/powerpc/platforms/pseries/nvram.c +++ b/arch/powerpc/platforms/pseries/nvram.c @@ -16,6 +16,8 @@ #include <linux/errno.h> #include <linux/init.h> #include <linux/spinlock.h> +#include <linux/slab.h> +#include <linux/kmsg_dump.h> #include <asm/uaccess.h> #include <asm/nvram.h> #include <asm/rtas.h> @@ -30,17 +32,54 @@ static int nvram_fetch, nvram_store; static char nvram_buf[NVRW_CNT]; /* assume this is in the first 4GB */ static DEFINE_SPINLOCK(nvram_lock); -static long nvram_error_log_index = -1; -static long nvram_error_log_size = 0; - struct err_log_info { int error_type; unsigned int seq_num; }; -#define NVRAM_MAX_REQ 2079 -#define NVRAM_MIN_REQ 1055 -#define NVRAM_LOG_PART_NAME "ibm,rtas-log" +struct nvram_os_partition { + const char *name; + int req_size; /* desired size, in bytes */ + int min_size; /* minimum acceptable size (0 means req_size) */ + long size; /* size of data portion (excluding err_log_info) */ + long index; /* offset of data portion of partition */ +}; + +static struct nvram_os_partition rtas_log_partition = { + .name = "ibm,rtas-log", + .req_size = 2079, + .min_size = 1055, + .index = -1 +}; + +static struct nvram_os_partition oops_log_partition = { + .name = "lnx,oops-log", + .req_size = 4000, + .min_size = 2000, + .index = -1 +}; + +static const char 
*pseries_nvram_os_partitions[] = { + "ibm,rtas-log", + "lnx,oops-log", + NULL +}; + +static void oops_to_nvram(struct kmsg_dumper *dumper, + enum kmsg_dump_reason reason, + const char *old_msgs, unsigned long old_len, + const char *new_msgs, unsigned long new_len); + +static struct kmsg_dumper nvram_kmsg_dumper = { + .dump = oops_to_nvram +}; + +/* See clobbering_unread_rtas_event() */ +#define NVRAM_RTAS_READ_TIMEOUT 5 /* seconds */ +static unsigned long last_unread_rtas_event; /* timestamp */ + +/* We preallocate oops_buf during init to avoid kmalloc during oops/panic. */ +static char *oops_buf; static ssize_t pSeries_nvram_read(char *buf, size_t count, loff_t *index) { @@ -134,7 +173,7 @@ static ssize_t pSeries_nvram_get_size(void) } -/* nvram_write_error_log +/* nvram_write_os_partition, nvram_write_error_log * * We need to buffer the error logs into nvram to ensure that we have * the failure information to decode. If we have a severe error there @@ -156,48 +195,58 @@ static ssize_t pSeries_nvram_get_size(void) * The 'data' section would look like (in bytes): * +--------------+------------+-----------------------------------+ * | event_logged | sequence # | error log | - * |0 3|4 7|8 nvram_error_log_size-1| + * |0 3|4 7|8 error_log_size-1| * +--------------+------------+-----------------------------------+ * * event_logged: 0 if event has not been logged to syslog, 1 if it has * sequence #: The unique sequence # for each event. (until it wraps) * error log: The error log from event_scan */ -int nvram_write_error_log(char * buff, int length, - unsigned int err_type, unsigned int error_log_cnt) +int nvram_write_os_partition(struct nvram_os_partition *part, char * buff, + int length, unsigned int err_type, unsigned int error_log_cnt) { int rc; loff_t tmp_index; struct err_log_info info; - if (nvram_error_log_index == -1) { + if (part->index == -1) { return -ESPIPE; } - if (length > nvram_error_log_size) { - length = nvram_error_log_size; + if (length > part->size) { + length = part->size; } info.error_type = err_type; info.seq_num = error_log_cnt; - tmp_index = nvram_error_log_index; + tmp_index = part->index; rc = ppc_md.nvram_write((char *)&info, sizeof(struct err_log_info), &tmp_index); if (rc <= 0) { - printk(KERN_ERR "nvram_write_error_log: Failed nvram_write (%d)\n", rc); + pr_err("%s: Failed nvram_write (%d)\n", __FUNCTION__, rc); return rc; } rc = ppc_md.nvram_write(buff, length, &tmp_index); if (rc <= 0) { - printk(KERN_ERR "nvram_write_error_log: Failed nvram_write (%d)\n", rc); + pr_err("%s: Failed nvram_write (%d)\n", __FUNCTION__, rc); return rc; } return 0; } +int nvram_write_error_log(char * buff, int length, + unsigned int err_type, unsigned int error_log_cnt) +{ + int rc = nvram_write_os_partition(&rtas_log_partition, buff, length, + err_type, error_log_cnt); + if (!rc) + last_unread_rtas_event = get_seconds(); + return rc; +} + /* nvram_read_error_log * * Reads nvram for error log for at most 'length' @@ -209,13 +258,13 @@ int nvram_read_error_log(char * buff, int length, loff_t tmp_index; struct err_log_info info; - if (nvram_error_log_index == -1) + if (rtas_log_partition.index == -1) return -1; - if (length > nvram_error_log_size) - length = nvram_error_log_size; + if (length > rtas_log_partition.size) + length = rtas_log_partition.size; - tmp_index = nvram_error_log_index; + tmp_index = rtas_log_partition.index; rc = ppc_md.nvram_read((char *)&info, sizeof(struct err_log_info), &tmp_index); if (rc <= 0) { @@ -244,37 +293,40 @@ int nvram_clear_error_log(void) int 
clear_word = ERR_FLAG_ALREADY_LOGGED; int rc; - if (nvram_error_log_index == -1) + if (rtas_log_partition.index == -1) return -1; - tmp_index = nvram_error_log_index; + tmp_index = rtas_log_partition.index; rc = ppc_md.nvram_write((char *)&clear_word, sizeof(int), &tmp_index); if (rc <= 0) { printk(KERN_ERR "nvram_clear_error_log: Failed nvram_write (%d)\n", rc); return rc; } + last_unread_rtas_event = 0; return 0; } -/* pseries_nvram_init_log_partition +/* pseries_nvram_init_os_partition * - * This will setup the partition we need for buffering the - * error logs and cleanup partitions if needed. + * This sets up a partition with an "OS" signature. * * The general strategy is the following: - * 1.) If there is log partition large enough then use it. - * 2.) If there is none large enough, search - * for a free partition that is large enough. - * 3.) If there is not a free partition large enough remove - * _all_ OS partitions and consolidate the space. - * 4.) Will first try getting a chunk that will satisfy the maximum - * error log size (NVRAM_MAX_REQ). - * 5.) If the max chunk cannot be allocated then try finding a chunk - * that will satisfy the minum needed (NVRAM_MIN_REQ). + * 1.) If a partition with the indicated name already exists... + * - If it's large enough, use it. + * - Otherwise, recycle it and keep going. + * 2.) Search for a free partition that is large enough. + * 3.) If there's not a free partition large enough, recycle any obsolete + * OS partitions and try again. + * 4.) Will first try getting a chunk that will satisfy the requested size. + * 5.) If a chunk of the requested size cannot be allocated, then try finding + * a chunk that will satisfy the minum needed. + * + * Returns 0 on success, else -1. */ -static int __init pseries_nvram_init_log_partition(void) +static int __init pseries_nvram_init_os_partition(struct nvram_os_partition + *part) { loff_t p; int size; @@ -282,47 +334,76 @@ static int __init pseries_nvram_init_log_partition(void) /* Scan nvram for partitions */ nvram_scan_partitions(); - /* Lookg for ours */ - p = nvram_find_partition(NVRAM_LOG_PART_NAME, NVRAM_SIG_OS, &size); + /* Look for ours */ + p = nvram_find_partition(part->name, NVRAM_SIG_OS, &size); /* Found one but too small, remove it */ - if (p && size < NVRAM_MIN_REQ) { - pr_info("nvram: Found too small "NVRAM_LOG_PART_NAME" partition" - ",removing it..."); - nvram_remove_partition(NVRAM_LOG_PART_NAME, NVRAM_SIG_OS); + if (p && size < part->min_size) { + pr_info("nvram: Found too small %s partition," + " removing it...\n", part->name); + nvram_remove_partition(part->name, NVRAM_SIG_OS, NULL); p = 0; } /* Create one if we didn't find */ if (!p) { - p = nvram_create_partition(NVRAM_LOG_PART_NAME, NVRAM_SIG_OS, - NVRAM_MAX_REQ, NVRAM_MIN_REQ); - /* No room for it, try to get rid of any OS partition - * and try again - */ + p = nvram_create_partition(part->name, NVRAM_SIG_OS, + part->req_size, part->min_size); if (p == -ENOSPC) { - pr_info("nvram: No room to create "NVRAM_LOG_PART_NAME - " partition, deleting all OS partitions..."); - nvram_remove_partition(NULL, NVRAM_SIG_OS); - p = nvram_create_partition(NVRAM_LOG_PART_NAME, - NVRAM_SIG_OS, NVRAM_MAX_REQ, - NVRAM_MIN_REQ); + pr_info("nvram: No room to create %s partition, " + "deleting any obsolete OS partitions...\n", + part->name); + nvram_remove_partition(NULL, NVRAM_SIG_OS, + pseries_nvram_os_partitions); + p = nvram_create_partition(part->name, NVRAM_SIG_OS, + part->req_size, part->min_size); } } if (p <= 0) { - pr_err("nvram: Failed to 
find or create "NVRAM_LOG_PART_NAME - " partition, err %d\n", (int)p); - return 0; + pr_err("nvram: Failed to find or create %s" + " partition, err %d\n", part->name, (int)p); + return -1; } - nvram_error_log_index = p; - nvram_error_log_size = nvram_get_partition_size(p) - - sizeof(struct err_log_info); + part->index = p; + part->size = nvram_get_partition_size(p) - sizeof(struct err_log_info); return 0; } -machine_arch_initcall(pseries, pseries_nvram_init_log_partition); + +static void __init nvram_init_oops_partition(int rtas_partition_exists) +{ + int rc; + + rc = pseries_nvram_init_os_partition(&oops_log_partition); + if (rc != 0) { + if (!rtas_partition_exists) + return; + pr_notice("nvram: Using %s partition to log both" + " RTAS errors and oops/panic reports\n", + rtas_log_partition.name); + memcpy(&oops_log_partition, &rtas_log_partition, + sizeof(rtas_log_partition)); + } + oops_buf = kmalloc(oops_log_partition.size, GFP_KERNEL); + rc = kmsg_dump_register(&nvram_kmsg_dumper); + if (rc != 0) { + pr_err("nvram: kmsg_dump_register() failed; returned %d\n", rc); + kfree(oops_buf); + return; + } +} + +static int __init pseries_nvram_init_log_partitions(void) +{ + int rc; + + rc = pseries_nvram_init_os_partition(&rtas_log_partition); + nvram_init_oops_partition(rc == 0); + return 0; +} +machine_arch_initcall(pseries, pseries_nvram_init_log_partitions); int __init pSeries_nvram_init(void) { @@ -353,3 +434,83 @@ int __init pSeries_nvram_init(void) return 0; } + +/* + * Try to capture the last capture_len bytes of the printk buffer. Return + * the amount actually captured. + */ +static size_t capture_last_msgs(const char *old_msgs, size_t old_len, + const char *new_msgs, size_t new_len, + char *captured, size_t capture_len) +{ + if (new_len >= capture_len) { + memcpy(captured, new_msgs + (new_len - capture_len), + capture_len); + return capture_len; + } else { + /* Grab the end of old_msgs. */ + size_t old_tail_len = min(old_len, capture_len - new_len); + memcpy(captured, old_msgs + (old_len - old_tail_len), + old_tail_len); + memcpy(captured + old_tail_len, new_msgs, new_len); + return old_tail_len + new_len; + } +} + +/* + * Are we using the ibm,rtas-log for oops/panic reports? And if so, + * would logging this oops/panic overwrite an RTAS event that rtas_errd + * hasn't had a chance to read and process? Return 1 if so, else 0. + * + * We assume that if rtas_errd hasn't read the RTAS event in + * NVRAM_RTAS_READ_TIMEOUT seconds, it's probably not going to. + */ +static int clobbering_unread_rtas_event(void) +{ + return (oops_log_partition.index == rtas_log_partition.index + && last_unread_rtas_event + && get_seconds() - last_unread_rtas_event <= + NVRAM_RTAS_READ_TIMEOUT); +} + +/* our kmsg_dump callback */ +static void oops_to_nvram(struct kmsg_dumper *dumper, + enum kmsg_dump_reason reason, + const char *old_msgs, unsigned long old_len, + const char *new_msgs, unsigned long new_len) +{ + static unsigned int oops_count = 0; + static bool panicking = false; + size_t text_len; + + switch (reason) { + case KMSG_DUMP_RESTART: + case KMSG_DUMP_HALT: + case KMSG_DUMP_POWEROFF: + /* These are almost always orderly shutdowns. */ + return; + case KMSG_DUMP_OOPS: + case KMSG_DUMP_KEXEC: + break; + case KMSG_DUMP_PANIC: + panicking = true; + break; + case KMSG_DUMP_EMERG: + if (panicking) + /* Panic report already captured. 
*/ + return; + break; + default: + pr_err("%s: ignoring unrecognized KMSG_DUMP_* reason %d\n", + __FUNCTION__, (int) reason); + return; + } + + if (clobbering_unread_rtas_event()) + return; + + text_len = capture_last_msgs(old_msgs, old_len, new_msgs, new_len, + oops_buf, oops_log_partition.size); + (void) nvram_write_os_partition(&oops_log_partition, oops_buf, + (int) text_len, ERR_TYPE_KERNEL_PANIC, ++oops_count); +} diff --git a/arch/powerpc/platforms/pseries/offline_states.h b/arch/powerpc/platforms/pseries/offline_states.h index 75a6f480d931..08672d9136ab 100644 --- a/arch/powerpc/platforms/pseries/offline_states.h +++ b/arch/powerpc/platforms/pseries/offline_states.h @@ -34,6 +34,4 @@ static inline void set_default_offline_state(int cpu) #endif extern enum cpu_state_vals get_preferred_offline_state(int cpu); -extern int start_secondary(void); -extern void start_secondary_resume(void); #endif diff --git a/arch/powerpc/platforms/pseries/pci_dlpar.c b/arch/powerpc/platforms/pseries/pci_dlpar.c index 5fcc92a12d3e..3bf4488aaec6 100644 --- a/arch/powerpc/platforms/pseries/pci_dlpar.c +++ b/arch/powerpc/platforms/pseries/pci_dlpar.c @@ -149,7 +149,7 @@ struct pci_controller * __devinit init_phb_dynamic(struct device_node *dn) if (dn->child) eeh_add_device_tree_early(dn); - pcibios_scan_phb(phb, dn); + pcibios_scan_phb(phb); pcibios_finish_adding_to_bus(phb->bus); return phb; diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c index d345bfd56bbe..6c42cfde8415 100644 --- a/arch/powerpc/platforms/pseries/setup.c +++ b/arch/powerpc/platforms/pseries/setup.c @@ -114,10 +114,13 @@ static void __init fwnmi_init(void) static void pseries_8259_cascade(unsigned int irq, struct irq_desc *desc) { + struct irq_chip *chip = irq_desc_get_chip(desc); unsigned int cascade_irq = i8259_irq(); + if (cascade_irq != NO_IRQ) generic_handle_irq(cascade_irq); - desc->chip->eoi(irq); + + chip->irq_eoi(&desc->irq_data); } static void __init pseries_setup_i8259_cascade(void) @@ -166,7 +169,7 @@ static void __init pseries_setup_i8259_cascade(void) printk(KERN_DEBUG "pic: PCI 8259 intack at 0x%016lx\n", intack); i8259_init(found, intack); of_node_put(found); - set_irq_chained_handler(cascade, pseries_8259_cascade); + irq_set_chained_handler(cascade, pseries_8259_cascade); } static void __init pseries_mpic_init_IRQ(void) @@ -284,14 +287,22 @@ static int alloc_dispatch_logs(void) int cpu, ret; struct paca_struct *pp; struct dtl_entry *dtl; + struct kmem_cache *dtl_cache; if (!firmware_has_feature(FW_FEATURE_SPLPAR)) return 0; + dtl_cache = kmem_cache_create("dtl", DISPATCH_LOG_BYTES, + DISPATCH_LOG_BYTES, 0, NULL); + if (!dtl_cache) { + pr_warn("Failed to create dispatch trace log buffer cache\n"); + pr_warn("Stolen time statistics will be unreliable\n"); + return 0; + } + for_each_possible_cpu(cpu) { pp = &paca[cpu]; - dtl = kmalloc_node(DISPATCH_LOG_BYTES, GFP_KERNEL, - cpu_to_node(cpu)); + dtl = kmem_cache_alloc(dtl_cache, GFP_KERNEL); if (!dtl) { pr_warn("Failed to allocate dispatch trace log for cpu %d\n", cpu); @@ -375,7 +386,7 @@ static int __init pSeries_init_panel(void) return 0; } -arch_initcall(pSeries_init_panel); +machine_arch_initcall(pseries, pSeries_init_panel); static int pseries_set_dabr(unsigned long dabr) { diff --git a/arch/powerpc/platforms/pseries/smp.c b/arch/powerpc/platforms/pseries/smp.c index 0317cce877c6..a509c5292a67 100644 --- a/arch/powerpc/platforms/pseries/smp.c +++ b/arch/powerpc/platforms/pseries/smp.c @@ -64,8 +64,8 @@ int 
smp_query_cpu_stopped(unsigned int pcpu) int qcss_tok = rtas_token("query-cpu-stopped-state"); if (qcss_tok == RTAS_UNKNOWN_SERVICE) { - printk(KERN_INFO "Firmware doesn't support " - "query-cpu-stopped-state\n"); + printk_once(KERN_INFO + "Firmware doesn't support query-cpu-stopped-state\n"); return QCSS_HARDWARE_ERROR; } @@ -112,10 +112,10 @@ static inline int __devinit smp_startup_cpu(unsigned int lcpu) /* Fixup atomic count: it exited inside IRQ handler. */ task_thread_info(paca[lcpu].__current)->preempt_count = 0; - +#ifdef CONFIG_HOTPLUG_CPU if (get_cpu_current_state(lcpu) == CPU_STATE_INACTIVE) goto out; - +#endif /* * If the RTAS start-cpu token does not exist then presume the * cpu is already spinning. @@ -130,7 +130,9 @@ static inline int __devinit smp_startup_cpu(unsigned int lcpu) return 0; } +#ifdef CONFIG_HOTPLUG_CPU out: +#endif return 1; } @@ -144,16 +146,15 @@ static void __devinit smp_xics_setup_cpu(int cpu) vpa_init(cpu); cpumask_clear_cpu(cpu, of_spin_mask); +#ifdef CONFIG_HOTPLUG_CPU set_cpu_current_state(cpu, CPU_STATE_ONLINE); set_default_offline_state(cpu); - +#endif } #endif /* CONFIG_XICS */ static void __devinit smp_pSeries_kick_cpu(int nr) { - long rc; - unsigned long hcpuid; BUG_ON(nr < 0 || nr >= NR_CPUS); if (!smp_startup_cpu(nr)) @@ -165,16 +166,20 @@ static void __devinit smp_pSeries_kick_cpu(int nr) * the processor will continue on to secondary_start */ paca[nr].cpu_start = 1; - +#ifdef CONFIG_HOTPLUG_CPU set_preferred_offline_state(nr, CPU_STATE_ONLINE); if (get_cpu_current_state(nr) == CPU_STATE_INACTIVE) { + long rc; + unsigned long hcpuid; + hcpuid = get_hard_smp_processor_id(nr); rc = plpar_hcall_norets(H_PROD, hcpuid); if (rc != H_SUCCESS) printk(KERN_ERR "Error: Prod to wake up processor %d " "Ret= %ld\n", nr, rc); } +#endif } static int smp_pSeries_cpu_bootable(unsigned int nr) diff --git a/arch/powerpc/platforms/pseries/xics.c b/arch/powerpc/platforms/pseries/xics.c index 7b96e5a270ce..d6901334d66e 100644 --- a/arch/powerpc/platforms/pseries/xics.c +++ b/arch/powerpc/platforms/pseries/xics.c @@ -202,88 +202,88 @@ static int get_irq_server(unsigned int virq, const struct cpumask *cpumask, #define get_irq_server(virq, cpumask, strict_check) (default_server) #endif -static void xics_unmask_irq(unsigned int virq) +static void xics_unmask_irq(struct irq_data *d) { - unsigned int irq; + unsigned int hwirq; int call_status; int server; - pr_devel("xics: unmask virq %d\n", virq); + pr_devel("xics: unmask virq %d\n", d->irq); - irq = (unsigned int)irq_map[virq].hwirq; - pr_devel(" -> map to hwirq 0x%x\n", irq); - if (irq == XICS_IPI || irq == XICS_IRQ_SPURIOUS) + hwirq = (unsigned int)irq_map[d->irq].hwirq; + pr_devel(" -> map to hwirq 0x%x\n", hwirq); + if (hwirq == XICS_IPI || hwirq == XICS_IRQ_SPURIOUS) return; - server = get_irq_server(virq, irq_to_desc(virq)->affinity, 0); + server = get_irq_server(d->irq, d->affinity, 0); - call_status = rtas_call(ibm_set_xive, 3, 1, NULL, irq, server, + call_status = rtas_call(ibm_set_xive, 3, 1, NULL, hwirq, server, DEFAULT_PRIORITY); if (call_status != 0) { printk(KERN_ERR "%s: ibm_set_xive irq %u server %x returned %d\n", - __func__, irq, server, call_status); + __func__, hwirq, server, call_status); return; } /* Now unmask the interrupt (often a no-op) */ - call_status = rtas_call(ibm_int_on, 1, 1, NULL, irq); + call_status = rtas_call(ibm_int_on, 1, 1, NULL, hwirq); if (call_status != 0) { printk(KERN_ERR "%s: ibm_int_on irq=%u returned %d\n", - __func__, irq, call_status); + __func__, hwirq, call_status); 
return; } } -static unsigned int xics_startup(unsigned int virq) +static unsigned int xics_startup(struct irq_data *d) { /* * The generic MSI code returns with the interrupt disabled on the * card, using the MSI mask bits. Firmware doesn't appear to unmask * at that level, so we do it here by hand. */ - if (irq_to_desc(virq)->msi_desc) - unmask_msi_irq(irq_get_irq_data(virq)); + if (d->msi_desc) + unmask_msi_irq(d); /* unmask it */ - xics_unmask_irq(virq); + xics_unmask_irq(d); return 0; } -static void xics_mask_real_irq(unsigned int irq) +static void xics_mask_real_irq(unsigned int hwirq) { int call_status; - if (irq == XICS_IPI) + if (hwirq == XICS_IPI) return; - call_status = rtas_call(ibm_int_off, 1, 1, NULL, irq); + call_status = rtas_call(ibm_int_off, 1, 1, NULL, hwirq); if (call_status != 0) { printk(KERN_ERR "%s: ibm_int_off irq=%u returned %d\n", - __func__, irq, call_status); + __func__, hwirq, call_status); return; } /* Have to set XIVE to 0xff to be able to remove a slot */ - call_status = rtas_call(ibm_set_xive, 3, 1, NULL, irq, + call_status = rtas_call(ibm_set_xive, 3, 1, NULL, hwirq, default_server, 0xff); if (call_status != 0) { printk(KERN_ERR "%s: ibm_set_xive(0xff) irq=%u returned %d\n", - __func__, irq, call_status); + __func__, hwirq, call_status); return; } } -static void xics_mask_irq(unsigned int virq) +static void xics_mask_irq(struct irq_data *d) { - unsigned int irq; + unsigned int hwirq; - pr_devel("xics: mask virq %d\n", virq); + pr_devel("xics: mask virq %d\n", d->irq); - irq = (unsigned int)irq_map[virq].hwirq; - if (irq == XICS_IPI || irq == XICS_IRQ_SPURIOUS) + hwirq = (unsigned int)irq_map[d->irq].hwirq; + if (hwirq == XICS_IPI || hwirq == XICS_IRQ_SPURIOUS) return; - xics_mask_real_irq(irq); + xics_mask_real_irq(hwirq); } static void xics_mask_unknown_vec(unsigned int vec) @@ -371,57 +371,58 @@ static unsigned char pop_cppr(void) return os_cppr->stack[--os_cppr->index]; } -static void xics_eoi_direct(unsigned int virq) +static void xics_eoi_direct(struct irq_data *d) { - unsigned int irq = (unsigned int)irq_map[virq].hwirq; + unsigned int hwirq = (unsigned int)irq_map[d->irq].hwirq; iosync(); - direct_xirr_info_set((pop_cppr() << 24) | irq); + direct_xirr_info_set((pop_cppr() << 24) | hwirq); } -static void xics_eoi_lpar(unsigned int virq) +static void xics_eoi_lpar(struct irq_data *d) { - unsigned int irq = (unsigned int)irq_map[virq].hwirq; + unsigned int hwirq = (unsigned int)irq_map[d->irq].hwirq; iosync(); - lpar_xirr_info_set((pop_cppr() << 24) | irq); + lpar_xirr_info_set((pop_cppr() << 24) | hwirq); } -static int xics_set_affinity(unsigned int virq, const struct cpumask *cpumask) +static int +xics_set_affinity(struct irq_data *d, const struct cpumask *cpumask, bool force) { - unsigned int irq; + unsigned int hwirq; int status; int xics_status[2]; int irq_server; - irq = (unsigned int)irq_map[virq].hwirq; - if (irq == XICS_IPI || irq == XICS_IRQ_SPURIOUS) + hwirq = (unsigned int)irq_map[d->irq].hwirq; + if (hwirq == XICS_IPI || hwirq == XICS_IRQ_SPURIOUS) return -1; - status = rtas_call(ibm_get_xive, 1, 3, xics_status, irq); + status = rtas_call(ibm_get_xive, 1, 3, xics_status, hwirq); if (status) { printk(KERN_ERR "%s: ibm,get-xive irq=%u returns %d\n", - __func__, irq, status); + __func__, hwirq, status); return -1; } - irq_server = get_irq_server(virq, cpumask, 1); + irq_server = get_irq_server(d->irq, cpumask, 1); if (irq_server == -1) { char cpulist[128]; cpumask_scnprintf(cpulist, sizeof(cpulist), cpumask); printk(KERN_WARNING "%s: No online 
cpus in the mask %s for irq %d\n", - __func__, cpulist, virq); + __func__, cpulist, d->irq); return -1; } status = rtas_call(ibm_set_xive, 3, 1, NULL, - irq, irq_server, xics_status[1]); + hwirq, irq_server, xics_status[1]); if (status) { printk(KERN_ERR "%s: ibm,set-xive irq=%u returns %d\n", - __func__, irq, status); + __func__, hwirq, status); return -1; } @@ -430,20 +431,20 @@ static int xics_set_affinity(unsigned int virq, const struct cpumask *cpumask) static struct irq_chip xics_pic_direct = { .name = "XICS", - .startup = xics_startup, - .mask = xics_mask_irq, - .unmask = xics_unmask_irq, - .eoi = xics_eoi_direct, - .set_affinity = xics_set_affinity + .irq_startup = xics_startup, + .irq_mask = xics_mask_irq, + .irq_unmask = xics_unmask_irq, + .irq_eoi = xics_eoi_direct, + .irq_set_affinity = xics_set_affinity }; static struct irq_chip xics_pic_lpar = { .name = "XICS", - .startup = xics_startup, - .mask = xics_mask_irq, - .unmask = xics_unmask_irq, - .eoi = xics_eoi_lpar, - .set_affinity = xics_set_affinity + .irq_startup = xics_startup, + .irq_mask = xics_mask_irq, + .irq_unmask = xics_unmask_irq, + .irq_eoi = xics_eoi_lpar, + .irq_set_affinity = xics_set_affinity }; @@ -469,8 +470,8 @@ static int xics_host_map(struct irq_host *h, unsigned int virq, /* Insert the interrupt mapping into the radix tree for fast lookup */ irq_radix_revmap_insert(xics_host, virq, hw); - irq_to_desc(virq)->status |= IRQ_LEVEL; - set_irq_chip_and_handler(virq, xics_irq_chip, handle_fasteoi_irq); + irq_set_status_flags(virq, IRQ_LEVEL); + irq_set_chip_and_handler(virq, xics_irq_chip, handle_fasteoi_irq); return 0; } @@ -599,7 +600,7 @@ static void xics_request_ipi(void) * IPIs are marked IRQF_DISABLED as they must run with irqs * disabled */ - set_irq_handler(ipi, handle_percpu_irq); + irq_set_handler(ipi, handle_percpu_irq); if (firmware_has_feature(FW_FEATURE_LPAR)) rc = request_irq(ipi, xics_ipi_action_lpar, IRQF_DISABLED|IRQF_PERCPU, "IPI", NULL); @@ -873,7 +874,7 @@ void xics_kexec_teardown_cpu(int secondary) void xics_migrate_irqs_away(void) { int cpu = smp_processor_id(), hw_cpu = hard_smp_processor_id(); - unsigned int irq, virq; + int virq; /* If we used to be the default server, move to the new "boot_cpuid" */ if (hw_cpu == default_server) @@ -890,33 +891,38 @@ void xics_migrate_irqs_away(void) for_each_irq(virq) { struct irq_desc *desc; + struct irq_chip *chip; + unsigned int hwirq; int xics_status[2]; int status; unsigned long flags; - /* We cant set affinity on ISA interrupts */ + /* We can't set affinity on ISA interrupts */ if (virq < NUM_ISA_INTERRUPTS) continue; if (irq_map[virq].host != xics_host) continue; - irq = (unsigned int)irq_map[virq].hwirq; + hwirq = (unsigned int)irq_map[virq].hwirq; /* We need to get IPIs still. 
*/ - if (irq == XICS_IPI || irq == XICS_IRQ_SPURIOUS) + if (hwirq == XICS_IPI || hwirq == XICS_IRQ_SPURIOUS) continue; + desc = irq_to_desc(virq); /* We only need to migrate enabled IRQS */ - if (desc == NULL || desc->chip == NULL - || desc->action == NULL - || desc->chip->set_affinity == NULL) + if (desc == NULL || desc->action == NULL) + continue; + + chip = irq_desc_get_chip(desc); + if (chip == NULL || chip->irq_set_affinity == NULL) continue; raw_spin_lock_irqsave(&desc->lock, flags); - status = rtas_call(ibm_get_xive, 1, 3, xics_status, irq); + status = rtas_call(ibm_get_xive, 1, 3, xics_status, hwirq); if (status) { printk(KERN_ERR "%s: ibm,get-xive irq=%u returns %d\n", - __func__, irq, status); + __func__, hwirq, status); goto unlock; } @@ -934,8 +940,8 @@ void xics_migrate_irqs_away(void) virq, cpu); /* Reset affinity to all cpus */ - cpumask_setall(irq_to_desc(virq)->affinity); - desc->chip->set_affinity(virq, cpu_all_mask); + cpumask_setall(desc->irq_data.affinity); + chip->irq_set_affinity(&desc->irq_data, cpu_all_mask, true); unlock: raw_spin_unlock_irqrestore(&desc->lock, flags); } |
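
As a closing illustration (not part of any hunk above), this is the common shape of the irq_chip conversions running through this series: the callbacks move from the .mask/.unmask/.eoi members taking a bare virq number to .irq_mask/.irq_unmask/.irq_eoi taking a struct irq_data, with chip data looked up through irq_data_get_irq_chip_data() and the virq still available as d->irq. The foo_* names and the mask word are hypothetical; only the kernel API calls are real.

#include <linux/bitops.h>
#include <linux/irq.h>

/* hypothetical per-controller state, for illustration only */
struct foo_pic {
	unsigned long mask;
};

/* new-style callback: receives irq_data instead of an unsigned int virq */
static void foo_mask_irq(struct irq_data *d)
{
	struct foo_pic *pic = irq_data_get_irq_chip_data(d);

	/* d->irq replaces the old virq argument */
	set_bit(d->irq & (BITS_PER_LONG - 1), &pic->mask);
}

static struct irq_chip foo_pic_chip = {
	.name     = "FOO-PIC",
	.irq_mask = foo_mask_irq,	/* was .mask */
};

Mappings are then registered with irq_set_chip_and_handler() and irq_set_chip_data(), as the ps3 and xics hunks above do.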