author | Steve French <sfrench@us.ibm.com> | 2008-02-16 00:06:08 +0300
---|---|---
committer | Steve French <sfrench@us.ibm.com> | 2008-02-16 00:06:08 +0300
commit | 0a3abcf75bf391fec4e32356ab5ddb8f5d2e6b41 | (patch)
tree | b80b1d344ec24cad28b057ef803cebac9434be01 | /arch/powerpc/platforms
parent | 70eff55d2d979cca700aa6906494f0c474f3f7ff | (diff)
parent | 101142c37be8e5af9b847860219217e6b958c739 | (diff)
Merge branch 'master' of /pub/scm/linux/kernel/git/torvalds/linux-2.6
Diffstat (limited to 'arch/powerpc/platforms')
43 files changed, 705 insertions(+), 262 deletions(-)
diff --git a/arch/powerpc/platforms/40x/Kconfig b/arch/powerpc/platforms/40x/Kconfig index 74f31177e47a..a9260e21451e 100644 --- a/arch/powerpc/platforms/40x/Kconfig +++ b/arch/powerpc/platforms/40x/Kconfig @@ -72,6 +72,7 @@ config WALNUT default y select 405GP select PCI + select OF_RTC help This option enables support for the IBM PPC405GP evaluation board. diff --git a/arch/powerpc/platforms/40x/virtex.c b/arch/powerpc/platforms/40x/virtex.c index 88b66444dfb2..0422590040db 100644 --- a/arch/powerpc/platforms/40x/virtex.c +++ b/arch/powerpc/platforms/40x/virtex.c @@ -37,7 +37,7 @@ static int __init virtex_probe(void) { unsigned long root = of_get_flat_dt_root(); - if (!of_flat_dt_is_compatible(root, "xilinx,virtex")) + if (!of_flat_dt_is_compatible(root, "xlnx,virtex")) return 0; return 1; diff --git a/arch/powerpc/platforms/40x/walnut.c b/arch/powerpc/platforms/40x/walnut.c index 5d9edd917f92..b8b257efeb77 100644 --- a/arch/powerpc/platforms/40x/walnut.c +++ b/arch/powerpc/platforms/40x/walnut.c @@ -18,6 +18,7 @@ #include <linux/init.h> #include <linux/of_platform.h> +#include <linux/rtc.h> #include <asm/machdep.h> #include <asm/prom.h> diff --git a/arch/powerpc/platforms/44x/warp.c b/arch/powerpc/platforms/44x/warp.c index 8f01563dbd2a..da5b7b7599db 100644 --- a/arch/powerpc/platforms/44x/warp.c +++ b/arch/powerpc/platforms/44x/warp.c @@ -137,7 +137,7 @@ static int __init pika_dtm_start(void) } of_node_put(np); - fpga = ioremap(res.start + 0x20, 4); + fpga = ioremap(res.start, 0x24); if (fpga == NULL) return -ENOENT; diff --git a/arch/powerpc/platforms/512x/Kconfig b/arch/powerpc/platforms/512x/Kconfig new file mode 100644 index 000000000000..4c0da0c079e9 --- /dev/null +++ b/arch/powerpc/platforms/512x/Kconfig @@ -0,0 +1,19 @@ +config PPC_MPC512x + bool + select FSL_SOC + select IPIC + default n + +config PPC_MPC5121 + bool + select PPC_MPC512x + default n + +config MPC5121_ADS + bool "Freescale MPC5121E ADS" + depends on PPC_MULTIPLATFORM && PPC32 + select DEFAULT_UIMAGE + select PPC_MPC5121 + help + This option enables support for the MPC5121E ADS board. + default n diff --git a/arch/powerpc/platforms/512x/Makefile b/arch/powerpc/platforms/512x/Makefile new file mode 100644 index 000000000000..232c89f2039a --- /dev/null +++ b/arch/powerpc/platforms/512x/Makefile @@ -0,0 +1,4 @@ +# +# Makefile for the Freescale PowerPC 512x linux kernel. +# +obj-$(CONFIG_MPC5121_ADS) += mpc5121_ads.o diff --git a/arch/powerpc/platforms/512x/mpc5121_ads.c b/arch/powerpc/platforms/512x/mpc5121_ads.c new file mode 100644 index 000000000000..50bd3a319022 --- /dev/null +++ b/arch/powerpc/platforms/512x/mpc5121_ads.c @@ -0,0 +1,104 @@ +/* + * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved. + * + * Author: John Rigby, <jrigby@freescale.com>, Thur Mar 29 2007 + * + * Description: + * MPC5121 ADS board setup + * + * This is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + */ + +#include <linux/kernel.h> +#include <linux/io.h> +#include <linux/irq.h> +#include <linux/of_platform.h> + +#include <asm/machdep.h> +#include <asm/ipic.h> +#include <asm/prom.h> +#include <asm/time.h> + +/** + * mpc512x_find_ips_freq - Find the IPS bus frequency for a device + * @node: device node + * + * Returns IPS bus frequency, or 0 if the bus frequency cannot be found. 
+ */ +unsigned long +mpc512x_find_ips_freq(struct device_node *node) +{ + struct device_node *np; + const unsigned int *p_ips_freq = NULL; + + of_node_get(node); + while (node) { + p_ips_freq = of_get_property(node, "bus-frequency", NULL); + if (p_ips_freq) + break; + + np = of_get_parent(node); + of_node_put(node); + node = np; + } + if (node) + of_node_put(node); + + return p_ips_freq ? *p_ips_freq : 0; +} +EXPORT_SYMBOL(mpc512x_find_ips_freq); + +static struct of_device_id __initdata of_bus_ids[] = { + { .name = "soc", }, + { .name = "localbus", }, + {}, +}; + +static void __init mpc5121_ads_declare_of_platform_devices(void) +{ + /* Find every child of the SOC node and add it to of_platform */ + if (of_platform_bus_probe(NULL, of_bus_ids, NULL)) + printk(KERN_ERR __FILE__ ": " + "Error while probing of_platform bus\n"); +} + +static void __init mpc5121_ads_init_IRQ(void) +{ + struct device_node *np; + + np = of_find_compatible_node(NULL, NULL, "fsl,ipic"); + if (!np) + return; + + ipic_init(np, 0); + of_node_put(np); + + /* + * Initialize the default interrupt mapping priorities, + * in case the boot rom changed something on us. + */ + ipic_set_default_priority(); +} + +/* + * Called very early, MMU is off, device-tree isn't unflattened + */ +static int __init mpc5121_ads_probe(void) +{ + unsigned long root = of_get_flat_dt_root(); + + return of_flat_dt_is_compatible(root, "fsl,mpc5121ads"); +} + +define_machine(mpc5121_ads) { + .name = "MPC5121 ADS", + .probe = mpc5121_ads_probe, + .init = mpc5121_ads_declare_of_platform_devices, + .init_IRQ = mpc5121_ads_init_IRQ, + .get_irq = ipic_get_irq, + .calibrate_decr = generic_calibrate_decr, +}; diff --git a/arch/powerpc/platforms/52xx/Kconfig b/arch/powerpc/platforms/52xx/Kconfig index 515f244c90bb..cf945d55c276 100644 --- a/arch/powerpc/platforms/52xx/Kconfig +++ b/arch/powerpc/platforms/52xx/Kconfig @@ -8,7 +8,6 @@ config PPC_MPC5200_SIMPLE bool "Generic support for simple MPC5200 based boards" depends on PPC_MPC52xx select DEFAULT_UIMAGE - select WANT_DEVICE_TREE help This option enables support for a simple MPC52xx based boards which do not need a custom platform specific setup. 
Such boards are @@ -35,7 +34,6 @@ config PPC_LITE5200 bool "Freescale Lite5200 Eval Board" depends on PPC_MPC52xx select DEFAULT_UIMAGE - select WANT_DEVICE_TREE config PPC_MPC5200_BUGFIX bool "MPC5200 (L25R) bugfix support" diff --git a/arch/powerpc/platforms/82xx/mpc8272_ads.c b/arch/powerpc/platforms/82xx/mpc8272_ads.c index 3fce6b375dbc..7d3018751988 100644 --- a/arch/powerpc/platforms/82xx/mpc8272_ads.c +++ b/arch/powerpc/platforms/82xx/mpc8272_ads.c @@ -134,13 +134,12 @@ static void __init mpc8272_ads_setup_arch(void) } bcsr = of_iomap(np, 0); + of_node_put(np); if (!bcsr) { printk(KERN_ERR "Cannot map BCSR registers\n"); return; } - of_node_put(np); - clrbits32(&bcsr[1], BCSR1_RS232_EN1 | BCSR1_RS232_EN2 | BCSR1_FETHIEN); setbits32(&bcsr[1], BCSR1_FETH_RST); diff --git a/arch/powerpc/platforms/82xx/pq2fads.c b/arch/powerpc/platforms/82xx/pq2fads.c index 68196e349994..e1dceeec4994 100644 --- a/arch/powerpc/platforms/82xx/pq2fads.c +++ b/arch/powerpc/platforms/82xx/pq2fads.c @@ -130,13 +130,12 @@ static void __init pq2fads_setup_arch(void) } bcsr = of_iomap(np, 0); + of_node_put(np); if (!bcsr) { printk(KERN_ERR "Cannot map BCSR registers\n"); return; } - of_node_put(np); - /* Enable the serial and ethernet ports */ clrbits32(&bcsr[1], BCSR1_RS232_EN1 | BCSR1_RS232_EN2 | BCSR1_FETHIEN); diff --git a/arch/powerpc/platforms/83xx/mpc832x_rdb.c b/arch/powerpc/platforms/83xx/mpc832x_rdb.c index 9f0fd88b2b1f..e7f706b624fe 100644 --- a/arch/powerpc/platforms/83xx/mpc832x_rdb.c +++ b/arch/powerpc/platforms/83xx/mpc832x_rdb.c @@ -101,7 +101,7 @@ static void __init mpc832x_rdb_setup_arch(void) #ifdef CONFIG_QUICC_ENGINE qe_reset(); - if ((np = of_find_node_by_name(np, "par_io")) != NULL) { + if ((np = of_find_node_by_name(NULL, "par_io")) != NULL) { par_io_init(np); of_node_put(np); diff --git a/arch/powerpc/platforms/83xx/mpc83xx.h b/arch/powerpc/platforms/83xx/mpc83xx.h index 88bb748aff0d..68065e62fc3d 100644 --- a/arch/powerpc/platforms/83xx/mpc83xx.h +++ b/arch/powerpc/platforms/83xx/mpc83xx.h @@ -14,6 +14,8 @@ #define MPC83XX_SCCR_USB_DRCM_11 0x00300000 #define MPC83XX_SCCR_USB_DRCM_01 0x00100000 #define MPC83XX_SCCR_USB_DRCM_10 0x00200000 +#define MPC8315_SCCR_USB_MASK 0x00c00000 +#define MPC8315_SCCR_USB_DRCM_11 0x00c00000 #define MPC837X_SCCR_USB_DRCM_11 0x00c00000 /* system i/o configuration register low */ diff --git a/arch/powerpc/platforms/83xx/usb.c b/arch/powerpc/platforms/83xx/usb.c index 681230a30acd..471fdd8f4108 100644 --- a/arch/powerpc/platforms/83xx/usb.c +++ b/arch/powerpc/platforms/83xx/usb.c @@ -104,6 +104,7 @@ int mpc831x_usb_cfg(void) u32 temp; void __iomem *immap, *usb_regs; struct device_node *np = NULL; + struct device_node *immr_node = NULL; const void *prop; struct resource res; int ret = 0; @@ -124,10 +125,15 @@ int mpc831x_usb_cfg(void) } /* Configure clock */ - temp = in_be32(immap + MPC83XX_SCCR_OFFS); - temp &= ~MPC83XX_SCCR_USB_MASK; - temp |= MPC83XX_SCCR_USB_DRCM_11; /* 1:3 */ - out_be32(immap + MPC83XX_SCCR_OFFS, temp); + immr_node = of_get_parent(np); + if (immr_node && of_device_is_compatible(immr_node, "fsl,mpc8315-immr")) + clrsetbits_be32(immap + MPC83XX_SCCR_OFFS, + MPC8315_SCCR_USB_MASK, + MPC8315_SCCR_USB_DRCM_11); + else + clrsetbits_be32(immap + MPC83XX_SCCR_OFFS, + MPC83XX_SCCR_USB_MASK, + MPC83XX_SCCR_USB_DRCM_11); /* Configure pin mux for ULPI. 
There is no pin mux for UTMI */ if (prop && !strcmp(prop, "ulpi")) { @@ -144,6 +150,9 @@ int mpc831x_usb_cfg(void) iounmap(immap); + if (immr_node) + of_node_put(immr_node); + /* Map USB SOC space */ ret = of_address_to_resource(np, 0, &res); if (ret) { diff --git a/arch/powerpc/platforms/8xx/adder875.c b/arch/powerpc/platforms/8xx/adder875.c index c6bc0783c3b0..82363e98f50e 100644 --- a/arch/powerpc/platforms/8xx/adder875.c +++ b/arch/powerpc/platforms/8xx/adder875.c @@ -15,12 +15,12 @@ #include <asm/time.h> #include <asm/machdep.h> -#include <asm/commproc.h> +#include <asm/cpm1.h> #include <asm/fs_pd.h> #include <asm/udbg.h> #include <asm/prom.h> -#include <sysdev/commproc.h> +#include "mpc8xx.h" struct cpm_pin { int port, pin, flags; @@ -108,7 +108,7 @@ define_machine(adder875) { .name = "Adder MPC875", .probe = adder875_probe, .setup_arch = adder875_setup, - .init_IRQ = m8xx_pic_init, + .init_IRQ = mpc8xx_pics_init, .get_irq = mpc8xx_get_irq, .restart = mpc8xx_restart, .calibrate_decr = generic_calibrate_decr, diff --git a/arch/powerpc/platforms/8xx/ep88xc.c b/arch/powerpc/platforms/8xx/ep88xc.c index a8dffa005775..7d9ac6040d63 100644 --- a/arch/powerpc/platforms/8xx/ep88xc.c +++ b/arch/powerpc/platforms/8xx/ep88xc.c @@ -15,7 +15,6 @@ #include <asm/machdep.h> #include <asm/io.h> #include <asm/udbg.h> -#include <asm/commproc.h> #include <asm/cpm1.h> #include "mpc8xx.h" diff --git a/arch/powerpc/platforms/Kconfig b/arch/powerpc/platforms/Kconfig index fdce10c4f074..0afd22595546 100644 --- a/arch/powerpc/platforms/Kconfig +++ b/arch/powerpc/platforms/Kconfig @@ -15,7 +15,6 @@ config PPC_MULTIPLATFORM config PPC_82xx bool "Freescale 82xx" depends on 6xx - select WANT_DEVICE_TREE config PPC_83xx bool "Freescale 83xx" @@ -23,7 +22,7 @@ config PPC_83xx select FSL_SOC select MPC83xx select IPIC - select WANT_DEVICE_TREE + select FSL_EMB_PERFMON config PPC_86xx bool "Freescale 86xx" @@ -41,6 +40,7 @@ config CLASSIC32 source "arch/powerpc/platforms/pseries/Kconfig" source "arch/powerpc/platforms/iseries/Kconfig" source "arch/powerpc/platforms/chrp/Kconfig" +source "arch/powerpc/platforms/512x/Kconfig" source "arch/powerpc/platforms/52xx/Kconfig" source "arch/powerpc/platforms/powermac/Kconfig" source "arch/powerpc/platforms/prep/Kconfig" diff --git a/arch/powerpc/platforms/Kconfig.cputype b/arch/powerpc/platforms/Kconfig.cputype index 7fc41104d53e..73d81ce14b67 100644 --- a/arch/powerpc/platforms/Kconfig.cputype +++ b/arch/powerpc/platforms/Kconfig.cputype @@ -14,7 +14,7 @@ choice There are five families of 32 bit PowerPC chips supported. The most common ones are the desktop and server CPUs (601, 603, 604, 740, 750, 74xx) CPUs from Freescale and IBM, with their - embedded 52xx/82xx/83xx/86xx counterparts. + embedded 512x/52xx/82xx/83xx/86xx counterparts. The other embeeded parts, namely 4xx, 8xx, e200 (55xx) and e500 (85xx) each form a family of their own that is not compatible with the others. @@ -22,33 +22,29 @@ choice If unsure, select 52xx/6xx/7xx/74xx/82xx/83xx/86xx. 
config 6xx - bool "52xx/6xx/7xx/74xx/82xx/83xx/86xx" + bool "512x/52xx/6xx/7xx/74xx/82xx/83xx/86xx" select PPC_FPU config PPC_85xx bool "Freescale 85xx" select E500 select FSL_SOC - select WANT_DEVICE_TREE select MPC85xx config PPC_8xx bool "Freescale 8xx" select FSL_SOC select 8xx - select WANT_DEVICE_TREE select PPC_LIB_RHEAP config 40x bool "AMCC 40x" select PPC_DCR_NATIVE - select WANT_DEVICE_TREE select PPC_UDBG_16550 config 44x bool "AMCC 44x" select PPC_DCR_NATIVE - select WANT_DEVICE_TREE select PPC_UDBG_16550 config E200 @@ -94,6 +90,7 @@ config 8xx bool config E500 + select FSL_EMB_PERFMON bool config PPC_FPU @@ -115,6 +112,9 @@ config FSL_BOOKE depends on E200 || E500 default y +config FSL_EMB_PERFMON + bool + config PTE_64BIT bool depends on 44x || E500 @@ -221,7 +221,7 @@ config NR_CPUS config NOT_COHERENT_CACHE bool - depends on 4xx || 8xx || E200 + depends on 4xx || 8xx || E200 || PPC_MPC512x default y config CHECK_CACHE_COHERENCY diff --git a/arch/powerpc/platforms/Makefile b/arch/powerpc/platforms/Makefile index 6d9079da5f5a..a984894466d9 100644 --- a/arch/powerpc/platforms/Makefile +++ b/arch/powerpc/platforms/Makefile @@ -11,6 +11,7 @@ endif obj-$(CONFIG_PPC_CHRP) += chrp/ obj-$(CONFIG_40x) += 40x/ obj-$(CONFIG_44x) += 44x/ +obj-$(CONFIG_PPC_MPC512x) += 512x/ obj-$(CONFIG_PPC_MPC52xx) += 52xx/ obj-$(CONFIG_PPC_8xx) += 8xx/ obj-$(CONFIG_PPC_82xx) += 82xx/ diff --git a/arch/powerpc/platforms/cell/Kconfig b/arch/powerpc/platforms/cell/Kconfig index 3a963b4a9be0..2f169991896d 100644 --- a/arch/powerpc/platforms/cell/Kconfig +++ b/arch/powerpc/platforms/cell/Kconfig @@ -54,6 +54,13 @@ config SPU_FS_64K_LS uses 4K pages. This can improve performances of applications using multiple SPEs by lowering the TLB pressure on them. +config SPU_TRACE + tristate "SPU event tracing support" + depends on SPU_FS && MARKERS + help + This option allows reading a trace of spu-related events through + the sputrace file in procfs. 
+ config SPU_BASE bool default n diff --git a/arch/powerpc/platforms/cell/axon_msi.c b/arch/powerpc/platforms/cell/axon_msi.c index 095988f13bf4..d95e71dee91f 100644 --- a/arch/powerpc/platforms/cell/axon_msi.c +++ b/arch/powerpc/platforms/cell/axon_msi.c @@ -13,7 +13,7 @@ #include <linux/kernel.h> #include <linux/pci.h> #include <linux/msi.h> -#include <linux/reboot.h> +#include <linux/of_platform.h> #include <asm/dcr.h> #include <asm/machdep.h> @@ -65,14 +65,12 @@ struct axon_msic { struct irq_host *irq_host; - __le32 *fifo; + __le32 *fifo_virt; + dma_addr_t fifo_phys; dcr_host_t dcr_host; - struct list_head list; u32 read_offset; }; -static LIST_HEAD(axon_msic_list); - static void msic_dcr_write(struct axon_msic *msic, unsigned int dcr_n, u32 val) { pr_debug("axon_msi: dcr_write(0x%x, 0x%x)\n", val, dcr_n); @@ -94,7 +92,7 @@ static void axon_msi_cascade(unsigned int irq, struct irq_desc *desc) while (msic->read_offset != write_offset) { idx = msic->read_offset / sizeof(__le32); - msi = le32_to_cpu(msic->fifo[idx]); + msi = le32_to_cpu(msic->fifo_virt[idx]); msi &= 0xFFFF; pr_debug("axon_msi: woff %x roff %x msi %x\n", @@ -139,6 +137,7 @@ static struct axon_msic *find_msi_translator(struct pci_dev *dev) tmp = dn; dn = of_find_node_by_phandle(*ph); + of_node_put(tmp); if (!dn) { dev_dbg(&dev->dev, "axon_msi: msi-translator doesn't point to a node\n"); @@ -156,7 +155,6 @@ static struct axon_msic *find_msi_translator(struct pci_dev *dev) out_error: of_node_put(dn); - of_node_put(tmp); return msic; } @@ -292,30 +290,24 @@ static struct irq_host_ops msic_host_ops = { .map = msic_host_map, }; -static int axon_msi_notify_reboot(struct notifier_block *nb, - unsigned long code, void *data) +static int axon_msi_shutdown(struct of_device *device) { - struct axon_msic *msic; + struct axon_msic *msic = device->dev.platform_data; u32 tmp; - list_for_each_entry(msic, &axon_msic_list, list) { - pr_debug("axon_msi: disabling %s\n", - msic->irq_host->of_node->full_name); - tmp = dcr_read(msic->dcr_host, MSIC_CTRL_REG); - tmp &= ~MSIC_CTRL_ENABLE & ~MSIC_CTRL_IRQ_ENABLE; - msic_dcr_write(msic, MSIC_CTRL_REG, tmp); - } + pr_debug("axon_msi: disabling %s\n", + msic->irq_host->of_node->full_name); + tmp = dcr_read(msic->dcr_host, MSIC_CTRL_REG); + tmp &= ~MSIC_CTRL_ENABLE & ~MSIC_CTRL_IRQ_ENABLE; + msic_dcr_write(msic, MSIC_CTRL_REG, tmp); return 0; } -static struct notifier_block axon_msi_reboot_notifier = { - .notifier_call = axon_msi_notify_reboot -}; - -static int axon_msi_setup_one(struct device_node *dn) +static int axon_msi_probe(struct of_device *device, + const struct of_device_id *device_id) { - struct page *page; + struct device_node *dn = device->node; struct axon_msic *msic; unsigned int virq; int dcr_base, dcr_len; @@ -346,16 +338,14 @@ static int axon_msi_setup_one(struct device_node *dn) goto out_free_msic; } - page = alloc_pages_node(of_node_to_nid(dn), GFP_KERNEL, - get_order(MSIC_FIFO_SIZE_BYTES)); - if (!page) { + msic->fifo_virt = dma_alloc_coherent(&device->dev, MSIC_FIFO_SIZE_BYTES, + &msic->fifo_phys, GFP_KERNEL); + if (!msic->fifo_virt) { printk(KERN_ERR "axon_msi: couldn't allocate fifo for %s\n", dn->full_name); goto out_free_msic; } - msic->fifo = page_address(page); - msic->irq_host = irq_alloc_host(of_node_get(dn), IRQ_HOST_MAP_NOMAP, NR_IRQS, &msic_host_ops, 0); if (!msic->irq_host) { @@ -378,14 +368,18 @@ static int axon_msi_setup_one(struct device_node *dn) pr_debug("axon_msi: irq 0x%x setup for axon_msi\n", virq); /* Enable the MSIC hardware */ - msic_dcr_write(msic, 
MSIC_BASE_ADDR_HI_REG, (u64)msic->fifo >> 32); + msic_dcr_write(msic, MSIC_BASE_ADDR_HI_REG, msic->fifo_phys >> 32); msic_dcr_write(msic, MSIC_BASE_ADDR_LO_REG, - (u64)msic->fifo & 0xFFFFFFFF); + msic->fifo_phys & 0xFFFFFFFF); msic_dcr_write(msic, MSIC_CTRL_REG, MSIC_CTRL_IRQ_ENABLE | MSIC_CTRL_ENABLE | MSIC_CTRL_FIFO_SIZE); - list_add(&msic->list, &axon_msic_list); + device->dev.platform_data = msic; + + ppc_md.setup_msi_irqs = axon_msi_setup_msi_irqs; + ppc_md.teardown_msi_irqs = axon_msi_teardown_msi_irqs; + ppc_md.msi_check_device = axon_msi_check_device; printk(KERN_DEBUG "axon_msi: setup MSIC on %s\n", dn->full_name); @@ -394,7 +388,8 @@ static int axon_msi_setup_one(struct device_node *dn) out_free_host: kfree(msic->irq_host); out_free_fifo: - __free_pages(virt_to_page(msic->fifo), get_order(MSIC_FIFO_SIZE_BYTES)); + dma_free_coherent(&device->dev, MSIC_FIFO_SIZE_BYTES, msic->fifo_virt, + msic->fifo_phys); out_free_msic: kfree(msic); out: @@ -402,28 +397,24 @@ out: return -1; } -static int axon_msi_init(void) -{ - struct device_node *dn; - int found = 0; - - pr_debug("axon_msi: initialising ...\n"); - - for_each_compatible_node(dn, NULL, "ibm,axon-msic") { - if (axon_msi_setup_one(dn) == 0) - found++; - } - - if (found) { - ppc_md.setup_msi_irqs = axon_msi_setup_msi_irqs; - ppc_md.teardown_msi_irqs = axon_msi_teardown_msi_irqs; - ppc_md.msi_check_device = axon_msi_check_device; - - register_reboot_notifier(&axon_msi_reboot_notifier); +static const struct of_device_id axon_msi_device_id[] = { + { + .compatible = "ibm,axon-msic" + }, + {} +}; - pr_debug("axon_msi: registered callbacks!\n"); - } +static struct of_platform_driver axon_msi_driver = { + .match_table = axon_msi_device_id, + .probe = axon_msi_probe, + .shutdown = axon_msi_shutdown, + .driver = { + .name = "axon-msi" + }, +}; - return 0; +static int __init axon_msi_init(void) +{ + return of_register_platform_driver(&axon_msi_driver); } -arch_initcall(axon_msi_init); +subsys_initcall(axon_msi_init); diff --git a/arch/powerpc/platforms/cell/iommu.c b/arch/powerpc/platforms/cell/iommu.c index df330666ccc9..edab631a8dcb 100644 --- a/arch/powerpc/platforms/cell/iommu.c +++ b/arch/powerpc/platforms/cell/iommu.c @@ -26,6 +26,7 @@ #include <linux/init.h> #include <linux/interrupt.h> #include <linux/notifier.h> +#include <linux/of.h> #include <linux/of_platform.h> #include <asm/prom.h> @@ -789,18 +790,16 @@ static int __init cell_iommu_init_disabled(void) static u64 cell_iommu_get_fixed_address(struct device *dev) { u64 cpu_addr, size, best_size, pci_addr = OF_BAD_ADDR; - struct device_node *tmp, *np; + struct device_node *np; const u32 *ranges = NULL; int i, len, best; - np = dev->archdata.of_node; - of_node_get(np); - ranges = of_get_property(np, "dma-ranges", &len); - while (!ranges && np) { - tmp = of_get_parent(np); - of_node_put(np); - np = tmp; + np = of_node_get(dev->archdata.of_node); + while (np) { ranges = of_get_property(np, "dma-ranges", &len); + if (ranges) + break; + np = of_get_next_parent(np); } if (!ranges) { @@ -842,19 +841,18 @@ static int dma_set_mask_and_switch(struct device *dev, u64 dma_mask) if (!dev->dma_mask || !dma_supported(dev, dma_mask)) return -EIO; - if (dma_mask == DMA_BIT_MASK(64)) { - if (cell_iommu_get_fixed_address(dev) == OF_BAD_ADDR) - dev_dbg(dev, "iommu: 64-bit OK, but bad addr\n"); - else { - dev_dbg(dev, "iommu: 64-bit OK, using fixed ops\n"); - set_dma_ops(dev, &dma_iommu_fixed_ops); - cell_dma_dev_setup(dev); - } + if (dma_mask == DMA_BIT_MASK(64) && + cell_iommu_get_fixed_address(dev) != 
OF_BAD_ADDR) + { + dev_dbg(dev, "iommu: 64-bit OK, using fixed ops\n"); + set_dma_ops(dev, &dma_iommu_fixed_ops); } else { dev_dbg(dev, "iommu: not 64-bit, using default ops\n"); set_dma_ops(dev, get_pci_dma_ops()); } + cell_dma_dev_setup(dev); + *dev->dma_mask = dma_mask; return 0; @@ -918,6 +916,18 @@ static int __init cell_iommu_fixed_mapping_init(void) return -1; } + /* We must have dma-ranges properties for fixed mapping to work */ + for (np = NULL; (np = of_find_all_nodes(np));) { + if (of_find_property(np, "dma-ranges", NULL)) + break; + } + of_node_put(np); + + if (!np) { + pr_debug("iommu: no dma-ranges found, no fixed mapping\n"); + return -1; + } + /* The default setup is to have the fixed mapping sit after the * dynamic region, so find the top of the largest IOMMU window * on any axon, then add the size of RAM and that's our max value. @@ -981,8 +991,8 @@ static int __init cell_iommu_fixed_mapping_init(void) dsize = htab_size_bytes; } - pr_debug("iommu: setting up %d, dynamic window %lx-%lx " \ - "fixed window %lx-%lx\n", iommu->nid, dbase, + printk(KERN_DEBUG "iommu: node %d, dynamic window 0x%lx-0x%lx " + "fixed window 0x%lx-0x%lx\n", iommu->nid, dbase, dbase + dsize, fbase, fbase + fsize); cell_iommu_setup_page_tables(iommu, dbase, dsize, fbase, fsize); @@ -998,8 +1008,6 @@ static int __init cell_iommu_fixed_mapping_init(void) dma_iommu_ops.set_dma_mask = dma_set_mask_and_switch; set_pci_dma_ops(&dma_iommu_ops); - printk(KERN_DEBUG "IOMMU fixed mapping established.\n"); - return 0; } diff --git a/arch/powerpc/platforms/cell/ras.c b/arch/powerpc/platforms/cell/ras.c index b2494ebcdbe9..e43024c0392e 100644 --- a/arch/powerpc/platforms/cell/ras.c +++ b/arch/powerpc/platforms/cell/ras.c @@ -1,4 +1,13 @@ -#define DEBUG +/* + * Copyright 2006-2008, IBM Corporation. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. 
+ */ + +#undef DEBUG #include <linux/types.h> #include <linux/kernel.h> diff --git a/arch/powerpc/platforms/cell/setup.c b/arch/powerpc/platforms/cell/setup.c index e6534b519c9a..a7f609b3b876 100644 --- a/arch/powerpc/platforms/cell/setup.c +++ b/arch/powerpc/platforms/cell/setup.c @@ -98,7 +98,7 @@ static int __init cell_publish_devices(void) } return 0; } -machine_device_initcall(cell, cell_publish_devices); +machine_subsys_initcall(cell, cell_publish_devices); static void cell_mpic_cascade(unsigned int irq, struct irq_desc *desc) { diff --git a/arch/powerpc/platforms/cell/spufs/Makefile b/arch/powerpc/platforms/cell/spufs/Makefile index d3a349fb42e5..99610a6361f2 100644 --- a/arch/powerpc/platforms/cell/spufs/Makefile +++ b/arch/powerpc/platforms/cell/spufs/Makefile @@ -4,6 +4,8 @@ spufs-y += inode.o file.o context.o syscalls.o coredump.o spufs-y += sched.o backing_ops.o hw_ops.o run.o gang.o spufs-y += switch.o fault.o lscsa_alloc.o +obj-$(CONFIG_SPU_TRACE) += sputrace.o + # Rules to build switch.o with the help of SPU tool chain SPU_CROSS := spu- SPU_CC := $(SPU_CROSS)gcc diff --git a/arch/powerpc/platforms/cell/spufs/backing_ops.c b/arch/powerpc/platforms/cell/spufs/backing_ops.c index 50d98a154aaf..64eb15b22040 100644 --- a/arch/powerpc/platforms/cell/spufs/backing_ops.c +++ b/arch/powerpc/platforms/cell/spufs/backing_ops.c @@ -288,6 +288,12 @@ static void spu_backing_runcntl_write(struct spu_context *ctx, u32 val) spin_lock(&ctx->csa.register_lock); ctx->csa.prob.spu_runcntl_RW = val; if (val & SPU_RUNCNTL_RUNNABLE) { + ctx->csa.prob.spu_status_R &= + ~SPU_STATUS_STOPPED_BY_STOP & + ~SPU_STATUS_STOPPED_BY_HALT & + ~SPU_STATUS_SINGLE_STEP & + ~SPU_STATUS_INVALID_INSTR & + ~SPU_STATUS_INVALID_CH; ctx->csa.prob.spu_status_R |= SPU_STATUS_RUNNING; } else { ctx->csa.prob.spu_status_R &= ~SPU_STATUS_RUNNING; diff --git a/arch/powerpc/platforms/cell/spufs/fault.c b/arch/powerpc/platforms/cell/spufs/fault.c index eff4d291ba85..e46d300e21a5 100644 --- a/arch/powerpc/platforms/cell/spufs/fault.c +++ b/arch/powerpc/platforms/cell/spufs/fault.c @@ -108,7 +108,7 @@ int spufs_handle_class1(struct spu_context *ctx) u64 ea, dsisr, access; unsigned long flags; unsigned flt = 0; - int ret, ret2; + int ret; /* * dar and dsisr get passed from the registers @@ -148,13 +148,10 @@ int spufs_handle_class1(struct spu_context *ctx) ret = spu_handle_mm_fault(current->mm, ea, dsisr, &flt); /* - * If spu_acquire fails due to a pending signal we just want to return - * EINTR to userspace even if that means missing the dma restart or - * updating the page fault statistics. + * This is nasty: we need the state_mutex for all the bookkeeping even + * if the syscall was interrupted by a signal. ewww. 
*/ - ret2 = spu_acquire(ctx); - if (ret2) - goto out; + mutex_lock(&ctx->state_mutex); /* * Clear dsisr under ctxt lock after handling the fault, so that @@ -185,7 +182,6 @@ int spufs_handle_class1(struct spu_context *ctx) } else spufs_handle_event(ctx, ea, SPE_EVENT_SPE_DATA_STORAGE); - out: spuctx_switch_state(ctx, SPU_UTIL_SYSTEM); return ret; } diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c index 3fcd06418b01..c66c3756970d 100644 --- a/arch/powerpc/platforms/cell/spufs/file.c +++ b/arch/powerpc/platforms/cell/spufs/file.c @@ -29,6 +29,7 @@ #include <linux/poll.h> #include <linux/ptrace.h> #include <linux/seq_file.h> +#include <linux/marker.h> #include <asm/io.h> #include <asm/semaphore.h> @@ -357,6 +358,9 @@ static unsigned long spufs_ps_nopfn(struct vm_area_struct *vma, { struct spu_context *ctx = vma->vm_file->private_data; unsigned long area, offset = address - vma->vm_start; + int ret = 0; + + spu_context_nospu_trace(spufs_ps_nopfn__enter, ctx); offset += vma->vm_pgoff << PAGE_SHIFT; if (offset >= ps_size) @@ -375,14 +379,18 @@ static unsigned long spufs_ps_nopfn(struct vm_area_struct *vma, if (ctx->state == SPU_STATE_SAVED) { up_read(¤t->mm->mmap_sem); - spufs_wait(ctx->run_wq, ctx->state == SPU_STATE_RUNNABLE); + spu_context_nospu_trace(spufs_ps_nopfn__sleep, ctx); + ret = spufs_wait(ctx->run_wq, ctx->state == SPU_STATE_RUNNABLE); + spu_context_trace(spufs_ps_nopfn__wake, ctx, ctx->spu); down_read(¤t->mm->mmap_sem); } else { area = ctx->spu->problem_phys + ps_offs; vm_insert_pfn(vma, address, (area + offset) >> PAGE_SHIFT); + spu_context_trace(spufs_ps_nopfn__insert, ctx, ctx->spu); } - spu_release(ctx); + if (!ret) + spu_release(ctx); return NOPFN_REFAULT; } @@ -454,7 +462,7 @@ static int spufs_cntl_open(struct inode *inode, struct file *file) if (!i->i_openers++) ctx->cntl = inode->i_mapping; mutex_unlock(&ctx->mapping_lock); - return spufs_attr_open(inode, file, spufs_cntl_get, + return simple_attr_open(inode, file, spufs_cntl_get, spufs_cntl_set, "0x%08lx"); } @@ -464,7 +472,7 @@ spufs_cntl_release(struct inode *inode, struct file *file) struct spufs_inode_info *i = SPUFS_I(inode); struct spu_context *ctx = i->i_ctx; - spufs_attr_release(inode, file); + simple_attr_release(inode, file); mutex_lock(&ctx->mapping_lock); if (!--i->i_openers) @@ -476,8 +484,8 @@ spufs_cntl_release(struct inode *inode, struct file *file) static const struct file_operations spufs_cntl_fops = { .open = spufs_cntl_open, .release = spufs_cntl_release, - .read = spufs_attr_read, - .write = spufs_attr_write, + .read = simple_attr_read, + .write = simple_attr_write, .mmap = spufs_cntl_mmap, }; @@ -749,23 +757,25 @@ static ssize_t spufs_ibox_read(struct file *file, char __user *buf, count = spu_acquire(ctx); if (count) - return count; + goto out; /* wait only for the first element */ count = 0; if (file->f_flags & O_NONBLOCK) { - if (!spu_ibox_read(ctx, &ibox_data)) + if (!spu_ibox_read(ctx, &ibox_data)) { count = -EAGAIN; + goto out_unlock; + } } else { count = spufs_wait(ctx->ibox_wq, spu_ibox_read(ctx, &ibox_data)); + if (count) + goto out; } - if (count) - goto out; /* if we can't write at all, return -EFAULT */ count = __put_user(ibox_data, udata); if (count) - goto out; + goto out_unlock; for (count = 4, udata++; (count + 4) <= len; count += 4, udata++) { int ret; @@ -782,9 +792,9 @@ static ssize_t spufs_ibox_read(struct file *file, char __user *buf, break; } -out: +out_unlock: spu_release(ctx); - +out: return count; } @@ -899,7 +909,7 @@ static 
ssize_t spufs_wbox_write(struct file *file, const char __user *buf, count = spu_acquire(ctx); if (count) - return count; + goto out; /* * make sure we can at least write one element, by waiting @@ -907,14 +917,16 @@ static ssize_t spufs_wbox_write(struct file *file, const char __user *buf, */ count = 0; if (file->f_flags & O_NONBLOCK) { - if (!spu_wbox_write(ctx, wbox_data)) + if (!spu_wbox_write(ctx, wbox_data)) { count = -EAGAIN; + goto out_unlock; + } } else { count = spufs_wait(ctx->wbox_wq, spu_wbox_write(ctx, wbox_data)); + if (count) + goto out; } - if (count) - goto out; /* write as much as possible */ for (count = 4, udata++; (count + 4) <= len; count += 4, udata++) { @@ -928,8 +940,9 @@ static ssize_t spufs_wbox_write(struct file *file, const char __user *buf, break; } -out: +out_unlock: spu_release(ctx); +out: return count; } @@ -1592,12 +1605,11 @@ static ssize_t spufs_mfc_read(struct file *file, char __user *buffer, } else { ret = spufs_wait(ctx->mfc_wq, spufs_read_mfc_tagstatus(ctx, &status)); + if (ret) + goto out; } spu_release(ctx); - if (ret) - goto out; - ret = 4; if (copy_to_user(buffer, &status, 4)) ret = -EFAULT; @@ -1726,6 +1738,8 @@ static ssize_t spufs_mfc_write(struct file *file, const char __user *buffer, int status; ret = spufs_wait(ctx->mfc_wq, spu_send_mfc_command(ctx, cmd, &status)); + if (ret) + goto out; if (status) ret = status; } @@ -1779,7 +1793,7 @@ static int spufs_mfc_flush(struct file *file, fl_owner_t id) ret = spu_acquire(ctx); if (ret) - return ret; + goto out; #if 0 /* this currently hangs */ ret = spufs_wait(ctx->mfc_wq, @@ -1788,12 +1802,13 @@ static int spufs_mfc_flush(struct file *file, fl_owner_t id) goto out; ret = spufs_wait(ctx->mfc_wq, ctx->ops->read_mfc_tagstatus(ctx) == ctx->tagwait); -out: + if (ret) + goto out; #else ret = 0; #endif spu_release(ctx); - +out: return ret; } diff --git a/arch/powerpc/platforms/cell/spufs/inode.c b/arch/powerpc/platforms/cell/spufs/inode.c index c0e968a4c211..6d1228c66c5e 100644 --- a/arch/powerpc/platforms/cell/spufs/inode.c +++ b/arch/powerpc/platforms/cell/spufs/inode.c @@ -1,3 +1,4 @@ + /* * SPU file system * @@ -322,7 +323,7 @@ static struct spu_context * spufs_assert_affinity(unsigned int flags, struct spu_gang *gang, struct file *filp) { - struct spu_context *tmp, *neighbor; + struct spu_context *tmp, *neighbor, *err; int count, node; int aff_supp; @@ -354,11 +355,15 @@ spufs_assert_affinity(unsigned int flags, struct spu_gang *gang, if (!list_empty(&neighbor->aff_list) && !(neighbor->aff_head) && !list_is_last(&neighbor->aff_list, &gang->aff_list_head) && !list_entry(neighbor->aff_list.next, struct spu_context, - aff_list)->aff_head) - return ERR_PTR(-EEXIST); + aff_list)->aff_head) { + err = ERR_PTR(-EEXIST); + goto out_put_neighbor; + } - if (gang != neighbor->gang) - return ERR_PTR(-EINVAL); + if (gang != neighbor->gang) { + err = ERR_PTR(-EINVAL); + goto out_put_neighbor; + } count = 1; list_for_each_entry(tmp, &gang->aff_list_head, aff_list) @@ -372,11 +377,17 @@ spufs_assert_affinity(unsigned int flags, struct spu_gang *gang, break; } - if (node == MAX_NUMNODES) - return ERR_PTR(-EEXIST); + if (node == MAX_NUMNODES) { + err = ERR_PTR(-EEXIST); + goto out_put_neighbor; + } } return neighbor; + +out_put_neighbor: + put_spu_context(neighbor); + return err; } static void @@ -454,9 +465,12 @@ spufs_create_context(struct inode *inode, struct dentry *dentry, if (ret) goto out_aff_unlock; - if (affinity) + if (affinity) { spufs_set_affinity(flags, SPUFS_I(dentry->d_inode)->i_ctx, neighbor); + if 
(neighbor) + put_spu_context(neighbor); + } /* * get references for dget and mntget, will be released @@ -579,7 +593,7 @@ long spufs_create(struct nameidata *nd, unsigned int flags, mode_t mode, ret = -EINVAL; /* check if we are on spufs */ - if (nd->dentry->d_sb->s_type != &spufs_type) + if (nd->path.dentry->d_sb->s_type != &spufs_type) goto out; /* don't accept undefined flags */ @@ -587,9 +601,9 @@ long spufs_create(struct nameidata *nd, unsigned int flags, mode_t mode, goto out; /* only threads can be underneath a gang */ - if (nd->dentry != nd->dentry->d_sb->s_root) { + if (nd->path.dentry != nd->path.dentry->d_sb->s_root) { if ((flags & SPU_CREATE_GANG) || - !SPUFS_I(nd->dentry->d_inode)->i_gang) + !SPUFS_I(nd->path.dentry->d_inode)->i_gang) goto out; } @@ -605,16 +619,17 @@ long spufs_create(struct nameidata *nd, unsigned int flags, mode_t mode, mode &= ~current->fs->umask; if (flags & SPU_CREATE_GANG) - return spufs_create_gang(nd->dentry->d_inode, - dentry, nd->mnt, mode); + return spufs_create_gang(nd->path.dentry->d_inode, + dentry, nd->path.mnt, mode); else - return spufs_create_context(nd->dentry->d_inode, - dentry, nd->mnt, flags, mode, filp); + return spufs_create_context(nd->path.dentry->d_inode, + dentry, nd->path.mnt, flags, mode, + filp); out_dput: dput(dentry); out_dir: - mutex_unlock(&nd->dentry->d_inode->i_mutex); + mutex_unlock(&nd->path.dentry->d_inode->i_mutex); out: return ret; } @@ -742,8 +757,11 @@ spufs_fill_super(struct super_block *sb, void *data, int silent) .statfs = simple_statfs, .delete_inode = spufs_delete_inode, .drop_inode = generic_delete_inode, + .show_options = generic_show_options, }; + save_mount_options(sb, data); + sb->s_maxbytes = MAX_LFS_FILESIZE; sb->s_blocksize = PAGE_CACHE_SIZE; sb->s_blocksize_bits = PAGE_CACHE_SHIFT; diff --git a/arch/powerpc/platforms/cell/spufs/run.c b/arch/powerpc/platforms/cell/spufs/run.c index c01a09da1e56..fca22e18069a 100644 --- a/arch/powerpc/platforms/cell/spufs/run.c +++ b/arch/powerpc/platforms/cell/spufs/run.c @@ -53,7 +53,7 @@ int spu_stopped(struct spu_context *ctx, u32 *stat) stopped = SPU_STATUS_INVALID_INSTR | SPU_STATUS_SINGLE_STEP | SPU_STATUS_STOPPED_BY_HALT | SPU_STATUS_STOPPED_BY_STOP; - if (*stat & stopped) + if (!(*stat & SPU_STATUS_RUNNING) && (*stat & stopped)) return 1; dsisr = ctx->csa.dsisr; @@ -354,8 +354,15 @@ long spufs_run_spu(struct spu_context *ctx, u32 *npc, u32 *event) do { ret = spufs_wait(ctx->stop_wq, spu_stopped(ctx, &status)); - if (unlikely(ret)) + if (unlikely(ret)) { + /* + * This is nasty: we need the state_mutex for all the + * bookkeeping even if the syscall was interrupted by + * a signal. ewww. 
+ */ + mutex_lock(&ctx->state_mutex); break; + } spu = ctx->spu; if (unlikely(test_and_clear_bit(SPU_SCHED_NOTIFY_ACTIVE, &ctx->sched_flags))) { @@ -388,16 +395,14 @@ long spufs_run_spu(struct spu_context *ctx, u32 *npc, u32 *event) SPU_STATUS_STOPPED_BY_HALT | SPU_STATUS_SINGLE_STEP))); - if ((status & SPU_STATUS_STOPPED_BY_STOP) && - (((status >> SPU_STOP_STATUS_SHIFT) & 0x3f00) == 0x2100) && - (ctx->state == SPU_STATE_RUNNABLE)) - ctx->stats.libassist++; - - spu_disable_spu(ctx); ret = spu_run_fini(ctx, npc, &status); spu_yield(ctx); + if ((status & SPU_STATUS_STOPPED_BY_STOP) && + (((status >> SPU_STOP_STATUS_SHIFT) & 0x3f00) == 0x2100)) + ctx->stats.libassist++; + if ((ret == 0) || ((ret == -ERESTARTSYS) && ((status & SPU_STATUS_STOPPED_BY_HALT) || @@ -410,8 +415,11 @@ long spufs_run_spu(struct spu_context *ctx, u32 *npc, u32 *event) * since we have TIF_SINGLESTEP set, thus the kernel will do * it upon return from the syscall anyawy */ - if ((status & SPU_STATUS_STOPPED_BY_STOP) - && (status >> SPU_STOP_STATUS_SHIFT) == 0x3fff) { + if (unlikely(status & SPU_STATUS_SINGLE_STEP)) + ret = -ERESTARTSYS; + + else if (unlikely((status & SPU_STATUS_STOPPED_BY_STOP) + && (status >> SPU_STOP_STATUS_SHIFT) == 0x3fff)) { force_sig(SIGTRAP, current); ret = -ERESTARTSYS; } diff --git a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c index 00d914232af1..5915343e2599 100644 --- a/arch/powerpc/platforms/cell/spufs/sched.c +++ b/arch/powerpc/platforms/cell/spufs/sched.c @@ -39,6 +39,7 @@ #include <linux/pid_namespace.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> +#include <linux/marker.h> #include <asm/io.h> #include <asm/mmu_context.h> @@ -216,8 +217,8 @@ void do_notify_spus_active(void) */ static void spu_bind_context(struct spu *spu, struct spu_context *ctx) { - pr_debug("%s: pid=%d SPU=%d NODE=%d\n", __FUNCTION__, current->pid, - spu->number, spu->node); + spu_context_trace(spu_bind_context__enter, ctx, spu); + spuctx_switch_state(ctx, SPU_UTIL_SYSTEM); if (ctx->flags & SPU_CREATE_NOSCHED) @@ -399,8 +400,8 @@ static int has_affinity(struct spu_context *ctx) */ static void spu_unbind_context(struct spu *spu, struct spu_context *ctx) { - pr_debug("%s: unbind pid=%d SPU=%d NODE=%d\n", __FUNCTION__, - spu->pid, spu->number, spu->node); + spu_context_trace(spu_unbind_context__enter, ctx, spu); + spuctx_switch_state(ctx, SPU_UTIL_SYSTEM); if (spu->ctx->flags & SPU_CREATE_NOSCHED) @@ -528,6 +529,8 @@ static struct spu *spu_get_idle(struct spu_context *ctx) struct spu *spu, *aff_ref_spu; int node, n; + spu_context_nospu_trace(spu_get_idle__enter, ctx); + if (ctx->gang) { mutex_lock(&ctx->gang->aff_mutex); if (has_affinity(ctx)) { @@ -546,8 +549,7 @@ static struct spu *spu_get_idle(struct spu_context *ctx) if (atomic_dec_and_test(&ctx->gang->aff_sched_count)) ctx->gang->aff_ref_spu = NULL; mutex_unlock(&ctx->gang->aff_mutex); - - return NULL; + goto not_found; } mutex_unlock(&ctx->gang->aff_mutex); } @@ -565,12 +567,14 @@ static struct spu *spu_get_idle(struct spu_context *ctx) mutex_unlock(&cbe_spu_info[node].list_mutex); } + not_found: + spu_context_nospu_trace(spu_get_idle__not_found, ctx); return NULL; found: spu->alloc_state = SPU_USED; mutex_unlock(&cbe_spu_info[node].list_mutex); - pr_debug("Got SPU %d %d\n", spu->number, spu->node); + spu_context_trace(spu_get_idle__found, ctx, spu); spu_init_channels(spu); return spu; } @@ -587,6 +591,8 @@ static struct spu *find_victim(struct spu_context *ctx) struct spu *spu; int node, n; + 
spu_context_nospu_trace(spu_find_vitim__enter, ctx); + /* * Look for a possible preemption candidate on the local node first. * If there is no candidate look at the other nodes. This isn't @@ -640,6 +646,8 @@ static struct spu *find_victim(struct spu_context *ctx) goto restart; } + spu_context_trace(__spu_deactivate__unload, ctx, spu); + mutex_lock(&cbe_spu_info[node].list_mutex); cbe_spu_info[node].nr_active--; spu_unbind_context(spu, victim); @@ -822,6 +830,7 @@ static int __spu_deactivate(struct spu_context *ctx, int force, int max_prio) */ void spu_deactivate(struct spu_context *ctx) { + spu_context_nospu_trace(spu_deactivate__enter, ctx); __spu_deactivate(ctx, 1, MAX_PRIO); } @@ -835,6 +844,7 @@ void spu_deactivate(struct spu_context *ctx) */ void spu_yield(struct spu_context *ctx) { + spu_context_nospu_trace(spu_yield__enter, ctx); if (!(ctx->flags & SPU_CREATE_NOSCHED)) { mutex_lock(&ctx->state_mutex); __spu_deactivate(ctx, 0, MAX_PRIO); @@ -864,11 +874,15 @@ static noinline void spusched_tick(struct spu_context *ctx) goto out; spu = ctx->spu; + + spu_context_trace(spusched_tick__preempt, ctx, spu); + new = grab_runnable_context(ctx->prio + 1, spu->node); if (new) { spu_unschedule(spu, ctx); spu_add_to_rq(ctx); } else { + spu_context_nospu_trace(spusched_tick__newslice, ctx); ctx->time_slice++; } out: diff --git a/arch/powerpc/platforms/cell/spufs/spufs.h b/arch/powerpc/platforms/cell/spufs/spufs.h index 0e114038ea6f..2c2fe3c07d72 100644 --- a/arch/powerpc/platforms/cell/spufs/spufs.h +++ b/arch/powerpc/platforms/cell/spufs/spufs.h @@ -268,6 +268,9 @@ extern char *isolated_loader; * Same as wait_event_interruptible(), except that here * we need to call spu_release(ctx) before sleeping, and * then spu_acquire(ctx) when awoken. + * + * Returns with state_mutex re-acquired when successfull or + * with -ERESTARTSYS and the state_mutex dropped when interrupted. */ #define spufs_wait(wq, condition) \ @@ -278,11 +281,11 @@ extern char *isolated_loader; prepare_to_wait(&(wq), &__wait, TASK_INTERRUPTIBLE); \ if (condition) \ break; \ + spu_release(ctx); \ if (signal_pending(current)) { \ __ret = -ERESTARTSYS; \ break; \ } \ - spu_release(ctx); \ schedule(); \ __ret = spu_acquire(ctx); \ if (__ret) \ @@ -325,4 +328,9 @@ extern void spu_free_lscsa(struct spu_state *csa); extern void spuctx_switch_state(struct spu_context *ctx, enum spu_utilization_state new_state); +#define spu_context_trace(name, ctx, spu) \ + trace_mark(name, "%p %p", ctx, spu); +#define spu_context_nospu_trace(name, ctx) \ + trace_mark(name, "%p", ctx); + #endif diff --git a/arch/powerpc/platforms/cell/spufs/sputrace.c b/arch/powerpc/platforms/cell/spufs/sputrace.c new file mode 100644 index 000000000000..01974f7776e1 --- /dev/null +++ b/arch/powerpc/platforms/cell/spufs/sputrace.c @@ -0,0 +1,241 @@ +/* + * Copyright (C) 2007 IBM Deutschland Entwicklung GmbH + * Released under GPL v2. + * + * Partially based on net/ipv4/tcp_probe.c. + * + * Simple tracing facility for spu contexts. 
+ */ +#include <linux/sched.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/marker.h> +#include <linux/proc_fs.h> +#include <linux/wait.h> +#include <asm/atomic.h> +#include <asm/uaccess.h> +#include "spufs.h" + +struct spu_probe { + const char *name; + const char *format; + marker_probe_func *probe_func; +}; + +struct sputrace { + ktime_t tstamp; + int owner_tid; /* owner */ + int curr_tid; + const char *name; + int number; +}; + +static int bufsize __read_mostly = 16384; +MODULE_PARM_DESC(bufsize, "Log buffer size (number of records)"); +module_param(bufsize, int, 0); + + +static DEFINE_SPINLOCK(sputrace_lock); +static DECLARE_WAIT_QUEUE_HEAD(sputrace_wait); +static ktime_t sputrace_start; +static unsigned long sputrace_head, sputrace_tail; +static struct sputrace *sputrace_log; + +static int sputrace_used(void) +{ + return (sputrace_head - sputrace_tail) % bufsize; +} + +static inline int sputrace_avail(void) +{ + return bufsize - sputrace_used(); +} + +static int sputrace_sprint(char *tbuf, int n) +{ + const struct sputrace *t = sputrace_log + sputrace_tail % bufsize; + struct timespec tv = + ktime_to_timespec(ktime_sub(t->tstamp, sputrace_start)); + + return snprintf(tbuf, n, + "[%lu.%09lu] %d: %s (thread = %d, spu = %d)\n", + (unsigned long) tv.tv_sec, + (unsigned long) tv.tv_nsec, + t->owner_tid, + t->name, + t->curr_tid, + t->number); +} + +static ssize_t sputrace_read(struct file *file, char __user *buf, + size_t len, loff_t *ppos) +{ + int error = 0, cnt = 0; + + if (!buf || len < 0) + return -EINVAL; + + while (cnt < len) { + char tbuf[128]; + int width; + + error = wait_event_interruptible(sputrace_wait, + sputrace_used() > 0); + if (error) + break; + + spin_lock(&sputrace_lock); + if (sputrace_head == sputrace_tail) { + spin_unlock(&sputrace_lock); + continue; + } + + width = sputrace_sprint(tbuf, sizeof(tbuf)); + if (width < len) + sputrace_tail = (sputrace_tail + 1) % bufsize; + spin_unlock(&sputrace_lock); + + if (width >= len) + break; + + error = copy_to_user(buf + cnt, tbuf, width); + if (error) + break; + cnt += width; + } + + return cnt == 0 ? error : cnt; +} + +static int sputrace_open(struct inode *inode, struct file *file) +{ + spin_lock(&sputrace_lock); + sputrace_head = sputrace_tail = 0; + sputrace_start = ktime_get(); + spin_unlock(&sputrace_lock); + + return 0; +} + +static const struct file_operations sputrace_fops = { + .owner = THIS_MODULE, + .open = sputrace_open, + .read = sputrace_read, +}; + +static void sputrace_log_item(const char *name, struct spu_context *ctx, + struct spu *spu) +{ + spin_lock(&sputrace_lock); + if (sputrace_avail() > 1) { + struct sputrace *t = sputrace_log + sputrace_head; + + t->tstamp = ktime_get(); + t->owner_tid = ctx->tid; + t->name = name; + t->curr_tid = current->pid; + t->number = spu ? 
spu->number : -1; + + sputrace_head = (sputrace_head + 1) % bufsize; + } else { + printk(KERN_WARNING + "sputrace: lost samples due to full buffer.\n"); + } + spin_unlock(&sputrace_lock); + + wake_up(&sputrace_wait); +} + +static void spu_context_event(void *probe_private, void *call_data, + const char *format, va_list *args) +{ + struct spu_probe *p = probe_private; + struct spu_context *ctx; + struct spu *spu; + + ctx = va_arg(*args, struct spu_context *); + spu = va_arg(*args, struct spu *); + + sputrace_log_item(p->name, ctx, spu); +} + +static void spu_context_nospu_event(void *probe_private, void *call_data, + const char *format, va_list *args) +{ + struct spu_probe *p = probe_private; + struct spu_context *ctx; + + ctx = va_arg(*args, struct spu_context *); + + sputrace_log_item(p->name, ctx, NULL); +} + +struct spu_probe spu_probes[] = { + { "spu_bind_context__enter", "%p %p", spu_context_event }, + { "spu_unbind_context__enter", "%p %p", spu_context_event }, + { "spu_get_idle__enter", "%p", spu_context_nospu_event }, + { "spu_get_idle__found", "%p %p", spu_context_event }, + { "spu_get_idle__not_found", "%p", spu_context_nospu_event }, + { "spu_find_victim__enter", "%p", spu_context_nospu_event }, + { "spusched_tick__preempt", "%p %p", spu_context_event }, + { "spusched_tick__newslice", "%p", spu_context_nospu_event }, + { "spu_yield__enter", "%p", spu_context_nospu_event }, + { "spu_deactivate__enter", "%p", spu_context_nospu_event }, + { "__spu_deactivate__unload", "%p %p", spu_context_event }, + { "spufs_ps_nopfn__enter", "%p", spu_context_nospu_event }, + { "spufs_ps_nopfn__sleep", "%p", spu_context_nospu_event }, + { "spufs_ps_nopfn__wake", "%p %p", spu_context_event }, + { "spufs_ps_nopfn__insert", "%p %p", spu_context_event }, + { "spu_acquire_saved__enter", "%p", spu_context_nospu_event }, + { "destroy_spu_context__enter", "%p", spu_context_nospu_event }, +}; + +static int __init sputrace_init(void) +{ + struct proc_dir_entry *entry; + int i, error = -ENOMEM; + + sputrace_log = kcalloc(sizeof(struct sputrace), + bufsize, GFP_KERNEL); + if (!sputrace_log) + goto out; + + entry = create_proc_entry("sputrace", S_IRUSR, NULL); + if (!entry) + goto out_free_log; + entry->proc_fops = &sputrace_fops; + + for (i = 0; i < ARRAY_SIZE(spu_probes); i++) { + struct spu_probe *p = &spu_probes[i]; + + error = marker_probe_register(p->name, p->format, + p->probe_func, p); + if (error) + printk(KERN_INFO "Unable to register probe %s\n", + p->name); + } + + return 0; + +out_free_log: + kfree(sputrace_log); +out: + return -ENOMEM; +} + +static void __exit sputrace_exit(void) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(spu_probes); i++) + marker_probe_unregister(spu_probes[i].name, + spu_probes[i].probe_func, &spu_probes[i]); + + remove_proc_entry("sputrace", NULL); + kfree(sputrace_log); +} + +module_init(sputrace_init); +module_exit(sputrace_exit); + +MODULE_LICENSE("GPL"); diff --git a/arch/powerpc/platforms/cell/spufs/syscalls.c b/arch/powerpc/platforms/cell/spufs/syscalls.c index 430404413178..49c87769b1f8 100644 --- a/arch/powerpc/platforms/cell/spufs/syscalls.c +++ b/arch/powerpc/platforms/cell/spufs/syscalls.c @@ -73,7 +73,7 @@ static long do_spu_create(const char __user *pathname, unsigned int flags, LOOKUP_OPEN|LOOKUP_CREATE, &nd); if (!ret) { ret = spufs_create(&nd, flags, mode, neighbor); - path_release(&nd); + path_put(&nd.path); } putname(tmp); } diff --git a/arch/powerpc/platforms/embedded6xx/Kconfig b/arch/powerpc/platforms/embedded6xx/Kconfig index 6c8083757938..429088967813 
100644 --- a/arch/powerpc/platforms/embedded6xx/Kconfig +++ b/arch/powerpc/platforms/embedded6xx/Kconfig @@ -24,7 +24,6 @@ config STORCENTER select MPIC select FSL_SOC select PPC_UDBG_16550 if SERIAL_8250 - select WANT_DEVICE_TREE select MPC10X_OPENPIC select MPC10X_BRIDGE help @@ -37,7 +36,6 @@ config MPC7448HPC2 select TSI108_BRIDGE select DEFAULT_UIMAGE select PPC_UDBG_16550 - select WANT_DEVICE_TREE select TSI108_BRIDGE help Select MPC7448HPC2 if configuring for Freescale MPC7448HPC2 (Taiga) @@ -48,7 +46,6 @@ config PPC_HOLLY depends on EMBEDDED6xx select TSI108_BRIDGE select PPC_UDBG_16550 - select WANT_DEVICE_TREE select TSI108_BRIDGE help Select PPC_HOLLY if configuring for an IBM 750GX/CL Eval @@ -59,7 +56,6 @@ config PPC_PRPMC2800 depends on EMBEDDED6xx select MV64X60 select NOT_COHERENT_CACHE - select WANT_DEVICE_TREE help This option enables support for the Motorola PrPMC2800 board diff --git a/arch/powerpc/platforms/embedded6xx/storcenter.c b/arch/powerpc/platforms/embedded6xx/storcenter.c index e12e9d298716..8864e4884980 100644 --- a/arch/powerpc/platforms/embedded6xx/storcenter.c +++ b/arch/powerpc/platforms/embedded6xx/storcenter.c @@ -132,33 +132,18 @@ static void __init storcenter_init_IRQ(void) paddr = (phys_addr_t)of_translate_address(dnp, prop); mpic = mpic_alloc(dnp, paddr, MPIC_PRIMARY | MPIC_WANTS_RESET, - 4, 32, " EPIC "); + 16, 32, " OpenPIC "); of_node_put(dnp); BUG_ON(mpic == NULL); - /* PCI IRQs */ /* - * 2.6.12 patch: - * openpic_set_sources(0, 5, OpenPIC_Addr + 0x10200); - * openpic_set_sources(5, 2, OpenPIC_Addr + 0x11120); - * first_irq, num_irqs, __iomem first_ISR - * o_ss: i, src: 0, fdf50200 - * o_ss: i, src: 1, fdf50220 - * o_ss: i, src: 2, fdf50240 - * o_ss: i, src: 3, fdf50260 - * o_ss: i, src: 4, fdf50280 - * o_ss: i, src: 5, fdf51120 - * o_ss: i, src: 6, fdf51140 + * 16 Serial Interrupts followed by 16 Internal Interrupts. + * I2C is the second internal, so it is at 17, 0x11020. */ mpic_assign_isu(mpic, 0, paddr + 0x10200); - mpic_assign_isu(mpic, 1, paddr + 0x10220); - mpic_assign_isu(mpic, 2, paddr + 0x10240); - mpic_assign_isu(mpic, 3, paddr + 0x10260); - mpic_assign_isu(mpic, 4, paddr + 0x10280); - mpic_assign_isu(mpic, 5, paddr + 0x11120); - mpic_assign_isu(mpic, 6, paddr + 0x11140); + mpic_assign_isu(mpic, 1, paddr + 0x11000); mpic_init(mpic); } @@ -178,7 +163,7 @@ static int __init storcenter_probe(void) { unsigned long root = of_get_flat_dt_root(); - return of_flat_dt_is_compatible(root, "storcenter"); + return of_flat_dt_is_compatible(root, "iomega,storcenter"); } define_machine(storcenter){ diff --git a/arch/powerpc/platforms/iseries/vio.c b/arch/powerpc/platforms/iseries/vio.c index be06cfd9fa3d..657b72f68493 100644 --- a/arch/powerpc/platforms/iseries/vio.c +++ b/arch/powerpc/platforms/iseries/vio.c @@ -75,7 +75,7 @@ static struct property *new_property(const char *name, int length, return np; } -static void __init free_property(struct property *np) +static void free_property(struct property *np) { kfree(np); } diff --git a/arch/powerpc/platforms/powermac/cpufreq_32.c b/arch/powerpc/platforms/powermac/cpufreq_32.c index c04abcc28a7a..792d3ce8112e 100644 --- a/arch/powerpc/platforms/powermac/cpufreq_32.c +++ b/arch/powerpc/platforms/powermac/cpufreq_32.c @@ -113,8 +113,6 @@ static inline void debug_calc_bogomips(void) * result. We backup/restore the value to avoid affecting the * core cpufreq framework's own calculation. 
*/ - extern void calibrate_delay(void); - unsigned long save_lpj = loops_per_jiffy; calibrate_delay(); loops_per_jiffy = save_lpj; diff --git a/arch/powerpc/platforms/powermac/feature.c b/arch/powerpc/platforms/powermac/feature.c index ba931be2175c..5169ecc37123 100644 --- a/arch/powerpc/platforms/powermac/feature.c +++ b/arch/powerpc/platforms/powermac/feature.c @@ -2565,6 +2565,8 @@ static void __init probe_uninorth(void) /* Locate core99 Uni-N */ uninorth_node = of_find_node_by_name(NULL, "uni-n"); + uninorth_maj = 1; + /* Locate G5 u3 */ if (uninorth_node == NULL) { uninorth_node = of_find_node_by_name(NULL, "u3"); @@ -2575,8 +2577,10 @@ static void __init probe_uninorth(void) uninorth_node = of_find_node_by_name(NULL, "u4"); uninorth_maj = 4; } - if (uninorth_node == NULL) + if (uninorth_node == NULL) { + uninorth_maj = 0; return; + } addrp = of_get_property(uninorth_node, "reg", NULL); if (addrp == NULL) @@ -3029,3 +3033,8 @@ void pmac_resume_agp_for_card(struct pci_dev *dev) pmac_agp_resume(pmac_agp_bridge); } EXPORT_SYMBOL(pmac_resume_agp_for_card); + +int pmac_get_uninorth_variant(void) +{ + return uninorth_maj; +} diff --git a/arch/powerpc/platforms/pseries/hotplug-cpu.c b/arch/powerpc/platforms/pseries/hotplug-cpu.c index c4ad54e0f288..1f032483c026 100644 --- a/arch/powerpc/platforms/pseries/hotplug-cpu.c +++ b/arch/powerpc/platforms/pseries/hotplug-cpu.c @@ -58,7 +58,7 @@ static void pseries_mach_cpu_die(void) { local_irq_disable(); idle_task_exit(); - xics_teardown_cpu(0); + xics_teardown_cpu(); unregister_slb_shadow(hard_smp_processor_id(), __pa(get_slb_shadow())); rtas_stop_self(); /* Should never get here... */ diff --git a/arch/powerpc/platforms/pseries/kexec.c b/arch/powerpc/platforms/pseries/kexec.c index 412a5e7aff2d..e9dd5fe081c9 100644 --- a/arch/powerpc/platforms/pseries/kexec.c +++ b/arch/powerpc/platforms/pseries/kexec.c @@ -54,7 +54,7 @@ void __init setup_kexec_cpu_down_mpic(void) static void pseries_kexec_cpu_down_xics(int crash_shutdown, int secondary) { pseries_kexec_cpu_down(crash_shutdown, secondary); - xics_teardown_cpu(secondary); + xics_kexec_teardown_cpu(secondary); } void __init setup_kexec_cpu_down_xics(void) diff --git a/arch/powerpc/platforms/pseries/reconfig.c b/arch/powerpc/platforms/pseries/reconfig.c index c02f8742c54d..2800fced8c7c 100644 --- a/arch/powerpc/platforms/pseries/reconfig.c +++ b/arch/powerpc/platforms/pseries/reconfig.c @@ -167,6 +167,7 @@ static int pSeries_reconfig_remove_node(struct device_node *np) if ((child = of_get_next_child(np, NULL))) { of_node_put(child); + of_node_put(parent); return -EBUSY; } diff --git a/arch/powerpc/platforms/pseries/xics.c b/arch/powerpc/platforms/pseries/xics.c index 8f8dd9c3ca6b..ca52b587166d 100644 --- a/arch/powerpc/platforms/pseries/xics.c +++ b/arch/powerpc/platforms/pseries/xics.c @@ -160,6 +160,46 @@ static inline void lpar_qirr_info(int n_cpu , u8 value) /* High level handlers and init code */ +static void xics_update_irq_servers(void) +{ + int i, j; + struct device_node *np; + u32 ilen; + const u32 *ireg, *isize; + u32 hcpuid; + + /* Find the server numbers for the boot cpu. */ + np = of_get_cpu_node(boot_cpuid, NULL); + BUG_ON(!np); + + ireg = of_get_property(np, "ibm,ppc-interrupt-gserver#s", &ilen); + if (!ireg) { + of_node_put(np); + return; + } + + i = ilen / sizeof(int); + hcpuid = get_hard_smp_processor_id(boot_cpuid); + + /* Global interrupt distribution server is specified in the last + * entry of "ibm,ppc-interrupt-gserver#s" property. 
Get the last + * entry fom this property for current boot cpu id and use it as + * default distribution server + */ + for (j = 0; j < i; j += 2) { + if (ireg[j] == hcpuid) { + default_server = hcpuid; + default_distrib_server = ireg[j+1]; + + isize = of_get_property(np, + "ibm,interrupt-server#-size", NULL); + if (isize) + interrupt_server_size = *isize; + } + } + + of_node_put(np); +} #ifdef CONFIG_SMP static int get_irq_server(unsigned int virq, unsigned int strict_check) @@ -169,6 +209,9 @@ static int get_irq_server(unsigned int virq, unsigned int strict_check) cpumask_t cpumask = irq_desc[virq].affinity; cpumask_t tmp = CPU_MASK_NONE; + if (! cpu_isset(default_server, cpu_online_map)) + xics_update_irq_servers(); + if (!distribute_irqs) return default_server; @@ -658,39 +701,11 @@ static void __init xics_setup_8259_cascade(void) set_irq_chained_handler(cascade, pseries_8259_cascade); } -static struct device_node *cpuid_to_of_node(int cpu) -{ - struct device_node *np; - u32 hcpuid = get_hard_smp_processor_id(cpu); - - for_each_node_by_type(np, "cpu") { - int i, len; - const u32 *intserv; - - intserv = of_get_property(np, "ibm,ppc-interrupt-server#s", - &len); - - if (!intserv) - intserv = of_get_property(np, "reg", &len); - - i = len / sizeof(u32); - - while (i--) - if (intserv[i] == hcpuid) - return np; - } - - return NULL; -} - void __init xics_init_IRQ(void) { - int i, j; struct device_node *np; - u32 ilen, indx = 0; - const u32 *ireg, *isize; + u32 indx = 0; int found = 0; - u32 hcpuid; ppc64_boot_msg(0x20, "XICS Init"); @@ -709,34 +724,7 @@ void __init xics_init_IRQ(void) return; xics_init_host(); - - /* Find the server numbers for the boot cpu. */ - np = cpuid_to_of_node(boot_cpuid); - BUG_ON(!np); - ireg = of_get_property(np, "ibm,ppc-interrupt-gserver#s", &ilen); - if (!ireg) - goto skip_gserver_check; - i = ilen / sizeof(int); - hcpuid = get_hard_smp_processor_id(boot_cpuid); - - /* Global interrupt distribution server is specified in the last - * entry of "ibm,ppc-interrupt-gserver#s" property. Get the last - * entry fom this property for current boot cpu id and use it as - * default distribution server - */ - for (j = 0; j < i; j += 2) { - if (ireg[j] == hcpuid) { - default_server = hcpuid; - default_distrib_server = ireg[j+1]; - - isize = of_get_property(np, - "ibm,interrupt-server#-size", NULL); - if (isize) - interrupt_server_size = *isize; - } - } -skip_gserver_check: - of_node_put(np); + xics_update_irq_servers(); if (firmware_has_feature(FW_FEATURE_LPAR)) ppc_md.get_irq = xics_get_irq_lpar; @@ -775,11 +763,9 @@ void xics_request_IPIs(void) } #endif /* CONFIG_SMP */ -void xics_teardown_cpu(int secondary) +void xics_teardown_cpu() { int cpu = smp_processor_id(); - unsigned int ipi; - struct irq_desc *desc; xics_set_cpu_priority(0); @@ -790,9 +776,17 @@ void xics_teardown_cpu(int secondary) lpar_qirr_info(cpu, 0xff); else direct_qirr_info(cpu, 0xff); +} + +void xics_kexec_teardown_cpu(int secondary) +{ + unsigned int ipi; + struct irq_desc *desc; + + xics_teardown_cpu(); /* - * we need to EOI the IPI if we got here from kexec down IPI + * we need to EOI the IPI * * probably need to check all the other interrupts too * should we be flagging idle loop instead? 
@@ -880,8 +874,8 @@ void xics_migrate_irqs_away(void) virq, cpu); /* Reset affinity to all cpus */ + irq_desc[virq].affinity = CPU_MASK_ALL; desc->chip->set_affinity(virq, CPU_MASK_ALL); - irq_desc[irq].affinity = CPU_MASK_ALL; unlock: spin_unlock_irqrestore(&desc->lock, flags); } diff --git a/arch/powerpc/platforms/pseries/xics.h b/arch/powerpc/platforms/pseries/xics.h index 9ffd809d29e2..c26bcff47b6d 100644 --- a/arch/powerpc/platforms/pseries/xics.h +++ b/arch/powerpc/platforms/pseries/xics.h @@ -16,7 +16,8 @@ extern void xics_init_IRQ(void); extern void xics_setup_cpu(void); -extern void xics_teardown_cpu(int secondary); +extern void xics_teardown_cpu(void); +extern void xics_kexec_teardown_cpu(int secondary); extern void xics_cause_IPI(int cpu); extern void xics_request_IPIs(void); extern void xics_migrate_irqs_away(void); |