Diffstat (limited to 'arch/powerpc/platforms')
-rw-r--r--  arch/powerpc/platforms/85xx/common.c | 1
-rw-r--r--  arch/powerpc/platforms/85xx/corenet_generic.c | 12
-rw-r--r--  arch/powerpc/platforms/85xx/p1022_rdk.c | 4
-rw-r--r--  arch/powerpc/platforms/85xx/smp.c | 4
-rw-r--r--  arch/powerpc/platforms/Kconfig | 5
-rw-r--r--  arch/powerpc/platforms/Kconfig.cputype | 3
-rw-r--r--  arch/powerpc/platforms/cell/Kconfig | 11
-rw-r--r--  arch/powerpc/platforms/cell/Makefile | 15
-rw-r--r--  arch/powerpc/platforms/cell/beat.c | 264
-rw-r--r--  arch/powerpc/platforms/cell/beat.h | 39
-rw-r--r--  arch/powerpc/platforms/cell/beat_htab.c | 445
-rw-r--r--  arch/powerpc/platforms/cell/beat_hvCall.S | 285
-rw-r--r--  arch/powerpc/platforms/cell/beat_interrupt.c | 253
-rw-r--r--  arch/powerpc/platforms/cell/beat_interrupt.h | 30
-rw-r--r--  arch/powerpc/platforms/cell/beat_iommu.c | 115
-rw-r--r--  arch/powerpc/platforms/cell/beat_spu_priv1.c | 205
-rw-r--r--  arch/powerpc/platforms/cell/beat_syscall.h | 164
-rw-r--r--  arch/powerpc/platforms/cell/beat_udbg.c | 98
-rw-r--r--  arch/powerpc/platforms/cell/beat_wrapper.h | 290
-rw-r--r--  arch/powerpc/platforms/cell/cell.h | 24
-rw-r--r--  arch/powerpc/platforms/cell/celleb_pci.c | 500
-rw-r--r--  arch/powerpc/platforms/cell/celleb_pci.h | 46
-rw-r--r--  arch/powerpc/platforms/cell/celleb_scc.h | 232
-rw-r--r--  arch/powerpc/platforms/cell/celleb_scc_epci.c | 428
-rw-r--r--  arch/powerpc/platforms/cell/celleb_scc_pciex.c | 538
-rw-r--r--  arch/powerpc/platforms/cell/celleb_scc_sio.c | 99
-rw-r--r--  arch/powerpc/platforms/cell/celleb_scc_uhc.c | 95
-rw-r--r--  arch/powerpc/platforms/cell/celleb_setup.c | 243
-rw-r--r--  arch/powerpc/platforms/cell/interrupt.c | 2
-rw-r--r--  arch/powerpc/platforms/cell/iommu.c | 11
-rw-r--r--  arch/powerpc/platforms/cell/setup.c | 5
-rw-r--r--  arch/powerpc/platforms/cell/smp.c | 9
-rw-r--r--  arch/powerpc/platforms/cell/spu_callbacks.c | 1
-rw-r--r--  arch/powerpc/platforms/chrp/setup.c | 2
-rw-r--r--  arch/powerpc/platforms/maple/maple.h | 2
-rw-r--r--  arch/powerpc/platforms/maple/pci.c | 4
-rw-r--r--  arch/powerpc/platforms/maple/setup.c | 2
-rw-r--r--  arch/powerpc/platforms/pasemi/iommu.c | 6
-rw-r--r--  arch/powerpc/platforms/pasemi/pasemi.h | 1
-rw-r--r--  arch/powerpc/platforms/pasemi/pci.c | 5
-rw-r--r--  arch/powerpc/platforms/powermac/bootx_init.c | 2
-rw-r--r--  arch/powerpc/platforms/powermac/pci.c | 38
-rw-r--r--  arch/powerpc/platforms/powermac/pic.c | 3
-rw-r--r--  arch/powerpc/platforms/powermac/pmac.h | 3
-rw-r--r--  arch/powerpc/platforms/powermac/setup.c | 22
-rw-r--r--  arch/powerpc/platforms/powermac/smp.c | 18
-rw-r--r--  arch/powerpc/platforms/powernv/Kconfig | 7
-rw-r--r--  arch/powerpc/platforms/powernv/Makefile | 2
-rw-r--r--  arch/powerpc/platforms/powernv/eeh-ioda.c | 1149
-rw-r--r--  arch/powerpc/platforms/powernv/eeh-powernv.c | 1300
-rw-r--r--  arch/powerpc/platforms/powernv/opal-dump.c | 3
-rw-r--r--  arch/powerpc/platforms/powernv/opal-elog.c | 3
-rw-r--r--  arch/powerpc/platforms/powernv/opal-flash.c | 8
-rw-r--r--  arch/powerpc/platforms/powernv/opal-nvram.c | 10
-rw-r--r--  arch/powerpc/platforms/powernv/opal-power.c | 5
-rw-r--r--  arch/powerpc/platforms/powernv/opal-sensor.c | 30
-rw-r--r--  arch/powerpc/platforms/powernv/opal-wrappers.S | 7
-rw-r--r--  arch/powerpc/platforms/powernv/opal.c | 92
-rw-r--r--  arch/powerpc/platforms/powernv/pci-ioda.c | 797
-rw-r--r--  arch/powerpc/platforms/powernv/pci-p5ioc2.c | 1
-rw-r--r--  arch/powerpc/platforms/powernv/pci.c | 214
-rw-r--r--  arch/powerpc/platforms/powernv/pci.h | 38
-rw-r--r--  arch/powerpc/platforms/powernv/powernv.h | 2
-rw-r--r--  arch/powerpc/platforms/powernv/setup.c | 54
-rw-r--r--  arch/powerpc/platforms/powernv/smp.c | 27
-rw-r--r--  arch/powerpc/platforms/ps3/smp.c | 4
-rw-r--r--  arch/powerpc/platforms/pseries/Kconfig | 1
-rw-r--r--  arch/powerpc/platforms/pseries/dlpar.c | 118
-rw-r--r--  arch/powerpc/platforms/pseries/eeh_pseries.c | 98
-rw-r--r--  arch/powerpc/platforms/pseries/hotplug-memory.c | 489
-rw-r--r--  arch/powerpc/platforms/pseries/hvCall.S | 2
-rw-r--r--  arch/powerpc/platforms/pseries/iommu.c | 11
-rw-r--r--  arch/powerpc/platforms/pseries/lpar.c | 2
-rw-r--r--  arch/powerpc/platforms/pseries/mobility.c | 70
-rw-r--r--  arch/powerpc/platforms/pseries/msi.c | 6
-rw-r--r--  arch/powerpc/platforms/pseries/nvram.c | 674
-rw-r--r--  arch/powerpc/platforms/pseries/pci_dlpar.c | 5
-rw-r--r--  arch/powerpc/platforms/pseries/pseries.h | 14
-rw-r--r--  arch/powerpc/platforms/pseries/setup.c | 48
-rw-r--r--  arch/powerpc/platforms/pseries/smp.c | 6
80 files changed, 3044 insertions, 6837 deletions
diff --git a/arch/powerpc/platforms/85xx/common.c b/arch/powerpc/platforms/85xx/common.c
index 4a9ad871a168..7bfb9b184dd4 100644
--- a/arch/powerpc/platforms/85xx/common.c
+++ b/arch/powerpc/platforms/85xx/common.c
@@ -40,6 +40,7 @@ static const struct of_device_id mpc85xx_common_ids[] __initconst = {
{ .compatible = "fsl,qoriq-pcie-v2.4", },
{ .compatible = "fsl,qoriq-pcie-v2.3", },
{ .compatible = "fsl,qoriq-pcie-v2.2", },
+ { .compatible = "fsl,fman", },
{},
};
diff --git a/arch/powerpc/platforms/85xx/corenet_generic.c b/arch/powerpc/platforms/85xx/corenet_generic.c
index 1f309ccb096e..9824d2cf79bd 100644
--- a/arch/powerpc/platforms/85xx/corenet_generic.c
+++ b/arch/powerpc/platforms/85xx/corenet_generic.c
@@ -88,6 +88,15 @@ static const struct of_device_id of_device_ids[] = {
.compatible = "simple-bus"
},
{
+ .compatible = "mdio-mux-gpio"
+ },
+ {
+ .compatible = "fsl,fpga-ngpixis"
+ },
+ {
+ .compatible = "fsl,fpga-qixis"
+ },
+ {
.compatible = "fsl,srio",
},
{
@@ -108,6 +117,9 @@ static const struct of_device_id of_device_ids[] = {
{
.compatible = "fsl,qe",
},
+ {
+ .compatible = "fsl,fman",
+ },
/* The following two are for the Freescale hypervisor */
{
.name = "hypervisor",
diff --git a/arch/powerpc/platforms/85xx/p1022_rdk.c b/arch/powerpc/platforms/85xx/p1022_rdk.c
index 7a180f0308d5..680232d6ba48 100644
--- a/arch/powerpc/platforms/85xx/p1022_rdk.c
+++ b/arch/powerpc/platforms/85xx/p1022_rdk.c
@@ -50,14 +50,14 @@ void p1022rdk_set_pixel_clock(unsigned int pixclock)
/* Map the global utilities registers. */
guts_np = of_find_compatible_node(NULL, NULL, "fsl,p1022-guts");
if (!guts_np) {
- pr_err("p1022rdk: missing global utilties device node\n");
+ pr_err("p1022rdk: missing global utilities device node\n");
return;
}
guts = of_iomap(guts_np, 0);
of_node_put(guts_np);
if (!guts) {
- pr_err("p1022rdk: could not map global utilties device\n");
+ pr_err("p1022rdk: could not map global utilities device\n");
return;
}
diff --git a/arch/powerpc/platforms/85xx/smp.c b/arch/powerpc/platforms/85xx/smp.c
index d7c1e69f3070..8631ac5f0e57 100644
--- a/arch/powerpc/platforms/85xx/smp.c
+++ b/arch/powerpc/platforms/85xx/smp.c
@@ -360,10 +360,10 @@ static void mpc85xx_smp_kexec_down(void *arg)
static void map_and_flush(unsigned long paddr)
{
struct page *page = pfn_to_page(paddr >> PAGE_SHIFT);
- unsigned long kaddr = (unsigned long)kmap(page);
+ unsigned long kaddr = (unsigned long)kmap_atomic(page);
flush_dcache_range(kaddr, kaddr + PAGE_SIZE);
- kunmap(page);
+ kunmap_atomic((void *)kaddr);
}
/**
diff --git a/arch/powerpc/platforms/Kconfig b/arch/powerpc/platforms/Kconfig
index 391b3f6b54a3..b7f9c408bf24 100644
--- a/arch/powerpc/platforms/Kconfig
+++ b/arch/powerpc/platforms/Kconfig
@@ -72,11 +72,6 @@ config PPC_SMP_MUXED_IPI
cpu. This will enable the generic code to multiplex the 4
messages on to one ipi.
-config PPC_UDBG_BEAT
- bool "BEAT based debug console"
- depends on PPC_CELLEB
- default n
-
config IPIC
bool
default n
diff --git a/arch/powerpc/platforms/Kconfig.cputype b/arch/powerpc/platforms/Kconfig.cputype
index 76483e3acd60..7264e91190be 100644
--- a/arch/powerpc/platforms/Kconfig.cputype
+++ b/arch/powerpc/platforms/Kconfig.cputype
@@ -2,6 +2,7 @@ config PPC64
bool "64-bit kernel"
default n
select HAVE_VIRT_CPU_ACCOUNTING
+ select ZLIB_DEFLATE
help
This option selects whether a 32-bit or a 64-bit kernel
will be built.
@@ -15,7 +16,7 @@ choice
The most common ones are the desktop and server CPUs (601, 603,
604, 740, 750, 74xx) CPUs from Freescale and IBM, with their
embedded 512x/52xx/82xx/83xx/86xx counterparts.
- The other embeeded parts, namely 4xx, 8xx, e200 (55xx) and e500
+ The other embedded parts, namely 4xx, 8xx, e200 (55xx) and e500
(85xx) each form a family of their own that is not compatible
with the others.
diff --git a/arch/powerpc/platforms/cell/Kconfig b/arch/powerpc/platforms/cell/Kconfig
index 870b6dbd4d18..2f23133ab3d1 100644
--- a/arch/powerpc/platforms/cell/Kconfig
+++ b/arch/powerpc/platforms/cell/Kconfig
@@ -33,17 +33,6 @@ config PPC_IBM_CELL_BLADE
select PPC_UDBG_16550
select UDBG_RTAS_CONSOLE
-config PPC_CELLEB
- bool "Toshiba's Cell Reference Set 'Celleb' Architecture"
- depends on PPC64 && PPC_BOOK3S
- select PPC_CELL_NATIVE
- select PPC_OF_PLATFORM_PCI
- select PCI
- select HAS_TXX9_SERIAL
- select PPC_UDBG_BEAT
- select USB_OHCI_BIG_ENDIAN_MMIO
- select USB_EHCI_BIG_ENDIAN_MMIO
-
config PPC_CELL_QPACE
bool "IBM Cell - QPACE"
depends on PPC64 && PPC_BOOK3S
diff --git a/arch/powerpc/platforms/cell/Makefile b/arch/powerpc/platforms/cell/Makefile
index 2d16884f67b9..34699bddfddd 100644
--- a/arch/powerpc/platforms/cell/Makefile
+++ b/arch/powerpc/platforms/cell/Makefile
@@ -29,18 +29,3 @@ obj-$(CONFIG_AXON_MSI) += axon_msi.o
# qpace setup
obj-$(CONFIG_PPC_CELL_QPACE) += qpace_setup.o
-
-# celleb stuff
-ifeq ($(CONFIG_PPC_CELLEB),y)
-obj-y += celleb_setup.o \
- celleb_pci.o celleb_scc_epci.o \
- celleb_scc_pciex.o \
- celleb_scc_uhc.o \
- spider-pci.o beat.o beat_htab.o \
- beat_hvCall.o beat_interrupt.o \
- beat_iommu.o
-
-obj-$(CONFIG_PPC_UDBG_BEAT) += beat_udbg.o
-obj-$(CONFIG_SERIAL_TXX9) += celleb_scc_sio.o
-obj-$(CONFIG_SPU_BASE) += beat_spu_priv1.o
-endif
diff --git a/arch/powerpc/platforms/cell/beat.c b/arch/powerpc/platforms/cell/beat.c
deleted file mode 100644
index affcf566d460..000000000000
--- a/arch/powerpc/platforms/cell/beat.c
+++ /dev/null
@@ -1,264 +0,0 @@
-/*
- * Simple routines for Celleb/Beat
- *
- * (C) Copyright 2006-2007 TOSHIBA CORPORATION
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- */
-
-#include <linux/export.h>
-#include <linux/init.h>
-#include <linux/err.h>
-#include <linux/rtc.h>
-#include <linux/interrupt.h>
-#include <linux/irqreturn.h>
-#include <linux/reboot.h>
-
-#include <asm/hvconsole.h>
-#include <asm/time.h>
-#include <asm/machdep.h>
-#include <asm/firmware.h>
-
-#include "beat_wrapper.h"
-#include "beat.h"
-#include "beat_interrupt.h"
-
-static int beat_pm_poweroff_flag;
-
-void beat_restart(char *cmd)
-{
- beat_shutdown_logical_partition(!beat_pm_poweroff_flag);
-}
-
-void beat_power_off(void)
-{
- beat_shutdown_logical_partition(0);
-}
-
-u64 beat_halt_code = 0x1000000000000000UL;
-EXPORT_SYMBOL(beat_halt_code);
-
-void beat_halt(void)
-{
- beat_shutdown_logical_partition(beat_halt_code);
-}
-
-int beat_set_rtc_time(struct rtc_time *rtc_time)
-{
- u64 tim;
- tim = mktime(rtc_time->tm_year+1900,
- rtc_time->tm_mon+1, rtc_time->tm_mday,
- rtc_time->tm_hour, rtc_time->tm_min, rtc_time->tm_sec);
- if (beat_rtc_write(tim))
- return -1;
- return 0;
-}
-
-void beat_get_rtc_time(struct rtc_time *rtc_time)
-{
- u64 tim;
-
- if (beat_rtc_read(&tim))
- tim = 0;
- to_tm(tim, rtc_time);
- rtc_time->tm_year -= 1900;
- rtc_time->tm_mon -= 1;
-}
-
-#define BEAT_NVRAM_SIZE 4096
-
-ssize_t beat_nvram_read(char *buf, size_t count, loff_t *index)
-{
- unsigned int i;
- unsigned long len;
- char *p = buf;
-
- if (*index >= BEAT_NVRAM_SIZE)
- return -ENODEV;
- i = *index;
- if (i + count > BEAT_NVRAM_SIZE)
- count = BEAT_NVRAM_SIZE - i;
-
- for (; count != 0; count -= len) {
- len = count;
- if (len > BEAT_NVRW_CNT)
- len = BEAT_NVRW_CNT;
- if (beat_eeprom_read(i, len, p))
- return -EIO;
-
- p += len;
- i += len;
- }
- *index = i;
- return p - buf;
-}
-
-ssize_t beat_nvram_write(char *buf, size_t count, loff_t *index)
-{
- unsigned int i;
- unsigned long len;
- char *p = buf;
-
- if (*index >= BEAT_NVRAM_SIZE)
- return -ENODEV;
- i = *index;
- if (i + count > BEAT_NVRAM_SIZE)
- count = BEAT_NVRAM_SIZE - i;
-
- for (; count != 0; count -= len) {
- len = count;
- if (len > BEAT_NVRW_CNT)
- len = BEAT_NVRW_CNT;
- if (beat_eeprom_write(i, len, p))
- return -EIO;
-
- p += len;
- i += len;
- }
- *index = i;
- return p - buf;
-}
-
-ssize_t beat_nvram_get_size(void)
-{
- return BEAT_NVRAM_SIZE;
-}
-
-int beat_set_xdabr(unsigned long dabr, unsigned long dabrx)
-{
- if (beat_set_dabr(dabr, dabrx))
- return -1;
- return 0;
-}
-
-int64_t beat_get_term_char(u64 vterm, u64 *len, u64 *t1, u64 *t2)
-{
- u64 db[2];
- s64 ret;
-
- ret = beat_get_characters_from_console(vterm, len, (u8 *)db);
- if (ret == 0) {
- *t1 = db[0];
- *t2 = db[1];
- }
- return ret;
-}
-EXPORT_SYMBOL(beat_get_term_char);
-
-int64_t beat_put_term_char(u64 vterm, u64 len, u64 t1, u64 t2)
-{
- u64 db[2];
-
- db[0] = t1;
- db[1] = t2;
- return beat_put_characters_to_console(vterm, len, (u8 *)db);
-}
-EXPORT_SYMBOL(beat_put_term_char);
-
-void beat_power_save(void)
-{
- beat_pause(0);
-}
-
-#ifdef CONFIG_KEXEC
-void beat_kexec_cpu_down(int crash, int secondary)
-{
- beatic_deinit_IRQ();
-}
-#endif
-
-static irqreturn_t beat_power_event(int virq, void *arg)
-{
- printk(KERN_DEBUG "Beat: power button pressed\n");
- beat_pm_poweroff_flag = 1;
- ctrl_alt_del();
- return IRQ_HANDLED;
-}
-
-static irqreturn_t beat_reset_event(int virq, void *arg)
-{
- printk(KERN_DEBUG "Beat: reset button pressed\n");
- beat_pm_poweroff_flag = 0;
- ctrl_alt_del();
- return IRQ_HANDLED;
-}
-
-static struct beat_event_list {
- const char *typecode;
- irq_handler_t handler;
- unsigned int virq;
-} beat_event_list[] = {
- { "power", beat_power_event, 0 },
- { "reset", beat_reset_event, 0 },
-};
-
-static int __init beat_register_event(void)
-{
- u64 path[4], data[2];
- int rc, i;
- unsigned int virq;
-
- for (i = 0; i < ARRAY_SIZE(beat_event_list); i++) {
- struct beat_event_list *ev = &beat_event_list[i];
-
- if (beat_construct_event_receive_port(data) != 0) {
- printk(KERN_ERR "Beat: "
- "cannot construct event receive port for %s\n",
- ev->typecode);
- return -EINVAL;
- }
-
- virq = irq_create_mapping(NULL, data[0]);
- if (virq == NO_IRQ) {
- printk(KERN_ERR "Beat: failed to get virtual IRQ"
- " for event receive port for %s\n",
- ev->typecode);
- beat_destruct_event_receive_port(data[0]);
- return -EIO;
- }
- ev->virq = virq;
-
- rc = request_irq(virq, ev->handler, 0,
- ev->typecode, NULL);
- if (rc != 0) {
- printk(KERN_ERR "Beat: failed to request virtual IRQ"
- " for event receive port for %s\n",
- ev->typecode);
- beat_destruct_event_receive_port(data[0]);
- return rc;
- }
-
- path[0] = 0x1000000065780000ul; /* 1,ex */
- path[1] = 0x627574746f6e0000ul; /* button */
- path[2] = 0;
- strncpy((char *)&path[2], ev->typecode, 8);
- path[3] = 0;
- data[1] = 0;
-
- beat_create_repository_node(path, data);
- }
- return 0;
-}
-
-static int __init beat_event_init(void)
-{
- if (!firmware_has_feature(FW_FEATURE_BEAT))
- return -EINVAL;
-
- beat_pm_poweroff_flag = 0;
- return beat_register_event();
-}
-
-device_initcall(beat_event_init);
diff --git a/arch/powerpc/platforms/cell/beat.h b/arch/powerpc/platforms/cell/beat.h
deleted file mode 100644
index bfcb8e351ae5..000000000000
--- a/arch/powerpc/platforms/cell/beat.h
+++ /dev/null
@@ -1,39 +0,0 @@
-/*
- * Guest OS Interfaces.
- *
- * (C) Copyright 2006 TOSHIBA CORPORATION
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- */
-
-#ifndef _CELLEB_BEAT_H
-#define _CELLEB_BEAT_H
-
-int64_t beat_get_term_char(uint64_t, uint64_t *, uint64_t *, uint64_t *);
-int64_t beat_put_term_char(uint64_t, uint64_t, uint64_t, uint64_t);
-int64_t beat_repository_encode(int, const char *, uint64_t[4]);
-void beat_restart(char *);
-void beat_power_off(void);
-void beat_halt(void);
-int beat_set_rtc_time(struct rtc_time *);
-void beat_get_rtc_time(struct rtc_time *);
-ssize_t beat_nvram_get_size(void);
-ssize_t beat_nvram_read(char *, size_t, loff_t *);
-ssize_t beat_nvram_write(char *, size_t, loff_t *);
-int beat_set_xdabr(unsigned long, unsigned long);
-void beat_power_save(void);
-void beat_kexec_cpu_down(int, int);
-
-#endif /* _CELLEB_BEAT_H */
diff --git a/arch/powerpc/platforms/cell/beat_htab.c b/arch/powerpc/platforms/cell/beat_htab.c
deleted file mode 100644
index bee9232fe619..000000000000
--- a/arch/powerpc/platforms/cell/beat_htab.c
+++ /dev/null
@@ -1,445 +0,0 @@
-/*
- * "Cell Reference Set" HTAB support.
- *
- * (C) Copyright 2006-2007 TOSHIBA CORPORATION
- *
- * This code is based on arch/powerpc/platforms/pseries/lpar.c:
- * Copyright (C) 2001 Todd Inglett, IBM Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- */
-
-#undef DEBUG_LOW
-
-#include <linux/kernel.h>
-#include <linux/spinlock.h>
-
-#include <asm/mmu.h>
-#include <asm/page.h>
-#include <asm/pgtable.h>
-#include <asm/machdep.h>
-#include <asm/udbg.h>
-
-#include "beat_wrapper.h"
-
-#ifdef DEBUG_LOW
-#define DBG_LOW(fmt...) do { udbg_printf(fmt); } while (0)
-#else
-#define DBG_LOW(fmt...) do { } while (0)
-#endif
-
-static DEFINE_RAW_SPINLOCK(beat_htab_lock);
-
-static inline unsigned int beat_read_mask(unsigned hpte_group)
-{
- unsigned long rmask = 0;
- u64 hpte_v[5];
-
- beat_read_htab_entries(0, hpte_group + 0, hpte_v);
- if (!(hpte_v[0] & HPTE_V_BOLTED))
- rmask |= 0x8000;
- if (!(hpte_v[1] & HPTE_V_BOLTED))
- rmask |= 0x4000;
- if (!(hpte_v[2] & HPTE_V_BOLTED))
- rmask |= 0x2000;
- if (!(hpte_v[3] & HPTE_V_BOLTED))
- rmask |= 0x1000;
- beat_read_htab_entries(0, hpte_group + 4, hpte_v);
- if (!(hpte_v[0] & HPTE_V_BOLTED))
- rmask |= 0x0800;
- if (!(hpte_v[1] & HPTE_V_BOLTED))
- rmask |= 0x0400;
- if (!(hpte_v[2] & HPTE_V_BOLTED))
- rmask |= 0x0200;
- if (!(hpte_v[3] & HPTE_V_BOLTED))
- rmask |= 0x0100;
- hpte_group = ~hpte_group & (htab_hash_mask * HPTES_PER_GROUP);
- beat_read_htab_entries(0, hpte_group + 0, hpte_v);
- if (!(hpte_v[0] & HPTE_V_BOLTED))
- rmask |= 0x80;
- if (!(hpte_v[1] & HPTE_V_BOLTED))
- rmask |= 0x40;
- if (!(hpte_v[2] & HPTE_V_BOLTED))
- rmask |= 0x20;
- if (!(hpte_v[3] & HPTE_V_BOLTED))
- rmask |= 0x10;
- beat_read_htab_entries(0, hpte_group + 4, hpte_v);
- if (!(hpte_v[0] & HPTE_V_BOLTED))
- rmask |= 0x08;
- if (!(hpte_v[1] & HPTE_V_BOLTED))
- rmask |= 0x04;
- if (!(hpte_v[2] & HPTE_V_BOLTED))
- rmask |= 0x02;
- if (!(hpte_v[3] & HPTE_V_BOLTED))
- rmask |= 0x01;
- return rmask;
-}
-
-static long beat_lpar_hpte_insert(unsigned long hpte_group,
- unsigned long vpn, unsigned long pa,
- unsigned long rflags, unsigned long vflags,
- int psize, int apsize, int ssize)
-{
- unsigned long lpar_rc;
- u64 hpte_v, hpte_r, slot;
-
- if (vflags & HPTE_V_SECONDARY)
- return -1;
-
- if (!(vflags & HPTE_V_BOLTED))
- DBG_LOW("hpte_insert(group=%lx, va=%016lx, pa=%016lx, "
- "rflags=%lx, vflags=%lx, psize=%d)\n",
- hpte_group, va, pa, rflags, vflags, psize);
-
- hpte_v = hpte_encode_v(vpn, psize, apsize, MMU_SEGSIZE_256M) |
- vflags | HPTE_V_VALID;
- hpte_r = hpte_encode_r(pa, psize, apsize) | rflags;
-
- if (!(vflags & HPTE_V_BOLTED))
- DBG_LOW(" hpte_v=%016lx, hpte_r=%016lx\n", hpte_v, hpte_r);
-
- if (rflags & _PAGE_NO_CACHE)
- hpte_r &= ~HPTE_R_M;
-
- raw_spin_lock(&beat_htab_lock);
- lpar_rc = beat_read_mask(hpte_group);
- if (lpar_rc == 0) {
- if (!(vflags & HPTE_V_BOLTED))
- DBG_LOW(" full\n");
- raw_spin_unlock(&beat_htab_lock);
- return -1;
- }
-
- lpar_rc = beat_insert_htab_entry(0, hpte_group, lpar_rc << 48,
- hpte_v, hpte_r, &slot);
- raw_spin_unlock(&beat_htab_lock);
-
- /*
- * Since we try and ioremap PHBs we don't own, the pte insert
- * will fail. However we must catch the failure in hash_page
- * or we will loop forever, so return -2 in this case.
- */
- if (unlikely(lpar_rc != 0)) {
- if (!(vflags & HPTE_V_BOLTED))
- DBG_LOW(" lpar err %lx\n", lpar_rc);
- return -2;
- }
- if (!(vflags & HPTE_V_BOLTED))
- DBG_LOW(" -> slot: %lx\n", slot);
-
- /* We have to pass down the secondary bucket bit here as well */
- return (slot ^ hpte_group) & 15;
-}
-
-static long beat_lpar_hpte_remove(unsigned long hpte_group)
-{
- DBG_LOW("hpte_remove(group=%lx)\n", hpte_group);
- return -1;
-}
-
-static unsigned long beat_lpar_hpte_getword0(unsigned long slot)
-{
- unsigned long dword0;
- unsigned long lpar_rc;
- u64 dword[5];
-
- lpar_rc = beat_read_htab_entries(0, slot & ~3UL, dword);
-
- dword0 = dword[slot&3];
-
- BUG_ON(lpar_rc != 0);
-
- return dword0;
-}
-
-static void beat_lpar_hptab_clear(void)
-{
- unsigned long size_bytes = 1UL << ppc64_pft_size;
- unsigned long hpte_count = size_bytes >> 4;
- int i;
- u64 dummy0, dummy1;
-
- /* TODO: Use bulk call */
- for (i = 0; i < hpte_count; i++)
- beat_write_htab_entry(0, i, 0, 0, -1UL, -1UL, &dummy0, &dummy1);
-}
-
-/*
- * NOTE: for updatepp ops we are fortunate that the linux "newpp" bits and
- * the low 3 bits of flags happen to line up. So no transform is needed.
- * We can probably optimize here and assume the high bits of newpp are
- * already zero. For now I am paranoid.
- */
-static long beat_lpar_hpte_updatepp(unsigned long slot,
- unsigned long newpp,
- unsigned long vpn,
- int psize, int apsize,
- int ssize, unsigned long flags)
-{
- unsigned long lpar_rc;
- u64 dummy0, dummy1;
- unsigned long want_v;
-
- want_v = hpte_encode_avpn(vpn, psize, MMU_SEGSIZE_256M);
-
- DBG_LOW(" update: "
- "avpnv=%016lx, slot=%016lx, psize: %d, newpp %016lx ... ",
- want_v & HPTE_V_AVPN, slot, psize, newpp);
-
- raw_spin_lock(&beat_htab_lock);
- dummy0 = beat_lpar_hpte_getword0(slot);
- if ((dummy0 & ~0x7FUL) != (want_v & ~0x7FUL)) {
- DBG_LOW("not found !\n");
- raw_spin_unlock(&beat_htab_lock);
- return -1;
- }
-
- lpar_rc = beat_write_htab_entry(0, slot, 0, newpp, 0, 7, &dummy0,
- &dummy1);
- raw_spin_unlock(&beat_htab_lock);
- if (lpar_rc != 0 || dummy0 == 0) {
- DBG_LOW("not found !\n");
- return -1;
- }
-
- DBG_LOW("ok %lx %lx\n", dummy0, dummy1);
-
- BUG_ON(lpar_rc != 0);
-
- return 0;
-}
-
-static long beat_lpar_hpte_find(unsigned long vpn, int psize)
-{
- unsigned long hash;
- unsigned long i, j;
- long slot;
- unsigned long want_v, hpte_v;
-
- hash = hpt_hash(vpn, mmu_psize_defs[psize].shift, MMU_SEGSIZE_256M);
- want_v = hpte_encode_avpn(vpn, psize, MMU_SEGSIZE_256M);
-
- for (j = 0; j < 2; j++) {
- slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
- for (i = 0; i < HPTES_PER_GROUP; i++) {
- hpte_v = beat_lpar_hpte_getword0(slot);
-
- if (HPTE_V_COMPARE(hpte_v, want_v)
- && (hpte_v & HPTE_V_VALID)
- && (!!(hpte_v & HPTE_V_SECONDARY) == j)) {
- /* HPTE matches */
- if (j)
- slot = -slot;
- return slot;
- }
- ++slot;
- }
- hash = ~hash;
- }
-
- return -1;
-}
-
-static void beat_lpar_hpte_updateboltedpp(unsigned long newpp,
- unsigned long ea,
- int psize, int ssize)
-{
- unsigned long vpn;
- unsigned long lpar_rc, slot, vsid;
- u64 dummy0, dummy1;
-
- vsid = get_kernel_vsid(ea, MMU_SEGSIZE_256M);
- vpn = hpt_vpn(ea, vsid, MMU_SEGSIZE_256M);
-
- raw_spin_lock(&beat_htab_lock);
- slot = beat_lpar_hpte_find(vpn, psize);
- BUG_ON(slot == -1);
-
- lpar_rc = beat_write_htab_entry(0, slot, 0, newpp, 0, 7,
- &dummy0, &dummy1);
- raw_spin_unlock(&beat_htab_lock);
-
- BUG_ON(lpar_rc != 0);
-}
-
-static void beat_lpar_hpte_invalidate(unsigned long slot, unsigned long vpn,
- int psize, int apsize,
- int ssize, int local)
-{
- unsigned long want_v;
- unsigned long lpar_rc;
- u64 dummy1, dummy2;
- unsigned long flags;
-
- DBG_LOW(" inval : slot=%lx, va=%016lx, psize: %d, local: %d\n",
- slot, va, psize, local);
- want_v = hpte_encode_avpn(vpn, psize, MMU_SEGSIZE_256M);
-
- raw_spin_lock_irqsave(&beat_htab_lock, flags);
- dummy1 = beat_lpar_hpte_getword0(slot);
-
- if ((dummy1 & ~0x7FUL) != (want_v & ~0x7FUL)) {
- DBG_LOW("not found !\n");
- raw_spin_unlock_irqrestore(&beat_htab_lock, flags);
- return;
- }
-
- lpar_rc = beat_write_htab_entry(0, slot, 0, 0, HPTE_V_VALID, 0,
- &dummy1, &dummy2);
- raw_spin_unlock_irqrestore(&beat_htab_lock, flags);
-
- BUG_ON(lpar_rc != 0);
-}
-
-void __init hpte_init_beat(void)
-{
- ppc_md.hpte_invalidate = beat_lpar_hpte_invalidate;
- ppc_md.hpte_updatepp = beat_lpar_hpte_updatepp;
- ppc_md.hpte_updateboltedpp = beat_lpar_hpte_updateboltedpp;
- ppc_md.hpte_insert = beat_lpar_hpte_insert;
- ppc_md.hpte_remove = beat_lpar_hpte_remove;
- ppc_md.hpte_clear_all = beat_lpar_hptab_clear;
-}
-
-static long beat_lpar_hpte_insert_v3(unsigned long hpte_group,
- unsigned long vpn, unsigned long pa,
- unsigned long rflags, unsigned long vflags,
- int psize, int apsize, int ssize)
-{
- unsigned long lpar_rc;
- u64 hpte_v, hpte_r, slot;
-
- if (vflags & HPTE_V_SECONDARY)
- return -1;
-
- if (!(vflags & HPTE_V_BOLTED))
- DBG_LOW("hpte_insert(group=%lx, vpn=%016lx, pa=%016lx, "
- "rflags=%lx, vflags=%lx, psize=%d)\n",
- hpte_group, vpn, pa, rflags, vflags, psize);
-
- hpte_v = hpte_encode_v(vpn, psize, apsize, MMU_SEGSIZE_256M) |
- vflags | HPTE_V_VALID;
- hpte_r = hpte_encode_r(pa, psize, apsize) | rflags;
-
- if (!(vflags & HPTE_V_BOLTED))
- DBG_LOW(" hpte_v=%016lx, hpte_r=%016lx\n", hpte_v, hpte_r);
-
- if (rflags & _PAGE_NO_CACHE)
- hpte_r &= ~HPTE_R_M;
-
- /* insert into not-volted entry */
- lpar_rc = beat_insert_htab_entry3(0, hpte_group, hpte_v, hpte_r,
- HPTE_V_BOLTED, 0, &slot);
- /*
- * Since we try and ioremap PHBs we don't own, the pte insert
- * will fail. However we must catch the failure in hash_page
- * or we will loop forever, so return -2 in this case.
- */
- if (unlikely(lpar_rc != 0)) {
- if (!(vflags & HPTE_V_BOLTED))
- DBG_LOW(" lpar err %lx\n", lpar_rc);
- return -2;
- }
- if (!(vflags & HPTE_V_BOLTED))
- DBG_LOW(" -> slot: %lx\n", slot);
-
- /* We have to pass down the secondary bucket bit here as well */
- return (slot ^ hpte_group) & 15;
-}
-
-/*
- * NOTE: for updatepp ops we are fortunate that the linux "newpp" bits and
- * the low 3 bits of flags happen to line up. So no transform is needed.
- * We can probably optimize here and assume the high bits of newpp are
- * already zero. For now I am paranoid.
- */
-static long beat_lpar_hpte_updatepp_v3(unsigned long slot,
- unsigned long newpp,
- unsigned long vpn,
- int psize, int apsize,
- int ssize, unsigned long flags)
-{
- unsigned long lpar_rc;
- unsigned long want_v;
- unsigned long pss;
-
- want_v = hpte_encode_avpn(vpn, psize, MMU_SEGSIZE_256M);
- pss = (psize == MMU_PAGE_4K) ? -1UL : mmu_psize_defs[psize].penc[psize];
-
- DBG_LOW(" update: "
- "avpnv=%016lx, slot=%016lx, psize: %d, newpp %016lx ... ",
- want_v & HPTE_V_AVPN, slot, psize, newpp);
-
- lpar_rc = beat_update_htab_permission3(0, slot, want_v, pss, 7, newpp);
-
- if (lpar_rc == 0xfffffff7) {
- DBG_LOW("not found !\n");
- return -1;
- }
-
- DBG_LOW("ok\n");
-
- BUG_ON(lpar_rc != 0);
-
- return 0;
-}
-
-static void beat_lpar_hpte_invalidate_v3(unsigned long slot, unsigned long vpn,
- int psize, int apsize,
- int ssize, int local)
-{
- unsigned long want_v;
- unsigned long lpar_rc;
- unsigned long pss;
-
- DBG_LOW(" inval : slot=%lx, vpn=%016lx, psize: %d, local: %d\n",
- slot, vpn, psize, local);
- want_v = hpte_encode_avpn(vpn, psize, MMU_SEGSIZE_256M);
- pss = (psize == MMU_PAGE_4K) ? -1UL : mmu_psize_defs[psize].penc[psize];
-
- lpar_rc = beat_invalidate_htab_entry3(0, slot, want_v, pss);
-
- /* E_busy can be valid output: page may be already replaced */
- BUG_ON(lpar_rc != 0 && lpar_rc != 0xfffffff7);
-}
-
-static int64_t _beat_lpar_hptab_clear_v3(void)
-{
- return beat_clear_htab3(0);
-}
-
-static void beat_lpar_hptab_clear_v3(void)
-{
- _beat_lpar_hptab_clear_v3();
-}
-
-void __init hpte_init_beat_v3(void)
-{
- if (_beat_lpar_hptab_clear_v3() == 0) {
- ppc_md.hpte_invalidate = beat_lpar_hpte_invalidate_v3;
- ppc_md.hpte_updatepp = beat_lpar_hpte_updatepp_v3;
- ppc_md.hpte_updateboltedpp = beat_lpar_hpte_updateboltedpp;
- ppc_md.hpte_insert = beat_lpar_hpte_insert_v3;
- ppc_md.hpte_remove = beat_lpar_hpte_remove;
- ppc_md.hpte_clear_all = beat_lpar_hptab_clear_v3;
- } else {
- ppc_md.hpte_invalidate = beat_lpar_hpte_invalidate;
- ppc_md.hpte_updatepp = beat_lpar_hpte_updatepp;
- ppc_md.hpte_updateboltedpp = beat_lpar_hpte_updateboltedpp;
- ppc_md.hpte_insert = beat_lpar_hpte_insert;
- ppc_md.hpte_remove = beat_lpar_hpte_remove;
- ppc_md.hpte_clear_all = beat_lpar_hptab_clear;
- }
-}
diff --git a/arch/powerpc/platforms/cell/beat_hvCall.S b/arch/powerpc/platforms/cell/beat_hvCall.S
deleted file mode 100644
index 96c801907126..000000000000
--- a/arch/powerpc/platforms/cell/beat_hvCall.S
+++ /dev/null
@@ -1,285 +0,0 @@
-/*
- * Beat hypervisor call I/F
- *
- * (C) Copyright 2007 TOSHIBA CORPORATION
- *
- * This code is based on arch/powerpc/platforms/pseries/hvCall.S.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- */
-
-#include <asm/ppc_asm.h>
-
-/* Not implemented on Beat, now */
-#define HCALL_INST_PRECALL
-#define HCALL_INST_POSTCALL
-
- .text
-
-#define HVSC .long 0x44000022
-
-/* Note: takes only 7 input parameters at maximum */
-_GLOBAL(beat_hcall_norets)
- HMT_MEDIUM
-
- mfcr r0
- stw r0,8(r1)
-
- HCALL_INST_PRECALL
-
- mr r11,r3
- mr r3,r4
- mr r4,r5
- mr r5,r6
- mr r6,r7
- mr r7,r8
- mr r8,r9
-
- HVSC /* invoke the hypervisor */
-
- HCALL_INST_POSTCALL
-
- lwz r0,8(r1)
- mtcrf 0xff,r0
-
- blr /* return r3 = status */
-
-/* Note: takes 8 input parameters at maximum */
-_GLOBAL(beat_hcall_norets8)
- HMT_MEDIUM
-
- mfcr r0
- stw r0,8(r1)
-
- HCALL_INST_PRECALL
-
- mr r11,r3
- mr r3,r4
- mr r4,r5
- mr r5,r6
- mr r6,r7
- mr r7,r8
- mr r8,r9
- ld r10,STK_PARAM(R10)(r1)
-
- HVSC /* invoke the hypervisor */
-
- HCALL_INST_POSTCALL
-
- lwz r0,8(r1)
- mtcrf 0xff,r0
-
- blr /* return r3 = status */
-
-/* Note: takes only 6 input parameters, 1 output parameters at maximum */
-_GLOBAL(beat_hcall1)
- HMT_MEDIUM
-
- mfcr r0
- stw r0,8(r1)
-
- HCALL_INST_PRECALL
-
- std r4,STK_PARAM(R4)(r1) /* save ret buffer */
-
- mr r11,r3
- mr r3,r5
- mr r4,r6
- mr r5,r7
- mr r6,r8
- mr r7,r9
- mr r8,r10
-
- HVSC /* invoke the hypervisor */
-
- HCALL_INST_POSTCALL
-
- ld r12,STK_PARAM(R4)(r1)
- std r4, 0(r12)
-
- lwz r0,8(r1)
- mtcrf 0xff,r0
-
- blr /* return r3 = status */
-
-/* Note: takes only 6 input parameters, 2 output parameters at maximum */
-_GLOBAL(beat_hcall2)
- HMT_MEDIUM
-
- mfcr r0
- stw r0,8(r1)
-
- HCALL_INST_PRECALL
-
- std r4,STK_PARAM(R4)(r1) /* save ret buffer */
-
- mr r11,r3
- mr r3,r5
- mr r4,r6
- mr r5,r7
- mr r6,r8
- mr r7,r9
- mr r8,r10
-
- HVSC /* invoke the hypervisor */
-
- HCALL_INST_POSTCALL
-
- ld r12,STK_PARAM(R4)(r1)
- std r4, 0(r12)
- std r5, 8(r12)
-
- lwz r0,8(r1)
- mtcrf 0xff,r0
-
- blr /* return r3 = status */
-
-/* Note: takes only 6 input parameters, 3 output parameters at maximum */
-_GLOBAL(beat_hcall3)
- HMT_MEDIUM
-
- mfcr r0
- stw r0,8(r1)
-
- HCALL_INST_PRECALL
-
- std r4,STK_PARAM(R4)(r1) /* save ret buffer */
-
- mr r11,r3
- mr r3,r5
- mr r4,r6
- mr r5,r7
- mr r6,r8
- mr r7,r9
- mr r8,r10
-
- HVSC /* invoke the hypervisor */
-
- HCALL_INST_POSTCALL
-
- ld r12,STK_PARAM(R4)(r1)
- std r4, 0(r12)
- std r5, 8(r12)
- std r6, 16(r12)
-
- lwz r0,8(r1)
- mtcrf 0xff,r0
-
- blr /* return r3 = status */
-
-/* Note: takes only 6 input parameters, 4 output parameters at maximum */
-_GLOBAL(beat_hcall4)
- HMT_MEDIUM
-
- mfcr r0
- stw r0,8(r1)
-
- HCALL_INST_PRECALL
-
- std r4,STK_PARAM(R4)(r1) /* save ret buffer */
-
- mr r11,r3
- mr r3,r5
- mr r4,r6
- mr r5,r7
- mr r6,r8
- mr r7,r9
- mr r8,r10
-
- HVSC /* invoke the hypervisor */
-
- HCALL_INST_POSTCALL
-
- ld r12,STK_PARAM(R4)(r1)
- std r4, 0(r12)
- std r5, 8(r12)
- std r6, 16(r12)
- std r7, 24(r12)
-
- lwz r0,8(r1)
- mtcrf 0xff,r0
-
- blr /* return r3 = status */
-
-/* Note: takes only 6 input parameters, 5 output parameters at maximum */
-_GLOBAL(beat_hcall5)
- HMT_MEDIUM
-
- mfcr r0
- stw r0,8(r1)
-
- HCALL_INST_PRECALL
-
- std r4,STK_PARAM(R4)(r1) /* save ret buffer */
-
- mr r11,r3
- mr r3,r5
- mr r4,r6
- mr r5,r7
- mr r6,r8
- mr r7,r9
- mr r8,r10
-
- HVSC /* invoke the hypervisor */
-
- HCALL_INST_POSTCALL
-
- ld r12,STK_PARAM(R4)(r1)
- std r4, 0(r12)
- std r5, 8(r12)
- std r6, 16(r12)
- std r7, 24(r12)
- std r8, 32(r12)
-
- lwz r0,8(r1)
- mtcrf 0xff,r0
-
- blr /* return r3 = status */
-
-/* Note: takes only 6 input parameters, 6 output parameters at maximum */
-_GLOBAL(beat_hcall6)
- HMT_MEDIUM
-
- mfcr r0
- stw r0,8(r1)
-
- HCALL_INST_PRECALL
-
- std r4,STK_PARAM(R4)(r1) /* save ret buffer */
-
- mr r11,r3
- mr r3,r5
- mr r4,r6
- mr r5,r7
- mr r6,r8
- mr r7,r9
- mr r8,r10
-
- HVSC /* invoke the hypervisor */
-
- HCALL_INST_POSTCALL
-
- ld r12,STK_PARAM(R4)(r1)
- std r4, 0(r12)
- std r5, 8(r12)
- std r6, 16(r12)
- std r7, 24(r12)
- std r8, 32(r12)
- std r9, 40(r12)
-
- lwz r0,8(r1)
- mtcrf 0xff,r0
-
- blr /* return r3 = status */
diff --git a/arch/powerpc/platforms/cell/beat_interrupt.c b/arch/powerpc/platforms/cell/beat_interrupt.c
deleted file mode 100644
index 9e5dfbcc00af..000000000000
--- a/arch/powerpc/platforms/cell/beat_interrupt.c
+++ /dev/null
@@ -1,253 +0,0 @@
-/*
- * Celleb/Beat Interrupt controller
- *
- * (C) Copyright 2006-2007 TOSHIBA CORPORATION
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- */
-
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/irq.h>
-#include <linux/percpu.h>
-#include <linux/types.h>
-
-#include <asm/machdep.h>
-
-#include "beat_interrupt.h"
-#include "beat_wrapper.h"
-
-#define MAX_IRQS NR_IRQS
-static DEFINE_RAW_SPINLOCK(beatic_irq_mask_lock);
-static uint64_t beatic_irq_mask_enable[(MAX_IRQS+255)/64];
-static uint64_t beatic_irq_mask_ack[(MAX_IRQS+255)/64];
-
-static struct irq_domain *beatic_host;
-
-/*
- * In this implementation, "virq" == "IRQ plug number",
- * "(irq_hw_number_t)hwirq" == "IRQ outlet number".
- */
-
-/* assumption: locked */
-static inline void beatic_update_irq_mask(unsigned int irq_plug)
-{
- int off;
- unsigned long masks[4];
-
- off = (irq_plug / 256) * 4;
- masks[0] = beatic_irq_mask_enable[off + 0]
- & beatic_irq_mask_ack[off + 0];
- masks[1] = beatic_irq_mask_enable[off + 1]
- & beatic_irq_mask_ack[off + 1];
- masks[2] = beatic_irq_mask_enable[off + 2]
- & beatic_irq_mask_ack[off + 2];
- masks[3] = beatic_irq_mask_enable[off + 3]
- & beatic_irq_mask_ack[off + 3];
- if (beat_set_interrupt_mask(irq_plug&~255UL,
- masks[0], masks[1], masks[2], masks[3]) != 0)
- panic("Failed to set mask IRQ!");
-}
-
-static void beatic_mask_irq(struct irq_data *d)
-{
- unsigned long flags;
-
- raw_spin_lock_irqsave(&beatic_irq_mask_lock, flags);
- beatic_irq_mask_enable[d->irq/64] &= ~(1UL << (63 - (d->irq%64)));
- beatic_update_irq_mask(d->irq);
- raw_spin_unlock_irqrestore(&beatic_irq_mask_lock, flags);
-}
-
-static void beatic_unmask_irq(struct irq_data *d)
-{
- unsigned long flags;
-
- raw_spin_lock_irqsave(&beatic_irq_mask_lock, flags);
- beatic_irq_mask_enable[d->irq/64] |= 1UL << (63 - (d->irq%64));
- beatic_update_irq_mask(d->irq);
- raw_spin_unlock_irqrestore(&beatic_irq_mask_lock, flags);
-}
-
-static void beatic_ack_irq(struct irq_data *d)
-{
- unsigned long flags;
-
- raw_spin_lock_irqsave(&beatic_irq_mask_lock, flags);
- beatic_irq_mask_ack[d->irq/64] &= ~(1UL << (63 - (d->irq%64)));
- beatic_update_irq_mask(d->irq);
- raw_spin_unlock_irqrestore(&beatic_irq_mask_lock, flags);
-}
-
-static void beatic_end_irq(struct irq_data *d)
-{
- s64 err;
- unsigned long flags;
-
- err = beat_downcount_of_interrupt(d->irq);
- if (err != 0) {
- if ((err & 0xFFFFFFFF) != 0xFFFFFFF5) /* -11: wrong state */
- panic("Failed to downcount IRQ! Error = %16llx", err);
-
- printk(KERN_ERR "IRQ over-downcounted, plug %d\n", d->irq);
- }
- raw_spin_lock_irqsave(&beatic_irq_mask_lock, flags);
- beatic_irq_mask_ack[d->irq/64] |= 1UL << (63 - (d->irq%64));
- beatic_update_irq_mask(d->irq);
- raw_spin_unlock_irqrestore(&beatic_irq_mask_lock, flags);
-}
-
-static struct irq_chip beatic_pic = {
- .name = "CELL-BEAT",
- .irq_unmask = beatic_unmask_irq,
- .irq_mask = beatic_mask_irq,
- .irq_eoi = beatic_end_irq,
-};
-
-/*
- * Dispose binding hardware IRQ number (hw) and Virtuql IRQ number (virq),
- * update flags.
- *
- * Note that the number (virq) is already assigned at upper layer.
- */
-static void beatic_pic_host_unmap(struct irq_domain *h, unsigned int virq)
-{
- beat_destruct_irq_plug(virq);
-}
-
-/*
- * Create or update binding hardware IRQ number (hw) and Virtuql
- * IRQ number (virq). This is called only once for a given mapping.
- *
- * Note that the number (virq) is already assigned at upper layer.
- */
-static int beatic_pic_host_map(struct irq_domain *h, unsigned int virq,
- irq_hw_number_t hw)
-{
- int64_t err;
-
- err = beat_construct_and_connect_irq_plug(virq, hw);
- if (err < 0)
- return -EIO;
-
- irq_set_status_flags(virq, IRQ_LEVEL);
- irq_set_chip_and_handler(virq, &beatic_pic, handle_fasteoi_irq);
- return 0;
-}
-
-/*
- * Translate device-tree interrupt spec to irq_hw_number_t style (ulong),
- * to pass away to irq_create_mapping().
- *
- * Called from irq_create_of_mapping() only.
- * Note: We have only 1 entry to translate.
- */
-static int beatic_pic_host_xlate(struct irq_domain *h, struct device_node *ct,
- const u32 *intspec, unsigned int intsize,
- irq_hw_number_t *out_hwirq,
- unsigned int *out_flags)
-{
- const u64 *intspec2 = (const u64 *)intspec;
-
- *out_hwirq = *intspec2;
- *out_flags |= IRQ_TYPE_LEVEL_LOW;
- return 0;
-}
-
-static int beatic_pic_host_match(struct irq_domain *h, struct device_node *np)
-{
- /* Match all */
- return 1;
-}
-
-static const struct irq_domain_ops beatic_pic_host_ops = {
- .map = beatic_pic_host_map,
- .unmap = beatic_pic_host_unmap,
- .xlate = beatic_pic_host_xlate,
- .match = beatic_pic_host_match,
-};
-
-/*
- * Get an IRQ number
- * Note: returns VIRQ
- */
-static inline unsigned int beatic_get_irq_plug(void)
-{
- int i;
- uint64_t pending[4], ub;
-
- for (i = 0; i < MAX_IRQS; i += 256) {
- beat_detect_pending_interrupts(i, pending);
- __asm__ ("cntlzd %0,%1":"=r"(ub):
- "r"(pending[0] & beatic_irq_mask_enable[i/64+0]
- & beatic_irq_mask_ack[i/64+0]));
- if (ub != 64)
- return i + ub + 0;
- __asm__ ("cntlzd %0,%1":"=r"(ub):
- "r"(pending[1] & beatic_irq_mask_enable[i/64+1]
- & beatic_irq_mask_ack[i/64+1]));
- if (ub != 64)
- return i + ub + 64;
- __asm__ ("cntlzd %0,%1":"=r"(ub):
- "r"(pending[2] & beatic_irq_mask_enable[i/64+2]
- & beatic_irq_mask_ack[i/64+2]));
- if (ub != 64)
- return i + ub + 128;
- __asm__ ("cntlzd %0,%1":"=r"(ub):
- "r"(pending[3] & beatic_irq_mask_enable[i/64+3]
- & beatic_irq_mask_ack[i/64+3]));
- if (ub != 64)
- return i + ub + 192;
- }
-
- return NO_IRQ;
-}
-unsigned int beatic_get_irq(void)
-{
- unsigned int ret;
-
- ret = beatic_get_irq_plug();
- if (ret != NO_IRQ)
- beatic_ack_irq(irq_get_irq_data(ret));
- return ret;
-}
-
-/*
- */
-void __init beatic_init_IRQ(void)
-{
- int i;
-
- memset(beatic_irq_mask_enable, 0, sizeof(beatic_irq_mask_enable));
- memset(beatic_irq_mask_ack, 255, sizeof(beatic_irq_mask_ack));
- for (i = 0; i < MAX_IRQS; i += 256)
- beat_set_interrupt_mask(i, 0L, 0L, 0L, 0L);
-
- /* Set out get_irq function */
- ppc_md.get_irq = beatic_get_irq;
-
- /* Allocate an irq host */
- beatic_host = irq_domain_add_nomap(NULL, ~0, &beatic_pic_host_ops, NULL);
- BUG_ON(beatic_host == NULL);
- irq_set_default_host(beatic_host);
-}
-
-void beatic_deinit_IRQ(void)
-{
- int i;
-
- for (i = 1; i < nr_irqs; i++)
- beat_destruct_irq_plug(i);
-}
diff --git a/arch/powerpc/platforms/cell/beat_interrupt.h b/arch/powerpc/platforms/cell/beat_interrupt.h
deleted file mode 100644
index a7e52f91a078..000000000000
--- a/arch/powerpc/platforms/cell/beat_interrupt.h
+++ /dev/null
@@ -1,30 +0,0 @@
-/*
- * Celleb/Beat Interrupt controller
- *
- * (C) Copyright 2006 TOSHIBA CORPORATION
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- */
-
-#ifndef ASM_BEAT_PIC_H
-#define ASM_BEAT_PIC_H
-#ifdef __KERNEL__
-
-extern void beatic_init_IRQ(void);
-extern unsigned int beatic_get_irq(void);
-extern void beatic_deinit_IRQ(void);
-
-#endif
-#endif /* ASM_BEAT_PIC_H */
diff --git a/arch/powerpc/platforms/cell/beat_iommu.c b/arch/powerpc/platforms/cell/beat_iommu.c
deleted file mode 100644
index 3ce685568935..000000000000
--- a/arch/powerpc/platforms/cell/beat_iommu.c
+++ /dev/null
@@ -1,115 +0,0 @@
-/*
- * Support for IOMMU on Celleb platform.
- *
- * (C) Copyright 2006-2007 TOSHIBA CORPORATION
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- */
-
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/dma-mapping.h>
-#include <linux/pci.h>
-#include <linux/of_platform.h>
-
-#include <asm/machdep.h>
-
-#include "beat_wrapper.h"
-
-#define DMA_FLAGS 0xf800000000000000UL /* r/w permitted, coherency required,
- strongest order */
-
-static int __init find_dma_window(u64 *io_space_id, u64 *ioid,
- u64 *base, u64 *size, u64 *io_page_size)
-{
- struct device_node *dn;
- const unsigned long *dma_window;
-
- for_each_node_by_type(dn, "ioif") {
- dma_window = of_get_property(dn, "toshiba,dma-window", NULL);
- if (dma_window) {
- *io_space_id = (dma_window[0] >> 32) & 0xffffffffUL;
- *ioid = dma_window[0] & 0x7ffUL;
- *base = dma_window[1];
- *size = dma_window[2];
- *io_page_size = 1 << dma_window[3];
- of_node_put(dn);
- return 1;
- }
- }
- return 0;
-}
-
-static unsigned long celleb_dma_direct_offset;
-
-static void __init celleb_init_direct_mapping(void)
-{
- u64 lpar_addr, io_addr;
- u64 io_space_id, ioid, dma_base, dma_size, io_page_size;
-
- if (!find_dma_window(&io_space_id, &ioid, &dma_base, &dma_size,
- &io_page_size)) {
- pr_info("No dma window found !\n");
- return;
- }
-
- for (lpar_addr = 0; lpar_addr < dma_size; lpar_addr += io_page_size) {
- io_addr = lpar_addr + dma_base;
- (void)beat_put_iopte(io_space_id, io_addr, lpar_addr,
- ioid, DMA_FLAGS);
- }
-
- celleb_dma_direct_offset = dma_base;
-}
-
-static void celleb_dma_dev_setup(struct device *dev)
-{
- set_dma_ops(dev, &dma_direct_ops);
- set_dma_offset(dev, celleb_dma_direct_offset);
-}
-
-static void celleb_pci_dma_dev_setup(struct pci_dev *pdev)
-{
- celleb_dma_dev_setup(&pdev->dev);
-}
-
-static int celleb_of_bus_notify(struct notifier_block *nb,
- unsigned long action, void *data)
-{
- struct device *dev = data;
-
- /* We are only intereted in device addition */
- if (action != BUS_NOTIFY_ADD_DEVICE)
- return 0;
-
- celleb_dma_dev_setup(dev);
-
- return 0;
-}
-
-static struct notifier_block celleb_of_bus_notifier = {
- .notifier_call = celleb_of_bus_notify
-};
-
-static int __init celleb_init_iommu(void)
-{
- celleb_init_direct_mapping();
- ppc_md.pci_dma_dev_setup = celleb_pci_dma_dev_setup;
- bus_register_notifier(&platform_bus_type, &celleb_of_bus_notifier);
-
- return 0;
-}
-
-machine_arch_initcall(celleb_beat, celleb_init_iommu);
diff --git a/arch/powerpc/platforms/cell/beat_spu_priv1.c b/arch/powerpc/platforms/cell/beat_spu_priv1.c
deleted file mode 100644
index 13f52589d3a9..000000000000
--- a/arch/powerpc/platforms/cell/beat_spu_priv1.c
+++ /dev/null
@@ -1,205 +0,0 @@
-/*
- * spu hypervisor abstraction for Beat
- *
- * (C) Copyright 2006-2007 TOSHIBA CORPORATION
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- */
-
-#include <asm/types.h>
-#include <asm/spu.h>
-#include <asm/spu_priv1.h>
-
-#include "beat_wrapper.h"
-
-static inline void _int_mask_set(struct spu *spu, int class, u64 mask)
-{
- spu->shadow_int_mask_RW[class] = mask;
- beat_set_irq_mask_for_spe(spu->spe_id, class, mask);
-}
-
-static inline u64 _int_mask_get(struct spu *spu, int class)
-{
- return spu->shadow_int_mask_RW[class];
-}
-
-static void int_mask_set(struct spu *spu, int class, u64 mask)
-{
- _int_mask_set(spu, class, mask);
-}
-
-static u64 int_mask_get(struct spu *spu, int class)
-{
- return _int_mask_get(spu, class);
-}
-
-static void int_mask_and(struct spu *spu, int class, u64 mask)
-{
- u64 old_mask;
- old_mask = _int_mask_get(spu, class);
- _int_mask_set(spu, class, old_mask & mask);
-}
-
-static void int_mask_or(struct spu *spu, int class, u64 mask)
-{
- u64 old_mask;
- old_mask = _int_mask_get(spu, class);
- _int_mask_set(spu, class, old_mask | mask);
-}
-
-static void int_stat_clear(struct spu *spu, int class, u64 stat)
-{
- beat_clear_interrupt_status_of_spe(spu->spe_id, class, stat);
-}
-
-static u64 int_stat_get(struct spu *spu, int class)
-{
- u64 int_stat;
- beat_get_interrupt_status_of_spe(spu->spe_id, class, &int_stat);
- return int_stat;
-}
-
-static void cpu_affinity_set(struct spu *spu, int cpu)
-{
- return;
-}
-
-static u64 mfc_dar_get(struct spu *spu)
-{
- u64 dar;
- beat_get_spe_privileged_state_1_registers(
- spu->spe_id,
- offsetof(struct spu_priv1, mfc_dar_RW), &dar);
- return dar;
-}
-
-static u64 mfc_dsisr_get(struct spu *spu)
-{
- u64 dsisr;
- beat_get_spe_privileged_state_1_registers(
- spu->spe_id,
- offsetof(struct spu_priv1, mfc_dsisr_RW), &dsisr);
- return dsisr;
-}
-
-static void mfc_dsisr_set(struct spu *spu, u64 dsisr)
-{
- beat_set_spe_privileged_state_1_registers(
- spu->spe_id,
- offsetof(struct spu_priv1, mfc_dsisr_RW), dsisr);
-}
-
-static void mfc_sdr_setup(struct spu *spu)
-{
- return;
-}
-
-static void mfc_sr1_set(struct spu *spu, u64 sr1)
-{
- beat_set_spe_privileged_state_1_registers(
- spu->spe_id,
- offsetof(struct spu_priv1, mfc_sr1_RW), sr1);
-}
-
-static u64 mfc_sr1_get(struct spu *spu)
-{
- u64 sr1;
- beat_get_spe_privileged_state_1_registers(
- spu->spe_id,
- offsetof(struct spu_priv1, mfc_sr1_RW), &sr1);
- return sr1;
-}
-
-static void mfc_tclass_id_set(struct spu *spu, u64 tclass_id)
-{
- beat_set_spe_privileged_state_1_registers(
- spu->spe_id,
- offsetof(struct spu_priv1, mfc_tclass_id_RW), tclass_id);
-}
-
-static u64 mfc_tclass_id_get(struct spu *spu)
-{
- u64 tclass_id;
- beat_get_spe_privileged_state_1_registers(
- spu->spe_id,
- offsetof(struct spu_priv1, mfc_tclass_id_RW), &tclass_id);
- return tclass_id;
-}
-
-static void tlb_invalidate(struct spu *spu)
-{
- beat_set_spe_privileged_state_1_registers(
- spu->spe_id,
- offsetof(struct spu_priv1, tlb_invalidate_entry_W), 0ul);
-}
-
-static void resource_allocation_groupID_set(struct spu *spu, u64 id)
-{
- beat_set_spe_privileged_state_1_registers(
- spu->spe_id,
- offsetof(struct spu_priv1, resource_allocation_groupID_RW),
- id);
-}
-
-static u64 resource_allocation_groupID_get(struct spu *spu)
-{
- u64 id;
- beat_get_spe_privileged_state_1_registers(
- spu->spe_id,
- offsetof(struct spu_priv1, resource_allocation_groupID_RW),
- &id);
- return id;
-}
-
-static void resource_allocation_enable_set(struct spu *spu, u64 enable)
-{
- beat_set_spe_privileged_state_1_registers(
- spu->spe_id,
- offsetof(struct spu_priv1, resource_allocation_enable_RW),
- enable);
-}
-
-static u64 resource_allocation_enable_get(struct spu *spu)
-{
- u64 enable;
- beat_get_spe_privileged_state_1_registers(
- spu->spe_id,
- offsetof(struct spu_priv1, resource_allocation_enable_RW),
- &enable);
- return enable;
-}
-
-const struct spu_priv1_ops spu_priv1_beat_ops = {
- .int_mask_and = int_mask_and,
- .int_mask_or = int_mask_or,
- .int_mask_set = int_mask_set,
- .int_mask_get = int_mask_get,
- .int_stat_clear = int_stat_clear,
- .int_stat_get = int_stat_get,
- .cpu_affinity_set = cpu_affinity_set,
- .mfc_dar_get = mfc_dar_get,
- .mfc_dsisr_get = mfc_dsisr_get,
- .mfc_dsisr_set = mfc_dsisr_set,
- .mfc_sdr_setup = mfc_sdr_setup,
- .mfc_sr1_set = mfc_sr1_set,
- .mfc_sr1_get = mfc_sr1_get,
- .mfc_tclass_id_set = mfc_tclass_id_set,
- .mfc_tclass_id_get = mfc_tclass_id_get,
- .tlb_invalidate = tlb_invalidate,
- .resource_allocation_groupID_set = resource_allocation_groupID_set,
- .resource_allocation_groupID_get = resource_allocation_groupID_get,
- .resource_allocation_enable_set = resource_allocation_enable_set,
- .resource_allocation_enable_get = resource_allocation_enable_get,
-};
diff --git a/arch/powerpc/platforms/cell/beat_syscall.h b/arch/powerpc/platforms/cell/beat_syscall.h
deleted file mode 100644
index 8580dc7e1798..000000000000
--- a/arch/powerpc/platforms/cell/beat_syscall.h
+++ /dev/null
@@ -1,164 +0,0 @@
-/*
- * Beat hypervisor call numbers
- *
- * (C) Copyright 2004-2007 TOSHIBA CORPORATION
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- */
-
-#ifndef BEAT_BEAT_syscall_H
-#define BEAT_BEAT_syscall_H
-
-#ifdef __ASSEMBLY__
-#define __BEAT_ADD_VENDOR_ID(__x, __v) ((__v)<<60|(__x))
-#else
-#define __BEAT_ADD_VENDOR_ID(__x, __v) ((u64)(__v)<<60|(__x))
-#endif
-#define HV_allocate_memory __BEAT_ADD_VENDOR_ID(0, 0)
-#define HV_construct_virtual_address_space __BEAT_ADD_VENDOR_ID(2, 0)
-#define HV_destruct_virtual_address_space __BEAT_ADD_VENDOR_ID(10, 0)
-#define HV_get_virtual_address_space_id_of_ppe __BEAT_ADD_VENDOR_ID(4, 0)
-#define HV_query_logical_partition_address_region_info \
- __BEAT_ADD_VENDOR_ID(6, 0)
-#define HV_release_memory __BEAT_ADD_VENDOR_ID(13, 0)
-#define HV_select_virtual_address_space __BEAT_ADD_VENDOR_ID(7, 0)
-#define HV_load_range_registers __BEAT_ADD_VENDOR_ID(68, 0)
-#define HV_set_ppe_l2cache_rmt_entry __BEAT_ADD_VENDOR_ID(70, 0)
-#define HV_set_ppe_tlb_rmt_entry __BEAT_ADD_VENDOR_ID(71, 0)
-#define HV_set_spe_tlb_rmt_entry __BEAT_ADD_VENDOR_ID(72, 0)
-#define HV_get_io_address_translation_fault_info __BEAT_ADD_VENDOR_ID(14, 0)
-#define HV_get_iopte __BEAT_ADD_VENDOR_ID(16, 0)
-#define HV_preload_iopt_cache __BEAT_ADD_VENDOR_ID(17, 0)
-#define HV_put_iopte __BEAT_ADD_VENDOR_ID(15, 0)
-#define HV_connect_event_ports __BEAT_ADD_VENDOR_ID(21, 0)
-#define HV_construct_event_receive_port __BEAT_ADD_VENDOR_ID(18, 0)
-#define HV_destruct_event_receive_port __BEAT_ADD_VENDOR_ID(19, 0)
-#define HV_destruct_event_send_port __BEAT_ADD_VENDOR_ID(22, 0)
-#define HV_get_state_of_event_send_port __BEAT_ADD_VENDOR_ID(25, 0)
-#define HV_request_to_connect_event_ports __BEAT_ADD_VENDOR_ID(20, 0)
-#define HV_send_event_externally __BEAT_ADD_VENDOR_ID(23, 0)
-#define HV_send_event_locally __BEAT_ADD_VENDOR_ID(24, 0)
-#define HV_construct_and_connect_irq_plug __BEAT_ADD_VENDOR_ID(28, 0)
-#define HV_destruct_irq_plug __BEAT_ADD_VENDOR_ID(29, 0)
-#define HV_detect_pending_interrupts __BEAT_ADD_VENDOR_ID(26, 0)
-#define HV_end_of_interrupt __BEAT_ADD_VENDOR_ID(27, 0)
-#define HV_assign_control_signal_notification_port __BEAT_ADD_VENDOR_ID(45, 0)
-#define HV_end_of_control_signal_processing __BEAT_ADD_VENDOR_ID(48, 0)
-#define HV_get_control_signal __BEAT_ADD_VENDOR_ID(46, 0)
-#define HV_set_irq_mask_for_spe __BEAT_ADD_VENDOR_ID(61, 0)
-#define HV_shutdown_logical_partition __BEAT_ADD_VENDOR_ID(44, 0)
-#define HV_connect_message_ports __BEAT_ADD_VENDOR_ID(35, 0)
-#define HV_destruct_message_port __BEAT_ADD_VENDOR_ID(36, 0)
-#define HV_receive_message __BEAT_ADD_VENDOR_ID(37, 0)
-#define HV_get_message_port_info __BEAT_ADD_VENDOR_ID(34, 0)
-#define HV_request_to_connect_message_ports __BEAT_ADD_VENDOR_ID(33, 0)
-#define HV_send_message __BEAT_ADD_VENDOR_ID(32, 0)
-#define HV_get_logical_ppe_id __BEAT_ADD_VENDOR_ID(69, 0)
-#define HV_pause __BEAT_ADD_VENDOR_ID(9, 0)
-#define HV_destruct_shared_memory_handle __BEAT_ADD_VENDOR_ID(51, 0)
-#define HV_get_shared_memory_info __BEAT_ADD_VENDOR_ID(52, 0)
-#define HV_permit_sharing_memory __BEAT_ADD_VENDOR_ID(50, 0)
-#define HV_request_to_attach_shared_memory __BEAT_ADD_VENDOR_ID(49, 0)
-#define HV_enable_logical_spe_execution __BEAT_ADD_VENDOR_ID(55, 0)
-#define HV_construct_logical_spe __BEAT_ADD_VENDOR_ID(53, 0)
-#define HV_disable_logical_spe_execution __BEAT_ADD_VENDOR_ID(56, 0)
-#define HV_destruct_logical_spe __BEAT_ADD_VENDOR_ID(54, 0)
-#define HV_sense_spe_execution_status __BEAT_ADD_VENDOR_ID(58, 0)
-#define HV_insert_htab_entry __BEAT_ADD_VENDOR_ID(101, 0)
-#define HV_read_htab_entries __BEAT_ADD_VENDOR_ID(95, 0)
-#define HV_write_htab_entry __BEAT_ADD_VENDOR_ID(94, 0)
-#define HV_assign_io_address_translation_fault_port \
- __BEAT_ADD_VENDOR_ID(100, 0)
-#define HV_set_interrupt_mask __BEAT_ADD_VENDOR_ID(73, 0)
-#define HV_get_logical_partition_id __BEAT_ADD_VENDOR_ID(74, 0)
-#define HV_create_repository_node2 __BEAT_ADD_VENDOR_ID(90, 0)
-#define HV_create_repository_node __BEAT_ADD_VENDOR_ID(90, 0) /* alias */
-#define HV_get_repository_node_value2 __BEAT_ADD_VENDOR_ID(91, 0)
-#define HV_get_repository_node_value __BEAT_ADD_VENDOR_ID(91, 0) /* alias */
-#define HV_modify_repository_node_value2 __BEAT_ADD_VENDOR_ID(92, 0)
-#define HV_modify_repository_node_value __BEAT_ADD_VENDOR_ID(92, 0) /* alias */
-#define HV_remove_repository_node2 __BEAT_ADD_VENDOR_ID(93, 0)
-#define HV_remove_repository_node __BEAT_ADD_VENDOR_ID(93, 0) /* alias */
-#define HV_cancel_shared_memory __BEAT_ADD_VENDOR_ID(104, 0)
-#define HV_clear_interrupt_status_of_spe __BEAT_ADD_VENDOR_ID(206, 0)
-#define HV_construct_spe_irq_outlet __BEAT_ADD_VENDOR_ID(80, 0)
-#define HV_destruct_spe_irq_outlet __BEAT_ADD_VENDOR_ID(81, 0)
-#define HV_disconnect_ipspc_service __BEAT_ADD_VENDOR_ID(88, 0)
-#define HV_execute_ipspc_command __BEAT_ADD_VENDOR_ID(86, 0)
-#define HV_get_interrupt_status_of_spe __BEAT_ADD_VENDOR_ID(205, 0)
-#define HV_get_spe_privileged_state_1_registers __BEAT_ADD_VENDOR_ID(208, 0)
-#define HV_permit_use_of_ipspc_service __BEAT_ADD_VENDOR_ID(85, 0)
-#define HV_reinitialize_logical_spe __BEAT_ADD_VENDOR_ID(82, 0)
-#define HV_request_ipspc_service __BEAT_ADD_VENDOR_ID(84, 0)
-#define HV_stop_ipspc_command __BEAT_ADD_VENDOR_ID(87, 0)
-#define HV_set_spe_privileged_state_1_registers __BEAT_ADD_VENDOR_ID(204, 0)
-#define HV_get_status_of_ipspc_service __BEAT_ADD_VENDOR_ID(203, 0)
-#define HV_put_characters_to_console __BEAT_ADD_VENDOR_ID(0x101, 1)
-#define HV_get_characters_from_console __BEAT_ADD_VENDOR_ID(0x102, 1)
-#define HV_get_base_clock __BEAT_ADD_VENDOR_ID(0x111, 1)
-#define HV_set_base_clock __BEAT_ADD_VENDOR_ID(0x112, 1)
-#define HV_get_frame_cycle __BEAT_ADD_VENDOR_ID(0x114, 1)
-#define HV_disable_console __BEAT_ADD_VENDOR_ID(0x115, 1)
-#define HV_disable_all_console __BEAT_ADD_VENDOR_ID(0x116, 1)
-#define HV_oneshot_timer __BEAT_ADD_VENDOR_ID(0x117, 1)
-#define HV_set_dabr __BEAT_ADD_VENDOR_ID(0x118, 1)
-#define HV_get_dabr __BEAT_ADD_VENDOR_ID(0x119, 1)
-#define HV_start_hv_stats __BEAT_ADD_VENDOR_ID(0x21c, 1)
-#define HV_stop_hv_stats __BEAT_ADD_VENDOR_ID(0x21d, 1)
-#define HV_get_hv_stats __BEAT_ADD_VENDOR_ID(0x21e, 1)
-#define HV_get_hv_error_stats __BEAT_ADD_VENDOR_ID(0x221, 1)
-#define HV_get_stats __BEAT_ADD_VENDOR_ID(0x224, 1)
-#define HV_get_heap_stats __BEAT_ADD_VENDOR_ID(0x225, 1)
-#define HV_get_memory_stats __BEAT_ADD_VENDOR_ID(0x227, 1)
-#define HV_get_memory_detail __BEAT_ADD_VENDOR_ID(0x228, 1)
-#define HV_set_priority_of_irq_outlet __BEAT_ADD_VENDOR_ID(0x122, 1)
-#define HV_get_physical_spe_by_reservation_id __BEAT_ADD_VENDOR_ID(0x128, 1)
-#define HV_get_spe_context __BEAT_ADD_VENDOR_ID(0x129, 1)
-#define HV_set_spe_context __BEAT_ADD_VENDOR_ID(0x12a, 1)
-#define HV_downcount_of_interrupt __BEAT_ADD_VENDOR_ID(0x12e, 1)
-#define HV_peek_spe_context __BEAT_ADD_VENDOR_ID(0x12f, 1)
-#define HV_read_bpa_register __BEAT_ADD_VENDOR_ID(0x131, 1)
-#define HV_write_bpa_register __BEAT_ADD_VENDOR_ID(0x132, 1)
-#define HV_map_context_table_of_spe __BEAT_ADD_VENDOR_ID(0x137, 1)
-#define HV_get_slb_for_logical_spe __BEAT_ADD_VENDOR_ID(0x138, 1)
-#define HV_set_slb_for_logical_spe __BEAT_ADD_VENDOR_ID(0x139, 1)
-#define HV_init_pm __BEAT_ADD_VENDOR_ID(0x150, 1)
-#define HV_set_pm_signal __BEAT_ADD_VENDOR_ID(0x151, 1)
-#define HV_get_pm_signal __BEAT_ADD_VENDOR_ID(0x152, 1)
-#define HV_set_pm_config __BEAT_ADD_VENDOR_ID(0x153, 1)
-#define HV_get_pm_config __BEAT_ADD_VENDOR_ID(0x154, 1)
-#define HV_get_inner_trace_data __BEAT_ADD_VENDOR_ID(0x155, 1)
-#define HV_set_ext_trace_buffer __BEAT_ADD_VENDOR_ID(0x156, 1)
-#define HV_get_ext_trace_buffer __BEAT_ADD_VENDOR_ID(0x157, 1)
-#define HV_set_pm_interrupt __BEAT_ADD_VENDOR_ID(0x158, 1)
-#define HV_get_pm_interrupt __BEAT_ADD_VENDOR_ID(0x159, 1)
-#define HV_kick_pm __BEAT_ADD_VENDOR_ID(0x160, 1)
-#define HV_construct_pm_context __BEAT_ADD_VENDOR_ID(0x164, 1)
-#define HV_destruct_pm_context __BEAT_ADD_VENDOR_ID(0x165, 1)
-#define HV_be_slow __BEAT_ADD_VENDOR_ID(0x170, 1)
-#define HV_assign_ipspc_server_connection_status_notification_port \
- __BEAT_ADD_VENDOR_ID(0x173, 1)
-#define HV_get_raid_of_physical_spe __BEAT_ADD_VENDOR_ID(0x174, 1)
-#define HV_set_physical_spe_to_rag __BEAT_ADD_VENDOR_ID(0x175, 1)
-#define HV_release_physical_spe_from_rag __BEAT_ADD_VENDOR_ID(0x176, 1)
-#define HV_rtc_read __BEAT_ADD_VENDOR_ID(0x190, 1)
-#define HV_rtc_write __BEAT_ADD_VENDOR_ID(0x191, 1)
-#define HV_eeprom_read __BEAT_ADD_VENDOR_ID(0x192, 1)
-#define HV_eeprom_write __BEAT_ADD_VENDOR_ID(0x193, 1)
-#define HV_insert_htab_entry3 __BEAT_ADD_VENDOR_ID(0x104, 1)
-#define HV_invalidate_htab_entry3 __BEAT_ADD_VENDOR_ID(0x105, 1)
-#define HV_update_htab_permission3 __BEAT_ADD_VENDOR_ID(0x106, 1)
-#define HV_clear_htab3 __BEAT_ADD_VENDOR_ID(0x107, 1)
-#endif
diff --git a/arch/powerpc/platforms/cell/beat_udbg.c b/arch/powerpc/platforms/cell/beat_udbg.c
deleted file mode 100644
index 350735bc8888..000000000000
--- a/arch/powerpc/platforms/cell/beat_udbg.c
+++ /dev/null
@@ -1,98 +0,0 @@
-/*
- * udbg function for Beat
- *
- * (C) Copyright 2006 TOSHIBA CORPORATION
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- */
-
-#include <linux/kernel.h>
-#include <linux/console.h>
-
-#include <asm/machdep.h>
-#include <asm/prom.h>
-#include <asm/udbg.h>
-
-#include "beat.h"
-
-#define celleb_vtermno 0
-
-static void udbg_putc_beat(char c)
-{
- unsigned long rc;
-
- if (c == '\n')
- udbg_putc_beat('\r');
-
- rc = beat_put_term_char(celleb_vtermno, 1, (uint64_t)c << 56, 0);
-}
-
-/* Buffered chars getc */
-static u64 inbuflen;
-static u64 inbuf[2]; /* must be 2 u64s */
-
-static int udbg_getc_poll_beat(void)
-{
- /* The interface is tricky because it may return up to 16 chars.
- * We save them statically for future calls to udbg_getc().
- */
- char ch, *buf = (char *)inbuf;
- int i;
- long rc;
- if (inbuflen == 0) {
- /* get some more chars. */
- inbuflen = 0;
- rc = beat_get_term_char(celleb_vtermno, &inbuflen,
- inbuf+0, inbuf+1);
- if (rc != 0)
- inbuflen = 0; /* otherwise inbuflen is garbage */
- }
- if (inbuflen <= 0 || inbuflen > 16) {
- /* Catch error case as well as other oddities (corruption) */
- inbuflen = 0;
- return -1;
- }
- ch = buf[0];
- for (i = 1; i < inbuflen; i++) /* shuffle them down. */
- buf[i-1] = buf[i];
- inbuflen--;
- return ch;
-}
-
-static int udbg_getc_beat(void)
-{
- int ch;
- for (;;) {
- ch = udbg_getc_poll_beat();
- if (ch == -1) {
- /* This shouldn't be needed...but... */
- volatile unsigned long delay;
- for (delay = 0; delay < 2000000; delay++)
- ;
- } else {
- return ch;
- }
- }
-}
-
-/* call this from early_init() for a working debug console on
- * vterm capable LPAR machines
- */
-void __init udbg_init_debug_beat(void)
-{
- udbg_putc = udbg_putc_beat;
- udbg_getc = udbg_getc_beat;
- udbg_getc_poll = udbg_getc_poll_beat;
-}
diff --git a/arch/powerpc/platforms/cell/beat_wrapper.h b/arch/powerpc/platforms/cell/beat_wrapper.h
deleted file mode 100644
index c1109969f242..000000000000
--- a/arch/powerpc/platforms/cell/beat_wrapper.h
+++ /dev/null
@@ -1,290 +0,0 @@
-/*
- * Beat hypervisor call I/F
- *
- * (C) Copyright 2007 TOSHIBA CORPORATION
- *
- * This code is based on arch/powerpc/platforms/pseries/plpar_wrapper.h.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- */
-#ifndef BEAT_HCALL
-#include <linux/string.h>
-#include "beat_syscall.h"
-
-/* defined in hvCall.S */
-extern s64 beat_hcall_norets(u64 opcode, ...);
-extern s64 beat_hcall_norets8(u64 opcode, u64 arg1, u64 arg2, u64 arg3,
- u64 arg4, u64 arg5, u64 arg6, u64 arg7, u64 arg8);
-extern s64 beat_hcall1(u64 opcode, u64 retbuf[1], ...);
-extern s64 beat_hcall2(u64 opcode, u64 retbuf[2], ...);
-extern s64 beat_hcall3(u64 opcode, u64 retbuf[3], ...);
-extern s64 beat_hcall4(u64 opcode, u64 retbuf[4], ...);
-extern s64 beat_hcall5(u64 opcode, u64 retbuf[5], ...);
-extern s64 beat_hcall6(u64 opcode, u64 retbuf[6], ...);
-
-static inline s64 beat_downcount_of_interrupt(u64 plug_id)
-{
- return beat_hcall_norets(HV_downcount_of_interrupt, plug_id);
-}
-
-static inline s64 beat_set_interrupt_mask(u64 index,
- u64 val0, u64 val1, u64 val2, u64 val3)
-{
- return beat_hcall_norets(HV_set_interrupt_mask, index,
- val0, val1, val2, val3);
-}
-
-static inline s64 beat_destruct_irq_plug(u64 plug_id)
-{
- return beat_hcall_norets(HV_destruct_irq_plug, plug_id);
-}
-
-static inline s64 beat_construct_and_connect_irq_plug(u64 plug_id,
- u64 outlet_id)
-{
- return beat_hcall_norets(HV_construct_and_connect_irq_plug, plug_id,
- outlet_id);
-}
-
-static inline s64 beat_detect_pending_interrupts(u64 index, u64 *retbuf)
-{
- return beat_hcall4(HV_detect_pending_interrupts, retbuf, index);
-}
-
-static inline s64 beat_pause(u64 style)
-{
- return beat_hcall_norets(HV_pause, style);
-}
-
-static inline s64 beat_read_htab_entries(u64 htab_id, u64 index, u64 *retbuf)
-{
- return beat_hcall5(HV_read_htab_entries, retbuf, htab_id, index);
-}
-
-static inline s64 beat_insert_htab_entry(u64 htab_id, u64 group,
- u64 bitmask, u64 hpte_v, u64 hpte_r, u64 *slot)
-{
- u64 dummy[3];
- s64 ret;
-
- ret = beat_hcall3(HV_insert_htab_entry, dummy, htab_id, group,
- bitmask, hpte_v, hpte_r);
- *slot = dummy[0];
- return ret;
-}
-
-static inline s64 beat_write_htab_entry(u64 htab_id, u64 slot,
- u64 hpte_v, u64 hpte_r, u64 mask_v, u64 mask_r,
- u64 *ret_v, u64 *ret_r)
-{
- u64 dummy[2];
- s64 ret;
-
- ret = beat_hcall2(HV_write_htab_entry, dummy, htab_id, slot,
- hpte_v, hpte_r, mask_v, mask_r);
- *ret_v = dummy[0];
- *ret_r = dummy[1];
- return ret;
-}
-
-static inline s64 beat_insert_htab_entry3(u64 htab_id, u64 group,
- u64 hpte_v, u64 hpte_r, u64 mask_v, u64 value_v, u64 *slot)
-{
- u64 dummy[1];
- s64 ret;
-
- ret = beat_hcall1(HV_insert_htab_entry3, dummy, htab_id, group,
- hpte_v, hpte_r, mask_v, value_v);
- *slot = dummy[0];
- return ret;
-}
-
-static inline s64 beat_invalidate_htab_entry3(u64 htab_id, u64 group,
- u64 va, u64 pss)
-{
- return beat_hcall_norets(HV_invalidate_htab_entry3,
- htab_id, group, va, pss);
-}
-
-static inline s64 beat_update_htab_permission3(u64 htab_id, u64 group,
- u64 va, u64 pss, u64 ptel_mask, u64 ptel_value)
-{
- return beat_hcall_norets(HV_update_htab_permission3,
- htab_id, group, va, pss, ptel_mask, ptel_value);
-}
-
-static inline s64 beat_clear_htab3(u64 htab_id)
-{
- return beat_hcall_norets(HV_clear_htab3, htab_id);
-}
-
-static inline void beat_shutdown_logical_partition(u64 code)
-{
- (void)beat_hcall_norets(HV_shutdown_logical_partition, code);
-}
-
-static inline s64 beat_rtc_write(u64 time_from_epoch)
-{
- return beat_hcall_norets(HV_rtc_write, time_from_epoch);
-}
-
-static inline s64 beat_rtc_read(u64 *time_from_epoch)
-{
- u64 dummy[1];
- s64 ret;
-
- ret = beat_hcall1(HV_rtc_read, dummy);
- *time_from_epoch = dummy[0];
- return ret;
-}
-
-#define BEAT_NVRW_CNT (sizeof(u64) * 6)
-
-static inline s64 beat_eeprom_write(u64 index, u64 length, u8 *buffer)
-{
- u64 b[6];
-
- if (length > BEAT_NVRW_CNT)
- return -1;
- memcpy(b, buffer, sizeof(b));
- return beat_hcall_norets8(HV_eeprom_write, index, length,
- b[0], b[1], b[2], b[3], b[4], b[5]);
-}
-
-static inline s64 beat_eeprom_read(u64 index, u64 length, u8 *buffer)
-{
- u64 b[6];
- s64 ret;
-
- if (length > BEAT_NVRW_CNT)
- return -1;
- ret = beat_hcall6(HV_eeprom_read, b, index, length);
- memcpy(buffer, b, length);
- return ret;
-}
-
-static inline s64 beat_set_dabr(u64 value, u64 style)
-{
- return beat_hcall_norets(HV_set_dabr, value, style);
-}
-
-static inline s64 beat_get_characters_from_console(u64 termno, u64 *len,
- u8 *buffer)
-{
- u64 dummy[3];
- s64 ret;
-
- ret = beat_hcall3(HV_get_characters_from_console, dummy, termno, len);
- *len = dummy[0];
- memcpy(buffer, dummy + 1, *len);
- return ret;
-}
-
-static inline s64 beat_put_characters_to_console(u64 termno, u64 len,
- u8 *buffer)
-{
- u64 b[2];
-
- memcpy(b, buffer, len);
- return beat_hcall_norets(HV_put_characters_to_console, termno, len,
- b[0], b[1]);
-}
-
-static inline s64 beat_get_spe_privileged_state_1_registers(
- u64 id, u64 offsetof, u64 *value)
-{
- u64 dummy[1];
- s64 ret;
-
- ret = beat_hcall1(HV_get_spe_privileged_state_1_registers, dummy, id,
- offsetof);
- *value = dummy[0];
- return ret;
-}
-
-static inline s64 beat_set_irq_mask_for_spe(u64 id, u64 class, u64 mask)
-{
- return beat_hcall_norets(HV_set_irq_mask_for_spe, id, class, mask);
-}
-
-static inline s64 beat_clear_interrupt_status_of_spe(u64 id, u64 class,
- u64 mask)
-{
- return beat_hcall_norets(HV_clear_interrupt_status_of_spe,
- id, class, mask);
-}
-
-static inline s64 beat_set_spe_privileged_state_1_registers(
- u64 id, u64 offsetof, u64 value)
-{
- return beat_hcall_norets(HV_set_spe_privileged_state_1_registers,
- id, offsetof, value);
-}
-
-static inline s64 beat_get_interrupt_status_of_spe(u64 id, u64 class, u64 *val)
-{
- u64 dummy[1];
- s64 ret;
-
- ret = beat_hcall1(HV_get_interrupt_status_of_spe, dummy, id, class);
- *val = dummy[0];
- return ret;
-}
-
-static inline s64 beat_put_iopte(u64 ioas_id, u64 io_addr, u64 real_addr,
- u64 ioid, u64 flags)
-{
- return beat_hcall_norets(HV_put_iopte, ioas_id, io_addr, real_addr,
- ioid, flags);
-}
-
-static inline s64 beat_construct_event_receive_port(u64 *port)
-{
- u64 dummy[1];
- s64 ret;
-
- ret = beat_hcall1(HV_construct_event_receive_port, dummy);
- *port = dummy[0];
- return ret;
-}
-
-static inline s64 beat_destruct_event_receive_port(u64 port)
-{
- s64 ret;
-
- ret = beat_hcall_norets(HV_destruct_event_receive_port, port);
- return ret;
-}
-
-static inline s64 beat_create_repository_node(u64 path[4], u64 data[2])
-{
- s64 ret;
-
- ret = beat_hcall_norets(HV_create_repository_node2,
- path[0], path[1], path[2], path[3], data[0], data[1]);
- return ret;
-}
-
-static inline s64 beat_get_repository_node_value(u64 lpid, u64 path[4],
- u64 data[2])
-{
- s64 ret;
-
- ret = beat_hcall2(HV_get_repository_node_value2, data,
- lpid, path[0], path[1], path[2], path[3]);
- return ret;
-}
-
-#endif
diff --git a/arch/powerpc/platforms/cell/cell.h b/arch/powerpc/platforms/cell/cell.h
new file mode 100644
index 000000000000..ef143dfee068
--- /dev/null
+++ b/arch/powerpc/platforms/cell/cell.h
@@ -0,0 +1,24 @@
+/*
+ * Cell Platform common data structures
+ *
+ * Copyright 2015, Daniel Axtens, IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef CELL_H
+#define CELL_H
+
+#include <asm/pci-bridge.h>
+
+extern struct pci_controller_ops cell_pci_controller_ops;
+
+#endif
diff --git a/arch/powerpc/platforms/cell/celleb_pci.c b/arch/powerpc/platforms/cell/celleb_pci.c
deleted file mode 100644
index 3ce70ded2d6a..000000000000
--- a/arch/powerpc/platforms/cell/celleb_pci.c
+++ /dev/null
@@ -1,500 +0,0 @@
-/*
- * Support for PCI on Celleb platform.
- *
- * (C) Copyright 2006-2007 TOSHIBA CORPORATION
- *
- * This code is based on arch/powerpc/kernel/rtas_pci.c:
- * Copyright (C) 2001 Dave Engebretsen, IBM Corporation
- * Copyright (C) 2003 Anton Blanchard <anton@au.ibm.com>, IBM
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- */
-
-#undef DEBUG
-
-#include <linux/kernel.h>
-#include <linux/threads.h>
-#include <linux/pci.h>
-#include <linux/string.h>
-#include <linux/init.h>
-#include <linux/memblock.h>
-#include <linux/pci_regs.h>
-#include <linux/of.h>
-#include <linux/of_device.h>
-#include <linux/slab.h>
-
-#include <asm/io.h>
-#include <asm/irq.h>
-#include <asm/prom.h>
-#include <asm/pci-bridge.h>
-#include <asm/ppc-pci.h>
-
-#include "celleb_pci.h"
-
-#define MAX_PCI_DEVICES 32
-#define MAX_PCI_FUNCTIONS 8
-#define MAX_PCI_BASE_ADDRS 3 /* use 64 bit address */
-
-/* definition for fake pci configuration area for GbE, .... ,and etc. */
-
-struct celleb_pci_resource {
- struct resource r[MAX_PCI_BASE_ADDRS];
-};
-
-struct celleb_pci_private {
- unsigned char *fake_config[MAX_PCI_DEVICES][MAX_PCI_FUNCTIONS];
- struct celleb_pci_resource *res[MAX_PCI_DEVICES][MAX_PCI_FUNCTIONS];
-};
-
-static inline u8 celleb_fake_config_readb(void *addr)
-{
- u8 *p = addr;
- return *p;
-}
-
-static inline u16 celleb_fake_config_readw(void *addr)
-{
- __le16 *p = addr;
- return le16_to_cpu(*p);
-}
-
-static inline u32 celleb_fake_config_readl(void *addr)
-{
- __le32 *p = addr;
- return le32_to_cpu(*p);
-}
-
-static inline void celleb_fake_config_writeb(u32 val, void *addr)
-{
- u8 *p = addr;
- *p = val;
-}
-
-static inline void celleb_fake_config_writew(u32 val, void *addr)
-{
- __le16 val16;
- __le16 *p = addr;
- val16 = cpu_to_le16(val);
- *p = val16;
-}
-
-static inline void celleb_fake_config_writel(u32 val, void *addr)
-{
- __le32 val32;
- __le32 *p = addr;
- val32 = cpu_to_le32(val);
- *p = val32;
-}
-
-static unsigned char *get_fake_config_start(struct pci_controller *hose,
- int devno, int fn)
-{
- struct celleb_pci_private *private = hose->private_data;
-
- if (private == NULL)
- return NULL;
-
- return private->fake_config[devno][fn];
-}
-
-static struct celleb_pci_resource *get_resource_start(
- struct pci_controller *hose,
- int devno, int fn)
-{
- struct celleb_pci_private *private = hose->private_data;
-
- if (private == NULL)
- return NULL;
-
- return private->res[devno][fn];
-}
-
-
-static void celleb_config_read_fake(unsigned char *config, int where,
- int size, u32 *val)
-{
- char *p = config + where;
-
- switch (size) {
- case 1:
- *val = celleb_fake_config_readb(p);
- break;
- case 2:
- *val = celleb_fake_config_readw(p);
- break;
- case 4:
- *val = celleb_fake_config_readl(p);
- break;
- }
-}
-
-static void celleb_config_write_fake(unsigned char *config, int where,
- int size, u32 val)
-{
- char *p = config + where;
-
- switch (size) {
- case 1:
- celleb_fake_config_writeb(val, p);
- break;
- case 2:
- celleb_fake_config_writew(val, p);
- break;
- case 4:
- celleb_fake_config_writel(val, p);
- break;
- }
-}
-
-static int celleb_fake_pci_read_config(struct pci_bus *bus,
- unsigned int devfn, int where, int size, u32 *val)
-{
- char *config;
- struct pci_controller *hose = pci_bus_to_host(bus);
- unsigned int devno = devfn >> 3;
- unsigned int fn = devfn & 0x7;
-
- /* allignment check */
- BUG_ON(where % size);
-
- pr_debug(" fake read: bus=0x%x, ", bus->number);
- config = get_fake_config_start(hose, devno, fn);
-
- pr_debug("devno=0x%x, where=0x%x, size=0x%x, ", devno, where, size);
- if (!config) {
- pr_debug("failed\n");
- return PCIBIOS_DEVICE_NOT_FOUND;
- }
-
- celleb_config_read_fake(config, where, size, val);
- pr_debug("val=0x%x\n", *val);
-
- return PCIBIOS_SUCCESSFUL;
-}
-
-
-static int celleb_fake_pci_write_config(struct pci_bus *bus,
- unsigned int devfn, int where, int size, u32 val)
-{
- char *config;
- struct pci_controller *hose = pci_bus_to_host(bus);
- struct celleb_pci_resource *res;
- unsigned int devno = devfn >> 3;
- unsigned int fn = devfn & 0x7;
-
- /* allignment check */
- BUG_ON(where % size);
-
- config = get_fake_config_start(hose, devno, fn);
-
- if (!config)
- return PCIBIOS_DEVICE_NOT_FOUND;
-
- if (val == ~0) {
- int i = (where - PCI_BASE_ADDRESS_0) >> 3;
-
- switch (where) {
- case PCI_BASE_ADDRESS_0:
- case PCI_BASE_ADDRESS_2:
- if (size != 4)
- return PCIBIOS_DEVICE_NOT_FOUND;
- res = get_resource_start(hose, devno, fn);
- if (!res)
- return PCIBIOS_DEVICE_NOT_FOUND;
- celleb_config_write_fake(config, where, size,
- (res->r[i].end - res->r[i].start));
- return PCIBIOS_SUCCESSFUL;
- case PCI_BASE_ADDRESS_1:
- case PCI_BASE_ADDRESS_3:
- case PCI_BASE_ADDRESS_4:
- case PCI_BASE_ADDRESS_5:
- break;
- default:
- break;
- }
- }
-
- celleb_config_write_fake(config, where, size, val);
- pr_debug(" fake write: where=%x, size=%d, val=%x\n",
- where, size, val);
-
- return PCIBIOS_SUCCESSFUL;
-}
-
-static struct pci_ops celleb_fake_pci_ops = {
- .read = celleb_fake_pci_read_config,
- .write = celleb_fake_pci_write_config,
-};
-
-static inline void celleb_setup_pci_base_addrs(struct pci_controller *hose,
- unsigned int devno, unsigned int fn,
- unsigned int num_base_addr)
-{
- u32 val;
- unsigned char *config;
- struct celleb_pci_resource *res;
-
- config = get_fake_config_start(hose, devno, fn);
- res = get_resource_start(hose, devno, fn);
-
- if (!config || !res)
- return;
-
- switch (num_base_addr) {
- case 3:
- val = (res->r[2].start & 0xfffffff0)
- | PCI_BASE_ADDRESS_MEM_TYPE_64;
- celleb_config_write_fake(config, PCI_BASE_ADDRESS_4, 4, val);
- val = res->r[2].start >> 32;
- celleb_config_write_fake(config, PCI_BASE_ADDRESS_5, 4, val);
- /* FALLTHROUGH */
- case 2:
- val = (res->r[1].start & 0xfffffff0)
- | PCI_BASE_ADDRESS_MEM_TYPE_64;
- celleb_config_write_fake(config, PCI_BASE_ADDRESS_2, 4, val);
- val = res->r[1].start >> 32;
- celleb_config_write_fake(config, PCI_BASE_ADDRESS_3, 4, val);
- /* FALLTHROUGH */
- case 1:
- val = (res->r[0].start & 0xfffffff0)
- | PCI_BASE_ADDRESS_MEM_TYPE_64;
- celleb_config_write_fake(config, PCI_BASE_ADDRESS_0, 4, val);
- val = res->r[0].start >> 32;
- celleb_config_write_fake(config, PCI_BASE_ADDRESS_1, 4, val);
- break;
- }
-
- val = PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER;
- celleb_config_write_fake(config, PCI_COMMAND, 2, val);
-}
-
-static int __init celleb_setup_fake_pci_device(struct device_node *node,
- struct pci_controller *hose)
-{
- unsigned int rlen;
- int num_base_addr = 0;
- u32 val;
- const u32 *wi0, *wi1, *wi2, *wi3, *wi4;
- unsigned int devno, fn;
- struct celleb_pci_private *private = hose->private_data;
- unsigned char **config = NULL;
- struct celleb_pci_resource **res = NULL;
- const char *name;
- const unsigned long *li;
- int size, result;
-
- if (private == NULL) {
- printk(KERN_ERR "PCI: "
- "memory space for pci controller is not assigned\n");
- goto error;
- }
-
- name = of_get_property(node, "model", &rlen);
- if (!name) {
- printk(KERN_ERR "PCI: model property not found.\n");
- goto error;
- }
-
- wi4 = of_get_property(node, "reg", &rlen);
- if (wi4 == NULL)
- goto error;
-
- devno = ((wi4[0] >> 8) & 0xff) >> 3;
- fn = (wi4[0] >> 8) & 0x7;
-
- pr_debug("PCI: celleb_setup_fake_pci() %s devno=%x fn=%x\n", name,
- devno, fn);
-
- size = 256;
- config = &private->fake_config[devno][fn];
- *config = zalloc_maybe_bootmem(size, GFP_KERNEL);
- if (*config == NULL) {
- printk(KERN_ERR "PCI: "
- "not enough memory for fake configuration space\n");
- goto error;
- }
- pr_debug("PCI: fake config area assigned 0x%016lx\n",
- (unsigned long)*config);
-
- size = sizeof(struct celleb_pci_resource);
- res = &private->res[devno][fn];
- *res = zalloc_maybe_bootmem(size, GFP_KERNEL);
- if (*res == NULL) {
- printk(KERN_ERR
- "PCI: not enough memory for resource data space\n");
- goto error;
- }
- pr_debug("PCI: res assigned 0x%016lx\n", (unsigned long)*res);
-
- wi0 = of_get_property(node, "device-id", NULL);
- wi1 = of_get_property(node, "vendor-id", NULL);
- wi2 = of_get_property(node, "class-code", NULL);
- wi3 = of_get_property(node, "revision-id", NULL);
- if (!wi0 || !wi1 || !wi2 || !wi3) {
- printk(KERN_ERR "PCI: Missing device tree properties.\n");
- goto error;
- }
-
- celleb_config_write_fake(*config, PCI_DEVICE_ID, 2, wi0[0] & 0xffff);
- celleb_config_write_fake(*config, PCI_VENDOR_ID, 2, wi1[0] & 0xffff);
- pr_debug("class-code = 0x%08x\n", wi2[0]);
-
- celleb_config_write_fake(*config, PCI_CLASS_PROG, 1, wi2[0] & 0xff);
- celleb_config_write_fake(*config, PCI_CLASS_DEVICE, 2,
- (wi2[0] >> 8) & 0xffff);
- celleb_config_write_fake(*config, PCI_REVISION_ID, 1, wi3[0]);
-
- while (num_base_addr < MAX_PCI_BASE_ADDRS) {
- result = of_address_to_resource(node,
- num_base_addr, &(*res)->r[num_base_addr]);
- if (result)
- break;
- num_base_addr++;
- }
-
- celleb_setup_pci_base_addrs(hose, devno, fn, num_base_addr);
-
- li = of_get_property(node, "interrupts", &rlen);
- if (!li) {
- printk(KERN_ERR "PCI: interrupts not found.\n");
- goto error;
- }
- val = li[0];
- celleb_config_write_fake(*config, PCI_INTERRUPT_PIN, 1, 1);
- celleb_config_write_fake(*config, PCI_INTERRUPT_LINE, 1, val);
-
-#ifdef DEBUG
- pr_debug("PCI: %s irq=%ld\n", name, li[0]);
- for (i = 0; i < 6; i++) {
- celleb_config_read_fake(*config,
- PCI_BASE_ADDRESS_0 + 0x4 * i, 4,
- &val);
- pr_debug("PCI: %s fn=%d base_address_%d=0x%x\n",
- name, fn, i, val);
- }
-#endif
-
- celleb_config_write_fake(*config, PCI_HEADER_TYPE, 1,
- PCI_HEADER_TYPE_NORMAL);
-
- return 0;
-
-error:
- if (mem_init_done) {
- if (config && *config)
- kfree(*config);
- if (res && *res)
- kfree(*res);
-
- } else {
- if (config && *config) {
- size = 256;
- memblock_free(__pa(*config), size);
- }
- if (res && *res) {
- size = sizeof(struct celleb_pci_resource);
- memblock_free(__pa(*res), size);
- }
- }
-
- return 1;
-}
-
-static int __init phb_set_bus_ranges(struct device_node *dev,
- struct pci_controller *phb)
-{
- const int *bus_range;
- unsigned int len;
-
- bus_range = of_get_property(dev, "bus-range", &len);
- if (bus_range == NULL || len < 2 * sizeof(int))
- return 1;
-
- phb->first_busno = bus_range[0];
- phb->last_busno = bus_range[1];
-
- return 0;
-}
-
-static void __init celleb_alloc_private_mem(struct pci_controller *hose)
-{
- hose->private_data =
- zalloc_maybe_bootmem(sizeof(struct celleb_pci_private),
- GFP_KERNEL);
-}
-
-static int __init celleb_setup_fake_pci(struct device_node *dev,
- struct pci_controller *phb)
-{
- struct device_node *node;
-
- phb->ops = &celleb_fake_pci_ops;
- celleb_alloc_private_mem(phb);
-
- for (node = of_get_next_child(dev, NULL);
- node != NULL; node = of_get_next_child(dev, node))
- celleb_setup_fake_pci_device(node, phb);
-
- return 0;
-}
-
-static struct celleb_phb_spec celleb_fake_pci_spec __initdata = {
- .setup = celleb_setup_fake_pci,
-};
-
-static const struct of_device_id celleb_phb_match[] __initconst = {
- {
- .name = "pci-pseudo",
- .data = &celleb_fake_pci_spec,
- }, {
- .name = "epci",
- .data = &celleb_epci_spec,
- }, {
- .name = "pcie",
- .data = &celleb_pciex_spec,
- }, {
- },
-};
-
-int __init celleb_setup_phb(struct pci_controller *phb)
-{
- struct device_node *dev = phb->dn;
- const struct of_device_id *match;
- const struct celleb_phb_spec *phb_spec;
- int rc;
-
- match = of_match_node(celleb_phb_match, dev);
- if (!match)
- return 1;
-
- phb_set_bus_ranges(dev, phb);
- phb->buid = 1;
-
- phb_spec = match->data;
- rc = (*phb_spec->setup)(dev, phb);
- if (rc)
- return 1;
-
- if (phb_spec->ops)
- iowa_register_bus(phb, phb_spec->ops,
- phb_spec->iowa_init,
- phb_spec->iowa_data);
- return 0;
-}
-
-int celleb_pci_probe_mode(struct pci_bus *bus)
-{
- return PCI_PROBE_DEVTREE;
-}
diff --git a/arch/powerpc/platforms/cell/celleb_pci.h b/arch/powerpc/platforms/cell/celleb_pci.h
deleted file mode 100644
index a801fcc5f389..000000000000
--- a/arch/powerpc/platforms/cell/celleb_pci.h
+++ /dev/null
@@ -1,46 +0,0 @@
-/*
- * pci prototypes for Celleb platform
- *
- * (C) Copyright 2006-2007 TOSHIBA CORPORATION
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- */
-
-#ifndef _CELLEB_PCI_H
-#define _CELLEB_PCI_H
-
-#include <linux/pci.h>
-
-#include <asm/pci-bridge.h>
-#include <asm/prom.h>
-#include <asm/ppc-pci.h>
-#include <asm/io-workarounds.h>
-
-struct iowa_bus;
-
-struct celleb_phb_spec {
- int (*setup)(struct device_node *, struct pci_controller *);
- struct ppc_pci_io *ops;
- int (*iowa_init)(struct iowa_bus *, void *);
- void *iowa_data;
-};
-
-extern int celleb_setup_phb(struct pci_controller *);
-extern int celleb_pci_probe_mode(struct pci_bus *);
-
-extern struct celleb_phb_spec celleb_epci_spec;
-extern struct celleb_phb_spec celleb_pciex_spec;
-
-#endif /* _CELLEB_PCI_H */
diff --git a/arch/powerpc/platforms/cell/celleb_scc.h b/arch/powerpc/platforms/cell/celleb_scc.h
deleted file mode 100644
index b596a711c348..000000000000
--- a/arch/powerpc/platforms/cell/celleb_scc.h
+++ /dev/null
@@ -1,232 +0,0 @@
-/*
- * SCC (Super Companion Chip) definitions
- *
- * (C) Copyright 2004-2006 TOSHIBA CORPORATION
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- */
-
-#ifndef _CELLEB_SCC_H
-#define _CELLEB_SCC_H
-
-#define PCI_VENDOR_ID_TOSHIBA_2 0x102f
-#define PCI_DEVICE_ID_TOSHIBA_SCC_PCIEXC_BRIDGE 0x01b0
-#define PCI_DEVICE_ID_TOSHIBA_SCC_EPCI_BRIDGE 0x01b1
-#define PCI_DEVICE_ID_TOSHIBA_SCC_BRIDGE 0x01b2
-#define PCI_DEVICE_ID_TOSHIBA_SCC_GBE 0x01b3
-#define PCI_DEVICE_ID_TOSHIBA_SCC_ATA 0x01b4
-#define PCI_DEVICE_ID_TOSHIBA_SCC_USB2 0x01b5
-#define PCI_DEVICE_ID_TOSHIBA_SCC_USB 0x01b6
-#define PCI_DEVICE_ID_TOSHIBA_SCC_ENCDEC 0x01b7
-
-#define SCC_EPCI_REG 0x0000d000
-
-/* EPCI registers */
-#define SCC_EPCI_CNF10_REG 0x010
-#define SCC_EPCI_CNF14_REG 0x014
-#define SCC_EPCI_CNF18_REG 0x018
-#define SCC_EPCI_PVBAT 0x100
-#define SCC_EPCI_VPMBAT 0x104
-#define SCC_EPCI_VPIBAT 0x108
-#define SCC_EPCI_VCSR 0x110
-#define SCC_EPCI_VIENAB 0x114
-#define SCC_EPCI_VISTAT 0x118
-#define SCC_EPCI_VRDCOUNT 0x124
-#define SCC_EPCI_BAM0 0x12c
-#define SCC_EPCI_BAM1 0x134
-#define SCC_EPCI_BAM2 0x13c
-#define SCC_EPCI_IADR 0x164
-#define SCC_EPCI_CLKRST 0x800
-#define SCC_EPCI_INTSET 0x804
-#define SCC_EPCI_STATUS 0x808
-#define SCC_EPCI_ABTSET 0x80c
-#define SCC_EPCI_WATRP 0x810
-#define SCC_EPCI_DUMYRADR 0x814
-#define SCC_EPCI_SWRESP 0x818
-#define SCC_EPCI_CNTOPT 0x81c
-#define SCC_EPCI_ECMODE 0xf00
-#define SCC_EPCI_IOM_AC_NUM 5
-#define SCC_EPCI_IOM_ACTE(n) (0xf10 + (n) * 4)
-#define SCC_EPCI_IOT_AC_NUM 4
-#define SCC_EPCI_IOT_ACTE(n) (0xf30 + (n) * 4)
-#define SCC_EPCI_MAEA 0xf50
-#define SCC_EPCI_MAEC 0xf54
-#define SCC_EPCI_CKCTRL 0xff0
-
-/* bits for SCC_EPCI_VCSR */
-#define SCC_EPCI_VCSR_FRE 0x00020000
-#define SCC_EPCI_VCSR_FWE 0x00010000
-#define SCC_EPCI_VCSR_DR 0x00000400
-#define SCC_EPCI_VCSR_SR 0x00000008
-#define SCC_EPCI_VCSR_AT 0x00000004
-
-/* bits for SCC_EPCI_VIENAB/SCC_EPCI_VISTAT */
-#define SCC_EPCI_VISTAT_PMPE 0x00000008
-#define SCC_EPCI_VISTAT_PMFE 0x00000004
-#define SCC_EPCI_VISTAT_PRA 0x00000002
-#define SCC_EPCI_VISTAT_PRD 0x00000001
-#define SCC_EPCI_VISTAT_ALL 0x0000000f
-
-#define SCC_EPCI_VIENAB_PMPEE 0x00000008
-#define SCC_EPCI_VIENAB_PMFEE 0x00000004
-#define SCC_EPCI_VIENAB_PRA 0x00000002
-#define SCC_EPCI_VIENAB_PRD 0x00000001
-#define SCC_EPCI_VIENAB_ALL 0x0000000f
-
-/* bits for SCC_EPCI_CLKRST */
-#define SCC_EPCI_CLKRST_CKS_MASK 0x00030000
-#define SCC_EPCI_CLKRST_CKS_2 0x00000000
-#define SCC_EPCI_CLKRST_CKS_4 0x00010000
-#define SCC_EPCI_CLKRST_CKS_8 0x00020000
-#define SCC_EPCI_CLKRST_PCICRST 0x00000400
-#define SCC_EPCI_CLKRST_BC 0x00000200
-#define SCC_EPCI_CLKRST_PCIRST 0x00000100
-#define SCC_EPCI_CLKRST_PCKEN 0x00000001
-
-/* bits for SCC_EPCI_INTSET/SCC_EPCI_STATUS */
-#define SCC_EPCI_INT_2M 0x01000000
-#define SCC_EPCI_INT_RERR 0x00200000
-#define SCC_EPCI_INT_SERR 0x00100000
-#define SCC_EPCI_INT_PRTER 0x00080000
-#define SCC_EPCI_INT_SER 0x00040000
-#define SCC_EPCI_INT_PER 0x00020000
-#define SCC_EPCI_INT_PAI 0x00010000
-#define SCC_EPCI_INT_1M 0x00000100
-#define SCC_EPCI_INT_PME 0x00000010
-#define SCC_EPCI_INT_INTD 0x00000008
-#define SCC_EPCI_INT_INTC 0x00000004
-#define SCC_EPCI_INT_INTB 0x00000002
-#define SCC_EPCI_INT_INTA 0x00000001
-#define SCC_EPCI_INT_DEVINT 0x0000000f
-#define SCC_EPCI_INT_ALL 0x003f001f
-#define SCC_EPCI_INT_ALLERR 0x003f0000
-
-/* bits for SCC_EPCI_CKCTRL */
-#define SCC_EPCI_CKCTRL_CRST0 0x00010000
-#define SCC_EPCI_CKCTRL_CRST1 0x00020000
-#define SCC_EPCI_CKCTRL_OCLKEN 0x00000100
-#define SCC_EPCI_CKCTRL_LCLKEN 0x00000001
-
-#define SCC_EPCI_IDSEL_AD_TO_SLOT(ad) ((ad) - 10)
-#define SCC_EPCI_MAX_DEVNU SCC_EPCI_IDSEL_AD_TO_SLOT(32)
-
-/* bits for SCC_EPCI_CNTOPT */
-#define SCC_EPCI_CNTOPT_O2PMB 0x00000002
-
-/* SCC PCIEXC SMMIO registers */
-#define PEXCADRS 0x000
-#define PEXCWDATA 0x004
-#define PEXCRDATA 0x008
-#define PEXDADRS 0x010
-#define PEXDCMND 0x014
-#define PEXDWDATA 0x018
-#define PEXDRDATA 0x01c
-#define PEXREQID 0x020
-#define PEXTIDMAP 0x024
-#define PEXINTMASK 0x028
-#define PEXINTSTS 0x02c
-#define PEXAERRMASK 0x030
-#define PEXAERRSTS 0x034
-#define PEXPRERRMASK 0x040
-#define PEXPRERRSTS 0x044
-#define PEXPRERRID01 0x048
-#define PEXPRERRID23 0x04c
-#define PEXVDMASK 0x050
-#define PEXVDSTS 0x054
-#define PEXRCVCPLIDA 0x060
-#define PEXLENERRIDA 0x068
-#define PEXPHYPLLST 0x070
-#define PEXDMRDEN0 0x100
-#define PEXDMRDADR0 0x104
-#define PEXDMRDENX 0x110
-#define PEXDMRDADRX 0x114
-#define PEXECMODE 0xf00
-#define PEXMAEA(n) (0xf50 + (8 * n))
-#define PEXMAEC(n) (0xf54 + (8 * n))
-#define PEXCCRCTRL 0xff0
-
-/* SCC PCIEXC bits and shifts for PEXCADRS */
-#define PEXCADRS_BYTE_EN_SHIFT 20
-#define PEXCADRS_CMD_SHIFT 16
-#define PEXCADRS_CMD_READ (0xa << PEXCADRS_CMD_SHIFT)
-#define PEXCADRS_CMD_WRITE (0xb << PEXCADRS_CMD_SHIFT)
-
-/* SCC PCIEXC shifts for PEXDADRS */
-#define PEXDADRS_BUSNO_SHIFT 20
-#define PEXDADRS_DEVNO_SHIFT 15
-#define PEXDADRS_FUNCNO_SHIFT 12
-
-/* SCC PCIEXC bits and shifts for PEXDCMND */
-#define PEXDCMND_BYTE_EN_SHIFT 4
-#define PEXDCMND_IO_READ 0x2
-#define PEXDCMND_IO_WRITE 0x3
-#define PEXDCMND_CONFIG_READ 0xa
-#define PEXDCMND_CONFIG_WRITE 0xb
-
-/* SCC PCIEXC bits for PEXPHYPLLST */
-#define PEXPHYPLLST_PEXPHYAPLLST 0x00000001
-
-/* SCC PCIEXC bits for PEXECMODE */
-#define PEXECMODE_ALL_THROUGH 0x00000000
-#define PEXECMODE_ALL_8BIT 0x00550155
-#define PEXECMODE_ALL_16BIT 0x00aa02aa
-
-/* SCC PCIEXC bits for PEXCCRCTRL */
-#define PEXCCRCTRL_PEXIPCOREEN 0x00040000
-#define PEXCCRCTRL_PEXIPCONTEN 0x00020000
-#define PEXCCRCTRL_PEXPHYPLLEN 0x00010000
-#define PEXCCRCTRL_PCIEXCAOCKEN 0x00000100
-
-/* SCC PCIEXC port configuration registers */
-#define PEXTCERRCHK 0x21c
-#define PEXTAMAPB0 0x220
-#define PEXTAMAPL0 0x224
-#define PEXTAMAPB(n) (PEXTAMAPB0 + 8 * (n))
-#define PEXTAMAPL(n) (PEXTAMAPL0 + 8 * (n))
-#define PEXCHVC0P 0x500
-#define PEXCHVC0NP 0x504
-#define PEXCHVC0C 0x508
-#define PEXCDVC0P 0x50c
-#define PEXCDVC0NP 0x510
-#define PEXCDVC0C 0x514
-#define PEXCHVCXP 0x518
-#define PEXCHVCXNP 0x51c
-#define PEXCHVCXC 0x520
-#define PEXCDVCXP 0x524
-#define PEXCDVCXNP 0x528
-#define PEXCDVCXC 0x52c
-#define PEXCTTRG 0x530
-#define PEXTSCTRL 0x700
-#define PEXTSSTS 0x704
-#define PEXSKPCTRL 0x708
-
-/* UHC registers */
-#define SCC_UHC_CKRCTRL 0xff0
-#define SCC_UHC_ECMODE 0xf00
-
-/* bits for SCC_UHC_CKRCTRL */
-#define SCC_UHC_F48MCKLEN 0x00000001
-#define SCC_UHC_P_SUSPEND 0x00000002
-#define SCC_UHC_PHY_SUSPEND_SEL 0x00000004
-#define SCC_UHC_HCLKEN 0x00000100
-#define SCC_UHC_USBEN 0x00010000
-#define SCC_UHC_USBCEN 0x00020000
-#define SCC_UHC_PHYEN 0x00040000
-
-/* bits for SCC_UHC_ECMODE */
-#define SCC_UHC_ECMODE_BY_BYTE 0x00000555
-#define SCC_UHC_ECMODE_BY_WORD 0x00000aaa
-
-#endif /* _CELLEB_SCC_H */
diff --git a/arch/powerpc/platforms/cell/celleb_scc_epci.c b/arch/powerpc/platforms/cell/celleb_scc_epci.c
deleted file mode 100644
index 9438bbed402f..000000000000
--- a/arch/powerpc/platforms/cell/celleb_scc_epci.c
+++ /dev/null
@@ -1,428 +0,0 @@
-/*
- * Support for SCC external PCI
- *
- * (C) Copyright 2004-2007 TOSHIBA CORPORATION
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- */
-
-#undef DEBUG
-
-#include <linux/kernel.h>
-#include <linux/threads.h>
-#include <linux/pci.h>
-#include <linux/init.h>
-#include <linux/pci_regs.h>
-
-#include <asm/io.h>
-#include <asm/irq.h>
-#include <asm/prom.h>
-#include <asm/pci-bridge.h>
-#include <asm/ppc-pci.h>
-
-#include "celleb_scc.h"
-#include "celleb_pci.h"
-
-#define MAX_PCI_DEVICES 32
-#define MAX_PCI_FUNCTIONS 8
-
-#define iob() __asm__ __volatile__("eieio; sync":::"memory")
-
-static inline PCI_IO_ADDR celleb_epci_get_epci_base(
- struct pci_controller *hose)
-{
- /*
- * Note:
- * Celleb epci uses cfg_addr as a base address for
- * epci control registers.
- */
-
- return hose->cfg_addr;
-}
-
-static inline PCI_IO_ADDR celleb_epci_get_epci_cfg(
- struct pci_controller *hose)
-{
- /*
- * Note:
- * Celleb epci uses cfg_data as a base address for
- * configuration area for epci devices.
- */
-
- return hose->cfg_data;
-}
-
-static inline void clear_and_disable_master_abort_interrupt(
- struct pci_controller *hose)
-{
- PCI_IO_ADDR epci_base;
- PCI_IO_ADDR reg;
- epci_base = celleb_epci_get_epci_base(hose);
- reg = epci_base + PCI_COMMAND;
- out_be32(reg, in_be32(reg) | (PCI_STATUS_REC_MASTER_ABORT << 16));
-}
-
-static int celleb_epci_check_abort(struct pci_controller *hose,
- PCI_IO_ADDR addr)
-{
- PCI_IO_ADDR reg;
- PCI_IO_ADDR epci_base;
- u32 val;
-
- iob();
- epci_base = celleb_epci_get_epci_base(hose);
-
- reg = epci_base + PCI_COMMAND;
- val = in_be32(reg);
-
- if (val & (PCI_STATUS_REC_MASTER_ABORT << 16)) {
- out_be32(reg,
- (val & 0xffff) | (PCI_STATUS_REC_MASTER_ABORT << 16));
-
- /* clear PCI Controller error, FRE, PMFE */
- reg = epci_base + SCC_EPCI_STATUS;
- out_be32(reg, SCC_EPCI_INT_PAI);
-
- reg = epci_base + SCC_EPCI_VCSR;
- val = in_be32(reg) & 0xffff;
- val |= SCC_EPCI_VCSR_FRE;
- out_be32(reg, val);
-
- reg = epci_base + SCC_EPCI_VISTAT;
- out_be32(reg, SCC_EPCI_VISTAT_PMFE);
- return PCIBIOS_DEVICE_NOT_FOUND;
- }
-
- return PCIBIOS_SUCCESSFUL;
-}
-
-static PCI_IO_ADDR celleb_epci_make_config_addr(struct pci_bus *bus,
- struct pci_controller *hose, unsigned int devfn, int where)
-{
- PCI_IO_ADDR addr;
-
- if (bus != hose->bus)
- addr = celleb_epci_get_epci_cfg(hose) +
- (((bus->number & 0xff) << 16)
- | ((devfn & 0xff) << 8)
- | (where & 0xff)
- | 0x01000000);
- else
- addr = celleb_epci_get_epci_cfg(hose) +
- (((devfn & 0xff) << 8) | (where & 0xff));
-
- pr_debug("EPCI: config_addr = 0x%p\n", addr);
-
- return addr;
-}
-
-static int celleb_epci_read_config(struct pci_bus *bus,
- unsigned int devfn, int where, int size, u32 *val)
-{
- PCI_IO_ADDR epci_base;
- PCI_IO_ADDR addr;
- struct pci_controller *hose = pci_bus_to_host(bus);
-
- /* allignment check */
- BUG_ON(where % size);
-
- if (!celleb_epci_get_epci_cfg(hose))
- return PCIBIOS_DEVICE_NOT_FOUND;
-
- if (bus->number == hose->first_busno && devfn == 0) {
- /* EPCI controller self */
-
- epci_base = celleb_epci_get_epci_base(hose);
- addr = epci_base + where;
-
- switch (size) {
- case 1:
- *val = in_8(addr);
- break;
- case 2:
- *val = in_be16(addr);
- break;
- case 4:
- *val = in_be32(addr);
- break;
- default:
- return PCIBIOS_DEVICE_NOT_FOUND;
- }
-
- } else {
-
- clear_and_disable_master_abort_interrupt(hose);
- addr = celleb_epci_make_config_addr(bus, hose, devfn, where);
-
- switch (size) {
- case 1:
- *val = in_8(addr);
- break;
- case 2:
- *val = in_le16(addr);
- break;
- case 4:
- *val = in_le32(addr);
- break;
- default:
- return PCIBIOS_DEVICE_NOT_FOUND;
- }
- }
-
- pr_debug("EPCI: "
- "addr=0x%p, devfn=0x%x, where=0x%x, size=0x%x, val=0x%x\n",
- addr, devfn, where, size, *val);
-
- return celleb_epci_check_abort(hose, NULL);
-}
-
-static int celleb_epci_write_config(struct pci_bus *bus,
- unsigned int devfn, int where, int size, u32 val)
-{
- PCI_IO_ADDR epci_base;
- PCI_IO_ADDR addr;
- struct pci_controller *hose = pci_bus_to_host(bus);
-
- /* allignment check */
- BUG_ON(where % size);
-
- if (!celleb_epci_get_epci_cfg(hose))
- return PCIBIOS_DEVICE_NOT_FOUND;
-
- if (bus->number == hose->first_busno && devfn == 0) {
- /* EPCI controller self */
-
- epci_base = celleb_epci_get_epci_base(hose);
- addr = epci_base + where;
-
- switch (size) {
- case 1:
- out_8(addr, val);
- break;
- case 2:
- out_be16(addr, val);
- break;
- case 4:
- out_be32(addr, val);
- break;
- default:
- return PCIBIOS_DEVICE_NOT_FOUND;
- }
-
- } else {
-
- clear_and_disable_master_abort_interrupt(hose);
- addr = celleb_epci_make_config_addr(bus, hose, devfn, where);
-
- switch (size) {
- case 1:
- out_8(addr, val);
- break;
- case 2:
- out_le16(addr, val);
- break;
- case 4:
- out_le32(addr, val);
- break;
- default:
- return PCIBIOS_DEVICE_NOT_FOUND;
- }
- }
-
- return celleb_epci_check_abort(hose, addr);
-}
-
-struct pci_ops celleb_epci_ops = {
- .read = celleb_epci_read_config,
- .write = celleb_epci_write_config,
-};
-
-/* to be moved in FW */
-static int __init celleb_epci_init(struct pci_controller *hose)
-{
- u32 val;
- PCI_IO_ADDR reg;
- PCI_IO_ADDR epci_base;
- int hwres = 0;
-
- epci_base = celleb_epci_get_epci_base(hose);
-
- /* PCI core reset(Internal bus and PCI clock) */
- reg = epci_base + SCC_EPCI_CKCTRL;
- val = in_be32(reg);
- if (val == 0x00030101)
- hwres = 1;
- else {
- val &= ~(SCC_EPCI_CKCTRL_CRST0 | SCC_EPCI_CKCTRL_CRST1);
- out_be32(reg, val);
-
- /* set PCI core clock */
- val = in_be32(reg);
- val |= (SCC_EPCI_CKCTRL_OCLKEN | SCC_EPCI_CKCTRL_LCLKEN);
- out_be32(reg, val);
-
- /* release PCI core reset (internal bus) */
- val = in_be32(reg);
- val |= SCC_EPCI_CKCTRL_CRST0;
- out_be32(reg, val);
-
- /* set PCI clock select */
- reg = epci_base + SCC_EPCI_CLKRST;
- val = in_be32(reg);
- val &= ~SCC_EPCI_CLKRST_CKS_MASK;
- val |= SCC_EPCI_CLKRST_CKS_2;
- out_be32(reg, val);
-
- /* set arbiter */
- reg = epci_base + SCC_EPCI_ABTSET;
- out_be32(reg, 0x0f1f001f); /* temporary value */
-
- /* buffer on */
- reg = epci_base + SCC_EPCI_CLKRST;
- val = in_be32(reg);
- val |= SCC_EPCI_CLKRST_BC;
- out_be32(reg, val);
-
- /* PCI clock enable */
- val = in_be32(reg);
- val |= SCC_EPCI_CLKRST_PCKEN;
- out_be32(reg, val);
-
- /* release PCI core reset (all) */
- reg = epci_base + SCC_EPCI_CKCTRL;
- val = in_be32(reg);
- val |= (SCC_EPCI_CKCTRL_CRST0 | SCC_EPCI_CKCTRL_CRST1);
- out_be32(reg, val);
-
- /* set base translation registers. (already set by Beat) */
-
- /* set base address masks. (already set by Beat) */
- }
-
- /* release interrupt masks and clear all interrupts */
- reg = epci_base + SCC_EPCI_INTSET;
- out_be32(reg, 0x013f011f); /* all interrupts enable */
- reg = epci_base + SCC_EPCI_VIENAB;
- val = SCC_EPCI_VIENAB_PMPEE | SCC_EPCI_VIENAB_PMFEE;
- out_be32(reg, val);
- reg = epci_base + SCC_EPCI_STATUS;
- out_be32(reg, 0xffffffff);
- reg = epci_base + SCC_EPCI_VISTAT;
- out_be32(reg, 0xffffffff);
-
- /* disable PCI->IB address translation */
- reg = epci_base + SCC_EPCI_VCSR;
- val = in_be32(reg);
- val &= ~(SCC_EPCI_VCSR_DR | SCC_EPCI_VCSR_AT);
- out_be32(reg, val);
-
- /* set base addresses. (no need to set?) */
-
- /* memory space, bus master enable */
- reg = epci_base + PCI_COMMAND;
- val = PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER;
- out_be32(reg, val);
-
- /* endian mode setup */
- reg = epci_base + SCC_EPCI_ECMODE;
- val = 0x00550155;
- out_be32(reg, val);
-
- /* set control option */
- reg = epci_base + SCC_EPCI_CNTOPT;
- val = in_be32(reg);
- val |= SCC_EPCI_CNTOPT_O2PMB;
- out_be32(reg, val);
-
- /* XXX: temporay: set registers for address conversion setup */
- reg = epci_base + SCC_EPCI_CNF10_REG;
- out_be32(reg, 0x80000008);
- reg = epci_base + SCC_EPCI_CNF14_REG;
- out_be32(reg, 0x40000008);
-
- reg = epci_base + SCC_EPCI_BAM0;
- out_be32(reg, 0x80000000);
- reg = epci_base + SCC_EPCI_BAM1;
- out_be32(reg, 0xe0000000);
-
- reg = epci_base + SCC_EPCI_PVBAT;
- out_be32(reg, 0x80000000);
-
- if (!hwres) {
- /* release external PCI reset */
- reg = epci_base + SCC_EPCI_CLKRST;
- val = in_be32(reg);
- val |= SCC_EPCI_CLKRST_PCIRST;
- out_be32(reg, val);
- }
-
- return 0;
-}
-
-static int __init celleb_setup_epci(struct device_node *node,
- struct pci_controller *hose)
-{
- struct resource r;
-
- pr_debug("PCI: celleb_setup_epci()\n");
-
- /*
- * Note:
- * Celleb epci uses cfg_addr and cfg_data member of
- * pci_controller structure in irregular way.
- *
- * cfg_addr is used to map for control registers of
- * celleb epci.
- *
- * cfg_data is used for configuration area of devices
- * on Celleb epci buses.
- */
-
- if (of_address_to_resource(node, 0, &r))
- goto error;
- hose->cfg_addr = ioremap(r.start, resource_size(&r));
- if (!hose->cfg_addr)
- goto error;
- pr_debug("EPCI: cfg_addr map 0x%016llx->0x%016lx + 0x%016llx\n",
- r.start, (unsigned long)hose->cfg_addr, resource_size(&r));
-
- if (of_address_to_resource(node, 2, &r))
- goto error;
- hose->cfg_data = ioremap(r.start, resource_size(&r));
- if (!hose->cfg_data)
- goto error;
- pr_debug("EPCI: cfg_data map 0x%016llx->0x%016lx + 0x%016llx\n",
- r.start, (unsigned long)hose->cfg_data, resource_size(&r));
-
- hose->ops = &celleb_epci_ops;
- celleb_epci_init(hose);
-
- return 0;
-
-error:
- if (hose->cfg_addr)
- iounmap(hose->cfg_addr);
-
- if (hose->cfg_data)
- iounmap(hose->cfg_data);
- return 1;
-}
-
-struct celleb_phb_spec celleb_epci_spec __initdata = {
- .setup = celleb_setup_epci,
- .ops = &spiderpci_ops,
- .iowa_init = &spiderpci_iowa_init,
- .iowa_data = (void *)0,
-};
diff --git a/arch/powerpc/platforms/cell/celleb_scc_pciex.c b/arch/powerpc/platforms/cell/celleb_scc_pciex.c
deleted file mode 100644
index 94170e4f2ce7..000000000000
--- a/arch/powerpc/platforms/cell/celleb_scc_pciex.c
+++ /dev/null
@@ -1,538 +0,0 @@
-/*
- * Support for Celleb PCI-Express.
- *
- * (C) Copyright 2007-2008 TOSHIBA CORPORATION
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- */
-
-#undef DEBUG
-
-#include <linux/kernel.h>
-#include <linux/pci.h>
-#include <linux/string.h>
-#include <linux/slab.h>
-#include <linux/init.h>
-#include <linux/delay.h>
-#include <linux/interrupt.h>
-
-#include <asm/io.h>
-#include <asm/irq.h>
-#include <asm/iommu.h>
-#include <asm/byteorder.h>
-
-#include "celleb_scc.h"
-#include "celleb_pci.h"
-
-#define PEX_IN(base, off) in_be32((void __iomem *)(base) + (off))
-#define PEX_OUT(base, off, data) out_be32((void __iomem *)(base) + (off), (data))
-
-static void scc_pciex_io_flush(struct iowa_bus *bus)
-{
- (void)PEX_IN(bus->phb->cfg_addr, PEXDMRDEN0);
-}
-
-/*
- * Memory space access to device on PCIEX
- */
-#define PCIEX_MMIO_READ(name, ret) \
-static ret scc_pciex_##name(const PCI_IO_ADDR addr) \
-{ \
- ret val = __do_##name(addr); \
- scc_pciex_io_flush(iowa_mem_find_bus(addr)); \
- return val; \
-}
-
-#define PCIEX_MMIO_READ_STR(name) \
-static void scc_pciex_##name(const PCI_IO_ADDR addr, void *buf, \
- unsigned long count) \
-{ \
- __do_##name(addr, buf, count); \
- scc_pciex_io_flush(iowa_mem_find_bus(addr)); \
-}
-
-PCIEX_MMIO_READ(readb, u8)
-PCIEX_MMIO_READ(readw, u16)
-PCIEX_MMIO_READ(readl, u32)
-PCIEX_MMIO_READ(readq, u64)
-PCIEX_MMIO_READ(readw_be, u16)
-PCIEX_MMIO_READ(readl_be, u32)
-PCIEX_MMIO_READ(readq_be, u64)
-PCIEX_MMIO_READ_STR(readsb)
-PCIEX_MMIO_READ_STR(readsw)
-PCIEX_MMIO_READ_STR(readsl)
-
-static void scc_pciex_memcpy_fromio(void *dest, const PCI_IO_ADDR src,
- unsigned long n)
-{
- __do_memcpy_fromio(dest, src, n);
- scc_pciex_io_flush(iowa_mem_find_bus(src));
-}
-
-/*
- * I/O port access to devices on PCIEX.
- */
-
-static inline unsigned long get_bus_address(struct pci_controller *phb,
- unsigned long port)
-{
- return port - ((unsigned long)(phb->io_base_virt) - _IO_BASE);
-}
-
-static u32 scc_pciex_read_port(struct pci_controller *phb,
- unsigned long port, int size)
-{
- unsigned int byte_enable;
- unsigned int cmd, shift;
- unsigned long addr;
- u32 data, ret;
-
- BUG_ON(((port & 0x3ul) + size) > 4);
-
- addr = get_bus_address(phb, port);
- shift = addr & 0x3ul;
- byte_enable = ((1 << size) - 1) << shift;
- cmd = PEXDCMND_IO_READ | (byte_enable << PEXDCMND_BYTE_EN_SHIFT);
- PEX_OUT(phb->cfg_addr, PEXDADRS, (addr & ~0x3ul));
- PEX_OUT(phb->cfg_addr, PEXDCMND, cmd);
- data = PEX_IN(phb->cfg_addr, PEXDRDATA);
- ret = (data >> (shift * 8)) & (0xFFFFFFFF >> ((4 - size) * 8));
-
- pr_debug("PCIEX:PIO READ:port=0x%lx, addr=0x%lx, size=%d, be=%x,"
- " cmd=%x, data=%x, ret=%x\n", port, addr, size, byte_enable,
- cmd, data, ret);
-
- return ret;
-}
-
-static void scc_pciex_write_port(struct pci_controller *phb,
- unsigned long port, int size, u32 val)
-{
- unsigned int byte_enable;
- unsigned int cmd, shift;
- unsigned long addr;
- u32 data;
-
- BUG_ON(((port & 0x3ul) + size) > 4);
-
- addr = get_bus_address(phb, port);
- shift = addr & 0x3ul;
- byte_enable = ((1 << size) - 1) << shift;
- cmd = PEXDCMND_IO_WRITE | (byte_enable << PEXDCMND_BYTE_EN_SHIFT);
- data = (val & (0xFFFFFFFF >> (4 - size) * 8)) << (shift * 8);
- PEX_OUT(phb->cfg_addr, PEXDADRS, (addr & ~0x3ul));
- PEX_OUT(phb->cfg_addr, PEXDCMND, cmd);
- PEX_OUT(phb->cfg_addr, PEXDWDATA, data);
-
- pr_debug("PCIEX:PIO WRITE:port=0x%lx, addr=%lx, size=%d, val=%x,"
- " be=%x, cmd=%x, data=%x\n", port, addr, size, val,
- byte_enable, cmd, data);
-}
-
-static u8 __scc_pciex_inb(struct pci_controller *phb, unsigned long port)
-{
- return (u8)scc_pciex_read_port(phb, port, 1);
-}
-
-static u16 __scc_pciex_inw(struct pci_controller *phb, unsigned long port)
-{
- u32 data;
- if ((port & 0x3ul) < 3)
- data = scc_pciex_read_port(phb, port, 2);
- else {
- u32 d1 = scc_pciex_read_port(phb, port, 1);
- u32 d2 = scc_pciex_read_port(phb, port + 1, 1);
- data = d1 | (d2 << 8);
- }
- return (u16)data;
-}
-
-static u32 __scc_pciex_inl(struct pci_controller *phb, unsigned long port)
-{
- unsigned int mod = port & 0x3ul;
- u32 data;
- if (mod == 0)
- data = scc_pciex_read_port(phb, port, 4);
- else {
- u32 d1 = scc_pciex_read_port(phb, port, 4 - mod);
- u32 d2 = scc_pciex_read_port(phb, port + 1, mod);
- data = d1 | (d2 << (mod * 8));
- }
- return data;
-}
-
-static void __scc_pciex_outb(struct pci_controller *phb,
- u8 val, unsigned long port)
-{
- scc_pciex_write_port(phb, port, 1, (u32)val);
-}
-
-static void __scc_pciex_outw(struct pci_controller *phb,
- u16 val, unsigned long port)
-{
- if ((port & 0x3ul) < 3)
- scc_pciex_write_port(phb, port, 2, (u32)val);
- else {
- u32 d1 = val & 0x000000FF;
- u32 d2 = (val & 0x0000FF00) >> 8;
- scc_pciex_write_port(phb, port, 1, d1);
- scc_pciex_write_port(phb, port + 1, 1, d2);
- }
-}
-
-static void __scc_pciex_outl(struct pci_controller *phb,
- u32 val, unsigned long port)
-{
- unsigned int mod = port & 0x3ul;
- if (mod == 0)
- scc_pciex_write_port(phb, port, 4, val);
- else {
- u32 d1 = val & (0xFFFFFFFFul >> (mod * 8));
- u32 d2 = val >> ((4 - mod) * 8);
- scc_pciex_write_port(phb, port, 4 - mod, d1);
- scc_pciex_write_port(phb, port + 1, mod, d2);
- }
-}
-
-#define PCIEX_PIO_FUNC(size, name) \
-static u##size scc_pciex_in##name(unsigned long port) \
-{ \
- struct iowa_bus *bus = iowa_pio_find_bus(port); \
- u##size data = __scc_pciex_in##name(bus->phb, port); \
- scc_pciex_io_flush(bus); \
- return data; \
-} \
-static void scc_pciex_ins##name(unsigned long p, void *b, unsigned long c) \
-{ \
- struct iowa_bus *bus = iowa_pio_find_bus(p); \
- __le##size *dst = b; \
- for (; c != 0; c--, dst++) \
- *dst = cpu_to_le##size(__scc_pciex_in##name(bus->phb, p)); \
- scc_pciex_io_flush(bus); \
-} \
-static void scc_pciex_out##name(u##size val, unsigned long port) \
-{ \
- struct iowa_bus *bus = iowa_pio_find_bus(port); \
- __scc_pciex_out##name(bus->phb, val, port); \
-} \
-static void scc_pciex_outs##name(unsigned long p, const void *b, \
- unsigned long c) \
-{ \
- struct iowa_bus *bus = iowa_pio_find_bus(p); \
- const __le##size *src = b; \
- for (; c != 0; c--, src++) \
- __scc_pciex_out##name(bus->phb, le##size##_to_cpu(*src), p); \
-}
-#define __le8 u8
-#define cpu_to_le8(x) (x)
-#define le8_to_cpu(x) (x)
-PCIEX_PIO_FUNC(8, b)
-PCIEX_PIO_FUNC(16, w)
-PCIEX_PIO_FUNC(32, l)
-
-static struct ppc_pci_io scc_pciex_ops = {
- .readb = scc_pciex_readb,
- .readw = scc_pciex_readw,
- .readl = scc_pciex_readl,
- .readq = scc_pciex_readq,
- .readw_be = scc_pciex_readw_be,
- .readl_be = scc_pciex_readl_be,
- .readq_be = scc_pciex_readq_be,
- .readsb = scc_pciex_readsb,
- .readsw = scc_pciex_readsw,
- .readsl = scc_pciex_readsl,
- .memcpy_fromio = scc_pciex_memcpy_fromio,
- .inb = scc_pciex_inb,
- .inw = scc_pciex_inw,
- .inl = scc_pciex_inl,
- .outb = scc_pciex_outb,
- .outw = scc_pciex_outw,
- .outl = scc_pciex_outl,
- .insb = scc_pciex_insb,
- .insw = scc_pciex_insw,
- .insl = scc_pciex_insl,
- .outsb = scc_pciex_outsb,
- .outsw = scc_pciex_outsw,
- .outsl = scc_pciex_outsl,
-};
-
-static int __init scc_pciex_iowa_init(struct iowa_bus *bus, void *data)
-{
- dma_addr_t dummy_page_da;
- void *dummy_page_va;
-
- dummy_page_va = kmalloc(PAGE_SIZE, GFP_KERNEL);
- if (!dummy_page_va) {
- pr_err("PCIEX:Alloc dummy_page_va failed\n");
- return -1;
- }
-
- dummy_page_da = dma_map_single(bus->phb->parent, dummy_page_va,
- PAGE_SIZE, DMA_FROM_DEVICE);
- if (dma_mapping_error(bus->phb->parent, dummy_page_da)) {
- pr_err("PCIEX:Map dummy page failed.\n");
- kfree(dummy_page_va);
- return -1;
- }
-
- PEX_OUT(bus->phb->cfg_addr, PEXDMRDADR0, dummy_page_da);
-
- return 0;
-}
-
-/*
- * config space access
- */
-#define MK_PEXDADRS(bus_no, dev_no, func_no, addr) \
- ((uint32_t)(((addr) & ~0x3UL) | \
- ((bus_no) << PEXDADRS_BUSNO_SHIFT) | \
- ((dev_no) << PEXDADRS_DEVNO_SHIFT) | \
- ((func_no) << PEXDADRS_FUNCNO_SHIFT)))
-
-#define MK_PEXDCMND_BYTE_EN(addr, size) \
- ((((0x1 << (size))-1) << ((addr) & 0x3)) << PEXDCMND_BYTE_EN_SHIFT)
-#define MK_PEXDCMND(cmd, addr, size) ((cmd) | MK_PEXDCMND_BYTE_EN(addr, size))
-
-static uint32_t config_read_pciex_dev(unsigned int __iomem *base,
- uint64_t bus_no, uint64_t dev_no, uint64_t func_no,
- uint64_t off, uint64_t size)
-{
- uint32_t ret;
- uint32_t addr, cmd;
-
- addr = MK_PEXDADRS(bus_no, dev_no, func_no, off);
- cmd = MK_PEXDCMND(PEXDCMND_CONFIG_READ, off, size);
- PEX_OUT(base, PEXDADRS, addr);
- PEX_OUT(base, PEXDCMND, cmd);
- ret = (PEX_IN(base, PEXDRDATA)
- >> ((off & (4-size)) * 8)) & ((0x1 << (size * 8)) - 1);
- return ret;
-}
-
-static void config_write_pciex_dev(unsigned int __iomem *base, uint64_t bus_no,
- uint64_t dev_no, uint64_t func_no, uint64_t off, uint64_t size,
- uint32_t data)
-{
- uint32_t addr, cmd;
-
- addr = MK_PEXDADRS(bus_no, dev_no, func_no, off);
- cmd = MK_PEXDCMND(PEXDCMND_CONFIG_WRITE, off, size);
- PEX_OUT(base, PEXDADRS, addr);
- PEX_OUT(base, PEXDCMND, cmd);
- PEX_OUT(base, PEXDWDATA,
- (data & ((0x1 << (size * 8)) - 1)) << ((off & (4-size)) * 8));
-}
-
-#define MK_PEXCADRS_BYTE_EN(off, len) \
- ((((0x1 << (len)) - 1) << ((off) & 0x3)) << PEXCADRS_BYTE_EN_SHIFT)
-#define MK_PEXCADRS(cmd, addr, size) \
- ((cmd) | MK_PEXCADRS_BYTE_EN(addr, size) | ((addr) & ~0x3))
-static uint32_t config_read_pciex_rc(unsigned int __iomem *base,
- uint32_t where, uint32_t size)
-{
- PEX_OUT(base, PEXCADRS, MK_PEXCADRS(PEXCADRS_CMD_READ, where, size));
- return (PEX_IN(base, PEXCRDATA)
- >> ((where & (4 - size)) * 8)) & ((0x1 << (size * 8)) - 1);
-}
-
-static void config_write_pciex_rc(unsigned int __iomem *base, uint32_t where,
- uint32_t size, uint32_t val)
-{
- uint32_t data;
-
- data = (val & ((0x1 << (size * 8)) - 1)) << ((where & (4 - size)) * 8);
- PEX_OUT(base, PEXCADRS, MK_PEXCADRS(PEXCADRS_CMD_WRITE, where, size));
- PEX_OUT(base, PEXCWDATA, data);
-}
-
-/* Interfaces */
-/* Note: Work-around
- * On SCC PCIEXC, one device is seen on all 32 dev_no.
- * As the SCC PCIEXC can have only one device on the bus, we look at
- * only one dev_no (dev_no = 1).
- */
-static int scc_pciex_read_config(struct pci_bus *bus, unsigned int devfn,
- int where, int size, unsigned int *val)
-{
- struct pci_controller *phb = pci_bus_to_host(bus);
-
- if (bus->number == phb->first_busno && PCI_SLOT(devfn) != 1) {
- *val = ~0;
- return PCIBIOS_DEVICE_NOT_FOUND;
- }
-
- if (bus->number == 0 && PCI_SLOT(devfn) == 0)
- *val = config_read_pciex_rc(phb->cfg_addr, where, size);
- else
- *val = config_read_pciex_dev(phb->cfg_addr, bus->number,
- PCI_SLOT(devfn), PCI_FUNC(devfn), where, size);
-
- return PCIBIOS_SUCCESSFUL;
-}
-
-static int scc_pciex_write_config(struct pci_bus *bus, unsigned int devfn,
- int where, int size, unsigned int val)
-{
- struct pci_controller *phb = pci_bus_to_host(bus);
-
- if (bus->number == phb->first_busno && PCI_SLOT(devfn) != 1)
- return PCIBIOS_DEVICE_NOT_FOUND;
-
- if (bus->number == 0 && PCI_SLOT(devfn) == 0)
- config_write_pciex_rc(phb->cfg_addr, where, size, val);
- else
- config_write_pciex_dev(phb->cfg_addr, bus->number,
- PCI_SLOT(devfn), PCI_FUNC(devfn), where, size, val);
- return PCIBIOS_SUCCESSFUL;
-}
-
-static struct pci_ops scc_pciex_pci_ops = {
- .read = scc_pciex_read_config,
- .write = scc_pciex_write_config,
-};
-
-static void pciex_clear_intr_all(unsigned int __iomem *base)
-{
- PEX_OUT(base, PEXAERRSTS, 0xffffffff);
- PEX_OUT(base, PEXPRERRSTS, 0xffffffff);
- PEX_OUT(base, PEXINTSTS, 0xffffffff);
-}
-
-#if 0
-static void pciex_disable_intr_all(unsigned int *base)
-{
- PEX_OUT(base, PEXINTMASK, 0x0);
- PEX_OUT(base, PEXAERRMASK, 0x0);
- PEX_OUT(base, PEXPRERRMASK, 0x0);
- PEX_OUT(base, PEXVDMASK, 0x0);
-}
-#endif
-
-static void pciex_enable_intr_all(unsigned int __iomem *base)
-{
- PEX_OUT(base, PEXINTMASK, 0x0000e7f1);
- PEX_OUT(base, PEXAERRMASK, 0x03ff01ff);
- PEX_OUT(base, PEXPRERRMASK, 0x0001010f);
- PEX_OUT(base, PEXVDMASK, 0x00000001);
-}
-
-static void pciex_check_status(unsigned int __iomem *base)
-{
- uint32_t err = 0;
- uint32_t intsts, aerr, prerr, rcvcp, lenerr;
- uint32_t maea, maec;
-
- intsts = PEX_IN(base, PEXINTSTS);
- aerr = PEX_IN(base, PEXAERRSTS);
- prerr = PEX_IN(base, PEXPRERRSTS);
- rcvcp = PEX_IN(base, PEXRCVCPLIDA);
- lenerr = PEX_IN(base, PEXLENERRIDA);
-
- if (intsts || aerr || prerr || rcvcp || lenerr)
- err = 1;
-
- pr_info("PCEXC interrupt!!\n");
- pr_info("PEXINTSTS :0x%08x\n", intsts);
- pr_info("PEXAERRSTS :0x%08x\n", aerr);
- pr_info("PEXPRERRSTS :0x%08x\n", prerr);
- pr_info("PEXRCVCPLIDA :0x%08x\n", rcvcp);
- pr_info("PEXLENERRIDA :0x%08x\n", lenerr);
-
- /* print detail of Protection Error */
- if (intsts & 0x00004000) {
- uint32_t i, n;
- for (i = 0; i < 4; i++) {
- n = 1 << i;
- if (prerr & n) {
- maea = PEX_IN(base, PEXMAEA(i));
- maec = PEX_IN(base, PEXMAEC(i));
- pr_info("PEXMAEC%d :0x%08x\n", i, maec);
- pr_info("PEXMAEA%d :0x%08x\n", i, maea);
- }
- }
- }
-
- if (err)
- pciex_clear_intr_all(base);
-}
-
-static irqreturn_t pciex_handle_internal_irq(int irq, void *dev_id)
-{
- struct pci_controller *phb = dev_id;
-
- pr_debug("PCIEX:pciex_handle_internal_irq(irq=%d)\n", irq);
-
- BUG_ON(phb->cfg_addr == NULL);
-
- pciex_check_status(phb->cfg_addr);
-
- return IRQ_HANDLED;
-}
-
-static __init int celleb_setup_pciex(struct device_node *node,
- struct pci_controller *phb)
-{
- struct resource r;
- int virq;
-
- /* SMMIO registers; used inside this file */
- if (of_address_to_resource(node, 0, &r)) {
- pr_err("PCIEXC:Failed to get config resource.\n");
- return 1;
- }
- phb->cfg_addr = ioremap(r.start, resource_size(&r));
- if (!phb->cfg_addr) {
- pr_err("PCIEXC:Failed to remap SMMIO region.\n");
- return 1;
- }
-
- /* cfg_data is not used; the cmd and data regs sit next to the address reg */
- phb->cfg_data = NULL;
-
- /* set pci_ops */
- phb->ops = &scc_pciex_pci_ops;
-
- /* internal interrupt handler */
- virq = irq_of_parse_and_map(node, 1);
- if (!virq) {
- pr_err("PCIEXC:Failed to map irq\n");
- goto error;
- }
- if (request_irq(virq, pciex_handle_internal_irq,
- 0, "pciex", (void *)phb)) {
- pr_err("PCIEXC:Failed to request irq\n");
- goto error;
- }
-
- /* enable all interrupts */
- pciex_clear_intr_all(phb->cfg_addr);
- pciex_enable_intr_all(phb->cfg_addr);
- /* MSI: TBD */
-
- return 0;
-
-error:
- phb->cfg_data = NULL;
- if (phb->cfg_addr)
- iounmap(phb->cfg_addr);
- phb->cfg_addr = NULL;
- return 1;
-}
-
-struct celleb_phb_spec celleb_pciex_spec __initdata = {
- .setup = celleb_setup_pciex,
- .ops = &scc_pciex_ops,
- .iowa_init = &scc_pciex_iowa_init,
-};
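The deleted scc_pciex_{read,write}_port() helpers above (and the MK_PEXDCMND_BYTE_EN macro) all turn an unaligned port address into a byte-enable mask plus a data word shifted into the right byte lanes. The following is a minimal user-space sketch of that arithmetic only; DEMO_BYTE_EN_SHIFT and the function names are stand-ins for this demo, not the real celleb_scc.h definitions.

/*
 * Illustrative only: how the deleted PIO helpers computed the byte
 * enables and the shifted write data for sub-word accesses.
 */
#include <stdint.h>
#include <stdio.h>

#define DEMO_BYTE_EN_SHIFT	4	/* assumption for the demo */

static uint32_t demo_byte_enable(unsigned long addr, int size)
{
	unsigned int shift = addr & 0x3ul;

	/* 'size' consecutive byte lanes, starting at the lane addr falls in */
	return ((1u << size) - 1) << shift;
}

static uint32_t demo_write_data(uint32_t val, unsigned long addr, int size)
{
	unsigned int shift = addr & 0x3ul;

	/* mask val down to 'size' bytes, then move it into its lanes */
	return (val & (0xFFFFFFFFu >> ((4 - size) * 8))) << (shift * 8);
}

int main(void)
{
	/* a 2-byte write of 0xBEEF at port offset 0x102 */
	printf("be=%x data=%x\n",
	       demo_byte_enable(0x102, 2) << DEMO_BYTE_EN_SHIFT,
	       demo_write_data(0xBEEF, 0x102, 2));
	return 0;
}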
diff --git a/arch/powerpc/platforms/cell/celleb_scc_sio.c b/arch/powerpc/platforms/cell/celleb_scc_sio.c
deleted file mode 100644
index c8eb57193826..000000000000
--- a/arch/powerpc/platforms/cell/celleb_scc_sio.c
+++ /dev/null
@@ -1,99 +0,0 @@
-/*
- * setup serial port in SCC
- *
- * (C) Copyright 2006-2007 TOSHIBA CORPORATION
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- */
-
-#include <linux/tty.h>
-#include <linux/serial.h>
-#include <linux/serial_core.h>
-#include <linux/console.h>
-
-#include <asm/io.h>
-#include <asm/prom.h>
-
-/* sio irq0=0xb00010022 irq1=0xb00010023 irq2=0xb00010024
- mmio=0xfff000-0x1000,0xff2000-0x1000 */
-static int txx9_serial_bitmap __initdata;
-
-static struct {
- uint32_t offset;
- uint32_t index;
-} txx9_scc_tab[3] __initdata = {
- { 0x300, 0 }, /* 0xFFF300 */
- { 0x400, 0 }, /* 0xFFF400 */
- { 0x800, 1 } /* 0xFF2800 */
-};
-
-static int __init txx9_serial_init(void)
-{
- extern int early_serial_txx9_setup(struct uart_port *port);
- struct device_node *node;
- int i;
- struct uart_port req;
- struct of_phandle_args irq;
- struct resource res;
-
- for_each_compatible_node(node, "serial", "toshiba,sio-scc") {
- for (i = 0; i < ARRAY_SIZE(txx9_scc_tab); i++) {
- if (!(txx9_serial_bitmap & (1<<i)))
- continue;
-
- if (of_irq_parse_one(node, i, &irq))
- continue;
- if (of_address_to_resource(node,
- txx9_scc_tab[i].index, &res))
- continue;
-
- memset(&req, 0, sizeof(req));
- req.line = i;
- req.iotype = UPIO_MEM;
- req.mapbase = res.start + txx9_scc_tab[i].offset;
-#ifdef CONFIG_SERIAL_TXX9_CONSOLE
- req.membase = ioremap(req.mapbase, 0x24);
-#endif
- req.irq = irq_create_of_mapping(&irq);
- req.flags |= UPF_IOREMAP | UPF_BUGGY_UART
- /*HAVE_CTS_LINE*/;
- req.uartclk = 83300000;
- early_serial_txx9_setup(&req);
- }
- }
-
- return 0;
-}
-
-static int __init txx9_serial_config(char *ptr)
-{
- int i;
-
- for (;;) {
- switch (get_option(&ptr, &i)) {
- default:
- return 0;
- case 2:
- txx9_serial_bitmap |= 1 << i;
- break;
- case 1:
- txx9_serial_bitmap |= 1 << i;
- return 0;
- }
- }
-}
-__setup("txx9_serial=", txx9_serial_config);
-
-console_initcall(txx9_serial_init);
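For reference, txx9_serial_config() above walks a comma-separated "txx9_serial=" argument with the kernel's get_option() and ORs each port index into a bitmap that txx9_serial_init() later tests. The sketch below is a rough user-space approximation of that parsing, using strtol() in place of get_option(); parse_serial_bitmap() is a made-up name for the demo.

/*
 * Illustrative only: turn "0,2" into a bitmap selecting SCC serial
 * ports 0 and 2, the same way the deleted __setup handler did.
 */
#include <stdio.h>
#include <stdlib.h>

static int parse_serial_bitmap(const char *ptr)
{
	int bitmap = 0;
	char *end;

	for (;;) {
		long i = strtol(ptr, &end, 0);

		if (end == ptr)		/* no number parsed */
			return bitmap;
		bitmap |= 1 << i;
		if (*end != ',')	/* last entry */
			return bitmap;
		ptr = end + 1;		/* skip the comma */
	}
}

int main(void)
{
	/* "txx9_serial=0,2" would select SCC ports 0 and 2 */
	printf("bitmap=%#x\n", parse_serial_bitmap("0,2"));
	return 0;
}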
diff --git a/arch/powerpc/platforms/cell/celleb_scc_uhc.c b/arch/powerpc/platforms/cell/celleb_scc_uhc.c
deleted file mode 100644
index d63b720bfe3a..000000000000
--- a/arch/powerpc/platforms/cell/celleb_scc_uhc.c
+++ /dev/null
@@ -1,95 +0,0 @@
-/*
- * SCC (Super Companion Chip) UHC setup
- *
- * (C) Copyright 2006-2007 TOSHIBA CORPORATION
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- */
-
-#include <linux/kernel.h>
-#include <linux/pci.h>
-
-#include <asm/delay.h>
-#include <asm/io.h>
-#include <asm/machdep.h>
-
-#include "celleb_scc.h"
-
-#define UHC_RESET_WAIT_MAX 10000
-
-static inline int uhc_clkctrl_ready(u32 val)
-{
-	const u32 mask = SCC_UHC_USBCEN | SCC_UHC_USBEN;
-	return ((val & mask) == mask);
-}
-
-/*
- * UHC (USB host controller) enable function.
- * Affects both the OHCI and EHCI core modules.
- */
-static void enable_scc_uhc(struct pci_dev *dev)
-{
- void __iomem *uhc_base;
- u32 __iomem *uhc_clkctrl;
- u32 __iomem *uhc_ecmode;
- u32 val = 0;
- int i;
-
- if (!machine_is(celleb_beat) &&
- !machine_is(celleb_native))
- return;
-
- uhc_base = ioremap(pci_resource_start(dev, 0),
- pci_resource_len(dev, 0));
- if (!uhc_base) {
- printk(KERN_ERR "failed to map UHC register base.\n");
- return;
- }
- uhc_clkctrl = uhc_base + SCC_UHC_CKRCTRL;
- uhc_ecmode = uhc_base + SCC_UHC_ECMODE;
-
- /* setup for normal mode */
- val |= SCC_UHC_F48MCKLEN;
- out_be32(uhc_clkctrl, val);
- val |= SCC_UHC_PHY_SUSPEND_SEL;
- out_be32(uhc_clkctrl, val);
- udelay(10);
- val |= SCC_UHC_PHYEN;
- out_be32(uhc_clkctrl, val);
- udelay(50);
-
- /* disable reset */
- val |= SCC_UHC_HCLKEN;
- out_be32(uhc_clkctrl, val);
- val |= (SCC_UHC_USBCEN | SCC_UHC_USBEN);
- out_be32(uhc_clkctrl, val);
- i = 0;
- while (!uhc_clkctrl_ready(in_be32(uhc_clkctrl))) {
- udelay(10);
- if (i++ > UHC_RESET_WAIT_MAX) {
- printk(KERN_ERR "Failed to disable UHC reset %x\n",
- in_be32(uhc_clkctrl));
- break;
- }
- }
-
- /* Endian Conversion Mode for Master ALL area */
- out_be32(uhc_ecmode, SCC_UHC_ECMODE_BY_BYTE);
-
- iounmap(uhc_base);
-}
-
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_TOSHIBA_2,
- PCI_DEVICE_ID_TOSHIBA_SCC_USB, enable_scc_uhc);
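enable_scc_uhc() above is essentially a bounded polling loop: it keeps reading the clock-control register, with a udelay(10) between polls, until both SCC_UHC_USBCEN and SCC_UHC_USBEN are reported, and gives up after UHC_RESET_WAIT_MAX iterations (the deleted uhc_clkctrl_ready() masked SCC_UHC_USBCEN twice, which looks like a typo for that pair). Below is a self-contained sketch of the same pattern; the bit values and read_reg() are stand-ins for the demo.

/*
 * Illustrative only: bounded polling until a register reports that
 * both clock-enable bits are set.
 */
#include <stdint.h>
#include <stdio.h>

#define DEMO_USBCEN	0x01u		/* stand-in bit values */
#define DEMO_USBEN	0x02u
#define DEMO_WAIT_MAX	10000

static uint32_t read_reg(void)
{
	static int calls;

	/* pretend the clocks come up after a few polls */
	return ++calls > 3 ? (DEMO_USBCEN | DEMO_USBEN) : 0;
}

int main(void)
{
	const uint32_t ready = DEMO_USBCEN | DEMO_USBEN;
	int i = 0;

	while ((read_reg() & ready) != ready) {
		/* the kernel code udelay(10)s here; we just count */
		if (i++ > DEMO_WAIT_MAX) {
			fprintf(stderr, "UHC clocks never became ready\n");
			return 1;
		}
	}
	printf("UHC clocks ready after %d polls\n", i);
	return 0;
}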
diff --git a/arch/powerpc/platforms/cell/celleb_setup.c b/arch/powerpc/platforms/cell/celleb_setup.c
deleted file mode 100644
index 90be8ec51686..000000000000
--- a/arch/powerpc/platforms/cell/celleb_setup.c
+++ /dev/null
@@ -1,243 +0,0 @@
-/*
- * Celleb setup code
- *
- * (C) Copyright 2006-2007 TOSHIBA CORPORATION
- *
- * This code is based on arch/powerpc/platforms/cell/setup.c:
- * Copyright (C) 1995 Linus Torvalds
- * Adapted from 'alpha' version by Gary Thomas
- * Modified by Cort Dougan (cort@cs.nmt.edu)
- * Modified by PPC64 Team, IBM Corp
- * Modified by Cell Team, IBM Deutschland Entwicklung GmbH
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- */
-
-#undef DEBUG
-
-#include <linux/cpu.h>
-#include <linux/sched.h>
-#include <linux/kernel.h>
-#include <linux/export.h>
-#include <linux/mm.h>
-#include <linux/stddef.h>
-#include <linux/unistd.h>
-#include <linux/reboot.h>
-#include <linux/init.h>
-#include <linux/delay.h>
-#include <linux/irq.h>
-#include <linux/seq_file.h>
-#include <linux/root_dev.h>
-#include <linux/console.h>
-#include <linux/of_platform.h>
-
-#include <asm/mmu.h>
-#include <asm/processor.h>
-#include <asm/io.h>
-#include <asm/prom.h>
-#include <asm/machdep.h>
-#include <asm/cputable.h>
-#include <asm/irq.h>
-#include <asm/time.h>
-#include <asm/spu_priv1.h>
-#include <asm/firmware.h>
-#include <asm/rtas.h>
-#include <asm/cell-regs.h>
-
-#include "beat_interrupt.h"
-#include "beat_wrapper.h"
-#include "beat.h"
-#include "celleb_pci.h"
-#include "interrupt.h"
-#include "pervasive.h"
-#include "ras.h"
-
-static char celleb_machine_type[128] = "Celleb";
-
-static void celleb_show_cpuinfo(struct seq_file *m)
-{
- struct device_node *root;
- const char *model = "";
-
- root = of_find_node_by_path("/");
- if (root)
- model = of_get_property(root, "model", NULL);
- /* we use "CHRP" to trick anaconda into installing FCx on Celleb */
- seq_printf(m, "machine\t\t: %s %s\n", celleb_machine_type, model);
- of_node_put(root);
-}
-
-static int __init celleb_machine_type_hack(char *ptr)
-{
- strlcpy(celleb_machine_type, ptr, sizeof(celleb_machine_type));
- return 0;
-}
-
-__setup("celleb_machine_type_hack=", celleb_machine_type_hack);
-
-static void celleb_progress(char *s, unsigned short hex)
-{
- printk("*** %04x : %s\n", hex, s ? s : "");
-}
-
-static void __init celleb_setup_arch_common(void)
-{
- /* init to some ~sane value until calibrate_delay() runs */
- loops_per_jiffy = 50000000;
-
-#ifdef CONFIG_DUMMY_CONSOLE
- conswitchp = &dummy_con;
-#endif
-}
-
-static const struct of_device_id celleb_bus_ids[] __initconst = {
- { .type = "scc", },
- { .type = "ioif", }, /* old style */
- {},
-};
-
-static int __init celleb_publish_devices(void)
-{
- /* Publish OF platform devices for southbridge IOs */
- of_platform_bus_probe(NULL, celleb_bus_ids, NULL);
-
- return 0;
-}
-machine_device_initcall(celleb_beat, celleb_publish_devices);
-machine_device_initcall(celleb_native, celleb_publish_devices);
-
-
-/*
- * functions for Celleb-Beat
- */
-static void __init celleb_setup_arch_beat(void)
-{
-#ifdef CONFIG_SPU_BASE
- spu_priv1_ops = &spu_priv1_beat_ops;
- spu_management_ops = &spu_management_of_ops;
-#endif
-
- celleb_setup_arch_common();
-}
-
-static int __init celleb_probe_beat(void)
-{
- unsigned long root = of_get_flat_dt_root();
-
- if (!of_flat_dt_is_compatible(root, "Beat"))
- return 0;
-
- powerpc_firmware_features |= FW_FEATURE_CELLEB_ALWAYS
- | FW_FEATURE_BEAT | FW_FEATURE_LPAR;
- hpte_init_beat_v3();
- pm_power_off = beat_power_off;
-
- return 1;
-}
-
-
-/*
- * functions for Celleb-native
- */
-static void __init celleb_init_IRQ_native(void)
-{
- iic_init_IRQ();
- spider_init_IRQ();
-}
-
-static void __init celleb_setup_arch_native(void)
-{
-#ifdef CONFIG_SPU_BASE
- spu_priv1_ops = &spu_priv1_mmio_ops;
- spu_management_ops = &spu_management_of_ops;
-#endif
-
- cbe_regs_init();
-
-#ifdef CONFIG_CBE_RAS
- cbe_ras_init();
-#endif
-
-#ifdef CONFIG_SMP
- smp_init_cell();
-#endif
-
- cbe_pervasive_init();
-
- /* XXX: nvram initialization should be added */
-
- celleb_setup_arch_common();
-}
-
-static int __init celleb_probe_native(void)
-{
- unsigned long root = of_get_flat_dt_root();
-
- if (of_flat_dt_is_compatible(root, "Beat") ||
- !of_flat_dt_is_compatible(root, "TOSHIBA,Celleb"))
- return 0;
-
- powerpc_firmware_features |= FW_FEATURE_CELLEB_ALWAYS;
- hpte_init_native();
- pm_power_off = rtas_power_off;
-
- return 1;
-}
-
-
-/*
- * machine definitions
- */
-define_machine(celleb_beat) {
- .name = "Cell Reference Set (Beat)",
- .probe = celleb_probe_beat,
- .setup_arch = celleb_setup_arch_beat,
- .show_cpuinfo = celleb_show_cpuinfo,
- .restart = beat_restart,
- .halt = beat_halt,
- .get_rtc_time = beat_get_rtc_time,
- .set_rtc_time = beat_set_rtc_time,
- .calibrate_decr = generic_calibrate_decr,
- .progress = celleb_progress,
- .power_save = beat_power_save,
- .nvram_size = beat_nvram_get_size,
- .nvram_read = beat_nvram_read,
- .nvram_write = beat_nvram_write,
- .set_dabr = beat_set_xdabr,
- .init_IRQ = beatic_init_IRQ,
- .get_irq = beatic_get_irq,
- .pci_probe_mode = celleb_pci_probe_mode,
- .pci_setup_phb = celleb_setup_phb,
-#ifdef CONFIG_KEXEC
- .kexec_cpu_down = beat_kexec_cpu_down,
-#endif
-};
-
-define_machine(celleb_native) {
- .name = "Cell Reference Set (native)",
- .probe = celleb_probe_native,
- .setup_arch = celleb_setup_arch_native,
- .show_cpuinfo = celleb_show_cpuinfo,
- .restart = rtas_restart,
- .halt = rtas_halt,
- .get_boot_time = rtas_get_boot_time,
- .get_rtc_time = rtas_get_rtc_time,
- .set_rtc_time = rtas_set_rtc_time,
- .calibrate_decr = generic_calibrate_decr,
- .progress = celleb_progress,
- .pci_probe_mode = celleb_pci_probe_mode,
- .pci_setup_phb = celleb_setup_phb,
- .init_IRQ = celleb_init_IRQ_native,
-};
diff --git a/arch/powerpc/platforms/cell/interrupt.c b/arch/powerpc/platforms/cell/interrupt.c
index 4c11421847be..3af8324c122e 100644
--- a/arch/powerpc/platforms/cell/interrupt.c
+++ b/arch/powerpc/platforms/cell/interrupt.c
@@ -163,7 +163,7 @@ static unsigned int iic_get_irq(void)
void iic_setup_cpu(void)
{
- out_be64(this_cpu_ptr(&cpu_iic.regs->prio), 0xff);
+ out_be64(&this_cpu_ptr(&cpu_iic)->regs->prio, 0xff);
}
u8 iic_get_target_id(int cpu)
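The iic_setup_cpu() hunk above reorders the per-CPU lookup: cpu_iic is a per-CPU structure whose regs member is a pointer, so this_cpu_ptr() must resolve this CPU's copy of the structure first, and only then may regs be dereferenced. A small stand-alone illustration follows; demo_this_cpu_ptr() and the demo types are stand-ins for the kernel's percpu machinery, not the real API.

/*
 * Illustrative only: resolve the per-CPU struct, then follow its
 * embedded pointer -- the order fixed in the hunk above.
 */
#include <stdio.h>

struct demo_regs { unsigned long prio; };
struct demo_iic  { struct demo_regs *regs; };

/* pretend there are two CPUs, each with its own iic instance */
static struct demo_regs regs_per_cpu[2];
static struct demo_iic  iic_per_cpu[2] = {
	{ .regs = &regs_per_cpu[0] },
	{ .regs = &regs_per_cpu[1] },
};
static int current_cpu = 1;

static struct demo_iic *demo_this_cpu_ptr(void)
{
	return &iic_per_cpu[current_cpu];
}

int main(void)
{
	/* correct: per-CPU lookup first, then ->regs->prio */
	demo_this_cpu_ptr()->regs->prio = 0xff;
	printf("cpu1 prio = %#lx\n", regs_per_cpu[1].prio);
	return 0;
}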
diff --git a/arch/powerpc/platforms/cell/iommu.c b/arch/powerpc/platforms/cell/iommu.c
index c7c8720aa39f..21b502398bf3 100644
--- a/arch/powerpc/platforms/cell/iommu.c
+++ b/arch/powerpc/platforms/cell/iommu.c
@@ -39,6 +39,7 @@
#include <asm/firmware.h>
#include <asm/cell-regs.h>
+#include "cell.h"
#include "interrupt.h"
/* Define CELL_IOMMU_REAL_UNMAP to actually unmap non-used pages
@@ -197,7 +198,7 @@ static int tce_build_cell(struct iommu_table *tbl, long index, long npages,
io_pte = (unsigned long *)tbl->it_base + (index - tbl->it_offset);
- for (i = 0; i < npages; i++, uaddr += tbl->it_page_shift)
+ for (i = 0; i < npages; i++, uaddr += (1 << tbl->it_page_shift))
io_pte[i] = base_pte | (__pa(uaddr) & CBE_IOPTE_RPN_Mask);
mb();
@@ -857,7 +858,7 @@ static int __init cell_iommu_init_disabled(void)
cell_dma_direct_offset += base;
if (cell_dma_direct_offset != 0)
- ppc_md.pci_dma_dev_setup = cell_pci_dma_dev_setup;
+ cell_pci_controller_ops.dma_dev_setup = cell_pci_dma_dev_setup;
printk("iommu: disabled, direct DMA offset is 0x%lx\n",
cell_dma_direct_offset);
@@ -1197,8 +1198,8 @@ static int __init cell_iommu_init(void)
if (cell_iommu_init_disabled() == 0)
goto bail;
- /* Setup various ppc_md. callbacks */
- ppc_md.pci_dma_dev_setup = cell_pci_dma_dev_setup;
+ /* Setup various callbacks */
+ cell_pci_controller_ops.dma_dev_setup = cell_pci_dma_dev_setup;
ppc_md.dma_get_required_mask = cell_dma_get_required_mask;
ppc_md.tce_build = tce_build_cell;
ppc_md.tce_free = tce_free_cell;
@@ -1234,5 +1235,3 @@ static int __init cell_iommu_init(void)
return 0;
}
machine_arch_initcall(cell, cell_iommu_init);
-machine_arch_initcall(celleb_native, cell_iommu_init);
-
diff --git a/arch/powerpc/platforms/cell/setup.c b/arch/powerpc/platforms/cell/setup.c
index d62aa982d530..36cff28d0293 100644
--- a/arch/powerpc/platforms/cell/setup.c
+++ b/arch/powerpc/platforms/cell/setup.c
@@ -54,6 +54,7 @@
#include <asm/cell-regs.h>
#include <asm/io-workarounds.h>
+#include "cell.h"
#include "interrupt.h"
#include "pervasive.h"
#include "ras.h"
@@ -126,6 +127,8 @@ static int cell_setup_phb(struct pci_controller *phb)
if (rc)
return rc;
+ phb->controller_ops = cell_pci_controller_ops;
+
np = phb->dn;
model = of_get_property(np, "model", NULL);
if (model == NULL || strcmp(np->name, "pci"))
@@ -279,3 +282,5 @@ define_machine(cell) {
.init_IRQ = cell_init_irq,
.pci_setup_phb = cell_setup_phb,
};
+
+struct pci_controller_ops cell_pci_controller_ops;
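The cell/setup.c and cell/iommu.c hunks above show the general shape of this series: per-platform PCI callbacks move out of the global ppc_md structure into a pci_controller_ops instance that each PHB ("hose") carries. A compilable sketch of that pattern is shown below; all demo_* types and names are stand-ins, not the real kernel declarations.

/*
 * Illustrative only: one ops instance per platform, copied onto every
 * PHB the platform creates, and called through the hose.
 */
#include <stdio.h>

struct demo_pci_dev { const char *name; };

struct demo_controller_ops {
	void (*dma_dev_setup)(struct demo_pci_dev *dev);
};

struct demo_controller {
	struct demo_controller_ops controller_ops;	/* per-PHB copy */
};

static void demo_dma_dev_setup(struct demo_pci_dev *dev)
{
	printf("dma_dev_setup(%s)\n", dev->name);
}

/* one ops instance per platform, like cell_pci_controller_ops above */
static struct demo_controller_ops demo_platform_ops = {
	.dma_dev_setup = demo_dma_dev_setup,
};

int main(void)
{
	struct demo_controller hose = { .controller_ops = demo_platform_ops };
	struct demo_pci_dev dev = { .name = "demo" };

	/* generic code then calls through the hose, not through ppc_md */
	if (hose.controller_ops.dma_dev_setup)
		hose.controller_ops.dma_dev_setup(&dev);
	return 0;
}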
diff --git a/arch/powerpc/platforms/cell/smp.c b/arch/powerpc/platforms/cell/smp.c
index b64e7ead752f..895560f4be69 100644
--- a/arch/powerpc/platforms/cell/smp.c
+++ b/arch/powerpc/platforms/cell/smp.c
@@ -102,13 +102,6 @@ static inline int smp_startup_cpu(unsigned int lcpu)
return 1;
}
-static int __init smp_iic_probe(void)
-{
- iic_request_IPIs();
-
- return num_possible_cpus();
-}
-
static void smp_cell_setup_cpu(int cpu)
{
if (cpu != boot_cpuid)
@@ -139,7 +132,7 @@ static int smp_cell_kick_cpu(int nr)
static struct smp_ops_t bpa_iic_smp_ops = {
.message_pass = iic_message_pass,
- .probe = smp_iic_probe,
+ .probe = iic_request_IPIs,
.kick_cpu = smp_cell_kick_cpu,
.setup_cpu = smp_cell_setup_cpu,
.cpu_bootable = smp_generic_cpu_bootable,
diff --git a/arch/powerpc/platforms/cell/spu_callbacks.c b/arch/powerpc/platforms/cell/spu_callbacks.c
index b0ec78e8ad68..a494028b2cdf 100644
--- a/arch/powerpc/platforms/cell/spu_callbacks.c
+++ b/arch/powerpc/platforms/cell/spu_callbacks.c
@@ -39,6 +39,7 @@ static void *spu_syscall_table[] = {
#define PPC_SYS(func) sys_ni_syscall,
#define OLDSYS(func) sys_ni_syscall,
#define SYS32ONLY(func) sys_ni_syscall,
+#define PPC64ONLY(func) sys_ni_syscall,
#define SYSX(f, f3264, f32) sys_ni_syscall,
#define SYSCALL_SPU(func) sys_##func,
diff --git a/arch/powerpc/platforms/chrp/setup.c b/arch/powerpc/platforms/chrp/setup.c
index 860a59eb8ea2..15ebc4e8a151 100644
--- a/arch/powerpc/platforms/chrp/setup.c
+++ b/arch/powerpc/platforms/chrp/setup.c
@@ -253,7 +253,7 @@ static void briq_restart(char *cmd)
* But unfortunately, the firmware does not connect /chosen/{stdin,stdout}
* to the built-in serial node. Instead, a /failsafe node is created.
*/
-static void chrp_init_early(void)
+static __init void chrp_init_early(void)
{
struct device_node *node;
const char *property;
diff --git a/arch/powerpc/platforms/maple/maple.h b/arch/powerpc/platforms/maple/maple.h
index c6911ddc479f..eecfa182b06e 100644
--- a/arch/powerpc/platforms/maple/maple.h
+++ b/arch/powerpc/platforms/maple/maple.h
@@ -10,3 +10,5 @@ extern void maple_calibrate_decr(void);
extern void maple_pci_init(void);
extern void maple_pci_irq_fixup(struct pci_dev *dev);
extern int maple_pci_get_legacy_ide_irq(struct pci_dev *dev, int channel);
+
+extern struct pci_controller_ops maple_pci_controller_ops;
diff --git a/arch/powerpc/platforms/maple/pci.c b/arch/powerpc/platforms/maple/pci.c
index d3a13067ec42..a923230e575b 100644
--- a/arch/powerpc/platforms/maple/pci.c
+++ b/arch/powerpc/platforms/maple/pci.c
@@ -510,6 +510,7 @@ static int __init maple_add_bridge(struct device_node *dev)
return -ENOMEM;
hose->first_busno = bus_range ? bus_range[0] : 0;
hose->last_busno = bus_range ? bus_range[1] : 0xff;
+ hose->controller_ops = maple_pci_controller_ops;
disp_name = NULL;
if (of_device_is_compatible(dev, "u3-agp")) {
@@ -660,3 +661,6 @@ static void quirk_ipr_msi(struct pci_dev *dev)
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
quirk_ipr_msi);
+
+struct pci_controller_ops maple_pci_controller_ops = {
+};
diff --git a/arch/powerpc/platforms/maple/setup.c b/arch/powerpc/platforms/maple/setup.c
index 56b85cd61aaf..a837188544c8 100644
--- a/arch/powerpc/platforms/maple/setup.c
+++ b/arch/powerpc/platforms/maple/setup.c
@@ -203,7 +203,7 @@ static void __init maple_init_early(void)
{
DBG(" -> maple_init_early\n");
- iommu_init_early_dart();
+ iommu_init_early_dart(&maple_pci_controller_ops);
DBG(" <- maple_init_early\n");
}
diff --git a/arch/powerpc/platforms/pasemi/iommu.c b/arch/powerpc/platforms/pasemi/iommu.c
index 2e576f2ae442..b8f567b2ea19 100644
--- a/arch/powerpc/platforms/pasemi/iommu.c
+++ b/arch/powerpc/platforms/pasemi/iommu.c
@@ -27,6 +27,8 @@
#include <asm/machdep.h>
#include <asm/firmware.h>
+#include "pasemi.h"
+
#define IOBMAP_PAGE_SHIFT 12
#define IOBMAP_PAGE_SIZE (1 << IOBMAP_PAGE_SHIFT)
#define IOBMAP_PAGE_MASK (IOBMAP_PAGE_SIZE - 1)
@@ -248,8 +250,8 @@ void __init iommu_init_early_pasemi(void)
iob_init(NULL);
- ppc_md.pci_dma_dev_setup = pci_dma_dev_setup_pasemi;
- ppc_md.pci_dma_bus_setup = pci_dma_bus_setup_pasemi;
+ pasemi_pci_controller_ops.dma_dev_setup = pci_dma_dev_setup_pasemi;
+ pasemi_pci_controller_ops.dma_bus_setup = pci_dma_bus_setup_pasemi;
ppc_md.tce_build = iobmap_build;
ppc_md.tce_free = iobmap_free;
set_pci_dma_ops(&dma_iommu_ops);
diff --git a/arch/powerpc/platforms/pasemi/pasemi.h b/arch/powerpc/platforms/pasemi/pasemi.h
index ea65bf0eb897..11f230a48227 100644
--- a/arch/powerpc/platforms/pasemi/pasemi.h
+++ b/arch/powerpc/platforms/pasemi/pasemi.h
@@ -30,5 +30,6 @@ static inline void restore_astate(int cpu)
}
#endif
+extern struct pci_controller_ops pasemi_pci_controller_ops;
#endif /* _PASEMI_PASEMI_H */
diff --git a/arch/powerpc/platforms/pasemi/pci.c b/arch/powerpc/platforms/pasemi/pci.c
index aa862713258c..f3a68a0fef23 100644
--- a/arch/powerpc/platforms/pasemi/pci.c
+++ b/arch/powerpc/platforms/pasemi/pci.c
@@ -31,6 +31,8 @@
#include <asm/ppc-pci.h>
+#include "pasemi.h"
+
#define PA_PXP_CFA(bus, devfn, off) (((bus) << 20) | ((devfn) << 12) | (off))
static inline int pa_pxp_offset_valid(u8 bus, u8 devfn, int offset)
@@ -199,6 +201,7 @@ static int __init pas_add_bridge(struct device_node *dev)
hose->first_busno = 0;
hose->last_busno = 0xff;
+ hose->controller_ops = pasemi_pci_controller_ops;
setup_pa_pxp(hose);
@@ -239,3 +242,5 @@ void __iomem *pasemi_pci_getcfgaddr(struct pci_dev *dev, int offset)
return (void __iomem *)pa_pxp_cfg_addr(hose, dev->bus->number, dev->devfn, offset);
}
+
+struct pci_controller_ops pasemi_pci_controller_ops;
diff --git a/arch/powerpc/platforms/powermac/bootx_init.c b/arch/powerpc/platforms/powermac/bootx_init.c
index 3e91ef538114..76f5013c35e5 100644
--- a/arch/powerpc/platforms/powermac/bootx_init.c
+++ b/arch/powerpc/platforms/powermac/bootx_init.c
@@ -246,7 +246,7 @@ static void __init bootx_scan_dt_build_strings(unsigned long base,
DBG(" detected display ! adding properties names !\n");
bootx_dt_add_string("linux,boot-display", mem_end);
bootx_dt_add_string("linux,opened", mem_end);
- strncpy(bootx_disp_path, namep, 255);
+ strlcpy(bootx_disp_path, namep, sizeof(bootx_disp_path));
}
/* get and store all property names */
diff --git a/arch/powerpc/platforms/powermac/pci.c b/arch/powerpc/platforms/powermac/pci.c
index f4071a67ad00..59ab16fa600f 100644
--- a/arch/powerpc/platforms/powermac/pci.c
+++ b/arch/powerpc/platforms/powermac/pci.c
@@ -27,6 +27,8 @@
#include <asm/grackle.h>
#include <asm/ppc-pci.h>
+#include "pmac.h"
+
#undef DEBUG
#ifdef DEBUG
@@ -798,6 +800,7 @@ static int __init pmac_add_bridge(struct device_node *dev)
return -ENOMEM;
hose->first_busno = bus_range ? bus_range[0] : 0;
hose->last_busno = bus_range ? bus_range[1] : 0xff;
+ hose->controller_ops = pmac_pci_controller_ops;
disp_name = NULL;
@@ -942,7 +945,7 @@ void __init pmac_pci_init(void)
}
#ifdef CONFIG_PPC32
-int pmac_pci_enable_device_hook(struct pci_dev *dev)
+static bool pmac_pci_enable_device_hook(struct pci_dev *dev)
{
struct device_node* node;
int updatecfg = 0;
@@ -958,11 +961,11 @@ int pmac_pci_enable_device_hook(struct pci_dev *dev)
&& !node) {
printk(KERN_INFO "Apple USB OHCI %s disabled by firmware\n",
pci_name(dev));
- return -EINVAL;
+ return false;
}
if (!node)
- return 0;
+ return true;
uninorth_child = node->parent &&
of_device_is_compatible(node->parent, "uni-north");
@@ -1003,7 +1006,7 @@ int pmac_pci_enable_device_hook(struct pci_dev *dev)
L1_CACHE_BYTES >> 2);
}
- return 0;
+ return true;
}
void pmac_pci_fixup_ohci(struct pci_dev *dev)
@@ -1223,3 +1226,30 @@ static void fixup_u4_pcie(struct pci_dev* dev)
pci_write_config_dword(dev, PCI_PREF_MEMORY_BASE, 0);
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_U4_PCIE, fixup_u4_pcie);
+
+#ifdef CONFIG_PPC64
+static int pmac_pci_probe_mode(struct pci_bus *bus)
+{
+ struct device_node *node = pci_bus_to_OF_node(bus);
+
+ /* We need to use normal PCI probing for the AGP bus,
+ * since the device for the AGP bridge isn't in the tree.
+ * Same for the PCIe host on U4 and the HT host bridge.
+ */
+ if (bus->self == NULL && (of_device_is_compatible(node, "u3-agp") ||
+ of_device_is_compatible(node, "u4-pcie") ||
+ of_device_is_compatible(node, "u3-ht")))
+ return PCI_PROBE_NORMAL;
+ return PCI_PROBE_DEVTREE;
+}
+#endif /* CONFIG_PPC64 */
+
+struct pci_controller_ops pmac_pci_controller_ops = {
+#ifdef CONFIG_PPC64
+ .probe_mode = pmac_pci_probe_mode,
+#endif
+#ifdef CONFIG_PPC32
+ .enable_device_hook = pmac_pci_enable_device_hook,
+#endif
+};
+
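The powermac hunk above also changes the enable-device hook from an int/errno return to a bool, so the hook now simply answers whether the device may be enabled and the generic caller can map a veto back to an error code. A tiny sketch of that convention follows, using demo stand-in types rather than the real kernel structures.

/*
 * Illustrative only: a bool-returning "may this device be enabled?"
 * hook, mirroring the pmac_pci_enable_device_hook() change above.
 */
#include <stdbool.h>
#include <stdio.h>

struct demo_pci_dev { bool disabled_by_firmware; };

static bool demo_enable_device_hook(struct demo_pci_dev *dev)
{
	if (dev->disabled_by_firmware)
		return false;	/* veto enabling this device */
	return true;
}

int main(void)
{
	struct demo_pci_dev dev = { .disabled_by_firmware = true };

	printf("enable allowed: %d\n", demo_enable_device_hook(&dev));
	return 0;
}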
diff --git a/arch/powerpc/platforms/powermac/pic.c b/arch/powerpc/platforms/powermac/pic.c
index 4c24bf60d39d..59cfc9d63c2d 100644
--- a/arch/powerpc/platforms/powermac/pic.c
+++ b/arch/powerpc/platforms/powermac/pic.c
@@ -321,6 +321,9 @@ static void __init pmac_pic_probe_oldstyle(void)
max_irqs = max_real_irqs = 64;
/* We might have a second cascaded heathrow */
+
+ /* Compensate for of_node_put() in of_find_node_by_name() */
+ of_node_get(master);
slave = of_find_node_by_name(master, "mac-io");
/* Check ordering of master & slave */
diff --git a/arch/powerpc/platforms/powermac/pmac.h b/arch/powerpc/platforms/powermac/pmac.h
index 8327cce2bdb0..e7f8163d6769 100644
--- a/arch/powerpc/platforms/powermac/pmac.h
+++ b/arch/powerpc/platforms/powermac/pmac.h
@@ -25,7 +25,6 @@ extern void pmac_pci_init(void);
extern void pmac_nvram_update(void);
extern unsigned char pmac_nvram_read_byte(int addr);
extern void pmac_nvram_write_byte(int addr, unsigned char val);
-extern int pmac_pci_enable_device_hook(struct pci_dev *dev);
extern void pmac_pcibios_after_init(void);
extern int of_show_percpuinfo(struct seq_file *m, int i);
@@ -39,4 +38,6 @@ extern void low_cpu_die(void) __attribute__((noreturn));
extern int pmac_nvram_init(void);
extern void pmac_pic_init(void);
+extern struct pci_controller_ops pmac_pci_controller_ops;
+
#endif /* __PMAC_H__ */
diff --git a/arch/powerpc/platforms/powermac/setup.c b/arch/powerpc/platforms/powermac/setup.c
index 713d36d45d1d..8dd78f4e1af4 100644
--- a/arch/powerpc/platforms/powermac/setup.c
+++ b/arch/powerpc/platforms/powermac/setup.c
@@ -473,7 +473,7 @@ static void __init pmac_init_early(void)
udbg_adb_init(!!strstr(boot_command_line, "btextdbg"));
#ifdef CONFIG_PPC64
- iommu_init_early_dart();
+ iommu_init_early_dart(&pmac_pci_controller_ops);
#endif
/* SMP Init has to be done early as we need to patch up
@@ -637,24 +637,6 @@ static int __init pmac_probe(void)
return 1;
}
-#ifdef CONFIG_PPC64
-/* Move that to pci.c */
-static int pmac_pci_probe_mode(struct pci_bus *bus)
-{
- struct device_node *node = pci_bus_to_OF_node(bus);
-
- /* We need to use normal PCI probing for the AGP bus,
- * since the device for the AGP bridge isn't in the tree.
- * Same for the PCIe host on U4 and the HT host bridge.
- */
- if (bus->self == NULL && (of_device_is_compatible(node, "u3-agp") ||
- of_device_is_compatible(node, "u4-pcie") ||
- of_device_is_compatible(node, "u3-ht")))
- return PCI_PROBE_NORMAL;
- return PCI_PROBE_DEVTREE;
-}
-#endif /* CONFIG_PPC64 */
-
define_machine(powermac) {
.name = "PowerMac",
.probe = pmac_probe,
@@ -674,12 +656,10 @@ define_machine(powermac) {
.feature_call = pmac_do_feature_call,
.progress = udbg_progress,
#ifdef CONFIG_PPC64
- .pci_probe_mode = pmac_pci_probe_mode,
.power_save = power4_idle,
.enable_pmcs = power4_enable_pmcs,
#endif /* CONFIG_PPC64 */
#ifdef CONFIG_PPC32
- .pcibios_enable_device_hook = pmac_pci_enable_device_hook,
.pcibios_after_init = pmac_pcibios_after_init,
.phys_mem_access_prot = pci_phys_mem_access_prot,
#endif
diff --git a/arch/powerpc/platforms/powermac/smp.c b/arch/powerpc/platforms/powermac/smp.c
index af094ae03dbb..28a147ca32ba 100644
--- a/arch/powerpc/platforms/powermac/smp.c
+++ b/arch/powerpc/platforms/powermac/smp.c
@@ -268,14 +268,14 @@ static void __init psurge_quad_init(void)
mdelay(33);
}
-static int __init smp_psurge_probe(void)
+static void __init smp_psurge_probe(void)
{
int i, ncpus;
struct device_node *dn;
/* We don't do SMP on the PPC601 -- paulus */
if (PVR_VER(mfspr(SPRN_PVR)) == 1)
- return 1;
+ return;
/*
* The powersurge cpu board can be used in the generation
@@ -289,7 +289,7 @@ static int __init smp_psurge_probe(void)
*/
dn = of_find_node_by_name(NULL, "hammerhead");
if (dn == NULL)
- return 1;
+ return;
of_node_put(dn);
hhead_base = ioremap(HAMMERHEAD_BASE, 0x800);
@@ -310,13 +310,13 @@ static int __init smp_psurge_probe(void)
/* not a dual-cpu card */
iounmap(hhead_base);
psurge_type = PSURGE_NONE;
- return 1;
+ return;
}
ncpus = 2;
}
if (psurge_secondary_ipi_init())
- return 1;
+ return;
psurge_start = ioremap(PSURGE_START, 4);
psurge_pri_intr = ioremap(PSURGE_PRI_INTR, 4);
@@ -332,8 +332,6 @@ static int __init smp_psurge_probe(void)
set_cpu_present(i, true);
if (ppc_md.progress) ppc_md.progress("smp_psurge_probe - done", 0x352);
-
- return ncpus;
}
static int __init smp_psurge_kick_cpu(int nr)
@@ -766,7 +764,7 @@ static void __init smp_core99_setup(int ncpus)
powersave_nap = 0;
}
-static int __init smp_core99_probe(void)
+static void __init smp_core99_probe(void)
{
struct device_node *cpus;
int ncpus = 0;
@@ -781,7 +779,7 @@ static int __init smp_core99_probe(void)
/* Nothing more to do if less than 2 of them */
if (ncpus <= 1)
- return 1;
+ return;
/* We need to perform some early initialisations before we can start
* setting up SMP as we are running before initcalls
@@ -797,8 +795,6 @@ static int __init smp_core99_probe(void)
/* Collect l2cr and l3cr values from CPU 0 */
core99_init_caches(0);
-
- return ncpus;
}
static int smp_core99_kick_cpu(int nr)
diff --git a/arch/powerpc/platforms/powernv/Kconfig b/arch/powerpc/platforms/powernv/Kconfig
index 45a8ed0585cd..4b044d8cb49a 100644
--- a/arch/powerpc/platforms/powernv/Kconfig
+++ b/arch/powerpc/platforms/powernv/Kconfig
@@ -19,10 +19,3 @@ config PPC_POWERNV
select CPU_FREQ_GOV_CONSERVATIVE
select PPC_DOORBELL
default y
-
-config PPC_POWERNV_RTAS
- depends on PPC_POWERNV
- bool "Support for RTAS based PowerNV platforms such as BML"
- default y
- select PPC_ICS_RTAS
- select PPC_RTAS
diff --git a/arch/powerpc/platforms/powernv/Makefile b/arch/powerpc/platforms/powernv/Makefile
index 6f3c5d33c3af..33e44f37212f 100644
--- a/arch/powerpc/platforms/powernv/Makefile
+++ b/arch/powerpc/platforms/powernv/Makefile
@@ -5,7 +5,7 @@ obj-y += opal-msglog.o opal-hmi.o opal-power.o
obj-$(CONFIG_SMP) += smp.o subcore.o subcore-asm.o
obj-$(CONFIG_PCI) += pci.o pci-p5ioc2.o pci-ioda.o
-obj-$(CONFIG_EEH) += eeh-ioda.o eeh-powernv.o
+obj-$(CONFIG_EEH) += eeh-powernv.o
obj-$(CONFIG_PPC_SCOM) += opal-xscom.o
obj-$(CONFIG_MEMORY_FAILURE) += opal-memory-errors.o
obj-$(CONFIG_TRACEPOINTS) += opal-tracepoints.o
diff --git a/arch/powerpc/platforms/powernv/eeh-ioda.c b/arch/powerpc/platforms/powernv/eeh-ioda.c
deleted file mode 100644
index 2809c9895288..000000000000
--- a/arch/powerpc/platforms/powernv/eeh-ioda.c
+++ /dev/null
@@ -1,1149 +0,0 @@
-/*
- * The file intends to implement the functions needed by EEH, which is
- * built on IODA compliant chip. Actually, lots of functions related
- * to EEH would be built based on the OPAL APIs.
- *
- * Copyright Benjamin Herrenschmidt & Gavin Shan, IBM Corporation 2013.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#include <linux/debugfs.h>
-#include <linux/delay.h>
-#include <linux/io.h>
-#include <linux/irq.h>
-#include <linux/kernel.h>
-#include <linux/msi.h>
-#include <linux/notifier.h>
-#include <linux/pci.h>
-#include <linux/string.h>
-
-#include <asm/eeh.h>
-#include <asm/eeh_event.h>
-#include <asm/io.h>
-#include <asm/iommu.h>
-#include <asm/msi_bitmap.h>
-#include <asm/opal.h>
-#include <asm/pci-bridge.h>
-#include <asm/ppc-pci.h>
-#include <asm/tce.h>
-
-#include "powernv.h"
-#include "pci.h"
-
-static int ioda_eeh_nb_init = 0;
-
-static int ioda_eeh_event(struct notifier_block *nb,
- unsigned long events, void *change)
-{
- uint64_t changed_evts = (uint64_t)change;
-
- /*
- * We simply send a special EEH event if EEH has
- * been enabled, or clear pending events in case
- * we enable EEH soon.
- */
- if (!(changed_evts & OPAL_EVENT_PCI_ERROR) ||
- !(events & OPAL_EVENT_PCI_ERROR))
- return 0;
-
- if (eeh_enabled())
- eeh_send_failure_event(NULL);
- else
- opal_notifier_update_evt(OPAL_EVENT_PCI_ERROR, 0x0ul);
-
- return 0;
-}
-
-static struct notifier_block ioda_eeh_nb = {
- .notifier_call = ioda_eeh_event,
- .next = NULL,
- .priority = 0
-};
-
-#ifdef CONFIG_DEBUG_FS
-static ssize_t ioda_eeh_ei_write(struct file *filp,
- const char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct pci_controller *hose = filp->private_data;
- struct pnv_phb *phb = hose->private_data;
- struct eeh_dev *edev;
- struct eeh_pe *pe;
- int pe_no, type, func;
- unsigned long addr, mask;
- char buf[50];
- int ret;
-
- if (!phb->eeh_ops || !phb->eeh_ops->err_inject)
- return -ENXIO;
-
- ret = simple_write_to_buffer(buf, sizeof(buf), ppos, user_buf, count);
- if (!ret)
- return -EFAULT;
-
- /* Retrieve parameters */
- ret = sscanf(buf, "%x:%x:%x:%lx:%lx",
- &pe_no, &type, &func, &addr, &mask);
- if (ret != 5)
- return -EINVAL;
-
- /* Retrieve PE */
- edev = kzalloc(sizeof(*edev), GFP_KERNEL);
- if (!edev)
- return -ENOMEM;
- edev->phb = hose;
- edev->pe_config_addr = pe_no;
- pe = eeh_pe_get(edev);
- kfree(edev);
- if (!pe)
- return -ENODEV;
-
- /* Do error injection */
- ret = phb->eeh_ops->err_inject(pe, type, func, addr, mask);
- return ret < 0 ? ret : count;
-}
-
-static const struct file_operations ioda_eeh_ei_fops = {
- .open = simple_open,
- .llseek = no_llseek,
- .write = ioda_eeh_ei_write,
-};
-
-static int ioda_eeh_dbgfs_set(void *data, int offset, u64 val)
-{
- struct pci_controller *hose = data;
- struct pnv_phb *phb = hose->private_data;
-
- out_be64(phb->regs + offset, val);
- return 0;
-}
-
-static int ioda_eeh_dbgfs_get(void *data, int offset, u64 *val)
-{
- struct pci_controller *hose = data;
- struct pnv_phb *phb = hose->private_data;
-
- *val = in_be64(phb->regs + offset);
- return 0;
-}
-
-static int ioda_eeh_outb_dbgfs_set(void *data, u64 val)
-{
- return ioda_eeh_dbgfs_set(data, 0xD10, val);
-}
-
-static int ioda_eeh_outb_dbgfs_get(void *data, u64 *val)
-{
- return ioda_eeh_dbgfs_get(data, 0xD10, val);
-}
-
-static int ioda_eeh_inbA_dbgfs_set(void *data, u64 val)
-{
- return ioda_eeh_dbgfs_set(data, 0xD90, val);
-}
-
-static int ioda_eeh_inbA_dbgfs_get(void *data, u64 *val)
-{
- return ioda_eeh_dbgfs_get(data, 0xD90, val);
-}
-
-static int ioda_eeh_inbB_dbgfs_set(void *data, u64 val)
-{
- return ioda_eeh_dbgfs_set(data, 0xE10, val);
-}
-
-static int ioda_eeh_inbB_dbgfs_get(void *data, u64 *val)
-{
- return ioda_eeh_dbgfs_get(data, 0xE10, val);
-}
-
-DEFINE_SIMPLE_ATTRIBUTE(ioda_eeh_outb_dbgfs_ops, ioda_eeh_outb_dbgfs_get,
- ioda_eeh_outb_dbgfs_set, "0x%llx\n");
-DEFINE_SIMPLE_ATTRIBUTE(ioda_eeh_inbA_dbgfs_ops, ioda_eeh_inbA_dbgfs_get,
- ioda_eeh_inbA_dbgfs_set, "0x%llx\n");
-DEFINE_SIMPLE_ATTRIBUTE(ioda_eeh_inbB_dbgfs_ops, ioda_eeh_inbB_dbgfs_get,
- ioda_eeh_inbB_dbgfs_set, "0x%llx\n");
-#endif /* CONFIG_DEBUG_FS */
-
-
-/**
- * ioda_eeh_post_init - Chip dependent post initialization
- * @hose: PCI controller
- *
- * The function will be called after the EEH PEs and devices
- * have been built, i.e. EEH is ready to provide service,
- * including its I/O cache.
- */
-static int ioda_eeh_post_init(struct pci_controller *hose)
-{
- struct pnv_phb *phb = hose->private_data;
- int ret;
-
- /* Register OPAL event notifier */
- if (!ioda_eeh_nb_init) {
- ret = opal_notifier_register(&ioda_eeh_nb);
- if (ret) {
- pr_err("%s: Can't register OPAL event notifier (%d)\n",
- __func__, ret);
- return ret;
- }
-
- ioda_eeh_nb_init = 1;
- }
-
-#ifdef CONFIG_DEBUG_FS
- if (!phb->has_dbgfs && phb->dbgfs) {
- phb->has_dbgfs = 1;
-
- debugfs_create_file("err_injct", 0200,
- phb->dbgfs, hose,
- &ioda_eeh_ei_fops);
-
- debugfs_create_file("err_injct_outbound", 0600,
- phb->dbgfs, hose,
- &ioda_eeh_outb_dbgfs_ops);
- debugfs_create_file("err_injct_inboundA", 0600,
- phb->dbgfs, hose,
- &ioda_eeh_inbA_dbgfs_ops);
- debugfs_create_file("err_injct_inboundB", 0600,
- phb->dbgfs, hose,
- &ioda_eeh_inbB_dbgfs_ops);
- }
-#endif
-
- /* If EEH is enabled, we're going to rely on that.
- * Otherwise, we fall back to the conventional mechanism
- * for clearing frozen PEs during PCI config access.
- */
- if (eeh_enabled())
- phb->flags |= PNV_PHB_FLAG_EEH;
- else
- phb->flags &= ~PNV_PHB_FLAG_EEH;
-
- return 0;
-}
-
-/**
- * ioda_eeh_set_option - Set EEH operation or I/O setting
- * @pe: EEH PE
- * @option: options
- *
- * Enable or disable EEH option for the indicated PE. The
- * function also can be used to enable I/O or DMA for the
- * PE.
- */
-static int ioda_eeh_set_option(struct eeh_pe *pe, int option)
-{
- struct pci_controller *hose = pe->phb;
- struct pnv_phb *phb = hose->private_data;
- bool freeze_pe = false;
- int enable, ret = 0;
- s64 rc;
-
- /* Check on PE number */
- if (pe->addr < 0 || pe->addr >= phb->ioda.total_pe) {
- pr_err("%s: PE address %x out of range [0, %x] "
- "on PHB#%x\n",
- __func__, pe->addr, phb->ioda.total_pe,
- hose->global_number);
- return -EINVAL;
- }
-
- switch (option) {
- case EEH_OPT_DISABLE:
- return -EPERM;
- case EEH_OPT_ENABLE:
- return 0;
- case EEH_OPT_THAW_MMIO:
- enable = OPAL_EEH_ACTION_CLEAR_FREEZE_MMIO;
- break;
- case EEH_OPT_THAW_DMA:
- enable = OPAL_EEH_ACTION_CLEAR_FREEZE_DMA;
- break;
- case EEH_OPT_FREEZE_PE:
- freeze_pe = true;
- enable = OPAL_EEH_ACTION_SET_FREEZE_ALL;
- break;
- default:
- pr_warn("%s: Invalid option %d\n",
- __func__, option);
- return -EINVAL;
- }
-
- /* If the PHB supports compound PEs, let it handle this */
- if (freeze_pe) {
- if (phb->freeze_pe) {
- phb->freeze_pe(phb, pe->addr);
- } else {
- rc = opal_pci_eeh_freeze_set(phb->opal_id,
- pe->addr,
- enable);
- if (rc != OPAL_SUCCESS) {
- pr_warn("%s: Failure %lld freezing "
- "PHB#%x-PE#%x\n",
- __func__, rc,
- phb->hose->global_number, pe->addr);
- ret = -EIO;
- }
- }
- } else {
- if (phb->unfreeze_pe) {
- ret = phb->unfreeze_pe(phb, pe->addr, enable);
- } else {
- rc = opal_pci_eeh_freeze_clear(phb->opal_id,
- pe->addr,
- enable);
- if (rc != OPAL_SUCCESS) {
- pr_warn("%s: Failure %lld enable %d "
- "for PHB#%x-PE#%x\n",
- __func__, rc, option,
- phb->hose->global_number, pe->addr);
- ret = -EIO;
- }
- }
- }
-
- return ret;
-}
-
-static void ioda_eeh_phb_diag(struct eeh_pe *pe)
-{
- struct pnv_phb *phb = pe->phb->private_data;
- long rc;
-
- rc = opal_pci_get_phb_diag_data2(phb->opal_id, pe->data,
- PNV_PCI_DIAG_BUF_SIZE);
- if (rc != OPAL_SUCCESS)
- pr_warn("%s: Failed to get diag-data for PHB#%x (%ld)\n",
- __func__, pe->phb->global_number, rc);
-}
-
-static int ioda_eeh_get_phb_state(struct eeh_pe *pe)
-{
- struct pnv_phb *phb = pe->phb->private_data;
- u8 fstate;
- __be16 pcierr;
- s64 rc;
- int result = 0;
-
- rc = opal_pci_eeh_freeze_status(phb->opal_id,
- pe->addr,
- &fstate,
- &pcierr,
- NULL);
- if (rc != OPAL_SUCCESS) {
- pr_warn("%s: Failure %lld getting PHB#%x state\n",
- __func__, rc, phb->hose->global_number);
- return EEH_STATE_NOT_SUPPORT;
- }
-
- /*
- * Check the PHB state. If the PHB is frozen for the
- * first time, dump the PHB diag-data.
- */
- if (be16_to_cpu(pcierr) != OPAL_EEH_PHB_ERROR) {
- result = (EEH_STATE_MMIO_ACTIVE |
- EEH_STATE_DMA_ACTIVE |
- EEH_STATE_MMIO_ENABLED |
- EEH_STATE_DMA_ENABLED);
- } else if (!(pe->state & EEH_PE_ISOLATED)) {
- eeh_pe_state_mark(pe, EEH_PE_ISOLATED);
- ioda_eeh_phb_diag(pe);
-
- if (eeh_has_flag(EEH_EARLY_DUMP_LOG))
- pnv_pci_dump_phb_diag_data(pe->phb, pe->data);
- }
-
- return result;
-}
-
-static int ioda_eeh_get_pe_state(struct eeh_pe *pe)
-{
- struct pnv_phb *phb = pe->phb->private_data;
- u8 fstate;
- __be16 pcierr;
- s64 rc;
- int result;
-
- /*
- * We don't clobber hardware frozen state until PE
- * reset is completed. In order to keep EEH core
- * moving forward, we have to return operational
- * state during PE reset.
- */
- if (pe->state & EEH_PE_RESET) {
- result = (EEH_STATE_MMIO_ACTIVE |
- EEH_STATE_DMA_ACTIVE |
- EEH_STATE_MMIO_ENABLED |
- EEH_STATE_DMA_ENABLED);
- return result;
- }
-
- /*
- * Fetch PE state from hardware. If the PHB
- * supports compound PE, let it handle that.
- */
- if (phb->get_pe_state) {
- fstate = phb->get_pe_state(phb, pe->addr);
- } else {
- rc = opal_pci_eeh_freeze_status(phb->opal_id,
- pe->addr,
- &fstate,
- &pcierr,
- NULL);
- if (rc != OPAL_SUCCESS) {
- pr_warn("%s: Failure %lld getting PHB#%x-PE%x state\n",
- __func__, rc, phb->hose->global_number, pe->addr);
- return EEH_STATE_NOT_SUPPORT;
- }
- }
-
- /* Figure out state */
- switch (fstate) {
- case OPAL_EEH_STOPPED_NOT_FROZEN:
- result = (EEH_STATE_MMIO_ACTIVE |
- EEH_STATE_DMA_ACTIVE |
- EEH_STATE_MMIO_ENABLED |
- EEH_STATE_DMA_ENABLED);
- break;
- case OPAL_EEH_STOPPED_MMIO_FREEZE:
- result = (EEH_STATE_DMA_ACTIVE |
- EEH_STATE_DMA_ENABLED);
- break;
- case OPAL_EEH_STOPPED_DMA_FREEZE:
- result = (EEH_STATE_MMIO_ACTIVE |
- EEH_STATE_MMIO_ENABLED);
- break;
- case OPAL_EEH_STOPPED_MMIO_DMA_FREEZE:
- result = 0;
- break;
- case OPAL_EEH_STOPPED_RESET:
- result = EEH_STATE_RESET_ACTIVE;
- break;
- case OPAL_EEH_STOPPED_TEMP_UNAVAIL:
- result = EEH_STATE_UNAVAILABLE;
- break;
- case OPAL_EEH_STOPPED_PERM_UNAVAIL:
- result = EEH_STATE_NOT_SUPPORT;
- break;
- default:
- result = EEH_STATE_NOT_SUPPORT;
- pr_warn("%s: Invalid PHB#%x-PE#%x state %x\n",
- __func__, phb->hose->global_number,
- pe->addr, fstate);
- }
-
- /*
- * If the PHB supports compound PEs, freeze all
- * slave PEs for consistency.
- *
- * If the PE is switching to the frozen state for
- * the first time, dump the PHB diag-data.
- */
- if (!(result & EEH_STATE_NOT_SUPPORT) &&
- !(result & EEH_STATE_UNAVAILABLE) &&
- !(result & EEH_STATE_MMIO_ACTIVE) &&
- !(result & EEH_STATE_DMA_ACTIVE) &&
- !(pe->state & EEH_PE_ISOLATED)) {
- if (phb->freeze_pe)
- phb->freeze_pe(phb, pe->addr);
-
- eeh_pe_state_mark(pe, EEH_PE_ISOLATED);
- ioda_eeh_phb_diag(pe);
-
- if (eeh_has_flag(EEH_EARLY_DUMP_LOG))
- pnv_pci_dump_phb_diag_data(pe->phb, pe->data);
- }
-
- return result;
-}
-
-/**
- * ioda_eeh_get_state - Retrieve the state of PE
- * @pe: EEH PE
- *
- * The PE's state should be retrieved from the PEEV and PEST
- * IODA tables. Since OPAL exports a function to do this,
- * we simply use that.
- */
-static int ioda_eeh_get_state(struct eeh_pe *pe)
-{
- struct pnv_phb *phb = pe->phb->private_data;
-
- /* Sanity check on PE number. PHB PE should have 0 */
- if (pe->addr < 0 ||
- pe->addr >= phb->ioda.total_pe) {
- pr_warn("%s: PHB#%x-PE#%x out of range [0, %x]\n",
- __func__, phb->hose->global_number,
- pe->addr, phb->ioda.total_pe);
- return EEH_STATE_NOT_SUPPORT;
- }
-
- if (pe->type & EEH_PE_PHB)
- return ioda_eeh_get_phb_state(pe);
-
- return ioda_eeh_get_pe_state(pe);
-}
-
-static s64 ioda_eeh_phb_poll(struct pnv_phb *phb)
-{
- s64 rc = OPAL_HARDWARE;
-
- while (1) {
- rc = opal_pci_poll(phb->opal_id);
- if (rc <= 0)
- break;
-
- if (system_state < SYSTEM_RUNNING)
- udelay(1000 * rc);
- else
- msleep(rc);
- }
-
- return rc;
-}
-
-int ioda_eeh_phb_reset(struct pci_controller *hose, int option)
-{
- struct pnv_phb *phb = hose->private_data;
- s64 rc = OPAL_HARDWARE;
-
- pr_debug("%s: Reset PHB#%x, option=%d\n",
- __func__, hose->global_number, option);
-
- /* Issue PHB complete reset request */
- if (option == EEH_RESET_FUNDAMENTAL ||
- option == EEH_RESET_HOT)
- rc = opal_pci_reset(phb->opal_id,
- OPAL_RESET_PHB_COMPLETE,
- OPAL_ASSERT_RESET);
- else if (option == EEH_RESET_DEACTIVATE)
- rc = opal_pci_reset(phb->opal_id,
- OPAL_RESET_PHB_COMPLETE,
- OPAL_DEASSERT_RESET);
- if (rc < 0)
- goto out;
-
- /*
- * Poll state of the PHB until the request is done
- * successfully. The PHB reset is usually PHB complete
- * reset followed by hot reset on root bus. So we also
- * need the PCI bus settlement delay.
- */
- rc = ioda_eeh_phb_poll(phb);
- if (option == EEH_RESET_DEACTIVATE) {
- if (system_state < SYSTEM_RUNNING)
- udelay(1000 * EEH_PE_RST_SETTLE_TIME);
- else
- msleep(EEH_PE_RST_SETTLE_TIME);
- }
-out:
- if (rc != OPAL_SUCCESS)
- return -EIO;
-
- return 0;
-}
-
-static int ioda_eeh_root_reset(struct pci_controller *hose, int option)
-{
- struct pnv_phb *phb = hose->private_data;
- s64 rc = OPAL_SUCCESS;
-
- pr_debug("%s: Reset PHB#%x, option=%d\n",
- __func__, hose->global_number, option);
-
- /*
- * During reset deassert we needn't care about the reset
- * scope, because the firmware does nothing for a fundamental
- * or hot reset during the deassert phase.
- */
- if (option == EEH_RESET_FUNDAMENTAL)
- rc = opal_pci_reset(phb->opal_id,
- OPAL_RESET_PCI_FUNDAMENTAL,
- OPAL_ASSERT_RESET);
- else if (option == EEH_RESET_HOT)
- rc = opal_pci_reset(phb->opal_id,
- OPAL_RESET_PCI_HOT,
- OPAL_ASSERT_RESET);
- else if (option == EEH_RESET_DEACTIVATE)
- rc = opal_pci_reset(phb->opal_id,
- OPAL_RESET_PCI_HOT,
- OPAL_DEASSERT_RESET);
- if (rc < 0)
- goto out;
-
- /* Poll state of the PHB until the request is done */
- rc = ioda_eeh_phb_poll(phb);
- if (option == EEH_RESET_DEACTIVATE)
- msleep(EEH_PE_RST_SETTLE_TIME);
-out:
- if (rc != OPAL_SUCCESS)
- return -EIO;
-
- return 0;
-}
-
-static int ioda_eeh_bridge_reset(struct pci_dev *dev, int option)
-
-{
- struct device_node *dn = pci_device_to_OF_node(dev);
- struct eeh_dev *edev = of_node_to_eeh_dev(dn);
- int aer = edev ? edev->aer_cap : 0;
- u32 ctrl;
-
- pr_debug("%s: Reset PCI bus %04x:%02x with option %d\n",
- __func__, pci_domain_nr(dev->bus),
- dev->bus->number, option);
-
- switch (option) {
- case EEH_RESET_FUNDAMENTAL:
- case EEH_RESET_HOT:
- /* Don't report linkDown event */
- if (aer) {
- eeh_ops->read_config(dn, aer + PCI_ERR_UNCOR_MASK,
- 4, &ctrl);
- ctrl |= PCI_ERR_UNC_SURPDN;
- eeh_ops->write_config(dn, aer + PCI_ERR_UNCOR_MASK,
- 4, ctrl);
- }
-
- eeh_ops->read_config(dn, PCI_BRIDGE_CONTROL, 2, &ctrl);
- ctrl |= PCI_BRIDGE_CTL_BUS_RESET;
- eeh_ops->write_config(dn, PCI_BRIDGE_CONTROL, 2, ctrl);
- msleep(EEH_PE_RST_HOLD_TIME);
-
- break;
- case EEH_RESET_DEACTIVATE:
- eeh_ops->read_config(dn, PCI_BRIDGE_CONTROL, 2, &ctrl);
- ctrl &= ~PCI_BRIDGE_CTL_BUS_RESET;
- eeh_ops->write_config(dn, PCI_BRIDGE_CONTROL, 2, ctrl);
- msleep(EEH_PE_RST_SETTLE_TIME);
-
- /* Continue reporting linkDown event */
- if (aer) {
- eeh_ops->read_config(dn, aer + PCI_ERR_UNCOR_MASK,
- 4, &ctrl);
- ctrl &= ~PCI_ERR_UNC_SURPDN;
- eeh_ops->write_config(dn, aer + PCI_ERR_UNCOR_MASK,
- 4, ctrl);
- }
-
- break;
- }
-
- return 0;
-}
-
-void pnv_pci_reset_secondary_bus(struct pci_dev *dev)
-{
- struct pci_controller *hose;
-
- if (pci_is_root_bus(dev->bus)) {
- hose = pci_bus_to_host(dev->bus);
- ioda_eeh_root_reset(hose, EEH_RESET_HOT);
- ioda_eeh_root_reset(hose, EEH_RESET_DEACTIVATE);
- } else {
- ioda_eeh_bridge_reset(dev, EEH_RESET_HOT);
- ioda_eeh_bridge_reset(dev, EEH_RESET_DEACTIVATE);
- }
-}
-
-/**
- * ioda_eeh_reset - Reset the indicated PE
- * @pe: EEH PE
- * @option: reset option
- *
- * Reset the indicated PE. For a PCI-bus-sensitive PE, we need
- * to reset the parent P2P bridge; the PHB has to be reinitialized
- * if the P2P bridge is the root bridge. For a PCI-device-sensitive
- * PE, we try to reset the device through FLR. For now we don't
- * have OPAL APIs to do a hard reset, so all resets are soft
- * (hot) resets.
- */
-static int ioda_eeh_reset(struct eeh_pe *pe, int option)
-{
- struct pci_controller *hose = pe->phb;
- struct pci_bus *bus;
- int ret;
-
- /*
- * For a PHB reset we always do a complete reset. For PEs whose
- * primary bus is derived from the root complex (root bus) or the
- * root port (usually bus#1), we apply a hot or fundamental reset
- * on the root port. For other PEs, we always do a hot reset on
- * the PE's primary bus.
- *
- * Here we differ from pHyp, which always clears the frozen state
- * during PE reset. The idea (from benh) is to keep the frozen
- * state until the PE reset is completely done (until BAR restore).
- * With the frozen state, the HW drops illegal IO or MMIO accesses,
- * which could otherwise cause a recursive frozen PE during the
- * reset. The side effect is that the EEH core has to clear the
- * frozen state explicitly after BAR restore.
- */
- if (pe->type & EEH_PE_PHB) {
- ret = ioda_eeh_phb_reset(hose, option);
- } else {
- struct pnv_phb *phb;
- s64 rc;
-
- /*
- * The frozen PE might be caused by PAPR error injection
- * registers, which are expected to be cleared after hitting
- * frozen PE as stated in the hardware spec. Unfortunately,
- * that's not true on P7IOC. So we have to clear it manually
- * to avoid recursive EEH errors during recovery.
- */
- phb = hose->private_data;
- if (phb->model == PNV_PHB_MODEL_P7IOC &&
- (option == EEH_RESET_HOT ||
- option == EEH_RESET_FUNDAMENTAL)) {
- rc = opal_pci_reset(phb->opal_id,
- OPAL_RESET_PHB_ERROR,
- OPAL_ASSERT_RESET);
- if (rc != OPAL_SUCCESS) {
- pr_warn("%s: Failure %lld clearing "
- "error injection registers\n",
- __func__, rc);
- return -EIO;
- }
- }
-
- bus = eeh_pe_bus_get(pe);
- if (pci_is_root_bus(bus) ||
- pci_is_root_bus(bus->parent))
- ret = ioda_eeh_root_reset(hose, option);
- else
- ret = ioda_eeh_bridge_reset(bus->self, option);
- }
-
- return ret;
-}
-
-/**
- * ioda_eeh_get_log - Retrieve error log
- * @pe: frozen PE
- * @severity: permanent or temporary error
- * @drv_log: device driver log
- * @len: length of device driver log
- *
- * Retrieve error log, which contains log from device driver
- * and firmware.
- */
-static int ioda_eeh_get_log(struct eeh_pe *pe, int severity,
- char *drv_log, unsigned long len)
-{
- if (!eeh_has_flag(EEH_EARLY_DUMP_LOG))
- pnv_pci_dump_phb_diag_data(pe->phb, pe->data);
-
- return 0;
-}
-
-/**
- * ioda_eeh_configure_bridge - Configure the PCI bridges for the indicated PE
- * @pe: EEH PE
- *
- * A particular PE might include PCI bridges. To make the PE work
- * properly, those bridges should be configured correctly. However,
- * we need to do nothing on P7IOC, since the reset function already
- * covers everything this function would do.
- */
-static int ioda_eeh_configure_bridge(struct eeh_pe *pe)
-{
- return 0;
-}
-
-static int ioda_eeh_err_inject(struct eeh_pe *pe, int type, int func,
- unsigned long addr, unsigned long mask)
-{
- struct pci_controller *hose = pe->phb;
- struct pnv_phb *phb = hose->private_data;
- s64 ret;
-
- /* Sanity check on error type */
- if (type != OPAL_ERR_INJECT_TYPE_IOA_BUS_ERR &&
- type != OPAL_ERR_INJECT_TYPE_IOA_BUS_ERR64) {
- pr_warn("%s: Invalid error type %d\n",
- __func__, type);
- return -ERANGE;
- }
-
- if (func < OPAL_ERR_INJECT_FUNC_IOA_LD_MEM_ADDR ||
- func > OPAL_ERR_INJECT_FUNC_IOA_DMA_WR_TARGET) {
- pr_warn("%s: Invalid error function %d\n",
- __func__, func);
- return -ERANGE;
- }
-
- /* Firmware supports error injection ? */
- if (!opal_check_token(OPAL_PCI_ERR_INJECT)) {
- pr_warn("%s: Firmware doesn't support error injection\n",
- __func__);
- return -ENXIO;
- }
-
- /* Do error injection */
- ret = opal_pci_err_inject(phb->opal_id, pe->addr,
- type, func, addr, mask);
- if (ret != OPAL_SUCCESS) {
- pr_warn("%s: Failure %lld injecting error "
- "%d-%d to PHB#%x-PE#%x\n",
- __func__, ret, type, func,
- hose->global_number, pe->addr);
- return -EIO;
- }
-
- return 0;
-}
-
-static void ioda_eeh_hub_diag_common(struct OpalIoP7IOCErrorData *data)
-{
- /* GEM */
- if (data->gemXfir || data->gemRfir ||
- data->gemRirqfir || data->gemMask || data->gemRwof)
- pr_info(" GEM: %016llx %016llx %016llx %016llx %016llx\n",
- be64_to_cpu(data->gemXfir),
- be64_to_cpu(data->gemRfir),
- be64_to_cpu(data->gemRirqfir),
- be64_to_cpu(data->gemMask),
- be64_to_cpu(data->gemRwof));
-
- /* LEM */
- if (data->lemFir || data->lemErrMask ||
- data->lemAction0 || data->lemAction1 || data->lemWof)
- pr_info(" LEM: %016llx %016llx %016llx %016llx %016llx\n",
- be64_to_cpu(data->lemFir),
- be64_to_cpu(data->lemErrMask),
- be64_to_cpu(data->lemAction0),
- be64_to_cpu(data->lemAction1),
- be64_to_cpu(data->lemWof));
-}
-
-static void ioda_eeh_hub_diag(struct pci_controller *hose)
-{
- struct pnv_phb *phb = hose->private_data;
- struct OpalIoP7IOCErrorData *data = &phb->diag.hub_diag;
- long rc;
-
- rc = opal_pci_get_hub_diag_data(phb->hub_id, data, sizeof(*data));
- if (rc != OPAL_SUCCESS) {
- pr_warn("%s: Failed to get HUB#%llx diag-data (%ld)\n",
- __func__, phb->hub_id, rc);
- return;
- }
-
- switch (data->type) {
- case OPAL_P7IOC_DIAG_TYPE_RGC:
- pr_info("P7IOC diag-data for RGC\n\n");
- ioda_eeh_hub_diag_common(data);
- if (data->rgc.rgcStatus || data->rgc.rgcLdcp)
- pr_info(" RGC: %016llx %016llx\n",
- be64_to_cpu(data->rgc.rgcStatus),
- be64_to_cpu(data->rgc.rgcLdcp));
- break;
- case OPAL_P7IOC_DIAG_TYPE_BI:
- pr_info("P7IOC diag-data for BI %s\n\n",
- data->bi.biDownbound ? "Downbound" : "Upbound");
- ioda_eeh_hub_diag_common(data);
- if (data->bi.biLdcp0 || data->bi.biLdcp1 ||
- data->bi.biLdcp2 || data->bi.biFenceStatus)
- pr_info(" BI: %016llx %016llx %016llx %016llx\n",
- be64_to_cpu(data->bi.biLdcp0),
- be64_to_cpu(data->bi.biLdcp1),
- be64_to_cpu(data->bi.biLdcp2),
- be64_to_cpu(data->bi.biFenceStatus));
- break;
- case OPAL_P7IOC_DIAG_TYPE_CI:
- pr_info("P7IOC diag-data for CI Port %d\n\n",
- data->ci.ciPort);
- ioda_eeh_hub_diag_common(data);
- if (data->ci.ciPortStatus || data->ci.ciPortLdcp)
- pr_info(" CI: %016llx %016llx\n",
- be64_to_cpu(data->ci.ciPortStatus),
- be64_to_cpu(data->ci.ciPortLdcp));
- break;
- case OPAL_P7IOC_DIAG_TYPE_MISC:
- pr_info("P7IOC diag-data for MISC\n\n");
- ioda_eeh_hub_diag_common(data);
- break;
- case OPAL_P7IOC_DIAG_TYPE_I2C:
- pr_info("P7IOC diag-data for I2C\n\n");
- ioda_eeh_hub_diag_common(data);
- break;
- default:
- pr_warn("%s: Invalid type of HUB#%llx diag-data (%d)\n",
- __func__, phb->hub_id, data->type);
- }
-}
-
-static int ioda_eeh_get_pe(struct pci_controller *hose,
- u16 pe_no, struct eeh_pe **pe)
-{
- struct pnv_phb *phb = hose->private_data;
- struct pnv_ioda_pe *pnv_pe;
- struct eeh_pe *dev_pe;
- struct eeh_dev edev;
-
- /*
- * If PHB supports compound PE, to fetch
- * the master PE because slave PE is invisible
- * to EEH core.
- */
- pnv_pe = &phb->ioda.pe_array[pe_no];
- if (pnv_pe->flags & PNV_IODA_PE_SLAVE) {
- pnv_pe = pnv_pe->master;
- WARN_ON(!pnv_pe ||
- !(pnv_pe->flags & PNV_IODA_PE_MASTER));
- pe_no = pnv_pe->pe_number;
- }
-
- /* Find the PE according to PE# */
- memset(&edev, 0, sizeof(struct eeh_dev));
- edev.phb = hose;
- edev.pe_config_addr = pe_no;
- dev_pe = eeh_pe_get(&edev);
- if (!dev_pe)
- return -EEXIST;
-
- /* Freeze the (compound) PE */
- *pe = dev_pe;
- if (!(dev_pe->state & EEH_PE_ISOLATED))
- phb->freeze_pe(phb, pe_no);
-
- /*
- * At this point, we're sure the (compound) PE should
- * have been frozen. However, we still need poke until
- * hitting the frozen PE on top level.
- */
- dev_pe = dev_pe->parent;
- while (dev_pe && !(dev_pe->type & EEH_PE_PHB)) {
- int ret;
- int active_flags = (EEH_STATE_MMIO_ACTIVE |
- EEH_STATE_DMA_ACTIVE);
-
- ret = eeh_ops->get_state(dev_pe, NULL);
- if (ret <= 0 || (ret & active_flags) == active_flags) {
- dev_pe = dev_pe->parent;
- continue;
- }
-
- /* Frozen parent PE */
- *pe = dev_pe;
- if (!(dev_pe->state & EEH_PE_ISOLATED))
- phb->freeze_pe(phb, dev_pe->addr);
-
- /* Next one */
- dev_pe = dev_pe->parent;
- }
-
- return 0;
-}
-
-/**
- * ioda_eeh_next_error - Retrieve next error for EEH core to handle
- * @pe: The affected PE
- *
- * The function is expected to be called by EEH core while it gets
- * special EEH event (without binding PE). The function calls to
- * OPAL APIs for next error to handle. The informational error is
- * handled internally by platform. However, the dead IOC, dead PHB,
- * fenced PHB and frozen PE should be handled by EEH core eventually.
- */
-static int ioda_eeh_next_error(struct eeh_pe **pe)
-{
- struct pci_controller *hose;
- struct pnv_phb *phb;
- struct eeh_pe *phb_pe, *parent_pe;
- __be64 frozen_pe_no;
- __be16 err_type, severity;
- int active_flags = (EEH_STATE_MMIO_ACTIVE | EEH_STATE_DMA_ACTIVE);
- long rc;
- int state, ret = EEH_NEXT_ERR_NONE;
-
- /*
- * While running here, it's safe to purge the event queue.
- * And we should keep the cached OPAL notifier event sychronized
- * between the kernel and firmware.
- */
- eeh_remove_event(NULL, false);
- opal_notifier_update_evt(OPAL_EVENT_PCI_ERROR, 0x0ul);
-
- list_for_each_entry(hose, &hose_list, list_node) {
- /*
- * If the subordinate PCI buses of the PHB has been
- * removed or is exactly under error recovery, we
- * needn't take care of it any more.
- */
- phb = hose->private_data;
- phb_pe = eeh_phb_pe_get(hose);
- if (!phb_pe || (phb_pe->state & EEH_PE_ISOLATED))
- continue;
-
- rc = opal_pci_next_error(phb->opal_id,
- &frozen_pe_no, &err_type, &severity);
-
- /* If OPAL API returns error, we needn't proceed */
- if (rc != OPAL_SUCCESS) {
- pr_devel("%s: Invalid return value on "
- "PHB#%x (0x%lx) from opal_pci_next_error",
- __func__, hose->global_number, rc);
- continue;
- }
-
- /* If the PHB doesn't have error, stop processing */
- if (be16_to_cpu(err_type) == OPAL_EEH_NO_ERROR ||
- be16_to_cpu(severity) == OPAL_EEH_SEV_NO_ERROR) {
- pr_devel("%s: No error found on PHB#%x\n",
- __func__, hose->global_number);
- continue;
- }
-
- /*
- * Processing the error. We're expecting the error with
- * highest priority reported upon multiple errors on the
- * specific PHB.
- */
- pr_devel("%s: Error (%d, %d, %llu) on PHB#%x\n",
- __func__, be16_to_cpu(err_type), be16_to_cpu(severity),
- be64_to_cpu(frozen_pe_no), hose->global_number);
- switch (be16_to_cpu(err_type)) {
- case OPAL_EEH_IOC_ERROR:
- if (be16_to_cpu(severity) == OPAL_EEH_SEV_IOC_DEAD) {
- pr_err("EEH: dead IOC detected\n");
- ret = EEH_NEXT_ERR_DEAD_IOC;
- } else if (be16_to_cpu(severity) == OPAL_EEH_SEV_INF) {
- pr_info("EEH: IOC informative error "
- "detected\n");
- ioda_eeh_hub_diag(hose);
- ret = EEH_NEXT_ERR_NONE;
- }
-
- break;
- case OPAL_EEH_PHB_ERROR:
- if (be16_to_cpu(severity) == OPAL_EEH_SEV_PHB_DEAD) {
- *pe = phb_pe;
- pr_err("EEH: dead PHB#%x detected, "
- "location: %s\n",
- hose->global_number,
- eeh_pe_loc_get(phb_pe));
- ret = EEH_NEXT_ERR_DEAD_PHB;
- } else if (be16_to_cpu(severity) ==
- OPAL_EEH_SEV_PHB_FENCED) {
- *pe = phb_pe;
- pr_err("EEH: Fenced PHB#%x detected, "
- "location: %s\n",
- hose->global_number,
- eeh_pe_loc_get(phb_pe));
- ret = EEH_NEXT_ERR_FENCED_PHB;
- } else if (be16_to_cpu(severity) == OPAL_EEH_SEV_INF) {
- pr_info("EEH: PHB#%x informative error "
- "detected, location: %s\n",
- hose->global_number,
- eeh_pe_loc_get(phb_pe));
- ioda_eeh_phb_diag(phb_pe);
- pnv_pci_dump_phb_diag_data(hose, phb_pe->data);
- ret = EEH_NEXT_ERR_NONE;
- }
-
- break;
- case OPAL_EEH_PE_ERROR:
- /*
- * If we can't find the corresponding PE, we
- * just try to unfreeze.
- */
- if (ioda_eeh_get_pe(hose,
- be64_to_cpu(frozen_pe_no), pe)) {
- /* Try best to clear it */
- pr_info("EEH: Clear non-existing PHB#%x-PE#%llx\n",
- hose->global_number, frozen_pe_no);
- pr_info("EEH: PHB location: %s\n",
- eeh_pe_loc_get(phb_pe));
- opal_pci_eeh_freeze_clear(phb->opal_id, frozen_pe_no,
- OPAL_EEH_ACTION_CLEAR_FREEZE_ALL);
- ret = EEH_NEXT_ERR_NONE;
- } else if ((*pe)->state & EEH_PE_ISOLATED ||
- eeh_pe_passed(*pe)) {
- ret = EEH_NEXT_ERR_NONE;
- } else {
- pr_err("EEH: Frozen PE#%x on PHB#%x detected\n",
- (*pe)->addr, (*pe)->phb->global_number);
- pr_err("EEH: PE location: %s, PHB location: %s\n",
- eeh_pe_loc_get(*pe), eeh_pe_loc_get(phb_pe));
- ret = EEH_NEXT_ERR_FROZEN_PE;
- }
-
- break;
- default:
- pr_warn("%s: Unexpected error type %d\n",
- __func__, be16_to_cpu(err_type));
- }
-
- /*
- * EEH core will try recover from fenced PHB or
- * frozen PE. In the time for frozen PE, EEH core
- * enable IO path for that before collecting logs,
- * but it ruins the site. So we have to dump the
- * log in advance here.
- */
- if ((ret == EEH_NEXT_ERR_FROZEN_PE ||
- ret == EEH_NEXT_ERR_FENCED_PHB) &&
- !((*pe)->state & EEH_PE_ISOLATED)) {
- eeh_pe_state_mark(*pe, EEH_PE_ISOLATED);
- ioda_eeh_phb_diag(*pe);
-
- if (eeh_has_flag(EEH_EARLY_DUMP_LOG))
- pnv_pci_dump_phb_diag_data((*pe)->phb,
- (*pe)->data);
- }
-
- /*
- * We probably have the frozen parent PE out there and
- * we need have to handle frozen parent PE firstly.
- */
- if (ret == EEH_NEXT_ERR_FROZEN_PE) {
- parent_pe = (*pe)->parent;
- while (parent_pe) {
- /* Hit the ceiling ? */
- if (parent_pe->type & EEH_PE_PHB)
- break;
-
- /* Frozen parent PE ? */
- state = ioda_eeh_get_state(parent_pe);
- if (state > 0 &&
- (state & active_flags) != active_flags)
- *pe = parent_pe;
-
- /* Next parent level */
- parent_pe = parent_pe->parent;
- }
-
- /* We possibly migrate to another PE */
- eeh_pe_state_mark(*pe, EEH_PE_ISOLATED);
- }
-
- /*
- * If we have no errors on the specific PHB or only
- * informative error there, we continue poking it.
- * Otherwise, we need actions to be taken by upper
- * layer.
- */
- if (ret > EEH_NEXT_ERR_INF)
- break;
- }
-
- return ret;
-}
-
-struct pnv_eeh_ops ioda_eeh_ops = {
- .post_init = ioda_eeh_post_init,
- .set_option = ioda_eeh_set_option,
- .get_state = ioda_eeh_get_state,
- .reset = ioda_eeh_reset,
- .get_log = ioda_eeh_get_log,
- .configure_bridge = ioda_eeh_configure_bridge,
- .err_inject = ioda_eeh_err_inject,
- .next_error = ioda_eeh_next_error
-};
diff --git a/arch/powerpc/platforms/powernv/eeh-powernv.c b/arch/powerpc/platforms/powernv/eeh-powernv.c
index e261869adc86..ce738ab3d5a9 100644
--- a/arch/powerpc/platforms/powernv/eeh-powernv.c
+++ b/arch/powerpc/platforms/powernv/eeh-powernv.c
@@ -12,6 +12,7 @@
*/
#include <linux/atomic.h>
+#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/init.h>
@@ -38,12 +39,14 @@
#include "powernv.h"
#include "pci.h"
+static bool pnv_eeh_nb_init = false;
+
/**
- * powernv_eeh_init - EEH platform dependent initialization
+ * pnv_eeh_init - EEH platform dependent initialization
*
* EEH platform dependent initialization on powernv
*/
-static int powernv_eeh_init(void)
+static int pnv_eeh_init(void)
{
struct pci_controller *hose;
struct pnv_phb *phb;
@@ -85,37 +88,280 @@ static int powernv_eeh_init(void)
return 0;
}
+static int pnv_eeh_event(struct notifier_block *nb,
+ unsigned long events, void *change)
+{
+ uint64_t changed_evts = (uint64_t)change;
+
+ /*
+	 * We simply send a special EEH event if EEH has
+	 * been enabled, or clear pending events in
+	 * case we enable EEH soon.
+ */
+ if (!(changed_evts & OPAL_EVENT_PCI_ERROR) ||
+ !(events & OPAL_EVENT_PCI_ERROR))
+ return 0;
+
+ if (eeh_enabled())
+ eeh_send_failure_event(NULL);
+ else
+ opal_notifier_update_evt(OPAL_EVENT_PCI_ERROR, 0x0ul);
+
+ return 0;
+}
+
+static struct notifier_block pnv_eeh_nb = {
+ .notifier_call = pnv_eeh_event,
+ .next = NULL,
+ .priority = 0
+};
+
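The callback above only reacts when the PCI-error bit is set both in the changed-events mask and in the currently asserted events. A minimal standalone sketch of that double test, using a placeholder bit value rather than the real OPAL_EVENT_PCI_ERROR encoding:

#include <stdint.h>
#include <stdio.h>

#define EVT_PCI_ERROR	(1ull << 3)	/* placeholder, not the real OPAL bit */

static void eeh_event(uint64_t events, uint64_t changed)
{
	/* React only when the PCI-error bit changed AND is asserted */
	if (!(changed & EVT_PCI_ERROR) || !(events & EVT_PCI_ERROR)) {
		printf("ignored: events=%#llx changed=%#llx\n",
		       (unsigned long long)events,
		       (unsigned long long)changed);
		return;
	}

	printf("PCI error event: queue EEH recovery\n");
}

int main(void)
{
	eeh_event(0, EVT_PCI_ERROR);			/* bit cleared again: ignored */
	eeh_event(EVT_PCI_ERROR, 0);			/* unrelated change: ignored */
	eeh_event(EVT_PCI_ERROR, EVT_PCI_ERROR);	/* asserted and changed: handled */
	return 0;
}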
+#ifdef CONFIG_DEBUG_FS
+static ssize_t pnv_eeh_ei_write(struct file *filp,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct pci_controller *hose = filp->private_data;
+ struct eeh_dev *edev;
+ struct eeh_pe *pe;
+ int pe_no, type, func;
+ unsigned long addr, mask;
+ char buf[50];
+ int ret;
+
+ if (!eeh_ops || !eeh_ops->err_inject)
+ return -ENXIO;
+
+ /* Copy over argument buffer */
+ ret = simple_write_to_buffer(buf, sizeof(buf), ppos, user_buf, count);
+ if (!ret)
+ return -EFAULT;
+
+ /* Retrieve parameters */
+ ret = sscanf(buf, "%x:%x:%x:%lx:%lx",
+ &pe_no, &type, &func, &addr, &mask);
+ if (ret != 5)
+ return -EINVAL;
+
+ /* Retrieve PE */
+ edev = kzalloc(sizeof(*edev), GFP_KERNEL);
+ if (!edev)
+ return -ENOMEM;
+ edev->phb = hose;
+ edev->pe_config_addr = pe_no;
+ pe = eeh_pe_get(edev);
+ kfree(edev);
+ if (!pe)
+ return -ENODEV;
+
+ /* Do error injection */
+ ret = eeh_ops->err_inject(pe, type, func, addr, mask);
+ return ret < 0 ? ret : count;
+}
+
+static const struct file_operations pnv_eeh_ei_fops = {
+ .open = simple_open,
+ .llseek = no_llseek,
+ .write = pnv_eeh_ei_write,
+};
+
+static int pnv_eeh_dbgfs_set(void *data, int offset, u64 val)
+{
+ struct pci_controller *hose = data;
+ struct pnv_phb *phb = hose->private_data;
+
+ out_be64(phb->regs + offset, val);
+ return 0;
+}
+
+static int pnv_eeh_dbgfs_get(void *data, int offset, u64 *val)
+{
+ struct pci_controller *hose = data;
+ struct pnv_phb *phb = hose->private_data;
+
+ *val = in_be64(phb->regs + offset);
+ return 0;
+}
+
+static int pnv_eeh_outb_dbgfs_set(void *data, u64 val)
+{
+ return pnv_eeh_dbgfs_set(data, 0xD10, val);
+}
+
+static int pnv_eeh_outb_dbgfs_get(void *data, u64 *val)
+{
+ return pnv_eeh_dbgfs_get(data, 0xD10, val);
+}
+
+static int pnv_eeh_inbA_dbgfs_set(void *data, u64 val)
+{
+ return pnv_eeh_dbgfs_set(data, 0xD90, val);
+}
+
+static int pnv_eeh_inbA_dbgfs_get(void *data, u64 *val)
+{
+ return pnv_eeh_dbgfs_get(data, 0xD90, val);
+}
+
+static int pnv_eeh_inbB_dbgfs_set(void *data, u64 val)
+{
+ return pnv_eeh_dbgfs_set(data, 0xE10, val);
+}
+
+static int pnv_eeh_inbB_dbgfs_get(void *data, u64 *val)
+{
+ return pnv_eeh_dbgfs_get(data, 0xE10, val);
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(pnv_eeh_outb_dbgfs_ops, pnv_eeh_outb_dbgfs_get,
+ pnv_eeh_outb_dbgfs_set, "0x%llx\n");
+DEFINE_SIMPLE_ATTRIBUTE(pnv_eeh_inbA_dbgfs_ops, pnv_eeh_inbA_dbgfs_get,
+ pnv_eeh_inbA_dbgfs_set, "0x%llx\n");
+DEFINE_SIMPLE_ATTRIBUTE(pnv_eeh_inbB_dbgfs_ops, pnv_eeh_inbB_dbgfs_get,
+ pnv_eeh_inbB_dbgfs_set, "0x%llx\n");
+#endif /* CONFIG_DEBUG_FS */
+
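The err_injct file parses five colon-separated hex fields (pe_no:type:func:addr:mask), mirroring the sscanf() above, while err_injct_outbound/inboundA/inboundB take a single hex value through the simple-attribute helpers. A minimal user-space sketch of driving err_injct; the debugfs path and the field values are assumptions for illustration only, and the files themselves are only created later in pnv_eeh_post_init():

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* Hypothetical PHB debugfs directory; the real name depends on the PHB */
	const char *path = "/sys/kernel/debug/powerpc/PCI0000/err_injct";
	char buf[64];
	int fd, len;

	/* pe_no=0x1, type=0x0, func=0x5, addr=0x0, mask=0x0 -- example values only */
	len = snprintf(buf, sizeof(buf), "%x:%x:%x:%lx:%lx",
		       0x1, 0x0, 0x5, 0x0UL, 0x0UL);

	fd = open(path, O_WRONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (write(fd, buf, len) != len)
		perror("write");
	close(fd);
	return 0;
}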
/**
- * powernv_eeh_post_init - EEH platform dependent post initialization
+ * pnv_eeh_post_init - EEH platform dependent post initialization
*
* EEH platform dependent post initialization on powernv. When
* the function is called, the EEH PEs and devices should have
 * been built. If the I/O cache stuff has been built, EEH is
* ready to supply service.
*/
-static int powernv_eeh_post_init(void)
+static int pnv_eeh_post_init(void)
{
struct pci_controller *hose;
struct pnv_phb *phb;
int ret = 0;
+ /* Register OPAL event notifier */
+ if (!pnv_eeh_nb_init) {
+ ret = opal_notifier_register(&pnv_eeh_nb);
+ if (ret) {
+ pr_warn("%s: Can't register OPAL event notifier (%d)\n",
+ __func__, ret);
+ return ret;
+ }
+
+ pnv_eeh_nb_init = true;
+ }
+
list_for_each_entry(hose, &hose_list, list_node) {
phb = hose->private_data;
- if (phb->eeh_ops && phb->eeh_ops->post_init) {
- ret = phb->eeh_ops->post_init(hose);
- if (ret)
- break;
- }
+ /*
+ * If EEH is enabled, we're going to rely on that.
+		 * Otherwise, we fall back to the conventional mechanism
+		 * that clears the frozen PE during PCI config access.
+ */
+ if (eeh_enabled())
+ phb->flags |= PNV_PHB_FLAG_EEH;
+ else
+ phb->flags &= ~PNV_PHB_FLAG_EEH;
+
+ /* Create debugfs entries */
+#ifdef CONFIG_DEBUG_FS
+ if (phb->has_dbgfs || !phb->dbgfs)
+ continue;
+
+ phb->has_dbgfs = 1;
+ debugfs_create_file("err_injct", 0200,
+ phb->dbgfs, hose,
+ &pnv_eeh_ei_fops);
+
+ debugfs_create_file("err_injct_outbound", 0600,
+ phb->dbgfs, hose,
+ &pnv_eeh_outb_dbgfs_ops);
+ debugfs_create_file("err_injct_inboundA", 0600,
+ phb->dbgfs, hose,
+ &pnv_eeh_inbA_dbgfs_ops);
+ debugfs_create_file("err_injct_inboundB", 0600,
+ phb->dbgfs, hose,
+ &pnv_eeh_inbB_dbgfs_ops);
+#endif /* CONFIG_DEBUG_FS */
}
+
return ret;
}
+static int pnv_eeh_cap_start(struct pci_dn *pdn)
+{
+ u32 status;
+
+ if (!pdn)
+ return 0;
+
+ pnv_pci_cfg_read(pdn, PCI_STATUS, 2, &status);
+ if (!(status & PCI_STATUS_CAP_LIST))
+ return 0;
+
+ return PCI_CAPABILITY_LIST;
+}
+
+static int pnv_eeh_find_cap(struct pci_dn *pdn, int cap)
+{
+ int pos = pnv_eeh_cap_start(pdn);
+ int cnt = 48; /* Maximal number of capabilities */
+ u32 id;
+
+ if (!pos)
+ return 0;
+
+ while (cnt--) {
+ pnv_pci_cfg_read(pdn, pos, 1, &pos);
+ if (pos < 0x40)
+ break;
+
+ pos &= ~3;
+ pnv_pci_cfg_read(pdn, pos + PCI_CAP_LIST_ID, 1, &id);
+ if (id == 0xff)
+ break;
+
+ /* Found */
+ if (id == cap)
+ return pos;
+
+ /* Next one */
+ pos += PCI_CAP_LIST_NEXT;
+ }
+
+ return 0;
+}
+
+static int pnv_eeh_find_ecap(struct pci_dn *pdn, int cap)
+{
+ struct eeh_dev *edev = pdn_to_eeh_dev(pdn);
+ u32 header;
+ int pos = 256, ttl = (4096 - 256) / 8;
+
+ if (!edev || !edev->pcie_cap)
+ return 0;
+ if (pnv_pci_cfg_read(pdn, pos, 4, &header) != PCIBIOS_SUCCESSFUL)
+ return 0;
+ else if (!header)
+ return 0;
+
+ while (ttl-- > 0) {
+ if (PCI_EXT_CAP_ID(header) == cap && pos)
+ return pos;
+
+ pos = PCI_EXT_CAP_NEXT(header);
+ if (pos < 256)
+ break;
+
+ if (pnv_pci_cfg_read(pdn, pos, 4, &header) != PCIBIOS_SUCCESSFUL)
+ break;
+ }
+
+ return 0;
+}
+
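The reworked probe operates on pci_dn rather than pci_dev, so pnv_eeh_find_cap() walks the classic capability list by reading config space through pnv_pci_cfg_read() instead of calling pci_find_capability(); pnv_eeh_find_ecap() does the same over the PCIe extended list, starting at offset 0x100 with 4-byte headers. A standalone sketch of the classic walk over a fake config space (the fake contents are assumptions for illustration):

#include <stdint.h>
#include <stdio.h>

#define PCI_STATUS		0x06
#define PCI_STATUS_CAP_LIST	0x10
#define PCI_CAPABILITY_LIST	0x34
#define PCI_CAP_LIST_ID		0
#define PCI_CAP_LIST_NEXT	1
#define PCI_CAP_ID_EXP		0x10

static int find_cap(const uint8_t *cfg, uint8_t cap)
{
	int pos, cnt = 48;		/* same traversal bound as above */

	if (!(cfg[PCI_STATUS] & PCI_STATUS_CAP_LIST))
		return 0;

	pos = cfg[PCI_CAPABILITY_LIST];
	while (cnt-- && pos >= 0x40) {
		pos &= ~3;		/* capability pointers are dword aligned */
		if (cfg[pos + PCI_CAP_LIST_ID] == 0xff)
			break;
		if (cfg[pos + PCI_CAP_LIST_ID] == cap)
			return pos;
		pos = cfg[pos + PCI_CAP_LIST_NEXT];
	}

	return 0;
}

int main(void)
{
	uint8_t cfg[256] = { 0 };

	cfg[PCI_STATUS] = PCI_STATUS_CAP_LIST;
	cfg[PCI_CAPABILITY_LIST] = 0x40;	/* first capability at 0x40 */
	cfg[0x40] = PCI_CAP_ID_EXP;		/* pretend PCIe capability */
	cfg[0x41] = 0x00;			/* end of list */

	printf("PCIe capability at %#x\n", find_cap(cfg, PCI_CAP_ID_EXP));
	return 0;
}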
/**
- * powernv_eeh_dev_probe - Do probe on PCI device
- * @dev: PCI device
- * @flag: unused
+ * pnv_eeh_probe - Do probe on PCI device
+ * @pdn: PCI device node
+ * @data: unused
*
* When EEH module is installed during system boot, all PCI devices
* are checked one by one to see if it supports EEH. The function
@@ -129,12 +375,12 @@ static int powernv_eeh_post_init(void)
 * was possibly triggered by EEH core, the binding between EEH device
* and the PCI device isn't built yet.
*/
-static int powernv_eeh_dev_probe(struct pci_dev *dev, void *flag)
+static void *pnv_eeh_probe(struct pci_dn *pdn, void *data)
{
- struct pci_controller *hose = pci_bus_to_host(dev->bus);
+ struct pci_controller *hose = pdn->phb;
struct pnv_phb *phb = hose->private_data;
- struct device_node *dn = pci_device_to_OF_node(dev);
- struct eeh_dev *edev = of_node_to_eeh_dev(dn);
+ struct eeh_dev *edev = pdn_to_eeh_dev(pdn);
+ uint32_t pcie_flags;
int ret;
/*
@@ -143,40 +389,42 @@ static int powernv_eeh_dev_probe(struct pci_dev *dev, void *flag)
* the root bridge. So it's not reasonable to continue
* the probing.
*/
- if (!dn || !edev || edev->pe)
- return 0;
+ if (!edev || edev->pe)
+ return NULL;
/* Skip for PCI-ISA bridge */
- if ((dev->class >> 8) == PCI_CLASS_BRIDGE_ISA)
- return 0;
+ if ((pdn->class_code >> 8) == PCI_CLASS_BRIDGE_ISA)
+ return NULL;
/* Initialize eeh device */
- edev->class_code = dev->class;
+ edev->class_code = pdn->class_code;
edev->mode &= 0xFFFFFF00;
- if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE)
+ edev->pcix_cap = pnv_eeh_find_cap(pdn, PCI_CAP_ID_PCIX);
+ edev->pcie_cap = pnv_eeh_find_cap(pdn, PCI_CAP_ID_EXP);
+ edev->aer_cap = pnv_eeh_find_ecap(pdn, PCI_EXT_CAP_ID_ERR);
+ if ((edev->class_code >> 8) == PCI_CLASS_BRIDGE_PCI) {
edev->mode |= EEH_DEV_BRIDGE;
- edev->pcix_cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
- if (pci_is_pcie(dev)) {
- edev->pcie_cap = pci_pcie_cap(dev);
-
- if (pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT)
- edev->mode |= EEH_DEV_ROOT_PORT;
- else if (pci_pcie_type(dev) == PCI_EXP_TYPE_DOWNSTREAM)
- edev->mode |= EEH_DEV_DS_PORT;
-
- edev->aer_cap = pci_find_ext_capability(dev,
- PCI_EXT_CAP_ID_ERR);
+ if (edev->pcie_cap) {
+ pnv_pci_cfg_read(pdn, edev->pcie_cap + PCI_EXP_FLAGS,
+ 2, &pcie_flags);
+ pcie_flags = (pcie_flags & PCI_EXP_FLAGS_TYPE) >> 4;
+ if (pcie_flags == PCI_EXP_TYPE_ROOT_PORT)
+ edev->mode |= EEH_DEV_ROOT_PORT;
+ else if (pcie_flags == PCI_EXP_TYPE_DOWNSTREAM)
+ edev->mode |= EEH_DEV_DS_PORT;
+ }
}
- edev->config_addr = ((dev->bus->number << 8) | dev->devfn);
- edev->pe_config_addr = phb->bdfn_to_pe(phb, dev->bus, dev->devfn & 0xff);
+ edev->config_addr = (pdn->busno << 8) | (pdn->devfn);
+ edev->pe_config_addr = phb->ioda.pe_rmap[edev->config_addr];
/* Create PE */
ret = eeh_add_to_parent_pe(edev);
if (ret) {
- pr_warn("%s: Can't add PCI dev %s to parent PE (%d)\n",
- __func__, pci_name(dev), ret);
- return ret;
+ pr_warn("%s: Can't add PCI dev %04x:%02x:%02x.%01x to parent PE (%d)\n",
+ __func__, hose->global_number, pdn->busno,
+ PCI_SLOT(pdn->devfn), PCI_FUNC(pdn->devfn), ret);
+ return NULL;
}
/*
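The probe caches config_addr as (busno << 8) | devfn and then maps it to a PE number through the PHB's ioda.pe_rmap[] table. A minimal standalone sketch of that packing, worked through for an assumed example device at 01:05.0 (PCI_DEVFN() is reproduced locally so the snippet runs on its own):

#include <stdio.h>

#define PCI_DEVFN(slot, func)	((((slot) & 0x1f) << 3) | ((func) & 0x07))

int main(void)
{
	unsigned int busno = 0x01;
	unsigned int devfn = PCI_DEVFN(0x05, 0);	/* 0x28 */
	unsigned int config_addr = (busno << 8) | devfn;

	/* Prints: config_addr = 0x128 */
	printf("config_addr = %#x\n", config_addr);
	return 0;
}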
@@ -195,8 +443,10 @@ static int powernv_eeh_dev_probe(struct pci_dev *dev, void *flag)
* Broadcom Austin 4-ports NICs (14e4:1657)
* Broadcom Shiner 2-ports 10G NICs (14e4:168e)
*/
- if ((dev->vendor == PCI_VENDOR_ID_BROADCOM && dev->device == 0x1657) ||
- (dev->vendor == PCI_VENDOR_ID_BROADCOM && dev->device == 0x168e))
+ if ((pdn->vendor_id == PCI_VENDOR_ID_BROADCOM &&
+ pdn->device_id == 0x1657) ||
+ (pdn->vendor_id == PCI_VENDOR_ID_BROADCOM &&
+ pdn->device_id == 0x168e))
edev->pe->state |= EEH_PE_CFG_RESTRICTED;
/*
@@ -206,7 +456,8 @@ static int powernv_eeh_dev_probe(struct pci_dev *dev, void *flag)
* to PE reset.
*/
if (!edev->pe->bus)
- edev->pe->bus = dev->bus;
+ edev->pe->bus = pci_find_bus(hose->global_number,
+ pdn->busno);
/*
* Enable EEH explicitly so that we will do EEH check
@@ -217,11 +468,11 @@ static int powernv_eeh_dev_probe(struct pci_dev *dev, void *flag)
/* Save memory bars */
eeh_save_bars(edev);
- return 0;
+ return NULL;
}
/**
- * powernv_eeh_set_option - Initialize EEH or MMIO/DMA reenable
+ * pnv_eeh_set_option - Initialize EEH or MMIO/DMA reenable
* @pe: EEH PE
* @option: operation to be issued
*
@@ -229,36 +480,236 @@ static int powernv_eeh_dev_probe(struct pci_dev *dev, void *flag)
 * Currently, the following options are supported according to PAPR:
* Enable EEH, Disable EEH, Enable MMIO and Enable DMA
*/
-static int powernv_eeh_set_option(struct eeh_pe *pe, int option)
+static int pnv_eeh_set_option(struct eeh_pe *pe, int option)
{
struct pci_controller *hose = pe->phb;
struct pnv_phb *phb = hose->private_data;
- int ret = -EEXIST;
+ bool freeze_pe = false;
+ int opt, ret = 0;
+ s64 rc;
+
+ /* Sanity check on option */
+ switch (option) {
+ case EEH_OPT_DISABLE:
+ return -EPERM;
+ case EEH_OPT_ENABLE:
+ return 0;
+ case EEH_OPT_THAW_MMIO:
+ opt = OPAL_EEH_ACTION_CLEAR_FREEZE_MMIO;
+ break;
+ case EEH_OPT_THAW_DMA:
+ opt = OPAL_EEH_ACTION_CLEAR_FREEZE_DMA;
+ break;
+ case EEH_OPT_FREEZE_PE:
+ freeze_pe = true;
+ opt = OPAL_EEH_ACTION_SET_FREEZE_ALL;
+ break;
+ default:
+ pr_warn("%s: Invalid option %d\n", __func__, option);
+ return -EINVAL;
+ }
- /*
- * What we need do is pass it down for hardware
- * implementation to handle it.
- */
- if (phb->eeh_ops && phb->eeh_ops->set_option)
- ret = phb->eeh_ops->set_option(pe, option);
+	/* If the PHB supports compound PEs, let it handle the request */
+ if (freeze_pe) {
+ if (phb->freeze_pe) {
+ phb->freeze_pe(phb, pe->addr);
+ } else {
+ rc = opal_pci_eeh_freeze_set(phb->opal_id,
+ pe->addr, opt);
+ if (rc != OPAL_SUCCESS) {
+ pr_warn("%s: Failure %lld freezing "
+ "PHB#%x-PE#%x\n",
+ __func__, rc,
+ phb->hose->global_number, pe->addr);
+ ret = -EIO;
+ }
+ }
+ } else {
+ if (phb->unfreeze_pe) {
+ ret = phb->unfreeze_pe(phb, pe->addr, opt);
+ } else {
+ rc = opal_pci_eeh_freeze_clear(phb->opal_id,
+ pe->addr, opt);
+ if (rc != OPAL_SUCCESS) {
+ pr_warn("%s: Failure %lld enable %d "
+ "for PHB#%x-PE#%x\n",
+ __func__, rc, option,
+ phb->hose->global_number, pe->addr);
+ ret = -EIO;
+ }
+ }
+ }
return ret;
}
/**
- * powernv_eeh_get_pe_addr - Retrieve PE address
+ * pnv_eeh_get_pe_addr - Retrieve PE address
* @pe: EEH PE
*
 * Retrieve the PE address according to the given traditional
* PCI BDF (Bus/Device/Function) address.
*/
-static int powernv_eeh_get_pe_addr(struct eeh_pe *pe)
+static int pnv_eeh_get_pe_addr(struct eeh_pe *pe)
{
return pe->addr;
}
+static void pnv_eeh_get_phb_diag(struct eeh_pe *pe)
+{
+ struct pnv_phb *phb = pe->phb->private_data;
+ s64 rc;
+
+ rc = opal_pci_get_phb_diag_data2(phb->opal_id, pe->data,
+ PNV_PCI_DIAG_BUF_SIZE);
+ if (rc != OPAL_SUCCESS)
+ pr_warn("%s: Failure %lld getting PHB#%x diag-data\n",
+ __func__, rc, pe->phb->global_number);
+}
+
+static int pnv_eeh_get_phb_state(struct eeh_pe *pe)
+{
+ struct pnv_phb *phb = pe->phb->private_data;
+ u8 fstate;
+ __be16 pcierr;
+ s64 rc;
+ int result = 0;
+
+ rc = opal_pci_eeh_freeze_status(phb->opal_id,
+ pe->addr,
+ &fstate,
+ &pcierr,
+ NULL);
+ if (rc != OPAL_SUCCESS) {
+ pr_warn("%s: Failure %lld getting PHB#%x state\n",
+ __func__, rc, phb->hose->global_number);
+ return EEH_STATE_NOT_SUPPORT;
+ }
+
+ /*
+	 * Check PHB state. If the PHB is frozen for the
+	 * first time, dump the PHB diag-data.
+ */
+ if (be16_to_cpu(pcierr) != OPAL_EEH_PHB_ERROR) {
+ result = (EEH_STATE_MMIO_ACTIVE |
+ EEH_STATE_DMA_ACTIVE |
+ EEH_STATE_MMIO_ENABLED |
+ EEH_STATE_DMA_ENABLED);
+ } else if (!(pe->state & EEH_PE_ISOLATED)) {
+ eeh_pe_state_mark(pe, EEH_PE_ISOLATED);
+ pnv_eeh_get_phb_diag(pe);
+
+ if (eeh_has_flag(EEH_EARLY_DUMP_LOG))
+ pnv_pci_dump_phb_diag_data(pe->phb, pe->data);
+ }
+
+ return result;
+}
+
+static int pnv_eeh_get_pe_state(struct eeh_pe *pe)
+{
+ struct pnv_phb *phb = pe->phb->private_data;
+ u8 fstate;
+ __be16 pcierr;
+ s64 rc;
+ int result;
+
+ /*
+ * We don't clobber hardware frozen state until PE
+ * reset is completed. In order to keep EEH core
+ * moving forward, we have to return operational
+ * state during PE reset.
+ */
+ if (pe->state & EEH_PE_RESET) {
+ result = (EEH_STATE_MMIO_ACTIVE |
+ EEH_STATE_DMA_ACTIVE |
+ EEH_STATE_MMIO_ENABLED |
+ EEH_STATE_DMA_ENABLED);
+ return result;
+ }
+
+ /*
+ * Fetch PE state from hardware. If the PHB
+ * supports compound PE, let it handle that.
+ */
+ if (phb->get_pe_state) {
+ fstate = phb->get_pe_state(phb, pe->addr);
+ } else {
+ rc = opal_pci_eeh_freeze_status(phb->opal_id,
+ pe->addr,
+ &fstate,
+ &pcierr,
+ NULL);
+ if (rc != OPAL_SUCCESS) {
+ pr_warn("%s: Failure %lld getting PHB#%x-PE%x state\n",
+ __func__, rc, phb->hose->global_number,
+ pe->addr);
+ return EEH_STATE_NOT_SUPPORT;
+ }
+ }
+
+ /* Figure out state */
+ switch (fstate) {
+ case OPAL_EEH_STOPPED_NOT_FROZEN:
+ result = (EEH_STATE_MMIO_ACTIVE |
+ EEH_STATE_DMA_ACTIVE |
+ EEH_STATE_MMIO_ENABLED |
+ EEH_STATE_DMA_ENABLED);
+ break;
+ case OPAL_EEH_STOPPED_MMIO_FREEZE:
+ result = (EEH_STATE_DMA_ACTIVE |
+ EEH_STATE_DMA_ENABLED);
+ break;
+ case OPAL_EEH_STOPPED_DMA_FREEZE:
+ result = (EEH_STATE_MMIO_ACTIVE |
+ EEH_STATE_MMIO_ENABLED);
+ break;
+ case OPAL_EEH_STOPPED_MMIO_DMA_FREEZE:
+ result = 0;
+ break;
+ case OPAL_EEH_STOPPED_RESET:
+ result = EEH_STATE_RESET_ACTIVE;
+ break;
+ case OPAL_EEH_STOPPED_TEMP_UNAVAIL:
+ result = EEH_STATE_UNAVAILABLE;
+ break;
+ case OPAL_EEH_STOPPED_PERM_UNAVAIL:
+ result = EEH_STATE_NOT_SUPPORT;
+ break;
+ default:
+ result = EEH_STATE_NOT_SUPPORT;
+ pr_warn("%s: Invalid PHB#%x-PE#%x state %x\n",
+ __func__, phb->hose->global_number,
+ pe->addr, fstate);
+ }
+
+ /*
+	 * If the PHB supports compound PEs, freeze all
+	 * slave PEs for consistency.
+	 *
+	 * If the PE is switching to the frozen state for the
+	 * first time, dump the PHB diag-data.
+ */
+ if (!(result & EEH_STATE_NOT_SUPPORT) &&
+ !(result & EEH_STATE_UNAVAILABLE) &&
+ !(result & EEH_STATE_MMIO_ACTIVE) &&
+ !(result & EEH_STATE_DMA_ACTIVE) &&
+ !(pe->state & EEH_PE_ISOLATED)) {
+ if (phb->freeze_pe)
+ phb->freeze_pe(phb, pe->addr);
+
+ eeh_pe_state_mark(pe, EEH_PE_ISOLATED);
+ pnv_eeh_get_phb_diag(pe);
+
+ if (eeh_has_flag(EEH_EARLY_DUMP_LOG))
+ pnv_pci_dump_phb_diag_data(pe->phb, pe->data);
+ }
+
+ return result;
+}
+
/**
- * powernv_eeh_get_state - Retrieve PE state
+ * pnv_eeh_get_state - Retrieve PE state
* @pe: EEH PE
* @delay: delay while PE state is temporarily unavailable
*
@@ -267,64 +718,279 @@ static int powernv_eeh_get_pe_addr(struct eeh_pe *pe)
* we prefer passing down to hardware implementation to handle
* it.
*/
-static int powernv_eeh_get_state(struct eeh_pe *pe, int *delay)
+static int pnv_eeh_get_state(struct eeh_pe *pe, int *delay)
+{
+ int ret;
+
+ if (pe->type & EEH_PE_PHB)
+ ret = pnv_eeh_get_phb_state(pe);
+ else
+ ret = pnv_eeh_get_pe_state(pe);
+
+ if (!delay)
+ return ret;
+
+ /*
+ * If the PE state is temporarily unavailable,
+	 * inform the EEH core to delay for the default
+	 * period (1 second).
+ */
+ *delay = 0;
+ if (ret & EEH_STATE_UNAVAILABLE)
+ *delay = 1000;
+
+ return ret;
+}
+
+static s64 pnv_eeh_phb_poll(struct pnv_phb *phb)
+{
+ s64 rc = OPAL_HARDWARE;
+
+ while (1) {
+ rc = opal_pci_poll(phb->opal_id);
+ if (rc <= 0)
+ break;
+
+ if (system_state < SYSTEM_RUNNING)
+ udelay(1000 * rc);
+ else
+ msleep(rc);
+ }
+
+ return rc;
+}
+
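pnv_eeh_phb_poll() keeps calling opal_pci_poll() until it stops returning a positive value: a positive return is treated as a delay in milliseconds before polling again (udelay() while system_state is still below SYSTEM_RUNNING, msleep() otherwise), zero means the reset request completed, and a negative value is a failure. A standalone sketch of that contract with a scripted firmware stub (the scripted return values are assumptions for the model):

#include <stdio.h>

static int fake_opal_pci_poll(void)
{
	/* Scripted returns: still busy for 5 ms, then 2 ms, then done */
	static const int script[] = { 5, 2, 0 };
	static unsigned int i;

	return script[i++];
}

static int poll_until_done(void)
{
	int rc;

	while (1) {
		rc = fake_opal_pci_poll();
		if (rc <= 0)
			break;
		/* real code: msleep(rc), or udelay(1000 * rc) early in boot */
		printf("firmware busy, wait %d ms\n", rc);
	}

	return rc;
}

int main(void)
{
	printf("poll result: %d\n", poll_until_done());
	return 0;
}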
+int pnv_eeh_phb_reset(struct pci_controller *hose, int option)
{
- struct pci_controller *hose = pe->phb;
struct pnv_phb *phb = hose->private_data;
- int ret = EEH_STATE_NOT_SUPPORT;
+ s64 rc = OPAL_HARDWARE;
+
+ pr_debug("%s: Reset PHB#%x, option=%d\n",
+ __func__, hose->global_number, option);
+
+ /* Issue PHB complete reset request */
+ if (option == EEH_RESET_FUNDAMENTAL ||
+ option == EEH_RESET_HOT)
+ rc = opal_pci_reset(phb->opal_id,
+ OPAL_RESET_PHB_COMPLETE,
+ OPAL_ASSERT_RESET);
+ else if (option == EEH_RESET_DEACTIVATE)
+ rc = opal_pci_reset(phb->opal_id,
+ OPAL_RESET_PHB_COMPLETE,
+ OPAL_DEASSERT_RESET);
+ if (rc < 0)
+ goto out;
- if (phb->eeh_ops && phb->eeh_ops->get_state) {
- ret = phb->eeh_ops->get_state(pe);
+ /*
+ * Poll state of the PHB until the request is done
+ * successfully. The PHB reset is usually PHB complete
+ * reset followed by hot reset on root bus. So we also
+ * need the PCI bus settlement delay.
+ */
+ rc = pnv_eeh_phb_poll(phb);
+ if (option == EEH_RESET_DEACTIVATE) {
+ if (system_state < SYSTEM_RUNNING)
+ udelay(1000 * EEH_PE_RST_SETTLE_TIME);
+ else
+ msleep(EEH_PE_RST_SETTLE_TIME);
+ }
+out:
+ if (rc != OPAL_SUCCESS)
+ return -EIO;
- /*
- * If the PE state is temporarily unavailable,
- * to inform the EEH core delay for default
- * period (1 second)
- */
- if (delay) {
- *delay = 0;
- if (ret & EEH_STATE_UNAVAILABLE)
- *delay = 1000;
+ return 0;
+}
+
+static int pnv_eeh_root_reset(struct pci_controller *hose, int option)
+{
+ struct pnv_phb *phb = hose->private_data;
+ s64 rc = OPAL_HARDWARE;
+
+ pr_debug("%s: Reset PHB#%x, option=%d\n",
+ __func__, hose->global_number, option);
+
+ /*
+	 * During the reset deassert time, we needn't care about
+	 * the reset scope because the firmware does nothing
+ * for fundamental or hot reset during deassert phase.
+ */
+ if (option == EEH_RESET_FUNDAMENTAL)
+ rc = opal_pci_reset(phb->opal_id,
+ OPAL_RESET_PCI_FUNDAMENTAL,
+ OPAL_ASSERT_RESET);
+ else if (option == EEH_RESET_HOT)
+ rc = opal_pci_reset(phb->opal_id,
+ OPAL_RESET_PCI_HOT,
+ OPAL_ASSERT_RESET);
+ else if (option == EEH_RESET_DEACTIVATE)
+ rc = opal_pci_reset(phb->opal_id,
+ OPAL_RESET_PCI_HOT,
+ OPAL_DEASSERT_RESET);
+ if (rc < 0)
+ goto out;
+
+ /* Poll state of the PHB until the request is done */
+ rc = pnv_eeh_phb_poll(phb);
+ if (option == EEH_RESET_DEACTIVATE)
+ msleep(EEH_PE_RST_SETTLE_TIME);
+out:
+ if (rc != OPAL_SUCCESS)
+ return -EIO;
+
+ return 0;
+}
+
+static int pnv_eeh_bridge_reset(struct pci_dev *dev, int option)
+{
+ struct pci_dn *pdn = pci_get_pdn_by_devfn(dev->bus, dev->devfn);
+ struct eeh_dev *edev = pdn_to_eeh_dev(pdn);
+ int aer = edev ? edev->aer_cap : 0;
+ u32 ctrl;
+
+ pr_debug("%s: Reset PCI bus %04x:%02x with option %d\n",
+ __func__, pci_domain_nr(dev->bus),
+ dev->bus->number, option);
+
+ switch (option) {
+ case EEH_RESET_FUNDAMENTAL:
+ case EEH_RESET_HOT:
+ /* Don't report linkDown event */
+ if (aer) {
+ eeh_ops->read_config(pdn, aer + PCI_ERR_UNCOR_MASK,
+ 4, &ctrl);
+ ctrl |= PCI_ERR_UNC_SURPDN;
+ eeh_ops->write_config(pdn, aer + PCI_ERR_UNCOR_MASK,
+ 4, ctrl);
}
+
+ eeh_ops->read_config(pdn, PCI_BRIDGE_CONTROL, 2, &ctrl);
+ ctrl |= PCI_BRIDGE_CTL_BUS_RESET;
+ eeh_ops->write_config(pdn, PCI_BRIDGE_CONTROL, 2, ctrl);
+
+ msleep(EEH_PE_RST_HOLD_TIME);
+ break;
+ case EEH_RESET_DEACTIVATE:
+ eeh_ops->read_config(pdn, PCI_BRIDGE_CONTROL, 2, &ctrl);
+ ctrl &= ~PCI_BRIDGE_CTL_BUS_RESET;
+ eeh_ops->write_config(pdn, PCI_BRIDGE_CONTROL, 2, ctrl);
+
+ msleep(EEH_PE_RST_SETTLE_TIME);
+
+ /* Continue reporting linkDown event */
+ if (aer) {
+ eeh_ops->read_config(pdn, aer + PCI_ERR_UNCOR_MASK,
+ 4, &ctrl);
+ ctrl &= ~PCI_ERR_UNC_SURPDN;
+ eeh_ops->write_config(pdn, aer + PCI_ERR_UNCOR_MASK,
+ 4, ctrl);
+ }
+
+ break;
}
- return ret;
+ return 0;
+}
+
+void pnv_pci_reset_secondary_bus(struct pci_dev *dev)
+{
+ struct pci_controller *hose;
+
+ if (pci_is_root_bus(dev->bus)) {
+ hose = pci_bus_to_host(dev->bus);
+ pnv_eeh_root_reset(hose, EEH_RESET_HOT);
+ pnv_eeh_root_reset(hose, EEH_RESET_DEACTIVATE);
+ } else {
+ pnv_eeh_bridge_reset(dev, EEH_RESET_HOT);
+ pnv_eeh_bridge_reset(dev, EEH_RESET_DEACTIVATE);
+ }
}
/**
- * powernv_eeh_reset - Reset the specified PE
+ * pnv_eeh_reset - Reset the specified PE
* @pe: EEH PE
* @option: reset option
*
- * Reset the specified PE
+ * Do reset on the indicated PE. For PCI bus sensitive PE,
+ * we need to reset the parent p2p bridge. The PHB has to
+ * be reinitialized if the p2p bridge is root bridge. For
+ * PCI device sensitive PE, we will try to reset the device
+ * through FLR. For now, we don't have OPAL APIs to do HARD
+ * reset yet, so all resets are SOFT (HOT) resets.
*/
-static int powernv_eeh_reset(struct eeh_pe *pe, int option)
+static int pnv_eeh_reset(struct eeh_pe *pe, int option)
{
struct pci_controller *hose = pe->phb;
- struct pnv_phb *phb = hose->private_data;
- int ret = -EEXIST;
+ struct pci_bus *bus;
+ int ret;
+
+ /*
+ * For PHB reset, we always have complete reset. For those PEs whose
+ * primary bus derived from root complex (root bus) or root port
+ * (usually bus#1), we apply hot or fundamental reset on the root port.
+ * For other PEs, we always have hot reset on the PE primary bus.
+ *
+	 * Here, we have a different design from pHyp, which always clears
+	 * the frozen state during PE reset. The good idea here from benh
+	 * is to keep the frozen state until the PE reset is done completely
+	 * (until BAR restore). With the frozen state, HW drops illegal IO
+	 * or MMIO accesses, which can otherwise incur recursive frozen PEs
+	 * during PE reset. The side effect is that the EEH core has to clear
+	 * the frozen state explicitly after BAR restore.
+ */
+ if (pe->type & EEH_PE_PHB) {
+ ret = pnv_eeh_phb_reset(hose, option);
+ } else {
+ struct pnv_phb *phb;
+ s64 rc;
- if (phb->eeh_ops && phb->eeh_ops->reset)
- ret = phb->eeh_ops->reset(pe, option);
+ /*
+ * The frozen PE might be caused by PAPR error injection
+ * registers, which are expected to be cleared after hitting
+ * frozen PE as stated in the hardware spec. Unfortunately,
+ * that's not true on P7IOC. So we have to clear it manually
+ * to avoid recursive EEH errors during recovery.
+ */
+ phb = hose->private_data;
+ if (phb->model == PNV_PHB_MODEL_P7IOC &&
+ (option == EEH_RESET_HOT ||
+ option == EEH_RESET_FUNDAMENTAL)) {
+ rc = opal_pci_reset(phb->opal_id,
+ OPAL_RESET_PHB_ERROR,
+ OPAL_ASSERT_RESET);
+ if (rc != OPAL_SUCCESS) {
+ pr_warn("%s: Failure %lld clearing "
+ "error injection registers\n",
+ __func__, rc);
+ return -EIO;
+ }
+ }
+
+ bus = eeh_pe_bus_get(pe);
+ if (pci_is_root_bus(bus) ||
+ pci_is_root_bus(bus->parent))
+ ret = pnv_eeh_root_reset(hose, option);
+ else
+ ret = pnv_eeh_bridge_reset(bus->self, option);
+ }
return ret;
}
/**
- * powernv_eeh_wait_state - Wait for PE state
+ * pnv_eeh_wait_state - Wait for PE state
* @pe: EEH PE
* @max_wait: maximal period in microsecond
*
* Wait for the state of associated PE. It might take some time
* to retrieve the PE's state.
*/
-static int powernv_eeh_wait_state(struct eeh_pe *pe, int max_wait)
+static int pnv_eeh_wait_state(struct eeh_pe *pe, int max_wait)
{
int ret;
int mwait;
while (1) {
- ret = powernv_eeh_get_state(pe, &mwait);
+ ret = pnv_eeh_get_state(pe, &mwait);
/*
* If the PE's state is temporarily unavailable,
@@ -348,7 +1014,7 @@ static int powernv_eeh_wait_state(struct eeh_pe *pe, int max_wait)
}
/**
- * powernv_eeh_get_log - Retrieve error log
+ * pnv_eeh_get_log - Retrieve error log
* @pe: EEH PE
* @severity: temporary or permanent error log
* @drv_log: driver log to be combined with retrieved error log
@@ -356,41 +1022,30 @@ static int powernv_eeh_wait_state(struct eeh_pe *pe, int max_wait)
*
* Retrieve the temporary or permanent error from the PE.
*/
-static int powernv_eeh_get_log(struct eeh_pe *pe, int severity,
- char *drv_log, unsigned long len)
+static int pnv_eeh_get_log(struct eeh_pe *pe, int severity,
+ char *drv_log, unsigned long len)
{
- struct pci_controller *hose = pe->phb;
- struct pnv_phb *phb = hose->private_data;
- int ret = -EEXIST;
+ if (!eeh_has_flag(EEH_EARLY_DUMP_LOG))
+ pnv_pci_dump_phb_diag_data(pe->phb, pe->data);
- if (phb->eeh_ops && phb->eeh_ops->get_log)
- ret = phb->eeh_ops->get_log(pe, severity, drv_log, len);
-
- return ret;
+ return 0;
}
/**
- * powernv_eeh_configure_bridge - Configure PCI bridges in the indicated PE
+ * pnv_eeh_configure_bridge - Configure PCI bridges in the indicated PE
* @pe: EEH PE
*
* The function will be called to reconfigure the bridges included
 * in the specified PE so that the malfunctioning PE would be recovered
* again.
*/
-static int powernv_eeh_configure_bridge(struct eeh_pe *pe)
+static int pnv_eeh_configure_bridge(struct eeh_pe *pe)
{
- struct pci_controller *hose = pe->phb;
- struct pnv_phb *phb = hose->private_data;
- int ret = 0;
-
- if (phb->eeh_ops && phb->eeh_ops->configure_bridge)
- ret = phb->eeh_ops->configure_bridge(pe);
-
- return ret;
+ return 0;
}
/**
- * powernv_pe_err_inject - Inject specified error to the indicated PE
+ * pnv_pe_err_inject - Inject specified error to the indicated PE
* @pe: the indicated PE
* @type: error type
* @func: specific error type
@@ -401,22 +1056,52 @@ static int powernv_eeh_configure_bridge(struct eeh_pe *pe)
* determined by @type and @func, to the indicated PE for
* testing purpose.
*/
-static int powernv_eeh_err_inject(struct eeh_pe *pe, int type, int func,
- unsigned long addr, unsigned long mask)
+static int pnv_eeh_err_inject(struct eeh_pe *pe, int type, int func,
+ unsigned long addr, unsigned long mask)
{
struct pci_controller *hose = pe->phb;
struct pnv_phb *phb = hose->private_data;
- int ret = -EEXIST;
+ s64 rc;
+
+ /* Sanity check on error type */
+ if (type != OPAL_ERR_INJECT_TYPE_IOA_BUS_ERR &&
+ type != OPAL_ERR_INJECT_TYPE_IOA_BUS_ERR64) {
+ pr_warn("%s: Invalid error type %d\n",
+ __func__, type);
+ return -ERANGE;
+ }
- if (phb->eeh_ops && phb->eeh_ops->err_inject)
- ret = phb->eeh_ops->err_inject(pe, type, func, addr, mask);
+ if (func < OPAL_ERR_INJECT_FUNC_IOA_LD_MEM_ADDR ||
+ func > OPAL_ERR_INJECT_FUNC_IOA_DMA_WR_TARGET) {
+ pr_warn("%s: Invalid error function %d\n",
+ __func__, func);
+ return -ERANGE;
+ }
- return ret;
+ /* Firmware supports error injection ? */
+ if (!opal_check_token(OPAL_PCI_ERR_INJECT)) {
+ pr_warn("%s: Firmware doesn't support error injection\n",
+ __func__);
+ return -ENXIO;
+ }
+
+ /* Do error injection */
+ rc = opal_pci_err_inject(phb->opal_id, pe->addr,
+ type, func, addr, mask);
+ if (rc != OPAL_SUCCESS) {
+ pr_warn("%s: Failure %lld injecting error "
+ "%d-%d to PHB#%x-PE#%x\n",
+ __func__, rc, type, func,
+ hose->global_number, pe->addr);
+ return -EIO;
+ }
+
+ return 0;
}
-static inline bool powernv_eeh_cfg_blocked(struct device_node *dn)
+static inline bool pnv_eeh_cfg_blocked(struct pci_dn *pdn)
{
- struct eeh_dev *edev = of_node_to_eeh_dev(dn);
+ struct eeh_dev *edev = pdn_to_eeh_dev(pdn);
if (!edev || !edev->pe)
return false;
@@ -427,51 +1112,377 @@ static inline bool powernv_eeh_cfg_blocked(struct device_node *dn)
return false;
}
-static int powernv_eeh_read_config(struct device_node *dn,
- int where, int size, u32 *val)
+static int pnv_eeh_read_config(struct pci_dn *pdn,
+ int where, int size, u32 *val)
{
- if (powernv_eeh_cfg_blocked(dn)) {
+ if (!pdn)
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ if (pnv_eeh_cfg_blocked(pdn)) {
*val = 0xFFFFFFFF;
return PCIBIOS_SET_FAILED;
}
- return pnv_pci_cfg_read(dn, where, size, val);
+ return pnv_pci_cfg_read(pdn, where, size, val);
}
-static int powernv_eeh_write_config(struct device_node *dn,
- int where, int size, u32 val)
+static int pnv_eeh_write_config(struct pci_dn *pdn,
+ int where, int size, u32 val)
{
- if (powernv_eeh_cfg_blocked(dn))
+ if (!pdn)
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ if (pnv_eeh_cfg_blocked(pdn))
return PCIBIOS_SET_FAILED;
- return pnv_pci_cfg_write(dn, where, size, val);
+ return pnv_pci_cfg_write(pdn, where, size, val);
+}
+
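While pnv_eeh_cfg_blocked() reports that config space for the PE is currently off limits, the accessors above make reads return all-ones and report PCIBIOS_SET_FAILED instead of touching the hardware, which is the same pattern a driver sees from a surprise-removed device. A standalone sketch of that caller-visible behaviour (the PCIBIOS_* values are placeholders for the model):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PCIBIOS_SUCCESSFUL	0x00
#define PCIBIOS_SET_FAILED	0x03	/* placeholder; real value lives in <linux/pci.h> */

static int cfg_read(bool blocked, uint32_t *val)
{
	if (blocked) {
		*val = 0xFFFFFFFF;	/* what a driver sees for a dead device */
		return PCIBIOS_SET_FAILED;
	}

	*val = 0x12345678;		/* pretend config register contents */
	return PCIBIOS_SUCCESSFUL;
}

int main(void)
{
	uint32_t val;
	int rc;

	rc = cfg_read(true, &val);
	printf("blocked: rc=%d val=%#x\n", rc, val);
	rc = cfg_read(false, &val);
	printf("normal:  rc=%d val=%#x\n", rc, val);
	return 0;
}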
+static void pnv_eeh_dump_hub_diag_common(struct OpalIoP7IOCErrorData *data)
+{
+ /* GEM */
+ if (data->gemXfir || data->gemRfir ||
+ data->gemRirqfir || data->gemMask || data->gemRwof)
+ pr_info(" GEM: %016llx %016llx %016llx %016llx %016llx\n",
+ be64_to_cpu(data->gemXfir),
+ be64_to_cpu(data->gemRfir),
+ be64_to_cpu(data->gemRirqfir),
+ be64_to_cpu(data->gemMask),
+ be64_to_cpu(data->gemRwof));
+
+ /* LEM */
+ if (data->lemFir || data->lemErrMask ||
+ data->lemAction0 || data->lemAction1 || data->lemWof)
+ pr_info(" LEM: %016llx %016llx %016llx %016llx %016llx\n",
+ be64_to_cpu(data->lemFir),
+ be64_to_cpu(data->lemErrMask),
+ be64_to_cpu(data->lemAction0),
+ be64_to_cpu(data->lemAction1),
+ be64_to_cpu(data->lemWof));
+}
+
+static void pnv_eeh_get_and_dump_hub_diag(struct pci_controller *hose)
+{
+ struct pnv_phb *phb = hose->private_data;
+ struct OpalIoP7IOCErrorData *data = &phb->diag.hub_diag;
+ long rc;
+
+ rc = opal_pci_get_hub_diag_data(phb->hub_id, data, sizeof(*data));
+ if (rc != OPAL_SUCCESS) {
+ pr_warn("%s: Failed to get HUB#%llx diag-data (%ld)\n",
+ __func__, phb->hub_id, rc);
+ return;
+ }
+
+ switch (data->type) {
+ case OPAL_P7IOC_DIAG_TYPE_RGC:
+ pr_info("P7IOC diag-data for RGC\n\n");
+ pnv_eeh_dump_hub_diag_common(data);
+ if (data->rgc.rgcStatus || data->rgc.rgcLdcp)
+ pr_info(" RGC: %016llx %016llx\n",
+ be64_to_cpu(data->rgc.rgcStatus),
+ be64_to_cpu(data->rgc.rgcLdcp));
+ break;
+ case OPAL_P7IOC_DIAG_TYPE_BI:
+ pr_info("P7IOC diag-data for BI %s\n\n",
+ data->bi.biDownbound ? "Downbound" : "Upbound");
+ pnv_eeh_dump_hub_diag_common(data);
+ if (data->bi.biLdcp0 || data->bi.biLdcp1 ||
+ data->bi.biLdcp2 || data->bi.biFenceStatus)
+ pr_info(" BI: %016llx %016llx %016llx %016llx\n",
+ be64_to_cpu(data->bi.biLdcp0),
+ be64_to_cpu(data->bi.biLdcp1),
+ be64_to_cpu(data->bi.biLdcp2),
+ be64_to_cpu(data->bi.biFenceStatus));
+ break;
+ case OPAL_P7IOC_DIAG_TYPE_CI:
+ pr_info("P7IOC diag-data for CI Port %d\n\n",
+ data->ci.ciPort);
+ pnv_eeh_dump_hub_diag_common(data);
+ if (data->ci.ciPortStatus || data->ci.ciPortLdcp)
+ pr_info(" CI: %016llx %016llx\n",
+ be64_to_cpu(data->ci.ciPortStatus),
+ be64_to_cpu(data->ci.ciPortLdcp));
+ break;
+ case OPAL_P7IOC_DIAG_TYPE_MISC:
+ pr_info("P7IOC diag-data for MISC\n\n");
+ pnv_eeh_dump_hub_diag_common(data);
+ break;
+ case OPAL_P7IOC_DIAG_TYPE_I2C:
+ pr_info("P7IOC diag-data for I2C\n\n");
+ pnv_eeh_dump_hub_diag_common(data);
+ break;
+ default:
+ pr_warn("%s: Invalid type of HUB#%llx diag-data (%d)\n",
+ __func__, phb->hub_id, data->type);
+ }
+}
+
+static int pnv_eeh_get_pe(struct pci_controller *hose,
+ u16 pe_no, struct eeh_pe **pe)
+{
+ struct pnv_phb *phb = hose->private_data;
+ struct pnv_ioda_pe *pnv_pe;
+ struct eeh_pe *dev_pe;
+ struct eeh_dev edev;
+
+ /*
+	 * If the PHB supports compound PEs, fetch
+	 * the master PE because slave PEs are invisible
+	 * to the EEH core.
+ */
+ pnv_pe = &phb->ioda.pe_array[pe_no];
+ if (pnv_pe->flags & PNV_IODA_PE_SLAVE) {
+ pnv_pe = pnv_pe->master;
+ WARN_ON(!pnv_pe ||
+ !(pnv_pe->flags & PNV_IODA_PE_MASTER));
+ pe_no = pnv_pe->pe_number;
+ }
+
+ /* Find the PE according to PE# */
+ memset(&edev, 0, sizeof(struct eeh_dev));
+ edev.phb = hose;
+ edev.pe_config_addr = pe_no;
+ dev_pe = eeh_pe_get(&edev);
+ if (!dev_pe)
+ return -EEXIST;
+
+ /* Freeze the (compound) PE */
+ *pe = dev_pe;
+ if (!(dev_pe->state & EEH_PE_ISOLATED))
+ phb->freeze_pe(phb, pe_no);
+
+ /*
+ * At this point, we're sure the (compound) PE should
+	 * have been frozen. However, we still need to poke until
+	 * we hit the frozen PE at the top level.
+ */
+ dev_pe = dev_pe->parent;
+ while (dev_pe && !(dev_pe->type & EEH_PE_PHB)) {
+ int ret;
+ int active_flags = (EEH_STATE_MMIO_ACTIVE |
+ EEH_STATE_DMA_ACTIVE);
+
+ ret = eeh_ops->get_state(dev_pe, NULL);
+ if (ret <= 0 || (ret & active_flags) == active_flags) {
+ dev_pe = dev_pe->parent;
+ continue;
+ }
+
+ /* Frozen parent PE */
+ *pe = dev_pe;
+ if (!(dev_pe->state & EEH_PE_ISOLATED))
+ phb->freeze_pe(phb, dev_pe->addr);
+
+ /* Next one */
+ dev_pe = dev_pe->parent;
+ }
+
+ return 0;
}
/**
- * powernv_eeh_next_error - Retrieve next EEH error to handle
+ * pnv_eeh_next_error - Retrieve next EEH error to handle
* @pe: Affected PE
*
- * Using OPAL API, to retrieve next EEH error for EEH core to handle
+ * The function is expected to be called by EEH core while it gets
+ * special EEH event (without binding PE). The function calls to
+ * OPAL APIs for next error to handle. The informational error is
+ * handled internally by platform. However, the dead IOC, dead PHB,
+ * fenced PHB and frozen PE should be handled by EEH core eventually.
*/
-static int powernv_eeh_next_error(struct eeh_pe **pe)
+static int pnv_eeh_next_error(struct eeh_pe **pe)
{
struct pci_controller *hose;
- struct pnv_phb *phb = NULL;
+ struct pnv_phb *phb;
+ struct eeh_pe *phb_pe, *parent_pe;
+ __be64 frozen_pe_no;
+ __be16 err_type, severity;
+ int active_flags = (EEH_STATE_MMIO_ACTIVE | EEH_STATE_DMA_ACTIVE);
+ long rc;
+ int state, ret = EEH_NEXT_ERR_NONE;
+
+ /*
+ * While running here, it's safe to purge the event queue.
+	 * And we should keep the cached OPAL notifier event synchronized
+ * between the kernel and firmware.
+ */
+ eeh_remove_event(NULL, false);
+ opal_notifier_update_evt(OPAL_EVENT_PCI_ERROR, 0x0ul);
list_for_each_entry(hose, &hose_list, list_node) {
+ /*
+		 * If the subordinate PCI buses of the PHB have been
+		 * removed or are already under error recovery, we
+		 * needn't take care of it any more.
+ */
phb = hose->private_data;
- break;
- }
+ phb_pe = eeh_phb_pe_get(hose);
+ if (!phb_pe || (phb_pe->state & EEH_PE_ISOLATED))
+ continue;
+
+ rc = opal_pci_next_error(phb->opal_id,
+ &frozen_pe_no, &err_type, &severity);
+ if (rc != OPAL_SUCCESS) {
+ pr_devel("%s: Invalid return value on "
+ "PHB#%x (0x%lx) from opal_pci_next_error",
+ __func__, hose->global_number, rc);
+ continue;
+ }
+
+ /* If the PHB doesn't have error, stop processing */
+ if (be16_to_cpu(err_type) == OPAL_EEH_NO_ERROR ||
+ be16_to_cpu(severity) == OPAL_EEH_SEV_NO_ERROR) {
+ pr_devel("%s: No error found on PHB#%x\n",
+ __func__, hose->global_number);
+ continue;
+ }
+
+ /*
+		 * Process the error. We expect the error with the
+		 * highest priority to be reported when multiple errors
+		 * exist on the specific PHB.
+ */
+ pr_devel("%s: Error (%d, %d, %llu) on PHB#%x\n",
+ __func__, be16_to_cpu(err_type),
+ be16_to_cpu(severity), be64_to_cpu(frozen_pe_no),
+ hose->global_number);
+ switch (be16_to_cpu(err_type)) {
+ case OPAL_EEH_IOC_ERROR:
+ if (be16_to_cpu(severity) == OPAL_EEH_SEV_IOC_DEAD) {
+ pr_err("EEH: dead IOC detected\n");
+ ret = EEH_NEXT_ERR_DEAD_IOC;
+ } else if (be16_to_cpu(severity) == OPAL_EEH_SEV_INF) {
+ pr_info("EEH: IOC informative error "
+ "detected\n");
+ pnv_eeh_get_and_dump_hub_diag(hose);
+ ret = EEH_NEXT_ERR_NONE;
+ }
+
+ break;
+ case OPAL_EEH_PHB_ERROR:
+ if (be16_to_cpu(severity) == OPAL_EEH_SEV_PHB_DEAD) {
+ *pe = phb_pe;
+ pr_err("EEH: dead PHB#%x detected, "
+ "location: %s\n",
+ hose->global_number,
+ eeh_pe_loc_get(phb_pe));
+ ret = EEH_NEXT_ERR_DEAD_PHB;
+ } else if (be16_to_cpu(severity) ==
+ OPAL_EEH_SEV_PHB_FENCED) {
+ *pe = phb_pe;
+ pr_err("EEH: Fenced PHB#%x detected, "
+ "location: %s\n",
+ hose->global_number,
+ eeh_pe_loc_get(phb_pe));
+ ret = EEH_NEXT_ERR_FENCED_PHB;
+ } else if (be16_to_cpu(severity) == OPAL_EEH_SEV_INF) {
+ pr_info("EEH: PHB#%x informative error "
+ "detected, location: %s\n",
+ hose->global_number,
+ eeh_pe_loc_get(phb_pe));
+ pnv_eeh_get_phb_diag(phb_pe);
+ pnv_pci_dump_phb_diag_data(hose, phb_pe->data);
+ ret = EEH_NEXT_ERR_NONE;
+ }
+
+ break;
+ case OPAL_EEH_PE_ERROR:
+ /*
+ * If we can't find the corresponding PE, we
+ * just try to unfreeze.
+ */
+ if (pnv_eeh_get_pe(hose,
+ be64_to_cpu(frozen_pe_no), pe)) {
+ /* Try best to clear it */
+ pr_info("EEH: Clear non-existing PHB#%x-PE#%llx\n",
+ hose->global_number, frozen_pe_no);
+ pr_info("EEH: PHB location: %s\n",
+ eeh_pe_loc_get(phb_pe));
+ opal_pci_eeh_freeze_clear(phb->opal_id,
+ frozen_pe_no,
+ OPAL_EEH_ACTION_CLEAR_FREEZE_ALL);
+ ret = EEH_NEXT_ERR_NONE;
+ } else if ((*pe)->state & EEH_PE_ISOLATED ||
+ eeh_pe_passed(*pe)) {
+ ret = EEH_NEXT_ERR_NONE;
+ } else {
+ pr_err("EEH: Frozen PE#%x "
+ "on PHB#%x detected\n",
+ (*pe)->addr,
+ (*pe)->phb->global_number);
+ pr_err("EEH: PE location: %s, "
+ "PHB location: %s\n",
+ eeh_pe_loc_get(*pe),
+ eeh_pe_loc_get(phb_pe));
+ ret = EEH_NEXT_ERR_FROZEN_PE;
+ }
+
+ break;
+ default:
+ pr_warn("%s: Unexpected error type %d\n",
+ __func__, be16_to_cpu(err_type));
+ }
- if (phb && phb->eeh_ops->next_error)
- return phb->eeh_ops->next_error(pe);
+ /*
+		 * EEH core will try to recover from a fenced PHB or
+		 * frozen PE. For a frozen PE, EEH core enables the IO
+		 * path before collecting logs, which ruins the error
+		 * site. So we have to dump the log in advance here.
+ */
+ if ((ret == EEH_NEXT_ERR_FROZEN_PE ||
+ ret == EEH_NEXT_ERR_FENCED_PHB) &&
+ !((*pe)->state & EEH_PE_ISOLATED)) {
+ eeh_pe_state_mark(*pe, EEH_PE_ISOLATED);
+ pnv_eeh_get_phb_diag(*pe);
+
+ if (eeh_has_flag(EEH_EARLY_DUMP_LOG))
+ pnv_pci_dump_phb_diag_data((*pe)->phb,
+ (*pe)->data);
+ }
- return -EEXIST;
+ /*
+		 * We probably have a frozen parent PE out there and
+		 * we have to handle the frozen parent PE first.
+ */
+ if (ret == EEH_NEXT_ERR_FROZEN_PE) {
+ parent_pe = (*pe)->parent;
+ while (parent_pe) {
+ /* Hit the ceiling ? */
+ if (parent_pe->type & EEH_PE_PHB)
+ break;
+
+ /* Frozen parent PE ? */
+ state = eeh_ops->get_state(parent_pe, NULL);
+ if (state > 0 &&
+ (state & active_flags) != active_flags)
+ *pe = parent_pe;
+
+ /* Next parent level */
+ parent_pe = parent_pe->parent;
+ }
+
+ /* We possibly migrate to another PE */
+ eeh_pe_state_mark(*pe, EEH_PE_ISOLATED);
+ }
+
+ /*
+ * If we have no errors on the specific PHB or only
+ * informative error there, we continue poking it.
+ * Otherwise, we need actions to be taken by upper
+ * layer.
+ */
+ if (ret > EEH_NEXT_ERR_INF)
+ break;
+ }
+
+ return ret;
}
-static int powernv_eeh_restore_config(struct device_node *dn)
+static int pnv_eeh_restore_config(struct pci_dn *pdn)
{
- struct eeh_dev *edev = of_node_to_eeh_dev(dn);
+ struct eeh_dev *edev = pdn_to_eeh_dev(pdn);
struct pnv_phb *phb;
s64 ret;
@@ -490,24 +1501,23 @@ static int powernv_eeh_restore_config(struct device_node *dn)
return 0;
}
-static struct eeh_ops powernv_eeh_ops = {
+static struct eeh_ops pnv_eeh_ops = {
.name = "powernv",
- .init = powernv_eeh_init,
- .post_init = powernv_eeh_post_init,
- .of_probe = NULL,
- .dev_probe = powernv_eeh_dev_probe,
- .set_option = powernv_eeh_set_option,
- .get_pe_addr = powernv_eeh_get_pe_addr,
- .get_state = powernv_eeh_get_state,
- .reset = powernv_eeh_reset,
- .wait_state = powernv_eeh_wait_state,
- .get_log = powernv_eeh_get_log,
- .configure_bridge = powernv_eeh_configure_bridge,
- .err_inject = powernv_eeh_err_inject,
- .read_config = powernv_eeh_read_config,
- .write_config = powernv_eeh_write_config,
- .next_error = powernv_eeh_next_error,
- .restore_config = powernv_eeh_restore_config
+ .init = pnv_eeh_init,
+ .post_init = pnv_eeh_post_init,
+ .probe = pnv_eeh_probe,
+ .set_option = pnv_eeh_set_option,
+ .get_pe_addr = pnv_eeh_get_pe_addr,
+ .get_state = pnv_eeh_get_state,
+ .reset = pnv_eeh_reset,
+ .wait_state = pnv_eeh_wait_state,
+ .get_log = pnv_eeh_get_log,
+ .configure_bridge = pnv_eeh_configure_bridge,
+ .err_inject = pnv_eeh_err_inject,
+ .read_config = pnv_eeh_read_config,
+ .write_config = pnv_eeh_write_config,
+ .next_error = pnv_eeh_next_error,
+ .restore_config = pnv_eeh_restore_config
};
/**
@@ -521,7 +1531,7 @@ static int __init eeh_powernv_init(void)
int ret = -EINVAL;
eeh_set_pe_aux_size(PNV_PCI_DIAG_BUF_SIZE);
- ret = eeh_ops_register(&powernv_eeh_ops);
+ ret = eeh_ops_register(&pnv_eeh_ops);
if (!ret)
pr_info("EEH: PowerNV platform initialized\n");
else
diff --git a/arch/powerpc/platforms/powernv/opal-dump.c b/arch/powerpc/platforms/powernv/opal-dump.c
index 23260f7dfa7a..5aa9c1ce4de3 100644
--- a/arch/powerpc/platforms/powernv/opal-dump.c
+++ b/arch/powerpc/platforms/powernv/opal-dump.c
@@ -452,5 +452,6 @@ void __init opal_platform_dump_init(void)
return;
}
- opal_dump_resend_notification();
+ if (opal_check_token(OPAL_DUMP_RESEND))
+ opal_dump_resend_notification();
}
diff --git a/arch/powerpc/platforms/powernv/opal-elog.c b/arch/powerpc/platforms/powernv/opal-elog.c
index 518fe95dbf24..38ce757e5e2a 100644
--- a/arch/powerpc/platforms/powernv/opal-elog.c
+++ b/arch/powerpc/platforms/powernv/opal-elog.c
@@ -313,7 +313,8 @@ int __init opal_elog_init(void)
}
/* We are now ready to pull error logs from opal. */
- opal_resend_pending_logs();
+ if (opal_check_token(OPAL_ELOG_RESEND))
+ opal_resend_pending_logs();
return 0;
}
diff --git a/arch/powerpc/platforms/powernv/opal-flash.c b/arch/powerpc/platforms/powernv/opal-flash.c
index 5c21d9c07f45..4ec6219287fc 100644
--- a/arch/powerpc/platforms/powernv/opal-flash.c
+++ b/arch/powerpc/platforms/powernv/opal-flash.c
@@ -120,7 +120,11 @@ static struct image_header_t image_header;
static struct image_data_t image_data;
static struct validate_flash_t validate_flash_data;
static struct manage_flash_t manage_flash_data;
-static struct update_flash_t update_flash_data;
+
+/* Initialize update_flash_data status to No Operation */
+static struct update_flash_t update_flash_data = {
+ .status = FLASH_NO_OP,
+};
static DEFINE_MUTEX(image_data_mutex);
@@ -542,7 +546,7 @@ static struct attribute_group image_op_attr_group = {
.attrs = image_op_attrs,
};
-void __init opal_flash_init(void)
+void __init opal_flash_update_init(void)
{
int ret;
diff --git a/arch/powerpc/platforms/powernv/opal-nvram.c b/arch/powerpc/platforms/powernv/opal-nvram.c
index f9896fd5d04a..9db4398ded5d 100644
--- a/arch/powerpc/platforms/powernv/opal-nvram.c
+++ b/arch/powerpc/platforms/powernv/opal-nvram.c
@@ -16,6 +16,7 @@
#include <linux/of.h>
#include <asm/opal.h>
+#include <asm/nvram.h>
#include <asm/machdep.h>
static unsigned int nvram_size;
@@ -62,6 +63,15 @@ static ssize_t opal_nvram_write(char *buf, size_t count, loff_t *index)
return count;
}
+static int __init opal_nvram_init_log_partitions(void)
+{
+ /* Scan nvram for partitions */
+ nvram_scan_partitions();
+ nvram_init_oops_partition(0);
+ return 0;
+}
+machine_arch_initcall(powernv, opal_nvram_init_log_partitions);
+
void __init opal_nvram_init(void)
{
struct device_node *np;
diff --git a/arch/powerpc/platforms/powernv/opal-power.c b/arch/powerpc/platforms/powernv/opal-power.c
index 48bf5b080bcf..ac46c2c24f99 100644
--- a/arch/powerpc/platforms/powernv/opal-power.c
+++ b/arch/powerpc/platforms/powernv/opal-power.c
@@ -29,8 +29,9 @@ static int opal_power_control_event(struct notifier_block *nb,
switch (type) {
case SOFT_REBOOT:
- /* Fall through. The service processor is responsible for
- * bringing the machine back up */
+ pr_info("OPAL: reboot requested\n");
+ orderly_reboot();
+ break;
case SOFT_OFF:
pr_info("OPAL: poweroff requested\n");
orderly_poweroff(true);
diff --git a/arch/powerpc/platforms/powernv/opal-sensor.c b/arch/powerpc/platforms/powernv/opal-sensor.c
index 4ab67ef7abc9..655250499d18 100644
--- a/arch/powerpc/platforms/powernv/opal-sensor.c
+++ b/arch/powerpc/platforms/powernv/opal-sensor.c
@@ -46,18 +46,28 @@ int opal_get_sensor_data(u32 sensor_hndl, u32 *sensor_data)
mutex_lock(&opal_sensor_mutex);
ret = opal_sensor_read(sensor_hndl, token, &data);
- if (ret != OPAL_ASYNC_COMPLETION)
- goto out_token;
+ switch (ret) {
+ case OPAL_ASYNC_COMPLETION:
+ ret = opal_async_wait_response(token, &msg);
+ if (ret) {
+ pr_err("%s: Failed to wait for the async response, %d\n",
+ __func__, ret);
+ goto out_token;
+ }
- ret = opal_async_wait_response(token, &msg);
- if (ret) {
- pr_err("%s: Failed to wait for the async response, %d\n",
- __func__, ret);
- goto out_token;
- }
+ ret = opal_error_code(be64_to_cpu(msg.params[1]));
+ *sensor_data = be32_to_cpu(data);
+ break;
+
+ case OPAL_SUCCESS:
+ ret = 0;
+ *sensor_data = be32_to_cpu(data);
+ break;
- *sensor_data = be32_to_cpu(data);
- ret = be64_to_cpu(msg.params[1]);
+ default:
+ ret = opal_error_code(ret);
+ break;
+ }
out_token:
mutex_unlock(&opal_sensor_mutex);
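For context on how callers consume this: a minimal sketch of a driver-side read, assuming the sensor handle has already been obtained from the device tree (that lookup and the example function name are illustrative, not part of this patch):

    #include <linux/kernel.h>
    #include <asm/opal.h>

    static int example_read_sensor(u32 handle)
    {
            u32 val;
            int rc;

            rc = opal_get_sensor_data(handle, &val);
            if (rc == -EBUSY)
                    return rc;      /* firmware busy, caller may retry */
            if (rc)
                    return rc;      /* other errors already mapped to -errno */

            pr_info("sensor 0x%x reads %u\n", handle, val);
            return 0;
    }

With this change the caller no longer sees raw OPAL return codes: both the synchronous and asynchronous paths return 0 or a negative errno produced by opal_error_code().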
diff --git a/arch/powerpc/platforms/powernv/opal-wrappers.S b/arch/powerpc/platforms/powernv/opal-wrappers.S
index 0509bca5e830..a7ade94cdf87 100644
--- a/arch/powerpc/platforms/powernv/opal-wrappers.S
+++ b/arch/powerpc/platforms/powernv/opal-wrappers.S
@@ -9,11 +9,11 @@
* 2 of the License, or (at your option) any later version.
*/
+#include <linux/jump_label.h>
#include <asm/ppc_asm.h>
#include <asm/hvcall.h>
#include <asm/asm-offsets.h>
#include <asm/opal.h>
-#include <asm/jump_label.h>
.section ".text"
@@ -286,9 +286,12 @@ OPAL_CALL(opal_handle_hmi, OPAL_HANDLE_HMI);
OPAL_CALL(opal_slw_set_reg, OPAL_SLW_SET_REG);
OPAL_CALL(opal_register_dump_region, OPAL_REGISTER_DUMP_REGION);
OPAL_CALL(opal_unregister_dump_region, OPAL_UNREGISTER_DUMP_REGION);
-OPAL_CALL(opal_pci_set_phb_cxl_mode, OPAL_PCI_SET_PHB_CXL_MODE);
+OPAL_CALL(opal_pci_set_phb_cxl_mode, OPAL_PCI_SET_PHB_CAPI_MODE);
OPAL_CALL(opal_tpo_write, OPAL_WRITE_TPO);
OPAL_CALL(opal_tpo_read, OPAL_READ_TPO);
OPAL_CALL(opal_ipmi_send, OPAL_IPMI_SEND);
OPAL_CALL(opal_ipmi_recv, OPAL_IPMI_RECV);
OPAL_CALL(opal_i2c_request, OPAL_I2C_REQUEST);
+OPAL_CALL(opal_flash_read, OPAL_FLASH_READ);
+OPAL_CALL(opal_flash_write, OPAL_FLASH_WRITE);
+OPAL_CALL(opal_flash_erase, OPAL_FLASH_ERASE);
diff --git a/arch/powerpc/platforms/powernv/opal.c b/arch/powerpc/platforms/powernv/opal.c
index 18fd4e71c9c1..2241565b0739 100644
--- a/arch/powerpc/platforms/powernv/opal.c
+++ b/arch/powerpc/platforms/powernv/opal.c
@@ -23,6 +23,8 @@
#include <linux/kobject.h>
#include <linux/delay.h>
#include <linux/memblock.h>
+#include <linux/kthread.h>
+#include <linux/freezer.h>
#include <asm/machdep.h>
#include <asm/opal.h>
@@ -58,6 +60,7 @@ static struct atomic_notifier_head opal_msg_notifier_head[OPAL_MSG_TYPE_MAX];
static DEFINE_SPINLOCK(opal_notifier_lock);
static uint64_t last_notified_mask = 0x0ul;
static atomic_t opal_notifier_hold = ATOMIC_INIT(0);
+static uint32_t opal_heartbeat;
static void opal_reinit_cores(void)
{
@@ -302,23 +305,26 @@ void opal_notifier_disable(void)
* Opal message notifier based on message type. Allow subscribers to get
 * notified for a specific message type.
*/
-int opal_message_notifier_register(enum OpalMessageType msg_type,
+int opal_message_notifier_register(enum opal_msg_type msg_type,
struct notifier_block *nb)
{
- if (!nb) {
- pr_warning("%s: Invalid argument (%p)\n",
- __func__, nb);
- return -EINVAL;
- }
- if (msg_type > OPAL_MSG_TYPE_MAX) {
- pr_warning("%s: Invalid message type argument (%d)\n",
+ if (!nb || msg_type >= OPAL_MSG_TYPE_MAX) {
+ pr_warning("%s: Invalid arguments, msg_type:%d\n",
__func__, msg_type);
return -EINVAL;
}
+
return atomic_notifier_chain_register(
&opal_msg_notifier_head[msg_type], nb);
}
+int opal_message_notifier_unregister(enum opal_msg_type msg_type,
+ struct notifier_block *nb)
+{
+ return atomic_notifier_chain_unregister(
+ &opal_msg_notifier_head[msg_type], nb);
+}
+
static void opal_message_do_notify(uint32_t msg_type, void *msg)
{
/* notify subscribers */
@@ -351,7 +357,7 @@ static void opal_handle_message(void)
type = be32_to_cpu(msg.msg_type);
/* Sanity check */
- if (type > OPAL_MSG_TYPE_MAX) {
+ if (type >= OPAL_MSG_TYPE_MAX) {
pr_warning("%s: Unknown message type: %u\n", __func__, type);
return;
}
@@ -665,6 +671,9 @@ static void __init opal_dump_region_init(void)
uint64_t size;
int rc;
+ if (!opal_check_token(OPAL_REGISTER_DUMP_REGION))
+ return;
+
/* Register kernel log buffer */
addr = log_buf_addr_get();
if (addr == NULL)
@@ -684,6 +693,15 @@ static void __init opal_dump_region_init(void)
"rc = %d\n", rc);
}
+static void opal_flash_init(struct device_node *opal_node)
+{
+ struct device_node *np;
+
+ for_each_child_of_node(opal_node, np)
+ if (of_device_is_compatible(np, "ibm,opal-flash"))
+ of_platform_device_create(np, NULL, NULL);
+}
+
static void opal_ipmi_init(struct device_node *opal_node)
{
struct device_node *np;
@@ -741,6 +759,29 @@ static void __init opal_irq_init(struct device_node *dn)
}
}
+static int kopald(void *unused)
+{
+ set_freezable();
+ do {
+ try_to_freeze();
+ opal_poll_events(NULL);
+ msleep_interruptible(opal_heartbeat);
+ } while (!kthread_should_stop());
+
+ return 0;
+}
+
+static void opal_init_heartbeat(void)
+{
+ /* Old firmware; we assume the HVC heartbeat is sufficient */
+ if (of_property_read_u32(opal_node, "ibm,heartbeat-ms",
+ &opal_heartbeat) != 0)
+ opal_heartbeat = 0;
+
+ if (opal_heartbeat)
+ kthread_run(kopald, NULL, "kopald");
+}
+
static int __init opal_init(void)
{
struct device_node *np, *consoles;
@@ -769,6 +810,9 @@ static int __init opal_init(void)
/* Create i2c platform devices */
opal_i2c_create_devs();
+ /* Set up a heartbeat thread if requested by OPAL */
+ opal_init_heartbeat();
+
/* Find all OPAL interrupts and request them */
opal_irq_init(opal_node);
@@ -782,7 +826,7 @@ static int __init opal_init(void)
/* Setup error log interface */
rc = opal_elog_init();
/* Setup code update interface */
- opal_flash_init();
+ opal_flash_update_init();
/* Setup platform dump extract interface */
opal_platform_dump_init();
/* Setup system parameters interface */
@@ -791,8 +835,11 @@ static int __init opal_init(void)
opal_msglog_init();
}
+ /* Initialize OPAL IPMI backend */
opal_ipmi_init(opal_node);
+ opal_flash_init(opal_node);
+
return 0;
}
machine_subsys_initcall(powernv, opal_init);
@@ -823,13 +870,17 @@ void opal_shutdown(void)
}
/* Unregister memory dump region */
- opal_unregister_dump_region(OPAL_DUMP_REGION_LOG_BUF);
+ if (opal_check_token(OPAL_UNREGISTER_DUMP_REGION))
+ opal_unregister_dump_region(OPAL_DUMP_REGION_LOG_BUF);
}
/* Export this so that test modules can use it */
EXPORT_SYMBOL_GPL(opal_invalid_call);
EXPORT_SYMBOL_GPL(opal_ipmi_send);
EXPORT_SYMBOL_GPL(opal_ipmi_recv);
+EXPORT_SYMBOL_GPL(opal_flash_read);
+EXPORT_SYMBOL_GPL(opal_flash_write);
+EXPORT_SYMBOL_GPL(opal_flash_erase);
/* Convert a region of vmalloc memory to an opal sg list */
struct opal_sg_list *opal_vmalloc_to_sg_list(void *vmalloc_addr,
@@ -894,6 +945,25 @@ void opal_free_sg_list(struct opal_sg_list *sg)
}
}
+int opal_error_code(int rc)
+{
+ switch (rc) {
+ case OPAL_SUCCESS: return 0;
+
+ case OPAL_PARAMETER: return -EINVAL;
+ case OPAL_ASYNC_COMPLETION: return -EINPROGRESS;
+ case OPAL_BUSY_EVENT: return -EBUSY;
+ case OPAL_NO_MEM: return -ENOMEM;
+
+ case OPAL_UNSUPPORTED: return -EIO;
+ case OPAL_HARDWARE: return -EIO;
+ case OPAL_INTERNAL_ERROR: return -EIO;
+ default:
+ pr_err("%s: unexpected OPAL error %d\n", __func__, rc);
+ return -EIO;
+ }
+}
+
EXPORT_SYMBOL_GPL(opal_poll_events);
EXPORT_SYMBOL_GPL(opal_rtc_read);
EXPORT_SYMBOL_GPL(opal_rtc_write);
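The register/unregister pair added above is the interface that other powernv code (EPOW, HMI, memory-error handling) builds on. A minimal sketch of a subscriber, with the message type and names chosen purely for illustration:

    #include <linux/notifier.h>
    #include <linux/printk.h>
    #include <asm/opal.h>

    static int example_opal_msg_cb(struct notifier_block *nb,
                                   unsigned long msg_type, void *msg)
    {
            struct opal_msg *m = msg;

            /* OPAL message parameters are big-endian */
            pr_info("OPAL msg %lu, param0=0x%llx\n",
                    msg_type, be64_to_cpu(m->params[0]));
            return 0;
    }

    static struct notifier_block example_opal_msg_nb = {
            .notifier_call = example_opal_msg_cb,
    };

    /* subscribe, e.g. from an initcall: */
    /* opal_message_notifier_register(OPAL_MSG_EPOW, &example_opal_msg_nb); */

The new opal_message_notifier_unregister() is what allows code that can be torn down or unloaded to drop such a subscription again.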
diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c
index 6c9ff2b95119..920c252d1f49 100644
--- a/arch/powerpc/platforms/powernv/pci-ioda.c
+++ b/arch/powerpc/platforms/powernv/pci-ioda.c
@@ -44,6 +44,9 @@
#include "powernv.h"
#include "pci.h"
+/* 256M DMA window, 4K TCE pages, 8 bytes TCE */
+#define TCE32_TABLE_SIZE ((0x10000000 / 0x1000) * 8)
+
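Spelling out the arithmetic behind the constant: 0x10000000 / 0x1000 = 65536 TCEs (one per 4K page of the 256M window), and 65536 * 8 bytes per TCE gives a 512KB (0x80000-byte) table. Hoisting the definition to file scope lets the new SR-IOV release path below free tables of the same size.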
static void pe_level_printk(const struct pnv_ioda_pe *pe, const char *level,
const char *fmt, ...)
{
@@ -56,11 +59,18 @@ static void pe_level_printk(const struct pnv_ioda_pe *pe, const char *level,
vaf.fmt = fmt;
vaf.va = &args;
- if (pe->pdev)
+ if (pe->flags & PNV_IODA_PE_DEV)
strlcpy(pfix, dev_name(&pe->pdev->dev), sizeof(pfix));
- else
+ else if (pe->flags & (PNV_IODA_PE_BUS | PNV_IODA_PE_BUS_ALL))
sprintf(pfix, "%04x:%02x ",
pci_domain_nr(pe->pbus), pe->pbus->number);
+#ifdef CONFIG_PCI_IOV
+ else if (pe->flags & PNV_IODA_PE_VF)
+ sprintf(pfix, "%04x:%02x:%2x.%d",
+ pci_domain_nr(pe->parent_dev->bus),
+ (pe->rid & 0xff00) >> 8,
+ PCI_SLOT(pe->rid), PCI_FUNC(pe->rid));
+#endif /* CONFIG_PCI_IOV*/
printk("%spci %s: [PE# %.3d] %pV",
level, pfix, pe->pe_number, &vaf);
@@ -591,7 +601,7 @@ static int pnv_ioda_set_peltv(struct pnv_phb *phb,
bool is_add)
{
struct pnv_ioda_pe *slave;
- struct pci_dev *pdev;
+ struct pci_dev *pdev = NULL;
int ret;
/*
@@ -630,8 +640,12 @@ static int pnv_ioda_set_peltv(struct pnv_phb *phb,
if (pe->flags & (PNV_IODA_PE_BUS_ALL | PNV_IODA_PE_BUS))
pdev = pe->pbus->self;
- else
+ else if (pe->flags & PNV_IODA_PE_DEV)
pdev = pe->pdev->bus->self;
+#ifdef CONFIG_PCI_IOV
+ else if (pe->flags & PNV_IODA_PE_VF)
+ pdev = pe->parent_dev->bus->self;
+#endif /* CONFIG_PCI_IOV */
while (pdev) {
struct pci_dn *pdn = pci_get_pdn(pdev);
struct pnv_ioda_pe *parent;
@@ -649,6 +663,87 @@ static int pnv_ioda_set_peltv(struct pnv_phb *phb,
return 0;
}
+#ifdef CONFIG_PCI_IOV
+static int pnv_ioda_deconfigure_pe(struct pnv_phb *phb, struct pnv_ioda_pe *pe)
+{
+ struct pci_dev *parent;
+ uint8_t bcomp, dcomp, fcomp;
+ int64_t rc;
+ long rid_end, rid;
+
+ /* Currently, we only deconfigure VF PEs. The bus PE will always be there. */
+ if (pe->pbus) {
+ int count;
+
+ dcomp = OPAL_IGNORE_RID_DEVICE_NUMBER;
+ fcomp = OPAL_IGNORE_RID_FUNCTION_NUMBER;
+ parent = pe->pbus->self;
+ if (pe->flags & PNV_IODA_PE_BUS_ALL)
+ count = pe->pbus->busn_res.end - pe->pbus->busn_res.start + 1;
+ else
+ count = 1;
+
+ switch(count) {
+ case 1: bcomp = OpalPciBusAll; break;
+ case 2: bcomp = OpalPciBus7Bits; break;
+ case 4: bcomp = OpalPciBus6Bits; break;
+ case 8: bcomp = OpalPciBus5Bits; break;
+ case 16: bcomp = OpalPciBus4Bits; break;
+ case 32: bcomp = OpalPciBus3Bits; break;
+ default:
+ dev_err(&pe->pbus->dev, "Number of subordinate buses %d unsupported\n",
+ count);
+ /* Do an exact match only */
+ bcomp = OpalPciBusAll;
+ }
+ rid_end = pe->rid + (count << 8);
+ } else {
+ if (pe->flags & PNV_IODA_PE_VF)
+ parent = pe->parent_dev;
+ else
+ parent = pe->pdev->bus->self;
+ bcomp = OpalPciBusAll;
+ dcomp = OPAL_COMPARE_RID_DEVICE_NUMBER;
+ fcomp = OPAL_COMPARE_RID_FUNCTION_NUMBER;
+ rid_end = pe->rid + 1;
+ }
+
+ /* Clear the reverse map */
+ for (rid = pe->rid; rid < rid_end; rid++)
+ phb->ioda.pe_rmap[rid] = 0;
+
+ /* Release from all parents' PELT-V */
+ while (parent) {
+ struct pci_dn *pdn = pci_get_pdn(parent);
+ if (pdn && pdn->pe_number != IODA_INVALID_PE) {
+ rc = opal_pci_set_peltv(phb->opal_id, pdn->pe_number,
+ pe->pe_number, OPAL_REMOVE_PE_FROM_DOMAIN);
+ /* XXX What to do in case of error ? */
+ }
+ parent = parent->bus->self;
+ }
+
+ opal_pci_eeh_freeze_set(phb->opal_id, pe->pe_number,
+ OPAL_EEH_ACTION_CLEAR_FREEZE_ALL);
+
+ /* Disassociate PE in PELT */
+ rc = opal_pci_set_peltv(phb->opal_id, pe->pe_number,
+ pe->pe_number, OPAL_REMOVE_PE_FROM_DOMAIN);
+ if (rc)
+ pe_warn(pe, "OPAL error %ld remove self from PELTV\n", rc);
+ rc = opal_pci_set_pe(phb->opal_id, pe->pe_number, pe->rid,
+ bcomp, dcomp, fcomp, OPAL_UNMAP_PE);
+ if (rc)
+ pe_err(pe, "OPAL error %ld trying to setup PELT table\n", rc);
+
+ pe->pbus = NULL;
+ pe->pdev = NULL;
+ pe->parent_dev = NULL;
+
+ return 0;
+}
+#endif /* CONFIG_PCI_IOV */
+
static int pnv_ioda_configure_pe(struct pnv_phb *phb, struct pnv_ioda_pe *pe)
{
struct pci_dev *parent;
@@ -675,15 +770,19 @@ static int pnv_ioda_configure_pe(struct pnv_phb *phb, struct pnv_ioda_pe *pe)
case 16: bcomp = OpalPciBus4Bits; break;
case 32: bcomp = OpalPciBus3Bits; break;
default:
- pr_err("%s: Number of subordinate busses %d"
- " unsupported\n",
- pci_name(pe->pbus->self), count);
+ dev_err(&pe->pbus->dev, "Number of subordinate buses %d unsupported\n",
+ count);
/* Do an exact match only */
bcomp = OpalPciBusAll;
}
rid_end = pe->rid + (count << 8);
} else {
- parent = pe->pdev->bus->self;
+#ifdef CONFIG_PCI_IOV
+ if (pe->flags & PNV_IODA_PE_VF)
+ parent = pe->parent_dev;
+ else
+#endif /* CONFIG_PCI_IOV */
+ parent = pe->pdev->bus->self;
bcomp = OpalPciBusAll;
dcomp = OPAL_COMPARE_RID_DEVICE_NUMBER;
fcomp = OPAL_COMPARE_RID_FUNCTION_NUMBER;
@@ -774,6 +873,78 @@ static unsigned int pnv_ioda_dma_weight(struct pci_dev *dev)
return 10;
}
+#ifdef CONFIG_PCI_IOV
+static int pnv_pci_vf_resource_shift(struct pci_dev *dev, int offset)
+{
+ struct pci_dn *pdn = pci_get_pdn(dev);
+ int i;
+ struct resource *res, res2;
+ resource_size_t size;
+ u16 num_vfs;
+
+ if (!dev->is_physfn)
+ return -EINVAL;
+
+ /*
+ * "offset" is in VFs. The M64 windows are sized so that when they
+ * are segmented, each segment is the same size as the IOV BAR.
+ * Each segment is in a separate PE, and the high order bits of the
+ * address are the PE number. Therefore, each VF's BAR is in a
+ * separate PE, and changing the IOV BAR start address changes the
+ * range of PEs the VFs are in.
+ */
+ num_vfs = pdn->num_vfs;
+ for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
+ res = &dev->resource[i + PCI_IOV_RESOURCES];
+ if (!res->flags || !res->parent)
+ continue;
+
+ if (!pnv_pci_is_mem_pref_64(res->flags))
+ continue;
+
+ /*
+ * The actual IOV BAR range is determined by the start address
+ * and the actual size for num_vfs VFs BAR. This check is to
+ * make sure that after shifting, the range will not overlap
+ * with another device.
+ */
+ size = pci_iov_resource_size(dev, i + PCI_IOV_RESOURCES);
+ res2.flags = res->flags;
+ res2.start = res->start + (size * offset);
+ res2.end = res2.start + (size * num_vfs) - 1;
+
+ if (res2.end > res->end) {
+ dev_err(&dev->dev, "VF BAR%d: %pR would extend past %pR (trying to enable %d VFs shifted by %d)\n",
+ i, &res2, res, num_vfs, offset);
+ return -EBUSY;
+ }
+ }
+
+ /*
+ * Doing the shift below leaves a "hole" in /proc/iomem when
+ * offset is a positive value: it looks as if the device returned some
+ * MMIO range back to the system, although nobody can actually use it.
+ */
+ for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
+ res = &dev->resource[i + PCI_IOV_RESOURCES];
+ if (!res->flags || !res->parent)
+ continue;
+
+ if (!pnv_pci_is_mem_pref_64(res->flags))
+ continue;
+
+ size = pci_iov_resource_size(dev, i + PCI_IOV_RESOURCES);
+ res2 = *res;
+ res->start += size * offset;
+
+ dev_info(&dev->dev, "VF BAR%d: %pR shifted to %pR (enabling %d VFs shifted by %d)\n",
+ i, &res2, res, num_vfs, offset);
+ pci_update_resource(dev, i + PCI_IOV_RESOURCES);
+ }
+ return 0;
+}
+#endif /* CONFIG_PCI_IOV */
+
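A worked example of the shift above (all sizes illustrative): suppose pci_iov_resource_size() reports 1MB per VF and the PHB has 256 PEs, so the IOV BAR was earlier expanded to 256 x 1MB and the M64 window covering it is segmented into 256 pieces of 1MB, one per PE. With offset = 8 and num_vfs = 4, res->start moves up by 8MB and VF0..VF3 now sit in segments, and therefore PEs, 8..11; the first loop's check guarantees the shifted 4MB range still ends inside the original resource.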
#if 0
static struct pnv_ioda_pe *pnv_ioda_setup_dev_PE(struct pci_dev *dev)
{
@@ -857,7 +1028,6 @@ static void pnv_ioda_setup_same_PE(struct pci_bus *bus, struct pnv_ioda_pe *pe)
pci_name(dev));
continue;
}
- pdn->pcidev = dev;
pdn->pe_number = pe->pe_number;
pe->dma_weight += pnv_ioda_dma_weight(dev);
if ((pe->flags & PNV_IODA_PE_BUS_ALL) && dev->subordinate)
@@ -916,6 +1086,10 @@ static void pnv_ioda_setup_bus_PE(struct pci_bus *bus, int all)
return;
}
+ pe->tce32_table = kzalloc_node(sizeof(struct iommu_table),
+ GFP_KERNEL, hose->node);
+ pe->tce32_table->data = pe;
+
/* Associate it with all child devices */
pnv_ioda_setup_same_PE(bus, pe);
@@ -974,6 +1148,441 @@ static void pnv_pci_ioda_setup_PEs(void)
}
}
+#ifdef CONFIG_PCI_IOV
+static int pnv_pci_vf_release_m64(struct pci_dev *pdev)
+{
+ struct pci_bus *bus;
+ struct pci_controller *hose;
+ struct pnv_phb *phb;
+ struct pci_dn *pdn;
+ int i, j;
+
+ bus = pdev->bus;
+ hose = pci_bus_to_host(bus);
+ phb = hose->private_data;
+ pdn = pci_get_pdn(pdev);
+
+ for (i = 0; i < PCI_SRIOV_NUM_BARS; i++)
+ for (j = 0; j < M64_PER_IOV; j++) {
+ if (pdn->m64_wins[i][j] == IODA_INVALID_M64)
+ continue;
+ opal_pci_phb_mmio_enable(phb->opal_id,
+ OPAL_M64_WINDOW_TYPE, pdn->m64_wins[i][j], 0);
+ clear_bit(pdn->m64_wins[i][j], &phb->ioda.m64_bar_alloc);
+ pdn->m64_wins[i][j] = IODA_INVALID_M64;
+ }
+
+ return 0;
+}
+
+static int pnv_pci_vf_assign_m64(struct pci_dev *pdev, u16 num_vfs)
+{
+ struct pci_bus *bus;
+ struct pci_controller *hose;
+ struct pnv_phb *phb;
+ struct pci_dn *pdn;
+ unsigned int win;
+ struct resource *res;
+ int i, j;
+ int64_t rc;
+ int total_vfs;
+ resource_size_t size, start;
+ int pe_num;
+ int vf_groups;
+ int vf_per_group;
+
+ bus = pdev->bus;
+ hose = pci_bus_to_host(bus);
+ phb = hose->private_data;
+ pdn = pci_get_pdn(pdev);
+ total_vfs = pci_sriov_get_totalvfs(pdev);
+
+ /* Initialize the m64_wins to IODA_INVALID_M64 */
+ for (i = 0; i < PCI_SRIOV_NUM_BARS; i++)
+ for (j = 0; j < M64_PER_IOV; j++)
+ pdn->m64_wins[i][j] = IODA_INVALID_M64;
+
+ if (pdn->m64_per_iov == M64_PER_IOV) {
+ vf_groups = (num_vfs <= M64_PER_IOV) ? num_vfs: M64_PER_IOV;
+ vf_per_group = (num_vfs <= M64_PER_IOV)? 1:
+ roundup_pow_of_two(num_vfs) / pdn->m64_per_iov;
+ } else {
+ vf_groups = 1;
+ vf_per_group = 1;
+ }
+
+ for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
+ res = &pdev->resource[i + PCI_IOV_RESOURCES];
+ if (!res->flags || !res->parent)
+ continue;
+
+ if (!pnv_pci_is_mem_pref_64(res->flags))
+ continue;
+
+ for (j = 0; j < vf_groups; j++) {
+ do {
+ win = find_next_zero_bit(&phb->ioda.m64_bar_alloc,
+ phb->ioda.m64_bar_idx + 1, 0);
+
+ if (win >= phb->ioda.m64_bar_idx + 1)
+ goto m64_failed;
+ } while (test_and_set_bit(win, &phb->ioda.m64_bar_alloc));
+
+ pdn->m64_wins[i][j] = win;
+
+ if (pdn->m64_per_iov == M64_PER_IOV) {
+ size = pci_iov_resource_size(pdev,
+ PCI_IOV_RESOURCES + i);
+ size = size * vf_per_group;
+ start = res->start + size * j;
+ } else {
+ size = resource_size(res);
+ start = res->start;
+ }
+
+ /* Map the M64 here */
+ if (pdn->m64_per_iov == M64_PER_IOV) {
+ pe_num = pdn->offset + j;
+ rc = opal_pci_map_pe_mmio_window(phb->opal_id,
+ pe_num, OPAL_M64_WINDOW_TYPE,
+ pdn->m64_wins[i][j], 0);
+ }
+
+ rc = opal_pci_set_phb_mem_window(phb->opal_id,
+ OPAL_M64_WINDOW_TYPE,
+ pdn->m64_wins[i][j],
+ start,
+ 0, /* unused */
+ size);
+
+
+ if (rc != OPAL_SUCCESS) {
+ dev_err(&pdev->dev, "Failed to map M64 window #%d: %lld\n",
+ win, rc);
+ goto m64_failed;
+ }
+
+ if (pdn->m64_per_iov == M64_PER_IOV)
+ rc = opal_pci_phb_mmio_enable(phb->opal_id,
+ OPAL_M64_WINDOW_TYPE, pdn->m64_wins[i][j], 2);
+ else
+ rc = opal_pci_phb_mmio_enable(phb->opal_id,
+ OPAL_M64_WINDOW_TYPE, pdn->m64_wins[i][j], 1);
+
+ if (rc != OPAL_SUCCESS) {
+ dev_err(&pdev->dev, "Failed to enable M64 window #%d: %llx\n",
+ win, rc);
+ goto m64_failed;
+ }
+ }
+ }
+ return 0;
+
+m64_failed:
+ pnv_pci_vf_release_m64(pdev);
+ return -EBUSY;
+}
+
+static void pnv_pci_ioda2_release_dma_pe(struct pci_dev *dev, struct pnv_ioda_pe *pe)
+{
+ struct pci_bus *bus;
+ struct pci_controller *hose;
+ struct pnv_phb *phb;
+ struct iommu_table *tbl;
+ unsigned long addr;
+ int64_t rc;
+
+ bus = dev->bus;
+ hose = pci_bus_to_host(bus);
+ phb = hose->private_data;
+ tbl = pe->tce32_table;
+ addr = tbl->it_base;
+
+ opal_pci_map_pe_dma_window(phb->opal_id, pe->pe_number,
+ pe->pe_number << 1, 1, __pa(addr),
+ 0, 0x1000);
+
+ rc = opal_pci_map_pe_dma_window_real(pe->phb->opal_id,
+ pe->pe_number,
+ (pe->pe_number << 1) + 1,
+ pe->tce_bypass_base,
+ 0);
+ if (rc)
+ pe_warn(pe, "OPAL error %ld release DMA window\n", rc);
+
+ iommu_free_table(tbl, of_node_full_name(dev->dev.of_node));
+ free_pages(addr, get_order(TCE32_TABLE_SIZE));
+ pe->tce32_table = NULL;
+}
+
+static void pnv_ioda_release_vf_PE(struct pci_dev *pdev, u16 num_vfs)
+{
+ struct pci_bus *bus;
+ struct pci_controller *hose;
+ struct pnv_phb *phb;
+ struct pnv_ioda_pe *pe, *pe_n;
+ struct pci_dn *pdn;
+ u16 vf_index;
+ int64_t rc;
+
+ bus = pdev->bus;
+ hose = pci_bus_to_host(bus);
+ phb = hose->private_data;
+ pdn = pci_get_pdn(pdev);
+
+ if (!pdev->is_physfn)
+ return;
+
+ if (pdn->m64_per_iov == M64_PER_IOV && num_vfs > M64_PER_IOV) {
+ int vf_group;
+ int vf_per_group;
+ int vf_index1;
+
+ vf_per_group = roundup_pow_of_two(num_vfs) / pdn->m64_per_iov;
+
+ for (vf_group = 0; vf_group < M64_PER_IOV; vf_group++)
+ for (vf_index = vf_group * vf_per_group;
+ vf_index < (vf_group + 1) * vf_per_group &&
+ vf_index < num_vfs;
+ vf_index++)
+ for (vf_index1 = vf_group * vf_per_group;
+ vf_index1 < (vf_group + 1) * vf_per_group &&
+ vf_index1 < num_vfs;
+ vf_index1++){
+
+ rc = opal_pci_set_peltv(phb->opal_id,
+ pdn->offset + vf_index,
+ pdn->offset + vf_index1,
+ OPAL_REMOVE_PE_FROM_DOMAIN);
+
+ if (rc)
+ dev_warn(&pdev->dev, "%s: Failed to unlink same group PE#%d(%lld)\n",
+ __func__,
+ pdn->offset + vf_index1, rc);
+ }
+ }
+
+ list_for_each_entry_safe(pe, pe_n, &phb->ioda.pe_list, list) {
+ if (pe->parent_dev != pdev)
+ continue;
+
+ pnv_pci_ioda2_release_dma_pe(pdev, pe);
+
+ /* Remove from list */
+ mutex_lock(&phb->ioda.pe_list_mutex);
+ list_del(&pe->list);
+ mutex_unlock(&phb->ioda.pe_list_mutex);
+
+ pnv_ioda_deconfigure_pe(phb, pe);
+
+ pnv_ioda_free_pe(phb, pe->pe_number);
+ }
+}
+
+void pnv_pci_sriov_disable(struct pci_dev *pdev)
+{
+ struct pci_bus *bus;
+ struct pci_controller *hose;
+ struct pnv_phb *phb;
+ struct pci_dn *pdn;
+ struct pci_sriov *iov;
+ u16 num_vfs;
+
+ bus = pdev->bus;
+ hose = pci_bus_to_host(bus);
+ phb = hose->private_data;
+ pdn = pci_get_pdn(pdev);
+ iov = pdev->sriov;
+ num_vfs = pdn->num_vfs;
+
+ /* Release VF PEs */
+ pnv_ioda_release_vf_PE(pdev, num_vfs);
+
+ if (phb->type == PNV_PHB_IODA2) {
+ if (pdn->m64_per_iov == 1)
+ pnv_pci_vf_resource_shift(pdev, -pdn->offset);
+
+ /* Release M64 windows */
+ pnv_pci_vf_release_m64(pdev);
+
+ /* Release PE numbers */
+ bitmap_clear(phb->ioda.pe_alloc, pdn->offset, num_vfs);
+ pdn->offset = 0;
+ }
+}
+
+static void pnv_pci_ioda2_setup_dma_pe(struct pnv_phb *phb,
+ struct pnv_ioda_pe *pe);
+static void pnv_ioda_setup_vf_PE(struct pci_dev *pdev, u16 num_vfs)
+{
+ struct pci_bus *bus;
+ struct pci_controller *hose;
+ struct pnv_phb *phb;
+ struct pnv_ioda_pe *pe;
+ int pe_num;
+ u16 vf_index;
+ struct pci_dn *pdn;
+ int64_t rc;
+
+ bus = pdev->bus;
+ hose = pci_bus_to_host(bus);
+ phb = hose->private_data;
+ pdn = pci_get_pdn(pdev);
+
+ if (!pdev->is_physfn)
+ return;
+
+ /* Reserve PE for each VF */
+ for (vf_index = 0; vf_index < num_vfs; vf_index++) {
+ pe_num = pdn->offset + vf_index;
+
+ pe = &phb->ioda.pe_array[pe_num];
+ pe->pe_number = pe_num;
+ pe->phb = phb;
+ pe->flags = PNV_IODA_PE_VF;
+ pe->pbus = NULL;
+ pe->parent_dev = pdev;
+ pe->tce32_seg = -1;
+ pe->mve_number = -1;
+ pe->rid = (pci_iov_virtfn_bus(pdev, vf_index) << 8) |
+ pci_iov_virtfn_devfn(pdev, vf_index);
+
+ pe_info(pe, "VF %04d:%02d:%02d.%d associated with PE#%d\n",
+ hose->global_number, pdev->bus->number,
+ PCI_SLOT(pci_iov_virtfn_devfn(pdev, vf_index)),
+ PCI_FUNC(pci_iov_virtfn_devfn(pdev, vf_index)), pe_num);
+
+ if (pnv_ioda_configure_pe(phb, pe)) {
+ /* XXX What do we do here ? */
+ if (pe_num)
+ pnv_ioda_free_pe(phb, pe_num);
+ pe->pdev = NULL;
+ continue;
+ }
+
+ pe->tce32_table = kzalloc_node(sizeof(struct iommu_table),
+ GFP_KERNEL, hose->node);
+ pe->tce32_table->data = pe;
+
+ /* Put PE to the list */
+ mutex_lock(&phb->ioda.pe_list_mutex);
+ list_add_tail(&pe->list, &phb->ioda.pe_list);
+ mutex_unlock(&phb->ioda.pe_list_mutex);
+
+ pnv_pci_ioda2_setup_dma_pe(phb, pe);
+ }
+
+ if (pdn->m64_per_iov == M64_PER_IOV && num_vfs > M64_PER_IOV) {
+ int vf_group;
+ int vf_per_group;
+ int vf_index1;
+
+ vf_per_group = roundup_pow_of_two(num_vfs) / pdn->m64_per_iov;
+
+ for (vf_group = 0; vf_group < M64_PER_IOV; vf_group++) {
+ for (vf_index = vf_group * vf_per_group;
+ vf_index < (vf_group + 1) * vf_per_group &&
+ vf_index < num_vfs;
+ vf_index++) {
+ for (vf_index1 = vf_group * vf_per_group;
+ vf_index1 < (vf_group + 1) * vf_per_group &&
+ vf_index1 < num_vfs;
+ vf_index1++) {
+
+ rc = opal_pci_set_peltv(phb->opal_id,
+ pdn->offset + vf_index,
+ pdn->offset + vf_index1,
+ OPAL_ADD_PE_TO_DOMAIN);
+
+ if (rc)
+ dev_warn(&pdev->dev, "%s: Failed to link same group PE#%d(%lld)\n",
+ __func__,
+ pdn->offset + vf_index1, rc);
+ }
+ }
+ }
+ }
+}
+
+int pnv_pci_sriov_enable(struct pci_dev *pdev, u16 num_vfs)
+{
+ struct pci_bus *bus;
+ struct pci_controller *hose;
+ struct pnv_phb *phb;
+ struct pci_dn *pdn;
+ int ret;
+
+ bus = pdev->bus;
+ hose = pci_bus_to_host(bus);
+ phb = hose->private_data;
+ pdn = pci_get_pdn(pdev);
+
+ if (phb->type == PNV_PHB_IODA2) {
+ /* Calculate available PE for required VFs */
+ mutex_lock(&phb->ioda.pe_alloc_mutex);
+ pdn->offset = bitmap_find_next_zero_area(
+ phb->ioda.pe_alloc, phb->ioda.total_pe,
+ 0, num_vfs, 0);
+ if (pdn->offset >= phb->ioda.total_pe) {
+ mutex_unlock(&phb->ioda.pe_alloc_mutex);
+ dev_info(&pdev->dev, "Failed to enable VF%d\n", num_vfs);
+ pdn->offset = 0;
+ return -EBUSY;
+ }
+ bitmap_set(phb->ioda.pe_alloc, pdn->offset, num_vfs);
+ pdn->num_vfs = num_vfs;
+ mutex_unlock(&phb->ioda.pe_alloc_mutex);
+
+ /* Assign M64 window accordingly */
+ ret = pnv_pci_vf_assign_m64(pdev, num_vfs);
+ if (ret) {
+ dev_info(&pdev->dev, "Not enough M64 window resources\n");
+ goto m64_failed;
+ }
+
+ /*
+ * When using one M64 BAR to map one IOV BAR, we need to shift
+ * the IOV BAR according to the PE# allocated to the VFs.
+ * Otherwise, the PE# for the VF will conflict with others.
+ */
+ if (pdn->m64_per_iov == 1) {
+ ret = pnv_pci_vf_resource_shift(pdev, pdn->offset);
+ if (ret)
+ goto m64_failed;
+ }
+ }
+
+ /* Setup VF PEs */
+ pnv_ioda_setup_vf_PE(pdev, num_vfs);
+
+ return 0;
+
+m64_failed:
+ bitmap_clear(phb->ioda.pe_alloc, pdn->offset, num_vfs);
+ pdn->offset = 0;
+
+ return ret;
+}
+
+int pcibios_sriov_disable(struct pci_dev *pdev)
+{
+ pnv_pci_sriov_disable(pdev);
+
+ /* Release PCI data */
+ remove_dev_pci_data(pdev);
+ return 0;
+}
+
+int pcibios_sriov_enable(struct pci_dev *pdev, u16 num_vfs)
+{
+ /* Allocate PCI data */
+ add_dev_pci_data(pdev);
+
+ pnv_pci_sriov_enable(pdev, num_vfs);
+ return 0;
+}
+#endif /* CONFIG_PCI_IOV */
+
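Tying the SR-IOV pieces together with illustrative numbers: enabling num_vfs = 4 on an IODA2 PHB reserves four contiguous PE numbers (say pdn->offset = 32), assigns M64 window(s) over the IOV BARs, shifts the BARs by 32 segments when a single M64 BAR maps the whole IOV BAR (m64_per_iov == 1), and then creates one PE per VF with rid = (virtfn bus << 8) | virtfn devfn, giving PEs 32..35. When m64_per_iov is instead M64_PER_IOV (4 elsewhere in this series, assumed here) and num_vfs = 16, the VFs are split into 4 groups of roundup_pow_of_two(16) / 4 = 4 and each group's PEs are cross-linked in the PELTV. pnv_pci_sriov_disable() undoes the same steps in reverse order.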
static void pnv_pci_ioda_dma_dev_setup(struct pnv_phb *phb, struct pci_dev *pdev)
{
struct pci_dn *pdn = pci_get_pdn(pdev);
@@ -989,7 +1598,7 @@ static void pnv_pci_ioda_dma_dev_setup(struct pnv_phb *phb, struct pci_dev *pdev
pe = &phb->ioda.pe_array[pdn->pe_number];
WARN_ON(get_dma_ops(&pdev->dev) != &dma_iommu_ops);
- set_iommu_table_base_and_group(&pdev->dev, &pe->tce32_table);
+ set_iommu_table_base_and_group(&pdev->dev, pe->tce32_table);
}
static int pnv_pci_ioda_dma_set_mask(struct pnv_phb *phb,
@@ -1016,7 +1625,7 @@ static int pnv_pci_ioda_dma_set_mask(struct pnv_phb *phb,
} else {
dev_info(&pdev->dev, "Using 32-bit DMA via iommu\n");
set_dma_ops(&pdev->dev, &dma_iommu_ops);
- set_iommu_table_base(&pdev->dev, &pe->tce32_table);
+ set_iommu_table_base(&pdev->dev, pe->tce32_table);
}
*pdev->dev.dma_mask = dma_mask;
return 0;
@@ -1053,9 +1662,9 @@ static void pnv_ioda_setup_bus_dma(struct pnv_ioda_pe *pe,
list_for_each_entry(dev, &bus->devices, bus_list) {
if (add_to_iommu_group)
set_iommu_table_base_and_group(&dev->dev,
- &pe->tce32_table);
+ pe->tce32_table);
else
- set_iommu_table_base(&dev->dev, &pe->tce32_table);
+ set_iommu_table_base(&dev->dev, pe->tce32_table);
if (dev->subordinate)
pnv_ioda_setup_bus_dma(pe, dev->subordinate,
@@ -1145,8 +1754,7 @@ static void pnv_pci_ioda2_tce_invalidate(struct pnv_ioda_pe *pe,
void pnv_pci_ioda_tce_invalidate(struct iommu_table *tbl,
__be64 *startp, __be64 *endp, bool rm)
{
- struct pnv_ioda_pe *pe = container_of(tbl, struct pnv_ioda_pe,
- tce32_table);
+ struct pnv_ioda_pe *pe = tbl->data;
struct pnv_phb *phb = pe->phb;
if (phb->type == PNV_PHB_IODA1)
@@ -1167,9 +1775,6 @@ static void pnv_pci_ioda_setup_dma_pe(struct pnv_phb *phb,
int64_t rc;
void *addr;
- /* 256M DMA window, 4K TCE pages, 8 bytes TCE */
-#define TCE32_TABLE_SIZE ((0x10000000 / 0x1000) * 8)
-
/* XXX FIXME: Handle 64-bit only DMA devices */
/* XXX FIXME: Provide 64-bit DMA facilities & non-4K TCE tables etc.. */
/* XXX FIXME: Allocate multi-level tables on PHB3 */
@@ -1212,7 +1817,7 @@ static void pnv_pci_ioda_setup_dma_pe(struct pnv_phb *phb,
}
/* Setup linux iommu table */
- tbl = &pe->tce32_table;
+ tbl = pe->tce32_table;
pnv_pci_setup_iommu_table(tbl, addr, TCE32_TABLE_SIZE * segs,
base << 28, IOMMU_PAGE_SHIFT_4K);
@@ -1232,12 +1837,19 @@ static void pnv_pci_ioda_setup_dma_pe(struct pnv_phb *phb,
TCE_PCI_SWINV_PAIR);
}
iommu_init_table(tbl, phb->hose->node);
- iommu_register_group(tbl, phb->hose->global_number, pe->pe_number);
- if (pe->pdev)
+ if (pe->flags & PNV_IODA_PE_DEV) {
+ iommu_register_group(tbl, phb->hose->global_number,
+ pe->pe_number);
set_iommu_table_base_and_group(&pe->pdev->dev, tbl);
- else
+ } else if (pe->flags & (PNV_IODA_PE_BUS | PNV_IODA_PE_BUS_ALL)) {
+ iommu_register_group(tbl, phb->hose->global_number,
+ pe->pe_number);
pnv_ioda_setup_bus_dma(pe, pe->pbus, true);
+ } else if (pe->flags & PNV_IODA_PE_VF) {
+ iommu_register_group(tbl, phb->hose->global_number,
+ pe->pe_number);
+ }
return;
fail:
@@ -1250,8 +1862,7 @@ static void pnv_pci_ioda_setup_dma_pe(struct pnv_phb *phb,
static void pnv_pci_ioda2_set_bypass(struct iommu_table *tbl, bool enable)
{
- struct pnv_ioda_pe *pe = container_of(tbl, struct pnv_ioda_pe,
- tce32_table);
+ struct pnv_ioda_pe *pe = tbl->data;
uint16_t window_id = (pe->pe_number << 1 ) + 1;
int64_t rc;
@@ -1296,10 +1907,10 @@ static void pnv_pci_ioda2_setup_bypass_pe(struct pnv_phb *phb,
pe->tce_bypass_base = 1ull << 59;
/* Install set_bypass callback for VFIO */
- pe->tce32_table.set_bypass = pnv_pci_ioda2_set_bypass;
+ pe->tce32_table->set_bypass = pnv_pci_ioda2_set_bypass;
/* Enable bypass by default */
- pnv_pci_ioda2_set_bypass(&pe->tce32_table, true);
+ pnv_pci_ioda2_set_bypass(pe->tce32_table, true);
}
static void pnv_pci_ioda2_setup_dma_pe(struct pnv_phb *phb,
@@ -1347,7 +1958,7 @@ static void pnv_pci_ioda2_setup_dma_pe(struct pnv_phb *phb,
}
/* Setup linux iommu table */
- tbl = &pe->tce32_table;
+ tbl = pe->tce32_table;
pnv_pci_setup_iommu_table(tbl, addr, tce_table_size, 0,
IOMMU_PAGE_SHIFT_4K);
@@ -1365,12 +1976,19 @@ static void pnv_pci_ioda2_setup_dma_pe(struct pnv_phb *phb,
tbl->it_type |= (TCE_PCI_SWINV_CREATE | TCE_PCI_SWINV_FREE);
}
iommu_init_table(tbl, phb->hose->node);
- iommu_register_group(tbl, phb->hose->global_number, pe->pe_number);
- if (pe->pdev)
+ if (pe->flags & PNV_IODA_PE_DEV) {
+ iommu_register_group(tbl, phb->hose->global_number,
+ pe->pe_number);
set_iommu_table_base_and_group(&pe->pdev->dev, tbl);
- else
+ } else if (pe->flags & (PNV_IODA_PE_BUS | PNV_IODA_PE_BUS_ALL)) {
+ iommu_register_group(tbl, phb->hose->global_number,
+ pe->pe_number);
pnv_ioda_setup_bus_dma(pe, pe->pbus, true);
+ } else if (pe->flags & PNV_IODA_PE_VF) {
+ iommu_register_group(tbl, phb->hose->global_number,
+ pe->pe_number);
+ }
/* Also create a bypass window */
if (!pnv_iommu_bypass_disabled)
@@ -1731,6 +2349,73 @@ static void pnv_pci_init_ioda_msis(struct pnv_phb *phb)
static void pnv_pci_init_ioda_msis(struct pnv_phb *phb) { }
#endif /* CONFIG_PCI_MSI */
+#ifdef CONFIG_PCI_IOV
+static void pnv_pci_ioda_fixup_iov_resources(struct pci_dev *pdev)
+{
+ struct pci_controller *hose;
+ struct pnv_phb *phb;
+ struct resource *res;
+ int i;
+ resource_size_t size;
+ struct pci_dn *pdn;
+ int mul, total_vfs;
+
+ if (!pdev->is_physfn || pdev->is_added)
+ return;
+
+ hose = pci_bus_to_host(pdev->bus);
+ phb = hose->private_data;
+
+ pdn = pci_get_pdn(pdev);
+ pdn->vfs_expanded = 0;
+
+ total_vfs = pci_sriov_get_totalvfs(pdev);
+ pdn->m64_per_iov = 1;
+ mul = phb->ioda.total_pe;
+
+ for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
+ res = &pdev->resource[i + PCI_IOV_RESOURCES];
+ if (!res->flags || res->parent)
+ continue;
+ if (!pnv_pci_is_mem_pref_64(res->flags)) {
+ dev_warn(&pdev->dev, " non M64 VF BAR%d: %pR\n",
+ i, res);
+ continue;
+ }
+
+ size = pci_iov_resource_size(pdev, i + PCI_IOV_RESOURCES);
+
+ /* bigger than 64M */
+ if (size > (1 << 26)) {
+ dev_info(&pdev->dev, "PowerNV: VF BAR%d: %pR IOV size is bigger than 64M, roundup power2\n",
+ i, res);
+ pdn->m64_per_iov = M64_PER_IOV;
+ mul = roundup_pow_of_two(total_vfs);
+ break;
+ }
+ }
+
+ for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
+ res = &pdev->resource[i + PCI_IOV_RESOURCES];
+ if (!res->flags || res->parent)
+ continue;
+ if (!pnv_pci_is_mem_pref_64(res->flags)) {
+ dev_warn(&pdev->dev, "Skipping expanding VF BAR%d: %pR\n",
+ i, res);
+ continue;
+ }
+
+ dev_dbg(&pdev->dev, " Fixing VF BAR%d: %pR to\n", i, res);
+ size = pci_iov_resource_size(pdev, i + PCI_IOV_RESOURCES);
+ res->end = res->start + size * mul - 1;
+ dev_dbg(&pdev->dev, " %pR\n", res);
+ dev_info(&pdev->dev, "VF BAR%d: %pR (expanded to %d VFs for PE alignment)",
+ i, res, mul);
+ }
+ pdn->vfs_expanded = mul;
+}
+#endif /* CONFIG_PCI_IOV */
+
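A concrete reading of the fixup above (sizes illustrative): for a PF whose per-VF M64 BAR is 1MB on a PHB with total_pe = 256, mul stays 256 and each IOV BAR is grown to 256MB, so an M64 window segmented by PE number puts each 1MB segment behind exactly one VF; the new pnv_pci_iov_resource_alignment() hook later in this patch reports that same expanded size, making the PCI core place the BAR on a segment-aligned boundary. A per-VF BAR larger than 64MB instead sets m64_per_iov = M64_PER_IOV and only rounds the expansion up to roundup_pow_of_two(total_vfs).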
/*
 * This function is supposed to be called on a per-PE basis, from top
 * to bottom. So the I/O or MMIO segment assigned to
@@ -1777,7 +2462,8 @@ static void pnv_ioda_setup_pe_seg(struct pci_controller *hose,
region.start += phb->ioda.io_segsize;
index++;
}
- } else if (res->flags & IORESOURCE_MEM) {
+ } else if ((res->flags & IORESOURCE_MEM) &&
+ !pnv_pci_is_mem_pref_64(res->flags)) {
region.start = res->start -
hose->mem_offset[0] -
phb->ioda.m32_pci_base;
@@ -1907,10 +2593,29 @@ static resource_size_t pnv_pci_window_alignment(struct pci_bus *bus,
return phb->ioda.io_segsize;
}
+#ifdef CONFIG_PCI_IOV
+static resource_size_t pnv_pci_iov_resource_alignment(struct pci_dev *pdev,
+ int resno)
+{
+ struct pci_dn *pdn = pci_get_pdn(pdev);
+ resource_size_t align, iov_align;
+
+ iov_align = resource_size(&pdev->resource[resno]);
+ if (iov_align)
+ return iov_align;
+
+ align = pci_iov_resource_size(pdev, resno);
+ if (pdn->vfs_expanded)
+ return pdn->vfs_expanded * align;
+
+ return align;
+}
+#endif /* CONFIG_PCI_IOV */
+
/* Prevent enabling devices for which we couldn't properly
* assign a PE
*/
-static int pnv_pci_enable_device_hook(struct pci_dev *dev)
+static bool pnv_pci_enable_device_hook(struct pci_dev *dev)
{
struct pci_controller *hose = pci_bus_to_host(dev->bus);
struct pnv_phb *phb = hose->private_data;
@@ -1922,13 +2627,13 @@ static int pnv_pci_enable_device_hook(struct pci_dev *dev)
* PEs isn't ready.
*/
if (!phb->initialized)
- return 0;
+ return true;
pdn = pci_get_pdn(dev);
if (!pdn || pdn->pe_number == IODA_INVALID_PE)
- return -EINVAL;
+ return false;
- return 0;
+ return true;
}
static u32 pnv_ioda_bdfn_to_pe(struct pnv_phb *phb, struct pci_bus *bus,
@@ -1988,9 +2693,11 @@ static void __init pnv_pci_init_ioda_phb(struct device_node *np,
hose->last_busno = 0xff;
}
hose->private_data = phb;
+ hose->controller_ops = pnv_pci_controller_ops;
phb->hub_id = hub_id;
phb->opal_id = phb_id;
phb->type = ioda_type;
+ mutex_init(&phb->ioda.pe_alloc_mutex);
/* Detect specific models for error handling */
if (of_device_is_compatible(np, "ibm,p7ioc-pciex"))
@@ -2050,6 +2757,7 @@ static void __init pnv_pci_init_ioda_phb(struct device_node *np,
INIT_LIST_HEAD(&phb->ioda.pe_dma_list);
INIT_LIST_HEAD(&phb->ioda.pe_list);
+ mutex_init(&phb->ioda.pe_list_mutex);
/* Calculate how many 32-bit TCE segments we have */
phb->ioda.tce32_count = phb->ioda.m32_pci_base >> 28;
@@ -2078,9 +2786,6 @@ static void __init pnv_pci_init_ioda_phb(struct device_node *np,
phb->get_pe_state = pnv_ioda_get_pe_state;
phb->freeze_pe = pnv_ioda_freeze_pe;
phb->unfreeze_pe = pnv_ioda_unfreeze_pe;
-#ifdef CONFIG_EEH
- phb->eeh_ops = &ioda_eeh_ops;
-#endif
/* Setup RID -> PE mapping function */
phb->bdfn_to_pe = pnv_ioda_bdfn_to_pe;
@@ -2104,9 +2809,15 @@ static void __init pnv_pci_init_ioda_phb(struct device_node *np,
* the child P2P bridges) can form individual PE.
*/
ppc_md.pcibios_fixup = pnv_pci_ioda_fixup;
- ppc_md.pcibios_enable_device_hook = pnv_pci_enable_device_hook;
- ppc_md.pcibios_window_alignment = pnv_pci_window_alignment;
- ppc_md.pcibios_reset_secondary_bus = pnv_pci_reset_secondary_bus;
+ pnv_pci_controller_ops.enable_device_hook = pnv_pci_enable_device_hook;
+ pnv_pci_controller_ops.window_alignment = pnv_pci_window_alignment;
+ pnv_pci_controller_ops.reset_secondary_bus = pnv_pci_reset_secondary_bus;
+
+#ifdef CONFIG_PCI_IOV
+ ppc_md.pcibios_fixup_sriov = pnv_pci_ioda_fixup_iov_resources;
+ ppc_md.pcibios_iov_resource_alignment = pnv_pci_iov_resource_alignment;
+#endif
+
pci_add_flags(PCI_REASSIGN_ALL_RSRC);
/* Reset IODA tables to a clean state */
@@ -2121,8 +2832,8 @@ static void __init pnv_pci_init_ioda_phb(struct device_node *np,
*/
if (is_kdump_kernel()) {
pr_info(" Issue PHB reset ...\n");
- ioda_eeh_phb_reset(hose, EEH_RESET_FUNDAMENTAL);
- ioda_eeh_phb_reset(hose, EEH_RESET_DEACTIVATE);
+ pnv_eeh_phb_reset(hose, EEH_RESET_FUNDAMENTAL);
+ pnv_eeh_phb_reset(hose, EEH_RESET_DEACTIVATE);
}
/* Remove M64 resource if we can't configure it successfully */
diff --git a/arch/powerpc/platforms/powernv/pci-p5ioc2.c b/arch/powerpc/platforms/powernv/pci-p5ioc2.c
index 6ef6d4d8e7e2..4729ca793813 100644
--- a/arch/powerpc/platforms/powernv/pci-p5ioc2.c
+++ b/arch/powerpc/platforms/powernv/pci-p5ioc2.c
@@ -133,6 +133,7 @@ static void __init pnv_pci_init_p5ioc2_phb(struct device_node *np, u64 hub_id,
phb->hose->first_busno = 0;
phb->hose->last_busno = 0xff;
phb->hose->private_data = phb;
+ phb->hose->controller_ops = pnv_pci_controller_ops;
phb->hub_id = hub_id;
phb->opal_id = phb_id;
phb->type = PNV_PHB_P5IOC2;
diff --git a/arch/powerpc/platforms/powernv/pci.c b/arch/powerpc/platforms/powernv/pci.c
index e69142f4af08..bca2aeb6e4b6 100644
--- a/arch/powerpc/platforms/powernv/pci.c
+++ b/arch/powerpc/platforms/powernv/pci.c
@@ -366,9 +366,9 @@ static void pnv_pci_handle_eeh_config(struct pnv_phb *phb, u32 pe_no)
spin_unlock_irqrestore(&phb->lock, flags);
}
-static void pnv_pci_config_check_eeh(struct pnv_phb *phb,
- struct device_node *dn)
+static void pnv_pci_config_check_eeh(struct pci_dn *pdn)
{
+ struct pnv_phb *phb = pdn->phb->private_data;
u8 fstate;
__be16 pcierr;
int pe_no;
@@ -379,7 +379,7 @@ static void pnv_pci_config_check_eeh(struct pnv_phb *phb,
* setup that yet. So all ER errors should be mapped to
* reserved PE.
*/
- pe_no = PCI_DN(dn)->pe_number;
+ pe_no = pdn->pe_number;
if (pe_no == IODA_INVALID_PE) {
if (phb->type == PNV_PHB_P5IOC2)
pe_no = 0;
@@ -407,8 +407,7 @@ static void pnv_pci_config_check_eeh(struct pnv_phb *phb,
}
cfg_dbg(" -> EEH check, bdfn=%04x PE#%d fstate=%x\n",
- (PCI_DN(dn)->busno << 8) | (PCI_DN(dn)->devfn),
- pe_no, fstate);
+ (pdn->busno << 8) | (pdn->devfn), pe_no, fstate);
/* Clear the frozen state if applicable */
if (fstate == OPAL_EEH_STOPPED_MMIO_FREEZE ||
@@ -425,10 +424,9 @@ static void pnv_pci_config_check_eeh(struct pnv_phb *phb,
}
}
-int pnv_pci_cfg_read(struct device_node *dn,
+int pnv_pci_cfg_read(struct pci_dn *pdn,
int where, int size, u32 *val)
{
- struct pci_dn *pdn = PCI_DN(dn);
struct pnv_phb *phb = pdn->phb->private_data;
u32 bdfn = (pdn->busno << 8) | pdn->devfn;
s64 rc;
@@ -462,10 +460,9 @@ int pnv_pci_cfg_read(struct device_node *dn,
return PCIBIOS_SUCCESSFUL;
}
-int pnv_pci_cfg_write(struct device_node *dn,
+int pnv_pci_cfg_write(struct pci_dn *pdn,
int where, int size, u32 val)
{
- struct pci_dn *pdn = PCI_DN(dn);
struct pnv_phb *phb = pdn->phb->private_data;
u32 bdfn = (pdn->busno << 8) | pdn->devfn;
@@ -489,18 +486,17 @@ int pnv_pci_cfg_write(struct device_node *dn,
}
#if CONFIG_EEH
-static bool pnv_pci_cfg_check(struct pci_controller *hose,
- struct device_node *dn)
+static bool pnv_pci_cfg_check(struct pci_dn *pdn)
{
struct eeh_dev *edev = NULL;
- struct pnv_phb *phb = hose->private_data;
+ struct pnv_phb *phb = pdn->phb->private_data;
/* EEH not enabled ? */
if (!(phb->flags & PNV_PHB_FLAG_EEH))
return true;
/* PE reset or device removed ? */
- edev = of_node_to_eeh_dev(dn);
+ edev = pdn->edev;
if (edev) {
if (edev->pe &&
(edev->pe->state & EEH_PE_CFG_BLOCKED))
@@ -513,8 +509,7 @@ static bool pnv_pci_cfg_check(struct pci_controller *hose,
return true;
}
#else
-static inline pnv_pci_cfg_check(struct pci_controller *hose,
- struct device_node *dn)
+static inline pnv_pci_cfg_check(struct pci_dn *pdn)
{
return true;
}
@@ -524,32 +519,26 @@ static int pnv_pci_read_config(struct pci_bus *bus,
unsigned int devfn,
int where, int size, u32 *val)
{
- struct device_node *dn, *busdn = pci_bus_to_OF_node(bus);
struct pci_dn *pdn;
struct pnv_phb *phb;
- bool found = false;
int ret;
*val = 0xFFFFFFFF;
- for (dn = busdn->child; dn; dn = dn->sibling) {
- pdn = PCI_DN(dn);
- if (pdn && pdn->devfn == devfn) {
- phb = pdn->phb->private_data;
- found = true;
- break;
- }
- }
+ pdn = pci_get_pdn_by_devfn(bus, devfn);
+ if (!pdn)
+ return PCIBIOS_DEVICE_NOT_FOUND;
- if (!found || !pnv_pci_cfg_check(pdn->phb, dn))
+ if (!pnv_pci_cfg_check(pdn))
return PCIBIOS_DEVICE_NOT_FOUND;
- ret = pnv_pci_cfg_read(dn, where, size, val);
- if (phb->flags & PNV_PHB_FLAG_EEH) {
+ ret = pnv_pci_cfg_read(pdn, where, size, val);
+ phb = pdn->phb->private_data;
+ if (phb->flags & PNV_PHB_FLAG_EEH && pdn->edev) {
if (*val == EEH_IO_ERROR_VALUE(size) &&
- eeh_dev_check_failure(of_node_to_eeh_dev(dn)))
+ eeh_dev_check_failure(pdn->edev))
return PCIBIOS_DEVICE_NOT_FOUND;
} else {
- pnv_pci_config_check_eeh(phb, dn);
+ pnv_pci_config_check_eeh(pdn);
}
return ret;
@@ -559,27 +548,21 @@ static int pnv_pci_write_config(struct pci_bus *bus,
unsigned int devfn,
int where, int size, u32 val)
{
- struct device_node *dn, *busdn = pci_bus_to_OF_node(bus);
struct pci_dn *pdn;
struct pnv_phb *phb;
- bool found = false;
int ret;
- for (dn = busdn->child; dn; dn = dn->sibling) {
- pdn = PCI_DN(dn);
- if (pdn && pdn->devfn == devfn) {
- phb = pdn->phb->private_data;
- found = true;
- break;
- }
- }
+ pdn = pci_get_pdn_by_devfn(bus, devfn);
+ if (!pdn)
+ return PCIBIOS_DEVICE_NOT_FOUND;
- if (!found || !pnv_pci_cfg_check(pdn->phb, dn))
+ if (!pnv_pci_cfg_check(pdn))
return PCIBIOS_DEVICE_NOT_FOUND;
- ret = pnv_pci_cfg_write(dn, where, size, val);
+ ret = pnv_pci_cfg_write(pdn, where, size, val);
+ phb = pdn->phb->private_data;
if (!(phb->flags & PNV_PHB_FLAG_EEH))
- pnv_pci_config_check_eeh(phb, dn);
+ pnv_pci_config_check_eeh(pdn);
return ret;
}
@@ -679,66 +662,31 @@ void pnv_pci_setup_iommu_table(struct iommu_table *tbl,
tbl->it_type = TCE_PCI;
}
-static struct iommu_table *pnv_pci_setup_bml_iommu(struct pci_controller *hose)
-{
- struct iommu_table *tbl;
- const __be64 *basep, *swinvp;
- const __be32 *sizep;
-
- basep = of_get_property(hose->dn, "linux,tce-base", NULL);
- sizep = of_get_property(hose->dn, "linux,tce-size", NULL);
- if (basep == NULL || sizep == NULL) {
- pr_err("PCI: %s has missing tce entries !\n",
- hose->dn->full_name);
- return NULL;
- }
- tbl = kzalloc_node(sizeof(struct iommu_table), GFP_KERNEL, hose->node);
- if (WARN_ON(!tbl))
- return NULL;
- pnv_pci_setup_iommu_table(tbl, __va(be64_to_cpup(basep)),
- be32_to_cpup(sizep), 0, IOMMU_PAGE_SHIFT_4K);
- iommu_init_table(tbl, hose->node);
- iommu_register_group(tbl, pci_domain_nr(hose->bus), 0);
-
- /* Deal with SW invalidated TCEs when needed (BML way) */
- swinvp = of_get_property(hose->dn, "linux,tce-sw-invalidate-info",
- NULL);
- if (swinvp) {
- tbl->it_busno = be64_to_cpu(swinvp[1]);
- tbl->it_index = (unsigned long)ioremap(be64_to_cpup(swinvp), 8);
- tbl->it_type = TCE_PCI_SWINV_CREATE | TCE_PCI_SWINV_FREE;
- }
- return tbl;
-}
-
-static void pnv_pci_dma_fallback_setup(struct pci_controller *hose,
- struct pci_dev *pdev)
-{
- struct device_node *np = pci_bus_to_OF_node(hose->bus);
- struct pci_dn *pdn;
-
- if (np == NULL)
- return;
- pdn = PCI_DN(np);
- if (!pdn->iommu_table)
- pdn->iommu_table = pnv_pci_setup_bml_iommu(hose);
- if (!pdn->iommu_table)
- return;
- set_iommu_table_base_and_group(&pdev->dev, pdn->iommu_table);
-}
-
static void pnv_pci_dma_dev_setup(struct pci_dev *pdev)
{
struct pci_controller *hose = pci_bus_to_host(pdev->bus);
struct pnv_phb *phb = hose->private_data;
+#ifdef CONFIG_PCI_IOV
+ struct pnv_ioda_pe *pe;
+ struct pci_dn *pdn;
+
+ /* Fix the VF pdn PE number */
+ if (pdev->is_virtfn) {
+ pdn = pci_get_pdn(pdev);
+ WARN_ON(pdn->pe_number != IODA_INVALID_PE);
+ list_for_each_entry(pe, &phb->ioda.pe_list, list) {
+ if (pe->rid == ((pdev->bus->number << 8) |
+ (pdev->devfn & 0xff))) {
+ pdn->pe_number = pe->pe_number;
+ pe->pdev = pdev;
+ break;
+ }
+ }
+ }
+#endif /* CONFIG_PCI_IOV */
- /* If we have no phb structure, try to setup a fallback based on
- * the device-tree (RTAS PCI for example)
- */
if (phb && phb->dma_dev_setup)
phb->dma_dev_setup(phb, pdev);
- else
- pnv_pci_dma_fallback_setup(hose, pdev);
}
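The rid comparison above uses the standard PCI requester-ID encoding, rid = (bus << 8) | devfn. As a small worked example (numbers illustrative): a VF at 0002:03:02.1 has devfn = (2 << 3) | 1 = 0x11, so rid = 0x0311, matching what pnv_ioda_setup_vf_PE() stored in pe->rid from pci_iov_virtfn_bus() and pci_iov_virtfn_devfn() when the VFs were enabled.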
int pnv_pci_dma_set_mask(struct pci_dev *pdev, u64 dma_mask)
@@ -784,44 +732,36 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_IBM, 0x3b9, pnv_p7ioc_rc_quirk);
void __init pnv_pci_init(void)
{
struct device_node *np;
+ bool found_ioda = false;
pci_add_flags(PCI_CAN_SKIP_ISA_ALIGN);
- /* OPAL absent, try POPAL first then RTAS detection of PHBs */
- if (!firmware_has_feature(FW_FEATURE_OPAL)) {
-#ifdef CONFIG_PPC_POWERNV_RTAS
- init_pci_config_tokens();
- find_and_init_phbs();
-#endif /* CONFIG_PPC_POWERNV_RTAS */
- }
- /* OPAL is here, do our normal stuff */
- else {
- int found_ioda = 0;
+ /* If we don't have OPAL, e.g. in a simulator, just skip the PCI probe */
+ if (!firmware_has_feature(FW_FEATURE_OPAL))
+ return;
- /* Look for IODA IO-Hubs. We don't support mixing IODA
- * and p5ioc2 due to the need to change some global
- * probing flags
- */
- for_each_compatible_node(np, NULL, "ibm,ioda-hub") {
- pnv_pci_init_ioda_hub(np);
- found_ioda = 1;
- }
+ /* Look for IODA IO-Hubs. We don't support mixing IODA
+ * and p5ioc2 due to the need to change some global
+ * probing flags
+ */
+ for_each_compatible_node(np, NULL, "ibm,ioda-hub") {
+ pnv_pci_init_ioda_hub(np);
+ found_ioda = true;
+ }
- /* Look for p5ioc2 IO-Hubs */
- if (!found_ioda)
- for_each_compatible_node(np, NULL, "ibm,p5ioc2")
- pnv_pci_init_p5ioc2_hub(np);
+ /* Look for p5ioc2 IO-Hubs */
+ if (!found_ioda)
+ for_each_compatible_node(np, NULL, "ibm,p5ioc2")
+ pnv_pci_init_p5ioc2_hub(np);
- /* Look for ioda2 built-in PHB3's */
- for_each_compatible_node(np, NULL, "ibm,ioda2-phb")
- pnv_pci_init_ioda2_phb(np);
- }
+ /* Look for ioda2 built-in PHB3's */
+ for_each_compatible_node(np, NULL, "ibm,ioda2-phb")
+ pnv_pci_init_ioda2_phb(np);
/* Setup the linkage between OF nodes and PHBs */
pci_devs_phb_init();
/* Configure IOMMU DMA hooks */
- ppc_md.pci_dma_dev_setup = pnv_pci_dma_dev_setup;
ppc_md.tce_build = pnv_tce_build_vm;
ppc_md.tce_free = pnv_tce_free_vm;
ppc_md.tce_build_rm = pnv_tce_build_rm;
@@ -836,30 +776,8 @@ void __init pnv_pci_init(void)
#endif
}
-static int tce_iommu_bus_notifier(struct notifier_block *nb,
- unsigned long action, void *data)
-{
- struct device *dev = data;
-
- switch (action) {
- case BUS_NOTIFY_ADD_DEVICE:
- return iommu_add_device(dev);
- case BUS_NOTIFY_DEL_DEVICE:
- if (dev->iommu_group)
- iommu_del_device(dev);
- return 0;
- default:
- return 0;
- }
-}
+machine_subsys_initcall_sync(powernv, tce_iommu_bus_notifier_init);
-static struct notifier_block tce_iommu_bus_nb = {
- .notifier_call = tce_iommu_bus_notifier,
+struct pci_controller_ops pnv_pci_controller_ops = {
+ .dma_dev_setup = pnv_pci_dma_dev_setup,
};
-
-static int __init tce_iommu_bus_notifier_init(void)
-{
- bus_register_notifier(&pci_bus_type, &tce_iommu_bus_nb);
- return 0;
-}
-machine_subsys_initcall_sync(powernv, tce_iommu_bus_notifier_init);
diff --git a/arch/powerpc/platforms/powernv/pci.h b/arch/powerpc/platforms/powernv/pci.h
index 6c02ff8dd69f..070ee888fc95 100644
--- a/arch/powerpc/platforms/powernv/pci.h
+++ b/arch/powerpc/platforms/powernv/pci.h
@@ -23,6 +23,7 @@ enum pnv_phb_model {
#define PNV_IODA_PE_BUS_ALL (1 << 2) /* PE has subordinate buses */
#define PNV_IODA_PE_MASTER (1 << 3) /* Master PE in compound case */
#define PNV_IODA_PE_SLAVE (1 << 4) /* Slave PE in compound case */
+#define PNV_IODA_PE_VF (1 << 5) /* PE for one VF */
/* Data associated with a PE, including IOMMU tracking etc.. */
struct pnv_phb;
@@ -34,6 +35,9 @@ struct pnv_ioda_pe {
* entire bus (& children). In the former case, pdev
* is populated, in the later case, pbus is.
*/
+#ifdef CONFIG_PCI_IOV
+ struct pci_dev *parent_dev;
+#endif
struct pci_dev *pdev;
struct pci_bus *pbus;
@@ -53,7 +57,7 @@ struct pnv_ioda_pe {
/* "Base" iommu table, ie, 4K TCEs, 32-bit DMA */
int tce32_seg;
int tce32_segcount;
- struct iommu_table tce32_table;
+ struct iommu_table *tce32_table;
phys_addr_t tce_inval_reg_phys;
/* 64-bit TCE bypass region */
@@ -75,22 +79,6 @@ struct pnv_ioda_pe {
struct list_head list;
};
-/* IOC dependent EEH operations */
-#ifdef CONFIG_EEH
-struct pnv_eeh_ops {
- int (*post_init)(struct pci_controller *hose);
- int (*set_option)(struct eeh_pe *pe, int option);
- int (*get_state)(struct eeh_pe *pe);
- int (*reset)(struct eeh_pe *pe, int option);
- int (*get_log)(struct eeh_pe *pe, int severity,
- char *drv_log, unsigned long len);
- int (*configure_bridge)(struct eeh_pe *pe);
- int (*err_inject)(struct eeh_pe *pe, int type, int func,
- unsigned long addr, unsigned long mask);
- int (*next_error)(struct eeh_pe **pe);
-};
-#endif /* CONFIG_EEH */
-
#define PNV_PHB_FLAG_EEH (1 << 0)
struct pnv_phb {
@@ -104,10 +92,6 @@ struct pnv_phb {
int initialized;
spinlock_t lock;
-#ifdef CONFIG_EEH
- struct pnv_eeh_ops *eeh_ops;
-#endif
-
#ifdef CONFIG_DEBUG_FS
int has_dbgfs;
struct dentry *dbgfs;
@@ -165,6 +149,8 @@ struct pnv_phb {
/* PE allocation bitmap */
unsigned long *pe_alloc;
+ /* PE allocation mutex */
+ struct mutex pe_alloc_mutex;
/* M32 & IO segment maps */
unsigned int *m32_segmap;
@@ -179,6 +165,7 @@ struct pnv_phb {
* on the sequence of creation
*/
struct list_head pe_list;
+ struct mutex pe_list_mutex;
/* Reverse map of PEs, will have to extend if
* we are to support more than 256 PEs, indexed
@@ -213,15 +200,12 @@ struct pnv_phb {
};
extern struct pci_ops pnv_pci_ops;
-#ifdef CONFIG_EEH
-extern struct pnv_eeh_ops ioda_eeh_ops;
-#endif
void pnv_pci_dump_phb_diag_data(struct pci_controller *hose,
unsigned char *log_buff);
-int pnv_pci_cfg_read(struct device_node *dn,
+int pnv_pci_cfg_read(struct pci_dn *pdn,
int where, int size, u32 *val);
-int pnv_pci_cfg_write(struct device_node *dn,
+int pnv_pci_cfg_write(struct pci_dn *pdn,
int where, int size, u32 val);
extern void pnv_pci_setup_iommu_table(struct iommu_table *tbl,
void *tce_mem, u64 tce_size,
@@ -232,6 +216,6 @@ extern void pnv_pci_init_ioda2_phb(struct device_node *np);
extern void pnv_pci_ioda_tce_invalidate(struct iommu_table *tbl,
__be64 *startp, __be64 *endp, bool rm);
extern void pnv_pci_reset_secondary_bus(struct pci_dev *dev);
-extern int ioda_eeh_phb_reset(struct pci_controller *hose, int option);
+extern int pnv_eeh_phb_reset(struct pci_controller *hose, int option);
#endif /* __POWERNV_PCI_H */
diff --git a/arch/powerpc/platforms/powernv/powernv.h b/arch/powerpc/platforms/powernv/powernv.h
index 604c48e7879a..826d2c9bea56 100644
--- a/arch/powerpc/platforms/powernv/powernv.h
+++ b/arch/powerpc/platforms/powernv/powernv.h
@@ -29,6 +29,8 @@ static inline u64 pnv_pci_dma_get_required_mask(struct pci_dev *pdev)
}
#endif
+extern struct pci_controller_ops pnv_pci_controller_ops;
+
extern u32 pnv_get_supported_cpuidle_states(void);
extern void pnv_lpc_init(void);
diff --git a/arch/powerpc/platforms/powernv/setup.c b/arch/powerpc/platforms/powernv/setup.c
index d2de7d5d7574..16fdcb23f4c3 100644
--- a/arch/powerpc/platforms/powernv/setup.c
+++ b/arch/powerpc/platforms/powernv/setup.c
@@ -32,7 +32,6 @@
#include <asm/machdep.h>
#include <asm/firmware.h>
#include <asm/xics.h>
-#include <asm/rtas.h>
#include <asm/opal.h>
#include <asm/kexec.h>
#include <asm/smp.h>
@@ -278,20 +277,6 @@ static void __init pnv_setup_machdep_opal(void)
ppc_md.handle_hmi_exception = opal_handle_hmi_exception;
}
-#ifdef CONFIG_PPC_POWERNV_RTAS
-static void __init pnv_setup_machdep_rtas(void)
-{
- if (rtas_token("get-time-of-day") != RTAS_UNKNOWN_SERVICE) {
- ppc_md.get_boot_time = rtas_get_boot_time;
- ppc_md.get_rtc_time = rtas_get_rtc_time;
- ppc_md.set_rtc_time = rtas_set_rtc_time;
- }
- ppc_md.restart = rtas_restart;
- pm_power_off = rtas_power_off;
- ppc_md.halt = rtas_halt;
-}
-#endif /* CONFIG_PPC_POWERNV_RTAS */
-
static u32 supported_cpuidle_states;
int pnv_save_sprs_for_winkle(void)
@@ -409,37 +394,39 @@ static int __init pnv_init_idle_states(void)
{
struct device_node *power_mgt;
int dt_idle_states;
- const __be32 *idle_state_flags;
- u32 len_flags, flags;
+ u32 *flags;
int i;
supported_cpuidle_states = 0;
if (cpuidle_disable != IDLE_NO_OVERRIDE)
- return 0;
+ goto out;
if (!firmware_has_feature(FW_FEATURE_OPALv3))
- return 0;
+ goto out;
power_mgt = of_find_node_by_path("/ibm,opal/power-mgt");
if (!power_mgt) {
pr_warn("opal: PowerMgmt Node not found\n");
- return 0;
+ goto out;
+ }
+ dt_idle_states = of_property_count_u32_elems(power_mgt,
+ "ibm,cpu-idle-state-flags");
+ if (dt_idle_states < 0) {
+ pr_warn("cpuidle-powernv: no idle states found in the DT\n");
+ goto out;
}
- idle_state_flags = of_get_property(power_mgt,
- "ibm,cpu-idle-state-flags", &len_flags);
- if (!idle_state_flags) {
- pr_warn("DT-PowerMgmt: missing ibm,cpu-idle-state-flags\n");
- return 0;
+ flags = kzalloc(sizeof(*flags) * dt_idle_states, GFP_KERNEL);
+ if (of_property_read_u32_array(power_mgt,
+ "ibm,cpu-idle-state-flags", flags, dt_idle_states)) {
+ pr_warn("cpuidle-powernv: missing ibm,cpu-idle-state-flags in DT\n");
+ goto out_free;
}
- dt_idle_states = len_flags / sizeof(u32);
+ for (i = 0; i < dt_idle_states; i++)
+ supported_cpuidle_states |= flags[i];
- for (i = 0; i < dt_idle_states; i++) {
- flags = be32_to_cpu(idle_state_flags[i]);
- supported_cpuidle_states |= flags;
- }
if (!(supported_cpuidle_states & OPAL_PM_SLEEP_ENABLED_ER1)) {
patch_instruction(
(unsigned int *)pnv_fastsleep_workaround_at_entry,
@@ -449,6 +436,9 @@ static int __init pnv_init_idle_states(void)
PPC_INST_NOP);
}
pnv_alloc_idle_core_states();
+out_free:
+ kfree(flags);
+out:
return 0;
}
@@ -465,10 +455,6 @@ static int __init pnv_probe(void)
if (firmware_has_feature(FW_FEATURE_OPAL))
pnv_setup_machdep_opal();
-#ifdef CONFIG_PPC_POWERNV_RTAS
- else if (rtas.base)
- pnv_setup_machdep_rtas();
-#endif /* CONFIG_PPC_POWERNV_RTAS */
pr_debug("PowerNV detected !\n");
diff --git a/arch/powerpc/platforms/powernv/smp.c b/arch/powerpc/platforms/powernv/smp.c
index fc34025ef822..8f70ba681a78 100644
--- a/arch/powerpc/platforms/powernv/smp.c
+++ b/arch/powerpc/platforms/powernv/smp.c
@@ -25,7 +25,6 @@
#include <asm/machdep.h>
#include <asm/cputable.h>
#include <asm/firmware.h>
-#include <asm/rtas.h>
#include <asm/vdso_datapage.h>
#include <asm/cputhreads.h>
#include <asm/xics.h>
@@ -33,6 +32,8 @@
#include <asm/runlatch.h>
#include <asm/code-patching.h>
#include <asm/dbell.h>
+#include <asm/kvm_ppc.h>
+#include <asm/ppc-opcode.h>
#include "powernv.h"
@@ -149,7 +150,7 @@ static int pnv_smp_cpu_disable(void)
static void pnv_smp_cpu_kill_self(void)
{
unsigned int cpu;
- unsigned long srr1;
+ unsigned long srr1, wmask;
u32 idle_states;
/* Standard hot unplug procedure */
@@ -161,6 +162,10 @@ static void pnv_smp_cpu_kill_self(void)
generic_set_cpu_dead(cpu);
smp_wmb();
+ wmask = SRR1_WAKEMASK;
+ if (cpu_has_feature(CPU_FTR_ARCH_207S))
+ wmask = SRR1_WAKEMASK_P8;
+
idle_states = pnv_get_supported_cpuidle_states();
/* We don't want to take decrementer interrupts while we are offline,
* so clear LPCR:PECE1. We keep PECE2 enabled.
@@ -191,10 +196,14 @@ static void pnv_smp_cpu_kill_self(void)
* having finished executing in a KVM guest, then srr1
* contains 0.
*/
- if ((srr1 & SRR1_WAKEMASK) == SRR1_WAKEEE) {
+ if ((srr1 & wmask) == SRR1_WAKEEE) {
icp_native_flush_interrupt();
local_paca->irq_happened &= PACA_IRQ_HARD_DIS;
smp_mb();
+ } else if ((srr1 & wmask) == SRR1_WAKEHDBELL) {
+ unsigned long msg = PPC_DBELL_TYPE(PPC_DBELL_SERVER);
+ asm volatile(PPC_MSGCLR(%0) : : "r" (msg));
+ kvmppc_set_host_ipi(cpu, 0);
}
if (cpu_core_split_required())
@@ -241,18 +250,6 @@ void __init pnv_smp_init(void)
{
smp_ops = &pnv_smp_ops;
- /* XXX We don't yet have a proper entry point from HAL, for
- * now we rely on kexec-style entry from BML
- */
-
-#ifdef CONFIG_PPC_RTAS
- /* Non-lpar has additional take/give timebase */
- if (rtas_token("freeze-time-base") != RTAS_UNKNOWN_SERVICE) {
- smp_ops->give_timebase = rtas_give_timebase;
- smp_ops->take_timebase = rtas_take_timebase;
- }
-#endif /* CONFIG_PPC_RTAS */
-
#ifdef CONFIG_HOTPLUG_CPU
ppc_md.cpu_die = pnv_smp_cpu_kill_self;
#endif
diff --git a/arch/powerpc/platforms/ps3/smp.c b/arch/powerpc/platforms/ps3/smp.c
index b358bec6c8cb..3c7707af3384 100644
--- a/arch/powerpc/platforms/ps3/smp.c
+++ b/arch/powerpc/platforms/ps3/smp.c
@@ -57,7 +57,7 @@ static void ps3_smp_message_pass(int cpu, int msg)
" (%d)\n", __func__, __LINE__, cpu, msg, result);
}
-static int __init ps3_smp_probe(void)
+static void __init ps3_smp_probe(void)
{
int cpu;
@@ -100,8 +100,6 @@ static int __init ps3_smp_probe(void)
DBG(" <- %s:%d: (%d)\n", __func__, __LINE__, cpu);
}
-
- return 2;
}
void ps3_smp_cleanup_cpu(int cpu)
diff --git a/arch/powerpc/platforms/pseries/Kconfig b/arch/powerpc/platforms/pseries/Kconfig
index a758a9c3bbba..54c87d5d349d 100644
--- a/arch/powerpc/platforms/pseries/Kconfig
+++ b/arch/powerpc/platforms/pseries/Kconfig
@@ -16,7 +16,6 @@ config PPC_PSERIES
select PPC_UDBG_16550
select PPC_NATIVE
select PPC_PCI_CHOICE if EXPERT
- select ZLIB_DEFLATE
select PPC_DOORBELL
select HAVE_CONTEXT_TRACKING
select HOTPLUG_CPU if SMP
diff --git a/arch/powerpc/platforms/pseries/dlpar.c b/arch/powerpc/platforms/pseries/dlpar.c
index c22bb1b4beb8..b4b11096ea8b 100644
--- a/arch/powerpc/platforms/pseries/dlpar.c
+++ b/arch/powerpc/platforms/pseries/dlpar.c
@@ -10,6 +10,8 @@
* 2 as published by the Free Software Foundation.
*/
+#define pr_fmt(fmt) "dlpar: " fmt
+
#include <linux/kernel.h>
#include <linux/notifier.h>
#include <linux/spinlock.h>
@@ -535,13 +537,125 @@ static ssize_t dlpar_cpu_release(const char *buf, size_t count)
return count;
}
+#endif /* CONFIG_ARCH_CPU_PROBE_RELEASE */
+
+static int handle_dlpar_errorlog(struct pseries_hp_errorlog *hp_elog)
+{
+ int rc;
+
+ /* pseries error logs are in BE format, convert to cpu type */
+ switch (hp_elog->id_type) {
+ case PSERIES_HP_ELOG_ID_DRC_COUNT:
+ hp_elog->_drc_u.drc_count =
+ be32_to_cpu(hp_elog->_drc_u.drc_count);
+ break;
+ case PSERIES_HP_ELOG_ID_DRC_INDEX:
+ hp_elog->_drc_u.drc_index =
+ be32_to_cpu(hp_elog->_drc_u.drc_index);
+ }
+
+ switch (hp_elog->resource) {
+ case PSERIES_HP_ELOG_RESOURCE_MEM:
+ rc = dlpar_memory(hp_elog);
+ break;
+ default:
+ pr_warn_ratelimited("Invalid resource (%d) specified\n",
+ hp_elog->resource);
+ rc = -EINVAL;
+ }
+
+ return rc;
+}
+
+static ssize_t dlpar_store(struct class *class, struct class_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct pseries_hp_errorlog *hp_elog;
+ const char *arg;
+ int rc;
+
+ hp_elog = kzalloc(sizeof(*hp_elog), GFP_KERNEL);
+ if (!hp_elog) {
+ rc = -ENOMEM;
+ goto dlpar_store_out;
+ }
+
+ /* Parse out the request from the user; it will be in the form
+ * <resource> <action> <id_type> <id>
+ */
+ arg = buf;
+ if (!strncmp(arg, "memory", 6)) {
+ hp_elog->resource = PSERIES_HP_ELOG_RESOURCE_MEM;
+ arg += strlen("memory ");
+ } else {
+ pr_err("Invalid resource specified: \"%s\"\n", buf);
+ rc = -EINVAL;
+ goto dlpar_store_out;
+ }
+
+ if (!strncmp(arg, "add", 3)) {
+ hp_elog->action = PSERIES_HP_ELOG_ACTION_ADD;
+ arg += strlen("add ");
+ } else if (!strncmp(arg, "remove", 6)) {
+ hp_elog->action = PSERIES_HP_ELOG_ACTION_REMOVE;
+ arg += strlen("remove ");
+ } else {
+ pr_err("Invalid action specified: \"%s\"\n", buf);
+ rc = -EINVAL;
+ goto dlpar_store_out;
+ }
+
+ if (!strncmp(arg, "index", 5)) {
+ u32 index;
+
+ hp_elog->id_type = PSERIES_HP_ELOG_ID_DRC_INDEX;
+ arg += strlen("index ");
+ if (kstrtou32(arg, 0, &index)) {
+ rc = -EINVAL;
+ pr_err("Invalid drc_index specified: \"%s\"\n", buf);
+ goto dlpar_store_out;
+ }
+
+ hp_elog->_drc_u.drc_index = cpu_to_be32(index);
+ } else if (!strncmp(arg, "count", 5)) {
+ u32 count;
+
+ hp_elog->id_type = PSERIES_HP_ELOG_ID_DRC_COUNT;
+ arg += strlen("count ");
+ if (kstrtou32(arg, 0, &count)) {
+ rc = -EINVAL;
+ pr_err("Invalid count specified: \"%s\"\n", buf);
+ goto dlpar_store_out;
+ }
+
+ hp_elog->_drc_u.drc_count = cpu_to_be32(count);
+ } else {
+ pr_err("Invalid id_type specified: \"%s\"\n", buf);
+ rc = -EINVAL;
+ goto dlpar_store_out;
+ }
+
+ rc = handle_dlpar_errorlog(hp_elog);
+
+dlpar_store_out:
+ kfree(hp_elog);
+ return rc ? rc : count;
+}
+
+static CLASS_ATTR(dlpar, S_IWUSR, NULL, dlpar_store);
+
static int __init pseries_dlpar_init(void)
{
+ int rc;
+
+#ifdef CONFIG_ARCH_CPU_PROBE_RELEASE
ppc_md.cpu_probe = dlpar_cpu_probe;
ppc_md.cpu_release = dlpar_cpu_release;
+#endif /* CONFIG_ARCH_CPU_PROBE_RELEASE */
- return 0;
+ rc = sysfs_create_file(kernel_kobj, &class_attr_dlpar.attr);
+
+ return rc;
}
machine_device_initcall(pseries, pseries_dlpar_init);
-#endif /* CONFIG_ARCH_CPU_PROBE_RELEASE */
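
The new dlpar_store() handler, exposed as /sys/kernel/dlpar, parses a request of the form "<resource> <action> <id_type> <id>", stores the id in struct pseries_hp_errorlog in big-endian form, and hands it to handle_dlpar_errorlog(), which for now only routes memory requests to dlpar_memory(). A standalone sketch of the parsing, with struct hp_request as a simplified stand-in for the kernel structure:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    enum hp_resource { HP_RESOURCE_MEM };
    enum hp_action   { HP_ACTION_ADD, HP_ACTION_REMOVE };
    enum hp_id_type  { HP_ID_DRC_INDEX, HP_ID_DRC_COUNT };

    /* Simplified stand-in for struct pseries_hp_errorlog. */
    struct hp_request {
        enum hp_resource resource;
        enum hp_action   action;
        enum hp_id_type  id_type;
        uint32_t         id;    /* drc_index or drc_count; the kernel stores it cpu_to_be32() */
    };

    static int parse_dlpar_request(const char *buf, struct hp_request *req)
    {
        const char *arg = buf;

        if (!strncmp(arg, "memory ", 7)) {
            req->resource = HP_RESOURCE_MEM;
            arg += 7;
        } else {
            return -1;          /* only memory is handled by this patch */
        }

        if (!strncmp(arg, "add ", 4)) {
            req->action = HP_ACTION_ADD;
            arg += 4;
        } else if (!strncmp(arg, "remove ", 7)) {
            req->action = HP_ACTION_REMOVE;
            arg += 7;
        } else {
            return -1;
        }

        if (!strncmp(arg, "index ", 6)) {
            req->id_type = HP_ID_DRC_INDEX;
            arg += 6;
        } else if (!strncmp(arg, "count ", 6)) {
            req->id_type = HP_ID_DRC_COUNT;
            arg += 6;
        } else {
            return -1;
        }

        req->id = (uint32_t)strtoul(arg, NULL, 0);
        return 0;
    }

    int main(void)
    {
        struct hp_request req;

        if (!parse_dlpar_request("memory remove count 2", &req))
            printf("resource=%d action=%d id_type=%d id=%u\n",
                   req.resource, req.action, req.id_type, req.id);
        return 0;
    }

With this in place, a request such as "memory remove count 2" written to /sys/kernel/dlpar should end up in dlpar_memory_remove_by_count() in hotplug-memory.c.
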
diff --git a/arch/powerpc/platforms/pseries/eeh_pseries.c b/arch/powerpc/platforms/pseries/eeh_pseries.c
index a6c7e19f5eb3..2039397cc75d 100644
--- a/arch/powerpc/platforms/pseries/eeh_pseries.c
+++ b/arch/powerpc/platforms/pseries/eeh_pseries.c
@@ -118,9 +118,8 @@ static int pseries_eeh_init(void)
return 0;
}
-static int pseries_eeh_cap_start(struct device_node *dn)
+static int pseries_eeh_cap_start(struct pci_dn *pdn)
{
- struct pci_dn *pdn = PCI_DN(dn);
u32 status;
if (!pdn)
@@ -134,10 +133,9 @@ static int pseries_eeh_cap_start(struct device_node *dn)
}
-static int pseries_eeh_find_cap(struct device_node *dn, int cap)
+static int pseries_eeh_find_cap(struct pci_dn *pdn, int cap)
{
- struct pci_dn *pdn = PCI_DN(dn);
- int pos = pseries_eeh_cap_start(dn);
+ int pos = pseries_eeh_cap_start(pdn);
int cnt = 48; /* Maximal number of capabilities */
u32 id;
@@ -160,10 +158,9 @@ static int pseries_eeh_find_cap(struct device_node *dn, int cap)
return 0;
}
-static int pseries_eeh_find_ecap(struct device_node *dn, int cap)
+static int pseries_eeh_find_ecap(struct pci_dn *pdn, int cap)
{
- struct pci_dn *pdn = PCI_DN(dn);
- struct eeh_dev *edev = of_node_to_eeh_dev(dn);
+ struct eeh_dev *edev = pdn_to_eeh_dev(pdn);
u32 header;
int pos = 256;
int ttl = (4096 - 256) / 8;
@@ -191,53 +188,44 @@ static int pseries_eeh_find_ecap(struct device_node *dn, int cap)
}
/**
- * pseries_eeh_of_probe - EEH probe on the given device
- * @dn: OF node
- * @flag: Unused
+ * pseries_eeh_probe - EEH probe on the given device
+ * @pdn: PCI device node
+ * @data: Unused
*
* When the EEH module is installed during system boot, all PCI devices
* are checked one by one to see if they support EEH. The function
* is introduced for that purpose.
*/
-static void *pseries_eeh_of_probe(struct device_node *dn, void *flag)
+static void *pseries_eeh_probe(struct pci_dn *pdn, void *data)
{
struct eeh_dev *edev;
struct eeh_pe pe;
- struct pci_dn *pdn = PCI_DN(dn);
- const __be32 *classp, *vendorp, *devicep;
- u32 class_code;
- const __be32 *regs;
u32 pcie_flags;
int enable = 0;
int ret;
/* Retrieve OF node and eeh device */
- edev = of_node_to_eeh_dev(dn);
- if (edev->pe || !of_device_is_available(dn))
+ edev = pdn_to_eeh_dev(pdn);
+ if (!edev || edev->pe)
return NULL;
- /* Retrieve class/vendor/device IDs */
- classp = of_get_property(dn, "class-code", NULL);
- vendorp = of_get_property(dn, "vendor-id", NULL);
- devicep = of_get_property(dn, "device-id", NULL);
-
- /* Skip for bad OF node or PCI-ISA bridge */
- if (!classp || !vendorp || !devicep)
- return NULL;
- if (dn->type && !strcmp(dn->type, "isa"))
+ /* Check class/vendor/device IDs */
+ if (!pdn->vendor_id || !pdn->device_id || !pdn->class_code)
return NULL;
- class_code = of_read_number(classp, 1);
+ /* Skip for PCI-ISA bridge */
+ if ((pdn->class_code >> 8) == PCI_CLASS_BRIDGE_ISA)
+ return NULL;
/*
* Update class code and mode of eeh device. We need to
* correctly reflect whether the current device is a root port
* or a PCIe switch downstream port.
*/
- edev->class_code = class_code;
- edev->pcix_cap = pseries_eeh_find_cap(dn, PCI_CAP_ID_PCIX);
- edev->pcie_cap = pseries_eeh_find_cap(dn, PCI_CAP_ID_EXP);
- edev->aer_cap = pseries_eeh_find_ecap(dn, PCI_EXT_CAP_ID_ERR);
+ edev->class_code = pdn->class_code;
+ edev->pcix_cap = pseries_eeh_find_cap(pdn, PCI_CAP_ID_PCIX);
+ edev->pcie_cap = pseries_eeh_find_cap(pdn, PCI_CAP_ID_EXP);
+ edev->aer_cap = pseries_eeh_find_ecap(pdn, PCI_EXT_CAP_ID_ERR);
edev->mode &= 0xFFFFFF00;
if ((edev->class_code >> 8) == PCI_CLASS_BRIDGE_PCI) {
edev->mode |= EEH_DEV_BRIDGE;
@@ -252,24 +240,16 @@ static void *pseries_eeh_of_probe(struct device_node *dn, void *flag)
}
}
- /* Retrieve the device address */
- regs = of_get_property(dn, "reg", NULL);
- if (!regs) {
- pr_warn("%s: OF node property %s::reg not found\n",
- __func__, dn->full_name);
- return NULL;
- }
-
/* Initialize the fake PE */
memset(&pe, 0, sizeof(struct eeh_pe));
pe.phb = edev->phb;
- pe.config_addr = of_read_number(regs, 1);
+ pe.config_addr = (pdn->busno << 16) | (pdn->devfn << 8);
/* Enable EEH on the device */
ret = eeh_ops->set_option(&pe, EEH_OPT_ENABLE);
if (!ret) {
- edev->config_addr = of_read_number(regs, 1);
/* Retrieve PE address */
+ edev->config_addr = (pdn->busno << 16) | (pdn->devfn << 8);
edev->pe_config_addr = eeh_ops->get_pe_addr(&pe);
pe.addr = edev->pe_config_addr;
@@ -285,16 +265,17 @@ static void *pseries_eeh_of_probe(struct device_node *dn, void *flag)
eeh_add_flag(EEH_ENABLED);
eeh_add_to_parent_pe(edev);
- pr_debug("%s: EEH enabled on %s PHB#%d-PE#%x, config addr#%x\n",
- __func__, dn->full_name, pe.phb->global_number,
- pe.addr, pe.config_addr);
- } else if (dn->parent && of_node_to_eeh_dev(dn->parent) &&
- (of_node_to_eeh_dev(dn->parent))->pe) {
+ pr_debug("%s: EEH enabled on %02x:%02x.%01x PHB#%d-PE#%x\n",
+ __func__, pdn->busno, PCI_SLOT(pdn->devfn),
+ PCI_FUNC(pdn->devfn), pe.phb->global_number,
+ pe.addr);
+ } else if (pdn->parent && pdn_to_eeh_dev(pdn->parent) &&
+ (pdn_to_eeh_dev(pdn->parent))->pe) {
/* This device doesn't support EEH, but it may have an
* EEH parent, in which case we mark it as supported.
*/
- edev->config_addr = of_node_to_eeh_dev(dn->parent)->config_addr;
- edev->pe_config_addr = of_node_to_eeh_dev(dn->parent)->pe_config_addr;
+ edev->config_addr = pdn_to_eeh_dev(pdn->parent)->config_addr;
+ edev->pe_config_addr = pdn_to_eeh_dev(pdn->parent)->pe_config_addr;
eeh_add_to_parent_pe(edev);
}
}
@@ -670,45 +651,36 @@ static int pseries_eeh_configure_bridge(struct eeh_pe *pe)
/**
* pseries_eeh_read_config - Read PCI config space
- * @dn: device node
+ * @pdn: PCI device node
* @where: PCI address
* @size: size to read
* @val: return value
*
* Read config space from the specified device
*/
-static int pseries_eeh_read_config(struct device_node *dn, int where, int size, u32 *val)
+static int pseries_eeh_read_config(struct pci_dn *pdn, int where, int size, u32 *val)
{
- struct pci_dn *pdn;
-
- pdn = PCI_DN(dn);
-
return rtas_read_config(pdn, where, size, val);
}
/**
* pseries_eeh_write_config - Write PCI config space
- * @dn: device node
+ * @pdn: PCI device node
* @where: PCI address
* @size: size to write
* @val: value to be written
*
* Write config space to the specified device
*/
-static int pseries_eeh_write_config(struct device_node *dn, int where, int size, u32 val)
+static int pseries_eeh_write_config(struct pci_dn *pdn, int where, int size, u32 val)
{
- struct pci_dn *pdn;
-
- pdn = PCI_DN(dn);
-
return rtas_write_config(pdn, where, size, val);
}
static struct eeh_ops pseries_eeh_ops = {
.name = "pseries",
.init = pseries_eeh_init,
- .of_probe = pseries_eeh_of_probe,
- .dev_probe = NULL,
+ .probe = pseries_eeh_probe,
.set_option = pseries_eeh_set_option,
.get_pe_addr = pseries_eeh_get_pe_addr,
.get_state = pseries_eeh_get_state,
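
The pseries EEH probe now consumes the cached pci_dn (class/vendor/device IDs already parsed from the OF node) instead of re-reading OF properties, and builds the RTAS config address directly from busno/devfn rather than from the "reg" property. A tiny model of that address construction; PCI_SLOT()/PCI_FUNC() mirror the usual Linux definitions:

    #include <stdint.h>
    #include <stdio.h>

    #define PCI_SLOT(devfn) (((devfn) >> 3) & 0x1f)
    #define PCI_FUNC(devfn) ((devfn) & 0x07)

    static uint32_t rtas_config_addr(uint8_t busno, uint8_t devfn)
    {
        /* bus in bits 23:16, device/function in bits 15:8, register 0 */
        return ((uint32_t)busno << 16) | ((uint32_t)devfn << 8);
    }

    int main(void)
    {
        uint8_t busno = 0x01, devfn = (3 << 3) | 0;     /* 01:03.0 */

        printf("config_addr=%#x (%02x:%02x.%x)\n",
               rtas_config_addr(busno, devfn),
               busno, PCI_SLOT(devfn), PCI_FUNC(devfn));
        return 0;
    }
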
diff --git a/arch/powerpc/platforms/pseries/hotplug-memory.c b/arch/powerpc/platforms/pseries/hotplug-memory.c
index fa41f0da5b6f..0ced387e1463 100644
--- a/arch/powerpc/platforms/pseries/hotplug-memory.c
+++ b/arch/powerpc/platforms/pseries/hotplug-memory.c
@@ -9,11 +9,14 @@
* 2 of the License, or (at your option) any later version.
*/
+#define pr_fmt(fmt) "pseries-hotplug-mem: " fmt
+
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/memblock.h>
#include <linux/memory.h>
#include <linux/memory_hotplug.h>
+#include <linux/slab.h>
#include <asm/firmware.h>
#include <asm/machdep.h>
@@ -21,6 +24,8 @@
#include <asm/sparsemem.h>
#include "pseries.h"
+static bool rtas_hp_event;
+
unsigned long pseries_memory_block_size(void)
{
struct device_node *np;
@@ -64,6 +69,67 @@ unsigned long pseries_memory_block_size(void)
return memblock_size;
}
+static void dlpar_free_drconf_property(struct property *prop)
+{
+ kfree(prop->name);
+ kfree(prop->value);
+ kfree(prop);
+}
+
+static struct property *dlpar_clone_drconf_property(struct device_node *dn)
+{
+ struct property *prop, *new_prop;
+ struct of_drconf_cell *lmbs;
+ u32 num_lmbs, *p;
+ int i;
+
+ prop = of_find_property(dn, "ibm,dynamic-memory", NULL);
+ if (!prop)
+ return NULL;
+
+ new_prop = kzalloc(sizeof(*new_prop), GFP_KERNEL);
+ if (!new_prop)
+ return NULL;
+
+ new_prop->name = kstrdup(prop->name, GFP_KERNEL);
+ new_prop->value = kmalloc(prop->length, GFP_KERNEL);
+ if (!new_prop->name || !new_prop->value) {
+ dlpar_free_drconf_property(new_prop);
+ return NULL;
+ }
+
+ memcpy(new_prop->value, prop->value, prop->length);
+ new_prop->length = prop->length;
+
+ /* Convert the property to cpu endian-ness */
+ p = new_prop->value;
+ *p = be32_to_cpu(*p);
+
+ num_lmbs = *p++;
+ lmbs = (struct of_drconf_cell *)p;
+
+ for (i = 0; i < num_lmbs; i++) {
+ lmbs[i].base_addr = be64_to_cpu(lmbs[i].base_addr);
+ lmbs[i].drc_index = be32_to_cpu(lmbs[i].drc_index);
+ lmbs[i].flags = be32_to_cpu(lmbs[i].flags);
+ }
+
+ return new_prop;
+}
+
+static struct memory_block *lmb_to_memblock(struct of_drconf_cell *lmb)
+{
+ unsigned long section_nr;
+ struct mem_section *mem_sect;
+ struct memory_block *mem_block;
+
+ section_nr = pfn_to_section_nr(PFN_DOWN(lmb->base_addr));
+ mem_sect = __nr_to_section(section_nr);
+
+ mem_block = find_memory_block(mem_sect);
+ return mem_block;
+}
+
#ifdef CONFIG_MEMORY_HOTREMOVE
static int pseries_remove_memblock(unsigned long base, unsigned int memblock_size)
{
@@ -122,6 +188,173 @@ static int pseries_remove_mem_node(struct device_node *np)
pseries_remove_memblock(base, lmb_size);
return 0;
}
+
+static bool lmb_is_removable(struct of_drconf_cell *lmb)
+{
+ int i, scns_per_block;
+ int rc = 1;
+ unsigned long pfn, block_sz;
+ u64 phys_addr;
+
+ if (!(lmb->flags & DRCONF_MEM_ASSIGNED))
+ return false;
+
+ block_sz = memory_block_size_bytes();
+ scns_per_block = block_sz / MIN_MEMORY_BLOCK_SIZE;
+ phys_addr = lmb->base_addr;
+
+ for (i = 0; i < scns_per_block; i++) {
+ pfn = PFN_DOWN(phys_addr);
+ if (!pfn_present(pfn))
+ continue;
+
+ rc &= is_mem_section_removable(pfn, PAGES_PER_SECTION);
+ phys_addr += MIN_MEMORY_BLOCK_SIZE;
+ }
+
+ return rc ? true : false;
+}
+
+static int dlpar_add_lmb(struct of_drconf_cell *);
+
+static int dlpar_remove_lmb(struct of_drconf_cell *lmb)
+{
+ struct memory_block *mem_block;
+ unsigned long block_sz;
+ int nid, rc;
+
+ if (!lmb_is_removable(lmb))
+ return -EINVAL;
+
+ mem_block = lmb_to_memblock(lmb);
+ if (!mem_block)
+ return -EINVAL;
+
+ rc = device_offline(&mem_block->dev);
+ put_device(&mem_block->dev);
+ if (rc)
+ return rc;
+
+ block_sz = pseries_memory_block_size();
+ nid = memory_add_physaddr_to_nid(lmb->base_addr);
+
+ remove_memory(nid, lmb->base_addr, block_sz);
+
+ /* Update memory regions for memory remove */
+ memblock_remove(lmb->base_addr, block_sz);
+
+ dlpar_release_drc(lmb->drc_index);
+
+ lmb->flags &= ~DRCONF_MEM_ASSIGNED;
+ return 0;
+}
+
+static int dlpar_memory_remove_by_count(u32 lmbs_to_remove,
+ struct property *prop)
+{
+ struct of_drconf_cell *lmbs;
+ int lmbs_removed = 0;
+ int lmbs_available = 0;
+ u32 num_lmbs, *p;
+ int i, rc;
+
+ pr_info("Attempting to hot-remove %d LMB(s)\n", lmbs_to_remove);
+
+ if (lmbs_to_remove == 0)
+ return -EINVAL;
+
+ p = prop->value;
+ num_lmbs = *p++;
+ lmbs = (struct of_drconf_cell *)p;
+
+ /* Validate that there are enough LMBs to satisfy the request */
+ for (i = 0; i < num_lmbs; i++) {
+ if (lmbs[i].flags & DRCONF_MEM_ASSIGNED)
+ lmbs_available++;
+ }
+
+ if (lmbs_available < lmbs_to_remove)
+ return -EINVAL;
+
+ for (i = 0; i < num_lmbs && lmbs_removed < lmbs_to_remove; i++) {
+ rc = dlpar_remove_lmb(&lmbs[i]);
+ if (rc)
+ continue;
+
+ lmbs_removed++;
+
+ /* Mark this lmb so we can add it later if all of the
+ * requested LMBs cannot be removed.
+ */
+ lmbs[i].reserved = 1;
+ }
+
+ if (lmbs_removed != lmbs_to_remove) {
+ pr_err("Memory hot-remove failed, adding LMB's back\n");
+
+ for (i = 0; i < num_lmbs; i++) {
+ if (!lmbs[i].reserved)
+ continue;
+
+ rc = dlpar_add_lmb(&lmbs[i]);
+ if (rc)
+ pr_err("Failed to add LMB back, drc index %x\n",
+ lmbs[i].drc_index);
+
+ lmbs[i].reserved = 0;
+ }
+
+ rc = -EINVAL;
+ } else {
+ for (i = 0; i < num_lmbs; i++) {
+ if (!lmbs[i].reserved)
+ continue;
+
+ pr_info("Memory at %llx was hot-removed\n",
+ lmbs[i].base_addr);
+
+ lmbs[i].reserved = 0;
+ }
+ rc = 0;
+ }
+
+ return rc;
+}
+
+static int dlpar_memory_remove_by_index(u32 drc_index, struct property *prop)
+{
+ struct of_drconf_cell *lmbs;
+ u32 num_lmbs, *p;
+ int lmb_found;
+ int i, rc;
+
+ pr_info("Attempting to hot-remove LMB, drc index %x\n", drc_index);
+
+ p = prop->value;
+ num_lmbs = *p++;
+ lmbs = (struct of_drconf_cell *)p;
+
+ lmb_found = 0;
+ for (i = 0; i < num_lmbs; i++) {
+ if (lmbs[i].drc_index == drc_index) {
+ lmb_found = 1;
+ rc = dlpar_remove_lmb(&lmbs[i]);
+ break;
+ }
+ }
+
+ if (!lmb_found)
+ rc = -EINVAL;
+
+ if (rc)
+ pr_info("Failed to hot-remove memory at %llx\n",
+ lmbs[i].base_addr);
+ else
+ pr_info("Memory at %llx was hot-removed\n", lmbs[i].base_addr);
+
+ return rc;
+}
+
#else
static inline int pseries_remove_memblock(unsigned long base,
unsigned int memblock_size)
@@ -132,8 +365,261 @@ static inline int pseries_remove_mem_node(struct device_node *np)
{
return 0;
}
+static inline int dlpar_memory_remove(struct pseries_hp_errorlog *hp_elog)
+{
+ return -EOPNOTSUPP;
+}
+static int dlpar_remove_lmb(struct of_drconf_cell *lmb)
+{
+ return -EOPNOTSUPP;
+}
+static int dlpar_memory_remove_by_count(u32 lmbs_to_remove,
+ struct property *prop)
+{
+ return -EOPNOTSUPP;
+}
+static int dlpar_memory_remove_by_index(u32 drc_index, struct property *prop)
+{
+ return -EOPNOTSUPP;
+}
+
#endif /* CONFIG_MEMORY_HOTREMOVE */
+static int dlpar_add_lmb(struct of_drconf_cell *lmb)
+{
+ struct memory_block *mem_block;
+ unsigned long block_sz;
+ int nid, rc;
+
+ if (lmb->flags & DRCONF_MEM_ASSIGNED)
+ return -EINVAL;
+
+ block_sz = memory_block_size_bytes();
+
+ rc = dlpar_acquire_drc(lmb->drc_index);
+ if (rc)
+ return rc;
+
+ /* Find the node id for this address */
+ nid = memory_add_physaddr_to_nid(lmb->base_addr);
+
+ /* Add the memory */
+ rc = add_memory(nid, lmb->base_addr, block_sz);
+ if (rc) {
+ dlpar_release_drc(lmb->drc_index);
+ return rc;
+ }
+
+ /* Register this block of memory */
+ rc = memblock_add(lmb->base_addr, block_sz);
+ if (rc) {
+ remove_memory(nid, lmb->base_addr, block_sz);
+ dlpar_release_drc(lmb->drc_index);
+ return rc;
+ }
+
+ mem_block = lmb_to_memblock(lmb);
+ if (!mem_block) {
+ remove_memory(nid, lmb->base_addr, block_sz);
+ dlpar_release_drc(lmb->drc_index);
+ return -EINVAL;
+ }
+
+ rc = device_online(&mem_block->dev);
+ put_device(&mem_block->dev);
+ if (rc) {
+ remove_memory(nid, lmb->base_addr, block_sz);
+ dlpar_release_drc(lmb->drc_index);
+ return rc;
+ }
+
+ lmb->flags |= DRCONF_MEM_ASSIGNED;
+ return 0;
+}
+
+static int dlpar_memory_add_by_count(u32 lmbs_to_add, struct property *prop)
+{
+ struct of_drconf_cell *lmbs;
+ u32 num_lmbs, *p;
+ int lmbs_available = 0;
+ int lmbs_added = 0;
+ int i, rc;
+
+ pr_info("Attempting to hot-add %d LMB(s)\n", lmbs_to_add);
+
+ if (lmbs_to_add == 0)
+ return -EINVAL;
+
+ p = prop->value;
+ num_lmbs = *p++;
+ lmbs = (struct of_drconf_cell *)p;
+
+ /* Validate that there are enough LMBs to satisfy the request */
+ for (i = 0; i < num_lmbs; i++) {
+ if (!(lmbs[i].flags & DRCONF_MEM_ASSIGNED))
+ lmbs_available++;
+ }
+
+ if (lmbs_available < lmbs_to_add)
+ return -EINVAL;
+
+ for (i = 0; i < num_lmbs && lmbs_to_add != lmbs_added; i++) {
+ rc = dlpar_add_lmb(&lmbs[i]);
+ if (rc)
+ continue;
+
+ lmbs_added++;
+
+ /* Mark this lmb so we can remove it later if all of the
+ * requested LMBs cannot be added.
+ */
+ lmbs[i].reserved = 1;
+ }
+
+ if (lmbs_added != lmbs_to_add) {
+ pr_err("Memory hot-add failed, removing any added LMBs\n");
+
+ for (i = 0; i < num_lmbs; i++) {
+ if (!lmbs[i].reserved)
+ continue;
+
+ rc = dlpar_remove_lmb(&lmbs[i]);
+ if (rc)
+ pr_err("Failed to remove LMB, drc index %x\n",
+ be32_to_cpu(lmbs[i].drc_index));
+ }
+ rc = -EINVAL;
+ } else {
+ for (i = 0; i < num_lmbs; i++) {
+ if (!lmbs[i].reserved)
+ continue;
+
+ pr_info("Memory at %llx (drc index %x) was hot-added\n",
+ lmbs[i].base_addr, lmbs[i].drc_index);
+ lmbs[i].reserved = 0;
+ }
+ }
+
+ return rc;
+}
+
+static int dlpar_memory_add_by_index(u32 drc_index, struct property *prop)
+{
+ struct of_drconf_cell *lmbs;
+ u32 num_lmbs, *p;
+ int i, lmb_found;
+ int rc;
+
+ pr_info("Attempting to hot-add LMB, drc index %x\n", drc_index);
+
+ p = prop->value;
+ num_lmbs = *p++;
+ lmbs = (struct of_drconf_cell *)p;
+
+ lmb_found = 0;
+ for (i = 0; i < num_lmbs; i++) {
+ if (lmbs[i].drc_index == drc_index) {
+ lmb_found = 1;
+ rc = dlpar_add_lmb(&lmbs[i]);
+ break;
+ }
+ }
+
+ if (!lmb_found)
+ rc = -EINVAL;
+
+ if (rc)
+ pr_info("Failed to hot-add memory, drc index %x\n", drc_index);
+ else
+ pr_info("Memory at %llx (drc index %x) was hot-added\n",
+ lmbs[i].base_addr, drc_index);
+
+ return rc;
+}
+
+static void dlpar_update_drconf_property(struct device_node *dn,
+ struct property *prop)
+{
+ struct of_drconf_cell *lmbs;
+ u32 num_lmbs, *p;
+ int i;
+
+ /* Convert the property back to BE */
+ p = prop->value;
+ num_lmbs = *p;
+ *p = cpu_to_be32(*p);
+ p++;
+
+ lmbs = (struct of_drconf_cell *)p;
+ for (i = 0; i < num_lmbs; i++) {
+ lmbs[i].base_addr = cpu_to_be64(lmbs[i].base_addr);
+ lmbs[i].drc_index = cpu_to_be32(lmbs[i].drc_index);
+ lmbs[i].flags = cpu_to_be32(lmbs[i].flags);
+ }
+
+ rtas_hp_event = true;
+ of_update_property(dn, prop);
+ rtas_hp_event = false;
+}
+
+int dlpar_memory(struct pseries_hp_errorlog *hp_elog)
+{
+ struct device_node *dn;
+ struct property *prop;
+ u32 count, drc_index;
+ int rc;
+
+ count = hp_elog->_drc_u.drc_count;
+ drc_index = hp_elog->_drc_u.drc_index;
+
+ lock_device_hotplug();
+
+ dn = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
+ if (!dn) {
+ rc = -EINVAL;
+ goto dlpar_memory_out;
+ }
+
+ prop = dlpar_clone_drconf_property(dn);
+ if (!prop) {
+ rc = -EINVAL;
+ goto dlpar_memory_out;
+ }
+
+ switch (hp_elog->action) {
+ case PSERIES_HP_ELOG_ACTION_ADD:
+ if (hp_elog->id_type == PSERIES_HP_ELOG_ID_DRC_COUNT)
+ rc = dlpar_memory_add_by_count(count, prop);
+ else if (hp_elog->id_type == PSERIES_HP_ELOG_ID_DRC_INDEX)
+ rc = dlpar_memory_add_by_index(drc_index, prop);
+ else
+ rc = -EINVAL;
+ break;
+ case PSERIES_HP_ELOG_ACTION_REMOVE:
+ if (hp_elog->id_type == PSERIES_HP_ELOG_ID_DRC_COUNT)
+ rc = dlpar_memory_remove_by_count(count, prop);
+ else if (hp_elog->id_type == PSERIES_HP_ELOG_ID_DRC_INDEX)
+ rc = dlpar_memory_remove_by_index(drc_index, prop);
+ else
+ rc = -EINVAL;
+ break;
+ default:
+ pr_err("Invalid action (%d) specified\n", hp_elog->action);
+ rc = -EINVAL;
+ break;
+ }
+
+ if (rc)
+ dlpar_free_drconf_property(prop);
+ else
+ dlpar_update_drconf_property(dn, prop);
+
+dlpar_memory_out:
+ of_node_put(dn);
+ unlock_device_hotplug();
+ return rc;
+}
+
static int pseries_add_mem_node(struct device_node *np)
{
const char *type;
@@ -174,6 +660,9 @@ static int pseries_update_drconf_memory(struct of_reconfig_data *pr)
__be32 *p;
int i, rc = -EINVAL;
+ if (rtas_hp_event)
+ return 0;
+
memblock_size = pseries_memory_block_size();
if (!memblock_size)
return -EINVAL;
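
dlpar_memory() above works on a clone of ibm,dynamic-memory: the clone is converted to CPU endianness, LMBs are added or removed (the reserved flag marks work that may need to be rolled back), and on success the property is converted back to big-endian and written with of_update_property(), with rtas_hp_event set so pseries_update_drconf_memory() ignores the kernel's own update. A userspace sketch of just the endianness round-trip, with struct drconf_cell as a trimmed stand-in for of_drconf_cell and an assumed flag value:

    #include <endian.h>
    #include <stdint.h>
    #include <stdio.h>

    struct drconf_cell {
        uint64_t base_addr;
        uint32_t drc_index;
        uint32_t flags;
    };

    static void drconf_to_cpu(struct drconf_cell *lmbs, uint32_t n)
    {
        for (uint32_t i = 0; i < n; i++) {
            lmbs[i].base_addr = be64toh(lmbs[i].base_addr);
            lmbs[i].drc_index = be32toh(lmbs[i].drc_index);
            lmbs[i].flags     = be32toh(lmbs[i].flags);
        }
    }

    static void drconf_to_be(struct drconf_cell *lmbs, uint32_t n)
    {
        for (uint32_t i = 0; i < n; i++) {
            lmbs[i].base_addr = htobe64(lmbs[i].base_addr);
            lmbs[i].drc_index = htobe32(lmbs[i].drc_index);
            lmbs[i].flags     = htobe32(lmbs[i].flags);
        }
    }

    int main(void)
    {
        struct drconf_cell lmb = {
            .base_addr = htobe64(0x20000000),
            .drc_index = htobe32(0x80000002),
            .flags     = htobe32(0x8),  /* assumed value for an "assigned" flag */
        };

        drconf_to_cpu(&lmb, 1);         /* work on it in CPU byte order */
        printf("base=%#llx drc=%#x flags=%#x\n",
               (unsigned long long)lmb.base_addr, lmb.drc_index, lmb.flags);
        drconf_to_be(&lmb, 1);          /* convert back before of_update_property() */
        return 0;
    }
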
diff --git a/arch/powerpc/platforms/pseries/hvCall.S b/arch/powerpc/platforms/pseries/hvCall.S
index ccd53f91e8aa..74b5b8e239c8 100644
--- a/arch/powerpc/platforms/pseries/hvCall.S
+++ b/arch/powerpc/platforms/pseries/hvCall.S
@@ -7,12 +7,12 @@
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
+#include <linux/jump_label.h>
#include <asm/hvcall.h>
#include <asm/processor.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/ptrace.h>
-#include <asm/jump_label.h>
.section ".text"
diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c
index 1d3d52dc3ff3..61d5a17f45c0 100644
--- a/arch/powerpc/platforms/pseries/iommu.c
+++ b/arch/powerpc/platforms/pseries/iommu.c
@@ -49,6 +49,7 @@
#include <asm/mmzone.h>
#include <asm/plpar_wrappers.h>
+#include "pseries.h"
static void tce_invalidate_pSeries_sw(struct iommu_table *tbl,
__be64 *startp, __be64 *endp)
@@ -1307,16 +1308,16 @@ void iommu_init_early_pSeries(void)
ppc_md.tce_free = tce_free_pSeriesLP;
}
ppc_md.tce_get = tce_get_pSeriesLP;
- ppc_md.pci_dma_bus_setup = pci_dma_bus_setup_pSeriesLP;
- ppc_md.pci_dma_dev_setup = pci_dma_dev_setup_pSeriesLP;
+ pseries_pci_controller_ops.dma_bus_setup = pci_dma_bus_setup_pSeriesLP;
+ pseries_pci_controller_ops.dma_dev_setup = pci_dma_dev_setup_pSeriesLP;
ppc_md.dma_set_mask = dma_set_mask_pSeriesLP;
ppc_md.dma_get_required_mask = dma_get_required_mask_pSeriesLP;
} else {
ppc_md.tce_build = tce_build_pSeries;
ppc_md.tce_free = tce_free_pSeries;
ppc_md.tce_get = tce_get_pseries;
- ppc_md.pci_dma_bus_setup = pci_dma_bus_setup_pSeries;
- ppc_md.pci_dma_dev_setup = pci_dma_dev_setup_pSeries;
+ pseries_pci_controller_ops.dma_bus_setup = pci_dma_bus_setup_pSeries;
+ pseries_pci_controller_ops.dma_dev_setup = pci_dma_dev_setup_pSeries;
}
@@ -1340,3 +1341,5 @@ static int __init disable_multitce(char *str)
}
__setup("multitce=", disable_multitce);
+
+machine_subsys_initcall_sync(pseries, tce_iommu_bus_notifier_init);
diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c
index b5682fd6c984..b7a67e3d2201 100644
--- a/arch/powerpc/platforms/pseries/lpar.c
+++ b/arch/powerpc/platforms/pseries/lpar.c
@@ -26,7 +26,7 @@
#include <linux/dma-mapping.h>
#include <linux/console.h>
#include <linux/export.h>
-#include <linux/static_key.h>
+#include <linux/jump_label.h>
#include <asm/processor.h>
#include <asm/mmu.h>
#include <asm/page.h>
diff --git a/arch/powerpc/platforms/pseries/mobility.c b/arch/powerpc/platforms/pseries/mobility.c
index 90cf3dcbd9f2..ceb18d349459 100644
--- a/arch/powerpc/platforms/pseries/mobility.c
+++ b/arch/powerpc/platforms/pseries/mobility.c
@@ -25,10 +25,10 @@
static struct kobject *mobility_kobj;
struct update_props_workarea {
- u32 phandle;
- u32 state;
- u64 reserved;
- u32 nprops;
+ __be32 phandle;
+ __be32 state;
+ __be64 reserved;
+ __be32 nprops;
} __packed;
#define NODE_ACTION_MASK 0xff000000
@@ -54,11 +54,11 @@ static int mobility_rtas_call(int token, char *buf, s32 scope)
return rc;
}
-static int delete_dt_node(u32 phandle)
+static int delete_dt_node(__be32 phandle)
{
struct device_node *dn;
- dn = of_find_node_by_phandle(phandle);
+ dn = of_find_node_by_phandle(be32_to_cpu(phandle));
if (!dn)
return -ENOENT;
@@ -127,7 +127,7 @@ static int update_dt_property(struct device_node *dn, struct property **prop,
return 0;
}
-static int update_dt_node(u32 phandle, s32 scope)
+static int update_dt_node(__be32 phandle, s32 scope)
{
struct update_props_workarea *upwa;
struct device_node *dn;
@@ -136,6 +136,7 @@ static int update_dt_node(u32 phandle, s32 scope)
char *prop_data;
char *rtas_buf;
int update_properties_token;
+ u32 nprops;
u32 vd;
update_properties_token = rtas_token("ibm,update-properties");
@@ -146,7 +147,7 @@ static int update_dt_node(u32 phandle, s32 scope)
if (!rtas_buf)
return -ENOMEM;
- dn = of_find_node_by_phandle(phandle);
+ dn = of_find_node_by_phandle(be32_to_cpu(phandle));
if (!dn) {
kfree(rtas_buf);
return -ENOENT;
@@ -162,6 +163,7 @@ static int update_dt_node(u32 phandle, s32 scope)
break;
prop_data = rtas_buf + sizeof(*upwa);
+ nprops = be32_to_cpu(upwa->nprops);
/* On the first call to ibm,update-properties for a node
* the first property value descriptor contains an empty
@@ -170,17 +172,17 @@ static int update_dt_node(u32 phandle, s32 scope)
*/
if (*prop_data == 0) {
prop_data++;
- vd = *(u32 *)prop_data;
+ vd = be32_to_cpu(*(__be32 *)prop_data);
prop_data += vd + sizeof(vd);
- upwa->nprops--;
+ nprops--;
}
- for (i = 0; i < upwa->nprops; i++) {
+ for (i = 0; i < nprops; i++) {
char *prop_name;
prop_name = prop_data;
prop_data += strlen(prop_name) + 1;
- vd = *(u32 *)prop_data;
+ vd = be32_to_cpu(*(__be32 *)prop_data);
prop_data += sizeof(vd);
switch (vd) {
@@ -212,13 +214,13 @@ static int update_dt_node(u32 phandle, s32 scope)
return 0;
}
-static int add_dt_node(u32 parent_phandle, u32 drc_index)
+static int add_dt_node(__be32 parent_phandle, __be32 drc_index)
{
struct device_node *dn;
struct device_node *parent_dn;
int rc;
- parent_dn = of_find_node_by_phandle(parent_phandle);
+ parent_dn = of_find_node_by_phandle(be32_to_cpu(parent_phandle));
if (!parent_dn)
return -ENOENT;
@@ -237,7 +239,7 @@ static int add_dt_node(u32 parent_phandle, u32 drc_index)
int pseries_devicetree_update(s32 scope)
{
char *rtas_buf;
- u32 *data;
+ __be32 *data;
int update_nodes_token;
int rc;
@@ -254,17 +256,17 @@ int pseries_devicetree_update(s32 scope)
if (rc && rc != 1)
break;
- data = (u32 *)rtas_buf + 4;
- while (*data & NODE_ACTION_MASK) {
+ data = (__be32 *)rtas_buf + 4;
+ while (be32_to_cpu(*data) & NODE_ACTION_MASK) {
int i;
- u32 action = *data & NODE_ACTION_MASK;
- int node_count = *data & NODE_COUNT_MASK;
+ u32 action = be32_to_cpu(*data) & NODE_ACTION_MASK;
+ u32 node_count = be32_to_cpu(*data) & NODE_COUNT_MASK;
data++;
for (i = 0; i < node_count; i++) {
- u32 phandle = *data++;
- u32 drc_index;
+ __be32 phandle = *data++;
+ __be32 drc_index;
switch (action) {
case DELETE_DT_NODE:
@@ -318,28 +320,34 @@ static ssize_t migrate_store(struct class *class, struct class_attribute *attr,
{
u64 streamid;
int rc;
- int vasi_rc = 0;
rc = kstrtou64(buf, 0, &streamid);
if (rc)
return rc;
do {
- rc = rtas_ibm_suspend_me(streamid, &vasi_rc);
- if (!rc && vasi_rc == RTAS_NOT_SUSPENDABLE)
+ rc = rtas_ibm_suspend_me(streamid);
+ if (rc == -EAGAIN)
ssleep(1);
- } while (!rc && vasi_rc == RTAS_NOT_SUSPENDABLE);
+ } while (rc == -EAGAIN);
if (rc)
return rc;
- if (vasi_rc)
- return vasi_rc;
post_mobility_fixup();
return count;
}
+/*
+ * Used by drmgr to determine the kernel behavior of the migration interface.
+ *
+ * Version 1: Performs all PAPR requirements for migration including
+ * firmware activation and device tree update.
+ */
+#define MIGRATION_API_VERSION 1
+
static CLASS_ATTR(migration, S_IWUSR, NULL, migrate_store);
+static CLASS_ATTR_STRING(api_version, S_IRUGO, __stringify(MIGRATION_API_VERSION));
static int __init mobility_sysfs_init(void)
{
@@ -350,7 +358,13 @@ static int __init mobility_sysfs_init(void)
return -ENOMEM;
rc = sysfs_create_file(mobility_kobj, &class_attr_migration.attr);
+ if (rc)
+ pr_err("mobility: unable to create migration sysfs file (%d)\n", rc);
- return rc;
+ rc = sysfs_create_file(mobility_kobj, &class_attr_api_version.attr.attr);
+ if (rc)
+ pr_err("mobility: unable to create api_version sysfs file (%d)\n", rc);
+
+ return 0;
}
machine_device_initcall(pseries, mobility_sysfs_init);
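
Beyond annotating struct update_props_workarea as big-endian, the device-tree update loop now byte-swaps each word returned by ibm,update-nodes before splitting it into an action and a node count. A sketch of that decode; NODE_COUNT_MASK and the sample action value are assumptions here (only NODE_ACTION_MASK, 0xff000000, appears in this file):

    #include <endian.h>
    #include <stdint.h>
    #include <stdio.h>

    #define NODE_ACTION_MASK  0xff000000u
    #define NODE_COUNT_MASK   0x00ffffffu   /* assumed: low 24 bits hold the count */

    int main(void)
    {
        /* one big-endian word as returned by RTAS: action 0x01, count 3 */
        uint32_t be_word = htobe32(0x01000003);
        uint32_t v = be32toh(be_word);

        uint32_t action = v & NODE_ACTION_MASK;
        uint32_t count  = v & NODE_COUNT_MASK;

        printf("action=%#x node_count=%u\n", action, count);
        return 0;
    }
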
diff --git a/arch/powerpc/platforms/pseries/msi.c b/arch/powerpc/platforms/pseries/msi.c
index 691a154c286d..c8d24f9a6948 100644
--- a/arch/powerpc/platforms/pseries/msi.c
+++ b/arch/powerpc/platforms/pseries/msi.c
@@ -195,6 +195,7 @@ static struct device_node *find_pe_total_msi(struct pci_dev *dev, int *total)
static struct device_node *find_pe_dn(struct pci_dev *dev, int *total)
{
struct device_node *dn;
+ struct pci_dn *pdn;
struct eeh_dev *edev;
/* Found our PE and assume 8 at that point. */
@@ -204,10 +205,11 @@ static struct device_node *find_pe_dn(struct pci_dev *dev, int *total)
return NULL;
/* Get the top level device in the PE */
- edev = of_node_to_eeh_dev(dn);
+ edev = pdn_to_eeh_dev(PCI_DN(dn));
if (edev->pe)
edev = list_first_entry(&edev->pe->edevs, struct eeh_dev, list);
- dn = eeh_dev_to_of_node(edev);
+ pdn = eeh_dev_to_pdn(edev);
+ dn = pdn ? pdn->node : NULL;
if (!dn)
return NULL;
diff --git a/arch/powerpc/platforms/pseries/nvram.c b/arch/powerpc/platforms/pseries/nvram.c
index 054a0ed5c7ee..9f8184175c86 100644
--- a/arch/powerpc/platforms/pseries/nvram.c
+++ b/arch/powerpc/platforms/pseries/nvram.c
@@ -20,7 +20,6 @@
#include <linux/kmsg_dump.h>
#include <linux/pstore.h>
#include <linux/ctype.h>
-#include <linux/zlib.h>
#include <asm/uaccess.h>
#include <asm/nvram.h>
#include <asm/rtas.h>
@@ -30,129 +29,17 @@
/* Max bytes to read/write in one go */
#define NVRW_CNT 0x20
-/*
- * Set oops header version to distinguish between old and new format header.
- * lnx,oops-log partition max size is 4000, header version > 4000 will
- * help in identifying new header.
- */
-#define OOPS_HDR_VERSION 5000
-
static unsigned int nvram_size;
static int nvram_fetch, nvram_store;
static char nvram_buf[NVRW_CNT]; /* assume this is in the first 4GB */
static DEFINE_SPINLOCK(nvram_lock);
-struct err_log_info {
- __be32 error_type;
- __be32 seq_num;
-};
-
-struct nvram_os_partition {
- const char *name;
- int req_size; /* desired size, in bytes */
- int min_size; /* minimum acceptable size (0 means req_size) */
- long size; /* size of data portion (excluding err_log_info) */
- long index; /* offset of data portion of partition */
- bool os_partition; /* partition initialized by OS, not FW */
-};
-
-static struct nvram_os_partition rtas_log_partition = {
- .name = "ibm,rtas-log",
- .req_size = 2079,
- .min_size = 1055,
- .index = -1,
- .os_partition = true
-};
-
-static struct nvram_os_partition oops_log_partition = {
- .name = "lnx,oops-log",
- .req_size = 4000,
- .min_size = 2000,
- .index = -1,
- .os_partition = true
-};
-
-static const char *pseries_nvram_os_partitions[] = {
- "ibm,rtas-log",
- "lnx,oops-log",
- NULL
-};
-
-struct oops_log_info {
- __be16 version;
- __be16 report_length;
- __be64 timestamp;
-} __attribute__((packed));
-
-static void oops_to_nvram(struct kmsg_dumper *dumper,
- enum kmsg_dump_reason reason);
-
-static struct kmsg_dumper nvram_kmsg_dumper = {
- .dump = oops_to_nvram
-};
-
/* See clobbering_unread_rtas_event() */
#define NVRAM_RTAS_READ_TIMEOUT 5 /* seconds */
-static unsigned long last_unread_rtas_event; /* timestamp */
-
-/*
- * For capturing and compressing an oops or panic report...
-
- * big_oops_buf[] holds the uncompressed text we're capturing.
- *
- * oops_buf[] holds the compressed text, preceded by a oops header.
- * oops header has u16 holding the version of oops header (to differentiate
- * between old and new format header) followed by u16 holding the length of
- * the compressed* text (*Or uncompressed, if compression fails.) and u64
- * holding the timestamp. oops_buf[] gets written to NVRAM.
- *
- * oops_log_info points to the header. oops_data points to the compressed text.
- *
- * +- oops_buf
- * | +- oops_data
- * v v
- * +-----------+-----------+-----------+------------------------+
- * | version | length | timestamp | text |
- * | (2 bytes) | (2 bytes) | (8 bytes) | (oops_data_sz bytes) |
- * +-----------+-----------+-----------+------------------------+
- * ^
- * +- oops_log_info
- *
- * We preallocate these buffers during init to avoid kmalloc during oops/panic.
- */
-static size_t big_oops_buf_sz;
-static char *big_oops_buf, *oops_buf;
-static char *oops_data;
-static size_t oops_data_sz;
-
-/* Compression parameters */
-#define COMPR_LEVEL 6
-#define WINDOW_BITS 12
-#define MEM_LEVEL 4
-static struct z_stream_s stream;
+static time64_t last_unread_rtas_event; /* timestamp */
#ifdef CONFIG_PSTORE
-static struct nvram_os_partition of_config_partition = {
- .name = "of-config",
- .index = -1,
- .os_partition = false
-};
-
-static struct nvram_os_partition common_partition = {
- .name = "common",
- .index = -1,
- .os_partition = false
-};
-
-static enum pstore_type_id nvram_type_ids[] = {
- PSTORE_TYPE_DMESG,
- PSTORE_TYPE_PPC_RTAS,
- PSTORE_TYPE_PPC_OF,
- PSTORE_TYPE_PPC_COMMON,
- -1
-};
-static int read_type;
-static unsigned long last_rtas_event;
+time64_t last_rtas_event;
#endif
static ssize_t pSeries_nvram_read(char *buf, size_t count, loff_t *index)
@@ -246,132 +133,26 @@ static ssize_t pSeries_nvram_get_size(void)
return nvram_size ? nvram_size : -ENODEV;
}
-
-/* nvram_write_os_partition, nvram_write_error_log
+/* nvram_write_error_log
*
* We need to buffer the error logs into nvram to ensure that we have
- * the failure information to decode. If we have a severe error there
- * is no way to guarantee that the OS or the machine is in a state to
- * get back to user land and write the error to disk. For example if
- * the SCSI device driver causes a Machine Check by writing to a bad
- * IO address, there is no way of guaranteeing that the device driver
- * is in any state that is would also be able to write the error data
- * captured to disk, thus we buffer it in NVRAM for analysis on the
- * next boot.
- *
- * In NVRAM the partition containing the error log buffer will looks like:
- * Header (in bytes):
- * +-----------+----------+--------+------------+------------------+
- * | signature | checksum | length | name | data |
- * |0 |1 |2 3|4 15|16 length-1|
- * +-----------+----------+--------+------------+------------------+
- *
- * The 'data' section would look like (in bytes):
- * +--------------+------------+-----------------------------------+
- * | event_logged | sequence # | error log |
- * |0 3|4 7|8 error_log_size-1|
- * +--------------+------------+-----------------------------------+
- *
- * event_logged: 0 if event has not been logged to syslog, 1 if it has
- * sequence #: The unique sequence # for each event. (until it wraps)
- * error log: The error log from event_scan
+ * the failure information to decode.
*/
-static int nvram_write_os_partition(struct nvram_os_partition *part,
- char *buff, int length,
- unsigned int err_type,
- unsigned int error_log_cnt)
-{
- int rc;
- loff_t tmp_index;
- struct err_log_info info;
-
- if (part->index == -1) {
- return -ESPIPE;
- }
-
- if (length > part->size) {
- length = part->size;
- }
-
- info.error_type = cpu_to_be32(err_type);
- info.seq_num = cpu_to_be32(error_log_cnt);
-
- tmp_index = part->index;
-
- rc = ppc_md.nvram_write((char *)&info, sizeof(struct err_log_info), &tmp_index);
- if (rc <= 0) {
- pr_err("%s: Failed nvram_write (%d)\n", __func__, rc);
- return rc;
- }
-
- rc = ppc_md.nvram_write(buff, length, &tmp_index);
- if (rc <= 0) {
- pr_err("%s: Failed nvram_write (%d)\n", __func__, rc);
- return rc;
- }
-
- return 0;
-}
-
int nvram_write_error_log(char * buff, int length,
unsigned int err_type, unsigned int error_log_cnt)
{
int rc = nvram_write_os_partition(&rtas_log_partition, buff, length,
err_type, error_log_cnt);
if (!rc) {
- last_unread_rtas_event = get_seconds();
+ last_unread_rtas_event = ktime_get_real_seconds();
#ifdef CONFIG_PSTORE
- last_rtas_event = get_seconds();
+ last_rtas_event = ktime_get_real_seconds();
#endif
}
return rc;
}
-/* nvram_read_partition
- *
- * Reads nvram partition for at most 'length'
- */
-static int nvram_read_partition(struct nvram_os_partition *part, char *buff,
- int length, unsigned int *err_type,
- unsigned int *error_log_cnt)
-{
- int rc;
- loff_t tmp_index;
- struct err_log_info info;
-
- if (part->index == -1)
- return -1;
-
- if (length > part->size)
- length = part->size;
-
- tmp_index = part->index;
-
- if (part->os_partition) {
- rc = ppc_md.nvram_read((char *)&info,
- sizeof(struct err_log_info),
- &tmp_index);
- if (rc <= 0) {
- pr_err("%s: Failed nvram_read (%d)\n", __func__, rc);
- return rc;
- }
- }
-
- rc = ppc_md.nvram_read(buff, length, &tmp_index);
- if (rc <= 0) {
- pr_err("%s: Failed nvram_read (%d)\n", __func__, rc);
- return rc;
- }
-
- if (part->os_partition) {
- *error_log_cnt = be32_to_cpu(info.seq_num);
- *err_type = be32_to_cpu(info.error_type);
- }
-
- return 0;
-}
-
/* nvram_read_error_log
*
* Reads nvram for error log for at most 'length'
@@ -407,67 +188,6 @@ int nvram_clear_error_log(void)
return 0;
}
-/* pseries_nvram_init_os_partition
- *
- * This sets up a partition with an "OS" signature.
- *
- * The general strategy is the following:
- * 1.) If a partition with the indicated name already exists...
- * - If it's large enough, use it.
- * - Otherwise, recycle it and keep going.
- * 2.) Search for a free partition that is large enough.
- * 3.) If there's not a free partition large enough, recycle any obsolete
- * OS partitions and try again.
- * 4.) Will first try getting a chunk that will satisfy the requested size.
- * 5.) If a chunk of the requested size cannot be allocated, then try finding
- * a chunk that will satisfy the minum needed.
- *
- * Returns 0 on success, else -1.
- */
-static int __init pseries_nvram_init_os_partition(struct nvram_os_partition
- *part)
-{
- loff_t p;
- int size;
-
- /* Look for ours */
- p = nvram_find_partition(part->name, NVRAM_SIG_OS, &size);
-
- /* Found one but too small, remove it */
- if (p && size < part->min_size) {
- pr_info("nvram: Found too small %s partition,"
- " removing it...\n", part->name);
- nvram_remove_partition(part->name, NVRAM_SIG_OS, NULL);
- p = 0;
- }
-
- /* Create one if we didn't find */
- if (!p) {
- p = nvram_create_partition(part->name, NVRAM_SIG_OS,
- part->req_size, part->min_size);
- if (p == -ENOSPC) {
- pr_info("nvram: No room to create %s partition, "
- "deleting any obsolete OS partitions...\n",
- part->name);
- nvram_remove_partition(NULL, NVRAM_SIG_OS,
- pseries_nvram_os_partitions);
- p = nvram_create_partition(part->name, NVRAM_SIG_OS,
- part->req_size, part->min_size);
- }
- }
-
- if (p <= 0) {
- pr_err("nvram: Failed to find or create %s"
- " partition, err %d\n", part->name, (int)p);
- return -1;
- }
-
- part->index = p;
- part->size = nvram_get_partition_size(p) - sizeof(struct err_log_info);
-
- return 0;
-}
-
/*
* Are we using the ibm,rtas-log for oops/panic reports? And if so,
* would logging this oops/panic overwrite an RTAS event that rtas_errd
@@ -476,321 +196,14 @@ static int __init pseries_nvram_init_os_partition(struct nvram_os_partition
* We assume that if rtas_errd hasn't read the RTAS event in
* NVRAM_RTAS_READ_TIMEOUT seconds, it's probably not going to.
*/
-static int clobbering_unread_rtas_event(void)
+int clobbering_unread_rtas_event(void)
{
return (oops_log_partition.index == rtas_log_partition.index
&& last_unread_rtas_event
- && get_seconds() - last_unread_rtas_event <=
+ && ktime_get_real_seconds() - last_unread_rtas_event <=
NVRAM_RTAS_READ_TIMEOUT);
}
-/* Derived from logfs_compress() */
-static int nvram_compress(const void *in, void *out, size_t inlen,
- size_t outlen)
-{
- int err, ret;
-
- ret = -EIO;
- err = zlib_deflateInit2(&stream, COMPR_LEVEL, Z_DEFLATED, WINDOW_BITS,
- MEM_LEVEL, Z_DEFAULT_STRATEGY);
- if (err != Z_OK)
- goto error;
-
- stream.next_in = in;
- stream.avail_in = inlen;
- stream.total_in = 0;
- stream.next_out = out;
- stream.avail_out = outlen;
- stream.total_out = 0;
-
- err = zlib_deflate(&stream, Z_FINISH);
- if (err != Z_STREAM_END)
- goto error;
-
- err = zlib_deflateEnd(&stream);
- if (err != Z_OK)
- goto error;
-
- if (stream.total_out >= stream.total_in)
- goto error;
-
- ret = stream.total_out;
-error:
- return ret;
-}
-
-/* Compress the text from big_oops_buf into oops_buf. */
-static int zip_oops(size_t text_len)
-{
- struct oops_log_info *oops_hdr = (struct oops_log_info *)oops_buf;
- int zipped_len = nvram_compress(big_oops_buf, oops_data, text_len,
- oops_data_sz);
- if (zipped_len < 0) {
- pr_err("nvram: compression failed; returned %d\n", zipped_len);
- pr_err("nvram: logging uncompressed oops/panic report\n");
- return -1;
- }
- oops_hdr->version = cpu_to_be16(OOPS_HDR_VERSION);
- oops_hdr->report_length = cpu_to_be16(zipped_len);
- oops_hdr->timestamp = cpu_to_be64(get_seconds());
- return 0;
-}
-
-#ifdef CONFIG_PSTORE
-static int nvram_pstore_open(struct pstore_info *psi)
-{
- /* Reset the iterator to start reading partitions again */
- read_type = -1;
- return 0;
-}
-
-/**
- * nvram_pstore_write - pstore write callback for nvram
- * @type: Type of message logged
- * @reason: reason behind dump (oops/panic)
- * @id: identifier to indicate the write performed
- * @part: pstore writes data to registered buffer in parts,
- * part number will indicate the same.
- * @count: Indicates oops count
- * @compressed: Flag to indicate the log is compressed
- * @size: number of bytes written to the registered buffer
- * @psi: registered pstore_info structure
- *
- * Called by pstore_dump() when an oops or panic report is logged in the
- * printk buffer.
- * Returns 0 on successful write.
- */
-static int nvram_pstore_write(enum pstore_type_id type,
- enum kmsg_dump_reason reason,
- u64 *id, unsigned int part, int count,
- bool compressed, size_t size,
- struct pstore_info *psi)
-{
- int rc;
- unsigned int err_type = ERR_TYPE_KERNEL_PANIC;
- struct oops_log_info *oops_hdr = (struct oops_log_info *) oops_buf;
-
- /* part 1 has the recent messages from printk buffer */
- if (part > 1 || type != PSTORE_TYPE_DMESG ||
- clobbering_unread_rtas_event())
- return -1;
-
- oops_hdr->version = cpu_to_be16(OOPS_HDR_VERSION);
- oops_hdr->report_length = cpu_to_be16(size);
- oops_hdr->timestamp = cpu_to_be64(get_seconds());
-
- if (compressed)
- err_type = ERR_TYPE_KERNEL_PANIC_GZ;
-
- rc = nvram_write_os_partition(&oops_log_partition, oops_buf,
- (int) (sizeof(*oops_hdr) + size), err_type, count);
-
- if (rc != 0)
- return rc;
-
- *id = part;
- return 0;
-}
-
-/*
- * Reads the oops/panic report, rtas, of-config and common partition.
- * Returns the length of the data we read from each partition.
- * Returns 0 if we've been called before.
- */
-static ssize_t nvram_pstore_read(u64 *id, enum pstore_type_id *type,
- int *count, struct timespec *time, char **buf,
- bool *compressed, struct pstore_info *psi)
-{
- struct oops_log_info *oops_hdr;
- unsigned int err_type, id_no, size = 0;
- struct nvram_os_partition *part = NULL;
- char *buff = NULL;
- int sig = 0;
- loff_t p;
-
- read_type++;
-
- switch (nvram_type_ids[read_type]) {
- case PSTORE_TYPE_DMESG:
- part = &oops_log_partition;
- *type = PSTORE_TYPE_DMESG;
- break;
- case PSTORE_TYPE_PPC_RTAS:
- part = &rtas_log_partition;
- *type = PSTORE_TYPE_PPC_RTAS;
- time->tv_sec = last_rtas_event;
- time->tv_nsec = 0;
- break;
- case PSTORE_TYPE_PPC_OF:
- sig = NVRAM_SIG_OF;
- part = &of_config_partition;
- *type = PSTORE_TYPE_PPC_OF;
- *id = PSTORE_TYPE_PPC_OF;
- time->tv_sec = 0;
- time->tv_nsec = 0;
- break;
- case PSTORE_TYPE_PPC_COMMON:
- sig = NVRAM_SIG_SYS;
- part = &common_partition;
- *type = PSTORE_TYPE_PPC_COMMON;
- *id = PSTORE_TYPE_PPC_COMMON;
- time->tv_sec = 0;
- time->tv_nsec = 0;
- break;
- default:
- return 0;
- }
-
- if (!part->os_partition) {
- p = nvram_find_partition(part->name, sig, &size);
- if (p <= 0) {
- pr_err("nvram: Failed to find partition %s, "
- "err %d\n", part->name, (int)p);
- return 0;
- }
- part->index = p;
- part->size = size;
- }
-
- buff = kmalloc(part->size, GFP_KERNEL);
-
- if (!buff)
- return -ENOMEM;
-
- if (nvram_read_partition(part, buff, part->size, &err_type, &id_no)) {
- kfree(buff);
- return 0;
- }
-
- *count = 0;
-
- if (part->os_partition)
- *id = id_no;
-
- if (nvram_type_ids[read_type] == PSTORE_TYPE_DMESG) {
- size_t length, hdr_size;
-
- oops_hdr = (struct oops_log_info *)buff;
- if (be16_to_cpu(oops_hdr->version) < OOPS_HDR_VERSION) {
- /* Old format oops header had 2-byte record size */
- hdr_size = sizeof(u16);
- length = be16_to_cpu(oops_hdr->version);
- time->tv_sec = 0;
- time->tv_nsec = 0;
- } else {
- hdr_size = sizeof(*oops_hdr);
- length = be16_to_cpu(oops_hdr->report_length);
- time->tv_sec = be64_to_cpu(oops_hdr->timestamp);
- time->tv_nsec = 0;
- }
- *buf = kmalloc(length, GFP_KERNEL);
- if (*buf == NULL)
- return -ENOMEM;
- memcpy(*buf, buff + hdr_size, length);
- kfree(buff);
-
- if (err_type == ERR_TYPE_KERNEL_PANIC_GZ)
- *compressed = true;
- else
- *compressed = false;
- return length;
- }
-
- *buf = buff;
- return part->size;
-}
-
-static struct pstore_info nvram_pstore_info = {
- .owner = THIS_MODULE,
- .name = "nvram",
- .open = nvram_pstore_open,
- .read = nvram_pstore_read,
- .write = nvram_pstore_write,
-};
-
-static int nvram_pstore_init(void)
-{
- int rc = 0;
-
- nvram_pstore_info.buf = oops_data;
- nvram_pstore_info.bufsize = oops_data_sz;
-
- spin_lock_init(&nvram_pstore_info.buf_lock);
-
- rc = pstore_register(&nvram_pstore_info);
- if (rc != 0)
- pr_err("nvram: pstore_register() failed, defaults to "
- "kmsg_dump; returned %d\n", rc);
-
- return rc;
-}
-#else
-static int nvram_pstore_init(void)
-{
- return -1;
-}
-#endif
-
-static void __init nvram_init_oops_partition(int rtas_partition_exists)
-{
- int rc;
-
- rc = pseries_nvram_init_os_partition(&oops_log_partition);
- if (rc != 0) {
- if (!rtas_partition_exists)
- return;
- pr_notice("nvram: Using %s partition to log both"
- " RTAS errors and oops/panic reports\n",
- rtas_log_partition.name);
- memcpy(&oops_log_partition, &rtas_log_partition,
- sizeof(rtas_log_partition));
- }
- oops_buf = kmalloc(oops_log_partition.size, GFP_KERNEL);
- if (!oops_buf) {
- pr_err("nvram: No memory for %s partition\n",
- oops_log_partition.name);
- return;
- }
- oops_data = oops_buf + sizeof(struct oops_log_info);
- oops_data_sz = oops_log_partition.size - sizeof(struct oops_log_info);
-
- rc = nvram_pstore_init();
-
- if (!rc)
- return;
-
- /*
- * Figure compression (preceded by elimination of each line's <n>
- * severity prefix) will reduce the oops/panic report to at most
- * 45% of its original size.
- */
- big_oops_buf_sz = (oops_data_sz * 100) / 45;
- big_oops_buf = kmalloc(big_oops_buf_sz, GFP_KERNEL);
- if (big_oops_buf) {
- stream.workspace = kmalloc(zlib_deflate_workspacesize(
- WINDOW_BITS, MEM_LEVEL), GFP_KERNEL);
- if (!stream.workspace) {
- pr_err("nvram: No memory for compression workspace; "
- "skipping compression of %s partition data\n",
- oops_log_partition.name);
- kfree(big_oops_buf);
- big_oops_buf = NULL;
- }
- } else {
- pr_err("No memory for uncompressed %s data; "
- "skipping compression\n", oops_log_partition.name);
- stream.workspace = NULL;
- }
-
- rc = kmsg_dump_register(&nvram_kmsg_dumper);
- if (rc != 0) {
- pr_err("nvram: kmsg_dump_register() failed; returned %d\n", rc);
- kfree(oops_buf);
- kfree(big_oops_buf);
- kfree(stream.workspace);
- }
-}
-
static int __init pseries_nvram_init_log_partitions(void)
{
int rc;
@@ -798,7 +211,7 @@ static int __init pseries_nvram_init_log_partitions(void)
/* Scan nvram for partitions */
nvram_scan_partitions();
- rc = pseries_nvram_init_os_partition(&rtas_log_partition);
+ rc = nvram_init_os_partition(&rtas_log_partition);
nvram_init_oops_partition(rc == 0);
return 0;
}
@@ -834,72 +247,3 @@ int __init pSeries_nvram_init(void)
return 0;
}
-
-/*
- * This is our kmsg_dump callback, called after an oops or panic report
- * has been written to the printk buffer. We want to capture as much
- * of the printk buffer as possible. First, capture as much as we can
- * that we think will compress sufficiently to fit in the lnx,oops-log
- * partition. If that's too much, go back and capture uncompressed text.
- */
-static void oops_to_nvram(struct kmsg_dumper *dumper,
- enum kmsg_dump_reason reason)
-{
- struct oops_log_info *oops_hdr = (struct oops_log_info *)oops_buf;
- static unsigned int oops_count = 0;
- static bool panicking = false;
- static DEFINE_SPINLOCK(lock);
- unsigned long flags;
- size_t text_len;
- unsigned int err_type = ERR_TYPE_KERNEL_PANIC_GZ;
- int rc = -1;
-
- switch (reason) {
- case KMSG_DUMP_RESTART:
- case KMSG_DUMP_HALT:
- case KMSG_DUMP_POWEROFF:
- /* These are almost always orderly shutdowns. */
- return;
- case KMSG_DUMP_OOPS:
- break;
- case KMSG_DUMP_PANIC:
- panicking = true;
- break;
- case KMSG_DUMP_EMERG:
- if (panicking)
- /* Panic report already captured. */
- return;
- break;
- default:
- pr_err("%s: ignoring unrecognized KMSG_DUMP_* reason %d\n",
- __func__, (int) reason);
- return;
- }
-
- if (clobbering_unread_rtas_event())
- return;
-
- if (!spin_trylock_irqsave(&lock, flags))
- return;
-
- if (big_oops_buf) {
- kmsg_dump_get_buffer(dumper, false,
- big_oops_buf, big_oops_buf_sz, &text_len);
- rc = zip_oops(text_len);
- }
- if (rc != 0) {
- kmsg_dump_rewind(dumper);
- kmsg_dump_get_buffer(dumper, false,
- oops_data, oops_data_sz, &text_len);
- err_type = ERR_TYPE_KERNEL_PANIC;
- oops_hdr->version = cpu_to_be16(OOPS_HDR_VERSION);
- oops_hdr->report_length = cpu_to_be16(text_len);
- oops_hdr->timestamp = cpu_to_be64(get_seconds());
- }
-
- (void) nvram_write_os_partition(&oops_log_partition, oops_buf,
- (int) (sizeof(*oops_hdr) + text_len), err_type,
- ++oops_count);
-
- spin_unlock_irqrestore(&lock, flags);
-}
diff --git a/arch/powerpc/platforms/pseries/pci_dlpar.c b/arch/powerpc/platforms/pseries/pci_dlpar.c
index 89e23811199c..5d4a3df59d0c 100644
--- a/arch/powerpc/platforms/pseries/pci_dlpar.c
+++ b/arch/powerpc/platforms/pseries/pci_dlpar.c
@@ -32,6 +32,8 @@
#include <asm/firmware.h>
#include <asm/eeh.h>
+#include "pseries.h"
+
static struct pci_bus *
find_bus_among_children(struct pci_bus *bus,
struct device_node *dn)
@@ -75,6 +77,7 @@ struct pci_controller *init_phb_dynamic(struct device_node *dn)
return NULL;
rtas_setup_phb(phb);
pci_process_bridge_OF_ranges(phb, dn, 0);
+ phb->controller_ops = pseries_pci_controller_ops;
pci_devs_phb_init_dynamic(phb);
@@ -82,7 +85,7 @@ struct pci_controller *init_phb_dynamic(struct device_node *dn)
eeh_dev_phb_init_dynamic(phb);
if (dn->child)
- eeh_add_device_tree_early(dn);
+ eeh_add_device_tree_early(PCI_DN(dn));
pcibios_scan_phb(phb);
pcibios_finish_adding_to_bus(phb->bus);
diff --git a/arch/powerpc/platforms/pseries/pseries.h b/arch/powerpc/platforms/pseries/pseries.h
index 1796c5438cc6..8411c27293e4 100644
--- a/arch/powerpc/platforms/pseries/pseries.h
+++ b/arch/powerpc/platforms/pseries/pseries.h
@@ -11,6 +11,7 @@
#define _PSERIES_PSERIES_H
#include <linux/interrupt.h>
+#include <asm/rtas.h>
struct device_node;
@@ -60,11 +61,24 @@ extern struct device_node *dlpar_configure_connector(__be32,
struct device_node *);
extern int dlpar_attach_node(struct device_node *);
extern int dlpar_detach_node(struct device_node *);
+extern int dlpar_acquire_drc(u32 drc_index);
+extern int dlpar_release_drc(u32 drc_index);
+
+#ifdef CONFIG_MEMORY_HOTPLUG
+int dlpar_memory(struct pseries_hp_errorlog *hp_elog);
+#else
+static inline int dlpar_memory(struct pseries_hp_errorlog *hp_elog)
+{
+ return -EOPNOTSUPP;
+}
+#endif
/* PCI root bridge prepare function override for pseries */
struct pci_host_bridge;
int pseries_root_bridge_prepare(struct pci_host_bridge *bridge);
+extern struct pci_controller_ops pseries_pci_controller_ops;
+
unsigned long pseries_memory_block_size(void);
#endif /* _PSERIES_PSERIES_H */
diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
index e445b6701f50..df6a7041922b 100644
--- a/arch/powerpc/platforms/pseries/setup.c
+++ b/arch/powerpc/platforms/pseries/setup.c
@@ -265,7 +265,7 @@ static int pci_dn_reconfig_notifier(struct notifier_block *nb, unsigned long act
update_dn_pci_info(np, pci->phb);
/* Create EEH device for the OF node */
- eeh_dev_init(np, pci->phb);
+ eeh_dev_init(PCI_DN(np), pci->phb);
}
break;
default:
@@ -461,6 +461,47 @@ static long pseries_little_endian_exceptions(void)
}
#endif
+static void __init find_and_init_phbs(void)
+{
+ struct device_node *node;
+ struct pci_controller *phb;
+ struct device_node *root = of_find_node_by_path("/");
+
+ for_each_child_of_node(root, node) {
+ if (node->type == NULL || (strcmp(node->type, "pci") != 0 &&
+ strcmp(node->type, "pciex") != 0))
+ continue;
+
+ phb = pcibios_alloc_controller(node);
+ if (!phb)
+ continue;
+ rtas_setup_phb(phb);
+ pci_process_bridge_OF_ranges(phb, node, 0);
+ isa_bridge_find_early(phb);
+ phb->controller_ops = pseries_pci_controller_ops;
+ }
+
+ of_node_put(root);
+ pci_devs_phb_init();
+
+ /*
+ * PCI_PROBE_ONLY and PCI_REASSIGN_ALL_BUS can be set via properties
+ * in chosen.
+ */
+ if (of_chosen) {
+ const int *prop;
+
+ prop = of_get_property(of_chosen,
+ "linux,pci-probe-only", NULL);
+ if (prop) {
+ if (*prop)
+ pci_add_flags(PCI_PROBE_ONLY);
+ else
+ pci_clear_flags(PCI_PROBE_ONLY);
+ }
+ }
+}
+
static void __init pSeries_setup_arch(void)
{
set_arch_panic_timeout(10, ARCH_PANIC_TIMEOUT);
@@ -793,6 +834,10 @@ static int pSeries_pci_probe_mode(struct pci_bus *bus)
void pSeries_final_fixup(void) { }
#endif
+struct pci_controller_ops pseries_pci_controller_ops = {
+ .probe_mode = pSeries_pci_probe_mode,
+};
+
define_machine(pseries) {
.name = "pSeries",
.probe = pSeries_probe,
@@ -801,7 +846,6 @@ define_machine(pseries) {
.show_cpuinfo = pSeries_show_cpuinfo,
.log_error = pSeries_log_error,
.pcibios_fixup = pSeries_final_fixup,
- .pci_probe_mode = pSeries_pci_probe_mode,
.restart = rtas_restart,
.halt = rtas_halt,
.panic = rtas_os_term,
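
pSeries_pci_probe_mode() drops out of ppc_md and is carried in pseries_pci_controller_ops instead, which find_and_init_phbs() and init_phb_dynamic() copy onto every PHB (the iommu.c hunk fills in dma_bus_setup/dma_dev_setup the same way). A minimal sketch of that per-controller-ops pattern, using hypothetical trimmed-down types in place of pci_controller / pci_controller_ops:

    #include <stdio.h>

    struct controller;

    struct controller_ops {
        int (*probe_mode)(struct controller *hose);
    };

    struct controller {
        const char *name;
        struct controller_ops ops;      /* per-PHB ops, copied at init time */
    };

    static int pseries_probe_mode(struct controller *hose)
    {
        (void)hose;
        return 1;       /* pretend result; the real hook returns a PCI_PROBE_* mode */
    }

    static const struct controller_ops pseries_controller_ops = {
        .probe_mode = pseries_probe_mode,
    };

    int main(void)
    {
        struct controller phb = { .name = "phb0" };

        phb.ops = pseries_controller_ops;   /* phb->controller_ops = pseries_pci_controller_ops; */
        if (phb.ops.probe_mode)
            printf("%s probe mode: %d\n", phb.name, phb.ops.probe_mode(&phb));
        return 0;
    }
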
diff --git a/arch/powerpc/platforms/pseries/smp.c b/arch/powerpc/platforms/pseries/smp.c
index a3555b10c1a5..6932ea803e33 100644
--- a/arch/powerpc/platforms/pseries/smp.c
+++ b/arch/powerpc/platforms/pseries/smp.c
@@ -197,16 +197,14 @@ static void pSeries_cause_ipi_mux(int cpu, unsigned long data)
xics_cause_ipi(cpu, data);
}
-static __init int pSeries_smp_probe(void)
+static __init void pSeries_smp_probe(void)
{
- int ret = xics_smp_probe();
+ xics_smp_probe();
if (cpu_has_feature(CPU_FTR_DBELL)) {
xics_cause_ipi = smp_ops->cause_ipi;
smp_ops->cause_ipi = pSeries_cause_ipi_mux;
}
-
- return ret;
}
static struct smp_ops_t pSeries_mpic_smp_ops = {