author      Linus Torvalds <torvalds@linux-foundation.org>  2013-07-03 00:43:38 +0400
committer   Linus Torvalds <torvalds@linux-foundation.org>  2013-07-03 00:43:38 +0400
commit      3883cbb6c1bda013a3ce2dbdab7dc97c52e4a232 (patch)
tree        5b69f83b049d24ac81123ac954ca8c9128e48443 /drivers/pci
parent      d2033f2c1d1de2239ded15e478ddb4028f192a15 (diff)
parent      1eb92b24e243085d242cf5ffd64829bba70972e1 (diff)
download    linux-3883cbb6c1bda013a3ce2dbdab7dc97c52e4a232.tar.xz
Merge tag 'soc-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/arm/arm-soc
Pull ARM SoC specific changes from Arnd Bergmann:
"These changes are all to SoC-specific code, a total of 33 branches on
17 platforms were pulled into this. Like last time, Renesas sh-mobile
is now the platform with the most changes, followed by OMAP and
EXYNOS.
Two new platforms, TI Keystone and Rockchip RK3xxx, are added in this
branch, both containing almost no platform-specific code at all, since
they are using generic subsystem interfaces for clocks, pinctrl,
interrupts, etc. The device drivers are getting merged through the
respective subsystem maintainer trees.
One more SoC (u300) is now multiplatform-capable, and several others
(shmobile, exynos, msm, integrator, kirkwood, clps711x) are moving
towards that goal with this series but need more work.
Also noteworthy is the work on PCI here, which is traditionally part
of the SoC-specific code. With the changes done by Thomas Petazzoni,
we can now more easily have PCI host controller drivers as loadable
modules and keep them separate from the platform code, in
drivers/pci/host. This has already led to the discovery that three
platforms (exynos, spear and imx) are actually using an identical PCIe
host controller and will be able to share a driver once support for
spear and imx is added."
* tag 'soc-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/arm/arm-soc: (480 commits)
ARM: integrator: let pciv3 use mem/premem from device tree
ARM: integrator: set local side PCI addresses right
ARM: dts: Add pcie controller node for exynos5440-ssdk5440
ARM: dts: Add pcie controller node for Samsung EXYNOS5440 SoC
ARM: EXYNOS: Enable PCIe support for Exynos5440
pci: Add PCIe driver for Samsung Exynos
ARM: OMAP5: voltagedomain data: remove temporary OMAP4 voltage data
ARM: keystone: Move CPU bringup code to dedicated asm file
ARM: multiplatform: always pick one CPU type
ARM: imx: select syscon for IMX6SL
ARM: keystone: select ARM_ERRATA_798181 only for SMP
ARM: imx: Synertronixx scb9328 needs to select SOC_IMX1
ARM: OMAP2+: AM43x: resolve SMP related build error
dmaengine: edma: enable build for AM33XX
ARM: edma: Add EDMA crossbar event mux support
ARM: edma: Add DT and runtime PM support to the private EDMA API
dmaengine: edma: Add TI EDMA device tree binding
arm: add basic support for Rockchip RK3066a boards
arm: add debug uarts for rockchip rk29xx and rk3xxx series
arm: Add basic clocks for Rockchip rk3066a SoCs
...
Diffstat (limited to 'drivers/pci')
-rw-r--r--   drivers/pci/Kconfig                  |    2
-rw-r--r--   drivers/pci/Makefile                 |    3
-rw-r--r--   drivers/pci/host/Kconfig             |   17
-rw-r--r--   drivers/pci/host/Makefile            |    2
-rw-r--r--   drivers/pci/host/pci-mvebu.c         |  914
-rw-r--r--   drivers/pci/host/pcie-designware.c   | 1057
6 files changed, 1995 insertions, 0 deletions
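
The pull request text above notes that PCI host controller drivers now live
under drivers/pci/host, separate from the per-SoC platform code. Concretely,
each of these drivers is an ordinary platform driver bound through a
device-tree compatible string, which is what allows it to be built on its own
and, eventually, as a loadable module. The skeleton below is only a sketch of
that pattern, not code from this merge; the foo_pcie names and the
"vendor,foo-pcie" compatible string are placeholders.

/*
 * Sketch of the drivers/pci/host driver pattern (illustrative only):
 * an ordinary platform driver matched by a DT compatible string, kept
 * outside arch/arm/mach-* so it can be built independently.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>

static int foo_pcie_probe(struct platform_device *pdev)
{
	/*
	 * A real driver would map the controller registers, enable its
	 * clocks, bring up the link and register the PCI bus here, as
	 * pci-mvebu.c and pcie-designware.c do in the diff below.
	 */
	dev_info(&pdev->dev, "probed\n");
	return 0;
}

static const struct of_device_id foo_pcie_of_match[] = {
	{ .compatible = "vendor,foo-pcie" },
	{ },
};
MODULE_DEVICE_TABLE(of, foo_pcie_of_match);

static struct platform_driver foo_pcie_driver = {
	.probe = foo_pcie_probe,
	.driver = {
		.name = "foo-pcie",
		.owner = THIS_MODULE,
		.of_match_table = foo_pcie_of_match,
	},
};
module_platform_driver(foo_pcie_driver);

MODULE_DESCRIPTION("Example skeleton of a PCI host controller driver");
MODULE_LICENSE("GPL v2");

The two real drivers added by this merge, pci-mvebu.c and pcie-designware.c,
follow this shape; their probe paths additionally parse the DT ranges, map
the controller registers, enable clocks and register the PCI bus.
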
diff --git a/drivers/pci/Kconfig b/drivers/pci/Kconfig
index 77497f140d68..81944fb73116 100644
--- a/drivers/pci/Kconfig
+++ b/drivers/pci/Kconfig
@@ -117,3 +117,5 @@ config PCI_IOAPIC
 config PCI_LABEL
 	def_bool y if (DMI || ACPI)
 	select NLS
+
+source "drivers/pci/host/Kconfig"
diff --git a/drivers/pci/Makefile b/drivers/pci/Makefile
index 0c3efcffa83b..6ebf5bf8e7a7 100644
--- a/drivers/pci/Makefile
+++ b/drivers/pci/Makefile
@@ -67,3 +67,6 @@ obj-$(CONFIG_XEN_PCIDEV_FRONTEND) += xen-pcifront.o
 obj-$(CONFIG_OF) += of.o

 ccflags-$(CONFIG_PCI_DEBUG) := -DDEBUG
+
+# PCI host controller drivers
+obj-y += host/
diff --git a/drivers/pci/host/Kconfig b/drivers/pci/host/Kconfig
new file mode 100644
index 000000000000..1184ff6fe864
--- /dev/null
+++ b/drivers/pci/host/Kconfig
@@ -0,0 +1,17 @@
+menu "PCI host controller drivers"
+	depends on PCI
+
+config PCI_MVEBU
+	bool "Marvell EBU PCIe controller"
+	depends on ARCH_MVEBU || ARCH_KIRKWOOD
+
+config PCIE_DW
+	bool
+
+config PCI_EXYNOS
+	bool "Samsung Exynos PCIe controller"
+	depends on SOC_EXYNOS5440
+	select PCIEPORTBUS
+	select PCIE_DW
+
+endmenu
diff --git a/drivers/pci/host/Makefile b/drivers/pci/host/Makefile
new file mode 100644
index 000000000000..086d8500e849
--- /dev/null
+++ b/drivers/pci/host/Makefile
@@ -0,0 +1,2 @@
+obj-$(CONFIG_PCI_MVEBU) += pci-mvebu.o
+obj-$(CONFIG_PCIE_DW) += pcie-designware.o
diff --git a/drivers/pci/host/pci-mvebu.c b/drivers/pci/host/pci-mvebu.c
new file mode 100644
index 000000000000..13a633b1612e
--- /dev/null
+++ b/drivers/pci/host/pci-mvebu.c
@@ -0,0 +1,914 @@
+/*
+ * PCIe driver for Marvell Armada 370 and Armada XP SoCs
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/mbus.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/of_address.h>
+#include <linux/of_pci.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+
+/*
+ * PCIe unit register offsets.
+ */ +#define PCIE_DEV_ID_OFF 0x0000 +#define PCIE_CMD_OFF 0x0004 +#define PCIE_DEV_REV_OFF 0x0008 +#define PCIE_BAR_LO_OFF(n) (0x0010 + ((n) << 3)) +#define PCIE_BAR_HI_OFF(n) (0x0014 + ((n) << 3)) +#define PCIE_HEADER_LOG_4_OFF 0x0128 +#define PCIE_BAR_CTRL_OFF(n) (0x1804 + (((n) - 1) * 4)) +#define PCIE_WIN04_CTRL_OFF(n) (0x1820 + ((n) << 4)) +#define PCIE_WIN04_BASE_OFF(n) (0x1824 + ((n) << 4)) +#define PCIE_WIN04_REMAP_OFF(n) (0x182c + ((n) << 4)) +#define PCIE_WIN5_CTRL_OFF 0x1880 +#define PCIE_WIN5_BASE_OFF 0x1884 +#define PCIE_WIN5_REMAP_OFF 0x188c +#define PCIE_CONF_ADDR_OFF 0x18f8 +#define PCIE_CONF_ADDR_EN 0x80000000 +#define PCIE_CONF_REG(r) ((((r) & 0xf00) << 16) | ((r) & 0xfc)) +#define PCIE_CONF_BUS(b) (((b) & 0xff) << 16) +#define PCIE_CONF_DEV(d) (((d) & 0x1f) << 11) +#define PCIE_CONF_FUNC(f) (((f) & 0x7) << 8) +#define PCIE_CONF_ADDR(bus, devfn, where) \ + (PCIE_CONF_BUS(bus) | PCIE_CONF_DEV(PCI_SLOT(devfn)) | \ + PCIE_CONF_FUNC(PCI_FUNC(devfn)) | PCIE_CONF_REG(where) | \ + PCIE_CONF_ADDR_EN) +#define PCIE_CONF_DATA_OFF 0x18fc +#define PCIE_MASK_OFF 0x1910 +#define PCIE_MASK_ENABLE_INTS 0x0f000000 +#define PCIE_CTRL_OFF 0x1a00 +#define PCIE_CTRL_X1_MODE 0x0001 +#define PCIE_STAT_OFF 0x1a04 +#define PCIE_STAT_BUS 0xff00 +#define PCIE_STAT_DEV 0x1f0000 +#define PCIE_STAT_LINK_DOWN BIT(0) +#define PCIE_DEBUG_CTRL 0x1a60 +#define PCIE_DEBUG_SOFT_RESET BIT(20) + +/* + * This product ID is registered by Marvell, and used when the Marvell + * SoC is not the root complex, but an endpoint on the PCIe bus. It is + * therefore safe to re-use this PCI ID for our emulated PCI-to-PCI + * bridge. + */ +#define MARVELL_EMULATED_PCI_PCI_BRIDGE_ID 0x7846 + +/* PCI configuration space of a PCI-to-PCI bridge */ +struct mvebu_sw_pci_bridge { + u16 vendor; + u16 device; + u16 command; + u16 class; + u8 interface; + u8 revision; + u8 bist; + u8 header_type; + u8 latency_timer; + u8 cache_line_size; + u32 bar[2]; + u8 primary_bus; + u8 secondary_bus; + u8 subordinate_bus; + u8 secondary_latency_timer; + u8 iobase; + u8 iolimit; + u16 secondary_status; + u16 membase; + u16 memlimit; + u16 prefmembase; + u16 prefmemlimit; + u32 prefbaseupper; + u32 preflimitupper; + u16 iobaseupper; + u16 iolimitupper; + u8 cappointer; + u8 reserved1; + u16 reserved2; + u32 romaddr; + u8 intline; + u8 intpin; + u16 bridgectrl; +}; + +struct mvebu_pcie_port; + +/* Structure representing all PCIe interfaces */ +struct mvebu_pcie { + struct platform_device *pdev; + struct mvebu_pcie_port *ports; + struct resource io; + struct resource realio; + struct resource mem; + struct resource busn; + int nports; +}; + +/* Structure representing one PCIe interface */ +struct mvebu_pcie_port { + char *name; + void __iomem *base; + spinlock_t conf_lock; + int haslink; + u32 port; + u32 lane; + int devfn; + struct clk *clk; + struct mvebu_sw_pci_bridge bridge; + struct device_node *dn; + struct mvebu_pcie *pcie; + phys_addr_t memwin_base; + size_t memwin_size; + phys_addr_t iowin_base; + size_t iowin_size; +}; + +static bool mvebu_pcie_link_up(struct mvebu_pcie_port *port) +{ + return !(readl(port->base + PCIE_STAT_OFF) & PCIE_STAT_LINK_DOWN); +} + +static void mvebu_pcie_set_local_bus_nr(struct mvebu_pcie_port *port, int nr) +{ + u32 stat; + + stat = readl(port->base + PCIE_STAT_OFF); + stat &= ~PCIE_STAT_BUS; + stat |= nr << 8; + writel(stat, port->base + PCIE_STAT_OFF); +} + +static void mvebu_pcie_set_local_dev_nr(struct mvebu_pcie_port *port, int nr) +{ + u32 stat; + + stat = readl(port->base + PCIE_STAT_OFF); + stat &= 
~PCIE_STAT_DEV; + stat |= nr << 16; + writel(stat, port->base + PCIE_STAT_OFF); +} + +/* + * Setup PCIE BARs and Address Decode Wins: + * BAR[0,2] -> disabled, BAR[1] -> covers all DRAM banks + * WIN[0-3] -> DRAM bank[0-3] + */ +static void __init mvebu_pcie_setup_wins(struct mvebu_pcie_port *port) +{ + const struct mbus_dram_target_info *dram; + u32 size; + int i; + + dram = mv_mbus_dram_info(); + + /* First, disable and clear BARs and windows. */ + for (i = 1; i < 3; i++) { + writel(0, port->base + PCIE_BAR_CTRL_OFF(i)); + writel(0, port->base + PCIE_BAR_LO_OFF(i)); + writel(0, port->base + PCIE_BAR_HI_OFF(i)); + } + + for (i = 0; i < 5; i++) { + writel(0, port->base + PCIE_WIN04_CTRL_OFF(i)); + writel(0, port->base + PCIE_WIN04_BASE_OFF(i)); + writel(0, port->base + PCIE_WIN04_REMAP_OFF(i)); + } + + writel(0, port->base + PCIE_WIN5_CTRL_OFF); + writel(0, port->base + PCIE_WIN5_BASE_OFF); + writel(0, port->base + PCIE_WIN5_REMAP_OFF); + + /* Setup windows for DDR banks. Count total DDR size on the fly. */ + size = 0; + for (i = 0; i < dram->num_cs; i++) { + const struct mbus_dram_window *cs = dram->cs + i; + + writel(cs->base & 0xffff0000, + port->base + PCIE_WIN04_BASE_OFF(i)); + writel(0, port->base + PCIE_WIN04_REMAP_OFF(i)); + writel(((cs->size - 1) & 0xffff0000) | + (cs->mbus_attr << 8) | + (dram->mbus_dram_target_id << 4) | 1, + port->base + PCIE_WIN04_CTRL_OFF(i)); + + size += cs->size; + } + + /* Round up 'size' to the nearest power of two. */ + if ((size & (size - 1)) != 0) + size = 1 << fls(size); + + /* Setup BAR[1] to all DRAM banks. */ + writel(dram->cs[0].base, port->base + PCIE_BAR_LO_OFF(1)); + writel(0, port->base + PCIE_BAR_HI_OFF(1)); + writel(((size - 1) & 0xffff0000) | 1, + port->base + PCIE_BAR_CTRL_OFF(1)); +} + +static void __init mvebu_pcie_setup_hw(struct mvebu_pcie_port *port) +{ + u16 cmd; + u32 mask; + + /* Point PCIe unit MBUS decode windows to DRAM space. */ + mvebu_pcie_setup_wins(port); + + /* Master + slave enable. */ + cmd = readw(port->base + PCIE_CMD_OFF); + cmd |= PCI_COMMAND_IO; + cmd |= PCI_COMMAND_MEMORY; + cmd |= PCI_COMMAND_MASTER; + writew(cmd, port->base + PCIE_CMD_OFF); + + /* Enable interrupt lines A-D. */ + mask = readl(port->base + PCIE_MASK_OFF); + mask |= PCIE_MASK_ENABLE_INTS; + writel(mask, port->base + PCIE_MASK_OFF); +} + +static int mvebu_pcie_hw_rd_conf(struct mvebu_pcie_port *port, + struct pci_bus *bus, + u32 devfn, int where, int size, u32 *val) +{ + writel(PCIE_CONF_ADDR(bus->number, devfn, where), + port->base + PCIE_CONF_ADDR_OFF); + + *val = readl(port->base + PCIE_CONF_DATA_OFF); + + if (size == 1) + *val = (*val >> (8 * (where & 3))) & 0xff; + else if (size == 2) + *val = (*val >> (8 * (where & 3))) & 0xffff; + + return PCIBIOS_SUCCESSFUL; +} + +static int mvebu_pcie_hw_wr_conf(struct mvebu_pcie_port *port, + struct pci_bus *bus, + u32 devfn, int where, int size, u32 val) +{ + int ret = PCIBIOS_SUCCESSFUL; + + writel(PCIE_CONF_ADDR(bus->number, devfn, where), + port->base + PCIE_CONF_ADDR_OFF); + + if (size == 4) + writel(val, port->base + PCIE_CONF_DATA_OFF); + else if (size == 2) + writew(val, port->base + PCIE_CONF_DATA_OFF + (where & 3)); + else if (size == 1) + writeb(val, port->base + PCIE_CONF_DATA_OFF + (where & 3)); + else + ret = PCIBIOS_BAD_REGISTER_NUMBER; + + return ret; +} + +static void mvebu_pcie_handle_iobase_change(struct mvebu_pcie_port *port) +{ + phys_addr_t iobase; + + /* Are the new iobase/iolimit values invalid? 
*/ + if (port->bridge.iolimit < port->bridge.iobase || + port->bridge.iolimitupper < port->bridge.iobaseupper) { + + /* If a window was configured, remove it */ + if (port->iowin_base) { + mvebu_mbus_del_window(port->iowin_base, + port->iowin_size); + port->iowin_base = 0; + port->iowin_size = 0; + } + + return; + } + + /* + * We read the PCI-to-PCI bridge emulated registers, and + * calculate the base address and size of the address decoding + * window to setup, according to the PCI-to-PCI bridge + * specifications. iobase is the bus address, port->iowin_base + * is the CPU address. + */ + iobase = ((port->bridge.iobase & 0xF0) << 8) | + (port->bridge.iobaseupper << 16); + port->iowin_base = port->pcie->io.start + iobase; + port->iowin_size = ((0xFFF | ((port->bridge.iolimit & 0xF0) << 8) | + (port->bridge.iolimitupper << 16)) - + iobase); + + mvebu_mbus_add_window_remap_flags(port->name, port->iowin_base, + port->iowin_size, + iobase, + MVEBU_MBUS_PCI_IO); + + pci_ioremap_io(iobase, port->iowin_base); +} + +static void mvebu_pcie_handle_membase_change(struct mvebu_pcie_port *port) +{ + /* Are the new membase/memlimit values invalid? */ + if (port->bridge.memlimit < port->bridge.membase) { + + /* If a window was configured, remove it */ + if (port->memwin_base) { + mvebu_mbus_del_window(port->memwin_base, + port->memwin_size); + port->memwin_base = 0; + port->memwin_size = 0; + } + + return; + } + + /* + * We read the PCI-to-PCI bridge emulated registers, and + * calculate the base address and size of the address decoding + * window to setup, according to the PCI-to-PCI bridge + * specifications. + */ + port->memwin_base = ((port->bridge.membase & 0xFFF0) << 16); + port->memwin_size = + (((port->bridge.memlimit & 0xFFF0) << 16) | 0xFFFFF) - + port->memwin_base; + + mvebu_mbus_add_window_remap_flags(port->name, port->memwin_base, + port->memwin_size, + MVEBU_MBUS_NO_REMAP, + MVEBU_MBUS_PCI_MEM); +} + +/* + * Initialize the configuration space of the PCI-to-PCI bridge + * associated with the given PCIe interface. + */ +static void mvebu_sw_pci_bridge_init(struct mvebu_pcie_port *port) +{ + struct mvebu_sw_pci_bridge *bridge = &port->bridge; + + memset(bridge, 0, sizeof(struct mvebu_sw_pci_bridge)); + + bridge->class = PCI_CLASS_BRIDGE_PCI; + bridge->vendor = PCI_VENDOR_ID_MARVELL; + bridge->device = MARVELL_EMULATED_PCI_PCI_BRIDGE_ID; + bridge->header_type = PCI_HEADER_TYPE_BRIDGE; + bridge->cache_line_size = 0x10; + + /* We support 32 bits I/O addressing */ + bridge->iobase = PCI_IO_RANGE_TYPE_32; + bridge->iolimit = PCI_IO_RANGE_TYPE_32; +} + +/* + * Read the configuration space of the PCI-to-PCI bridge associated to + * the given PCIe interface. + */ +static int mvebu_sw_pci_bridge_read(struct mvebu_pcie_port *port, + unsigned int where, int size, u32 *value) +{ + struct mvebu_sw_pci_bridge *bridge = &port->bridge; + + switch (where & ~3) { + case PCI_VENDOR_ID: + *value = bridge->device << 16 | bridge->vendor; + break; + + case PCI_COMMAND: + *value = bridge->command; + break; + + case PCI_CLASS_REVISION: + *value = bridge->class << 16 | bridge->interface << 8 | + bridge->revision; + break; + + case PCI_CACHE_LINE_SIZE: + *value = bridge->bist << 24 | bridge->header_type << 16 | + bridge->latency_timer << 8 | bridge->cache_line_size; + break; + + case PCI_BASE_ADDRESS_0 ... 
PCI_BASE_ADDRESS_1: + *value = bridge->bar[((where & ~3) - PCI_BASE_ADDRESS_0) / 4]; + break; + + case PCI_PRIMARY_BUS: + *value = (bridge->secondary_latency_timer << 24 | + bridge->subordinate_bus << 16 | + bridge->secondary_bus << 8 | + bridge->primary_bus); + break; + + case PCI_IO_BASE: + *value = (bridge->secondary_status << 16 | + bridge->iolimit << 8 | + bridge->iobase); + break; + + case PCI_MEMORY_BASE: + *value = (bridge->memlimit << 16 | bridge->membase); + break; + + case PCI_PREF_MEMORY_BASE: + *value = (bridge->prefmemlimit << 16 | bridge->prefmembase); + break; + + case PCI_PREF_BASE_UPPER32: + *value = bridge->prefbaseupper; + break; + + case PCI_PREF_LIMIT_UPPER32: + *value = bridge->preflimitupper; + break; + + case PCI_IO_BASE_UPPER16: + *value = (bridge->iolimitupper << 16 | bridge->iobaseupper); + break; + + case PCI_ROM_ADDRESS1: + *value = 0; + break; + + default: + *value = 0xffffffff; + return PCIBIOS_BAD_REGISTER_NUMBER; + } + + if (size == 2) + *value = (*value >> (8 * (where & 3))) & 0xffff; + else if (size == 1) + *value = (*value >> (8 * (where & 3))) & 0xff; + + return PCIBIOS_SUCCESSFUL; +} + +/* Write to the PCI-to-PCI bridge configuration space */ +static int mvebu_sw_pci_bridge_write(struct mvebu_pcie_port *port, + unsigned int where, int size, u32 value) +{ + struct mvebu_sw_pci_bridge *bridge = &port->bridge; + u32 mask, reg; + int err; + + if (size == 4) + mask = 0x0; + else if (size == 2) + mask = ~(0xffff << ((where & 3) * 8)); + else if (size == 1) + mask = ~(0xff << ((where & 3) * 8)); + else + return PCIBIOS_BAD_REGISTER_NUMBER; + + err = mvebu_sw_pci_bridge_read(port, where & ~3, 4, ®); + if (err) + return err; + + value = (reg & mask) | value << ((where & 3) * 8); + + switch (where & ~3) { + case PCI_COMMAND: + bridge->command = value & 0xffff; + break; + + case PCI_BASE_ADDRESS_0 ... 
PCI_BASE_ADDRESS_1: + bridge->bar[((where & ~3) - PCI_BASE_ADDRESS_0) / 4] = value; + break; + + case PCI_IO_BASE: + /* + * We also keep bit 1 set, it is a read-only bit that + * indicates we support 32 bits addressing for the + * I/O + */ + bridge->iobase = (value & 0xff) | PCI_IO_RANGE_TYPE_32; + bridge->iolimit = ((value >> 8) & 0xff) | PCI_IO_RANGE_TYPE_32; + bridge->secondary_status = value >> 16; + mvebu_pcie_handle_iobase_change(port); + break; + + case PCI_MEMORY_BASE: + bridge->membase = value & 0xffff; + bridge->memlimit = value >> 16; + mvebu_pcie_handle_membase_change(port); + break; + + case PCI_PREF_MEMORY_BASE: + bridge->prefmembase = value & 0xffff; + bridge->prefmemlimit = value >> 16; + break; + + case PCI_PREF_BASE_UPPER32: + bridge->prefbaseupper = value; + break; + + case PCI_PREF_LIMIT_UPPER32: + bridge->preflimitupper = value; + break; + + case PCI_IO_BASE_UPPER16: + bridge->iobaseupper = value & 0xffff; + bridge->iolimitupper = value >> 16; + mvebu_pcie_handle_iobase_change(port); + break; + + case PCI_PRIMARY_BUS: + bridge->primary_bus = value & 0xff; + bridge->secondary_bus = (value >> 8) & 0xff; + bridge->subordinate_bus = (value >> 16) & 0xff; + bridge->secondary_latency_timer = (value >> 24) & 0xff; + mvebu_pcie_set_local_bus_nr(port, bridge->secondary_bus); + break; + + default: + break; + } + + return PCIBIOS_SUCCESSFUL; +} + +static inline struct mvebu_pcie *sys_to_pcie(struct pci_sys_data *sys) +{ + return sys->private_data; +} + +static struct mvebu_pcie_port * +mvebu_pcie_find_port(struct mvebu_pcie *pcie, struct pci_bus *bus, + int devfn) +{ + int i; + + for (i = 0; i < pcie->nports; i++) { + struct mvebu_pcie_port *port = &pcie->ports[i]; + if (bus->number == 0 && port->devfn == devfn) + return port; + if (bus->number != 0 && + bus->number >= port->bridge.secondary_bus && + bus->number <= port->bridge.subordinate_bus) + return port; + } + + return NULL; +} + +/* PCI configuration space write function */ +static int mvebu_pcie_wr_conf(struct pci_bus *bus, u32 devfn, + int where, int size, u32 val) +{ + struct mvebu_pcie *pcie = sys_to_pcie(bus->sysdata); + struct mvebu_pcie_port *port; + unsigned long flags; + int ret; + + port = mvebu_pcie_find_port(pcie, bus, devfn); + if (!port) + return PCIBIOS_DEVICE_NOT_FOUND; + + /* Access the emulated PCI-to-PCI bridge */ + if (bus->number == 0) + return mvebu_sw_pci_bridge_write(port, where, size, val); + + if (!port->haslink) + return PCIBIOS_DEVICE_NOT_FOUND; + + /* + * On the secondary bus, we don't want to expose any other + * device than the device physically connected in the PCIe + * slot, visible in slot 0. In slot 1, there's a special + * Marvell device that only makes sense when the Armada is + * used as a PCIe endpoint. 
+ */ + if (bus->number == port->bridge.secondary_bus && + PCI_SLOT(devfn) != 0) + return PCIBIOS_DEVICE_NOT_FOUND; + + /* Access the real PCIe interface */ + spin_lock_irqsave(&port->conf_lock, flags); + ret = mvebu_pcie_hw_wr_conf(port, bus, devfn, + where, size, val); + spin_unlock_irqrestore(&port->conf_lock, flags); + + return ret; +} + +/* PCI configuration space read function */ +static int mvebu_pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where, + int size, u32 *val) +{ + struct mvebu_pcie *pcie = sys_to_pcie(bus->sysdata); + struct mvebu_pcie_port *port; + unsigned long flags; + int ret; + + port = mvebu_pcie_find_port(pcie, bus, devfn); + if (!port) { + *val = 0xffffffff; + return PCIBIOS_DEVICE_NOT_FOUND; + } + + /* Access the emulated PCI-to-PCI bridge */ + if (bus->number == 0) + return mvebu_sw_pci_bridge_read(port, where, size, val); + + if (!port->haslink) { + *val = 0xffffffff; + return PCIBIOS_DEVICE_NOT_FOUND; + } + + /* + * On the secondary bus, we don't want to expose any other + * device than the device physically connected in the PCIe + * slot, visible in slot 0. In slot 1, there's a special + * Marvell device that only makes sense when the Armada is + * used as a PCIe endpoint. + */ + if (bus->number == port->bridge.secondary_bus && + PCI_SLOT(devfn) != 0) { + *val = 0xffffffff; + return PCIBIOS_DEVICE_NOT_FOUND; + } + + /* Access the real PCIe interface */ + spin_lock_irqsave(&port->conf_lock, flags); + ret = mvebu_pcie_hw_rd_conf(port, bus, devfn, + where, size, val); + spin_unlock_irqrestore(&port->conf_lock, flags); + + return ret; +} + +static struct pci_ops mvebu_pcie_ops = { + .read = mvebu_pcie_rd_conf, + .write = mvebu_pcie_wr_conf, +}; + +static int __init mvebu_pcie_setup(int nr, struct pci_sys_data *sys) +{ + struct mvebu_pcie *pcie = sys_to_pcie(sys); + int i; + + pci_add_resource_offset(&sys->resources, &pcie->realio, sys->io_offset); + pci_add_resource_offset(&sys->resources, &pcie->mem, sys->mem_offset); + pci_add_resource(&sys->resources, &pcie->busn); + + for (i = 0; i < pcie->nports; i++) { + struct mvebu_pcie_port *port = &pcie->ports[i]; + mvebu_pcie_setup_hw(port); + } + + return 1; +} + +static int __init mvebu_pcie_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) +{ + struct of_irq oirq; + int ret; + + ret = of_irq_map_pci(dev, &oirq); + if (ret) + return ret; + + return irq_create_of_mapping(oirq.controller, oirq.specifier, + oirq.size); +} + +static struct pci_bus *mvebu_pcie_scan_bus(int nr, struct pci_sys_data *sys) +{ + struct mvebu_pcie *pcie = sys_to_pcie(sys); + struct pci_bus *bus; + + bus = pci_create_root_bus(&pcie->pdev->dev, sys->busnr, + &mvebu_pcie_ops, sys, &sys->resources); + if (!bus) + return NULL; + + pci_scan_child_bus(bus); + + return bus; +} + +resource_size_t mvebu_pcie_align_resource(struct pci_dev *dev, + const struct resource *res, + resource_size_t start, + resource_size_t size, + resource_size_t align) +{ + if (dev->bus->number != 0) + return start; + + /* + * On the PCI-to-PCI bridge side, the I/O windows must have at + * least a 64 KB size and be aligned on their size, and the + * memory windows must have at least a 1 MB size and be + * aligned on their size + */ + if (res->flags & IORESOURCE_IO) + return round_up(start, max((resource_size_t)SZ_64K, size)); + else if (res->flags & IORESOURCE_MEM) + return round_up(start, max((resource_size_t)SZ_1M, size)); + else + return start; +} + +static void __init mvebu_pcie_enable(struct mvebu_pcie *pcie) +{ + struct hw_pci hw; + + memset(&hw, 0, sizeof(hw)); + + 
hw.nr_controllers = 1; + hw.private_data = (void **)&pcie; + hw.setup = mvebu_pcie_setup; + hw.scan = mvebu_pcie_scan_bus; + hw.map_irq = mvebu_pcie_map_irq; + hw.ops = &mvebu_pcie_ops; + hw.align_resource = mvebu_pcie_align_resource; + + pci_common_init(&hw); +} + +/* + * Looks up the list of register addresses encoded into the reg = + * <...> property for one that matches the given port/lane. Once + * found, maps it. + */ +static void __iomem * __init +mvebu_pcie_map_registers(struct platform_device *pdev, + struct device_node *np, + struct mvebu_pcie_port *port) +{ + struct resource regs; + int ret = 0; + + ret = of_address_to_resource(np, 0, ®s); + if (ret) + return NULL; + + return devm_request_and_ioremap(&pdev->dev, ®s); +} + +static int __init mvebu_pcie_probe(struct platform_device *pdev) +{ + struct mvebu_pcie *pcie; + struct device_node *np = pdev->dev.of_node; + struct of_pci_range range; + struct of_pci_range_parser parser; + struct device_node *child; + int i, ret; + + pcie = devm_kzalloc(&pdev->dev, sizeof(struct mvebu_pcie), + GFP_KERNEL); + if (!pcie) + return -ENOMEM; + + pcie->pdev = pdev; + + if (of_pci_range_parser_init(&parser, np)) + return -EINVAL; + + /* Get the I/O and memory ranges from DT */ + for_each_of_pci_range(&parser, &range) { + unsigned long restype = range.flags & IORESOURCE_TYPE_BITS; + if (restype == IORESOURCE_IO) { + of_pci_range_to_resource(&range, np, &pcie->io); + of_pci_range_to_resource(&range, np, &pcie->realio); + pcie->io.name = "I/O"; + pcie->realio.start = max_t(resource_size_t, + PCIBIOS_MIN_IO, + range.pci_addr); + pcie->realio.end = min_t(resource_size_t, + IO_SPACE_LIMIT, + range.pci_addr + range.size); + } + if (restype == IORESOURCE_MEM) { + of_pci_range_to_resource(&range, np, &pcie->mem); + pcie->mem.name = "MEM"; + } + } + + /* Get the bus range */ + ret = of_pci_parse_bus_range(np, &pcie->busn); + if (ret) { + dev_err(&pdev->dev, "failed to parse bus-range property: %d\n", + ret); + return ret; + } + + for_each_child_of_node(pdev->dev.of_node, child) { + if (!of_device_is_available(child)) + continue; + pcie->nports++; + } + + pcie->ports = devm_kzalloc(&pdev->dev, pcie->nports * + sizeof(struct mvebu_pcie_port), + GFP_KERNEL); + if (!pcie->ports) + return -ENOMEM; + + i = 0; + for_each_child_of_node(pdev->dev.of_node, child) { + struct mvebu_pcie_port *port = &pcie->ports[i]; + + if (!of_device_is_available(child)) + continue; + + port->pcie = pcie; + + if (of_property_read_u32(child, "marvell,pcie-port", + &port->port)) { + dev_warn(&pdev->dev, + "ignoring PCIe DT node, missing pcie-port property\n"); + continue; + } + + if (of_property_read_u32(child, "marvell,pcie-lane", + &port->lane)) + port->lane = 0; + + port->name = kasprintf(GFP_KERNEL, "pcie%d.%d", + port->port, port->lane); + + port->devfn = of_pci_get_devfn(child); + if (port->devfn < 0) + continue; + + port->base = mvebu_pcie_map_registers(pdev, child, port); + if (!port->base) { + dev_err(&pdev->dev, "PCIe%d.%d: cannot map registers\n", + port->port, port->lane); + continue; + } + + mvebu_pcie_set_local_dev_nr(port, 1); + + if (mvebu_pcie_link_up(port)) { + port->haslink = 1; + dev_info(&pdev->dev, "PCIe%d.%d: link up\n", + port->port, port->lane); + } else { + port->haslink = 0; + dev_info(&pdev->dev, "PCIe%d.%d: link down\n", + port->port, port->lane); + } + + port->clk = of_clk_get_by_name(child, NULL); + if (IS_ERR(port->clk)) { + dev_err(&pdev->dev, "PCIe%d.%d: cannot get clock\n", + port->port, port->lane); + iounmap(port->base); + port->haslink = 0; + 
continue; + } + + port->dn = child; + + clk_prepare_enable(port->clk); + spin_lock_init(&port->conf_lock); + + mvebu_sw_pci_bridge_init(port); + + i++; + } + + mvebu_pcie_enable(pcie); + + return 0; +} + +static const struct of_device_id mvebu_pcie_of_match_table[] = { + { .compatible = "marvell,armada-xp-pcie", }, + { .compatible = "marvell,armada-370-pcie", }, + { .compatible = "marvell,kirkwood-pcie", }, + {}, +}; +MODULE_DEVICE_TABLE(of, mvebu_pcie_of_match_table); + +static struct platform_driver mvebu_pcie_driver = { + .driver = { + .owner = THIS_MODULE, + .name = "mvebu-pcie", + .of_match_table = + of_match_ptr(mvebu_pcie_of_match_table), + }, +}; + +static int __init mvebu_pcie_init(void) +{ + return platform_driver_probe(&mvebu_pcie_driver, + mvebu_pcie_probe); +} + +subsys_initcall(mvebu_pcie_init); + +MODULE_AUTHOR("Thomas Petazzoni <thomas.petazzoni@free-electrons.com>"); +MODULE_DESCRIPTION("Marvell EBU PCIe driver"); +MODULE_LICENSE("GPLv2"); diff --git a/drivers/pci/host/pcie-designware.c b/drivers/pci/host/pcie-designware.c new file mode 100644 index 000000000000..26bdbda8ff90 --- /dev/null +++ b/drivers/pci/host/pcie-designware.c @@ -0,0 +1,1057 @@ +/* + * PCIe host controller driver for Samsung EXYNOS SoCs + * + * Copyright (C) 2013 Samsung Electronics Co., Ltd. + * http://www.samsung.com + * + * Author: Jingoo Han <jg1.han@samsung.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include <linux/clk.h> +#include <linux/delay.h> +#include <linux/gpio.h> +#include <linux/interrupt.h> +#include <linux/kernel.h> +#include <linux/list.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/of_address.h> +#include <linux/of_gpio.h> +#include <linux/of_pci.h> +#include <linux/pci.h> +#include <linux/pci_regs.h> +#include <linux/platform_device.h> +#include <linux/resource.h> +#include <linux/signal.h> +#include <linux/slab.h> +#include <linux/types.h> + +struct pcie_port_info { + u32 cfg0_size; + u32 cfg1_size; + u32 io_size; + u32 mem_size; + phys_addr_t io_bus_addr; + phys_addr_t mem_bus_addr; +}; + +struct pcie_port { + struct device *dev; + u8 controller; + u8 root_bus_nr; + void __iomem *dbi_base; + void __iomem *elbi_base; + void __iomem *phy_base; + void __iomem *purple_base; + u64 cfg0_base; + void __iomem *va_cfg0_base; + u64 cfg1_base; + void __iomem *va_cfg1_base; + u64 io_base; + u64 mem_base; + spinlock_t conf_lock; + struct resource cfg; + struct resource io; + struct resource mem; + struct pcie_port_info config; + struct clk *clk; + struct clk *bus_clk; + int irq; + int reset_gpio; +}; + +/* + * Exynos PCIe IP consists of Synopsys specific part and Exynos + * specific part. Only core block is a Synopsys designware part; + * other parts are Exynos specific. 
+ */ + +/* Synopsis specific PCIE configuration registers */ +#define PCIE_PORT_LINK_CONTROL 0x710 +#define PORT_LINK_MODE_MASK (0x3f << 16) +#define PORT_LINK_MODE_4_LANES (0x7 << 16) + +#define PCIE_LINK_WIDTH_SPEED_CONTROL 0x80C +#define PORT_LOGIC_SPEED_CHANGE (0x1 << 17) +#define PORT_LOGIC_LINK_WIDTH_MASK (0x1ff << 8) +#define PORT_LOGIC_LINK_WIDTH_4_LANES (0x7 << 8) + +#define PCIE_MSI_ADDR_LO 0x820 +#define PCIE_MSI_ADDR_HI 0x824 +#define PCIE_MSI_INTR0_ENABLE 0x828 +#define PCIE_MSI_INTR0_MASK 0x82C +#define PCIE_MSI_INTR0_STATUS 0x830 + +#define PCIE_ATU_VIEWPORT 0x900 +#define PCIE_ATU_REGION_INBOUND (0x1 << 31) +#define PCIE_ATU_REGION_OUTBOUND (0x0 << 31) +#define PCIE_ATU_REGION_INDEX1 (0x1 << 0) +#define PCIE_ATU_REGION_INDEX0 (0x0 << 0) +#define PCIE_ATU_CR1 0x904 +#define PCIE_ATU_TYPE_MEM (0x0 << 0) +#define PCIE_ATU_TYPE_IO (0x2 << 0) +#define PCIE_ATU_TYPE_CFG0 (0x4 << 0) +#define PCIE_ATU_TYPE_CFG1 (0x5 << 0) +#define PCIE_ATU_CR2 0x908 +#define PCIE_ATU_ENABLE (0x1 << 31) +#define PCIE_ATU_BAR_MODE_ENABLE (0x1 << 30) +#define PCIE_ATU_LOWER_BASE 0x90C +#define PCIE_ATU_UPPER_BASE 0x910 +#define PCIE_ATU_LIMIT 0x914 +#define PCIE_ATU_LOWER_TARGET 0x918 +#define PCIE_ATU_BUS(x) (((x) & 0xff) << 24) +#define PCIE_ATU_DEV(x) (((x) & 0x1f) << 19) +#define PCIE_ATU_FUNC(x) (((x) & 0x7) << 16) +#define PCIE_ATU_UPPER_TARGET 0x91C + +/* Exynos specific PCIE configuration registers */ + +/* PCIe ELBI registers */ +#define PCIE_IRQ_PULSE 0x000 +#define IRQ_INTA_ASSERT (0x1 << 0) +#define IRQ_INTB_ASSERT (0x1 << 2) +#define IRQ_INTC_ASSERT (0x1 << 4) +#define IRQ_INTD_ASSERT (0x1 << 6) +#define PCIE_IRQ_LEVEL 0x004 +#define PCIE_IRQ_SPECIAL 0x008 +#define PCIE_IRQ_EN_PULSE 0x00c +#define PCIE_IRQ_EN_LEVEL 0x010 +#define PCIE_IRQ_EN_SPECIAL 0x014 +#define PCIE_PWR_RESET 0x018 +#define PCIE_CORE_RESET 0x01c +#define PCIE_CORE_RESET_ENABLE (0x1 << 0) +#define PCIE_STICKY_RESET 0x020 +#define PCIE_NONSTICKY_RESET 0x024 +#define PCIE_APP_INIT_RESET 0x028 +#define PCIE_APP_LTSSM_ENABLE 0x02c +#define PCIE_ELBI_RDLH_LINKUP 0x064 +#define PCIE_ELBI_LTSSM_ENABLE 0x1 +#define PCIE_ELBI_SLV_AWMISC 0x11c +#define PCIE_ELBI_SLV_ARMISC 0x120 +#define PCIE_ELBI_SLV_DBI_ENABLE (0x1 << 21) + +/* PCIe Purple registers */ +#define PCIE_PHY_GLOBAL_RESET 0x000 +#define PCIE_PHY_COMMON_RESET 0x004 +#define PCIE_PHY_CMN_REG 0x008 +#define PCIE_PHY_MAC_RESET 0x00c +#define PCIE_PHY_PLL_LOCKED 0x010 +#define PCIE_PHY_TRSVREG_RESET 0x020 +#define PCIE_PHY_TRSV_RESET 0x024 + +/* PCIe PHY registers */ +#define PCIE_PHY_IMPEDANCE 0x004 +#define PCIE_PHY_PLL_DIV_0 0x008 +#define PCIE_PHY_PLL_BIAS 0x00c +#define PCIE_PHY_DCC_FEEDBACK 0x014 +#define PCIE_PHY_PLL_DIV_1 0x05c +#define PCIE_PHY_TRSV0_EMP_LVL 0x084 +#define PCIE_PHY_TRSV0_DRV_LVL 0x088 +#define PCIE_PHY_TRSV0_RXCDR 0x0ac +#define PCIE_PHY_TRSV0_LVCC 0x0dc +#define PCIE_PHY_TRSV1_EMP_LVL 0x144 +#define PCIE_PHY_TRSV1_RXCDR 0x16c +#define PCIE_PHY_TRSV1_LVCC 0x19c +#define PCIE_PHY_TRSV2_EMP_LVL 0x204 +#define PCIE_PHY_TRSV2_RXCDR 0x22c +#define PCIE_PHY_TRSV2_LVCC 0x25c +#define PCIE_PHY_TRSV3_EMP_LVL 0x2c4 +#define PCIE_PHY_TRSV3_RXCDR 0x2ec +#define PCIE_PHY_TRSV3_LVCC 0x31c + +static struct hw_pci exynos_pci; + +static inline struct pcie_port *sys_to_pcie(struct pci_sys_data *sys) +{ + return sys->private_data; +} + +static inline int cfg_read(void *addr, int where, int size, u32 *val) +{ + *val = readl(addr); + + if (size == 1) + *val = (*val >> (8 * (where & 3))) & 0xff; + else if (size == 2) + *val = (*val >> (8 * (where & 3))) & 0xffff; + 
else if (size != 4) + return PCIBIOS_BAD_REGISTER_NUMBER; + + return PCIBIOS_SUCCESSFUL; +} + +static inline int cfg_write(void *addr, int where, int size, u32 val) +{ + if (size == 4) + writel(val, addr); + else if (size == 2) + writew(val, addr + (where & 2)); + else if (size == 1) + writeb(val, addr + (where & 3)); + else + return PCIBIOS_BAD_REGISTER_NUMBER; + + return PCIBIOS_SUCCESSFUL; +} + +static void exynos_pcie_sideband_dbi_w_mode(struct pcie_port *pp, bool on) +{ + u32 val; + + if (on) { + val = readl(pp->elbi_base + PCIE_ELBI_SLV_AWMISC); + val |= PCIE_ELBI_SLV_DBI_ENABLE; + writel(val, pp->elbi_base + PCIE_ELBI_SLV_AWMISC); + } else { + val = readl(pp->elbi_base + PCIE_ELBI_SLV_AWMISC); + val &= ~PCIE_ELBI_SLV_DBI_ENABLE; + writel(val, pp->elbi_base + PCIE_ELBI_SLV_AWMISC); + } +} + +static void exynos_pcie_sideband_dbi_r_mode(struct pcie_port *pp, bool on) +{ + u32 val; + + if (on) { + val = readl(pp->elbi_base + PCIE_ELBI_SLV_ARMISC); + val |= PCIE_ELBI_SLV_DBI_ENABLE; + writel(val, pp->elbi_base + PCIE_ELBI_SLV_ARMISC); + } else { + val = readl(pp->elbi_base + PCIE_ELBI_SLV_ARMISC); + val &= ~PCIE_ELBI_SLV_DBI_ENABLE; + writel(val, pp->elbi_base + PCIE_ELBI_SLV_ARMISC); + } +} + +static inline void readl_rc(struct pcie_port *pp, void *dbi_base, u32 *val) +{ + exynos_pcie_sideband_dbi_r_mode(pp, true); + *val = readl(dbi_base); + exynos_pcie_sideband_dbi_r_mode(pp, false); + return; +} + +static inline void writel_rc(struct pcie_port *pp, u32 val, void *dbi_base) +{ + exynos_pcie_sideband_dbi_w_mode(pp, true); + writel(val, dbi_base); + exynos_pcie_sideband_dbi_w_mode(pp, false); + return; +} + +static int exynos_pcie_rd_own_conf(struct pcie_port *pp, int where, int size, + u32 *val) +{ + int ret; + + exynos_pcie_sideband_dbi_r_mode(pp, true); + ret = cfg_read(pp->dbi_base + (where & ~0x3), where, size, val); + exynos_pcie_sideband_dbi_r_mode(pp, false); + return ret; +} + +static int exynos_pcie_wr_own_conf(struct pcie_port *pp, int where, int size, + u32 val) +{ + int ret; + + exynos_pcie_sideband_dbi_w_mode(pp, true); + ret = cfg_write(pp->dbi_base + (where & ~0x3), where, size, val); + exynos_pcie_sideband_dbi_w_mode(pp, false); + return ret; +} + +static void exynos_pcie_prog_viewport_cfg0(struct pcie_port *pp, u32 busdev) +{ + u32 val; + void __iomem *dbi_base = pp->dbi_base; + + /* Program viewport 0 : OUTBOUND : CFG0 */ + val = PCIE_ATU_REGION_OUTBOUND | PCIE_ATU_REGION_INDEX0; + writel_rc(pp, val, dbi_base + PCIE_ATU_VIEWPORT); + writel_rc(pp, pp->cfg0_base, dbi_base + PCIE_ATU_LOWER_BASE); + writel_rc(pp, (pp->cfg0_base >> 32), dbi_base + PCIE_ATU_UPPER_BASE); + writel_rc(pp, pp->cfg0_base + pp->config.cfg0_size - 1, + dbi_base + PCIE_ATU_LIMIT); + writel_rc(pp, busdev, dbi_base + PCIE_ATU_LOWER_TARGET); + writel_rc(pp, 0, dbi_base + PCIE_ATU_UPPER_TARGET); + writel_rc(pp, PCIE_ATU_TYPE_CFG0, dbi_base + PCIE_ATU_CR1); + val = PCIE_ATU_ENABLE; + writel_rc(pp, val, dbi_base + PCIE_ATU_CR2); +} + +static void exynos_pcie_prog_viewport_cfg1(struct pcie_port *pp, u32 busdev) +{ + u32 val; + void __iomem *dbi_base = pp->dbi_base; + + /* Program viewport 1 : OUTBOUND : CFG1 */ + val = PCIE_ATU_REGION_OUTBOUND | PCIE_ATU_REGION_INDEX1; + writel_rc(pp, val, dbi_base + PCIE_ATU_VIEWPORT); + writel_rc(pp, PCIE_ATU_TYPE_CFG1, dbi_base + PCIE_ATU_CR1); + val = PCIE_ATU_ENABLE; + writel_rc(pp, val, dbi_base + PCIE_ATU_CR2); + writel_rc(pp, pp->cfg1_base, dbi_base + PCIE_ATU_LOWER_BASE); + writel_rc(pp, (pp->cfg1_base >> 32), dbi_base + PCIE_ATU_UPPER_BASE); + writel_rc(pp, 
pp->cfg1_base + pp->config.cfg1_size - 1, + dbi_base + PCIE_ATU_LIMIT); + writel_rc(pp, busdev, dbi_base + PCIE_ATU_LOWER_TARGET); + writel_rc(pp, 0, dbi_base + PCIE_ATU_UPPER_TARGET); +} + +static void exynos_pcie_prog_viewport_mem_outbound(struct pcie_port *pp) +{ + u32 val; + void __iomem *dbi_base = pp->dbi_base; + + /* Program viewport 0 : OUTBOUND : MEM */ + val = PCIE_ATU_REGION_OUTBOUND | PCIE_ATU_REGION_INDEX0; + writel_rc(pp, val, dbi_base + PCIE_ATU_VIEWPORT); + writel_rc(pp, PCIE_ATU_TYPE_MEM, dbi_base + PCIE_ATU_CR1); + val = PCIE_ATU_ENABLE; + writel_rc(pp, val, dbi_base + PCIE_ATU_CR2); + writel_rc(pp, pp->mem_base, dbi_base + PCIE_ATU_LOWER_BASE); + writel_rc(pp, (pp->mem_base >> 32), dbi_base + PCIE_ATU_UPPER_BASE); + writel_rc(pp, pp->mem_base + pp->config.mem_size - 1, + dbi_base + PCIE_ATU_LIMIT); + writel_rc(pp, pp->config.mem_bus_addr, + dbi_base + PCIE_ATU_LOWER_TARGET); + writel_rc(pp, upper_32_bits(pp->config.mem_bus_addr), + dbi_base + PCIE_ATU_UPPER_TARGET); +} + +static void exynos_pcie_prog_viewport_io_outbound(struct pcie_port *pp) +{ + u32 val; + void __iomem *dbi_base = pp->dbi_base; + + /* Program viewport 1 : OUTBOUND : IO */ + val = PCIE_ATU_REGION_OUTBOUND | PCIE_ATU_REGION_INDEX1; + writel_rc(pp, val, dbi_base + PCIE_ATU_VIEWPORT); + writel_rc(pp, PCIE_ATU_TYPE_IO, dbi_base + PCIE_ATU_CR1); + val = PCIE_ATU_ENABLE; + writel_rc(pp, val, dbi_base + PCIE_ATU_CR2); + writel_rc(pp, pp->io_base, dbi_base + PCIE_ATU_LOWER_BASE); + writel_rc(pp, (pp->io_base >> 32), dbi_base + PCIE_ATU_UPPER_BASE); + writel_rc(pp, pp->io_base + pp->config.io_size - 1, + dbi_base + PCIE_ATU_LIMIT); + writel_rc(pp, pp->config.io_bus_addr, + dbi_base + PCIE_ATU_LOWER_TARGET); + writel_rc(pp, upper_32_bits(pp->config.io_bus_addr), + dbi_base + PCIE_ATU_UPPER_TARGET); +} + +static int exynos_pcie_rd_other_conf(struct pcie_port *pp, struct pci_bus *bus, + u32 devfn, int where, int size, u32 *val) +{ + int ret = PCIBIOS_SUCCESSFUL; + u32 address, busdev; + + busdev = PCIE_ATU_BUS(bus->number) | PCIE_ATU_DEV(PCI_SLOT(devfn)) | + PCIE_ATU_FUNC(PCI_FUNC(devfn)); + address = where & ~0x3; + + if (bus->parent->number == pp->root_bus_nr) { + exynos_pcie_prog_viewport_cfg0(pp, busdev); + ret = cfg_read(pp->va_cfg0_base + address, where, size, val); + exynos_pcie_prog_viewport_mem_outbound(pp); + } else { + exynos_pcie_prog_viewport_cfg1(pp, busdev); + ret = cfg_read(pp->va_cfg1_base + address, where, size, val); + exynos_pcie_prog_viewport_io_outbound(pp); + } + + return ret; +} + +static int exynos_pcie_wr_other_conf(struct pcie_port *pp, struct pci_bus *bus, + u32 devfn, int where, int size, u32 val) +{ + int ret = PCIBIOS_SUCCESSFUL; + u32 address, busdev; + + busdev = PCIE_ATU_BUS(bus->number) | PCIE_ATU_DEV(PCI_SLOT(devfn)) | + PCIE_ATU_FUNC(PCI_FUNC(devfn)); + address = where & ~0x3; + + if (bus->parent->number == pp->root_bus_nr) { + exynos_pcie_prog_viewport_cfg0(pp, busdev); + ret = cfg_write(pp->va_cfg0_base + address, where, size, val); + exynos_pcie_prog_viewport_mem_outbound(pp); + } else { + exynos_pcie_prog_viewport_cfg1(pp, busdev); + ret = cfg_write(pp->va_cfg1_base + address, where, size, val); + exynos_pcie_prog_viewport_io_outbound(pp); + } + + return ret; +} + +static unsigned long global_io_offset; + +static int exynos_pcie_setup(int nr, struct pci_sys_data *sys) +{ + struct pcie_port *pp; + + pp = sys_to_pcie(sys); + + if (!pp) + return 0; + + if (global_io_offset < SZ_1M && pp->config.io_size > 0) { + sys->io_offset = global_io_offset - pp->config.io_bus_addr; + 
pci_ioremap_io(sys->io_offset, pp->io.start); + global_io_offset += SZ_64K; + pci_add_resource_offset(&sys->resources, &pp->io, + sys->io_offset); + } + + sys->mem_offset = pp->mem.start - pp->config.mem_bus_addr; + pci_add_resource_offset(&sys->resources, &pp->mem, sys->mem_offset); + + return 1; +} + +static int exynos_pcie_link_up(struct pcie_port *pp) +{ + u32 val = readl(pp->elbi_base + PCIE_ELBI_RDLH_LINKUP); + + if (val == PCIE_ELBI_LTSSM_ENABLE) + return 1; + + return 0; +} + +static int exynos_pcie_valid_config(struct pcie_port *pp, + struct pci_bus *bus, int dev) +{ + /* If there is no link, then there is no device */ + if (bus->number != pp->root_bus_nr) { + if (!exynos_pcie_link_up(pp)) + return 0; + } + + /* access only one slot on each root port */ + if (bus->number == pp->root_bus_nr && dev > 0) + return 0; + + /* + * do not read more than one device on the bus directly attached + * to RC's (Virtual Bridge's) DS side. + */ + if (bus->primary == pp->root_bus_nr && dev > 0) + return 0; + + return 1; +} + +static int exynos_pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where, + int size, u32 *val) +{ + struct pcie_port *pp = sys_to_pcie(bus->sysdata); + unsigned long flags; + int ret; + + if (!pp) { + BUG(); + return -EINVAL; + } + + if (exynos_pcie_valid_config(pp, bus, PCI_SLOT(devfn)) == 0) { + *val = 0xffffffff; + return PCIBIOS_DEVICE_NOT_FOUND; + } + + spin_lock_irqsave(&pp->conf_lock, flags); + if (bus->number != pp->root_bus_nr) + ret = exynos_pcie_rd_other_conf(pp, bus, devfn, + where, size, val); + else + ret = exynos_pcie_rd_own_conf(pp, where, size, val); + spin_unlock_irqrestore(&pp->conf_lock, flags); + + return ret; +} + +static int exynos_pcie_wr_conf(struct pci_bus *bus, u32 devfn, + int where, int size, u32 val) +{ + struct pcie_port *pp = sys_to_pcie(bus->sysdata); + unsigned long flags; + int ret; + + if (!pp) { + BUG(); + return -EINVAL; + } + + if (exynos_pcie_valid_config(pp, bus, PCI_SLOT(devfn)) == 0) + return PCIBIOS_DEVICE_NOT_FOUND; + + spin_lock_irqsave(&pp->conf_lock, flags); + if (bus->number != pp->root_bus_nr) + ret = exynos_pcie_wr_other_conf(pp, bus, devfn, + where, size, val); + else + ret = exynos_pcie_wr_own_conf(pp, where, size, val); + spin_unlock_irqrestore(&pp->conf_lock, flags); + + return ret; +} + +static struct pci_ops exynos_pcie_ops = { + .read = exynos_pcie_rd_conf, + .write = exynos_pcie_wr_conf, +}; + +static struct pci_bus *exynos_pcie_scan_bus(int nr, + struct pci_sys_data *sys) +{ + struct pci_bus *bus; + struct pcie_port *pp = sys_to_pcie(sys); + + if (pp) { + pp->root_bus_nr = sys->busnr; + bus = pci_scan_root_bus(NULL, sys->busnr, &exynos_pcie_ops, + sys, &sys->resources); + } else { + bus = NULL; + BUG(); + } + + return bus; +} + +static int exynos_pcie_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) +{ + struct pcie_port *pp = sys_to_pcie(dev->bus->sysdata); + + return pp->irq; +} + +static struct hw_pci exynos_pci = { + .setup = exynos_pcie_setup, + .scan = exynos_pcie_scan_bus, + .map_irq = exynos_pcie_map_irq, +}; + +static void exynos_pcie_setup_rc(struct pcie_port *pp) +{ + struct pcie_port_info *config = &pp->config; + void __iomem *dbi_base = pp->dbi_base; + u32 val; + u32 membase; + u32 memlimit; + + /* set the number of lines as 4 */ + readl_rc(pp, dbi_base + PCIE_PORT_LINK_CONTROL, &val); + val &= ~PORT_LINK_MODE_MASK; + val |= PORT_LINK_MODE_4_LANES; + writel_rc(pp, val, dbi_base + PCIE_PORT_LINK_CONTROL); + + /* set link width speed control register */ + readl_rc(pp, dbi_base + 
PCIE_LINK_WIDTH_SPEED_CONTROL, &val); + val &= ~PORT_LOGIC_LINK_WIDTH_MASK; + val |= PORT_LOGIC_LINK_WIDTH_4_LANES; + writel_rc(pp, val, dbi_base + PCIE_LINK_WIDTH_SPEED_CONTROL); + + /* setup RC BARs */ + writel_rc(pp, 0x00000004, dbi_base + PCI_BASE_ADDRESS_0); + writel_rc(pp, 0x00000004, dbi_base + PCI_BASE_ADDRESS_1); + + /* setup interrupt pins */ + readl_rc(pp, dbi_base + PCI_INTERRUPT_LINE, &val); + val &= 0xffff00ff; + val |= 0x00000100; + writel_rc(pp, val, dbi_base + PCI_INTERRUPT_LINE); + + /* setup bus numbers */ + readl_rc(pp, dbi_base + PCI_PRIMARY_BUS, &val); + val &= 0xff000000; + val |= 0x00010100; + writel_rc(pp, val, dbi_base + PCI_PRIMARY_BUS); + + /* setup memory base, memory limit */ + membase = ((u32)pp->mem_base & 0xfff00000) >> 16; + memlimit = (config->mem_size + (u32)pp->mem_base) & 0xfff00000; + val = memlimit | membase; + writel_rc(pp, val, dbi_base + PCI_MEMORY_BASE); + + /* setup command register */ + readl_rc(pp, dbi_base + PCI_COMMAND, &val); + val &= 0xffff0000; + val |= PCI_COMMAND_IO | PCI_COMMAND_MEMORY | + PCI_COMMAND_MASTER | PCI_COMMAND_SERR; + writel_rc(pp, val, dbi_base + PCI_COMMAND); +} + +static void exynos_pcie_assert_core_reset(struct pcie_port *pp) +{ + u32 val; + void __iomem *elbi_base = pp->elbi_base; + + val = readl(elbi_base + PCIE_CORE_RESET); + val &= ~PCIE_CORE_RESET_ENABLE; + writel(val, elbi_base + PCIE_CORE_RESET); + writel(0, elbi_base + PCIE_PWR_RESET); + writel(0, elbi_base + PCIE_STICKY_RESET); + writel(0, elbi_base + PCIE_NONSTICKY_RESET); +} + +static void exynos_pcie_deassert_core_reset(struct pcie_port *pp) +{ + u32 val; + void __iomem *elbi_base = pp->elbi_base; + void __iomem *purple_base = pp->purple_base; + + val = readl(elbi_base + PCIE_CORE_RESET); + val |= PCIE_CORE_RESET_ENABLE; + writel(val, elbi_base + PCIE_CORE_RESET); + writel(1, elbi_base + PCIE_STICKY_RESET); + writel(1, elbi_base + PCIE_NONSTICKY_RESET); + writel(1, elbi_base + PCIE_APP_INIT_RESET); + writel(0, elbi_base + PCIE_APP_INIT_RESET); + writel(1, purple_base + PCIE_PHY_MAC_RESET); +} + +static void exynos_pcie_assert_phy_reset(struct pcie_port *pp) +{ + void __iomem *purple_base = pp->purple_base; + + writel(0, purple_base + PCIE_PHY_MAC_RESET); + writel(1, purple_base + PCIE_PHY_GLOBAL_RESET); +} + +static void exynos_pcie_deassert_phy_reset(struct pcie_port *pp) +{ + void __iomem *elbi_base = pp->elbi_base; + void __iomem *purple_base = pp->purple_base; + + writel(0, purple_base + PCIE_PHY_GLOBAL_RESET); + writel(1, elbi_base + PCIE_PWR_RESET); + writel(0, purple_base + PCIE_PHY_COMMON_RESET); + writel(0, purple_base + PCIE_PHY_CMN_REG); + writel(0, purple_base + PCIE_PHY_TRSVREG_RESET); + writel(0, purple_base + PCIE_PHY_TRSV_RESET); +} + +static void exynos_pcie_init_phy(struct pcie_port *pp) +{ + void __iomem *phy_base = pp->phy_base; + + /* DCC feedback control off */ + writel(0x29, phy_base + PCIE_PHY_DCC_FEEDBACK); + + /* set TX/RX impedance */ + writel(0xd5, phy_base + PCIE_PHY_IMPEDANCE); + + /* set 50Mhz PHY clock */ + writel(0x14, phy_base + PCIE_PHY_PLL_DIV_0); + writel(0x12, phy_base + PCIE_PHY_PLL_DIV_1); + + /* set TX Differential output for lane 0 */ + writel(0x7f, phy_base + PCIE_PHY_TRSV0_DRV_LVL); + + /* set TX Pre-emphasis Level Control for lane 0 to minimum */ + writel(0x0, phy_base + PCIE_PHY_TRSV0_EMP_LVL); + + /* set RX clock and data recovery bandwidth */ + writel(0xe7, phy_base + PCIE_PHY_PLL_BIAS); + writel(0x82, phy_base + PCIE_PHY_TRSV0_RXCDR); + writel(0x82, phy_base + PCIE_PHY_TRSV1_RXCDR); + writel(0x82, phy_base + 
PCIE_PHY_TRSV2_RXCDR); + writel(0x82, phy_base + PCIE_PHY_TRSV3_RXCDR); + + /* change TX Pre-emphasis Level Control for lanes */ + writel(0x39, phy_base + PCIE_PHY_TRSV0_EMP_LVL); + writel(0x39, phy_base + PCIE_PHY_TRSV1_EMP_LVL); + writel(0x39, phy_base + PCIE_PHY_TRSV2_EMP_LVL); + writel(0x39, phy_base + PCIE_PHY_TRSV3_EMP_LVL); + + /* set LVCC */ + writel(0x20, phy_base + PCIE_PHY_TRSV0_LVCC); + writel(0xa0, phy_base + PCIE_PHY_TRSV1_LVCC); + writel(0xa0, phy_base + PCIE_PHY_TRSV2_LVCC); + writel(0xa0, phy_base + PCIE_PHY_TRSV3_LVCC); +} + +static void exynos_pcie_assert_reset(struct pcie_port *pp) +{ + if (pp->reset_gpio >= 0) + devm_gpio_request_one(pp->dev, pp->reset_gpio, + GPIOF_OUT_INIT_HIGH, "RESET"); + return; +} + +static int exynos_pcie_establish_link(struct pcie_port *pp) +{ + u32 val; + int count = 0; + void __iomem *elbi_base = pp->elbi_base; + void __iomem *purple_base = pp->purple_base; + void __iomem *phy_base = pp->phy_base; + + if (exynos_pcie_link_up(pp)) { + dev_err(pp->dev, "Link already up\n"); + return 0; + } + + /* assert reset signals */ + exynos_pcie_assert_core_reset(pp); + exynos_pcie_assert_phy_reset(pp); + + /* de-assert phy reset */ + exynos_pcie_deassert_phy_reset(pp); + + /* initialize phy */ + exynos_pcie_init_phy(pp); + + /* pulse for common reset */ + writel(1, purple_base + PCIE_PHY_COMMON_RESET); + udelay(500); + writel(0, purple_base + PCIE_PHY_COMMON_RESET); + + /* de-assert core reset */ + exynos_pcie_deassert_core_reset(pp); + + /* setup root complex */ + exynos_pcie_setup_rc(pp); + + /* assert reset signal */ + exynos_pcie_assert_reset(pp); + + /* assert LTSSM enable */ + writel(PCIE_ELBI_LTSSM_ENABLE, elbi_base + PCIE_APP_LTSSM_ENABLE); + + /* check if the link is up or not */ + while (!exynos_pcie_link_up(pp)) { + mdelay(100); + count++; + if (count == 10) { + while (readl(phy_base + PCIE_PHY_PLL_LOCKED) == 0) { + val = readl(purple_base + PCIE_PHY_PLL_LOCKED); + dev_info(pp->dev, "PLL Locked: 0x%x\n", val); + } + dev_err(pp->dev, "PCIe Link Fail\n"); + return -EINVAL; + } + } + + dev_info(pp->dev, "Link up\n"); + + return 0; +} + +static void exynos_pcie_clear_irq_pulse(struct pcie_port *pp) +{ + u32 val; + void __iomem *elbi_base = pp->elbi_base; + + val = readl(elbi_base + PCIE_IRQ_PULSE); + writel(val, elbi_base + PCIE_IRQ_PULSE); + return; +} + +static void exynos_pcie_enable_irq_pulse(struct pcie_port *pp) +{ + u32 val; + void __iomem *elbi_base = pp->elbi_base; + + /* enable INTX interrupt */ + val = IRQ_INTA_ASSERT | IRQ_INTB_ASSERT | + IRQ_INTC_ASSERT | IRQ_INTD_ASSERT, + writel(val, elbi_base + PCIE_IRQ_EN_PULSE); + return; +} + +static irqreturn_t exynos_pcie_irq_handler(int irq, void *arg) +{ + struct pcie_port *pp = arg; + + exynos_pcie_clear_irq_pulse(pp); + return IRQ_HANDLED; +} + +static void exynos_pcie_enable_interrupts(struct pcie_port *pp) +{ + exynos_pcie_enable_irq_pulse(pp); + return; +} + +static void exynos_pcie_host_init(struct pcie_port *pp) +{ + struct pcie_port_info *config = &pp->config; + u32 val; + + /* Keep first 64K for IO */ + pp->cfg0_base = pp->cfg.start; + pp->cfg1_base = pp->cfg.start + config->cfg0_size; + pp->io_base = pp->io.start; + pp->mem_base = pp->mem.start; + + /* enable link */ + exynos_pcie_establish_link(pp); + + exynos_pcie_wr_own_conf(pp, PCI_BASE_ADDRESS_0, 4, 0); + + /* program correct class for RC */ + exynos_pcie_wr_own_conf(pp, PCI_CLASS_DEVICE, 2, PCI_CLASS_BRIDGE_PCI); + + exynos_pcie_rd_own_conf(pp, PCIE_LINK_WIDTH_SPEED_CONTROL, 4, &val); + val |= PORT_LOGIC_SPEED_CHANGE; + 
exynos_pcie_wr_own_conf(pp, PCIE_LINK_WIDTH_SPEED_CONTROL, 4, val); + + exynos_pcie_enable_interrupts(pp); +} + +static int add_pcie_port(struct pcie_port *pp, struct platform_device *pdev) +{ + struct resource *elbi_base; + struct resource *phy_base; + struct resource *purple_base; + int ret; + + elbi_base = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!elbi_base) { + dev_err(&pdev->dev, "couldn't get elbi base resource\n"); + return -EINVAL; + } + pp->elbi_base = devm_ioremap_resource(&pdev->dev, elbi_base); + if (IS_ERR(pp->elbi_base)) + return PTR_ERR(pp->elbi_base); + + phy_base = platform_get_resource(pdev, IORESOURCE_MEM, 1); + if (!phy_base) { + dev_err(&pdev->dev, "couldn't get phy base resource\n"); + return -EINVAL; + } + pp->phy_base = devm_ioremap_resource(&pdev->dev, phy_base); + if (IS_ERR(pp->phy_base)) + return PTR_ERR(pp->phy_base); + + purple_base = platform_get_resource(pdev, IORESOURCE_MEM, 2); + if (!purple_base) { + dev_err(&pdev->dev, "couldn't get purple base resource\n"); + return -EINVAL; + } + pp->purple_base = devm_ioremap_resource(&pdev->dev, purple_base); + if (IS_ERR(pp->purple_base)) + return PTR_ERR(pp->purple_base); + + pp->irq = platform_get_irq(pdev, 1); + if (!pp->irq) { + dev_err(&pdev->dev, "failed to get irq\n"); + return -ENODEV; + } + ret = devm_request_irq(&pdev->dev, pp->irq, exynos_pcie_irq_handler, + IRQF_SHARED, "exynos-pcie", pp); + if (ret) { + dev_err(&pdev->dev, "failed to request irq\n"); + return ret; + } + + pp->dbi_base = devm_ioremap(&pdev->dev, pp->cfg.start, + resource_size(&pp->cfg)); + if (!pp->dbi_base) { + dev_err(&pdev->dev, "error with ioremap\n"); + return -ENOMEM; + } + + pp->root_bus_nr = -1; + + spin_lock_init(&pp->conf_lock); + exynos_pcie_host_init(pp); + pp->va_cfg0_base = devm_ioremap(&pdev->dev, pp->cfg0_base, + pp->config.cfg0_size); + if (!pp->va_cfg0_base) { + dev_err(pp->dev, "error with ioremap in function\n"); + return -ENOMEM; + } + pp->va_cfg1_base = devm_ioremap(&pdev->dev, pp->cfg1_base, + pp->config.cfg1_size); + if (!pp->va_cfg1_base) { + dev_err(pp->dev, "error with ioremap\n"); + return -ENOMEM; + } + + return 0; +} + +static int __init exynos_pcie_probe(struct platform_device *pdev) +{ + struct pcie_port *pp; + struct device_node *np = pdev->dev.of_node; + struct of_pci_range range; + struct of_pci_range_parser parser; + int ret; + + pp = devm_kzalloc(&pdev->dev, sizeof(*pp), GFP_KERNEL); + if (!pp) { + dev_err(&pdev->dev, "no memory for pcie port\n"); + return -ENOMEM; + } + + pp->dev = &pdev->dev; + + if (of_pci_range_parser_init(&parser, np)) { + dev_err(&pdev->dev, "missing ranges property\n"); + return -EINVAL; + } + + /* Get the I/O and memory ranges from DT */ + for_each_of_pci_range(&parser, &range) { + unsigned long restype = range.flags & IORESOURCE_TYPE_BITS; + if (restype == IORESOURCE_IO) { + of_pci_range_to_resource(&range, np, &pp->io); + pp->io.name = "I/O"; + pp->io.start = max_t(resource_size_t, + PCIBIOS_MIN_IO, + range.pci_addr + global_io_offset); + pp->io.end = min_t(resource_size_t, + IO_SPACE_LIMIT, + range.pci_addr + range.size + + global_io_offset); + pp->config.io_size = resource_size(&pp->io); + pp->config.io_bus_addr = range.pci_addr; + } + if (restype == IORESOURCE_MEM) { + of_pci_range_to_resource(&range, np, &pp->mem); + pp->mem.name = "MEM"; + pp->config.mem_size = resource_size(&pp->mem); + pp->config.mem_bus_addr = range.pci_addr; + } + if (restype == 0) { + of_pci_range_to_resource(&range, np, &pp->cfg); + pp->config.cfg0_size = resource_size(&pp->cfg)/2; + 
pp->config.cfg1_size = resource_size(&pp->cfg)/2; + } + } + + pp->reset_gpio = of_get_named_gpio(np, "reset-gpio", 0); + + pp->clk = devm_clk_get(&pdev->dev, "pcie"); + if (IS_ERR(pp->clk)) { + dev_err(&pdev->dev, "Failed to get pcie rc clock\n"); + return PTR_ERR(pp->clk); + } + ret = clk_prepare_enable(pp->clk); + if (ret) + return ret; + + pp->bus_clk = devm_clk_get(&pdev->dev, "pcie_bus"); + if (IS_ERR(pp->bus_clk)) { + dev_err(&pdev->dev, "Failed to get pcie bus clock\n"); + ret = PTR_ERR(pp->bus_clk); + goto fail_clk; + } + ret = clk_prepare_enable(pp->bus_clk); + if (ret) + goto fail_clk; + + ret = add_pcie_port(pp, pdev); + if (ret < 0) + goto fail_bus_clk; + + pp->controller = exynos_pci.nr_controllers; + exynos_pci.nr_controllers = 1; + exynos_pci.private_data = (void **)&pp; + + pci_common_init(&exynos_pci); + pci_assign_unassigned_resources(); +#ifdef CONFIG_PCI_DOMAINS + exynos_pci.domain++; +#endif + + platform_set_drvdata(pdev, pp); + return 0; + +fail_bus_clk: + clk_disable_unprepare(pp->bus_clk); +fail_clk: + clk_disable_unprepare(pp->clk); + return ret; +} + +static int __exit exynos_pcie_remove(struct platform_device *pdev) +{ + struct pcie_port *pp = platform_get_drvdata(pdev); + + clk_disable_unprepare(pp->bus_clk); + clk_disable_unprepare(pp->clk); + + return 0; +} + +static const struct of_device_id exynos_pcie_of_match[] = { + { .compatible = "samsung,exynos5440-pcie", }, + {}, +}; +MODULE_DEVICE_TABLE(of, exynos_pcie_of_match); + +static struct platform_driver exynos_pcie_driver = { + .remove = __exit_p(exynos_pcie_remove), + .driver = { + .name = "exynos-pcie", + .owner = THIS_MODULE, + .of_match_table = of_match_ptr(exynos_pcie_of_match), + }, +}; + +static int exynos_pcie_abort(unsigned long addr, unsigned int fsr, + struct pt_regs *regs) +{ + unsigned long pc = instruction_pointer(regs); + unsigned long instr = *(unsigned long *)pc; + + WARN_ONCE(1, "pcie abort\n"); + + /* + * If the instruction being executed was a read, + * make it look like it read all-ones. + */ + if ((instr & 0x0c100000) == 0x04100000) { + int reg = (instr >> 12) & 15; + unsigned long val; + + if (instr & 0x00400000) + val = 255; + else + val = -1; + + regs->uregs[reg] = val; + regs->ARM_pc += 4; + return 0; + } + + if ((instr & 0x0e100090) == 0x00100090) { + int reg = (instr >> 12) & 15; + + regs->uregs[reg] = -1; + regs->ARM_pc += 4; + return 0; + } + + return 1; +} + +/* Exynos PCIe driver does not allow module unload */ + +static int __init pcie_init(void) +{ + hook_fault_code(16 + 6, exynos_pcie_abort, SIGBUS, 0, + "imprecise external abort"); + + platform_driver_probe(&exynos_pcie_driver, exynos_pcie_probe); + + return 0; +} +subsys_initcall(pcie_init); + +MODULE_AUTHOR("Jingoo Han <jg1.han@samsung.com>"); +MODULE_DESCRIPTION("Samsung PCIe host controller driver"); +MODULE_LICENSE("GPL v2"); |