Diffstat (limited to 'drivers/pci/controller')
20 files changed, 2227 insertions, 882 deletions
diff --git a/drivers/pci/controller/Kconfig b/drivers/pci/controller/Kconfig index 20bf00f587bd..91bfdb784829 100644 --- a/drivers/pci/controller/Kconfig +++ b/drivers/pci/controller/Kconfig @@ -213,16 +213,6 @@ config PCIE_MEDIATEK Say Y here if you want to enable PCIe controller support on MediaTek SoCs. -config PCIE_MOBIVEIL - bool "Mobiveil AXI PCIe controller" - depends on ARCH_ZYNQMP || COMPILE_TEST - depends on OF - depends on PCI_MSI_IRQ_DOMAIN - help - Say Y here if you want to enable support for the Mobiveil AXI PCIe - Soft IP. It has up to 8 outbound and inbound windows - for address translation and it is a PCIe Gen4 IP. - config PCIE_TANGO_SMP8759 bool "Tango SMP8759 PCIe controller (DANGEROUS)" depends on ARCH_TANGO && PCI_MSI && OF @@ -269,5 +259,6 @@ config PCI_HYPERV_INTERFACE have a common interface with the Hyper-V PCI frontend driver. source "drivers/pci/controller/dwc/Kconfig" +source "drivers/pci/controller/mobiveil/Kconfig" source "drivers/pci/controller/cadence/Kconfig" endmenu diff --git a/drivers/pci/controller/Makefile b/drivers/pci/controller/Makefile index 01b2502a5323..158c59771824 100644 --- a/drivers/pci/controller/Makefile +++ b/drivers/pci/controller/Makefile @@ -25,12 +25,12 @@ obj-$(CONFIG_PCIE_ROCKCHIP) += pcie-rockchip.o obj-$(CONFIG_PCIE_ROCKCHIP_EP) += pcie-rockchip-ep.o obj-$(CONFIG_PCIE_ROCKCHIP_HOST) += pcie-rockchip-host.o obj-$(CONFIG_PCIE_MEDIATEK) += pcie-mediatek.o -obj-$(CONFIG_PCIE_MOBIVEIL) += pcie-mobiveil.o obj-$(CONFIG_PCIE_TANGO_SMP8759) += pcie-tango.o obj-$(CONFIG_VMD) += vmd.o obj-$(CONFIG_PCIE_BRCMSTB) += pcie-brcmstb.o # pcie-hisi.o quirks are needed even without CONFIG_PCIE_DW obj-y += dwc/ +obj-y += mobiveil/ # The following drivers are for devices that use the generic ACPI diff --git a/drivers/pci/controller/dwc/Kconfig b/drivers/pci/controller/dwc/Kconfig index 0830dfcfa43a..03dcaf65d159 100644 --- a/drivers/pci/controller/dwc/Kconfig +++ b/drivers/pci/controller/dwc/Kconfig @@ -248,14 +248,37 @@ config PCI_MESON implement the driver. config PCIE_TEGRA194 - tristate "NVIDIA Tegra194 (and later) PCIe controller" + tristate + +config PCIE_TEGRA194_HOST + tristate "NVIDIA Tegra194 (and later) PCIe controller - Host Mode" depends on ARCH_TEGRA_194_SOC || COMPILE_TEST depends on PCI_MSI_IRQ_DOMAIN select PCIE_DW_HOST select PHY_TEGRA194_P2U + select PCIE_TEGRA194 + help + Enables support for the PCIe controller in the NVIDIA Tegra194 SoC to + work in host mode. There are two instances of PCIe controllers in + Tegra194. This controller can work either as EP or RC. In order to + enable host-specific features PCIE_TEGRA194_HOST must be selected and + in order to enable device-specific features PCIE_TEGRA194_EP must be + selected. This uses the DesignWare core. + +config PCIE_TEGRA194_EP + tristate "NVIDIA Tegra194 (and later) PCIe controller - Endpoint Mode" + depends on ARCH_TEGRA_194_SOC || COMPILE_TEST + depends on PCI_ENDPOINT + select PCIE_DW_EP + select PHY_TEGRA194_P2U + select PCIE_TEGRA194 help - Say Y here if you want support for DesignWare core based PCIe host - controller found in NVIDIA Tegra194 SoC. + Enables support for the PCIe controller in the NVIDIA Tegra194 SoC to + work in endpoint mode. There are two instances of PCIe controllers in + Tegra194. This controller can work either as EP or RC. In order to + enable host-specific features PCIE_TEGRA194_HOST must be selected and + in order to enable device-specific features PCIE_TEGRA194_EP must be + selected. This uses the DesignWare core.
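# [Editor's note] The hunk above turns PCIE_TEGRA194 into a hidden common
# symbol that both user-visible options pull in via "select", so a single
# core driver backs host and endpoint mode. A minimal sketch of the
# pattern, with hypothetical FOO names:
#
#   config FOO
#           tristate                # hidden: no prompt, set only via select
#
#   config FOO_HOST
#           tristate "FOO controller - Host Mode"
#           select FOO              # pulls in the shared core driver
#
#   config FOO_EP
#           tristate "FOO controller - Endpoint Mode"
#           select FOO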
config PCIE_UNIPHIER bool "Socionext UniPhier PCIe controllers" diff --git a/drivers/pci/controller/dwc/pci-dra7xx.c b/drivers/pci/controller/dwc/pci-dra7xx.c index 9bf7fa99b103..3b0e58f2de58 100644 --- a/drivers/pci/controller/dwc/pci-dra7xx.c +++ b/drivers/pci/controller/dwc/pci-dra7xx.c @@ -215,10 +215,6 @@ static int dra7xx_pcie_host_init(struct pcie_port *pp) return 0; } -static const struct dw_pcie_host_ops dra7xx_pcie_host_ops = { - .host_init = dra7xx_pcie_host_init, -}; - static int dra7xx_pcie_intx_map(struct irq_domain *domain, unsigned int irq, irq_hw_number_t hwirq) { @@ -233,43 +229,77 @@ static const struct irq_domain_ops intx_domain_ops = { .xlate = pci_irqd_intx_xlate, }; -static int dra7xx_pcie_init_irq_domain(struct pcie_port *pp) +static int dra7xx_pcie_handle_msi(struct pcie_port *pp, int index) { struct dw_pcie *pci = to_dw_pcie_from_pp(pp); - struct device *dev = pci->dev; - struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci); - struct device_node *node = dev->of_node; - struct device_node *pcie_intc_node = of_get_next_child(node, NULL); + unsigned long val; + int pos, irq; - if (!pcie_intc_node) { - dev_err(dev, "No PCIe Intc node found\n"); - return -ENODEV; - } + val = dw_pcie_readl_dbi(pci, PCIE_MSI_INTR0_STATUS + + (index * MSI_REG_CTRL_BLOCK_SIZE)); + if (!val) + return 0; - dra7xx->irq_domain = irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX, - &intx_domain_ops, pp); - of_node_put(pcie_intc_node); - if (!dra7xx->irq_domain) { - dev_err(dev, "Failed to get a INTx IRQ domain\n"); - return -ENODEV; + pos = find_next_bit(&val, MAX_MSI_IRQS_PER_CTRL, 0); + while (pos != MAX_MSI_IRQS_PER_CTRL) { + irq = irq_find_mapping(pp->irq_domain, + (index * MAX_MSI_IRQS_PER_CTRL) + pos); + generic_handle_irq(irq); + pos++; + pos = find_next_bit(&val, MAX_MSI_IRQS_PER_CTRL, pos); } - return 0; + return 1; } -static irqreturn_t dra7xx_pcie_msi_irq_handler(int irq, void *arg) +static void dra7xx_pcie_handle_msi_irq(struct pcie_port *pp) { - struct dra7xx_pcie *dra7xx = arg; - struct dw_pcie *pci = dra7xx->pci; - struct pcie_port *pp = &pci->pp; + struct dw_pcie *pci = to_dw_pcie_from_pp(pp); + int ret, i, count, num_ctrls; + + num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL; + + /** + * Need to make sure all MSI status bits read 0 before exiting. + * Else, new MSI IRQs are not registered by the wrapper. Have an + * upperbound for the loop and exit the IRQ in case of IRQ flood + * to avoid locking up system in interrupt context. 
+ */ + count = 0; + do { + ret = 0; + + for (i = 0; i < num_ctrls; i++) + ret |= dra7xx_pcie_handle_msi(pp, i); + count++; + } while (ret && count <= 1000); + + if (count > 1000) + dev_warn_ratelimited(pci->dev, + "Too many MSI IRQs to handle\n"); +} + +static void dra7xx_pcie_msi_irq_handler(struct irq_desc *desc) +{ + struct irq_chip *chip = irq_desc_get_chip(desc); + struct dra7xx_pcie *dra7xx; + struct dw_pcie *pci; + struct pcie_port *pp; unsigned long reg; u32 virq, bit; + chained_irq_enter(chip, desc); + + pp = irq_desc_get_handler_data(desc); + pci = to_dw_pcie_from_pp(pp); + dra7xx = to_dra7xx_pcie(pci); + reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MSI); + dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MSI, reg); switch (reg) { case MSI: - dw_handle_msi_irq(pp); + dra7xx_pcie_handle_msi_irq(pp); break; case INTA: case INTB: @@ -283,9 +313,7 @@ static irqreturn_t dra7xx_pcie_msi_irq_handler(int irq, void *arg) break; } - dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MSI, reg); - - return IRQ_HANDLED; + chained_irq_exit(chip, desc); } static irqreturn_t dra7xx_pcie_irq_handler(int irq, void *arg) @@ -347,6 +375,145 @@ static irqreturn_t dra7xx_pcie_irq_handler(int irq, void *arg) return IRQ_HANDLED; } +static int dra7xx_pcie_init_irq_domain(struct pcie_port *pp) +{ + struct dw_pcie *pci = to_dw_pcie_from_pp(pp); + struct device *dev = pci->dev; + struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci); + struct device_node *node = dev->of_node; + struct device_node *pcie_intc_node = of_get_next_child(node, NULL); + + if (!pcie_intc_node) { + dev_err(dev, "No PCIe Intc node found\n"); + return -ENODEV; + } + + irq_set_chained_handler_and_data(pp->irq, dra7xx_pcie_msi_irq_handler, + pp); + dra7xx->irq_domain = irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX, + &intx_domain_ops, pp); + of_node_put(pcie_intc_node); + if (!dra7xx->irq_domain) { + dev_err(dev, "Failed to get a INTx IRQ domain\n"); + return -ENODEV; + } + + return 0; +} + +static void dra7xx_pcie_setup_msi_msg(struct irq_data *d, struct msi_msg *msg) +{ + struct pcie_port *pp = irq_data_get_irq_chip_data(d); + struct dw_pcie *pci = to_dw_pcie_from_pp(pp); + u64 msi_target; + + msi_target = (u64)pp->msi_data; + + msg->address_lo = lower_32_bits(msi_target); + msg->address_hi = upper_32_bits(msi_target); + + msg->data = d->hwirq; + + dev_dbg(pci->dev, "msi#%d address_hi %#x address_lo %#x\n", + (int)d->hwirq, msg->address_hi, msg->address_lo); +} + +static int dra7xx_pcie_msi_set_affinity(struct irq_data *d, + const struct cpumask *mask, + bool force) +{ + return -EINVAL; +} + +static void dra7xx_pcie_bottom_mask(struct irq_data *d) +{ + struct pcie_port *pp = irq_data_get_irq_chip_data(d); + struct dw_pcie *pci = to_dw_pcie_from_pp(pp); + unsigned int res, bit, ctrl; + unsigned long flags; + + raw_spin_lock_irqsave(&pp->lock, flags); + + ctrl = d->hwirq / MAX_MSI_IRQS_PER_CTRL; + res = ctrl * MSI_REG_CTRL_BLOCK_SIZE; + bit = d->hwirq % MAX_MSI_IRQS_PER_CTRL; + + pp->irq_mask[ctrl] |= BIT(bit); + dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_MASK + res, + pp->irq_mask[ctrl]); + + raw_spin_unlock_irqrestore(&pp->lock, flags); +} + +static void dra7xx_pcie_bottom_unmask(struct irq_data *d) +{ + struct pcie_port *pp = irq_data_get_irq_chip_data(d); + struct dw_pcie *pci = to_dw_pcie_from_pp(pp); + unsigned int res, bit, ctrl; + unsigned long flags; + + raw_spin_lock_irqsave(&pp->lock, flags); + + ctrl = d->hwirq / MAX_MSI_IRQS_PER_CTRL; + res = ctrl * MSI_REG_CTRL_BLOCK_SIZE; + bit = d->hwirq % 
MAX_MSI_IRQS_PER_CTRL; + + pp->irq_mask[ctrl] &= ~BIT(bit); + dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_MASK + res, + pp->irq_mask[ctrl]); + + raw_spin_unlock_irqrestore(&pp->lock, flags); +} + +static void dra7xx_pcie_bottom_ack(struct irq_data *d) +{ + struct pcie_port *pp = irq_data_get_irq_chip_data(d); + struct dw_pcie *pci = to_dw_pcie_from_pp(pp); + unsigned int res, bit, ctrl; + + ctrl = d->hwirq / MAX_MSI_IRQS_PER_CTRL; + res = ctrl * MSI_REG_CTRL_BLOCK_SIZE; + bit = d->hwirq % MAX_MSI_IRQS_PER_CTRL; + + dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_STATUS + res, BIT(bit)); +} + +static struct irq_chip dra7xx_pci_msi_bottom_irq_chip = { + .name = "DRA7XX-PCI-MSI", + .irq_ack = dra7xx_pcie_bottom_ack, + .irq_compose_msi_msg = dra7xx_pcie_setup_msi_msg, + .irq_set_affinity = dra7xx_pcie_msi_set_affinity, + .irq_mask = dra7xx_pcie_bottom_mask, + .irq_unmask = dra7xx_pcie_bottom_unmask, +}; + +static int dra7xx_pcie_msi_host_init(struct pcie_port *pp) +{ + struct dw_pcie *pci = to_dw_pcie_from_pp(pp); + u32 ctrl, num_ctrls; + + pp->msi_irq_chip = &dra7xx_pci_msi_bottom_irq_chip; + + num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL; + /* Initialize IRQ Status array */ + for (ctrl = 0; ctrl < num_ctrls; ctrl++) { + pp->irq_mask[ctrl] = ~0; + dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_MASK + + (ctrl * MSI_REG_CTRL_BLOCK_SIZE), + pp->irq_mask[ctrl]); + dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_ENABLE + + (ctrl * MSI_REG_CTRL_BLOCK_SIZE), + ~0); + } + + return dw_pcie_allocate_domains(pp); +} + +static const struct dw_pcie_host_ops dra7xx_pcie_host_ops = { + .host_init = dra7xx_pcie_host_init, + .msi_host_init = dra7xx_pcie_msi_host_init, +}; + static void dra7xx_pcie_ep_init(struct dw_pcie_ep *ep) { struct dw_pcie *pci = to_dw_pcie_from_ep(ep); @@ -467,14 +634,6 @@ static int __init dra7xx_add_pcie_port(struct dra7xx_pcie *dra7xx, return pp->irq; } - ret = devm_request_irq(dev, pp->irq, dra7xx_pcie_msi_irq_handler, - IRQF_SHARED | IRQF_NO_THREAD, - "dra7-pcie-msi", dra7xx); - if (ret) { - dev_err(dev, "failed to request irq\n"); - return ret; - } - ret = dra7xx_pcie_init_irq_domain(pp); if (ret < 0) return ret; diff --git a/drivers/pci/controller/dwc/pci-keystone.c b/drivers/pci/controller/dwc/pci-keystone.c index c8c702c494a2..790679fdfa48 100644 --- a/drivers/pci/controller/dwc/pci-keystone.c +++ b/drivers/pci/controller/dwc/pci-keystone.c @@ -959,6 +959,9 @@ static int ks_pcie_am654_raise_irq(struct dw_pcie_ep *ep, u8 func_no, case PCI_EPC_IRQ_MSI: dw_pcie_ep_raise_msi_irq(ep, func_no, interrupt_num); break; + case PCI_EPC_IRQ_MSIX: + dw_pcie_ep_raise_msix_irq(ep, func_no, interrupt_num); + break; default: dev_err(pci->dev, "UNKNOWN IRQ type\n"); return -EINVAL; @@ -970,7 +973,7 @@ static int ks_pcie_am654_raise_irq(struct dw_pcie_ep *ep, u8 func_no, static const struct pci_epc_features ks_pcie_am654_epc_features = { .linkup_notifier = false, .msi_capable = true, - .msix_capable = false, + .msix_capable = true, .reserved_bar = 1 << BAR_0 | 1 << BAR_1, .bar_fixed_64bit = 1 << BAR_0, .bar_fixed_size[2] = SZ_1M, diff --git a/drivers/pci/controller/dwc/pci-meson.c b/drivers/pci/controller/dwc/pci-meson.c index 3772b02a5c55..3715dceca1bf 100644 --- a/drivers/pci/controller/dwc/pci-meson.c +++ b/drivers/pci/controller/dwc/pci-meson.c @@ -66,7 +66,6 @@ #define PORT_CLK_RATE 100000000UL #define MAX_PAYLOAD_SIZE 256 #define MAX_READ_REQ_SIZE 256 -#define MESON_PCIE_PHY_POWERUP 0x1c #define PCIE_RESET_DELAY 500 #define PCIE_SHARED_RESET 1 #define PCIE_NORMAL_RESET 0 @@ -81,26 +80,19 @@ enum pcie_data_rate { 
struct meson_pcie_mem_res { void __iomem *elbi_base; void __iomem *cfg_base; - void __iomem *phy_base; }; struct meson_pcie_clk_res { struct clk *clk; - struct clk *mipi_gate; struct clk *port_clk; struct clk *general_clk; }; struct meson_pcie_rc_reset { - struct reset_control *phy; struct reset_control *port; struct reset_control *apb; }; -struct meson_pcie_param { - bool has_shared_phy; -}; - struct meson_pcie { struct dw_pcie pci; struct meson_pcie_mem_res mem_res; @@ -108,7 +100,6 @@ struct meson_pcie { struct meson_pcie_rc_reset mrst; struct gpio_desc *reset_gpio; struct phy *phy; - const struct meson_pcie_param *param; }; static struct reset_control *meson_pcie_get_reset(struct meson_pcie *mp, @@ -130,13 +121,6 @@ static int meson_pcie_get_resets(struct meson_pcie *mp) { struct meson_pcie_rc_reset *mrst = &mp->mrst; - if (!mp->param->has_shared_phy) { - mrst->phy = meson_pcie_get_reset(mp, "phy", PCIE_SHARED_RESET); - if (IS_ERR(mrst->phy)) - return PTR_ERR(mrst->phy); - reset_control_deassert(mrst->phy); - } - mrst->port = meson_pcie_get_reset(mp, "port", PCIE_NORMAL_RESET); if (IS_ERR(mrst->port)) return PTR_ERR(mrst->port); @@ -162,22 +146,6 @@ static void __iomem *meson_pcie_get_mem(struct platform_device *pdev, return devm_ioremap_resource(dev, res); } -static void __iomem *meson_pcie_get_mem_shared(struct platform_device *pdev, - struct meson_pcie *mp, - const char *id) -{ - struct device *dev = mp->pci.dev; - struct resource *res; - - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, id); - if (!res) { - dev_err(dev, "No REG resource %s\n", id); - return ERR_PTR(-ENXIO); - } - - return devm_ioremap(dev, res->start, resource_size(res)); -} - static int meson_pcie_get_mems(struct platform_device *pdev, struct meson_pcie *mp) { @@ -189,14 +157,6 @@ static int meson_pcie_get_mems(struct platform_device *pdev, if (IS_ERR(mp->mem_res.cfg_base)) return PTR_ERR(mp->mem_res.cfg_base); - /* Meson AXG SoC has two PCI controllers use same phy register */ - if (!mp->param->has_shared_phy) { - mp->mem_res.phy_base = - meson_pcie_get_mem_shared(pdev, mp, "phy"); - if (IS_ERR(mp->mem_res.phy_base)) - return PTR_ERR(mp->mem_res.phy_base); - } - return 0; } @@ -204,37 +164,33 @@ static int meson_pcie_power_on(struct meson_pcie *mp) { int ret = 0; - if (mp->param->has_shared_phy) { - ret = phy_init(mp->phy); - if (ret) - return ret; + ret = phy_init(mp->phy); + if (ret) + return ret; - ret = phy_power_on(mp->phy); - if (ret) { - phy_exit(mp->phy); - return ret; - } - } else - writel(MESON_PCIE_PHY_POWERUP, mp->mem_res.phy_base); + ret = phy_power_on(mp->phy); + if (ret) { + phy_exit(mp->phy); + return ret; + } return 0; } +static void meson_pcie_power_off(struct meson_pcie *mp) +{ + phy_power_off(mp->phy); + phy_exit(mp->phy); +} + static int meson_pcie_reset(struct meson_pcie *mp) { struct meson_pcie_rc_reset *mrst = &mp->mrst; int ret = 0; - if (mp->param->has_shared_phy) { - ret = phy_reset(mp->phy); - if (ret) - return ret; - } else { - reset_control_assert(mrst->phy); - udelay(PCIE_RESET_DELAY); - reset_control_deassert(mrst->phy); - udelay(PCIE_RESET_DELAY); - } + ret = phy_reset(mp->phy); + if (ret) + return ret; reset_control_assert(mrst->port); reset_control_assert(mrst->apb); @@ -286,12 +242,6 @@ static int meson_pcie_probe_clocks(struct meson_pcie *mp) if (IS_ERR(res->port_clk)) return PTR_ERR(res->port_clk); - if (!mp->param->has_shared_phy) { - res->mipi_gate = meson_pcie_probe_clock(dev, "mipi", 0); - if (IS_ERR(res->mipi_gate)) - return PTR_ERR(res->mipi_gate); - } - 
res->general_clk = meson_pcie_probe_clock(dev, "general", 0); if (IS_ERR(res->general_clk)) return PTR_ERR(res->general_clk); @@ -562,7 +512,6 @@ static const struct dw_pcie_ops dw_pcie_ops = { static int meson_pcie_probe(struct platform_device *pdev) { - const struct meson_pcie_param *match_data; struct device *dev = &pdev->dev; struct dw_pcie *pci; struct meson_pcie *mp; @@ -576,17 +525,10 @@ static int meson_pcie_probe(struct platform_device *pdev) pci->dev = dev; pci->ops = &dw_pcie_ops; - match_data = of_device_get_match_data(dev); - if (!match_data) { - dev_err(dev, "failed to get match data\n"); - return -ENODEV; - } - mp->param = match_data; - - if (mp->param->has_shared_phy) { - mp->phy = devm_phy_get(dev, "pcie"); - if (IS_ERR(mp->phy)) - return PTR_ERR(mp->phy); + mp->phy = devm_phy_get(dev, "pcie"); + if (IS_ERR(mp->phy)) { + dev_err(dev, "get phy failed, %ld\n", PTR_ERR(mp->phy)); + return PTR_ERR(mp->phy); } mp->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW); @@ -636,30 +578,16 @@ static int meson_pcie_probe(struct platform_device *pdev) return 0; err_phy: - if (mp->param->has_shared_phy) { - phy_power_off(mp->phy); - phy_exit(mp->phy); - } - + meson_pcie_power_off(mp); return ret; } -static struct meson_pcie_param meson_pcie_axg_param = { - .has_shared_phy = false, -}; - -static struct meson_pcie_param meson_pcie_g12a_param = { - .has_shared_phy = true, -}; - static const struct of_device_id meson_pcie_of_match[] = { { .compatible = "amlogic,axg-pcie", - .data = &meson_pcie_axg_param, }, { .compatible = "amlogic,g12a-pcie", - .data = &meson_pcie_g12a_param, }, {}, }; diff --git a/drivers/pci/controller/dwc/pcie-designware-ep.c b/drivers/pci/controller/dwc/pcie-designware-ep.c index cfeccd7e9fff..1cdcbd102ce8 100644 --- a/drivers/pci/controller/dwc/pcie-designware-ep.c +++ b/drivers/pci/controller/dwc/pcie-designware-ep.c @@ -18,6 +18,15 @@ void dw_pcie_ep_linkup(struct dw_pcie_ep *ep) pci_epc_linkup(epc); } +EXPORT_SYMBOL_GPL(dw_pcie_ep_linkup); + +void dw_pcie_ep_init_notify(struct dw_pcie_ep *ep) +{ + struct pci_epc *epc = ep->epc; + + pci_epc_init_notify(epc); +} +EXPORT_SYMBOL_GPL(dw_pcie_ep_init_notify); static void __dw_pcie_ep_reset_bar(struct dw_pcie *pci, enum pci_barno bar, int flags) @@ -125,6 +134,7 @@ static void dw_pcie_ep_clear_bar(struct pci_epc *epc, u8 func_no, dw_pcie_disable_atu(pci, atu_index, DW_PCIE_REGION_INBOUND); clear_bit(atu_index, ep->ib_window_map); + ep->epf_bar[bar] = NULL; } static int dw_pcie_ep_set_bar(struct pci_epc *epc, u8 func_no, @@ -158,6 +168,7 @@ static int dw_pcie_ep_set_bar(struct pci_epc *epc, u8 func_no, dw_pcie_writel_dbi(pci, reg + 4, 0); } + ep->epf_bar[bar] = epf_bar; dw_pcie_dbi_ro_wr_dis(pci); return 0; @@ -269,7 +280,8 @@ static int dw_pcie_ep_get_msix(struct pci_epc *epc, u8 func_no) return val; } -static int dw_pcie_ep_set_msix(struct pci_epc *epc, u8 func_no, u16 interrupts) +static int dw_pcie_ep_set_msix(struct pci_epc *epc, u8 func_no, u16 interrupts, + enum pci_barno bir, u32 offset) { struct dw_pcie_ep *ep = epc_get_drvdata(epc); struct dw_pcie *pci = to_dw_pcie_from_ep(ep); @@ -278,12 +290,22 @@ static int dw_pcie_ep_set_msix(struct pci_epc *epc, u8 func_no, u16 interrupts) if (!ep->msix_cap) return -EINVAL; + dw_pcie_dbi_ro_wr_en(pci); + reg = ep->msix_cap + PCI_MSIX_FLAGS; val = dw_pcie_readw_dbi(pci, reg); val &= ~PCI_MSIX_FLAGS_QSIZE; val |= interrupts; - dw_pcie_dbi_ro_wr_en(pci); dw_pcie_writew_dbi(pci, reg, val); + + reg = ep->msix_cap + PCI_MSIX_TABLE; + val = offset | bir; + 
dw_pcie_writel_dbi(pci, reg, val); + + reg = ep->msix_cap + PCI_MSIX_PBA; + val = (offset + (interrupts * PCI_MSIX_ENTRY_SIZE)) | bir; + dw_pcie_writel_dbi(pci, reg, val); + dw_pcie_dbi_ro_wr_dis(pci); return 0; @@ -409,55 +431,41 @@ int dw_pcie_ep_raise_msix_irq(struct dw_pcie_ep *ep, u8 func_no, u16 interrupt_num) { struct dw_pcie *pci = to_dw_pcie_from_ep(ep); + struct pci_epf_msix_tbl *msix_tbl; struct pci_epc *epc = ep->epc; - u16 tbl_offset, bir; - u32 bar_addr_upper, bar_addr_lower; - u32 msg_addr_upper, msg_addr_lower; + struct pci_epf_bar *epf_bar; u32 reg, msg_data, vec_ctrl; - u64 tbl_addr, msg_addr, reg_u64; - void __iomem *msix_tbl; + unsigned int aligned_offset; + u32 tbl_offset; + u64 msg_addr; int ret; + u8 bir; reg = ep->msix_cap + PCI_MSIX_TABLE; tbl_offset = dw_pcie_readl_dbi(pci, reg); bir = (tbl_offset & PCI_MSIX_TABLE_BIR); tbl_offset &= PCI_MSIX_TABLE_OFFSET; - reg = PCI_BASE_ADDRESS_0 + (4 * bir); - bar_addr_upper = 0; - bar_addr_lower = dw_pcie_readl_dbi(pci, reg); - reg_u64 = (bar_addr_lower & PCI_BASE_ADDRESS_MEM_TYPE_MASK); - if (reg_u64 == PCI_BASE_ADDRESS_MEM_TYPE_64) - bar_addr_upper = dw_pcie_readl_dbi(pci, reg + 4); - - tbl_addr = ((u64) bar_addr_upper) << 32 | bar_addr_lower; - tbl_addr += (tbl_offset + ((interrupt_num - 1) * PCI_MSIX_ENTRY_SIZE)); - tbl_addr &= PCI_BASE_ADDRESS_MEM_MASK; - - msix_tbl = ioremap(ep->phys_base + tbl_addr, - PCI_MSIX_ENTRY_SIZE); - if (!msix_tbl) - return -EINVAL; - - msg_addr_lower = readl(msix_tbl + PCI_MSIX_ENTRY_LOWER_ADDR); - msg_addr_upper = readl(msix_tbl + PCI_MSIX_ENTRY_UPPER_ADDR); - msg_addr = ((u64) msg_addr_upper) << 32 | msg_addr_lower; - msg_data = readl(msix_tbl + PCI_MSIX_ENTRY_DATA); - vec_ctrl = readl(msix_tbl + PCI_MSIX_ENTRY_VECTOR_CTRL); + epf_bar = ep->epf_bar[bir]; + msix_tbl = epf_bar->addr; + msix_tbl = (struct pci_epf_msix_tbl *)((char *)msix_tbl + tbl_offset); - iounmap(msix_tbl); + msg_addr = msix_tbl[(interrupt_num - 1)].msg_addr; + msg_data = msix_tbl[(interrupt_num - 1)].msg_data; + vec_ctrl = msix_tbl[(interrupt_num - 1)].vector_ctrl; if (vec_ctrl & PCI_MSIX_ENTRY_CTRL_MASKBIT) { dev_dbg(pci->dev, "MSI-X entry ctrl set\n"); return -EPERM; } - ret = dw_pcie_ep_map_addr(epc, func_no, ep->msi_mem_phys, msg_addr, + aligned_offset = msg_addr & (epc->mem->page_size - 1); + ret = dw_pcie_ep_map_addr(epc, func_no, ep->msi_mem_phys, msg_addr, epc->mem->page_size); if (ret) return ret; - writel(msg_data, ep->msi_mem); + writel(msg_data, ep->msi_mem + aligned_offset); dw_pcie_ep_unmap_addr(epc, func_no, ep->msi_mem_phys); @@ -492,19 +500,54 @@ static unsigned int dw_pcie_ep_find_ext_capability(struct dw_pcie *pci, int cap) return 0; } -int dw_pcie_ep_init(struct dw_pcie_ep *ep) +int dw_pcie_ep_init_complete(struct dw_pcie_ep *ep) { + struct dw_pcie *pci = to_dw_pcie_from_ep(ep); + unsigned int offset; + unsigned int nbars; + u8 hdr_type; + u32 reg; int i; + + hdr_type = dw_pcie_readb_dbi(pci, PCI_HEADER_TYPE); + if (hdr_type != PCI_HEADER_TYPE_NORMAL) { + dev_err(pci->dev, + "PCIe controller is not set to EP mode (hdr_type:0x%x)!\n", + hdr_type); + return -EIO; + } + + ep->msi_cap = dw_pcie_find_capability(pci, PCI_CAP_ID_MSI); + + ep->msix_cap = dw_pcie_find_capability(pci, PCI_CAP_ID_MSIX); + + offset = dw_pcie_ep_find_ext_capability(pci, PCI_EXT_CAP_ID_REBAR); + if (offset) { + reg = dw_pcie_readl_dbi(pci, offset + PCI_REBAR_CTRL); + nbars = (reg & PCI_REBAR_CTRL_NBAR_MASK) >> + PCI_REBAR_CTRL_NBAR_SHIFT; + + dw_pcie_dbi_ro_wr_en(pci); + for (i = 0; i < nbars; i++, offset += PCI_REBAR_CTRL) + 
dw_pcie_writel_dbi(pci, offset + PCI_REBAR_CAP, 0x0); + dw_pcie_dbi_ro_wr_dis(pci); + } + + dw_pcie_setup(pci); + + return 0; +} +EXPORT_SYMBOL_GPL(dw_pcie_ep_init_complete); + +int dw_pcie_ep_init(struct dw_pcie_ep *ep) +{ int ret; - u32 reg; void *addr; - u8 hdr_type; - unsigned int nbars; - unsigned int offset; struct pci_epc *epc; struct dw_pcie *pci = to_dw_pcie_from_ep(ep); struct device *dev = pci->dev; struct device_node *np = dev->of_node; + const struct pci_epc_features *epc_features; if (!pci->dbi_base || !pci->dbi_base2) { dev_err(dev, "dbi_base/dbi_base2 is not populated\n"); @@ -563,13 +606,6 @@ int dw_pcie_ep_init(struct dw_pcie_ep *ep) if (ep->ops->ep_init) ep->ops->ep_init(ep); - hdr_type = dw_pcie_readb_dbi(pci, PCI_HEADER_TYPE); - if (hdr_type != PCI_HEADER_TYPE_NORMAL) { - dev_err(pci->dev, "PCIe controller is not set to EP mode (hdr_type:0x%x)!\n", - hdr_type); - return -EIO; - } - ret = of_property_read_u8(np, "max-functions", &epc->max_functions); if (ret < 0) epc->max_functions = 1; @@ -587,23 +623,13 @@ int dw_pcie_ep_init(struct dw_pcie_ep *ep) dev_err(dev, "Failed to reserve memory for MSI/MSI-X\n"); return -ENOMEM; } - ep->msi_cap = dw_pcie_find_capability(pci, PCI_CAP_ID_MSI); - - ep->msix_cap = dw_pcie_find_capability(pci, PCI_CAP_ID_MSIX); - - offset = dw_pcie_ep_find_ext_capability(pci, PCI_EXT_CAP_ID_REBAR); - if (offset) { - reg = dw_pcie_readl_dbi(pci, offset + PCI_REBAR_CTRL); - nbars = (reg & PCI_REBAR_CTRL_NBAR_MASK) >> - PCI_REBAR_CTRL_NBAR_SHIFT; - dw_pcie_dbi_ro_wr_en(pci); - for (i = 0; i < nbars; i++, offset += PCI_REBAR_CTRL) - dw_pcie_writel_dbi(pci, offset + PCI_REBAR_CAP, 0x0); - dw_pcie_dbi_ro_wr_dis(pci); + if (ep->ops->get_features) { + epc_features = ep->ops->get_features(ep); + if (epc_features->core_init_notifier) + return 0; } - dw_pcie_setup(pci); - - return 0; + return dw_pcie_ep_init_complete(ep); } +EXPORT_SYMBOL_GPL(dw_pcie_ep_init); diff --git a/drivers/pci/controller/dwc/pcie-designware.h b/drivers/pci/controller/dwc/pcie-designware.h index a22ea5982817..d6e1f397e6b0 100644 --- a/drivers/pci/controller/dwc/pcie-designware.h +++ b/drivers/pci/controller/dwc/pcie-designware.h @@ -233,6 +233,7 @@ struct dw_pcie_ep { phys_addr_t msi_mem_phys; u8 msi_cap; /* MSI capability offset */ u8 msix_cap; /* MSI-X capability offset */ + struct pci_epf_bar *epf_bar[PCI_STD_NUM_BARS]; }; struct dw_pcie_ops { @@ -411,6 +412,8 @@ static inline int dw_pcie_allocate_domains(struct pcie_port *pp) #ifdef CONFIG_PCIE_DW_EP void dw_pcie_ep_linkup(struct dw_pcie_ep *ep); int dw_pcie_ep_init(struct dw_pcie_ep *ep); +int dw_pcie_ep_init_complete(struct dw_pcie_ep *ep); +void dw_pcie_ep_init_notify(struct dw_pcie_ep *ep); void dw_pcie_ep_exit(struct dw_pcie_ep *ep); int dw_pcie_ep_raise_legacy_irq(struct dw_pcie_ep *ep, u8 func_no); int dw_pcie_ep_raise_msi_irq(struct dw_pcie_ep *ep, u8 func_no, @@ -428,6 +431,15 @@ static inline int dw_pcie_ep_init(struct dw_pcie_ep *ep) return 0; } +static inline int dw_pcie_ep_init_complete(struct dw_pcie_ep *ep) +{ + return 0; +} + +static inline void dw_pcie_ep_init_notify(struct dw_pcie_ep *ep) +{ +} + static inline void dw_pcie_ep_exit(struct dw_pcie_ep *ep) { } diff --git a/drivers/pci/controller/dwc/pcie-qcom.c b/drivers/pci/controller/dwc/pcie-qcom.c index 5ea527a6bd9f..138e1a2d21cc 100644 --- a/drivers/pci/controller/dwc/pcie-qcom.c +++ b/drivers/pci/controller/dwc/pcie-qcom.c @@ -1439,7 +1439,13 @@ static void qcom_fixup_class(struct pci_dev *dev) { dev->class = PCI_CLASS_BRIDGE_PCI << 8; } 
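/*
 * [Editor's note] The hunk below replaces the PCI_ANY_ID wildcard with the
 * explicit device IDs of the Qualcomm root ports. The wildcard version also
 * rewrote the class code of endpoint devices that merely share the vendor
 * ID (e.g. Qualcomm WLAN chips); matching only the known bridge IDs keeps
 * the fixup from touching them.
 */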
-DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, PCI_ANY_ID, qcom_fixup_class); +DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0101, qcom_fixup_class); +DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0104, qcom_fixup_class); +DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0106, qcom_fixup_class); +DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0107, qcom_fixup_class); +DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0302, qcom_fixup_class); +DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x1000, qcom_fixup_class); +DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x1001, qcom_fixup_class); static struct platform_driver qcom_pcie_driver = { .probe = qcom_pcie_probe, diff --git a/drivers/pci/controller/dwc/pcie-tegra194.c b/drivers/pci/controller/dwc/pcie-tegra194.c index cbe95f0ea0ca..ae30a2fd3716 100644 --- a/drivers/pci/controller/dwc/pcie-tegra194.c +++ b/drivers/pci/controller/dwc/pcie-tegra194.c @@ -11,6 +11,7 @@ #include <linux/debugfs.h> #include <linux/delay.h> #include <linux/gpio.h> +#include <linux/gpio/consumer.h> #include <linux/interrupt.h> #include <linux/iopoll.h> #include <linux/kernel.h> @@ -53,6 +54,7 @@ #define APPL_INTR_EN_L0_0_LINK_STATE_INT_EN BIT(0) #define APPL_INTR_EN_L0_0_MSI_RCV_INT_EN BIT(4) #define APPL_INTR_EN_L0_0_INT_INT_EN BIT(8) +#define APPL_INTR_EN_L0_0_PCI_CMD_EN_INT_EN BIT(15) #define APPL_INTR_EN_L0_0_CDM_REG_CHK_INT_EN BIT(19) #define APPL_INTR_EN_L0_0_SYS_INTR_EN BIT(30) #define APPL_INTR_EN_L0_0_SYS_MSI_INTR_EN BIT(31) @@ -60,19 +62,26 @@ #define APPL_INTR_STATUS_L0 0xC #define APPL_INTR_STATUS_L0_LINK_STATE_INT BIT(0) #define APPL_INTR_STATUS_L0_INT_INT BIT(8) +#define APPL_INTR_STATUS_L0_PCI_CMD_EN_INT BIT(15) +#define APPL_INTR_STATUS_L0_PEX_RST_INT BIT(16) #define APPL_INTR_STATUS_L0_CDM_REG_CHK_INT BIT(18) #define APPL_INTR_EN_L1_0_0 0x1C #define APPL_INTR_EN_L1_0_0_LINK_REQ_RST_NOT_INT_EN BIT(1) +#define APPL_INTR_EN_L1_0_0_RDLH_LINK_UP_INT_EN BIT(3) +#define APPL_INTR_EN_L1_0_0_HOT_RESET_DONE_INT_EN BIT(30) #define APPL_INTR_STATUS_L1_0_0 0x20 #define APPL_INTR_STATUS_L1_0_0_LINK_REQ_RST_NOT_CHGED BIT(1) +#define APPL_INTR_STATUS_L1_0_0_RDLH_LINK_UP_CHGED BIT(3) +#define APPL_INTR_STATUS_L1_0_0_HOT_RESET_DONE BIT(30) #define APPL_INTR_STATUS_L1_1 0x2C #define APPL_INTR_STATUS_L1_2 0x30 #define APPL_INTR_STATUS_L1_3 0x34 #define APPL_INTR_STATUS_L1_6 0x3C #define APPL_INTR_STATUS_L1_7 0x40 +#define APPL_INTR_STATUS_L1_15_CFG_BME_CHGED BIT(1) #define APPL_INTR_EN_L1_8_0 0x44 #define APPL_INTR_EN_L1_8_BW_MGT_INT_EN BIT(2) @@ -103,8 +112,12 @@ #define APPL_INTR_STATUS_L1_18_CDM_REG_CHK_CMP_ERR BIT(1) #define APPL_INTR_STATUS_L1_18_CDM_REG_CHK_LOGIC_ERR BIT(0) +#define APPL_MSI_CTRL_1 0xAC + #define APPL_MSI_CTRL_2 0xB0 +#define APPL_LEGACY_INTX 0xB8 + #define APPL_LTR_MSG_1 0xC4 #define LTR_MSG_REQ BIT(15) #define LTR_MST_NO_SNOOP_SHIFT 16 @@ -205,6 +218,13 @@ #define AMBA_ERROR_RESPONSE_CRS_OKAY_FFFFFFFF 1 #define AMBA_ERROR_RESPONSE_CRS_OKAY_FFFF0001 2 +#define MSIX_ADDR_MATCH_LOW_OFF 0x940 +#define MSIX_ADDR_MATCH_LOW_OFF_EN BIT(0) +#define MSIX_ADDR_MATCH_LOW_OFF_MASK GENMASK(31, 2) + +#define MSIX_ADDR_MATCH_HIGH_OFF 0x944 +#define MSIX_ADDR_MATCH_HIGH_OFF_MASK GENMASK(31, 0) + #define PORT_LOGIC_MSIX_DOORBELL 0x948 #define CAP_SPCIE_CAP_OFF 0x154 @@ -223,6 +243,13 @@ #define GEN3_CORE_CLK_FREQ 250000000 #define GEN4_CORE_CLK_FREQ 500000000 +#define LTR_MSG_TIMEOUT (100 * 1000) + +#define PERST_DEBOUNCE_TIME (5 * 1000) + +#define EP_STATE_DISABLED 0 +#define EP_STATE_ENABLED 1 + static const unsigned int pcie_gen_freq[] = { GEN1_CORE_CLK_FREQ, 
GEN2_CORE_CLK_FREQ, @@ -260,6 +287,8 @@ struct tegra_pcie_dw { struct dw_pcie pci; struct tegra_bpmp *bpmp; + enum dw_pcie_device_mode mode; + bool supports_clkreq; bool enable_cdm_check; bool link_state; @@ -283,6 +312,16 @@ struct tegra_pcie_dw { struct phy **phys; struct dentry *debugfs; + + /* Endpoint mode specific */ + struct gpio_desc *pex_rst_gpiod; + struct gpio_desc *pex_refclk_sel_gpiod; + unsigned int pex_rst_irq; + int ep_state; +}; + +struct tegra_pcie_dw_of_data { + enum dw_pcie_device_mode mode; }; static inline struct tegra_pcie_dw *to_tegra_pcie(struct dw_pcie *pci) @@ -339,8 +378,9 @@ static void apply_bad_link_workaround(struct pcie_port *pp) } } -static irqreturn_t tegra_pcie_rp_irq_handler(struct tegra_pcie_dw *pcie) +static irqreturn_t tegra_pcie_rp_irq_handler(int irq, void *arg) { + struct tegra_pcie_dw *pcie = arg; struct dw_pcie *pci = &pcie->pci; struct pcie_port *pp = &pci->pp; u32 val, tmp; @@ -411,11 +451,121 @@ static irqreturn_t tegra_pcie_rp_irq_handler(struct tegra_pcie_dw *pcie) return IRQ_HANDLED; } -static irqreturn_t tegra_pcie_irq_handler(int irq, void *arg) +static void pex_ep_event_hot_rst_done(struct tegra_pcie_dw *pcie) +{ + u32 val; + + appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L0); + appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_0_0); + appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_1); + appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_2); + appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_3); + appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_6); + appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_7); + appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_8_0); + appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_9); + appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_10); + appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_11); + appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_13); + appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_14); + appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_15); + appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_17); + appl_writel(pcie, 0xFFFFFFFF, APPL_MSI_CTRL_2); + + val = appl_readl(pcie, APPL_CTRL); + val |= APPL_CTRL_LTSSM_EN; + appl_writel(pcie, val, APPL_CTRL); +} + +static irqreturn_t tegra_pcie_ep_irq_thread(int irq, void *arg) { struct tegra_pcie_dw *pcie = arg; + struct dw_pcie *pci = &pcie->pci; + u32 val, speed; + + speed = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKSTA) & + PCI_EXP_LNKSTA_CLS; + clk_set_rate(pcie->core_clk, pcie_gen_freq[speed - 1]); - return tegra_pcie_rp_irq_handler(pcie); + /* If EP doesn't advertise L1SS, just return */ + val = dw_pcie_readl_dbi(pci, pcie->cfg_link_cap_l1sub); + if (!(val & (PCI_L1SS_CAP_ASPM_L1_1 | PCI_L1SS_CAP_ASPM_L1_2))) + return IRQ_HANDLED; + + /* Check if BME is set to '1' */ + val = dw_pcie_readl_dbi(pci, PCI_COMMAND); + if (val & PCI_COMMAND_MASTER) { + ktime_t timeout; + + /* 110us for both snoop and no-snoop */ + val = 110 | (2 << PCI_LTR_SCALE_SHIFT) | LTR_MSG_REQ; + val |= (val << LTR_MST_NO_SNOOP_SHIFT); + appl_writel(pcie, val, APPL_LTR_MSG_1); + + /* Send LTR upstream */ + val = appl_readl(pcie, APPL_LTR_MSG_2); + val |= APPL_LTR_MSG_2_LTR_MSG_REQ_STATE; + appl_writel(pcie, val, APPL_LTR_MSG_2); + + timeout = ktime_add_us(ktime_get(), LTR_MSG_TIMEOUT); + for (;;) { + val = appl_readl(pcie, APPL_LTR_MSG_2); + if (!(val & APPL_LTR_MSG_2_LTR_MSG_REQ_STATE)) + break; + if (ktime_after(ktime_get(), timeout)) + break; + usleep_range(1000, 1100); + } + if (val & APPL_LTR_MSG_2_LTR_MSG_REQ_STATE) + dev_err(pcie->dev, 
"Failed to send LTR message\n"); + } + + return IRQ_HANDLED; +} + +static irqreturn_t tegra_pcie_ep_hard_irq(int irq, void *arg) +{ + struct tegra_pcie_dw *pcie = arg; + struct dw_pcie_ep *ep = &pcie->pci.ep; + int spurious = 1; + u32 val, tmp; + + val = appl_readl(pcie, APPL_INTR_STATUS_L0); + if (val & APPL_INTR_STATUS_L0_LINK_STATE_INT) { + val = appl_readl(pcie, APPL_INTR_STATUS_L1_0_0); + appl_writel(pcie, val, APPL_INTR_STATUS_L1_0_0); + + if (val & APPL_INTR_STATUS_L1_0_0_HOT_RESET_DONE) + pex_ep_event_hot_rst_done(pcie); + + if (val & APPL_INTR_STATUS_L1_0_0_RDLH_LINK_UP_CHGED) { + tmp = appl_readl(pcie, APPL_LINK_STATUS); + if (tmp & APPL_LINK_STATUS_RDLH_LINK_UP) { + dev_dbg(pcie->dev, "Link is up with Host\n"); + dw_pcie_ep_linkup(ep); + } + } + + spurious = 0; + } + + if (val & APPL_INTR_STATUS_L0_PCI_CMD_EN_INT) { + val = appl_readl(pcie, APPL_INTR_STATUS_L1_15); + appl_writel(pcie, val, APPL_INTR_STATUS_L1_15); + + if (val & APPL_INTR_STATUS_L1_15_CFG_BME_CHGED) + return IRQ_WAKE_THREAD; + + spurious = 0; + } + + if (spurious) { + dev_warn(pcie->dev, "Random interrupt (STATUS = 0x%08X)\n", + val); + appl_writel(pcie, val, APPL_INTR_STATUS_L0); + } + + return IRQ_HANDLED; } static int tegra_pcie_dw_rd_own_conf(struct pcie_port *pp, int where, int size, @@ -884,8 +1034,26 @@ static void tegra_pcie_set_msi_vec_num(struct pcie_port *pp) pp->num_vectors = MAX_MSI_IRQS; } +static int tegra_pcie_dw_start_link(struct dw_pcie *pci) +{ + struct tegra_pcie_dw *pcie = to_tegra_pcie(pci); + + enable_irq(pcie->pex_rst_irq); + + return 0; +} + +static void tegra_pcie_dw_stop_link(struct dw_pcie *pci) +{ + struct tegra_pcie_dw *pcie = to_tegra_pcie(pci); + + disable_irq(pcie->pex_rst_irq); +} + static const struct dw_pcie_ops tegra_dw_pcie_ops = { .link_up = tegra_pcie_dw_link_up, + .start_link = tegra_pcie_dw_start_link, + .stop_link = tegra_pcie_dw_stop_link, }; static struct dw_pcie_host_ops tegra_pcie_dw_host_ops = { @@ -986,6 +1154,40 @@ static int tegra_pcie_dw_parse_dt(struct tegra_pcie_dw *pcie) pcie->enable_cdm_check = of_property_read_bool(np, "snps,enable-cdm-check"); + if (pcie->mode == DW_PCIE_RC_TYPE) + return 0; + + /* Endpoint mode specific DT entries */ + pcie->pex_rst_gpiod = devm_gpiod_get(pcie->dev, "reset", GPIOD_IN); + if (IS_ERR(pcie->pex_rst_gpiod)) { + int err = PTR_ERR(pcie->pex_rst_gpiod); + const char *level = KERN_ERR; + + if (err == -EPROBE_DEFER) + level = KERN_DEBUG; + + dev_printk(level, pcie->dev, + dev_fmt("Failed to get PERST GPIO: %d\n"), + err); + return err; + } + + pcie->pex_refclk_sel_gpiod = devm_gpiod_get(pcie->dev, + "nvidia,refclk-select", + GPIOD_OUT_HIGH); + if (IS_ERR(pcie->pex_refclk_sel_gpiod)) { + int err = PTR_ERR(pcie->pex_refclk_sel_gpiod); + const char *level = KERN_ERR; + + if (err == -EPROBE_DEFER) + level = KERN_DEBUG; + + dev_printk(level, pcie->dev, + dev_fmt("Failed to get REFCLK select GPIOs: %d\n"), + err); + pcie->pex_refclk_sel_gpiod = NULL; + } + return 0; } @@ -1017,6 +1219,34 @@ static int tegra_pcie_bpmp_set_ctrl_state(struct tegra_pcie_dw *pcie, return tegra_bpmp_transfer(pcie->bpmp, &msg); } +static int tegra_pcie_bpmp_set_pll_state(struct tegra_pcie_dw *pcie, + bool enable) +{ + struct mrq_uphy_response resp; + struct tegra_bpmp_message msg; + struct mrq_uphy_request req; + + memset(&req, 0, sizeof(req)); + memset(&resp, 0, sizeof(resp)); + + if (enable) { + req.cmd = CMD_UPHY_PCIE_EP_CONTROLLER_PLL_INIT; + req.ep_ctrlr_pll_init.ep_controller = pcie->cid; + } else { + req.cmd = CMD_UPHY_PCIE_EP_CONTROLLER_PLL_OFF; + 
req.ep_ctrlr_pll_off.ep_controller = pcie->cid; + } + + memset(&msg, 0, sizeof(msg)); + msg.mrq = MRQ_UPHY; + msg.tx.data = &req; + msg.tx.size = sizeof(req); + msg.rx.data = &resp; + msg.rx.size = sizeof(resp); + + return tegra_bpmp_transfer(pcie->bpmp, &msg); +} + static void tegra_pcie_downstream_dev_to_D0(struct tegra_pcie_dw *pcie) { struct pcie_port *pp = &pcie->pci.pp; @@ -1427,8 +1657,396 @@ fail_pm_get_sync: return ret; } +static void pex_ep_event_pex_rst_assert(struct tegra_pcie_dw *pcie) +{ + u32 val; + int ret; + + if (pcie->ep_state == EP_STATE_DISABLED) + return; + + /* Disable LTSSM */ + val = appl_readl(pcie, APPL_CTRL); + val &= ~APPL_CTRL_LTSSM_EN; + appl_writel(pcie, val, APPL_CTRL); + + ret = readl_poll_timeout(pcie->appl_base + APPL_DEBUG, val, + ((val & APPL_DEBUG_LTSSM_STATE_MASK) >> + APPL_DEBUG_LTSSM_STATE_SHIFT) == + LTSSM_STATE_PRE_DETECT, + 1, LTSSM_TIMEOUT); + if (ret) + dev_err(pcie->dev, "Failed to go Detect state: %d\n", ret); + + reset_control_assert(pcie->core_rst); + + tegra_pcie_disable_phy(pcie); + + reset_control_assert(pcie->core_apb_rst); + + clk_disable_unprepare(pcie->core_clk); + + pm_runtime_put_sync(pcie->dev); + + ret = tegra_pcie_bpmp_set_pll_state(pcie, false); + if (ret) + dev_err(pcie->dev, "Failed to turn off UPHY: %d\n", ret); + + pcie->ep_state = EP_STATE_DISABLED; + dev_dbg(pcie->dev, "Uninitialization of endpoint is completed\n"); +} + +static void pex_ep_event_pex_rst_deassert(struct tegra_pcie_dw *pcie) +{ + struct dw_pcie *pci = &pcie->pci; + struct dw_pcie_ep *ep = &pci->ep; + struct device *dev = pcie->dev; + u32 val; + int ret; + + if (pcie->ep_state == EP_STATE_ENABLED) + return; + + ret = pm_runtime_get_sync(dev); + if (ret < 0) { + dev_err(dev, "Failed to get runtime sync for PCIe dev: %d\n", + ret); + return; + } + + ret = tegra_pcie_bpmp_set_pll_state(pcie, true); + if (ret) { + dev_err(dev, "Failed to init UPHY for PCIe EP: %d\n", ret); + goto fail_pll_init; + } + + ret = clk_prepare_enable(pcie->core_clk); + if (ret) { + dev_err(dev, "Failed to enable core clock: %d\n", ret); + goto fail_core_clk_enable; + } + + ret = reset_control_deassert(pcie->core_apb_rst); + if (ret) { + dev_err(dev, "Failed to deassert core APB reset: %d\n", ret); + goto fail_core_apb_rst; + } + + ret = tegra_pcie_enable_phy(pcie); + if (ret) { + dev_err(dev, "Failed to enable PHY: %d\n", ret); + goto fail_phy; + } + + /* Clear any stale interrupt statuses */ + appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L0); + appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_0_0); + appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_1); + appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_2); + appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_3); + appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_6); + appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_7); + appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_8_0); + appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_9); + appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_10); + appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_11); + appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_13); + appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_14); + appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_15); + appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_17); + + /* configure this core for EP mode operation */ + val = appl_readl(pcie, APPL_DM_TYPE); + val &= ~APPL_DM_TYPE_MASK; + val |= APPL_DM_TYPE_EP; + appl_writel(pcie, val, APPL_DM_TYPE); + + appl_writel(pcie, 0x0, APPL_CFG_SLCG_OVERRIDE); + + val = 
appl_readl(pcie, APPL_CTRL); + val |= APPL_CTRL_SYS_PRE_DET_STATE; + val |= APPL_CTRL_HW_HOT_RST_EN; + appl_writel(pcie, val, APPL_CTRL); + + val = appl_readl(pcie, APPL_CFG_MISC); + val |= APPL_CFG_MISC_SLV_EP_MODE; + val |= (APPL_CFG_MISC_ARCACHE_VAL << APPL_CFG_MISC_ARCACHE_SHIFT); + appl_writel(pcie, val, APPL_CFG_MISC); + + val = appl_readl(pcie, APPL_PINMUX); + val |= APPL_PINMUX_CLK_OUTPUT_IN_OVERRIDE_EN; + val |= APPL_PINMUX_CLK_OUTPUT_IN_OVERRIDE; + appl_writel(pcie, val, APPL_PINMUX); + + appl_writel(pcie, pcie->dbi_res->start & APPL_CFG_BASE_ADDR_MASK, + APPL_CFG_BASE_ADDR); + + appl_writel(pcie, pcie->atu_dma_res->start & + APPL_CFG_IATU_DMA_BASE_ADDR_MASK, + APPL_CFG_IATU_DMA_BASE_ADDR); + + val = appl_readl(pcie, APPL_INTR_EN_L0_0); + val |= APPL_INTR_EN_L0_0_SYS_INTR_EN; + val |= APPL_INTR_EN_L0_0_LINK_STATE_INT_EN; + val |= APPL_INTR_EN_L0_0_PCI_CMD_EN_INT_EN; + appl_writel(pcie, val, APPL_INTR_EN_L0_0); + + val = appl_readl(pcie, APPL_INTR_EN_L1_0_0); + val |= APPL_INTR_EN_L1_0_0_HOT_RESET_DONE_INT_EN; + val |= APPL_INTR_EN_L1_0_0_RDLH_LINK_UP_INT_EN; + appl_writel(pcie, val, APPL_INTR_EN_L1_0_0); + + reset_control_deassert(pcie->core_rst); + + if (pcie->update_fc_fixup) { + val = dw_pcie_readl_dbi(pci, CFG_TIMER_CTRL_MAX_FUNC_NUM_OFF); + val |= 0x1 << CFG_TIMER_CTRL_ACK_NAK_SHIFT; + dw_pcie_writel_dbi(pci, CFG_TIMER_CTRL_MAX_FUNC_NUM_OFF, val); + } + + config_gen3_gen4_eq_presets(pcie); + + init_host_aspm(pcie); + + /* Disable ASPM-L1SS advertisement if there is no CLKREQ routing */ + if (!pcie->supports_clkreq) { + disable_aspm_l11(pcie); + disable_aspm_l12(pcie); + } + + val = dw_pcie_readl_dbi(pci, GEN3_RELATED_OFF); + val &= ~GEN3_RELATED_OFF_GEN3_ZRXDC_NONCOMPL; + dw_pcie_writel_dbi(pci, GEN3_RELATED_OFF, val); + + /* Configure N_FTS & FTS */ + val = dw_pcie_readl_dbi(pci, PORT_LOGIC_ACK_F_ASPM_CTRL); + val &= ~(N_FTS_MASK << N_FTS_SHIFT); + val |= N_FTS_VAL << N_FTS_SHIFT; + dw_pcie_writel_dbi(pci, PORT_LOGIC_ACK_F_ASPM_CTRL, val); + + val = dw_pcie_readl_dbi(pci, PORT_LOGIC_GEN2_CTRL); + val &= ~FTS_MASK; + val |= FTS_VAL; + dw_pcie_writel_dbi(pci, PORT_LOGIC_GEN2_CTRL, val); + + /* Configure Max Speed from DT */ + if (pcie->max_speed && pcie->max_speed != -EINVAL) { + val = dw_pcie_readl_dbi(pci, pcie->pcie_cap_base + + PCI_EXP_LNKCAP); + val &= ~PCI_EXP_LNKCAP_SLS; + val |= pcie->max_speed; + dw_pcie_writel_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKCAP, + val); + } + + pcie->pcie_cap_base = dw_pcie_find_capability(&pcie->pci, + PCI_CAP_ID_EXP); + clk_set_rate(pcie->core_clk, GEN4_CORE_CLK_FREQ); + + val = (ep->msi_mem_phys & MSIX_ADDR_MATCH_LOW_OFF_MASK); + val |= MSIX_ADDR_MATCH_LOW_OFF_EN; + dw_pcie_writel_dbi(pci, MSIX_ADDR_MATCH_LOW_OFF, val); + val = (upper_32_bits(ep->msi_mem_phys) & MSIX_ADDR_MATCH_HIGH_OFF_MASK); + dw_pcie_writel_dbi(pci, MSIX_ADDR_MATCH_HIGH_OFF, val); + + ret = dw_pcie_ep_init_complete(ep); + if (ret) { + dev_err(dev, "Failed to complete initialization: %d\n", ret); + goto fail_init_complete; + } + + dw_pcie_ep_init_notify(ep); + + /* Enable LTSSM */ + val = appl_readl(pcie, APPL_CTRL); + val |= APPL_CTRL_LTSSM_EN; + appl_writel(pcie, val, APPL_CTRL); + + pcie->ep_state = EP_STATE_ENABLED; + dev_dbg(dev, "Initialization of endpoint is completed\n"); + + return; + +fail_init_complete: + reset_control_assert(pcie->core_rst); + tegra_pcie_disable_phy(pcie); +fail_phy: + reset_control_assert(pcie->core_apb_rst); +fail_core_apb_rst: + clk_disable_unprepare(pcie->core_clk); +fail_core_clk_enable: + tegra_pcie_bpmp_set_pll_state(pcie, false);
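/*
 * [Editor's note] The failure labels above unwind the bring-up sequence in
 * reverse order -- core reset, PHY, APB reset, core clock, UPHY PLL -- so
 * each goto target undoes exactly the steps that succeeded before the
 * failure; the final label below only drops the runtime PM reference taken
 * at the top of this function.
 */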
+fail_pll_init: + pm_runtime_put_sync(dev); +} + +static irqreturn_t tegra_pcie_ep_pex_rst_irq(int irq, void *arg) +{ + struct tegra_pcie_dw *pcie = arg; + + if (gpiod_get_value(pcie->pex_rst_gpiod)) + pex_ep_event_pex_rst_assert(pcie); + else + pex_ep_event_pex_rst_deassert(pcie); + + return IRQ_HANDLED; +} + +static int tegra_pcie_ep_raise_legacy_irq(struct tegra_pcie_dw *pcie, u16 irq) +{ + /* Tegra194 supports only INTA */ + if (irq > 1) + return -EINVAL; + + appl_writel(pcie, 1, APPL_LEGACY_INTX); + usleep_range(1000, 2000); + appl_writel(pcie, 0, APPL_LEGACY_INTX); + return 0; +} + +static int tegra_pcie_ep_raise_msi_irq(struct tegra_pcie_dw *pcie, u16 irq) +{ + if (unlikely(irq > 31)) + return -EINVAL; + + appl_writel(pcie, (1 << irq), APPL_MSI_CTRL_1); + + return 0; +} + +static int tegra_pcie_ep_raise_msix_irq(struct tegra_pcie_dw *pcie, u16 irq) +{ + struct dw_pcie_ep *ep = &pcie->pci.ep; + + writel(irq, ep->msi_mem); + + return 0; +} + +static int tegra_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no, + enum pci_epc_irq_type type, + u16 interrupt_num) +{ + struct dw_pcie *pci = to_dw_pcie_from_ep(ep); + struct tegra_pcie_dw *pcie = to_tegra_pcie(pci); + + switch (type) { + case PCI_EPC_IRQ_LEGACY: + return tegra_pcie_ep_raise_legacy_irq(pcie, interrupt_num); + + case PCI_EPC_IRQ_MSI: + return tegra_pcie_ep_raise_msi_irq(pcie, interrupt_num); + + case PCI_EPC_IRQ_MSIX: + return tegra_pcie_ep_raise_msix_irq(pcie, interrupt_num); + + default: + dev_err(pci->dev, "Unknown IRQ type\n"); + return -EPERM; + } + + return 0; +} + +static const struct pci_epc_features tegra_pcie_epc_features = { + .linkup_notifier = true, + .core_init_notifier = true, + .msi_capable = false, + .msix_capable = false, + .reserved_bar = 1 << BAR_2 | 1 << BAR_3 | 1 << BAR_4 | 1 << BAR_5, + .bar_fixed_64bit = 1 << BAR_0, + .bar_fixed_size[0] = SZ_1M, +}; + +static const struct pci_epc_features* +tegra_pcie_ep_get_features(struct dw_pcie_ep *ep) +{ + return &tegra_pcie_epc_features; +} + +static struct dw_pcie_ep_ops pcie_ep_ops = { + .raise_irq = tegra_pcie_ep_raise_irq, + .get_features = tegra_pcie_ep_get_features, +}; + +static int tegra_pcie_config_ep(struct tegra_pcie_dw *pcie, + struct platform_device *pdev) +{ + struct dw_pcie *pci = &pcie->pci; + struct device *dev = pcie->dev; + struct dw_pcie_ep *ep; + struct resource *res; + char *name; + int ret; + + ep = &pci->ep; + ep->ops = &pcie_ep_ops; + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "addr_space"); + if (!res) + return -EINVAL; + + ep->phys_base = res->start; + ep->addr_size = resource_size(res); + ep->page_size = SZ_64K; + + ret = gpiod_set_debounce(pcie->pex_rst_gpiod, PERST_DEBOUNCE_TIME); + if (ret < 0) { + dev_err(dev, "Failed to set PERST GPIO debounce time: %d\n", + ret); + return ret; + } + + ret = gpiod_to_irq(pcie->pex_rst_gpiod); + if (ret < 0) { + dev_err(dev, "Failed to get IRQ for PERST GPIO: %d\n", ret); + return ret; + } + pcie->pex_rst_irq = (unsigned int)ret; + + name = devm_kasprintf(dev, GFP_KERNEL, "tegra_pcie_%u_pex_rst_irq", + pcie->cid); + if (!name) { + dev_err(dev, "Failed to create PERST IRQ string\n"); + return -ENOMEM; + } + + irq_set_status_flags(pcie->pex_rst_irq, IRQ_NOAUTOEN); + + pcie->ep_state = EP_STATE_DISABLED; + + ret = devm_request_threaded_irq(dev, pcie->pex_rst_irq, NULL, + tegra_pcie_ep_pex_rst_irq, + IRQF_TRIGGER_RISING | + IRQF_TRIGGER_FALLING | IRQF_ONESHOT, + name, (void *)pcie); + if (ret < 0) { + dev_err(dev, "Failed to request IRQ for PERST: %d\n", ret); + return ret; + } + + 
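/*
 * [Editor's note] Because the PERST IRQ is flagged IRQ_NOAUTOEN above, it
 * stays masked until tegra_pcie_dw_start_link() calls enable_irq() and is
 * masked again by tegra_pcie_dw_stop_link(); PERST assert/deassert events
 * are therefore only handled while an endpoint function driver has started
 * the link (see the enable_irq()/disable_irq() pair earlier in this patch).
 */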
name = devm_kasprintf(dev, GFP_KERNEL, "tegra_pcie_%u_ep_work", + pcie->cid); + if (!name) { + dev_err(dev, "Failed to create PCIe EP work thread string\n"); + return -ENOMEM; + } + + pm_runtime_enable(dev); + + ret = dw_pcie_ep_init(ep); + if (ret) { + dev_err(dev, "Failed to initialize DWC Endpoint subsystem: %d\n", + ret); + return ret; + } + + return 0; +} + static int tegra_pcie_dw_probe(struct platform_device *pdev) { + const struct tegra_pcie_dw_of_data *data; struct device *dev = &pdev->dev; struct resource *atu_dma_res; struct tegra_pcie_dw *pcie; @@ -1440,6 +2058,8 @@ static int tegra_pcie_dw_probe(struct platform_device *pdev) int ret; u32 i; + data = of_device_get_match_data(dev); + pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL); if (!pcie) return -ENOMEM; @@ -1449,19 +2069,37 @@ static int tegra_pcie_dw_probe(struct platform_device *pdev) pci->ops = &tegra_dw_pcie_ops; pp = &pci->pp; pcie->dev = &pdev->dev; + pcie->mode = (enum dw_pcie_device_mode)data->mode; ret = tegra_pcie_dw_parse_dt(pcie); if (ret < 0) { - dev_err(dev, "Failed to parse device tree: %d\n", ret); + const char *level = KERN_ERR; + + if (ret == -EPROBE_DEFER) + level = KERN_DEBUG; + + dev_printk(level, dev, + dev_fmt("Failed to parse device tree: %d\n"), + ret); return ret; } ret = tegra_pcie_get_slot_regulators(pcie); if (ret < 0) { - dev_err(dev, "Failed to get slot regulators: %d\n", ret); + const char *level = KERN_ERR; + + if (ret == -EPROBE_DEFER) + level = KERN_DEBUG; + + dev_printk(level, dev, + dev_fmt("Failed to get slot regulators: %d\n"), + ret); return ret; } + if (pcie->pex_refclk_sel_gpiod) + gpiod_set_value(pcie->pex_refclk_sel_gpiod, 1); + pcie->pex_ctl_supply = devm_regulator_get(dev, "vddio-pex-ctl"); if (IS_ERR(pcie->pex_ctl_supply)) { ret = PTR_ERR(pcie->pex_ctl_supply); @@ -1557,24 +2195,49 @@ static int tegra_pcie_dw_probe(struct platform_device *pdev) return -ENODEV; } - ret = devm_request_irq(dev, pp->irq, tegra_pcie_irq_handler, - IRQF_SHARED, "tegra-pcie-intr", pcie); - if (ret) { - dev_err(dev, "Failed to request IRQ %d: %d\n", pp->irq, ret); - return ret; - } - pcie->bpmp = tegra_bpmp_get(dev); if (IS_ERR(pcie->bpmp)) return PTR_ERR(pcie->bpmp); platform_set_drvdata(pdev, pcie); - ret = tegra_pcie_config_rp(pcie); - if (ret && ret != -ENOMEDIUM) - goto fail; - else - return 0; + switch (pcie->mode) { + case DW_PCIE_RC_TYPE: + ret = devm_request_irq(dev, pp->irq, tegra_pcie_rp_irq_handler, + IRQF_SHARED, "tegra-pcie-intr", pcie); + if (ret) { + dev_err(dev, "Failed to request IRQ %d: %d\n", pp->irq, + ret); + goto fail; + } + + ret = tegra_pcie_config_rp(pcie); + if (ret && ret != -ENOMEDIUM) + goto fail; + else + return 0; + break; + + case DW_PCIE_EP_TYPE: + ret = devm_request_threaded_irq(dev, pp->irq, + tegra_pcie_ep_hard_irq, + tegra_pcie_ep_irq_thread, + IRQF_SHARED | IRQF_ONESHOT, + "tegra-pcie-ep-intr", pcie); + if (ret) { + dev_err(dev, "Failed to request IRQ %d: %d\n", pp->irq, + ret); + goto fail; + } + + ret = tegra_pcie_config_ep(pcie, pdev); + if (ret < 0) + goto fail; + break; + + default: + dev_err(dev, "Invalid PCIe device type %d\n", pcie->mode); + } fail: tegra_bpmp_put(pcie->bpmp); @@ -1593,6 +2256,8 @@ static int tegra_pcie_dw_remove(struct platform_device *pdev) pm_runtime_put_sync(pcie->dev); pm_runtime_disable(pcie->dev); tegra_bpmp_put(pcie->bpmp); + if (pcie->pex_refclk_sel_gpiod) + gpiod_set_value(pcie->pex_refclk_sel_gpiod, 0); return 0; } @@ -1697,9 +2362,22 @@ static void tegra_pcie_dw_shutdown(struct platform_device *pdev) 
__deinit_controller(pcie); } +static const struct tegra_pcie_dw_of_data tegra_pcie_dw_rc_of_data = { + .mode = DW_PCIE_RC_TYPE, +}; + +static const struct tegra_pcie_dw_of_data tegra_pcie_dw_ep_of_data = { + .mode = DW_PCIE_EP_TYPE, +}; + static const struct of_device_id tegra_pcie_dw_of_match[] = { { .compatible = "nvidia,tegra194-pcie", + .data = &tegra_pcie_dw_rc_of_data, + }, + { + .compatible = "nvidia,tegra194-pcie-ep", + .data = &tegra_pcie_dw_ep_of_data, }, {}, }; diff --git a/drivers/pci/controller/mobiveil/Kconfig b/drivers/pci/controller/mobiveil/Kconfig new file mode 100644 index 000000000000..a62d247018cf --- /dev/null +++ b/drivers/pci/controller/mobiveil/Kconfig @@ -0,0 +1,34 @@ +# SPDX-License-Identifier: GPL-2.0 + +menu "Mobiveil PCIe Core Support" + depends on PCI + +config PCIE_MOBIVEIL + bool + +config PCIE_MOBIVEIL_HOST + bool + depends on PCI_MSI_IRQ_DOMAIN + select PCIE_MOBIVEIL + +config PCIE_MOBIVEIL_PLAT + bool "Mobiveil AXI PCIe controller" + depends on ARCH_ZYNQMP || COMPILE_TEST + depends on OF + depends on PCI_MSI_IRQ_DOMAIN + select PCIE_MOBIVEIL_HOST + help + Say Y here if you want to enable support for the Mobiveil AXI PCIe + Soft IP. It has up to 8 outbound and inbound windows + for address translation and it is a PCIe Gen4 IP. + +config PCIE_LAYERSCAPE_GEN4 + bool "Freescale Layerscape PCIe Gen4 controller" + depends on PCI + depends on OF && (ARM64 || ARCH_LAYERSCAPE) + depends on PCI_MSI_IRQ_DOMAIN + select PCIE_MOBIVEIL_HOST + help + Say Y here if you want PCIe Gen4 controller support on + Layerscape SoCs. +endmenu diff --git a/drivers/pci/controller/mobiveil/Makefile b/drivers/pci/controller/mobiveil/Makefile new file mode 100644 index 000000000000..99d879de32d6 --- /dev/null +++ b/drivers/pci/controller/mobiveil/Makefile @@ -0,0 +1,5 @@ +# SPDX-License-Identifier: GPL-2.0 +obj-$(CONFIG_PCIE_MOBIVEIL) += pcie-mobiveil.o +obj-$(CONFIG_PCIE_MOBIVEIL_HOST) += pcie-mobiveil-host.o +obj-$(CONFIG_PCIE_MOBIVEIL_PLAT) += pcie-mobiveil-plat.o +obj-$(CONFIG_PCIE_LAYERSCAPE_GEN4) += pcie-layerscape-gen4.o diff --git a/drivers/pci/controller/mobiveil/pcie-layerscape-gen4.c b/drivers/pci/controller/mobiveil/pcie-layerscape-gen4.c new file mode 100644 index 000000000000..a6d2190a6753 --- /dev/null +++ b/drivers/pci/controller/mobiveil/pcie-layerscape-gen4.c @@ -0,0 +1,267 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * PCIe Gen4 host controller driver for NXP Layerscape SoCs + * + * Copyright 2019-2020 NXP + * + * Author: Zhiqiang Hou <Zhiqiang.Hou@nxp.com> + */ + +#include <linux/kernel.h> +#include <linux/interrupt.h> +#include <linux/init.h> +#include <linux/of_pci.h> +#include <linux/of_platform.h> +#include <linux/of_irq.h> +#include <linux/of_address.h> +#include <linux/pci.h> +#include <linux/platform_device.h> +#include <linux/resource.h> +#include <linux/mfd/syscon.h> +#include <linux/regmap.h> + +#include "pcie-mobiveil.h" + +/* LUT and PF control registers */ +#define PCIE_LUT_OFF 0x80000 +#define PCIE_PF_OFF 0xc0000 +#define PCIE_PF_INT_STAT 0x18 +#define PF_INT_STAT_PABRST BIT(31) + +#define PCIE_PF_DBG 0x7fc +#define PF_DBG_LTSSM_MASK 0x3f +#define PF_DBG_LTSSM_L0 0x2d /* L0 state */ +#define PF_DBG_WE BIT(31) +#define PF_DBG_PABR BIT(27) + +#define to_ls_pcie_g4(x) platform_get_drvdata((x)->pdev) + +struct ls_pcie_g4 { + struct mobiveil_pcie pci; + struct delayed_work dwork; + int irq; +}; + +static inline u32 ls_pcie_g4_lut_readl(struct ls_pcie_g4 *pcie, u32 off) +{ + return ioread32(pcie->pci.csr_axi_slave_base + PCIE_LUT_OFF + off); +} + +static 
inline void ls_pcie_g4_lut_writel(struct ls_pcie_g4 *pcie, + u32 off, u32 val) +{ + iowrite32(val, pcie->pci.csr_axi_slave_base + PCIE_LUT_OFF + off); +} + +static inline u32 ls_pcie_g4_pf_readl(struct ls_pcie_g4 *pcie, u32 off) +{ + return ioread32(pcie->pci.csr_axi_slave_base + PCIE_PF_OFF + off); +} + +static inline void ls_pcie_g4_pf_writel(struct ls_pcie_g4 *pcie, + u32 off, u32 val) +{ + iowrite32(val, pcie->pci.csr_axi_slave_base + PCIE_PF_OFF + off); +} + +static int ls_pcie_g4_link_up(struct mobiveil_pcie *pci) +{ + struct ls_pcie_g4 *pcie = to_ls_pcie_g4(pci); + u32 state; + + state = ls_pcie_g4_pf_readl(pcie, PCIE_PF_DBG); + state = state & PF_DBG_LTSSM_MASK; + + if (state == PF_DBG_LTSSM_L0) + return 1; + + return 0; +} + +static void ls_pcie_g4_disable_interrupt(struct ls_pcie_g4 *pcie) +{ + struct mobiveil_pcie *mv_pci = &pcie->pci; + + mobiveil_csr_writel(mv_pci, 0, PAB_INTP_AMBA_MISC_ENB); +} + +static void ls_pcie_g4_enable_interrupt(struct ls_pcie_g4 *pcie) +{ + struct mobiveil_pcie *mv_pci = &pcie->pci; + u32 val; + + /* Clear the interrupt status */ + mobiveil_csr_writel(mv_pci, 0xffffffff, PAB_INTP_AMBA_MISC_STAT); + + val = PAB_INTP_INTX_MASK | PAB_INTP_MSI | PAB_INTP_RESET | + PAB_INTP_PCIE_UE | PAB_INTP_IE_PMREDI | PAB_INTP_IE_EC; + mobiveil_csr_writel(mv_pci, val, PAB_INTP_AMBA_MISC_ENB); +} + +static int ls_pcie_g4_reinit_hw(struct ls_pcie_g4 *pcie) +{ + struct mobiveil_pcie *mv_pci = &pcie->pci; + struct device *dev = &mv_pci->pdev->dev; + u32 val, act_stat; + int to = 100; + + /* Poll for pab_csb_reset to set and PAB activity to clear */ + do { + usleep_range(10, 15); + val = ls_pcie_g4_pf_readl(pcie, PCIE_PF_INT_STAT); + act_stat = mobiveil_csr_readl(mv_pci, PAB_ACTIVITY_STAT); + } while (((val & PF_INT_STAT_PABRST) == 0 || act_stat) && to--); + if (to < 0) { + dev_err(dev, "Poll PABRST&PABACT timeout\n"); + return -EIO; + } + + /* clear PEX_RESET bit in PEX_PF0_DBG register */ + val = ls_pcie_g4_pf_readl(pcie, PCIE_PF_DBG); + val |= PF_DBG_WE; + ls_pcie_g4_pf_writel(pcie, PCIE_PF_DBG, val); + + val = ls_pcie_g4_pf_readl(pcie, PCIE_PF_DBG); + val |= PF_DBG_PABR; + ls_pcie_g4_pf_writel(pcie, PCIE_PF_DBG, val); + + val = ls_pcie_g4_pf_readl(pcie, PCIE_PF_DBG); + val &= ~PF_DBG_WE; + ls_pcie_g4_pf_writel(pcie, PCIE_PF_DBG, val); + + mobiveil_host_init(mv_pci, true); + + to = 100; + while (!ls_pcie_g4_link_up(mv_pci) && to--) + usleep_range(200, 250); + if (to < 0) { + dev_err(dev, "PCIe link training timeout\n"); + return -EIO; + } + + return 0; +} + +static irqreturn_t ls_pcie_g4_isr(int irq, void *dev_id) +{ + struct ls_pcie_g4 *pcie = (struct ls_pcie_g4 *)dev_id; + struct mobiveil_pcie *mv_pci = &pcie->pci; + u32 val; + + val = mobiveil_csr_readl(mv_pci, PAB_INTP_AMBA_MISC_STAT); + if (!val) + return IRQ_NONE; + + if (val & PAB_INTP_RESET) { + ls_pcie_g4_disable_interrupt(pcie); + schedule_delayed_work(&pcie->dwork, msecs_to_jiffies(1)); + } + + mobiveil_csr_writel(mv_pci, val, PAB_INTP_AMBA_MISC_STAT); + + return IRQ_HANDLED; +} + +static int ls_pcie_g4_interrupt_init(struct mobiveil_pcie *mv_pci) +{ + struct ls_pcie_g4 *pcie = to_ls_pcie_g4(mv_pci); + struct platform_device *pdev = mv_pci->pdev; + struct device *dev = &pdev->dev; + int ret; + + pcie->irq = platform_get_irq_byname(pdev, "intr"); + if (pcie->irq < 0) { + dev_err(dev, "Can't get 'intr' IRQ, errno = %d\n", pcie->irq); + return pcie->irq; + } + ret = devm_request_irq(dev, pcie->irq, ls_pcie_g4_isr, + IRQF_SHARED, pdev->name, pcie); + if (ret) { + dev_err(dev, "Can't register PCIe IRQ, errno = 
%d\n", ret); + return ret; + } + + return 0; +} + +static void ls_pcie_g4_reset(struct work_struct *work) +{ + struct delayed_work *dwork = container_of(work, struct delayed_work, + work); + struct ls_pcie_g4 *pcie = container_of(dwork, struct ls_pcie_g4, dwork); + struct mobiveil_pcie *mv_pci = &pcie->pci; + u16 ctrl; + + ctrl = mobiveil_csr_readw(mv_pci, PCI_BRIDGE_CONTROL); + ctrl &= ~PCI_BRIDGE_CTL_BUS_RESET; + mobiveil_csr_writew(mv_pci, ctrl, PCI_BRIDGE_CONTROL); + + if (!ls_pcie_g4_reinit_hw(pcie)) + return; + + ls_pcie_g4_enable_interrupt(pcie); +} + +static struct mobiveil_rp_ops ls_pcie_g4_rp_ops = { + .interrupt_init = ls_pcie_g4_interrupt_init, +}; + +static const struct mobiveil_pab_ops ls_pcie_g4_pab_ops = { + .link_up = ls_pcie_g4_link_up, +}; + +static int __init ls_pcie_g4_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct pci_host_bridge *bridge; + struct mobiveil_pcie *mv_pci; + struct ls_pcie_g4 *pcie; + struct device_node *np = dev->of_node; + int ret; + + if (!of_parse_phandle(np, "msi-parent", 0)) { + dev_err(dev, "Failed to find msi-parent\n"); + return -EINVAL; + } + + bridge = devm_pci_alloc_host_bridge(dev, sizeof(*pcie)); + if (!bridge) + return -ENOMEM; + + pcie = pci_host_bridge_priv(bridge); + mv_pci = &pcie->pci; + + mv_pci->pdev = pdev; + mv_pci->ops = &ls_pcie_g4_pab_ops; + mv_pci->rp.ops = &ls_pcie_g4_rp_ops; + mv_pci->rp.bridge = bridge; + + platform_set_drvdata(pdev, pcie); + + INIT_DELAYED_WORK(&pcie->dwork, ls_pcie_g4_reset); + + ret = mobiveil_pcie_host_probe(mv_pci); + if (ret) { + dev_err(dev, "Fail to probe\n"); + return ret; + } + + ls_pcie_g4_enable_interrupt(pcie); + + return 0; +} + +static const struct of_device_id ls_pcie_g4_of_match[] = { + { .compatible = "fsl,lx2160a-pcie", }, + { }, +}; + +static struct platform_driver ls_pcie_g4_driver = { + .driver = { + .name = "layerscape-pcie-gen4", + .of_match_table = ls_pcie_g4_of_match, + .suppress_bind_attrs = true, + }, +}; + +builtin_platform_driver_probe(ls_pcie_g4_driver, ls_pcie_g4_probe); diff --git a/drivers/pci/controller/pcie-mobiveil.c b/drivers/pci/controller/mobiveil/pcie-mobiveil-host.c index 3a696ca45bfa..a94be264240f 100644 --- a/drivers/pci/controller/pcie-mobiveil.c +++ b/drivers/pci/controller/mobiveil/pcie-mobiveil-host.c @@ -3,10 +3,12 @@ * PCIe host controller driver for Mobiveil PCIe Host controller * * Copyright (c) 2018 Mobiveil Inc. 
+ * Copyright 2019-2020 NXP + * * Author: Subrahmanya Lingappa <l.subrahmanya@mobiveil.co.in> + * Hou Zhiqiang <Zhiqiang.Hou@nxp.com> */ -#include <linux/delay.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/irq.h> @@ -23,274 +25,22 @@ #include <linux/platform_device.h> #include <linux/slab.h> -#include "../pci.h" - -/* register offsets and bit positions */ - -/* - * translation tables are grouped into windows, each window registers are - * grouped into blocks of 4 or 16 registers each - */ -#define PAB_REG_BLOCK_SIZE 16 -#define PAB_EXT_REG_BLOCK_SIZE 4 - -#define PAB_REG_ADDR(offset, win) \ - (offset + (win * PAB_REG_BLOCK_SIZE)) -#define PAB_EXT_REG_ADDR(offset, win) \ - (offset + (win * PAB_EXT_REG_BLOCK_SIZE)) - -#define LTSSM_STATUS 0x0404 -#define LTSSM_STATUS_L0_MASK 0x3f -#define LTSSM_STATUS_L0 0x2d - -#define PAB_CTRL 0x0808 -#define AMBA_PIO_ENABLE_SHIFT 0 -#define PEX_PIO_ENABLE_SHIFT 1 -#define PAGE_SEL_SHIFT 13 -#define PAGE_SEL_MASK 0x3f -#define PAGE_LO_MASK 0x3ff -#define PAGE_SEL_OFFSET_SHIFT 10 - -#define PAB_AXI_PIO_CTRL 0x0840 -#define APIO_EN_MASK 0xf - -#define PAB_PEX_PIO_CTRL 0x08c0 -#define PIO_ENABLE_SHIFT 0 - -#define PAB_INTP_AMBA_MISC_ENB 0x0b0c -#define PAB_INTP_AMBA_MISC_STAT 0x0b1c -#define PAB_INTP_INTX_MASK 0x01e0 -#define PAB_INTP_MSI_MASK 0x8 - -#define PAB_AXI_AMAP_CTRL(win) PAB_REG_ADDR(0x0ba0, win) -#define WIN_ENABLE_SHIFT 0 -#define WIN_TYPE_SHIFT 1 -#define WIN_TYPE_MASK 0x3 -#define WIN_SIZE_MASK 0xfffffc00 - -#define PAB_EXT_AXI_AMAP_SIZE(win) PAB_EXT_REG_ADDR(0xbaf0, win) - -#define PAB_EXT_AXI_AMAP_AXI_WIN(win) PAB_EXT_REG_ADDR(0x80a0, win) -#define PAB_AXI_AMAP_AXI_WIN(win) PAB_REG_ADDR(0x0ba4, win) -#define AXI_WINDOW_ALIGN_MASK 3 - -#define PAB_AXI_AMAP_PEX_WIN_L(win) PAB_REG_ADDR(0x0ba8, win) -#define PAB_BUS_SHIFT 24 -#define PAB_DEVICE_SHIFT 19 -#define PAB_FUNCTION_SHIFT 16 - -#define PAB_AXI_AMAP_PEX_WIN_H(win) PAB_REG_ADDR(0x0bac, win) -#define PAB_INTP_AXI_PIO_CLASS 0x474 - -#define PAB_PEX_AMAP_CTRL(win) PAB_REG_ADDR(0x4ba0, win) -#define AMAP_CTRL_EN_SHIFT 0 -#define AMAP_CTRL_TYPE_SHIFT 1 -#define AMAP_CTRL_TYPE_MASK 3 - -#define PAB_EXT_PEX_AMAP_SIZEN(win) PAB_EXT_REG_ADDR(0xbef0, win) -#define PAB_EXT_PEX_AMAP_AXI_WIN(win) PAB_EXT_REG_ADDR(0xb4a0, win) -#define PAB_PEX_AMAP_AXI_WIN(win) PAB_REG_ADDR(0x4ba4, win) -#define PAB_PEX_AMAP_PEX_WIN_L(win) PAB_REG_ADDR(0x4ba8, win) -#define PAB_PEX_AMAP_PEX_WIN_H(win) PAB_REG_ADDR(0x4bac, win) - -/* starting offset of INTX bits in status register */ -#define PAB_INTX_START 5 - -/* supported number of MSI interrupts */ -#define PCI_NUM_MSI 16 - -/* MSI registers */ -#define MSI_BASE_LO_OFFSET 0x04 -#define MSI_BASE_HI_OFFSET 0x08 -#define MSI_SIZE_OFFSET 0x0c -#define MSI_ENABLE_OFFSET 0x14 -#define MSI_STATUS_OFFSET 0x18 -#define MSI_DATA_OFFSET 0x20 -#define MSI_ADDR_L_OFFSET 0x24 -#define MSI_ADDR_H_OFFSET 0x28 - -/* outbound and inbound window definitions */ -#define WIN_NUM_0 0 -#define WIN_NUM_1 1 -#define CFG_WINDOW_TYPE 0 -#define IO_WINDOW_TYPE 1 -#define MEM_WINDOW_TYPE 2 -#define IB_WIN_SIZE ((u64)256 * 1024 * 1024 * 1024) -#define MAX_PIO_WINDOWS 8 - -/* Parameters for the waiting for link up routine */ -#define LINK_WAIT_MAX_RETRIES 10 -#define LINK_WAIT_MIN 90000 -#define LINK_WAIT_MAX 100000 - -#define PAGED_ADDR_BNDRY 0xc00 -#define OFFSET_TO_PAGE_ADDR(off) \ - ((off & PAGE_LO_MASK) | PAGED_ADDR_BNDRY) -#define OFFSET_TO_PAGE_IDX(off) \ - ((off >> PAGE_SEL_OFFSET_SHIFT) & PAGE_SEL_MASK) - -struct mobiveil_msi { /* MSI information */ - struct mutex 
lock; /* protect bitmap variable */ - struct irq_domain *msi_domain; - struct irq_domain *dev_domain; - phys_addr_t msi_pages_phys; - int num_of_vectors; - DECLARE_BITMAP(msi_irq_in_use, PCI_NUM_MSI); -}; - -struct mobiveil_pcie { - struct platform_device *pdev; - void __iomem *config_axi_slave_base; /* endpoint config base */ - void __iomem *csr_axi_slave_base; /* root port config base */ - void __iomem *apb_csr_base; /* MSI register base */ - phys_addr_t pcie_reg_base; /* Physical PCIe Controller Base */ - struct irq_domain *intx_domain; - raw_spinlock_t intx_mask_lock; - int irq; - int apio_wins; - int ppio_wins; - int ob_wins_configured; /* configured outbound windows */ - int ib_wins_configured; /* configured inbound windows */ - struct resource *ob_io_res; - char root_bus_nr; - struct mobiveil_msi msi; -}; - -/* - * mobiveil_pcie_sel_page - routine to access paged register - * - * Registers whose address greater than PAGED_ADDR_BNDRY (0xc00) are paged, - * for this scheme to work extracted higher 6 bits of the offset will be - * written to pg_sel field of PAB_CTRL register and rest of the lower 10 - * bits enabled with PAGED_ADDR_BNDRY are used as offset of the register. - */ -static void mobiveil_pcie_sel_page(struct mobiveil_pcie *pcie, u8 pg_idx) -{ - u32 val; - - val = readl(pcie->csr_axi_slave_base + PAB_CTRL); - val &= ~(PAGE_SEL_MASK << PAGE_SEL_SHIFT); - val |= (pg_idx & PAGE_SEL_MASK) << PAGE_SEL_SHIFT; - - writel(val, pcie->csr_axi_slave_base + PAB_CTRL); -} - -static void *mobiveil_pcie_comp_addr(struct mobiveil_pcie *pcie, u32 off) -{ - if (off < PAGED_ADDR_BNDRY) { - /* For directly accessed registers, clear the pg_sel field */ - mobiveil_pcie_sel_page(pcie, 0); - return pcie->csr_axi_slave_base + off; - } - - mobiveil_pcie_sel_page(pcie, OFFSET_TO_PAGE_IDX(off)); - return pcie->csr_axi_slave_base + OFFSET_TO_PAGE_ADDR(off); -} - -static int mobiveil_pcie_read(void __iomem *addr, int size, u32 *val) -{ - if ((uintptr_t)addr & (size - 1)) { - *val = 0; - return PCIBIOS_BAD_REGISTER_NUMBER; - } - - switch (size) { - case 4: - *val = readl(addr); - break; - case 2: - *val = readw(addr); - break; - case 1: - *val = readb(addr); - break; - default: - *val = 0; - return PCIBIOS_BAD_REGISTER_NUMBER; - } - - return PCIBIOS_SUCCESSFUL; -} - -static int mobiveil_pcie_write(void __iomem *addr, int size, u32 val) -{ - if ((uintptr_t)addr & (size - 1)) - return PCIBIOS_BAD_REGISTER_NUMBER; - - switch (size) { - case 4: - writel(val, addr); - break; - case 2: - writew(val, addr); - break; - case 1: - writeb(val, addr); - break; - default: - return PCIBIOS_BAD_REGISTER_NUMBER; - } - - return PCIBIOS_SUCCESSFUL; -} - -static u32 mobiveil_csr_read(struct mobiveil_pcie *pcie, u32 off, size_t size) -{ - void *addr; - u32 val; - int ret; - - addr = mobiveil_pcie_comp_addr(pcie, off); - - ret = mobiveil_pcie_read(addr, size, &val); - if (ret) - dev_err(&pcie->pdev->dev, "read CSR address failed\n"); - - return val; -} - -static void mobiveil_csr_write(struct mobiveil_pcie *pcie, u32 val, u32 off, - size_t size) -{ - void *addr; - int ret; - - addr = mobiveil_pcie_comp_addr(pcie, off); - - ret = mobiveil_pcie_write(addr, size, val); - if (ret) - dev_err(&pcie->pdev->dev, "write CSR address failed\n"); -} - -static u32 mobiveil_csr_readl(struct mobiveil_pcie *pcie, u32 off) -{ - return mobiveil_csr_read(pcie, off, 0x4); -} - -static void mobiveil_csr_writel(struct mobiveil_pcie *pcie, u32 val, u32 off) -{ - mobiveil_csr_write(pcie, val, off, 0x4); -} - -static bool mobiveil_pcie_link_up(struct 
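/*
 * Worked example for the paged CSR scheme being deleted above (it moves
 * to pcie-mobiveil.c later in this patch): offsets at or above
 * PAGED_ADDR_BNDRY (0xc00) are reached through a page window selected in
 * PAB_CTRL. Standalone sanity-check program, not driver code:
 */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SEL_MASK		0x3f
#define PAGE_LO_MASK		0x3ff
#define PAGE_SEL_OFFSET_SHIFT	10
#define PAGED_ADDR_BNDRY	0xc00

int main(void)
{
	uint32_t off = 0x4ba0;	/* PAB_PEX_AMAP_CTRL(0) */
	uint32_t idx = (off >> PAGE_SEL_OFFSET_SHIFT) & PAGE_SEL_MASK;
	uint32_t paged = (off & PAGE_LO_MASK) | PAGED_ADDR_BNDRY;

	/* prints: off 0x4ba0 -> pg_sel 0x12, paged offset 0xfa0 */
	printf("off 0x%x -> pg_sel 0x%x, paged offset 0x%x\n",
	       off, idx, paged);
	return 0;
}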
mobiveil_pcie *pcie) -{ - return (mobiveil_csr_readl(pcie, LTSSM_STATUS) & - LTSSM_STATUS_L0_MASK) == LTSSM_STATUS_L0; -} +#include "pcie-mobiveil.h" static bool mobiveil_pcie_valid_device(struct pci_bus *bus, unsigned int devfn) { struct mobiveil_pcie *pcie = bus->sysdata; + struct mobiveil_root_port *rp = &pcie->rp; /* Only one device down on each root port */ - if ((bus->number == pcie->root_bus_nr) && (devfn > 0)) + if ((bus->number == rp->root_bus_nr) && (devfn > 0)) return false; /* * Do not read more than one device on the bus directly * attached to RC */ - if ((bus->primary == pcie->root_bus_nr) && (PCI_SLOT(devfn) > 0)) + if ((bus->primary == rp->root_bus_nr) && (PCI_SLOT(devfn) > 0)) return false; return true; @@ -304,13 +54,14 @@ static void __iomem *mobiveil_pcie_map_bus(struct pci_bus *bus, unsigned int devfn, int where) { struct mobiveil_pcie *pcie = bus->sysdata; + struct mobiveil_root_port *rp = &pcie->rp; u32 value; if (!mobiveil_pcie_valid_device(bus, devfn)) return NULL; /* RC config access */ - if (bus->number == pcie->root_bus_nr) + if (bus->number == rp->root_bus_nr) return pcie->csr_axi_slave_base + where; /* @@ -325,7 +76,7 @@ static void __iomem *mobiveil_pcie_map_bus(struct pci_bus *bus, mobiveil_csr_writel(pcie, value, PAB_AXI_AMAP_PEX_WIN_L(WIN_NUM_0)); - return pcie->config_axi_slave_base + where; + return rp->config_axi_slave_base + where; } static struct pci_ops mobiveil_pcie_ops = { @@ -339,7 +90,8 @@ static void mobiveil_pcie_isr(struct irq_desc *desc) struct irq_chip *chip = irq_desc_get_chip(desc); struct mobiveil_pcie *pcie = irq_desc_get_handler_data(desc); struct device *dev = &pcie->pdev->dev; - struct mobiveil_msi *msi = &pcie->msi; + struct mobiveil_root_port *rp = &pcie->rp; + struct mobiveil_msi *msi = &rp->msi; u32 msi_data, msi_addr_lo, msi_addr_hi; u32 intr_status, msi_status; unsigned long shifted_status; @@ -365,7 +117,7 @@ static void mobiveil_pcie_isr(struct irq_desc *desc) shifted_status >>= PAB_INTX_START; do { for_each_set_bit(bit, &shifted_status, PCI_NUM_INTX) { - virq = irq_find_mapping(pcie->intx_domain, + virq = irq_find_mapping(rp->intx_domain, bit + 1); if (virq) generic_handle_irq(virq); @@ -424,15 +176,16 @@ static int mobiveil_pcie_parse_dt(struct mobiveil_pcie *pcie) struct device *dev = &pcie->pdev->dev; struct platform_device *pdev = pcie->pdev; struct device_node *node = dev->of_node; + struct mobiveil_root_port *rp = &pcie->rp; struct resource *res; /* map config resource */ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "config_axi_slave"); - pcie->config_axi_slave_base = devm_pci_remap_cfg_resource(dev, res); - if (IS_ERR(pcie->config_axi_slave_base)) - return PTR_ERR(pcie->config_axi_slave_base); - pcie->ob_io_res = res; + rp->config_axi_slave_base = devm_pci_remap_cfg_resource(dev, res); + if (IS_ERR(rp->config_axi_slave_base)) + return PTR_ERR(rp->config_axi_slave_base); + rp->ob_io_res = res; /* map csr resource */ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, @@ -442,12 +195,6 @@ static int mobiveil_pcie_parse_dt(struct mobiveil_pcie *pcie) return PTR_ERR(pcie->csr_axi_slave_base); pcie->pcie_reg_base = res->start; - /* map MSI config resource */ - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "apb_csr"); - pcie->apb_csr_base = devm_pci_remap_cfg_resource(dev, res); - if (IS_ERR(pcie->apb_csr_base)) - return PTR_ERR(pcie->apb_csr_base); - /* read the number of windows requested */ if (of_property_read_u32(node, "apio-wins", &pcie->apio_wins)) pcie->apio_wins = MAX_PIO_WINDOWS; @@ 
-455,118 +202,15 @@ static int mobiveil_pcie_parse_dt(struct mobiveil_pcie *pcie) if (of_property_read_u32(node, "ppio-wins", &pcie->ppio_wins)) pcie->ppio_wins = MAX_PIO_WINDOWS; - pcie->irq = platform_get_irq(pdev, 0); - if (pcie->irq <= 0) { - dev_err(dev, "failed to map IRQ: %d\n", pcie->irq); - return -ENODEV; - } - return 0; } -static void program_ib_windows(struct mobiveil_pcie *pcie, int win_num, - u64 cpu_addr, u64 pci_addr, u32 type, u64 size) -{ - u32 value; - u64 size64 = ~(size - 1); - - if (win_num >= pcie->ppio_wins) { - dev_err(&pcie->pdev->dev, - "ERROR: max inbound windows reached !\n"); - return; - } - - value = mobiveil_csr_readl(pcie, PAB_PEX_AMAP_CTRL(win_num)); - value &= ~(AMAP_CTRL_TYPE_MASK << AMAP_CTRL_TYPE_SHIFT | WIN_SIZE_MASK); - value |= type << AMAP_CTRL_TYPE_SHIFT | 1 << AMAP_CTRL_EN_SHIFT | - (lower_32_bits(size64) & WIN_SIZE_MASK); - mobiveil_csr_writel(pcie, value, PAB_PEX_AMAP_CTRL(win_num)); - - mobiveil_csr_writel(pcie, upper_32_bits(size64), - PAB_EXT_PEX_AMAP_SIZEN(win_num)); - - mobiveil_csr_writel(pcie, lower_32_bits(cpu_addr), - PAB_PEX_AMAP_AXI_WIN(win_num)); - mobiveil_csr_writel(pcie, upper_32_bits(cpu_addr), - PAB_EXT_PEX_AMAP_AXI_WIN(win_num)); - - mobiveil_csr_writel(pcie, lower_32_bits(pci_addr), - PAB_PEX_AMAP_PEX_WIN_L(win_num)); - mobiveil_csr_writel(pcie, upper_32_bits(pci_addr), - PAB_PEX_AMAP_PEX_WIN_H(win_num)); - - pcie->ib_wins_configured++; -} - -/* - * routine to program the outbound windows - */ -static void program_ob_windows(struct mobiveil_pcie *pcie, int win_num, - u64 cpu_addr, u64 pci_addr, u32 type, u64 size) -{ - u32 value; - u64 size64 = ~(size - 1); - - if (win_num >= pcie->apio_wins) { - dev_err(&pcie->pdev->dev, - "ERROR: max outbound windows reached !\n"); - return; - } - - /* - * program Enable Bit to 1, Type Bit to (00) base 2, AXI Window Size Bit - * to 4 KB in PAB_AXI_AMAP_CTRL register - */ - value = mobiveil_csr_readl(pcie, PAB_AXI_AMAP_CTRL(win_num)); - value &= ~(WIN_TYPE_MASK << WIN_TYPE_SHIFT | WIN_SIZE_MASK); - value |= 1 << WIN_ENABLE_SHIFT | type << WIN_TYPE_SHIFT | - (lower_32_bits(size64) & WIN_SIZE_MASK); - mobiveil_csr_writel(pcie, value, PAB_AXI_AMAP_CTRL(win_num)); - - mobiveil_csr_writel(pcie, upper_32_bits(size64), - PAB_EXT_AXI_AMAP_SIZE(win_num)); - - /* - * program AXI window base with appropriate value in - * PAB_AXI_AMAP_AXI_WIN0 register - */ - mobiveil_csr_writel(pcie, - lower_32_bits(cpu_addr) & (~AXI_WINDOW_ALIGN_MASK), - PAB_AXI_AMAP_AXI_WIN(win_num)); - mobiveil_csr_writel(pcie, upper_32_bits(cpu_addr), - PAB_EXT_AXI_AMAP_AXI_WIN(win_num)); - - mobiveil_csr_writel(pcie, lower_32_bits(pci_addr), - PAB_AXI_AMAP_PEX_WIN_L(win_num)); - mobiveil_csr_writel(pcie, upper_32_bits(pci_addr), - PAB_AXI_AMAP_PEX_WIN_H(win_num)); - - pcie->ob_wins_configured++; -} - -static int mobiveil_bringup_link(struct mobiveil_pcie *pcie) -{ - int retries; - - /* check if the link is up or not */ - for (retries = 0; retries < LINK_WAIT_MAX_RETRIES; retries++) { - if (mobiveil_pcie_link_up(pcie)) - return 0; - - usleep_range(LINK_WAIT_MIN, LINK_WAIT_MAX); - } - - dev_err(&pcie->pdev->dev, "link never came up\n"); - - return -ETIMEDOUT; -} - static void mobiveil_pcie_enable_msi(struct mobiveil_pcie *pcie) { phys_addr_t msg_addr = pcie->pcie_reg_base; - struct mobiveil_msi *msi = &pcie->msi; + struct mobiveil_msi *msi = &pcie->rp.msi; - pcie->msi.num_of_vectors = PCI_NUM_MSI; + msi->num_of_vectors = PCI_NUM_MSI; msi->msi_pages_phys = (phys_addr_t)msg_addr; writel_relaxed(lower_32_bits(msg_addr), @@ -577,17 
+221,23 @@ static void mobiveil_pcie_enable_msi(struct mobiveil_pcie *pcie) writel_relaxed(1, pcie->apb_csr_base + MSI_ENABLE_OFFSET); } -static int mobiveil_host_init(struct mobiveil_pcie *pcie) +int mobiveil_host_init(struct mobiveil_pcie *pcie, bool reinit) { - struct pci_host_bridge *bridge = pci_host_bridge_from_priv(pcie); + struct mobiveil_root_port *rp = &pcie->rp; + struct pci_host_bridge *bridge = rp->bridge; u32 value, pab_ctrl, type; struct resource_entry *win; - /* setup bus numbers */ - value = mobiveil_csr_readl(pcie, PCI_PRIMARY_BUS); - value &= 0xff000000; - value |= 0x00ff0100; - mobiveil_csr_writel(pcie, value, PCI_PRIMARY_BUS); + pcie->ib_wins_configured = 0; + pcie->ob_wins_configured = 0; + + if (!reinit) { + /* setup bus numbers */ + value = mobiveil_csr_readl(pcie, PCI_PRIMARY_BUS); + value &= 0xff000000; + value |= 0x00ff0100; + mobiveil_csr_writel(pcie, value, PCI_PRIMARY_BUS); + } /* * program Bus Master Enable Bit in Command Register in PAB Config @@ -605,9 +255,6 @@ static int mobiveil_host_init(struct mobiveil_pcie *pcie) pab_ctrl |= (1 << AMBA_PIO_ENABLE_SHIFT) | (1 << PEX_PIO_ENABLE_SHIFT); mobiveil_csr_writel(pcie, pab_ctrl, PAB_CTRL); - mobiveil_csr_writel(pcie, (PAB_INTP_INTX_MASK | PAB_INTP_MSI_MASK), - PAB_INTP_AMBA_MISC_ENB); - /* * program PIO Enable Bit to 1 and Config Window Enable Bit to 1 in * PAB_AXI_PIO_CTRL Register @@ -629,8 +276,8 @@ static int mobiveil_host_init(struct mobiveil_pcie *pcie) */ /* config outbound translation window */ - program_ob_windows(pcie, WIN_NUM_0, pcie->ob_io_res->start, 0, - CFG_WINDOW_TYPE, resource_size(pcie->ob_io_res)); + program_ob_windows(pcie, WIN_NUM_0, rp->ob_io_res->start, 0, + CFG_WINDOW_TYPE, resource_size(rp->ob_io_res)); /* memory inbound translation window */ program_ib_windows(pcie, WIN_NUM_0, 0, 0, MEM_WINDOW_TYPE, IB_WIN_SIZE); @@ -657,9 +304,6 @@ static int mobiveil_host_init(struct mobiveil_pcie *pcie) value |= (PCI_CLASS_BRIDGE_PCI << 16); mobiveil_csr_writel(pcie, value, PAB_INTP_AXI_PIO_CLASS); - /* setup MSI hardware registers */ - mobiveil_pcie_enable_msi(pcie); - return 0; } @@ -667,32 +311,36 @@ static void mobiveil_mask_intx_irq(struct irq_data *data) { struct irq_desc *desc = irq_to_desc(data->irq); struct mobiveil_pcie *pcie; + struct mobiveil_root_port *rp; unsigned long flags; u32 mask, shifted_val; pcie = irq_desc_get_chip_data(desc); + rp = &pcie->rp; mask = 1 << ((data->hwirq + PAB_INTX_START) - 1); - raw_spin_lock_irqsave(&pcie->intx_mask_lock, flags); + raw_spin_lock_irqsave(&rp->intx_mask_lock, flags); shifted_val = mobiveil_csr_readl(pcie, PAB_INTP_AMBA_MISC_ENB); shifted_val &= ~mask; mobiveil_csr_writel(pcie, shifted_val, PAB_INTP_AMBA_MISC_ENB); - raw_spin_unlock_irqrestore(&pcie->intx_mask_lock, flags); + raw_spin_unlock_irqrestore(&rp->intx_mask_lock, flags); } static void mobiveil_unmask_intx_irq(struct irq_data *data) { struct irq_desc *desc = irq_to_desc(data->irq); struct mobiveil_pcie *pcie; + struct mobiveil_root_port *rp; unsigned long flags; u32 shifted_val, mask; pcie = irq_desc_get_chip_data(desc); + rp = &pcie->rp; mask = 1 << ((data->hwirq + PAB_INTX_START) - 1); - raw_spin_lock_irqsave(&pcie->intx_mask_lock, flags); + raw_spin_lock_irqsave(&rp->intx_mask_lock, flags); shifted_val = mobiveil_csr_readl(pcie, PAB_INTP_AMBA_MISC_ENB); shifted_val |= mask; mobiveil_csr_writel(pcie, shifted_val, PAB_INTP_AMBA_MISC_ENB); - raw_spin_unlock_irqrestore(&pcie->intx_mask_lock, flags); + raw_spin_unlock_irqrestore(&rp->intx_mask_lock, flags); } static struct irq_chip 
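/*
 * The mask arithmetic in the two handlers above is easy to misread; a
 * quick sanity-check sketch (not driver code): INTx hwirqs 1..4 must
 * select status bits 5..8, i.e. PAB_INTP_INTA..PAB_INTP_INTD from the
 * new header.
 */
static inline u32 intx_bit_sketch(unsigned int hwirq)
{
	return 1U << ((hwirq + PAB_INTX_START) - 1);
}

/*
 * intx_bit_sketch(1) == BIT(5) == PAB_INTP_INTA
 * intx_bit_sketch(4) == BIT(8) == PAB_INTP_INTD
 */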
intx_irq_chip = { @@ -760,7 +408,7 @@ static int mobiveil_irq_msi_domain_alloc(struct irq_domain *domain, unsigned int nr_irqs, void *args) { struct mobiveil_pcie *pcie = domain->host_data; - struct mobiveil_msi *msi = &pcie->msi; + struct mobiveil_msi *msi = &pcie->rp.msi; unsigned long bit; WARN_ON(nr_irqs != 1); @@ -787,7 +435,7 @@ static void mobiveil_irq_msi_domain_free(struct irq_domain *domain, { struct irq_data *d = irq_domain_get_irq_data(domain, virq); struct mobiveil_pcie *pcie = irq_data_get_irq_chip_data(d); - struct mobiveil_msi *msi = &pcie->msi; + struct mobiveil_msi *msi = &pcie->rp.msi; mutex_lock(&msi->lock); @@ -808,9 +456,9 @@ static int mobiveil_allocate_msi_domains(struct mobiveil_pcie *pcie) { struct device *dev = &pcie->pdev->dev; struct fwnode_handle *fwnode = of_node_to_fwnode(dev->of_node); - struct mobiveil_msi *msi = &pcie->msi; + struct mobiveil_msi *msi = &pcie->rp.msi; - mutex_init(&pcie->msi.lock); + mutex_init(&msi->lock); msi->dev_domain = irq_domain_add_linear(NULL, msi->num_of_vectors, &msi_domain_ops, pcie); if (!msi->dev_domain) { @@ -834,18 +482,19 @@ static int mobiveil_pcie_init_irq_domain(struct mobiveil_pcie *pcie) { struct device *dev = &pcie->pdev->dev; struct device_node *node = dev->of_node; + struct mobiveil_root_port *rp = &pcie->rp; int ret; /* setup INTx */ - pcie->intx_domain = irq_domain_add_linear(node, PCI_NUM_INTX, - &intx_domain_ops, pcie); + rp->intx_domain = irq_domain_add_linear(node, PCI_NUM_INTX, + &intx_domain_ops, pcie); - if (!pcie->intx_domain) { + if (!rp->intx_domain) { dev_err(dev, "Failed to get a INTx IRQ domain\n"); return -ENOMEM; } - raw_spin_lock_init(&pcie->intx_mask_lock); + raw_spin_lock_init(&rp->intx_mask_lock); /* setup MSI */ ret = mobiveil_allocate_msi_domains(pcie); @@ -855,23 +504,74 @@ static int mobiveil_pcie_init_irq_domain(struct mobiveil_pcie *pcie) return 0; } -static int mobiveil_pcie_probe(struct platform_device *pdev) +static int mobiveil_pcie_integrated_interrupt_init(struct mobiveil_pcie *pcie) { - struct mobiveil_pcie *pcie; - struct pci_bus *bus; - struct pci_bus *child; - struct pci_host_bridge *bridge; + struct platform_device *pdev = pcie->pdev; struct device *dev = &pdev->dev; + struct mobiveil_root_port *rp = &pcie->rp; + struct resource *res; int ret; - /* allocate the PCIe port */ - bridge = devm_pci_alloc_host_bridge(dev, sizeof(*pcie)); - if (!bridge) - return -ENOMEM; + /* map MSI config resource */ + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "apb_csr"); + pcie->apb_csr_base = devm_pci_remap_cfg_resource(dev, res); + if (IS_ERR(pcie->apb_csr_base)) + return PTR_ERR(pcie->apb_csr_base); - pcie = pci_host_bridge_priv(bridge); + /* setup MSI hardware registers */ + mobiveil_pcie_enable_msi(pcie); - pcie->pdev = pdev; + rp->irq = platform_get_irq(pdev, 0); + if (rp->irq <= 0) { + dev_err(dev, "failed to map IRQ: %d\n", rp->irq); + return -ENODEV; + } + + /* initialize the IRQ domains */ + ret = mobiveil_pcie_init_irq_domain(pcie); + if (ret) { + dev_err(dev, "Failed creating IRQ Domain\n"); + return ret; + } + + irq_set_chained_handler_and_data(rp->irq, mobiveil_pcie_isr, pcie); + + /* Enable interrupts */ + mobiveil_csr_writel(pcie, (PAB_INTP_INTX_MASK | PAB_INTP_MSI_MASK), + PAB_INTP_AMBA_MISC_ENB); + + + return 0; +} + +static int mobiveil_pcie_interrupt_init(struct mobiveil_pcie *pcie) +{ + struct mobiveil_root_port *rp = &pcie->rp; + + if (rp->ops->interrupt_init) + return rp->ops->interrupt_init(pcie); + + return mobiveil_pcie_integrated_interrupt_init(pcie); +} + 
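/*
 * mobiveil_pcie_interrupt_init() above is the dispatch point that lets a
 * platform replace the integrated MSI/INTx setup; the Layerscape Gen4
 * glue earlier in this patch installs exactly such a hook. Hypothetical
 * sketch of another override (all "my_soc" names invented for
 * illustration):
 */
static int my_soc_interrupt_init(struct mobiveil_pcie *pcie)
{
	/* request the SoC-specific IRQ and enable PAB interrupt sources */
	return 0;
}

static struct mobiveil_rp_ops my_soc_rp_ops = {
	.interrupt_init = my_soc_interrupt_init,
};

/*
 * in the glue driver's probe, before mobiveil_pcie_host_probe():
 *	pcie->rp.ops = &my_soc_rp_ops;
 */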
+static bool mobiveil_pcie_is_bridge(struct mobiveil_pcie *pcie) +{ + u32 header_type; + + header_type = mobiveil_csr_readb(pcie, PCI_HEADER_TYPE); + header_type &= 0x7f; + + return header_type == PCI_HEADER_TYPE_BRIDGE; +} + +int mobiveil_pcie_host_probe(struct mobiveil_pcie *pcie) +{ + struct mobiveil_root_port *rp = &pcie->rp; + struct pci_host_bridge *bridge = rp->bridge; + struct device *dev = &pcie->pdev->dev; + struct pci_bus *bus; + struct pci_bus *child; + int ret; ret = mobiveil_pcie_parse_dt(pcie); if (ret) { @@ -879,6 +579,9 @@ static int mobiveil_pcie_probe(struct platform_device *pdev) return ret; } + if (!mobiveil_pcie_is_bridge(pcie)) + return -ENODEV; + /* parse the host bridge base addresses from the device tree file */ ret = pci_parse_request_of_pci_ranges(dev, &bridge->windows, &bridge->dma_ranges, NULL); @@ -891,25 +594,22 @@ static int mobiveil_pcie_probe(struct platform_device *pdev) * configure all inbound and outbound windows and prepare the RC for * config access */ - ret = mobiveil_host_init(pcie); + ret = mobiveil_host_init(pcie, false); if (ret) { dev_err(dev, "Failed to initialize host\n"); return ret; } - /* initialize the IRQ domains */ - ret = mobiveil_pcie_init_irq_domain(pcie); + ret = mobiveil_pcie_interrupt_init(pcie); if (ret) { - dev_err(dev, "Failed creating IRQ Domain\n"); + dev_err(dev, "Interrupt init failed\n"); return ret; } - irq_set_chained_handler_and_data(pcie->irq, mobiveil_pcie_isr, pcie); - /* Initialize bridge */ bridge->dev.parent = dev; bridge->sysdata = pcie; - bridge->busnr = pcie->root_bus_nr; + bridge->busnr = rp->root_bus_nr; bridge->ops = &mobiveil_pcie_ops; bridge->map_irq = of_irq_parse_and_map_pci; bridge->swizzle_irq = pci_common_swizzle; @@ -934,25 +634,3 @@ static int mobiveil_pcie_probe(struct platform_device *pdev) return 0; } - -static const struct of_device_id mobiveil_pcie_of_match[] = { - {.compatible = "mbvl,gpex40-pcie",}, - {}, -}; - -MODULE_DEVICE_TABLE(of, mobiveil_pcie_of_match); - -static struct platform_driver mobiveil_pcie_driver = { - .probe = mobiveil_pcie_probe, - .driver = { - .name = "mobiveil-pcie", - .of_match_table = mobiveil_pcie_of_match, - .suppress_bind_attrs = true, - }, -}; - -builtin_platform_driver(mobiveil_pcie_driver); - -MODULE_LICENSE("GPL v2"); -MODULE_DESCRIPTION("Mobiveil PCIe host controller driver"); -MODULE_AUTHOR("Subrahmanya Lingappa <l.subrahmanya@mobiveil.co.in>"); diff --git a/drivers/pci/controller/mobiveil/pcie-mobiveil-plat.c b/drivers/pci/controller/mobiveil/pcie-mobiveil-plat.c new file mode 100644 index 000000000000..f6fcd95c2bf5 --- /dev/null +++ b/drivers/pci/controller/mobiveil/pcie-mobiveil-plat.c @@ -0,0 +1,61 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * PCIe host controller driver for Mobiveil PCIe Host controller + * + * Copyright (c) 2018 Mobiveil Inc. 
+ * Copyright 2019 NXP + * + * Author: Subrahmanya Lingappa <l.subrahmanya@mobiveil.co.in> + * Hou Zhiqiang <Zhiqiang.Hou@nxp.com> + */ + +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/of_pci.h> +#include <linux/pci.h> +#include <linux/platform_device.h> +#include <linux/slab.h> + +#include "pcie-mobiveil.h" + +static int mobiveil_pcie_probe(struct platform_device *pdev) +{ + struct mobiveil_pcie *pcie; + struct pci_host_bridge *bridge; + struct device *dev = &pdev->dev; + + /* allocate the PCIe port */ + bridge = devm_pci_alloc_host_bridge(dev, sizeof(*pcie)); + if (!bridge) + return -ENOMEM; + + pcie = pci_host_bridge_priv(bridge); + pcie->rp.bridge = bridge; + + pcie->pdev = pdev; + + return mobiveil_pcie_host_probe(pcie); +} + +static const struct of_device_id mobiveil_pcie_of_match[] = { + {.compatible = "mbvl,gpex40-pcie",}, + {}, +}; + +MODULE_DEVICE_TABLE(of, mobiveil_pcie_of_match); + +static struct platform_driver mobiveil_pcie_driver = { + .probe = mobiveil_pcie_probe, + .driver = { + .name = "mobiveil-pcie", + .of_match_table = mobiveil_pcie_of_match, + .suppress_bind_attrs = true, + }, +}; + +builtin_platform_driver(mobiveil_pcie_driver); + +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("Mobiveil PCIe host controller driver"); +MODULE_AUTHOR("Subrahmanya Lingappa <l.subrahmanya@mobiveil.co.in>"); diff --git a/drivers/pci/controller/mobiveil/pcie-mobiveil.c b/drivers/pci/controller/mobiveil/pcie-mobiveil.c new file mode 100644 index 000000000000..62ecbaeb0a60 --- /dev/null +++ b/drivers/pci/controller/mobiveil/pcie-mobiveil.c @@ -0,0 +1,231 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * PCIe host controller driver for Mobiveil PCIe Host controller + * + * Copyright (c) 2018 Mobiveil Inc. + * Copyright 2019 NXP + * + * Author: Subrahmanya Lingappa <l.subrahmanya@mobiveil.co.in> + * Hou Zhiqiang <Zhiqiang.Hou@nxp.com> + */ + +#include <linux/delay.h> +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/pci.h> +#include <linux/platform_device.h> + +#include "pcie-mobiveil.h" + +/* + * mobiveil_pcie_sel_page - routine to access paged register + * + * Registers whose address greater than PAGED_ADDR_BNDRY (0xc00) are paged, + * for this scheme to work extracted higher 6 bits of the offset will be + * written to pg_sel field of PAB_CTRL register and rest of the lower 10 + * bits enabled with PAGED_ADDR_BNDRY are used as offset of the register. 
+ */ +static void mobiveil_pcie_sel_page(struct mobiveil_pcie *pcie, u8 pg_idx) +{ + u32 val; + + val = readl(pcie->csr_axi_slave_base + PAB_CTRL); + val &= ~(PAGE_SEL_MASK << PAGE_SEL_SHIFT); + val |= (pg_idx & PAGE_SEL_MASK) << PAGE_SEL_SHIFT; + + writel(val, pcie->csr_axi_slave_base + PAB_CTRL); +} + +static void __iomem *mobiveil_pcie_comp_addr(struct mobiveil_pcie *pcie, + u32 off) +{ + if (off < PAGED_ADDR_BNDRY) { + /* For directly accessed registers, clear the pg_sel field */ + mobiveil_pcie_sel_page(pcie, 0); + return pcie->csr_axi_slave_base + off; + } + + mobiveil_pcie_sel_page(pcie, OFFSET_TO_PAGE_IDX(off)); + return pcie->csr_axi_slave_base + OFFSET_TO_PAGE_ADDR(off); +} + +static int mobiveil_pcie_read(void __iomem *addr, int size, u32 *val) +{ + if ((uintptr_t)addr & (size - 1)) { + *val = 0; + return PCIBIOS_BAD_REGISTER_NUMBER; + } + + switch (size) { + case 4: + *val = readl(addr); + break; + case 2: + *val = readw(addr); + break; + case 1: + *val = readb(addr); + break; + default: + *val = 0; + return PCIBIOS_BAD_REGISTER_NUMBER; + } + + return PCIBIOS_SUCCESSFUL; +} + +static int mobiveil_pcie_write(void __iomem *addr, int size, u32 val) +{ + if ((uintptr_t)addr & (size - 1)) + return PCIBIOS_BAD_REGISTER_NUMBER; + + switch (size) { + case 4: + writel(val, addr); + break; + case 2: + writew(val, addr); + break; + case 1: + writeb(val, addr); + break; + default: + return PCIBIOS_BAD_REGISTER_NUMBER; + } + + return PCIBIOS_SUCCESSFUL; +} + +u32 mobiveil_csr_read(struct mobiveil_pcie *pcie, u32 off, size_t size) +{ + void __iomem *addr; + u32 val; + int ret; + + addr = mobiveil_pcie_comp_addr(pcie, off); + + ret = mobiveil_pcie_read(addr, size, &val); + if (ret) + dev_err(&pcie->pdev->dev, "read CSR address failed\n"); + + return val; +} + +void mobiveil_csr_write(struct mobiveil_pcie *pcie, u32 val, u32 off, + size_t size) +{ + void __iomem *addr; + int ret; + + addr = mobiveil_pcie_comp_addr(pcie, off); + + ret = mobiveil_pcie_write(addr, size, val); + if (ret) + dev_err(&pcie->pdev->dev, "write CSR address failed\n"); +} + +bool mobiveil_pcie_link_up(struct mobiveil_pcie *pcie) +{ + if (pcie->ops->link_up) + return pcie->ops->link_up(pcie); + + return (mobiveil_csr_readl(pcie, LTSSM_STATUS) & + LTSSM_STATUS_L0_MASK) == LTSSM_STATUS_L0; +} + +void program_ib_windows(struct mobiveil_pcie *pcie, int win_num, + u64 cpu_addr, u64 pci_addr, u32 type, u64 size) +{ + u32 value; + u64 size64 = ~(size - 1); + + if (win_num >= pcie->ppio_wins) { + dev_err(&pcie->pdev->dev, + "ERROR: max inbound windows reached !\n"); + return; + } + + value = mobiveil_csr_readl(pcie, PAB_PEX_AMAP_CTRL(win_num)); + value &= ~(AMAP_CTRL_TYPE_MASK << AMAP_CTRL_TYPE_SHIFT | WIN_SIZE_MASK); + value |= type << AMAP_CTRL_TYPE_SHIFT | 1 << AMAP_CTRL_EN_SHIFT | + (lower_32_bits(size64) & WIN_SIZE_MASK); + mobiveil_csr_writel(pcie, value, PAB_PEX_AMAP_CTRL(win_num)); + + mobiveil_csr_writel(pcie, upper_32_bits(size64), + PAB_EXT_PEX_AMAP_SIZEN(win_num)); + + mobiveil_csr_writel(pcie, lower_32_bits(cpu_addr), + PAB_PEX_AMAP_AXI_WIN(win_num)); + mobiveil_csr_writel(pcie, upper_32_bits(cpu_addr), + PAB_EXT_PEX_AMAP_AXI_WIN(win_num)); + + mobiveil_csr_writel(pcie, lower_32_bits(pci_addr), + PAB_PEX_AMAP_PEX_WIN_L(win_num)); + mobiveil_csr_writel(pcie, upper_32_bits(pci_addr), + PAB_PEX_AMAP_PEX_WIN_H(win_num)); + + pcie->ib_wins_configured++; +} + +/* + * routine to program the outbound windows + */ +void program_ob_windows(struct mobiveil_pcie *pcie, int win_num, + u64 cpu_addr, u64 pci_addr, u32 type, u64 
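/*
 * The ~(size - 1) encoding in program_ib_windows() above (and in
 * program_ob_windows() just below) assumes a power-of-two window and
 * splits the resulting mask across the AMAP control register (low bits,
 * filtered by WIN_SIZE_MASK) and the EXT size register (high 32 bits).
 * Standalone sanity-check program, not driver code:
 */
#include <stdint.h>
#include <stdio.h>

#define WIN_SIZE_MASK	0xfffffc00

int main(void)
{
	uint64_t size = 256ULL * 1024 * 1024 * 1024;	/* IB_WIN_SIZE */
	uint64_t size64 = ~(size - 1);

	/* prints: lo 0x00000000 hi 0xffffffc0 */
	printf("lo 0x%08x hi 0x%08x\n",
	       (uint32_t)size64 & WIN_SIZE_MASK,
	       (uint32_t)(size64 >> 32));
	return 0;
}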
size) +{ + u32 value; + u64 size64 = ~(size - 1); + + if (win_num >= pcie->apio_wins) { + dev_err(&pcie->pdev->dev, + "ERROR: max outbound windows reached !\n"); + return; + } + + /* + * program Enable Bit to 1, Type Bit to (00) base 2, AXI Window Size Bit + * to 4 KB in PAB_AXI_AMAP_CTRL register + */ + value = mobiveil_csr_readl(pcie, PAB_AXI_AMAP_CTRL(win_num)); + value &= ~(WIN_TYPE_MASK << WIN_TYPE_SHIFT | WIN_SIZE_MASK); + value |= 1 << WIN_ENABLE_SHIFT | type << WIN_TYPE_SHIFT | + (lower_32_bits(size64) & WIN_SIZE_MASK); + mobiveil_csr_writel(pcie, value, PAB_AXI_AMAP_CTRL(win_num)); + + mobiveil_csr_writel(pcie, upper_32_bits(size64), + PAB_EXT_AXI_AMAP_SIZE(win_num)); + + /* + * program AXI window base with appropriate value in + * PAB_AXI_AMAP_AXI_WIN0 register + */ + mobiveil_csr_writel(pcie, + lower_32_bits(cpu_addr) & (~AXI_WINDOW_ALIGN_MASK), + PAB_AXI_AMAP_AXI_WIN(win_num)); + mobiveil_csr_writel(pcie, upper_32_bits(cpu_addr), + PAB_EXT_AXI_AMAP_AXI_WIN(win_num)); + + mobiveil_csr_writel(pcie, lower_32_bits(pci_addr), + PAB_AXI_AMAP_PEX_WIN_L(win_num)); + mobiveil_csr_writel(pcie, upper_32_bits(pci_addr), + PAB_AXI_AMAP_PEX_WIN_H(win_num)); + + pcie->ob_wins_configured++; +} + +int mobiveil_bringup_link(struct mobiveil_pcie *pcie) +{ + int retries; + + /* check if the link is up or not */ + for (retries = 0; retries < LINK_WAIT_MAX_RETRIES; retries++) { + if (mobiveil_pcie_link_up(pcie)) + return 0; + + usleep_range(LINK_WAIT_MIN, LINK_WAIT_MAX); + } + + dev_err(&pcie->pdev->dev, "link never came up\n"); + + return -ETIMEDOUT; +} diff --git a/drivers/pci/controller/mobiveil/pcie-mobiveil.h b/drivers/pci/controller/mobiveil/pcie-mobiveil.h new file mode 100644 index 000000000000..767e36a8522d --- /dev/null +++ b/drivers/pci/controller/mobiveil/pcie-mobiveil.h @@ -0,0 +1,226 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * PCIe host controller driver for Mobiveil PCIe Host controller + * + * Copyright (c) 2018 Mobiveil Inc. 
+ * Copyright 2019 NXP + * + * Author: Subrahmanya Lingappa <l.subrahmanya@mobiveil.co.in> + * Hou Zhiqiang <Zhiqiang.Hou@nxp.com> + */ + +#ifndef _PCIE_MOBIVEIL_H +#define _PCIE_MOBIVEIL_H + +#include <linux/pci.h> +#include <linux/irq.h> +#include <linux/msi.h> +#include "../../pci.h" + +/* register offsets and bit positions */ + +/* + * translation tables are grouped into windows, each window registers are + * grouped into blocks of 4 or 16 registers each + */ +#define PAB_REG_BLOCK_SIZE 16 +#define PAB_EXT_REG_BLOCK_SIZE 4 + +#define PAB_REG_ADDR(offset, win) \ + (offset + (win * PAB_REG_BLOCK_SIZE)) +#define PAB_EXT_REG_ADDR(offset, win) \ + (offset + (win * PAB_EXT_REG_BLOCK_SIZE)) + +#define LTSSM_STATUS 0x0404 +#define LTSSM_STATUS_L0_MASK 0x3f +#define LTSSM_STATUS_L0 0x2d + +#define PAB_CTRL 0x0808 +#define AMBA_PIO_ENABLE_SHIFT 0 +#define PEX_PIO_ENABLE_SHIFT 1 +#define PAGE_SEL_SHIFT 13 +#define PAGE_SEL_MASK 0x3f +#define PAGE_LO_MASK 0x3ff +#define PAGE_SEL_OFFSET_SHIFT 10 + +#define PAB_ACTIVITY_STAT 0x81c + +#define PAB_AXI_PIO_CTRL 0x0840 +#define APIO_EN_MASK 0xf + +#define PAB_PEX_PIO_CTRL 0x08c0 +#define PIO_ENABLE_SHIFT 0 + +#define PAB_INTP_AMBA_MISC_ENB 0x0b0c +#define PAB_INTP_AMBA_MISC_STAT 0x0b1c +#define PAB_INTP_RESET BIT(1) +#define PAB_INTP_MSI BIT(3) +#define PAB_INTP_INTA BIT(5) +#define PAB_INTP_INTB BIT(6) +#define PAB_INTP_INTC BIT(7) +#define PAB_INTP_INTD BIT(8) +#define PAB_INTP_PCIE_UE BIT(9) +#define PAB_INTP_IE_PMREDI BIT(29) +#define PAB_INTP_IE_EC BIT(30) +#define PAB_INTP_MSI_MASK PAB_INTP_MSI +#define PAB_INTP_INTX_MASK (PAB_INTP_INTA | PAB_INTP_INTB |\ + PAB_INTP_INTC | PAB_INTP_INTD) + +#define PAB_AXI_AMAP_CTRL(win) PAB_REG_ADDR(0x0ba0, win) +#define WIN_ENABLE_SHIFT 0 +#define WIN_TYPE_SHIFT 1 +#define WIN_TYPE_MASK 0x3 +#define WIN_SIZE_MASK 0xfffffc00 + +#define PAB_EXT_AXI_AMAP_SIZE(win) PAB_EXT_REG_ADDR(0xbaf0, win) + +#define PAB_EXT_AXI_AMAP_AXI_WIN(win) PAB_EXT_REG_ADDR(0x80a0, win) +#define PAB_AXI_AMAP_AXI_WIN(win) PAB_REG_ADDR(0x0ba4, win) +#define AXI_WINDOW_ALIGN_MASK 3 + +#define PAB_AXI_AMAP_PEX_WIN_L(win) PAB_REG_ADDR(0x0ba8, win) +#define PAB_BUS_SHIFT 24 +#define PAB_DEVICE_SHIFT 19 +#define PAB_FUNCTION_SHIFT 16 + +#define PAB_AXI_AMAP_PEX_WIN_H(win) PAB_REG_ADDR(0x0bac, win) +#define PAB_INTP_AXI_PIO_CLASS 0x474 + +#define PAB_PEX_AMAP_CTRL(win) PAB_REG_ADDR(0x4ba0, win) +#define AMAP_CTRL_EN_SHIFT 0 +#define AMAP_CTRL_TYPE_SHIFT 1 +#define AMAP_CTRL_TYPE_MASK 3 + +#define PAB_EXT_PEX_AMAP_SIZEN(win) PAB_EXT_REG_ADDR(0xbef0, win) +#define PAB_EXT_PEX_AMAP_AXI_WIN(win) PAB_EXT_REG_ADDR(0xb4a0, win) +#define PAB_PEX_AMAP_AXI_WIN(win) PAB_REG_ADDR(0x4ba4, win) +#define PAB_PEX_AMAP_PEX_WIN_L(win) PAB_REG_ADDR(0x4ba8, win) +#define PAB_PEX_AMAP_PEX_WIN_H(win) PAB_REG_ADDR(0x4bac, win) + +/* starting offset of INTX bits in status register */ +#define PAB_INTX_START 5 + +/* supported number of MSI interrupts */ +#define PCI_NUM_MSI 16 + +/* MSI registers */ +#define MSI_BASE_LO_OFFSET 0x04 +#define MSI_BASE_HI_OFFSET 0x08 +#define MSI_SIZE_OFFSET 0x0c +#define MSI_ENABLE_OFFSET 0x14 +#define MSI_STATUS_OFFSET 0x18 +#define MSI_DATA_OFFSET 0x20 +#define MSI_ADDR_L_OFFSET 0x24 +#define MSI_ADDR_H_OFFSET 0x28 + +/* outbound and inbound window definitions */ +#define WIN_NUM_0 0 +#define WIN_NUM_1 1 +#define CFG_WINDOW_TYPE 0 +#define IO_WINDOW_TYPE 1 +#define MEM_WINDOW_TYPE 2 +#define IB_WIN_SIZE ((u64)256 * 1024 * 1024 * 1024) +#define MAX_PIO_WINDOWS 8 + +/* Parameters for the waiting for link up routine */ +#define 
LINK_WAIT_MAX_RETRIES 10 +#define LINK_WAIT_MIN 90000 +#define LINK_WAIT_MAX 100000 + +#define PAGED_ADDR_BNDRY 0xc00 +#define OFFSET_TO_PAGE_ADDR(off) \ + ((off & PAGE_LO_MASK) | PAGED_ADDR_BNDRY) +#define OFFSET_TO_PAGE_IDX(off) \ + ((off >> PAGE_SEL_OFFSET_SHIFT) & PAGE_SEL_MASK) + +struct mobiveil_msi { /* MSI information */ + struct mutex lock; /* protect bitmap variable */ + struct irq_domain *msi_domain; + struct irq_domain *dev_domain; + phys_addr_t msi_pages_phys; + int num_of_vectors; + DECLARE_BITMAP(msi_irq_in_use, PCI_NUM_MSI); +}; + +struct mobiveil_pcie; + +struct mobiveil_rp_ops { + int (*interrupt_init)(struct mobiveil_pcie *pcie); +}; + +struct mobiveil_root_port { + char root_bus_nr; + void __iomem *config_axi_slave_base; /* endpoint config base */ + struct resource *ob_io_res; + struct mobiveil_rp_ops *ops; + int irq; + raw_spinlock_t intx_mask_lock; + struct irq_domain *intx_domain; + struct mobiveil_msi msi; + struct pci_host_bridge *bridge; +}; + +struct mobiveil_pab_ops { + int (*link_up)(struct mobiveil_pcie *pcie); +}; + +struct mobiveil_pcie { + struct platform_device *pdev; + void __iomem *csr_axi_slave_base; /* root port config base */ + void __iomem *apb_csr_base; /* MSI register base */ + phys_addr_t pcie_reg_base; /* Physical PCIe Controller Base */ + int apio_wins; + int ppio_wins; + int ob_wins_configured; /* configured outbound windows */ + int ib_wins_configured; /* configured inbound windows */ + const struct mobiveil_pab_ops *ops; + struct mobiveil_root_port rp; +}; + +int mobiveil_pcie_host_probe(struct mobiveil_pcie *pcie); +int mobiveil_host_init(struct mobiveil_pcie *pcie, bool reinit); +bool mobiveil_pcie_link_up(struct mobiveil_pcie *pcie); +int mobiveil_bringup_link(struct mobiveil_pcie *pcie); +void program_ob_windows(struct mobiveil_pcie *pcie, int win_num, u64 cpu_addr, + u64 pci_addr, u32 type, u64 size); +void program_ib_windows(struct mobiveil_pcie *pcie, int win_num, u64 cpu_addr, + u64 pci_addr, u32 type, u64 size); +u32 mobiveil_csr_read(struct mobiveil_pcie *pcie, u32 off, size_t size); +void mobiveil_csr_write(struct mobiveil_pcie *pcie, u32 val, u32 off, + size_t size); + +static inline u32 mobiveil_csr_readl(struct mobiveil_pcie *pcie, u32 off) +{ + return mobiveil_csr_read(pcie, off, 0x4); +} + +static inline u16 mobiveil_csr_readw(struct mobiveil_pcie *pcie, u32 off) +{ + return mobiveil_csr_read(pcie, off, 0x2); +} + +static inline u8 mobiveil_csr_readb(struct mobiveil_pcie *pcie, u32 off) +{ + return mobiveil_csr_read(pcie, off, 0x1); +} + + +static inline void mobiveil_csr_writel(struct mobiveil_pcie *pcie, u32 val, + u32 off) +{ + mobiveil_csr_write(pcie, val, off, 0x4); +} + +static inline void mobiveil_csr_writew(struct mobiveil_pcie *pcie, u16 val, + u32 off) +{ + mobiveil_csr_write(pcie, val, off, 0x2); +} + +static inline void mobiveil_csr_writeb(struct mobiveil_pcie *pcie, u8 val, + u32 off) +{ + mobiveil_csr_write(pcie, val, off, 0x1); +} + +#endif /* _PCIE_MOBIVEIL_H */ diff --git a/drivers/pci/controller/pci-hyperv.c b/drivers/pci/controller/pci-hyperv.c index 9977abff92fc..e15022ff63e3 100644 --- a/drivers/pci/controller/pci-hyperv.c +++ b/drivers/pci/controller/pci-hyperv.c @@ -63,6 +63,7 @@ enum pci_protocol_version_t { PCI_PROTOCOL_VERSION_1_1 = PCI_MAKE_VERSION(1, 1), /* Win10 */ PCI_PROTOCOL_VERSION_1_2 = PCI_MAKE_VERSION(1, 2), /* RS1 */ + PCI_PROTOCOL_VERSION_1_3 = PCI_MAKE_VERSION(1, 3), /* Vibranium */ }; #define CPU_AFFINITY_ALL -1ULL @@ -72,6 +73,7 @@ enum pci_protocol_version_t { * first. 
*/ static enum pci_protocol_version_t pci_protocol_versions[] = { + PCI_PROTOCOL_VERSION_1_3, PCI_PROTOCOL_VERSION_1_2, PCI_PROTOCOL_VERSION_1_1, }; @@ -119,6 +121,7 @@ enum pci_message_type { PCI_RESOURCES_ASSIGNED2 = PCI_MESSAGE_BASE + 0x16, PCI_CREATE_INTERRUPT_MESSAGE2 = PCI_MESSAGE_BASE + 0x17, PCI_DELETE_INTERRUPT_MESSAGE2 = PCI_MESSAGE_BASE + 0x18, /* unused */ + PCI_BUS_RELATIONS2 = PCI_MESSAGE_BASE + 0x19, PCI_MESSAGE_MAXIMUM }; @@ -164,6 +167,26 @@ struct pci_function_description { u32 ser; /* serial number */ } __packed; +enum pci_device_description_flags { + HV_PCI_DEVICE_FLAG_NONE = 0x0, + HV_PCI_DEVICE_FLAG_NUMA_AFFINITY = 0x1, +}; + +struct pci_function_description2 { + u16 v_id; /* vendor ID */ + u16 d_id; /* device ID */ + u8 rev; + u8 prog_intf; + u8 subclass; + u8 base_class; + u32 subsystem_id; + union win_slot_encoding win_slot; + u32 ser; /* serial number */ + u32 flags; + u16 virtual_numa_node; + u16 reserved; +} __packed; + /** * struct hv_msi_desc * @vector: IDT entry @@ -260,7 +283,7 @@ struct pci_packet { int resp_packet_size); void *compl_ctxt; - struct pci_message message[0]; + struct pci_message message[]; }; /* @@ -296,7 +319,13 @@ struct pci_bus_d0_entry { struct pci_bus_relations { struct pci_incoming_message incoming; u32 device_count; - struct pci_function_description func[0]; + struct pci_function_description func[]; +} __packed; + +struct pci_bus_relations2 { + struct pci_incoming_message incoming; + u32 device_count; + struct pci_function_description2 func[]; } __packed; struct pci_q_res_req_response { @@ -407,42 +436,6 @@ struct pci_eject_response { static int pci_ring_size = (4 * PAGE_SIZE); /* - * Definitions or interrupt steering hypercall. - */ -#define HV_PARTITION_ID_SELF ((u64)-1) -#define HVCALL_RETARGET_INTERRUPT 0x7e - -struct hv_interrupt_entry { - u32 source; /* 1 for MSI(-X) */ - u32 reserved1; - u32 address; - u32 data; -}; - -/* - * flags for hv_device_interrupt_target.flags - */ -#define HV_DEVICE_INTERRUPT_TARGET_MULTICAST 1 -#define HV_DEVICE_INTERRUPT_TARGET_PROCESSOR_SET 2 - -struct hv_device_interrupt_target { - u32 vector; - u32 flags; - union { - u64 vp_mask; - struct hv_vpset vp_set; - }; -}; - -struct retarget_msi_interrupt { - u64 partition_id; /* use "self" */ - u64 device_id; - struct hv_interrupt_entry int_entry; - u64 reserved2; - struct hv_device_interrupt_target int_target; -} __packed __aligned(8); - -/* * Driver specific state. 
*/ @@ -488,7 +481,7 @@ struct hv_pcibus_device { struct workqueue_struct *wq; /* hypercall arg, must not cross page boundary */ - struct retarget_msi_interrupt retarget_msi_interrupt_params; + struct hv_retarget_device_interrupt retarget_msi_interrupt_params; /* * Don't put anything here: retarget_msi_interrupt_params must be last @@ -505,10 +498,24 @@ struct hv_dr_work { struct hv_pcibus_device *bus; }; +struct hv_pcidev_description { + u16 v_id; /* vendor ID */ + u16 d_id; /* device ID */ + u8 rev; + u8 prog_intf; + u8 subclass; + u8 base_class; + u32 subsystem_id; + union win_slot_encoding win_slot; + u32 ser; /* serial number */ + u32 flags; + u16 virtual_numa_node; +}; + struct hv_dr_state { struct list_head list_entry; u32 device_count; - struct pci_function_description func[0]; + struct hv_pcidev_description func[]; }; enum hv_pcichild_state { @@ -525,7 +532,7 @@ struct hv_pci_dev { refcount_t refs; enum hv_pcichild_state state; struct pci_slot *pci_slot; - struct pci_function_description desc; + struct hv_pcidev_description desc; bool reported_missing; struct hv_pcibus_device *hbus; struct work_struct wrk; @@ -1184,7 +1191,7 @@ static void hv_irq_unmask(struct irq_data *data) { struct msi_desc *msi_desc = irq_data_get_msi_desc(data); struct irq_cfg *cfg = irqd_cfg(data); - struct retarget_msi_interrupt *params; + struct hv_retarget_device_interrupt *params; struct hv_pcibus_device *hbus; struct cpumask *dest; cpumask_var_t tmp; @@ -1206,8 +1213,7 @@ static void hv_irq_unmask(struct irq_data *data) memset(params, 0, sizeof(*params)); params->partition_id = HV_PARTITION_ID_SELF; params->int_entry.source = 1; /* MSI(-X) */ - params->int_entry.address = msi_desc->msg.address_lo; - params->int_entry.data = msi_desc->msg.data; + hv_set_msi_entry_from_desc(¶ms->int_entry.msi_entry, msi_desc); params->device_id = (hbus->hdev->dev_instance.b[5] << 24) | (hbus->hdev->dev_instance.b[4] << 16) | (hbus->hdev->dev_instance.b[7] << 8) | @@ -1401,6 +1407,7 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) break; case PCI_PROTOCOL_VERSION_1_2: + case PCI_PROTOCOL_VERSION_1_3: size = hv_compose_msi_req_v2(&ctxt.int_pkts.v2, dest, hpdev->desc.win_slot.slot, @@ -1799,6 +1806,27 @@ static void hv_pci_remove_slots(struct hv_pcibus_device *hbus) } } +/* + * Set NUMA node for the devices on the bus + */ +static void hv_pci_assign_numa_node(struct hv_pcibus_device *hbus) +{ + struct pci_dev *dev; + struct pci_bus *bus = hbus->pci_bus; + struct hv_pci_dev *hv_dev; + + list_for_each_entry(dev, &bus->devices, bus_list) { + hv_dev = get_pcichild_wslot(hbus, devfn_to_wslot(dev->devfn)); + if (!hv_dev) + continue; + + if (hv_dev->desc.flags & HV_PCI_DEVICE_FLAG_NUMA_AFFINITY) + set_dev_node(&dev->dev, hv_dev->desc.virtual_numa_node); + + put_pcichild(hv_dev); + } +} + /** * create_root_hv_pci_bus() - Expose a new root PCI bus * @hbus: Root PCI bus, as understood by this driver @@ -1821,6 +1849,7 @@ static int create_root_hv_pci_bus(struct hv_pcibus_device *hbus) pci_lock_rescan_remove(); pci_scan_child_bus(hbus->pci_bus); + hv_pci_assign_numa_node(hbus); pci_bus_assign_resources(hbus->pci_bus); hv_pci_assign_slots(hbus); pci_bus_add_devices(hbus->pci_bus); @@ -1877,7 +1906,7 @@ static void q_resource_requirements(void *context, struct pci_response *resp, * Return: Pointer to the new tracking struct */ static struct hv_pci_dev *new_pcichild_device(struct hv_pcibus_device *hbus, - struct pci_function_description *desc) + struct hv_pcidev_description *desc) { struct hv_pci_dev *hpdev; 
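/*
 * Context for the hv_dr_state change above: the zero-length func[0]
 * arrays become C99 flexible array members, and v1/v2 bus-relations
 * messages are normalized into the new hv_pcidev_description layout. The
 * allocation sites later in this patch keep the offsetof() idiom; below
 * is a sketch of the equivalent struct_size() form from
 * <linux/overflow.h> (helper name invented):
 */
static struct hv_dr_state *hv_dr_state_alloc_sketch(u32 device_count)
{
	struct hv_dr_state *dr;

	dr = kzalloc(struct_size(dr, func, device_count), GFP_NOWAIT);
	if (dr)
		dr->device_count = device_count;

	return dr;
}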
struct pci_child_message *res_req; @@ -1988,7 +2017,7 @@ static void pci_devices_present_work(struct work_struct *work) { u32 child_no; bool found; - struct pci_function_description *new_desc; + struct hv_pcidev_description *new_desc; struct hv_pci_dev *hpdev; struct hv_pcibus_device *hbus; struct list_head removed; @@ -2089,6 +2118,7 @@ static void pci_devices_present_work(struct work_struct *work) */ pci_lock_rescan_remove(); pci_scan_child_bus(hbus->pci_bus); + hv_pci_assign_numa_node(hbus); hv_pci_assign_slots(hbus); pci_unlock_rescan_remove(); break; @@ -2107,17 +2137,15 @@ static void pci_devices_present_work(struct work_struct *work) } /** - * hv_pci_devices_present() - Handles list of new children + * hv_pci_start_relations_work() - Queue work to start device discovery * @hbus: Root PCI bus, as understood by this driver - * @relations: Packet from host listing children + * @dr: The list of children returned from host * - * This function is invoked whenever a new list of devices for - * this bus appears. + * Return: 0 on success, -errno on failure */ -static void hv_pci_devices_present(struct hv_pcibus_device *hbus, - struct pci_bus_relations *relations) +static int hv_pci_start_relations_work(struct hv_pcibus_device *hbus, + struct hv_dr_state *dr) { - struct hv_dr_state *dr; struct hv_dr_work *dr_wrk; unsigned long flags; bool pending_dr; @@ -2125,29 +2153,15 @@ static void hv_pci_devices_present(struct hv_pcibus_device *hbus, if (hbus->state == hv_pcibus_removing) { dev_info(&hbus->hdev->device, "PCI VMBus BUS_RELATIONS: ignored\n"); - return; + return -ENOENT; } dr_wrk = kzalloc(sizeof(*dr_wrk), GFP_NOWAIT); if (!dr_wrk) - return; - - dr = kzalloc(offsetof(struct hv_dr_state, func) + - (sizeof(struct pci_function_description) * - (relations->device_count)), GFP_NOWAIT); - if (!dr) { - kfree(dr_wrk); - return; - } + return -ENOMEM; INIT_WORK(&dr_wrk->wrk, pci_devices_present_work); dr_wrk->bus = hbus; - dr->device_count = relations->device_count; - if (dr->device_count != 0) { - memcpy(dr->func, relations->func, - sizeof(struct pci_function_description) * - dr->device_count); - } spin_lock_irqsave(&hbus->device_list_lock, flags); /* @@ -2165,6 +2179,87 @@ static void hv_pci_devices_present(struct hv_pcibus_device *hbus, get_hvpcibus(hbus); queue_work(hbus->wq, &dr_wrk->wrk); } + + return 0; +} + +/** + * hv_pci_devices_present() - Handle list of new children + * @hbus: Root PCI bus, as understood by this driver + * @relations: Packet from host listing children + * + * Process a new list of devices on the bus. The list of devices is + * discovered by VSP and sent to us via VSP message PCI_BUS_RELATIONS, + * whenever a new list of devices for this bus appears. 
+ */ +static void hv_pci_devices_present(struct hv_pcibus_device *hbus, + struct pci_bus_relations *relations) +{ + struct hv_dr_state *dr; + int i; + + dr = kzalloc(offsetof(struct hv_dr_state, func) + + (sizeof(struct hv_pcidev_description) * + (relations->device_count)), GFP_NOWAIT); + + if (!dr) + return; + + dr->device_count = relations->device_count; + for (i = 0; i < dr->device_count; i++) { + dr->func[i].v_id = relations->func[i].v_id; + dr->func[i].d_id = relations->func[i].d_id; + dr->func[i].rev = relations->func[i].rev; + dr->func[i].prog_intf = relations->func[i].prog_intf; + dr->func[i].subclass = relations->func[i].subclass; + dr->func[i].base_class = relations->func[i].base_class; + dr->func[i].subsystem_id = relations->func[i].subsystem_id; + dr->func[i].win_slot = relations->func[i].win_slot; + dr->func[i].ser = relations->func[i].ser; + } + + if (hv_pci_start_relations_work(hbus, dr)) + kfree(dr); +} + +/** + * hv_pci_devices_present2() - Handle list of new children + * @hbus: Root PCI bus, as understood by this driver + * @relations: Packet from host listing children + * + * This function is the v2 version of hv_pci_devices_present() + */ +static void hv_pci_devices_present2(struct hv_pcibus_device *hbus, + struct pci_bus_relations2 *relations) +{ + struct hv_dr_state *dr; + int i; + + dr = kzalloc(offsetof(struct hv_dr_state, func) + + (sizeof(struct hv_pcidev_description) * + (relations->device_count)), GFP_NOWAIT); + + if (!dr) + return; + + dr->device_count = relations->device_count; + for (i = 0; i < dr->device_count; i++) { + dr->func[i].v_id = relations->func[i].v_id; + dr->func[i].d_id = relations->func[i].d_id; + dr->func[i].rev = relations->func[i].rev; + dr->func[i].prog_intf = relations->func[i].prog_intf; + dr->func[i].subclass = relations->func[i].subclass; + dr->func[i].base_class = relations->func[i].base_class; + dr->func[i].subsystem_id = relations->func[i].subsystem_id; + dr->func[i].win_slot = relations->func[i].win_slot; + dr->func[i].ser = relations->func[i].ser; + dr->func[i].flags = relations->func[i].flags; + dr->func[i].virtual_numa_node = + relations->func[i].virtual_numa_node; + } + + if (hv_pci_start_relations_work(hbus, dr)) + kfree(dr); } /** @@ -2280,6 +2375,7 @@ static void hv_pci_onchannelcallback(void *context) struct pci_response *response; struct pci_incoming_message *new_message; struct pci_bus_relations *bus_rel; + struct pci_bus_relations2 *bus_rel2; struct pci_dev_inval_block *inval; struct pci_dev_incoming *dev_message; struct hv_pci_dev *hpdev; @@ -2347,6 +2443,21 @@ static void hv_pci_onchannelcallback(void *context) hv_pci_devices_present(hbus, bus_rel); break; + case PCI_BUS_RELATIONS2: + + bus_rel2 = (struct pci_bus_relations2 *)buffer; + if (bytes_recvd < + offsetof(struct pci_bus_relations2, func) + + (sizeof(struct pci_function_description2) * + (bus_rel2->device_count))) { + dev_err(&hbus->hdev->device, + "bus relations v2 too small\n"); + break; + } + + hv_pci_devices_present2(hbus, bus_rel2); + break; + case PCI_EJECT: dev_message = (struct pci_dev_incoming *)buffer; @@ -2922,7 +3033,7 @@ static int hv_pci_probe(struct hv_device *hdev, * positive by using kmemleak_alloc() and kmemleak_free() to ask * kmemleak to track and scan the hbus buffer. 
*/ - hbus = (struct hv_pcibus_device *)kzalloc(HV_HYP_PAGE_SIZE, GFP_KERNEL); + hbus = kzalloc(HV_HYP_PAGE_SIZE, GFP_KERNEL); if (!hbus) return -ENOMEM; hbus->state = hv_pcibus_init; @@ -3058,7 +3169,7 @@ destroy_wq: free_dom: hv_put_dom_num(hbus->sysdata.domain); free_bus: - free_page((unsigned long)hbus); + kfree(hbus); return ret; } @@ -3069,7 +3180,7 @@ static int hv_pci_bus_exit(struct hv_device *hdev, bool hibernating) struct pci_packet teardown_packet; u8 buffer[sizeof(struct pci_message)]; } pkt; - struct pci_bus_relations relations; + struct hv_dr_state *dr; struct hv_pci_compl comp_pkt; int ret; @@ -3082,8 +3193,9 @@ static int hv_pci_bus_exit(struct hv_device *hdev, bool hibernating) if (!hibernating) { /* Delete any children which might still exist. */ - memset(&relations, 0, sizeof(relations)); - hv_pci_devices_present(hbus, &relations); + dr = kzalloc(sizeof(*dr), GFP_KERNEL); + if (dr && hv_pci_start_relations_work(hbus, dr)) + kfree(dr); } ret = hv_send_resources_released(hdev); diff --git a/drivers/pci/controller/pci-tegra.c b/drivers/pci/controller/pci-tegra.c index 0e03cef72840..3e64ba6a36a8 100644 --- a/drivers/pci/controller/pci-tegra.c +++ b/drivers/pci/controller/pci-tegra.c @@ -355,16 +355,6 @@ struct tegra_pcie { int irq; struct resource cs; - struct resource io; - struct resource pio; - struct resource mem; - struct resource prefetch; - struct resource busn; - - struct { - resource_size_t mem; - resource_size_t io; - } offset; struct clk *pex_clk; struct clk *afi_clk; @@ -797,38 +787,6 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, 0x0bf1, tegra_pcie_relax_enable); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, 0x0e1c, tegra_pcie_relax_enable); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, 0x0e1d, tegra_pcie_relax_enable); -static int tegra_pcie_request_resources(struct tegra_pcie *pcie) -{ - struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie); - struct list_head *windows = &host->windows; - struct device *dev = pcie->dev; - int err; - - pci_add_resource_offset(windows, &pcie->pio, pcie->offset.io); - pci_add_resource_offset(windows, &pcie->mem, pcie->offset.mem); - pci_add_resource_offset(windows, &pcie->prefetch, pcie->offset.mem); - pci_add_resource(windows, &pcie->busn); - - err = devm_request_pci_bus_resources(dev, windows); - if (err < 0) { - pci_free_resource_list(windows); - return err; - } - - pci_remap_iospace(&pcie->pio, pcie->io.start); - - return 0; -} - -static void tegra_pcie_free_resources(struct tegra_pcie *pcie) -{ - struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie); - struct list_head *windows = &host->windows; - - pci_unmap_iospace(&pcie->pio); - pci_free_resource_list(windows); -} - static int tegra_pcie_map_irq(const struct pci_dev *pdev, u8 slot, u8 pin) { struct tegra_pcie *pcie = pdev->bus->sysdata; @@ -909,36 +867,49 @@ static irqreturn_t tegra_pcie_isr(int irq, void *arg) */ static void tegra_pcie_setup_translations(struct tegra_pcie *pcie) { - u32 fpci_bar, size, axi_address; + u32 size; + struct resource_entry *entry; + struct pci_host_bridge *bridge = pci_host_bridge_from_priv(pcie); /* Bar 0: type 1 extended configuration space */ size = resource_size(&pcie->cs); afi_writel(pcie, pcie->cs.start, AFI_AXI_BAR0_START); afi_writel(pcie, size >> 12, AFI_AXI_BAR0_SZ); - /* Bar 1: downstream IO bar */ - fpci_bar = 0xfdfc0000; - size = resource_size(&pcie->io); - axi_address = pcie->io.start; - afi_writel(pcie, axi_address, AFI_AXI_BAR1_START); - afi_writel(pcie, size >> 12, AFI_AXI_BAR1_SZ); - afi_writel(pcie, 
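/*
 * The FPCI BAR encoding in the rewritten loop above packs bits [39:12]
 * of the CPU address into bits [31:4] and sets bit 0 as the enable.
 * Standalone sanity check for a hypothetical window at 0x20000000
 * (address invented for illustration), not driver code:
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t start = 0x20000000;
	uint32_t fpci_bar = (((start >> 12) & 0x0fffffff) << 4) | 0x1;

	printf("fpci_bar = 0x%08x\n", fpci_bar);	/* 0x00200001 */
	return 0;
}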
fpci_bar, AFI_FPCI_BAR1); - - /* Bar 2: prefetchable memory BAR */ - fpci_bar = (((pcie->prefetch.start >> 12) & 0x0fffffff) << 4) | 0x1; - size = resource_size(&pcie->prefetch); - axi_address = pcie->prefetch.start; - afi_writel(pcie, axi_address, AFI_AXI_BAR2_START); - afi_writel(pcie, size >> 12, AFI_AXI_BAR2_SZ); - afi_writel(pcie, fpci_bar, AFI_FPCI_BAR2); - - /* Bar 3: non prefetchable memory BAR */ - fpci_bar = (((pcie->mem.start >> 12) & 0x0fffffff) << 4) | 0x1; - size = resource_size(&pcie->mem); - axi_address = pcie->mem.start; - afi_writel(pcie, axi_address, AFI_AXI_BAR3_START); - afi_writel(pcie, size >> 12, AFI_AXI_BAR3_SZ); - afi_writel(pcie, fpci_bar, AFI_FPCI_BAR3); + resource_list_for_each_entry(entry, &bridge->windows) { + u32 fpci_bar, axi_address; + struct resource *res = entry->res; + + size = resource_size(res); + + switch (resource_type(res)) { + case IORESOURCE_IO: + /* Bar 1: downstream IO bar */ + fpci_bar = 0xfdfc0000; + axi_address = pci_pio_to_address(res->start); + afi_writel(pcie, axi_address, AFI_AXI_BAR1_START); + afi_writel(pcie, size >> 12, AFI_AXI_BAR1_SZ); + afi_writel(pcie, fpci_bar, AFI_FPCI_BAR1); + break; + case IORESOURCE_MEM: + fpci_bar = (((res->start >> 12) & 0x0fffffff) << 4) | 0x1; + axi_address = res->start; + + if (res->flags & IORESOURCE_PREFETCH) { + /* Bar 2: prefetchable memory BAR */ + afi_writel(pcie, axi_address, AFI_AXI_BAR2_START); + afi_writel(pcie, size >> 12, AFI_AXI_BAR2_SZ); + afi_writel(pcie, fpci_bar, AFI_FPCI_BAR2); + + } else { + /* Bar 3: non prefetchable memory BAR */ + afi_writel(pcie, axi_address, AFI_AXI_BAR3_START); + afi_writel(pcie, size >> 12, AFI_AXI_BAR3_SZ); + afi_writel(pcie, fpci_bar, AFI_FPCI_BAR3); + } + break; + } + } /* NULL out the remaining BARs as they are not used */ afi_writel(pcie, 0, AFI_AXI_BAR4_START); @@ -2157,76 +2128,10 @@ static int tegra_pcie_parse_dt(struct tegra_pcie *pcie) struct device *dev = pcie->dev; struct device_node *np = dev->of_node, *port; const struct tegra_pcie_soc *soc = pcie->soc; - struct of_pci_range_parser parser; - struct of_pci_range range; u32 lanes = 0, mask = 0; unsigned int lane = 0; - struct resource res; int err; - if (of_pci_range_parser_init(&parser, np)) { - dev_err(dev, "missing \"ranges\" property\n"); - return -EINVAL; - } - - for_each_of_pci_range(&parser, &range) { - err = of_pci_range_to_resource(&range, np, &res); - if (err < 0) - return err; - - switch (res.flags & IORESOURCE_TYPE_BITS) { - case IORESOURCE_IO: - /* Track the bus -> CPU I/O mapping offset. */ - pcie->offset.io = res.start - range.pci_addr; - - memcpy(&pcie->pio, &res, sizeof(res)); - pcie->pio.name = np->full_name; - - /* - * The Tegra PCIe host bridge uses this to program the - * mapping of the I/O space to the physical address, - * so we override the .start and .end fields here that - * of_pci_range_to_resource() converted to I/O space. - * We also set the IORESOURCE_MEM type to clarify that - * the resource is in the physical memory space. - */ - pcie->io.start = range.cpu_addr; - pcie->io.end = range.cpu_addr + range.size - 1; - pcie->io.flags = IORESOURCE_MEM; - pcie->io.name = "I/O"; - - memcpy(&res, &pcie->io, sizeof(res)); - break; - - case IORESOURCE_MEM: - /* - * Track the bus -> CPU memory mapping offset. This - * assumes that the prefetchable and non-prefetchable - * regions will be the last of type IORESOURCE_MEM in - * the ranges property. 
- * */ - pcie->offset.mem = res.start - range.pci_addr; - - if (res.flags & IORESOURCE_PREFETCH) { - memcpy(&pcie->prefetch, &res, sizeof(res)); - pcie->prefetch.name = "prefetchable"; - } else { - memcpy(&pcie->mem, &res, sizeof(res)); - pcie->mem.name = "non-prefetchable"; - } - break; - } - } - - err = of_pci_parse_bus_range(np, &pcie->busn); - if (err < 0) { - dev_err(dev, "failed to parse ranges property: %d\n", err); - pcie->busn.name = np->name; - pcie->busn.start = 0; - pcie->busn.end = 0xff; - pcie->busn.flags = IORESOURCE_BUS; - } - /* parse root ports */ for_each_child_of_node(np, port) { struct tegra_pcie_port *rp; @@ -2766,6 +2671,7 @@ static int tegra_pcie_probe(struct platform_device *pdev) struct pci_host_bridge *host; struct tegra_pcie *pcie; struct pci_bus *child; + struct resource *bus; int err; host = devm_pci_alloc_host_bridge(dev, sizeof(*pcie)); @@ -2780,6 +2686,12 @@ static int tegra_pcie_probe(struct platform_device *pdev) INIT_LIST_HEAD(&pcie->ports); pcie->dev = dev; + err = pci_parse_request_of_pci_ranges(dev, &host->windows, NULL, &bus); + if (err) { + dev_err(dev, "Getting bridge resources failed\n"); + return err; + } + err = tegra_pcie_parse_dt(pcie); if (err < 0) return err; @@ -2803,11 +2715,7 @@ goto teardown_msi; } - err = tegra_pcie_request_resources(pcie); - if (err) - goto pm_runtime_put; - - host->busnr = pcie->busn.start; + host->busnr = bus->start; host->dev.parent = &pdev->dev; host->ops = &tegra_pcie_ops; host->map_irq = tegra_pcie_map_irq; @@ -2816,7 +2724,7 @@ err = pci_scan_root_bus_bridge(host); if (err < 0) { dev_err(dev, "failed to register host: %d\n", err); - goto free_resources; + goto pm_runtime_put; } pci_bus_size_bridges(host->bus); @@ -2835,8 +2743,6 @@ return 0; -free_resources: - tegra_pcie_free_resources(pcie); pm_runtime_put: pm_runtime_put_sync(pcie->dev); pm_runtime_disable(pcie->dev); @@ -2858,7 +2764,6 @@ static int tegra_pcie_remove(struct platform_device *pdev) pci_stop_root_bus(host->bus); pci_remove_root_bus(host->bus); - tegra_pcie_free_resources(pcie); pm_runtime_put_sync(pcie->dev); pm_runtime_disable(pcie->dev); diff --git a/drivers/pci/controller/pcie-brcmstb.c b/drivers/pci/controller/pcie-brcmstb.c index 3a10e678c7f4..6d79d14527a6 100644 --- a/drivers/pci/controller/pcie-brcmstb.c +++ b/drivers/pci/controller/pcie-brcmstb.c @@ -824,8 +824,8 @@ static int brcm_pcie_setup(struct brcm_pcie *pcie) cls = FIELD_GET(PCI_EXP_LNKSTA_CLS, lnksta); nlw = FIELD_GET(PCI_EXP_LNKSTA_NLW, lnksta); dev_info(dev, "link up, %s x%u %s\n", - PCIE_SPEED2STR(cls + PCI_SPEED_133MHz_PCIX_533), - nlw, ssc_good ? "(SSC)" : "(!SSC)"); + pci_speed_string(pcie_link_speed[cls]), nlw, + ssc_good ? "(SSC)" : "(!SSC)"); /* PCIe->SCB endian mode for BAR */ tmp = readl(base + PCIE_RC_CFG_VENDOR_VENDOR_SPECIFIC_REG1);
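The hv_pci_devices_present()/hv_pci_devices_present2() paths in the pci-hyperv.c hunks above both allocate a struct hv_dr_state with a trailing array of hv_pcidev_description entries in a single kzalloc(), sized as offsetof() of the header up to the array plus device_count elements. The following userspace sketch shows that single-allocation flexible-array pattern in isolation; struct desc, struct dr_state, and alloc_dr() are reduced stand-ins, not the driver's real definitions.

#include <stdio.h>
#include <stdlib.h>
#include <stddef.h>

struct desc {
	unsigned short v_id;
	unsigned short d_id;
};

struct dr_state {
	unsigned int device_count;
	struct desc func[];		/* flexible array member */
};

static struct dr_state *alloc_dr(unsigned int count)
{
	/* one allocation: header up to 'func', plus 'count' entries */
	struct dr_state *dr = calloc(1, offsetof(struct dr_state, func) +
					sizeof(struct desc) * count);

	if (dr)
		dr->device_count = count;
	return dr;
}

int main(void)
{
	struct dr_state *dr = alloc_dr(2);

	if (!dr)
		return 1;
	dr->func[0].v_id = 0x1414;	/* entries live in the trailing array */
	printf("%u devices, first v_id %#x\n",
	       dr->device_count, dr->func[0].v_id);
	free(dr);
	return 0;
}

The same shape with device_count == 0 is what the reworked hv_pci_bus_exit() queues via hv_pci_start_relations_work() to delete any children that still exist; in current kernels this kind of size computation is often written with struct_size() from <linux/overflow.h>, which also guards the multiplication against overflow.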
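Before hv_pci_onchannelcallback() hands a PCI_BUS_RELATIONS2 packet to hv_pci_devices_present2(), it checks that bytes_recvd covers the fixed header plus device_count trailing pci_function_description2 entries, since device_count arrives from the host and cannot be trusted by itself. A minimal sketch of that validation, with reduced stand-in types (struct relations2, struct fdesc2, and relations2_fits() are hypothetical names, not the driver's):

#include <stdio.h>
#include <stdlib.h>
#include <stddef.h>
#include <stdbool.h>

struct fdesc2 {
	unsigned short v_id;
	unsigned short d_id;
	unsigned short flags;
};

struct relations2 {
	unsigned int device_count;	/* supplied by the other side */
	struct fdesc2 func[];
};

/* true if a buffer of bytes_recvd bytes can hold the claimed entries */
static bool relations2_fits(const struct relations2 *rel, size_t bytes_recvd)
{
	if (bytes_recvd < offsetof(struct relations2, func))
		return false;	/* even the header is truncated */
	return bytes_recvd >= offsetof(struct relations2, func) +
			      sizeof(struct fdesc2) * (size_t)rel->device_count;
}

int main(void)
{
	size_t need = offsetof(struct relations2, func) +
		      2 * sizeof(struct fdesc2);
	struct relations2 *rel = calloc(1, need);

	if (!rel)
		return 1;
	rel->device_count = 2;
	printf("full packet fits: %d\n", relations2_fits(rel, need));
	printf("truncated packet fits: %d\n", relations2_fits(rel, need - 1));
	free(rel);
	return 0;
}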
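The tegra_pcie_setup_translations() rework stops caching io/pio/mem/prefetch resources in struct tegra_pcie and instead walks the host bridge's windows list once, programming AFI BAR1/BAR2/BAR3 according to each window's resource type and its IORESOURCE_PREFETCH flag, with pci_pio_to_address() translating the I/O window's port range back to a CPU physical address. The sketch below reproduces just that dispatch logic in plain C; struct res, afi_program(), and the flag values are stand-ins (they do not match the kernel's IORESOURCE_* encoding), and the PIO-to-physical translation is omitted.

#include <stdio.h>

#define RES_IO		0x1
#define RES_MEM		0x2
#define RES_PREFETCH	0x10
#define TYPE_BITS	0xf

struct res {
	unsigned long start, end, flags;
};

static void afi_program(const char *bar, unsigned long start,
			unsigned long size)
{
	printf("%s: start=%#lx size=%#lx\n", bar, start, size);
}

static void setup_translations(const struct res *win, int n)
{
	for (int i = 0; i < n; i++) {
		unsigned long size = win[i].end - win[i].start + 1;

		switch (win[i].flags & TYPE_BITS) {
		case RES_IO:
			afi_program("BAR1 (I/O)", win[i].start, size);
			break;
		case RES_MEM:
			if (win[i].flags & RES_PREFETCH)
				afi_program("BAR2 (prefetchable)",
					    win[i].start, size);
			else
				afi_program("BAR3 (non-prefetchable)",
					    win[i].start, size);
			break;
		}
	}
}

int main(void)
{
	const struct res wins[] = {
		{ 0x12000000, 0x1200ffff, RES_IO },
		{ 0x20000000, 0x27ffffff, RES_MEM | RES_PREFETCH },
		{ 0x13000000, 0x1fffffff, RES_MEM },
	};

	setup_translations(wins, 3);
	return 0;
}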
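In the pcie-brcmstb.c hunk, the link-up message now derives its speed string by indexing pcie_link_speed[] with the current-link-speed field of the Link Status register, rather than by adding an offset into the PCI_SPEED_* enum. The field layout itself comes from the PCIe specification: current link speed in bits 3:0, negotiated link width in bits 9:4. A self-contained sketch of that decode, where speed_str() is a reduced stand-in for the kernel's speed tables:

#include <stdio.h>
#include <stdint.h>

#define LNKSTA_CLS		0x000f	/* current link speed */
#define LNKSTA_NLW		0x03f0	/* negotiated link width */
#define LNKSTA_NLW_SHIFT	4

static const char *speed_str(unsigned int cls)
{
	static const char *const s[] = {
		"unknown", "2.5 GT/s", "5.0 GT/s", "8.0 GT/s", "16.0 GT/s",
	};

	return cls < 5 ? s[cls] : "unknown";
}

int main(void)
{
	uint16_t lnksta = 0x1043;	/* example: Gen3 link, x4 width */
	unsigned int cls = lnksta & LNKSTA_CLS;
	unsigned int nlw = (lnksta & LNKSTA_NLW) >> LNKSTA_NLW_SHIFT;

	printf("link up, %s x%u\n", speed_str(cls), nlw);
	return 0;
}

Indexing a table keyed directly by the register field avoids the fragile arithmetic the removed PCIE_SPEED2STR(cls + PCI_SPEED_133MHz_PCIX_533) expression relied on, which silently breaks if the enum ordering ever changes.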