Diffstat (limited to 'drivers/pci')
-rw-r--r-- drivers/pci/Kconfig | 12
-rw-r--r-- drivers/pci/ats.c | 33
-rw-r--r-- drivers/pci/bus.c | 47
-rw-r--r-- drivers/pci/controller/Kconfig | 11
-rw-r--r-- drivers/pci/controller/cadence/Kconfig | 16
-rw-r--r-- drivers/pci/controller/cadence/pci-j721e.c | 40
-rw-r--r-- drivers/pci/controller/cadence/pcie-cadence-ep.c | 44
-rw-r--r-- drivers/pci/controller/cadence/pcie-cadence-host.c | 124
-rw-r--r-- drivers/pci/controller/cadence/pcie-cadence.c | 12
-rw-r--r-- drivers/pci/controller/cadence/pcie-cadence.h | 25
-rw-r--r-- drivers/pci/controller/dwc/Kconfig | 21
-rw-r--r-- drivers/pci/controller/dwc/Makefile | 2
-rw-r--r-- drivers/pci/controller/dwc/pci-dra7xx.c | 8
-rw-r--r-- drivers/pci/controller/dwc/pci-exynos.c | 4
-rw-r--r-- drivers/pci/controller/dwc/pci-imx6.c | 311
-rw-r--r-- drivers/pci/controller/dwc/pci-keystone.c | 13
-rw-r--r-- drivers/pci/controller/dwc/pci-meson.c | 6
-rw-r--r-- drivers/pci/controller/dwc/pcie-amd-mdb.c | 476
-rw-r--r-- drivers/pci/controller/dwc/pcie-armada8k.c | 6
-rw-r--r-- drivers/pci/controller/dwc/pcie-designware-debugfs.c | 927
-rw-r--r-- drivers/pci/controller/dwc/pcie-designware-ep.c | 342
-rw-r--r-- drivers/pci/controller/dwc/pcie-designware-host.c | 142
-rw-r--r-- drivers/pci/controller/dwc/pcie-designware.c | 171
-rw-r--r-- drivers/pci/controller/dwc/pcie-designware.h | 110
-rw-r--r-- drivers/pci/controller/dwc/pcie-dw-rockchip.c | 159
-rw-r--r-- drivers/pci/controller/dwc/pcie-hisi.c | 1
-rw-r--r-- drivers/pci/controller/dwc/pcie-histb.c | 9
-rw-r--r-- drivers/pci/controller/dwc/pcie-intel-gw.c | 8
-rw-r--r-- drivers/pci/controller/dwc/pcie-keembay.c | 2
-rw-r--r-- drivers/pci/controller/dwc/pcie-kirin.c | 57
-rw-r--r-- drivers/pci/controller/dwc/pcie-qcom-ep.c | 27
-rw-r--r-- drivers/pci/controller/dwc/pcie-qcom.c | 20
-rw-r--r-- drivers/pci/controller/dwc/pcie-rcar-gen4.c | 3
-rw-r--r-- drivers/pci/controller/dwc/pcie-spear13xx.c | 7
-rw-r--r-- drivers/pci/controller/dwc/pcie-tegra194.c | 23
-rw-r--r-- drivers/pci/controller/dwc/pcie-uniphier.c | 4
-rw-r--r-- drivers/pci/controller/dwc/pcie-visconti.c | 4
-rw-r--r-- drivers/pci/controller/mobiveil/pcie-layerscape-gen4.c | 12
-rw-r--r-- drivers/pci/controller/mobiveil/pcie-mobiveil-host.c | 11
-rw-r--r-- drivers/pci/controller/mobiveil/pcie-mobiveil.h | 2
-rw-r--r-- drivers/pci/controller/pci-aardvark.c | 14
-rw-r--r-- drivers/pci/controller/pci-ftpci100.c | 4
-rw-r--r-- drivers/pci/controller/pci-host-common.c | 30
-rw-r--r-- drivers/pci/controller/pci-host-common.h | 20
-rw-r--r-- drivers/pci/controller/pci-host-generic.c | 2
-rw-r--r-- drivers/pci/controller/pci-hyperv-intf.c | 1
-rw-r--r-- drivers/pci/controller/pci-hyperv.c | 139
-rw-r--r-- drivers/pci/controller/pci-mvebu.c | 34
-rw-r--r-- drivers/pci/controller/pci-tegra.c | 143
-rw-r--r-- drivers/pci/controller/pci-thunder-ecam.c | 4
-rw-r--r-- drivers/pci/controller/pci-thunder-pem.c | 1
-rw-r--r-- drivers/pci/controller/pci-xgene-msi.c | 55
-rw-r--r-- drivers/pci/controller/pcie-altera-msi.c | 4
-rw-r--r-- drivers/pci/controller/pcie-altera.c | 259
-rw-r--r-- drivers/pci/controller/pcie-apple.c | 353
-rw-r--r-- drivers/pci/controller/pcie-brcmstb.c | 177
-rw-r--r-- drivers/pci/controller/pcie-iproc-msi.c | 6
-rw-r--r-- drivers/pci/controller/pcie-mediatek-gen3.c | 45
-rw-r--r-- drivers/pci/controller/pcie-mediatek.c | 21
-rw-r--r-- drivers/pci/controller/pcie-mt7621.c | 15
-rw-r--r-- drivers/pci/controller/pcie-rcar-ep.c | 8
-rw-r--r-- drivers/pci/controller/pcie-rcar-host.c | 10
-rw-r--r-- drivers/pci/controller/pcie-rockchip-ep.c | 10
-rw-r--r-- drivers/pci/controller/pcie-rockchip-host.c | 6
-rw-r--r-- drivers/pci/controller/pcie-rockchip.h | 7
-rw-r--r-- drivers/pci/controller/pcie-xilinx-cpm.c | 53
-rw-r--r-- drivers/pci/controller/pcie-xilinx-dma-pl.c | 14
-rw-r--r-- drivers/pci/controller/pcie-xilinx-nwl.c | 11
-rw-r--r-- drivers/pci/controller/pcie-xilinx.c | 5
-rw-r--r-- drivers/pci/controller/plda/pcie-microchip-host.c | 1
-rw-r--r-- drivers/pci/controller/plda/pcie-plda-host.c | 16
-rw-r--r-- drivers/pci/controller/plda/pcie-starfive.c | 2
-rw-r--r-- drivers/pci/controller/vmd.c | 20
-rw-r--r-- drivers/pci/devres.c | 215
-rw-r--r-- drivers/pci/doe.c | 247
-rw-r--r-- drivers/pci/endpoint/Kconfig | 2
-rw-r--r-- drivers/pci/endpoint/functions/pci-epf-mhi.c | 2
-rw-r--r-- drivers/pci/endpoint/functions/pci-epf-test.c | 16
-rw-r--r-- drivers/pci/endpoint/functions/pci-epf-vntb.c | 30
-rw-r--r-- drivers/pci/endpoint/pci-epc-core.c | 82
-rw-r--r-- drivers/pci/endpoint/pci-epf-core.c | 26
-rw-r--r-- drivers/pci/hotplug/Kconfig | 2
-rw-r--r-- drivers/pci/hotplug/cpci_hotplug.h | 2
-rw-r--r-- drivers/pci/hotplug/cpci_hotplug_core.c | 17
-rw-r--r-- drivers/pci/hotplug/cpqphp_ctrl.c | 4
-rw-r--r-- drivers/pci/hotplug/pci_hotplug_core.c | 215
-rw-r--r-- drivers/pci/hotplug/pciehp.h | 1
-rw-r--r-- drivers/pci/hotplug/pciehp_core.c | 29
-rw-r--r-- drivers/pci/hotplug/pciehp_ctrl.c | 2
-rw-r--r-- drivers/pci/hotplug/pciehp_hpc.c | 85
-rw-r--r-- drivers/pci/hotplug/pnv_php.c | 233
-rw-r--r-- drivers/pci/hotplug/s390_pci_hpc.c | 3
-rw-r--r-- drivers/pci/hotplug/shpchp.h | 18
-rw-r--r-- drivers/pci/hotplug/shpchp_core.c | 13
-rw-r--r-- drivers/pci/hotplug/shpchp_hpc.c | 6
-rw-r--r-- drivers/pci/iomap.c | 16
-rw-r--r-- drivers/pci/iov.c | 2
-rw-r--r-- drivers/pci/msi/api.c | 10
-rw-r--r-- drivers/pci/msi/irqdomain.c | 5
-rw-r--r-- drivers/pci/msi/msi.c | 227
-rw-r--r-- drivers/pci/msi/msi.h | 4
-rw-r--r-- drivers/pci/of.c | 171
-rw-r--r-- drivers/pci/of_property.c | 115
-rw-r--r-- drivers/pci/p2pdma.c | 53
-rw-r--r-- drivers/pci/pci-acpi.c | 4
-rw-r--r-- drivers/pci/pci-driver.c | 19
-rw-r--r-- drivers/pci/pci-sysfs.c | 11
-rw-r--r-- drivers/pci/pci.c | 143
-rw-r--r-- drivers/pci/pci.h | 157
-rw-r--r-- drivers/pci/pcie/aer.c | 515
-rw-r--r-- drivers/pci/pcie/bwctrl.c | 86
-rw-r--r-- drivers/pci/pcie/dpc.c | 93
-rw-r--r-- drivers/pci/pcie/err.c | 1
-rw-r--r-- drivers/pci/pcie/ptm.c | 302
-rw-r--r-- drivers/pci/pcie/tlp.c | 60
-rw-r--r-- drivers/pci/probe.c | 63
-rw-r--r-- drivers/pci/proc.c | 4
-rw-r--r-- drivers/pci/pwrctrl/Kconfig | 29
-rw-r--r-- drivers/pci/pwrctrl/Makefile | 7
-rw-r--r-- drivers/pci/pwrctrl/core.c | 4
-rw-r--r-- drivers/pci/pwrctrl/slot.c | 93
-rw-r--r-- drivers/pci/quirks.c | 55
-rw-r--r-- drivers/pci/remove.c | 5
-rw-r--r-- drivers/pci/setup-bus.c | 550
-rw-r--r-- drivers/pci/setup-res.c | 24
-rw-r--r-- drivers/pci/slot.c | 44
-rw-r--r-- drivers/pci/tph.c | 44
127 files changed, 6417 insertions, 2491 deletions
diff --git a/drivers/pci/Kconfig b/drivers/pci/Kconfig
index 2fbd379923fd..9c0e4aaf4e8c 100644
--- a/drivers/pci/Kconfig
+++ b/drivers/pci/Kconfig
@@ -21,6 +21,7 @@ config GENERIC_PCI_IOMAP
menuconfig PCI
bool "PCI support"
depends on HAVE_PCI
+ depends on MMU
help
This option enables support for the PCI local bus, including
support for PCI-X and the foundations for PCI Express support.
@@ -122,7 +123,10 @@ config PCI_ATS
bool
config PCI_DOE
- bool
+ bool "Enable PCI Data Object Exchange (DOE) support"
+ help
+ Say Y here if you want to be able to communicate with PCIe DOE
+ mailboxes.
config PCI_ECAM
bool
@@ -203,6 +207,12 @@ config PCI_P2PDMA
P2P DMA transactions must be between devices behind the same root
port.
+ Enabling this option will reduce the entropy of x86 KASLR memory
+ regions. For example, on a 46-bit system the entropy goes down
+ from 16 bits to 15 bits. The actual reduction depends on the number
+ of physical address bits, processor features, the kernel config
+ (5-level page tables) and the physical memory present in the system.
+
If unsure, say N.
config PCI_LABEL
diff --git a/drivers/pci/ats.c b/drivers/pci/ats.c
index c6b266c772c8..ec6c8dbdc5e9 100644
--- a/drivers/pci/ats.c
+++ b/drivers/pci/ats.c
@@ -538,4 +538,37 @@ int pci_max_pasids(struct pci_dev *pdev)
return (1 << FIELD_GET(PCI_PASID_CAP_WIDTH, supported));
}
EXPORT_SYMBOL_GPL(pci_max_pasids);
+
+/**
+ * pci_pasid_status - Check the PASID status
+ * @pdev: PCI device structure
+ *
+ * Returns a negative value when no PASID capability is present.
+ * Otherwise, the value of the control register is returned.
+ * The status bits reported are:
+ *
+ * PCI_PASID_CTRL_ENABLE - PASID enabled
+ * PCI_PASID_CTRL_EXEC - Execute permission enabled
+ * PCI_PASID_CTRL_PRIV - Privileged mode enabled
+ */
+int pci_pasid_status(struct pci_dev *pdev)
+{
+ int pasid;
+ u16 ctrl;
+
+ if (pdev->is_virtfn)
+ pdev = pci_physfn(pdev);
+
+ pasid = pdev->pasid_cap;
+ if (!pasid)
+ return -EINVAL;
+
+ pci_read_config_word(pdev, pasid + PCI_PASID_CTRL, &ctrl);
+
+ ctrl &= PCI_PASID_CTRL_ENABLE | PCI_PASID_CTRL_EXEC |
+ PCI_PASID_CTRL_PRIV;
+
+ return ctrl;
+}
+EXPORT_SYMBOL_GPL(pci_pasid_status);
#endif /* CONFIG_PCI_PASID */
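For context, a minimal sketch (not part of this patch) of how a caller might consume the new helper; the wrapper name and error policy are illustrative assumptions:

    /* Hypothetical caller: proceed only if PASID is already enabled. */
    static int check_pasid_ready(struct pci_dev *pdev)
    {
            int status = pci_pasid_status(pdev);

            if (status < 0)
                    return status;          /* no PASID capability */
            if (!(status & PCI_PASID_CTRL_ENABLE))
                    return -ENODEV;         /* present but disabled */
            return 0;
    }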
diff --git a/drivers/pci/bus.c b/drivers/pci/bus.c
index 98910bc0fcc4..69048869ef1c 100644
--- a/drivers/pci/bus.c
+++ b/drivers/pci/bus.c
@@ -331,47 +331,6 @@ void __weak pcibios_resource_survey_bus(struct pci_bus *bus) { }
void __weak pcibios_bus_add_device(struct pci_dev *pdev) { }
-/*
- * Create pwrctrl devices (if required) for the PCI devices to handle the power
- * state.
- */
-static void pci_pwrctrl_create_devices(struct pci_dev *dev)
-{
- struct device_node *np = dev_of_node(&dev->dev);
- struct device *parent = &dev->dev;
- struct platform_device *pdev;
-
- /*
- * First ensure that we are starting from a PCI bridge and it has a
- * corresponding devicetree node.
- */
- if (np && pci_is_bridge(dev)) {
- /*
- * Now look for the child PCI device nodes and create pwrctrl
- * devices for them. The pwrctrl device drivers will manage the
- * power state of the devices.
- */
- for_each_available_child_of_node_scoped(np, child) {
- /*
- * First check whether the pwrctrl device really
- * needs to be created or not. This is decided
- * based on at least one of the power supplies
- * being defined in the devicetree node of the
- * device.
- */
- if (!of_pci_supply_present(child)) {
- pci_dbg(dev, "skipping OF node: %s\n", child->name);
- return;
- }
-
- /* Now create the pwrctrl device */
- pdev = of_platform_device_create(child, NULL, parent);
- if (!pdev)
- pci_err(dev, "failed to create OF node: %s\n", child->name);
- }
- }
-}
-
/**
* pci_bus_add_device - start driver for a single device
* @dev: device to add
@@ -396,8 +355,6 @@ void pci_bus_add_device(struct pci_dev *dev)
pci_proc_attach_device(dev);
pci_bridge_d3_update(dev);
- pci_pwrctrl_create_devices(dev);
-
/*
* If the PCI device is associated with a pwrctrl device with a
* power supply, create a device link between the PCI device and
@@ -412,7 +369,9 @@ void pci_bus_add_device(struct pci_dev *dev)
pdev->name);
}
- dev->match_driver = !dn || of_device_is_available(dn);
+ if (!dn || of_device_is_available(dn))
+ pci_dev_allow_binding(dev);
+
retval = device_attach(&dev->dev);
if (retval < 0 && retval != -EPROBE_DEFER)
pci_warn(dev, "device attach failed (%d)\n", retval);
diff --git a/drivers/pci/controller/Kconfig b/drivers/pci/controller/Kconfig
index 9800b7681054..886f6f43a895 100644
--- a/drivers/pci/controller/Kconfig
+++ b/drivers/pci/controller/Kconfig
@@ -3,6 +3,10 @@
menu "PCI controller drivers"
depends on PCI
+config PCI_HOST_COMMON
+ tristate
+ select PCI_ECAM
+
config PCI_AARDVARK
tristate "Aardvark PCIe controller"
depends on (ARCH_MVEBU && ARM64) || COMPILE_TEST
@@ -40,6 +44,7 @@ config PCIE_APPLE
depends on OF
depends on PCI_MSI
select PCI_HOST_COMMON
+ select IRQ_MSI_LIB
help
Say Y here if you want to enable PCIe controller support on Apple
system-on-chips, like the Apple M1. This is required for the USB
@@ -119,10 +124,6 @@ config PCI_FTPCI100
depends on OF
default ARCH_GEMINI
-config PCI_HOST_COMMON
- tristate
- select PCI_ECAM
-
config PCI_HOST_GENERIC
tristate "Generic PCI host controller"
depends on OF
@@ -227,6 +228,7 @@ config PCI_TEGRA
bool "NVIDIA Tegra PCIe controller"
depends on ARCH_TEGRA || COMPILE_TEST
depends on PCI_MSI
+ select IRQ_MSI_LIB
help
Say Y here if you want support for the PCIe host controller found
on NVIDIA Tegra SoCs.
@@ -303,6 +305,7 @@ config PCI_XGENE_MSI
bool "X-Gene v1 PCIe MSI feature"
depends on PCI_XGENE
depends on PCI_MSI
+ select IRQ_MSI_LIB
default y
help
Say Y here if you want PCIe MSI support for the APM X-Gene v1 SoC.
diff --git a/drivers/pci/controller/cadence/Kconfig b/drivers/pci/controller/cadence/Kconfig
index 8a0044bb3989..666e16b6367f 100644
--- a/drivers/pci/controller/cadence/Kconfig
+++ b/drivers/pci/controller/cadence/Kconfig
@@ -4,16 +4,16 @@ menu "Cadence-based PCIe controllers"
depends on PCI
config PCIE_CADENCE
- bool
+ tristate
config PCIE_CADENCE_HOST
- bool
+ tristate
depends on OF
select IRQ_DOMAIN
select PCIE_CADENCE
config PCIE_CADENCE_EP
- bool
+ tristate
depends on OF
depends on PCI_ENDPOINT
select PCIE_CADENCE
@@ -43,13 +43,14 @@ config PCIE_CADENCE_PLAT_EP
different vendors SoCs.
config PCI_J721E
- bool
+ tristate
+ select PCIE_CADENCE_HOST if PCI_J721E_HOST != n
+ select PCIE_CADENCE_EP if PCI_J721E_EP != n
config PCI_J721E_HOST
- bool "TI J721E PCIe controller (host mode)"
+ tristate "TI J721E PCIe controller (host mode)"
depends on ARCH_K3 || COMPILE_TEST
depends on OF
- select PCIE_CADENCE_HOST
select PCI_J721E
help
Say Y here if you want to support the TI J721E PCIe platform
@@ -57,11 +58,10 @@ config PCI_J721E_HOST
core.
config PCI_J721E_EP
- bool "TI J721E PCIe controller (endpoint mode)"
+ tristate "TI J721E PCIe controller (endpoint mode)"
depends on ARCH_K3 || COMPILE_TEST
depends on OF
depends on PCI_ENDPOINT
- select PCIE_CADENCE_EP
select PCI_J721E
help
Say Y here if you want to support the TI J721E PCIe platform
diff --git a/drivers/pci/controller/cadence/pci-j721e.c b/drivers/pci/controller/cadence/pci-j721e.c
index ef1cfdae33bb..6c93f39d0288 100644
--- a/drivers/pci/controller/cadence/pci-j721e.c
+++ b/drivers/pci/controller/cadence/pci-j721e.c
@@ -15,6 +15,7 @@
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/mfd/syscon.h>
+#include <linux/module.h>
#include <linux/of.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
@@ -27,6 +28,7 @@
#define cdns_pcie_to_rc(p) container_of(p, struct cdns_pcie_rc, pcie)
#define ENABLE_REG_SYS_2 0x108
+#define ENABLE_CLR_REG_SYS_2 0x308
#define STATUS_REG_SYS_2 0x508
#define STATUS_CLR_REG_SYS_2 0x708
#define LINK_DOWN BIT(1)
@@ -116,6 +118,15 @@ static irqreturn_t j721e_pcie_link_irq_handler(int irq, void *priv)
return IRQ_HANDLED;
}
+static void j721e_pcie_disable_link_irq(struct j721e_pcie *pcie)
+{
+ u32 reg;
+
+ reg = j721e_pcie_intd_readl(pcie, ENABLE_CLR_REG_SYS_2);
+ reg |= pcie->linkdown_irq_regfield;
+ j721e_pcie_intd_writel(pcie, ENABLE_CLR_REG_SYS_2, reg);
+}
+
static void j721e_pcie_config_link_irq(struct j721e_pcie *pcie)
{
u32 reg;
@@ -153,11 +164,7 @@ static bool j721e_pcie_link_up(struct cdns_pcie *cdns_pcie)
u32 reg;
reg = j721e_pcie_user_readl(pcie, J721E_PCIE_USER_LINKSTATUS);
- reg &= LINK_STATUS;
- if (reg == LINK_UP_DL_COMPLETED)
- return true;
-
- return false;
+ return (reg & LINK_STATUS) == LINK_UP_DL_COMPLETED;
}
static const struct cdns_pcie_ops j721e_pcie_ops = {
@@ -464,7 +471,7 @@ static int j721e_pcie_probe(struct platform_device *pdev)
switch (mode) {
case PCI_MODE_RC:
- if (!IS_ENABLED(CONFIG_PCIE_CADENCE_HOST))
+ if (!IS_ENABLED(CONFIG_PCI_J721E_HOST))
return -ENODEV;
bridge = devm_pci_alloc_host_bridge(dev, sizeof(*rc));
@@ -483,7 +490,7 @@ static int j721e_pcie_probe(struct platform_device *pdev)
pcie->cdns_pcie = cdns_pcie;
break;
case PCI_MODE_EP:
- if (!IS_ENABLED(CONFIG_PCIE_CADENCE_EP))
+ if (!IS_ENABLED(CONFIG_PCI_J721E_EP))
return -ENODEV;
ep = devm_kzalloc(dev, sizeof(*ep), GFP_KERNEL);
@@ -633,9 +640,22 @@ static void j721e_pcie_remove(struct platform_device *pdev)
struct j721e_pcie *pcie = platform_get_drvdata(pdev);
struct cdns_pcie *cdns_pcie = pcie->cdns_pcie;
struct device *dev = &pdev->dev;
+ struct cdns_pcie_ep *ep;
+ struct cdns_pcie_rc *rc;
+
+ if (pcie->mode == PCI_MODE_RC) {
+ rc = container_of(cdns_pcie, struct cdns_pcie_rc, pcie);
+ cdns_pcie_host_disable(rc);
+ } else {
+ ep = container_of(cdns_pcie, struct cdns_pcie_ep, pcie);
+ cdns_pcie_ep_disable(ep);
+ }
+
+ gpiod_set_value_cansleep(pcie->reset_gpio, 0);
clk_disable_unprepare(pcie->refclk);
cdns_pcie_disable_phy(cdns_pcie);
+ j721e_pcie_disable_link_irq(pcie);
pm_runtime_put(dev);
pm_runtime_disable(dev);
}
@@ -730,4 +750,8 @@ static struct platform_driver j721e_pcie_driver = {
.pm = pm_sleep_ptr(&j721e_pcie_pm_ops),
},
};
-builtin_platform_driver(j721e_pcie_driver);
+module_platform_driver(j721e_pcie_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("PCIe controller driver for TI's J721E and related SoCs");
+MODULE_AUTHOR("Kishon Vijay Abraham I <kishon@ti.com>");
diff --git a/drivers/pci/controller/cadence/pcie-cadence-ep.c b/drivers/pci/controller/cadence/pcie-cadence-ep.c
index 0bf4cde34f51..8ab6cf70c18e 100644
--- a/drivers/pci/controller/cadence/pcie-cadence-ep.c
+++ b/drivers/pci/controller/cadence/pcie-cadence-ep.c
@@ -6,12 +6,14 @@
#include <linux/bitfield.h>
#include <linux/delay.h>
#include <linux/kernel.h>
+#include <linux/module.h>
#include <linux/of.h>
#include <linux/pci-epc.h>
#include <linux/platform_device.h>
#include <linux/sizes.h>
#include "pcie-cadence.h"
+#include "../../pci.h"
#define CDNS_PCIE_EP_MIN_APERTURE 128 /* 128 bytes */
#define CDNS_PCIE_EP_IRQ_PCI_ADDR_NONE 0x1
@@ -220,10 +222,11 @@ static void cdns_pcie_ep_unmap_addr(struct pci_epc *epc, u8 fn, u8 vfn,
clear_bit(r, &ep->ob_region_map);
}
-static int cdns_pcie_ep_set_msi(struct pci_epc *epc, u8 fn, u8 vfn, u8 mmc)
+static int cdns_pcie_ep_set_msi(struct pci_epc *epc, u8 fn, u8 vfn, u8 nr_irqs)
{
struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
struct cdns_pcie *pcie = &ep->pcie;
+ u8 mmc = order_base_2(nr_irqs);
u32 cap = CDNS_PCIE_EP_FUNC_MSI_CAP_OFFSET;
u16 flags;
@@ -262,7 +265,7 @@ static int cdns_pcie_ep_get_msi(struct pci_epc *epc, u8 fn, u8 vfn)
*/
mme = FIELD_GET(PCI_MSI_FLAGS_QSIZE, flags);
- return mme;
+ return 1 << mme;
}
static int cdns_pcie_ep_get_msix(struct pci_epc *epc, u8 func_no, u8 vfunc_no)
@@ -281,12 +284,11 @@ static int cdns_pcie_ep_get_msix(struct pci_epc *epc, u8 func_no, u8 vfunc_no)
val &= PCI_MSIX_FLAGS_QSIZE;
- return val;
+ return val + 1;
}
static int cdns_pcie_ep_set_msix(struct pci_epc *epc, u8 fn, u8 vfn,
- u16 interrupts, enum pci_barno bir,
- u32 offset)
+ u16 nr_irqs, enum pci_barno bir, u32 offset)
{
struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
struct cdns_pcie *pcie = &ep->pcie;
@@ -298,17 +300,17 @@ static int cdns_pcie_ep_set_msix(struct pci_epc *epc, u8 fn, u8 vfn,
reg = cap + PCI_MSIX_FLAGS;
val = cdns_pcie_ep_fn_readw(pcie, fn, reg);
val &= ~PCI_MSIX_FLAGS_QSIZE;
- val |= interrupts;
+ val |= nr_irqs - 1; /* encoded as N-1 */
cdns_pcie_ep_fn_writew(pcie, fn, reg, val);
- /* Set MSIX BAR and offset */
+ /* Set MSI-X BAR and offset */
reg = cap + PCI_MSIX_TABLE;
val = offset | bir;
cdns_pcie_ep_fn_writel(pcie, fn, reg, val);
- /* Set PBA BAR and offset. BAR must match MSIX BAR */
+ /* Set PBA BAR and offset. BAR must match MSI-X BAR */
reg = cap + PCI_MSIX_PBA;
- val = (offset + (interrupts * PCI_MSIX_ENTRY_SIZE)) | bir;
+ val = (offset + (nr_irqs * PCI_MSIX_ENTRY_SIZE)) | bir;
cdns_pcie_ep_fn_writel(pcie, fn, reg, val);
return 0;
@@ -337,10 +339,10 @@ static void cdns_pcie_ep_assert_intx(struct cdns_pcie_ep *ep, u8 fn, u8 intx,
if (is_asserted) {
ep->irq_pending |= BIT(intx);
- msg_code = MSG_CODE_ASSERT_INTA + intx;
+ msg_code = PCIE_MSG_CODE_ASSERT_INTA + intx;
} else {
ep->irq_pending &= ~BIT(intx);
- msg_code = MSG_CODE_DEASSERT_INTA + intx;
+ msg_code = PCIE_MSG_CODE_DEASSERT_INTA + intx;
}
spin_lock_irqsave(&ep->lock, flags);
@@ -572,8 +574,8 @@ static int cdns_pcie_ep_start(struct pci_epc *epc)
/*
* Next function field in ARI_CAP_AND_CTR register for last function
- * should be 0.
- * Clearing Next Function Number field for the last function used.
+ * should be 0. Clear Next Function Number field for the last
+ * function used.
*/
last_fn = find_last_bit(&epc->function_num_map, BITS_PER_LONG);
reg = CDNS_PCIE_CORE_PF_I_ARI_CAP_AND_CTRL(last_fn);
@@ -644,6 +646,17 @@ static const struct pci_epc_ops cdns_pcie_epc_ops = {
.get_features = cdns_pcie_ep_get_features,
};
+void cdns_pcie_ep_disable(struct cdns_pcie_ep *ep)
+{
+ struct device *dev = ep->pcie.dev;
+ struct pci_epc *epc = to_pci_epc(dev);
+
+ pci_epc_deinit_notify(epc);
+ pci_epc_mem_free_addr(epc, ep->irq_phys_addr, ep->irq_cpu_addr,
+ SZ_128K);
+ pci_epc_mem_exit(epc);
+}
+EXPORT_SYMBOL_GPL(cdns_pcie_ep_disable);
int cdns_pcie_ep_setup(struct cdns_pcie_ep *ep)
{
@@ -751,3 +764,8 @@ int cdns_pcie_ep_setup(struct cdns_pcie_ep *ep)
return ret;
}
+EXPORT_SYMBOL_GPL(cdns_pcie_ep_setup);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Cadence PCIe endpoint controller driver");
+MODULE_AUTHOR("Cyrille Pitchen <cyrille.pitchen@free-electrons.com>");
diff --git a/drivers/pci/controller/cadence/pcie-cadence-host.c b/drivers/pci/controller/cadence/pcie-cadence-host.c
index 8af95e9da7ce..59a4631de79f 100644
--- a/drivers/pci/controller/cadence/pcie-cadence-host.c
+++ b/drivers/pci/controller/cadence/pcie-cadence-host.c
@@ -5,6 +5,7 @@
#include <linux/delay.h>
#include <linux/kernel.h>
+#include <linux/module.h>
#include <linux/list_sort.h>
#include <linux/of_address.h>
#include <linux/of_pci.h>
@@ -72,6 +73,7 @@ void __iomem *cdns_pci_map_bus(struct pci_bus *bus, unsigned int devfn,
return rc->cfg_base + (where & 0xfff);
}
+EXPORT_SYMBOL_GPL(cdns_pci_map_bus);
static struct pci_ops cdns_pcie_host_ops = {
.map_bus = cdns_pci_map_bus,
@@ -150,6 +152,14 @@ static int cdns_pcie_retrain(struct cdns_pcie *pcie)
return ret;
}
+static void cdns_pcie_host_disable_ptm_response(struct cdns_pcie *pcie)
+{
+ u32 val;
+
+ val = cdns_pcie_readl(pcie, CDNS_PCIE_LM_PTM_CTRL);
+ cdns_pcie_writel(pcie, CDNS_PCIE_LM_PTM_CTRL, val & ~CDNS_PCIE_LM_TPM_CTRL_PTMRSEN);
+}
+
static void cdns_pcie_host_enable_ptm_response(struct cdns_pcie *pcie)
{
u32 val;
@@ -175,6 +185,26 @@ static int cdns_pcie_host_start_link(struct cdns_pcie_rc *rc)
return ret;
}
+static void cdns_pcie_host_deinit_root_port(struct cdns_pcie_rc *rc)
+{
+ struct cdns_pcie *pcie = &rc->pcie;
+ u32 value, ctrl;
+
+ cdns_pcie_rp_writew(pcie, PCI_CLASS_DEVICE, 0xffff);
+ cdns_pcie_rp_writeb(pcie, PCI_CLASS_PROG, 0xff);
+ cdns_pcie_rp_writeb(pcie, PCI_CLASS_REVISION, 0xff);
+ cdns_pcie_writel(pcie, CDNS_PCIE_LM_ID, 0xffffffff);
+ cdns_pcie_rp_writew(pcie, PCI_DEVICE_ID, 0xffff);
+ ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_DISABLED;
+ value = ~(CDNS_PCIE_LM_RC_BAR_CFG_BAR0_CTRL(ctrl) |
+ CDNS_PCIE_LM_RC_BAR_CFG_BAR1_CTRL(ctrl) |
+ CDNS_PCIE_LM_RC_BAR_CFG_PREFETCH_MEM_ENABLE |
+ CDNS_PCIE_LM_RC_BAR_CFG_PREFETCH_MEM_64BITS |
+ CDNS_PCIE_LM_RC_BAR_CFG_IO_ENABLE |
+ CDNS_PCIE_LM_RC_BAR_CFG_IO_32BITS);
+ cdns_pcie_writel(pcie, CDNS_PCIE_LM_RC_BAR_CFG, value);
+}
+
static int cdns_pcie_host_init_root_port(struct cdns_pcie_rc *rc)
{
struct cdns_pcie *pcie = &rc->pcie;
@@ -391,6 +421,32 @@ static int cdns_pcie_host_dma_ranges_cmp(void *priv, const struct list_head *a,
return resource_size(entry2->res) - resource_size(entry1->res);
}
+static void cdns_pcie_host_unmap_dma_ranges(struct cdns_pcie_rc *rc)
+{
+ struct cdns_pcie *pcie = &rc->pcie;
+ enum cdns_pcie_rp_bar bar;
+ u32 value;
+
+ /* Reset inbound configuration for all BARs which were being used */
+ for (bar = RP_BAR0; bar <= RP_NO_BAR; bar++) {
+ if (rc->avail_ib_bar[bar])
+ continue;
+
+ cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_RP_BAR_ADDR0(bar), 0);
+ cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_RP_BAR_ADDR1(bar), 0);
+
+ if (bar == RP_NO_BAR)
+ continue;
+
+ value = ~(LM_RC_BAR_CFG_CTRL_MEM_64BITS(bar) |
+ LM_RC_BAR_CFG_CTRL_PREF_MEM_64BITS(bar) |
+ LM_RC_BAR_CFG_CTRL_MEM_32BITS(bar) |
+ LM_RC_BAR_CFG_CTRL_PREF_MEM_32BITS(bar) |
+ LM_RC_BAR_CFG_APERTURE(bar, bar_aperture_mask[bar] + 2));
+ cdns_pcie_writel(pcie, CDNS_PCIE_LM_RC_BAR_CFG, value);
+ }
+}
+
static int cdns_pcie_host_map_dma_ranges(struct cdns_pcie_rc *rc)
{
struct cdns_pcie *pcie = &rc->pcie;
@@ -428,6 +484,29 @@ static int cdns_pcie_host_map_dma_ranges(struct cdns_pcie_rc *rc)
return 0;
}
+static void cdns_pcie_host_deinit_address_translation(struct cdns_pcie_rc *rc)
+{
+ struct cdns_pcie *pcie = &rc->pcie;
+ struct pci_host_bridge *bridge = pci_host_bridge_from_priv(rc);
+ struct resource_entry *entry;
+ int r;
+
+ cdns_pcie_host_unmap_dma_ranges(rc);
+
+ /*
+ * Reset outbound region 0 which was reserved for configuration space
+ * accesses.
+ */
+ cdns_pcie_reset_outbound_region(pcie, 0);
+
+ /* Reset rest of the outbound regions */
+ r = 1;
+ resource_list_for_each_entry(entry, &bridge->windows) {
+ cdns_pcie_reset_outbound_region(pcie, r);
+ r++;
+ }
+}
+
static int cdns_pcie_host_init_address_translation(struct cdns_pcie_rc *rc)
{
struct cdns_pcie *pcie = &rc->pcie;
@@ -485,6 +564,12 @@ static int cdns_pcie_host_init_address_translation(struct cdns_pcie_rc *rc)
return cdns_pcie_host_map_dma_ranges(rc);
}
+static void cdns_pcie_host_deinit(struct cdns_pcie_rc *rc)
+{
+ cdns_pcie_host_deinit_address_translation(rc);
+ cdns_pcie_host_deinit_root_port(rc);
+}
+
int cdns_pcie_host_init(struct cdns_pcie_rc *rc)
{
int err;
@@ -495,6 +580,15 @@ int cdns_pcie_host_init(struct cdns_pcie_rc *rc)
return cdns_pcie_host_init_address_translation(rc);
}
+EXPORT_SYMBOL_GPL(cdns_pcie_host_init);
+
+static void cdns_pcie_host_link_disable(struct cdns_pcie_rc *rc)
+{
+ struct cdns_pcie *pcie = &rc->pcie;
+
+ cdns_pcie_stop_link(pcie);
+ cdns_pcie_host_disable_ptm_response(pcie);
+}
int cdns_pcie_host_link_setup(struct cdns_pcie_rc *rc)
{
@@ -519,6 +613,20 @@ int cdns_pcie_host_link_setup(struct cdns_pcie_rc *rc)
return 0;
}
+EXPORT_SYMBOL_GPL(cdns_pcie_host_link_setup);
+
+void cdns_pcie_host_disable(struct cdns_pcie_rc *rc)
+{
+ struct pci_host_bridge *bridge;
+
+ bridge = pci_host_bridge_from_priv(rc);
+ pci_stop_root_bus(bridge->bus);
+ pci_remove_root_bus(bridge->bus);
+
+ cdns_pcie_host_deinit(rc);
+ cdns_pcie_host_link_disable(rc);
+}
+EXPORT_SYMBOL_GPL(cdns_pcie_host_disable);
int cdns_pcie_host_setup(struct cdns_pcie_rc *rc)
{
@@ -570,14 +678,10 @@ int cdns_pcie_host_setup(struct cdns_pcie_rc *rc)
if (!bridge->ops)
bridge->ops = &cdns_pcie_host_ops;
- ret = pci_host_probe(bridge);
- if (ret < 0)
- goto err_init;
-
- return 0;
-
- err_init:
- pm_runtime_put_sync(dev);
-
- return ret;
+ return pci_host_probe(bridge);
}
+EXPORT_SYMBOL_GPL(cdns_pcie_host_setup);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Cadence PCIe host controller driver");
+MODULE_AUTHOR("Cyrille Pitchen <cyrille.pitchen@free-electrons.com>");
diff --git a/drivers/pci/controller/cadence/pcie-cadence.c b/drivers/pci/controller/cadence/pcie-cadence.c
index 204e045aed8c..70a19573440e 100644
--- a/drivers/pci/controller/cadence/pcie-cadence.c
+++ b/drivers/pci/controller/cadence/pcie-cadence.c
@@ -4,6 +4,7 @@
// Author: Cyrille Pitchen <cyrille.pitchen@free-electrons.com>
#include <linux/kernel.h>
+#include <linux/module.h>
#include <linux/of.h>
#include "pcie-cadence.h"
@@ -23,6 +24,7 @@ void cdns_pcie_detect_quiet_min_delay_set(struct cdns_pcie *pcie)
cdns_pcie_writel(pcie, CDNS_PCIE_LTSSM_CONTROL_CAP, ltssm_control_cap);
}
+EXPORT_SYMBOL_GPL(cdns_pcie_detect_quiet_min_delay_set);
void cdns_pcie_set_outbound_region(struct cdns_pcie *pcie, u8 busnr, u8 fn,
u32 r, bool is_io,
@@ -100,6 +102,7 @@ void cdns_pcie_set_outbound_region(struct cdns_pcie *pcie, u8 busnr, u8 fn,
cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR0(r), addr0);
cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR1(r), addr1);
}
+EXPORT_SYMBOL_GPL(cdns_pcie_set_outbound_region);
void cdns_pcie_set_outbound_region_for_normal_msg(struct cdns_pcie *pcie,
u8 busnr, u8 fn,
@@ -134,6 +137,7 @@ void cdns_pcie_set_outbound_region_for_normal_msg(struct cdns_pcie *pcie,
cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR0(r), addr0);
cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR1(r), addr1);
}
+EXPORT_SYMBOL_GPL(cdns_pcie_set_outbound_region_for_normal_msg);
void cdns_pcie_reset_outbound_region(struct cdns_pcie *pcie, u32 r)
{
@@ -146,6 +150,7 @@ void cdns_pcie_reset_outbound_region(struct cdns_pcie *pcie, u32 r)
cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR0(r), 0);
cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR1(r), 0);
}
+EXPORT_SYMBOL_GPL(cdns_pcie_reset_outbound_region);
void cdns_pcie_disable_phy(struct cdns_pcie *pcie)
{
@@ -156,6 +161,7 @@ void cdns_pcie_disable_phy(struct cdns_pcie *pcie)
phy_exit(pcie->phy[i]);
}
}
+EXPORT_SYMBOL_GPL(cdns_pcie_disable_phy);
int cdns_pcie_enable_phy(struct cdns_pcie *pcie)
{
@@ -184,6 +190,7 @@ err_phy:
return ret;
}
+EXPORT_SYMBOL_GPL(cdns_pcie_enable_phy);
int cdns_pcie_init_phy(struct device *dev, struct cdns_pcie *pcie)
{
@@ -243,6 +250,7 @@ err_phy:
return ret;
}
+EXPORT_SYMBOL_GPL(cdns_pcie_init_phy);
static int cdns_pcie_suspend_noirq(struct device *dev)
{
@@ -271,3 +279,7 @@ const struct dev_pm_ops cdns_pcie_pm_ops = {
NOIRQ_SYSTEM_SLEEP_PM_OPS(cdns_pcie_suspend_noirq,
cdns_pcie_resume_noirq)
};
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Cadence PCIe controller driver");
+MODULE_AUTHOR("Cyrille Pitchen <cyrille.pitchen@free-electrons.com>");
diff --git a/drivers/pci/controller/cadence/pcie-cadence.h b/drivers/pci/controller/cadence/pcie-cadence.h
index 39ee9945c903..a149845d341a 100644
--- a/drivers/pci/controller/cadence/pcie-cadence.h
+++ b/drivers/pci/controller/cadence/pcie-cadence.h
@@ -250,17 +250,6 @@ struct cdns_pcie_rp_ib_bar {
struct cdns_pcie;
-enum cdns_pcie_msg_code {
- MSG_CODE_ASSERT_INTA = 0x20,
- MSG_CODE_ASSERT_INTB = 0x21,
- MSG_CODE_ASSERT_INTC = 0x22,
- MSG_CODE_ASSERT_INTD = 0x23,
- MSG_CODE_DEASSERT_INTA = 0x24,
- MSG_CODE_DEASSERT_INTB = 0x25,
- MSG_CODE_DEASSERT_INTC = 0x26,
- MSG_CODE_DEASSERT_INTD = 0x27,
-};
-
enum cdns_pcie_msg_routing {
/* Route to Root Complex */
MSG_ROUTING_TO_RC,
@@ -519,10 +508,11 @@ static inline bool cdns_pcie_link_up(struct cdns_pcie *pcie)
return true;
}
-#ifdef CONFIG_PCIE_CADENCE_HOST
+#if IS_ENABLED(CONFIG_PCIE_CADENCE_HOST)
int cdns_pcie_host_link_setup(struct cdns_pcie_rc *rc);
int cdns_pcie_host_init(struct cdns_pcie_rc *rc);
int cdns_pcie_host_setup(struct cdns_pcie_rc *rc);
+void cdns_pcie_host_disable(struct cdns_pcie_rc *rc);
void __iomem *cdns_pci_map_bus(struct pci_bus *bus, unsigned int devfn,
int where);
#else
@@ -541,6 +531,10 @@ static inline int cdns_pcie_host_setup(struct cdns_pcie_rc *rc)
return 0;
}
+static inline void cdns_pcie_host_disable(struct cdns_pcie_rc *rc)
+{
+}
+
static inline void __iomem *cdns_pci_map_bus(struct pci_bus *bus, unsigned int devfn,
int where)
{
@@ -548,13 +542,18 @@ static inline void __iomem *cdns_pci_map_bus(struct pci_bus *bus, unsigned int d
}
#endif
-#ifdef CONFIG_PCIE_CADENCE_EP
+#if IS_ENABLED(CONFIG_PCIE_CADENCE_EP)
int cdns_pcie_ep_setup(struct cdns_pcie_ep *ep);
+void cdns_pcie_ep_disable(struct cdns_pcie_ep *ep);
#else
static inline int cdns_pcie_ep_setup(struct cdns_pcie_ep *ep)
{
return 0;
}
+
+static inline void cdns_pcie_ep_disable(struct cdns_pcie_ep *ep)
+{
+}
#endif
void cdns_pcie_detect_quiet_min_delay_set(struct cdns_pcie *pcie);
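The #ifdef-to-IS_ENABLED() conversions above are what make the tristate switch in Kconfig safe: a plain #ifdef CONFIG_FOO is only true for built-in (=y) code, while IS_ENABLED(CONFIG_FOO) also covers modular (=m) builds. A minimal sketch with a hypothetical CONFIG_FOO:

    #ifdef CONFIG_FOO
    /* compiled only when CONFIG_FOO=y; silently dropped for CONFIG_FOO=m */
    #endif

    #if IS_ENABLED(CONFIG_FOO)
    /* compiled for both CONFIG_FOO=y and CONFIG_FOO=m */
    #endif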
diff --git a/drivers/pci/controller/dwc/Kconfig b/drivers/pci/controller/dwc/Kconfig
index b6d6778b0698..d9f0386396ed 100644
--- a/drivers/pci/controller/dwc/Kconfig
+++ b/drivers/pci/controller/dwc/Kconfig
@@ -6,6 +6,16 @@ menu "DesignWare-based PCIe controllers"
config PCIE_DW
bool
+config PCIE_DW_DEBUGFS
+ bool "DesignWare PCIe debugfs entries"
+ depends on DEBUG_FS
+ depends on PCIE_DW_HOST || PCIE_DW_EP
+ help
+ Say Y here to enable debugfs entries for the PCIe controller. These
+ entries provide various debug features related to the controller and
+ expose the RAS DES capabilities such as Silicon Debug, Error Injection
+ and Statistical Counters.
+
config PCIE_DW_HOST
bool
select PCIE_DW
@@ -27,6 +37,17 @@ config PCIE_AL
required only for DT-based platforms. ACPI platforms with the
Annapurna Labs PCIe controller don't need to enable this.
+config PCIE_AMD_MDB
+ bool "AMD MDB Versal2 PCIe controller"
+ depends on OF && (ARM64 || COMPILE_TEST)
+ depends on PCI_MSI
+ select PCIE_DW_HOST
+ help
+ Say Y here if you want to enable PCIe controller support on AMD
+ Versal2 SoCs. The AMD MDB Versal2 PCIe controller is based on
+ DesignWare IP and therefore re-uses the DesignWare core
+ functions.
+
config PCI_MESON
tristate "Amlogic Meson PCIe controller"
default m if ARCH_MESON
diff --git a/drivers/pci/controller/dwc/Makefile b/drivers/pci/controller/dwc/Makefile
index a8308d9ea986..908cb7f345db 100644
--- a/drivers/pci/controller/dwc/Makefile
+++ b/drivers/pci/controller/dwc/Makefile
@@ -1,8 +1,10 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_PCIE_DW) += pcie-designware.o
+obj-$(CONFIG_PCIE_DW_DEBUGFS) += pcie-designware-debugfs.o
obj-$(CONFIG_PCIE_DW_HOST) += pcie-designware-host.o
obj-$(CONFIG_PCIE_DW_EP) += pcie-designware-ep.o
obj-$(CONFIG_PCIE_DW_PLAT) += pcie-designware-plat.o
+obj-$(CONFIG_PCIE_AMD_MDB) += pcie-amd-mdb.o
obj-$(CONFIG_PCIE_BT1) += pcie-bt1.o
obj-$(CONFIG_PCI_DRA7XX) += pci-dra7xx.o
obj-$(CONFIG_PCI_EXYNOS) += pci-exynos.o
diff --git a/drivers/pci/controller/dwc/pci-dra7xx.c b/drivers/pci/controller/dwc/pci-dra7xx.c
index 33d6bf460ffe..f97f5266d196 100644
--- a/drivers/pci/controller/dwc/pci-dra7xx.c
+++ b/drivers/pci/controller/dwc/pci-dra7xx.c
@@ -118,12 +118,12 @@ static u64 dra7xx_pcie_cpu_addr_fixup(struct dw_pcie *pci, u64 cpu_addr)
return cpu_addr & DRA7XX_CPU_TO_BUS_ADDR;
}
-static int dra7xx_pcie_link_up(struct dw_pcie *pci)
+static bool dra7xx_pcie_link_up(struct dw_pcie *pci)
{
struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);
u32 reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_PHY_CS);
- return !!(reg & LINK_UP);
+ return reg & LINK_UP;
}
static void dra7xx_pcie_stop_link(struct dw_pcie *pci)
@@ -359,8 +359,8 @@ static int dra7xx_pcie_init_irq_domain(struct dw_pcie_rp *pp)
irq_set_chained_handler_and_data(pp->irq, dra7xx_pcie_msi_irq_handler,
pp);
- dra7xx->irq_domain = irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX,
- &intx_domain_ops, pp);
+ dra7xx->irq_domain = irq_domain_create_linear(of_fwnode_handle(pcie_intc_node),
+ PCI_NUM_INTX, &intx_domain_ops, pp);
of_node_put(pcie_intc_node);
if (!dra7xx->irq_domain) {
dev_err(dev, "Failed to get a INTx IRQ domain\n");
diff --git a/drivers/pci/controller/dwc/pci-exynos.c b/drivers/pci/controller/dwc/pci-exynos.c
index ace736b025b1..1f0e98d07109 100644
--- a/drivers/pci/controller/dwc/pci-exynos.c
+++ b/drivers/pci/controller/dwc/pci-exynos.c
@@ -209,12 +209,12 @@ static struct pci_ops exynos_pci_ops = {
.write = exynos_pcie_wr_own_conf,
};
-static int exynos_pcie_link_up(struct dw_pcie *pci)
+static bool exynos_pcie_link_up(struct dw_pcie *pci)
{
struct exynos_pcie *ep = to_exynos_pcie(pci);
u32 val = exynos_pcie_readl(ep->elbi_base, PCIE_ELBI_RDLH_LINKUP);
- return (val & PCIE_ELBI_XMLH_LINKUP);
+ return val & PCIE_ELBI_XMLH_LINKUP;
}
static int exynos_pcie_host_init(struct dw_pcie_rp *pp)
diff --git a/drivers/pci/controller/dwc/pci-imx6.c b/drivers/pci/controller/dwc/pci-imx6.c
index 90ace941090f..5a38cfaf989b 100644
--- a/drivers/pci/controller/dwc/pci-imx6.c
+++ b/drivers/pci/controller/dwc/pci-imx6.c
@@ -41,14 +41,18 @@
#define IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE BIT(11)
#define IMX8MQ_GPR_PCIE_VREG_BYPASS BIT(12)
#define IMX8MQ_GPR12_PCIE2_CTRL_DEVICE_TYPE GENMASK(11, 8)
-#define IMX8MQ_PCIE2_BASE_ADDR 0x33c00000
#define IMX95_PCIE_PHY_GEN_CTRL 0x0
#define IMX95_PCIE_REF_USE_PAD BIT(17)
+#define IMX95_PCIE_PHY_MPLLA_CTRL 0x10
+#define IMX95_PCIE_PHY_MPLL_STATE BIT(30)
+
#define IMX95_PCIE_SS_RW_REG_0 0xf0
#define IMX95_PCIE_REF_CLKEN BIT(23)
#define IMX95_PCIE_PHY_CR_PARA_SEL BIT(9)
+#define IMX95_PCIE_SS_RW_REG_1 0xf4
+#define IMX95_PCIE_SYS_AUX_PWR_DET BIT(31)
#define IMX95_PE0_GEN_CTRL_1 0x1050
#define IMX95_PCIE_DEVICE_TYPE GENMASK(3, 0)
@@ -72,6 +76,9 @@
#define IMX95_SID_MASK GENMASK(5, 0)
#define IMX95_MAX_LUT 32
+#define IMX95_PCIE_RST_CTRL 0x3010
+#define IMX95_PCIE_COLD_RST BIT(0)
+
#define to_imx_pcie(x) dev_get_drvdata((x)->dev)
enum imx_pcie_variants {
@@ -92,7 +99,7 @@ enum imx_pcie_variants {
};
#define IMX_PCIE_FLAG_IMX_PHY BIT(0)
-#define IMX_PCIE_FLAG_IMX_SPEED_CHANGE BIT(1)
+#define IMX_PCIE_FLAG_SPEED_CHANGE_WORKAROUND BIT(1)
#define IMX_PCIE_FLAG_SUPPORTS_SUSPEND BIT(2)
#define IMX_PCIE_FLAG_HAS_PHYDRV BIT(3)
#define IMX_PCIE_FLAG_HAS_APP_RESET BIT(4)
@@ -106,10 +113,10 @@ enum imx_pcie_variants {
*/
#define IMX_PCIE_FLAG_BROKEN_SUSPEND BIT(9)
#define IMX_PCIE_FLAG_HAS_LUT BIT(10)
+#define IMX_PCIE_FLAG_8GT_ECN_ERR051586 BIT(11)
#define imx_check_flag(pci, val) (pci->drvdata->flags & val)
-#define IMX_PCIE_MAX_CLKS 6
#define IMX_PCIE_MAX_INSTANCES 2
struct imx_pcie;
@@ -120,9 +127,6 @@ struct imx_pcie_drvdata {
u32 flags;
int dbi_length;
const char *gpr;
- const char * const *clk_names;
- const u32 clks_cnt;
- const u32 clks_optional_cnt;
const u32 ltssm_off;
const u32 ltssm_mask;
const u32 mode_off[IMX_PCIE_MAX_INSTANCES];
@@ -131,13 +135,20 @@ struct imx_pcie_drvdata {
int (*init_phy)(struct imx_pcie *pcie);
int (*enable_ref_clk)(struct imx_pcie *pcie, bool enable);
int (*core_reset)(struct imx_pcie *pcie, bool assert);
+ int (*wait_pll_lock)(struct imx_pcie *pcie);
const struct dw_pcie_host_ops *ops;
};
+struct imx_lut_data {
+ u32 data1;
+ u32 data2;
+};
+
struct imx_pcie {
struct dw_pcie *pci;
struct gpio_desc *reset_gpiod;
- struct clk_bulk_data clks[IMX_PCIE_MAX_CLKS];
+ struct clk_bulk_data *clks;
+ int num_clks;
struct regmap *iomuxc_gpr;
u16 msi_ctrl;
u32 controller_id;
@@ -152,6 +163,8 @@ struct imx_pcie {
struct regulator *vph;
void __iomem *phy_base;
+ /* LUT data for pcie */
+ struct imx_lut_data luts[IMX95_MAX_LUT];
/* power domain for pcie */
struct device *pd_pcie;
/* power domain for pcie phy */
@@ -228,6 +241,19 @@ static unsigned int imx_pcie_grp_offset(const struct imx_pcie *imx_pcie)
static int imx95_pcie_init_phy(struct imx_pcie *imx_pcie)
{
+ /*
+ * ERR051624: The Controller Without Vaux Cannot Exit L23 Ready
+ * Through Beacon or PERST# De-assertion
+ *
+ * When the auxiliary power is not available, the controller
+ * cannot exit from L23 Ready with beacon or PERST# de-assertion
+ * when main power is not removed.
+ *
+ * Workaround: Set SS_RW_REG_1[SYS_AUX_PWR_DET] to 1.
+ */
+ regmap_set_bits(imx_pcie->iomuxc_gpr, IMX95_PCIE_SS_RW_REG_1,
+ IMX95_PCIE_SYS_AUX_PWR_DET);
+
regmap_update_bits(imx_pcie->iomuxc_gpr,
IMX95_PCIE_SS_RW_REG_0,
IMX95_PCIE_PHY_CR_PARA_SEL,
@@ -464,19 +490,37 @@ static void imx7d_pcie_wait_for_phy_pll_lock(struct imx_pcie *imx_pcie)
dev_err(dev, "PCIe PLL lock timeout\n");
}
+static int imx95_pcie_wait_for_phy_pll_lock(struct imx_pcie *imx_pcie)
+{
+ u32 val;
+ struct device *dev = imx_pcie->pci->dev;
+
+ if (regmap_read_poll_timeout(imx_pcie->iomuxc_gpr,
+ IMX95_PCIE_PHY_MPLLA_CTRL, val,
+ val & IMX95_PCIE_PHY_MPLL_STATE,
+ PHY_PLL_LOCK_WAIT_USLEEP_MAX,
+ PHY_PLL_LOCK_WAIT_TIMEOUT)) {
+ dev_err(dev, "PCIe PLL lock timeout\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
static int imx_setup_phy_mpll(struct imx_pcie *imx_pcie)
{
unsigned long phy_rate = 0;
int mult, div;
u16 val;
int i;
+ struct clk_bulk_data *clks = imx_pcie->clks;
if (!(imx_pcie->drvdata->flags & IMX_PCIE_FLAG_IMX_PHY))
return 0;
- for (i = 0; i < imx_pcie->drvdata->clks_cnt; i++)
- if (strncmp(imx_pcie->clks[i].id, "pcie_phy", 8) == 0)
- phy_rate = clk_get_rate(imx_pcie->clks[i].clk);
+ for (i = 0; i < imx_pcie->num_clks; i++)
+ if (strncmp(clks[i].id, "pcie_phy", 8) == 0)
+ phy_rate = clk_get_rate(clks[i].clk);
switch (phy_rate) {
case 125000000:
@@ -668,7 +712,7 @@ static int imx_pcie_clk_enable(struct imx_pcie *imx_pcie)
struct device *dev = pci->dev;
int ret;
- ret = clk_bulk_prepare_enable(imx_pcie->drvdata->clks_cnt, imx_pcie->clks);
+ ret = clk_bulk_prepare_enable(imx_pcie->num_clks, imx_pcie->clks);
if (ret)
return ret;
@@ -685,7 +729,7 @@ static int imx_pcie_clk_enable(struct imx_pcie *imx_pcie)
return 0;
err_ref_clk:
- clk_bulk_disable_unprepare(imx_pcie->drvdata->clks_cnt, imx_pcie->clks);
+ clk_bulk_disable_unprepare(imx_pcie->num_clks, imx_pcie->clks);
return ret;
}
@@ -694,7 +738,7 @@ static void imx_pcie_clk_disable(struct imx_pcie *imx_pcie)
{
if (imx_pcie->drvdata->enable_ref_clk)
imx_pcie->drvdata->enable_ref_clk(imx_pcie, false);
- clk_bulk_disable_unprepare(imx_pcie->drvdata->clks_cnt, imx_pcie->clks);
+ clk_bulk_disable_unprepare(imx_pcie->num_clks, imx_pcie->clks);
}
static int imx6sx_pcie_core_reset(struct imx_pcie *imx_pcie, bool assert)
@@ -776,6 +820,43 @@ static int imx7d_pcie_core_reset(struct imx_pcie *imx_pcie, bool assert)
return 0;
}
+static int imx95_pcie_core_reset(struct imx_pcie *imx_pcie, bool assert)
+{
+ u32 val;
+
+ if (assert) {
+ /*
+ * From i.MX95 PCIe PHY perspective, the COLD reset toggle
+ * should be complete after power-up by the following sequence.
+ * > 10us(at power-up)
+ * > 10ns(warm reset)
+ * |<------------>|
+ * ______________
+ * phy_reset ____/ \________________
+ * ____________
+ * ref_clk_en_______________________/
+ * Toggle COLD reset aligned with this sequence for i.MX95 PCIe.
+ */
+ regmap_set_bits(imx_pcie->iomuxc_gpr, IMX95_PCIE_RST_CTRL,
+ IMX95_PCIE_COLD_RST);
+ /*
+ * Make sure the write to IMX95_PCIE_RST_CTRL is flushed to the
+ * hardware by doing a read. Otherwise, there is no guarantee
+ * that the write has reached the hardware before udelay().
+ */
+ regmap_read_bypassed(imx_pcie->iomuxc_gpr, IMX95_PCIE_RST_CTRL,
+ &val);
+ udelay(15);
+ regmap_clear_bits(imx_pcie->iomuxc_gpr, IMX95_PCIE_RST_CTRL,
+ IMX95_PCIE_COLD_RST);
+ regmap_read_bypassed(imx_pcie->iomuxc_gpr, IMX95_PCIE_RST_CTRL,
+ &val);
+ udelay(10);
+ }
+
+ return 0;
+}
+
static void imx_pcie_assert_core_reset(struct imx_pcie *imx_pcie)
{
reset_control_assert(imx_pcie->pciephy_reset);
@@ -863,6 +944,12 @@ static int imx_pcie_start_link(struct dw_pcie *pci)
u32 tmp;
int ret;
+ if (!(imx_pcie->drvdata->flags &
+ IMX_PCIE_FLAG_SPEED_CHANGE_WORKAROUND)) {
+ imx_pcie_ltssm_enable(dev);
+ return 0;
+ }
+
/*
* Force Gen1 operation when starting the link. In case the link is
* started in Gen2 mode, there is a possibility the devices on the
@@ -878,11 +965,11 @@ static int imx_pcie_start_link(struct dw_pcie *pci)
/* Start LTSSM. */
imx_pcie_ltssm_enable(dev);
- ret = dw_pcie_wait_for_link(pci);
- if (ret)
- goto err_reset_phy;
-
if (pci->max_link_speed > 1) {
+ ret = dw_pcie_wait_for_link(pci);
+ if (ret)
+ goto err_reset_phy;
+
/* Allow faster modes after the link is up */
dw_pcie_dbi_ro_wr_en(pci);
tmp = dw_pcie_readl_dbi(pci, offset + PCI_EXP_LNKCAP);
@@ -899,34 +986,15 @@ static int imx_pcie_start_link(struct dw_pcie *pci)
dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, tmp);
dw_pcie_dbi_ro_wr_dis(pci);
- if (imx_pcie->drvdata->flags &
- IMX_PCIE_FLAG_IMX_SPEED_CHANGE) {
-
- /*
- * On i.MX7, DIRECT_SPEED_CHANGE behaves differently
- * from i.MX6 family when no link speed transition
- * occurs and we go Gen1 -> yep, Gen1. The difference
- * is that, in such case, it will not be cleared by HW
- * which will cause the following code to report false
- * failure.
- */
- ret = imx_pcie_wait_for_speed_change(imx_pcie);
- if (ret) {
- dev_err(dev, "Failed to bring link up!\n");
- goto err_reset_phy;
- }
- }
-
- /* Make sure link training is finished as well! */
- ret = dw_pcie_wait_for_link(pci);
- if (ret)
+ ret = imx_pcie_wait_for_speed_change(imx_pcie);
+ if (ret) {
+ dev_err(dev, "Failed to bring link up!\n");
goto err_reset_phy;
+ }
} else {
dev_info(dev, "Link: Only Gen1 is enabled\n");
}
- tmp = dw_pcie_readw_dbi(pci, offset + PCI_EXP_LNKSTA);
- dev_info(dev, "Link up, Gen%i\n", tmp & PCI_EXP_LNKSTA_CLS);
return 0;
err_reset_phy:
@@ -1185,6 +1253,12 @@ static int imx_pcie_host_init(struct dw_pcie_rp *pp)
goto err_phy_off;
}
+ if (imx_pcie->drvdata->wait_pll_lock) {
+ ret = imx_pcie->drvdata->wait_pll_lock(imx_pcie);
+ if (ret < 0)
+ goto err_phy_off;
+ }
+
imx_setup_phy_mpll(imx_pcie);
return 0;
@@ -1217,20 +1291,30 @@ static void imx_pcie_host_exit(struct dw_pcie_rp *pp)
regulator_disable(imx_pcie->vpcie);
}
-static u64 imx_pcie_cpu_addr_fixup(struct dw_pcie *pcie, u64 cpu_addr)
+static void imx_pcie_host_post_init(struct dw_pcie_rp *pp)
{
- struct imx_pcie *imx_pcie = to_imx_pcie(pcie);
- struct dw_pcie_rp *pp = &pcie->pp;
- struct resource_entry *entry;
-
- if (!(imx_pcie->drvdata->flags & IMX_PCIE_FLAG_CPU_ADDR_FIXUP))
- return cpu_addr;
-
- entry = resource_list_first_type(&pp->bridge->windows, IORESOURCE_MEM);
- if (!entry)
- return cpu_addr;
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct imx_pcie *imx_pcie = to_imx_pcie(pci);
+ u32 val;
- return cpu_addr - entry->offset;
+ if (imx_pcie->drvdata->flags & IMX_PCIE_FLAG_8GT_ECN_ERR051586) {
+ /*
+ * ERR051586: Compliance with 8GT/s Receiver Impedance ECN
+ *
+ * The default value of GEN3_RELATED_OFF[GEN3_ZRXDC_NONCOMPL]
+ * is 1 which makes receiver non-compliant with the ZRX-DC
+ * parameter for 2.5 GT/s when operating at 8 GT/s or higher.
+ * It causes unnecessary timeout in L1.
+ *
+ * Workaround: Program GEN3_RELATED_OFF[GEN3_ZRXDC_NONCOMPL]
+ * to 0.
+ */
+ dw_pcie_dbi_ro_wr_en(pci);
+ val = dw_pcie_readl_dbi(pci, GEN3_RELATED_OFF);
+ val &= ~GEN3_RELATED_OFF_GEN3_ZRXDC_NONCOMPL;
+ dw_pcie_writel_dbi(pci, GEN3_RELATED_OFF, val);
+ dw_pcie_dbi_ro_wr_dis(pci);
+ }
}
/*
@@ -1258,12 +1342,12 @@ static const struct dw_pcie_host_ops imx_pcie_host_ops = {
static const struct dw_pcie_host_ops imx_pcie_host_dw_pme_ops = {
.init = imx_pcie_host_init,
.deinit = imx_pcie_host_exit,
+ .post_init = imx_pcie_host_post_init,
};
static const struct dw_pcie_ops dw_pcie_ops = {
.start_link = imx_pcie_start_link,
.stop_link = imx_pcie_stop_link,
- .cpu_addr_fixup = imx_pcie_cpu_addr_fixup,
};
static void imx_pcie_ep_init(struct dw_pcie_ep *ep)
@@ -1370,6 +1454,7 @@ static int imx_add_pcie_ep(struct imx_pcie *imx_pcie,
dev_err(dev, "failed to initialize endpoint\n");
return ret;
}
+ imx_pcie_host_post_init(pp);
ret = dw_pcie_ep_init_registers(ep);
if (ret) {
@@ -1406,6 +1491,42 @@ static void imx_pcie_msi_save_restore(struct imx_pcie *imx_pcie, bool save)
}
}
+static void imx_pcie_lut_save(struct imx_pcie *imx_pcie)
+{
+ u32 data1, data2;
+ int i;
+
+ for (i = 0; i < IMX95_MAX_LUT; i++) {
+ regmap_write(imx_pcie->iomuxc_gpr, IMX95_PE0_LUT_ACSCTRL,
+ IMX95_PEO_LUT_RWA | i);
+ regmap_read(imx_pcie->iomuxc_gpr, IMX95_PE0_LUT_DATA1, &data1);
+ regmap_read(imx_pcie->iomuxc_gpr, IMX95_PE0_LUT_DATA2, &data2);
+ if (data1 & IMX95_PE0_LUT_VLD) {
+ imx_pcie->luts[i].data1 = data1;
+ imx_pcie->luts[i].data2 = data2;
+ } else {
+ imx_pcie->luts[i].data1 = 0;
+ imx_pcie->luts[i].data2 = 0;
+ }
+ }
+}
+
+static void imx_pcie_lut_restore(struct imx_pcie *imx_pcie)
+{
+ int i;
+
+ for (i = 0; i < IMX95_MAX_LUT; i++) {
+ if ((imx_pcie->luts[i].data1 & IMX95_PE0_LUT_VLD) == 0)
+ continue;
+
+ regmap_write(imx_pcie->iomuxc_gpr, IMX95_PE0_LUT_DATA1,
+ imx_pcie->luts[i].data1);
+ regmap_write(imx_pcie->iomuxc_gpr, IMX95_PE0_LUT_DATA2,
+ imx_pcie->luts[i].data2);
+ regmap_write(imx_pcie->iomuxc_gpr, IMX95_PE0_LUT_ACSCTRL, i);
+ }
+}
+
static int imx_pcie_suspend_noirq(struct device *dev)
{
struct imx_pcie *imx_pcie = dev_get_drvdata(dev);
@@ -1414,6 +1535,8 @@ static int imx_pcie_suspend_noirq(struct device *dev)
return 0;
imx_pcie_msi_save_restore(imx_pcie, true);
+ if (imx_check_flag(imx_pcie, IMX_PCIE_FLAG_HAS_LUT))
+ imx_pcie_lut_save(imx_pcie);
if (imx_check_flag(imx_pcie, IMX_PCIE_FLAG_BROKEN_SUSPEND)) {
/*
* The minimum for a workaround would be to set PERST# and to
@@ -1458,6 +1581,8 @@ static int imx_pcie_resume_noirq(struct device *dev)
if (ret)
return ret;
}
+ if (imx_check_flag(imx_pcie, IMX_PCIE_FLAG_HAS_LUT))
+ imx_pcie_lut_restore(imx_pcie);
imx_pcie_msi_save_restore(imx_pcie, false);
return 0;
@@ -1474,9 +1599,8 @@ static int imx_pcie_probe(struct platform_device *pdev)
struct dw_pcie *pci;
struct imx_pcie *imx_pcie;
struct device_node *np;
- struct resource *dbi_base;
struct device_node *node = dev->of_node;
- int i, ret, req_cnt;
+ int ret, domain;
u16 val;
imx_pcie = devm_kzalloc(dev, sizeof(*imx_pcie), GFP_KERNEL);
@@ -1515,10 +1639,6 @@ static int imx_pcie_probe(struct platform_device *pdev)
return PTR_ERR(imx_pcie->phy_base);
}
- pci->dbi_base = devm_platform_get_and_ioremap_resource(pdev, 0, &dbi_base);
- if (IS_ERR(pci->dbi_base))
- return PTR_ERR(pci->dbi_base);
-
/* Fetch GPIOs */
imx_pcie->reset_gpiod = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
if (IS_ERR(imx_pcie->reset_gpiod))
@@ -1526,20 +1646,11 @@ static int imx_pcie_probe(struct platform_device *pdev)
"unable to get reset gpio\n");
gpiod_set_consumer_name(imx_pcie->reset_gpiod, "PCIe reset");
- if (imx_pcie->drvdata->clks_cnt >= IMX_PCIE_MAX_CLKS)
- return dev_err_probe(dev, -ENOMEM, "clks_cnt is too big\n");
-
- for (i = 0; i < imx_pcie->drvdata->clks_cnt; i++)
- imx_pcie->clks[i].id = imx_pcie->drvdata->clk_names[i];
-
/* Fetch clocks */
- req_cnt = imx_pcie->drvdata->clks_cnt - imx_pcie->drvdata->clks_optional_cnt;
- ret = devm_clk_bulk_get(dev, req_cnt, imx_pcie->clks);
- if (ret)
- return ret;
- imx_pcie->clks[req_cnt].clk = devm_clk_get_optional(dev, "ref");
- if (IS_ERR(imx_pcie->clks[req_cnt].clk))
- return PTR_ERR(imx_pcie->clks[req_cnt].clk);
+ imx_pcie->num_clks = devm_clk_bulk_get_all(dev, &imx_pcie->clks);
+ if (imx_pcie->num_clks < 0)
+ return dev_err_probe(dev, imx_pcie->num_clks,
+ "failed to get clocks\n");
if (imx_check_flag(imx_pcie, IMX_PCIE_FLAG_HAS_PHYDRV)) {
imx_pcie->phy = devm_phy_get(dev, "pcie-phy");
@@ -1565,8 +1676,11 @@ static int imx_pcie_probe(struct platform_device *pdev)
switch (imx_pcie->drvdata->variant) {
case IMX8MQ:
case IMX8MQ_EP:
- if (dbi_base->start == IMX8MQ_PCIE2_BASE_ADDR)
- imx_pcie->controller_id = 1;
+ domain = of_get_pci_domain_nr(node);
+ if (domain < 0 || domain > 1)
+ return dev_err_probe(dev, -ENODEV, "no \"linux,pci-domain\" property in devicetree\n");
+
+ imx_pcie->controller_id = domain;
break;
default:
break;
@@ -1645,6 +1759,7 @@ static int imx_pcie_probe(struct platform_device *pdev)
if (ret)
return ret;
+ pci->use_parent_dt_ranges = true;
if (imx_pcie->drvdata->mode == DW_PCIE_EP_TYPE) {
ret = imx_add_pcie_ep(imx_pcie, pdev);
if (ret < 0)
@@ -1675,24 +1790,15 @@ static void imx_pcie_shutdown(struct platform_device *pdev)
imx_pcie_assert_core_reset(imx_pcie);
}
-static const char * const imx6q_clks[] = {"pcie_bus", "pcie", "pcie_phy"};
-static const char * const imx8mm_clks[] = {"pcie_bus", "pcie", "pcie_aux"};
-static const char * const imx8mq_clks[] = {"pcie_bus", "pcie", "pcie_phy", "pcie_aux"};
-static const char * const imx6sx_clks[] = {"pcie_bus", "pcie", "pcie_phy", "pcie_inbound_axi"};
-static const char * const imx8q_clks[] = {"mstr", "slv", "dbi"};
-static const char * const imx95_clks[] = {"pcie_bus", "pcie", "pcie_phy", "pcie_aux", "ref"};
-
static const struct imx_pcie_drvdata drvdata[] = {
[IMX6Q] = {
.variant = IMX6Q,
.flags = IMX_PCIE_FLAG_IMX_PHY |
- IMX_PCIE_FLAG_IMX_SPEED_CHANGE |
+ IMX_PCIE_FLAG_SPEED_CHANGE_WORKAROUND |
IMX_PCIE_FLAG_BROKEN_SUSPEND |
IMX_PCIE_FLAG_SUPPORTS_SUSPEND,
.dbi_length = 0x200,
.gpr = "fsl,imx6q-iomuxc-gpr",
- .clk_names = imx6q_clks,
- .clks_cnt = ARRAY_SIZE(imx6q_clks),
.ltssm_off = IOMUXC_GPR12,
.ltssm_mask = IMX6Q_GPR12_PCIE_CTL_2,
.mode_off[0] = IOMUXC_GPR12,
@@ -1704,11 +1810,9 @@ static const struct imx_pcie_drvdata drvdata[] = {
[IMX6SX] = {
.variant = IMX6SX,
.flags = IMX_PCIE_FLAG_IMX_PHY |
- IMX_PCIE_FLAG_IMX_SPEED_CHANGE |
+ IMX_PCIE_FLAG_SPEED_CHANGE_WORKAROUND |
IMX_PCIE_FLAG_SUPPORTS_SUSPEND,
.gpr = "fsl,imx6q-iomuxc-gpr",
- .clk_names = imx6sx_clks,
- .clks_cnt = ARRAY_SIZE(imx6sx_clks),
.ltssm_off = IOMUXC_GPR12,
.ltssm_mask = IMX6Q_GPR12_PCIE_CTL_2,
.mode_off[0] = IOMUXC_GPR12,
@@ -1721,12 +1825,10 @@ static const struct imx_pcie_drvdata drvdata[] = {
[IMX6QP] = {
.variant = IMX6QP,
.flags = IMX_PCIE_FLAG_IMX_PHY |
- IMX_PCIE_FLAG_IMX_SPEED_CHANGE |
+ IMX_PCIE_FLAG_SPEED_CHANGE_WORKAROUND |
IMX_PCIE_FLAG_SUPPORTS_SUSPEND,
.dbi_length = 0x200,
.gpr = "fsl,imx6q-iomuxc-gpr",
- .clk_names = imx6q_clks,
- .clks_cnt = ARRAY_SIZE(imx6q_clks),
.ltssm_off = IOMUXC_GPR12,
.ltssm_mask = IMX6Q_GPR12_PCIE_CTL_2,
.mode_off[0] = IOMUXC_GPR12,
@@ -1742,8 +1844,6 @@ static const struct imx_pcie_drvdata drvdata[] = {
IMX_PCIE_FLAG_HAS_APP_RESET |
IMX_PCIE_FLAG_HAS_PHY_RESET,
.gpr = "fsl,imx7d-iomuxc-gpr",
- .clk_names = imx6q_clks,
- .clks_cnt = ARRAY_SIZE(imx6q_clks),
.mode_off[0] = IOMUXC_GPR12,
.mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE,
.enable_ref_clk = imx7d_pcie_enable_ref_clk,
@@ -1755,8 +1855,6 @@ static const struct imx_pcie_drvdata drvdata[] = {
IMX_PCIE_FLAG_HAS_PHY_RESET |
IMX_PCIE_FLAG_SUPPORTS_SUSPEND,
.gpr = "fsl,imx8mq-iomuxc-gpr",
- .clk_names = imx8mq_clks,
- .clks_cnt = ARRAY_SIZE(imx8mq_clks),
.mode_off[0] = IOMUXC_GPR12,
.mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE,
.mode_off[1] = IOMUXC_GPR12,
@@ -1770,8 +1868,6 @@ static const struct imx_pcie_drvdata drvdata[] = {
IMX_PCIE_FLAG_HAS_PHYDRV |
IMX_PCIE_FLAG_HAS_APP_RESET,
.gpr = "fsl,imx8mm-iomuxc-gpr",
- .clk_names = imx8mm_clks,
- .clks_cnt = ARRAY_SIZE(imx8mm_clks),
.mode_off[0] = IOMUXC_GPR12,
.mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE,
.enable_ref_clk = imx8mm_pcie_enable_ref_clk,
@@ -1782,8 +1878,6 @@ static const struct imx_pcie_drvdata drvdata[] = {
IMX_PCIE_FLAG_HAS_PHYDRV |
IMX_PCIE_FLAG_HAS_APP_RESET,
.gpr = "fsl,imx8mp-iomuxc-gpr",
- .clk_names = imx8mm_clks,
- .clks_cnt = ARRAY_SIZE(imx8mm_clks),
.mode_off[0] = IOMUXC_GPR12,
.mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE,
.enable_ref_clk = imx8mm_pcie_enable_ref_clk,
@@ -1793,22 +1887,20 @@ static const struct imx_pcie_drvdata drvdata[] = {
.flags = IMX_PCIE_FLAG_HAS_PHYDRV |
IMX_PCIE_FLAG_CPU_ADDR_FIXUP |
IMX_PCIE_FLAG_SUPPORTS_SUSPEND,
- .clk_names = imx8q_clks,
- .clks_cnt = ARRAY_SIZE(imx8q_clks),
},
[IMX95] = {
.variant = IMX95,
.flags = IMX_PCIE_FLAG_HAS_SERDES |
IMX_PCIE_FLAG_HAS_LUT |
+ IMX_PCIE_FLAG_8GT_ECN_ERR051586 |
IMX_PCIE_FLAG_SUPPORTS_SUSPEND,
- .clk_names = imx95_clks,
- .clks_cnt = ARRAY_SIZE(imx95_clks),
- .clks_optional_cnt = 1,
.ltssm_off = IMX95_PE0_GEN_CTRL_3,
.ltssm_mask = IMX95_PCIE_LTSSM_EN,
.mode_off[0] = IMX95_PE0_GEN_CTRL_1,
.mode_mask[0] = IMX95_PCIE_DEVICE_TYPE,
+ .core_reset = imx95_pcie_core_reset,
.init_phy = imx95_pcie_init_phy,
+ .wait_pll_lock = imx95_pcie_wait_for_phy_pll_lock,
},
[IMX8MQ_EP] = {
.variant = IMX8MQ_EP,
@@ -1816,8 +1908,6 @@ static const struct imx_pcie_drvdata drvdata[] = {
IMX_PCIE_FLAG_HAS_PHY_RESET,
.mode = DW_PCIE_EP_TYPE,
.gpr = "fsl,imx8mq-iomuxc-gpr",
- .clk_names = imx8mq_clks,
- .clks_cnt = ARRAY_SIZE(imx8mq_clks),
.mode_off[0] = IOMUXC_GPR12,
.mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE,
.mode_off[1] = IOMUXC_GPR12,
@@ -1832,8 +1922,6 @@ static const struct imx_pcie_drvdata drvdata[] = {
IMX_PCIE_FLAG_HAS_PHYDRV,
.mode = DW_PCIE_EP_TYPE,
.gpr = "fsl,imx8mm-iomuxc-gpr",
- .clk_names = imx8mm_clks,
- .clks_cnt = ARRAY_SIZE(imx8mm_clks),
.mode_off[0] = IOMUXC_GPR12,
.mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE,
.epc_features = &imx8m_pcie_epc_features,
@@ -1845,8 +1933,6 @@ static const struct imx_pcie_drvdata drvdata[] = {
IMX_PCIE_FLAG_HAS_PHYDRV,
.mode = DW_PCIE_EP_TYPE,
.gpr = "fsl,imx8mp-iomuxc-gpr",
- .clk_names = imx8mm_clks,
- .clks_cnt = ARRAY_SIZE(imx8mm_clks),
.mode_off[0] = IOMUXC_GPR12,
.mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE,
.epc_features = &imx8m_pcie_epc_features,
@@ -1857,20 +1943,19 @@ static const struct imx_pcie_drvdata drvdata[] = {
.flags = IMX_PCIE_FLAG_HAS_PHYDRV,
.mode = DW_PCIE_EP_TYPE,
.epc_features = &imx8q_pcie_epc_features,
- .clk_names = imx8q_clks,
- .clks_cnt = ARRAY_SIZE(imx8q_clks),
},
[IMX95_EP] = {
.variant = IMX95_EP,
.flags = IMX_PCIE_FLAG_HAS_SERDES |
+ IMX_PCIE_FLAG_8GT_ECN_ERR051586 |
IMX_PCIE_FLAG_SUPPORT_64BIT,
- .clk_names = imx8mq_clks,
- .clks_cnt = ARRAY_SIZE(imx8mq_clks),
.ltssm_off = IMX95_PE0_GEN_CTRL_3,
.ltssm_mask = IMX95_PCIE_LTSSM_EN,
.mode_off[0] = IMX95_PE0_GEN_CTRL_1,
.mode_mask[0] = IMX95_PCIE_DEVICE_TYPE,
.init_phy = imx95_pcie_init_phy,
+ .core_reset = imx95_pcie_core_reset,
+ .wait_pll_lock = imx95_pcie_wait_for_phy_pll_lock,
.epc_features = &imx95_pcie_epc_features,
.mode = DW_PCIE_EP_TYPE,
},
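The clock rework above replaces the per-variant clk_names tables with devm_clk_bulk_get_all(), which allocates the clk_bulk_data array and fetches every clock listed in the device's DT node. A minimal sketch of the pattern, assuming clks/num_clks fields as in this patch:

    struct clk_bulk_data *clks;
    int num_clks, ret;

    num_clks = devm_clk_bulk_get_all(dev, &clks);   /* returns the count */
    if (num_clks < 0)
            return dev_err_probe(dev, num_clks, "failed to get clocks\n");

    ret = clk_bulk_prepare_enable(num_clks, clks);
    if (ret)
            return ret;
    /* ... */
    clk_bulk_disable_unprepare(num_clks, clks);

One consequence of this switch: which clocks are mandatory is now defined by the DT binding rather than by a table in the driver.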
diff --git a/drivers/pci/controller/dwc/pci-keystone.c b/drivers/pci/controller/dwc/pci-keystone.c
index 63bd5003da45..2b2632e513b5 100644
--- a/drivers/pci/controller/dwc/pci-keystone.c
+++ b/drivers/pci/controller/dwc/pci-keystone.c
@@ -492,13 +492,12 @@ static struct pci_ops ks_pcie_ops = {
* @pci: A pointer to the dw_pcie structure which holds the DesignWare PCIe host
* controller driver information.
*/
-static int ks_pcie_link_up(struct dw_pcie *pci)
+static bool ks_pcie_link_up(struct dw_pcie *pci)
{
u32 val;
val = dw_pcie_readl_dbi(pci, PCIE_PORT_DEBUG0);
- val &= PORT_LOGIC_LTSSM_STATE_MASK;
- return (val == PORT_LOGIC_LTSSM_STATE_L0);
+ return (val & PORT_LOGIC_LTSSM_STATE_MASK) == PORT_LOGIC_LTSSM_STATE_L0;
}
static void ks_pcie_stop_link(struct dw_pcie *pci)
@@ -761,7 +760,7 @@ static int ks_pcie_config_intx_irq(struct keystone_pcie *ks_pcie)
ks_pcie);
}
- intx_irq_domain = irq_domain_add_linear(intc_np, PCI_NUM_INTX,
+ intx_irq_domain = irq_domain_create_linear(of_fwnode_handle(intc_np), PCI_NUM_INTX,
&ks_pcie_intx_irq_domain_ops, NULL);
if (!intx_irq_domain) {
dev_err(dev, "Failed to add irq domain for INTX irqs\n");
@@ -966,11 +965,11 @@ static const struct pci_epc_features ks_pcie_am654_epc_features = {
.msix_capable = true,
.bar[BAR_0] = { .type = BAR_RESERVED, },
.bar[BAR_1] = { .type = BAR_RESERVED, },
- .bar[BAR_2] = { .type = BAR_FIXED, .fixed_size = SZ_1M, },
+ .bar[BAR_2] = { .type = BAR_RESIZABLE, },
.bar[BAR_3] = { .type = BAR_FIXED, .fixed_size = SZ_64K, },
.bar[BAR_4] = { .type = BAR_FIXED, .fixed_size = 256, },
- .bar[BAR_5] = { .type = BAR_FIXED, .fixed_size = SZ_1M, },
- .align = SZ_1M,
+ .bar[BAR_5] = { .type = BAR_RESIZABLE, },
+ .align = SZ_64K,
};
static const struct pci_epc_features*
diff --git a/drivers/pci/controller/dwc/pci-meson.c b/drivers/pci/controller/dwc/pci-meson.c
index db9482a113e9..787469d1b396 100644
--- a/drivers/pci/controller/dwc/pci-meson.c
+++ b/drivers/pci/controller/dwc/pci-meson.c
@@ -335,7 +335,7 @@ static struct pci_ops meson_pci_ops = {
.write = pci_generic_config_write,
};
-static int meson_pcie_link_up(struct dw_pcie *pci)
+static bool meson_pcie_link_up(struct dw_pcie *pci)
{
struct meson_pcie *mp = to_meson_pcie(pci);
struct device *dev = pci->dev;
@@ -363,7 +363,7 @@ static int meson_pcie_link_up(struct dw_pcie *pci)
dev_dbg(dev, "speed_okay\n");
if (smlh_up && rdlh_up && ltssm_up && speed_okay)
- return 1;
+ return true;
cnt++;
@@ -371,7 +371,7 @@ static int meson_pcie_link_up(struct dw_pcie *pci)
} while (cnt < WAIT_LINKUP_TIMEOUT);
dev_err(dev, "error: wait linkup timeout\n");
- return 0;
+ return false;
}
static int meson_pcie_host_init(struct dw_pcie_rp *pp)
diff --git a/drivers/pci/controller/dwc/pcie-amd-mdb.c b/drivers/pci/controller/dwc/pcie-amd-mdb.c
new file mode 100644
index 000000000000..9f7251a16d32
--- /dev/null
+++ b/drivers/pci/controller/dwc/pcie-amd-mdb.c
@@ -0,0 +1,476 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PCIe host controller driver for AMD MDB PCIe Bridge
+ *
+ * Copyright (C) 2024-2025, Advanced Micro Devices, Inc.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/gpio.h>
+#include <linux/interrupt.h>
+#include <linux/irqdomain.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/of_device.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/resource.h>
+#include <linux/types.h>
+
+#include "pcie-designware.h"
+
+#define AMD_MDB_TLP_IR_STATUS_MISC 0x4C0
+#define AMD_MDB_TLP_IR_MASK_MISC 0x4C4
+#define AMD_MDB_TLP_IR_ENABLE_MISC 0x4C8
+#define AMD_MDB_TLP_IR_DISABLE_MISC 0x4CC
+
+#define AMD_MDB_TLP_PCIE_INTX_MASK GENMASK(23, 16)
+
+#define AMD_MDB_PCIE_INTR_INTX_ASSERT(x) BIT((x) * 2)
+
+/* Interrupt registers definitions. */
+#define AMD_MDB_PCIE_INTR_CMPL_TIMEOUT 15
+#define AMD_MDB_PCIE_INTR_INTX 16
+#define AMD_MDB_PCIE_INTR_PM_PME_RCVD 24
+#define AMD_MDB_PCIE_INTR_PME_TO_ACK_RCVD 25
+#define AMD_MDB_PCIE_INTR_MISC_CORRECTABLE 26
+#define AMD_MDB_PCIE_INTR_NONFATAL 27
+#define AMD_MDB_PCIE_INTR_FATAL 28
+
+#define IMR(x) BIT(AMD_MDB_PCIE_INTR_ ##x)
+#define AMD_MDB_PCIE_IMR_ALL_MASK \
+ ( \
+ IMR(CMPL_TIMEOUT) | \
+ IMR(PM_PME_RCVD) | \
+ IMR(PME_TO_ACK_RCVD) | \
+ IMR(MISC_CORRECTABLE) | \
+ IMR(NONFATAL) | \
+ IMR(FATAL) | \
+ AMD_MDB_TLP_PCIE_INTX_MASK \
+ )
+
+/**
+ * struct amd_mdb_pcie - PCIe port information
+ * @pci: DesignWare PCIe controller structure
+ * @slcr: MDB System Level Control and Status Register (SLCR) base
+ * @intx_domain: INTx IRQ domain pointer
+ * @mdb_domain: MDB IRQ domain pointer
+ * @intx_irq: INTx IRQ number
+ */
+struct amd_mdb_pcie {
+ struct dw_pcie pci;
+ void __iomem *slcr;
+ struct irq_domain *intx_domain;
+ struct irq_domain *mdb_domain;
+ int intx_irq;
+};
+
+static const struct dw_pcie_host_ops amd_mdb_pcie_host_ops = {
+};
+
+static void amd_mdb_intx_irq_mask(struct irq_data *data)
+{
+ struct amd_mdb_pcie *pcie = irq_data_get_irq_chip_data(data);
+ struct dw_pcie *pci = &pcie->pci;
+ struct dw_pcie_rp *port = &pci->pp;
+ unsigned long flags;
+ u32 val;
+
+ raw_spin_lock_irqsave(&port->lock, flags);
+ val = FIELD_PREP(AMD_MDB_TLP_PCIE_INTX_MASK,
+ AMD_MDB_PCIE_INTR_INTX_ASSERT(data->hwirq));
+
+ /*
+ * Writing '1' to a bit in AMD_MDB_TLP_IR_DISABLE_MISC disables that
+ * interrupt; writing '0' has no effect.
+ */
+ writel_relaxed(val, pcie->slcr + AMD_MDB_TLP_IR_DISABLE_MISC);
+ raw_spin_unlock_irqrestore(&port->lock, flags);
+}
+
+static void amd_mdb_intx_irq_unmask(struct irq_data *data)
+{
+ struct amd_mdb_pcie *pcie = irq_data_get_irq_chip_data(data);
+ struct dw_pcie *pci = &pcie->pci;
+ struct dw_pcie_rp *port = &pci->pp;
+ unsigned long flags;
+ u32 val;
+
+ raw_spin_lock_irqsave(&port->lock, flags);
+ val = FIELD_PREP(AMD_MDB_TLP_PCIE_INTX_MASK,
+ AMD_MDB_PCIE_INTR_INTX_ASSERT(data->hwirq));
+
+ /*
+ * Writing '1' to a bit in AMD_MDB_TLP_IR_ENABLE_MISC enables that
+ * interrupt; writing '0' has no effect.
+ */
+ writel_relaxed(val, pcie->slcr + AMD_MDB_TLP_IR_ENABLE_MISC);
+ raw_spin_unlock_irqrestore(&port->lock, flags);
+}
+
+static struct irq_chip amd_mdb_intx_irq_chip = {
+ .name = "AMD MDB INTx",
+ .irq_mask = amd_mdb_intx_irq_mask,
+ .irq_unmask = amd_mdb_intx_irq_unmask,
+};
+
+/**
+ * amd_mdb_pcie_intx_map - Set the handler for INTx interrupts and mark the IRQ as valid
+ * @domain: IRQ domain
+ * @irq: Virtual IRQ number
+ * @hwirq: Hardware interrupt number
+ *
+ * Return: Always returns '0'.
+ */
+static int amd_mdb_pcie_intx_map(struct irq_domain *domain,
+ unsigned int irq, irq_hw_number_t hwirq)
+{
+ irq_set_chip_and_handler(irq, &amd_mdb_intx_irq_chip,
+ handle_level_irq);
+ irq_set_chip_data(irq, domain->host_data);
+ irq_set_status_flags(irq, IRQ_LEVEL);
+
+ return 0;
+}
+
+/* INTx IRQ domain operations. */
+static const struct irq_domain_ops amd_intx_domain_ops = {
+ .map = amd_mdb_pcie_intx_map,
+};
+
+static irqreturn_t dw_pcie_rp_intx(int irq, void *args)
+{
+ struct amd_mdb_pcie *pcie = args;
+ unsigned long val;
+ int i, int_status;
+
+ val = readl_relaxed(pcie->slcr + AMD_MDB_TLP_IR_STATUS_MISC);
+ int_status = FIELD_GET(AMD_MDB_TLP_PCIE_INTX_MASK, val);
+
+ for (i = 0; i < PCI_NUM_INTX; i++) {
+ if (int_status & AMD_MDB_PCIE_INTR_INTX_ASSERT(i))
+ generic_handle_domain_irq(pcie->intx_domain, i);
+ }
+
+ return IRQ_HANDLED;
+}
+
+#define _IC(x, s)[AMD_MDB_PCIE_INTR_ ## x] = { __stringify(x), s }
+
+static const struct {
+ const char *sym;
+ const char *str;
+} intr_cause[32] = {
+ _IC(CMPL_TIMEOUT, "Completion timeout"),
+ _IC(PM_PME_RCVD, "PM_PME message received"),
+ _IC(PME_TO_ACK_RCVD, "PME_TO_ACK message received"),
+ _IC(MISC_CORRECTABLE, "Correctable error message"),
+ _IC(NONFATAL, "Non-fatal error message"),
+ _IC(FATAL, "Fatal error message"),
+};
+
+static void amd_mdb_event_irq_mask(struct irq_data *d)
+{
+ struct amd_mdb_pcie *pcie = irq_data_get_irq_chip_data(d);
+ struct dw_pcie *pci = &pcie->pci;
+ struct dw_pcie_rp *port = &pci->pp;
+ unsigned long flags;
+ u32 val;
+
+ raw_spin_lock_irqsave(&port->lock, flags);
+ val = BIT(d->hwirq);
+ writel_relaxed(val, pcie->slcr + AMD_MDB_TLP_IR_DISABLE_MISC);
+ raw_spin_unlock_irqrestore(&port->lock, flags);
+}
+
+static void amd_mdb_event_irq_unmask(struct irq_data *d)
+{
+ struct amd_mdb_pcie *pcie = irq_data_get_irq_chip_data(d);
+ struct dw_pcie *pci = &pcie->pci;
+ struct dw_pcie_rp *port = &pci->pp;
+ unsigned long flags;
+ u32 val;
+
+ raw_spin_lock_irqsave(&port->lock, flags);
+ val = BIT(d->hwirq);
+ writel_relaxed(val, pcie->slcr + AMD_MDB_TLP_IR_ENABLE_MISC);
+ raw_spin_unlock_irqrestore(&port->lock, flags);
+}
+
+static struct irq_chip amd_mdb_event_irq_chip = {
+ .name = "AMD MDB RC-Event",
+ .irq_mask = amd_mdb_event_irq_mask,
+ .irq_unmask = amd_mdb_event_irq_unmask,
+};
+
+static int amd_mdb_pcie_event_map(struct irq_domain *domain,
+ unsigned int irq, irq_hw_number_t hwirq)
+{
+ irq_set_chip_and_handler(irq, &amd_mdb_event_irq_chip,
+ handle_level_irq);
+ irq_set_chip_data(irq, domain->host_data);
+ irq_set_status_flags(irq, IRQ_LEVEL);
+
+ return 0;
+}
+
+static const struct irq_domain_ops event_domain_ops = {
+ .map = amd_mdb_pcie_event_map,
+};
+
+static irqreturn_t amd_mdb_pcie_event(int irq, void *args)
+{
+ struct amd_mdb_pcie *pcie = args;
+ unsigned long val;
+ int i;
+
+ val = readl_relaxed(pcie->slcr + AMD_MDB_TLP_IR_STATUS_MISC);
+ val &= ~readl_relaxed(pcie->slcr + AMD_MDB_TLP_IR_MASK_MISC);
+ for_each_set_bit(i, &val, 32)
+ generic_handle_domain_irq(pcie->mdb_domain, i);
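+ /*
+ * Write the handled bits back to the status register to ack them
+ * (the status bits are assumed to be write-one-to-clear).
+ */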
+ writel_relaxed(val, pcie->slcr + AMD_MDB_TLP_IR_STATUS_MISC);
+
+ return IRQ_HANDLED;
+}
+
+static void amd_mdb_pcie_free_irq_domains(struct amd_mdb_pcie *pcie)
+{
+ if (pcie->intx_domain) {
+ irq_domain_remove(pcie->intx_domain);
+ pcie->intx_domain = NULL;
+ }
+
+ if (pcie->mdb_domain) {
+ irq_domain_remove(pcie->mdb_domain);
+ pcie->mdb_domain = NULL;
+ }
+}
+
+static int amd_mdb_pcie_init_port(struct amd_mdb_pcie *pcie)
+{
+ unsigned long val;
+
+ /* Disable all TLP interrupts. */
+ writel_relaxed(AMD_MDB_PCIE_IMR_ALL_MASK,
+ pcie->slcr + AMD_MDB_TLP_IR_DISABLE_MISC);
+
+ /* Clear pending TLP interrupts. */
+ val = readl_relaxed(pcie->slcr + AMD_MDB_TLP_IR_STATUS_MISC);
+ val &= AMD_MDB_PCIE_IMR_ALL_MASK;
+ writel_relaxed(val, pcie->slcr + AMD_MDB_TLP_IR_STATUS_MISC);
+
+ /* Enable all TLP interrupts. */
+ writel_relaxed(AMD_MDB_PCIE_IMR_ALL_MASK,
+ pcie->slcr + AMD_MDB_TLP_IR_ENABLE_MISC);
+
+ return 0;
+}
+
+/**
+ * amd_mdb_pcie_init_irq_domains - Initialize IRQ domains
+ * @pcie: PCIe port information
+ * @pdev: Platform device
+ *
+ * Return: '0' on success, error value on failure.
+ */
+static int amd_mdb_pcie_init_irq_domains(struct amd_mdb_pcie *pcie,
+ struct platform_device *pdev)
+{
+ struct dw_pcie *pci = &pcie->pci;
+ struct dw_pcie_rp *pp = &pci->pp;
+ struct device *dev = &pdev->dev;
+ struct device_node *node = dev->of_node;
+ struct device_node *pcie_intc_node;
+ int err;
+
+ pcie_intc_node = of_get_next_child(node, NULL);
+ if (!pcie_intc_node) {
+ dev_err(dev, "No PCIe Intc node found\n");
+ return -ENODEV;
+ }
+
+ pcie->mdb_domain = irq_domain_create_linear(of_fwnode_handle(pcie_intc_node), 32,
+ &event_domain_ops, pcie);
+ if (!pcie->mdb_domain) {
+ err = -ENOMEM;
+ dev_err(dev, "Failed to add MDB domain\n");
+ goto out;
+ }
+
+ irq_domain_update_bus_token(pcie->mdb_domain, DOMAIN_BUS_NEXUS);
+
+ pcie->intx_domain = irq_domain_create_linear(of_fwnode_handle(pcie_intc_node),
+ PCI_NUM_INTX, &amd_intx_domain_ops, pcie);
+ if (!pcie->intx_domain) {
+ err = -ENOMEM;
+ dev_err(dev, "Failed to add INTx domain\n");
+ goto mdb_out;
+ }
+
+ of_node_put(pcie_intc_node);
+ irq_domain_update_bus_token(pcie->intx_domain, DOMAIN_BUS_WIRED);
+
+ raw_spin_lock_init(&pp->lock);
+
+ return 0;
+mdb_out:
+ amd_mdb_pcie_free_irq_domains(pcie);
+out:
+ of_node_put(pcie_intc_node);
+ return err;
+}
+
+static irqreturn_t amd_mdb_pcie_intr_handler(int irq, void *args)
+{
+ struct amd_mdb_pcie *pcie = args;
+ struct device *dev;
+ struct irq_data *d;
+
+ dev = pcie->pci.dev;
+
+ /*
+ * In the future, error reporting will be hooked up to the AER subsystem.
+ * Currently, the driver prints a warning message to the user.
+ */
+ d = irq_domain_get_irq_data(pcie->mdb_domain, irq);
+ if (intr_cause[d->hwirq].str)
+ dev_warn(dev, "%s\n", intr_cause[d->hwirq].str);
+ else
+ dev_warn_once(dev, "Unknown IRQ %ld\n", d->hwirq);
+
+ return IRQ_HANDLED;
+}
+
+static int amd_mdb_setup_irq(struct amd_mdb_pcie *pcie,
+ struct platform_device *pdev)
+{
+ struct dw_pcie *pci = &pcie->pci;
+ struct dw_pcie_rp *pp = &pci->pp;
+ struct device *dev = &pdev->dev;
+ int i, irq, err;
+
+ amd_mdb_pcie_init_port(pcie);
+
+ pp->irq = platform_get_irq(pdev, 0);
+ if (pp->irq < 0)
+ return pp->irq;
+
+ for (i = 0; i < ARRAY_SIZE(intr_cause); i++) {
+ if (!intr_cause[i].str)
+ continue;
+
+ irq = irq_create_mapping(pcie->mdb_domain, i);
+ if (!irq) {
+ dev_err(dev, "Failed to map MDB domain interrupt\n");
+ return -ENOMEM;
+ }
+
+ err = devm_request_irq(dev, irq, amd_mdb_pcie_intr_handler,
+ IRQF_NO_THREAD, intr_cause[i].sym, pcie);
+ if (err) {
+ dev_err(dev, "Failed to request IRQ %d, err=%d\n",
+ irq, err);
+ return err;
+ }
+ }
+
+ pcie->intx_irq = irq_create_mapping(pcie->mdb_domain,
+ AMD_MDB_PCIE_INTR_INTX);
+ if (!pcie->intx_irq) {
+ dev_err(dev, "Failed to map INTx interrupt\n");
+ return -ENXIO;
+ }
+
+ err = devm_request_irq(dev, pcie->intx_irq, dw_pcie_rp_intx,
+ IRQF_NO_THREAD, NULL, pcie);
+ if (err) {
+ dev_err(dev, "Failed to request INTx IRQ %d, err=%d\n",
+ pcie->intx_irq, err);
+ return err;
+ }
+
+ /* Plug the main event handler. */
+ err = devm_request_irq(dev, pp->irq, amd_mdb_pcie_event, IRQF_NO_THREAD,
+ "amd_mdb pcie_irq", pcie);
+ if (err) {
+ dev_err(dev, "Failed to request event IRQ %d, err=%d\n",
+ pp->irq, err);
+ return err;
+ }
+
+ return 0;
+}
+
+static int amd_mdb_add_pcie_port(struct amd_mdb_pcie *pcie,
+ struct platform_device *pdev)
+{
+ struct dw_pcie *pci = &pcie->pci;
+ struct dw_pcie_rp *pp = &pci->pp;
+ struct device *dev = &pdev->dev;
+ int err;
+
+ pcie->slcr = devm_platform_ioremap_resource_byname(pdev, "slcr");
+ if (IS_ERR(pcie->slcr))
+ return PTR_ERR(pcie->slcr);
+
+ err = amd_mdb_pcie_init_irq_domains(pcie, pdev);
+ if (err)
+ return err;
+
+ err = amd_mdb_setup_irq(pcie, pdev);
+ if (err) {
+ dev_err(dev, "Failed to set up interrupts, err=%d\n", err);
+ goto out;
+ }
+
+ pp->ops = &amd_mdb_pcie_host_ops;
+
+ err = dw_pcie_host_init(pp);
+ if (err) {
+ dev_err(dev, "Failed to initialize host, err=%d\n", err);
+ goto out;
+ }
+
+ return 0;
+
+out:
+ amd_mdb_pcie_free_irq_domains(pcie);
+ return err;
+}
+
+static int amd_mdb_pcie_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct amd_mdb_pcie *pcie;
+ struct dw_pcie *pci;
+
+ pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
+ if (!pcie)
+ return -ENOMEM;
+
+ pci = &pcie->pci;
+ pci->dev = dev;
+
+ platform_set_drvdata(pdev, pcie);
+
+ return amd_mdb_add_pcie_port(pcie, pdev);
+}
+
+static const struct of_device_id amd_mdb_pcie_of_match[] = {
+ {
+ .compatible = "amd,versal2-mdb-host",
+ },
+ {},
+};
+
+static struct platform_driver amd_mdb_pcie_driver = {
+ .driver = {
+ .name = "amd-mdb-pcie",
+ .of_match_table = amd_mdb_pcie_of_match,
+ .suppress_bind_attrs = true,
+ },
+ .probe = amd_mdb_pcie_probe,
+};
+
+builtin_platform_driver(amd_mdb_pcie_driver);
diff --git a/drivers/pci/controller/dwc/pcie-armada8k.c b/drivers/pci/controller/dwc/pcie-armada8k.c
index b5c599ccaacf..c2650fd0d458 100644
--- a/drivers/pci/controller/dwc/pcie-armada8k.c
+++ b/drivers/pci/controller/dwc/pcie-armada8k.c
@@ -139,7 +139,7 @@ static int armada8k_pcie_setup_phys(struct armada8k_pcie *pcie)
return ret;
}
-static int armada8k_pcie_link_up(struct dw_pcie *pci)
+static bool armada8k_pcie_link_up(struct dw_pcie *pci)
{
u32 reg;
u32 mask = PCIE_GLB_STS_RDLH_LINK_UP | PCIE_GLB_STS_PHY_LINK_UP;
@@ -147,10 +147,10 @@ static int armada8k_pcie_link_up(struct dw_pcie *pci)
reg = dw_pcie_readl_dbi(pci, PCIE_GLOBAL_STATUS_REG);
if ((reg & mask) == mask)
- return 1;
+ return true;
dev_dbg(pci->dev, "No link detected (Global-Status: 0x%08x).\n", reg);
- return 0;
+ return false;
}
static int armada8k_pcie_start_link(struct dw_pcie *pci)
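Several link_up callbacks in this series (keystone, meson and armada8k above) are converted from int to bool. A minimal sketch of the shape such a callback now takes, assuming the dw_pcie_ops::link_up member was changed to bool as well (that hunk is outside this excerpt); the register names mirror the keystone variant:

static bool example_pcie_link_up(struct dw_pcie *pci)
{
	u32 val = dw_pcie_readl_dbi(pci, PCIE_PORT_DEBUG0);

	return (val & PORT_LOGIC_LTSSM_STATE_MASK) == PORT_LOGIC_LTSSM_STATE_L0;
}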
diff --git a/drivers/pci/controller/dwc/pcie-designware-debugfs.c b/drivers/pci/controller/dwc/pcie-designware-debugfs.c
new file mode 100644
index 000000000000..c67601096c48
--- /dev/null
+++ b/drivers/pci/controller/dwc/pcie-designware-debugfs.c
@@ -0,0 +1,927 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Synopsys DesignWare PCIe controller debugfs driver
+ *
+ * Copyright (C) 2025 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
+ *
+ * Author: Shradha Todi <shradha.t@samsung.com>
+ */
+
+#include <linux/debugfs.h>
+
+#include "pcie-designware.h"
+
+#define SD_STATUS_L1LANE_REG 0xb0
+#define PIPE_RXVALID BIT(18)
+#define PIPE_DETECT_LANE BIT(17)
+#define LANE_SELECT GENMASK(3, 0)
+
+#define ERR_INJ0_OFF 0x34
+#define EINJ_VAL_DIFF GENMASK(28, 16)
+#define EINJ_VC_NUM GENMASK(14, 12)
+#define EINJ_TYPE_SHIFT 8
+#define EINJ0_TYPE GENMASK(11, 8)
+#define EINJ1_TYPE BIT(8)
+#define EINJ2_TYPE GENMASK(9, 8)
+#define EINJ3_TYPE GENMASK(10, 8)
+#define EINJ4_TYPE GENMASK(10, 8)
+#define EINJ5_TYPE BIT(8)
+#define EINJ_COUNT GENMASK(7, 0)
+
+#define ERR_INJ_ENABLE_REG 0x30
+
+#define RAS_DES_EVENT_COUNTER_DATA_REG 0xc
+
+#define RAS_DES_EVENT_COUNTER_CTRL_REG 0x8
+#define EVENT_COUNTER_GROUP_SELECT GENMASK(27, 24)
+#define EVENT_COUNTER_EVENT_SELECT GENMASK(23, 16)
+#define EVENT_COUNTER_LANE_SELECT GENMASK(11, 8)
+#define EVENT_COUNTER_STATUS BIT(7)
+#define EVENT_COUNTER_ENABLE GENMASK(4, 2)
+#define PER_EVENT_ON 0x3
+#define PER_EVENT_OFF 0x1
+
+#define DWC_DEBUGFS_BUF_MAX 128
+
+/**
+ * struct dwc_pcie_rasdes_info - Stores common controller information
+ * @ras_cap_offset: RAS DES vendor specific extended capability offset
+ * @reg_event_lock: Mutex used for RAS DES shadow event registers
+ *
+ * Any parameter that is constant across all files of the debugfs hierarchy
+ * for a single controller is stored in this struct. It is allocated and
+ * assigned to the controller-specific struct dw_pcie during initialization.
+ */
+struct dwc_pcie_rasdes_info {
+ u32 ras_cap_offset;
+ struct mutex reg_event_lock;
+};
+
+/**
+ * struct dwc_pcie_rasdes_priv - Stores file-specific private data
+ * @pci: Reference to the dw_pcie structure
+ * @idx: Index of this file's entry in the err_inj_list or event_list array
+ *
+ * All debugfs files will have this struct as their private data.
+ */
+struct dwc_pcie_rasdes_priv {
+ struct dw_pcie *pci;
+ int idx;
+};
+
+/**
+ * struct dwc_pcie_err_inj - Store details about each error injection
+ * supported by DWC RAS DES
+ * @name: Name of the error that can be injected
+ * @err_inj_group: Group number to which the error belongs. The value
+ * can range from 0 to 5
+ * @err_inj_type: Each group can have multiple types of error
+ */
+struct dwc_pcie_err_inj {
+ const char *name;
+ u32 err_inj_group;
+ u32 err_inj_type;
+};
+
+static const struct dwc_pcie_err_inj err_inj_list[] = {
+ {"tx_lcrc", 0x0, 0x0},
+ {"b16_crc_dllp", 0x0, 0x1},
+ {"b16_crc_upd_fc", 0x0, 0x2},
+ {"tx_ecrc", 0x0, 0x3},
+ {"fcrc_tlp", 0x0, 0x4},
+ {"parity_tsos", 0x0, 0x5},
+ {"parity_skpos", 0x0, 0x6},
+ {"rx_lcrc", 0x0, 0x8},
+ {"rx_ecrc", 0x0, 0xb},
+ {"tlp_err_seq", 0x1, 0x0},
+ {"ack_nak_dllp_seq", 0x1, 0x1},
+ {"ack_nak_dllp", 0x2, 0x0},
+ {"upd_fc_dllp", 0x2, 0x1},
+ {"nak_dllp", 0x2, 0x2},
+ {"inv_sync_hdr_sym", 0x3, 0x0},
+ {"com_pad_ts1", 0x3, 0x1},
+ {"com_pad_ts2", 0x3, 0x2},
+ {"com_fts", 0x3, 0x3},
+ {"com_idl", 0x3, 0x4},
+ {"end_edb", 0x3, 0x5},
+ {"stp_sdp", 0x3, 0x6},
+ {"com_skp", 0x3, 0x7},
+ {"posted_tlp_hdr", 0x4, 0x0},
+ {"non_post_tlp_hdr", 0x4, 0x1},
+ {"cmpl_tlp_hdr", 0x4, 0x2},
+ {"posted_tlp_data", 0x4, 0x4},
+ {"non_post_tlp_data", 0x4, 0x5},
+ {"cmpl_tlp_data", 0x4, 0x6},
+ {"duplicate_tlp", 0x5, 0x0},
+ {"nullified_tlp", 0x5, 0x1},
+};
+
+static const u32 err_inj_type_mask[] = {
+ EINJ0_TYPE,
+ EINJ1_TYPE,
+ EINJ2_TYPE,
+ EINJ3_TYPE,
+ EINJ4_TYPE,
+ EINJ5_TYPE,
+};
+
+/**
+ * struct dwc_pcie_event_counter - Store details about each event counter
+ * supported in DWC RAS DES
+ * @name: Name of the error counter
+ * @group_no: Group number that the event belongs to. The value can range
+ * from 0 to 4
+ * @event_no: Event number of the particular event. The value ranges are:
+ * Group 0: 0 - 10
+ * Group 1: 5 - 13
+ * Group 2: 0 - 7
+ * Group 3: 0 - 5
+ * Group 4: 0 - 1
+ */
+struct dwc_pcie_event_counter {
+ const char *name;
+ u32 group_no;
+ u32 event_no;
+};
+
+static const struct dwc_pcie_event_counter event_list[] = {
+ {"ebuf_overflow", 0x0, 0x0},
+ {"ebuf_underrun", 0x0, 0x1},
+ {"decode_err", 0x0, 0x2},
+ {"running_disparity_err", 0x0, 0x3},
+ {"skp_os_parity_err", 0x0, 0x4},
+ {"sync_header_err", 0x0, 0x5},
+ {"rx_valid_deassertion", 0x0, 0x6},
+ {"ctl_skp_os_parity_err", 0x0, 0x7},
+ {"retimer_parity_err_1st", 0x0, 0x8},
+ {"retimer_parity_err_2nd", 0x0, 0x9},
+ {"margin_crc_parity_err", 0x0, 0xA},
+ {"detect_ei_infer", 0x1, 0x5},
+ {"receiver_err", 0x1, 0x6},
+ {"rx_recovery_req", 0x1, 0x7},
+ {"n_fts_timeout", 0x1, 0x8},
+ {"framing_err", 0x1, 0x9},
+ {"deskew_err", 0x1, 0xa},
+ {"framing_err_in_l0", 0x1, 0xc},
+ {"deskew_uncompleted_err", 0x1, 0xd},
+ {"bad_tlp", 0x2, 0x0},
+ {"lcrc_err", 0x2, 0x1},
+ {"bad_dllp", 0x2, 0x2},
+ {"replay_num_rollover", 0x2, 0x3},
+ {"replay_timeout", 0x2, 0x4},
+ {"rx_nak_dllp", 0x2, 0x5},
+ {"tx_nak_dllp", 0x2, 0x6},
+ {"retry_tlp", 0x2, 0x7},
+ {"fc_timeout", 0x3, 0x0},
+ {"poisoned_tlp", 0x3, 0x1},
+ {"ecrc_error", 0x3, 0x2},
+ {"unsupported_request", 0x3, 0x3},
+ {"completer_abort", 0x3, 0x4},
+ {"completion_timeout", 0x3, 0x5},
+ {"ebuf_skp_add", 0x4, 0x0},
+ {"ebuf_skp_del", 0x4, 0x1},
+};
+
+static ssize_t lane_detect_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct dw_pcie *pci = file->private_data;
+ struct dwc_pcie_rasdes_info *rinfo = pci->debugfs->rasdes_info;
+ char debugfs_buf[DWC_DEBUGFS_BUF_MAX];
+ ssize_t pos;
+ u32 val;
+
+ val = dw_pcie_readl_dbi(pci, rinfo->ras_cap_offset + SD_STATUS_L1LANE_REG);
+ val = FIELD_GET(PIPE_DETECT_LANE, val);
+ if (val)
+ pos = scnprintf(debugfs_buf, DWC_DEBUGFS_BUF_MAX, "Lane Detected\n");
+ else
+ pos = scnprintf(debugfs_buf, DWC_DEBUGFS_BUF_MAX, "Lane Undetected\n");
+
+ return simple_read_from_buffer(buf, count, ppos, debugfs_buf, pos);
+}
+
+static ssize_t lane_detect_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct dw_pcie *pci = file->private_data;
+ struct dwc_pcie_rasdes_info *rinfo = pci->debugfs->rasdes_info;
+ u32 lane, val;
+ int ret;
+
+ ret = kstrtou32_from_user(buf, count, 0, &lane);
+ if (ret)
+ return ret;
+
+ val = dw_pcie_readl_dbi(pci, rinfo->ras_cap_offset + SD_STATUS_L1LANE_REG);
+ val &= ~(LANE_SELECT);
+ val |= FIELD_PREP(LANE_SELECT, lane);
+ dw_pcie_writel_dbi(pci, rinfo->ras_cap_offset + SD_STATUS_L1LANE_REG, val);
+
+ return count;
+}
+
+static ssize_t rx_valid_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct dw_pcie *pci = file->private_data;
+ struct dwc_pcie_rasdes_info *rinfo = pci->debugfs->rasdes_info;
+ char debugfs_buf[DWC_DEBUGFS_BUF_MAX];
+ ssize_t pos;
+ u32 val;
+
+ val = dw_pcie_readl_dbi(pci, rinfo->ras_cap_offset + SD_STATUS_L1LANE_REG);
+ val = FIELD_GET(PIPE_RXVALID, val);
+ if (val)
+ pos = scnprintf(debugfs_buf, DWC_DEBUGFS_BUF_MAX, "RX Valid\n");
+ else
+ pos = scnprintf(debugfs_buf, DWC_DEBUGFS_BUF_MAX, "RX Invalid\n");
+
+ return simple_read_from_buffer(buf, count, ppos, debugfs_buf, pos);
+}
+
+static ssize_t rx_valid_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ return lane_detect_write(file, buf, count, ppos);
+}
+
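+/*
+ * Write format for these error injection files (matching the parsing
+ * below): groups 0, 2, 3 and 5 take "<count>"; group 1 takes
+ * "<count> <val_diff>"; group 4 takes "<count> <val_diff> <vc_num>",
+ * with val_diff limited to the range -4095 to 4095.
+ */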
+static ssize_t err_inj_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct dwc_pcie_rasdes_priv *pdata = file->private_data;
+ struct dw_pcie *pci = pdata->pci;
+ struct dwc_pcie_rasdes_info *rinfo = pci->debugfs->rasdes_info;
+ u32 val, counter, vc_num, err_group, type_mask;
+ int val_diff = 0, ret;
+ char *kern_buf;
+
+ err_group = err_inj_list[pdata->idx].err_inj_group;
+ type_mask = err_inj_type_mask[err_group];
+
+ kern_buf = memdup_user_nul(buf, count);
+ if (IS_ERR(kern_buf))
+ return PTR_ERR(kern_buf);
+
+ if (err_group == 4) {
+ val = sscanf(kern_buf, "%u %d %u", &counter, &val_diff, &vc_num);
+ if ((val != 3) || (val_diff < -4095 || val_diff > 4095)) {
+ kfree(kern_buf);
+ return -EINVAL;
+ }
+ } else if (err_group == 1) {
+ val = sscanf(kern_buf, "%u %d", &counter, &val_diff);
+ if ((val != 2) || (val_diff < -4095 || val_diff > 4095)) {
+ kfree(kern_buf);
+ return -EINVAL;
+ }
+ } else {
+ ret = kstrtou32(kern_buf, 0, &counter);
+ if (ret) {
+ kfree(kern_buf);
+ return ret;
+ }
+ }
+
+ val = dw_pcie_readl_dbi(pci, rinfo->ras_cap_offset + ERR_INJ0_OFF + (0x4 * err_group));
+ val &= ~(type_mask | EINJ_COUNT);
+ val |= ((err_inj_list[pdata->idx].err_inj_type << EINJ_TYPE_SHIFT) & type_mask);
+ val |= FIELD_PREP(EINJ_COUNT, counter);
+
+ if (err_group == 1 || err_group == 4) {
+ val &= ~(EINJ_VAL_DIFF);
+ val |= FIELD_PREP(EINJ_VAL_DIFF, val_diff);
+ }
+ if (err_group == 4) {
+ val &= ~(EINJ_VC_NUM);
+ val |= FIELD_PREP(EINJ_VC_NUM, vc_num);
+ }
+
+ dw_pcie_writel_dbi(pci, rinfo->ras_cap_offset + ERR_INJ0_OFF + (0x4 * err_group), val);
+ dw_pcie_writel_dbi(pci, rinfo->ras_cap_offset + ERR_INJ_ENABLE_REG, (0x1 << err_group));
+
+ kfree(kern_buf);
+ return count;
+}
+
+static void set_event_number(struct dwc_pcie_rasdes_priv *pdata,
+ struct dw_pcie *pci, struct dwc_pcie_rasdes_info *rinfo)
+{
+ u32 val;
+
+ val = dw_pcie_readl_dbi(pci, rinfo->ras_cap_offset + RAS_DES_EVENT_COUNTER_CTRL_REG);
+ val &= ~EVENT_COUNTER_ENABLE;
+ val &= ~(EVENT_COUNTER_GROUP_SELECT | EVENT_COUNTER_EVENT_SELECT);
+ val |= FIELD_PREP(EVENT_COUNTER_GROUP_SELECT, event_list[pdata->idx].group_no);
+ val |= FIELD_PREP(EVENT_COUNTER_EVENT_SELECT, event_list[pdata->idx].event_no);
+ dw_pcie_writel_dbi(pci, rinfo->ras_cap_offset + RAS_DES_EVENT_COUNTER_CTRL_REG, val);
+}
+
+static ssize_t counter_enable_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct dwc_pcie_rasdes_priv *pdata = file->private_data;
+ struct dw_pcie *pci = pdata->pci;
+ struct dwc_pcie_rasdes_info *rinfo = pci->debugfs->rasdes_info;
+ char debugfs_buf[DWC_DEBUGFS_BUF_MAX];
+ ssize_t pos;
+ u32 val;
+
+ mutex_lock(&rinfo->reg_event_lock);
+ set_event_number(pdata, pci, rinfo);
+ val = dw_pcie_readl_dbi(pci, rinfo->ras_cap_offset + RAS_DES_EVENT_COUNTER_CTRL_REG);
+ mutex_unlock(&rinfo->reg_event_lock);
+ val = FIELD_GET(EVENT_COUNTER_STATUS, val);
+ if (val)
+ pos = scnprintf(debugfs_buf, DWC_DEBUGFS_BUF_MAX, "Counter Enabled\n");
+ else
+ pos = scnprintf(debugfs_buf, DWC_DEBUGFS_BUF_MAX, "Counter Disabled\n");
+
+ return simple_read_from_buffer(buf, count, ppos, debugfs_buf, pos);
+}
+
+static ssize_t counter_enable_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct dwc_pcie_rasdes_priv *pdata = file->private_data;
+ struct dw_pcie *pci = pdata->pci;
+ struct dwc_pcie_rasdes_info *rinfo = pci->debugfs->rasdes_info;
+ u32 val, enable;
+ int ret;
+
+ ret = kstrtou32_from_user(buf, count, 0, &enable);
+ if (ret)
+ return ret;
+
+ mutex_lock(&rinfo->reg_event_lock);
+ set_event_number(pdata, pci, rinfo);
+ val = dw_pcie_readl_dbi(pci, rinfo->ras_cap_offset + RAS_DES_EVENT_COUNTER_CTRL_REG);
+ if (enable)
+ val |= FIELD_PREP(EVENT_COUNTER_ENABLE, PER_EVENT_ON);
+ else
+ val |= FIELD_PREP(EVENT_COUNTER_ENABLE, PER_EVENT_OFF);
+
+ dw_pcie_writel_dbi(pci, rinfo->ras_cap_offset + RAS_DES_EVENT_COUNTER_CTRL_REG, val);
+
+ /*
+ * When enabling the counter, read the status back to confirm that it
+ * was actually enabled. If it was not, return an error so users know
+ * the counter is not supported on this platform.
+ */
+ if (enable) {
+ val = dw_pcie_readl_dbi(pci, rinfo->ras_cap_offset +
+ RAS_DES_EVENT_COUNTER_CTRL_REG);
+ if (!FIELD_GET(EVENT_COUNTER_STATUS, val)) {
+ mutex_unlock(&rinfo->reg_event_lock);
+ return -EOPNOTSUPP;
+ }
+ }
+
+ mutex_unlock(&rinfo->reg_event_lock);
+
+ return count;
+}
+
+static ssize_t counter_lane_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct dwc_pcie_rasdes_priv *pdata = file->private_data;
+ struct dw_pcie *pci = pdata->pci;
+ struct dwc_pcie_rasdes_info *rinfo = pci->debugfs->rasdes_info;
+ char debugfs_buf[DWC_DEBUGFS_BUF_MAX];
+ ssize_t pos;
+ u32 val;
+
+ mutex_lock(&rinfo->reg_event_lock);
+ set_event_number(pdata, pci, rinfo);
+ val = dw_pcie_readl_dbi(pci, rinfo->ras_cap_offset + RAS_DES_EVENT_COUNTER_CTRL_REG);
+ mutex_unlock(&rinfo->reg_event_lock);
+ val = FIELD_GET(EVENT_COUNTER_LANE_SELECT, val);
+ pos = scnprintf(debugfs_buf, DWC_DEBUGFS_BUF_MAX, "Lane: %d\n", val);
+
+ return simple_read_from_buffer(buf, count, ppos, debugfs_buf, pos);
+}
+
+static ssize_t counter_lane_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct dwc_pcie_rasdes_priv *pdata = file->private_data;
+ struct dw_pcie *pci = pdata->pci;
+ struct dwc_pcie_rasdes_info *rinfo = pci->debugfs->rasdes_info;
+ u32 val, lane;
+ int ret;
+
+ ret = kstrtou32_from_user(buf, count, 0, &lane);
+ if (ret)
+ return ret;
+
+ mutex_lock(&rinfo->reg_event_lock);
+ set_event_number(pdata, pci, rinfo);
+ val = dw_pcie_readl_dbi(pci, rinfo->ras_cap_offset + RAS_DES_EVENT_COUNTER_CTRL_REG);
+ val &= ~(EVENT_COUNTER_LANE_SELECT);
+ val |= FIELD_PREP(EVENT_COUNTER_LANE_SELECT, lane);
+ dw_pcie_writel_dbi(pci, rinfo->ras_cap_offset + RAS_DES_EVENT_COUNTER_CTRL_REG, val);
+ mutex_unlock(&rinfo->reg_event_lock);
+
+ return count;
+}
+
+static ssize_t counter_value_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct dwc_pcie_rasdes_priv *pdata = file->private_data;
+ struct dw_pcie *pci = pdata->pci;
+ struct dwc_pcie_rasdes_info *rinfo = pci->debugfs->rasdes_info;
+ char debugfs_buf[DWC_DEBUGFS_BUF_MAX];
+ ssize_t pos;
+ u32 val;
+
+ mutex_lock(&rinfo->reg_event_lock);
+ set_event_number(pdata, pci, rinfo);
+ val = dw_pcie_readl_dbi(pci, rinfo->ras_cap_offset + RAS_DES_EVENT_COUNTER_DATA_REG);
+ mutex_unlock(&rinfo->reg_event_lock);
+ pos = scnprintf(debugfs_buf, DWC_DEBUGFS_BUF_MAX, "Counter value: %d\n", val);
+
+ return simple_read_from_buffer(buf, count, ppos, debugfs_buf, pos);
+}
+
+static const char *ltssm_status_string(enum dw_pcie_ltssm ltssm)
+{
+ const char *str;
+
+ switch (ltssm) {
+#define DW_PCIE_LTSSM_NAME(n) case n: str = #n; break
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_DETECT_QUIET);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_DETECT_ACT);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_POLL_ACTIVE);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_POLL_COMPLIANCE);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_POLL_CONFIG);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_PRE_DETECT_QUIET);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_DETECT_WAIT);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_CFG_LINKWD_START);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_CFG_LINKWD_ACEPT);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_CFG_LANENUM_WAI);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_CFG_LANENUM_ACEPT);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_CFG_COMPLETE);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_CFG_IDLE);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_RCVRY_LOCK);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_RCVRY_SPEED);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_RCVRY_RCVRCFG);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_RCVRY_IDLE);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_L0);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_L0S);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_L123_SEND_EIDLE);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_L1_IDLE);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_L2_IDLE);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_L2_WAKE);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_DISABLED_ENTRY);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_DISABLED_IDLE);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_DISABLED);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_LPBK_ENTRY);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_LPBK_ACTIVE);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_LPBK_EXIT);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_LPBK_EXIT_TIMEOUT);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_HOT_RESET_ENTRY);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_HOT_RESET);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_RCVRY_EQ0);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_RCVRY_EQ1);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_RCVRY_EQ2);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_RCVRY_EQ3);
+ default:
+ str = "DW_PCIE_LTSSM_UNKNOWN";
+ break;
+ }
+
+ return str + strlen("DW_PCIE_LTSSM_");
+}
+
+static int ltssm_status_show(struct seq_file *s, void *v)
+{
+ struct dw_pcie *pci = s->private;
+ enum dw_pcie_ltssm val;
+
+ val = dw_pcie_get_ltssm(pci);
+ seq_printf(s, "%s (0x%02x)\n", ltssm_status_string(val), val);
+
+ return 0;
+}
+
+static int ltssm_status_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, ltssm_status_show, inode->i_private);
+}
+
+#define dwc_debugfs_create(name) \
+debugfs_create_file(#name, 0644, rasdes_debug, pci, \
+ &dbg_ ## name ## _fops)
+
+#define DWC_DEBUGFS_FOPS(name) \
+static const struct file_operations dbg_ ## name ## _fops = { \
+ .open = simple_open, \
+ .read = name ## _read, \
+ .write = name ## _write \
+}
+
+DWC_DEBUGFS_FOPS(lane_detect);
+DWC_DEBUGFS_FOPS(rx_valid);
+
+static const struct file_operations dwc_pcie_err_inj_ops = {
+ .open = simple_open,
+ .write = err_inj_write,
+};
+
+static const struct file_operations dwc_pcie_counter_enable_ops = {
+ .open = simple_open,
+ .read = counter_enable_read,
+ .write = counter_enable_write,
+};
+
+static const struct file_operations dwc_pcie_counter_lane_ops = {
+ .open = simple_open,
+ .read = counter_lane_read,
+ .write = counter_lane_write,
+};
+
+static const struct file_operations dwc_pcie_counter_value_ops = {
+ .open = simple_open,
+ .read = counter_value_read,
+};
+
+static const struct file_operations dwc_pcie_ltssm_status_ops = {
+ .open = ltssm_status_open,
+ .read = seq_read,
+};
+
+static void dwc_pcie_rasdes_debugfs_deinit(struct dw_pcie *pci)
+{
+ struct dwc_pcie_rasdes_info *rinfo = pci->debugfs->rasdes_info;
+
+ mutex_destroy(&rinfo->reg_event_lock);
+}
+
+static int dwc_pcie_rasdes_debugfs_init(struct dw_pcie *pci, struct dentry *dir)
+{
+ struct dentry *rasdes_debug, *rasdes_err_inj;
+ struct dentry *rasdes_event_counter, *rasdes_events;
+ struct dwc_pcie_rasdes_info *rasdes_info;
+ struct dwc_pcie_rasdes_priv *priv_tmp;
+ struct device *dev = pci->dev;
+ int ras_cap, i, ret;
+
+ /*
+ * Not every SoC implements the RAS DES capability, so the lookup below
+ * may legitimately fail on existing platforms. Return 0 in that case,
+ * as the absence of the capability is not an error.
+ */
+ ras_cap = dw_pcie_find_rasdes_capability(pci);
+ if (!ras_cap) {
+ dev_dbg(dev, "no RAS DES capability available\n");
+ return 0;
+ }
+
+ rasdes_info = devm_kzalloc(dev, sizeof(*rasdes_info), GFP_KERNEL);
+ if (!rasdes_info)
+ return -ENOMEM;
+
+ /* Create subdirectories for Debug, Error Injection, Statistics. */
+ rasdes_debug = debugfs_create_dir("rasdes_debug", dir);
+ rasdes_err_inj = debugfs_create_dir("rasdes_err_inj", dir);
+ rasdes_event_counter = debugfs_create_dir("rasdes_event_counter", dir);
+
+ mutex_init(&rasdes_info->reg_event_lock);
+ rasdes_info->ras_cap_offset = ras_cap;
+ pci->debugfs->rasdes_info = rasdes_info;
+
+ /* Create debugfs files for Debug subdirectory. */
+ dwc_debugfs_create(lane_detect);
+ dwc_debugfs_create(rx_valid);
+
+ /* Create debugfs files for Error Injection subdirectory. */
+ for (i = 0; i < ARRAY_SIZE(err_inj_list); i++) {
+ priv_tmp = devm_kzalloc(dev, sizeof(*priv_tmp), GFP_KERNEL);
+ if (!priv_tmp) {
+ ret = -ENOMEM;
+ goto err_deinit;
+ }
+
+ priv_tmp->idx = i;
+ priv_tmp->pci = pci;
+ debugfs_create_file(err_inj_list[i].name, 0200, rasdes_err_inj, priv_tmp,
+ &dwc_pcie_err_inj_ops);
+ }
+
+ /* Create debugfs files for Statistical Counter subdirectory. */
+ for (i = 0; i < ARRAY_SIZE(event_list); i++) {
+ priv_tmp = devm_kzalloc(dev, sizeof(*priv_tmp), GFP_KERNEL);
+ if (!priv_tmp) {
+ ret = -ENOMEM;
+ goto err_deinit;
+ }
+
+ priv_tmp->idx = i;
+ priv_tmp->pci = pci;
+ rasdes_events = debugfs_create_dir(event_list[i].name, rasdes_event_counter);
+ if (event_list[i].group_no == 0 || event_list[i].group_no == 4) {
+ debugfs_create_file("lane_select", 0644, rasdes_events,
+ priv_tmp, &dwc_pcie_counter_lane_ops);
+ }
+ debugfs_create_file("counter_value", 0444, rasdes_events, priv_tmp,
+ &dwc_pcie_counter_value_ops);
+ debugfs_create_file("counter_enable", 0644, rasdes_events, priv_tmp,
+ &dwc_pcie_counter_enable_ops);
+ }
+
+ return 0;
+
+err_deinit:
+ dwc_pcie_rasdes_debugfs_deinit(pci);
+ return ret;
+}
+
+static void dwc_pcie_ltssm_debugfs_init(struct dw_pcie *pci, struct dentry *dir)
+{
+ debugfs_create_file("ltssm_status", 0444, dir, pci,
+ &dwc_pcie_ltssm_status_ops);
+}
+
+static int dw_pcie_ptm_check_capability(void *drvdata)
+{
+ struct dw_pcie *pci = drvdata;
+
+ pci->ptm_vsec_offset = dw_pcie_find_ptm_capability(pci);
+
+ return pci->ptm_vsec_offset;
+}
+
+static int dw_pcie_ptm_context_update_write(void *drvdata, u8 mode)
+{
+ struct dw_pcie *pci = drvdata;
+ u32 val;
+
+ if (mode == PCIE_PTM_CONTEXT_UPDATE_AUTO) {
+ val = dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_RES_REQ_CTRL);
+ val |= PTM_REQ_AUTO_UPDATE_ENABLED;
+ dw_pcie_writel_dbi(pci, pci->ptm_vsec_offset + PTM_RES_REQ_CTRL, val);
+ } else if (mode == PCIE_PTM_CONTEXT_UPDATE_MANUAL) {
+ val = dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_RES_REQ_CTRL);
+ val &= ~PTM_REQ_AUTO_UPDATE_ENABLED;
+ val |= PTM_REQ_START_UPDATE;
+ dw_pcie_writel_dbi(pci, pci->ptm_vsec_offset + PTM_RES_REQ_CTRL, val);
+ } else {
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int dw_pcie_ptm_context_update_read(void *drvdata, u8 *mode)
+{
+ struct dw_pcie *pci = drvdata;
+ u32 val;
+
+ val = dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_RES_REQ_CTRL);
+ if (FIELD_GET(PTM_REQ_AUTO_UPDATE_ENABLED, val))
+ *mode = PCIE_PTM_CONTEXT_UPDATE_AUTO;
+ else
+ /*
+ * PTM_REQ_START_UPDATE is a self-clearing register bit, so if
+ * PTM_REQ_AUTO_UPDATE_ENABLED is not set, it implies that manual
+ * update is in use.
+ */
+ *mode = PCIE_PTM_CONTEXT_UPDATE_MANUAL;
+
+ return 0;
+}
+
+static int dw_pcie_ptm_context_valid_write(void *drvdata, bool valid)
+{
+ struct dw_pcie *pci = drvdata;
+ u32 val;
+
+ if (valid) {
+ val = dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_RES_REQ_CTRL);
+ val |= PTM_RES_CCONTEXT_VALID;
+ dw_pcie_writel_dbi(pci, pci->ptm_vsec_offset + PTM_RES_REQ_CTRL, val);
+ } else {
+ val = dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_RES_REQ_CTRL);
+ val &= ~PTM_RES_CCONTEXT_VALID;
+ dw_pcie_writel_dbi(pci, pci->ptm_vsec_offset + PTM_RES_REQ_CTRL, val);
+ }
+
+ return 0;
+}
+
+static int dw_pcie_ptm_context_valid_read(void *drvdata, bool *valid)
+{
+ struct dw_pcie *pci = drvdata;
+ u32 val;
+
+ val = dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_RES_REQ_CTRL);
+ *valid = !!FIELD_GET(PTM_RES_CCONTEXT_VALID, val);
+
+ return 0;
+}
+
+static int dw_pcie_ptm_local_clock_read(void *drvdata, u64 *clock)
+{
+ struct dw_pcie *pci = drvdata;
+ u32 msb, lsb;
+
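+ /*
+ * An LSB rollover between the two reads changes the MSB, so loop
+ * until the MSB is stable to get a coherent 64-bit value.
+ */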
+ do {
+ msb = dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_LOCAL_MSB);
+ lsb = dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_LOCAL_LSB);
+ } while (msb != dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_LOCAL_MSB));
+
+ *clock = ((u64) msb) << 32 | lsb;
+
+ return 0;
+}
+
+static int dw_pcie_ptm_master_clock_read(void *drvdata, u64 *clock)
+{
+ struct dw_pcie *pci = drvdata;
+ u32 msb, lsb;
+
+ do {
+ msb = dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_MASTER_MSB);
+ lsb = dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_MASTER_LSB);
+ } while (msb != dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_MASTER_MSB));
+
+ *clock = ((u64) msb) << 32 | lsb;
+
+ return 0;
+}
+
+static int dw_pcie_ptm_t1_read(void *drvdata, u64 *clock)
+{
+ struct dw_pcie *pci = drvdata;
+ u32 msb, lsb;
+
+ do {
+ msb = dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_T1_T2_MSB);
+ lsb = dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_T1_T2_LSB);
+ } while (msb != dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_T1_T2_MSB));
+
+ *clock = ((u64) msb) << 32 | lsb;
+
+ return 0;
+}
+
+static int dw_pcie_ptm_t2_read(void *drvdata, u64 *clock)
+{
+ struct dw_pcie *pci = drvdata;
+ u32 msb, lsb;
+
+ do {
+ msb = dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_T1_T2_MSB);
+ lsb = dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_T1_T2_LSB);
+ } while (msb != dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_T1_T2_MSB));
+
+ *clock = ((u64) msb) << 32 | lsb;
+
+ return 0;
+}
+
+static int dw_pcie_ptm_t3_read(void *drvdata, u64 *clock)
+{
+ struct dw_pcie *pci = drvdata;
+ u32 msb, lsb;
+
+ do {
+ msb = dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_T3_T4_MSB);
+ lsb = dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_T3_T4_LSB);
+ } while (msb != dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_T3_T4_MSB));
+
+ *clock = ((u64) msb) << 32 | lsb;
+
+ return 0;
+}
+
+static int dw_pcie_ptm_t4_read(void *drvdata, u64 *clock)
+{
+ struct dw_pcie *pci = drvdata;
+ u32 msb, lsb;
+
+ do {
+ msb = dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_T3_T4_MSB);
+ lsb = dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_T3_T4_LSB);
+ } while (msb != dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_T3_T4_MSB));
+
+ *clock = ((u64) msb) << 32 | lsb;
+
+ return 0;
+}
+
+static bool dw_pcie_ptm_context_update_visible(void *drvdata)
+{
+ struct dw_pcie *pci = drvdata;
+
+ return pci->mode == DW_PCIE_EP_TYPE;
+}
+
+static bool dw_pcie_ptm_context_valid_visible(void *drvdata)
+{
+ struct dw_pcie *pci = drvdata;
+
+ return pci->mode == DW_PCIE_RC_TYPE;
+}
+
+static bool dw_pcie_ptm_local_clock_visible(void *drvdata)
+{
+ /* PTM local clock is always visible */
+ return true;
+}
+
+static bool dw_pcie_ptm_master_clock_visible(void *drvdata)
+{
+ struct dw_pcie *pci = drvdata;
+
+ return pci->mode == DW_PCIE_EP_TYPE;
+}
+
+static bool dw_pcie_ptm_t1_visible(void *drvdata)
+{
+ struct dw_pcie *pci = drvdata;
+
+ return pci->mode == DW_PCIE_EP_TYPE;
+}
+
+static bool dw_pcie_ptm_t2_visible(void *drvdata)
+{
+ struct dw_pcie *pci = drvdata;
+
+ return pci->mode == DW_PCIE_RC_TYPE;
+}
+
+static bool dw_pcie_ptm_t3_visible(void *drvdata)
+{
+ struct dw_pcie *pci = drvdata;
+
+ return pci->mode == DW_PCIE_RC_TYPE;
+}
+
+static bool dw_pcie_ptm_t4_visible(void *drvdata)
+{
+ struct dw_pcie *pci = drvdata;
+
+ return pci->mode == DW_PCIE_EP_TYPE;
+}
+
+const struct pcie_ptm_ops dw_pcie_ptm_ops = {
+ .check_capability = dw_pcie_ptm_check_capability,
+ .context_update_write = dw_pcie_ptm_context_update_write,
+ .context_update_read = dw_pcie_ptm_context_update_read,
+ .context_valid_write = dw_pcie_ptm_context_valid_write,
+ .context_valid_read = dw_pcie_ptm_context_valid_read,
+ .local_clock_read = dw_pcie_ptm_local_clock_read,
+ .master_clock_read = dw_pcie_ptm_master_clock_read,
+ .t1_read = dw_pcie_ptm_t1_read,
+ .t2_read = dw_pcie_ptm_t2_read,
+ .t3_read = dw_pcie_ptm_t3_read,
+ .t4_read = dw_pcie_ptm_t4_read,
+ .context_update_visible = dw_pcie_ptm_context_update_visible,
+ .context_valid_visible = dw_pcie_ptm_context_valid_visible,
+ .local_clock_visible = dw_pcie_ptm_local_clock_visible,
+ .master_clock_visible = dw_pcie_ptm_master_clock_visible,
+ .t1_visible = dw_pcie_ptm_t1_visible,
+ .t2_visible = dw_pcie_ptm_t2_visible,
+ .t3_visible = dw_pcie_ptm_t3_visible,
+ .t4_visible = dw_pcie_ptm_t4_visible,
+};
+
+void dwc_pcie_debugfs_deinit(struct dw_pcie *pci)
+{
+ if (!pci->debugfs)
+ return;
+
+ pcie_ptm_destroy_debugfs(pci->ptm_debugfs);
+ dwc_pcie_rasdes_debugfs_deinit(pci);
+ debugfs_remove_recursive(pci->debugfs->debug_dir);
+}
+
+void dwc_pcie_debugfs_init(struct dw_pcie *pci, enum dw_pcie_device_mode mode)
+{
+ char dirname[DWC_DEBUGFS_BUF_MAX];
+ struct device *dev = pci->dev;
+ struct debugfs_info *debugfs;
+ struct dentry *dir;
+ int err;
+
+ /* Create main directory for each platform driver. */
+ snprintf(dirname, DWC_DEBUGFS_BUF_MAX, "dwc_pcie_%s", dev_name(dev));
+ dir = debugfs_create_dir(dirname, NULL);
+ debugfs = devm_kzalloc(dev, sizeof(*debugfs), GFP_KERNEL);
+ if (!debugfs)
+ return;
+
+ debugfs->debug_dir = dir;
+ pci->debugfs = debugfs;
+ err = dwc_pcie_rasdes_debugfs_init(pci, dir);
+ if (err)
+ dev_err(dev, "failed to initialize RAS DES debugfs, err=%d\n",
+ err);
+
+ dwc_pcie_ltssm_debugfs_init(pci, dir);
+
+ pci->mode = mode;
+ pci->ptm_debugfs = pcie_ptm_create_debugfs(pci->dev, pci,
+ &dw_pcie_ptm_ops);
+}
diff --git a/drivers/pci/controller/dwc/pcie-designware-ep.c b/drivers/pci/controller/dwc/pcie-designware-ep.c
index e41479a9ca02..0ae54a94809b 100644
--- a/drivers/pci/controller/dwc/pcie-designware-ep.c
+++ b/drivers/pci/controller/dwc/pcie-designware-ep.c
@@ -102,6 +102,45 @@ static u8 dw_pcie_ep_find_capability(struct dw_pcie_ep *ep, u8 func_no, u8 cap)
return __dw_pcie_ep_find_next_cap(ep, func_no, next_cap_ptr, cap);
}
+/**
+ * dw_pcie_ep_hide_ext_capability - Hide a capability from the linked list
+ * @pci: DWC PCI device
+ * @prev_cap: Capability preceding the capability that should be hidden
+ * @cap: Capability that should be hidden
+ *
+ * Return: 0 if success, errno otherwise.
+ */
+int dw_pcie_ep_hide_ext_capability(struct dw_pcie *pci, u8 prev_cap, u8 cap)
+{
+ u16 prev_cap_offset, cap_offset;
+ u32 prev_cap_header, cap_header;
+
+ prev_cap_offset = dw_pcie_find_ext_capability(pci, prev_cap);
+ if (!prev_cap_offset)
+ return -EINVAL;
+
+ prev_cap_header = dw_pcie_readl_dbi(pci, prev_cap_offset);
+ cap_offset = PCI_EXT_CAP_NEXT(prev_cap_header);
+ cap_header = dw_pcie_readl_dbi(pci, cap_offset);
+
+ /* cap must immediately follow prev_cap. */
+ if (PCI_EXT_CAP_ID(cap_header) != cap)
+ return -EINVAL;
+
+ /* Clear next ptr. */
+ prev_cap_header &= ~GENMASK(31, 20);
+
+ /* Set next ptr to next ptr of cap. */
+ prev_cap_header |= cap_header & GENMASK(31, 20);
+
+ dw_pcie_dbi_ro_wr_en(pci);
+ dw_pcie_writel_dbi(pci, prev_cap_offset, prev_cap_header);
+ dw_pcie_dbi_ro_wr_dis(pci);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(dw_pcie_ep_hide_ext_capability);
+
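A caller splices a capability out of the list as below; a hedged sketch, where the choice of capability pair is hypothetical and the two must actually be adjacent in the device's config space:

	/* Hypothetical: hide PTM, assuming it immediately follows L1SS. */
	ret = dw_pcie_ep_hide_ext_capability(pci, PCI_EXT_CAP_ID_L1SS,
					     PCI_EXT_CAP_ID_PTM);
	if (ret)
		dev_warn(pci->dev, "failed to hide PTM capability\n");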
static int dw_pcie_ep_write_header(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
struct pci_epf_header *hdr)
{
@@ -128,7 +167,7 @@ static int dw_pcie_ep_write_header(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
}
static int dw_pcie_ep_inbound_atu(struct dw_pcie_ep *ep, u8 func_no, int type,
- dma_addr_t cpu_addr, enum pci_barno bar,
+ dma_addr_t parent_bus_addr, enum pci_barno bar,
size_t size)
{
int ret;
@@ -146,7 +185,7 @@ static int dw_pcie_ep_inbound_atu(struct dw_pcie_ep *ep, u8 func_no, int type,
}
ret = dw_pcie_prog_ep_inbound_atu(pci, func_no, free_win, type,
- cpu_addr, bar, size);
+ parent_bus_addr, bar, size);
if (ret < 0) {
dev_err(pci->dev, "Failed to program IB window\n");
return ret;
@@ -181,7 +220,7 @@ static int dw_pcie_ep_outbound_atu(struct dw_pcie_ep *ep,
return ret;
set_bit(free_win, ep->ob_window_map);
- ep->outbound_addr[free_win] = atu->cpu_addr;
+ ep->outbound_addr[free_win] = atu->parent_bus_addr;
return 0;
}
@@ -205,6 +244,125 @@ static void dw_pcie_ep_clear_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
ep->bar_to_atu[bar] = 0;
}
+static unsigned int dw_pcie_ep_get_rebar_offset(struct dw_pcie *pci,
+ enum pci_barno bar)
+{
+ u32 reg, bar_index;
+ unsigned int offset, nbars;
+ int i;
+
+ offset = dw_pcie_find_ext_capability(pci, PCI_EXT_CAP_ID_REBAR);
+ if (!offset)
+ return offset;
+
+ reg = dw_pcie_readl_dbi(pci, offset + PCI_REBAR_CTRL);
+ nbars = FIELD_GET(PCI_REBAR_CTRL_NBAR_MASK, reg);
+
+ for (i = 0; i < nbars; i++, offset += PCI_REBAR_CTRL) {
+ reg = dw_pcie_readl_dbi(pci, offset + PCI_REBAR_CTRL);
+ bar_index = FIELD_GET(PCI_REBAR_CTRL_BAR_IDX, reg);
+ if (bar_index == bar)
+ return offset;
+ }
+
+ return 0;
+}
+
+static int dw_pcie_ep_set_bar_resizable(struct dw_pcie_ep *ep, u8 func_no,
+ struct pci_epf_bar *epf_bar)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ enum pci_barno bar = epf_bar->barno;
+ size_t size = epf_bar->size;
+ int flags = epf_bar->flags;
+ u32 reg = PCI_BASE_ADDRESS_0 + (4 * bar);
+ unsigned int rebar_offset;
+ u32 rebar_cap, rebar_ctrl;
+ int ret;
+
+ rebar_offset = dw_pcie_ep_get_rebar_offset(pci, bar);
+ if (!rebar_offset)
+ return -EINVAL;
+
+ ret = pci_epc_bar_size_to_rebar_cap(size, &rebar_cap);
+ if (ret)
+ return ret;
+
+ dw_pcie_dbi_ro_wr_en(pci);
+
+ /*
+ * A BAR mask should not be written for a resizable BAR. The BAR mask
+ * is automatically derived by the controller every time the "selected
+ * size" bits are updated, see "Figure 3-26 Resizable BAR Example for
+ * 32-bit Memory BAR0" in DWC EP databook 5.96a. We simply need to write
+ * BIT(0) to set the BAR enable bit.
+ */
+ dw_pcie_ep_writel_dbi2(ep, func_no, reg, BIT(0));
+ dw_pcie_ep_writel_dbi(ep, func_no, reg, flags);
+
+ if (flags & PCI_BASE_ADDRESS_MEM_TYPE_64) {
+ dw_pcie_ep_writel_dbi2(ep, func_no, reg + 4, 0);
+ dw_pcie_ep_writel_dbi(ep, func_no, reg + 4, 0);
+ }
+
+ /*
+ * Bits 31:0 in PCI_REBAR_CAP define "supported sizes" bits for sizes
+ * 1 MB to 128 TB. Bits 31:16 in PCI_REBAR_CTRL define "supported sizes"
+ * bits for sizes 256 TB to 8 EB. Disallow sizes 256 TB to 8 EB.
+ */
+ rebar_ctrl = dw_pcie_readl_dbi(pci, rebar_offset + PCI_REBAR_CTRL);
+ rebar_ctrl &= ~GENMASK(31, 16);
+ dw_pcie_writel_dbi(pci, rebar_offset + PCI_REBAR_CTRL, rebar_ctrl);
+
+ /*
+ * The "selected size" (bits 13:8) in PCI_REBAR_CTRL are automatically
+ * updated when writing PCI_REBAR_CAP, see "Figure 3-26 Resizable BAR
+ * Example for 32-bit Memory BAR0" in DWC EP databook 5.96a.
+ */
+ dw_pcie_writel_dbi(pci, rebar_offset + PCI_REBAR_CAP, rebar_cap);
+
+ dw_pcie_dbi_ro_wr_dis(pci);
+
+ return 0;
+}
+
+static int dw_pcie_ep_set_bar_programmable(struct dw_pcie_ep *ep, u8 func_no,
+ struct pci_epf_bar *epf_bar)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ enum pci_barno bar = epf_bar->barno;
+ size_t size = epf_bar->size;
+ int flags = epf_bar->flags;
+ u32 reg = PCI_BASE_ADDRESS_0 + (4 * bar);
+
+ dw_pcie_dbi_ro_wr_en(pci);
+
+ dw_pcie_ep_writel_dbi2(ep, func_no, reg, lower_32_bits(size - 1));
+ dw_pcie_ep_writel_dbi(ep, func_no, reg, flags);
+
+ if (flags & PCI_BASE_ADDRESS_MEM_TYPE_64) {
+ dw_pcie_ep_writel_dbi2(ep, func_no, reg + 4, upper_32_bits(size - 1));
+ dw_pcie_ep_writel_dbi(ep, func_no, reg + 4, 0);
+ }
+
+ dw_pcie_dbi_ro_wr_dis(pci);
+
+ return 0;
+}
+
+static enum pci_epc_bar_type dw_pcie_ep_get_bar_type(struct dw_pcie_ep *ep,
+ enum pci_barno bar)
+{
+ const struct pci_epc_features *epc_features;
+
+ if (!ep->ops->get_features)
+ return BAR_PROGRAMMABLE;
+
+ epc_features = ep->ops->get_features(ep);
+
+ return epc_features->bar[bar].type;
+}
+
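pci_epc_bar_size_to_rebar_cap() above turns a BAR size into a single "supported sizes" bit. A hedged sketch of the mapping assumed here, per PCIe r6.0 sec 7.8.6.2 (PCI_REBAR_CAP bit 4 is 1 MB; each higher bit doubles the size, up to 128 TB at bit 31):

static int rebar_cap_sketch(size_t size, u32 *cap)
{
	unsigned int bit;

	/* Only power-of-two sizes of at least 1 MB have a capability bit. */
	if (size < SZ_1M || !is_power_of_2(size))
		return -EINVAL;

	bit = ilog2(size / SZ_1M) + 4;	/* bit 4 == 1 MB */
	if (bit > 31)
		return -EINVAL;

	*cap = BIT(bit);

	return 0;
}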
static int dw_pcie_ep_set_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
struct pci_epf_bar *epf_bar)
{
@@ -212,9 +370,9 @@ static int dw_pcie_ep_set_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
enum pci_barno bar = epf_bar->barno;
size_t size = epf_bar->size;
+ enum pci_epc_bar_type bar_type;
int flags = epf_bar->flags;
int ret, type;
- u32 reg;
/*
* DWC does not allow BAR pairs to overlap, e.g. you cannot combine BARs
@@ -246,19 +404,30 @@ static int dw_pcie_ep_set_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
goto config_atu;
}
- reg = PCI_BASE_ADDRESS_0 + (4 * bar);
-
- dw_pcie_dbi_ro_wr_en(pci);
-
- dw_pcie_ep_writel_dbi2(ep, func_no, reg, lower_32_bits(size - 1));
- dw_pcie_ep_writel_dbi(ep, func_no, reg, flags);
-
- if (flags & PCI_BASE_ADDRESS_MEM_TYPE_64) {
- dw_pcie_ep_writel_dbi2(ep, func_no, reg + 4, upper_32_bits(size - 1));
- dw_pcie_ep_writel_dbi(ep, func_no, reg + 4, 0);
+ bar_type = dw_pcie_ep_get_bar_type(ep, bar);
+ switch (bar_type) {
+ case BAR_FIXED:
+ /*
+ * There is no need to write a BAR mask for a fixed BAR (except
+ * to write 1 to the LSB of the BAR mask register, to enable the
+ * BAR). Write the BAR mask regardless. (The fixed bits in the
+ * BAR mask register will be read-only anyway.)
+ */
+ fallthrough;
+ case BAR_PROGRAMMABLE:
+ ret = dw_pcie_ep_set_bar_programmable(ep, func_no, epf_bar);
+ break;
+ case BAR_RESIZABLE:
+ ret = dw_pcie_ep_set_bar_resizable(ep, func_no, epf_bar);
+ break;
+ default:
+ ret = -EINVAL;
+ dev_err(pci->dev, "Invalid BAR type\n");
+ break;
}
- dw_pcie_dbi_ro_wr_dis(pci);
+ if (ret)
+ return ret;
config_atu:
if (!(flags & PCI_BASE_ADDRESS_SPACE))
@@ -282,7 +451,7 @@ static int dw_pcie_find_index(struct dw_pcie_ep *ep, phys_addr_t addr,
u32 index;
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
- for (index = 0; index < pci->num_ob_windows; index++) {
+ for_each_set_bit(index, ep->ob_window_map, pci->num_ob_windows) {
if (ep->outbound_addr[index] != addr)
continue;
*atu_index = index;
@@ -314,7 +483,8 @@ static void dw_pcie_ep_unmap_addr(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
struct dw_pcie_ep *ep = epc_get_drvdata(epc);
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
- ret = dw_pcie_find_index(ep, addr, &atu_index);
+ ret = dw_pcie_find_index(ep, addr - pci->parent_bus_offset,
+ &atu_index);
if (ret < 0)
return;
@@ -333,7 +503,7 @@ static int dw_pcie_ep_map_addr(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
atu.func_no = func_no;
atu.type = PCIE_ATU_TYPE_MEM;
- atu.cpu_addr = addr;
+ atu.parent_bus_addr = addr - pci->parent_bus_offset;
atu.pci_addr = pci_addr;
atu.size = size;
ret = dw_pcie_ep_outbound_atu(ep, &atu);
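These map/unmap hunks subtract a constant pci->parent_bus_offset so the ATU is programmed with addresses as seen from the controller's parent bus rather than from the CPU. A one-line sketch of the assumed translation (the offset is zero on most platforms):

static u64 to_parent_bus_addr(struct dw_pcie *pci, u64 cpu_addr)
{
	return cpu_addr - pci->parent_bus_offset;
}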
@@ -362,15 +532,16 @@ static int dw_pcie_ep_get_msi(struct pci_epc *epc, u8 func_no, u8 vfunc_no)
val = FIELD_GET(PCI_MSI_FLAGS_QSIZE, val);
- return val;
+ return 1 << val;
}
static int dw_pcie_ep_set_msi(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
- u8 interrupts)
+ u8 nr_irqs)
{
struct dw_pcie_ep *ep = epc_get_drvdata(epc);
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
struct dw_pcie_ep_func *ep_func;
+ u8 mmc = order_base_2(nr_irqs);
u32 val, reg;
ep_func = dw_pcie_ep_get_func_from_ep(ep, func_no);
@@ -380,7 +551,7 @@ static int dw_pcie_ep_set_msi(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
reg = ep_func->msi_cap + PCI_MSI_FLAGS;
val = dw_pcie_ep_readw_dbi(ep, func_no, reg);
val &= ~PCI_MSI_FLAGS_QMASK;
- val |= FIELD_PREP(PCI_MSI_FLAGS_QMASK, interrupts);
+ val |= FIELD_PREP(PCI_MSI_FLAGS_QMASK, mmc);
dw_pcie_dbi_ro_wr_en(pci);
dw_pcie_ep_writew_dbi(ep, func_no, reg, val);
dw_pcie_dbi_ro_wr_dis(pci);
@@ -405,11 +576,11 @@ static int dw_pcie_ep_get_msix(struct pci_epc *epc, u8 func_no, u8 vfunc_no)
val &= PCI_MSIX_FLAGS_QSIZE;
- return val;
+ return val + 1;
}
static int dw_pcie_ep_set_msix(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
- u16 interrupts, enum pci_barno bir, u32 offset)
+ u16 nr_irqs, enum pci_barno bir, u32 offset)
{
struct dw_pcie_ep *ep = epc_get_drvdata(epc);
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
@@ -425,7 +596,7 @@ static int dw_pcie_ep_set_msix(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
reg = ep_func->msix_cap + PCI_MSIX_FLAGS;
val = dw_pcie_ep_readw_dbi(ep, func_no, reg);
val &= ~PCI_MSIX_FLAGS_QSIZE;
- val |= interrupts;
+ val |= nr_irqs - 1; /* encoded as N-1 */
dw_pcie_writew_dbi(pci, reg, val);
reg = ep_func->msix_cap + PCI_MSIX_TABLE;
@@ -433,7 +604,7 @@ static int dw_pcie_ep_set_msix(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
dw_pcie_ep_writel_dbi(ep, func_no, reg, val);
reg = ep_func->msix_cap + PCI_MSIX_PBA;
- val = (offset + (interrupts * PCI_MSIX_ENTRY_SIZE)) | bir;
+ val = (offset + (nr_irqs * PCI_MSIX_ENTRY_SIZE)) | bir;
dw_pcie_ep_writel_dbi(ep, func_no, reg, val);
dw_pcie_dbi_ro_wr_dis(pci);
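The MSI/MSI-X get/set hunks above move the EPC interface from raw register encodings to vector counts. A compact sketch of the two encodings relied on, assuming MSI stores log2 of the vector count in the Multiple Message field and MSI-X stores the count minus one in Table Size:

/* MSI: 1..32 vectors <-> 3-bit log2 encoding (0..5). */
static u8 msi_nr_irqs_to_mmc(unsigned int nr_irqs)
{
	return order_base_2(nr_irqs);	/* e.g. 8 vectors -> 3 */
}

/* MSI-X: 1..2048 vectors <-> 11-bit "N-1" Table Size encoding. */
static u16 msix_nr_irqs_to_qsize(unsigned int nr_irqs)
{
	return nr_irqs - 1;		/* e.g. 8 vectors -> 7 */
}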
@@ -501,7 +672,7 @@ static const struct pci_epc_ops epc_ops = {
* @ep: DWC EP device
* @func_no: Function number of the endpoint
*
- * Return: 0 if success, errono otherwise.
+ * Return: 0 if success, errno otherwise.
*/
int dw_pcie_ep_raise_intx_irq(struct dw_pcie_ep *ep, u8 func_no)
{
@@ -520,7 +691,7 @@ EXPORT_SYMBOL_GPL(dw_pcie_ep_raise_intx_irq);
* @func_no: Function number of the endpoint
* @interrupt_num: Interrupt number to be raised
*
- * Return: 0 if success, errono otherwise.
+ * Return: 0 if success, errno otherwise.
*/
int dw_pcie_ep_raise_msi_irq(struct dw_pcie_ep *ep, u8 func_no,
u8 interrupt_num)
@@ -666,6 +837,7 @@ void dw_pcie_ep_cleanup(struct dw_pcie_ep *ep)
{
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ dwc_pcie_debugfs_deinit(pci);
dw_pcie_edma_remove(pci);
}
EXPORT_SYMBOL_GPL(dw_pcie_ep_cleanup);
@@ -690,46 +862,49 @@ void dw_pcie_ep_deinit(struct dw_pcie_ep *ep)
}
EXPORT_SYMBOL_GPL(dw_pcie_ep_deinit);
-static unsigned int dw_pcie_ep_find_ext_capability(struct dw_pcie *pci, int cap)
-{
- u32 header;
- int pos = PCI_CFG_SPACE_SIZE;
-
- while (pos) {
- header = dw_pcie_readl_dbi(pci, pos);
- if (PCI_EXT_CAP_ID(header) == cap)
- return pos;
-
- pos = PCI_EXT_CAP_NEXT(header);
- if (!pos)
- break;
- }
-
- return 0;
-}
-
static void dw_pcie_ep_init_non_sticky_registers(struct dw_pcie *pci)
{
+ struct dw_pcie_ep *ep = &pci->ep;
unsigned int offset;
unsigned int nbars;
- u32 reg, i;
+ enum pci_barno bar;
+ u32 reg, i, val;
- offset = dw_pcie_ep_find_ext_capability(pci, PCI_EXT_CAP_ID_REBAR);
+ offset = dw_pcie_find_ext_capability(pci, PCI_EXT_CAP_ID_REBAR);
dw_pcie_dbi_ro_wr_en(pci);
if (offset) {
reg = dw_pcie_readl_dbi(pci, offset + PCI_REBAR_CTRL);
- nbars = (reg & PCI_REBAR_CTRL_NBAR_MASK) >>
- PCI_REBAR_CTRL_NBAR_SHIFT;
+ nbars = FIELD_GET(PCI_REBAR_CTRL_NBAR_MASK, reg);
/*
* PCIe r6.0, sec 7.8.6.2 require us to support at least one
* size in the range from 1 MB to 512 GB. Advertise support
* for 1 MB BAR size only.
+ *
+ * For a BAR that has been configured via dw_pcie_ep_set_bar(),
+ * advertise support for only that size instead.
*/
- for (i = 0; i < nbars; i++, offset += PCI_REBAR_CTRL)
- dw_pcie_writel_dbi(pci, offset + PCI_REBAR_CAP, BIT(4));
+ for (i = 0; i < nbars; i++, offset += PCI_REBAR_CTRL) {
+ /*
+ * While the RESBAR_CAP_REG_* fields are sticky, the
+ * RESBAR_CTRL_REG_BAR_SIZE field is non-sticky (it is
+ * sticky in certain versions of DWC PCIe, but not all).
+ *
+ * RESBAR_CTRL_REG_BAR_SIZE is updated automatically by
+ * the controller when RESBAR_CAP_REG is written, which
+ * is why RESBAR_CAP_REG is written here.
+ */
+ val = dw_pcie_readl_dbi(pci, offset + PCI_REBAR_CTRL);
+ bar = FIELD_GET(PCI_REBAR_CTRL_BAR_IDX, val);
+ if (ep->epf_bar[bar])
+ pci_epc_bar_size_to_rebar_cap(ep->epf_bar[bar]->size, &val);
+ else
+ val = BIT(4);
+
+ dw_pcie_writel_dbi(pci, offset + PCI_REBAR_CAP, val);
+ }
}
dw_pcie_setup(pci);
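For context on pci_epc_bar_size_to_rebar_cap() used above: the Resizable BAR capability encodes supported sizes as a bitmask in which bit 4 advertises 1 MB and each higher bit doubles the size. A sketch of that mapping, with a hypothetical helper name:
/* Sketch: REBAR capability bit n (n >= 4) advertises 2^(n + 16) bytes. */
static int rebar_cap_from_size(u64 size, u32 *cap)
{
	/* Assumes a power-of-two size between 1 MB and 512 GB. */
	if (!is_power_of_2(size) || size < SZ_1M || size > (u64)SZ_1G * 512)
		return -EINVAL;

	*cap = BIT(ilog2(size) - ilog2(SZ_1M) + 4);
	return 0;
}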
@@ -818,7 +993,7 @@ int dw_pcie_ep_init_registers(struct dw_pcie_ep *ep)
if (ep->ops->init)
ep->ops->init(ep);
- ptm_cap_base = dw_pcie_ep_find_ext_capability(pci, PCI_EXT_CAP_ID_PTM);
+ ptm_cap_base = dw_pcie_find_ext_capability(pci, PCI_EXT_CAP_ID_PTM);
/*
* PTM responder capability can be disabled only after disabling
@@ -838,6 +1013,8 @@ int dw_pcie_ep_init_registers(struct dw_pcie_ep *ep)
dw_pcie_ep_init_non_sticky_registers(pci);
+ dwc_pcie_debugfs_init(pci, DW_PCIE_EP_TYPE);
+
return 0;
err_remove_edma:
@@ -884,26 +1061,15 @@ void dw_pcie_ep_linkdown(struct dw_pcie_ep *ep)
}
EXPORT_SYMBOL_GPL(dw_pcie_ep_linkdown);
-/**
- * dw_pcie_ep_init - Initialize the endpoint device
- * @ep: DWC EP device
- *
- * Initialize the endpoint device. Allocate resources and create the EPC
- * device with the endpoint framework.
- *
- * Return: 0 if success, errno otherwise.
- */
-int dw_pcie_ep_init(struct dw_pcie_ep *ep)
+static int dw_pcie_ep_get_resources(struct dw_pcie_ep *ep)
{
- int ret;
- struct resource *res;
- struct pci_epc *epc;
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
struct device *dev = pci->dev;
struct platform_device *pdev = to_platform_device(dev);
struct device_node *np = dev->of_node;
-
- INIT_LIST_HEAD(&ep->func_list);
+ struct pci_epc *epc = ep->epc;
+ struct resource *res;
+ int ret;
ret = dw_pcie_get_resources(pci);
if (ret)
@@ -916,8 +1082,37 @@ int dw_pcie_ep_init(struct dw_pcie_ep *ep)
ep->phys_base = res->start;
ep->addr_size = resource_size(res);
- if (ep->ops->pre_init)
- ep->ops->pre_init(ep);
+ /*
+ * artpec6_pcie_cpu_addr_fixup() uses ep->phys_base, so call
+ * dw_pcie_parent_bus_offset() after setting ep->phys_base.
+ */
+ pci->parent_bus_offset = dw_pcie_parent_bus_offset(pci, "addr_space",
+ ep->phys_base);
+
+ ret = of_property_read_u8(np, "max-functions", &epc->max_functions);
+ if (ret < 0)
+ epc->max_functions = 1;
+
+ return 0;
+}
+
+/**
+ * dw_pcie_ep_init - Initialize the endpoint device
+ * @ep: DWC EP device
+ *
+ * Initialize the endpoint device. Allocate resources and create the EPC
+ * device with the endpoint framework.
+ *
+ * Return: 0 if success, errno otherwise.
+ */
+int dw_pcie_ep_init(struct dw_pcie_ep *ep)
+{
+ int ret;
+ struct pci_epc *epc;
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ struct device *dev = pci->dev;
+
+ INIT_LIST_HEAD(&ep->func_list);
epc = devm_pci_epc_create(dev, &epc_ops);
if (IS_ERR(epc)) {
@@ -928,9 +1123,12 @@ int dw_pcie_ep_init(struct dw_pcie_ep *ep)
ep->epc = epc;
epc_set_drvdata(epc, ep);
- ret = of_property_read_u8(np, "max-functions", &epc->max_functions);
- if (ret < 0)
- epc->max_functions = 1;
+ ret = dw_pcie_ep_get_resources(ep);
+ if (ret)
+ return ret;
+
+ if (ep->ops->pre_init)
+ ep->ops->pre_init(ep);
ret = pci_epc_mem_init(epc, ep->phys_base, ep->addr_size,
ep->page_size);
diff --git a/drivers/pci/controller/dwc/pcie-designware-host.c b/drivers/pci/controller/dwc/pcie-designware-host.c
index ffaded8f2df7..906277f9ffaf 100644
--- a/drivers/pci/controller/dwc/pcie-designware-host.c
+++ b/drivers/pci/controller/dwc/pcie-designware-host.c
@@ -227,7 +227,7 @@ static const struct irq_domain_ops dw_pcie_msi_domain_ops = {
int dw_pcie_allocate_domains(struct dw_pcie_rp *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
- struct fwnode_handle *fwnode = of_node_to_fwnode(pci->dev->of_node);
+ struct fwnode_handle *fwnode = of_fwnode_handle(pci->dev->of_node);
pp->irq_domain = irq_domain_create_linear(fwnode, pp->num_vectors,
&dw_pcie_msi_domain_ops, pp);
@@ -418,19 +418,15 @@ static void dw_pcie_host_request_msg_tlp_res(struct dw_pcie_rp *pp)
}
}
-int dw_pcie_host_init(struct dw_pcie_rp *pp)
+static int dw_pcie_host_get_resources(struct dw_pcie_rp *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct device *dev = pci->dev;
- struct device_node *np = dev->of_node;
struct platform_device *pdev = to_platform_device(dev);
struct resource_entry *win;
- struct pci_host_bridge *bridge;
struct resource *res;
int ret;
- raw_spin_lock_init(&pp->lock);
-
ret = dw_pcie_get_resources(pci);
if (ret)
return ret;
@@ -448,20 +444,43 @@ int dw_pcie_host_init(struct dw_pcie_rp *pp)
if (IS_ERR(pp->va_cfg0_base))
return PTR_ERR(pp->va_cfg0_base);
- bridge = devm_pci_alloc_host_bridge(dev, 0);
- if (!bridge)
- return -ENOMEM;
-
- pp->bridge = bridge;
-
/* Get the I/O range from DT */
- win = resource_list_first_type(&bridge->windows, IORESOURCE_IO);
+ win = resource_list_first_type(&pp->bridge->windows, IORESOURCE_IO);
if (win) {
pp->io_size = resource_size(win->res);
pp->io_bus_addr = win->res->start - win->offset;
pp->io_base = pci_pio_to_address(win->res->start);
}
+ /*
+ * visconti_pcie_cpu_addr_fixup() uses pp->io_base, so we have to
+ * call dw_pcie_parent_bus_offset() after setting pp->io_base.
+ */
+ pci->parent_bus_offset = dw_pcie_parent_bus_offset(pci, "config",
+ pp->cfg0_base);
+ return 0;
+}
+
+int dw_pcie_host_init(struct dw_pcie_rp *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct device *dev = pci->dev;
+ struct device_node *np = dev->of_node;
+ struct pci_host_bridge *bridge;
+ int ret;
+
+ raw_spin_lock_init(&pp->lock);
+
+ bridge = devm_pci_alloc_host_bridge(dev, 0);
+ if (!bridge)
+ return -ENOMEM;
+
+ pp->bridge = bridge;
+
+ ret = dw_pcie_host_get_resources(pp);
+ if (ret)
+ return ret;
+
/* Set default bus ops */
bridge->ops = &dw_pcie_ops;
bridge->child_ops = &dw_child_pcie_ops;
@@ -504,6 +523,13 @@ int dw_pcie_host_init(struct dw_pcie_rp *pp)
dw_pcie_iatu_detect(pci);
+ if (pci->num_lanes < 1)
+ pci->num_lanes = dw_pcie_link_get_max_link_width(pci);
+
+ ret = of_pci_get_equalization_presets(dev, &pp->presets, pci->num_lanes);
+ if (ret)
+ goto err_free_msi;
+
/*
* Allocate the resource for MSG TLP before programming the iATU
* outbound window in dw_pcie_setup_rc(). Since the allocation depends
@@ -548,6 +574,8 @@ int dw_pcie_host_init(struct dw_pcie_rp *pp)
if (pp->ops->post_init)
pp->ops->post_init(pp);
+ dwc_pcie_debugfs_init(pci, DW_PCIE_RC_TYPE);
+
return 0;
err_stop_link:
@@ -572,6 +600,8 @@ void dw_pcie_host_deinit(struct dw_pcie_rp *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ dwc_pcie_debugfs_deinit(pci);
+
pci_stop_root_bus(pp->bridge->bus);
pci_remove_root_bus(pp->bridge->bus);
@@ -616,7 +646,7 @@ static void __iomem *dw_pcie_other_conf_map_bus(struct pci_bus *bus,
type = PCIE_ATU_TYPE_CFG1;
atu.type = type;
- atu.cpu_addr = pp->cfg0_base;
+ atu.parent_bus_addr = pp->cfg0_base - pci->parent_bus_offset;
atu.pci_addr = busdev;
atu.size = pp->cfg0_size;
@@ -641,7 +671,7 @@ static int dw_pcie_rd_other_conf(struct pci_bus *bus, unsigned int devfn,
if (pp->cfg0_io_shared) {
atu.type = PCIE_ATU_TYPE_IO;
- atu.cpu_addr = pp->io_base;
+ atu.parent_bus_addr = pp->io_base - pci->parent_bus_offset;
atu.pci_addr = pp->io_bus_addr;
atu.size = pp->io_size;
@@ -667,7 +697,7 @@ static int dw_pcie_wr_other_conf(struct pci_bus *bus, unsigned int devfn,
if (pp->cfg0_io_shared) {
atu.type = PCIE_ATU_TYPE_IO;
- atu.cpu_addr = pp->io_base;
+ atu.parent_bus_addr = pp->io_base - pci->parent_bus_offset;
atu.pci_addr = pp->io_bus_addr;
atu.size = pp->io_size;
@@ -736,7 +766,7 @@ static int dw_pcie_iatu_setup(struct dw_pcie_rp *pp)
atu.index = i;
atu.type = PCIE_ATU_TYPE_MEM;
- atu.cpu_addr = entry->res->start;
+ atu.parent_bus_addr = entry->res->start - pci->parent_bus_offset;
atu.pci_addr = entry->res->start - entry->offset;
/* Adjust iATU size if MSG TLP region was allocated before */
@@ -758,7 +788,7 @@ static int dw_pcie_iatu_setup(struct dw_pcie_rp *pp)
if (pci->num_ob_windows > ++i) {
atu.index = i;
atu.type = PCIE_ATU_TYPE_IO;
- atu.cpu_addr = pp->io_base;
+ atu.parent_bus_addr = pp->io_base - pci->parent_bus_offset;
atu.pci_addr = pp->io_bus_addr;
atu.size = pp->io_size;
@@ -805,6 +835,77 @@ static int dw_pcie_iatu_setup(struct dw_pcie_rp *pp)
return 0;
}
+static void dw_pcie_program_presets(struct dw_pcie_rp *pp, enum pci_bus_speed speed)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ u8 lane_eq_offset, lane_reg_size, cap_id;
+ u8 *presets;
+ u32 cap;
+ int i;
+
+ if (speed == PCIE_SPEED_8_0GT) {
+ presets = (u8 *)pp->presets.eq_presets_8gts;
+ lane_eq_offset = PCI_SECPCI_LE_CTRL;
+ cap_id = PCI_EXT_CAP_ID_SECPCI;
+ /* For a data rate of 8 GT/s, each lane equalization control is 16 bits wide */
+ lane_reg_size = 0x2;
+ } else if (speed == PCIE_SPEED_16_0GT) {
+ presets = pp->presets.eq_presets_Ngts[EQ_PRESET_TYPE_16GTS - 1];
+ lane_eq_offset = PCI_PL_16GT_LE_CTRL;
+ cap_id = PCI_EXT_CAP_ID_PL_16GT;
+ lane_reg_size = 0x1;
+ } else if (speed == PCIE_SPEED_32_0GT) {
+ presets = pp->presets.eq_presets_Ngts[EQ_PRESET_TYPE_32GTS - 1];
+ lane_eq_offset = PCI_PL_32GT_LE_CTRL;
+ cap_id = PCI_EXT_CAP_ID_PL_32GT;
+ lane_reg_size = 0x1;
+ } else if (speed == PCIE_SPEED_64_0GT) {
+ presets = pp->presets.eq_presets_Ngts[EQ_PRESET_TYPE_64GTS - 1];
+ lane_eq_offset = PCI_PL_64GT_LE_CTRL;
+ cap_id = PCI_EXT_CAP_ID_PL_64GT;
+ lane_reg_size = 0x1;
+ } else {
+ return;
+ }
+
+ if (presets[0] == PCI_EQ_RESV)
+ return;
+
+ cap = dw_pcie_find_ext_capability(pci, cap_id);
+ if (!cap)
+ return;
+
+ /*
+ * Write preset values to the registers byte-by-byte for the given
+ * number of lanes and register size.
+ */
+ for (i = 0; i < pci->num_lanes * lane_reg_size; i++)
+ dw_pcie_writeb_dbi(pci, cap + lane_eq_offset + i, presets[i]);
+}
+
+static void dw_pcie_config_presets(struct dw_pcie_rp *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ enum pci_bus_speed speed = pcie_link_speed[pci->max_link_speed];
+
+ /*
+ * Lane equalization settings need to be applied for all data rates the
+ * controller supports and for all supported lanes.
+ */
+
+ if (speed >= PCIE_SPEED_8_0GT)
+ dw_pcie_program_presets(pp, PCIE_SPEED_8_0GT);
+
+ if (speed >= PCIE_SPEED_16_0GT)
+ dw_pcie_program_presets(pp, PCIE_SPEED_16_0GT);
+
+ if (speed >= PCIE_SPEED_32_0GT)
+ dw_pcie_program_presets(pp, PCIE_SPEED_32_0GT);
+
+ if (speed >= PCIE_SPEED_64_0GT)
+ dw_pcie_program_presets(pp, PCIE_SPEED_64_0GT);
+}
+
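/*
 * Illustrative sketch, not part of the patch: at 8 GT/s each lane has a
 * 16-bit Lane Equalization Control field, at 16/32/64 GT/s one byte per
 * lane, so the dbi write loop above covers num_lanes * lane_reg_size
 * bytes starting at cap + lane_eq_offset.
 */
static unsigned int eq_preset_bytes(unsigned int num_lanes, bool is_8gts)
{
	return num_lanes * (is_8gts ? 2 : 1);
}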
int dw_pcie_setup_rc(struct dw_pcie_rp *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
@@ -858,6 +959,7 @@ int dw_pcie_setup_rc(struct dw_pcie_rp *pp)
PCI_COMMAND_MASTER | PCI_COMMAND_SERR;
dw_pcie_writel_dbi(pci, PCI_COMMAND, val);
+ dw_pcie_config_presets(pp);
/*
* If the platform provides its own child bus config accesses, it means
* the platform uses its own address translation component rather than
@@ -902,13 +1004,13 @@ static int dw_pcie_pme_turn_off(struct dw_pcie *pci)
atu.size = resource_size(pci->pp.msg_res);
atu.index = pci->pp.msg_atu_index;
- atu.cpu_addr = pci->pp.msg_res->start;
+ atu.parent_bus_addr = pci->pp.msg_res->start - pci->parent_bus_offset;
ret = dw_pcie_prog_outbound_atu(pci, &atu);
if (ret)
return ret;
- mem = ioremap(atu.cpu_addr, pci->region_align);
+ mem = ioremap(pci->pp.msg_res->start, pci->region_align);
if (!mem)
return -ENOMEM;
diff --git a/drivers/pci/controller/dwc/pcie-designware.c b/drivers/pci/controller/dwc/pcie-designware.c
index 145e7f579072..4d794964fa0f 100644
--- a/drivers/pci/controller/dwc/pcie-designware.c
+++ b/drivers/pci/controller/dwc/pcie-designware.c
@@ -16,6 +16,8 @@
#include <linux/gpio/consumer.h>
#include <linux/ioport.h>
#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/pcie-dwc.h>
#include <linux/platform_device.h>
#include <linux/sizes.h>
#include <linux/types.h>
@@ -52,6 +54,14 @@ static const char * const dw_pcie_core_rsts[DW_PCIE_NUM_CORE_RSTS] = {
[DW_PCIE_PWR_RST] = "pwr",
};
+static const struct dwc_pcie_vsec_id dwc_pcie_ptm_vsec_ids[] = {
+ { .vendor_id = PCI_VENDOR_ID_QCOM, /* EP */
+ .vsec_id = 0x03, .vsec_rev = 0x1 },
+ { .vendor_id = PCI_VENDOR_ID_QCOM, /* RC */
+ .vsec_id = 0x04, .vsec_rev = 0x1 },
+ { }
+};
+
static int dw_pcie_get_clocks(struct dw_pcie *pci)
{
int i, ret;
@@ -283,6 +293,57 @@ u16 dw_pcie_find_ext_capability(struct dw_pcie *pci, u8 cap)
}
EXPORT_SYMBOL_GPL(dw_pcie_find_ext_capability);
+static u16 __dw_pcie_find_vsec_capability(struct dw_pcie *pci, u16 vendor_id,
+ u16 vsec_id)
+{
+ u16 vsec = 0;
+ u32 header;
+
+ if (vendor_id != dw_pcie_readw_dbi(pci, PCI_VENDOR_ID))
+ return 0;
+
+ while ((vsec = dw_pcie_find_next_ext_capability(pci, vsec,
+ PCI_EXT_CAP_ID_VNDR))) {
+ header = dw_pcie_readl_dbi(pci, vsec + PCI_VNDR_HEADER);
+ if (PCI_VNDR_HEADER_ID(header) == vsec_id)
+ return vsec;
+ }
+
+ return 0;
+}
+
+static u16 dw_pcie_find_vsec_capability(struct dw_pcie *pci,
+ const struct dwc_pcie_vsec_id *vsec_ids)
+{
+ const struct dwc_pcie_vsec_id *vid;
+ u16 vsec;
+ u32 header;
+
+ for (vid = vsec_ids; vid->vendor_id; vid++) {
+ vsec = __dw_pcie_find_vsec_capability(pci, vid->vendor_id,
+ vid->vsec_id);
+ if (vsec) {
+ header = dw_pcie_readl_dbi(pci, vsec + PCI_VNDR_HEADER);
+ if (PCI_VNDR_HEADER_REV(header) == vid->vsec_rev)
+ return vsec;
+ }
+ }
+
+ return 0;
+}
+
+u16 dw_pcie_find_rasdes_capability(struct dw_pcie *pci)
+{
+ return dw_pcie_find_vsec_capability(pci, dwc_pcie_rasdes_vsec_ids);
+}
+EXPORT_SYMBOL_GPL(dw_pcie_find_rasdes_capability);
+
+u16 dw_pcie_find_ptm_capability(struct dw_pcie *pci)
+{
+ return dw_pcie_find_vsec_capability(pci, dwc_pcie_ptm_vsec_ids);
+}
+EXPORT_SYMBOL_GPL(dw_pcie_find_ptm_capability);
+
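/*
 * Illustrative sketch, not part of the patch: the PCI_VNDR_HEADER dword
 * at offset 4 of a VSEC packs the vendor-defined ID in bits 15:0, the
 * revision in bits 19:16 and the length in bits 31:20; this is what the
 * PCI_VNDR_HEADER_ID()/_REV() macros used above extract.
 */
static inline u16 example_vsec_id(u32 header)  { return header & 0xffff; }
static inline u8  example_vsec_rev(u32 header) { return (header >> 16) & 0xf; }
static inline u16 example_vsec_len(u32 header) { return (header >> 20) & 0xfff; }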
int dw_pcie_read(void __iomem *addr, int size, u32 *val)
{
if (!IS_ALIGNED((uintptr_t)addr, size)) {
@@ -470,25 +531,22 @@ static inline u32 dw_pcie_enable_ecrc(u32 val)
int dw_pcie_prog_outbound_atu(struct dw_pcie *pci,
const struct dw_pcie_ob_atu_cfg *atu)
{
- u64 cpu_addr = atu->cpu_addr;
+ u64 parent_bus_addr = atu->parent_bus_addr;
u32 retries, val;
u64 limit_addr;
- if (pci->ops && pci->ops->cpu_addr_fixup)
- cpu_addr = pci->ops->cpu_addr_fixup(pci, cpu_addr);
-
- limit_addr = cpu_addr + atu->size - 1;
+ limit_addr = parent_bus_addr + atu->size - 1;
- if ((limit_addr & ~pci->region_limit) != (cpu_addr & ~pci->region_limit) ||
- !IS_ALIGNED(cpu_addr, pci->region_align) ||
+ if ((limit_addr & ~pci->region_limit) != (parent_bus_addr & ~pci->region_limit) ||
+ !IS_ALIGNED(parent_bus_addr, pci->region_align) ||
!IS_ALIGNED(atu->pci_addr, pci->region_align) || !atu->size) {
return -EINVAL;
}
dw_pcie_writel_atu_ob(pci, atu->index, PCIE_ATU_LOWER_BASE,
- lower_32_bits(cpu_addr));
+ lower_32_bits(parent_bus_addr));
dw_pcie_writel_atu_ob(pci, atu->index, PCIE_ATU_UPPER_BASE,
- upper_32_bits(cpu_addr));
+ upper_32_bits(parent_bus_addr));
dw_pcie_writel_atu_ob(pci, atu->index, PCIE_ATU_LIMIT,
lower_32_bits(limit_addr));
@@ -502,7 +560,7 @@ int dw_pcie_prog_outbound_atu(struct dw_pcie *pci,
upper_32_bits(atu->pci_addr));
val = atu->type | atu->routing | PCIE_ATU_FUNC_NUM(atu->func_no);
- if (upper_32_bits(limit_addr) > upper_32_bits(cpu_addr) &&
+ if (upper_32_bits(limit_addr) > upper_32_bits(parent_bus_addr) &&
dw_pcie_ver_is_ge(pci, 460A))
val |= PCIE_ATU_INCREASE_REGION_SIZE;
if (dw_pcie_ver_is(pci, 490A))
@@ -545,13 +603,13 @@ static inline void dw_pcie_writel_atu_ib(struct dw_pcie *pci, u32 index, u32 reg
}
int dw_pcie_prog_inbound_atu(struct dw_pcie *pci, int index, int type,
- u64 cpu_addr, u64 pci_addr, u64 size)
+ u64 parent_bus_addr, u64 pci_addr, u64 size)
{
u64 limit_addr = pci_addr + size - 1;
u32 retries, val;
if ((limit_addr & ~pci->region_limit) != (pci_addr & ~pci->region_limit) ||
- !IS_ALIGNED(cpu_addr, pci->region_align) ||
+ !IS_ALIGNED(parent_bus_addr, pci->region_align) ||
!IS_ALIGNED(pci_addr, pci->region_align) || !size) {
return -EINVAL;
}
@@ -568,9 +626,9 @@ int dw_pcie_prog_inbound_atu(struct dw_pcie *pci, int index, int type,
upper_32_bits(limit_addr));
dw_pcie_writel_atu_ib(pci, index, PCIE_ATU_LOWER_TARGET,
- lower_32_bits(cpu_addr));
+ lower_32_bits(parent_bus_addr));
dw_pcie_writel_atu_ib(pci, index, PCIE_ATU_UPPER_TARGET,
- upper_32_bits(cpu_addr));
+ upper_32_bits(parent_bus_addr));
val = type;
if (upper_32_bits(limit_addr) > upper_32_bits(pci_addr) &&
@@ -597,18 +655,18 @@ int dw_pcie_prog_inbound_atu(struct dw_pcie *pci, int index, int type,
}
int dw_pcie_prog_ep_inbound_atu(struct dw_pcie *pci, u8 func_no, int index,
- int type, u64 cpu_addr, u8 bar, size_t size)
+ int type, u64 parent_bus_addr, u8 bar, size_t size)
{
u32 retries, val;
- if (!IS_ALIGNED(cpu_addr, pci->region_align) ||
- !IS_ALIGNED(cpu_addr, size))
+ if (!IS_ALIGNED(parent_bus_addr, pci->region_align) ||
+ !IS_ALIGNED(parent_bus_addr, size))
return -EINVAL;
dw_pcie_writel_atu_ib(pci, index, PCIE_ATU_LOWER_TARGET,
- lower_32_bits(cpu_addr));
+ lower_32_bits(parent_bus_addr));
dw_pcie_writel_atu_ib(pci, index, PCIE_ATU_UPPER_TARGET,
- upper_32_bits(cpu_addr));
+ upper_32_bits(parent_bus_addr));
dw_pcie_writel_atu_ib(pci, index, PCIE_ATU_REGION_CTRL1, type |
PCIE_ATU_FUNC_NUM(func_no));
@@ -667,7 +725,7 @@ int dw_pcie_wait_for_link(struct dw_pcie *pci)
}
EXPORT_SYMBOL_GPL(dw_pcie_wait_for_link);
-int dw_pcie_link_up(struct dw_pcie *pci)
+bool dw_pcie_link_up(struct dw_pcie *pci)
{
u32 val;
@@ -737,6 +795,14 @@ static void dw_pcie_link_set_max_speed(struct dw_pcie *pci)
}
+int dw_pcie_link_get_max_link_width(struct dw_pcie *pci)
+{
+ u8 cap = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
+ u32 lnkcap = dw_pcie_readl_dbi(pci, cap + PCI_EXP_LNKCAP);
+
+ return FIELD_GET(PCI_EXP_LNKCAP_MLW, lnkcap);
+}
+
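/*
 * Illustrative sketch, not part of the patch: FIELD_GET() extracts the
 * field selected by its mask, so the helper above behaves as follows on
 * a concrete Link Capabilities value.
 */
static u32 example_max_link_width(void)
{
	u32 lnkcap = 0x00000041;	/* x4 link, 2.5 GT/s max */

	return FIELD_GET(PCI_EXP_LNKCAP_MLW, lnkcap);	/* == 4 */
}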
static void dw_pcie_link_set_max_link_width(struct dw_pcie *pci, u32 num_lanes)
{
u32 lnkcap, lwsc, plc;
@@ -753,22 +819,19 @@ static void dw_pcie_link_set_max_link_width(struct dw_pcie *pci, u32 num_lanes)
/* Set link width speed control register */
lwsc = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
lwsc &= ~PORT_LOGIC_LINK_WIDTH_MASK;
+ lwsc |= PORT_LOGIC_LINK_WIDTH_1_LANES;
switch (num_lanes) {
case 1:
plc |= PORT_LINK_MODE_1_LANES;
- lwsc |= PORT_LOGIC_LINK_WIDTH_1_LANES;
break;
case 2:
plc |= PORT_LINK_MODE_2_LANES;
- lwsc |= PORT_LOGIC_LINK_WIDTH_2_LANES;
break;
case 4:
plc |= PORT_LINK_MODE_4_LANES;
- lwsc |= PORT_LOGIC_LINK_WIDTH_4_LANES;
break;
case 8:
plc |= PORT_LINK_MODE_8_LANES;
- lwsc |= PORT_LOGIC_LINK_WIDTH_8_LANES;
break;
default:
dev_err(pci->dev, "num-lanes %u: invalid value\n", num_lanes);
@@ -1105,3 +1168,63 @@ void dw_pcie_setup(struct dw_pcie *pci)
dw_pcie_link_set_max_link_width(pci, pci->num_lanes);
}
+
+resource_size_t dw_pcie_parent_bus_offset(struct dw_pcie *pci,
+ const char *reg_name,
+ resource_size_t cpu_phys_addr)
+{
+ struct device *dev = pci->dev;
+ struct device_node *np = dev->of_node;
+ int index;
+ u64 reg_addr, fixup_addr;
+ u64 (*fixup)(struct dw_pcie *pcie, u64 cpu_addr);
+
+ /* Look up reg_name address on parent bus */
+ index = of_property_match_string(np, "reg-names", reg_name);
+
+ if (index < 0) {
+ dev_err(dev, "No %s in devicetree \"reg\" property\n", reg_name);
+ return 0;
+ }
+
+ of_property_read_reg(np, index, &reg_addr, NULL);
+
+ fixup = pci->ops ? pci->ops->cpu_addr_fixup : NULL;
+ if (fixup) {
+ fixup_addr = fixup(pci, cpu_phys_addr);
+ if (reg_addr == fixup_addr) {
+ dev_info(dev, "%s reg[%d] %#010llx == %#010llx == fixup(cpu %#010llx); %ps is redundant with this devicetree\n",
+ reg_name, index, reg_addr, fixup_addr,
+ (unsigned long long) cpu_phys_addr, fixup);
+ } else {
+ dev_warn(dev, "%s reg[%d] %#010llx != %#010llx == fixup(cpu %#010llx); devicetree is broken\n",
+ reg_name, index, reg_addr, fixup_addr,
+ (unsigned long long) cpu_phys_addr);
+ reg_addr = fixup_addr;
+ }
+
+ return cpu_phys_addr - reg_addr;
+ }
+
+ if (pci->use_parent_dt_ranges) {
+ /*
+ * This platform once had a fixup, presumably because it
+ * translates between CPU and PCI controller addresses.
+ * Log a note if devicetree didn't describe a translation.
+ */
+ if (reg_addr == cpu_phys_addr)
+ dev_info(dev, "%s reg[%d] %#010llx == cpu %#010llx; no fixup was ever needed for this devicetree\n",
+ reg_name, index, reg_addr,
+ (unsigned long long) cpu_phys_addr);
+ } else {
+ if (reg_addr != cpu_phys_addr) {
+ dev_warn(dev, "%s reg[%d] %#010llx != cpu %#010llx; no fixup and devicetree \"ranges\" is broken, assuming no translation\n",
+ reg_name, index, reg_addr,
+ (unsigned long long) cpu_phys_addr);
+ return 0;
+ }
+ }
+
+ return cpu_phys_addr - reg_addr;
+}
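To make the new field concrete: every CPU physical address handed to the iATU in the hunks above is rebased by subtracting parent_bus_offset, roughly as in this sketch (helper name hypothetical):
/* Sketch: rebase a CPU physical address into parent (AXI) bus space. */
static u64 example_to_parent_bus_addr(struct dw_pcie *pci,
				      resource_size_t cpu_addr)
{
	return cpu_addr - pci->parent_bus_offset;
}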
diff --git a/drivers/pci/controller/dwc/pcie-designware.h b/drivers/pci/controller/dwc/pcie-designware.h
index 501d9ddfea16..ce9e18554e42 100644
--- a/drivers/pci/controller/dwc/pcie-designware.h
+++ b/drivers/pci/controller/dwc/pcie-designware.h
@@ -25,6 +25,8 @@
#include <linux/pci-epc.h>
#include <linux/pci-epf.h>
+#include "../../pci.h"
+
/* DWC PCIe IP-core versions (native support since v4.70a) */
#define DW_PCIE_VER_365A 0x3336352a
#define DW_PCIE_VER_460A 0x3436302a
@@ -260,6 +262,21 @@
#define PCIE_RAS_DES_EVENT_COUNTER_DATA 0xc
+/* PTM register definitions */
+#define PTM_RES_REQ_CTRL 0x8
+#define PTM_RES_CCONTEXT_VALID BIT(0)
+#define PTM_REQ_AUTO_UPDATE_ENABLED BIT(0)
+#define PTM_REQ_START_UPDATE BIT(1)
+
+#define PTM_LOCAL_LSB 0x10
+#define PTM_LOCAL_MSB 0x14
+#define PTM_T1_T2_LSB 0x18
+#define PTM_T1_T2_MSB 0x1c
+#define PTM_T3_T4_LSB 0x28
+#define PTM_T3_T4_MSB 0x2c
+#define PTM_MASTER_LSB 0x38
+#define PTM_MASTER_MSB 0x3c
+
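/*
 * Illustrative sketch, not part of the patch: the PTM timestamps above
 * are 64-bit values split across LSB/MSB register pairs; a reader would
 * combine them like this (rollover handling omitted).
 */
static inline u64 example_read_ptm_local(struct dw_pcie *pci, u16 ptm_base)
{
	u64 lsb = dw_pcie_readl_dbi(pci, ptm_base + PTM_LOCAL_LSB);
	u64 msb = dw_pcie_readl_dbi(pci, ptm_base + PTM_LOCAL_MSB);

	return (msb << 32) | lsb;
}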
/*
* The default address offset between dbi_base and atu_base. Root controller
* drivers are not required to initialize atu_base if the offset matches this
@@ -330,9 +347,40 @@ enum dw_pcie_ltssm {
/* Need to align with PCIE_PORT_DEBUG0 bits 0:5 */
DW_PCIE_LTSSM_DETECT_QUIET = 0x0,
DW_PCIE_LTSSM_DETECT_ACT = 0x1,
+ DW_PCIE_LTSSM_POLL_ACTIVE = 0x2,
+ DW_PCIE_LTSSM_POLL_COMPLIANCE = 0x3,
+ DW_PCIE_LTSSM_POLL_CONFIG = 0x4,
+ DW_PCIE_LTSSM_PRE_DETECT_QUIET = 0x5,
DW_PCIE_LTSSM_DETECT_WAIT = 0x6,
+ DW_PCIE_LTSSM_CFG_LINKWD_START = 0x7,
+ DW_PCIE_LTSSM_CFG_LINKWD_ACEPT = 0x8,
+ DW_PCIE_LTSSM_CFG_LANENUM_WAI = 0x9,
+ DW_PCIE_LTSSM_CFG_LANENUM_ACEPT = 0xa,
+ DW_PCIE_LTSSM_CFG_COMPLETE = 0xb,
+ DW_PCIE_LTSSM_CFG_IDLE = 0xc,
+ DW_PCIE_LTSSM_RCVRY_LOCK = 0xd,
+ DW_PCIE_LTSSM_RCVRY_SPEED = 0xe,
+ DW_PCIE_LTSSM_RCVRY_RCVRCFG = 0xf,
+ DW_PCIE_LTSSM_RCVRY_IDLE = 0x10,
DW_PCIE_LTSSM_L0 = 0x11,
+ DW_PCIE_LTSSM_L0S = 0x12,
+ DW_PCIE_LTSSM_L123_SEND_EIDLE = 0x13,
+ DW_PCIE_LTSSM_L1_IDLE = 0x14,
DW_PCIE_LTSSM_L2_IDLE = 0x15,
+ DW_PCIE_LTSSM_L2_WAKE = 0x16,
+ DW_PCIE_LTSSM_DISABLED_ENTRY = 0x17,
+ DW_PCIE_LTSSM_DISABLED_IDLE = 0x18,
+ DW_PCIE_LTSSM_DISABLED = 0x19,
+ DW_PCIE_LTSSM_LPBK_ENTRY = 0x1a,
+ DW_PCIE_LTSSM_LPBK_ACTIVE = 0x1b,
+ DW_PCIE_LTSSM_LPBK_EXIT = 0x1c,
+ DW_PCIE_LTSSM_LPBK_EXIT_TIMEOUT = 0x1d,
+ DW_PCIE_LTSSM_HOT_RESET_ENTRY = 0x1e,
+ DW_PCIE_LTSSM_HOT_RESET = 0x1f,
+ DW_PCIE_LTSSM_RCVRY_EQ0 = 0x20,
+ DW_PCIE_LTSSM_RCVRY_EQ1 = 0x21,
+ DW_PCIE_LTSSM_RCVRY_EQ2 = 0x22,
+ DW_PCIE_LTSSM_RCVRY_EQ3 = 0x23,
DW_PCIE_LTSSM_UNKNOWN = 0xFFFFFFFF,
};
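These values mirror the LTSSM state field in hardware; reading the current state amounts to masking bits 5:0 of PCIE_PORT_DEBUG0, as in this sketch (helper name hypothetical):
/* Sketch: the current LTSSM state is PCIE_PORT_DEBUG0 bits 5:0. */
static enum dw_pcie_ltssm example_get_ltssm(struct dw_pcie *pci)
{
	u32 val = dw_pcie_readl_dbi(pci, PCIE_PORT_DEBUG0);

	return (enum dw_pcie_ltssm)(val & GENMASK(5, 0));
}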
@@ -343,7 +391,7 @@ struct dw_pcie_ob_atu_cfg {
u8 func_no;
u8 code;
u8 routing;
- u64 cpu_addr;
+ u64 parent_bus_addr;
u64 pci_addr;
u64 size;
};
@@ -381,6 +429,7 @@ struct dw_pcie_rp {
int msg_atu_index;
struct resource *msg_res;
bool use_linkup_irq;
+ struct pci_eq_presets presets;
};
struct dw_pcie_ep_ops {
@@ -431,12 +480,17 @@ struct dw_pcie_ops {
size_t size, u32 val);
void (*write_dbi2)(struct dw_pcie *pcie, void __iomem *base, u32 reg,
size_t size, u32 val);
- int (*link_up)(struct dw_pcie *pcie);
+ bool (*link_up)(struct dw_pcie *pcie);
enum dw_pcie_ltssm (*get_ltssm)(struct dw_pcie *pcie);
int (*start_link)(struct dw_pcie *pcie);
void (*stop_link)(struct dw_pcie *pcie);
};
+struct debugfs_info {
+ struct dentry *debug_dir;
+ void *rasdes_info;
+};
+
struct dw_pcie {
struct device *dev;
void __iomem *dbi_base;
@@ -445,6 +499,7 @@ struct dw_pcie {
void __iomem *atu_base;
resource_size_t atu_phys_addr;
size_t atu_size;
+ resource_size_t parent_bus_offset;
u32 num_ib_windows;
u32 num_ob_windows;
u32 region_align;
@@ -465,6 +520,23 @@ struct dw_pcie {
struct reset_control_bulk_data core_rsts[DW_PCIE_NUM_CORE_RSTS];
struct gpio_desc *pe_rst;
bool suspended;
+ struct debugfs_info *debugfs;
+ enum dw_pcie_device_mode mode;
+ u16 ptm_vsec_offset;
+ struct pci_ptm_debugfs *ptm_debugfs;
+
+ /*
+ * If iATU input addresses are offset from CPU physical addresses,
+ * we previously required .cpu_addr_fixup() to convert them. We
+ * now rely on the devicetree instead. If .cpu_addr_fixup()
+ * exists, we compare its results with devicetree.
+ *
+ * If .cpu_addr_fixup() does not exist, we assume the offset is
+ * zero and warn if devicetree claims otherwise. If we know all
+ * devicetrees correctly describe the offset, set
+ * use_parent_dt_ranges to true to avoid this warning.
+ */
+ bool use_parent_dt_ranges;
};
#define to_dw_pcie_from_pp(port) container_of((port), struct dw_pcie, pp)
@@ -478,6 +550,8 @@ void dw_pcie_version_detect(struct dw_pcie *pci);
u8 dw_pcie_find_capability(struct dw_pcie *pci, u8 cap);
u16 dw_pcie_find_ext_capability(struct dw_pcie *pci, u8 cap);
+u16 dw_pcie_find_rasdes_capability(struct dw_pcie *pci);
+u16 dw_pcie_find_ptm_capability(struct dw_pcie *pci);
int dw_pcie_read(void __iomem *addr, int size, u32 *val);
int dw_pcie_write(void __iomem *addr, int size, u32 val);
@@ -485,20 +559,25 @@ int dw_pcie_write(void __iomem *addr, int size, u32 val);
u32 dw_pcie_read_dbi(struct dw_pcie *pci, u32 reg, size_t size);
void dw_pcie_write_dbi(struct dw_pcie *pci, u32 reg, size_t size, u32 val);
void dw_pcie_write_dbi2(struct dw_pcie *pci, u32 reg, size_t size, u32 val);
-int dw_pcie_link_up(struct dw_pcie *pci);
+bool dw_pcie_link_up(struct dw_pcie *pci);
void dw_pcie_upconfig_setup(struct dw_pcie *pci);
int dw_pcie_wait_for_link(struct dw_pcie *pci);
+int dw_pcie_link_get_max_link_width(struct dw_pcie *pci);
int dw_pcie_prog_outbound_atu(struct dw_pcie *pci,
const struct dw_pcie_ob_atu_cfg *atu);
int dw_pcie_prog_inbound_atu(struct dw_pcie *pci, int index, int type,
- u64 cpu_addr, u64 pci_addr, u64 size);
+ u64 parent_bus_addr, u64 pci_addr, u64 size);
int dw_pcie_prog_ep_inbound_atu(struct dw_pcie *pci, u8 func_no, int index,
- int type, u64 cpu_addr, u8 bar, size_t size);
+ int type, u64 parent_bus_addr,
+ u8 bar, size_t size);
void dw_pcie_disable_atu(struct dw_pcie *pci, u32 dir, int index);
void dw_pcie_setup(struct dw_pcie *pci);
void dw_pcie_iatu_detect(struct dw_pcie *pci);
int dw_pcie_edma_detect(struct dw_pcie *pci);
void dw_pcie_edma_remove(struct dw_pcie *pci);
+resource_size_t dw_pcie_parent_bus_offset(struct dw_pcie *pci,
+ const char *reg_name,
+ resource_size_t cpu_phy_addr);
static inline void dw_pcie_writel_dbi(struct dw_pcie *pci, u32 reg, u32 val)
{
@@ -743,6 +822,7 @@ int dw_pcie_ep_raise_msix_irq(struct dw_pcie_ep *ep, u8 func_no,
int dw_pcie_ep_raise_msix_irq_doorbell(struct dw_pcie_ep *ep, u8 func_no,
u16 interrupt_num);
void dw_pcie_ep_reset_bar(struct dw_pcie *pci, enum pci_barno bar);
+int dw_pcie_ep_hide_ext_capability(struct dw_pcie *pci, u8 prev_cap, u8 cap);
struct dw_pcie_ep_func *
dw_pcie_ep_get_func_from_ep(struct dw_pcie_ep *ep, u8 func_no);
#else
@@ -800,10 +880,30 @@ static inline void dw_pcie_ep_reset_bar(struct dw_pcie *pci, enum pci_barno bar)
{
}
+static inline int dw_pcie_ep_hide_ext_capability(struct dw_pcie *pci,
+ u8 prev_cap, u8 cap)
+{
+ return 0;
+}
+
static inline struct dw_pcie_ep_func *
dw_pcie_ep_get_func_from_ep(struct dw_pcie_ep *ep, u8 func_no)
{
return NULL;
}
#endif
+
+#ifdef CONFIG_PCIE_DW_DEBUGFS
+void dwc_pcie_debugfs_init(struct dw_pcie *pci, enum dw_pcie_device_mode mode);
+void dwc_pcie_debugfs_deinit(struct dw_pcie *pci);
+#else
+static inline void dwc_pcie_debugfs_init(struct dw_pcie *pci,
+ enum dw_pcie_device_mode mode)
+{
+}
+static inline void dwc_pcie_debugfs_deinit(struct dw_pcie *pci)
+{
+}
+#endif
+
#endif /* _PCIE_DESIGNWARE_H */
diff --git a/drivers/pci/controller/dwc/pcie-dw-rockchip.c b/drivers/pci/controller/dwc/pcie-dw-rockchip.c
index 93698abff4d9..b5f5eee5a50e 100644
--- a/drivers/pci/controller/dwc/pcie-dw-rockchip.c
+++ b/drivers/pci/controller/dwc/pcie-dw-rockchip.c
@@ -8,6 +8,7 @@
* Author: Simon Xue <xxm@rock-chips.com>
*/
+#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/gpio/consumer.h>
#include <linux/irqchip/chained_irq.h>
@@ -21,6 +22,7 @@
#include <linux/regmap.h>
#include <linux/reset.h>
+#include "../../pci.h"
#include "pcie-designware.h"
/*
@@ -33,26 +35,38 @@
#define to_rockchip_pcie(x) dev_get_drvdata((x)->dev)
-#define PCIE_CLIENT_RC_MODE HIWORD_UPDATE_BIT(0x40)
-#define PCIE_CLIENT_EP_MODE HIWORD_UPDATE(0xf0, 0x0)
-#define PCIE_CLIENT_ENABLE_LTSSM HIWORD_UPDATE_BIT(0xc)
-#define PCIE_CLIENT_DISABLE_LTSSM HIWORD_UPDATE(0x0c, 0x8)
-#define PCIE_CLIENT_INTR_STATUS_MISC 0x10
-#define PCIE_CLIENT_INTR_MASK_MISC 0x24
-#define PCIE_SMLH_LINKUP BIT(16)
-#define PCIE_RDLH_LINKUP BIT(17)
-#define PCIE_LINKUP (PCIE_SMLH_LINKUP | PCIE_RDLH_LINKUP)
-#define PCIE_RDLH_LINK_UP_CHGED BIT(1)
-#define PCIE_LINK_REQ_RST_NOT_INT BIT(2)
-#define PCIE_L0S_ENTRY 0x11
-#define PCIE_CLIENT_GENERAL_CONTROL 0x0
+/* General Control Register */
+#define PCIE_CLIENT_GENERAL_CON 0x0
+#define PCIE_CLIENT_RC_MODE HIWORD_UPDATE_BIT(0x40)
+#define PCIE_CLIENT_EP_MODE HIWORD_UPDATE(0xf0, 0x0)
+#define PCIE_CLIENT_ENABLE_LTSSM HIWORD_UPDATE_BIT(0xc)
+#define PCIE_CLIENT_DISABLE_LTSSM HIWORD_UPDATE(0x0c, 0x8)
+
+/* Interrupt Status Register Related to Legacy Interrupt */
#define PCIE_CLIENT_INTR_STATUS_LEGACY 0x8
+
+/* Interrupt Status Register Related to Miscellaneous Operation */
+#define PCIE_CLIENT_INTR_STATUS_MISC 0x10
+#define PCIE_RDLH_LINK_UP_CHGED BIT(1)
+#define PCIE_LINK_REQ_RST_NOT_INT BIT(2)
+
+/* Interrupt Mask Register Related to Legacy Interrupt */
#define PCIE_CLIENT_INTR_MASK_LEGACY 0x1c
-#define PCIE_CLIENT_GENERAL_DEBUG 0x104
+
+/* Interrupt Mask Register Related to Miscellaneous Operation */
+#define PCIE_CLIENT_INTR_MASK_MISC 0x24
+
+/* Hot Reset Control Register */
#define PCIE_CLIENT_HOT_RESET_CTRL 0x180
+#define PCIE_LTSSM_APP_DLY2_EN BIT(1)
+#define PCIE_LTSSM_APP_DLY2_DONE BIT(3)
+#define PCIE_LTSSM_ENABLE_ENHANCE BIT(4)
+
+/* LTSSM Status Register */
#define PCIE_CLIENT_LTSSM_STATUS 0x300
-#define PCIE_LTSSM_ENABLE_ENHANCE BIT(4)
-#define PCIE_LTSSM_STATUS_MASK GENMASK(5, 0)
+#define PCIE_LINKUP 0x3
+#define PCIE_LINKUP_MASK GENMASK(17, 16)
+#define PCIE_LTSSM_STATUS_MASK GENMASK(5, 0)
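/*
 * Illustrative sketch, not part of the patch (macro bodies assumed to
 * match the driver): Rockchip APB registers use a "hiword mask", where
 * bits 31:16 of the written value are a write-enable mask for bits
 * 15:0, so a single write updates only the selected bits with no
 * read-modify-write.
 */
#define EXAMPLE_HIWORD_UPDATE(mask, val)	(((mask) << 16) | (val))
#define EXAMPLE_HIWORD_UPDATE_BIT(val)		EXAMPLE_HIWORD_UPDATE(val, val)
/* EXAMPLE_HIWORD_UPDATE_BIT(0xc) == 0x000c000c: assert bits 2 and 3 only. */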
struct rockchip_pcie {
struct dw_pcie pci;
@@ -144,8 +158,8 @@ static int rockchip_pcie_init_irq_domain(struct rockchip_pcie *rockchip)
return -EINVAL;
}
- rockchip->irq_domain = irq_domain_add_linear(intc, PCI_NUM_INTX,
- &intx_domain_ops, rockchip);
+ rockchip->irq_domain = irq_domain_create_linear(of_fwnode_handle(intc), PCI_NUM_INTX,
+ &intx_domain_ops, rockchip);
of_node_put(intc);
if (!rockchip->irq_domain) {
dev_err(dev, "failed to get a INTx IRQ domain\n");
@@ -163,25 +177,36 @@ static u32 rockchip_pcie_get_ltssm(struct rockchip_pcie *rockchip)
static void rockchip_pcie_enable_ltssm(struct rockchip_pcie *rockchip)
{
rockchip_pcie_writel_apb(rockchip, PCIE_CLIENT_ENABLE_LTSSM,
- PCIE_CLIENT_GENERAL_CONTROL);
+ PCIE_CLIENT_GENERAL_CON);
}
static void rockchip_pcie_disable_ltssm(struct rockchip_pcie *rockchip)
{
rockchip_pcie_writel_apb(rockchip, PCIE_CLIENT_DISABLE_LTSSM,
- PCIE_CLIENT_GENERAL_CONTROL);
+ PCIE_CLIENT_GENERAL_CON);
}
-static int rockchip_pcie_link_up(struct dw_pcie *pci)
+static bool rockchip_pcie_link_up(struct dw_pcie *pci)
{
struct rockchip_pcie *rockchip = to_rockchip_pcie(pci);
u32 val = rockchip_pcie_get_ltssm(rockchip);
- if ((val & PCIE_LINKUP) == PCIE_LINKUP &&
- (val & PCIE_LTSSM_STATUS_MASK) == PCIE_L0S_ENTRY)
- return 1;
+ return FIELD_GET(PCIE_LINKUP_MASK, val) == PCIE_LINKUP;
+}
- return 0;
+static void rockchip_pcie_enable_l0s(struct dw_pcie *pci)
+{
+ u32 cap, lnkcap;
+
+ /* Enable L0S capability for all SoCs */
+ cap = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
+ if (cap) {
+ lnkcap = dw_pcie_readl_dbi(pci, cap + PCI_EXP_LNKCAP);
+ lnkcap |= PCI_EXP_LNKCAP_ASPM_L0S;
+ dw_pcie_dbi_ro_wr_en(pci);
+ dw_pcie_writel_dbi(pci, cap + PCI_EXP_LNKCAP, lnkcap);
+ dw_pcie_dbi_ro_wr_dis(pci);
+ }
}
static int rockchip_pcie_start_link(struct dw_pcie *pci)
@@ -202,7 +227,7 @@ static int rockchip_pcie_start_link(struct dw_pcie *pci)
* Keep the longer delay as before, rather than just 100 us, since we
* don't know how long the device actually needs to reset.
*/
- msleep(100);
+ msleep(PCIE_T_PVPERL_MS);
gpiod_set_value_cansleep(rockchip->rst_gpio, 1);
return 0;
@@ -233,6 +258,8 @@ static int rockchip_pcie_host_init(struct dw_pcie_rp *pp)
irq_set_chained_handler_and_data(irq, rockchip_pcie_intx_handler,
rockchip);
+ rockchip_pcie_enable_l0s(pci);
+
return 0;
}
@@ -240,11 +267,37 @@ static const struct dw_pcie_host_ops rockchip_pcie_host_ops = {
.init = rockchip_pcie_host_init,
};
+/*
+ * ATS does not work on RK3588 when running in EP mode.
+ *
+ * After the host has enabled ATS on the EP side, it will send an IOTLB
+ * invalidation request to the EP side. However, the RK3588 will never send
+ * a completion back and eventually the host will print an IOTLB_INV_TIMEOUT
+ * error, and the EP will not be operational. If we hide the ATS capability,
+ * things work as expected.
+ */
+static void rockchip_pcie_ep_hide_broken_ats_cap_rk3588(struct dw_pcie_ep *ep)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ struct device *dev = pci->dev;
+
+ /* Only hide the ATS capability for RK3588 running in EP mode. */
+ if (!of_device_is_compatible(dev->of_node, "rockchip,rk3588-pcie-ep"))
+ return;
+
+ if (dw_pcie_ep_hide_ext_capability(pci, PCI_EXT_CAP_ID_SECPCI,
+ PCI_EXT_CAP_ID_ATS))
+ dev_err(dev, "failed to hide ATS capability\n");
+}
+
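/*
 * Illustrative sketch, not part of the patch, of what
 * dw_pcie_ep_hide_ext_capability() does conceptually: re-point the
 * "next capability" field (bits 31:20) of the previous extended
 * capability header past the capability being hidden, so config-space
 * walkers never reach it.
 */
static void example_hide_ext_cap(struct dw_pcie *pci, u16 prev, u16 victim)
{
	u16 next = PCI_EXT_CAP_NEXT(dw_pcie_readl_dbi(pci, victim));
	u32 header = dw_pcie_readl_dbi(pci, prev);

	header &= ~GENMASK(31, 20);
	header |= (u32)next << 20;

	dw_pcie_dbi_ro_wr_en(pci);
	dw_pcie_writel_dbi(pci, prev, header);
	dw_pcie_dbi_ro_wr_dis(pci);
}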
static void rockchip_pcie_ep_init(struct dw_pcie_ep *ep)
{
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
enum pci_barno bar;
+ rockchip_pcie_enable_l0s(pci);
+ rockchip_pcie_ep_hide_broken_ats_cap_rk3588(ep);
+
for (bar = 0; bar < PCI_STD_NUM_BARS; bar++)
dw_pcie_ep_reset_bar(pci, bar);
};
@@ -272,13 +325,14 @@ static const struct pci_epc_features rockchip_pcie_epc_features_rk3568 = {
.linkup_notifier = true,
.msi_capable = true,
.msix_capable = true,
+ .intx_capable = false,
.align = SZ_64K,
- .bar[BAR_0] = { .type = BAR_FIXED, .fixed_size = SZ_1M, },
- .bar[BAR_1] = { .type = BAR_FIXED, .fixed_size = SZ_1M, },
- .bar[BAR_2] = { .type = BAR_FIXED, .fixed_size = SZ_1M, },
- .bar[BAR_3] = { .type = BAR_FIXED, .fixed_size = SZ_1M, },
- .bar[BAR_4] = { .type = BAR_FIXED, .fixed_size = SZ_1M, },
- .bar[BAR_5] = { .type = BAR_FIXED, .fixed_size = SZ_1M, },
+ .bar[BAR_0] = { .type = BAR_RESIZABLE, },
+ .bar[BAR_1] = { .type = BAR_RESIZABLE, },
+ .bar[BAR_2] = { .type = BAR_RESIZABLE, },
+ .bar[BAR_3] = { .type = BAR_RESIZABLE, },
+ .bar[BAR_4] = { .type = BAR_RESIZABLE, },
+ .bar[BAR_5] = { .type = BAR_RESIZABLE, },
};
/*
@@ -292,13 +346,14 @@ static const struct pci_epc_features rockchip_pcie_epc_features_rk3588 = {
.linkup_notifier = true,
.msi_capable = true,
.msix_capable = true,
+ .intx_capable = false,
.align = SZ_64K,
- .bar[BAR_0] = { .type = BAR_FIXED, .fixed_size = SZ_1M, },
- .bar[BAR_1] = { .type = BAR_FIXED, .fixed_size = SZ_1M, },
- .bar[BAR_2] = { .type = BAR_FIXED, .fixed_size = SZ_1M, },
- .bar[BAR_3] = { .type = BAR_FIXED, .fixed_size = SZ_1M, },
+ .bar[BAR_0] = { .type = BAR_RESIZABLE, },
+ .bar[BAR_1] = { .type = BAR_RESIZABLE, },
+ .bar[BAR_2] = { .type = BAR_RESIZABLE, },
+ .bar[BAR_3] = { .type = BAR_RESIZABLE, },
.bar[BAR_4] = { .type = BAR_RESERVED, },
- .bar[BAR_5] = { .type = BAR_FIXED, .fixed_size = SZ_1M, },
+ .bar[BAR_5] = { .type = BAR_RESIZABLE, },
};
static const struct pci_epc_features *
@@ -379,8 +434,8 @@ static int rockchip_pcie_phy_init(struct rockchip_pcie *rockchip)
static void rockchip_pcie_phy_deinit(struct rockchip_pcie *rockchip)
{
- phy_exit(rockchip->phy);
phy_power_off(rockchip->phy);
+ phy_exit(rockchip->phy);
}
static const struct dw_pcie_ops dw_pcie_ops = {
@@ -395,7 +450,7 @@ static irqreturn_t rockchip_pcie_rc_sys_irq_thread(int irq, void *arg)
struct dw_pcie *pci = &rockchip->pci;
struct dw_pcie_rp *pp = &pci->pp;
struct device *dev = pci->dev;
- u32 reg, val;
+ u32 reg;
reg = rockchip_pcie_readl_apb(rockchip, PCIE_CLIENT_INTR_STATUS_MISC);
rockchip_pcie_writel_apb(rockchip, reg, PCIE_CLIENT_INTR_STATUS_MISC);
@@ -404,8 +459,8 @@ static irqreturn_t rockchip_pcie_rc_sys_irq_thread(int irq, void *arg)
dev_dbg(dev, "LTSSM_STATUS: %#x\n", rockchip_pcie_get_ltssm(rockchip));
if (reg & PCIE_RDLH_LINK_UP_CHGED) {
- val = rockchip_pcie_get_ltssm(rockchip);
- if ((val & PCIE_LINKUP) == PCIE_LINKUP) {
+ if (rockchip_pcie_link_up(pci)) {
+ msleep(PCIE_RESET_CONFIG_WAIT_MS);
dev_dbg(dev, "Received Link up event. Starting enumeration!\n");
/* Rescan the bus to enumerate endpoint devices */
pci_lock_rescan_remove();
@@ -433,11 +488,14 @@ static irqreturn_t rockchip_pcie_ep_sys_irq_thread(int irq, void *arg)
if (reg & PCIE_LINK_REQ_RST_NOT_INT) {
dev_dbg(dev, "hot reset or link-down reset\n");
dw_pcie_ep_linkdown(&pci->ep);
+ /* Stop delaying link training. */
+ val = HIWORD_UPDATE_BIT(PCIE_LTSSM_APP_DLY2_DONE);
+ rockchip_pcie_writel_apb(rockchip, val,
+ PCIE_CLIENT_HOT_RESET_CTRL);
}
if (reg & PCIE_RDLH_LINK_UP_CHGED) {
- val = rockchip_pcie_get_ltssm(rockchip);
- if ((val & PCIE_LINKUP) == PCIE_LINKUP) {
+ if (rockchip_pcie_link_up(pci)) {
dev_dbg(dev, "link up\n");
dw_pcie_ep_linkup(&pci->ep);
}
@@ -474,7 +532,7 @@ static int rockchip_pcie_configure_rc(struct platform_device *pdev,
rockchip_pcie_writel_apb(rockchip, val, PCIE_CLIENT_HOT_RESET_CTRL);
rockchip_pcie_writel_apb(rockchip, PCIE_CLIENT_RC_MODE,
- PCIE_CLIENT_GENERAL_CONTROL);
+ PCIE_CLIENT_GENERAL_CON);
pp = &rockchip->pci.pp;
pp->ops = &rockchip_pcie_host_ops;
@@ -515,12 +573,15 @@ static int rockchip_pcie_configure_ep(struct platform_device *pdev,
return ret;
}
- /* LTSSM enable control mode */
- val = HIWORD_UPDATE_BIT(PCIE_LTSSM_ENABLE_ENHANCE);
+ /*
+ * LTSSM enable control mode, and automatically delay link training on
+ * hot reset/link-down reset.
+ */
+ val = HIWORD_UPDATE_BIT(PCIE_LTSSM_ENABLE_ENHANCE | PCIE_LTSSM_APP_DLY2_EN);
rockchip_pcie_writel_apb(rockchip, val, PCIE_CLIENT_HOT_RESET_CTRL);
rockchip_pcie_writel_apb(rockchip, PCIE_CLIENT_EP_MODE,
- PCIE_CLIENT_GENERAL_CONTROL);
+ PCIE_CLIENT_GENERAL_CON);
rockchip->pci.ep.ops = &rockchip_pcie_ep_ops;
rockchip->pci.ep.page_size = SZ_64K;
@@ -570,6 +631,10 @@ static int rockchip_pcie_probe(struct platform_device *pdev)
rockchip->pci.ops = &dw_pcie_ops;
rockchip->data = data;
+ /* Default N_FTS value (210) is broken, override it to 255 */
+ rockchip->pci.n_fts[0] = 255; /* Gen1 */
+ rockchip->pci.n_fts[1] = 255; /* Gen2+ */
+
ret = rockchip_pcie_resource_get(pdev, rockchip);
if (ret)
return ret;
diff --git a/drivers/pci/controller/dwc/pcie-hisi.c b/drivers/pci/controller/dwc/pcie-hisi.c
index 8904b5b85ee5..3c17897e56fc 100644
--- a/drivers/pci/controller/dwc/pcie-hisi.c
+++ b/drivers/pci/controller/dwc/pcie-hisi.c
@@ -15,6 +15,7 @@
#include <linux/pci-acpi.h>
#include <linux/pci-ecam.h>
#include "../../pci.h"
+#include "../pci-host-common.h"
#if defined(CONFIG_PCI_HISI) || (defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS))
diff --git a/drivers/pci/controller/dwc/pcie-histb.c b/drivers/pci/controller/dwc/pcie-histb.c
index 1f2f4c28a949..a52071589377 100644
--- a/drivers/pci/controller/dwc/pcie-histb.c
+++ b/drivers/pci/controller/dwc/pcie-histb.c
@@ -151,7 +151,7 @@ static struct pci_ops histb_pci_ops = {
.write = histb_pcie_wr_own_conf,
};
-static int histb_pcie_link_up(struct dw_pcie *pci)
+static bool histb_pcie_link_up(struct dw_pcie *pci)
{
struct histb_pcie *hipcie = to_histb_pcie(pci);
u32 regval;
@@ -160,11 +160,8 @@ static int histb_pcie_link_up(struct dw_pcie *pci)
regval = histb_pcie_readl(hipcie, PCIE_SYS_STAT0);
status = histb_pcie_readl(hipcie, PCIE_SYS_STAT4);
status &= PCIE_LTSSM_STATE_MASK;
- if ((regval & PCIE_XMLH_LINK_UP) && (regval & PCIE_RDLH_LINK_UP) &&
- (status == PCIE_LTSSM_STATE_ACTIVE))
- return 1;
-
- return 0;
+ return ((regval & PCIE_XMLH_LINK_UP) && (regval & PCIE_RDLH_LINK_UP) &&
+ (status == PCIE_LTSSM_STATE_ACTIVE));
}
static int histb_pcie_start_link(struct dw_pcie *pci)
diff --git a/drivers/pci/controller/dwc/pcie-intel-gw.c b/drivers/pci/controller/dwc/pcie-intel-gw.c
index 9b53b8f6f268..c21906eced61 100644
--- a/drivers/pci/controller/dwc/pcie-intel-gw.c
+++ b/drivers/pci/controller/dwc/pcie-intel-gw.c
@@ -57,7 +57,6 @@
PCIE_APP_IRN_INTA | PCIE_APP_IRN_INTB | \
PCIE_APP_IRN_INTC | PCIE_APP_IRN_INTD)
-#define BUS_IATU_OFFSET SZ_256M
#define RESET_INTERVAL_MS 100
struct intel_pcie {
@@ -381,13 +380,7 @@ static int intel_pcie_rc_init(struct dw_pcie_rp *pp)
return intel_pcie_host_setup(pcie);
}
-static u64 intel_pcie_cpu_addr(struct dw_pcie *pcie, u64 cpu_addr)
-{
- return cpu_addr + BUS_IATU_OFFSET;
-}
-
static const struct dw_pcie_ops intel_pcie_ops = {
- .cpu_addr_fixup = intel_pcie_cpu_addr,
};
static const struct dw_pcie_host_ops intel_pcie_dw_ops = {
@@ -409,6 +402,7 @@ static int intel_pcie_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, pcie);
pci = &pcie->pci;
pci->dev = dev;
+ pci->use_parent_dt_ranges = true;
pp = &pci->pp;
ret = intel_pcie_get_resources(pdev);
diff --git a/drivers/pci/controller/dwc/pcie-keembay.c b/drivers/pci/controller/dwc/pcie-keembay.c
index 278205db60a2..67dd3337b447 100644
--- a/drivers/pci/controller/dwc/pcie-keembay.c
+++ b/drivers/pci/controller/dwc/pcie-keembay.c
@@ -101,7 +101,7 @@ static void keembay_pcie_ltssm_set(struct keembay_pcie *pcie, bool enable)
writel(val, pcie->apb_base + PCIE_REGS_PCIE_APP_CNTRL);
}
-static int keembay_pcie_link_up(struct dw_pcie *pci)
+static bool keembay_pcie_link_up(struct dw_pcie *pci)
{
struct keembay_pcie *pcie = dev_get_drvdata(pci->dev);
u32 val;
diff --git a/drivers/pci/controller/dwc/pcie-kirin.c b/drivers/pci/controller/dwc/pcie-kirin.c
index 1b2088acb538..91559c8b1866 100644
--- a/drivers/pci/controller/dwc/pcie-kirin.c
+++ b/drivers/pci/controller/dwc/pcie-kirin.c
@@ -216,10 +216,9 @@ static int hi3660_pcie_phy_start(struct hi3660_pcie_phy *phy)
usleep_range(PIPE_CLK_WAIT_MIN, PIPE_CLK_WAIT_MAX);
reg_val = kirin_apb_phy_readl(phy, PCIE_APB_PHY_STATUS0);
- if (reg_val & PIPE_CLK_STABLE) {
- dev_err(dev, "PIPE clk is not stable\n");
- return -EINVAL;
- }
+ if (reg_val & PIPE_CLK_STABLE)
+ return dev_err_probe(dev, -ETIMEDOUT,
+ "PIPE clk is not stable\n");
return 0;
}
@@ -371,10 +370,9 @@ static int kirin_pcie_get_gpio_enable(struct kirin_pcie *pcie,
if (ret < 0)
return 0;
- if (ret > MAX_PCI_SLOTS) {
- dev_err(dev, "Too many GPIO clock requests!\n");
- return -EINVAL;
- }
+ if (ret > MAX_PCI_SLOTS)
+ return dev_err_probe(dev, -EINVAL,
+ "Too many GPIO clock requests!\n");
pcie->n_gpio_clkreq = ret;
@@ -420,17 +418,16 @@ static int kirin_pcie_parse_port(struct kirin_pcie *pcie,
"unable to get a valid reset gpio\n");
}
- if (pcie->num_slots + 1 >= MAX_PCI_SLOTS) {
- dev_err(dev, "Too many PCI slots!\n");
- return -EINVAL;
- }
+ if (pcie->num_slots + 1 >= MAX_PCI_SLOTS)
+ return dev_err_probe(dev, -EINVAL,
+ "Too many PCI slots!\n");
+
pcie->num_slots++;
ret = of_pci_get_devfn(child);
- if (ret < 0) {
- dev_err(dev, "failed to parse devfn: %d\n", ret);
- return ret;
- }
+ if (ret < 0)
+ return dev_err_probe(dev, ret,
+ "failed to parse devfn\n");
slot = PCI_SLOT(ret);
@@ -452,7 +449,7 @@ static long kirin_pcie_get_resource(struct kirin_pcie *kirin_pcie,
struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct device_node *child, *node = dev->of_node;
+ struct device_node *node = dev->of_node;
void __iomem *apb_base;
int ret;
@@ -477,17 +474,13 @@ static long kirin_pcie_get_resource(struct kirin_pcie *kirin_pcie,
return ret;
/* Parse OF children */
- for_each_available_child_of_node(node, child) {
+ for_each_available_child_of_node_scoped(node, child) {
ret = kirin_pcie_parse_port(kirin_pcie, pdev, child);
if (ret)
- goto put_node;
+ return ret;
}
return 0;
-
-put_node:
- of_node_put(child);
- return ret;
}
static void kirin_pcie_sideband_dbi_w_mode(struct kirin_pcie *kirin_pcie,
@@ -593,16 +586,13 @@ static void kirin_pcie_write_dbi(struct dw_pcie *pci, void __iomem *base,
kirin_pcie_sideband_dbi_w_mode(kirin_pcie, false);
}
-static int kirin_pcie_link_up(struct dw_pcie *pci)
+static bool kirin_pcie_link_up(struct dw_pcie *pci)
{
struct kirin_pcie *kirin_pcie = to_kirin_pcie(pci);
u32 val;
regmap_read(kirin_pcie->apb, PCIE_APB_PHY_STATUS0, &val);
- if ((val & PCIE_LINKUP_ENABLE) == PCIE_LINKUP_ENABLE)
- return 1;
-
- return 0;
+ return (val & PCIE_LINKUP_ENABLE) == PCIE_LINKUP_ENABLE;
}
static int kirin_pcie_start_link(struct dw_pcie *pci)
@@ -729,16 +719,9 @@ static int kirin_pcie_probe(struct platform_device *pdev)
struct dw_pcie *pci;
int ret;
- if (!dev->of_node) {
- dev_err(dev, "NULL node\n");
- return -EINVAL;
- }
-
data = of_device_get_match_data(dev);
- if (!data) {
- dev_err(dev, "OF data missing\n");
- return -EINVAL;
- }
+ if (!data)
+ return dev_err_probe(dev, -EINVAL, "OF data missing\n");
kirin_pcie = devm_kzalloc(dev, sizeof(struct kirin_pcie), GFP_KERNEL);
if (!kirin_pcie)
diff --git a/drivers/pci/controller/dwc/pcie-qcom-ep.c b/drivers/pci/controller/dwc/pcie-qcom-ep.c
index c08f64d7a825..bf7c6ac0f3e3 100644
--- a/drivers/pci/controller/dwc/pcie-qcom-ep.c
+++ b/drivers/pci/controller/dwc/pcie-qcom-ep.c
@@ -48,7 +48,7 @@
#define PARF_DBI_BASE_ADDR_HI 0x354
#define PARF_SLV_ADDR_SPACE_SIZE 0x358
#define PARF_SLV_ADDR_SPACE_SIZE_HI 0x35c
-#define PARF_NO_SNOOP_OVERIDE 0x3d4
+#define PARF_NO_SNOOP_OVERRIDE 0x3d4
#define PARF_ATU_BASE_ADDR 0x634
#define PARF_ATU_BASE_ADDR_HI 0x638
#define PARF_SRIS_MODE 0x644
@@ -60,6 +60,7 @@
#define PARF_DEVICE_TYPE 0x1000
#define PARF_BDF_TO_SID_CFG 0x2c00
#define PARF_INT_ALL_5_MASK 0x2dcc
+#define PARF_INT_ALL_3_MASK 0x2e18
/* PARF_INT_ALL_{STATUS/CLEAR/MASK} register fields */
#define PARF_INT_ALL_LINK_DOWN BIT(1)
@@ -89,9 +90,9 @@
#define PARF_DEBUG_INT_CFG_BUS_MASTER_EN BIT(2)
#define PARF_DEBUG_INT_RADM_PM_TURNOFF BIT(3)
-/* PARF_NO_SNOOP_OVERIDE register fields */
-#define WR_NO_SNOOP_OVERIDE_EN BIT(1)
-#define RD_NO_SNOOP_OVERIDE_EN BIT(3)
+/* PARF_NO_SNOOP_OVERRIDE register fields */
+#define WR_NO_SNOOP_OVERRIDE_EN BIT(1)
+#define RD_NO_SNOOP_OVERRIDE_EN BIT(3)
/* PARF_DEVICE_TYPE register fields */
#define PARF_DEVICE_TYPE_EP 0x0
@@ -132,6 +133,9 @@
/* PARF_INT_ALL_5_MASK fields */
#define PARF_INT_ALL_5_MHI_RAM_DATA_PARITY_ERR BIT(0)
+/* PARF_INT_ALL_3_MASK fields */
+#define PARF_INT_ALL_3_PTM_UPDATING BIT(4)
+
/* ELBI registers */
#define ELBI_SYS_STTS 0x08
#define ELBI_CS2_ENABLE 0xa4
@@ -261,7 +265,7 @@ static void qcom_pcie_ep_configure_tcsr(struct qcom_pcie_ep *pcie_ep)
}
}
-static int qcom_pcie_dw_link_up(struct dw_pcie *pci)
+static bool qcom_pcie_dw_link_up(struct dw_pcie *pci)
{
struct qcom_pcie_ep *pcie_ep = to_pcie_ep(pci);
u32 reg;
@@ -497,6 +501,10 @@ static int qcom_pcie_perst_deassert(struct dw_pcie *pci)
writel_relaxed(val, pcie_ep->parf + PARF_INT_ALL_5_MASK);
}
+ val = readl_relaxed(pcie_ep->parf + PARF_INT_ALL_3_MASK);
+ val &= ~PARF_INT_ALL_3_PTM_UPDATING;
+ writel_relaxed(val, pcie_ep->parf + PARF_INT_ALL_3_MASK);
+
ret = dw_pcie_ep_init_registers(&pcie_ep->pci.ep);
if (ret) {
dev_err(dev, "Failed to complete initialization: %d\n", ret);
@@ -529,8 +537,8 @@ static int qcom_pcie_perst_deassert(struct dw_pcie *pci)
writel_relaxed(val, pcie_ep->parf + PARF_LTSSM);
if (pcie_ep->cfg && pcie_ep->cfg->override_no_snoop)
- writel_relaxed(WR_NO_SNOOP_OVERIDE_EN | RD_NO_SNOOP_OVERIDE_EN,
- pcie_ep->parf + PARF_NO_SNOOP_OVERIDE);
+ writel_relaxed(WR_NO_SNOOP_OVERRIDE_EN | RD_NO_SNOOP_OVERRIDE_EN,
+ pcie_ep->parf + PARF_NO_SNOOP_OVERRIDE);
return 0;
@@ -825,6 +833,10 @@ static const struct pci_epc_features qcom_pcie_epc_features = {
.msi_capable = true,
.msix_capable = false,
.align = SZ_4K,
+ .bar[BAR_0] = { .only_64bit = true, },
+ .bar[BAR_1] = { .type = BAR_RESERVED, },
+ .bar[BAR_2] = { .only_64bit = true, },
+ .bar[BAR_3] = { .type = BAR_RESERVED, },
};
static const struct pci_epc_features *
@@ -933,6 +945,7 @@ static const struct of_device_id qcom_pcie_ep_match[] = {
{ .compatible = "qcom,sa8775p-pcie-ep", .data = &cfg_1_34_0},
{ .compatible = "qcom,sdx55-pcie-ep", },
{ .compatible = "qcom,sm8450-pcie-ep", },
+ { .compatible = "qcom,sar2130p-pcie-ep", },
{ }
};
MODULE_DEVICE_TABLE(of, qcom_pcie_ep_match);
diff --git a/drivers/pci/controller/dwc/pcie-qcom.c b/drivers/pci/controller/dwc/pcie-qcom.c
index e4d3366ead1f..9b12f2f02042 100644
--- a/drivers/pci/controller/dwc/pcie-qcom.c
+++ b/drivers/pci/controller/dwc/pcie-qcom.c
@@ -61,7 +61,7 @@
#define PARF_DBI_BASE_ADDR_V2_HI 0x354
#define PARF_SLV_ADDR_SPACE_SIZE_V2 0x358
#define PARF_SLV_ADDR_SPACE_SIZE_V2_HI 0x35c
-#define PARF_NO_SNOOP_OVERIDE 0x3d4
+#define PARF_NO_SNOOP_OVERRIDE 0x3d4
#define PARF_ATU_BASE_ADDR 0x634
#define PARF_ATU_BASE_ADDR_HI 0x638
#define PARF_DEVICE_TYPE 0x1000
@@ -135,9 +135,9 @@
#define PARF_INT_ALL_LINK_UP BIT(13)
#define PARF_INT_MSI_DEV_0_7 GENMASK(30, 23)
-/* PARF_NO_SNOOP_OVERIDE register fields */
-#define WR_NO_SNOOP_OVERIDE_EN BIT(1)
-#define RD_NO_SNOOP_OVERIDE_EN BIT(3)
+/* PARF_NO_SNOOP_OVERRIDE register fields */
+#define WR_NO_SNOOP_OVERRIDE_EN BIT(1)
+#define RD_NO_SNOOP_OVERRIDE_EN BIT(3)
/* PARF_DEVICE_TYPE register fields */
#define DEVICE_TYPE_RC 0x4
@@ -289,7 +289,7 @@ static void qcom_ep_reset_assert(struct qcom_pcie *pcie)
static void qcom_ep_reset_deassert(struct qcom_pcie *pcie)
{
/* Ensure that PERST has been asserted for at least 100 ms */
- msleep(100);
+ msleep(PCIE_T_PVPERL_MS);
gpiod_set_value_cansleep(pcie->reset, 0);
usleep_range(PERST_DELAY_US, PERST_DELAY_US + 500);
}
@@ -1007,8 +1007,8 @@ static int qcom_pcie_post_init_2_7_0(struct qcom_pcie *pcie)
const struct qcom_pcie_cfg *pcie_cfg = pcie->cfg;
if (pcie_cfg->override_no_snoop)
- writel(WR_NO_SNOOP_OVERIDE_EN | RD_NO_SNOOP_OVERIDE_EN,
- pcie->parf + PARF_NO_SNOOP_OVERIDE);
+ writel(WR_NO_SNOOP_OVERRIDE_EN | RD_NO_SNOOP_OVERRIDE_EN,
+ pcie->parf + PARF_NO_SNOOP_OVERRIDE);
qcom_pcie_clear_aspm_l0s(pcie->pci);
qcom_pcie_clear_hpc(pcie->pci);
@@ -1221,12 +1221,12 @@ static int qcom_pcie_post_init_2_9_0(struct qcom_pcie *pcie)
return 0;
}
-static int qcom_pcie_link_up(struct dw_pcie *pci)
+static bool qcom_pcie_link_up(struct dw_pcie *pci)
{
u16 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
u16 val = readw(pci->dbi_base + offset + PCI_EXP_LNKSTA);
- return !!(val & PCI_EXP_LNKSTA_DLLLA);
+ return val & PCI_EXP_LNKSTA_DLLLA;
}
static int qcom_pcie_host_init(struct dw_pcie_rp *pp)
@@ -1564,6 +1564,7 @@ static irqreturn_t qcom_pcie_global_irq_thread(int irq, void *data)
writel_relaxed(status, pcie->parf + PARF_INT_ALL_CLEAR);
if (FIELD_GET(PARF_INT_ALL_LINK_UP, status)) {
+ msleep(PCIE_RESET_CONFIG_WAIT_MS);
dev_dbg(dev, "Received Link up event. Starting enumeration!\n");
/* Rescan the bus to enumerate endpoint devices */
pci_lock_rescan_remove();
@@ -1840,6 +1841,7 @@ static const struct of_device_id qcom_pcie_match[] = {
{ .compatible = "qcom,pcie-apq8064", .data = &cfg_2_1_0 },
{ .compatible = "qcom,pcie-apq8084", .data = &cfg_1_0_0 },
{ .compatible = "qcom,pcie-ipq4019", .data = &cfg_2_4_0 },
+ { .compatible = "qcom,pcie-ipq5018", .data = &cfg_2_9_0 },
{ .compatible = "qcom,pcie-ipq6018", .data = &cfg_2_9_0 },
{ .compatible = "qcom,pcie-ipq8064", .data = &cfg_2_1_0 },
{ .compatible = "qcom,pcie-ipq8064-v2", .data = &cfg_2_1_0 },
diff --git a/drivers/pci/controller/dwc/pcie-rcar-gen4.c b/drivers/pci/controller/dwc/pcie-rcar-gen4.c
index fc872dd35029..18055807a4f5 100644
--- a/drivers/pci/controller/dwc/pcie-rcar-gen4.c
+++ b/drivers/pci/controller/dwc/pcie-rcar-gen4.c
@@ -87,7 +87,7 @@ struct rcar_gen4_pcie {
#define to_rcar_gen4_pcie(_dw) container_of(_dw, struct rcar_gen4_pcie, dw)
/* Common */
-static int rcar_gen4_pcie_link_up(struct dw_pcie *dw)
+static bool rcar_gen4_pcie_link_up(struct dw_pcie *dw)
{
struct rcar_gen4_pcie *rcar = to_rcar_gen4_pcie(dw);
u32 val, mask;
@@ -403,6 +403,7 @@ static const struct pci_epc_features rcar_gen4_pcie_epc_features = {
.msix_capable = false,
.bar[BAR_1] = { .type = BAR_RESERVED, },
.bar[BAR_3] = { .type = BAR_RESERVED, },
+ .bar[BAR_4] = { .type = BAR_FIXED, .fixed_size = 256 },
.bar[BAR_5] = { .type = BAR_RESERVED, },
.align = SZ_1M,
};
diff --git a/drivers/pci/controller/dwc/pcie-spear13xx.c b/drivers/pci/controller/dwc/pcie-spear13xx.c
index ff986ced56b2..01794a9d3ad2 100644
--- a/drivers/pci/controller/dwc/pcie-spear13xx.c
+++ b/drivers/pci/controller/dwc/pcie-spear13xx.c
@@ -110,15 +110,12 @@ static void spear13xx_pcie_enable_interrupts(struct spear13xx_pcie *spear13xx_pc
MSI_CTRL_INT, &app_reg->int_mask);
}
-static int spear13xx_pcie_link_up(struct dw_pcie *pci)
+static bool spear13xx_pcie_link_up(struct dw_pcie *pci)
{
struct spear13xx_pcie *spear13xx_pcie = to_spear13xx_pcie(pci);
struct pcie_app_reg __iomem *app_reg = spear13xx_pcie->app_base;
- if (readl(&app_reg->app_status_1) & XMLH_LINK_UP)
- return 1;
-
- return 0;
+ return readl(&app_reg->app_status_1) & XMLH_LINK_UP;
}
static int spear13xx_pcie_host_init(struct dw_pcie_rp *pp)
diff --git a/drivers/pci/controller/dwc/pcie-tegra194.c b/drivers/pci/controller/dwc/pcie-tegra194.c
index 5103995cd6c7..4f26086f25da 100644
--- a/drivers/pci/controller/dwc/pcie-tegra194.c
+++ b/drivers/pci/controller/dwc/pcie-tegra194.c
@@ -713,7 +713,16 @@ static void init_host_aspm(struct tegra_pcie_dw *pcie)
static void init_debugfs(struct tegra_pcie_dw *pcie)
{
- debugfs_create_devm_seqfile(pcie->dev, "aspm_state_cnt", pcie->debugfs,
+ struct device *dev = pcie->dev;
+ char *name;
+
+ name = devm_kasprintf(dev, GFP_KERNEL, "%pOFP", dev->of_node);
+ if (!name)
+ return;
+
+ pcie->debugfs = debugfs_create_dir(name, NULL);
+
+ debugfs_create_devm_seqfile(dev, "aspm_state_cnt", pcie->debugfs,
aspm_state_cnt);
}
#else
@@ -1027,12 +1036,12 @@ retry_link:
return 0;
}
-static int tegra_pcie_dw_link_up(struct dw_pcie *pci)
+static bool tegra_pcie_dw_link_up(struct dw_pcie *pci)
{
struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
u32 val = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKSTA);
- return !!(val & PCI_EXP_LNKSTA_DLLLA);
+ return val & PCI_EXP_LNKSTA_DLLLA;
}
static void tegra_pcie_dw_stop_link(struct dw_pcie *pci)
@@ -1634,7 +1643,6 @@ static void tegra_pcie_deinit_controller(struct tegra_pcie_dw *pcie)
static int tegra_pcie_config_rp(struct tegra_pcie_dw *pcie)
{
struct device *dev = pcie->dev;
- char *name;
int ret;
pm_runtime_enable(dev);
@@ -1664,13 +1672,6 @@ static int tegra_pcie_config_rp(struct tegra_pcie_dw *pcie)
goto fail_host_init;
}
- name = devm_kasprintf(dev, GFP_KERNEL, "%pOFP", dev->of_node);
- if (!name) {
- ret = -ENOMEM;
- goto fail_host_init;
- }
-
- pcie->debugfs = debugfs_create_dir(name, NULL);
init_debugfs(pcie);
return ret;
diff --git a/drivers/pci/controller/dwc/pcie-uniphier.c b/drivers/pci/controller/dwc/pcie-uniphier.c
index 5757ca3803c9..297e7a3d9b36 100644
--- a/drivers/pci/controller/dwc/pcie-uniphier.c
+++ b/drivers/pci/controller/dwc/pcie-uniphier.c
@@ -135,7 +135,7 @@ static int uniphier_pcie_wait_rc(struct uniphier_pcie *pcie)
return 0;
}
-static int uniphier_pcie_link_up(struct dw_pcie *pci)
+static bool uniphier_pcie_link_up(struct dw_pcie *pci)
{
struct uniphier_pcie *pcie = to_uniphier_pcie(pci);
u32 val, mask;
@@ -279,7 +279,7 @@ static int uniphier_pcie_config_intx_irq(struct dw_pcie_rp *pp)
goto out_put_node;
}
- pcie->intx_irq_domain = irq_domain_add_linear(np_intc, PCI_NUM_INTX,
+ pcie->intx_irq_domain = irq_domain_create_linear(of_fwnode_handle(np_intc), PCI_NUM_INTX,
&uniphier_intx_domain_ops, pp);
if (!pcie->intx_irq_domain) {
dev_err(pci->dev, "Failed to get INTx domain\n");
diff --git a/drivers/pci/controller/dwc/pcie-visconti.c b/drivers/pci/controller/dwc/pcie-visconti.c
index 318c278e65c8..cdeac6177143 100644
--- a/drivers/pci/controller/dwc/pcie-visconti.c
+++ b/drivers/pci/controller/dwc/pcie-visconti.c
@@ -121,13 +121,13 @@ static u32 visconti_mpu_readl(struct visconti_pcie *pcie, u32 reg)
return readl_relaxed(pcie->mpu_base + reg);
}
-static int visconti_pcie_link_up(struct dw_pcie *pci)
+static bool visconti_pcie_link_up(struct dw_pcie *pci)
{
struct visconti_pcie *pcie = dev_get_drvdata(pci->dev);
void __iomem *addr = pcie->ulreg_base;
u32 val = readl_relaxed(addr + PCIE_UL_REG_V_PHY_ST_02);
- return !!(val & PCIE_UL_S_L0);
+ return val & PCIE_UL_S_L0;
}
static int visconti_pcie_start_link(struct dw_pcie *pci)
diff --git a/drivers/pci/controller/mobiveil/pcie-layerscape-gen4.c b/drivers/pci/controller/mobiveil/pcie-layerscape-gen4.c
index 5af22bee913b..4919b27eaf44 100644
--- a/drivers/pci/controller/mobiveil/pcie-layerscape-gen4.c
+++ b/drivers/pci/controller/mobiveil/pcie-layerscape-gen4.c
@@ -53,18 +53,13 @@ static inline void ls_g4_pcie_pf_writel(struct ls_g4_pcie *pcie,
iowrite32(val, pcie->pci.csr_axi_slave_base + PCIE_PF_OFF + off);
}
-static int ls_g4_pcie_link_up(struct mobiveil_pcie *pci)
+static bool ls_g4_pcie_link_up(struct mobiveil_pcie *pci)
{
struct ls_g4_pcie *pcie = to_ls_g4_pcie(pci);
u32 state;
state = ls_g4_pcie_pf_readl(pcie, PCIE_PF_DBG);
- state = state & PF_DBG_LTSSM_MASK;
-
- if (state == PF_DBG_LTSSM_L0)
- return 1;
-
- return 0;
+ return (state & PF_DBG_LTSSM_MASK) == PF_DBG_LTSSM_L0;
}
static void ls_g4_pcie_disable_interrupt(struct ls_g4_pcie *pcie)
@@ -174,8 +169,7 @@ static int ls_g4_pcie_interrupt_init(struct mobiveil_pcie *mv_pci)
static void ls_g4_pcie_reset(struct work_struct *work)
{
- struct delayed_work *dwork = container_of(work, struct delayed_work,
- work);
+ struct delayed_work *dwork = to_delayed_work(work);
struct ls_g4_pcie *pcie = container_of(dwork, struct ls_g4_pcie, dwork);
struct mobiveil_pcie *mv_pci = &pcie->pci;
u16 ctrl;
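
to_delayed_work() is just the container_of() idiom packaged as a helper: it recovers the enclosing delayed_work from a pointer to its embedded work_struct. A self-contained userspace sketch of the pointer arithmetic (the struct definitions are simplified stand-ins, not the kernel's):

#include <stdio.h>
#include <stddef.h>

/* container_of(): step back from a member pointer to its enclosing struct. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct work { int pending; };
struct delayed_work { struct work work; int timer; };

static struct delayed_work *to_delayed_work(struct work *w)
{
	return container_of(w, struct delayed_work, work);
}

int main(void)
{
	struct delayed_work dw = { .timer = 42 };

	/* Recover dw from the address of its embedded member. */
	printf("timer = %d\n", to_delayed_work(&dw.work)->timer);	/* 42 */
	return 0;
}
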
diff --git a/drivers/pci/controller/mobiveil/pcie-mobiveil-host.c b/drivers/pci/controller/mobiveil/pcie-mobiveil-host.c
index 0e088e74155d..a600f46ee3c3 100644
--- a/drivers/pci/controller/mobiveil/pcie-mobiveil-host.c
+++ b/drivers/pci/controller/mobiveil/pcie-mobiveil-host.c
@@ -435,12 +435,12 @@ static const struct irq_domain_ops msi_domain_ops = {
static int mobiveil_allocate_msi_domains(struct mobiveil_pcie *pcie)
{
struct device *dev = &pcie->pdev->dev;
- struct fwnode_handle *fwnode = of_node_to_fwnode(dev->of_node);
+ struct fwnode_handle *fwnode = of_fwnode_handle(dev->of_node);
struct mobiveil_msi *msi = &pcie->rp.msi;
mutex_init(&msi->lock);
- msi->dev_domain = irq_domain_add_linear(NULL, msi->num_of_vectors,
- &msi_domain_ops, pcie);
+ msi->dev_domain = irq_domain_create_linear(NULL, msi->num_of_vectors,
+ &msi_domain_ops, pcie);
if (!msi->dev_domain) {
dev_err(dev, "failed to create IRQ domain\n");
return -ENOMEM;
@@ -461,12 +461,11 @@ static int mobiveil_allocate_msi_domains(struct mobiveil_pcie *pcie)
static int mobiveil_pcie_init_irq_domain(struct mobiveil_pcie *pcie)
{
struct device *dev = &pcie->pdev->dev;
- struct device_node *node = dev->of_node;
struct mobiveil_root_port *rp = &pcie->rp;
/* setup INTx */
- rp->intx_domain = irq_domain_add_linear(node, PCI_NUM_INTX,
- &intx_domain_ops, pcie);
+ rp->intx_domain = irq_domain_create_linear(of_fwnode_handle(dev->of_node), PCI_NUM_INTX,
+ &intx_domain_ops, pcie);
if (!rp->intx_domain) {
dev_err(dev, "Failed to get a INTx IRQ domain\n");
diff --git a/drivers/pci/controller/mobiveil/pcie-mobiveil.h b/drivers/pci/controller/mobiveil/pcie-mobiveil.h
index e63abb887ee3..662f17f9bf65 100644
--- a/drivers/pci/controller/mobiveil/pcie-mobiveil.h
+++ b/drivers/pci/controller/mobiveil/pcie-mobiveil.h
@@ -160,7 +160,7 @@ struct mobiveil_root_port {
};
struct mobiveil_pab_ops {
- int (*link_up)(struct mobiveil_pcie *pcie);
+ bool (*link_up)(struct mobiveil_pcie *pcie);
};
struct mobiveil_pcie {
diff --git a/drivers/pci/controller/pci-aardvark.c b/drivers/pci/controller/pci-aardvark.c
index a29796cce420..7bac64533b14 100644
--- a/drivers/pci/controller/pci-aardvark.c
+++ b/drivers/pci/controller/pci-aardvark.c
@@ -1456,9 +1456,8 @@ static int advk_pcie_init_msi_irq_domain(struct advk_pcie *pcie)
raw_spin_lock_init(&pcie->msi_irq_lock);
mutex_init(&pcie->msi_used_lock);
- pcie->msi_inner_domain =
- irq_domain_add_linear(NULL, MSI_IRQ_NUM,
- &advk_msi_domain_ops, pcie);
+ pcie->msi_inner_domain = irq_domain_create_linear(NULL, MSI_IRQ_NUM,
+ &advk_msi_domain_ops, pcie);
if (!pcie->msi_inner_domain)
return -ENOMEM;
@@ -1508,9 +1507,8 @@ static int advk_pcie_init_irq_domain(struct advk_pcie *pcie)
irq_chip->irq_mask = advk_pcie_irq_mask;
irq_chip->irq_unmask = advk_pcie_irq_unmask;
- pcie->irq_domain =
- irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX,
- &advk_pcie_irq_domain_ops, pcie);
+ pcie->irq_domain = irq_domain_create_linear(of_fwnode_handle(pcie_intc_node), PCI_NUM_INTX,
+ &advk_pcie_irq_domain_ops, pcie);
if (!pcie->irq_domain) {
dev_err(dev, "Failed to get a INTx IRQ domain\n");
ret = -ENOMEM;
@@ -1549,9 +1547,7 @@ static const struct irq_domain_ops advk_pcie_rp_irq_domain_ops = {
static int advk_pcie_init_rp_irq_domain(struct advk_pcie *pcie)
{
- pcie->rp_irq_domain = irq_domain_add_linear(NULL, 1,
- &advk_pcie_rp_irq_domain_ops,
- pcie);
+ pcie->rp_irq_domain = irq_domain_create_linear(NULL, 1, &advk_pcie_rp_irq_domain_ops, pcie);
if (!pcie->rp_irq_domain) {
dev_err(&pcie->pdev->dev, "Failed to add Root Port IRQ domain\n");
return -ENOMEM;
diff --git a/drivers/pci/controller/pci-ftpci100.c b/drivers/pci/controller/pci-ftpci100.c
index ffdeed25e961..28e43831c0f1 100644
--- a/drivers/pci/controller/pci-ftpci100.c
+++ b/drivers/pci/controller/pci-ftpci100.c
@@ -345,8 +345,8 @@ static int faraday_pci_setup_cascaded_irq(struct faraday_pci *p)
return irq ?: -EINVAL;
}
- p->irqdomain = irq_domain_add_linear(intc, PCI_NUM_INTX,
- &faraday_pci_irqdomain_ops, p);
+ p->irqdomain = irq_domain_create_linear(of_fwnode_handle(intc), PCI_NUM_INTX,
+ &faraday_pci_irqdomain_ops, p);
of_node_put(intc);
if (!p->irqdomain) {
dev_err(p->dev, "failed to create Gemini PCI IRQ domain\n");
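
The recurring irq_domain_add_linear() to irq_domain_create_linear() conversions in this series switch the identifying handle from a struct device_node to the more generic struct fwnode_handle; of_fwnode_handle() bridges the two, and a NULL handle keeps the old anonymous-domain behaviour. A kernel-style sketch of the converted idiom (the example_* names are placeholders, not from any driver here):

#include <linux/errno.h>
#include <linux/irqdomain.h>
#include <linux/of.h>
#include <linux/pci.h>

static const struct irq_domain_ops example_intx_ops;	/* placeholder ops */

static int example_init_intx(struct device_node *node, void *priv)
{
	struct irq_domain *d;

	/* fwnode-based variant of the former irq_domain_add_linear() call */
	d = irq_domain_create_linear(of_fwnode_handle(node), PCI_NUM_INTX,
				     &example_intx_ops, priv);
	return d ? 0 : -ENOMEM;
}
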
diff --git a/drivers/pci/controller/pci-host-common.c b/drivers/pci/controller/pci-host-common.c
index f441bfd6f96a..b37052863847 100644
--- a/drivers/pci/controller/pci-host-common.c
+++ b/drivers/pci/controller/pci-host-common.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Generic PCI host driver common code
+ * Common library for PCI host controller drivers
*
* Copyright (C) 2014 ARM Limited
*
@@ -15,6 +15,8 @@
#include <linux/pci-ecam.h>
#include <linux/platform_device.h>
+#include "pci-host-common.h"
+
static void gen_pci_unmap_cfg(void *ptr)
{
pci_ecam_free((struct pci_config_window *)ptr);
@@ -49,25 +51,21 @@ static struct pci_config_window *gen_pci_init(struct device *dev,
return cfg;
}
-int pci_host_common_probe(struct platform_device *pdev)
+int pci_host_common_init(struct platform_device *pdev,
+ const struct pci_ecam_ops *ops)
{
struct device *dev = &pdev->dev;
struct pci_host_bridge *bridge;
struct pci_config_window *cfg;
- const struct pci_ecam_ops *ops;
-
- ops = of_device_get_match_data(&pdev->dev);
- if (!ops)
- return -ENODEV;
bridge = devm_pci_alloc_host_bridge(dev, 0);
if (!bridge)
return -ENOMEM;
- platform_set_drvdata(pdev, bridge);
-
of_pci_check_probe_only();
+ platform_set_drvdata(pdev, bridge);
+
/* Parse and map our Configuration Space windows */
cfg = gen_pci_init(dev, bridge, ops);
if (IS_ERR(cfg))
@@ -81,6 +79,18 @@ int pci_host_common_probe(struct platform_device *pdev)
return pci_host_probe(bridge);
}
+EXPORT_SYMBOL_GPL(pci_host_common_init);
+
+int pci_host_common_probe(struct platform_device *pdev)
+{
+ const struct pci_ecam_ops *ops;
+
+ ops = of_device_get_match_data(&pdev->dev);
+ if (!ops)
+ return -ENODEV;
+
+ return pci_host_common_init(pdev, ops);
+}
EXPORT_SYMBOL_GPL(pci_host_common_probe);
void pci_host_common_remove(struct platform_device *pdev)
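
Splitting pci_host_common_init() out of pci_host_common_probe() lets a driver that already knows its pci_ecam_ops, rather than fetching them from OF match data, reuse the common bridge setup. A sketch of such a caller, assuming the driver supplies its own ops table (the example_* names are hypothetical):

#include <linux/pci.h>
#include <linux/pci-ecam.h>
#include <linux/platform_device.h>
#include "pci-host-common.h"

/* Hypothetical ECAM ops selected by the driver itself. */
static const struct pci_ecam_ops example_ecam_ops = {
	.pci_ops = {
		.map_bus	= pci_ecam_map_bus,
		.read		= pci_generic_config_read,
		.write		= pci_generic_config_write,
	}
};

static int example_probe(struct platform_device *pdev)
{
	/* Bypasses the of_device_get_match_data() lookup in the probe path. */
	return pci_host_common_init(pdev, &example_ecam_ops);
}
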
@@ -94,5 +104,5 @@ void pci_host_common_remove(struct platform_device *pdev)
}
EXPORT_SYMBOL_GPL(pci_host_common_remove);
-MODULE_DESCRIPTION("Generic PCI host common driver");
+MODULE_DESCRIPTION("Common library for PCI host controller drivers");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/pci/controller/pci-host-common.h b/drivers/pci/controller/pci-host-common.h
new file mode 100644
index 000000000000..65bd9e032353
--- /dev/null
+++ b/drivers/pci/controller/pci-host-common.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Common library for PCI host controller drivers
+ *
+ * Copyright (C) 2014 ARM Limited
+ *
+ * Author: Will Deacon <will.deacon@arm.com>
+ */
+
+#ifndef _PCI_HOST_COMMON_H
+#define _PCI_HOST_COMMON_H
+
+struct pci_ecam_ops;
+
+int pci_host_common_probe(struct platform_device *pdev);
+int pci_host_common_init(struct platform_device *pdev,
+ const struct pci_ecam_ops *ops);
+void pci_host_common_remove(struct platform_device *pdev);
+
+#endif
diff --git a/drivers/pci/controller/pci-host-generic.c b/drivers/pci/controller/pci-host-generic.c
index 4051b9b61dac..c1bc0d34348f 100644
--- a/drivers/pci/controller/pci-host-generic.c
+++ b/drivers/pci/controller/pci-host-generic.c
@@ -14,6 +14,8 @@
#include <linux/pci-ecam.h>
#include <linux/platform_device.h>
+#include "pci-host-common.h"
+
static const struct pci_ecam_ops gen_pci_cfg_cam_bus_ops = {
.bus_shift = 16,
.pci_ops = {
diff --git a/drivers/pci/controller/pci-hyperv-intf.c b/drivers/pci/controller/pci-hyperv-intf.c
index cc96be450360..28b3e93d31c0 100644
--- a/drivers/pci/controller/pci-hyperv-intf.c
+++ b/drivers/pci/controller/pci-hyperv-intf.c
@@ -14,6 +14,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/hyperv.h>
+#include <linux/export.h>
struct hyperv_pci_block_ops hvpci_block_ops;
EXPORT_SYMBOL_GPL(hvpci_block_ops);
diff --git a/drivers/pci/controller/pci-hyperv.c b/drivers/pci/controller/pci-hyperv.c
index 6084b38bdda1..13680363ff19 100644
--- a/drivers/pci/controller/pci-hyperv.c
+++ b/drivers/pci/controller/pci-hyperv.c
@@ -50,6 +50,7 @@
#include <linux/irqdomain.h>
#include <linux/acpi.h>
#include <linux/sizes.h>
+#include <linux/of_irq.h>
#include <asm/mshyperv.h>
/*
@@ -309,8 +310,6 @@ struct pci_packet {
void (*completion_func)(void *context, struct pci_response *resp,
int resp_packet_size);
void *compl_ctxt;
-
- struct pci_message message[];
};
/*
@@ -601,7 +600,7 @@ static unsigned int hv_msi_get_int_vector(struct irq_data *data)
#define hv_msi_prepare pci_msi_prepare
/**
- * hv_arch_irq_unmask() - "Unmask" the IRQ by setting its current
+ * hv_irq_retarget_interrupt() - "Unmask" the IRQ by setting its current
* affinity.
* @data: Describes the IRQ
*
@@ -610,7 +609,7 @@ static unsigned int hv_msi_get_int_vector(struct irq_data *data)
* is built out of this PCI bus's instance GUID and the function
* number of the device.
*/
-static void hv_arch_irq_unmask(struct irq_data *data)
+static void hv_irq_retarget_interrupt(struct irq_data *data)
{
struct msi_desc *msi_desc = irq_data_get_msi_desc(data);
struct hv_retarget_device_interrupt *params;
@@ -715,6 +714,20 @@ out:
dev_err(&hbus->hdev->device,
"%s() failed: %#llx", __func__, res);
}
+
+static void hv_arch_irq_unmask(struct irq_data *data)
+{
+ if (hv_root_partition())
+ /*
+ * In case of the nested root partition, the nested hypervisor
+ * is taking care of interrupt remapping and thus the
+ * MAP_DEVICE_INTERRUPT hypercall is required instead of
+ * RETARGET_INTERRUPT.
+ */
+ (void)hv_map_msi_interrupt(data, NULL);
+ else
+ hv_irq_retarget_interrupt(data);
+}
#elif defined(CONFIG_ARM64)
/*
* SPI vectors to use for vPCI; arch SPIs range is [32, 1019], but leaving a bit
@@ -817,9 +830,17 @@ static int hv_pci_vec_irq_gic_domain_alloc(struct irq_domain *domain,
int ret;
fwspec.fwnode = domain->parent->fwnode;
- fwspec.param_count = 2;
- fwspec.param[0] = hwirq;
- fwspec.param[1] = IRQ_TYPE_EDGE_RISING;
+ if (is_of_node(fwspec.fwnode)) {
+ /* SPI lines for OF translations start at offset 32 */
+ fwspec.param_count = 3;
+ fwspec.param[0] = 0;
+ fwspec.param[1] = hwirq - 32;
+ fwspec.param[2] = IRQ_TYPE_EDGE_RISING;
+ } else {
+ fwspec.param_count = 2;
+ fwspec.param[0] = hwirq;
+ fwspec.param[1] = IRQ_TYPE_EDGE_RISING;
+ }
ret = irq_domain_alloc_irqs_parent(domain, virq, 1, &fwspec);
if (ret)
@@ -887,10 +908,44 @@ static const struct irq_domain_ops hv_pci_domain_ops = {
.activate = hv_pci_vec_irq_domain_activate,
};
+#ifdef CONFIG_OF
+
+static struct irq_domain *hv_pci_of_irq_domain_parent(void)
+{
+ struct device_node *parent;
+ struct irq_domain *domain;
+
+ parent = of_irq_find_parent(hv_get_vmbus_root_device()->of_node);
+ if (!parent)
+ return NULL;
+ domain = irq_find_host(parent);
+ of_node_put(parent);
+
+ return domain;
+}
+
+#endif
+
+#ifdef CONFIG_ACPI
+
+static struct irq_domain *hv_pci_acpi_irq_domain_parent(void)
+{
+ acpi_gsi_domain_disp_fn gsi_domain_disp_fn;
+
+ gsi_domain_disp_fn = acpi_get_gsi_dispatcher();
+ if (!gsi_domain_disp_fn)
+ return NULL;
+ return irq_find_matching_fwnode(gsi_domain_disp_fn(0),
+ DOMAIN_BUS_ANY);
+}
+
+#endif
+
static int hv_pci_irqchip_init(void)
{
static struct hv_pci_chip_data *chip_data;
struct fwnode_handle *fn = NULL;
+ struct irq_domain *irq_domain_parent = NULL;
int ret = -ENOMEM;
chip_data = kzalloc(sizeof(*chip_data), GFP_KERNEL);
@@ -907,9 +962,24 @@ static int hv_pci_irqchip_init(void)
* way to ensure that all the corresponding devices are also gone and
* no interrupts will be generated.
*/
- hv_msi_gic_irq_domain = acpi_irq_create_hierarchy(0, HV_PCI_MSI_SPI_NR,
- fn, &hv_pci_domain_ops,
- chip_data);
+#ifdef CONFIG_ACPI
+ if (!acpi_disabled)
+ irq_domain_parent = hv_pci_acpi_irq_domain_parent();
+#endif
+#ifdef CONFIG_OF
+ if (!irq_domain_parent)
+ irq_domain_parent = hv_pci_of_irq_domain_parent();
+#endif
+ if (!irq_domain_parent) {
+ WARN_ONCE(1, "Invalid firmware configuration for VMBus interrupts\n");
+ ret = -EINVAL;
+ goto free_chip;
+ }
+
+ hv_msi_gic_irq_domain = irq_domain_create_hierarchy(irq_domain_parent, 0,
+ HV_PCI_MSI_SPI_NR,
+ fn, &hv_pci_domain_ops,
+ chip_data);
if (!hv_msi_gic_irq_domain) {
pr_err("Failed to create Hyper-V arm64 vPCI MSI IRQ domain\n");
@@ -1356,7 +1426,7 @@ static struct pci_ops hv_pcifront_ops = {
*
* If the PF driver wishes to initiate communication, it can "invalidate" one or
* more of the first 64 blocks. This invalidation is delivered via a callback
- * supplied by the VF driver by this driver.
+ * supplied to the VF driver by this driver.
*
* No protocol is implied, except that supplied by the PF and VF drivers.
*/
@@ -1438,7 +1508,7 @@ static int hv_read_config_block(struct pci_dev *pdev, void *buf,
memset(&pkt, 0, sizeof(pkt));
pkt.pkt.completion_func = hv_pci_read_config_compl;
pkt.pkt.compl_ctxt = &comp_pkt;
- read_blk = (struct pci_read_block *)&pkt.pkt.message;
+ read_blk = (struct pci_read_block *)pkt.buf;
read_blk->message_type.type = PCI_READ_BLOCK;
read_blk->wslot.slot = devfn_to_wslot(pdev->devfn);
read_blk->block_id = block_id;
@@ -1518,7 +1588,7 @@ static int hv_write_config_block(struct pci_dev *pdev, void *buf,
memset(&pkt, 0, sizeof(pkt));
pkt.pkt.completion_func = hv_pci_write_config_compl;
pkt.pkt.compl_ctxt = &comp_pkt;
- write_blk = (struct pci_write_block *)&pkt.pkt.message;
+ write_blk = (struct pci_write_block *)pkt.buf;
write_blk->message_type.type = PCI_WRITE_BLOCK;
write_blk->wslot.slot = devfn_to_wslot(pdev->devfn);
write_blk->block_id = block_id;
@@ -1599,7 +1669,7 @@ static void hv_int_desc_free(struct hv_pci_dev *hpdev,
return;
}
memset(&ctxt, 0, sizeof(ctxt));
- int_pkt = (struct pci_delete_interrupt *)&ctxt.pkt.message;
+ int_pkt = (struct pci_delete_interrupt *)ctxt.buffer;
int_pkt->message_type.type =
PCI_DELETE_INTERRUPT_MESSAGE;
int_pkt->wslot.slot = hpdev->desc.win_slot.slot;
@@ -1757,8 +1827,7 @@ static int hv_compose_multi_msi_req_get_cpu(void)
spin_lock_irqsave(&multi_msi_cpu_lock, flags);
- cpu_next = cpumask_next_wrap(cpu_next, cpu_online_mask, nr_cpu_ids,
- false);
+ cpu_next = cpumask_next_wrap(cpu_next, cpu_online_mask);
cpu = cpu_next;
spin_unlock_irqrestore(&multi_msi_cpu_lock, flags);
@@ -2483,7 +2552,7 @@ static struct hv_pci_dev *new_pcichild_device(struct hv_pcibus_device *hbus,
comp_pkt.hpdev = hpdev;
pkt.init_packet.compl_ctxt = &comp_pkt;
pkt.init_packet.completion_func = q_resource_requirements;
- res_req = (struct pci_child_message *)&pkt.init_packet.message;
+ res_req = (struct pci_child_message *)pkt.buffer;
res_req->message_type.type = PCI_QUERY_RESOURCE_REQUIREMENTS;
res_req->wslot.slot = desc->win_slot.slot;
@@ -2861,7 +2930,7 @@ static void hv_eject_device_work(struct work_struct *work)
pci_destroy_slot(hpdev->pci_slot);
memset(&ctxt, 0, sizeof(ctxt));
- ejct_pkt = (struct pci_eject_response *)&ctxt.pkt.message;
+ ejct_pkt = (struct pci_eject_response *)ctxt.buffer;
ejct_pkt->message_type.type = PCI_EJECTION_COMPLETE;
ejct_pkt->wslot.slot = hpdev->desc.win_slot.slot;
vmbus_sendpacket(hbus->hdev->channel, ejct_pkt,
@@ -3119,7 +3188,7 @@ static int hv_pci_protocol_negotiation(struct hv_device *hdev,
init_completion(&comp_pkt.host_event);
pkt->completion_func = hv_pci_generic_compl;
pkt->compl_ctxt = &comp_pkt;
- version_req = (struct pci_version_request *)&pkt->message;
+ version_req = (struct pci_version_request *)(pkt + 1);
version_req->message_type.type = PCI_QUERY_PROTOCOL_VERSION;
for (i = 0; i < num_version; i++) {
@@ -3341,7 +3410,7 @@ enter_d0_retry:
init_completion(&comp_pkt.host_event);
pkt->completion_func = hv_pci_generic_compl;
pkt->compl_ctxt = &comp_pkt;
- d0_entry = (struct pci_bus_d0_entry *)&pkt->message;
+ d0_entry = (struct pci_bus_d0_entry *)(pkt + 1);
d0_entry->message_type.type = PCI_BUS_D0ENTRY;
d0_entry->mmio_base = hbus->mem_config->start;
@@ -3499,20 +3568,20 @@ static int hv_send_resources_allocated(struct hv_device *hdev)
if (hbus->protocol_version < PCI_PROTOCOL_VERSION_1_2) {
res_assigned =
- (struct pci_resources_assigned *)&pkt->message;
+ (struct pci_resources_assigned *)(pkt + 1);
res_assigned->message_type.type =
PCI_RESOURCES_ASSIGNED;
res_assigned->wslot.slot = hpdev->desc.win_slot.slot;
} else {
res_assigned2 =
- (struct pci_resources_assigned2 *)&pkt->message;
+ (struct pci_resources_assigned2 *)(pkt + 1);
res_assigned2->message_type.type =
PCI_RESOURCES_ASSIGNED2;
res_assigned2->wslot.slot = hpdev->desc.win_slot.slot;
}
put_pcichild(hpdev);
- ret = vmbus_sendpacket(hdev->channel, &pkt->message,
+ ret = vmbus_sendpacket(hdev->channel, pkt + 1,
size_res, (unsigned long)pkt,
VM_PKT_DATA_INBAND,
VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
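
With the flexible array member gone from struct pci_packet, the message payload now lives in an explicitly sized buffer laid out directly behind the header, reached either through the named buffer field or by casting pkt + 1. A self-contained sketch of the layout arithmetic (struct names are simplified stand-ins):

#include <stdio.h>
#include <stdlib.h>

struct header { long completion; };	/* stand-in for struct pci_packet */
struct message { int type; };		/* stand-in for struct pci_message */

int main(void)
{
	/* Header and payload allocated back to back, as the driver does. */
	struct header *pkt = calloc(1, sizeof(*pkt) + sizeof(struct message));
	struct message *msg;

	if (!pkt)
		return 1;

	msg = (struct message *)(pkt + 1);	/* first byte past the header */
	msg->type = 7;

	printf("payload type = %d\n", msg->type);	/* 7 */
	free(pkt);
	return 0;
}
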
@@ -3810,6 +3879,7 @@ static int hv_pci_bus_exit(struct hv_device *hdev, bool keep_devs)
struct pci_packet teardown_packet;
u8 buffer[sizeof(struct pci_message)];
} pkt;
+ struct pci_message *msg;
struct hv_pci_compl comp_pkt;
struct hv_pci_dev *hpdev, *tmp;
unsigned long flags;
@@ -3855,10 +3925,10 @@ static int hv_pci_bus_exit(struct hv_device *hdev, bool keep_devs)
init_completion(&comp_pkt.host_event);
pkt.teardown_packet.completion_func = hv_pci_generic_compl;
pkt.teardown_packet.compl_ctxt = &comp_pkt;
- pkt.teardown_packet.message[0].type = PCI_BUS_D0EXIT;
+ msg = (struct pci_message *)pkt.buffer;
+ msg->type = PCI_BUS_D0EXIT;
- ret = vmbus_sendpacket_getid(chan, &pkt.teardown_packet.message,
- sizeof(struct pci_message),
+ ret = vmbus_sendpacket_getid(chan, msg, sizeof(*msg),
(unsigned long)&pkt.teardown_packet,
&trans_id, VM_PKT_DATA_INBAND,
VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
@@ -3976,24 +4046,18 @@ static int hv_pci_restore_msi_msg(struct pci_dev *pdev, void *arg)
{
struct irq_data *irq_data;
struct msi_desc *entry;
- int ret = 0;
if (!pdev->msi_enabled && !pdev->msix_enabled)
return 0;
- msi_lock_descs(&pdev->dev);
+ guard(msi_descs_lock)(&pdev->dev);
msi_for_each_desc(entry, &pdev->dev, MSI_DESC_ASSOCIATED) {
irq_data = irq_get_irq_data(entry->irq);
- if (WARN_ON_ONCE(!irq_data)) {
- ret = -EINVAL;
- break;
- }
-
+ if (WARN_ON_ONCE(!irq_data))
+ return -EINVAL;
hv_compose_msi_msg(irq_data, &entry->msg);
}
- msi_unlock_descs(&pdev->dev);
-
- return ret;
+ return 0;
}
/*
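
guard(msi_descs_lock)(&pdev->dev) releases the lock on every return path, which is what lets the manual unlock and the ret bookkeeping above disappear. Kernel guards are built on the compiler's cleanup attribute; a minimal userspace demonstration of the same mechanism (GCC/Clang extension, all names illustrative):

#include <stdio.h>

static int depth;

static void unlock(int **slot)
{
	(void)slot;
	printf("unlocked (depth %d)\n", --depth);
}

/* Scope guard: unlock() runs automatically when 'g' leaves scope. */
#define SCOPED_LOCK() \
	depth++; \
	int *g __attribute__((cleanup(unlock))) = &depth

static int work(int fail)
{
	SCOPED_LOCK();
	printf("locked (depth %d)\n", depth);
	if (fail)
		return -1;	/* unlock() still runs here */
	return 0;
}

int main(void)
{
	work(0);
	work(1);
	return 0;
}
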
@@ -4094,6 +4158,9 @@ static int __init init_hv_pci_drv(void)
if (!hv_is_hyperv_initialized())
return -ENODEV;
+ if (hv_root_partition() && !hv_nested)
+ return -ENODEV;
+
ret = hv_pci_irqchip_init();
if (ret)
return ret;
diff --git a/drivers/pci/controller/pci-mvebu.c b/drivers/pci/controller/pci-mvebu.c
index 665f35f9d826..a4a2bac4f4b2 100644
--- a/drivers/pci/controller/pci-mvebu.c
+++ b/drivers/pci/controller/pci-mvebu.c
@@ -1078,9 +1078,9 @@ static int mvebu_pcie_init_irq_domain(struct mvebu_pcie_port *port)
return -ENODEV;
}
- port->intx_irq_domain = irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX,
- &mvebu_pcie_intx_irq_domain_ops,
- port);
+ port->intx_irq_domain = irq_domain_create_linear(of_fwnode_handle(pcie_intc_node),
+ PCI_NUM_INTX,
+ &mvebu_pcie_intx_irq_domain_ops, port);
of_node_put(pcie_intc_node);
if (!port->intx_irq_domain) {
dev_err(dev, "Failed to get INTx IRQ domain for %s\n", port->name);
@@ -1179,37 +1179,29 @@ static int mvebu_get_tgt_attr(struct device_node *np, int devfn,
unsigned int *tgt,
unsigned int *attr)
{
- const int na = 3, ns = 2;
- const __be32 *range;
- int rlen, nranges, rangesz, pna, i;
+ struct of_range range;
+ struct of_range_parser parser;
*tgt = -1;
*attr = -1;
- range = of_get_property(np, "ranges", &rlen);
- if (!range)
+ if (of_pci_range_parser_init(&parser, np))
return -EINVAL;
- pna = of_n_addr_cells(np);
- rangesz = pna + na + ns;
- nranges = rlen / sizeof(__be32) / rangesz;
-
- for (i = 0; i < nranges; i++, range += rangesz) {
- u32 flags = of_read_number(range, 1);
- u32 slot = of_read_number(range + 1, 1);
- u64 cpuaddr = of_read_number(range + na, pna);
+ for_each_of_range(&parser, &range) {
unsigned long rtype;
+ u32 slot = upper_32_bits(range.bus_addr);
- if (DT_FLAGS_TO_TYPE(flags) == DT_TYPE_IO)
+ if (DT_FLAGS_TO_TYPE(range.flags) == DT_TYPE_IO)
rtype = IORESOURCE_IO;
- else if (DT_FLAGS_TO_TYPE(flags) == DT_TYPE_MEM32)
+ else if (DT_FLAGS_TO_TYPE(range.flags) == DT_TYPE_MEM32)
rtype = IORESOURCE_MEM;
else
continue;
if (slot == PCI_SLOT(devfn) && type == rtype) {
- *tgt = DT_CPUADDR_TO_TARGET(cpuaddr);
- *attr = DT_CPUADDR_TO_ATTR(cpuaddr);
+ *tgt = DT_CPUADDR_TO_TARGET(range.cpu_addr);
+ *attr = DT_CPUADDR_TO_ATTR(range.cpu_addr);
return 0;
}
}
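
The mvebu hunk above replaces hand-rolled "ranges" parsing, including the manual #address-cells/#size-cells arithmetic, with the of_range_parser helpers, which deliver each entry already decoded. A kernel-style sketch of the idiom (a sketch only; error handling trimmed):

#include <linux/of_address.h>

static int example_walk_ranges(struct device_node *np)
{
	struct of_range_parser parser;
	struct of_range range;

	if (of_pci_range_parser_init(&parser, np))
		return -EINVAL;

	/* Each entry arrives decoded: flags, bus_addr, cpu_addr, size. */
	for_each_of_range(&parser, &range)
		pr_info("bus %llx -> cpu %llx (size %llx)\n",
			(unsigned long long)range.bus_addr,
			(unsigned long long)range.cpu_addr,
			(unsigned long long)range.size);

	return 0;
}
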
@@ -1422,7 +1414,7 @@ static void mvebu_pcie_powerdown(struct mvebu_pcie_port *port)
}
/*
- * devm_of_pci_get_host_bridge_resources() only sets up translateable resources,
+ * devm_of_pci_get_host_bridge_resources() only sets up translatable resources,
* so we need extra resource setup parsing our special DT properties encoding
* the MEM and IO apertures.
*/
diff --git a/drivers/pci/controller/pci-tegra.c b/drivers/pci/controller/pci-tegra.c
index b3cdbc5927de..467ddc701adc 100644
--- a/drivers/pci/controller/pci-tegra.c
+++ b/drivers/pci/controller/pci-tegra.c
@@ -22,6 +22,7 @@
#include <linux/iopoll.h>
#include <linux/irq.h>
#include <linux/irqchip/chained_irq.h>
+#include <linux/irqchip/irq-msi-lib.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/init.h>
@@ -1547,7 +1548,7 @@ static void tegra_pcie_msi_irq(struct irq_desc *desc)
unsigned int index = i * 32 + offset;
int ret;
- ret = generic_handle_domain_irq(msi->domain->parent, index);
+ ret = generic_handle_domain_irq(msi->domain, index);
if (ret) {
/*
* that's weird who triggered this?
@@ -1565,30 +1566,6 @@ static void tegra_pcie_msi_irq(struct irq_desc *desc)
chained_irq_exit(chip, desc);
}
-static void tegra_msi_top_irq_ack(struct irq_data *d)
-{
- irq_chip_ack_parent(d);
-}
-
-static void tegra_msi_top_irq_mask(struct irq_data *d)
-{
- pci_msi_mask_irq(d);
- irq_chip_mask_parent(d);
-}
-
-static void tegra_msi_top_irq_unmask(struct irq_data *d)
-{
- pci_msi_unmask_irq(d);
- irq_chip_unmask_parent(d);
-}
-
-static struct irq_chip tegra_msi_top_chip = {
- .name = "Tegra PCIe MSI",
- .irq_ack = tegra_msi_top_irq_ack,
- .irq_mask = tegra_msi_top_irq_mask,
- .irq_unmask = tegra_msi_top_irq_unmask,
-};
-
static void tegra_msi_irq_ack(struct irq_data *d)
{
struct tegra_msi *msi = irq_data_get_irq_chip_data(d);
@@ -1690,42 +1667,40 @@ static const struct irq_domain_ops tegra_msi_domain_ops = {
.free = tegra_msi_domain_free,
};
-static struct msi_domain_info tegra_msi_info = {
- .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
- MSI_FLAG_NO_AFFINITY | MSI_FLAG_PCI_MSIX,
- .chip = &tegra_msi_top_chip,
+static const struct msi_parent_ops tegra_msi_parent_ops = {
+ .supported_flags = (MSI_GENERIC_FLAGS_MASK |
+ MSI_FLAG_PCI_MSIX),
+ .required_flags = (MSI_FLAG_USE_DEF_DOM_OPS |
+ MSI_FLAG_USE_DEF_CHIP_OPS |
+ MSI_FLAG_PCI_MSI_MASK_PARENT |
+ MSI_FLAG_NO_AFFINITY),
+ .chip_flags = MSI_CHIP_FLAG_SET_ACK,
+ .bus_select_token = DOMAIN_BUS_PCI_MSI,
+ .init_dev_msi_info = msi_lib_init_dev_msi_info,
};
static int tegra_allocate_domains(struct tegra_msi *msi)
{
struct tegra_pcie *pcie = msi_to_pcie(msi);
struct fwnode_handle *fwnode = dev_fwnode(pcie->dev);
- struct irq_domain *parent;
-
- parent = irq_domain_create_linear(fwnode, INT_PCI_MSI_NR,
- &tegra_msi_domain_ops, msi);
- if (!parent) {
- dev_err(pcie->dev, "failed to create IRQ domain\n");
- return -ENOMEM;
- }
- irq_domain_update_bus_token(parent, DOMAIN_BUS_NEXUS);
+ struct irq_domain_info info = {
+ .fwnode = fwnode,
+ .ops = &tegra_msi_domain_ops,
+ .size = INT_PCI_MSI_NR,
+ .host_data = msi,
+ };
- msi->domain = pci_msi_create_irq_domain(fwnode, &tegra_msi_info, parent);
+ msi->domain = msi_create_parent_irq_domain(&info, &tegra_msi_parent_ops);
if (!msi->domain) {
dev_err(pcie->dev, "failed to create MSI domain\n");
- irq_domain_remove(parent);
return -ENOMEM;
}
-
return 0;
}
static void tegra_free_domains(struct tegra_msi *msi)
{
- struct irq_domain *parent = msi->domain->parent;
-
irq_domain_remove(msi->domain);
- irq_domain_remove(parent);
}
static int tegra_pcie_msi_setup(struct tegra_pcie *pcie)
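
The Tegra MSI rework collapses the old two-level arrangement, a private inner domain topped by pci_msi_create_irq_domain(), into a single msi_create_parent_irq_domain() call driven by irq_domain_info and msi_parent_ops, so teardown removes only one domain. The conversion reduced to its essentials (a sketch, not the full driver; the example_* ops are placeholders):

#include <linux/irqdomain.h>
#include <linux/irqchip/irq-msi-lib.h>

static const struct irq_domain_ops example_domain_ops;		/* placeholder */
static const struct msi_parent_ops example_msi_parent_ops;	/* placeholder */

static struct irq_domain *example_create_msi_domain(struct fwnode_handle *fwnode,
						    unsigned int nvec, void *priv)
{
	struct irq_domain_info info = {
		.fwnode		= fwnode,
		.ops		= &example_domain_ops,
		.size		= nvec,
		.host_data	= priv,
	};

	/* One call replaces the inner domain + PCI/MSI top domain pair. */
	return msi_create_parent_irq_domain(&info, &example_msi_parent_ops);
}
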
@@ -2106,47 +2081,39 @@ static int tegra_pcie_get_regulators(struct tegra_pcie *pcie, u32 lane_mask)
static int tegra_pcie_parse_dt(struct tegra_pcie *pcie)
{
struct device *dev = pcie->dev;
- struct device_node *np = dev->of_node, *port;
+ struct device_node *np = dev->of_node;
const struct tegra_pcie_soc *soc = pcie->soc;
u32 lanes = 0, mask = 0;
unsigned int lane = 0;
int err;
/* parse root ports */
- for_each_child_of_node(np, port) {
+ for_each_child_of_node_scoped(np, port) {
struct tegra_pcie_port *rp;
unsigned int index;
u32 value;
char *label;
err = of_pci_get_devfn(port);
- if (err < 0) {
- dev_err(dev, "failed to parse address: %d\n", err);
- goto err_node_put;
- }
+ if (err < 0)
+ return dev_err_probe(dev, err, "failed to parse address\n");
index = PCI_SLOT(err);
- if (index < 1 || index > soc->num_ports) {
- dev_err(dev, "invalid port number: %d\n", index);
- err = -EINVAL;
- goto err_node_put;
- }
+ if (index < 1 || index > soc->num_ports)
+ return dev_err_probe(dev, -EINVAL,
+ "invalid port number: %d\n", index);
index--;
err = of_property_read_u32(port, "nvidia,num-lanes", &value);
- if (err < 0) {
- dev_err(dev, "failed to parse # of lanes: %d\n",
- err);
- goto err_node_put;
- }
+ if (err < 0)
+ return dev_err_probe(dev, err,
+ "failed to parse # of lanes\n");
- if (value > 16) {
- dev_err(dev, "invalid # of lanes: %u\n", value);
- err = -EINVAL;
- goto err_node_put;
- }
+ if (value > 16)
+ return dev_err_probe(dev, -EINVAL,
+ "invalid # of lanes: %u\n", value);
lanes |= value << (index << 3);
@@ -2159,16 +2126,12 @@ static int tegra_pcie_parse_dt(struct tegra_pcie *pcie)
lane += value;
rp = devm_kzalloc(dev, sizeof(*rp), GFP_KERNEL);
- if (!rp) {
- err = -ENOMEM;
- goto err_node_put;
- }
+ if (!rp)
+ return -ENOMEM;
err = of_address_to_resource(port, 0, &rp->regs);
- if (err < 0) {
- dev_err(dev, "failed to parse address: %d\n", err);
- goto err_node_put;
- }
+ if (err < 0)
+ return dev_err_probe(dev, err, "failed to parse address\n");
INIT_LIST_HEAD(&rp->list);
rp->index = index;
@@ -2177,16 +2140,12 @@ static int tegra_pcie_parse_dt(struct tegra_pcie *pcie)
rp->np = port;
rp->base = devm_pci_remap_cfg_resource(dev, &rp->regs);
- if (IS_ERR(rp->base)) {
- err = PTR_ERR(rp->base);
- goto err_node_put;
- }
+ if (IS_ERR(rp->base))
+ return PTR_ERR(rp->base);
label = devm_kasprintf(dev, GFP_KERNEL, "pex-reset-%u", index);
- if (!label) {
- err = -ENOMEM;
- goto err_node_put;
- }
+ if (!label)
+ return -ENOMEM;
/*
* Returns -ENOENT if reset-gpios property is not populated
@@ -2199,34 +2158,26 @@ static int tegra_pcie_parse_dt(struct tegra_pcie *pcie)
GPIOD_OUT_LOW,
label);
if (IS_ERR(rp->reset_gpio)) {
- if (PTR_ERR(rp->reset_gpio) == -ENOENT) {
+ if (PTR_ERR(rp->reset_gpio) == -ENOENT)
rp->reset_gpio = NULL;
- } else {
- dev_err(dev, "failed to get reset GPIO: %ld\n",
- PTR_ERR(rp->reset_gpio));
- err = PTR_ERR(rp->reset_gpio);
- goto err_node_put;
- }
+ else
+ return dev_err_probe(dev, PTR_ERR(rp->reset_gpio),
+ "failed to get reset GPIO\n");
}
list_add_tail(&rp->list, &pcie->ports);
}
err = tegra_pcie_get_xbar_config(pcie, lanes, &pcie->xbar_config);
- if (err < 0) {
- dev_err(dev, "invalid lane configuration\n");
- return err;
- }
+ if (err < 0)
+ return dev_err_probe(dev, err,
+ "invalid lane configuration\n");
err = tegra_pcie_get_regulators(pcie, mask);
if (err < 0)
return err;
return 0;
-
-err_node_put:
- of_node_put(port);
- return err;
}
/*
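
Two cleanups combine in the tegra_pcie_parse_dt() rewrite: for_each_child_of_node_scoped() drops the child node reference automatically on every loop exit, which removes the err_node_put unwinding, and dev_err_probe() folds the error message and return value into one statement. A sketch of the resulting shape (property and message text illustrative):

#include <linux/device.h>
#include <linux/of.h>

static int example_parse_ports(struct device *dev)
{
	/* 'port' is declared by the macro and put on every exit path. */
	for_each_child_of_node_scoped(dev->of_node, port) {
		u32 value;
		int err;

		err = of_property_read_u32(port, "reg", &value);
		if (err < 0)
			return dev_err_probe(dev, err, "failed to parse reg\n");
	}
	return 0;
}
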
diff --git a/drivers/pci/controller/pci-thunder-ecam.c b/drivers/pci/controller/pci-thunder-ecam.c
index b5bd10a62adb..b5b4a958e6a2 100644
--- a/drivers/pci/controller/pci-thunder-ecam.c
+++ b/drivers/pci/controller/pci-thunder-ecam.c
@@ -11,6 +11,8 @@
#include <linux/pci-ecam.h>
#include <linux/platform_device.h>
+#include "pci-host-common.h"
+
#if defined(CONFIG_PCI_HOST_THUNDER_ECAM) || (defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS))
static void set_val(u32 v, int where, int size, u32 *val)
@@ -204,7 +206,7 @@ static int thunder_ecam_config_read(struct pci_bus *bus, unsigned int devfn,
v = readl(addr);
if (v & 0xff00)
- pr_err("Bad MSIX cap header: %08x\n", v);
+ pr_err("Bad MSI-X cap header: %08x\n", v);
v |= 0xbc00; /* next capability is EA at 0xbc */
set_val(v, where, size, val);
return PCIBIOS_SUCCESSFUL;
diff --git a/drivers/pci/controller/pci-thunder-pem.c b/drivers/pci/controller/pci-thunder-pem.c
index f1bd5de67997..5fa037fb61dc 100644
--- a/drivers/pci/controller/pci-thunder-pem.c
+++ b/drivers/pci/controller/pci-thunder-pem.c
@@ -14,6 +14,7 @@
#include <linux/platform_device.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include "../pci.h"
+#include "pci-host-common.h"
#if defined(CONFIG_PCI_HOST_THUNDER_PEM) || (defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS))
diff --git a/drivers/pci/controller/pci-xgene-msi.c b/drivers/pci/controller/pci-xgene-msi.c
index 88c0977bc41a..b05ec8b0bb93 100644
--- a/drivers/pci/controller/pci-xgene-msi.c
+++ b/drivers/pci/controller/pci-xgene-msi.c
@@ -12,6 +12,7 @@
#include <linux/module.h>
#include <linux/msi.h>
#include <linux/irqchip/chained_irq.h>
+#include <linux/irqchip/irq-msi-lib.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/of_pci.h>
@@ -32,7 +33,6 @@ struct xgene_msi_group {
struct xgene_msi {
struct device_node *node;
struct irq_domain *inner_domain;
- struct irq_domain *msi_domain;
u64 msi_addr;
void __iomem *msi_regs;
unsigned long *bitmap;
@@ -44,20 +44,6 @@ struct xgene_msi {
/* Global data */
static struct xgene_msi xgene_msi_ctrl;
-static struct irq_chip xgene_msi_top_irq_chip = {
- .name = "X-Gene1 MSI",
- .irq_enable = pci_msi_unmask_irq,
- .irq_disable = pci_msi_mask_irq,
- .irq_mask = pci_msi_mask_irq,
- .irq_unmask = pci_msi_unmask_irq,
-};
-
-static struct msi_domain_info xgene_msi_domain_info = {
- .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
- MSI_FLAG_PCI_MSIX),
- .chip = &xgene_msi_top_irq_chip,
-};
-
/*
* X-Gene v1 has 16 groups of MSI termination registers MSInIRx, where
* n is group number (0..F), x is index of registers in each group (0..7)
@@ -154,7 +140,7 @@ static void xgene_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
* X-Gene v1 only has 16 MSI GIC IRQs for 2048 MSI vectors. To maintain
* the expected behaviour of .set_affinity for each MSI interrupt, the 16
* MSI GIC IRQs are statically allocated to 8 X-Gene v1 cores (2 GIC IRQs
- * for each core). The MSI vector is moved fom 1 MSI GIC IRQ to another
+ * for each core). The MSI vector is moved from 1 MSI GIC IRQ to another
* MSI GIC IRQ to steer its MSI interrupt to correct X-Gene v1 core. As a
* consequence, the total MSI vectors that X-Gene v1 supports will be
* reduced to 256 (2048/8) vectors.
@@ -235,34 +221,35 @@ static void xgene_irq_domain_free(struct irq_domain *domain,
irq_domain_free_irqs_parent(domain, virq, nr_irqs);
}
-static const struct irq_domain_ops msi_domain_ops = {
+static const struct irq_domain_ops xgene_msi_domain_ops = {
.alloc = xgene_irq_domain_alloc,
.free = xgene_irq_domain_free,
};
+static const struct msi_parent_ops xgene_msi_parent_ops = {
+ .supported_flags = (MSI_GENERIC_FLAGS_MASK |
+ MSI_FLAG_PCI_MSIX),
+ .required_flags = (MSI_FLAG_USE_DEF_DOM_OPS |
+ MSI_FLAG_USE_DEF_CHIP_OPS),
+ .bus_select_token = DOMAIN_BUS_PCI_MSI,
+ .init_dev_msi_info = msi_lib_init_dev_msi_info,
+};
+
static int xgene_allocate_domains(struct xgene_msi *msi)
{
- msi->inner_domain = irq_domain_add_linear(NULL, NR_MSI_VEC,
- &msi_domain_ops, msi);
- if (!msi->inner_domain)
- return -ENOMEM;
-
- msi->msi_domain = pci_msi_create_irq_domain(of_node_to_fwnode(msi->node),
- &xgene_msi_domain_info,
- msi->inner_domain);
-
- if (!msi->msi_domain) {
- irq_domain_remove(msi->inner_domain);
- return -ENOMEM;
- }
-
- return 0;
+ struct irq_domain_info info = {
+ .fwnode = of_fwnode_handle(msi->node),
+ .ops = &xgene_msi_domain_ops,
+ .size = NR_MSI_VEC,
+ .host_data = msi,
+ };
+
+ msi->inner_domain = msi_create_parent_irq_domain(&info, &xgene_msi_parent_ops);
+ return msi->inner_domain ? 0 : -ENOMEM;
}
static void xgene_free_domains(struct xgene_msi *msi)
{
- if (msi->msi_domain)
- irq_domain_remove(msi->msi_domain);
if (msi->inner_domain)
irq_domain_remove(msi->inner_domain);
}
diff --git a/drivers/pci/controller/pcie-altera-msi.c b/drivers/pci/controller/pcie-altera-msi.c
index e1cee3c0575f..a43f21eb8fbb 100644
--- a/drivers/pci/controller/pcie-altera-msi.c
+++ b/drivers/pci/controller/pcie-altera-msi.c
@@ -164,9 +164,9 @@ static const struct irq_domain_ops msi_domain_ops = {
static int altera_allocate_domains(struct altera_msi *msi)
{
- struct fwnode_handle *fwnode = of_node_to_fwnode(msi->pdev->dev.of_node);
+ struct fwnode_handle *fwnode = of_fwnode_handle(msi->pdev->dev.of_node);
- msi->inner_domain = irq_domain_add_linear(NULL, msi->num_of_vectors,
+ msi->inner_domain = irq_domain_create_linear(NULL, msi->num_of_vectors,
&msi_domain_ops, msi);
if (!msi->inner_domain) {
dev_err(&msi->pdev->dev, "failed to create IRQ domain\n");
diff --git a/drivers/pci/controller/pcie-altera.c b/drivers/pci/controller/pcie-altera.c
index eb55a7f8573a..0fc77176a52e 100644
--- a/drivers/pci/controller/pcie-altera.c
+++ b/drivers/pci/controller/pcie-altera.c
@@ -6,6 +6,7 @@
* Description: Altera PCIe host controller driver
*/
+#include <linux/bitfield.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/irqchip/chained_irq.h>
@@ -77,9 +78,25 @@
#define S10_TLP_FMTTYPE_CFGWR0 0x45
#define S10_TLP_FMTTYPE_CFGWR1 0x44
+#define AGLX_RP_CFG_ADDR(pcie, reg) (((pcie)->hip_base) + (reg))
+#define AGLX_RP_SECONDARY(pcie) \
+ readb(AGLX_RP_CFG_ADDR(pcie, PCI_SECONDARY_BUS))
+
+#define AGLX_BDF_REG 0x00002004
+#define AGLX_ROOT_PORT_IRQ_STATUS 0x14c
+#define AGLX_ROOT_PORT_IRQ_ENABLE 0x150
+#define CFG_AER BIT(4)
+
+#define AGLX_CFG_TARGET GENMASK(13, 12)
+#define AGLX_CFG_TARGET_TYPE0 0
+#define AGLX_CFG_TARGET_TYPE1 1
+#define AGLX_CFG_TARGET_LOCAL_2000 2
+#define AGLX_CFG_TARGET_LOCAL_3000 3
+
enum altera_pcie_version {
ALTERA_PCIE_V1 = 0,
ALTERA_PCIE_V2,
+ ALTERA_PCIE_V3,
};
struct altera_pcie {
@@ -102,6 +119,11 @@ struct altera_pcie_ops {
int size, u32 *value);
int (*rp_write_cfg)(struct altera_pcie *pcie, u8 busno,
int where, int size, u32 value);
+ int (*ep_read_cfg)(struct altera_pcie *pcie, u8 busno,
+ unsigned int devfn, int where, int size, u32 *value);
+ int (*ep_write_cfg)(struct altera_pcie *pcie, u8 busno,
+ unsigned int devfn, int where, int size, u32 value);
+ void (*rp_isr)(struct irq_desc *desc);
};
struct altera_pcie_data {
@@ -112,6 +134,9 @@ struct altera_pcie_data {
u32 cfgrd1;
u32 cfgwr0;
u32 cfgwr1;
+ u32 port_conf_offset;
+ u32 port_irq_status_offset;
+ u32 port_irq_enable_offset;
};
struct tlp_rp_regpair_t {
@@ -131,6 +156,28 @@ static inline u32 cra_readl(struct altera_pcie *pcie, const u32 reg)
return readl_relaxed(pcie->cra_base + reg);
}
+static inline void cra_writew(struct altera_pcie *pcie, const u32 value,
+ const u32 reg)
+{
+ writew_relaxed(value, pcie->cra_base + reg);
+}
+
+static inline u32 cra_readw(struct altera_pcie *pcie, const u32 reg)
+{
+ return readw_relaxed(pcie->cra_base + reg);
+}
+
+static inline void cra_writeb(struct altera_pcie *pcie, const u32 value,
+ const u32 reg)
+{
+ writeb_relaxed(value, pcie->cra_base + reg);
+}
+
+static inline u32 cra_readb(struct altera_pcie *pcie, const u32 reg)
+{
+ return readb_relaxed(pcie->cra_base + reg);
+}
+
static bool altera_pcie_link_up(struct altera_pcie *pcie)
{
return !!((cra_readl(pcie, RP_LTSSM) & RP_LTSSM_MASK) == LTSSM_L0);
@@ -145,11 +192,20 @@ static bool s10_altera_pcie_link_up(struct altera_pcie *pcie)
return !!(readw(addr) & PCI_EXP_LNKSTA_DLLLA);
}
+static bool aglx_altera_pcie_link_up(struct altera_pcie *pcie)
+{
+ void __iomem *addr = AGLX_RP_CFG_ADDR(pcie,
+ pcie->pcie_data->cap_offset +
+ PCI_EXP_LNKSTA);
+
+ return (readw_relaxed(addr) & PCI_EXP_LNKSTA_DLLLA);
+}
+
/*
* Altera PCIe port uses BAR0 of RC's configuration space as the translation
* from PCI bus to native BUS. Entire DDR region is mapped into PCIe space
* using these registers, so it can be reached by DMA from EP devices.
- * This BAR0 will also access to MSI vector when receiving MSI/MSIX interrupt
+ * This BAR0 is also used to access the MSI vector when receiving an MSI/MSI-X interrupt
* from EP devices, eventually trigger interrupt to GIC. The BAR0 of bridge
* should be hidden during enumeration to avoid the sizing and resource
* allocation by PCIe core.
@@ -425,6 +481,103 @@ static int s10_rp_write_cfg(struct altera_pcie *pcie, u8 busno,
return PCIBIOS_SUCCESSFUL;
}
+static int aglx_rp_read_cfg(struct altera_pcie *pcie, int where,
+ int size, u32 *value)
+{
+ void __iomem *addr = AGLX_RP_CFG_ADDR(pcie, where);
+
+ switch (size) {
+ case 1:
+ *value = readb_relaxed(addr);
+ break;
+ case 2:
+ *value = readw_relaxed(addr);
+ break;
+ default:
+ *value = readl_relaxed(addr);
+ break;
+ }
+
+ /* Interrupt PIN not programmed in hardware, set to INTA. */
+ if (where == PCI_INTERRUPT_PIN && size == 1 && !(*value))
+ *value = 0x01;
+ else if (where == PCI_INTERRUPT_LINE && !(*value & 0xff00))
+ *value |= 0x0100;
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int aglx_rp_write_cfg(struct altera_pcie *pcie, u8 busno,
+ int where, int size, u32 value)
+{
+ void __iomem *addr = AGLX_RP_CFG_ADDR(pcie, where);
+
+ switch (size) {
+ case 1:
+ writeb_relaxed(value, addr);
+ break;
+ case 2:
+ writew_relaxed(value, addr);
+ break;
+ default:
+ writel_relaxed(value, addr);
+ break;
+ }
+
+ /*
+ * Monitor changes to PCI_PRIMARY_BUS register on Root Port
+ * and update local copy of root bus number accordingly.
+ */
+ if (busno == pcie->root_bus_nr && where == PCI_PRIMARY_BUS)
+ pcie->root_bus_nr = value & 0xff;
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int aglx_ep_write_cfg(struct altera_pcie *pcie, u8 busno,
+ unsigned int devfn, int where, int size, u32 value)
+{
+ cra_writel(pcie, ((busno << 8) | devfn), AGLX_BDF_REG);
+ if (busno > AGLX_RP_SECONDARY(pcie))
+ where |= FIELD_PREP(AGLX_CFG_TARGET, AGLX_CFG_TARGET_TYPE1);
+
+ switch (size) {
+ case 1:
+ cra_writeb(pcie, value, where);
+ break;
+ case 2:
+ cra_writew(pcie, value, where);
+ break;
+ default:
+ cra_writel(pcie, value, where);
+ break;
+ }
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int aglx_ep_read_cfg(struct altera_pcie *pcie, u8 busno,
+ unsigned int devfn, int where, int size, u32 *value)
+{
+ cra_writel(pcie, ((busno << 8) | devfn), AGLX_BDF_REG);
+ if (busno > AGLX_RP_SECONDARY(pcie))
+ where |= FIELD_PREP(AGLX_CFG_TARGET, AGLX_CFG_TARGET_TYPE1);
+
+ switch (size) {
+ case 1:
+ *value = cra_readb(pcie, where);
+ break;
+ case 2:
+ *value = cra_readw(pcie, where);
+ break;
+ default:
+ *value = cra_readl(pcie, where);
+ break;
+ }
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
static int _altera_pcie_cfg_read(struct altera_pcie *pcie, u8 busno,
unsigned int devfn, int where, int size,
u32 *value)
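
The AGLX accessors encode the config-access target type into bits 13:12 of the register offset with FIELD_PREP(AGLX_CFG_TARGET, ...), which shifts a value into the field described by its mask. A self-contained demonstration using simplified local copies of the macros (the kernel's versions live in linux/bits.h and linux/bitfield.h):

#include <stdio.h>
#include <stdint.h>

/* Simplified userspace stand-ins for the kernel macros. */
#define GENMASK(h, l)		((~0u << (l)) & (~0u >> (31 - (h))))
#define FIELD_PREP(mask, val)	(((val) << __builtin_ctz(mask)) & (mask))

#define AGLX_CFG_TARGET		GENMASK(13, 12)		/* 0x3000 */
#define AGLX_CFG_TARGET_TYPE1	1

int main(void)
{
	uint32_t where = 0x10;	/* config space register offset */

	/* Type 1 accesses: set bits 13:12 of the offset to 01. */
	where |= FIELD_PREP(AGLX_CFG_TARGET, AGLX_CFG_TARGET_TYPE1);
	printf("encoded offset = 0x%x\n", where);	/* 0x1010 */
	return 0;
}
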
@@ -437,6 +590,10 @@ static int _altera_pcie_cfg_read(struct altera_pcie *pcie, u8 busno,
return pcie->pcie_data->ops->rp_read_cfg(pcie, where,
size, value);
+ if (pcie->pcie_data->ops->ep_read_cfg)
+ return pcie->pcie_data->ops->ep_read_cfg(pcie, busno, devfn,
+ where, size, value);
+
switch (size) {
case 1:
byte_en = 1 << (where & 3);
@@ -481,6 +638,10 @@ static int _altera_pcie_cfg_write(struct altera_pcie *pcie, u8 busno,
return pcie->pcie_data->ops->rp_write_cfg(pcie, busno,
where, size, value);
+ if (pcie->pcie_data->ops->ep_write_cfg)
+ return pcie->pcie_data->ops->ep_write_cfg(pcie, busno, devfn,
+ where, size, value);
+
switch (size) {
case 1:
data32 = (value & 0xff) << shift;
@@ -659,7 +820,32 @@ static void altera_pcie_isr(struct irq_desc *desc)
dev_err_ratelimited(dev, "unexpected IRQ, INT%d\n", bit);
}
}
+ chained_irq_exit(chip, desc);
+}
+
+static void aglx_isr(struct irq_desc *desc)
+{
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ struct altera_pcie *pcie;
+ struct device *dev;
+ u32 status;
+ int ret;
+
+ chained_irq_enter(chip, desc);
+ pcie = irq_desc_get_handler_data(desc);
+ dev = &pcie->pdev->dev;
+
+ status = readl(pcie->hip_base + pcie->pcie_data->port_conf_offset +
+ pcie->pcie_data->port_irq_status_offset);
+ if (status & CFG_AER) {
+ writel(CFG_AER, (pcie->hip_base + pcie->pcie_data->port_conf_offset +
+ pcie->pcie_data->port_irq_status_offset));
+
+ ret = generic_handle_domain_irq(pcie->irq_domain, 0);
+ if (ret)
+ dev_err_ratelimited(dev, "unexpected IRQ %d\n", pcie->irq);
+ }
chained_irq_exit(chip, desc);
}
@@ -669,7 +855,7 @@ static int altera_pcie_init_irq_domain(struct altera_pcie *pcie)
struct device_node *node = dev->of_node;
/* Setup INTx */
- pcie->irq_domain = irq_domain_add_linear(node, PCI_NUM_INTX,
+ pcie->irq_domain = irq_domain_create_linear(of_fwnode_handle(node), PCI_NUM_INTX,
&intx_domain_ops, pcie);
if (!pcie->irq_domain) {
dev_err(dev, "Failed to get a INTx IRQ domain\n");
@@ -694,9 +880,9 @@ static int altera_pcie_parse_dt(struct altera_pcie *pcie)
if (IS_ERR(pcie->cra_base))
return PTR_ERR(pcie->cra_base);
- if (pcie->pcie_data->version == ALTERA_PCIE_V2) {
- pcie->hip_base =
- devm_platform_ioremap_resource_byname(pdev, "Hip");
+ if (pcie->pcie_data->version == ALTERA_PCIE_V2 ||
+ pcie->pcie_data->version == ALTERA_PCIE_V3) {
+ pcie->hip_base = devm_platform_ioremap_resource_byname(pdev, "Hip");
if (IS_ERR(pcie->hip_base))
return PTR_ERR(pcie->hip_base);
}
@@ -706,7 +892,7 @@ static int altera_pcie_parse_dt(struct altera_pcie *pcie)
if (pcie->irq < 0)
return pcie->irq;
- irq_set_chained_handler_and_data(pcie->irq, altera_pcie_isr, pcie);
+ irq_set_chained_handler_and_data(pcie->irq, pcie->pcie_data->ops->rp_isr, pcie);
return 0;
}
@@ -719,6 +905,7 @@ static const struct altera_pcie_ops altera_pcie_ops_1_0 = {
.tlp_read_pkt = tlp_read_packet,
.tlp_write_pkt = tlp_write_packet,
.get_link_status = altera_pcie_link_up,
+ .rp_isr = altera_pcie_isr,
};
static const struct altera_pcie_ops altera_pcie_ops_2_0 = {
@@ -727,6 +914,16 @@ static const struct altera_pcie_ops altera_pcie_ops_2_0 = {
.get_link_status = s10_altera_pcie_link_up,
.rp_read_cfg = s10_rp_read_cfg,
.rp_write_cfg = s10_rp_write_cfg,
+ .rp_isr = altera_pcie_isr,
+};
+
+static const struct altera_pcie_ops altera_pcie_ops_3_0 = {
+ .rp_read_cfg = aglx_rp_read_cfg,
+ .rp_write_cfg = aglx_rp_write_cfg,
+ .get_link_status = aglx_altera_pcie_link_up,
+ .ep_read_cfg = aglx_ep_read_cfg,
+ .ep_write_cfg = aglx_ep_write_cfg,
+ .rp_isr = aglx_isr,
};
static const struct altera_pcie_data altera_pcie_1_0_data = {
@@ -749,11 +946,44 @@ static const struct altera_pcie_data altera_pcie_2_0_data = {
.cfgwr1 = S10_TLP_FMTTYPE_CFGWR1,
};
+static const struct altera_pcie_data altera_pcie_3_0_f_tile_data = {
+ .ops = &altera_pcie_ops_3_0,
+ .version = ALTERA_PCIE_V3,
+ .cap_offset = 0x70,
+ .port_conf_offset = 0x14000,
+ .port_irq_status_offset = AGLX_ROOT_PORT_IRQ_STATUS,
+ .port_irq_enable_offset = AGLX_ROOT_PORT_IRQ_ENABLE,
+};
+
+static const struct altera_pcie_data altera_pcie_3_0_p_tile_data = {
+ .ops = &altera_pcie_ops_3_0,
+ .version = ALTERA_PCIE_V3,
+ .cap_offset = 0x70,
+ .port_conf_offset = 0x104000,
+ .port_irq_status_offset = AGLX_ROOT_PORT_IRQ_STATUS,
+ .port_irq_enable_offset = AGLX_ROOT_PORT_IRQ_ENABLE,
+};
+
+static const struct altera_pcie_data altera_pcie_3_0_r_tile_data = {
+ .ops = &altera_pcie_ops_3_0,
+ .version = ALTERA_PCIE_V3,
+ .cap_offset = 0x70,
+ .port_conf_offset = 0x1300,
+ .port_irq_status_offset = 0x0,
+ .port_irq_enable_offset = 0x4,
+};
+
static const struct of_device_id altera_pcie_of_match[] = {
{.compatible = "altr,pcie-root-port-1.0",
.data = &altera_pcie_1_0_data },
{.compatible = "altr,pcie-root-port-2.0",
.data = &altera_pcie_2_0_data },
+ {.compatible = "altr,pcie-root-port-3.0-f-tile",
+ .data = &altera_pcie_3_0_f_tile_data },
+ {.compatible = "altr,pcie-root-port-3.0-p-tile",
+ .data = &altera_pcie_3_0_p_tile_data },
+ {.compatible = "altr,pcie-root-port-3.0-r-tile",
+ .data = &altera_pcie_3_0_r_tile_data },
{},
};
@@ -791,11 +1021,18 @@ static int altera_pcie_probe(struct platform_device *pdev)
return ret;
}
- /* clear all interrupts */
- cra_writel(pcie, P2A_INT_STS_ALL, P2A_INT_STATUS);
- /* enable all interrupts */
- cra_writel(pcie, P2A_INT_ENA_ALL, P2A_INT_ENABLE);
- altera_pcie_host_init(pcie);
+ if (pcie->pcie_data->version == ALTERA_PCIE_V1 ||
+ pcie->pcie_data->version == ALTERA_PCIE_V2) {
+ /* clear all interrupts */
+ cra_writel(pcie, P2A_INT_STS_ALL, P2A_INT_STATUS);
+ /* enable all interrupts */
+ cra_writel(pcie, P2A_INT_ENA_ALL, P2A_INT_ENABLE);
+ altera_pcie_host_init(pcie);
+ } else if (pcie->pcie_data->version == ALTERA_PCIE_V3) {
+ writel(CFG_AER,
+ pcie->hip_base + pcie->pcie_data->port_conf_offset +
+ pcie->pcie_data->port_irq_enable_offset);
+ }
bridge->sysdata = pcie;
bridge->busnr = pcie->root_bus_nr;
diff --git a/drivers/pci/controller/pcie-apple.c b/drivers/pci/controller/pcie-apple.c
index a7e51bc1c2fe..0380d300adca 100644
--- a/drivers/pci/controller/pcie-apple.c
+++ b/drivers/pci/controller/pcie-apple.c
@@ -18,10 +18,12 @@
* Author: Marc Zyngier <maz@kernel.org>
*/
+#include <linux/bitfield.h>
#include <linux/gpio/consumer.h>
#include <linux/kernel.h>
#include <linux/iopoll.h>
#include <linux/irqchip/chained_irq.h>
+#include <linux/irqchip/irq-msi-lib.h>
#include <linux/irqdomain.h>
#include <linux/list.h>
#include <linux/module.h>
@@ -29,6 +31,9 @@
#include <linux/of_irq.h>
#include <linux/pci-ecam.h>
+#include "pci-host-common.h"
+
+/* T8103 (original M1) and related SoCs */
#define CORE_RC_PHYIF_CTL 0x00024
#define CORE_RC_PHYIF_CTL_RUN BIT(0)
#define CORE_RC_PHYIF_STAT 0x00028
@@ -39,14 +44,18 @@
#define CORE_RC_STAT_READY BIT(0)
#define CORE_FABRIC_STAT 0x04000
#define CORE_FABRIC_STAT_MASK 0x001F001F
-#define CORE_LANE_CFG(port) (0x84000 + 0x4000 * (port))
-#define CORE_LANE_CFG_REFCLK0REQ BIT(0)
-#define CORE_LANE_CFG_REFCLK1REQ BIT(1)
-#define CORE_LANE_CFG_REFCLK0ACK BIT(2)
-#define CORE_LANE_CFG_REFCLK1ACK BIT(3)
-#define CORE_LANE_CFG_REFCLKEN (BIT(9) | BIT(10))
-#define CORE_LANE_CTL(port) (0x84004 + 0x4000 * (port))
-#define CORE_LANE_CTL_CFGACC BIT(15)
+
+#define CORE_PHY_DEFAULT_BASE(port) (0x84000 + 0x4000 * (port))
+
+#define PHY_LANE_CFG 0x00000
+#define PHY_LANE_CFG_REFCLK0REQ BIT(0)
+#define PHY_LANE_CFG_REFCLK1REQ BIT(1)
+#define PHY_LANE_CFG_REFCLK0ACK BIT(2)
+#define PHY_LANE_CFG_REFCLK1ACK BIT(3)
+#define PHY_LANE_CFG_REFCLKEN (BIT(9) | BIT(10))
+#define PHY_LANE_CFG_REFCLKCGEN (BIT(30) | BIT(31))
+#define PHY_LANE_CTL 0x00004
+#define PHY_LANE_CTL_CFGACC BIT(15)
#define PORT_LTSSMCTL 0x00080
#define PORT_LTSSMCTL_START BIT(0)
@@ -100,7 +109,7 @@
#define PORT_REFCLK_CGDIS BIT(8)
#define PORT_PERST 0x00814
#define PORT_PERST_OFF BIT(0)
-#define PORT_RID2SID(i16) (0x00828 + 4 * (i16))
+#define PORT_RID2SID 0x00828
#define PORT_RID2SID_VALID BIT(31)
#define PORT_RID2SID_SID_SHIFT 16
#define PORT_RID2SID_BUS_SHIFT 8
@@ -118,7 +127,15 @@
#define PORT_TUNSTAT_PERST_ACK_PEND BIT(1)
#define PORT_PREFMEM_ENABLE 0x00994
-#define MAX_RID2SID 64
+/* T602x (M2-pro and co) */
+#define PORT_T602X_MSIADDR 0x016c
+#define PORT_T602X_MSIADDR_HI 0x0170
+#define PORT_T602X_PERST 0x082c
+#define PORT_T602X_RID2SID 0x3000
+#define PORT_T602X_MSIMAP 0x3800
+
+#define PORT_MSIMAP_ENABLE BIT(31)
+#define PORT_MSIMAP_TARGET GENMASK(7, 0)
/*
* The doorbell address is set to 0xfffff000, which by convention
@@ -129,29 +146,69 @@
*/
#define DOORBELL_ADDR CONFIG_PCIE_APPLE_MSI_DOORBELL_ADDR
+struct hw_info {
+ u32 phy_lane_ctl;
+ u32 port_msiaddr;
+ u32 port_msiaddr_hi;
+ u32 port_refclk;
+ u32 port_perst;
+ u32 port_rid2sid;
+ u32 port_msimap;
+ u32 max_rid2sid;
+};
+
+static const struct hw_info t8103_hw = {
+ .phy_lane_ctl = PHY_LANE_CTL,
+ .port_msiaddr = PORT_MSIADDR,
+ .port_msiaddr_hi = 0,
+ .port_refclk = PORT_REFCLK,
+ .port_perst = PORT_PERST,
+ .port_rid2sid = PORT_RID2SID,
+ .port_msimap = 0,
+ .max_rid2sid = 64,
+};
+
+static const struct hw_info t602x_hw = {
+ .phy_lane_ctl = 0,
+ .port_msiaddr = PORT_T602X_MSIADDR,
+ .port_msiaddr_hi = PORT_T602X_MSIADDR_HI,
+ .port_refclk = 0,
+ .port_perst = PORT_T602X_PERST,
+ .port_rid2sid = PORT_T602X_RID2SID,
+ .port_msimap = PORT_T602X_MSIMAP,
+	/* 16 on t602x; the larger size is a guess to allow autodetection on future HW */
+ .max_rid2sid = 512,
+};
+
struct apple_pcie {
struct mutex lock;
struct device *dev;
void __iomem *base;
- struct irq_domain *domain;
+ const struct hw_info *hw;
unsigned long *bitmap;
struct list_head ports;
+ struct list_head entry;
struct completion event;
struct irq_fwspec fwspec;
u32 nvecs;
};
struct apple_pcie_port {
+ raw_spinlock_t lock;
struct apple_pcie *pcie;
struct device_node *np;
void __iomem *base;
+ void __iomem *phy;
struct irq_domain *domain;
struct list_head entry;
- DECLARE_BITMAP(sid_map, MAX_RID2SID);
+ unsigned long *sid_map;
int sid_map_sz;
int idx;
};
+static LIST_HEAD(pcie_list);
+static DEFINE_MUTEX(pcie_list_lock);
+
static void rmw_set(u32 set, void __iomem *addr)
{
writel_relaxed(readl_relaxed(addr) | set, addr);
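
Rather than branching on a SoC version at each register access, the Apple driver now captures per-SoC register offsets in const hw_info tables reached through pcie->hw, with a zero offset doubling as "register absent" (see the port_refclk and phy_lane_ctl checks later in this diff). A self-contained sketch of that table-driven pattern (the perst offsets match the defines above; the refclk offset is illustrative):

#include <stdio.h>
#include <stdint.h>

/* Per-SoC register layout; a zero offset means the register doesn't exist. */
struct hw_info {
	uint32_t port_perst;
	uint32_t port_refclk;
};

static const struct hw_info t8103_hw = { .port_perst = 0x814, .port_refclk = 0x810 };
static const struct hw_info t602x_hw = { .port_perst = 0x82c, .port_refclk = 0 };

static void bring_up_port(const struct hw_info *hw)
{
	printf("deassert PERST# via 0x%x\n", hw->port_perst);
	if (hw->port_refclk)	/* feature-gate by offset */
		printf("enable refclk via 0x%x\n", hw->port_refclk);
}

int main(void)
{
	bring_up_port(&t8103_hw);
	bring_up_port(&t602x_hw);
	return 0;
}
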
@@ -162,27 +219,6 @@ static void rmw_clear(u32 clr, void __iomem *addr)
writel_relaxed(readl_relaxed(addr) & ~clr, addr);
}
-static void apple_msi_top_irq_mask(struct irq_data *d)
-{
- pci_msi_mask_irq(d);
- irq_chip_mask_parent(d);
-}
-
-static void apple_msi_top_irq_unmask(struct irq_data *d)
-{
- pci_msi_unmask_irq(d);
- irq_chip_unmask_parent(d);
-}
-
-static struct irq_chip apple_msi_top_chip = {
- .name = "PCIe MSI",
- .irq_mask = apple_msi_top_irq_mask,
- .irq_unmask = apple_msi_top_irq_unmask,
- .irq_eoi = irq_chip_eoi_parent,
- .irq_set_affinity = irq_chip_set_affinity_parent,
- .irq_set_type = irq_chip_set_type_parent,
-};
-
static void apple_msi_compose_msg(struct irq_data *data, struct msi_msg *msg)
{
msg->address_hi = upper_32_bits(DOORBELL_ADDR);
@@ -226,8 +262,7 @@ static int apple_msi_domain_alloc(struct irq_domain *domain, unsigned int virq,
for (i = 0; i < nr_irqs; i++) {
irq_domain_set_hwirq_and_chip(domain, virq + i, hwirq + i,
- &apple_msi_bottom_chip,
- domain->host_data);
+ &apple_msi_bottom_chip, pcie);
}
return 0;
@@ -251,24 +286,20 @@ static const struct irq_domain_ops apple_msi_domain_ops = {
.free = apple_msi_domain_free,
};
-static struct msi_domain_info apple_msi_info = {
- .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
- MSI_FLAG_MULTI_PCI_MSI | MSI_FLAG_PCI_MSIX),
- .chip = &apple_msi_top_chip,
-};
-
static void apple_port_irq_mask(struct irq_data *data)
{
struct apple_pcie_port *port = irq_data_get_irq_chip_data(data);
- writel_relaxed(BIT(data->hwirq), port->base + PORT_INTMSKSET);
+ guard(raw_spinlock_irqsave)(&port->lock);
+ rmw_set(BIT(data->hwirq), port->base + PORT_INTMSK);
}
static void apple_port_irq_unmask(struct irq_data *data)
{
struct apple_pcie_port *port = irq_data_get_irq_chip_data(data);
- writel_relaxed(BIT(data->hwirq), port->base + PORT_INTMSKCLR);
+ guard(raw_spinlock_irqsave)(&port->lock);
+ rmw_clear(BIT(data->hwirq), port->base + PORT_INTMSK);
}
static bool hwirq_is_intx(unsigned int hwirq)
@@ -372,7 +403,9 @@ static void apple_port_irq_handler(struct irq_desc *desc)
static int apple_pcie_port_setup_irq(struct apple_pcie_port *port)
{
struct fwnode_handle *fwnode = &port->np->fwnode;
+ struct apple_pcie *pcie = port->pcie;
unsigned int irq;
+ u32 val = 0;
/* FIXME: consider moving each interrupt under each port */
irq = irq_of_parse_and_map(to_of_node(dev_fwnode(port->pcie->dev)),
@@ -387,20 +420,31 @@ static int apple_pcie_port_setup_irq(struct apple_pcie_port *port)
return -ENOMEM;
/* Disable all interrupts */
- writel_relaxed(~0, port->base + PORT_INTMSKSET);
+ writel_relaxed(~0, port->base + PORT_INTMSK);
writel_relaxed(~0, port->base + PORT_INTSTAT);
+ writel_relaxed(~0, port->base + PORT_LINKCMDSTS);
irq_set_chained_handler_and_data(irq, apple_port_irq_handler, port);
/* Configure MSI base address */
BUILD_BUG_ON(upper_32_bits(DOORBELL_ADDR));
- writel_relaxed(lower_32_bits(DOORBELL_ADDR), port->base + PORT_MSIADDR);
+ writel_relaxed(lower_32_bits(DOORBELL_ADDR),
+ port->base + pcie->hw->port_msiaddr);
+ if (pcie->hw->port_msiaddr_hi)
+ writel_relaxed(0, port->base + pcie->hw->port_msiaddr_hi);
/* Enable MSIs, shared between all ports */
- writel_relaxed(0, port->base + PORT_MSIBASE);
- writel_relaxed((ilog2(port->pcie->nvecs) << PORT_MSICFG_L2MSINUM_SHIFT) |
- PORT_MSICFG_EN, port->base + PORT_MSICFG);
+ if (pcie->hw->port_msimap) {
+ for (int i = 0; i < pcie->nvecs; i++)
+ writel_relaxed(FIELD_PREP(PORT_MSIMAP_TARGET, i) |
+ PORT_MSIMAP_ENABLE,
+ port->base + pcie->hw->port_msimap + 4 * i);
+ } else {
+ writel_relaxed(0, port->base + PORT_MSIBASE);
+ val = ilog2(pcie->nvecs) << PORT_MSICFG_L2MSINUM_SHIFT;
+ }
+ writel_relaxed(val | PORT_MSICFG_EN, port->base + PORT_MSICFG);
return 0;
}
@@ -467,43 +511,47 @@ static int apple_pcie_setup_refclk(struct apple_pcie *pcie,
u32 stat;
int res;
- res = readl_relaxed_poll_timeout(pcie->base + CORE_RC_PHYIF_STAT, stat,
- stat & CORE_RC_PHYIF_STAT_REFCLK,
- 100, 50000);
- if (res < 0)
- return res;
+ if (pcie->hw->phy_lane_ctl)
+ rmw_set(PHY_LANE_CTL_CFGACC, port->phy + pcie->hw->phy_lane_ctl);
- rmw_set(CORE_LANE_CTL_CFGACC, pcie->base + CORE_LANE_CTL(port->idx));
- rmw_set(CORE_LANE_CFG_REFCLK0REQ, pcie->base + CORE_LANE_CFG(port->idx));
+ rmw_set(PHY_LANE_CFG_REFCLK0REQ, port->phy + PHY_LANE_CFG);
- res = readl_relaxed_poll_timeout(pcie->base + CORE_LANE_CFG(port->idx),
- stat, stat & CORE_LANE_CFG_REFCLK0ACK,
+ res = readl_relaxed_poll_timeout(port->phy + PHY_LANE_CFG,
+ stat, stat & PHY_LANE_CFG_REFCLK0ACK,
100, 50000);
if (res < 0)
return res;
- rmw_set(CORE_LANE_CFG_REFCLK1REQ, pcie->base + CORE_LANE_CFG(port->idx));
- res = readl_relaxed_poll_timeout(pcie->base + CORE_LANE_CFG(port->idx),
- stat, stat & CORE_LANE_CFG_REFCLK1ACK,
+ rmw_set(PHY_LANE_CFG_REFCLK1REQ, port->phy + PHY_LANE_CFG);
+ res = readl_relaxed_poll_timeout(port->phy + PHY_LANE_CFG,
+ stat, stat & PHY_LANE_CFG_REFCLK1ACK,
100, 50000);
if (res < 0)
return res;
- rmw_clear(CORE_LANE_CTL_CFGACC, pcie->base + CORE_LANE_CTL(port->idx));
+ if (pcie->hw->phy_lane_ctl)
+ rmw_clear(PHY_LANE_CTL_CFGACC, port->phy + pcie->hw->phy_lane_ctl);
- rmw_set(CORE_LANE_CFG_REFCLKEN, pcie->base + CORE_LANE_CFG(port->idx));
- rmw_set(PORT_REFCLK_EN, port->base + PORT_REFCLK);
+ rmw_set(PHY_LANE_CFG_REFCLKEN, port->phy + PHY_LANE_CFG);
+
+ if (pcie->hw->port_refclk)
+ rmw_set(PORT_REFCLK_EN, port->base + pcie->hw->port_refclk);
return 0;
}
+static void __iomem *port_rid2sid_addr(struct apple_pcie_port *port, int idx)
+{
+ return port->base + port->pcie->hw->port_rid2sid + 4 * idx;
+}
+
static u32 apple_pcie_rid2sid_write(struct apple_pcie_port *port,
int idx, u32 val)
{
- writel_relaxed(val, port->base + PORT_RID2SID(idx));
+ writel_relaxed(val, port_rid2sid_addr(port, idx));
/* Read back to ensure completion of the write */
- return readl_relaxed(port->base + PORT_RID2SID(idx));
+ return readl_relaxed(port_rid2sid_addr(port, idx));
}
static int apple_pcie_setup_port(struct apple_pcie *pcie,
@@ -512,6 +560,8 @@ static int apple_pcie_setup_port(struct apple_pcie *pcie,
struct platform_device *platform = to_platform_device(pcie->dev);
struct apple_pcie_port *port;
struct gpio_desc *reset;
+ struct resource *res;
+ char name[16];
u32 stat, idx;
int ret, i;
@@ -524,6 +574,10 @@ static int apple_pcie_setup_port(struct apple_pcie *pcie,
if (!port)
return -ENOMEM;
+ port->sid_map = devm_bitmap_zalloc(pcie->dev, pcie->hw->max_rid2sid, GFP_KERNEL);
+ if (!port->sid_map)
+ return -ENOMEM;
+
ret = of_property_read_u32_index(np, "reg", 0, &idx);
if (ret)
return ret;
@@ -533,14 +587,28 @@ static int apple_pcie_setup_port(struct apple_pcie *pcie,
port->pcie = pcie;
port->np = np;
- port->base = devm_platform_ioremap_resource(platform, port->idx + 2);
+ raw_spin_lock_init(&port->lock);
+
+ snprintf(name, sizeof(name), "port%d", port->idx);
+ res = platform_get_resource_byname(platform, IORESOURCE_MEM, name);
+ if (!res)
+ res = platform_get_resource(platform, IORESOURCE_MEM, port->idx + 2);
+
+ port->base = devm_ioremap_resource(&platform->dev, res);
if (IS_ERR(port->base))
return PTR_ERR(port->base);
+ snprintf(name, sizeof(name), "phy%d", port->idx);
+ res = platform_get_resource_byname(platform, IORESOURCE_MEM, name);
+ if (res)
+ port->phy = devm_ioremap_resource(&platform->dev, res);
+ else
+ port->phy = pcie->base + CORE_PHY_DEFAULT_BASE(port->idx);
+
rmw_set(PORT_APPCLK_EN, port->base + PORT_APPCLK);
/* Assert PERST# before setting up the clock */
- gpiod_set_value(reset, 1);
+ gpiod_set_value_cansleep(reset, 1);
ret = apple_pcie_setup_refclk(pcie, port);
if (ret < 0)
@@ -550,8 +618,8 @@ static int apple_pcie_setup_port(struct apple_pcie *pcie,
usleep_range(100, 200);
/* Deassert PERST# */
- rmw_set(PORT_PERST_OFF, port->base + PORT_PERST);
- gpiod_set_value(reset, 0);
+ rmw_set(PORT_PERST_OFF, port->base + pcie->hw->port_perst);
+ gpiod_set_value_cansleep(reset, 0);
/* Wait for 100ms after PERST# deassertion (PCIe r5.0, 6.6.1) */
msleep(100);
@@ -563,7 +631,11 @@ static int apple_pcie_setup_port(struct apple_pcie *pcie,
return ret;
}
- rmw_clear(PORT_REFCLK_CGDIS, port->base + PORT_REFCLK);
+ if (pcie->hw->port_refclk)
+ rmw_clear(PORT_REFCLK_CGDIS, port->base + pcie->hw->port_refclk);
+ else
+ rmw_set(PHY_LANE_CFG_REFCLKCGEN, port->phy + PHY_LANE_CFG);
+
rmw_clear(PORT_APPCLK_CGDIS, port->base + PORT_APPCLK);
ret = apple_pcie_port_setup_irq(port);
@@ -571,7 +643,7 @@ static int apple_pcie_setup_port(struct apple_pcie *pcie,
return ret;
/* Reset all RID/SID mappings, and check for RAZ/WI registers */
- for (i = 0; i < MAX_RID2SID; i++) {
+ for (i = 0; i < pcie->hw->max_rid2sid; i++) {
if (apple_pcie_rid2sid_write(port, i, 0xbad1d) != 0xbad1d)
break;
apple_pcie_rid2sid_write(port, i, 0);
@@ -584,6 +656,9 @@ static int apple_pcie_setup_port(struct apple_pcie *pcie,
list_add_tail(&port->entry, &pcie->ports);
init_completion(&pcie->event);
+ /* In the success path, we keep a reference to np around */
+ of_node_get(np);
+
ret = apple_pcie_port_register_irqs(port);
WARN_ON(ret);
@@ -595,11 +670,28 @@ static int apple_pcie_setup_port(struct apple_pcie *pcie,
return 0;
}
+static const struct msi_parent_ops apple_msi_parent_ops = {
+ .supported_flags = (MSI_GENERIC_FLAGS_MASK |
+ MSI_FLAG_PCI_MSIX |
+ MSI_FLAG_MULTI_PCI_MSI),
+ .required_flags = (MSI_FLAG_USE_DEF_DOM_OPS |
+ MSI_FLAG_USE_DEF_CHIP_OPS |
+ MSI_FLAG_PCI_MSI_MASK_PARENT),
+ .chip_flags = MSI_CHIP_FLAG_SET_EOI,
+ .bus_select_token = DOMAIN_BUS_PCI_MSI,
+ .init_dev_msi_info = msi_lib_init_dev_msi_info,
+};
+
static int apple_msi_init(struct apple_pcie *pcie)
{
struct fwnode_handle *fwnode = dev_fwnode(pcie->dev);
+ struct irq_domain_info info = {
+ .fwnode = fwnode,
+ .ops = &apple_msi_domain_ops,
+ .size = pcie->nvecs,
+ .host_data = pcie,
+ };
struct of_phandle_args args = {};
- struct irq_domain *parent;
int ret;
ret = of_parse_phandle_with_args(to_of_node(fwnode), "msi-ranges",
@@ -619,38 +711,58 @@ static int apple_msi_init(struct apple_pcie *pcie)
if (!pcie->bitmap)
return -ENOMEM;
- parent = irq_find_matching_fwspec(&pcie->fwspec, DOMAIN_BUS_WIRED);
- if (!parent) {
+ info.parent = irq_find_matching_fwspec(&pcie->fwspec, DOMAIN_BUS_WIRED);
+ if (!info.parent) {
dev_err(pcie->dev, "failed to find parent domain\n");
return -ENXIO;
}
- parent = irq_domain_create_hierarchy(parent, 0, pcie->nvecs, fwnode,
- &apple_msi_domain_ops, pcie);
- if (!parent) {
+ if (!msi_create_parent_irq_domain(&info, &apple_msi_parent_ops)) {
dev_err(pcie->dev, "failed to create IRQ domain\n");
return -ENOMEM;
}
- irq_domain_update_bus_token(parent, DOMAIN_BUS_NEXUS);
+ return 0;
+}
- pcie->domain = pci_msi_create_irq_domain(fwnode, &apple_msi_info,
- parent);
- if (!pcie->domain) {
- dev_err(pcie->dev, "failed to create MSI domain\n");
- irq_domain_remove(parent);
- return -ENOMEM;
+static void apple_pcie_register(struct apple_pcie *pcie)
+{
+ guard(mutex)(&pcie_list_lock);
+
+ list_add_tail(&pcie->entry, &pcie_list);
+}
+
+static void apple_pcie_unregister(struct apple_pcie *pcie)
+{
+ guard(mutex)(&pcie_list_lock);
+
+ list_del(&pcie->entry);
+}
+
+static struct apple_pcie *apple_pcie_lookup(struct device *dev)
+{
+ struct apple_pcie *pcie;
+
+ guard(mutex)(&pcie_list_lock);
+
+ list_for_each_entry(pcie, &pcie_list, entry) {
+ if (pcie->dev == dev)
+ return pcie;
}
- return 0;
+ return NULL;
}
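/*
 * guard(mutex) (from linux/cleanup.h) scopes the lock to the enclosing
 * block: it is released on every exit path, including the early return
 * inside the loop above, which is why no explicit mutex_lock()/
 * mutex_unlock() pairs appear in these three helpers.
 */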
static struct apple_pcie_port *apple_pcie_get_port(struct pci_dev *pdev)
{
struct pci_config_window *cfg = pdev->sysdata;
- struct apple_pcie *pcie = cfg->priv;
+ struct apple_pcie *pcie;
struct pci_dev *port_pdev;
struct apple_pcie_port *port;
+ pcie = apple_pcie_lookup(cfg->parent);
+ if (WARN_ON(!pcie))
+ return NULL;
+
/* Find the root port this device is on */
port_pdev = pcie_find_root_port(pdev);
@@ -716,7 +828,7 @@ static void apple_pcie_disable_device(struct pci_host_bridge *bridge, struct pci
for_each_set_bit(idx, port->sid_map, port->sid_map_sz) {
u32 val;
- val = readl_relaxed(port->base + PORT_RID2SID(idx));
+ val = readl_relaxed(port_rid2sid_addr(port, idx));
if ((val & 0xffff) == rid) {
apple_pcie_rid2sid_write(port, idx, 0);
bitmap_release_region(port->sid_map, idx, 0);
@@ -731,35 +843,17 @@ static void apple_pcie_disable_device(struct pci_host_bridge *bridge, struct pci
static int apple_pcie_init(struct pci_config_window *cfg)
{
struct device *dev = cfg->parent;
- struct platform_device *platform = to_platform_device(dev);
- struct device_node *of_port;
struct apple_pcie *pcie;
int ret;
- pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
- if (!pcie)
- return -ENOMEM;
-
- pcie->dev = dev;
-
- mutex_init(&pcie->lock);
-
- pcie->base = devm_platform_ioremap_resource(platform, 1);
- if (IS_ERR(pcie->base))
- return PTR_ERR(pcie->base);
-
- cfg->priv = pcie;
- INIT_LIST_HEAD(&pcie->ports);
-
- ret = apple_msi_init(pcie);
- if (ret)
- return ret;
+ pcie = apple_pcie_lookup(dev);
+ if (WARN_ON(!pcie))
+ return -ENOENT;
- for_each_child_of_node(dev->of_node, of_port) {
+ for_each_available_child_of_node_scoped(dev->of_node, of_port) {
ret = apple_pcie_setup_port(pcie, of_port);
if (ret) {
- dev_err(pcie->dev, "Port %pOF setup fail: %d\n", of_port, ret);
- of_node_put(of_port);
+			dev_err(dev, "Port %pOF setup failed: %d\n", of_port, ret);
return ret;
}
}
@@ -778,14 +872,49 @@ static const struct pci_ecam_ops apple_pcie_cfg_ecam_ops = {
}
};
+static int apple_pcie_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct apple_pcie *pcie;
+ int ret;
+
+ pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
+ if (!pcie)
+ return -ENOMEM;
+
+ pcie->dev = dev;
+ pcie->hw = of_device_get_match_data(dev);
+ if (!pcie->hw)
+ return -ENODEV;
+ pcie->base = devm_platform_ioremap_resource(pdev, 1);
+ if (IS_ERR(pcie->base))
+ return PTR_ERR(pcie->base);
+
+ mutex_init(&pcie->lock);
+ INIT_LIST_HEAD(&pcie->ports);
+
+ ret = apple_msi_init(pcie);
+ if (ret)
+ return ret;
+
+ apple_pcie_register(pcie);
+
+ ret = pci_host_common_init(pdev, &apple_pcie_cfg_ecam_ops);
+ if (ret)
+ apple_pcie_unregister(pcie);
+
+ return ret;
+}
+
static const struct of_device_id apple_pcie_of_match[] = {
- { .compatible = "apple,pcie", .data = &apple_pcie_cfg_ecam_ops },
+ { .compatible = "apple,t6020-pcie", .data = &t602x_hw },
+ { .compatible = "apple,pcie", .data = &t8103_hw },
{ }
};
MODULE_DEVICE_TABLE(of, apple_pcie_of_match);
static struct platform_driver apple_pcie_driver = {
- .probe = pci_host_common_probe,
+ .probe = apple_pcie_probe,
.driver = {
.name = "pcie-apple",
.of_match_table = apple_pcie_of_match,
diff --git a/drivers/pci/controller/pcie-brcmstb.c b/drivers/pci/controller/pcie-brcmstb.c
index 1a3bdc01b074..92887b394eb4 100644
--- a/drivers/pci/controller/pcie-brcmstb.c
+++ b/drivers/pci/controller/pcie-brcmstb.c
@@ -40,7 +40,7 @@
/* Broadcom STB PCIe Register Offsets */
#define PCIE_RC_CFG_VENDOR_VENDOR_SPECIFIC_REG1 0x0188
#define PCIE_RC_CFG_VENDOR_VENDOR_SPECIFIC_REG1_ENDIAN_MODE_BAR2_MASK 0xc
-#define PCIE_RC_CFG_VENDOR_SPCIFIC_REG1_LITTLE_ENDIAN 0x0
+#define PCIE_RC_CFG_VENDOR_SPECIFIC_REG1_LITTLE_ENDIAN 0x0
#define PCIE_RC_CFG_PRIV1_ID_VAL3 0x043c
#define PCIE_RC_CFG_PRIV1_ID_VAL3_CLASS_CODE_MASK 0xffffff
@@ -55,6 +55,10 @@
#define PCIE_RC_DL_MDIO_WR_DATA 0x1104
#define PCIE_RC_DL_MDIO_RD_DATA 0x1108
+#define PCIE_RC_PL_PHY_CTL_15 0x184c
+#define PCIE_RC_PL_PHY_CTL_15_DIS_PLL_PD_MASK 0x400000
+#define PCIE_RC_PL_PHY_CTL_15_PM_CLK_PERIOD_MASK 0xff
+
#define PCIE_MISC_MISC_CTRL 0x4008
#define PCIE_MISC_MISC_CTRL_PCIE_RCB_64B_MODE_MASK 0x80
#define PCIE_MISC_MISC_CTRL_PCIE_RCB_MPS_MODE_MASK 0x400
@@ -146,9 +150,6 @@
#define MSI_INT_MASK_SET 0x10
#define MSI_INT_MASK_CLR 0x14
-#define PCIE_EXT_CFG_DATA 0x8000
-#define PCIE_EXT_CFG_INDEX 0x9000
-
#define PCIE_RGR1_SW_INIT_1_PERST_MASK 0x1
#define PCIE_RGR1_SW_INIT_1_PERST_SHIFT 0x0
@@ -174,8 +175,9 @@
#define MDIO_PORT0 0x0
#define MDIO_DATA_MASK 0x7fffffff
#define MDIO_PORT_MASK 0xf0000
+#define MDIO_PORT_EXT_MASK 0x200000
#define MDIO_REGAD_MASK 0xffff
-#define MDIO_CMD_MASK 0xfff00000
+#define MDIO_CMD_MASK 0x00100000
#define MDIO_CMD_READ 0x1
#define MDIO_CMD_WRITE 0x0
#define MDIO_DATA_DONE_MASK 0x80000000
@@ -191,11 +193,11 @@
#define SSC_STATUS_PLL_LOCK_MASK 0x800
#define PCIE_BRCM_MAX_MEMC 3
-#define IDX_ADDR(pcie) ((pcie)->reg_offsets[EXT_CFG_INDEX])
-#define DATA_ADDR(pcie) ((pcie)->reg_offsets[EXT_CFG_DATA])
-#define PCIE_RGR1_SW_INIT_1(pcie) ((pcie)->reg_offsets[RGR1_SW_INIT_1])
-#define HARD_DEBUG(pcie) ((pcie)->reg_offsets[PCIE_HARD_DEBUG])
-#define INTR2_CPU_BASE(pcie) ((pcie)->reg_offsets[PCIE_INTR2_CPU_BASE])
+#define IDX_ADDR(pcie) ((pcie)->cfg->offsets[EXT_CFG_INDEX])
+#define DATA_ADDR(pcie) ((pcie)->cfg->offsets[EXT_CFG_DATA])
+#define PCIE_RGR1_SW_INIT_1(pcie) ((pcie)->cfg->offsets[RGR1_SW_INIT_1])
+#define HARD_DEBUG(pcie) ((pcie)->cfg->offsets[PCIE_HARD_DEBUG])
+#define INTR2_CPU_BASE(pcie) ((pcie)->cfg->offsets[PCIE_INTR2_CPU_BASE])
/* Rescal registers */
#define PCIE_DVT_PMU_PCIE_PHY_CTRL 0xc700
@@ -234,13 +236,24 @@ struct inbound_win {
u64 cpu_addr;
};
+/*
+ * The RESCAL block is tied to PCIe controller #1, regardless of the number of
+ * controllers. Turning off PCIe controller #1 therefore prevents access to
+ * the RESCAL register blocks from any other controller, and depending upon
+ * the bus fabric such an access may time out (UBUS/GISB) or hang (AXI).
+ */
+#define CFG_QUIRK_AVOID_BRIDGE_SHUTDOWN BIT(0)
+
struct pcie_cfg_data {
const int *offsets;
const enum pcie_soc_base soc_base;
const bool has_phy;
+ const u32 quirks;
u8 num_inbound_wins;
int (*perst_set)(struct brcm_pcie *pcie, u32 val);
int (*bridge_sw_init_set)(struct brcm_pcie *pcie, u32 val);
+ int (*post_setup)(struct brcm_pcie *pcie);
};
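/*
 * Sketch of how a quirk bit is consumed (this mirrors the check added in
 * brcm_pcie_turn_off() further down):
 *
 *	if (!(pcie->cfg->quirks & CFG_QUIRK_AVOID_BRIDGE_SHUTDOWN))
 *		ret = pcie->cfg->bridge_sw_init_set(pcie, 1);
 */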
struct subdev_regulators {
@@ -276,8 +289,6 @@ struct brcm_pcie {
int gen;
u64 msi_target_addr;
struct brcm_msi *msi;
- const int *reg_offsets;
- enum pcie_soc_base soc_base;
struct reset_control *rescal;
struct reset_control *perst_reset;
struct reset_control *bridge_reset;
@@ -285,17 +296,14 @@ struct brcm_pcie {
int num_memc;
u64 memc_size[PCIE_BRCM_MAX_MEMC];
u32 hw_rev;
- int (*perst_set)(struct brcm_pcie *pcie, u32 val);
- int (*bridge_sw_init_set)(struct brcm_pcie *pcie, u32 val);
struct subdev_regulators *sr;
bool ep_wakeup_capable;
- bool has_phy;
- u8 num_inbound_wins;
+ const struct pcie_cfg_data *cfg;
};
static inline bool is_bmips(const struct brcm_pcie *pcie)
{
- return pcie->soc_base == BCM7435 || pcie->soc_base == BCM7425;
+ return pcie->cfg->soc_base == BCM7435 || pcie->cfg->soc_base == BCM7425;
}
/*
@@ -309,8 +317,8 @@ static int brcm_pcie_encode_ibar_size(u64 size)
if (log2_in >= 12 && log2_in <= 15)
/* Covers 4KB to 32KB (inclusive) */
return (log2_in - 12) + 0x1c;
- else if (log2_in >= 16 && log2_in <= 35)
- /* Covers 64KB to 32GB, (inclusive) */
+ else if (log2_in >= 16 && log2_in <= 36)
+		/* Covers 64KB to 64GB (inclusive) */
return log2_in - 15;
/* Something is awry so disable */
return 0;
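/*
 * Worked example: a 64KB window has ilog2(size) == 16 and encodes as
 * 16 - 15 = 1; a 64GB window (log2 == 36) encodes as 21; 4KB..32KB map
 * to 0x1c..0x1f. Anything outside 4KB..64GB returns 0, i.e. disabled.
 */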
@@ -320,6 +328,7 @@ static u32 brcm_pcie_mdio_form_pkt(int port, int regad, int cmd)
{
u32 pkt = 0;
+ pkt |= FIELD_PREP(MDIO_PORT_EXT_MASK, port >> 4);
pkt |= FIELD_PREP(MDIO_PORT_MASK, port);
pkt |= FIELD_PREP(MDIO_REGAD_MASK, regad);
pkt |= FIELD_PREP(MDIO_CMD_MASK, cmd);
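/*
 * Bit layout recap for the packet formed above: the extended port bit
 * lands in bit 21 (MDIO_PORT_EXT_MASK), the command in bit 20
 * (MDIO_CMD_MASK), the low port nibble in bits 19:16 (MDIO_PORT_MASK)
 * and the register address in bits 15:0. FIELD_PREP() masks each value
 * into its field, so e.g. port 0x10 sets only the extended bit.
 */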
@@ -405,10 +414,10 @@ static void brcm_pcie_set_gen(struct brcm_pcie *pcie, int gen)
u16 lnkctl2 = readw(pcie->base + BRCM_PCIE_CAP_REGS + PCI_EXP_LNKCTL2);
u32 lnkcap = readl(pcie->base + PCIE_RC_CFG_PRIV1_LINK_CAPABILITY);
- lnkcap = (lnkcap & ~PCI_EXP_LNKCAP_SLS) | gen;
+ u32p_replace_bits(&lnkcap, gen, PCI_EXP_LNKCAP_SLS);
writel(lnkcap, pcie->base + PCIE_RC_CFG_PRIV1_LINK_CAPABILITY);
- lnkctl2 = (lnkctl2 & ~0xf) | gen;
+ u16p_replace_bits(&lnkctl2, gen, PCI_EXP_LNKCTL2_TLS);
writew(lnkctl2, pcie->base + BRCM_PCIE_CAP_REGS + PCI_EXP_LNKCTL2);
}
@@ -550,7 +559,7 @@ static int brcm_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
return hwirq;
for (i = 0; i < nr_irqs; i++)
- irq_domain_set_info(domain, virq + i, hwirq + i,
+ irq_domain_set_info(domain, virq + i, (irq_hw_number_t)hwirq + i,
&brcm_msi_bottom_irq_chip, domain->host_data,
handle_edge_irq, NULL, NULL);
return 0;
@@ -572,10 +581,10 @@ static const struct irq_domain_ops msi_domain_ops = {
static int brcm_allocate_domains(struct brcm_msi *msi)
{
- struct fwnode_handle *fwnode = of_node_to_fwnode(msi->np);
+ struct fwnode_handle *fwnode = of_fwnode_handle(msi->np);
struct device *dev = msi->dev;
- msi->inner_domain = irq_domain_add_linear(NULL, msi->nr, &msi_domain_ops, msi);
+ msi->inner_domain = irq_domain_create_linear(NULL, msi->nr, &msi_domain_ops, msi);
if (!msi->inner_domain) {
dev_err(dev, "failed to create IRQ domain\n");
return -ENOMEM;
@@ -717,8 +726,8 @@ static void __iomem *brcm_pcie_map_bus(struct pci_bus *bus,
/* For devices, write to the config space index register */
idx = PCIE_ECAM_OFFSET(bus->number, devfn, 0);
- writel(idx, pcie->base + PCIE_EXT_CFG_INDEX);
- return base + PCIE_EXT_CFG_DATA + PCIE_ECAM_REG(where);
+ writel(idx, base + IDX_ADDR(pcie));
+ return base + DATA_ADDR(pcie) + PCIE_ECAM_REG(where);
}
static void __iomem *brcm7425_pcie_map_bus(struct pci_bus *bus,
@@ -821,6 +830,39 @@ static int brcm_pcie_perst_set_generic(struct brcm_pcie *pcie, u32 val)
return 0;
}
+static int brcm_pcie_post_setup_bcm2712(struct brcm_pcie *pcie)
+{
+ static const u16 data[] = { 0x50b9, 0xbda1, 0x0094, 0x97b4, 0x5030,
+ 0x5030, 0x0007 };
+ static const u8 regs[] = { 0x16, 0x17, 0x18, 0x19, 0x1b, 0x1c, 0x1e };
+ int ret, i;
+ u32 tmp;
+
+ /* Allow a 54MHz (xosc) refclk source */
+ ret = brcm_pcie_mdio_write(pcie->base, MDIO_PORT0, SET_ADDR_OFFSET, 0x1600);
+ if (ret < 0)
+ return ret;
+
+ for (i = 0; i < ARRAY_SIZE(regs); i++) {
+ ret = brcm_pcie_mdio_write(pcie->base, MDIO_PORT0, regs[i], data[i]);
+ if (ret < 0)
+ return ret;
+ }
+
+ usleep_range(100, 200);
+
+	/*
+	 * Set L1SS sub-state timers to avoid lengthy state transitions;
+	 * the PM clock period is 18.52ns (1/54MHz, rounded down).
+	 */
+ tmp = readl(pcie->base + PCIE_RC_PL_PHY_CTL_15);
+ tmp &= ~PCIE_RC_PL_PHY_CTL_15_PM_CLK_PERIOD_MASK;
+ tmp |= 0x12;
+ writel(tmp, pcie->base + PCIE_RC_PL_PHY_CTL_15);
+
+ return 0;
+}
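/*
 * The 0x12 written above is the PM clock period in whole nanoseconds:
 * 1 / 54MHz = 18.518ns, rounded down to 18 == 0x12, which fits the
 * 8-bit PCIE_RC_PL_PHY_CTL_15_PM_CLK_PERIOD_MASK field.
 */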
+
static void add_inbound_win(struct inbound_win *b, u8 *count, u64 size,
u64 cpu_addr, u64 pci_offset)
{
@@ -855,7 +897,7 @@ static int brcm_pcie_get_inbound_wins(struct brcm_pcie *pcie,
* security considerations, and is not implemented in our modern
* SoCs.
*/
- if (pcie->soc_base != BCM7712)
+ if (pcie->cfg->soc_base != BCM7712)
add_inbound_win(b++, &n, 0, 0, 0);
resource_list_for_each_entry(entry, &bridge->dma_ranges) {
@@ -872,10 +914,10 @@ static int brcm_pcie_get_inbound_wins(struct brcm_pcie *pcie,
* That being said, each BARs size must still be a power of
* two.
*/
- if (pcie->soc_base == BCM7712)
+ if (pcie->cfg->soc_base == BCM7712)
add_inbound_win(b++, &n, size, cpu_start, pcie_start);
- if (n > pcie->num_inbound_wins)
+ if (n > pcie->cfg->num_inbound_wins)
break;
}
@@ -889,7 +931,7 @@ static int brcm_pcie_get_inbound_wins(struct brcm_pcie *pcie,
* that enables multiple memory controllers. As such, it can return
* now without doing special configuration.
*/
- if (pcie->soc_base == BCM7712)
+ if (pcie->cfg->soc_base == BCM7712)
return n;
ret = of_property_read_variable_u64_array(pcie->np, "brcm,scb-sizes", pcie->memc_size, 1,
@@ -1012,7 +1054,7 @@ static void set_inbound_win_registers(struct brcm_pcie *pcie,
* 7712:
* All of their BARs need to be set.
*/
- if (pcie->soc_base == BCM7712) {
+ if (pcie->cfg->soc_base == BCM7712) {
/* BUS remap register settings */
reg_offset = brcm_ubus_reg_offset(i);
tmp = lower_32_bits(cpu_addr) & ~0xfff;
@@ -1036,15 +1078,15 @@ static int brcm_pcie_setup(struct brcm_pcie *pcie)
int memc, ret;
/* Reset the bridge */
- ret = pcie->bridge_sw_init_set(pcie, 1);
+ ret = pcie->cfg->bridge_sw_init_set(pcie, 1);
if (ret)
return ret;
/* Ensure that PERST# is asserted; some bootloaders may deassert it. */
- if (pcie->soc_base == BCM2711) {
- ret = pcie->perst_set(pcie, 1);
+ if (pcie->cfg->soc_base == BCM2711) {
+ ret = pcie->cfg->perst_set(pcie, 1);
if (ret) {
- pcie->bridge_sw_init_set(pcie, 0);
+ pcie->cfg->bridge_sw_init_set(pcie, 0);
return ret;
}
}
@@ -1052,7 +1094,7 @@ static int brcm_pcie_setup(struct brcm_pcie *pcie)
usleep_range(100, 200);
/* Take the bridge out of reset */
- ret = pcie->bridge_sw_init_set(pcie, 0);
+ ret = pcie->cfg->bridge_sw_init_set(pcie, 0);
if (ret)
return ret;
@@ -1072,9 +1114,9 @@ static int brcm_pcie_setup(struct brcm_pcie *pcie)
*/
if (is_bmips(pcie))
burst = 0x1; /* 256 bytes */
- else if (pcie->soc_base == BCM2711)
+ else if (pcie->cfg->soc_base == BCM2711)
burst = 0x0; /* 128 bytes */
- else if (pcie->soc_base == BCM7278)
+ else if (pcie->cfg->soc_base == BCM7278)
burst = 0x3; /* 512 bytes */
else
burst = 0x2; /* 512 bytes */
@@ -1180,10 +1222,16 @@ static int brcm_pcie_setup(struct brcm_pcie *pcie)
/* PCIe->SCB endian mode for inbound window */
tmp = readl(base + PCIE_RC_CFG_VENDOR_VENDOR_SPECIFIC_REG1);
- u32p_replace_bits(&tmp, PCIE_RC_CFG_VENDOR_SPCIFIC_REG1_LITTLE_ENDIAN,
+ u32p_replace_bits(&tmp, PCIE_RC_CFG_VENDOR_SPECIFIC_REG1_LITTLE_ENDIAN,
PCIE_RC_CFG_VENDOR_VENDOR_SPECIFIC_REG1_ENDIAN_MODE_BAR2_MASK);
writel(tmp, base + PCIE_RC_CFG_VENDOR_VENDOR_SPECIFIC_REG1);
+ if (pcie->cfg->post_setup) {
+ ret = pcie->cfg->post_setup(pcie);
+ if (ret < 0)
+ return ret;
+ }
+
return 0;
}
@@ -1199,7 +1247,7 @@ static void brcm_extend_rbus_timeout(struct brcm_pcie *pcie)
u32 timeout_us = 4000000; /* 4 seconds, our setting for L1SS */
/* 7712 does not have this (RGR1) timer */
- if (pcie->soc_base == BCM7712)
+ if (pcie->cfg->soc_base == BCM7712)
return;
/* Each unit in timeout register is 1/216,000,000 seconds */
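/*
 * For the 4 s L1SS setting above that works out to
 * 4 * 216,000,000 = 864,000,000 register units.
 */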
@@ -1281,7 +1329,7 @@ static int brcm_pcie_start_link(struct brcm_pcie *pcie)
brcm_pcie_set_gen(pcie, pcie->gen);
/* Unassert the fundamental reset */
- ret = pcie->perst_set(pcie, 0);
+ ret = pcie->cfg->perst_set(pcie, 0);
if (ret)
return ret;
@@ -1465,12 +1513,12 @@ static int brcm_phy_cntl(struct brcm_pcie *pcie, const int start)
static inline int brcm_phy_start(struct brcm_pcie *pcie)
{
- return pcie->has_phy ? brcm_phy_cntl(pcie, 1) : 0;
+ return pcie->cfg->has_phy ? brcm_phy_cntl(pcie, 1) : 0;
}
static inline int brcm_phy_stop(struct brcm_pcie *pcie)
{
- return pcie->has_phy ? brcm_phy_cntl(pcie, 0) : 0;
+ return pcie->cfg->has_phy ? brcm_phy_cntl(pcie, 0) : 0;
}
static int brcm_pcie_turn_off(struct brcm_pcie *pcie)
@@ -1481,7 +1529,7 @@ static int brcm_pcie_turn_off(struct brcm_pcie *pcie)
if (brcm_pcie_link_up(pcie))
brcm_pcie_enter_l23(pcie);
/* Assert fundamental reset */
- ret = pcie->perst_set(pcie, 1);
+ ret = pcie->cfg->perst_set(pcie, 1);
if (ret)
return ret;
@@ -1495,8 +1543,9 @@ static int brcm_pcie_turn_off(struct brcm_pcie *pcie)
u32p_replace_bits(&tmp, 1, PCIE_MISC_HARD_PCIE_HARD_DEBUG_SERDES_IDDQ_MASK);
writel(tmp, base + HARD_DEBUG(pcie));
- /* Shutdown PCIe bridge */
- ret = pcie->bridge_sw_init_set(pcie, 1);
+ if (!(pcie->cfg->quirks & CFG_QUIRK_AVOID_BRIDGE_SHUTDOWN))
+ /* Shutdown PCIe bridge */
+ ret = pcie->cfg->bridge_sw_init_set(pcie, 1);
return ret;
}
@@ -1584,7 +1633,7 @@ static int brcm_pcie_resume_noirq(struct device *dev)
goto err_reset;
/* Take bridge out of reset so we can access the SERDES reg */
- pcie->bridge_sw_init_set(pcie, 0);
+ pcie->cfg->bridge_sw_init_set(pcie, 0);
/* SERDES_IDDQ = 0 */
tmp = readl(base + HARD_DEBUG(pcie));
@@ -1662,7 +1711,7 @@ static void brcm_pcie_remove(struct platform_device *pdev)
static const int pcie_offsets[] = {
[RGR1_SW_INIT_1] = 0x9210,
[EXT_CFG_INDEX] = 0x9000,
- [EXT_CFG_DATA] = 0x9004,
+ [EXT_CFG_DATA] = 0x8000,
[PCIE_HARD_DEBUG] = 0x4204,
[PCIE_INTR2_CPU_BASE] = 0x4300,
};
@@ -1670,7 +1719,7 @@ static const int pcie_offsets[] = {
static const int pcie_offsets_bcm7278[] = {
[RGR1_SW_INIT_1] = 0xc010,
[EXT_CFG_INDEX] = 0x9000,
- [EXT_CFG_DATA] = 0x9004,
+ [EXT_CFG_DATA] = 0x8000,
[PCIE_HARD_DEBUG] = 0x4204,
[PCIE_INTR2_CPU_BASE] = 0x4300,
};
@@ -1684,8 +1733,9 @@ static const int pcie_offsets_bcm7425[] = {
};
static const int pcie_offsets_bcm7712[] = {
+ [RGR1_SW_INIT_1] = 0x9210,
[EXT_CFG_INDEX] = 0x9000,
- [EXT_CFG_DATA] = 0x9004,
+ [EXT_CFG_DATA] = 0x8000,
[PCIE_HARD_DEBUG] = 0x4304,
[PCIE_INTR2_CPU_BASE] = 0x4400,
};
@@ -1706,6 +1756,16 @@ static const struct pcie_cfg_data bcm2711_cfg = {
.num_inbound_wins = 3,
};
+static const struct pcie_cfg_data bcm2712_cfg = {
+ .offsets = pcie_offsets_bcm7712,
+ .soc_base = BCM7712,
+ .perst_set = brcm_pcie_perst_set_7278,
+ .bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_generic,
+ .post_setup = brcm_pcie_post_setup_bcm2712,
+ .quirks = CFG_QUIRK_AVOID_BRIDGE_SHUTDOWN,
+ .num_inbound_wins = 10,
+};
+
static const struct pcie_cfg_data bcm4908_cfg = {
.offsets = pcie_offsets,
.soc_base = BCM4908,
@@ -1757,6 +1817,7 @@ static const struct pcie_cfg_data bcm7712_cfg = {
static const struct of_device_id brcm_pcie_match[] = {
{ .compatible = "brcm,bcm2711-pcie", .data = &bcm2711_cfg },
+ { .compatible = "brcm,bcm2712-pcie", .data = &bcm2712_cfg },
{ .compatible = "brcm,bcm4908-pcie", .data = &bcm4908_cfg },
{ .compatible = "brcm,bcm7211-pcie", .data = &generic_cfg },
{ .compatible = "brcm,bcm7216-pcie", .data = &bcm7216_cfg },
@@ -1805,12 +1866,7 @@ static int brcm_pcie_probe(struct platform_device *pdev)
pcie = pci_host_bridge_priv(bridge);
pcie->dev = &pdev->dev;
pcie->np = np;
- pcie->reg_offsets = data->offsets;
- pcie->soc_base = data->soc_base;
- pcie->perst_set = data->perst_set;
- pcie->bridge_sw_init_set = data->bridge_sw_init_set;
- pcie->has_phy = data->has_phy;
- pcie->num_inbound_wins = data->num_inbound_wins;
+ pcie->cfg = data;
pcie->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(pcie->base))
@@ -1845,7 +1901,7 @@ static int brcm_pcie_probe(struct platform_device *pdev)
if (ret)
return dev_err_probe(&pdev->dev, ret, "could not enable clock\n");
- pcie->bridge_sw_init_set(pcie, 0);
+ pcie->cfg->bridge_sw_init_set(pcie, 0);
if (pcie->swinit_reset) {
ret = reset_control_assert(pcie->swinit_reset);
@@ -1884,7 +1940,8 @@ static int brcm_pcie_probe(struct platform_device *pdev)
goto fail;
pcie->hw_rev = readl(pcie->base + PCIE_MISC_REVISION);
- if (pcie->soc_base == BCM4908 && pcie->hw_rev >= BRCM_PCIE_HW_REV_3_20) {
+ if (pcie->cfg->soc_base == BCM4908 &&
+ pcie->hw_rev >= BRCM_PCIE_HW_REV_3_20) {
dev_err(pcie->dev, "hardware revision with unsupported PERST# setup\n");
ret = -ENODEV;
goto fail;
@@ -1904,7 +1961,8 @@ static int brcm_pcie_probe(struct platform_device *pdev)
}
}
- bridge->ops = pcie->soc_base == BCM7425 ? &brcm7425_pcie_ops : &brcm_pcie_ops;
+ bridge->ops = pcie->cfg->soc_base == BCM7425 ?
+ &brcm7425_pcie_ops : &brcm_pcie_ops;
bridge->sysdata = pcie;
platform_set_drvdata(pdev, pcie);
@@ -1947,3 +2005,4 @@ module_platform_driver(brcm_pcie_driver);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Broadcom STB PCIe RC driver");
MODULE_AUTHOR("Broadcom");
+MODULE_SOFTDEP("pre: irq_bcm2712_mip");
diff --git a/drivers/pci/controller/pcie-iproc-msi.c b/drivers/pci/controller/pcie-iproc-msi.c
index 649fcb449f34..d2cb4c4f821a 100644
--- a/drivers/pci/controller/pcie-iproc-msi.c
+++ b/drivers/pci/controller/pcie-iproc-msi.c
@@ -446,12 +446,12 @@ static void iproc_msi_disable(struct iproc_msi *msi)
static int iproc_msi_alloc_domains(struct device_node *node,
struct iproc_msi *msi)
{
- msi->inner_domain = irq_domain_add_linear(NULL, msi->nr_msi_vecs,
- &msi_domain_ops, msi);
+ msi->inner_domain = irq_domain_create_linear(NULL, msi->nr_msi_vecs,
+ &msi_domain_ops, msi);
if (!msi->inner_domain)
return -ENOMEM;
- msi->msi_domain = pci_msi_create_irq_domain(of_node_to_fwnode(node),
+ msi->msi_domain = pci_msi_create_irq_domain(of_fwnode_handle(node),
&iproc_msi_domain_info,
msi->inner_domain);
if (!msi->msi_domain) {
diff --git a/drivers/pci/controller/pcie-mediatek-gen3.c b/drivers/pci/controller/pcie-mediatek-gen3.c
index d0cc7f3b4b52..b55f5973414c 100644
--- a/drivers/pci/controller/pcie-mediatek-gen3.c
+++ b/drivers/pci/controller/pcie-mediatek-gen3.c
@@ -354,7 +354,8 @@ static int mtk_pcie_set_trans_table(struct mtk_gen3_pcie *pcie,
dev_dbg(pcie->dev, "set %s trans window[%d]: cpu_addr = %#llx, pci_addr = %#llx, size = %#llx\n",
range_type, *num, (unsigned long long)cpu_addr,
- (unsigned long long)pci_addr, (unsigned long long)table_size);
+ (unsigned long long)pci_addr,
+ (unsigned long long)table_size);
cpu_addr += table_size;
pci_addr += table_size;
@@ -744,8 +745,8 @@ static int mtk_pcie_init_irq_domains(struct mtk_gen3_pcie *pcie)
return -ENODEV;
}
- pcie->intx_domain = irq_domain_add_linear(intc_node, PCI_NUM_INTX,
- &intx_domain_ops, pcie);
+ pcie->intx_domain = irq_domain_create_linear(of_fwnode_handle(intc_node), PCI_NUM_INTX,
+ &intx_domain_ops, pcie);
if (!pcie->intx_domain) {
dev_err(dev, "failed to create INTx IRQ domain\n");
ret = -ENODEV;
@@ -755,8 +756,9 @@ static int mtk_pcie_init_irq_domains(struct mtk_gen3_pcie *pcie)
/* Setup MSI */
mutex_init(&pcie->lock);
- pcie->msi_bottom_domain = irq_domain_add_linear(node, PCIE_MSI_IRQS_NUM,
- &mtk_msi_bottom_domain_ops, pcie);
+ pcie->msi_bottom_domain = irq_domain_create_linear(of_fwnode_handle(node),
+ PCIE_MSI_IRQS_NUM,
+ &mtk_msi_bottom_domain_ops, pcie);
if (!pcie->msi_bottom_domain) {
dev_err(dev, "failed to create MSI bottom domain\n");
ret = -ENODEV;
@@ -889,7 +891,8 @@ static int mtk_pcie_parse_port(struct mtk_gen3_pcie *pcie)
for (i = 0; i < num_resets; i++)
pcie->phy_resets[i].id = pcie->soc->phy_resets.id[i];
- ret = devm_reset_control_bulk_get_optional_shared(dev, num_resets, pcie->phy_resets);
+ ret = devm_reset_control_bulk_get_optional_shared(dev, num_resets,
+ pcie->phy_resets);
if (ret) {
dev_err(dev, "failed to get PHY bulk reset\n");
return ret;
@@ -919,13 +922,14 @@ static int mtk_pcie_parse_port(struct mtk_gen3_pcie *pcie)
return pcie->num_clks;
}
- ret = of_property_read_u32(dev->of_node, "num-lanes", &num_lanes);
- if (ret == 0) {
- if (num_lanes == 0 || num_lanes > 16 || (num_lanes != 1 && num_lanes % 2))
+ ret = of_property_read_u32(dev->of_node, "num-lanes", &num_lanes);
+ if (ret == 0) {
+ if (num_lanes == 0 || num_lanes > 16 ||
+ (num_lanes != 1 && num_lanes % 2))
dev_warn(dev, "invalid num-lanes, using controller defaults\n");
- else
+ else
pcie->num_lanes = num_lanes;
- }
+ }
return 0;
}
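/*
 * i.e. the accepted "num-lanes" values are 1 or any even count up to 16;
 * zero, odd counts greater than 1 and anything above 16 keep the
 * controller default and trigger the warning.
 */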
@@ -946,7 +950,6 @@ static int mtk_pcie_en7581_power_up(struct mtk_gen3_pcie *pcie)
*/
reset_control_bulk_assert(pcie->soc->phy_resets.num_resets,
pcie->phy_resets);
- reset_control_assert(pcie->mac_reset);
/* Wait for the reset line assertion to complete. */
msleep(PCIE_EN7581_RESET_TIME_MS);
@@ -987,7 +990,8 @@ static int mtk_pcie_en7581_power_up(struct mtk_gen3_pcie *pcie)
goto err_phy_on;
}
- err = reset_control_bulk_deassert(pcie->soc->phy_resets.num_resets, pcie->phy_resets);
+ err = reset_control_bulk_deassert(pcie->soc->phy_resets.num_resets,
+ pcie->phy_resets);
if (err) {
dev_err(dev, "failed to deassert PHYs\n");
goto err_phy_deassert;
@@ -1032,7 +1036,8 @@ static int mtk_pcie_en7581_power_up(struct mtk_gen3_pcie *pcie)
err_clk_prepare_enable:
pm_runtime_put_sync(dev);
pm_runtime_disable(dev);
- reset_control_bulk_assert(pcie->soc->phy_resets.num_resets, pcie->phy_resets);
+ reset_control_bulk_assert(pcie->soc->phy_resets.num_resets,
+ pcie->phy_resets);
err_phy_deassert:
phy_power_off(pcie->phy);
err_phy_on:
@@ -1056,7 +1061,8 @@ static int mtk_pcie_power_up(struct mtk_gen3_pcie *pcie)
usleep_range(PCIE_MTK_RESET_TIME_US, 2 * PCIE_MTK_RESET_TIME_US);
/* PHY power on and enable pipe clock */
- err = reset_control_bulk_deassert(pcie->soc->phy_resets.num_resets, pcie->phy_resets);
+ err = reset_control_bulk_deassert(pcie->soc->phy_resets.num_resets,
+ pcie->phy_resets);
if (err) {
dev_err(dev, "failed to deassert PHYs\n");
return err;
@@ -1096,7 +1102,8 @@ err_clk_init:
err_phy_on:
phy_exit(pcie->phy);
err_phy_init:
- reset_control_bulk_assert(pcie->soc->phy_resets.num_resets, pcie->phy_resets);
+ reset_control_bulk_assert(pcie->soc->phy_resets.num_resets,
+ pcie->phy_resets);
return err;
}
@@ -1111,7 +1118,8 @@ static void mtk_pcie_power_down(struct mtk_gen3_pcie *pcie)
phy_power_off(pcie->phy);
phy_exit(pcie->phy);
- reset_control_bulk_assert(pcie->soc->phy_resets.num_resets, pcie->phy_resets);
+ reset_control_bulk_assert(pcie->soc->phy_resets.num_resets,
+ pcie->phy_resets);
}
static int mtk_pcie_get_controller_max_link_speed(struct mtk_gen3_pcie *pcie)
@@ -1138,7 +1146,8 @@ static int mtk_pcie_setup(struct mtk_gen3_pcie *pcie)
* Deassert the line to avoid an imbalance in the deassert_count
* counter, since the bulk is shared.
*/
- reset_control_bulk_deassert(pcie->soc->phy_resets.num_resets, pcie->phy_resets);
+ reset_control_bulk_deassert(pcie->soc->phy_resets.num_resets,
+ pcie->phy_resets);
/* Don't touch the hardware registers before power up */
err = pcie->soc->power_up(pcie);
diff --git a/drivers/pci/controller/pcie-mediatek.c b/drivers/pci/controller/pcie-mediatek.c
index 3bcfc4e58ba2..e1934aa06c8d 100644
--- a/drivers/pci/controller/pcie-mediatek.c
+++ b/drivers/pci/controller/pcie-mediatek.c
@@ -485,7 +485,7 @@ static struct msi_domain_info mtk_msi_domain_info = {
static int mtk_pcie_allocate_msi_domains(struct mtk_pcie_port *port)
{
- struct fwnode_handle *fwnode = of_node_to_fwnode(port->pcie->dev->of_node);
+ struct fwnode_handle *fwnode = of_fwnode_handle(port->pcie->dev->of_node);
mutex_init(&port->lock);
@@ -569,8 +569,8 @@ static int mtk_pcie_init_irq_domain(struct mtk_pcie_port *port,
return -ENODEV;
}
- port->irq_domain = irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX,
- &intx_domain_ops, port);
+ port->irq_domain = irq_domain_create_linear(of_fwnode_handle(pcie_intc_node), PCI_NUM_INTX,
+ &intx_domain_ops, port);
of_node_put(pcie_intc_node);
if (!port->irq_domain) {
dev_err(dev, "failed to get INTx IRQ domain\n");
@@ -1041,24 +1041,22 @@ err_free_ck:
static int mtk_pcie_setup(struct mtk_pcie *pcie)
{
struct device *dev = pcie->dev;
- struct device_node *node = dev->of_node, *child;
+ struct device_node *node = dev->of_node;
struct mtk_pcie_port *port, *tmp;
int err, slot;
slot = of_get_pci_domain_nr(dev->of_node);
if (slot < 0) {
- for_each_available_child_of_node(node, child) {
+ for_each_available_child_of_node_scoped(node, child) {
err = of_pci_get_devfn(child);
- if (err < 0) {
- dev_err(dev, "failed to get devfn: %d\n", err);
- goto error_put_node;
- }
+ if (err < 0)
+ return dev_err_probe(dev, err, "failed to get devfn\n");
slot = PCI_SLOT(err);
err = mtk_pcie_parse_port(pcie, child, slot);
if (err)
- goto error_put_node;
+ return err;
}
} else {
err = mtk_pcie_parse_port(pcie, node, slot);
@@ -1079,9 +1077,6 @@ static int mtk_pcie_setup(struct mtk_pcie *pcie)
mtk_pcie_subsys_powerdown(pcie);
return 0;
-error_put_node:
- of_node_put(child);
- return err;
}
static int mtk_pcie_probe(struct platform_device *pdev)
diff --git a/drivers/pci/controller/pcie-mt7621.c b/drivers/pci/controller/pcie-mt7621.c
index 776caa0b1011..01ead2f92e87 100644
--- a/drivers/pci/controller/pcie-mt7621.c
+++ b/drivers/pci/controller/pcie-mt7621.c
@@ -258,30 +258,25 @@ static int mt7621_pcie_parse_dt(struct mt7621_pcie *pcie)
{
struct device *dev = pcie->dev;
struct platform_device *pdev = to_platform_device(dev);
- struct device_node *node = dev->of_node, *child;
+ struct device_node *node = dev->of_node;
int err;
pcie->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(pcie->base))
return PTR_ERR(pcie->base);
- for_each_available_child_of_node(node, child) {
+ for_each_available_child_of_node_scoped(node, child) {
int slot;
err = of_pci_get_devfn(child);
- if (err < 0) {
- of_node_put(child);
- dev_err(dev, "failed to parse devfn: %d\n", err);
- return err;
- }
+ if (err < 0)
+ return dev_err_probe(dev, err, "failed to parse devfn\n");
slot = PCI_SLOT(err);
err = mt7621_pcie_parse_port(pcie, child, slot);
- if (err) {
- of_node_put(child);
+ if (err)
return err;
- }
}
return 0;
diff --git a/drivers/pci/controller/pcie-rcar-ep.c b/drivers/pci/controller/pcie-rcar-ep.c
index c5e0d025bc43..a8a966844cf3 100644
--- a/drivers/pci/controller/pcie-rcar-ep.c
+++ b/drivers/pci/controller/pcie-rcar-ep.c
@@ -256,15 +256,15 @@ static void rcar_pcie_ep_clear_bar(struct pci_epc *epc, u8 fn, u8 vfn,
clear_bit(atu_index + 1, ep->ib_window_map);
}
-static int rcar_pcie_ep_set_msi(struct pci_epc *epc, u8 fn, u8 vfn,
- u8 interrupts)
+static int rcar_pcie_ep_set_msi(struct pci_epc *epc, u8 fn, u8 vfn, u8 nr_irqs)
{
struct rcar_pcie_endpoint *ep = epc_get_drvdata(epc);
struct rcar_pcie *pcie = &ep->pcie;
+ u8 mmc = order_base_2(nr_irqs);
u32 flags;
flags = rcar_pci_read_reg(pcie, MSICAP(fn));
- flags |= interrupts << MSICAP0_MMESCAP_OFFSET;
+ flags |= mmc << MSICAP0_MMESCAP_OFFSET;
rcar_pci_write_reg(pcie, flags, MSICAP(fn));
return 0;
@@ -280,7 +280,7 @@ static int rcar_pcie_ep_get_msi(struct pci_epc *epc, u8 fn, u8 vfn)
if (!(flags & MSICAP0_MSIE))
return -EINVAL;
- return ((flags & MSICAP0_MMESE_MASK) >> MSICAP0_MMESE_OFFSET);
+ return 1 << ((flags & MSICAP0_MMESE_MASK) >> MSICAP0_MMESE_OFFSET);
}
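/*
 * The MSI Multiple Message Capable/Enable fields hold log2 of the
 * vector count: order_base_2() in rcar_pcie_ep_set_msi() stores e.g.
 * 8 vectors as 3 (non-powers-of-two round up), and the 1 << decode
 * above converts the field back to a vector count.
 */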
static int rcar_pcie_ep_map_addr(struct pci_epc *epc, u8 fn, u8 vfn,
diff --git a/drivers/pci/controller/pcie-rcar-host.c b/drivers/pci/controller/pcie-rcar-host.c
index 7c92eada04af..c32b803a47c7 100644
--- a/drivers/pci/controller/pcie-rcar-host.c
+++ b/drivers/pci/controller/pcie-rcar-host.c
@@ -178,8 +178,8 @@ static int rcar_pcie_config_access(struct rcar_pcie_host *host,
* space, it's generally only accessible when in endpoint mode.
* When in root complex mode, the controller is unable to target
* itself with either type 0 or type 1 accesses, and indeed, any
- * controller initiated target transfer to its own config space
- * result in a completer abort.
+ * controller-initiated target transfer to its own config space
+ * results in a completer abort.
*
* Each channel effectively only supports a single device, but as
* the same channel <-> device access works for any PCI_SLOT()
@@ -775,7 +775,7 @@ static int rcar_pcie_enable_msi(struct rcar_pcie_host *host)
if (err)
return err;
- /* Two irqs are for MSI, but they are also used for non-MSI irqs */
+ /* Two IRQs are for MSI, but they are also used for non-MSI IRQs */
err = devm_request_irq(dev, msi->irq1, rcar_pcie_msi_irq,
IRQF_SHARED | IRQF_NO_THREAD,
rcar_msi_bottom_chip.name, host);
@@ -792,7 +792,7 @@ static int rcar_pcie_enable_msi(struct rcar_pcie_host *host)
goto err;
}
- /* disable all MSIs */
+ /* Disable all MSIs */
rcar_pci_write_reg(pcie, 0, PCIEMSIIER);
/*
@@ -892,6 +892,7 @@ static int rcar_pcie_inbound_ranges(struct rcar_pcie *pcie,
dev_err(pcie->dev, "Failed to map inbound regions!\n");
return -EINVAL;
}
+
/*
* If the size of the range is larger than the alignment of
* the start address, we have to use multiple entries to
@@ -903,6 +904,7 @@ static int rcar_pcie_inbound_ranges(struct rcar_pcie *pcie,
size = min(size, alignment);
}
+
/* Hardware supports max 4GiB inbound region */
size = min(size, 1ULL << 32);
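/*
 * Example of the splitting above: a 128MiB range whose CPU address is
 * only 64MiB aligned is programmed as two 64MiB entries; after the
 * first entry the start address is 128MiB aligned, so the remainder
 * fits in one more entry.
 */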
diff --git a/drivers/pci/controller/pcie-rockchip-ep.c b/drivers/pci/controller/pcie-rockchip-ep.c
index 85ea36df2f59..55416b8311dd 100644
--- a/drivers/pci/controller/pcie-rockchip-ep.c
+++ b/drivers/pci/controller/pcie-rockchip-ep.c
@@ -308,10 +308,11 @@ static void rockchip_pcie_ep_unmap_addr(struct pci_epc *epc, u8 fn, u8 vfn,
}
static int rockchip_pcie_ep_set_msi(struct pci_epc *epc, u8 fn, u8 vfn,
- u8 multi_msg_cap)
+ u8 nr_irqs)
{
struct rockchip_pcie_ep *ep = epc_get_drvdata(epc);
struct rockchip_pcie *rockchip = &ep->rockchip;
+ u8 mmc = order_base_2(nr_irqs);
u32 flags;
flags = rockchip_pcie_read(rockchip,
@@ -319,7 +320,7 @@ static int rockchip_pcie_ep_set_msi(struct pci_epc *epc, u8 fn, u8 vfn,
ROCKCHIP_PCIE_EP_MSI_CTRL_REG);
flags &= ~ROCKCHIP_PCIE_EP_MSI_CTRL_MMC_MASK;
flags |=
- (multi_msg_cap << ROCKCHIP_PCIE_EP_MSI_CTRL_MMC_OFFSET) |
+ (mmc << ROCKCHIP_PCIE_EP_MSI_CTRL_MMC_OFFSET) |
(PCI_MSI_FLAGS_64BIT << ROCKCHIP_PCIE_EP_MSI_FLAGS_OFFSET);
flags &= ~ROCKCHIP_PCIE_EP_MSI_CTRL_MASK_MSI_CAP;
rockchip_pcie_write(rockchip, flags,
@@ -340,8 +341,8 @@ static int rockchip_pcie_ep_get_msi(struct pci_epc *epc, u8 fn, u8 vfn)
if (!(flags & ROCKCHIP_PCIE_EP_MSI_CTRL_ME))
return -EINVAL;
- return ((flags & ROCKCHIP_PCIE_EP_MSI_CTRL_MME_MASK) >>
- ROCKCHIP_PCIE_EP_MSI_CTRL_MME_OFFSET);
+ return 1 << ((flags & ROCKCHIP_PCIE_EP_MSI_CTRL_MME_MASK) >>
+ ROCKCHIP_PCIE_EP_MSI_CTRL_MME_OFFSET);
}
static void rockchip_pcie_ep_assert_intx(struct rockchip_pcie_ep *ep, u8 fn,
@@ -694,6 +695,7 @@ static const struct pci_epc_features rockchip_pcie_epc_features = {
.linkup_notifier = true,
.msi_capable = true,
.msix_capable = false,
+ .intx_capable = true,
.align = ROCKCHIP_PCIE_AT_SIZE_ALIGN,
};
diff --git a/drivers/pci/controller/pcie-rockchip-host.c b/drivers/pci/controller/pcie-rockchip-host.c
index 6a46be17aa91..648b6fcb93b0 100644
--- a/drivers/pci/controller/pcie-rockchip-host.c
+++ b/drivers/pci/controller/pcie-rockchip-host.c
@@ -439,7 +439,7 @@ static irqreturn_t rockchip_pcie_subsys_irq_handler(int irq, void *arg)
dev_dbg(dev, "malformed TLP received from the link\n");
if (sub_reg & PCIE_CORE_INT_UCR)
- dev_dbg(dev, "malformed TLP received from the link\n");
+ dev_dbg(dev, "Unexpected Completion received from the link\n");
if (sub_reg & PCIE_CORE_INT_FCE)
dev_dbg(dev, "an error was observed in the flow control advertisements from the other side\n");
@@ -693,8 +693,8 @@ static int rockchip_pcie_init_irq_domain(struct rockchip_pcie *rockchip)
return -EINVAL;
}
- rockchip->irq_domain = irq_domain_add_linear(intc, PCI_NUM_INTX,
- &intx_domain_ops, rockchip);
+ rockchip->irq_domain = irq_domain_create_linear(of_fwnode_handle(intc), PCI_NUM_INTX,
+ &intx_domain_ops, rockchip);
of_node_put(intc);
if (!rockchip->irq_domain) {
dev_err(dev, "failed to get a INTx IRQ domain\n");
diff --git a/drivers/pci/controller/pcie-rockchip.h b/drivers/pci/controller/pcie-rockchip.h
index 14954f43e5e9..5864a20323f2 100644
--- a/drivers/pci/controller/pcie-rockchip.h
+++ b/drivers/pci/controller/pcie-rockchip.h
@@ -319,11 +319,12 @@ static const char * const rockchip_pci_pm_rsts[] = {
"aclk",
};
+/* NOTE: Do not reorder the deassert sequence of the following reset pins */
static const char * const rockchip_pci_core_rsts[] = {
- "mgmt-sticky",
- "core",
- "mgmt",
"pipe",
+ "mgmt",
+ "core",
+ "mgmt-sticky",
};
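/*
 * reset_control_bulk_deassert() walks the array front to back, so the
 * order above should be read as the deassert order: pipe, mgmt, core,
 * mgmt-sticky.
 */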
struct rockchip_pcie {
diff --git a/drivers/pci/controller/pcie-xilinx-cpm.c b/drivers/pci/controller/pcie-xilinx-cpm.c
index dc8ecdbee56c..d38f27e20761 100644
--- a/drivers/pci/controller/pcie-xilinx-cpm.c
+++ b/drivers/pci/controller/pcie-xilinx-cpm.c
@@ -84,6 +84,7 @@ enum xilinx_cpm_version {
CPM,
CPM5,
CPM5_HOST1,
+ CPM5NC_HOST,
};
/**
@@ -394,17 +395,15 @@ static int xilinx_cpm_pcie_init_irq_domain(struct xilinx_cpm_pcie *port)
return -EINVAL;
}
- port->cpm_domain = irq_domain_add_linear(pcie_intc_node, 32,
- &event_domain_ops,
- port);
+ port->cpm_domain = irq_domain_create_linear(of_fwnode_handle(pcie_intc_node), 32,
+ &event_domain_ops, port);
if (!port->cpm_domain)
goto out;
irq_domain_update_bus_token(port->cpm_domain, DOMAIN_BUS_NEXUS);
- port->intx_domain = irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX,
- &intx_domain_ops,
- port);
+ port->intx_domain = irq_domain_create_linear(of_fwnode_handle(pcie_intc_node), PCI_NUM_INTX,
+ &intx_domain_ops, port);
if (!port->intx_domain)
goto out;
@@ -478,6 +477,9 @@ static void xilinx_cpm_pcie_init_port(struct xilinx_cpm_pcie *port)
{
const struct xilinx_cpm_variant *variant = port->variant;
+ if (variant->version == CPM5NC_HOST)
+ return;
+
if (cpm_pcie_link_up(port))
dev_info(port->dev, "PCIe Link is UP\n");
else
@@ -538,7 +540,8 @@ static int xilinx_cpm_pcie_parse_dt(struct xilinx_cpm_pcie *port,
if (IS_ERR(port->cfg))
return PTR_ERR(port->cfg);
- if (port->variant->version == CPM5) {
+ if (port->variant->version == CPM5 ||
+ port->variant->version == CPM5_HOST1) {
port->reg_base = devm_platform_ioremap_resource_byname(pdev,
"cpm_csr");
if (IS_ERR(port->reg_base))
@@ -578,9 +581,13 @@ static int xilinx_cpm_pcie_probe(struct platform_device *pdev)
port->dev = dev;
- err = xilinx_cpm_pcie_init_irq_domain(port);
- if (err)
- return err;
+ port->variant = of_device_get_match_data(dev);
+
+ if (port->variant->version != CPM5NC_HOST) {
+ err = xilinx_cpm_pcie_init_irq_domain(port);
+ if (err)
+ return err;
+ }
bus = resource_list_first_type(&bridge->windows, IORESOURCE_BUS);
if (!bus) {
@@ -588,8 +595,6 @@ static int xilinx_cpm_pcie_probe(struct platform_device *pdev)
goto err_free_irq_domains;
}
- port->variant = of_device_get_match_data(dev);
-
err = xilinx_cpm_pcie_parse_dt(port, bus->res);
if (err) {
dev_err(dev, "Parsing DT failed\n");
@@ -598,10 +603,12 @@ static int xilinx_cpm_pcie_probe(struct platform_device *pdev)
xilinx_cpm_pcie_init_port(port);
- err = xilinx_cpm_setup_irq(port);
- if (err) {
- dev_err(dev, "Failed to set up interrupts\n");
- goto err_setup_irq;
+ if (port->variant->version != CPM5NC_HOST) {
+ err = xilinx_cpm_setup_irq(port);
+ if (err) {
+ dev_err(dev, "Failed to set up interrupts\n");
+ goto err_setup_irq;
+ }
}
bridge->sysdata = port->cfg;
@@ -614,11 +621,13 @@ static int xilinx_cpm_pcie_probe(struct platform_device *pdev)
return 0;
err_host_bridge:
- xilinx_cpm_free_interrupts(port);
+ if (port->variant->version != CPM5NC_HOST)
+ xilinx_cpm_free_interrupts(port);
err_setup_irq:
pci_ecam_free(port->cfg);
err_free_irq_domains:
- xilinx_cpm_free_irq_domains(port);
+ if (port->variant->version != CPM5NC_HOST)
+ xilinx_cpm_free_irq_domains(port);
return err;
}
@@ -641,6 +650,10 @@ static const struct xilinx_cpm_variant cpm5_host1 = {
.ir_enable = XILINX_CPM_PCIE1_IR_ENABLE,
};
+static const struct xilinx_cpm_variant cpm5n_host = {
+ .version = CPM5NC_HOST,
+};
+
static const struct of_device_id xilinx_cpm_pcie_of_match[] = {
{
.compatible = "xlnx,versal-cpm-host-1.00",
@@ -654,6 +667,10 @@ static const struct of_device_id xilinx_cpm_pcie_of_match[] = {
.compatible = "xlnx,versal-cpm5-host1",
.data = &cpm5_host1,
},
+ {
+ .compatible = "xlnx,versal-cpm5nc-host",
+ .data = &cpm5n_host,
+ },
{}
};
diff --git a/drivers/pci/controller/pcie-xilinx-dma-pl.c b/drivers/pci/controller/pcie-xilinx-dma-pl.c
index dd117f07fc95..dc9690a535e1 100644
--- a/drivers/pci/controller/pcie-xilinx-dma-pl.c
+++ b/drivers/pci/controller/pcie-xilinx-dma-pl.c
@@ -470,10 +470,10 @@ static int xilinx_pl_dma_pcie_init_msi_irq_domain(struct pl_dma_pcie *port)
struct device *dev = port->dev;
struct xilinx_msi *msi = &port->msi;
int size = BITS_TO_LONGS(XILINX_NUM_MSI_IRQS) * sizeof(long);
- struct fwnode_handle *fwnode = of_node_to_fwnode(port->dev->of_node);
+ struct fwnode_handle *fwnode = of_fwnode_handle(port->dev->of_node);
- msi->dev_domain = irq_domain_add_linear(NULL, XILINX_NUM_MSI_IRQS,
- &dev_msi_domain_ops, port);
+ msi->dev_domain = irq_domain_create_linear(NULL, XILINX_NUM_MSI_IRQS,
+ &dev_msi_domain_ops, port);
if (!msi->dev_domain)
goto out;
@@ -585,15 +585,15 @@ static int xilinx_pl_dma_pcie_init_irq_domain(struct pl_dma_pcie *port)
return -EINVAL;
}
- port->pldma_domain = irq_domain_add_linear(pcie_intc_node, 32,
- &event_domain_ops, port);
+ port->pldma_domain = irq_domain_create_linear(of_fwnode_handle(pcie_intc_node), 32,
+ &event_domain_ops, port);
if (!port->pldma_domain)
return -ENOMEM;
irq_domain_update_bus_token(port->pldma_domain, DOMAIN_BUS_NEXUS);
- port->intx_domain = irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX,
- &intx_domain_ops, port);
+ port->intx_domain = irq_domain_create_linear(of_fwnode_handle(pcie_intc_node), PCI_NUM_INTX,
+ &intx_domain_ops, port);
if (!port->intx_domain) {
dev_err(dev, "Failed to get a INTx IRQ domain\n");
return -ENOMEM;
diff --git a/drivers/pci/controller/pcie-xilinx-nwl.c b/drivers/pci/controller/pcie-xilinx-nwl.c
index 8d6e2a89b067..c8b05477b719 100644
--- a/drivers/pci/controller/pcie-xilinx-nwl.c
+++ b/drivers/pci/controller/pcie-xilinx-nwl.c
@@ -495,11 +495,10 @@ static int nwl_pcie_init_msi_irq_domain(struct nwl_pcie *pcie)
{
#ifdef CONFIG_PCI_MSI
struct device *dev = pcie->dev;
- struct fwnode_handle *fwnode = of_node_to_fwnode(dev->of_node);
+ struct fwnode_handle *fwnode = of_fwnode_handle(dev->of_node);
struct nwl_msi *msi = &pcie->msi;
- msi->dev_domain = irq_domain_add_linear(NULL, INT_PCI_MSI_NR,
- &dev_msi_domain_ops, pcie);
+ msi->dev_domain = irq_domain_create_linear(NULL, INT_PCI_MSI_NR, &dev_msi_domain_ops, pcie);
if (!msi->dev_domain) {
dev_err(dev, "failed to create dev IRQ domain\n");
return -ENOMEM;
@@ -582,10 +581,8 @@ static int nwl_pcie_init_irq_domain(struct nwl_pcie *pcie)
return -EINVAL;
}
- pcie->intx_irq_domain = irq_domain_add_linear(intc_node,
- PCI_NUM_INTX,
- &intx_domain_ops,
- pcie);
+ pcie->intx_irq_domain = irq_domain_create_linear(of_fwnode_handle(intc_node), PCI_NUM_INTX,
+ &intx_domain_ops, pcie);
of_node_put(intc_node);
if (!pcie->intx_irq_domain) {
dev_err(dev, "failed to create IRQ domain\n");
diff --git a/drivers/pci/controller/pcie-xilinx.c b/drivers/pci/controller/pcie-xilinx.c
index 0b534f73a942..e36aa874bae9 100644
--- a/drivers/pci/controller/pcie-xilinx.c
+++ b/drivers/pci/controller/pcie-xilinx.c
@@ -461,9 +461,8 @@ static int xilinx_pcie_init_irq_domain(struct xilinx_pcie *pcie)
return -ENODEV;
}
- pcie->leg_domain = irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX,
- &intx_domain_ops,
- pcie);
+ pcie->leg_domain = irq_domain_create_linear(of_fwnode_handle(pcie_intc_node), PCI_NUM_INTX,
+ &intx_domain_ops, pcie);
of_node_put(pcie_intc_node);
if (!pcie->leg_domain) {
dev_err(dev, "Failed to get a INTx IRQ domain\n");
diff --git a/drivers/pci/controller/plda/pcie-microchip-host.c b/drivers/pci/controller/plda/pcie-microchip-host.c
index 3fdfffdf0270..24bbf93b8051 100644
--- a/drivers/pci/controller/plda/pcie-microchip-host.c
+++ b/drivers/pci/controller/plda/pcie-microchip-host.c
@@ -23,6 +23,7 @@
#include <linux/wordpart.h>
#include "../../pci.h"
+#include "../pci-host-common.h"
#include "pcie-plda.h"
#define MC_MAX_NUM_INBOUND_WINDOWS 8
diff --git a/drivers/pci/controller/plda/pcie-plda-host.c b/drivers/pci/controller/plda/pcie-plda-host.c
index 4153214ca410..3abedf723215 100644
--- a/drivers/pci/controller/plda/pcie-plda-host.c
+++ b/drivers/pci/controller/plda/pcie-plda-host.c
@@ -150,13 +150,12 @@ static struct msi_domain_info plda_msi_domain_info = {
static int plda_allocate_msi_domains(struct plda_pcie_rp *port)
{
struct device *dev = port->dev;
- struct fwnode_handle *fwnode = of_node_to_fwnode(dev->of_node);
+ struct fwnode_handle *fwnode = of_fwnode_handle(dev->of_node);
struct plda_msi *msi = &port->msi;
mutex_init(&port->msi.lock);
- msi->dev_domain = irq_domain_add_linear(NULL, msi->num_vectors,
- &msi_domain_ops, port);
+ msi->dev_domain = irq_domain_create_linear(NULL, msi->num_vectors, &msi_domain_ops, port);
if (!msi->dev_domain) {
dev_err(dev, "failed to create IRQ domain\n");
return -ENOMEM;
@@ -393,10 +392,9 @@ static int plda_pcie_init_irq_domains(struct plda_pcie_rp *port)
return -EINVAL;
}
- port->event_domain = irq_domain_add_linear(pcie_intc_node,
- port->num_events,
- &plda_event_domain_ops,
- port);
+ port->event_domain = irq_domain_create_linear(of_fwnode_handle(pcie_intc_node),
+ port->num_events, &plda_event_domain_ops,
+ port);
if (!port->event_domain) {
dev_err(dev, "failed to get event domain\n");
of_node_put(pcie_intc_node);
@@ -405,8 +403,8 @@ static int plda_pcie_init_irq_domains(struct plda_pcie_rp *port)
irq_domain_update_bus_token(port->event_domain, DOMAIN_BUS_NEXUS);
- port->intx_domain = irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX,
- &intx_domain_ops, port);
+ port->intx_domain = irq_domain_create_linear(of_fwnode_handle(pcie_intc_node), PCI_NUM_INTX,
+ &intx_domain_ops, port);
if (!port->intx_domain) {
dev_err(dev, "failed to get an INTx IRQ domain\n");
of_node_put(pcie_intc_node);
diff --git a/drivers/pci/controller/plda/pcie-starfive.c b/drivers/pci/controller/plda/pcie-starfive.c
index e73c1b7bc8ef..3caf53c6c082 100644
--- a/drivers/pci/controller/plda/pcie-starfive.c
+++ b/drivers/pci/controller/plda/pcie-starfive.c
@@ -368,7 +368,7 @@ static int starfive_pcie_host_init(struct plda_pcie_rp *plda)
* of 100ms following exit from a conventional reset before
* sending a configuration request to the device.
*/
- msleep(PCIE_RESET_CONFIG_DEVICE_WAIT_MS);
+ msleep(PCIE_RESET_CONFIG_WAIT_MS);
if (starfive_pcie_host_wait_for_link(pcie))
dev_info(dev, "port link down\n");
diff --git a/drivers/pci/controller/vmd.c b/drivers/pci/controller/vmd.c
index 94ceec50a2b9..8df064b62a2f 100644
--- a/drivers/pci/controller/vmd.c
+++ b/drivers/pci/controller/vmd.c
@@ -17,6 +17,8 @@
#include <linux/rculist.h>
#include <linux/rcupdate.h>
+#include <xen/xen.h>
+
#include <asm/irqdomain.h>
#define VMD_CFGBAR 0
@@ -970,6 +972,24 @@ static int vmd_probe(struct pci_dev *dev, const struct pci_device_id *id)
struct vmd_dev *vmd;
int err;
+ if (xen_domain()) {
+		/*
+		 * Xen has no knowledge of the devices on the VMD bus
+		 * because the config space of devices behind the VMD
+		 * bridge is not known to Xen, and hence Xen cannot
+		 * discover or configure them in any way.
+		 *
+		 * Bypassing MSI remapping won't work in that case, as
+		 * direct writes by Linux to the MSI entries won't result
+		 * in functional interrupts: Xen is the entity that manages
+		 * the host interrupt controller and must configure
+		 * interrupts. However, interrupt multiplexing by the VMD
+		 * bridge does work under Xen, so force the use of that
+		 * mode, which must always be supported by VMD bridges.
+		 */
+ features &= ~VMD_FEAT_CAN_BYPASS_MSI_REMAP;
+ }
+
if (resource_size(&dev->resource[VMD_CFGBAR]) < (1 << 20))
return -ENOMEM;
diff --git a/drivers/pci/devres.c b/drivers/pci/devres.c
index 73047316889e..9f4190501395 100644
--- a/drivers/pci/devres.c
+++ b/drivers/pci/devres.c
@@ -6,30 +6,13 @@
/*
* On the state of PCI's devres implementation:
*
- * The older devres API for PCI has two significant problems:
+ * The older PCI devres API has one significant problem:
*
- * 1. It is very strongly tied to the statically allocated mapping table in
- * struct pcim_iomap_devres below. This is mostly solved in the sense of the
- * pcim_ functions in this file providing things like ranged mapping by
- * bypassing this table, whereas the functions that were present in the old
- * API still enter the mapping addresses into the table for users of the old
- * API.
- *
- * 2. The region-request-functions in pci.c do become managed IF the device has
- * been enabled with pcim_enable_device() instead of pci_enable_device().
- * This resulted in the API becoming inconsistent: Some functions have an
- * obviously managed counter-part (e.g., pci_iomap() <-> pcim_iomap()),
- * whereas some don't and are never managed, while others don't and are
- * _sometimes_ managed (e.g. pci_request_region()).
- *
- * Consequently, in the new API, region requests performed by the pcim_
- * functions are automatically cleaned up through the devres callback
- * pcim_addr_resource_release().
- *
- * Users of pcim_enable_device() + pci_*region*() are redirected in
- * pci.c to the managed functions here in this file. This isn't exactly
- * perfect, but the only alternative way would be to port ALL drivers
- * using said combination to pcim_ functions.
+ * It is very strongly tied to the statically allocated mapping table in struct
+ * pcim_iomap_devres below. This is mostly solved in the sense of the pcim_
+ * functions in this file providing things like ranged mapping by bypassing
+ * this table, whereas the functions that were present in the old API still
+ * enter the mapping addresses into the table for users of the old API.
*
* TODO:
* Remove the legacy table entirely once all calls to pcim_iomap_table() in
@@ -87,104 +70,6 @@ static inline void pcim_addr_devres_clear(struct pcim_addr_devres *res)
res->bar = -1;
}
-/*
- * The following functions, __pcim_*_region*, exist as counterparts to the
- * versions from pci.c - which, unfortunately, can be in "hybrid mode", i.e.,
- * sometimes managed, sometimes not.
- *
- * To separate the APIs cleanly, we define our own, simplified versions here.
- */
-
-/**
- * __pcim_request_region_range - Request a ranged region
- * @pdev: PCI device the region belongs to
- * @bar: BAR the range is within
- * @offset: offset from the BAR's start address
- * @maxlen: length in bytes, beginning at @offset
- * @name: name of the driver requesting the resource
- * @req_flags: flags for the request, e.g., for kernel-exclusive requests
- *
- * Returns: 0 on success, a negative error code on failure.
- *
- * Request a range within a device's PCI BAR. Sanity check the input.
- */
-static int __pcim_request_region_range(struct pci_dev *pdev, int bar,
- unsigned long offset,
- unsigned long maxlen,
- const char *name, int req_flags)
-{
- resource_size_t start = pci_resource_start(pdev, bar);
- resource_size_t len = pci_resource_len(pdev, bar);
- unsigned long dev_flags = pci_resource_flags(pdev, bar);
-
- if (start == 0 || len == 0) /* Unused BAR. */
- return 0;
- if (len <= offset)
- return -EINVAL;
-
- start += offset;
- len -= offset;
-
- if (len > maxlen && maxlen != 0)
- len = maxlen;
-
- if (dev_flags & IORESOURCE_IO) {
- if (!request_region(start, len, name))
- return -EBUSY;
- } else if (dev_flags & IORESOURCE_MEM) {
- if (!__request_mem_region(start, len, name, req_flags))
- return -EBUSY;
- } else {
- /* That's not a device we can request anything on. */
- return -ENODEV;
- }
-
- return 0;
-}
-
-static void __pcim_release_region_range(struct pci_dev *pdev, int bar,
- unsigned long offset,
- unsigned long maxlen)
-{
- resource_size_t start = pci_resource_start(pdev, bar);
- resource_size_t len = pci_resource_len(pdev, bar);
- unsigned long flags = pci_resource_flags(pdev, bar);
-
- if (len <= offset || start == 0)
- return;
-
- if (len == 0 || maxlen == 0) /* This an unused BAR. Do nothing. */
- return;
-
- start += offset;
- len -= offset;
-
- if (len > maxlen)
- len = maxlen;
-
- if (flags & IORESOURCE_IO)
- release_region(start, len);
- else if (flags & IORESOURCE_MEM)
- release_mem_region(start, len);
-}
-
-static int __pcim_request_region(struct pci_dev *pdev, int bar,
- const char *name, int flags)
-{
- unsigned long offset = 0;
- unsigned long len = pci_resource_len(pdev, bar);
-
- return __pcim_request_region_range(pdev, bar, offset, len, name, flags);
-}
-
-static void __pcim_release_region(struct pci_dev *pdev, int bar)
-{
- unsigned long offset = 0;
- unsigned long len = pci_resource_len(pdev, bar);
-
- __pcim_release_region_range(pdev, bar, offset, len);
-}
-
static void pcim_addr_resource_release(struct device *dev, void *resource_raw)
{
struct pci_dev *pdev = to_pci_dev(dev);
@@ -192,11 +77,11 @@ static void pcim_addr_resource_release(struct device *dev, void *resource_raw)
switch (res->type) {
case PCIM_ADDR_DEVRES_TYPE_REGION:
- __pcim_release_region(pdev, res->bar);
+ pci_release_region(pdev, res->bar);
break;
case PCIM_ADDR_DEVRES_TYPE_REGION_MAPPING:
pci_iounmap(pdev, res->baseaddr);
- __pcim_release_region(pdev, res->bar);
+ pci_release_region(pdev, res->bar);
break;
case PCIM_ADDR_DEVRES_TYPE_MAPPING:
pci_iounmap(pdev, res->baseaddr);
@@ -735,7 +620,7 @@ void __iomem *pcim_iomap_region(struct pci_dev *pdev, int bar,
res->type = PCIM_ADDR_DEVRES_TYPE_REGION_MAPPING;
res->bar = bar;
- ret = __pcim_request_region(pdev, bar, name, 0);
+ ret = pci_request_region(pdev, bar, name);
if (ret != 0)
goto err_region;
@@ -749,7 +634,7 @@ void __iomem *pcim_iomap_region(struct pci_dev *pdev, int bar,
return res->baseaddr;
err_iomap:
- __pcim_release_region(pdev, bar);
+ pci_release_region(pdev, bar);
err_region:
pcim_addr_devres_free(res);
@@ -823,8 +708,20 @@ err:
}
EXPORT_SYMBOL(pcim_iomap_regions);
-static int _pcim_request_region(struct pci_dev *pdev, int bar, const char *name,
- int request_flags)
+/**
+ * pcim_request_region - Request a PCI BAR
+ * @pdev: PCI device to request region for
+ * @bar: Index of BAR to request
+ * @name: Name of the driver requesting the resource
+ *
+ * Returns: 0 on success, a negative error code on failure.
+ *
+ * Request region specified by @bar.
+ *
+ * The region will automatically be released on driver detach. If desired,
+ * release manually only with pcim_release_region().
+ */
+int pcim_request_region(struct pci_dev *pdev, int bar, const char *name)
{
int ret;
struct pcim_addr_devres *res;
@@ -838,7 +735,7 @@ static int _pcim_request_region(struct pci_dev *pdev, int bar, const char *name,
res->type = PCIM_ADDR_DEVRES_TYPE_REGION;
res->bar = bar;
- ret = __pcim_request_region(pdev, bar, name, request_flags);
+ ret = pci_request_region(pdev, bar, name);
if (ret != 0) {
pcim_addr_devres_free(res);
return ret;
@@ -847,45 +744,9 @@ static int _pcim_request_region(struct pci_dev *pdev, int bar, const char *name,
devres_add(&pdev->dev, res);
return 0;
}
-
-/**
- * pcim_request_region - Request a PCI BAR
- * @pdev: PCI device to request region for
- * @bar: Index of BAR to request
- * @name: Name of the driver requesting the resource
- *
- * Returns: 0 on success, a negative error code on failure.
- *
- * Request region specified by @bar.
- *
- * The region will automatically be released on driver detach. If desired,
- * release manually only with pcim_release_region().
- */
-int pcim_request_region(struct pci_dev *pdev, int bar, const char *name)
-{
- return _pcim_request_region(pdev, bar, name, 0);
-}
EXPORT_SYMBOL(pcim_request_region);
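[Editor's note: a minimal sketch of how a driver consumes this managed API; the driver name "foo" and BAR index 0 are illustrative assumptions, not part of the patch.]

/* Illustrative consumer of pcim_request_region(); "foo" and BAR 0 assumed. */
static int foo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int ret;

	ret = pcim_enable_device(pdev);
	if (ret)
		return ret;

	/* Held until driver detach; no explicit release required. */
	return pcim_request_region(pdev, 0, "foo");
}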
/**
- * pcim_request_region_exclusive - Request a PCI BAR exclusively
- * @pdev: PCI device to request region for
- * @bar: Index of BAR to request
- * @name: Name of the driver requesting the resource
- *
- * Returns: 0 on success, a negative error code on failure.
- *
- * Request region specified by @bar exclusively.
- *
- * The region will automatically be released on driver detach. If desired,
- * release manually only with pcim_release_region().
- */
-int pcim_request_region_exclusive(struct pci_dev *pdev, int bar, const char *name)
-{
- return _pcim_request_region(pdev, bar, name, IORESOURCE_EXCLUSIVE);
-}
-
-/**
* pcim_release_region - Release a PCI BAR
* @pdev: PCI device to operate on
* @bar: Index of BAR to release
@@ -893,7 +754,7 @@ int pcim_request_region_exclusive(struct pci_dev *pdev, int bar, const char *nam
* Release a region manually that was previously requested by
* pcim_request_region().
*/
-void pcim_release_region(struct pci_dev *pdev, int bar)
+static void pcim_release_region(struct pci_dev *pdev, int bar)
{
struct pcim_addr_devres res_searched;
@@ -956,30 +817,6 @@ err:
EXPORT_SYMBOL(pcim_request_all_regions);
/**
- * pcim_iounmap_regions - Unmap and release PCI BARs (DEPRECATED)
- * @pdev: PCI device to map IO resources for
- * @mask: Mask of BARs to unmap and release
- *
- * Unmap and release regions specified by @mask.
- *
- * This function is DEPRECATED. Do not use it in new code.
- * Use pcim_iounmap_region() instead.
- */
-void pcim_iounmap_regions(struct pci_dev *pdev, int mask)
-{
- int i;
-
- for (i = 0; i < PCI_STD_NUM_BARS; i++) {
- if (!mask_contains_bar(mask, i))
- continue;
-
- pcim_iounmap_region(pdev, i);
- pcim_remove_bar_from_legacy_table(pdev, i);
- }
-}
-EXPORT_SYMBOL(pcim_iounmap_regions);
-
-/**
* pcim_iomap_range - Create a ranged __iomap mapping within a PCI BAR
* @pdev: PCI device to map IO resources for
* @bar: Index of the BAR
diff --git a/drivers/pci/doe.c b/drivers/pci/doe.c
index 7bd7892c5222..aae9a8a00406 100644
--- a/drivers/pci/doe.c
+++ b/drivers/pci/doe.c
@@ -14,15 +14,17 @@
#include <linux/bitfield.h>
#include <linux/delay.h>
+#include <linux/device.h>
#include <linux/jiffies.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/pci-doe.h>
+#include <linux/sysfs.h>
#include <linux/workqueue.h>
#include "pci.h"
-#define PCI_DOE_PROTOCOL_DISCOVERY 0
+#define PCI_DOE_FEATURE_DISCOVERY 0
/* Timeout of 1 second from 6.30.2 Operation, PCI Spec r6.0 */
#define PCI_DOE_TIMEOUT HZ
@@ -43,22 +45,27 @@
*
* @pdev: PCI device this mailbox belongs to
* @cap_offset: Capability offset
- * @prots: Array of protocols supported (encoded as long values)
+ * @feats: Array of features supported (encoded as long values)
* @wq: Wait queue for work item
* @work_queue: Queue of pci_doe_work items
* @flags: Bit array of PCI_DOE_FLAG_* flags
+ * @sysfs_attrs: Array of sysfs device attributes
*/
struct pci_doe_mb {
struct pci_dev *pdev;
u16 cap_offset;
- struct xarray prots;
+ struct xarray feats;
wait_queue_head_t wq;
struct workqueue_struct *work_queue;
unsigned long flags;
+
+#ifdef CONFIG_SYSFS
+ struct device_attribute *sysfs_attrs;
+#endif
};
-struct pci_doe_protocol {
+struct pci_doe_feature {
u16 vid;
u8 type;
};
@@ -66,7 +73,7 @@ struct pci_doe_protocol {
/**
* struct pci_doe_task - represents a single query/response
*
- * @prot: DOE Protocol
+ * @feat: DOE Feature
* @request_pl: The request payload
* @request_pl_sz: Size of the request payload (bytes)
* @response_pl: The response payload
@@ -78,7 +85,7 @@ struct pci_doe_protocol {
* @doe_mb: Used internally by the mailbox
*/
struct pci_doe_task {
- struct pci_doe_protocol prot;
+ struct pci_doe_feature feat;
const __le32 *request_pl;
size_t request_pl_sz;
__le32 *response_pl;
@@ -92,6 +99,152 @@ struct pci_doe_task {
struct pci_doe_mb *doe_mb;
};
+#ifdef CONFIG_SYSFS
+static ssize_t doe_discovery_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return sysfs_emit(buf, "0001:00\n");
+}
+static DEVICE_ATTR_RO(doe_discovery);
+
+static struct attribute *pci_doe_sysfs_feature_attrs[] = {
+ &dev_attr_doe_discovery.attr,
+ NULL
+};
+
+static bool pci_doe_features_sysfs_group_visible(struct kobject *kobj)
+{
+ struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj));
+
+ return !xa_empty(&pdev->doe_mbs);
+}
+DEFINE_SIMPLE_SYSFS_GROUP_VISIBLE(pci_doe_features_sysfs)
+
+const struct attribute_group pci_doe_sysfs_group = {
+ .name = "doe_features",
+ .attrs = pci_doe_sysfs_feature_attrs,
+ .is_visible = SYSFS_GROUP_VISIBLE(pci_doe_features_sysfs),
+};
+
+static ssize_t pci_doe_sysfs_feature_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return sysfs_emit(buf, "%s\n", attr->attr.name);
+}
+
+static void pci_doe_sysfs_feature_remove(struct pci_dev *pdev,
+ struct pci_doe_mb *doe_mb)
+{
+ struct device_attribute *attrs = doe_mb->sysfs_attrs;
+ struct device *dev = &pdev->dev;
+ unsigned long i;
+ void *entry;
+
+ if (!attrs)
+ return;
+
+ doe_mb->sysfs_attrs = NULL;
+ xa_for_each(&doe_mb->feats, i, entry) {
+ if (attrs[i].show)
+ sysfs_remove_file_from_group(&dev->kobj, &attrs[i].attr,
+ pci_doe_sysfs_group.name);
+ kfree(attrs[i].attr.name);
+ }
+ kfree(attrs);
+}
+
+static int pci_doe_sysfs_feature_populate(struct pci_dev *pdev,
+ struct pci_doe_mb *doe_mb)
+{
+ struct device *dev = &pdev->dev;
+ struct device_attribute *attrs;
+ unsigned long num_features = 0;
+ unsigned long vid, type;
+ unsigned long i;
+ void *entry;
+ int ret;
+
+ xa_for_each(&doe_mb->feats, i, entry)
+ num_features++;
+
+ attrs = kcalloc(num_features, sizeof(*attrs), GFP_KERNEL);
+ if (!attrs) {
+ pci_warn(pdev, "Failed allocating the device_attribute array\n");
+ return -ENOMEM;
+ }
+
+ doe_mb->sysfs_attrs = attrs;
+ xa_for_each(&doe_mb->feats, i, entry) {
+ sysfs_attr_init(&attrs[i].attr);
+ vid = xa_to_value(entry) >> 8;
+ type = xa_to_value(entry) & 0xFF;
+
+ if (vid == PCI_VENDOR_ID_PCI_SIG &&
+ type == PCI_DOE_FEATURE_DISCOVERY) {
+
+ /*
+ * DOE Discovery, manually displayed by
+ * `dev_attr_doe_discovery`
+ */
+ continue;
+ }
+
+ attrs[i].attr.name = kasprintf(GFP_KERNEL,
+ "%04lx:%02lx", vid, type);
+ if (!attrs[i].attr.name) {
+ ret = -ENOMEM;
+ pci_warn(pdev, "Failed allocating the attribute name\n");
+ goto fail;
+ }
+
+ attrs[i].attr.mode = 0444;
+ attrs[i].show = pci_doe_sysfs_feature_show;
+
+ ret = sysfs_add_file_to_group(&dev->kobj, &attrs[i].attr,
+ pci_doe_sysfs_group.name);
+ if (ret) {
+ attrs[i].show = NULL;
+ if (ret != -EEXIST) {
+ pci_warn(pdev, "Failed adding %s to sysfs group\n",
+ attrs[i].attr.name);
+ goto fail;
+ } else
+ kfree(attrs[i].attr.name);
+ }
+ }
+
+ return 0;
+
+fail:
+ pci_doe_sysfs_feature_remove(pdev, doe_mb);
+ return ret;
+}
+
+void pci_doe_sysfs_teardown(struct pci_dev *pdev)
+{
+ struct pci_doe_mb *doe_mb;
+ unsigned long index;
+
+ xa_for_each(&pdev->doe_mbs, index, doe_mb)
+ pci_doe_sysfs_feature_remove(pdev, doe_mb);
+}
+
+void pci_doe_sysfs_init(struct pci_dev *pdev)
+{
+ struct pci_doe_mb *doe_mb;
+ unsigned long index;
+ int ret;
+
+ xa_for_each(&pdev->doe_mbs, index, doe_mb) {
+ ret = pci_doe_sysfs_feature_populate(pdev, doe_mb);
+ if (ret)
+ return;
+ }
+}
+#endif
+
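[Editor's note: assuming a device that exposes one CMA/SPDM mailbox (vendor 0x0001, type 0x01), the resulting sysfs layout would look roughly like:

    /sys/bus/pci/devices/<dev>/doe_features/doe_discovery
    /sys/bus/pci/devices/<dev>/doe_features/0001:01

where each attribute name is the "%04x:%02x" vid:type pair rendered by pci_doe_sysfs_feature_populate(), and reading an attribute echoes its own name back.]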
static int pci_doe_wait(struct pci_doe_mb *doe_mb, unsigned long timeout)
{
if (wait_event_timeout(doe_mb->wq,
@@ -183,8 +336,8 @@ static int pci_doe_send_req(struct pci_doe_mb *doe_mb,
length = 0;
/* Write DOE Header */
- val = FIELD_PREP(PCI_DOE_DATA_OBJECT_HEADER_1_VID, task->prot.vid) |
- FIELD_PREP(PCI_DOE_DATA_OBJECT_HEADER_1_TYPE, task->prot.type);
+ val = FIELD_PREP(PCI_DOE_DATA_OBJECT_HEADER_1_VID, task->feat.vid) |
+ FIELD_PREP(PCI_DOE_DATA_OBJECT_HEADER_1_TYPE, task->feat.type);
pci_write_config_dword(pdev, offset + PCI_DOE_WRITE, val);
pci_write_config_dword(pdev, offset + PCI_DOE_WRITE,
FIELD_PREP(PCI_DOE_DATA_OBJECT_HEADER_2_LENGTH,
@@ -229,12 +382,12 @@ static int pci_doe_recv_resp(struct pci_doe_mb *doe_mb, struct pci_doe_task *tas
int i = 0;
u32 val;
- /* Read the first dword to get the protocol */
+ /* Read the first dword to get the feature */
pci_read_config_dword(pdev, offset + PCI_DOE_READ, &val);
- if ((FIELD_GET(PCI_DOE_DATA_OBJECT_HEADER_1_VID, val) != task->prot.vid) ||
- (FIELD_GET(PCI_DOE_DATA_OBJECT_HEADER_1_TYPE, val) != task->prot.type)) {
- dev_err_ratelimited(&pdev->dev, "[%x] expected [VID, Protocol] = [%04x, %02x], got [%04x, %02x]\n",
- doe_mb->cap_offset, task->prot.vid, task->prot.type,
+ if ((FIELD_GET(PCI_DOE_DATA_OBJECT_HEADER_1_VID, val) != task->feat.vid) ||
+ (FIELD_GET(PCI_DOE_DATA_OBJECT_HEADER_1_TYPE, val) != task->feat.type)) {
+ dev_err_ratelimited(&pdev->dev, "[%x] expected [VID, Feature] = [%04x, %02x], got [%04x, %02x]\n",
+ doe_mb->cap_offset, task->feat.vid, task->feat.type,
FIELD_GET(PCI_DOE_DATA_OBJECT_HEADER_1_VID, val),
FIELD_GET(PCI_DOE_DATA_OBJECT_HEADER_1_TYPE, val));
return -EIO;
@@ -396,7 +549,7 @@ static void pci_doe_task_complete(struct pci_doe_task *task)
}
static int pci_doe_discovery(struct pci_doe_mb *doe_mb, u8 capver, u8 *index, u16 *vid,
- u8 *protocol)
+ u8 *feature)
{
u32 request_pl = FIELD_PREP(PCI_DOE_DATA_OBJECT_DISC_REQ_3_INDEX,
*index) |
@@ -407,7 +560,7 @@ static int pci_doe_discovery(struct pci_doe_mb *doe_mb, u8 capver, u8 *index, u1
u32 response_pl;
int rc;
- rc = pci_doe(doe_mb, PCI_VENDOR_ID_PCI_SIG, PCI_DOE_PROTOCOL_DISCOVERY,
+ rc = pci_doe(doe_mb, PCI_VENDOR_ID_PCI_SIG, PCI_DOE_FEATURE_DISCOVERY,
&request_pl_le, sizeof(request_pl_le),
&response_pl_le, sizeof(response_pl_le));
if (rc < 0)
@@ -418,7 +571,7 @@ static int pci_doe_discovery(struct pci_doe_mb *doe_mb, u8 capver, u8 *index, u1
response_pl = le32_to_cpu(response_pl_le);
*vid = FIELD_GET(PCI_DOE_DATA_OBJECT_DISC_RSP_3_VID, response_pl);
- *protocol = FIELD_GET(PCI_DOE_DATA_OBJECT_DISC_RSP_3_PROTOCOL,
+ *feature = FIELD_GET(PCI_DOE_DATA_OBJECT_DISC_RSP_3_TYPE,
response_pl);
*index = FIELD_GET(PCI_DOE_DATA_OBJECT_DISC_RSP_3_NEXT_INDEX,
response_pl);
@@ -426,12 +579,12 @@ static int pci_doe_discovery(struct pci_doe_mb *doe_mb, u8 capver, u8 *index, u1
return 0;
}
-static void *pci_doe_xa_prot_entry(u16 vid, u8 prot)
+static void *pci_doe_xa_feat_entry(u16 vid, u8 type)
{
- return xa_mk_value((vid << 8) | prot);
+ return xa_mk_value((vid << 8) | type);
}
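[Editor's note: a quick worked example of this encoding, using the CMA/SPDM feature values for illustration.]

/*
 * pci_doe_xa_feat_entry(0x0001, 0x01) == xa_mk_value(0x0101):
 * the vid lands in bits 8 and up, the type in the low byte. The sysfs
 * code above decodes it the same way: vid = entry >> 8, type = entry & 0xFF.
 */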
-static int pci_doe_cache_protocols(struct pci_doe_mb *doe_mb)
+static int pci_doe_cache_features(struct pci_doe_mb *doe_mb)
{
u8 index = 0;
u8 xa_idx = 0;
@@ -442,19 +595,19 @@ static int pci_doe_cache_protocols(struct pci_doe_mb *doe_mb)
do {
int rc;
u16 vid;
- u8 prot;
+ u8 type;
rc = pci_doe_discovery(doe_mb, PCI_EXT_CAP_VER(hdr), &index,
- &vid, &prot);
+ &vid, &type);
if (rc)
return rc;
pci_dbg(doe_mb->pdev,
- "[%x] Found protocol %d vid: %x prot: %x\n",
- doe_mb->cap_offset, xa_idx, vid, prot);
+ "[%x] Found feature %d vid: %x type: %x\n",
+ doe_mb->cap_offset, xa_idx, vid, type);
- rc = xa_insert(&doe_mb->prots, xa_idx++,
- pci_doe_xa_prot_entry(vid, prot), GFP_KERNEL);
+ rc = xa_insert(&doe_mb->feats, xa_idx++,
+ pci_doe_xa_feat_entry(vid, type), GFP_KERNEL);
if (rc)
return rc;
} while (index);
@@ -478,7 +631,7 @@ static void pci_doe_cancel_tasks(struct pci_doe_mb *doe_mb)
* @pdev: PCI device to create the DOE mailbox for
* @cap_offset: Offset of the DOE mailbox
*
- * Create a single mailbox object to manage the mailbox protocol at the
+ * Create a single mailbox object to manage the mailbox feature at the
* cap_offset specified.
*
* RETURNS: created mailbox object on success
@@ -497,7 +650,7 @@ static struct pci_doe_mb *pci_doe_create_mb(struct pci_dev *pdev,
doe_mb->pdev = pdev;
doe_mb->cap_offset = cap_offset;
init_waitqueue_head(&doe_mb->wq);
- xa_init(&doe_mb->prots);
+ xa_init(&doe_mb->feats);
doe_mb->work_queue = alloc_ordered_workqueue("%s %s DOE [%x]", 0,
dev_bus_name(&pdev->dev),
@@ -520,11 +673,11 @@ static struct pci_doe_mb *pci_doe_create_mb(struct pci_dev *pdev,
/*
* The state machine and the mailbox should be in sync now;
- * Use the mailbox to query protocols.
+ * Use the mailbox to query features.
*/
- rc = pci_doe_cache_protocols(doe_mb);
+ rc = pci_doe_cache_features(doe_mb);
if (rc) {
- pci_err(pdev, "[%x] failed to cache protocols : %d\n",
+ pci_err(pdev, "[%x] failed to cache features : %d\n",
doe_mb->cap_offset, rc);
goto err_cancel;
}
@@ -533,7 +686,7 @@ static struct pci_doe_mb *pci_doe_create_mb(struct pci_dev *pdev,
err_cancel:
pci_doe_cancel_tasks(doe_mb);
- xa_destroy(&doe_mb->prots);
+ xa_destroy(&doe_mb->feats);
err_destroy_wq:
destroy_workqueue(doe_mb->work_queue);
err_free:
@@ -551,31 +704,31 @@ err_free:
static void pci_doe_destroy_mb(struct pci_doe_mb *doe_mb)
{
pci_doe_cancel_tasks(doe_mb);
- xa_destroy(&doe_mb->prots);
+ xa_destroy(&doe_mb->feats);
destroy_workqueue(doe_mb->work_queue);
kfree(doe_mb);
}
/**
- * pci_doe_supports_prot() - Return if the DOE instance supports the given
- * protocol
+ * pci_doe_supports_feat() - Return if the DOE instance supports the given
+ * feature
* @doe_mb: DOE mailbox capability to query
- * @vid: Protocol Vendor ID
- * @type: Protocol type
+ * @vid: Feature Vendor ID
+ * @type: Feature type
*
- * RETURNS: True if the DOE mailbox supports the protocol specified
+ * RETURNS: True if the DOE mailbox supports the feature specified
*/
-static bool pci_doe_supports_prot(struct pci_doe_mb *doe_mb, u16 vid, u8 type)
+static bool pci_doe_supports_feat(struct pci_doe_mb *doe_mb, u16 vid, u8 type)
{
unsigned long index;
void *entry;
- /* The discovery protocol must always be supported */
- if (vid == PCI_VENDOR_ID_PCI_SIG && type == PCI_DOE_PROTOCOL_DISCOVERY)
+ /* The discovery feature must always be supported */
+ if (vid == PCI_VENDOR_ID_PCI_SIG && type == PCI_DOE_FEATURE_DISCOVERY)
return true;
- xa_for_each(&doe_mb->prots, index, entry)
- if (entry == pci_doe_xa_prot_entry(vid, type))
+ xa_for_each(&doe_mb->feats, index, entry)
+ if (entry == pci_doe_xa_feat_entry(vid, type))
return true;
return false;
@@ -603,7 +756,7 @@ static bool pci_doe_supports_prot(struct pci_doe_mb *doe_mb, u16 vid, u8 type)
static int pci_doe_submit_task(struct pci_doe_mb *doe_mb,
struct pci_doe_task *task)
{
- if (!pci_doe_supports_prot(doe_mb, task->prot.vid, task->prot.type))
+ if (!pci_doe_supports_feat(doe_mb, task->feat.vid, task->feat.type))
return -EINVAL;
if (test_bit(PCI_DOE_FLAG_DEAD, &doe_mb->flags))
@@ -649,8 +802,8 @@ int pci_doe(struct pci_doe_mb *doe_mb, u16 vendor, u8 type,
{
DECLARE_COMPLETION_ONSTACK(c);
struct pci_doe_task task = {
- .prot.vid = vendor,
- .prot.type = type,
+ .feat.vid = vendor,
+ .feat.type = type,
.request_pl = request,
.request_pl_sz = request_sz,
.response_pl = response,
@@ -677,7 +830,7 @@ EXPORT_SYMBOL_GPL(pci_doe);
* @vendor: Vendor ID
* @type: Data Object Type
*
- * Find first DOE mailbox of a PCI device which supports the given protocol.
+ * Find first DOE mailbox of a PCI device which supports the given feature.
*
* RETURNS: Pointer to the DOE mailbox or NULL if none was found.
*/
@@ -688,7 +841,7 @@ struct pci_doe_mb *pci_find_doe_mailbox(struct pci_dev *pdev, u16 vendor,
unsigned long index;
xa_for_each(&pdev->doe_mbs, index, doe_mb)
- if (pci_doe_supports_prot(doe_mb, vendor, type))
+ if (pci_doe_supports_feat(doe_mb, vendor, type))
return doe_mb;
return NULL;
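[Editor's note: putting the renamed pieces together, a hedged caller-side sketch; foo_query_feature() is assumed and the one-dword zeroed payload is only a placeholder, since real payload layouts are feature-specific.]

/* Illustrative caller; function name and payload are assumptions. */
static int foo_query_feature(struct pci_dev *pdev, u16 vid, u8 type)
{
	struct pci_doe_mb *mb;
	__le32 req = cpu_to_le32(0);	/* placeholder request payload */
	__le32 rsp;
	int rc;

	mb = pci_find_doe_mailbox(pdev, vid, type);
	if (!mb)
		return -ENODEV;		/* no mailbox supports this feature */

	rc = pci_doe(mb, vid, type, &req, sizeof(req), &rsp, sizeof(rsp));
	if (rc < 0)
		return rc;

	return 0;			/* rc held the response size in bytes */
}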
diff --git a/drivers/pci/endpoint/Kconfig b/drivers/pci/endpoint/Kconfig
index 17bbdc9bbde0..1c5d82eb57d4 100644
--- a/drivers/pci/endpoint/Kconfig
+++ b/drivers/pci/endpoint/Kconfig
@@ -26,7 +26,7 @@ config PCI_ENDPOINT_CONFIGFS
help
This will enable the configfs entry that can be used to
configure the endpoint function and used to bind the
- function with a endpoint controller.
+ function with an endpoint controller.
source "drivers/pci/endpoint/functions/Kconfig"
diff --git a/drivers/pci/endpoint/functions/pci-epf-mhi.c b/drivers/pci/endpoint/functions/pci-epf-mhi.c
index 54286a40bdfb..6643a88c7a0c 100644
--- a/drivers/pci/endpoint/functions/pci-epf-mhi.c
+++ b/drivers/pci/endpoint/functions/pci-epf-mhi.c
@@ -125,7 +125,7 @@ static const struct pci_epf_mhi_ep_info sm8450_info = {
static struct pci_epf_header sa8775p_header = {
.vendorid = PCI_VENDOR_ID_QCOM,
- .deviceid = 0x0306, /* FIXME: Update deviceid for sa8775p EP */
+ .deviceid = 0x0116,
.baseclass_code = PCI_CLASS_OTHERS,
.interrupt_pin = PCI_INTERRUPT_INTA,
};
diff --git a/drivers/pci/endpoint/functions/pci-epf-test.c b/drivers/pci/endpoint/functions/pci-epf-test.c
index 2409787cf56d..50eb4106369f 100644
--- a/drivers/pci/endpoint/functions/pci-epf-test.c
+++ b/drivers/pci/endpoint/functions/pci-epf-test.c
@@ -45,6 +45,9 @@
#define TIMER_RESOLUTION 1
#define CAP_UNALIGNED_ACCESS BIT(0)
+#define CAP_MSI BIT(1)
+#define CAP_MSIX BIT(2)
+#define CAP_INTX BIT(3)
static struct workqueue_struct *kpcitest_workqueue;
@@ -651,7 +654,7 @@ static void pci_epf_test_raise_irq(struct pci_epf_test *epf_test,
case IRQ_TYPE_MSIX:
count = pci_epc_get_msix(epc, epf->func_no, epf->vfunc_no);
if (irq_number > count || count <= 0) {
- dev_err(dev, "Invalid MSIX IRQ number %d / %d\n",
+ dev_err(dev, "Invalid MSI-X IRQ number %d / %d\n",
irq_number, count);
return;
}
@@ -738,6 +741,7 @@ static int pci_epf_test_set_bar(struct pci_epf *epf)
if (ret) {
pci_epf_free_space(epf, epf_test->reg[bar], bar,
PRIMARY_INTERFACE);
+ epf_test->reg[bar] = NULL;
dev_err(dev, "Failed to set BAR%d\n", bar);
if (bar == test_reg_bar)
return ret;
@@ -773,6 +777,15 @@ static void pci_epf_test_set_capabilities(struct pci_epf *epf)
if (epc->ops->align_addr)
caps |= CAP_UNALIGNED_ACCESS;
+ if (epf_test->epc_features->msi_capable)
+ caps |= CAP_MSI;
+
+ if (epf_test->epc_features->msix_capable)
+ caps |= CAP_MSIX;
+
+ if (epf_test->epc_features->intx_capable)
+ caps |= CAP_INTX;
+
reg->caps = cpu_to_le32(caps);
}
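[Editor's note: on the host side, a test driver can now gate its IRQ tests on these bits. A sketch under the assumption that the host mirrors the CAP_* defines and that 'test->base' and the register offset name exist in that driver.]

/* Hypothetical host-side check; base mapping and offset name assumed. */
u32 caps = readl(test->base + PCI_ENDPOINT_TEST_CAPS);

if (!(caps & CAP_MSIX))
	return -EOPNOTSUPP;	/* endpoint controller cannot raise MSI-X */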
@@ -929,6 +942,7 @@ static void pci_epf_test_free_space(struct pci_epf *epf)
pci_epf_free_space(epf, epf_test->reg[bar], bar,
PRIMARY_INTERFACE);
+ epf_test->reg[bar] = NULL;
}
}
diff --git a/drivers/pci/endpoint/functions/pci-epf-vntb.c b/drivers/pci/endpoint/functions/pci-epf-vntb.c
index 874cb097b093..577055be3033 100644
--- a/drivers/pci/endpoint/functions/pci-epf-vntb.c
+++ b/drivers/pci/endpoint/functions/pci-epf-vntb.c
@@ -408,11 +408,9 @@ static void epf_ntb_config_spad_bar_free(struct epf_ntb *ntb)
*/
static int epf_ntb_config_spad_bar_alloc(struct epf_ntb *ntb)
{
- size_t align;
enum pci_barno barno;
struct epf_ntb_ctrl *ctrl;
u32 spad_size, ctrl_size;
- u64 size;
struct pci_epf *epf = ntb->epf;
struct device *dev = &epf->dev;
u32 spad_count;
@@ -422,31 +420,13 @@ static int epf_ntb_config_spad_bar_alloc(struct epf_ntb *ntb)
epf->func_no,
epf->vfunc_no);
barno = ntb->epf_ntb_bar[BAR_CONFIG];
- size = epc_features->bar[barno].fixed_size;
- align = epc_features->align;
-
- if ((!IS_ALIGNED(size, align)))
- return -EINVAL;
-
spad_count = ntb->spad_count;
- ctrl_size = sizeof(struct epf_ntb_ctrl);
+ ctrl_size = ALIGN(sizeof(struct epf_ntb_ctrl), sizeof(u32));
spad_size = 2 * spad_count * sizeof(u32);
- if (!align) {
- ctrl_size = roundup_pow_of_two(ctrl_size);
- spad_size = roundup_pow_of_two(spad_size);
- } else {
- ctrl_size = ALIGN(ctrl_size, align);
- spad_size = ALIGN(spad_size, align);
- }
-
- if (!size)
- size = ctrl_size + spad_size;
- else if (size < ctrl_size + spad_size)
- return -EINVAL;
-
- base = pci_epf_alloc_space(epf, size, barno, epc_features, 0);
+ base = pci_epf_alloc_space(epf, ctrl_size + spad_size,
+ barno, epc_features, 0);
if (!base) {
dev_err(dev, "Config/Status/SPAD alloc region fail\n");
return -ENOMEM;
@@ -530,7 +510,7 @@ static int epf_ntb_db_bar_init(struct epf_ntb *ntb)
struct device *dev = &ntb->epf->dev;
int ret;
struct pci_epf_bar *epf_bar;
- void __iomem *mw_addr;
+ void *mw_addr;
enum pci_barno barno;
size_t size = sizeof(u32) * ntb->db_count;
@@ -700,7 +680,7 @@ static int epf_ntb_init_epc_bar(struct epf_ntb *ntb)
barno = pci_epc_get_next_free_bar(epc_features, barno);
if (barno < 0) {
dev_err(dev, "Fail to get NTB function BAR\n");
- return barno;
+ return -ENOENT;
}
ntb->epf_ntb_bar[bar] = barno;
}
diff --git a/drivers/pci/endpoint/pci-epc-core.c b/drivers/pci/endpoint/pci-epc-core.c
index 9e9ca5f8e8f8..ca7f19cc973a 100644
--- a/drivers/pci/endpoint/pci-epc-core.c
+++ b/drivers/pci/endpoint/pci-epc-core.c
@@ -25,13 +25,6 @@ static void devm_pci_epc_release(struct device *dev, void *res)
pci_epc_destroy(epc);
}
-static int devm_pci_epc_match(struct device *dev, void *res, void *match_data)
-{
- struct pci_epc **epc = res;
-
- return *epc == match_data;
-}
-
/**
* pci_epc_put() - release the PCI endpoint controller
* @epc: epc returned by pci_epc_get()
@@ -300,8 +293,6 @@ int pci_epc_get_msi(struct pci_epc *epc, u8 func_no, u8 vfunc_no)
if (interrupt < 0)
return 0;
- interrupt = 1 << interrupt;
-
return interrupt;
}
EXPORT_SYMBOL_GPL(pci_epc_get_msi);
@@ -311,28 +302,25 @@ EXPORT_SYMBOL_GPL(pci_epc_get_msi);
* @epc: the EPC device on which MSI has to be configured
* @func_no: the physical endpoint function number in the EPC device
* @vfunc_no: the virtual endpoint function number in the physical function
- * @interrupts: number of MSI interrupts required by the EPF
+ * @nr_irqs: number of MSI interrupts required by the EPF
*
* Invoke to set the required number of MSI interrupts.
*/
-int pci_epc_set_msi(struct pci_epc *epc, u8 func_no, u8 vfunc_no, u8 interrupts)
+int pci_epc_set_msi(struct pci_epc *epc, u8 func_no, u8 vfunc_no, u8 nr_irqs)
{
int ret;
- u8 encode_int;
if (!pci_epc_function_is_valid(epc, func_no, vfunc_no))
return -EINVAL;
- if (interrupts < 1 || interrupts > 32)
+ if (nr_irqs < 1 || nr_irqs > 32)
return -EINVAL;
if (!epc->ops->set_msi)
return 0;
- encode_int = order_base_2(interrupts);
-
mutex_lock(&epc->lock);
- ret = epc->ops->set_msi(epc, func_no, vfunc_no, encode_int);
+ ret = epc->ops->set_msi(epc, func_no, vfunc_no, nr_irqs);
mutex_unlock(&epc->lock);
return ret;
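[Editor's note: the semantic change is easiest to see with a number; 8 vectors chosen for illustration.]

/*
 * An EPF needing 8 MSI vectors still calls
 *
 *	pci_epc_set_msi(epc, func_no, vfunc_no, 8);
 *
 * but where the core previously passed order_base_2(8) == 3 (the
 * log2-encoded Multiple Message Capable value) to ->set_msi(), it now
 * passes the plain count 8 and leaves any encoding to the controller
 * driver.
 */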
@@ -364,7 +352,7 @@ int pci_epc_get_msix(struct pci_epc *epc, u8 func_no, u8 vfunc_no)
if (interrupt < 0)
return 0;
- return interrupt + 1;
+ return interrupt;
}
EXPORT_SYMBOL_GPL(pci_epc_get_msix);
@@ -373,29 +361,28 @@ EXPORT_SYMBOL_GPL(pci_epc_get_msix);
* @epc: the EPC device on which MSI-X has to be configured
* @func_no: the physical endpoint function number in the EPC device
* @vfunc_no: the virtual endpoint function number in the physical function
- * @interrupts: number of MSI-X interrupts required by the EPF
+ * @nr_irqs: number of MSI-X interrupts required by the EPF
* @bir: BAR where the MSI-X table resides
* @offset: Offset pointing to the start of MSI-X table
*
* Invoke to set the required number of MSI-X interrupts.
*/
-int pci_epc_set_msix(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
- u16 interrupts, enum pci_barno bir, u32 offset)
+int pci_epc_set_msix(struct pci_epc *epc, u8 func_no, u8 vfunc_no, u16 nr_irqs,
+ enum pci_barno bir, u32 offset)
{
int ret;
if (!pci_epc_function_is_valid(epc, func_no, vfunc_no))
return -EINVAL;
- if (interrupts < 1 || interrupts > 2048)
+ if (nr_irqs < 1 || nr_irqs > 2048)
return -EINVAL;
if (!epc->ops->set_msix)
return 0;
mutex_lock(&epc->lock);
- ret = epc->ops->set_msix(epc, func_no, vfunc_no, interrupts - 1, bir,
- offset);
+ ret = epc->ops->set_msix(epc, func_no, vfunc_no, nr_irqs, bir, offset);
mutex_unlock(&epc->lock);
return ret;
@@ -609,6 +596,10 @@ int pci_epc_set_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
if (!epc_features)
return -EINVAL;
+ if (epc_features->bar[bar].type == BAR_RESIZABLE &&
+ (epf_bar->size < SZ_1M || (u64)epf_bar->size > (SZ_128G * 1024)))
+ return -EINVAL;
+
if (epc_features->bar[bar].type == BAR_FIXED &&
(epc_features->bar[bar].fixed_size != epf_bar->size))
return -EINVAL;
@@ -635,6 +626,33 @@ int pci_epc_set_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
EXPORT_SYMBOL_GPL(pci_epc_set_bar);
/**
+ * pci_epc_bar_size_to_rebar_cap() - convert a size to the representation used
+ * by the Resizable BAR Capability Register
+ * @size: the size to convert
+ * @cap: where to store the result
+ *
+ * Returns 0 on success and a negative error code in case of error.
+ */
+int pci_epc_bar_size_to_rebar_cap(size_t size, u32 *cap)
+{
+ /*
+ * As per PCIe r6.0, sec 7.8.6.2, min size for a resizable BAR is 1 MB,
+ * thus disallow a requested BAR size smaller than 1 MB.
+ * Disallow a requested BAR size larger than 128 TB.
+ */
+ if (size < SZ_1M || (u64)size > (SZ_128G * 1024))
+ return -EINVAL;
+
+ *cap = ilog2(size) - ilog2(SZ_1M);
+
+ /* Sizes in REBAR_CAP start at BIT(4). */
+ *cap = BIT(*cap + 4);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(pci_epc_bar_size_to_rebar_cap);
+
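[Editor's note: a quick worked example of the arithmetic above.]

/*
 * An 8 MB BAR: ilog2(SZ_8M) - ilog2(SZ_1M) == 23 - 20 == 3, so
 * cap == BIT(3 + 4) == BIT(7), matching the REBAR encoding in which
 * BIT(4) stands for 1 MB. The guard above rejects anything beyond
 * SZ_128G * 1024 == 128 TB, the largest size the capability encodes.
 */
u32 cap;
int err = pci_epc_bar_size_to_rebar_cap(SZ_8M, &cap);	/* err == 0, cap == BIT(7) */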
+/**
* pci_epc_write_header() - write standard configuration header
* @epc: the EPC device to which the configuration header should be written
* @func_no: the physical endpoint function number in the EPC device
@@ -931,24 +949,6 @@ void pci_epc_destroy(struct pci_epc *epc)
}
EXPORT_SYMBOL_GPL(pci_epc_destroy);
-/**
- * devm_pci_epc_destroy() - destroy the EPC device
- * @dev: device that wants to destroy the EPC
- * @epc: the EPC device that has to be destroyed
- *
- * Invoke to destroy the devres associated with this
- * pci_epc and destroy the EPC device.
- */
-void devm_pci_epc_destroy(struct device *dev, struct pci_epc *epc)
-{
- int r;
-
- r = devres_release(dev, devm_pci_epc_release, devm_pci_epc_match,
- epc);
- dev_WARN_ONCE(dev, r, "couldn't find PCI EPC resource\n");
-}
-EXPORT_SYMBOL_GPL(devm_pci_epc_destroy);
-
static void pci_epc_release(struct device *dev)
{
kfree(to_pci_epc(dev));
diff --git a/drivers/pci/endpoint/pci-epf-core.c b/drivers/pci/endpoint/pci-epf-core.c
index 50bc2892a36c..577a9e490115 100644
--- a/drivers/pci/endpoint/pci-epf-core.c
+++ b/drivers/pci/endpoint/pci-epf-core.c
@@ -236,12 +236,13 @@ void pci_epf_free_space(struct pci_epf *epf, void *addr, enum pci_barno bar,
}
dev = epc->dev.parent;
- dma_free_coherent(dev, epf_bar[bar].size, addr,
+ dma_free_coherent(dev, epf_bar[bar].aligned_size, addr,
epf_bar[bar].phys_addr);
epf_bar[bar].phys_addr = 0;
epf_bar[bar].addr = NULL;
epf_bar[bar].size = 0;
+ epf_bar[bar].aligned_size = 0;
epf_bar[bar].barno = 0;
epf_bar[bar].flags = 0;
}
@@ -264,7 +265,7 @@ void *pci_epf_alloc_space(struct pci_epf *epf, size_t size, enum pci_barno bar,
enum pci_epc_interface_type type)
{
u64 bar_fixed_size = epc_features->bar[bar].fixed_size;
- size_t align = epc_features->align;
+ size_t aligned_size, align = epc_features->align;
struct pci_epf_bar *epf_bar;
dma_addr_t phys_addr;
struct pci_epc *epc;
@@ -274,6 +275,10 @@ void *pci_epf_alloc_space(struct pci_epf *epf, size_t size, enum pci_barno bar,
if (size < 128)
size = 128;
+ /* According to PCIe base spec, min size for a resizable BAR is 1 MB. */
+ if (epc_features->bar[bar].type == BAR_RESIZABLE && size < SZ_1M)
+ size = SZ_1M;
+
if (epc_features->bar[bar].type == BAR_FIXED && bar_fixed_size) {
if (size > bar_fixed_size) {
dev_err(&epf->dev,
@@ -281,12 +286,18 @@ void *pci_epf_alloc_space(struct pci_epf *epf, size_t size, enum pci_barno bar,
return NULL;
}
size = bar_fixed_size;
+ } else {
+ /* BAR size must be power of two */
+ size = roundup_pow_of_two(size);
}
- if (align)
- size = ALIGN(size, align);
- else
- size = roundup_pow_of_two(size);
+ /*
+ * Allocate enough memory to accommodate the iATU alignment
+ * requirement. In most cases, this will be the same as .size but
+ * it might be different if, for example, the fixed size of a BAR
+ * is smaller than align.
+ */
+ aligned_size = align ? ALIGN(size, align) : size;
if (type == PRIMARY_INTERFACE) {
epc = epf->epc;
@@ -297,7 +308,7 @@ void *pci_epf_alloc_space(struct pci_epf *epf, size_t size, enum pci_barno bar,
}
dev = epc->dev.parent;
- space = dma_alloc_coherent(dev, size, &phys_addr, GFP_KERNEL);
+ space = dma_alloc_coherent(dev, aligned_size, &phys_addr, GFP_KERNEL);
if (!space) {
dev_err(dev, "failed to allocate mem space\n");
return NULL;
@@ -306,6 +317,7 @@ void *pci_epf_alloc_space(struct pci_epf *epf, size_t size, enum pci_barno bar,
epf_bar[bar].phys_addr = phys_addr;
epf_bar[bar].addr = space;
epf_bar[bar].size = size;
+ epf_bar[bar].aligned_size = aligned_size;
epf_bar[bar].barno = bar;
if (upper_32_bits(size) || epc_features->bar[bar].only_64bit)
epf_bar[bar].flags |= PCI_BASE_ADDRESS_MEM_TYPE_64;
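[Editor's note: one concrete case the new comment alludes to, with assumed numbers.]

/*
 * With epc_features->align == SZ_4K and a BAR fixed at 256 bytes, the
 * function keeps epf_bar->size == 256 (what the BAR advertises to the
 * host) while allocating, and later freeing, epf_bar->aligned_size ==
 * ALIGN(256, SZ_4K) == 4096 bytes of coherent memory to satisfy the
 * iATU alignment requirement.
 */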
diff --git a/drivers/pci/hotplug/Kconfig b/drivers/pci/hotplug/Kconfig
index 123c4c7c2ab5..3207860b52e4 100644
--- a/drivers/pci/hotplug/Kconfig
+++ b/drivers/pci/hotplug/Kconfig
@@ -97,7 +97,7 @@ config HOTPLUG_PCI_CPCI_ZT5550
tristate "Ziatech ZT5550 CompactPCI Hotplug driver"
depends on HOTPLUG_PCI_CPCI && X86
help
- Say Y here if you have an Performance Technologies (formerly Intel,
+ Say Y here if you have a Performance Technologies (formerly Intel,
formerly just Ziatech) Ziatech ZT5550 CompactPCI system card.
To compile this driver as a module, choose M here: the
diff --git a/drivers/pci/hotplug/cpci_hotplug.h b/drivers/pci/hotplug/cpci_hotplug.h
index 03fa39ab0c88..a31d6b62f523 100644
--- a/drivers/pci/hotplug/cpci_hotplug.h
+++ b/drivers/pci/hotplug/cpci_hotplug.h
@@ -44,8 +44,6 @@ struct cpci_hp_controller_ops {
int (*enable_irq)(void);
int (*disable_irq)(void);
int (*check_irq)(void *dev_id);
- u8 (*get_power)(struct slot *slot);
- int (*set_power)(struct slot *slot, int value);
};
struct cpci_hp_controller {
diff --git a/drivers/pci/hotplug/cpci_hotplug_core.c b/drivers/pci/hotplug/cpci_hotplug_core.c
index d0559d2faf50..dd93e53ea7c2 100644
--- a/drivers/pci/hotplug/cpci_hotplug_core.c
+++ b/drivers/pci/hotplug/cpci_hotplug_core.c
@@ -71,13 +71,10 @@ static int
enable_slot(struct hotplug_slot *hotplug_slot)
{
struct slot *slot = to_slot(hotplug_slot);
- int retval = 0;
dbg("%s - physical_slot = %s", __func__, slot_name(slot));
- if (controller->ops->set_power)
- retval = controller->ops->set_power(slot, 1);
- return retval;
+ return 0;
}
static int
@@ -109,12 +106,6 @@ disable_slot(struct hotplug_slot *hotplug_slot)
}
cpci_led_on(slot);
- if (controller->ops->set_power) {
- retval = controller->ops->set_power(slot, 0);
- if (retval)
- goto disable_error;
- }
-
slot->adapter_status = 0;
if (slot->extracting) {
@@ -129,11 +120,7 @@ disable_error:
static u8
cpci_get_power_status(struct slot *slot)
{
- u8 power = 1;
-
- if (controller->ops->get_power)
- power = controller->ops->get_power(slot);
- return power;
+ return 1;
}
static int
diff --git a/drivers/pci/hotplug/cpqphp_ctrl.c b/drivers/pci/hotplug/cpqphp_ctrl.c
index c01968ef0bd7..760a5dec0431 100644
--- a/drivers/pci/hotplug/cpqphp_ctrl.c
+++ b/drivers/pci/hotplug/cpqphp_ctrl.c
@@ -1794,7 +1794,7 @@ static void interrupt_event_handler(struct controller *ctrl)
} else if (ctrl->event_queue[loop].event_type ==
INT_BUTTON_CANCEL) {
dbg("button cancel\n");
- del_timer(&p_slot->task_event);
+ timer_delete(&p_slot->task_event);
mutex_lock(&ctrl->crit_sect);
@@ -1883,7 +1883,7 @@ void cpqhp_pushbutton_thread(struct timer_list *t)
{
u8 hp_slot;
struct pci_func *func;
- struct slot *p_slot = from_timer(p_slot, t, task_event);
+ struct slot *p_slot = timer_container_of(p_slot, t, task_event);
struct controller *ctrl = (struct controller *) p_slot->ctrl;
pushbutton_pending = NULL;
diff --git a/drivers/pci/hotplug/pci_hotplug_core.c b/drivers/pci/hotplug/pci_hotplug_core.c
index 36236ac88fd5..fadcf98a8a66 100644
--- a/drivers/pci/hotplug/pci_hotplug_core.c
+++ b/drivers/pci/hotplug/pci_hotplug_core.c
@@ -14,21 +14,15 @@
* Scott Murray <scottm@somanetworks.com>
*/
-#include <linux/module.h> /* try_module_get & module_put */
+#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/types.h>
-#include <linux/list.h>
#include <linux/kobject.h>
#include <linux/sysfs.h>
-#include <linux/pagemap.h>
#include <linux/init.h>
-#include <linux/mount.h>
-#include <linux/namei.h>
-#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/pci_hotplug.h>
-#include <linux/uaccess.h>
#include "../pci.h"
#include "cpci_hotplug.h"
@@ -42,20 +36,14 @@
/* local variables */
static bool debug;
-static LIST_HEAD(pci_hotplug_slot_list);
-static DEFINE_MUTEX(pci_hp_mutex);
-
/* Weee, fun with macros... */
#define GET_STATUS(name, type) \
static int get_##name(struct hotplug_slot *slot, type *value) \
{ \
const struct hotplug_slot_ops *ops = slot->ops; \
int retval = 0; \
- if (!try_module_get(slot->owner)) \
- return -ENODEV; \
if (ops->get_##name) \
retval = ops->get_##name(slot, value); \
- module_put(slot->owner); \
return retval; \
}
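[Editor's note: for reference, the file's GET_STATUS(power_status, u8) instantiation now expands to roughly the following, without the old try_module_get()/module_put() bracketing.]

static int get_power_status(struct hotplug_slot *slot, u8 *value)
{
	const struct hotplug_slot_ops *ops = slot->ops;
	int retval = 0;

	if (ops->get_power_status)
		retval = ops->get_power_status(slot, value);
	return retval;
}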
@@ -88,10 +76,6 @@ static ssize_t power_write_file(struct pci_slot *pci_slot, const char *buf,
power = (u8)(lpower & 0xff);
dbg("power = %d\n", power);
- if (!try_module_get(slot->owner)) {
- retval = -ENODEV;
- goto exit;
- }
switch (power) {
case 0:
if (slot->ops->disable_slot)
@@ -107,9 +91,7 @@ static ssize_t power_write_file(struct pci_slot *pci_slot, const char *buf,
err("Illegal value specified for power\n");
retval = -EINVAL;
}
- module_put(slot->owner);
-exit:
if (retval)
return retval;
return count;
@@ -146,15 +128,9 @@ static ssize_t attention_write_file(struct pci_slot *pci_slot, const char *buf,
attention = (u8)(lattention & 0xff);
dbg(" - attention = %d\n", attention);
- if (!try_module_get(slot->owner)) {
- retval = -ENODEV;
- goto exit;
- }
if (ops->set_attention_status)
retval = ops->set_attention_status(slot, attention);
- module_put(slot->owner);
-exit:
if (retval)
return retval;
return count;
@@ -212,15 +188,9 @@ static ssize_t test_write_file(struct pci_slot *pci_slot, const char *buf,
test = (u32)(ltest & 0xffffffff);
dbg("test = %d\n", test);
- if (!try_module_get(slot->owner)) {
- retval = -ENODEV;
- goto exit;
- }
if (slot->ops->hardware_test)
retval = slot->ops->hardware_test(slot, test);
- module_put(slot->owner);
-exit:
if (retval)
return retval;
return count;
@@ -231,12 +201,8 @@ static struct pci_slot_attribute hotplug_slot_attr_test = {
.store = test_write_file
};
-static bool has_power_file(struct pci_slot *pci_slot)
+static bool has_power_file(struct hotplug_slot *slot)
{
- struct hotplug_slot *slot = pci_slot->hotplug;
-
- if ((!slot) || (!slot->ops))
- return false;
if ((slot->ops->enable_slot) ||
(slot->ops->disable_slot) ||
(slot->ops->get_power_status))
@@ -244,87 +210,79 @@ static bool has_power_file(struct pci_slot *pci_slot)
return false;
}
-static bool has_attention_file(struct pci_slot *pci_slot)
+static bool has_attention_file(struct hotplug_slot *slot)
{
- struct hotplug_slot *slot = pci_slot->hotplug;
-
- if ((!slot) || (!slot->ops))
- return false;
if ((slot->ops->set_attention_status) ||
(slot->ops->get_attention_status))
return true;
return false;
}
-static bool has_latch_file(struct pci_slot *pci_slot)
+static bool has_latch_file(struct hotplug_slot *slot)
{
- struct hotplug_slot *slot = pci_slot->hotplug;
-
- if ((!slot) || (!slot->ops))
- return false;
if (slot->ops->get_latch_status)
return true;
return false;
}
-static bool has_adapter_file(struct pci_slot *pci_slot)
+static bool has_adapter_file(struct hotplug_slot *slot)
{
- struct hotplug_slot *slot = pci_slot->hotplug;
-
- if ((!slot) || (!slot->ops))
- return false;
if (slot->ops->get_adapter_status)
return true;
return false;
}
-static bool has_test_file(struct pci_slot *pci_slot)
+static bool has_test_file(struct hotplug_slot *slot)
{
- struct hotplug_slot *slot = pci_slot->hotplug;
-
- if ((!slot) || (!slot->ops))
- return false;
if (slot->ops->hardware_test)
return true;
return false;
}
-static int fs_add_slot(struct pci_slot *pci_slot)
+static int fs_add_slot(struct hotplug_slot *slot, struct pci_slot *pci_slot)
{
+ struct kobject *kobj;
int retval = 0;
/* Create symbolic link to the hotplug driver module */
- pci_hp_create_module_link(pci_slot);
+ kobj = kset_find_obj(module_kset, slot->mod_name);
+ if (kobj) {
+ retval = sysfs_create_link(&pci_slot->kobj, kobj, "module");
+ if (retval)
+ dev_err(&pci_slot->bus->dev,
+ "Error creating sysfs link (%d)\n", retval);
+ kobject_put(kobj);
+ }
- if (has_power_file(pci_slot)) {
+ if (has_power_file(slot)) {
retval = sysfs_create_file(&pci_slot->kobj,
&hotplug_slot_attr_power.attr);
if (retval)
goto exit_power;
}
- if (has_attention_file(pci_slot)) {
+ if (has_attention_file(slot)) {
retval = sysfs_create_file(&pci_slot->kobj,
&hotplug_slot_attr_attention.attr);
if (retval)
goto exit_attention;
}
- if (has_latch_file(pci_slot)) {
+ if (has_latch_file(slot)) {
retval = sysfs_create_file(&pci_slot->kobj,
&hotplug_slot_attr_latch.attr);
if (retval)
goto exit_latch;
}
- if (has_adapter_file(pci_slot)) {
+ if (has_adapter_file(slot)) {
retval = sysfs_create_file(&pci_slot->kobj,
&hotplug_slot_attr_presence.attr);
if (retval)
goto exit_adapter;
}
- if (has_test_file(pci_slot)) {
+ if (has_test_file(slot)) {
retval = sysfs_create_file(&pci_slot->kobj,
&hotplug_slot_attr_test.attr);
if (retval)
@@ -334,56 +292,45 @@ static int fs_add_slot(struct pci_slot *pci_slot)
goto exit;
exit_test:
- if (has_adapter_file(pci_slot))
+ if (has_adapter_file(slot))
sysfs_remove_file(&pci_slot->kobj,
&hotplug_slot_attr_presence.attr);
exit_adapter:
- if (has_latch_file(pci_slot))
+ if (has_latch_file(slot))
sysfs_remove_file(&pci_slot->kobj, &hotplug_slot_attr_latch.attr);
exit_latch:
- if (has_attention_file(pci_slot))
+ if (has_attention_file(slot))
sysfs_remove_file(&pci_slot->kobj,
&hotplug_slot_attr_attention.attr);
exit_attention:
- if (has_power_file(pci_slot))
+ if (has_power_file(slot))
sysfs_remove_file(&pci_slot->kobj, &hotplug_slot_attr_power.attr);
exit_power:
- pci_hp_remove_module_link(pci_slot);
+ sysfs_remove_link(&pci_slot->kobj, "module");
exit:
return retval;
}
-static void fs_remove_slot(struct pci_slot *pci_slot)
+static void fs_remove_slot(struct hotplug_slot *slot, struct pci_slot *pci_slot)
{
- if (has_power_file(pci_slot))
+ if (has_power_file(slot))
sysfs_remove_file(&pci_slot->kobj, &hotplug_slot_attr_power.attr);
- if (has_attention_file(pci_slot))
+ if (has_attention_file(slot))
sysfs_remove_file(&pci_slot->kobj,
&hotplug_slot_attr_attention.attr);
- if (has_latch_file(pci_slot))
+ if (has_latch_file(slot))
sysfs_remove_file(&pci_slot->kobj, &hotplug_slot_attr_latch.attr);
- if (has_adapter_file(pci_slot))
+ if (has_adapter_file(slot))
sysfs_remove_file(&pci_slot->kobj,
&hotplug_slot_attr_presence.attr);
- if (has_test_file(pci_slot))
+ if (has_test_file(slot))
sysfs_remove_file(&pci_slot->kobj, &hotplug_slot_attr_test.attr);
- pci_hp_remove_module_link(pci_slot);
-}
-
-static struct hotplug_slot *get_slot_from_name(const char *name)
-{
- struct hotplug_slot *slot;
-
- list_for_each_entry(slot, &pci_hotplug_slot_list, slot_list) {
- if (strcmp(hotplug_slot_name(slot), name) == 0)
- return slot;
- }
- return NULL;
+ sysfs_remove_link(&pci_slot->kobj, "module");
}
/**
@@ -476,18 +423,19 @@ EXPORT_SYMBOL_GPL(__pci_hp_initialize);
*/
int pci_hp_add(struct hotplug_slot *slot)
{
- struct pci_slot *pci_slot = slot->pci_slot;
+ struct pci_slot *pci_slot;
int result;
- result = fs_add_slot(pci_slot);
+ if (WARN_ON(!slot))
+ return -EINVAL;
+
+ pci_slot = slot->pci_slot;
+
+ result = fs_add_slot(slot, pci_slot);
if (result)
return result;
kobject_uevent(&pci_slot->kobj, KOBJ_ADD);
- mutex_lock(&pci_hp_mutex);
- list_add(&slot->slot_list, &pci_hotplug_slot_list);
- mutex_unlock(&pci_hp_mutex);
- dbg("Added slot %s to the list\n", hotplug_slot_name(slot));
return 0;
}
EXPORT_SYMBOL_GPL(pci_hp_add);
@@ -514,22 +462,10 @@ EXPORT_SYMBOL_GPL(pci_hp_deregister);
*/
void pci_hp_del(struct hotplug_slot *slot)
{
- struct hotplug_slot *temp;
-
if (WARN_ON(!slot))
return;
- mutex_lock(&pci_hp_mutex);
- temp = get_slot_from_name(hotplug_slot_name(slot));
- if (WARN_ON(temp != slot)) {
- mutex_unlock(&pci_hp_mutex);
- return;
- }
-
- list_del(&slot->slot_list);
- mutex_unlock(&pci_hp_mutex);
- dbg("Removed slot %s from the list\n", hotplug_slot_name(slot));
- fs_remove_slot(slot->pci_slot);
+ fs_remove_slot(slot, slot->pci_slot);
}
EXPORT_SYMBOL_GPL(pci_hp_del);
@@ -552,6 +488,75 @@ void pci_hp_destroy(struct hotplug_slot *slot)
}
EXPORT_SYMBOL_GPL(pci_hp_destroy);
+static DECLARE_WAIT_QUEUE_HEAD(pci_hp_link_change_wq);
+
+/**
+ * pci_hp_ignore_link_change - begin code section causing spurious link changes
+ * @pdev: PCI hotplug bridge
+ *
+ * Mark the beginning of a code section causing spurious link changes on the
+ * Secondary Bus of @pdev, e.g. as a side effect of a Secondary Bus Reset,
+ * D3cold transition, firmware update or FPGA reconfiguration.
+ *
+ * Hotplug drivers can thus check whether such a code section is executing
+ * concurrently, await it with pci_hp_spurious_link_change() and ignore the
+ * resulting link change events.
+ *
+ * Must be paired with pci_hp_unignore_link_change(). May be called both
+ * from the PCI core and from Endpoint drivers. May be called for bridges
+ * which are not hotplug-capable, in which case it has no effect because
+ * no hotplug driver is bound to the bridge.
+ */
+void pci_hp_ignore_link_change(struct pci_dev *pdev)
+{
+ set_bit(PCI_LINK_CHANGING, &pdev->priv_flags);
+ smp_mb__after_atomic(); /* pairs with implied barrier of wait_event() */
+}
+
+/**
+ * pci_hp_unignore_link_change - end code section causing spurious link changes
+ * @pdev: PCI hotplug bridge
+ *
+ * Mark the end of a code section causing spurious link changes on the
+ * Secondary Bus of @pdev. Must be paired with pci_hp_ignore_link_change().
+ */
+void pci_hp_unignore_link_change(struct pci_dev *pdev)
+{
+ set_bit(PCI_LINK_CHANGED, &pdev->priv_flags);
+ mb(); /* ensure pci_hp_spurious_link_change() sees either bit set */
+ clear_bit(PCI_LINK_CHANGING, &pdev->priv_flags);
+ wake_up_all(&pci_hp_link_change_wq);
+}
+
+/**
+ * pci_hp_spurious_link_change - check for spurious link changes
+ * @pdev: PCI hotplug bridge
+ *
+ * Check whether a code section that causes spurious link changes on the
+ * Secondary Bus of @pdev is executing concurrently. If so, await the end
+ * of that code section.
+ *
+ * May be called by hotplug drivers to check whether a link change is spurious
+ * and can be ignored.
+ *
+ * Because a genuine link change may have occurred in-between a spurious link
+ * change and the invocation of this function, hotplug drivers should perform
+ * sanity checks such as retrieving the current link state and bringing down
+ * the slot if the link is down.
+ *
+ * Return: %true if such a code section has been executing concurrently,
+ * otherwise %false. Also return %true if no such code section is currently
+ * executing but one has run at least once since the last invocation of
+ * this function.
+ */
+bool pci_hp_spurious_link_change(struct pci_dev *pdev)
+{
+ wait_event(pci_hp_link_change_wq,
+ !test_bit(PCI_LINK_CHANGING, &pdev->priv_flags));
+
+ return test_and_clear_bit(PCI_LINK_CHANGED, &pdev->priv_flags);
+}
+
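[Editor's note: a minimal sketch of the intended pairing, mirroring the pciehp_reset_slot() conversion further down in this patch; 'bridge' is an assumed struct pci_dev pointer.]

/* Suppress DLLSC/PDC handling around a deliberate link disruption. */
pci_hp_ignore_link_change(bridge);
rc = pci_bridge_secondary_bus_reset(bridge);	/* triggers DLLSC */
pci_hp_unignore_link_change(bridge);

The consumer side is the hotplug IRQ thread, which, as pciehp_ist() does below, calls pci_hp_spurious_link_change() on a link-change event and drops the event when it returns true.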
static int __init pci_hotplug_init(void)
{
int result;
diff --git a/drivers/pci/hotplug/pciehp.h b/drivers/pci/hotplug/pciehp.h
index 273dd8c66f4e..debc79b0adfb 100644
--- a/drivers/pci/hotplug/pciehp.h
+++ b/drivers/pci/hotplug/pciehp.h
@@ -187,6 +187,7 @@ int pciehp_card_present(struct controller *ctrl);
int pciehp_card_present_or_link_active(struct controller *ctrl);
int pciehp_check_link_status(struct controller *ctrl);
int pciehp_check_link_active(struct controller *ctrl);
+bool pciehp_device_replaced(struct controller *ctrl);
void pciehp_release_ctrl(struct controller *ctrl);
int pciehp_sysfs_enable_slot(struct hotplug_slot *hotplug_slot);
diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c
index 997841c69893..f59baa912970 100644
--- a/drivers/pci/hotplug/pciehp_core.c
+++ b/drivers/pci/hotplug/pciehp_core.c
@@ -284,35 +284,6 @@ static int pciehp_suspend(struct pcie_device *dev)
return 0;
}
-static bool pciehp_device_replaced(struct controller *ctrl)
-{
- struct pci_dev *pdev __free(pci_dev_put) = NULL;
- u32 reg;
-
- if (pci_dev_is_disconnected(ctrl->pcie->port))
- return false;
-
- pdev = pci_get_slot(ctrl->pcie->port->subordinate, PCI_DEVFN(0, 0));
- if (!pdev)
- return true;
-
- if (pci_read_config_dword(pdev, PCI_VENDOR_ID, &reg) ||
- reg != (pdev->vendor | (pdev->device << 16)) ||
- pci_read_config_dword(pdev, PCI_CLASS_REVISION, &reg) ||
- reg != (pdev->revision | (pdev->class << 8)))
- return true;
-
- if (pdev->hdr_type == PCI_HEADER_TYPE_NORMAL &&
- (pci_read_config_dword(pdev, PCI_SUBSYSTEM_VENDOR_ID, &reg) ||
- reg != (pdev->subsystem_vendor | (pdev->subsystem_device << 16))))
- return true;
-
- if (pci_get_dsn(pdev) != ctrl->dsn)
- return true;
-
- return false;
-}
-
static int pciehp_resume_noirq(struct pcie_device *dev)
{
struct controller *ctrl = get_service_data(dev);
diff --git a/drivers/pci/hotplug/pciehp_ctrl.c b/drivers/pci/hotplug/pciehp_ctrl.c
index d603a7aa7483..bcc938d4420f 100644
--- a/drivers/pci/hotplug/pciehp_ctrl.c
+++ b/drivers/pci/hotplug/pciehp_ctrl.c
@@ -131,7 +131,7 @@ static void remove_board(struct controller *ctrl, bool safe_removal)
INDICATOR_NOOP);
/* Don't carry LBMS indications across */
- pcie_reset_lbms_count(ctrl->pcie->port);
+ pcie_reset_lbms(ctrl->pcie->port);
}
static int pciehp_enable_slot(struct controller *ctrl);
diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c
index 28ab393af1c0..91d2d92717d9 100644
--- a/drivers/pci/hotplug/pciehp_hpc.c
+++ b/drivers/pci/hotplug/pciehp_hpc.c
@@ -292,7 +292,7 @@ int pciehp_check_link_status(struct controller *ctrl)
{
struct pci_dev *pdev = ctrl_dev(ctrl);
bool found;
- u16 lnk_status;
+ u16 lnk_status, linksta2;
if (!pcie_wait_for_link(pdev, true)) {
ctrl_info(ctrl, "Slot(%s): No link\n", slot_name(ctrl));
@@ -319,7 +319,8 @@ int pciehp_check_link_status(struct controller *ctrl)
return -1;
}
- __pcie_update_link_speed(ctrl->pcie->port->subordinate, lnk_status);
+ pcie_capability_read_word(pdev, PCI_EXP_LNKSTA2, &linksta2);
+ __pcie_update_link_speed(ctrl->pcie->port->subordinate, lnk_status, linksta2);
if (!found) {
ctrl_info(ctrl, "Slot(%s): No device found\n",
@@ -430,7 +431,7 @@ void pciehp_get_latch_status(struct controller *ctrl, u8 *status)
* removed immediately after the check so the caller may need to take
* this into account.
*
- * It the hotplug controller itself is not available anymore returns
+ * If the hotplug controller itself is not available anymore returns
* %-ENODEV.
*/
int pciehp_card_present(struct controller *ctrl)
@@ -562,20 +563,50 @@ void pciehp_power_off_slot(struct controller *ctrl)
PCI_EXP_SLTCTL_PWR_OFF);
}
-static void pciehp_ignore_dpc_link_change(struct controller *ctrl,
- struct pci_dev *pdev, int irq)
+bool pciehp_device_replaced(struct controller *ctrl)
+{
+ struct pci_dev *pdev __free(pci_dev_put) = NULL;
+ u32 reg;
+
+ if (pci_dev_is_disconnected(ctrl->pcie->port))
+ return false;
+
+ pdev = pci_get_slot(ctrl->pcie->port->subordinate, PCI_DEVFN(0, 0));
+ if (!pdev)
+ return true;
+
+ if (pci_read_config_dword(pdev, PCI_VENDOR_ID, &reg) ||
+ reg != (pdev->vendor | (pdev->device << 16)) ||
+ pci_read_config_dword(pdev, PCI_CLASS_REVISION, &reg) ||
+ reg != (pdev->revision | (pdev->class << 8)))
+ return true;
+
+ if (pdev->hdr_type == PCI_HEADER_TYPE_NORMAL &&
+ (pci_read_config_dword(pdev, PCI_SUBSYSTEM_VENDOR_ID, &reg) ||
+ reg != (pdev->subsystem_vendor | (pdev->subsystem_device << 16))))
+ return true;
+
+ if (pci_get_dsn(pdev) != ctrl->dsn)
+ return true;
+
+ return false;
+}
+
+static void pciehp_ignore_link_change(struct controller *ctrl,
+ struct pci_dev *pdev, int irq,
+ u16 ignored_events)
{
/*
* Ignore link changes which occurred while waiting for DPC recovery.
* Could be several if DPC triggered multiple times consecutively.
+ * Also ignore link changes caused by Secondary Bus Reset, etc.
*/
synchronize_hardirq(irq);
- atomic_and(~PCI_EXP_SLTSTA_DLLSC, &ctrl->pending_events);
+ atomic_and(~ignored_events, &ctrl->pending_events);
if (pciehp_poll_mode)
pcie_capability_write_word(pdev, PCI_EXP_SLTSTA,
- PCI_EXP_SLTSTA_DLLSC);
- ctrl_info(ctrl, "Slot(%s): Link Down/Up ignored (recovered by DPC)\n",
- slot_name(ctrl));
+ ignored_events);
+ ctrl_info(ctrl, "Slot(%s): Link Down/Up ignored\n", slot_name(ctrl));
/*
* If the link is unexpectedly down after successful recovery,
@@ -583,8 +614,8 @@ static void pciehp_ignore_dpc_link_change(struct controller *ctrl,
* Synthesize it to ensure that it is acted on.
*/
down_read_nested(&ctrl->reset_lock, ctrl->depth);
- if (!pciehp_check_link_active(ctrl))
- pciehp_request(ctrl, PCI_EXP_SLTSTA_DLLSC);
+ if (!pciehp_check_link_active(ctrl) || pciehp_device_replaced(ctrl))
+ pciehp_request(ctrl, ignored_events);
up_read(&ctrl->reset_lock);
}
@@ -731,12 +762,19 @@ static irqreturn_t pciehp_ist(int irq, void *dev_id)
/*
* Ignore Link Down/Up events caused by Downstream Port Containment
- * if recovery from the error succeeded.
+ * if recovery succeeded, or caused by Secondary Bus Reset,
+ * suspend to D3cold, firmware update, FPGA reconfiguration, etc.
*/
- if ((events & PCI_EXP_SLTSTA_DLLSC) && pci_dpc_recovered(pdev) &&
+ if ((events & (PCI_EXP_SLTSTA_PDC | PCI_EXP_SLTSTA_DLLSC)) &&
+ (pci_dpc_recovered(pdev) || pci_hp_spurious_link_change(pdev)) &&
ctrl->state == ON_STATE) {
- events &= ~PCI_EXP_SLTSTA_DLLSC;
- pciehp_ignore_dpc_link_change(ctrl, pdev, irq);
+ u16 ignored_events = PCI_EXP_SLTSTA_DLLSC;
+
+ if (!ctrl->inband_presence_disabled)
+ ignored_events |= PCI_EXP_SLTSTA_PDC;
+
+ events &= ~ignored_events;
+ pciehp_ignore_link_change(ctrl, pdev, irq, ignored_events);
}
/*
@@ -901,7 +939,6 @@ int pciehp_reset_slot(struct hotplug_slot *hotplug_slot, bool probe)
{
struct controller *ctrl = to_ctrl(hotplug_slot);
struct pci_dev *pdev = ctrl_dev(ctrl);
- u16 stat_mask = 0, ctrl_mask = 0;
int rc;
if (probe)
@@ -909,23 +946,11 @@ int pciehp_reset_slot(struct hotplug_slot *hotplug_slot, bool probe)
down_write_nested(&ctrl->reset_lock, ctrl->depth);
- if (!ATTN_BUTTN(ctrl)) {
- ctrl_mask |= PCI_EXP_SLTCTL_PDCE;
- stat_mask |= PCI_EXP_SLTSTA_PDC;
- }
- ctrl_mask |= PCI_EXP_SLTCTL_DLLSCE;
- stat_mask |= PCI_EXP_SLTSTA_DLLSC;
-
- pcie_write_cmd(ctrl, 0, ctrl_mask);
- ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
- pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, 0);
+ pci_hp_ignore_link_change(pdev);
rc = pci_bridge_secondary_bus_reset(ctrl->pcie->port);
- pcie_capability_write_word(pdev, PCI_EXP_SLTSTA, stat_mask);
- pcie_write_cmd_nowait(ctrl, ctrl_mask, ctrl_mask);
- ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
- pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, ctrl_mask);
+ pci_hp_unignore_link_change(pdev);
up_write(&ctrl->reset_lock);
return rc;
diff --git a/drivers/pci/hotplug/pnv_php.c b/drivers/pci/hotplug/pnv_php.c
index 573a41869c15..4f85e7fe29ec 100644
--- a/drivers/pci/hotplug/pnv_php.c
+++ b/drivers/pci/hotplug/pnv_php.c
@@ -3,12 +3,15 @@
* PCI Hotplug Driver for PowerPC PowerNV platform.
*
* Copyright Gavin Shan, IBM Corporation 2016.
+ * Copyright (C) 2025 Raptor Engineering, LLC
+ * Copyright (C) 2025 Raptor Computing Systems, LLC
*/
#include <linux/bitfield.h>
#include <linux/libfdt.h>
#include <linux/module.h>
#include <linux/pci.h>
+#include <linux/delay.h>
#include <linux/pci_hotplug.h>
#include <linux/of_fdt.h>
@@ -36,8 +39,10 @@ static void pnv_php_register(struct device_node *dn);
static void pnv_php_unregister_one(struct device_node *dn);
static void pnv_php_unregister(struct device_node *dn);
+static void pnv_php_enable_irq(struct pnv_php_slot *php_slot);
+
static void pnv_php_disable_irq(struct pnv_php_slot *php_slot,
- bool disable_device)
+ bool disable_device, bool disable_msi)
{
struct pci_dev *pdev = php_slot->pdev;
u16 ctrl;
@@ -53,19 +58,15 @@ static void pnv_php_disable_irq(struct pnv_php_slot *php_slot,
php_slot->irq = 0;
}
- if (php_slot->wq) {
- destroy_workqueue(php_slot->wq);
- php_slot->wq = NULL;
- }
-
- if (disable_device) {
+ if (disable_device || disable_msi) {
if (pdev->msix_enabled)
pci_disable_msix(pdev);
else if (pdev->msi_enabled)
pci_disable_msi(pdev);
+ }
+ if (disable_device)
pci_disable_device(pdev);
- }
}
static void pnv_php_free_slot(struct kref *kref)
@@ -74,7 +75,8 @@ static void pnv_php_free_slot(struct kref *kref)
struct pnv_php_slot, kref);
WARN_ON(!list_empty(&php_slot->children));
- pnv_php_disable_irq(php_slot, false);
+ pnv_php_disable_irq(php_slot, false, false);
+ destroy_workqueue(php_slot->wq);
kfree(php_slot->name);
kfree(php_slot);
}
@@ -391,6 +393,20 @@ static int pnv_php_get_power_state(struct hotplug_slot *slot, u8 *state)
return 0;
}
+static int pcie_check_link_active(struct pci_dev *pdev)
+{
+ u16 lnk_status;
+ int ret;
+
+ ret = pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &lnk_status);
+ if (ret == PCIBIOS_DEVICE_NOT_FOUND || PCI_POSSIBLE_ERROR(lnk_status))
+ return -ENODEV;
+
+ ret = !!(lnk_status & PCI_EXP_LNKSTA_DLLLA);
+
+ return ret;
+}
+
static int pnv_php_get_adapter_state(struct hotplug_slot *slot, u8 *state)
{
struct pnv_php_slot *php_slot = to_pnv_php_slot(slot);
@@ -403,6 +419,19 @@ static int pnv_php_get_adapter_state(struct hotplug_slot *slot, u8 *state)
*/
ret = pnv_pci_get_presence_state(php_slot->id, &presence);
if (ret >= 0) {
+ if (pci_pcie_type(php_slot->pdev) == PCI_EXP_TYPE_DOWNSTREAM &&
+ presence == OPAL_PCI_SLOT_EMPTY) {
+ /*
+ * Similar to pciehp_hpc, check whether the Link Active
+ * bit is set to account for broken downstream bridges
+ * that don't properly assert Presence Detect State, as
+ * was observed on the Microsemi Switchtec PM8533 PFX
+ * [11f8:8533].
+ */
+ if (pcie_check_link_active(php_slot->pdev) > 0)
+ presence = OPAL_PCI_SLOT_PRESENT;
+ }
+
*state = presence;
ret = 0;
} else {
@@ -442,6 +471,61 @@ static int pnv_php_set_attention_state(struct hotplug_slot *slot, u8 state)
return 0;
}
+static int pnv_php_activate_slot(struct pnv_php_slot *php_slot,
+ struct hotplug_slot *slot)
+{
+ int ret, i;
+
+ /*
+ * Issue initial slot activation command to firmware
+ *
+ * Firmware will power slot on, attempt to train the link, and
+ * discover any downstream devices. If this process fails, firmware
+ * will return an error code and an invalid device tree. Failure
+ * can have multiple causes, including a faulty
+ * downstream device, poor connection to the downstream device, or
+ * a previously latched PHB fence. On failure, issue fundamental
+ * reset up to three times before aborting.
+ */
+ ret = pnv_php_set_slot_power_state(slot, OPAL_PCI_SLOT_POWER_ON);
+ if (ret) {
+ SLOT_WARN(
+ php_slot,
+ "PCI slot activation failed with error code %d, possible frozen PHB",
+ ret);
+ SLOT_WARN(
+ php_slot,
+ "Attempting complete PHB reset before retrying slot activation\n");
+ for (i = 0; i < 3; i++) {
+ /*
+ * Slot activation failed, PHB may be fenced from a
+ * prior device failure.
+ *
+ * Use the OPAL fundamental reset call to both try a
+ * device reset and clear any potentially active PHB
+ * fence / freeze.
+ */
+ SLOT_WARN(php_slot, "Try %d...\n", i + 1);
+ pci_set_pcie_reset_state(php_slot->pdev,
+ pcie_warm_reset);
+ msleep(250);
+ pci_set_pcie_reset_state(php_slot->pdev,
+ pcie_deassert_reset);
+
+ ret = pnv_php_set_slot_power_state(
+ slot, OPAL_PCI_SLOT_POWER_ON);
+ if (!ret)
+ break;
+ }
+
+ if (i >= 3)
+ SLOT_WARN(php_slot,
+ "Failed to bring slot online, aborting!\n");
+ }
+
+ return ret;
+}
+
static int pnv_php_enable(struct pnv_php_slot *php_slot, bool rescan)
{
struct hotplug_slot *slot = &php_slot->slot;
@@ -504,7 +588,7 @@ static int pnv_php_enable(struct pnv_php_slot *php_slot, bool rescan)
goto scan;
/* Power is off, turn it on and then scan the slot */
- ret = pnv_php_set_slot_power_state(slot, OPAL_PCI_SLOT_POWER_ON);
+ ret = pnv_php_activate_slot(php_slot, slot);
if (ret)
return ret;
@@ -561,8 +645,58 @@ static int pnv_php_reset_slot(struct hotplug_slot *slot, bool probe)
static int pnv_php_enable_slot(struct hotplug_slot *slot)
{
struct pnv_php_slot *php_slot = to_pnv_php_slot(slot);
+ u32 prop32;
+ int ret;
+
+ ret = pnv_php_enable(php_slot, true);
+ if (ret)
+ return ret;
+
+ /* (Re-)enable interrupt if the slot supports surprise hotplug */
+ ret = of_property_read_u32(php_slot->dn, "ibm,slot-surprise-pluggable",
+ &prop32);
+ if (!ret && prop32)
+ pnv_php_enable_irq(php_slot);
+
+ return 0;
+}
+
+/*
+ * Disable any hotplug interrupts for all slots on the provided bus, as well as
+ * all downstream slots in preparation for a hot unplug.
+ */
+static int pnv_php_disable_all_irqs(struct pci_bus *bus)
+{
+ struct pci_bus *child_bus;
+ struct pci_slot *slot;
+
+ /* First go down child buses */
+ list_for_each_entry(child_bus, &bus->children, node)
+ pnv_php_disable_all_irqs(child_bus);
+
+ /* Disable IRQs for all pnv_php slots on this bus */
+ list_for_each_entry(slot, &bus->slots, list) {
+		struct pnv_php_slot *php_slot = to_pnv_php_slot(slot->hotplug);
+
- return pnv_php_enable(php_slot, true);
+ pnv_php_disable_irq(php_slot, false, true);
+ }
+
+ return 0;
+}
+
+/*
+ * Disable any hotplug interrupts for all downstream slots on the provided
+ * bus in preparation for a hot unplug.
+ */
+static int pnv_php_disable_all_downstream_irqs(struct pci_bus *bus)
+{
+ struct pci_bus *child_bus;
+
+ /* Go down child buses, recursively deactivating their IRQs */
+ list_for_each_entry(child_bus, &bus->children, node)
+ pnv_php_disable_all_irqs(child_bus);
+
+ return 0;
}
static int pnv_php_disable_slot(struct hotplug_slot *slot)
@@ -579,6 +713,13 @@ static int pnv_php_disable_slot(struct hotplug_slot *slot)
php_slot->state != PNV_PHP_STATE_REGISTERED)
return 0;
+ /*
+	 * Free all IRQ resources from all child slots before removal.
+ * Note that we do not disable the root slot IRQ here as that
+ * would also deactivate the slot hot (re)plug interrupt!
+ */
+ pnv_php_disable_all_downstream_irqs(php_slot->bus);
+
/* Remove all devices behind the slot */
pci_lock_rescan_remove();
pci_hp_remove_devices(php_slot->bus);
@@ -647,6 +788,15 @@ static struct pnv_php_slot *pnv_php_alloc_slot(struct device_node *dn)
return NULL;
}
+ /* Allocate workqueue for this slot's interrupt handling */
+ php_slot->wq = alloc_workqueue("pciehp-%s", 0, 0, php_slot->name);
+ if (!php_slot->wq) {
+ SLOT_WARN(php_slot, "Cannot alloc workqueue\n");
+ kfree(php_slot->name);
+ kfree(php_slot);
+ return NULL;
+ }
+
if (dn->child && PCI_DN(dn->child))
php_slot->slot_no = PCI_SLOT(PCI_DN(dn->child)->devfn);
else
@@ -745,16 +895,63 @@ static int pnv_php_enable_msix(struct pnv_php_slot *php_slot)
return entry.vector;
}
+static void
+pnv_php_detect_clear_surprise_removal_freeze(struct pnv_php_slot *php_slot)
+{
+ struct pci_dev *pdev = php_slot->pdev;
+ struct eeh_dev *edev;
+ struct eeh_pe *pe;
+ int i, rc;
+
+ /*
+ * When a device is surprise removed from a downstream bridge slot,
+ * the upstream bridge port can still end up frozen due to related EEH
+ * events, which will in turn block the MSI interrupts for slot hotplug
+ * detection.
+ *
+ * Detect and thaw any frozen upstream PE after slot deactivation.
+ */
+ edev = pci_dev_to_eeh_dev(pdev);
+ pe = edev ? edev->pe : NULL;
+ rc = eeh_pe_get_state(pe);
+ if ((rc == -ENODEV) || (rc == -ENOENT)) {
+ SLOT_WARN(
+ php_slot,
+ "Upstream bridge PE state unknown, hotplug detect may fail\n");
+ } else {
+ if (pe->state & EEH_PE_ISOLATED) {
+ SLOT_WARN(
+ php_slot,
+ "Upstream bridge PE %02x frozen, thawing...\n",
+ pe->addr);
+ for (i = 0; i < 3; i++)
+ if (!eeh_unfreeze_pe(pe))
+ break;
+ if (i >= 3)
+ SLOT_WARN(
+ php_slot,
+ "Unable to thaw PE %02x, hotplug detect will fail!\n",
+ pe->addr);
+ else
+ SLOT_WARN(php_slot,
+ "PE %02x thawed successfully\n",
+ pe->addr);
+ }
+ }
+}
+
static void pnv_php_event_handler(struct work_struct *work)
{
struct pnv_php_event *event =
container_of(work, struct pnv_php_event, work);
struct pnv_php_slot *php_slot = event->php_slot;
- if (event->added)
+ if (event->added) {
pnv_php_enable_slot(&php_slot->slot);
- else
+ } else {
pnv_php_disable_slot(&php_slot->slot);
+		pnv_php_detect_clear_surprise_removal_freeze(php_slot);
+ }
kfree(event);
}
@@ -843,14 +1040,6 @@ static void pnv_php_init_irq(struct pnv_php_slot *php_slot, int irq)
u16 sts, ctrl;
int ret;
- /* Allocate workqueue */
- php_slot->wq = alloc_workqueue("pciehp-%s", 0, 0, php_slot->name);
- if (!php_slot->wq) {
- SLOT_WARN(php_slot, "Cannot alloc workqueue\n");
- pnv_php_disable_irq(php_slot, true);
- return;
- }
-
/* Check PDC (Presence Detection Change) is broken or not */
ret = of_property_read_u32(php_slot->dn, "ibm,slot-broken-pdc",
&broken_pdc);
@@ -869,7 +1058,7 @@ static void pnv_php_init_irq(struct pnv_php_slot *php_slot, int irq)
ret = request_irq(irq, pnv_php_interrupt, IRQF_SHARED,
php_slot->name, php_slot);
if (ret) {
- pnv_php_disable_irq(php_slot, true);
+ pnv_php_disable_irq(php_slot, true, true);
SLOT_WARN(php_slot, "Error %d enabling IRQ %d\n", ret, irq);
return;
}
diff --git a/drivers/pci/hotplug/s390_pci_hpc.c b/drivers/pci/hotplug/s390_pci_hpc.c
index 055518ee354d..d9996516f49e 100644
--- a/drivers/pci/hotplug/s390_pci_hpc.c
+++ b/drivers/pci/hotplug/s390_pci_hpc.c
@@ -59,16 +59,15 @@ static int disable_slot(struct hotplug_slot *hotplug_slot)
pdev = pci_get_slot(zdev->zbus->bus, zdev->devfn);
if (pdev && pci_num_vf(pdev)) {
- pci_dev_put(pdev);
rc = -EBUSY;
goto out;
}
rc = zpci_deconfigure_device(zdev);
out:
- mutex_unlock(&zdev->state_lock);
if (pdev)
pci_dev_put(pdev);
+ mutex_unlock(&zdev->state_lock);
return rc;
}
diff --git a/drivers/pci/hotplug/shpchp.h b/drivers/pci/hotplug/shpchp.h
index f0e2d2d54d71..a425530e0939 100644
--- a/drivers/pci/hotplug/shpchp.h
+++ b/drivers/pci/hotplug/shpchp.h
@@ -33,24 +33,8 @@ extern bool shpchp_poll_mode;
extern int shpchp_poll_time;
extern bool shpchp_debug;
-#define dbg(format, arg...) \
-do { \
- if (shpchp_debug) \
- printk(KERN_DEBUG "%s: " format, MY_NAME, ## arg); \
-} while (0)
-#define err(format, arg...) \
- printk(KERN_ERR "%s: " format, MY_NAME, ## arg)
-#define info(format, arg...) \
- printk(KERN_INFO "%s: " format, MY_NAME, ## arg)
-#define warn(format, arg...) \
- printk(KERN_WARNING "%s: " format, MY_NAME, ## arg)
-
#define ctrl_dbg(ctrl, format, arg...) \
- do { \
- if (shpchp_debug) \
- pci_printk(KERN_DEBUG, ctrl->pci_dev, \
- format, ## arg); \
- } while (0)
+ pci_dbg(ctrl->pci_dev, format, ## arg)
#define ctrl_err(ctrl, format, arg...) \
pci_err(ctrl->pci_dev, format, ## arg)
#define ctrl_info(ctrl, format, arg...) \
diff --git a/drivers/pci/hotplug/shpchp_core.c b/drivers/pci/hotplug/shpchp_core.c
index a92e28b72908..0c341453afc6 100644
--- a/drivers/pci/hotplug/shpchp_core.c
+++ b/drivers/pci/hotplug/shpchp_core.c
@@ -22,7 +22,6 @@
#include "shpchp.h"
/* Global variables */
-bool shpchp_debug;
bool shpchp_poll_mode;
int shpchp_poll_time;
@@ -33,10 +32,8 @@ int shpchp_poll_time;
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
-module_param(shpchp_debug, bool, 0644);
module_param(shpchp_poll_mode, bool, 0644);
module_param(shpchp_poll_time, int, 0644);
-MODULE_PARM_DESC(shpchp_debug, "Debugging mode enabled or not");
MODULE_PARM_DESC(shpchp_poll_mode, "Using polling mechanism for hot-plug events or not");
MODULE_PARM_DESC(shpchp_poll_time, "Polling mechanism frequency, in seconds");
@@ -324,20 +321,12 @@ static struct pci_driver shpc_driver = {
static int __init shpcd_init(void)
{
- int retval;
-
- retval = pci_register_driver(&shpc_driver);
- dbg("%s: pci_register_driver = %d\n", __func__, retval);
- info(DRIVER_DESC " version: " DRIVER_VERSION "\n");
-
- return retval;
+ return pci_register_driver(&shpc_driver);
}
static void __exit shpcd_cleanup(void)
{
- dbg("unload_shpchpd()\n");
pci_unregister_driver(&shpc_driver);
- info(DRIVER_DESC " version: " DRIVER_VERSION " unloaded\n");
}
module_init(shpcd_init);
diff --git a/drivers/pci/hotplug/shpchp_hpc.c b/drivers/pci/hotplug/shpchp_hpc.c
index 012b9e3fe5b0..183bf43510a1 100644
--- a/drivers/pci/hotplug/shpchp_hpc.c
+++ b/drivers/pci/hotplug/shpchp_hpc.c
@@ -211,7 +211,7 @@ static inline int shpc_indirect_read(struct controller *ctrl, int index,
*/
static void int_poll_timeout(struct timer_list *t)
{
- struct controller *ctrl = from_timer(ctrl, t, poll_timer);
+ struct controller *ctrl = timer_container_of(ctrl, t, poll_timer);
/* Poll for interrupt events. regs == NULL => polling */
shpc_isr(0, ctrl);
@@ -564,7 +564,7 @@ void shpchp_release_ctlr(struct controller *ctrl)
shpc_writel(ctrl, SERR_INTR_ENABLE, serr_int);
if (shpchp_poll_mode)
- del_timer(&ctrl->poll_timer);
+ timer_delete(&ctrl->poll_timer);
else {
free_irq(ctrl->pci_dev->irq, ctrl);
pci_disable_msi(ctrl->pci_dev);
@@ -675,7 +675,7 @@ static int shpc_get_cur_bus_speed(struct controller *ctrl)
out:
bus->cur_bus_speed = bus_speed;
- dbg("Current bus speed = %d\n", bus_speed);
+ ctrl_dbg(ctrl, "Current bus speed = %d\n", bus_speed);
return retval;
}
diff --git a/drivers/pci/iomap.c b/drivers/pci/iomap.c
index fe706ed946df..ea86c282a386 100644
--- a/drivers/pci/iomap.c
+++ b/drivers/pci/iomap.c
@@ -25,10 +25,6 @@
*
* @maxlen specifies the maximum length to map. If you want to get access to
* the complete BAR from offset to the end, pass %0 here.
- *
- * NOTE:
- * This function is never managed, even if you initialized with
- * pcim_enable_device().
* */
void __iomem *pci_iomap_range(struct pci_dev *dev,
int bar,
@@ -76,10 +72,6 @@ EXPORT_SYMBOL(pci_iomap_range);
*
* @maxlen specifies the maximum length to map. If you want to get access to
* the complete BAR from offset to the end, pass %0 here.
- *
- * NOTE:
- * This function is never managed, even if you initialized with
- * pcim_enable_device().
* */
void __iomem *pci_iomap_wc_range(struct pci_dev *dev,
int bar,
@@ -127,10 +119,6 @@ EXPORT_SYMBOL_GPL(pci_iomap_wc_range);
*
* @maxlen specifies the maximum length to map. If you want to get access to
* the complete BAR without checking for its length first, pass %0 here.
- *
- * NOTE:
- * This function is never managed, even if you initialized with
- * pcim_enable_device(). If you need automatic cleanup, use pcim_iomap().
* */
void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen)
{
@@ -152,10 +140,6 @@ EXPORT_SYMBOL(pci_iomap);
*
* @maxlen specifies the maximum length to map. If you want to get access to
* the complete BAR without checking for its length first, pass %0 here.
- *
- * NOTE:
- * This function is never managed, even if you initialized with
- * pcim_enable_device().
* */
void __iomem *pci_iomap_wc(struct pci_dev *dev, int bar, unsigned long maxlen)
{
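With the old pcim_enable_device() hybrid behavior gone, these mappings are always unmanaged and must be torn down explicitly. A hedged probe-path sketch (BAR number and error handling are illustrative, not from this patch):

static int demo_probe(struct pci_dev *pdev)
{
	void __iomem *regs;
	int ret;

	ret = pci_enable_device(pdev);
	if (ret)
		return ret;

	regs = pci_iomap(pdev, 0, 0);	/* map all of BAR 0 */
	if (!regs) {
		pci_disable_device(pdev);
		return -ENOMEM;
	}

	/* ... use readl(regs + offset) ... */

	pci_iounmap(pdev, regs);	/* cleanup is always explicit */
	pci_disable_device(pdev);
	return 0;
}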
diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c
index a964c66b4295..10693b5d7eb6 100644
--- a/drivers/pci/iov.c
+++ b/drivers/pci/iov.c
@@ -972,7 +972,7 @@ void pci_iov_remove(struct pci_dev *dev)
void pci_iov_update_resource(struct pci_dev *dev, int resno)
{
struct pci_sriov *iov = dev->is_physfn ? dev->sriov : NULL;
- struct resource *res = dev->resource + resno;
+ struct resource *res = pci_resource_n(dev, resno);
int vf_bar = resno - PCI_IOV_RESOURCES;
struct pci_bus_region region;
u16 cmd;
diff --git a/drivers/pci/msi/api.c b/drivers/pci/msi/api.c
index b956ce591f96..818d55fbad0d 100644
--- a/drivers/pci/msi/api.c
+++ b/drivers/pci/msi/api.c
@@ -53,10 +53,9 @@ void pci_disable_msi(struct pci_dev *dev)
if (!pci_msi_enabled() || !dev || !dev->msi_enabled)
return;
- msi_lock_descs(&dev->dev);
+ guard(msi_descs_lock)(&dev->dev);
pci_msi_shutdown(dev);
pci_free_msi_irqs(dev);
- msi_unlock_descs(&dev->dev);
}
EXPORT_SYMBOL(pci_disable_msi);
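The conversions to guard(msi_descs_lock) use the scope-based cleanup helpers from <linux/cleanup.h>: the unlock runs automatically when the scope exits, so every return path is covered. The same idiom with a plain mutex, as a self-contained sketch (all demo_* names are invented):

#include <linux/cleanup.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(demo_lock);
static int demo_counter;

static int demo_inc(void)
{
	guard(mutex)(&demo_lock);	/* mutex_unlock() runs at scope exit */

	return ++demo_counter;		/* no explicit unlock on any path */
}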
@@ -162,7 +161,7 @@ struct msi_map pci_msix_alloc_irq_at(struct pci_dev *dev, unsigned int index,
EXPORT_SYMBOL_GPL(pci_msix_alloc_irq_at);
/**
- * pci_msix_free_irq - Free an interrupt on a PCI/MSIX interrupt domain
+ * pci_msix_free_irq - Free an interrupt on a PCI/MSI-X interrupt domain
*
* @dev: The PCI device to operate on
* @map: A struct msi_map describing the interrupt to free
@@ -196,10 +195,9 @@ void pci_disable_msix(struct pci_dev *dev)
if (!pci_msi_enabled() || !dev || !dev->msix_enabled)
return;
- msi_lock_descs(&dev->dev);
+ guard(msi_descs_lock)(&dev->dev);
pci_msix_shutdown(dev);
pci_free_msi_irqs(dev);
- msi_unlock_descs(&dev->dev);
}
EXPORT_SYMBOL(pci_disable_msix);
@@ -401,7 +399,7 @@ EXPORT_SYMBOL_GPL(pci_restore_msi_state);
* Return: true if MSI has not been globally disabled through ACPI FADT,
* PCI bridge quirks, or the "pci=nomsi" kernel command-line option.
*/
-int pci_msi_enabled(void)
+bool pci_msi_enabled(void)
{
return pci_msi_enable;
}
diff --git a/drivers/pci/msi/irqdomain.c b/drivers/pci/msi/irqdomain.c
index d7ba8795d60f..c05152733993 100644
--- a/drivers/pci/msi/irqdomain.c
+++ b/drivers/pci/msi/irqdomain.c
@@ -271,6 +271,7 @@ static bool pci_create_device_domain(struct pci_dev *pdev, const struct msi_doma
/**
* pci_setup_msi_device_domain - Setup a device MSI interrupt domain
* @pdev: The PCI device to create the domain on
+ * @hwsize: The maximum number of MSI vectors
*
* Return:
* True when:
@@ -287,7 +288,7 @@ static bool pci_create_device_domain(struct pci_dev *pdev, const struct msi_doma
* - The device is removed
* - MSI is disabled and a MSI-X domain is created
*/
-bool pci_setup_msi_device_domain(struct pci_dev *pdev)
+bool pci_setup_msi_device_domain(struct pci_dev *pdev, unsigned int hwsize)
{
if (WARN_ON_ONCE(pdev->msix_enabled))
return false;
@@ -297,7 +298,7 @@ bool pci_setup_msi_device_domain(struct pci_dev *pdev)
if (pci_match_device_domain(pdev, DOMAIN_BUS_PCI_DEVICE_MSIX))
msi_remove_device_irq_domain(&pdev->dev, MSI_DEFAULT_DOMAIN);
- return pci_create_device_domain(pdev, &pci_msi_template, 1);
+ return pci_create_device_domain(pdev, &pci_msi_template, hwsize);
}
/**
diff --git a/drivers/pci/msi/msi.c b/drivers/pci/msi/msi.c
index 2f647cac4cae..d686488f4111 100644
--- a/drivers/pci/msi/msi.c
+++ b/drivers/pci/msi/msi.c
@@ -10,12 +10,12 @@
#include <linux/err.h>
#include <linux/export.h>
#include <linux/irq.h>
+#include <linux/irqdomain.h>
#include "../pci.h"
#include "msi.h"
-int pci_msi_enable = 1;
-int pci_msi_ignore_mask;
+bool pci_msi_enable = true;
/**
* pci_msi_supported - check whether MSI may be enabled on a device
@@ -295,8 +295,7 @@ static int msi_setup_msi_desc(struct pci_dev *dev, int nvec,
/* Lies, damned lies, and MSIs */
if (dev->dev_flags & PCI_DEV_FLAGS_HAS_MSI_MASKING)
control |= PCI_MSI_FLAGS_MASKBIT;
- /* Respect XEN's mask disabling */
- if (pci_msi_ignore_mask)
+ if (pci_msi_domain_supports(dev, MSI_FLAG_NO_MASK, DENY_LEGACY))
control &= ~PCI_MSI_FLAGS_MASKBIT;
desc.nvec_used = nvec;
@@ -336,43 +335,13 @@ static int msi_verify_entries(struct pci_dev *dev)
return !entry ? 0 : -EIO;
}
-/**
- * msi_capability_init - configure device's MSI capability structure
- * @dev: pointer to the pci_dev data structure of MSI device function
- * @nvec: number of interrupts to allocate
- * @affd: description of automatic IRQ affinity assignments (may be %NULL)
- *
- * Setup the MSI capability structure of the device with the requested
- * number of interrupts. A return value of zero indicates the successful
- * setup of an entry with the new MSI IRQ. A negative return value indicates
- * an error, and a positive return value indicates the number of interrupts
- * which could have been allocated.
- */
-static int msi_capability_init(struct pci_dev *dev, int nvec,
- struct irq_affinity *affd)
+static int __msi_capability_init(struct pci_dev *dev, int nvec, struct irq_affinity_desc *masks)
{
- struct irq_affinity_desc *masks = NULL;
+ int ret = msi_setup_msi_desc(dev, nvec, masks);
struct msi_desc *entry, desc;
- int ret;
-
- /* Reject multi-MSI early on irq domain enabled architectures */
- if (nvec > 1 && !pci_msi_domain_supports(dev, MSI_FLAG_MULTI_PCI_MSI, ALLOW_LEGACY))
- return 1;
-
- /*
- * Disable MSI during setup in the hardware, but mark it enabled
- * so that setup code can evaluate it.
- */
- pci_msi_set_enable(dev, 0);
- dev->msi_enabled = 1;
- if (affd)
- masks = irq_create_affinity_masks(nvec, affd);
-
- msi_lock_descs(&dev->dev);
- ret = msi_setup_msi_desc(dev, nvec, masks);
if (ret)
- goto fail;
+ return ret;
/* All MSIs are unmasked by default; mask them all */
entry = msi_first_desc(&dev->dev, MSI_DESC_ALL);
@@ -394,24 +363,51 @@ static int msi_capability_init(struct pci_dev *dev, int nvec,
goto err;
/* Set MSI enabled bits */
+ dev->msi_enabled = 1;
pci_intx_for_msi(dev, 0);
pci_msi_set_enable(dev, 1);
pcibios_free_irq(dev);
dev->irq = entry->irq;
- goto unlock;
-
+ return 0;
err:
pci_msi_unmask(&desc, msi_multi_mask(&desc));
pci_free_msi_irqs(dev);
-fail:
- dev->msi_enabled = 0;
-unlock:
- msi_unlock_descs(&dev->dev);
- kfree(masks);
return ret;
}
+/**
+ * msi_capability_init - configure device's MSI capability structure
+ * @dev: pointer to the pci_dev data structure of MSI device function
+ * @nvec: number of interrupts to allocate
+ * @affd: description of automatic IRQ affinity assignments (may be %NULL)
+ *
+ * Setup the MSI capability structure of the device with the requested
+ * number of interrupts. A return value of zero indicates the successful
+ * setup of an entry with the new MSI IRQ. A negative return value indicates
+ * an error, and a positive return value indicates the number of interrupts
+ * which could have been allocated.
+ */
+static int msi_capability_init(struct pci_dev *dev, int nvec,
+ struct irq_affinity *affd)
+{
+ /* Reject multi-MSI early on irq domain enabled architectures */
+ if (nvec > 1 && !pci_msi_domain_supports(dev, MSI_FLAG_MULTI_PCI_MSI, ALLOW_LEGACY))
+ return 1;
+
+ /*
+ * Disable MSI during setup in the hardware, but mark it enabled
+ * so that setup code can evaluate it.
+ */
+ pci_msi_set_enable(dev, 0);
+
+ struct irq_affinity_desc *masks __free(kfree) =
+ affd ? irq_create_affinity_masks(nvec, affd) : NULL;
+
+ guard(msi_descs_lock)(&dev->dev);
+ return __msi_capability_init(dev, nvec, masks);
+}
+
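The masks allocation above likewise relies on the __free(kfree) attribute, which frees the buffer automatically on every return path. A self-contained sketch of the idiom (names invented for illustration):

#include <linux/cleanup.h>
#include <linux/slab.h>

static int demo_use_scratch(size_t n)
{
	u8 *scratch __free(kfree) = kzalloc(n, GFP_KERNEL);

	if (!scratch)
		return -ENOMEM;

	/* ... use scratch; kfree() runs automatically on return ... */
	return 0;
}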
int __pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec,
struct irq_affinity *affd)
{
@@ -443,16 +439,16 @@ int __pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec,
if (nvec < minvec)
return -ENOSPC;
- if (nvec > maxvec)
- nvec = maxvec;
-
rc = pci_setup_msi_context(dev);
if (rc)
return rc;
- if (!pci_setup_msi_device_domain(dev))
+ if (!pci_setup_msi_device_domain(dev, nvec))
return -ENODEV;
+ if (nvec > maxvec)
+ nvec = maxvec;
+
for (;;) {
if (affd) {
nvec = irq_calc_affinity_vectors(minvec, nvec, affd);
@@ -609,12 +605,16 @@ void msix_prepare_msi_desc(struct pci_dev *dev, struct msi_desc *desc)
desc->pci.msi_attrib.is_64 = 1;
desc->pci.msi_attrib.default_irq = dev->irq;
desc->pci.mask_base = dev->msix_base;
- desc->pci.msi_attrib.can_mask = !pci_msi_ignore_mask &&
- !desc->pci.msi_attrib.is_virtual;
- if (desc->pci.msi_attrib.can_mask) {
+
+ if (!pci_msi_domain_supports(dev, MSI_FLAG_NO_MASK, DENY_LEGACY) &&
+ !desc->pci.msi_attrib.is_virtual) {
void __iomem *addr = pci_msix_desc_addr(desc);
+ desc->pci.msi_attrib.can_mask = 1;
+ /* Workaround for SUN NIU insanity, which requires write before read */
+ if (dev->dev_flags & PCI_DEV_FLAGS_MSIX_TOUCH_ENTRY_DATA_FIRST)
+ writel(0, addr + PCI_MSIX_ENTRY_DATA);
desc->pci.msix_ctrl = readl(addr + PCI_MSIX_ENTRY_VECTOR_CTRL);
}
}
@@ -659,45 +659,43 @@ static void msix_mask_all(void __iomem *base, int tsize)
u32 ctrl = PCI_MSIX_ENTRY_CTRL_MASKBIT;
int i;
- if (pci_msi_ignore_mask)
- return;
-
for (i = 0; i < tsize; i++, base += PCI_MSIX_ENTRY_SIZE)
writel(ctrl, base + PCI_MSIX_ENTRY_VECTOR_CTRL);
}
-static int msix_setup_interrupts(struct pci_dev *dev, struct msix_entry *entries,
- int nvec, struct irq_affinity *affd)
-{
- struct irq_affinity_desc *masks = NULL;
- int ret;
+DEFINE_FREE(free_msi_irqs, struct pci_dev *, if (_T) pci_free_msi_irqs(_T));
- if (affd)
- masks = irq_create_affinity_masks(nvec, affd);
+static int __msix_setup_interrupts(struct pci_dev *__dev, struct msix_entry *entries,
+ int nvec, struct irq_affinity_desc *masks)
+{
+ struct pci_dev *dev __free(free_msi_irqs) = __dev;
- msi_lock_descs(&dev->dev);
- ret = msix_setup_msi_descs(dev, entries, nvec, masks);
+ int ret = msix_setup_msi_descs(dev, entries, nvec, masks);
if (ret)
- goto out_free;
+ return ret;
ret = pci_msi_setup_msi_irqs(dev, nvec, PCI_CAP_ID_MSIX);
if (ret)
- goto out_free;
+ return ret;
/* Check if all MSI entries honor device restrictions */
ret = msi_verify_entries(dev);
if (ret)
- goto out_free;
+ return ret;
msix_update_entries(dev, entries);
- goto out_unlock;
+ retain_and_null_ptr(dev);
+ return 0;
+}
-out_free:
- pci_free_msi_irqs(dev);
-out_unlock:
- msi_unlock_descs(&dev->dev);
- kfree(masks);
- return ret;
+static int msix_setup_interrupts(struct pci_dev *dev, struct msix_entry *entries,
+ int nvec, struct irq_affinity *affd)
+{
+ struct irq_affinity_desc *masks __free(kfree) =
+ affd ? irq_create_affinity_masks(nvec, affd) : NULL;
+
+ guard(msi_descs_lock)(&dev->dev);
+ return __msix_setup_interrupts(dev, entries, nvec, masks);
}
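The free_msi_irqs guard above implements "clean up unless we succeed": any early return triggers pci_free_msi_irqs(), while retain_and_null_ptr() disarms the guard once setup is complete. The same ownership-transfer pattern with a hypothetical resource type:

struct demo_res;
int demo_res_start(struct demo_res *res);
void demo_res_destroy(struct demo_res *res);

DEFINE_FREE(demo_put, struct demo_res *, if (_T) demo_res_destroy(_T))

static int demo_setup(struct demo_res *__res)
{
	struct demo_res *res __free(demo_put) = __res;

	if (demo_res_start(res))
		return -EIO;		/* demo_res_destroy() runs here */

	retain_and_null_ptr(res);	/* success: caller keeps ownership */
	return 0;
}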
/**
@@ -744,15 +742,17 @@ static int msix_capability_init(struct pci_dev *dev, struct msix_entry *entries,
/* Disable INTX */
pci_intx_for_msi(dev, 0);
- /*
- * Ensure that all table entries are masked to prevent
- * stale entries from firing in a crash kernel.
- *
- * Done late to deal with a broken Marvell NVME device
- * which takes the MSI-X mask bits into account even
- * when MSI-X is disabled, which prevents MSI delivery.
- */
- msix_mask_all(dev->msix_base, tsize);
+ if (!pci_msi_domain_supports(dev, MSI_FLAG_NO_MASK, DENY_LEGACY)) {
+ /*
+ * Ensure that all table entries are masked to prevent
+ * stale entries from firing in a crash kernel.
+ *
+			 * Done late to deal with a broken Marvell NVMe device
+ * which takes the MSI-X mask bits into account even
+ * when MSI-X is disabled, which prevents MSI delivery.
+ */
+ msix_mask_all(dev->msix_base, tsize);
+ }
pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_MASKALL, 0);
pcibios_free_irq(dev);
@@ -871,13 +871,13 @@ void __pci_restore_msix_state(struct pci_dev *dev)
write_msg = arch_restore_msi_irqs(dev);
- msi_lock_descs(&dev->dev);
- msi_for_each_desc(entry, &dev->dev, MSI_DESC_ALL) {
- if (write_msg)
- __pci_write_msi_msg(entry, &entry->msg);
- pci_msix_write_vector_ctrl(entry, entry->pci.msix_ctrl);
+ scoped_guard (msi_descs_lock, &dev->dev) {
+ msi_for_each_desc(entry, &dev->dev, MSI_DESC_ALL) {
+ if (write_msg)
+ __pci_write_msi_msg(entry, &entry->msg);
+ pci_msix_write_vector_ctrl(entry, entry->pci.msix_ctrl);
+ }
}
- msi_unlock_descs(&dev->dev);
pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_MASKALL, 0);
}
@@ -916,6 +916,55 @@ void pci_free_msi_irqs(struct pci_dev *dev)
}
}
+#ifdef CONFIG_PCIE_TPH
+/**
+ * pci_msix_write_tph_tag - Update the TPH tag for a given MSI-X vector
+ * @pdev: The PCIe device to update
+ * @index: The MSI-X index to update
+ * @tag: The tag to write
+ *
+ * Returns: 0 on success, error code on failure
+ */
+int pci_msix_write_tph_tag(struct pci_dev *pdev, unsigned int index, u16 tag)
+{
+ struct msi_desc *msi_desc;
+ struct irq_desc *irq_desc;
+ unsigned int virq;
+
+ if (!pdev->msix_enabled)
+ return -ENXIO;
+
+ virq = msi_get_virq(&pdev->dev, index);
+ if (!virq)
+ return -ENXIO;
+
+ guard(msi_descs_lock)(&pdev->dev);
+
+ /*
+ * This is a horrible hack, but short of implementing a PCI
+ * specific interrupt chip callback and a huge pile of
+ * infrastructure, this is the minor nuissance. It provides the
+	 * infrastructure, this is the minor nuisance. It provides the
+ * the control word cache in sync.
+ */
+ irq_desc = irq_to_desc(virq);
+ if (!irq_desc)
+ return -ENXIO;
+
+ guard(raw_spinlock_irq)(&irq_desc->lock);
+ msi_desc = irq_data_get_msi_desc(&irq_desc->irq_data);
+ if (!msi_desc || msi_desc->pci.msi_attrib.is_virtual)
+ return -ENXIO;
+
+ msi_desc->pci.msix_ctrl &= ~PCI_MSIX_ENTRY_CTRL_ST;
+ msi_desc->pci.msix_ctrl |= FIELD_PREP(PCI_MSIX_ENTRY_CTRL_ST, tag);
+ pci_msix_write_vector_ctrl(msi_desc, msi_desc->pci.msix_ctrl);
+ /* Flush the write */
+ readl(pci_msix_desc_addr(msi_desc));
+ return 0;
+}
+#endif
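A hypothetical consumer of the new helper, tagging the MSI-X vector that backs a given queue; all demo_* names are invented:

static int demo_set_steering_tag(struct pci_dev *pdev, unsigned int vec_idx,
				 u16 tag)
{
	int ret = pci_msix_write_tph_tag(pdev, vec_idx, tag);

	if (ret)
		pci_warn(pdev, "ST update for vector %u failed: %d\n",
			 vec_idx, ret);

	return ret;
}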
+
/* Misc. infrastructure */
struct pci_dev *msi_desc_to_pci_dev(struct msi_desc *desc)
@@ -926,5 +975,5 @@ EXPORT_SYMBOL(msi_desc_to_pci_dev);
void pci_no_msi(void)
{
- pci_msi_enable = 0;
+ pci_msi_enable = false;
}
diff --git a/drivers/pci/msi/msi.h b/drivers/pci/msi/msi.h
index ee53cf079f4e..0b420b319f50 100644
--- a/drivers/pci/msi/msi.h
+++ b/drivers/pci/msi/msi.h
@@ -87,7 +87,7 @@ static inline __attribute_const__ u32 msi_multi_mask(struct msi_desc *desc)
void msix_prepare_msi_desc(struct pci_dev *dev, struct msi_desc *desc);
/* Subsystem variables */
-extern int pci_msi_enable;
+extern bool pci_msi_enable;
/* MSI internal functions invoked from the public APIs */
void pci_msi_shutdown(struct pci_dev *dev);
@@ -107,7 +107,7 @@ enum support_mode {
};
bool pci_msi_domain_supports(struct pci_dev *dev, unsigned int feature_mask, enum support_mode mode);
-bool pci_setup_msi_device_domain(struct pci_dev *pdev);
+bool pci_setup_msi_device_domain(struct pci_dev *pdev, unsigned int hwsize);
bool pci_setup_msix_device_domain(struct pci_dev *pdev, unsigned int hwsize);
/* Legacy (!IRQDOMAIN) fallbacks */
diff --git a/drivers/pci/of.c b/drivers/pci/of.c
index 7a806f5c0d20..3579265f1198 100644
--- a/drivers/pci/of.c
+++ b/drivers/pci/of.c
@@ -455,9 +455,9 @@ failed:
* @out_irq: structure of_phandle_args filled by this function
*
* This function resolves the PCI interrupt for a given PCI device. If a
- * device-node exists for a given pci_dev, it will use normal OF tree
+ * device node exists for a given pci_dev, it will use normal OF tree
* walking. If not, it will implement standard swizzling and walk up the
- * PCI tree until an device-node is found, at which point it will finish
+ * PCI tree until a device node is found, at which point it will finish
* resolving using the OF tree walking.
*/
static int of_irq_parse_pci(const struct pci_dev *pdev, struct of_phandle_args *out_irq)
@@ -517,13 +517,16 @@ static int of_irq_parse_pci(const struct pci_dev *pdev, struct of_phandle_args *
}
/*
- * Ok, we have found a parent with a device-node, hand over to
+ * Ok, we have found a parent with a device node, hand over to
* the OF parsing code.
+ *
* We build a unit address from the linux device to be used for
* resolution. Note that we use the linux bus number which may
* not match your firmware bus numbering.
+ *
* Fortunately, in most cases, interrupt-map-mask doesn't
* include the bus number as part of the matching.
+ *
* You should still be careful about that though if you intend
* to rely on this function (you ship a firmware that doesn't
* create device nodes for all PCI devices).
@@ -653,8 +656,8 @@ void of_pci_remove_node(struct pci_dev *pdev)
np = pci_device_to_OF_node(pdev);
if (!np || !of_node_check_flag(np, OF_DYNAMIC))
return;
- pdev->dev.of_node = NULL;
+ device_remove_of_node(&pdev->dev);
of_changeset_revert(np->data);
of_changeset_destroy(np->data);
of_node_put(np);
@@ -711,11 +714,18 @@ void of_pci_make_dev_node(struct pci_dev *pdev)
goto out_free_node;
np->data = cset;
- pdev->dev.of_node = np;
+
+ ret = device_add_of_node(&pdev->dev, np);
+ if (ret)
+ goto out_revert_cset;
+
kfree(name);
return;
+out_revert_cset:
+ np->data = NULL;
+ of_changeset_revert(cset);
out_free_node:
of_node_put(np);
out_destroy_cset:
@@ -724,7 +734,112 @@ out_destroy_cset:
out_free_name:
kfree(name);
}
-#endif
+
+void of_pci_remove_host_bridge_node(struct pci_host_bridge *bridge)
+{
+ struct device_node *np;
+
+ np = pci_bus_to_OF_node(bridge->bus);
+ if (!np || !of_node_check_flag(np, OF_DYNAMIC))
+ return;
+
+ device_remove_of_node(&bridge->bus->dev);
+ device_remove_of_node(&bridge->dev);
+ of_changeset_revert(np->data);
+ of_changeset_destroy(np->data);
+ of_node_put(np);
+}
+
+void of_pci_make_host_bridge_node(struct pci_host_bridge *bridge)
+{
+ struct device_node *np = NULL;
+ struct of_changeset *cset;
+ const char *name;
+ int ret;
+
+ /*
+ * If there is already a device tree node linked to the PCI bus handled
+ * by this bridge (i.e. the PCI root bus), nothing to do.
+ */
+ if (pci_bus_to_OF_node(bridge->bus))
+ return;
+
+ /*
+ * The root bus has no node. Check that the host bridge has no node
+	 * either.
+ */
+ if (bridge->dev.of_node) {
+		dev_err(&bridge->dev, "PCI host bridge of_node already set\n");
+ return;
+ }
+
+ /* Check if there is a DT root node to attach the created node */
+ if (!of_root) {
+ pr_err("of_root node is NULL, cannot create PCI host bridge node\n");
+ return;
+ }
+
+ name = kasprintf(GFP_KERNEL, "pci@%x,%x", pci_domain_nr(bridge->bus),
+ bridge->bus->number);
+ if (!name)
+ return;
+
+ cset = kmalloc(sizeof(*cset), GFP_KERNEL);
+ if (!cset)
+ goto out_free_name;
+ of_changeset_init(cset);
+
+ np = of_changeset_create_node(cset, of_root, name);
+ if (!np)
+ goto out_destroy_cset;
+
+ ret = of_pci_add_host_bridge_properties(bridge, cset, np);
+ if (ret)
+ goto out_free_node;
+
+ /*
+ * This of_node will be added to an existing device. The of_node parent
+ * is the root OF node and so this node will be handled by the platform
+ * bus. Avoid any new device creation.
+ */
+ of_node_set_flag(np, OF_POPULATED);
+ np->fwnode.dev = &bridge->dev;
+ fwnode_dev_initialized(&np->fwnode, true);
+
+ ret = of_changeset_apply(cset);
+ if (ret)
+ goto out_free_node;
+
+ np->data = cset;
+
+ /* Add the of_node to host bridge and the root bus */
+ ret = device_add_of_node(&bridge->dev, np);
+ if (ret)
+ goto out_revert_cset;
+
+ ret = device_add_of_node(&bridge->bus->dev, np);
+ if (ret)
+ goto out_remove_bridge_dev_of_node;
+
+ kfree(name);
+
+ return;
+
+out_remove_bridge_dev_of_node:
+ device_remove_of_node(&bridge->dev);
+out_revert_cset:
+ np->data = NULL;
+ of_changeset_revert(cset);
+out_free_node:
+ of_node_put(np);
+out_destroy_cset:
+ of_changeset_destroy(cset);
+ kfree(cset);
+out_free_name:
+ kfree(name);
+}
+
+#endif /* CONFIG_PCI_DYNAMIC_OF_NODES */
/**
* of_pci_supply_present() - Check if the power supply is present for the PCI
@@ -851,3 +966,47 @@ u32 of_pci_get_slot_power_limit(struct device_node *node,
return slot_power_limit_mw;
}
EXPORT_SYMBOL_GPL(of_pci_get_slot_power_limit);
+
+/**
+ * of_pci_get_equalization_presets - Parses the "eq-presets-Ngts" property.
+ *
+ * @dev: Device containing the properties.
+ * @presets: Pointer to store the parsed data.
+ * @num_lanes: Maximum number of lanes supported.
+ *
+ * If the property is present, read and store the data in the @presets
+ * structure. Otherwise, assign the default value PCI_EQ_RESV.
+ *
+ * Return: 0 if the property is absent or was parsed successfully, a
+ * negative errno otherwise.
+ */
+int of_pci_get_equalization_presets(struct device *dev,
+ struct pci_eq_presets *presets,
+ int num_lanes)
+{
+ char name[20];
+ int ret;
+
+ presets->eq_presets_8gts[0] = PCI_EQ_RESV;
+ ret = of_property_read_u16_array(dev->of_node, "eq-presets-8gts",
+ presets->eq_presets_8gts, num_lanes);
+ if (ret && ret != -EINVAL) {
+ dev_err(dev, "Error reading eq-presets-8gts: %d\n", ret);
+ return ret;
+ }
+
+ for (int i = 0; i < EQ_PRESET_TYPE_MAX - 1; i++) {
+ presets->eq_presets_Ngts[i][0] = PCI_EQ_RESV;
+ snprintf(name, sizeof(name), "eq-presets-%dgts", 8 << (i + 1));
+ ret = of_property_read_u8_array(dev->of_node, name,
+ presets->eq_presets_Ngts[i],
+ num_lanes);
+ if (ret && ret != -EINVAL) {
+ dev_err(dev, "Error reading %s: %d\n", name, ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(of_pci_get_equalization_presets);
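A sketch of a controller driver consuming the parsed presets; struct pci_eq_presets and PCI_EQ_RESV are as introduced for this helper, while the surrounding driver logic is illustrative:

static int demo_apply_presets(struct device *dev, int num_lanes)
{
	struct pci_eq_presets presets;
	int ret;

	ret = of_pci_get_equalization_presets(dev, &presets, num_lanes);
	if (ret)
		return ret;

	/* PCI_EQ_RESV in lane 0 means the property was absent */
	if (presets.eq_presets_8gts[0] != PCI_EQ_RESV)
		dev_info(dev, "lane 0 8GT/s preset: %d\n",
			 presets.eq_presets_8gts[0]);

	return 0;
}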
diff --git a/drivers/pci/of_property.c b/drivers/pci/of_property.c
index 58fbafac7c6a..506fcd507113 100644
--- a/drivers/pci/of_property.c
+++ b/drivers/pci/of_property.c
@@ -54,9 +54,13 @@ enum of_pci_prop_compatible {
static void of_pci_set_address(struct pci_dev *pdev, u32 *prop, u64 addr,
u32 reg_num, u32 flags, bool reloc)
{
- prop[0] = FIELD_PREP(OF_PCI_ADDR_FIELD_BUS, pdev->bus->number) |
- FIELD_PREP(OF_PCI_ADDR_FIELD_DEV, PCI_SLOT(pdev->devfn)) |
- FIELD_PREP(OF_PCI_ADDR_FIELD_FUNC, PCI_FUNC(pdev->devfn));
+ if (pdev) {
+ prop[0] = FIELD_PREP(OF_PCI_ADDR_FIELD_BUS, pdev->bus->number) |
+ FIELD_PREP(OF_PCI_ADDR_FIELD_DEV, PCI_SLOT(pdev->devfn)) |
+ FIELD_PREP(OF_PCI_ADDR_FIELD_FUNC, PCI_FUNC(pdev->devfn));
+	} else {
+		prop[0] = 0;
+	}
+
prop[0] |= flags | reg_num;
if (!reloc) {
prop[0] |= OF_PCI_ADDR_FIELD_NONRELOC;
@@ -65,7 +69,7 @@ static void of_pci_set_address(struct pci_dev *pdev, u32 *prop, u64 addr,
}
}
-static int of_pci_get_addr_flags(struct resource *res, u32 *flags)
+static int of_pci_get_addr_flags(const struct resource *res, u32 *flags)
{
u32 ss;
@@ -390,3 +394,106 @@ int of_pci_add_properties(struct pci_dev *pdev, struct of_changeset *ocs,
return 0;
}
+
+static bool of_pci_is_range_resource(const struct resource *res, u32 *flags)
+{
+ if (!(resource_type(res) & IORESOURCE_MEM) &&
+ !(resource_type(res) & IORESOURCE_MEM_64))
+ return false;
+
+ if (of_pci_get_addr_flags(res, flags))
+ return false;
+
+ return true;
+}
+
+static int of_pci_host_bridge_prop_ranges(struct pci_host_bridge *bridge,
+ struct of_changeset *ocs,
+ struct device_node *np)
+{
+ struct resource_entry *window;
+ unsigned int ranges_sz = 0;
+ unsigned int n_range = 0;
+ struct resource *res;
+ int n_addr_cells;
+ u32 *ranges;
+ u64 val64;
+ u32 flags;
+ int ret;
+
+ n_addr_cells = of_n_addr_cells(np);
+ if (n_addr_cells <= 0 || n_addr_cells > 2)
+ return -EINVAL;
+
+ resource_list_for_each_entry(window, &bridge->windows) {
+ res = window->res;
+ if (!of_pci_is_range_resource(res, &flags))
+ continue;
+ n_range++;
+ }
+
+ if (!n_range)
+ return 0;
+
+ ranges = kcalloc(n_range,
+ (OF_PCI_ADDRESS_CELLS + OF_PCI_SIZE_CELLS +
+ n_addr_cells) * sizeof(*ranges),
+ GFP_KERNEL);
+ if (!ranges)
+ return -ENOMEM;
+
+ resource_list_for_each_entry(window, &bridge->windows) {
+ res = window->res;
+ if (!of_pci_is_range_resource(res, &flags))
+ continue;
+
+ /* PCI bus address */
+ val64 = res->start;
+ of_pci_set_address(NULL, &ranges[ranges_sz],
+ val64 - window->offset, 0, flags, false);
+ ranges_sz += OF_PCI_ADDRESS_CELLS;
+
+ /* Host bus address */
+ if (n_addr_cells == 2)
+ ranges[ranges_sz++] = upper_32_bits(val64);
+ ranges[ranges_sz++] = lower_32_bits(val64);
+
+ /* Size */
+ val64 = resource_size(res);
+ ranges[ranges_sz] = upper_32_bits(val64);
+ ranges[ranges_sz + 1] = lower_32_bits(val64);
+ ranges_sz += OF_PCI_SIZE_CELLS;
+ }
+
+ ret = of_changeset_add_prop_u32_array(ocs, np, "ranges", ranges,
+ ranges_sz);
+ kfree(ranges);
+ return ret;
+}
+
+int of_pci_add_host_bridge_properties(struct pci_host_bridge *bridge,
+ struct of_changeset *ocs,
+ struct device_node *np)
+{
+ int ret;
+
+ ret = of_changeset_add_prop_string(ocs, np, "device_type", "pci");
+ if (ret)
+ return ret;
+
+ ret = of_changeset_add_prop_u32(ocs, np, "#address-cells",
+ OF_PCI_ADDRESS_CELLS);
+ if (ret)
+ return ret;
+
+ ret = of_changeset_add_prop_u32(ocs, np, "#size-cells",
+ OF_PCI_SIZE_CELLS);
+ if (ret)
+ return ret;
+
+ ret = of_pci_host_bridge_prop_ranges(bridge, ocs, np);
+ if (ret)
+ return ret;
+
+ return 0;
+}
diff --git a/drivers/pci/p2pdma.c b/drivers/pci/p2pdma.c
index 0cb7e0aaba0e..8d955c25aed3 100644
--- a/drivers/pci/p2pdma.c
+++ b/drivers/pci/p2pdma.c
@@ -140,13 +140,22 @@ static int p2pmem_alloc_mmap(struct file *filp, struct kobject *kobj,
rcu_read_unlock();
for (vaddr = vma->vm_start; vaddr < vma->vm_end; vaddr += PAGE_SIZE) {
- ret = vm_insert_page(vma, vaddr, virt_to_page(kaddr));
+ struct page *page = virt_to_page(kaddr);
+
+ /*
+ * Initialise the refcount for the freshly allocated page. As
+ * we have just allocated the page no one else should be
+ * using it.
+ */
+ VM_WARN_ON_ONCE_PAGE(!page_ref_count(page), page);
+ set_page_count(page, 1);
+ ret = vm_insert_page(vma, vaddr, page);
if (ret) {
gen_pool_free(p2pdma->pool, (uintptr_t)kaddr, len);
return ret;
}
percpu_ref_get(ref);
- put_page(virt_to_page(kaddr));
+ put_page(page);
kaddr += PAGE_SIZE;
len -= PAGE_SIZE;
}
@@ -193,7 +202,7 @@ static const struct attribute_group p2pmem_group = {
static void p2pdma_page_free(struct page *page)
{
- struct pci_p2pdma_pagemap *pgmap = to_p2p_pgmap(page->pgmap);
+ struct pci_p2pdma_pagemap *pgmap = to_p2p_pgmap(page_pgmap(page));
/* safe to dereference while a reference is held to the percpu ref */
struct pci_p2pdma *p2pdma =
rcu_dereference_protected(pgmap->provider->p2pdma, 1);
@@ -995,40 +1004,12 @@ static enum pci_p2pdma_map_type pci_p2pdma_map_type(struct dev_pagemap *pgmap,
return type;
}
-/**
- * pci_p2pdma_map_segment - map an sg segment determining the mapping type
- * @state: State structure that should be declared outside of the for_each_sg()
- * loop and initialized to zero.
- * @dev: DMA device that's doing the mapping operation
- * @sg: scatterlist segment to map
- *
- * This is a helper to be used by non-IOMMU dma_map_sg() implementations where
- * the sg segment is the same for the page_link and the dma_address.
- *
- * Attempt to map a single segment in an SGL with the PCI bus address.
- * The segment must point to a PCI P2PDMA page and thus must be
- * wrapped in a is_pci_p2pdma_page(sg_page(sg)) check.
- *
- * Returns the type of mapping used and maps the page if the type is
- * PCI_P2PDMA_MAP_BUS_ADDR.
- */
-enum pci_p2pdma_map_type
-pci_p2pdma_map_segment(struct pci_p2pdma_map_state *state, struct device *dev,
- struct scatterlist *sg)
+void __pci_p2pdma_update_state(struct pci_p2pdma_map_state *state,
+ struct device *dev, struct page *page)
{
- if (state->pgmap != sg_page(sg)->pgmap) {
- state->pgmap = sg_page(sg)->pgmap;
- state->map = pci_p2pdma_map_type(state->pgmap, dev);
- state->bus_off = to_p2p_pgmap(state->pgmap)->bus_offset;
- }
-
- if (state->map == PCI_P2PDMA_MAP_BUS_ADDR) {
- sg->dma_address = sg_phys(sg) + state->bus_off;
- sg_dma_len(sg) = sg->length;
- sg_dma_mark_bus_address(sg);
- }
-
- return state->map;
+ state->pgmap = page_pgmap(page);
+ state->map = pci_p2pdma_map_type(state->pgmap, dev);
+ state->bus_off = to_p2p_pgmap(state->pgmap)->bus_offset;
}
/**
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
index af370628e583..99c58ee09fbb 100644
--- a/drivers/pci/pci-acpi.c
+++ b/drivers/pci/pci-acpi.c
@@ -816,13 +816,11 @@ int pci_acpi_program_hp_params(struct pci_dev *dev)
bool pciehp_is_native(struct pci_dev *bridge)
{
const struct pci_host_bridge *host;
- u32 slot_cap;
if (!IS_ENABLED(CONFIG_HOTPLUG_PCI_PCIE))
return false;
- pcie_capability_read_dword(bridge, PCI_EXP_SLTCAP, &slot_cap);
- if (!(slot_cap & PCI_EXP_SLTCAP_HPC))
+ if (!bridge->is_pciehp)
return false;
if (pcie_ports_native)
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index f57ea36d125d..01e6aea1b0c7 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -555,12 +555,6 @@ static void pci_pm_default_resume(struct pci_dev *pci_dev)
pci_enable_wake(pci_dev, PCI_D0, false);
}
-static void pci_pm_power_up_and_verify_state(struct pci_dev *pci_dev)
-{
- pci_power_up(pci_dev);
- pci_update_current_state(pci_dev, PCI_D0);
-}
-
static void pci_pm_default_resume_early(struct pci_dev *pci_dev)
{
pci_pm_power_up_and_verify_state(pci_dev);
@@ -812,8 +806,7 @@ static int pci_pm_suspend(struct device *dev)
* suspend callbacks can cope with runtime-suspended devices, it is
* better to resume the device from runtime suspend here.
*/
- if (!dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND) ||
- pci_dev_need_resume(pci_dev)) {
+ if (!dev_pm_smart_suspend(dev) || pci_dev_need_resume(pci_dev)) {
pm_runtime_resume(dev);
pci_dev->state_saved = false;
} else {
@@ -1151,8 +1144,7 @@ static int pci_pm_poweroff(struct device *dev)
}
/* The reason to do that is the same as in pci_pm_suspend(). */
- if (!dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND) ||
- pci_dev_need_resume(pci_dev)) {
+ if (!dev_pm_smart_suspend(dev) || pci_dev_need_resume(pci_dev)) {
pm_runtime_resume(dev);
pci_dev->state_saved = false;
} else {
@@ -1509,7 +1501,7 @@ static int pci_bus_match(struct device *dev, const struct device_driver *drv)
struct pci_driver *pci_drv;
const struct pci_device_id *found_id;
- if (!pci_dev->match_driver)
+ if (pci_dev_binding_disallowed(pci_dev))
return 0;
pci_drv = (struct pci_driver *)to_pci_driver(drv);
@@ -1636,7 +1628,7 @@ static int pci_bus_num_vf(struct device *dev)
*/
static int pci_dma_configure(struct device *dev)
{
- struct pci_driver *driver = to_pci_driver(dev->driver);
+ const struct device_driver *drv = READ_ONCE(dev->driver);
struct device *bridge;
int ret = 0;
@@ -1653,7 +1645,8 @@ static int pci_dma_configure(struct device *dev)
pci_put_host_bridge_device(bridge);
- if (!ret && !driver->driver_managed_dma) {
+ /* @drv may not be valid when we're called from the IOMMU layer */
+ if (!ret && drv && !to_pci_driver(drv)->driver_managed_dma) {
ret = iommu_device_use_default_domain(dev);
if (ret)
arch_teardown_dma_ops(dev);
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
index 0e7eb2a42d88..268c69daa4d5 100644
--- a/drivers/pci/pci-sysfs.c
+++ b/drivers/pci/pci-sysfs.c
@@ -1257,6 +1257,10 @@ static int pci_create_resource_files(struct pci_dev *pdev)
int i;
int retval;
+ /* Skip devices with non-mappable BARs */
+ if (pdev->non_mappable_bars)
+ return 0;
+
/* Expose the PCI resources from this device as files */
for (i = 0; i < PCI_STD_NUM_BARS; i++) {
@@ -1471,6 +1475,9 @@ static ssize_t reset_method_store(struct device *dev,
return count;
}
+ pm_runtime_get_sync(dev);
+ struct device *pmdev __free(pm_runtime_put) = dev;
+
if (sysfs_streq(buf, "default")) {
pci_init_reset_methods(pdev);
return count;
@@ -1801,9 +1808,13 @@ const struct attribute_group *pci_dev_attr_groups[] = {
&pcie_dev_attr_group,
#ifdef CONFIG_PCIEAER
&aer_stats_attr_group,
+ &aer_attr_group,
#endif
#ifdef CONFIG_PCIEASPM
&aspm_ctrl_attr_group,
#endif
+#ifdef CONFIG_PCI_DOE
+ &pci_doe_sysfs_group,
+#endif
NULL,
};
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index f489256cd9de..640d339a447d 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -1879,7 +1879,7 @@ static void pci_restore_rebar_state(struct pci_dev *pdev)
unsigned int pos, nbars, i;
u32 ctrl;
- pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_REBAR);
+ pos = pdev->rebar_cap;
if (!pos)
return;
@@ -1892,7 +1892,7 @@ static void pci_restore_rebar_state(struct pci_dev *pdev)
pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
bar_idx = ctrl & PCI_REBAR_CTRL_BAR_IDX;
- res = pdev->resource + bar_idx;
+ res = pci_resource_n(pdev, bar_idx);
size = pci_rebar_bytes_to_size(resource_size(res));
ctrl &= ~PCI_REBAR_CTRL_BAR_SIZE;
ctrl |= FIELD_PREP(PCI_REBAR_CTRL_BAR_SIZE, size);
@@ -3030,8 +3030,12 @@ static const struct dmi_system_id bridge_d3_blacklist[] = {
* pci_bridge_d3_possible - Is it possible to put the bridge into D3
* @bridge: Bridge to check
*
- * This function checks if it is possible to move the bridge to D3.
- * Currently we only allow D3 for recent enough PCIe ports and Thunderbolt.
+ * Currently we only allow D3 for some PCIe ports and for Thunderbolt.
+ *
+ * Return: Whether it is possible to move the bridge to D3.
+ *
+ * The return value is guaranteed to be constant across the entire lifetime
+ * of the bridge, including its hot-removal.
*/
bool pci_bridge_d3_possible(struct pci_dev *bridge)
{
@@ -3075,10 +3079,10 @@ bool pci_bridge_d3_possible(struct pci_dev *bridge)
return false;
/*
- * It should be safe to put PCIe ports from 2015 or newer
- * to D3.
+ * Out of caution, we only allow PCIe ports from 2015 or newer
+ * into D3 on x86.
*/
- if (dmi_get_bios_year() >= 2015)
+ if (!IS_ENABLED(CONFIG_X86) || dmi_get_bios_year() >= 2015)
return true;
break;
}
@@ -3192,6 +3196,12 @@ void pci_d3cold_disable(struct pci_dev *dev)
}
EXPORT_SYMBOL_GPL(pci_d3cold_disable);
+void pci_pm_power_up_and_verify_state(struct pci_dev *pci_dev)
+{
+ pci_power_up(pci_dev);
+ pci_update_current_state(pci_dev, PCI_D0);
+}
+
/**
* pci_pm_init - Initialize PM functions of given PCI device
* @dev: PCI device to handle.
@@ -3202,9 +3212,6 @@ void pci_pm_init(struct pci_dev *dev)
u16 status;
u16 pmc;
- pm_runtime_forbid(&dev->dev);
- pm_runtime_set_active(&dev->dev);
- pm_runtime_enable(&dev->dev);
device_enable_async_suspend(&dev->dev);
dev->wakeup_prepared = false;
@@ -3214,14 +3221,14 @@ void pci_pm_init(struct pci_dev *dev)
/* find PCI PM capability in list */
pm = pci_find_capability(dev, PCI_CAP_ID_PM);
if (!pm)
- return;
+ goto poweron;
/* Check device's ability to generate PME# */
pci_read_config_word(dev, pm + PCI_PM_PMC, &pmc);
if ((pmc & PCI_PM_CAP_VER_MASK) > 3) {
pci_err(dev, "unsupported PM cap regs version (%u)\n",
pmc & PCI_PM_CAP_VER_MASK);
- return;
+ goto poweron;
}
dev->pm_cap = pm;
@@ -3266,6 +3273,11 @@ void pci_pm_init(struct pci_dev *dev)
pci_read_config_word(dev, PCI_STATUS, &status);
if (status & PCI_STATUS_IMM_READY)
dev->imm_ready = 1;
+poweron:
+ pci_pm_power_up_and_verify_state(dev);
+ pm_runtime_forbid(&dev->dev);
+ pm_runtime_set_active(&dev->dev);
+ pm_runtime_enable(&dev->dev);
}
static unsigned long pci_ea_flags(struct pci_dev *dev, u8 prop)
@@ -3726,6 +3738,11 @@ void pci_acs_init(struct pci_dev *dev)
pci_enable_acs(dev);
}
+void pci_rebar_init(struct pci_dev *pdev)
+{
+ pdev->rebar_cap = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_REBAR);
+}
+
/**
* pci_rebar_find_pos - find position of resize ctrl reg for BAR
* @pdev: PCI device
@@ -3740,7 +3757,7 @@ static int pci_rebar_find_pos(struct pci_dev *pdev, int bar)
unsigned int pos, nbars, i;
u32 ctrl;
- pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_REBAR);
+ pos = pdev->rebar_cap;
if (!pos)
return -ENOTSUPP;
@@ -3765,7 +3782,7 @@ static int pci_rebar_find_pos(struct pci_dev *pdev, int bar)
* @bar: BAR to query
*
* Get the possible sizes of a resizable BAR as bitmask defined in the spec
- * (bit 0=1MB, bit 19=512GB). Returns 0 if BAR isn't resizable.
+ * (bit 0=1MB, bit 31=128TB). Returns 0 if BAR isn't resizable.
*/
u32 pci_rebar_get_possible_sizes(struct pci_dev *pdev, int bar)
{
@@ -3813,7 +3830,7 @@ int pci_rebar_get_current_size(struct pci_dev *pdev, int bar)
* pci_rebar_set_size - set a new size for a BAR
* @pdev: PCI device
* @bar: BAR to set size to
- * @size: new size as defined in the spec (0=1MB, 19=512GB)
+ * @size: new size as defined in the spec (0=1MB, 31=128TB)
*
* Set the new size of a BAR as defined in the spec.
* Returns zero if resizing was successful, error code otherwise.
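For context, a sketch of how a driver might combine the sizes bitmask with pci_release_resource() and pci_resize_resource(); blindly requesting the largest supported size keeps this illustrative rather than production-ready, and the BAR must be unmapped and unused at this point:

static int demo_grow_bar0(struct pci_dev *pdev)
{
	u32 sizes = pci_rebar_get_possible_sizes(pdev, 0);

	if (!sizes)
		return -ENODEV;		/* BAR 0 is not resizable */

	/* Release the BAR so it can be re-sized and re-assigned */
	pci_release_resource(pdev, 0);

	/* Bit n set => size 2^n MB is supported; request the largest */
	return pci_resize_resource(pdev, 0, fls(sizes) - 1);
}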
@@ -3932,16 +3949,6 @@ void pci_release_region(struct pci_dev *pdev, int bar)
if (!pci_bar_index_is_valid(bar))
return;
- /*
- * This is done for backwards compatibility, because the old PCI devres
- * API had a mode in which the function became managed if it had been
- * enabled with pcim_enable_device() instead of pci_enable_device().
- */
- if (pci_is_managed(pdev)) {
- pcim_release_region(pdev, bar);
- return;
- }
-
if (pci_resource_len(pdev, bar) == 0)
return;
if (pci_resource_flags(pdev, bar) & IORESOURCE_IO)
@@ -3979,13 +3986,6 @@ static int __pci_request_region(struct pci_dev *pdev, int bar,
if (!pci_bar_index_is_valid(bar))
return -EINVAL;
- if (pci_is_managed(pdev)) {
- if (exclusive == IORESOURCE_EXCLUSIVE)
- return pcim_request_region_exclusive(pdev, bar, name);
-
- return pcim_request_region(pdev, bar, name);
- }
-
if (pci_resource_len(pdev, bar) == 0)
return 0;
@@ -4022,11 +4022,6 @@ err_out:
*
* Returns 0 on success, or %EBUSY on error. A warning
* message is also printed on failure.
- *
- * NOTE:
- * This is a "hybrid" function: It's normally unmanaged, but becomes managed
- * when pcim_enable_device() has been called in advance. This hybrid feature is
- * DEPRECATED! If you want managed cleanup, use the pcim_* functions instead.
*/
int pci_request_region(struct pci_dev *pdev, int bar, const char *name)
{
@@ -4079,11 +4074,6 @@ err_out:
* @name: Name of the driver requesting the resources
*
* Returns: 0 on success, negative error code on failure.
- *
- * NOTE:
- * This is a "hybrid" function: It's normally unmanaged, but becomes managed
- * when pcim_enable_device() has been called in advance. This hybrid feature is
- * DEPRECATED! If you want managed cleanup, use the pcim_* functions instead.
*/
int pci_request_selected_regions(struct pci_dev *pdev, int bars,
const char *name)
@@ -4099,11 +4089,6 @@ EXPORT_SYMBOL(pci_request_selected_regions);
* @name: name of the driver requesting the resources
*
* Returns: 0 on success, negative error code on failure.
- *
- * NOTE:
- * This is a "hybrid" function: It's normally unmanaged, but becomes managed
- * when pcim_enable_device() has been called in advance. This hybrid feature is
- * DEPRECATED! If you want managed cleanup, use the pcim_* functions instead.
*/
int pci_request_selected_regions_exclusive(struct pci_dev *pdev, int bars,
const char *name)
@@ -4139,11 +4124,6 @@ EXPORT_SYMBOL(pci_release_regions);
*
* Returns 0 on success, or %EBUSY on error. A warning
* message is also printed on failure.
- *
- * NOTE:
- * This is a "hybrid" function: It's normally unmanaged, but becomes managed
- * when pcim_enable_device() has been called in advance. This hybrid feature is
- * DEPRECATED! If you want managed cleanup, use the pcim_* functions instead.
*/
int pci_request_regions(struct pci_dev *pdev, const char *name)
{
@@ -4168,11 +4148,6 @@ EXPORT_SYMBOL(pci_request_regions);
*
* Returns 0 on success, or %EBUSY on error. A warning message is also
* printed on failure.
- *
- * NOTE:
- * This is a "hybrid" function: It's normally unmanaged, but becomes managed
- * when pcim_enable_device() has been called in advance. This hybrid feature is
- * DEPRECATED! If you want managed cleanup, use the pcim_* functions instead.
*/
int pci_request_regions_exclusive(struct pci_dev *pdev, const char *name)
{
@@ -4252,7 +4227,7 @@ unsigned long __weak pci_address_to_pio(phys_addr_t address)
#ifndef pci_remap_iospace
int pci_remap_iospace(const struct resource *res, phys_addr_t phys_addr)
{
-#if defined(PCI_IOBASE) && defined(CONFIG_MMU)
+#if defined(PCI_IOBASE)
unsigned long vaddr = (unsigned long)PCI_IOBASE + res->start;
if (!(res->flags & IORESOURCE_IO))
@@ -4285,7 +4260,7 @@ EXPORT_SYMBOL(pci_remap_iospace);
*/
void pci_unmap_iospace(struct resource *res)
{
-#if defined(PCI_IOBASE) && defined(CONFIG_MMU)
+#if defined(PCI_IOBASE)
unsigned long vaddr = (unsigned long)PCI_IOBASE + res->start;
vunmap_range(vaddr, vaddr + resource_size(res));
@@ -4713,6 +4688,11 @@ static int pcie_wait_for_link_status(struct pci_dev *pdev,
* @pdev: Device whose link to retrain.
* @use_lt: Use the LT bit if TRUE, or the DLLLA bit if FALSE, for status.
*
+ * Trigger retraining of the PCIe Link and wait for it to complete. As link
+ * retraining is known to assert LBMS and may change the Link Speed, LBMS is
+ * cleared after the retraining and the Link Speed of the subordinate bus is
+ * updated.
+ *
* Retrain completion status is retrieved from the Link Status Register
* according to @use_lt. It is not verified whether the use of the DLLLA
* bit is valid.
@@ -4752,7 +4732,19 @@ int pcie_retrain_link(struct pci_dev *pdev, bool use_lt)
* to track link speed or width changes made by hardware itself
* in attempt to correct unreliable link operation.
*/
- pcie_reset_lbms_count(pdev);
+ pcie_reset_lbms(pdev);
+
+ /*
+ * Ensure the Link Speed updates after retraining in case the Link
+ * Speed was changed because of the retraining. While the bwctrl's
+ * IRQ handler normally picks up the new Link Speed, clearing LBMS
+ * races with the IRQ handler reading the Link Status register and
+ * can result in the handler returning early without updating the
+ * Link Speed.
+ */
+ if (pdev->subordinate)
+ pcie_update_link_speed(pdev->subordinate);
+
return rc;
}
@@ -4780,7 +4772,7 @@ static bool pcie_wait_for_link_delay(struct pci_dev *pdev, bool active,
/*
* PCIe r4.0 sec 6.6.1, a component must enter LTSSM Detect within 20ms,
- * after which we should expect an link active if the reset was
+ * after which we should expect the link to be active if the reset was
* successful. If so, software must wait a minimum 100ms before sending
* configuration requests to devices downstream this port.
*
@@ -4949,7 +4941,7 @@ int pci_bridge_wait_for_secondary_bus(struct pci_dev *dev, char *reset_type)
delay);
if (!pcie_wait_for_link_delay(dev, true, delay)) {
/* Did not train, no need to wait any further */
- pci_info(dev, "Data Link Layer Link Active not set in 1000 msec\n");
+ pci_info(dev, "Data Link Layer Link Active not set in %d msec\n", delay);
return -ENOTTY;
}
@@ -5244,6 +5236,7 @@ const struct pci_reset_fn_method pci_reset_fn_methods[] = {
int __pci_reset_function_locked(struct pci_dev *dev)
{
int i, m, rc;
+ const struct pci_reset_fn_method *method;
might_sleep();
@@ -5260,9 +5253,13 @@ int __pci_reset_function_locked(struct pci_dev *dev)
if (!m)
return -ENOTTY;
- rc = pci_reset_fn_methods[m].reset_fn(dev, PCI_RESET_DO_RESET);
+ method = &pci_reset_fn_methods[m];
+ pci_dbg(dev, "reset via %s\n", method->name);
+ rc = method->reset_fn(dev, PCI_RESET_DO_RESET);
if (!rc)
return 0;
+
+ pci_dbg(dev, "%s failed with %d\n", method->name, rc);
if (rc != -ENOTTY)
return rc;
}
@@ -5528,7 +5525,8 @@ static void pci_slot_unlock(struct pci_slot *slot)
continue;
if (dev->subordinate)
pci_bus_unlock(dev->subordinate);
- pci_dev_unlock(dev);
+ else
+ pci_dev_unlock(dev);
}
}
@@ -6204,21 +6202,25 @@ void __pcie_print_link_status(struct pci_dev *dev, bool verbose)
enum pci_bus_speed speed, speed_cap;
struct pci_dev *limiting_dev = NULL;
u32 bw_avail, bw_cap;
+ char *flit_mode = "";
bw_cap = pcie_bandwidth_capable(dev, &speed_cap, &width_cap);
bw_avail = pcie_bandwidth_available(dev, &limiting_dev, &speed, &width);
+ if (dev->bus && dev->bus->flit_mode)
+ flit_mode = ", in Flit mode";
+
if (bw_avail >= bw_cap && verbose)
- pci_info(dev, "%u.%03u Gb/s available PCIe bandwidth (%s x%d link)\n",
+ pci_info(dev, "%u.%03u Gb/s available PCIe bandwidth (%s x%d link)%s\n",
bw_cap / 1000, bw_cap % 1000,
- pci_speed_string(speed_cap), width_cap);
+ pci_speed_string(speed_cap), width_cap, flit_mode);
else if (bw_avail < bw_cap)
- pci_info(dev, "%u.%03u Gb/s available PCIe bandwidth, limited by %s x%d link at %s (capable of %u.%03u Gb/s with %s x%d link)\n",
+ pci_info(dev, "%u.%03u Gb/s available PCIe bandwidth, limited by %s x%d link at %s (capable of %u.%03u Gb/s with %s x%d link)%s\n",
bw_avail / 1000, bw_avail % 1000,
pci_speed_string(speed), width,
limiting_dev ? pci_name(limiting_dev) : "<unknown>",
bw_cap / 1000, bw_cap % 1000,
- pci_speed_string(speed_cap), width_cap);
+ pci_speed_string(speed_cap), width_cap, flit_mode);
}
/**
@@ -6788,11 +6790,6 @@ int __weak pci_ext_cfg_avail(void)
return 1;
}
-void __weak pci_fixup_cardbus(struct pci_bus *bus)
-{
-}
-EXPORT_SYMBOL(pci_fixup_cardbus);
-
static int __init pci_setup(char *str)
{
while (str) {
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index d22755de688b..98d6fccb383e 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -9,6 +9,8 @@ struct pcie_tlp_log;
/* Number of possible devfns: 0.0 to 1f.7 inclusive */
#define MAX_NR_DEVFNS 256
+#define MAX_NR_LANES 16
+
#define PCI_FIND_CAP_TTL 48
#define PCI_VSEC_ID_INTEL_TBT 0x1234 /* Thunderbolt */
@@ -59,7 +61,7 @@ struct pcie_tlp_log;
* completes before sending a Configuration Request to the device
* immediately below that Port."
*/
-#define PCIE_RESET_CONFIG_DEVICE_WAIT_MS 100
+#define PCIE_RESET_CONFIG_WAIT_MS 100
/* Message Routing (r[2:0]); PCIe r6.0, sec 2.2.8 */
#define PCIE_MSG_TYPE_R_RC 0
@@ -148,6 +150,7 @@ void pci_dev_adjust_pme(struct pci_dev *dev);
void pci_dev_complete_resume(struct pci_dev *pci_dev);
void pci_config_pm_runtime_get(struct pci_dev *dev);
void pci_config_pm_runtime_put(struct pci_dev *dev);
+void pci_pm_power_up_and_verify_state(struct pci_dev *pci_dev);
void pci_pm_init(struct pci_dev *dev);
void pci_ea_init(struct pci_dev *dev);
void pci_msi_init(struct pci_dev *dev);
@@ -227,6 +230,7 @@ static inline int pci_proc_detach_bus(struct pci_bus *bus) { return 0; }
/* Functions for PCI Hotplug drivers to use */
int pci_hp_add_bridge(struct pci_dev *dev);
+bool pci_hp_spurious_link_change(struct pci_dev *pdev);
#if defined(CONFIG_SYSFS) && defined(HAVE_PCI_LEGACY)
void pci_create_legacy_files(struct pci_bus *bus);
@@ -269,6 +273,7 @@ extern const struct attribute_group *pci_dev_groups[];
extern const struct attribute_group *pci_dev_attr_groups[];
extern const struct attribute_group *pcibus_groups[];
extern const struct attribute_group *pci_bus_groups[];
+extern const struct attribute_group pci_doe_sysfs_group;
#else
static inline int pci_create_sysfs_dev_files(struct pci_dev *pdev) { return 0; }
static inline void pci_remove_sysfs_dev_files(struct pci_dev *pdev) { }
@@ -282,6 +287,8 @@ extern unsigned long pci_hotplug_io_size;
extern unsigned long pci_hotplug_mmio_size;
extern unsigned long pci_hotplug_mmio_pref_size;
extern unsigned long pci_hotplug_bus_size;
+extern unsigned long pci_cardbus_io_size;
+extern unsigned long pci_cardbus_mem_size;
/**
* pci_match_one_device - Tell if a PCI device structure has a matching
@@ -325,6 +332,10 @@ enum pci_bar_type {
struct device *pci_get_host_bridge_device(struct pci_dev *dev);
void pci_put_host_bridge_device(struct device *dev);
+unsigned int pci_rescan_bus_bridge_resize(struct pci_dev *bridge);
+int pci_reassign_bridge_resources(struct pci_dev *bridge, unsigned long type);
+int __must_check pci_reassign_resource(struct pci_dev *dev, int i, resource_size_t add_size, resource_size_t align);
+
int pci_configure_extended_tags(struct pci_dev *dev, void *ign);
bool pci_bus_read_dev_vendor_id(struct pci_bus *bus, int devfn, u32 *pl,
int rrs_timeout);
@@ -349,6 +360,29 @@ void pci_walk_bus_locked(struct pci_bus *top,
void *userdata);
const char *pci_resource_name(struct pci_dev *dev, unsigned int i);
+bool pci_resource_is_optional(const struct pci_dev *dev, int resno);
+
+/**
+ * pci_resource_num - Reverse lookup resource number from device resources
+ * @dev: PCI device
+ * @res: Resource to look up the index for (MUST be one of @dev's resources)
+ *
+ * Perform a reverse lookup to determine the resource number for @res within
+ * @dev's resource array. NOTE: The caller is responsible for ensuring @res is
+ * among @dev's resources!
+ *
+ * Returns: resource number.
+ */
+static inline int pci_resource_num(const struct pci_dev *dev,
+ const struct resource *res)
+{
+ int resno = res - &dev->resource[0];
+
+ /* Passing a resource that is not among dev's resources? */
+ WARN_ON_ONCE(resno >= PCI_NUM_RESOURCES);
+
+ return resno;
+}
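
[Editor's note: the pointer-difference trick in pci_resource_num() is easy to
get wrong, so here is a minimal, runnable userspace sketch of the same reverse
lookup. NUM_RESOURCES, struct dev and assert() are stand-ins for
PCI_NUM_RESOURCES, struct pci_dev and WARN_ON_ONCE(); this is illustrative,
not kernel code.]

#include <assert.h>
#include <stdio.h>

#define NUM_RESOURCES 17	/* stand-in for PCI_NUM_RESOURCES */

struct resource { unsigned long start, end; };
struct dev { struct resource resource[NUM_RESOURCES]; };

/* Reverse lookup: index of @res within @d->resource[] */
static int resource_num(const struct dev *d, const struct resource *res)
{
	int resno = res - &d->resource[0];	/* pointer difference in elements */

	assert(resno >= 0 && resno < NUM_RESOURCES);	/* caller contract */
	return resno;
}

int main(void)
{
	struct dev d = { { { 0, 0 } } };

	printf("%d\n", resource_num(&d, &d.resource[5]));	/* prints 5 */
	return 0;
}
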
void pci_reassigndev_resource_alignment(struct pci_dev *dev);
void pci_disable_bridge_window(struct pci_dev *dev);
@@ -422,9 +456,10 @@ const char *pci_speed_string(enum pci_bus_speed speed);
void __pcie_print_link_status(struct pci_dev *dev, bool verbose);
void pcie_report_downtraining(struct pci_dev *dev);
-static inline void __pcie_update_link_speed(struct pci_bus *bus, u16 linksta)
+static inline void __pcie_update_link_speed(struct pci_bus *bus, u16 linksta, u16 linksta2)
{
bus->cur_bus_speed = pcie_link_speed[linksta & PCI_EXP_LNKSTA_CLS];
+ bus->flit_mode = (linksta2 & PCI_EXP_LNKSTA2_FLIT) ? 1 : 0;
}
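
[Editor's note: a small, runnable sketch of the decode done by
__pcie_update_link_speed() above. Current Link Speed is bits 3:0 of Link
Status (PCI_EXP_LNKSTA_CLS); the Flit Mode Status bit of Link Status 2 is
assumed here to sit at bit 10, which is what PCI_EXP_LNKSTA2_FLIT is expected
to encode.]

#include <stdint.h>
#include <stdio.h>

#define LNKSTA_CLS   0x000f	/* Current Link Speed field */
#define LNKSTA2_FLIT 0x0400	/* Flit Mode Status (assumed bit position) */

int main(void)
{
	uint16_t linksta = 0x8046;	/* example Link Status value */
	uint16_t linksta2 = 0x0400;	/* example Link Status 2: Flit mode on */

	printf("speed code %u, flit_mode %u\n",
	       (unsigned)(linksta & LNKSTA_CLS),
	       (linksta2 & LNKSTA2_FLIT) ? 1u : 0u);	/* speed code 6, flit_mode 1 */
	return 0;
}
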
void pcie_update_link_speed(struct pci_bus *bus);
@@ -472,6 +507,14 @@ static inline void pci_npem_create(struct pci_dev *dev) { }
static inline void pci_npem_remove(struct pci_dev *dev) { }
#endif
+#if defined(CONFIG_PCI_DOE) && defined(CONFIG_SYSFS)
+void pci_doe_sysfs_init(struct pci_dev *pci_dev);
+void pci_doe_sysfs_teardown(struct pci_dev *pdev);
+#else
+static inline void pci_doe_sysfs_init(struct pci_dev *pdev) { }
+static inline void pci_doe_sysfs_teardown(struct pci_dev *pdev) { }
+#endif
+
/**
* pci_dev_set_io_state - Set the new error state if possible.
*
@@ -518,6 +561,10 @@ static inline int pci_dev_set_disconnected(struct pci_dev *dev, void *unused)
#define PCI_DPC_RECOVERED 1
#define PCI_DPC_RECOVERING 2
#define PCI_DEV_REMOVED 3
+#define PCI_LINK_CHANGED 4
+#define PCI_LINK_CHANGING 5
+#define PCI_LINK_LBMS_SEEN 6
+#define PCI_DEV_ALLOW_BINDING 7
static inline void pci_dev_assign_added(struct pci_dev *dev)
{
@@ -541,6 +588,16 @@ static inline bool pci_dev_test_and_set_removed(struct pci_dev *dev)
return test_and_set_bit(PCI_DEV_REMOVED, &dev->priv_flags);
}
+static inline void pci_dev_allow_binding(struct pci_dev *dev)
+{
+ set_bit(PCI_DEV_ALLOW_BINDING, &dev->priv_flags);
+}
+
+static inline bool pci_dev_binding_disallowed(struct pci_dev *dev)
+{
+ return !test_bit(PCI_DEV_ALLOW_BINDING, &dev->priv_flags);
+}
+
#ifdef CONFIG_PCIEAER
#include <linux/aer.h>
@@ -548,12 +605,15 @@ static inline bool pci_dev_test_and_set_removed(struct pci_dev *dev)
struct aer_err_info {
struct pci_dev *dev[AER_MAX_MULTI_ERR_DEVICES];
+ int ratelimit_print[AER_MAX_MULTI_ERR_DEVICES];
int error_dev_num;
+ const char *level; /* printk level */
unsigned int id:16;
unsigned int severity:2; /* 0:NONFATAL | 1:FATAL | 2:COR */
- unsigned int __pad1:5;
+ unsigned int root_ratelimit_print:1; /* 0=skip, 1=print */
+ unsigned int __pad1:4;
unsigned int multi_error_valid:1;
unsigned int first_error:5;
@@ -565,14 +625,16 @@ struct aer_err_info {
struct pcie_tlp_log tlp; /* TLP Header */
};
-int aer_get_device_error_info(struct pci_dev *dev, struct aer_err_info *info);
-void aer_print_error(struct pci_dev *dev, struct aer_err_info *info);
+int aer_get_device_error_info(struct aer_err_info *info, int i);
+void aer_print_error(struct aer_err_info *info, int i);
int pcie_read_tlp_log(struct pci_dev *dev, int where, int where2,
- unsigned int tlp_len, struct pcie_tlp_log *log);
+ unsigned int tlp_len, bool flit,
+ struct pcie_tlp_log *log);
unsigned int aer_tlp_log_len(struct pci_dev *dev, u32 aercc);
void pcie_print_tlp_log(const struct pci_dev *dev,
- const struct pcie_tlp_log *log, const char *pfx);
+ const struct pcie_tlp_log *log, const char *level,
+ const char *pfx);
#endif /* CONFIG_PCIEAER */
#ifdef CONFIG_PCIEPORTBUS
@@ -648,6 +710,10 @@ void pci_iov_update_resource(struct pci_dev *dev, int resno);
resource_size_t pci_sriov_resource_alignment(struct pci_dev *dev, int resno);
void pci_restore_iov_state(struct pci_dev *dev);
int pci_iov_bus_range(struct pci_bus *bus);
+static inline bool pci_resource_is_iov(int resno)
+{
+ return resno >= PCI_IOV_RESOURCES && resno <= PCI_IOV_RESOURCE_END;
+}
extern const struct attribute_group sriov_pf_dev_attr_group;
extern const struct attribute_group sriov_vf_dev_attr_group;
#else
@@ -657,12 +723,21 @@ static inline int pci_iov_init(struct pci_dev *dev)
}
static inline void pci_iov_release(struct pci_dev *dev) { }
static inline void pci_iov_remove(struct pci_dev *dev) { }
+static inline void pci_iov_update_resource(struct pci_dev *dev, int resno) { }
+static inline resource_size_t pci_sriov_resource_alignment(struct pci_dev *dev,
+ int resno)
+{
+ return 0;
+}
static inline void pci_restore_iov_state(struct pci_dev *dev) { }
static inline int pci_iov_bus_range(struct pci_bus *bus)
{
return 0;
}
-
+static inline bool pci_resource_is_iov(int resno)
+{
+ return false;
+}
#endif /* CONFIG_PCI_IOV */
#ifdef CONFIG_PCIE_TPH
@@ -696,12 +771,10 @@ unsigned long pci_cardbus_resource_alignment(struct resource *);
static inline resource_size_t pci_resource_alignment(struct pci_dev *dev,
struct resource *res)
{
-#ifdef CONFIG_PCI_IOV
- int resno = res - dev->resource;
+ int resno = pci_resource_num(dev, res);
- if (resno >= PCI_IOV_RESOURCES && resno <= PCI_IOV_RESOURCE_END)
+ if (pci_resource_is_iov(resno))
return pci_sriov_resource_alignment(dev, resno);
-#endif
if (dev->class >> 8 == PCI_CLASS_BRIDGE_CARDBUS)
return pci_cardbus_resource_alignment(res);
return resource_alignment(res);
@@ -773,14 +846,9 @@ static inline void pcie_ecrc_get_policy(char *str) { }
#endif
#ifdef CONFIG_PCIEPORTBUS
-void pcie_reset_lbms_count(struct pci_dev *port);
-int pcie_lbms_count(struct pci_dev *port, unsigned long *val);
+void pcie_reset_lbms(struct pci_dev *port);
#else
-static inline void pcie_reset_lbms_count(struct pci_dev *port) {}
-static inline int pcie_lbms_count(struct pci_dev *port, unsigned long *val)
-{
- return -EOPNOTSUPP;
-}
+static inline void pcie_reset_lbms(struct pci_dev *port) {}
#endif
struct pci_dev_reset_methods {
@@ -815,6 +883,7 @@ static inline int acpi_get_rc_resources(struct device *dev, const char *hid,
}
#endif
+void pci_rebar_init(struct pci_dev *pdev);
int pci_rebar_get_current_size(struct pci_dev *pdev, int bar);
int pci_rebar_set_size(struct pci_dev *pdev, int bar, int size);
static inline u64 pci_rebar_size_to_bytes(int size)
@@ -824,6 +893,21 @@ static inline u64 pci_rebar_size_to_bytes(int size)
struct device_node;
+#define PCI_EQ_RESV 0xff
+
+enum equalization_preset_type {
+ EQ_PRESET_TYPE_8GTS,
+ EQ_PRESET_TYPE_16GTS,
+ EQ_PRESET_TYPE_32GTS,
+ EQ_PRESET_TYPE_64GTS,
+ EQ_PRESET_TYPE_MAX
+};
+
+struct pci_eq_presets {
+ u16 eq_presets_8gts[MAX_NR_LANES];
+ u8 eq_presets_Ngts[EQ_PRESET_TYPE_MAX - 1][MAX_NR_LANES];
+};
+
#ifdef CONFIG_OF
int of_get_pci_domain_nr(struct device_node *node);
int of_pci_get_max_link_speed(struct device_node *node);
@@ -838,7 +922,9 @@ void pci_release_bus_of_node(struct pci_bus *bus);
int devm_of_pci_bridge_init(struct device *dev, struct pci_host_bridge *bridge);
bool of_pci_supply_present(struct device_node *np);
-
+int of_pci_get_equalization_presets(struct device *dev,
+ struct pci_eq_presets *presets,
+ int num_lanes);
#else
static inline int
of_get_pci_domain_nr(struct device_node *node)
@@ -883,6 +969,17 @@ static inline bool of_pci_supply_present(struct device_node *np)
{
return false;
}
+
+static inline int of_pci_get_equalization_presets(struct device *dev,
+ struct pci_eq_presets *presets,
+ int num_lanes)
+{
+ presets->eq_presets_8gts[0] = PCI_EQ_RESV;
+ for (int i = 0; i < EQ_PRESET_TYPE_MAX - 1; i++)
+ presets->eq_presets_Ngts[i][0] = PCI_EQ_RESV;
+
+ return 0;
+}
#endif /* CONFIG_OF */
struct of_changeset;
@@ -892,9 +989,16 @@ void of_pci_make_dev_node(struct pci_dev *pdev);
void of_pci_remove_node(struct pci_dev *pdev);
int of_pci_add_properties(struct pci_dev *pdev, struct of_changeset *ocs,
struct device_node *np);
+void of_pci_make_host_bridge_node(struct pci_host_bridge *bridge);
+void of_pci_remove_host_bridge_node(struct pci_host_bridge *bridge);
+int of_pci_add_host_bridge_properties(struct pci_host_bridge *bridge,
+ struct of_changeset *ocs,
+ struct device_node *np);
#else
static inline void of_pci_make_dev_node(struct pci_dev *pdev) { }
static inline void of_pci_remove_node(struct pci_dev *pdev) { }
+static inline void of_pci_make_host_bridge_node(struct pci_host_bridge *bridge) { }
+static inline void of_pci_remove_host_bridge_node(struct pci_host_bridge *bridge) { }
#endif
#ifdef CONFIG_PCIEAER
@@ -902,6 +1006,7 @@ void pci_no_aer(void);
void pci_aer_init(struct pci_dev *dev);
void pci_aer_exit(struct pci_dev *dev);
extern const struct attribute_group aer_stats_attr_group;
+extern const struct attribute_group aer_attr_group;
void pci_aer_clear_fatal_status(struct pci_dev *dev);
int pci_aer_clear_status(struct pci_dev *dev);
int pci_aer_raw_clear_status(struct pci_dev *dev);
@@ -1000,10 +1105,14 @@ static inline pci_power_t mid_pci_get_power_state(struct pci_dev *pdev)
}
#endif
-int pcim_intx(struct pci_dev *dev, int enable);
-int pcim_request_region_exclusive(struct pci_dev *pdev, int bar,
- const char *name);
-void pcim_release_region(struct pci_dev *pdev, int bar);
+#ifdef CONFIG_PCI_MSI
+int pci_msix_write_tph_tag(struct pci_dev *pdev, unsigned int index, u16 tag);
+#else
+static inline int pci_msix_write_tph_tag(struct pci_dev *pdev, unsigned int index, u16 tag)
+{
+ return -ENODEV;
+}
+#endif
/*
* Config Address for PCI Configuration Mechanism #1
diff --git a/drivers/pci/pcie/aer.c b/drivers/pci/pcie/aer.c
index 508474e17183..70ac66188367 100644
--- a/drivers/pci/pcie/aer.c
+++ b/drivers/pci/pcie/aer.c
@@ -2,7 +2,7 @@
/*
* Implement the AER root port service driver. The driver registers an IRQ
* handler. When a root port triggers an AER interrupt, the IRQ handler
- * collects root port status and schedules work.
+ * collects Root Port status and schedules work.
*
* Copyright (C) 2006 Intel Corp.
* Tom Long Nguyen (tom.l.nguyen@intel.com)
@@ -17,6 +17,7 @@
#include <linux/bitops.h>
#include <linux/cper.h>
+#include <linux/dev_printk.h>
#include <linux/pci.h>
#include <linux/pci-acpi.h>
#include <linux/sched.h>
@@ -27,6 +28,7 @@
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/kfifo.h>
+#include <linux/ratelimit.h>
#include <linux/slab.h>
#include <acpi/apei.h>
#include <acpi/ghes.h>
@@ -35,6 +37,9 @@
#include "../pci.h"
#include "portdrv.h"
+#define aer_printk(level, pdev, fmt, arg...) \
+ dev_printk(level, &(pdev)->dev, fmt, ##arg)
+
#define AER_ERROR_SOURCES_MAX 128
#define AER_MAX_TYPEOF_COR_ERRS 16 /* as per PCI_ERR_COR_STATUS */
@@ -50,15 +55,15 @@ struct aer_rpc {
DECLARE_KFIFO(aer_fifo, struct aer_err_source, AER_ERROR_SOURCES_MAX);
};
-/* AER stats for the device */
-struct aer_stats {
+/* AER info for the device */
+struct aer_info {
/*
* Fields for all AER capable devices. They indicate the errors
* "as seen by this device". Note that this may mean that if an
- * end point is causing problems, the AER counters may increment
- * at its link partner (e.g. root port) because the errors will be
- * "seen" by the link partner and not the problematic end point
+ * Endpoint is causing problems, the AER counters may increment
+ * at its link partner (e.g. Root Port) because the errors will be
+ * "seen" by the link partner and not the problematic Endpoint
* itself (which may report all counters as 0 as it never saw any
* problems).
*/
@@ -76,14 +81,18 @@ struct aer_stats {
u64 dev_total_nonfatal_errs;
/*
- * Fields for Root ports & root complex event collectors only, these
+ * Fields for Root Ports & Root Complex Event Collectors only; these
* indicate the total number of ERR_COR, ERR_FATAL, and ERR_NONFATAL
- * messages received by the root port / event collector, INCLUDING the
- * ones that are generated internally (by the rootport itself)
+ * messages received by the Root Port / Event Collector, INCLUDING the
+ * ones that are generated internally (by the Root Port itself)
*/
u64 rootport_total_cor_errs;
u64 rootport_total_fatal_errs;
u64 rootport_total_nonfatal_errs;
+
+ /* Ratelimits for errors */
+ struct ratelimit_state correctable_ratelimit;
+ struct ratelimit_state nonfatal_ratelimit;
};
#define AER_LOG_TLP_MASKS (PCI_ERR_UNC_POISON_TLP| \
@@ -138,7 +147,7 @@ static const char * const ecrc_policy_str[] = {
* enable_ecrc_checking - enable PCIe ECRC checking for a device
* @dev: the PCI device
*
- * Returns 0 on success, or negative on failure.
+ * Return: 0 on success, or negative on failure.
*/
static int enable_ecrc_checking(struct pci_dev *dev)
{
@@ -159,10 +168,10 @@ static int enable_ecrc_checking(struct pci_dev *dev)
}
/**
- * disable_ecrc_checking - disables PCIe ECRC checking for a device
+ * disable_ecrc_checking - disable PCIe ECRC checking for a device
* @dev: the PCI device
*
- * Returns 0 on success, or negative on failure.
+ * Return: 0 on success, or negative on failure.
*/
static int disable_ecrc_checking(struct pci_dev *dev)
{
@@ -283,10 +292,10 @@ void pci_aer_clear_fatal_status(struct pci_dev *dev)
* pci_aer_raw_clear_status - Clear AER error registers.
* @dev: the PCI device
*
- * Clearing AER error status registers unconditionally, regardless of
+ * Clear AER error status registers unconditionally, regardless of
* whether they're owned by firmware or the OS.
*
- * Returns 0 on success, or negative on failure.
+ * Return: 0 on success, or negative on failure.
*/
int pci_aer_raw_clear_status(struct pci_dev *dev)
{
@@ -373,13 +382,18 @@ void pci_aer_init(struct pci_dev *dev)
if (!dev->aer_cap)
return;
- dev->aer_stats = kzalloc(sizeof(struct aer_stats), GFP_KERNEL);
+ dev->aer_info = kzalloc(sizeof(*dev->aer_info), GFP_KERNEL);
+
+ ratelimit_state_init(&dev->aer_info->correctable_ratelimit,
+ DEFAULT_RATELIMIT_INTERVAL, DEFAULT_RATELIMIT_BURST);
+ ratelimit_state_init(&dev->aer_info->nonfatal_ratelimit,
+ DEFAULT_RATELIMIT_INTERVAL, DEFAULT_RATELIMIT_BURST);
/*
* We save/restore PCI_ERR_UNCOR_MASK, PCI_ERR_UNCOR_SEVER,
* PCI_ERR_COR_MASK, and PCI_ERR_CAP. Root and Root Complex Event
- * Collectors also implement PCI_ERR_ROOT_COMMAND (PCIe r5.0, sec
- * 7.8.4).
+ * Collectors also implement PCI_ERR_ROOT_COMMAND (PCIe r6.0, sec
+ * 7.8.4.9).
*/
n = pcie_cap_has_rtctl(dev) ? 5 : 4;
pci_add_ext_cap_save_buffer(dev, PCI_EXT_CAP_ID_ERR, sizeof(u32) * n);
@@ -394,8 +408,8 @@ void pci_aer_init(struct pci_dev *dev)
void pci_aer_exit(struct pci_dev *dev)
{
- kfree(dev->aer_stats);
- dev->aer_stats = NULL;
+ kfree(dev->aer_info);
+ dev->aer_info = NULL;
}
#define AER_AGENT_RECEIVER 0
@@ -533,10 +547,10 @@ static const char *aer_agent_string[] = {
{ \
unsigned int i; \
struct pci_dev *pdev = to_pci_dev(dev); \
- u64 *stats = pdev->aer_stats->stats_array; \
+ u64 *stats = pdev->aer_info->stats_array; \
size_t len = 0; \
\
- for (i = 0; i < ARRAY_SIZE(pdev->aer_stats->stats_array); i++) {\
+ for (i = 0; i < ARRAY_SIZE(pdev->aer_info->stats_array); i++) { \
if (strings_array[i]) \
len += sysfs_emit_at(buf, len, "%s %llu\n", \
strings_array[i], \
@@ -547,7 +561,7 @@ static const char *aer_agent_string[] = {
i, stats[i]); \
} \
len += sysfs_emit_at(buf, len, "TOTAL_%s %llu\n", total_string, \
- pdev->aer_stats->total_field); \
+ pdev->aer_info->total_field); \
return len; \
} \
static DEVICE_ATTR_RO(name)
@@ -568,7 +582,7 @@ aer_stats_dev_attr(aer_dev_nonfatal, dev_nonfatal_errs,
char *buf) \
{ \
struct pci_dev *pdev = to_pci_dev(dev); \
- return sysfs_emit(buf, "%llu\n", pdev->aer_stats->field); \
+ return sysfs_emit(buf, "%llu\n", pdev->aer_info->field); \
} \
static DEVICE_ATTR_RO(name)
@@ -595,7 +609,7 @@ static umode_t aer_stats_attrs_are_visible(struct kobject *kobj,
struct device *dev = kobj_to_dev(kobj);
struct pci_dev *pdev = to_pci_dev(dev);
- if (!pdev->aer_stats)
+ if (!pdev->aer_info)
return 0;
if ((a == &dev_attr_aer_rootport_total_err_cor.attr ||
@@ -613,31 +627,136 @@ const struct attribute_group aer_stats_attr_group = {
.is_visible = aer_stats_attrs_are_visible,
};
+/*
+ * Ratelimit interval
+ * <=0: ratelimiting disabled (ratelimit.interval is set to 0)
+ * >0: ratelimiting enabled; ratelimit.interval is given in ms and
+ * stored internally in jiffies
+ */
+#define aer_ratelimit_interval_attr(name, ratelimit) \
+ static ssize_t \
+ name##_show(struct device *dev, struct device_attribute *attr, \
+ char *buf) \
+ { \
+ struct pci_dev *pdev = to_pci_dev(dev); \
+ \
+ return sysfs_emit(buf, "%u\n", \
+ jiffies_to_msecs(pdev->aer_info->ratelimit.interval)); \
+ } \
+ \
+ static ssize_t \
+ name##_store(struct device *dev, struct device_attribute *attr, \
+ const char *buf, size_t count) \
+ { \
+ struct pci_dev *pdev = to_pci_dev(dev); \
+ int interval; \
+ \
+ if (!capable(CAP_SYS_ADMIN)) \
+ return -EPERM; \
+ \
+ if (kstrtoint(buf, 0, &interval) < 0) \
+ return -EINVAL; \
+ \
+ if (interval <= 0) \
+ interval = 0; \
+ else \
+ interval = msecs_to_jiffies(interval); \
+ \
+ pdev->aer_info->ratelimit.interval = interval; \
+ \
+ return count; \
+ } \
+ static DEVICE_ATTR_RW(name);
+
+#define aer_ratelimit_burst_attr(name, ratelimit) \
+ static ssize_t \
+ name##_show(struct device *dev, struct device_attribute *attr, \
+ char *buf) \
+ { \
+ struct pci_dev *pdev = to_pci_dev(dev); \
+ \
+ return sysfs_emit(buf, "%d\n", \
+ pdev->aer_info->ratelimit.burst); \
+ } \
+ \
+ static ssize_t \
+ name##_store(struct device *dev, struct device_attribute *attr, \
+ const char *buf, size_t count) \
+ { \
+ struct pci_dev *pdev = to_pci_dev(dev); \
+ int burst; \
+ \
+ if (!capable(CAP_SYS_ADMIN)) \
+ return -EPERM; \
+ \
+ if (kstrtoint(buf, 0, &burst) < 0) \
+ return -EINVAL; \
+ \
+ pdev->aer_info->ratelimit.burst = burst; \
+ \
+ return count; \
+ } \
+ static DEVICE_ATTR_RW(name);
+
+#define aer_ratelimit_attrs(name) \
+ aer_ratelimit_interval_attr(name##_ratelimit_interval_ms, \
+ name##_ratelimit) \
+ aer_ratelimit_burst_attr(name##_ratelimit_burst, \
+ name##_ratelimit)
+
+aer_ratelimit_attrs(correctable)
+aer_ratelimit_attrs(nonfatal)
+
+static struct attribute *aer_attrs[] = {
+ &dev_attr_correctable_ratelimit_interval_ms.attr,
+ &dev_attr_correctable_ratelimit_burst.attr,
+ &dev_attr_nonfatal_ratelimit_interval_ms.attr,
+ &dev_attr_nonfatal_ratelimit_burst.attr,
+ NULL
+};
+
+static umode_t aer_attrs_are_visible(struct kobject *kobj,
+ struct attribute *a, int n)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct pci_dev *pdev = to_pci_dev(dev);
+
+ if (!pdev->aer_info)
+ return 0;
+
+ return a->mode;
+}
+
+const struct attribute_group aer_attr_group = {
+ .name = "aer",
+ .attrs = aer_attrs,
+ .is_visible = aer_attrs_are_visible,
+};
+
static void pci_dev_aer_stats_incr(struct pci_dev *pdev,
struct aer_err_info *info)
{
unsigned long status = info->status & ~info->mask;
int i, max = -1;
u64 *counter = NULL;
- struct aer_stats *aer_stats = pdev->aer_stats;
+ struct aer_info *aer_info = pdev->aer_info;
- if (!aer_stats)
+ if (!aer_info)
return;
switch (info->severity) {
case AER_CORRECTABLE:
- aer_stats->dev_total_cor_errs++;
- counter = &aer_stats->dev_cor_errs[0];
+ aer_info->dev_total_cor_errs++;
+ counter = &aer_info->dev_cor_errs[0];
max = AER_MAX_TYPEOF_COR_ERRS;
break;
case AER_NONFATAL:
- aer_stats->dev_total_nonfatal_errs++;
- counter = &aer_stats->dev_nonfatal_errs[0];
+ aer_info->dev_total_nonfatal_errs++;
+ counter = &aer_info->dev_nonfatal_errs[0];
max = AER_MAX_TYPEOF_UNCOR_ERRS;
break;
case AER_FATAL:
- aer_stats->dev_total_fatal_errs++;
- counter = &aer_stats->dev_fatal_errs[0];
+ aer_info->dev_total_fatal_errs++;
+ counter = &aer_info->dev_fatal_errs[0];
max = AER_MAX_TYPEOF_UNCOR_ERRS;
break;
}
@@ -649,54 +768,88 @@ static void pci_dev_aer_stats_incr(struct pci_dev *pdev,
static void pci_rootport_aer_stats_incr(struct pci_dev *pdev,
struct aer_err_source *e_src)
{
- struct aer_stats *aer_stats = pdev->aer_stats;
+ struct aer_info *aer_info = pdev->aer_info;
- if (!aer_stats)
+ if (!aer_info)
return;
if (e_src->status & PCI_ERR_ROOT_COR_RCV)
- aer_stats->rootport_total_cor_errs++;
+ aer_info->rootport_total_cor_errs++;
if (e_src->status & PCI_ERR_ROOT_UNCOR_RCV) {
if (e_src->status & PCI_ERR_ROOT_FATAL_RCV)
- aer_stats->rootport_total_fatal_errs++;
+ aer_info->rootport_total_fatal_errs++;
else
- aer_stats->rootport_total_nonfatal_errs++;
+ aer_info->rootport_total_nonfatal_errs++;
+ }
+}
+
+static int aer_ratelimit(struct pci_dev *dev, unsigned int severity)
+{
+ switch (severity) {
+ case AER_NONFATAL:
+ return __ratelimit(&dev->aer_info->nonfatal_ratelimit);
+ case AER_CORRECTABLE:
+ return __ratelimit(&dev->aer_info->correctable_ratelimit);
+ default:
+ return 1; /* Don't ratelimit fatal errors */
}
}
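
[Editor's note: a runnable userspace analogue of the ratelimit_state semantics
that aer_ratelimit() leans on: at most 'burst' messages per 'interval', and
fatal errors are never limited. The kernel's __ratelimit() is the real
implementation (DEFAULT_RATELIMIT_INTERVAL is 5*HZ, DEFAULT_RATELIMIT_BURST is
10); this only sketches the window/burst behavior.]

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

struct ratelimit {
	int interval;	/* seconds here; the kernel stores jiffies */
	int burst;
	time_t begin;
	int printed;
};

static bool ratelimit_ok(struct ratelimit *rs)
{
	time_t now = time(NULL);

	if (rs->interval <= 0)
		return true;	/* interval 0 disables ratelimiting */
	if (now - rs->begin > rs->interval) {	/* start a new window */
		rs->begin = now;
		rs->printed = 0;
	}
	if (rs->printed < rs->burst) {
		rs->printed++;
		return true;
	}
	return false;	/* suppressed, like __ratelimit() returning 0 */
}

int main(void)
{
	struct ratelimit rs = { .interval = 5, .burst = 10 };

	for (int i = 0; i < 30; i++)
		if (ratelimit_ok(&rs))
			printf("event %d\n", i);	/* only the first 10 print */
	return 0;
}
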
-static void __aer_print_error(struct pci_dev *dev,
- struct aer_err_info *info)
+static void __aer_print_error(struct pci_dev *dev, struct aer_err_info *info)
{
const char **strings;
unsigned long status = info->status & ~info->mask;
- const char *level, *errmsg;
+ const char *level = info->level;
+ const char *errmsg;
int i;
- if (info->severity == AER_CORRECTABLE) {
+ if (info->severity == AER_CORRECTABLE)
strings = aer_correctable_error_string;
- level = KERN_WARNING;
- } else {
+ else
strings = aer_uncorrectable_error_string;
- level = KERN_ERR;
- }
for_each_set_bit(i, &status, 32) {
errmsg = strings[i];
if (!errmsg)
errmsg = "Unknown Error Bit";
- pci_printk(level, dev, " [%2d] %-22s%s\n", i, errmsg,
+ aer_printk(level, dev, " [%2d] %-22s%s\n", i, errmsg,
info->first_error == i ? " (First)" : "");
}
- pci_dev_aer_stats_incr(dev, info);
}
-void aer_print_error(struct pci_dev *dev, struct aer_err_info *info)
+static void aer_print_source(struct pci_dev *dev, struct aer_err_info *info,
+ bool found)
+{
+ u16 source = info->id;
+
+ pci_info(dev, "%s%s error message received from %04x:%02x:%02x.%d%s\n",
+ info->multi_error_valid ? "Multiple " : "",
+ aer_error_severity_string[info->severity],
+ pci_domain_nr(dev->bus), PCI_BUS_NUM(source),
+ PCI_SLOT(source), PCI_FUNC(source),
+ found ? "" : " (no details found");
+}
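
[Editor's note: aer_print_source() decodes a 16-bit Error Source ID into
bus/device/function. A runnable sketch of the same decode; BUS_NUM, SLOT and
FUNC mirror the kernel's PCI_BUS_NUM(), PCI_SLOT() and PCI_FUNC().]

#include <stdint.h>
#include <stdio.h>

#define BUS_NUM(id)  (((id) >> 8) & 0xff)
#define SLOT(devfn)  (((devfn) >> 3) & 0x1f)
#define FUNC(devfn)  ((devfn) & 0x07)

int main(void)
{
	uint16_t source = 0x1a3c;	/* example Error Source ID */

	printf("%02x:%02x.%d\n", BUS_NUM(source), SLOT(source), FUNC(source));
	return 0;	/* prints 1a:07.4 */
}
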
+
+void aer_print_error(struct aer_err_info *info, int i)
{
- int layer, agent;
- int id = pci_dev_id(dev);
- const char *level;
+ struct pci_dev *dev;
+ int layer, agent, id;
+ const char *level = info->level;
+
+ if (WARN_ON_ONCE(i >= AER_MAX_MULTI_ERR_DEVICES))
+ return;
+
+ dev = info->dev[i];
+ id = pci_dev_id(dev);
+
+ pci_dev_aer_stats_incr(dev, info);
+ trace_aer_event(pci_name(dev), (info->status & ~info->mask),
+ info->severity, info->tlp_header_valid, &info->tlp);
+
+ if (!info->ratelimit_print[i])
+ return;
if (!info->status) {
pci_err(dev, "PCIe Bus Error: severity=%s, type=Inaccessible, (Unregistered Agent ID)\n",
@@ -707,38 +860,21 @@ void aer_print_error(struct pci_dev *dev, struct aer_err_info *info)
layer = AER_GET_LAYER_ERROR(info->severity, info->status);
agent = AER_GET_AGENT(info->severity, info->status);
- level = (info->severity == AER_CORRECTABLE) ? KERN_WARNING : KERN_ERR;
-
- pci_printk(level, dev, "PCIe Bus Error: severity=%s, type=%s, (%s)\n",
+ aer_printk(level, dev, "PCIe Bus Error: severity=%s, type=%s, (%s)\n",
aer_error_severity_string[info->severity],
aer_error_layer[layer], aer_agent_string[agent]);
- pci_printk(level, dev, " device [%04x:%04x] error status/mask=%08x/%08x\n",
+ aer_printk(level, dev, " device [%04x:%04x] error status/mask=%08x/%08x\n",
dev->vendor, dev->device, info->status, info->mask);
__aer_print_error(dev, info);
if (info->tlp_header_valid)
- pcie_print_tlp_log(dev, &info->tlp, dev_fmt(" "));
+ pcie_print_tlp_log(dev, &info->tlp, level, dev_fmt(" "));
out:
if (info->id && info->error_dev_num > 1 && info->id == id)
pci_err(dev, " Error of this Agent is reported first\n");
-
- trace_aer_event(dev_name(&dev->dev), (info->status & ~info->mask),
- info->severity, info->tlp_header_valid, &info->tlp);
-}
-
-static void aer_print_port_info(struct pci_dev *dev, struct aer_err_info *info)
-{
- u8 bus = info->id >> 8;
- u8 devfn = info->id & 0xff;
-
- pci_info(dev, "%s%s error message received from %04x:%02x:%02x.%d\n",
- info->multi_error_valid ? "Multiple " : "",
- aer_error_severity_string[info->severity],
- pci_domain_nr(dev->bus), bus, PCI_SLOT(devfn),
- PCI_FUNC(devfn));
}
#ifdef CONFIG_ACPI_APEI_PCIEAER
@@ -761,40 +897,48 @@ void pci_print_aer(struct pci_dev *dev, int aer_severity,
{
int layer, agent, tlp_header_valid = 0;
u32 status, mask;
- struct aer_err_info info;
+ struct aer_err_info info = {
+ .severity = aer_severity,
+ .first_error = PCI_ERR_CAP_FEP(aer->cap_control),
+ };
if (aer_severity == AER_CORRECTABLE) {
status = aer->cor_status;
mask = aer->cor_mask;
+ info.level = KERN_WARNING;
} else {
status = aer->uncor_status;
mask = aer->uncor_mask;
+ info.level = KERN_ERR;
tlp_header_valid = status & AER_LOG_TLP_MASKS;
}
- layer = AER_GET_LAYER_ERROR(aer_severity, status);
- agent = AER_GET_AGENT(aer_severity, status);
-
- memset(&info, 0, sizeof(info));
- info.severity = aer_severity;
info.status = status;
info.mask = mask;
- info.first_error = PCI_ERR_CAP_FEP(aer->cap_control);
- pci_err(dev, "aer_status: 0x%08x, aer_mask: 0x%08x\n", status, mask);
+ pci_dev_aer_stats_incr(dev, &info);
+ trace_aer_event(pci_name(dev), (status & ~mask),
+ aer_severity, tlp_header_valid, &aer->header_log);
+
+ if (!aer_ratelimit(dev, info.severity))
+ return;
+
+ layer = AER_GET_LAYER_ERROR(aer_severity, status);
+ agent = AER_GET_AGENT(aer_severity, status);
+
+ aer_printk(info.level, dev, "aer_status: 0x%08x, aer_mask: 0x%08x\n",
+ status, mask);
__aer_print_error(dev, &info);
- pci_err(dev, "aer_layer=%s, aer_agent=%s\n",
- aer_error_layer[layer], aer_agent_string[agent]);
+ aer_printk(info.level, dev, "aer_layer=%s, aer_agent=%s\n",
+ aer_error_layer[layer], aer_agent_string[agent]);
if (aer_severity != AER_CORRECTABLE)
- pci_err(dev, "aer_uncor_severity: 0x%08x\n",
- aer->uncor_severity);
+ aer_printk(info.level, dev, "aer_uncor_severity: 0x%08x\n",
+ aer->uncor_severity);
if (tlp_header_valid)
- pcie_print_tlp_log(dev, &aer->header_log, dev_fmt(" "));
-
- trace_aer_event(dev_name(&dev->dev), (status & ~mask),
- aer_severity, tlp_header_valid, &aer->header_log);
+ pcie_print_tlp_log(dev, &aer->header_log, info.level,
+ dev_fmt(" "));
}
EXPORT_SYMBOL_NS_GPL(pci_print_aer, "CXL");
@@ -805,12 +949,27 @@ EXPORT_SYMBOL_NS_GPL(pci_print_aer, "CXL");
*/
static int add_error_device(struct aer_err_info *e_info, struct pci_dev *dev)
{
- if (e_info->error_dev_num < AER_MAX_MULTI_ERR_DEVICES) {
- e_info->dev[e_info->error_dev_num] = pci_dev_get(dev);
- e_info->error_dev_num++;
- return 0;
+ int i = e_info->error_dev_num;
+
+ if (i >= AER_MAX_MULTI_ERR_DEVICES)
+ return -ENOSPC;
+
+ e_info->dev[i] = pci_dev_get(dev);
+ e_info->error_dev_num++;
+
+ /*
+ * Ratelimit AER log messages. "dev" is either the source
+ * identified by the root's Error Source ID or it has an unmasked
+ * error logged in its own AER Capability. Messages are emitted
+ * when "ratelimit_print[i]" is non-zero. If we will print detail
+ * for a downstream device, make sure we print the Error Source ID
+ * from the root as well.
+ */
+ if (aer_ratelimit(dev, e_info->severity)) {
+ e_info->ratelimit_print[i] = 1;
+ e_info->root_ratelimit_print = 1;
}
- return -ENOSPC;
+ return 0;
}
/**
@@ -825,8 +984,8 @@ static bool is_error_source(struct pci_dev *dev, struct aer_err_info *e_info)
u16 reg16;
/*
- * When bus id is equal to 0, it might be a bad id
- * reported by root port.
+ * When bus ID is equal to 0, it might be a bad ID
+ * reported by Root Port.
*/
if ((PCI_BUS_NUM(e_info->id) != 0) &&
!(dev->bus->bus_flags & PCI_BUS_FLAGS_NO_AERSID)) {
@@ -834,15 +993,15 @@ static bool is_error_source(struct pci_dev *dev, struct aer_err_info *e_info)
if (e_info->id == pci_dev_id(dev))
return true;
- /* Continue id comparing if there is no multiple error */
+ /* Continue ID comparing if there is no multiple error */
if (!e_info->multi_error_valid)
return false;
}
/*
* When either
- * 1) bus id is equal to 0. Some ports might lose the bus
- * id of error source id;
+ * 1) bus ID is equal to 0. Some ports might lose the bus
+ * ID of the Error Source ID;
* 2) bus flag PCI_BUS_FLAGS_NO_AERSID is set
* 3) There are multiple errors and prior ID comparing fails;
* We check AER status registers to find possible reporter.
@@ -894,9 +1053,9 @@ static int find_device_iter(struct pci_dev *dev, void *data)
/**
* find_source_device - search through device hierarchy for source device
* @parent: pointer to Root Port pci_dev data structure
- * @e_info: including detailed error information such like id
+ * @e_info: error info including details such as the Error Source ID
*
- * Return true if found.
+ * Return: true if found.
*
* Invoked by DPC when error is detected at the Root Port.
* Caller of this function must set id, severity, and multi_error_valid of
@@ -904,7 +1063,7 @@ static int find_device_iter(struct pci_dev *dev, void *data)
* e_info->error_dev_num and e_info->dev[], based on the given information.
*/
static bool find_source_device(struct pci_dev *parent,
- struct aer_err_info *e_info)
+ struct aer_err_info *e_info)
{
struct pci_dev *dev = parent;
int result;
@@ -922,15 +1081,8 @@ static bool find_source_device(struct pci_dev *parent,
else
pci_walk_bus(parent->subordinate, find_device_iter, e_info);
- if (!e_info->error_dev_num) {
- u8 bus = e_info->id >> 8;
- u8 devfn = e_info->id & 0xff;
-
- pci_info(parent, "found no error details for %04x:%02x:%02x.%d\n",
- pci_domain_nr(parent->bus), bus, PCI_SLOT(devfn),
- PCI_FUNC(devfn));
+ if (!e_info->error_dev_num)
return false;
- }
return true;
}
@@ -938,9 +1090,9 @@ static bool find_source_device(struct pci_dev *parent,
/**
* pci_aer_unmask_internal_errors - unmask internal errors
- * @dev: pointer to the pcie_dev data structure
+ * @dev: pointer to the pci_dev data structure
*
- * Unmasks internal errors in the Uncorrectable and Correctable Error
+ * Unmask internal errors in the Uncorrectable and Correctable Error
* Mask registers.
*
* Note: AER must be enabled and supported by the device which must be
@@ -1003,7 +1155,7 @@ static int cxl_rch_handle_error_iter(struct pci_dev *dev, void *data)
if (!is_cxl_mem_dev(dev) || !cxl_error_is_native(dev))
return 0;
- /* protect dev->driver */
+ /* Protect dev->driver */
device_lock(&dev->dev);
err_handler = dev->driver ? dev->driver->err_handler : NULL;
@@ -1137,9 +1289,10 @@ static void aer_recover_work_func(struct work_struct *work)
pdev = pci_get_domain_bus_and_slot(entry.domain, entry.bus,
entry.devfn);
if (!pdev) {
- pr_err("no pci_dev for %04x:%02x:%02x.%x\n",
- entry.domain, entry.bus,
- PCI_SLOT(entry.devfn), PCI_FUNC(entry.devfn));
+ pr_err_ratelimited("%04x:%02x:%02x.%x: no pci_dev found\n",
+ entry.domain, entry.bus,
+ PCI_SLOT(entry.devfn),
+ PCI_FUNC(entry.devfn));
continue;
}
pci_print_aer(pdev, entry.severity, entry.regs);
@@ -1195,19 +1348,26 @@ EXPORT_SYMBOL_GPL(aer_recover_queue);
/**
* aer_get_device_error_info - read error status from dev and store it to info
- * @dev: pointer to the device expected to have a error record
* @info: pointer to structure to store the error record
+ * @i: index into info->dev[]
*
- * Return 1 on success, 0 on error.
+ * Return: 1 on success, 0 on error.
*
* Note that @info is reused among all error devices. Clear fields properly.
*/
-int aer_get_device_error_info(struct pci_dev *dev, struct aer_err_info *info)
+int aer_get_device_error_info(struct aer_err_info *info, int i)
{
- int type = pci_pcie_type(dev);
- int aer = dev->aer_cap;
+ struct pci_dev *dev;
+ int type, aer;
u32 aercc;
+ if (i >= AER_MAX_MULTI_ERR_DEVICES)
+ return 0;
+
+ dev = info->dev[i];
+ aer = dev->aer_cap;
+ type = pci_pcie_type(dev);
+
/* Must reset in this function */
info->status = 0;
info->tlp_header_valid = 0;
@@ -1245,6 +1405,7 @@ int aer_get_device_error_info(struct pci_dev *dev, struct aer_err_info *info)
pcie_read_tlp_log(dev, aer + PCI_ERR_HEADER_LOG,
aer + PCI_ERR_PREFIX_LOG,
aer_tlp_log_len(dev, aercc),
+ aercc & PCI_ERR_CAP_TLP_LOG_FLIT,
&info->tlp);
}
}
@@ -1256,74 +1417,98 @@ static inline void aer_process_err_devices(struct aer_err_info *e_info)
{
int i;
- /* Report all before handle them, not to lost records by reset etc. */
+ /* Report all errors before handling them, so records are not lost to resets etc. */
for (i = 0; i < e_info->error_dev_num && e_info->dev[i]; i++) {
- if (aer_get_device_error_info(e_info->dev[i], e_info))
- aer_print_error(e_info->dev[i], e_info);
+ if (aer_get_device_error_info(e_info, i))
+ aer_print_error(e_info, i);
}
for (i = 0; i < e_info->error_dev_num && e_info->dev[i]; i++) {
- if (aer_get_device_error_info(e_info->dev[i], e_info))
+ if (aer_get_device_error_info(e_info, i))
handle_error_source(e_info->dev[i], e_info);
}
}
/**
- * aer_isr_one_error - consume an error detected by root port
- * @rpc: pointer to the root port which holds an error
- * @e_src: pointer to an error source
+ * aer_isr_one_error_type - consume a Correctable or Uncorrectable Error
+ * detected by Root Port or RCEC
+ * @root: pointer to Root Port or RCEC that signaled AER interrupt
+ * @info: pointer to AER error info
*/
-static void aer_isr_one_error(struct aer_rpc *rpc,
- struct aer_err_source *e_src)
+static void aer_isr_one_error_type(struct pci_dev *root,
+ struct aer_err_info *info)
{
- struct pci_dev *pdev = rpc->rpd;
- struct aer_err_info e_info;
+ bool found;
- pci_rootport_aer_stats_incr(pdev, e_src);
+ found = find_source_device(root, info);
/*
- * There is a possibility that both correctable error and
- * uncorrectable error being logged. Report correctable error first.
+ * If we're going to log error messages, we've already set
+ * "info->root_ratelimit_print" and "info->ratelimit_print[i]" to
+ * non-zero (which enables printing) because this is either an
+ * ERR_FATAL or we found a device with an error logged in its AER
+ * Capability.
+ *
+ * If we didn't find the Error Source device, at least log the
+ * Requester ID from the ERR_* Message received by the Root Port or
+ * RCEC, ratelimited by the RP or RCEC.
*/
- if (e_src->status & PCI_ERR_ROOT_COR_RCV) {
- e_info.id = ERR_COR_ID(e_src->id);
- e_info.severity = AER_CORRECTABLE;
-
- if (e_src->status & PCI_ERR_ROOT_MULTI_COR_RCV)
- e_info.multi_error_valid = 1;
- else
- e_info.multi_error_valid = 0;
- aer_print_port_info(pdev, &e_info);
+ if (info->root_ratelimit_print ||
+ (!found && aer_ratelimit(root, info->severity)))
+ aer_print_source(root, info, found);
- if (find_source_device(pdev, &e_info))
- aer_process_err_devices(&e_info);
- }
-
- if (e_src->status & PCI_ERR_ROOT_UNCOR_RCV) {
- e_info.id = ERR_UNCOR_ID(e_src->id);
+ if (found)
+ aer_process_err_devices(info);
+}
- if (e_src->status & PCI_ERR_ROOT_FATAL_RCV)
- e_info.severity = AER_FATAL;
- else
- e_info.severity = AER_NONFATAL;
+/**
+ * aer_isr_one_error - consume error(s) signaled by an AER interrupt from
+ * Root Port or RCEC
+ * @root: pointer to Root Port or RCEC that signaled AER interrupt
+ * @e_src: pointer to an error source
+ */
+static void aer_isr_one_error(struct pci_dev *root,
+ struct aer_err_source *e_src)
+{
+ u32 status = e_src->status;
- if (e_src->status & PCI_ERR_ROOT_MULTI_UNCOR_RCV)
- e_info.multi_error_valid = 1;
- else
- e_info.multi_error_valid = 0;
+ pci_rootport_aer_stats_incr(root, e_src);
- aer_print_port_info(pdev, &e_info);
+ /*
+ * It is possible that both a correctable error and an
+ * uncorrectable error are logged. Report the correctable error first.
+ */
+ if (status & PCI_ERR_ROOT_COR_RCV) {
+ int multi = status & PCI_ERR_ROOT_MULTI_COR_RCV;
+ struct aer_err_info e_info = {
+ .id = ERR_COR_ID(e_src->id),
+ .severity = AER_CORRECTABLE,
+ .level = KERN_WARNING,
+ .multi_error_valid = multi ? 1 : 0,
+ };
+
+ aer_isr_one_error_type(root, &e_info);
+ }
- if (find_source_device(pdev, &e_info))
- aer_process_err_devices(&e_info);
+ if (status & PCI_ERR_ROOT_UNCOR_RCV) {
+ int fatal = status & PCI_ERR_ROOT_FATAL_RCV;
+ int multi = status & PCI_ERR_ROOT_MULTI_UNCOR_RCV;
+ struct aer_err_info e_info = {
+ .id = ERR_UNCOR_ID(e_src->id),
+ .severity = fatal ? AER_FATAL : AER_NONFATAL,
+ .level = KERN_ERR,
+ .multi_error_valid = multi ? 1 : 0,
+ };
+
+ aer_isr_one_error_type(root, &e_info);
}
}
/**
- * aer_isr - consume errors detected by root port
+ * aer_isr - consume errors detected by Root Port
* @irq: IRQ assigned to Root Port
* @context: pointer to Root Port data structure
*
- * Invoked, as DPC, when root port records new detected error
+ * Invoked, as a DPC, when the Root Port records a newly detected error
*/
static irqreturn_t aer_isr(int irq, void *context)
{
@@ -1335,7 +1520,7 @@ static irqreturn_t aer_isr(int irq, void *context)
return IRQ_NONE;
while (kfifo_get(&rpc->aer_fifo, &e_src))
- aer_isr_one_error(rpc, &e_src);
+ aer_isr_one_error(rpc->rpd, &e_src);
return IRQ_HANDLED;
}
@@ -1383,7 +1568,7 @@ static void aer_disable_irq(struct pci_dev *pdev)
int aer = pdev->aer_cap;
u32 reg32;
- /* Disable Root's interrupt in response to error messages */
+ /* Disable Root Port's interrupt in response to error messages */
pci_read_config_dword(pdev, aer + PCI_ERR_ROOT_COMMAND, &reg32);
reg32 &= ~ROOT_PORT_INTR_ON_MESG_MASK;
pci_write_config_dword(pdev, aer + PCI_ERR_ROOT_COMMAND, reg32);
@@ -1583,9 +1768,9 @@ static struct pcie_port_service_driver aerdriver = {
};
/**
- * pcie_aer_init - register AER root service driver
+ * pcie_aer_init - register AER service driver
*
- * Invoked when AER root service driver is loaded.
+ * Invoked when AER service driver is loaded.
*/
int __init pcie_aer_init(void)
{
diff --git a/drivers/pci/pcie/bwctrl.c b/drivers/pci/pcie/bwctrl.c
index d8d2aa85a229..36f939f23d34 100644
--- a/drivers/pci/pcie/bwctrl.c
+++ b/drivers/pci/pcie/bwctrl.c
@@ -38,24 +38,14 @@
/**
* struct pcie_bwctrl_data - PCIe bandwidth controller
* @set_speed_mutex: Serializes link speed changes
- * @lbms_count: Count for LBMS (since last reset)
* @cdev: Thermal cooling device associated with the port
*/
struct pcie_bwctrl_data {
struct mutex set_speed_mutex;
- atomic_t lbms_count;
struct thermal_cooling_device *cdev;
};
-/*
- * Prevent port removal during LBMS count accessors and Link Speed changes.
- *
- * These have to be differentiated because pcie_bwctrl_change_speed() calls
- * pcie_retrain_link() which uses LBMS count reset accessor on success
- * (using just one rwsem triggers "possible recursive locking detected"
- * warning).
- */
-static DECLARE_RWSEM(pcie_bwctrl_lbms_rwsem);
+/* Prevent port removal during Link Speed changes. */
static DECLARE_RWSEM(pcie_bwctrl_setspeed_rwsem);
static bool pcie_valid_speed(enum pci_bus_speed speed)
@@ -127,18 +117,7 @@ static int pcie_bwctrl_change_speed(struct pci_dev *port, u16 target_speed, bool
if (ret != PCIBIOS_SUCCESSFUL)
return pcibios_err_to_errno(ret);
- ret = pcie_retrain_link(port, use_lt);
- if (ret < 0)
- return ret;
-
- /*
- * Ensure link speed updates also with platforms that have problems
- * with notifications.
- */
- if (port->subordinate)
- pcie_update_link_speed(port->subordinate);
-
- return 0;
+ return pcie_retrain_link(port, use_lt);
}
/**
@@ -202,15 +181,14 @@ int pcie_set_target_speed(struct pci_dev *port, enum pci_bus_speed speed_req,
static void pcie_bwnotif_enable(struct pcie_device *srv)
{
- struct pcie_bwctrl_data *data = srv->port->link_bwctrl;
struct pci_dev *port = srv->port;
u16 link_status;
int ret;
- /* Count LBMS seen so far as one */
+ /* Note if LBMS has been seen so far */
ret = pcie_capability_read_word(port, PCI_EXP_LNKSTA, &link_status);
if (ret == PCIBIOS_SUCCESSFUL && link_status & PCI_EXP_LNKSTA_LBMS)
- atomic_inc(&data->lbms_count);
+ set_bit(PCI_LINK_LBMS_SEEN, &port->priv_flags);
pcie_capability_set_word(port, PCI_EXP_LNKCTL,
PCI_EXP_LNKCTL_LBMIE | PCI_EXP_LNKCTL_LABIE);
@@ -233,7 +211,6 @@ static void pcie_bwnotif_disable(struct pci_dev *port)
static irqreturn_t pcie_bwnotif_irq(int irq, void *context)
{
struct pcie_device *srv = context;
- struct pcie_bwctrl_data *data = srv->port->link_bwctrl;
struct pci_dev *port = srv->port;
u16 link_status, events;
int ret;
@@ -247,7 +224,7 @@ static irqreturn_t pcie_bwnotif_irq(int irq, void *context)
return IRQ_NONE;
if (events & PCI_EXP_LNKSTA_LBMS)
- atomic_inc(&data->lbms_count);
+ set_bit(PCI_LINK_LBMS_SEEN, &port->priv_flags);
pcie_capability_write_word(port, PCI_EXP_LNKSTA, events);
@@ -262,31 +239,10 @@ static irqreturn_t pcie_bwnotif_irq(int irq, void *context)
return IRQ_HANDLED;
}
-void pcie_reset_lbms_count(struct pci_dev *port)
+void pcie_reset_lbms(struct pci_dev *port)
{
- struct pcie_bwctrl_data *data;
-
- guard(rwsem_read)(&pcie_bwctrl_lbms_rwsem);
- data = port->link_bwctrl;
- if (data)
- atomic_set(&data->lbms_count, 0);
- else
- pcie_capability_write_word(port, PCI_EXP_LNKSTA,
- PCI_EXP_LNKSTA_LBMS);
-}
-
-int pcie_lbms_count(struct pci_dev *port, unsigned long *val)
-{
- struct pcie_bwctrl_data *data;
-
- guard(rwsem_read)(&pcie_bwctrl_lbms_rwsem);
- data = port->link_bwctrl;
- if (!data)
- return -ENOTTY;
-
- *val = atomic_read(&data->lbms_count);
-
- return 0;
+ clear_bit(PCI_LINK_LBMS_SEEN, &port->priv_flags);
+ pcie_capability_write_word(port, PCI_EXP_LNKSTA, PCI_EXP_LNKSTA_LBMS);
}
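
[Editor's note: the IRQ handler and pcie_reset_lbms() coordinate through one
atomic bit in port->priv_flags. A runnable C11 sketch of that
set_bit()/test_bit()/clear_bit() pattern, with C11 atomics standing in for the
kernel bitops.]

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define LINK_LBMS_SEEN 6	/* bit index, mirroring PCI_LINK_LBMS_SEEN */

static atomic_ulong priv_flags;

static void set_flag(int nr)   { atomic_fetch_or(&priv_flags, 1UL << nr); }
static void clear_flag(int nr) { atomic_fetch_and(&priv_flags, ~(1UL << nr)); }
static bool test_flag(int nr)  { return atomic_load(&priv_flags) & (1UL << nr); }

int main(void)
{
	set_flag(LINK_LBMS_SEEN);			/* IRQ handler saw LBMS */
	printf("%d\n", test_flag(LINK_LBMS_SEEN));	/* 1 */
	clear_flag(LINK_LBMS_SEEN);			/* like pcie_reset_lbms() */
	printf("%d\n", test_flag(LINK_LBMS_SEEN));	/* 0 */
	return 0;
}
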
static int pcie_bwnotif_probe(struct pcie_device *srv)
@@ -308,18 +264,16 @@ static int pcie_bwnotif_probe(struct pcie_device *srv)
return ret;
scoped_guard(rwsem_write, &pcie_bwctrl_setspeed_rwsem) {
- scoped_guard(rwsem_write, &pcie_bwctrl_lbms_rwsem) {
- port->link_bwctrl = data;
-
- ret = request_irq(srv->irq, pcie_bwnotif_irq,
- IRQF_SHARED, "PCIe bwctrl", srv);
- if (ret) {
- port->link_bwctrl = NULL;
- return ret;
- }
+ port->link_bwctrl = data;
- pcie_bwnotif_enable(srv);
+ ret = request_irq(srv->irq, pcie_bwnotif_irq,
+ IRQF_SHARED, "PCIe bwctrl", srv);
+ if (ret) {
+ port->link_bwctrl = NULL;
+ return ret;
}
+
+ pcie_bwnotif_enable(srv);
}
pci_dbg(port, "enabled with IRQ %d\n", srv->irq);
@@ -339,13 +293,11 @@ static void pcie_bwnotif_remove(struct pcie_device *srv)
pcie_cooling_device_unregister(data->cdev);
scoped_guard(rwsem_write, &pcie_bwctrl_setspeed_rwsem) {
- scoped_guard(rwsem_write, &pcie_bwctrl_lbms_rwsem) {
- pcie_bwnotif_disable(srv->port);
+ pcie_bwnotif_disable(srv->port);
- free_irq(srv->irq, srv);
+ free_irq(srv->irq, srv);
- srv->port->link_bwctrl = NULL;
- }
+ srv->port->link_bwctrl = NULL;
}
}
diff --git a/drivers/pci/pcie/dpc.c b/drivers/pci/pcie/dpc.c
index 242cabd5eeeb..fc18349614d7 100644
--- a/drivers/pci/pcie/dpc.c
+++ b/drivers/pci/pcie/dpc.c
@@ -219,8 +219,10 @@ static void dpc_process_rp_pio_error(struct pci_dev *pdev)
goto clear_status;
pcie_read_tlp_log(pdev, cap + PCI_EXP_DPC_RP_PIO_HEADER_LOG,
cap + PCI_EXP_DPC_RP_PIO_TLPPREFIX_LOG,
- dpc_tlp_log_len(pdev), &tlp_log);
- pcie_print_tlp_log(pdev, &tlp_log, dev_fmt(""));
+ dpc_tlp_log_len(pdev),
+ pdev->subordinate->flit_mode,
+ &tlp_log);
+ pcie_print_tlp_log(pdev, &tlp_log, KERN_ERR, dev_fmt(""));
if (pdev->dpc_rp_log_size < PCIE_STD_NUM_TLP_HEADERLOG + 1)
goto clear_status;
@@ -250,46 +252,59 @@ static int dpc_get_aer_uncorrect_severity(struct pci_dev *dev,
else
info->severity = AER_NONFATAL;
+ info->level = KERN_ERR;
+
+ info->dev[0] = dev;
+ info->error_dev_num = 1;
+
return 1;
}
void dpc_process_error(struct pci_dev *pdev)
{
u16 cap = pdev->dpc_cap, status, source, reason, ext_reason;
- struct aer_err_info info;
+ struct aer_err_info info = {};
pci_read_config_word(pdev, cap + PCI_EXP_DPC_STATUS, &status);
- pci_read_config_word(pdev, cap + PCI_EXP_DPC_SOURCE_ID, &source);
-
- pci_info(pdev, "containment event, status:%#06x source:%#06x\n",
- status, source);
reason = status & PCI_EXP_DPC_STATUS_TRIGGER_RSN;
- ext_reason = status & PCI_EXP_DPC_STATUS_TRIGGER_RSN_EXT;
- pci_warn(pdev, "%s detected\n",
- (reason == PCI_EXP_DPC_STATUS_TRIGGER_RSN_UNCOR) ?
- "unmasked uncorrectable error" :
- (reason == PCI_EXP_DPC_STATUS_TRIGGER_RSN_NFE) ?
- "ERR_NONFATAL" :
- (reason == PCI_EXP_DPC_STATUS_TRIGGER_RSN_FE) ?
- "ERR_FATAL" :
- (ext_reason == PCI_EXP_DPC_STATUS_TRIGGER_RSN_RP_PIO) ?
- "RP PIO error" :
- (ext_reason == PCI_EXP_DPC_STATUS_TRIGGER_RSN_SW_TRIGGER) ?
- "software trigger" :
- "reserved error");
-
- /* show RP PIO error detail information */
- if (pdev->dpc_rp_extensions &&
- reason == PCI_EXP_DPC_STATUS_TRIGGER_RSN_IN_EXT &&
- ext_reason == PCI_EXP_DPC_STATUS_TRIGGER_RSN_RP_PIO)
- dpc_process_rp_pio_error(pdev);
- else if (reason == PCI_EXP_DPC_STATUS_TRIGGER_RSN_UNCOR &&
- dpc_get_aer_uncorrect_severity(pdev, &info) &&
- aer_get_device_error_info(pdev, &info)) {
- aer_print_error(pdev, &info);
- pci_aer_clear_nonfatal_status(pdev);
- pci_aer_clear_fatal_status(pdev);
+
+ switch (reason) {
+ case PCI_EXP_DPC_STATUS_TRIGGER_RSN_UNCOR:
+ pci_warn(pdev, "containment event, status:%#06x: unmasked uncorrectable error detected\n",
+ status);
+ if (dpc_get_aer_uncorrect_severity(pdev, &info) &&
+ aer_get_device_error_info(&info, 0)) {
+ aer_print_error(&info, 0);
+ pci_aer_clear_nonfatal_status(pdev);
+ pci_aer_clear_fatal_status(pdev);
+ }
+ break;
+ case PCI_EXP_DPC_STATUS_TRIGGER_RSN_NFE:
+ case PCI_EXP_DPC_STATUS_TRIGGER_RSN_FE:
+ pci_read_config_word(pdev, cap + PCI_EXP_DPC_SOURCE_ID,
+ &source);
+ pci_warn(pdev, "containment event, status:%#06x, %s received from %04x:%02x:%02x.%d\n",
+ status,
+ (reason == PCI_EXP_DPC_STATUS_TRIGGER_RSN_FE) ?
+ "ERR_FATAL" : "ERR_NONFATAL",
+ pci_domain_nr(pdev->bus), PCI_BUS_NUM(source),
+ PCI_SLOT(source), PCI_FUNC(source));
+ break;
+ case PCI_EXP_DPC_STATUS_TRIGGER_RSN_IN_EXT:
+ ext_reason = status & PCI_EXP_DPC_STATUS_TRIGGER_RSN_EXT;
+ pci_warn(pdev, "containment event, status:%#06x: %s detected\n",
+ status,
+ (ext_reason == PCI_EXP_DPC_STATUS_TRIGGER_RSN_RP_PIO) ?
+ "RP PIO error" :
+ (ext_reason == PCI_EXP_DPC_STATUS_TRIGGER_RSN_SW_TRIGGER) ?
+ "software trigger" :
+ "reserved error");
+ /* show RP PIO error detail information */
+ if (ext_reason == PCI_EXP_DPC_STATUS_TRIGGER_RSN_RP_PIO &&
+ pdev->dpc_rp_extensions)
+ dpc_process_rp_pio_error(pdev);
+ break;
}
}
@@ -398,11 +413,21 @@ void pci_dpc_init(struct pci_dev *pdev)
/* Quirks may set dpc_rp_log_size if device or firmware is buggy */
if (!pdev->dpc_rp_log_size) {
+ u16 flags;
+ int ret;
+
+ ret = pcie_capability_read_word(pdev, PCI_EXP_FLAGS, &flags);
+ if (ret)
+ return;
+
pdev->dpc_rp_log_size =
FIELD_GET(PCI_EXP_DPC_RP_PIO_LOG_SIZE, cap);
+ if (FIELD_GET(PCI_EXP_FLAGS_FLIT, flags))
+ pdev->dpc_rp_log_size += FIELD_GET(PCI_EXP_DPC_RP_PIO_LOG_SIZE4,
+ cap) << 4;
+
if (pdev->dpc_rp_log_size < PCIE_STD_NUM_TLP_HEADERLOG ||
- pdev->dpc_rp_log_size > PCIE_STD_NUM_TLP_HEADERLOG + 1 +
- PCIE_STD_MAX_TLP_PREFIXLOG) {
+ pdev->dpc_rp_log_size > PCIE_STD_MAX_TLP_HEADERLOG + 1) {
pci_err(pdev, "RP PIO log size %u is invalid\n",
pdev->dpc_rp_log_size);
pdev->dpc_rp_log_size = 0;
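
[Editor's note: a runnable sketch of the RP PIO log-size computation above: a
4-bit size field, plus a fifth bit that is only meaningful in Flit mode and
shifts in as bit 4. The mask positions below are assumptions for illustration;
field_get() mimics FIELD_GET() using the GCC/Clang __builtin_ctz().]

#include <stdint.h>
#include <stdio.h>

#define LOG_SIZE_MASK  0x0f00u	/* 4-bit RP PIO Log Size (position assumed) */
#define LOG_SIZE4_MASK 0x2000u	/* extra size bit (position assumed) */

static unsigned int field_get(uint32_t mask, uint32_t reg)
{
	return (reg & mask) >> __builtin_ctz(mask);
}

int main(void)
{
	uint32_t cap = 0x2500;	/* example: size nibble 5, size4 bit set */
	int flit = 1;		/* as if PCI_EXP_FLAGS_FLIT were set */
	unsigned int log_size = field_get(LOG_SIZE_MASK, cap);

	if (flit)
		log_size += field_get(LOG_SIZE4_MASK, cap) << 4;

	printf("RP PIO log size: %u DWORDs\n", log_size);	/* 21 */
	return 0;
}
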
diff --git a/drivers/pci/pcie/err.c b/drivers/pci/pcie/err.c
index 31090770fffc..de6381c690f5 100644
--- a/drivers/pci/pcie/err.c
+++ b/drivers/pci/pcie/err.c
@@ -271,7 +271,6 @@ failed:
pci_uevent_ers(bridge, PCI_ERS_RESULT_DISCONNECT);
- /* TODO: Should kernel panic here? */
pci_info(bridge, "device recovery failed\n");
return status;
diff --git a/drivers/pci/pcie/ptm.c b/drivers/pci/pcie/ptm.c
index 7cfb6c0d5dcb..4bd73f038ffb 100644
--- a/drivers/pci/pcie/ptm.c
+++ b/drivers/pci/pcie/ptm.c
@@ -5,6 +5,7 @@
*/
#include <linux/bitfield.h>
+#include <linux/debugfs.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/pci.h>
@@ -252,3 +253,304 @@ bool pcie_ptm_enabled(struct pci_dev *dev)
return dev->ptm_enabled;
}
EXPORT_SYMBOL(pcie_ptm_enabled);
+
+#if IS_ENABLED(CONFIG_DEBUG_FS)
+static ssize_t context_update_write(struct file *file, const char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ struct pci_ptm_debugfs *ptm_debugfs = file->private_data;
+ char buf[8]; /* room for "manual" plus a trailing newline */
+ int ret;
+ u8 mode;
+
+ if (!ptm_debugfs->ops->context_update_write)
+ return -EOPNOTSUPP;
+
+ if (count < 1 || count >= sizeof(buf))
+ return -EINVAL;
+
+ ret = copy_from_user(buf, ubuf, count);
+ if (ret)
+ return -EFAULT;
+
+ buf[count] = '\0';
+
+ if (sysfs_streq(buf, "auto"))
+ mode = PCIE_PTM_CONTEXT_UPDATE_AUTO;
+ else if (sysfs_streq(buf, "manual"))
+ mode = PCIE_PTM_CONTEXT_UPDATE_MANUAL;
+ else
+ return -EINVAL;
+
+ mutex_lock(&ptm_debugfs->lock);
+ ret = ptm_debugfs->ops->context_update_write(ptm_debugfs->pdata, mode);
+ mutex_unlock(&ptm_debugfs->lock);
+ if (ret)
+ return ret;
+
+ return count;
+}
+
+static ssize_t context_update_read(struct file *file, char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ struct pci_ptm_debugfs *ptm_debugfs = file->private_data;
+ char buf[8]; /* Extra space for NUL termination at the end */
+ ssize_t pos;
+ u8 mode;
+
+ if (!ptm_debugfs->ops->context_update_read)
+ return -EOPNOTSUPP;
+
+ mutex_lock(&ptm_debugfs->lock);
+ ptm_debugfs->ops->context_update_read(ptm_debugfs->pdata, &mode);
+ mutex_unlock(&ptm_debugfs->lock);
+
+ if (mode == PCIE_PTM_CONTEXT_UPDATE_AUTO)
+ pos = scnprintf(buf, sizeof(buf), "auto\n");
+ else
+ pos = scnprintf(buf, sizeof(buf), "manual\n");
+
+ return simple_read_from_buffer(ubuf, count, ppos, buf, pos);
+}
+
+static const struct file_operations context_update_fops = {
+ .open = simple_open,
+ .read = context_update_read,
+ .write = context_update_write,
+};
+
+static int context_valid_get(void *data, u64 *val)
+{
+ struct pci_ptm_debugfs *ptm_debugfs = data;
+ bool valid;
+ int ret;
+
+ if (!ptm_debugfs->ops->context_valid_read)
+ return -EOPNOTSUPP;
+
+ mutex_lock(&ptm_debugfs->lock);
+ ret = ptm_debugfs->ops->context_valid_read(ptm_debugfs->pdata, &valid);
+ mutex_unlock(&ptm_debugfs->lock);
+ if (ret)
+ return ret;
+
+ *val = valid;
+
+ return 0;
+}
+
+static int context_valid_set(void *data, u64 val)
+{
+ struct pci_ptm_debugfs *ptm_debugfs = data;
+ int ret;
+
+ if (!ptm_debugfs->ops->context_valid_write)
+ return -EOPNOTSUPP;
+
+ mutex_lock(&ptm_debugfs->lock);
+ ret = ptm_debugfs->ops->context_valid_write(ptm_debugfs->pdata, !!val);
+ mutex_unlock(&ptm_debugfs->lock);
+
+ return ret;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(context_valid_fops, context_valid_get,
+ context_valid_set, "%llu\n");
+
+static int local_clock_get(void *data, u64 *val)
+{
+ struct pci_ptm_debugfs *ptm_debugfs = data;
+ u64 clock;
+ int ret;
+
+ if (!ptm_debugfs->ops->local_clock_read)
+ return -EOPNOTSUPP;
+
+ ret = ptm_debugfs->ops->local_clock_read(ptm_debugfs->pdata, &clock);
+ if (ret)
+ return ret;
+
+ *val = clock;
+
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(local_clock_fops, local_clock_get, NULL, "%llu\n");
+
+static int master_clock_get(void *data, u64 *val)
+{
+ struct pci_ptm_debugfs *ptm_debugfs = data;
+ u64 clock;
+ int ret;
+
+ if (!ptm_debugfs->ops->master_clock_read)
+ return -EOPNOTSUPP;
+
+ ret = ptm_debugfs->ops->master_clock_read(ptm_debugfs->pdata, &clock);
+ if (ret)
+ return ret;
+
+ *val = clock;
+
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(master_clock_fops, master_clock_get, NULL, "%llu\n");
+
+static int t1_get(void *data, u64 *val)
+{
+ struct pci_ptm_debugfs *ptm_debugfs = data;
+ u64 clock;
+ int ret;
+
+ if (!ptm_debugfs->ops->t1_read)
+ return -EOPNOTSUPP;
+
+ ret = ptm_debugfs->ops->t1_read(ptm_debugfs->pdata, &clock);
+ if (ret)
+ return ret;
+
+ *val = clock;
+
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(t1_fops, t1_get, NULL, "%llu\n");
+
+static int t2_get(void *data, u64 *val)
+{
+ struct pci_ptm_debugfs *ptm_debugfs = data;
+ u64 clock;
+ int ret;
+
+ if (!ptm_debugfs->ops->t2_read)
+ return -EOPNOTSUPP;
+
+ ret = ptm_debugfs->ops->t2_read(ptm_debugfs->pdata, &clock);
+ if (ret)
+ return ret;
+
+ *val = clock;
+
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(t2_fops, t2_get, NULL, "%llu\n");
+
+static int t3_get(void *data, u64 *val)
+{
+ struct pci_ptm_debugfs *ptm_debugfs = data;
+ u64 clock;
+ int ret;
+
+ if (!ptm_debugfs->ops->t3_read)
+ return -EOPNOTSUPP;
+
+ ret = ptm_debugfs->ops->t3_read(ptm_debugfs->pdata, &clock);
+ if (ret)
+ return ret;
+
+ *val = clock;
+
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(t3_fops, t3_get, NULL, "%llu\n");
+
+static int t4_get(void *data, u64 *val)
+{
+ struct pci_ptm_debugfs *ptm_debugfs = data;
+ u64 clock;
+ int ret;
+
+ if (!ptm_debugfs->ops->t4_read)
+ return -EOPNOTSUPP;
+
+ ret = ptm_debugfs->ops->t4_read(ptm_debugfs->pdata, &clock);
+ if (ret)
+ return ret;
+
+ *val = clock;
+
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(t4_fops, t4_get, NULL, "%llu\n");
+
+#define pcie_ptm_create_debugfs_file(pdata, mode, attr) \
+ do { \
+ if (ops->attr##_visible && ops->attr##_visible(pdata)) \
+ debugfs_create_file(#attr, mode, ptm_debugfs->debugfs, \
+ ptm_debugfs, &attr##_fops); \
+ } while (0)
+
+/**
+ * pcie_ptm_create_debugfs() - Create debugfs entries for the PTM context
+ * @dev: PTM capable component device
+ * @pdata: Private data of the PTM capable component device
+ * @ops: PTM callback structure
+ *
+ * Create debugfs entries for exposing the PTM context of the PTM capable
+ * components such as Root Complex and Endpoint controllers.
+ *
+ * Return: Pointer to 'struct pci_ptm_debugfs' if success, NULL otherwise.
+ */
+struct pci_ptm_debugfs *pcie_ptm_create_debugfs(struct device *dev, void *pdata,
+ const struct pcie_ptm_ops *ops)
+{
+ struct pci_ptm_debugfs *ptm_debugfs;
+ char *dirname;
+ int ret;
+
+ /* Caller must provide check_capability() callback */
+ if (!ops->check_capability)
+ return NULL;
+
+ /* Check for PTM capability before creating debugfs attributes */
+ ret = ops->check_capability(pdata);
+ if (!ret) {
+ dev_dbg(dev, "PTM capability not present\n");
+ return NULL;
+ }
+
+ ptm_debugfs = kzalloc(sizeof(*ptm_debugfs), GFP_KERNEL);
+ if (!ptm_debugfs)
+ return NULL;
+
+ dirname = devm_kasprintf(dev, GFP_KERNEL, "pcie_ptm_%s", dev_name(dev));
+ if (!dirname) {
+ kfree(ptm_debugfs);
+ return NULL;
+ }
+
+ ptm_debugfs->debugfs = debugfs_create_dir(dirname, NULL);
+ ptm_debugfs->pdata = pdata;
+ ptm_debugfs->ops = ops;
+ mutex_init(&ptm_debugfs->lock);
+
+ pcie_ptm_create_debugfs_file(pdata, 0644, context_update);
+ pcie_ptm_create_debugfs_file(pdata, 0644, context_valid);
+ pcie_ptm_create_debugfs_file(pdata, 0444, local_clock);
+ pcie_ptm_create_debugfs_file(pdata, 0444, master_clock);
+ pcie_ptm_create_debugfs_file(pdata, 0444, t1);
+ pcie_ptm_create_debugfs_file(pdata, 0444, t2);
+ pcie_ptm_create_debugfs_file(pdata, 0444, t3);
+ pcie_ptm_create_debugfs_file(pdata, 0444, t4);
+
+ return ptm_debugfs;
+}
+EXPORT_SYMBOL_GPL(pcie_ptm_create_debugfs);
+
+/**
+ * pcie_ptm_destroy_debugfs() - Destroy debugfs entries for the PTM context
+ * @ptm_debugfs: Pointer to the PTM debugfs struct
+ */
+void pcie_ptm_destroy_debugfs(struct pci_ptm_debugfs *ptm_debugfs)
+{
+ if (!ptm_debugfs)
+ return;
+
+ mutex_destroy(&ptm_debugfs->lock);
+ debugfs_remove_recursive(ptm_debugfs->debugfs);
+}
+EXPORT_SYMBOL_GPL(pcie_ptm_destroy_debugfs);
+#endif
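
[Editor's note: pcie_ptm_create_debugfs_file() builds identifiers by token
pasting, pairing each attribute with an optional <attr>_visible callback and
an <attr>_fops handler. A runnable sketch of that macro pattern; struct ops,
yes() and the attribute names are invented for illustration.]

#include <stdbool.h>
#include <stdio.h>

struct ops {
	bool (*local_clock_visible)(void *);
	bool (*t1_visible)(void *);
};

static bool yes(void *p) { (void)p; return true; }

/* Only "create" the file when the optional _visible callback allows it */
#define create_file(ops, pdata, attr)					\
	do {								\
		if ((ops)->attr##_visible && (ops)->attr##_visible(pdata)) \
			printf("created %s\n", #attr);			\
	} while (0)

int main(void)
{
	struct ops ops = { .local_clock_visible = yes };	/* t1 stays hidden */

	create_file(&ops, NULL, local_clock);	/* prints "created local_clock" */
	create_file(&ops, NULL, t1);		/* no output: callback is NULL */
	return 0;
}
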
diff --git a/drivers/pci/pcie/tlp.c b/drivers/pci/pcie/tlp.c
index 0860b5da837f..71f8fc9ea2ed 100644
--- a/drivers/pci/pcie/tlp.c
+++ b/drivers/pci/pcie/tlp.c
@@ -7,6 +7,7 @@
#include <linux/aer.h>
#include <linux/array_size.h>
+#include <linux/bitfield.h>
#include <linux/pci.h>
#include <linux/string.h>
@@ -21,6 +22,9 @@
*/
unsigned int aer_tlp_log_len(struct pci_dev *dev, u32 aercc)
{
+ if (aercc & PCI_ERR_CAP_TLP_LOG_FLIT)
+ return FIELD_GET(PCI_ERR_CAP_TLP_LOG_SIZE, aercc);
+
return PCIE_STD_NUM_TLP_HEADERLOG +
((aercc & PCI_ERR_CAP_PREFIX_LOG_PRESENT) ?
dev->eetlp_prefix_max : 0);
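
[Editor's note: a runnable sketch of the length logic in aer_tlp_log_len()
above. In Flit mode the length comes straight from the AER Capabilities and
Control size field; otherwise it is the fixed 4-DWORD header plus any End-End
Prefix DWORDs. The parameter names are illustrative, not kernel identifiers.]

#include <stdbool.h>
#include <stdio.h>

#define STD_NUM_TLP_HEADERLOG 4	/* PCIE_STD_NUM_TLP_HEADERLOG */

static unsigned int tlp_log_len(bool flit, unsigned int logged_size,
				bool prefix_present, unsigned int prefix_max)
{
	if (flit)
		return logged_size;	/* size field read from AERCC */
	return STD_NUM_TLP_HEADERLOG + (prefix_present ? prefix_max : 0);
}

int main(void)
{
	printf("%u\n", tlp_log_len(true, 10, false, 0));	/* Flit: 10 */
	printf("%u\n", tlp_log_len(false, 0, true, 2));		/* non-Flit: 6 */
	return 0;
}
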
@@ -49,6 +53,7 @@ unsigned int dpc_tlp_log_len(struct pci_dev *dev)
* @where: PCI Config offset of TLP Header Log
* @where2: PCI Config offset of TLP Prefix Log
* @tlp_len: TLP Log length (Header Log + TLP Prefix Log in DWORDs)
+ * @flit: TLP Logged in Flit mode
* @log: TLP Log structure to fill
*
* Fill @log from TLP Header Log registers, e.g., AER or DPC.
@@ -56,28 +61,34 @@ unsigned int dpc_tlp_log_len(struct pci_dev *dev)
* Return: 0 on success and filled TLP Log structure, <0 on error.
*/
int pcie_read_tlp_log(struct pci_dev *dev, int where, int where2,
- unsigned int tlp_len, struct pcie_tlp_log *log)
+ unsigned int tlp_len, bool flit, struct pcie_tlp_log *log)
{
unsigned int i;
int off, ret;
- u32 *to;
+
+ if (tlp_len > ARRAY_SIZE(log->dw))
+ tlp_len = ARRAY_SIZE(log->dw);
memset(log, 0, sizeof(*log));
for (i = 0; i < tlp_len; i++) {
- if (i < PCIE_STD_NUM_TLP_HEADERLOG) {
+ if (i < PCIE_STD_NUM_TLP_HEADERLOG)
off = where + i * 4;
- to = &log->dw[i];
- } else {
+ else
off = where2 + (i - PCIE_STD_NUM_TLP_HEADERLOG) * 4;
- to = &log->prefix[i - PCIE_STD_NUM_TLP_HEADERLOG];
- }
- ret = pci_read_config_dword(dev, off, to);
+ ret = pci_read_config_dword(dev, off, &log->dw[i]);
if (ret)
return pcibios_err_to_errno(ret);
}
+ /*
+ * Hard-code non-Flit mode to 4 DWORDs, for now. The exact length
+ * can only be known if the TLP is parsed.
+ */
+ log->header_len = flit ? tlp_len : 4;
+ log->flit = flit;
+
return 0;
}
@@ -87,29 +98,40 @@ int pcie_read_tlp_log(struct pci_dev *dev, int where, int where2,
* pcie_print_tlp_log - Print TLP Header / Prefix Log contents
* @dev: PCIe device
* @log: TLP Log structure
+ * @level: Printk log level
* @pfx: String prefix
*
* Prints TLP Header and Prefix Log information held by @log.
*/
void pcie_print_tlp_log(const struct pci_dev *dev,
- const struct pcie_tlp_log *log, const char *pfx)
+ const struct pcie_tlp_log *log, const char *level,
+ const char *pfx)
{
- char buf[11 * (PCIE_STD_NUM_TLP_HEADERLOG + ARRAY_SIZE(log->prefix)) +
- sizeof(EE_PREFIX_STR)];
+ /* EE_PREFIX_STR fits the extended DW space needed for the Flit mode */
+ char buf[11 * PCIE_STD_MAX_TLP_HEADERLOG + 1];
unsigned int i;
int len;
len = scnprintf(buf, sizeof(buf), "%#010x %#010x %#010x %#010x",
log->dw[0], log->dw[1], log->dw[2], log->dw[3]);
- if (log->prefix[0])
- len += scnprintf(buf + len, sizeof(buf) - len, EE_PREFIX_STR);
- for (i = 0; i < ARRAY_SIZE(log->prefix); i++) {
- if (!log->prefix[i])
- break;
- len += scnprintf(buf + len, sizeof(buf) - len,
- " %#010x", log->prefix[i]);
+ if (log->flit) {
+ for (i = PCIE_STD_NUM_TLP_HEADERLOG; i < log->header_len; i++) {
+ len += scnprintf(buf + len, sizeof(buf) - len,
+ " %#010x", log->dw[i]);
+ }
+ } else {
+ if (log->prefix[0])
+ len += scnprintf(buf + len, sizeof(buf) - len,
+ EE_PREFIX_STR);
+ for (i = 0; i < ARRAY_SIZE(log->prefix); i++) {
+ if (!log->prefix[i])
+ break;
+ len += scnprintf(buf + len, sizeof(buf) - len,
+ " %#010x", log->prefix[i]);
+ }
}
- pci_err(dev, "%sTLP Header: %s\n", pfx, buf);
+ dev_printk(level, &dev->dev, "%sTLP Header%s: %s\n", pfx,
+ log->flit ? " (Flit)" : "", buf);
}
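As a worked example of the new Flit-mode branch in aer_tlp_log_len(): with PCI_ERR_CAP_TLP_LOG_FLIT set and a Logged TLP Size of 6, the whole log is carried in log->dw[] and there is no separate prefix log.

	/* Sketch only; 'dev' stands for any struct pci_dev with AER. */
	u32 aercc = PCI_ERR_CAP_TLP_LOG_FLIT |
		    FIELD_PREP(PCI_ERR_CAP_TLP_LOG_SIZE, 6);

	aer_tlp_log_len(dev, aercc);	/* returns 6 DWORDs */

Without the Flit bit, the function still returns the 4 standard Header Log DWORDs plus dev->eetlp_prefix_max when PCI_ERR_CAP_PREFIX_LOG_PRESENT is set, exactly as before.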
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index 3da48c13d9cc..509421a7e501 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -9,6 +9,8 @@
#include <linux/pci.h>
#include <linux/msi.h>
#include <linux/of_pci.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
#include <linux/pci_hotplug.h>
#include <linux/slab.h>
#include <linux/module.h>
@@ -789,10 +791,11 @@ EXPORT_SYMBOL_GPL(pci_speed_string);
void pcie_update_link_speed(struct pci_bus *bus)
{
struct pci_dev *bridge = bus->self;
- u16 linksta;
+ u16 linksta, linksta2;
pcie_capability_read_word(bridge, PCI_EXP_LNKSTA, &linksta);
- __pcie_update_link_speed(bus, linksta);
+ pcie_capability_read_word(bridge, PCI_EXP_LNKSTA2, &linksta2);
+ __pcie_update_link_speed(bus, linksta, linksta2);
}
EXPORT_SYMBOL_GPL(pcie_update_link_speed);
@@ -1096,6 +1099,8 @@ static int pci_register_host_bridge(struct pci_host_bridge *bridge)
dev_info(&bus->dev, "root bus resource %pR%s\n", res, addr);
}
+ of_pci_make_host_bridge_node(bridge);
+
down_write(&pci_bus_sem);
list_add_tail(&bus->node, &pci_root_buses);
up_write(&pci_bus_sem);
@@ -1673,7 +1678,7 @@ void set_pcie_hotplug_bridge(struct pci_dev *pdev)
pcie_capability_read_dword(pdev, PCI_EXP_SLTCAP, &reg32);
if (reg32 & PCI_EXP_SLTCAP_HPC)
- pdev->is_hotplug_bridge = 1;
+ pdev->is_hotplug_bridge = pdev->is_pciehp = 1;
}
static void set_pcie_thunderbolt(struct pci_dev *dev)
@@ -2053,7 +2058,7 @@ int pci_setup_device(struct pci_dev *dev)
if (class == PCI_CLASS_BRIDGE_PCI)
goto bad;
pci_read_irq(dev);
- pci_read_bases(dev, 6, PCI_ROM_ADDRESS);
+ pci_read_bases(dev, PCI_STD_NUM_BARS, PCI_ROM_ADDRESS);
pci_subsystem_ids(dev, &dev->subsystem_vendor, &dev->subsystem_device);
@@ -2503,6 +2508,43 @@ bool pci_bus_read_dev_vendor_id(struct pci_bus *bus, int devfn, u32 *l,
}
EXPORT_SYMBOL(pci_bus_read_dev_vendor_id);
+#if IS_ENABLED(CONFIG_PCI_PWRCTRL)
+static struct platform_device *pci_pwrctrl_create_device(struct pci_bus *bus, int devfn)
+{
+ struct pci_host_bridge *host = pci_find_host_bridge(bus);
+ struct platform_device *pdev;
+ struct device_node *np;
+
+ np = of_pci_find_child_device(dev_of_node(&bus->dev), devfn);
+ if (!np || of_find_device_by_node(np))
+ return NULL;
+
+ /*
+ * First check whether the pwrctrl device really needs to be created,
+ * i.e. whether at least one power supply is defined in the devicetree
+ * node of the device.
+ */
+ if (!of_pci_supply_present(np)) {
+ pr_debug("PCI/pwrctrl: Skipping OF node: %s\n", np->name);
+ return NULL;
+ }
+
+ /* Now create the pwrctrl device */
+ pdev = of_platform_device_create(np, NULL, &host->dev);
+ if (!pdev) {
+ pr_err("PCI/pwrctrl: Failed to create pwrctrl device for node: %s\n", np->name);
+ return NULL;
+ }
+
+ return pdev;
+}
+#else
+static struct platform_device *pci_pwrctrl_create_device(struct pci_bus *bus, int devfn)
+{
+ return NULL;
+}
+#endif
+
/*
* Read the config data for a PCI device, sanity-check it,
* and fill in the dev structure.
@@ -2512,6 +2554,15 @@ static struct pci_dev *pci_scan_device(struct pci_bus *bus, int devfn)
struct pci_dev *dev;
u32 l;
+ /*
+ * Create pwrctrl device (if required) for the PCI device to handle the
+ * power state. If the pwrctrl device is created, then skip scanning
+ * further as the pwrctrl core will rescan the bus after powering on
+ * the device.
+ */
+ if (pci_pwrctrl_create_device(bus, devfn))
+ return NULL;
+
if (!pci_bus_read_dev_vendor_id(bus, devfn, &l, 60*1000))
return NULL;
@@ -2574,6 +2625,7 @@ static void pci_init_capabilities(struct pci_dev *dev)
pci_rcec_init(dev); /* Root Complex Event Collector */
pci_doe_init(dev); /* Data Object Exchange */
pci_tph_init(dev); /* TLP Processing Hints */
+ pci_rebar_init(dev); /* Resizable BAR */
pcie_report_downtraining(dev);
pci_init_reset_methods(dev);
@@ -2666,11 +2718,12 @@ void pci_device_add(struct pci_dev *dev, struct pci_bus *bus)
pci_set_msi_domain(dev);
/* Notifier could use PCI capabilities */
- dev->match_driver = false;
ret = device_add(&dev->dev);
WARN_ON(ret < 0);
pci_npem_create(dev);
+
+ pci_doe_sysfs_init(dev);
}
struct pci_dev *pci_scan_single_device(struct pci_bus *bus, int devfn)
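The supply check above is delegated to of_pci_supply_present(). A plausible reading of that helper, assuming it merely scans the node for *-supply properties (an assumption for illustration, not the verbatim implementation):

	/* Sketch under the stated assumption. */
	static bool example_supply_present(struct device_node *np)
	{
		struct property *prop;

		if (!np)
			return false;

		for_each_property_of_node(np, prop) {
			/* regulator bindings name consumers '<name>-supply' */
			if (strstr(prop->name, "-supply"))
				return true;
		}
		return false;
	}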
diff --git a/drivers/pci/proc.c b/drivers/pci/proc.c
index f967709082d6..9348a0fb8084 100644
--- a/drivers/pci/proc.c
+++ b/drivers/pci/proc.c
@@ -251,6 +251,10 @@ static int proc_bus_pci_mmap(struct file *file, struct vm_area_struct *vma)
security_locked_down(LOCKDOWN_PCI_ACCESS))
return -EPERM;
+ /* Skip devices with non-mappable BARs */
+ if (dev->non_mappable_bars)
+ return -EINVAL;
+
if (fpriv->mmap_state == pci_mmap_io) {
if (!arch_can_pci_mmap_io())
return -EINVAL;
diff --git a/drivers/pci/pwrctrl/Kconfig b/drivers/pci/pwrctrl/Kconfig
index 54589bb2403b..6956c1854811 100644
--- a/drivers/pci/pwrctrl/Kconfig
+++ b/drivers/pci/pwrctrl/Kconfig
@@ -1,12 +1,33 @@
# SPDX-License-Identifier: GPL-2.0-only
-config HAVE_PWRCTL
+config HAVE_PWRCTRL
bool
-config PCI_PWRCTL
+config PCI_PWRCTRL
tristate
-config PCI_PWRCTL_PWRSEQ
+config PCI_PWRCTRL_PWRSEQ
tristate
select POWER_SEQUENCING
- select PCI_PWRCTL
+ select PCI_PWRCTRL
+
+config PCI_PWRCTRL_SLOT
+ tristate "PCI Power Control driver for PCI slots"
+ select PCI_PWRCTRL
+ help
+ Say Y here to enable the PCI Power Control driver to control the power
+ state of PCI slots.
+
+ This is a generic driver that controls the power state of different
+ PCI slots. The voltage regulators powering the rails of the PCI slots
+ are expected to be defined in the devicetree node of the PCI bridge.
+
+# deprecated
+config HAVE_PWRCTL
+ bool
+ select HAVE_PWRCTRL
+
+# deprecated
+config PCI_PWRCTL_PWRSEQ
+ tristate
+ select PCI_PWRCTRL_PWRSEQ
diff --git a/drivers/pci/pwrctrl/Makefile b/drivers/pci/pwrctrl/Makefile
index 75c7ce531c7e..a4e5808d7850 100644
--- a/drivers/pci/pwrctrl/Makefile
+++ b/drivers/pci/pwrctrl/Makefile
@@ -1,6 +1,9 @@
# SPDX-License-Identifier: GPL-2.0-only
-obj-$(CONFIG_PCI_PWRCTL) += pci-pwrctrl-core.o
+obj-$(CONFIG_PCI_PWRCTRL) += pci-pwrctrl-core.o
pci-pwrctrl-core-y := core.o
-obj-$(CONFIG_PCI_PWRCTL_PWRSEQ) += pci-pwrctrl-pwrseq.o
+obj-$(CONFIG_PCI_PWRCTRL_PWRSEQ) += pci-pwrctrl-pwrseq.o
+
+obj-$(CONFIG_PCI_PWRCTRL_SLOT) += pci-pwrctrl-slot.o
+pci-pwrctrl-slot-y := slot.o
diff --git a/drivers/pci/pwrctrl/core.c b/drivers/pci/pwrctrl/core.c
index 2fb174db91e5..6bdbfed584d6 100644
--- a/drivers/pci/pwrctrl/core.c
+++ b/drivers/pci/pwrctrl/core.c
@@ -44,7 +44,7 @@ static void rescan_work_func(struct work_struct *work)
struct pci_pwrctrl, work);
pci_lock_rescan_remove();
- pci_rescan_bus(to_pci_dev(pwrctrl->dev->parent)->bus);
+ pci_rescan_bus(to_pci_host_bridge(pwrctrl->dev->parent)->bus);
pci_unlock_rescan_remove();
}
@@ -101,6 +101,8 @@ EXPORT_SYMBOL_GPL(pci_pwrctrl_device_set_ready);
*/
void pci_pwrctrl_device_unset_ready(struct pci_pwrctrl *pwrctrl)
{
+ cancel_work_sync(&pwrctrl->work);
+
/*
* We don't have to delete the link here. Typically, this function
* is only called when the power control device is being detached. If
diff --git a/drivers/pci/pwrctrl/slot.c b/drivers/pci/pwrctrl/slot.c
new file mode 100644
index 000000000000..18becc144913
--- /dev/null
+++ b/drivers/pci/pwrctrl/slot.c
@@ -0,0 +1,93 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2024 Linaro Ltd.
+ * Author: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+ */
+
+#include <linux/device.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/pci-pwrctrl.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+#include <linux/slab.h>
+
+struct pci_pwrctrl_slot_data {
+ struct pci_pwrctrl ctx;
+ struct regulator_bulk_data *supplies;
+ int num_supplies;
+};
+
+static void devm_pci_pwrctrl_slot_power_off(void *data)
+{
+ struct pci_pwrctrl_slot_data *slot = data;
+
+ regulator_bulk_disable(slot->num_supplies, slot->supplies);
+ regulator_bulk_free(slot->num_supplies, slot->supplies);
+}
+
+static int pci_pwrctrl_slot_probe(struct platform_device *pdev)
+{
+ struct pci_pwrctrl_slot_data *slot;
+ struct device *dev = &pdev->dev;
+ int ret;
+
+ slot = devm_kzalloc(dev, sizeof(*slot), GFP_KERNEL);
+ if (!slot)
+ return -ENOMEM;
+
+ ret = of_regulator_bulk_get_all(dev, dev_of_node(dev),
+ &slot->supplies);
+ if (ret < 0) {
+ dev_err_probe(dev, ret, "Failed to get slot regulators\n");
+ return ret;
+ }
+
+ slot->num_supplies = ret;
+ ret = regulator_bulk_enable(slot->num_supplies, slot->supplies);
+ if (ret < 0) {
+ dev_err_probe(dev, ret, "Failed to enable slot regulators\n");
+ goto err_regulator_free;
+ }
+
+ ret = devm_add_action_or_reset(dev, devm_pci_pwrctrl_slot_power_off,
+ slot);
+ if (ret)
+ goto err_regulator_disable;
+
+ pci_pwrctrl_init(&slot->ctx, dev);
+
+ ret = devm_pci_pwrctrl_device_set_ready(dev, &slot->ctx);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to register pwrctrl driver\n");
+
+ return 0;
+
+err_regulator_disable:
+ regulator_bulk_disable(slot->num_supplies, slot->supplies);
+err_regulator_free:
+ regulator_bulk_free(slot->num_supplies, slot->supplies);
+
+ return ret;
+}
+
+static const struct of_device_id pci_pwrctrl_slot_of_match[] = {
+ {
+ .compatible = "pciclass,0604",
+ },
+ { }
+};
+MODULE_DEVICE_TABLE(of, pci_pwrctrl_slot_of_match);
+
+static struct platform_driver pci_pwrctrl_slot_driver = {
+ .driver = {
+ .name = "pci-pwrctrl-slot",
+ .of_match_table = pci_pwrctrl_slot_of_match,
+ },
+ .probe = pci_pwrctrl_slot_probe,
+};
+module_platform_driver(pci_pwrctrl_slot_driver);
+
+MODULE_AUTHOR("Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>");
+MODULE_DESCRIPTION("Generic PCI Power Control driver for PCI Slots");
+MODULE_LICENSE("GPL");
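The "pciclass,0604" compatible is the generic form derived from the PCI class code of a PCI-to-PCI bridge (0x0604), so this driver binds to bridge child nodes that carry regulator supplies. A hypothetical devicetree fragment that would make pci_pwrctrl_create_device() (see the probe.c hunk above) spawn the platform device and bind this driver — node and supply names are invented:

	/*
	 * pcie@0,0 {
	 *         compatible = "pciclass,0604";
	 *         reg = <0x0 0x0 0x0 0x0 0x0>;
	 *         vpcie3v3-supply = <&pcie_slot_3v3>;
	 * };
	 */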
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 82b21e34c545..db6e142b082d 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -38,14 +38,10 @@
static bool pcie_lbms_seen(struct pci_dev *dev, u16 lnksta)
{
- unsigned long count;
- int ret;
-
- ret = pcie_lbms_count(dev, &count);
- if (ret < 0)
- return lnksta & PCI_EXP_LNKSTA_LBMS;
+ if (test_bit(PCI_LINK_LBMS_SEEN, &dev->priv_flags))
+ return true;
- return count > 0;
+ return lnksta & PCI_EXP_LNKSTA_LBMS;
}
/*
@@ -109,13 +105,13 @@ int pcie_failed_link_retrain(struct pci_dev *dev)
!pcie_cap_has_lnkctl2(dev) || !dev->link_active_reporting)
return ret;
- pcie_capability_read_word(dev, PCI_EXP_LNKCTL2, &lnkctl2);
pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &lnksta);
if (!(lnksta & PCI_EXP_LNKSTA_DLLLA) && pcie_lbms_seen(dev, lnksta)) {
- u16 oldlnkctl2 = lnkctl2;
+ u16 oldlnkctl2;
pci_info(dev, "broken device, retraining non-functional downstream link at 2.5GT/s\n");
+ pcie_capability_read_word(dev, PCI_EXP_LNKCTL2, &oldlnkctl2);
ret = pcie_set_target_speed(dev, PCIE_SPEED_2_5GT, false);
if (ret) {
pci_info(dev, "retraining failed\n");
@@ -127,6 +123,8 @@ int pcie_failed_link_retrain(struct pci_dev *dev)
pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &lnksta);
}
+ pcie_capability_read_word(dev, PCI_EXP_LNKCTL2, &lnkctl2);
+
if ((lnksta & PCI_EXP_LNKSTA_DLLLA) &&
(lnkctl2 & PCI_EXP_LNKCTL2_TLS) == PCI_EXP_LNKCTL2_TLS_2_5GT &&
pci_match_id(ids, dev)) {
@@ -621,7 +619,7 @@ static void quirk_io(struct pci_dev *dev, int pos, unsigned int size,
{
u32 region;
struct pci_bus_region bus_region;
- struct resource *res = dev->resource + pos;
+ struct resource *res = pci_resource_n(dev, pos);
const char *res_name = pci_resource_name(dev, pos);
pci_read_config_dword(dev, PCI_BASE_ADDRESS_0 + (pos << 2), &region);
@@ -671,7 +669,7 @@ static void quirk_io_region(struct pci_dev *dev, int port,
{
u16 region;
struct pci_bus_region bus_region;
- struct resource *res = dev->resource + nr;
+ struct resource *res = pci_resource_n(dev, nr);
pci_read_config_word(dev, port, &region);
region &= ~(size - 1);
@@ -1990,12 +1988,12 @@ static void quirk_huawei_pcie_sva(struct pci_dev *pdev)
device_create_managed_software_node(&pdev->dev, properties, NULL))
pci_warn(pdev, "could not add stall property");
}
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_HUAWEI, 0xa250, quirk_huawei_pcie_sva);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_HUAWEI, 0xa251, quirk_huawei_pcie_sva);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_HUAWEI, 0xa255, quirk_huawei_pcie_sva);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_HUAWEI, 0xa256, quirk_huawei_pcie_sva);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_HUAWEI, 0xa258, quirk_huawei_pcie_sva);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_HUAWEI, 0xa259, quirk_huawei_pcie_sva);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_HUAWEI, 0xa250, quirk_huawei_pcie_sva);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_HUAWEI, 0xa251, quirk_huawei_pcie_sva);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_HUAWEI, 0xa255, quirk_huawei_pcie_sva);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_HUAWEI, 0xa256, quirk_huawei_pcie_sva);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_HUAWEI, 0xa258, quirk_huawei_pcie_sva);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_HUAWEI, 0xa259, quirk_huawei_pcie_sva);
/*
* It's possible for the MSI to get corrupted if SHPC and ACPI are used
@@ -4995,6 +4993,18 @@ static int pci_quirk_brcm_acs(struct pci_dev *dev, u16 acs_flags)
PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF);
}
+static int pci_quirk_loongson_acs(struct pci_dev *dev, u16 acs_flags)
+{
+ /*
+ * Loongson PCIe Root Ports don't advertise an ACS capability, but
+ * they do not allow peer-to-peer transactions between Root Ports.
+ * Allow each Root Port to be in a separate IOMMU group by masking
+ * SV/RR/CR/UF bits.
+ */
+ return pci_acs_ctrl_enabled(acs_flags,
+ PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF);
+}
+
/*
* Wangxun 40G/25G/10G/1G NICs have no ACS capability, but on
* multi-function devices, the hardware isolates the functions by
@@ -5128,6 +5138,17 @@ static const struct pci_dev_acs_enabled {
{ PCI_VENDOR_ID_BROADCOM, 0x1762, pci_quirk_mf_endpoint_acs },
{ PCI_VENDOR_ID_BROADCOM, 0x1763, pci_quirk_mf_endpoint_acs },
{ PCI_VENDOR_ID_BROADCOM, 0xD714, pci_quirk_brcm_acs },
+ /* Loongson PCIe Root Ports */
+ { PCI_VENDOR_ID_LOONGSON, 0x3C09, pci_quirk_loongson_acs },
+ { PCI_VENDOR_ID_LOONGSON, 0x3C19, pci_quirk_loongson_acs },
+ { PCI_VENDOR_ID_LOONGSON, 0x3C29, pci_quirk_loongson_acs },
+ { PCI_VENDOR_ID_LOONGSON, 0x7A09, pci_quirk_loongson_acs },
+ { PCI_VENDOR_ID_LOONGSON, 0x7A19, pci_quirk_loongson_acs },
+ { PCI_VENDOR_ID_LOONGSON, 0x7A29, pci_quirk_loongson_acs },
+ { PCI_VENDOR_ID_LOONGSON, 0x7A39, pci_quirk_loongson_acs },
+ { PCI_VENDOR_ID_LOONGSON, 0x7A49, pci_quirk_loongson_acs },
+ { PCI_VENDOR_ID_LOONGSON, 0x7A59, pci_quirk_loongson_acs },
+ { PCI_VENDOR_ID_LOONGSON, 0x7A69, pci_quirk_loongson_acs },
/* Amazon Annapurna Labs */
{ PCI_VENDOR_ID_AMAZON_ANNAPURNA_LABS, 0x0031, pci_quirk_al_acs },
/* Zhaoxin multi-function devices */
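For reference, pci_acs_ctrl_enabled() — which the new quirk calls — reports whether every requested ACS control is covered by the mask the quirk claims; roughly (paraphrased from earlier in quirks.c, not part of this diff):

	static int pci_acs_ctrl_enabled(u16 acs_ctrl_req, u16 acs_ctrl_ena)
	{
		if ((acs_ctrl_req & acs_ctrl_ena) == acs_ctrl_req)
			return 1;
		return 0;
	}

A Loongson Root Port thus reports "enabled" for any combination of SV/RR/CR/UF, which is what lets the IOMMU layer place each Root Port in its own group.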
diff --git a/drivers/pci/remove.c b/drivers/pci/remove.c
index efc37fcb73e2..445afdfa6498 100644
--- a/drivers/pci/remove.c
+++ b/drivers/pci/remove.c
@@ -41,7 +41,6 @@ static void pci_stop_dev(struct pci_dev *dev)
if (!pci_dev_test_and_clear_added(dev))
return;
- pci_pwrctrl_unregister(&dev->dev);
device_release_driver(&dev->dev);
pci_proc_detach_device(dev);
pci_remove_sysfs_dev_files(dev);
@@ -53,6 +52,7 @@ static void pci_destroy_dev(struct pci_dev *dev)
if (pci_dev_test_and_set_removed(dev))
return;
+ pci_doe_sysfs_teardown(dev);
pci_npem_remove(dev);
device_del(&dev->dev);
@@ -64,6 +64,7 @@ static void pci_destroy_dev(struct pci_dev *dev)
pci_doe_destroy(dev);
pcie_aspm_exit_link_state(dev);
pci_bridge_d3_update(dev);
+ pci_pwrctrl_unregister(&dev->dev);
pci_free_resources(dev);
put_device(&dev->dev);
}
@@ -163,6 +164,8 @@ void pci_stop_root_bus(struct pci_bus *bus)
&bus->devices, bus_list)
pci_stop_bus_device(child);
+ of_pci_remove_host_bridge_node(host_bridge);
+
/* stop the host bridge */
device_release_driver(&host_bridge->dev);
}
diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c
index 8707c5b08cf3..07c3d021a47e 100644
--- a/drivers/pci/setup-bus.c
+++ b/drivers/pci/setup-bus.c
@@ -127,12 +127,42 @@ static resource_size_t get_res_add_align(struct list_head *head,
return dev_res ? dev_res->min_align : 0;
}
+static void restore_dev_resource(struct pci_dev_resource *dev_res)
+{
+ struct resource *res = dev_res->res;
+
+ res->start = dev_res->start;
+ res->end = dev_res->end;
+ res->flags = dev_res->flags;
+}
+
+static bool pdev_resources_assignable(struct pci_dev *dev)
+{
+ u16 class = dev->class >> 8, command;
+
+ /* Don't touch classless devices or host bridges or IOAPICs */
+ if (class == PCI_CLASS_NOT_DEFINED || class == PCI_CLASS_BRIDGE_HOST)
+ return false;
+
+ /* Don't touch IOAPIC devices already enabled by firmware */
+ if (class == PCI_CLASS_SYSTEM_PIC) {
+ pci_read_config_word(dev, PCI_COMMAND, &command);
+ if (command & (PCI_COMMAND_IO | PCI_COMMAND_MEMORY))
+ return false;
+ }
+
+ return true;
+}
+
/* Sort resources by alignment */
static void pdev_sort_resources(struct pci_dev *dev, struct list_head *head)
{
struct resource *r;
int i;
+ if (!pdev_resources_assignable(dev))
+ return;
+
pci_dev_for_each_resource(dev, r, i) {
const char *r_name = pci_resource_name(dev, i);
struct pci_dev_resource *dev_res, *tmp;
@@ -157,6 +187,9 @@ static void pdev_sort_resources(struct pci_dev *dev, struct list_head *head)
panic("%s: kzalloc() failed!\n", __func__);
tmp->res = r;
tmp->dev = dev;
+ tmp->start = r->start;
+ tmp->end = r->end;
+ tmp->flags = r->flags;
/* Fallback is smallest one or list is empty */
n = head;
@@ -176,23 +209,16 @@ static void pdev_sort_resources(struct pci_dev *dev, struct list_head *head)
}
}
-static void __dev_sort_resources(struct pci_dev *dev, struct list_head *head)
+bool pci_resource_is_optional(const struct pci_dev *dev, int resno)
{
- u16 class = dev->class >> 8;
-
- /* Don't touch classless devices or host bridges or IOAPICs */
- if (class == PCI_CLASS_NOT_DEFINED || class == PCI_CLASS_BRIDGE_HOST)
- return;
+ const struct resource *res = pci_resource_n(dev, resno);
- /* Don't touch IOAPIC devices already enabled by firmware */
- if (class == PCI_CLASS_SYSTEM_PIC) {
- u16 command;
- pci_read_config_word(dev, PCI_COMMAND, &command);
- if (command & (PCI_COMMAND_IO | PCI_COMMAND_MEMORY))
- return;
- }
+ if (pci_resource_is_iov(resno))
+ return true;
+ if (resno == PCI_ROM_RESOURCE && !(res->flags & IORESOURCE_ROM_ENABLE))
+ return true;
- pdev_sort_resources(dev, head);
+ return false;
}
static inline void reset_resource(struct resource *res)
@@ -216,10 +242,11 @@ static inline void reset_resource(struct resource *res)
static void reassign_resources_sorted(struct list_head *realloc_head,
struct list_head *head)
{
- struct resource *res;
- const char *res_name;
struct pci_dev_resource *add_res, *tmp;
struct pci_dev_resource *dev_res;
+ struct pci_dev *dev;
+ struct resource *res;
+ const char *res_name;
resource_size_t add_size, align;
int idx;
@@ -227,9 +254,15 @@ static void reassign_resources_sorted(struct list_head *realloc_head,
bool found_match = false;
res = add_res->res;
+ dev = add_res->dev;
+ idx = pci_resource_num(dev, res);
- /* Skip resource that has been reset */
- if (!res->flags)
+ /*
+ * Skip a resource that failed its earlier assignment and is not
+ * optional, as assigning it again would just fail the same way.
+ */
+ if (!res->parent && resource_size(res) &&
+ !pci_resource_is_optional(dev, idx))
goto out;
/* Skip this resource if not found in head list */
@@ -242,20 +275,22 @@ static void reassign_resources_sorted(struct list_head *realloc_head,
if (!found_match) /* Just skip */
continue;
- idx = res - &add_res->dev->resource[0];
- res_name = pci_resource_name(add_res->dev, idx);
+ res_name = pci_resource_name(dev, idx);
add_size = add_res->add_size;
align = add_res->min_align;
- if (!resource_size(res)) {
- resource_set_range(res, align, add_size);
- if (pci_assign_resource(add_res->dev, idx))
- reset_resource(res);
- } else {
+ if (!res->parent) {
+ resource_set_range(res, align,
+ resource_size(res) + add_size);
+ if (pci_assign_resource(dev, idx)) {
+ pci_dbg(dev,
+ "%s %pR: ignoring failure in optional allocation\n",
+ res_name, res);
+ }
+ } else if (add_size > 0) {
res->flags |= add_res->flags &
(IORESOURCE_STARTALIGN|IORESOURCE_SIZEALIGN);
- if (pci_reassign_resource(add_res->dev, idx,
- add_size, align))
- pci_info(add_res->dev, "%s %pR: failed to add %llx\n",
+ if (pci_reassign_resource(dev, idx, add_size, align))
+ pci_info(dev, "%s %pR: failed to add optional %llx\n",
res_name, res,
(unsigned long long) add_size);
}
@@ -271,36 +306,39 @@ out:
* @head: Head of the list tracking requests for resources
* @fail_head: Head of the list tracking requests that could not be
* allocated
+ * @optional: Assign also optional resources
*
* Satisfy resource requests of each element in the list. Add requests that
* could not be satisfied to the failed_list.
*/
static void assign_requested_resources_sorted(struct list_head *head,
- struct list_head *fail_head)
+ struct list_head *fail_head,
+ bool optional)
{
- struct resource *res;
struct pci_dev_resource *dev_res;
+ struct resource *res;
+ struct pci_dev *dev;
+ bool optional_res;
int idx;
list_for_each_entry(dev_res, head, list) {
res = dev_res->res;
- idx = res - &dev_res->dev->resource[0];
- if (resource_size(res) &&
- pci_assign_resource(dev_res->dev, idx)) {
+ dev = dev_res->dev;
+ idx = pci_resource_num(dev, res);
+ optional_res = pci_resource_is_optional(dev, idx);
+
+ if (!resource_size(res))
+ continue;
+
+ if (!optional && optional_res)
+ continue;
+
+ if (pci_assign_resource(dev, idx)) {
if (fail_head) {
- /*
- * If the failed resource is a ROM BAR and
- * it will be enabled later, don't add it
- * to the list.
- */
- if (!((idx == PCI_ROM_RESOURCE) &&
- (!(res->flags & IORESOURCE_ROM_ENABLE))))
- add_to_list(fail_head,
- dev_res->dev, res,
- 0 /* don't care */,
- 0 /* don't care */);
+ add_to_list(fail_head, dev, res,
+ 0 /* don't care */,
+ 0 /* don't care */);
}
- reset_resource(res);
}
}
}
@@ -345,6 +383,20 @@ static bool pci_need_to_release(unsigned long mask, struct resource *res)
return false; /* Should not get here */
}
+/* Return: @true if assignment of a required resource failed. */
+static bool pci_required_resource_failed(struct list_head *fail_head)
+{
+ struct pci_dev_resource *fail_res;
+
+ list_for_each_entry(fail_res, fail_head, list) {
+ int idx = pci_resource_num(fail_res->dev, fail_res->res);
+
+ if (!pci_resource_is_optional(fail_res->dev, idx))
+ return true;
+ }
+ return false;
+}
+
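To make the required/optional split concrete, pci_resource_is_optional() classifies only SR-IOV BARs and a still-disabled expansion ROM as optional; everything else is required. Illustrative expectations (not meant to be executed as-is):

	pci_resource_is_optional(dev, 0);		  /* false: BAR 0 is required */
	pci_resource_is_optional(dev, PCI_ROM_RESOURCE);  /* true while IORESOURCE_ROM_ENABLE is clear */
	#ifdef CONFIG_PCI_IOV
	pci_resource_is_optional(dev, PCI_IOV_RESOURCES); /* true: SR-IOV BARs are optional */
	#endif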
static void __assign_resources_sorted(struct list_head *head,
struct list_head *realloc_head,
struct list_head *fail_head)
@@ -354,9 +406,11 @@ static void __assign_resources_sorted(struct list_head *head,
* adjacent, so later reassign can not reallocate them one by one in
* parent resource window.
*
- * Try to assign requested + add_size at beginning. If could do that,
- * could get out early. If could not do that, we still try to assign
- * requested at first, then try to reassign add_size for some resources.
+ * Try to assign required and any optional resources at beginning
+ * (add_size included). If all required resources were successfully
+ * assigned, get out early. Otherwise, fall back to assigning the
+ * required resources first and then reassigning some optional
+ * resources.
*
* Separate three resource type checking if we need to release
* assigned resource after requested + add_size try.
@@ -372,27 +426,36 @@ static void __assign_resources_sorted(struct list_head *head,
*/
LIST_HEAD(save_head);
LIST_HEAD(local_fail_head);
+ LIST_HEAD(dummy_head);
struct pci_dev_resource *save_res;
struct pci_dev_resource *dev_res, *tmp_res, *dev_res2;
+ struct resource *res;
+ struct pci_dev *dev;
+ const char *res_name;
+ int idx;
unsigned long fail_type;
resource_size_t add_align, align;
+ if (!realloc_head)
+ realloc_head = &dummy_head;
+
/* Check if optional add_size is there */
- if (!realloc_head || list_empty(realloc_head))
- goto requested_and_reassign;
+ if (list_empty(realloc_head))
+ goto assign;
/* Save original start, end, flags etc at first */
list_for_each_entry(dev_res, head, list) {
if (add_to_list(&save_head, dev_res->dev, dev_res->res, 0, 0)) {
free_list(&save_head);
- goto requested_and_reassign;
+ goto assign;
}
}
/* Update res in head list with add_size in realloc_head list */
list_for_each_entry_safe(dev_res, tmp_res, head, list) {
- dev_res->res->end += get_res_add_size(realloc_head,
- dev_res->res);
+ res = dev_res->res;
+
+ res->end += get_res_add_size(realloc_head, res);
/*
* There are two kinds of additional resources in the list:
@@ -400,10 +463,10 @@ static void __assign_resources_sorted(struct list_head *head,
* 2. SR-IOV resource -- IORESOURCE_SIZEALIGN
* Here just fix the additional alignment for bridge
*/
- if (!(dev_res->res->flags & IORESOURCE_STARTALIGN))
+ if (!(res->flags & IORESOURCE_STARTALIGN))
continue;
- add_align = get_res_add_align(realloc_head, dev_res->res);
+ add_align = get_res_add_align(realloc_head, res);
/*
* The "head" list is sorted by alignment so resources with
@@ -412,11 +475,8 @@ static void __assign_resources_sorted(struct list_head *head,
* need to reorder the list by alignment to make it
* consistent.
*/
- if (add_align > dev_res->res->start) {
- resource_size_t r_size = resource_size(dev_res->res);
-
- dev_res->res->start = add_align;
- dev_res->res->end = add_align + r_size - 1;
+ if (add_align > res->start) {
+ resource_set_range(res, add_align, resource_size(res));
list_for_each_entry(dev_res2, head, list) {
align = pci_resource_alignment(dev_res2->dev,
@@ -431,54 +491,95 @@ static void __assign_resources_sorted(struct list_head *head,
}
- /* Try updated head list with add_size added */
- assign_requested_resources_sorted(head, &local_fail_head);
+assign:
+ assign_requested_resources_sorted(head, &local_fail_head, true);
- /* All assigned with add_size? */
+ /* All non-optional resources assigned? */
if (list_empty(&local_fail_head)) {
/* Remove head list from realloc_head list */
list_for_each_entry(dev_res, head, list)
remove_from_list(realloc_head, dev_res->res);
free_list(&save_head);
- free_list(head);
- return;
+ goto out;
+ }
+
+ /* No required resource failed and realloc_head is empty: nothing more to do. */
+ if (!pci_required_resource_failed(&local_fail_head) &&
+ list_empty(realloc_head)) {
+ list_for_each_entry(save_res, &save_head, list) {
+ struct resource *res = save_res->res;
+
+ if (res->parent)
+ continue;
+
+ restore_dev_resource(save_res);
+ }
+ free_list(&local_fail_head);
+ free_list(&save_head);
+ goto out;
}
/* Check failed type */
fail_type = pci_fail_res_type_mask(&local_fail_head);
/* Remove not need to be released assigned res from head list etc */
- list_for_each_entry_safe(dev_res, tmp_res, head, list)
- if (dev_res->res->parent &&
- !pci_need_to_release(fail_type, dev_res->res)) {
+ list_for_each_entry_safe(dev_res, tmp_res, head, list) {
+ res = dev_res->res;
+
+ if (res->parent && !pci_need_to_release(fail_type, res)) {
/* Remove it from realloc_head list */
- remove_from_list(realloc_head, dev_res->res);
- remove_from_list(&save_head, dev_res->res);
+ remove_from_list(realloc_head, res);
+ remove_from_list(&save_head, res);
list_del(&dev_res->list);
kfree(dev_res);
}
+ }
free_list(&local_fail_head);
/* Release assigned resource */
- list_for_each_entry(dev_res, head, list)
- if (dev_res->res->parent)
- release_resource(dev_res->res);
- /* Restore start/end/flags from saved list */
- list_for_each_entry(save_res, &save_head, list) {
- struct resource *res = save_res->res;
+ list_for_each_entry(dev_res, head, list) {
+ res = dev_res->res;
+ dev = dev_res->dev;
+
+ if (!res->parent)
+ continue;
- res->start = save_res->start;
- res->end = save_res->end;
- res->flags = save_res->flags;
+ idx = pci_resource_num(dev, res);
+ res_name = pci_resource_name(dev, idx);
+ pci_dbg(dev, "%s %pR: releasing\n", res_name, res);
+
+ release_resource(res);
+ restore_dev_resource(dev_res);
}
+ /* Restore start/end/flags from saved list */
+ list_for_each_entry(save_res, &save_head, list)
+ restore_dev_resource(save_res);
free_list(&save_head);
-requested_and_reassign:
/* Satisfy the must-have resource requests */
- assign_requested_resources_sorted(head, fail_head);
+ assign_requested_resources_sorted(head, NULL, false);
/* Try to satisfy any additional optional resource requests */
- if (realloc_head)
+ if (!list_empty(realloc_head))
reassign_resources_sorted(realloc_head, head);
+
+out:
+ /* Reset any failed resource; fail_head cannot be used here as it may be NULL. */
+ list_for_each_entry(dev_res, head, list) {
+ res = dev_res->res;
+ dev = dev_res->dev;
+
+ if (res->parent)
+ continue;
+
+ if (fail_head) {
+ add_to_list(fail_head, dev, res,
+ 0 /* don't care */,
+ 0 /* don't care */);
+ }
+
+ reset_resource(res);
+ }
+
free_list(head);
}
@@ -488,7 +589,7 @@ static void pdev_assign_resources_sorted(struct pci_dev *dev,
{
LIST_HEAD(head);
- __dev_sort_resources(dev, &head);
+ pdev_sort_resources(dev, &head);
__assign_resources_sorted(&head, add_head, fail_head);
}
@@ -501,7 +602,7 @@ static void pbus_assign_resources_sorted(const struct pci_bus *bus,
LIST_HEAD(head);
list_for_each_entry(dev, &bus->devices, bus_list)
- __dev_sort_resources(dev, &head);
+ pdev_sort_resources(dev, &head);
__assign_resources_sorted(&head, realloc_head, fail_head);
}
@@ -675,8 +776,7 @@ static void __pci_setup_bridge(struct pci_bus *bus, unsigned long type)
{
struct pci_dev *bridge = bus->self;
- pci_info(bridge, "PCI bridge to %pR\n",
- &bus->busn_res);
+ pci_info(bridge, "PCI bridge to %pR\n", &bus->busn_res);
if (type & IORESOURCE_IO)
pci_setup_bridge_io(bridge);
@@ -694,7 +794,7 @@ void __weak pcibios_setup_bridge(struct pci_bus *bus, unsigned long type)
{
}
-void pci_setup_bridge(struct pci_bus *bus)
+static void pci_setup_bridge(struct pci_bus *bus)
{
unsigned long type = IORESOURCE_IO | IORESOURCE_MEM |
IORESOURCE_PREFETCH;
@@ -814,11 +914,9 @@ static resource_size_t calculate_iosize(resource_size_t size,
size = (size & 0xff) + ((size & ~0xffUL) << 2);
#endif
size = size + size1;
- if (size < old_size)
- size = old_size;
- size = ALIGN(max(size, add_size) + children_add_size, align);
- return size;
+ size = max(size, add_size) + children_add_size;
+ return ALIGN(max(size, old_size), align);
}
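Worked example of the rewritten calculate_iosize() tail: take size = 0x300, add_size = 0x400, children_add_size = 0x100, old_size = 0x800 and align = 0x1000 (values invented for illustration). Then max(0x300, 0x400) + 0x100 = 0x500, max(0x500, 0x800) = 0x800, and ALIGN(0x800, 0x1000) = 0x1000 — the previously configured window size still acts as a lower bound, but it is now applied after the optional add_size is folded in rather than before it.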
static resource_size_t calculate_memsize(resource_size_t size,
@@ -843,9 +941,9 @@ resource_size_t __weak pcibios_window_alignment(struct pci_bus *bus,
return 1;
}
-#define PCI_P2P_DEFAULT_MEM_ALIGN 0x100000 /* 1MiB */
-#define PCI_P2P_DEFAULT_IO_ALIGN 0x1000 /* 4KiB */
-#define PCI_P2P_DEFAULT_IO_ALIGN_1K 0x400 /* 1KiB */
+#define PCI_P2P_DEFAULT_MEM_ALIGN SZ_1M
+#define PCI_P2P_DEFAULT_IO_ALIGN SZ_4K
+#define PCI_P2P_DEFAULT_IO_ALIGN_1K SZ_1K
static resource_size_t window_alignment(struct pci_bus *bus, unsigned long type)
{
@@ -910,7 +1008,7 @@ static void pbus_size_io(struct pci_bus *bus, resource_size_t min_size,
continue;
r_size = resource_size(r);
- if (r_size < 0x400)
+ if (r_size < SZ_1K)
/* Might be re-aligned for ISA */
size += r_size;
else
@@ -1064,7 +1162,7 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask,
{
struct pci_dev *dev;
resource_size_t min_align, win_align, align, size, size0, size1 = 0;
- resource_size_t aligns[24]; /* Alignments from 1MB to 8TB */
+ resource_size_t aligns[28]; /* Alignments from 1MB to 128TB */
int order, max_order;
struct resource *b_res = find_bus_resource_of_type(bus,
mask | IORESOURCE_PREFETCH, type);
@@ -1097,17 +1195,15 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask,
(r->flags & mask) != type3))
continue;
r_size = resource_size(r);
-#ifdef CONFIG_PCI_IOV
+
/* Put SRIOV requested res to the optional list */
- if (realloc_head && i >= PCI_IOV_RESOURCES &&
- i <= PCI_IOV_RESOURCE_END) {
+ if (realloc_head && pci_resource_is_optional(dev, i)) {
add_align = max(pci_resource_alignment(dev, r), add_align);
- r->end = r->start - 1;
- add_to_list(realloc_head, dev, r, r_size, 0 /* Don't care */);
+ add_to_list(realloc_head, dev, r, 0, 0 /* Don't care */);
children_add_size += r_size;
continue;
}
-#endif
+
/*
* aligns[0] is for 1MB (since bridge memory
* windows are always at least 1MB aligned), so
@@ -1149,7 +1245,7 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask,
if (bus->self && size0 &&
!pbus_upstream_space_available(bus, mask | IORESOURCE_PREFETCH, type,
- size0, add_align)) {
+ size0, min_align)) {
min_align = 1ULL << (max_order + __ffs(SZ_1M));
min_align = max(min_align, win_align);
size0 = calculate_memsize(size, min_size, 0, 0, resource_size(b_res), win_align);
@@ -1182,8 +1278,8 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask,
b_res->flags = 0;
return 0;
}
- b_res->start = min_align;
- b_res->end = size0 + min_align - 1;
+
+ resource_set_range(b_res, min_align, size0);
b_res->flags |= IORESOURCE_STARTALIGN;
if (bus->self && size1 > size0 && realloc_head) {
add_to_list(realloc_head, bus->self, b_res, size1-size0, add_align);
@@ -1658,8 +1754,7 @@ static void pci_bridge_release_resources(struct pci_bus *bus,
pci_info(dev, "resource %d %pR released\n",
PCI_BRIDGE_RESOURCES + idx, r);
/* Keep the old size */
- r->end = resource_size(r) - 1;
- r->start = 0;
+ resource_set_range(r, 0, resource_size(r));
r->flags = 0;
/* Avoiding touch the one without PREF */
@@ -1898,7 +1993,7 @@ static void remove_dev_resources(struct pci_dev *dev, struct resource *io,
* Make sure prefetchable memory is reduced from
* the correct resource. Specifically we put 32-bit
* prefetchable memory in non-prefetchable window
- * if there is an 64-bit prefetchable window.
+ * if there is a 64-bit prefetchable window.
*
* See comments in __pci_bus_size_bridges() for
* more information.
@@ -2126,6 +2221,43 @@ pci_root_bus_distribute_available_resources(struct pci_bus *bus,
}
}
+static void pci_prepare_next_assign_round(struct list_head *fail_head,
+ int tried_times,
+ enum release_type rel_type)
+{
+ struct pci_dev_resource *fail_res;
+
+ pr_info("PCI: No. %d try to assign unassigned res\n", tried_times + 1);
+
+ /*
+ * Try to release leaf bridge's resources that aren't big
+ * enough to contain child device resources.
+ */
+ list_for_each_entry(fail_res, fail_head, list) {
+ pci_bus_release_bridge_resources(fail_res->dev->bus,
+ fail_res->flags & PCI_RES_TYPE_MASK,
+ rel_type);
+ }
+
+ /* Restore size and flags */
+ list_for_each_entry(fail_res, fail_head, list) {
+ struct resource *res = fail_res->res;
+ struct pci_dev *dev = fail_res->dev;
+ int idx = pci_resource_num(dev, res);
+
+ restore_dev_resource(fail_res);
+
+ if (!pci_is_bridge(dev))
+ continue;
+
+ if (idx >= PCI_BRIDGE_RESOURCES &&
+ idx <= PCI_BRIDGE_RESOURCE_END)
+ res->flags = 0;
+ }
+
+ free_list(fail_head);
+}
+
/*
* First try will not touch PCI bridge res.
* Second and later try will clear small leaf bridge res.
@@ -2139,7 +2271,6 @@ void pci_assign_unassigned_root_bus_resources(struct pci_bus *bus)
int tried_times = 0;
enum release_type rel_type = leaf_only;
LIST_HEAD(fail_head);
- struct pci_dev_resource *fail_res;
int pci_try_num = 1;
enum enable_type enable_local;
@@ -2153,78 +2284,50 @@ void pci_assign_unassigned_root_bus_resources(struct pci_bus *bus)
max_depth, pci_try_num);
}
-again:
- /*
- * Last try will use add_list, otherwise will try good to have as must
- * have, so can realloc parent bridge resource
- */
- if (tried_times + 1 == pci_try_num)
- add_list = &realloc_head;
- /*
- * Depth first, calculate sizes and alignments of all subordinate buses.
- */
- __pci_bus_size_bridges(bus, add_list);
-
- pci_root_bus_distribute_available_resources(bus, add_list);
-
- /* Depth last, allocate resources and update the hardware. */
- __pci_bus_assign_resources(bus, add_list, &fail_head);
- if (add_list)
- BUG_ON(!list_empty(add_list));
- tried_times++;
-
- /* Any device complain? */
- if (list_empty(&fail_head))
- goto dump;
-
- if (tried_times >= pci_try_num) {
- if (enable_local == undefined)
- dev_info(&bus->dev, "Some PCI device resources are unassigned, try booting with pci=realloc\n");
- else if (enable_local == auto_enabled)
- dev_info(&bus->dev, "Automatically enabled pci realloc, if you have problem, try booting with pci=realloc=off\n");
-
- free_list(&fail_head);
- goto dump;
- }
+ while (1) {
+ /*
+ * The last try uses add_list; earlier tries treat "good to
+ * have" as "must have" so parent bridge resources can be
+ * reallocated.
+ */
+ if (tried_times + 1 == pci_try_num)
+ add_list = &realloc_head;
+ /*
+ * Depth first, calculate sizes and alignments of all
+ * subordinate buses.
+ */
+ __pci_bus_size_bridges(bus, add_list);
- dev_info(&bus->dev, "No. %d try to assign unassigned res\n",
- tried_times + 1);
+ pci_root_bus_distribute_available_resources(bus, add_list);
- /* Third times and later will not check if it is leaf */
- if ((tried_times + 1) > 2)
- rel_type = whole_subtree;
+ /* Depth last, allocate resources and update the hardware. */
+ __pci_bus_assign_resources(bus, add_list, &fail_head);
+ if (WARN_ON_ONCE(add_list && !list_empty(add_list)))
+ free_list(add_list);
+ tried_times++;
- /*
- * Try to release leaf bridge's resources that doesn't fit resource of
- * child device under that bridge.
- */
- list_for_each_entry(fail_res, &fail_head, list)
- pci_bus_release_bridge_resources(fail_res->dev->bus,
- fail_res->flags & PCI_RES_TYPE_MASK,
- rel_type);
+ /* Did any device complain? */
+ if (list_empty(&fail_head))
+ break;
- /* Restore size and flags */
- list_for_each_entry(fail_res, &fail_head, list) {
- struct resource *res = fail_res->res;
- int idx;
+ if (tried_times >= pci_try_num) {
+ if (enable_local == undefined) {
+ dev_info(&bus->dev,
+ "Some PCI device resources are unassigned, try booting with pci=realloc\n");
+ } else if (enable_local == auto_enabled) {
+ dev_info(&bus->dev,
+ "Automatically enabled pci realloc, if you have problem, try booting with pci=realloc=off\n");
+ }
+ free_list(&fail_head);
+ break;
+ }
- res->start = fail_res->start;
- res->end = fail_res->end;
- res->flags = fail_res->flags;
+ /* Third and later tries will not restrict release to leaf bridges */
+ if (tried_times + 1 > 2)
+ rel_type = whole_subtree;
- if (pci_is_bridge(fail_res->dev)) {
- idx = res - &fail_res->dev->resource[0];
- if (idx >= PCI_BRIDGE_RESOURCES &&
- idx <= PCI_BRIDGE_RESOURCE_END)
- res->flags = 0;
- }
+ pci_prepare_next_assign_round(&fail_head, tried_times, rel_type);
}
- free_list(&fail_head);
-
- goto again;
-dump:
- /* Dump the resource on buses */
pci_bus_dump_resources(bus);
}
@@ -2246,71 +2349,41 @@ void pci_assign_unassigned_bridge_resources(struct pci_dev *bridge)
struct pci_bus *parent = bridge->subordinate;
/* List of resources that want additional resources */
LIST_HEAD(add_list);
-
int tried_times = 0;
LIST_HEAD(fail_head);
- struct pci_dev_resource *fail_res;
- int retval;
-
-again:
- __pci_bus_size_bridges(parent, &add_list);
-
- /*
- * Distribute remaining resources (if any) equally between hotplug
- * bridges below. This makes it possible to extend the hierarchy
- * later without running out of resources.
- */
- pci_bridge_distribute_available_resources(bridge, &add_list);
-
- __pci_bridge_assign_resources(bridge, &add_list, &fail_head);
- BUG_ON(!list_empty(&add_list));
- tried_times++;
-
- if (list_empty(&fail_head))
- goto enable_all;
-
- if (tried_times >= 2) {
- /* Still fail, don't need to try more */
- free_list(&fail_head);
- goto enable_all;
- }
+ int ret;
- printk(KERN_DEBUG "PCI: No. %d try to assign unassigned res\n",
- tried_times + 1);
+ while (1) {
+ __pci_bus_size_bridges(parent, &add_list);
- /*
- * Try to release leaf bridge's resources that aren't big enough
- * to contain child device resources.
- */
- list_for_each_entry(fail_res, &fail_head, list)
- pci_bus_release_bridge_resources(fail_res->dev->bus,
- fail_res->flags & PCI_RES_TYPE_MASK,
- whole_subtree);
+ /*
+ * Distribute remaining resources (if any) equally between
+ * hotplug bridges below. This makes it possible to extend
+ * the hierarchy later without running out of resources.
+ */
+ pci_bridge_distribute_available_resources(bridge, &add_list);
- /* Restore size and flags */
- list_for_each_entry(fail_res, &fail_head, list) {
- struct resource *res = fail_res->res;
- int idx;
+ __pci_bridge_assign_resources(bridge, &add_list, &fail_head);
+ if (WARN_ON_ONCE(!list_empty(&add_list)))
+ free_list(&add_list);
+ tried_times++;
- res->start = fail_res->start;
- res->end = fail_res->end;
- res->flags = fail_res->flags;
+ if (list_empty(&fail_head))
+ break;
- if (pci_is_bridge(fail_res->dev)) {
- idx = res - &fail_res->dev->resource[0];
- if (idx >= PCI_BRIDGE_RESOURCES &&
- idx <= PCI_BRIDGE_RESOURCE_END)
- res->flags = 0;
+ if (tried_times >= 2) {
+ /* Still fail, don't need to try more */
+ free_list(&fail_head);
+ break;
}
- }
- free_list(&fail_head);
- goto again;
+ pci_prepare_next_assign_round(&fail_head, tried_times,
+ whole_subtree);
+ }
-enable_all:
- retval = pci_reenable_device(bridge);
- if (retval)
- pci_err(bridge, "Error reenabling bridge (%d)\n", retval);
+ ret = pci_reenable_device(bridge);
+ if (ret)
+ pci_err(bridge, "Error reenabling bridge (%d)\n", ret);
pci_set_master(bridge);
}
EXPORT_SYMBOL_GPL(pci_assign_unassigned_bridge_resources);
@@ -2368,7 +2441,8 @@ int pci_reassign_bridge_resources(struct pci_dev *bridge, unsigned long type)
__pci_bus_size_bridges(bridge->subordinate, &added);
__pci_bridge_assign_resources(bridge, &added, &failed);
- BUG_ON(!list_empty(&added));
+ if (WARN_ON_ONCE(!list_empty(&added)))
+ free_list(&added);
if (!list_empty(&failed)) {
ret = -ENOSPC;
@@ -2390,13 +2464,8 @@ int pci_reassign_bridge_resources(struct pci_dev *bridge, unsigned long type)
cleanup:
/* Restore size and flags */
- list_for_each_entry(dev_res, &failed, list) {
- struct resource *res = dev_res->res;
-
- res->start = dev_res->start;
- res->end = dev_res->end;
- res->flags = dev_res->flags;
- }
+ list_for_each_entry(dev_res, &failed, list)
+ restore_dev_resource(dev_res);
free_list(&failed);
/* Revert to the old configuration */
@@ -2404,11 +2473,9 @@ cleanup:
struct resource *res = dev_res->res;
bridge = dev_res->dev;
- i = res - bridge->resource;
+ i = pci_resource_num(bridge, res);
- res->start = dev_res->start;
- res->end = dev_res->end;
- res->flags = dev_res->flags;
+ restore_dev_resource(dev_res);
pci_claim_resource(bridge, i);
pci_setup_bridge(bridge->subordinate);
@@ -2431,6 +2498,7 @@ void pci_assign_unassigned_bus_resources(struct pci_bus *bus)
__pci_bus_size_bridges(dev->subordinate, &add_list);
up_read(&pci_bus_sem);
__pci_bus_assign_resources(bus, &add_list, NULL);
- BUG_ON(!list_empty(&add_list));
+ if (WARN_ON_ONCE(!list_empty(&add_list)))
+ free_list(&add_list);
}
EXPORT_SYMBOL_GPL(pci_assign_unassigned_bus_resources);
diff --git a/drivers/pci/setup-res.c b/drivers/pci/setup-res.c
index ca14576bf2bf..c6657cdd06f6 100644
--- a/drivers/pci/setup-res.c
+++ b/drivers/pci/setup-res.c
@@ -29,7 +29,7 @@ static void pci_std_update_resource(struct pci_dev *dev, int resno)
u16 cmd;
u32 new, check, mask;
int reg;
- struct resource *res = dev->resource + resno;
+ struct resource *res = pci_resource_n(dev, resno);
const char *res_name = pci_resource_name(dev, resno);
/* Per SR-IOV spec 3.4.1.11, VF BARs are RO zero */
@@ -127,10 +127,8 @@ void pci_update_resource(struct pci_dev *dev, int resno)
{
if (resno <= PCI_ROM_RESOURCE)
pci_std_update_resource(dev, resno);
-#ifdef CONFIG_PCI_IOV
- else if (resno >= PCI_IOV_RESOURCES && resno <= PCI_IOV_RESOURCE_END)
+ else if (pci_resource_is_iov(resno))
pci_iov_update_resource(dev, resno);
-#endif
}
int pci_claim_resource(struct pci_dev *dev, int resource)
@@ -262,7 +260,7 @@ resource_size_t __weak pcibios_align_resource(void *data,
static int __pci_assign_resource(struct pci_bus *bus, struct pci_dev *dev,
int resno, resource_size_t size, resource_size_t align)
{
- struct resource *res = dev->resource + resno;
+ struct resource *res = pci_resource_n(dev, resno);
resource_size_t min;
int ret;
@@ -325,7 +323,7 @@ static int _pci_assign_resource(struct pci_dev *dev, int resno,
int pci_assign_resource(struct pci_dev *dev, int resno)
{
- struct resource *res = dev->resource + resno;
+ struct resource *res = pci_resource_n(dev, resno);
const char *res_name = pci_resource_name(dev, resno);
resource_size_t align, size;
int ret;
@@ -372,7 +370,7 @@ EXPORT_SYMBOL(pci_assign_resource);
int pci_reassign_resource(struct pci_dev *dev, int resno,
resource_size_t addsize, resource_size_t min_align)
{
- struct resource *res = dev->resource + resno;
+ struct resource *res = pci_resource_n(dev, resno);
const char *res_name = pci_resource_name(dev, resno);
unsigned long flags;
resource_size_t new_size;
@@ -389,7 +387,6 @@ int pci_reassign_resource(struct pci_dev *dev, int resno,
return -EINVAL;
}
- /* already aligned with min_align */
new_size = resource_size(res) + addsize;
ret = _pci_assign_resource(dev, resno, new_size, min_align);
if (ret) {
@@ -411,14 +408,14 @@ int pci_reassign_resource(struct pci_dev *dev, int resno,
void pci_release_resource(struct pci_dev *dev, int resno)
{
- struct resource *res = dev->resource + resno;
+ struct resource *res = pci_resource_n(dev, resno);
const char *res_name = pci_resource_name(dev, resno);
- pci_info(dev, "%s %pR: releasing\n", res_name, res);
-
if (!res->parent)
return;
+ pci_info(dev, "%s %pR: releasing\n", res_name, res);
+
release_resource(res);
res->end = resource_size(res) - 1;
res->start = 0;
@@ -428,7 +425,7 @@ EXPORT_SYMBOL(pci_release_resource);
int pci_resize_resource(struct pci_dev *dev, int resno, int size)
{
- struct resource *res = dev->resource + resno;
+ struct resource *res = pci_resource_n(dev, resno);
struct pci_host_bridge *host;
int old, ret;
u32 sizes;
@@ -497,8 +494,7 @@ int pci_enable_resources(struct pci_dev *dev, int mask)
if (!(r->flags & (IORESOURCE_IO | IORESOURCE_MEM)))
continue;
- if ((i == PCI_ROM_RESOURCE) &&
- (!(r->flags & IORESOURCE_ROM_ENABLE)))
+ if (pci_resource_is_optional(dev, i))
continue;
if (r->flags & IORESOURCE_UNSET) {
diff --git a/drivers/pci/slot.c b/drivers/pci/slot.c
index 36b44be0489d..50fb3eb595fe 100644
--- a/drivers/pci/slot.c
+++ b/drivers/pci/slot.c
@@ -7,7 +7,6 @@
#include <linux/kobject.h>
#include <linux/slab.h>
-#include <linux/module.h>
#include <linux/pci.h>
#include <linux/err.h>
#include "pci.h"
@@ -325,49 +324,6 @@ void pci_destroy_slot(struct pci_slot *slot)
}
EXPORT_SYMBOL_GPL(pci_destroy_slot);
-#if defined(CONFIG_HOTPLUG_PCI) || defined(CONFIG_HOTPLUG_PCI_MODULE)
-#include <linux/pci_hotplug.h>
-/**
- * pci_hp_create_module_link - create symbolic link to hotplug driver module
- * @pci_slot: struct pci_slot
- *
- * Helper function for pci_hotplug_core.c to create symbolic link to
- * the hotplug driver module.
- */
-void pci_hp_create_module_link(struct pci_slot *pci_slot)
-{
- struct hotplug_slot *slot = pci_slot->hotplug;
- struct kobject *kobj = NULL;
- int ret;
-
- if (!slot || !slot->ops)
- return;
- kobj = kset_find_obj(module_kset, slot->mod_name);
- if (!kobj)
- return;
- ret = sysfs_create_link(&pci_slot->kobj, kobj, "module");
- if (ret)
- dev_err(&pci_slot->bus->dev, "Error creating sysfs link (%d)\n",
- ret);
- kobject_put(kobj);
-}
-EXPORT_SYMBOL_GPL(pci_hp_create_module_link);
-
-/**
- * pci_hp_remove_module_link - remove symbolic link to the hotplug driver
- * module.
- * @pci_slot: struct pci_slot
- *
- * Helper function for pci_hotplug_core.c to remove symbolic link to
- * the hotplug driver module.
- */
-void pci_hp_remove_module_link(struct pci_slot *pci_slot)
-{
- sysfs_remove_link(&pci_slot->kobj, "module");
-}
-EXPORT_SYMBOL_GPL(pci_hp_remove_module_link);
-#endif
-
static int pci_slot_init(void)
{
struct kset *pci_bus_kset;
diff --git a/drivers/pci/tph.c b/drivers/pci/tph.c
index 07de59ca2ebf..77fce5e1b830 100644
--- a/drivers/pci/tph.c
+++ b/drivers/pci/tph.c
@@ -204,48 +204,6 @@ static u8 get_rp_completer_type(struct pci_dev *pdev)
return FIELD_GET(PCI_EXP_DEVCAP2_TPH_COMP_MASK, reg);
}
-/* Write ST to MSI-X vector control reg - Return 0 if OK, otherwise -errno */
-static int write_tag_to_msix(struct pci_dev *pdev, int msix_idx, u16 tag)
-{
-#ifdef CONFIG_PCI_MSI
- struct msi_desc *msi_desc = NULL;
- void __iomem *vec_ctrl;
- u32 val;
- int err = 0;
-
- msi_lock_descs(&pdev->dev);
-
- /* Find the msi_desc entry with matching msix_idx */
- msi_for_each_desc(msi_desc, &pdev->dev, MSI_DESC_ASSOCIATED) {
- if (msi_desc->msi_index == msix_idx)
- break;
- }
-
- if (!msi_desc) {
- err = -ENXIO;
- goto err_out;
- }
-
- /* Get the vector control register (offset 0xc) pointed by msix_idx */
- vec_ctrl = pdev->msix_base + msix_idx * PCI_MSIX_ENTRY_SIZE;
- vec_ctrl += PCI_MSIX_ENTRY_VECTOR_CTRL;
-
- val = readl(vec_ctrl);
- val &= ~PCI_MSIX_ENTRY_CTRL_ST;
- val |= FIELD_PREP(PCI_MSIX_ENTRY_CTRL_ST, tag);
- writel(val, vec_ctrl);
-
- /* Read back to flush the update */
- val = readl(vec_ctrl);
-
-err_out:
- msi_unlock_descs(&pdev->dev);
- return err;
-#else
- return -ENODEV;
-#endif
-}
-
/* Write tag to ST table - Return 0 if OK, otherwise -errno */
static int write_tag_to_st_table(struct pci_dev *pdev, int index, u16 tag)
{
@@ -346,7 +304,7 @@ int pcie_tph_set_st_entry(struct pci_dev *pdev, unsigned int index, u16 tag)
switch (loc) {
case PCI_TPH_LOC_MSIX:
- err = write_tag_to_msix(pdev, index, tag);
+ err = pci_msix_write_tph_tag(pdev, index, tag);
break;
case PCI_TPH_LOC_CAP:
err = write_tag_to_st_table(pdev, index, tag);