-rw-r--r--  Documentation/devicetree/bindings/pci/designware-pcie.txt | 7
-rw-r--r--  Documentation/devicetree/bindings/pci/pci-keystone.txt | 58
-rw-r--r--  Documentation/devicetree/bindings/pci/pci.txt | 50
-rw-r--r--  MAINTAINERS | 9
-rw-r--r--  arch/arm64/boot/dts/mediatek/mt2712e.dtsi | 2
-rw-r--r--  arch/powerpc/platforms/powernv/npu-dma.c | 14
-rw-r--r--  arch/x86/pci/irq.c | 10
-rw-r--r--  drivers/acpi/pci_mcfg.c | 12
-rw-r--r--  drivers/acpi/pci_root.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_topology.c | 3
-rw-r--r--  drivers/iommu/amd_iommu.c | 2
-rw-r--r--  drivers/iommu/intel-iommu.c | 2
-rw-r--r--  drivers/iommu/intel_irq_remapping.c | 2
-rw-r--r--  drivers/misc/pci_endpoint_test.c | 18
-rw-r--r--  drivers/net/ethernet/realtek/r8169.c | 3
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c | 2
-rw-r--r--  drivers/pci/Makefile | 2
-rw-r--r--  drivers/pci/controller/dwc/Kconfig | 29
-rw-r--r--  drivers/pci/controller/dwc/Makefile | 1
-rw-r--r--  drivers/pci/controller/dwc/pci-dra7xx.c | 2
-rw-r--r--  drivers/pci/controller/dwc/pci-imx6.c | 144
-rw-r--r--  drivers/pci/controller/dwc/pci-keystone.c | 926
-rw-r--r--  drivers/pci/controller/dwc/pci-layerscape-ep.c | 2
-rw-r--r--  drivers/pci/controller/dwc/pcie-al.c | 93
-rw-r--r--  drivers/pci/controller/dwc/pcie-artpec6.c | 2
-rw-r--r--  drivers/pci/controller/dwc/pcie-designware-ep.c | 55
-rw-r--r--  drivers/pci/controller/dwc/pcie-designware-host.c | 157
-rw-r--r--  drivers/pci/controller/dwc/pcie-designware-plat.c | 2
-rw-r--r--  drivers/pci/controller/dwc/pcie-designware.c | 64
-rw-r--r--  drivers/pci/controller/dwc/pcie-designware.h | 26
-rw-r--r--  drivers/pci/controller/dwc/pcie-qcom.c | 23
-rw-r--r--  drivers/pci/controller/pci-hyperv.c | 23
-rw-r--r--  drivers/pci/controller/pcie-iproc.c | 46
-rw-r--r--  drivers/pci/controller/pcie-mediatek.c | 50
-rw-r--r--  drivers/pci/controller/pcie-rcar.c | 21
-rw-r--r--  drivers/pci/controller/pcie-xilinx-nwl.c | 9
-rw-r--r--  drivers/pci/endpoint/functions/pci-epf-test.c | 5
-rw-r--r--  drivers/pci/endpoint/pci-epf-core.c | 10
-rw-r--r--  drivers/pci/hotplug/rpadlpar_core.c | 4
-rw-r--r--  drivers/pci/hotplug/rpaphp_slot.c | 3
-rw-r--r--  drivers/pci/msi.c | 6
-rw-r--r--  drivers/pci/of.c | 58
-rw-r--r--  drivers/pci/p2pdma.c | 38
-rw-r--r--  drivers/pci/pci-acpi.c | 172
-rw-r--r--  drivers/pci/pci.c | 25
-rw-r--r--  drivers/pci/pci.h | 2
-rw-r--r--  drivers/pci/pcie/aspm.c | 47
-rw-r--r--  drivers/pci/pcie/bw_notification.c | 14
-rw-r--r--  drivers/pci/probe.c | 207
-rw-r--r--  drivers/pci/proc.c | 1
-rw-r--r--  drivers/pci/quirks.c | 77
-rw-r--r--  drivers/pci/search.c | 10
-rw-r--r--  drivers/pci/switch/switchtec.c | 42
-rw-r--r--  drivers/pci/xen-pcifront.c | 2
-rw-r--r--  drivers/platform/chrome/chromeos_laptop.c | 2
-rw-r--r--  include/linux/acpi.h | 3
-rw-r--r--  include/linux/msi.h | 18
-rw-r--r--  include/linux/pci-ecam.h | 1
-rw-r--r--  include/linux/pci-epc.h | 2
-rw-r--r--  include/linux/pci-epf.h | 3
-rw-r--r--  include/linux/pci.h | 8
-rw-r--r--  include/linux/pci_hotplug.h | 66
-rw-r--r--  include/linux/switchtec.h | 2
-rw-r--r--  include/uapi/linux/pci_regs.h | 6
-rw-r--r--  include/uapi/linux/switchtec_ioctl.h | 13

65 files changed, 1947 insertions, 773 deletions
diff --git a/Documentation/devicetree/bindings/pci/designware-pcie.txt b/Documentation/devicetree/bindings/pci/designware-pcie.txt
index c124f9bc11f3..5561a1c060d0 100644
--- a/Documentation/devicetree/bindings/pci/designware-pcie.txt
+++ b/Documentation/devicetree/bindings/pci/designware-pcie.txt
@@ -4,8 +4,11 @@ Required properties:
- compatible:
"snps,dw-pcie" for RC mode;
"snps,dw-pcie-ep" for EP mode;
-- reg: Should contain the configuration address space.
-- reg-names: Must be "config" for the PCIe configuration space.
+- reg: For DesignWare cores of version < 4.80, contains the configuration
+ address space. For DesignWare cores of version >= 4.80, contains
+ the configuration and ATU address space.
+- reg-names: Must be "config" for the PCIe configuration space and "atu" for
+ the ATU address space.
(The old way of getting the configuration address space from "ranges"
is deprecated and should be avoided.)
- num-lanes: number of lanes to use
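
For DesignWare cores >= 4.80 a driver is expected to pick up the extra "atu"
region by name rather than deriving it from the DBI space. A minimal sketch of
that lookup, assuming the usual platform-device probe path (the helper name is
illustrative):

#include <linux/io.h>
#include <linux/platform_device.h>

/* Map the optional "atu" region named in reg-names.  When it is absent
 * (core version < 4.80) the DWC core code falls back to the default ATU
 * offset inside the DBI space, so a NULL return is not treated as an error. */
static void __iomem *dwc_map_atu(struct platform_device *pdev)
{
	struct resource *res;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "atu");
	if (!res)
		return NULL;

	return devm_ioremap_resource(&pdev->dev, res);
}
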
diff --git a/Documentation/devicetree/bindings/pci/pci-keystone.txt b/Documentation/devicetree/bindings/pci/pci-keystone.txt
index 2030ee0dc4f9..47202a2938f2 100644
--- a/Documentation/devicetree/bindings/pci/pci-keystone.txt
+++ b/Documentation/devicetree/bindings/pci/pci-keystone.txt
@@ -11,16 +11,24 @@ described here as well as properties that are not applicable.
Required Properties:-
-compatibility: "ti,keystone-pcie"
-reg: index 1 is the base address and length of DW application registers.
- index 2 is the base address and length of PCI device ID register.
+compatibility: Should be "ti,keystone-pcie" for RC on Keystone2 SoC
+ Should be "ti,am654-pcie-rc" for RC on AM654x SoC
+reg: Three register ranges as listed in the reg-names property
+reg-names: "dbics" for the DesignWare PCIe registers, "app" for the
+ TI specific application registers, "config" for the
+ configuration space address
pcie_msi_intc : Interrupt controller device node for MSI IRQ chip
interrupt-cells: should be set to 1
interrupts: GIC interrupt lines connected to PCI MSI interrupt lines
+ (required if the compatible is "ti,keystone-pcie")
+msi-map: As specified in Documentation/devicetree/bindings/pci/pci-msi.txt
+ (required if the compatible is "ti,am654-pcie-rc").
ti,syscon-pcie-id : phandle to the device control module required to set device
id and vendor id.
+ti,syscon-pcie-mode : phandle to the device control module required to configure
+ PCI in either RC mode or EP mode.
Example:
pcie_msi_intc: msi-interrupt-controller {
@@ -61,3 +69,47 @@ Optional properties:-
DesignWare DT Properties not applicable for Keystone PCI
1. pcie_bus clock-names not used. Instead, a phandle to phys is used.
+
+AM654 PCIe Endpoint
+===================
+
+Required Properties:-
+
+compatibility: Should be "ti,am654-pcie-ep" for EP on AM654x SoC
+reg: Four register ranges as listed in the reg-names property
+reg-names: "dbics" for the DesignWare PCIe registers, "app" for the
+ TI specific application registers, "atu" for the
+ Address Translation Unit configuration registers and
+ "addr_space" used to map remote RC address space
+num-ib-windows: As specified in
+ Documentation/devicetree/bindings/pci/designware-pcie.txt
+num-ob-windows: As specified in
+ Documentation/devicetree/bindings/pci/designware-pcie.txt
+num-lanes: As specified in
+ Documentation/devicetree/bindings/pci/designware-pcie.txt
+power-domains: As documented by the generic PM domain bindings in
+ Documentation/devicetree/bindings/power/power_domain.txt.
+ti,syscon-pcie-mode: phandle to the device control module required to configure
+ PCI in either RC mode or EP mode.
+
+Optional properties:-
+
+phys: list of PHY specifiers (used by generic PHY framework)
+phy-names: must be "pcie-phy0", "pcie-phy1", "pcie-phyN".. based on the
+ number of lanes as specified in *num-lanes* property.
+("phys" and "phy-names" DT bindings are specified in
+Documentation/devicetree/bindings/phy/phy-bindings.txt)
+interrupts: platform interrupt for error interrupts.
+
+pcie-ep {
+ compatible = "ti,am654-pcie-ep";
+ reg = <0x5500000 0x1000>, <0x5501000 0x1000>,
+ <0x10000000 0x8000000>, <0x5506000 0x1000>;
+ reg-names = "app", "dbics", "addr_space", "atu";
+ power-domains = <&k3_pds 120>;
+ ti,syscon-pcie-mode = <&pcie0_mode>;
+ num-lanes = <1>;
+ num-ib-windows = <16>;
+ num-ob-windows = <16>;
+ interrupts = <GIC_SPI 340 IRQ_TYPE_EDGE_RISING>;
+};
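
Both the RC and EP bindings above hand the driver a ti,syscon-pcie-mode
phandle, and the driver programs the device-type field of the device control
module through a syscon regmap. A sketch of that sequence, reusing the
mask/mode values that appear in the pci-keystone.c changes later in this diff;
the register offset of 0 is an assumption for illustration only:

#include <linux/err.h>
#include <linux/mfd/syscon.h>
#include <linux/of.h>
#include <linux/regmap.h>

#define KS_PCIE_DEV_TYPE_MASK	(0x3 << 1)
#define KS_PCIE_DEV_TYPE(mode)	((mode) << 1)
#define RC			0x2

/* Select RC mode via the syscon referenced by ti,syscon-pcie-mode. */
static int ks_pcie_set_rc_mode(struct device_node *np)
{
	struct regmap *syscon;

	syscon = syscon_regmap_lookup_by_phandle(np, "ti,syscon-pcie-mode");
	if (IS_ERR(syscon))
		return PTR_ERR(syscon);

	return regmap_update_bits(syscon, 0 /* offset assumed */,
				  KS_PCIE_DEV_TYPE_MASK,
				  KS_PCIE_DEV_TYPE(RC));
}
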
diff --git a/Documentation/devicetree/bindings/pci/pci.txt b/Documentation/devicetree/bindings/pci/pci.txt
index c77981c5dd18..92c01db610df 100644
--- a/Documentation/devicetree/bindings/pci/pci.txt
+++ b/Documentation/devicetree/bindings/pci/pci.txt
@@ -24,3 +24,53 @@ driver implementation may support the following properties:
unsupported link speed, for instance, trying to do training for
unsupported link speed, etc. Must be '4' for gen4, '3' for gen3, '2'
for gen2, and '1' for gen1. Any other values are invalid.
+
+PCI-PCI Bridge properties
+-------------------------
+
+PCIe root ports and switch ports may be described explicitly in the device
+tree, as children of the host bridge node. Even though those devices are
+discoverable by probing, it might be necessary to describe properties that
+aren't provided by standard PCIe capabilities.
+
+Required properties:
+
+- reg:
+ Identifies the PCI-PCI bridge. As defined in the IEEE Std 1275-1994
+ document, it is a five-cell address encoded as (phys.hi phys.mid
+ phys.lo size.hi size.lo). phys.hi should contain the device's BDF as
+ 0b00000000 bbbbbbbb dddddfff 00000000. The other cells should be zero.
+
+ The bus number is defined by firmware, through the standard bridge
+ configuration mechanism. If this port is a switch port, then firmware
+ allocates the bus number and writes it into the Secondary Bus Number
+ register of the bridge directly above this port. Otherwise, the bus
+ number of a root port is the first number in the bus-range property,
+ defaulting to zero.
+
+ If firmware leaves the ARI Forwarding Enable bit set in the bridge
+ above this port, then phys.hi contains the 8-bit function number as
+ 0b00000000 bbbbbbbb ffffffff 00000000. Note that the PCIe specification
+ recommends that firmware only leaves ARI enabled when it knows that the
+ OS is ARI-aware.
+
+Optional properties:
+
+- external-facing:
+ When present, the port is external-facing. All bridges and endpoints
+ downstream of this port are external to the machine. The OS can, for
+ example, use this information to identify devices that cannot be
+ trusted with relaxed DMA protection, as users could easily attach
+ malicious devices to this port.
+
+Example:
+
+pcie@10000000 {
+ compatible = "pci-host-ecam-generic";
+ ...
+ pcie@0008 {
+ /* Root port 00:01.0 is external-facing */
+ reg = <0x00000800 0 0 0 0>;
+ external-facing;
+ };
+};
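
A small helper makes the phys.hi encoding above concrete; for the root port
00:01.0 in the example it yields 0x00000800, which matches the reg value shown
(the helper name is illustrative):

#include <stdint.h>

/* phys.hi = 0b00000000 bbbbbbbb dddddfff 00000000 (non-ARI case) */
static uint32_t pci_bridge_phys_hi(uint8_t bus, uint8_t dev, uint8_t fn)
{
	return ((uint32_t)bus << 16) |
	       ((uint32_t)(dev & 0x1f) << 11) |
	       ((uint32_t)(fn & 0x07) << 8);
}
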
diff --git a/MAINTAINERS b/MAINTAINERS
index e17ebf70b548..b3d64a7daae8 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -11880,7 +11880,8 @@ F: include/linux/switchtec.h
F: drivers/ntb/hw/mscc/
PCI DRIVER FOR MOBIVEIL PCIE IP
-M: Subrahmanya Lingappa <l.subrahmanya@mobiveil.co.in>
+M: Karthikeyan Mitran <m.karthikeyan@mobiveil.co.in>
+M: Hou Zhiqiang <Zhiqiang.Hou@nxp.com>
L: linux-pci@vger.kernel.org
S: Supported
F: Documentation/devicetree/bindings/pci/mobiveil-pcie.txt
@@ -12014,6 +12015,12 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/lpieralisi/pci.git/
S: Supported
F: drivers/pci/controller/
+PCIE DRIVER FOR ANNAPURNA LABS
+M: Jonathan Chocron <jonnyc@amazon.com>
+L: linux-pci@vger.kernel.org
+S: Maintained
+F: drivers/pci/controller/dwc/pcie-al.c
+
PCIE DRIVER FOR AMLOGIC MESON
M: Yue Wang <yue.wang@Amlogic.com>
L: linux-pci@vger.kernel.org
diff --git a/arch/arm64/boot/dts/mediatek/mt2712e.dtsi b/arch/arm64/boot/dts/mediatek/mt2712e.dtsi
index 976d92a94738..43307bad3f0d 100644
--- a/arch/arm64/boot/dts/mediatek/mt2712e.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt2712e.dtsi
@@ -819,7 +819,6 @@
#size-cells = <2>;
#interrupt-cells = <1>;
ranges;
- num-lanes = <1>;
interrupt-map-mask = <0 0 0 7>;
interrupt-map = <0 0 0 1 &pcie_intc0 0>,
<0 0 0 2 &pcie_intc0 1>,
@@ -840,7 +839,6 @@
#size-cells = <2>;
#interrupt-cells = <1>;
ranges;
- num-lanes = <1>;
interrupt-map-mask = <0 0 0 7>;
interrupt-map = <0 0 0 1 &pcie_intc1 0>,
<0 0 0 2 &pcie_intc1 1>,
diff --git a/arch/powerpc/platforms/powernv/npu-dma.c b/arch/powerpc/platforms/powernv/npu-dma.c
index dc23d9d2a7d9..495550432f3d 100644
--- a/arch/powerpc/platforms/powernv/npu-dma.c
+++ b/arch/powerpc/platforms/powernv/npu-dma.c
@@ -1213,9 +1213,8 @@ int pnv_npu2_map_lpar_dev(struct pci_dev *gpdev, unsigned int lparid,
* Currently we only support radix and non-zero LPCR only makes sense
* for hash tables so skiboot expects the LPCR parameter to be a zero.
*/
- ret = opal_npu_map_lpar(nphb->opal_id,
- PCI_DEVID(gpdev->bus->number, gpdev->devfn), lparid,
- 0 /* LPCR bits */);
+ ret = opal_npu_map_lpar(nphb->opal_id, pci_dev_id(gpdev), lparid,
+ 0 /* LPCR bits */);
if (ret) {
dev_err(&gpdev->dev, "Error %d mapping device to LPAR\n", ret);
return ret;
@@ -1224,7 +1223,7 @@ int pnv_npu2_map_lpar_dev(struct pci_dev *gpdev, unsigned int lparid,
dev_dbg(&gpdev->dev, "init context opalid=%llu msr=%lx\n",
nphb->opal_id, msr);
ret = opal_npu_init_context(nphb->opal_id, 0/*__unused*/, msr,
- PCI_DEVID(gpdev->bus->number, gpdev->devfn));
+ pci_dev_id(gpdev));
if (ret < 0)
dev_err(&gpdev->dev, "Failed to init context: %d\n", ret);
else
@@ -1258,7 +1257,7 @@ int pnv_npu2_unmap_lpar_dev(struct pci_dev *gpdev)
dev_dbg(&gpdev->dev, "destroy context opalid=%llu\n",
nphb->opal_id);
ret = opal_npu_destroy_context(nphb->opal_id, 0/*__unused*/,
- PCI_DEVID(gpdev->bus->number, gpdev->devfn));
+ pci_dev_id(gpdev));
if (ret < 0) {
dev_err(&gpdev->dev, "Failed to destroy context: %d\n", ret);
return ret;
@@ -1266,9 +1265,8 @@ int pnv_npu2_unmap_lpar_dev(struct pci_dev *gpdev)
/* Set LPID to 0 anyway, just to be safe */
dev_dbg(&gpdev->dev, "Map LPAR opalid=%llu lparid=0\n", nphb->opal_id);
- ret = opal_npu_map_lpar(nphb->opal_id,
- PCI_DEVID(gpdev->bus->number, gpdev->devfn), 0 /*LPID*/,
- 0 /* LPCR bits */);
+ ret = opal_npu_map_lpar(nphb->opal_id, pci_dev_id(gpdev), 0 /*LPID*/,
+ 0 /* LPCR bits */);
if (ret)
dev_err(&gpdev->dev, "Error %d mapping device to LPAR\n", ret);
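
These call sites, like the identical conversions in the IOMMU, KFD and network
drivers below, rely on the pci_dev_id() helper that this series adds to
include/linux/pci.h; it is essentially a named wrapper around the old
open-coded expression:

#include <linux/pci.h>

/* Expected shape of the helper (see the include/linux/pci.h change). */
static inline u16 pci_dev_id(struct pci_dev *dev)
{
	return PCI_DEVID(dev->bus->number, dev->devfn);
}
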
diff --git a/arch/x86/pci/irq.c b/arch/x86/pci/irq.c
index 52e55108404e..d3a73f9335e1 100644
--- a/arch/x86/pci/irq.c
+++ b/arch/x86/pci/irq.c
@@ -1119,6 +1119,8 @@ static const struct dmi_system_id pciirq_dmi_table[] __initconst = {
void __init pcibios_irq_init(void)
{
+ struct irq_routing_table *rtable = NULL;
+
DBG(KERN_DEBUG "PCI: IRQ init\n");
if (raw_pci_ops == NULL)
@@ -1129,8 +1131,10 @@ void __init pcibios_irq_init(void)
pirq_table = pirq_find_routing_table();
#ifdef CONFIG_PCI_BIOS
- if (!pirq_table && (pci_probe & PCI_BIOS_IRQ_SCAN))
+ if (!pirq_table && (pci_probe & PCI_BIOS_IRQ_SCAN)) {
pirq_table = pcibios_get_irq_routing_table();
+ rtable = pirq_table;
+ }
#endif
if (pirq_table) {
pirq_peer_trick();
@@ -1145,8 +1149,10 @@ void __init pcibios_irq_init(void)
* If we're using the I/O APIC, avoid using the PCI IRQ
* routing table
*/
- if (io_apic_assign_pci_irqs)
+ if (io_apic_assign_pci_irqs) {
+ kfree(rtable);
pirq_table = NULL;
+ }
}
x86_init.pci.fixup_irqs();
diff --git a/drivers/acpi/pci_mcfg.c b/drivers/acpi/pci_mcfg.c
index a4e8432fc2fb..b42be067fb83 100644
--- a/drivers/acpi/pci_mcfg.c
+++ b/drivers/acpi/pci_mcfg.c
@@ -52,6 +52,18 @@ struct mcfg_fixup {
static struct mcfg_fixup mcfg_quirks[] = {
/* { OEM_ID, OEM_TABLE_ID, REV, SEGMENT, BUS_RANGE, ops, cfgres }, */
+#define AL_ECAM(table_id, rev, seg, ops) \
+ { "AMAZON", table_id, rev, seg, MCFG_BUS_ANY, ops }
+
+ AL_ECAM("GRAVITON", 0, 0, &al_pcie_ops),
+ AL_ECAM("GRAVITON", 0, 1, &al_pcie_ops),
+ AL_ECAM("GRAVITON", 0, 2, &al_pcie_ops),
+ AL_ECAM("GRAVITON", 0, 3, &al_pcie_ops),
+ AL_ECAM("GRAVITON", 0, 4, &al_pcie_ops),
+ AL_ECAM("GRAVITON", 0, 5, &al_pcie_ops),
+ AL_ECAM("GRAVITON", 0, 6, &al_pcie_ops),
+ AL_ECAM("GRAVITON", 0, 7, &al_pcie_ops),
+
#define QCOM_ECAM32(seg) \
{ "QCOM ", "QDF2432 ", 1, seg, MCFG_BUS_ANY, &pci_32b_ops }
diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c
index 707aafc7c2aa..c36781a9b493 100644
--- a/drivers/acpi/pci_root.c
+++ b/drivers/acpi/pci_root.c
@@ -145,6 +145,7 @@ static struct pci_osc_bit_struct pci_osc_support_bit[] = {
{ OSC_PCI_CLOCK_PM_SUPPORT, "ClockPM" },
{ OSC_PCI_SEGMENT_GROUPS_SUPPORT, "Segments" },
{ OSC_PCI_MSI_SUPPORT, "MSI" },
+ { OSC_PCI_HPX_TYPE_3_SUPPORT, "HPX-Type3" },
};
static struct pci_osc_bit_struct pci_osc_control_bit[] = {
@@ -446,6 +447,7 @@ static void negotiate_os_control(struct acpi_pci_root *root, int *no_aspm,
* PCI domains, so we indicate this in _OSC support capabilities.
*/
support = OSC_PCI_SEGMENT_GROUPS_SUPPORT;
+ support |= OSC_PCI_HPX_TYPE_3_SUPPORT;
if (pci_ext_cfg_avail())
support |= OSC_PCI_EXT_CONFIG_SUPPORT;
if (pcie_aspm_support_enabled())
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
index 09da91644f9f..b0c59a313049 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
@@ -1270,8 +1270,7 @@ int kfd_topology_add_device(struct kfd_dev *gpu)
dev->node_props.vendor_id = gpu->pdev->vendor;
dev->node_props.device_id = gpu->pdev->device;
- dev->node_props.location_id = PCI_DEVID(gpu->pdev->bus->number,
- gpu->pdev->devfn);
+ dev->node_props.location_id = pci_dev_id(gpu->pdev);
dev->node_props.max_engine_clk_fcompute =
amdgpu_amdkfd_get_max_engine_clock_in_mhz(dev->gpu->kgd);
dev->node_props.max_engine_clk_ccompute =
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index b319e51c379b..8e8121b14c36 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -165,7 +165,7 @@ static inline u16 get_pci_device_id(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
- return PCI_DEVID(pdev->bus->number, pdev->devfn);
+ return pci_dev_id(pdev);
}
static inline int get_acpihid_device_id(struct device *dev,
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 87274b54febd..a772dedb05b9 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -1391,7 +1391,7 @@ static void iommu_enable_dev_iotlb(struct device_domain_info *info)
/* pdev will be returned if device is not a vf */
pf_pdev = pci_physfn(pdev);
- info->pfsid = PCI_DEVID(pf_pdev->bus->number, pf_pdev->devfn);
+ info->pfsid = pci_dev_id(pf_pdev);
}
#ifdef CONFIG_INTEL_IOMMU_SVM
diff --git a/drivers/iommu/intel_irq_remapping.c b/drivers/iommu/intel_irq_remapping.c
index 2d74641b7f7b..871458b873be 100644
--- a/drivers/iommu/intel_irq_remapping.c
+++ b/drivers/iommu/intel_irq_remapping.c
@@ -424,7 +424,7 @@ static int set_msi_sid(struct irte *irte, struct pci_dev *dev)
set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16, data.alias);
else
set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16,
- PCI_DEVID(dev->bus->number, dev->devfn));
+ pci_dev_id(dev));
return 0;
}
diff --git a/drivers/misc/pci_endpoint_test.c b/drivers/misc/pci_endpoint_test.c
index 29582fe57151..7b015f2a1c6f 100644
--- a/drivers/misc/pci_endpoint_test.c
+++ b/drivers/misc/pci_endpoint_test.c
@@ -75,6 +75,11 @@
#define PCI_ENDPOINT_TEST_IRQ_TYPE 0x24
#define PCI_ENDPOINT_TEST_IRQ_NUMBER 0x28
+#define PCI_DEVICE_ID_TI_AM654 0xb00c
+
+#define is_am654_pci_dev(pdev) \
+ ((pdev)->device == PCI_DEVICE_ID_TI_AM654)
+
static DEFINE_IDA(pci_endpoint_test_ida);
#define to_endpoint_test(priv) container_of((priv), struct pci_endpoint_test, \
@@ -588,6 +593,7 @@ static long pci_endpoint_test_ioctl(struct file *file, unsigned int cmd,
int ret = -EINVAL;
enum pci_barno bar;
struct pci_endpoint_test *test = to_endpoint_test(file->private_data);
+ struct pci_dev *pdev = test->pdev;
mutex_lock(&test->mutex);
switch (cmd) {
@@ -595,6 +601,8 @@ static long pci_endpoint_test_ioctl(struct file *file, unsigned int cmd,
bar = arg;
if (bar < 0 || bar > 5)
goto ret;
+ if (is_am654_pci_dev(pdev) && bar == BAR_0)
+ goto ret;
ret = pci_endpoint_test_bar(test, bar);
break;
case PCITEST_LEGACY_IRQ:
@@ -662,6 +670,7 @@ static int pci_endpoint_test_probe(struct pci_dev *pdev,
data = (struct pci_endpoint_test_data *)ent->driver_data;
if (data) {
test_reg_bar = data->test_reg_bar;
+ test->test_reg_bar = test_reg_bar;
test->alignment = data->alignment;
irq_type = data->irq_type;
}
@@ -785,11 +794,20 @@ static void pci_endpoint_test_remove(struct pci_dev *pdev)
pci_disable_device(pdev);
}
+static const struct pci_endpoint_test_data am654_data = {
+ .test_reg_bar = BAR_2,
+ .alignment = SZ_64K,
+ .irq_type = IRQ_TYPE_MSI,
+};
+
static const struct pci_device_id pci_endpoint_test_tbl[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_DRA74x) },
{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_DRA72x) },
{ PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, 0x81c0) },
{ PCI_DEVICE(PCI_VENDOR_ID_SYNOPSYS, 0xedda) },
+ { PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_AM654),
+ .driver_data = (kernel_ulong_t)&am654_data
+ },
{ }
};
MODULE_DEVICE_TABLE(pci, pci_endpoint_test_tbl);
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index c29dde064078..65eb095f94b8 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -7182,8 +7182,7 @@ static int r8169_mdio_register(struct rtl8169_private *tp)
new_bus->priv = tp;
new_bus->parent = &pdev->dev;
new_bus->irq[0] = PHY_IGNORE_INTERRUPT;
- snprintf(new_bus->id, MII_BUS_ID_SIZE, "r8169-%x",
- PCI_DEVID(pdev->bus->number, pdev->devfn));
+ snprintf(new_bus->id, MII_BUS_ID_SIZE, "r8169-%x", pci_dev_id(pdev));
new_bus->read = r8169_mdio_read_reg;
new_bus->write = r8169_mdio_write_reg;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
index d819e8eaba12..5cdc17381c80 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
@@ -204,7 +204,7 @@ static int quark_default_data(struct pci_dev *pdev,
ret = 1;
}
- plat->bus_id = PCI_DEVID(pdev->bus->number, pdev->devfn);
+ plat->bus_id = pci_dev_id(pdev);
plat->phy_addr = ret;
plat->interface = PHY_INTERFACE_MODE_RMII;
diff --git a/drivers/pci/Makefile b/drivers/pci/Makefile
index 657d642fcc67..28cdd8c0213a 100644
--- a/drivers/pci/Makefile
+++ b/drivers/pci/Makefile
@@ -10,10 +10,10 @@ obj-$(CONFIG_PCI) += access.o bus.o probe.o host-bridge.o \
ifdef CONFIG_PCI
obj-$(CONFIG_PROC_FS) += proc.o
obj-$(CONFIG_SYSFS) += slot.o
-obj-$(CONFIG_OF) += of.o
obj-$(CONFIG_ACPI) += pci-acpi.o
endif
+obj-$(CONFIG_OF) += of.o
obj-$(CONFIG_PCI_QUIRKS) += quirks.o
obj-$(CONFIG_PCIEPORTBUS) += pcie/
obj-$(CONFIG_HOTPLUG_PCI) += hotplug/
diff --git a/drivers/pci/controller/dwc/Kconfig b/drivers/pci/controller/dwc/Kconfig
index 6ea74b1c0d94..a6ce1ee51b4c 100644
--- a/drivers/pci/controller/dwc/Kconfig
+++ b/drivers/pci/controller/dwc/Kconfig
@@ -103,15 +103,32 @@ config PCIE_SPEAR13XX
Say Y here if you want PCIe support on SPEAr13XX SoCs.
config PCI_KEYSTONE
- bool "TI Keystone PCIe controller"
- depends on ARCH_KEYSTONE || (ARM && COMPILE_TEST)
+ bool
+
+config PCI_KEYSTONE_HOST
+ bool "PCI Keystone Host Mode"
+ depends on ARCH_KEYSTONE || ARCH_K3 || ((ARM || ARM64) && COMPILE_TEST)
depends on PCI_MSI_IRQ_DOMAIN
select PCIE_DW_HOST
+ select PCI_KEYSTONE
+ default y
help
- Say Y here if you want to enable PCI controller support on Keystone
- SoCs. The PCI controller on Keystone is based on DesignWare hardware
- and therefore the driver re-uses the DesignWare core functions to
- implement the driver.
+ Enables support for the PCIe controller in the Keystone SoC to
+ work in host mode. The PCI controller on Keystone is based on
+ DesignWare hardware and therefore the driver re-uses the
+ DesignWare core functions to implement the driver.
+
+config PCI_KEYSTONE_EP
+ bool "PCI Keystone Endpoint Mode"
+ depends on ARCH_KEYSTONE || ARCH_K3 || ((ARM || ARM64) && COMPILE_TEST)
+ depends on PCI_ENDPOINT
+ select PCIE_DW_EP
+ select PCI_KEYSTONE
+ help
+ Enables support for the PCIe controller in the Keystone SoC to
+ work in endpoint mode. The PCI controller on Keystone is based
+ on DesignWare hardware and therefore the driver re-uses the
+ DesignWare core functions to implement the driver.
config PCI_LAYERSCAPE
bool "Freescale Layerscape PCIe controller"
diff --git a/drivers/pci/controller/dwc/Makefile b/drivers/pci/controller/dwc/Makefile
index b5f3b83cc2b3..b085dfd4fab7 100644
--- a/drivers/pci/controller/dwc/Makefile
+++ b/drivers/pci/controller/dwc/Makefile
@@ -28,5 +28,6 @@ obj-$(CONFIG_PCIE_UNIPHIER) += pcie-uniphier.o
# depending on whether ACPI, the DT driver, or both are enabled.
ifdef CONFIG_PCI
+obj-$(CONFIG_ARM64) += pcie-al.o
obj-$(CONFIG_ARM64) += pcie-hisi.o
endif
diff --git a/drivers/pci/controller/dwc/pci-dra7xx.c b/drivers/pci/controller/dwc/pci-dra7xx.c
index ae84a69ae63a..b287dbf6914c 100644
--- a/drivers/pci/controller/dwc/pci-dra7xx.c
+++ b/drivers/pci/controller/dwc/pci-dra7xx.c
@@ -406,7 +406,7 @@ dra7xx_pcie_get_features(struct dw_pcie_ep *ep)
return &dra7xx_pcie_epc_features;
}
-static struct dw_pcie_ep_ops pcie_ep_ops = {
+static const struct dw_pcie_ep_ops pcie_ep_ops = {
.ep_init = dra7xx_pcie_ep_init,
.raise_irq = dra7xx_pcie_raise_irq,
.get_features = dra7xx_pcie_get_features,
diff --git a/drivers/pci/controller/dwc/pci-imx6.c b/drivers/pci/controller/dwc/pci-imx6.c
index 3d627f94a166..9b5cb5b70389 100644
--- a/drivers/pci/controller/dwc/pci-imx6.c
+++ b/drivers/pci/controller/dwc/pci-imx6.c
@@ -52,6 +52,7 @@ enum imx6_pcie_variants {
#define IMX6_PCIE_FLAG_IMX6_PHY BIT(0)
#define IMX6_PCIE_FLAG_IMX6_SPEED_CHANGE BIT(1)
+#define IMX6_PCIE_FLAG_SUPPORTS_SUSPEND BIT(2)
struct imx6_pcie_drvdata {
enum imx6_pcie_variants variant;
@@ -89,9 +90,8 @@ struct imx6_pcie {
};
/* Parameters for the waiting for PCIe PHY PLL to lock on i.MX7 */
-#define PHY_PLL_LOCK_WAIT_MAX_RETRIES 2000
-#define PHY_PLL_LOCK_WAIT_USLEEP_MIN 50
#define PHY_PLL_LOCK_WAIT_USLEEP_MAX 200
+#define PHY_PLL_LOCK_WAIT_TIMEOUT (2000 * PHY_PLL_LOCK_WAIT_USLEEP_MAX)
/* PCIe Root Complex registers (memory-mapped) */
#define PCIE_RC_IMX6_MSI_CAP 0x50
@@ -104,34 +104,29 @@ struct imx6_pcie {
/* PCIe Port Logic registers (memory-mapped) */
#define PL_OFFSET 0x700
-#define PCIE_PL_PFLR (PL_OFFSET + 0x08)
-#define PCIE_PL_PFLR_LINK_STATE_MASK (0x3f << 16)
-#define PCIE_PL_PFLR_FORCE_LINK (1 << 15)
-#define PCIE_PHY_DEBUG_R0 (PL_OFFSET + 0x28)
-#define PCIE_PHY_DEBUG_R1 (PL_OFFSET + 0x2c)
#define PCIE_PHY_CTRL (PL_OFFSET + 0x114)
-#define PCIE_PHY_CTRL_DATA_LOC 0
-#define PCIE_PHY_CTRL_CAP_ADR_LOC 16
-#define PCIE_PHY_CTRL_CAP_DAT_LOC 17
-#define PCIE_PHY_CTRL_WR_LOC 18
-#define PCIE_PHY_CTRL_RD_LOC 19
+#define PCIE_PHY_CTRL_DATA(x) FIELD_PREP(GENMASK(15, 0), (x))
+#define PCIE_PHY_CTRL_CAP_ADR BIT(16)
+#define PCIE_PHY_CTRL_CAP_DAT BIT(17)
+#define PCIE_PHY_CTRL_WR BIT(18)
+#define PCIE_PHY_CTRL_RD BIT(19)
#define PCIE_PHY_STAT (PL_OFFSET + 0x110)
-#define PCIE_PHY_STAT_ACK_LOC 16
+#define PCIE_PHY_STAT_ACK BIT(16)
#define PCIE_LINK_WIDTH_SPEED_CONTROL 0x80C
/* PHY registers (not memory-mapped) */
#define PCIE_PHY_ATEOVRD 0x10
-#define PCIE_PHY_ATEOVRD_EN (0x1 << 2)
+#define PCIE_PHY_ATEOVRD_EN BIT(2)
#define PCIE_PHY_ATEOVRD_REF_CLKDIV_SHIFT 0
#define PCIE_PHY_ATEOVRD_REF_CLKDIV_MASK 0x1
#define PCIE_PHY_MPLL_OVRD_IN_LO 0x11
#define PCIE_PHY_MPLL_MULTIPLIER_SHIFT 2
#define PCIE_PHY_MPLL_MULTIPLIER_MASK 0x7f
-#define PCIE_PHY_MPLL_MULTIPLIER_OVRD (0x1 << 9)
+#define PCIE_PHY_MPLL_MULTIPLIER_OVRD BIT(9)
#define PCIE_PHY_RX_ASIC_OUT 0x100D
#define PCIE_PHY_RX_ASIC_OUT_VALID (1 << 0)
@@ -154,19 +149,19 @@ struct imx6_pcie {
#define PCIE_PHY_CMN_REG26_ATT_MODE 0xBC
#define PHY_RX_OVRD_IN_LO 0x1005
-#define PHY_RX_OVRD_IN_LO_RX_DATA_EN (1 << 5)
-#define PHY_RX_OVRD_IN_LO_RX_PLL_EN (1 << 3)
+#define PHY_RX_OVRD_IN_LO_RX_DATA_EN BIT(5)
+#define PHY_RX_OVRD_IN_LO_RX_PLL_EN BIT(3)
-static int pcie_phy_poll_ack(struct imx6_pcie *imx6_pcie, int exp_val)
+static int pcie_phy_poll_ack(struct imx6_pcie *imx6_pcie, bool exp_val)
{
struct dw_pcie *pci = imx6_pcie->pci;
- u32 val;
+ bool val;
u32 max_iterations = 10;
u32 wait_counter = 0;
do {
- val = dw_pcie_readl_dbi(pci, PCIE_PHY_STAT);
- val = (val >> PCIE_PHY_STAT_ACK_LOC) & 0x1;
+ val = dw_pcie_readl_dbi(pci, PCIE_PHY_STAT) &
+ PCIE_PHY_STAT_ACK;
wait_counter++;
if (val == exp_val)
@@ -184,27 +179,27 @@ static int pcie_phy_wait_ack(struct imx6_pcie *imx6_pcie, int addr)
u32 val;
int ret;
- val = addr << PCIE_PHY_CTRL_DATA_LOC;
+ val = PCIE_PHY_CTRL_DATA(addr);
dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, val);
- val |= (0x1 << PCIE_PHY_CTRL_CAP_ADR_LOC);
+ val |= PCIE_PHY_CTRL_CAP_ADR;
dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, val);
- ret = pcie_phy_poll_ack(imx6_pcie, 1);
+ ret = pcie_phy_poll_ack(imx6_pcie, true);
if (ret)
return ret;
- val = addr << PCIE_PHY_CTRL_DATA_LOC;
+ val = PCIE_PHY_CTRL_DATA(addr);
dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, val);
- return pcie_phy_poll_ack(imx6_pcie, 0);
+ return pcie_phy_poll_ack(imx6_pcie, false);
}
/* Read from the 16-bit PCIe PHY control registers (not memory-mapped) */
-static int pcie_phy_read(struct imx6_pcie *imx6_pcie, int addr, int *data)
+static int pcie_phy_read(struct imx6_pcie *imx6_pcie, int addr, u16 *data)
{
struct dw_pcie *pci = imx6_pcie->pci;
- u32 val, phy_ctl;
+ u32 phy_ctl;
int ret;
ret = pcie_phy_wait_ack(imx6_pcie, addr);
@@ -212,23 +207,22 @@ static int pcie_phy_read(struct imx6_pcie *imx6_pcie, int addr, int *data)
return ret;
/* assert Read signal */
- phy_ctl = 0x1 << PCIE_PHY_CTRL_RD_LOC;
+ phy_ctl = PCIE_PHY_CTRL_RD;
dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, phy_ctl);
- ret = pcie_phy_poll_ack(imx6_pcie, 1);
+ ret = pcie_phy_poll_ack(imx6_pcie, true);
if (ret)
return ret;
- val = dw_pcie_readl_dbi(pci, PCIE_PHY_STAT);
- *data = val & 0xffff;
+ *data = dw_pcie_readl_dbi(pci, PCIE_PHY_STAT);
/* deassert Read signal */
dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, 0x00);
- return pcie_phy_poll_ack(imx6_pcie, 0);
+ return pcie_phy_poll_ack(imx6_pcie, false);
}
-static int pcie_phy_write(struct imx6_pcie *imx6_pcie, int addr, int data)
+static int pcie_phy_write(struct imx6_pcie *imx6_pcie, int addr, u16 data)
{
struct dw_pcie *pci = imx6_pcie->pci;
u32 var;
@@ -240,41 +234,41 @@ static int pcie_phy_write(struct imx6_pcie *imx6_pcie, int addr, int data)
if (ret)
return ret;
- var = data << PCIE_PHY_CTRL_DATA_LOC;
+ var = PCIE_PHY_CTRL_DATA(data);
dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var);
/* capture data */
- var |= (0x1 << PCIE_PHY_CTRL_CAP_DAT_LOC);
+ var |= PCIE_PHY_CTRL_CAP_DAT;
dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var);
- ret = pcie_phy_poll_ack(imx6_pcie, 1);
+ ret = pcie_phy_poll_ack(imx6_pcie, true);
if (ret)
return ret;
/* deassert cap data */
- var = data << PCIE_PHY_CTRL_DATA_LOC;
+ var = PCIE_PHY_CTRL_DATA(data);
dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var);
/* wait for ack de-assertion */
- ret = pcie_phy_poll_ack(imx6_pcie, 0);
+ ret = pcie_phy_poll_ack(imx6_pcie, false);
if (ret)
return ret;
/* assert wr signal */
- var = 0x1 << PCIE_PHY_CTRL_WR_LOC;
+ var = PCIE_PHY_CTRL_WR;
dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var);
/* wait for ack */
- ret = pcie_phy_poll_ack(imx6_pcie, 1);
+ ret = pcie_phy_poll_ack(imx6_pcie, true);
if (ret)
return ret;
/* deassert wr signal */
- var = data << PCIE_PHY_CTRL_DATA_LOC;
+ var = PCIE_PHY_CTRL_DATA(data);
dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var);
/* wait for ack de-assertion */
- ret = pcie_phy_poll_ack(imx6_pcie, 0);
+ ret = pcie_phy_poll_ack(imx6_pcie, false);
if (ret)
return ret;
@@ -285,7 +279,7 @@ static int pcie_phy_write(struct imx6_pcie *imx6_pcie, int addr, int data)
static void imx6_pcie_reset_phy(struct imx6_pcie *imx6_pcie)
{
- u32 tmp;
+ u16 tmp;
if (!(imx6_pcie->drvdata->flags & IMX6_PCIE_FLAG_IMX6_PHY))
return;
@@ -455,7 +449,7 @@ static int imx6_pcie_enable_ref_clk(struct imx6_pcie *imx6_pcie)
* reset time is too short, cannot meet the requirement.
* add one ~10us delay here.
*/
- udelay(10);
+ usleep_range(10, 100);
regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
IMX6Q_GPR1_PCIE_REF_CLK_EN, 1 << 16);
break;
@@ -488,20 +482,14 @@ static int imx6_pcie_enable_ref_clk(struct imx6_pcie *imx6_pcie)
static void imx7d_pcie_wait_for_phy_pll_lock(struct imx6_pcie *imx6_pcie)
{
u32 val;
- unsigned int retries;
struct device *dev = imx6_pcie->pci->dev;
- for (retries = 0; retries < PHY_PLL_LOCK_WAIT_MAX_RETRIES; retries++) {
- regmap_read(imx6_pcie->iomuxc_gpr, IOMUXC_GPR22, &val);
-
- if (val & IMX7D_GPR22_PCIE_PHY_PLL_LOCKED)
- return;
-
- usleep_range(PHY_PLL_LOCK_WAIT_USLEEP_MIN,
- PHY_PLL_LOCK_WAIT_USLEEP_MAX);
- }
-
- dev_err(dev, "PCIe PLL lock timeout\n");
+ if (regmap_read_poll_timeout(imx6_pcie->iomuxc_gpr,
+ IOMUXC_GPR22, val,
+ val & IMX7D_GPR22_PCIE_PHY_PLL_LOCKED,
+ PHY_PLL_LOCK_WAIT_USLEEP_MAX,
+ PHY_PLL_LOCK_WAIT_TIMEOUT))
+ dev_err(dev, "PCIe PLL lock timeout\n");
}
static void imx6_pcie_deassert_core_reset(struct imx6_pcie *imx6_pcie)
@@ -687,7 +675,7 @@ static int imx6_setup_phy_mpll(struct imx6_pcie *imx6_pcie)
{
unsigned long phy_rate = clk_get_rate(imx6_pcie->pcie_phy);
int mult, div;
- u32 val;
+ u16 val;
if (!(imx6_pcie->drvdata->flags & IMX6_PCIE_FLAG_IMX6_PHY))
return 0;
@@ -730,21 +718,6 @@ static int imx6_setup_phy_mpll(struct imx6_pcie *imx6_pcie)
return 0;
}
-static int imx6_pcie_wait_for_link(struct imx6_pcie *imx6_pcie)
-{
- struct dw_pcie *pci = imx6_pcie->pci;
- struct device *dev = pci->dev;
-
- /* check if the link is up or not */
- if (!dw_pcie_wait_for_link(pci))
- return 0;
-
- dev_dbg(dev, "DEBUG_R0: 0x%08x, DEBUG_R1: 0x%08x\n",
- dw_pcie_readl_dbi(pci, PCIE_PHY_DEBUG_R0),
- dw_pcie_readl_dbi(pci, PCIE_PHY_DEBUG_R1));
- return -ETIMEDOUT;
-}
-
static int imx6_pcie_wait_for_speed_change(struct imx6_pcie *imx6_pcie)
{
struct dw_pcie *pci = imx6_pcie->pci;
@@ -761,7 +734,7 @@ static int imx6_pcie_wait_for_speed_change(struct imx6_pcie *imx6_pcie)
}
dev_err(dev, "Speed change timeout\n");
- return -EINVAL;
+ return -ETIMEDOUT;
}
static void imx6_pcie_ltssm_enable(struct device *dev)
@@ -803,7 +776,7 @@ static int imx6_pcie_establish_link(struct imx6_pcie *imx6_pcie)
/* Start LTSSM. */
imx6_pcie_ltssm_enable(dev);
- ret = imx6_pcie_wait_for_link(imx6_pcie);
+ ret = dw_pcie_wait_for_link(pci);
if (ret)
goto err_reset_phy;
@@ -841,7 +814,7 @@ static int imx6_pcie_establish_link(struct imx6_pcie *imx6_pcie)
}
/* Make sure link training is finished as well! */
- ret = imx6_pcie_wait_for_link(imx6_pcie);
+ ret = dw_pcie_wait_for_link(pci);
if (ret) {
dev_err(dev, "Failed to bring link up!\n");
goto err_reset_phy;
@@ -856,8 +829,8 @@ static int imx6_pcie_establish_link(struct imx6_pcie *imx6_pcie)
err_reset_phy:
dev_dbg(dev, "PHY DEBUG_R0=0x%08x DEBUG_R1=0x%08x\n",
- dw_pcie_readl_dbi(pci, PCIE_PHY_DEBUG_R0),
- dw_pcie_readl_dbi(pci, PCIE_PHY_DEBUG_R1));
+ dw_pcie_readl_dbi(pci, PCIE_PORT_DEBUG0),
+ dw_pcie_readl_dbi(pci, PCIE_PORT_DEBUG1));
imx6_pcie_reset_phy(imx6_pcie);
return ret;
}
@@ -993,17 +966,11 @@ static void imx6_pcie_clk_disable(struct imx6_pcie *imx6_pcie)
}
}
-static inline bool imx6_pcie_supports_suspend(struct imx6_pcie *imx6_pcie)
-{
- return (imx6_pcie->drvdata->variant == IMX7D ||
- imx6_pcie->drvdata->variant == IMX6SX);
-}
-
static int imx6_pcie_suspend_noirq(struct device *dev)
{
struct imx6_pcie *imx6_pcie = dev_get_drvdata(dev);
- if (!imx6_pcie_supports_suspend(imx6_pcie))
+ if (!(imx6_pcie->drvdata->flags & IMX6_PCIE_FLAG_SUPPORTS_SUSPEND))
return 0;
imx6_pcie_pm_turnoff(imx6_pcie);
@@ -1019,7 +986,7 @@ static int imx6_pcie_resume_noirq(struct device *dev)
struct imx6_pcie *imx6_pcie = dev_get_drvdata(dev);
struct pcie_port *pp = &imx6_pcie->pci->pp;
- if (!imx6_pcie_supports_suspend(imx6_pcie))
+ if (!(imx6_pcie->drvdata->flags & IMX6_PCIE_FLAG_SUPPORTS_SUSPEND))
return 0;
imx6_pcie_assert_core_reset(imx6_pcie);
@@ -1249,7 +1216,8 @@ static const struct imx6_pcie_drvdata drvdata[] = {
[IMX6SX] = {
.variant = IMX6SX,
.flags = IMX6_PCIE_FLAG_IMX6_PHY |
- IMX6_PCIE_FLAG_IMX6_SPEED_CHANGE,
+ IMX6_PCIE_FLAG_IMX6_SPEED_CHANGE |
+ IMX6_PCIE_FLAG_SUPPORTS_SUSPEND,
},
[IMX6QP] = {
.variant = IMX6QP,
@@ -1258,6 +1226,7 @@ static const struct imx6_pcie_drvdata drvdata[] = {
},
[IMX7D] = {
.variant = IMX7D,
+ .flags = IMX6_PCIE_FLAG_SUPPORTS_SUSPEND,
},
[IMX8MQ] = {
.variant = IMX8MQ,
@@ -1279,6 +1248,7 @@ static struct platform_driver imx6_pcie_driver = {
.of_match_table = imx6_pcie_of_match,
.suppress_bind_attrs = true,
.pm = &imx6_pcie_pm_ops,
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
},
.probe = imx6_pcie_probe,
.shutdown = imx6_pcie_shutdown,
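
The PLL-lock change in this file replaces an open-coded retry loop with
regmap_read_poll_timeout(). A standalone sketch of the same pattern, using the
constants from the hunk; the function name is illustrative and the GPR
register/bit definitions come from the i.MX7 IOMUXC GPR header the driver
already includes:

#include <linux/mfd/syscon/imx7-iomuxc-gpr.h>
#include <linux/regmap.h>

#define PHY_PLL_LOCK_WAIT_USLEEP_MAX	200
#define PHY_PLL_LOCK_WAIT_TIMEOUT	(2000 * PHY_PLL_LOCK_WAIT_USLEEP_MAX)

static int imx7d_wait_for_pll_lock(struct regmap *gpr)
{
	u32 val;

	/* Poll GPR22 until the PLL-locked bit is set, sleeping up to 200 us
	 * between reads and giving up with -ETIMEDOUT after 400 ms. */
	return regmap_read_poll_timeout(gpr, IOMUXC_GPR22, val,
					val & IMX7D_GPR22_PCIE_PHY_PLL_LOCKED,
					PHY_PLL_LOCK_WAIT_USLEEP_MAX,
					PHY_PLL_LOCK_WAIT_TIMEOUT);
}
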
diff --git a/drivers/pci/controller/dwc/pci-keystone.c b/drivers/pci/controller/dwc/pci-keystone.c
index 14f2b0b4ed5e..af677254a072 100644
--- a/drivers/pci/controller/dwc/pci-keystone.c
+++ b/drivers/pci/controller/dwc/pci-keystone.c
@@ -11,6 +11,7 @@
#include <linux/clk.h>
#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irqchip/chained_irq.h>
@@ -18,6 +19,7 @@
#include <linux/mfd/syscon.h>
#include <linux/msi.h>
#include <linux/of.h>
+#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/of_pci.h>
#include <linux/phy/phy.h>
@@ -26,6 +28,7 @@
#include <linux/resource.h>
#include <linux/signal.h>
+#include "../../pci.h"
#include "pcie-designware.h"
#define PCIE_VENDORID_MASK 0xffff
@@ -44,28 +47,34 @@
#define CFG_TYPE1 BIT(24)
#define OB_SIZE 0x030
-#define SPACE0_REMOTE_CFG_OFFSET 0x1000
#define OB_OFFSET_INDEX(n) (0x200 + (8 * (n)))
#define OB_OFFSET_HI(n) (0x204 + (8 * (n)))
#define OB_ENABLEN BIT(0)
#define OB_WIN_SIZE 8 /* 8MB */
+#define PCIE_LEGACY_IRQ_ENABLE_SET(n) (0x188 + (0x10 * ((n) - 1)))
+#define PCIE_LEGACY_IRQ_ENABLE_CLR(n) (0x18c + (0x10 * ((n) - 1)))
+#define PCIE_EP_IRQ_SET 0x64
+#define PCIE_EP_IRQ_CLR 0x68
+#define INT_ENABLE BIT(0)
+
/* IRQ register defines */
#define IRQ_EOI 0x050
-#define IRQ_STATUS 0x184
-#define IRQ_ENABLE_SET 0x188
-#define IRQ_ENABLE_CLR 0x18c
#define MSI_IRQ 0x054
-#define MSI0_IRQ_STATUS 0x104
-#define MSI0_IRQ_ENABLE_SET 0x108
-#define MSI0_IRQ_ENABLE_CLR 0x10c
-#define IRQ_STATUS 0x184
+#define MSI_IRQ_STATUS(n) (0x104 + ((n) << 4))
+#define MSI_IRQ_ENABLE_SET(n) (0x108 + ((n) << 4))
+#define MSI_IRQ_ENABLE_CLR(n) (0x10c + ((n) << 4))
#define MSI_IRQ_OFFSET 4
+#define IRQ_STATUS(n) (0x184 + ((n) << 4))
+#define IRQ_ENABLE_SET(n) (0x188 + ((n) << 4))
+#define INTx_EN BIT(0)
+
#define ERR_IRQ_STATUS 0x1c4
#define ERR_IRQ_ENABLE_SET 0x1c8
#define ERR_AER BIT(5) /* ECRC error */
+#define AM6_ERR_AER BIT(4) /* AM6 ECRC error */
#define ERR_AXI BIT(4) /* AXI tag lookup fatal error */
#define ERR_CORR BIT(3) /* Correctable error */
#define ERR_NONFATAL BIT(2) /* Non-fatal error */
@@ -74,25 +83,45 @@
#define ERR_IRQ_ALL (ERR_AER | ERR_AXI | ERR_CORR | \
ERR_NONFATAL | ERR_FATAL | ERR_SYS)
-#define MAX_MSI_HOST_IRQS 8
/* PCIE controller device IDs */
#define PCIE_RC_K2HK 0xb008
#define PCIE_RC_K2E 0xb009
#define PCIE_RC_K2L 0xb00a
#define PCIE_RC_K2G 0xb00b
+#define KS_PCIE_DEV_TYPE_MASK (0x3 << 1)
+#define KS_PCIE_DEV_TYPE(mode) ((mode) << 1)
+
+#define EP 0x0
+#define LEG_EP 0x1
+#define RC 0x2
+
+#define EXP_CAP_ID_OFFSET 0x70
+
+#define KS_PCIE_SYSCLOCKOUTEN BIT(0)
+
+#define AM654_PCIE_DEV_TYPE_MASK 0x3
+#define AM654_WIN_SIZE SZ_64K
+
+#define APP_ADDR_SPACE_0 (16 * SZ_1K)
+
#define to_keystone_pcie(x) dev_get_drvdata((x)->dev)
+struct ks_pcie_of_data {
+ enum dw_pcie_device_mode mode;
+ const struct dw_pcie_host_ops *host_ops;
+ const struct dw_pcie_ep_ops *ep_ops;
+ unsigned int version;
+};
+
struct keystone_pcie {
struct dw_pcie *pci;
/* PCI Device ID */
u32 device_id;
- int num_legacy_host_irqs;
int legacy_host_irqs[PCI_NUM_INTX];
struct device_node *legacy_intc_np;
- int num_msi_host_irqs;
- int msi_host_irqs[MAX_MSI_HOST_IRQS];
+ int msi_host_irq;
int num_lanes;
u32 num_viewport;
struct phy **phy;
@@ -101,28 +130,12 @@ struct keystone_pcie {
struct irq_domain *legacy_irq_domain;
struct device_node *np;
- int error_irq;
-
/* Application register space */
void __iomem *va_app_base; /* DT 1st resource */
struct resource app;
+ bool is_am6;
};
-static inline void update_reg_offset_bit_pos(u32 offset, u32 *reg_offset,
- u32 *bit_pos)
-{
- *reg_offset = offset % 8;
- *bit_pos = offset >> 3;
-}
-
-static phys_addr_t ks_pcie_get_msi_addr(struct pcie_port *pp)
-{
- struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
- struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
-
- return ks_pcie->app.start + MSI_IRQ;
-}
-
static u32 ks_pcie_app_readl(struct keystone_pcie *ks_pcie, u32 offset)
{
return readl(ks_pcie->va_app_base + offset);
@@ -134,81 +147,114 @@ static void ks_pcie_app_writel(struct keystone_pcie *ks_pcie, u32 offset,
writel(val, ks_pcie->va_app_base + offset);
}
-static void ks_pcie_handle_msi_irq(struct keystone_pcie *ks_pcie, int offset)
+static void ks_pcie_msi_irq_ack(struct irq_data *data)
{
- struct dw_pcie *pci = ks_pcie->pci;
- struct pcie_port *pp = &pci->pp;
- struct device *dev = pci->dev;
- u32 pending, vector;
- int src, virq;
+ struct pcie_port *pp = irq_data_get_irq_chip_data(data);
+ struct keystone_pcie *ks_pcie;
+ u32 irq = data->hwirq;
+ struct dw_pcie *pci;
+ u32 reg_offset;
+ u32 bit_pos;
- pending = ks_pcie_app_readl(ks_pcie, MSI0_IRQ_STATUS + (offset << 4));
+ pci = to_dw_pcie_from_pp(pp);
+ ks_pcie = to_keystone_pcie(pci);
- /*
- * MSI0 status bit 0-3 shows vectors 0, 8, 16, 24, MSI1 status bit
- * shows 1, 9, 17, 25 and so forth
- */
- for (src = 0; src < 4; src++) {
- if (BIT(src) & pending) {
- vector = offset + (src << 3);
- virq = irq_linear_revmap(pp->irq_domain, vector);
- dev_dbg(dev, "irq: bit %d, vector %d, virq %d\n",
- src, vector, virq);
- generic_handle_irq(virq);
- }
- }
+ reg_offset = irq % 8;
+ bit_pos = irq >> 3;
+
+ ks_pcie_app_writel(ks_pcie, MSI_IRQ_STATUS(reg_offset),
+ BIT(bit_pos));
+ ks_pcie_app_writel(ks_pcie, IRQ_EOI, reg_offset + MSI_IRQ_OFFSET);
}
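
The ack helper above and the mask/unmask helpers that follow all derive the
same register/bit pair from the MSI hwirq. Pulled out for clarity (the helper
name is illustrative): MSI register n carries vectors n, n+8, n+16 and n+24 in
bits 0..3.

static void ks_pcie_msi_vector_to_reg(u32 hwirq, u32 *reg_offset, u32 *bit_pos)
{
	*reg_offset = hwirq % 8;	/* which MSI_IRQ_*(n) register */
	*bit_pos = hwirq >> 3;		/* which bit within that register */
}
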
-static void ks_pcie_msi_irq_ack(int irq, struct pcie_port *pp)
+static void ks_pcie_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
{
- u32 reg_offset, bit_pos;
+ struct pcie_port *pp = irq_data_get_irq_chip_data(data);
struct keystone_pcie *ks_pcie;
struct dw_pcie *pci;
+ u64 msi_target;
pci = to_dw_pcie_from_pp(pp);
ks_pcie = to_keystone_pcie(pci);
- update_reg_offset_bit_pos(irq, &reg_offset, &bit_pos);
- ks_pcie_app_writel(ks_pcie, MSI0_IRQ_STATUS + (reg_offset << 4),
- BIT(bit_pos));
- ks_pcie_app_writel(ks_pcie, IRQ_EOI, reg_offset + MSI_IRQ_OFFSET);
+ msi_target = ks_pcie->app.start + MSI_IRQ;
+ msg->address_lo = lower_32_bits(msi_target);
+ msg->address_hi = upper_32_bits(msi_target);
+ msg->data = data->hwirq;
+
+ dev_dbg(pci->dev, "msi#%d address_hi %#x address_lo %#x\n",
+ (int)data->hwirq, msg->address_hi, msg->address_lo);
}
-static void ks_pcie_msi_set_irq(struct pcie_port *pp, int irq)
+static int ks_pcie_msi_set_affinity(struct irq_data *irq_data,
+ const struct cpumask *mask, bool force)
{
- u32 reg_offset, bit_pos;
- struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
- struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
+ return -EINVAL;
+}
- update_reg_offset_bit_pos(irq, &reg_offset, &bit_pos);
- ks_pcie_app_writel(ks_pcie, MSI0_IRQ_ENABLE_SET + (reg_offset << 4),
+static void ks_pcie_msi_mask(struct irq_data *data)
+{
+ struct pcie_port *pp = irq_data_get_irq_chip_data(data);
+ struct keystone_pcie *ks_pcie;
+ u32 irq = data->hwirq;
+ struct dw_pcie *pci;
+ unsigned long flags;
+ u32 reg_offset;
+ u32 bit_pos;
+
+ raw_spin_lock_irqsave(&pp->lock, flags);
+
+ pci = to_dw_pcie_from_pp(pp);
+ ks_pcie = to_keystone_pcie(pci);
+
+ reg_offset = irq % 8;
+ bit_pos = irq >> 3;
+
+ ks_pcie_app_writel(ks_pcie, MSI_IRQ_ENABLE_CLR(reg_offset),
BIT(bit_pos));
+
+ raw_spin_unlock_irqrestore(&pp->lock, flags);
}
-static void ks_pcie_msi_clear_irq(struct pcie_port *pp, int irq)
+static void ks_pcie_msi_unmask(struct irq_data *data)
{
- u32 reg_offset, bit_pos;
- struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
- struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
+ struct pcie_port *pp = irq_data_get_irq_chip_data(data);
+ struct keystone_pcie *ks_pcie;
+ u32 irq = data->hwirq;
+ struct dw_pcie *pci;
+ unsigned long flags;
+ u32 reg_offset;
+ u32 bit_pos;
+
+ raw_spin_lock_irqsave(&pp->lock, flags);
- update_reg_offset_bit_pos(irq, &reg_offset, &bit_pos);
- ks_pcie_app_writel(ks_pcie, MSI0_IRQ_ENABLE_CLR + (reg_offset << 4),
+ pci = to_dw_pcie_from_pp(pp);
+ ks_pcie = to_keystone_pcie(pci);
+
+ reg_offset = irq % 8;
+ bit_pos = irq >> 3;
+
+ ks_pcie_app_writel(ks_pcie, MSI_IRQ_ENABLE_SET(reg_offset),
BIT(bit_pos));
+
+ raw_spin_unlock_irqrestore(&pp->lock, flags);
}
+static struct irq_chip ks_pcie_msi_irq_chip = {
+ .name = "KEYSTONE-PCI-MSI",
+ .irq_ack = ks_pcie_msi_irq_ack,
+ .irq_compose_msi_msg = ks_pcie_compose_msi_msg,
+ .irq_set_affinity = ks_pcie_msi_set_affinity,
+ .irq_mask = ks_pcie_msi_mask,
+ .irq_unmask = ks_pcie_msi_unmask,
+};
+
static int ks_pcie_msi_host_init(struct pcie_port *pp)
{
+ pp->msi_irq_chip = &ks_pcie_msi_irq_chip;
return dw_pcie_allocate_domains(pp);
}
-static void ks_pcie_enable_legacy_irqs(struct keystone_pcie *ks_pcie)
-{
- int i;
-
- for (i = 0; i < PCI_NUM_INTX; i++)
- ks_pcie_app_writel(ks_pcie, IRQ_ENABLE_SET + (i << 4), 0x1);
-}
-
static void ks_pcie_handle_legacy_irq(struct keystone_pcie *ks_pcie,
int offset)
{
@@ -217,7 +263,7 @@ static void ks_pcie_handle_legacy_irq(struct keystone_pcie *ks_pcie,
u32 pending;
int virq;
- pending = ks_pcie_app_readl(ks_pcie, IRQ_STATUS + (offset << 4));
+ pending = ks_pcie_app_readl(ks_pcie, IRQ_STATUS(offset));
if (BIT(0) & pending) {
virq = irq_linear_revmap(ks_pcie->legacy_irq_domain, offset);
@@ -229,6 +275,14 @@ static void ks_pcie_handle_legacy_irq(struct keystone_pcie *ks_pcie,
ks_pcie_app_writel(ks_pcie, IRQ_EOI, offset);
}
+/*
+ * Dummy function so that DW core doesn't configure MSI
+ */
+static int ks_pcie_am654_msi_host_init(struct pcie_port *pp)
+{
+ return 0;
+}
+
static void ks_pcie_enable_error_irq(struct keystone_pcie *ks_pcie)
{
ks_pcie_app_writel(ks_pcie, ERR_IRQ_ENABLE_SET, ERR_IRQ_ALL);
@@ -255,10 +309,10 @@ static irqreturn_t ks_pcie_handle_error_irq(struct keystone_pcie *ks_pcie)
if (reg & ERR_CORR)
dev_dbg(dev, "Correctable Error\n");
- if (reg & ERR_AXI)
+ if (!ks_pcie->is_am6 && (reg & ERR_AXI))
dev_err(dev, "AXI tag lookup fatal Error\n");
- if (reg & ERR_AER)
+ if (reg & ERR_AER || (ks_pcie->is_am6 && (reg & AM6_ERR_AER)))
dev_err(dev, "ECRC Error\n");
ks_pcie_app_writel(ks_pcie, ERR_IRQ_STATUS, reg);
@@ -356,6 +410,9 @@ static void ks_pcie_setup_rc_app_regs(struct keystone_pcie *ks_pcie)
dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_1, 0);
ks_pcie_clear_dbi_mode(ks_pcie);
+ if (ks_pcie->is_am6)
+ return;
+
val = ilog2(OB_WIN_SIZE);
ks_pcie_app_writel(ks_pcie, OB_SIZE, val);
@@ -445,68 +502,33 @@ static int ks_pcie_link_up(struct dw_pcie *pci)
return (val == PORT_LOGIC_LTSSM_STATE_L0);
}
-static void ks_pcie_initiate_link_train(struct keystone_pcie *ks_pcie)
+static void ks_pcie_stop_link(struct dw_pcie *pci)
{
+ struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
u32 val;
/* Disable Link training */
val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
val &= ~LTSSM_EN_VAL;
ks_pcie_app_writel(ks_pcie, CMD_STATUS, LTSSM_EN_VAL | val);
-
- /* Initiate Link Training */
- val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
- ks_pcie_app_writel(ks_pcie, CMD_STATUS, LTSSM_EN_VAL | val);
}
-/**
- * ks_pcie_dw_host_init() - initialize host for v3_65 dw hardware
- *
- * Ioremap the register resources, initialize legacy irq domain
- * and call dw_pcie_v3_65_host_init() API to initialize the Keystone
- * PCI host controller.
- */
-static int __init ks_pcie_dw_host_init(struct keystone_pcie *ks_pcie)
+static int ks_pcie_start_link(struct dw_pcie *pci)
{
- struct dw_pcie *pci = ks_pcie->pci;
- struct pcie_port *pp = &pci->pp;
+ struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
struct device *dev = pci->dev;
- struct platform_device *pdev = to_platform_device(dev);
- struct resource *res;
-
- /* Index 0 is the config reg. space address */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- pci->dbi_base = devm_pci_remap_cfg_resource(dev, res);
- if (IS_ERR(pci->dbi_base))
- return PTR_ERR(pci->dbi_base);
-
- /*
- * We set these same and is used in pcie rd/wr_other_conf
- * functions
- */
- pp->va_cfg0_base = pci->dbi_base + SPACE0_REMOTE_CFG_OFFSET;
- pp->va_cfg1_base = pp->va_cfg0_base;
-
- /* Index 1 is the application reg. space address */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- ks_pcie->va_app_base = devm_ioremap_resource(dev, res);
- if (IS_ERR(ks_pcie->va_app_base))
- return PTR_ERR(ks_pcie->va_app_base);
-
- ks_pcie->app = *res;
+ u32 val;
- /* Create legacy IRQ domain */
- ks_pcie->legacy_irq_domain =
- irq_domain_add_linear(ks_pcie->legacy_intc_np,
- PCI_NUM_INTX,
- &ks_pcie_legacy_irq_domain_ops,
- NULL);
- if (!ks_pcie->legacy_irq_domain) {
- dev_err(dev, "Failed to add irq domain for legacy irqs\n");
- return -EINVAL;
+ if (dw_pcie_link_up(pci)) {
+ dev_dbg(dev, "link is already up\n");
+ return 0;
}
- return dw_pcie_host_init(pp);
+ /* Initiate Link Training */
+ val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
+ ks_pcie_app_writel(ks_pcie, CMD_STATUS, LTSSM_EN_VAL | val);
+
+ return 0;
}
static void ks_pcie_quirk(struct pci_dev *dev)
@@ -552,34 +574,16 @@ static void ks_pcie_quirk(struct pci_dev *dev)
}
DECLARE_PCI_FIXUP_ENABLE(PCI_ANY_ID, PCI_ANY_ID, ks_pcie_quirk);
-static int ks_pcie_establish_link(struct keystone_pcie *ks_pcie)
-{
- struct dw_pcie *pci = ks_pcie->pci;
- struct device *dev = pci->dev;
-
- if (dw_pcie_link_up(pci)) {
- dev_info(dev, "Link already up\n");
- return 0;
- }
-
- ks_pcie_initiate_link_train(ks_pcie);
-
- /* check if the link is up or not */
- if (!dw_pcie_wait_for_link(pci))
- return 0;
-
- dev_err(dev, "phy link never came up\n");
- return -ETIMEDOUT;
-}
-
static void ks_pcie_msi_irq_handler(struct irq_desc *desc)
{
- unsigned int irq = irq_desc_get_irq(desc);
+ unsigned int irq = desc->irq_data.hwirq;
struct keystone_pcie *ks_pcie = irq_desc_get_handler_data(desc);
- u32 offset = irq - ks_pcie->msi_host_irqs[0];
+ u32 offset = irq - ks_pcie->msi_host_irq;
struct dw_pcie *pci = ks_pcie->pci;
+ struct pcie_port *pp = &pci->pp;
struct device *dev = pci->dev;
struct irq_chip *chip = irq_desc_get_chip(desc);
+ u32 vector, virq, reg, pos;
dev_dbg(dev, "%s, irq %d\n", __func__, irq);
@@ -589,7 +593,23 @@ static void ks_pcie_msi_irq_handler(struct irq_desc *desc)
* ack operation.
*/
chained_irq_enter(chip, desc);
- ks_pcie_handle_msi_irq(ks_pcie, offset);
+
+ reg = ks_pcie_app_readl(ks_pcie, MSI_IRQ_STATUS(offset));
+ /*
+ * MSI0 status bit 0-3 shows vectors 0, 8, 16, 24, MSI1 status bit
+ * shows 1, 9, 17, 25 and so forth
+ */
+ for (pos = 0; pos < 4; pos++) {
+ if (!(reg & BIT(pos)))
+ continue;
+
+ vector = offset + (pos << 3);
+ virq = irq_linear_revmap(pp->irq_domain, vector);
+ dev_dbg(dev, "irq: bit %d, vector %d, virq %d\n", pos, vector,
+ virq);
+ generic_handle_irq(virq);
+ }
+
chained_irq_exit(chip, desc);
}
@@ -622,89 +642,119 @@ static void ks_pcie_legacy_irq_handler(struct irq_desc *desc)
chained_irq_exit(chip, desc);
}
-static int ks_pcie_get_irq_controller_info(struct keystone_pcie *ks_pcie,
- char *controller, int *num_irqs)
+static int ks_pcie_config_msi_irq(struct keystone_pcie *ks_pcie)
{
- int temp, max_host_irqs, legacy = 1, *host_irqs;
struct device *dev = ks_pcie->pci->dev;
- struct device_node *np_pcie = dev->of_node, **np_temp;
-
- if (!strcmp(controller, "msi-interrupt-controller"))
- legacy = 0;
+ struct device_node *np = ks_pcie->np;
+ struct device_node *intc_np;
+ struct irq_data *irq_data;
+ int irq_count, irq, ret, i;
- if (legacy) {
- np_temp = &ks_pcie->legacy_intc_np;
- max_host_irqs = PCI_NUM_INTX;
- host_irqs = &ks_pcie->legacy_host_irqs[0];
- } else {
- np_temp = &ks_pcie->msi_intc_np;
- max_host_irqs = MAX_MSI_HOST_IRQS;
- host_irqs = &ks_pcie->msi_host_irqs[0];
- }
+ if (!IS_ENABLED(CONFIG_PCI_MSI))
+ return 0;
- /* interrupt controller is in a child node */
- *np_temp = of_get_child_by_name(np_pcie, controller);
- if (!(*np_temp)) {
- dev_err(dev, "Node for %s is absent\n", controller);
+ intc_np = of_get_child_by_name(np, "msi-interrupt-controller");
+ if (!intc_np) {
+ if (ks_pcie->is_am6)
+ return 0;
+ dev_warn(dev, "msi-interrupt-controller node is absent\n");
return -EINVAL;
}
- temp = of_irq_count(*np_temp);
- if (!temp) {
- dev_err(dev, "No IRQ entries in %s\n", controller);
- of_node_put(*np_temp);
- return -EINVAL;
+ irq_count = of_irq_count(intc_np);
+ if (!irq_count) {
+ dev_err(dev, "No IRQ entries in msi-interrupt-controller\n");
+ ret = -EINVAL;
+ goto err;
}
- if (temp > max_host_irqs)
- dev_warn(dev, "Too many %s interrupts defined %u\n",
- (legacy ? "legacy" : "MSI"), temp);
-
- /*
- * support upto max_host_irqs. In dt from index 0 to 3 (legacy) or 0 to
- * 7 (MSI)
- */
- for (temp = 0; temp < max_host_irqs; temp++) {
- host_irqs[temp] = irq_of_parse_and_map(*np_temp, temp);
- if (!host_irqs[temp])
- break;
- }
+ for (i = 0; i < irq_count; i++) {
+ irq = irq_of_parse_and_map(intc_np, i);
+ if (!irq) {
+ ret = -EINVAL;
+ goto err;
+ }
- of_node_put(*np_temp);
+ if (!ks_pcie->msi_host_irq) {
+ irq_data = irq_get_irq_data(irq);
+ if (!irq_data) {
+ ret = -EINVAL;
+ goto err;
+ }
+ ks_pcie->msi_host_irq = irq_data->hwirq;
+ }
- if (temp) {
- *num_irqs = temp;
- return 0;
+ irq_set_chained_handler_and_data(irq, ks_pcie_msi_irq_handler,
+ ks_pcie);
}
- return -EINVAL;
+ of_node_put(intc_np);
+ return 0;
+
+err:
+ of_node_put(intc_np);
+ return ret;
}
-static void ks_pcie_setup_interrupts(struct keystone_pcie *ks_pcie)
+static int ks_pcie_config_legacy_irq(struct keystone_pcie *ks_pcie)
{
- int i;
+ struct device *dev = ks_pcie->pci->dev;
+ struct irq_domain *legacy_irq_domain;
+ struct device_node *np = ks_pcie->np;
+ struct device_node *intc_np;
+ int irq_count, irq, ret = 0, i;
+
+ intc_np = of_get_child_by_name(np, "legacy-interrupt-controller");
+ if (!intc_np) {
+ /*
+ * Since legacy interrupts are modeled as edge-interrupts in
+ * AM6, keep it disabled for now.
+ */
+ if (ks_pcie->is_am6)
+ return 0;
+ dev_warn(dev, "legacy-interrupt-controller node is absent\n");
+ return -EINVAL;
+ }
- /* Legacy IRQ */
- for (i = 0; i < ks_pcie->num_legacy_host_irqs; i++) {
- irq_set_chained_handler_and_data(ks_pcie->legacy_host_irqs[i],
+ irq_count = of_irq_count(intc_np);
+ if (!irq_count) {
+ dev_err(dev, "No IRQ entries in legacy-interrupt-controller\n");
+ ret = -EINVAL;
+ goto err;
+ }
+
+ for (i = 0; i < irq_count; i++) {
+ irq = irq_of_parse_and_map(intc_np, i);
+ if (!irq) {
+ ret = -EINVAL;
+ goto err;
+ }
+ ks_pcie->legacy_host_irqs[i] = irq;
+
+ irq_set_chained_handler_and_data(irq,
ks_pcie_legacy_irq_handler,
ks_pcie);
}
- ks_pcie_enable_legacy_irqs(ks_pcie);
- /* MSI IRQ */
- if (IS_ENABLED(CONFIG_PCI_MSI)) {
- for (i = 0; i < ks_pcie->num_msi_host_irqs; i++) {
- irq_set_chained_handler_and_data(ks_pcie->msi_host_irqs[i],
- ks_pcie_msi_irq_handler,
- ks_pcie);
- }
+ legacy_irq_domain =
+ irq_domain_add_linear(intc_np, PCI_NUM_INTX,
+ &ks_pcie_legacy_irq_domain_ops, NULL);
+ if (!legacy_irq_domain) {
+ dev_err(dev, "Failed to add irq domain for legacy irqs\n");
+ ret = -EINVAL;
+ goto err;
}
+ ks_pcie->legacy_irq_domain = legacy_irq_domain;
+
+ for (i = 0; i < PCI_NUM_INTX; i++)
+ ks_pcie_app_writel(ks_pcie, IRQ_ENABLE_SET(i), INTx_EN);
- if (ks_pcie->error_irq > 0)
- ks_pcie_enable_error_irq(ks_pcie);
+err:
+ of_node_put(intc_np);
+ return ret;
}
+#ifdef CONFIG_ARM
/*
* When a PCI device does not exist during config cycles, keystone host gets a
* bus error instead of returning 0xffffffff. This handler always returns 0
@@ -724,6 +774,7 @@ static int ks_pcie_fault(unsigned long addr, unsigned int fsr,
return 0;
}
+#endif
static int __init ks_pcie_init_id(struct keystone_pcie *ks_pcie)
{
@@ -742,8 +793,10 @@ static int __init ks_pcie_init_id(struct keystone_pcie *ks_pcie)
if (ret)
return ret;
+ dw_pcie_dbi_ro_wr_en(pci);
dw_pcie_writew_dbi(pci, PCI_VENDOR_ID, id & PCIE_VENDORID_MASK);
dw_pcie_writew_dbi(pci, PCI_DEVICE_ID, id >> PCIE_DEVICEID_SHIFT);
+ dw_pcie_dbi_ro_wr_dis(pci);
return 0;
}
@@ -754,11 +807,18 @@ static int __init ks_pcie_host_init(struct pcie_port *pp)
struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
int ret;
+ ret = ks_pcie_config_legacy_irq(ks_pcie);
+ if (ret)
+ return ret;
+
+ ret = ks_pcie_config_msi_irq(ks_pcie);
+ if (ret)
+ return ret;
+
dw_pcie_setup_rc(pp);
- ks_pcie_establish_link(ks_pcie);
+ ks_pcie_stop_link(pci);
ks_pcie_setup_rc_app_regs(ks_pcie);
- ks_pcie_setup_interrupts(ks_pcie);
writew(PCI_IO_RANGE_TYPE_32 | (PCI_IO_RANGE_TYPE_32 << 8),
pci->dbi_base + PCI_IO_BASE);
@@ -766,12 +826,17 @@ static int __init ks_pcie_host_init(struct pcie_port *pp)
if (ret < 0)
return ret;
+#ifdef CONFIG_ARM
/*
* PCIe access errors that result into OCP errors are caught by ARM as
* "External aborts"
*/
hook_fault_code(17, ks_pcie_fault, SIGBUS, 0,
"Asynchronous external abort");
+#endif
+
+ ks_pcie_start_link(pci);
+ dw_pcie_wait_for_link(pci);
return 0;
}
@@ -780,14 +845,15 @@ static const struct dw_pcie_host_ops ks_pcie_host_ops = {
.rd_other_conf = ks_pcie_rd_other_conf,
.wr_other_conf = ks_pcie_wr_other_conf,
.host_init = ks_pcie_host_init,
- .msi_set_irq = ks_pcie_msi_set_irq,
- .msi_clear_irq = ks_pcie_msi_clear_irq,
- .get_msi_addr = ks_pcie_get_msi_addr,
.msi_host_init = ks_pcie_msi_host_init,
- .msi_irq_ack = ks_pcie_msi_irq_ack,
.scan_bus = ks_pcie_v3_65_scan_bus,
};
+static const struct dw_pcie_host_ops ks_pcie_am654_host_ops = {
+ .host_init = ks_pcie_host_init,
+ .msi_host_init = ks_pcie_am654_msi_host_init,
+};
+
static irqreturn_t ks_pcie_err_irq_handler(int irq, void *priv)
{
struct keystone_pcie *ks_pcie = priv;
@@ -801,41 +867,17 @@ static int __init ks_pcie_add_pcie_port(struct keystone_pcie *ks_pcie,
struct dw_pcie *pci = ks_pcie->pci;
struct pcie_port *pp = &pci->pp;
struct device *dev = &pdev->dev;
+ struct resource *res;
int ret;
- ret = ks_pcie_get_irq_controller_info(ks_pcie,
- "legacy-interrupt-controller",
- &ks_pcie->num_legacy_host_irqs);
- if (ret)
- return ret;
-
- if (IS_ENABLED(CONFIG_PCI_MSI)) {
- ret = ks_pcie_get_irq_controller_info(ks_pcie,
- "msi-interrupt-controller",
- &ks_pcie->num_msi_host_irqs);
- if (ret)
- return ret;
- }
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "config");
+ pp->va_cfg0_base = devm_pci_remap_cfg_resource(dev, res);
+ if (IS_ERR(pp->va_cfg0_base))
+ return PTR_ERR(pp->va_cfg0_base);
- /*
- * Index 0 is the platform interrupt for error interrupt
- * from RC. This is optional.
- */
- ks_pcie->error_irq = irq_of_parse_and_map(ks_pcie->np, 0);
- if (ks_pcie->error_irq <= 0)
- dev_info(dev, "no error IRQ defined\n");
- else {
- ret = request_irq(ks_pcie->error_irq, ks_pcie_err_irq_handler,
- IRQF_SHARED, "pcie-error-irq", ks_pcie);
- if (ret < 0) {
- dev_err(dev, "failed to request error IRQ %d\n",
- ks_pcie->error_irq);
- return ret;
- }
- }
+ pp->va_cfg1_base = pp->va_cfg0_base;
- pp->ops = &ks_pcie_host_ops;
- ret = ks_pcie_dw_host_init(ks_pcie);
+ ret = dw_pcie_host_init(pp);
if (ret) {
dev_err(dev, "failed to initialize host\n");
return ret;
@@ -844,18 +886,139 @@ static int __init ks_pcie_add_pcie_port(struct keystone_pcie *ks_pcie,
return 0;
}
-static const struct of_device_id ks_pcie_of_match[] = {
- {
- .type = "pci",
- .compatible = "ti,keystone-pcie",
- },
- { },
-};
+static u32 ks_pcie_am654_read_dbi2(struct dw_pcie *pci, void __iomem *base,
+ u32 reg, size_t size)
+{
+ struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
+ u32 val;
+
+ ks_pcie_set_dbi_mode(ks_pcie);
+ dw_pcie_read(base + reg, size, &val);
+ ks_pcie_clear_dbi_mode(ks_pcie);
+ return val;
+}
+
+static void ks_pcie_am654_write_dbi2(struct dw_pcie *pci, void __iomem *base,
+ u32 reg, size_t size, u32 val)
+{
+ struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
+
+ ks_pcie_set_dbi_mode(ks_pcie);
+ dw_pcie_write(base + reg, size, val);
+ ks_pcie_clear_dbi_mode(ks_pcie);
+}
static const struct dw_pcie_ops ks_pcie_dw_pcie_ops = {
+ .start_link = ks_pcie_start_link,
+ .stop_link = ks_pcie_stop_link,
.link_up = ks_pcie_link_up,
+ .read_dbi2 = ks_pcie_am654_read_dbi2,
+ .write_dbi2 = ks_pcie_am654_write_dbi2,
+};
+
+static void ks_pcie_am654_ep_init(struct dw_pcie_ep *ep)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ int flags;
+
+ ep->page_size = AM654_WIN_SIZE;
+ flags = PCI_BASE_ADDRESS_SPACE_MEMORY | PCI_BASE_ADDRESS_MEM_TYPE_32;
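+	/* Writing the BAR0 offset through dbi2 (shadow space) programs the BAR0 mask */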
+ dw_pcie_writel_dbi2(pci, PCI_BASE_ADDRESS_0, APP_ADDR_SPACE_0 - 1);
+ dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, flags);
+}
+
+static void ks_pcie_am654_raise_legacy_irq(struct keystone_pcie *ks_pcie)
+{
+ struct dw_pcie *pci = ks_pcie->pci;
+ u8 int_pin;
+
+ int_pin = dw_pcie_readb_dbi(pci, PCI_INTERRUPT_PIN);
+ if (int_pin == 0 || int_pin > 4)
+ return;
+
+ ks_pcie_app_writel(ks_pcie, PCIE_LEGACY_IRQ_ENABLE_SET(int_pin),
+ INT_ENABLE);
+ ks_pcie_app_writel(ks_pcie, PCIE_EP_IRQ_SET, INT_ENABLE);
+ mdelay(1);
+ ks_pcie_app_writel(ks_pcie, PCIE_EP_IRQ_CLR, INT_ENABLE);
+ ks_pcie_app_writel(ks_pcie, PCIE_LEGACY_IRQ_ENABLE_CLR(int_pin),
+ INT_ENABLE);
+}
+
+static int ks_pcie_am654_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
+ enum pci_epc_irq_type type,
+ u16 interrupt_num)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
+
+ switch (type) {
+ case PCI_EPC_IRQ_LEGACY:
+ ks_pcie_am654_raise_legacy_irq(ks_pcie);
+ break;
+ case PCI_EPC_IRQ_MSI:
+ dw_pcie_ep_raise_msi_irq(ep, func_no, interrupt_num);
+ break;
+ default:
+ dev_err(pci->dev, "UNKNOWN IRQ type\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static const struct pci_epc_features ks_pcie_am654_epc_features = {
+ .linkup_notifier = false,
+ .msi_capable = true,
+ .msix_capable = false,
+ .reserved_bar = 1 << BAR_0 | 1 << BAR_1,
+ .bar_fixed_64bit = 1 << BAR_0,
+ .bar_fixed_size[2] = SZ_1M,
+ .bar_fixed_size[3] = SZ_64K,
+ .bar_fixed_size[4] = 256,
+ .bar_fixed_size[5] = SZ_1M,
+ .align = SZ_1M,
};
+static const struct pci_epc_features*
+ks_pcie_am654_get_features(struct dw_pcie_ep *ep)
+{
+ return &ks_pcie_am654_epc_features;
+}
+
+static const struct dw_pcie_ep_ops ks_pcie_am654_ep_ops = {
+ .ep_init = ks_pcie_am654_ep_init,
+ .raise_irq = ks_pcie_am654_raise_irq,
+ .get_features = &ks_pcie_am654_get_features,
+};
+
+static int __init ks_pcie_add_pcie_ep(struct keystone_pcie *ks_pcie,
+ struct platform_device *pdev)
+{
+ int ret;
+ struct dw_pcie_ep *ep;
+ struct resource *res;
+ struct device *dev = &pdev->dev;
+ struct dw_pcie *pci = ks_pcie->pci;
+
+ ep = &pci->ep;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "addr_space");
+ if (!res)
+ return -EINVAL;
+
+ ep->phys_base = res->start;
+ ep->addr_size = resource_size(res);
+
+ ret = dw_pcie_ep_init(ep);
+ if (ret) {
+ dev_err(dev, "failed to initialize endpoint\n");
+ return ret;
+ }
+
+ return 0;
+}
+
static void ks_pcie_disable_phy(struct keystone_pcie *ks_pcie)
{
int num_lanes = ks_pcie->num_lanes;
@@ -873,6 +1036,10 @@ static int ks_pcie_enable_phy(struct keystone_pcie *ks_pcie)
int num_lanes = ks_pcie->num_lanes;
for (i = 0; i < num_lanes; i++) {
+ ret = phy_reset(ks_pcie->phy[i]);
+ if (ret < 0)
+ goto err_phy;
+
ret = phy_init(ks_pcie->phy[i]);
if (ret < 0)
goto err_phy;
@@ -895,20 +1062,161 @@ err_phy:
return ret;
}
+static int ks_pcie_set_mode(struct device *dev)
+{
+ struct device_node *np = dev->of_node;
+ struct regmap *syscon;
+ u32 val;
+ u32 mask;
+ int ret = 0;
+
+ syscon = syscon_regmap_lookup_by_phandle(np, "ti,syscon-pcie-mode");
+ if (IS_ERR(syscon))
+ return 0;
+
+ mask = KS_PCIE_DEV_TYPE_MASK | KS_PCIE_SYSCLOCKOUTEN;
+ val = KS_PCIE_DEV_TYPE(RC) | KS_PCIE_SYSCLOCKOUTEN;
+
+ ret = regmap_update_bits(syscon, 0, mask, val);
+ if (ret) {
+ dev_err(dev, "failed to set pcie mode\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ks_pcie_am654_set_mode(struct device *dev,
+ enum dw_pcie_device_mode mode)
+{
+ struct device_node *np = dev->of_node;
+ struct regmap *syscon;
+ u32 val;
+ u32 mask;
+ int ret = 0;
+
+ syscon = syscon_regmap_lookup_by_phandle(np, "ti,syscon-pcie-mode");
+ if (IS_ERR(syscon))
+ return 0;
+
+ mask = AM654_PCIE_DEV_TYPE_MASK;
+
+ switch (mode) {
+ case DW_PCIE_RC_TYPE:
+ val = RC;
+ break;
+ case DW_PCIE_EP_TYPE:
+ val = EP;
+ break;
+ default:
+ dev_err(dev, "INVALID device type %d\n", mode);
+ return -EINVAL;
+ }
+
+ ret = regmap_update_bits(syscon, 0, mask, val);
+ if (ret) {
+ dev_err(dev, "failed to set pcie mode\n");
+ return ret;
+ }
+
+ return 0;
+}
+
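+/* Program the supported (LNKCAP) and target (LNKCTL2) link speed fields */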
+static void ks_pcie_set_link_speed(struct dw_pcie *pci, int link_speed)
+{
+ u32 val;
+
+ dw_pcie_dbi_ro_wr_en(pci);
+
+ val = dw_pcie_readl_dbi(pci, EXP_CAP_ID_OFFSET + PCI_EXP_LNKCAP);
+ if ((val & PCI_EXP_LNKCAP_SLS) != link_speed) {
+ val &= ~((u32)PCI_EXP_LNKCAP_SLS);
+ val |= link_speed;
+ dw_pcie_writel_dbi(pci, EXP_CAP_ID_OFFSET + PCI_EXP_LNKCAP,
+ val);
+ }
+
+ val = dw_pcie_readl_dbi(pci, EXP_CAP_ID_OFFSET + PCI_EXP_LNKCTL2);
+ if ((val & PCI_EXP_LNKCAP_SLS) != link_speed) {
+ val &= ~((u32)PCI_EXP_LNKCAP_SLS);
+ val |= link_speed;
+ dw_pcie_writel_dbi(pci, EXP_CAP_ID_OFFSET + PCI_EXP_LNKCTL2,
+ val);
+ }
+
+ dw_pcie_dbi_ro_wr_dis(pci);
+}
+
+static const struct ks_pcie_of_data ks_pcie_rc_of_data = {
+ .host_ops = &ks_pcie_host_ops,
+ .version = 0x365A,
+};
+
+static const struct ks_pcie_of_data ks_pcie_am654_rc_of_data = {
+ .host_ops = &ks_pcie_am654_host_ops,
+ .mode = DW_PCIE_RC_TYPE,
+ .version = 0x490A,
+};
+
+static const struct ks_pcie_of_data ks_pcie_am654_ep_of_data = {
+ .ep_ops = &ks_pcie_am654_ep_ops,
+ .mode = DW_PCIE_EP_TYPE,
+ .version = 0x490A,
+};
+
+static const struct of_device_id ks_pcie_of_match[] = {
+ {
+ .type = "pci",
+ .data = &ks_pcie_rc_of_data,
+ .compatible = "ti,keystone-pcie",
+ },
+ {
+ .data = &ks_pcie_am654_rc_of_data,
+ .compatible = "ti,am654-pcie-rc",
+ },
+ {
+ .data = &ks_pcie_am654_ep_of_data,
+ .compatible = "ti,am654-pcie-ep",
+ },
+ { },
+};
+
static int __init ks_pcie_probe(struct platform_device *pdev)
{
+ const struct dw_pcie_host_ops *host_ops;
+ const struct dw_pcie_ep_ops *ep_ops;
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
+ const struct ks_pcie_of_data *data;
+ const struct of_device_id *match;
+ enum dw_pcie_device_mode mode;
struct dw_pcie *pci;
struct keystone_pcie *ks_pcie;
struct device_link **link;
+ struct gpio_desc *gpiod;
+ void __iomem *atu_base;
+ struct resource *res;
+ unsigned int version;
+ void __iomem *base;
u32 num_viewport;
struct phy **phy;
+ int link_speed;
u32 num_lanes;
char name[10];
int ret;
+ int irq;
int i;
+ match = of_match_device(of_match_ptr(ks_pcie_of_match), dev);
+ data = (struct ks_pcie_of_data *)match->data;
+ if (!data)
+ return -EINVAL;
+
+ version = data->version;
+ host_ops = data->host_ops;
+ ep_ops = data->ep_ops;
+ mode = data->mode;
+
ks_pcie = devm_kzalloc(dev, sizeof(*ks_pcie), GFP_KERNEL);
if (!ks_pcie)
return -ENOMEM;
@@ -917,12 +1225,38 @@ static int __init ks_pcie_probe(struct platform_device *pdev)
if (!pci)
return -ENOMEM;
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "app");
+ ks_pcie->va_app_base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(ks_pcie->va_app_base))
+ return PTR_ERR(ks_pcie->va_app_base);
+
+ ks_pcie->app = *res;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbics");
+ base = devm_pci_remap_cfg_resource(dev, res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ if (of_device_is_compatible(np, "ti,am654-pcie-rc"))
+ ks_pcie->is_am6 = true;
+
+ pci->dbi_base = base;
+ pci->dbi_base2 = base;
pci->dev = dev;
pci->ops = &ks_pcie_dw_pcie_ops;
+ pci->version = version;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(dev, "missing IRQ resource: %d\n", irq);
+ return irq;
+ }
- ret = of_property_read_u32(np, "num-viewport", &num_viewport);
+ ret = request_irq(irq, ks_pcie_err_irq_handler, IRQF_SHARED,
+ "ks-pcie-error-irq", ks_pcie);
if (ret < 0) {
- dev_err(dev, "unable to read *num-viewport* property\n");
+ dev_err(dev, "failed to request error IRQ %d\n",
+ irq);
return ret;
}
@@ -960,9 +1294,17 @@ static int __init ks_pcie_probe(struct platform_device *pdev)
ks_pcie->pci = pci;
ks_pcie->link = link;
ks_pcie->num_lanes = num_lanes;
- ks_pcie->num_viewport = num_viewport;
ks_pcie->phy = phy;
+ gpiod = devm_gpiod_get_optional(dev, "reset",
+ GPIOD_OUT_LOW);
+ if (IS_ERR(gpiod)) {
+ ret = PTR_ERR(gpiod);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "Failed to get reset GPIO\n");
+ goto err_link;
+ }
+
ret = ks_pcie_enable_phy(ks_pcie);
if (ret) {
dev_err(dev, "failed to enable phy\n");
@@ -977,9 +1319,79 @@ static int __init ks_pcie_probe(struct platform_device *pdev)
goto err_get_sync;
}
- ret = ks_pcie_add_pcie_port(ks_pcie, pdev);
- if (ret < 0)
- goto err_get_sync;
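+	/* Cores >= 4.80A expose the unrolled iATU space through a separate "atu" region */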
+ if (pci->version >= 0x480A) {
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "atu");
+ atu_base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(atu_base)) {
+ ret = PTR_ERR(atu_base);
+ goto err_get_sync;
+ }
+
+ pci->atu_base = atu_base;
+
+ ret = ks_pcie_am654_set_mode(dev, mode);
+ if (ret < 0)
+ goto err_get_sync;
+ } else {
+ ret = ks_pcie_set_mode(dev);
+ if (ret < 0)
+ goto err_get_sync;
+ }
+
+ link_speed = of_pci_get_max_link_speed(np);
+ if (link_speed < 0)
+ link_speed = 2;
+
+ ks_pcie_set_link_speed(pci, link_speed);
+
+ switch (mode) {
+ case DW_PCIE_RC_TYPE:
+ if (!IS_ENABLED(CONFIG_PCI_KEYSTONE_HOST)) {
+ ret = -ENODEV;
+ goto err_get_sync;
+ }
+
+ ret = of_property_read_u32(np, "num-viewport", &num_viewport);
+ if (ret < 0) {
+ dev_err(dev, "unable to read *num-viewport* property\n");
+ return ret;
+ }
+
+ /*
+ * "Power Sequencing and Reset Signal Timings" table in
+ * PCI EXPRESS CARD ELECTROMECHANICAL SPECIFICATION, REV. 2.0
+ * indicates PERST# should be deasserted after minimum of 100us
+ * once REFCLK is stable. The REFCLK to the connector in RC
+ * mode is selected while enabling the PHY. So deassert PERST#
+ * after 100 us.
+ */
+ if (gpiod) {
+ usleep_range(100, 200);
+ gpiod_set_value_cansleep(gpiod, 1);
+ }
+
+ ks_pcie->num_viewport = num_viewport;
+ pci->pp.ops = host_ops;
+ ret = ks_pcie_add_pcie_port(ks_pcie, pdev);
+ if (ret < 0)
+ goto err_get_sync;
+ break;
+ case DW_PCIE_EP_TYPE:
+ if (!IS_ENABLED(CONFIG_PCI_KEYSTONE_EP)) {
+ ret = -ENODEV;
+ goto err_get_sync;
+ }
+
+ pci->ep.ops = ep_ops;
+ ret = ks_pcie_add_pcie_ep(ks_pcie, pdev);
+ if (ret < 0)
+ goto err_get_sync;
+ break;
+ default:
+ dev_err(dev, "INVALID device type %d\n", mode);
+ }
+
+ ks_pcie_enable_error_irq(ks_pcie);
return 0;
diff --git a/drivers/pci/controller/dwc/pci-layerscape-ep.c b/drivers/pci/controller/dwc/pci-layerscape-ep.c
index a42c9c3ae1cc..be61d96cc95e 100644
--- a/drivers/pci/controller/dwc/pci-layerscape-ep.c
+++ b/drivers/pci/controller/dwc/pci-layerscape-ep.c
@@ -79,7 +79,7 @@ static int ls_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
}
}
-static struct dw_pcie_ep_ops pcie_ep_ops = {
+static const struct dw_pcie_ep_ops pcie_ep_ops = {
.ep_init = ls_pcie_ep_init,
.raise_irq = ls_pcie_ep_raise_irq,
.get_features = ls_pcie_ep_get_features,
diff --git a/drivers/pci/controller/dwc/pcie-al.c b/drivers/pci/controller/dwc/pcie-al.c
new file mode 100644
index 000000000000..3ab58f0584a8
--- /dev/null
+++ b/drivers/pci/controller/dwc/pcie-al.c
@@ -0,0 +1,93 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PCIe host controller driver for Amazon's Annapurna Labs IP (used in chips
+ * such as Graviton and Alpine)
+ *
+ * Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * Author: Jonathan Chocron <jonnyc@amazon.com>
+ */
+
+#include <linux/pci.h>
+#include <linux/pci-ecam.h>
+#include <linux/pci-acpi.h>
+#include "../../pci.h"
+
+#if defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS)
+
+struct al_pcie_acpi {
+ void __iomem *dbi_base;
+};
+
+static void __iomem *al_pcie_map_bus(struct pci_bus *bus, unsigned int devfn,
+ int where)
+{
+ struct pci_config_window *cfg = bus->sysdata;
+ struct al_pcie_acpi *pcie = cfg->priv;
+ void __iomem *dbi_base = pcie->dbi_base;
+
+ if (bus->number == cfg->busr.start) {
+ /*
+ * The DW PCIe core doesn't filter out transactions to other
+ * devices/functions on the root bus num, so we do this here.
+ */
+ if (PCI_SLOT(devfn) > 0)
+ return NULL;
+ else
+ return dbi_base + where;
+ }
+
+ return pci_ecam_map_bus(bus, devfn, where);
+}
+
+static int al_pcie_init(struct pci_config_window *cfg)
+{
+ struct device *dev = cfg->parent;
+ struct acpi_device *adev = to_acpi_device(dev);
+ struct acpi_pci_root *root = acpi_driver_data(adev);
+ struct al_pcie_acpi *al_pcie;
+ struct resource *res;
+ int ret;
+
+ al_pcie = devm_kzalloc(dev, sizeof(*al_pcie), GFP_KERNEL);
+ if (!al_pcie)
+ return -ENOMEM;
+
+ res = devm_kzalloc(dev, sizeof(*res), GFP_KERNEL);
+ if (!res)
+ return -ENOMEM;
+
+ ret = acpi_get_rc_resources(dev, "AMZN0001", root->segment, res);
+ if (ret) {
+ dev_err(dev, "can't get rc dbi base address for SEG %d\n",
+ root->segment);
+ return ret;
+ }
+
+ dev_dbg(dev, "Root port dbi res: %pR\n", res);
+
+ al_pcie->dbi_base = devm_pci_remap_cfg_resource(dev, res);
+ if (IS_ERR(al_pcie->dbi_base)) {
+ long err = PTR_ERR(al_pcie->dbi_base);
+
+ dev_err(dev, "couldn't remap dbi base %pR (err:%ld)\n",
+ res, err);
+ return err;
+ }
+
+ cfg->priv = al_pcie;
+
+ return 0;
+}
+
+struct pci_ecam_ops al_pcie_ops = {
+ .bus_shift = 20,
+ .init = al_pcie_init,
+ .pci_ops = {
+ .map_bus = al_pcie_map_bus,
+ .read = pci_generic_config_read,
+ .write = pci_generic_config_write,
+ }
+};
+
+#endif /* defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS) */
diff --git a/drivers/pci/controller/dwc/pcie-artpec6.c b/drivers/pci/controller/dwc/pcie-artpec6.c
index dba83abfe764..d00252bd8fae 100644
--- a/drivers/pci/controller/dwc/pcie-artpec6.c
+++ b/drivers/pci/controller/dwc/pcie-artpec6.c
@@ -444,7 +444,7 @@ static int artpec6_pcie_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
return 0;
}
-static struct dw_pcie_ep_ops pcie_ep_ops = {
+static const struct dw_pcie_ep_ops pcie_ep_ops = {
.ep_init = artpec6_pcie_ep_init,
.raise_irq = artpec6_pcie_raise_irq,
};
diff --git a/drivers/pci/controller/dwc/pcie-designware-ep.c b/drivers/pci/controller/dwc/pcie-designware-ep.c
index 24f5a775ad34..2bf5a35c0570 100644
--- a/drivers/pci/controller/dwc/pcie-designware-ep.c
+++ b/drivers/pci/controller/dwc/pcie-designware-ep.c
@@ -46,16 +46,19 @@ static u8 __dw_pcie_ep_find_next_cap(struct dw_pcie *pci, u8 cap_ptr,
u8 cap_id, next_cap_ptr;
u16 reg;
+ if (!cap_ptr)
+ return 0;
+
reg = dw_pcie_readw_dbi(pci, cap_ptr);
- next_cap_ptr = (reg & 0xff00) >> 8;
cap_id = (reg & 0x00ff);
- if (!next_cap_ptr || cap_id > PCI_CAP_ID_MAX)
+ if (cap_id > PCI_CAP_ID_MAX)
return 0;
if (cap_id == cap)
return cap_ptr;
+ next_cap_ptr = (reg & 0xff00) >> 8;
return __dw_pcie_ep_find_next_cap(pci, next_cap_ptr, cap);
}
@@ -67,9 +70,6 @@ static u8 dw_pcie_ep_find_capability(struct dw_pcie *pci, u8 cap)
reg = dw_pcie_readw_dbi(pci, PCI_CAPABILITY_LIST);
next_cap_ptr = (reg & 0x00ff);
- if (!next_cap_ptr)
- return 0;
-
return __dw_pcie_ep_find_next_cap(pci, next_cap_ptr, cap);
}
@@ -397,6 +397,7 @@ int dw_pcie_ep_raise_msi_irq(struct dw_pcie_ep *ep, u8 func_no,
{
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
struct pci_epc *epc = ep->epc;
+ unsigned int aligned_offset;
u16 msg_ctrl, msg_data;
u32 msg_addr_lower, msg_addr_upper, reg;
u64 msg_addr;
@@ -422,13 +423,15 @@ int dw_pcie_ep_raise_msi_irq(struct dw_pcie_ep *ep, u8 func_no,
reg = ep->msi_cap + PCI_MSI_DATA_32;
msg_data = dw_pcie_readw_dbi(pci, reg);
}
- msg_addr = ((u64) msg_addr_upper) << 32 | msg_addr_lower;
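+	/* Map a page-aligned address; the offset within the page is applied on the write below */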
+ aligned_offset = msg_addr_lower & (epc->mem->page_size - 1);
+ msg_addr = ((u64)msg_addr_upper) << 32 |
+ (msg_addr_lower & ~aligned_offset);
ret = dw_pcie_ep_map_addr(epc, func_no, ep->msi_mem_phys, msg_addr,
epc->mem->page_size);
if (ret)
return ret;
- writel(msg_data | (interrupt_num - 1), ep->msi_mem);
+ writel(msg_data | (interrupt_num - 1), ep->msi_mem + aligned_offset);
dw_pcie_ep_unmap_addr(epc, func_no, ep->msi_mem_phys);
@@ -504,10 +507,32 @@ void dw_pcie_ep_exit(struct dw_pcie_ep *ep)
pci_epc_mem_exit(epc);
}
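+/* Walk the extended capability list, which starts at config space offset 0x100 */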
+static unsigned int dw_pcie_ep_find_ext_capability(struct dw_pcie *pci, int cap)
+{
+ u32 header;
+ int pos = PCI_CFG_SPACE_SIZE;
+
+ while (pos) {
+ header = dw_pcie_readl_dbi(pci, pos);
+ if (PCI_EXT_CAP_ID(header) == cap)
+ return pos;
+
+ pos = PCI_EXT_CAP_NEXT(header);
+ if (!pos)
+ break;
+ }
+
+ return 0;
+}
+
int dw_pcie_ep_init(struct dw_pcie_ep *ep)
{
+ int i;
int ret;
+ u32 reg;
void *addr;
+ unsigned int nbars;
+ unsigned int offset;
struct pci_epc *epc;
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
struct device *dev = pci->dev;
@@ -517,10 +542,6 @@ int dw_pcie_ep_init(struct dw_pcie_ep *ep)
dev_err(dev, "dbi_base/dbi_base2 is not populated\n");
return -EINVAL;
}
- if (pci->iatu_unroll_enabled && !pci->atu_base) {
- dev_err(dev, "atu_base is not populated\n");
- return -EINVAL;
- }
ret = of_property_read_u32(np, "num-ib-windows", &ep->num_ib_windows);
if (ret < 0) {
@@ -595,6 +616,18 @@ int dw_pcie_ep_init(struct dw_pcie_ep *ep)
ep->msix_cap = dw_pcie_ep_find_capability(pci, PCI_CAP_ID_MSIX);
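+	/* Clear the supported-size bits of each Resizable BAR capability */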
+ offset = dw_pcie_ep_find_ext_capability(pci, PCI_EXT_CAP_ID_REBAR);
+ if (offset) {
+ reg = dw_pcie_readl_dbi(pci, offset + PCI_REBAR_CTRL);
+ nbars = (reg & PCI_REBAR_CTRL_NBAR_MASK) >>
+ PCI_REBAR_CTRL_NBAR_SHIFT;
+
+ dw_pcie_dbi_ro_wr_en(pci);
+ for (i = 0; i < nbars; i++, offset += PCI_REBAR_CTRL)
+ dw_pcie_writel_dbi(pci, offset + PCI_REBAR_CAP, 0x0);
+ dw_pcie_dbi_ro_wr_dis(pci);
+ }
+
dw_pcie_setup(pci);
return 0;
diff --git a/drivers/pci/controller/dwc/pcie-designware-host.c b/drivers/pci/controller/dwc/pcie-designware-host.c
index 25087d3c9a82..77db32529319 100644
--- a/drivers/pci/controller/dwc/pcie-designware-host.c
+++ b/drivers/pci/controller/dwc/pcie-designware-host.c
@@ -126,18 +126,12 @@ static void dw_pci_setup_msi_msg(struct irq_data *d, struct msi_msg *msg)
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
u64 msi_target;
- if (pp->ops->get_msi_addr)
- msi_target = pp->ops->get_msi_addr(pp);
- else
- msi_target = (u64)pp->msi_data;
+ msi_target = (u64)pp->msi_data;
msg->address_lo = lower_32_bits(msi_target);
msg->address_hi = upper_32_bits(msi_target);
- if (pp->ops->get_msi_data)
- msg->data = pp->ops->get_msi_data(pp, d->hwirq);
- else
- msg->data = d->hwirq;
+ msg->data = d->hwirq;
dev_dbg(pci->dev, "msi#%d address_hi %#x address_lo %#x\n",
(int)d->hwirq, msg->address_hi, msg->address_lo);
@@ -157,17 +151,13 @@ static void dw_pci_bottom_mask(struct irq_data *d)
raw_spin_lock_irqsave(&pp->lock, flags);
- if (pp->ops->msi_clear_irq) {
- pp->ops->msi_clear_irq(pp, d->hwirq);
- } else {
- ctrl = d->hwirq / MAX_MSI_IRQS_PER_CTRL;
- res = ctrl * MSI_REG_CTRL_BLOCK_SIZE;
- bit = d->hwirq % MAX_MSI_IRQS_PER_CTRL;
+ ctrl = d->hwirq / MAX_MSI_IRQS_PER_CTRL;
+ res = ctrl * MSI_REG_CTRL_BLOCK_SIZE;
+ bit = d->hwirq % MAX_MSI_IRQS_PER_CTRL;
- pp->irq_mask[ctrl] |= BIT(bit);
- dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_MASK + res, 4,
- pp->irq_mask[ctrl]);
- }
+ pp->irq_mask[ctrl] |= BIT(bit);
+ dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_MASK + res, 4,
+ pp->irq_mask[ctrl]);
raw_spin_unlock_irqrestore(&pp->lock, flags);
}
@@ -180,17 +170,13 @@ static void dw_pci_bottom_unmask(struct irq_data *d)
raw_spin_lock_irqsave(&pp->lock, flags);
- if (pp->ops->msi_set_irq) {
- pp->ops->msi_set_irq(pp, d->hwirq);
- } else {
- ctrl = d->hwirq / MAX_MSI_IRQS_PER_CTRL;
- res = ctrl * MSI_REG_CTRL_BLOCK_SIZE;
- bit = d->hwirq % MAX_MSI_IRQS_PER_CTRL;
+ ctrl = d->hwirq / MAX_MSI_IRQS_PER_CTRL;
+ res = ctrl * MSI_REG_CTRL_BLOCK_SIZE;
+ bit = d->hwirq % MAX_MSI_IRQS_PER_CTRL;
- pp->irq_mask[ctrl] &= ~BIT(bit);
- dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_MASK + res, 4,
- pp->irq_mask[ctrl]);
- }
+ pp->irq_mask[ctrl] &= ~BIT(bit);
+ dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_MASK + res, 4,
+ pp->irq_mask[ctrl]);
raw_spin_unlock_irqrestore(&pp->lock, flags);
}
@@ -199,20 +185,12 @@ static void dw_pci_bottom_ack(struct irq_data *d)
{
struct pcie_port *pp = irq_data_get_irq_chip_data(d);
unsigned int res, bit, ctrl;
- unsigned long flags;
ctrl = d->hwirq / MAX_MSI_IRQS_PER_CTRL;
res = ctrl * MSI_REG_CTRL_BLOCK_SIZE;
bit = d->hwirq % MAX_MSI_IRQS_PER_CTRL;
- raw_spin_lock_irqsave(&pp->lock, flags);
-
dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_STATUS + res, 4, BIT(bit));
-
- if (pp->ops->msi_irq_ack)
- pp->ops->msi_irq_ack(d->hwirq, pp);
-
- raw_spin_unlock_irqrestore(&pp->lock, flags);
}
static struct irq_chip dw_pci_msi_bottom_irq_chip = {
@@ -245,7 +223,7 @@ static int dw_pcie_irq_domain_alloc(struct irq_domain *domain,
for (i = 0; i < nr_irqs; i++)
irq_domain_set_info(domain, virq + i, bit + i,
- &dw_pci_msi_bottom_irq_chip,
+ pp->msi_irq_chip,
pp, handle_edge_irq,
NULL, NULL);
@@ -298,25 +276,31 @@ int dw_pcie_allocate_domains(struct pcie_port *pp)
void dw_pcie_free_msi(struct pcie_port *pp)
{
- irq_set_chained_handler(pp->msi_irq, NULL);
- irq_set_handler_data(pp->msi_irq, NULL);
+ if (pp->msi_irq) {
+ irq_set_chained_handler(pp->msi_irq, NULL);
+ irq_set_handler_data(pp->msi_irq, NULL);
+ }
irq_domain_remove(pp->msi_domain);
irq_domain_remove(pp->irq_domain);
+
+ if (pp->msi_page)
+ __free_page(pp->msi_page);
}
void dw_pcie_msi_init(struct pcie_port *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct device *dev = pci->dev;
- struct page *page;
u64 msi_target;
- page = alloc_page(GFP_KERNEL);
- pp->msi_data = dma_map_page(dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
+ pp->msi_page = alloc_page(GFP_KERNEL);
+ pp->msi_data = dma_map_page(dev, pp->msi_page, 0, PAGE_SIZE,
+ DMA_FROM_DEVICE);
if (dma_mapping_error(dev, pp->msi_data)) {
dev_err(dev, "Failed to map MSI data\n");
- __free_page(page);
+ __free_page(pp->msi_page);
+ pp->msi_page = NULL;
return;
}
msi_target = (u64)pp->msi_data;
@@ -335,7 +319,7 @@ int dw_pcie_host_init(struct pcie_port *pp)
struct device_node *np = dev->of_node;
struct platform_device *pdev = to_platform_device(dev);
struct resource_entry *win, *tmp;
- struct pci_bus *bus, *child;
+ struct pci_bus *child;
struct pci_host_bridge *bridge;
struct resource *cfg_res;
int ret;
@@ -352,7 +336,7 @@ int dw_pcie_host_init(struct pcie_port *pp)
dev_err(dev, "Missing *config* reg space\n");
}
- bridge = pci_alloc_host_bridge(0);
+ bridge = devm_pci_alloc_host_bridge(dev, 0);
if (!bridge)
return -ENOMEM;
@@ -363,7 +347,7 @@ int dw_pcie_host_init(struct pcie_port *pp)
ret = devm_request_pci_bus_resources(dev, &bridge->windows);
if (ret)
- goto error;
+ return ret;
/* Get the I/O and memory ranges from DT */
resource_list_for_each_entry_safe(win, tmp, &bridge->windows) {
@@ -407,8 +391,7 @@ int dw_pcie_host_init(struct pcie_port *pp)
resource_size(pp->cfg));
if (!pci->dbi_base) {
dev_err(dev, "Error with ioremap\n");
- ret = -ENOMEM;
- goto error;
+ return -ENOMEM;
}
}
@@ -419,8 +402,7 @@ int dw_pcie_host_init(struct pcie_port *pp)
pp->cfg0_base, pp->cfg0_size);
if (!pp->va_cfg0_base) {
dev_err(dev, "Error with ioremap in function\n");
- ret = -ENOMEM;
- goto error;
+ return -ENOMEM;
}
}
@@ -430,8 +412,7 @@ int dw_pcie_host_init(struct pcie_port *pp)
pp->cfg1_size);
if (!pp->va_cfg1_base) {
dev_err(dev, "Error with ioremap\n");
- ret = -ENOMEM;
- goto error;
+ return -ENOMEM;
}
}
@@ -439,7 +420,7 @@ int dw_pcie_host_init(struct pcie_port *pp)
if (ret)
pci->num_viewport = 2;
- if (IS_ENABLED(CONFIG_PCI_MSI) && pci_msi_enabled()) {
+ if (pci_msi_enabled()) {
/*
* If a specific SoC driver needs to change the
* default number of vectors, it needs to implement
@@ -454,14 +435,16 @@ int dw_pcie_host_init(struct pcie_port *pp)
pp->num_vectors == 0) {
dev_err(dev,
"Invalid number of vectors\n");
- goto error;
+ return -EINVAL;
}
}
if (!pp->ops->msi_host_init) {
+ pp->msi_irq_chip = &dw_pci_msi_bottom_irq_chip;
+
ret = dw_pcie_allocate_domains(pp);
if (ret)
- goto error;
+ return ret;
if (pp->msi_irq)
irq_set_chained_handler_and_data(pp->msi_irq,
@@ -470,14 +453,14 @@ int dw_pcie_host_init(struct pcie_port *pp)
} else {
ret = pp->ops->msi_host_init(pp);
if (ret < 0)
- goto error;
+ return ret;
}
}
if (pp->ops->host_init) {
ret = pp->ops->host_init(pp);
if (ret)
- goto error;
+ goto err_free_msi;
}
pp->root_bus_nr = pp->busn->start;
@@ -491,24 +474,25 @@ int dw_pcie_host_init(struct pcie_port *pp)
ret = pci_scan_root_bus_bridge(bridge);
if (ret)
- goto error;
+ goto err_free_msi;
- bus = bridge->bus;
+ pp->root_bus = bridge->bus;
if (pp->ops->scan_bus)
pp->ops->scan_bus(pp);
- pci_bus_size_bridges(bus);
- pci_bus_assign_resources(bus);
+ pci_bus_size_bridges(pp->root_bus);
+ pci_bus_assign_resources(pp->root_bus);
- list_for_each_entry(child, &bus->children, node)
+ list_for_each_entry(child, &pp->root_bus->children, node)
pcie_bus_configure_settings(child);
- pci_bus_add_devices(bus);
+ pci_bus_add_devices(pp->root_bus);
return 0;
-error:
- pci_free_host_bridge(bridge);
+err_free_msi:
+ if (pci_msi_enabled() && !pp->ops->msi_host_init)
+ dw_pcie_free_msi(pp);
return ret;
}
@@ -628,17 +612,6 @@ static struct pci_ops dw_pcie_ops = {
.write = dw_pcie_wr_conf,
};
-static u8 dw_pcie_iatu_unroll_enabled(struct dw_pcie *pci)
-{
- u32 val;
-
- val = dw_pcie_readl_dbi(pci, PCIE_ATU_VIEWPORT);
- if (val == 0xffffffff)
- return 1;
-
- return 0;
-}
-
void dw_pcie_setup_rc(struct pcie_port *pp)
{
u32 val, ctrl, num_ctrls;
@@ -646,17 +619,19 @@ void dw_pcie_setup_rc(struct pcie_port *pp)
dw_pcie_setup(pci);
- num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL;
-
- /* Initialize IRQ Status array */
- for (ctrl = 0; ctrl < num_ctrls; ctrl++) {
- pp->irq_mask[ctrl] = ~0;
- dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_MASK +
- (ctrl * MSI_REG_CTRL_BLOCK_SIZE),
- 4, pp->irq_mask[ctrl]);
- dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_ENABLE +
- (ctrl * MSI_REG_CTRL_BLOCK_SIZE),
- 4, ~0);
+ if (!pp->ops->msi_host_init) {
+ num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL;
+
+ /* Initialize IRQ Status array */
+ for (ctrl = 0; ctrl < num_ctrls; ctrl++) {
+ pp->irq_mask[ctrl] = ~0;
+ dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_MASK +
+ (ctrl * MSI_REG_CTRL_BLOCK_SIZE),
+ 4, pp->irq_mask[ctrl]);
+ dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_ENABLE +
+ (ctrl * MSI_REG_CTRL_BLOCK_SIZE),
+ 4, ~0);
+ }
}
/* Setup RC BARs */
@@ -690,14 +665,6 @@ void dw_pcie_setup_rc(struct pcie_port *pp)
* we should not program the ATU here.
*/
if (!pp->ops->rd_other_conf) {
- /* Get iATU unroll support */
- pci->iatu_unroll_enabled = dw_pcie_iatu_unroll_enabled(pci);
- dev_dbg(pci->dev, "iATU unroll: %s\n",
- pci->iatu_unroll_enabled ? "enabled" : "disabled");
-
- if (pci->iatu_unroll_enabled && !pci->atu_base)
- pci->atu_base = pci->dbi_base + DEFAULT_DBI_ATU_OFFSET;
-
dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX0,
PCIE_ATU_TYPE_MEM, pp->mem_base,
pp->mem_bus_addr, pp->mem_size);
diff --git a/drivers/pci/controller/dwc/pcie-designware-plat.c b/drivers/pci/controller/dwc/pcie-designware-plat.c
index 932dbd0b34b6..b58fdcbc664b 100644
--- a/drivers/pci/controller/dwc/pcie-designware-plat.c
+++ b/drivers/pci/controller/dwc/pcie-designware-plat.c
@@ -106,7 +106,7 @@ dw_plat_pcie_get_features(struct dw_pcie_ep *ep)
return &dw_plat_pcie_epc_features;
}
-static struct dw_pcie_ep_ops pcie_ep_ops = {
+static const struct dw_pcie_ep_ops pcie_ep_ops = {
.ep_init = dw_plat_pcie_ep_init,
.raise_irq = dw_plat_pcie_ep_raise_irq,
.get_features = dw_plat_pcie_get_features,
diff --git a/drivers/pci/controller/dwc/pcie-designware.c b/drivers/pci/controller/dwc/pcie-designware.c
index 31f6331ca46f..9d7c51c32b3b 100644
--- a/drivers/pci/controller/dwc/pcie-designware.c
+++ b/drivers/pci/controller/dwc/pcie-designware.c
@@ -14,12 +14,6 @@
#include "pcie-designware.h"
-/* PCIe Port Logic registers */
-#define PLR_OFFSET 0x700
-#define PCIE_PHY_DEBUG_R1 (PLR_OFFSET + 0x2c)
-#define PCIE_PHY_DEBUG_R1_LINK_UP (0x1 << 4)
-#define PCIE_PHY_DEBUG_R1_LINK_IN_TRAINING (0x1 << 29)
-
int dw_pcie_read(void __iomem *addr, int size, u32 *val)
{
if (!IS_ALIGNED((uintptr_t)addr, size)) {
@@ -89,6 +83,37 @@ void __dw_pcie_write_dbi(struct dw_pcie *pci, void __iomem *base, u32 reg,
dev_err(pci->dev, "Write DBI address failed\n");
}
+u32 __dw_pcie_read_dbi2(struct dw_pcie *pci, void __iomem *base, u32 reg,
+ size_t size)
+{
+ int ret;
+ u32 val;
+
+ if (pci->ops->read_dbi2)
+ return pci->ops->read_dbi2(pci, base, reg, size);
+
+ ret = dw_pcie_read(base + reg, size, &val);
+ if (ret)
+ dev_err(pci->dev, "read DBI address failed\n");
+
+ return val;
+}
+
+void __dw_pcie_write_dbi2(struct dw_pcie *pci, void __iomem *base, u32 reg,
+ size_t size, u32 val)
+{
+ int ret;
+
+ if (pci->ops->write_dbi2) {
+ pci->ops->write_dbi2(pci, base, reg, size, val);
+ return;
+ }
+
+ ret = dw_pcie_write(base + reg, size, val);
+ if (ret)
+ dev_err(pci->dev, "write DBI address failed\n");
+}
+
static u32 dw_pcie_readl_ob_unroll(struct dw_pcie *pci, u32 index, u32 reg)
{
u32 offset = PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(index);
@@ -334,9 +359,20 @@ int dw_pcie_link_up(struct dw_pcie *pci)
if (pci->ops->link_up)
return pci->ops->link_up(pci);
- val = readl(pci->dbi_base + PCIE_PHY_DEBUG_R1);
- return ((val & PCIE_PHY_DEBUG_R1_LINK_UP) &&
- (!(val & PCIE_PHY_DEBUG_R1_LINK_IN_TRAINING)));
+ val = readl(pci->dbi_base + PCIE_PORT_DEBUG1);
+ return ((val & PCIE_PORT_DEBUG1_LINK_UP) &&
+ (!(val & PCIE_PORT_DEBUG1_LINK_IN_TRAINING)));
+}
+
+static u8 dw_pcie_iatu_unroll_enabled(struct dw_pcie *pci)
+{
+ u32 val;
+
+ val = dw_pcie_readl_dbi(pci, PCIE_ATU_VIEWPORT);
+ if (val == 0xffffffff)
+ return 1;
+
+ return 0;
}
void dw_pcie_setup(struct dw_pcie *pci)
@@ -347,6 +383,16 @@ void dw_pcie_setup(struct dw_pcie *pci)
struct device *dev = pci->dev;
struct device_node *np = dev->of_node;
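+	/* Cores >= 4.80A use unrolled iATU registers; unknown versions are probed via the viewport */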
+ if (pci->version >= 0x480A || (!pci->version &&
+ dw_pcie_iatu_unroll_enabled(pci))) {
+ pci->iatu_unroll_enabled = true;
+ if (!pci->atu_base)
+ pci->atu_base = pci->dbi_base + DEFAULT_DBI_ATU_OFFSET;
+ }
+ dev_dbg(pci->dev, "iATU unroll: %s\n", pci->iatu_unroll_enabled ?
+ "enabled" : "disabled");
+
+
ret = of_property_read_u32(np, "num-lanes", &lanes);
if (ret)
lanes = 0;
diff --git a/drivers/pci/controller/dwc/pcie-designware.h b/drivers/pci/controller/dwc/pcie-designware.h
index 377f4c0b52da..b8993f2b78df 100644
--- a/drivers/pci/controller/dwc/pcie-designware.h
+++ b/drivers/pci/controller/dwc/pcie-designware.h
@@ -41,6 +41,9 @@
#define PCIE_PORT_DEBUG0 0x728
#define PORT_LOGIC_LTSSM_STATE_MASK 0x1f
#define PORT_LOGIC_LTSSM_STATE_L0 0x11
+#define PCIE_PORT_DEBUG1 0x72C
+#define PCIE_PORT_DEBUG1_LINK_UP BIT(4)
+#define PCIE_PORT_DEBUG1_LINK_IN_TRAINING BIT(29)
#define PCIE_LINK_WIDTH_SPEED_CONTROL 0x80C
#define PORT_LOGIC_SPEED_CHANGE BIT(17)
@@ -145,14 +148,9 @@ struct dw_pcie_host_ops {
int (*wr_other_conf)(struct pcie_port *pp, struct pci_bus *bus,
unsigned int devfn, int where, int size, u32 val);
int (*host_init)(struct pcie_port *pp);
- void (*msi_set_irq)(struct pcie_port *pp, int irq);
- void (*msi_clear_irq)(struct pcie_port *pp, int irq);
- phys_addr_t (*get_msi_addr)(struct pcie_port *pp);
- u32 (*get_msi_data)(struct pcie_port *pp, int pos);
void (*scan_bus)(struct pcie_port *pp);
void (*set_num_vectors)(struct pcie_port *pp);
int (*msi_host_init)(struct pcie_port *pp);
- void (*msi_irq_ack)(int irq, struct pcie_port *pp);
};
struct pcie_port {
@@ -179,8 +177,11 @@ struct pcie_port {
struct irq_domain *irq_domain;
struct irq_domain *msi_domain;
dma_addr_t msi_data;
+ struct page *msi_page;
+ struct irq_chip *msi_irq_chip;
u32 num_vectors;
u32 irq_mask[MAX_MSI_CTRLS];
+ struct pci_bus *root_bus;
raw_spinlock_t lock;
DECLARE_BITMAP(msi_irq_in_use, MAX_MSI_IRQS);
};
@@ -200,7 +201,7 @@ struct dw_pcie_ep_ops {
struct dw_pcie_ep {
struct pci_epc *epc;
- struct dw_pcie_ep_ops *ops;
+ const struct dw_pcie_ep_ops *ops;
phys_addr_t phys_base;
size_t addr_size;
size_t page_size;
@@ -222,6 +223,10 @@ struct dw_pcie_ops {
size_t size);
void (*write_dbi)(struct dw_pcie *pcie, void __iomem *base, u32 reg,
size_t size, u32 val);
+ u32 (*read_dbi2)(struct dw_pcie *pcie, void __iomem *base, u32 reg,
+ size_t size);
+ void (*write_dbi2)(struct dw_pcie *pcie, void __iomem *base, u32 reg,
+ size_t size, u32 val);
int (*link_up)(struct dw_pcie *pcie);
int (*start_link)(struct dw_pcie *pcie);
void (*stop_link)(struct dw_pcie *pcie);
@@ -238,6 +243,7 @@ struct dw_pcie {
struct pcie_port pp;
struct dw_pcie_ep ep;
const struct dw_pcie_ops *ops;
+ unsigned int version;
};
#define to_dw_pcie_from_pp(port) container_of((port), struct dw_pcie, pp)
@@ -252,6 +258,10 @@ u32 __dw_pcie_read_dbi(struct dw_pcie *pci, void __iomem *base, u32 reg,
size_t size);
void __dw_pcie_write_dbi(struct dw_pcie *pci, void __iomem *base, u32 reg,
size_t size, u32 val);
+u32 __dw_pcie_read_dbi2(struct dw_pcie *pci, void __iomem *base, u32 reg,
+ size_t size);
+void __dw_pcie_write_dbi2(struct dw_pcie *pci, void __iomem *base, u32 reg,
+ size_t size, u32 val);
int dw_pcie_link_up(struct dw_pcie *pci);
int dw_pcie_wait_for_link(struct dw_pcie *pci);
void dw_pcie_prog_outbound_atu(struct dw_pcie *pci, int index,
@@ -295,12 +305,12 @@ static inline u8 dw_pcie_readb_dbi(struct dw_pcie *pci, u32 reg)
static inline void dw_pcie_writel_dbi2(struct dw_pcie *pci, u32 reg, u32 val)
{
- __dw_pcie_write_dbi(pci, pci->dbi_base2, reg, 0x4, val);
+ __dw_pcie_write_dbi2(pci, pci->dbi_base2, reg, 0x4, val);
}
static inline u32 dw_pcie_readl_dbi2(struct dw_pcie *pci, u32 reg)
{
- return __dw_pcie_read_dbi(pci, pci->dbi_base2, reg, 0x4);
+ return __dw_pcie_read_dbi2(pci, pci->dbi_base2, reg, 0x4);
}
static inline void dw_pcie_writel_atu(struct dw_pcie *pci, u32 reg, u32 val)
diff --git a/drivers/pci/controller/dwc/pcie-qcom.c b/drivers/pci/controller/dwc/pcie-qcom.c
index a7f703556790..0ed235d560e3 100644
--- a/drivers/pci/controller/dwc/pcie-qcom.c
+++ b/drivers/pci/controller/dwc/pcie-qcom.c
@@ -1129,25 +1129,8 @@ err_deinit:
return ret;
}
-static int qcom_pcie_rd_own_conf(struct pcie_port *pp, int where, int size,
- u32 *val)
-{
- struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
-
- /* the device class is not reported correctly from the register */
- if (where == PCI_CLASS_REVISION && size == 4) {
- *val = readl(pci->dbi_base + PCI_CLASS_REVISION);
- *val &= 0xff; /* keep revision id */
- *val |= PCI_CLASS_BRIDGE_PCI << 16;
- return PCIBIOS_SUCCESSFUL;
- }
-
- return dw_pcie_read(pci->dbi_base + where, size, val);
-}
-
static const struct dw_pcie_host_ops qcom_pcie_dw_ops = {
.host_init = qcom_pcie_host_init,
- .rd_own_conf = qcom_pcie_rd_own_conf,
};
/* Qcom IP rev.: 2.1.0 Synopsys IP rev.: 4.01a */
@@ -1309,6 +1292,12 @@ static const struct of_device_id qcom_pcie_match[] = {
{ }
};
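+/* The controller does not report the bridge device class correctly; fix it up early */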
+static void qcom_fixup_class(struct pci_dev *dev)
+{
+ dev->class = PCI_CLASS_BRIDGE_PCI << 8;
+}
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, PCI_ANY_ID, qcom_fixup_class);
+
static struct platform_driver qcom_pcie_driver = {
.probe = qcom_pcie_probe,
.driver = {
diff --git a/drivers/pci/controller/pci-hyperv.c b/drivers/pci/controller/pci-hyperv.c
index 95441a35eceb..82acd6155adf 100644
--- a/drivers/pci/controller/pci-hyperv.c
+++ b/drivers/pci/controller/pci-hyperv.c
@@ -1486,6 +1486,21 @@ static void hv_pci_assign_slots(struct hv_pcibus_device *hbus)
}
}
+/*
+ * Remove entries in sysfs pci slot directory.
+ */
+static void hv_pci_remove_slots(struct hv_pcibus_device *hbus)
+{
+ struct hv_pci_dev *hpdev;
+
+ list_for_each_entry(hpdev, &hbus->children, list_entry) {
+ if (!hpdev->pci_slot)
+ continue;
+ pci_destroy_slot(hpdev->pci_slot);
+ hpdev->pci_slot = NULL;
+ }
+}
+
/**
* create_root_hv_pci_bus() - Expose a new root PCI bus
* @hbus: Root PCI bus, as understood by this driver
@@ -1761,6 +1776,10 @@ static void pci_devices_present_work(struct work_struct *work)
hpdev = list_first_entry(&removed, struct hv_pci_dev,
list_entry);
list_del(&hpdev->list_entry);
+
+ if (hpdev->pci_slot)
+ pci_destroy_slot(hpdev->pci_slot);
+
put_pcichild(hpdev);
}
@@ -1900,6 +1919,9 @@ static void hv_eject_device_work(struct work_struct *work)
sizeof(*ejct_pkt), (unsigned long)&ctxt.pkt,
VM_PKT_DATA_INBAND, 0);
+ /* For the get_pcichild() in hv_pci_eject_device() */
+ put_pcichild(hpdev);
+ /* For the two refs got in new_pcichild_device() */
put_pcichild(hpdev);
put_pcichild(hpdev);
put_hvpcibus(hpdev->hbus);
@@ -2677,6 +2699,7 @@ static int hv_pci_remove(struct hv_device *hdev)
pci_lock_rescan_remove();
pci_stop_root_bus(hbus->pci_bus);
pci_remove_root_bus(hbus->pci_bus);
+ hv_pci_remove_slots(hbus);
pci_unlock_rescan_remove();
hbus->state = hv_pcibus_removed;
}
diff --git a/drivers/pci/controller/pcie-iproc.c b/drivers/pci/controller/pcie-iproc.c
index c20fd6bd68fd..dd11d0226ff0 100644
--- a/drivers/pci/controller/pcie-iproc.c
+++ b/drivers/pci/controller/pcie-iproc.c
@@ -60,6 +60,10 @@
#define APB_ERR_EN_SHIFT 0
#define APB_ERR_EN BIT(APB_ERR_EN_SHIFT)
+#define CFG_RD_SUCCESS 0
+#define CFG_RD_UR 1
+#define CFG_RD_CRS 2
+#define CFG_RD_CA 3
#define CFG_RETRY_STATUS 0xffff0001
#define CFG_RETRY_STATUS_TIMEOUT_US 500000 /* 500 milliseconds */
@@ -289,6 +293,9 @@ enum iproc_pcie_reg {
IPROC_PCIE_IARR4,
IPROC_PCIE_IMAP4,
+ /* config read status */
+ IPROC_PCIE_CFG_RD_STATUS,
+
/* link status */
IPROC_PCIE_LINK_STATUS,
@@ -350,6 +357,7 @@ static const u16 iproc_pcie_reg_paxb_v2[] = {
[IPROC_PCIE_IMAP3] = 0xe08,
[IPROC_PCIE_IARR4] = 0xe68,
[IPROC_PCIE_IMAP4] = 0xe70,
+ [IPROC_PCIE_CFG_RD_STATUS] = 0xee0,
[IPROC_PCIE_LINK_STATUS] = 0xf0c,
[IPROC_PCIE_APB_ERR_EN] = 0xf40,
};
@@ -474,10 +482,12 @@ static void __iomem *iproc_pcie_map_ep_cfg_reg(struct iproc_pcie *pcie,
return (pcie->base + offset);
}
-static unsigned int iproc_pcie_cfg_retry(void __iomem *cfg_data_p)
+static unsigned int iproc_pcie_cfg_retry(struct iproc_pcie *pcie,
+ void __iomem *cfg_data_p)
{
int timeout = CFG_RETRY_STATUS_TIMEOUT_US;
unsigned int data;
+ u32 status;
/*
* As per PCIe spec r3.1, sec 2.3.2, CRS Software Visibility only
@@ -498,6 +508,15 @@ static unsigned int iproc_pcie_cfg_retry(void __iomem *cfg_data_p)
*/
data = readl(cfg_data_p);
while (data == CFG_RETRY_STATUS && timeout--) {
+ /*
+		 * The CRS state is reported in the CFG_RD status register.
+		 * This handles the case where CFG_RETRY_STATUS is itself
+		 * valid config data.
+ */
+ status = iproc_pcie_read_reg(pcie, IPROC_PCIE_CFG_RD_STATUS);
+ if (status != CFG_RD_CRS)
+ return data;
+
udelay(1);
data = readl(cfg_data_p);
}
@@ -576,7 +595,7 @@ static int iproc_pcie_config_read(struct pci_bus *bus, unsigned int devfn,
if (!cfg_data_p)
return PCIBIOS_DEVICE_NOT_FOUND;
- data = iproc_pcie_cfg_retry(cfg_data_p);
+ data = iproc_pcie_cfg_retry(pcie, cfg_data_p);
*val = data;
if (size <= 2)
@@ -936,8 +955,25 @@ static int iproc_pcie_setup_ob(struct iproc_pcie *pcie, u64 axi_addr,
resource_size_t window_size =
ob_map->window_sizes[size_idx] * SZ_1M;
- if (size < window_size)
- continue;
+ /*
+			 * Keep iterating until we reach the last window with
+			 * the minimal window size at index zero. In that case,
+			 * compromise by mapping the region with the minimum
+			 * window size that can be supported.
+ */
+ if (size < window_size) {
+ if (size_idx > 0 || window_idx > 0)
+ continue;
+
+ /*
+				 * Corner case: the last window with the
+				 * minimum supported window size.
+ */
+ axi_addr = ALIGN_DOWN(axi_addr, window_size);
+ pci_addr = ALIGN_DOWN(pci_addr, window_size);
+ size = window_size;
+ }
if (!IS_ALIGNED(axi_addr, window_size) ||
!IS_ALIGNED(pci_addr, window_size)) {
@@ -1347,7 +1383,6 @@ static int iproc_pcie_rev_init(struct iproc_pcie *pcie)
break;
case IPROC_PCIE_PAXB:
regs = iproc_pcie_reg_paxb;
- pcie->iproc_cfg_read = true;
pcie->has_apb_err_disable = true;
if (pcie->need_ob_cfg) {
pcie->ob_map = paxb_ob_map;
@@ -1356,6 +1391,7 @@ static int iproc_pcie_rev_init(struct iproc_pcie *pcie)
break;
case IPROC_PCIE_PAXB_V2:
regs = iproc_pcie_reg_paxb_v2;
+ pcie->iproc_cfg_read = true;
pcie->has_apb_err_disable = true;
if (pcie->need_ob_cfg) {
pcie->ob_map = paxb_v2_ob_map;
diff --git a/drivers/pci/controller/pcie-mediatek.c b/drivers/pci/controller/pcie-mediatek.c
index 0b6c72804e03..adb6cb15daa2 100644
--- a/drivers/pci/controller/pcie-mediatek.c
+++ b/drivers/pci/controller/pcie-mediatek.c
@@ -915,49 +915,29 @@ static int mtk_pcie_parse_port(struct mtk_pcie *pcie,
/* sys_ck might be divided into the following parts in some chips */
snprintf(name, sizeof(name), "ahb_ck%d", slot);
- port->ahb_ck = devm_clk_get(dev, name);
- if (IS_ERR(port->ahb_ck)) {
- if (PTR_ERR(port->ahb_ck) == -EPROBE_DEFER)
- return -EPROBE_DEFER;
-
- port->ahb_ck = NULL;
- }
+ port->ahb_ck = devm_clk_get_optional(dev, name);
+ if (IS_ERR(port->ahb_ck))
+ return PTR_ERR(port->ahb_ck);
snprintf(name, sizeof(name), "axi_ck%d", slot);
- port->axi_ck = devm_clk_get(dev, name);
- if (IS_ERR(port->axi_ck)) {
- if (PTR_ERR(port->axi_ck) == -EPROBE_DEFER)
- return -EPROBE_DEFER;
-
- port->axi_ck = NULL;
- }
+ port->axi_ck = devm_clk_get_optional(dev, name);
+ if (IS_ERR(port->axi_ck))
+ return PTR_ERR(port->axi_ck);
snprintf(name, sizeof(name), "aux_ck%d", slot);
- port->aux_ck = devm_clk_get(dev, name);
- if (IS_ERR(port->aux_ck)) {
- if (PTR_ERR(port->aux_ck) == -EPROBE_DEFER)
- return -EPROBE_DEFER;
-
- port->aux_ck = NULL;
- }
+ port->aux_ck = devm_clk_get_optional(dev, name);
+ if (IS_ERR(port->aux_ck))
+ return PTR_ERR(port->aux_ck);
snprintf(name, sizeof(name), "obff_ck%d", slot);
- port->obff_ck = devm_clk_get(dev, name);
- if (IS_ERR(port->obff_ck)) {
- if (PTR_ERR(port->obff_ck) == -EPROBE_DEFER)
- return -EPROBE_DEFER;
-
- port->obff_ck = NULL;
- }
+ port->obff_ck = devm_clk_get_optional(dev, name);
+ if (IS_ERR(port->obff_ck))
+ return PTR_ERR(port->obff_ck);
snprintf(name, sizeof(name), "pipe_ck%d", slot);
- port->pipe_ck = devm_clk_get(dev, name);
- if (IS_ERR(port->pipe_ck)) {
- if (PTR_ERR(port->pipe_ck) == -EPROBE_DEFER)
- return -EPROBE_DEFER;
-
- port->pipe_ck = NULL;
- }
+ port->pipe_ck = devm_clk_get_optional(dev, name);
+ if (IS_ERR(port->pipe_ck))
+ return PTR_ERR(port->pipe_ck);
snprintf(name, sizeof(name), "pcie-rst%d", slot);
port->reset = devm_reset_control_get_optional_exclusive(dev, name);
diff --git a/drivers/pci/controller/pcie-rcar.c b/drivers/pci/controller/pcie-rcar.c
index e4cebeb18470..f6a669a9af41 100644
--- a/drivers/pci/controller/pcie-rcar.c
+++ b/drivers/pci/controller/pcie-rcar.c
@@ -46,6 +46,7 @@
/* Transfer control */
#define PCIETCTLR 0x02000
+#define DL_DOWN BIT(3)
#define CFINIT BIT(0)
#define PCIETSTR 0x02004
#define DATA_LINK_ACTIVE BIT(0)
@@ -94,6 +95,7 @@
#define MACCTLR 0x011058
#define SPEED_CHANGE BIT(24)
#define SCRAMBLE_DISABLE BIT(27)
+#define PMSR 0x01105c
#define MACS2R 0x011078
#define MACCGSPSETR 0x011084
#define SPCNGRSN BIT(31)
@@ -1134,6 +1136,7 @@ static int rcar_pcie_probe(struct platform_device *pdev)
pcie = pci_host_bridge_priv(bridge);
pcie->dev = dev;
+ platform_set_drvdata(pdev, pcie);
err = pci_parse_request_of_pci_ranges(dev, &pcie->resources, NULL);
if (err)
@@ -1225,10 +1228,28 @@ err_free_bridge:
return err;
}
+static int rcar_pcie_resume_noirq(struct device *dev)
+{
+ struct rcar_pcie *pcie = dev_get_drvdata(dev);
+
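+	/* Nothing to do if PMSR is non-zero and the data link is still up */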
+ if (rcar_pci_read_reg(pcie, PMSR) &&
+ !(rcar_pci_read_reg(pcie, PCIETCTLR) & DL_DOWN))
+ return 0;
+
+ /* Re-establish the PCIe link */
+ rcar_pci_write_reg(pcie, CFINIT, PCIETCTLR);
+ return rcar_pcie_wait_for_dl(pcie);
+}
+
+static const struct dev_pm_ops rcar_pcie_pm_ops = {
+ .resume_noirq = rcar_pcie_resume_noirq,
+};
+
static struct platform_driver rcar_pcie_driver = {
.driver = {
.name = "rcar-pcie",
.of_match_table = rcar_pcie_of_match,
+ .pm = &rcar_pcie_pm_ops,
.suppress_bind_attrs = true,
},
.probe = rcar_pcie_probe,
diff --git a/drivers/pci/controller/pcie-xilinx-nwl.c b/drivers/pci/controller/pcie-xilinx-nwl.c
index 81538d77f790..3b031f00a94a 100644
--- a/drivers/pci/controller/pcie-xilinx-nwl.c
+++ b/drivers/pci/controller/pcie-xilinx-nwl.c
@@ -438,11 +438,10 @@ static const struct irq_domain_ops legacy_domain_ops = {
#ifdef CONFIG_PCI_MSI
static struct irq_chip nwl_msi_irq_chip = {
.name = "nwl_pcie:msi",
- .irq_enable = unmask_msi_irq,
- .irq_disable = mask_msi_irq,
- .irq_mask = mask_msi_irq,
- .irq_unmask = unmask_msi_irq,
-
+ .irq_enable = pci_msi_unmask_irq,
+ .irq_disable = pci_msi_mask_irq,
+ .irq_mask = pci_msi_mask_irq,
+ .irq_unmask = pci_msi_unmask_irq,
};
static struct msi_domain_info nwl_msi_domain_info = {
diff --git a/drivers/pci/endpoint/functions/pci-epf-test.c b/drivers/pci/endpoint/functions/pci-epf-test.c
index d0b91da49bf4..c0786ca74312 100644
--- a/drivers/pci/endpoint/functions/pci-epf-test.c
+++ b/drivers/pci/endpoint/functions/pci-epf-test.c
@@ -438,7 +438,7 @@ static int pci_epf_test_alloc_space(struct pci_epf *epf)
epc_features = epf_test->epc_features;
base = pci_epf_alloc_space(epf, sizeof(struct pci_epf_test_reg),
- test_reg_bar);
+ test_reg_bar, epc_features->align);
if (!base) {
		dev_err(dev, "Failed to allocate register space\n");
return -ENOMEM;
@@ -453,7 +453,8 @@ static int pci_epf_test_alloc_space(struct pci_epf *epf)
if (!!(epc_features->reserved_bar & (1 << bar)))
continue;
- base = pci_epf_alloc_space(epf, bar_size[bar], bar);
+ base = pci_epf_alloc_space(epf, bar_size[bar], bar,
+ epc_features->align);
if (!base)
dev_err(dev, "Failed to allocate space for BAR%d\n",
bar);
diff --git a/drivers/pci/endpoint/pci-epf-core.c b/drivers/pci/endpoint/pci-epf-core.c
index 8bfdcd291196..fb1306de8f40 100644
--- a/drivers/pci/endpoint/pci-epf-core.c
+++ b/drivers/pci/endpoint/pci-epf-core.c
@@ -109,10 +109,12 @@ EXPORT_SYMBOL_GPL(pci_epf_free_space);
* pci_epf_alloc_space() - allocate memory for the PCI EPF register space
* @size: the size of the memory that has to be allocated
* @bar: the BAR number corresponding to the allocated register space
+ * @align: alignment size for the allocation region
*
* Invoke to allocate memory for the PCI EPF register space.
*/
-void *pci_epf_alloc_space(struct pci_epf *epf, size_t size, enum pci_barno bar)
+void *pci_epf_alloc_space(struct pci_epf *epf, size_t size, enum pci_barno bar,
+ size_t align)
{
void *space;
struct device *dev = epf->epc->dev.parent;
@@ -120,7 +122,11 @@ void *pci_epf_alloc_space(struct pci_epf *epf, size_t size, enum pci_barno bar)
if (size < 128)
size = 128;
- size = roundup_pow_of_two(size);
+
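+	/* Honor the requested alignment when one is given; otherwise round up to a power of two */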
+ if (align)
+ size = ALIGN(size, align);
+ else
+ size = roundup_pow_of_two(size);
space = dma_alloc_coherent(dev, size, &phys_addr, GFP_KERNEL);
if (!space) {
diff --git a/drivers/pci/hotplug/rpadlpar_core.c b/drivers/pci/hotplug/rpadlpar_core.c
index e2356a9c7088..182f9e3443ee 100644
--- a/drivers/pci/hotplug/rpadlpar_core.c
+++ b/drivers/pci/hotplug/rpadlpar_core.c
@@ -51,6 +51,7 @@ static struct device_node *find_vio_slot_node(char *drc_name)
if (rc == 0)
break;
}
+ of_node_put(parent);
return dn;
}
@@ -71,6 +72,7 @@ static struct device_node *find_php_slot_pci_node(char *drc_name,
return np;
}
+/* Returns a device_node with its reference count incremented */
static struct device_node *find_dlpar_node(char *drc_name, int *node_type)
{
struct device_node *dn;
@@ -306,6 +308,7 @@ int dlpar_add_slot(char *drc_name)
rc = dlpar_add_phb(drc_name, dn);
break;
}
+ of_node_put(dn);
printk(KERN_INFO "%s: slot %s added\n", DLPAR_MODULE_NAME, drc_name);
exit:
@@ -439,6 +442,7 @@ int dlpar_remove_slot(char *drc_name)
rc = dlpar_remove_pci_slot(drc_name, dn);
break;
}
+ of_node_put(dn);
vm_unmap_aliases();
printk(KERN_INFO "%s: slot %s removed\n", DLPAR_MODULE_NAME, drc_name);
diff --git a/drivers/pci/hotplug/rpaphp_slot.c b/drivers/pci/hotplug/rpaphp_slot.c
index 5282aa3e33c5..93b4a945c55d 100644
--- a/drivers/pci/hotplug/rpaphp_slot.c
+++ b/drivers/pci/hotplug/rpaphp_slot.c
@@ -21,6 +21,7 @@
/* free up the memory used by a slot */
void dealloc_slot_struct(struct slot *slot)
{
+ of_node_put(slot->dn);
kfree(slot->name);
kfree(slot);
}
@@ -36,7 +37,7 @@ struct slot *alloc_slot_struct(struct device_node *dn,
slot->name = kstrdup(drc_name, GFP_KERNEL);
if (!slot->name)
goto error_slot;
- slot->dn = dn;
+ slot->dn = of_node_get(dn);
slot->index = drc_index;
slot->power_domain = power_domain;
slot->hotplug_slot.ops = &rpaphp_hotplug_slot_ops;
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index 73986825d221..e039b740fe74 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -1338,7 +1338,7 @@ irq_hw_number_t pci_msi_domain_calc_hwirq(struct pci_dev *dev,
struct msi_desc *desc)
{
return (irq_hw_number_t)desc->msi_attrib.entry_nr |
- PCI_DEVID(dev->bus->number, dev->devfn) << 11 |
+ pci_dev_id(dev) << 11 |
(pci_domain_nr(dev->bus) & 0xFFFFFFFF) << 27;
}
@@ -1508,7 +1508,7 @@ static int get_msi_id_cb(struct pci_dev *pdev, u16 alias, void *data)
u32 pci_msi_domain_get_msi_rid(struct irq_domain *domain, struct pci_dev *pdev)
{
struct device_node *of_node;
- u32 rid = PCI_DEVID(pdev->bus->number, pdev->devfn);
+ u32 rid = pci_dev_id(pdev);
pci_for_each_dma_alias(pdev, get_msi_id_cb, &rid);
@@ -1531,7 +1531,7 @@ u32 pci_msi_domain_get_msi_rid(struct irq_domain *domain, struct pci_dev *pdev)
struct irq_domain *pci_msi_get_device_domain(struct pci_dev *pdev)
{
struct irq_domain *dom;
- u32 rid = PCI_DEVID(pdev->bus->number, pdev->devfn);
+ u32 rid = pci_dev_id(pdev);
pci_for_each_dma_alias(pdev, get_msi_id_cb, &rid);
dom = of_msi_map_get_device_domain(&pdev->dev, rid);
diff --git a/drivers/pci/of.c b/drivers/pci/of.c
index 3d32da15c215..73d5adec0a28 100644
--- a/drivers/pci/of.c
+++ b/drivers/pci/of.c
@@ -15,6 +15,7 @@
#include <linux/of_pci.h>
#include "pci.h"
+#ifdef CONFIG_PCI
void pci_set_of_node(struct pci_dev *dev)
{
if (!dev->bus->dev.of_node)
@@ -31,10 +32,16 @@ void pci_release_of_node(struct pci_dev *dev)
void pci_set_bus_of_node(struct pci_bus *bus)
{
- if (bus->self == NULL)
- bus->dev.of_node = pcibios_get_phb_of_node(bus);
- else
- bus->dev.of_node = of_node_get(bus->self->dev.of_node);
+ struct device_node *node;
+
+ if (bus->self == NULL) {
+ node = pcibios_get_phb_of_node(bus);
+ } else {
+ node = of_node_get(bus->self->dev.of_node);
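+		/* Firmware marks external-facing ports; treat such bridges as untrusted */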
+ if (node && of_property_read_bool(node, "external-facing"))
+ bus->self->untrusted = true;
+ }
+ bus->dev.of_node = node;
}
void pci_release_bus_of_node(struct pci_bus *bus)
@@ -197,27 +204,6 @@ int of_get_pci_domain_nr(struct device_node *node)
EXPORT_SYMBOL_GPL(of_get_pci_domain_nr);
/**
- * This function will try to find the limitation of link speed by finding
- * a property called "max-link-speed" of the given device node.
- *
- * @node: device tree node with the max link speed information
- *
- * Returns the associated max link speed from DT, or a negative value if the
- * required property is not found or is invalid.
- */
-int of_pci_get_max_link_speed(struct device_node *node)
-{
- u32 max_link_speed;
-
- if (of_property_read_u32(node, "max-link-speed", &max_link_speed) ||
- max_link_speed > 4)
- return -EINVAL;
-
- return max_link_speed;
-}
-EXPORT_SYMBOL_GPL(of_pci_get_max_link_speed);
-
-/**
* of_pci_check_probe_only - Setup probe only mode if linux,pci-probe-only
* is present and valid
*/
@@ -537,3 +523,25 @@ int pci_parse_request_of_pci_ranges(struct device *dev,
return err;
}
+#endif /* CONFIG_PCI */
+
+/**
+ * This function will try to find the limitation of link speed by finding
+ * a property called "max-link-speed" of the given device node.
+ *
+ * @node: device tree node with the max link speed information
+ *
+ * Returns the associated max link speed from DT, or a negative value if the
+ * required property is not found or is invalid.
+ */
+int of_pci_get_max_link_speed(struct device_node *node)
+{
+ u32 max_link_speed;
+
+ if (of_property_read_u32(node, "max-link-speed", &max_link_speed) ||
+ max_link_speed > 4)
+ return -EINVAL;
+
+ return max_link_speed;
+}
+EXPORT_SYMBOL_GPL(of_pci_get_max_link_speed);
diff --git a/drivers/pci/p2pdma.c b/drivers/pci/p2pdma.c
index c52298d76e64..742928d0053e 100644
--- a/drivers/pci/p2pdma.c
+++ b/drivers/pci/p2pdma.c
@@ -275,6 +275,30 @@ static void seq_buf_print_bus_devfn(struct seq_buf *buf, struct pci_dev *pdev)
}
/*
+ * If we can't find a common upstream bridge take a look at the root
+ * complex and compare it to a whitelist of known good hardware.
+ */
+static bool root_complex_whitelist(struct pci_dev *dev)
+{
+ struct pci_host_bridge *host = pci_find_host_bridge(dev->bus);
+ struct pci_dev *root = pci_get_slot(host->bus, PCI_DEVFN(0, 0));
+ unsigned short vendor, device;
+
+ if (!root)
+ return false;
+
+ vendor = root->vendor;
+ device = root->device;
+ pci_dev_put(root);
+
+ /* AMD ZEN host bridges can do peer to peer */
+ if (vendor == PCI_VENDOR_ID_AMD && device == 0x1450)
+ return true;
+
+ return false;
+}
+
+/*
* Find the distance through the nearest common upstream bridge between
* two PCI devices.
*
@@ -317,13 +341,13 @@ static void seq_buf_print_bus_devfn(struct seq_buf *buf, struct pci_dev *pdev)
* In this case, a list of all infringing bridge addresses will be
* populated in acs_list (assuming it's non-null) for printk purposes.
*/
-static int upstream_bridge_distance(struct pci_dev *a,
- struct pci_dev *b,
+static int upstream_bridge_distance(struct pci_dev *provider,
+ struct pci_dev *client,
struct seq_buf *acs_list)
{
+ struct pci_dev *a = provider, *b = client, *bb;
int dist_a = 0;
int dist_b = 0;
- struct pci_dev *bb = NULL;
int acs_cnt = 0;
/*
@@ -354,6 +378,14 @@ static int upstream_bridge_distance(struct pci_dev *a,
dist_a++;
}
+ /*
+ * Allow the connection if both devices are on a whitelisted root
+	 * complex, but add an arbitrarily large value to the distance.
+ */
+ if (root_complex_whitelist(provider) &&
+ root_complex_whitelist(client))
+ return 0x1000 + dist_a + dist_b;
+
return -1;
check_b_path_acs:
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
index e1949f7efd9c..03e02dd6c1d9 100644
--- a/drivers/pci/pci-acpi.c
+++ b/drivers/pci/pci-acpi.c
@@ -119,7 +119,7 @@ phys_addr_t acpi_pci_root_get_mcfg_addr(acpi_handle handle)
}
static acpi_status decode_type0_hpx_record(union acpi_object *record,
- struct hotplug_params *hpx)
+ struct hpp_type0 *hpx0)
{
int i;
union acpi_object *fields = record->package.elements;
@@ -132,12 +132,11 @@ static acpi_status decode_type0_hpx_record(union acpi_object *record,
for (i = 2; i < 6; i++)
if (fields[i].type != ACPI_TYPE_INTEGER)
return AE_ERROR;
- hpx->t0 = &hpx->type0_data;
- hpx->t0->revision = revision;
- hpx->t0->cache_line_size = fields[2].integer.value;
- hpx->t0->latency_timer = fields[3].integer.value;
- hpx->t0->enable_serr = fields[4].integer.value;
- hpx->t0->enable_perr = fields[5].integer.value;
+ hpx0->revision = revision;
+ hpx0->cache_line_size = fields[2].integer.value;
+ hpx0->latency_timer = fields[3].integer.value;
+ hpx0->enable_serr = fields[4].integer.value;
+ hpx0->enable_perr = fields[5].integer.value;
break;
default:
printk(KERN_WARNING
@@ -149,7 +148,7 @@ static acpi_status decode_type0_hpx_record(union acpi_object *record,
}
static acpi_status decode_type1_hpx_record(union acpi_object *record,
- struct hotplug_params *hpx)
+ struct hpp_type1 *hpx1)
{
int i;
union acpi_object *fields = record->package.elements;
@@ -162,11 +161,10 @@ static acpi_status decode_type1_hpx_record(union acpi_object *record,
for (i = 2; i < 5; i++)
if (fields[i].type != ACPI_TYPE_INTEGER)
return AE_ERROR;
- hpx->t1 = &hpx->type1_data;
- hpx->t1->revision = revision;
- hpx->t1->max_mem_read = fields[2].integer.value;
- hpx->t1->avg_max_split = fields[3].integer.value;
- hpx->t1->tot_max_split = fields[4].integer.value;
+ hpx1->revision = revision;
+ hpx1->max_mem_read = fields[2].integer.value;
+ hpx1->avg_max_split = fields[3].integer.value;
+ hpx1->tot_max_split = fields[4].integer.value;
break;
default:
printk(KERN_WARNING
@@ -178,7 +176,7 @@ static acpi_status decode_type1_hpx_record(union acpi_object *record,
}
static acpi_status decode_type2_hpx_record(union acpi_object *record,
- struct hotplug_params *hpx)
+ struct hpp_type2 *hpx2)
{
int i;
union acpi_object *fields = record->package.elements;
@@ -191,24 +189,23 @@ static acpi_status decode_type2_hpx_record(union acpi_object *record,
for (i = 2; i < 18; i++)
if (fields[i].type != ACPI_TYPE_INTEGER)
return AE_ERROR;
- hpx->t2 = &hpx->type2_data;
- hpx->t2->revision = revision;
- hpx->t2->unc_err_mask_and = fields[2].integer.value;
- hpx->t2->unc_err_mask_or = fields[3].integer.value;
- hpx->t2->unc_err_sever_and = fields[4].integer.value;
- hpx->t2->unc_err_sever_or = fields[5].integer.value;
- hpx->t2->cor_err_mask_and = fields[6].integer.value;
- hpx->t2->cor_err_mask_or = fields[7].integer.value;
- hpx->t2->adv_err_cap_and = fields[8].integer.value;
- hpx->t2->adv_err_cap_or = fields[9].integer.value;
- hpx->t2->pci_exp_devctl_and = fields[10].integer.value;
- hpx->t2->pci_exp_devctl_or = fields[11].integer.value;
- hpx->t2->pci_exp_lnkctl_and = fields[12].integer.value;
- hpx->t2->pci_exp_lnkctl_or = fields[13].integer.value;
- hpx->t2->sec_unc_err_sever_and = fields[14].integer.value;
- hpx->t2->sec_unc_err_sever_or = fields[15].integer.value;
- hpx->t2->sec_unc_err_mask_and = fields[16].integer.value;
- hpx->t2->sec_unc_err_mask_or = fields[17].integer.value;
+ hpx2->revision = revision;
+ hpx2->unc_err_mask_and = fields[2].integer.value;
+ hpx2->unc_err_mask_or = fields[3].integer.value;
+ hpx2->unc_err_sever_and = fields[4].integer.value;
+ hpx2->unc_err_sever_or = fields[5].integer.value;
+ hpx2->cor_err_mask_and = fields[6].integer.value;
+ hpx2->cor_err_mask_or = fields[7].integer.value;
+ hpx2->adv_err_cap_and = fields[8].integer.value;
+ hpx2->adv_err_cap_or = fields[9].integer.value;
+ hpx2->pci_exp_devctl_and = fields[10].integer.value;
+ hpx2->pci_exp_devctl_or = fields[11].integer.value;
+ hpx2->pci_exp_lnkctl_and = fields[12].integer.value;
+ hpx2->pci_exp_lnkctl_or = fields[13].integer.value;
+ hpx2->sec_unc_err_sever_and = fields[14].integer.value;
+ hpx2->sec_unc_err_sever_or = fields[15].integer.value;
+ hpx2->sec_unc_err_mask_and = fields[16].integer.value;
+ hpx2->sec_unc_err_mask_or = fields[17].integer.value;
break;
default:
printk(KERN_WARNING
@@ -219,17 +216,76 @@ static acpi_status decode_type2_hpx_record(union acpi_object *record,
return AE_OK;
}
-static acpi_status acpi_run_hpx(acpi_handle handle, struct hotplug_params *hpx)
+static void parse_hpx3_register(struct hpx_type3 *hpx3_reg,
+ union acpi_object *reg_fields)
+{
+ hpx3_reg->device_type = reg_fields[0].integer.value;
+ hpx3_reg->function_type = reg_fields[1].integer.value;
+ hpx3_reg->config_space_location = reg_fields[2].integer.value;
+ hpx3_reg->pci_exp_cap_id = reg_fields[3].integer.value;
+ hpx3_reg->pci_exp_cap_ver = reg_fields[4].integer.value;
+ hpx3_reg->pci_exp_vendor_id = reg_fields[5].integer.value;
+ hpx3_reg->dvsec_id = reg_fields[6].integer.value;
+ hpx3_reg->dvsec_rev = reg_fields[7].integer.value;
+ hpx3_reg->match_offset = reg_fields[8].integer.value;
+ hpx3_reg->match_mask_and = reg_fields[9].integer.value;
+ hpx3_reg->match_value = reg_fields[10].integer.value;
+ hpx3_reg->reg_offset = reg_fields[11].integer.value;
+ hpx3_reg->reg_mask_and = reg_fields[12].integer.value;
+ hpx3_reg->reg_mask_or = reg_fields[13].integer.value;
+}
+
+static acpi_status program_type3_hpx_record(struct pci_dev *dev,
+ union acpi_object *record,
+ const struct hotplug_program_ops *hp_ops)
+{
+ union acpi_object *fields = record->package.elements;
+ u32 desc_count, expected_length, revision;
+ union acpi_object *reg_fields;
+ struct hpx_type3 hpx3;
+ int i;
+
+ revision = fields[1].integer.value;
+ switch (revision) {
+ case 1:
+ desc_count = fields[2].integer.value;
+ expected_length = 3 + desc_count * 14;
+
+ if (record->package.count != expected_length)
+ return AE_ERROR;
+
+ for (i = 2; i < expected_length; i++)
+ if (fields[i].type != ACPI_TYPE_INTEGER)
+ return AE_ERROR;
+
+ for (i = 0; i < desc_count; i++) {
+ reg_fields = fields + 3 + i * 14;
+ parse_hpx3_register(&hpx3, reg_fields);
+ hp_ops->program_type3(dev, &hpx3);
+ }
+
+ break;
+ default:
+ printk(KERN_WARNING
+ "%s: Type 3 Revision %d record not supported\n",
+ __func__, revision);
+ return AE_ERROR;
+ }
+ return AE_OK;
+}
+
+static acpi_status acpi_run_hpx(struct pci_dev *dev, acpi_handle handle,
+ const struct hotplug_program_ops *hp_ops)
{
acpi_status status;
struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
union acpi_object *package, *record, *fields;
+ struct hpp_type0 hpx0;
+ struct hpp_type1 hpx1;
+ struct hpp_type2 hpx2;
u32 type;
int i;
- /* Clear the return buffer with zeros */
- memset(hpx, 0, sizeof(struct hotplug_params));
-
status = acpi_evaluate_object(handle, "_HPX", NULL, &buffer);
if (ACPI_FAILURE(status))
return status;
@@ -257,17 +313,28 @@ static acpi_status acpi_run_hpx(acpi_handle handle, struct hotplug_params *hpx)
type = fields[0].integer.value;
switch (type) {
case 0:
- status = decode_type0_hpx_record(record, hpx);
+ memset(&hpx0, 0, sizeof(hpx0));
+ status = decode_type0_hpx_record(record, &hpx0);
if (ACPI_FAILURE(status))
goto exit;
+ hp_ops->program_type0(dev, &hpx0);
break;
case 1:
- status = decode_type1_hpx_record(record, hpx);
+ memset(&hpx1, 0, sizeof(hpx1));
+ status = decode_type1_hpx_record(record, &hpx1);
if (ACPI_FAILURE(status))
goto exit;
+ hp_ops->program_type1(dev, &hpx1);
break;
case 2:
- status = decode_type2_hpx_record(record, hpx);
+ memset(&hpx2, 0, sizeof(hpx2));
+ status = decode_type2_hpx_record(record, &hpx2);
+ if (ACPI_FAILURE(status))
+ goto exit;
+ hp_ops->program_type2(dev, &hpx2);
+ break;
+ case 3:
+ status = program_type3_hpx_record(dev, record, hp_ops);
if (ACPI_FAILURE(status))
goto exit;
break;
@@ -283,14 +350,16 @@ static acpi_status acpi_run_hpx(acpi_handle handle, struct hotplug_params *hpx)
return status;
}
-static acpi_status acpi_run_hpp(acpi_handle handle, struct hotplug_params *hpp)
+static acpi_status acpi_run_hpp(struct pci_dev *dev, acpi_handle handle,
+ const struct hotplug_program_ops *hp_ops)
{
acpi_status status;
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
union acpi_object *package, *fields;
+ struct hpp_type0 hpp0;
int i;
- memset(hpp, 0, sizeof(struct hotplug_params));
+ memset(&hpp0, 0, sizeof(hpp0));
status = acpi_evaluate_object(handle, "_HPP", NULL, &buffer);
if (ACPI_FAILURE(status))
@@ -311,12 +380,13 @@ static acpi_status acpi_run_hpp(acpi_handle handle, struct hotplug_params *hpp)
}
}
- hpp->t0 = &hpp->type0_data;
- hpp->t0->revision = 1;
- hpp->t0->cache_line_size = fields[0].integer.value;
- hpp->t0->latency_timer = fields[1].integer.value;
- hpp->t0->enable_serr = fields[2].integer.value;
- hpp->t0->enable_perr = fields[3].integer.value;
+ hpp0.revision = 1;
+ hpp0.cache_line_size = fields[0].integer.value;
+ hpp0.latency_timer = fields[1].integer.value;
+ hpp0.enable_serr = fields[2].integer.value;
+ hpp0.enable_perr = fields[3].integer.value;
+
+ hp_ops->program_type0(dev, &hpp0);
exit:
kfree(buffer.pointer);
@@ -328,7 +398,8 @@ exit:
* @dev - the pci_dev for which we want parameters
* @hpp - allocated by the caller
*/
-int pci_get_hp_params(struct pci_dev *dev, struct hotplug_params *hpp)
+int pci_acpi_program_hp_params(struct pci_dev *dev,
+ const struct hotplug_program_ops *hp_ops)
{
acpi_status status;
acpi_handle handle, phandle;
@@ -351,10 +422,10 @@ int pci_get_hp_params(struct pci_dev *dev, struct hotplug_params *hpp)
* this pci dev.
*/
while (handle) {
- status = acpi_run_hpx(handle, hpp);
+ status = acpi_run_hpx(dev, handle, hp_ops);
if (ACPI_SUCCESS(status))
return 0;
- status = acpi_run_hpp(handle, hpp);
+ status = acpi_run_hpp(dev, handle, hp_ops);
if (ACPI_SUCCESS(status))
return 0;
if (acpi_is_root_bridge(handle))
@@ -366,7 +437,6 @@ int pci_get_hp_params(struct pci_dev *dev, struct hotplug_params *hpp)
}
return -ENODEV;
}
-EXPORT_SYMBOL_GPL(pci_get_hp_params);
/**
* pciehp_is_native - Check whether a hotplug port is handled by the OS
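Each _HPX Type 3 register descriptor consists of 14 integers, preceded by the type, revision and descriptor-count fields; that is where the 3 + desc_count * 14 length check and the fields + 3 + i * 14 stride in program_type3_hpx_record() come from. A standalone sketch of the same arithmetic (helper names are illustrative):

/* expected _HPX Type 3 package length: 3 header fields plus 14 per register */
static bool hpx3_length_ok(u32 desc_count, u32 package_count)
{
	return package_count == 3 + desc_count * 14;
}

/* start of the i-th register descriptor within the record's elements */
static union acpi_object *hpx3_register_fields(union acpi_object *fields, u32 i)
{
	return fields + 3 + i * 14;
}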
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 7c1b362f599a..d185b49e105a 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -3707,31 +3707,6 @@ int pci_request_region(struct pci_dev *pdev, int bar, const char *res_name)
EXPORT_SYMBOL(pci_request_region);
/**
- * pci_request_region_exclusive - Reserved PCI I/O and memory resource
- * @pdev: PCI device whose resources are to be reserved
- * @bar: BAR to be reserved
- * @res_name: Name to be associated with resource.
- *
- * Mark the PCI region associated with PCI device @pdev BR @bar as
- * being reserved by owner @res_name. Do not access any
- * address inside the PCI regions unless this call returns
- * successfully.
- *
- * Returns 0 on success, or %EBUSY on error. A warning
- * message is also printed on failure.
- *
- * The key difference that _exclusive makes it that userspace is
- * explicitly not allowed to map the resource via /dev/mem or
- * sysfs.
- */
-int pci_request_region_exclusive(struct pci_dev *pdev, int bar,
- const char *res_name)
-{
- return __pci_request_region(pdev, bar, res_name, IORESOURCE_EXCLUSIVE);
-}
-EXPORT_SYMBOL(pci_request_region_exclusive);
-
-/**
* pci_release_selected_regions - Release selected PCI I/O and memory resources
* @pdev: PCI device whose resources were previously reserved
* @bars: Bitmask of BARs to be released
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index 224d88634115..17c4ed2021de 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -596,7 +596,7 @@ void pci_aer_clear_fatal_status(struct pci_dev *dev);
void pci_aer_clear_device_status(struct pci_dev *dev);
#else
static inline void pci_no_aer(void) { }
-static inline int pci_aer_init(struct pci_dev *d) { return -ENODEV; }
+static inline void pci_aer_init(struct pci_dev *d) { }
static inline void pci_aer_exit(struct pci_dev *d) { }
static inline void pci_aer_clear_fatal_status(struct pci_dev *dev) { }
static inline void pci_aer_clear_device_status(struct pci_dev *dev) { }
diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
index 727e3c1ef9a4..fd4cb75088f9 100644
--- a/drivers/pci/pcie/aspm.c
+++ b/drivers/pci/pcie/aspm.c
@@ -196,6 +196,36 @@ static void pcie_clkpm_cap_init(struct pcie_link_state *link, int blacklist)
link->clkpm_capable = (blacklist) ? 0 : capable;
}
+static bool pcie_retrain_link(struct pcie_link_state *link)
+{
+ struct pci_dev *parent = link->pdev;
+ unsigned long end_jiffies;
+ u16 reg16;
+
+ pcie_capability_read_word(parent, PCI_EXP_LNKCTL, &reg16);
+ reg16 |= PCI_EXP_LNKCTL_RL;
+ pcie_capability_write_word(parent, PCI_EXP_LNKCTL, reg16);
+ if (parent->clear_retrain_link) {
+ /*
+ * Due to an erratum in some devices the Retrain Link bit
+ * needs to be cleared again manually to allow the link
+ * training to succeed.
+ */
+ reg16 &= ~PCI_EXP_LNKCTL_RL;
+ pcie_capability_write_word(parent, PCI_EXP_LNKCTL, reg16);
+ }
+
+ /* Wait for link training end. Break out after waiting for timeout */
+ end_jiffies = jiffies + LINK_RETRAIN_TIMEOUT;
+ do {
+ pcie_capability_read_word(parent, PCI_EXP_LNKSTA, &reg16);
+ if (!(reg16 & PCI_EXP_LNKSTA_LT))
+ break;
+ msleep(1);
+ } while (time_before(jiffies, end_jiffies));
+ return !(reg16 & PCI_EXP_LNKSTA_LT);
+}
+
/*
* pcie_aspm_configure_common_clock: check if the 2 ends of a link
* could use common clock. If they are, configure them to use the
@@ -205,7 +235,6 @@ static void pcie_aspm_configure_common_clock(struct pcie_link_state *link)
{
int same_clock = 1;
u16 reg16, parent_reg, child_reg[8];
- unsigned long start_jiffies;
struct pci_dev *child, *parent = link->pdev;
struct pci_bus *linkbus = parent->subordinate;
/*
@@ -263,21 +292,7 @@ static void pcie_aspm_configure_common_clock(struct pcie_link_state *link)
reg16 &= ~PCI_EXP_LNKCTL_CCC;
pcie_capability_write_word(parent, PCI_EXP_LNKCTL, reg16);
- /* Retrain link */
- reg16 |= PCI_EXP_LNKCTL_RL;
- pcie_capability_write_word(parent, PCI_EXP_LNKCTL, reg16);
-
- /* Wait for link training end. Break out after waiting for timeout */
- start_jiffies = jiffies;
- for (;;) {
- pcie_capability_read_word(parent, PCI_EXP_LNKSTA, &reg16);
- if (!(reg16 & PCI_EXP_LNKSTA_LT))
- break;
- if (time_after(jiffies, start_jiffies + LINK_RETRAIN_TIMEOUT))
- break;
- msleep(1);
- }
- if (!(reg16 & PCI_EXP_LNKSTA_LT))
+ if (pcie_retrain_link(link))
return;
/* Training failed. Restore common clock configurations */
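The refactored pcie_retrain_link() above is the usual poll-with-deadline idiom: set PCI_EXP_LNKCTL_RL, then poll PCI_EXP_LNKSTA until the Link Training bit clears or LINK_RETRAIN_TIMEOUT expires. The polling half, isolated into a hypothetical helper built on the same accessors:

#include <linux/delay.h>
#include <linux/jiffies.h>
#include <linux/pci.h>

static bool wait_link_training_done(struct pci_dev *pdev, unsigned long timeout)
{
	unsigned long end = jiffies + timeout;
	u16 lnksta;

	do {
		pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &lnksta);
		if (!(lnksta & PCI_EXP_LNKSTA_LT))
			return true;	/* training finished */
		msleep(1);
	} while (time_before(jiffies, end));

	return false;	/* still training after the deadline */
}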
diff --git a/drivers/pci/pcie/bw_notification.c b/drivers/pci/pcie/bw_notification.c
index d2eae3b7cc0f..971eb7e90fb0 100644
--- a/drivers/pci/pcie/bw_notification.c
+++ b/drivers/pci/pcie/bw_notification.c
@@ -96,11 +96,25 @@ static void pcie_bandwidth_notification_remove(struct pcie_device *srv)
free_irq(srv->irq, srv);
}
+static int pcie_bandwidth_notification_suspend(struct pcie_device *srv)
+{
+ pcie_disable_link_bandwidth_notification(srv->port);
+ return 0;
+}
+
+static int pcie_bandwidth_notification_resume(struct pcie_device *srv)
+{
+ pcie_enable_link_bandwidth_notification(srv->port);
+ return 0;
+}
+
static struct pcie_port_service_driver pcie_bandwidth_notification_driver = {
.name = "pcie_bw_notification",
.port_type = PCIE_ANY_PORT,
.service = PCIE_PORT_SERVICE_BWNOTIF,
.probe = pcie_bandwidth_notification_probe,
+ .suspend = pcie_bandwidth_notification_suspend,
+ .resume = pcie_bandwidth_notification_resume,
.remove = pcie_bandwidth_notification_remove,
};
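The new suspend/resume hooks are invoked through the pcie_port_service_driver interface; a minimal sketch of how such a service driver is handed to the port driver (the init function name is an assumption, pcie_port_service_register() is the existing portdrv registration call):

static int __init bwnotif_service_init(void)
{
	/* portdrv will now call probe/suspend/resume/remove as appropriate */
	return pcie_port_service_register(&pcie_bandwidth_notification_driver);
}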
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index 2ec0df04e0dc..50cd9c17c08f 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -586,16 +586,9 @@ static void pci_release_host_bridge_dev(struct device *dev)
kfree(to_pci_host_bridge(dev));
}
-struct pci_host_bridge *pci_alloc_host_bridge(size_t priv)
+static void pci_init_host_bridge(struct pci_host_bridge *bridge)
{
- struct pci_host_bridge *bridge;
-
- bridge = kzalloc(sizeof(*bridge) + priv, GFP_KERNEL);
- if (!bridge)
- return NULL;
-
INIT_LIST_HEAD(&bridge->windows);
- bridge->dev.release = pci_release_host_bridge_dev;
/*
* We assume we can manage these PCIe features. Some systems may
@@ -608,6 +601,18 @@ struct pci_host_bridge *pci_alloc_host_bridge(size_t priv)
bridge->native_shpc_hotplug = 1;
bridge->native_pme = 1;
bridge->native_ltr = 1;
+}
+
+struct pci_host_bridge *pci_alloc_host_bridge(size_t priv)
+{
+ struct pci_host_bridge *bridge;
+
+ bridge = kzalloc(sizeof(*bridge) + priv, GFP_KERNEL);
+ if (!bridge)
+ return NULL;
+
+ pci_init_host_bridge(bridge);
+ bridge->dev.release = pci_release_host_bridge_dev;
return bridge;
}
@@ -622,7 +627,7 @@ struct pci_host_bridge *devm_pci_alloc_host_bridge(struct device *dev,
if (!bridge)
return NULL;
- INIT_LIST_HEAD(&bridge->windows);
+ pci_init_host_bridge(bridge);
bridge->dev.release = devm_pci_release_host_bridge_dev;
return bridge;
@@ -1081,6 +1086,36 @@ static void pci_enable_crs(struct pci_dev *pdev)
static unsigned int pci_scan_child_bus_extend(struct pci_bus *bus,
unsigned int available_buses);
+/**
+ * pci_ea_fixed_busnrs() - Read fixed Secondary and Subordinate bus
+ * numbers from EA capability.
+ * @dev: Bridge
+ * @sec: updated with secondary bus number from EA
+ * @sub: updated with subordinate bus number from EA
+ *
+ * If @dev is a bridge with EA capability, update @sec and @sub with
+ * fixed bus numbers from the capability and return true. Otherwise,
+ * return false.
+ */
+static bool pci_ea_fixed_busnrs(struct pci_dev *dev, u8 *sec, u8 *sub)
+{
+ int ea, offset;
+ u32 dw;
+
+ if (dev->hdr_type != PCI_HEADER_TYPE_BRIDGE)
+ return false;
+
+ /* find PCI EA capability in list */
+ ea = pci_find_capability(dev, PCI_CAP_ID_EA);
+ if (!ea)
+ return false;
+
+ offset = ea + PCI_EA_FIRST_ENT;
+ pci_read_config_dword(dev, offset, &dw);
+ *sec = dw & PCI_EA_SEC_BUS_MASK;
+ *sub = (dw & PCI_EA_SUB_BUS_MASK) >> PCI_EA_SUB_BUS_SHIFT;
+ return true;
+}
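The secondary and subordinate bus numbers sit in the low 16 bits of the first EA entry DWORD of a bridge, using the PCI_EA_SEC_BUS_MASK/PCI_EA_SUB_BUS_MASK definitions added to pci_regs.h later in this patch. As a worked example, a DWORD value of 0x00004a42 decodes to secondary bus 0x42 and subordinate bus 0x4a:

/* illustrative decode of the bridge fixed-bus-numbers DWORD */
static void ea_decode_fixed_busnrs(u32 dw, u8 *sec, u8 *sub)
{
	*sec = dw & PCI_EA_SEC_BUS_MASK;				/* bits 7:0  */
	*sub = (dw & PCI_EA_SUB_BUS_MASK) >> PCI_EA_SUB_BUS_SHIFT;	/* bits 15:8 */
}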
/*
* pci_scan_bridge_extend() - Scan buses behind a bridge
@@ -1115,6 +1150,9 @@ static int pci_scan_bridge_extend(struct pci_bus *bus, struct pci_dev *dev,
u16 bctl;
u8 primary, secondary, subordinate;
int broken = 0;
+ bool fixed_buses;
+ u8 fixed_sec, fixed_sub;
+ int next_busnr;
/*
* Make sure the bridge is powered on to be able to access config
@@ -1214,17 +1252,24 @@ static int pci_scan_bridge_extend(struct pci_bus *bus, struct pci_dev *dev,
/* Clear errors */
pci_write_config_word(dev, PCI_STATUS, 0xffff);
+ /* Read bus numbers from EA Capability (if present) */
+ fixed_buses = pci_ea_fixed_busnrs(dev, &fixed_sec, &fixed_sub);
+ if (fixed_buses)
+ next_busnr = fixed_sec;
+ else
+ next_busnr = max + 1;
+
/*
* Prevent assigning a bus number that already exists.
* This can happen when a bridge is hot-plugged, so in this
* case we only re-scan this bus.
*/
- child = pci_find_bus(pci_domain_nr(bus), max+1);
+ child = pci_find_bus(pci_domain_nr(bus), next_busnr);
if (!child) {
- child = pci_add_new_bus(bus, dev, max+1);
+ child = pci_add_new_bus(bus, dev, next_busnr);
if (!child)
goto out;
- pci_bus_insert_busn_res(child, max+1,
+ pci_bus_insert_busn_res(child, next_busnr,
bus->busn_res.end);
}
max++;
@@ -1285,7 +1330,13 @@ static int pci_scan_bridge_extend(struct pci_bus *bus, struct pci_dev *dev,
max += i;
}
- /* Set subordinate bus number to its real value */
+ /*
+ * Set subordinate bus number to its real value.
+ * If a fixed subordinate bus number exists in the EA
+ * capability, use it instead.
+ */
+ if (fixed_buses)
+ max = fixed_sub;
pci_bus_update_busn_res_end(child, max);
pci_write_config_byte(dev, PCI_SUBORDINATE_BUS, max);
}
@@ -2026,6 +2077,119 @@ static void program_hpp_type2(struct pci_dev *dev, struct hpp_type2 *hpp)
*/
}
+static u16 hpx3_device_type(struct pci_dev *dev)
+{
+ u16 pcie_type = pci_pcie_type(dev);
+ const int pcie_to_hpx3_type[] = {
+ [PCI_EXP_TYPE_ENDPOINT] = HPX_TYPE_ENDPOINT,
+ [PCI_EXP_TYPE_LEG_END] = HPX_TYPE_LEG_END,
+ [PCI_EXP_TYPE_RC_END] = HPX_TYPE_RC_END,
+ [PCI_EXP_TYPE_RC_EC] = HPX_TYPE_RC_EC,
+ [PCI_EXP_TYPE_ROOT_PORT] = HPX_TYPE_ROOT_PORT,
+ [PCI_EXP_TYPE_UPSTREAM] = HPX_TYPE_UPSTREAM,
+ [PCI_EXP_TYPE_DOWNSTREAM] = HPX_TYPE_DOWNSTREAM,
+ [PCI_EXP_TYPE_PCI_BRIDGE] = HPX_TYPE_PCI_BRIDGE,
+ [PCI_EXP_TYPE_PCIE_BRIDGE] = HPX_TYPE_PCIE_BRIDGE,
+ };
+
+ if (pcie_type >= ARRAY_SIZE(pcie_to_hpx3_type))
+ return 0;
+
+ return pcie_to_hpx3_type[pcie_type];
+}
+
+static u8 hpx3_function_type(struct pci_dev *dev)
+{
+ if (dev->is_virtfn)
+ return HPX_FN_SRIOV_VIRT;
+ else if (pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV) > 0)
+ return HPX_FN_SRIOV_PHYS;
+ else
+ return HPX_FN_NORMAL;
+}
+
+static bool hpx3_cap_ver_matches(u8 pcie_cap_id, u8 hpx3_cap_id)
+{
+ u8 cap_ver = hpx3_cap_id & 0xf;
+
+ if ((hpx3_cap_id & BIT(4)) && cap_ver >= pcie_cap_id)
+ return true;
+ else if (cap_ver == pcie_cap_id)
+ return true;
+
+ return false;
+}
+
+static void program_hpx_type3_register(struct pci_dev *dev,
+ const struct hpx_type3 *reg)
+{
+ u32 match_reg, write_reg, header, orig_value;
+ u16 pos;
+
+ if (!(hpx3_device_type(dev) & reg->device_type))
+ return;
+
+ if (!(hpx3_function_type(dev) & reg->function_type))
+ return;
+
+ switch (reg->config_space_location) {
+ case HPX_CFG_PCICFG:
+ pos = 0;
+ break;
+ case HPX_CFG_PCIE_CAP:
+ pos = pci_find_capability(dev, reg->pci_exp_cap_id);
+ if (pos == 0)
+ return;
+
+ break;
+ case HPX_CFG_PCIE_CAP_EXT:
+ pos = pci_find_ext_capability(dev, reg->pci_exp_cap_id);
+ if (pos == 0)
+ return;
+
+ pci_read_config_dword(dev, pos, &header);
+ if (!hpx3_cap_ver_matches(PCI_EXT_CAP_VER(header),
+ reg->pci_exp_cap_ver))
+ return;
+
+ break;
+ case HPX_CFG_VEND_CAP: /* Fall through */
+ case HPX_CFG_DVSEC: /* Fall through */
+ default:
+ pci_warn(dev, "Encountered _HPX type 3 with unsupported config space location\n");
+ return;
+ }
+
+ pci_read_config_dword(dev, pos + reg->match_offset, &match_reg);
+
+ if ((match_reg & reg->match_mask_and) != reg->match_value)
+ return;
+
+ pci_read_config_dword(dev, pos + reg->reg_offset, &write_reg);
+ orig_value = write_reg;
+ write_reg &= reg->reg_mask_and;
+ write_reg |= reg->reg_mask_or;
+
+ if (orig_value == write_reg)
+ return;
+
+ pci_write_config_dword(dev, pos + reg->reg_offset, write_reg);
+
+ pci_dbg(dev, "Applied _HPX3 at [0x%x]: 0x%08x -> 0x%08x\n",
+ pos, orig_value, write_reg);
+}
+
+static void program_hpx_type3(struct pci_dev *dev, struct hpx_type3 *hpx3)
+{
+ if (!hpx3)
+ return;
+
+ if (!pci_is_pcie(dev))
+ return;
+
+ program_hpx_type3_register(dev, hpx3);
+}
+
int pci_configure_extended_tags(struct pci_dev *dev, void *ign)
{
struct pci_host_bridge *host;
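program_hpx_type3_register() above boils down to a match-then-RMW sequence: locate the capability, compare the dword at match_offset under match_mask_and, then rewrite the dword at reg_offset with reg_mask_and/reg_mask_or. The read-modify-write step in isolation (hypothetical helper, same config accessors):

static void hpx3_apply_rmw(struct pci_dev *dev, u16 pos,
			   const struct hpx_type3 *reg)
{
	u32 val, orig;

	pci_read_config_dword(dev, pos + reg->reg_offset, &val);
	orig = val;
	val &= reg->reg_mask_and;	/* clear the bits the record owns */
	val |= reg->reg_mask_or;	/* then set the requested values */
	if (val != orig)
		pci_write_config_dword(dev, pos + reg->reg_offset, val);
}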
@@ -2206,8 +2370,12 @@ static void pci_configure_serr(struct pci_dev *dev)
static void pci_configure_device(struct pci_dev *dev)
{
- struct hotplug_params hpp;
- int ret;
+ static const struct hotplug_program_ops hp_ops = {
+ .program_type0 = program_hpp_type0,
+ .program_type1 = program_hpp_type1,
+ .program_type2 = program_hpp_type2,
+ .program_type3 = program_hpx_type3,
+ };
pci_configure_mps(dev);
pci_configure_extended_tags(dev, NULL);
@@ -2216,14 +2384,7 @@ static void pci_configure_device(struct pci_dev *dev)
pci_configure_eetlp_prefix(dev);
pci_configure_serr(dev);
- memset(&hpp, 0, sizeof(hpp));
- ret = pci_get_hp_params(dev, &hpp);
- if (ret)
- return;
-
- program_hpp_type2(dev, hpp.t2);
- program_hpp_type1(dev, hpp.t1);
- program_hpp_type0(dev, hpp.t0);
+ pci_acpi_program_hp_params(dev, &hp_ops);
}
static void pci_release_capabilities(struct pci_dev *dev)
diff --git a/drivers/pci/proc.c b/drivers/pci/proc.c
index 6fa1627ce08d..445b51db75b0 100644
--- a/drivers/pci/proc.c
+++ b/drivers/pci/proc.c
@@ -222,6 +222,7 @@ static long proc_bus_pci_ioctl(struct file *file, unsigned int cmd,
}
/* If arch decided it can't, fall through... */
#endif /* HAVE_PCI_MMAP */
+ /* fall through */
default:
ret = -EINVAL;
break;
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index a59ad09ce911..9d32019411fd 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -2245,6 +2245,23 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10f1, quirk_disable_aspm_l0s);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10f4, quirk_disable_aspm_l0s);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1508, quirk_disable_aspm_l0s);
+/*
+ * Some Pericom PCIe-to-PCI bridges in reverse mode need the PCIe Retrain
+ * Link bit cleared after starting the link retrain process to allow this
+ * process to finish.
+ *
+ * Affected devices: PI7C9X110, PI7C9X111SL, PI7C9X130. See also the
+ * Pericom Errata Sheet PI7C9X111SLB_errata_rev1.2_102711.pdf.
+ */
+static void quirk_enable_clear_retrain_link(struct pci_dev *dev)
+{
+ dev->clear_retrain_link = 1;
+ pci_info(dev, "Enable PCIe Retrain Link quirk\n");
+}
+DECLARE_PCI_FIXUP_HEADER(0x12d8, 0xe110, quirk_enable_clear_retrain_link);
+DECLARE_PCI_FIXUP_HEADER(0x12d8, 0xe111, quirk_enable_clear_retrain_link);
+DECLARE_PCI_FIXUP_HEADER(0x12d8, 0xe130, quirk_enable_clear_retrain_link);
+
static void fixup_rev1_53c810(struct pci_dev *dev)
{
u32 class = dev->class;
@@ -3408,6 +3425,7 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x0030, quirk_no_bus_reset);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x0032, quirk_no_bus_reset);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x003c, quirk_no_bus_reset);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x0033, quirk_no_bus_reset);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x0034, quirk_no_bus_reset);
/*
* Root port on some Cavium CN8xxx chips do not successfully complete a bus
@@ -4903,6 +4921,7 @@ static void quirk_no_ats(struct pci_dev *pdev)
/* AMD Stoney platform GPU */
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x98e4, quirk_no_ats);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x6900, quirk_no_ats);
#endif /* CONFIG_PCI_ATS */
/* Freescale PCIe doesn't support MSI in RC mode */
@@ -5120,3 +5139,61 @@ SWITCHTEC_QUIRK(0x8573); /* PFXI 48XG3 */
SWITCHTEC_QUIRK(0x8574); /* PFXI 64XG3 */
SWITCHTEC_QUIRK(0x8575); /* PFXI 80XG3 */
SWITCHTEC_QUIRK(0x8576); /* PFXI 96XG3 */
+
+/*
+ * On Lenovo Thinkpad P50 SKUs with a Nvidia Quadro M1000M, the BIOS does
+ * not always reset the secondary Nvidia GPU between reboots if the system
+ * is configured to use Hybrid Graphics mode. This results in the GPU
+ * being left in whatever state it was in during the *previous* boot, which
+ * causes spurious interrupts from the GPU, which in turn causes us to
+ * disable the wrong IRQ and end up breaking the touchpad. Unsurprisingly,
+ * this also completely breaks nouveau.
+ *
+ * Luckily, it seems a simple reset of the Nvidia GPU brings it back to a
+ * clean state and fixes all these issues.
+ *
+ * When the machine is configured in Dedicated display mode, the issue
+ * doesn't occur. Fortunately the GPU advertises NoReset+ when in this
+ * mode, so we can detect that and avoid resetting it.
+ */
+static void quirk_reset_lenovo_thinkpad_p50_nvgpu(struct pci_dev *pdev)
+{
+ void __iomem *map;
+ int ret;
+
+ if (pdev->subsystem_vendor != PCI_VENDOR_ID_LENOVO ||
+ pdev->subsystem_device != 0x222e ||
+ !pdev->reset_fn)
+ return;
+
+ if (pci_enable_device_mem(pdev))
+ return;
+
+ /*
+ * Based on nvkm_device_ctor() in
+ * drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
+ */
+ map = pci_iomap(pdev, 0, 0x23000);
+ if (!map) {
+ pci_err(pdev, "Can't map MMIO space\n");
+ goto out_disable;
+ }
+
+ /*
+ * Make sure the GPU looks like it's been POSTed before resetting
+ * it.
+ */
+ if (ioread32(map + 0x2240c) & 0x2) {
+ pci_info(pdev, FW_BUG "GPU left initialized by EFI, resetting\n");
+ ret = pci_reset_function(pdev);
+ if (ret < 0)
+ pci_err(pdev, "Failed to reset GPU: %d\n", ret);
+ }
+
+ iounmap(map);
+out_disable:
+ pci_disable_device(pdev);
+}
+DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_NVIDIA, 0x13b1,
+ PCI_CLASS_DISPLAY_VGA, 8,
+ quirk_reset_lenovo_thinkpad_p50_nvgpu);
diff --git a/drivers/pci/search.c b/drivers/pci/search.c
index 2b5f720862d3..5c7922612733 100644
--- a/drivers/pci/search.c
+++ b/drivers/pci/search.c
@@ -33,7 +33,7 @@ int pci_for_each_dma_alias(struct pci_dev *pdev,
struct pci_bus *bus;
int ret;
- ret = fn(pdev, PCI_DEVID(pdev->bus->number, pdev->devfn), data);
+ ret = fn(pdev, pci_dev_id(pdev), data);
if (ret)
return ret;
@@ -88,9 +88,7 @@ int pci_for_each_dma_alias(struct pci_dev *pdev,
return ret;
continue;
case PCI_EXP_TYPE_PCIE_BRIDGE:
- ret = fn(tmp,
- PCI_DEVID(tmp->bus->number,
- tmp->devfn), data);
+ ret = fn(tmp, pci_dev_id(tmp), data);
if (ret)
return ret;
continue;
@@ -101,9 +99,7 @@ int pci_for_each_dma_alias(struct pci_dev *pdev,
PCI_DEVID(tmp->subordinate->number,
PCI_DEVFN(0, 0)), data);
else
- ret = fn(tmp,
- PCI_DEVID(tmp->bus->number,
- tmp->devfn), data);
+ ret = fn(tmp, pci_dev_id(tmp), data);
if (ret)
return ret;
}
diff --git a/drivers/pci/switch/switchtec.c b/drivers/pci/switch/switchtec.c
index e22766c79fe9..30f6e080bad8 100644
--- a/drivers/pci/switch/switchtec.c
+++ b/drivers/pci/switch/switchtec.c
@@ -658,19 +658,25 @@ static int ioctl_flash_part_info(struct switchtec_dev *stdev,
static int ioctl_event_summary(struct switchtec_dev *stdev,
struct switchtec_user *stuser,
- struct switchtec_ioctl_event_summary __user *usum)
+ struct switchtec_ioctl_event_summary __user *usum,
+ size_t size)
{
- struct switchtec_ioctl_event_summary s = {0};
+ struct switchtec_ioctl_event_summary *s;
int i;
u32 reg;
+ int ret = 0;
- s.global = ioread32(&stdev->mmio_sw_event->global_summary);
- s.part_bitmap = ioread32(&stdev->mmio_sw_event->part_event_bitmap);
- s.local_part = ioread32(&stdev->mmio_part_cfg->part_event_summary);
+ s = kzalloc(sizeof(*s), GFP_KERNEL);
+ if (!s)
+ return -ENOMEM;
+
+ s->global = ioread32(&stdev->mmio_sw_event->global_summary);
+ s->part_bitmap = ioread32(&stdev->mmio_sw_event->part_event_bitmap);
+ s->local_part = ioread32(&stdev->mmio_part_cfg->part_event_summary);
for (i = 0; i < stdev->partition_count; i++) {
reg = ioread32(&stdev->mmio_part_cfg_all[i].part_event_summary);
- s.part[i] = reg;
+ s->part[i] = reg;
}
for (i = 0; i < SWITCHTEC_MAX_PFF_CSR; i++) {
@@ -679,15 +685,19 @@ static int ioctl_event_summary(struct switchtec_dev *stdev,
break;
reg = ioread32(&stdev->mmio_pff_csr[i].pff_event_summary);
- s.pff[i] = reg;
+ s->pff[i] = reg;
}
- if (copy_to_user(usum, &s, sizeof(s)))
- return -EFAULT;
+ if (copy_to_user(usum, s, size)) {
+ ret = -EFAULT;
+ goto error_case;
+ }
stuser->event_cnt = atomic_read(&stdev->event_cnt);
- return 0;
+error_case:
+ kfree(s);
+ return ret;
}
static u32 __iomem *global_ev_reg(struct switchtec_dev *stdev,
@@ -977,8 +987,9 @@ static long switchtec_dev_ioctl(struct file *filp, unsigned int cmd,
case SWITCHTEC_IOCTL_FLASH_PART_INFO:
rc = ioctl_flash_part_info(stdev, argp);
break;
- case SWITCHTEC_IOCTL_EVENT_SUMMARY:
- rc = ioctl_event_summary(stdev, stuser, argp);
+ case SWITCHTEC_IOCTL_EVENT_SUMMARY_LEGACY:
+ rc = ioctl_event_summary(stdev, stuser, argp,
+ sizeof(struct switchtec_ioctl_event_summary_legacy));
break;
case SWITCHTEC_IOCTL_EVENT_CTL:
rc = ioctl_event_ctl(stdev, argp);
@@ -989,6 +1000,10 @@ static long switchtec_dev_ioctl(struct file *filp, unsigned int cmd,
case SWITCHTEC_IOCTL_PORT_TO_PFF:
rc = ioctl_port_to_pff(stdev, argp);
break;
+ case SWITCHTEC_IOCTL_EVENT_SUMMARY:
+ rc = ioctl_event_summary(stdev, stuser, argp,
+ sizeof(struct switchtec_ioctl_event_summary));
+ break;
default:
rc = -ENOTTY;
break;
@@ -1162,7 +1177,8 @@ static int mask_event(struct switchtec_dev *stdev, int eid, int idx)
if (!(hdr & SWITCHTEC_EVENT_OCCURRED && hdr & SWITCHTEC_EVENT_EN_IRQ))
return 0;
- if (eid == SWITCHTEC_IOCTL_EVENT_LINK_STATE)
+ if (eid == SWITCHTEC_IOCTL_EVENT_LINK_STATE ||
+ eid == SWITCHTEC_IOCTL_EVENT_MRPC_COMP)
return 0;
dev_dbg(&stdev->dev, "%s: %d %d %x\n", __func__, eid, idx, hdr);
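With the larger switchtec_ioctl_event_summary layout and the renumbered ioctls, new user space calls SWITCHTEC_IOCTL_EVENT_SUMMARY while older binaries keep working through the _LEGACY definition and its original size. A minimal user-space sketch (the /dev/switchtec0 path is an assumption about the device node name):

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/switchtec_ioctl.h>

int main(void)
{
	struct switchtec_ioctl_event_summary s;
	int fd = open("/dev/switchtec0", O_RDWR);

	if (fd < 0 || ioctl(fd, SWITCHTEC_IOCTL_EVENT_SUMMARY, &s)) {
		perror("switchtec event summary");
		return 1;
	}

	printf("global event summary: 0x%llx\n", (unsigned long long)s.global);
	close(fd);
	return 0;
}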
diff --git a/drivers/pci/xen-pcifront.c b/drivers/pci/xen-pcifront.c
index eba6e33147a2..14cf0f41ecf0 100644
--- a/drivers/pci/xen-pcifront.c
+++ b/drivers/pci/xen-pcifront.c
@@ -1104,7 +1104,7 @@ static void __ref pcifront_backend_changed(struct xenbus_device *xdev,
case XenbusStateClosed:
if (xdev->state == XenbusStateClosed)
break;
- /* Missed the backend's CLOSING state -- fallthrough */
+ /* fall through - Missed the backend's CLOSING state. */
case XenbusStateClosing:
dev_warn(&xdev->dev, "backend going away!\n");
pcifront_try_disconnect(pdev);
diff --git a/drivers/platform/chrome/chromeos_laptop.c b/drivers/platform/chrome/chromeos_laptop.c
index 24326eecd787..7abbb6167766 100644
--- a/drivers/platform/chrome/chromeos_laptop.c
+++ b/drivers/platform/chrome/chromeos_laptop.c
@@ -125,7 +125,7 @@ static bool chromeos_laptop_match_adapter_devid(struct device *dev, u32 devid)
return false;
pdev = to_pci_dev(dev);
- return devid == PCI_DEVID(pdev->bus->number, pdev->devfn);
+ return devid == pci_dev_id(pdev);
}
static void chromeos_laptop_check_adapter(struct i2c_adapter *adapter)
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index d5dcebd7aad3..b84bcd705fe1 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -513,7 +513,8 @@ extern bool osc_pc_lpi_support_confirmed;
#define OSC_PCI_CLOCK_PM_SUPPORT 0x00000004
#define OSC_PCI_SEGMENT_GROUPS_SUPPORT 0x00000008
#define OSC_PCI_MSI_SUPPORT 0x00000010
-#define OSC_PCI_SUPPORT_MASKS 0x0000001f
+#define OSC_PCI_HPX_TYPE_3_SUPPORT 0x00000100
+#define OSC_PCI_SUPPORT_MASKS 0x0000011f
/* PCI Host Bridge _OSC: Capabilities DWORD 3: Control Field */
#define OSC_PCI_EXPRESS_NATIVE_HP_CONTROL 0x00000001
diff --git a/include/linux/msi.h b/include/linux/msi.h
index 7e9b81c3b50d..052f04fcf953 100644
--- a/include/linux/msi.h
+++ b/include/linux/msi.h
@@ -148,24 +148,6 @@ u32 __pci_msi_desc_mask_irq(struct msi_desc *desc, u32 mask, u32 flag);
void pci_msi_mask_irq(struct irq_data *data);
void pci_msi_unmask_irq(struct irq_data *data);
-/* Conversion helpers. Should be removed after merging */
-static inline void __write_msi_msg(struct msi_desc *entry, struct msi_msg *msg)
-{
- __pci_write_msi_msg(entry, msg);
-}
-static inline void write_msi_msg(int irq, struct msi_msg *msg)
-{
- pci_write_msi_msg(irq, msg);
-}
-static inline void mask_msi_irq(struct irq_data *data)
-{
- pci_msi_mask_irq(data);
-}
-static inline void unmask_msi_irq(struct irq_data *data)
-{
- pci_msi_unmask_irq(data);
-}
-
/*
* The arch hooks to setup up msi irqs. Those functions are
* implemented as weak symbols so that they /can/ be overriden by
diff --git a/include/linux/pci-ecam.h b/include/linux/pci-ecam.h
index 29efa09d686b..a73164c85e78 100644
--- a/include/linux/pci-ecam.h
+++ b/include/linux/pci-ecam.h
@@ -56,6 +56,7 @@ extern struct pci_ecam_ops thunder_pem_ecam_ops; /* Cavium ThunderX 1.x & 2.x */
extern struct pci_ecam_ops pci_thunder_ecam_ops; /* Cavium ThunderX 1.x */
extern struct pci_ecam_ops xgene_v1_pcie_ecam_ops; /* APM X-Gene PCIe v1 */
extern struct pci_ecam_ops xgene_v2_pcie_ecam_ops; /* APM X-Gene PCIe v2.x */
+extern struct pci_ecam_ops al_pcie_ops; /* Amazon Annapurna Labs PCIe */
#endif
#ifdef CONFIG_PCI_HOST_COMMON
diff --git a/include/linux/pci-epc.h b/include/linux/pci-epc.h
index c3ffa3917f88..f641badc2c61 100644
--- a/include/linux/pci-epc.h
+++ b/include/linux/pci-epc.h
@@ -109,6 +109,7 @@ struct pci_epc {
* @reserved_bar: bitmap to indicate reserved BAR unavailable to function driver
* @bar_fixed_64bit: bitmap to indicate fixed 64bit BARs
* @bar_fixed_size: Array specifying the size supported by each BAR
+ * @align: alignment size required for BAR buffer allocation
*/
struct pci_epc_features {
unsigned int linkup_notifier : 1;
@@ -117,6 +118,7 @@ struct pci_epc_features {
u8 reserved_bar;
u8 bar_fixed_64bit;
u64 bar_fixed_size[BAR_5 + 1];
+ size_t align;
};
#define to_pci_epc(device) container_of((device), struct pci_epc, dev)
diff --git a/include/linux/pci-epf.h b/include/linux/pci-epf.h
index ec02f58758c8..2d6f07556682 100644
--- a/include/linux/pci-epf.h
+++ b/include/linux/pci-epf.h
@@ -149,7 +149,8 @@ void pci_epf_destroy(struct pci_epf *epf);
int __pci_epf_register_driver(struct pci_epf_driver *driver,
struct module *owner);
void pci_epf_unregister_driver(struct pci_epf_driver *driver);
-void *pci_epf_alloc_space(struct pci_epf *epf, size_t size, enum pci_barno bar);
+void *pci_epf_alloc_space(struct pci_epf *epf, size_t size, enum pci_barno bar,
+ size_t align);
void pci_epf_free_space(struct pci_epf *epf, void *addr, enum pci_barno bar);
int pci_epf_bind(struct pci_epf *epf);
void pci_epf_unbind(struct pci_epf *epf);
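The extra align argument lets an endpoint-function driver honour the BAR alignment a controller reports through pci_epc_features (the align member added above). A sketch of the expected call pattern; the wrapper name is hypothetical and the two-argument pci_epc_get_features() form is assumed from this kernel series:

static void *my_epf_alloc_bar(struct pci_epf *epf, size_t bar_size)
{
	const struct pci_epc_features *features;
	size_t align = 0;

	features = pci_epc_get_features(epf->epc, epf->func_no);
	if (features)
		align = features->align;

	/* pass the controller's alignment requirement to the allocator */
	return pci_epf_alloc_space(epf, bar_size, BAR_0, align);
}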
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 77448215ef5b..6fc361a69e0c 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -348,6 +348,8 @@ struct pci_dev {
unsigned int hotplug_user_indicators:1; /* SlotCtl indicators
controlled exclusively by
user sysfs */
+ unsigned int clear_retrain_link:1; /* Need to clear Retrain Link
+ bit manually */
unsigned int d3_delay; /* D3->D0 transition time in ms */
unsigned int d3cold_delay; /* D3cold->D0 transition time in ms */
@@ -596,6 +598,11 @@ struct pci_bus {
#define to_pci_bus(n) container_of(n, struct pci_bus, dev)
+static inline u16 pci_dev_id(struct pci_dev *dev)
+{
+ return PCI_DEVID(dev->bus->number, dev->devfn);
+}
+
/*
* Returns true if the PCI bus is root (behind host-PCI bridge),
* false otherwise
@@ -1233,7 +1240,6 @@ int __must_check pci_request_regions(struct pci_dev *, const char *);
int __must_check pci_request_regions_exclusive(struct pci_dev *, const char *);
void pci_release_regions(struct pci_dev *);
int __must_check pci_request_region(struct pci_dev *, int, const char *);
-int __must_check pci_request_region_exclusive(struct pci_dev *, int, const char *);
void pci_release_region(struct pci_dev *, int);
int pci_request_selected_regions(struct pci_dev *, int, const char *);
int pci_request_selected_regions_exclusive(struct pci_dev *, int, const char *);
diff --git a/include/linux/pci_hotplug.h b/include/linux/pci_hotplug.h
index 7acc9f91e72b..f694eb2ca978 100644
--- a/include/linux/pci_hotplug.h
+++ b/include/linux/pci_hotplug.h
@@ -124,26 +124,72 @@ struct hpp_type2 {
u32 sec_unc_err_mask_or;
};
-struct hotplug_params {
- struct hpp_type0 *t0; /* Type0: NULL if not available */
- struct hpp_type1 *t1; /* Type1: NULL if not available */
- struct hpp_type2 *t2; /* Type2: NULL if not available */
- struct hpp_type0 type0_data;
- struct hpp_type1 type1_data;
- struct hpp_type2 type2_data;
+/*
+ * _HPX PCI Express Setting Record (Type 3)
+ */
+struct hpx_type3 {
+ u16 device_type;
+ u16 function_type;
+ u16 config_space_location;
+ u16 pci_exp_cap_id;
+ u16 pci_exp_cap_ver;
+ u16 pci_exp_vendor_id;
+ u16 dvsec_id;
+ u16 dvsec_rev;
+ u16 match_offset;
+ u32 match_mask_and;
+ u32 match_value;
+ u16 reg_offset;
+ u32 reg_mask_and;
+ u32 reg_mask_or;
+};
+
+struct hotplug_program_ops {
+ void (*program_type0)(struct pci_dev *dev, struct hpp_type0 *hpp);
+ void (*program_type1)(struct pci_dev *dev, struct hpp_type1 *hpp);
+ void (*program_type2)(struct pci_dev *dev, struct hpp_type2 *hpp);
+ void (*program_type3)(struct pci_dev *dev, struct hpx_type3 *hpp);
+};
+
+enum hpx_type3_dev_type {
+ HPX_TYPE_ENDPOINT = BIT(0),
+ HPX_TYPE_LEG_END = BIT(1),
+ HPX_TYPE_RC_END = BIT(2),
+ HPX_TYPE_RC_EC = BIT(3),
+ HPX_TYPE_ROOT_PORT = BIT(4),
+ HPX_TYPE_UPSTREAM = BIT(5),
+ HPX_TYPE_DOWNSTREAM = BIT(6),
+ HPX_TYPE_PCI_BRIDGE = BIT(7),
+ HPX_TYPE_PCIE_BRIDGE = BIT(8),
+};
+
+enum hpx_type3_fn_type {
+ HPX_FN_NORMAL = BIT(0),
+ HPX_FN_SRIOV_PHYS = BIT(1),
+ HPX_FN_SRIOV_VIRT = BIT(2),
+};
+
+enum hpx_type3_cfg_loc {
+ HPX_CFG_PCICFG = 0,
+ HPX_CFG_PCIE_CAP = 1,
+ HPX_CFG_PCIE_CAP_EXT = 2,
+ HPX_CFG_VEND_CAP = 3,
+ HPX_CFG_DVSEC = 4,
+ HPX_CFG_MAX,
};
#ifdef CONFIG_ACPI
#include <linux/acpi.h>
-int pci_get_hp_params(struct pci_dev *dev, struct hotplug_params *hpp);
+int pci_acpi_program_hp_params(struct pci_dev *dev,
+ const struct hotplug_program_ops *hp_ops);
bool pciehp_is_native(struct pci_dev *bridge);
int acpi_get_hp_hw_control_from_firmware(struct pci_dev *bridge);
bool shpchp_is_native(struct pci_dev *bridge);
int acpi_pci_check_ejectable(struct pci_bus *pbus, acpi_handle handle);
int acpi_pci_detect_ejectable(acpi_handle handle);
#else
-static inline int pci_get_hp_params(struct pci_dev *dev,
- struct hotplug_params *hpp)
+static inline int pci_acpi_program_hp_params(struct pci_dev *dev,
+ const struct hotplug_program_ops *hp_ops)
{
return -ENODEV;
}
diff --git a/include/linux/switchtec.h b/include/linux/switchtec.h
index 52a079b3a9a6..0cfc34ac37fb 100644
--- a/include/linux/switchtec.h
+++ b/include/linux/switchtec.h
@@ -20,7 +20,7 @@
#include <linux/cdev.h>
#define SWITCHTEC_MRPC_PAYLOAD_SIZE 1024
-#define SWITCHTEC_MAX_PFF_CSR 48
+#define SWITCHTEC_MAX_PFF_CSR 255
#define SWITCHTEC_EVENT_OCCURRED BIT(0)
#define SWITCHTEC_EVENT_CLEAR BIT(0)
diff --git a/include/uapi/linux/pci_regs.h b/include/uapi/linux/pci_regs.h
index 5c98133f2c94..c51e0066de8b 100644
--- a/include/uapi/linux/pci_regs.h
+++ b/include/uapi/linux/pci_regs.h
@@ -372,6 +372,12 @@
#define PCI_EA_FIRST_ENT_BRIDGE 8 /* First EA Entry for Bridges */
#define PCI_EA_ES 0x00000007 /* Entry Size */
#define PCI_EA_BEI 0x000000f0 /* BAR Equivalent Indicator */
+
+/* EA fixed Secondary and Subordinate bus numbers for Bridge */
+#define PCI_EA_SEC_BUS_MASK 0xff
+#define PCI_EA_SUB_BUS_MASK 0xff00
+#define PCI_EA_SUB_BUS_SHIFT 8
+
/* 0-5 map to BARs 0-5 respectively */
#define PCI_EA_BEI_BAR0 0
#define PCI_EA_BEI_BAR5 5
diff --git a/include/uapi/linux/switchtec_ioctl.h b/include/uapi/linux/switchtec_ioctl.h
index 4f4daf8db954..c912b5a678e4 100644
--- a/include/uapi/linux/switchtec_ioctl.h
+++ b/include/uapi/linux/switchtec_ioctl.h
@@ -50,7 +50,7 @@ struct switchtec_ioctl_flash_part_info {
__u32 active;
};
-struct switchtec_ioctl_event_summary {
+struct switchtec_ioctl_event_summary_legacy {
__u64 global;
__u64 part_bitmap;
__u32 local_part;
@@ -59,6 +59,15 @@ struct switchtec_ioctl_event_summary {
__u32 pff[48];
};
+struct switchtec_ioctl_event_summary {
+ __u64 global;
+ __u64 part_bitmap;
+ __u32 local_part;
+ __u32 padding;
+ __u32 part[48];
+ __u32 pff[255];
+};
+
#define SWITCHTEC_IOCTL_EVENT_STACK_ERROR 0
#define SWITCHTEC_IOCTL_EVENT_PPU_ERROR 1
#define SWITCHTEC_IOCTL_EVENT_ISP_ERROR 2
@@ -127,6 +136,8 @@ struct switchtec_ioctl_pff_port {
_IOWR('W', 0x41, struct switchtec_ioctl_flash_part_info)
#define SWITCHTEC_IOCTL_EVENT_SUMMARY \
_IOR('W', 0x42, struct switchtec_ioctl_event_summary)
+#define SWITCHTEC_IOCTL_EVENT_SUMMARY_LEGACY \
+ _IOR('W', 0x42, struct switchtec_ioctl_event_summary_legacy)
#define SWITCHTEC_IOCTL_EVENT_CTL \
_IOWR('W', 0x43, struct switchtec_ioctl_event_ctl)
#define SWITCHTEC_IOCTL_PFF_TO_PORT \