Diffstat (limited to 'drivers/pci/controller')
-rw-r--r--  drivers/pci/controller/Kconfig | 30
-rw-r--r--  drivers/pci/controller/cadence/pci-j721e.c | 1
-rw-r--r--  drivers/pci/controller/dwc/Kconfig | 57
-rw-r--r--  drivers/pci/controller/dwc/Makefile | 1
-rw-r--r--  drivers/pci/controller/dwc/pci-imx6.c | 13
-rw-r--r--  drivers/pci/controller/dwc/pci-layerscape.c | 1
-rw-r--r--  drivers/pci/controller/dwc/pcie-armada8k.c | 1
-rw-r--r--  drivers/pci/controller/dwc/pcie-bt1.c | 643
-rw-r--r--  drivers/pci/controller/dwc/pcie-designware-ep.c | 48
-rw-r--r--  drivers/pci/controller/dwc/pcie-designware-host.c | 47
-rw-r--r--  drivers/pci/controller/dwc/pcie-designware.c | 266
-rw-r--r--  drivers/pci/controller/dwc/pcie-designware.h | 63
-rw-r--r--  drivers/pci/controller/dwc/pcie-histb.c | 39
-rw-r--r--  drivers/pci/controller/dwc/pcie-qcom.c | 78
-rw-r--r--  drivers/pci/controller/dwc/pcie-tegra194.c | 1
-rw-r--r--  drivers/pci/controller/mobiveil/Kconfig | 6
-rw-r--r--  drivers/pci/controller/pci-aardvark.c | 22
-rw-r--r--  drivers/pci/controller/pci-ftpci100.c | 2
-rw-r--r--  drivers/pci/controller/pci-hyperv.c | 131
-rw-r--r--  drivers/pci/controller/pci-mvebu.c | 52
-rw-r--r--  drivers/pci/controller/pci-tegra.c | 20
-rw-r--r--  drivers/pci/controller/pci-v3-semi.c | 3
-rw-r--r--  drivers/pci/controller/pci-xgene-msi.c | 2
-rw-r--r--  drivers/pci/controller/pci-xgene.c | 1
-rw-r--r--  drivers/pci/controller/pcie-altera-msi.c | 2
-rw-r--r--  drivers/pci/controller/pcie-brcmstb.c | 85
-rw-r--r--  drivers/pci/controller/pcie-iproc-platform.c | 1
-rw-r--r--  drivers/pci/controller/pcie-iproc.c | 1
-rw-r--r--  drivers/pci/controller/pcie-microchip-host.c | 2
-rw-r--r--  drivers/pci/controller/pcie-mt7621.c | 3
-rw-r--r--  drivers/pci/controller/pcie-rockchip-host.c | 1
-rw-r--r--  drivers/pci/controller/pcie-xilinx-cpm.c | 1
-rw-r--r--  drivers/pci/controller/pcie-xilinx-nwl.c | 8
-rw-r--r--  drivers/pci/controller/vmd.c | 27
34 files changed, 1344 insertions(+), 315 deletions(-)
diff --git a/drivers/pci/controller/Kconfig b/drivers/pci/controller/Kconfig
index bfd9bac37e24..1569d9a3ada0 100644
--- a/drivers/pci/controller/Kconfig
+++ b/drivers/pci/controller/Kconfig
@@ -19,7 +19,7 @@ config PCI_AARDVARK
tristate "Aardvark PCIe controller"
depends on (ARCH_MVEBU && ARM64) || COMPILE_TEST
depends on OF
- depends on PCI_MSI_IRQ_DOMAIN
+ depends on PCI_MSI
select PCI_BRIDGE_EMUL
help
Add support for Aardvark 64bit PCIe Host Controller. This
@@ -29,7 +29,7 @@ config PCI_AARDVARK
config PCIE_XILINX_NWL
bool "NWL PCIe Core"
depends on ARCH_ZYNQMP || COMPILE_TEST
- depends on PCI_MSI_IRQ_DOMAIN
+ depends on PCI_MSI
help
Say 'Y' here if you want kernel support for Xilinx
NWL PCIe controller. The controller can act as Root Port
@@ -53,7 +53,7 @@ config PCI_IXP4XX
config PCI_TEGRA
bool "NVIDIA Tegra PCIe controller"
depends on ARCH_TEGRA || COMPILE_TEST
- depends on PCI_MSI_IRQ_DOMAIN
+ depends on PCI_MSI
help
Say Y here if you want support for the PCIe host controller found
on NVIDIA Tegra SoCs.
@@ -70,7 +70,7 @@ config PCI_RCAR_GEN2
config PCIE_RCAR_HOST
bool "Renesas R-Car PCIe host controller"
depends on ARCH_RENESAS || COMPILE_TEST
- depends on PCI_MSI_IRQ_DOMAIN
+ depends on PCI_MSI
help
Say Y here if you want PCIe controller support on R-Car SoCs in host
mode.
@@ -99,7 +99,7 @@ config PCI_HOST_GENERIC
config PCIE_XILINX
bool "Xilinx AXI PCIe host bridge support"
depends on OF || COMPILE_TEST
- depends on PCI_MSI_IRQ_DOMAIN
+ depends on PCI_MSI
help
Say 'Y' here if you want kernel to support the Xilinx AXI PCIe
Host Bridge driver.
@@ -124,7 +124,7 @@ config PCI_XGENE
config PCI_XGENE_MSI
bool "X-Gene v1 PCIe MSI feature"
depends on PCI_XGENE
- depends on PCI_MSI_IRQ_DOMAIN
+ depends on PCI_MSI
default y
help
Say Y here if you want PCIe MSI support for the APM X-Gene v1 SoC.
@@ -170,7 +170,7 @@ config PCIE_IPROC_BCMA
config PCIE_IPROC_MSI
bool "Broadcom iProc PCIe MSI support"
depends on PCIE_IPROC_PLATFORM || PCIE_IPROC_BCMA
- depends on PCI_MSI_IRQ_DOMAIN
+ depends on PCI_MSI
default ARCH_BCM_IPROC
help
Say Y here if you want to enable MSI support for Broadcom's iProc
@@ -186,7 +186,7 @@ config PCIE_ALTERA
config PCIE_ALTERA_MSI
tristate "Altera PCIe MSI feature"
depends on PCIE_ALTERA
- depends on PCI_MSI_IRQ_DOMAIN
+ depends on PCI_MSI
help
Say Y here if you want PCIe MSI support for the Altera FPGA.
This MSI driver supports Altera MSI to GIC controller IP.
@@ -215,7 +215,7 @@ config PCIE_ROCKCHIP_HOST
tristate "Rockchip PCIe host controller"
depends on ARCH_ROCKCHIP || COMPILE_TEST
depends on OF
- depends on PCI_MSI_IRQ_DOMAIN
+ depends on PCI_MSI
select MFD_SYSCON
select PCIE_ROCKCHIP
help
@@ -239,7 +239,7 @@ config PCIE_MEDIATEK
tristate "MediaTek PCIe controller"
depends on ARCH_AIROHA || ARCH_MEDIATEK || COMPILE_TEST
depends on OF
- depends on PCI_MSI_IRQ_DOMAIN
+ depends on PCI_MSI
help
Say Y here if you want to enable PCIe controller support on
MediaTek SoCs.
@@ -247,7 +247,7 @@ config PCIE_MEDIATEK
config PCIE_MEDIATEK_GEN3
tristate "MediaTek Gen3 PCIe controller"
depends on ARCH_MEDIATEK || COMPILE_TEST
- depends on PCI_MSI_IRQ_DOMAIN
+ depends on PCI_MSI
help
Adds support for PCIe Gen3 MAC controller for MediaTek SoCs.
This PCIe controller is compatible with Gen3, Gen2 and Gen1 speed,
@@ -277,7 +277,7 @@ config PCIE_BRCMSTB
depends on ARCH_BRCMSTB || ARCH_BCM2835 || ARCH_BCMBCA || \
BMIPS_GENERIC || COMPILE_TEST
depends on OF
- depends on PCI_MSI_IRQ_DOMAIN
+ depends on PCI_MSI
default ARCH_BRCMSTB || BMIPS_GENERIC
help
Say Y here to enable PCIe host controller support for
@@ -285,7 +285,7 @@ config PCIE_BRCMSTB
config PCI_HYPERV_INTERFACE
tristate "Hyper-V PCI Interface"
- depends on ((X86 && X86_64) || ARM64) && HYPERV && PCI_MSI && PCI_MSI_IRQ_DOMAIN
+ depends on ((X86 && X86_64) || ARM64) && HYPERV && PCI_MSI
help
The Hyper-V PCI Interface is a helper driver allows other drivers to
have a common interface with the Hyper-V PCI frontend driver.
@@ -303,8 +303,6 @@ config PCI_LOONGSON
config PCIE_MICROCHIP_HOST
bool "Microchip AXI PCIe host bridge support"
depends on PCI_MSI && OF
- select PCI_MSI_IRQ_DOMAIN
- select GENERIC_MSI_IRQ_DOMAIN
select PCI_HOST_COMMON
help
Say Y here if you want kernel to support the Microchip AXI PCIe
@@ -326,7 +324,7 @@ config PCIE_APPLE
tristate "Apple PCIe controller"
depends on ARCH_APPLE || COMPILE_TEST
depends on OF
- depends on PCI_MSI_IRQ_DOMAIN
+ depends on PCI_MSI
select PCI_HOST_COMMON
help
Say Y here if you want to enable PCIe controller support on Apple
diff --git a/drivers/pci/controller/cadence/pci-j721e.c b/drivers/pci/controller/cadence/pci-j721e.c
index a82f845cc4b5..cc83a8925ce0 100644
--- a/drivers/pci/controller/cadence/pci-j721e.c
+++ b/drivers/pci/controller/cadence/pci-j721e.c
@@ -15,7 +15,6 @@
#include <linux/mfd/syscon.h>
#include <linux/of.h>
#include <linux/of_device.h>
-#include <linux/of_irq.h>
#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
diff --git a/drivers/pci/controller/dwc/Kconfig b/drivers/pci/controller/dwc/Kconfig
index 62ce3abf0f19..a0d2713f0e88 100644
--- a/drivers/pci/controller/dwc/Kconfig
+++ b/drivers/pci/controller/dwc/Kconfig
@@ -21,7 +21,7 @@ config PCI_DRA7XX_HOST
tristate "TI DRA7xx PCIe controller Host Mode"
depends on SOC_DRA7XX || COMPILE_TEST
depends on OF && HAS_IOMEM && TI_PIPE3
- depends on PCI_MSI_IRQ_DOMAIN
+ depends on PCI_MSI
select PCIE_DW_HOST
select PCI_DRA7XX
default y if SOC_DRA7XX
@@ -53,7 +53,7 @@ config PCIE_DW_PLAT
config PCIE_DW_PLAT_HOST
bool "Platform bus based DesignWare PCIe Controller - Host mode"
- depends on PCI_MSI_IRQ_DOMAIN
+ depends on PCI_MSI
select PCIE_DW_HOST
select PCIE_DW_PLAT
help
@@ -67,7 +67,7 @@ config PCIE_DW_PLAT_HOST
config PCIE_DW_PLAT_EP
bool "Platform bus based DesignWare PCIe Controller - Endpoint mode"
- depends on PCI && PCI_MSI_IRQ_DOMAIN
+ depends on PCI && PCI_MSI
depends on PCI_ENDPOINT
select PCIE_DW_EP
select PCIE_DW_PLAT
@@ -83,7 +83,7 @@ config PCIE_DW_PLAT_EP
config PCI_EXYNOS
tristate "Samsung Exynos PCIe controller"
depends on ARCH_EXYNOS || COMPILE_TEST
- depends on PCI_MSI_IRQ_DOMAIN
+ depends on PCI_MSI
select PCIE_DW_HOST
help
Enables support for the PCIe controller in the Samsung Exynos SoCs
@@ -94,13 +94,13 @@ config PCI_EXYNOS
config PCI_IMX6
bool "Freescale i.MX6/7/8 PCIe controller"
depends on ARCH_MXC || COMPILE_TEST
- depends on PCI_MSI_IRQ_DOMAIN
+ depends on PCI_MSI
select PCIE_DW_HOST
config PCIE_SPEAR13XX
bool "STMicroelectronics SPEAr PCIe controller"
depends on ARCH_SPEAR13XX || COMPILE_TEST
- depends on PCI_MSI_IRQ_DOMAIN
+ depends on PCI_MSI
select PCIE_DW_HOST
help
Say Y here if you want PCIe support on SPEAr13XX SoCs.
@@ -111,7 +111,7 @@ config PCI_KEYSTONE
config PCI_KEYSTONE_HOST
bool "PCI Keystone Host Mode"
depends on ARCH_KEYSTONE || ARCH_K3 || COMPILE_TEST
- depends on PCI_MSI_IRQ_DOMAIN
+ depends on PCI_MSI
select PCIE_DW_HOST
select PCI_KEYSTONE
help
@@ -135,7 +135,7 @@ config PCI_KEYSTONE_EP
config PCI_LAYERSCAPE
bool "Freescale Layerscape PCIe controller - Host mode"
depends on OF && (ARM || ARCH_LAYERSCAPE || COMPILE_TEST)
- depends on PCI_MSI_IRQ_DOMAIN
+ depends on PCI_MSI
select PCIE_DW_HOST
select MFD_SYSCON
help
@@ -160,7 +160,7 @@ config PCI_LAYERSCAPE_EP
config PCI_HISI
depends on OF && (ARM64 || COMPILE_TEST)
bool "HiSilicon Hip05 and Hip06 SoCs PCIe controllers"
- depends on PCI_MSI_IRQ_DOMAIN
+ depends on PCI_MSI
select PCIE_DW_HOST
select PCI_HOST_COMMON
help
@@ -170,7 +170,7 @@ config PCI_HISI
config PCIE_QCOM
bool "Qualcomm PCIe controller"
depends on OF && (ARCH_QCOM || COMPILE_TEST)
- depends on PCI_MSI_IRQ_DOMAIN
+ depends on PCI_MSI
select PCIE_DW_HOST
select CRC8
help
@@ -191,7 +191,7 @@ config PCIE_QCOM_EP
config PCIE_ARMADA_8K
bool "Marvell Armada-8K PCIe controller"
depends on ARCH_MVEBU || COMPILE_TEST
- depends on PCI_MSI_IRQ_DOMAIN
+ depends on PCI_MSI
select PCIE_DW_HOST
help
Say Y here if you want to enable PCIe controller support on
@@ -205,7 +205,7 @@ config PCIE_ARTPEC6
config PCIE_ARTPEC6_HOST
bool "Axis ARTPEC-6 PCIe controller Host Mode"
depends on MACH_ARTPEC6 || COMPILE_TEST
- depends on PCI_MSI_IRQ_DOMAIN
+ depends on PCI_MSI
select PCIE_DW_HOST
select PCIE_ARTPEC6
help
@@ -222,11 +222,20 @@ config PCIE_ARTPEC6_EP
Enables support for the PCIe controller in the ARTPEC-6 SoC to work in
endpoint mode. This uses the DesignWare core.
+config PCIE_BT1
+ tristate "Baikal-T1 PCIe controller"
+ depends on MIPS_BAIKAL_T1 || COMPILE_TEST
+ depends on PCI_MSI_IRQ_DOMAIN
+ select PCIE_DW_HOST
+ help
+ Enables support for the PCIe controller in the Baikal-T1 SoC to work
+ in host mode. It's based on the Synopsys DWC PCIe v4.60a IP-core.
+
config PCIE_ROCKCHIP_DW_HOST
bool "Rockchip DesignWare PCIe controller"
select PCIE_DW
select PCIE_DW_HOST
- depends on PCI_MSI_IRQ_DOMAIN
+ depends on PCI_MSI
depends on ARCH_ROCKCHIP || COMPILE_TEST
depends on OF
help
@@ -236,7 +245,7 @@ config PCIE_ROCKCHIP_DW_HOST
config PCIE_INTEL_GW
bool "Intel Gateway PCIe host controller support"
depends on OF && (X86 || COMPILE_TEST)
- depends on PCI_MSI_IRQ_DOMAIN
+ depends on PCI_MSI
select PCIE_DW_HOST
help
Say 'Y' here to enable PCIe Host controller support on Intel
@@ -250,7 +259,7 @@ config PCIE_KEEMBAY
config PCIE_KEEMBAY_HOST
bool "Intel Keem Bay PCIe controller - Host mode"
depends on ARCH_KEEMBAY || COMPILE_TEST
- depends on PCI && PCI_MSI_IRQ_DOMAIN
+ depends on PCI_MSI
select PCIE_DW_HOST
select PCIE_KEEMBAY
help
@@ -262,7 +271,7 @@ config PCIE_KEEMBAY_HOST
config PCIE_KEEMBAY_EP
bool "Intel Keem Bay PCIe controller - Endpoint mode"
depends on ARCH_KEEMBAY || COMPILE_TEST
- depends on PCI && PCI_MSI_IRQ_DOMAIN
+ depends on PCI_MSI
depends on PCI_ENDPOINT
select PCIE_DW_EP
select PCIE_KEEMBAY
@@ -275,7 +284,7 @@ config PCIE_KEEMBAY_EP
config PCIE_KIRIN
depends on OF && (ARM64 || COMPILE_TEST)
tristate "HiSilicon Kirin series SoCs PCIe controllers"
- depends on PCI_MSI_IRQ_DOMAIN
+ depends on PCI_MSI
select PCIE_DW_HOST
help
Say Y here if you want PCIe controller support
@@ -284,7 +293,7 @@ config PCIE_KIRIN
config PCIE_HISI_STB
bool "HiSilicon STB SoCs PCIe controllers"
depends on ARCH_HISI || COMPILE_TEST
- depends on PCI_MSI_IRQ_DOMAIN
+ depends on PCI_MSI
select PCIE_DW_HOST
help
Say Y here if you want PCIe controller support on HiSilicon STB SoCs
@@ -292,7 +301,7 @@ config PCIE_HISI_STB
config PCI_MESON
tristate "MESON PCIe controller"
default m if ARCH_MESON
- depends on PCI_MSI_IRQ_DOMAIN
+ depends on PCI_MSI
select PCIE_DW_HOST
help
Say Y here if you want to enable PCI controller support on Amlogic
@@ -306,7 +315,7 @@ config PCIE_TEGRA194
config PCIE_TEGRA194_HOST
tristate "NVIDIA Tegra194 (and later) PCIe controller - Host Mode"
depends on ARCH_TEGRA_194_SOC || COMPILE_TEST
- depends on PCI_MSI_IRQ_DOMAIN
+ depends on PCI_MSI
select PCIE_DW_HOST
select PHY_TEGRA194_P2U
select PCIE_TEGRA194
@@ -336,7 +345,7 @@ config PCIE_TEGRA194_EP
config PCIE_VISCONTI_HOST
bool "Toshiba Visconti PCIe controllers"
depends on ARCH_VISCONTI || COMPILE_TEST
- depends on PCI_MSI_IRQ_DOMAIN
+ depends on PCI_MSI
select PCIE_DW_HOST
help
Say Y here if you want PCIe controller support on Toshiba Visconti SoC.
@@ -346,7 +355,7 @@ config PCIE_UNIPHIER
bool "Socionext UniPhier PCIe host controllers"
depends on ARCH_UNIPHIER || COMPILE_TEST
depends on OF && HAS_IOMEM
- depends on PCI_MSI_IRQ_DOMAIN
+ depends on PCI_MSI
select PCIE_DW_HOST
help
Say Y here if you want PCIe host controller support on UniPhier SoCs.
@@ -365,7 +374,7 @@ config PCIE_UNIPHIER_EP
config PCIE_AL
bool "Amazon Annapurna Labs PCIe controller"
depends on OF && (ARM64 || COMPILE_TEST)
- depends on PCI_MSI_IRQ_DOMAIN
+ depends on PCI_MSI
select PCIE_DW_HOST
select PCI_ECAM
help
@@ -377,7 +386,7 @@ config PCIE_AL
config PCIE_FU740
bool "SiFive FU740 PCIe host controller"
- depends on PCI_MSI_IRQ_DOMAIN
+ depends on PCI_MSI
depends on SOC_SIFIVE || COMPILE_TEST
select PCIE_DW_HOST
help
diff --git a/drivers/pci/controller/dwc/Makefile b/drivers/pci/controller/dwc/Makefile
index 8ba7b67f5e50..bf5c311875a1 100644
--- a/drivers/pci/controller/dwc/Makefile
+++ b/drivers/pci/controller/dwc/Makefile
@@ -3,6 +3,7 @@ obj-$(CONFIG_PCIE_DW) += pcie-designware.o
obj-$(CONFIG_PCIE_DW_HOST) += pcie-designware-host.o
obj-$(CONFIG_PCIE_DW_EP) += pcie-designware-ep.o
obj-$(CONFIG_PCIE_DW_PLAT) += pcie-designware-plat.o
+obj-$(CONFIG_PCIE_BT1) += pcie-bt1.o
obj-$(CONFIG_PCI_DRA7XX) += pci-dra7xx.o
obj-$(CONFIG_PCI_EXYNOS) += pci-exynos.o
obj-$(CONFIG_PCIE_FU740) += pcie-fu740.o
diff --git a/drivers/pci/controller/dwc/pci-imx6.c b/drivers/pci/controller/dwc/pci-imx6.c
index 2616585ca5f8..1dde5c579edc 100644
--- a/drivers/pci/controller/dwc/pci-imx6.c
+++ b/drivers/pci/controller/dwc/pci-imx6.c
@@ -952,12 +952,6 @@ static int imx6_pcie_host_init(struct dw_pcie_rp *pp)
}
}
- ret = imx6_pcie_deassert_core_reset(imx6_pcie);
- if (ret < 0) {
- dev_err(dev, "pcie deassert core reset failed: %d\n", ret);
- goto err_phy_off;
- }
-
if (imx6_pcie->phy) {
ret = phy_power_on(imx6_pcie->phy);
if (ret) {
@@ -965,6 +959,13 @@ static int imx6_pcie_host_init(struct dw_pcie_rp *pp)
goto err_phy_off;
}
}
+
+ ret = imx6_pcie_deassert_core_reset(imx6_pcie);
+ if (ret < 0) {
+ dev_err(dev, "pcie deassert core reset failed: %d\n", ret);
+ goto err_phy_off;
+ }
+
imx6_setup_phy_mpll(imx6_pcie);
return 0;
diff --git a/drivers/pci/controller/dwc/pci-layerscape.c b/drivers/pci/controller/dwc/pci-layerscape.c
index 879b8692f96a..ed5fb492fe08 100644
--- a/drivers/pci/controller/dwc/pci-layerscape.c
+++ b/drivers/pci/controller/dwc/pci-layerscape.c
@@ -13,7 +13,6 @@
#include <linux/init.h>
#include <linux/of_pci.h>
#include <linux/of_platform.h>
-#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
diff --git a/drivers/pci/controller/dwc/pcie-armada8k.c b/drivers/pci/controller/dwc/pcie-armada8k.c
index dc469ef8e99b..5c999e15c357 100644
--- a/drivers/pci/controller/dwc/pcie-armada8k.c
+++ b/drivers/pci/controller/dwc/pcie-armada8k.c
@@ -21,7 +21,6 @@
#include <linux/platform_device.h>
#include <linux/resource.h>
#include <linux/of_pci.h>
-#include <linux/of_irq.h>
#include "pcie-designware.h"
diff --git a/drivers/pci/controller/dwc/pcie-bt1.c b/drivers/pci/controller/dwc/pcie-bt1.c
new file mode 100644
index 000000000000..3346770e6654
--- /dev/null
+++ b/drivers/pci/controller/dwc/pcie-bt1.c
@@ -0,0 +1,643 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2021 BAIKAL ELECTRONICS, JSC
+ *
+ * Authors:
+ * Vadim Vlasov <Vadim.Vlasov@baikalelectronics.ru>
+ * Serge Semin <Sergey.Semin@baikalelectronics.ru>
+ *
+ * Baikal-T1 PCIe controller driver
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/kernel.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+#include <linux/types.h>
+
+#include "pcie-designware.h"
+
+/* Baikal-T1 System CCU control registers */
+#define BT1_CCU_PCIE_CLKC 0x140
+#define BT1_CCU_PCIE_REQ_PCS_CLK BIT(16)
+#define BT1_CCU_PCIE_REQ_MAC_CLK BIT(17)
+#define BT1_CCU_PCIE_REQ_PIPE_CLK BIT(18)
+
+#define BT1_CCU_PCIE_RSTC 0x144
+#define BT1_CCU_PCIE_REQ_LINK_RST BIT(13)
+#define BT1_CCU_PCIE_REQ_SMLH_RST BIT(14)
+#define BT1_CCU_PCIE_REQ_PHY_RST BIT(16)
+#define BT1_CCU_PCIE_REQ_CORE_RST BIT(24)
+#define BT1_CCU_PCIE_REQ_STICKY_RST BIT(26)
+#define BT1_CCU_PCIE_REQ_NSTICKY_RST BIT(27)
+
+#define BT1_CCU_PCIE_PMSC 0x148
+#define BT1_CCU_PCIE_LTSSM_STATE_MASK GENMASK(5, 0)
+#define BT1_CCU_PCIE_LTSSM_DET_QUIET 0x00
+#define BT1_CCU_PCIE_LTSSM_DET_ACT 0x01
+#define BT1_CCU_PCIE_LTSSM_POLL_ACT 0x02
+#define BT1_CCU_PCIE_LTSSM_POLL_COMP 0x03
+#define BT1_CCU_PCIE_LTSSM_POLL_CONF 0x04
+#define BT1_CCU_PCIE_LTSSM_PRE_DET_QUIET 0x05
+#define BT1_CCU_PCIE_LTSSM_DET_WAIT 0x06
+#define BT1_CCU_PCIE_LTSSM_CFG_LNKWD_START 0x07
+#define BT1_CCU_PCIE_LTSSM_CFG_LNKWD_ACEPT 0x08
+#define BT1_CCU_PCIE_LTSSM_CFG_LNNUM_WAIT 0x09
+#define BT1_CCU_PCIE_LTSSM_CFG_LNNUM_ACEPT 0x0a
+#define BT1_CCU_PCIE_LTSSM_CFG_COMPLETE 0x0b
+#define BT1_CCU_PCIE_LTSSM_CFG_IDLE 0x0c
+#define BT1_CCU_PCIE_LTSSM_RCVR_LOCK 0x0d
+#define BT1_CCU_PCIE_LTSSM_RCVR_SPEED 0x0e
+#define BT1_CCU_PCIE_LTSSM_RCVR_RCVRCFG 0x0f
+#define BT1_CCU_PCIE_LTSSM_RCVR_IDLE 0x10
+#define BT1_CCU_PCIE_LTSSM_L0 0x11
+#define BT1_CCU_PCIE_LTSSM_L0S 0x12
+#define BT1_CCU_PCIE_LTSSM_L123_SEND_IDLE 0x13
+#define BT1_CCU_PCIE_LTSSM_L1_IDLE 0x14
+#define BT1_CCU_PCIE_LTSSM_L2_IDLE 0x15
+#define BT1_CCU_PCIE_LTSSM_L2_WAKE 0x16
+#define BT1_CCU_PCIE_LTSSM_DIS_ENTRY 0x17
+#define BT1_CCU_PCIE_LTSSM_DIS_IDLE 0x18
+#define BT1_CCU_PCIE_LTSSM_DISABLE 0x19
+#define BT1_CCU_PCIE_LTSSM_LPBK_ENTRY 0x1a
+#define BT1_CCU_PCIE_LTSSM_LPBK_ACTIVE 0x1b
+#define BT1_CCU_PCIE_LTSSM_LPBK_EXIT 0x1c
+#define BT1_CCU_PCIE_LTSSM_LPBK_EXIT_TOUT 0x1d
+#define BT1_CCU_PCIE_LTSSM_HOT_RST_ENTRY 0x1e
+#define BT1_CCU_PCIE_LTSSM_HOT_RST 0x1f
+#define BT1_CCU_PCIE_LTSSM_RCVR_EQ0 0x20
+#define BT1_CCU_PCIE_LTSSM_RCVR_EQ1 0x21
+#define BT1_CCU_PCIE_LTSSM_RCVR_EQ2 0x22
+#define BT1_CCU_PCIE_LTSSM_RCVR_EQ3 0x23
+#define BT1_CCU_PCIE_SMLH_LINKUP BIT(6)
+#define BT1_CCU_PCIE_RDLH_LINKUP BIT(7)
+#define BT1_CCU_PCIE_PM_LINKSTATE_L0S BIT(8)
+#define BT1_CCU_PCIE_PM_LINKSTATE_L1 BIT(9)
+#define BT1_CCU_PCIE_PM_LINKSTATE_L2 BIT(10)
+#define BT1_CCU_PCIE_L1_PENDING BIT(12)
+#define BT1_CCU_PCIE_REQ_EXIT_L1 BIT(14)
+#define BT1_CCU_PCIE_LTSSM_RCVR_EQ BIT(15)
+#define BT1_CCU_PCIE_PM_DSTAT_MASK GENMASK(18, 16)
+#define BT1_CCU_PCIE_PM_PME_EN BIT(20)
+#define BT1_CCU_PCIE_PM_PME_STATUS BIT(21)
+#define BT1_CCU_PCIE_AUX_PM_EN BIT(22)
+#define BT1_CCU_PCIE_AUX_PWR_DET BIT(23)
+#define BT1_CCU_PCIE_WAKE_DET BIT(24)
+#define BT1_CCU_PCIE_TURNOFF_REQ BIT(30)
+#define BT1_CCU_PCIE_TURNOFF_ACK BIT(31)
+
+#define BT1_CCU_PCIE_GENC 0x14c
+#define BT1_CCU_PCIE_LTSSM_EN BIT(1)
+#define BT1_CCU_PCIE_DBI2_MODE BIT(2)
+#define BT1_CCU_PCIE_MGMT_EN BIT(3)
+#define BT1_CCU_PCIE_RXLANE_FLIP_EN BIT(16)
+#define BT1_CCU_PCIE_TXLANE_FLIP_EN BIT(17)
+#define BT1_CCU_PCIE_SLV_XFER_PEND BIT(24)
+#define BT1_CCU_PCIE_RCV_XFER_PEND BIT(25)
+#define BT1_CCU_PCIE_DBI_XFER_PEND BIT(26)
+#define BT1_CCU_PCIE_DMA_XFER_PEND BIT(27)
+
+#define BT1_CCU_PCIE_LTSSM_LINKUP(_pmsc) \
+({ \
+ int __state = FIELD_GET(BT1_CCU_PCIE_LTSSM_STATE_MASK, _pmsc); \
+ __state >= BT1_CCU_PCIE_LTSSM_L0 && __state <= BT1_CCU_PCIE_LTSSM_L2_WAKE; \
+})
+
+/* Baikal-T1 PCIe specific control registers */
+#define BT1_PCIE_AXI2MGM_LANENUM 0xd04
+#define BT1_PCIE_AXI2MGM_LANESEL_MASK GENMASK(3, 0)
+
+#define BT1_PCIE_AXI2MGM_ADDRCTL 0xd08
+#define BT1_PCIE_AXI2MGM_PHYREG_ADDR_MASK GENMASK(20, 0)
+#define BT1_PCIE_AXI2MGM_READ_FLAG BIT(29)
+#define BT1_PCIE_AXI2MGM_DONE BIT(30)
+#define BT1_PCIE_AXI2MGM_BUSY BIT(31)
+
+#define BT1_PCIE_AXI2MGM_WRITEDATA 0xd0c
+#define BT1_PCIE_AXI2MGM_WDATA GENMASK(15, 0)
+
+#define BT1_PCIE_AXI2MGM_READDATA 0xd10
+#define BT1_PCIE_AXI2MGM_RDATA GENMASK(15, 0)
+
+/* Generic Baikal-T1 PCIe interface resources */
+#define BT1_PCIE_NUM_APP_CLKS ARRAY_SIZE(bt1_pcie_app_clks)
+#define BT1_PCIE_NUM_CORE_CLKS ARRAY_SIZE(bt1_pcie_core_clks)
+#define BT1_PCIE_NUM_APP_RSTS ARRAY_SIZE(bt1_pcie_app_rsts)
+#define BT1_PCIE_NUM_CORE_RSTS ARRAY_SIZE(bt1_pcie_core_rsts)
+
+/* PCIe bus setup delays and timeouts */
+#define BT1_PCIE_RST_DELAY_MS 100
+#define BT1_PCIE_RUN_DELAY_US 100
+#define BT1_PCIE_REQ_DELAY_US 1
+#define BT1_PCIE_REQ_TIMEOUT_US 1000
+#define BT1_PCIE_LNK_DELAY_US 1000
+#define BT1_PCIE_LNK_TIMEOUT_US 1000000
+
+static const enum dw_pcie_app_clk bt1_pcie_app_clks[] = {
+ DW_PCIE_DBI_CLK, DW_PCIE_MSTR_CLK, DW_PCIE_SLV_CLK,
+};
+
+static const enum dw_pcie_core_clk bt1_pcie_core_clks[] = {
+ DW_PCIE_REF_CLK,
+};
+
+static const enum dw_pcie_app_rst bt1_pcie_app_rsts[] = {
+ DW_PCIE_MSTR_RST, DW_PCIE_SLV_RST,
+};
+
+static const enum dw_pcie_core_rst bt1_pcie_core_rsts[] = {
+ DW_PCIE_NON_STICKY_RST, DW_PCIE_STICKY_RST, DW_PCIE_CORE_RST,
+ DW_PCIE_PIPE_RST, DW_PCIE_PHY_RST, DW_PCIE_HOT_RST, DW_PCIE_PWR_RST,
+};
+
+struct bt1_pcie {
+ struct dw_pcie dw;
+ struct platform_device *pdev;
+ struct regmap *sys_regs;
+};
+#define to_bt1_pcie(_dw) container_of(_dw, struct bt1_pcie, dw)
+
+/*
+ * Baikal-T1 MMIO space must be read/written by the dword-aligned
+ * instructions. Note the methods are optimized to have the dword operations
+ * performed with minimum overhead as the most frequently used ones.
+ */
+static int bt1_pcie_read_mmio(void __iomem *addr, int size, u32 *val)
+{
+ unsigned int ofs = (uintptr_t)addr & 0x3;
+
+ if (!IS_ALIGNED((uintptr_t)addr, size))
+ return -EINVAL;
+
+ *val = readl(addr - ofs) >> ofs * BITS_PER_BYTE;
+ if (size == 4) {
+ return 0;
+ } else if (size == 2) {
+ *val &= 0xffff;
+ return 0;
+ } else if (size == 1) {
+ *val &= 0xff;
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static int bt1_pcie_write_mmio(void __iomem *addr, int size, u32 val)
+{
+ unsigned int ofs = (uintptr_t)addr & 0x3;
+ u32 tmp, mask;
+
+ if (!IS_ALIGNED((uintptr_t)addr, size))
+ return -EINVAL;
+
+ if (size == 4) {
+ writel(val, addr);
+ return 0;
+ } else if (size == 2 || size == 1) {
+ mask = GENMASK(size * BITS_PER_BYTE - 1, 0);
+ tmp = readl(addr - ofs) & ~(mask << ofs * BITS_PER_BYTE);
+ tmp |= (val & mask) << ofs * BITS_PER_BYTE;
+ writel(tmp, addr - ofs);
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static u32 bt1_pcie_read_dbi(struct dw_pcie *pci, void __iomem *base, u32 reg,
+ size_t size)
+{
+ int ret;
+ u32 val;
+
+ ret = bt1_pcie_read_mmio(base + reg, size, &val);
+ if (ret) {
+ dev_err(pci->dev, "Read DBI address failed\n");
+ return ~0U;
+ }
+
+ return val;
+}
+
+static void bt1_pcie_write_dbi(struct dw_pcie *pci, void __iomem *base, u32 reg,
+ size_t size, u32 val)
+{
+ int ret;
+
+ ret = bt1_pcie_write_mmio(base + reg, size, val);
+ if (ret)
+ dev_err(pci->dev, "Write DBI address failed\n");
+}
+
+static void bt1_pcie_write_dbi2(struct dw_pcie *pci, void __iomem *base, u32 reg,
+ size_t size, u32 val)
+{
+ struct bt1_pcie *btpci = to_bt1_pcie(pci);
+ int ret;
+
+ regmap_update_bits(btpci->sys_regs, BT1_CCU_PCIE_GENC,
+ BT1_CCU_PCIE_DBI2_MODE, BT1_CCU_PCIE_DBI2_MODE);
+
+ ret = bt1_pcie_write_mmio(base + reg, size, val);
+ if (ret)
+ dev_err(pci->dev, "Write DBI2 address failed\n");
+
+ regmap_update_bits(btpci->sys_regs, BT1_CCU_PCIE_GENC,
+ BT1_CCU_PCIE_DBI2_MODE, 0);
+}
+
+static int bt1_pcie_start_link(struct dw_pcie *pci)
+{
+ struct bt1_pcie *btpci = to_bt1_pcie(pci);
+ u32 val;
+ int ret;
+
+ /*
+ * Enable LTSSM and make sure it was able to establish both PHY and
+ * data links. This procedure shall work fine to reach 2.5 GT/s speed.
+ */
+ regmap_update_bits(btpci->sys_regs, BT1_CCU_PCIE_GENC,
+ BT1_CCU_PCIE_LTSSM_EN, BT1_CCU_PCIE_LTSSM_EN);
+
+ ret = regmap_read_poll_timeout(btpci->sys_regs, BT1_CCU_PCIE_PMSC, val,
+ (val & BT1_CCU_PCIE_SMLH_LINKUP),
+ BT1_PCIE_LNK_DELAY_US, BT1_PCIE_LNK_TIMEOUT_US);
+ if (ret) {
+ dev_err(pci->dev, "LTSSM failed to set PHY link up\n");
+ return ret;
+ }
+
+ ret = regmap_read_poll_timeout(btpci->sys_regs, BT1_CCU_PCIE_PMSC, val,
+ (val & BT1_CCU_PCIE_RDLH_LINKUP),
+ BT1_PCIE_LNK_DELAY_US, BT1_PCIE_LNK_TIMEOUT_US);
+ if (ret) {
+ dev_err(pci->dev, "LTSSM failed to set data link up\n");
+ return ret;
+ }
+
+ /*
+ * Activate direct speed change after the link is established in an
+ * attempt to reach a higher bus performance (up to Gen.3 - 8.0 GT/s).
+ * This is required at least to get 8.0 GT/s speed.
+ */
+ val = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
+ val |= PORT_LOGIC_SPEED_CHANGE;
+ dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, val);
+
+ ret = regmap_read_poll_timeout(btpci->sys_regs, BT1_CCU_PCIE_PMSC, val,
+ BT1_CCU_PCIE_LTSSM_LINKUP(val),
+ BT1_PCIE_LNK_DELAY_US, BT1_PCIE_LNK_TIMEOUT_US);
+ if (ret)
+ dev_err(pci->dev, "LTSSM failed to get into L0 state\n");
+
+ return ret;
+}
+
+static void bt1_pcie_stop_link(struct dw_pcie *pci)
+{
+ struct bt1_pcie *btpci = to_bt1_pcie(pci);
+
+ regmap_update_bits(btpci->sys_regs, BT1_CCU_PCIE_GENC,
+ BT1_CCU_PCIE_LTSSM_EN, 0);
+}
+
+static const struct dw_pcie_ops bt1_pcie_ops = {
+ .read_dbi = bt1_pcie_read_dbi,
+ .write_dbi = bt1_pcie_write_dbi,
+ .write_dbi2 = bt1_pcie_write_dbi2,
+ .start_link = bt1_pcie_start_link,
+ .stop_link = bt1_pcie_stop_link,
+};
+
+static struct pci_ops bt1_pci_ops = {
+ .map_bus = dw_pcie_own_conf_map_bus,
+ .read = pci_generic_config_read32,
+ .write = pci_generic_config_write32,
+};
+
+static int bt1_pcie_get_resources(struct bt1_pcie *btpci)
+{
+ struct device *dev = btpci->dw.dev;
+ int i;
+
+ /* DBI access is supposed to be performed by the dword-aligned IOs */
+ btpci->dw.pp.bridge->ops = &bt1_pci_ops;
+
+ /* These CSRs are in MMIO so we won't check the regmap-methods status */
+ btpci->sys_regs =
+ syscon_regmap_lookup_by_phandle(dev->of_node, "baikal,bt1-syscon");
+ if (IS_ERR(btpci->sys_regs))
+ return dev_err_probe(dev, PTR_ERR(btpci->sys_regs),
+ "Failed to get syscon\n");
+
+ /* Make sure all the required resources have been specified */
+ for (i = 0; i < BT1_PCIE_NUM_APP_CLKS; i++) {
+ if (!btpci->dw.app_clks[bt1_pcie_app_clks[i]].clk) {
+ dev_err(dev, "App clocks set is incomplete\n");
+ return -ENOENT;
+ }
+ }
+
+ for (i = 0; i < BT1_PCIE_NUM_CORE_CLKS; i++) {
+ if (!btpci->dw.core_clks[bt1_pcie_core_clks[i]].clk) {
+ dev_err(dev, "Core clocks set is incomplete\n");
+ return -ENOENT;
+ }
+ }
+
+ for (i = 0; i < BT1_PCIE_NUM_APP_RSTS; i++) {
+ if (!btpci->dw.app_rsts[bt1_pcie_app_rsts[i]].rstc) {
+ dev_err(dev, "App resets set is incomplete\n");
+ return -ENOENT;
+ }
+ }
+
+ for (i = 0; i < BT1_PCIE_NUM_CORE_RSTS; i++) {
+ if (!btpci->dw.core_rsts[bt1_pcie_core_rsts[i]].rstc) {
+ dev_err(dev, "Core resets set is incomplete\n");
+ return -ENOENT;
+ }
+ }
+
+ return 0;
+}
+
+static void bt1_pcie_full_stop_bus(struct bt1_pcie *btpci, bool init)
+{
+ struct device *dev = btpci->dw.dev;
+ struct dw_pcie *pci = &btpci->dw;
+ int ret;
+
+ /* Disable LTSSM for sure */
+ regmap_update_bits(btpci->sys_regs, BT1_CCU_PCIE_GENC,
+ BT1_CCU_PCIE_LTSSM_EN, 0);
+
+ /*
+ * Application reset controls are trigger-based so assert the core
+ * resets only.
+ */
+ ret = reset_control_bulk_assert(DW_PCIE_NUM_CORE_RSTS, pci->core_rsts);
+ if (ret)
+ dev_err(dev, "Failed to assert core resets\n");
+
+ /*
+ * Clocks are disabled by default at least in accordance with the clk
+ * enable counter value on init stage.
+ */
+ if (!init) {
+ clk_bulk_disable_unprepare(DW_PCIE_NUM_CORE_CLKS, pci->core_clks);
+
+ clk_bulk_disable_unprepare(DW_PCIE_NUM_APP_CLKS, pci->app_clks);
+ }
+
+ /* The peripheral devices are unavailable anyway so reset them too */
+ gpiod_set_value_cansleep(pci->pe_rst, 1);
+
+ /* Make sure all the resets are settled */
+ msleep(BT1_PCIE_RST_DELAY_MS);
+}
+
+/*
+ * Implements the cold reset procedure in accordance with the reference manual
+ * and available PM signals.
+ */
+static int bt1_pcie_cold_start_bus(struct bt1_pcie *btpci)
+{
+ struct device *dev = btpci->dw.dev;
+ struct dw_pcie *pci = &btpci->dw;
+ u32 val;
+ int ret;
+
+ /* First get out of the Power/Hot reset state */
+ ret = reset_control_deassert(pci->core_rsts[DW_PCIE_PWR_RST].rstc);
+ if (ret) {
+ dev_err(dev, "Failed to deassert PHY reset\n");
+ return ret;
+ }
+
+ ret = reset_control_deassert(pci->core_rsts[DW_PCIE_HOT_RST].rstc);
+ if (ret) {
+ dev_err(dev, "Failed to deassert hot reset\n");
+ goto err_assert_pwr_rst;
+ }
+
+ /* Wait for the PM-core to stop requesting the PHY reset */
+ ret = regmap_read_poll_timeout(btpci->sys_regs, BT1_CCU_PCIE_RSTC, val,
+ !(val & BT1_CCU_PCIE_REQ_PHY_RST),
+ BT1_PCIE_REQ_DELAY_US, BT1_PCIE_REQ_TIMEOUT_US);
+ if (ret) {
+ dev_err(dev, "Timed out waiting for PM to stop PHY resetting\n");
+ goto err_assert_hot_rst;
+ }
+
+ ret = reset_control_deassert(pci->core_rsts[DW_PCIE_PHY_RST].rstc);
+ if (ret) {
+ dev_err(dev, "Failed to deassert PHY reset\n");
+ goto err_assert_hot_rst;
+ }
+
+ /* Clocks can be now enabled, but the ref one is crucial at this stage */
+ ret = clk_bulk_prepare_enable(DW_PCIE_NUM_APP_CLKS, pci->app_clks);
+ if (ret) {
+ dev_err(dev, "Failed to enable app clocks\n");
+ goto err_assert_phy_rst;
+ }
+
+ ret = clk_bulk_prepare_enable(DW_PCIE_NUM_CORE_CLKS, pci->core_clks);
+ if (ret) {
+ dev_err(dev, "Failed to enable ref clocks\n");
+ goto err_disable_app_clk;
+ }
+
+ /* Wait for the PM to stop requesting the controller core reset */
+ ret = regmap_read_poll_timeout(btpci->sys_regs, BT1_CCU_PCIE_RSTC, val,
+ !(val & BT1_CCU_PCIE_REQ_CORE_RST),
+ BT1_PCIE_REQ_DELAY_US, BT1_PCIE_REQ_TIMEOUT_US);
+ if (ret) {
+ dev_err(dev, "Timed out waiting for PM to stop core resetting\n");
+ goto err_disable_core_clk;
+ }
+
+ /* PCS-PIPE interface and controller core can be now activated */
+ ret = reset_control_deassert(pci->core_rsts[DW_PCIE_PIPE_RST].rstc);
+ if (ret) {
+ dev_err(dev, "Failed to deassert PIPE reset\n");
+ goto err_disable_core_clk;
+ }
+
+ ret = reset_control_deassert(pci->core_rsts[DW_PCIE_CORE_RST].rstc);
+ if (ret) {
+ dev_err(dev, "Failed to deassert core reset\n");
+ goto err_assert_pipe_rst;
+ }
+
+ /* It's recommended to reset the core and application logic together */
+ ret = reset_control_bulk_reset(DW_PCIE_NUM_APP_RSTS, pci->app_rsts);
+ if (ret) {
+ dev_err(dev, "Failed to reset app domain\n");
+ goto err_assert_core_rst;
+ }
+
+ /* Sticky/Non-sticky CSR flags can be now unreset too */
+ ret = reset_control_deassert(pci->core_rsts[DW_PCIE_STICKY_RST].rstc);
+ if (ret) {
+ dev_err(dev, "Failed to deassert sticky reset\n");
+ goto err_assert_core_rst;
+ }
+
+ ret = reset_control_deassert(pci->core_rsts[DW_PCIE_NON_STICKY_RST].rstc);
+ if (ret) {
+ dev_err(dev, "Failed to deassert non-sticky reset\n");
+ goto err_assert_sticky_rst;
+ }
+
+ /* Activate the PCIe bus peripheral devices */
+ gpiod_set_value_cansleep(pci->pe_rst, 0);
+
+ /* Make sure the state is settled (LTSSM is still disabled though) */
+ usleep_range(BT1_PCIE_RUN_DELAY_US, BT1_PCIE_RUN_DELAY_US + 100);
+
+ return 0;
+
+err_assert_sticky_rst:
+ reset_control_assert(pci->core_rsts[DW_PCIE_STICKY_RST].rstc);
+
+err_assert_core_rst:
+ reset_control_assert(pci->core_rsts[DW_PCIE_CORE_RST].rstc);
+
+err_assert_pipe_rst:
+ reset_control_assert(pci->core_rsts[DW_PCIE_PIPE_RST].rstc);
+
+err_disable_core_clk:
+ clk_bulk_disable_unprepare(DW_PCIE_NUM_CORE_CLKS, pci->core_clks);
+
+err_disable_app_clk:
+ clk_bulk_disable_unprepare(DW_PCIE_NUM_APP_CLKS, pci->app_clks);
+
+err_assert_phy_rst:
+ reset_control_assert(pci->core_rsts[DW_PCIE_PHY_RST].rstc);
+
+err_assert_hot_rst:
+ reset_control_assert(pci->core_rsts[DW_PCIE_HOT_RST].rstc);
+
+err_assert_pwr_rst:
+ reset_control_assert(pci->core_rsts[DW_PCIE_PWR_RST].rstc);
+
+ return ret;
+}
+
+static int bt1_pcie_host_init(struct dw_pcie_rp *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct bt1_pcie *btpci = to_bt1_pcie(pci);
+ int ret;
+
+ ret = bt1_pcie_get_resources(btpci);
+ if (ret)
+ return ret;
+
+ bt1_pcie_full_stop_bus(btpci, true);
+
+ return bt1_pcie_cold_start_bus(btpci);
+}
+
+static void bt1_pcie_host_deinit(struct dw_pcie_rp *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct bt1_pcie *btpci = to_bt1_pcie(pci);
+
+ bt1_pcie_full_stop_bus(btpci, false);
+}
+
+static const struct dw_pcie_host_ops bt1_pcie_host_ops = {
+ .host_init = bt1_pcie_host_init,
+ .host_deinit = bt1_pcie_host_deinit,
+};
+
+static struct bt1_pcie *bt1_pcie_create_data(struct platform_device *pdev)
+{
+ struct bt1_pcie *btpci;
+
+ btpci = devm_kzalloc(&pdev->dev, sizeof(*btpci), GFP_KERNEL);
+ if (!btpci)
+ return ERR_PTR(-ENOMEM);
+
+ btpci->pdev = pdev;
+
+ platform_set_drvdata(pdev, btpci);
+
+ return btpci;
+}
+
+static int bt1_pcie_add_port(struct bt1_pcie *btpci)
+{
+ struct device *dev = &btpci->pdev->dev;
+ int ret;
+
+ btpci->dw.version = DW_PCIE_VER_460A;
+ btpci->dw.dev = dev;
+ btpci->dw.ops = &bt1_pcie_ops;
+
+ btpci->dw.pp.num_vectors = MAX_MSI_IRQS;
+ btpci->dw.pp.ops = &bt1_pcie_host_ops;
+
+ dw_pcie_cap_set(&btpci->dw, REQ_RES);
+
+ ret = dw_pcie_host_init(&btpci->dw.pp);
+
+ return dev_err_probe(dev, ret, "Failed to initialize DWC PCIe host\n");
+}
+
+static void bt1_pcie_del_port(struct bt1_pcie *btpci)
+{
+ dw_pcie_host_deinit(&btpci->dw.pp);
+}
+
+static int bt1_pcie_probe(struct platform_device *pdev)
+{
+ struct bt1_pcie *btpci;
+
+ btpci = bt1_pcie_create_data(pdev);
+ if (IS_ERR(btpci))
+ return PTR_ERR(btpci);
+
+ return bt1_pcie_add_port(btpci);
+}
+
+static int bt1_pcie_remove(struct platform_device *pdev)
+{
+ struct bt1_pcie *btpci = platform_get_drvdata(pdev);
+
+ bt1_pcie_del_port(btpci);
+
+ return 0;
+}
+
+static const struct of_device_id bt1_pcie_of_match[] = {
+ { .compatible = "baikal,bt1-pcie" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, bt1_pcie_of_match);
+
+static struct platform_driver bt1_pcie_driver = {
+ .probe = bt1_pcie_probe,
+ .remove = bt1_pcie_remove,
+ .driver = {
+ .name = "bt1-pcie",
+ .of_match_table = bt1_pcie_of_match,
+ },
+};
+module_platform_driver(bt1_pcie_driver);
+
+MODULE_AUTHOR("Serge Semin <Sergey.Semin@baikalelectronics.ru>");
+MODULE_DESCRIPTION("Baikal-T1 PCIe driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/pci/controller/dwc/pcie-designware-ep.c b/drivers/pci/controller/dwc/pcie-designware-ep.c
index 83ddb190292e..d06654895eba 100644
--- a/drivers/pci/controller/dwc/pcie-designware-ep.c
+++ b/drivers/pci/controller/dwc/pcie-designware-ep.c
@@ -13,8 +13,6 @@
#include <linux/pci-epc.h>
#include <linux/pci-epf.h>
-#include "../../pci.h"
-
void dw_pcie_ep_linkup(struct dw_pcie_ep *ep)
{
struct pci_epc *epc = ep->epc;
@@ -171,8 +169,8 @@ static int dw_pcie_ep_inbound_atu(struct dw_pcie_ep *ep, u8 func_no, int type,
return -EINVAL;
}
- ret = dw_pcie_prog_inbound_atu(pci, func_no, free_win, type,
- cpu_addr, bar);
+ ret = dw_pcie_prog_ep_inbound_atu(pci, func_no, free_win, type,
+ cpu_addr, bar);
if (ret < 0) {
dev_err(pci->dev, "Failed to program IB window\n");
return ret;
@@ -643,7 +641,7 @@ static unsigned int dw_pcie_ep_find_ext_capability(struct dw_pcie *pci, int cap)
int dw_pcie_ep_init_complete(struct dw_pcie_ep *ep)
{
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
- unsigned int offset;
+ unsigned int offset, ptm_cap_base;
unsigned int nbars;
u8 hdr_type;
u32 reg;
@@ -659,6 +657,7 @@ int dw_pcie_ep_init_complete(struct dw_pcie_ep *ep)
}
offset = dw_pcie_ep_find_ext_capability(pci, PCI_EXT_CAP_ID_REBAR);
+ ptm_cap_base = dw_pcie_ep_find_ext_capability(pci, PCI_EXT_CAP_ID_PTM);
dw_pcie_dbi_ro_wr_en(pci);
@@ -671,6 +670,22 @@ int dw_pcie_ep_init_complete(struct dw_pcie_ep *ep)
dw_pcie_writel_dbi(pci, offset + PCI_REBAR_CAP, 0x0);
}
+ /*
+ * PTM responder capability can be disabled only after disabling
+ * PTM root capability.
+ */
+ if (ptm_cap_base) {
+ dw_pcie_dbi_ro_wr_en(pci);
+ reg = dw_pcie_readl_dbi(pci, ptm_cap_base + PCI_PTM_CAP);
+ reg &= ~PCI_PTM_CAP_ROOT;
+ dw_pcie_writel_dbi(pci, ptm_cap_base + PCI_PTM_CAP, reg);
+
+ reg = dw_pcie_readl_dbi(pci, ptm_cap_base + PCI_PTM_CAP);
+ reg &= ~(PCI_PTM_CAP_RES | PCI_PTM_GRANULARITY_MASK);
+ dw_pcie_writel_dbi(pci, ptm_cap_base + PCI_PTM_CAP, reg);
+ dw_pcie_dbi_ro_wr_dis(pci);
+ }
+
dw_pcie_setup(pci);
dw_pcie_dbi_ro_wr_dis(pci);
@@ -694,23 +709,9 @@ int dw_pcie_ep_init(struct dw_pcie_ep *ep)
INIT_LIST_HEAD(&ep->func_list);
- if (!pci->dbi_base) {
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
- pci->dbi_base = devm_pci_remap_cfg_resource(dev, res);
- if (IS_ERR(pci->dbi_base))
- return PTR_ERR(pci->dbi_base);
- }
-
- if (!pci->dbi_base2) {
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi2");
- if (!res) {
- pci->dbi_base2 = pci->dbi_base + SZ_4K;
- } else {
- pci->dbi_base2 = devm_pci_remap_cfg_resource(dev, res);
- if (IS_ERR(pci->dbi_base2))
- return PTR_ERR(pci->dbi_base2);
- }
- }
+ ret = dw_pcie_get_resources(pci);
+ if (ret)
+ return ret;
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "addr_space");
if (!res)
@@ -739,9 +740,6 @@ int dw_pcie_ep_init(struct dw_pcie_ep *ep)
return -ENOMEM;
ep->outbound_addr = addr;
- if (pci->link_gen < 1)
- pci->link_gen = of_pci_get_max_link_speed(np);
-
epc = devm_pci_epc_create(dev, &epc_ops);
if (IS_ERR(epc)) {
dev_err(dev, "Failed to create epc device\n");
diff --git a/drivers/pci/controller/dwc/pcie-designware-host.c b/drivers/pci/controller/dwc/pcie-designware-host.c
index 39f3b37d4033..3ab6ae3712c4 100644
--- a/drivers/pci/controller/dwc/pcie-designware-host.c
+++ b/drivers/pci/controller/dwc/pcie-designware-host.c
@@ -16,7 +16,6 @@
#include <linux/pci_regs.h>
#include <linux/platform_device.h>
-#include "../../pci.h"
#include "pcie-designware.h"
static struct pci_ops dw_pcie_ops;
@@ -395,6 +394,10 @@ int dw_pcie_host_init(struct dw_pcie_rp *pp)
raw_spin_lock_init(&pp->lock);
+ ret = dw_pcie_get_resources(pci);
+ if (ret)
+ return ret;
+
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "config");
if (res) {
pp->cfg0_size = resource_size(res);
@@ -408,13 +411,6 @@ int dw_pcie_host_init(struct dw_pcie_rp *pp)
return -ENODEV;
}
- if (!pci->dbi_base) {
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
- pci->dbi_base = devm_pci_remap_cfg_resource(dev, res);
- if (IS_ERR(pci->dbi_base))
- return PTR_ERR(pci->dbi_base);
- }
-
bridge = devm_pci_alloc_host_bridge(dev, 0);
if (!bridge)
return -ENOMEM;
@@ -429,9 +425,6 @@ int dw_pcie_host_init(struct dw_pcie_rp *pp)
pp->io_base = pci_pio_to_address(win->res->start);
}
- if (pci->link_gen < 1)
- pci->link_gen = of_pci_get_max_link_speed(np);
-
/* Set default bus ops */
bridge->ops = &dw_pcie_ops;
bridge->child_ops = &dw_child_pcie_ops;
@@ -643,12 +636,15 @@ static int dw_pcie_iatu_setup(struct dw_pcie_rp *pp)
}
/*
- * Ensure all outbound windows are disabled before proceeding with
- * the MEM/IO ranges setups.
+ * Ensure all out/inbound windows are disabled before proceeding with
+ * the MEM/IO (dma-)ranges setups.
*/
for (i = 0; i < pci->num_ob_windows; i++)
dw_pcie_disable_atu(pci, PCIE_ATU_REGION_DIR_OB, i);
+ for (i = 0; i < pci->num_ib_windows; i++)
+ dw_pcie_disable_atu(pci, PCIE_ATU_REGION_DIR_IB, i);
+
i = 0;
resource_list_for_each_entry(entry, &pp->bridge->windows) {
if (resource_type(entry->res) != IORESOURCE_MEM)
@@ -685,9 +681,32 @@ static int dw_pcie_iatu_setup(struct dw_pcie_rp *pp)
}
if (pci->num_ob_windows <= i)
- dev_warn(pci->dev, "Resources exceed number of ATU entries (%d)\n",
+ dev_warn(pci->dev, "Ranges exceed outbound iATU size (%d)\n",
pci->num_ob_windows);
+ i = 0;
+ resource_list_for_each_entry(entry, &pp->bridge->dma_ranges) {
+ if (resource_type(entry->res) != IORESOURCE_MEM)
+ continue;
+
+ if (pci->num_ib_windows <= i)
+ break;
+
+ ret = dw_pcie_prog_inbound_atu(pci, i++, PCIE_ATU_TYPE_MEM,
+ entry->res->start,
+ entry->res->start - entry->offset,
+ resource_size(entry->res));
+ if (ret) {
+ dev_err(pci->dev, "Failed to set DMA range %pr\n",
+ entry->res);
+ return ret;
+ }
+ }
+
+ if (pci->num_ib_windows <= i)
+ dev_warn(pci->dev, "Dma-ranges exceed inbound iATU size (%u)\n",
+ pci->num_ib_windows);
+
return 0;
}
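
The dma-ranges loop added above is simply repeated calls into the repartitioned inbound-ATU helper: dw_pcie_prog_inbound_atu() now takes an explicit PCI address and size for Root Complex windows, while the BAR-matching variant lives on as dw_pcie_prog_ep_inbound_atu(). A minimal hand-written equivalent for one window might look like the sketch below; the addresses, size and function name are illustrative only.

static int example_map_one_dma_range(struct dw_pcie *pci)
{
	/*
	 * Expose 256 MiB of system RAM at CPU address 0x8000_0000 to the
	 * bus at PCI address 0x0 through inbound iATU window 0 - the same
	 * operation the resource_list_for_each_entry() loop above performs
	 * for each dma-ranges entry.
	 */
	return dw_pcie_prog_inbound_atu(pci, 0, PCIE_ATU_TYPE_MEM,
					0x80000000, 0x0, SZ_256M);
}
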
diff --git a/drivers/pci/controller/dwc/pcie-designware.c b/drivers/pci/controller/dwc/pcie-designware.c
index c6725c519a47..6d5d619ab2e9 100644
--- a/drivers/pci/controller/dwc/pcie-designware.c
+++ b/drivers/pci/controller/dwc/pcie-designware.c
@@ -10,7 +10,10 @@
#include <linux/align.h>
#include <linux/bitops.h>
+#include <linux/clk.h>
#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/ioport.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/sizes.h>
@@ -19,6 +22,148 @@
#include "../../pci.h"
#include "pcie-designware.h"
+static const char * const dw_pcie_app_clks[DW_PCIE_NUM_APP_CLKS] = {
+ [DW_PCIE_DBI_CLK] = "dbi",
+ [DW_PCIE_MSTR_CLK] = "mstr",
+ [DW_PCIE_SLV_CLK] = "slv",
+};
+
+static const char * const dw_pcie_core_clks[DW_PCIE_NUM_CORE_CLKS] = {
+ [DW_PCIE_PIPE_CLK] = "pipe",
+ [DW_PCIE_CORE_CLK] = "core",
+ [DW_PCIE_AUX_CLK] = "aux",
+ [DW_PCIE_REF_CLK] = "ref",
+};
+
+static const char * const dw_pcie_app_rsts[DW_PCIE_NUM_APP_RSTS] = {
+ [DW_PCIE_DBI_RST] = "dbi",
+ [DW_PCIE_MSTR_RST] = "mstr",
+ [DW_PCIE_SLV_RST] = "slv",
+};
+
+static const char * const dw_pcie_core_rsts[DW_PCIE_NUM_CORE_RSTS] = {
+ [DW_PCIE_NON_STICKY_RST] = "non-sticky",
+ [DW_PCIE_STICKY_RST] = "sticky",
+ [DW_PCIE_CORE_RST] = "core",
+ [DW_PCIE_PIPE_RST] = "pipe",
+ [DW_PCIE_PHY_RST] = "phy",
+ [DW_PCIE_HOT_RST] = "hot",
+ [DW_PCIE_PWR_RST] = "pwr",
+};
+
+static int dw_pcie_get_clocks(struct dw_pcie *pci)
+{
+ int i, ret;
+
+ for (i = 0; i < DW_PCIE_NUM_APP_CLKS; i++)
+ pci->app_clks[i].id = dw_pcie_app_clks[i];
+
+ for (i = 0; i < DW_PCIE_NUM_CORE_CLKS; i++)
+ pci->core_clks[i].id = dw_pcie_core_clks[i];
+
+ ret = devm_clk_bulk_get_optional(pci->dev, DW_PCIE_NUM_APP_CLKS,
+ pci->app_clks);
+ if (ret)
+ return ret;
+
+ return devm_clk_bulk_get_optional(pci->dev, DW_PCIE_NUM_CORE_CLKS,
+ pci->core_clks);
+}
+
+static int dw_pcie_get_resets(struct dw_pcie *pci)
+{
+ int i, ret;
+
+ for (i = 0; i < DW_PCIE_NUM_APP_RSTS; i++)
+ pci->app_rsts[i].id = dw_pcie_app_rsts[i];
+
+ for (i = 0; i < DW_PCIE_NUM_CORE_RSTS; i++)
+ pci->core_rsts[i].id = dw_pcie_core_rsts[i];
+
+ ret = devm_reset_control_bulk_get_optional_shared(pci->dev,
+ DW_PCIE_NUM_APP_RSTS,
+ pci->app_rsts);
+ if (ret)
+ return ret;
+
+ ret = devm_reset_control_bulk_get_optional_exclusive(pci->dev,
+ DW_PCIE_NUM_CORE_RSTS,
+ pci->core_rsts);
+ if (ret)
+ return ret;
+
+ pci->pe_rst = devm_gpiod_get_optional(pci->dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(pci->pe_rst))
+ return PTR_ERR(pci->pe_rst);
+
+ return 0;
+}
+
+int dw_pcie_get_resources(struct dw_pcie *pci)
+{
+ struct platform_device *pdev = to_platform_device(pci->dev);
+ struct device_node *np = dev_of_node(pci->dev);
+ struct resource *res;
+ int ret;
+
+ if (!pci->dbi_base) {
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
+ pci->dbi_base = devm_pci_remap_cfg_resource(pci->dev, res);
+ if (IS_ERR(pci->dbi_base))
+ return PTR_ERR(pci->dbi_base);
+ }
+
+ /* DBI2 is mainly useful for the endpoint controller */
+ if (!pci->dbi_base2) {
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi2");
+ if (res) {
+ pci->dbi_base2 = devm_pci_remap_cfg_resource(pci->dev, res);
+ if (IS_ERR(pci->dbi_base2))
+ return PTR_ERR(pci->dbi_base2);
+ } else {
+ pci->dbi_base2 = pci->dbi_base + SZ_4K;
+ }
+ }
+
+ /* For non-unrolled iATU/eDMA platforms this range will be ignored */
+ if (!pci->atu_base) {
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "atu");
+ if (res) {
+ pci->atu_size = resource_size(res);
+ pci->atu_base = devm_ioremap_resource(pci->dev, res);
+ if (IS_ERR(pci->atu_base))
+ return PTR_ERR(pci->atu_base);
+ } else {
+ pci->atu_base = pci->dbi_base + DEFAULT_DBI_ATU_OFFSET;
+ }
+ }
+
+ /* Set a default value suitable for at most 8 in and 8 out windows */
+ if (!pci->atu_size)
+ pci->atu_size = SZ_4K;
+
+ /* LLDD is supposed to manually switch the clocks and resets state */
+ if (dw_pcie_cap_is(pci, REQ_RES)) {
+ ret = dw_pcie_get_clocks(pci);
+ if (ret)
+ return ret;
+
+ ret = dw_pcie_get_resets(pci);
+ if (ret)
+ return ret;
+ }
+
+ if (pci->link_gen < 1)
+ pci->link_gen = of_pci_get_max_link_speed(np);
+
+ of_property_read_u32(np, "num-lanes", &pci->num_lanes);
+
+ if (of_property_read_bool(np, "snps,enable-cdm-check"))
+ dw_pcie_cap_set(pci, CDM_CHECK);
+
+ return 0;
+}
+
void dw_pcie_version_detect(struct dw_pcie *pci)
{
u32 ver;
@@ -211,7 +356,7 @@ void dw_pcie_write_dbi2(struct dw_pcie *pci, u32 reg, size_t size, u32 val)
static inline void __iomem *dw_pcie_select_atu(struct dw_pcie *pci, u32 dir,
u32 index)
{
- if (pci->iatu_unroll_enabled)
+ if (dw_pcie_cap_is(pci, IATU_UNROLL))
return pci->atu_base + PCIE_ATU_UNROLL_BASE(dir, index);
dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT, dir | index);
@@ -393,8 +538,60 @@ static inline void dw_pcie_writel_atu_ib(struct dw_pcie *pci, u32 index, u32 reg
dw_pcie_writel_atu(pci, PCIE_ATU_REGION_DIR_IB, index, reg, val);
}
-int dw_pcie_prog_inbound_atu(struct dw_pcie *pci, u8 func_no, int index,
- int type, u64 cpu_addr, u8 bar)
+int dw_pcie_prog_inbound_atu(struct dw_pcie *pci, int index, int type,
+ u64 cpu_addr, u64 pci_addr, u64 size)
+{
+ u64 limit_addr = pci_addr + size - 1;
+ u32 retries, val;
+
+ if ((limit_addr & ~pci->region_limit) != (pci_addr & ~pci->region_limit) ||
+ !IS_ALIGNED(cpu_addr, pci->region_align) ||
+ !IS_ALIGNED(pci_addr, pci->region_align) || !size) {
+ return -EINVAL;
+ }
+
+ dw_pcie_writel_atu_ib(pci, index, PCIE_ATU_LOWER_BASE,
+ lower_32_bits(pci_addr));
+ dw_pcie_writel_atu_ib(pci, index, PCIE_ATU_UPPER_BASE,
+ upper_32_bits(pci_addr));
+
+ dw_pcie_writel_atu_ib(pci, index, PCIE_ATU_LIMIT,
+ lower_32_bits(limit_addr));
+ if (dw_pcie_ver_is_ge(pci, 460A))
+ dw_pcie_writel_atu_ib(pci, index, PCIE_ATU_UPPER_LIMIT,
+ upper_32_bits(limit_addr));
+
+ dw_pcie_writel_atu_ib(pci, index, PCIE_ATU_LOWER_TARGET,
+ lower_32_bits(cpu_addr));
+ dw_pcie_writel_atu_ib(pci, index, PCIE_ATU_UPPER_TARGET,
+ upper_32_bits(cpu_addr));
+
+ val = type;
+ if (upper_32_bits(limit_addr) > upper_32_bits(pci_addr) &&
+ dw_pcie_ver_is_ge(pci, 460A))
+ val |= PCIE_ATU_INCREASE_REGION_SIZE;
+ dw_pcie_writel_atu_ib(pci, index, PCIE_ATU_REGION_CTRL1, val);
+ dw_pcie_writel_atu_ib(pci, index, PCIE_ATU_REGION_CTRL2, PCIE_ATU_ENABLE);
+
+ /*
+ * Make sure ATU enable takes effect before any subsequent config
+ * and I/O accesses.
+ */
+ for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) {
+ val = dw_pcie_readl_atu_ib(pci, index, PCIE_ATU_REGION_CTRL2);
+ if (val & PCIE_ATU_ENABLE)
+ return 0;
+
+ mdelay(LINK_WAIT_IATU);
+ }
+
+ dev_err(pci->dev, "Inbound iATU is not being enabled\n");
+
+ return -ETIMEDOUT;
+}
+
+int dw_pcie_prog_ep_inbound_atu(struct dw_pcie *pci, u8 func_no, int index,
+ int type, u64 cpu_addr, u8 bar)
{
u32 retries, val;
@@ -448,7 +645,7 @@ int dw_pcie_wait_for_link(struct dw_pcie *pci)
}
if (retries >= LINK_WAIT_MAX_RETRIES) {
- dev_err(pci->dev, "Phy link never came up\n");
+ dev_info(pci->dev, "Phy link never came up\n");
return -ETIMEDOUT;
}
@@ -522,26 +719,21 @@ static void dw_pcie_link_set_max_speed(struct dw_pcie *pci, u32 link_gen)
}
-static bool dw_pcie_iatu_unroll_enabled(struct dw_pcie *pci)
-{
- u32 val;
-
- val = dw_pcie_readl_dbi(pci, PCIE_ATU_VIEWPORT);
- if (val == 0xffffffff)
- return true;
-
- return false;
-}
-
-static void dw_pcie_iatu_detect_regions(struct dw_pcie *pci)
+void dw_pcie_iatu_detect(struct dw_pcie *pci)
{
int max_region, ob, ib;
u32 val, min, dir;
u64 max;
- if (pci->iatu_unroll_enabled) {
+ val = dw_pcie_readl_dbi(pci, PCIE_ATU_VIEWPORT);
+ if (val == 0xFFFFFFFF) {
+ dw_pcie_cap_set(pci, IATU_UNROLL);
+
max_region = min((int)pci->atu_size / 512, 256);
} else {
+ pci->atu_base = pci->dbi_base + PCIE_ATU_VIEWPORT_BASE;
+ pci->atu_size = PCIE_ATU_VIEWPORT_SIZE;
+
dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT, 0xFF);
max_region = dw_pcie_readl_dbi(pci, PCIE_ATU_VIEWPORT) + 1;
}
@@ -583,46 +775,15 @@ static void dw_pcie_iatu_detect_regions(struct dw_pcie *pci)
pci->num_ib_windows = ib;
pci->region_align = 1 << fls(min);
pci->region_limit = (max << 32) | (SZ_4G - 1);
-}
-void dw_pcie_iatu_detect(struct dw_pcie *pci)
-{
- struct platform_device *pdev = to_platform_device(pci->dev);
-
- pci->iatu_unroll_enabled = dw_pcie_iatu_unroll_enabled(pci);
- if (pci->iatu_unroll_enabled) {
- if (!pci->atu_base) {
- struct resource *res =
- platform_get_resource_byname(pdev, IORESOURCE_MEM, "atu");
- if (res) {
- pci->atu_size = resource_size(res);
- pci->atu_base = devm_ioremap_resource(pci->dev, res);
- }
- if (!pci->atu_base || IS_ERR(pci->atu_base))
- pci->atu_base = pci->dbi_base + DEFAULT_DBI_ATU_OFFSET;
- }
-
- if (!pci->atu_size)
- /* Pick a minimal default, enough for 8 in and 8 out windows */
- pci->atu_size = SZ_4K;
- } else {
- pci->atu_base = pci->dbi_base + PCIE_ATU_VIEWPORT_BASE;
- pci->atu_size = PCIE_ATU_VIEWPORT_SIZE;
- }
-
- dw_pcie_iatu_detect_regions(pci);
-
- dev_info(pci->dev, "iATU unroll: %s\n", pci->iatu_unroll_enabled ?
- "enabled" : "disabled");
-
- dev_info(pci->dev, "iATU regions: %u ob, %u ib, align %uK, limit %lluG\n",
+ dev_info(pci->dev, "iATU: unroll %s, %u ob, %u ib, align %uK, limit %lluG\n",
+ dw_pcie_cap_is(pci, IATU_UNROLL) ? "T" : "F",
pci->num_ob_windows, pci->num_ib_windows,
pci->region_align / SZ_1K, (pci->region_limit + 1) / SZ_1G);
}
void dw_pcie_setup(struct dw_pcie *pci)
{
- struct device_node *np = pci->dev->of_node;
u32 val;
if (pci->link_gen > 0)
@@ -641,7 +802,7 @@ void dw_pcie_setup(struct dw_pcie *pci)
if (pci->n_fts[1]) {
val = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
val &= ~PORT_LOGIC_N_FTS_MASK;
- val |= pci->n_fts[pci->link_gen - 1];
+ val |= pci->n_fts[1];
dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, val);
}
@@ -650,14 +811,13 @@ void dw_pcie_setup(struct dw_pcie *pci)
val |= PORT_LINK_DLL_LINK_EN;
dw_pcie_writel_dbi(pci, PCIE_PORT_LINK_CONTROL, val);
- if (of_property_read_bool(np, "snps,enable-cdm-check")) {
+ if (dw_pcie_cap_is(pci, CDM_CHECK)) {
val = dw_pcie_readl_dbi(pci, PCIE_PL_CHK_REG_CONTROL_STATUS);
val |= PCIE_PL_CHK_REG_CHK_REG_CONTINUOUS |
PCIE_PL_CHK_REG_CHK_REG_START;
dw_pcie_writel_dbi(pci, PCIE_PL_CHK_REG_CONTROL_STATUS, val);
}
- of_property_read_u32(np, "num-lanes", &pci->num_lanes);
if (!pci->num_lanes) {
dev_dbg(pci->dev, "Using h/w default number of lanes\n");
return;
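
With dw_pcie_get_resources() now called from both dw_pcie_host_init() and dw_pcie_ep_init(), a glue driver no longer needs to map the "dbi"/"dbi2"/"atu" regions itself, and setting the REQ_RES capability additionally hands the optional bulk clocks, resets and "reset" GPIO over to the core, as the Baikal-T1 driver does. A stripped-down, hypothetical probe built on that flow could look as follows; the "acme" names are made up, only the dw_pcie_* calls come from the code above.

static const struct dw_pcie_host_ops acme_pcie_host_ops = {
	/* No platform-specific init needed beyond the common resources. */
};

static int acme_pcie_probe(struct platform_device *pdev)
{
	struct dw_pcie *pci;

	pci = devm_kzalloc(&pdev->dev, sizeof(*pci), GFP_KERNEL);
	if (!pci)
		return -ENOMEM;

	pci->dev = &pdev->dev;
	pci->pp.ops = &acme_pcie_host_ops;

	/* Ask the core to also fetch the optional clocks/resets/"reset" GPIO */
	dw_pcie_cap_set(pci, REQ_RES);

	/*
	 * dw_pcie_host_init() -> dw_pcie_get_resources() maps "dbi" (and
	 * "dbi2"/"atu" if present) and parses max-link-speed, num-lanes and
	 * snps,enable-cdm-check from the device tree.
	 */
	return dw_pcie_host_init(&pci->pp);
}
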
diff --git a/drivers/pci/controller/dwc/pcie-designware.h b/drivers/pci/controller/dwc/pcie-designware.h
index a871ae7eb59e..393dfb931df6 100644
--- a/drivers/pci/controller/dwc/pcie-designware.h
+++ b/drivers/pci/controller/dwc/pcie-designware.h
@@ -12,10 +12,14 @@
#define _PCIE_DESIGNWARE_H
#include <linux/bitfield.h>
+#include <linux/bitops.h>
+#include <linux/clk.h>
#include <linux/dma-mapping.h>
+#include <linux/gpio/consumer.h>
#include <linux/irq.h>
#include <linux/msi.h>
#include <linux/pci.h>
+#include <linux/reset.h>
#include <linux/pci-epc.h>
#include <linux/pci-epf.h>
@@ -43,6 +47,17 @@
(__dw_pcie_ver_cmp(_pci, _ver, ==) && \
__dw_pcie_ver_cmp(_pci, TYPE_ ## _type, >=))
+/* DWC PCIe controller capabilities */
+#define DW_PCIE_CAP_REQ_RES 0
+#define DW_PCIE_CAP_IATU_UNROLL 1
+#define DW_PCIE_CAP_CDM_CHECK 2
+
+#define dw_pcie_cap_is(_pci, _cap) \
+ test_bit(DW_PCIE_CAP_ ## _cap, &(_pci)->caps)
+
+#define dw_pcie_cap_set(_pci, _cap) \
+ set_bit(DW_PCIE_CAP_ ## _cap, &(_pci)->caps)
+
/* Parameters for the waiting for link up routine */
#define LINK_WAIT_MAX_RETRIES 10
#define LINK_WAIT_USLEEP_MIN 90000
@@ -222,6 +237,39 @@ enum dw_pcie_device_mode {
DW_PCIE_RC_TYPE,
};
+enum dw_pcie_app_clk {
+ DW_PCIE_DBI_CLK,
+ DW_PCIE_MSTR_CLK,
+ DW_PCIE_SLV_CLK,
+ DW_PCIE_NUM_APP_CLKS
+};
+
+enum dw_pcie_core_clk {
+ DW_PCIE_PIPE_CLK,
+ DW_PCIE_CORE_CLK,
+ DW_PCIE_AUX_CLK,
+ DW_PCIE_REF_CLK,
+ DW_PCIE_NUM_CORE_CLKS
+};
+
+enum dw_pcie_app_rst {
+ DW_PCIE_DBI_RST,
+ DW_PCIE_MSTR_RST,
+ DW_PCIE_SLV_RST,
+ DW_PCIE_NUM_APP_RSTS
+};
+
+enum dw_pcie_core_rst {
+ DW_PCIE_NON_STICKY_RST,
+ DW_PCIE_STICKY_RST,
+ DW_PCIE_CORE_RST,
+ DW_PCIE_PIPE_RST,
+ DW_PCIE_PHY_RST,
+ DW_PCIE_HOT_RST,
+ DW_PCIE_PWR_RST,
+ DW_PCIE_NUM_CORE_RSTS
+};
+
struct dw_pcie_host_ops {
int (*host_init)(struct dw_pcie_rp *pp);
void (*host_deinit)(struct dw_pcie_rp *pp);
@@ -317,10 +365,15 @@ struct dw_pcie {
const struct dw_pcie_ops *ops;
u32 version;
u32 type;
+ unsigned long caps;
int num_lanes;
int link_gen;
u8 n_fts[2];
- bool iatu_unroll_enabled: 1;
+ struct clk_bulk_data app_clks[DW_PCIE_NUM_APP_CLKS];
+ struct clk_bulk_data core_clks[DW_PCIE_NUM_CORE_CLKS];
+ struct reset_control_bulk_data app_rsts[DW_PCIE_NUM_APP_RSTS];
+ struct reset_control_bulk_data core_rsts[DW_PCIE_NUM_CORE_RSTS];
+ struct gpio_desc *pe_rst;
};
#define to_dw_pcie_from_pp(port) container_of((port), struct dw_pcie, pp)
@@ -328,6 +381,8 @@ struct dw_pcie {
#define to_dw_pcie_from_ep(endpoint) \
container_of((endpoint), struct dw_pcie, ep)
+int dw_pcie_get_resources(struct dw_pcie *pci);
+
void dw_pcie_version_detect(struct dw_pcie *pci);
u8 dw_pcie_find_capability(struct dw_pcie *pci, u8 cap);
@@ -346,8 +401,10 @@ int dw_pcie_prog_outbound_atu(struct dw_pcie *pci, int index, int type,
u64 cpu_addr, u64 pci_addr, u64 size);
int dw_pcie_prog_ep_outbound_atu(struct dw_pcie *pci, u8 func_no, int index,
int type, u64 cpu_addr, u64 pci_addr, u64 size);
-int dw_pcie_prog_inbound_atu(struct dw_pcie *pci, u8 func_no, int index,
- int type, u64 cpu_addr, u8 bar);
+int dw_pcie_prog_inbound_atu(struct dw_pcie *pci, int index, int type,
+ u64 cpu_addr, u64 pci_addr, u64 size);
+int dw_pcie_prog_ep_inbound_atu(struct dw_pcie *pci, u8 func_no, int index,
+ int type, u64 cpu_addr, u8 bar);
void dw_pcie_disable_atu(struct dw_pcie *pci, u32 dir, int index);
void dw_pcie_setup(struct dw_pcie *pci);
void dw_pcie_iatu_detect(struct dw_pcie *pci);
diff --git a/drivers/pci/controller/dwc/pcie-histb.c b/drivers/pci/controller/dwc/pcie-histb.c
index e2b80f10030d..43c27812dd6d 100644
--- a/drivers/pci/controller/dwc/pcie-histb.c
+++ b/drivers/pci/controller/dwc/pcie-histb.c
@@ -10,11 +10,11 @@
#include <linux/clk.h>
#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_gpio.h>
#include <linux/pci.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
@@ -60,7 +60,7 @@ struct histb_pcie {
struct reset_control *sys_reset;
struct reset_control *bus_reset;
void __iomem *ctrl;
- int reset_gpio;
+ struct gpio_desc *reset_gpio;
struct regulator *vpcie;
};
@@ -212,8 +212,8 @@ static void histb_pcie_host_disable(struct histb_pcie *hipcie)
clk_disable_unprepare(hipcie->sys_clk);
clk_disable_unprepare(hipcie->bus_clk);
- if (gpio_is_valid(hipcie->reset_gpio))
- gpio_set_value_cansleep(hipcie->reset_gpio, 0);
+ if (hipcie->reset_gpio)
+ gpiod_set_value_cansleep(hipcie->reset_gpio, 1);
if (hipcie->vpcie)
regulator_disable(hipcie->vpcie);
@@ -235,8 +235,8 @@ static int histb_pcie_host_enable(struct dw_pcie_rp *pp)
}
}
- if (gpio_is_valid(hipcie->reset_gpio))
- gpio_set_value_cansleep(hipcie->reset_gpio, 1);
+ if (hipcie->reset_gpio)
+ gpiod_set_value_cansleep(hipcie->reset_gpio, 0);
ret = clk_prepare_enable(hipcie->bus_clk);
if (ret) {
@@ -298,10 +298,7 @@ static int histb_pcie_probe(struct platform_device *pdev)
struct histb_pcie *hipcie;
struct dw_pcie *pci;
struct dw_pcie_rp *pp;
- struct device_node *np = pdev->dev.of_node;
struct device *dev = &pdev->dev;
- enum of_gpio_flags of_flags;
- unsigned long flag = GPIOF_DIR_OUT;
int ret;
hipcie = devm_kzalloc(dev, sizeof(*hipcie), GFP_KERNEL);
@@ -336,17 +333,19 @@ static int histb_pcie_probe(struct platform_device *pdev)
hipcie->vpcie = NULL;
}
- hipcie->reset_gpio = of_get_named_gpio_flags(np,
- "reset-gpios", 0, &of_flags);
- if (of_flags & OF_GPIO_ACTIVE_LOW)
- flag |= GPIOF_ACTIVE_LOW;
- if (gpio_is_valid(hipcie->reset_gpio)) {
- ret = devm_gpio_request_one(dev, hipcie->reset_gpio,
- flag, "PCIe device power control");
- if (ret) {
- dev_err(dev, "unable to request gpio\n");
- return ret;
- }
+ hipcie->reset_gpio = devm_gpiod_get_optional(dev, "reset",
+ GPIOD_OUT_HIGH);
+ ret = PTR_ERR_OR_ZERO(hipcie->reset_gpio);
+ if (ret) {
+ dev_err(dev, "unable to request reset gpio: %d\n", ret);
+ return ret;
+ }
+
+ ret = gpiod_set_consumer_name(hipcie->reset_gpio,
+ "PCIe device power control");
+ if (ret) {
+ dev_err(dev, "unable to set reset gpio name: %d\n", ret);
+ return ret;
}
hipcie->aux_clk = devm_clk_get(dev, "aux");
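
For reference, a minimal sketch of the descriptor-based GPIO consumer pattern that the conversions in this series rely on; this is illustrative only and not part of the patch, and example_deassert_reset() is a hypothetical helper:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/gpio/consumer.h>

static int example_deassert_reset(struct device *dev)
{
	struct gpio_desc *reset;

	/* "reset" maps to the "reset-gpios" DT property; NULL when absent */
	reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
	if (IS_ERR(reset))
		return PTR_ERR(reset);

	/* Logical value: gpiolib applies the DT active-low flag itself. */
	if (reset)
		gpiod_set_value_cansleep(reset, 0);

	return 0;
}

Because gpiolib handles the polarity, the driver can drop the of_gpio flag handling and simply write logical 0/1 instead of raw levels, which is why the set_value calls above flip between the legacy and gpiod variants.
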
diff --git a/drivers/pci/controller/dwc/pcie-qcom.c b/drivers/pci/controller/dwc/pcie-qcom.c
index 7db94a22238d..77e5dc7b88ad 100644
--- a/drivers/pci/controller/dwc/pcie-qcom.c
+++ b/drivers/pci/controller/dwc/pcie-qcom.c
@@ -12,6 +12,7 @@
#include <linux/crc8.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
+#include <linux/interconnect.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
@@ -224,6 +225,7 @@ struct qcom_pcie {
union qcom_pcie_resources res;
struct phy *phy;
struct gpio_desc *reset;
+ struct icc_path *icc_mem;
const struct qcom_pcie_cfg *cfg;
};
@@ -1237,7 +1239,7 @@ static int qcom_pcie_init_2_7_0(struct qcom_pcie *pcie)
ret = reset_control_assert(res->pci_reset);
if (ret < 0) {
- dev_err(dev, "cannot deassert pci reset\n");
+ dev_err(dev, "cannot assert pci reset\n");
goto err_disable_clocks;
}
@@ -1644,6 +1646,74 @@ static const struct dw_pcie_ops dw_pcie_ops = {
.start_link = qcom_pcie_start_link,
};
+static int qcom_pcie_icc_init(struct qcom_pcie *pcie)
+{
+ struct dw_pcie *pci = pcie->pci;
+ int ret;
+
+ pcie->icc_mem = devm_of_icc_get(pci->dev, "pcie-mem");
+ if (IS_ERR(pcie->icc_mem))
+ return PTR_ERR(pcie->icc_mem);
+
+ /*
+ * Some Qualcomm platforms require interconnect bandwidth constraints
+ * to be set before enabling interconnect clocks.
+ *
+ * Set an initial peak bandwidth corresponding to single-lane Gen 1
+ * for the pcie-mem path.
+ */
+ ret = icc_set_bw(pcie->icc_mem, 0, MBps_to_icc(250));
+ if (ret) {
+ dev_err(pci->dev, "failed to set interconnect bandwidth: %d\n",
+ ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void qcom_pcie_icc_update(struct qcom_pcie *pcie)
+{
+ struct dw_pcie *pci = pcie->pci;
+ u32 offset, status, bw;
+ int speed, width;
+ int ret;
+
+ if (!pcie->icc_mem)
+ return;
+
+ offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
+ status = readw(pci->dbi_base + offset + PCI_EXP_LNKSTA);
+
+ /* Only update constraints if link is up. */
+ if (!(status & PCI_EXP_LNKSTA_DLLLA))
+ return;
+
+ speed = FIELD_GET(PCI_EXP_LNKSTA_CLS, status);
+ width = FIELD_GET(PCI_EXP_LNKSTA_NLW, status);
+
+ switch (speed) {
+ case 1:
+ bw = MBps_to_icc(250);
+ break;
+ case 2:
+ bw = MBps_to_icc(500);
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ fallthrough;
+ case 3:
+ bw = MBps_to_icc(985);
+ break;
+ }
+
+ ret = icc_set_bw(pcie->icc_mem, 0, width * bw);
+ if (ret) {
+ dev_err(pci->dev, "failed to set interconnect bandwidth: %d\n",
+ ret);
+ }
+}
+
static int qcom_pcie_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -1704,6 +1774,10 @@ static int qcom_pcie_probe(struct platform_device *pdev)
goto err_pm_runtime_put;
}
+ ret = qcom_pcie_icc_init(pcie);
+ if (ret)
+ goto err_pm_runtime_put;
+
ret = pcie->cfg->ops->get_resources(pcie);
if (ret)
goto err_pm_runtime_put;
@@ -1722,6 +1796,8 @@ static int qcom_pcie_probe(struct platform_device *pdev)
goto err_phy_exit;
}
+ qcom_pcie_icc_update(pcie);
+
return 0;
err_phy_exit:
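
As a rough illustration of the scaling that qcom_pcie_icc_update() performs (a sketch only, not part of the patch; example_pcie_mem_bw() is a hypothetical helper), the negotiated speed and width from the Link Status register translate to an interconnect bandwidth request roughly as follows:

#include <linux/bitfield.h>
#include <linux/interconnect.h>
#include <linux/pci.h>

static u32 example_pcie_mem_bw(u16 lnksta)
{
	int speed = FIELD_GET(PCI_EXP_LNKSTA_CLS, lnksta);
	int width = FIELD_GET(PCI_EXP_LNKSTA_NLW, lnksta);
	u32 per_lane;

	switch (speed) {
	case 1:
		per_lane = MBps_to_icc(250);	/* 2.5 GT/s, 8b/10b */
		break;
	case 2:
		per_lane = MBps_to_icc(500);	/* 5.0 GT/s, 8b/10b */
		break;
	default:
		per_lane = MBps_to_icc(985);	/* 8.0 GT/s, 128b/130b */
		break;
	}

	/* Passed as the peak bandwidth argument to icc_set_bw(). */
	return width * per_lane;
}

The init path votes for the single-lane Gen 1 value (250 MB/s) before the link comes up, and the update path rescales the vote once the actual speed and width are known.
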
diff --git a/drivers/pci/controller/dwc/pcie-tegra194.c b/drivers/pci/controller/dwc/pcie-tegra194.c
index 1b6b437823d2..02d78a12b6e7 100644
--- a/drivers/pci/controller/dwc/pcie-tegra194.c
+++ b/drivers/pci/controller/dwc/pcie-tegra194.c
@@ -21,7 +21,6 @@
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_gpio.h>
-#include <linux/of_irq.h>
#include <linux/of_pci.h>
#include <linux/pci.h>
#include <linux/phy/phy.h>
diff --git a/drivers/pci/controller/mobiveil/Kconfig b/drivers/pci/controller/mobiveil/Kconfig
index e4643fb94e78..1d7a07ba9ccd 100644
--- a/drivers/pci/controller/mobiveil/Kconfig
+++ b/drivers/pci/controller/mobiveil/Kconfig
@@ -8,14 +8,14 @@ config PCIE_MOBIVEIL
config PCIE_MOBIVEIL_HOST
bool
- depends on PCI_MSI_IRQ_DOMAIN
+ depends on PCI_MSI
select PCIE_MOBIVEIL
config PCIE_MOBIVEIL_PLAT
bool "Mobiveil AXI PCIe controller"
depends on ARCH_ZYNQMP || COMPILE_TEST
depends on OF
- depends on PCI_MSI_IRQ_DOMAIN
+ depends on PCI_MSI
select PCIE_MOBIVEIL_HOST
help
Say Y here if you want to enable support for the Mobiveil AXI PCIe
@@ -25,7 +25,7 @@ config PCIE_MOBIVEIL_PLAT
config PCIE_LAYERSCAPE_GEN4
bool "Freescale Layerscape PCIe Gen4 controller"
depends on ARCH_LAYERSCAPE || COMPILE_TEST
- depends on PCI_MSI_IRQ_DOMAIN
+ depends on PCI_MSI
select PCIE_MOBIVEIL_HOST
help
Say Y here if you want PCIe Gen4 controller support on
diff --git a/drivers/pci/controller/pci-aardvark.c b/drivers/pci/controller/pci-aardvark.c
index ba36bbc5897d..513d8edf3a5c 100644
--- a/drivers/pci/controller/pci-aardvark.c
+++ b/drivers/pci/controller/pci-aardvark.c
@@ -1859,20 +1859,18 @@ static int advk_pcie_probe(struct platform_device *pdev)
return ret;
}
- pcie->reset_gpio = devm_gpiod_get_from_of_node(dev, dev->of_node,
- "reset-gpios", 0,
- GPIOD_OUT_LOW,
- "pcie1-reset");
+ pcie->reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
ret = PTR_ERR_OR_ZERO(pcie->reset_gpio);
if (ret) {
- if (ret == -ENOENT) {
- pcie->reset_gpio = NULL;
- } else {
- if (ret != -EPROBE_DEFER)
- dev_err(dev, "Failed to get reset-gpio: %i\n",
- ret);
- return ret;
- }
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "Failed to get reset-gpio: %i\n", ret);
+ return ret;
+ }
+
+ ret = gpiod_set_consumer_name(pcie->reset_gpio, "pcie1-reset");
+ if (ret) {
+ dev_err(dev, "Failed to set reset gpio name: %d\n", ret);
+ return ret;
}
ret = of_pci_get_max_link_speed(dev->of_node);
diff --git a/drivers/pci/controller/pci-ftpci100.c b/drivers/pci/controller/pci-ftpci100.c
index 0cfd9d5a497c..ecd3009df586 100644
--- a/drivers/pci/controller/pci-ftpci100.c
+++ b/drivers/pci/controller/pci-ftpci100.c
@@ -553,7 +553,7 @@ static const struct of_device_id faraday_pci_of_match[] = {
static struct platform_driver faraday_pci_driver = {
.driver = {
.name = "ftpci100",
- .of_match_table = of_match_ptr(faraday_pci_of_match),
+ .of_match_table = faraday_pci_of_match,
.suppress_bind_attrs = true,
},
.probe = faraday_pci_probe,
diff --git a/drivers/pci/controller/pci-hyperv.c b/drivers/pci/controller/pci-hyperv.c
index e7c6f6629e7c..084f5313895c 100644
--- a/drivers/pci/controller/pci-hyperv.c
+++ b/drivers/pci/controller/pci-hyperv.c
@@ -611,20 +611,7 @@ static unsigned int hv_msi_get_int_vector(struct irq_data *data)
return cfg->vector;
}
-static int hv_msi_prepare(struct irq_domain *domain, struct device *dev,
- int nvec, msi_alloc_info_t *info)
-{
- int ret = pci_msi_prepare(domain, dev, nvec, info);
-
- /*
- * By using the interrupt remapper in the hypervisor IOMMU, contiguous
- * CPU vectors is not needed for multi-MSI
- */
- if (info->type == X86_IRQ_ALLOC_TYPE_PCI_MSI)
- info->flags &= ~X86_IRQ_ALLOC_CONTIGUOUS_VECTORS;
-
- return ret;
-}
+#define hv_msi_prepare pci_msi_prepare
/**
* hv_arch_irq_unmask() - "Unmask" the IRQ by setting its current
@@ -735,9 +722,9 @@ exit_unlock:
* during hibernation does not matter (at this time all the devices
* have been frozen). Note: the correct affinity info is still updated
* into the irqdata data structure in migrate_one_irq() ->
- * irq_do_set_affinity() -> hv_set_affinity(), so later when the VM
- * resumes, hv_pci_restore_msi_state() is able to correctly restore
- * the interrupt with the correct affinity.
+ * irq_do_set_affinity(), so later when the VM resumes,
+ * hv_pci_restore_msi_state() is able to correctly restore the
+ * interrupt with the correct affinity.
*/
if (!hv_result_success(res) && hbus->state != hv_pcibus_removing)
dev_err(&hbus->hdev->device,
@@ -1613,8 +1600,8 @@ out:
}
static u32 hv_compose_msi_req_v1(
- struct pci_create_interrupt *int_pkt, const struct cpumask *affinity,
- u32 slot, u8 vector, u8 vector_count)
+ struct pci_create_interrupt *int_pkt,
+ u32 slot, u8 vector, u16 vector_count)
{
int_pkt->message_type.type = PCI_CREATE_INTERRUPT_MESSAGE;
int_pkt->wslot.slot = slot;
@@ -1632,6 +1619,35 @@ static u32 hv_compose_msi_req_v1(
}
/*
+ * The vCPU selected by hv_compose_multi_msi_req_get_cpu() and
+ * hv_compose_msi_req_get_cpu() is a "dummy" vCPU because the final vCPU to be
+ * interrupted is specified later in hv_irq_unmask() and communicated to Hyper-V
+ * via the HVCALL_RETARGET_INTERRUPT hypercall. But the choice of dummy vCPU is
+ * not irrelevant because Hyper-V chooses the physical CPU to handle the
+ * interrupts based on the vCPU specified in message sent to the vPCI VSP in
+ * hv_compose_msi_msg(). Hyper-V's choice of pCPU is not visible to the guest,
+ * but assigning too many vPCI device interrupts to the same pCPU can cause a
+ * performance bottleneck. So we spread out the dummy vCPUs to influence Hyper-V
+ * to spread out the pCPUs that it selects.
+ *
+ * For the single-MSI and MSI-X cases, it's OK for hv_compose_msi_req_get_cpu()
+ * to always return the same dummy vCPU, because a second call to
+ * hv_compose_msi_msg() contains the "real" vCPU, causing Hyper-V to choose a
+ * new pCPU for the interrupt. But for the multi-MSI case, the second call to
+ * hv_compose_msi_msg() exits without sending a message to the vPCI VSP, so the
+ * original dummy vCPU is used. This dummy vCPU must be round-robin'ed so that
+ * the pCPUs are spread out. All interrupts for a multi-MSI device end up using
+ * the same pCPU, even though the vCPUs will be spread out by later calls
+ * to hv_irq_unmask(); that is the best we can do for now.
+ *
+ * With Hyper-V in Nov 2022, the HVCALL_RETARGET_INTERRUPT hypercall does *not*
+ * cause Hyper-V to reselect the pCPU based on the specified vCPU. Such an
+ * enhancement is planned for a future version. With that enhancement, the
+ * dummy vCPU selection won't matter, and interrupts for the same multi-MSI
+ * device will be spread across multiple pCPUs.
+ */
+
+/*
* Create MSI w/ dummy vCPU set targeting just one vCPU, overwritten
* by subsequent retarget in hv_irq_unmask().
*/
@@ -1640,18 +1656,39 @@ static int hv_compose_msi_req_get_cpu(const struct cpumask *affinity)
return cpumask_first_and(affinity, cpu_online_mask);
}
-static u32 hv_compose_msi_req_v2(
- struct pci_create_interrupt2 *int_pkt, const struct cpumask *affinity,
- u32 slot, u8 vector, u8 vector_count)
+/*
+ * Make sure the dummy vCPU values for multi-MSI don't all point to vCPU0.
+ */
+static int hv_compose_multi_msi_req_get_cpu(void)
{
+ static DEFINE_SPINLOCK(multi_msi_cpu_lock);
+
+ /* -1 means starting with CPU 0 */
+ static int cpu_next = -1;
+
+ unsigned long flags;
int cpu;
+ spin_lock_irqsave(&multi_msi_cpu_lock, flags);
+
+ cpu_next = cpumask_next_wrap(cpu_next, cpu_online_mask, nr_cpu_ids,
+ false);
+ cpu = cpu_next;
+
+ spin_unlock_irqrestore(&multi_msi_cpu_lock, flags);
+
+ return cpu;
+}
+
+static u32 hv_compose_msi_req_v2(
+ struct pci_create_interrupt2 *int_pkt, int cpu,
+ u32 slot, u8 vector, u16 vector_count)
+{
int_pkt->message_type.type = PCI_CREATE_INTERRUPT_MESSAGE2;
int_pkt->wslot.slot = slot;
int_pkt->int_desc.vector = vector;
int_pkt->int_desc.vector_count = vector_count;
int_pkt->int_desc.delivery_mode = DELIVERY_MODE;
- cpu = hv_compose_msi_req_get_cpu(affinity);
int_pkt->int_desc.processor_array[0] =
hv_cpu_number_to_vp_number(cpu);
int_pkt->int_desc.processor_count = 1;
@@ -1660,18 +1697,15 @@ static u32 hv_compose_msi_req_v2(
}
static u32 hv_compose_msi_req_v3(
- struct pci_create_interrupt3 *int_pkt, const struct cpumask *affinity,
- u32 slot, u32 vector, u8 vector_count)
+ struct pci_create_interrupt3 *int_pkt, int cpu,
+ u32 slot, u32 vector, u16 vector_count)
{
- int cpu;
-
int_pkt->message_type.type = PCI_CREATE_INTERRUPT_MESSAGE3;
int_pkt->wslot.slot = slot;
int_pkt->int_desc.vector = vector;
int_pkt->int_desc.reserved = 0;
int_pkt->int_desc.vector_count = vector_count;
int_pkt->int_desc.delivery_mode = DELIVERY_MODE;
- cpu = hv_compose_msi_req_get_cpu(affinity);
int_pkt->int_desc.processor_array[0] =
hv_cpu_number_to_vp_number(cpu);
int_pkt->int_desc.processor_count = 1;
@@ -1701,7 +1735,12 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
struct compose_comp_ctxt comp;
struct tran_int_desc *int_desc;
struct msi_desc *msi_desc;
- u8 vector, vector_count;
+ /*
+ * vector_count should be u16: see hv_msi_desc, hv_msi_desc2
+ * and hv_msi_desc3. vector must be u32: see hv_msi_desc3.
+ */
+ u16 vector_count;
+ u32 vector;
struct {
struct pci_packet pci_pkt;
union {
@@ -1710,12 +1749,18 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
struct pci_create_interrupt3 v3;
} int_pkts;
} __packed ctxt;
+ bool multi_msi;
u64 trans_id;
u32 size;
int ret;
+ int cpu;
+
+ msi_desc = irq_data_get_msi_desc(data);
+ multi_msi = !msi_desc->pci.msi_attrib.is_msix &&
+ msi_desc->nvec_used > 1;
/* Reuse the previous allocation */
- if (data->chip_data) {
+ if (data->chip_data && multi_msi) {
int_desc = data->chip_data;
msg->address_hi = int_desc->address >> 32;
msg->address_lo = int_desc->address & 0xffffffff;
@@ -1723,7 +1768,6 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
return;
}
- msi_desc = irq_data_get_msi_desc(data);
pdev = msi_desc_to_pci_dev(msi_desc);
dest = irq_data_get_effective_affinity_mask(data);
pbus = pdev->bus;
@@ -1733,11 +1777,18 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
if (!hpdev)
goto return_null_message;
+ /* Free any previous message that might have already been composed. */
+ if (data->chip_data && !multi_msi) {
+ int_desc = data->chip_data;
+ data->chip_data = NULL;
+ hv_int_desc_free(hpdev, int_desc);
+ }
+
int_desc = kzalloc(sizeof(*int_desc), GFP_ATOMIC);
if (!int_desc)
goto drop_reference;
- if (!msi_desc->pci.msi_attrib.is_msix && msi_desc->nvec_used > 1) {
+ if (multi_msi) {
/*
* If this is not the first MSI of Multi MSI, we already have
* a mapping. Can exit early.
@@ -1762,11 +1813,18 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
*/
vector = 32;
vector_count = msi_desc->nvec_used;
+ cpu = hv_compose_multi_msi_req_get_cpu();
} else {
vector = hv_msi_get_int_vector(data);
vector_count = 1;
+ cpu = hv_compose_msi_req_get_cpu(dest);
}
+ /*
+ * hv_compose_msi_req_v1 and v2 are for x86 only, meaning 'vector'
+ * can't exceed u8. Cast 'vector' down to u8 for v1/v2 explicitly
+ * for better readability.
+ */
memset(&ctxt, 0, sizeof(ctxt));
init_completion(&comp.comp_pkt.host_event);
ctxt.pci_pkt.completion_func = hv_pci_compose_compl;
@@ -1775,24 +1833,23 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
switch (hbus->protocol_version) {
case PCI_PROTOCOL_VERSION_1_1:
size = hv_compose_msi_req_v1(&ctxt.int_pkts.v1,
- dest,
hpdev->desc.win_slot.slot,
- vector,
+ (u8)vector,
vector_count);
break;
case PCI_PROTOCOL_VERSION_1_2:
case PCI_PROTOCOL_VERSION_1_3:
size = hv_compose_msi_req_v2(&ctxt.int_pkts.v2,
- dest,
+ cpu,
hpdev->desc.win_slot.slot,
- vector,
+ (u8)vector,
vector_count);
break;
case PCI_PROTOCOL_VERSION_1_4:
size = hv_compose_msi_req_v3(&ctxt.int_pkts.v3,
- dest,
+ cpu,
hpdev->desc.win_slot.slot,
vector,
vector_count);
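
A note on the cpumask_next_wrap() call used in hv_compose_multi_msi_req_get_cpu() above (illustrative sketch, not part of the patch; example_next_online_cpu() is a hypothetical helper): with the four-argument form of this kernel generation, passing start = nr_cpu_ids and wrap = false returns the next online CPU after the cursor and wraps back to the lowest online CPU after the last one, so with CPUs 0-3 online successive calls yield 0, 1, 2, 3, 0, and so on.

#include <linux/cpumask.h>

/* Advance a round-robin cursor over the currently online CPUs. */
static int example_next_online_cpu(int prev)
{
	return cpumask_next_wrap(prev, cpu_online_mask, nr_cpu_ids, false);
}
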
diff --git a/drivers/pci/controller/pci-mvebu.c b/drivers/pci/controller/pci-mvebu.c
index 1ced73726a26..1dc209f6f53a 100644
--- a/drivers/pci/controller/pci-mvebu.c
+++ b/drivers/pci/controller/pci-mvebu.c
@@ -11,14 +11,15 @@
#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/delay.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/init.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/irqdomain.h>
#include <linux/mbus.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
-#include <linux/of_gpio.h>
#include <linux/of_pci.h>
#include <linux/of_platform.h>
@@ -1261,9 +1262,8 @@ static int mvebu_pcie_parse_port(struct mvebu_pcie *pcie,
struct mvebu_pcie_port *port, struct device_node *child)
{
struct device *dev = &pcie->pdev->dev;
- enum of_gpio_flags flags;
u32 slot_power_limit;
- int reset_gpio, ret;
+ int ret;
u32 num_lanes;
port->pcie = pcie;
@@ -1327,40 +1327,24 @@ static int mvebu_pcie_parse_port(struct mvebu_pcie *pcie,
port->name, child);
}
- reset_gpio = of_get_named_gpio_flags(child, "reset-gpios", 0, &flags);
- if (reset_gpio == -EPROBE_DEFER) {
- ret = reset_gpio;
+ port->reset_name = devm_kasprintf(dev, GFP_KERNEL, "%s-reset",
+ port->name);
+ if (!port->reset_name) {
+ ret = -ENOMEM;
goto err;
}
- if (gpio_is_valid(reset_gpio)) {
- unsigned long gpio_flags;
-
- port->reset_name = devm_kasprintf(dev, GFP_KERNEL, "%s-reset",
- port->name);
- if (!port->reset_name) {
- ret = -ENOMEM;
+ port->reset_gpio = devm_fwnode_gpiod_get(dev, of_fwnode_handle(child),
+ "reset", GPIOD_OUT_HIGH,
+ port->name);
+ ret = PTR_ERR_OR_ZERO(port->reset_gpio);
+ if (ret) {
+ if (ret != -ENOENT)
goto err;
- }
-
- if (flags & OF_GPIO_ACTIVE_LOW) {
- dev_info(dev, "%pOF: reset gpio is active low\n",
- child);
- gpio_flags = GPIOF_ACTIVE_LOW |
- GPIOF_OUT_INIT_LOW;
- } else {
- gpio_flags = GPIOF_OUT_INIT_HIGH;
- }
-
- ret = devm_gpio_request_one(dev, reset_gpio, gpio_flags,
- port->reset_name);
- if (ret) {
- if (ret == -EPROBE_DEFER)
- goto err;
- goto skip;
- }
-
- port->reset_gpio = gpio_to_desc(reset_gpio);
+ /* reset gpio is optional */
+ port->reset_gpio = NULL;
+ devm_kfree(dev, port->reset_name);
+ port->reset_name = NULL;
}
slot_power_limit = of_pci_get_slot_power_limit(child,
diff --git a/drivers/pci/controller/pci-tegra.c b/drivers/pci/controller/pci-tegra.c
index 24478ae5a345..929f9363e94b 100644
--- a/drivers/pci/controller/pci-tegra.c
+++ b/drivers/pci/controller/pci-tegra.c
@@ -415,6 +415,13 @@ static inline u32 pads_readl(struct tegra_pcie *pcie, unsigned long offset)
* address (access to which generates correct config transaction) falls in
* this 4 KiB region.
*/
+static unsigned int tegra_pcie_conf_offset(u8 bus, unsigned int devfn,
+ unsigned int where)
+{
+ return ((where & 0xf00) << 16) | (bus << 16) | (PCI_SLOT(devfn) << 11) |
+ (PCI_FUNC(devfn) << 8) | (where & 0xff);
+}
+
static void __iomem *tegra_pcie_map_bus(struct pci_bus *bus,
unsigned int devfn,
int where)
@@ -436,9 +443,7 @@ static void __iomem *tegra_pcie_map_bus(struct pci_bus *bus,
unsigned int offset;
u32 base;
- offset = PCI_CONF1_EXT_ADDRESS(bus->number, PCI_SLOT(devfn),
- PCI_FUNC(devfn), where) &
- ~PCI_CONF1_ENABLE;
+ offset = tegra_pcie_conf_offset(bus->number, devfn, where);
/* move 4 KiB window to offset within the FPCI region */
base = 0xfe100000 + ((offset & ~(SZ_4K - 1)) >> 8);
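
A worked example of the encoding computed by tegra_pcie_conf_offset() above (illustration only, not part of the patch): for bus 1, devfn = PCI_DEVFN(2, 0) and extended config register 0x104,

	((0x104 & 0xf00) << 16) | (1 << 16) | (2 << 11) | (0 << 8) | (0x104 & 0xff)
		= 0x01000000 | 0x00010000 | 0x00001000 | 0x00000000 | 0x00000004
		= 0x01011004

so extended register bits [11:8] land in bits [27:24], the bus number in [23:16], the device in [15:11], the function in [10:8], and the full low byte of the register offset, including its two least significant bits, in [7:0].
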
@@ -2197,10 +2202,11 @@ static int tegra_pcie_parse_dt(struct tegra_pcie *pcie)
* and in this case fall back to using AFI per port register
* to toggle PERST# SFIO line.
*/
- rp->reset_gpio = devm_gpiod_get_from_of_node(dev, port,
- "reset-gpios", 0,
- GPIOD_OUT_LOW,
- label);
+ rp->reset_gpio = devm_fwnode_gpiod_get(dev,
+ of_fwnode_handle(port),
+ "reset",
+ GPIOD_OUT_LOW,
+ label);
if (IS_ERR(rp->reset_gpio)) {
if (PTR_ERR(rp->reset_gpio) == -ENOENT) {
rp->reset_gpio = NULL;
diff --git a/drivers/pci/controller/pci-v3-semi.c b/drivers/pci/controller/pci-v3-semi.c
index 154a5398633c..ca44b0c83d1b 100644
--- a/drivers/pci/controller/pci-v3-semi.c
+++ b/drivers/pci/controller/pci-v3-semi.c
@@ -22,7 +22,6 @@
#include <linux/kernel.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
-#include <linux/of_irq.h>
#include <linux/of_pci.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
@@ -902,7 +901,7 @@ static const struct of_device_id v3_pci_of_match[] = {
static struct platform_driver v3_pci_driver = {
.driver = {
.name = "pci-v3-semi",
- .of_match_table = of_match_ptr(v3_pci_of_match),
+ .of_match_table = v3_pci_of_match,
.suppress_bind_attrs = true,
},
.probe = v3_pci_probe,
diff --git a/drivers/pci/controller/pci-xgene-msi.c b/drivers/pci/controller/pci-xgene-msi.c
index bfa259781b69..d7987b281f79 100644
--- a/drivers/pci/controller/pci-xgene-msi.c
+++ b/drivers/pci/controller/pci-xgene-msi.c
@@ -8,9 +8,9 @@
*/
#include <linux/cpu.h>
#include <linux/interrupt.h>
+#include <linux/irqdomain.h>
#include <linux/module.h>
#include <linux/msi.h>
-#include <linux/of_irq.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
diff --git a/drivers/pci/controller/pci-xgene.c b/drivers/pci/controller/pci-xgene.c
index 549d3bd6d1c2..887b4941ff32 100644
--- a/drivers/pci/controller/pci-xgene.c
+++ b/drivers/pci/controller/pci-xgene.c
@@ -14,7 +14,6 @@
#include <linux/init.h>
#include <linux/of.h>
#include <linux/of_address.h>
-#include <linux/of_irq.h>
#include <linux/of_pci.h>
#include <linux/pci.h>
#include <linux/pci-acpi.h>
diff --git a/drivers/pci/controller/pcie-altera-msi.c b/drivers/pci/controller/pcie-altera-msi.c
index 7b1d3ebc34ec..65e8a20cc442 100644
--- a/drivers/pci/controller/pcie-altera-msi.c
+++ b/drivers/pci/controller/pcie-altera-msi.c
@@ -9,11 +9,11 @@
#include <linux/interrupt.h>
#include <linux/irqchip/chained_irq.h>
+#include <linux/irqdomain.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/msi.h>
#include <linux/of_address.h>
-#include <linux/of_irq.h>
#include <linux/of_pci.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
diff --git a/drivers/pci/controller/pcie-brcmstb.c b/drivers/pci/controller/pcie-brcmstb.c
index 521acd632f1a..edf283e2b5dd 100644
--- a/drivers/pci/controller/pcie-brcmstb.c
+++ b/drivers/pci/controller/pcie-brcmstb.c
@@ -9,6 +9,7 @@
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
+#include <linux/iopoll.h>
#include <linux/ioport.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
@@ -52,6 +53,8 @@
#define PCIE_RC_DL_MDIO_RD_DATA 0x1108
#define PCIE_MISC_MISC_CTRL 0x4008
+#define PCIE_MISC_MISC_CTRL_PCIE_RCB_64B_MODE_MASK 0x80
+#define PCIE_MISC_MISC_CTRL_PCIE_RCB_MPS_MODE_MASK 0x400
#define PCIE_MISC_MISC_CTRL_SCB_ACCESS_EN_MASK 0x1000
#define PCIE_MISC_MISC_CTRL_CFG_READ_UR_MODE_MASK 0x2000
#define PCIE_MISC_MISC_CTRL_MAX_BURST_SIZE_MASK 0x300000
@@ -302,42 +305,34 @@ static u32 brcm_pcie_mdio_form_pkt(int port, int regad, int cmd)
/* negative return value indicates error */
static int brcm_pcie_mdio_read(void __iomem *base, u8 port, u8 regad, u32 *val)
{
- int tries;
u32 data;
+ int err;
writel(brcm_pcie_mdio_form_pkt(port, regad, MDIO_CMD_READ),
base + PCIE_RC_DL_MDIO_ADDR);
readl(base + PCIE_RC_DL_MDIO_ADDR);
-
- data = readl(base + PCIE_RC_DL_MDIO_RD_DATA);
- for (tries = 0; !MDIO_RD_DONE(data) && tries < 10; tries++) {
- udelay(10);
- data = readl(base + PCIE_RC_DL_MDIO_RD_DATA);
- }
-
+ err = readl_poll_timeout_atomic(base + PCIE_RC_DL_MDIO_RD_DATA, data,
+ MDIO_RD_DONE(data), 10, 100);
*val = FIELD_GET(MDIO_DATA_MASK, data);
- return MDIO_RD_DONE(data) ? 0 : -EIO;
+
+ return err;
}
/* negative return value indicates error */
static int brcm_pcie_mdio_write(void __iomem *base, u8 port,
u8 regad, u16 wrdata)
{
- int tries;
u32 data;
+ int err;
writel(brcm_pcie_mdio_form_pkt(port, regad, MDIO_CMD_WRITE),
base + PCIE_RC_DL_MDIO_ADDR);
readl(base + PCIE_RC_DL_MDIO_ADDR);
writel(MDIO_DATA_DONE_MASK | wrdata, base + PCIE_RC_DL_MDIO_WR_DATA);
- data = readl(base + PCIE_RC_DL_MDIO_WR_DATA);
- for (tries = 0; !MDIO_WT_DONE(data) && tries < 10; tries++) {
- udelay(10);
- data = readl(base + PCIE_RC_DL_MDIO_WR_DATA);
- }
-
- return MDIO_WT_DONE(data) ? 0 : -EIO;
+ err = readw_poll_timeout_atomic(base + PCIE_RC_DL_MDIO_WR_DATA, data,
+ MDIO_WT_DONE(data), 10, 100);
+ return err;
}
/*
@@ -445,7 +440,8 @@ static struct irq_chip brcm_msi_irq_chip = {
static struct msi_domain_info brcm_msi_domain_info = {
/* Multi MSI is supported by the controller, but not by this driver */
- .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS),
+ .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
+ MSI_FLAG_MULTI_PCI_MSI),
.chip = &brcm_msi_irq_chip,
};
@@ -505,21 +501,23 @@ static struct irq_chip brcm_msi_bottom_irq_chip = {
.irq_ack = brcm_msi_ack_irq,
};
-static int brcm_msi_alloc(struct brcm_msi *msi)
+static int brcm_msi_alloc(struct brcm_msi *msi, unsigned int nr_irqs)
{
int hwirq;
mutex_lock(&msi->lock);
- hwirq = bitmap_find_free_region(msi->used, msi->nr, 0);
+ hwirq = bitmap_find_free_region(msi->used, msi->nr,
+ order_base_2(nr_irqs));
mutex_unlock(&msi->lock);
return hwirq;
}
-static void brcm_msi_free(struct brcm_msi *msi, unsigned long hwirq)
+static void brcm_msi_free(struct brcm_msi *msi, unsigned long hwirq,
+ unsigned int nr_irqs)
{
mutex_lock(&msi->lock);
- bitmap_release_region(msi->used, hwirq, 0);
+ bitmap_release_region(msi->used, hwirq, order_base_2(nr_irqs));
mutex_unlock(&msi->lock);
}
@@ -527,16 +525,17 @@ static int brcm_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
unsigned int nr_irqs, void *args)
{
struct brcm_msi *msi = domain->host_data;
- int hwirq;
+ int hwirq, i;
- hwirq = brcm_msi_alloc(msi);
+ hwirq = brcm_msi_alloc(msi, nr_irqs);
if (hwirq < 0)
return hwirq;
- irq_domain_set_info(domain, virq, (irq_hw_number_t)hwirq,
- &brcm_msi_bottom_irq_chip, domain->host_data,
- handle_edge_irq, NULL, NULL);
+ for (i = 0; i < nr_irqs; i++)
+ irq_domain_set_info(domain, virq + i, hwirq + i,
+ &brcm_msi_bottom_irq_chip, domain->host_data,
+ handle_edge_irq, NULL, NULL);
return 0;
}
@@ -546,7 +545,7 @@ static void brcm_irq_domain_free(struct irq_domain *domain,
struct irq_data *d = irq_domain_get_irq_data(domain, virq);
struct brcm_msi *msi = irq_data_get_irq_chip_data(d);
- brcm_msi_free(msi, d->hwirq);
+ brcm_msi_free(msi, d->hwirq, nr_irqs);
}
static const struct irq_domain_ops msi_domain_ops = {
@@ -726,7 +725,7 @@ static void __iomem *brcm7425_pcie_map_bus(struct pci_bus *bus,
return base + DATA_ADDR(pcie);
}
-static inline void brcm_pcie_bridge_sw_init_set_generic(struct brcm_pcie *pcie, u32 val)
+static void brcm_pcie_bridge_sw_init_set_generic(struct brcm_pcie *pcie, u32 val)
{
u32 tmp, mask = RGR1_SW_INIT_1_INIT_GENERIC_MASK;
u32 shift = RGR1_SW_INIT_1_INIT_GENERIC_SHIFT;
@@ -736,7 +735,7 @@ static inline void brcm_pcie_bridge_sw_init_set_generic(struct brcm_pcie *pcie,
writel(tmp, pcie->base + PCIE_RGR1_SW_INIT_1(pcie));
}
-static inline void brcm_pcie_bridge_sw_init_set_7278(struct brcm_pcie *pcie, u32 val)
+static void brcm_pcie_bridge_sw_init_set_7278(struct brcm_pcie *pcie, u32 val)
{
u32 tmp, mask = RGR1_SW_INIT_1_INIT_7278_MASK;
u32 shift = RGR1_SW_INIT_1_INIT_7278_SHIFT;
@@ -746,7 +745,7 @@ static inline void brcm_pcie_bridge_sw_init_set_7278(struct brcm_pcie *pcie, u32
writel(tmp, pcie->base + PCIE_RGR1_SW_INIT_1(pcie));
}
-static inline void brcm_pcie_perst_set_4908(struct brcm_pcie *pcie, u32 val)
+static void brcm_pcie_perst_set_4908(struct brcm_pcie *pcie, u32 val)
{
if (WARN_ONCE(!pcie->perst_reset, "missing PERST# reset controller\n"))
return;
@@ -757,7 +756,7 @@ static inline void brcm_pcie_perst_set_4908(struct brcm_pcie *pcie, u32 val)
reset_control_deassert(pcie->perst_reset);
}
-static inline void brcm_pcie_perst_set_7278(struct brcm_pcie *pcie, u32 val)
+static void brcm_pcie_perst_set_7278(struct brcm_pcie *pcie, u32 val)
{
u32 tmp;
@@ -767,7 +766,7 @@ static inline void brcm_pcie_perst_set_7278(struct brcm_pcie *pcie, u32 val)
writel(tmp, pcie->base + PCIE_MISC_PCIE_CTRL);
}
-static inline void brcm_pcie_perst_set_generic(struct brcm_pcie *pcie, u32 val)
+static void brcm_pcie_perst_set_generic(struct brcm_pcie *pcie, u32 val)
{
u32 tmp;
@@ -776,7 +775,7 @@ static inline void brcm_pcie_perst_set_generic(struct brcm_pcie *pcie, u32 val)
writel(tmp, pcie->base + PCIE_RGR1_SW_INIT_1(pcie));
}
-static inline int brcm_pcie_get_rc_bar2_size_and_offset(struct brcm_pcie *pcie,
+static int brcm_pcie_get_rc_bar2_size_and_offset(struct brcm_pcie *pcie,
u64 *rc_bar2_size,
u64 *rc_bar2_offset)
{
@@ -903,11 +902,16 @@ static int brcm_pcie_setup(struct brcm_pcie *pcie)
else
burst = 0x2; /* 512 bytes */
- /* Set SCB_MAX_BURST_SIZE, CFG_READ_UR_MODE, SCB_ACCESS_EN */
+ /*
+ * Set SCB_MAX_BURST_SIZE, CFG_READ_UR_MODE, SCB_ACCESS_EN,
+ * RCB_MPS_MODE, RCB_64B_MODE
+ */
tmp = readl(base + PCIE_MISC_MISC_CTRL);
u32p_replace_bits(&tmp, 1, PCIE_MISC_MISC_CTRL_SCB_ACCESS_EN_MASK);
u32p_replace_bits(&tmp, 1, PCIE_MISC_MISC_CTRL_CFG_READ_UR_MODE_MASK);
u32p_replace_bits(&tmp, burst, PCIE_MISC_MISC_CTRL_MAX_BURST_SIZE_MASK);
+ u32p_replace_bits(&tmp, 1, PCIE_MISC_MISC_CTRL_PCIE_RCB_MPS_MODE_MASK);
+ u32p_replace_bits(&tmp, 1, PCIE_MISC_MISC_CTRL_PCIE_RCB_64B_MODE_MASK);
writel(tmp, base + PCIE_MISC_MISC_CTRL);
ret = brcm_pcie_get_rc_bar2_size_and_offset(pcie, &rc_bar2_size,
@@ -1033,8 +1037,15 @@ static int brcm_pcie_start_link(struct brcm_pcie *pcie)
pcie->perst_set(pcie, 0);
/*
- * Give the RC/EP time to wake up, before trying to configure RC.
- * Intermittently check status for link-up, up to a total of 100ms.
+ * Wait for 100ms after PERST# deassertion; see PCIe CEM specification
+ * sections 2.2, PCIe r5.0, 6.6.1.
+ */
+ msleep(100);
+
+ /*
+ * Give the RC/EP even more time to wake up, before trying to
+ * configure RC. Intermittently check status for link-up, up to a
+ * total of 100ms.
*/
for (i = 0; i < 100 && !brcm_pcie_link_up(pcie); i += 5)
msleep(5);
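
For the multi-MSI change above, a minimal sketch (not part of the patch; example_alloc_msi_hwirqs() is a hypothetical helper) of why brcm_msi_alloc() now passes order_base_2(nr_irqs): bitmap_find_free_region() hands out a naturally aligned block of 2^order bits, which lines up with PCI multi-MSI's requirement for a power-of-two, aligned vector block, so a request for three vectors is rounded up to an order-2 (four-bit) region.

#include <linux/bitmap.h>
#include <linux/log2.h>

static int example_alloc_msi_hwirqs(unsigned long *used, unsigned int nr_bits,
				    unsigned int nr_irqs)
{
	/* Returns the first hwirq of the region, or a negative errno. */
	return bitmap_find_free_region(used, nr_bits, order_base_2(nr_irqs));
}

The matching release must use the same order, which is why brcm_msi_free() now also takes nr_irqs.
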
diff --git a/drivers/pci/controller/pcie-iproc-platform.c b/drivers/pci/controller/pcie-iproc-platform.c
index 538115246c79..4142a73e611d 100644
--- a/drivers/pci/controller/pcie-iproc-platform.c
+++ b/drivers/pci/controller/pcie-iproc-platform.c
@@ -12,7 +12,6 @@
#include <linux/platform_device.h>
#include <linux/of_address.h>
#include <linux/of_pci.h>
-#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/phy/phy.h>
diff --git a/drivers/pci/controller/pcie-iproc.c b/drivers/pci/controller/pcie-iproc.c
index 2519201b0e51..83029bdfd884 100644
--- a/drivers/pci/controller/pcie-iproc.c
+++ b/drivers/pci/controller/pcie-iproc.c
@@ -18,7 +18,6 @@
#include <linux/platform_device.h>
#include <linux/of_address.h>
#include <linux/of_pci.h>
-#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/phy/phy.h>
diff --git a/drivers/pci/controller/pcie-microchip-host.c b/drivers/pci/controller/pcie-microchip-host.c
index 7263d175b5ad..0ebf7015e9af 100644
--- a/drivers/pci/controller/pcie-microchip-host.c
+++ b/drivers/pci/controller/pcie-microchip-host.c
@@ -9,10 +9,10 @@
#include <linux/clk.h>
#include <linux/irqchip/chained_irq.h>
+#include <linux/irqdomain.h>
#include <linux/module.h>
#include <linux/msi.h>
#include <linux/of_address.h>
-#include <linux/of_irq.h>
#include <linux/of_pci.h>
#include <linux/pci-ecam.h>
#include <linux/platform_device.h>
diff --git a/drivers/pci/controller/pcie-mt7621.c b/drivers/pci/controller/pcie-mt7621.c
index 4bd1abf26008..ee7aad09d627 100644
--- a/drivers/pci/controller/pcie-mt7621.c
+++ b/drivers/pci/controller/pcie-mt7621.c
@@ -466,7 +466,8 @@ static int mt7621_pcie_register_host(struct pci_host_bridge *host)
}
static const struct soc_device_attribute mt7621_pcie_quirks_match[] = {
- { .soc_id = "mt7621", .revision = "E2" }
+ { .soc_id = "mt7621", .revision = "E2" },
+ { /* sentinel */ }
};
static int mt7621_pcie_probe(struct platform_device *pdev)
diff --git a/drivers/pci/controller/pcie-rockchip-host.c b/drivers/pci/controller/pcie-rockchip-host.c
index 7352b5ff8d35..c96c0f454570 100644
--- a/drivers/pci/controller/pcie-rockchip-host.c
+++ b/drivers/pci/controller/pcie-rockchip-host.c
@@ -28,7 +28,6 @@
#include <linux/of_device.h>
#include <linux/of_pci.h>
#include <linux/of_platform.h>
-#include <linux/of_irq.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/phy/phy.h>
diff --git a/drivers/pci/controller/pcie-xilinx-cpm.c b/drivers/pci/controller/pcie-xilinx-cpm.c
index e4ab48041eb6..4a787a941674 100644
--- a/drivers/pci/controller/pcie-xilinx-cpm.c
+++ b/drivers/pci/controller/pcie-xilinx-cpm.c
@@ -16,7 +16,6 @@
#include <linux/of_address.h>
#include <linux/of_pci.h>
#include <linux/of_platform.h>
-#include <linux/of_irq.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/pci-ecam.h>
diff --git a/drivers/pci/controller/pcie-xilinx-nwl.c b/drivers/pci/controller/pcie-xilinx-nwl.c
index 40d070e54ad2..176686bdb15c 100644
--- a/drivers/pci/controller/pcie-xilinx-nwl.c
+++ b/drivers/pci/controller/pcie-xilinx-nwl.c
@@ -17,7 +17,6 @@
#include <linux/of_address.h>
#include <linux/of_pci.h>
#include <linux/of_platform.h>
-#include <linux/of_irq.h>
#include <linux/pci.h>
#include <linux/pci-ecam.h>
#include <linux/platform_device.h>
@@ -474,15 +473,15 @@ static int nwl_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
for (i = 0; i < nr_irqs; i++) {
irq_domain_set_info(domain, virq + i, bit + i, &nwl_irq_chip,
- domain->host_data, handle_simple_irq,
- NULL, NULL);
+ domain->host_data, handle_simple_irq,
+ NULL, NULL);
}
mutex_unlock(&msi->lock);
return 0;
}
static void nwl_irq_domain_free(struct irq_domain *domain, unsigned int virq,
- unsigned int nr_irqs)
+ unsigned int nr_irqs)
{
struct irq_data *data = irq_domain_get_irq_data(domain, virq);
struct nwl_pcie *pcie = irq_data_get_irq_chip_data(data);
@@ -722,7 +721,6 @@ static int nwl_pcie_bridge_init(struct nwl_pcie *pcie)
/* Enable all misc interrupts */
nwl_bridge_writel(pcie, MSGF_MISC_SR_MASKALL, MSGF_MISC_MASK);
-
/* Disable all legacy interrupts */
nwl_bridge_writel(pcie, (u32)~MSGF_LEG_SR_MASKALL, MSGF_LEG_MASK);
diff --git a/drivers/pci/controller/vmd.c b/drivers/pci/controller/vmd.c
index e06e9f4fc50f..769eedeb8802 100644
--- a/drivers/pci/controller/vmd.c
+++ b/drivers/pci/controller/vmd.c
@@ -719,6 +719,7 @@ static int vmd_enable_domain(struct vmd_dev *vmd, unsigned long features)
resource_size_t offset[2] = {0};
resource_size_t membar2_offset = 0x2000;
struct pci_bus *child;
+ struct pci_dev *dev;
int ret;
/*
@@ -859,8 +860,25 @@ static int vmd_enable_domain(struct vmd_dev *vmd, unsigned long features)
pci_scan_child_bus(vmd->bus);
vmd_domain_reset(vmd);
- list_for_each_entry(child, &vmd->bus->children, node)
- pci_reset_bus(child->self);
+
+ /*
+ * When Intel VMD is enabled, the OS does not discover the Root Ports
+ * owned by Intel VMD within the MMCFG space. pci_reset_bus() applies
+ * a reset to the parent of the PCI device supplied as argument. This
+ * is why we pass a child device, so the reset can be triggered at
+ * the Intel bridge level and propagated to all the children in the
+ * hierarchy.
+ */
+ list_for_each_entry(child, &vmd->bus->children, node) {
+ if (!list_empty(&child->devices)) {
+ dev = list_first_entry(&child->devices,
+ struct pci_dev, bus_list);
+ ret = pci_reset_bus(dev);
+ if (ret)
+ pci_warn(dev, "can't reset device: %d\n", ret);
+
+ break;
+ }
+ }
+
pci_assign_unassigned_bus_resources(vmd->bus);
/*
@@ -980,6 +998,11 @@ static int vmd_resume(struct device *dev)
struct vmd_dev *vmd = pci_get_drvdata(pdev);
int err, i;
+ if (vmd->irq_domain)
+ vmd_set_msi_remapping(vmd, true);
+ else
+ vmd_set_msi_remapping(vmd, false);
+
for (i = 0; i < vmd->msix_count; i++) {
err = devm_request_irq(dev, vmd->irqs[i].virq,
vmd_irq, IRQF_NO_THREAD,