Diffstat (limited to 'drivers/pci')
-rw-r--r--  drivers/pci/dwc/pci-dra7xx.c             |  12
-rw-r--r--  drivers/pci/dwc/pci-exynos.c             |  12
-rw-r--r--  drivers/pci/dwc/pci-imx6.c               |  11
-rw-r--r--  drivers/pci/dwc/pci-keystone-dw.c        |  12
-rw-r--r--  drivers/pci/dwc/pci-keystone.c           |  10
-rw-r--r--  drivers/pci/dwc/pci-keystone.h           |   4
-rw-r--r--  drivers/pci/dwc/pci-layerscape.c         | 102
-rw-r--r--  drivers/pci/dwc/pcie-armada8k.c          |  12
-rw-r--r--  drivers/pci/dwc/pcie-artpec6.c           |  14
-rw-r--r--  drivers/pci/dwc/pcie-designware-host.c   |  15
-rw-r--r--  drivers/pci/dwc/pcie-designware-plat.c   |   4
-rw-r--r--  drivers/pci/dwc/pcie-designware.c        |  12
-rw-r--r--  drivers/pci/dwc/pcie-designware.h        |  27
-rw-r--r--  drivers/pci/dwc/pcie-hisi.c              |   5
-rw-r--r--  drivers/pci/dwc/pcie-kirin.c             |   6
-rw-r--r--  drivers/pci/dwc/pcie-qcom.c              | 409
-rw-r--r--  drivers/pci/dwc/pcie-spear13xx.c         |   8
-rw-r--r--  drivers/pci/host/Kconfig                 |   7
-rw-r--r--  drivers/pci/host/pci-aardvark.c          |   5
-rw-r--r--  drivers/pci/host/pci-ftpci100.c          |   6
-rw-r--r--  drivers/pci/host/pci-hyperv.c            |   8
-rw-r--r--  drivers/pci/host/pci-mvebu.c             |   3
-rw-r--r--  drivers/pci/host/pci-tegra.c             |   6
-rw-r--r--  drivers/pci/host/pci-xgene-msi.c         |   2
-rw-r--r--  drivers/pci/host/pci-xgene.c             |  41
-rw-r--r--  drivers/pci/host/pcie-altera-msi.c       |   6
-rw-r--r--  drivers/pci/host/pcie-altera.c           |  13
-rw-r--r--  drivers/pci/host/pcie-iproc-msi.c        |   2
-rw-r--r--  drivers/pci/host/pcie-iproc-platform.c   |   8
-rw-r--r--  drivers/pci/host/pcie-iproc.c            | 400
-rw-r--r--  drivers/pci/host/pcie-iproc.h            |   1
-rw-r--r--  drivers/pci/host/pcie-mediatek.c         | 756
-rw-r--r--  drivers/pci/host/pcie-rcar.c             |  12
-rw-r--r--  drivers/pci/host/pcie-rockchip.c         | 424
-rw-r--r--  drivers/pci/host/pcie-xilinx-nwl.c       |  11
-rw-r--r--  drivers/pci/host/pcie-xilinx.c           |  60
-rw-r--r--  drivers/pci/host/vmd.c                   |  19
-rw-r--r--  drivers/pci/quirks.c                     |  17
38 files changed, 1807 insertions, 675 deletions
diff --git a/drivers/pci/dwc/pci-dra7xx.c b/drivers/pci/dwc/pci-dra7xx.c
index ee61f833b62c..1e2aaedbebf2 100644
--- a/drivers/pci/dwc/pci-dra7xx.c
+++ b/drivers/pci/dwc/pci-dra7xx.c
@@ -195,7 +195,7 @@ static void dra7xx_pcie_enable_interrupts(struct dra7xx_pcie *dra7xx)
dra7xx_pcie_enable_msi_interrupts(dra7xx);
}
-static void dra7xx_pcie_host_init(struct pcie_port *pp)
+static int dra7xx_pcie_host_init(struct pcie_port *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);
@@ -206,6 +206,8 @@ static void dra7xx_pcie_host_init(struct pcie_port *pp)
dw_pcie_wait_for_link(pci);
dw_pcie_msi_init(pp);
dra7xx_pcie_enable_interrupts(dra7xx);
+
+ return 0;
}
static const struct dw_pcie_host_ops dra7xx_pcie_host_ops = {
@@ -238,7 +240,7 @@ static int dra7xx_pcie_init_irq_domain(struct pcie_port *pp)
return -ENODEV;
}
- dra7xx->irq_domain = irq_domain_add_linear(pcie_intc_node, 4,
+ dra7xx->irq_domain = irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX,
&intx_domain_ops, pp);
if (!dra7xx->irq_domain) {
dev_err(dev, "Failed to get a INTx IRQ domain\n");
@@ -448,7 +450,7 @@ static int __init dra7xx_add_pcie_port(struct dra7xx_pcie *dra7xx,
pp->irq = platform_get_irq(pdev, 1);
if (pp->irq < 0) {
dev_err(dev, "missing IRQ resource\n");
- return -EINVAL;
+ return pp->irq;
}
ret = devm_request_irq(dev, pp->irq, dra7xx_pcie_msi_irq_handler,
@@ -629,8 +631,8 @@ static int __init dra7xx_pcie_probe(struct platform_device *pdev)
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
- dev_err(dev, "missing IRQ resource\n");
- return -EINVAL;
+ dev_err(dev, "missing IRQ resource: %d\n", irq);
+ return irq;
}
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ti_conf");
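
[Note, not part of the patch] A minimal sketch of the IRQ-lookup pattern the dra7xx hunks above switch to: propagate the negative errno returned by platform_get_irq() (for example -EPROBE_DEFER) instead of flattening it to -EINVAL. The foo_* name is hypothetical.

#include <linux/platform_device.h>

static int foo_pcie_get_irq(struct platform_device *pdev)
{
	int irq;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		/* keep the real error code, e.g. -EPROBE_DEFER or -ENXIO */
		dev_err(&pdev->dev, "missing IRQ resource: %d\n", irq);
		return irq;
	}

	return irq;
}
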
diff --git a/drivers/pci/dwc/pci-exynos.c b/drivers/pci/dwc/pci-exynos.c
index c78c06552590..5596fdedbb94 100644
--- a/drivers/pci/dwc/pci-exynos.c
+++ b/drivers/pci/dwc/pci-exynos.c
@@ -581,13 +581,15 @@ static int exynos_pcie_link_up(struct dw_pcie *pci)
return 0;
}
-static void exynos_pcie_host_init(struct pcie_port *pp)
+static int exynos_pcie_host_init(struct pcie_port *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct exynos_pcie *ep = to_exynos_pcie(pci);
exynos_pcie_establish_link(ep);
exynos_pcie_enable_interrupts(ep);
+
+ return 0;
}
static const struct dw_pcie_host_ops exynos_pcie_host_ops = {
@@ -605,9 +607,9 @@ static int __init exynos_add_pcie_port(struct exynos_pcie *ep,
int ret;
pp->irq = platform_get_irq(pdev, 1);
- if (!pp->irq) {
+ if (pp->irq < 0) {
dev_err(dev, "failed to get irq\n");
- return -ENODEV;
+ return pp->irq;
}
ret = devm_request_irq(dev, pp->irq, exynos_pcie_irq_handler,
IRQF_SHARED, "exynos-pcie", ep);
@@ -618,9 +620,9 @@ static int __init exynos_add_pcie_port(struct exynos_pcie *ep,
if (IS_ENABLED(CONFIG_PCI_MSI)) {
pp->msi_irq = platform_get_irq(pdev, 0);
- if (!pp->msi_irq) {
+ if (pp->msi_irq < 0) {
dev_err(dev, "failed to get msi irq\n");
- return -ENODEV;
+ return pp->msi_irq;
}
ret = devm_request_irq(dev, pp->msi_irq,
diff --git a/drivers/pci/dwc/pci-imx6.c b/drivers/pci/dwc/pci-imx6.c
index bf5c3616e344..b73483534a5b 100644
--- a/drivers/pci/dwc/pci-imx6.c
+++ b/drivers/pci/dwc/pci-imx6.c
@@ -636,7 +636,7 @@ err_reset_phy:
return ret;
}
-static void imx6_pcie_host_init(struct pcie_port *pp)
+static int imx6_pcie_host_init(struct pcie_port *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct imx6_pcie *imx6_pcie = to_imx6_pcie(pci);
@@ -649,6 +649,8 @@ static void imx6_pcie_host_init(struct pcie_port *pp)
if (IS_ENABLED(CONFIG_PCI_MSI))
dw_pcie_msi_init(pp);
+
+ return 0;
}
static int imx6_pcie_link_up(struct dw_pcie *pci)
@@ -778,14 +780,15 @@ static int imx6_pcie_probe(struct platform_device *pdev)
}
break;
case IMX7D:
- imx6_pcie->pciephy_reset = devm_reset_control_get(dev,
- "pciephy");
+ imx6_pcie->pciephy_reset = devm_reset_control_get_exclusive(dev,
+ "pciephy");
if (IS_ERR(imx6_pcie->pciephy_reset)) {
dev_err(dev, "Failed to get PCIEPHY reset control\n");
return PTR_ERR(imx6_pcie->pciephy_reset);
}
- imx6_pcie->apps_reset = devm_reset_control_get(dev, "apps");
+ imx6_pcie->apps_reset = devm_reset_control_get_exclusive(dev,
+ "apps");
if (IS_ERR(imx6_pcie->apps_reset)) {
dev_err(dev, "Failed to get PCIE APPS reset control\n");
return PTR_ERR(imx6_pcie->apps_reset);
diff --git a/drivers/pci/dwc/pci-keystone-dw.c b/drivers/pci/dwc/pci-keystone-dw.c
index 8bc626e640c8..3b0f206590f9 100644
--- a/drivers/pci/dwc/pci-keystone-dw.c
+++ b/drivers/pci/dwc/pci-keystone-dw.c
@@ -168,16 +168,12 @@ void ks_dw_pcie_msi_clear_irq(struct pcie_port *pp, int irq)
static void ks_dw_pcie_msi_irq_mask(struct irq_data *d)
{
- struct keystone_pcie *ks_pcie;
struct msi_desc *msi;
struct pcie_port *pp;
- struct dw_pcie *pci;
u32 offset;
msi = irq_data_get_msi_desc(d);
pp = (struct pcie_port *) msi_desc_to_pci_sysdata(msi);
- pci = to_dw_pcie_from_pp(pp);
- ks_pcie = to_keystone_pcie(pci);
offset = d->irq - irq_linear_revmap(pp->irq_domain, 0);
/* Mask the end point if PVM implemented */
@@ -191,16 +187,12 @@ static void ks_dw_pcie_msi_irq_mask(struct irq_data *d)
static void ks_dw_pcie_msi_irq_unmask(struct irq_data *d)
{
- struct keystone_pcie *ks_pcie;
struct msi_desc *msi;
struct pcie_port *pp;
- struct dw_pcie *pci;
u32 offset;
msi = irq_data_get_msi_desc(d);
pp = (struct pcie_port *) msi_desc_to_pci_sysdata(msi);
- pci = to_dw_pcie_from_pp(pp);
- ks_pcie = to_keystone_pcie(pci);
offset = d->irq - irq_linear_revmap(pp->irq_domain, 0);
/* Mask the end point if PVM implemented */
@@ -259,7 +251,7 @@ void ks_dw_pcie_enable_legacy_irqs(struct keystone_pcie *ks_pcie)
{
int i;
- for (i = 0; i < MAX_LEGACY_IRQS; i++)
+ for (i = 0; i < PCI_NUM_INTX; i++)
ks_dw_app_writel(ks_pcie, IRQ_ENABLE_SET + (i << 4), 0x1);
}
@@ -565,7 +557,7 @@ int __init ks_dw_pcie_host_init(struct keystone_pcie *ks_pcie,
/* Create legacy IRQ domain */
ks_pcie->legacy_irq_domain =
irq_domain_add_linear(ks_pcie->legacy_intc_np,
- MAX_LEGACY_IRQS,
+ PCI_NUM_INTX,
&ks_dw_pcie_legacy_irq_domain_ops,
NULL);
if (!ks_pcie->legacy_irq_domain) {
diff --git a/drivers/pci/dwc/pci-keystone.c b/drivers/pci/dwc/pci-keystone.c
index 4783cec1f78d..5bee3af47588 100644
--- a/drivers/pci/dwc/pci-keystone.c
+++ b/drivers/pci/dwc/pci-keystone.c
@@ -32,10 +32,6 @@
#define DRIVER_NAME "keystone-pcie"
-/* driver specific constants */
-#define MAX_MSI_HOST_IRQS 8
-#define MAX_LEGACY_HOST_IRQS 4
-
/* DEV_STAT_CTRL */
#define PCIE_CAP_BASE 0x70
@@ -173,7 +169,7 @@ static int ks_pcie_get_irq_controller_info(struct keystone_pcie *ks_pcie,
if (legacy) {
np_temp = &ks_pcie->legacy_intc_np;
- max_host_irqs = MAX_LEGACY_HOST_IRQS;
+ max_host_irqs = PCI_NUM_INTX;
host_irqs = &ks_pcie->legacy_host_irqs[0];
} else {
np_temp = &ks_pcie->msi_intc_np;
@@ -261,7 +257,7 @@ static int keystone_pcie_fault(unsigned long addr, unsigned int fsr,
return 0;
}
-static void __init ks_pcie_host_init(struct pcie_port *pp)
+static int __init ks_pcie_host_init(struct pcie_port *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
@@ -289,6 +285,8 @@ static void __init ks_pcie_host_init(struct pcie_port *pp)
*/
hook_fault_code(17, keystone_pcie_fault, SIGBUS, 0,
"Asynchronous external abort");
+
+ return 0;
}
static const struct dw_pcie_host_ops keystone_pcie_host_ops = {
diff --git a/drivers/pci/dwc/pci-keystone.h b/drivers/pci/dwc/pci-keystone.h
index 74c5825882df..30b7bc2ac380 100644
--- a/drivers/pci/dwc/pci-keystone.h
+++ b/drivers/pci/dwc/pci-keystone.h
@@ -12,9 +12,7 @@
* published by the Free Software Foundation.
*/
-#define MAX_LEGACY_IRQS 4
#define MAX_MSI_HOST_IRQS 8
-#define MAX_LEGACY_HOST_IRQS 4
struct keystone_pcie {
struct dw_pcie *pci;
@@ -22,7 +20,7 @@ struct keystone_pcie {
/* PCI Device ID */
u32 device_id;
int num_legacy_host_irqs;
- int legacy_host_irqs[MAX_LEGACY_HOST_IRQS];
+ int legacy_host_irqs[PCI_NUM_INTX];
struct device_node *legacy_intc_np;
int num_msi_host_irqs;
diff --git a/drivers/pci/dwc/pci-layerscape.c b/drivers/pci/dwc/pci-layerscape.c
index fd861289ad8b..87fa486bee2c 100644
--- a/drivers/pci/dwc/pci-layerscape.c
+++ b/drivers/pci/dwc/pci-layerscape.c
@@ -33,7 +33,8 @@
/* PEX Internal Configuration Registers */
#define PCIE_STRFMR1 0x71c /* Symbol Timer & Filter Mask Register1 */
-#define PCIE_DBI_RO_WR_EN 0x8bc /* DBI Read-Only Write Enable Register */
+
+#define PCIE_IATU_NUM 6
struct ls_pcie_drvdata {
u32 lut_offset;
@@ -72,14 +73,6 @@ static void ls_pcie_clear_multifunction(struct ls_pcie *pcie)
iowrite8(PCI_HEADER_TYPE_BRIDGE, pci->dbi_base + PCI_HEADER_TYPE);
}
-/* Fix class value */
-static void ls_pcie_fix_class(struct ls_pcie *pcie)
-{
- struct dw_pcie *pci = pcie->pci;
-
- iowrite16(PCI_CLASS_BRIDGE_PCI, pci->dbi_base + PCI_CLASS_DEVICE);
-}
-
/* Drop MSG TLP except for Vendor MSG */
static void ls_pcie_drop_msg_tlp(struct ls_pcie *pcie)
{
@@ -91,6 +84,14 @@ static void ls_pcie_drop_msg_tlp(struct ls_pcie *pcie)
iowrite32(val, pci->dbi_base + PCIE_STRFMR1);
}
+static void ls_pcie_disable_outbound_atus(struct ls_pcie *pcie)
+{
+ int i;
+
+ for (i = 0; i < PCIE_IATU_NUM; i++)
+ dw_pcie_disable_atu(pcie->pci, DW_PCIE_REGION_OUTBOUND, i);
+}
+
static int ls1021_pcie_link_up(struct dw_pcie *pci)
{
u32 state;
@@ -108,33 +109,6 @@ static int ls1021_pcie_link_up(struct dw_pcie *pci)
return 1;
}
-static void ls1021_pcie_host_init(struct pcie_port *pp)
-{
- struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
- struct ls_pcie *pcie = to_ls_pcie(pci);
- struct device *dev = pci->dev;
- u32 index[2];
-
- pcie->scfg = syscon_regmap_lookup_by_phandle(dev->of_node,
- "fsl,pcie-scfg");
- if (IS_ERR(pcie->scfg)) {
- dev_err(dev, "No syscfg phandle specified\n");
- pcie->scfg = NULL;
- return;
- }
-
- if (of_property_read_u32_array(dev->of_node,
- "fsl,pcie-scfg", index, 2)) {
- pcie->scfg = NULL;
- return;
- }
- pcie->index = index[1];
-
- dw_pcie_setup_rc(pp);
-
- ls_pcie_drop_msg_tlp(pcie);
-}
-
static int ls_pcie_link_up(struct dw_pcie *pci)
{
struct ls_pcie *pcie = to_ls_pcie(pci);
@@ -150,16 +124,54 @@ static int ls_pcie_link_up(struct dw_pcie *pci)
return 1;
}
-static void ls_pcie_host_init(struct pcie_port *pp)
+static int ls_pcie_host_init(struct pcie_port *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct ls_pcie *pcie = to_ls_pcie(pci);
- iowrite32(1, pci->dbi_base + PCIE_DBI_RO_WR_EN);
- ls_pcie_fix_class(pcie);
+ /*
+ * Disable outbound windows configured by the bootloader to avoid
+ * one transaction hitting multiple outbound windows.
+ * dw_pcie_setup_rc() will reconfigure the outbound windows.
+ */
+ ls_pcie_disable_outbound_atus(pcie);
+
+ dw_pcie_dbi_ro_wr_en(pci);
ls_pcie_clear_multifunction(pcie);
+ dw_pcie_dbi_ro_wr_dis(pci);
+
ls_pcie_drop_msg_tlp(pcie);
- iowrite32(0, pci->dbi_base + PCIE_DBI_RO_WR_EN);
+
+ dw_pcie_setup_rc(pp);
+
+ return 0;
+}
+
+static int ls1021_pcie_host_init(struct pcie_port *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct ls_pcie *pcie = to_ls_pcie(pci);
+ struct device *dev = pci->dev;
+ u32 index[2];
+ int ret;
+
+ pcie->scfg = syscon_regmap_lookup_by_phandle(dev->of_node,
+ "fsl,pcie-scfg");
+ if (IS_ERR(pcie->scfg)) {
+ ret = PTR_ERR(pcie->scfg);
+ dev_err(dev, "No syscfg phandle specified\n");
+ pcie->scfg = NULL;
+ return ret;
+ }
+
+ if (of_property_read_u32_array(dev->of_node,
+ "fsl,pcie-scfg", index, 2)) {
+ pcie->scfg = NULL;
+ return -EINVAL;
+ }
+ pcie->index = index[1];
+
+ return ls_pcie_host_init(pp);
}
static int ls_pcie_msi_host_init(struct pcie_port *pp,
@@ -232,12 +244,22 @@ static struct ls_pcie_drvdata ls2080_drvdata = {
.dw_pcie_ops = &dw_ls_pcie_ops,
};
+static struct ls_pcie_drvdata ls2088_drvdata = {
+ .lut_offset = 0x80000,
+ .ltssm_shift = 0,
+ .lut_dbg = 0x407fc,
+ .ops = &ls_pcie_host_ops,
+ .dw_pcie_ops = &dw_ls_pcie_ops,
+};
+
static const struct of_device_id ls_pcie_of_match[] = {
{ .compatible = "fsl,ls1021a-pcie", .data = &ls1021_drvdata },
{ .compatible = "fsl,ls1043a-pcie", .data = &ls1043_drvdata },
{ .compatible = "fsl,ls1046a-pcie", .data = &ls1046_drvdata },
{ .compatible = "fsl,ls2080a-pcie", .data = &ls2080_drvdata },
{ .compatible = "fsl,ls2085a-pcie", .data = &ls2080_drvdata },
+ { .compatible = "fsl,ls2088a-pcie", .data = &ls2088_drvdata },
+ { .compatible = "fsl,ls1088a-pcie", .data = &ls2088_drvdata },
{ },
};
diff --git a/drivers/pci/dwc/pcie-armada8k.c b/drivers/pci/dwc/pcie-armada8k.c
index ea8f34af6a85..370d057c0046 100644
--- a/drivers/pci/dwc/pcie-armada8k.c
+++ b/drivers/pci/dwc/pcie-armada8k.c
@@ -134,13 +134,15 @@ static void armada8k_pcie_establish_link(struct armada8k_pcie *pcie)
dev_err(pci->dev, "Link not up after reconfiguration\n");
}
-static void armada8k_pcie_host_init(struct pcie_port *pp)
+static int armada8k_pcie_host_init(struct pcie_port *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct armada8k_pcie *pcie = to_armada8k_pcie(pci);
dw_pcie_setup_rc(pp);
armada8k_pcie_establish_link(pcie);
+
+ return 0;
}
static irqreturn_t armada8k_pcie_irq_handler(int irq, void *arg)
@@ -176,9 +178,9 @@ static int armada8k_add_pcie_port(struct armada8k_pcie *pcie,
pp->ops = &armada8k_pcie_host_ops;
pp->irq = platform_get_irq(pdev, 0);
- if (!pp->irq) {
+ if (pp->irq < 0) {
dev_err(dev, "failed to get irq for port\n");
- return -ENODEV;
+ return pp->irq;
}
ret = devm_request_irq(dev, pp->irq, armada8k_pcie_irq_handler,
@@ -226,7 +228,9 @@ static int armada8k_pcie_probe(struct platform_device *pdev)
if (IS_ERR(pcie->clk))
return PTR_ERR(pcie->clk);
- clk_prepare_enable(pcie->clk);
+ ret = clk_prepare_enable(pcie->clk);
+ if (ret)
+ return ret;
/* Get the dw-pcie unit configuration/control registers base. */
base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ctrl");
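
[Note, not part of the patch] A small sketch of the clock-enable check the armada8k probe hunk above adds: clk_prepare_enable() can fail, so its return value is reported and propagated rather than ignored. The foo_* name is hypothetical.

#include <linux/clk.h>
#include <linux/device.h>

static int foo_enable_clk(struct device *dev, struct clk *clk)
{
	int ret;

	ret = clk_prepare_enable(clk);
	if (ret)
		dev_err(dev, "failed to enable clock: %d\n", ret);

	return ret;
}
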
diff --git a/drivers/pci/dwc/pcie-artpec6.c b/drivers/pci/dwc/pcie-artpec6.c
index 01c6f7823672..6653619db6a1 100644
--- a/drivers/pci/dwc/pcie-artpec6.c
+++ b/drivers/pci/dwc/pcie-artpec6.c
@@ -141,12 +141,6 @@ static int artpec6_pcie_establish_link(struct artpec6_pcie *artpec6_pcie)
artpec6_pcie_writel(artpec6_pcie, PCIECFG, val);
usleep_range(100, 200);
- /*
- * Enable writing to config regs. This is required as the Synopsys
- * driver changes the class code. That register needs DBI write enable.
- */
- dw_pcie_writel_dbi(pci, MISC_CONTROL_1_OFF, DBI_RO_WR_EN);
-
/* setup root complex */
dw_pcie_setup_rc(pp);
@@ -175,13 +169,15 @@ static void artpec6_pcie_enable_interrupts(struct artpec6_pcie *artpec6_pcie)
dw_pcie_msi_init(pp);
}
-static void artpec6_pcie_host_init(struct pcie_port *pp)
+static int artpec6_pcie_host_init(struct pcie_port *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct artpec6_pcie *artpec6_pcie = to_artpec6_pcie(pci);
artpec6_pcie_establish_link(artpec6_pcie);
artpec6_pcie_enable_interrupts(artpec6_pcie);
+
+ return 0;
}
static const struct dw_pcie_host_ops artpec6_pcie_host_ops = {
@@ -207,9 +203,9 @@ static int artpec6_add_pcie_port(struct artpec6_pcie *artpec6_pcie,
if (IS_ENABLED(CONFIG_PCI_MSI)) {
pp->msi_irq = platform_get_irq_byname(pdev, "msi");
- if (pp->msi_irq <= 0) {
+ if (pp->msi_irq < 0) {
dev_err(dev, "failed to get MSI irq\n");
- return -ENODEV;
+ return pp->msi_irq;
}
ret = devm_request_irq(dev, pp->msi_irq,
diff --git a/drivers/pci/dwc/pcie-designware-host.c b/drivers/pci/dwc/pcie-designware-host.c
index d29c020da082..0985aeee917c 100644
--- a/drivers/pci/dwc/pcie-designware-host.c
+++ b/drivers/pci/dwc/pcie-designware-host.c
@@ -71,9 +71,9 @@ irqreturn_t dw_handle_msi_irq(struct pcie_port *pp)
while ((pos = find_next_bit((unsigned long *) &val, 32,
pos)) != 32) {
irq = irq_find_mapping(pp->irq_domain, i * 32 + pos);
+ generic_handle_irq(irq);
dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_STATUS + i * 12,
4, 1 << pos);
- generic_handle_irq(irq);
pos++;
}
}
@@ -401,8 +401,11 @@ int dw_pcie_host_init(struct pcie_port *pp)
}
}
- if (pp->ops->host_init)
- pp->ops->host_init(pp);
+ if (pp->ops->host_init) {
+ ret = pp->ops->host_init(pp);
+ if (ret)
+ goto error;
+ }
pp->root_bus_nr = pp->busn->start;
@@ -594,10 +597,12 @@ void dw_pcie_setup_rc(struct pcie_port *pp)
dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_1, 0x00000000);
/* setup interrupt pins */
+ dw_pcie_dbi_ro_wr_en(pci);
val = dw_pcie_readl_dbi(pci, PCI_INTERRUPT_LINE);
val &= 0xffff00ff;
val |= 0x00000100;
dw_pcie_writel_dbi(pci, PCI_INTERRUPT_LINE, val);
+ dw_pcie_dbi_ro_wr_dis(pci);
/* setup bus numbers */
val = dw_pcie_readl_dbi(pci, PCI_PRIMARY_BUS);
@@ -634,8 +639,12 @@ void dw_pcie_setup_rc(struct pcie_port *pp)
dw_pcie_wr_own_conf(pp, PCI_BASE_ADDRESS_0, 4, 0);
+ /* Enable write permission for the DBI read-only register */
+ dw_pcie_dbi_ro_wr_en(pci);
/* program correct class for RC */
dw_pcie_wr_own_conf(pp, PCI_CLASS_DEVICE, 2, PCI_CLASS_BRIDGE_PCI);
+ /* Better disable write permission right after the update */
+ dw_pcie_dbi_ro_wr_dis(pci);
dw_pcie_rd_own_conf(pp, PCIE_LINK_WIDTH_SPEED_CONTROL, 4, &val);
val |= PORT_LOGIC_SPEED_CHANGE;
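
[Note, not part of the patch] A sketch of the host_init contract the hunk above introduces in dw_pcie_host_init(): the callback now returns 0 or a negative errno, which the core propagates instead of silently continuing. The foo_* name is hypothetical; how much of the bring-up a driver chooses to treat as fatal is up to that driver.

static int foo_pcie_host_init(struct pcie_port *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	int ret;

	dw_pcie_setup_rc(pp);

	/* a driver may now fail the probe if the link never comes up */
	ret = dw_pcie_wait_for_link(pci);
	if (ret)
		return ret;

	dw_pcie_msi_init(pp);

	return 0;
}
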
diff --git a/drivers/pci/dwc/pcie-designware-plat.c b/drivers/pci/dwc/pcie-designware-plat.c
index 091b4e7ad059..168e2380f493 100644
--- a/drivers/pci/dwc/pcie-designware-plat.c
+++ b/drivers/pci/dwc/pcie-designware-plat.c
@@ -35,7 +35,7 @@ static irqreturn_t dw_plat_pcie_msi_irq_handler(int irq, void *arg)
return dw_handle_msi_irq(pp);
}
-static void dw_plat_pcie_host_init(struct pcie_port *pp)
+static int dw_plat_pcie_host_init(struct pcie_port *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
@@ -44,6 +44,8 @@ static void dw_plat_pcie_host_init(struct pcie_port *pp)
if (IS_ENABLED(CONFIG_PCI_MSI))
dw_pcie_msi_init(pp);
+
+ return 0;
}
static const struct dw_pcie_host_ops dw_plat_pcie_host_ops = {
diff --git a/drivers/pci/dwc/pcie-designware.c b/drivers/pci/dwc/pcie-designware.c
index 0e03af279259..50cef47fc25d 100644
--- a/drivers/pci/dwc/pcie-designware.c
+++ b/drivers/pci/dwc/pcie-designware.c
@@ -107,8 +107,9 @@ static void dw_pcie_writel_ob_unroll(struct dw_pcie *pci, u32 index, u32 reg,
dw_pcie_writel_dbi(pci, offset + reg, val);
}
-void dw_pcie_prog_outbound_atu_unroll(struct dw_pcie *pci, int index, int type,
- u64 cpu_addr, u64 pci_addr, u32 size)
+static void dw_pcie_prog_outbound_atu_unroll(struct dw_pcie *pci, int index,
+ int type, u64 cpu_addr,
+ u64 pci_addr, u32 size)
{
u32 retries, val;
@@ -177,7 +178,7 @@ void dw_pcie_prog_outbound_atu(struct dw_pcie *pci, int index, int type,
*/
for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) {
val = dw_pcie_readl_dbi(pci, PCIE_ATU_CR2);
- if (val == PCIE_ATU_ENABLE)
+ if (val & PCIE_ATU_ENABLE)
return;
usleep_range(LINK_WAIT_IATU_MIN, LINK_WAIT_IATU_MAX);
@@ -200,8 +201,9 @@ static void dw_pcie_writel_ib_unroll(struct dw_pcie *pci, u32 index, u32 reg,
dw_pcie_writel_dbi(pci, offset + reg, val);
}
-int dw_pcie_prog_inbound_atu_unroll(struct dw_pcie *pci, int index, int bar,
- u64 cpu_addr, enum dw_pcie_as_type as_type)
+static int dw_pcie_prog_inbound_atu_unroll(struct dw_pcie *pci, int index,
+ int bar, u64 cpu_addr,
+ enum dw_pcie_as_type as_type)
{
int type;
u32 retries, val;
diff --git a/drivers/pci/dwc/pcie-designware.h b/drivers/pci/dwc/pcie-designware.h
index 714f6f21ea93..744876d8ef66 100644
--- a/drivers/pci/dwc/pcie-designware.h
+++ b/drivers/pci/dwc/pcie-designware.h
@@ -76,6 +76,9 @@
#define PCIE_ATU_FUNC(x) (((x) & 0x7) << 16)
#define PCIE_ATU_UPPER_TARGET 0x91C
+#define PCIE_MISC_CONTROL_1_OFF 0x8BC
+#define PCIE_DBI_RO_WR_EN (0x1 << 0)
+
/*
* iATU Unroll-specific register definitions
* From 4.80 core version the address translation will be made by unroll
@@ -134,7 +137,7 @@ struct dw_pcie_host_ops {
unsigned int devfn, int where, int size, u32 *val);
int (*wr_other_conf)(struct pcie_port *pp, struct pci_bus *bus,
unsigned int devfn, int where, int size, u32 val);
- void (*host_init)(struct pcie_port *pp);
+ int (*host_init)(struct pcie_port *pp);
void (*msi_set_irq)(struct pcie_port *pp, int irq);
void (*msi_clear_irq)(struct pcie_port *pp, int irq);
phys_addr_t (*get_msi_addr)(struct pcie_port *pp);
@@ -280,6 +283,28 @@ static inline u32 dw_pcie_readl_dbi2(struct dw_pcie *pci, u32 reg)
return __dw_pcie_read_dbi(pci, pci->dbi_base2, reg, 0x4);
}
+static inline void dw_pcie_dbi_ro_wr_en(struct dw_pcie *pci)
+{
+ u32 reg;
+ u32 val;
+
+ reg = PCIE_MISC_CONTROL_1_OFF;
+ val = dw_pcie_readl_dbi(pci, reg);
+ val |= PCIE_DBI_RO_WR_EN;
+ dw_pcie_writel_dbi(pci, reg, val);
+}
+
+static inline void dw_pcie_dbi_ro_wr_dis(struct dw_pcie *pci)
+{
+ u32 reg;
+ u32 val;
+
+ reg = PCIE_MISC_CONTROL_1_OFF;
+ val = dw_pcie_readl_dbi(pci, reg);
+ val &= ~PCIE_DBI_RO_WR_EN;
+ dw_pcie_writel_dbi(pci, reg, val);
+}
+
#ifdef CONFIG_PCIE_DW_HOST
irqreturn_t dw_handle_msi_irq(struct pcie_port *pp);
void dw_pcie_msi_init(struct pcie_port *pp);
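
[Note, not part of the patch] A usage sketch for the new dw_pcie_dbi_ro_wr_en()/dw_pcie_dbi_ro_wr_dis() helpers declared above, mirroring how dw_pcie_setup_rc() wraps its class-code fixup: unlock the read-only DBI registers only for the duration of the write. The foo_* name is hypothetical.

static void foo_pcie_fix_class(struct pcie_port *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);

	dw_pcie_dbi_ro_wr_en(pci);	/* set DBI_RO_WR_EN in MISC_CONTROL_1 */
	dw_pcie_wr_own_conf(pp, PCI_CLASS_DEVICE, 2, PCI_CLASS_BRIDGE_PCI);
	dw_pcie_dbi_ro_wr_dis(pci);	/* lock the registers again right away */
}
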
diff --git a/drivers/pci/dwc/pcie-hisi.c b/drivers/pci/dwc/pcie-hisi.c
index e51acee0ddf3..a20179169e06 100644
--- a/drivers/pci/dwc/pcie-hisi.c
+++ b/drivers/pci/dwc/pcie-hisi.c
@@ -223,7 +223,7 @@ static int hisi_pcie_link_up(struct dw_pcie *pci)
return hisi_pcie->soc_ops->hisi_pcie_link_up(hisi_pcie);
}
-static struct dw_pcie_host_ops hisi_pcie_host_ops = {
+static const struct dw_pcie_host_ops hisi_pcie_host_ops = {
.rd_own_conf = hisi_pcie_cfg_read,
.wr_own_conf = hisi_pcie_cfg_write,
};
@@ -268,7 +268,6 @@ static int hisi_pcie_probe(struct platform_device *pdev)
struct dw_pcie *pci;
struct hisi_pcie *hisi_pcie;
struct resource *reg;
- struct device_driver *driver;
int ret;
hisi_pcie = devm_kzalloc(dev, sizeof(*hisi_pcie), GFP_KERNEL);
@@ -282,8 +281,6 @@ static int hisi_pcie_probe(struct platform_device *pdev)
pci->dev = dev;
pci->ops = &dw_pcie_ops;
- driver = dev->driver;
-
hisi_pcie->pci = pci;
hisi_pcie->soc_ops = of_device_get_match_data(dev);
diff --git a/drivers/pci/dwc/pcie-kirin.c b/drivers/pci/dwc/pcie-kirin.c
index 33fddb9f6739..dc3033cf3c19 100644
--- a/drivers/pci/dwc/pcie-kirin.c
+++ b/drivers/pci/dwc/pcie-kirin.c
@@ -430,9 +430,11 @@ static int kirin_pcie_establish_link(struct pcie_port *pp)
return 0;
}
-static void kirin_pcie_host_init(struct pcie_port *pp)
+static int kirin_pcie_host_init(struct pcie_port *pp)
{
kirin_pcie_establish_link(pp);
+
+ return 0;
}
static struct dw_pcie_ops kirin_dw_pcie_ops = {
@@ -441,7 +443,7 @@ static struct dw_pcie_ops kirin_dw_pcie_ops = {
.link_up = kirin_pcie_link_up,
};
-static struct dw_pcie_host_ops kirin_pcie_host_ops = {
+static const struct dw_pcie_host_ops kirin_pcie_host_ops = {
.rd_own_conf = kirin_pcie_rd_own_conf,
.wr_own_conf = kirin_pcie_wr_own_conf,
.host_init = kirin_pcie_host_init,
diff --git a/drivers/pci/dwc/pcie-qcom.c b/drivers/pci/dwc/pcie-qcom.c
index 68c5f2ab5bc8..ce7ba5b7552a 100644
--- a/drivers/pci/dwc/pcie-qcom.c
+++ b/drivers/pci/dwc/pcie-qcom.c
@@ -37,6 +37,20 @@
#include "pcie-designware.h"
#define PCIE20_PARF_SYS_CTRL 0x00
+#define MST_WAKEUP_EN BIT(13)
+#define SLV_WAKEUP_EN BIT(12)
+#define MSTR_ACLK_CGC_DIS BIT(10)
+#define SLV_ACLK_CGC_DIS BIT(9)
+#define CORE_CLK_CGC_DIS BIT(6)
+#define AUX_PWR_DET BIT(4)
+#define L23_CLK_RMV_DIS BIT(2)
+#define L1_CLK_RMV_DIS BIT(1)
+
+#define PCIE20_COMMAND_STATUS 0x04
+#define CMD_BME_VAL 0x4
+#define PCIE20_DEVICE_CONTROL2_STATUS2 0x98
+#define PCIE_CAP_CPL_TIMEOUT_DISABLE 0x10
+
#define PCIE20_PARF_PHY_CTRL 0x40
#define PCIE20_PARF_PHY_REFCLK 0x4C
#define PCIE20_PARF_DBI_BASE_ADDR 0x168
@@ -58,10 +72,22 @@
#define CFG_BRIDGE_SB_INIT BIT(0)
#define PCIE20_CAP 0x70
+#define PCIE20_CAP_LINK_CAPABILITIES (PCIE20_CAP + 0xC)
+#define PCIE20_CAP_ACTIVE_STATE_LINK_PM_SUPPORT (BIT(10) | BIT(11))
+#define PCIE20_CAP_LINK_1 (PCIE20_CAP + 0x14)
+#define PCIE_CAP_LINK1_VAL 0x2FD7F
+
+#define PCIE20_PARF_Q2A_FLUSH 0x1AC
+
+#define PCIE20_MISC_CONTROL_1_REG 0x8BC
+#define DBI_RO_WR_EN 1
#define PERST_DELAY_US 1000
-struct qcom_pcie_resources_v0 {
+#define PCIE20_v3_PARF_SLV_ADDR_SPACE_SIZE 0x358
+#define SLV_ADDR_SPACE_SZ 0x10000000
+
+struct qcom_pcie_resources_2_1_0 {
struct clk *iface_clk;
struct clk *core_clk;
struct clk *phy_clk;
@@ -75,7 +101,7 @@ struct qcom_pcie_resources_v0 {
struct regulator *vdda_refclk;
};
-struct qcom_pcie_resources_v1 {
+struct qcom_pcie_resources_1_0_0 {
struct clk *iface;
struct clk *aux;
struct clk *master_bus;
@@ -84,7 +110,7 @@ struct qcom_pcie_resources_v1 {
struct regulator *vdda;
};
-struct qcom_pcie_resources_v2 {
+struct qcom_pcie_resources_2_3_2 {
struct clk *aux_clk;
struct clk *master_clk;
struct clk *slave_clk;
@@ -92,7 +118,7 @@ struct qcom_pcie_resources_v2 {
struct clk *pipe_clk;
};
-struct qcom_pcie_resources_v3 {
+struct qcom_pcie_resources_2_4_0 {
struct clk *aux_clk;
struct clk *master_clk;
struct clk *slave_clk;
@@ -110,11 +136,21 @@ struct qcom_pcie_resources_v3 {
struct reset_control *phy_ahb_reset;
};
+struct qcom_pcie_resources_2_3_3 {
+ struct clk *iface;
+ struct clk *axi_m_clk;
+ struct clk *axi_s_clk;
+ struct clk *ahb_clk;
+ struct clk *aux_clk;
+ struct reset_control *rst[7];
+};
+
union qcom_pcie_resources {
- struct qcom_pcie_resources_v0 v0;
- struct qcom_pcie_resources_v1 v1;
- struct qcom_pcie_resources_v2 v2;
- struct qcom_pcie_resources_v3 v3;
+ struct qcom_pcie_resources_1_0_0 v1_0_0;
+ struct qcom_pcie_resources_2_1_0 v2_1_0;
+ struct qcom_pcie_resources_2_3_2 v2_3_2;
+ struct qcom_pcie_resources_2_3_3 v2_3_3;
+ struct qcom_pcie_resources_2_4_0 v2_4_0;
};
struct qcom_pcie;
@@ -124,6 +160,7 @@ struct qcom_pcie_ops {
int (*init)(struct qcom_pcie *pcie);
int (*post_init)(struct qcom_pcie *pcie);
void (*deinit)(struct qcom_pcie *pcie);
+ void (*post_deinit)(struct qcom_pcie *pcie);
void (*ltssm_enable)(struct qcom_pcie *pcie);
};
@@ -141,13 +178,13 @@ struct qcom_pcie {
static void qcom_ep_reset_assert(struct qcom_pcie *pcie)
{
- gpiod_set_value(pcie->reset, 1);
+ gpiod_set_value_cansleep(pcie->reset, 1);
usleep_range(PERST_DELAY_US, PERST_DELAY_US + 500);
}
static void qcom_ep_reset_deassert(struct qcom_pcie *pcie)
{
- gpiod_set_value(pcie->reset, 0);
+ gpiod_set_value_cansleep(pcie->reset, 0);
usleep_range(PERST_DELAY_US, PERST_DELAY_US + 500);
}
@@ -172,7 +209,7 @@ static int qcom_pcie_establish_link(struct qcom_pcie *pcie)
return dw_pcie_wait_for_link(pci);
}
-static void qcom_pcie_v0_v1_ltssm_enable(struct qcom_pcie *pcie)
+static void qcom_pcie_2_1_0_ltssm_enable(struct qcom_pcie *pcie)
{
u32 val;
@@ -182,9 +219,9 @@ static void qcom_pcie_v0_v1_ltssm_enable(struct qcom_pcie *pcie)
writel(val, pcie->elbi + PCIE20_ELBI_SYS_CTRL);
}
-static int qcom_pcie_get_resources_v0(struct qcom_pcie *pcie)
+static int qcom_pcie_get_resources_2_1_0(struct qcom_pcie *pcie)
{
- struct qcom_pcie_resources_v0 *res = &pcie->res.v0;
+ struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0;
struct dw_pcie *pci = pcie->pci;
struct device *dev = pci->dev;
@@ -212,29 +249,29 @@ static int qcom_pcie_get_resources_v0(struct qcom_pcie *pcie)
if (IS_ERR(res->phy_clk))
return PTR_ERR(res->phy_clk);
- res->pci_reset = devm_reset_control_get(dev, "pci");
+ res->pci_reset = devm_reset_control_get_exclusive(dev, "pci");
if (IS_ERR(res->pci_reset))
return PTR_ERR(res->pci_reset);
- res->axi_reset = devm_reset_control_get(dev, "axi");
+ res->axi_reset = devm_reset_control_get_exclusive(dev, "axi");
if (IS_ERR(res->axi_reset))
return PTR_ERR(res->axi_reset);
- res->ahb_reset = devm_reset_control_get(dev, "ahb");
+ res->ahb_reset = devm_reset_control_get_exclusive(dev, "ahb");
if (IS_ERR(res->ahb_reset))
return PTR_ERR(res->ahb_reset);
- res->por_reset = devm_reset_control_get(dev, "por");
+ res->por_reset = devm_reset_control_get_exclusive(dev, "por");
if (IS_ERR(res->por_reset))
return PTR_ERR(res->por_reset);
- res->phy_reset = devm_reset_control_get(dev, "phy");
+ res->phy_reset = devm_reset_control_get_exclusive(dev, "phy");
return PTR_ERR_OR_ZERO(res->phy_reset);
}
-static void qcom_pcie_deinit_v0(struct qcom_pcie *pcie)
+static void qcom_pcie_deinit_2_1_0(struct qcom_pcie *pcie)
{
- struct qcom_pcie_resources_v0 *res = &pcie->res.v0;
+ struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0;
reset_control_assert(res->pci_reset);
reset_control_assert(res->axi_reset);
@@ -249,9 +286,9 @@ static void qcom_pcie_deinit_v0(struct qcom_pcie *pcie)
regulator_disable(res->vdda_refclk);
}
-static int qcom_pcie_init_v0(struct qcom_pcie *pcie)
+static int qcom_pcie_init_2_1_0(struct qcom_pcie *pcie)
{
- struct qcom_pcie_resources_v0 *res = &pcie->res.v0;
+ struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0;
struct dw_pcie *pci = pcie->pci;
struct device *dev = pci->dev;
u32 val;
@@ -367,9 +404,9 @@ err_refclk:
return ret;
}
-static int qcom_pcie_get_resources_v1(struct qcom_pcie *pcie)
+static int qcom_pcie_get_resources_1_0_0(struct qcom_pcie *pcie)
{
- struct qcom_pcie_resources_v1 *res = &pcie->res.v1;
+ struct qcom_pcie_resources_1_0_0 *res = &pcie->res.v1_0_0;
struct dw_pcie *pci = pcie->pci;
struct device *dev = pci->dev;
@@ -393,13 +430,13 @@ static int qcom_pcie_get_resources_v1(struct qcom_pcie *pcie)
if (IS_ERR(res->slave_bus))
return PTR_ERR(res->slave_bus);
- res->core = devm_reset_control_get(dev, "core");
+ res->core = devm_reset_control_get_exclusive(dev, "core");
return PTR_ERR_OR_ZERO(res->core);
}
-static void qcom_pcie_deinit_v1(struct qcom_pcie *pcie)
+static void qcom_pcie_deinit_1_0_0(struct qcom_pcie *pcie)
{
- struct qcom_pcie_resources_v1 *res = &pcie->res.v1;
+ struct qcom_pcie_resources_1_0_0 *res = &pcie->res.v1_0_0;
reset_control_assert(res->core);
clk_disable_unprepare(res->slave_bus);
@@ -409,9 +446,9 @@ static void qcom_pcie_deinit_v1(struct qcom_pcie *pcie)
regulator_disable(res->vdda);
}
-static int qcom_pcie_init_v1(struct qcom_pcie *pcie)
+static int qcom_pcie_init_1_0_0(struct qcom_pcie *pcie)
{
- struct qcom_pcie_resources_v1 *res = &pcie->res.v1;
+ struct qcom_pcie_resources_1_0_0 *res = &pcie->res.v1_0_0;
struct dw_pcie *pci = pcie->pci;
struct device *dev = pci->dev;
int ret;
@@ -477,7 +514,7 @@ err_res:
return ret;
}
-static void qcom_pcie_v2_ltssm_enable(struct qcom_pcie *pcie)
+static void qcom_pcie_2_3_2_ltssm_enable(struct qcom_pcie *pcie)
{
u32 val;
@@ -487,9 +524,9 @@ static void qcom_pcie_v2_ltssm_enable(struct qcom_pcie *pcie)
writel(val, pcie->parf + PCIE20_PARF_LTSSM);
}
-static int qcom_pcie_get_resources_v2(struct qcom_pcie *pcie)
+static int qcom_pcie_get_resources_2_3_2(struct qcom_pcie *pcie)
{
- struct qcom_pcie_resources_v2 *res = &pcie->res.v2;
+ struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2;
struct dw_pcie *pci = pcie->pci;
struct device *dev = pci->dev;
@@ -513,20 +550,26 @@ static int qcom_pcie_get_resources_v2(struct qcom_pcie *pcie)
return PTR_ERR_OR_ZERO(res->pipe_clk);
}
-static void qcom_pcie_deinit_v2(struct qcom_pcie *pcie)
+static void qcom_pcie_deinit_2_3_2(struct qcom_pcie *pcie)
{
- struct qcom_pcie_resources_v2 *res = &pcie->res.v2;
+ struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2;
- clk_disable_unprepare(res->pipe_clk);
clk_disable_unprepare(res->slave_clk);
clk_disable_unprepare(res->master_clk);
clk_disable_unprepare(res->cfg_clk);
clk_disable_unprepare(res->aux_clk);
}
-static int qcom_pcie_init_v2(struct qcom_pcie *pcie)
+static void qcom_pcie_post_deinit_2_3_2(struct qcom_pcie *pcie)
{
- struct qcom_pcie_resources_v2 *res = &pcie->res.v2;
+ struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2;
+
+ clk_disable_unprepare(res->pipe_clk);
+}
+
+static int qcom_pcie_init_2_3_2(struct qcom_pcie *pcie)
+{
+ struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2;
struct dw_pcie *pci = pcie->pci;
struct device *dev = pci->dev;
u32 val;
@@ -589,9 +632,9 @@ err_cfg_clk:
return ret;
}
-static int qcom_pcie_post_init_v2(struct qcom_pcie *pcie)
+static int qcom_pcie_post_init_2_3_2(struct qcom_pcie *pcie)
{
- struct qcom_pcie_resources_v2 *res = &pcie->res.v2;
+ struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2;
struct dw_pcie *pci = pcie->pci;
struct device *dev = pci->dev;
int ret;
@@ -605,9 +648,9 @@ static int qcom_pcie_post_init_v2(struct qcom_pcie *pcie)
return 0;
}
-static int qcom_pcie_get_resources_v3(struct qcom_pcie *pcie)
+static int qcom_pcie_get_resources_2_4_0(struct qcom_pcie *pcie)
{
- struct qcom_pcie_resources_v3 *res = &pcie->res.v3;
+ struct qcom_pcie_resources_2_4_0 *res = &pcie->res.v2_4_0;
struct dw_pcie *pci = pcie->pci;
struct device *dev = pci->dev;
@@ -623,60 +666,64 @@ static int qcom_pcie_get_resources_v3(struct qcom_pcie *pcie)
if (IS_ERR(res->slave_clk))
return PTR_ERR(res->slave_clk);
- res->axi_m_reset = devm_reset_control_get(dev, "axi_m");
+ res->axi_m_reset = devm_reset_control_get_exclusive(dev, "axi_m");
if (IS_ERR(res->axi_m_reset))
return PTR_ERR(res->axi_m_reset);
- res->axi_s_reset = devm_reset_control_get(dev, "axi_s");
+ res->axi_s_reset = devm_reset_control_get_exclusive(dev, "axi_s");
if (IS_ERR(res->axi_s_reset))
return PTR_ERR(res->axi_s_reset);
- res->pipe_reset = devm_reset_control_get(dev, "pipe");
+ res->pipe_reset = devm_reset_control_get_exclusive(dev, "pipe");
if (IS_ERR(res->pipe_reset))
return PTR_ERR(res->pipe_reset);
- res->axi_m_vmid_reset = devm_reset_control_get(dev, "axi_m_vmid");
+ res->axi_m_vmid_reset = devm_reset_control_get_exclusive(dev,
+ "axi_m_vmid");
if (IS_ERR(res->axi_m_vmid_reset))
return PTR_ERR(res->axi_m_vmid_reset);
- res->axi_s_xpu_reset = devm_reset_control_get(dev, "axi_s_xpu");
+ res->axi_s_xpu_reset = devm_reset_control_get_exclusive(dev,
+ "axi_s_xpu");
if (IS_ERR(res->axi_s_xpu_reset))
return PTR_ERR(res->axi_s_xpu_reset);
- res->parf_reset = devm_reset_control_get(dev, "parf");
+ res->parf_reset = devm_reset_control_get_exclusive(dev, "parf");
if (IS_ERR(res->parf_reset))
return PTR_ERR(res->parf_reset);
- res->phy_reset = devm_reset_control_get(dev, "phy");
+ res->phy_reset = devm_reset_control_get_exclusive(dev, "phy");
if (IS_ERR(res->phy_reset))
return PTR_ERR(res->phy_reset);
- res->axi_m_sticky_reset = devm_reset_control_get(dev, "axi_m_sticky");
+ res->axi_m_sticky_reset = devm_reset_control_get_exclusive(dev,
+ "axi_m_sticky");
if (IS_ERR(res->axi_m_sticky_reset))
return PTR_ERR(res->axi_m_sticky_reset);
- res->pipe_sticky_reset = devm_reset_control_get(dev, "pipe_sticky");
+ res->pipe_sticky_reset = devm_reset_control_get_exclusive(dev,
+ "pipe_sticky");
if (IS_ERR(res->pipe_sticky_reset))
return PTR_ERR(res->pipe_sticky_reset);
- res->pwr_reset = devm_reset_control_get(dev, "pwr");
+ res->pwr_reset = devm_reset_control_get_exclusive(dev, "pwr");
if (IS_ERR(res->pwr_reset))
return PTR_ERR(res->pwr_reset);
- res->ahb_reset = devm_reset_control_get(dev, "ahb");
+ res->ahb_reset = devm_reset_control_get_exclusive(dev, "ahb");
if (IS_ERR(res->ahb_reset))
return PTR_ERR(res->ahb_reset);
- res->phy_ahb_reset = devm_reset_control_get(dev, "phy_ahb");
+ res->phy_ahb_reset = devm_reset_control_get_exclusive(dev, "phy_ahb");
if (IS_ERR(res->phy_ahb_reset))
return PTR_ERR(res->phy_ahb_reset);
return 0;
}
-static void qcom_pcie_deinit_v3(struct qcom_pcie *pcie)
+static void qcom_pcie_deinit_2_4_0(struct qcom_pcie *pcie)
{
- struct qcom_pcie_resources_v3 *res = &pcie->res.v3;
+ struct qcom_pcie_resources_2_4_0 *res = &pcie->res.v2_4_0;
reset_control_assert(res->axi_m_reset);
reset_control_assert(res->axi_s_reset);
@@ -692,9 +739,9 @@ static void qcom_pcie_deinit_v3(struct qcom_pcie *pcie)
clk_disable_unprepare(res->slave_clk);
}
-static int qcom_pcie_init_v3(struct qcom_pcie *pcie)
+static int qcom_pcie_init_2_4_0(struct qcom_pcie *pcie)
{
- struct qcom_pcie_resources_v3 *res = &pcie->res.v3;
+ struct qcom_pcie_resources_2_4_0 *res = &pcie->res.v2_4_0;
struct dw_pcie *pci = pcie->pci;
struct device *dev = pci->dev;
u32 val;
@@ -884,6 +931,166 @@ err_rst_phy:
return ret;
}
+static int qcom_pcie_get_resources_2_3_3(struct qcom_pcie *pcie)
+{
+ struct qcom_pcie_resources_2_3_3 *res = &pcie->res.v2_3_3;
+ struct dw_pcie *pci = pcie->pci;
+ struct device *dev = pci->dev;
+ int i;
+ const char *rst_names[] = { "axi_m", "axi_s", "pipe",
+ "axi_m_sticky", "sticky",
+ "ahb", "sleep", };
+
+ res->iface = devm_clk_get(dev, "iface");
+ if (IS_ERR(res->iface))
+ return PTR_ERR(res->iface);
+
+ res->axi_m_clk = devm_clk_get(dev, "axi_m");
+ if (IS_ERR(res->axi_m_clk))
+ return PTR_ERR(res->axi_m_clk);
+
+ res->axi_s_clk = devm_clk_get(dev, "axi_s");
+ if (IS_ERR(res->axi_s_clk))
+ return PTR_ERR(res->axi_s_clk);
+
+ res->ahb_clk = devm_clk_get(dev, "ahb");
+ if (IS_ERR(res->ahb_clk))
+ return PTR_ERR(res->ahb_clk);
+
+ res->aux_clk = devm_clk_get(dev, "aux");
+ if (IS_ERR(res->aux_clk))
+ return PTR_ERR(res->aux_clk);
+
+ for (i = 0; i < ARRAY_SIZE(rst_names); i++) {
+ res->rst[i] = devm_reset_control_get(dev, rst_names[i]);
+ if (IS_ERR(res->rst[i]))
+ return PTR_ERR(res->rst[i]);
+ }
+
+ return 0;
+}
+
+static void qcom_pcie_deinit_2_3_3(struct qcom_pcie *pcie)
+{
+ struct qcom_pcie_resources_2_3_3 *res = &pcie->res.v2_3_3;
+
+ clk_disable_unprepare(res->iface);
+ clk_disable_unprepare(res->axi_m_clk);
+ clk_disable_unprepare(res->axi_s_clk);
+ clk_disable_unprepare(res->ahb_clk);
+ clk_disable_unprepare(res->aux_clk);
+}
+
+static int qcom_pcie_init_2_3_3(struct qcom_pcie *pcie)
+{
+ struct qcom_pcie_resources_2_3_3 *res = &pcie->res.v2_3_3;
+ struct dw_pcie *pci = pcie->pci;
+ struct device *dev = pci->dev;
+ int i, ret;
+ u32 val;
+
+ for (i = 0; i < ARRAY_SIZE(res->rst); i++) {
+ ret = reset_control_assert(res->rst[i]);
+ if (ret) {
+ dev_err(dev, "reset #%d assert failed (%d)\n", i, ret);
+ return ret;
+ }
+ }
+
+ usleep_range(2000, 2500);
+
+ for (i = 0; i < ARRAY_SIZE(res->rst); i++) {
+ ret = reset_control_deassert(res->rst[i]);
+ if (ret) {
+ dev_err(dev, "reset #%d deassert failed (%d)\n", i,
+ ret);
+ return ret;
+ }
+ }
+
+ /*
+ * Don't have a way to see if the reset has completed.
+ * Wait for some time.
+ */
+ usleep_range(2000, 2500);
+
+ ret = clk_prepare_enable(res->iface);
+ if (ret) {
+ dev_err(dev, "cannot prepare/enable core clock\n");
+ goto err_clk_iface;
+ }
+
+ ret = clk_prepare_enable(res->axi_m_clk);
+ if (ret) {
+ dev_err(dev, "cannot prepare/enable core clock\n");
+ goto err_clk_axi_m;
+ }
+
+ ret = clk_prepare_enable(res->axi_s_clk);
+ if (ret) {
+ dev_err(dev, "cannot prepare/enable axi slave clock\n");
+ goto err_clk_axi_s;
+ }
+
+ ret = clk_prepare_enable(res->ahb_clk);
+ if (ret) {
+ dev_err(dev, "cannot prepare/enable ahb clock\n");
+ goto err_clk_ahb;
+ }
+
+ ret = clk_prepare_enable(res->aux_clk);
+ if (ret) {
+ dev_err(dev, "cannot prepare/enable aux clock\n");
+ goto err_clk_aux;
+ }
+
+ writel(SLV_ADDR_SPACE_SZ,
+ pcie->parf + PCIE20_v3_PARF_SLV_ADDR_SPACE_SIZE);
+
+ val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
+ val &= ~BIT(0);
+ writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);
+
+ writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR);
+
+ writel(MST_WAKEUP_EN | SLV_WAKEUP_EN | MSTR_ACLK_CGC_DIS
+ | SLV_ACLK_CGC_DIS | CORE_CLK_CGC_DIS |
+ AUX_PWR_DET | L23_CLK_RMV_DIS | L1_CLK_RMV_DIS,
+ pcie->parf + PCIE20_PARF_SYS_CTRL);
+ writel(0, pcie->parf + PCIE20_PARF_Q2A_FLUSH);
+
+ writel(CMD_BME_VAL, pci->dbi_base + PCIE20_COMMAND_STATUS);
+ writel(DBI_RO_WR_EN, pci->dbi_base + PCIE20_MISC_CONTROL_1_REG);
+ writel(PCIE_CAP_LINK1_VAL, pci->dbi_base + PCIE20_CAP_LINK_1);
+
+ val = readl(pci->dbi_base + PCIE20_CAP_LINK_CAPABILITIES);
+ val &= ~PCIE20_CAP_ACTIVE_STATE_LINK_PM_SUPPORT;
+ writel(val, pci->dbi_base + PCIE20_CAP_LINK_CAPABILITIES);
+
+ writel(PCIE_CAP_CPL_TIMEOUT_DISABLE, pci->dbi_base +
+ PCIE20_DEVICE_CONTROL2_STATUS2);
+
+ return 0;
+
+err_clk_aux:
+ clk_disable_unprepare(res->ahb_clk);
+err_clk_ahb:
+ clk_disable_unprepare(res->axi_s_clk);
+err_clk_axi_s:
+ clk_disable_unprepare(res->axi_m_clk);
+err_clk_axi_m:
+ clk_disable_unprepare(res->iface);
+err_clk_iface:
+ /*
+ * Not checking for failure, will anyway return
+ * the original failure in 'ret'.
+ */
+ for (i = 0; i < ARRAY_SIZE(res->rst); i++)
+ reset_control_assert(res->rst[i]);
+
+ return ret;
+}
+
static int qcom_pcie_link_up(struct dw_pcie *pci)
{
u16 val = readw(pci->dbi_base + PCIE20_CAP + PCI_EXP_LNKSTA);
@@ -891,7 +1098,7 @@ static int qcom_pcie_link_up(struct dw_pcie *pci)
return !!(val & PCI_EXP_LNKSTA_DLLLA);
}
-static void qcom_pcie_host_init(struct pcie_port *pp)
+static int qcom_pcie_host_init(struct pcie_port *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct qcom_pcie *pcie = to_qcom_pcie(pci);
@@ -901,14 +1108,17 @@ static void qcom_pcie_host_init(struct pcie_port *pp)
ret = pcie->ops->init(pcie);
if (ret)
- goto err_deinit;
+ return ret;
ret = phy_power_on(pcie->phy);
if (ret)
goto err_deinit;
- if (pcie->ops->post_init)
- pcie->ops->post_init(pcie);
+ if (pcie->ops->post_init) {
+ ret = pcie->ops->post_init(pcie);
+ if (ret)
+ goto err_disable_phy;
+ }
dw_pcie_setup_rc(pp);
@@ -921,12 +1131,17 @@ static void qcom_pcie_host_init(struct pcie_port *pp)
if (ret)
goto err;
- return;
+ return 0;
err:
qcom_ep_reset_assert(pcie);
+ if (pcie->ops->post_deinit)
+ pcie->ops->post_deinit(pcie);
+err_disable_phy:
phy_power_off(pcie->phy);
err_deinit:
pcie->ops->deinit(pcie);
+
+ return ret;
}
static int qcom_pcie_rd_own_conf(struct pcie_port *pp, int where, int size,
@@ -950,37 +1165,50 @@ static const struct dw_pcie_host_ops qcom_pcie_dw_ops = {
.rd_own_conf = qcom_pcie_rd_own_conf,
};
-static const struct qcom_pcie_ops ops_v0 = {
- .get_resources = qcom_pcie_get_resources_v0,
- .init = qcom_pcie_init_v0,
- .deinit = qcom_pcie_deinit_v0,
- .ltssm_enable = qcom_pcie_v0_v1_ltssm_enable,
+/* Qcom IP rev.: 2.1.0 Synopsys IP rev.: 4.01a */
+static const struct qcom_pcie_ops ops_2_1_0 = {
+ .get_resources = qcom_pcie_get_resources_2_1_0,
+ .init = qcom_pcie_init_2_1_0,
+ .deinit = qcom_pcie_deinit_2_1_0,
+ .ltssm_enable = qcom_pcie_2_1_0_ltssm_enable,
};
-static const struct qcom_pcie_ops ops_v1 = {
- .get_resources = qcom_pcie_get_resources_v1,
- .init = qcom_pcie_init_v1,
- .deinit = qcom_pcie_deinit_v1,
- .ltssm_enable = qcom_pcie_v0_v1_ltssm_enable,
+/* Qcom IP rev.: 1.0.0 Synopsys IP rev.: 4.11a */
+static const struct qcom_pcie_ops ops_1_0_0 = {
+ .get_resources = qcom_pcie_get_resources_1_0_0,
+ .init = qcom_pcie_init_1_0_0,
+ .deinit = qcom_pcie_deinit_1_0_0,
+ .ltssm_enable = qcom_pcie_2_1_0_ltssm_enable,
};
-static const struct qcom_pcie_ops ops_v2 = {
- .get_resources = qcom_pcie_get_resources_v2,
- .init = qcom_pcie_init_v2,
- .post_init = qcom_pcie_post_init_v2,
- .deinit = qcom_pcie_deinit_v2,
- .ltssm_enable = qcom_pcie_v2_ltssm_enable,
+/* Qcom IP rev.: 2.3.2 Synopsys IP rev.: 4.21a */
+static const struct qcom_pcie_ops ops_2_3_2 = {
+ .get_resources = qcom_pcie_get_resources_2_3_2,
+ .init = qcom_pcie_init_2_3_2,
+ .post_init = qcom_pcie_post_init_2_3_2,
+ .deinit = qcom_pcie_deinit_2_3_2,
+ .post_deinit = qcom_pcie_post_deinit_2_3_2,
+ .ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
};
-static const struct dw_pcie_ops dw_pcie_ops = {
- .link_up = qcom_pcie_link_up,
+/* Qcom IP rev.: 2.4.0 Synopsys IP rev.: 4.20a */
+static const struct qcom_pcie_ops ops_2_4_0 = {
+ .get_resources = qcom_pcie_get_resources_2_4_0,
+ .init = qcom_pcie_init_2_4_0,
+ .deinit = qcom_pcie_deinit_2_4_0,
+ .ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
};
-static const struct qcom_pcie_ops ops_v3 = {
- .get_resources = qcom_pcie_get_resources_v3,
- .init = qcom_pcie_init_v3,
- .deinit = qcom_pcie_deinit_v3,
- .ltssm_enable = qcom_pcie_v2_ltssm_enable,
+/* Qcom IP rev.: 2.3.3 Synopsys IP rev.: 4.30a */
+static const struct qcom_pcie_ops ops_2_3_3 = {
+ .get_resources = qcom_pcie_get_resources_2_3_3,
+ .init = qcom_pcie_init_2_3_3,
+ .deinit = qcom_pcie_deinit_2_3_3,
+ .ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
+};
+
+static const struct dw_pcie_ops dw_pcie_ops = {
+ .link_up = qcom_pcie_link_up,
};
static int qcom_pcie_probe(struct platform_device *pdev)
@@ -1069,11 +1297,12 @@ static int qcom_pcie_probe(struct platform_device *pdev)
}
static const struct of_device_id qcom_pcie_match[] = {
- { .compatible = "qcom,pcie-ipq8064", .data = &ops_v0 },
- { .compatible = "qcom,pcie-apq8064", .data = &ops_v0 },
- { .compatible = "qcom,pcie-apq8084", .data = &ops_v1 },
- { .compatible = "qcom,pcie-msm8996", .data = &ops_v2 },
- { .compatible = "qcom,pcie-ipq4019", .data = &ops_v3 },
+ { .compatible = "qcom,pcie-apq8084", .data = &ops_1_0_0 },
+ { .compatible = "qcom,pcie-ipq8064", .data = &ops_2_1_0 },
+ { .compatible = "qcom,pcie-apq8064", .data = &ops_2_1_0 },
+ { .compatible = "qcom,pcie-msm8996", .data = &ops_2_3_2 },
+ { .compatible = "qcom,pcie-ipq8074", .data = &ops_2_3_3 },
+ { .compatible = "qcom,pcie-ipq4019", .data = &ops_2_4_0 },
{ }
};
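
[Note, not part of the patch] A sketch of how the renamed per-IP-revision qcom_pcie_ops tables above are typically selected at probe time, via the .data pointer of the matching qcom_pcie_match[] entry (assuming <linux/of_device.h>). The foo_* name is hypothetical.

static const struct qcom_pcie_ops *foo_pcie_get_ops(struct platform_device *pdev)
{
	/* returns the ops_* table attached to the matched compatible */
	return of_device_get_match_data(&pdev->dev);
}
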
diff --git a/drivers/pci/dwc/pcie-spear13xx.c b/drivers/pci/dwc/pcie-spear13xx.c
index 80897291e0fb..709189d23b31 100644
--- a/drivers/pci/dwc/pcie-spear13xx.c
+++ b/drivers/pci/dwc/pcie-spear13xx.c
@@ -177,13 +177,15 @@ static int spear13xx_pcie_link_up(struct dw_pcie *pci)
return 0;
}
-static void spear13xx_pcie_host_init(struct pcie_port *pp)
+static int spear13xx_pcie_host_init(struct pcie_port *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct spear13xx_pcie *spear13xx_pcie = to_spear13xx_pcie(pci);
spear13xx_pcie_establish_link(spear13xx_pcie);
spear13xx_pcie_enable_interrupts(spear13xx_pcie);
+
+ return 0;
}
static const struct dw_pcie_host_ops spear13xx_pcie_host_ops = {
@@ -199,9 +201,9 @@ static int spear13xx_add_pcie_port(struct spear13xx_pcie *spear13xx_pcie,
int ret;
pp->irq = platform_get_irq(pdev, 0);
- if (!pp->irq) {
+ if (pp->irq < 0) {
dev_err(dev, "failed to get irq\n");
- return -ENODEV;
+ return pp->irq;
}
ret = devm_request_irq(dev, pp->irq, spear13xx_pcie_irq_handler,
IRQF_SHARED | IRQF_NO_THREAD,
diff --git a/drivers/pci/host/Kconfig b/drivers/pci/host/Kconfig
index 89d61c2cbfaa..b868803792d8 100644
--- a/drivers/pci/host/Kconfig
+++ b/drivers/pci/host/Kconfig
@@ -71,7 +71,7 @@ config PCI_HOST_GENERIC
config PCIE_XILINX
bool "Xilinx AXI PCIe host bridge support"
- depends on ARCH_ZYNQ || MICROBLAZE
+ depends on ARCH_ZYNQ || MICROBLAZE || (MIPS && PCI_DRIVERS_GENERIC)
help
Say 'Y' here if you want kernel to support the Xilinx AXI PCIe
Host Bridge driver.
@@ -182,14 +182,13 @@ config PCIE_ROCKCHIP
config PCIE_MEDIATEK
bool "MediaTek PCIe controller"
- depends on ARM && (ARCH_MEDIATEK || COMPILE_TEST)
+ depends on (ARM || ARM64) && (ARCH_MEDIATEK || COMPILE_TEST)
depends on OF
depends on PCI
select PCIEPORTBUS
help
Say Y here if you want to enable PCIe controller support on
- MT7623 series SoCs. There is one single root complex with 3 root
- ports available. Each port supports Gen2 lane x1.
+ MediaTek SoCs.
config PCIE_TANGO_SMP8759
bool "Tango SMP8759 PCIe controller (DANGEROUS)"
diff --git a/drivers/pci/host/pci-aardvark.c b/drivers/pci/host/pci-aardvark.c
index 5fb9b620ac78..89f4e3d072d7 100644
--- a/drivers/pci/host/pci-aardvark.c
+++ b/drivers/pci/host/pci-aardvark.c
@@ -191,7 +191,6 @@
#define LINK_WAIT_USLEEP_MIN 90000
#define LINK_WAIT_USLEEP_MAX 100000
-#define LEGACY_IRQ_NUM 4
#define MSI_IRQ_NUM 32
struct advk_pcie {
@@ -729,7 +728,7 @@ static int advk_pcie_init_irq_domain(struct advk_pcie *pcie)
irq_chip->irq_unmask = advk_pcie_irq_unmask;
pcie->irq_domain =
- irq_domain_add_linear(pcie_intc_node, LEGACY_IRQ_NUM,
+ irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX,
&advk_pcie_irq_domain_ops, pcie);
if (!pcie->irq_domain) {
dev_err(dev, "Failed to get a INTx IRQ domain\n");
@@ -786,7 +785,7 @@ static void advk_pcie_handle_int(struct advk_pcie *pcie)
advk_pcie_handle_msi(pcie);
/* Process legacy interrupts */
- for (i = 0; i < LEGACY_IRQ_NUM; i++) {
+ for (i = 0; i < PCI_NUM_INTX; i++) {
if (!(status & PCIE_ISR0_INTX_ASSERT(i)))
continue;
diff --git a/drivers/pci/host/pci-ftpci100.c b/drivers/pci/host/pci-ftpci100.c
index 5162dffc102b..96028f01bc90 100644
--- a/drivers/pci/host/pci-ftpci100.c
+++ b/drivers/pci/host/pci-ftpci100.c
@@ -350,12 +350,12 @@ static int faraday_pci_setup_cascaded_irq(struct faraday_pci *p)
/* All PCI IRQs cascade off this one */
irq = of_irq_get(intc, 0);
- if (!irq) {
+ if (irq <= 0) {
dev_err(p->dev, "failed to get parent IRQ\n");
- return -EINVAL;
+ return irq ?: -EINVAL;
}
- p->irqdomain = irq_domain_add_linear(intc, 4,
+ p->irqdomain = irq_domain_add_linear(intc, PCI_NUM_INTX,
&faraday_pci_irqdomain_ops, p);
if (!p->irqdomain) {
dev_err(p->dev, "failed to create Gemini PCI IRQ domain\n");
diff --git a/drivers/pci/host/pci-hyperv.c b/drivers/pci/host/pci-hyperv.c
index 415dcc69a502..334c9a7b8991 100644
--- a/drivers/pci/host/pci-hyperv.c
+++ b/drivers/pci/host/pci-hyperv.c
@@ -50,6 +50,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
+#include <linux/delay.h>
#include <linux/semaphore.h>
#include <linux/irqdomain.h>
#include <asm/irqdomain.h>
@@ -1159,7 +1160,12 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
goto free_int_desc;
}
- wait_for_completion(&comp.comp_pkt.host_event);
+ /*
+ * Since this function is called with IRQ locks held, can't
+ * do normal wait for completion; instead poll.
+ */
+ while (!try_wait_for_completion(&comp.comp_pkt.host_event))
+ udelay(100);
if (comp.comp_pkt.completion_status < 0) {
dev_err(&hbus->hdev->device,
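
[Note, not part of the patch] A minimal sketch of the polling idiom the hv_compose_msi_msg() hunk above adopts: because the function runs with IRQ locks held it may not sleep in wait_for_completion(), so it busy-waits on the completion instead. The foo_* name is hypothetical.

#include <linux/completion.h>
#include <linux/delay.h>

static void foo_wait_for_completion_atomic(struct completion *done)
{
	/* spin in small steps; sleeping is not allowed in this context */
	while (!try_wait_for_completion(done))
		udelay(100);
}
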
diff --git a/drivers/pci/host/pci-mvebu.c b/drivers/pci/host/pci-mvebu.c
index f353a6eb2f01..fa227d81bec6 100644
--- a/drivers/pci/host/pci-mvebu.c
+++ b/drivers/pci/host/pci-mvebu.c
@@ -1186,8 +1186,7 @@ static int mvebu_pcie_powerup(struct mvebu_pcie_port *port)
*/
static void mvebu_pcie_powerdown(struct mvebu_pcie_port *port)
{
- if (port->reset_gpio)
- gpiod_set_value_cansleep(port->reset_gpio, 1);
+ gpiod_set_value_cansleep(port->reset_gpio, 1);
clk_disable_unprepare(port->clk);
}
diff --git a/drivers/pci/host/pci-tegra.c b/drivers/pci/host/pci-tegra.c
index b3722b7709df..a64bd0a19176 100644
--- a/drivers/pci/host/pci-tegra.c
+++ b/drivers/pci/host/pci-tegra.c
@@ -1147,15 +1147,15 @@ static int tegra_pcie_resets_get(struct tegra_pcie *pcie)
{
struct device *dev = pcie->dev;
- pcie->pex_rst = devm_reset_control_get(dev, "pex");
+ pcie->pex_rst = devm_reset_control_get_exclusive(dev, "pex");
if (IS_ERR(pcie->pex_rst))
return PTR_ERR(pcie->pex_rst);
- pcie->afi_rst = devm_reset_control_get(dev, "afi");
+ pcie->afi_rst = devm_reset_control_get_exclusive(dev, "afi");
if (IS_ERR(pcie->afi_rst))
return PTR_ERR(pcie->afi_rst);
- pcie->pcie_xrst = devm_reset_control_get(dev, "pcie_x");
+ pcie->pcie_xrst = devm_reset_control_get_exclusive(dev, "pcie_x");
if (IS_ERR(pcie->pcie_xrst))
return PTR_ERR(pcie->pcie_xrst);
diff --git a/drivers/pci/host/pci-xgene-msi.c b/drivers/pci/host/pci-xgene-msi.c
index f1b633bce525..1f42a202b021 100644
--- a/drivers/pci/host/pci-xgene-msi.c
+++ b/drivers/pci/host/pci-xgene-msi.c
@@ -489,7 +489,7 @@ static int xgene_msi_probe(struct platform_device *pdev)
if (virt_msir < 0) {
dev_err(&pdev->dev, "Cannot translate IRQ index %d\n",
irq_index);
- rc = -EINVAL;
+ rc = virt_msir;
goto error;
}
xgene_msi->msi_groups[irq_index].gic_irq = virt_msir;
diff --git a/drivers/pci/host/pci-xgene.c b/drivers/pci/host/pci-xgene.c
index bd897479a215..087645116ecb 100644
--- a/drivers/pci/host/pci-xgene.c
+++ b/drivers/pci/host/pci-xgene.c
@@ -61,7 +61,7 @@
#define SZ_1T (SZ_1G*1024ULL)
#define PIPE_PHY_RATE_RD(src) ((0xc000 & (u32)(src)) >> 0xe)
-#define ROOT_CAP_AND_CTRL 0x5C
+#define XGENE_V1_PCI_EXP_CAP 0x40
/* PCIe IP version */
#define XGENE_PCIE_IP_VER_UNKN 0
@@ -160,7 +160,7 @@ static bool xgene_pcie_hide_rc_bars(struct pci_bus *bus, int offset)
}
static void __iomem *xgene_pcie_map_bus(struct pci_bus *bus, unsigned int devfn,
- int offset)
+ int offset)
{
if ((pci_is_root_bus(bus) && devfn != 0) ||
xgene_pcie_hide_rc_bars(bus, offset))
@@ -189,7 +189,7 @@ static int xgene_pcie_config_read32(struct pci_bus *bus, unsigned int devfn,
* Avoid this by not claiming to support CRS.
*/
if (pci_is_root_bus(bus) && (port->version == XGENE_PCIE_IP_VER_1) &&
- ((where & ~0x3) == ROOT_CAP_AND_CTRL))
+ ((where & ~0x3) == XGENE_V1_PCI_EXP_CAP + PCI_EXP_RTCTL))
*val &= ~(PCI_EXP_RTCAP_CRSVIS << 16);
if (size <= 2)
@@ -265,12 +265,12 @@ static int xgene_v1_pcie_ecam_init(struct pci_config_window *cfg)
}
struct pci_ecam_ops xgene_v1_pcie_ecam_ops = {
- .bus_shift = 16,
- .init = xgene_v1_pcie_ecam_init,
- .pci_ops = {
- .map_bus = xgene_pcie_map_bus,
- .read = xgene_pcie_config_read32,
- .write = pci_generic_config_write,
+ .bus_shift = 16,
+ .init = xgene_v1_pcie_ecam_init,
+ .pci_ops = {
+ .map_bus = xgene_pcie_map_bus,
+ .read = xgene_pcie_config_read32,
+ .write = pci_generic_config_write,
}
};
@@ -280,12 +280,12 @@ static int xgene_v2_pcie_ecam_init(struct pci_config_window *cfg)
}
struct pci_ecam_ops xgene_v2_pcie_ecam_ops = {
- .bus_shift = 16,
- .init = xgene_v2_pcie_ecam_init,
- .pci_ops = {
- .map_bus = xgene_pcie_map_bus,
- .read = xgene_pcie_config_read32,
- .write = pci_generic_config_write,
+ .bus_shift = 16,
+ .init = xgene_v2_pcie_ecam_init,
+ .pci_ops = {
+ .map_bus = xgene_pcie_map_bus,
+ .read = xgene_pcie_config_read32,
+ .write = pci_generic_config_write,
}
};
#endif
@@ -318,7 +318,7 @@ static u64 xgene_pcie_set_ib_mask(struct xgene_pcie_port *port, u32 addr,
}
static void xgene_pcie_linkup(struct xgene_pcie_port *port,
- u32 *lanes, u32 *speed)
+ u32 *lanes, u32 *speed)
{
u32 val32;
@@ -593,8 +593,7 @@ static void xgene_pcie_clear_config(struct xgene_pcie_port *port)
xgene_pcie_writel(port, i, 0);
}
-static int xgene_pcie_setup(struct xgene_pcie_port *port,
- struct list_head *res,
+static int xgene_pcie_setup(struct xgene_pcie_port *port, struct list_head *res,
resource_size_t io_base)
{
struct device *dev = port->dev;
@@ -706,9 +705,9 @@ static const struct of_device_id xgene_pcie_match_table[] = {
static struct platform_driver xgene_pcie_driver = {
.driver = {
- .name = "xgene-pcie",
- .of_match_table = of_match_ptr(xgene_pcie_match_table),
- .suppress_bind_attrs = true,
+ .name = "xgene-pcie",
+ .of_match_table = of_match_ptr(xgene_pcie_match_table),
+ .suppress_bind_attrs = true,
},
.probe = xgene_pcie_probe_bridge,
};
diff --git a/drivers/pci/host/pcie-altera-msi.c b/drivers/pci/host/pcie-altera-msi.c
index 4e5d628e8cd4..d8141f4865de 100644
--- a/drivers/pci/host/pcie-altera-msi.c
+++ b/drivers/pci/host/pcie-altera-msi.c
@@ -64,13 +64,11 @@ static void altera_msi_isr(struct irq_desc *desc)
struct irq_chip *chip = irq_desc_get_chip(desc);
struct altera_msi *msi;
unsigned long status;
- u32 num_of_vectors;
u32 bit;
u32 virq;
chained_irq_enter(chip, desc);
msi = irq_desc_get_handler_data(desc);
- num_of_vectors = msi->num_of_vectors;
while ((status = msi_readl(msi, MSI_STATUS)) != 0) {
for_each_set_bit(bit, &status, msi->num_of_vectors) {
@@ -267,9 +265,9 @@ static int altera_msi_probe(struct platform_device *pdev)
return ret;
msi->irq = platform_get_irq(pdev, 0);
- if (msi->irq <= 0) {
+ if (msi->irq < 0) {
dev_err(&pdev->dev, "failed to map IRQ: %d\n", msi->irq);
- ret = -ENODEV;
+ ret = msi->irq;
goto err;
}
diff --git a/drivers/pci/host/pcie-altera.c b/drivers/pci/host/pcie-altera.c
index 4ea4f8f5dc77..b468b8cccf8d 100644
--- a/drivers/pci/host/pcie-altera.c
+++ b/drivers/pci/host/pcie-altera.c
@@ -76,8 +76,6 @@
#define LINK_UP_TIMEOUT HZ
#define LINK_RETRAIN_TIMEOUT HZ
-#define INTX_NUM 4
-
#define DWORD_MASK 3
struct altera_pcie {
@@ -464,6 +462,7 @@ static int altera_pcie_intx_map(struct irq_domain *domain, unsigned int irq,
static const struct irq_domain_ops intx_domain_ops = {
.map = altera_pcie_intx_map,
+ .xlate = pci_irqd_intx_xlate,
};
static void altera_pcie_isr(struct irq_desc *desc)
@@ -481,11 +480,11 @@ static void altera_pcie_isr(struct irq_desc *desc)
while ((status = cra_readl(pcie, P2A_INT_STATUS)
& P2A_INT_STS_ALL) != 0) {
- for_each_set_bit(bit, &status, INTX_NUM) {
+ for_each_set_bit(bit, &status, PCI_NUM_INTX) {
/* clear interrupts */
cra_writel(pcie, 1 << bit, P2A_INT_STATUS);
- virq = irq_find_mapping(pcie->irq_domain, bit + 1);
+ virq = irq_find_mapping(pcie->irq_domain, bit);
if (virq)
generic_handle_irq(virq);
else
@@ -536,7 +535,7 @@ static int altera_pcie_init_irq_domain(struct altera_pcie *pcie)
struct device_node *node = dev->of_node;
/* Setup INTx */
- pcie->irq_domain = irq_domain_add_linear(node, INTX_NUM + 1,
+ pcie->irq_domain = irq_domain_add_linear(node, PCI_NUM_INTX,
&intx_domain_ops, pcie);
if (!pcie->irq_domain) {
dev_err(dev, "Failed to get a INTx IRQ domain\n");
@@ -559,9 +558,9 @@ static int altera_pcie_parse_dt(struct altera_pcie *pcie)
/* setup IRQ */
pcie->irq = platform_get_irq(pdev, 0);
- if (pcie->irq <= 0) {
+ if (pcie->irq < 0) {
dev_err(dev, "failed to get IRQ: %d\n", pcie->irq);
- return -EINVAL;
+ return pcie->irq;
}
irq_set_chained_handler_and_data(pcie->irq, altera_pcie_isr, pcie);
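The new .xlate hook is what lets the INTx domain shrink from INTX_NUM + 1 to PCI_NUM_INTX entries: pci_irqd_intx_xlate() converts the 1-based INTA..INTD specifier from the device tree into a 0-based hwirq, so the ISR above can call irq_find_mapping(domain, bit) directly. Roughly (a hand-written sketch of the translation, not the in-tree helper; intx_xlate_sketch is a made-up name):

	static int intx_xlate_sketch(const u32 *intspec, unsigned long *out_hwirq)
	{
		/* intspec[0] is 1 (INTA) .. 4 (INTD); hwirq becomes 0..3 */
		if (intspec[0] < PCI_INTERRUPT_INTA || intspec[0] > PCI_INTERRUPT_INTD)
			return -EINVAL;

		*out_hwirq = intspec[0] - PCI_INTERRUPT_INTA;
		return 0;
	}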
diff --git a/drivers/pci/host/pcie-iproc-msi.c b/drivers/pci/host/pcie-iproc-msi.c
index 9fad7915f82a..2d0f535a2f69 100644
--- a/drivers/pci/host/pcie-iproc-msi.c
+++ b/drivers/pci/host/pcie-iproc-msi.c
@@ -317,7 +317,6 @@ static void iproc_msi_handler(struct irq_desc *desc)
struct irq_chip *chip = irq_desc_get_chip(desc);
struct iproc_msi_grp *grp;
struct iproc_msi *msi;
- struct iproc_pcie *pcie;
u32 eq, head, tail, nr_events;
unsigned long hwirq;
int virq;
@@ -326,7 +325,6 @@ static void iproc_msi_handler(struct irq_desc *desc)
grp = irq_desc_get_handler_data(desc);
msi = grp->msi;
- pcie = msi->pcie;
eq = grp->eq;
/*
diff --git a/drivers/pci/host/pcie-iproc-platform.c b/drivers/pci/host/pcie-iproc-platform.c
index 22531190bc40..a5073a921a04 100644
--- a/drivers/pci/host/pcie-iproc-platform.c
+++ b/drivers/pci/host/pcie-iproc-platform.c
@@ -134,6 +134,13 @@ static int iproc_pcie_pltfm_remove(struct platform_device *pdev)
return iproc_pcie_remove(pcie);
}
+static void iproc_pcie_pltfm_shutdown(struct platform_device *pdev)
+{
+ struct iproc_pcie *pcie = platform_get_drvdata(pdev);
+
+ iproc_pcie_shutdown(pcie);
+}
+
static struct platform_driver iproc_pcie_pltfm_driver = {
.driver = {
.name = "iproc-pcie",
@@ -141,6 +148,7 @@ static struct platform_driver iproc_pcie_pltfm_driver = {
},
.probe = iproc_pcie_pltfm_probe,
.remove = iproc_pcie_pltfm_remove,
+ .shutdown = iproc_pcie_pltfm_shutdown,
};
module_platform_driver(iproc_pcie_pltfm_driver);
diff --git a/drivers/pci/host/pcie-iproc.c b/drivers/pci/host/pcie-iproc.c
index c57486348856..3a8b9d20ee57 100644
--- a/drivers/pci/host/pcie-iproc.c
+++ b/drivers/pci/host/pcie-iproc.c
@@ -31,68 +31,71 @@
#include "pcie-iproc.h"
-#define EP_PERST_SOURCE_SELECT_SHIFT 2
-#define EP_PERST_SOURCE_SELECT BIT(EP_PERST_SOURCE_SELECT_SHIFT)
-#define EP_MODE_SURVIVE_PERST_SHIFT 1
-#define EP_MODE_SURVIVE_PERST BIT(EP_MODE_SURVIVE_PERST_SHIFT)
-#define RC_PCIE_RST_OUTPUT_SHIFT 0
-#define RC_PCIE_RST_OUTPUT BIT(RC_PCIE_RST_OUTPUT_SHIFT)
-#define PAXC_RESET_MASK 0x7f
-
-#define GIC_V3_CFG_SHIFT 0
-#define GIC_V3_CFG BIT(GIC_V3_CFG_SHIFT)
-
-#define MSI_ENABLE_CFG_SHIFT 0
-#define MSI_ENABLE_CFG BIT(MSI_ENABLE_CFG_SHIFT)
-
-#define CFG_IND_ADDR_MASK 0x00001ffc
-
-#define CFG_ADDR_BUS_NUM_SHIFT 20
-#define CFG_ADDR_BUS_NUM_MASK 0x0ff00000
-#define CFG_ADDR_DEV_NUM_SHIFT 15
-#define CFG_ADDR_DEV_NUM_MASK 0x000f8000
-#define CFG_ADDR_FUNC_NUM_SHIFT 12
-#define CFG_ADDR_FUNC_NUM_MASK 0x00007000
-#define CFG_ADDR_REG_NUM_SHIFT 2
-#define CFG_ADDR_REG_NUM_MASK 0x00000ffc
-#define CFG_ADDR_CFG_TYPE_SHIFT 0
-#define CFG_ADDR_CFG_TYPE_MASK 0x00000003
-
-#define SYS_RC_INTX_MASK 0xf
-
-#define PCIE_PHYLINKUP_SHIFT 3
-#define PCIE_PHYLINKUP BIT(PCIE_PHYLINKUP_SHIFT)
-#define PCIE_DL_ACTIVE_SHIFT 2
-#define PCIE_DL_ACTIVE BIT(PCIE_DL_ACTIVE_SHIFT)
-
-#define APB_ERR_EN_SHIFT 0
-#define APB_ERR_EN BIT(APB_ERR_EN_SHIFT)
+#define EP_PERST_SOURCE_SELECT_SHIFT 2
+#define EP_PERST_SOURCE_SELECT BIT(EP_PERST_SOURCE_SELECT_SHIFT)
+#define EP_MODE_SURVIVE_PERST_SHIFT 1
+#define EP_MODE_SURVIVE_PERST BIT(EP_MODE_SURVIVE_PERST_SHIFT)
+#define RC_PCIE_RST_OUTPUT_SHIFT 0
+#define RC_PCIE_RST_OUTPUT BIT(RC_PCIE_RST_OUTPUT_SHIFT)
+#define PAXC_RESET_MASK 0x7f
+
+#define GIC_V3_CFG_SHIFT 0
+#define GIC_V3_CFG BIT(GIC_V3_CFG_SHIFT)
+
+#define MSI_ENABLE_CFG_SHIFT 0
+#define MSI_ENABLE_CFG BIT(MSI_ENABLE_CFG_SHIFT)
+
+#define CFG_IND_ADDR_MASK 0x00001ffc
+
+#define CFG_ADDR_BUS_NUM_SHIFT 20
+#define CFG_ADDR_BUS_NUM_MASK 0x0ff00000
+#define CFG_ADDR_DEV_NUM_SHIFT 15
+#define CFG_ADDR_DEV_NUM_MASK 0x000f8000
+#define CFG_ADDR_FUNC_NUM_SHIFT 12
+#define CFG_ADDR_FUNC_NUM_MASK 0x00007000
+#define CFG_ADDR_REG_NUM_SHIFT 2
+#define CFG_ADDR_REG_NUM_MASK 0x00000ffc
+#define CFG_ADDR_CFG_TYPE_SHIFT 0
+#define CFG_ADDR_CFG_TYPE_MASK 0x00000003
+
+#define SYS_RC_INTX_MASK 0xf
+
+#define PCIE_PHYLINKUP_SHIFT 3
+#define PCIE_PHYLINKUP BIT(PCIE_PHYLINKUP_SHIFT)
+#define PCIE_DL_ACTIVE_SHIFT 2
+#define PCIE_DL_ACTIVE BIT(PCIE_DL_ACTIVE_SHIFT)
+
+#define APB_ERR_EN_SHIFT 0
+#define APB_ERR_EN BIT(APB_ERR_EN_SHIFT)
+
+#define CFG_RETRY_STATUS 0xffff0001
+#define CFG_RETRY_STATUS_TIMEOUT_US 500000 /* 500 milliseconds */
/* derive the enum index of the outbound/inbound mapping registers */
-#define MAP_REG(base_reg, index) ((base_reg) + (index) * 2)
+#define MAP_REG(base_reg, index) ((base_reg) + (index) * 2)
/*
* Maximum number of outbound mapping window sizes that can be supported by any
* OARR/OMAP mapping pair
*/
-#define MAX_NUM_OB_WINDOW_SIZES 4
+#define MAX_NUM_OB_WINDOW_SIZES 4
-#define OARR_VALID_SHIFT 0
-#define OARR_VALID BIT(OARR_VALID_SHIFT)
-#define OARR_SIZE_CFG_SHIFT 1
+#define OARR_VALID_SHIFT 0
+#define OARR_VALID BIT(OARR_VALID_SHIFT)
+#define OARR_SIZE_CFG_SHIFT 1
/*
* Maximum number of inbound mapping region sizes that can be supported by an
* IARR
*/
-#define MAX_NUM_IB_REGION_SIZES 9
+#define MAX_NUM_IB_REGION_SIZES 9
-#define IMAP_VALID_SHIFT 0
-#define IMAP_VALID BIT(IMAP_VALID_SHIFT)
+#define IMAP_VALID_SHIFT 0
+#define IMAP_VALID BIT(IMAP_VALID_SHIFT)
-#define PCI_EXP_CAP 0xac
+#define IPROC_PCI_EXP_CAP 0xac
-#define IPROC_PCIE_REG_INVALID 0xffff
+#define IPROC_PCIE_REG_INVALID 0xffff
/**
* iProc PCIe outbound mapping controller specific parameters
@@ -304,80 +307,80 @@ enum iproc_pcie_reg {
/* iProc PCIe PAXB BCMA registers */
static const u16 iproc_pcie_reg_paxb_bcma[] = {
- [IPROC_PCIE_CLK_CTRL] = 0x000,
- [IPROC_PCIE_CFG_IND_ADDR] = 0x120,
- [IPROC_PCIE_CFG_IND_DATA] = 0x124,
- [IPROC_PCIE_CFG_ADDR] = 0x1f8,
- [IPROC_PCIE_CFG_DATA] = 0x1fc,
- [IPROC_PCIE_INTX_EN] = 0x330,
- [IPROC_PCIE_LINK_STATUS] = 0xf0c,
+ [IPROC_PCIE_CLK_CTRL] = 0x000,
+ [IPROC_PCIE_CFG_IND_ADDR] = 0x120,
+ [IPROC_PCIE_CFG_IND_DATA] = 0x124,
+ [IPROC_PCIE_CFG_ADDR] = 0x1f8,
+ [IPROC_PCIE_CFG_DATA] = 0x1fc,
+ [IPROC_PCIE_INTX_EN] = 0x330,
+ [IPROC_PCIE_LINK_STATUS] = 0xf0c,
};
/* iProc PCIe PAXB registers */
static const u16 iproc_pcie_reg_paxb[] = {
- [IPROC_PCIE_CLK_CTRL] = 0x000,
- [IPROC_PCIE_CFG_IND_ADDR] = 0x120,
- [IPROC_PCIE_CFG_IND_DATA] = 0x124,
- [IPROC_PCIE_CFG_ADDR] = 0x1f8,
- [IPROC_PCIE_CFG_DATA] = 0x1fc,
- [IPROC_PCIE_INTX_EN] = 0x330,
- [IPROC_PCIE_OARR0] = 0xd20,
- [IPROC_PCIE_OMAP0] = 0xd40,
- [IPROC_PCIE_OARR1] = 0xd28,
- [IPROC_PCIE_OMAP1] = 0xd48,
- [IPROC_PCIE_LINK_STATUS] = 0xf0c,
- [IPROC_PCIE_APB_ERR_EN] = 0xf40,
+ [IPROC_PCIE_CLK_CTRL] = 0x000,
+ [IPROC_PCIE_CFG_IND_ADDR] = 0x120,
+ [IPROC_PCIE_CFG_IND_DATA] = 0x124,
+ [IPROC_PCIE_CFG_ADDR] = 0x1f8,
+ [IPROC_PCIE_CFG_DATA] = 0x1fc,
+ [IPROC_PCIE_INTX_EN] = 0x330,
+ [IPROC_PCIE_OARR0] = 0xd20,
+ [IPROC_PCIE_OMAP0] = 0xd40,
+ [IPROC_PCIE_OARR1] = 0xd28,
+ [IPROC_PCIE_OMAP1] = 0xd48,
+ [IPROC_PCIE_LINK_STATUS] = 0xf0c,
+ [IPROC_PCIE_APB_ERR_EN] = 0xf40,
};
/* iProc PCIe PAXB v2 registers */
static const u16 iproc_pcie_reg_paxb_v2[] = {
- [IPROC_PCIE_CLK_CTRL] = 0x000,
- [IPROC_PCIE_CFG_IND_ADDR] = 0x120,
- [IPROC_PCIE_CFG_IND_DATA] = 0x124,
- [IPROC_PCIE_CFG_ADDR] = 0x1f8,
- [IPROC_PCIE_CFG_DATA] = 0x1fc,
- [IPROC_PCIE_INTX_EN] = 0x330,
- [IPROC_PCIE_OARR0] = 0xd20,
- [IPROC_PCIE_OMAP0] = 0xd40,
- [IPROC_PCIE_OARR1] = 0xd28,
- [IPROC_PCIE_OMAP1] = 0xd48,
- [IPROC_PCIE_OARR2] = 0xd60,
- [IPROC_PCIE_OMAP2] = 0xd68,
- [IPROC_PCIE_OARR3] = 0xdf0,
- [IPROC_PCIE_OMAP3] = 0xdf8,
- [IPROC_PCIE_IARR0] = 0xd00,
- [IPROC_PCIE_IMAP0] = 0xc00,
- [IPROC_PCIE_IARR2] = 0xd10,
- [IPROC_PCIE_IMAP2] = 0xcc0,
- [IPROC_PCIE_IARR3] = 0xe00,
- [IPROC_PCIE_IMAP3] = 0xe08,
- [IPROC_PCIE_IARR4] = 0xe68,
- [IPROC_PCIE_IMAP4] = 0xe70,
- [IPROC_PCIE_LINK_STATUS] = 0xf0c,
- [IPROC_PCIE_APB_ERR_EN] = 0xf40,
+ [IPROC_PCIE_CLK_CTRL] = 0x000,
+ [IPROC_PCIE_CFG_IND_ADDR] = 0x120,
+ [IPROC_PCIE_CFG_IND_DATA] = 0x124,
+ [IPROC_PCIE_CFG_ADDR] = 0x1f8,
+ [IPROC_PCIE_CFG_DATA] = 0x1fc,
+ [IPROC_PCIE_INTX_EN] = 0x330,
+ [IPROC_PCIE_OARR0] = 0xd20,
+ [IPROC_PCIE_OMAP0] = 0xd40,
+ [IPROC_PCIE_OARR1] = 0xd28,
+ [IPROC_PCIE_OMAP1] = 0xd48,
+ [IPROC_PCIE_OARR2] = 0xd60,
+ [IPROC_PCIE_OMAP2] = 0xd68,
+ [IPROC_PCIE_OARR3] = 0xdf0,
+ [IPROC_PCIE_OMAP3] = 0xdf8,
+ [IPROC_PCIE_IARR0] = 0xd00,
+ [IPROC_PCIE_IMAP0] = 0xc00,
+ [IPROC_PCIE_IARR2] = 0xd10,
+ [IPROC_PCIE_IMAP2] = 0xcc0,
+ [IPROC_PCIE_IARR3] = 0xe00,
+ [IPROC_PCIE_IMAP3] = 0xe08,
+ [IPROC_PCIE_IARR4] = 0xe68,
+ [IPROC_PCIE_IMAP4] = 0xe70,
+ [IPROC_PCIE_LINK_STATUS] = 0xf0c,
+ [IPROC_PCIE_APB_ERR_EN] = 0xf40,
};
/* iProc PCIe PAXC v1 registers */
static const u16 iproc_pcie_reg_paxc[] = {
- [IPROC_PCIE_CLK_CTRL] = 0x000,
- [IPROC_PCIE_CFG_IND_ADDR] = 0x1f0,
- [IPROC_PCIE_CFG_IND_DATA] = 0x1f4,
- [IPROC_PCIE_CFG_ADDR] = 0x1f8,
- [IPROC_PCIE_CFG_DATA] = 0x1fc,
+ [IPROC_PCIE_CLK_CTRL] = 0x000,
+ [IPROC_PCIE_CFG_IND_ADDR] = 0x1f0,
+ [IPROC_PCIE_CFG_IND_DATA] = 0x1f4,
+ [IPROC_PCIE_CFG_ADDR] = 0x1f8,
+ [IPROC_PCIE_CFG_DATA] = 0x1fc,
};
/* iProc PCIe PAXC v2 registers */
static const u16 iproc_pcie_reg_paxc_v2[] = {
- [IPROC_PCIE_MSI_GIC_MODE] = 0x050,
- [IPROC_PCIE_MSI_BASE_ADDR] = 0x074,
- [IPROC_PCIE_MSI_WINDOW_SIZE] = 0x078,
- [IPROC_PCIE_MSI_ADDR_LO] = 0x07c,
- [IPROC_PCIE_MSI_ADDR_HI] = 0x080,
- [IPROC_PCIE_MSI_EN_CFG] = 0x09c,
- [IPROC_PCIE_CFG_IND_ADDR] = 0x1f0,
- [IPROC_PCIE_CFG_IND_DATA] = 0x1f4,
- [IPROC_PCIE_CFG_ADDR] = 0x1f8,
- [IPROC_PCIE_CFG_DATA] = 0x1fc,
+ [IPROC_PCIE_MSI_GIC_MODE] = 0x050,
+ [IPROC_PCIE_MSI_BASE_ADDR] = 0x074,
+ [IPROC_PCIE_MSI_WINDOW_SIZE] = 0x078,
+ [IPROC_PCIE_MSI_ADDR_LO] = 0x07c,
+ [IPROC_PCIE_MSI_ADDR_HI] = 0x080,
+ [IPROC_PCIE_MSI_EN_CFG] = 0x09c,
+ [IPROC_PCIE_CFG_IND_ADDR] = 0x1f0,
+ [IPROC_PCIE_CFG_IND_DATA] = 0x1f4,
+ [IPROC_PCIE_CFG_ADDR] = 0x1f8,
+ [IPROC_PCIE_CFG_DATA] = 0x1fc,
};
static inline struct iproc_pcie *iproc_data(struct pci_bus *bus)
@@ -448,18 +451,112 @@ static inline void iproc_pcie_apb_err_disable(struct pci_bus *bus,
}
}
+static void __iomem *iproc_pcie_map_ep_cfg_reg(struct iproc_pcie *pcie,
+ unsigned int busno,
+ unsigned int slot,
+ unsigned int fn,
+ int where)
+{
+ u16 offset;
+ u32 val;
+
+ /* EP device access */
+ val = (busno << CFG_ADDR_BUS_NUM_SHIFT) |
+ (slot << CFG_ADDR_DEV_NUM_SHIFT) |
+ (fn << CFG_ADDR_FUNC_NUM_SHIFT) |
+ (where & CFG_ADDR_REG_NUM_MASK) |
+ (1 & CFG_ADDR_CFG_TYPE_MASK);
+
+ iproc_pcie_write_reg(pcie, IPROC_PCIE_CFG_ADDR, val);
+ offset = iproc_pcie_reg_offset(pcie, IPROC_PCIE_CFG_DATA);
+
+ if (iproc_pcie_reg_is_invalid(offset))
+ return NULL;
+
+ return (pcie->base + offset);
+}
+
+static unsigned int iproc_pcie_cfg_retry(void __iomem *cfg_data_p)
+{
+ int timeout = CFG_RETRY_STATUS_TIMEOUT_US;
+ unsigned int data;
+
+ /*
+ * As per PCIe spec r3.1, sec 2.3.2, CRS Software Visibility only
+ * affects config reads of the Vendor ID. For config writes or any
+ * other config reads, the Root may automatically reissue the
+ * configuration request again as a new request.
+ *
+ * For config reads, this hardware returns CFG_RETRY_STATUS data
+ * when it receives a CRS completion, regardless of the address of
+ * the read or the CRS Software Visibility Enable bit. As a
+ * partial workaround for this, we retry in software any read that
+ * returns CFG_RETRY_STATUS.
+ *
+ * Note that a non-Vendor ID config register may have a value of
+ * CFG_RETRY_STATUS. If we read that, we can't distinguish it from
+ * a CRS completion, so we will incorrectly retry the read and
+ * eventually return the wrong data (0xffffffff).
+ */
+ data = readl(cfg_data_p);
+ while (data == CFG_RETRY_STATUS && timeout--) {
+ udelay(1);
+ data = readl(cfg_data_p);
+ }
+
+ if (data == CFG_RETRY_STATUS)
+ data = 0xffffffff;
+
+ return data;
+}
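CFG_RETRY_STATUS (0xffff0001) is the value a CRS completion produces for a Vendor ID read: 0x0001 in the low 16 bits with the upper half all ones. For reference, the same retry loop can be expressed with the readl_poll_timeout_atomic() helper from <linux/iopoll.h>; this is only an illustrative sketch (iproc_pcie_cfg_retry_sketch is a made-up name), the patch keeps the open-coded loop:

	static unsigned int iproc_pcie_cfg_retry_sketch(void __iomem *cfg_data_p)
	{
		unsigned int data;
		int ret;

		ret = readl_poll_timeout_atomic(cfg_data_p, data,
						data != CFG_RETRY_STATUS, 1,
						CFG_RETRY_STATUS_TIMEOUT_US);

		/* on timeout, fall back to all ones, as the driver does */
		return ret ? 0xffffffff : data;
	}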
+
+static int iproc_pcie_config_read(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 *val)
+{
+ struct iproc_pcie *pcie = iproc_data(bus);
+ unsigned int slot = PCI_SLOT(devfn);
+ unsigned int fn = PCI_FUNC(devfn);
+ unsigned int busno = bus->number;
+ void __iomem *cfg_data_p;
+ unsigned int data;
+ int ret;
+
+ /* root complex access */
+ if (busno == 0) {
+ ret = pci_generic_config_read32(bus, devfn, where, size, val);
+ if (ret != PCIBIOS_SUCCESSFUL)
+ return ret;
+
+ /* Don't advertise CRS SV support */
+ if ((where & ~0x3) == IPROC_PCI_EXP_CAP + PCI_EXP_RTCTL)
+ *val &= ~(PCI_EXP_RTCAP_CRSVIS << 16);
+ return PCIBIOS_SUCCESSFUL;
+ }
+
+ cfg_data_p = iproc_pcie_map_ep_cfg_reg(pcie, busno, slot, fn, where);
+
+ if (!cfg_data_p)
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ data = iproc_pcie_cfg_retry(cfg_data_p);
+
+ *val = data;
+ if (size <= 2)
+ *val = (data >> (8 * (where & 3))) & ((1 << (size * 8)) - 1);
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
/**
 * Note: access to the configuration registers is protected at the higher layer
* by 'pci_lock' in drivers/pci/access.c
*/
static void __iomem *iproc_pcie_map_cfg_bus(struct iproc_pcie *pcie,
- int busno,
- unsigned int devfn,
+ int busno, unsigned int devfn,
int where)
{
unsigned slot = PCI_SLOT(devfn);
unsigned fn = PCI_FUNC(devfn);
- u32 val;
u16 offset;
/* root complex access */
@@ -484,18 +581,7 @@ static void __iomem *iproc_pcie_map_cfg_bus(struct iproc_pcie *pcie,
if (slot > 0)
return NULL;
- /* EP device access */
- val = (busno << CFG_ADDR_BUS_NUM_SHIFT) |
- (slot << CFG_ADDR_DEV_NUM_SHIFT) |
- (fn << CFG_ADDR_FUNC_NUM_SHIFT) |
- (where & CFG_ADDR_REG_NUM_MASK) |
- (1 & CFG_ADDR_CFG_TYPE_MASK);
- iproc_pcie_write_reg(pcie, IPROC_PCIE_CFG_ADDR, val);
- offset = iproc_pcie_reg_offset(pcie, IPROC_PCIE_CFG_DATA);
- if (iproc_pcie_reg_is_invalid(offset))
- return NULL;
- else
- return (pcie->base + offset);
+ return iproc_pcie_map_ep_cfg_reg(pcie, busno, slot, fn, where);
}
static void __iomem *iproc_pcie_bus_map_cfg_bus(struct pci_bus *bus,
@@ -554,9 +640,13 @@ static int iproc_pcie_config_read32(struct pci_bus *bus, unsigned int devfn,
int where, int size, u32 *val)
{
int ret;
+ struct iproc_pcie *pcie = iproc_data(bus);
iproc_pcie_apb_err_disable(bus, true);
- ret = pci_generic_config_read32(bus, devfn, where, size, val);
+ if (pcie->type == IPROC_PCIE_PAXB_V2)
+ ret = iproc_pcie_config_read(bus, devfn, where, size, val);
+ else
+ ret = pci_generic_config_read32(bus, devfn, where, size, val);
iproc_pcie_apb_err_disable(bus, false);
return ret;
@@ -580,7 +670,7 @@ static struct pci_ops iproc_pcie_ops = {
.write = iproc_pcie_config_write32,
};
-static void iproc_pcie_reset(struct iproc_pcie *pcie)
+static void iproc_pcie_perst_ctrl(struct iproc_pcie *pcie, bool assert)
{
u32 val;
@@ -592,26 +682,33 @@ static void iproc_pcie_reset(struct iproc_pcie *pcie)
if (pcie->ep_is_internal)
return;
- /*
- * Select perst_b signal as reset source. Put the device into reset,
- * and then bring it out of reset
- */
- val = iproc_pcie_read_reg(pcie, IPROC_PCIE_CLK_CTRL);
- val &= ~EP_PERST_SOURCE_SELECT & ~EP_MODE_SURVIVE_PERST &
- ~RC_PCIE_RST_OUTPUT;
- iproc_pcie_write_reg(pcie, IPROC_PCIE_CLK_CTRL, val);
- udelay(250);
-
- val |= RC_PCIE_RST_OUTPUT;
- iproc_pcie_write_reg(pcie, IPROC_PCIE_CLK_CTRL, val);
- msleep(100);
+ if (assert) {
+ val = iproc_pcie_read_reg(pcie, IPROC_PCIE_CLK_CTRL);
+ val &= ~EP_PERST_SOURCE_SELECT & ~EP_MODE_SURVIVE_PERST &
+ ~RC_PCIE_RST_OUTPUT;
+ iproc_pcie_write_reg(pcie, IPROC_PCIE_CLK_CTRL, val);
+ udelay(250);
+ } else {
+ val = iproc_pcie_read_reg(pcie, IPROC_PCIE_CLK_CTRL);
+ val |= RC_PCIE_RST_OUTPUT;
+ iproc_pcie_write_reg(pcie, IPROC_PCIE_CLK_CTRL, val);
+ msleep(100);
+ }
+}
+
+int iproc_pcie_shutdown(struct iproc_pcie *pcie)
+{
+ iproc_pcie_perst_ctrl(pcie, true);
+ msleep(500);
+
+ return 0;
}
+EXPORT_SYMBOL_GPL(iproc_pcie_shutdown);
static int iproc_pcie_check_link(struct iproc_pcie *pcie)
{
struct device *dev = pcie->dev;
u32 hdr_type, link_ctrl, link_status, class, val;
- u16 pos = PCI_EXP_CAP;
bool link_is_active = false;
/*
@@ -628,16 +725,16 @@ static int iproc_pcie_check_link(struct iproc_pcie *pcie)
}
/* make sure we are not in EP mode */
- iproc_pci_raw_config_read32(pcie, 0, PCI_HEADER_TYPE, 1, &hdr_type);
+ iproc_pci_raw_config_read32(pcie, 0, PCI_HEADER_TYPE, 1, &hdr_type);
if ((hdr_type & 0x7f) != PCI_HEADER_TYPE_BRIDGE) {
dev_err(dev, "in EP mode, hdr=%#02x\n", hdr_type);
return -EFAULT;
}
/* force class to PCI_CLASS_BRIDGE_PCI (0x0604) */
-#define PCI_BRIDGE_CTRL_REG_OFFSET 0x43c
-#define PCI_CLASS_BRIDGE_MASK 0xffff00
-#define PCI_CLASS_BRIDGE_SHIFT 8
+#define PCI_BRIDGE_CTRL_REG_OFFSET 0x43c
+#define PCI_CLASS_BRIDGE_MASK 0xffff00
+#define PCI_CLASS_BRIDGE_SHIFT 8
iproc_pci_raw_config_read32(pcie, 0, PCI_BRIDGE_CTRL_REG_OFFSET,
4, &class);
class &= ~PCI_CLASS_BRIDGE_MASK;
@@ -646,31 +743,31 @@ static int iproc_pcie_check_link(struct iproc_pcie *pcie)
4, class);
/* check link status to see if link is active */
- iproc_pci_raw_config_read32(pcie, 0, pos + PCI_EXP_LNKSTA,
+ iproc_pci_raw_config_read32(pcie, 0, IPROC_PCI_EXP_CAP + PCI_EXP_LNKSTA,
2, &link_status);
if (link_status & PCI_EXP_LNKSTA_NLW)
link_is_active = true;
if (!link_is_active) {
/* try GEN 1 link speed */
-#define PCI_TARGET_LINK_SPEED_MASK 0xf
-#define PCI_TARGET_LINK_SPEED_GEN2 0x2
-#define PCI_TARGET_LINK_SPEED_GEN1 0x1
+#define PCI_TARGET_LINK_SPEED_MASK 0xf
+#define PCI_TARGET_LINK_SPEED_GEN2 0x2
+#define PCI_TARGET_LINK_SPEED_GEN1 0x1
iproc_pci_raw_config_read32(pcie, 0,
- pos + PCI_EXP_LNKCTL2, 4,
- &link_ctrl);
+ IPROC_PCI_EXP_CAP + PCI_EXP_LNKCTL2,
+ 4, &link_ctrl);
if ((link_ctrl & PCI_TARGET_LINK_SPEED_MASK) ==
PCI_TARGET_LINK_SPEED_GEN2) {
link_ctrl &= ~PCI_TARGET_LINK_SPEED_MASK;
link_ctrl |= PCI_TARGET_LINK_SPEED_GEN1;
iproc_pci_raw_config_write32(pcie, 0,
- pos + PCI_EXP_LNKCTL2,
- 4, link_ctrl);
+ IPROC_PCI_EXP_CAP + PCI_EXP_LNKCTL2,
+ 4, link_ctrl);
msleep(100);
iproc_pci_raw_config_read32(pcie, 0,
- pos + PCI_EXP_LNKSTA,
- 2, &link_status);
+ IPROC_PCI_EXP_CAP + PCI_EXP_LNKSTA,
+ 2, &link_status);
if (link_status & PCI_EXP_LNKSTA_NLW)
link_is_active = true;
}
@@ -1223,6 +1320,8 @@ static int iproc_pcie_rev_init(struct iproc_pcie *pcie)
pcie->ib.nr_regions = ARRAY_SIZE(paxb_v2_ib_map);
pcie->ib_map = paxb_v2_ib_map;
pcie->need_msi_steer = true;
+ dev_warn(dev, "reads of config registers that contain %#x return incorrect data\n",
+ CFG_RETRY_STATUS);
break;
case IPROC_PCIE_PAXC:
regs = iproc_pcie_reg_paxc;
@@ -1286,7 +1385,8 @@ int iproc_pcie_setup(struct iproc_pcie *pcie, struct list_head *res)
goto err_exit_phy;
}
- iproc_pcie_reset(pcie);
+ iproc_pcie_perst_ctrl(pcie, true);
+ iproc_pcie_perst_ctrl(pcie, false);
if (pcie->need_ob_cfg) {
ret = iproc_pcie_map_ranges(pcie, res);
diff --git a/drivers/pci/host/pcie-iproc.h b/drivers/pci/host/pcie-iproc.h
index 0bbe2ea44f3e..a6b55cec9a66 100644
--- a/drivers/pci/host/pcie-iproc.h
+++ b/drivers/pci/host/pcie-iproc.h
@@ -110,6 +110,7 @@ struct iproc_pcie {
int iproc_pcie_setup(struct iproc_pcie *pcie, struct list_head *res);
int iproc_pcie_remove(struct iproc_pcie *pcie);
+int iproc_pcie_shutdown(struct iproc_pcie *pcie);
#ifdef CONFIG_PCIE_IPROC_MSI
int iproc_msi_init(struct iproc_pcie *pcie, struct device_node *node);
diff --git a/drivers/pci/host/pcie-mediatek.c b/drivers/pci/host/pcie-mediatek.c
index 5a9d8589ea0b..db93efdf1d63 100644
--- a/drivers/pci/host/pcie-mediatek.c
+++ b/drivers/pci/host/pcie-mediatek.c
@@ -3,6 +3,7 @@
*
* Copyright (c) 2017 MediaTek Inc.
* Author: Ryder Lee <ryder.lee@mediatek.com>
+ * Honghui Zhang <honghui.zhang@mediatek.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -16,6 +17,9 @@
#include <linux/clk.h>
#include <linux/delay.h>
+#include <linux/iopoll.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/of_address.h>
#include <linux/of_pci.h>
@@ -63,16 +67,104 @@
#define PCIE_FC_CREDIT_MASK (GENMASK(31, 31) | GENMASK(28, 16))
#define PCIE_FC_CREDIT_VAL(x) ((x) << 16)
+/* PCIe V2 share registers */
+#define PCIE_SYS_CFG_V2 0x0
+#define PCIE_CSR_LTSSM_EN(x) BIT(0 + (x) * 8)
+#define PCIE_CSR_ASPM_L1_EN(x) BIT(1 + (x) * 8)
+
+/* PCIe V2 per-port registers */
+#define PCIE_MSI_VECTOR 0x0c0
+#define PCIE_INT_MASK 0x420
+#define INTX_MASK GENMASK(19, 16)
+#define INTX_SHIFT 16
+#define PCIE_INT_STATUS 0x424
+#define MSI_STATUS BIT(23)
+#define PCIE_IMSI_STATUS 0x42c
+#define PCIE_IMSI_ADDR 0x430
+#define MSI_MASK BIT(23)
+#define MTK_MSI_IRQS_NUM 32
+
+#define PCIE_AHB_TRANS_BASE0_L 0x438
+#define PCIE_AHB_TRANS_BASE0_H 0x43c
+#define AHB2PCIE_SIZE(x) ((x) & GENMASK(4, 0))
+#define PCIE_AXI_WINDOW0 0x448
+#define WIN_ENABLE BIT(7)
+
+/* PCIe V2 configuration transaction header */
+#define PCIE_CFG_HEADER0 0x460
+#define PCIE_CFG_HEADER1 0x464
+#define PCIE_CFG_HEADER2 0x468
+#define PCIE_CFG_WDATA 0x470
+#define PCIE_APP_TLP_REQ 0x488
+#define PCIE_CFG_RDATA 0x48c
+#define APP_CFG_REQ BIT(0)
+#define APP_CPL_STATUS GENMASK(7, 5)
+
+#define CFG_WRRD_TYPE_0 4
+#define CFG_WR_FMT 2
+#define CFG_RD_FMT 0
+
+#define CFG_DW0_LENGTH(length) ((length) & GENMASK(9, 0))
+#define CFG_DW0_TYPE(type) (((type) << 24) & GENMASK(28, 24))
+#define CFG_DW0_FMT(fmt) (((fmt) << 29) & GENMASK(31, 29))
+#define CFG_DW2_REGN(regn) ((regn) & GENMASK(11, 2))
+#define CFG_DW2_FUN(fun) (((fun) << 16) & GENMASK(18, 16))
+#define CFG_DW2_DEV(dev) (((dev) << 19) & GENMASK(23, 19))
+#define CFG_DW2_BUS(bus) (((bus) << 24) & GENMASK(31, 24))
+#define CFG_HEADER_DW0(type, fmt) \
+ (CFG_DW0_LENGTH(1) | CFG_DW0_TYPE(type) | CFG_DW0_FMT(fmt))
+#define CFG_HEADER_DW1(where, size) \
+ (GENMASK(((size) - 1), 0) << ((where) & 0x3))
+#define CFG_HEADER_DW2(regn, fun, dev, bus) \
+ (CFG_DW2_REGN(regn) | CFG_DW2_FUN(fun) | \
+ CFG_DW2_DEV(dev) | CFG_DW2_BUS(bus))
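To make the header encoding concrete, a worked example with illustrative values (not part of the patch): a 2-byte config read of the Device ID (where = 0x02) on bus 1, device 0, function 0 builds:

	/*
	 * CFG_HEADER_DW0(CFG_WRRD_TYPE_0, CFG_RD_FMT) = 0x04000001  (len 1, type 4, fmt 0)
	 * CFG_HEADER_DW1(0x02, 2)                     = 0x0000000c  (byte enables for bytes 2-3)
	 * CFG_HEADER_DW2(0x02, 0, 0, 1)               = 0x01000000  (bus 1; bits 1:0 of 'where'
	 *                                                            are dropped by the regn mask)
	 */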
+
+#define PCIE_RST_CTRL 0x510
+#define PCIE_PHY_RSTB BIT(0)
+#define PCIE_PIPE_SRSTB BIT(1)
+#define PCIE_MAC_SRSTB BIT(2)
+#define PCIE_CRSTB BIT(3)
+#define PCIE_PERSTB BIT(8)
+#define PCIE_LINKDOWN_RST_EN GENMASK(15, 13)
+#define PCIE_LINK_STATUS_V2 0x804
+#define PCIE_PORT_LINKUP_V2 BIT(10)
+
+struct mtk_pcie_port;
+
+/**
+ * struct mtk_pcie_soc - differentiate between host generations
+ * @has_msi: whether this host supports MSI interrupts or not
+ * @ops: pointer to configuration access functions
+ * @startup: pointer to controller setup function
+ * @setup_irq: pointer to IRQ setup function
+ */
+struct mtk_pcie_soc {
+ bool has_msi;
+ struct pci_ops *ops;
+ int (*startup)(struct mtk_pcie_port *port);
+ int (*setup_irq)(struct mtk_pcie_port *port, struct device_node *node);
+};
+
/**
* struct mtk_pcie_port - PCIe port information
* @base: IO mapped register base
* @list: port list
* @pcie: pointer to PCIe host info
* @reset: pointer to port reset control
- * @sys_ck: pointer to bus clock
- * @phy: pointer to phy control block
+ * @sys_ck: pointer to transaction/data link layer clock
+ * @ahb_ck: pointer to AHB slave interface operating clock for CSR access
+ * and RC initiated MMIO access
+ * @axi_ck: pointer to application layer MMIO channel operating clock
+ * @aux_ck: pointer to pe2_mac_bridge and pe2_mac_core operating clock
+ * when pcie_mac_ck/pcie_pipe_ck is turned off
+ * @obff_ck: pointer to OBFF functional block operating clock
+ * @pipe_ck: pointer to LTSSM and PHY/MAC layer operating clock
+ * @phy: pointer to PHY control block
* @lane: lane count
- * @index: port index
+ * @slot: port slot
+ * @irq_domain: legacy INTx IRQ domain
+ * @msi_domain: MSI IRQ domain
+ * @msi_irq_in_use: bit map for assigned MSI IRQ
*/
struct mtk_pcie_port {
void __iomem *base;
@@ -80,9 +172,17 @@ struct mtk_pcie_port {
struct mtk_pcie *pcie;
struct reset_control *reset;
struct clk *sys_ck;
+ struct clk *ahb_ck;
+ struct clk *axi_ck;
+ struct clk *aux_ck;
+ struct clk *obff_ck;
+ struct clk *pipe_ck;
struct phy *phy;
u32 lane;
- u32 index;
+ u32 slot;
+ struct irq_domain *irq_domain;
+ struct irq_domain *msi_domain;
+ DECLARE_BITMAP(msi_irq_in_use, MTK_MSI_IRQS_NUM);
};
/**
@@ -96,6 +196,7 @@ struct mtk_pcie_port {
* @busn: bus range
* @offset: IO / Memory offset
* @ports: pointer to PCIe port information
+ * @soc: pointer to SoC-dependent operations
*/
struct mtk_pcie {
struct device *dev;
@@ -111,13 +212,9 @@ struct mtk_pcie {
resource_size_t io;
} offset;
struct list_head ports;
+ const struct mtk_pcie_soc *soc;
};
-static inline bool mtk_pcie_link_up(struct mtk_pcie_port *port)
-{
- return !!(readl(port->base + PCIE_LINK_STATUS) & PCIE_PORT_LINKUP);
-}
-
static void mtk_pcie_subsys_powerdown(struct mtk_pcie *pcie)
{
struct device *dev = pcie->dev;
@@ -146,6 +243,12 @@ static void mtk_pcie_put_resources(struct mtk_pcie *pcie)
list_for_each_entry_safe(port, tmp, &pcie->ports, list) {
phy_power_off(port->phy);
+ phy_exit(port->phy);
+ clk_disable_unprepare(port->pipe_ck);
+ clk_disable_unprepare(port->obff_ck);
+ clk_disable_unprepare(port->axi_ck);
+ clk_disable_unprepare(port->aux_ck);
+ clk_disable_unprepare(port->ahb_ck);
clk_disable_unprepare(port->sys_ck);
mtk_pcie_port_free(port);
}
@@ -153,11 +256,412 @@ static void mtk_pcie_put_resources(struct mtk_pcie *pcie)
mtk_pcie_subsys_powerdown(pcie);
}
+static int mtk_pcie_check_cfg_cpld(struct mtk_pcie_port *port)
+{
+ u32 val;
+ int err;
+
+ err = readl_poll_timeout_atomic(port->base + PCIE_APP_TLP_REQ, val,
+ !(val & APP_CFG_REQ), 10,
+ 100 * USEC_PER_MSEC);
+ if (err)
+ return PCIBIOS_SET_FAILED;
+
+ if (readl(port->base + PCIE_APP_TLP_REQ) & APP_CPL_STATUS)
+ return PCIBIOS_SET_FAILED;
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int mtk_pcie_hw_rd_cfg(struct mtk_pcie_port *port, u32 bus, u32 devfn,
+ int where, int size, u32 *val)
+{
+ u32 tmp;
+
+ /* Write PCIe configuration transaction header for Cfgrd */
+ writel(CFG_HEADER_DW0(CFG_WRRD_TYPE_0, CFG_RD_FMT),
+ port->base + PCIE_CFG_HEADER0);
+ writel(CFG_HEADER_DW1(where, size), port->base + PCIE_CFG_HEADER1);
+ writel(CFG_HEADER_DW2(where, PCI_FUNC(devfn), PCI_SLOT(devfn), bus),
+ port->base + PCIE_CFG_HEADER2);
+
+ /* Trigger h/w to transmit Cfgrd TLP */
+ tmp = readl(port->base + PCIE_APP_TLP_REQ);
+ tmp |= APP_CFG_REQ;
+ writel(tmp, port->base + PCIE_APP_TLP_REQ);
+
+ /* Check completion status */
+ if (mtk_pcie_check_cfg_cpld(port))
+ return PCIBIOS_SET_FAILED;
+
+ /* Read cpld payload of Cfgrd */
+ *val = readl(port->base + PCIE_CFG_RDATA);
+
+ if (size == 1)
+ *val = (*val >> (8 * (where & 3))) & 0xff;
+ else if (size == 2)
+ *val = (*val >> (8 * (where & 3))) & 0xffff;
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int mtk_pcie_hw_wr_cfg(struct mtk_pcie_port *port, u32 bus, u32 devfn,
+ int where, int size, u32 val)
+{
+ /* Write PCIe configuration transaction header for Cfgwr */
+ writel(CFG_HEADER_DW0(CFG_WRRD_TYPE_0, CFG_WR_FMT),
+ port->base + PCIE_CFG_HEADER0);
+ writel(CFG_HEADER_DW1(where, size), port->base + PCIE_CFG_HEADER1);
+ writel(CFG_HEADER_DW2(where, PCI_FUNC(devfn), PCI_SLOT(devfn), bus),
+ port->base + PCIE_CFG_HEADER2);
+
+ /* Write Cfgwr data */
+ val = val << 8 * (where & 3);
+ writel(val, port->base + PCIE_CFG_WDATA);
+
+ /* Trigger h/w to transmit Cfgwr TLP */
+ val = readl(port->base + PCIE_APP_TLP_REQ);
+ val |= APP_CFG_REQ;
+ writel(val, port->base + PCIE_APP_TLP_REQ);
+
+ /* Check completion status */
+ return mtk_pcie_check_cfg_cpld(port);
+}
+
+static struct mtk_pcie_port *mtk_pcie_find_port(struct pci_bus *bus,
+ unsigned int devfn)
+{
+ struct mtk_pcie *pcie = bus->sysdata;
+ struct mtk_pcie_port *port;
+
+ list_for_each_entry(port, &pcie->ports, list)
+ if (port->slot == PCI_SLOT(devfn))
+ return port;
+
+ return NULL;
+}
+
+static int mtk_pcie_config_read(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 *val)
+{
+ struct mtk_pcie_port *port;
+ u32 bn = bus->number;
+ int ret;
+
+ port = mtk_pcie_find_port(bus, devfn);
+ if (!port) {
+ *val = ~0;
+ return PCIBIOS_DEVICE_NOT_FOUND;
+ }
+
+ ret = mtk_pcie_hw_rd_cfg(port, bn, devfn, where, size, val);
+ if (ret)
+ *val = ~0;
+
+ return ret;
+}
+
+static int mtk_pcie_config_write(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 val)
+{
+ struct mtk_pcie_port *port;
+ u32 bn = bus->number;
+
+ port = mtk_pcie_find_port(bus, devfn);
+ if (!port)
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ return mtk_pcie_hw_wr_cfg(port, bn, devfn, where, size, val);
+}
+
+static struct pci_ops mtk_pcie_ops_v2 = {
+ .read = mtk_pcie_config_read,
+ .write = mtk_pcie_config_write,
+};
+
+static int mtk_pcie_startup_port_v2(struct mtk_pcie_port *port)
+{
+ struct mtk_pcie *pcie = port->pcie;
+ struct resource *mem = &pcie->mem;
+ u32 val;
+ size_t size;
+ int err;
+
+ /* MT7622 platforms need to enable LTSSM and ASPM from PCIe subsys */
+ if (pcie->base) {
+ val = readl(pcie->base + PCIE_SYS_CFG_V2);
+ val |= PCIE_CSR_LTSSM_EN(port->slot) |
+ PCIE_CSR_ASPM_L1_EN(port->slot);
+ writel(val, pcie->base + PCIE_SYS_CFG_V2);
+ }
+
+ /* Assert all reset signals */
+ writel(0, port->base + PCIE_RST_CTRL);
+
+ /*
+	 * Enable PCIe link down reset: if the link status changes from link up
+	 * to link down, this resets the MAC control registers and the
+	 * configuration space.
+ */
+ writel(PCIE_LINKDOWN_RST_EN, port->base + PCIE_RST_CTRL);
+
+ /* De-assert PHY, PE, PIPE, MAC and configuration reset */
+ val = readl(port->base + PCIE_RST_CTRL);
+ val |= PCIE_PHY_RSTB | PCIE_PERSTB | PCIE_PIPE_SRSTB |
+ PCIE_MAC_SRSTB | PCIE_CRSTB;
+ writel(val, port->base + PCIE_RST_CTRL);
+
+ /* 100ms timeout value should be enough for Gen1/2 training */
+ err = readl_poll_timeout(port->base + PCIE_LINK_STATUS_V2, val,
+ !!(val & PCIE_PORT_LINKUP_V2), 20,
+ 100 * USEC_PER_MSEC);
+ if (err)
+ return -ETIMEDOUT;
+
+ /* Set INTx mask */
+ val = readl(port->base + PCIE_INT_MASK);
+ val &= ~INTX_MASK;
+ writel(val, port->base + PCIE_INT_MASK);
+
+ /* Set AHB to PCIe translation windows */
+ size = mem->end - mem->start;
+ val = lower_32_bits(mem->start) | AHB2PCIE_SIZE(fls(size));
+ writel(val, port->base + PCIE_AHB_TRANS_BASE0_L);
+
+ val = upper_32_bits(mem->start);
+ writel(val, port->base + PCIE_AHB_TRANS_BASE0_H);
+
+	/* Set PCIe to AXI translation memory space. */
+ val = fls(0xffffffff) | WIN_ENABLE;
+ writel(val, port->base + PCIE_AXI_WINDOW0);
+
+ return 0;
+}
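The AHB-to-PCIe window is sized as a power of two via fls(). As an illustrative example, for a 256 MB MMIO resource the code above computes size = mem->end - mem->start = 0x0fffffff, fls(size) = 28, and AHB2PCIE_SIZE(28) programs a 2^28-byte translation window:

	/* 256 MB window: 0x0fffffff -> fls() == 28 -> AHB2PCIE_SIZE(28) */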
+
+static int mtk_pcie_msi_alloc(struct mtk_pcie_port *port)
+{
+ int msi;
+
+ msi = find_first_zero_bit(port->msi_irq_in_use, MTK_MSI_IRQS_NUM);
+ if (msi < MTK_MSI_IRQS_NUM)
+ set_bit(msi, port->msi_irq_in_use);
+ else
+ return -ENOSPC;
+
+ return msi;
+}
+
+static void mtk_pcie_msi_free(struct mtk_pcie_port *port, unsigned long hwirq)
+{
+ clear_bit(hwirq, port->msi_irq_in_use);
+}
+
+static int mtk_pcie_msi_setup_irq(struct msi_controller *chip,
+ struct pci_dev *pdev, struct msi_desc *desc)
+{
+ struct mtk_pcie_port *port;
+ struct msi_msg msg;
+ unsigned int irq;
+ int hwirq;
+ phys_addr_t msg_addr;
+
+ port = mtk_pcie_find_port(pdev->bus, pdev->devfn);
+ if (!port)
+ return -EINVAL;
+
+ hwirq = mtk_pcie_msi_alloc(port);
+ if (hwirq < 0)
+ return hwirq;
+
+ irq = irq_create_mapping(port->msi_domain, hwirq);
+ if (!irq) {
+ mtk_pcie_msi_free(port, hwirq);
+ return -EINVAL;
+ }
+
+ chip->dev = &pdev->dev;
+
+ irq_set_msi_desc(irq, desc);
+
+ /* MT2712/MT7622 only support 32-bit MSI addresses */
+ msg_addr = virt_to_phys(port->base + PCIE_MSI_VECTOR);
+ msg.address_hi = 0;
+ msg.address_lo = lower_32_bits(msg_addr);
+ msg.data = hwirq;
+
+ pci_write_msi_msg(irq, &msg);
+
+ return 0;
+}
+
+static void mtk_msi_teardown_irq(struct msi_controller *chip, unsigned int irq)
+{
+ struct pci_dev *pdev = to_pci_dev(chip->dev);
+ struct irq_data *d = irq_get_irq_data(irq);
+ irq_hw_number_t hwirq = irqd_to_hwirq(d);
+ struct mtk_pcie_port *port;
+
+ port = mtk_pcie_find_port(pdev->bus, pdev->devfn);
+ if (!port)
+ return;
+
+ irq_dispose_mapping(irq);
+ mtk_pcie_msi_free(port, hwirq);
+}
+
+static struct msi_controller mtk_pcie_msi_chip = {
+ .setup_irq = mtk_pcie_msi_setup_irq,
+ .teardown_irq = mtk_msi_teardown_irq,
+};
+
+static struct irq_chip mtk_msi_irq_chip = {
+ .name = "MTK PCIe MSI",
+ .irq_enable = pci_msi_unmask_irq,
+ .irq_disable = pci_msi_mask_irq,
+ .irq_mask = pci_msi_mask_irq,
+ .irq_unmask = pci_msi_unmask_irq,
+};
+
+static int mtk_pcie_msi_map(struct irq_domain *domain, unsigned int irq,
+ irq_hw_number_t hwirq)
+{
+ irq_set_chip_and_handler(irq, &mtk_msi_irq_chip, handle_simple_irq);
+ irq_set_chip_data(irq, domain->host_data);
+
+ return 0;
+}
+
+static const struct irq_domain_ops msi_domain_ops = {
+ .map = mtk_pcie_msi_map,
+};
+
+static void mtk_pcie_enable_msi(struct mtk_pcie_port *port)
+{
+ u32 val;
+ phys_addr_t msg_addr;
+
+ msg_addr = virt_to_phys(port->base + PCIE_MSI_VECTOR);
+ val = lower_32_bits(msg_addr);
+ writel(val, port->base + PCIE_IMSI_ADDR);
+
+ val = readl(port->base + PCIE_INT_MASK);
+ val &= ~MSI_MASK;
+ writel(val, port->base + PCIE_INT_MASK);
+}
+
+static int mtk_pcie_intx_map(struct irq_domain *domain, unsigned int irq,
+ irq_hw_number_t hwirq)
+{
+ irq_set_chip_and_handler(irq, &dummy_irq_chip, handle_simple_irq);
+ irq_set_chip_data(irq, domain->host_data);
+
+ return 0;
+}
+
+static const struct irq_domain_ops intx_domain_ops = {
+ .map = mtk_pcie_intx_map,
+};
+
+static int mtk_pcie_init_irq_domain(struct mtk_pcie_port *port,
+ struct device_node *node)
+{
+ struct device *dev = port->pcie->dev;
+ struct device_node *pcie_intc_node;
+
+ /* Setup INTx */
+ pcie_intc_node = of_get_next_child(node, NULL);
+ if (!pcie_intc_node) {
+ dev_err(dev, "no PCIe Intc node found\n");
+ return -ENODEV;
+ }
+
+ port->irq_domain = irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX,
+ &intx_domain_ops, port);
+ if (!port->irq_domain) {
+ dev_err(dev, "failed to get INTx IRQ domain\n");
+ return -ENODEV;
+ }
+
+ if (IS_ENABLED(CONFIG_PCI_MSI)) {
+ port->msi_domain = irq_domain_add_linear(node, MTK_MSI_IRQS_NUM,
+ &msi_domain_ops,
+ &mtk_pcie_msi_chip);
+ if (!port->msi_domain) {
+ dev_err(dev, "failed to create MSI IRQ domain\n");
+ return -ENODEV;
+ }
+ mtk_pcie_enable_msi(port);
+ }
+
+ return 0;
+}
+
+static irqreturn_t mtk_pcie_intr_handler(int irq, void *data)
+{
+ struct mtk_pcie_port *port = (struct mtk_pcie_port *)data;
+ unsigned long status;
+ u32 virq;
+ u32 bit = INTX_SHIFT;
+
+ while ((status = readl(port->base + PCIE_INT_STATUS)) & INTX_MASK) {
+ for_each_set_bit_from(bit, &status, PCI_NUM_INTX + INTX_SHIFT) {
+ /* Clear the INTx */
+ writel(1 << bit, port->base + PCIE_INT_STATUS);
+ virq = irq_find_mapping(port->irq_domain,
+ bit - INTX_SHIFT);
+ generic_handle_irq(virq);
+ }
+ }
+
+ if (IS_ENABLED(CONFIG_PCI_MSI)) {
+ while ((status = readl(port->base + PCIE_INT_STATUS)) & MSI_STATUS) {
+ unsigned long imsi_status;
+
+ while ((imsi_status = readl(port->base + PCIE_IMSI_STATUS))) {
+ for_each_set_bit(bit, &imsi_status, MTK_MSI_IRQS_NUM) {
+ /* Clear the MSI */
+ writel(1 << bit, port->base + PCIE_IMSI_STATUS);
+ virq = irq_find_mapping(port->msi_domain, bit);
+ generic_handle_irq(virq);
+ }
+ }
+ /* Clear MSI interrupt status */
+ writel(MSI_STATUS, port->base + PCIE_INT_STATUS);
+ }
+ }
+
+ return IRQ_HANDLED;
+}
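In the handler above, INTx status sits in bits 16..19 of PCIE_INT_STATUS, so the bit search starts at INTX_SHIFT and the hwirq is recovered by subtracting it again. A worked example (illustrative):

	/* A pending INTB sets status bit 17; hwirq = 17 - INTX_SHIFT = 1. */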
+
+static int mtk_pcie_setup_irq(struct mtk_pcie_port *port,
+ struct device_node *node)
+{
+ struct mtk_pcie *pcie = port->pcie;
+ struct device *dev = pcie->dev;
+ struct platform_device *pdev = to_platform_device(dev);
+ int err, irq;
+
+ irq = platform_get_irq(pdev, port->slot);
+ err = devm_request_irq(dev, irq, mtk_pcie_intr_handler,
+ IRQF_SHARED, "mtk-pcie", port);
+ if (err) {
+ dev_err(dev, "unable to request IRQ %d\n", irq);
+ return err;
+ }
+
+ err = mtk_pcie_init_irq_domain(port, node);
+ if (err) {
+ dev_err(dev, "failed to init PCIe IRQ domain\n");
+ return err;
+ }
+
+ return 0;
+}
+
static void __iomem *mtk_pcie_map_bus(struct pci_bus *bus,
unsigned int devfn, int where)
{
- struct pci_host_bridge *host = pci_find_host_bridge(bus);
- struct mtk_pcie *pcie = pci_host_bridge_priv(host);
+ struct mtk_pcie *pcie = bus->sysdata;
writel(PCIE_CONF_ADDR(where, PCI_FUNC(devfn), PCI_SLOT(devfn),
bus->number), pcie->base + PCIE_CFG_ADDR);
@@ -171,16 +675,34 @@ static struct pci_ops mtk_pcie_ops = {
.write = pci_generic_config_write,
};
-static void mtk_pcie_configure_rc(struct mtk_pcie_port *port)
+static int mtk_pcie_startup_port(struct mtk_pcie_port *port)
{
struct mtk_pcie *pcie = port->pcie;
- u32 func = PCI_FUNC(port->index << 3);
- u32 slot = PCI_SLOT(port->index << 3);
+ u32 func = PCI_FUNC(port->slot << 3);
+ u32 slot = PCI_SLOT(port->slot << 3);
u32 val;
+ int err;
+
+ /* assert port PERST_N */
+ val = readl(pcie->base + PCIE_SYS_CFG);
+ val |= PCIE_PORT_PERST(port->slot);
+ writel(val, pcie->base + PCIE_SYS_CFG);
+
+ /* de-assert port PERST_N */
+ val = readl(pcie->base + PCIE_SYS_CFG);
+ val &= ~PCIE_PORT_PERST(port->slot);
+ writel(val, pcie->base + PCIE_SYS_CFG);
+
+ /* 100ms timeout value should be enough for Gen1/2 training */
+ err = readl_poll_timeout(port->base + PCIE_LINK_STATUS, val,
+ !!(val & PCIE_PORT_LINKUP), 20,
+ 100 * USEC_PER_MSEC);
+ if (err)
+ return -ETIMEDOUT;
/* enable interrupt */
val = readl(pcie->base + PCIE_INT_ENABLE);
- val |= PCIE_PORT_INT_EN(port->index);
+ val |= PCIE_PORT_INT_EN(port->slot);
writel(val, pcie->base + PCIE_INT_ENABLE);
/* map to all DDR region. We need to set it before cfg operation. */
@@ -209,67 +731,94 @@ static void mtk_pcie_configure_rc(struct mtk_pcie_port *port)
writel(PCIE_CONF_ADDR(PCIE_FTS_NUM, func, slot, 0),
pcie->base + PCIE_CFG_ADDR);
writel(val, pcie->base + PCIE_CFG_DATA);
+
+ return 0;
}
-static void mtk_pcie_assert_ports(struct mtk_pcie_port *port)
+static void mtk_pcie_enable_port(struct mtk_pcie_port *port)
{
struct mtk_pcie *pcie = port->pcie;
- u32 val;
+ struct device *dev = pcie->dev;
+ int err;
- /* assert port PERST_N */
- val = readl(pcie->base + PCIE_SYS_CFG);
- val |= PCIE_PORT_PERST(port->index);
- writel(val, pcie->base + PCIE_SYS_CFG);
+ err = clk_prepare_enable(port->sys_ck);
+ if (err) {
+ dev_err(dev, "failed to enable sys_ck%d clock\n", port->slot);
+ goto err_sys_clk;
+ }
- /* de-assert port PERST_N */
- val = readl(pcie->base + PCIE_SYS_CFG);
- val &= ~PCIE_PORT_PERST(port->index);
- writel(val, pcie->base + PCIE_SYS_CFG);
+ err = clk_prepare_enable(port->ahb_ck);
+ if (err) {
+ dev_err(dev, "failed to enable ahb_ck%d\n", port->slot);
+ goto err_ahb_clk;
+ }
- /* PCIe v2.0 need at least 100ms delay to train from Gen1 to Gen2 */
- msleep(100);
-}
+ err = clk_prepare_enable(port->aux_ck);
+ if (err) {
+ dev_err(dev, "failed to enable aux_ck%d\n", port->slot);
+ goto err_aux_clk;
+ }
-static void mtk_pcie_enable_ports(struct mtk_pcie_port *port)
-{
- struct device *dev = port->pcie->dev;
- int err;
+ err = clk_prepare_enable(port->axi_ck);
+ if (err) {
+ dev_err(dev, "failed to enable axi_ck%d\n", port->slot);
+ goto err_axi_clk;
+ }
- err = clk_prepare_enable(port->sys_ck);
+ err = clk_prepare_enable(port->obff_ck);
if (err) {
- dev_err(dev, "failed to enable port%d clock\n", port->index);
- goto err_sys_clk;
+ dev_err(dev, "failed to enable obff_ck%d\n", port->slot);
+ goto err_obff_clk;
+ }
+
+ err = clk_prepare_enable(port->pipe_ck);
+ if (err) {
+ dev_err(dev, "failed to enable pipe_ck%d\n", port->slot);
+ goto err_pipe_clk;
}
reset_control_assert(port->reset);
reset_control_deassert(port->reset);
+ err = phy_init(port->phy);
+ if (err) {
+ dev_err(dev, "failed to initialize port%d phy\n", port->slot);
+ goto err_phy_init;
+ }
+
err = phy_power_on(port->phy);
if (err) {
- dev_err(dev, "failed to power on port%d phy\n", port->index);
+ dev_err(dev, "failed to power on port%d phy\n", port->slot);
goto err_phy_on;
}
- mtk_pcie_assert_ports(port);
-
- /* if link up, then setup root port configuration space */
- if (mtk_pcie_link_up(port)) {
- mtk_pcie_configure_rc(port);
+ if (!pcie->soc->startup(port))
return;
- }
- dev_info(dev, "Port%d link down\n", port->index);
+ dev_info(dev, "Port%d link down\n", port->slot);
phy_power_off(port->phy);
err_phy_on:
+ phy_exit(port->phy);
+err_phy_init:
+ clk_disable_unprepare(port->pipe_ck);
+err_pipe_clk:
+ clk_disable_unprepare(port->obff_ck);
+err_obff_clk:
+ clk_disable_unprepare(port->axi_ck);
+err_axi_clk:
+ clk_disable_unprepare(port->aux_ck);
+err_aux_clk:
+ clk_disable_unprepare(port->ahb_ck);
+err_ahb_clk:
clk_disable_unprepare(port->sys_ck);
err_sys_clk:
mtk_pcie_port_free(port);
}
-static int mtk_pcie_parse_ports(struct mtk_pcie *pcie,
- struct device_node *node,
- int index)
+static int mtk_pcie_parse_port(struct mtk_pcie *pcie,
+ struct device_node *node,
+ int slot)
{
struct mtk_pcie_port *port;
struct resource *regs;
@@ -288,34 +837,87 @@ static int mtk_pcie_parse_ports(struct mtk_pcie *pcie,
return err;
}
- regs = platform_get_resource(pdev, IORESOURCE_MEM, index + 1);
+ snprintf(name, sizeof(name), "port%d", slot);
+ regs = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
port->base = devm_ioremap_resource(dev, regs);
if (IS_ERR(port->base)) {
- dev_err(dev, "failed to map port%d base\n", index);
+ dev_err(dev, "failed to map port%d base\n", slot);
return PTR_ERR(port->base);
}
- snprintf(name, sizeof(name), "sys_ck%d", index);
+ snprintf(name, sizeof(name), "sys_ck%d", slot);
port->sys_ck = devm_clk_get(dev, name);
if (IS_ERR(port->sys_ck)) {
- dev_err(dev, "failed to get port%d clock\n", index);
+ dev_err(dev, "failed to get sys_ck%d clock\n", slot);
return PTR_ERR(port->sys_ck);
}
- snprintf(name, sizeof(name), "pcie-rst%d", index);
- port->reset = devm_reset_control_get_optional(dev, name);
+ /* sys_ck might be divided into the following parts in some chips */
+ snprintf(name, sizeof(name), "ahb_ck%d", slot);
+ port->ahb_ck = devm_clk_get(dev, name);
+ if (IS_ERR(port->ahb_ck)) {
+ if (PTR_ERR(port->ahb_ck) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+
+ port->ahb_ck = NULL;
+ }
+
+ snprintf(name, sizeof(name), "axi_ck%d", slot);
+ port->axi_ck = devm_clk_get(dev, name);
+ if (IS_ERR(port->axi_ck)) {
+ if (PTR_ERR(port->axi_ck) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+
+ port->axi_ck = NULL;
+ }
+
+ snprintf(name, sizeof(name), "aux_ck%d", slot);
+ port->aux_ck = devm_clk_get(dev, name);
+ if (IS_ERR(port->aux_ck)) {
+ if (PTR_ERR(port->aux_ck) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+
+ port->aux_ck = NULL;
+ }
+
+ snprintf(name, sizeof(name), "obff_ck%d", slot);
+ port->obff_ck = devm_clk_get(dev, name);
+ if (IS_ERR(port->obff_ck)) {
+ if (PTR_ERR(port->obff_ck) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+
+ port->obff_ck = NULL;
+ }
+
+ snprintf(name, sizeof(name), "pipe_ck%d", slot);
+ port->pipe_ck = devm_clk_get(dev, name);
+ if (IS_ERR(port->pipe_ck)) {
+ if (PTR_ERR(port->pipe_ck) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+
+ port->pipe_ck = NULL;
+ }
+
+ snprintf(name, sizeof(name), "pcie-rst%d", slot);
+ port->reset = devm_reset_control_get_optional_exclusive(dev, name);
if (PTR_ERR(port->reset) == -EPROBE_DEFER)
return PTR_ERR(port->reset);
/* some platforms may use default PHY setting */
- snprintf(name, sizeof(name), "pcie-phy%d", index);
+ snprintf(name, sizeof(name), "pcie-phy%d", slot);
port->phy = devm_phy_optional_get(dev, name);
if (IS_ERR(port->phy))
return PTR_ERR(port->phy);
- port->index = index;
+ port->slot = slot;
port->pcie = pcie;
+ if (pcie->soc->setup_irq) {
+ err = pcie->soc->setup_irq(port, node);
+ if (err)
+ return err;
+ }
+
INIT_LIST_HEAD(&port->list);
list_add_tail(&port->list, &pcie->ports);
@@ -329,12 +931,14 @@ static int mtk_pcie_subsys_powerup(struct mtk_pcie *pcie)
struct resource *regs;
int err;
- /* get shared registers */
- regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- pcie->base = devm_ioremap_resource(dev, regs);
- if (IS_ERR(pcie->base)) {
- dev_err(dev, "failed to map shared register\n");
- return PTR_ERR(pcie->base);
+ /* get shared registers, which are optional */
+ regs = platform_get_resource_byname(pdev, IORESOURCE_MEM, "subsys");
+ if (regs) {
+ pcie->base = devm_ioremap_resource(dev, regs);
+ if (IS_ERR(pcie->base)) {
+ dev_err(dev, "failed to map shared register\n");
+ return PTR_ERR(pcie->base);
+ }
}
pcie->free_ck = devm_clk_get(dev, "free_ck");
@@ -422,7 +1026,7 @@ static int mtk_pcie_setup(struct mtk_pcie *pcie)
}
for_each_available_child_of_node(node, child) {
- int index;
+ int slot;
err = of_pci_get_devfn(child);
if (err < 0) {
@@ -430,9 +1034,9 @@ static int mtk_pcie_setup(struct mtk_pcie *pcie)
return err;
}
- index = PCI_SLOT(err);
+ slot = PCI_SLOT(err);
- err = mtk_pcie_parse_ports(pcie, child, index);
+ err = mtk_pcie_parse_port(pcie, child, slot);
if (err)
return err;
}
@@ -443,7 +1047,7 @@ static int mtk_pcie_setup(struct mtk_pcie *pcie)
/* enable each port, and then check link status */
list_for_each_entry_safe(port, tmp, &pcie->ports, list)
- mtk_pcie_enable_ports(port);
+ mtk_pcie_enable_port(port);
/* power down PCIe subsys if slots are all empty (link down) */
if (list_empty(&pcie->ports))
@@ -480,9 +1084,12 @@ static int mtk_pcie_register_host(struct pci_host_bridge *host)
host->busnr = pcie->busn.start;
host->dev.parent = pcie->dev;
- host->ops = &mtk_pcie_ops;
+ host->ops = pcie->soc->ops;
host->map_irq = of_irq_parse_and_map_pci;
host->swizzle_irq = pci_common_swizzle;
+ host->sysdata = pcie;
+ if (IS_ENABLED(CONFIG_PCI_MSI) && pcie->soc->has_msi)
+ host->msi = &mtk_pcie_msi_chip;
err = pci_scan_root_bus_bridge(host);
if (err < 0)
@@ -513,6 +1120,7 @@ static int mtk_pcie_probe(struct platform_device *pdev)
pcie = pci_host_bridge_priv(host);
pcie->dev = dev;
+ pcie->soc = of_device_get_match_data(dev);
platform_set_drvdata(pdev, pcie);
INIT_LIST_HEAD(&pcie->ports);
@@ -537,9 +1145,23 @@ put_resources:
return err;
}
+static const struct mtk_pcie_soc mtk_pcie_soc_v1 = {
+ .ops = &mtk_pcie_ops,
+ .startup = mtk_pcie_startup_port,
+};
+
+static const struct mtk_pcie_soc mtk_pcie_soc_v2 = {
+ .has_msi = true,
+ .ops = &mtk_pcie_ops_v2,
+ .startup = mtk_pcie_startup_port_v2,
+ .setup_irq = mtk_pcie_setup_irq,
+};
+
static const struct of_device_id mtk_pcie_ids[] = {
- { .compatible = "mediatek,mt7623-pcie"},
- { .compatible = "mediatek,mt2701-pcie"},
+ { .compatible = "mediatek,mt2701-pcie", .data = &mtk_pcie_soc_v1 },
+ { .compatible = "mediatek,mt7623-pcie", .data = &mtk_pcie_soc_v1 },
+ { .compatible = "mediatek,mt2712-pcie", .data = &mtk_pcie_soc_v2 },
+ { .compatible = "mediatek,mt7622-pcie", .data = &mtk_pcie_soc_v2 },
{},
};
diff --git a/drivers/pci/host/pcie-rcar.c b/drivers/pci/host/pcie-rcar.c
index 246d485b24c6..4e0b25d09b0c 100644
--- a/drivers/pci/host/pcie-rcar.c
+++ b/drivers/pci/host/pcie-rcar.c
@@ -471,10 +471,8 @@ static int rcar_pcie_enable(struct rcar_pcie *pcie)
bridge->msi = &pcie->msi.chip;
ret = pci_scan_root_bus_bridge(bridge);
- if (ret < 0) {
- kfree(bridge);
+ if (ret < 0)
return ret;
- }
bus = bridge->bus;
@@ -1190,14 +1188,16 @@ static int rcar_pcie_probe(struct platform_device *pdev)
return 0;
-err_free_bridge:
- pci_free_host_bridge(bridge);
-
err_pm_put:
pm_runtime_put(dev);
err_pm_disable:
pm_runtime_disable(dev);
+
+err_free_bridge:
+ pci_free_host_bridge(bridge);
+ pci_free_resource_list(&pcie->resources);
+
return err;
}
diff --git a/drivers/pci/host/pcie-rockchip.c b/drivers/pci/host/pcie-rockchip.c
index 7bb9870f6d8c..d205381c7ec4 100644
--- a/drivers/pci/host/pcie-rockchip.c
+++ b/drivers/pci/host/pcie-rockchip.c
@@ -15,6 +15,7 @@
* (at your option) any later version.
*/
+#include <linux/bitrev.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
@@ -47,6 +48,7 @@
#define HIWORD_UPDATE_BIT(val) HIWORD_UPDATE(val, val)
#define ENCODE_LANES(x) ((((x) >> 1) & 3) << 4)
+#define MAX_LANE_NUM 4
#define PCIE_CLIENT_BASE 0x0
#define PCIE_CLIENT_CONFIG (PCIE_CLIENT_BASE + 0x00)
@@ -111,6 +113,9 @@
#define PCIE_CORE_TXCREDIT_CFG1_MUI_SHIFT 16
#define PCIE_CORE_TXCREDIT_CFG1_MUI_ENCODE(x) \
(((x) >> 3) << PCIE_CORE_TXCREDIT_CFG1_MUI_SHIFT)
+#define PCIE_CORE_LANE_MAP (PCIE_CORE_CTRL_MGMT_BASE + 0x200)
+#define PCIE_CORE_LANE_MAP_MASK 0x0000000f
+#define PCIE_CORE_LANE_MAP_REVERSE BIT(16)
#define PCIE_CORE_INT_STATUS (PCIE_CORE_CTRL_MGMT_BASE + 0x20c)
#define PCIE_CORE_INT_PRFPE BIT(0)
#define PCIE_CORE_INT_CRFPE BIT(1)
@@ -210,7 +215,8 @@
struct rockchip_pcie {
void __iomem *reg_base; /* DT axi-base */
void __iomem *apb_base; /* DT apb-base */
- struct phy *phy;
+ bool legacy_phy;
+ struct phy *phys[MAX_LANE_NUM];
struct reset_control *core_rst;
struct reset_control *mgmt_rst;
struct reset_control *mgmt_sticky_rst;
@@ -222,11 +228,13 @@ struct rockchip_pcie {
struct clk *aclk_perf_pcie;
struct clk *hclk_pcie;
struct clk *clk_pcie_pm;
+ struct regulator *vpcie12v; /* 12V power supply */
struct regulator *vpcie3v3; /* 3.3V power supply */
struct regulator *vpcie1v8; /* 1.8V power supply */
struct regulator *vpcie0v9; /* 0.9V power supply */
struct gpio_desc *ep_gpio;
u32 lanes;
+ u8 lanes_map;
u8 root_bus_nr;
int link_gen;
struct device *dev;
@@ -299,6 +307,24 @@ static int rockchip_pcie_valid_device(struct rockchip_pcie *rockchip,
return 1;
}
+static u8 rockchip_pcie_lane_map(struct rockchip_pcie *rockchip)
+{
+ u32 val;
+ u8 map;
+
+ if (rockchip->legacy_phy)
+ return GENMASK(MAX_LANE_NUM - 1, 0);
+
+ val = rockchip_pcie_read(rockchip, PCIE_CORE_LANE_MAP);
+ map = val & PCIE_CORE_LANE_MAP_MASK;
+
+ /* The link may be using a reverse-indexed mapping. */
+ if (val & PCIE_CORE_LANE_MAP_REVERSE)
+ map = bitrev8(map) >> 4;
+
+ return map;
+}
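A worked example of the reversal (illustrative values): if the hardware reports map = 0b0011 with PCIE_CORE_LANE_MAP_REVERSE set, then bitrev8(0b0011) = 0b11000000 and shifting right by four yields 0b1100, i.e. lanes 2 and 3 are the active ones:

	/* map 0b0011, reversed: bitrev8(0x03) = 0xc0, 0xc0 >> 4 = 0x0c */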
+
static int rockchip_pcie_rd_own_conf(struct rockchip_pcie *rockchip,
int where, int size, u32 *val)
{
@@ -514,10 +540,10 @@ static void rockchip_pcie_set_power_limit(struct rockchip_pcie *rockchip)
static int rockchip_pcie_init_port(struct rockchip_pcie *rockchip)
{
struct device *dev = rockchip->dev;
- int err;
+ int err, i;
u32 status;
- gpiod_set_value(rockchip->ep_gpio, 0);
+ gpiod_set_value_cansleep(rockchip->ep_gpio, 0);
err = reset_control_assert(rockchip->aclk_rst);
if (err) {
@@ -537,34 +563,36 @@ static int rockchip_pcie_init_port(struct rockchip_pcie *rockchip)
return err;
}
- err = phy_init(rockchip->phy);
- if (err < 0) {
- dev_err(dev, "fail to init phy, err %d\n", err);
- return err;
+ for (i = 0; i < MAX_LANE_NUM; i++) {
+ err = phy_init(rockchip->phys[i]);
+ if (err) {
+ dev_err(dev, "init phy%d err %d\n", i, err);
+ goto err_exit_phy;
+ }
}
err = reset_control_assert(rockchip->core_rst);
if (err) {
dev_err(dev, "assert core_rst err %d\n", err);
- return err;
+ goto err_exit_phy;
}
err = reset_control_assert(rockchip->mgmt_rst);
if (err) {
dev_err(dev, "assert mgmt_rst err %d\n", err);
- return err;
+ goto err_exit_phy;
}
err = reset_control_assert(rockchip->mgmt_sticky_rst);
if (err) {
dev_err(dev, "assert mgmt_sticky_rst err %d\n", err);
- return err;
+ goto err_exit_phy;
}
err = reset_control_assert(rockchip->pipe_rst);
if (err) {
dev_err(dev, "assert pipe_rst err %d\n", err);
- return err;
+ goto err_exit_phy;
}
udelay(10);
@@ -572,19 +600,19 @@ static int rockchip_pcie_init_port(struct rockchip_pcie *rockchip)
err = reset_control_deassert(rockchip->pm_rst);
if (err) {
dev_err(dev, "deassert pm_rst err %d\n", err);
- return err;
+ goto err_exit_phy;
}
err = reset_control_deassert(rockchip->aclk_rst);
if (err) {
dev_err(dev, "deassert aclk_rst err %d\n", err);
- return err;
+ goto err_exit_phy;
}
err = reset_control_deassert(rockchip->pclk_rst);
if (err) {
dev_err(dev, "deassert pclk_rst err %d\n", err);
- return err;
+ goto err_exit_phy;
}
if (rockchip->link_gen == 2)
@@ -602,10 +630,12 @@ static int rockchip_pcie_init_port(struct rockchip_pcie *rockchip)
PCIE_CLIENT_MODE_RC,
PCIE_CLIENT_CONFIG);
- err = phy_power_on(rockchip->phy);
- if (err) {
- dev_err(dev, "fail to power on phy, err %d\n", err);
- return err;
+ for (i = 0; i < MAX_LANE_NUM; i++) {
+ err = phy_power_on(rockchip->phys[i]);
+ if (err) {
+ dev_err(dev, "power on phy%d err %d\n", i, err);
+ goto err_power_off_phy;
+ }
}
/*
@@ -615,25 +645,25 @@ static int rockchip_pcie_init_port(struct rockchip_pcie *rockchip)
err = reset_control_deassert(rockchip->mgmt_sticky_rst);
if (err) {
dev_err(dev, "deassert mgmt_sticky_rst err %d\n", err);
- return err;
+ goto err_power_off_phy;
}
err = reset_control_deassert(rockchip->core_rst);
if (err) {
dev_err(dev, "deassert core_rst err %d\n", err);
- return err;
+ goto err_power_off_phy;
}
err = reset_control_deassert(rockchip->mgmt_rst);
if (err) {
dev_err(dev, "deassert mgmt_rst err %d\n", err);
- return err;
+ goto err_power_off_phy;
}
err = reset_control_deassert(rockchip->pipe_rst);
if (err) {
dev_err(dev, "deassert pipe_rst err %d\n", err);
- return err;
+ goto err_power_off_phy;
}
/* Fix the transmitted FTS count desired to exit from L0s. */
@@ -658,7 +688,7 @@ static int rockchip_pcie_init_port(struct rockchip_pcie *rockchip)
rockchip_pcie_write(rockchip, PCIE_CLIENT_LINK_TRAIN_ENABLE,
PCIE_CLIENT_CONFIG);
- gpiod_set_value(rockchip->ep_gpio, 1);
+ gpiod_set_value_cansleep(rockchip->ep_gpio, 1);
/* 500ms timeout value should be enough for Gen1/2 training */
err = readl_poll_timeout(rockchip->apb_base + PCIE_CLIENT_BASIC_STATUS1,
@@ -666,7 +696,7 @@ static int rockchip_pcie_init_port(struct rockchip_pcie *rockchip)
500 * USEC_PER_MSEC);
if (err) {
dev_err(dev, "PCIe link training gen1 timeout!\n");
- return -ETIMEDOUT;
+ goto err_power_off_phy;
}
if (rockchip->link_gen == 2) {
@@ -691,6 +721,15 @@ static int rockchip_pcie_init_port(struct rockchip_pcie *rockchip)
PCIE_CORE_PL_CONF_LANE_SHIFT);
dev_dbg(dev, "current link width is x%d\n", status);
+ /* Power off unused lane(s) */
+ rockchip->lanes_map = rockchip_pcie_lane_map(rockchip);
+ for (i = 0; i < MAX_LANE_NUM; i++) {
+ if (!(rockchip->lanes_map & BIT(i))) {
+ dev_dbg(dev, "idling lane %d\n", i);
+ phy_power_off(rockchip->phys[i]);
+ }
+ }
+
rockchip_pcie_write(rockchip, ROCKCHIP_VENDOR_ID,
PCIE_CORE_CONFIG_VENDOR);
rockchip_pcie_write(rockchip,
@@ -715,6 +754,26 @@ static int rockchip_pcie_init_port(struct rockchip_pcie *rockchip)
rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_DCSR);
return 0;
+err_power_off_phy:
+ while (i--)
+ phy_power_off(rockchip->phys[i]);
+ i = MAX_LANE_NUM;
+err_exit_phy:
+ while (i--)
+ phy_exit(rockchip->phys[i]);
+ return err;
+}
+
+static void rockchip_pcie_deinit_phys(struct rockchip_pcie *rockchip)
+{
+ int i;
+
+ for (i = 0; i < MAX_LANE_NUM; i++) {
+ /* inactive lanes are already powered off */
+ if (rockchip->lanes_map & BIT(i))
+ phy_power_off(rockchip->phys[i]);
+ phy_exit(rockchip->phys[i]);
+ }
}
static irqreturn_t rockchip_pcie_subsys_irq_handler(int irq, void *arg)
@@ -853,6 +912,91 @@ static void rockchip_pcie_legacy_int_handler(struct irq_desc *desc)
chained_irq_exit(chip, desc);
}
+static int rockchip_pcie_get_phys(struct rockchip_pcie *rockchip)
+{
+ struct device *dev = rockchip->dev;
+ struct phy *phy;
+ char *name;
+ u32 i;
+
+ phy = devm_phy_get(dev, "pcie-phy");
+ if (!IS_ERR(phy)) {
+ rockchip->legacy_phy = true;
+ rockchip->phys[0] = phy;
+ dev_warn(dev, "legacy phy model is deprecated!\n");
+ return 0;
+ }
+
+ if (PTR_ERR(phy) == -EPROBE_DEFER)
+ return PTR_ERR(phy);
+
+ dev_dbg(dev, "missing legacy phy; search for per-lane PHY\n");
+
+ for (i = 0; i < MAX_LANE_NUM; i++) {
+ name = kasprintf(GFP_KERNEL, "pcie-phy-%u", i);
+ if (!name)
+ return -ENOMEM;
+
+ phy = devm_of_phy_get(dev, dev->of_node, name);
+ kfree(name);
+
+ if (IS_ERR(phy)) {
+ if (PTR_ERR(phy) != -EPROBE_DEFER)
+ dev_err(dev, "missing phy for lane %d: %ld\n",
+ i, PTR_ERR(phy));
+ return PTR_ERR(phy);
+ }
+
+ rockchip->phys[i] = phy;
+ }
+
+ return 0;
+}
+
+static int rockchip_pcie_setup_irq(struct rockchip_pcie *rockchip)
+{
+ int irq, err;
+ struct device *dev = rockchip->dev;
+ struct platform_device *pdev = to_platform_device(dev);
+
+ irq = platform_get_irq_byname(pdev, "sys");
+ if (irq < 0) {
+ dev_err(dev, "missing sys IRQ resource\n");
+ return irq;
+ }
+
+ err = devm_request_irq(dev, irq, rockchip_pcie_subsys_irq_handler,
+ IRQF_SHARED, "pcie-sys", rockchip);
+ if (err) {
+ dev_err(dev, "failed to request PCIe subsystem IRQ\n");
+ return err;
+ }
+
+ irq = platform_get_irq_byname(pdev, "legacy");
+ if (irq < 0) {
+ dev_err(dev, "missing legacy IRQ resource\n");
+ return irq;
+ }
+
+ irq_set_chained_handler_and_data(irq,
+ rockchip_pcie_legacy_int_handler,
+ rockchip);
+
+ irq = platform_get_irq_byname(pdev, "client");
+ if (irq < 0) {
+ dev_err(dev, "missing client IRQ resource\n");
+ return irq;
+ }
+
+ err = devm_request_irq(dev, irq, rockchip_pcie_client_irq_handler,
+ IRQF_SHARED, "pcie-client", rockchip);
+ if (err) {
+ dev_err(dev, "failed to request PCIe client IRQ\n");
+ return err;
+ }
+
+ return 0;
+}
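rockchip_pcie_setup_irq() requests the "sys" and "client" lines as ordinary shared IRQs but installs the "legacy" line as a chained handler, which demultiplexes the hardware INTx status into the driver's INTx IRQ domain. The real rockchip_pcie_legacy_int_handler() is outside this hunk; a generic sketch of that chained-demux pattern, with the status read left as a placeholder helper:

static void pcie_intx_demux_handler(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct rockchip_pcie *rockchip = irq_desc_get_handler_data(desc);
	unsigned long status;
	u32 bit;

	chained_irq_enter(chip, desc);

	/* read_intx_status() is a placeholder for the controller-specific register read */
	status = read_intx_status(rockchip);
	for_each_set_bit(bit, &status, PCI_NUM_INTX) {
		unsigned int virq = irq_find_mapping(rockchip->irq_domain, bit);

		if (virq)
			generic_handle_irq(virq);
	}

	chained_irq_exit(chip, desc);
}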
/**
* rockchip_pcie_parse_dt - Parse Device Tree
@@ -866,7 +1010,6 @@ static int rockchip_pcie_parse_dt(struct rockchip_pcie *rockchip)
struct platform_device *pdev = to_platform_device(dev);
struct device_node *node = dev->of_node;
struct resource *regs;
- int irq;
int err;
regs = platform_get_resource_byname(pdev,
@@ -883,12 +1026,9 @@ static int rockchip_pcie_parse_dt(struct rockchip_pcie *rockchip)
if (IS_ERR(rockchip->apb_base))
return PTR_ERR(rockchip->apb_base);
- rockchip->phy = devm_phy_get(dev, "pcie-phy");
- if (IS_ERR(rockchip->phy)) {
- if (PTR_ERR(rockchip->phy) != -EPROBE_DEFER)
- dev_err(dev, "missing phy\n");
- return PTR_ERR(rockchip->phy);
- }
+ err = rockchip_pcie_get_phys(rockchip);
+ if (err)
+ return err;
rockchip->lanes = 1;
err = of_property_read_u32(node, "num-lanes", &rockchip->lanes);
@@ -903,49 +1043,50 @@ static int rockchip_pcie_parse_dt(struct rockchip_pcie *rockchip)
if (rockchip->link_gen < 0 || rockchip->link_gen > 2)
rockchip->link_gen = 2;
- rockchip->core_rst = devm_reset_control_get(dev, "core");
+ rockchip->core_rst = devm_reset_control_get_exclusive(dev, "core");
if (IS_ERR(rockchip->core_rst)) {
if (PTR_ERR(rockchip->core_rst) != -EPROBE_DEFER)
dev_err(dev, "missing core reset property in node\n");
return PTR_ERR(rockchip->core_rst);
}
- rockchip->mgmt_rst = devm_reset_control_get(dev, "mgmt");
+ rockchip->mgmt_rst = devm_reset_control_get_exclusive(dev, "mgmt");
if (IS_ERR(rockchip->mgmt_rst)) {
if (PTR_ERR(rockchip->mgmt_rst) != -EPROBE_DEFER)
dev_err(dev, "missing mgmt reset property in node\n");
return PTR_ERR(rockchip->mgmt_rst);
}
- rockchip->mgmt_sticky_rst = devm_reset_control_get(dev, "mgmt-sticky");
+ rockchip->mgmt_sticky_rst = devm_reset_control_get_exclusive(dev,
+ "mgmt-sticky");
if (IS_ERR(rockchip->mgmt_sticky_rst)) {
if (PTR_ERR(rockchip->mgmt_sticky_rst) != -EPROBE_DEFER)
dev_err(dev, "missing mgmt-sticky reset property in node\n");
return PTR_ERR(rockchip->mgmt_sticky_rst);
}
- rockchip->pipe_rst = devm_reset_control_get(dev, "pipe");
+ rockchip->pipe_rst = devm_reset_control_get_exclusive(dev, "pipe");
if (IS_ERR(rockchip->pipe_rst)) {
if (PTR_ERR(rockchip->pipe_rst) != -EPROBE_DEFER)
dev_err(dev, "missing pipe reset property in node\n");
return PTR_ERR(rockchip->pipe_rst);
}
- rockchip->pm_rst = devm_reset_control_get(dev, "pm");
+ rockchip->pm_rst = devm_reset_control_get_exclusive(dev, "pm");
if (IS_ERR(rockchip->pm_rst)) {
if (PTR_ERR(rockchip->pm_rst) != -EPROBE_DEFER)
dev_err(dev, "missing pm reset property in node\n");
return PTR_ERR(rockchip->pm_rst);
}
- rockchip->pclk_rst = devm_reset_control_get(dev, "pclk");
+ rockchip->pclk_rst = devm_reset_control_get_exclusive(dev, "pclk");
if (IS_ERR(rockchip->pclk_rst)) {
if (PTR_ERR(rockchip->pclk_rst) != -EPROBE_DEFER)
dev_err(dev, "missing pclk reset property in node\n");
return PTR_ERR(rockchip->pclk_rst);
}
- rockchip->aclk_rst = devm_reset_control_get(dev, "aclk");
+ rockchip->aclk_rst = devm_reset_control_get_exclusive(dev, "aclk");
if (IS_ERR(rockchip->aclk_rst)) {
if (PTR_ERR(rockchip->aclk_rst) != -EPROBE_DEFER)
dev_err(dev, "missing aclk reset property in node\n");
@@ -982,40 +1123,15 @@ static int rockchip_pcie_parse_dt(struct rockchip_pcie *rockchip)
return PTR_ERR(rockchip->clk_pcie_pm);
}
- irq = platform_get_irq_byname(pdev, "sys");
- if (irq < 0) {
- dev_err(dev, "missing sys IRQ resource\n");
- return -EINVAL;
- }
-
- err = devm_request_irq(dev, irq, rockchip_pcie_subsys_irq_handler,
- IRQF_SHARED, "pcie-sys", rockchip);
- if (err) {
- dev_err(dev, "failed to request PCIe subsystem IRQ\n");
+ err = rockchip_pcie_setup_irq(rockchip);
+ if (err)
return err;
- }
-
- irq = platform_get_irq_byname(pdev, "legacy");
- if (irq < 0) {
- dev_err(dev, "missing legacy IRQ resource\n");
- return -EINVAL;
- }
-
- irq_set_chained_handler_and_data(irq,
- rockchip_pcie_legacy_int_handler,
- rockchip);
-
- irq = platform_get_irq_byname(pdev, "client");
- if (irq < 0) {
- dev_err(dev, "missing client IRQ resource\n");
- return -EINVAL;
- }
- err = devm_request_irq(dev, irq, rockchip_pcie_client_irq_handler,
- IRQF_SHARED, "pcie-client", rockchip);
- if (err) {
- dev_err(dev, "failed to request PCIe client IRQ\n");
- return err;
+ rockchip->vpcie12v = devm_regulator_get_optional(dev, "vpcie12v");
+ if (IS_ERR(rockchip->vpcie12v)) {
+ if (PTR_ERR(rockchip->vpcie12v) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+ dev_info(dev, "no vpcie12v regulator found\n");
}
rockchip->vpcie3v3 = devm_regulator_get_optional(dev, "vpcie3v3");
@@ -1047,11 +1163,19 @@ static int rockchip_pcie_set_vpcie(struct rockchip_pcie *rockchip)
struct device *dev = rockchip->dev;
int err;
+ if (!IS_ERR(rockchip->vpcie12v)) {
+ err = regulator_enable(rockchip->vpcie12v);
+ if (err) {
+ dev_err(dev, "fail to enable vpcie12v regulator\n");
+ goto err_out;
+ }
+ }
+
if (!IS_ERR(rockchip->vpcie3v3)) {
err = regulator_enable(rockchip->vpcie3v3);
if (err) {
dev_err(dev, "fail to enable vpcie3v3 regulator\n");
- goto err_out;
+ goto err_disable_12v;
}
}
@@ -1079,6 +1203,9 @@ err_disable_1v8:
err_disable_3v3:
if (!IS_ERR(rockchip->vpcie3v3))
regulator_disable(rockchip->vpcie3v3);
+err_disable_12v:
+ if (!IS_ERR(rockchip->vpcie12v))
+ regulator_disable(rockchip->vpcie12v);
err_out:
return err;
}
@@ -1116,7 +1243,7 @@ static int rockchip_pcie_init_irq_domain(struct rockchip_pcie *rockchip)
return -EINVAL;
}
- rockchip->irq_domain = irq_domain_add_linear(intc, 4,
+ rockchip->irq_domain = irq_domain_add_linear(intc, PCI_NUM_INTX,
&intx_domain_ops, rockchip);
if (!rockchip->irq_domain) {
dev_err(dev, "failed to get a INTx IRQ domain\n");
@@ -1270,6 +1397,56 @@ static int rockchip_pcie_wait_l2(struct rockchip_pcie *rockchip)
return 0;
}
+static int rockchip_pcie_enable_clocks(struct rockchip_pcie *rockchip)
+{
+ struct device *dev = rockchip->dev;
+ int err;
+
+ err = clk_prepare_enable(rockchip->aclk_pcie);
+ if (err) {
+ dev_err(dev, "unable to enable aclk_pcie clock\n");
+ return err;
+ }
+
+ err = clk_prepare_enable(rockchip->aclk_perf_pcie);
+ if (err) {
+ dev_err(dev, "unable to enable aclk_perf_pcie clock\n");
+ goto err_aclk_perf_pcie;
+ }
+
+ err = clk_prepare_enable(rockchip->hclk_pcie);
+ if (err) {
+ dev_err(dev, "unable to enable hclk_pcie clock\n");
+ goto err_hclk_pcie;
+ }
+
+ err = clk_prepare_enable(rockchip->clk_pcie_pm);
+ if (err) {
+ dev_err(dev, "unable to enable clk_pcie_pm clock\n");
+ goto err_clk_pcie_pm;
+ }
+
+ return 0;
+
+err_clk_pcie_pm:
+ clk_disable_unprepare(rockchip->hclk_pcie);
+err_hclk_pcie:
+ clk_disable_unprepare(rockchip->aclk_perf_pcie);
+err_aclk_perf_pcie:
+ clk_disable_unprepare(rockchip->aclk_pcie);
+ return err;
+}
+
+static void rockchip_pcie_disable_clocks(void *data)
+{
+ struct rockchip_pcie *rockchip = data;
+
+ clk_disable_unprepare(rockchip->clk_pcie_pm);
+ clk_disable_unprepare(rockchip->hclk_pcie);
+ clk_disable_unprepare(rockchip->aclk_perf_pcie);
+ clk_disable_unprepare(rockchip->aclk_pcie);
+}
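rockchip_pcie_disable_clocks() takes a void * even though this patch only calls it directly from the teardown and error paths; that signature also happens to fit a generic cleanup callback. A hypothetical alternative registration (not what the patch does) would tie the clock teardown to the device's lifetime via the managed-action API:

	err = devm_add_action_or_reset(dev, rockchip_pcie_disable_clocks, rockchip);
	if (err)
		return err;	/* the action has already run on failure */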
+
static int __maybe_unused rockchip_pcie_suspend_noirq(struct device *dev)
{
struct rockchip_pcie *rockchip = dev_get_drvdata(dev);
@@ -1286,13 +1463,9 @@ static int __maybe_unused rockchip_pcie_suspend_noirq(struct device *dev)
return ret;
}
- phy_power_off(rockchip->phy);
- phy_exit(rockchip->phy);
+ rockchip_pcie_deinit_phys(rockchip);
- clk_disable_unprepare(rockchip->clk_pcie_pm);
- clk_disable_unprepare(rockchip->hclk_pcie);
- clk_disable_unprepare(rockchip->aclk_perf_pcie);
- clk_disable_unprepare(rockchip->aclk_pcie);
+ rockchip_pcie_disable_clocks(rockchip);
if (!IS_ERR(rockchip->vpcie0v9))
regulator_disable(rockchip->vpcie0v9);
@@ -1313,21 +1486,9 @@ static int __maybe_unused rockchip_pcie_resume_noirq(struct device *dev)
}
}
- err = clk_prepare_enable(rockchip->clk_pcie_pm);
+ err = rockchip_pcie_enable_clocks(rockchip);
if (err)
- goto err_pcie_pm;
-
- err = clk_prepare_enable(rockchip->hclk_pcie);
- if (err)
- goto err_hclk_pcie;
-
- err = clk_prepare_enable(rockchip->aclk_perf_pcie);
- if (err)
- goto err_aclk_perf_pcie;
-
- err = clk_prepare_enable(rockchip->aclk_pcie);
- if (err)
- goto err_aclk_pcie;
+ goto err_disable_0v9;
err = rockchip_pcie_init_port(rockchip);
if (err)
@@ -1335,7 +1496,7 @@ static int __maybe_unused rockchip_pcie_resume_noirq(struct device *dev)
err = rockchip_pcie_cfg_atu(rockchip);
if (err)
- goto err_pcie_resume;
+ goto err_deinit_port;
/* Need this to enter L1 again */
rockchip_pcie_update_txcredit_mui(rockchip);
@@ -1343,15 +1504,13 @@ static int __maybe_unused rockchip_pcie_resume_noirq(struct device *dev)
return 0;
+err_deinit_port:
+ rockchip_pcie_deinit_phys(rockchip);
err_pcie_resume:
- clk_disable_unprepare(rockchip->aclk_pcie);
-err_aclk_pcie:
- clk_disable_unprepare(rockchip->aclk_perf_pcie);
-err_aclk_perf_pcie:
- clk_disable_unprepare(rockchip->hclk_pcie);
-err_hclk_pcie:
- clk_disable_unprepare(rockchip->clk_pcie_pm);
-err_pcie_pm:
+ rockchip_pcie_disable_clocks(rockchip);
+err_disable_0v9:
+ if (!IS_ERR(rockchip->vpcie0v9))
+ regulator_disable(rockchip->vpcie0v9);
return err;
}
@@ -1385,29 +1544,9 @@ static int rockchip_pcie_probe(struct platform_device *pdev)
if (err)
return err;
- err = clk_prepare_enable(rockchip->aclk_pcie);
- if (err) {
- dev_err(dev, "unable to enable aclk_pcie clock\n");
- goto err_aclk_pcie;
- }
-
- err = clk_prepare_enable(rockchip->aclk_perf_pcie);
- if (err) {
- dev_err(dev, "unable to enable aclk_perf_pcie clock\n");
- goto err_aclk_perf_pcie;
- }
-
- err = clk_prepare_enable(rockchip->hclk_pcie);
- if (err) {
- dev_err(dev, "unable to enable hclk_pcie clock\n");
- goto err_hclk_pcie;
- }
-
- err = clk_prepare_enable(rockchip->clk_pcie_pm);
- if (err) {
- dev_err(dev, "unable to enable hclk_pcie clock\n");
- goto err_pcie_pm;
- }
+ err = rockchip_pcie_enable_clocks(rockchip);
+ if (err)
+ return err;
err = rockchip_pcie_set_vpcie(rockchip);
if (err) {
@@ -1423,12 +1562,12 @@ static int rockchip_pcie_probe(struct platform_device *pdev)
err = rockchip_pcie_init_irq_domain(rockchip);
if (err < 0)
- goto err_vpcie;
+ goto err_deinit_port;
err = of_pci_get_host_bridge_resources(dev->of_node, 0, 0xff,
&res, &io_base);
if (err)
- goto err_vpcie;
+ goto err_remove_irq_domain;
err = devm_request_pci_bus_resources(dev, &res);
if (err)
@@ -1466,12 +1605,12 @@ static int rockchip_pcie_probe(struct platform_device *pdev)
err = rockchip_pcie_cfg_atu(rockchip);
if (err)
- goto err_free_res;
+ goto err_unmap_iospace;
rockchip->msg_region = devm_ioremap(dev, rockchip->msg_bus_addr, SZ_1M);
if (!rockchip->msg_region) {
err = -ENOMEM;
- goto err_free_res;
+ goto err_unmap_iospace;
}
list_splice_init(&res, &bridge->windows);
@@ -1484,7 +1623,7 @@ static int rockchip_pcie_probe(struct platform_device *pdev)
err = pci_scan_root_bus_bridge(bridge);
if (err < 0)
- goto err_free_res;
+ goto err_unmap_iospace;
bus = bridge->bus;
@@ -1498,9 +1637,17 @@ static int rockchip_pcie_probe(struct platform_device *pdev)
pci_bus_add_devices(bus);
return 0;
+err_unmap_iospace:
+ pci_unmap_iospace(rockchip->io);
err_free_res:
pci_free_resource_list(&res);
+err_remove_irq_domain:
+ irq_domain_remove(rockchip->irq_domain);
+err_deinit_port:
+ rockchip_pcie_deinit_phys(rockchip);
err_vpcie:
+ if (!IS_ERR(rockchip->vpcie12v))
+ regulator_disable(rockchip->vpcie12v);
if (!IS_ERR(rockchip->vpcie3v3))
regulator_disable(rockchip->vpcie3v3);
if (!IS_ERR(rockchip->vpcie1v8))
@@ -1508,14 +1655,7 @@ err_vpcie:
if (!IS_ERR(rockchip->vpcie0v9))
regulator_disable(rockchip->vpcie0v9);
err_set_vpcie:
- clk_disable_unprepare(rockchip->clk_pcie_pm);
-err_pcie_pm:
- clk_disable_unprepare(rockchip->hclk_pcie);
-err_hclk_pcie:
- clk_disable_unprepare(rockchip->aclk_perf_pcie);
-err_aclk_perf_pcie:
- clk_disable_unprepare(rockchip->aclk_pcie);
-err_aclk_pcie:
+ rockchip_pcie_disable_clocks(rockchip);
return err;
}
@@ -1529,14 +1669,12 @@ static int rockchip_pcie_remove(struct platform_device *pdev)
pci_unmap_iospace(rockchip->io);
irq_domain_remove(rockchip->irq_domain);
- phy_power_off(rockchip->phy);
- phy_exit(rockchip->phy);
+ rockchip_pcie_deinit_phys(rockchip);
- clk_disable_unprepare(rockchip->clk_pcie_pm);
- clk_disable_unprepare(rockchip->hclk_pcie);
- clk_disable_unprepare(rockchip->aclk_perf_pcie);
- clk_disable_unprepare(rockchip->aclk_pcie);
+ rockchip_pcie_disable_clocks(rockchip);
+ if (!IS_ERR(rockchip->vpcie12v))
+ regulator_disable(rockchip->vpcie12v);
if (!IS_ERR(rockchip->vpcie3v3))
regulator_disable(rockchip->vpcie3v3);
if (!IS_ERR(rockchip->vpcie1v8))
diff --git a/drivers/pci/host/pcie-xilinx-nwl.c b/drivers/pci/host/pcie-xilinx-nwl.c
index eec641a34fc5..65dea98b2643 100644
--- a/drivers/pci/host/pcie-xilinx-nwl.c
+++ b/drivers/pci/host/pcie-xilinx-nwl.c
@@ -133,7 +133,6 @@
#define CFG_DMA_REG_BAR GENMASK(2, 0)
#define INT_PCI_MSI_NR (2 * 32)
-#define INTX_NUM 4
/* Reading the PS_LINKUP */
#define PS_LINKUP_OFFSET 0x00000238
@@ -334,9 +333,8 @@ static void nwl_pcie_leg_handler(struct irq_desc *desc)
while ((status = nwl_bridge_readl(pcie, MSGF_LEG_STATUS) &
MSGF_LEG_SR_MASKALL) != 0) {
- for_each_set_bit(bit, &status, INTX_NUM) {
- virq = irq_find_mapping(pcie->legacy_irq_domain,
- bit + 1);
+ for_each_set_bit(bit, &status, PCI_NUM_INTX) {
+ virq = irq_find_mapping(pcie->legacy_irq_domain, bit);
if (virq)
generic_handle_irq(virq);
}
@@ -436,6 +434,7 @@ static int nwl_legacy_map(struct irq_domain *domain, unsigned int irq,
static const struct irq_domain_ops legacy_domain_ops = {
.map = nwl_legacy_map,
+ .xlate = pci_irqd_intx_xlate,
};
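Together with the handler change above, INTA..INTD are now mapped at hwirqs 0..3, and pci_irqd_intx_xlate translates the 1-based interrupt specifier from the device tree into that range. The core helper behaves roughly as follows (paraphrased; the authoritative version lives in the PCI core):

int pci_irqd_intx_xlate(struct irq_domain *d, struct device_node *node,
			const u32 *intspec, unsigned int intsize,
			unsigned long *out_hwirq, unsigned int *out_type)
{
	const u32 intx = intspec[0];

	if (intx < PCI_INTERRUPT_INTA || intx > PCI_INTERRUPT_INTD)
		return -EINVAL;

	*out_hwirq = intx - PCI_INTERRUPT_INTA;
	return 0;
}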
#ifdef CONFIG_PCI_MSI
@@ -559,7 +558,7 @@ static int nwl_pcie_init_irq_domain(struct nwl_pcie *pcie)
}
pcie->legacy_irq_domain = irq_domain_add_linear(legacy_intc_node,
- INTX_NUM,
+ PCI_NUM_INTX,
&legacy_domain_ops,
pcie);
@@ -813,7 +812,7 @@ static int nwl_pcie_parse_dt(struct nwl_pcie *pcie,
pcie->irq_intx = platform_get_irq_byname(pdev, "intx");
if (pcie->irq_intx < 0) {
dev_err(dev, "failed to get intx IRQ %d\n", pcie->irq_intx);
- return -EINVAL;
+ return pcie->irq_intx;
}
irq_set_chained_handler_and_data(pcie->irq_intx,
diff --git a/drivers/pci/host/pcie-xilinx.c b/drivers/pci/host/pcie-xilinx.c
index f63fa5e0278c..f30d03309c7f 100644
--- a/drivers/pci/host/pcie-xilinx.c
+++ b/drivers/pci/host/pcie-xilinx.c
@@ -60,6 +60,7 @@
#define XILINX_PCIE_INTR_MST_SLVERR BIT(27)
#define XILINX_PCIE_INTR_MST_ERRP BIT(28)
#define XILINX_PCIE_IMR_ALL_MASK 0x1FF30FED
+#define XILINX_PCIE_IMR_ENABLE_MASK 0x1FF30F0D
#define XILINX_PCIE_IDR_ALL_MASK 0xFFFFFFFF
/* Root Port Error FIFO Read Register definitions */
@@ -369,6 +370,7 @@ static int xilinx_pcie_intx_map(struct irq_domain *domain, unsigned int irq,
/* INTx IRQ Domain operations */
static const struct irq_domain_ops intx_domain_ops = {
.map = xilinx_pcie_intx_map,
+ .xlate = pci_irqd_intx_xlate,
};
/* PCIe HW Functions */
@@ -384,7 +386,7 @@ static irqreturn_t xilinx_pcie_intr_handler(int irq, void *data)
{
struct xilinx_pcie_port *port = (struct xilinx_pcie_port *)data;
struct device *dev = port->dev;
- u32 val, mask, status, msi_data;
+ u32 val, mask, status;
/* Read interrupt decode and mask registers */
val = pcie_read(port, XILINX_PCIE_REG_IDR);
@@ -424,8 +426,7 @@ static irqreturn_t xilinx_pcie_intr_handler(int irq, void *data)
xilinx_pcie_clear_err_interrupts(port);
}
- if (status & XILINX_PCIE_INTR_INTX) {
- /* INTx interrupt received */
+ if (status & (XILINX_PCIE_INTR_INTX | XILINX_PCIE_INTR_MSI)) {
val = pcie_read(port, XILINX_PCIE_REG_RPIFR1);
/* Check whether the interrupt is valid */
@@ -434,41 +435,24 @@ static irqreturn_t xilinx_pcie_intr_handler(int irq, void *data)
goto error;
}
- if (!(val & XILINX_PCIE_RPIFR1_MSI_INTR)) {
- /* Clear interrupt FIFO register 1 */
- pcie_write(port, XILINX_PCIE_RPIFR1_ALL_MASK,
- XILINX_PCIE_REG_RPIFR1);
-
- /* Handle INTx Interrupt */
- val = ((val & XILINX_PCIE_RPIFR1_INTR_MASK) >>
- XILINX_PCIE_RPIFR1_INTR_SHIFT) + 1;
- generic_handle_irq(irq_find_mapping(port->leg_domain,
- val));
- }
- }
-
- if (status & XILINX_PCIE_INTR_MSI) {
- /* MSI Interrupt */
- val = pcie_read(port, XILINX_PCIE_REG_RPIFR1);
-
- if (!(val & XILINX_PCIE_RPIFR1_INTR_VALID)) {
- dev_warn(dev, "RP Intr FIFO1 read error\n");
- goto error;
- }
-
+ /* Decode the IRQ number */
if (val & XILINX_PCIE_RPIFR1_MSI_INTR) {
- msi_data = pcie_read(port, XILINX_PCIE_REG_RPIFR2) &
- XILINX_PCIE_RPIFR2_MSG_DATA;
+ val = pcie_read(port, XILINX_PCIE_REG_RPIFR2) &
+ XILINX_PCIE_RPIFR2_MSG_DATA;
+ } else {
+ val = (val & XILINX_PCIE_RPIFR1_INTR_MASK) >>
+ XILINX_PCIE_RPIFR1_INTR_SHIFT;
+ val = irq_find_mapping(port->leg_domain, val);
+ }
- /* Clear interrupt FIFO register 1 */
- pcie_write(port, XILINX_PCIE_RPIFR1_ALL_MASK,
- XILINX_PCIE_REG_RPIFR1);
+ /* Clear interrupt FIFO register 1 */
+ pcie_write(port, XILINX_PCIE_RPIFR1_ALL_MASK,
+ XILINX_PCIE_REG_RPIFR1);
- if (IS_ENABLED(CONFIG_PCI_MSI)) {
- /* Handle MSI Interrupt */
- generic_handle_irq(msi_data);
- }
- }
+ /* Handle the interrupt */
+ if (IS_ENABLED(CONFIG_PCI_MSI) ||
+ !(val & XILINX_PCIE_RPIFR1_MSI_INTR))
+ generic_handle_irq(val);
}
if (status & XILINX_PCIE_INTR_SLV_UNSUPP)
@@ -524,7 +508,7 @@ static int xilinx_pcie_init_irq_domain(struct xilinx_pcie_port *port)
return -ENODEV;
}
- port->leg_domain = irq_domain_add_linear(pcie_intc_node, 4,
+ port->leg_domain = irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX,
&intx_domain_ops,
port);
if (!port->leg_domain) {
@@ -571,8 +555,8 @@ static void xilinx_pcie_init_port(struct xilinx_pcie_port *port)
XILINX_PCIE_IMR_ALL_MASK,
XILINX_PCIE_REG_IDR);
- /* Enable all interrupts */
- pcie_write(port, XILINX_PCIE_IMR_ALL_MASK, XILINX_PCIE_REG_IMR);
+ /* Enable all interrupts we handle */
+ pcie_write(port, XILINX_PCIE_IMR_ENABLE_MASK, XILINX_PCIE_REG_IMR);
/* Enable the Bridge enable bit */
pcie_write(port, pcie_read(port, XILINX_PCIE_REG_RPSC) |
diff --git a/drivers/pci/host/vmd.c b/drivers/pci/host/vmd.c
index 6088c3083194..509893bc3e63 100644
--- a/drivers/pci/host/vmd.c
+++ b/drivers/pci/host/vmd.c
@@ -183,7 +183,7 @@ static struct vmd_irq_list *vmd_next_irq(struct vmd_dev *vmd, struct msi_desc *d
int i, best = 1;
unsigned long flags;
- if (!desc->msi_attrib.is_msix || vmd->msix_count == 1)
+ if (pci_is_bridge(msi_desc_to_pci_dev(desc)) || vmd->msix_count == 1)
return &vmd->irqs[0];
raw_spin_lock_irqsave(&list_lock, flags);
@@ -697,7 +697,7 @@ static int vmd_probe(struct pci_dev *dev, const struct pci_device_id *id)
return -ENODEV;
vmd->msix_count = pci_alloc_irq_vectors(dev, 1, vmd->msix_count,
- PCI_IRQ_MSIX | PCI_IRQ_AFFINITY);
+ PCI_IRQ_MSIX);
if (vmd->msix_count < 0)
return vmd->msix_count;
@@ -755,6 +755,11 @@ static void vmd_remove(struct pci_dev *dev)
static int vmd_suspend(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
+ struct vmd_dev *vmd = pci_get_drvdata(pdev);
+ int i;
+
+ for (i = 0; i < vmd->msix_count; i++)
+ devm_free_irq(dev, pci_irq_vector(pdev, i), &vmd->irqs[i]);
pci_save_state(pdev);
return 0;
@@ -763,6 +768,16 @@ static int vmd_suspend(struct device *dev)
static int vmd_resume(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
+ struct vmd_dev *vmd = pci_get_drvdata(pdev);
+ int err, i;
+
+ for (i = 0; i < vmd->msix_count; i++) {
+ err = devm_request_irq(dev, pci_irq_vector(pdev, i),
+ vmd_irq, IRQF_NO_THREAD,
+ "vmd", &vmd->irqs[i]);
+ if (err)
+ return err;
+ }
pci_restore_state(pdev);
return 0;
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 6967c6b4cf6b..0b0a2ae9a853 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -4657,23 +4657,6 @@ static void quirk_intel_qat_vf_cap(struct pci_dev *pdev)
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x443, quirk_intel_qat_vf_cap);
-/*
- * VMD-enabled root ports will change the source ID for all messages
- * to the VMD device. Rather than doing device matching with the source
- * ID, the AER driver should traverse the child device tree, reading
- * AER registers to find the faulting device.
- */
-static void quirk_no_aersid(struct pci_dev *pdev)
-{
- /* VMD Domain */
- if (pdev->bus->sysdata && pci_domain_nr(pdev->bus) >= 0x10000)
- pdev->bus->bus_flags |= PCI_BUS_FLAGS_NO_AERSID;
-}
-DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x2030, quirk_no_aersid);
-DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x2031, quirk_no_aersid);
-DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x2032, quirk_no_aersid);
-DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x2033, quirk_no_aersid);
-
/* FLR may cause some 82579 devices to hang. */
static void quirk_intel_no_flr(struct pci_dev *dev)
{