Diffstat (limited to 'drivers/pci')
-rw-r--r--  drivers/pci/controller/cadence/pci-j721e.c         |   3
-rw-r--r--  drivers/pci/controller/cadence/pcie-cadence-ep.c   |  21
-rw-r--r--  drivers/pci/controller/cadence/pcie-cadence-host.c |  10
-rw-r--r--  drivers/pci/controller/cadence/pcie-cadence.h      |   7
-rw-r--r--  drivers/pci/controller/dwc/pci-imx6.c              |  23
-rw-r--r--  drivers/pci/controller/dwc/pcie-designware-host.c  |   3
-rw-r--r--  drivers/pci/controller/dwc/pcie-dw-rockchip.c      | 119
-rw-r--r--  drivers/pci/controller/dwc/pcie-qcom-ep.c          |  91
-rw-r--r--  drivers/pci/controller/dwc/pcie-tegra194.c         |   9
-rw-r--r--  drivers/pci/controller/pcie-mediatek-gen3.c        |   8
-rw-r--r--  drivers/pci/controller/pcie-mediatek.c             |   1
-rw-r--r--  drivers/pci/controller/pcie-microchip-host.c       |  18
-rw-r--r--  drivers/pci/hotplug/pnv_php.c                      |   1
-rw-r--r--  drivers/pci/hotplug/rpadlpar_core.c                |   1
-rw-r--r--  drivers/pci/hotplug/rpaphp_core.c                  |   2
-rw-r--r--  drivers/pci/hotplug/rpaphp_pci.c                   |   1
-rw-r--r--  drivers/pci/hotplug/rpaphp_slot.c                  |   1
-rw-r--r--  drivers/pci/p2pdma.c                               |  25
-rw-r--r--  drivers/pci/pci-acpi.c                             |  41
-rw-r--r--  drivers/pci/pci-driver.c                           |  45
-rw-r--r--  drivers/pci/pci.c                                  | 353
-rw-r--r--  drivers/pci/pcie/aer.c                             |   7
-rw-r--r--  drivers/pci/quirks.c                               |  47
23 files changed, 561 insertions(+), 276 deletions(-)
diff --git a/drivers/pci/controller/cadence/pci-j721e.c b/drivers/pci/controller/cadence/pci-j721e.c
index 768d33f9ebc8..a82f845cc4b5 100644
--- a/drivers/pci/controller/cadence/pci-j721e.c
+++ b/drivers/pci/controller/cadence/pci-j721e.c
@@ -69,6 +69,7 @@ struct j721e_pcie_data {
enum j721e_pcie_mode mode;
unsigned int quirk_retrain_flag:1;
unsigned int quirk_detect_quiet_flag:1;
+ unsigned int quirk_disable_flr:1;
u32 linkdown_irq_regfield;
unsigned int byte_access_allowed:1;
};
@@ -307,6 +308,7 @@ static const struct j721e_pcie_data j7200_pcie_rc_data = {
static const struct j721e_pcie_data j7200_pcie_ep_data = {
.mode = PCI_MODE_EP,
.quirk_detect_quiet_flag = true,
+ .quirk_disable_flr = true,
};
static const struct j721e_pcie_data am64_pcie_rc_data = {
@@ -405,6 +407,7 @@ static int j721e_pcie_probe(struct platform_device *pdev)
return -ENOMEM;
ep->quirk_detect_quiet_flag = data->quirk_detect_quiet_flag;
+ ep->quirk_disable_flr = data->quirk_disable_flr;
cdns_pcie = &ep->pcie;
cdns_pcie->dev = dev;
diff --git a/drivers/pci/controller/cadence/pcie-cadence-ep.c b/drivers/pci/controller/cadence/pcie-cadence-ep.c
index 88e05b9c2e5b..b8b655d4047e 100644
--- a/drivers/pci/controller/cadence/pcie-cadence-ep.c
+++ b/drivers/pci/controller/cadence/pcie-cadence-ep.c
@@ -187,8 +187,7 @@ static int cdns_pcie_ep_map_addr(struct pci_epc *epc, u8 fn, u8 vfn,
struct cdns_pcie *pcie = &ep->pcie;
u32 r;
- r = find_first_zero_bit(&ep->ob_region_map,
- sizeof(ep->ob_region_map) * BITS_PER_LONG);
+ r = find_first_zero_bit(&ep->ob_region_map, BITS_PER_LONG);
if (r >= ep->max_regions - 1) {
dev_err(&epc->dev, "no free outbound region\n");
return -EINVAL;
@@ -565,7 +564,8 @@ static int cdns_pcie_ep_start(struct pci_epc *epc)
struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
struct cdns_pcie *pcie = &ep->pcie;
struct device *dev = pcie->dev;
- int ret;
+ int max_epfs = sizeof(epc->function_num_map) * 8;
+ int ret, value, epf;
/*
* BIT(0) is hardwired to 1, hence function 0 is always enabled
@@ -573,6 +573,21 @@ static int cdns_pcie_ep_start(struct pci_epc *epc)
*/
cdns_pcie_writel(pcie, CDNS_PCIE_LM_EP_FUNC_CFG, epc->function_num_map);
+ if (ep->quirk_disable_flr) {
+ for (epf = 0; epf < max_epfs; epf++) {
+ if (!(epc->function_num_map & BIT(epf)))
+ continue;
+
+ value = cdns_pcie_ep_fn_readl(pcie, epf,
+ CDNS_PCIE_EP_FUNC_DEV_CAP_OFFSET +
+ PCI_EXP_DEVCAP);
+ value &= ~PCI_EXP_DEVCAP_FLR;
+ cdns_pcie_ep_fn_writel(pcie, epf,
+ CDNS_PCIE_EP_FUNC_DEV_CAP_OFFSET +
+ PCI_EXP_DEVCAP, value);
+ }
+ }
+
ret = cdns_pcie_start_link(pcie);
if (ret) {
dev_err(dev, "Failed to start link\n");
diff --git a/drivers/pci/controller/cadence/pcie-cadence-host.c b/drivers/pci/controller/cadence/pcie-cadence-host.c
index fb96d37a135c..940c7dd701d6 100644
--- a/drivers/pci/controller/cadence/pcie-cadence-host.c
+++ b/drivers/pci/controller/cadence/pcie-cadence-host.c
@@ -123,6 +123,14 @@ static int cdns_pcie_retrain(struct cdns_pcie *pcie)
return ret;
}
+static void cdns_pcie_host_enable_ptm_response(struct cdns_pcie *pcie)
+{
+ u32 val;
+
+ val = cdns_pcie_readl(pcie, CDNS_PCIE_LM_PTM_CTRL);
+ cdns_pcie_writel(pcie, CDNS_PCIE_LM_PTM_CTRL, val | CDNS_PCIE_LM_TPM_CTRL_PTMRSEN);
+}
+
static int cdns_pcie_host_start_link(struct cdns_pcie_rc *rc)
{
struct cdns_pcie *pcie = &rc->pcie;
@@ -501,6 +509,8 @@ int cdns_pcie_host_setup(struct cdns_pcie_rc *rc)
if (rc->quirk_detect_quiet_flag)
cdns_pcie_detect_quiet_min_delay_set(&rc->pcie);
+ cdns_pcie_host_enable_ptm_response(pcie);
+
ret = cdns_pcie_start_link(pcie);
if (ret) {
dev_err(dev, "Failed to start link\n");
diff --git a/drivers/pci/controller/cadence/pcie-cadence.h b/drivers/pci/controller/cadence/pcie-cadence.h
index c8a27b6290ce..190786e47df9 100644
--- a/drivers/pci/controller/cadence/pcie-cadence.h
+++ b/drivers/pci/controller/cadence/pcie-cadence.h
@@ -116,6 +116,10 @@
#define LM_RC_BAR_CFG_APERTURE(bar, aperture) \
(((aperture) - 2) << ((bar) * 8))
+/* PTM Control Register */
+#define CDNS_PCIE_LM_PTM_CTRL (CDNS_PCIE_LM_BASE + 0x0da8)
+#define CDNS_PCIE_LM_TPM_CTRL_PTMRSEN BIT(17)
+
/*
* Endpoint Function Registers (PCI configuration space for endpoint functions)
*/
@@ -123,6 +127,7 @@
#define CDNS_PCIE_EP_FUNC_MSI_CAP_OFFSET 0x90
#define CDNS_PCIE_EP_FUNC_MSIX_CAP_OFFSET 0xb0
+#define CDNS_PCIE_EP_FUNC_DEV_CAP_OFFSET 0xc0
#define CDNS_PCIE_EP_FUNC_SRIOV_CAP_OFFSET 0x200
/*
@@ -357,6 +362,7 @@ struct cdns_pcie_epf {
* minimize time between read and write
* @epf: Structure to hold info about endpoint function
* @quirk_detect_quiet_flag: LTSSM Detect Quiet min delay set as quirk
+ * @quirk_disable_flr: Disable FLR (Function Level Reset) quirk flag
*/
struct cdns_pcie_ep {
struct cdns_pcie pcie;
@@ -372,6 +378,7 @@ struct cdns_pcie_ep {
spinlock_t lock;
struct cdns_pcie_epf *epf;
unsigned int quirk_detect_quiet_flag:1;
+ unsigned int quirk_disable_flr:1;
};
diff --git a/drivers/pci/controller/dwc/pci-imx6.c b/drivers/pci/controller/dwc/pci-imx6.c
index 6619e3caffe2..7a285fb0f619 100644
--- a/drivers/pci/controller/dwc/pci-imx6.c
+++ b/drivers/pci/controller/dwc/pci-imx6.c
@@ -408,6 +408,11 @@ static void imx6_pcie_assert_core_reset(struct imx6_pcie *imx6_pcie)
dev_err(dev, "failed to disable vpcie regulator: %d\n",
ret);
}
+
+ /* Some boards don't have PCIe reset GPIO. */
+ if (gpio_is_valid(imx6_pcie->reset_gpio))
+ gpio_set_value_cansleep(imx6_pcie->reset_gpio,
+ imx6_pcie->gpio_active_high);
}
static unsigned int imx6_pcie_grp_offset(const struct imx6_pcie *imx6_pcie)
@@ -540,15 +545,6 @@ static void imx6_pcie_deassert_core_reset(struct imx6_pcie *imx6_pcie)
/* allow the clocks to stabilize */
usleep_range(200, 500);
- /* Some boards don't have PCIe reset GPIO. */
- if (gpio_is_valid(imx6_pcie->reset_gpio)) {
- gpio_set_value_cansleep(imx6_pcie->reset_gpio,
- imx6_pcie->gpio_active_high);
- msleep(100);
- gpio_set_value_cansleep(imx6_pcie->reset_gpio,
- !imx6_pcie->gpio_active_high);
- }
-
switch (imx6_pcie->drvdata->variant) {
case IMX8MQ:
reset_control_deassert(imx6_pcie->pciephy_reset);
@@ -595,6 +591,15 @@ static void imx6_pcie_deassert_core_reset(struct imx6_pcie *imx6_pcie)
break;
}
+ /* Some boards don't have PCIe reset GPIO. */
+ if (gpio_is_valid(imx6_pcie->reset_gpio)) {
+ msleep(100);
+ gpio_set_value_cansleep(imx6_pcie->reset_gpio,
+ !imx6_pcie->gpio_active_high);
+ /* Wait for 100ms after PERST# deassertion (PCIe r5.0, 6.6.1) */
+ msleep(100);
+ }
+
return;
err_ref_clk:
diff --git a/drivers/pci/controller/dwc/pcie-designware-host.c b/drivers/pci/controller/dwc/pcie-designware-host.c
index 2fa86f32d964..9979302532b7 100644
--- a/drivers/pci/controller/dwc/pcie-designware-host.c
+++ b/drivers/pci/controller/dwc/pcie-designware-host.c
@@ -396,7 +396,8 @@ int dw_pcie_host_init(struct pcie_port *pp)
sizeof(pp->msi_msg),
DMA_FROM_DEVICE,
DMA_ATTR_SKIP_CPU_SYNC);
- if (dma_mapping_error(pci->dev, pp->msi_data)) {
+ ret = dma_mapping_error(pci->dev, pp->msi_data);
+ if (ret) {
dev_err(pci->dev, "Failed to map MSI data\n");
pp->msi_data = 0;
goto err_free_msi;
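The dw_pcie_host_init() change above propagates the value of dma_mapping_error() instead of discarding it. A small sketch of that pattern with an illustrative helper name, relying on dma_mapping_error() returning 0 on success and a negative errno when the handle is invalid:

#include <linux/device.h>
#include <linux/dma-mapping.h>

/* Map one buffer and return 0 or the negative errno from the mapping check. */
static int example_map_buffer(struct device *dev, void *cpu_addr, size_t size,
			      dma_addr_t *handle)
{
	*handle = dma_map_single(dev, cpu_addr, size, DMA_FROM_DEVICE);

	/* 0 on success, -ENOMEM (or similar) if the DMA handle is not usable. */
	return dma_mapping_error(dev, *handle);
}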
diff --git a/drivers/pci/controller/dwc/pcie-dw-rockchip.c b/drivers/pci/controller/dwc/pcie-dw-rockchip.c
index c9b341e55cbb..8c5bb9d7cc36 100644
--- a/drivers/pci/controller/dwc/pcie-dw-rockchip.c
+++ b/drivers/pci/controller/dwc/pcie-dw-rockchip.c
@@ -10,9 +10,12 @@
#include <linux/clk.h>
#include <linux/gpio/consumer.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/irqdomain.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of_device.h>
+#include <linux/of_irq.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
@@ -26,6 +29,7 @@
*/
#define HIWORD_UPDATE(mask, val) (((mask) << 16) | (val))
#define HIWORD_UPDATE_BIT(val) HIWORD_UPDATE(val, val)
+#define HIWORD_DISABLE_BIT(val) HIWORD_UPDATE(val, ~val)
#define to_rockchip_pcie(x) dev_get_drvdata((x)->dev)
@@ -36,10 +40,12 @@
#define PCIE_LINKUP (PCIE_SMLH_LINKUP | PCIE_RDLH_LINKUP)
#define PCIE_L0S_ENTRY 0x11
#define PCIE_CLIENT_GENERAL_CONTROL 0x0
+#define PCIE_CLIENT_INTR_STATUS_LEGACY 0x8
+#define PCIE_CLIENT_INTR_MASK_LEGACY 0x1c
#define PCIE_CLIENT_GENERAL_DEBUG 0x104
-#define PCIE_CLIENT_HOT_RESET_CTRL 0x180
+#define PCIE_CLIENT_HOT_RESET_CTRL 0x180
#define PCIE_CLIENT_LTSSM_STATUS 0x300
-#define PCIE_LTSSM_ENABLE_ENHANCE BIT(4)
+#define PCIE_LTSSM_ENABLE_ENHANCE BIT(4)
#define PCIE_LTSSM_STATUS_MASK GENMASK(5, 0)
struct rockchip_pcie {
@@ -51,6 +57,7 @@ struct rockchip_pcie {
struct reset_control *rst;
struct gpio_desc *rst_gpio;
struct regulator *vpcie3v3;
+ struct irq_domain *irq_domain;
};
static int rockchip_pcie_readl_apb(struct rockchip_pcie *rockchip,
@@ -65,6 +72,78 @@ static void rockchip_pcie_writel_apb(struct rockchip_pcie *rockchip,
writel_relaxed(val, rockchip->apb_base + reg);
}
+static void rockchip_pcie_legacy_int_handler(struct irq_desc *desc)
+{
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ struct rockchip_pcie *rockchip = irq_desc_get_handler_data(desc);
+ unsigned long reg, hwirq;
+
+ chained_irq_enter(chip, desc);
+
+ reg = rockchip_pcie_readl_apb(rockchip, PCIE_CLIENT_INTR_STATUS_LEGACY);
+
+ for_each_set_bit(hwirq, &reg, 4)
+ generic_handle_domain_irq(rockchip->irq_domain, hwirq);
+
+ chained_irq_exit(chip, desc);
+}
+
+static void rockchip_intx_mask(struct irq_data *data)
+{
+ rockchip_pcie_writel_apb(irq_data_get_irq_chip_data(data),
+ HIWORD_UPDATE_BIT(BIT(data->hwirq)),
+ PCIE_CLIENT_INTR_MASK_LEGACY);
+};
+
+static void rockchip_intx_unmask(struct irq_data *data)
+{
+ rockchip_pcie_writel_apb(irq_data_get_irq_chip_data(data),
+ HIWORD_DISABLE_BIT(BIT(data->hwirq)),
+ PCIE_CLIENT_INTR_MASK_LEGACY);
+};
+
+static struct irq_chip rockchip_intx_irq_chip = {
+ .name = "INTx",
+ .irq_mask = rockchip_intx_mask,
+ .irq_unmask = rockchip_intx_unmask,
+ .flags = IRQCHIP_SKIP_SET_WAKE | IRQCHIP_MASK_ON_SUSPEND,
+};
+
+static int rockchip_pcie_intx_map(struct irq_domain *domain, unsigned int irq,
+ irq_hw_number_t hwirq)
+{
+ irq_set_chip_and_handler(irq, &rockchip_intx_irq_chip, handle_level_irq);
+ irq_set_chip_data(irq, domain->host_data);
+
+ return 0;
+}
+
+static const struct irq_domain_ops intx_domain_ops = {
+ .map = rockchip_pcie_intx_map,
+};
+
+static int rockchip_pcie_init_irq_domain(struct rockchip_pcie *rockchip)
+{
+ struct device *dev = rockchip->pci.dev;
+ struct device_node *intc;
+
+ intc = of_get_child_by_name(dev->of_node, "legacy-interrupt-controller");
+ if (!intc) {
+ dev_err(dev, "missing child interrupt-controller node\n");
+ return -EINVAL;
+ }
+
+ rockchip->irq_domain = irq_domain_add_linear(intc, PCI_NUM_INTX,
+ &intx_domain_ops, rockchip);
+ of_node_put(intc);
+ if (!rockchip->irq_domain) {
+ dev_err(dev, "failed to get a INTx IRQ domain\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static void rockchip_pcie_enable_ltssm(struct rockchip_pcie *rockchip)
{
rockchip_pcie_writel_apb(rockchip, PCIE_CLIENT_ENABLE_LTSSM,
@@ -111,7 +190,20 @@ static int rockchip_pcie_host_init(struct pcie_port *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct rockchip_pcie *rockchip = to_rockchip_pcie(pci);
+ struct device *dev = rockchip->pci.dev;
u32 val = HIWORD_UPDATE_BIT(PCIE_LTSSM_ENABLE_ENHANCE);
+ int irq, ret;
+
+ irq = of_irq_get_byname(dev->of_node, "legacy");
+ if (irq < 0)
+ return irq;
+
+ ret = rockchip_pcie_init_irq_domain(rockchip);
+ if (ret < 0)
+ dev_err(dev, "failed to init irq domain\n");
+
+ irq_set_chained_handler_and_data(irq, rockchip_pcie_legacy_int_handler,
+ rockchip);
/* LTSSM enable control mode */
rockchip_pcie_writel_apb(rockchip, val, PCIE_CLIENT_HOT_RESET_CTRL);
@@ -152,6 +244,11 @@ static int rockchip_pcie_resource_get(struct platform_device *pdev,
if (IS_ERR(rockchip->rst_gpio))
return PTR_ERR(rockchip->rst_gpio);
+ rockchip->rst = devm_reset_control_array_get_exclusive(&pdev->dev);
+ if (IS_ERR(rockchip->rst))
+ return dev_err_probe(&pdev->dev, PTR_ERR(rockchip->rst),
+ "failed to get reset lines\n");
+
return 0;
}
@@ -182,18 +279,6 @@ static void rockchip_pcie_phy_deinit(struct rockchip_pcie *rockchip)
phy_power_off(rockchip->phy);
}
-static int rockchip_pcie_reset_control_release(struct rockchip_pcie *rockchip)
-{
- struct device *dev = rockchip->pci.dev;
-
- rockchip->rst = devm_reset_control_array_get_exclusive(dev);
- if (IS_ERR(rockchip->rst))
- return dev_err_probe(dev, PTR_ERR(rockchip->rst),
- "failed to get reset lines\n");
-
- return reset_control_deassert(rockchip->rst);
-}
-
static const struct dw_pcie_ops dw_pcie_ops = {
.link_up = rockchip_pcie_link_up,
.start_link = rockchip_pcie_start_link,
@@ -222,6 +307,10 @@ static int rockchip_pcie_probe(struct platform_device *pdev)
if (ret)
return ret;
+ ret = reset_control_assert(rockchip->rst);
+ if (ret)
+ return ret;
+
/* DON'T MOVE ME: must be enable before PHY init */
rockchip->vpcie3v3 = devm_regulator_get_optional(dev, "vpcie3v3");
if (IS_ERR(rockchip->vpcie3v3)) {
@@ -241,7 +330,7 @@ static int rockchip_pcie_probe(struct platform_device *pdev)
if (ret)
goto disable_regulator;
- ret = rockchip_pcie_reset_control_release(rockchip);
+ ret = reset_control_deassert(rockchip->rst);
if (ret)
goto deinit_phy;
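The new INTx mask/unmask callbacks rely on the controller's "hiword mask" registers, where the upper 16 bits of a write select which of the lower 16 bits take effect, so no read-modify-write cycle is needed. A small illustrative sketch of that convention (the register offset is copied from the patch, the helper itself is hypothetical):

#include <linux/bits.h>
#include <linux/io.h>

#define HIWORD_UPDATE(mask, val)	(((mask) << 16) | (val))
#define EXAMPLE_INTR_MASK_LEGACY	0x1c	/* mask register offset used in this sketch */

/* Set or clear a single INTx mask bit without touching its neighbours. */
static void example_write_intx_mask(void __iomem *apb, unsigned int hwirq,
				    bool masked)
{
	u32 mask = BIT(hwirq);

	/* Only the bit enabled in the upper half-word is modified by the write. */
	writel_relaxed(HIWORD_UPDATE(mask, masked ? mask : 0),
		       apb + EXAMPLE_INTR_MASK_LEGACY);
}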
diff --git a/drivers/pci/controller/dwc/pcie-qcom-ep.c b/drivers/pci/controller/dwc/pcie-qcom-ep.c
index 6ce8eddf3a37..ec99116ad05c 100644
--- a/drivers/pci/controller/dwc/pcie-qcom-ep.c
+++ b/drivers/pci/controller/dwc/pcie-qcom-ep.c
@@ -223,11 +223,8 @@ static void qcom_pcie_dw_stop_link(struct dw_pcie *pci)
disable_irq(pcie_ep->perst_irq);
}
-static int qcom_pcie_perst_deassert(struct dw_pcie *pci)
+static int qcom_pcie_enable_resources(struct qcom_pcie_ep *pcie_ep)
{
- struct qcom_pcie_ep *pcie_ep = to_pcie_ep(pci);
- struct device *dev = pci->dev;
- u32 val, offset;
int ret;
ret = clk_bulk_prepare_enable(ARRAY_SIZE(qcom_pcie_ep_clks),
@@ -247,6 +244,38 @@ static int qcom_pcie_perst_deassert(struct dw_pcie *pci)
if (ret)
goto err_phy_exit;
+ return 0;
+
+err_phy_exit:
+ phy_exit(pcie_ep->phy);
+err_disable_clk:
+ clk_bulk_disable_unprepare(ARRAY_SIZE(qcom_pcie_ep_clks),
+ qcom_pcie_ep_clks);
+
+ return ret;
+}
+
+static void qcom_pcie_disable_resources(struct qcom_pcie_ep *pcie_ep)
+{
+ phy_power_off(pcie_ep->phy);
+ phy_exit(pcie_ep->phy);
+ clk_bulk_disable_unprepare(ARRAY_SIZE(qcom_pcie_ep_clks),
+ qcom_pcie_ep_clks);
+}
+
+static int qcom_pcie_perst_deassert(struct dw_pcie *pci)
+{
+ struct qcom_pcie_ep *pcie_ep = to_pcie_ep(pci);
+ struct device *dev = pci->dev;
+ u32 val, offset;
+ int ret;
+
+ ret = qcom_pcie_enable_resources(pcie_ep);
+ if (ret) {
+ dev_err(dev, "Failed to enable resources: %d\n", ret);
+ return ret;
+ }
+
/* Assert WAKE# to RC to indicate device is ready */
gpiod_set_value_cansleep(pcie_ep->wake, 1);
usleep_range(WAKE_DELAY_US, WAKE_DELAY_US + 500);
@@ -335,7 +364,7 @@ static int qcom_pcie_perst_deassert(struct dw_pcie *pci)
ret = dw_pcie_ep_init_complete(&pcie_ep->pci.ep);
if (ret) {
dev_err(dev, "Failed to complete initialization: %d\n", ret);
- goto err_phy_power_off;
+ goto err_disable_resources;
}
/*
@@ -355,13 +384,8 @@ static int qcom_pcie_perst_deassert(struct dw_pcie *pci)
return 0;
-err_phy_power_off:
- phy_power_off(pcie_ep->phy);
-err_phy_exit:
- phy_exit(pcie_ep->phy);
-err_disable_clk:
- clk_bulk_disable_unprepare(ARRAY_SIZE(qcom_pcie_ep_clks),
- qcom_pcie_ep_clks);
+err_disable_resources:
+ qcom_pcie_disable_resources(pcie_ep);
return ret;
}
@@ -376,10 +400,7 @@ static void qcom_pcie_perst_assert(struct dw_pcie *pci)
return;
}
- phy_power_off(pcie_ep->phy);
- phy_exit(pcie_ep->phy);
- clk_bulk_disable_unprepare(ARRAY_SIZE(qcom_pcie_ep_clks),
- qcom_pcie_ep_clks);
+ qcom_pcie_disable_resources(pcie_ep);
pcie_ep->link_status = QCOM_PCIE_EP_LINK_DISABLED;
}
@@ -643,43 +664,26 @@ static int qcom_pcie_ep_probe(struct platform_device *pdev)
if (ret)
return ret;
- ret = clk_bulk_prepare_enable(ARRAY_SIZE(qcom_pcie_ep_clks),
- qcom_pcie_ep_clks);
- if (ret)
+ ret = qcom_pcie_enable_resources(pcie_ep);
+ if (ret) {
+ dev_err(dev, "Failed to enable resources: %d\n", ret);
return ret;
-
- ret = qcom_pcie_ep_core_reset(pcie_ep);
- if (ret)
- goto err_disable_clk;
-
- ret = phy_init(pcie_ep->phy);
- if (ret)
- goto err_disable_clk;
-
- /* PHY needs to be powered on for dw_pcie_ep_init() */
- ret = phy_power_on(pcie_ep->phy);
- if (ret)
- goto err_phy_exit;
+ }
ret = dw_pcie_ep_init(&pcie_ep->pci.ep);
if (ret) {
dev_err(dev, "Failed to initialize endpoint: %d\n", ret);
- goto err_phy_power_off;
+ goto err_disable_resources;
}
ret = qcom_pcie_ep_enable_irq_resources(pdev, pcie_ep);
if (ret)
- goto err_phy_power_off;
+ goto err_disable_resources;
return 0;
-err_phy_power_off:
- phy_power_off(pcie_ep->phy);
-err_phy_exit:
- phy_exit(pcie_ep->phy);
-err_disable_clk:
- clk_bulk_disable_unprepare(ARRAY_SIZE(qcom_pcie_ep_clks),
- qcom_pcie_ep_clks);
+err_disable_resources:
+ qcom_pcie_disable_resources(pcie_ep);
return ret;
}
@@ -691,10 +695,7 @@ static int qcom_pcie_ep_remove(struct platform_device *pdev)
if (pcie_ep->link_status == QCOM_PCIE_EP_LINK_DISABLED)
return 0;
- phy_power_off(pcie_ep->phy);
- phy_exit(pcie_ep->phy);
- clk_bulk_disable_unprepare(ARRAY_SIZE(qcom_pcie_ep_clks),
- qcom_pcie_ep_clks);
+ qcom_pcie_disable_resources(pcie_ep);
return 0;
}
diff --git a/drivers/pci/controller/dwc/pcie-tegra194.c b/drivers/pci/controller/dwc/pcie-tegra194.c
index b1b5f836a806..cc2678490162 100644
--- a/drivers/pci/controller/dwc/pcie-tegra194.c
+++ b/drivers/pci/controller/dwc/pcie-tegra194.c
@@ -186,8 +186,6 @@
#define N_FTS_VAL 52
#define FTS_VAL 52
-#define PORT_LOGIC_MSI_CTRL_INT_0_EN 0x828
-
#define GEN3_EQ_CONTROL_OFF 0x8a8
#define GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC_SHIFT 8
#define GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC_MASK GENMASK(23, 8)
@@ -2189,9 +2187,6 @@ static int tegra194_pcie_suspend_noirq(struct device *dev)
if (!pcie->link_state)
return 0;
- /* Save MSI interrupt vector */
- pcie->msi_ctrl_int = dw_pcie_readl_dbi(&pcie->pci,
- PORT_LOGIC_MSI_CTRL_INT_0_EN);
tegra_pcie_downstream_dev_to_D0(pcie);
tegra194_pcie_pme_turnoff(pcie);
tegra_pcie_unconfig_controller(pcie);
@@ -2223,10 +2218,6 @@ static int tegra194_pcie_resume_noirq(struct device *dev)
if (ret < 0)
goto fail_host_init;
- /* Restore MSI interrupt vector */
- dw_pcie_writel_dbi(&pcie->pci, PORT_LOGIC_MSI_CTRL_INT_0_EN,
- pcie->msi_ctrl_int);
-
return 0;
fail_host_init:
diff --git a/drivers/pci/controller/pcie-mediatek-gen3.c b/drivers/pci/controller/pcie-mediatek-gen3.c
index 3e8d70bfabc6..5d9fd36b02d1 100644
--- a/drivers/pci/controller/pcie-mediatek-gen3.c
+++ b/drivers/pci/controller/pcie-mediatek-gen3.c
@@ -838,6 +838,14 @@ static int mtk_pcie_setup(struct mtk_gen3_pcie *pcie)
if (err)
return err;
+ /*
+ * The controller may have been left out of reset by the bootloader
+ * so make sure that we get a clean start by asserting resets here.
+ */
+ reset_control_assert(pcie->phy_reset);
+ reset_control_assert(pcie->mac_reset);
+ usleep_range(10, 20);
+
/* Don't touch the hardware registers before power up */
err = mtk_pcie_power_up(pcie);
if (err)
diff --git a/drivers/pci/controller/pcie-mediatek.c b/drivers/pci/controller/pcie-mediatek.c
index ddfbd4aebdec..be8bd919cb88 100644
--- a/drivers/pci/controller/pcie-mediatek.c
+++ b/drivers/pci/controller/pcie-mediatek.c
@@ -1008,6 +1008,7 @@ static int mtk_pcie_subsys_powerup(struct mtk_pcie *pcie)
"mediatek,generic-pciecfg");
if (cfg_node) {
pcie->cfg = syscon_node_to_regmap(cfg_node);
+ of_node_put(cfg_node);
if (IS_ERR(pcie->cfg))
return PTR_ERR(pcie->cfg);
}
diff --git a/drivers/pci/controller/pcie-microchip-host.c b/drivers/pci/controller/pcie-microchip-host.c
index 29d8e81e4181..dd5dba419047 100644
--- a/drivers/pci/controller/pcie-microchip-host.c
+++ b/drivers/pci/controller/pcie-microchip-host.c
@@ -406,6 +406,7 @@ static void mc_pcie_enable_msi(struct mc_pcie *port, void __iomem *base)
static void mc_handle_msi(struct irq_desc *desc)
{
struct mc_pcie *port = irq_desc_get_handler_data(desc);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
struct device *dev = port->dev;
struct mc_msi *msi = &port->msi;
void __iomem *bridge_base_addr =
@@ -414,8 +415,11 @@ static void mc_handle_msi(struct irq_desc *desc)
u32 bit;
int ret;
+ chained_irq_enter(chip, desc);
+
status = readl_relaxed(bridge_base_addr + ISTATUS_LOCAL);
if (status & PM_MSI_INT_MSI_MASK) {
+ writel_relaxed(status & PM_MSI_INT_MSI_MASK, bridge_base_addr + ISTATUS_LOCAL);
status = readl_relaxed(bridge_base_addr + ISTATUS_MSI);
for_each_set_bit(bit, &status, msi->num_vectors) {
ret = generic_handle_domain_irq(msi->dev_domain, bit);
@@ -424,6 +428,8 @@ static void mc_handle_msi(struct irq_desc *desc)
bit);
}
}
+
+ chained_irq_exit(chip, desc);
}
static void mc_msi_bottom_irq_ack(struct irq_data *data)
@@ -432,13 +438,8 @@ static void mc_msi_bottom_irq_ack(struct irq_data *data)
void __iomem *bridge_base_addr =
port->axi_base_addr + MC_PCIE_BRIDGE_ADDR;
u32 bitpos = data->hwirq;
- unsigned long status;
writel_relaxed(BIT(bitpos), bridge_base_addr + ISTATUS_MSI);
- status = readl_relaxed(bridge_base_addr + ISTATUS_MSI);
- if (!status)
- writel_relaxed(BIT(PM_MSI_INT_MSI_SHIFT),
- bridge_base_addr + ISTATUS_LOCAL);
}
static void mc_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
@@ -563,6 +564,7 @@ static int mc_allocate_msi_domains(struct mc_pcie *port)
static void mc_handle_intx(struct irq_desc *desc)
{
struct mc_pcie *port = irq_desc_get_handler_data(desc);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
struct device *dev = port->dev;
void __iomem *bridge_base_addr =
port->axi_base_addr + MC_PCIE_BRIDGE_ADDR;
@@ -570,6 +572,8 @@ static void mc_handle_intx(struct irq_desc *desc)
u32 bit;
int ret;
+ chained_irq_enter(chip, desc);
+
status = readl_relaxed(bridge_base_addr + ISTATUS_LOCAL);
if (status & PM_MSI_INT_INTX_MASK) {
status &= PM_MSI_INT_INTX_MASK;
@@ -581,6 +585,8 @@ static void mc_handle_intx(struct irq_desc *desc)
bit);
}
}
+
+ chained_irq_exit(chip, desc);
}
static void mc_ack_intx_irq(struct irq_data *data)
@@ -1115,7 +1121,7 @@ static const struct of_device_id mc_pcie_of_match[] = {
{},
};
-MODULE_DEVICE_TABLE(of, mc_pcie_of_match)
+MODULE_DEVICE_TABLE(of, mc_pcie_of_match);
static struct platform_driver mc_pcie_driver = {
.probe = pci_host_common_probe,
diff --git a/drivers/pci/hotplug/pnv_php.c b/drivers/pci/hotplug/pnv_php.c
index f4c2e6e01be0..881d420637bf 100644
--- a/drivers/pci/hotplug/pnv_php.c
+++ b/drivers/pci/hotplug/pnv_php.c
@@ -9,6 +9,7 @@
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/pci_hotplug.h>
+#include <linux/of_fdt.h>
#include <asm/opal.h>
#include <asm/pnv-pci.h>
diff --git a/drivers/pci/hotplug/rpadlpar_core.c b/drivers/pci/hotplug/rpadlpar_core.c
index e6991ff67526..980bb3afd092 100644
--- a/drivers/pci/hotplug/rpadlpar_core.c
+++ b/drivers/pci/hotplug/rpadlpar_core.c
@@ -15,6 +15,7 @@
#include <linux/init.h>
#include <linux/module.h>
+#include <linux/of.h>
#include <linux/pci.h>
#include <linux/string.h>
#include <linux/vmalloc.h>
diff --git a/drivers/pci/hotplug/rpaphp_core.c b/drivers/pci/hotplug/rpaphp_core.c
index 9887c9de08c3..491986197c47 100644
--- a/drivers/pci/hotplug/rpaphp_core.c
+++ b/drivers/pci/hotplug/rpaphp_core.c
@@ -11,6 +11,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
+#include <linux/of.h>
#include <linux/pci.h>
#include <linux/pci_hotplug.h>
#include <linux/smp.h>
@@ -20,6 +21,7 @@
#include <asm/eeh.h> /* for eeh_add_device() */
#include <asm/rtas.h> /* rtas_call */
#include <asm/pci-bridge.h> /* for pci_controller */
+#include <asm/prom.h>
#include "../pci.h" /* for pci_add_new_bus */
/* and pci_do_scan_bus */
#include "rpaphp.h"
diff --git a/drivers/pci/hotplug/rpaphp_pci.c b/drivers/pci/hotplug/rpaphp_pci.c
index c380bdacd146..630f77057c23 100644
--- a/drivers/pci/hotplug/rpaphp_pci.c
+++ b/drivers/pci/hotplug/rpaphp_pci.c
@@ -8,6 +8,7 @@
* Send feedback to <lxie@us.ibm.com>
*
*/
+#include <linux/of.h>
#include <linux/pci.h>
#include <linux/string.h>
diff --git a/drivers/pci/hotplug/rpaphp_slot.c b/drivers/pci/hotplug/rpaphp_slot.c
index 93b4a945c55d..779eab12e981 100644
--- a/drivers/pci/hotplug/rpaphp_slot.c
+++ b/drivers/pci/hotplug/rpaphp_slot.c
@@ -11,6 +11,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sysfs.h>
+#include <linux/of.h>
#include <linux/pci.h>
#include <linux/string.h>
#include <linux/slab.h>
diff --git a/drivers/pci/p2pdma.c b/drivers/pci/p2pdma.c
index 30b1df3c9d2f..462b429ad243 100644
--- a/drivers/pci/p2pdma.c
+++ b/drivers/pci/p2pdma.c
@@ -326,15 +326,16 @@ static const struct pci_p2pdma_whitelist_entry {
};
/*
- * This lookup function tries to find the PCI device corresponding to a given
- * host bridge.
+ * If the first device on host's root bus is either devfn 00.0 or a PCIe
+ * Root Port, return it. Otherwise return NULL.
*
- * It assumes the host bridge device is the first PCI device in the
- * bus->devices list and that the devfn is 00.0. These assumptions should hold
- * for all the devices in the whitelist above.
+ * We often use a devfn 00.0 "host bridge" in the pci_p2pdma_whitelist[]
+ * (though there is no PCI/PCIe requirement for such a device). On some
+ * platforms, e.g., Intel Skylake, there is no such host bridge device, and
+ * pci_p2pdma_whitelist[] may contain a Root Port at any devfn.
*
- * This function is equivalent to pci_get_slot(host->bus, 0), however it does
- * not take the pci_bus_sem lock seeing __host_bridge_whitelist() must not
+ * This function is similar to pci_get_slot(host->bus, 0), but it does
+ * not take the pci_bus_sem lock since __host_bridge_whitelist() must not
* sleep.
*
* For this to be safe, the caller should hold a reference to a device on the
@@ -350,10 +351,14 @@ static struct pci_dev *pci_host_bridge_dev(struct pci_host_bridge *host)
if (!root)
return NULL;
- if (root->devfn != PCI_DEVFN(0, 0))
- return NULL;
- return root;
+ if (root->devfn == PCI_DEVFN(0, 0))
+ return root;
+
+ if (pci_pcie_type(root) == PCI_EXP_TYPE_ROOT_PORT)
+ return root;
+
+ return NULL;
}
static bool __host_bridge_whitelist(struct pci_host_bridge *host,
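The rewritten comment above states the rule directly: the first device on the host's root bus qualifies if it is either devfn 00.0 or a PCIe Root Port. A stand-alone restatement of that check, assuming the candidate device has already been taken from bus->devices (the helper name is illustrative):

#include <linux/pci.h>

/* True if @root matches the host-bridge heuristic described above. */
static bool example_host_bridge_candidate(struct pci_dev *root)
{
	return root->devfn == PCI_DEVFN(0, 0) ||
	       pci_pcie_type(root) == PCI_EXP_TYPE_ROOT_PORT;
}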
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
index 1f15ab7eabf8..3ae435beaf0a 100644
--- a/drivers/pci/pci-acpi.c
+++ b/drivers/pci/pci-acpi.c
@@ -974,9 +974,11 @@ bool acpi_pci_power_manageable(struct pci_dev *dev)
bool acpi_pci_bridge_d3(struct pci_dev *dev)
{
- const union acpi_object *obj;
- struct acpi_device *adev;
struct pci_dev *rpdev;
+ struct acpi_device *adev;
+ acpi_status status;
+ unsigned long long state;
+ const union acpi_object *obj;
if (acpi_pci_disabled || !dev->is_hotplug_bridge)
return false;
@@ -985,12 +987,6 @@ bool acpi_pci_bridge_d3(struct pci_dev *dev)
if (acpi_pci_power_manageable(dev))
return true;
- /*
- * The ACPI firmware will provide the device-specific properties through
- * _DSD configuration object. Look for the 'HotPlugSupportInD3' property
- * for the root port and if it is set we know the hierarchy behind it
- * supports D3 just fine.
- */
rpdev = pcie_find_root_port(dev);
if (!rpdev)
return false;
@@ -999,11 +995,34 @@ bool acpi_pci_bridge_d3(struct pci_dev *dev)
if (!adev)
return false;
- if (acpi_dev_get_property(adev, "HotPlugSupportInD3",
- ACPI_TYPE_INTEGER, &obj) < 0)
+ /*
+ * If the Root Port cannot signal wakeup signals at all, i.e., it
+ * doesn't supply a wakeup GPE via _PRW, it cannot signal hotplug
+ * events from low-power states including D3hot and D3cold.
+ */
+ if (!adev->wakeup.flags.valid)
return false;
- return obj->integer.value == 1;
+ /*
+ * If the Root Port cannot wake itself from D3hot or D3cold, we
+ * can't use D3.
+ */
+ status = acpi_evaluate_integer(adev->handle, "_S0W", NULL, &state);
+ if (ACPI_SUCCESS(status) && state < ACPI_STATE_D3_HOT)
+ return false;
+
+ /*
+ * The "HotPlugSupportInD3" property in a Root Port _DSD indicates
+ * the Port can signal hotplug events while in D3. We assume any
+ * bridges *below* that Root Port can also signal hotplug events
+ * while in D3.
+ */
+ if (!acpi_dev_get_property(adev, "HotPlugSupportInD3",
+ ACPI_TYPE_INTEGER, &obj) &&
+ obj->integer.value == 1)
+ return true;
+
+ return false;
}
int acpi_pci_set_power_state(struct pci_dev *dev, pci_power_t state)
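The reworked acpi_pci_bridge_d3() above checks three things on the Root Port's ACPI companion: a valid wakeup GPE, an _S0W deep enough to wake from D3hot, and the "HotPlugSupportInD3" _DSD property. A sketch of just the _DSD lookup, assuming @adev is that companion (the helper name is illustrative):

#include <linux/acpi.h>

/* True if the companion device publishes HotPlugSupportInD3 = 1 in _DSD. */
static bool example_hotplug_in_d3(struct acpi_device *adev)
{
	const union acpi_object *obj;

	/* acpi_dev_get_property() returns 0 when the property exists with this type. */
	if (acpi_dev_get_property(adev, "HotPlugSupportInD3",
				  ACPI_TYPE_INTEGER, &obj))
		return false;

	return obj->integer.value == 1;
}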
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index 4ceeb75fc899..2f3b69adfc9e 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -522,9 +522,9 @@ static void pci_device_shutdown(struct device *dev)
pci_clear_master(pci_dev);
}
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
-/* Auxiliary functions used for system resume and run-time resume. */
+/* Auxiliary functions used for system resume */
/**
* pci_restore_standard_config - restore standard config registers of PCI device
@@ -544,6 +544,11 @@ static int pci_restore_standard_config(struct pci_dev *pci_dev)
pci_pme_restore(pci_dev);
return 0;
}
+#endif /* CONFIG_PM_SLEEP */
+
+#ifdef CONFIG_PM
+
+/* Auxiliary functions used for system resume and run-time resume */
static void pci_pm_default_resume(struct pci_dev *pci_dev)
{
@@ -551,18 +556,34 @@ static void pci_pm_default_resume(struct pci_dev *pci_dev)
pci_enable_wake(pci_dev, PCI_D0, false);
}
-#endif
-
-#ifdef CONFIG_PM_SLEEP
-
-static void pci_pm_default_resume_early(struct pci_dev *pci_dev)
+static void pci_pm_power_up_and_verify_state(struct pci_dev *pci_dev)
{
pci_power_up(pci_dev);
pci_update_current_state(pci_dev, PCI_D0);
+}
+
+static void pci_pm_default_resume_early(struct pci_dev *pci_dev)
+{
+ pci_pm_power_up_and_verify_state(pci_dev);
pci_restore_state(pci_dev);
pci_pme_restore(pci_dev);
}
+static void pci_pm_bridge_power_up_actions(struct pci_dev *pci_dev)
+{
+ pci_bridge_wait_for_secondary_bus(pci_dev);
+ /*
+ * When powering on a bridge from D3cold, the whole hierarchy may be
+ * powered on into D0uninitialized state, resume them to give them a
+ * chance to suspend again
+ */
+ pci_resume_bus(pci_dev->subordinate);
+}
+
+#endif /* CONFIG_PM */
+
+#ifdef CONFIG_PM_SLEEP
+
/*
* Default "suspend" method for devices that have no driver provided suspend,
* or not even a driver at all (second part).
@@ -934,7 +955,7 @@ static int pci_pm_resume_noirq(struct device *dev)
pcie_pme_root_status_cleanup(pci_dev);
if (!skip_bus_pm && prev_state == PCI_D3cold)
- pci_bridge_wait_for_secondary_bus(pci_dev);
+ pci_pm_bridge_power_up_actions(pci_dev);
if (pci_has_legacy_pm_support(pci_dev))
return 0;
@@ -1068,7 +1089,7 @@ static int pci_pm_thaw_noirq(struct device *dev)
* in case the driver's "freeze" callbacks put it into a low-power
* state.
*/
- pci_set_power_state(pci_dev, PCI_D0);
+ pci_pm_power_up_and_verify_state(pci_dev);
pci_restore_state(pci_dev);
if (pci_has_legacy_pm_support(pci_dev))
@@ -1312,7 +1333,7 @@ static int pci_pm_runtime_resume(struct device *dev)
* to a driver because although we left it in D0, it may have gone to
* D3cold when the bridge above it runtime suspended.
*/
- pci_restore_standard_config(pci_dev);
+ pci_pm_default_resume_early(pci_dev);
if (!pci_dev->driver)
return 0;
@@ -1321,13 +1342,11 @@ static int pci_pm_runtime_resume(struct device *dev)
pci_pm_default_resume(pci_dev);
if (prev_state == PCI_D3cold)
- pci_bridge_wait_for_secondary_bus(pci_dev);
+ pci_pm_bridge_power_up_actions(pci_dev);
if (pm && pm->runtime_resume)
error = pm->runtime_resume(dev);
- pci_dev->runtime_d3cold = false;
-
return error;
}
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 9ecce435fb3f..eb7c0a08ff57 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -1068,126 +1068,6 @@ static inline bool platform_pci_bridge_d3(struct pci_dev *dev)
}
/**
- * pci_raw_set_power_state - Use PCI PM registers to set the power state of
- * given PCI device
- * @dev: PCI device to handle.
- * @state: PCI power state (D0, D1, D2, D3hot) to put the device into.
- *
- * RETURN VALUE:
- * -EINVAL if the requested state is invalid.
- * -EIO if device does not support PCI PM or its PM capabilities register has a
- * wrong version, or device doesn't support the requested state.
- * 0 if device already is in the requested state.
- * 0 if device's power state has been successfully changed.
- */
-static int pci_raw_set_power_state(struct pci_dev *dev, pci_power_t state)
-{
- u16 pmcsr;
- bool need_restore = false;
-
- /* Check if we're already there */
- if (dev->current_state == state)
- return 0;
-
- if (!dev->pm_cap)
- return -EIO;
-
- if (state < PCI_D0 || state > PCI_D3hot)
- return -EINVAL;
-
- /*
- * Validate transition: We can enter D0 from any state, but if
- * we're already in a low-power state, we can only go deeper. E.g.,
- * we can go from D1 to D3, but we can't go directly from D3 to D1;
- * we'd have to go from D3 to D0, then to D1.
- */
- if (state != PCI_D0 && dev->current_state <= PCI_D3cold
- && dev->current_state > state) {
- pci_err(dev, "invalid power transition (from %s to %s)\n",
- pci_power_name(dev->current_state),
- pci_power_name(state));
- return -EINVAL;
- }
-
- /* Check if this device supports the desired state */
- if ((state == PCI_D1 && !dev->d1_support)
- || (state == PCI_D2 && !dev->d2_support))
- return -EIO;
-
- pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
- if (PCI_POSSIBLE_ERROR(pmcsr)) {
- pci_err(dev, "can't change power state from %s to %s (config space inaccessible)\n",
- pci_power_name(dev->current_state),
- pci_power_name(state));
- return -EIO;
- }
-
- /*
- * If we're (effectively) in D3, force entire word to 0.
- * This doesn't affect PME_Status, disables PME_En, and
- * sets PowerState to 0.
- */
- switch (dev->current_state) {
- case PCI_D0:
- case PCI_D1:
- case PCI_D2:
- pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
- pmcsr |= state;
- break;
- case PCI_D3hot:
- case PCI_D3cold:
- case PCI_UNKNOWN: /* Boot-up */
- if ((pmcsr & PCI_PM_CTRL_STATE_MASK) == PCI_D3hot
- && !(pmcsr & PCI_PM_CTRL_NO_SOFT_RESET))
- need_restore = true;
- fallthrough; /* force to D0 */
- default:
- pmcsr = 0;
- break;
- }
-
- /* Enter specified state */
- pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);
-
- /*
- * Mandatory power management transition delays; see PCI PM 1.1
- * 5.6.1 table 18
- */
- if (state == PCI_D3hot || dev->current_state == PCI_D3hot)
- pci_dev_d3_sleep(dev);
- else if (state == PCI_D2 || dev->current_state == PCI_D2)
- udelay(PCI_PM_D2_DELAY);
-
- pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
- dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
- if (dev->current_state != state)
- pci_info_ratelimited(dev, "refused to change power state from %s to %s\n",
- pci_power_name(dev->current_state),
- pci_power_name(state));
-
- /*
- * According to section 5.4.1 of the "PCI BUS POWER MANAGEMENT
- * INTERFACE SPECIFICATION, REV. 1.2", a device transitioning
- * from D3hot to D0 _may_ perform an internal reset, thereby
- * going to "D0 Uninitialized" rather than "D0 Initialized".
- * For example, at least some versions of the 3c905B and the
- * 3c556B exhibit this behaviour.
- *
- * At least some laptop BIOSen (e.g. the Thinkpad T21) leave
- * devices in a D3hot state at boot. Consequently, we need to
- * restore at least the BARs so that the device will be
- * accessible to its driver.
- */
- if (need_restore)
- pci_restore_bars(dev);
-
- if (dev->bus->self)
- pcie_aspm_pm_state_change(dev->bus->self);
-
- return 0;
-}
-
-/**
* pci_update_current_state - Read power state of given device and cache it
* @dev: PCI device to handle.
* @state: State to cache in case the device doesn't have the PM capability
@@ -1201,14 +1081,17 @@ static int pci_raw_set_power_state(struct pci_dev *dev, pci_power_t state)
*/
void pci_update_current_state(struct pci_dev *dev, pci_power_t state)
{
- if (platform_pci_get_power_state(dev) == PCI_D3cold ||
- !pci_device_is_present(dev)) {
+ if (platform_pci_get_power_state(dev) == PCI_D3cold) {
dev->current_state = PCI_D3cold;
} else if (dev->pm_cap) {
u16 pmcsr;
pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
- dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
+ if (PCI_POSSIBLE_ERROR(pmcsr)) {
+ dev->current_state = PCI_D3cold;
+ return;
+ }
+ dev->current_state = pmcsr & PCI_PM_CTRL_STATE_MASK;
} else {
dev->current_state = state;
}
@@ -1306,26 +1189,114 @@ static int pci_dev_wait(struct pci_dev *dev, char *reset_type, int timeout)
/**
* pci_power_up - Put the given device into D0
* @dev: PCI device to power up
+ *
+ * On success, return 0 or 1, depending on whether or not it is necessary to
+ * restore the device's BARs subsequently (1 is returned in that case).
*/
int pci_power_up(struct pci_dev *dev)
{
- pci_platform_power_transition(dev, PCI_D0);
+ bool need_restore;
+ pci_power_t state;
+ u16 pmcsr;
+
+ platform_pci_set_power_state(dev, PCI_D0);
+
+ if (!dev->pm_cap) {
+ state = platform_pci_get_power_state(dev);
+ if (state == PCI_UNKNOWN)
+ dev->current_state = PCI_D0;
+ else
+ dev->current_state = state;
+
+ if (state == PCI_D0)
+ return 0;
+
+ return -EIO;
+ }
+
+ pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
+ if (PCI_POSSIBLE_ERROR(pmcsr)) {
+ pci_err(dev, "Unable to change power state from %s to D0, device inaccessible\n",
+ pci_power_name(dev->current_state));
+ dev->current_state = PCI_D3cold;
+ return -EIO;
+ }
+
+ state = pmcsr & PCI_PM_CTRL_STATE_MASK;
+
+ need_restore = (state == PCI_D3hot || dev->current_state >= PCI_D3hot) &&
+ !(pmcsr & PCI_PM_CTRL_NO_SOFT_RESET);
+
+ if (state == PCI_D0)
+ goto end;
/*
- * Mandatory power management transition delays are handled in
- * pci_pm_resume_noirq() and pci_pm_runtime_resume() of the
- * corresponding bridge.
+ * Force the entire word to 0. This doesn't affect PME_Status, disables
+ * PME_En, and sets PowerState to 0.
*/
- if (dev->runtime_d3cold) {
+ pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, 0);
+
+ /* Mandatory transition delays; see PCI PM 1.2. */
+ if (state == PCI_D3hot)
+ pci_dev_d3_sleep(dev);
+ else if (state == PCI_D2)
+ udelay(PCI_PM_D2_DELAY);
+
+end:
+ dev->current_state = PCI_D0;
+ if (need_restore)
+ return 1;
+
+ return 0;
+}
+
+/**
+ * pci_set_full_power_state - Put a PCI device into D0 and update its state
+ * @dev: PCI device to power up
+ *
+ * Call pci_power_up() to put @dev into D0, read from its PCI_PM_CTRL register
+ * to confirm the state change, restore its BARs if they might be lost and
+ * reconfigure ASPM in accordance with the new power state.
+ *
+ * If pci_restore_state() is going to be called right after a power state change
+ * to D0, it is more efficient to use pci_power_up() directly instead of this
+ * function.
+ */
+static int pci_set_full_power_state(struct pci_dev *dev)
+{
+ u16 pmcsr;
+ int ret;
+
+ ret = pci_power_up(dev);
+ if (ret < 0)
+ return ret;
+
+ pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
+ dev->current_state = pmcsr & PCI_PM_CTRL_STATE_MASK;
+ if (dev->current_state != PCI_D0) {
+ pci_info_ratelimited(dev, "Refused to change power state from %s to D0\n",
+ pci_power_name(dev->current_state));
+ } else if (ret > 0) {
/*
- * When powering on a bridge from D3cold, the whole hierarchy
- * may be powered on into D0uninitialized state, resume them to
- * give them a chance to suspend again
+ * According to section 5.4.1 of the "PCI BUS POWER MANAGEMENT
+ * INTERFACE SPECIFICATION, REV. 1.2", a device transitioning
+ * from D3hot to D0 _may_ perform an internal reset, thereby
+ * going to "D0 Uninitialized" rather than "D0 Initialized".
+ * For example, at least some versions of the 3c905B and the
+ * 3c556B exhibit this behaviour.
+ *
+ * At least some laptop BIOSen (e.g. the Thinkpad T21) leave
+ * devices in a D3hot state at boot. Consequently, we need to
+ * restore at least the BARs so that the device will be
+ * accessible to its driver.
*/
- pci_resume_bus(dev->subordinate);
+ pci_restore_bars(dev);
}
- return pci_raw_set_power_state(dev, PCI_D0);
+ if (dev->bus->self)
+ pcie_aspm_pm_state_change(dev->bus->self);
+
+ return 0;
}
/**
@@ -1353,6 +1324,79 @@ void pci_bus_set_current_state(struct pci_bus *bus, pci_power_t state)
}
/**
+ * pci_set_low_power_state - Put a PCI device into a low-power state.
+ * @dev: PCI device to handle.
+ * @state: PCI power state (D1, D2, D3hot) to put the device into.
+ *
+ * Use the device's PCI_PM_CTRL register to put it into a low-power state.
+ *
+ * RETURN VALUE:
+ * -EINVAL if the requested state is invalid.
+ * -EIO if device does not support PCI PM or its PM capabilities register has a
+ * wrong version, or device doesn't support the requested state.
+ * 0 if device already is in the requested state.
+ * 0 if device's power state has been successfully changed.
+ */
+static int pci_set_low_power_state(struct pci_dev *dev, pci_power_t state)
+{
+ u16 pmcsr;
+
+ if (!dev->pm_cap)
+ return -EIO;
+
+ /*
+ * Validate transition: We can enter D0 from any state, but if
+ * we're already in a low-power state, we can only go deeper. E.g.,
+ * we can go from D1 to D3, but we can't go directly from D3 to D1;
+ * we'd have to go from D3 to D0, then to D1.
+ */
+ if (dev->current_state <= PCI_D3cold && dev->current_state > state) {
+ pci_dbg(dev, "Invalid power transition (from %s to %s)\n",
+ pci_power_name(dev->current_state),
+ pci_power_name(state));
+ return -EINVAL;
+ }
+
+ /* Check if this device supports the desired state */
+ if ((state == PCI_D1 && !dev->d1_support)
+ || (state == PCI_D2 && !dev->d2_support))
+ return -EIO;
+
+ pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
+ if (PCI_POSSIBLE_ERROR(pmcsr)) {
+ pci_err(dev, "Unable to change power state from %s to %s, device inaccessible\n",
+ pci_power_name(dev->current_state),
+ pci_power_name(state));
+ dev->current_state = PCI_D3cold;
+ return -EIO;
+ }
+
+ pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
+ pmcsr |= state;
+
+ /* Enter specified state */
+ pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);
+
+ /* Mandatory power management transition delays; see PCI PM 1.2. */
+ if (state == PCI_D3hot)
+ pci_dev_d3_sleep(dev);
+ else if (state == PCI_D2)
+ udelay(PCI_PM_D2_DELAY);
+
+ pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
+ dev->current_state = pmcsr & PCI_PM_CTRL_STATE_MASK;
+ if (dev->current_state != state)
+ pci_info_ratelimited(dev, "Refused to change power state from %s to %s\n",
+ pci_power_name(dev->current_state),
+ pci_power_name(state));
+
+ if (dev->bus->self)
+ pcie_aspm_pm_state_change(dev->bus->self);
+
+ return 0;
+}
+
+/**
* pci_set_power_state - Set the power state of a PCI device
* @dev: PCI device to handle.
* @state: PCI power state (D0, D1, D2, D3hot) to put the device into.
@@ -1393,7 +1437,7 @@ int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
return 0;
if (state == PCI_D0)
- return pci_power_up(dev);
+ return pci_set_full_power_state(dev);
/*
* This device is quirked not to be put into D3, so don't put it in
@@ -1402,19 +1446,25 @@ int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
if (state >= PCI_D3hot && (dev->dev_flags & PCI_DEV_FLAGS_NO_D3))
return 0;
- /*
- * To put device in D3cold, we put device into D3hot in native
- * way, then put device into D3cold with platform ops
- */
- error = pci_raw_set_power_state(dev, state > PCI_D3hot ?
- PCI_D3hot : state);
+ if (state == PCI_D3cold) {
+ /*
+ * To put the device in D3cold, put it into D3hot in the native
+ * way, then put it into D3cold using platform ops.
+ */
+ error = pci_set_low_power_state(dev, PCI_D3hot);
+
+ if (pci_platform_power_transition(dev, PCI_D3cold))
+ return error;
- if (pci_platform_power_transition(dev, state))
- return error;
+ /* Powering off a bridge may power off the whole hierarchy */
+ if (dev->current_state == PCI_D3cold)
+ pci_bus_set_current_state(dev->subordinate, PCI_D3cold);
+ } else {
+ error = pci_set_low_power_state(dev, state);
- /* Powering off a bridge may power off the whole hierarchy */
- if (state == PCI_D3cold)
- pci_bus_set_current_state(dev->subordinate, PCI_D3cold);
+ if (pci_platform_power_transition(dev, state))
+ return error;
+ }
return 0;
}
@@ -2718,8 +2768,6 @@ int pci_finish_runtime_suspend(struct pci_dev *dev)
if (target_state == PCI_POWER_ERROR)
return -EIO;
- dev->runtime_d3cold = target_state == PCI_D3cold;
-
/*
* There are systems (for example, Intel mobile chips since Coffee
* Lake) where the power drawn while suspended can be significantly
@@ -2737,7 +2785,6 @@ int pci_finish_runtime_suspend(struct pci_dev *dev)
if (error) {
pci_enable_wake(dev, target_state, false);
pci_restore_ptm_state(dev);
- dev->runtime_d3cold = false;
}
return error;
@@ -5103,19 +5150,19 @@ static int pci_reset_bus_function(struct pci_dev *dev, bool probe)
void pci_dev_lock(struct pci_dev *dev)
{
- pci_cfg_access_lock(dev);
/* block PM suspend, driver probe, etc. */
device_lock(&dev->dev);
+ pci_cfg_access_lock(dev);
}
EXPORT_SYMBOL_GPL(pci_dev_lock);
/* Return 1 on successful lock, 0 on contention */
int pci_dev_trylock(struct pci_dev *dev)
{
- if (pci_cfg_access_trylock(dev)) {
- if (device_trylock(&dev->dev))
+ if (device_trylock(&dev->dev)) {
+ if (pci_cfg_access_trylock(dev))
return 1;
- pci_cfg_access_unlock(dev);
+ device_unlock(&dev->dev);
}
return 0;
@@ -5124,8 +5171,8 @@ EXPORT_SYMBOL_GPL(pci_dev_trylock);
void pci_dev_unlock(struct pci_dev *dev)
{
- device_unlock(&dev->dev);
pci_cfg_access_unlock(dev);
+ device_unlock(&dev->dev);
}
EXPORT_SYMBOL_GPL(pci_dev_unlock);
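The transition rule kept in pci_set_low_power_state() above means a caller cannot hop between low-power states directly: a device parked in D3hot has to come back to D0 before it can be placed in D1 or D2. A short driver-side usage sketch of that ordering (the helper name is illustrative):

#include <linux/pci.h>

/* Move a device that is currently in D3hot into D1, honouring the D0 detour. */
static int example_d3hot_to_d1(struct pci_dev *pdev)
{
	int ret;

	ret = pci_set_power_state(pdev, PCI_D0);	/* back to full power first */
	if (ret)
		return ret;

	return pci_set_power_state(pdev, PCI_D1);	/* now a deeper state is valid */
}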
diff --git a/drivers/pci/pcie/aer.c b/drivers/pci/pcie/aer.c
index 9fa1f97e5b27..7952e5efd6cf 100644
--- a/drivers/pci/pcie/aer.c
+++ b/drivers/pci/pcie/aer.c
@@ -101,6 +101,11 @@ struct aer_stats {
#define ERR_COR_ID(d) (d & 0xffff)
#define ERR_UNCOR_ID(d) (d >> 16)
+#define AER_ERR_STATUS_MASK (PCI_ERR_ROOT_UNCOR_RCV | \
+ PCI_ERR_ROOT_COR_RCV | \
+ PCI_ERR_ROOT_MULTI_COR_RCV | \
+ PCI_ERR_ROOT_MULTI_UNCOR_RCV)
+
static int pcie_aer_disable;
static pci_ers_result_t aer_root_reset(struct pci_dev *dev);
@@ -1196,7 +1201,7 @@ static irqreturn_t aer_irq(int irq, void *context)
struct aer_err_source e_src = {};
pci_read_config_dword(rp, aer + PCI_ERR_ROOT_STATUS, &e_src.status);
- if (!(e_src.status & (PCI_ERR_ROOT_UNCOR_RCV|PCI_ERR_ROOT_COR_RCV)))
+ if (!(e_src.status & AER_ERR_STATUS_MASK))
return IRQ_NONE;
pci_read_config_dword(rp, aer + PCI_ERR_ROOT_ERR_SRC, &e_src.id);
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index da829274fc66..41aeaa235132 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -12,6 +12,7 @@
* file, where their drivers can use them.
*/
+#include <linux/bitfield.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/export.h>
@@ -5895,3 +5896,49 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x1533, rom_bar_overlap_defect);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x1536, rom_bar_overlap_defect);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x1537, rom_bar_overlap_defect);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x1538, rom_bar_overlap_defect);
+
+#ifdef CONFIG_PCIEASPM
+/*
+ * Several Intel DG2 graphics devices advertise that they can only tolerate
+ * 1us latency when transitioning from L1 to L0, which may prevent ASPM L1
+ * from being enabled. But in fact these devices can tolerate unlimited
+ * latency. Override their Device Capabilities value to allow ASPM L1 to
+ * be enabled.
+ */
+static void aspm_l1_acceptable_latency(struct pci_dev *dev)
+{
+ u32 l1_lat = FIELD_GET(PCI_EXP_DEVCAP_L1, dev->devcap);
+
+ if (l1_lat < 7) {
+ dev->devcap |= FIELD_PREP(PCI_EXP_DEVCAP_L1, 7);
+ pci_info(dev, "ASPM: overriding L1 acceptable latency from %#x to 0x7\n",
+ l1_lat);
+ }
+}
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x4f80, aspm_l1_acceptable_latency);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x4f81, aspm_l1_acceptable_latency);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x4f82, aspm_l1_acceptable_latency);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x4f83, aspm_l1_acceptable_latency);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x4f84, aspm_l1_acceptable_latency);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x4f85, aspm_l1_acceptable_latency);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x4f86, aspm_l1_acceptable_latency);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x4f87, aspm_l1_acceptable_latency);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x4f88, aspm_l1_acceptable_latency);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x5690, aspm_l1_acceptable_latency);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x5691, aspm_l1_acceptable_latency);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x5692, aspm_l1_acceptable_latency);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x5693, aspm_l1_acceptable_latency);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x5694, aspm_l1_acceptable_latency);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x5695, aspm_l1_acceptable_latency);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x56a0, aspm_l1_acceptable_latency);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x56a1, aspm_l1_acceptable_latency);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x56a2, aspm_l1_acceptable_latency);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x56a3, aspm_l1_acceptable_latency);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x56a4, aspm_l1_acceptable_latency);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x56a5, aspm_l1_acceptable_latency);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x56a6, aspm_l1_acceptable_latency);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x56b0, aspm_l1_acceptable_latency);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x56b1, aspm_l1_acceptable_latency);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x56c0, aspm_l1_acceptable_latency);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x56c1, aspm_l1_acceptable_latency);
+#endif
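
The quirk above relies on the FIELD_GET()/FIELD_PREP() helpers from <linux/bitfield.h> to read and rewrite the L1 Acceptable Latency field (PCI_EXP_DEVCAP_L1, where the encoding 7 means "no limit"). A self-contained sketch of the same field manipulation on a plain devcap value (the helper is hypothetical; the quirk itself patches the cached dev->devcap):

#include <linux/bitfield.h>
#include <linux/pci.h>

/* Widen the advertised L1 acceptable latency to its maximum encoding (7). */
static u32 example_force_unlimited_l1_latency(u32 devcap)
{
	if (FIELD_GET(PCI_EXP_DEVCAP_L1, devcap) < 7)
		devcap = (devcap & ~PCI_EXP_DEVCAP_L1) |
			 FIELD_PREP(PCI_EXP_DEVCAP_L1, 7);

	return devcap;
}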