Diffstat (limited to 'drivers/pci')
-rw-r--r--  drivers/pci/ats.c | 2
-rw-r--r--  drivers/pci/controller/dwc/pci-dra7xx.c | 27
-rw-r--r--  drivers/pci/controller/dwc/pci-exynos.c | 2
-rw-r--r--  drivers/pci/controller/dwc/pci-imx6.c | 449
-rw-r--r--  drivers/pci/controller/dwc/pci-layerscape.c | 10
-rw-r--r--  drivers/pci/controller/dwc/pcie-artpec6.c | 13
-rw-r--r--  drivers/pci/controller/dwc/pcie-designware-ep.c | 52
-rw-r--r--  drivers/pci/controller/dwc/pcie-designware-host.c | 56
-rw-r--r--  drivers/pci/controller/dwc/pcie-designware.c | 7
-rw-r--r--  drivers/pci/controller/dwc/pcie-designware.h | 19
-rw-r--r--  drivers/pci/controller/dwc/pcie-dw-rockchip.c | 69
-rw-r--r--  drivers/pci/controller/dwc/pcie-qcom.c | 7
-rw-r--r--  drivers/pci/controller/pci-host-common.c | 2
-rw-r--r--  drivers/pci/controller/pci-hyperv.c | 1
-rw-r--r--  drivers/pci/controller/pci-mvebu.c | 1
-rw-r--r--  drivers/pci/controller/pcie-apple.c | 75
-rw-r--r--  drivers/pci/controller/pcie-mediatek-gen3.c | 117
-rw-r--r--  drivers/pci/controller/pcie-rcar-ep.c | 2
-rw-r--r--  drivers/pci/controller/pcie-rockchip-ep.c | 5
-rw-r--r--  drivers/pci/controller/pcie-rockchip.c | 219
-rw-r--r--  drivers/pci/controller/pcie-rockchip.h | 35
-rw-r--r--  drivers/pci/controller/pcie-xilinx-cpm.c | 50
-rw-r--r--  drivers/pci/controller/plda/pcie-microchip-host.c | 96
-rw-r--r--  drivers/pci/controller/plda/pcie-plda-host.c | 17
-rw-r--r--  drivers/pci/controller/plda/pcie-plda.h | 6
-rw-r--r--  drivers/pci/devres.c | 74
-rw-r--r--  drivers/pci/endpoint/functions/pci-epf-test.c | 25
-rw-r--r--  drivers/pci/endpoint/pci-epc-core.c | 37
-rw-r--r--  drivers/pci/endpoint/pci-epf-core.c | 1
-rw-r--r--  drivers/pci/hotplug/acpiphp_ibm.c | 6
-rw-r--r--  drivers/pci/iov.c | 8
-rw-r--r--  drivers/pci/msi/irqdomain.c | 7
-rw-r--r--  drivers/pci/msi/msi.c | 4
-rw-r--r--  drivers/pci/of.c | 22
-rw-r--r--  drivers/pci/of_property.c | 4
-rw-r--r--  drivers/pci/p2pdma.c | 6
-rw-r--r--  drivers/pci/pci-driver.c | 14
-rw-r--r--  drivers/pci/pci-sysfs.c | 150
-rw-r--r--  drivers/pci/pci.c | 281
-rw-r--r--  drivers/pci/pci.h | 23
-rw-r--r--  drivers/pci/pcie/Makefile | 2
-rw-r--r--  drivers/pci/pcie/aer.c | 15
-rw-r--r--  drivers/pci/pcie/aspm.c | 30
-rw-r--r--  drivers/pci/pcie/bwctrl.c | 25
-rw-r--r--  drivers/pci/pcie/dpc.c | 22
-rw-r--r--  drivers/pci/pcie/portdrv.c | 4
-rw-r--r--  drivers/pci/pcie/tlp.c | 115
-rw-r--r--  drivers/pci/probe.c | 110
-rw-r--r--  drivers/pci/quirks.c | 21
-rw-r--r--  drivers/pci/switch/switchtec.c | 26
-rw-r--r--  drivers/pci/tph.c | 2
-rw-r--r--  drivers/pci/vpd.c | 14
52 files changed, 1459 insertions, 928 deletions
diff --git a/drivers/pci/ats.c b/drivers/pci/ats.c
index 6afff1f1b143..c6b266c772c8 100644
--- a/drivers/pci/ats.c
+++ b/drivers/pci/ats.c
@@ -410,7 +410,7 @@ int pci_enable_pasid(struct pci_dev *pdev, int features)
if (WARN_ON(pdev->pasid_enabled))
return -EBUSY;
- if (!pdev->eetlp_prefix_path && !pdev->pasid_no_tlp)
+ if (!pdev->eetlp_prefix_max && !pdev->pasid_no_tlp)
return -EINVAL;
if (!pasid)
diff --git a/drivers/pci/controller/dwc/pci-dra7xx.c b/drivers/pci/controller/dwc/pci-dra7xx.c
index 5c62e1a3ba52..33d6bf460ffe 100644
--- a/drivers/pci/controller/dwc/pci-dra7xx.c
+++ b/drivers/pci/controller/dwc/pci-dra7xx.c
@@ -635,30 +635,20 @@ static int dra7xx_pcie_unaligned_memaccess(struct device *dev)
{
int ret;
struct device_node *np = dev->of_node;
- struct of_phandle_args args;
+ unsigned int args[2];
struct regmap *regmap;
- regmap = syscon_regmap_lookup_by_phandle(np,
- "ti,syscon-unaligned-access");
+ regmap = syscon_regmap_lookup_by_phandle_args(np, "ti,syscon-unaligned-access",
+ 2, args);
if (IS_ERR(regmap)) {
dev_dbg(dev, "can't get ti,syscon-unaligned-access\n");
return -EINVAL;
}
- ret = of_parse_phandle_with_fixed_args(np, "ti,syscon-unaligned-access",
- 2, 0, &args);
- if (ret) {
- dev_err(dev, "failed to parse ti,syscon-unaligned-access\n");
- return ret;
- }
-
- ret = regmap_update_bits(regmap, args.args[0], args.args[1],
- args.args[1]);
+ ret = regmap_update_bits(regmap, args[0], args[1], args[1]);
if (ret)
dev_err(dev, "failed to enable unaligned access\n");
- of_node_put(args.np);
-
return ret;
}
@@ -671,18 +661,13 @@ static int dra7xx_pcie_configure_two_lane(struct device *dev,
u32 mask;
u32 val;
- pcie_syscon = syscon_regmap_lookup_by_phandle(np, "ti,syscon-lane-sel");
+ pcie_syscon = syscon_regmap_lookup_by_phandle_args(np, "ti,syscon-lane-sel",
+ 1, &pcie_reg);
if (IS_ERR(pcie_syscon)) {
dev_err(dev, "unable to get ti,syscon-lane-sel\n");
return -EINVAL;
}
- if (of_property_read_u32_index(np, "ti,syscon-lane-sel", 1,
- &pcie_reg)) {
- dev_err(dev, "couldn't get lane selection reg offset\n");
- return -EINVAL;
- }
-
mask = b1co_mode_sel_mask | PCIE_B0_B1_TSYNCEN;
val = PCIE_B1C0_MODE_SEL | PCIE_B0_B1_TSYNCEN;
regmap_update_bits(pcie_syscon, pcie_reg, mask, val);
diff --git a/drivers/pci/controller/dwc/pci-exynos.c b/drivers/pci/controller/dwc/pci-exynos.c
index 6a830166d37f..ace736b025b1 100644
--- a/drivers/pci/controller/dwc/pci-exynos.c
+++ b/drivers/pci/controller/dwc/pci-exynos.c
@@ -300,7 +300,7 @@ static int exynos_pcie_probe(struct platform_device *pdev)
if (IS_ERR(ep->elbi_base))
return PTR_ERR(ep->elbi_base);
- ret = devm_clk_bulk_get_all_enable(dev, &ep->clks);
+ ret = devm_clk_bulk_get_all_enabled(dev, &ep->clks);
if (ret < 0)
return ret;
diff --git a/drivers/pci/controller/dwc/pci-imx6.c b/drivers/pci/controller/dwc/pci-imx6.c
index c8d5c90aa4d4..90ace941090f 100644
--- a/drivers/pci/controller/dwc/pci-imx6.c
+++ b/drivers/pci/controller/dwc/pci-imx6.c
@@ -33,6 +33,7 @@
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
+#include "../../pci.h"
#include "pcie-designware.h"
#define IMX8MQ_GPR_PCIE_REF_USE_PAD BIT(9)
@@ -55,6 +56,22 @@
#define IMX95_PE0_GEN_CTRL_3 0x1058
#define IMX95_PCIE_LTSSM_EN BIT(0)
+#define IMX95_PE0_LUT_ACSCTRL 0x1008
+#define IMX95_PEO_LUT_RWA BIT(16)
+#define IMX95_PE0_LUT_ENLOC GENMASK(4, 0)
+
+#define IMX95_PE0_LUT_DATA1 0x100c
+#define IMX95_PE0_LUT_VLD BIT(31)
+#define IMX95_PE0_LUT_DAC_ID GENMASK(10, 8)
+#define IMX95_PE0_LUT_STREAM_ID GENMASK(5, 0)
+
+#define IMX95_PE0_LUT_DATA2 0x1010
+#define IMX95_PE0_LUT_REQID GENMASK(31, 16)
+#define IMX95_PE0_LUT_MASK GENMASK(15, 0)
+
+#define IMX95_SID_MASK GENMASK(5, 0)
+#define IMX95_MAX_LUT 32
+
#define to_imx_pcie(x) dev_get_drvdata((x)->dev)
enum imx_pcie_variants {
@@ -70,6 +87,7 @@ enum imx_pcie_variants {
IMX8MQ_EP,
IMX8MM_EP,
IMX8MP_EP,
+ IMX8Q_EP,
IMX95_EP,
};
@@ -87,6 +105,7 @@ enum imx_pcie_variants {
* workaround suspend resume on some devices which are affected by this errata.
*/
#define IMX_PCIE_FLAG_BROKEN_SUSPEND BIT(9)
+#define IMX_PCIE_FLAG_HAS_LUT BIT(10)
#define imx_check_flag(pci, val) (pci->drvdata->flags & val)
@@ -103,6 +122,7 @@ struct imx_pcie_drvdata {
const char *gpr;
const char * const *clk_names;
const u32 clks_cnt;
+ const u32 clks_optional_cnt;
const u32 ltssm_off;
const u32 ltssm_mask;
const u32 mode_off[IMX_PCIE_MAX_INSTANCES];
@@ -111,19 +131,18 @@ struct imx_pcie_drvdata {
int (*init_phy)(struct imx_pcie *pcie);
int (*enable_ref_clk)(struct imx_pcie *pcie, bool enable);
int (*core_reset)(struct imx_pcie *pcie, bool assert);
+ const struct dw_pcie_host_ops *ops;
};
struct imx_pcie {
struct dw_pcie *pci;
struct gpio_desc *reset_gpiod;
- bool link_is_up;
struct clk_bulk_data clks[IMX_PCIE_MAX_CLKS];
struct regmap *iomuxc_gpr;
u16 msi_ctrl;
u32 controller_id;
struct reset_control *pciephy_reset;
struct reset_control *apps_reset;
- struct reset_control *turnoff_reset;
u32 tx_deemph_gen1;
u32 tx_deemph_gen2_3p5db;
u32 tx_deemph_gen2_6db;
@@ -139,6 +158,9 @@ struct imx_pcie {
struct device *pd_pcie_phy;
struct phy *phy;
const struct imx_pcie_drvdata *drvdata;
+
+ /* Ensure that only one device's LUT is configured at any given time */
+ struct mutex lock;
};
/* Parameters for the waiting for PCIe PHY PLL to lock on i.MX7 */
@@ -234,11 +256,11 @@ static void imx_pcie_configure_type(struct imx_pcie *imx_pcie)
id = imx_pcie->controller_id;
- /* If mode_mask is 0, then generic PHY driver is used to set the mode */
+ /* If mode_mask is 0, generic PHY driver is used to set the mode */
if (!drvdata->mode_mask[0])
return;
- /* If mode_mask[id] is zero, means each controller have its individual gpr */
+ /* If mode_mask[id] is 0, each controller has its individual GPR */
if (!drvdata->mode_mask[id])
id = 0;
@@ -375,14 +397,15 @@ static int pcie_phy_write(struct imx_pcie *imx_pcie, int addr, u16 data)
static int imx8mq_pcie_init_phy(struct imx_pcie *imx_pcie)
{
- /* TODO: Currently this code assumes external oscillator is being used */
+ /* TODO: This code assumes external oscillator is being used */
regmap_update_bits(imx_pcie->iomuxc_gpr,
imx_pcie_grp_offset(imx_pcie),
IMX8MQ_GPR_PCIE_REF_USE_PAD,
IMX8MQ_GPR_PCIE_REF_USE_PAD);
/*
- * Regarding the datasheet, the PCIE_VPH is suggested to be 1.8V. If the PCIE_VPH is
- * supplied by 3.3V, the VREG_BYPASS should be cleared to zero.
+ * Per the datasheet, the PCIE_VPH is suggested to be 1.8V. If the
+ * PCIE_VPH is supplied by 3.3V, the VREG_BYPASS should be cleared
+ * to zero.
*/
if (imx_pcie->vph && regulator_get_voltage(imx_pcie->vph) > 3000000)
regmap_update_bits(imx_pcie->iomuxc_gpr,
@@ -393,13 +416,6 @@ static int imx8mq_pcie_init_phy(struct imx_pcie *imx_pcie)
return 0;
}
-static int imx7d_pcie_init_phy(struct imx_pcie *imx_pcie)
-{
- regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR12, IMX7D_GPR12_PCIE_PHY_REFCLK_SEL, 0);
-
- return 0;
-}
-
static int imx_pcie_init_phy(struct imx_pcie *imx_pcie)
{
regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR12,
@@ -576,7 +592,7 @@ static int imx_pcie_attach_pd(struct device *dev)
DL_FLAG_PM_RUNTIME |
DL_FLAG_RPM_ACTIVE);
if (!link) {
- dev_err(dev, "Failed to add device_link to pcie pd.\n");
+ dev_err(dev, "Failed to add device_link to pcie pd\n");
return -EINVAL;
}
@@ -589,7 +605,7 @@ static int imx_pcie_attach_pd(struct device *dev)
DL_FLAG_PM_RUNTIME |
DL_FLAG_RPM_ACTIVE);
if (!link) {
- dev_err(dev, "Failed to add device_link to pcie_phy pd.\n");
+ dev_err(dev, "Failed to add device_link to pcie_phy pd\n");
return -EINVAL;
}
@@ -598,10 +614,9 @@ static int imx_pcie_attach_pd(struct device *dev)
static int imx6sx_pcie_enable_ref_clk(struct imx_pcie *imx_pcie, bool enable)
{
- if (enable)
- regmap_clear_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR12,
- IMX6SX_GPR12_PCIE_TEST_POWERDOWN);
-
+ regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR12,
+ IMX6SX_GPR12_PCIE_TEST_POWERDOWN,
+ enable ? 0 : IMX6SX_GPR12_PCIE_TEST_POWERDOWN);
return 0;
}
@@ -611,10 +626,10 @@ static int imx6q_pcie_enable_ref_clk(struct imx_pcie *imx_pcie, bool enable)
/* power up core phy and enable ref clock */
regmap_clear_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR1, IMX6Q_GPR1_PCIE_TEST_PD);
/*
- * the async reset input need ref clock to sync internally,
+ * The async reset input need ref clock to sync internally,
* when the ref clock comes after reset, internal synced
* reset time is too short, cannot meet the requirement.
- * add one ~10us delay here.
+ * Add a ~10us delay here.
*/
usleep_range(10, 100);
regmap_set_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR1, IMX6Q_GPR1_PCIE_REF_CLK_EN);
@@ -630,19 +645,20 @@ static int imx8mm_pcie_enable_ref_clk(struct imx_pcie *imx_pcie, bool enable)
{
int offset = imx_pcie_grp_offset(imx_pcie);
- if (enable) {
- regmap_clear_bits(imx_pcie->iomuxc_gpr, offset, IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE);
- regmap_set_bits(imx_pcie->iomuxc_gpr, offset, IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE_EN);
- }
-
+ regmap_update_bits(imx_pcie->iomuxc_gpr, offset,
+ IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE,
+ enable ? 0 : IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE);
+ regmap_update_bits(imx_pcie->iomuxc_gpr, offset,
+ IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE_EN,
+ enable ? IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE_EN : 0);
return 0;
}
static int imx7d_pcie_enable_ref_clk(struct imx_pcie *imx_pcie, bool enable)
{
- if (!enable)
- regmap_set_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR12,
- IMX7D_GPR12_PCIE_PHY_REFCLK_SEL);
+ regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR12,
+ IMX7D_GPR12_PCIE_PHY_REFCLK_SEL,
+ enable ? 0 : IMX7D_GPR12_PCIE_PHY_REFCLK_SEL);
return 0;
}
@@ -775,6 +791,7 @@ static void imx_pcie_assert_core_reset(struct imx_pcie *imx_pcie)
static int imx_pcie_deassert_core_reset(struct imx_pcie *imx_pcie)
{
reset_control_deassert(imx_pcie->pciephy_reset);
+ reset_control_deassert(imx_pcie->apps_reset);
if (imx_pcie->drvdata->core_reset)
imx_pcie->drvdata->core_reset(imx_pcie, false);
@@ -884,6 +901,7 @@ static int imx_pcie_start_link(struct dw_pcie *pci)
if (imx_pcie->drvdata->flags &
IMX_PCIE_FLAG_IMX_SPEED_CHANGE) {
+
/*
* On i.MX7, DIRECT_SPEED_CHANGE behaves differently
* from i.MX6 family when no link speed transition
@@ -892,7 +910,6 @@ static int imx_pcie_start_link(struct dw_pcie *pci)
* which will cause the following code to report false
* failure.
*/
-
ret = imx_pcie_wait_for_speed_change(imx_pcie);
if (ret) {
dev_err(dev, "Failed to bring link up!\n");
@@ -908,13 +925,11 @@ static int imx_pcie_start_link(struct dw_pcie *pci)
dev_info(dev, "Link: Only Gen1 is enabled\n");
}
- imx_pcie->link_is_up = true;
tmp = dw_pcie_readw_dbi(pci, offset + PCI_EXP_LNKSTA);
dev_info(dev, "Link up, Gen%i\n", tmp & PCI_EXP_LNKSTA_CLS);
return 0;
err_reset_phy:
- imx_pcie->link_is_up = false;
dev_dbg(dev, "PHY DEBUG_R0=0x%08x DEBUG_R1=0x%08x\n",
dw_pcie_readl_dbi(pci, PCIE_PORT_DEBUG0),
dw_pcie_readl_dbi(pci, PCIE_PORT_DEBUG1));
@@ -930,6 +945,184 @@ static void imx_pcie_stop_link(struct dw_pcie *pci)
imx_pcie_ltssm_disable(dev);
}
+static int imx_pcie_add_lut(struct imx_pcie *imx_pcie, u16 rid, u8 sid)
+{
+ struct dw_pcie *pci = imx_pcie->pci;
+ struct device *dev = pci->dev;
+ u32 data1, data2;
+ int free = -1;
+ int i;
+
+ if (sid >= 64) {
+ dev_err(dev, "Invalid SID for index %d\n", sid);
+ return -EINVAL;
+ }
+
+ guard(mutex)(&imx_pcie->lock);
+
+ /*
+ * Iterate through all LUT entries to check for duplicate RID and
+ * identify the first available entry. Configure this available entry
+ * immediately after verification to avoid rescanning it.
+ */
+ for (i = 0; i < IMX95_MAX_LUT; i++) {
+ regmap_write(imx_pcie->iomuxc_gpr,
+ IMX95_PE0_LUT_ACSCTRL, IMX95_PEO_LUT_RWA | i);
+ regmap_read(imx_pcie->iomuxc_gpr, IMX95_PE0_LUT_DATA1, &data1);
+
+ if (!(data1 & IMX95_PE0_LUT_VLD)) {
+ if (free < 0)
+ free = i;
+ continue;
+ }
+
+ regmap_read(imx_pcie->iomuxc_gpr, IMX95_PE0_LUT_DATA2, &data2);
+
+ /* Do not add duplicate RID */
+ if (rid == FIELD_GET(IMX95_PE0_LUT_REQID, data2)) {
+ dev_warn(dev, "Existing LUT entry available for RID (%d)", rid);
+ return 0;
+ }
+ }
+
+ if (free < 0) {
+ dev_err(dev, "LUT entry is not available\n");
+ return -ENOSPC;
+ }
+
+ data1 = FIELD_PREP(IMX95_PE0_LUT_DAC_ID, 0);
+ data1 |= FIELD_PREP(IMX95_PE0_LUT_STREAM_ID, sid);
+ data1 |= IMX95_PE0_LUT_VLD;
+ regmap_write(imx_pcie->iomuxc_gpr, IMX95_PE0_LUT_DATA1, data1);
+
+ data2 = IMX95_PE0_LUT_MASK; /* Match all bits of RID */
+ data2 |= FIELD_PREP(IMX95_PE0_LUT_REQID, rid);
+ regmap_write(imx_pcie->iomuxc_gpr, IMX95_PE0_LUT_DATA2, data2);
+
+ regmap_write(imx_pcie->iomuxc_gpr, IMX95_PE0_LUT_ACSCTRL, free);
+
+ return 0;
+}
+
+static void imx_pcie_remove_lut(struct imx_pcie *imx_pcie, u16 rid)
+{
+ u32 data2;
+ int i;
+
+ guard(mutex)(&imx_pcie->lock);
+
+ for (i = 0; i < IMX95_MAX_LUT; i++) {
+ regmap_write(imx_pcie->iomuxc_gpr,
+ IMX95_PE0_LUT_ACSCTRL, IMX95_PEO_LUT_RWA | i);
+ regmap_read(imx_pcie->iomuxc_gpr, IMX95_PE0_LUT_DATA2, &data2);
+ if (FIELD_GET(IMX95_PE0_LUT_REQID, data2) == rid) {
+ regmap_write(imx_pcie->iomuxc_gpr,
+ IMX95_PE0_LUT_DATA1, 0);
+ regmap_write(imx_pcie->iomuxc_gpr,
+ IMX95_PE0_LUT_DATA2, 0);
+ regmap_write(imx_pcie->iomuxc_gpr,
+ IMX95_PE0_LUT_ACSCTRL, i);
+
+ break;
+ }
+ }
+}
+
+static int imx_pcie_enable_device(struct pci_host_bridge *bridge,
+ struct pci_dev *pdev)
+{
+ struct imx_pcie *imx_pcie = to_imx_pcie(to_dw_pcie_from_pp(bridge->sysdata));
+ u32 sid_i, sid_m, rid = pci_dev_id(pdev);
+ struct device_node *target;
+ struct device *dev;
+ int err_i, err_m;
+ u32 sid = 0;
+
+ dev = imx_pcie->pci->dev;
+
+ target = NULL;
+ err_i = of_map_id(dev->of_node, rid, "iommu-map", "iommu-map-mask",
+ &target, &sid_i);
+ if (target) {
+ of_node_put(target);
+ } else {
+ /*
+ * "target == NULL && err_i == 0" means RID out of map range.
+ * Use 1:1 map RID to streamID. Hardware can't support this
+ * because the streamID is only 6 bits
+ */
+ err_i = -EINVAL;
+ }
+
+ target = NULL;
+ err_m = of_map_id(dev->of_node, rid, "msi-map", "msi-map-mask",
+ &target, &sid_m);
+
+ /*
+ * err_m target
+ * 0 NULL RID out of range. Use 1:1 map RID to
+ * streamID, Current hardware can't
+ * support it, so return -EINVAL.
+ * != 0 NULL msi-map does not exist, use built-in MSI
+ * 0 != NULL Get correct streamID from RID
+ * != 0 != NULL Invalid combination
+ */
+ if (!err_m && !target)
+ return -EINVAL;
+ else if (target)
+ of_node_put(target); /* Find streamID map entry for RID in msi-map */
+
+ /*
+ * msi-map iommu-map
+ * N N DWC MSI Ctrl
+ * Y Y ITS + SMMU, require the same SID
+ * Y N ITS
+ * N Y DWC MSI Ctrl + SMMU
+ */
+ if (err_i && err_m)
+ return 0;
+
+ if (!err_i && !err_m) {
+ /*
+ * Glue Layer
+ * <==========>
+ * ┌─────┐ ┌──────────┐
+ * │ LUT │ 6-bit streamID │ │
+ * │ │─────────────────►│ MSI │
+ * └─────┘ 2-bit ctrl ID │ │
+ * ┌───────────►│ │
+ * (i.MX95) │ │ │
+ * 00 PCIe0 │ │ │
+ * 01 ENETC │ │ │
+ * 10 PCIe1 │ │ │
+ * │ └──────────┘
+ * The MSI glue layer auto adds 2 bits controller ID ahead of
+ * streamID, so mask these 2 bits to get streamID. The
+ * IOMMU glue layer doesn't do that.
+ */
+ if (sid_i != (sid_m & IMX95_SID_MASK)) {
+ dev_err(dev, "iommu-map and msi-map entries mismatch!\n");
+ return -EINVAL;
+ }
+ }
+
+ if (!err_i)
+ sid = sid_i;
+ else if (!err_m)
+ sid = sid_m & IMX95_SID_MASK;
+
+ return imx_pcie_add_lut(imx_pcie, rid, sid);
+}
+
+static void imx_pcie_disable_device(struct pci_host_bridge *bridge,
+ struct pci_dev *pdev)
+{
+ struct imx_pcie *imx_pcie;
+
+ imx_pcie = to_imx_pcie(to_dw_pcie_from_pp(bridge->sysdata));
+ imx_pcie_remove_lut(imx_pcie, pci_dev_id(pdev));
+}
+
static int imx_pcie_host_init(struct dw_pcie_rp *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
@@ -946,6 +1139,11 @@ static int imx_pcie_host_init(struct dw_pcie_rp *pp)
}
}
+ if (pp->bridge && imx_check_flag(imx_pcie, IMX_PCIE_FLAG_HAS_LUT)) {
+ pp->bridge->enable_device = imx_pcie_enable_device;
+ pp->bridge->disable_device = imx_pcie_disable_device;
+ }
+
imx_pcie_assert_core_reset(imx_pcie);
if (imx_pcie->drvdata->init_phy)
@@ -966,7 +1164,9 @@ static int imx_pcie_host_init(struct dw_pcie_rp *pp)
goto err_clk_disable;
}
- ret = phy_set_mode_ext(imx_pcie->phy, PHY_MODE_PCIE, PHY_MODE_PCIE_RC);
+ ret = phy_set_mode_ext(imx_pcie->phy, PHY_MODE_PCIE,
+ imx_pcie->drvdata->mode == DW_PCIE_EP_TYPE ?
+ PHY_MODE_PCIE_EP : PHY_MODE_PCIE_RC);
if (ret) {
dev_err(dev, "unable to set PCIe PHY mode\n");
goto err_phy_exit;
@@ -1033,9 +1233,31 @@ static u64 imx_pcie_cpu_addr_fixup(struct dw_pcie *pcie, u64 cpu_addr)
return cpu_addr - entry->offset;
}
+/*
+ * In old DWC implementations, PCIE_ATU_INHIBIT_PAYLOAD in iATU Ctrl2
+ * register is reserved, so the generic DWC implementation of sending the
+ * PME_Turn_Off message using a dummy MMIO write cannot be used.
+ */
+static void imx_pcie_pme_turn_off(struct dw_pcie_rp *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct imx_pcie *imx_pcie = to_imx_pcie(pci);
+
+ regmap_set_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR12, IMX6SX_GPR12_PCIE_PM_TURN_OFF);
+ regmap_clear_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR12, IMX6SX_GPR12_PCIE_PM_TURN_OFF);
+
+ usleep_range(PCIE_PME_TO_L2_TIMEOUT_US/10, PCIE_PME_TO_L2_TIMEOUT_US);
+}
+
static const struct dw_pcie_host_ops imx_pcie_host_ops = {
.init = imx_pcie_host_init,
.deinit = imx_pcie_host_exit,
+ .pme_turn_off = imx_pcie_pme_turn_off,
+};
+
+static const struct dw_pcie_host_ops imx_pcie_host_dw_pme_ops = {
+ .init = imx_pcie_host_init,
+ .deinit = imx_pcie_host_exit,
};
static const struct dw_pcie_ops dw_pcie_ops = {
@@ -1082,16 +1304,27 @@ static const struct pci_epc_features imx8m_pcie_epc_features = {
.align = SZ_64K,
};
+static const struct pci_epc_features imx8q_pcie_epc_features = {
+ .linkup_notifier = false,
+ .msi_capable = true,
+ .msix_capable = false,
+ .bar[BAR_1] = { .type = BAR_RESERVED, },
+ .bar[BAR_3] = { .type = BAR_RESERVED, },
+ .bar[BAR_5] = { .type = BAR_RESERVED, },
+ .align = SZ_64K,
+};
+
/*
- * BAR# | Default BAR enable | Default BAR Type | Default BAR Size | BAR Sizing Scheme
- * ================================================================================================
- * BAR0 | Enable | 64-bit | 1 MB | Programmable Size
- * BAR1 | Disable | 32-bit | 64 KB | Fixed Size
- * BAR1 should be disabled if BAR0 is 64bit.
- * BAR2 | Enable | 32-bit | 1 MB | Programmable Size
- * BAR3 | Enable | 32-bit | 64 KB | Programmable Size
- * BAR4 | Enable | 32-bit | 1M | Programmable Size
- * BAR5 | Enable | 32-bit | 64 KB | Programmable Size
+ * | Default | Default | Default | BAR Sizing
+ * BAR# | Enable? | Type | Size | Scheme
+ * =======================================================
+ * BAR0 | Enable | 64-bit | 1 MB | Programmable Size
+ * BAR1 | Disable | 32-bit | 64 KB | Fixed Size
+ * (BAR1 should be disabled if BAR0 is 64-bit)
+ * BAR2 | Enable | 32-bit | 1 MB | Programmable Size
+ * BAR3 | Enable | 32-bit | 64 KB | Programmable Size
+ * BAR4 | Enable | 32-bit | 1 MB | Programmable Size
+ * BAR5 | Enable | 32-bit | 64 KB | Programmable Size
*/
static const struct pci_epc_features imx95_pcie_epc_features = {
.msi_capable = true,
@@ -1118,7 +1351,6 @@ static int imx_add_pcie_ep(struct imx_pcie *imx_pcie,
struct platform_device *pdev)
{
int ret;
- unsigned int pcie_dbi2_offset;
struct dw_pcie_ep *ep;
struct dw_pcie *pci = imx_pcie->pci;
struct dw_pcie_rp *pp = &pci->pp;
@@ -1128,28 +1360,6 @@ static int imx_add_pcie_ep(struct imx_pcie *imx_pcie,
ep = &pci->ep;
ep->ops = &pcie_ep_ops;
- switch (imx_pcie->drvdata->variant) {
- case IMX8MQ_EP:
- case IMX8MM_EP:
- case IMX8MP_EP:
- pcie_dbi2_offset = SZ_1M;
- break;
- default:
- pcie_dbi2_offset = SZ_4K;
- break;
- }
-
- pci->dbi_base2 = pci->dbi_base + pcie_dbi2_offset;
-
- /*
- * FIXME: Ideally, dbi2 base address should come from DT. But since only IMX95 is defining
- * "dbi2" in DT, "dbi_base2" is set to NULL here for that platform alone so that the DWC
- * core code can fetch that from DT. But once all platform DTs were fixed, this and the
- * above "dbi_base2" setting should be removed.
- */
- if (device_property_match_string(dev, "reg-names", "dbi2") >= 0)
- pci->dbi_base2 = NULL;
-
if (imx_check_flag(imx_pcie, IMX_PCIE_FLAG_SUPPORT_64BIT))
dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
@@ -1176,43 +1386,6 @@ static int imx_add_pcie_ep(struct imx_pcie *imx_pcie,
return 0;
}
-static void imx_pcie_pm_turnoff(struct imx_pcie *imx_pcie)
-{
- struct device *dev = imx_pcie->pci->dev;
-
- /* Some variants have a turnoff reset in DT */
- if (imx_pcie->turnoff_reset) {
- reset_control_assert(imx_pcie->turnoff_reset);
- reset_control_deassert(imx_pcie->turnoff_reset);
- goto pm_turnoff_sleep;
- }
-
- /* Others poke directly at IOMUXC registers */
- switch (imx_pcie->drvdata->variant) {
- case IMX6SX:
- case IMX6QP:
- regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR12,
- IMX6SX_GPR12_PCIE_PM_TURN_OFF,
- IMX6SX_GPR12_PCIE_PM_TURN_OFF);
- regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR12,
- IMX6SX_GPR12_PCIE_PM_TURN_OFF, 0);
- break;
- default:
- dev_err(dev, "PME_Turn_Off not implemented\n");
- return;
- }
-
- /*
- * Components with an upstream port must respond to
- * PME_Turn_Off with PME_TO_Ack but we can't check.
- *
- * The standard recommends a 1-10ms timeout after which to
- * proceed anyway as if acks were received.
- */
-pm_turnoff_sleep:
- usleep_range(1000, 10000);
-}
-
static void imx_pcie_msi_save_restore(struct imx_pcie *imx_pcie, bool save)
{
u8 offset;
@@ -1236,7 +1409,6 @@ static void imx_pcie_msi_save_restore(struct imx_pcie *imx_pcie, bool save)
static int imx_pcie_suspend_noirq(struct device *dev)
{
struct imx_pcie *imx_pcie = dev_get_drvdata(dev);
- struct dw_pcie_rp *pp = &imx_pcie->pci->pp;
if (!(imx_pcie->drvdata->flags & IMX_PCIE_FLAG_SUPPORTS_SUSPEND))
return 0;
@@ -1251,9 +1423,7 @@ static int imx_pcie_suspend_noirq(struct device *dev)
imx_pcie_assert_core_reset(imx_pcie);
imx_pcie->drvdata->enable_ref_clk(imx_pcie, false);
} else {
- imx_pcie_pm_turnoff(imx_pcie);
- imx_pcie_stop_link(imx_pcie->pci);
- imx_pcie_host_exit(pp);
+ return dw_pcie_suspend_noirq(imx_pcie->pci);
}
return 0;
@@ -1263,7 +1433,6 @@ static int imx_pcie_resume_noirq(struct device *dev)
{
int ret;
struct imx_pcie *imx_pcie = dev_get_drvdata(dev);
- struct dw_pcie_rp *pp = &imx_pcie->pci->pp;
if (!(imx_pcie->drvdata->flags & IMX_PCIE_FLAG_SUPPORTS_SUSPEND))
return 0;
@@ -1275,6 +1444,7 @@ static int imx_pcie_resume_noirq(struct device *dev)
ret = imx_pcie_deassert_core_reset(imx_pcie);
if (ret)
return ret;
+
/*
* Using PCIE_TEST_PD seems to disable MSI and powers down the
* root complex. This is why we have to setup the rc again and
@@ -1283,17 +1453,12 @@ static int imx_pcie_resume_noirq(struct device *dev)
ret = dw_pcie_setup_rc(&imx_pcie->pci->pp);
if (ret)
return ret;
- imx_pcie_msi_save_restore(imx_pcie, false);
} else {
- ret = imx_pcie_host_init(pp);
+ ret = dw_pcie_resume_noirq(imx_pcie->pci);
if (ret)
return ret;
- imx_pcie_msi_save_restore(imx_pcie, false);
- dw_pcie_setup_rc(pp);
-
- if (imx_pcie->link_is_up)
- imx_pcie_start_link(imx_pcie->pci);
}
+ imx_pcie_msi_save_restore(imx_pcie, false);
return 0;
}
@@ -1311,9 +1476,8 @@ static int imx_pcie_probe(struct platform_device *pdev)
struct device_node *np;
struct resource *dbi_base;
struct device_node *node = dev->of_node;
- int ret;
+ int i, ret, req_cnt;
u16 val;
- int i;
imx_pcie = devm_kzalloc(dev, sizeof(*imx_pcie), GFP_KERNEL);
if (!imx_pcie)
@@ -1325,11 +1489,17 @@ static int imx_pcie_probe(struct platform_device *pdev)
pci->dev = dev;
pci->ops = &dw_pcie_ops;
- pci->pp.ops = &imx_pcie_host_ops;
imx_pcie->pci = pci;
imx_pcie->drvdata = of_device_get_match_data(dev);
+ mutex_init(&imx_pcie->lock);
+
+ if (imx_pcie->drvdata->ops)
+ pci->pp.ops = imx_pcie->drvdata->ops;
+ else
+ pci->pp.ops = &imx_pcie_host_dw_pme_ops;
+
/* Find the PHY if one is defined, only imx7d uses it */
np = of_parse_phandle(node, "fsl,imx7d-pcie-phy", 0);
if (np) {
@@ -1363,9 +1533,13 @@ static int imx_pcie_probe(struct platform_device *pdev)
imx_pcie->clks[i].id = imx_pcie->drvdata->clk_names[i];
/* Fetch clocks */
- ret = devm_clk_bulk_get(dev, imx_pcie->drvdata->clks_cnt, imx_pcie->clks);
+ req_cnt = imx_pcie->drvdata->clks_cnt - imx_pcie->drvdata->clks_optional_cnt;
+ ret = devm_clk_bulk_get(dev, req_cnt, imx_pcie->clks);
if (ret)
return ret;
+ imx_pcie->clks[req_cnt].clk = devm_clk_get_optional(dev, "ref");
+ if (IS_ERR(imx_pcie->clks[req_cnt].clk))
+ return PTR_ERR(imx_pcie->clks[req_cnt].clk);
if (imx_check_flag(imx_pcie, IMX_PCIE_FLAG_HAS_PHYDRV)) {
imx_pcie->phy = devm_phy_get(dev, "pcie-phy");
@@ -1391,7 +1565,6 @@ static int imx_pcie_probe(struct platform_device *pdev)
switch (imx_pcie->drvdata->variant) {
case IMX8MQ:
case IMX8MQ_EP:
- case IMX7D:
if (dbi_base->start == IMX8MQ_PCIE2_BASE_ADDR)
imx_pcie->controller_id = 1;
break;
@@ -1399,13 +1572,6 @@ static int imx_pcie_probe(struct platform_device *pdev)
break;
}
- /* Grab turnoff reset */
- imx_pcie->turnoff_reset = devm_reset_control_get_optional_exclusive(dev, "turnoff");
- if (IS_ERR(imx_pcie->turnoff_reset)) {
- dev_err(dev, "Failed to get TURNOFF reset control\n");
- return PTR_ERR(imx_pcie->turnoff_reset);
- }
-
if (imx_pcie->drvdata->gpr) {
/* Grab GPR config register range */
imx_pcie->iomuxc_gpr =
@@ -1484,6 +1650,7 @@ static int imx_pcie_probe(struct platform_device *pdev)
if (ret < 0)
return ret;
} else {
+ pci->pp.use_atu_msg = true;
ret = dw_pcie_host_init(&pci->pp);
if (ret < 0)
return ret;
@@ -1513,6 +1680,7 @@ static const char * const imx8mm_clks[] = {"pcie_bus", "pcie", "pcie_aux"};
static const char * const imx8mq_clks[] = {"pcie_bus", "pcie", "pcie_phy", "pcie_aux"};
static const char * const imx6sx_clks[] = {"pcie_bus", "pcie", "pcie_phy", "pcie_inbound_axi"};
static const char * const imx8q_clks[] = {"mstr", "slv", "dbi"};
+static const char * const imx95_clks[] = {"pcie_bus", "pcie", "pcie_phy", "pcie_aux", "ref"};
static const struct imx_pcie_drvdata drvdata[] = {
[IMX6Q] = {
@@ -1548,6 +1716,7 @@ static const struct imx_pcie_drvdata drvdata[] = {
.init_phy = imx6sx_pcie_init_phy,
.enable_ref_clk = imx6sx_pcie_enable_ref_clk,
.core_reset = imx6sx_pcie_core_reset,
+ .ops = &imx_pcie_host_ops,
},
[IMX6QP] = {
.variant = IMX6QP,
@@ -1565,6 +1734,7 @@ static const struct imx_pcie_drvdata drvdata[] = {
.init_phy = imx_pcie_init_phy,
.enable_ref_clk = imx6q_pcie_enable_ref_clk,
.core_reset = imx6qp_pcie_core_reset,
+ .ops = &imx_pcie_host_ops,
},
[IMX7D] = {
.variant = IMX7D,
@@ -1576,14 +1746,14 @@ static const struct imx_pcie_drvdata drvdata[] = {
.clks_cnt = ARRAY_SIZE(imx6q_clks),
.mode_off[0] = IOMUXC_GPR12,
.mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE,
- .init_phy = imx7d_pcie_init_phy,
.enable_ref_clk = imx7d_pcie_enable_ref_clk,
.core_reset = imx7d_pcie_core_reset,
},
[IMX8MQ] = {
.variant = IMX8MQ,
.flags = IMX_PCIE_FLAG_HAS_APP_RESET |
- IMX_PCIE_FLAG_HAS_PHY_RESET,
+ IMX_PCIE_FLAG_HAS_PHY_RESET |
+ IMX_PCIE_FLAG_SUPPORTS_SUSPEND,
.gpr = "fsl,imx8mq-iomuxc-gpr",
.clk_names = imx8mq_clks,
.clks_cnt = ARRAY_SIZE(imx8mq_clks),
@@ -1621,15 +1791,19 @@ static const struct imx_pcie_drvdata drvdata[] = {
[IMX8Q] = {
.variant = IMX8Q,
.flags = IMX_PCIE_FLAG_HAS_PHYDRV |
- IMX_PCIE_FLAG_CPU_ADDR_FIXUP,
+ IMX_PCIE_FLAG_CPU_ADDR_FIXUP |
+ IMX_PCIE_FLAG_SUPPORTS_SUSPEND,
.clk_names = imx8q_clks,
.clks_cnt = ARRAY_SIZE(imx8q_clks),
},
[IMX95] = {
.variant = IMX95,
- .flags = IMX_PCIE_FLAG_HAS_SERDES,
- .clk_names = imx8mq_clks,
- .clks_cnt = ARRAY_SIZE(imx8mq_clks),
+ .flags = IMX_PCIE_FLAG_HAS_SERDES |
+ IMX_PCIE_FLAG_HAS_LUT |
+ IMX_PCIE_FLAG_SUPPORTS_SUSPEND,
+ .clk_names = imx95_clks,
+ .clks_cnt = ARRAY_SIZE(imx95_clks),
+ .clks_optional_cnt = 1,
.ltssm_off = IMX95_PE0_GEN_CTRL_3,
.ltssm_mask = IMX95_PCIE_LTSSM_EN,
.mode_off[0] = IMX95_PE0_GEN_CTRL_1,
@@ -1678,6 +1852,14 @@ static const struct imx_pcie_drvdata drvdata[] = {
.epc_features = &imx8m_pcie_epc_features,
.enable_ref_clk = imx8mm_pcie_enable_ref_clk,
},
+ [IMX8Q_EP] = {
+ .variant = IMX8Q_EP,
+ .flags = IMX_PCIE_FLAG_HAS_PHYDRV,
+ .mode = DW_PCIE_EP_TYPE,
+ .epc_features = &imx8q_pcie_epc_features,
+ .clk_names = imx8q_clks,
+ .clks_cnt = ARRAY_SIZE(imx8q_clks),
+ },
[IMX95_EP] = {
.variant = IMX95_EP,
.flags = IMX_PCIE_FLAG_HAS_SERDES |
@@ -1707,6 +1889,7 @@ static const struct of_device_id imx_pcie_of_match[] = {
{ .compatible = "fsl,imx8mq-pcie-ep", .data = &drvdata[IMX8MQ_EP], },
{ .compatible = "fsl,imx8mm-pcie-ep", .data = &drvdata[IMX8MM_EP], },
{ .compatible = "fsl,imx8mp-pcie-ep", .data = &drvdata[IMX8MP_EP], },
+ { .compatible = "fsl,imx8q-pcie-ep", .data = &drvdata[IMX8Q_EP], },
{ .compatible = "fsl,imx95-pcie-ep", .data = &drvdata[IMX95_EP], },
{},
};
diff --git a/drivers/pci/controller/dwc/pci-layerscape.c b/drivers/pci/controller/dwc/pci-layerscape.c
index ee6f52568133..239a05b36e8e 100644
--- a/drivers/pci/controller/dwc/pci-layerscape.c
+++ b/drivers/pci/controller/dwc/pci-layerscape.c
@@ -329,7 +329,6 @@ static int ls_pcie_probe(struct platform_device *pdev)
struct ls_pcie *pcie;
struct resource *dbi_base;
u32 index[2];
- int ret;
pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
if (!pcie)
@@ -355,16 +354,15 @@ static int ls_pcie_probe(struct platform_device *pdev)
pcie->pf_lut_base = pci->dbi_base + pcie->drvdata->pf_lut_off;
if (pcie->drvdata->scfg_support) {
- pcie->scfg = syscon_regmap_lookup_by_phandle(dev->of_node, "fsl,pcie-scfg");
+ pcie->scfg =
+ syscon_regmap_lookup_by_phandle_args(dev->of_node,
+ "fsl,pcie-scfg", 2,
+ index);
if (IS_ERR(pcie->scfg)) {
dev_err(dev, "No syscfg phandle specified\n");
return PTR_ERR(pcie->scfg);
}
- ret = of_property_read_u32_array(dev->of_node, "fsl,pcie-scfg", index, 2);
- if (ret)
- return ret;
-
pcie->index = index[1];
}
diff --git a/drivers/pci/controller/dwc/pcie-artpec6.c b/drivers/pci/controller/dwc/pcie-artpec6.c
index f8e7283dacd4..234c8cbcae3a 100644
--- a/drivers/pci/controller/dwc/pcie-artpec6.c
+++ b/drivers/pci/controller/dwc/pcie-artpec6.c
@@ -369,9 +369,22 @@ static int artpec6_pcie_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
return 0;
}
+static const struct pci_epc_features artpec6_pcie_epc_features = {
+ .linkup_notifier = false,
+ .msi_capable = true,
+ .msix_capable = false,
+};
+
+static const struct pci_epc_features *
+artpec6_pcie_get_features(struct dw_pcie_ep *ep)
+{
+ return &artpec6_pcie_epc_features;
+}
+
static const struct dw_pcie_ep_ops pcie_ep_ops = {
.init = artpec6_pcie_ep_init,
.raise_irq = artpec6_pcie_raise_irq,
+ .get_features = artpec6_pcie_get_features,
};
static int artpec6_pcie_probe(struct platform_device *pdev)
diff --git a/drivers/pci/controller/dwc/pcie-designware-ep.c b/drivers/pci/controller/dwc/pcie-designware-ep.c
index f3ac7d46a855..8e07d432e74f 100644
--- a/drivers/pci/controller/dwc/pcie-designware-ep.c
+++ b/drivers/pci/controller/dwc/pcie-designware-ep.c
@@ -128,7 +128,8 @@ static int dw_pcie_ep_write_header(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
}
static int dw_pcie_ep_inbound_atu(struct dw_pcie_ep *ep, u8 func_no, int type,
- dma_addr_t cpu_addr, enum pci_barno bar)
+ dma_addr_t cpu_addr, enum pci_barno bar,
+ size_t size)
{
int ret;
u32 free_win;
@@ -145,7 +146,7 @@ static int dw_pcie_ep_inbound_atu(struct dw_pcie_ep *ep, u8 func_no, int type,
}
ret = dw_pcie_prog_ep_inbound_atu(pci, func_no, free_win, type,
- cpu_addr, bar);
+ cpu_addr, bar, size);
if (ret < 0) {
dev_err(pci->dev, "Failed to program IB window\n");
return ret;
@@ -222,19 +223,30 @@ static int dw_pcie_ep_set_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
if ((flags & PCI_BASE_ADDRESS_MEM_TYPE_64) && (bar & 1))
return -EINVAL;
- reg = PCI_BASE_ADDRESS_0 + (4 * bar);
-
- if (!(flags & PCI_BASE_ADDRESS_SPACE))
- type = PCIE_ATU_TYPE_MEM;
- else
- type = PCIE_ATU_TYPE_IO;
+ /*
+ * Certain EPF drivers dynamically change the physical address of a BAR
+ * (i.e. they call set_bar() twice, without ever calling clear_bar(), as
+ * calling clear_bar() would clear the BAR's PCI address assigned by the
+ * host).
+ */
+ if (ep->epf_bar[bar]) {
+ /*
+ * We can only dynamically change a BAR if the new BAR size and
+ * BAR flags do not differ from the existing configuration.
+ */
+ if (ep->epf_bar[bar]->barno != bar ||
+ ep->epf_bar[bar]->size != size ||
+ ep->epf_bar[bar]->flags != flags)
+ return -EINVAL;
- ret = dw_pcie_ep_inbound_atu(ep, func_no, type, epf_bar->phys_addr, bar);
- if (ret)
- return ret;
+ /*
+ * When dynamically changing a BAR, skip writing the BAR reg, as
+ * that would clear the BAR's PCI address assigned by the host.
+ */
+ goto config_atu;
+ }
- if (ep->epf_bar[bar])
- return 0;
+ reg = PCI_BASE_ADDRESS_0 + (4 * bar);
dw_pcie_dbi_ro_wr_en(pci);
@@ -246,9 +258,21 @@ static int dw_pcie_ep_set_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
dw_pcie_ep_writel_dbi(ep, func_no, reg + 4, 0);
}
- ep->epf_bar[bar] = epf_bar;
dw_pcie_dbi_ro_wr_dis(pci);
+config_atu:
+ if (!(flags & PCI_BASE_ADDRESS_SPACE))
+ type = PCIE_ATU_TYPE_MEM;
+ else
+ type = PCIE_ATU_TYPE_IO;
+
+ ret = dw_pcie_ep_inbound_atu(ep, func_no, type, epf_bar->phys_addr, bar,
+ size);
+ if (ret)
+ return ret;
+
+ ep->epf_bar[bar] = epf_bar;
+
return 0;
}
diff --git a/drivers/pci/controller/dwc/pcie-designware-host.c b/drivers/pci/controller/dwc/pcie-designware-host.c
index d2291c3ceb8b..ffaded8f2df7 100644
--- a/drivers/pci/controller/dwc/pcie-designware-host.c
+++ b/drivers/pci/controller/dwc/pcie-designware-host.c
@@ -436,18 +436,18 @@ int dw_pcie_host_init(struct dw_pcie_rp *pp)
return ret;
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "config");
- if (res) {
- pp->cfg0_size = resource_size(res);
- pp->cfg0_base = res->start;
-
- pp->va_cfg0_base = devm_pci_remap_cfg_resource(dev, res);
- if (IS_ERR(pp->va_cfg0_base))
- return PTR_ERR(pp->va_cfg0_base);
- } else {
- dev_err(dev, "Missing *config* reg space\n");
+ if (!res) {
+ dev_err(dev, "Missing \"config\" reg space\n");
return -ENODEV;
}
+ pp->cfg0_size = resource_size(res);
+ pp->cfg0_base = res->start;
+
+ pp->va_cfg0_base = devm_pci_remap_cfg_resource(dev, res);
+ if (IS_ERR(pp->va_cfg0_base))
+ return PTR_ERR(pp->va_cfg0_base);
+
bridge = devm_pci_alloc_host_bridge(dev, 0);
if (!bridge)
return -ENOMEM;
@@ -530,8 +530,14 @@ int dw_pcie_host_init(struct dw_pcie_rp *pp)
goto err_remove_edma;
}
- /* Ignore errors, the link may come up later */
- dw_pcie_wait_for_link(pci);
+ /*
+ * Note: Skip the link up delay only when a Link Up IRQ is present.
+ * If there is no Link Up IRQ, we should not bypass the delay
+ * because that would require users to manually rescan for devices.
+ */
+ if (!pp->use_linkup_irq)
+ /* Ignore errors, the link may come up later */
+ dw_pcie_wait_for_link(pci);
bridge->sysdata = pp;
@@ -918,7 +924,7 @@ int dw_pcie_suspend_noirq(struct dw_pcie *pci)
{
u8 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
u32 val;
- int ret = 0;
+ int ret;
/*
* If L1SS is supported, then do not put the link into L2 as some
@@ -927,25 +933,33 @@ int dw_pcie_suspend_noirq(struct dw_pcie *pci)
if (dw_pcie_readw_dbi(pci, offset + PCI_EXP_LNKCTL) & PCI_EXP_LNKCTL_ASPM_L1)
return 0;
- if (dw_pcie_get_ltssm(pci) <= DW_PCIE_LTSSM_DETECT_ACT)
- return 0;
-
- if (pci->pp.ops->pme_turn_off)
+ if (pci->pp.ops->pme_turn_off) {
pci->pp.ops->pme_turn_off(&pci->pp);
- else
+ } else {
ret = dw_pcie_pme_turn_off(pci);
+ if (ret)
+ return ret;
+ }
- if (ret)
- return ret;
-
- ret = read_poll_timeout(dw_pcie_get_ltssm, val, val == DW_PCIE_LTSSM_L2_IDLE,
+ ret = read_poll_timeout(dw_pcie_get_ltssm, val,
+ val == DW_PCIE_LTSSM_L2_IDLE ||
+ val <= DW_PCIE_LTSSM_DETECT_WAIT,
PCIE_PME_TO_L2_TIMEOUT_US/10,
PCIE_PME_TO_L2_TIMEOUT_US, false, pci);
if (ret) {
+ /* Only log message when LTSSM isn't in DETECT or POLL */
dev_err(pci->dev, "Timeout waiting for L2 entry! LTSSM: 0x%x\n", val);
return ret;
}
+ /*
+ * Per PCIe r6.0, sec 5.3.3.2.1, software should wait at least
+ * 100ns after L2/L3 Ready before turning off refclock and
+ * main power. This is harmless when no endpoint is connected.
+ */
+ udelay(1);
+
+ dw_pcie_stop_link(pci);
if (pci->pp.ops->deinit)
pci->pp.ops->deinit(&pci->pp);
diff --git a/drivers/pci/controller/dwc/pcie-designware.c b/drivers/pci/controller/dwc/pcie-designware.c
index 6d6cbc8b5b2c..145e7f579072 100644
--- a/drivers/pci/controller/dwc/pcie-designware.c
+++ b/drivers/pci/controller/dwc/pcie-designware.c
@@ -597,11 +597,12 @@ int dw_pcie_prog_inbound_atu(struct dw_pcie *pci, int index, int type,
}
int dw_pcie_prog_ep_inbound_atu(struct dw_pcie *pci, u8 func_no, int index,
- int type, u64 cpu_addr, u8 bar)
+ int type, u64 cpu_addr, u8 bar, size_t size)
{
u32 retries, val;
- if (!IS_ALIGNED(cpu_addr, pci->region_align))
+ if (!IS_ALIGNED(cpu_addr, pci->region_align) ||
+ !IS_ALIGNED(cpu_addr, size))
return -EINVAL;
dw_pcie_writel_atu_ib(pci, index, PCIE_ATU_LOWER_TARGET,
@@ -970,7 +971,7 @@ static int dw_pcie_edma_irq_verify(struct dw_pcie *pci)
{
struct platform_device *pdev = to_platform_device(pci->dev);
u16 ch_cnt = pci->edma.ll_wr_cnt + pci->edma.ll_rd_cnt;
- char name[6];
+ char name[15];
int ret;
if (pci->edma.nr_irqs == 1)
diff --git a/drivers/pci/controller/dwc/pcie-designware.h b/drivers/pci/controller/dwc/pcie-designware.h
index 347ab74ac35a..501d9ddfea16 100644
--- a/drivers/pci/controller/dwc/pcie-designware.h
+++ b/drivers/pci/controller/dwc/pcie-designware.h
@@ -330,6 +330,7 @@ enum dw_pcie_ltssm {
/* Need to align with PCIE_PORT_DEBUG0 bits 0:5 */
DW_PCIE_LTSSM_DETECT_QUIET = 0x0,
DW_PCIE_LTSSM_DETECT_ACT = 0x1,
+ DW_PCIE_LTSSM_DETECT_WAIT = 0x6,
DW_PCIE_LTSSM_L0 = 0x11,
DW_PCIE_LTSSM_L2_IDLE = 0x15,
@@ -379,6 +380,7 @@ struct dw_pcie_rp {
bool use_atu_msg;
int msg_atu_index;
struct resource *msg_res;
+ bool use_linkup_irq;
};
struct dw_pcie_ep_ops {
@@ -491,16 +493,13 @@ int dw_pcie_prog_outbound_atu(struct dw_pcie *pci,
int dw_pcie_prog_inbound_atu(struct dw_pcie *pci, int index, int type,
u64 cpu_addr, u64 pci_addr, u64 size);
int dw_pcie_prog_ep_inbound_atu(struct dw_pcie *pci, u8 func_no, int index,
- int type, u64 cpu_addr, u8 bar);
+ int type, u64 cpu_addr, u8 bar, size_t size);
void dw_pcie_disable_atu(struct dw_pcie *pci, u32 dir, int index);
void dw_pcie_setup(struct dw_pcie *pci);
void dw_pcie_iatu_detect(struct dw_pcie *pci);
int dw_pcie_edma_detect(struct dw_pcie *pci);
void dw_pcie_edma_remove(struct dw_pcie *pci);
-int dw_pcie_suspend_noirq(struct dw_pcie *pci);
-int dw_pcie_resume_noirq(struct dw_pcie *pci);
-
static inline void dw_pcie_writel_dbi(struct dw_pcie *pci, u32 reg, u32 val)
{
dw_pcie_write_dbi(pci, reg, 0x4, val);
@@ -678,6 +677,8 @@ static inline enum dw_pcie_ltssm dw_pcie_get_ltssm(struct dw_pcie *pci)
}
#ifdef CONFIG_PCIE_DW_HOST
+int dw_pcie_suspend_noirq(struct dw_pcie *pci);
+int dw_pcie_resume_noirq(struct dw_pcie *pci);
irqreturn_t dw_handle_msi_irq(struct dw_pcie_rp *pp);
int dw_pcie_setup_rc(struct dw_pcie_rp *pp);
int dw_pcie_host_init(struct dw_pcie_rp *pp);
@@ -686,6 +687,16 @@ int dw_pcie_allocate_domains(struct dw_pcie_rp *pp);
void __iomem *dw_pcie_own_conf_map_bus(struct pci_bus *bus, unsigned int devfn,
int where);
#else
+static inline int dw_pcie_suspend_noirq(struct dw_pcie *pci)
+{
+ return 0;
+}
+
+static inline int dw_pcie_resume_noirq(struct dw_pcie *pci)
+{
+ return 0;
+}
+
static inline irqreturn_t dw_handle_msi_irq(struct dw_pcie_rp *pp)
{
return IRQ_NONE;
diff --git a/drivers/pci/controller/dwc/pcie-dw-rockchip.c b/drivers/pci/controller/dwc/pcie-dw-rockchip.c
index 1170e1107508..93698abff4d9 100644
--- a/drivers/pci/controller/dwc/pcie-dw-rockchip.c
+++ b/drivers/pci/controller/dwc/pcie-dw-rockchip.c
@@ -389,6 +389,34 @@ static const struct dw_pcie_ops dw_pcie_ops = {
.stop_link = rockchip_pcie_stop_link,
};
+static irqreturn_t rockchip_pcie_rc_sys_irq_thread(int irq, void *arg)
+{
+ struct rockchip_pcie *rockchip = arg;
+ struct dw_pcie *pci = &rockchip->pci;
+ struct dw_pcie_rp *pp = &pci->pp;
+ struct device *dev = pci->dev;
+ u32 reg, val;
+
+ reg = rockchip_pcie_readl_apb(rockchip, PCIE_CLIENT_INTR_STATUS_MISC);
+ rockchip_pcie_writel_apb(rockchip, reg, PCIE_CLIENT_INTR_STATUS_MISC);
+
+ dev_dbg(dev, "PCIE_CLIENT_INTR_STATUS_MISC: %#x\n", reg);
+ dev_dbg(dev, "LTSSM_STATUS: %#x\n", rockchip_pcie_get_ltssm(rockchip));
+
+ if (reg & PCIE_RDLH_LINK_UP_CHGED) {
+ val = rockchip_pcie_get_ltssm(rockchip);
+ if ((val & PCIE_LINKUP) == PCIE_LINKUP) {
+ dev_dbg(dev, "Received Link up event. Starting enumeration!\n");
+ /* Rescan the bus to enumerate endpoint devices */
+ pci_lock_rescan_remove();
+ pci_rescan_bus(pp->bridge->bus);
+ pci_unlock_rescan_remove();
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
static irqreturn_t rockchip_pcie_ep_sys_irq_thread(int irq, void *arg)
{
struct rockchip_pcie *rockchip = arg;
@@ -418,14 +446,29 @@ static irqreturn_t rockchip_pcie_ep_sys_irq_thread(int irq, void *arg)
return IRQ_HANDLED;
}
-static int rockchip_pcie_configure_rc(struct rockchip_pcie *rockchip)
+static int rockchip_pcie_configure_rc(struct platform_device *pdev,
+ struct rockchip_pcie *rockchip)
{
+ struct device *dev = &pdev->dev;
struct dw_pcie_rp *pp;
+ int irq, ret;
u32 val;
if (!IS_ENABLED(CONFIG_PCIE_ROCKCHIP_DW_HOST))
return -ENODEV;
+ irq = platform_get_irq_byname(pdev, "sys");
+ if (irq < 0)
+ return irq;
+
+ ret = devm_request_threaded_irq(dev, irq, NULL,
+ rockchip_pcie_rc_sys_irq_thread,
+ IRQF_ONESHOT, "pcie-sys-rc", rockchip);
+ if (ret) {
+ dev_err(dev, "failed to request PCIe sys IRQ\n");
+ return ret;
+ }
+
/* LTSSM enable control mode */
val = HIWORD_UPDATE_BIT(PCIE_LTSSM_ENABLE_ENHANCE);
rockchip_pcie_writel_apb(rockchip, val, PCIE_CLIENT_HOT_RESET_CTRL);
@@ -435,8 +478,19 @@ static int rockchip_pcie_configure_rc(struct rockchip_pcie *rockchip)
pp = &rockchip->pci.pp;
pp->ops = &rockchip_pcie_host_ops;
+ pp->use_linkup_irq = true;
+
+ ret = dw_pcie_host_init(pp);
+ if (ret) {
+ dev_err(dev, "failed to initialize host\n");
+ return ret;
+ }
+
+ /* unmask DLL up/down indicator */
+ val = HIWORD_UPDATE(PCIE_RDLH_LINK_UP_CHGED, 0);
+ rockchip_pcie_writel_apb(rockchip, val, PCIE_CLIENT_INTR_MASK_MISC);
- return dw_pcie_host_init(pp);
+ return ret;
}
static int rockchip_pcie_configure_ep(struct platform_device *pdev,
@@ -450,14 +504,12 @@ static int rockchip_pcie_configure_ep(struct platform_device *pdev,
return -ENODEV;
irq = platform_get_irq_byname(pdev, "sys");
- if (irq < 0) {
- dev_err(dev, "missing sys IRQ resource\n");
+ if (irq < 0)
return irq;
- }
ret = devm_request_threaded_irq(dev, irq, NULL,
rockchip_pcie_ep_sys_irq_thread,
- IRQF_ONESHOT, "pcie-sys", rockchip);
+ IRQF_ONESHOT, "pcie-sys-ep", rockchip);
if (ret) {
dev_err(dev, "failed to request PCIe sys IRQ\n");
return ret;
@@ -491,7 +543,8 @@ static int rockchip_pcie_configure_ep(struct platform_device *pdev,
pci_epc_init_notify(rockchip->pci.ep.epc);
/* unmask DLL up/down indicator and hot reset/link-down reset */
- rockchip_pcie_writel_apb(rockchip, 0x60000, PCIE_CLIENT_INTR_MASK_MISC);
+ val = HIWORD_UPDATE(PCIE_RDLH_LINK_UP_CHGED | PCIE_LINK_REQ_RST_NOT_INT, 0);
+ rockchip_pcie_writel_apb(rockchip, val, PCIE_CLIENT_INTR_MASK_MISC);
return ret;
}
@@ -553,7 +606,7 @@ static int rockchip_pcie_probe(struct platform_device *pdev)
switch (data->mode) {
case DW_PCIE_RC_TYPE:
- ret = rockchip_pcie_configure_rc(rockchip);
+ ret = rockchip_pcie_configure_rc(pdev, rockchip);
if (ret)
goto deinit_clk;
break;
diff --git a/drivers/pci/controller/dwc/pcie-qcom.c b/drivers/pci/controller/dwc/pcie-qcom.c
index dc102d8bd58c..e4d3366ead1f 100644
--- a/drivers/pci/controller/dwc/pcie-qcom.c
+++ b/drivers/pci/controller/dwc/pcie-qcom.c
@@ -1569,6 +1569,8 @@ static irqreturn_t qcom_pcie_global_irq_thread(int irq, void *data)
pci_lock_rescan_remove();
pci_rescan_bus(pp->bridge->bus);
pci_unlock_rescan_remove();
+
+ qcom_pcie_icc_opp_update(pcie);
} else {
dev_WARN_ONCE(dev, 1, "Received unknown event. INT_STATUS: 0x%08x\n",
status);
@@ -1703,6 +1705,10 @@ static int qcom_pcie_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, pcie);
+ irq = platform_get_irq_byname_optional(pdev, "global");
+ if (irq > 0)
+ pp->use_linkup_irq = true;
+
ret = dw_pcie_host_init(pp);
if (ret) {
dev_err(dev, "cannot initialize host\n");
@@ -1716,7 +1722,6 @@ static int qcom_pcie_probe(struct platform_device *pdev)
goto err_host_deinit;
}
- irq = platform_get_irq_byname_optional(pdev, "global");
if (irq > 0) {
ret = devm_request_threaded_irq(&pdev->dev, irq, NULL,
qcom_pcie_global_irq_thread,
diff --git a/drivers/pci/controller/pci-host-common.c b/drivers/pci/controller/pci-host-common.c
index cf5f59a745b3..f441bfd6f96a 100644
--- a/drivers/pci/controller/pci-host-common.c
+++ b/drivers/pci/controller/pci-host-common.c
@@ -75,6 +75,8 @@ int pci_host_common_probe(struct platform_device *pdev)
bridge->sysdata = cfg;
bridge->ops = (struct pci_ops *)&ops->pci_ops;
+ bridge->enable_device = ops->enable_device;
+ bridge->disable_device = ops->disable_device;
bridge->msi_domain = true;
return pci_host_probe(bridge);
diff --git a/drivers/pci/controller/pci-hyperv.c b/drivers/pci/controller/pci-hyperv.c
index cdd5be16021d..6084b38bdda1 100644
--- a/drivers/pci/controller/pci-hyperv.c
+++ b/drivers/pci/controller/pci-hyperv.c
@@ -2053,6 +2053,7 @@ static struct irq_chip hv_msi_irq_chip = {
.irq_set_affinity = irq_chip_set_affinity_parent,
#ifdef CONFIG_X86
.irq_ack = irq_chip_ack_parent,
+ .flags = IRQCHIP_MOVE_DEFERRED,
#elif defined(CONFIG_ARM64)
.irq_eoi = irq_chip_eoi_parent,
#endif
diff --git a/drivers/pci/controller/pci-mvebu.c b/drivers/pci/controller/pci-mvebu.c
index 46d3afe1d308..665f35f9d826 100644
--- a/drivers/pci/controller/pci-mvebu.c
+++ b/drivers/pci/controller/pci-mvebu.c
@@ -1715,6 +1715,7 @@ static const struct of_device_id mvebu_pcie_of_match_table[] = {
{ .compatible = "marvell,kirkwood-pcie", },
{},
};
+MODULE_DEVICE_TABLE(of, mvebu_pcie_of_match_table);
static const struct dev_pm_ops mvebu_pcie_pm_ops = {
NOIRQ_SYSTEM_SLEEP_PM_OPS(mvebu_pcie_suspend, mvebu_pcie_resume)
diff --git a/drivers/pci/controller/pcie-apple.c b/drivers/pci/controller/pcie-apple.c
index fefab2758a06..a7e51bc1c2fe 100644
--- a/drivers/pci/controller/pcie-apple.c
+++ b/drivers/pci/controller/pcie-apple.c
@@ -26,7 +26,6 @@
#include <linux/list.h>
#include <linux/module.h>
#include <linux/msi.h>
-#include <linux/notifier.h>
#include <linux/of_irq.h>
#include <linux/pci-ecam.h>
@@ -667,12 +666,16 @@ static struct apple_pcie_port *apple_pcie_get_port(struct pci_dev *pdev)
return NULL;
}
-static int apple_pcie_add_device(struct apple_pcie_port *port,
- struct pci_dev *pdev)
+static int apple_pcie_enable_device(struct pci_host_bridge *bridge, struct pci_dev *pdev)
{
u32 sid, rid = pci_dev_id(pdev);
+ struct apple_pcie_port *port;
int idx, err;
+ port = apple_pcie_get_port(pdev);
+ if (!port)
+ return 0;
+
dev_dbg(&pdev->dev, "added to bus %s, index %d\n",
pci_name(pdev->bus->self), port->idx);
@@ -698,12 +701,16 @@ static int apple_pcie_add_device(struct apple_pcie_port *port,
return idx >= 0 ? 0 : -ENOSPC;
}
-static void apple_pcie_release_device(struct apple_pcie_port *port,
- struct pci_dev *pdev)
+static void apple_pcie_disable_device(struct pci_host_bridge *bridge, struct pci_dev *pdev)
{
+ struct apple_pcie_port *port;
u32 rid = pci_dev_id(pdev);
int idx;
+ port = apple_pcie_get_port(pdev);
+ if (!port)
+ return;
+
mutex_lock(&port->pcie->lock);
for_each_set_bit(idx, port->sid_map, port->sid_map_sz) {
@@ -721,45 +728,6 @@ static void apple_pcie_release_device(struct apple_pcie_port *port,
mutex_unlock(&port->pcie->lock);
}
-static int apple_pcie_bus_notifier(struct notifier_block *nb,
- unsigned long action,
- void *data)
-{
- struct device *dev = data;
- struct pci_dev *pdev = to_pci_dev(dev);
- struct apple_pcie_port *port;
- int err;
-
- /*
- * This is a bit ugly. We assume that if we get notified for
- * any PCI device, we must be in charge of it, and that there
- * is no other PCI controller in the whole system. It probably
- * holds for now, but who knows for how long?
- */
- port = apple_pcie_get_port(pdev);
- if (!port)
- return NOTIFY_DONE;
-
- switch (action) {
- case BUS_NOTIFY_ADD_DEVICE:
- err = apple_pcie_add_device(port, pdev);
- if (err)
- return notifier_from_errno(err);
- break;
- case BUS_NOTIFY_DEL_DEVICE:
- apple_pcie_release_device(port, pdev);
- break;
- default:
- return NOTIFY_DONE;
- }
-
- return NOTIFY_OK;
-}
-
-static struct notifier_block apple_pcie_nb = {
- .notifier_call = apple_pcie_bus_notifier,
-};
-
static int apple_pcie_init(struct pci_config_window *cfg)
{
struct device *dev = cfg->parent;
@@ -799,23 +767,10 @@ static int apple_pcie_init(struct pci_config_window *cfg)
return 0;
}
-static int apple_pcie_probe(struct platform_device *pdev)
-{
- int ret;
-
- ret = bus_register_notifier(&pci_bus_type, &apple_pcie_nb);
- if (ret)
- return ret;
-
- ret = pci_host_common_probe(pdev);
- if (ret)
- bus_unregister_notifier(&pci_bus_type, &apple_pcie_nb);
-
- return ret;
-}
-
static const struct pci_ecam_ops apple_pcie_cfg_ecam_ops = {
.init = apple_pcie_init,
+ .enable_device = apple_pcie_enable_device,
+ .disable_device = apple_pcie_disable_device,
.pci_ops = {
.map_bus = pci_ecam_map_bus,
.read = pci_generic_config_read,
@@ -830,7 +785,7 @@ static const struct of_device_id apple_pcie_of_match[] = {
MODULE_DEVICE_TABLE(of, apple_pcie_of_match);
static struct platform_driver apple_pcie_driver = {
- .probe = apple_pcie_probe,
+ .probe = pci_host_common_probe,
.driver = {
.name = "pcie-apple",
.of_match_table = apple_pcie_of_match,
diff --git a/drivers/pci/controller/pcie-mediatek-gen3.c b/drivers/pci/controller/pcie-mediatek-gen3.c
index be52e3a123ab..aa24ac9aaecc 100644
--- a/drivers/pci/controller/pcie-mediatek-gen3.c
+++ b/drivers/pci/controller/pcie-mediatek-gen3.c
@@ -125,6 +125,8 @@
#define MAX_NUM_PHY_RESETS 3
+#define PCIE_MTK_RESET_TIME_US 10
+
/* Time in ms needed to complete PCIe reset on EN7581 SoC */
#define PCIE_EN7581_RESET_TIME_MS 100
@@ -133,10 +135,18 @@ struct mtk_gen3_pcie;
#define PCIE_CONF_LINK2_CTL_STS (PCIE_CFG_OFFSET_ADDR + 0xb0)
#define PCIE_CONF_LINK2_LCR2_LINK_SPEED GENMASK(3, 0)
+enum mtk_gen3_pcie_flags {
+ SKIP_PCIE_RSTB = BIT(0), /* Skip PERST# assertion during device
+ * probing or suspend/resume phase to
+ * avoid hw bugs/issues.
+ */
+};
+
/**
* struct mtk_gen3_pcie_pdata - differentiate between host generations
* @power_up: pcie power_up callback
* @phy_resets: phy reset lines SoC data.
+ * @flags: pcie device flags.
*/
struct mtk_gen3_pcie_pdata {
int (*power_up)(struct mtk_gen3_pcie *pcie);
@@ -144,6 +154,7 @@ struct mtk_gen3_pcie_pdata {
const char *id[MAX_NUM_PHY_RESETS];
int num_resets;
} phy_resets;
+ u32 flags;
};
/**
@@ -438,22 +449,33 @@ static int mtk_pcie_startup_port(struct mtk_gen3_pcie *pcie)
val |= PCIE_DISABLE_DVFSRC_VLT_REQ;
writel_relaxed(val, pcie->base + PCIE_MISC_CTRL_REG);
- /* Assert all reset signals */
- val = readl_relaxed(pcie->base + PCIE_RST_CTRL_REG);
- val |= PCIE_MAC_RSTB | PCIE_PHY_RSTB | PCIE_BRG_RSTB | PCIE_PE_RSTB;
- writel_relaxed(val, pcie->base + PCIE_RST_CTRL_REG);
-
/*
- * Described in PCIe CEM specification sections 2.2 (PERST# Signal)
- * and 2.2.1 (Initial Power-Up (G3 to S0)).
- * The deassertion of PERST# should be delayed 100ms (TPVPERL)
- * for the power and clock to become stable.
+ * Airoha EN7581 has a hw bug asserting/releasing PCIE_PE_RSTB signal
+ * causing occasional PCIe link down. In order to overcome the issue,
+ * PCIE_RSTB signals are not asserted/released at this stage and the
+ * PCIe block is reset using en7523_reset_assert() and
+ * en7581_pci_enable().
*/
- msleep(100);
-
- /* De-assert reset signals */
- val &= ~(PCIE_MAC_RSTB | PCIE_PHY_RSTB | PCIE_BRG_RSTB | PCIE_PE_RSTB);
- writel_relaxed(val, pcie->base + PCIE_RST_CTRL_REG);
+ if (!(pcie->soc->flags & SKIP_PCIE_RSTB)) {
+ /* Assert all reset signals */
+ val = readl_relaxed(pcie->base + PCIE_RST_CTRL_REG);
+ val |= PCIE_MAC_RSTB | PCIE_PHY_RSTB | PCIE_BRG_RSTB |
+ PCIE_PE_RSTB;
+ writel_relaxed(val, pcie->base + PCIE_RST_CTRL_REG);
+
+ /*
+ * Described in PCIe CEM specification revision 6.0.
+ *
+ * The deassertion of PERST# should be delayed 100ms (TPVPERL)
+ * for the power and clock to become stable.
+ */
+ msleep(PCIE_T_PVPERL_MS);
+
+ /* De-assert reset signals */
+ val &= ~(PCIE_MAC_RSTB | PCIE_PHY_RSTB | PCIE_BRG_RSTB |
+ PCIE_PE_RSTB);
+ writel_relaxed(val, pcie->base + PCIE_RST_CTRL_REG);
+ }
/* Check if the link is up or not */
err = readl_poll_timeout(pcie->base + PCIE_LINK_STATUS_REG, val,
@@ -913,11 +935,20 @@ static int mtk_pcie_en7581_power_up(struct mtk_gen3_pcie *pcie)
u32 val;
/*
- * Wait for the time needed to complete the bulk assert in
- * mtk_pcie_setup for EN7581 SoC.
+ * The controller may have been left out of reset by the bootloader
+ * so make sure that we get a clean start by asserting resets here.
*/
- mdelay(PCIE_EN7581_RESET_TIME_MS);
+ reset_control_bulk_assert(pcie->soc->phy_resets.num_resets,
+ pcie->phy_resets);
+ reset_control_assert(pcie->mac_reset);
+ /* Wait for the time needed to complete the reset lines assert. */
+ msleep(PCIE_EN7581_RESET_TIME_MS);
+
+ /*
+ * Unlike the other MediaTek Gen3 controllers, the Airoha EN7581
+ * requires PHY initialization and power-on before PHY reset deassert.
+ */
err = phy_init(pcie->phy);
if (err) {
dev_err(dev, "failed to initialize PHY\n");
@@ -940,17 +971,11 @@ static int mtk_pcie_en7581_power_up(struct mtk_gen3_pcie *pcie)
* Wait for the time needed to complete the bulk de-assert above.
* This time is specific for EN7581 SoC.
*/
- mdelay(PCIE_EN7581_RESET_TIME_MS);
+ msleep(PCIE_EN7581_RESET_TIME_MS);
pm_runtime_enable(dev);
pm_runtime_get_sync(dev);
- err = clk_bulk_prepare(pcie->num_clks, pcie->clks);
- if (err) {
- dev_err(dev, "failed to prepare clock\n");
- goto err_clk_prepare;
- }
-
val = FIELD_PREP(PCIE_VAL_LN0_DOWNSTREAM, 0x47) |
FIELD_PREP(PCIE_VAL_LN1_DOWNSTREAM, 0x47) |
FIELD_PREP(PCIE_VAL_LN0_UPSTREAM, 0x41) |
@@ -963,17 +988,22 @@ static int mtk_pcie_en7581_power_up(struct mtk_gen3_pcie *pcie)
FIELD_PREP(PCIE_K_FINETUNE_MAX, 0xf);
writel_relaxed(val, pcie->base + PCIE_PIPE4_PIE8_REG);
- err = clk_bulk_enable(pcie->num_clks, pcie->clks);
+ err = clk_bulk_prepare_enable(pcie->num_clks, pcie->clks);
if (err) {
dev_err(dev, "failed to prepare clock\n");
- goto err_clk_enable;
+ goto err_clk_prepare_enable;
}
+ /*
+ * Airoha EN7581 performs PCIe reset via clk callbacks since it has a
+ * hw issue with PCIE_PE_RSTB signal. Add wait for the time needed to
+ * complete the PCIe reset.
+ */
+ msleep(PCIE_T_PVPERL_MS);
+
return 0;
-err_clk_enable:
- clk_bulk_unprepare(pcie->num_clks, pcie->clks);
-err_clk_prepare:
+err_clk_prepare_enable:
pm_runtime_put_sync(dev);
pm_runtime_disable(dev);
reset_control_bulk_assert(pcie->soc->phy_resets.num_resets, pcie->phy_resets);
@@ -990,6 +1020,15 @@ static int mtk_pcie_power_up(struct mtk_gen3_pcie *pcie)
struct device *dev = pcie->dev;
int err;
+ /*
+ * The controller may have been left out of reset by the bootloader
+ * so make sure that we get a clean start by asserting resets here.
+ */
+ reset_control_bulk_assert(pcie->soc->phy_resets.num_resets,
+ pcie->phy_resets);
+ reset_control_assert(pcie->mac_reset);
+ usleep_range(PCIE_MTK_RESET_TIME_US, 2 * PCIE_MTK_RESET_TIME_US);
+
/* PHY power on and enable pipe clock */
err = reset_control_bulk_deassert(pcie->soc->phy_resets.num_resets, pcie->phy_resets);
if (err) {
@@ -1074,14 +1113,6 @@ static int mtk_pcie_setup(struct mtk_gen3_pcie *pcie)
* counter since the bulk is shared.
*/
reset_control_bulk_deassert(pcie->soc->phy_resets.num_resets, pcie->phy_resets);
- /*
- * The controller may have been left out of reset by the bootloader
- * so make sure that we get a clean start by asserting resets here.
- */
- reset_control_bulk_assert(pcie->soc->phy_resets.num_resets, pcie->phy_resets);
-
- reset_control_assert(pcie->mac_reset);
- usleep_range(10, 20);
/* Don't touch the hardware registers before power up */
err = pcie->soc->power_up(pcie);
@@ -1231,10 +1262,12 @@ static int mtk_pcie_suspend_noirq(struct device *dev)
return err;
}
- /* Pull down the PERST# pin */
- val = readl_relaxed(pcie->base + PCIE_RST_CTRL_REG);
- val |= PCIE_PE_RSTB;
- writel_relaxed(val, pcie->base + PCIE_RST_CTRL_REG);
+ if (!(pcie->soc->flags & SKIP_PCIE_RSTB)) {
+ /* Assert the PERST# pin */
+ val = readl_relaxed(pcie->base + PCIE_RST_CTRL_REG);
+ val |= PCIE_PE_RSTB;
+ writel_relaxed(val, pcie->base + PCIE_RST_CTRL_REG);
+ }
dev_dbg(pcie->dev, "entered L2 states successfully");
@@ -1285,6 +1318,7 @@ static const struct mtk_gen3_pcie_pdata mtk_pcie_soc_en7581 = {
.id[2] = "phy-lane2",
.num_resets = 3,
},
+ .flags = SKIP_PCIE_RSTB,
};
static const struct of_device_id mtk_pcie_of_match[] = {
@@ -1301,6 +1335,7 @@ static struct platform_driver mtk_pcie_driver = {
.name = "mtk-pcie-gen3",
.of_match_table = mtk_pcie_of_match,
.pm = &mtk_pcie_pm_ops,
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
},
};
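For reference, below is a minimal sketch of the PERST# sequencing the hunks above implement: assert the reset, hold it for at least T_PVPERL, de-assert, then poll the link. The register offsets, bit names, and MMIO base are placeholders, not the driver's real definitions; only the 100 ms T_PVPERL value mirrors the patch.

#include <linux/bits.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/iopoll.h>

#define EXAMPLE_RST_CTRL	0x148		/* hypothetical offset */
#define EXAMPLE_PE_RSTB		BIT(3)		/* hypothetical PERST# bit */
#define EXAMPLE_LINK_STATUS	0x154		/* hypothetical offset */
#define EXAMPLE_LINK_UP		BIT(8)		/* hypothetical link-up bit */
#define EXAMPLE_T_PVPERL_MS	100		/* mirrors PCIE_T_PVPERL_MS */

static int example_perst_sequence(void __iomem *base)
{
	u32 val;

	/* Assert PERST# (and any companion resets) */
	val = readl_relaxed(base + EXAMPLE_RST_CTRL);
	val |= EXAMPLE_PE_RSTB;
	writel_relaxed(val, base + EXAMPLE_RST_CTRL);

	/* Keep PERST# asserted for at least T_PVPERL (100 ms) */
	msleep(EXAMPLE_T_PVPERL_MS);

	/* De-assert PERST# */
	val &= ~EXAMPLE_PE_RSTB;
	writel_relaxed(val, base + EXAMPLE_RST_CTRL);

	/* Poll the link-up bit every 10 ms for up to 1 s */
	return readl_poll_timeout(base + EXAMPLE_LINK_STATUS, val,
				  val & EXAMPLE_LINK_UP,
				  10 * USEC_PER_MSEC, USEC_PER_SEC);
}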
diff --git a/drivers/pci/controller/pcie-rcar-ep.c b/drivers/pci/controller/pcie-rcar-ep.c
index 047e2cef5afc..c5e0d025bc43 100644
--- a/drivers/pci/controller/pcie-rcar-ep.c
+++ b/drivers/pci/controller/pcie-rcar-ep.c
@@ -107,7 +107,7 @@ static int rcar_pcie_parse_outbound_ranges(struct rcar_pcie_endpoint *ep,
}
if (!devm_request_mem_region(&pdev->dev, res->start,
resource_size(res),
- outbound_name)) {
+ res->name)) {
dev_err(pcie->dev, "Cannot request memory region %s.\n",
outbound_name);
return -EIO;
diff --git a/drivers/pci/controller/pcie-rockchip-ep.c b/drivers/pci/controller/pcie-rockchip-ep.c
index 1064b7b06cef..85ea36df2f59 100644
--- a/drivers/pci/controller/pcie-rockchip-ep.c
+++ b/drivers/pci/controller/pcie-rockchip-ep.c
@@ -40,6 +40,10 @@
* @irq_pci_fn: the latest PCI function that has updated the mapping of
* the MSI/INTX IRQ dedicated outbound region.
* @irq_pending: bitmask of asserted INTX IRQs.
+ * @perst_irq: IRQ used for the PERST# signal.
+ * @perst_asserted: True if the PERST# signal was asserted.
+ * @link_up: True if the PCI link is up.
+ * @link_training: Work item to execute PCI link training.
*/
struct rockchip_pcie_ep {
struct rockchip_pcie rockchip;
@@ -784,6 +788,7 @@ static int rockchip_pcie_ep_init_ob_mem(struct rockchip_pcie_ep *ep)
SZ_1M);
if (!ep->irq_cpu_addr) {
dev_err(dev, "failed to reserve memory space for MSI\n");
+ err = -ENOMEM;
goto err_epc_mem_exit;
}
diff --git a/drivers/pci/controller/pcie-rockchip.c b/drivers/pci/controller/pcie-rockchip.c
index b9ade7632e11..0f88da378805 100644
--- a/drivers/pci/controller/pcie-rockchip.c
+++ b/drivers/pci/controller/pcie-rockchip.c
@@ -30,7 +30,7 @@ int rockchip_pcie_parse_dt(struct rockchip_pcie *rockchip)
struct platform_device *pdev = to_platform_device(dev);
struct device_node *node = dev->of_node;
struct resource *regs;
- int err;
+ int err, i;
if (rockchip->is_rc) {
regs = platform_get_resource_byname(pdev,
@@ -69,55 +69,23 @@ int rockchip_pcie_parse_dt(struct rockchip_pcie *rockchip)
if (rockchip->link_gen < 0 || rockchip->link_gen > 2)
rockchip->link_gen = 2;
- rockchip->core_rst = devm_reset_control_get_exclusive(dev, "core");
- if (IS_ERR(rockchip->core_rst)) {
- if (PTR_ERR(rockchip->core_rst) != -EPROBE_DEFER)
- dev_err(dev, "missing core reset property in node\n");
- return PTR_ERR(rockchip->core_rst);
- }
-
- rockchip->mgmt_rst = devm_reset_control_get_exclusive(dev, "mgmt");
- if (IS_ERR(rockchip->mgmt_rst)) {
- if (PTR_ERR(rockchip->mgmt_rst) != -EPROBE_DEFER)
- dev_err(dev, "missing mgmt reset property in node\n");
- return PTR_ERR(rockchip->mgmt_rst);
- }
-
- rockchip->mgmt_sticky_rst = devm_reset_control_get_exclusive(dev,
- "mgmt-sticky");
- if (IS_ERR(rockchip->mgmt_sticky_rst)) {
- if (PTR_ERR(rockchip->mgmt_sticky_rst) != -EPROBE_DEFER)
- dev_err(dev, "missing mgmt-sticky reset property in node\n");
- return PTR_ERR(rockchip->mgmt_sticky_rst);
- }
+ for (i = 0; i < ROCKCHIP_NUM_PM_RSTS; i++)
+ rockchip->pm_rsts[i].id = rockchip_pci_pm_rsts[i];
- rockchip->pipe_rst = devm_reset_control_get_exclusive(dev, "pipe");
- if (IS_ERR(rockchip->pipe_rst)) {
- if (PTR_ERR(rockchip->pipe_rst) != -EPROBE_DEFER)
- dev_err(dev, "missing pipe reset property in node\n");
- return PTR_ERR(rockchip->pipe_rst);
- }
-
- rockchip->pm_rst = devm_reset_control_get_exclusive(dev, "pm");
- if (IS_ERR(rockchip->pm_rst)) {
- if (PTR_ERR(rockchip->pm_rst) != -EPROBE_DEFER)
- dev_err(dev, "missing pm reset property in node\n");
- return PTR_ERR(rockchip->pm_rst);
- }
+ err = devm_reset_control_bulk_get_exclusive(dev,
+ ROCKCHIP_NUM_PM_RSTS,
+ rockchip->pm_rsts);
+ if (err)
+ return dev_err_probe(dev, err, "Cannot get the PM reset\n");
- rockchip->pclk_rst = devm_reset_control_get_exclusive(dev, "pclk");
- if (IS_ERR(rockchip->pclk_rst)) {
- if (PTR_ERR(rockchip->pclk_rst) != -EPROBE_DEFER)
- dev_err(dev, "missing pclk reset property in node\n");
- return PTR_ERR(rockchip->pclk_rst);
- }
+ for (i = 0; i < ROCKCHIP_NUM_CORE_RSTS; i++)
+ rockchip->core_rsts[i].id = rockchip_pci_core_rsts[i];
- rockchip->aclk_rst = devm_reset_control_get_exclusive(dev, "aclk");
- if (IS_ERR(rockchip->aclk_rst)) {
- if (PTR_ERR(rockchip->aclk_rst) != -EPROBE_DEFER)
- dev_err(dev, "missing aclk reset property in node\n");
- return PTR_ERR(rockchip->aclk_rst);
- }
+ err = devm_reset_control_bulk_get_exclusive(dev,
+ ROCKCHIP_NUM_CORE_RSTS,
+ rockchip->core_rsts);
+ if (err)
+ return dev_err_probe(dev, err, "Cannot get the Core resets\n");
if (rockchip->is_rc)
rockchip->perst_gpio = devm_gpiod_get_optional(dev, "ep",
@@ -129,29 +97,10 @@ int rockchip_pcie_parse_dt(struct rockchip_pcie *rockchip)
return dev_err_probe(dev, PTR_ERR(rockchip->perst_gpio),
"failed to get PERST# GPIO\n");
- rockchip->aclk_pcie = devm_clk_get(dev, "aclk");
- if (IS_ERR(rockchip->aclk_pcie)) {
- dev_err(dev, "aclk clock not found\n");
- return PTR_ERR(rockchip->aclk_pcie);
- }
-
- rockchip->aclk_perf_pcie = devm_clk_get(dev, "aclk-perf");
- if (IS_ERR(rockchip->aclk_perf_pcie)) {
- dev_err(dev, "aclk_perf clock not found\n");
- return PTR_ERR(rockchip->aclk_perf_pcie);
- }
-
- rockchip->hclk_pcie = devm_clk_get(dev, "hclk");
- if (IS_ERR(rockchip->hclk_pcie)) {
- dev_err(dev, "hclk clock not found\n");
- return PTR_ERR(rockchip->hclk_pcie);
- }
-
- rockchip->clk_pcie_pm = devm_clk_get(dev, "pm");
- if (IS_ERR(rockchip->clk_pcie_pm)) {
- dev_err(dev, "pm clock not found\n");
- return PTR_ERR(rockchip->clk_pcie_pm);
- }
+ rockchip->num_clks = devm_clk_bulk_get_all(dev, &rockchip->clks);
+ if (rockchip->num_clks < 0)
+ return dev_err_probe(dev, rockchip->num_clks,
+ "failed to get clocks\n");
return 0;
}
@@ -169,23 +118,10 @@ int rockchip_pcie_init_port(struct rockchip_pcie *rockchip)
int err, i;
u32 regs;
- err = reset_control_assert(rockchip->aclk_rst);
- if (err) {
- dev_err(dev, "assert aclk_rst err %d\n", err);
- return err;
- }
-
- err = reset_control_assert(rockchip->pclk_rst);
- if (err) {
- dev_err(dev, "assert pclk_rst err %d\n", err);
- return err;
- }
-
- err = reset_control_assert(rockchip->pm_rst);
- if (err) {
- dev_err(dev, "assert pm_rst err %d\n", err);
- return err;
- }
+ err = reset_control_bulk_assert(ROCKCHIP_NUM_PM_RSTS,
+ rockchip->pm_rsts);
+ if (err)
+ return dev_err_probe(dev, err, "Couldn't assert PM resets\n");
for (i = 0; i < MAX_LANE_NUM; i++) {
err = phy_init(rockchip->phys[i]);
@@ -195,47 +131,19 @@ int rockchip_pcie_init_port(struct rockchip_pcie *rockchip)
}
}
- err = reset_control_assert(rockchip->core_rst);
- if (err) {
- dev_err(dev, "assert core_rst err %d\n", err);
- goto err_exit_phy;
- }
-
- err = reset_control_assert(rockchip->mgmt_rst);
- if (err) {
- dev_err(dev, "assert mgmt_rst err %d\n", err);
- goto err_exit_phy;
- }
-
- err = reset_control_assert(rockchip->mgmt_sticky_rst);
- if (err) {
- dev_err(dev, "assert mgmt_sticky_rst err %d\n", err);
- goto err_exit_phy;
- }
-
- err = reset_control_assert(rockchip->pipe_rst);
+ err = reset_control_bulk_assert(ROCKCHIP_NUM_CORE_RSTS,
+ rockchip->core_rsts);
if (err) {
- dev_err(dev, "assert pipe_rst err %d\n", err);
+ dev_err_probe(dev, err, "Couldn't assert Core resets\n");
goto err_exit_phy;
}
udelay(10);
- err = reset_control_deassert(rockchip->pm_rst);
- if (err) {
- dev_err(dev, "deassert pm_rst err %d\n", err);
- goto err_exit_phy;
- }
-
- err = reset_control_deassert(rockchip->aclk_rst);
- if (err) {
- dev_err(dev, "deassert aclk_rst err %d\n", err);
- goto err_exit_phy;
- }
-
- err = reset_control_deassert(rockchip->pclk_rst);
+ err = reset_control_bulk_deassert(ROCKCHIP_NUM_PM_RSTS,
+ rockchip->pm_rsts);
if (err) {
- dev_err(dev, "deassert pclk_rst err %d\n", err);
+ dev_err(dev, "Couldn't deassert PM resets %d\n", err);
goto err_exit_phy;
}
@@ -275,31 +183,10 @@ int rockchip_pcie_init_port(struct rockchip_pcie *rockchip)
goto err_power_off_phy;
}
- /*
- * Please don't reorder the deassert sequence of the following
- * four reset pins.
- */
- err = reset_control_deassert(rockchip->mgmt_sticky_rst);
- if (err) {
- dev_err(dev, "deassert mgmt_sticky_rst err %d\n", err);
- goto err_power_off_phy;
- }
-
- err = reset_control_deassert(rockchip->core_rst);
- if (err) {
- dev_err(dev, "deassert core_rst err %d\n", err);
- goto err_power_off_phy;
- }
-
- err = reset_control_deassert(rockchip->mgmt_rst);
- if (err) {
- dev_err(dev, "deassert mgmt_rst err %d\n", err);
- goto err_power_off_phy;
- }
-
- err = reset_control_deassert(rockchip->pipe_rst);
+ err = reset_control_bulk_deassert(ROCKCHIP_NUM_CORE_RSTS,
+ rockchip->core_rsts);
if (err) {
- dev_err(dev, "deassert pipe_rst err %d\n", err);
+ dev_err(dev, "Couldn't deassert Core reset %d\n", err);
goto err_power_off_phy;
}
@@ -375,50 +262,18 @@ int rockchip_pcie_enable_clocks(struct rockchip_pcie *rockchip)
struct device *dev = rockchip->dev;
int err;
- err = clk_prepare_enable(rockchip->aclk_pcie);
- if (err) {
- dev_err(dev, "unable to enable aclk_pcie clock\n");
- return err;
- }
-
- err = clk_prepare_enable(rockchip->aclk_perf_pcie);
- if (err) {
- dev_err(dev, "unable to enable aclk_perf_pcie clock\n");
- goto err_aclk_perf_pcie;
- }
-
- err = clk_prepare_enable(rockchip->hclk_pcie);
- if (err) {
- dev_err(dev, "unable to enable hclk_pcie clock\n");
- goto err_hclk_pcie;
- }
-
- err = clk_prepare_enable(rockchip->clk_pcie_pm);
- if (err) {
- dev_err(dev, "unable to enable clk_pcie_pm clock\n");
- goto err_clk_pcie_pm;
- }
+ err = clk_bulk_prepare_enable(rockchip->num_clks, rockchip->clks);
+ if (err)
+ return dev_err_probe(dev, err, "failed to enable clocks\n");
return 0;
-
-err_clk_pcie_pm:
- clk_disable_unprepare(rockchip->hclk_pcie);
-err_hclk_pcie:
- clk_disable_unprepare(rockchip->aclk_perf_pcie);
-err_aclk_perf_pcie:
- clk_disable_unprepare(rockchip->aclk_pcie);
- return err;
}
EXPORT_SYMBOL_GPL(rockchip_pcie_enable_clocks);
-void rockchip_pcie_disable_clocks(void *data)
+void rockchip_pcie_disable_clocks(struct rockchip_pcie *rockchip)
{
- struct rockchip_pcie *rockchip = data;
- clk_disable_unprepare(rockchip->clk_pcie_pm);
- clk_disable_unprepare(rockchip->hclk_pcie);
- clk_disable_unprepare(rockchip->aclk_perf_pcie);
- clk_disable_unprepare(rockchip->aclk_pcie);
+ clk_bulk_disable_unprepare(rockchip->num_clks, rockchip->clks);
}
EXPORT_SYMBOL_GPL(rockchip_pcie_disable_clocks);
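A minimal sketch of the clk_bulk/reset_control_bulk pattern this Rockchip conversion adopts, shown outside the driver. The structure, reset names, and function names below are illustrative; only the bulk API calls match what the patch uses.

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/reset.h>

static const char * const example_rst_names[] = { "pm", "pclk", "aclk" };
#define EXAMPLE_NUM_RSTS ARRAY_SIZE(example_rst_names)

struct example_pcie {
	struct clk_bulk_data *clks;
	int num_clks;
	struct reset_control_bulk_data rsts[EXAMPLE_NUM_RSTS];
};

static int example_get_and_enable(struct device *dev, struct example_pcie *p)
{
	int i, err;

	/* One call replaces a devm_clk_get() per named clock */
	p->num_clks = devm_clk_bulk_get_all(dev, &p->clks);
	if (p->num_clks < 0)
		return dev_err_probe(dev, p->num_clks,
				     "failed to get clocks\n");

	/* Fill in the reset IDs, then fetch the whole group at once */
	for (i = 0; i < EXAMPLE_NUM_RSTS; i++)
		p->rsts[i].id = example_rst_names[i];

	err = devm_reset_control_bulk_get_exclusive(dev, EXAMPLE_NUM_RSTS,
						    p->rsts);
	if (err)
		return dev_err_probe(dev, err, "failed to get resets\n");

	/* Asserting/enabling is likewise one call per group */
	err = reset_control_bulk_assert(EXAMPLE_NUM_RSTS, p->rsts);
	if (err)
		return err;

	return clk_bulk_prepare_enable(p->num_clks, p->clks);
}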
diff --git a/drivers/pci/controller/pcie-rockchip.h b/drivers/pci/controller/pcie-rockchip.h
index a51b087ce878..11def598534b 100644
--- a/drivers/pci/controller/pcie-rockchip.h
+++ b/drivers/pci/controller/pcie-rockchip.h
@@ -11,9 +11,11 @@
#ifndef _PCIE_ROCKCHIP_H
#define _PCIE_ROCKCHIP_H
+#include <linux/clk.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/pci-ecam.h>
+#include <linux/reset.h>
/*
* The upper 16 bits of PCIE_CLIENT_CONFIG are a write mask for the lower 16
@@ -309,22 +311,31 @@
(((c) << ((b) * 8 + 5)) & \
ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG_BAR_CTRL_MASK(b))
+#define ROCKCHIP_NUM_PM_RSTS ARRAY_SIZE(rockchip_pci_pm_rsts)
+#define ROCKCHIP_NUM_CORE_RSTS ARRAY_SIZE(rockchip_pci_core_rsts)
+
+static const char * const rockchip_pci_pm_rsts[] = {
+ "pm",
+ "pclk",
+ "aclk",
+};
+
+static const char * const rockchip_pci_core_rsts[] = {
+ "mgmt-sticky",
+ "core",
+ "mgmt",
+ "pipe",
+};
+
struct rockchip_pcie {
void __iomem *reg_base; /* DT axi-base */
void __iomem *apb_base; /* DT apb-base */
bool legacy_phy;
struct phy *phys[MAX_LANE_NUM];
- struct reset_control *core_rst;
- struct reset_control *mgmt_rst;
- struct reset_control *mgmt_sticky_rst;
- struct reset_control *pipe_rst;
- struct reset_control *pm_rst;
- struct reset_control *aclk_rst;
- struct reset_control *pclk_rst;
- struct clk *aclk_pcie;
- struct clk *aclk_perf_pcie;
- struct clk *hclk_pcie;
- struct clk *clk_pcie_pm;
+ struct reset_control_bulk_data pm_rsts[ROCKCHIP_NUM_PM_RSTS];
+ struct reset_control_bulk_data core_rsts[ROCKCHIP_NUM_CORE_RSTS];
+ struct clk_bulk_data *clks;
+ int num_clks;
struct regulator *vpcie12v; /* 12V power supply */
struct regulator *vpcie3v3; /* 3.3V power supply */
struct regulator *vpcie1v8; /* 1.8V power supply */
@@ -358,7 +369,7 @@ int rockchip_pcie_init_port(struct rockchip_pcie *rockchip);
int rockchip_pcie_get_phys(struct rockchip_pcie *rockchip);
void rockchip_pcie_deinit_phys(struct rockchip_pcie *rockchip);
int rockchip_pcie_enable_clocks(struct rockchip_pcie *rockchip);
-void rockchip_pcie_disable_clocks(void *data);
+void rockchip_pcie_disable_clocks(struct rockchip_pcie *rockchip);
void rockchip_pcie_cfg_configuration_accesses(
struct rockchip_pcie *rockchip, u32 type);
diff --git a/drivers/pci/controller/pcie-xilinx-cpm.c b/drivers/pci/controller/pcie-xilinx-cpm.c
index a0f5e1d67b04..81e8bfae53d0 100644
--- a/drivers/pci/controller/pcie-xilinx-cpm.c
+++ b/drivers/pci/controller/pcie-xilinx-cpm.c
@@ -30,11 +30,14 @@
#define XILINX_CPM_PCIE_REG_IDRN_MASK 0x00000E3C
#define XILINX_CPM_PCIE_MISC_IR_STATUS 0x00000340
#define XILINX_CPM_PCIE_MISC_IR_ENABLE 0x00000348
-#define XILINX_CPM_PCIE_MISC_IR_LOCAL BIT(1)
+#define XILINX_CPM_PCIE0_MISC_IR_LOCAL BIT(1)
+#define XILINX_CPM_PCIE1_MISC_IR_LOCAL BIT(2)
-#define XILINX_CPM_PCIE_IR_STATUS 0x000002A0
-#define XILINX_CPM_PCIE_IR_ENABLE 0x000002A8
-#define XILINX_CPM_PCIE_IR_LOCAL BIT(0)
+#define XILINX_CPM_PCIE0_IR_STATUS 0x000002A0
+#define XILINX_CPM_PCIE1_IR_STATUS 0x000002B4
+#define XILINX_CPM_PCIE0_IR_ENABLE 0x000002A8
+#define XILINX_CPM_PCIE1_IR_ENABLE 0x000002BC
+#define XILINX_CPM_PCIE_IR_LOCAL BIT(0)
#define IMR(x) BIT(XILINX_PCIE_INTR_ ##x)
@@ -80,14 +83,21 @@
enum xilinx_cpm_version {
CPM,
CPM5,
+ CPM5_HOST1,
};
/**
* struct xilinx_cpm_variant - CPM variant information
* @version: CPM version
+ * @ir_status: Offset for the error interrupt status register
+ * @ir_enable: Offset for the CPM5 local error interrupt enable register
+ * @ir_misc_value: Bitmask written to the miscellaneous interrupt enable register
*/
struct xilinx_cpm_variant {
enum xilinx_cpm_version version;
+ u32 ir_status;
+ u32 ir_enable;
+ u32 ir_misc_value;
};
/**
@@ -269,6 +279,7 @@ static void xilinx_cpm_pcie_event_flow(struct irq_desc *desc)
{
struct xilinx_cpm_pcie *port = irq_desc_get_handler_data(desc);
struct irq_chip *chip = irq_desc_get_chip(desc);
+ const struct xilinx_cpm_variant *variant = port->variant;
unsigned long val;
int i;
@@ -279,11 +290,11 @@ static void xilinx_cpm_pcie_event_flow(struct irq_desc *desc)
generic_handle_domain_irq(port->cpm_domain, i);
pcie_write(port, val, XILINX_CPM_PCIE_REG_IDR);
- if (port->variant->version == CPM5) {
- val = readl_relaxed(port->cpm_base + XILINX_CPM_PCIE_IR_STATUS);
+ if (variant->ir_status) {
+ val = readl_relaxed(port->cpm_base + variant->ir_status);
if (val)
writel_relaxed(val, port->cpm_base +
- XILINX_CPM_PCIE_IR_STATUS);
+ variant->ir_status);
}
/*
@@ -465,6 +476,8 @@ static int xilinx_cpm_setup_irq(struct xilinx_cpm_pcie *port)
*/
static void xilinx_cpm_pcie_init_port(struct xilinx_cpm_pcie *port)
{
+ const struct xilinx_cpm_variant *variant = port->variant;
+
if (cpm_pcie_link_up(port))
dev_info(port->dev, "PCIe Link is UP\n");
else
@@ -483,15 +496,15 @@ static void xilinx_cpm_pcie_init_port(struct xilinx_cpm_pcie *port)
* XILINX_CPM_PCIE_MISC_IR_ENABLE register is mapped to
* CPM SLCR block.
*/
- writel(XILINX_CPM_PCIE_MISC_IR_LOCAL,
+ writel(variant->ir_misc_value,
port->cpm_base + XILINX_CPM_PCIE_MISC_IR_ENABLE);
- if (port->variant->version == CPM5) {
+ if (variant->ir_enable) {
writel(XILINX_CPM_PCIE_IR_LOCAL,
- port->cpm_base + XILINX_CPM_PCIE_IR_ENABLE);
+ port->cpm_base + variant->ir_enable);
}
- /* Enable the Bridge enable bit */
+ /* Set Bridge enable bit */
pcie_write(port, pcie_read(port, XILINX_CPM_PCIE_REG_RPSC) |
XILINX_CPM_PCIE_REG_RPSC_BEN,
XILINX_CPM_PCIE_REG_RPSC);
@@ -609,10 +622,21 @@ err_parse_dt:
static const struct xilinx_cpm_variant cpm_host = {
.version = CPM,
+ .ir_misc_value = XILINX_CPM_PCIE0_MISC_IR_LOCAL,
};
static const struct xilinx_cpm_variant cpm5_host = {
.version = CPM5,
+ .ir_misc_value = XILINX_CPM_PCIE0_MISC_IR_LOCAL,
+ .ir_status = XILINX_CPM_PCIE0_IR_STATUS,
+ .ir_enable = XILINX_CPM_PCIE0_IR_ENABLE,
+};
+
+static const struct xilinx_cpm_variant cpm5_host1 = {
+ .version = CPM5_HOST1,
+ .ir_misc_value = XILINX_CPM_PCIE1_MISC_IR_LOCAL,
+ .ir_status = XILINX_CPM_PCIE1_IR_STATUS,
+ .ir_enable = XILINX_CPM_PCIE1_IR_ENABLE,
};
static const struct of_device_id xilinx_cpm_pcie_of_match[] = {
@@ -624,6 +648,10 @@ static const struct of_device_id xilinx_cpm_pcie_of_match[] = {
.compatible = "xlnx,versal-cpm5-host",
.data = &cpm5_host,
},
+ {
+ .compatible = "xlnx,versal-cpm5-host1",
+ .data = &cpm5_host1,
+ },
{}
};
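The CPM5 host-1 support above relies on the usual per-compatible variant pattern: each compatible string carries a pointer to a small struct holding its register offsets. A self-contained sketch of that pattern follows; the compatible strings, offsets, and struct fields are invented for illustration, not taken from the driver.

#include <linux/mod_devicetable.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>

struct example_variant {
	u32 ir_status;	/* per-instance status register offset */
	u32 ir_enable;	/* per-instance enable register offset */
};

static const struct example_variant example_host0 = {
	.ir_status = 0x2a0,
	.ir_enable = 0x2a8,
};

static const struct example_variant example_host1 = {
	.ir_status = 0x2b4,
	.ir_enable = 0x2bc,
};

static const struct of_device_id example_of_match[] = {
	{ .compatible = "vendor,example-host0", .data = &example_host0 },
	{ .compatible = "vendor,example-host1", .data = &example_host1 },
	{ }
};

static int example_probe(struct platform_device *pdev)
{
	/* Returns the .data pointer of the matching of_device_id entry */
	const struct example_variant *variant =
		of_device_get_match_data(&pdev->dev);

	if (!variant)
		return -ENODEV;

	dev_info(&pdev->dev, "IR status at %#x, enable at %#x\n",
		 variant->ir_status, variant->ir_enable);
	return 0;
}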
diff --git a/drivers/pci/controller/plda/pcie-microchip-host.c b/drivers/pci/controller/plda/pcie-microchip-host.c
index 6630cacef301..3fdfffdf0270 100644
--- a/drivers/pci/controller/plda/pcie-microchip-host.c
+++ b/drivers/pci/controller/plda/pcie-microchip-host.c
@@ -7,20 +7,27 @@
* Author: Daire McNamara <daire.mcnamara@microchip.com>
*/
+#include <linux/align.h>
+#include <linux/bits.h>
#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
+#include <linux/log2.h>
#include <linux/module.h>
#include <linux/msi.h>
#include <linux/of_address.h>
#include <linux/of_pci.h>
#include <linux/pci-ecam.h>
#include <linux/platform_device.h>
+#include <linux/wordpart.h>
#include "../../pci.h"
#include "pcie-plda.h"
+#define MC_MAX_NUM_INBOUND_WINDOWS 8
+#define MPFS_NC_BOUNCE_ADDR 0x80000000
+
/* PCIe Bridge Phy and Controller Phy offsets */
#define MC_PCIE1_BRIDGE_ADDR 0x00008000u
#define MC_PCIE1_CTRL_ADDR 0x0000a000u
@@ -607,6 +614,91 @@ static void mc_disable_interrupts(struct mc_pcie *port)
writel_relaxed(GENMASK(31, 0), port->bridge_base_addr + ISTATUS_HOST);
}
+static void mc_pcie_setup_inbound_atr(struct mc_pcie *port, int window_index,
+ u64 axi_addr, u64 pcie_addr, u64 size)
+{
+ u32 table_offset = window_index * ATR_ENTRY_SIZE;
+ void __iomem *table_addr = port->bridge_base_addr + table_offset;
+ u32 atr_sz;
+ u32 val;
+
+ atr_sz = ilog2(size) - 1;
+
+ val = ALIGN_DOWN(lower_32_bits(pcie_addr), SZ_4K);
+ val |= FIELD_PREP(ATR_SIZE_MASK, atr_sz);
+ val |= ATR_IMPL_ENABLE;
+
+ writel(val, table_addr + ATR0_PCIE_WIN0_SRCADDR_PARAM);
+
+ writel(upper_32_bits(pcie_addr), table_addr + ATR0_PCIE_WIN0_SRC_ADDR);
+
+ writel(lower_32_bits(axi_addr), table_addr + ATR0_PCIE_WIN0_TRSL_ADDR_LSB);
+ writel(upper_32_bits(axi_addr), table_addr + ATR0_PCIE_WIN0_TRSL_ADDR_UDW);
+
+ writel(TRSL_ID_AXI4_MASTER_0, table_addr + ATR0_PCIE_WIN0_TRSL_PARAM);
+}
+
+static int mc_pcie_setup_inbound_ranges(struct platform_device *pdev,
+ struct mc_pcie *port)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *dn = dev->of_node;
+ struct of_range_parser parser;
+ struct of_range range;
+ int atr_index = 0;
+
+ /*
+ * MPFS PCIe Root Port is 32-bit only, behind a Fabric Interface
+ * Controller FPGA logic block which contains the AXI-S interface.
+ *
+ * From the point of view of the PCIe Root Port, there are only two
+ * supported Root Port configurations:
+ *
+ * Configuration 1: for use with fully coherent designs; supports a
+ * window from 0x0 (CPU space) to specified PCIe space.
+ *
+ * Configuration 2: for use with non-coherent designs; supports two
+ * 1 GB windows to CPU space; one mapping CPU space 0 to PCIe space
+ * 0x80000000 and a second mapping CPU space 0x40000000 to PCIe
+ * space 0xc0000000. This cfg needs two windows because of how the
+ * MSI space is allocated in the AXI-S range on MPFS.
+ *
+ * The FIC interface outside the PCIe block *must* complete the
+ * inbound address translation as per MCHP MPFS FPGA design
+ * guidelines.
+ */
+ if (device_property_read_bool(dev, "dma-noncoherent")) {
+ /*
+ * The same two tables are always needed in this case, due to
+ * hardware interactions between address and size.
+ */
+ mc_pcie_setup_inbound_atr(port, 0, 0,
+ MPFS_NC_BOUNCE_ADDR, SZ_1G);
+ mc_pcie_setup_inbound_atr(port, 1, SZ_1G,
+ MPFS_NC_BOUNCE_ADDR + SZ_1G, SZ_1G);
+ } else {
+ /* Find any DMA ranges */
+ if (of_pci_dma_range_parser_init(&parser, dn)) {
+ /* No DMA range property - setup default */
+ mc_pcie_setup_inbound_atr(port, 0, 0, 0, SZ_4G);
+ return 0;
+ }
+
+ for_each_of_range(&parser, &range) {
+ if (atr_index >= MC_MAX_NUM_INBOUND_WINDOWS) {
+ dev_err(dev, "too many inbound ranges; %d available tables\n",
+ MC_MAX_NUM_INBOUND_WINDOWS);
+ return -EINVAL;
+ }
+ mc_pcie_setup_inbound_atr(port, atr_index, 0,
+ range.pci_addr, range.size);
+ atr_index++;
+ }
+ }
+
+ return 0;
+}
+
static int mc_platform_init(struct pci_config_window *cfg)
{
struct device *dev = cfg->parent;
@@ -627,6 +719,10 @@ static int mc_platform_init(struct pci_config_window *cfg)
if (ret)
return ret;
+ ret = mc_pcie_setup_inbound_ranges(pdev, port);
+ if (ret)
+ return ret;
+
port->plda.event_ops = &mc_event_ops;
port->plda.event_irq_chip = &mc_event_irq_chip;
port->plda.events_bitmap = GENMASK(NUM_EVENTS - 1, 0);
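Below is a sketch of the dma-ranges walk behind mc_pcie_setup_inbound_ranges() above. Only the of_range parser usage and the ilog2()-based size encoding mirror the patch; the window-programming helper and its pr_info() output are placeholders.

#include <linux/log2.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/printk.h>

/* Hypothetical stand-in for programming one inbound translation window */
static void example_program_window(int index, u64 cpu_addr, u64 pci_addr,
				   u64 size)
{
	u32 atr_sz = ilog2(size) - 1;	/* ATR encodes the size as ilog2() - 1 */

	pr_info("window %d: cpu %#llx pci %#llx atr_sz %u\n",
		index, cpu_addr, pci_addr, atr_sz);
}

static int example_parse_dma_ranges(struct device_node *np, int max_windows)
{
	struct of_range_parser parser;
	struct of_range range;
	int index = 0;

	/* A non-zero return means the node has no dma-ranges property */
	if (of_pci_dma_range_parser_init(&parser, np))
		return 0;

	for_each_of_range(&parser, &range) {
		if (index >= max_windows)
			return -EINVAL;

		example_program_window(index++, range.cpu_addr,
				       range.pci_addr, range.size);
	}

	return 0;
}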
diff --git a/drivers/pci/controller/plda/pcie-plda-host.c b/drivers/pci/controller/plda/pcie-plda-host.c
index 8533dc618d45..4153214ca410 100644
--- a/drivers/pci/controller/plda/pcie-plda-host.c
+++ b/drivers/pci/controller/plda/pcie-plda-host.c
@@ -8,11 +8,14 @@
* Author: Daire McNamara <daire.mcnamara@microchip.com>
*/
+#include <linux/align.h>
+#include <linux/bitfield.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/msi.h>
#include <linux/pci_regs.h>
#include <linux/pci-ecam.h>
+#include <linux/wordpart.h>
#include "pcie-plda.h"
@@ -502,8 +505,9 @@ void plda_pcie_setup_window(void __iomem *bridge_base_addr, u32 index,
writel(val, bridge_base_addr + (index * ATR_ENTRY_SIZE) +
ATR0_AXI4_SLV0_TRSL_PARAM);
- val = lower_32_bits(axi_addr) | (atr_sz << ATR_SIZE_SHIFT) |
- ATR_IMPL_ENABLE;
+ val = ALIGN_DOWN(lower_32_bits(axi_addr), SZ_4K);
+ val |= FIELD_PREP(ATR_SIZE_MASK, atr_sz);
+ val |= ATR_IMPL_ENABLE;
writel(val, bridge_base_addr + (index * ATR_ENTRY_SIZE) +
ATR0_AXI4_SLV0_SRCADDR_PARAM);
@@ -518,13 +522,20 @@ void plda_pcie_setup_window(void __iomem *bridge_base_addr, u32 index,
val = upper_32_bits(pci_addr);
writel(val, bridge_base_addr + (index * ATR_ENTRY_SIZE) +
ATR0_AXI4_SLV0_TRSL_ADDR_UDW);
+}
+EXPORT_SYMBOL_GPL(plda_pcie_setup_window);
+
+void plda_pcie_setup_inbound_address_translation(struct plda_pcie_rp *port)
+{
+ void __iomem *bridge_base_addr = port->bridge_addr;
+ u32 val;
val = readl(bridge_base_addr + ATR0_PCIE_WIN0_SRCADDR_PARAM);
val |= (ATR0_PCIE_ATR_SIZE << ATR0_PCIE_ATR_SIZE_SHIFT);
writel(val, bridge_base_addr + ATR0_PCIE_WIN0_SRCADDR_PARAM);
writel(0, bridge_base_addr + ATR0_PCIE_WIN0_SRC_ADDR);
}
-EXPORT_SYMBOL_GPL(plda_pcie_setup_window);
+EXPORT_SYMBOL_GPL(plda_pcie_setup_inbound_address_translation);
int plda_pcie_setup_iomems(struct pci_host_bridge *bridge,
struct plda_pcie_rp *port)
diff --git a/drivers/pci/controller/plda/pcie-plda.h b/drivers/pci/controller/plda/pcie-plda.h
index 0e7dc0d8e5ba..61ece26065ea 100644
--- a/drivers/pci/controller/plda/pcie-plda.h
+++ b/drivers/pci/controller/plda/pcie-plda.h
@@ -89,14 +89,15 @@
/* PCIe AXI slave table init defines */
#define ATR0_AXI4_SLV0_SRCADDR_PARAM 0x800u
-#define ATR_SIZE_SHIFT 1
-#define ATR_IMPL_ENABLE 1
+#define ATR_SIZE_MASK GENMASK(6, 1)
+#define ATR_IMPL_ENABLE BIT(0)
#define ATR0_AXI4_SLV0_SRC_ADDR 0x804u
#define ATR0_AXI4_SLV0_TRSL_ADDR_LSB 0x808u
#define ATR0_AXI4_SLV0_TRSL_ADDR_UDW 0x80cu
#define ATR0_AXI4_SLV0_TRSL_PARAM 0x810u
#define PCIE_TX_RX_INTERFACE 0x00000000u
#define PCIE_CONFIG_INTERFACE 0x00000001u
+#define TRSL_ID_AXI4_MASTER_0 0x00000004u
#define CONFIG_SPACE_ADDR_OFFSET 0x1000u
@@ -204,6 +205,7 @@ int plda_init_interrupts(struct platform_device *pdev,
void plda_pcie_setup_window(void __iomem *bridge_base_addr, u32 index,
phys_addr_t axi_addr, phys_addr_t pci_addr,
size_t size);
+void plda_pcie_setup_inbound_address_translation(struct plda_pcie_rp *port);
int plda_pcie_setup_iomems(struct pci_host_bridge *bridge,
struct plda_pcie_rp *port);
int plda_pcie_host_init(struct plda_pcie_rp *port, struct pci_ops *ops,
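For reference, a small sketch of the FIELD_PREP()/GENMASK() encoding that replaces the open-coded "atr_sz << ATR_SIZE_SHIFT" in the PLDA/Microchip hunks above. The mask and bit values mirror the new defines; the function and macro names here are illustrative.

#include <linux/align.h>
#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/sizes.h>
#include <linux/types.h>
#include <linux/wordpart.h>

#define EXAMPLE_ATR_SIZE_MASK	GENMASK(6, 1)	/* mirrors ATR_SIZE_MASK */
#define EXAMPLE_ATR_IMPL_ENABLE	BIT(0)		/* mirrors ATR_IMPL_ENABLE */

static u32 example_srcaddr_param(u64 src_addr, u32 atr_sz)
{
	u32 val;

	/* Upper bits carry the 4 KB aligned low half of the source address */
	val = ALIGN_DOWN(lower_32_bits(src_addr), SZ_4K);
	/* Bits 6:1 carry the encoded window size */
	val |= FIELD_PREP(EXAMPLE_ATR_SIZE_MASK, atr_sz);
	/* Bit 0 marks the translation entry as implemented/enabled */
	val |= EXAMPLE_ATR_IMPL_ENABLE;

	return val;
}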
diff --git a/drivers/pci/devres.c b/drivers/pci/devres.c
index 3b59a86a764b..3431a7df3e0d 100644
--- a/drivers/pci/devres.c
+++ b/drivers/pci/devres.c
@@ -101,7 +101,7 @@ static inline void pcim_addr_devres_clear(struct pcim_addr_devres *res)
* @bar: BAR the range is within
* @offset: offset from the BAR's start address
* @maxlen: length in bytes, beginning at @offset
- * @name: name associated with the request
+ * @name: name of the driver requesting the resource
* @req_flags: flags for the request, e.g., for kernel-exclusive requests
*
* Returns: 0 on success, a negative error code on failure.
@@ -411,46 +411,20 @@ static inline bool mask_contains_bar(int mask, int bar)
return mask & BIT(bar);
}
-/*
- * This is a copy of pci_intx() used to bypass the problem of recursive
- * function calls due to the hybrid nature of pci_intx().
- */
-static void __pcim_intx(struct pci_dev *pdev, int enable)
-{
- u16 pci_command, new;
-
- pci_read_config_word(pdev, PCI_COMMAND, &pci_command);
-
- if (enable)
- new = pci_command & ~PCI_COMMAND_INTX_DISABLE;
- else
- new = pci_command | PCI_COMMAND_INTX_DISABLE;
-
- if (new != pci_command)
- pci_write_config_word(pdev, PCI_COMMAND, new);
-}
-
static void pcim_intx_restore(struct device *dev, void *data)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct pcim_intx_devres *res = data;
- __pcim_intx(pdev, res->orig_intx);
+ pci_intx(pdev, res->orig_intx);
}
-static struct pcim_intx_devres *get_or_create_intx_devres(struct device *dev)
+static void save_orig_intx(struct pci_dev *pdev, struct pcim_intx_devres *res)
{
- struct pcim_intx_devres *res;
-
- res = devres_find(dev, pcim_intx_restore, NULL, NULL);
- if (res)
- return res;
+ u16 pci_command;
- res = devres_alloc(pcim_intx_restore, sizeof(*res), GFP_KERNEL);
- if (res)
- devres_add(dev, res);
-
- return res;
+ pci_read_config_word(pdev, PCI_COMMAND, &pci_command);
+ res->orig_intx = !(pci_command & PCI_COMMAND_INTX_DISABLE);
}
/**
@@ -466,16 +440,28 @@ static struct pcim_intx_devres *get_or_create_intx_devres(struct device *dev)
int pcim_intx(struct pci_dev *pdev, int enable)
{
struct pcim_intx_devres *res;
+ struct device *dev = &pdev->dev;
- res = get_or_create_intx_devres(&pdev->dev);
- if (!res)
- return -ENOMEM;
+ /*
+ * pcim_intx() must only restore the INTx value that existed before the
+ * driver was loaded, i.e., before it called pcim_intx() for the
+ * first time.
+ */
+ res = devres_find(dev, pcim_intx_restore, NULL, NULL);
+ if (!res) {
+ res = devres_alloc(pcim_intx_restore, sizeof(*res), GFP_KERNEL);
+ if (!res)
+ return -ENOMEM;
+
+ save_orig_intx(pdev, res);
+ devres_add(dev, res);
+ }
- res->orig_intx = !enable;
- __pcim_intx(pdev, enable);
+ pci_intx(pdev, enable);
return 0;
}
+EXPORT_SYMBOL_GPL(pcim_intx);
static void pcim_disable_device(void *pdev_raw)
{
@@ -723,7 +709,7 @@ EXPORT_SYMBOL(pcim_iounmap);
* pcim_iomap_region - Request and iomap a PCI BAR
* @pdev: PCI device to map IO resources for
* @bar: Index of a BAR to map
- * @name: Name associated with the request
+ * @name: Name of the driver requesting the resource
*
* Returns: __iomem pointer on success, an IOMEM_ERR_PTR on failure.
*
@@ -790,7 +776,7 @@ EXPORT_SYMBOL(pcim_iounmap_region);
* pcim_iomap_regions - Request and iomap PCI BARs (DEPRECATED)
* @pdev: PCI device to map IO resources for
* @mask: Mask of BARs to request and iomap
- * @name: Name associated with the requests
+ * @name: Name of the driver requesting the resources
*
* Returns: 0 on success, negative error code on failure.
*
@@ -855,9 +841,9 @@ static int _pcim_request_region(struct pci_dev *pdev, int bar, const char *name,
/**
* pcim_request_region - Request a PCI BAR
- * @pdev: PCI device to requestion region for
+ * @pdev: PCI device to request region for
* @bar: Index of BAR to request
- * @name: Name associated with the request
+ * @name: Name of the driver requesting the resource
*
* Returns: 0 on success, a negative error code on failure.
*
@@ -874,9 +860,9 @@ EXPORT_SYMBOL(pcim_request_region);
/**
* pcim_request_region_exclusive - Request a PCI BAR exclusively
- * @pdev: PCI device to requestion region for
+ * @pdev: PCI device to request region for
* @bar: Index of BAR to request
- * @name: Name associated with the request
+ * @name: Name of the driver requesting the resource
*
* Returns: 0 on success, a negative error code on failure.
*
@@ -932,7 +918,7 @@ static void pcim_release_all_regions(struct pci_dev *pdev)
/**
* pcim_request_all_regions - Request all regions
* @pdev: PCI device to map IO resources for
- * @name: name associated with the request
+ * @name: name of the driver requesting the resources
*
* Returns: 0 on success, negative error code on failure.
*
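The pcim_intx() rework above uses the standard devres find-or-allocate idiom so the original state is captured exactly once, on the first call. A minimal sketch of that idiom, with an invented state struct and release callback:

#include <linux/device.h>
#include <linux/gfp.h>

struct example_state {
	int orig_value;		/* value to restore on driver detach */
};

static void example_restore(struct device *dev, void *data)
{
	struct example_state *state = data;

	dev_info(dev, "restoring original value %d\n", state->orig_value);
}

static int example_save_once(struct device *dev, int current_value)
{
	struct example_state *state;

	/* Reuse an existing record so only the very first value is kept */
	state = devres_find(dev, example_restore, NULL, NULL);
	if (state)
		return 0;

	state = devres_alloc(example_restore, sizeof(*state), GFP_KERNEL);
	if (!state)
		return -ENOMEM;

	state->orig_value = current_value;
	devres_add(dev, state);

	return 0;
}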
diff --git a/drivers/pci/endpoint/functions/pci-epf-test.c b/drivers/pci/endpoint/functions/pci-epf-test.c
index ef6677f34116..b94e205ae10b 100644
--- a/drivers/pci/endpoint/functions/pci-epf-test.c
+++ b/drivers/pci/endpoint/functions/pci-epf-test.c
@@ -44,6 +44,8 @@
#define TIMER_RESOLUTION 1
+#define CAP_UNALIGNED_ACCESS BIT(0)
+
static struct workqueue_struct *kpcitest_workqueue;
struct pci_epf_test {
@@ -74,6 +76,7 @@ struct pci_epf_test_reg {
u32 irq_type;
u32 irq_number;
u32 flags;
+ u32 caps;
} __packed;
static struct pci_epf_header test_header = {
@@ -251,7 +254,7 @@ static int pci_epf_test_init_dma_chan(struct pci_epf_test *epf_test)
fail_back_rx:
dma_release_channel(epf_test->dma_chan_rx);
- epf_test->dma_chan_tx = NULL;
+ epf_test->dma_chan_rx = NULL;
fail_back_tx:
dma_cap_zero(mask);
@@ -328,8 +331,8 @@ static void pci_epf_test_copy(struct pci_epf_test *epf_test,
void *copy_buf = NULL, *buf;
if (reg->flags & FLAG_USE_DMA) {
- if (epf_test->dma_private) {
- dev_err(dev, "Cannot transfer data using DMA\n");
+ if (!dma_has_cap(DMA_MEMCPY, epf_test->dma_chan_tx->device->cap_mask)) {
+ dev_err(dev, "DMA controller doesn't support MEMCPY\n");
ret = -EINVAL;
goto set_status;
}
@@ -739,6 +742,20 @@ static void pci_epf_test_clear_bar(struct pci_epf *epf)
}
}
+static void pci_epf_test_set_capabilities(struct pci_epf *epf)
+{
+ struct pci_epf_test *epf_test = epf_get_drvdata(epf);
+ enum pci_barno test_reg_bar = epf_test->test_reg_bar;
+ struct pci_epf_test_reg *reg = epf_test->reg[test_reg_bar];
+ struct pci_epc *epc = epf->epc;
+ u32 caps = 0;
+
+ if (epc->ops->align_addr)
+ caps |= CAP_UNALIGNED_ACCESS;
+
+ reg->caps = cpu_to_le32(caps);
+}
+
static int pci_epf_test_epc_init(struct pci_epf *epf)
{
struct pci_epf_test *epf_test = epf_get_drvdata(epf);
@@ -763,6 +780,8 @@ static int pci_epf_test_epc_init(struct pci_epf *epf)
}
}
+ pci_epf_test_set_capabilities(epf);
+
ret = pci_epf_test_set_bar(epf);
if (ret)
return ret;
diff --git a/drivers/pci/endpoint/pci-epc-core.c b/drivers/pci/endpoint/pci-epc-core.c
index bed7c7d1fe3c..9e9ca5f8e8f8 100644
--- a/drivers/pci/endpoint/pci-epc-core.c
+++ b/drivers/pci/endpoint/pci-epc-core.c
@@ -60,26 +60,17 @@ struct pci_epc *pci_epc_get(const char *epc_name)
int ret = -EINVAL;
struct pci_epc *epc;
struct device *dev;
- struct class_dev_iter iter;
- class_dev_iter_init(&iter, &pci_epc_class, NULL, NULL);
- while ((dev = class_dev_iter_next(&iter))) {
- if (strcmp(epc_name, dev_name(dev)))
- continue;
+ dev = class_find_device_by_name(&pci_epc_class, epc_name);
+ if (!dev)
+ goto err;
- epc = to_pci_epc(dev);
- if (!try_module_get(epc->ops->owner)) {
- ret = -EINVAL;
- goto err;
- }
-
- class_dev_iter_exit(&iter);
- get_device(&epc->dev);
+ epc = to_pci_epc(dev);
+ if (try_module_get(epc->ops->owner))
return epc;
- }
err:
- class_dev_iter_exit(&iter);
+ put_device(dev);
return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(pci_epc_get);
@@ -609,10 +600,20 @@ EXPORT_SYMBOL_GPL(pci_epc_clear_bar);
int pci_epc_set_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
struct pci_epf_bar *epf_bar)
{
- int ret;
+ const struct pci_epc_features *epc_features;
+ enum pci_barno bar = epf_bar->barno;
int flags = epf_bar->flags;
+ int ret;
- if (!pci_epc_function_is_valid(epc, func_no, vfunc_no))
+ epc_features = pci_epc_get_features(epc, func_no, vfunc_no);
+ if (!epc_features)
+ return -EINVAL;
+
+ if (epc_features->bar[bar].type == BAR_FIXED &&
+ (epc_features->bar[bar].fixed_size != epf_bar->size))
+ return -EINVAL;
+
+ if (!is_power_of_2(epf_bar->size))
return -EINVAL;
if ((epf_bar->barno == BAR_5 && flags & PCI_BASE_ADDRESS_MEM_TYPE_64) ||
@@ -942,7 +943,7 @@ void devm_pci_epc_destroy(struct device *dev, struct pci_epc *epc)
{
int r;
- r = devres_destroy(dev, devm_pci_epc_release, devm_pci_epc_match,
+ r = devres_release(dev, devm_pci_epc_release, devm_pci_epc_match,
epc);
dev_WARN_ONCE(dev, r, "couldn't find PCI EPC resource\n");
}
diff --git a/drivers/pci/endpoint/pci-epf-core.c b/drivers/pci/endpoint/pci-epf-core.c
index 8fa2797d4169..50bc2892a36c 100644
--- a/drivers/pci/endpoint/pci-epf-core.c
+++ b/drivers/pci/endpoint/pci-epf-core.c
@@ -202,6 +202,7 @@ void pci_epf_remove_vepf(struct pci_epf *epf_pf, struct pci_epf *epf_vf)
mutex_lock(&epf_pf->lock);
clear_bit(epf_vf->vfunc_no, &epf_pf->vfunction_num_map);
+ epf_vf->epf_pf = NULL;
list_del(&epf_vf->list);
mutex_unlock(&epf_pf->lock);
}
diff --git a/drivers/pci/hotplug/acpiphp_ibm.c b/drivers/pci/hotplug/acpiphp_ibm.c
index 8f3a0a33f362..b3aa34e3a4a2 100644
--- a/drivers/pci/hotplug/acpiphp_ibm.c
+++ b/drivers/pci/hotplug/acpiphp_ibm.c
@@ -84,7 +84,7 @@ static int ibm_get_attention_status(struct hotplug_slot *slot, u8 *status);
static void ibm_handle_events(acpi_handle handle, u32 event, void *context);
static int ibm_get_table_from_acpi(char **bufp);
static ssize_t ibm_read_apci_table(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
+ const struct bin_attribute *bin_attr,
char *buffer, loff_t pos, size_t size);
static acpi_status __init ibm_find_acpi_device(acpi_handle handle,
u32 lvl, void *context, void **rv);
@@ -98,7 +98,7 @@ static struct bin_attribute ibm_apci_table_attr __ro_after_init = {
.name = "apci_table",
.mode = S_IRUGO,
},
- .read = ibm_read_apci_table,
+ .read_new = ibm_read_apci_table,
.write = NULL,
};
static struct acpiphp_attention_info ibm_attention_info =
@@ -353,7 +353,7 @@ read_table_done:
* our solution is to only allow reading the table in all at once.
*/
static ssize_t ibm_read_apci_table(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
+ const struct bin_attribute *bin_attr,
char *buffer, loff_t pos, size_t size)
{
int bytes_read = -EINVAL;
diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c
index 4be402fe9ab9..9e4770cdd4d5 100644
--- a/drivers/pci/iov.c
+++ b/drivers/pci/iov.c
@@ -747,6 +747,7 @@ static int sriov_init(struct pci_dev *dev, int pos)
struct resource *res;
const char *res_name;
struct pci_dev *pdev;
+ u32 sriovbars[PCI_SRIOV_NUM_BARS];
pci_read_config_word(dev, pos + PCI_SRIOV_CTRL, &ctrl);
if (ctrl & PCI_SRIOV_CTRL_VFE) {
@@ -783,6 +784,10 @@ found:
if (!iov)
return -ENOMEM;
+ /* Sizing SR-IOV BARs with VF Enable cleared - no decode */
+ __pci_size_stdbars(dev, PCI_SRIOV_NUM_BARS,
+ pos + PCI_SRIOV_BAR, sriovbars);
+
nres = 0;
for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
res = &dev->resource[i + PCI_IOV_RESOURCES];
@@ -796,7 +801,8 @@ found:
bar64 = (res->flags & IORESOURCE_MEM_64) ? 1 : 0;
else
bar64 = __pci_read_base(dev, pci_bar_unknown, res,
- pos + PCI_SRIOV_BAR + i * 4);
+ pos + PCI_SRIOV_BAR + i * 4,
+ &sriovbars[i]);
if (!res->flags)
continue;
if (resource_size(res) & (PAGE_SIZE - 1)) {
diff --git a/drivers/pci/msi/irqdomain.c b/drivers/pci/msi/irqdomain.c
index 569125726b3e..d7ba8795d60f 100644
--- a/drivers/pci/msi/irqdomain.c
+++ b/drivers/pci/msi/irqdomain.c
@@ -350,8 +350,11 @@ bool pci_msi_domain_supports(struct pci_dev *pdev, unsigned int feature_mask,
domain = dev_get_msi_domain(&pdev->dev);
- if (!domain || !irq_domain_is_hierarchy(domain))
- return mode == ALLOW_LEGACY;
+ if (!domain || !irq_domain_is_hierarchy(domain)) {
+ if (IS_ENABLED(CONFIG_PCI_MSI_ARCH_FALLBACKS))
+ return mode == ALLOW_LEGACY;
+ return false;
+ }
if (!irq_domain_is_msi_parent(domain)) {
/*
diff --git a/drivers/pci/msi/msi.c b/drivers/pci/msi/msi.c
index 3a45879d85db..2f647cac4cae 100644
--- a/drivers/pci/msi/msi.c
+++ b/drivers/pci/msi/msi.c
@@ -433,6 +433,10 @@ int __pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec,
if (WARN_ON_ONCE(dev->msi_enabled))
return -EINVAL;
+ /* Test for the availability of MSI support */
+ if (!pci_msi_domain_supports(dev, 0, ALLOW_LEGACY))
+ return -ENOTSUPP;
+
nvec = pci_msi_vec_count(dev);
if (nvec < 0)
return nvec;
diff --git a/drivers/pci/of.c b/drivers/pci/of.c
index 52f770bcc481..7a806f5c0d20 100644
--- a/drivers/pci/of.c
+++ b/drivers/pci/of.c
@@ -190,7 +190,8 @@ EXPORT_SYMBOL_GPL(of_pci_get_devfn);
*
* Returns 0 on success or a negative error-code on failure.
*/
-int of_pci_parse_bus_range(struct device_node *node, struct resource *res)
+static int of_pci_parse_bus_range(struct device_node *node,
+ struct resource *res)
{
u32 bus_range[2];
int error;
@@ -207,7 +208,6 @@ int of_pci_parse_bus_range(struct device_node *node, struct resource *res)
return 0;
}
-EXPORT_SYMBOL_GPL(of_pci_parse_bus_range);
/**
* of_get_pci_domain_nr - Find the host bridge domain number
@@ -302,8 +302,6 @@ EXPORT_SYMBOL_GPL(of_pci_check_probe_only);
* devm_of_pci_get_host_bridge_resources() - Resource-managed parsing of PCI
* host bridge resources from DT
* @dev: host bridge device
- * @busno: bus number associated with the bridge root bus
- * @bus_max: maximum number of buses for this bridge
* @resources: list where the range of resources will be added after DT parsing
* @ib_resources: list where the range of inbound resources (with addresses
* from 'dma-ranges') will be added after DT parsing
@@ -319,7 +317,6 @@ EXPORT_SYMBOL_GPL(of_pci_check_probe_only);
* value if it failed.
*/
static int devm_of_pci_get_host_bridge_resources(struct device *dev,
- unsigned char busno, unsigned char bus_max,
struct list_head *resources,
struct list_head *ib_resources,
resource_size_t *io_base)
@@ -343,14 +340,15 @@ static int devm_of_pci_get_host_bridge_resources(struct device *dev,
err = of_pci_parse_bus_range(dev_node, bus_range);
if (err) {
- bus_range->start = busno;
- bus_range->end = bus_max;
+ bus_range->start = 0;
+ bus_range->end = 0xff;
bus_range->flags = IORESOURCE_BUS;
- dev_info(dev, " No bus range found for %pOF, using %pR\n",
- dev_node, bus_range);
} else {
- if (bus_range->end > bus_range->start + bus_max)
- bus_range->end = bus_range->start + bus_max;
+ if (bus_range->end > 0xff) {
+ dev_warn(dev, " Invalid end bus number in %pR, defaulting to 0xff\n",
+ bus_range);
+ bus_range->end = 0xff;
+ }
}
pci_add_resource(resources, bus_range);
@@ -597,7 +595,7 @@ static int pci_parse_request_of_pci_ranges(struct device *dev,
INIT_LIST_HEAD(&bridge->windows);
INIT_LIST_HEAD(&bridge->dma_ranges);
- err = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff, &bridge->windows,
+ err = devm_of_pci_get_host_bridge_resources(dev, &bridge->windows,
&bridge->dma_ranges, &iobase);
if (err)
return err;
diff --git a/drivers/pci/of_property.c b/drivers/pci/of_property.c
index 886c236e5de6..58fbafac7c6a 100644
--- a/drivers/pci/of_property.c
+++ b/drivers/pci/of_property.c
@@ -26,7 +26,7 @@ struct of_pci_addr_pair {
* side and the child address is the corresponding address on the secondary
* side.
*/
-struct of_pci_range {
+struct of_pci_range_entry {
u32 child_addr[OF_PCI_ADDRESS_CELLS];
u32 parent_addr[OF_PCI_ADDRESS_CELLS];
u32 size[OF_PCI_SIZE_CELLS];
@@ -101,7 +101,7 @@ static int of_pci_prop_bus_range(struct pci_dev *pdev,
static int of_pci_prop_ranges(struct pci_dev *pdev, struct of_changeset *ocs,
struct device_node *np)
{
- struct of_pci_range *rp;
+ struct of_pci_range_entry *rp;
struct resource *res;
int i, j, ret;
u32 flags, num;
diff --git a/drivers/pci/p2pdma.c b/drivers/pci/p2pdma.c
index 7abd4f546d3c..0cb7e0aaba0e 100644
--- a/drivers/pci/p2pdma.c
+++ b/drivers/pci/p2pdma.c
@@ -161,7 +161,7 @@ out:
return ret;
}
-static struct bin_attribute p2pmem_alloc_attr = {
+static const struct bin_attribute p2pmem_alloc_attr = {
.attr = { .name = "allocate", .mode = 0660 },
.mmap = p2pmem_alloc_mmap,
/*
@@ -180,14 +180,14 @@ static struct attribute *p2pmem_attrs[] = {
NULL,
};
-static struct bin_attribute *p2pmem_bin_attrs[] = {
+static const struct bin_attribute *const p2pmem_bin_attrs[] = {
&p2pmem_alloc_attr,
NULL,
};
static const struct attribute_group p2pmem_group = {
.attrs = p2pmem_attrs,
- .bin_attrs = p2pmem_bin_attrs,
+ .bin_attrs_new = p2pmem_bin_attrs,
.name = "p2pmem",
};
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index 35270172c833..f57ea36d125d 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -1670,6 +1670,19 @@ static void pci_dma_cleanup(struct device *dev)
iommu_device_unuse_default_domain(dev);
}
+/*
+ * pci_device_irq_get_affinity - get IRQ affinity mask for device
+ * @dev: ptr to dev structure
+ * @irq_vec: interrupt vector number
+ *
+ * Return the CPU affinity mask for @dev and @irq_vec.
+ */
+static const struct cpumask *pci_device_irq_get_affinity(struct device *dev,
+ unsigned int irq_vec)
+{
+ return pci_irq_get_affinity(to_pci_dev(dev), irq_vec);
+}
+
const struct bus_type pci_bus_type = {
.name = "pci",
.match = pci_bus_match,
@@ -1677,6 +1690,7 @@ const struct bus_type pci_bus_type = {
.probe = pci_device_probe,
.remove = pci_device_remove,
.shutdown = pci_device_shutdown,
+ .irq_get_affinity = pci_device_irq_get_affinity,
.dev_groups = pci_dev_groups,
.bus_groups = pci_bus_groups,
.drv_groups = pci_drv_groups,
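The new bus-level irq_get_affinity callback above simply forwards to pci_irq_get_affinity(). As a usage sketch (the function and loop are illustrative; the format string is a common kernel idiom), a caller can inspect per-vector affinity like this:

#include <linux/cpumask.h>
#include <linux/pci.h>

static void example_dump_affinity(struct pci_dev *pdev, unsigned int nvec)
{
	unsigned int i;

	for (i = 0; i < nvec; i++) {
		const struct cpumask *mask = pci_irq_get_affinity(pdev, i);

		if (mask)
			dev_info(&pdev->dev, "vector %u affinity: %*pbl\n",
				 i, cpumask_pr_args(mask));
	}
}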
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
index 7679d75d71e5..b46ce1a2c554 100644
--- a/drivers/pci/pci-sysfs.c
+++ b/drivers/pci/pci-sysfs.c
@@ -13,6 +13,7 @@
*/
#include <linux/bitfield.h>
+#include <linux/cleanup.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/pci.h>
@@ -694,7 +695,7 @@ static ssize_t boot_vga_show(struct device *dev, struct device_attribute *attr,
static DEVICE_ATTR_RO(boot_vga);
static ssize_t pci_read_config(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr, char *buf,
+ const struct bin_attribute *bin_attr, char *buf,
loff_t off, size_t count)
{
struct pci_dev *dev = to_pci_dev(kobj_to_dev(kobj));
@@ -769,7 +770,7 @@ static ssize_t pci_read_config(struct file *filp, struct kobject *kobj,
}
static ssize_t pci_write_config(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr, char *buf,
+ const struct bin_attribute *bin_attr, char *buf,
loff_t off, size_t count)
{
struct pci_dev *dev = to_pci_dev(kobj_to_dev(kobj));
@@ -837,9 +838,9 @@ static ssize_t pci_write_config(struct file *filp, struct kobject *kobj,
return count;
}
-static BIN_ATTR(config, 0644, pci_read_config, pci_write_config, 0);
+static const BIN_ATTR(config, 0644, pci_read_config, pci_write_config, 0);
-static struct bin_attribute *pci_dev_config_attrs[] = {
+static const struct bin_attribute *const pci_dev_config_attrs[] = {
&bin_attr_config,
NULL,
};
@@ -856,7 +857,7 @@ static size_t pci_dev_config_attr_bin_size(struct kobject *kobj,
}
static const struct attribute_group pci_dev_config_attr_group = {
- .bin_attrs = pci_dev_config_attrs,
+ .bin_attrs_new = pci_dev_config_attrs,
.bin_size = pci_dev_config_attr_bin_size,
};
@@ -887,8 +888,8 @@ pci_llseek_resource(struct file *filep,
* callback routine (pci_legacy_read).
*/
static ssize_t pci_read_legacy_io(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr, char *buf,
- loff_t off, size_t count)
+ const struct bin_attribute *bin_attr,
+ char *buf, loff_t off, size_t count)
{
struct pci_bus *bus = to_pci_bus(kobj_to_dev(kobj));
@@ -912,8 +913,8 @@ static ssize_t pci_read_legacy_io(struct file *filp, struct kobject *kobj,
* callback routine (pci_legacy_write).
*/
static ssize_t pci_write_legacy_io(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr, char *buf,
- loff_t off, size_t count)
+ const struct bin_attribute *bin_attr,
+ char *buf, loff_t off, size_t count)
{
struct pci_bus *bus = to_pci_bus(kobj_to_dev(kobj));
@@ -1003,8 +1004,8 @@ void pci_create_legacy_files(struct pci_bus *b)
b->legacy_io->attr.name = "legacy_io";
b->legacy_io->size = 0xffff;
b->legacy_io->attr.mode = 0600;
- b->legacy_io->read = pci_read_legacy_io;
- b->legacy_io->write = pci_write_legacy_io;
+ b->legacy_io->read_new = pci_read_legacy_io;
+ b->legacy_io->write_new = pci_write_legacy_io;
/* See pci_create_attr() for motivation */
b->legacy_io->llseek = pci_llseek_resource;
b->legacy_io->mmap = pci_mmap_legacy_io;
@@ -1099,7 +1100,7 @@ static int pci_mmap_resource_wc(struct file *filp, struct kobject *kobj,
}
static ssize_t pci_resource_io(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr, char *buf,
+ const struct bin_attribute *attr, char *buf,
loff_t off, size_t count, bool write)
{
#ifdef CONFIG_HAS_IOPORT
@@ -1142,14 +1143,14 @@ static ssize_t pci_resource_io(struct file *filp, struct kobject *kobj,
}
static ssize_t pci_read_resource_io(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr, char *buf,
+ const struct bin_attribute *attr, char *buf,
loff_t off, size_t count)
{
return pci_resource_io(filp, kobj, attr, buf, off, count, false);
}
static ssize_t pci_write_resource_io(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr, char *buf,
+ const struct bin_attribute *attr, char *buf,
loff_t off, size_t count)
{
int ret;
@@ -1210,8 +1211,8 @@ static int pci_create_attr(struct pci_dev *pdev, int num, int write_combine)
} else {
sprintf(res_attr_name, "resource%d", num);
if (pci_resource_flags(pdev, num) & IORESOURCE_IO) {
- res_attr->read = pci_read_resource_io;
- res_attr->write = pci_write_resource_io;
+ res_attr->read_new = pci_read_resource_io;
+ res_attr->write_new = pci_write_resource_io;
if (arch_can_pci_mmap_io())
res_attr->mmap = pci_mmap_resource_uc;
} else {
@@ -1292,7 +1293,7 @@ void __weak pci_remove_resource_files(struct pci_dev *dev) { return; }
* writing anything except 0 enables it
*/
static ssize_t pci_write_rom(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr, char *buf,
+ const struct bin_attribute *bin_attr, char *buf,
loff_t off, size_t count)
{
struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj));
@@ -1318,7 +1319,7 @@ static ssize_t pci_write_rom(struct file *filp, struct kobject *kobj,
* device corresponding to @kobj.
*/
static ssize_t pci_read_rom(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr, char *buf,
+ const struct bin_attribute *bin_attr, char *buf,
loff_t off, size_t count)
{
struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj));
@@ -1344,9 +1345,9 @@ static ssize_t pci_read_rom(struct file *filp, struct kobject *kobj,
return count;
}
-static BIN_ATTR(rom, 0600, pci_read_rom, pci_write_rom, 0);
+static const BIN_ATTR(rom, 0600, pci_read_rom, pci_write_rom, 0);
-static struct bin_attribute *pci_dev_rom_attrs[] = {
+static const struct bin_attribute *const pci_dev_rom_attrs[] = {
&bin_attr_rom,
NULL,
};
@@ -1372,7 +1373,7 @@ static size_t pci_dev_rom_attr_bin_size(struct kobject *kobj,
}
static const struct attribute_group pci_dev_rom_attr_group = {
- .bin_attrs = pci_dev_rom_attrs,
+ .bin_attrs_new = pci_dev_rom_attrs,
.is_bin_visible = pci_dev_rom_attr_is_visible,
.bin_size = pci_dev_rom_attr_bin_size,
};
@@ -1421,6 +1422,113 @@ static const struct attribute_group pci_dev_reset_attr_group = {
.is_visible = pci_dev_reset_attr_is_visible,
};
+static ssize_t reset_method_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ ssize_t len = 0;
+ int i, m;
+
+ for (i = 0; i < PCI_NUM_RESET_METHODS; i++) {
+ m = pdev->reset_methods[i];
+ if (!m)
+ break;
+
+ len += sysfs_emit_at(buf, len, "%s%s", len ? " " : "",
+ pci_reset_fn_methods[m].name);
+ }
+
+ if (len)
+ len += sysfs_emit_at(buf, len, "\n");
+
+ return len;
+}
+
+static int reset_method_lookup(const char *name)
+{
+ int m;
+
+ for (m = 1; m < PCI_NUM_RESET_METHODS; m++) {
+ if (sysfs_streq(name, pci_reset_fn_methods[m].name))
+ return m;
+ }
+
+ return 0; /* not found */
+}
+
+static ssize_t reset_method_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ char *tmp_options, *name;
+ int m, n;
+ u8 reset_methods[PCI_NUM_RESET_METHODS] = {};
+
+ if (sysfs_streq(buf, "")) {
+ pdev->reset_methods[0] = 0;
+ pci_warn(pdev, "All device reset methods disabled by user");
+ return count;
+ }
+
+ if (sysfs_streq(buf, "default")) {
+ pci_init_reset_methods(pdev);
+ return count;
+ }
+
+ char *options __free(kfree) = kstrndup(buf, count, GFP_KERNEL);
+ if (!options)
+ return -ENOMEM;
+
+ n = 0;
+ tmp_options = options;
+ while ((name = strsep(&tmp_options, " ")) != NULL) {
+ if (sysfs_streq(name, ""))
+ continue;
+
+ name = strim(name);
+
+ /* Leave previous methods unchanged if input is invalid */
+ m = reset_method_lookup(name);
+ if (!m) {
+ pci_err(pdev, "Invalid reset method '%s'", name);
+ return -EINVAL;
+ }
+
+ if (pci_reset_fn_methods[m].reset_fn(pdev, PCI_RESET_PROBE)) {
+ pci_err(pdev, "Unsupported reset method '%s'", name);
+ return -EINVAL;
+ }
+
+ if (n == PCI_NUM_RESET_METHODS - 1) {
+ pci_err(pdev, "Too many reset methods\n");
+ return -EINVAL;
+ }
+
+ reset_methods[n++] = m;
+ }
+
+ reset_methods[n] = 0;
+
+ /* Warn if dev-specific supported but not highest priority */
+ if (pci_reset_fn_methods[1].reset_fn(pdev, PCI_RESET_PROBE) == 0 &&
+ reset_methods[0] != 1)
+ pci_warn(pdev, "Device-specific reset disabled/de-prioritized by user");
+ memcpy(pdev->reset_methods, reset_methods, sizeof(pdev->reset_methods));
+ return count;
+}
+static DEVICE_ATTR_RW(reset_method);
+
+static struct attribute *pci_dev_reset_method_attrs[] = {
+ &dev_attr_reset_method.attr,
+ NULL,
+};
+
+static const struct attribute_group pci_dev_reset_method_attr_group = {
+ .attrs = pci_dev_reset_method_attrs,
+ .is_visible = pci_dev_reset_attr_is_visible,
+};
+
static ssize_t __resource_resize_show(struct device *dev, int n, char *buf)
{
struct pci_dev *pdev = to_pci_dev(dev);
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 0b29ec6e8e5e..869d204a70a3 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -23,7 +23,6 @@
#include <linux/string.h>
#include <linux/log2.h>
#include <linux/logic_pio.h>
-#include <linux/pm_wakeup.h>
#include <linux/device.h>
#include <linux/pm_runtime.h>
#include <linux/pci_hotplug.h>
@@ -1100,34 +1099,6 @@ static void pci_enable_acs(struct pci_dev *dev)
}
/**
- * pcie_read_tlp_log - read TLP Header Log
- * @dev: PCIe device
- * @where: PCI Config offset of TLP Header Log
- * @tlp_log: TLP Log structure to fill
- *
- * Fill @tlp_log from TLP Header Log registers, e.g., AER or DPC.
- *
- * Return: 0 on success and filled TLP Log structure, <0 on error.
- */
-int pcie_read_tlp_log(struct pci_dev *dev, int where,
- struct pcie_tlp_log *tlp_log)
-{
- int i, ret;
-
- memset(tlp_log, 0, sizeof(*tlp_log));
-
- for (i = 0; i < 4; i++) {
- ret = pci_read_config_dword(dev, where + i * 4,
- &tlp_log->dw[i]);
- if (ret)
- return pcibios_err_to_errno(ret);
- }
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(pcie_read_tlp_log);
-
-/**
* pci_restore_bars - restore a device's BAR values (e.g. after wake-up)
* @dev: PCI device to have its BARs restored
*
@@ -2059,6 +2030,28 @@ int __weak pcibios_enable_device(struct pci_dev *dev, int bars)
return pci_enable_resources(dev, bars);
}
+static int pci_host_bridge_enable_device(struct pci_dev *dev)
+{
+ struct pci_host_bridge *host_bridge = pci_find_host_bridge(dev->bus);
+ int err;
+
+ if (host_bridge && host_bridge->enable_device) {
+ err = host_bridge->enable_device(host_bridge, dev);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static void pci_host_bridge_disable_device(struct pci_dev *dev)
+{
+ struct pci_host_bridge *host_bridge = pci_find_host_bridge(dev->bus);
+
+ if (host_bridge && host_bridge->disable_device)
+ host_bridge->disable_device(host_bridge, dev);
+}
+
static int do_pci_enable_device(struct pci_dev *dev, int bars)
{
int err;
@@ -2074,9 +2067,13 @@ static int do_pci_enable_device(struct pci_dev *dev, int bars)
if (bridge)
pcie_aspm_powersave_config_link(bridge);
+ err = pci_host_bridge_enable_device(dev);
+ if (err)
+ return err;
+
err = pcibios_enable_device(dev, bars);
if (err < 0)
- return err;
+ goto err_enable;
pci_fixup_device(pci_fixup_enable, dev);
if (dev->msi_enabled || dev->msix_enabled)
@@ -2091,6 +2088,12 @@ static int do_pci_enable_device(struct pci_dev *dev, int bars)
}
return 0;
+
+err_enable:
+ pci_host_bridge_disable_device(dev);
+
+ return err;
}
/**
@@ -2274,6 +2277,8 @@ void pci_disable_device(struct pci_dev *dev)
if (atomic_dec_return(&dev->enable_cnt) != 0)
return;
+ pci_host_bridge_disable_device(dev);
+
do_pci_disable_device(dev);
dev->is_busmaster = 0;
@@ -3941,15 +3946,14 @@ EXPORT_SYMBOL(pci_release_region);
* __pci_request_region - Reserved PCI I/O and memory resource
* @pdev: PCI device whose resources are to be reserved
* @bar: BAR to be reserved
- * @res_name: Name to be associated with resource.
+ * @name: name of the driver requesting the resource
* @exclusive: whether the region access is exclusive or not
*
* Returns: 0 on success, negative error code on failure.
*
- * Mark the PCI region associated with PCI device @pdev BAR @bar as
- * being reserved by owner @res_name. Do not access any
- * address inside the PCI regions unless this call returns
- * successfully.
+ * Mark the PCI region associated with PCI device @pdev BAR @bar as being
+ * reserved by owner @name. Do not access any address inside the PCI regions
+ * unless this call returns successfully.
*
* If @exclusive is set, then the region is marked so that userspace
* is explicitly not allowed to map the resource via /dev/mem or
@@ -3959,13 +3963,13 @@ EXPORT_SYMBOL(pci_release_region);
* message is also printed on failure.
*/
static int __pci_request_region(struct pci_dev *pdev, int bar,
- const char *res_name, int exclusive)
+ const char *name, int exclusive)
{
if (pci_is_managed(pdev)) {
if (exclusive == IORESOURCE_EXCLUSIVE)
- return pcim_request_region_exclusive(pdev, bar, res_name);
+ return pcim_request_region_exclusive(pdev, bar, name);
- return pcim_request_region(pdev, bar, res_name);
+ return pcim_request_region(pdev, bar, name);
}
if (pci_resource_len(pdev, bar) == 0)
@@ -3973,11 +3977,11 @@ static int __pci_request_region(struct pci_dev *pdev, int bar,
if (pci_resource_flags(pdev, bar) & IORESOURCE_IO) {
if (!request_region(pci_resource_start(pdev, bar),
- pci_resource_len(pdev, bar), res_name))
+ pci_resource_len(pdev, bar), name))
goto err_out;
} else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) {
if (!__request_mem_region(pci_resource_start(pdev, bar),
- pci_resource_len(pdev, bar), res_name,
+ pci_resource_len(pdev, bar), name,
exclusive))
goto err_out;
}
@@ -3994,14 +3998,13 @@ err_out:
* pci_request_region - Reserve PCI I/O and memory resource
* @pdev: PCI device whose resources are to be reserved
* @bar: BAR to be reserved
- * @res_name: Name to be associated with resource
+ * @name: name of the driver requesting the resource
*
* Returns: 0 on success, negative error code on failure.
*
- * Mark the PCI region associated with PCI device @pdev BAR @bar as
- * being reserved by owner @res_name. Do not access any
- * address inside the PCI regions unless this call returns
- * successfully.
+ * Mark the PCI region associated with PCI device @pdev BAR @bar as being
+ * reserved by owner @name. Do not access any address inside the PCI regions
+ * unless this call returns successfully.
*
* Returns 0 on success, or %EBUSY on error. A warning
* message is also printed on failure.
@@ -4011,9 +4014,9 @@ err_out:
* when pcim_enable_device() has been called in advance. This hybrid feature is
* DEPRECATED! If you want managed cleanup, use the pcim_* functions instead.
*/
-int pci_request_region(struct pci_dev *pdev, int bar, const char *res_name)
+int pci_request_region(struct pci_dev *pdev, int bar, const char *name)
{
- return __pci_request_region(pdev, bar, res_name, 0);
+ return __pci_request_region(pdev, bar, name, 0);
}
EXPORT_SYMBOL(pci_request_region);
@@ -4036,13 +4039,13 @@ void pci_release_selected_regions(struct pci_dev *pdev, int bars)
EXPORT_SYMBOL(pci_release_selected_regions);
static int __pci_request_selected_regions(struct pci_dev *pdev, int bars,
- const char *res_name, int excl)
+ const char *name, int excl)
{
int i;
for (i = 0; i < PCI_STD_NUM_BARS; i++)
if (bars & (1 << i))
- if (__pci_request_region(pdev, i, res_name, excl))
+ if (__pci_request_region(pdev, i, name, excl))
goto err_out;
return 0;
@@ -4059,7 +4062,7 @@ err_out:
* pci_request_selected_regions - Reserve selected PCI I/O and memory resources
* @pdev: PCI device whose resources are to be reserved
* @bars: Bitmask of BARs to be requested
- * @res_name: Name to be associated with resource
+ * @name: name of the driver requesting the resources
*
* Returns: 0 on success, negative error code on failure.
*
@@ -4069,9 +4072,9 @@ err_out:
* DEPRECATED! If you want managed cleanup, use the pcim_* functions instead.
*/
int pci_request_selected_regions(struct pci_dev *pdev, int bars,
- const char *res_name)
+ const char *name)
{
- return __pci_request_selected_regions(pdev, bars, res_name, 0);
+ return __pci_request_selected_regions(pdev, bars, name, 0);
}
EXPORT_SYMBOL(pci_request_selected_regions);
@@ -4079,7 +4082,7 @@ EXPORT_SYMBOL(pci_request_selected_regions);
* pci_request_selected_regions_exclusive - Request regions exclusively
* @pdev: PCI device to request regions from
* @bars: bit mask of BARs to request
- * @res_name: name to be associated with the requests
+ * @name: name of the driver requesting the resources
*
* Returns: 0 on success, negative error code on failure.
*
@@ -4089,9 +4092,9 @@ EXPORT_SYMBOL(pci_request_selected_regions);
* DEPRECATED! If you want managed cleanup, use the pcim_* functions instead.
*/
int pci_request_selected_regions_exclusive(struct pci_dev *pdev, int bars,
- const char *res_name)
+ const char *name)
{
- return __pci_request_selected_regions(pdev, bars, res_name,
+ return __pci_request_selected_regions(pdev, bars, name,
IORESOURCE_EXCLUSIVE);
}
EXPORT_SYMBOL(pci_request_selected_regions_exclusive);
@@ -4114,12 +4117,11 @@ EXPORT_SYMBOL(pci_release_regions);
/**
* pci_request_regions - Reserve PCI I/O and memory resources
* @pdev: PCI device whose resources are to be reserved
- * @res_name: Name to be associated with resource.
+ * @name: name of the driver requesting the resources
*
- * Mark all PCI regions associated with PCI device @pdev as
- * being reserved by owner @res_name. Do not access any
- * address inside the PCI regions unless this call returns
- * successfully.
+ * Mark all PCI regions associated with PCI device @pdev as being reserved by
+ * owner @name. Do not access any address inside the PCI regions unless this
+ * call returns successfully.
*
* Returns 0 on success, or %EBUSY on error. A warning
* message is also printed on failure.
@@ -4129,22 +4131,22 @@ EXPORT_SYMBOL(pci_release_regions);
* when pcim_enable_device() has been called in advance. This hybrid feature is
* DEPRECATED! If you want managed cleanup, use the pcim_* functions instead.
*/
-int pci_request_regions(struct pci_dev *pdev, const char *res_name)
+int pci_request_regions(struct pci_dev *pdev, const char *name)
{
return pci_request_selected_regions(pdev,
- ((1 << PCI_STD_NUM_BARS) - 1), res_name);
+ ((1 << PCI_STD_NUM_BARS) - 1), name);
}
EXPORT_SYMBOL(pci_request_regions);
/**
* pci_request_regions_exclusive - Reserve PCI I/O and memory resources
* @pdev: PCI device whose resources are to be reserved
- * @res_name: Name to be associated with resource.
+ * @name: name of the driver requesting the resources
*
* Returns: 0 on success, negative error code on failure.
*
* Mark all PCI regions associated with PCI device @pdev as being reserved
- * by owner @res_name. Do not access any address inside the PCI regions
+ * by owner @name. Do not access any address inside the PCI regions
* unless this call returns successfully.
*
* pci_request_regions_exclusive() will mark the region so that /dev/mem
@@ -4158,10 +4160,10 @@ EXPORT_SYMBOL(pci_request_regions);
* when pcim_enable_device() has been called in advance. This hybrid feature is
* DEPRECATED! If you want managed cleanup, use the pcim_* functions instead.
*/
-int pci_request_regions_exclusive(struct pci_dev *pdev, const char *res_name)
+int pci_request_regions_exclusive(struct pci_dev *pdev, const char *name)
{
return pci_request_selected_regions_exclusive(pdev,
- ((1 << PCI_STD_NUM_BARS) - 1), res_name);
+ ((1 << PCI_STD_NUM_BARS) - 1), name);
}
EXPORT_SYMBOL(pci_request_regions_exclusive);
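
Since the renamed @name parameter is documented as the requesting driver's name (it becomes the owner string visible in /proc/iomem and /proc/ioports), a typical unmanaged caller simply passes its driver name. A minimal sketch (the demo_* names are hypothetical):

#include <linux/pci.h>

#define DEMO_DRV_NAME "demo_pci"

static int demo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
        int ret;

        ret = pci_enable_device(pdev);
        if (ret)
                return ret;

        /* DEMO_DRV_NAME shows up as the region owner in /proc/iomem */
        ret = pci_request_regions(pdev, DEMO_DRV_NAME);
        if (ret)
                goto err_disable;

        /* ... ioremap BARs, register the device ... */
        return 0;

err_disable:
        pci_disable_device(pdev);
        return ret;
}
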
@@ -4488,11 +4490,6 @@ void pci_disable_parity(struct pci_dev *dev)
* @enable: boolean: whether to enable or disable PCI INTx
*
* Enables/disables PCI INTx for device @pdev
- *
- * NOTE:
- * This is a "hybrid" function: It's normally unmanaged, but becomes managed
- * when pcim_enable_device() has been called in advance. This hybrid feature is
- * DEPRECATED! If you want managed cleanup, use pcim_intx() instead.
*/
void pci_intx(struct pci_dev *pdev, int enable)
{
@@ -4505,15 +4502,10 @@ void pci_intx(struct pci_dev *pdev, int enable)
else
new = pci_command | PCI_COMMAND_INTX_DISABLE;
- if (new != pci_command) {
- /* Preserve the "hybrid" behavior for backwards compatibility */
- if (pci_is_managed(pdev)) {
- WARN_ON_ONCE(pcim_intx(pdev, enable) != 0);
- return;
- }
+ if (new == pci_command)
+ return;
- pci_write_config_word(pdev, PCI_COMMAND, new);
- }
+ pci_write_config_word(pdev, PCI_COMMAND, new);
}
EXPORT_SYMBOL_GPL(pci_intx);
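
With the hybrid devres behavior gone, pci_intx() now always writes PCI_COMMAND directly and performs no automatic cleanup. A driver that previously relied on the managed variant being selected implicitly should call pcim_intx() itself, roughly as in this sketch (assumes a driver that masks INTx because it only uses MSI/MSI-X):

#include <linux/pci.h>

static int demo_managed_probe(struct pci_dev *pdev,
                              const struct pci_device_id *id)
{
        int ret;

        ret = pcim_enable_device(pdev);
        if (ret)
                return ret;

        /* Managed: the original INTx state is restored on driver detach */
        ret = pcim_intx(pdev, 0);
        if (ret)
                return ret;

        /* ... pci_alloc_irq_vectors(..., PCI_IRQ_MSI | PCI_IRQ_MSIX) ... */
        return 0;
}
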
@@ -5204,7 +5196,7 @@ static void pci_dev_restore(struct pci_dev *dev)
}
/* dev->reset_methods[] is a 0-terminated list of indices into this array */
-static const struct pci_reset_fn_method pci_reset_fn_methods[] = {
+const struct pci_reset_fn_method pci_reset_fn_methods[] = {
{ },
{ pci_dev_specific_reset, .name = "device_specific" },
{ pci_dev_acpi_reset, .name = "acpi" },
@@ -5215,129 +5207,6 @@ static const struct pci_reset_fn_method pci_reset_fn_methods[] = {
{ cxl_reset_bus_function, .name = "cxl_bus" },
};
-static ssize_t reset_method_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct pci_dev *pdev = to_pci_dev(dev);
- ssize_t len = 0;
- int i, m;
-
- for (i = 0; i < PCI_NUM_RESET_METHODS; i++) {
- m = pdev->reset_methods[i];
- if (!m)
- break;
-
- len += sysfs_emit_at(buf, len, "%s%s", len ? " " : "",
- pci_reset_fn_methods[m].name);
- }
-
- if (len)
- len += sysfs_emit_at(buf, len, "\n");
-
- return len;
-}
-
-static int reset_method_lookup(const char *name)
-{
- int m;
-
- for (m = 1; m < PCI_NUM_RESET_METHODS; m++) {
- if (sysfs_streq(name, pci_reset_fn_methods[m].name))
- return m;
- }
-
- return 0; /* not found */
-}
-
-static ssize_t reset_method_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct pci_dev *pdev = to_pci_dev(dev);
- char *options, *tmp_options, *name;
- int m, n;
- u8 reset_methods[PCI_NUM_RESET_METHODS] = { 0 };
-
- if (sysfs_streq(buf, "")) {
- pdev->reset_methods[0] = 0;
- pci_warn(pdev, "All device reset methods disabled by user");
- return count;
- }
-
- if (sysfs_streq(buf, "default")) {
- pci_init_reset_methods(pdev);
- return count;
- }
-
- options = kstrndup(buf, count, GFP_KERNEL);
- if (!options)
- return -ENOMEM;
-
- n = 0;
- tmp_options = options;
- while ((name = strsep(&tmp_options, " ")) != NULL) {
- if (sysfs_streq(name, ""))
- continue;
-
- name = strim(name);
-
- m = reset_method_lookup(name);
- if (!m) {
- pci_err(pdev, "Invalid reset method '%s'", name);
- goto error;
- }
-
- if (pci_reset_fn_methods[m].reset_fn(pdev, PCI_RESET_PROBE)) {
- pci_err(pdev, "Unsupported reset method '%s'", name);
- goto error;
- }
-
- if (n == PCI_NUM_RESET_METHODS - 1) {
- pci_err(pdev, "Too many reset methods\n");
- goto error;
- }
-
- reset_methods[n++] = m;
- }
-
- reset_methods[n] = 0;
-
- /* Warn if dev-specific supported but not highest priority */
- if (pci_reset_fn_methods[1].reset_fn(pdev, PCI_RESET_PROBE) == 0 &&
- reset_methods[0] != 1)
- pci_warn(pdev, "Device-specific reset disabled/de-prioritized by user");
- memcpy(pdev->reset_methods, reset_methods, sizeof(pdev->reset_methods));
- kfree(options);
- return count;
-
-error:
- /* Leave previous methods unchanged */
- kfree(options);
- return -EINVAL;
-}
-static DEVICE_ATTR_RW(reset_method);
-
-static struct attribute *pci_dev_reset_method_attrs[] = {
- &dev_attr_reset_method.attr,
- NULL,
-};
-
-static umode_t pci_dev_reset_method_attr_is_visible(struct kobject *kobj,
- struct attribute *a, int n)
-{
- struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj));
-
- if (!pci_reset_supported(pdev))
- return 0;
-
- return a->mode;
-}
-
-const struct attribute_group pci_dev_reset_method_attr_group = {
- .attrs = pci_dev_reset_method_attrs,
- .is_visible = pci_dev_reset_method_attr_is_visible,
-};
-
/**
* __pci_reset_function_locked - reset a PCI device function while holding
* the @dev mutex lock.
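
Now that pci_reset_fn_methods[] is no longer static, consumers outside pci.c can probe method support the same way the removed store handler did. A rough sketch of that probing idiom (not the relocated sysfs code itself):

        int m;

        for (m = 1; m < PCI_NUM_RESET_METHODS; m++) {
                if (pci_reset_fn_methods[m].reset_fn(pdev, PCI_RESET_PROBE) == 0)
                        pci_dbg(pdev, "reset method '%s' is supported\n",
                                pci_reset_fn_methods[m].name);
        }
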
@@ -6232,12 +6101,14 @@ u8 pcie_get_supported_speeds(struct pci_dev *dev)
pcie_capability_read_dword(dev, PCI_EXP_LNKCAP2, &lnkcap2);
speeds = lnkcap2 & PCI_EXP_LNKCAP2_SLS;
+ /* Ignore speeds higher than Max Link Speed */
+ pcie_capability_read_dword(dev, PCI_EXP_LNKCAP, &lnkcap);
+ speeds &= GENMASK(lnkcap & PCI_EXP_LNKCAP_SLS, 0);
+
/* PCIe r3.0-compliant */
if (speeds)
return speeds;
- pcie_capability_read_dword(dev, PCI_EXP_LNKCAP, &lnkcap);
-
/* Synthesize from the Max Link Speed field */
if ((lnkcap & PCI_EXP_LNKCAP_SLS) == PCI_EXP_LNKCAP_SLS_5_0GB)
speeds = PCI_EXP_LNKCAP2_SLS_5_0GB | PCI_EXP_LNKCAP2_SLS_2_5GB;
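
To make the new clamping concrete, here is a worked example with hypothetical register values for a device whose LNKCAP2 vector advertises more than its LNKCAP Max Link Speed allows (PCI_EXP_* constants from <linux/pci_regs.h>, GENMASK() from <linux/bits.h>):

        /* Device claims 2.5/5/8/16 GT/s in LNKCAP2 ... */
        u32 lnkcap2_sls = PCI_EXP_LNKCAP2_SLS_2_5GB | PCI_EXP_LNKCAP2_SLS_5_0GB |
                          PCI_EXP_LNKCAP2_SLS_8_0GB | PCI_EXP_LNKCAP2_SLS_16_0GB;
        /* ... but reports Max Link Speed 8.0 GT/s (field value 3) in LNKCAP */
        u32 max_sls = 3;

        u8 speeds = lnkcap2_sls & GENMASK(max_sls, 0);
        /* GENMASK(3, 0) == 0xf: the bogus 16 GT/s bit (bit 4) is dropped */
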
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index 2e40fc63ba31..01e51db8d285 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -4,6 +4,8 @@
#include <linux/pci.h>
+struct pcie_tlp_log;
+
/* Number of possible devfns: 0.0 to 1f.7 inclusive */
#define MAX_NR_DEVFNS 256
@@ -315,8 +317,10 @@ bool pci_bus_generic_read_dev_vendor_id(struct pci_bus *bus, int devfn, u32 *pl,
int pci_idt_bus_quirk(struct pci_bus *bus, int devfn, u32 *pl, int rrs_timeout);
int pci_setup_device(struct pci_dev *dev);
+void __pci_size_stdbars(struct pci_dev *dev, int count,
+ unsigned int pos, u32 *sizes);
int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
- struct resource *res, unsigned int reg);
+ struct resource *res, unsigned int reg, u32 *sizes);
void pci_configure_ari(struct pci_dev *dev);
void __pci_bus_size_bridges(struct pci_bus *bus,
struct list_head *realloc_head);
@@ -547,6 +551,12 @@ struct aer_err_info {
int aer_get_device_error_info(struct pci_dev *dev, struct aer_err_info *info);
void aer_print_error(struct pci_dev *dev, struct aer_err_info *info);
+
+int pcie_read_tlp_log(struct pci_dev *dev, int where, int where2,
+ unsigned int tlp_len, struct pcie_tlp_log *log);
+unsigned int aer_tlp_log_len(struct pci_dev *dev, u32 aercc);
+void pcie_print_tlp_log(const struct pci_dev *dev,
+ const struct pcie_tlp_log *log, const char *pfx);
#endif /* CONFIG_PCIEAER */
#ifdef CONFIG_PCIEPORTBUS
@@ -565,6 +575,7 @@ void pci_dpc_init(struct pci_dev *pdev);
void dpc_process_error(struct pci_dev *pdev);
pci_ers_result_t dpc_reset_link(struct pci_dev *pdev);
bool pci_dpc_recovered(struct pci_dev *pdev);
+unsigned int dpc_tlp_log_len(struct pci_dev *dev);
#else
static inline void pci_save_dpc_state(struct pci_dev *dev) { }
static inline void pci_restore_dpc_state(struct pci_dev *dev) { }
@@ -766,6 +777,7 @@ struct pci_reset_fn_method {
int (*reset_fn)(struct pci_dev *pdev, bool probe);
char *name;
};
+extern const struct pci_reset_fn_method pci_reset_fn_methods[];
#ifdef CONFIG_PCI_QUIRKS
int pci_dev_specific_reset(struct pci_dev *dev, bool probe);
@@ -797,7 +809,6 @@ static inline u64 pci_rebar_size_to_bytes(int size)
struct device_node;
#ifdef CONFIG_OF
-int of_pci_parse_bus_range(struct device_node *node, struct resource *res);
int of_get_pci_domain_nr(struct device_node *node);
int of_pci_get_max_link_speed(struct device_node *node);
u32 of_pci_get_slot_power_limit(struct device_node *node,
@@ -814,12 +825,6 @@ bool of_pci_supply_present(struct device_node *np);
#else
static inline int
-of_pci_parse_bus_range(struct device_node *node, struct resource *res)
-{
- return -EINVAL;
-}
-
-static inline int
of_get_pci_domain_nr(struct device_node *node)
{
return -1;
@@ -960,8 +965,6 @@ static inline pci_power_t acpi_pci_choose_state(struct pci_dev *pdev)
extern const struct attribute_group aspm_ctrl_attr_group;
#endif
-extern const struct attribute_group pci_dev_reset_method_attr_group;
-
#ifdef CONFIG_X86_INTEL_MID
bool pci_use_mid_pm(void);
int mid_pci_set_power_state(struct pci_dev *pdev, pci_power_t state);
diff --git a/drivers/pci/pcie/Makefile b/drivers/pci/pcie/Makefile
index 53ccab62314d..173829aa02e6 100644
--- a/drivers/pci/pcie/Makefile
+++ b/drivers/pci/pcie/Makefile
@@ -7,7 +7,7 @@ pcieportdrv-y := portdrv.o rcec.o
obj-$(CONFIG_PCIEPORTBUS) += pcieportdrv.o bwctrl.o
obj-y += aspm.o
-obj-$(CONFIG_PCIEAER) += aer.o err.o
+obj-$(CONFIG_PCIEAER) += aer.o err.o tlp.o
obj-$(CONFIG_PCIEAER_INJECT) += aer_inject.o
obj-$(CONFIG_PCIE_PME) += pme.o
obj-$(CONFIG_PCIE_DPC) += dpc.o
diff --git a/drivers/pci/pcie/aer.c b/drivers/pci/pcie/aer.c
index 34ce9f834d0c..508474e17183 100644
--- a/drivers/pci/pcie/aer.c
+++ b/drivers/pci/pcie/aer.c
@@ -665,12 +665,6 @@ static void pci_rootport_aer_stats_incr(struct pci_dev *pdev,
}
}
-static void __print_tlp_header(struct pci_dev *dev, struct pcie_tlp_log *t)
-{
- pci_err(dev, " TLP Header: %08x %08x %08x %08x\n",
- t->dw[0], t->dw[1], t->dw[2], t->dw[3]);
-}
-
static void __aer_print_error(struct pci_dev *dev,
struct aer_err_info *info)
{
@@ -725,7 +719,7 @@ void aer_print_error(struct pci_dev *dev, struct aer_err_info *info)
__aer_print_error(dev, info);
if (info->tlp_header_valid)
- __print_tlp_header(dev, &info->tlp);
+ pcie_print_tlp_log(dev, &info->tlp, dev_fmt(" "));
out:
if (info->id && info->error_dev_num > 1 && info->id == id)
@@ -797,7 +791,7 @@ void pci_print_aer(struct pci_dev *dev, int aer_severity,
aer->uncor_severity);
if (tlp_header_valid)
- __print_tlp_header(dev, &aer->header_log);
+ pcie_print_tlp_log(dev, &aer->header_log, dev_fmt(" "));
trace_aer_event(dev_name(&dev->dev), (status & ~mask),
aer_severity, tlp_header_valid, &aer->header_log);
@@ -1248,7 +1242,10 @@ int aer_get_device_error_info(struct pci_dev *dev, struct aer_err_info *info)
if (info->status & AER_LOG_TLP_MASKS) {
info->tlp_header_valid = 1;
- pcie_read_tlp_log(dev, aer + PCI_ERR_HEADER_LOG, &info->tlp);
+ pcie_read_tlp_log(dev, aer + PCI_ERR_HEADER_LOG,
+ aer + PCI_ERR_PREFIX_LOG,
+ aer_tlp_log_len(dev, aercc),
+ &info->tlp);
}
}
diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
index 28567d457613..da3e7edcf49d 100644
--- a/drivers/pci/pcie/aspm.c
+++ b/drivers/pci/pcie/aspm.c
@@ -81,24 +81,44 @@ void pci_configure_aspm_l1ss(struct pci_dev *pdev)
void pci_save_aspm_l1ss_state(struct pci_dev *pdev)
{
+ struct pci_dev *parent = pdev->bus->self;
struct pci_cap_saved_state *save_state;
- u16 l1ss = pdev->l1ss;
u32 *cap;
/*
+ * If this is a Downstream Port, we never restore the L1SS state
+ * directly; we only restore it when we restore the state of the
+ * Upstream Port below it.
+ */
+ if (pcie_downstream_port(pdev) || !parent)
+ return;
+
+ if (!pdev->l1ss || !parent->l1ss)
+ return;
+
+ /*
* Save L1 substate configuration. The ASPM L0s/L1 configuration
* in PCI_EXP_LNKCTL_ASPMC is saved by pci_save_pcie_state().
*/
- if (!l1ss)
+ save_state = pci_find_saved_ext_cap(pdev, PCI_EXT_CAP_ID_L1SS);
+ if (!save_state)
return;
- save_state = pci_find_saved_ext_cap(pdev, PCI_EXT_CAP_ID_L1SS);
+ cap = &save_state->cap.data[0];
+ pci_read_config_dword(pdev, pdev->l1ss + PCI_L1SS_CTL2, cap++);
+ pci_read_config_dword(pdev, pdev->l1ss + PCI_L1SS_CTL1, cap++);
+
+ /*
+ * Save parent's L1 substate configuration so we have it for
+ * pci_restore_aspm_l1ss_state(pdev) to restore.
+ */
+ save_state = pci_find_saved_ext_cap(parent, PCI_EXT_CAP_ID_L1SS);
if (!save_state)
return;
cap = &save_state->cap.data[0];
- pci_read_config_dword(pdev, l1ss + PCI_L1SS_CTL2, cap++);
- pci_read_config_dword(pdev, l1ss + PCI_L1SS_CTL1, cap++);
+ pci_read_config_dword(parent, parent->l1ss + PCI_L1SS_CTL2, cap++);
+ pci_read_config_dword(parent, parent->l1ss + PCI_L1SS_CTL1, cap++);
}
void pci_restore_aspm_l1ss_state(struct pci_dev *pdev)
diff --git a/drivers/pci/pcie/bwctrl.c b/drivers/pci/pcie/bwctrl.c
index b59cacc740fa..0a5e7efbce2c 100644
--- a/drivers/pci/pcie/bwctrl.c
+++ b/drivers/pci/pcie/bwctrl.c
@@ -303,14 +303,17 @@ static int pcie_bwnotif_probe(struct pcie_device *srv)
if (ret)
return ret;
- ret = devm_request_irq(&srv->device, srv->irq, pcie_bwnotif_irq,
- IRQF_SHARED, "PCIe bwctrl", srv);
- if (ret)
- return ret;
-
scoped_guard(rwsem_write, &pcie_bwctrl_setspeed_rwsem) {
scoped_guard(rwsem_write, &pcie_bwctrl_lbms_rwsem) {
- port->link_bwctrl = no_free_ptr(data);
+ port->link_bwctrl = data;
+
+ ret = request_irq(srv->irq, pcie_bwnotif_irq,
+ IRQF_SHARED, "PCIe bwctrl", srv);
+ if (ret) {
+ port->link_bwctrl = NULL;
+ return ret;
+ }
+
pcie_bwnotif_enable(srv);
}
}
@@ -331,11 +334,15 @@ static void pcie_bwnotif_remove(struct pcie_device *srv)
pcie_cooling_device_unregister(data->cdev);
- pcie_bwnotif_disable(srv->port);
+ scoped_guard(rwsem_write, &pcie_bwctrl_setspeed_rwsem) {
+ scoped_guard(rwsem_write, &pcie_bwctrl_lbms_rwsem) {
+ pcie_bwnotif_disable(srv->port);
+
+ free_irq(srv->irq, srv);
- scoped_guard(rwsem_write, &pcie_bwctrl_setspeed_rwsem)
- scoped_guard(rwsem_write, &pcie_bwctrl_lbms_rwsem)
srv->port->link_bwctrl = NULL;
+ }
+ }
}
static int pcie_bwnotif_suspend(struct pcie_device *srv)
diff --git a/drivers/pci/pcie/dpc.c b/drivers/pci/pcie/dpc.c
index 2b6ef7efa3c1..242cabd5eeeb 100644
--- a/drivers/pci/pcie/dpc.c
+++ b/drivers/pci/pcie/dpc.c
@@ -190,7 +190,7 @@ out:
static void dpc_process_rp_pio_error(struct pci_dev *pdev)
{
u16 cap = pdev->dpc_cap, dpc_status, first_error;
- u32 status, mask, sev, syserr, exc, log, prefix;
+ u32 status, mask, sev, syserr, exc, log;
struct pcie_tlp_log tlp_log;
int i;
@@ -215,22 +215,18 @@ static void dpc_process_rp_pio_error(struct pci_dev *pdev)
first_error == i ? " (First)" : "");
}
- if (pdev->dpc_rp_log_size < 4)
+ if (pdev->dpc_rp_log_size < PCIE_STD_NUM_TLP_HEADERLOG)
goto clear_status;
- pcie_read_tlp_log(pdev, cap + PCI_EXP_DPC_RP_PIO_HEADER_LOG, &tlp_log);
- pci_err(pdev, "TLP Header: %#010x %#010x %#010x %#010x\n",
- tlp_log.dw[0], tlp_log.dw[1], tlp_log.dw[2], tlp_log.dw[3]);
+ pcie_read_tlp_log(pdev, cap + PCI_EXP_DPC_RP_PIO_HEADER_LOG,
+ cap + PCI_EXP_DPC_RP_PIO_TLPPREFIX_LOG,
+ dpc_tlp_log_len(pdev), &tlp_log);
+ pcie_print_tlp_log(pdev, &tlp_log, dev_fmt(""));
- if (pdev->dpc_rp_log_size < 5)
+ if (pdev->dpc_rp_log_size < PCIE_STD_NUM_TLP_HEADERLOG + 1)
goto clear_status;
pci_read_config_dword(pdev, cap + PCI_EXP_DPC_RP_PIO_IMPSPEC_LOG, &log);
pci_err(pdev, "RP PIO ImpSpec Log %#010x\n", log);
- for (i = 0; i < pdev->dpc_rp_log_size - 5; i++) {
- pci_read_config_dword(pdev,
- cap + PCI_EXP_DPC_RP_PIO_TLPPREFIX_LOG + i * 4, &prefix);
- pci_err(pdev, "TLP Prefix Header: dw%d, %#010x\n", i, prefix);
- }
clear_status:
pci_write_config_dword(pdev, cap + PCI_EXP_DPC_RP_PIO_STATUS, status);
}
@@ -404,7 +400,9 @@ void pci_dpc_init(struct pci_dev *pdev)
if (!pdev->dpc_rp_log_size) {
pdev->dpc_rp_log_size =
FIELD_GET(PCI_EXP_DPC_RP_PIO_LOG_SIZE, cap);
- if (pdev->dpc_rp_log_size < 4 || pdev->dpc_rp_log_size > 9) {
+ if (pdev->dpc_rp_log_size < PCIE_STD_NUM_TLP_HEADERLOG ||
+ pdev->dpc_rp_log_size > PCIE_STD_NUM_TLP_HEADERLOG + 1 +
+ PCIE_STD_MAX_TLP_PREFIXLOG) {
pci_err(pdev, "RP PIO log size %u is invalid\n",
pdev->dpc_rp_log_size);
pdev->dpc_rp_log_size = 0;
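
Spelled out with the new constants (assuming their standard values of 4 Header Log DWORDs and at most 4 Prefix Log DWORDs), the valid RP PIO Log Size range is still 4..9, with the ImpSpec Log DWORD sitting between the header and prefix DWORDs:

        /*
         * Worked example: a Root Port with the maximum RP PIO Log Size of
         * PCIE_STD_NUM_TLP_HEADERLOG + 1 + PCIE_STD_MAX_TLP_PREFIXLOG == 9
         * exposes 4 Header Log DWORDs, 1 ImpSpec Log DWORD and 4 TLP Prefix
         * Log DWORDs.  dpc_tlp_log_len() subtracts the ImpSpec DWORD, so
         * pcie_read_tlp_log() reads 8 DWORDs: dw[0..3] from
         * PCI_EXP_DPC_RP_PIO_HEADER_LOG and prefix[0..3] from
         * PCI_EXP_DPC_RP_PIO_TLPPREFIX_LOG.
         */
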
diff --git a/drivers/pci/pcie/portdrv.c b/drivers/pci/pcie/portdrv.c
index 5e10306b6308..02e73099bad0 100644
--- a/drivers/pci/pcie/portdrv.c
+++ b/drivers/pci/pcie/portdrv.c
@@ -265,12 +265,14 @@ static int get_port_device_capability(struct pci_dev *dev)
(pcie_ports_dpc_native || (services & PCIE_PORT_SERVICE_AER)))
services |= PCIE_PORT_SERVICE_DPC;
+ /* Enable bandwidth control if more than one speed is supported. */
if (pci_pcie_type(dev) == PCI_EXP_TYPE_DOWNSTREAM ||
pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT) {
u32 linkcap;
pcie_capability_read_dword(dev, PCI_EXP_LNKCAP, &linkcap);
- if (linkcap & PCI_EXP_LNKCAP_LBNC)
+ if (linkcap & PCI_EXP_LNKCAP_LBNC &&
+ hweight8(dev->supported_speeds) > 1)
services |= PCIE_PORT_SERVICE_BWCTRL;
}
diff --git a/drivers/pci/pcie/tlp.c b/drivers/pci/pcie/tlp.c
new file mode 100644
index 000000000000..0860b5da837f
--- /dev/null
+++ b/drivers/pci/pcie/tlp.c
@@ -0,0 +1,115 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PCIe TLP Log handling
+ *
+ * Copyright (C) 2024 Intel Corporation
+ */
+
+#include <linux/aer.h>
+#include <linux/array_size.h>
+#include <linux/pci.h>
+#include <linux/string.h>
+
+#include "../pci.h"
+
+/**
+ * aer_tlp_log_len - Calculate AER Capability TLP Header/Prefix Log length
+ * @dev: PCIe device
+ * @aercc: AER Capabilities and Control register value
+ *
+ * Return: TLP Header/Prefix Log length, in DWORDs
+ */
+unsigned int aer_tlp_log_len(struct pci_dev *dev, u32 aercc)
+{
+ return PCIE_STD_NUM_TLP_HEADERLOG +
+ ((aercc & PCI_ERR_CAP_PREFIX_LOG_PRESENT) ?
+ dev->eetlp_prefix_max : 0);
+}
+
+#ifdef CONFIG_PCIE_DPC
+/**
+ * dpc_tlp_log_len - Calculate DPC RP PIO TLP Header/Prefix Log length
+ * @dev: PCIe device
+ *
+ * Return: TLP Header/Prefix Log length, in DWORDs
+ */
+unsigned int dpc_tlp_log_len(struct pci_dev *dev)
+{
+ /* Remove ImpSpec Log register from the count */
+ if (dev->dpc_rp_log_size >= PCIE_STD_NUM_TLP_HEADERLOG + 1)
+ return dev->dpc_rp_log_size - 1;
+
+ return dev->dpc_rp_log_size;
+}
+#endif
+
+/**
+ * pcie_read_tlp_log - Read TLP Header/Prefix Log
+ * @dev: PCIe device
+ * @where: PCI Config offset of TLP Header Log
+ * @where2: PCI Config offset of TLP Prefix Log
+ * @tlp_len: TLP Log length (Header Log + TLP Prefix Log in DWORDs)
+ * @log: TLP Log structure to fill
+ *
+ * Fill @log from the TLP Header/Prefix Log registers, e.g., AER or DPC.
+ *
+ * Return: 0 on success, with @log filled in; negative error code on failure.
+ */
+int pcie_read_tlp_log(struct pci_dev *dev, int where, int where2,
+ unsigned int tlp_len, struct pcie_tlp_log *log)
+{
+ unsigned int i;
+ int off, ret;
+ u32 *to;
+
+ memset(log, 0, sizeof(*log));
+
+ for (i = 0; i < tlp_len; i++) {
+ if (i < PCIE_STD_NUM_TLP_HEADERLOG) {
+ off = where + i * 4;
+ to = &log->dw[i];
+ } else {
+ off = where2 + (i - PCIE_STD_NUM_TLP_HEADERLOG) * 4;
+ to = &log->prefix[i - PCIE_STD_NUM_TLP_HEADERLOG];
+ }
+
+ ret = pci_read_config_dword(dev, off, to);
+ if (ret)
+ return pcibios_err_to_errno(ret);
+ }
+
+ return 0;
+}
+
+#define EE_PREFIX_STR " E-E Prefixes:"
+
+/**
+ * pcie_print_tlp_log - Print TLP Header / Prefix Log contents
+ * @dev: PCIe device
+ * @log: TLP Log structure
+ * @pfx: String prefix
+ *
+ * Prints TLP Header and Prefix Log information held by @log.
+ */
+void pcie_print_tlp_log(const struct pci_dev *dev,
+ const struct pcie_tlp_log *log, const char *pfx)
+{
+ char buf[11 * (PCIE_STD_NUM_TLP_HEADERLOG + ARRAY_SIZE(log->prefix)) +
+ sizeof(EE_PREFIX_STR)];
+ unsigned int i;
+ int len;
+
+ len = scnprintf(buf, sizeof(buf), "%#010x %#010x %#010x %#010x",
+ log->dw[0], log->dw[1], log->dw[2], log->dw[3]);
+
+ if (log->prefix[0])
+ len += scnprintf(buf + len, sizeof(buf) - len, EE_PREFIX_STR);
+ for (i = 0; i < ARRAY_SIZE(log->prefix); i++) {
+ if (!log->prefix[i])
+ break;
+ len += scnprintf(buf + len, sizeof(buf) - len,
+ " %#010x", log->prefix[i]);
+ }
+
+ pci_err(dev, "%sTLP Header: %s\n", pfx, buf);
+}
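
Taken together, the helpers are meant to be called back to back by the error-reporting paths. A condensed sketch of the AER-style call sequence (aer_cap and aercc stand in for values the caller has already read; the surrounding function is omitted):

        struct pcie_tlp_log log;
        int ret;

        ret = pcie_read_tlp_log(pdev, aer_cap + PCI_ERR_HEADER_LOG,
                                aer_cap + PCI_ERR_PREFIX_LOG,
                                aer_tlp_log_len(pdev, aercc), &log);
        if (!ret)
                pcie_print_tlp_log(pdev, &log, "");
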
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index 2e81ab0f5a25..246744d8d268 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -165,40 +165,66 @@ static inline unsigned long decode_bar(struct pci_dev *dev, u32 bar)
#define PCI_COMMAND_DECODE_ENABLE (PCI_COMMAND_MEMORY | PCI_COMMAND_IO)
/**
+ * __pci_size_bars - Read the raw BAR mask for a range of PCI BARs
+ * @dev: the PCI device
+ * @count: number of BARs to size
+ * @pos: starting config space position
+ * @sizes: array to store mask values
+ * @rom: whether to use the ROM sizing mask, which avoids enabling the ROM BAR
+ *
+ * The provided @sizes array must be large enough to hold results for
+ * @count u32 BARs. The caller is responsible for disabling decode for the
+ * specified BAR range around calls to this function, which exists to avoid
+ * toggling decode around sizing each BAR individually, since that can incur
+ * non-trivial overhead in virtualized environments with very large PCI BARs.
+ */
+static void __pci_size_bars(struct pci_dev *dev, int count,
+ unsigned int pos, u32 *sizes, bool rom)
+{
+ u32 orig, mask = rom ? PCI_ROM_ADDRESS_MASK : ~0;
+ int i;
+
+ for (i = 0; i < count; i++, pos += 4, sizes++) {
+ pci_read_config_dword(dev, pos, &orig);
+ pci_write_config_dword(dev, pos, mask);
+ pci_read_config_dword(dev, pos, sizes);
+ pci_write_config_dword(dev, pos, orig);
+ }
+}
+
+void __pci_size_stdbars(struct pci_dev *dev, int count,
+ unsigned int pos, u32 *sizes)
+{
+ __pci_size_bars(dev, count, pos, sizes, false);
+}
+
+static void __pci_size_rom(struct pci_dev *dev, unsigned int pos, u32 *sizes)
+{
+ __pci_size_bars(dev, 1, pos, sizes, true);
+}
+
+/**
* __pci_read_base - Read a PCI BAR
* @dev: the PCI device
* @type: type of the BAR
* @res: resource buffer to be filled in
* @pos: BAR position in the config space
+ * @sizes: array of one or more pre-read BAR masks
*
* Returns 1 if the BAR is 64-bit, or 0 if 32-bit.
*/
int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
- struct resource *res, unsigned int pos)
+ struct resource *res, unsigned int pos, u32 *sizes)
{
- u32 l = 0, sz = 0, mask;
+ u32 l = 0, sz;
u64 l64, sz64, mask64;
- u16 orig_cmd;
struct pci_bus_region region, inverted_region;
const char *res_name = pci_resource_name(dev, res - dev->resource);
- mask = type ? PCI_ROM_ADDRESS_MASK : ~0;
-
- /* No printks while decoding is disabled! */
- if (!dev->mmio_always_on) {
- pci_read_config_word(dev, PCI_COMMAND, &orig_cmd);
- if (orig_cmd & PCI_COMMAND_DECODE_ENABLE) {
- pci_write_config_word(dev, PCI_COMMAND,
- orig_cmd & ~PCI_COMMAND_DECODE_ENABLE);
- }
- }
-
res->name = pci_name(dev);
pci_read_config_dword(dev, pos, &l);
- pci_write_config_dword(dev, pos, l | mask);
- pci_read_config_dword(dev, pos, &sz);
- pci_write_config_dword(dev, pos, l);
+ sz = sizes[0];
/*
* All bits set in sz means the device isn't working properly.
@@ -238,18 +264,13 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
if (res->flags & IORESOURCE_MEM_64) {
pci_read_config_dword(dev, pos + 4, &l);
- pci_write_config_dword(dev, pos + 4, ~0);
- pci_read_config_dword(dev, pos + 4, &sz);
- pci_write_config_dword(dev, pos + 4, l);
+ sz = sizes[1];
l64 |= ((u64)l << 32);
sz64 |= ((u64)sz << 32);
mask64 |= ((u64)~0 << 32);
}
- if (!dev->mmio_always_on && (orig_cmd & PCI_COMMAND_DECODE_ENABLE))
- pci_write_config_word(dev, PCI_COMMAND, orig_cmd);
-
if (!sz64)
goto fail;
@@ -318,9 +339,14 @@ out:
return (res->flags & IORESOURCE_MEM_64) ? 1 : 0;
}
-static void pci_read_bases(struct pci_dev *dev, unsigned int howmany, int rom)
+static __always_inline void pci_read_bases(struct pci_dev *dev,
+ unsigned int howmany, int rom)
{
+ u32 rombar, stdbars[PCI_STD_NUM_BARS];
unsigned int pos, reg;
+ u16 orig_cmd;
+
+ BUILD_BUG_ON(statically_true(howmany > PCI_STD_NUM_BARS));
if (dev->non_compliant_bars)
return;
@@ -329,10 +355,28 @@ static void pci_read_bases(struct pci_dev *dev, unsigned int howmany, int rom)
if (dev->is_virtfn)
return;
+ /* No printks while decoding is disabled! */
+ if (!dev->mmio_always_on) {
+ pci_read_config_word(dev, PCI_COMMAND, &orig_cmd);
+ if (orig_cmd & PCI_COMMAND_DECODE_ENABLE) {
+ pci_write_config_word(dev, PCI_COMMAND,
+ orig_cmd & ~PCI_COMMAND_DECODE_ENABLE);
+ }
+ }
+
+ __pci_size_stdbars(dev, howmany, PCI_BASE_ADDRESS_0, stdbars);
+ if (rom)
+ __pci_size_rom(dev, rom, &rombar);
+
+ if (!dev->mmio_always_on &&
+ (orig_cmd & PCI_COMMAND_DECODE_ENABLE))
+ pci_write_config_word(dev, PCI_COMMAND, orig_cmd);
+
for (pos = 0; pos < howmany; pos++) {
struct resource *res = &dev->resource[pos];
reg = PCI_BASE_ADDRESS_0 + (pos << 2);
- pos += __pci_read_base(dev, pci_bar_unknown, res, reg);
+ pos += __pci_read_base(dev, pci_bar_unknown,
+ res, reg, &stdbars[pos]);
}
if (rom) {
@@ -340,7 +384,7 @@ static void pci_read_bases(struct pci_dev *dev, unsigned int howmany, int rom)
dev->rom_base_reg = rom;
res->flags = IORESOURCE_MEM | IORESOURCE_PREFETCH |
IORESOURCE_READONLY | IORESOURCE_SIZEALIGN;
- __pci_read_base(dev, pci_bar_mem32, res, rom);
+ __pci_read_base(dev, pci_bar_mem32, res, rom, &rombar);
}
}
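
The sizing masks collected by __pci_size_bars() are decoded later in __pci_read_base() exactly as before; nothing about the classic write-ones/read-back algorithm changes. As a reminder of what a raw mask means, a standalone sketch for a 32-bit memory BAR (this is the textbook decode, not the kernel's internal implementation):

        /* Decode the size of a 32-bit memory BAR from its sizing readback */
        static u32 demo_mem_bar_size(u32 sizing_readback)
        {
                u32 mask = sizing_readback & PCI_BASE_ADDRESS_MEM_MASK;

                /* e.g. a readback of 0xfff00000 decodes to 0x00100000 (1 MiB) */
                return mask ? ~mask + 1 : 0;
        }
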
@@ -2251,8 +2295,8 @@ static void pci_configure_relaxed_ordering(struct pci_dev *dev)
static void pci_configure_eetlp_prefix(struct pci_dev *dev)
{
-#ifdef CONFIG_PCI_PASID
struct pci_dev *bridge;
+ unsigned int eetlp_max;
int pcie_type;
u32 cap;
@@ -2264,15 +2308,19 @@ static void pci_configure_eetlp_prefix(struct pci_dev *dev)
return;
pcie_type = pci_pcie_type(dev);
+
+ eetlp_max = FIELD_GET(PCI_EXP_DEVCAP2_EE_PREFIX_MAX, cap);
+ /* A Max End-End TLP Prefixes field value of 00b means 4 */
+ eetlp_max = eetlp_max ?: 4;
+
if (pcie_type == PCI_EXP_TYPE_ROOT_PORT ||
pcie_type == PCI_EXP_TYPE_RC_END)
- dev->eetlp_prefix_path = 1;
+ dev->eetlp_prefix_max = eetlp_max;
else {
bridge = pci_upstream_bridge(dev);
- if (bridge && bridge->eetlp_prefix_path)
- dev->eetlp_prefix_path = 1;
+ if (bridge && bridge->eetlp_prefix_max)
+ dev->eetlp_prefix_max = eetlp_max;
}
-#endif
}
static void pci_configure_serr(struct pci_dev *dev)
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 76f4df75b08a..82b21e34c545 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -12,6 +12,7 @@
* file, where their drivers can use them.
*/
+#include <linux/aer.h>
#include <linux/align.h>
#include <linux/bitfield.h>
#include <linux/types.h>
@@ -5521,7 +5522,7 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x443, quirk_intel_qat_vf_cap);
* AMD Matisse USB 3.0 Host Controller 0x149c
* Intel 82579LM Gigabit Ethernet Controller 0x1502
* Intel 82579V Gigabit Ethernet Controller 0x1503
- *
+ * Mediatek MT7922 802.11ax PCI Express Wireless Network Adapter 0x0616
*/
static void quirk_no_flr(struct pci_dev *dev)
{
@@ -5533,6 +5534,7 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_AMD, 0x149c, quirk_no_flr);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_AMD, 0x7901, quirk_no_flr);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x1502, quirk_no_flr);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x1503, quirk_no_flr);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_MEDIATEK, 0x0616, quirk_no_flr);
/* FLR may cause the SolidRun SNET DPU (rev 0x1) to hang */
static void quirk_no_flr_snet(struct pci_dev *dev)
@@ -5984,6 +5986,17 @@ SWITCHTEC_QUIRK(0x5552); /* PAXA 52XG5 */
SWITCHTEC_QUIRK(0x5536); /* PAXA 36XG5 */
SWITCHTEC_QUIRK(0x5528); /* PAXA 28XG5 */
+#define SWITCHTEC_PCI100X_QUIRK(vid) \
+ DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_EFAR, vid, \
+ PCI_CLASS_BRIDGE_OTHER, 8, quirk_switchtec_ntb_dma_alias)
+SWITCHTEC_PCI100X_QUIRK(0x1001); /* PCI1001XG4 */
+SWITCHTEC_PCI100X_QUIRK(0x1002); /* PCI1002XG4 */
+SWITCHTEC_PCI100X_QUIRK(0x1003); /* PCI1003XG4 */
+SWITCHTEC_PCI100X_QUIRK(0x1004); /* PCI1004XG4 */
+SWITCHTEC_PCI100X_QUIRK(0x1005); /* PCI1005XG4 */
+SWITCHTEC_PCI100X_QUIRK(0x1006); /* PCI1006XG4 */
+
/*
* The PLX NTB uses devfn proxy IDs to move TLPs between NT endpoints.
* These IDs are used to forward responses to the originator on the other
@@ -6233,8 +6246,9 @@ static void dpc_log_size(struct pci_dev *dev)
return;
if (FIELD_GET(PCI_EXP_DPC_RP_PIO_LOG_SIZE, val) == 0) {
- pci_info(dev, "Overriding RP PIO Log Size to 4\n");
- dev->dpc_rp_log_size = 4;
+ pci_info(dev, "Overriding RP PIO Log Size to %d\n",
+ PCIE_STD_NUM_TLP_HEADERLOG);
+ dev->dpc_rp_log_size = PCIE_STD_NUM_TLP_HEADERLOG;
}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x461f, dpc_log_size);
@@ -6253,6 +6267,7 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x9a2b, dpc_log_size);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x9a2d, dpc_log_size);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x9a2f, dpc_log_size);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x9a31, dpc_log_size);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0xa72f, dpc_log_size);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0xa73f, dpc_log_size);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0xa76e, dpc_log_size);
#endif
diff --git a/drivers/pci/switch/switchtec.c b/drivers/pci/switch/switchtec.c
index c7e1089ffdaf..b14dfab04d84 100644
--- a/drivers/pci/switch/switchtec.c
+++ b/drivers/pci/switch/switchtec.c
@@ -1739,6 +1739,26 @@ static void switchtec_pci_remove(struct pci_dev *pdev)
.driver_data = gen, \
}
+#define SWITCHTEC_PCI100X_DEVICE(device_id, gen) \
+ { \
+ .vendor = PCI_VENDOR_ID_EFAR, \
+ .device = device_id, \
+ .subvendor = PCI_ANY_ID, \
+ .subdevice = PCI_ANY_ID, \
+ .class = (PCI_CLASS_MEMORY_OTHER << 8), \
+ .class_mask = 0xFFFFFFFF, \
+ .driver_data = gen, \
+ }, \
+ { \
+ .vendor = PCI_VENDOR_ID_EFAR, \
+ .device = device_id, \
+ .subvendor = PCI_ANY_ID, \
+ .subdevice = PCI_ANY_ID, \
+ .class = (PCI_CLASS_BRIDGE_OTHER << 8), \
+ .class_mask = 0xFFFFFFFF, \
+ .driver_data = gen, \
+ }
+
static const struct pci_device_id switchtec_pci_tbl[] = {
SWITCHTEC_PCI_DEVICE(0x8531, SWITCHTEC_GEN3), /* PFX 24xG3 */
SWITCHTEC_PCI_DEVICE(0x8532, SWITCHTEC_GEN3), /* PFX 32xG3 */
@@ -1833,6 +1853,12 @@ static const struct pci_device_id switchtec_pci_tbl[] = {
SWITCHTEC_PCI_DEVICE(0x5552, SWITCHTEC_GEN5), /* PAXA 52XG5 */
SWITCHTEC_PCI_DEVICE(0x5536, SWITCHTEC_GEN5), /* PAXA 36XG5 */
SWITCHTEC_PCI_DEVICE(0x5528, SWITCHTEC_GEN5), /* PAXA 28XG5 */
+ SWITCHTEC_PCI100X_DEVICE(0x1001, SWITCHTEC_GEN4), /* PCI1001 16XG4 */
+ SWITCHTEC_PCI100X_DEVICE(0x1002, SWITCHTEC_GEN4), /* PCI1002 12XG4 */
+ SWITCHTEC_PCI100X_DEVICE(0x1003, SWITCHTEC_GEN4), /* PCI1003 16XG4 */
+ SWITCHTEC_PCI100X_DEVICE(0x1004, SWITCHTEC_GEN4), /* PCI1004 16XG4 */
+ SWITCHTEC_PCI100X_DEVICE(0x1005, SWITCHTEC_GEN4), /* PCI1005 16XG4 */
+ SWITCHTEC_PCI100X_DEVICE(0x1006, SWITCHTEC_GEN4), /* PCI1006 16XG4 */
{0}
};
MODULE_DEVICE_TABLE(pci, switchtec_pci_tbl);
diff --git a/drivers/pci/tph.c b/drivers/pci/tph.c
index 1e604fbbda65..07de59ca2ebf 100644
--- a/drivers/pci/tph.c
+++ b/drivers/pci/tph.c
@@ -360,7 +360,7 @@ int pcie_tph_set_st_entry(struct pci_dev *pdev, unsigned int index, u16 tag)
return err;
}
- set_ctrl_reg_req_en(pdev, pdev->tph_mode);
+ set_ctrl_reg_req_en(pdev, pdev->tph_req_type);
pci_dbg(pdev, "set steering tag: %s table, index=%d, tag=%#04x\n",
(loc == PCI_TPH_LOC_MSIX) ? "MSI-X" : "ST", index, tag);
diff --git a/drivers/pci/vpd.c b/drivers/pci/vpd.c
index a469bcbc0da7..3d29b2602d0f 100644
--- a/drivers/pci/vpd.c
+++ b/drivers/pci/vpd.c
@@ -271,8 +271,8 @@ void pci_vpd_init(struct pci_dev *dev)
}
static ssize_t vpd_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr, char *buf, loff_t off,
- size_t count)
+ const struct bin_attribute *bin_attr, char *buf,
+ loff_t off, size_t count)
{
struct pci_dev *dev = to_pci_dev(kobj_to_dev(kobj));
struct pci_dev *vpd_dev = dev;
@@ -295,8 +295,8 @@ static ssize_t vpd_read(struct file *filp, struct kobject *kobj,
}
static ssize_t vpd_write(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr, char *buf, loff_t off,
- size_t count)
+ const struct bin_attribute *bin_attr, char *buf,
+ loff_t off, size_t count)
{
struct pci_dev *dev = to_pci_dev(kobj_to_dev(kobj));
struct pci_dev *vpd_dev = dev;
@@ -317,9 +317,9 @@ static ssize_t vpd_write(struct file *filp, struct kobject *kobj,
return ret;
}
-static BIN_ATTR(vpd, 0600, vpd_read, vpd_write, 0);
+static const BIN_ATTR(vpd, 0600, vpd_read, vpd_write, 0);
-static struct bin_attribute *vpd_attrs[] = {
+static const struct bin_attribute *const vpd_attrs[] = {
&bin_attr_vpd,
NULL,
};
@@ -336,7 +336,7 @@ static umode_t vpd_attr_is_visible(struct kobject *kobj,
}
const struct attribute_group pci_dev_vpd_attr_group = {
- .bin_attrs = vpd_attrs,
+ .bin_attrs_new = vpd_attrs,
.is_bin_visible = vpd_attr_is_visible,
};