Diffstat (limited to 'drivers/pci/pci.c')
-rw-r--r--  drivers/pci/pci.c | 143
1 file changed, 70 insertions(+), 73 deletions(-)
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index f489256cd9de..640d339a447d 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -1879,7 +1879,7 @@ static void pci_restore_rebar_state(struct pci_dev *pdev)
unsigned int pos, nbars, i;
u32 ctrl;
- pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_REBAR);
+ pos = pdev->rebar_cap;
if (!pos)
return;
@@ -1892,7 +1892,7 @@ static void pci_restore_rebar_state(struct pci_dev *pdev)
pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
bar_idx = ctrl & PCI_REBAR_CTRL_BAR_IDX;
- res = pdev->resource + bar_idx;
+ res = pci_resource_n(pdev, bar_idx);
size = pci_rebar_bytes_to_size(resource_size(res));
ctrl &= ~PCI_REBAR_CTRL_BAR_SIZE;
ctrl |= FIELD_PREP(PCI_REBAR_CTRL_BAR_SIZE, size);
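
[Editor's note: pci_resource_n() is the accessor form of the pointer arithmetic it replaces here. A minimal sketch of the equivalence; the helper name demo_rebar_res is ours, not part of the patch:

#include <linux/pci.h>

/* Sketch: pci_resource_n(pdev, bar_idx) resolves to the address of the
 * bar_idx-th entry of pdev->resource[], i.e. the same pointer that the
 * old "pdev->resource + bar_idx" arithmetic produced, but through the
 * dedicated accessor.
 */
static struct resource *demo_rebar_res(struct pci_dev *pdev, int bar_idx)
{
        return pci_resource_n(pdev, bar_idx);
}
]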
@@ -3030,8 +3030,12 @@ static const struct dmi_system_id bridge_d3_blacklist[] = {
* pci_bridge_d3_possible - Is it possible to put the bridge into D3
* @bridge: Bridge to check
*
- * This function checks if it is possible to move the bridge to D3.
- * Currently we only allow D3 for recent enough PCIe ports and Thunderbolt.
+ * Currently we only allow D3 for some PCIe ports and for Thunderbolt.
+ *
+ * Return: Whether it is possible to move the bridge to D3.
+ *
+ * The return value is guaranteed to be constant across the entire lifetime
+ * of the bridge, including its hot-removal.
*/
bool pci_bridge_d3_possible(struct pci_dev *bridge)
{
@@ -3075,10 +3079,10 @@ bool pci_bridge_d3_possible(struct pci_dev *bridge)
return false;
/*
- * It should be safe to put PCIe ports from 2015 or newer
- * to D3.
+ * Out of caution, we only allow PCIe ports from 2015 or newer
+ * into D3 on x86.
*/
- if (dmi_get_bios_year() >= 2015)
+ if (!IS_ENABLED(CONFIG_X86) || dmi_get_bios_year() >= 2015)
return true;
break;
}
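
[Editor's note: a condensed sketch of the resulting policy; the helper name is ours. On non-x86 builds IS_ENABLED(CONFIG_X86) evaluates to 0 at compile time, so the DMI lookup is never reached:

#include <linux/dmi.h>
#include <linux/kconfig.h>
#include <linux/types.h>

/* Sketch: D3 is always considered possible for these ports on non-x86;
 * on x86 it is gated on a BIOS date of 2015 or later.  When no parseable
 * DMI date exists, dmi_get_bios_year() does not return a year >= 2015,
 * so unknown x86 systems stay conservative.
 */
static bool demo_port_d3_allowed(void)
{
        return !IS_ENABLED(CONFIG_X86) || dmi_get_bios_year() >= 2015;
}
]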
@@ -3192,6 +3196,12 @@ void pci_d3cold_disable(struct pci_dev *dev)
}
EXPORT_SYMBOL_GPL(pci_d3cold_disable);
+void pci_pm_power_up_and_verify_state(struct pci_dev *pci_dev)
+{
+ pci_power_up(pci_dev);
+ pci_update_current_state(pci_dev, PCI_D0);
+}
+
/**
* pci_pm_init - Initialize PM functions of given PCI device
* @dev: PCI device to handle.
@@ -3202,9 +3212,6 @@ void pci_pm_init(struct pci_dev *dev)
u16 status;
u16 pmc;
- pm_runtime_forbid(&dev->dev);
- pm_runtime_set_active(&dev->dev);
- pm_runtime_enable(&dev->dev);
device_enable_async_suspend(&dev->dev);
dev->wakeup_prepared = false;
@@ -3214,14 +3221,14 @@ void pci_pm_init(struct pci_dev *dev)
/* find PCI PM capability in list */
pm = pci_find_capability(dev, PCI_CAP_ID_PM);
if (!pm)
- return;
+ goto poweron;
/* Check device's ability to generate PME# */
pci_read_config_word(dev, pm + PCI_PM_PMC, &pmc);
if ((pmc & PCI_PM_CAP_VER_MASK) > 3) {
pci_err(dev, "unsupported PM cap regs version (%u)\n",
pmc & PCI_PM_CAP_VER_MASK);
- return;
+ goto poweron;
}
dev->pm_cap = pm;
@@ -3266,6 +3273,11 @@ void pci_pm_init(struct pci_dev *dev)
pci_read_config_word(dev, PCI_STATUS, &status);
if (status & PCI_STATUS_IMM_READY)
dev->imm_ready = 1;
+poweron:
+ pci_pm_power_up_and_verify_state(dev);
+ pm_runtime_forbid(&dev->dev);
+ pm_runtime_set_active(&dev->dev);
+ pm_runtime_enable(&dev->dev);
}
static unsigned long pci_ea_flags(struct pci_dev *dev, u8 prop)
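
[Editor's note: the net effect of the reordering, sketched with the names used in the patch. Both exit paths, with and without a usable PM capability, now funnel into the same tail, so runtime PM is armed only after the device is in D0 and dev->current_state matches the hardware. demo_pm_init_tail is ours, for illustration:

#include <linux/pci.h>
#include <linux/pm_runtime.h>

/* Sketch of the post-patch tail of pci_pm_init(): power-up first,
 * runtime-PM bookkeeping second.
 */
static void demo_pm_init_tail(struct pci_dev *dev)
{
        pci_pm_power_up_and_verify_state(dev); /* device in D0, state synced */
        pm_runtime_forbid(&dev->dev);          /* userspace must opt in */
        pm_runtime_set_active(&dev->dev);
        pm_runtime_enable(&dev->dev);
}
]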
@@ -3726,6 +3738,11 @@ void pci_acs_init(struct pci_dev *dev)
pci_enable_acs(dev);
}
+void pci_rebar_init(struct pci_dev *pdev)
+{
+ pdev->rebar_cap = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_REBAR);
+}
+
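
[Editor's note: with the offset cached in pdev->rebar_cap by pci_rebar_init(), a support check reduces to a field test instead of a config-space walk. A trivial consumer-side sketch; the helper name is ours:

#include <linux/pci.h>
#include <linux/types.h>

/* Sketch: zero means no Resizable BAR extended capability was found. */
static bool demo_has_rebar(struct pci_dev *pdev)
{
        return pdev->rebar_cap != 0;
}
]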
/**
* pci_rebar_find_pos - find position of resize ctrl reg for BAR
* @pdev: PCI device
@@ -3740,7 +3757,7 @@ static int pci_rebar_find_pos(struct pci_dev *pdev, int bar)
unsigned int pos, nbars, i;
u32 ctrl;
- pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_REBAR);
+ pos = pdev->rebar_cap;
if (!pos)
return -ENOTSUPP;
@@ -3765,7 +3782,7 @@ static int pci_rebar_find_pos(struct pci_dev *pdev, int bar)
* @bar: BAR to query
*
* Get the possible sizes of a resizable BAR as bitmask defined in the spec
- * (bit 0=1MB, bit 19=512GB). Returns 0 if BAR isn't resizable.
+ * (bit 0=1MB, bit 31=128TB). Returns 0 if BAR isn't resizable.
*/
u32 pci_rebar_get_possible_sizes(struct pci_dev *pdev, int bar)
{
@@ -3813,7 +3830,7 @@ int pci_rebar_get_current_size(struct pci_dev *pdev, int bar)
* pci_rebar_set_size - set a new size for a BAR
* @pdev: PCI device
* @bar: BAR to set size to
- * @size: new size as defined in the spec (0=1MB, 19=512GB)
+ * @size: new size as defined in the spec (0=1MB, 31=128TB)
*
* Set the new size of a BAR as defined in the spec.
* Returns zero if resizing was successful, error code otherwise.
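
[Editor's note: the encoding is a power-of-two ladder, with size value s covering 2^(s+20) bytes, so 0 is 1 MB and 19 is 512 GB. A sketch of the conversions both ways; the helper names are ours, and the kernel's pci_rebar_bytes_to_size() is the clamped equivalent of the second helper:

#include <linux/log2.h>
#include <linux/types.h>

/* Sketch: ReBAR size encoding, 0 = 1MB, 19 = 512GB, each step doubling. */
static u64 demo_rebar_size_to_bytes(int size)
{
        return 1ULL << (size + 20);
}

static int demo_rebar_bytes_to_size(u64 bytes)
{
        /* assumes bytes is a power of two and at least 1MB */
        return ilog2(bytes) - 20;
}
]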
@@ -3932,16 +3949,6 @@ void pci_release_region(struct pci_dev *pdev, int bar)
if (!pci_bar_index_is_valid(bar))
return;
- /*
- * This is done for backwards compatibility, because the old PCI devres
- * API had a mode in which the function became managed if it had been
- * enabled with pcim_enable_device() instead of pci_enable_device().
- */
- if (pci_is_managed(pdev)) {
- pcim_release_region(pdev, bar);
- return;
- }
-
if (pci_resource_len(pdev, bar) == 0)
return;
if (pci_resource_flags(pdev, bar) & IORESOURCE_IO)
@@ -3979,13 +3986,6 @@ static int __pci_request_region(struct pci_dev *pdev, int bar,
if (!pci_bar_index_is_valid(bar))
return -EINVAL;
- if (pci_is_managed(pdev)) {
- if (exclusive == IORESOURCE_EXCLUSIVE)
- return pcim_request_region_exclusive(pdev, bar, name);
-
- return pcim_request_region(pdev, bar, name);
- }
-
if (pci_resource_len(pdev, bar) == 0)
return 0;
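
[Editor's note: with the hybrid devres behavior removed, managed cleanup must be requested explicitly through the pcim_* API rather than inherited from a prior pcim_enable_device(). A minimal probe sketch; the driver, its name string, and the BAR number are hypothetical:

#include <linux/pci.h>

/* Hypothetical probe: both the device enable and the BAR 0 request are
 * devres-managed, so nothing needs releasing on the error paths or in
 * the driver's remove() callback.
 */
static int demo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
        int ret;

        ret = pcim_enable_device(pdev);
        if (ret)
                return ret;

        return pcim_request_region(pdev, 0, "demo");
}
]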
@@ -4022,11 +4022,6 @@ err_out:
*
* Returns 0 on success, or %EBUSY on error. A warning
* message is also printed on failure.
- *
- * NOTE:
- * This is a "hybrid" function: It's normally unmanaged, but becomes managed
- * when pcim_enable_device() has been called in advance. This hybrid feature is
- * DEPRECATED! If you want managed cleanup, use the pcim_* functions instead.
*/
int pci_request_region(struct pci_dev *pdev, int bar, const char *name)
{
@@ -4079,11 +4074,6 @@ err_out:
* @name: Name of the driver requesting the resources
*
* Returns: 0 on success, negative error code on failure.
- *
- * NOTE:
- * This is a "hybrid" function: It's normally unmanaged, but becomes managed
- * when pcim_enable_device() has been called in advance. This hybrid feature is
- * DEPRECATED! If you want managed cleanup, use the pcim_* functions instead.
*/
int pci_request_selected_regions(struct pci_dev *pdev, int bars,
const char *name)
@@ -4099,11 +4089,6 @@ EXPORT_SYMBOL(pci_request_selected_regions);
* @name: name of the driver requesting the resources
*
* Returns: 0 on success, negative error code on failure.
- *
- * NOTE:
- * This is a "hybrid" function: It's normally unmanaged, but becomes managed
- * when pcim_enable_device() has been called in advance. This hybrid feature is
- * DEPRECATED! If you want managed cleanup, use the pcim_* functions instead.
*/
int pci_request_selected_regions_exclusive(struct pci_dev *pdev, int bars,
const char *name)
@@ -4139,11 +4124,6 @@ EXPORT_SYMBOL(pci_release_regions);
*
* Returns 0 on success, or %EBUSY on error. A warning
* message is also printed on failure.
- *
- * NOTE:
- * This is a "hybrid" function: It's normally unmanaged, but becomes managed
- * when pcim_enable_device() has been called in advance. This hybrid feature is
- * DEPRECATED! If you want managed cleanup, use the pcim_* functions instead.
*/
int pci_request_regions(struct pci_dev *pdev, const char *name)
{
@@ -4168,11 +4148,6 @@ EXPORT_SYMBOL(pci_request_regions);
*
* Returns 0 on success, or %EBUSY on error. A warning message is also
* printed on failure.
- *
- * NOTE:
- * This is a "hybrid" function: It's normally unmanaged, but becomes managed
- * when pcim_enable_device() has been called in advance. This hybrid feature is
- * DEPRECATED! If you want managed cleanup, use the pcim_* functions instead.
*/
int pci_request_regions_exclusive(struct pci_dev *pdev, const char *name)
{
@@ -4252,7 +4227,7 @@ unsigned long __weak pci_address_to_pio(phys_addr_t address)
#ifndef pci_remap_iospace
int pci_remap_iospace(const struct resource *res, phys_addr_t phys_addr)
{
-#if defined(PCI_IOBASE) && defined(CONFIG_MMU)
+#if defined(PCI_IOBASE)
unsigned long vaddr = (unsigned long)PCI_IOBASE + res->start;
if (!(res->flags & IORESOURCE_IO))
@@ -4285,7 +4260,7 @@ EXPORT_SYMBOL(pci_remap_iospace);
*/
void pci_unmap_iospace(struct resource *res)
{
-#if defined(PCI_IOBASE) && defined(CONFIG_MMU)
+#if defined(PCI_IOBASE)
unsigned long vaddr = (unsigned long)PCI_IOBASE + res->start;
vunmap_range(vaddr, vaddr + resource_size(res));
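
[Editor's note: for context, a typical host-bridge use of this pair, sketched under the assumption that the resource and physical address come from the bridge's firmware description:

#include <linux/ioport.h>
#include <linux/pci.h>

/* Sketch: map an I/O window into the fixed PCI_IOBASE virtual area for
 * the lifetime of the bus, then tear it down again on the unwind path.
 */
static int demo_map_io_window(struct resource *res, phys_addr_t phys)
{
        int ret;

        ret = pci_remap_iospace(res, phys);
        if (ret)
                return ret;

        /* ... bus enumeration and use ... */

        pci_unmap_iospace(res);
        return 0;
}
]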
@@ -4713,6 +4688,11 @@ static int pcie_wait_for_link_status(struct pci_dev *pdev,
* @pdev: Device whose link to retrain.
* @use_lt: Use the LT bit if TRUE, or the DLLLA bit if FALSE, for status.
*
+ * Trigger retraining of the PCIe Link and wait for the completion of the
+ * retraining. As link retraining is known to assert LBMS and may change
+ * the Link Speed, LBMS is cleared after the retraining and the Link Speed
+ * of the subordinate bus is updated.
+ *
* Retrain completion status is retrieved from the Link Status Register
* according to @use_lt. It is not verified whether the use of the DLLLA
* bit is valid.
@@ -4752,7 +4732,19 @@ int pcie_retrain_link(struct pci_dev *pdev, bool use_lt)
* to track link speed or width changes made by hardware itself
* in attempt to correct unreliable link operation.
*/
- pcie_reset_lbms_count(pdev);
+ pcie_reset_lbms(pdev);
+
+ /*
+ * Ensure the Link Speed updates after retraining in case the Link
+ * Speed was changed because of the retraining. While the bwctrl's
+ * IRQ handler normally picks up the new Link Speed, clearing LBMS
+ * races with the IRQ handler reading the Link Status register and
+ * can result in the handler returning early without updating the
+ * Link Speed.
+ */
+ if (pdev->subordinate)
+ pcie_update_link_speed(pdev->subordinate);
+
return rc;
}
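
[Editor's note: a caller-side sketch of that guarantee; the helper name is ours, and pcie_retrain_link() and pci_speed_string() are PCI-core internals declared in drivers/pci/pci.h. Once pcie_retrain_link() returns, the subordinate bus's cached speed can be reported without re-reading the Link Status register:

#include <linux/pci.h>

#include "pci.h" /* pcie_retrain_link(), pci_speed_string() */

/* Sketch: retrain a port's link using the LT bit for completion status,
 * then report the refreshed speed of its secondary bus.
 */
static int demo_retrain_and_report(struct pci_dev *port)
{
        int ret = pcie_retrain_link(port, true);

        if (!ret && port->subordinate)
                pci_info(port, "link retrained at %s\n",
                         pci_speed_string(port->subordinate->cur_bus_speed));
        return ret;
}
]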
@@ -4780,7 +4772,7 @@ static bool pcie_wait_for_link_delay(struct pci_dev *pdev, bool active,
/*
* PCIe r4.0 sec 6.6.1, a component must enter LTSSM Detect within 20ms,
- * after which we should expect an link active if the reset was
+ * after which we should expect the link to be active if the reset was
* successful. If so, software must wait a minimum 100ms before sending
* configuration requests to devices downstream this port.
*
@@ -4949,7 +4941,7 @@ int pci_bridge_wait_for_secondary_bus(struct pci_dev *dev, char *reset_type)
delay);
if (!pcie_wait_for_link_delay(dev, true, delay)) {
/* Did not train, no need to wait any further */
- pci_info(dev, "Data Link Layer Link Active not set in 1000 msec\n");
+ pci_info(dev, "Data Link Layer Link Active not set in %d msec\n", delay);
return -ENOTTY;
}
@@ -5244,6 +5236,7 @@ const struct pci_reset_fn_method pci_reset_fn_methods[] = {
int __pci_reset_function_locked(struct pci_dev *dev)
{
int i, m, rc;
+ const struct pci_reset_fn_method *method;
might_sleep();
@@ -5260,9 +5253,13 @@ int __pci_reset_function_locked(struct pci_dev *dev)
if (!m)
return -ENOTTY;
- rc = pci_reset_fn_methods[m].reset_fn(dev, PCI_RESET_DO_RESET);
+ method = &pci_reset_fn_methods[m];
+ pci_dbg(dev, "reset via %s\n", method->name);
+ rc = method->reset_fn(dev, PCI_RESET_DO_RESET);
if (!rc)
return 0;
+
+ pci_dbg(dev, "%s failed with %d\n", method->name, rc);
if (rc != -ENOTTY)
return rc;
}
@@ -5528,7 +5525,8 @@ static void pci_slot_unlock(struct pci_slot *slot)
continue;
if (dev->subordinate)
pci_bus_unlock(dev->subordinate);
- pci_dev_unlock(dev);
+ else
+ pci_dev_unlock(dev);
}
}
@@ -6204,21 +6202,25 @@ void __pcie_print_link_status(struct pci_dev *dev, bool verbose)
enum pci_bus_speed speed, speed_cap;
struct pci_dev *limiting_dev = NULL;
u32 bw_avail, bw_cap;
+ char *flit_mode = "";
bw_cap = pcie_bandwidth_capable(dev, &speed_cap, &width_cap);
bw_avail = pcie_bandwidth_available(dev, &limiting_dev, &speed, &width);
+ if (dev->bus && dev->bus->flit_mode)
+ flit_mode = ", in Flit mode";
+
if (bw_avail >= bw_cap && verbose)
- pci_info(dev, "%u.%03u Gb/s available PCIe bandwidth (%s x%d link)\n",
+ pci_info(dev, "%u.%03u Gb/s available PCIe bandwidth (%s x%d link)%s\n",
bw_cap / 1000, bw_cap % 1000,
- pci_speed_string(speed_cap), width_cap);
+ pci_speed_string(speed_cap), width_cap, flit_mode);
else if (bw_avail < bw_cap)
- pci_info(dev, "%u.%03u Gb/s available PCIe bandwidth, limited by %s x%d link at %s (capable of %u.%03u Gb/s with %s x%d link)\n",
+ pci_info(dev, "%u.%03u Gb/s available PCIe bandwidth, limited by %s x%d link at %s (capable of %u.%03u Gb/s with %s x%d link)%s\n",
bw_avail / 1000, bw_avail % 1000,
pci_speed_string(speed), width,
limiting_dev ? pci_name(limiting_dev) : "<unknown>",
bw_cap / 1000, bw_cap % 1000,
- pci_speed_string(speed_cap), width_cap);
+ pci_speed_string(speed_cap), width_cap, flit_mode);
}
/**
@@ -6788,11 +6790,6 @@ int __weak pci_ext_cfg_avail(void)
return 1;
}
-void __weak pci_fixup_cardbus(struct pci_bus *bus)
-{
-}
-EXPORT_SYMBOL(pci_fixup_cardbus);
-
static int __init pci_setup(char *str)
{
while (str) {