Diffstat (limited to 'drivers')
27 files changed, 753 insertions, 285 deletions
diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c index 81bf71b10d44..99df00f64306 100644 --- a/drivers/acpi/apei/ghes.c +++ b/drivers/acpi/apei/ghes.c @@ -79,6 +79,12 @@ ((struct acpi_hest_generic_status *) \ ((struct ghes_estatus_node *)(estatus_node) + 1)) +#define GHES_VENDOR_ENTRY_LEN(gdata_len) \ + (sizeof(struct ghes_vendor_record_entry) + (gdata_len)) +#define GHES_GDATA_FROM_VENDOR_ENTRY(vendor_entry) \ + ((struct acpi_hest_generic_data *) \ + ((struct ghes_vendor_record_entry *)(vendor_entry) + 1)) + /* * NMI-like notifications vary by architecture, before the compiler can prune * unused static functions it needs a value for these enums. @@ -123,6 +129,12 @@ static DEFINE_MUTEX(ghes_list_mutex); */ static DEFINE_SPINLOCK(ghes_notify_lock_irq); +struct ghes_vendor_record_entry { + struct work_struct work; + int error_severity; + char vendor_record[]; +}; + static struct gen_pool *ghes_estatus_pool; static unsigned long ghes_estatus_pool_size_request; @@ -511,6 +523,56 @@ static void ghes_handle_aer(struct acpi_hest_generic_data *gdata) #endif } +static BLOCKING_NOTIFIER_HEAD(vendor_record_notify_list); + +int ghes_register_vendor_record_notifier(struct notifier_block *nb) +{ + return blocking_notifier_chain_register(&vendor_record_notify_list, nb); +} +EXPORT_SYMBOL_GPL(ghes_register_vendor_record_notifier); + +void ghes_unregister_vendor_record_notifier(struct notifier_block *nb) +{ + blocking_notifier_chain_unregister(&vendor_record_notify_list, nb); +} +EXPORT_SYMBOL_GPL(ghes_unregister_vendor_record_notifier); + +static void ghes_vendor_record_work_func(struct work_struct *work) +{ + struct ghes_vendor_record_entry *entry; + struct acpi_hest_generic_data *gdata; + u32 len; + + entry = container_of(work, struct ghes_vendor_record_entry, work); + gdata = GHES_GDATA_FROM_VENDOR_ENTRY(entry); + + blocking_notifier_call_chain(&vendor_record_notify_list, + entry->error_severity, gdata); + + len = GHES_VENDOR_ENTRY_LEN(acpi_hest_get_record_size(gdata)); + gen_pool_free(ghes_estatus_pool, (unsigned long)entry, len); +} + +static void ghes_defer_non_standard_event(struct acpi_hest_generic_data *gdata, + int sev) +{ + struct acpi_hest_generic_data *copied_gdata; + struct ghes_vendor_record_entry *entry; + u32 len; + + len = GHES_VENDOR_ENTRY_LEN(acpi_hest_get_record_size(gdata)); + entry = (void *)gen_pool_alloc(ghes_estatus_pool, len); + if (!entry) + return; + + copied_gdata = GHES_GDATA_FROM_VENDOR_ENTRY(entry); + memcpy(copied_gdata, gdata, acpi_hest_get_record_size(gdata)); + entry->error_severity = sev; + + INIT_WORK(&entry->work, ghes_vendor_record_work_func); + schedule_work(&entry->work); +} + static bool ghes_do_proc(struct ghes *ghes, const struct acpi_hest_generic_status *estatus) { @@ -549,6 +611,7 @@ static bool ghes_do_proc(struct ghes *ghes, } else { void *err = acpi_hest_get_payload(gdata); + ghes_defer_non_standard_event(gdata, sev); log_non_standard_event(sec_type, fru_id, fru_text, sec_sev, err, gdata->error_data_length); diff --git a/drivers/acpi/pci_mcfg.c b/drivers/acpi/pci_mcfg.c index 54b36b7ad47d..7ddd57abadd1 100644 --- a/drivers/acpi/pci_mcfg.c +++ b/drivers/acpi/pci_mcfg.c @@ -142,6 +142,26 @@ static struct mcfg_fixup mcfg_quirks[] = { XGENE_V2_ECAM_MCFG(4, 0), XGENE_V2_ECAM_MCFG(4, 1), XGENE_V2_ECAM_MCFG(4, 2), + +#define ALTRA_ECAM_QUIRK(rev, seg) \ + { "Ampere", "Altra ", rev, seg, MCFG_BUS_ANY, &pci_32b_read_ops } + + ALTRA_ECAM_QUIRK(1, 0), + ALTRA_ECAM_QUIRK(1, 1), + ALTRA_ECAM_QUIRK(1, 2), + ALTRA_ECAM_QUIRK(1, 3), + ALTRA_ECAM_QUIRK(1, 
4), + ALTRA_ECAM_QUIRK(1, 5), + ALTRA_ECAM_QUIRK(1, 6), + ALTRA_ECAM_QUIRK(1, 7), + ALTRA_ECAM_QUIRK(1, 8), + ALTRA_ECAM_QUIRK(1, 9), + ALTRA_ECAM_QUIRK(1, 10), + ALTRA_ECAM_QUIRK(1, 11), + ALTRA_ECAM_QUIRK(1, 12), + ALTRA_ECAM_QUIRK(1, 13), + ALTRA_ECAM_QUIRK(1, 14), + ALTRA_ECAM_QUIRK(1, 15), }; static char mcfg_oem_id[ACPI_OEM_ID_SIZE]; @@ -280,5 +300,5 @@ void __init pci_mmcfg_late_init(void) { int err = acpi_table_parse(ACPI_SIG_MCFG, pci_mcfg_parse); if (err) - pr_err("Failed to parse MCFG (%d)\n", err); + pr_debug("Failed to parse MCFG (%d)\n", err); } diff --git a/drivers/hid/intel-ish-hid/ipc/ipc.c b/drivers/hid/intel-ish-hid/ipc/ipc.c index 8f8dfdf64833..a45ac7fa417b 100644 --- a/drivers/hid/intel-ish-hid/ipc/ipc.c +++ b/drivers/hid/intel-ish-hid/ipc/ipc.c @@ -755,7 +755,7 @@ static int _ish_hw_reset(struct ishtp_device *dev) csr |= PCI_D3hot; pci_write_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, csr); - mdelay(pdev->d3_delay); + mdelay(pdev->d3hot_delay); csr &= ~PCI_PM_CTRL_STATE_MASK; csr |= PCI_D0; diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c index cec8124301c7..dd11c06ca7f9 100644 --- a/drivers/net/ethernet/marvell/sky2.c +++ b/drivers/net/ethernet/marvell/sky2.c @@ -5105,7 +5105,7 @@ static int sky2_probe(struct pci_dev *pdev, const struct pci_device_id *ent) INIT_WORK(&hw->restart_work, sky2_restart); pci_set_drvdata(pdev, hw); - pdev->d3_delay = 300; + pdev->d3hot_delay = 300; return 0; diff --git a/drivers/pci/Kconfig b/drivers/pci/Kconfig index 4bef5c2bae9f..d323b25ae27e 100644 --- a/drivers/pci/Kconfig +++ b/drivers/pci/Kconfig @@ -187,6 +187,68 @@ config PCI_HYPERV The PCI device frontend driver allows the kernel to import arbitrary PCI devices from a PCI backend to support PCI driver domains. +choice + prompt "PCI Express hierarchy optimization setting" + default PCIE_BUS_DEFAULT + depends on PCI && EXPERT + help + MPS (Max Payload Size) and MRRS (Max Read Request Size) are PCIe + device parameters that affect performance and the ability to + support hotplug and peer-to-peer DMA. + + The following choices set the MPS and MRRS optimization strategy + at compile-time. The choices are the same as those offered for + the kernel command-line parameter 'pci', i.e., + 'pci=pcie_bus_tune_off', 'pci=pcie_bus_safe', + 'pci=pcie_bus_perf', and 'pci=pcie_bus_peer2peer'. + + This is a compile-time setting and can be overridden by the above + command-line parameters. If unsure, choose PCIE_BUS_DEFAULT. + +config PCIE_BUS_TUNE_OFF + bool "Tune Off" + depends on PCI + help + Use the BIOS defaults; don't touch MPS at all. This is the same + as booting with 'pci=pcie_bus_tune_off'. + +config PCIE_BUS_DEFAULT + bool "Default" + depends on PCI + help + Default choice; ensure that the MPS matches upstream bridge. + +config PCIE_BUS_SAFE + bool "Safe" + depends on PCI + help + Use largest MPS that boot-time devices support. If you have a + closed system with no possibility of adding new devices, this + will use the largest MPS that's supported by all devices. This + is the same as booting with 'pci=pcie_bus_safe'. + +config PCIE_BUS_PERFORMANCE + bool "Performance" + depends on PCI + help + Use MPS and MRRS for best performance. Ensure that a given + device's MPS is no larger than its parent MPS, which allows us to + keep all switches/bridges to the max MPS supported by their + parent. This is the same as booting with 'pci=pcie_bus_perf'. + +config PCIE_BUS_PEER2PEER + bool "Peer2peer" + depends on PCI + help + Set MPS = 128 for all devices. 
MPS configuration effected by the + other options could cause the MPS on one root port to be + different than that of the MPS on another, which may cause + hot-added devices or peer-to-peer DMA to fail. Set MPS to the + smallest possible value (128B) system-wide to avoid these issues. + This is the same as booting with 'pci=pcie_bus_peer2peer'. + +endchoice + source "drivers/pci/hotplug/Kconfig" source "drivers/pci/controller/Kconfig" source "drivers/pci/endpoint/Kconfig" diff --git a/drivers/pci/controller/Kconfig b/drivers/pci/controller/Kconfig index a7aa22512a92..2260fe09c395 100644 --- a/drivers/pci/controller/Kconfig +++ b/drivers/pci/controller/Kconfig @@ -294,6 +294,13 @@ config PCI_LOONGSON Say Y here if you want to enable PCI controller support on Loongson systems. +config PCIE_HISI_ERR + depends on ACPI_APEI_GHES && (ARM64 || COMPILE_TEST) + bool "HiSilicon HIP PCIe controller error handling driver" + help + Say Y here if you want error handling support + for the PCIe controller's errors on HiSilicon HIP SoCs + source "drivers/pci/controller/dwc/Kconfig" source "drivers/pci/controller/mobiveil/Kconfig" source "drivers/pci/controller/cadence/Kconfig" diff --git a/drivers/pci/controller/Makefile b/drivers/pci/controller/Makefile index bcdbf49ab1e4..04c6edc285c5 100644 --- a/drivers/pci/controller/Makefile +++ b/drivers/pci/controller/Makefile @@ -31,6 +31,7 @@ obj-$(CONFIG_PCIE_TANGO_SMP8759) += pcie-tango.o obj-$(CONFIG_VMD) += vmd.o obj-$(CONFIG_PCIE_BRCMSTB) += pcie-brcmstb.o obj-$(CONFIG_PCI_LOONGSON) += pci-loongson.o +obj-$(CONFIG_PCIE_HISI_ERR) += pcie-hisi-error.o # pcie-hisi.o quirks are needed even without CONFIG_PCIE_DW obj-y += dwc/ obj-y += mobiveil/ diff --git a/drivers/pci/controller/pci-v3-semi.c b/drivers/pci/controller/pci-v3-semi.c index 1f54334f09f7..154a5398633c 100644 --- a/drivers/pci/controller/pci-v3-semi.c +++ b/drivers/pci/controller/pci-v3-semi.c @@ -658,7 +658,6 @@ static int v3_get_dma_range_config(struct v3_pci *v3, default: dev_err(v3->dev, "illegal dma memory chunk size\n"); return -EINVAL; - break; } val |= V3_PCI_MAP_M_REG_EN | V3_PCI_MAP_M_ENABLE; *pci_map = val; diff --git a/drivers/pci/controller/pcie-hisi-error.c b/drivers/pci/controller/pcie-hisi-error.c new file mode 100644 index 000000000000..7959c9c8d2bc --- /dev/null +++ b/drivers/pci/controller/pcie-hisi-error.c @@ -0,0 +1,327 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Driver for handling the PCIe controller errors on + * HiSilicon HIP SoCs. + * + * Copyright (c) 2020 HiSilicon Limited. + */ + +#include <linux/acpi.h> +#include <acpi/ghes.h> +#include <linux/bitops.h> +#include <linux/delay.h> +#include <linux/pci.h> +#include <linux/platform_device.h> +#include <linux/kfifo.h> +#include <linux/spinlock.h> + +/* HISI PCIe controller error definitions */ +#define HISI_PCIE_ERR_MISC_REGS 33 + +#define HISI_PCIE_LOCAL_VALID_VERSION BIT(0) +#define HISI_PCIE_LOCAL_VALID_SOC_ID BIT(1) +#define HISI_PCIE_LOCAL_VALID_SOCKET_ID BIT(2) +#define HISI_PCIE_LOCAL_VALID_NIMBUS_ID BIT(3) +#define HISI_PCIE_LOCAL_VALID_SUB_MODULE_ID BIT(4) +#define HISI_PCIE_LOCAL_VALID_CORE_ID BIT(5) +#define HISI_PCIE_LOCAL_VALID_PORT_ID BIT(6) +#define HISI_PCIE_LOCAL_VALID_ERR_TYPE BIT(7) +#define HISI_PCIE_LOCAL_VALID_ERR_SEVERITY BIT(8) +#define HISI_PCIE_LOCAL_VALID_ERR_MISC 9 + +static guid_t hisi_pcie_sec_guid = + GUID_INIT(0xB2889FC9, 0xE7D7, 0x4F9D, + 0xA8, 0x67, 0xAF, 0x42, 0xE9, 0x8B, 0xE7, 0x72); + +/* + * Firmware reports the socket port ID where the error occurred. 
These + * macros convert that to the core ID and core port ID required by the + * ACPI reset method. + */ +#define HISI_PCIE_PORT_ID(core, v) (((v) >> 1) + ((core) << 3)) +#define HISI_PCIE_CORE_ID(v) ((v) >> 3) +#define HISI_PCIE_CORE_PORT_ID(v) (((v) & 7) << 1) + +struct hisi_pcie_error_data { + u64 val_bits; + u8 version; + u8 soc_id; + u8 socket_id; + u8 nimbus_id; + u8 sub_module_id; + u8 core_id; + u8 port_id; + u8 err_severity; + u16 err_type; + u8 reserv[2]; + u32 err_misc[HISI_PCIE_ERR_MISC_REGS]; +}; + +struct hisi_pcie_error_private { + struct notifier_block nb; + struct device *dev; +}; + +enum hisi_pcie_submodule_id { + HISI_PCIE_SUB_MODULE_ID_AP, + HISI_PCIE_SUB_MODULE_ID_TL, + HISI_PCIE_SUB_MODULE_ID_MAC, + HISI_PCIE_SUB_MODULE_ID_DL, + HISI_PCIE_SUB_MODULE_ID_SDI, +}; + +static const char * const hisi_pcie_sub_module[] = { + [HISI_PCIE_SUB_MODULE_ID_AP] = "AP Layer", + [HISI_PCIE_SUB_MODULE_ID_TL] = "TL Layer", + [HISI_PCIE_SUB_MODULE_ID_MAC] = "MAC Layer", + [HISI_PCIE_SUB_MODULE_ID_DL] = "DL Layer", + [HISI_PCIE_SUB_MODULE_ID_SDI] = "SDI Layer", +}; + +enum hisi_pcie_err_severity { + HISI_PCIE_ERR_SEV_RECOVERABLE, + HISI_PCIE_ERR_SEV_FATAL, + HISI_PCIE_ERR_SEV_CORRECTED, + HISI_PCIE_ERR_SEV_NONE, +}; + +static const char * const hisi_pcie_error_sev[] = { + [HISI_PCIE_ERR_SEV_RECOVERABLE] = "recoverable", + [HISI_PCIE_ERR_SEV_FATAL] = "fatal", + [HISI_PCIE_ERR_SEV_CORRECTED] = "corrected", + [HISI_PCIE_ERR_SEV_NONE] = "none", +}; + +static const char *hisi_pcie_get_string(const char * const *array, + size_t n, u32 id) +{ + u32 index; + + for (index = 0; index < n; index++) { + if (index == id && array[index]) + return array[index]; + } + + return "unknown"; +} + +static int hisi_pcie_port_reset(struct platform_device *pdev, + u32 chip_id, u32 port_id) +{ + struct device *dev = &pdev->dev; + acpi_handle handle = ACPI_HANDLE(dev); + union acpi_object arg[3]; + struct acpi_object_list arg_list; + acpi_status s; + unsigned long long data = 0; + + arg[0].type = ACPI_TYPE_INTEGER; + arg[0].integer.value = chip_id; + arg[1].type = ACPI_TYPE_INTEGER; + arg[1].integer.value = HISI_PCIE_CORE_ID(port_id); + arg[2].type = ACPI_TYPE_INTEGER; + arg[2].integer.value = HISI_PCIE_CORE_PORT_ID(port_id); + + arg_list.count = 3; + arg_list.pointer = arg; + + s = acpi_evaluate_integer(handle, "RST", &arg_list, &data); + if (ACPI_FAILURE(s)) { + dev_err(dev, "No RST method\n"); + return -EIO; + } + + if (data) { + dev_err(dev, "Failed to Reset\n"); + return -EIO; + } + + return 0; +} + +static int hisi_pcie_port_do_recovery(struct platform_device *dev, + u32 chip_id, u32 port_id) +{ + acpi_status s; + struct device *device = &dev->dev; + acpi_handle root_handle = ACPI_HANDLE(device); + struct acpi_pci_root *pci_root; + struct pci_bus *root_bus; + struct pci_dev *pdev; + u32 domain, busnr, devfn; + + s = acpi_get_parent(root_handle, &root_handle); + if (ACPI_FAILURE(s)) + return -ENODEV; + pci_root = acpi_pci_find_root(root_handle); + if (!pci_root) + return -ENODEV; + root_bus = pci_root->bus; + domain = pci_root->segment; + + busnr = root_bus->number; + devfn = PCI_DEVFN(port_id, 0); + pdev = pci_get_domain_bus_and_slot(domain, busnr, devfn); + if (!pdev) { + dev_info(device, "Fail to get root port %04x:%02x:%02x.%d device\n", + domain, busnr, PCI_SLOT(devfn), PCI_FUNC(devfn)); + return -ENODEV; + } + + pci_stop_and_remove_bus_device_locked(pdev); + pci_dev_put(pdev); + + if (hisi_pcie_port_reset(dev, chip_id, port_id)) + return -EIO; + + /* + * The initialization time of subordinate devices 
after + * hot reset is no more than 1s, which is required by + * the PCI spec v5.0 sec 6.6.1. The time will shorten + * if Readiness Notifications mechanisms are used. But + * wait 1s here to adapt any conditions. + */ + ssleep(1UL); + + /* add root port and downstream devices */ + pci_lock_rescan_remove(); + pci_rescan_bus(root_bus); + pci_unlock_rescan_remove(); + + return 0; +} + +static void hisi_pcie_handle_error(struct platform_device *pdev, + const struct hisi_pcie_error_data *edata) +{ + struct device *dev = &pdev->dev; + int idx, rc; + const unsigned long valid_bits[] = {BITMAP_FROM_U64(edata->val_bits)}; + + if (edata->val_bits == 0) { + dev_warn(dev, "%s: no valid error information\n", __func__); + return; + } + + dev_info(dev, "\nHISI : HIP : PCIe controller error\n"); + if (edata->val_bits & HISI_PCIE_LOCAL_VALID_SOC_ID) + dev_info(dev, "Table version = %d\n", edata->version); + if (edata->val_bits & HISI_PCIE_LOCAL_VALID_SOCKET_ID) + dev_info(dev, "Socket ID = %d\n", edata->socket_id); + if (edata->val_bits & HISI_PCIE_LOCAL_VALID_NIMBUS_ID) + dev_info(dev, "Nimbus ID = %d\n", edata->nimbus_id); + if (edata->val_bits & HISI_PCIE_LOCAL_VALID_SUB_MODULE_ID) + dev_info(dev, "Sub Module = %s\n", + hisi_pcie_get_string(hisi_pcie_sub_module, + ARRAY_SIZE(hisi_pcie_sub_module), + edata->sub_module_id)); + if (edata->val_bits & HISI_PCIE_LOCAL_VALID_CORE_ID) + dev_info(dev, "Core ID = core%d\n", edata->core_id); + if (edata->val_bits & HISI_PCIE_LOCAL_VALID_PORT_ID) + dev_info(dev, "Port ID = port%d\n", edata->port_id); + if (edata->val_bits & HISI_PCIE_LOCAL_VALID_ERR_SEVERITY) + dev_info(dev, "Error severity = %s\n", + hisi_pcie_get_string(hisi_pcie_error_sev, + ARRAY_SIZE(hisi_pcie_error_sev), + edata->err_severity)); + if (edata->val_bits & HISI_PCIE_LOCAL_VALID_ERR_TYPE) + dev_info(dev, "Error type = 0x%x\n", edata->err_type); + + dev_info(dev, "Reg Dump:\n"); + idx = HISI_PCIE_LOCAL_VALID_ERR_MISC; + for_each_set_bit_from(idx, valid_bits, + HISI_PCIE_LOCAL_VALID_ERR_MISC + HISI_PCIE_ERR_MISC_REGS) + dev_info(dev, "ERR_MISC_%d = 0x%x\n", idx - HISI_PCIE_LOCAL_VALID_ERR_MISC, + edata->err_misc[idx - HISI_PCIE_LOCAL_VALID_ERR_MISC]); + + if (edata->err_severity != HISI_PCIE_ERR_SEV_RECOVERABLE) + return; + + /* Recovery for the PCIe controller errors, try reset + * PCI port for the error recovery + */ + rc = hisi_pcie_port_do_recovery(pdev, edata->socket_id, + HISI_PCIE_PORT_ID(edata->core_id, edata->port_id)); + if (rc) + dev_info(dev, "fail to do hisi pcie port reset\n"); +} + +static int hisi_pcie_notify_error(struct notifier_block *nb, + unsigned long event, void *data) +{ + struct acpi_hest_generic_data *gdata = data; + const struct hisi_pcie_error_data *error_data = acpi_hest_get_payload(gdata); + struct hisi_pcie_error_private *priv; + struct device *dev; + struct platform_device *pdev; + guid_t err_sec_guid; + u8 socket; + + import_guid(&err_sec_guid, gdata->section_type); + if (!guid_equal(&err_sec_guid, &hisi_pcie_sec_guid)) + return NOTIFY_DONE; + + priv = container_of(nb, struct hisi_pcie_error_private, nb); + dev = priv->dev; + + if (device_property_read_u8(dev, "socket", &socket)) + return NOTIFY_DONE; + + if (error_data->socket_id != socket) + return NOTIFY_DONE; + + pdev = container_of(dev, struct platform_device, dev); + hisi_pcie_handle_error(pdev, error_data); + + return NOTIFY_OK; +} + +static int hisi_pcie_error_handler_probe(struct platform_device *pdev) +{ + struct hisi_pcie_error_private *priv; + int ret; + + priv = devm_kzalloc(&pdev->dev, sizeof(*priv), 
GFP_KERNEL); + if (!priv) + return -ENOMEM; + + priv->nb.notifier_call = hisi_pcie_notify_error; + priv->dev = &pdev->dev; + ret = ghes_register_vendor_record_notifier(&priv->nb); + if (ret) { + dev_err(&pdev->dev, + "Failed to register hisi pcie controller error handler with apei\n"); + return ret; + } + + platform_set_drvdata(pdev, priv); + + return 0; +} + +static int hisi_pcie_error_handler_remove(struct platform_device *pdev) +{ + struct hisi_pcie_error_private *priv = platform_get_drvdata(pdev); + + ghes_unregister_vendor_record_notifier(&priv->nb); + + return 0; +} + +static const struct acpi_device_id hisi_pcie_acpi_match[] = { + { "HISI0361", 0 }, + { } +}; + +static struct platform_driver hisi_pcie_error_handler_driver = { + .driver = { + .name = "hisi-pcie-error-handler", + .acpi_match_table = hisi_pcie_acpi_match, + }, + .probe = hisi_pcie_error_handler_probe, + .remove = hisi_pcie_error_handler_remove, +}; +module_platform_driver(hisi_pcie_error_handler_driver); + +MODULE_DESCRIPTION("HiSilicon HIP PCIe controller error handling driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/pci/ecam.c b/drivers/pci/ecam.c index 8f065a42fc1a..b54d32a31669 100644 --- a/drivers/pci/ecam.c +++ b/drivers/pci/ecam.c @@ -168,4 +168,14 @@ const struct pci_ecam_ops pci_32b_ops = { .write = pci_generic_config_write32, } }; + +/* ECAM ops for 32-bit read only (non-compliant) */ +const struct pci_ecam_ops pci_32b_read_ops = { + .bus_shift = 20, + .pci_ops = { + .map_bus = pci_ecam_map_bus, + .read = pci_generic_config_read32, + .write = pci_generic_config_write, + } +}; #endif diff --git a/drivers/pci/hotplug/pciehp_ctrl.c b/drivers/pci/hotplug/pciehp_ctrl.c index 6503d15effbb..2f5f4bb42dcc 100644 --- a/drivers/pci/hotplug/pciehp_ctrl.c +++ b/drivers/pci/hotplug/pciehp_ctrl.c @@ -73,10 +73,8 @@ static int board_added(struct controller *ctrl) /* Check link training status */ retval = pciehp_check_link_status(ctrl); - if (retval) { - ctrl_err(ctrl, "Failed to check link status\n"); + if (retval) goto err_exit; - } /* Check for a power fault */ if (ctrl->power_fault_detected || pciehp_query_power_fault(ctrl)) { diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c index 53433b37e181..fb3840e222ad 100644 --- a/drivers/pci/hotplug/pciehp_hpc.c +++ b/drivers/pci/hotplug/pciehp_hpc.c @@ -283,8 +283,6 @@ static void pcie_wait_for_presence(struct pci_dev *pdev) msleep(10); timeout -= 10; } while (timeout > 0); - - pci_info(pdev, "Timeout waiting for Presence Detect\n"); } int pciehp_check_link_status(struct controller *ctrl) @@ -293,8 +291,10 @@ int pciehp_check_link_status(struct controller *ctrl) bool found; u16 lnk_status; - if (!pcie_wait_for_link(pdev, true)) + if (!pcie_wait_for_link(pdev, true)) { + ctrl_info(ctrl, "Slot(%s): No link\n", slot_name(ctrl)); return -1; + } if (ctrl->inband_presence_disabled) pcie_wait_for_presence(pdev); @@ -311,15 +311,18 @@ int pciehp_check_link_status(struct controller *ctrl) ctrl_dbg(ctrl, "%s: lnk_status = %x\n", __func__, lnk_status); if ((lnk_status & PCI_EXP_LNKSTA_LT) || !(lnk_status & PCI_EXP_LNKSTA_NLW)) { - ctrl_err(ctrl, "link training error: status %#06x\n", - lnk_status); + ctrl_info(ctrl, "Slot(%s): Cannot train link: status %#06x\n", + slot_name(ctrl), lnk_status); return -1; } pcie_update_link_speed(ctrl->pcie->port->subordinate, lnk_status); - if (!found) + if (!found) { + ctrl_info(ctrl, "Slot(%s): No device found\n", + slot_name(ctrl)); return -1; + } return 0; } diff --git a/drivers/pci/hotplug/rpadlpar_core.c 
b/drivers/pci/hotplug/rpadlpar_core.c index f979b7098acf..0a3c80ba66be 100644 --- a/drivers/pci/hotplug/rpadlpar_core.c +++ b/drivers/pci/hotplug/rpadlpar_core.c @@ -40,13 +40,13 @@ static DEFINE_MUTEX(rpadlpar_mutex); static struct device_node *find_vio_slot_node(char *drc_name) { struct device_node *parent = of_find_node_by_name(NULL, "vdevice"); - struct device_node *dn = NULL; + struct device_node *dn; int rc; if (!parent) return NULL; - while ((dn = of_get_next_child(parent, dn))) { + for_each_child_of_node(parent, dn) { rc = rpaphp_check_drc_props(dn, drc_name, NULL); if (rc == 0) break; @@ -60,10 +60,10 @@ static struct device_node *find_vio_slot_node(char *drc_name) static struct device_node *find_php_slot_pci_node(char *drc_name, char *drc_type) { - struct device_node *np = NULL; + struct device_node *np; int rc; - while ((np = of_find_node_by_name(np, "pci"))) { + for_each_node_by_name(np, "pci") { rc = rpaphp_check_drc_props(np, drc_name, drc_type); if (rc == 0) break; diff --git a/drivers/pci/hotplug/shpchp_ctrl.c b/drivers/pci/hotplug/shpchp_ctrl.c index afdc52d1cae7..aedd9dfd2a16 100644 --- a/drivers/pci/hotplug/shpchp_ctrl.c +++ b/drivers/pci/hotplug/shpchp_ctrl.c @@ -299,7 +299,6 @@ static int board_added(struct slot *p_slot) if (p_slot->status == 0xFF) { /* power fault occurred, but it was benign */ ctrl_dbg(ctrl, "%s: Power fault\n", __func__); - rc = POWER_FAILURE; p_slot->status = 0; goto err_exit; } diff --git a/drivers/pci/p2pdma.c b/drivers/pci/p2pdma.c index 64ebed129dbf..85fc9936fa9e 100644 --- a/drivers/pci/p2pdma.c +++ b/drivers/pci/p2pdma.c @@ -53,7 +53,7 @@ static ssize_t size_show(struct device *dev, struct device_attribute *attr, if (pdev->p2pdma->pool) size = gen_pool_size(pdev->p2pdma->pool); - return snprintf(buf, PAGE_SIZE, "%zd\n", size); + return scnprintf(buf, PAGE_SIZE, "%zd\n", size); } static DEVICE_ATTR_RO(size); @@ -66,7 +66,7 @@ static ssize_t available_show(struct device *dev, struct device_attribute *attr, if (pdev->p2pdma->pool) avail = gen_pool_avail(pdev->p2pdma->pool); - return snprintf(buf, PAGE_SIZE, "%zd\n", avail); + return scnprintf(buf, PAGE_SIZE, "%zd\n", avail); } static DEVICE_ATTR_RO(available); @@ -75,8 +75,8 @@ static ssize_t published_show(struct device *dev, struct device_attribute *attr, { struct pci_dev *pdev = to_pci_dev(dev); - return snprintf(buf, PAGE_SIZE, "%d\n", - pdev->p2pdma->p2pmem_published); + return scnprintf(buf, PAGE_SIZE, "%d\n", + pdev->p2pdma->p2pmem_published); } static DEVICE_ATTR_RO(published); @@ -761,7 +761,7 @@ struct scatterlist *pci_p2pmem_alloc_sgl(struct pci_dev *pdev, struct scatterlist *sg; void *addr; - sg = kzalloc(sizeof(*sg), GFP_KERNEL); + sg = kmalloc(sizeof(*sg), GFP_KERNEL); if (!sg) return NULL; diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c index d5869a03f748..154db9a47511 100644 --- a/drivers/pci/pci-acpi.c +++ b/drivers/pci/pci-acpi.c @@ -1167,7 +1167,7 @@ static struct acpi_device *acpi_pci_find_companion(struct device *dev) * @pdev: the PCI device whose delay is to be updated * @handle: ACPI handle of this device * - * Update the d3_delay and d3cold_delay of a PCI device from the ACPI _DSM + * Update the d3hot_delay and d3cold_delay of a PCI device from the ACPI _DSM * control method of either the device itself or the PCI host bridge. 
* * Function 8, "Reset Delay," applies to the entire hierarchy below a PCI @@ -1206,8 +1206,8 @@ static void pci_acpi_optimize_delay(struct pci_dev *pdev, } if (elements[3].type == ACPI_TYPE_INTEGER) { value = (int)elements[3].integer.value / 1000; - if (value < PCI_PM_D3_WAIT) - pdev->d3_delay = value; + if (value < PCI_PM_D3HOT_WAIT) + pdev->d3hot_delay = value; } } ACPI_FREE(obj); diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c index 449466f71040..40be221e69cf 100644 --- a/drivers/pci/pci-driver.c +++ b/drivers/pci/pci-driver.c @@ -969,12 +969,6 @@ static int pci_pm_resume(struct device *dev) #ifdef CONFIG_HIBERNATE_CALLBACKS -/* - * pcibios_pm_ops - provide arch-specific hooks when a PCI device is doing - * a hibernate transition - */ -struct dev_pm_ops __weak pcibios_pm_ops; - static int pci_pm_freeze(struct device *dev) { struct pci_dev *pci_dev = to_pci_dev(dev); @@ -1033,9 +1027,6 @@ static int pci_pm_freeze_noirq(struct device *dev) pci_pm_set_unknown_state(pci_dev); - if (pcibios_pm_ops.freeze_noirq) - return pcibios_pm_ops.freeze_noirq(dev); - return 0; } @@ -1043,13 +1034,6 @@ static int pci_pm_thaw_noirq(struct device *dev) { struct pci_dev *pci_dev = to_pci_dev(dev); const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; - int error; - - if (pcibios_pm_ops.thaw_noirq) { - error = pcibios_pm_ops.thaw_noirq(dev); - if (error) - return error; - } /* * The pm->thaw_noirq() callback assumes the device has been @@ -1174,9 +1158,6 @@ static int pci_pm_poweroff_noirq(struct device *dev) pci_fixup_device(pci_fixup_suspend_late, pci_dev); - if (pcibios_pm_ops.poweroff_noirq) - return pcibios_pm_ops.poweroff_noirq(dev); - return 0; } @@ -1184,13 +1165,6 @@ static int pci_pm_restore_noirq(struct device *dev) { struct pci_dev *pci_dev = to_pci_dev(dev); const struct dev_pm_ops *pm = dev->driver ? 
dev->driver->pm : NULL; - int error; - - if (pcibios_pm_ops.restore_noirq) { - error = pcibios_pm_ops.restore_noirq(dev); - if (error) - return error; - } pci_pm_default_resume_early(pci_dev); pci_fixup_device(pci_fixup_resume_early, pci_dev); diff --git a/drivers/pci/pci-pf-stub.c b/drivers/pci/pci-pf-stub.c index a0b2bd6c918a..45855a5e9fca 100644 --- a/drivers/pci/pci-pf-stub.c +++ b/drivers/pci/pci-pf-stub.c @@ -37,18 +37,6 @@ static struct pci_driver pf_stub_driver = { .probe = pci_pf_stub_probe, .sriov_configure = pci_sriov_configure_simple, }; - -static int __init pci_pf_stub_init(void) -{ - return pci_register_driver(&pf_stub_driver); -} - -static void __exit pci_pf_stub_exit(void) -{ - pci_unregister_driver(&pf_stub_driver); -} - -module_init(pci_pf_stub_init); -module_exit(pci_pf_stub_exit); +module_pci_driver(pf_stub_driver); MODULE_LICENSE("GPL"); diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c index 6d78df981d41..d15c881e2e7e 100644 --- a/drivers/pci/pci-sysfs.c +++ b/drivers/pci/pci-sysfs.c @@ -574,7 +574,7 @@ static ssize_t driver_override_show(struct device *dev, ssize_t len; device_lock(dev); - len = snprintf(buf, PAGE_SIZE, "%s\n", pdev->driver_override); + len = scnprintf(buf, PAGE_SIZE, "%s\n", pdev->driver_override); device_unlock(dev); return len; } @@ -708,6 +708,7 @@ static ssize_t pci_read_config(struct file *filp, struct kobject *kobj, data[off - init_off + 3] = (val >> 24) & 0xff; off += 4; size -= 4; + cond_resched(); } if (size >= 2) { @@ -1196,10 +1197,10 @@ static int pci_create_resource_files(struct pci_dev *pdev) } return 0; } -#else /* !HAVE_PCI_MMAP */ +#else /* !(defined(HAVE_PCI_MMAP) || defined(ARCH_GENERIC_PCI_MMAP_RESOURCE)) */ int __weak pci_create_resource_files(struct pci_dev *dev) { return 0; } void __weak pci_remove_resource_files(struct pci_dev *dev) { return; } -#endif /* HAVE_PCI_MMAP */ +#endif /** * pci_write_rom - used to enable access to the PCI ROM display diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index a458c46d7e39..0e63e0e77708 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c @@ -15,7 +15,6 @@ #include <linux/init.h> #include <linux/msi.h> #include <linux/of.h> -#include <linux/of_pci.h> #include <linux/pci.h> #include <linux/pm.h> #include <linux/slab.h> @@ -30,8 +29,6 @@ #include <linux/pm_runtime.h> #include <linux/pci_hotplug.h> #include <linux/vmalloc.h> -#include <linux/pci-ats.h> -#include <asm/setup.h> #include <asm/dma.h> #include <linux/aer.h> #include "pci.h" @@ -49,7 +46,7 @@ EXPORT_SYMBOL(isa_dma_bridge_buggy); int pci_pci_problems; EXPORT_SYMBOL(pci_pci_problems); -unsigned int pci_pm_d3_delay; +unsigned int pci_pm_d3hot_delay; static void pci_pme_list_scan(struct work_struct *work); @@ -66,10 +63,10 @@ struct pci_pme_device { static void pci_dev_d3_sleep(struct pci_dev *dev) { - unsigned int delay = dev->d3_delay; + unsigned int delay = dev->d3hot_delay; - if (delay < pci_pm_d3_delay) - delay = pci_pm_d3_delay; + if (delay < pci_pm_d3hot_delay) + delay = pci_pm_d3hot_delay; if (delay) msleep(delay); @@ -101,7 +98,19 @@ unsigned long pci_hotplug_mmio_pref_size = DEFAULT_HOTPLUG_MMIO_PREF_SIZE; #define DEFAULT_HOTPLUG_BUS_SIZE 1 unsigned long pci_hotplug_bus_size = DEFAULT_HOTPLUG_BUS_SIZE; + +/* PCIe MPS/MRRS strategy; can be overridden by kernel command-line param */ +#ifdef CONFIG_PCIE_BUS_TUNE_OFF +enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_TUNE_OFF; +#elif defined CONFIG_PCIE_BUS_SAFE +enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_SAFE; +#elif defined 
CONFIG_PCIE_BUS_PERFORMANCE +enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_PERFORMANCE; +#elif defined CONFIG_PCIE_BUS_PEER2PEER +enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_PEER2PEER; +#else enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_DEFAULT; +#endif /* * The default CLS is used if arch didn't set CLS explicitly and not @@ -876,6 +885,10 @@ static void pci_std_enable_acs(struct pci_dev *dev) /* Upstream Forwarding */ ctrl |= (cap & PCI_ACS_UF); + /* Enable Translation Blocking for external devices */ + if (dev->external_facing || dev->untrusted) + ctrl |= (cap & PCI_ACS_TB); + pci_write_config_word(dev, pos + PCI_ACS_CTRL, ctrl); } @@ -1065,7 +1078,7 @@ static int pci_raw_set_power_state(struct pci_dev *dev, pci_power_t state) if (state == PCI_D3hot || dev->current_state == PCI_D3hot) pci_dev_d3_sleep(dev); else if (state == PCI_D2 || dev->current_state == PCI_D2) - msleep(PCI_PM_D2_DELAY); + udelay(PCI_PM_D2_DELAY); pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr); dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK); @@ -3013,7 +3026,7 @@ void pci_pm_init(struct pci_dev *dev) } dev->pm_cap = pm; - dev->d3_delay = PCI_PM_D3_WAIT; + dev->d3hot_delay = PCI_PM_D3HOT_WAIT; dev->d3cold_delay = PCI_PM_D3COLD_WAIT; dev->bridge_d3 = pci_bridge_d3_possible(dev); dev->d3cold_allowed = true; @@ -3038,7 +3051,7 @@ void pci_pm_init(struct pci_dev *dev) (pmc & PCI_PM_CAP_PME_D0) ? " D0" : "", (pmc & PCI_PM_CAP_PME_D1) ? " D1" : "", (pmc & PCI_PM_CAP_PME_D2) ? " D2" : "", - (pmc & PCI_PM_CAP_PME_D3) ? " D3hot" : "", + (pmc & PCI_PM_CAP_PME_D3hot) ? " D3hot" : "", (pmc & PCI_PM_CAP_PME_D3cold) ? " D3cold" : ""); dev->pme_support = pmc >> PCI_PM_CAP_PME_SHIFT; dev->pme_poll = true; @@ -4621,7 +4634,7 @@ static int pci_af_flr(struct pci_dev *dev, int probe) * * NOTE: This causes the caller to sleep for twice the device power transition * cooldown period, which for the D0->D3hot and D3hot->D0 transitions is 10 ms - * by default (i.e. unless the @dev's d3_delay field has a different value). + * by default (i.e. unless the @dev's d3hot_delay field has a different value). * Moreover, only devices in D0 can be reset by this function. */ static int pci_pm_reset(struct pci_dev *dev, int probe) @@ -4701,9 +4714,7 @@ static bool pcie_wait_for_link_delay(struct pci_dev *pdev, bool active, } if (active && ret) msleep(delay); - else if (ret != active) - pci_info(pdev, "Data Link Layer Link Active not %s in 1000 msec\n", - active ? 
"set" : "cleared"); + return ret == active; } @@ -4828,6 +4839,7 @@ void pci_bridge_wait_for_secondary_bus(struct pci_dev *dev) delay); if (!pcie_wait_for_link_delay(dev, true, delay)) { /* Did not train, no need to wait any further */ + pci_info(dev, "Data Link Layer Link Active not set in 1000 msec\n"); return; } } @@ -4920,16 +4932,10 @@ static int pci_reset_hotplug_slot(struct hotplug_slot *hotplug, int probe) static int pci_dev_reset_slot_function(struct pci_dev *dev, int probe) { - struct pci_dev *pdev; - - if (dev->subordinate || !dev->slot || + if (dev->multifunction || dev->subordinate || !dev->slot || dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET) return -ENOTTY; - list_for_each_entry(pdev, &dev->bus->devices, bus_list) - if (pdev != dev && pdev->slot == dev->slot) - return -ENOTTY; - return pci_reset_hotplug_slot(dev->slot->hotplug, probe); } @@ -6005,7 +6011,7 @@ int pci_set_vga_state(struct pci_dev *dev, bool decode, if (flags & PCI_VGA_STATE_CHANGE_DECODES) { pci_read_config_word(dev, PCI_COMMAND, &cmd); - if (decode == true) + if (decode) cmd |= command_bits; else cmd &= ~command_bits; @@ -6021,7 +6027,7 @@ int pci_set_vga_state(struct pci_dev *dev, bool decode, if (bridge) { pci_read_config_word(bridge, PCI_BRIDGE_CONTROL, &cmd); - if (decode == true) + if (decode) cmd |= PCI_BRIDGE_CTL_VGA; else cmd &= ~PCI_BRIDGE_CTL_VGA; @@ -6350,7 +6356,7 @@ static ssize_t resource_alignment_show(struct bus_type *bus, char *buf) spin_lock(&resource_alignment_lock); if (resource_alignment_param) - count = snprintf(buf, PAGE_SIZE, "%s", resource_alignment_param); + count = scnprintf(buf, PAGE_SIZE, "%s", resource_alignment_param); spin_unlock(&resource_alignment_lock); /* diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h index fa12f7cbc1a0..f86cae9aa1f4 100644 --- a/drivers/pci/pci.h +++ b/drivers/pci/pci.h @@ -43,10 +43,9 @@ int pci_probe_reset_function(struct pci_dev *dev); int pci_bridge_secondary_bus_reset(struct pci_dev *dev); int pci_bus_error_reset(struct pci_dev *dev); -#define PCI_PM_D2_DELAY 200 -#define PCI_PM_D3_WAIT 10 -#define PCI_PM_D3COLD_WAIT 100 -#define PCI_PM_BUS_WAIT 50 +#define PCI_PM_D2_DELAY 200 /* usec; see PCIe r4.0, sec 5.9.1 */ +#define PCI_PM_D3HOT_WAIT 10 /* msec */ +#define PCI_PM_D3COLD_WAIT 100 /* msec */ /** * struct pci_platform_pm_ops - Firmware PM callbacks @@ -178,7 +177,7 @@ extern struct mutex pci_slot_mutex; extern raw_spinlock_t pci_lock; -extern unsigned int pci_pm_d3_delay; +extern unsigned int pci_pm_d3hot_delay; #ifdef CONFIG_PCI_MSI void pci_no_msi(void); diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c index 253c30cc1967..ac0557a305af 100644 --- a/drivers/pci/pcie/aspm.c +++ b/drivers/pci/pcie/aspm.c @@ -74,14 +74,6 @@ struct pcie_link_state { * has one slot under it, so at most there are 8 functions. 
*/ struct aspm_latency acceptable[8]; - - /* L1 PM Substate info */ - struct { - u32 up_cap_ptr; /* L1SS cap ptr in upstream dev */ - u32 dw_cap_ptr; /* L1SS cap ptr in downstream dev */ - u32 ctl1; /* value to be programmed in ctl1 */ - u32 ctl2; /* value to be programmed in ctl2 */ - } l1ss; }; static int aspm_disabled, aspm_force; @@ -308,8 +300,10 @@ static void pcie_aspm_configure_common_clock(struct pcie_link_state *link) } /* Convert L0s latency encoding to ns */ -static u32 calc_l0s_latency(u32 encoding) +static u32 calc_l0s_latency(u32 lnkcap) { + u32 encoding = (lnkcap & PCI_EXP_LNKCAP_L0SEL) >> 12; + if (encoding == 0x7) return (5 * 1000); /* > 4us */ return (64 << encoding); @@ -324,8 +318,10 @@ static u32 calc_l0s_acceptable(u32 encoding) } /* Convert L1 latency encoding to ns */ -static u32 calc_l1_latency(u32 encoding) +static u32 calc_l1_latency(u32 lnkcap) { + u32 encoding = (lnkcap & PCI_EXP_LNKCAP_L1EL) >> 15; + if (encoding == 0x7) return (65 * 1000); /* > 64us */ return (1000 << encoding); @@ -380,58 +376,6 @@ static void encode_l12_threshold(u32 threshold_us, u32 *scale, u32 *value) } } -struct aspm_register_info { - u32 support:2; - u32 enabled:2; - u32 latency_encoding_l0s; - u32 latency_encoding_l1; - - /* L1 substates */ - u32 l1ss_cap_ptr; - u32 l1ss_cap; - u32 l1ss_ctl1; - u32 l1ss_ctl2; -}; - -static void pcie_get_aspm_reg(struct pci_dev *pdev, - struct aspm_register_info *info) -{ - u16 reg16; - u32 reg32; - - pcie_capability_read_dword(pdev, PCI_EXP_LNKCAP, ®32); - info->support = (reg32 & PCI_EXP_LNKCAP_ASPMS) >> 10; - info->latency_encoding_l0s = (reg32 & PCI_EXP_LNKCAP_L0SEL) >> 12; - info->latency_encoding_l1 = (reg32 & PCI_EXP_LNKCAP_L1EL) >> 15; - pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, ®16); - info->enabled = reg16 & PCI_EXP_LNKCTL_ASPMC; - - /* Read L1 PM substate capabilities */ - info->l1ss_cap = info->l1ss_ctl1 = info->l1ss_ctl2 = 0; - info->l1ss_cap_ptr = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_L1SS); - if (!info->l1ss_cap_ptr) - return; - pci_read_config_dword(pdev, info->l1ss_cap_ptr + PCI_L1SS_CAP, - &info->l1ss_cap); - if (!(info->l1ss_cap & PCI_L1SS_CAP_L1_PM_SS)) { - info->l1ss_cap = 0; - return; - } - - /* - * If we don't have LTR for the entire path from the Root Complex - * to this device, we can't use ASPM L1.2 because it relies on the - * LTR_L1.2_THRESHOLD. See PCIe r4.0, secs 5.5.4, 6.18. 
- */ - if (!pdev->ltr_path) - info->l1ss_cap &= ~PCI_L1SS_CAP_ASPM_L1_2; - - pci_read_config_dword(pdev, info->l1ss_cap_ptr + PCI_L1SS_CTL1, - &info->l1ss_ctl1); - pci_read_config_dword(pdev, info->l1ss_cap_ptr + PCI_L1SS_CTL2, - &info->l1ss_ctl2); -} - static void pcie_aspm_check_latency(struct pci_dev *endpoint) { u32 latency, l1_switch_latency = 0; @@ -493,39 +437,49 @@ static struct pci_dev *pci_function_0(struct pci_bus *linkbus) return NULL; } +static void pci_clear_and_set_dword(struct pci_dev *pdev, int pos, + u32 clear, u32 set) +{ + u32 val; + + pci_read_config_dword(pdev, pos, &val); + val &= ~clear; + val |= set; + pci_write_config_dword(pdev, pos, val); +} + /* Calculate L1.2 PM substate timing parameters */ static void aspm_calc_l1ss_info(struct pcie_link_state *link, - struct aspm_register_info *upreg, - struct aspm_register_info *dwreg) + u32 parent_l1ss_cap, u32 child_l1ss_cap) { + struct pci_dev *child = link->downstream, *parent = link->pdev; u32 val1, val2, scale1, scale2; u32 t_common_mode, t_power_on, l1_2_threshold, scale, value; - - link->l1ss.up_cap_ptr = upreg->l1ss_cap_ptr; - link->l1ss.dw_cap_ptr = dwreg->l1ss_cap_ptr; - link->l1ss.ctl1 = link->l1ss.ctl2 = 0; + u32 ctl1 = 0, ctl2 = 0; + u32 pctl1, pctl2, cctl1, cctl2; + u32 pl1_2_enables, cl1_2_enables; if (!(link->aspm_support & ASPM_STATE_L1_2_MASK)) return; /* Choose the greater of the two Port Common_Mode_Restore_Times */ - val1 = (upreg->l1ss_cap & PCI_L1SS_CAP_CM_RESTORE_TIME) >> 8; - val2 = (dwreg->l1ss_cap & PCI_L1SS_CAP_CM_RESTORE_TIME) >> 8; + val1 = (parent_l1ss_cap & PCI_L1SS_CAP_CM_RESTORE_TIME) >> 8; + val2 = (child_l1ss_cap & PCI_L1SS_CAP_CM_RESTORE_TIME) >> 8; t_common_mode = max(val1, val2); /* Choose the greater of the two Port T_POWER_ON times */ - val1 = (upreg->l1ss_cap & PCI_L1SS_CAP_P_PWR_ON_VALUE) >> 19; - scale1 = (upreg->l1ss_cap & PCI_L1SS_CAP_P_PWR_ON_SCALE) >> 16; - val2 = (dwreg->l1ss_cap & PCI_L1SS_CAP_P_PWR_ON_VALUE) >> 19; - scale2 = (dwreg->l1ss_cap & PCI_L1SS_CAP_P_PWR_ON_SCALE) >> 16; - - if (calc_l1ss_pwron(link->pdev, scale1, val1) > - calc_l1ss_pwron(link->downstream, scale2, val2)) { - link->l1ss.ctl2 |= scale1 | (val1 << 3); - t_power_on = calc_l1ss_pwron(link->pdev, scale1, val1); + val1 = (parent_l1ss_cap & PCI_L1SS_CAP_P_PWR_ON_VALUE) >> 19; + scale1 = (parent_l1ss_cap & PCI_L1SS_CAP_P_PWR_ON_SCALE) >> 16; + val2 = (child_l1ss_cap & PCI_L1SS_CAP_P_PWR_ON_VALUE) >> 19; + scale2 = (child_l1ss_cap & PCI_L1SS_CAP_P_PWR_ON_SCALE) >> 16; + + if (calc_l1ss_pwron(parent, scale1, val1) > + calc_l1ss_pwron(child, scale2, val2)) { + ctl2 |= scale1 | (val1 << 3); + t_power_on = calc_l1ss_pwron(parent, scale1, val1); } else { - link->l1ss.ctl2 |= scale2 | (val2 << 3); - t_power_on = calc_l1ss_pwron(link->downstream, scale2, val2); + ctl2 |= scale2 | (val2 << 3); + t_power_on = calc_l1ss_pwron(child, scale2, val2); } /* @@ -540,14 +494,60 @@ static void aspm_calc_l1ss_info(struct pcie_link_state *link, */ l1_2_threshold = 2 + 4 + t_common_mode + t_power_on; encode_l12_threshold(l1_2_threshold, &scale, &value); - link->l1ss.ctl1 |= t_common_mode << 8 | scale << 29 | value << 16; + ctl1 |= t_common_mode << 8 | scale << 29 | value << 16; + + pci_read_config_dword(parent, parent->l1ss + PCI_L1SS_CTL1, &pctl1); + pci_read_config_dword(parent, parent->l1ss + PCI_L1SS_CTL2, &pctl2); + pci_read_config_dword(child, child->l1ss + PCI_L1SS_CTL1, &cctl1); + pci_read_config_dword(child, child->l1ss + PCI_L1SS_CTL2, &cctl2); + + if (ctl1 == pctl1 && ctl1 == cctl1 && + ctl2 == pctl2 && ctl2 == 
cctl2) + return; + + /* Disable L1.2 while updating. See PCIe r5.0, sec 5.5.4, 7.8.3.3 */ + pl1_2_enables = pctl1 & PCI_L1SS_CTL1_L1_2_MASK; + cl1_2_enables = cctl1 & PCI_L1SS_CTL1_L1_2_MASK; + + if (pl1_2_enables || cl1_2_enables) { + pci_clear_and_set_dword(child, child->l1ss + PCI_L1SS_CTL1, + PCI_L1SS_CTL1_L1_2_MASK, 0); + pci_clear_and_set_dword(parent, parent->l1ss + PCI_L1SS_CTL1, + PCI_L1SS_CTL1_L1_2_MASK, 0); + } + + /* Program T_POWER_ON times in both ports */ + pci_write_config_dword(parent, parent->l1ss + PCI_L1SS_CTL2, ctl2); + pci_write_config_dword(child, child->l1ss + PCI_L1SS_CTL2, ctl2); + + /* Program Common_Mode_Restore_Time in upstream device */ + pci_clear_and_set_dword(parent, parent->l1ss + PCI_L1SS_CTL1, + PCI_L1SS_CTL1_CM_RESTORE_TIME, ctl1); + + /* Program LTR_L1.2_THRESHOLD time in both ports */ + pci_clear_and_set_dword(parent, parent->l1ss + PCI_L1SS_CTL1, + PCI_L1SS_CTL1_LTR_L12_TH_VALUE | + PCI_L1SS_CTL1_LTR_L12_TH_SCALE, ctl1); + pci_clear_and_set_dword(child, child->l1ss + PCI_L1SS_CTL1, + PCI_L1SS_CTL1_LTR_L12_TH_VALUE | + PCI_L1SS_CTL1_LTR_L12_TH_SCALE, ctl1); + + if (pl1_2_enables || cl1_2_enables) { + pci_clear_and_set_dword(parent, parent->l1ss + PCI_L1SS_CTL1, 0, + pl1_2_enables); + pci_clear_and_set_dword(child, child->l1ss + PCI_L1SS_CTL1, 0, + cl1_2_enables); + } } static void pcie_aspm_cap_init(struct pcie_link_state *link, int blacklist) { struct pci_dev *child = link->downstream, *parent = link->pdev; + u32 parent_lnkcap, child_lnkcap; + u16 parent_lnkctl, child_lnkctl; + u32 parent_l1ss_cap, child_l1ss_cap; + u32 parent_l1ss_ctl1 = 0, child_l1ss_ctl1 = 0; struct pci_bus *linkbus = parent->subordinate; - struct aspm_register_info upreg, dwreg; if (blacklist) { /* Set enabled/disable so that we will disable ASPM later */ @@ -556,26 +556,28 @@ static void pcie_aspm_cap_init(struct pcie_link_state *link, int blacklist) return; } - /* Get upstream/downstream components' register state */ - pcie_get_aspm_reg(parent, &upreg); - pcie_get_aspm_reg(child, &dwreg); - /* * If ASPM not supported, don't mess with the clocks and link, * bail out now. */ - if (!(upreg.support & dwreg.support)) + pcie_capability_read_dword(parent, PCI_EXP_LNKCAP, &parent_lnkcap); + pcie_capability_read_dword(child, PCI_EXP_LNKCAP, &child_lnkcap); + if (!(parent_lnkcap & child_lnkcap & PCI_EXP_LNKCAP_ASPMS)) return; /* Configure common clock before checking latencies */ pcie_aspm_configure_common_clock(link); /* - * Re-read upstream/downstream components' register state - * after clock configuration + * Re-read upstream/downstream components' register state after + * clock configuration. L0s & L1 exit latencies in the otherwise + * read-only Link Capabilities may change depending on common clock + * configuration (PCIe r5.0, sec 7.5.3.6). */ - pcie_get_aspm_reg(parent, &upreg); - pcie_get_aspm_reg(child, &dwreg); + pcie_capability_read_dword(parent, PCI_EXP_LNKCAP, &parent_lnkcap); + pcie_capability_read_dword(child, PCI_EXP_LNKCAP, &child_lnkcap); + pcie_capability_read_word(parent, PCI_EXP_LNKCTL, &parent_lnkctl); + pcie_capability_read_word(child, PCI_EXP_LNKCTL, &child_lnkctl); /* * Setup L0s state @@ -584,44 +586,71 @@ static void pcie_aspm_cap_init(struct pcie_link_state *link, int blacklist) * given link unless components on both sides of the link each * support L0s. 
*/ - if (dwreg.support & upreg.support & PCIE_LINK_STATE_L0S) + if (parent_lnkcap & child_lnkcap & PCI_EXP_LNKCAP_ASPM_L0S) link->aspm_support |= ASPM_STATE_L0S; - if (dwreg.enabled & PCIE_LINK_STATE_L0S) + + if (child_lnkctl & PCI_EXP_LNKCTL_ASPM_L0S) link->aspm_enabled |= ASPM_STATE_L0S_UP; - if (upreg.enabled & PCIE_LINK_STATE_L0S) + if (parent_lnkctl & PCI_EXP_LNKCTL_ASPM_L0S) link->aspm_enabled |= ASPM_STATE_L0S_DW; - link->latency_up.l0s = calc_l0s_latency(upreg.latency_encoding_l0s); - link->latency_dw.l0s = calc_l0s_latency(dwreg.latency_encoding_l0s); + link->latency_up.l0s = calc_l0s_latency(parent_lnkcap); + link->latency_dw.l0s = calc_l0s_latency(child_lnkcap); /* Setup L1 state */ - if (upreg.support & dwreg.support & PCIE_LINK_STATE_L1) + if (parent_lnkcap & child_lnkcap & PCI_EXP_LNKCAP_ASPM_L1) link->aspm_support |= ASPM_STATE_L1; - if (upreg.enabled & dwreg.enabled & PCIE_LINK_STATE_L1) + + if (parent_lnkctl & child_lnkctl & PCI_EXP_LNKCTL_ASPM_L1) link->aspm_enabled |= ASPM_STATE_L1; - link->latency_up.l1 = calc_l1_latency(upreg.latency_encoding_l1); - link->latency_dw.l1 = calc_l1_latency(dwreg.latency_encoding_l1); + link->latency_up.l1 = calc_l1_latency(parent_lnkcap); + link->latency_dw.l1 = calc_l1_latency(child_lnkcap); /* Setup L1 substate */ - if (upreg.l1ss_cap & dwreg.l1ss_cap & PCI_L1SS_CAP_ASPM_L1_1) + pci_read_config_dword(parent, parent->l1ss + PCI_L1SS_CAP, + &parent_l1ss_cap); + pci_read_config_dword(child, child->l1ss + PCI_L1SS_CAP, + &child_l1ss_cap); + + if (!(parent_l1ss_cap & PCI_L1SS_CAP_L1_PM_SS)) + parent_l1ss_cap = 0; + if (!(child_l1ss_cap & PCI_L1SS_CAP_L1_PM_SS)) + child_l1ss_cap = 0; + + /* + * If we don't have LTR for the entire path from the Root Complex + * to this device, we can't use ASPM L1.2 because it relies on the + * LTR_L1.2_THRESHOLD. See PCIe r4.0, secs 5.5.4, 6.18. 
+ */ + if (!child->ltr_path) + child_l1ss_cap &= ~PCI_L1SS_CAP_ASPM_L1_2; + + if (parent_l1ss_cap & child_l1ss_cap & PCI_L1SS_CAP_ASPM_L1_1) link->aspm_support |= ASPM_STATE_L1_1; - if (upreg.l1ss_cap & dwreg.l1ss_cap & PCI_L1SS_CAP_ASPM_L1_2) + if (parent_l1ss_cap & child_l1ss_cap & PCI_L1SS_CAP_ASPM_L1_2) link->aspm_support |= ASPM_STATE_L1_2; - if (upreg.l1ss_cap & dwreg.l1ss_cap & PCI_L1SS_CAP_PCIPM_L1_1) + if (parent_l1ss_cap & child_l1ss_cap & PCI_L1SS_CAP_PCIPM_L1_1) link->aspm_support |= ASPM_STATE_L1_1_PCIPM; - if (upreg.l1ss_cap & dwreg.l1ss_cap & PCI_L1SS_CAP_PCIPM_L1_2) + if (parent_l1ss_cap & child_l1ss_cap & PCI_L1SS_CAP_PCIPM_L1_2) link->aspm_support |= ASPM_STATE_L1_2_PCIPM; - if (upreg.l1ss_ctl1 & dwreg.l1ss_ctl1 & PCI_L1SS_CTL1_ASPM_L1_1) + if (parent_l1ss_cap) + pci_read_config_dword(parent, parent->l1ss + PCI_L1SS_CTL1, + &parent_l1ss_ctl1); + if (child_l1ss_cap) + pci_read_config_dword(child, child->l1ss + PCI_L1SS_CTL1, + &child_l1ss_ctl1); + + if (parent_l1ss_ctl1 & child_l1ss_ctl1 & PCI_L1SS_CTL1_ASPM_L1_1) link->aspm_enabled |= ASPM_STATE_L1_1; - if (upreg.l1ss_ctl1 & dwreg.l1ss_ctl1 & PCI_L1SS_CTL1_ASPM_L1_2) + if (parent_l1ss_ctl1 & child_l1ss_ctl1 & PCI_L1SS_CTL1_ASPM_L1_2) link->aspm_enabled |= ASPM_STATE_L1_2; - if (upreg.l1ss_ctl1 & dwreg.l1ss_ctl1 & PCI_L1SS_CTL1_PCIPM_L1_1) + if (parent_l1ss_ctl1 & child_l1ss_ctl1 & PCI_L1SS_CTL1_PCIPM_L1_1) link->aspm_enabled |= ASPM_STATE_L1_1_PCIPM; - if (upreg.l1ss_ctl1 & dwreg.l1ss_ctl1 & PCI_L1SS_CTL1_PCIPM_L1_2) + if (parent_l1ss_ctl1 & child_l1ss_ctl1 & PCI_L1SS_CTL1_PCIPM_L1_2) link->aspm_enabled |= ASPM_STATE_L1_2_PCIPM; if (link->aspm_support & ASPM_STATE_L1SS) - aspm_calc_l1ss_info(link, &upreg, &dwreg); + aspm_calc_l1ss_info(link, parent_l1ss_cap, child_l1ss_cap); /* Save default state */ link->aspm_default = link->aspm_enabled; @@ -651,24 +680,11 @@ static void pcie_aspm_cap_init(struct pcie_link_state *link, int blacklist) } } -static void pci_clear_and_set_dword(struct pci_dev *pdev, int pos, - u32 clear, u32 set) -{ - u32 val; - - pci_read_config_dword(pdev, pos, &val); - val &= ~clear; - val |= set; - pci_write_config_dword(pdev, pos, val); -} - /* Configure the ASPM L1 substates */ static void pcie_config_aspm_l1ss(struct pcie_link_state *link, u32 state) { u32 val, enable_req; struct pci_dev *child = link->downstream, *parent = link->pdev; - u32 up_cap_ptr = link->l1ss.up_cap_ptr; - u32 dw_cap_ptr = link->l1ss.dw_cap_ptr; enable_req = (link->aspm_enabled ^ state) & state; @@ -686,9 +702,9 @@ static void pcie_config_aspm_l1ss(struct pcie_link_state *link, u32 state) */ /* Disable all L1 substates */ - pci_clear_and_set_dword(child, dw_cap_ptr + PCI_L1SS_CTL1, + pci_clear_and_set_dword(child, child->l1ss + PCI_L1SS_CTL1, PCI_L1SS_CTL1_L1SS_MASK, 0); - pci_clear_and_set_dword(parent, up_cap_ptr + PCI_L1SS_CTL1, + pci_clear_and_set_dword(parent, parent->l1ss + PCI_L1SS_CTL1, PCI_L1SS_CTL1_L1SS_MASK, 0); /* * If needed, disable L1, and it gets enabled later @@ -701,30 +717,6 @@ static void pcie_config_aspm_l1ss(struct pcie_link_state *link, u32 state) PCI_EXP_LNKCTL_ASPM_L1, 0); } - if (enable_req & ASPM_STATE_L1_2_MASK) { - - /* Program T_POWER_ON times in both ports */ - pci_write_config_dword(parent, up_cap_ptr + PCI_L1SS_CTL2, - link->l1ss.ctl2); - pci_write_config_dword(child, dw_cap_ptr + PCI_L1SS_CTL2, - link->l1ss.ctl2); - - /* Program Common_Mode_Restore_Time in upstream device */ - pci_clear_and_set_dword(parent, up_cap_ptr + PCI_L1SS_CTL1, - PCI_L1SS_CTL1_CM_RESTORE_TIME, - link->l1ss.ctl1); - - /* 
Program LTR_L1.2_THRESHOLD time in both ports */ - pci_clear_and_set_dword(parent, up_cap_ptr + PCI_L1SS_CTL1, - PCI_L1SS_CTL1_LTR_L12_TH_VALUE | - PCI_L1SS_CTL1_LTR_L12_TH_SCALE, - link->l1ss.ctl1); - pci_clear_and_set_dword(child, dw_cap_ptr + PCI_L1SS_CTL1, - PCI_L1SS_CTL1_LTR_L12_TH_VALUE | - PCI_L1SS_CTL1_LTR_L12_TH_SCALE, - link->l1ss.ctl1); - } - val = 0; if (state & ASPM_STATE_L1_1) val |= PCI_L1SS_CTL1_ASPM_L1_1; @@ -736,9 +728,9 @@ static void pcie_config_aspm_l1ss(struct pcie_link_state *link, u32 state) val |= PCI_L1SS_CTL1_PCIPM_L1_2; /* Enable what we need to enable */ - pci_clear_and_set_dword(parent, up_cap_ptr + PCI_L1SS_CTL1, + pci_clear_and_set_dword(parent, parent->l1ss + PCI_L1SS_CTL1, PCI_L1SS_CTL1_L1SS_MASK, val); - pci_clear_and_set_dword(child, dw_cap_ptr + PCI_L1SS_CTL1, + pci_clear_and_set_dword(child, child->l1ss + PCI_L1SS_CTL1, PCI_L1SS_CTL1_L1SS_MASK, val); } diff --git a/drivers/pci/pcie/bw_notification.c b/drivers/pci/pcie/bw_notification.c index 77e685771487..565d23cccb8b 100644 --- a/drivers/pci/pcie/bw_notification.c +++ b/drivers/pci/pcie/bw_notification.c @@ -14,6 +14,8 @@ * and warns when links become degraded in operation. */ +#define dev_fmt(fmt) "bw_notification: " fmt + #include "../pci.h" #include "portdrv.h" @@ -97,6 +99,7 @@ static int pcie_bandwidth_notification_probe(struct pcie_device *srv) return ret; pcie_enable_link_bandwidth_notification(srv->port); + pci_info(srv->port, "enabled with IRQ %d\n", srv->irq); return 0; } diff --git a/drivers/pci/pcie/dpc.c b/drivers/pci/pcie/dpc.c index daa9a4153776..e05aba86a317 100644 --- a/drivers/pci/pcie/dpc.c +++ b/drivers/pci/pcie/dpc.c @@ -103,7 +103,8 @@ pci_ers_result_t dpc_reset_link(struct pci_dev *pdev) * Wait until the Link is inactive, then clear DPC Trigger Status * to allow the Port to leave DPC. 
*/ - pcie_wait_for_link(pdev, false); + if (!pcie_wait_for_link(pdev, false)) + pci_info(pdev, "Data Link Layer Link Active not cleared in 1000 msec\n"); if (pdev->dpc_rp_extensions && dpc_wait_rp_inactive(pdev)) return PCI_ERS_RESULT_DISCONNECT; @@ -111,8 +112,10 @@ pci_ers_result_t dpc_reset_link(struct pci_dev *pdev) pci_write_config_word(pdev, cap + PCI_EXP_DPC_STATUS, PCI_EXP_DPC_STATUS_TRIGGER); - if (!pcie_wait_for_link(pdev, true)) + if (!pcie_wait_for_link(pdev, true)) { + pci_info(pdev, "Data Link Layer Link Active not set in 1000 msec\n"); return PCI_ERS_RESULT_DISCONNECT; + } return PCI_ERS_RESULT_RECOVERED; } diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c index 03d37128a24f..06f6bbcd8131 100644 --- a/drivers/pci/probe.c +++ b/drivers/pci/probe.c @@ -2106,6 +2106,9 @@ static void pci_configure_ltr(struct pci_dev *dev) if (!pci_is_pcie(dev)) return; + /* Read L1 PM substate capabilities */ + dev->l1ss = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_L1SS); + pcie_capability_read_dword(dev, PCI_EXP_DEVCAP2, &cap); if (!(cap & PCI_EXP_DEVCAP2_LTR)) return; diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index bdf9b52567e0..eefed9d26945 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c @@ -1846,7 +1846,7 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXHV, quirk_pci */ static void quirk_intel_pcie_pm(struct pci_dev *dev) { - pci_pm_d3_delay = 120; + pci_pm_d3hot_delay = 120; dev->no_d1d2 = 1; } DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x25e2, quirk_intel_pcie_pm); @@ -1873,12 +1873,12 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x260b, quirk_intel_pcie_pm); static void quirk_d3hot_delay(struct pci_dev *dev, unsigned int delay) { - if (dev->d3_delay >= delay) + if (dev->d3hot_delay >= delay) return; - dev->d3_delay = delay; + dev->d3hot_delay = delay; pci_info(dev, "extending delay after power-on from D3hot to %d msec\n", - dev->d3_delay); + dev->d3hot_delay); } static void quirk_radeon_pm(struct pci_dev *dev) @@ -3387,36 +3387,36 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0152, disable_igfx_irq); * PCI devices which are on Intel chips can skip the 10ms delay * before entering D3 mode. 
*/ -static void quirk_remove_d3_delay(struct pci_dev *dev) -{ - dev->d3_delay = 0; -} -/* C600 Series devices do not need 10ms d3_delay */ -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0412, quirk_remove_d3_delay); -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0c00, quirk_remove_d3_delay); -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0c0c, quirk_remove_d3_delay); -/* Lynxpoint-H PCH devices do not need 10ms d3_delay */ -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c02, quirk_remove_d3_delay); -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c18, quirk_remove_d3_delay); -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c1c, quirk_remove_d3_delay); -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c20, quirk_remove_d3_delay); -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c22, quirk_remove_d3_delay); -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c26, quirk_remove_d3_delay); -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c2d, quirk_remove_d3_delay); -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c31, quirk_remove_d3_delay); -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c3a, quirk_remove_d3_delay); -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c3d, quirk_remove_d3_delay); -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c4e, quirk_remove_d3_delay); -/* Intel Cherrytrail devices do not need 10ms d3_delay */ -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2280, quirk_remove_d3_delay); -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2298, quirk_remove_d3_delay); -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x229c, quirk_remove_d3_delay); -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x22b0, quirk_remove_d3_delay); -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x22b5, quirk_remove_d3_delay); -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x22b7, quirk_remove_d3_delay); -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x22b8, quirk_remove_d3_delay); -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x22d8, quirk_remove_d3_delay); -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x22dc, quirk_remove_d3_delay); +static void quirk_remove_d3hot_delay(struct pci_dev *dev) +{ + dev->d3hot_delay = 0; +} +/* C600 Series devices do not need 10ms d3hot_delay */ +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0412, quirk_remove_d3hot_delay); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0c00, quirk_remove_d3hot_delay); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0c0c, quirk_remove_d3hot_delay); +/* Lynxpoint-H PCH devices do not need 10ms d3hot_delay */ +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c02, quirk_remove_d3hot_delay); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c18, quirk_remove_d3hot_delay); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c1c, quirk_remove_d3hot_delay); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c20, quirk_remove_d3hot_delay); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c22, quirk_remove_d3hot_delay); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c26, quirk_remove_d3hot_delay); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c2d, quirk_remove_d3hot_delay); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c31, quirk_remove_d3hot_delay); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c3a, quirk_remove_d3hot_delay); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c3d, quirk_remove_d3hot_delay); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c4e, quirk_remove_d3hot_delay); +/* Intel Cherrytrail devices do not need 10ms d3hot_delay */ +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2280, quirk_remove_d3hot_delay); 
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2298, quirk_remove_d3hot_delay); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x229c, quirk_remove_d3hot_delay); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x22b0, quirk_remove_d3hot_delay); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x22b5, quirk_remove_d3hot_delay); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x22b7, quirk_remove_d3hot_delay); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x22b8, quirk_remove_d3hot_delay); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x22d8, quirk_remove_d3hot_delay); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x22dc, quirk_remove_d3hot_delay); /* * Some devices may pass our check in pci_intx_mask_supported() if @@ -4949,6 +4949,13 @@ static void pci_quirk_enable_intel_rp_mpc_acs(struct pci_dev *dev) } } +/* + * Currently this quirk does the equivalent of + * PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF + * + * TODO: This quirk also needs to do equivalent of PCI_ACS_TB, + * if dev->external_facing || dev->untrusted + */ static int pci_quirk_enable_intel_pch_acs(struct pci_dev *dev) { if (!pci_quirk_intel_pch_acs_match(dev)) @@ -4988,6 +4995,9 @@ static int pci_quirk_enable_intel_spt_pch_acs(struct pci_dev *dev) ctrl |= (cap & PCI_ACS_CR); ctrl |= (cap & PCI_ACS_UF); + if (dev->external_facing || dev->untrusted) + ctrl |= (cap & PCI_ACS_TB); + pci_write_config_dword(dev, pos + INTEL_SPT_ACS_CTRL, ctrl); pci_info(dev, "Intel SPT PCH root port ACS workaround enabled\n"); diff --git a/drivers/staging/media/atomisp/pci/atomisp_v4l2.c b/drivers/staging/media/atomisp/pci/atomisp_v4l2.c index a000a1e316f7..beba430a197e 100644 --- a/drivers/staging/media/atomisp/pci/atomisp_v4l2.c +++ b/drivers/staging/media/atomisp/pci/atomisp_v4l2.c @@ -1573,7 +1573,7 @@ static int atomisp_pci_probe(struct pci_dev *pdev, const struct pci_device_id *i spin_lock_init(&isp->lock); /* This is not a true PCI device on SoC, so the delay is not needed. */ - pdev->d3_delay = 0; + pdev->d3hot_delay = 0; pci_set_drvdata(pdev, isp); |
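The ghes_register_vendor_record_notifier()/ghes_unregister_vendor_record_notifier() interface added in drivers/acpi/apei/ghes.c above is consumed in this series by drivers/pci/controller/pcie-hisi-error.c. Below is a minimal sketch of the same pattern for a hypothetical platform driver; the section GUID, the "example" names and the module boilerplate are illustrative placeholders and not part of the patch. The callback is invoked from the deferred work item in ghes.c, i.e. in process context, with the error severity as the notifier event and the struct acpi_hest_generic_data as the payload.

#include <linux/acpi.h>
#include <linux/module.h>
#include <linux/notifier.h>
#include <acpi/ghes.h>

/* Placeholder Section Type GUID; a real driver matches its firmware's GUID. */
static const guid_t example_sec_guid =
	GUID_INIT(0x00000000, 0x0000, 0x0000,
		  0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00);

static int example_vendor_record_notify(struct notifier_block *nb,
					unsigned long severity, void *data)
{
	struct acpi_hest_generic_data *gdata = data;
	guid_t sec_guid;

	/* Ignore records whose Section Type is not ours. */
	import_guid(&sec_guid, gdata->section_type);
	if (!guid_equal(&sec_guid, &example_sec_guid))
		return NOTIFY_DONE;

	pr_info("example: vendor error record, severity %lu, %u bytes\n",
		severity, gdata->error_data_length);

	/* acpi_hest_get_payload(gdata) returns the vendor-specific payload. */

	return NOTIFY_OK;
}

static struct notifier_block example_nb = {
	.notifier_call = example_vendor_record_notify,
};

static int __init example_init(void)
{
	return ghes_register_vendor_record_notifier(&example_nb);
}

static void __exit example_exit(void)
{
	ghes_unregister_vendor_record_notifier(&example_nb);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");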
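Several hunks above (drivers/pci/p2pdma.c, drivers/pci/pci-sysfs.c, drivers/pci/pci.c) convert sysfs show() callbacks from snprintf() to scnprintf(). The reasoning, sketched below with a made-up attribute that is not part of the patch: snprintf() returns the length that would have been written had the buffer been large enough, which can exceed PAGE_SIZE, whereas scnprintf() returns the number of bytes actually placed in the buffer, which is the value a show() callback should hand back to sysfs.

#include <linux/device.h>
#include <linux/kernel.h>

/* Hypothetical attribute, for illustration only. */
static ssize_t example_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	/*
	 * scnprintf() never claims more than it actually wrote into buf,
	 * so the return value is always a valid sysfs length (< PAGE_SIZE).
	 */
	return scnprintf(buf, PAGE_SIZE, "%s\n", dev_name(dev));
}
static DEVICE_ATTR_RO(example);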