Diffstat (limited to 'drivers')
-rw-r--r--  drivers/acpi/pci_root.c | 21
-rw-r--r--  drivers/acpi/scan.c | 33
-rw-r--r--  drivers/bus/Kconfig | 8
-rw-r--r--  drivers/bus/Makefile | 1
-rw-r--r--  drivers/bus/hisi_lpc.c | 615
-rw-r--r--  drivers/char/xillybus/xillybus_pcie.c | 1
-rw-r--r--  drivers/fpga/altera-cvp.c | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c | 5
-rw-r--r--  drivers/mcb/mcb-pci.c | 1
-rw-r--r--  drivers/misc/pci_endpoint_test.c | 12
-rw-r--r--  drivers/net/ethernet/intel/fm10k/fm10k_pci.c | 87
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/main.c | 81
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 32
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/main.c | 4
-rw-r--r--  drivers/of/address.c | 96
-rw-r--r--  drivers/pci/Makefile | 69
-rw-r--r--  drivers/pci/access.c | 380
-rw-r--r--  drivers/pci/ats.c | 10
-rw-r--r--  drivers/pci/bus.c | 2
-rw-r--r--  drivers/pci/cadence/pcie-cadence-ep.c | 15
-rw-r--r--  drivers/pci/dwc/pci-exynos.c | 18
-rw-r--r--  drivers/pci/dwc/pci-imx6.c | 18
-rw-r--r--  drivers/pci/dwc/pci-keystone-dw.c | 91
-rw-r--r--  drivers/pci/dwc/pci-keystone.c | 1
-rw-r--r--  drivers/pci/dwc/pci-keystone.h | 4
-rw-r--r--  drivers/pci/dwc/pci-layerscape.c | 3
-rw-r--r--  drivers/pci/dwc/pcie-artpec6.c | 18
-rw-r--r--  drivers/pci/dwc/pcie-designware-ep.c | 36
-rw-r--r--  drivers/pci/dwc/pcie-designware-host.c | 396
-rw-r--r--  drivers/pci/dwc/pcie-designware-plat.c | 16
-rw-r--r--  drivers/pci/dwc/pcie-designware.h | 30
-rw-r--r--  drivers/pci/dwc/pcie-histb.c | 43
-rw-r--r--  drivers/pci/dwc/pcie-qcom.c | 91
-rw-r--r--  drivers/pci/endpoint/functions/pci-epf-test.c | 28
-rw-r--r--  drivers/pci/endpoint/pci-epc-core.c | 32
-rw-r--r--  drivers/pci/endpoint/pci-epf-core.c | 56
-rw-r--r--  drivers/pci/host-bridge.c | 2
-rw-r--r--  drivers/pci/host/pcie-altera.c | 2
-rw-r--r--  drivers/pci/hotplug/acpiphp_glue.c | 23
-rw-r--r--  drivers/pci/hotplug/cpqphp_ctrl.c | 12
-rw-r--r--  drivers/pci/hotplug/pciehp.h | 3
-rw-r--r--  drivers/pci/iov.c | 50
-rw-r--r--  drivers/pci/mmap.c | 2
-rw-r--r--  drivers/pci/msi.c | 3
-rw-r--r--  drivers/pci/pci-acpi.c | 3
-rw-r--r--  drivers/pci/pci-driver.c | 96
-rw-r--r--  drivers/pci/pci-label.c | 5
-rw-r--r--  drivers/pci/pci-stub.c | 3
-rw-r--r--  drivers/pci/pci-sysfs.c | 111
-rw-r--r--  drivers/pci/pci.c | 377
-rw-r--r--  drivers/pci/pci.h | 45
-rw-r--r--  drivers/pci/pcie/Makefile | 19
-rw-r--r--  drivers/pci/pcie/aer/aer_inject.c | 4
-rw-r--r--  drivers/pci/pcie/aer/aerdrv.c | 10
-rw-r--r--  drivers/pci/pcie/aer/aerdrv.h | 4
-rw-r--r--  drivers/pci/pcie/aer/aerdrv_acpi.c | 1
-rw-r--r--  drivers/pci/pcie/aer/aerdrv_core.c | 11
-rw-r--r--  drivers/pci/pcie/aer/aerdrv_errprint.c | 3
-rw-r--r--  drivers/pci/pcie/aer/ecrc.c | 8
-rw-r--r--  drivers/pci/pcie/aspm.c | 23
-rw-r--r--  drivers/pci/pcie/dpc.c (renamed from drivers/pci/pcie/pcie-dpc.c) | 3
-rw-r--r--  drivers/pci/pcie/pme.c | 1
-rw-r--r--  drivers/pci/pcie/portdrv.h | 87
-rw-r--r--  drivers/pci/pcie/portdrv_acpi.c | 5
-rw-r--r--  drivers/pci/pcie/portdrv_bus.c | 56
-rw-r--r--  drivers/pci/pcie/portdrv_core.c | 84
-rw-r--r--  drivers/pci/pcie/portdrv_pci.c | 61
-rw-r--r--  drivers/pci/probe.c | 75
-rw-r--r--  drivers/pci/proc.c | 4
-rw-r--r--  drivers/pci/quirks.c | 171
-rw-r--r--  drivers/pci/rom.c | 4
-rw-r--r--  drivers/pci/search.c | 8
-rw-r--r--  drivers/pci/setup-bus.c | 6
-rw-r--r--  drivers/pci/setup-irq.c | 4
-rw-r--r--  drivers/pci/setup-res.c | 10
-rw-r--r--  drivers/pci/slot.c | 2
-rw-r--r--  drivers/pci/syscall.c | 9
-rw-r--r--  drivers/pci/vpd.c | 576
-rw-r--r--  drivers/pci/xen-pcifront.c | 4
-rw-r--r--  drivers/rapidio/devices/tsi721.c | 5
80 files changed, 2512 insertions, 1839 deletions
diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c
index 6fc204a52493..0da18bde6a16 100644
--- a/drivers/acpi/pci_root.c
+++ b/drivers/acpi/pci_root.c
@@ -729,7 +729,8 @@ next:
}
}
-static void acpi_pci_root_remap_iospace(struct resource_entry *entry)
+static void acpi_pci_root_remap_iospace(struct fwnode_handle *fwnode,
+ struct resource_entry *entry)
{
#ifdef PCI_IOBASE
struct resource *res = entry->res;
@@ -738,7 +739,7 @@ static void acpi_pci_root_remap_iospace(struct resource_entry *entry)
resource_size_t length = resource_size(res);
unsigned long port;
- if (pci_register_io_range(cpu_addr, length))
+ if (pci_register_io_range(fwnode, cpu_addr, length))
goto err;
port = pci_address_to_pio(cpu_addr);
@@ -780,7 +781,8 @@ int acpi_pci_probe_root_resources(struct acpi_pci_root_info *info)
else {
resource_list_for_each_entry_safe(entry, tmp, list) {
if (entry->res->flags & IORESOURCE_IO)
- acpi_pci_root_remap_iospace(entry);
+ acpi_pci_root_remap_iospace(&device->fwnode,
+ entry);
if (entry->res->flags & IORESOURCE_DISABLED)
resource_list_destroy_entry(entry);
@@ -871,6 +873,7 @@ struct pci_bus *acpi_pci_root_create(struct acpi_pci_root *root,
struct acpi_device *device = root->device;
int node = acpi_get_node(device->handle);
struct pci_bus *bus;
+ struct pci_host_bridge *host_bridge;
info->root = root;
info->bridge = device;
@@ -895,9 +898,17 @@ struct pci_bus *acpi_pci_root_create(struct acpi_pci_root *root,
if (!bus)
goto out_release_info;
+ host_bridge = to_pci_host_bridge(bus->bridge);
+ if (!(root->osc_control_set & OSC_PCI_EXPRESS_NATIVE_HP_CONTROL))
+ host_bridge->native_hotplug = 0;
+ if (!(root->osc_control_set & OSC_PCI_EXPRESS_AER_CONTROL))
+ host_bridge->native_aer = 0;
+ if (!(root->osc_control_set & OSC_PCI_EXPRESS_PME_CONTROL))
+ host_bridge->native_pme = 0;
+
pci_scan_child_bus(bus);
- pci_set_host_bridge_release(to_pci_host_bridge(bus->bridge),
- acpi_pci_root_release_info, info);
+ pci_set_host_bridge_release(host_bridge, acpi_pci_root_release_info,
+ info);
if (node != NUMA_NO_NODE)
dev_printk(KERN_DEBUG, &bus->dev, "on NUMA node %d\n", node);
return bus;
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index 8e63d937babb..a4cbf3efc809 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -1524,11 +1524,25 @@ static int acpi_check_serial_bus_slave(struct acpi_resource *ares, void *data)
return -1;
}
-static bool acpi_is_serial_bus_slave(struct acpi_device *device)
+static bool acpi_is_indirect_io_slave(struct acpi_device *device)
+{
+ struct acpi_device *parent = device->parent;
+ const struct acpi_device_id indirect_io_hosts[] = {
+ {"HISI0191", 0},
+ {}
+ };
+
+ return parent && !acpi_match_device_ids(parent, indirect_io_hosts);
+}
+
+static bool acpi_device_enumeration_by_parent(struct acpi_device *device)
{
struct list_head resource_list;
bool is_serial_bus_slave = false;
+ if (acpi_is_indirect_io_slave(device))
+ return true;
+
/* Macs use device properties in lieu of _CRS resources */
if (x86_apple_machine &&
(fwnode_property_present(&device->fwnode, "spiSclkPeriod") ||
@@ -1560,7 +1574,8 @@ void acpi_init_device_object(struct acpi_device *device, acpi_handle handle,
acpi_bus_get_flags(device);
device->flags.match_driver = false;
device->flags.initialized = true;
- device->flags.serial_bus_slave = acpi_is_serial_bus_slave(device);
+ device->flags.enumeration_by_parent =
+ acpi_device_enumeration_by_parent(device);
acpi_device_clear_enumerated(device);
device_initialize(&device->dev);
dev_set_uevent_suppress(&device->dev, true);
@@ -1858,10 +1873,10 @@ static acpi_status acpi_bus_check_add(acpi_handle handle, u32 lvl_not_used,
static void acpi_default_enumeration(struct acpi_device *device)
{
/*
- * Do not enumerate SPI/I2C/UART slaves as they will be enumerated by
- * their respective parents.
+ * Do not enumerate devices with enumeration_by_parent flag set as
+ * they will be enumerated by their respective parents.
*/
- if (!device->flags.serial_bus_slave) {
+ if (!device->flags.enumeration_by_parent) {
acpi_create_platform_device(device, NULL);
acpi_device_set_enumerated(device);
} else {
@@ -1958,7 +1973,7 @@ static void acpi_bus_attach(struct acpi_device *device)
return;
device->flags.match_driver = true;
- if (ret > 0 && !device->flags.serial_bus_slave) {
+ if (ret > 0 && !device->flags.enumeration_by_parent) {
acpi_device_set_enumerated(device);
goto ok;
}
@@ -1967,10 +1982,10 @@ static void acpi_bus_attach(struct acpi_device *device)
if (ret < 0)
return;
- if (!device->pnp.type.platform_id && !device->flags.serial_bus_slave)
- acpi_device_set_enumerated(device);
- else
+ if (device->pnp.type.platform_id || device->flags.enumeration_by_parent)
acpi_default_enumeration(device);
+ else
+ acpi_device_set_enumerated(device);
ok:
list_for_each_entry(child, &device->children, node)
diff --git a/drivers/bus/Kconfig b/drivers/bus/Kconfig
index 57e011d36a79..a3fad0f0292f 100644
--- a/drivers/bus/Kconfig
+++ b/drivers/bus/Kconfig
@@ -65,6 +65,14 @@ config BRCMSTB_GISB_ARB
arbiter. This driver provides timeout and target abort error handling
and internal bus master decoding.
+config HISILICON_LPC
+ bool "Support for ISA I/O space on HiSilicon Hip06/7"
+ depends on ARM64 && (ARCH_HISI || COMPILE_TEST)
+ select INDIRECT_PIO
+ help
+ Driver to enable I/O access to devices attached to the Low Pin
+ Count bus on the HiSilicon Hip06/7 SoC.
+
config IMX_WEIM
bool "Freescale EIM DRIVER"
depends on ARCH_MXC
diff --git a/drivers/bus/Makefile b/drivers/bus/Makefile
index 9bcd0bf3954b..50bb12a971a0 100644
--- a/drivers/bus/Makefile
+++ b/drivers/bus/Makefile
@@ -7,6 +7,7 @@
obj-$(CONFIG_ARM_CCI) += arm-cci.o
obj-$(CONFIG_ARM_CCN) += arm-ccn.o
+obj-$(CONFIG_HISILICON_LPC) += hisi_lpc.o
obj-$(CONFIG_BRCMSTB_GISB_ARB) += brcmstb_gisb.o
obj-$(CONFIG_IMX_WEIM) += imx-weim.o
obj-$(CONFIG_MIPS_CDMM) += mips_cdmm.o
diff --git a/drivers/bus/hisi_lpc.c b/drivers/bus/hisi_lpc.c
new file mode 100644
index 000000000000..2d4611e4c339
--- /dev/null
+++ b/drivers/bus/hisi_lpc.c
@@ -0,0 +1,615 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2017 Hisilicon Limited, All Rights Reserved.
+ * Author: Zhichang Yuan <yuanzhichang@hisilicon.com>
+ * Author: Zou Rongrong <zourongrong@huawei.com>
+ * Author: John Garry <john.garry@huawei.com>
+ */
+
+#include <linux/acpi.h>
+#include <linux/console.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/logic_pio.h>
+#include <linux/mfd/core.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+
+#define DRV_NAME "hisi-lpc"
+
+/*
+ * Setting this bit means each IO operation will target a different port
+ * address; 0 means repeated IO operations will use the same port,
+ * such as BT.
+ */
+#define FG_INCRADDR_LPC 0x02
+
+struct lpc_cycle_para {
+ unsigned int opflags;
+ unsigned int csize; /* data length of each operation */
+};
+
+struct hisi_lpc_dev {
+ spinlock_t cycle_lock;
+ void __iomem *membase;
+ struct logic_pio_hwaddr *io_host;
+};
+
+/* The maximum number of I/O cycles supported per operation is four */
+#define LPC_MAX_DWIDTH 4
+
+#define LPC_REG_STARTUP_SIGNAL 0x00
+#define LPC_REG_STARTUP_SIGNAL_START BIT(0)
+#define LPC_REG_OP_STATUS 0x04
+#define LPC_REG_OP_STATUS_IDLE BIT(0)
+#define LPC_REG_OP_STATUS_FINISHED BIT(1)
+#define LPC_REG_OP_LEN 0x10 /* LPC cycles count per start */
+#define LPC_REG_CMD 0x14
+#define LPC_REG_CMD_OP BIT(0) /* 0: read, 1: write */
+#define LPC_REG_CMD_SAMEADDR BIT(3)
+#define LPC_REG_ADDR 0x20 /* target address */
+#define LPC_REG_WDATA 0x24 /* write FIFO */
+#define LPC_REG_RDATA 0x28 /* read FIFO */
+
+/* The minimal nanosecond interval for each query on LPC cycle status */
+#define LPC_NSEC_PERWAIT 100
+
+/*
+ * The maximum waiting time is about 128us; it is specific to stream I/O,
+ * such as ins.
+ *
+ * The fastest IO cycle time is about 390ns, but the worst case waits an
+ * extra 256 LPC clocks, so (256 + 13) * 30ns is about 8us. The maximum
+ * number of burst cycles is 16, so the maximum waiting time is about
+ * 128us in the worst case.
+ *
+ * Choose 1300 as the maximum (1300 polls * 100ns per poll is about 130us).
+ */
+#define LPC_MAX_WAITCNT 1300
+
+/* About 10us. This is specific for single IO operations, such as inb */
+#define LPC_PEROP_WAITCNT 100
+
+static int wait_lpc_idle(unsigned char *mbase, unsigned int waitcnt)
+{
+ u32 status;
+
+ do {
+ status = readl(mbase + LPC_REG_OP_STATUS);
+ if (status & LPC_REG_OP_STATUS_IDLE)
+ return (status & LPC_REG_OP_STATUS_FINISHED) ? 0 : -EIO;
+ ndelay(LPC_NSEC_PERWAIT);
+ } while (--waitcnt);
+
+ return -ETIME;
+}
+
+/*
+ * hisi_lpc_target_in - trigger a series of LPC cycles for read operation
+ * @lpcdev: pointer to hisi lpc device
+ * @para: some parameters used to control the lpc I/O operations
+ * @addr: the lpc I/O target port address
+ * @buf: where the read back data is stored
+ * @opcnt: how many I/O operations are required, i.e. the data width
+ *
+ * Returns 0 on success, non-zero on failure.
+ */
+static int hisi_lpc_target_in(struct hisi_lpc_dev *lpcdev,
+ struct lpc_cycle_para *para, unsigned long addr,
+ unsigned char *buf, unsigned long opcnt)
+{
+ unsigned int cmd_word;
+ unsigned int waitcnt;
+ unsigned long flags;
+ int ret;
+
+ if (!buf || !opcnt || !para || !para->csize || !lpcdev)
+ return -EINVAL;
+
+ cmd_word = 0; /* IO mode, Read */
+ waitcnt = LPC_PEROP_WAITCNT;
+ if (!(para->opflags & FG_INCRADDR_LPC)) {
+ cmd_word |= LPC_REG_CMD_SAMEADDR;
+ waitcnt = LPC_MAX_WAITCNT;
+ }
+
+ /* whole operation must be atomic */
+ spin_lock_irqsave(&lpcdev->cycle_lock, flags);
+
+ writel_relaxed(opcnt, lpcdev->membase + LPC_REG_OP_LEN);
+ writel_relaxed(cmd_word, lpcdev->membase + LPC_REG_CMD);
+ writel_relaxed(addr, lpcdev->membase + LPC_REG_ADDR);
+
+ writel(LPC_REG_STARTUP_SIGNAL_START,
+ lpcdev->membase + LPC_REG_STARTUP_SIGNAL);
+
+ /* wait for the operation to finish */
+ ret = wait_lpc_idle(lpcdev->membase, waitcnt);
+ if (ret) {
+ spin_unlock_irqrestore(&lpcdev->cycle_lock, flags);
+ return ret;
+ }
+
+ readsb(lpcdev->membase + LPC_REG_RDATA, buf, opcnt);
+
+ spin_unlock_irqrestore(&lpcdev->cycle_lock, flags);
+
+ return 0;
+}
+
+/*
+ * hisi_lpc_target_out - trigger a series of LPC cycles for write operation
+ * @lpcdev: pointer to hisi lpc device
+ * @para: some parameters used to control the lpc I/O operations
+ * @addr: the lpc I/O target port address
+ * @buf: where the data to be written is stored
+ * @opcnt: how many I/O operations are required, i.e. the data width
+ *
+ * Returns 0 on success, non-zero on failure.
+ */
+static int hisi_lpc_target_out(struct hisi_lpc_dev *lpcdev,
+ struct lpc_cycle_para *para, unsigned long addr,
+ const unsigned char *buf, unsigned long opcnt)
+{
+ unsigned int waitcnt;
+ unsigned long flags;
+ u32 cmd_word;
+ int ret;
+
+ if (!buf || !opcnt || !para || !lpcdev)
+ return -EINVAL;
+
+ /* default is increasing address */
+ cmd_word = LPC_REG_CMD_OP; /* IO mode, write */
+ waitcnt = LPC_PEROP_WAITCNT;
+ if (!(para->opflags & FG_INCRADDR_LPC)) {
+ cmd_word |= LPC_REG_CMD_SAMEADDR;
+ waitcnt = LPC_MAX_WAITCNT;
+ }
+
+ spin_lock_irqsave(&lpcdev->cycle_lock, flags);
+
+ writel_relaxed(opcnt, lpcdev->membase + LPC_REG_OP_LEN);
+ writel_relaxed(cmd_word, lpcdev->membase + LPC_REG_CMD);
+ writel_relaxed(addr, lpcdev->membase + LPC_REG_ADDR);
+
+ writesb(lpcdev->membase + LPC_REG_WDATA, buf, opcnt);
+
+ writel(LPC_REG_STARTUP_SIGNAL_START,
+ lpcdev->membase + LPC_REG_STARTUP_SIGNAL);
+
+ /* wait for the operation to finish */
+ ret = wait_lpc_idle(lpcdev->membase, waitcnt);
+
+ spin_unlock_irqrestore(&lpcdev->cycle_lock, flags);
+
+ return ret;
+}
+
+static unsigned long hisi_lpc_pio_to_addr(struct hisi_lpc_dev *lpcdev,
+ unsigned long pio)
+{
+ return pio - lpcdev->io_host->io_start + lpcdev->io_host->hw_start;
+}
+
+/*
+ * hisi_lpc_comm_in - input the data in a single operation
+ * @hostdata: pointer to the device information relevant to LPC controller
+ * @pio: the target I/O port address
+ * @dwidth: the data length required to read from the target I/O port
+ *
+ * On success, the data read is returned; otherwise, ~0 is returned.
+ */
+static u32 hisi_lpc_comm_in(void *hostdata, unsigned long pio, size_t dwidth)
+{
+ struct hisi_lpc_dev *lpcdev = hostdata;
+ struct lpc_cycle_para iopara;
+ unsigned long addr;
+ u32 rd_data = 0;
+ int ret;
+
+ if (!lpcdev || !dwidth || dwidth > LPC_MAX_DWIDTH)
+ return ~0;
+
+ addr = hisi_lpc_pio_to_addr(lpcdev, pio);
+
+ iopara.opflags = FG_INCRADDR_LPC;
+ iopara.csize = dwidth;
+
+ ret = hisi_lpc_target_in(lpcdev, &iopara, addr,
+ (unsigned char *)&rd_data, dwidth);
+ if (ret)
+ return ~0;
+
+ return le32_to_cpu(rd_data);
+}
+
+/*
+ * hisi_lpc_comm_out - output the data in a single operation
+ * @hostdata: pointer to the device information relevant to LPC controller
+ * @pio: the target I/O port address
+ * @val: the value to be output by the caller, at most four bytes
+ * @dwidth: the data width to write to the target I/O port
+ *
+ * This function corresponds to out(b,w,l) only.
+ */
+static void hisi_lpc_comm_out(void *hostdata, unsigned long pio,
+ u32 val, size_t dwidth)
+{
+ struct hisi_lpc_dev *lpcdev = hostdata;
+ struct lpc_cycle_para iopara;
+ const unsigned char *buf;
+ unsigned long addr;
+
+ if (!lpcdev || !dwidth || dwidth > LPC_MAX_DWIDTH)
+ return;
+
+ val = cpu_to_le32(val);
+
+ buf = (const unsigned char *)&val;
+ addr = hisi_lpc_pio_to_addr(lpcdev, pio);
+
+ iopara.opflags = FG_INCRADDR_LPC;
+ iopara.csize = dwidth;
+
+ hisi_lpc_target_out(lpcdev, &iopara, addr, buf, dwidth);
+}
+
+/*
+ * hisi_lpc_comm_ins - input the data in the buffer in multiple operations
+ * @hostdata: pointer to the device information relevant to LPC controller
+ * @pio: the target I/O port address
+ * @buffer: a buffer where read/input data bytes are stored
+ * @dwidth: the data width to read from the target I/O port
+ * @count: how many data units of length dwidth will be read
+ *
+ * On success, the data read back is stored in the buffer pointed to by
+ * @buffer. Returns 0 on success, -errno otherwise.
+ */
+static u32 hisi_lpc_comm_ins(void *hostdata, unsigned long pio, void *buffer,
+ size_t dwidth, unsigned int count)
+{
+ struct hisi_lpc_dev *lpcdev = hostdata;
+ unsigned char *buf = buffer;
+ struct lpc_cycle_para iopara;
+ unsigned long addr;
+
+ if (!lpcdev || !buf || !count || !dwidth || dwidth > LPC_MAX_DWIDTH)
+ return -EINVAL;
+
+ iopara.opflags = 0;
+ if (dwidth > 1)
+ iopara.opflags |= FG_INCRADDR_LPC;
+ iopara.csize = dwidth;
+
+ addr = hisi_lpc_pio_to_addr(lpcdev, pio);
+
+ do {
+ int ret;
+
+ ret = hisi_lpc_target_in(lpcdev, &iopara, addr, buf, dwidth);
+ if (ret)
+ return ret;
+ buf += dwidth;
+ } while (--count);
+
+ return 0;
+}
+
+/*
+ * hisi_lpc_comm_outs - output the data in the buffer in multiple operations
+ * @hostdata: pointer to the device information relevant to LPC controller
+ * @pio: the target I/O port address
+ * @buffer: a buffer where write/output data bytes are stored
+ * @dwidth: the data width to write to the target I/O port
+ * @count: how many data units of length dwidth will be written
+ */
+static void hisi_lpc_comm_outs(void *hostdata, unsigned long pio,
+ const void *buffer, size_t dwidth,
+ unsigned int count)
+{
+ struct hisi_lpc_dev *lpcdev = hostdata;
+ struct lpc_cycle_para iopara;
+ const unsigned char *buf = buffer;
+ unsigned long addr;
+
+ if (!lpcdev || !buf || !count || !dwidth || dwidth > LPC_MAX_DWIDTH)
+ return;
+
+ iopara.opflags = 0;
+ if (dwidth > 1)
+ iopara.opflags |= FG_INCRADDR_LPC;
+ iopara.csize = dwidth;
+
+ addr = hisi_lpc_pio_to_addr(lpcdev, pio);
+ do {
+ if (hisi_lpc_target_out(lpcdev, &iopara, addr, buf, dwidth))
+ break;
+ buf += dwidth;
+ } while (--count);
+}
+
+static const struct logic_pio_host_ops hisi_lpc_ops = {
+ .in = hisi_lpc_comm_in,
+ .out = hisi_lpc_comm_out,
+ .ins = hisi_lpc_comm_ins,
+ .outs = hisi_lpc_comm_outs,
+};
+
+#ifdef CONFIG_ACPI
+#define MFD_CHILD_NAME_PREFIX DRV_NAME"-"
+#define MFD_CHILD_NAME_LEN (ACPI_ID_LEN + sizeof(MFD_CHILD_NAME_PREFIX) - 1)
+
+struct hisi_lpc_mfd_cell {
+ struct mfd_cell_acpi_match acpi_match;
+ char name[MFD_CHILD_NAME_LEN];
+ char pnpid[ACPI_ID_LEN];
+};
+
+static int hisi_lpc_acpi_xlat_io_res(struct acpi_device *adev,
+ struct acpi_device *host,
+ struct resource *res)
+{
+ unsigned long sys_port;
+ resource_size_t len = resource_size(res);
+
+ sys_port = logic_pio_trans_hwaddr(&host->fwnode, res->start, len);
+ if (sys_port == ~0UL)
+ return -EFAULT;
+
+ res->start = sys_port;
+ res->end = sys_port + len;
+
+ return 0;
+}
+
+/*
+ * hisi_lpc_acpi_set_io_res - set the resources for a child's MFD
+ * @child: the child device node whose I/O resource is to be updated
+ * @hostdev: the device node associated with the host controller
+ * @res: double pointer to be set to the address of translated resources
+ * @num_res: pointer to variable to hold the number of translated resources
+ *
+ * Returns 0 when successful, and a negative value for failure.
+ *
+ * For a given host controller, each child device will have an associated
+ * host-relative address resource. This function will return the translated
+ * logical PIO addresses for each child device's resources.
+ */
+static int hisi_lpc_acpi_set_io_res(struct device *child,
+ struct device *hostdev,
+ const struct resource **res, int *num_res)
+{
+ struct acpi_device *adev;
+ struct acpi_device *host;
+ struct resource_entry *rentry;
+ LIST_HEAD(resource_list);
+ struct resource *resources;
+ int count;
+ int i;
+
+ if (!child || !hostdev)
+ return -EINVAL;
+
+ host = to_acpi_device(hostdev);
+ adev = to_acpi_device(child);
+
+ if (!adev->status.present) {
+ dev_dbg(child, "device is not present\n");
+ return -EIO;
+ }
+
+ if (acpi_device_enumerated(adev)) {
+ dev_dbg(child, "has been enumerated\n");
+ return -EIO;
+ }
+
+ /*
+ * The following code segment to retrieve the resources is common to
+ * acpi_create_platform_device(), so consider a common helper function
+ * in future.
+ */
+ count = acpi_dev_get_resources(adev, &resource_list, NULL, NULL);
+ if (count <= 0) {
+ dev_dbg(child, "failed to get resources\n");
+ return count ? count : -EIO;
+ }
+
+ resources = devm_kcalloc(hostdev, count, sizeof(*resources),
+ GFP_KERNEL);
+ if (!resources) {
+ dev_warn(hostdev, "could not allocate memory for %d resources\n",
+ count);
+ acpi_dev_free_resource_list(&resource_list);
+ return -ENOMEM;
+ }
+ count = 0;
+ list_for_each_entry(rentry, &resource_list, node)
+ resources[count++] = *rentry->res;
+
+ acpi_dev_free_resource_list(&resource_list);
+
+ /* translate the I/O resources */
+ for (i = 0; i < count; i++) {
+ int ret;
+
+ if (!(resources[i].flags & IORESOURCE_IO))
+ continue;
+ ret = hisi_lpc_acpi_xlat_io_res(adev, host, &resources[i]);
+ if (ret) {
+ dev_err(child, "translate IO range %pR failed (%d)\n",
+ &resources[i], ret);
+ return ret;
+ }
+ }
+ *res = resources;
+ *num_res = count;
+
+ return 0;
+}
+
+/*
+ * hisi_lpc_acpi_probe - probe children for ACPI FW
+ * @hostdev: LPC host device pointer
+ *
+ * Returns 0 when successful, and a negative value for failure.
+ *
+ * Scan all child devices and create a per-device MFD with
+ * logical PIO translated IO resources.
+ */
+static int hisi_lpc_acpi_probe(struct device *hostdev)
+{
+ struct acpi_device *adev = ACPI_COMPANION(hostdev);
+ struct hisi_lpc_mfd_cell *hisi_lpc_mfd_cells;
+ struct mfd_cell *mfd_cells;
+ struct acpi_device *child;
+ int size, ret, count = 0, cell_num = 0;
+
+ list_for_each_entry(child, &adev->children, node)
+ cell_num++;
+
+ /* allocate the mfd cell and companion ACPI info, one per child */
+ size = sizeof(*mfd_cells) + sizeof(*hisi_lpc_mfd_cells);
+ mfd_cells = devm_kcalloc(hostdev, cell_num, size, GFP_KERNEL);
+ if (!mfd_cells)
+ return -ENOMEM;
+
+ hisi_lpc_mfd_cells = (struct hisi_lpc_mfd_cell *)&mfd_cells[cell_num];
+ /* Only consider the children of the host */
+ list_for_each_entry(child, &adev->children, node) {
+ struct mfd_cell *mfd_cell = &mfd_cells[count];
+ struct hisi_lpc_mfd_cell *hisi_lpc_mfd_cell =
+ &hisi_lpc_mfd_cells[count];
+ struct mfd_cell_acpi_match *acpi_match =
+ &hisi_lpc_mfd_cell->acpi_match;
+ char *name = hisi_lpc_mfd_cell[count].name;
+ char *pnpid = hisi_lpc_mfd_cell[count].pnpid;
+ struct mfd_cell_acpi_match match = {
+ .pnpid = pnpid,
+ };
+
+ /*
+ * For any instance of this host controller (Hip06 and Hip07
+ * are the only chipsets), we would not have multiple slaves
+ * with the same HID, and in any system there is just one
+ * active controller, so don't worry about MFD name clashes.
+ */
+ snprintf(name, MFD_CHILD_NAME_LEN, MFD_CHILD_NAME_PREFIX"%s",
+ acpi_device_hid(child));
+ snprintf(pnpid, ACPI_ID_LEN, "%s", acpi_device_hid(child));
+
+ memcpy(acpi_match, &match, sizeof(*acpi_match));
+ mfd_cell->name = name;
+ mfd_cell->acpi_match = acpi_match;
+
+ ret = hisi_lpc_acpi_set_io_res(&child->dev, &adev->dev,
+ &mfd_cell->resources,
+ &mfd_cell->num_resources);
+ if (ret) {
+ dev_warn(&child->dev, "set resource fail (%d)\n", ret);
+ return ret;
+ }
+ count++;
+ }
+
+ ret = mfd_add_devices(hostdev, PLATFORM_DEVID_NONE,
+ mfd_cells, cell_num, NULL, 0, NULL);
+ if (ret) {
+ dev_err(hostdev, "failed to add mfd cells (%d)\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct acpi_device_id hisi_lpc_acpi_match[] = {
+ {"HISI0191"},
+ {}
+};
+#else
+static int hisi_lpc_acpi_probe(struct device *dev)
+{
+ return -ENODEV;
+}
+#endif // CONFIG_ACPI
+
+/*
+ * hisi_lpc_probe - the probe callback function for the hisi lpc host;
+ * finishes all the initialization
+ * @pdev: the platform device corresponding to the hisi lpc host
+ *
+ * Returns 0 on success, non-zero on failure.
+ */
+static int hisi_lpc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct acpi_device *acpi_device = ACPI_COMPANION(dev);
+ struct logic_pio_hwaddr *range;
+ struct hisi_lpc_dev *lpcdev;
+ resource_size_t io_end;
+ struct resource *res;
+ int ret;
+
+ lpcdev = devm_kzalloc(dev, sizeof(*lpcdev), GFP_KERNEL);
+ if (!lpcdev)
+ return -ENOMEM;
+
+ spin_lock_init(&lpcdev->cycle_lock);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ lpcdev->membase = devm_ioremap_resource(dev, res);
+ if (IS_ERR(lpcdev->membase))
+ return PTR_ERR(lpcdev->membase);
+
+ range = devm_kzalloc(dev, sizeof(*range), GFP_KERNEL);
+ if (!range)
+ return -ENOMEM;
+
+ range->fwnode = dev->fwnode;
+ range->flags = LOGIC_PIO_INDIRECT;
+ range->size = PIO_INDIRECT_SIZE;
+
+ ret = logic_pio_register_range(range);
+ if (ret) {
+ dev_err(dev, "register IO range failed (%d)!\n", ret);
+ return ret;
+ }
+ lpcdev->io_host = range;
+
+ /* register the LPC host PIO resources */
+ if (acpi_device)
+ ret = hisi_lpc_acpi_probe(dev);
+ else
+ ret = of_platform_populate(dev->of_node, NULL, NULL, dev);
+ if (ret)
+ return ret;
+
+ lpcdev->io_host->hostdata = lpcdev;
+ lpcdev->io_host->ops = &hisi_lpc_ops;
+
+ io_end = lpcdev->io_host->io_start + lpcdev->io_host->size;
+ dev_info(dev, "registered range [%pa - %pa]\n",
+ &lpcdev->io_host->io_start, &io_end);
+
+ return ret;
+}
+
+static const struct of_device_id hisi_lpc_of_match[] = {
+ { .compatible = "hisilicon,hip06-lpc", },
+ { .compatible = "hisilicon,hip07-lpc", },
+ {}
+};
+
+static struct platform_driver hisi_lpc_driver = {
+ .driver = {
+ .name = DRV_NAME,
+ .of_match_table = hisi_lpc_of_match,
+ .acpi_match_table = ACPI_PTR(hisi_lpc_acpi_match),
+ },
+ .probe = hisi_lpc_probe,
+};
+builtin_platform_driver(hisi_lpc_driver);
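As a hedged illustration of what the new driver buys its children (not part of the patch; the child driver below is hypothetical): once hisi_lpc_probe() has registered the LOGIC_PIO_INDIRECT range, a peripheral behind the LPC bus receives an ordinary IORESOURCE_IO resource and can use the standard port accessors, which the logic_pio layer dispatches to hisi_lpc_ops:

#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>

/* Hypothetical child of the LPC host; res is its translated logical PIO range */
static int foo_lpc_child_probe(struct platform_device *pdev)
{
	struct resource *res = platform_get_resource(pdev, IORESOURCE_IO, 0);
	u8 id;

	if (!res)
		return -ENODEV;

	outb(0x01, res->start);      /* routed to hisi_lpc_comm_out() */
	id = inb(res->start + 1);    /* routed to hisi_lpc_comm_in() */
	dev_info(&pdev->dev, "child id register: %#x\n", id);

	return 0;
}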
diff --git a/drivers/char/xillybus/xillybus_pcie.c b/drivers/char/xillybus/xillybus_pcie.c
index dff2d1538164..05e5324f60bd 100644
--- a/drivers/char/xillybus/xillybus_pcie.c
+++ b/drivers/char/xillybus/xillybus_pcie.c
@@ -24,7 +24,6 @@ MODULE_LICENSE("GPL v2");
#define PCI_DEVICE_ID_XILLYBUS 0xebeb
-#define PCI_VENDOR_ID_ALTERA 0x1172
#define PCI_VENDOR_ID_ACTEL 0x11aa
#define PCI_VENDOR_ID_LATTICE 0x1204
diff --git a/drivers/fpga/altera-cvp.c b/drivers/fpga/altera-cvp.c
index 00e73d28077c..77b04e4b3254 100644
--- a/drivers/fpga/altera-cvp.c
+++ b/drivers/fpga/altera-cvp.c
@@ -384,8 +384,6 @@ static int altera_cvp_probe(struct pci_dev *pdev,
const struct pci_device_id *dev_id);
static void altera_cvp_remove(struct pci_dev *pdev);
-#define PCI_VENDOR_ID_ALTERA 0x1172
-
static struct pci_device_id altera_cvp_id_tbl[] = {
{ PCI_VDEVICE(ALTERA, PCI_ANY_ID) },
{ }
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 173d0095e3b2..ca17508fd28c 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -434,7 +434,10 @@ static int i915_getparam(struct drm_device *dev, void *data,
static int i915_get_bridge_dev(struct drm_i915_private *dev_priv)
{
- dev_priv->bridge_dev = pci_get_bus_and_slot(0, PCI_DEVFN(0, 0));
+ int domain = pci_domain_nr(dev_priv->drm.pdev->bus);
+
+ dev_priv->bridge_dev =
+ pci_get_domain_bus_and_slot(domain, 0, PCI_DEVFN(0, 0));
if (!dev_priv->bridge_dev) {
DRM_ERROR("bridge device not found\n");
return -1;
diff --git a/drivers/mcb/mcb-pci.c b/drivers/mcb/mcb-pci.c
index af4d2f26f1c6..c2d69e33bf2b 100644
--- a/drivers/mcb/mcb-pci.c
+++ b/drivers/mcb/mcb-pci.c
@@ -117,6 +117,7 @@ static void mcb_pci_remove(struct pci_dev *pdev)
static const struct pci_device_id mcb_pci_tbl[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_MEN, PCI_DEVICE_ID_MEN_CHAMELEON) },
+ { PCI_DEVICE(PCI_VENDOR_ID_ALTERA, PCI_DEVICE_ID_MEN_CHAMELEON) },
{ 0 },
};
MODULE_DEVICE_TABLE(pci, mcb_pci_tbl);
diff --git a/drivers/misc/pci_endpoint_test.c b/drivers/misc/pci_endpoint_test.c
index 320276f42653..fe8897e64635 100644
--- a/drivers/misc/pci_endpoint_test.c
+++ b/drivers/misc/pci_endpoint_test.c
@@ -534,12 +534,14 @@ static int pci_endpoint_test_probe(struct pci_dev *pdev,
}
for (bar = BAR_0; bar <= BAR_5; bar++) {
- base = pci_ioremap_bar(pdev, bar);
- if (!base) {
- dev_err(dev, "failed to read BAR%d\n", bar);
- WARN_ON(bar == test_reg_bar);
+ if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) {
+ base = pci_ioremap_bar(pdev, bar);
+ if (!base) {
+ dev_err(dev, "failed to read BAR%d\n", bar);
+ WARN_ON(bar == test_reg_bar);
+ }
+ test->bar[bar] = base;
}
- test->bar[bar] = base;
}
test->base = test->bar[test_reg_bar];
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
index a434fecfdfeb..aa05fb534942 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
@@ -2120,91 +2120,6 @@ static int fm10k_sw_init(struct fm10k_intfc *interface,
return 0;
}
-static void fm10k_slot_warn(struct fm10k_intfc *interface)
-{
- enum pcie_link_width width = PCIE_LNK_WIDTH_UNKNOWN;
- enum pci_bus_speed speed = PCI_SPEED_UNKNOWN;
- struct fm10k_hw *hw = &interface->hw;
- int max_gts = 0, expected_gts = 0;
-
- if (pcie_get_minimum_link(interface->pdev, &speed, &width) ||
- speed == PCI_SPEED_UNKNOWN || width == PCIE_LNK_WIDTH_UNKNOWN) {
- dev_warn(&interface->pdev->dev,
- "Unable to determine PCI Express bandwidth.\n");
- return;
- }
-
- switch (speed) {
- case PCIE_SPEED_2_5GT:
- /* 8b/10b encoding reduces max throughput by 20% */
- max_gts = 2 * width;
- break;
- case PCIE_SPEED_5_0GT:
- /* 8b/10b encoding reduces max throughput by 20% */
- max_gts = 4 * width;
- break;
- case PCIE_SPEED_8_0GT:
- /* 128b/130b encoding has less than 2% impact on throughput */
- max_gts = 8 * width;
- break;
- default:
- dev_warn(&interface->pdev->dev,
- "Unable to determine PCI Express bandwidth.\n");
- return;
- }
-
- dev_info(&interface->pdev->dev,
- "PCI Express bandwidth of %dGT/s available\n",
- max_gts);
- dev_info(&interface->pdev->dev,
- "(Speed:%s, Width: x%d, Encoding Loss:%s, Payload:%s)\n",
- (speed == PCIE_SPEED_8_0GT ? "8.0GT/s" :
- speed == PCIE_SPEED_5_0GT ? "5.0GT/s" :
- speed == PCIE_SPEED_2_5GT ? "2.5GT/s" :
- "Unknown"),
- hw->bus.width,
- (speed == PCIE_SPEED_2_5GT ? "20%" :
- speed == PCIE_SPEED_5_0GT ? "20%" :
- speed == PCIE_SPEED_8_0GT ? "<2%" :
- "Unknown"),
- (hw->bus.payload == fm10k_bus_payload_128 ? "128B" :
- hw->bus.payload == fm10k_bus_payload_256 ? "256B" :
- hw->bus.payload == fm10k_bus_payload_512 ? "512B" :
- "Unknown"));
-
- switch (hw->bus_caps.speed) {
- case fm10k_bus_speed_2500:
- /* 8b/10b encoding reduces max throughput by 20% */
- expected_gts = 2 * hw->bus_caps.width;
- break;
- case fm10k_bus_speed_5000:
- /* 8b/10b encoding reduces max throughput by 20% */
- expected_gts = 4 * hw->bus_caps.width;
- break;
- case fm10k_bus_speed_8000:
- /* 128b/130b encoding has less than 2% impact on throughput */
- expected_gts = 8 * hw->bus_caps.width;
- break;
- default:
- dev_warn(&interface->pdev->dev,
- "Unable to determine expected PCI Express bandwidth.\n");
- return;
- }
-
- if (max_gts >= expected_gts)
- return;
-
- dev_warn(&interface->pdev->dev,
- "This device requires %dGT/s of bandwidth for optimal performance.\n",
- expected_gts);
- dev_warn(&interface->pdev->dev,
- "A %sslot with x%d lanes is suggested.\n",
- (hw->bus_caps.speed == fm10k_bus_speed_2500 ? "2.5GT/s " :
- hw->bus_caps.speed == fm10k_bus_speed_5000 ? "5.0GT/s " :
- hw->bus_caps.speed == fm10k_bus_speed_8000 ? "8.0GT/s " : ""),
- hw->bus_caps.width);
-}
-
/**
* fm10k_probe - Device Initialization Routine
* @pdev: PCI device information struct
@@ -2326,7 +2241,7 @@ static int fm10k_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
mod_timer(&interface->service_timer, (HZ * 2) + jiffies);
/* print warning for non-optimal configurations */
- fm10k_slot_warn(interface);
+ pcie_print_link_status(interface->pdev);
/* report MAC address for logging */
dev_info(&pdev->dev, "%pM\n", netdev->dev_addr);
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 4d84cab77105..30cacac54e69 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -623,85 +623,6 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
return 0;
}
-static int mlx4_get_pcie_dev_link_caps(struct mlx4_dev *dev,
- enum pci_bus_speed *speed,
- enum pcie_link_width *width)
-{
- u32 lnkcap1, lnkcap2;
- int err1, err2;
-
-#define PCIE_MLW_CAP_SHIFT 4 /* start of MLW mask in link capabilities */
-
- *speed = PCI_SPEED_UNKNOWN;
- *width = PCIE_LNK_WIDTH_UNKNOWN;
-
- err1 = pcie_capability_read_dword(dev->persist->pdev, PCI_EXP_LNKCAP,
- &lnkcap1);
- err2 = pcie_capability_read_dword(dev->persist->pdev, PCI_EXP_LNKCAP2,
- &lnkcap2);
- if (!err2 && lnkcap2) { /* PCIe r3.0-compliant */
- if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_8_0GB)
- *speed = PCIE_SPEED_8_0GT;
- else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_5_0GB)
- *speed = PCIE_SPEED_5_0GT;
- else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_2_5GB)
- *speed = PCIE_SPEED_2_5GT;
- }
- if (!err1) {
- *width = (lnkcap1 & PCI_EXP_LNKCAP_MLW) >> PCIE_MLW_CAP_SHIFT;
- if (!lnkcap2) { /* pre-r3.0 */
- if (lnkcap1 & PCI_EXP_LNKCAP_SLS_5_0GB)
- *speed = PCIE_SPEED_5_0GT;
- else if (lnkcap1 & PCI_EXP_LNKCAP_SLS_2_5GB)
- *speed = PCIE_SPEED_2_5GT;
- }
- }
-
- if (*speed == PCI_SPEED_UNKNOWN || *width == PCIE_LNK_WIDTH_UNKNOWN) {
- return err1 ? err1 :
- err2 ? err2 : -EINVAL;
- }
- return 0;
-}
-
-static void mlx4_check_pcie_caps(struct mlx4_dev *dev)
-{
- enum pcie_link_width width, width_cap;
- enum pci_bus_speed speed, speed_cap;
- int err;
-
-#define PCIE_SPEED_STR(speed) \
- (speed == PCIE_SPEED_8_0GT ? "8.0GT/s" : \
- speed == PCIE_SPEED_5_0GT ? "5.0GT/s" : \
- speed == PCIE_SPEED_2_5GT ? "2.5GT/s" : \
- "Unknown")
-
- err = mlx4_get_pcie_dev_link_caps(dev, &speed_cap, &width_cap);
- if (err) {
- mlx4_warn(dev,
- "Unable to determine PCIe device BW capabilities\n");
- return;
- }
-
- err = pcie_get_minimum_link(dev->persist->pdev, &speed, &width);
- if (err || speed == PCI_SPEED_UNKNOWN ||
- width == PCIE_LNK_WIDTH_UNKNOWN) {
- mlx4_warn(dev,
- "Unable to determine PCI device chain minimum BW\n");
- return;
- }
-
- if (width != width_cap || speed != speed_cap)
- mlx4_warn(dev,
- "PCIe BW is different than device's capability\n");
-
- mlx4_info(dev, "PCIe link speed is %s, device supports %s\n",
- PCIE_SPEED_STR(speed), PCIE_SPEED_STR(speed_cap));
- mlx4_info(dev, "PCIe link width is x%d, device supports x%d\n",
- width, width_cap);
- return;
-}
-
/*The function checks if there are live vf, return the num of them*/
static int mlx4_how_many_lives_vf(struct mlx4_dev *dev)
{
@@ -3475,7 +3396,7 @@ slave_start:
* express device capabilities are under-satisfied by the bus.
*/
if (!mlx4_is_slave(dev))
- mlx4_check_pcie_caps(dev);
+ pcie_print_link_status(dev->persist->pdev);
/* In master functions, the communication channel must be initialized
* after obtaining its address from fw */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 47bab842c5ee..93291ec4a3d1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -3864,36 +3864,6 @@ void mlx5e_build_default_indir_rqt(u32 *indirection_rqt, int len,
indirection_rqt[i] = i % num_channels;
}
-static int mlx5e_get_pci_bw(struct mlx5_core_dev *mdev, u32 *pci_bw)
-{
- enum pcie_link_width width;
- enum pci_bus_speed speed;
- int err = 0;
-
- err = pcie_get_minimum_link(mdev->pdev, &speed, &width);
- if (err)
- return err;
-
- if (speed == PCI_SPEED_UNKNOWN || width == PCIE_LNK_WIDTH_UNKNOWN)
- return -EINVAL;
-
- switch (speed) {
- case PCIE_SPEED_2_5GT:
- *pci_bw = 2500 * width;
- break;
- case PCIE_SPEED_5_0GT:
- *pci_bw = 5000 * width;
- break;
- case PCIE_SPEED_8_0GT:
- *pci_bw = 8000 * width;
- break;
- default:
- return -EINVAL;
- }
-
- return 0;
-}
-
static bool cqe_compress_heuristic(u32 link_speed, u32 pci_bw)
{
return (link_speed && pci_bw &&
@@ -3979,7 +3949,7 @@ void mlx5e_build_nic_params(struct mlx5_core_dev *mdev,
params->num_tc = 1;
mlx5e_get_max_linkspeed(mdev, &link_speed);
- mlx5e_get_pci_bw(mdev, &pci_bw);
+ pci_bw = pcie_bandwidth_available(mdev->pdev, NULL, NULL, NULL);
mlx5_core_dbg(mdev, "Max link speed = %d, PCI BW = %d\n",
link_speed, pci_bw);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index 2ef641c91c26..622f02d34aae 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -1043,6 +1043,10 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
dev_info(&pdev->dev, "firmware version: %d.%d.%d\n", fw_rev_maj(dev),
fw_rev_min(dev), fw_rev_sub(dev));
+ /* Only PFs hold the relevant PCIe information for this query */
+ if (mlx5_core_is_pf(dev))
+ pcie_print_link_status(dev->pdev);
+
/* on load removing any previous indication of internal error, device is
* up
*/
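The fm10k, mlx4, and mlx5 hunks above all drop driver-private link-speed math in favour of the new PCI core helpers. A minimal sketch of how a driver would use them (the wrapper function below is hypothetical; the helper signatures match their use in the hunks above):

#include <linux/pci.h>

/* Hypothetical probe-time bandwidth report for a device pdev */
static void foo_report_pcie_bw(struct pci_dev *pdev)
{
	enum pcie_link_width width;
	enum pci_bus_speed speed;
	struct pci_dev *limiting = NULL;
	u32 bw;

	/* Log the link speed/width and note the limiting link, if any */
	pcie_print_link_status(pdev);

	/* Or fetch the raw numbers for driver-specific heuristics */
	bw = pcie_bandwidth_available(pdev, &limiting, &speed, &width);
	dev_dbg(&pdev->dev, "available PCIe bandwidth: %u Mb/s (x%d)\n",
		bw, width);
}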
diff --git a/drivers/of/address.c b/drivers/of/address.c
index ce4d3d8b85de..53349912ac75 100644
--- a/drivers/of/address.c
+++ b/drivers/of/address.c
@@ -2,8 +2,10 @@
#define pr_fmt(fmt) "OF: " fmt
#include <linux/device.h>
+#include <linux/fwnode.h>
#include <linux/io.h>
#include <linux/ioport.h>
+#include <linux/logic_pio.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/pci.h>
@@ -333,7 +335,8 @@ int of_pci_range_to_resource(struct of_pci_range *range,
if (res->flags & IORESOURCE_IO) {
unsigned long port;
- err = pci_register_io_range(range->cpu_addr, range->size);
+ err = pci_register_io_range(&np->fwnode, range->cpu_addr,
+ range->size);
if (err)
goto invalid_range;
port = pci_address_to_pio(range->cpu_addr);
@@ -560,9 +563,14 @@ static int of_translate_one(struct device_node *parent, struct of_bus *bus,
* that translation is impossible (that is we are not dealing with a value
* that can be mapped to a cpu physical address). This is not really specified
* that way, but this is traditionally the way IBM at least do things
+ *
+ * When the address is claimed by an indirect-IO host, the *host pointer
+ * is set to the device node that registered the logical PIO mapping, and
+ * the returned address is relative to that node.
*/
static u64 __of_translate_address(struct device_node *dev,
- const __be32 *in_addr, const char *rprop)
+ const __be32 *in_addr, const char *rprop,
+ struct device_node **host)
{
struct device_node *parent = NULL;
struct of_bus *bus, *pbus;
@@ -575,6 +583,7 @@ static u64 __of_translate_address(struct device_node *dev,
/* Increase refcount at current level */
of_node_get(dev);
+ *host = NULL;
/* Get parent & match bus type */
parent = of_get_parent(dev);
if (parent == NULL)
@@ -595,6 +604,8 @@ static u64 __of_translate_address(struct device_node *dev,
/* Translate */
for (;;) {
+ struct logic_pio_hwaddr *iorange;
+
/* Switch to parent bus */
of_node_put(dev);
dev = parent;
@@ -607,6 +618,19 @@ static u64 __of_translate_address(struct device_node *dev,
break;
}
+ /*
+ * For an indirect-IO device, which has no ranges property, get
+ * the address directly from the reg property.
+ */
+ iorange = find_io_range_by_fwnode(&dev->fwnode);
+ if (iorange && (iorange->flags != LOGIC_PIO_CPU_MMIO)) {
+ result = of_read_number(addr + 1, na - 1);
+ pr_debug("indirectIO matched(%pOF) 0x%llx\n",
+ dev, result);
+ *host = of_node_get(dev);
+ break;
+ }
+
/* Get new parent bus and counts */
pbus = of_match_bus(parent);
pbus->count_cells(dev, &pna, &pns);
@@ -638,13 +662,32 @@ static u64 __of_translate_address(struct device_node *dev,
u64 of_translate_address(struct device_node *dev, const __be32 *in_addr)
{
- return __of_translate_address(dev, in_addr, "ranges");
+ struct device_node *host;
+ u64 ret;
+
+ ret = __of_translate_address(dev, in_addr, "ranges", &host);
+ if (host) {
+ of_node_put(host);
+ return OF_BAD_ADDR;
+ }
+
+ return ret;
}
EXPORT_SYMBOL(of_translate_address);
u64 of_translate_dma_address(struct device_node *dev, const __be32 *in_addr)
{
- return __of_translate_address(dev, in_addr, "dma-ranges");
+ struct device_node *host;
+ u64 ret;
+
+ ret = __of_translate_address(dev, in_addr, "dma-ranges", &host);
+
+ if (host) {
+ of_node_put(host);
+ return OF_BAD_ADDR;
+ }
+
+ return ret;
}
EXPORT_SYMBOL(of_translate_dma_address);
@@ -686,29 +729,48 @@ const __be32 *of_get_address(struct device_node *dev, int index, u64 *size,
}
EXPORT_SYMBOL(of_get_address);
+static u64 of_translate_ioport(struct device_node *dev, const __be32 *in_addr,
+ u64 size)
+{
+ u64 taddr;
+ unsigned long port;
+ struct device_node *host;
+
+ taddr = __of_translate_address(dev, in_addr, "ranges", &host);
+ if (host) {
+ /* host-specific port access */
+ port = logic_pio_trans_hwaddr(&host->fwnode, taddr, size);
+ of_node_put(host);
+ } else {
+ /* memory-mapped I/O range */
+ port = pci_address_to_pio(taddr);
+ }
+
+ if (port == (unsigned long)-1)
+ return OF_BAD_ADDR;
+
+ return port;
+}
+
static int __of_address_to_resource(struct device_node *dev,
const __be32 *addrp, u64 size, unsigned int flags,
const char *name, struct resource *r)
{
u64 taddr;
- if ((flags & (IORESOURCE_IO | IORESOURCE_MEM)) == 0)
+ if (flags & IORESOURCE_MEM)
+ taddr = of_translate_address(dev, addrp);
+ else if (flags & IORESOURCE_IO)
+ taddr = of_translate_ioport(dev, addrp, size);
+ else
return -EINVAL;
- taddr = of_translate_address(dev, addrp);
+
if (taddr == OF_BAD_ADDR)
return -EINVAL;
memset(r, 0, sizeof(struct resource));
- if (flags & IORESOURCE_IO) {
- unsigned long port;
- port = pci_address_to_pio(taddr);
- if (port == (unsigned long)-1)
- return -EINVAL;
- r->start = port;
- r->end = port + size - 1;
- } else {
- r->start = taddr;
- r->end = taddr + size - 1;
- }
+
+ r->start = taddr;
+ r->end = taddr + size - 1;
r->flags = flags;
r->name = name ? name : dev->full_name;
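A brief sketch of the effect on consumers of of_address_to_resource() (illustrative only; the helper below is hypothetical): for a child node under an indirect-IO host, the returned IORESOURCE_IO resource now carries a logical PIO cookie obtained via logic_pio_trans_hwaddr() rather than a CPU-translated address:

#include <linux/ioport.h>
#include <linux/of.h>
#include <linux/of_address.h>

/* Hypothetical: fetch a child's first "reg" entry as an I/O resource */
static int foo_get_child_io(struct device_node *child, struct resource *res)
{
	int ret = of_address_to_resource(child, 0, res);

	if (ret)
		return ret;

	/*
	 * Under a LOGIC_PIO_INDIRECT host, res->start is a logical PIO
	 * token usable with inb()/outb(), not a CPU physical address.
	 */
	if (resource_type(res) != IORESOURCE_IO)
		return -EINVAL;

	return 0;
}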
diff --git a/drivers/pci/Makefile b/drivers/pci/Makefile
index 941970936840..952addc7bacf 100644
--- a/drivers/pci/Makefile
+++ b/drivers/pci/Makefile
@@ -1,61 +1,40 @@
# SPDX-License-Identifier: GPL-2.0
#
# Makefile for the PCI bus specific drivers.
-#
-obj-$(CONFIG_PCI) += access.o bus.o probe.o host-bridge.o remove.o pci.o \
- pci-driver.o search.o pci-sysfs.o rom.o setup-res.o \
- irq.o vpd.o setup-bus.o vc.o mmap.o setup-irq.o
+obj-$(CONFIG_PCI) += access.o bus.o probe.o host-bridge.o \
+ remove.o pci.o pci-driver.o search.o \
+ pci-sysfs.o rom.o setup-res.o irq.o vpd.o \
+ setup-bus.o vc.o mmap.o setup-irq.o
ifdef CONFIG_PCI
-obj-$(CONFIG_PROC_FS) += proc.o
-obj-$(CONFIG_SYSFS) += slot.o
-obj-$(CONFIG_OF) += of.o
+obj-$(CONFIG_PROC_FS) += proc.o
+obj-$(CONFIG_SYSFS) += slot.o
+obj-$(CONFIG_OF) += of.o
endif
-obj-$(CONFIG_PCI_QUIRKS) += quirks.o
-
-# Build PCI Express stuff if needed
-obj-$(CONFIG_PCIEPORTBUS) += pcie/
-
-# Build the PCI Hotplug drivers if we were asked to
-obj-$(CONFIG_HOTPLUG_PCI) += hotplug/
-
-# Build the PCI MSI interrupt support
-obj-$(CONFIG_PCI_MSI) += msi.o
-
-obj-$(CONFIG_PCI_ATS) += ats.o
-obj-$(CONFIG_PCI_IOV) += iov.o
-
-#
-# ACPI Related PCI FW Functions
-# ACPI _DSM provided firmware instance and string name
-#
-obj-$(CONFIG_ACPI) += pci-acpi.o
-
-# SMBIOS provided firmware instance and labels
-obj-$(CONFIG_PCI_LABEL) += pci-label.o
-
-# Intel MID platform PM support
-obj-$(CONFIG_X86_INTEL_MID) += pci-mid.o
-
-obj-$(CONFIG_PCI_SYSCALL) += syscall.o
-
-obj-$(CONFIG_PCI_STUB) += pci-stub.o
-
-obj-$(CONFIG_PCI_ECAM) += ecam.o
-
+obj-$(CONFIG_PCI_QUIRKS) += quirks.o
+obj-$(CONFIG_PCIEPORTBUS) += pcie/
+obj-$(CONFIG_HOTPLUG_PCI) += hotplug/
+obj-$(CONFIG_PCI_MSI) += msi.o
+obj-$(CONFIG_PCI_ATS) += ats.o
+obj-$(CONFIG_PCI_IOV) += iov.o
+obj-$(CONFIG_ACPI) += pci-acpi.o
+obj-$(CONFIG_PCI_LABEL) += pci-label.o
+obj-$(CONFIG_X86_INTEL_MID) += pci-mid.o
+obj-$(CONFIG_PCI_SYSCALL) += syscall.o
+obj-$(CONFIG_PCI_STUB) += pci-stub.o
+obj-$(CONFIG_PCI_ECAM) += ecam.o
obj-$(CONFIG_XEN_PCIDEV_FRONTEND) += xen-pcifront.o
-ccflags-$(CONFIG_PCI_DEBUG) := -DDEBUG
-
-# PCI host controller drivers
-obj-y += host/
-obj-y += switch/
+obj-y += host/
+obj-y += switch/
+# Endpoint library must be initialized before its users
obj-$(CONFIG_PCI_ENDPOINT) += endpoint/
-# Endpoint library must be initialized before its users
obj-$(CONFIG_PCIE_CADENCE) += cadence/
# pcie-hisi.o quirks are needed even without CONFIG_PCIE_DW
obj-y += dwc/
+
+ccflags-$(CONFIG_PCI_DEBUG) := -DDEBUG
diff --git a/drivers/pci/access.c b/drivers/pci/access.c
index 5e9a9822d9d4..a3ad2fe185b9 100644
--- a/drivers/pci/access.c
+++ b/drivers/pci/access.c
@@ -1,8 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
-#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/module.h>
-#include <linux/sched/signal.h>
#include <linux/slab.h>
#include <linux/ioport.h>
#include <linux/wait.h>
@@ -17,9 +15,9 @@
DEFINE_RAW_SPINLOCK(pci_lock);
/*
- * Wrappers for all PCI configuration access functions. They just check
- * alignment, do locking and call the low-level functions pointed to
- * by pci_dev->ops.
+ * Wrappers for all PCI configuration access functions. They just check
+ * alignment, do locking and call the low-level functions pointed to
+ * by pci_dev->ops.
*/
#define PCI_byte_BAD 0
@@ -264,372 +262,6 @@ PCI_USER_WRITE_CONFIG(byte, u8)
PCI_USER_WRITE_CONFIG(word, u16)
PCI_USER_WRITE_CONFIG(dword, u32)
-/* VPD access through PCI 2.2+ VPD capability */
-
-/**
- * pci_read_vpd - Read one entry from Vital Product Data
- * @dev: pci device struct
- * @pos: offset in vpd space
- * @count: number of bytes to read
- * @buf: pointer to where to store result
- */
-ssize_t pci_read_vpd(struct pci_dev *dev, loff_t pos, size_t count, void *buf)
-{
- if (!dev->vpd || !dev->vpd->ops)
- return -ENODEV;
- return dev->vpd->ops->read(dev, pos, count, buf);
-}
-EXPORT_SYMBOL(pci_read_vpd);
-
-/**
- * pci_write_vpd - Write entry to Vital Product Data
- * @dev: pci device struct
- * @pos: offset in vpd space
- * @count: number of bytes to write
- * @buf: buffer containing write data
- */
-ssize_t pci_write_vpd(struct pci_dev *dev, loff_t pos, size_t count, const void *buf)
-{
- if (!dev->vpd || !dev->vpd->ops)
- return -ENODEV;
- return dev->vpd->ops->write(dev, pos, count, buf);
-}
-EXPORT_SYMBOL(pci_write_vpd);
-
-/**
- * pci_set_vpd_size - Set size of Vital Product Data space
- * @dev: pci device struct
- * @len: size of vpd space
- */
-int pci_set_vpd_size(struct pci_dev *dev, size_t len)
-{
- if (!dev->vpd || !dev->vpd->ops)
- return -ENODEV;
- return dev->vpd->ops->set_size(dev, len);
-}
-EXPORT_SYMBOL(pci_set_vpd_size);
-
-#define PCI_VPD_MAX_SIZE (PCI_VPD_ADDR_MASK + 1)
-
-/**
- * pci_vpd_size - determine actual size of Vital Product Data
- * @dev: pci device struct
- * @old_size: current assumed size, also maximum allowed size
- */
-static size_t pci_vpd_size(struct pci_dev *dev, size_t old_size)
-{
- size_t off = 0;
- unsigned char header[1+2]; /* 1 byte tag, 2 bytes length */
-
- while (off < old_size &&
- pci_read_vpd(dev, off, 1, header) == 1) {
- unsigned char tag;
-
- if (header[0] & PCI_VPD_LRDT) {
- /* Large Resource Data Type Tag */
- tag = pci_vpd_lrdt_tag(header);
- /* Only read length from known tag items */
- if ((tag == PCI_VPD_LTIN_ID_STRING) ||
- (tag == PCI_VPD_LTIN_RO_DATA) ||
- (tag == PCI_VPD_LTIN_RW_DATA)) {
- if (pci_read_vpd(dev, off+1, 2,
- &header[1]) != 2) {
- pci_warn(dev, "invalid large VPD tag %02x size at offset %zu",
- tag, off + 1);
- return 0;
- }
- off += PCI_VPD_LRDT_TAG_SIZE +
- pci_vpd_lrdt_size(header);
- }
- } else {
- /* Short Resource Data Type Tag */
- off += PCI_VPD_SRDT_TAG_SIZE +
- pci_vpd_srdt_size(header);
- tag = pci_vpd_srdt_tag(header);
- }
-
- if (tag == PCI_VPD_STIN_END) /* End tag descriptor */
- return off;
-
- if ((tag != PCI_VPD_LTIN_ID_STRING) &&
- (tag != PCI_VPD_LTIN_RO_DATA) &&
- (tag != PCI_VPD_LTIN_RW_DATA)) {
- pci_warn(dev, "invalid %s VPD tag %02x at offset %zu",
- (header[0] & PCI_VPD_LRDT) ? "large" : "short",
- tag, off);
- return 0;
- }
- }
- return 0;
-}
-
-/*
- * Wait for last operation to complete.
- * This code has to spin since there is no other notification from the PCI
- * hardware. Since the VPD is often implemented by serial attachment to an
- * EEPROM, it may take many milliseconds to complete.
- *
- * Returns 0 on success, negative values indicate error.
- */
-static int pci_vpd_wait(struct pci_dev *dev)
-{
- struct pci_vpd *vpd = dev->vpd;
- unsigned long timeout = jiffies + msecs_to_jiffies(125);
- unsigned long max_sleep = 16;
- u16 status;
- int ret;
-
- if (!vpd->busy)
- return 0;
-
- while (time_before(jiffies, timeout)) {
- ret = pci_user_read_config_word(dev, vpd->cap + PCI_VPD_ADDR,
- &status);
- if (ret < 0)
- return ret;
-
- if ((status & PCI_VPD_ADDR_F) == vpd->flag) {
- vpd->busy = 0;
- return 0;
- }
-
- if (fatal_signal_pending(current))
- return -EINTR;
-
- usleep_range(10, max_sleep);
- if (max_sleep < 1024)
- max_sleep *= 2;
- }
-
- pci_warn(dev, "VPD access failed. This is likely a firmware bug on this device. Contact the card vendor for a firmware update\n");
- return -ETIMEDOUT;
-}
-
-static ssize_t pci_vpd_read(struct pci_dev *dev, loff_t pos, size_t count,
- void *arg)
-{
- struct pci_vpd *vpd = dev->vpd;
- int ret;
- loff_t end = pos + count;
- u8 *buf = arg;
-
- if (pos < 0)
- return -EINVAL;
-
- if (!vpd->valid) {
- vpd->valid = 1;
- vpd->len = pci_vpd_size(dev, vpd->len);
- }
-
- if (vpd->len == 0)
- return -EIO;
-
- if (pos > vpd->len)
- return 0;
-
- if (end > vpd->len) {
- end = vpd->len;
- count = end - pos;
- }
-
- if (mutex_lock_killable(&vpd->lock))
- return -EINTR;
-
- ret = pci_vpd_wait(dev);
- if (ret < 0)
- goto out;
-
- while (pos < end) {
- u32 val;
- unsigned int i, skip;
-
- ret = pci_user_write_config_word(dev, vpd->cap + PCI_VPD_ADDR,
- pos & ~3);
- if (ret < 0)
- break;
- vpd->busy = 1;
- vpd->flag = PCI_VPD_ADDR_F;
- ret = pci_vpd_wait(dev);
- if (ret < 0)
- break;
-
- ret = pci_user_read_config_dword(dev, vpd->cap + PCI_VPD_DATA, &val);
- if (ret < 0)
- break;
-
- skip = pos & 3;
- for (i = 0; i < sizeof(u32); i++) {
- if (i >= skip) {
- *buf++ = val;
- if (++pos == end)
- break;
- }
- val >>= 8;
- }
- }
-out:
- mutex_unlock(&vpd->lock);
- return ret ? ret : count;
-}
-
-static ssize_t pci_vpd_write(struct pci_dev *dev, loff_t pos, size_t count,
- const void *arg)
-{
- struct pci_vpd *vpd = dev->vpd;
- const u8 *buf = arg;
- loff_t end = pos + count;
- int ret = 0;
-
- if (pos < 0 || (pos & 3) || (count & 3))
- return -EINVAL;
-
- if (!vpd->valid) {
- vpd->valid = 1;
- vpd->len = pci_vpd_size(dev, vpd->len);
- }
-
- if (vpd->len == 0)
- return -EIO;
-
- if (end > vpd->len)
- return -EINVAL;
-
- if (mutex_lock_killable(&vpd->lock))
- return -EINTR;
-
- ret = pci_vpd_wait(dev);
- if (ret < 0)
- goto out;
-
- while (pos < end) {
- u32 val;
-
- val = *buf++;
- val |= *buf++ << 8;
- val |= *buf++ << 16;
- val |= *buf++ << 24;
-
- ret = pci_user_write_config_dword(dev, vpd->cap + PCI_VPD_DATA, val);
- if (ret < 0)
- break;
- ret = pci_user_write_config_word(dev, vpd->cap + PCI_VPD_ADDR,
- pos | PCI_VPD_ADDR_F);
- if (ret < 0)
- break;
-
- vpd->busy = 1;
- vpd->flag = 0;
- ret = pci_vpd_wait(dev);
- if (ret < 0)
- break;
-
- pos += sizeof(u32);
- }
-out:
- mutex_unlock(&vpd->lock);
- return ret ? ret : count;
-}
-
-static int pci_vpd_set_size(struct pci_dev *dev, size_t len)
-{
- struct pci_vpd *vpd = dev->vpd;
-
- if (len == 0 || len > PCI_VPD_MAX_SIZE)
- return -EIO;
-
- vpd->valid = 1;
- vpd->len = len;
-
- return 0;
-}
-
-static const struct pci_vpd_ops pci_vpd_ops = {
- .read = pci_vpd_read,
- .write = pci_vpd_write,
- .set_size = pci_vpd_set_size,
-};
-
-static ssize_t pci_vpd_f0_read(struct pci_dev *dev, loff_t pos, size_t count,
- void *arg)
-{
- struct pci_dev *tdev = pci_get_slot(dev->bus,
- PCI_DEVFN(PCI_SLOT(dev->devfn), 0));
- ssize_t ret;
-
- if (!tdev)
- return -ENODEV;
-
- ret = pci_read_vpd(tdev, pos, count, arg);
- pci_dev_put(tdev);
- return ret;
-}
-
-static ssize_t pci_vpd_f0_write(struct pci_dev *dev, loff_t pos, size_t count,
- const void *arg)
-{
- struct pci_dev *tdev = pci_get_slot(dev->bus,
- PCI_DEVFN(PCI_SLOT(dev->devfn), 0));
- ssize_t ret;
-
- if (!tdev)
- return -ENODEV;
-
- ret = pci_write_vpd(tdev, pos, count, arg);
- pci_dev_put(tdev);
- return ret;
-}
-
-static int pci_vpd_f0_set_size(struct pci_dev *dev, size_t len)
-{
- struct pci_dev *tdev = pci_get_slot(dev->bus,
- PCI_DEVFN(PCI_SLOT(dev->devfn), 0));
- int ret;
-
- if (!tdev)
- return -ENODEV;
-
- ret = pci_set_vpd_size(tdev, len);
- pci_dev_put(tdev);
- return ret;
-}
-
-static const struct pci_vpd_ops pci_vpd_f0_ops = {
- .read = pci_vpd_f0_read,
- .write = pci_vpd_f0_write,
- .set_size = pci_vpd_f0_set_size,
-};
-
-int pci_vpd_init(struct pci_dev *dev)
-{
- struct pci_vpd *vpd;
- u8 cap;
-
- cap = pci_find_capability(dev, PCI_CAP_ID_VPD);
- if (!cap)
- return -ENODEV;
-
- vpd = kzalloc(sizeof(*vpd), GFP_ATOMIC);
- if (!vpd)
- return -ENOMEM;
-
- vpd->len = PCI_VPD_MAX_SIZE;
- if (dev->dev_flags & PCI_DEV_FLAGS_VPD_REF_F0)
- vpd->ops = &pci_vpd_f0_ops;
- else
- vpd->ops = &pci_vpd_ops;
- mutex_init(&vpd->lock);
- vpd->cap = cap;
- vpd->busy = 0;
- vpd->valid = 0;
- dev->vpd = vpd;
- return 0;
-}
-
-void pci_vpd_release(struct pci_dev *dev)
-{
- kfree(dev->vpd);
-}
-
/**
* pci_cfg_access_lock - Lock PCI config reads/writes
* @dev: pci device struct
@@ -686,8 +318,10 @@ void pci_cfg_access_unlock(struct pci_dev *dev)
raw_spin_lock_irqsave(&pci_lock, flags);
- /* This indicates a problem in the caller, but we don't need
- * to kill them, unlike a double-block above. */
+ /*
+ * This indicates a problem in the caller, but we don't need
+ * to kill them, unlike a double-block above.
+ */
WARN_ON(!dev->block_cfg_access);
dev->block_cfg_access = 0;
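The VPD block removed above reappears in drivers/pci/vpd.c per the diffstat; the exported helpers keep their names. A hedged usage sketch, with a hypothetical caller:

#include <linux/pci.h>
#include <linux/printk.h>

/* Hypothetical: dump the first bytes of a device's Vital Product Data */
static int foo_dump_vpd(struct pci_dev *pdev)
{
	u8 buf[64];
	ssize_t len = pci_read_vpd(pdev, 0, sizeof(buf), buf);

	if (len < 0)
		return len;

	print_hex_dump_bytes("vpd: ", DUMP_PREFIX_OFFSET, buf, len);
	return 0;
}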
diff --git a/drivers/pci/ats.c b/drivers/pci/ats.c
index 6ad80a1fd5a7..89305b569d3d 100644
--- a/drivers/pci/ats.c
+++ b/drivers/pci/ats.c
@@ -1,14 +1,12 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * drivers/pci/ats.c
- *
- * Copyright (C) 2009 Intel Corporation, Yu Zhao <yu.zhao@intel.com>
- * Copyright (C) 2011 Advanced Micro Devices,
- *
- * PCI Express I/O Virtualization (IOV) support.
+ * PCI Express I/O Virtualization (IOV) support
* Address Translation Service 1.0
* Page Request Interface added by Joerg Roedel <joerg.roedel@amd.com>
* PASID support added by Joerg Roedel <joerg.roedel@amd.com>
+ *
+ * Copyright (C) 2009 Intel Corporation, Yu Zhao <yu.zhao@intel.com>
+ * Copyright (C) 2011 Advanced Micro Devices,
*/
#include <linux/export.h>
diff --git a/drivers/pci/bus.c b/drivers/pci/bus.c
index 737d1c52f002..bc2ded4c451f 100644
--- a/drivers/pci/bus.c
+++ b/drivers/pci/bus.c
@@ -1,7 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * drivers/pci/bus.c
- *
* From setup-res.c, by:
* Dave Rusling (david.rusling@reo.mts.dec.com)
* David Mosberger (davidm@cs.arizona.edu)
diff --git a/drivers/pci/cadence/pcie-cadence-ep.c b/drivers/pci/cadence/pcie-cadence-ep.c
index 3c3a97743453..3d8283e450a9 100644
--- a/drivers/pci/cadence/pcie-cadence-ep.c
+++ b/drivers/pci/cadence/pcie-cadence-ep.c
@@ -77,16 +77,19 @@ static int cdns_pcie_ep_write_header(struct pci_epc *epc, u8 fn,
return 0;
}
-static int cdns_pcie_ep_set_bar(struct pci_epc *epc, u8 fn, enum pci_barno bar,
- dma_addr_t bar_phys, size_t size, int flags)
+static int cdns_pcie_ep_set_bar(struct pci_epc *epc, u8 fn,
+ struct pci_epf_bar *epf_bar)
{
struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
struct cdns_pcie *pcie = &ep->pcie;
+ dma_addr_t bar_phys = epf_bar->phys_addr;
+ enum pci_barno bar = epf_bar->barno;
+ int flags = epf_bar->flags;
u32 addr0, addr1, reg, cfg, b, aperture, ctrl;
u64 sz;
/* BAR size is 2^(aperture + 7) */
- sz = max_t(size_t, size, CDNS_PCIE_EP_MIN_APERTURE);
+ sz = max_t(size_t, epf_bar->size, CDNS_PCIE_EP_MIN_APERTURE);
/*
* roundup_pow_of_two() returns an unsigned long, which is not suited
* for 64bit values.
@@ -103,6 +106,9 @@ static int cdns_pcie_ep_set_bar(struct pci_epc *epc, u8 fn, enum pci_barno bar,
if (is_64bits && (bar & 1))
return -EINVAL;
+ if (is_64bits && !(flags & PCI_BASE_ADDRESS_MEM_TYPE_64))
+ epf_bar->flags |= PCI_BASE_ADDRESS_MEM_TYPE_64;
+
if (is_64bits && is_prefetch)
ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_PREFETCH_MEM_64BITS;
else if (is_prefetch)
@@ -139,10 +145,11 @@ static int cdns_pcie_ep_set_bar(struct pci_epc *epc, u8 fn, enum pci_barno bar,
}
static void cdns_pcie_ep_clear_bar(struct pci_epc *epc, u8 fn,
- enum pci_barno bar)
+ struct pci_epf_bar *epf_bar)
{
struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
struct cdns_pcie *pcie = &ep->pcie;
+ enum pci_barno bar = epf_bar->barno;
u32 reg, cfg, b, ctrl;
if (bar < BAR_4) {
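The set_bar rework above keeps the Cadence encoding in which a BAR's size is 2^(aperture + 7). As a quick sanity check, here is a small standalone C sketch (illustrative only, not part of this patch) that derives the aperture exponent for a requested size; the 128-byte floor standing in for the minimum aperture is an assumption made for this example.

/*
 * Illustrative userspace sketch (not from the patch): compute the
 * "aperture" exponent for a requested BAR size under the encoding
 * noted in the comment above, BAR size = 2^(aperture + 7). The
 * 128-byte (2^7) minimum is assumed here for illustration.
 */
#include <stdio.h>
#include <stdint.h>

static unsigned int aperture_for_size(uint64_t size)
{
	uint64_t sz = size < 128 ? 128 : size;	/* assumed minimum aperture */
	unsigned int aperture = 0;

	/* round up to the next power of two, 64-bit safe */
	while ((1ULL << (aperture + 7)) < sz)
		aperture++;

	return aperture;
}

int main(void)
{
	uint64_t sizes[] = { 64, 128, 4096, 5000, 1ULL << 32 };

	for (size_t i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
		printf("size %#llx -> aperture %u (BAR size %#llx)\n",
		       (unsigned long long)sizes[i],
		       aperture_for_size(sizes[i]),
		       1ULL << (aperture_for_size(sizes[i]) + 7));
	return 0;
}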
diff --git a/drivers/pci/dwc/pci-exynos.c b/drivers/pci/dwc/pci-exynos.c
index ca6278113936..4cc1e5df8c79 100644
--- a/drivers/pci/dwc/pci-exynos.c
+++ b/drivers/pci/dwc/pci-exynos.c
@@ -294,15 +294,6 @@ static irqreturn_t exynos_pcie_irq_handler(int irq, void *arg)
return IRQ_HANDLED;
}
-static irqreturn_t exynos_pcie_msi_irq_handler(int irq, void *arg)
-{
- struct exynos_pcie *ep = arg;
- struct dw_pcie *pci = ep->pci;
- struct pcie_port *pp = &pci->pp;
-
- return dw_handle_msi_irq(pp);
-}
-
static void exynos_pcie_msi_init(struct exynos_pcie *ep)
{
struct dw_pcie *pci = ep->pci;
@@ -428,15 +419,6 @@ static int __init exynos_add_pcie_port(struct exynos_pcie *ep,
dev_err(dev, "failed to get msi irq\n");
return pp->msi_irq;
}
-
- ret = devm_request_irq(dev, pp->msi_irq,
- exynos_pcie_msi_irq_handler,
- IRQF_SHARED | IRQF_NO_THREAD,
- "exynos-pcie", ep);
- if (ret) {
- dev_err(dev, "failed to request msi irq\n");
- return ret;
- }
}
pp->root_bus_nr = -1;
diff --git a/drivers/pci/dwc/pci-imx6.c b/drivers/pci/dwc/pci-imx6.c
index 4fddbd08b089..4818ef875f8a 100644
--- a/drivers/pci/dwc/pci-imx6.c
+++ b/drivers/pci/dwc/pci-imx6.c
@@ -542,15 +542,6 @@ static int imx6_pcie_wait_for_speed_change(struct imx6_pcie *imx6_pcie)
return -EINVAL;
}
-static irqreturn_t imx6_pcie_msi_handler(int irq, void *arg)
-{
- struct imx6_pcie *imx6_pcie = arg;
- struct dw_pcie *pci = imx6_pcie->pci;
- struct pcie_port *pp = &pci->pp;
-
- return dw_handle_msi_irq(pp);
-}
-
static int imx6_pcie_establish_link(struct imx6_pcie *imx6_pcie)
{
struct dw_pcie *pci = imx6_pcie->pci;
@@ -674,15 +665,6 @@ static int imx6_add_pcie_port(struct imx6_pcie *imx6_pcie,
dev_err(dev, "failed to get MSI irq\n");
return -ENODEV;
}
-
- ret = devm_request_irq(dev, pp->msi_irq,
- imx6_pcie_msi_handler,
- IRQF_SHARED | IRQF_NO_THREAD,
- "mx6-pcie-msi", imx6_pcie);
- if (ret) {
- dev_err(dev, "failed to request MSI irq\n");
- return ret;
- }
}
pp->root_bus_nr = -1;
diff --git a/drivers/pci/dwc/pci-keystone-dw.c b/drivers/pci/dwc/pci-keystone-dw.c
index 99a0e7076221..0682213328e9 100644
--- a/drivers/pci/dwc/pci-keystone-dw.c
+++ b/drivers/pci/dwc/pci-keystone-dw.c
@@ -120,20 +120,15 @@ void ks_dw_pcie_handle_msi_irq(struct keystone_pcie *ks_pcie, int offset)
}
}
-static void ks_dw_pcie_msi_irq_ack(struct irq_data *d)
+void ks_dw_pcie_msi_irq_ack(int irq, struct pcie_port *pp)
{
- u32 offset, reg_offset, bit_pos;
+ u32 reg_offset, bit_pos;
struct keystone_pcie *ks_pcie;
- struct msi_desc *msi;
- struct pcie_port *pp;
struct dw_pcie *pci;
- msi = irq_data_get_msi_desc(d);
- pp = (struct pcie_port *) msi_desc_to_pci_sysdata(msi);
pci = to_dw_pcie_from_pp(pp);
ks_pcie = to_keystone_pcie(pci);
- offset = d->irq - irq_linear_revmap(pp->irq_domain, 0);
- update_reg_offset_bit_pos(offset, &reg_offset, &bit_pos);
+ update_reg_offset_bit_pos(irq, &reg_offset, &bit_pos);
ks_dw_app_writel(ks_pcie, MSI0_IRQ_STATUS + (reg_offset << 4),
BIT(bit_pos));
@@ -162,85 +157,9 @@ void ks_dw_pcie_msi_clear_irq(struct pcie_port *pp, int irq)
BIT(bit_pos));
}
-static void ks_dw_pcie_msi_irq_mask(struct irq_data *d)
-{
- struct msi_desc *msi;
- struct pcie_port *pp;
- u32 offset;
-
- msi = irq_data_get_msi_desc(d);
- pp = (struct pcie_port *) msi_desc_to_pci_sysdata(msi);
- offset = d->irq - irq_linear_revmap(pp->irq_domain, 0);
-
- /* Mask the end point if PVM implemented */
- if (IS_ENABLED(CONFIG_PCI_MSI)) {
- if (msi->msi_attrib.maskbit)
- pci_msi_mask_irq(d);
- }
-
- ks_dw_pcie_msi_clear_irq(pp, offset);
-}
-
-static void ks_dw_pcie_msi_irq_unmask(struct irq_data *d)
-{
- struct msi_desc *msi;
- struct pcie_port *pp;
- u32 offset;
-
- msi = irq_data_get_msi_desc(d);
- pp = (struct pcie_port *) msi_desc_to_pci_sysdata(msi);
- offset = d->irq - irq_linear_revmap(pp->irq_domain, 0);
-
- /* Mask the end point if PVM implemented */
- if (IS_ENABLED(CONFIG_PCI_MSI)) {
- if (msi->msi_attrib.maskbit)
- pci_msi_unmask_irq(d);
- }
-
- ks_dw_pcie_msi_set_irq(pp, offset);
-}
-
-static struct irq_chip ks_dw_pcie_msi_irq_chip = {
- .name = "Keystone-PCIe-MSI-IRQ",
- .irq_ack = ks_dw_pcie_msi_irq_ack,
- .irq_mask = ks_dw_pcie_msi_irq_mask,
- .irq_unmask = ks_dw_pcie_msi_irq_unmask,
-};
-
-static int ks_dw_pcie_msi_map(struct irq_domain *domain, unsigned int irq,
- irq_hw_number_t hwirq)
+int ks_dw_pcie_msi_host_init(struct pcie_port *pp)
{
- irq_set_chip_and_handler(irq, &ks_dw_pcie_msi_irq_chip,
- handle_level_irq);
- irq_set_chip_data(irq, domain->host_data);
-
- return 0;
-}
-
-static const struct irq_domain_ops ks_dw_pcie_msi_domain_ops = {
- .map = ks_dw_pcie_msi_map,
-};
-
-int ks_dw_pcie_msi_host_init(struct pcie_port *pp, struct msi_controller *chip)
-{
- struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
- struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
- struct device *dev = pci->dev;
- int i;
-
- pp->irq_domain = irq_domain_add_linear(ks_pcie->msi_intc_np,
- MAX_MSI_IRQS,
- &ks_dw_pcie_msi_domain_ops,
- chip);
- if (!pp->irq_domain) {
- dev_err(dev, "irq domain init failed\n");
- return -ENXIO;
- }
-
- for (i = 0; i < MAX_MSI_IRQS; i++)
- irq_create_mapping(pp->irq_domain, i);
-
- return 0;
+ return dw_pcie_allocate_domains(pp);
}
void ks_dw_pcie_enable_legacy_irqs(struct keystone_pcie *ks_pcie)
diff --git a/drivers/pci/dwc/pci-keystone.c b/drivers/pci/dwc/pci-keystone.c
index d4f8ab90c018..d55ae0716adf 100644
--- a/drivers/pci/dwc/pci-keystone.c
+++ b/drivers/pci/dwc/pci-keystone.c
@@ -297,6 +297,7 @@ static const struct dw_pcie_host_ops keystone_pcie_host_ops = {
.msi_clear_irq = ks_dw_pcie_msi_clear_irq,
.get_msi_addr = ks_dw_pcie_get_msi_addr,
.msi_host_init = ks_dw_pcie_msi_host_init,
+ .msi_irq_ack = ks_dw_pcie_msi_irq_ack,
.scan_bus = ks_dw_pcie_v3_65_scan_bus,
};
diff --git a/drivers/pci/dwc/pci-keystone.h b/drivers/pci/dwc/pci-keystone.h
index 1dd1f3ef98e7..8a13da391543 100644
--- a/drivers/pci/dwc/pci-keystone.h
+++ b/drivers/pci/dwc/pci-keystone.h
@@ -49,9 +49,9 @@ int ks_dw_pcie_rd_other_conf(struct pcie_port *pp, struct pci_bus *bus,
unsigned int devfn, int where, int size, u32 *val);
void ks_dw_pcie_setup_rc_app_regs(struct keystone_pcie *ks_pcie);
void ks_dw_pcie_initiate_link_train(struct keystone_pcie *ks_pcie);
+void ks_dw_pcie_msi_irq_ack(int i, struct pcie_port *pp);
void ks_dw_pcie_msi_set_irq(struct pcie_port *pp, int irq);
void ks_dw_pcie_msi_clear_irq(struct pcie_port *pp, int irq);
void ks_dw_pcie_v3_65_scan_bus(struct pcie_port *pp);
-int ks_dw_pcie_msi_host_init(struct pcie_port *pp,
- struct msi_controller *chip);
+int ks_dw_pcie_msi_host_init(struct pcie_port *pp);
int ks_dw_pcie_link_up(struct dw_pcie *pci);
diff --git a/drivers/pci/dwc/pci-layerscape.c b/drivers/pci/dwc/pci-layerscape.c
index a7b4159631ae..3724d3ef7008 100644
--- a/drivers/pci/dwc/pci-layerscape.c
+++ b/drivers/pci/dwc/pci-layerscape.c
@@ -182,8 +182,7 @@ static int ls1021_pcie_host_init(struct pcie_port *pp)
return ls_pcie_host_init(pp);
}
-static int ls_pcie_msi_host_init(struct pcie_port *pp,
- struct msi_controller *chip)
+static int ls_pcie_msi_host_init(struct pcie_port *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct device *dev = pci->dev;
diff --git a/drivers/pci/dwc/pcie-artpec6.c b/drivers/pci/dwc/pcie-artpec6.c
index 93b3df9ed1b5..e66cede2b5b7 100644
--- a/drivers/pci/dwc/pcie-artpec6.c
+++ b/drivers/pci/dwc/pcie-artpec6.c
@@ -383,15 +383,6 @@ static const struct dw_pcie_host_ops artpec6_pcie_host_ops = {
.host_init = artpec6_pcie_host_init,
};
-static irqreturn_t artpec6_pcie_msi_handler(int irq, void *arg)
-{
- struct artpec6_pcie *artpec6_pcie = arg;
- struct dw_pcie *pci = artpec6_pcie->pci;
- struct pcie_port *pp = &pci->pp;
-
- return dw_handle_msi_irq(pp);
-}
-
static int artpec6_add_pcie_port(struct artpec6_pcie *artpec6_pcie,
struct platform_device *pdev)
{
@@ -406,15 +397,6 @@ static int artpec6_add_pcie_port(struct artpec6_pcie *artpec6_pcie,
dev_err(dev, "failed to get MSI irq\n");
return pp->msi_irq;
}
-
- ret = devm_request_irq(dev, pp->msi_irq,
- artpec6_pcie_msi_handler,
- IRQF_SHARED | IRQF_NO_THREAD,
- "artpec6-pcie-msi", artpec6_pcie);
- if (ret) {
- dev_err(dev, "failed to request MSI irq\n");
- return ret;
- }
}
pp->root_bus_nr = -1;
diff --git a/drivers/pci/dwc/pcie-designware-ep.c b/drivers/pci/dwc/pcie-designware-ep.c
index 3a6feeff5f5b..f07678bf7cfc 100644
--- a/drivers/pci/dwc/pcie-designware-ep.c
+++ b/drivers/pci/dwc/pcie-designware-ep.c
@@ -19,7 +19,8 @@ void dw_pcie_ep_linkup(struct dw_pcie_ep *ep)
pci_epc_linkup(epc);
}
-void dw_pcie_ep_reset_bar(struct dw_pcie *pci, enum pci_barno bar)
+static void __dw_pcie_ep_reset_bar(struct dw_pcie *pci, enum pci_barno bar,
+ int flags)
{
u32 reg;
@@ -27,9 +28,18 @@ void dw_pcie_ep_reset_bar(struct dw_pcie *pci, enum pci_barno bar)
dw_pcie_dbi_ro_wr_en(pci);
dw_pcie_writel_dbi2(pci, reg, 0x0);
dw_pcie_writel_dbi(pci, reg, 0x0);
+ if (flags & PCI_BASE_ADDRESS_MEM_TYPE_64) {
+ dw_pcie_writel_dbi2(pci, reg + 4, 0x0);
+ dw_pcie_writel_dbi(pci, reg + 4, 0x0);
+ }
dw_pcie_dbi_ro_wr_dis(pci);
}
+void dw_pcie_ep_reset_bar(struct dw_pcie *pci, enum pci_barno bar)
+{
+ __dw_pcie_ep_reset_bar(pci, bar, 0);
+}
+
static int dw_pcie_ep_write_header(struct pci_epc *epc, u8 func_no,
struct pci_epf_header *hdr)
{
@@ -104,25 +114,28 @@ static int dw_pcie_ep_outbound_atu(struct dw_pcie_ep *ep, phys_addr_t phys_addr,
}
static void dw_pcie_ep_clear_bar(struct pci_epc *epc, u8 func_no,
- enum pci_barno bar)
+ struct pci_epf_bar *epf_bar)
{
struct dw_pcie_ep *ep = epc_get_drvdata(epc);
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ enum pci_barno bar = epf_bar->barno;
u32 atu_index = ep->bar_to_atu[bar];
- dw_pcie_ep_reset_bar(pci, bar);
+ __dw_pcie_ep_reset_bar(pci, bar, epf_bar->flags);
dw_pcie_disable_atu(pci, atu_index, DW_PCIE_REGION_INBOUND);
clear_bit(atu_index, ep->ib_window_map);
}
static int dw_pcie_ep_set_bar(struct pci_epc *epc, u8 func_no,
- enum pci_barno bar,
- dma_addr_t bar_phys, size_t size, int flags)
+ struct pci_epf_bar *epf_bar)
{
int ret;
struct dw_pcie_ep *ep = epc_get_drvdata(epc);
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ enum pci_barno bar = epf_bar->barno;
+ size_t size = epf_bar->size;
+ int flags = epf_bar->flags;
enum dw_pcie_as_type as_type;
u32 reg = PCI_BASE_ADDRESS_0 + (4 * bar);
@@ -131,13 +144,20 @@ static int dw_pcie_ep_set_bar(struct pci_epc *epc, u8 func_no,
else
as_type = DW_PCIE_AS_IO;
- ret = dw_pcie_ep_inbound_atu(ep, bar, bar_phys, as_type);
+ ret = dw_pcie_ep_inbound_atu(ep, bar, epf_bar->phys_addr, as_type);
if (ret)
return ret;
dw_pcie_dbi_ro_wr_en(pci);
- dw_pcie_writel_dbi2(pci, reg, size - 1);
+
+ dw_pcie_writel_dbi2(pci, reg, lower_32_bits(size - 1));
dw_pcie_writel_dbi(pci, reg, flags);
+
+ if (flags & PCI_BASE_ADDRESS_MEM_TYPE_64) {
+ dw_pcie_writel_dbi2(pci, reg + 4, upper_32_bits(size - 1));
+ dw_pcie_writel_dbi(pci, reg + 4, 0);
+ }
+
dw_pcie_dbi_ro_wr_dis(pci);
return 0;
@@ -322,7 +342,7 @@ int dw_pcie_ep_init(struct dw_pcie_ep *ep)
struct device_node *np = dev->of_node;
if (!pci->dbi_base || !pci->dbi_base2) {
- dev_err(dev, "dbi_base/deb_base2 is not populated\n");
+ dev_err(dev, "dbi_base/dbi_base2 is not populated\n");
return -EINVAL;
}
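The dw_pcie_ep_set_bar() hunk above now programs the BAR mask in two halves, writing lower_32_bits(size - 1) and upper_32_bits(size - 1) to the dbi2 shadow registers when PCI_BASE_ADDRESS_MEM_TYPE_64 is set. A standalone C sketch (illustrative only, not part of this patch) of that split:

/*
 * Illustrative userspace sketch: how a BAR size becomes the two
 * 32-bit mask words written to the dbi2 shadow registers, mirroring
 * the lower_32_bits()/upper_32_bits() arithmetic in the hunk above.
 */
#include <stdio.h>
#include <stdint.h>

static uint32_t lower_32_bits(uint64_t v) { return (uint32_t)v; }
static uint32_t upper_32_bits(uint64_t v) { return (uint32_t)(v >> 32); }

int main(void)
{
	uint64_t sizes[] = { 512, 1 << 20, 1ULL << 32, 8ULL << 30 };

	for (size_t i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++) {
		uint64_t size = sizes[i];

		printf("size %#llx -> mask lo %#x, mask hi %#x\n",
		       (unsigned long long)size,
		       lower_32_bits(size - 1), upper_32_bits(size - 1));
	}
	return 0;
}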
diff --git a/drivers/pci/dwc/pcie-designware-host.c b/drivers/pci/dwc/pcie-designware-host.c
index 8de2d5c69b1d..550fdbb5c226 100644
--- a/drivers/pci/dwc/pcie-designware-host.c
+++ b/drivers/pci/dwc/pcie-designware-host.c
@@ -8,6 +8,7 @@
* Author: Jingoo Han <jg1.han@samsung.com>
*/
+#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/of_address.h>
#include <linux/of_pci.h>
@@ -42,22 +43,46 @@ static int dw_pcie_wr_own_conf(struct pcie_port *pp, int where, int size,
return dw_pcie_write(pci->dbi_base + where, size, val);
}
-static struct irq_chip dw_msi_irq_chip = {
+static void dw_msi_ack_irq(struct irq_data *d)
+{
+ irq_chip_ack_parent(d);
+}
+
+static void dw_msi_mask_irq(struct irq_data *d)
+{
+ pci_msi_mask_irq(d);
+ irq_chip_mask_parent(d);
+}
+
+static void dw_msi_unmask_irq(struct irq_data *d)
+{
+ pci_msi_unmask_irq(d);
+ irq_chip_unmask_parent(d);
+}
+
+static struct irq_chip dw_pcie_msi_irq_chip = {
.name = "PCI-MSI",
- .irq_enable = pci_msi_unmask_irq,
- .irq_disable = pci_msi_mask_irq,
- .irq_mask = pci_msi_mask_irq,
- .irq_unmask = pci_msi_unmask_irq,
+ .irq_ack = dw_msi_ack_irq,
+ .irq_mask = dw_msi_mask_irq,
+ .irq_unmask = dw_msi_unmask_irq,
+};
+
+static struct msi_domain_info dw_pcie_msi_domain_info = {
+ .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
+ MSI_FLAG_PCI_MSIX | MSI_FLAG_MULTI_PCI_MSI),
+ .chip = &dw_pcie_msi_irq_chip,
};
/* MSI int handler */
irqreturn_t dw_handle_msi_irq(struct pcie_port *pp)
{
- u32 val;
int i, pos, irq;
+ u32 val, num_ctrls;
irqreturn_t ret = IRQ_NONE;
- for (i = 0; i < MAX_MSI_CTRLS; i++) {
+ num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL;
+
+ for (i = 0; i < num_ctrls; i++) {
dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_STATUS + i * 12, 4,
&val);
if (!val)
@@ -78,206 +103,216 @@ irqreturn_t dw_handle_msi_irq(struct pcie_port *pp)
return ret;
}
-void dw_pcie_msi_init(struct pcie_port *pp)
+/* Chained MSI interrupt service routine */
+static void dw_chained_msi_isr(struct irq_desc *desc)
{
- struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
- struct device *dev = pci->dev;
- struct page *page;
- u64 msi_target;
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ struct pcie_port *pp;
- page = alloc_page(GFP_KERNEL);
- pp->msi_data = dma_map_page(dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
- if (dma_mapping_error(dev, pp->msi_data)) {
- dev_err(dev, "failed to map MSI data\n");
- __free_page(page);
- return;
- }
- msi_target = (u64)pp->msi_data;
+ chained_irq_enter(chip, desc);
- /* program the msi_data */
- dw_pcie_wr_own_conf(pp, PCIE_MSI_ADDR_LO, 4,
- (u32)(msi_target & 0xffffffff));
- dw_pcie_wr_own_conf(pp, PCIE_MSI_ADDR_HI, 4,
- (u32)(msi_target >> 32 & 0xffffffff));
+ pp = irq_desc_get_handler_data(desc);
+ dw_handle_msi_irq(pp);
+
+ chained_irq_exit(chip, desc);
}
-static void dw_pcie_msi_clear_irq(struct pcie_port *pp, int irq)
+static void dw_pci_setup_msi_msg(struct irq_data *data, struct msi_msg *msg)
{
- unsigned int res, bit, val;
+ struct pcie_port *pp = irq_data_get_irq_chip_data(data);
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ u64 msi_target;
- res = (irq / 32) * 12;
- bit = irq % 32;
- dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4, &val);
- val &= ~(1 << bit);
- dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4, val);
-}
+ if (pp->ops->get_msi_addr)
+ msi_target = pp->ops->get_msi_addr(pp);
+ else
+ msi_target = (u64)pp->msi_data;
-static void clear_irq_range(struct pcie_port *pp, unsigned int irq_base,
- unsigned int nvec, unsigned int pos)
-{
- unsigned int i;
-
- for (i = 0; i < nvec; i++) {
- irq_set_msi_desc_off(irq_base, i, NULL);
- /* Disable corresponding interrupt on MSI controller */
- if (pp->ops->msi_clear_irq)
- pp->ops->msi_clear_irq(pp, pos + i);
- else
- dw_pcie_msi_clear_irq(pp, pos + i);
- }
+ msg->address_lo = lower_32_bits(msi_target);
+ msg->address_hi = upper_32_bits(msi_target);
- bitmap_release_region(pp->msi_irq_in_use, pos, order_base_2(nvec));
+ if (pp->ops->get_msi_data)
+ msg->data = pp->ops->get_msi_data(pp, data->hwirq);
+ else
+ msg->data = data->hwirq;
+
+ dev_dbg(pci->dev, "msi#%d address_hi %#x address_lo %#x\n",
+ (int)data->hwirq, msg->address_hi, msg->address_lo);
}
-static void dw_pcie_msi_set_irq(struct pcie_port *pp, int irq)
+static int dw_pci_msi_set_affinity(struct irq_data *irq_data,
+ const struct cpumask *mask, bool force)
{
- unsigned int res, bit, val;
-
- res = (irq / 32) * 12;
- bit = irq % 32;
- dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4, &val);
- val |= 1 << bit;
- dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4, val);
+ return -EINVAL;
}
-static int assign_irq(int no_irqs, struct msi_desc *desc, int *pos)
+static void dw_pci_bottom_mask(struct irq_data *data)
{
- int irq, pos0, i;
- struct pcie_port *pp;
-
- pp = (struct pcie_port *)msi_desc_to_pci_sysdata(desc);
- pos0 = bitmap_find_free_region(pp->msi_irq_in_use, MAX_MSI_IRQS,
- order_base_2(no_irqs));
- if (pos0 < 0)
- goto no_valid_irq;
+ struct pcie_port *pp = irq_data_get_irq_chip_data(data);
+ unsigned int res, bit, ctrl;
+ unsigned long flags;
- irq = irq_find_mapping(pp->irq_domain, pos0);
- if (!irq)
- goto no_valid_irq;
+ raw_spin_lock_irqsave(&pp->lock, flags);
- /*
- * irq_create_mapping (called from dw_pcie_host_init) pre-allocates
- * descs so there is no need to allocate descs here. We can therefore
- * assume that if irq_find_mapping above returns non-zero, then the
- * descs are also successfully allocated.
- */
+ if (pp->ops->msi_clear_irq) {
+ pp->ops->msi_clear_irq(pp, data->hwirq);
+ } else {
+ ctrl = data->hwirq / 32;
+ res = ctrl * 12;
+ bit = data->hwirq % 32;
- for (i = 0; i < no_irqs; i++) {
- if (irq_set_msi_desc_off(irq, i, desc) != 0) {
- clear_irq_range(pp, irq, i, pos0);
- goto no_valid_irq;
- }
- /*Enable corresponding interrupt in MSI interrupt controller */
- if (pp->ops->msi_set_irq)
- pp->ops->msi_set_irq(pp, pos0 + i);
- else
- dw_pcie_msi_set_irq(pp, pos0 + i);
+ pp->irq_status[ctrl] &= ~(1 << bit);
+ dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4,
+ pp->irq_status[ctrl]);
}
- *pos = pos0;
- desc->nvec_used = no_irqs;
- desc->msi_attrib.multiple = order_base_2(no_irqs);
-
- return irq;
-
-no_valid_irq:
- *pos = pos0;
- return -ENOSPC;
+ raw_spin_unlock_irqrestore(&pp->lock, flags);
}
-static void dw_msi_setup_msg(struct pcie_port *pp, unsigned int irq, u32 pos)
+static void dw_pci_bottom_unmask(struct irq_data *data)
{
- struct msi_msg msg;
- u64 msi_target;
+ struct pcie_port *pp = irq_data_get_irq_chip_data(data);
+ unsigned int res, bit, ctrl;
+ unsigned long flags;
- if (pp->ops->get_msi_addr)
- msi_target = pp->ops->get_msi_addr(pp);
- else
- msi_target = (u64)pp->msi_data;
+ raw_spin_lock_irqsave(&pp->lock, flags);
- msg.address_lo = (u32)(msi_target & 0xffffffff);
- msg.address_hi = (u32)(msi_target >> 32 & 0xffffffff);
+ if (pp->ops->msi_set_irq) {
+ pp->ops->msi_set_irq(pp, data->hwirq);
+ } else {
+ ctrl = data->hwirq / 32;
+ res = ctrl * 12;
+ bit = data->hwirq % 32;
- if (pp->ops->get_msi_data)
- msg.data = pp->ops->get_msi_data(pp, pos);
- else
- msg.data = pos;
+ pp->irq_status[ctrl] |= 1 << bit;
+ dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4,
+ pp->irq_status[ctrl]);
+ }
- pci_write_msi_msg(irq, &msg);
+ raw_spin_unlock_irqrestore(&pp->lock, flags);
}
-static int dw_msi_setup_irq(struct msi_controller *chip, struct pci_dev *pdev,
- struct msi_desc *desc)
+static void dw_pci_bottom_ack(struct irq_data *d)
{
- int irq, pos;
- struct pcie_port *pp = pdev->bus->sysdata;
-
- if (desc->msi_attrib.is_msix)
- return -EINVAL;
-
- irq = assign_irq(1, desc, &pos);
- if (irq < 0)
- return irq;
+ struct msi_desc *msi = irq_data_get_msi_desc(d);
+ struct pcie_port *pp;
- dw_msi_setup_msg(pp, irq, pos);
+ pp = msi_desc_to_pci_sysdata(msi);
- return 0;
+ if (pp->ops->msi_irq_ack)
+ pp->ops->msi_irq_ack(d->hwirq, pp);
}
-static int dw_msi_setup_irqs(struct msi_controller *chip, struct pci_dev *pdev,
- int nvec, int type)
+static struct irq_chip dw_pci_msi_bottom_irq_chip = {
+ .name = "DWPCI-MSI",
+ .irq_ack = dw_pci_bottom_ack,
+ .irq_compose_msi_msg = dw_pci_setup_msi_msg,
+ .irq_set_affinity = dw_pci_msi_set_affinity,
+ .irq_mask = dw_pci_bottom_mask,
+ .irq_unmask = dw_pci_bottom_unmask,
+};
+
+static int dw_pcie_irq_domain_alloc(struct irq_domain *domain,
+ unsigned int virq, unsigned int nr_irqs,
+ void *args)
{
-#ifdef CONFIG_PCI_MSI
- int irq, pos;
- struct msi_desc *desc;
- struct pcie_port *pp = pdev->bus->sysdata;
+ struct pcie_port *pp = domain->host_data;
+ unsigned long flags;
+ u32 i;
+ int bit;
+
+ raw_spin_lock_irqsave(&pp->lock, flags);
- /* MSI-X interrupts are not supported */
- if (type == PCI_CAP_ID_MSIX)
- return -EINVAL;
+ bit = bitmap_find_free_region(pp->msi_irq_in_use, pp->num_vectors,
+ order_base_2(nr_irqs));
- WARN_ON(!list_is_singular(&pdev->dev.msi_list));
- desc = list_entry(pdev->dev.msi_list.next, struct msi_desc, list);
+ raw_spin_unlock_irqrestore(&pp->lock, flags);
- irq = assign_irq(nvec, desc, &pos);
- if (irq < 0)
- return irq;
+ if (bit < 0)
+ return -ENOSPC;
- dw_msi_setup_msg(pp, irq, pos);
+ for (i = 0; i < nr_irqs; i++)
+ irq_domain_set_info(domain, virq + i, bit + i,
+ &dw_pci_msi_bottom_irq_chip,
+ pp, handle_edge_irq,
+ NULL, NULL);
return 0;
-#else
- return -EINVAL;
-#endif
}
-static void dw_msi_teardown_irq(struct msi_controller *chip, unsigned int irq)
+static void dw_pcie_irq_domain_free(struct irq_domain *domain,
+ unsigned int virq, unsigned int nr_irqs)
{
- struct irq_data *data = irq_get_irq_data(irq);
- struct msi_desc *msi = irq_data_get_msi_desc(data);
- struct pcie_port *pp = (struct pcie_port *)msi_desc_to_pci_sysdata(msi);
-
- clear_irq_range(pp, irq, 1, data->hwirq);
+ struct irq_data *data = irq_domain_get_irq_data(domain, virq);
+ struct pcie_port *pp = irq_data_get_irq_chip_data(data);
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&pp->lock, flags);
+ bitmap_release_region(pp->msi_irq_in_use, data->hwirq,
+ order_base_2(nr_irqs));
+ raw_spin_unlock_irqrestore(&pp->lock, flags);
}
-static struct msi_controller dw_pcie_msi_chip = {
- .setup_irq = dw_msi_setup_irq,
- .setup_irqs = dw_msi_setup_irqs,
- .teardown_irq = dw_msi_teardown_irq,
+static const struct irq_domain_ops dw_pcie_msi_domain_ops = {
+ .alloc = dw_pcie_irq_domain_alloc,
+ .free = dw_pcie_irq_domain_free,
};
-static int dw_pcie_msi_map(struct irq_domain *domain, unsigned int irq,
- irq_hw_number_t hwirq)
+int dw_pcie_allocate_domains(struct pcie_port *pp)
{
- irq_set_chip_and_handler(irq, &dw_msi_irq_chip, handle_simple_irq);
- irq_set_chip_data(irq, domain->host_data);
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct fwnode_handle *fwnode = of_node_to_fwnode(pci->dev->of_node);
+
+ pp->irq_domain = irq_domain_create_linear(fwnode, pp->num_vectors,
+ &dw_pcie_msi_domain_ops, pp);
+ if (!pp->irq_domain) {
+ dev_err(pci->dev, "failed to create IRQ domain\n");
+ return -ENOMEM;
+ }
+
+ pp->msi_domain = pci_msi_create_irq_domain(fwnode,
+ &dw_pcie_msi_domain_info,
+ pp->irq_domain);
+ if (!pp->msi_domain) {
+ dev_err(pci->dev, "failed to create MSI domain\n");
+ irq_domain_remove(pp->irq_domain);
+ return -ENOMEM;
+ }
return 0;
}
-static const struct irq_domain_ops msi_domain_ops = {
- .map = dw_pcie_msi_map,
-};
+void dw_pcie_free_msi(struct pcie_port *pp)
+{
+ irq_set_chained_handler(pp->msi_irq, NULL);
+ irq_set_handler_data(pp->msi_irq, NULL);
+
+ irq_domain_remove(pp->msi_domain);
+ irq_domain_remove(pp->irq_domain);
+}
+
+void dw_pcie_msi_init(struct pcie_port *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct device *dev = pci->dev;
+ struct page *page;
+ u64 msi_target;
+
+ page = alloc_page(GFP_KERNEL);
+ pp->msi_data = dma_map_page(dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
+ if (dma_mapping_error(dev, pp->msi_data)) {
+ dev_err(dev, "failed to map MSI data\n");
+ __free_page(page);
+ return;
+ }
+ msi_target = (u64)pp->msi_data;
+
+ /* program the msi_data */
+ dw_pcie_wr_own_conf(pp, PCIE_MSI_ADDR_LO, 4,
+ lower_32_bits(msi_target));
+ dw_pcie_wr_own_conf(pp, PCIE_MSI_ADDR_HI, 4,
+ upper_32_bits(msi_target));
+}
int dw_pcie_host_init(struct pcie_port *pp)
{
@@ -285,11 +320,13 @@ int dw_pcie_host_init(struct pcie_port *pp)
struct device *dev = pci->dev;
struct device_node *np = dev->of_node;
struct platform_device *pdev = to_platform_device(dev);
+ struct resource_entry *win, *tmp;
struct pci_bus *bus, *child;
struct pci_host_bridge *bridge;
struct resource *cfg_res;
- int i, ret;
- struct resource_entry *win, *tmp;
+ int ret;
+
+ raw_spin_lock_init(&pci->pp.lock);
cfg_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "config");
if (cfg_res) {
@@ -388,20 +425,35 @@ int dw_pcie_host_init(struct pcie_port *pp)
pci->num_viewport = 2;
if (IS_ENABLED(CONFIG_PCI_MSI)) {
- if (!pp->ops->msi_host_init) {
- pp->irq_domain = irq_domain_add_linear(dev->of_node,
- MAX_MSI_IRQS, &msi_domain_ops,
- &dw_pcie_msi_chip);
- if (!pp->irq_domain) {
- dev_err(dev, "irq domain init failed\n");
- ret = -ENXIO;
+ /*
+ * If a specific SoC driver needs to change the
+ * default number of vectors, it needs to implement
+ * the set_num_vectors callback.
+ */
+ if (!pp->ops->set_num_vectors) {
+ pp->num_vectors = MSI_DEF_NUM_VECTORS;
+ } else {
+ pp->ops->set_num_vectors(pp);
+
+ if (pp->num_vectors > MAX_MSI_IRQS ||
+ pp->num_vectors == 0) {
+ dev_err(dev,
+ "Invalid number of vectors\n");
goto error;
}
+ }
- for (i = 0; i < MAX_MSI_IRQS; i++)
- irq_create_mapping(pp->irq_domain, i);
+ if (!pp->ops->msi_host_init) {
+ ret = dw_pcie_allocate_domains(pp);
+ if (ret)
+ goto error;
+
+ if (pp->msi_irq)
+ irq_set_chained_handler_and_data(pp->msi_irq,
+ dw_chained_msi_isr,
+ pp);
} else {
- ret = pp->ops->msi_host_init(pp, &dw_pcie_msi_chip);
+ ret = pp->ops->msi_host_init(pp);
if (ret < 0)
goto error;
}
@@ -421,10 +473,6 @@ int dw_pcie_host_init(struct pcie_port *pp)
bridge->ops = &dw_pcie_ops;
bridge->map_irq = of_irq_parse_and_map_pci;
bridge->swizzle_irq = pci_common_swizzle;
- if (IS_ENABLED(CONFIG_PCI_MSI)) {
- bridge->msi = &dw_pcie_msi_chip;
- dw_pcie_msi_chip.dev = dev;
- }
ret = pci_scan_root_bus_bridge(bridge);
if (ret)
@@ -593,11 +641,17 @@ static u8 dw_pcie_iatu_unroll_enabled(struct dw_pcie *pci)
void dw_pcie_setup_rc(struct pcie_port *pp)
{
- u32 val;
+ u32 val, ctrl, num_ctrls;
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
dw_pcie_setup(pci);
+ num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL;
+
+ /* Initialize IRQ Status array */
+ for (ctrl = 0; ctrl < num_ctrls; ctrl++)
+ dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_ENABLE + (ctrl * 12), 4,
+ &pp->irq_status[ctrl]);
/* setup RC BARs */
dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 0x00000004);
dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_1, 0x00000000);
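The new dw_pci_bottom_mask()/dw_pci_bottom_unmask() helpers above locate an MSI vector by controller bank and bit: ctrl = hwirq / 32, a register offset of ctrl * 12, and bit = hwirq % 32, with num_ctrls = num_vectors / MAX_MSI_IRQS_PER_CTRL banks scanned in dw_handle_msi_irq(). A standalone C sketch (illustrative only, not part of this patch) of that indexing; the 12-byte stride is taken directly from the "i * 12" / "ctrl * 12" arithmetic in the hunks above:

/*
 * Illustrative userspace sketch: given an MSI hwirq, compute which
 * per-controller register bank it lives in and which bit within
 * that bank, mirroring the ctrl/res/bit arithmetic above.
 */
#include <stdio.h>

#define MAX_MSI_IRQS_PER_CTRL	32

int main(void)
{
	unsigned int hwirqs[] = { 0, 5, 31, 32, 100, 255 };

	for (unsigned int i = 0; i < sizeof(hwirqs) / sizeof(hwirqs[0]); i++) {
		unsigned int hwirq = hwirqs[i];
		unsigned int ctrl = hwirq / MAX_MSI_IRQS_PER_CTRL;
		unsigned int res = ctrl * 12;	/* byte offset of the bank */
		unsigned int bit = hwirq % MAX_MSI_IRQS_PER_CTRL;

		printf("hwirq %3u -> controller %u (reg offset +%u), bit %u\n",
		       hwirq, ctrl, res, bit);
	}
	return 0;
}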
diff --git a/drivers/pci/dwc/pcie-designware-plat.c b/drivers/pci/dwc/pcie-designware-plat.c
index ebdf28bcd67d..5416aa8a07a5 100644
--- a/drivers/pci/dwc/pcie-designware-plat.c
+++ b/drivers/pci/dwc/pcie-designware-plat.c
@@ -25,13 +25,6 @@ struct dw_plat_pcie {
struct dw_pcie *pci;
};
-static irqreturn_t dw_plat_pcie_msi_irq_handler(int irq, void *arg)
-{
- struct pcie_port *pp = arg;
-
- return dw_handle_msi_irq(pp);
-}
-
static int dw_plat_pcie_host_init(struct pcie_port *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
@@ -63,15 +56,6 @@ static int dw_plat_add_pcie_port(struct pcie_port *pp,
pp->msi_irq = platform_get_irq(pdev, 0);
if (pp->msi_irq < 0)
return pp->msi_irq;
-
- ret = devm_request_irq(dev, pp->msi_irq,
- dw_plat_pcie_msi_irq_handler,
- IRQF_SHARED | IRQF_NO_THREAD,
- "dw-plat-pcie-msi", pp);
- if (ret) {
- dev_err(dev, "failed to request MSI IRQ\n");
- return ret;
- }
}
pp->root_bus_nr = -1;
diff --git a/drivers/pci/dwc/pcie-designware.h b/drivers/pci/dwc/pcie-designware.h
index 11b13864a406..fe811dbc12cf 100644
--- a/drivers/pci/dwc/pcie-designware.h
+++ b/drivers/pci/dwc/pcie-designware.h
@@ -107,13 +107,10 @@
#define MSI_MESSAGE_DATA_32 0x58
#define MSI_MESSAGE_DATA_64 0x5C
-/*
- * Maximum number of MSI IRQs can be 256 per controller. But keep
- * it 32 as of now. Probably we will never need more than 32. If needed,
- * then increment it in multiple of 32.
- */
-#define MAX_MSI_IRQS 32
-#define MAX_MSI_CTRLS (MAX_MSI_IRQS / 32)
+#define MAX_MSI_IRQS 256
+#define MAX_MSI_IRQS_PER_CTRL 32
+#define MAX_MSI_CTRLS (MAX_MSI_IRQS / MAX_MSI_IRQS_PER_CTRL)
+#define MSI_DEF_NUM_VECTORS 32
/* Maximum number of inbound/outbound iATUs */
#define MAX_IATU_IN 256
@@ -149,7 +146,9 @@ struct dw_pcie_host_ops {
phys_addr_t (*get_msi_addr)(struct pcie_port *pp);
u32 (*get_msi_data)(struct pcie_port *pp, int pos);
void (*scan_bus)(struct pcie_port *pp);
- int (*msi_host_init)(struct pcie_port *pp, struct msi_controller *chip);
+ void (*set_num_vectors)(struct pcie_port *pp);
+ int (*msi_host_init)(struct pcie_port *pp);
+ void (*msi_irq_ack)(int irq, struct pcie_port *pp);
};
struct pcie_port {
@@ -174,7 +173,11 @@ struct pcie_port {
const struct dw_pcie_host_ops *ops;
int msi_irq;
struct irq_domain *irq_domain;
+ struct irq_domain *msi_domain;
dma_addr_t msi_data;
+ u32 num_vectors;
+ u32 irq_status[MAX_MSI_CTRLS];
+ raw_spinlock_t lock;
DECLARE_BITMAP(msi_irq_in_use, MAX_MSI_IRQS);
};
@@ -316,8 +319,10 @@ static inline void dw_pcie_dbi_ro_wr_dis(struct dw_pcie *pci)
#ifdef CONFIG_PCIE_DW_HOST
irqreturn_t dw_handle_msi_irq(struct pcie_port *pp);
void dw_pcie_msi_init(struct pcie_port *pp);
+void dw_pcie_free_msi(struct pcie_port *pp);
void dw_pcie_setup_rc(struct pcie_port *pp);
int dw_pcie_host_init(struct pcie_port *pp);
+int dw_pcie_allocate_domains(struct pcie_port *pp);
#else
static inline irqreturn_t dw_handle_msi_irq(struct pcie_port *pp)
{
@@ -328,6 +333,10 @@ static inline void dw_pcie_msi_init(struct pcie_port *pp)
{
}
+static inline void dw_pcie_free_msi(struct pcie_port *pp)
+{
+}
+
static inline void dw_pcie_setup_rc(struct pcie_port *pp)
{
}
@@ -336,6 +345,11 @@ static inline int dw_pcie_host_init(struct pcie_port *pp)
{
return 0;
}
+
+static inline int dw_pcie_allocate_domains(struct pcie_port *pp)
+{
+ return 0;
+}
#endif
#ifdef CONFIG_PCIE_DW_EP
diff --git a/drivers/pci/dwc/pcie-histb.c b/drivers/pci/dwc/pcie-histb.c
index 70b5c0b108bf..3611d6ce9a92 100644
--- a/drivers/pci/dwc/pcie-histb.c
+++ b/drivers/pci/dwc/pcie-histb.c
@@ -61,6 +61,7 @@ struct histb_pcie {
struct reset_control *bus_reset;
void __iomem *ctrl;
int reset_gpio;
+ struct regulator *vpcie;
};
static u32 histb_pcie_readl(struct histb_pcie *histb_pcie, u32 reg)
@@ -207,13 +208,6 @@ static struct dw_pcie_host_ops histb_pcie_host_ops = {
.host_init = histb_pcie_host_init,
};
-static irqreturn_t histb_pcie_msi_irq_handler(int irq, void *arg)
-{
- struct pcie_port *pp = arg;
-
- return dw_handle_msi_irq(pp);
-}
-
static void histb_pcie_host_disable(struct histb_pcie *hipcie)
{
reset_control_assert(hipcie->soft_reset);
@@ -227,6 +221,9 @@ static void histb_pcie_host_disable(struct histb_pcie *hipcie)
if (gpio_is_valid(hipcie->reset_gpio))
gpio_set_value_cansleep(hipcie->reset_gpio, 0);
+
+ if (hipcie->vpcie)
+ regulator_disable(hipcie->vpcie);
}
static int histb_pcie_host_enable(struct pcie_port *pp)
@@ -237,6 +234,14 @@ static int histb_pcie_host_enable(struct pcie_port *pp)
int ret;
/* power on PCIe device if have */
+ if (hipcie->vpcie) {
+ ret = regulator_enable(hipcie->vpcie);
+ if (ret) {
+ dev_err(dev, "failed to enable regulator: %d\n", ret);
+ return ret;
+ }
+ }
+
if (gpio_is_valid(hipcie->reset_gpio))
gpio_set_value_cansleep(hipcie->reset_gpio, 1);
@@ -276,13 +281,14 @@ static int histb_pcie_host_enable(struct pcie_port *pp)
return 0;
err_aux_clk:
- clk_disable_unprepare(hipcie->aux_clk);
-err_pipe_clk:
clk_disable_unprepare(hipcie->pipe_clk);
-err_sys_clk:
+err_pipe_clk:
clk_disable_unprepare(hipcie->sys_clk);
-err_bus_clk:
+err_sys_clk:
clk_disable_unprepare(hipcie->bus_clk);
+err_bus_clk:
+ if (hipcie->vpcie)
+ regulator_disable(hipcie->vpcie);
return ret;
}
@@ -332,6 +338,13 @@ static int histb_pcie_probe(struct platform_device *pdev)
return PTR_ERR(pci->dbi_base);
}
+ hipcie->vpcie = devm_regulator_get_optional(dev, "vpcie");
+ if (IS_ERR(hipcie->vpcie)) {
+ if (PTR_ERR(hipcie->vpcie) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+ hipcie->vpcie = NULL;
+ }
+
hipcie->reset_gpio = of_get_named_gpio_flags(np,
"reset-gpios", 0, &of_flags);
if (of_flags & OF_GPIO_ACTIVE_LOW)
@@ -393,14 +406,6 @@ static int histb_pcie_probe(struct platform_device *pdev)
dev_err(dev, "Failed to get MSI IRQ\n");
return pp->msi_irq;
}
-
- ret = devm_request_irq(dev, pp->msi_irq,
- histb_pcie_msi_irq_handler,
- IRQF_SHARED, "histb-pcie-msi", pp);
- if (ret) {
- dev_err(dev, "cannot request MSI IRQ\n");
- return ret;
- }
}
hipcie->phy = devm_phy_get(dev, "phy");
diff --git a/drivers/pci/dwc/pcie-qcom.c b/drivers/pci/dwc/pcie-qcom.c
index 6310c66e265c..5897af7d3355 100644
--- a/drivers/pci/dwc/pcie-qcom.c
+++ b/drivers/pci/dwc/pcie-qcom.c
@@ -79,6 +79,7 @@
#define PCIE20_v3_PARF_SLV_ADDR_SPACE_SIZE 0x358
#define SLV_ADDR_SPACE_SZ 0x10000000
+#define QCOM_PCIE_2_1_0_MAX_SUPPLY 3
struct qcom_pcie_resources_2_1_0 {
struct clk *iface_clk;
struct clk *core_clk;
@@ -88,9 +89,7 @@ struct qcom_pcie_resources_2_1_0 {
struct reset_control *ahb_reset;
struct reset_control *por_reset;
struct reset_control *phy_reset;
- struct regulator *vdda;
- struct regulator *vdda_phy;
- struct regulator *vdda_refclk;
+ struct regulator_bulk_data supplies[QCOM_PCIE_2_1_0_MAX_SUPPLY];
};
struct qcom_pcie_resources_1_0_0 {
@@ -102,12 +101,14 @@ struct qcom_pcie_resources_1_0_0 {
struct regulator *vdda;
};
+#define QCOM_PCIE_2_3_2_MAX_SUPPLY 2
struct qcom_pcie_resources_2_3_2 {
struct clk *aux_clk;
struct clk *master_clk;
struct clk *slave_clk;
struct clk *cfg_clk;
struct clk *pipe_clk;
+ struct regulator_bulk_data supplies[QCOM_PCIE_2_3_2_MAX_SUPPLY];
};
struct qcom_pcie_resources_2_4_0 {
@@ -180,13 +181,6 @@ static void qcom_ep_reset_deassert(struct qcom_pcie *pcie)
usleep_range(PERST_DELAY_US, PERST_DELAY_US + 500);
}
-static irqreturn_t qcom_pcie_msi_irq_handler(int irq, void *arg)
-{
- struct pcie_port *pp = arg;
-
- return dw_handle_msi_irq(pp);
-}
-
static int qcom_pcie_establish_link(struct qcom_pcie *pcie)
{
struct dw_pcie *pci = pcie->pci;
@@ -216,18 +210,15 @@ static int qcom_pcie_get_resources_2_1_0(struct qcom_pcie *pcie)
struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0;
struct dw_pcie *pci = pcie->pci;
struct device *dev = pci->dev;
+ int ret;
- res->vdda = devm_regulator_get(dev, "vdda");
- if (IS_ERR(res->vdda))
- return PTR_ERR(res->vdda);
-
- res->vdda_phy = devm_regulator_get(dev, "vdda_phy");
- if (IS_ERR(res->vdda_phy))
- return PTR_ERR(res->vdda_phy);
-
- res->vdda_refclk = devm_regulator_get(dev, "vdda_refclk");
- if (IS_ERR(res->vdda_refclk))
- return PTR_ERR(res->vdda_refclk);
+ res->supplies[0].supply = "vdda";
+ res->supplies[1].supply = "vdda_phy";
+ res->supplies[2].supply = "vdda_refclk";
+ ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(res->supplies),
+ res->supplies);
+ if (ret)
+ return ret;
res->iface_clk = devm_clk_get(dev, "iface");
if (IS_ERR(res->iface_clk))
@@ -273,9 +264,7 @@ static void qcom_pcie_deinit_2_1_0(struct qcom_pcie *pcie)
clk_disable_unprepare(res->iface_clk);
clk_disable_unprepare(res->core_clk);
clk_disable_unprepare(res->phy_clk);
- regulator_disable(res->vdda);
- regulator_disable(res->vdda_phy);
- regulator_disable(res->vdda_refclk);
+ regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
}
static int qcom_pcie_init_2_1_0(struct qcom_pcie *pcie)
@@ -286,24 +275,12 @@ static int qcom_pcie_init_2_1_0(struct qcom_pcie *pcie)
u32 val;
int ret;
- ret = regulator_enable(res->vdda);
- if (ret) {
- dev_err(dev, "cannot enable vdda regulator\n");
+ ret = regulator_bulk_enable(ARRAY_SIZE(res->supplies), res->supplies);
+ if (ret < 0) {
+ dev_err(dev, "cannot enable regulators\n");
return ret;
}
- ret = regulator_enable(res->vdda_refclk);
- if (ret) {
- dev_err(dev, "cannot enable vdda_refclk regulator\n");
- goto err_refclk;
- }
-
- ret = regulator_enable(res->vdda_phy);
- if (ret) {
- dev_err(dev, "cannot enable vdda_phy regulator\n");
- goto err_vdda_phy;
- }
-
ret = reset_control_assert(res->ahb_reset);
if (ret) {
dev_err(dev, "cannot assert ahb reset\n");
@@ -387,11 +364,7 @@ err_clk_core:
err_clk_phy:
clk_disable_unprepare(res->iface_clk);
err_assert_ahb:
- regulator_disable(res->vdda_phy);
-err_vdda_phy:
- regulator_disable(res->vdda_refclk);
-err_refclk:
- regulator_disable(res->vdda);
+ regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
return ret;
}
@@ -521,6 +494,14 @@ static int qcom_pcie_get_resources_2_3_2(struct qcom_pcie *pcie)
struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2;
struct dw_pcie *pci = pcie->pci;
struct device *dev = pci->dev;
+ int ret;
+
+ res->supplies[0].supply = "vdda";
+ res->supplies[1].supply = "vddpe-3v3";
+ ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(res->supplies),
+ res->supplies);
+ if (ret)
+ return ret;
res->aux_clk = devm_clk_get(dev, "aux");
if (IS_ERR(res->aux_clk))
@@ -550,6 +531,8 @@ static void qcom_pcie_deinit_2_3_2(struct qcom_pcie *pcie)
clk_disable_unprepare(res->master_clk);
clk_disable_unprepare(res->cfg_clk);
clk_disable_unprepare(res->aux_clk);
+
+ regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
}
static void qcom_pcie_post_deinit_2_3_2(struct qcom_pcie *pcie)
@@ -567,10 +550,16 @@ static int qcom_pcie_init_2_3_2(struct qcom_pcie *pcie)
u32 val;
int ret;
+ ret = regulator_bulk_enable(ARRAY_SIZE(res->supplies), res->supplies);
+ if (ret < 0) {
+ dev_err(dev, "cannot enable regulators\n");
+ return ret;
+ }
+
ret = clk_prepare_enable(res->aux_clk);
if (ret) {
dev_err(dev, "cannot prepare/enable aux clock\n");
- return ret;
+ goto err_aux_clk;
}
ret = clk_prepare_enable(res->cfg_clk);
@@ -621,6 +610,9 @@ err_master_clk:
err_cfg_clk:
clk_disable_unprepare(res->aux_clk);
+err_aux_clk:
+ regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
+
return ret;
}
@@ -1262,15 +1254,6 @@ static int qcom_pcie_probe(struct platform_device *pdev)
pp->msi_irq = platform_get_irq_byname(pdev, "msi");
if (pp->msi_irq < 0)
return pp->msi_irq;
-
- ret = devm_request_irq(dev, pp->msi_irq,
- qcom_pcie_msi_irq_handler,
- IRQF_SHARED | IRQF_NO_THREAD,
- "qcom-pcie-msi", pp);
- if (ret) {
- dev_err(dev, "cannot request msi irq\n");
- return ret;
- }
}
ret = phy_init(pcie->phy);
diff --git a/drivers/pci/endpoint/functions/pci-epf-test.c b/drivers/pci/endpoint/functions/pci-epf-test.c
index 64d8a17f8094..7cef85124325 100644
--- a/drivers/pci/endpoint/functions/pci-epf-test.c
+++ b/drivers/pci/endpoint/functions/pci-epf-test.c
@@ -70,7 +70,7 @@ struct pci_epf_test_data {
bool linkup_notifier;
};
-static int bar_size[] = { 512, 512, 1024, 16384, 131072, 1048576 };
+static size_t bar_size[] = { 512, 512, 1024, 16384, 131072, 1048576 };
static int pci_epf_test_copy(struct pci_epf_test *epf_test)
{
@@ -344,21 +344,23 @@ static void pci_epf_test_unbind(struct pci_epf *epf)
{
struct pci_epf_test *epf_test = epf_get_drvdata(epf);
struct pci_epc *epc = epf->epc;
+ struct pci_epf_bar *epf_bar;
int bar;
cancel_delayed_work(&epf_test->cmd_handler);
pci_epc_stop(epc);
for (bar = BAR_0; bar <= BAR_5; bar++) {
+ epf_bar = &epf->bar[bar];
+
if (epf_test->reg[bar]) {
pci_epf_free_space(epf, epf_test->reg[bar], bar);
- pci_epc_clear_bar(epc, epf->func_no, bar);
+ pci_epc_clear_bar(epc, epf->func_no, epf_bar);
}
}
}
static int pci_epf_test_set_bar(struct pci_epf *epf)
{
- int flags;
int bar;
int ret;
struct pci_epf_bar *epf_bar;
@@ -367,21 +369,27 @@ static int pci_epf_test_set_bar(struct pci_epf *epf)
struct pci_epf_test *epf_test = epf_get_drvdata(epf);
enum pci_barno test_reg_bar = epf_test->test_reg_bar;
- flags = PCI_BASE_ADDRESS_SPACE_MEMORY | PCI_BASE_ADDRESS_MEM_TYPE_32;
- if (sizeof(dma_addr_t) == 0x8)
- flags |= PCI_BASE_ADDRESS_MEM_TYPE_64;
-
for (bar = BAR_0; bar <= BAR_5; bar++) {
epf_bar = &epf->bar[bar];
- ret = pci_epc_set_bar(epc, epf->func_no, bar,
- epf_bar->phys_addr,
- epf_bar->size, flags);
+
+ epf_bar->flags |= upper_32_bits(epf_bar->size) ?
+ PCI_BASE_ADDRESS_MEM_TYPE_64 :
+ PCI_BASE_ADDRESS_MEM_TYPE_32;
+
+ ret = pci_epc_set_bar(epc, epf->func_no, epf_bar);
if (ret) {
pci_epf_free_space(epf, epf_test->reg[bar], bar);
dev_err(dev, "failed to set BAR%d\n", bar);
if (bar == test_reg_bar)
return ret;
}
+ /*
+ * pci_epc_set_bar() sets PCI_BASE_ADDRESS_MEM_TYPE_64
+ * if the specific implementation required a 64-bit BAR,
+ * even if we only requested a 32-bit BAR.
+ */
+ if (epf_bar->flags & PCI_BASE_ADDRESS_MEM_TYPE_64)
+ bar++;
}
return 0;
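pci_epf_test_set_bar() above now picks PCI_BASE_ADDRESS_MEM_TYPE_64 whenever upper_32_bits(size) is non-zero and skips the following BAR index, since a 64-bit BAR occupies two BAR registers. A standalone C sketch (illustrative only, not part of this patch) of that walk; the oversized BAR4 is a hypothetical value chosen to exercise the 64-bit path, not the driver's real bar_size table:

/*
 * Illustrative userspace sketch: walk six BARs, choosing a 64-bit
 * BAR when the size does not fit in 32 bits and skipping the next
 * BAR index, which holds the upper half of the address.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* BAR4 made hypothetically large to show the 64-bit case */
	uint64_t bar_size[6] = { 512, 512, 1024, 16384, 8ULL << 30, 1048576 };

	for (int bar = 0; bar < 6; bar++) {
		int is_64 = (bar_size[bar] >> 32) != 0;

		printf("BAR%d: size %#llx -> %s\n", bar,
		       (unsigned long long)bar_size[bar],
		       is_64 ? "64-bit (also consumes next BAR)" : "32-bit");

		if (is_64)
			bar++;	/* next BAR register holds the upper half */
	}
	return 0;
}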
diff --git a/drivers/pci/endpoint/pci-epc-core.c b/drivers/pci/endpoint/pci-epc-core.c
index e245bba0ab53..b0ee42739c3c 100644
--- a/drivers/pci/endpoint/pci-epc-core.c
+++ b/drivers/pci/endpoint/pci-epc-core.c
@@ -276,22 +276,25 @@ EXPORT_SYMBOL_GPL(pci_epc_map_addr);
* pci_epc_clear_bar() - reset the BAR
* @epc: the EPC device for which the BAR has to be cleared
* @func_no: the endpoint function number in the EPC device
- * @bar: the BAR number that has to be reset
+ * @epf_bar: the struct epf_bar that contains the BAR information
*
* Invoke to reset the BAR of the endpoint device.
*/
-void pci_epc_clear_bar(struct pci_epc *epc, u8 func_no, int bar)
+void pci_epc_clear_bar(struct pci_epc *epc, u8 func_no,
+ struct pci_epf_bar *epf_bar)
{
unsigned long flags;
- if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
+ if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions ||
+ (epf_bar->barno == BAR_5 &&
+ epf_bar->flags & PCI_BASE_ADDRESS_MEM_TYPE_64))
return;
if (!epc->ops->clear_bar)
return;
spin_lock_irqsave(&epc->lock, flags);
- epc->ops->clear_bar(epc, func_no, bar);
+ epc->ops->clear_bar(epc, func_no, epf_bar);
spin_unlock_irqrestore(&epc->lock, flags);
}
EXPORT_SYMBOL_GPL(pci_epc_clear_bar);
@@ -300,26 +303,31 @@ EXPORT_SYMBOL_GPL(pci_epc_clear_bar);
* pci_epc_set_bar() - configure BAR in order for host to assign PCI addr space
* @epc: the EPC device on which BAR has to be configured
* @func_no: the endpoint function number in the EPC device
- * @bar: the BAR number that has to be configured
- * @size: the size of the addr space
- * @flags: specify memory allocation/io allocation/32bit address/64 bit address
+ * @epf_bar: the struct epf_bar that contains the BAR information
*
* Invoke to configure the BAR of the endpoint device.
*/
-int pci_epc_set_bar(struct pci_epc *epc, u8 func_no, enum pci_barno bar,
- dma_addr_t bar_phys, size_t size, int flags)
+int pci_epc_set_bar(struct pci_epc *epc, u8 func_no,
+ struct pci_epf_bar *epf_bar)
{
int ret;
unsigned long irq_flags;
-
- if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
+ int flags = epf_bar->flags;
+
+ if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions ||
+ (epf_bar->barno == BAR_5 &&
+ flags & PCI_BASE_ADDRESS_MEM_TYPE_64) ||
+ (flags & PCI_BASE_ADDRESS_SPACE_IO &&
+ flags & PCI_BASE_ADDRESS_IO_MASK) ||
+ (upper_32_bits(epf_bar->size) &&
+ !(flags & PCI_BASE_ADDRESS_MEM_TYPE_64)))
return -EINVAL;
if (!epc->ops->set_bar)
return 0;
spin_lock_irqsave(&epc->lock, irq_flags);
- ret = epc->ops->set_bar(epc, func_no, bar, bar_phys, size, flags);
+ ret = epc->ops->set_bar(epc, func_no, epf_bar);
spin_unlock_irqrestore(&epc->lock, irq_flags);
return ret;
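The reworked pci_epc_set_bar() above rejects obviously invalid requests before taking the lock: BAR_5 cannot be 64-bit (there is no BAR6 for the upper half), an I/O BAR must not carry bits outside the low two flag bits, and a size above 4 GiB requires PCI_BASE_ADDRESS_MEM_TYPE_64. A standalone C sketch (illustrative only, not part of this patch) of those checks as a pure predicate, using the standard PCI BAR flag encodings:

/*
 * Illustrative userspace sketch: the sanity checks applied to a
 * BAR request before it reaches the controller driver.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PCI_BASE_ADDRESS_SPACE_IO	0x01
#define PCI_BASE_ADDRESS_IO_MASK	(~0x03UL)
#define PCI_BASE_ADDRESS_MEM_TYPE_64	0x04
#define BAR_5				5

static bool bar_request_valid(int barno, uint64_t size, unsigned long flags)
{
	if (barno == BAR_5 && (flags & PCI_BASE_ADDRESS_MEM_TYPE_64))
		return false;	/* no BAR6 to hold the upper half */
	if ((flags & PCI_BASE_ADDRESS_SPACE_IO) && (flags & PCI_BASE_ADDRESS_IO_MASK))
		return false;	/* I/O BAR with extra flag bits set */
	if ((size >> 32) && !(flags & PCI_BASE_ADDRESS_MEM_TYPE_64))
		return false;	/* >4 GiB needs a 64-bit BAR */
	return true;
}

int main(void)
{
	printf("%d\n", bar_request_valid(5, 4096, PCI_BASE_ADDRESS_MEM_TYPE_64));	/* 0 */
	printf("%d\n", bar_request_valid(0, 8ULL << 30, 0));				/* 0 */
	printf("%d\n", bar_request_valid(0, 8ULL << 30, PCI_BASE_ADDRESS_MEM_TYPE_64));	/* 1 */
	return 0;
}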
diff --git a/drivers/pci/endpoint/pci-epf-core.c b/drivers/pci/endpoint/pci-epf-core.c
index 766ce1dca2ec..465b5f058b6d 100644
--- a/drivers/pci/endpoint/pci-epf-core.c
+++ b/drivers/pci/endpoint/pci-epf-core.c
@@ -98,6 +98,8 @@ void pci_epf_free_space(struct pci_epf *epf, void *addr, enum pci_barno bar)
epf->bar[bar].phys_addr = 0;
epf->bar[bar].size = 0;
+ epf->bar[bar].barno = 0;
+ epf->bar[bar].flags = 0;
}
EXPORT_SYMBOL_GPL(pci_epf_free_space);
@@ -126,6 +128,8 @@ void *pci_epf_alloc_space(struct pci_epf *epf, size_t size, enum pci_barno bar)
epf->bar[bar].phys_addr = phys_addr;
epf->bar[bar].size = size;
+ epf->bar[bar].barno = bar;
+ epf->bar[bar].flags = PCI_BASE_ADDRESS_SPACE_MEMORY;
return space;
}
@@ -200,29 +204,17 @@ struct pci_epf *pci_epf_create(const char *name)
int ret;
struct pci_epf *epf;
struct device *dev;
- char *func_name;
- char *buf;
+ int len;
epf = kzalloc(sizeof(*epf), GFP_KERNEL);
- if (!epf) {
- ret = -ENOMEM;
- goto err_ret;
- }
-
- buf = kstrdup(name, GFP_KERNEL);
- if (!buf) {
- ret = -ENOMEM;
- goto free_epf;
- }
-
- func_name = buf;
- buf = strchrnul(buf, '.');
- *buf = '\0';
+ if (!epf)
+ return ERR_PTR(-ENOMEM);
- epf->name = kstrdup(func_name, GFP_KERNEL);
+ len = strchrnul(name, '.') - name;
+ epf->name = kstrndup(name, len, GFP_KERNEL);
if (!epf->name) {
- ret = -ENOMEM;
- goto free_func_name;
+ kfree(epf);
+ return ERR_PTR(-ENOMEM);
}
dev = &epf->dev;
@@ -231,28 +223,18 @@ struct pci_epf *pci_epf_create(const char *name)
dev->type = &pci_epf_type;
ret = dev_set_name(dev, "%s", name);
- if (ret)
- goto put_dev;
+ if (ret) {
+ put_device(dev);
+ return ERR_PTR(ret);
+ }
ret = device_add(dev);
- if (ret)
- goto put_dev;
+ if (ret) {
+ put_device(dev);
+ return ERR_PTR(ret);
+ }
- kfree(func_name);
return epf;
-
-put_dev:
- put_device(dev);
- kfree(epf->name);
-
-free_func_name:
- kfree(func_name);
-
-free_epf:
- kfree(epf);
-
-err_ret:
- return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(pci_epf_create);
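pci_epf_create() above now derives the driver name by cutting the device name at the first '.', using strchrnul() plus kstrndup() instead of duplicating and mutating the string. A standalone C sketch (illustrative only, not part of this patch); strcspn() stands in for strchrnul() so the example builds without _GNU_SOURCE:

/*
 * Illustrative userspace sketch: derive the function driver name
 * from an endpoint function device name by cutting at the first '.'.
 */
#include <stdio.h>
#include <string.h>

int main(void)
{
	const char *names[] = { "pci_epf_test.0", "pci_epf_test", "foo.bar.0" };

	for (size_t i = 0; i < sizeof(names) / sizeof(names[0]); i++) {
		size_t len = strcspn(names[i], ".");

		printf("device name \"%s\" -> driver name \"%.*s\"\n",
		       names[i], (int)len, names[i]);
	}
	return 0;
}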
diff --git a/drivers/pci/host-bridge.c b/drivers/pci/host-bridge.c
index ac8d81268296..e01d53f5b32f 100644
--- a/drivers/pci/host-bridge.c
+++ b/drivers/pci/host-bridge.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * host bridge related code
+ * Host bridge related code
*/
#include <linux/kernel.h>
diff --git a/drivers/pci/host/pcie-altera.c b/drivers/pci/host/pcie-altera.c
index 2235f4760951..a6af62e0256d 100644
--- a/drivers/pci/host/pcie-altera.c
+++ b/drivers/pci/host/pcie-altera.c
@@ -145,7 +145,7 @@ static bool altera_pcie_valid_device(struct altera_pcie *pcie,
static int tlp_read_packet(struct altera_pcie *pcie, u32 *value)
{
int i;
- bool sop = 0;
+ bool sop = false;
u32 ctrl;
u32 reg0, reg1;
u32 comp_status = 1;
diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
index e2198a2feeca..b45b375c0e6c 100644
--- a/drivers/pci/hotplug/acpiphp_glue.c
+++ b/drivers/pci/hotplug/acpiphp_glue.c
@@ -541,6 +541,7 @@ static unsigned int get_slot_status(struct acpiphp_slot *slot)
{
unsigned long long sta = 0;
struct acpiphp_func *func;
+ u32 dvid;
list_for_each_entry(func, &slot->funcs, sibling) {
if (func->flags & FUNC_HAS_STA) {
@@ -551,19 +552,27 @@ static unsigned int get_slot_status(struct acpiphp_slot *slot)
if (ACPI_SUCCESS(status) && sta)
break;
} else {
- u32 dvid;
-
- pci_bus_read_config_dword(slot->bus,
- PCI_DEVFN(slot->device,
- func->function),
- PCI_VENDOR_ID, &dvid);
- if (dvid != 0xffffffff) {
+ if (pci_bus_read_dev_vendor_id(slot->bus,
+ PCI_DEVFN(slot->device, func->function),
+ &dvid, 0)) {
sta = ACPI_STA_ALL;
break;
}
}
}
+ if (!sta) {
+ /*
+ * Check for the slot itself since it may be that the
+ * ACPI slot is a device below PCIe upstream port so in
+ * that case it may not even be reachable yet.
+ */
+ if (pci_bus_read_dev_vendor_id(slot->bus,
+ PCI_DEVFN(slot->device, 0), &dvid, 0)) {
+ sta = ACPI_STA_ALL;
+ }
+ }
+
return (unsigned int)sta;
}
diff --git a/drivers/pci/hotplug/cpqphp_ctrl.c b/drivers/pci/hotplug/cpqphp_ctrl.c
index b1b6e45253b2..616df442520b 100644
--- a/drivers/pci/hotplug/cpqphp_ctrl.c
+++ b/drivers/pci/hotplug/cpqphp_ctrl.c
@@ -2812,18 +2812,16 @@ static int configure_new_function(struct controller *ctrl, struct pci_func *func
dbg("CND: length = 0x%x\n", base);
io_node = get_io_resource(&(resources->io_head), base);
+ if (!io_node)
+ return -ENOMEM;
dbg("Got io_node start = %8.8x, length = %8.8x next (%p)\n",
io_node->base, io_node->length, io_node->next);
dbg("func (%p) io_head (%p)\n", func, func->io_head);
/* allocate the resource to the board */
- if (io_node) {
- base = io_node->base;
-
- io_node->next = func->io_head;
- func->io_head = io_node;
- } else
- return -ENOMEM;
+ base = io_node->base;
+ io_node->next = func->io_head;
+ func->io_head = io_node;
} else if ((temp_register & 0x0BL) == 0x08) {
/* Map prefetchable memory */
base = temp_register & 0xFFFFFFF0;
diff --git a/drivers/pci/hotplug/pciehp.h b/drivers/pci/hotplug/pciehp.h
index 636ed8f4b869..88e917c9120f 100644
--- a/drivers/pci/hotplug/pciehp.h
+++ b/drivers/pci/hotplug/pciehp.h
@@ -20,10 +20,11 @@
#include <linux/pci_hotplug.h>
#include <linux/delay.h>
#include <linux/sched/signal.h> /* signal_pending() */
-#include <linux/pcieport_if.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>
+#include "../pcie/portdrv.h"
+
#define MY_NAME "pciehp"
extern bool pciehp_poll_mode;
diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c
index 677924ae0350..8adf4a64f291 100644
--- a/drivers/pci/iov.c
+++ b/drivers/pci/iov.c
@@ -1,12 +1,10 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * drivers/pci/iov.c
- *
- * Copyright (C) 2009 Intel Corporation, Yu Zhao <yu.zhao@intel.com>
- *
- * PCI Express I/O Virtualization (IOV) support.
+ * PCI Express I/O Virtualization (IOV) support
* Single Root IOV 1.0
* Address Translation Service 1.0
+ *
+ * Copyright (C) 2009 Intel Corporation, Yu Zhao <yu.zhao@intel.com>
*/
#include <linux/pci.h>
@@ -114,6 +112,29 @@ resource_size_t pci_iov_resource_size(struct pci_dev *dev, int resno)
return dev->sriov->barsz[resno - PCI_IOV_RESOURCES];
}
+static void pci_read_vf_config_common(struct pci_dev *virtfn)
+{
+ struct pci_dev *physfn = virtfn->physfn;
+
+ /*
+ * Some config registers are the same across all associated VFs.
+ * Read them once from VF0 so we can skip reading them from the
+ * other VFs.
+ *
+ * PCIe r4.0, sec 9.3.4.1, technically doesn't require all VFs to
+ * have the same Revision ID and Subsystem ID, but we assume they
+ * do.
+ */
+ pci_read_config_dword(virtfn, PCI_CLASS_REVISION,
+ &physfn->sriov->class);
+ pci_read_config_byte(virtfn, PCI_HEADER_TYPE,
+ &physfn->sriov->hdr_type);
+ pci_read_config_word(virtfn, PCI_SUBSYSTEM_VENDOR_ID,
+ &physfn->sriov->subsystem_vendor);
+ pci_read_config_word(virtfn, PCI_SUBSYSTEM_ID,
+ &physfn->sriov->subsystem_device);
+}
+
int pci_iov_add_virtfn(struct pci_dev *dev, int id)
{
int i;
@@ -136,13 +157,17 @@ int pci_iov_add_virtfn(struct pci_dev *dev, int id)
virtfn->devfn = pci_iov_virtfn_devfn(dev, id);
virtfn->vendor = dev->vendor;
virtfn->device = iov->vf_device;
+ virtfn->is_virtfn = 1;
+ virtfn->physfn = pci_dev_get(dev);
+
+ if (id == 0)
+ pci_read_vf_config_common(virtfn);
+
rc = pci_setup_device(virtfn);
if (rc)
- goto failed0;
+ goto failed1;
virtfn->dev.parent = dev->dev.parent;
- virtfn->physfn = pci_dev_get(dev);
- virtfn->is_virtfn = 1;
virtfn->multifunction = 0;
for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
@@ -163,10 +188,10 @@ int pci_iov_add_virtfn(struct pci_dev *dev, int id)
sprintf(buf, "virtfn%u", id);
rc = sysfs_create_link(&dev->dev.kobj, &virtfn->dev.kobj, buf);
if (rc)
- goto failed1;
+ goto failed2;
rc = sysfs_create_link(&virtfn->dev.kobj, &dev->dev.kobj, "physfn");
if (rc)
- goto failed2;
+ goto failed3;
kobject_uevent(&virtfn->dev.kobj, KOBJ_CHANGE);
@@ -174,11 +199,12 @@ int pci_iov_add_virtfn(struct pci_dev *dev, int id)
return 0;
-failed2:
+failed3:
sysfs_remove_link(&dev->dev.kobj, buf);
+failed2:
+ pci_stop_and_remove_bus_device(virtfn);
failed1:
pci_dev_put(dev);
- pci_stop_and_remove_bus_device(virtfn);
failed0:
virtfn_remove_bus(dev->bus, bus);
failed:
diff --git a/drivers/pci/mmap.c b/drivers/pci/mmap.c
index 814a3ce341fc..24505b08de40 100644
--- a/drivers/pci/mmap.c
+++ b/drivers/pci/mmap.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * mmap.c — generic PCI resource mmap helper
+ * Generic PCI resource mmap helper
*
* Copyright © 2017 Amazon.com, Inc. or its affiliates.
*
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index 8b0729c94bb7..30250631efe7 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -1,7 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * File: msi.c
- * Purpose: PCI Message Signaled Interrupt (MSI)
+ * PCI Message Signaled Interrupt (MSI)
*
* Copyright (C) 2003-2004 Intel
* Copyright (C) Tom Long Nguyen (tom.l.nguyen@intel.com)
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
index 78157688dcc9..1abdbf267c19 100644
--- a/drivers/pci/pci-acpi.c
+++ b/drivers/pci/pci-acpi.c
@@ -1,7 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * File: pci-acpi.c
- * Purpose: Provide PCI support in ACPI
+ * PCI support in ACPI
*
* Copyright (C) 2005 David Shaohua Li <shaohua.li@intel.com>
* Copyright (C) 2004 Tom Long Nguyen <tom.l.nguyen@intel.com>
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index 3bed6beda051..28cf87a7ff4c 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -1,7 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * drivers/pci/pci-driver.c
- *
* (C) Copyright 2002-2004, 2007 Greg Kroah-Hartman <greg@kroah.com>
* (C) Copyright 2007 Novell Inc.
*/
@@ -19,6 +17,7 @@
#include <linux/suspend.h>
#include <linux/kexec.h>
#include "pci.h"
+#include "pcie/portdrv.h"
struct pci_dynid {
struct list_head node;
@@ -714,6 +713,18 @@ static void pci_pm_complete(struct device *dev)
#endif /* !CONFIG_PM_SLEEP */
#ifdef CONFIG_SUSPEND
+static void pcie_pme_root_status_cleanup(struct pci_dev *pci_dev)
+{
+ /*
+ * Some BIOSes forget to clear Root PME Status bits after system
+ * wakeup, which breaks ACPI-based runtime wakeup on PCI Express.
+ * Clear those bits now just in case (shouldn't hurt).
+ */
+ if (pci_is_pcie(pci_dev) &&
+ (pci_pcie_type(pci_dev) == PCI_EXP_TYPE_ROOT_PORT ||
+ pci_pcie_type(pci_dev) == PCI_EXP_TYPE_RC_EC))
+ pcie_clear_root_pme_status(pci_dev);
+}
static int pci_pm_suspend(struct device *dev)
{
@@ -873,6 +884,8 @@ static int pci_pm_resume_noirq(struct device *dev)
if (pci_has_legacy_pm_support(pci_dev))
return pci_legacy_resume_early(dev);
+ pcie_pme_root_status_cleanup(pci_dev);
+
if (drv && drv->pm && drv->pm->resume_noirq)
error = drv->pm->resume_noirq(dev);
@@ -1517,6 +1530,42 @@ static int pci_uevent(struct device *dev, struct kobj_uevent_env *env)
return 0;
}
+#if defined(CONFIG_PCIEAER) || defined(CONFIG_EEH)
+/**
+ * pci_uevent_ers - emit a uevent during recovery path of PCI device
+ * @pdev: PCI device undergoing error recovery
+ * @err_type: type of error event
+ */
+void pci_uevent_ers(struct pci_dev *pdev, enum pci_ers_result err_type)
+{
+ int idx = 0;
+ char *envp[3];
+
+ switch (err_type) {
+ case PCI_ERS_RESULT_NONE:
+ case PCI_ERS_RESULT_CAN_RECOVER:
+ envp[idx++] = "ERROR_EVENT=BEGIN_RECOVERY";
+ envp[idx++] = "DEVICE_ONLINE=0";
+ break;
+ case PCI_ERS_RESULT_RECOVERED:
+ envp[idx++] = "ERROR_EVENT=SUCCESSFUL_RECOVERY";
+ envp[idx++] = "DEVICE_ONLINE=1";
+ break;
+ case PCI_ERS_RESULT_DISCONNECT:
+ envp[idx++] = "ERROR_EVENT=FAILED_RECOVERY";
+ envp[idx++] = "DEVICE_ONLINE=0";
+ break;
+ default:
+ break;
+ }
+
+ if (idx > 0) {
+ envp[idx++] = NULL;
+ kobject_uevent_env(&pdev->dev.kobj, KOBJ_CHANGE, envp);
+ }
+}
+#endif
+
static int pci_bus_num_vf(struct device *dev)
{
return pci_num_vf(to_pci_dev(dev));
@@ -1538,8 +1587,49 @@ struct bus_type pci_bus_type = {
};
EXPORT_SYMBOL(pci_bus_type);
+#ifdef CONFIG_PCIEPORTBUS
+static int pcie_port_bus_match(struct device *dev, struct device_driver *drv)
+{
+ struct pcie_device *pciedev;
+ struct pcie_port_service_driver *driver;
+
+ if (drv->bus != &pcie_port_bus_type || dev->bus != &pcie_port_bus_type)
+ return 0;
+
+ pciedev = to_pcie_device(dev);
+ driver = to_service_driver(drv);
+
+ if (driver->service != pciedev->service)
+ return 0;
+
+ if (driver->port_type != PCIE_ANY_PORT &&
+ driver->port_type != pci_pcie_type(pciedev->port))
+ return 0;
+
+ return 1;
+}
+
+struct bus_type pcie_port_bus_type = {
+ .name = "pci_express",
+ .match = pcie_port_bus_match,
+};
+EXPORT_SYMBOL_GPL(pcie_port_bus_type);
+#endif
+
static int __init pci_driver_init(void)
{
- return bus_register(&pci_bus_type);
+ int ret;
+
+ ret = bus_register(&pci_bus_type);
+ if (ret)
+ return ret;
+
+#ifdef CONFIG_PCIEPORTBUS
+ ret = bus_register(&pcie_port_bus_type);
+ if (ret)
+ return ret;
+#endif
+
+ return 0;
}
postcore_initcall(pci_driver_init);
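pcie_port_bus_match() above binds a port service driver only when the service types agree and the driver accepts the device's port type (or any port). A standalone C sketch (illustrative only, not part of this patch) of that rule as a pure function; the numeric service and port-type codes below are placeholders chosen for the example, not the kernel's definitions:

/*
 * Illustrative userspace sketch: the matching rule reduced to a
 * pure predicate over service type and port type.
 */
#include <stdbool.h>
#include <stdio.h>

#define ANY_PORT	(-1)		/* placeholder for PCIE_ANY_PORT */
#define SVC_PME		1		/* placeholder service codes */
#define SVC_AER		2
#define TYPE_ROOT	10		/* placeholder port-type codes */
#define TYPE_DOWNSTREAM	11

static bool service_matches(int drv_service, int drv_port_type,
			    int dev_service, int dev_port_type)
{
	if (drv_service != dev_service)
		return false;
	if (drv_port_type != ANY_PORT && drv_port_type != dev_port_type)
		return false;
	return true;
}

int main(void)
{
	printf("%d\n", service_matches(SVC_AER, ANY_PORT, SVC_AER, TYPE_ROOT));		/* 1 */
	printf("%d\n", service_matches(SVC_PME, TYPE_ROOT, SVC_PME, TYPE_DOWNSTREAM));	/* 0 */
	return 0;
}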
diff --git a/drivers/pci/pci-label.c b/drivers/pci/pci-label.c
index a961a71d950f..a5910f942857 100644
--- a/drivers/pci/pci-label.c
+++ b/drivers/pci/pci-label.c
@@ -1,7 +1,8 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Purpose: Export the firmware instance and label associated with
- * a pci device to sysfs
+ * Export the firmware instance and label associated with a PCI device to
+ * sysfs
+ *
* Copyright (C) 2010 Dell Inc.
* by Narendra K <Narendra_K@dell.com>,
* Jordan Hargrave <Jordan_Hargrave@dell.com>
diff --git a/drivers/pci/pci-stub.c b/drivers/pci/pci-stub.c
index 10d54f939048..66f8a59fadbd 100644
--- a/drivers/pci/pci-stub.c
+++ b/drivers/pci/pci-stub.c
@@ -1,5 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
-/* pci-stub - simple stub driver to reserve a pci device
+/*
+ * Simple stub driver to reserve a PCI device
*
* Copyright (C) 2008 Red Hat, Inc.
* Author:
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
index eb6bee8724cc..366d93af051d 100644
--- a/drivers/pci/pci-sysfs.c
+++ b/drivers/pci/pci-sysfs.c
@@ -1,7 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * drivers/pci/pci-sysfs.c
- *
* (C) Copyright 2002-2004 Greg Kroah-Hartman <greg@kroah.com>
* (C) Copyright 2002-2004 IBM Corp.
* (C) Copyright 2003 Matthew Wilcox
@@ -12,7 +10,6 @@
* File attributes for PCI devices
*
* Modeled after usb's driverfs.c
- *
*/
@@ -158,45 +155,18 @@ static DEVICE_ATTR_RO(resource);
static ssize_t max_link_speed_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct pci_dev *pci_dev = to_pci_dev(dev);
- u32 linkcap;
- int err;
- const char *speed;
-
- err = pcie_capability_read_dword(pci_dev, PCI_EXP_LNKCAP, &linkcap);
- if (err)
- return -EINVAL;
-
- switch (linkcap & PCI_EXP_LNKCAP_SLS) {
- case PCI_EXP_LNKCAP_SLS_8_0GB:
- speed = "8 GT/s";
- break;
- case PCI_EXP_LNKCAP_SLS_5_0GB:
- speed = "5 GT/s";
- break;
- case PCI_EXP_LNKCAP_SLS_2_5GB:
- speed = "2.5 GT/s";
- break;
- default:
- speed = "Unknown speed";
- }
+ struct pci_dev *pdev = to_pci_dev(dev);
- return sprintf(buf, "%s\n", speed);
+ return sprintf(buf, "%s\n", PCIE_SPEED2STR(pcie_get_speed_cap(pdev)));
}
static DEVICE_ATTR_RO(max_link_speed);
static ssize_t max_link_width_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct pci_dev *pci_dev = to_pci_dev(dev);
- u32 linkcap;
- int err;
-
- err = pcie_capability_read_dword(pci_dev, PCI_EXP_LNKCAP, &linkcap);
- if (err)
- return -EINVAL;
+ struct pci_dev *pdev = to_pci_dev(dev);
- return sprintf(buf, "%u\n", (linkcap & PCI_EXP_LNKCAP_MLW) >> 4);
+ return sprintf(buf, "%u\n", pcie_get_width_cap(pdev));
}
static DEVICE_ATTR_RO(max_link_width);
@@ -213,6 +183,9 @@ static ssize_t current_link_speed_show(struct device *dev,
return -EINVAL;
switch (linkstat & PCI_EXP_LNKSTA_CLS) {
+ case PCI_EXP_LNKSTA_CLS_16_0GB:
+ speed = "16 GT/s";
+ break;
case PCI_EXP_LNKSTA_CLS_8_0GB:
speed = "8 GT/s";
break;
@@ -982,38 +955,6 @@ static ssize_t pci_write_config(struct file *filp, struct kobject *kobj,
return count;
}
-static ssize_t read_vpd_attr(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr, char *buf,
- loff_t off, size_t count)
-{
- struct pci_dev *dev = to_pci_dev(kobj_to_dev(kobj));
-
- if (bin_attr->size > 0) {
- if (off > bin_attr->size)
- count = 0;
- else if (count > bin_attr->size - off)
- count = bin_attr->size - off;
- }
-
- return pci_read_vpd(dev, off, count, buf);
-}
-
-static ssize_t write_vpd_attr(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr, char *buf,
- loff_t off, size_t count)
-{
- struct pci_dev *dev = to_pci_dev(kobj_to_dev(kobj));
-
- if (bin_attr->size > 0) {
- if (off > bin_attr->size)
- count = 0;
- else if (count > bin_attr->size - off)
- count = bin_attr->size - off;
- }
-
- return pci_write_vpd(dev, off, count, buf);
-}
-
#ifdef HAVE_PCI_LEGACY
/**
* pci_read_legacy_io - read byte(s) from legacy I/O port space
@@ -1517,46 +1458,20 @@ static struct device_attribute reset_attr = __ATTR(reset, 0200, NULL, reset_stor
static int pci_create_capabilities_sysfs(struct pci_dev *dev)
{
int retval;
- struct bin_attribute *attr;
- /* If the device has VPD, try to expose it in sysfs. */
- if (dev->vpd) {
- attr = kzalloc(sizeof(*attr), GFP_ATOMIC);
- if (!attr)
- return -ENOMEM;
-
- sysfs_bin_attr_init(attr);
- attr->size = 0;
- attr->attr.name = "vpd";
- attr->attr.mode = S_IRUSR | S_IWUSR;
- attr->read = read_vpd_attr;
- attr->write = write_vpd_attr;
- retval = sysfs_create_bin_file(&dev->dev.kobj, attr);
- if (retval) {
- kfree(attr);
- return retval;
- }
- dev->vpd->attr = attr;
- }
-
- /* Active State Power Management */
+ pcie_vpd_create_sysfs_dev_files(dev);
pcie_aspm_create_sysfs_dev_files(dev);
- if (!pci_probe_reset_function(dev)) {
+ if (dev->reset_fn) {
retval = device_create_file(&dev->dev, &reset_attr);
if (retval)
goto error;
- dev->reset_fn = 1;
}
return 0;
error:
pcie_aspm_remove_sysfs_dev_files(dev);
- if (dev->vpd && dev->vpd->attr) {
- sysfs_remove_bin_file(&dev->dev.kobj, dev->vpd->attr);
- kfree(dev->vpd->attr);
- }
-
+ pcie_vpd_remove_sysfs_dev_files(dev);
return retval;
}
@@ -1630,11 +1545,7 @@ err:
static void pci_remove_capabilities_sysfs(struct pci_dev *dev)
{
- if (dev->vpd && dev->vpd->attr) {
- sysfs_remove_bin_file(&dev->dev.kobj, dev->vpd->attr);
- kfree(dev->vpd->attr);
- }
-
+ pcie_vpd_remove_sysfs_dev_files(dev);
pcie_aspm_remove_sysfs_dev_files(dev);
if (dev->reset_fn) {
device_remove_file(&dev->dev, &reset_attr);
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index f6a4dd10d9b0..43aeecc4b675 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -1,11 +1,11 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * PCI Bus Services, see include/linux/pci.h for further explanation.
+ * PCI Bus Services, see include/linux/pci.h for further explanation.
*
- * Copyright 1993 -- 1997 Drew Eckhardt, Frederic Potter,
- * David Mosberger-Tang
+ * Copyright 1993 -- 1997 Drew Eckhardt, Frederic Potter,
+ * David Mosberger-Tang
*
- * Copyright 1997 -- 2000 Martin Mares <mj@ucw.cz>
+ * Copyright 1997 -- 2000 Martin Mares <mj@ucw.cz>
*/
#include <linux/acpi.h>
@@ -22,6 +22,7 @@
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/log2.h>
+#include <linux/logic_pio.h>
#include <linux/pci-aspm.h>
#include <linux/pm_wakeup.h>
#include <linux/interrupt.h>
@@ -126,6 +127,9 @@ static int __init pcie_port_pm_setup(char *str)
}
__setup("pcie_port_pm=", pcie_port_pm_setup);
+/* Time to wait after a reset for device to become responsive */
+#define PCIE_RESET_READY_POLL_MS 60000
+
/**
* pci_bus_max_busnr - returns maximum PCI bus number of given bus' children
* @bus: pointer to PCI bus structure to search
@@ -1684,6 +1688,15 @@ int pci_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state)
EXPORT_SYMBOL_GPL(pci_set_pcie_reset_state);
/**
+ * pcie_clear_root_pme_status - Clear root port PME interrupt status.
+ * @dev: PCIe root port or event collector.
+ */
+void pcie_clear_root_pme_status(struct pci_dev *dev)
+{
+ pcie_capability_set_dword(dev, PCI_EXP_RTSTA, PCI_EXP_RTSTA_PME);
+}
+
+/**
* pci_check_pme_status - Check if given device has generated PME.
* @dev: Device to check.
*
@@ -3440,68 +3453,35 @@ int pci_request_regions_exclusive(struct pci_dev *pdev, const char *res_name)
}
EXPORT_SYMBOL(pci_request_regions_exclusive);
-#ifdef PCI_IOBASE
-struct io_range {
- struct list_head list;
- phys_addr_t start;
- resource_size_t size;
-};
-
-static LIST_HEAD(io_range_list);
-static DEFINE_SPINLOCK(io_range_lock);
-#endif
-
/*
* Record the PCI IO range (expressed as CPU physical address + size).
 * Return a negative value if an error has occurred, zero otherwise
*/
-int __weak pci_register_io_range(phys_addr_t addr, resource_size_t size)
+int pci_register_io_range(struct fwnode_handle *fwnode, phys_addr_t addr,
+ resource_size_t size)
{
- int err = 0;
-
+ int ret = 0;
#ifdef PCI_IOBASE
- struct io_range *range;
- resource_size_t allocated_size = 0;
-
- /* check if the range hasn't been previously recorded */
- spin_lock(&io_range_lock);
- list_for_each_entry(range, &io_range_list, list) {
- if (addr >= range->start && addr + size <= range->start + size) {
- /* range already registered, bail out */
- goto end_register;
- }
- allocated_size += range->size;
- }
-
- /* range not registed yet, check for available space */
- if (allocated_size + size - 1 > IO_SPACE_LIMIT) {
- /* if it's too big check if 64K space can be reserved */
- if (allocated_size + SZ_64K - 1 > IO_SPACE_LIMIT) {
- err = -E2BIG;
- goto end_register;
- }
+ struct logic_pio_hwaddr *range;
- size = SZ_64K;
- pr_warn("Requested IO range too big, new size set to 64K\n");
- }
+ if (!size || addr + size < addr)
+ return -EINVAL;
- /* add the range to the list */
range = kzalloc(sizeof(*range), GFP_ATOMIC);
- if (!range) {
- err = -ENOMEM;
- goto end_register;
- }
+ if (!range)
+ return -ENOMEM;
- range->start = addr;
+ range->fwnode = fwnode;
range->size = size;
+ range->hw_start = addr;
+ range->flags = LOGIC_PIO_CPU_MMIO;
- list_add_tail(&range->list, &io_range_list);
-
-end_register:
- spin_unlock(&io_range_lock);
+ ret = logic_pio_register_range(range);
+ if (ret)
+ kfree(range);
#endif
- return err;
+ return ret;
}
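/*
 * Editor's note: illustrative sketch, not part of this patch. The OF address
 * code (drivers/of/address.c in this series) is the usual caller; a host
 * bridge path registers a CPU-physical I/O window against its fwnode and can
 * then translate both ways. The fwnode, address and size below are made up.
 */
static int example_register_io_window(struct fwnode_handle *fwnode)
{
	phys_addr_t cpu_addr = 0xf0000000;	/* hypothetical window */
	int ret;

	ret = pci_register_io_range(fwnode, cpu_addr, SZ_64K);
	if (ret)
		return ret;

	/* pci_address_to_pio()/pci_pio_to_address() now cover this window */
	return 0;
}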
phys_addr_t pci_pio_to_address(unsigned long pio)
@@ -3509,21 +3489,10 @@ phys_addr_t pci_pio_to_address(unsigned long pio)
phys_addr_t address = (phys_addr_t)OF_BAD_ADDR;
#ifdef PCI_IOBASE
- struct io_range *range;
- resource_size_t allocated_size = 0;
-
- if (pio > IO_SPACE_LIMIT)
+ if (pio >= MMIO_UPPER_LIMIT)
return address;
- spin_lock(&io_range_lock);
- list_for_each_entry(range, &io_range_list, list) {
- if (pio >= allocated_size && pio < allocated_size + range->size) {
- address = range->start + pio - allocated_size;
- break;
- }
- allocated_size += range->size;
- }
- spin_unlock(&io_range_lock);
+ address = logic_pio_to_hwaddr(pio);
#endif
return address;
@@ -3532,21 +3501,7 @@ phys_addr_t pci_pio_to_address(unsigned long pio)
unsigned long __weak pci_address_to_pio(phys_addr_t address)
{
#ifdef PCI_IOBASE
- struct io_range *res;
- resource_size_t offset = 0;
- unsigned long addr = -1;
-
- spin_lock(&io_range_lock);
- list_for_each_entry(res, &io_range_list, list) {
- if (address >= res->start && address < res->start + res->size) {
- addr = address - res->start + offset;
- break;
- }
- offset += res->size;
- }
- spin_unlock(&io_range_lock);
-
- return addr;
+ return logic_pio_trans_cpuaddr(address);
#else
if (address > IO_SPACE_LIMIT)
return (unsigned long)-1;
@@ -4017,20 +3972,13 @@ int pci_wait_for_pending_transaction(struct pci_dev *dev)
}
EXPORT_SYMBOL(pci_wait_for_pending_transaction);
-static void pci_flr_wait(struct pci_dev *dev)
+static int pci_dev_wait(struct pci_dev *dev, char *reset_type, int timeout)
{
- int delay = 1, timeout = 60000;
+ int delay = 1;
u32 id;
/*
- * Per PCIe r3.1, sec 6.6.2, a device must complete an FLR within
- * 100ms, but may silently discard requests while the FLR is in
- * progress. Wait 100ms before trying to access the device.
- */
- msleep(100);
-
- /*
- * After 100ms, the device should not silently discard config
+ * After reset, the device should not silently discard config
* requests, but it may still indicate that it needs more time by
* responding to them with CRS completions. The Root Port will
* generally synthesize ~0 data to complete the read (except when
@@ -4044,14 +3992,14 @@ static void pci_flr_wait(struct pci_dev *dev)
pci_read_config_dword(dev, PCI_COMMAND, &id);
while (id == ~0) {
if (delay > timeout) {
- pci_warn(dev, "not ready %dms after FLR; giving up\n",
- 100 + delay - 1);
- return;
+ pci_warn(dev, "not ready %dms after %s; giving up\n",
+ delay - 1, reset_type);
+ return -ENOTTY;
}
if (delay > 1000)
- pci_info(dev, "not ready %dms after FLR; waiting\n",
- 100 + delay - 1);
+ pci_info(dev, "not ready %dms after %s; waiting\n",
+ delay - 1, reset_type);
msleep(delay);
delay *= 2;
@@ -4059,7 +4007,10 @@ static void pci_flr_wait(struct pci_dev *dev)
}
if (delay > 1000)
- pci_info(dev, "ready %dms after FLR\n", 100 + delay - 1);
+ pci_info(dev, "ready %dms after %s\n", delay - 1,
+ reset_type);
+
+ return 0;
}
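/*
 * Editor's note (worked bound, not part of this patch): the delay doubles
 * from 1ms, so before "delay" exceeds a 60000ms timeout the loop above has
 * slept 1 + 2 + ... + 32768 = 65535ms.  The worst-case wait is therefore
 * just over the nominal timeout and always less than twice
 * PCIE_RESET_READY_POLL_MS, plus the config-read time per iteration.
 */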
/**
@@ -4088,13 +4039,21 @@ static bool pcie_has_flr(struct pci_dev *dev)
* device supports FLR before calling this function, e.g. by using the
* pcie_has_flr() helper.
*/
-void pcie_flr(struct pci_dev *dev)
+int pcie_flr(struct pci_dev *dev)
{
if (!pci_wait_for_pending_transaction(dev))
pci_err(dev, "timed out waiting for pending transaction; performing function level reset anyway\n");
pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_BCR_FLR);
- pci_flr_wait(dev);
+
+ /*
+ * Per PCIe r4.0, sec 6.6.2, a device must complete an FLR within
+ * 100ms, but may silently discard requests while the FLR is in
+ * progress. Wait 100ms before trying to access the device.
+ */
+ msleep(100);
+
+ return pci_dev_wait(dev, "FLR", PCIE_RESET_READY_POLL_MS);
}
EXPORT_SYMBOL_GPL(pcie_flr);
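/*
 * Editor's note: minimal usage sketch, not part of this patch. With
 * pcie_flr() now returning a status, a caller can propagate whether the
 * device actually came back after the reset:
 */
static int example_try_flr(struct pci_dev *dev)
{
	if (!pcie_has_flr(dev))
		return -ENOTTY;

	return pcie_flr(dev);	/* 0 on success, -ENOTTY if it never responds */
}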
@@ -4127,8 +4086,16 @@ static int pci_af_flr(struct pci_dev *dev, int probe)
pci_err(dev, "timed out waiting for pending transaction; performing AF function level reset anyway\n");
pci_write_config_byte(dev, pos + PCI_AF_CTRL, PCI_AF_CTRL_FLR);
- pci_flr_wait(dev);
- return 0;
+
+ /*
+ * Per Advanced Capabilities for Conventional PCI ECN, 13 April 2006,
+ * updated 27 July 2006; a device must complete an FLR within
+ * 100ms, but may silently discard requests while the FLR is in
+ * progress. Wait 100ms before trying to access the device.
+ */
+ msleep(100);
+
+ return pci_dev_wait(dev, "AF_FLR", PCIE_RESET_READY_POLL_MS);
}
/**
@@ -4173,7 +4140,7 @@ static int pci_pm_reset(struct pci_dev *dev, int probe)
pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr);
pci_dev_d3_sleep(dev);
- return 0;
+ return pci_dev_wait(dev, "PM D3->D0", PCIE_RESET_READY_POLL_MS);
}
void pci_reset_secondary_bus(struct pci_dev *dev)
@@ -4183,6 +4150,7 @@ void pci_reset_secondary_bus(struct pci_dev *dev)
pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &ctrl);
ctrl |= PCI_BRIDGE_CTL_BUS_RESET;
pci_write_config_word(dev, PCI_BRIDGE_CONTROL, ctrl);
+
/*
* PCI spec v3.0 7.6.4.2 requires minimum Trst of 1ms. Double
* this to 2ms to ensure that we meet the minimum requirement.
@@ -4214,9 +4182,11 @@ void __weak pcibios_reset_secondary_bus(struct pci_dev *dev)
* Use the bridge control register to assert reset on the secondary bus.
* Devices on the secondary bus are left in power-on state.
*/
-void pci_reset_bridge_secondary_bus(struct pci_dev *dev)
+int pci_reset_bridge_secondary_bus(struct pci_dev *dev)
{
pcibios_reset_secondary_bus(dev);
+
+ return pci_dev_wait(dev, "bus reset", PCIE_RESET_READY_POLL_MS);
}
EXPORT_SYMBOL_GPL(pci_reset_bridge_secondary_bus);
@@ -4379,8 +4349,9 @@ int __pci_reset_function_locked(struct pci_dev *dev)
if (rc != -ENOTTY)
return rc;
if (pcie_has_flr(dev)) {
- pcie_flr(dev);
- return 0;
+ rc = pcie_flr(dev);
+ if (rc != -ENOTTY)
+ return rc;
}
rc = pci_af_flr(dev, 0);
if (rc != -ENOTTY)
@@ -4450,9 +4421,8 @@ int pci_reset_function(struct pci_dev *dev)
{
int rc;
- rc = pci_probe_reset_function(dev);
- if (rc)
- return rc;
+ if (!dev->reset_fn)
+ return -ENOTTY;
pci_dev_lock(dev);
pci_dev_save_and_disable(dev);
@@ -4487,9 +4457,8 @@ int pci_reset_function_locked(struct pci_dev *dev)
{
int rc;
- rc = pci_probe_reset_function(dev);
- if (rc)
- return rc;
+ if (!dev->reset_fn)
+ return -ENOTTY;
pci_dev_save_and_disable(dev);
@@ -4511,18 +4480,17 @@ int pci_try_reset_function(struct pci_dev *dev)
{
int rc;
- rc = pci_probe_reset_function(dev);
- if (rc)
- return rc;
+ if (!dev->reset_fn)
+ return -ENOTTY;
if (!pci_dev_trylock(dev))
return -EAGAIN;
pci_dev_save_and_disable(dev);
rc = __pci_reset_function_locked(dev);
+ pci_dev_restore(dev);
pci_dev_unlock(dev);
- pci_dev_restore(dev);
return rc;
}
EXPORT_SYMBOL_GPL(pci_try_reset_function);
@@ -4730,7 +4698,9 @@ static void pci_slot_restore(struct pci_slot *slot)
list_for_each_entry(dev, &slot->bus->devices, bus_list) {
if (!dev->slot || dev->slot != slot)
continue;
+ pci_dev_lock(dev);
pci_dev_restore(dev);
+ pci_dev_unlock(dev);
if (dev->subordinate)
pci_bus_restore(dev->subordinate);
}
@@ -5147,6 +5117,180 @@ int pcie_get_minimum_link(struct pci_dev *dev, enum pci_bus_speed *speed,
EXPORT_SYMBOL(pcie_get_minimum_link);
/**
+ * pcie_bandwidth_available - determine minimum link settings of a PCIe
+ * device and its bandwidth limitation
+ * @dev: PCI device to query
+ * @limiting_dev: storage for device causing the bandwidth limitation
+ * @speed: storage for speed of limiting device
+ * @width: storage for width of limiting device
+ *
+ * Walk up the PCI device chain and find the point where the minimum
+ * bandwidth is available. Return the bandwidth available there and (if
+ * limiting_dev, speed, and width pointers are supplied) information about
+ * that point. The bandwidth returned is in Mb/s, i.e., megabits/second of
+ * raw bandwidth.
+ */
+u32 pcie_bandwidth_available(struct pci_dev *dev, struct pci_dev **limiting_dev,
+ enum pci_bus_speed *speed,
+ enum pcie_link_width *width)
+{
+ u16 lnksta;
+ enum pci_bus_speed next_speed;
+ enum pcie_link_width next_width;
+ u32 bw, next_bw;
+
+ if (speed)
+ *speed = PCI_SPEED_UNKNOWN;
+ if (width)
+ *width = PCIE_LNK_WIDTH_UNKNOWN;
+
+ bw = 0;
+
+ while (dev) {
+ pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &lnksta);
+
+ next_speed = pcie_link_speed[lnksta & PCI_EXP_LNKSTA_CLS];
+ next_width = (lnksta & PCI_EXP_LNKSTA_NLW) >>
+ PCI_EXP_LNKSTA_NLW_SHIFT;
+
+ next_bw = next_width * PCIE_SPEED2MBS_ENC(next_speed);
+
+ /* Check if current device limits the total bandwidth */
+ if (!bw || next_bw <= bw) {
+ bw = next_bw;
+
+ if (limiting_dev)
+ *limiting_dev = dev;
+ if (speed)
+ *speed = next_speed;
+ if (width)
+ *width = next_width;
+ }
+
+ dev = pci_upstream_bridge(dev);
+ }
+
+ return bw;
+}
+EXPORT_SYMBOL(pcie_bandwidth_available);
+
+/**
+ * pcie_get_speed_cap - query for the PCI device's link speed capability
+ * @dev: PCI device to query
+ *
+ * Query the PCI device speed capability. Return the maximum link speed
+ * supported by the device.
+ */
+enum pci_bus_speed pcie_get_speed_cap(struct pci_dev *dev)
+{
+ u32 lnkcap2, lnkcap;
+
+ /*
+ * PCIe r4.0 sec 7.5.3.18 recommends using the Supported Link
+ * Speeds Vector in Link Capabilities 2 when supported, falling
+ * back to Max Link Speed in Link Capabilities otherwise.
+ */
+ pcie_capability_read_dword(dev, PCI_EXP_LNKCAP2, &lnkcap2);
+ if (lnkcap2) { /* PCIe r3.0-compliant */
+ if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_16_0GB)
+ return PCIE_SPEED_16_0GT;
+ else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_8_0GB)
+ return PCIE_SPEED_8_0GT;
+ else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_5_0GB)
+ return PCIE_SPEED_5_0GT;
+ else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_2_5GB)
+ return PCIE_SPEED_2_5GT;
+ return PCI_SPEED_UNKNOWN;
+ }
+
+ pcie_capability_read_dword(dev, PCI_EXP_LNKCAP, &lnkcap);
+ if (lnkcap) {
+ if (lnkcap & PCI_EXP_LNKCAP_SLS_16_0GB)
+ return PCIE_SPEED_16_0GT;
+ else if (lnkcap & PCI_EXP_LNKCAP_SLS_8_0GB)
+ return PCIE_SPEED_8_0GT;
+ else if (lnkcap & PCI_EXP_LNKCAP_SLS_5_0GB)
+ return PCIE_SPEED_5_0GT;
+ else if (lnkcap & PCI_EXP_LNKCAP_SLS_2_5GB)
+ return PCIE_SPEED_2_5GT;
+ }
+
+ return PCI_SPEED_UNKNOWN;
+}
+
+/**
+ * pcie_get_width_cap - query for the PCI device's link width capability
+ * @dev: PCI device to query
+ *
+ * Query the PCI device width capability. Return the maximum link width
+ * supported by the device.
+ */
+enum pcie_link_width pcie_get_width_cap(struct pci_dev *dev)
+{
+ u32 lnkcap;
+
+ pcie_capability_read_dword(dev, PCI_EXP_LNKCAP, &lnkcap);
+ if (lnkcap)
+ return (lnkcap & PCI_EXP_LNKCAP_MLW) >> 4;
+
+ return PCIE_LNK_WIDTH_UNKNOWN;
+}
+
+/**
+ * pcie_bandwidth_capable - calculate a PCI device's link bandwidth capability
+ * @dev: PCI device
+ * @speed: storage for link speed
+ * @width: storage for link width
+ *
+ * Calculate a PCI device's link bandwidth by querying for its link speed
+ * and width, multiplying them, and applying encoding overhead. The result
+ * is in Mb/s, i.e., megabits/second of raw bandwidth.
+ */
+u32 pcie_bandwidth_capable(struct pci_dev *dev, enum pci_bus_speed *speed,
+ enum pcie_link_width *width)
+{
+ *speed = pcie_get_speed_cap(dev);
+ *width = pcie_get_width_cap(dev);
+
+ if (*speed == PCI_SPEED_UNKNOWN || *width == PCIE_LNK_WIDTH_UNKNOWN)
+ return 0;
+
+ return *width * PCIE_SPEED2MBS_ENC(*speed);
+}
+
+/**
+ * pcie_print_link_status - Report the PCI device's link speed and width
+ * @dev: PCI device to query
+ *
+ * Report the available bandwidth at the device. If this is less than the
+ * device is capable of, report the device's maximum possible bandwidth and
+ * the upstream link that limits its performance to less than that.
+ */
+void pcie_print_link_status(struct pci_dev *dev)
+{
+ enum pcie_link_width width, width_cap;
+ enum pci_bus_speed speed, speed_cap;
+ struct pci_dev *limiting_dev = NULL;
+ u32 bw_avail, bw_cap;
+
+ bw_cap = pcie_bandwidth_capable(dev, &speed_cap, &width_cap);
+ bw_avail = pcie_bandwidth_available(dev, &limiting_dev, &speed, &width);
+
+ if (bw_avail >= bw_cap)
+ pci_info(dev, "%u.%03u Gb/s available bandwidth (%s x%d link)\n",
+ bw_cap / 1000, bw_cap % 1000,
+ PCIE_SPEED2STR(speed_cap), width_cap);
+ else
+ pci_info(dev, "%u.%03u Gb/s available bandwidth, limited by %s x%d link at %s (capable of %u.%03u Gb/s with %s x%d link)\n",
+ bw_avail / 1000, bw_avail % 1000,
+ PCIE_SPEED2STR(speed), width,
+ limiting_dev ? pci_name(limiting_dev) : "<unknown>",
+ bw_cap / 1000, bw_cap % 1000,
+ PCIE_SPEED2STR(speed_cap), width_cap);
+}
+EXPORT_SYMBOL(pcie_print_link_status);
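/*
 * Editor's note: illustrative sketch, not part of this patch. A bandwidth-
 * sensitive driver (e.g. a high-speed NIC) might call
 * pcie_print_link_status() from its probe path so the log records any
 * upstream bottleneck; the probe function below is a hypothetical
 * placeholder and would live in the driver, not in this file.
 */
static int example_nic_probe(struct pci_dev *pdev,
			     const struct pci_device_id *id)
{
	pcie_print_link_status(pdev);
	return 0;
}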
+
+/**
* pci_select_bars - Make BAR mask from the type of resource
* @dev: the PCI device for which BAR mask is made
* @flags: resource type mask to be selected
@@ -5611,8 +5755,9 @@ static int of_pci_bus_find_domain_nr(struct device *parent)
use_dt_domains = 0;
domain = pci_get_new_domain_nr();
} else {
- dev_err(parent, "Node %pOF has inconsistent \"linux,pci-domain\" property in DT\n",
- parent->of_node);
+ if (parent)
+ pr_err("Node %pOF has ", parent->of_node);
+ pr_err("Inconsistent \"linux,pci-domain\" property in DT\n");
domain = -1;
}
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index fcd81911b127..023f7cf25bff 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -71,6 +71,7 @@ void pci_update_current_state(struct pci_dev *dev, pci_power_t state);
void pci_power_up(struct pci_dev *dev);
void pci_disable_enabled_device(struct pci_dev *dev);
int pci_finish_runtime_suspend(struct pci_dev *dev);
+void pcie_clear_root_pme_status(struct pci_dev *dev);
int __pci_pme_wakeup(struct pci_dev *dev, void *ign);
void pci_pme_restore(struct pci_dev *dev);
bool pci_dev_keep_suspended(struct pci_dev *dev);
@@ -104,25 +105,10 @@ static inline bool pci_power_manageable(struct pci_dev *pci_dev)
return !pci_has_subordinate(pci_dev) || pci_dev->bridge_d3;
}
-struct pci_vpd_ops {
- ssize_t (*read)(struct pci_dev *dev, loff_t pos, size_t count, void *buf);
- ssize_t (*write)(struct pci_dev *dev, loff_t pos, size_t count, const void *buf);
- int (*set_size)(struct pci_dev *dev, size_t len);
-};
-
-struct pci_vpd {
- const struct pci_vpd_ops *ops;
- struct bin_attribute *attr; /* Descriptor for sysfs VPD entry */
- struct mutex lock;
- unsigned int len;
- u16 flag;
- u8 cap;
- u8 busy:1;
- u8 valid:1;
-};
-
int pci_vpd_init(struct pci_dev *dev);
void pci_vpd_release(struct pci_dev *dev);
+void pcie_vpd_create_sysfs_dev_files(struct pci_dev *dev);
+void pcie_vpd_remove_sysfs_dev_files(struct pci_dev *dev);
/* PCI /proc functions */
#ifdef CONFIG_PROC_FS
@@ -253,6 +239,27 @@ bool pci_bus_clip_resource(struct pci_dev *dev, int idx);
void pci_reassigndev_resource_alignment(struct pci_dev *dev);
void pci_disable_bridge_window(struct pci_dev *dev);
+/* PCIe link information */
+#define PCIE_SPEED2STR(speed) \
+ ((speed) == PCIE_SPEED_16_0GT ? "16 GT/s" : \
+ (speed) == PCIE_SPEED_8_0GT ? "8 GT/s" : \
+ (speed) == PCIE_SPEED_5_0GT ? "5 GT/s" : \
+ (speed) == PCIE_SPEED_2_5GT ? "2.5 GT/s" : \
+ "Unknown speed")
+
+/* PCIe speed to Mb/s reduced by encoding overhead */
+#define PCIE_SPEED2MBS_ENC(speed) \
+ ((speed) == PCIE_SPEED_16_0GT ? 16000*128/130 : \
+ (speed) == PCIE_SPEED_8_0GT ? 8000*128/130 : \
+ (speed) == PCIE_SPEED_5_0GT ? 5000*8/10 : \
+ (speed) == PCIE_SPEED_2_5GT ? 2500*8/10 : \
+ 0)
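/*
 * Editor's note (worked example, not part of this patch): an 8 GT/s x4 link
 * encodes to 8000 * 128/130 = 7876 Mb/s per lane, so the link totals
 * 4 * 7876 = 31504 Mb/s, which pcie_print_link_status() formats as
 * "31.504 Gb/s".
 */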
+
+enum pci_bus_speed pcie_get_speed_cap(struct pci_dev *dev);
+enum pcie_link_width pcie_get_width_cap(struct pci_dev *dev);
+u32 pcie_bandwidth_capable(struct pci_dev *dev, enum pci_bus_speed *speed,
+ enum pcie_link_width *width);
+
/* Single Root I/O Virtualization */
struct pci_sriov {
int pos; /* Capability position */
@@ -271,6 +278,10 @@ struct pci_sriov {
u16 driver_max_VFs; /* Max num VFs driver supports */
struct pci_dev *dev; /* Lowest numbered PF */
struct pci_dev *self; /* This PF */
+ u32 class; /* VF device */
+ u8 hdr_type; /* VF header type */
+ u16 subsystem_vendor; /* VF subsystem vendor */
+ u16 subsystem_device; /* VF subsystem device */
resource_size_t barsz[PCI_SRIOV_NUM_BARS]; /* VF BAR size */
bool drivers_autoprobe; /* Auto probing of VFs by driver */
};
diff --git a/drivers/pci/pcie/Makefile b/drivers/pci/pcie/Makefile
index 223e4c34c29a..800e1d404a45 100644
--- a/drivers/pci/pcie/Makefile
+++ b/drivers/pci/pcie/Makefile
@@ -1,20 +1,13 @@
# SPDX-License-Identifier: GPL-2.0
#
-# Makefile for PCI-Express PORT Driver
-#
-
-# Build PCI Express ASPM if needed
-obj-$(CONFIG_PCIEASPM) += aspm.o
+# Makefile for PCI Express features and port driver
-pcieportdrv-y := portdrv_core.o portdrv_pci.o portdrv_bus.o
-pcieportdrv-$(CONFIG_ACPI) += portdrv_acpi.o
+pcieportdrv-y := portdrv_core.o portdrv_pci.o
obj-$(CONFIG_PCIEPORTBUS) += pcieportdrv.o
-# Build PCI Express AER if needed
+obj-$(CONFIG_PCIEASPM) += aspm.o
obj-$(CONFIG_PCIEAER) += aer/
-
-obj-$(CONFIG_PCIE_PME) += pme.o
-
-obj-$(CONFIG_PCIE_DPC) += pcie-dpc.o
-obj-$(CONFIG_PCIE_PTM) += ptm.o
+obj-$(CONFIG_PCIE_PME) += pme.o
+obj-$(CONFIG_PCIE_DPC) += dpc.o
+obj-$(CONFIG_PCIE_PTM) += ptm.o
diff --git a/drivers/pci/pcie/aer/aer_inject.c b/drivers/pci/pcie/aer/aer_inject.c
index 25e1feb962c5..a49090935303 100644
--- a/drivers/pci/pcie/aer/aer_inject.c
+++ b/drivers/pci/pcie/aer/aer_inject.c
@@ -344,7 +344,7 @@ static int aer_inject(struct aer_error_inj *einj)
goto out_put;
}
- pos_cap_err = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
+ pos_cap_err = dev->aer_cap;
if (!pos_cap_err) {
pci_err(dev, "aer_inject: Device doesn't support AER\n");
ret = -EPROTONOSUPPORT;
@@ -355,7 +355,7 @@ static int aer_inject(struct aer_error_inj *einj)
pci_read_config_dword(dev, pos_cap_err + PCI_ERR_UNCOR_MASK,
&uncor_mask);
- rp_pos_cap_err = pci_find_ext_capability(rpdev, PCI_EXT_CAP_ID_ERR);
+ rp_pos_cap_err = rpdev->aer_cap;
if (!rp_pos_cap_err) {
pci_err(rpdev, "aer_inject: Root port doesn't support AER\n");
ret = -EPROTONOSUPPORT;
diff --git a/drivers/pci/pcie/aer/aerdrv.c b/drivers/pci/pcie/aer/aerdrv.c
index da8331f5684d..779b3879b1b5 100644
--- a/drivers/pci/pcie/aer/aerdrv.c
+++ b/drivers/pci/pcie/aer/aerdrv.c
@@ -1,15 +1,12 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * drivers/pci/pcie/aer/aerdrv.c
- *
- * This file implements the AER root port service driver. The driver will
- * register an irq handler. When root port triggers an AER interrupt, the irq
- * handler will collect root port status and schedule a work.
+ * Implement the AER root port service driver. The driver registers an IRQ
+ * handler. When a root port triggers an AER interrupt, the IRQ handler
+ * collects root port status and schedules work.
*
* Copyright (C) 2006 Intel Corp.
* Tom Long Nguyen (tom.l.nguyen@intel.com)
* Zhang Yanmin (yanmin.zhang@intel.com)
- *
*/
#include <linux/pci.h>
@@ -21,7 +18,6 @@
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
-#include <linux/pcieport_if.h>
#include <linux/slab.h>
#include "aerdrv.h"
diff --git a/drivers/pci/pcie/aer/aerdrv.h b/drivers/pci/pcie/aer/aerdrv.h
index 5449e5ce139d..08b4584f62fe 100644
--- a/drivers/pci/pcie/aer/aerdrv.h
+++ b/drivers/pci/pcie/aer/aerdrv.h
@@ -3,17 +3,17 @@
* Copyright (C) 2006 Intel Corp.
* Tom Long Nguyen (tom.l.nguyen@intel.com)
* Zhang Yanmin (yanmin.zhang@intel.com)
- *
*/
#ifndef _AERDRV_H_
#define _AERDRV_H_
#include <linux/workqueue.h>
-#include <linux/pcieport_if.h>
#include <linux/aer.h>
#include <linux/interrupt.h>
+#include "../portdrv.h"
+
#define SYSTEM_ERROR_INTR_ON_MESG_MASK (PCI_EXP_RTCTL_SECEE| \
PCI_EXP_RTCTL_SENFEE| \
PCI_EXP_RTCTL_SEFEE)
diff --git a/drivers/pci/pcie/aer/aerdrv_acpi.c b/drivers/pci/pcie/aer/aerdrv_acpi.c
index b2019440e882..08c87de13cb8 100644
--- a/drivers/pci/pcie/aer/aerdrv_acpi.c
+++ b/drivers/pci/pcie/aer/aerdrv_acpi.c
@@ -5,7 +5,6 @@
* Copyright (C) 2006 Intel Corp.
* Tom Long Nguyen (tom.l.nguyen@intel.com)
* Zhang Yanmin (yanmin.zhang@intel.com)
- *
*/
#include <linux/module.h>
diff --git a/drivers/pci/pcie/aer/aerdrv_core.c b/drivers/pci/pcie/aer/aerdrv_core.c
index a4bfea52e7d4..0ea5acc40323 100644
--- a/drivers/pci/pcie/aer/aerdrv_core.c
+++ b/drivers/pci/pcie/aer/aerdrv_core.c
@@ -1,16 +1,13 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * drivers/pci/pcie/aer/aerdrv_core.c
- *
- * This file implements the core part of PCIe AER. When a PCIe
- * error is delivered, an error message will be collected and printed to
- * console, then, an error recovery procedure will be executed by following
- * the PCI error recovery rules.
+ * Implement the core part of PCIe AER. When a PCIe error is delivered, an
+ * error message will be collected and printed to console, then an error
+ * recovery procedure will be executed by following the PCI error recovery
+ * rules.
*
* Copyright (C) 2006 Intel Corp.
* Tom Long Nguyen (tom.l.nguyen@intel.com)
* Zhang Yanmin (yanmin.zhang@intel.com)
- *
*/
#include <linux/module.h>
diff --git a/drivers/pci/pcie/aer/aerdrv_errprint.c b/drivers/pci/pcie/aer/aerdrv_errprint.c
index 6a352e638699..cfc89dd57831 100644
--- a/drivers/pci/pcie/aer/aerdrv_errprint.c
+++ b/drivers/pci/pcie/aer/aerdrv_errprint.c
@@ -1,13 +1,10 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * drivers/pci/pcie/aer/aerdrv_errprint.c
- *
* Format error messages and print them to console.
*
* Copyright (C) 2006 Intel Corp.
* Tom Long Nguyen (tom.l.nguyen@intel.com)
* Zhang Yanmin (yanmin.zhang@intel.com)
- *
*/
#include <linux/module.h>
diff --git a/drivers/pci/pcie/aer/ecrc.c b/drivers/pci/pcie/aer/ecrc.c
index 26d3cac9e635..039efb606e31 100644
--- a/drivers/pci/pcie/aer/ecrc.c
+++ b/drivers/pci/pcie/aer/ecrc.c
@@ -1,8 +1,8 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Enables/disables PCIe ECRC checking.
+ * Enable/disable PCIe ECRC checking
*
- * (C) Copyright 2009 Hewlett-Packard Development Company, L.P.
+ * (C) Copyright 2009 Hewlett-Packard Development Company, L.P.
* Andrew Patterson <andrew.patterson@hp.com>
*/
@@ -40,7 +40,7 @@ static int enable_ecrc_checking(struct pci_dev *dev)
if (!pci_is_pcie(dev))
return -ENODEV;
- pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
+ pos = dev->aer_cap;
if (!pos)
return -ENODEV;
@@ -68,7 +68,7 @@ static int disable_ecrc_checking(struct pci_dev *dev)
if (!pci_is_pcie(dev))
return -ENODEV;
- pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
+ pos = dev->aer_cap;
if (!pos)
return -ENODEV;
diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
index 57feef2ecfe7..f76eb7704f64 100644
--- a/drivers/pci/pcie/aspm.c
+++ b/drivers/pci/pcie/aspm.c
@@ -1,7 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * File: drivers/pci/pcie/aspm.c
- * Enabling PCIe link L0s/L1 state and Clock Power Management
+ * Enable PCIe link L0s/L1 state and Clock Power Management
*
* Copyright (C) 2007 Intel
* Copyright (C) Zhang Yanmin (yanmin.zhang@intel.com)
@@ -228,6 +227,24 @@ static void pcie_aspm_configure_common_clock(struct pcie_link_state *link)
if (!(reg16 & PCI_EXP_LNKSTA_SLC))
same_clock = 0;
+ /* Port might be already in common clock mode */
+ pcie_capability_read_word(parent, PCI_EXP_LNKCTL, &reg16);
+ if (same_clock && (reg16 & PCI_EXP_LNKCTL_CCC)) {
+ bool consistent = true;
+
+ list_for_each_entry(child, &linkbus->devices, bus_list) {
+ pcie_capability_read_word(child, PCI_EXP_LNKCTL,
+ &reg16);
+ if (!(reg16 & PCI_EXP_LNKCTL_CCC)) {
+ consistent = false;
+ break;
+ }
+ }
+ if (consistent)
+ return;
+ pci_warn(parent, "ASPM: current common clock configuration is broken, reconfiguring\n");
+ }
+
/* Configure downstream component, all functions */
list_for_each_entry(child, &linkbus->devices, bus_list) {
pcie_capability_read_word(child, PCI_EXP_LNKCTL, &reg16);
@@ -322,7 +339,7 @@ static u32 calc_l1ss_pwron(struct pci_dev *pdev, u32 scale, u32 val)
static void encode_l12_threshold(u32 threshold_us, u32 *scale, u32 *value)
{
- u64 threshold_ns = threshold_us * 1000;
+ u32 threshold_ns = threshold_us * 1000;
/* See PCIe r3.1, sec 7.33.3 and sec 6.18 */
if (threshold_ns < 32) {
diff --git a/drivers/pci/pcie/pcie-dpc.c b/drivers/pci/pcie/dpc.c
index 38e40c6c576f..8c57d607e603 100644
--- a/drivers/pci/pcie/pcie-dpc.c
+++ b/drivers/pci/pcie/dpc.c
@@ -10,7 +10,8 @@
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/pci.h>
-#include <linux/pcieport_if.h>
+
+#include "portdrv.h"
#include "../pci.h"
#include "aer/aerdrv.h"
diff --git a/drivers/pci/pcie/pme.c b/drivers/pci/pcie/pme.c
index 5480f54f7612..3ed67676ea2a 100644
--- a/drivers/pci/pcie/pme.c
+++ b/drivers/pci/pcie/pme.c
@@ -14,7 +14,6 @@
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/device.h>
-#include <linux/pcieport_if.h>
#include <linux/pm_runtime.h>
#include "../pci.h"
diff --git a/drivers/pci/pcie/portdrv.h b/drivers/pci/pcie/portdrv.h
index a854bc569117..d0c6783dbfe3 100644
--- a/drivers/pci/pcie/portdrv.h
+++ b/drivers/pci/pcie/portdrv.h
@@ -1,6 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
- * File: portdrv.h
* Purpose: PCI Express Port Bus Driver's Internal Data Structures
*
* Copyright (C) 2004 Intel
@@ -12,7 +11,66 @@
#include <linux/compiler.h>
-#define PCIE_PORT_DEVICE_MAXSERVICES 5
+extern bool pcie_ports_native;
+
+/* Service Type */
+#define PCIE_PORT_SERVICE_PME_SHIFT 0 /* Power Management Event */
+#define PCIE_PORT_SERVICE_PME (1 << PCIE_PORT_SERVICE_PME_SHIFT)
+#define PCIE_PORT_SERVICE_AER_SHIFT 1 /* Advanced Error Reporting */
+#define PCIE_PORT_SERVICE_AER (1 << PCIE_PORT_SERVICE_AER_SHIFT)
+#define PCIE_PORT_SERVICE_HP_SHIFT 2 /* Native Hotplug */
+#define PCIE_PORT_SERVICE_HP (1 << PCIE_PORT_SERVICE_HP_SHIFT)
+#define PCIE_PORT_SERVICE_DPC_SHIFT 3 /* Downstream Port Containment */
+#define PCIE_PORT_SERVICE_DPC (1 << PCIE_PORT_SERVICE_DPC_SHIFT)
+
+#define PCIE_PORT_DEVICE_MAXSERVICES 4
+
+/* Port Type */
+#define PCIE_ANY_PORT (~0)
+
+struct pcie_device {
+ int irq; /* Service IRQ/MSI/MSI-X Vector */
+ struct pci_dev *port; /* Root/Upstream/Downstream Port */
+ u32 service; /* Port service this device represents */
+ void *priv_data; /* Service Private Data */
+ struct device device; /* Generic Device Interface */
+};
+#define to_pcie_device(d) container_of(d, struct pcie_device, device)
+
+static inline void set_service_data(struct pcie_device *dev, void *data)
+{
+ dev->priv_data = data;
+}
+
+static inline void *get_service_data(struct pcie_device *dev)
+{
+ return dev->priv_data;
+}
+
+struct pcie_port_service_driver {
+ const char *name;
+ int (*probe) (struct pcie_device *dev);
+ void (*remove) (struct pcie_device *dev);
+ int (*suspend) (struct pcie_device *dev);
+ int (*resume) (struct pcie_device *dev);
+
+ /* Device driver may resume normal operations */
+ void (*error_resume)(struct pci_dev *dev);
+
+ /* Link Reset Capability - AER service driver specific */
+ pci_ers_result_t (*reset_link) (struct pci_dev *dev);
+
+ int port_type; /* Type of the port this driver can handle */
+ u32 service; /* Port service this device represents */
+
+ struct device_driver driver;
+};
+#define to_service_driver(d) \
+ container_of(d, struct pcie_port_service_driver, driver)
+
+int pcie_port_service_register(struct pcie_port_service_driver *new);
+void pcie_port_service_unregister(struct pcie_port_service_driver *new);
+
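/*
 * Editor's note: minimal sketch of a port service driver, not part of this
 * patch. A real service (PME, AER, hotplug, DPC) fills in probe/remove,
 * picks one PCIE_PORT_SERVICE_* bit and registers the driver; all names
 * below are placeholders.
 */
#if 0	/* illustration only */
static int example_service_probe(struct pcie_device *dev)
{
	set_service_data(dev, NULL);
	return 0;
}

static struct pcie_port_service_driver example_service = {
	.name		= "example_service",
	.port_type	= PCIE_ANY_PORT,
	.service	= PCIE_PORT_SERVICE_PME,
	.probe		= example_service_probe,
};
/* registered at init time with pcie_port_service_register(&example_service) */
#endif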
/*
* The PCIe Capability Interrupt Message Number (PCIe r3.1, sec 7.8.2) must
* be one of the first 32 MSI-X entries. Per PCI r3.0, sec 6.8.3.1, MSI
@@ -34,20 +92,6 @@ void pcie_port_bus_unregister(void);
struct pci_dev;
-void pcie_clear_root_pme_status(struct pci_dev *dev);
-
-#ifdef CONFIG_HOTPLUG_PCI_PCIE
-extern bool pciehp_msi_disabled;
-
-static inline bool pciehp_no_msi(void)
-{
- return pciehp_msi_disabled;
-}
-
-#else /* !CONFIG_HOTPLUG_PCI_PCIE */
-static inline bool pciehp_no_msi(void) { return false; }
-#endif /* !CONFIG_HOTPLUG_PCI_PCIE */
-
#ifdef CONFIG_PCIE_PME
extern bool pcie_pme_msi_disabled;
@@ -68,15 +112,4 @@ static inline bool pcie_pme_no_msi(void) { return false; }
static inline void pcie_pme_interrupt_enable(struct pci_dev *dev, bool en) {}
#endif /* !CONFIG_PCIE_PME */
-#ifdef CONFIG_ACPI
-void pcie_port_acpi_setup(struct pci_dev *port, int *mask);
-
-static inline void pcie_port_platform_notify(struct pci_dev *port, int *mask)
-{
- pcie_port_acpi_setup(port, mask);
-}
-#else /* !CONFIG_ACPI */
-static inline void pcie_port_platform_notify(struct pci_dev *port, int *mask){}
-#endif /* !CONFIG_ACPI */
-
#endif /* _PORTDRV_H_ */
diff --git a/drivers/pci/pcie/portdrv_acpi.c b/drivers/pci/pcie/portdrv_acpi.c
index 319c94976873..8ab5d434b9c6 100644
--- a/drivers/pci/pcie/portdrv_acpi.c
+++ b/drivers/pci/pcie/portdrv_acpi.c
@@ -10,7 +10,6 @@
#include <linux/errno.h>
#include <linux/acpi.h>
#include <linux/pci-acpi.h>
-#include <linux/pcieport_if.h>
#include "aer/aerdrv.h"
#include "../pci.h"
@@ -48,11 +47,11 @@ void pcie_port_acpi_setup(struct pci_dev *port, int *srv_mask)
flags = root->osc_control_set;
- *srv_mask = PCIE_PORT_SERVICE_VC | PCIE_PORT_SERVICE_DPC;
+ *srv_mask = 0;
if (flags & OSC_PCI_EXPRESS_NATIVE_HP_CONTROL)
*srv_mask |= PCIE_PORT_SERVICE_HP;
if (flags & OSC_PCI_EXPRESS_PME_CONTROL)
*srv_mask |= PCIE_PORT_SERVICE_PME;
if (flags & OSC_PCI_EXPRESS_AER_CONTROL)
- *srv_mask |= PCIE_PORT_SERVICE_AER;
+ *srv_mask |= PCIE_PORT_SERVICE_AER | PCIE_PORT_SERVICE_DPC;
}
diff --git a/drivers/pci/pcie/portdrv_bus.c b/drivers/pci/pcie/portdrv_bus.c
deleted file mode 100644
index f0fba552a0e2..000000000000
--- a/drivers/pci/pcie/portdrv_bus.c
+++ /dev/null
@@ -1,56 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * File: portdrv_bus.c
- * Purpose: PCI Express Port Bus Driver's Bus Overloading Functions
- *
- * Copyright (C) 2004 Intel
- * Copyright (C) Tom Long Nguyen (tom.l.nguyen@intel.com)
- */
-
-#include <linux/module.h>
-#include <linux/pci.h>
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/pm.h>
-
-#include <linux/pcieport_if.h>
-#include "portdrv.h"
-
-static int pcie_port_bus_match(struct device *dev, struct device_driver *drv);
-
-struct bus_type pcie_port_bus_type = {
- .name = "pci_express",
- .match = pcie_port_bus_match,
-};
-EXPORT_SYMBOL_GPL(pcie_port_bus_type);
-
-static int pcie_port_bus_match(struct device *dev, struct device_driver *drv)
-{
- struct pcie_device *pciedev;
- struct pcie_port_service_driver *driver;
-
- if (drv->bus != &pcie_port_bus_type || dev->bus != &pcie_port_bus_type)
- return 0;
-
- pciedev = to_pcie_device(dev);
- driver = to_service_driver(drv);
-
- if (driver->service != pciedev->service)
- return 0;
-
- if ((driver->port_type != PCIE_ANY_PORT) &&
- (driver->port_type != pci_pcie_type(pciedev->port)))
- return 0;
-
- return 1;
-}
-
-int pcie_port_bus_register(void)
-{
- return bus_register(&pcie_port_bus_type);
-}
-
-void pcie_port_bus_unregister(void)
-{
- bus_unregister(&pcie_port_bus_type);
-}
diff --git a/drivers/pci/pcie/portdrv_core.c b/drivers/pci/pcie/portdrv_core.c
index ef3bad4ad010..c9c0663db282 100644
--- a/drivers/pci/pcie/portdrv_core.c
+++ b/drivers/pci/pcie/portdrv_core.c
@@ -1,6 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * File: portdrv_core.c
* Purpose: PCI Express Port Bus Driver's Core Functions
*
* Copyright (C) 2004 Intel
@@ -15,23 +14,11 @@
#include <linux/pm_runtime.h>
#include <linux/string.h>
#include <linux/slab.h>
-#include <linux/pcieport_if.h>
#include <linux/aer.h>
#include "../pci.h"
#include "portdrv.h"
-bool pciehp_msi_disabled;
-
-static int __init pciehp_setup(char *str)
-{
- if (!strncmp(str, "nomsi", 5))
- pciehp_msi_disabled = true;
-
- return 1;
-}
-__setup("pcie_hp=", pciehp_setup);
-
/**
* release_pcie_device - free PCI Express port service device structure
* @dev: Port service device to release
@@ -52,7 +39,7 @@ static void release_pcie_device(struct device *dev)
static int pcie_message_numbers(struct pci_dev *dev, int mask,
u32 *pme, u32 *aer, u32 *dpc)
{
- u32 nvec = 0, pos, reg32;
+ u32 nvec = 0, pos;
u16 reg16;
/*
@@ -68,8 +55,11 @@ static int pcie_message_numbers(struct pci_dev *dev, int mask,
nvec = *pme + 1;
}
+#ifdef CONFIG_PCIEAER
if (mask & PCIE_PORT_SERVICE_AER) {
- pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
+ u32 reg32;
+
+ pos = dev->aer_cap;
if (pos) {
pci_read_config_dword(dev, pos + PCI_ERR_ROOT_STATUS,
&reg32);
@@ -77,6 +67,7 @@ static int pcie_message_numbers(struct pci_dev *dev, int mask,
nvec = max(nvec, *aer + 1);
}
}
+#endif
if (mask & PCIE_PORT_SERVICE_DPC) {
pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_DPC);
@@ -169,16 +160,13 @@ static int pcie_init_service_irqs(struct pci_dev *dev, int *irqs, int mask)
irqs[i] = -1;
/*
- * If we support PME or hotplug, but we can't use MSI/MSI-X for
- * them, we have to fall back to INTx or other interrupts, e.g., a
- * system shared interrupt.
+ * If we support PME but can't use MSI/MSI-X for it, we have to
+ * fall back to INTx or other interrupts, e.g., a system shared
+ * interrupt.
*/
if ((mask & PCIE_PORT_SERVICE_PME) && pcie_pme_no_msi())
goto legacy_irq;
- if ((mask & PCIE_PORT_SERVICE_HP) && pciehp_no_msi())
- goto legacy_irq;
-
/* Try to use MSI-X or MSI if supported */
if (pcie_port_enable_irq_vec(dev, irqs, mask) == 0)
return 0;
@@ -189,10 +177,8 @@ legacy_irq:
if (ret < 0)
return -ENODEV;
- for (i = 0; i < PCIE_PORT_DEVICE_MAXSERVICES; i++) {
- if (i != PCIE_PORT_SERVICE_VC_SHIFT)
- irqs[i] = pci_irq_vector(dev, 0);
- }
+ for (i = 0; i < PCIE_PORT_DEVICE_MAXSERVICES; i++)
+ irqs[i] = pci_irq_vector(dev, 0);
return 0;
}
@@ -209,23 +195,13 @@ legacy_irq:
*/
static int get_port_device_capability(struct pci_dev *dev)
{
+ struct pci_host_bridge *host = pci_find_host_bridge(dev->bus);
int services = 0;
- int cap_mask = 0;
-
- if (pcie_ports_disabled)
- return 0;
-
- cap_mask = PCIE_PORT_SERVICE_PME | PCIE_PORT_SERVICE_HP
- | PCIE_PORT_SERVICE_VC;
- if (pci_aer_available())
- cap_mask |= PCIE_PORT_SERVICE_AER | PCIE_PORT_SERVICE_DPC;
- if (pcie_ports_auto)
- pcie_port_platform_notify(dev, &cap_mask);
-
- /* Hot-Plug Capable */
- if ((cap_mask & PCIE_PORT_SERVICE_HP) && dev->is_hotplug_bridge) {
+ if (dev->is_hotplug_bridge &&
+ (pcie_ports_native || host->native_hotplug)) {
services |= PCIE_PORT_SERVICE_HP;
+
/*
* Disable hot-plug interrupts in case they have been enabled
* by the BIOS and the hot-plug service driver is not loaded.
@@ -233,23 +209,29 @@ static int get_port_device_capability(struct pci_dev *dev)
pcie_capability_clear_word(dev, PCI_EXP_SLTCTL,
PCI_EXP_SLTCTL_CCIE | PCI_EXP_SLTCTL_HPIE);
}
- /* AER capable */
- if ((cap_mask & PCIE_PORT_SERVICE_AER)
- && pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR)) {
+
+#ifdef CONFIG_PCIEAER
+ if (dev->aer_cap && pci_aer_available() &&
+ (pcie_ports_native || host->native_aer)) {
services |= PCIE_PORT_SERVICE_AER;
+
/*
* Disable AER on this port in case it's been enabled by the
* BIOS (the AER service driver will enable it when necessary).
*/
pci_disable_pcie_error_reporting(dev);
}
- /* VC support */
- if (pci_find_ext_capability(dev, PCI_EXT_CAP_ID_VC))
- services |= PCIE_PORT_SERVICE_VC;
- /* Root ports are capable of generating PME too */
- if ((cap_mask & PCIE_PORT_SERVICE_PME)
- && pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT) {
+#endif
+
+ /*
+ * Root ports are capable of generating PME too. Root Complex
+ * Event Collectors can also generate PMEs, but we don't handle
+ * those yet.
+ */
+ if (pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT &&
+ (pcie_ports_native || host->native_pme)) {
services |= PCIE_PORT_SERVICE_PME;
+
/*
* Disable PME interrupt on this port in case it's been enabled
* by the BIOS (the PME service driver will enable it when
@@ -257,7 +239,9 @@ static int get_port_device_capability(struct pci_dev *dev)
*/
pcie_pme_interrupt_enable(dev, false);
}
- if (pci_find_ext_capability(dev, PCI_EXT_CAP_ID_DPC))
+
+ if (pci_find_ext_capability(dev, PCI_EXT_CAP_ID_DPC) &&
+ pci_aer_available() && services & PCIE_PORT_SERVICE_AER)
services |= PCIE_PORT_SERVICE_DPC;
return services;
@@ -335,7 +319,7 @@ int pcie_port_device_register(struct pci_dev *dev)
*/
status = pcie_init_service_irqs(dev, irqs, capabilities);
if (status) {
- capabilities &= PCIE_PORT_SERVICE_VC | PCIE_PORT_SERVICE_HP;
+ capabilities &= PCIE_PORT_SERVICE_HP;
if (!capabilities)
goto error_disable;
}
diff --git a/drivers/pci/pcie/portdrv_pci.c b/drivers/pci/pcie/portdrv_pci.c
index fb1c1bb87316..973f1b80a038 100644
--- a/drivers/pci/pcie/portdrv_pci.c
+++ b/drivers/pci/pcie/portdrv_pci.c
@@ -1,9 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * File: portdrv_pci.c
* Purpose: PCI Express Port Bus Driver
* Author: Tom Nguyen <tom.l.nguyen@intel.com>
- * Version: v1.0
*
* Copyright (C) 2004 Intel
* Copyright (C) Tom Long Nguyen (tom.l.nguyen@intel.com)
@@ -15,10 +13,8 @@
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/init.h>
-#include <linux/pcieport_if.h>
#include <linux/aer.h>
#include <linux/dmi.h>
-#include <linux/pci-aspm.h>
#include "../pci.h"
#include "portdrv.h"
@@ -27,22 +23,18 @@
bool pcie_ports_disabled;
/*
- * If this switch is set, ACPI _OSC will be used to determine whether or not to
- * enable PCIe port native services.
+ * If the user specified "pcie_ports=native", use the PCIe services regardless
+ * of whether the platform has given us permission. On ACPI systems, this
+ * means we ignore _OSC.
*/
-bool pcie_ports_auto = true;
+bool pcie_ports_native;
static int __init pcie_port_setup(char *str)
{
- if (!strncmp(str, "compat", 6)) {
+ if (!strncmp(str, "compat", 6))
pcie_ports_disabled = true;
- } else if (!strncmp(str, "native", 6)) {
- pcie_ports_disabled = false;
- pcie_ports_auto = false;
- } else if (!strncmp(str, "auto", 4)) {
- pcie_ports_disabled = false;
- pcie_ports_auto = true;
- }
+ else if (!strncmp(str, "native", 6))
+ pcie_ports_native = true;
return 1;
}
@@ -50,15 +42,6 @@ __setup("pcie_ports=", pcie_port_setup);
/* global data */
-/**
- * pcie_clear_root_pme_status - Clear root port PME interrupt status.
- * @dev: PCIe root port or event collector.
- */
-void pcie_clear_root_pme_status(struct pci_dev *dev)
-{
- pcie_capability_set_dword(dev, PCI_EXP_RTSTA, PCI_EXP_RTSTA_PME);
-}
-
static int pcie_portdrv_restore_config(struct pci_dev *dev)
{
int retval;
@@ -71,20 +54,6 @@ static int pcie_portdrv_restore_config(struct pci_dev *dev)
}
#ifdef CONFIG_PM
-static int pcie_port_resume_noirq(struct device *dev)
-{
- struct pci_dev *pdev = to_pci_dev(dev);
-
- /*
- * Some BIOSes forget to clear Root PME Status bits after system wakeup
- * which breaks ACPI-based runtime wakeup on PCI Express, so clear those
- * bits now just in case (shouldn't hurt).
- */
- if (pci_pcie_type(pdev) == PCI_EXP_TYPE_ROOT_PORT)
- pcie_clear_root_pme_status(pdev);
- return 0;
-}
-
static int pcie_port_runtime_suspend(struct device *dev)
{
return to_pci_dev(dev)->bridge_d3 ? 0 : -EBUSY;
@@ -112,7 +81,6 @@ static const struct dev_pm_ops pcie_portdrv_pm_ops = {
.thaw = pcie_port_device_resume,
.poweroff = pcie_port_device_suspend,
.restore = pcie_port_device_resume,
- .resume_noirq = pcie_port_resume_noirq,
.runtime_suspend = pcie_port_runtime_suspend,
.runtime_resume = pcie_port_runtime_resume,
.runtime_idle = pcie_port_runtime_idle,
@@ -283,22 +251,11 @@ static const struct dmi_system_id pcie_portdrv_dmi_table[] __initconst = {
static int __init pcie_portdrv_init(void)
{
- int retval;
-
if (pcie_ports_disabled)
- return pci_register_driver(&pcie_portdriver);
+ return -EACCES;
dmi_check_system(pcie_portdrv_dmi_table);
- retval = pcie_port_bus_register();
- if (retval) {
- printk(KERN_WARNING "PCIE: bus_register error: %d\n", retval);
- goto out;
- }
- retval = pci_register_driver(&pcie_portdriver);
- if (retval)
- pcie_port_bus_unregister();
- out:
- return retval;
+ return pci_register_driver(&pcie_portdriver);
}
device_initcall(pcie_portdrv_init);
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index ef5377438a1e..caa07109e5f5 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * probe.c - PCI detection and setup code
+ * PCI detection and setup code
*/
#include <linux/kernel.h>
@@ -329,6 +329,10 @@ static void pci_read_bases(struct pci_dev *dev, unsigned int howmany, int rom)
if (dev->non_compliant_bars)
return;
+ /* Per PCIe r4.0, sec 9.3.4.1.11, the VF BARs are all RO Zero */
+ if (dev->is_virtfn)
+ return;
+
for (pos = 0; pos < howmany; pos++) {
struct resource *res = &dev->resource[pos];
reg = PCI_BASE_ADDRESS_0 + (pos << 2);
@@ -540,6 +544,16 @@ struct pci_host_bridge *pci_alloc_host_bridge(size_t priv)
INIT_LIST_HEAD(&bridge->windows);
bridge->dev.release = pci_release_host_bridge_dev;
+ /*
+ * We assume we can manage these PCIe features. Some systems may
+ * reserve these for use by the platform itself, e.g., an ACPI BIOS
+ * may implement its own AER handling and use _OSC to prevent the
+ * OS from interfering.
+ */
+ bridge->native_aer = 1;
+ bridge->native_hotplug = 1;
+ bridge->native_pme = 1;
+
return bridge;
}
EXPORT_SYMBOL(pci_alloc_host_bridge);
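/*
 * Editor's note: not part of this patch. Platform code is expected to clear
 * whichever of these flags it did not get control of; on ACPI systems that
 * would follow the _OSC result along these (sketched, hypothetical) lines:
 *
 *	if (!(root->osc_control_set & OSC_PCI_EXPRESS_NATIVE_HP_CONTROL))
 *		host_bridge->native_hotplug = 0;
 *	if (!(root->osc_control_set & OSC_PCI_EXPRESS_AER_CONTROL))
 *		host_bridge->native_aer = 0;
 */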
@@ -592,7 +606,7 @@ const unsigned char pcie_link_speed[] = {
PCIE_SPEED_2_5GT, /* 1 */
PCIE_SPEED_5_0GT, /* 2 */
PCIE_SPEED_8_0GT, /* 3 */
- PCI_SPEED_UNKNOWN, /* 4 */
+ PCIE_SPEED_16_0GT, /* 4 */
PCI_SPEED_UNKNOWN, /* 5 */
PCI_SPEED_UNKNOWN, /* 6 */
PCI_SPEED_UNKNOWN, /* 7 */
@@ -1230,6 +1244,13 @@ static void pci_read_irq(struct pci_dev *dev)
{
unsigned char irq;
+ /* VFs are not allowed to use INTx, so skip the config reads */
+ if (dev->is_virtfn) {
+ dev->pin = 0;
+ dev->irq = 0;
+ return;
+ }
+
pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &irq);
dev->pin = irq;
if (irq)
@@ -1389,6 +1410,43 @@ int pci_cfg_space_size(struct pci_dev *dev)
return PCI_CFG_SPACE_SIZE;
}
+static u32 pci_class(struct pci_dev *dev)
+{
+ u32 class;
+
+#ifdef CONFIG_PCI_IOV
+ if (dev->is_virtfn)
+ return dev->physfn->sriov->class;
+#endif
+ pci_read_config_dword(dev, PCI_CLASS_REVISION, &class);
+ return class;
+}
+
+static void pci_subsystem_ids(struct pci_dev *dev, u16 *vendor, u16 *device)
+{
+#ifdef CONFIG_PCI_IOV
+ if (dev->is_virtfn) {
+ *vendor = dev->physfn->sriov->subsystem_vendor;
+ *device = dev->physfn->sriov->subsystem_device;
+ return;
+ }
+#endif
+ pci_read_config_word(dev, PCI_SUBSYSTEM_VENDOR_ID, vendor);
+ pci_read_config_word(dev, PCI_SUBSYSTEM_ID, device);
+}
+
+static u8 pci_hdr_type(struct pci_dev *dev)
+{
+ u8 hdr_type;
+
+#ifdef CONFIG_PCI_IOV
+ if (dev->is_virtfn)
+ return dev->physfn->sriov->hdr_type;
+#endif
+ pci_read_config_byte(dev, PCI_HEADER_TYPE, &hdr_type);
+ return hdr_type;
+}
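/*
 * Editor's note: not part of this patch. These helpers assume the PF's
 * SR-IOV setup has cached the VF class, header type and subsystem IDs in the
 * new struct pci_sriov fields (see the drivers/pci/pci.h hunk earlier in
 * this diff). One plausible way to populate them, hypothetical and shown
 * only as a sketch, is a one-time read from the first VF once VFs are
 * enabled:
 *
 *	pci_read_config_dword(vf0, PCI_CLASS_REVISION, &iov->class);
 *	pci_read_config_byte(vf0, PCI_HEADER_TYPE, &iov->hdr_type);
 *	pci_read_config_word(vf0, PCI_SUBSYSTEM_VENDOR_ID, &iov->subsystem_vendor);
 *	pci_read_config_word(vf0, PCI_SUBSYSTEM_ID, &iov->subsystem_device);
 */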
+
#define LEGACY_IO_RESOURCE (IORESOURCE_IO | IORESOURCE_PCI_FIXED)
static void pci_msi_setup_pci_dev(struct pci_dev *dev)
@@ -1454,8 +1512,7 @@ int pci_setup_device(struct pci_dev *dev)
struct pci_bus_region region;
struct resource *res;
- if (pci_read_config_byte(dev, PCI_HEADER_TYPE, &hdr_type))
- return -EIO;
+ hdr_type = pci_hdr_type(dev);
dev->sysdata = dev->bus->sysdata;
dev->dev.parent = dev->bus->bridge;
@@ -1477,7 +1534,8 @@ int pci_setup_device(struct pci_dev *dev)
dev->bus->number, PCI_SLOT(dev->devfn),
PCI_FUNC(dev->devfn));
- pci_read_config_dword(dev, PCI_CLASS_REVISION, &class);
+ class = pci_class(dev);
+
dev->revision = class & 0xff;
dev->class = class >> 8; /* upper 3 bytes */
@@ -1517,8 +1575,8 @@ int pci_setup_device(struct pci_dev *dev)
goto bad;
pci_read_irq(dev);
pci_read_bases(dev, 6, PCI_ROM_ADDRESS);
- pci_read_config_word(dev, PCI_SUBSYSTEM_VENDOR_ID, &dev->subsystem_vendor);
- pci_read_config_word(dev, PCI_SUBSYSTEM_ID, &dev->subsystem_device);
+
+ pci_subsystem_ids(dev, &dev->subsystem_vendor, &dev->subsystem_device);
/*
* Do the ugly legacy mode stuff here rather than broken chip
@@ -2121,6 +2179,9 @@ static void pci_init_capabilities(struct pci_dev *dev)
/* Advanced Error Reporting */
pci_aer_init(dev);
+
+ if (pci_probe_reset_function(dev) == 0)
+ dev->reset_fn = 1;
}
/*
diff --git a/drivers/pci/proc.c b/drivers/pci/proc.c
index 58a662e3c4a6..1ee8927a0635 100644
--- a/drivers/pci/proc.c
+++ b/drivers/pci/proc.c
@@ -1,8 +1,8 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Procfs interface for the PCI bus.
+ * Procfs interface for the PCI bus
*
- * Copyright (c) 1997--1999 Martin Mares <mj@ucw.cz>
+ * Copyright (c) 1997--1999 Martin Mares <mj@ucw.cz>
*/
#include <linux/init.h>
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index fc734014206f..76eaf4ad7652 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -1,15 +1,15 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * This file contains work-arounds for many known PCI hardware
- * bugs. Devices present only on certain architectures (host
- * bridges et cetera) should be handled in arch-specific code.
+ * This file contains work-arounds for many known PCI hardware bugs.
+ * Devices present only on certain architectures (host bridges et cetera)
+ * should be handled in arch-specific code.
*
- * Note: any quirks for hotpluggable devices must _NOT_ be declared __init.
+ * Note: any quirks for hotpluggable devices must _NOT_ be declared __init.
*
- * Copyright (c) 1999 Martin Mares <mj@ucw.cz>
+ * Copyright (c) 1999 Martin Mares <mj@ucw.cz>
*
- * Init/reset quirks for USB host controllers should be in the
- * USB quirks file, where their drivers can access reuse it.
+ * Init/reset quirks for USB host controllers should be in the USB quirks
+ * file, where their drivers can use them.
*/
#include <linux/types.h>
@@ -1967,31 +1967,6 @@ static void quirk_netmos(struct pci_dev *dev)
DECLARE_PCI_FIXUP_CLASS_HEADER(PCI_VENDOR_ID_NETMOS, PCI_ANY_ID,
PCI_CLASS_COMMUNICATION_SERIAL, 8, quirk_netmos);
-/*
- * Quirk non-zero PCI functions to route VPD access through function 0 for
- * devices that share VPD resources between functions. The functions are
- * expected to be identical devices.
- */
-static void quirk_f0_vpd_link(struct pci_dev *dev)
-{
- struct pci_dev *f0;
-
- if (!PCI_FUNC(dev->devfn))
- return;
-
- f0 = pci_get_slot(dev->bus, PCI_DEVFN(PCI_SLOT(dev->devfn), 0));
- if (!f0)
- return;
-
- if (f0->vpd && dev->class == f0->class &&
- dev->vendor == f0->vendor && dev->device == f0->device)
- dev->dev_flags |= PCI_DEV_FLAGS_VPD_REF_F0;
-
- pci_dev_put(f0);
-}
-DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, PCI_ANY_ID,
- PCI_CLASS_NETWORK_ETHERNET, 8, quirk_f0_vpd_link);
-
static void quirk_e100_interrupt(struct pci_dev *dev)
{
u16 command, pmcsr;
@@ -2182,83 +2157,6 @@ static void quirk_via_cx700_pci_parking_caching(struct pci_dev *dev)
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, 0x324e, quirk_via_cx700_pci_parking_caching);
-/*
- * If a device follows the VPD format spec, the PCI core will not read or
- * write past the VPD End Tag. But some vendors do not follow the VPD
- * format spec, so we can't tell how much data is safe to access. Devices
- * may behave unpredictably if we access too much. Blacklist these devices
- * so we don't touch VPD at all.
- */
-static void quirk_blacklist_vpd(struct pci_dev *dev)
-{
- if (dev->vpd) {
- dev->vpd->len = 0;
- pci_warn(dev, FW_BUG "disabling VPD access (can't determine size of non-standard VPD format)\n");
- }
-}
-
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LSI_LOGIC, 0x0060, quirk_blacklist_vpd);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LSI_LOGIC, 0x007c, quirk_blacklist_vpd);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LSI_LOGIC, 0x0413, quirk_blacklist_vpd);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LSI_LOGIC, 0x0078, quirk_blacklist_vpd);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LSI_LOGIC, 0x0079, quirk_blacklist_vpd);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LSI_LOGIC, 0x0073, quirk_blacklist_vpd);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LSI_LOGIC, 0x0071, quirk_blacklist_vpd);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LSI_LOGIC, 0x005b, quirk_blacklist_vpd);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LSI_LOGIC, 0x002f, quirk_blacklist_vpd);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LSI_LOGIC, 0x005d, quirk_blacklist_vpd);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LSI_LOGIC, 0x005f, quirk_blacklist_vpd);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, PCI_ANY_ID,
- quirk_blacklist_vpd);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_QLOGIC, 0x2261, quirk_blacklist_vpd);
-
-/*
- * For Broadcom 5706, 5708, 5709 rev. A nics, any read beyond the
- * VPD end tag will hang the device. This problem was initially
- * observed when a vpd entry was created in sysfs
- * ('/sys/bus/pci/devices/<id>/vpd'). A read to this sysfs entry
- * will dump 32k of data. Reading a full 32k will cause an access
- * beyond the VPD end tag causing the device to hang. Once the device
- * is hung, the bnx2 driver will not be able to reset the device.
- * We believe that it is legal to read beyond the end tag and
- * therefore the solution is to limit the read/write length.
- */
-static void quirk_brcm_570x_limit_vpd(struct pci_dev *dev)
-{
- /*
- * Only disable the VPD capability for 5706, 5706S, 5708,
- * 5708S and 5709 rev. A
- */
- if ((dev->device == PCI_DEVICE_ID_NX2_5706) ||
- (dev->device == PCI_DEVICE_ID_NX2_5706S) ||
- (dev->device == PCI_DEVICE_ID_NX2_5708) ||
- (dev->device == PCI_DEVICE_ID_NX2_5708S) ||
- ((dev->device == PCI_DEVICE_ID_NX2_5709) &&
- (dev->revision & 0xf0) == 0x0)) {
- if (dev->vpd)
- dev->vpd->len = 0x80;
- }
-}
-
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_BROADCOM,
- PCI_DEVICE_ID_NX2_5706,
- quirk_brcm_570x_limit_vpd);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_BROADCOM,
- PCI_DEVICE_ID_NX2_5706S,
- quirk_brcm_570x_limit_vpd);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_BROADCOM,
- PCI_DEVICE_ID_NX2_5708,
- quirk_brcm_570x_limit_vpd);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_BROADCOM,
- PCI_DEVICE_ID_NX2_5708S,
- quirk_brcm_570x_limit_vpd);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_BROADCOM,
- PCI_DEVICE_ID_NX2_5709,
- quirk_brcm_570x_limit_vpd);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_BROADCOM,
- PCI_DEVICE_ID_NX2_5709S,
- quirk_brcm_570x_limit_vpd);
-
static void quirk_brcm_5719_limit_mrrs(struct pci_dev *dev)
{
u32 rev;
@@ -3104,16 +3002,10 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0e0d, quirk_intel_ntb);
static ktime_t fixup_debug_start(struct pci_dev *dev,
void (*fn)(struct pci_dev *dev))
{
-	ktime_t calltime = 0;
-	pci_dbg(dev, "calling %pF\n", fn);
-	if (initcall_debug) {
-		pr_debug("calling %pF @ %i for %s\n",
-			fn, task_pid_nr(current), dev_name(&dev->dev));
-		calltime = ktime_get();
-	}
-
-	return calltime;
+	if (initcall_debug)
+		pci_info(dev, "calling %pF @ %i\n", fn, task_pid_nr(current));
+	return ktime_get();
}
static void fixup_debug_report(struct pci_dev *dev, ktime_t calltime,
@@ -3122,13 +3014,11 @@ static void fixup_debug_report(struct pci_dev *dev, ktime_t calltime,
ktime_t delta, rettime;
unsigned long long duration;
- if (initcall_debug) {
- rettime = ktime_get();
- delta = ktime_sub(rettime, calltime);
- duration = (unsigned long long) ktime_to_ns(delta) >> 10;
- pr_debug("pci fixup %pF returned after %lld usecs for %s\n",
- fn, duration, dev_name(&dev->dev));
- }
+ rettime = ktime_get();
+ delta = ktime_sub(rettime, calltime);
+ duration = (unsigned long long) ktime_to_ns(delta) >> 10;
+ if (initcall_debug || duration > 10000)
+ pci_info(dev, "%pF took %lld usecs\n", fn, duration);
}
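The duration reported above comes from a right shift by 10, i.e. a divide by 1024 that approximates the nanosecond-to-microsecond conversion (about 2.3% low). A standalone sketch of the same arithmetic, using a made-up fixup runtime:

#include <stdio.h>

int main(void)
{
	/* Hypothetical fixup runtime: 12 ms expressed in nanoseconds. */
	unsigned long long delta_ns = 12ULL * 1000 * 1000;
	/* Same conversion as fixup_debug_report(): >> 10 approximates /1000. */
	unsigned long long duration = delta_ns >> 10;

	/* Prints 11718 "usecs"; exact division by 1000 would give 12000. */
	printf("%llu usecs, reporting threshold is 10000\n", duration);
	return 0;
}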
/*
@@ -3417,25 +3307,6 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PORT_RIDGE,
quirk_thunderbolt_hotplug_msi);
-static void quirk_chelsio_extend_vpd(struct pci_dev *dev)
-{
- pci_set_vpd_size(dev, 8192);
-}
-
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x20, quirk_chelsio_extend_vpd);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x21, quirk_chelsio_extend_vpd);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x22, quirk_chelsio_extend_vpd);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x23, quirk_chelsio_extend_vpd);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x24, quirk_chelsio_extend_vpd);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x25, quirk_chelsio_extend_vpd);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x26, quirk_chelsio_extend_vpd);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x30, quirk_chelsio_extend_vpd);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x31, quirk_chelsio_extend_vpd);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x32, quirk_chelsio_extend_vpd);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x35, quirk_chelsio_extend_vpd);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x36, quirk_chelsio_extend_vpd);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x37, quirk_chelsio_extend_vpd);
-
#ifdef CONFIG_ACPI
/*
* Apple: Shutdown Cactus Ridge Thunderbolt controller.
@@ -3896,6 +3767,9 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9182,
/* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c46 */
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x91a0,
quirk_dma_func1_alias);
+/* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c127 */
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9220,
+ quirk_dma_func1_alias);
/* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c49 */
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9230,
quirk_dma_func1_alias);
@@ -4514,6 +4388,15 @@ static const struct pci_dev_acs_enabled {
{ PCI_VENDOR_ID_CAVIUM, PCI_ANY_ID, pci_quirk_cavium_acs },
/* APM X-Gene */
{ PCI_VENDOR_ID_AMCC, 0xE004, pci_quirk_xgene_acs },
+ /* Ampere Computing */
+ { PCI_VENDOR_ID_AMPERE, 0xE005, pci_quirk_xgene_acs },
+ { PCI_VENDOR_ID_AMPERE, 0xE006, pci_quirk_xgene_acs },
+ { PCI_VENDOR_ID_AMPERE, 0xE007, pci_quirk_xgene_acs },
+ { PCI_VENDOR_ID_AMPERE, 0xE008, pci_quirk_xgene_acs },
+ { PCI_VENDOR_ID_AMPERE, 0xE009, pci_quirk_xgene_acs },
+ { PCI_VENDOR_ID_AMPERE, 0xE00A, pci_quirk_xgene_acs },
+ { PCI_VENDOR_ID_AMPERE, 0xE00B, pci_quirk_xgene_acs },
+ { PCI_VENDOR_ID_AMPERE, 0xE00C, pci_quirk_xgene_acs },
{ 0 }
};
diff --git a/drivers/pci/rom.c b/drivers/pci/rom.c
index 374a33443be9..a7b5c37a85ec 100644
--- a/drivers/pci/rom.c
+++ b/drivers/pci/rom.c
@@ -1,11 +1,9 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * drivers/pci/rom.c
+ * PCI ROM access routines
*
* (C) Copyright 2004 Jon Smirl <jonsmirl@yahoo.com>
* (C) Copyright 2004 Silicon Graphics, Inc. Jesse Barnes <jbarnes@sgi.com>
- *
- * PCI ROM access routines
*/
#include <linux/kernel.h>
#include <linux/export.h>
diff --git a/drivers/pci/search.c b/drivers/pci/search.c
index bc1e023f1353..2b5f720862d3 100644
--- a/drivers/pci/search.c
+++ b/drivers/pci/search.c
@@ -1,11 +1,11 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * PCI searching functions.
+ * PCI searching functions
*
- * Copyright (C) 1993 -- 1997 Drew Eckhardt, Frederic Potter,
+ * Copyright (C) 1993 -- 1997 Drew Eckhardt, Frederic Potter,
* David Mosberger-Tang
- * Copyright (C) 1997 -- 2000 Martin Mares <mj@ucw.cz>
- * Copyright (C) 2003 -- 2004 Greg Kroah-Hartman <greg@kroah.com>
+ * Copyright (C) 1997 -- 2000 Martin Mares <mj@ucw.cz>
+ * Copyright (C) 2003 -- 2004 Greg Kroah-Hartman <greg@kroah.com>
*/
#include <linux/pci.h>
diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c
index 3cce29a069e6..072784f55ea5 100644
--- a/drivers/pci/setup-bus.c
+++ b/drivers/pci/setup-bus.c
@@ -1,16 +1,12 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * drivers/pci/setup-bus.c
+ * Support routines for initializing a PCI subsystem
*
* Extruded from code written by
* Dave Rusling (david.rusling@reo.mts.dec.com)
* David Mosberger (davidm@cs.arizona.edu)
* David Miller (davem@redhat.com)
*
- * Support routines for initializing a PCI subsystem.
- */
-
-/*
* Nov 2000, Ivan Kokshaysky <ink@jurassic.park.msu.ru>
* PCI-PCI bridges cleanup, sorted resource allocation.
* Feb 2002, Ivan Kokshaysky <ink@jurassic.park.msu.ru>
diff --git a/drivers/pci/setup-irq.c b/drivers/pci/setup-irq.c
index 5ad4ee7d7b1e..7129494754dd 100644
--- a/drivers/pci/setup-irq.c
+++ b/drivers/pci/setup-irq.c
@@ -1,13 +1,11 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * drivers/pci/setup-irq.c
+ * Support routines for initializing a PCI subsystem
*
* Extruded from code written by
* Dave Rusling (david.rusling@reo.mts.dec.com)
* David Mosberger (davidm@cs.arizona.edu)
* David Miller (davem@redhat.com)
- *
- * Support routines for initializing a PCI subsystem.
*/
diff --git a/drivers/pci/setup-res.c b/drivers/pci/setup-res.c
index 369d48d6c6f1..1ef01d79b52e 100644
--- a/drivers/pci/setup-res.c
+++ b/drivers/pci/setup-res.c
@@ -1,18 +1,14 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * drivers/pci/setup-res.c
+ * Support routines for initializing a PCI subsystem
*
* Extruded from code written by
* Dave Rusling (david.rusling@reo.mts.dec.com)
* David Mosberger (davidm@cs.arizona.edu)
* David Miller (davem@redhat.com)
*
- * Support routines for initializing a PCI subsystem.
- */
-
-/* fixed for multiple pci buses, 1999 Andrea Arcangeli <andrea@suse.de> */
-
-/*
+ * Fixed for multiple PCI buses, 1999 Andrea Arcangeli <andrea@suse.de>
+ *
* Nov 2000, Ivan Kokshaysky <ink@jurassic.park.msu.ru>
* Resource sorting
*/
diff --git a/drivers/pci/slot.c b/drivers/pci/slot.c
index d10f556dc03e..e634229ece89 100644
--- a/drivers/pci/slot.c
+++ b/drivers/pci/slot.c
@@ -1,6 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * drivers/pci/slot.c
* Copyright (C) 2006 Matthew Wilcox <matthew@wil.cx>
* Copyright (C) 2006-2009 Hewlett-Packard Development Company, L.P.
* Alex Chiang <achiang@hp.com>
@@ -76,6 +75,7 @@ static const char *pci_bus_speed_strings[] = {
"2.5 GT/s PCIe", /* 0x14 */
"5.0 GT/s PCIe", /* 0x15 */
"8.0 GT/s PCIe", /* 0x16 */
+ "16.0 GT/s PCIe", /* 0x17 */
};
static ssize_t bus_speed_read(enum pci_bus_speed speed, char *buf)
diff --git a/drivers/pci/syscall.c b/drivers/pci/syscall.c
index e725f99b5479..d96626c614f5 100644
--- a/drivers/pci/syscall.c
+++ b/drivers/pci/syscall.c
@@ -1,11 +1,8 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * pci_syscall.c
- *
- * For architectures where we want to allow direct access
- * to the PCI config stuff - it would probably be preferable
- * on PCs too, but there people just do it by hand with the
- * magic northbridge registers..
+ * For architectures where we want to allow direct access to the PCI config
+ * stuff - it would probably be preferable on PCs too, but there people
+ * just do it by hand with the magic northbridge registers.
*/
#include <linux/errno.h>
diff --git a/drivers/pci/vpd.c b/drivers/pci/vpd.c
index 70fba57d6103..00b8b5d37056 100644
--- a/drivers/pci/vpd.c
+++ b/drivers/pci/vpd.c
@@ -1,13 +1,465 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * File: vpd.c
- * Purpose: Provide PCI VPD support
+ * PCI VPD support
*
* Copyright (C) 2010 Broadcom Corporation.
*/
#include <linux/pci.h>
+#include <linux/delay.h>
#include <linux/export.h>
+#include <linux/sched/signal.h>
+#include "pci.h"
+
+/* VPD access through PCI 2.2+ VPD capability */
+
+struct pci_vpd_ops {
+ ssize_t (*read)(struct pci_dev *dev, loff_t pos, size_t count, void *buf);
+ ssize_t (*write)(struct pci_dev *dev, loff_t pos, size_t count, const void *buf);
+ int (*set_size)(struct pci_dev *dev, size_t len);
+};
+
+struct pci_vpd {
+ const struct pci_vpd_ops *ops;
+ struct bin_attribute *attr; /* Descriptor for sysfs VPD entry */
+ struct mutex lock;
+ unsigned int len;
+ u16 flag;
+ u8 cap;
+ unsigned int busy:1;
+ unsigned int valid:1;
+};
+
+/**
+ * pci_read_vpd - Read one entry from Vital Product Data
+ * @dev: pci device struct
+ * @pos: offset in vpd space
+ * @count: number of bytes to read
+ * @buf: pointer to where to store result
+ */
+ssize_t pci_read_vpd(struct pci_dev *dev, loff_t pos, size_t count, void *buf)
+{
+ if (!dev->vpd || !dev->vpd->ops)
+ return -ENODEV;
+ return dev->vpd->ops->read(dev, pos, count, buf);
+}
+EXPORT_SYMBOL(pci_read_vpd);
+
+/**
+ * pci_write_vpd - Write entry to Vital Product Data
+ * @dev: pci device struct
+ * @pos: offset in vpd space
+ * @count: number of bytes to write
+ * @buf: buffer containing write data
+ */
+ssize_t pci_write_vpd(struct pci_dev *dev, loff_t pos, size_t count, const void *buf)
+{
+ if (!dev->vpd || !dev->vpd->ops)
+ return -ENODEV;
+ return dev->vpd->ops->write(dev, pos, count, buf);
+}
+EXPORT_SYMBOL(pci_write_vpd);
+
+/**
+ * pci_set_vpd_size - Set size of Vital Product Data space
+ * @dev: pci device struct
+ * @len: size of vpd space
+ */
+int pci_set_vpd_size(struct pci_dev *dev, size_t len)
+{
+ if (!dev->vpd || !dev->vpd->ops)
+ return -ENODEV;
+ return dev->vpd->ops->set_size(dev, len);
+}
+EXPORT_SYMBOL(pci_set_vpd_size);
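Taken together, pci_read_vpd(), pci_write_vpd() and pci_set_vpd_size() form the in-kernel interface to a device's VPD. A hedged sketch of how a driver might use the read side; example_dump_vpd() and its buffer size are invented for illustration:

#include <linux/pci.h>
#include <linux/printk.h>

/* Hypothetical helper: dump the start of a device's VPD from a probe path. */
static int example_dump_vpd(struct pci_dev *pdev)
{
	u8 buf[64];
	ssize_t len;

	/* Returns bytes read or a negative errno (-ENODEV without VPD). */
	len = pci_read_vpd(pdev, 0, sizeof(buf), buf);
	if (len < 0)
		return len;

	print_hex_dump_bytes("vpd: ", DUMP_PREFIX_OFFSET, buf, len);
	return 0;
}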
+
+#define PCI_VPD_MAX_SIZE (PCI_VPD_ADDR_MASK + 1)
+
+/**
+ * pci_vpd_size - determine actual size of Vital Product Data
+ * @dev: pci device struct
+ * @old_size: current assumed size, also maximum allowed size
+ */
+static size_t pci_vpd_size(struct pci_dev *dev, size_t old_size)
+{
+ size_t off = 0;
+ unsigned char header[1+2]; /* 1 byte tag, 2 bytes length */
+
+ while (off < old_size &&
+ pci_read_vpd(dev, off, 1, header) == 1) {
+ unsigned char tag;
+
+ if (header[0] & PCI_VPD_LRDT) {
+ /* Large Resource Data Type Tag */
+ tag = pci_vpd_lrdt_tag(header);
+ /* Only read length from known tag items */
+ if ((tag == PCI_VPD_LTIN_ID_STRING) ||
+ (tag == PCI_VPD_LTIN_RO_DATA) ||
+ (tag == PCI_VPD_LTIN_RW_DATA)) {
+ if (pci_read_vpd(dev, off+1, 2,
+ &header[1]) != 2) {
+ pci_warn(dev, "invalid large VPD tag %02x size at offset %zu",
+ tag, off + 1);
+ return 0;
+ }
+ off += PCI_VPD_LRDT_TAG_SIZE +
+ pci_vpd_lrdt_size(header);
+ }
+ } else {
+ /* Short Resource Data Type Tag */
+ off += PCI_VPD_SRDT_TAG_SIZE +
+ pci_vpd_srdt_size(header);
+ tag = pci_vpd_srdt_tag(header);
+ }
+
+ if (tag == PCI_VPD_STIN_END) /* End tag descriptor */
+ return off;
+
+ if ((tag != PCI_VPD_LTIN_ID_STRING) &&
+ (tag != PCI_VPD_LTIN_RO_DATA) &&
+ (tag != PCI_VPD_LTIN_RW_DATA)) {
+ pci_warn(dev, "invalid %s VPD tag %02x at offset %zu",
+ (header[0] & PCI_VPD_LRDT) ? "large" : "short",
+ tag, off);
+ return 0;
+ }
+ }
+ return 0;
+}
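For reference, the walk above expects a resource-tag layout along the following lines. The bytes are invented purely to illustrate the tags pci_vpd_size() accepts (ID string, read-only data, read-write data, end tag); on this image the walker would report 15 bytes, ending just past the end tag.

/* Illustrative only -- not data from any real device. */
static const u8 example_vpd_image[] = {
	0x82, 0x04, 0x00,	/* large resource: ID string, length 4 */
	'd', 'e', 'm', 'o',
	0x90, 0x04, 0x00,	/* large resource: read-only data, length 4 */
	'R', 'V', 0x01, 0x00,	/* RV checksum keyword, value faked as 0 */
	0x78,			/* small resource: end tag */
};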
+
+/*
+ * Wait for last operation to complete.
+ * This code has to spin since there is no other notification from the PCI
+ * hardware. Since the VPD is often implemented by serial attachment to an
+ * EEPROM, it may take many milliseconds to complete.
+ *
+ * Returns 0 on success, negative values indicate error.
+ */
+static int pci_vpd_wait(struct pci_dev *dev)
+{
+ struct pci_vpd *vpd = dev->vpd;
+ unsigned long timeout = jiffies + msecs_to_jiffies(125);
+ unsigned long max_sleep = 16;
+ u16 status;
+ int ret;
+
+ if (!vpd->busy)
+ return 0;
+
+ while (time_before(jiffies, timeout)) {
+ ret = pci_user_read_config_word(dev, vpd->cap + PCI_VPD_ADDR,
+ &status);
+ if (ret < 0)
+ return ret;
+
+ if ((status & PCI_VPD_ADDR_F) == vpd->flag) {
+ vpd->busy = 0;
+ return 0;
+ }
+
+ if (fatal_signal_pending(current))
+ return -EINTR;
+
+ usleep_range(10, max_sleep);
+ if (max_sleep < 1024)
+ max_sleep *= 2;
+ }
+
+ pci_warn(dev, "VPD access failed. This is likely a firmware bug on this device. Contact the card vendor for a firmware update\n");
+ return -ETIMEDOUT;
+}
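The polling interval above doubles from the initial usleep_range() window up to a 1024 us cap inside a 125 ms overall budget. A standalone sketch of the worst-case schedule, reusing the same constants:

#include <stdio.h>

int main(void)
{
	/* Mirrors the pci_vpd_wait() constants: 125 ms budget, doubling cap. */
	unsigned long max_sleep = 16, slept_us = 0;
	unsigned int polls = 0;

	while (slept_us < 125 * 1000) {
		slept_us += max_sleep;	/* upper bound of usleep_range(10, max_sleep) */
		if (max_sleep < 1024)
			max_sleep *= 2;
		polls++;
	}
	printf("roughly %u status reads before giving up\n", polls);
	return 0;
}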
+
+static ssize_t pci_vpd_read(struct pci_dev *dev, loff_t pos, size_t count,
+ void *arg)
+{
+ struct pci_vpd *vpd = dev->vpd;
+ int ret;
+ loff_t end = pos + count;
+ u8 *buf = arg;
+
+ if (pos < 0)
+ return -EINVAL;
+
+ if (!vpd->valid) {
+ vpd->valid = 1;
+ vpd->len = pci_vpd_size(dev, vpd->len);
+ }
+
+ if (vpd->len == 0)
+ return -EIO;
+
+ if (pos > vpd->len)
+ return 0;
+
+ if (end > vpd->len) {
+ end = vpd->len;
+ count = end - pos;
+ }
+
+ if (mutex_lock_killable(&vpd->lock))
+ return -EINTR;
+
+ ret = pci_vpd_wait(dev);
+ if (ret < 0)
+ goto out;
+
+ while (pos < end) {
+ u32 val;
+ unsigned int i, skip;
+
+ ret = pci_user_write_config_word(dev, vpd->cap + PCI_VPD_ADDR,
+ pos & ~3);
+ if (ret < 0)
+ break;
+ vpd->busy = 1;
+ vpd->flag = PCI_VPD_ADDR_F;
+ ret = pci_vpd_wait(dev);
+ if (ret < 0)
+ break;
+
+ ret = pci_user_read_config_dword(dev, vpd->cap + PCI_VPD_DATA, &val);
+ if (ret < 0)
+ break;
+
+ skip = pos & 3;
+ for (i = 0; i < sizeof(u32); i++) {
+ if (i >= skip) {
+ *buf++ = val;
+ if (++pos == end)
+ break;
+ }
+ val >>= 8;
+ }
+ }
+out:
+ mutex_unlock(&vpd->lock);
+ return ret ? ret : count;
+}
+
+static ssize_t pci_vpd_write(struct pci_dev *dev, loff_t pos, size_t count,
+ const void *arg)
+{
+ struct pci_vpd *vpd = dev->vpd;
+ const u8 *buf = arg;
+ loff_t end = pos + count;
+ int ret = 0;
+
+ if (pos < 0 || (pos & 3) || (count & 3))
+ return -EINVAL;
+
+ if (!vpd->valid) {
+ vpd->valid = 1;
+ vpd->len = pci_vpd_size(dev, vpd->len);
+ }
+
+ if (vpd->len == 0)
+ return -EIO;
+
+ if (end > vpd->len)
+ return -EINVAL;
+
+ if (mutex_lock_killable(&vpd->lock))
+ return -EINTR;
+
+ ret = pci_vpd_wait(dev);
+ if (ret < 0)
+ goto out;
+
+ while (pos < end) {
+ u32 val;
+
+ val = *buf++;
+ val |= *buf++ << 8;
+ val |= *buf++ << 16;
+ val |= *buf++ << 24;
+
+ ret = pci_user_write_config_dword(dev, vpd->cap + PCI_VPD_DATA, val);
+ if (ret < 0)
+ break;
+ ret = pci_user_write_config_word(dev, vpd->cap + PCI_VPD_ADDR,
+ pos | PCI_VPD_ADDR_F);
+ if (ret < 0)
+ break;
+
+ vpd->busy = 1;
+ vpd->flag = 0;
+ ret = pci_vpd_wait(dev);
+ if (ret < 0)
+ break;
+
+ pos += sizeof(u32);
+ }
+out:
+ mutex_unlock(&vpd->lock);
+ return ret ? ret : count;
+}
+
+static int pci_vpd_set_size(struct pci_dev *dev, size_t len)
+{
+ struct pci_vpd *vpd = dev->vpd;
+
+ if (len == 0 || len > PCI_VPD_MAX_SIZE)
+ return -EIO;
+
+ vpd->valid = 1;
+ vpd->len = len;
+
+ return 0;
+}
+
+static const struct pci_vpd_ops pci_vpd_ops = {
+ .read = pci_vpd_read,
+ .write = pci_vpd_write,
+ .set_size = pci_vpd_set_size,
+};
+
+static ssize_t pci_vpd_f0_read(struct pci_dev *dev, loff_t pos, size_t count,
+ void *arg)
+{
+ struct pci_dev *tdev = pci_get_slot(dev->bus,
+ PCI_DEVFN(PCI_SLOT(dev->devfn), 0));
+ ssize_t ret;
+
+ if (!tdev)
+ return -ENODEV;
+
+ ret = pci_read_vpd(tdev, pos, count, arg);
+ pci_dev_put(tdev);
+ return ret;
+}
+
+static ssize_t pci_vpd_f0_write(struct pci_dev *dev, loff_t pos, size_t count,
+ const void *arg)
+{
+ struct pci_dev *tdev = pci_get_slot(dev->bus,
+ PCI_DEVFN(PCI_SLOT(dev->devfn), 0));
+ ssize_t ret;
+
+ if (!tdev)
+ return -ENODEV;
+
+ ret = pci_write_vpd(tdev, pos, count, arg);
+ pci_dev_put(tdev);
+ return ret;
+}
+
+static int pci_vpd_f0_set_size(struct pci_dev *dev, size_t len)
+{
+ struct pci_dev *tdev = pci_get_slot(dev->bus,
+ PCI_DEVFN(PCI_SLOT(dev->devfn), 0));
+ int ret;
+
+ if (!tdev)
+ return -ENODEV;
+
+ ret = pci_set_vpd_size(tdev, len);
+ pci_dev_put(tdev);
+ return ret;
+}
+
+static const struct pci_vpd_ops pci_vpd_f0_ops = {
+ .read = pci_vpd_f0_read,
+ .write = pci_vpd_f0_write,
+ .set_size = pci_vpd_f0_set_size,
+};
+
+int pci_vpd_init(struct pci_dev *dev)
+{
+ struct pci_vpd *vpd;
+ u8 cap;
+
+ cap = pci_find_capability(dev, PCI_CAP_ID_VPD);
+ if (!cap)
+ return -ENODEV;
+
+ vpd = kzalloc(sizeof(*vpd), GFP_ATOMIC);
+ if (!vpd)
+ return -ENOMEM;
+
+ vpd->len = PCI_VPD_MAX_SIZE;
+ if (dev->dev_flags & PCI_DEV_FLAGS_VPD_REF_F0)
+ vpd->ops = &pci_vpd_f0_ops;
+ else
+ vpd->ops = &pci_vpd_ops;
+ mutex_init(&vpd->lock);
+ vpd->cap = cap;
+ vpd->busy = 0;
+ vpd->valid = 0;
+ dev->vpd = vpd;
+ return 0;
+}
+
+void pci_vpd_release(struct pci_dev *dev)
+{
+ kfree(dev->vpd);
+}
+
+static ssize_t read_vpd_attr(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr, char *buf,
+ loff_t off, size_t count)
+{
+ struct pci_dev *dev = to_pci_dev(kobj_to_dev(kobj));
+
+ if (bin_attr->size > 0) {
+ if (off > bin_attr->size)
+ count = 0;
+ else if (count > bin_attr->size - off)
+ count = bin_attr->size - off;
+ }
+
+ return pci_read_vpd(dev, off, count, buf);
+}
+
+static ssize_t write_vpd_attr(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr, char *buf,
+ loff_t off, size_t count)
+{
+ struct pci_dev *dev = to_pci_dev(kobj_to_dev(kobj));
+
+ if (bin_attr->size > 0) {
+ if (off > bin_attr->size)
+ count = 0;
+ else if (count > bin_attr->size - off)
+ count = bin_attr->size - off;
+ }
+
+ return pci_write_vpd(dev, off, count, buf);
+}
+
+void pcie_vpd_create_sysfs_dev_files(struct pci_dev *dev)
+{
+ int retval;
+ struct bin_attribute *attr;
+
+ if (!dev->vpd)
+ return;
+
+ attr = kzalloc(sizeof(*attr), GFP_ATOMIC);
+ if (!attr)
+ return;
+
+ sysfs_bin_attr_init(attr);
+ attr->size = 0;
+ attr->attr.name = "vpd";
+ attr->attr.mode = S_IRUSR | S_IWUSR;
+ attr->read = read_vpd_attr;
+ attr->write = write_vpd_attr;
+ retval = sysfs_create_bin_file(&dev->dev.kobj, attr);
+ if (retval) {
+ kfree(attr);
+ return;
+ }
+
+ dev->vpd->attr = attr;
+}
+
+void pcie_vpd_remove_sysfs_dev_files(struct pci_dev *dev)
+{
+ if (dev->vpd && dev->vpd->attr) {
+ sysfs_remove_bin_file(&dev->dev.kobj, dev->vpd->attr);
+ kfree(dev->vpd->attr);
+ }
+}
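The sysfs attribute created above is the usual way VPD reaches userspace. A small illustrative reader follows; the BDF in the path is a placeholder, not a device referenced anywhere in this patch:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* Placeholder address -- substitute a device that exposes VPD. */
	const char *path = "/sys/bus/pci/devices/0000:01:00.0/vpd";
	unsigned char buf[256];
	ssize_t n;
	int fd;

	fd = open(path, O_RDONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* Reads are clamped in-kernel to the size computed by pci_vpd_size(). */
	n = read(fd, buf, sizeof(buf));
	if (n >= 0)
		printf("read %zd bytes of VPD\n", n);
	close(fd);
	return n < 0;
}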
int pci_vpd_find_tag(const u8 *buf, unsigned int off, unsigned int len, u8 rdt)
{
@@ -61,3 +513,123 @@ int pci_vpd_find_info_keyword(const u8 *buf, unsigned int off,
return -ENOENT;
}
EXPORT_SYMBOL_GPL(pci_vpd_find_info_keyword);
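A hedged sketch of how callers typically combine these two helpers with a buffer obtained from pci_read_vpd(); the pattern mirrors what network drivers such as tg3 do, but example_find_partno() itself is invented:

#include <linux/pci.h>

/* Hypothetical: return the offset of the "PN" (part number) data bytes. */
static int example_find_partno(const u8 *vpd, unsigned int vpd_len)
{
	int ro, kw, ro_len;

	ro = pci_vpd_find_tag(vpd, 0, vpd_len, PCI_VPD_LRDT_RO_DATA);
	if (ro < 0)
		return ro;

	ro_len = pci_vpd_lrdt_size(&vpd[ro]);
	ro += PCI_VPD_LRDT_TAG_SIZE;

	kw = pci_vpd_find_info_keyword(vpd, ro, ro_len,
				       PCI_VPD_RO_KEYWORD_PARTNO);
	if (kw < 0)
		return kw;

	/* Data follows the 3-byte keyword header. */
	return kw + PCI_VPD_INFO_FLD_HDR_SIZE;
}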
+
+#ifdef CONFIG_PCI_QUIRKS
+/*
+ * Quirk non-zero PCI functions to route VPD access through function 0 for
+ * devices that share VPD resources between functions. The functions are
+ * expected to be identical devices.
+ */
+static void quirk_f0_vpd_link(struct pci_dev *dev)
+{
+ struct pci_dev *f0;
+
+ if (!PCI_FUNC(dev->devfn))
+ return;
+
+ f0 = pci_get_slot(dev->bus, PCI_DEVFN(PCI_SLOT(dev->devfn), 0));
+ if (!f0)
+ return;
+
+ if (f0->vpd && dev->class == f0->class &&
+ dev->vendor == f0->vendor && dev->device == f0->device)
+ dev->dev_flags |= PCI_DEV_FLAGS_VPD_REF_F0;
+
+ pci_dev_put(f0);
+}
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, PCI_ANY_ID,
+ PCI_CLASS_NETWORK_ETHERNET, 8, quirk_f0_vpd_link);
+
+/*
+ * If a device follows the VPD format spec, the PCI core will not read or
+ * write past the VPD End Tag. But some vendors do not follow the VPD
+ * format spec, so we can't tell how much data is safe to access. Devices
+ * may behave unpredictably if we access too much. Blacklist these devices
+ * so we don't touch VPD at all.
+ */
+static void quirk_blacklist_vpd(struct pci_dev *dev)
+{
+ if (dev->vpd) {
+ dev->vpd->len = 0;
+ pci_warn(dev, FW_BUG "disabling VPD access (can't determine size of non-standard VPD format)\n");
+ }
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LSI_LOGIC, 0x0060, quirk_blacklist_vpd);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LSI_LOGIC, 0x007c, quirk_blacklist_vpd);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LSI_LOGIC, 0x0413, quirk_blacklist_vpd);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LSI_LOGIC, 0x0078, quirk_blacklist_vpd);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LSI_LOGIC, 0x0079, quirk_blacklist_vpd);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LSI_LOGIC, 0x0073, quirk_blacklist_vpd);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LSI_LOGIC, 0x0071, quirk_blacklist_vpd);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LSI_LOGIC, 0x005b, quirk_blacklist_vpd);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LSI_LOGIC, 0x002f, quirk_blacklist_vpd);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LSI_LOGIC, 0x005d, quirk_blacklist_vpd);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LSI_LOGIC, 0x005f, quirk_blacklist_vpd);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, PCI_ANY_ID,
+ quirk_blacklist_vpd);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_QLOGIC, 0x2261, quirk_blacklist_vpd);
+
+/*
+ * For Broadcom 5706, 5708, 5709 rev. A nics, any read beyond the
+ * VPD end tag will hang the device. This problem was initially
+ * observed when a vpd entry was created in sysfs
+ * ('/sys/bus/pci/devices/<id>/vpd'). A read to this sysfs entry
+ * will dump 32k of data. Reading a full 32k will cause an access
+ * beyond the VPD end tag causing the device to hang. Once the device
+ * is hung, the bnx2 driver will not be able to reset the device.
+ * We believe that it is legal to read beyond the end tag and
+ * therefore the solution is to limit the read/write length.
+ */
+static void quirk_brcm_570x_limit_vpd(struct pci_dev *dev)
+{
+ /*
+ * Only disable the VPD capability for 5706, 5706S, 5708,
+ * 5708S and 5709 rev. A
+ */
+ if ((dev->device == PCI_DEVICE_ID_NX2_5706) ||
+ (dev->device == PCI_DEVICE_ID_NX2_5706S) ||
+ (dev->device == PCI_DEVICE_ID_NX2_5708) ||
+ (dev->device == PCI_DEVICE_ID_NX2_5708S) ||
+ ((dev->device == PCI_DEVICE_ID_NX2_5709) &&
+ (dev->revision & 0xf0) == 0x0)) {
+ if (dev->vpd)
+ dev->vpd->len = 0x80;
+ }
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_BROADCOM,
+ PCI_DEVICE_ID_NX2_5706,
+ quirk_brcm_570x_limit_vpd);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_BROADCOM,
+ PCI_DEVICE_ID_NX2_5706S,
+ quirk_brcm_570x_limit_vpd);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_BROADCOM,
+ PCI_DEVICE_ID_NX2_5708,
+ quirk_brcm_570x_limit_vpd);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_BROADCOM,
+ PCI_DEVICE_ID_NX2_5708S,
+ quirk_brcm_570x_limit_vpd);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_BROADCOM,
+ PCI_DEVICE_ID_NX2_5709,
+ quirk_brcm_570x_limit_vpd);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_BROADCOM,
+ PCI_DEVICE_ID_NX2_5709S,
+ quirk_brcm_570x_limit_vpd);
+
+static void quirk_chelsio_extend_vpd(struct pci_dev *dev)
+{
+ pci_set_vpd_size(dev, 8192);
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x20, quirk_chelsio_extend_vpd);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x21, quirk_chelsio_extend_vpd);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x22, quirk_chelsio_extend_vpd);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x23, quirk_chelsio_extend_vpd);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x24, quirk_chelsio_extend_vpd);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x25, quirk_chelsio_extend_vpd);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x26, quirk_chelsio_extend_vpd);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x30, quirk_chelsio_extend_vpd);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x31, quirk_chelsio_extend_vpd);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x32, quirk_chelsio_extend_vpd);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x35, quirk_chelsio_extend_vpd);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x36, quirk_chelsio_extend_vpd);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x37, quirk_chelsio_extend_vpd);
+#endif
diff --git a/drivers/pci/xen-pcifront.c b/drivers/pci/xen-pcifront.c
index 8785014f656e..eba6e33147a2 100644
--- a/drivers/pci/xen-pcifront.c
+++ b/drivers/pci/xen-pcifront.c
@@ -1,8 +1,8 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Xen PCI Frontend.
+ * Xen PCI Frontend
*
- * Author: Ryan Wilson <hap9@epoch.ncsc.mil>
+ * Author: Ryan Wilson <hap9@epoch.ncsc.mil>
*/
#include <linux/module.h>
#include <linux/init.h>
diff --git a/drivers/rapidio/devices/tsi721.c b/drivers/rapidio/devices/tsi721.c
index 9a68914100ad..bb655854713d 100644
--- a/drivers/rapidio/devices/tsi721.c
+++ b/drivers/rapidio/devices/tsi721.c
@@ -2880,8 +2880,9 @@ static int tsi721_probe(struct pci_dev *pdev,
"Invalid MRRS override value %d", pcie_mrrs);
}
- /* Adjust PCIe completion timeout. */
- pcie_capability_clear_and_set_word(pdev, PCI_EXP_DEVCTL2, 0xf, 0x2);
+ /* Set PCIe completion timeout to 1-10ms */
+ pcie_capability_clear_and_set_word(pdev, PCI_EXP_DEVCTL2,
+ PCI_EXP_DEVCTL2_COMP_TIMEOUT, 0x2);
/*
* FIXUP: correct offsets of MSI-X tables in the MSI-X Capability Block