From 68f8e9fa56ce6b5cb18323e8d8fa536fee0f89ca Mon Sep 17 00:00:00 2001
From: Bjorn Helgaas
Date: Wed, 25 Mar 2015 16:23:42 +0800
Subject: PCI: Print more info in sriov_enable() error message

If we don't have space for all the bus numbers required to enable VFs,
print the largest bus number required and the range available.

No functional change; improved error message only.

Signed-off-by: Bjorn Helgaas
Acked-by: Wei Yang
Signed-off-by: Benjamin Herrenschmidt
---
 drivers/pci/iov.c | 7 +++++--
 1 file changed, 5 insertions(+), 2 deletions(-)

(limited to 'drivers/pci')

diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c
index 4b3a4eaad996..c4c33ead03bc 100644
--- a/drivers/pci/iov.c
+++ b/drivers/pci/iov.c
@@ -180,6 +180,7 @@ static int sriov_enable(struct pci_dev *dev, int nr_virtfn)
 	struct pci_dev *pdev;
 	struct pci_sriov *iov = dev->sriov;
 	int bars = 0;
+	u8 bus;
 
 	if (!nr_virtfn)
 		return 0;
@@ -216,8 +217,10 @@ static int sriov_enable(struct pci_dev *dev, int nr_virtfn)
 	iov->offset = offset;
 	iov->stride = stride;
 
-	if (virtfn_bus(dev, nr_virtfn - 1) > dev->bus->busn_res.end) {
-		dev_err(&dev->dev, "SR-IOV: bus number out of range\n");
+	bus = virtfn_bus(dev, nr_virtfn - 1);
+	if (bus > dev->bus->busn_res.end) {
+		dev_err(&dev->dev, "can't enable %d VFs (bus %02x out of range of %pR)\n",
+			nr_virtfn, bus, &dev->bus->busn_res);
 		return -ENOMEM;
 	}
 
-- 
cgit v1.2.3


From e88ae01d2c43be3c3a6f340bfa7bab8b87127f57 Mon Sep 17 00:00:00 2001
From: Wei Yang
Date: Wed, 25 Mar 2015 16:23:43 +0800
Subject: PCI: Print PF SR-IOV resource that contains all VF(n) BAR space

When we size VF BAR0, VF BAR1, etc., from the SR-IOV Capability of a PF,
we learn the alignment requirement and amount of space consumed by a
single VF.  But when VFs are enabled, *each* of the NumVFs consumes that
amount of space, so the total size of the PF resource is "VF BAR size *
NumVFs".

Add a printk of the total space consumed by the VFs corresponding to what
we already do for normal non-IOV BARs.

No functional change; new message only.

[bhelgaas: split out into its own patch]
Signed-off-by: Wei Yang
Acked-by: Bjorn Helgaas
Signed-off-by: Benjamin Herrenschmidt
---
 drivers/pci/iov.c | 2 ++
 1 file changed, 2 insertions(+)

(limited to 'drivers/pci')

diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c
index c4c33ead03bc..05f9d97e4175 100644
--- a/drivers/pci/iov.c
+++ b/drivers/pci/iov.c
@@ -372,6 +372,8 @@ found:
 			goto failed;
 		}
 		res->end = res->start + resource_size(res) * total - 1;
+		dev_info(&dev->dev, "VF(n) BAR%d space: %pR (contains BAR%d for %d VFs)\n",
+			 i, res, i, total);
 		nres++;
 	}
 
-- 
cgit v1.2.3
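The "VF BAR size * NumVFs" arithmetic above is easy to check by hand; the
following standalone C sketch uses hypothetical numbers (an 8 KB VF BAR and
64 VFs), not values from any particular device:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t start = 0x91000000;	/* assumed PF IOV BAR start */
	uint64_t vf_bar_size = 0x2000;	/* one VF BAR: 8 KB */
	unsigned int total_vfs = 64;	/* TotalVFs from the SR-IOV capability */

	/* mirrors: res->end = res->start + resource_size(res) * total - 1 */
	uint64_t end = start + vf_bar_size * total_vfs - 1;

	printf("VF(n) BAR0 space: [mem 0x%jx-0x%jx] (contains BAR0 for %u VFs)\n",
	       (uintmax_t)start, (uintmax_t)end, total_vfs);
	return 0;
}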
From 0e6c9122a6ec96d19f1db61e9750287d86b6829c Mon Sep 17 00:00:00 2001
From: Wei Yang
Date: Wed, 25 Mar 2015 16:23:44 +0800
Subject: PCI: Keep individual VF BAR size in struct pci_sriov

Currently we don't store the individual VF BAR size.  We calculate it when
needed by dividing the PF's IOV resource size (which contains space for
*all* the VFs) by total_VFs or by reading the BAR in the SR-IOV capability
again.

Keep the individual VF BAR size in struct pci_sriov.barsz[], add
pci_iov_resource_size() to retrieve it, and use that instead of doing the
division or reading the SR-IOV capability BAR.

[bhelgaas: rename to "barsz[]", simplify barsz[] index computation, remove
SR-IOV capability BAR sizing]
Signed-off-by: Wei Yang
Acked-by: Bjorn Helgaas
Signed-off-by: Benjamin Herrenschmidt
---
 drivers/pci/iov.c   | 39 ++++++++++++++++++++-------------------
 drivers/pci/pci.h   |  1 +
 include/linux/pci.h |  3 +++
 3 files changed, 24 insertions(+), 19 deletions(-)

(limited to 'drivers/pci')

diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c
index 05f9d97e4175..5bca0e1a2799 100644
--- a/drivers/pci/iov.c
+++ b/drivers/pci/iov.c
@@ -57,6 +57,14 @@ static void virtfn_remove_bus(struct pci_bus *physbus, struct pci_bus *virtbus)
 		pci_remove_bus(virtbus);
 }
 
+resource_size_t pci_iov_resource_size(struct pci_dev *dev, int resno)
+{
+	if (!dev->is_physfn)
+		return 0;
+
+	return dev->sriov->barsz[resno - PCI_IOV_RESOURCES];
+}
+
 static int virtfn_add(struct pci_dev *dev, int id, int reset)
 {
 	int i;
@@ -92,8 +100,7 @@ static int virtfn_add(struct pci_dev *dev, int id, int reset)
 			continue;
 		virtfn->resource[i].name = pci_name(virtfn);
 		virtfn->resource[i].flags = res->flags;
-		size = resource_size(res);
-		do_div(size, iov->total_VFs);
+		size = pci_iov_resource_size(dev, i + PCI_IOV_RESOURCES);
 		virtfn->resource[i].start = res->start + size * id;
 		virtfn->resource[i].end = virtfn->resource[i].start + size - 1;
 		rc = request_resource(res, &virtfn->resource[i]);
@@ -311,7 +318,7 @@ static void sriov_disable(struct pci_dev *dev)
 
 static int sriov_init(struct pci_dev *dev, int pos)
 {
-	int i;
+	int i, bar64;
 	int rc;
 	int nres;
 	u32 pgsz;
@@ -360,29 +367,29 @@ found:
 	pgsz &= ~(pgsz - 1);
 	pci_write_config_dword(dev, pos + PCI_SRIOV_SYS_PGSIZE, pgsz);
 
+	iov = kzalloc(sizeof(*iov), GFP_KERNEL);
+	if (!iov)
+		return -ENOMEM;
+
 	nres = 0;
 	for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
 		res = dev->resource + PCI_IOV_RESOURCES + i;
-		i += __pci_read_base(dev, pci_bar_unknown, res,
-				     pos + PCI_SRIOV_BAR + i * 4);
+		bar64 = __pci_read_base(dev, pci_bar_unknown, res,
+					pos + PCI_SRIOV_BAR + i * 4);
 		if (!res->flags)
 			continue;
 		if (resource_size(res) & (PAGE_SIZE - 1)) {
 			rc = -EIO;
 			goto failed;
 		}
+		iov->barsz[i] = resource_size(res);
 		res->end = res->start + resource_size(res) * total - 1;
 		dev_info(&dev->dev, "VF(n) BAR%d space: %pR (contains BAR%d for %d VFs)\n",
 			 i, res, i, total);
+		i += bar64;
 		nres++;
 	}
 
-	iov = kzalloc(sizeof(*iov), GFP_KERNEL);
-	if (!iov) {
-		rc = -ENOMEM;
-		goto failed;
-	}
-
 	iov->pos = pos;
 	iov->nres = nres;
 	iov->ctrl = ctrl;
@@ -414,6 +421,7 @@ failed:
 		res->flags = 0;
 	}
 
+	kfree(iov);
 	return rc;
 }
 
@@ -510,14 +518,7 @@ int pci_iov_resource_bar(struct pci_dev *dev, int resno)
  */
 resource_size_t pci_sriov_resource_alignment(struct pci_dev *dev, int resno)
 {
-	struct resource tmp;
-	int reg = pci_iov_resource_bar(dev, resno);
-
-	if (!reg)
-		return 0;
-
-	__pci_read_base(dev, pci_bar_unknown, &tmp, reg);
-	return resource_alignment(&tmp);
+	return pci_iov_resource_size(dev, resno);
 }
 
 /**
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index 4091f82239cd..57329645dd01 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -247,6 +247,7 @@ struct pci_sriov {
 	struct pci_dev *dev;	/* lowest numbered PF */
 	struct pci_dev *self;	/* this PF */
 	struct mutex lock;	/* lock for VF bus */
+	resource_size_t barsz[PCI_SRIOV_NUM_BARS];	/* VF BAR size */
 };
 
 #ifdef CONFIG_PCI_ATS
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 211e9da8a7d7..15596582e575 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -1675,6 +1675,7 @@ int pci_num_vf(struct pci_dev *dev);
 int pci_vfs_assigned(struct pci_dev *dev);
 int pci_sriov_set_totalvfs(struct pci_dev *dev, u16 numvfs);
 int pci_sriov_get_totalvfs(struct pci_dev *dev);
+resource_size_t pci_iov_resource_size(struct pci_dev *dev, int resno);
 #else
 static inline int pci_enable_sriov(struct pci_dev *dev, int nr_virtfn)
 { return -ENODEV; }
@@ -1686,6 +1687,8 @@ static inline int pci_sriov_set_totalvfs(struct pci_dev *dev, u16 numvfs)
 { return 0; }
 static inline int pci_sriov_get_totalvfs(struct pci_dev *dev)
 { return 0; }
+static inline resource_size_t pci_iov_resource_size(struct pci_dev *dev, int resno)
+{ return 0; }
 #endif
 
 #if defined(CONFIG_HOTPLUG_PCI) || defined(CONFIG_HOTPLUG_PCI_MODULE)
-- 
cgit v1.2.3
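The cached per-VF size is what slices the PF's IOV resource into one BAR per
VF ("virtfn->resource[i].start = res->start + size * id" above).  A tiny
standalone sketch with hypothetical numbers:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t iov_start = 0x91000000;	/* assumed PF IOV BAR start */
	uint64_t barsz = 0x2000;		/* cached per-VF BAR size (8 KB) */
	unsigned int id;

	for (id = 0; id < 4; id++)		/* first few VFs only */
		printf("VF%u BAR0: 0x%jx-0x%jx\n", id,
		       (uintmax_t)(iov_start + barsz * id),
		       (uintmax_t)(iov_start + barsz * id + barsz - 1));
	return 0;
}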
From c1fe1f96e30d31cc99826f19a058d9e9eef87712 Mon Sep 17 00:00:00 2001
From: Bjorn Helgaas
Date: Wed, 25 Mar 2015 16:23:45 +0800
Subject: PCI: Index IOV resources in the conventional style

Most of PCI uses "res = &dev->resource[i]", not "res = dev->resource + i".
Use that style in iov.c also.

No functional change.

Signed-off-by: Bjorn Helgaas
Acked-by: Wei Yang
Signed-off-by: Benjamin Herrenschmidt
---
 drivers/pci/iov.c | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

(limited to 'drivers/pci')

diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c
index 5bca0e1a2799..27b98c361823 100644
--- a/drivers/pci/iov.c
+++ b/drivers/pci/iov.c
@@ -95,7 +95,7 @@ static int virtfn_add(struct pci_dev *dev, int id, int reset)
 	virtfn->multifunction = 0;
 
 	for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
-		res = dev->resource + PCI_IOV_RESOURCES + i;
+		res = &dev->resource[i + PCI_IOV_RESOURCES];
 		if (!res->parent)
 			continue;
 		virtfn->resource[i].name = pci_name(virtfn);
@@ -212,7 +212,7 @@ static int sriov_enable(struct pci_dev *dev, int nr_virtfn)
 	nres = 0;
 	for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
 		bars |= (1 << (i + PCI_IOV_RESOURCES));
-		res = dev->resource + PCI_IOV_RESOURCES + i;
+		res = &dev->resource[i + PCI_IOV_RESOURCES];
 		if (res->parent)
 			nres++;
 	}
@@ -373,7 +373,7 @@ found:
 	nres = 0;
 	for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
-		res = dev->resource + PCI_IOV_RESOURCES + i;
+		res = &dev->resource[i + PCI_IOV_RESOURCES];
 		bar64 = __pci_read_base(dev, pci_bar_unknown, res,
 					pos + PCI_SRIOV_BAR + i * 4);
 		if (!res->flags)
@@ -417,7 +417,7 @@ found:
 	for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
-		res = dev->resource + PCI_IOV_RESOURCES + i;
+		res = &dev->resource[i + PCI_IOV_RESOURCES];
 		res->flags = 0;
 	}
 
-- 
cgit v1.2.3


From f59dca27d20c73500c4a88ab9a077e40669755db Mon Sep 17 00:00:00 2001
From: Wei Yang
Date: Wed, 25 Mar 2015 16:23:46 +0800
Subject: PCI: Refresh First VF Offset and VF Stride when updating NumVFs

The First VF Offset and VF Stride fields depend on the NumVFs setting, so
refresh the cached fields in struct pci_sriov when updating NumVFs.  See
the SR-IOV spec r1.1, sec 3.3.9 and 3.3.10.

[bhelgaas: changelog, remove kernel-doc comment marker]
Signed-off-by: Wei Yang
Acked-by: Bjorn Helgaas
Signed-off-by: Benjamin Herrenschmidt
---
 drivers/pci/iov.c | 23 +++++++++++++++++++----
 1 file changed, 19 insertions(+), 4 deletions(-)

(limited to 'drivers/pci')

diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c
index 27b98c361823..a8752c2c2b53 100644
--- a/drivers/pci/iov.c
+++ b/drivers/pci/iov.c
@@ -31,6 +31,21 @@ static inline u8 virtfn_devfn(struct pci_dev *dev, int id)
 		dev->sriov->stride * id) & 0xff;
 }
 
+/*
+ * Per SR-IOV spec sec 3.3.10 and 3.3.11, First VF Offset and VF Stride may
+ * change when NumVFs changes.
+ *
+ * Update iov->offset and iov->stride when NumVFs is written.
+ */
+static inline void pci_iov_set_numvfs(struct pci_dev *dev, int nr_virtfn)
+{
+	struct pci_sriov *iov = dev->sriov;
+
+	pci_write_config_word(dev, iov->pos + PCI_SRIOV_NUM_VF, nr_virtfn);
+	pci_read_config_word(dev, iov->pos + PCI_SRIOV_VF_OFFSET, &iov->offset);
+	pci_read_config_word(dev, iov->pos + PCI_SRIOV_VF_STRIDE, &iov->stride);
+}
+
 static struct pci_bus *virtfn_add_bus(struct pci_bus *bus, int busnr)
 {
 	struct pci_bus *child;
@@ -253,7 +268,7 @@ static int sriov_enable(struct pci_dev *dev, int nr_virtfn)
 			return rc;
 	}
 
-	pci_write_config_word(dev, iov->pos + PCI_SRIOV_NUM_VF, nr_virtfn);
+	pci_iov_set_numvfs(dev, nr_virtfn);
 	iov->ctrl |= PCI_SRIOV_CTRL_VFE | PCI_SRIOV_CTRL_MSE;
 	pci_cfg_access_lock(dev);
 	pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, iov->ctrl);
@@ -282,7 +297,7 @@ failed:
 	iov->ctrl &= ~(PCI_SRIOV_CTRL_VFE | PCI_SRIOV_CTRL_MSE);
 	pci_cfg_access_lock(dev);
 	pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, iov->ctrl);
-	pci_write_config_word(dev, iov->pos + PCI_SRIOV_NUM_VF, 0);
+	pci_iov_set_numvfs(dev, 0);
 	ssleep(1);
 	pci_cfg_access_unlock(dev);
 
@@ -313,7 +328,7 @@ static void sriov_disable(struct pci_dev *dev)
 		sysfs_remove_link(&dev->dev.kobj, "dep_link");
 
 	iov->num_VFs = 0;
-	pci_write_config_word(dev, iov->pos + PCI_SRIOV_NUM_VF, 0);
+	pci_iov_set_numvfs(dev, 0);
 }
 
 static int sriov_init(struct pci_dev *dev, int pos)
@@ -452,7 +467,7 @@ static void sriov_restore_state(struct pci_dev *dev)
 		pci_update_resource(dev, i);
 
 	pci_write_config_dword(dev, iov->pos + PCI_SRIOV_SYS_PGSIZE, iov->pgsz);
-	pci_write_config_word(dev, iov->pos + PCI_SRIOV_NUM_VF, iov->num_VFs);
+	pci_iov_set_numvfs(dev, iov->num_VFs);
 	pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, iov->ctrl);
 	if (iov->ctrl & PCI_SRIOV_CTRL_VFE)
 		msleep(100);
-- 
cgit v1.2.3
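The fields that pci_iov_set_numvfs() re-reads feed the Routing ID arithmetic
used by virtfn_bus()/virtfn_devfn().  A standalone sketch with hypothetical
offset/stride values (on real hardware they must be re-read from the SR-IOV
capability after NumVFs is written, which is exactly what the new helper
does):

#include <stdio.h>

int main(void)
{
	unsigned int pf_bus = 0x20, pf_devfn = 0x00;
	unsigned int offset = 4, stride = 2, nr_virtfn = 8;	/* made up */
	unsigned int id = nr_virtfn - 1;	/* highest-numbered VF */

	/* VF n's routing ID = PF routing ID + First VF Offset + VF Stride * n */
	unsigned int rid = pf_devfn + offset + stride * id;
	unsigned int bus = pf_bus + (rid >> 8);
	unsigned int devfn = rid & 0xff;

	printf("VF%u lands at %02x:%02x.%u\n", id, bus, devfn >> 3, devfn & 7);
	return 0;
}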
From 4449f079722c86d2f6925da039835acdd8e973a8 Mon Sep 17 00:00:00 2001
From: Wei Yang
Date: Wed, 25 Mar 2015 16:23:47 +0800
Subject: PCI: Calculate maximum number of buses required for VFs

An SR-IOV device can change its First VF Offset and VF Stride based on the
values of ARI Capable Hierarchy and NumVFs.  The number of buses required
for all VFs is determined by NumVFs, First VF Offset, and VF Stride (see
SR-IOV spec r1.1, sec 2.1.2).

Previously pci_iov_bus_range() computed how many buses would be required by
TotalVFs, but this was based on a single NumVFs value and may not have been
the maximum for all NumVFs configurations.

Iterate over all valid NumVFs and calculate the maximum number of bus
numbers that could ever be required for VFs of this device.

[bhelgaas: changelog, compute busnr of NumVFs, not TotalVFs, remove
kernel-doc comment marker]
Signed-off-by: Wei Yang
Acked-by: Bjorn Helgaas
Signed-off-by: Benjamin Herrenschmidt
---
 drivers/pci/iov.c | 31 +++++++++++++++++++++++++++----
 drivers/pci/pci.h |  1 +
 2 files changed, 28 insertions(+), 4 deletions(-)

(limited to 'drivers/pci')

diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c
index a8752c2c2b53..2ae921f84bd3 100644
--- a/drivers/pci/iov.c
+++ b/drivers/pci/iov.c
@@ -46,6 +46,30 @@ static inline void pci_iov_set_numvfs(struct pci_dev *dev, int nr_virtfn)
 	pci_read_config_word(dev, iov->pos + PCI_SRIOV_VF_STRIDE, &iov->stride);
 }
 
+/*
+ * The PF consumes one bus number.  NumVFs, First VF Offset, and VF Stride
+ * determine how many additional bus numbers will be consumed by VFs.
+ *
+ * Iterate over all valid NumVFs and calculate the maximum number of bus
+ * numbers that could ever be required.
+ */
+static inline u8 virtfn_max_buses(struct pci_dev *dev)
+{
+	struct pci_sriov *iov = dev->sriov;
+	int nr_virtfn;
+	u8 max = 0;
+	u8 busnr;
+
+	for (nr_virtfn = 1; nr_virtfn <= iov->total_VFs; nr_virtfn++) {
+		pci_iov_set_numvfs(dev, nr_virtfn);
+		busnr = virtfn_bus(dev, nr_virtfn - 1);
+		if (busnr > max)
+			max = busnr;
+	}
+
+	return max;
+}
+
 static struct pci_bus *virtfn_add_bus(struct pci_bus *bus, int busnr)
 {
 	struct pci_bus *child;
@@ -427,6 +451,7 @@ found:
 
 	dev->sriov = iov;
 	dev->is_physfn = 1;
+	iov->max_VF_buses = virtfn_max_buses(dev);
 
 	return 0;
 
@@ -556,15 +581,13 @@ void pci_restore_iov_state(struct pci_dev *dev)
 int pci_iov_bus_range(struct pci_bus *bus)
 {
 	int max = 0;
-	u8 busnr;
 	struct pci_dev *dev;
 
 	list_for_each_entry(dev, &bus->devices, bus_list) {
 		if (!dev->is_physfn)
 			continue;
-		busnr = virtfn_bus(dev, dev->sriov->total_VFs - 1);
-		if (busnr > max)
-			max = busnr;
+		if (dev->sriov->max_VF_buses > max)
+			max = dev->sriov->max_VF_buses;
 	}
 
 	return max ? max - bus->number : 0;
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index 57329645dd01..bae593c04541 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -243,6 +243,7 @@ struct pci_sriov {
 	u16 stride;		/* following VF stride */
 	u32 pgsz;		/* page size for BAR alignment */
 	u8 link;		/* Function Dependency Link */
+	u8 max_VF_buses;	/* max buses consumed by VFs */
 	u16 driver_max_VFs;	/* max num VFs driver supports */
 	struct pci_dev *dev;	/* lowest numbered PF */
 	struct pci_dev *self;	/* this PF */
-- 
cgit v1.2.3
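To see why the maximum has to be taken over every NumVFs value rather than
just TotalVFs, consider a device whose First VF Offset/VF Stride change with
NumVFs.  The table below is entirely made up; the loop mirrors what
virtfn_max_buses() does with real config-space values:

#include <stdio.h>

struct layout { unsigned int numvfs, offset, stride; };

int main(void)
{
	/* hypothetical offset/stride as a function of NumVFs */
	static const struct layout l[] = {
		{ 63, 256, 8 },		/* sparse layout at 63 VFs */
		{ 64, 128, 2 },		/* dense layout at TotalVFs */
	};
	unsigned int pf_devfn = 0, max = 0, i;

	for (i = 0; i < sizeof(l) / sizeof(l[0]); i++) {
		unsigned int rid = pf_devfn + l[i].offset +
				   l[i].stride * (l[i].numvfs - 1);
		unsigned int buses = rid >> 8;	/* buses past the PF's bus */

		if (buses > max)
			max = buses;
		printf("NumVFs=%u -> last VF is %u bus(es) past the PF\n",
		       l[i].numvfs, buses);
	}
	printf("max additional buses: %u\n", max);
	return 0;
}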
From b07579c0924eee1543eb6cd2c19544d15a4b5236 Mon Sep 17 00:00:00 2001
From: Wei Yang
Date: Wed, 25 Mar 2015 16:23:48 +0800
Subject: PCI: Export pci_iov_virtfn_bus() and pci_iov_virtfn_devfn()

On PowerNV, some resource reservation is needed for SR-IOV VFs that don't
exist at the bootup stage.  To match resources with VFs, the code needs to
get the VF's BDF in advance.

Rename virtfn_bus() and virtfn_devfn() to pci_iov_virtfn_bus() and
pci_iov_virtfn_devfn() and export them.

[bhelgaas: changelog, make "busnr" int]
Signed-off-by: Wei Yang
Acked-by: Bjorn Helgaas
Signed-off-by: Benjamin Herrenschmidt
---
 drivers/pci/iov.c   | 28 ++++++++++++++++------------
 include/linux/pci.h | 11 +++++++++++
 2 files changed, 27 insertions(+), 12 deletions(-)

(limited to 'drivers/pci')

diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c
index 2ae921f84bd3..5643a1011e23 100644
--- a/drivers/pci/iov.c
+++ b/drivers/pci/iov.c
@@ -19,16 +19,20 @@
 
 #define VIRTFN_ID_LEN	16
 
-static inline u8 virtfn_bus(struct pci_dev *dev, int id)
+int pci_iov_virtfn_bus(struct pci_dev *dev, int vf_id)
 {
+	if (!dev->is_physfn)
+		return -EINVAL;
 	return dev->bus->number + ((dev->devfn + dev->sriov->offset +
-				    dev->sriov->stride * id) >> 8);
+				    dev->sriov->stride * vf_id) >> 8);
 }
 
-static inline u8 virtfn_devfn(struct pci_dev *dev, int id)
+int pci_iov_virtfn_devfn(struct pci_dev *dev, int vf_id)
 {
+	if (!dev->is_physfn)
+		return -EINVAL;
 	return (dev->devfn + dev->sriov->offset +
-		dev->sriov->stride * id) & 0xff;
+		dev->sriov->stride * vf_id) & 0xff;
 }
 
 /*
@@ -58,11 +62,11 @@ static inline u8 virtfn_max_buses(struct pci_dev *dev)
 	struct pci_sriov *iov = dev->sriov;
 	int nr_virtfn;
 	u8 max = 0;
-	u8 busnr;
+	int busnr;
 
 	for (nr_virtfn = 1; nr_virtfn <= iov->total_VFs; nr_virtfn++) {
 		pci_iov_set_numvfs(dev, nr_virtfn);
-		busnr = virtfn_bus(dev, nr_virtfn - 1);
+		busnr = pci_iov_virtfn_bus(dev, nr_virtfn - 1);
 		if (busnr > max)
 			max = busnr;
 	}
@@ -116,7 +120,7 @@ static int virtfn_add(struct pci_dev *dev, int id, int reset)
 	struct pci_bus *bus;
 
 	mutex_lock(&iov->dev->sriov->lock);
-	bus = virtfn_add_bus(dev->bus, virtfn_bus(dev, id));
+	bus = virtfn_add_bus(dev->bus, pci_iov_virtfn_bus(dev, id));
 	if (!bus)
 		goto failed;
 
@@ -124,7 +128,7 @@ static int virtfn_add(struct pci_dev *dev, int id, int reset)
 	if (!virtfn)
 		goto failed0;
 
-	virtfn->devfn = virtfn_devfn(dev, id);
+	virtfn->devfn = pci_iov_virtfn_devfn(dev, id);
 	virtfn->vendor = dev->vendor;
 	pci_read_config_word(dev, iov->pos + PCI_SRIOV_VF_DID, &virtfn->device);
 	pci_setup_device(virtfn);
@@ -186,8 +190,8 @@ static void virtfn_remove(struct pci_dev *dev, int id, int reset)
 	struct pci_sriov *iov = dev->sriov;
 
 	virtfn = pci_get_domain_bus_and_slot(pci_domain_nr(dev->bus),
-					     virtfn_bus(dev, id),
-					     virtfn_devfn(dev, id));
+					     pci_iov_virtfn_bus(dev, id),
+					     pci_iov_virtfn_devfn(dev, id));
 	if (!virtfn)
 		return;
 
@@ -226,7 +230,7 @@ static int sriov_enable(struct pci_dev *dev, int nr_virtfn)
 	struct pci_dev *pdev;
 	struct pci_sriov *iov = dev->sriov;
 	int bars = 0;
-	u8 bus;
+	int bus;
 
 	if (!nr_virtfn)
 		return 0;
@@ -263,7 +267,7 @@ static int sriov_enable(struct pci_dev *dev, int nr_virtfn)
 	iov->offset = offset;
 	iov->stride = stride;
 
-	bus = virtfn_bus(dev, nr_virtfn - 1);
+	bus = pci_iov_virtfn_bus(dev, nr_virtfn - 1);
 	if (bus > dev->bus->busn_res.end) {
 		dev_err(&dev->dev, "can't enable %d VFs (bus %02x out of range of %pR)\n",
 			nr_virtfn, bus, &dev->bus->busn_res);
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 15596582e575..99ea94835fb6 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -1669,6 +1669,9 @@ int pci_ext_cfg_avail(void);
 void __iomem *pci_ioremap_bar(struct pci_dev *pdev, int bar);
 
 #ifdef CONFIG_PCI_IOV
+int pci_iov_virtfn_bus(struct pci_dev *dev, int id);
+int pci_iov_virtfn_devfn(struct pci_dev *dev, int id);
+
 int pci_enable_sriov(struct pci_dev *dev, int nr_virtfn);
 void pci_disable_sriov(struct pci_dev *dev);
 int pci_num_vf(struct pci_dev *dev);
@@ -1677,6 +1680,14 @@ int pci_sriov_set_totalvfs(struct pci_dev *dev, u16 numvfs);
 int pci_sriov_get_totalvfs(struct pci_dev *dev);
 resource_size_t pci_iov_resource_size(struct pci_dev *dev, int resno);
 #else
+static inline int pci_iov_virtfn_bus(struct pci_dev *dev, int id)
+{
+	return -ENOSYS;
+}
+static inline int pci_iov_virtfn_devfn(struct pci_dev *dev, int id)
+{
+	return -ENOSYS;
+}
 static inline int pci_enable_sriov(struct pci_dev *dev, int nr_virtfn)
 { return -ENODEV; }
 static inline void pci_disable_sriov(struct pci_dev *dev) { }
-- 
cgit v1.2.3
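A rough sketch of how platform code can now use the exported helpers while
the VFs are still absent.  This is not the PowerNV implementation; the
function name and the bookkeeping it stands in for are hypothetical:

#include <linux/pci.h>

/* Hypothetical platform helper: note where each future VF will appear so
 * per-VF resources can be reserved before the VFs are created. */
static void example_note_vf_addresses(struct pci_dev *pf, u16 num_vfs)
{
	int i;

	for (i = 0; i < num_vfs; i++) {
		int busnr = pci_iov_virtfn_bus(pf, i);
		int devfn = pci_iov_virtfn_devfn(pf, i);

		if (busnr < 0 || devfn < 0)	/* not a PF */
			return;
		dev_dbg(&pf->dev, "VF%d will be %02x:%02x.%d\n",
			i, busnr, PCI_SLOT(devfn), PCI_FUNC(devfn));
	}
}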
From 995df527f3990d91fe8f3d7d1917a7adc3938320 Mon Sep 17 00:00:00 2001
From: Wei Yang
Date: Wed, 25 Mar 2015 16:23:49 +0800
Subject: PCI: Add pcibios_sriov_enable() and pcibios_sriov_disable()

VFs are dynamically created when a driver enables them.  On some platforms,
like PowerNV, special resources are necessary to enable VFs.

Add platform hooks for enabling and disabling VFs.

Signed-off-by: Wei Yang
Acked-by: Bjorn Helgaas
Signed-off-by: Benjamin Herrenschmidt
---
 drivers/pci/iov.c | 19 +++++++++++++++++++
 1 file changed, 19 insertions(+)

(limited to 'drivers/pci')

diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c
index 5643a1011e23..64c46925c62d 100644
--- a/drivers/pci/iov.c
+++ b/drivers/pci/iov.c
@@ -220,6 +220,11 @@ static void virtfn_remove(struct pci_dev *dev, int id, int reset)
 	pci_dev_put(dev);
 }
 
+int __weak pcibios_sriov_enable(struct pci_dev *pdev, u16 num_vfs)
+{
+	return 0;
+}
+
 static int sriov_enable(struct pci_dev *dev, int nr_virtfn)
 {
 	int rc;
@@ -231,6 +236,7 @@ static int sriov_enable(struct pci_dev *dev, int nr_virtfn)
 	struct pci_sriov *iov = dev->sriov;
 	int bars = 0;
 	int bus;
+	int retval;
 
 	if (!nr_virtfn)
 		return 0;
@@ -307,6 +313,12 @@ static int sriov_enable(struct pci_dev *dev, int nr_virtfn)
 	if (nr_virtfn < initial)
 		initial = nr_virtfn;
 
+	if ((retval = pcibios_sriov_enable(dev, initial))) {
+		dev_err(&dev->dev, "failure %d from pcibios_sriov_enable()\n",
+			retval);
+		return retval;
+	}
+
 	for (i = 0; i < initial; i++) {
 		rc = virtfn_add(dev, i, 0);
 		if (rc)
@@ -335,6 +347,11 @@ failed:
 	return rc;
 }
 
+int __weak pcibios_sriov_disable(struct pci_dev *pdev)
+{
+	return 0;
+}
+
 static void sriov_disable(struct pci_dev *dev)
 {
 	int i;
@@ -346,6 +363,8 @@ static void sriov_disable(struct pci_dev *dev)
 	for (i = 0; i < iov->num_VFs; i++)
 		virtfn_remove(dev, i, 0);
 
+	pcibios_sriov_disable(dev);
+
 	iov->ctrl &= ~(PCI_SRIOV_CTRL_VFE | PCI_SRIOV_CTRL_MSE);
 	pci_cfg_access_lock(dev);
 	pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, iov->ctrl);
-- 
cgit v1.2.3
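Because the hooks are weak symbols, a platform overrides them simply by
defining its own versions.  A minimal hypothetical override might look like
the sketch below; the real PowerNV implementation does considerably more:

#include <linux/pci.h>

int pcibios_sriov_enable(struct pci_dev *pdev, u16 num_vfs)
{
	/* platform-specific setup (windows, IOMMU state, ...) for num_vfs VFs;
	 * returning non-zero makes sriov_enable() fail before any VF is added */
	return 0;
}

int pcibios_sriov_disable(struct pci_dev *pdev)
{
	/* undo whatever pcibios_sriov_enable() set up */
	return 0;
}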
From 978d2d68312326b715a5913aaab1eaf24fe99108 Mon Sep 17 00:00:00 2001
From: Wei Yang
Date: Wed, 25 Mar 2015 16:23:50 +0800
Subject: PCI: Add pcibios_iov_resource_alignment() interface

Per the SR-IOV spec r1.1, sec 3.3.14, the required alignment of a PF's IOV
BAR is the size of an individual VF BAR, and the size consumed is the
individual VF BAR size times NumVFs.

The PowerNV platform has additional alignment requirements to help support
its Partitionable Endpoint device isolation feature (see
Documentation/powerpc/pci_iov_resource_on_powernv.txt).

Add a pcibios_iov_resource_alignment() interface to allow platforms to
request additional alignment.

[bhelgaas: changelog, adapt to reworked pci_sriov_resource_alignment(),
drop "align" parameter]
Signed-off-by: Wei Yang
Acked-by: Bjorn Helgaas
Signed-off-by: Benjamin Herrenschmidt
---
 drivers/pci/iov.c   | 8 +++++++-
 include/linux/pci.h | 1 +
 2 files changed, 8 insertions(+), 1 deletion(-)

(limited to 'drivers/pci')

diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c
index 64c46925c62d..ee0ebff103a4 100644
--- a/drivers/pci/iov.c
+++ b/drivers/pci/iov.c
@@ -569,6 +569,12 @@ int pci_iov_resource_bar(struct pci_dev *dev, int resno)
 		4 * (resno - PCI_IOV_RESOURCES);
 }
 
+resource_size_t __weak pcibios_iov_resource_alignment(struct pci_dev *dev,
+						      int resno)
+{
+	return pci_iov_resource_size(dev, resno);
+}
+
 /**
  * pci_sriov_resource_alignment - get resource alignment for VF BAR
  * @dev: the PCI device
@@ -581,7 +587,7 @@ int pci_iov_resource_bar(struct pci_dev *dev, int resno)
  */
 resource_size_t pci_sriov_resource_alignment(struct pci_dev *dev, int resno)
 {
-	return pci_iov_resource_size(dev, resno);
+	return pcibios_iov_resource_alignment(dev, resno);
 }
 
 /**
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 99ea94835fb6..4e1f17db1a81 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -1174,6 +1174,7 @@ unsigned char pci_bus_max_busnr(struct pci_bus *bus);
 void pci_setup_bridge(struct pci_bus *bus);
 resource_size_t pcibios_window_alignment(struct pci_bus *bus,
 					 unsigned long type);
+resource_size_t pcibios_iov_resource_alignment(struct pci_dev *dev, int resno);
 
 #define PCI_VGA_STATE_CHANGE_BRIDGE (1 << 0)
 #define PCI_VGA_STATE_CHANGE_DECODES (1 << 1)
-- 
cgit v1.2.3
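As with the other hooks, a platform that needs a larger alignment overrides
the weak default.  A hypothetical override that rounds every IOV BAR's
alignment up to an assumed 32 MB segment size could look like this (SZ_32M
here is just a stand-in for a platform constraint):

#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/sizes.h>

resource_size_t pcibios_iov_resource_alignment(struct pci_dev *pdev, int resno)
{
	/* the weak default simply returns pci_iov_resource_size(pdev, resno) */
	return max_t(resource_size_t, pci_iov_resource_size(pdev, resno),
		     SZ_32M);
}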
From d74b9027a4dafa44d3a3c2a44ce135e50a13ec10 Mon Sep 17 00:00:00 2001
From: Wei Yang
Date: Wed, 25 Mar 2015 16:23:51 +0800
Subject: PCI: Consider additional PF's IOV BAR alignment in sizing and assigning

When sizing and assigning resources, we divide the resources into two
lists: the requested list and the additional list.  We don't consider the
alignment of additional VF(n) BAR space.

This is because the alignment required for the VF(n) BAR space is the size
of an individual VF BAR, not the size of the space for *all* VFs.  But we
want additional alignment to support partitioning on PowerNV.

Consider the additional IOV BAR alignment when sizing and assigning
resources.  When there is not enough system MMIO space to accommodate both
the requested list and the additional list, the PF's IOV BAR alignment will
not contribute to the bridge.  When there is enough system MMIO space for
both lists, the additional alignment will contribute to the bridge.

The additional alignment is stored in the min_align of pci_dev_resource,
which is stored in the additional list by add_to_list() at the end of
pbus_size_mem().  The additional alignment is calculated in
pci_resource_alignment().  For an IOV BAR, an arch-dependent function
supplies the alignment for each arch.

[bhelgaas: changelog, printk cast]
Signed-off-by: Wei Yang
Acked-by: Bjorn Helgaas
Signed-off-by: Benjamin Herrenschmidt
---
 drivers/pci/setup-bus.c | 95 ++++++++++++++++++++++++++++++++++++++++---------
 1 file changed, 79 insertions(+), 16 deletions(-)

(limited to 'drivers/pci')

diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c
index e3e17f3c0f0f..6603d401bb7c 100644
--- a/drivers/pci/setup-bus.c
+++ b/drivers/pci/setup-bus.c
@@ -99,8 +99,8 @@ static void remove_from_list(struct list_head *head,
 	}
 }
 
-static resource_size_t get_res_add_size(struct list_head *head,
-					struct resource *res)
+static struct pci_dev_resource *res_to_dev_res(struct list_head *head,
+					       struct resource *res)
 {
 	struct pci_dev_resource *dev_res;
 
@@ -109,17 +109,37 @@ static resource_size_t get_res_add_size(struct list_head *head,
 			int idx = res - &dev_res->dev->resource[0];
 
 			dev_printk(KERN_DEBUG, &dev_res->dev->dev,
-				 "res[%d]=%pR get_res_add_size add_size %llx\n",
+				 "res[%d]=%pR res_to_dev_res add_size %llx min_align %llx\n",
 				 idx, dev_res->res,
-				 (unsigned long long)dev_res->add_size);
+				 (unsigned long long)dev_res->add_size,
+				 (unsigned long long)dev_res->min_align);
 
-			return dev_res->add_size;
+			return dev_res;
 		}
 	}
 
-	return 0;
+	return NULL;
 }
 
+static resource_size_t get_res_add_size(struct list_head *head,
+					struct resource *res)
+{
+	struct pci_dev_resource *dev_res;
+
+	dev_res = res_to_dev_res(head, res);
+	return dev_res ? dev_res->add_size : 0;
+}
+
+static resource_size_t get_res_add_align(struct list_head *head,
+					 struct resource *res)
+{
+	struct pci_dev_resource *dev_res;
+
+	dev_res = res_to_dev_res(head, res);
+	return dev_res ? dev_res->min_align : 0;
+}
+
+
 /* Sort resources by alignment */
 static void pdev_sort_resources(struct pci_dev *dev, struct list_head *head)
 {
@@ -215,7 +235,7 @@ static void reassign_resources_sorted(struct list_head *realloc_head,
 	struct resource *res;
 	struct pci_dev_resource *add_res, *tmp;
 	struct pci_dev_resource *dev_res;
-	resource_size_t add_size;
+	resource_size_t add_size, align;
 	int idx;
 
 	list_for_each_entry_safe(add_res, tmp, realloc_head, list) {
@@ -238,13 +258,13 @@ static void reassign_resources_sorted(struct list_head *realloc_head,
 
 		idx = res - &add_res->dev->resource[0];
 		add_size = add_res->add_size;
+		align = add_res->min_align;
 		if (!resource_size(res)) {
-			res->start = add_res->start;
+			res->start = align;
 			res->end = res->start + add_size - 1;
 			if (pci_assign_resource(add_res->dev, idx))
 				reset_resource(res);
 		} else {
-			resource_size_t align = add_res->min_align;
 			res->flags |= add_res->flags &
 				 (IORESOURCE_STARTALIGN|IORESOURCE_SIZEALIGN);
 			if (pci_reassign_resource(add_res->dev, idx,
@@ -368,8 +388,9 @@ static void __assign_resources_sorted(struct list_head *head,
 	LIST_HEAD(save_head);
 	LIST_HEAD(local_fail_head);
 	struct pci_dev_resource *save_res;
-	struct pci_dev_resource *dev_res, *tmp_res;
+	struct pci_dev_resource *dev_res, *tmp_res, *dev_res2;
 	unsigned long fail_type;
+	resource_size_t add_align, align;
 
 	/* Check if optional add_size is there */
 	if (!realloc_head || list_empty(realloc_head))
@@ -384,10 +405,44 @@ static void __assign_resources_sorted(struct list_head *head,
 	}
 
 	/* Update res in head list with add_size in realloc_head list */
-	list_for_each_entry(dev_res, head, list)
+	list_for_each_entry_safe(dev_res, tmp_res, head, list) {
 		dev_res->res->end += get_res_add_size(realloc_head,
 							dev_res->res);
 
+		/*
+		 * There are two kinds of additional resources in the list:
+		 * 1. bridge resource  -- IORESOURCE_STARTALIGN
+		 * 2. SR-IOV resource  -- IORESOURCE_SIZEALIGN
+		 * Here just fix the additional alignment for bridge
+		 */
+		if (!(dev_res->res->flags & IORESOURCE_STARTALIGN))
+			continue;
+
+		add_align = get_res_add_align(realloc_head, dev_res->res);
+
+		/*
+		 * The "head" list is sorted by the alignment to make sure
+		 * resources with bigger alignment will be assigned first.
+		 * After we change the alignment of a dev_res in "head" list,
+		 * we need to reorder the list by alignment to make it
+		 * consistent.
+		 */
+		if (add_align > dev_res->res->start) {
+			dev_res->res->start = add_align;
+			dev_res->res->end = add_align +
+				            resource_size(dev_res->res);
+
+			list_for_each_entry(dev_res2, head, list) {
+				align = pci_resource_alignment(dev_res2->dev,
+							       dev_res2->res);
+				if (add_align > align)
+					list_move_tail(&dev_res->list,
+						       &dev_res2->list);
+			}
+		}
+
+	}
+
 	/* Try updated head list with add_size added */
 	assign_requested_resources_sorted(head, &local_fail_head);
 
@@ -962,6 +1017,8 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask,
 	struct resource *b_res = find_free_bus_resource(bus,
 					mask | IORESOURCE_PREFETCH, type);
 	resource_size_t children_add_size = 0;
+	resource_size_t children_add_align = 0;
+	resource_size_t add_align = 0;
 
 	if (!b_res)
 		return -ENOSPC;
@@ -986,6 +1043,7 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask,
 			/* put SRIOV requested res to the optional list */
 			if (realloc_head && i >= PCI_IOV_RESOURCES &&
 					i <= PCI_IOV_RESOURCE_END) {
+				add_align = max(pci_resource_alignment(dev, r), add_align);
 				r->end = r->start - 1;
 				add_to_list(realloc_head, dev, r, r_size, 0/* don't care */);
 				children_add_size += r_size;
@@ -1016,19 +1074,23 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask,
 			if (order > max_order)
 				max_order = order;
 
-			if (realloc_head)
+			if (realloc_head) {
 				children_add_size += get_res_add_size(realloc_head, r);
+				children_add_align = get_res_add_align(realloc_head, r);
+				add_align = max(add_align, children_add_align);
+			}
 		}
 	}
 
 	min_align = calculate_mem_align(aligns, max_order);
 	min_align = max(min_align, window_alignment(bus, b_res->flags));
 	size0 = calculate_memsize(size, min_size, 0, resource_size(b_res), min_align);
+	add_align = max(min_align, add_align);
 	if (children_add_size > add_size)
 		add_size = children_add_size;
 	size1 = (!realloc_head || (realloc_head && !add_size)) ? size0 :
 		calculate_memsize(size, min_size, add_size,
-				resource_size(b_res), min_align);
+				resource_size(b_res), add_align);
 	if (!size0 && !size1) {
 		if (b_res->start || b_res->end)
 			dev_info(&bus->self->dev, "disabling bridge window %pR to %pR (unused)\n",
@@ -1040,10 +1102,11 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask,
 	b_res->end = size0 + min_align - 1;
 	b_res->flags |= IORESOURCE_STARTALIGN;
 	if (size1 > size0 && realloc_head) {
-		add_to_list(realloc_head, bus->self, b_res, size1-size0, min_align);
-		dev_printk(KERN_DEBUG, &bus->self->dev, "bridge window %pR to %pR add_size %llx\n",
+		add_to_list(realloc_head, bus->self, b_res, size1-size0, add_align);
+		dev_printk(KERN_DEBUG, &bus->self->dev, "bridge window %pR to %pR add_size %llx add_align %llx\n",
 			   b_res, &bus->busn_res,
-			   (unsigned long long)size1-size0);
+			   (unsigned long long) (size1 - size0),
+			   (unsigned long long) add_align);
 	}
 	return 0;
 }
-- 
cgit v1.2.3
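A small, self-contained illustration (hypothetical numbers, plain userspace
C) of what the larger add_align buys: the same VF(n) BAR space placed with
the default per-VF alignment versus a platform-requested alignment.

#include <stdio.h>
#include <stdint.h>

/* round x up to the next multiple of a (a must be a power of two) */
static uint64_t align_up(uint64_t x, uint64_t a)
{
	return (x + a - 1) & ~(a - 1);
}

int main(void)
{
	uint64_t next_free = 0x90000000 + 0x00100000;	/* after other BARs */
	uint64_t vf_bar = 0x00800000;	/* 8 MB per-VF BAR = default alignment */
	uint64_t plat_align = 0x10000000;	/* assumed 256 MB platform alignment */

	printf("default alignment:  VF(n) space starts at 0x%jx\n",
	       (uintmax_t)align_up(next_free, vf_bar));
	printf("platform alignment: VF(n) space starts at 0x%jx\n",
	       (uintmax_t)align_up(next_free, plat_align));
	return 0;
}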