author | Alexey Kardashevskiy <aik@ozlabs.ru> | 2016-04-29 11:55:24 +0300
---|---|---
committer | Michael Ellerman <mpe@ellerman.id.au> | 2016-05-11 14:54:31 +0300
commit | b5cb9ab1a00b112fcb96164c814f1f111deeafba (patch) |
tree | 22196f630c24217b3a5d23d8c999831a50157e5f /arch/powerpc/platforms/powernv/npu-dma.c |
parent | 85674868cecebdf6eb7239ecf9c32b6273208d03 (diff) |
powerpc/powernv/npu: Enable NVLink pass through
IBM POWER8 NVLink systems come with Tesla K40-ish GPUs, each of which
also has a couple of high-speed links (NVLink). The interface to these links
is exposed as an emulated PCI bridge which is included in the same
IOMMU group as the corresponding GPU.
In the kernel, NPUs get a separate PHB of the PNV_PHB_NPU type and a PE,
which behave pretty much like a standard IODA2 PHB, except that the NPU PHB
has just a single TVE in the hardware. This means it can have either
a 32bit window, a 64bit window, or DMA bypass, but never two of these at once.
In order to make these links work when the GPU is passed through to a guest,
these bridges need to be passed through as well; otherwise performance will
degrade.
This implements and exports an API to manage the NPU state with respect to
VFIO; it replicates iommu_table_group_ops.
This defines a new pnv_pci_ioda2_npu_ops which is assigned to
the IODA2 bridge if there are NPUs for a GPU on the bridge.
The new callbacks call the default IODA2 callbacks plus the new NPU API.
This adds a gpe_table_group_to_npe() helper to find the NPU PE for an IODA2
table_group; it is not expected to fail as the helper is only called
from the pnv_pci_ioda2_npu_ops callbacks.
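To make the callback wrapping concrete, here is a minimal sketch of what one of the pnv_pci_ioda2_npu_ops callbacks could look like. The callback name, the default IODA2 helpers (pnv_pci_ioda2_set_window()/pnv_pci_ioda2_unset_window()) and the rollback on failure are assumptions drawn from this description, not a copy of pci-ioda.c:

```c
/*
 * Sketch only: run the default IODA2 callback first, then mirror the
 * change on the NPU PE found via gpe_table_group_to_npe().
 */
static long pnv_pci_ioda2_npu_set_window(struct iommu_table_group *table_group,
		int num, struct iommu_table *tbl)
{
	struct pnv_ioda_pe *npe = gpe_table_group_to_npe(table_group);
	long ret = pnv_pci_ioda2_set_window(table_group, num, tbl);

	if (ret)
		return ret;

	/* Replay the window on the NPU; undo the GPU window on failure */
	ret = pnv_npu_set_window(npe, num, tbl);
	if (ret)
		pnv_pci_ioda2_unset_window(table_group, num);

	return ret;
}
```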
This does not define an NPU-specific .release_ownership(), so after
VFIO is finished, DMA on the NPU is disabled. This is fine as the NVIDIA
driver sets the DMA mask when probing, which enables 32bit or 64bit DMA on the NPU.
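For context, the sketch below is a hypothetical, generic GPU driver probe fragment (not the NVIDIA driver's actual code). It shows the DMA mask call that, on these systems, presumably ends up re-programming the NPU through the platform dma_set_mask path and pnv_npu_try_dma_set_bypass():

```c
#include <linux/dma-mapping.h>
#include <linux/pci.h>

/*
 * Hypothetical probe fragment: the dma_set_mask*() call is what re-enables
 * NPU DMA once the device is handed back from VFIO (bypass for a 64bit
 * mask, the 32bit TCE window otherwise).
 */
static int example_gpu_probe(struct pci_dev *pdev,
			     const struct pci_device_id *id)
{
	if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)))
		/* Fall back to a 32bit mask -> 32bit DMA window on the NPU */
		return dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));

	return 0;
}
```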
This adds a pnv_pci_npu_setup_iommu() helper which adds NPUs to
the GPU's IOMMU group if any are found. The helper uses the existing device
tree helpers to look for the "ibm,gpu" property, which is a phandle of
the corresponding GPU.
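The device tree side of that lookup can be illustrated as follows; this is a simplified sketch using generic OF helpers, not the internal npu-dma.c helper itself:

```c
#include <linux/of.h>
#include <linux/pci.h>

/*
 * Illustration only: the emulated NVLink bridge's device tree node carries
 * an "ibm,gpu" phandle pointing at the GPU it serves.  npu-dma.c resolves
 * this phandle and then maps the node back to the GPU's struct pci_dev.
 */
static struct device_node *example_get_gpu_node(struct pci_dev *npdev)
{
	struct device_node *dn = pci_device_to_OF_node(npdev);

	if (!dn)
		return NULL;

	/* Follow the "ibm,gpu" phandle from the bridge node to the GPU node */
	return of_parse_phandle(dn, "ibm,gpu", 0);
}
```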
This adds an additional loop over PEs in pnv_ioda_setup_dma(), because the main
loop skips NPU PEs as they do not have 32bit DMA segments.
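Given the pnv_pci_ioda_setup_iommu_api() name mentioned in the committer note below, the extra pass over NPU PEs presumably looks roughly like this sketch; list and field names follow the usual powernv conventions but are not copied from pci-ioda.c:

```c
/*
 * Rough sketch of the additional pass: for every NPU PHB, attach each NPU
 * PE's devices to the IOMMU group of the GPU it belongs to.
 */
static void pnv_pci_ioda_setup_iommu_api(void)
{
	struct pci_controller *hose, *tmp;
	struct pnv_phb *phb;
	struct pnv_ioda_pe *pe;

	list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
		phb = hose->private_data;

		/* The main DMA setup loop skipped these, handle them here */
		if (phb->type != PNV_PHB_NPU)
			continue;

		list_for_each_entry(pe, &phb->ioda.pe_list, list)
			pnv_pci_npu_setup_iommu(pe);
	}
}
```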
As pnv_npu_set_window() and pnv_npu_unset_window() are now used
by the new IODA2-NPU IOMMU group, this makes the helpers public and
adds a DMA window number parameter.
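The resulting public prototypes (declaration site assumed to be the powernv pci.h header) match the signatures visible in the diff below:

```c
extern long pnv_npu_set_window(struct pnv_ioda_pe *npe, int num,
		struct iommu_table *tbl);
extern long pnv_npu_unset_window(struct pnv_ioda_pe *npe, int num);
```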
Signed-off-by: Alexey Kardashevskiy <aik@ozlabs.ru>
Reviewed-by: Alistair Popple <alistair@popple.id.au>
[mpe: Add pnv_pci_ioda_setup_iommu_api() to fix build with IOMMU_API=n]
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Diffstat (limited to 'arch/powerpc/platforms/powernv/npu-dma.c')
-rw-r--r-- | arch/powerpc/platforms/powernv/npu-dma.c | 64
1 file changed, 58 insertions, 6 deletions
```diff
diff --git a/arch/powerpc/platforms/powernv/npu-dma.c b/arch/powerpc/platforms/powernv/npu-dma.c
index cb2d1dad38a8..0459e100b4e7 100644
--- a/arch/powerpc/platforms/powernv/npu-dma.c
+++ b/arch/powerpc/platforms/powernv/npu-dma.c
@@ -12,6 +12,7 @@
 #include <linux/export.h>
 #include <linux/pci.h>
 #include <linux/memblock.h>
+#include <linux/iommu.h>
 
 #include <asm/iommu.h>
 #include <asm/pnv-pci.h>
@@ -154,7 +155,7 @@ static struct pnv_ioda_pe *get_gpu_pci_dev_and_pe(struct pnv_ioda_pe *npe,
 	return pe;
 }
 
-static long pnv_npu_set_window(struct pnv_ioda_pe *npe,
+long pnv_npu_set_window(struct pnv_ioda_pe *npe, int num,
 		struct iommu_table *tbl)
 {
 	struct pnv_phb *phb = npe->phb;
@@ -182,13 +183,13 @@ static long pnv_npu_set_window(struct pnv_ioda_pe *npe,
 	pnv_pci_ioda2_tce_invalidate_entire(phb, false);
 
 	/* Add the table to the list so its TCE cache will get invalidated */
-	pnv_pci_link_table_and_group(phb->hose->node, 0,
+	pnv_pci_link_table_and_group(phb->hose->node, num,
 			tbl, &npe->table_group);
 
 	return 0;
 }
 
-static long pnv_npu_unset_window(struct pnv_ioda_pe *npe)
+long pnv_npu_unset_window(struct pnv_ioda_pe *npe, int num)
 {
 	struct pnv_phb *phb = npe->phb;
 	int64_t rc;
@@ -205,7 +206,7 @@ static long pnv_npu_unset_window(struct pnv_ioda_pe *npe)
 	}
 	pnv_pci_ioda2_tce_invalidate_entire(phb, false);
 
-	pnv_pci_unlink_table_and_group(npe->table_group.tables[0],
+	pnv_pci_unlink_table_and_group(npe->table_group.tables[num],
 			&npe->table_group);
 
 	return 0;
@@ -231,7 +232,7 @@ static void pnv_npu_dma_set_32(struct pnv_ioda_pe *npe)
 	if (!gpe)
 		return;
 
-	rc = pnv_npu_set_window(npe, gpe->table_group.tables[0]);
+	rc = pnv_npu_set_window(npe, 0, gpe->table_group.tables[0]);
 
 	/*
 	 * We don't initialise npu_pe->tce32_table as we always use
@@ -255,7 +256,7 @@ static int pnv_npu_dma_set_bypass(struct pnv_ioda_pe *npe)
 	if (phb->type != PNV_PHB_NPU || !npe->pdev)
 		return -EINVAL;
 
-	rc = pnv_npu_unset_window(npe);
+	rc = pnv_npu_unset_window(npe, 0);
 	if (rc != OPAL_SUCCESS)
 		return rc;
 
@@ -307,3 +308,54 @@ void pnv_npu_try_dma_set_bypass(struct pci_dev *gpdev, bool bypass)
 		}
 	}
 }
+
+/* Switch ownership from platform code to external user (e.g. VFIO) */
+void pnv_npu_take_ownership(struct pnv_ioda_pe *npe)
+{
+	struct pnv_phb *phb = npe->phb;
+	int64_t rc;
+
+	/*
+	 * Note: NPU has just a single TVE in the hardware which means that
+	 * while used by the kernel, it can have either 32bit window or
+	 * DMA bypass but never both. So we deconfigure 32bit window only
+	 * if it was enabled at the moment of ownership change.
+	 */
+	if (npe->table_group.tables[0]) {
+		pnv_npu_unset_window(npe, 0);
+		return;
+	}
+
+	/* Disable bypass */
+	rc = opal_pci_map_pe_dma_window_real(phb->opal_id,
+			npe->pe_number, npe->pe_number,
+			0 /* bypass base */, 0);
+	if (rc) {
+		pe_err(npe, "Failed to disable bypass, err %lld\n", rc);
+		return;
+	}
+	pnv_pci_ioda2_tce_invalidate_entire(npe->phb, false);
+}
+
+struct pnv_ioda_pe *pnv_pci_npu_setup_iommu(struct pnv_ioda_pe *npe)
+{
+	struct pnv_phb *phb = npe->phb;
+	struct pci_bus *pbus = phb->hose->bus;
+	struct pci_dev *npdev, *gpdev = NULL, *gptmp;
+	struct pnv_ioda_pe *gpe = get_gpu_pci_dev_and_pe(npe, &gpdev);
+
+	if (!gpe || !gpdev)
+		return NULL;
+
+	list_for_each_entry(npdev, &pbus->devices, bus_list) {
+		gptmp = pnv_pci_get_gpu_dev(npdev);
+
+		if (gptmp != gpdev)
+			continue;
+
+		pe_info(gpe, "Attached NPU %s\n", dev_name(&npdev->dev));
+		iommu_group_add_device(gpe->table_group.group, &npdev->dev);
+	}
+
+	return gpe;
+}
```