Diffstat (limited to 'drivers')
-rw-r--r--	drivers/vfio/mdev/mdev_private.h                          |   2
-rw-r--r--	drivers/vfio/pci/Kconfig                                  |   6
-rw-r--r--	drivers/vfio/pci/Makefile                                 |   1
-rw-r--r--	drivers/vfio/pci/vfio_pci.c                               |  21
-rw-r--r--	drivers/vfio/pci/vfio_pci_config.c                        |   2
-rw-r--r--	drivers/vfio/pci/vfio_pci_igd.c                           |  53
-rw-r--r--	drivers/vfio/pci/vfio_pci_nvlink2.c                       | 490
-rw-r--r--	drivers/vfio/pci/vfio_pci_private.h                       |  14
-rw-r--r--	drivers/vfio/platform/reset/vfio_platform_calxedaxgmac.c |   2
-rw-r--r--	drivers/vfio/vfio_iommu_type1.c                           |  14
10 files changed, 64 insertions(+), 541 deletions(-)
diff --git a/drivers/vfio/mdev/mdev_private.h b/drivers/vfio/mdev/mdev_private.h
index 74c2e5411469..fdbb3bee99a9 100644
--- a/drivers/vfio/mdev/mdev_private.h
+++ b/drivers/vfio/mdev/mdev_private.h
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
- * Mediated device interal definitions
+ * Mediated device internal definitions
  *
  * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
  *     Author: Neo Jia <cjia@nvidia.com>
diff --git a/drivers/vfio/pci/Kconfig b/drivers/vfio/pci/Kconfig
index 4abddbebd4b2..53ce78d7d07b 100644
--- a/drivers/vfio/pci/Kconfig
+++ b/drivers/vfio/pci/Kconfig
@@ -39,9 +39,3 @@ config VFIO_PCI_IGD
 	  and LPC bridge config space.
 
 	  To enable Intel IGD assignment through vfio-pci, say Y.
-
-config VFIO_PCI_NVLINK2
-	def_bool y
-	depends on VFIO_PCI && PPC_POWERNV && SPAPR_TCE_IOMMU
-	help
-	  VFIO PCI support for P9 Witherspoon machine with NVIDIA V100 GPUs
diff --git a/drivers/vfio/pci/Makefile b/drivers/vfio/pci/Makefile
index eff97a7cd9f1..3ff42093962f 100644
--- a/drivers/vfio/pci/Makefile
+++ b/drivers/vfio/pci/Makefile
@@ -2,7 +2,6 @@
 
 vfio-pci-y := vfio_pci.o vfio_pci_intrs.o vfio_pci_rdwr.o vfio_pci_config.o
 vfio-pci-$(CONFIG_VFIO_PCI_IGD) += vfio_pci_igd.o
-vfio-pci-$(CONFIG_VFIO_PCI_NVLINK2) += vfio_pci_nvlink2.o
 vfio-pci-$(CONFIG_S390) += vfio_pci_zdev.o
 
 obj-$(CONFIG_VFIO_PCI) += vfio-pci.o
diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c
index 55ef27a15d4d..882e890dca54 100644
--- a/drivers/vfio/pci/vfio_pci.c
+++ b/drivers/vfio/pci/vfio_pci.c
@@ -378,7 +378,6 @@ static int vfio_pci_enable(struct vfio_pci_device *vdev)
 
 	if (!vfio_vga_disabled() && vfio_pci_is_vga(pdev))
 		vdev->has_vga = true;
-
 	if (vfio_pci_is_vga(pdev) &&
 	    pdev->vendor == PCI_VENDOR_ID_INTEL &&
 	    IS_ENABLED(CONFIG_VFIO_PCI_IGD)) {
@@ -389,24 +388,6 @@ static int vfio_pci_enable(struct vfio_pci_device *vdev)
 		}
 	}
 
-	if (pdev->vendor == PCI_VENDOR_ID_NVIDIA &&
-	    IS_ENABLED(CONFIG_VFIO_PCI_NVLINK2)) {
-		ret = vfio_pci_nvdia_v100_nvlink2_init(vdev);
-		if (ret && ret != -ENODEV) {
-			pci_warn(pdev, "Failed to setup NVIDIA NV2 RAM region\n");
-			goto disable_exit;
-		}
-	}
-
-	if (pdev->vendor == PCI_VENDOR_ID_IBM &&
-	    IS_ENABLED(CONFIG_VFIO_PCI_NVLINK2)) {
-		ret = vfio_pci_ibm_npu2_init(vdev);
-		if (ret && ret != -ENODEV) {
-			pci_warn(pdev, "Failed to setup NVIDIA NV2 ATSD region\n");
-			goto disable_exit;
-		}
-	}
-
 	vfio_pci_probe_mmaps(vdev);
 
 	return 0;
@@ -2434,7 +2415,7 @@ static int __init vfio_pci_init(void)
 {
 	int ret;
 
-	/* Allocate shared config space permision data used by all devices */
+	/* Allocate shared config space permission data used by all devices */
 	ret = vfio_pci_init_perm_bits();
 	if (ret)
 		return ret;
diff --git a/drivers/vfio/pci/vfio_pci_config.c b/drivers/vfio/pci/vfio_pci_config.c
index a402adee8a21..d57f037f65b8 100644
--- a/drivers/vfio/pci/vfio_pci_config.c
+++ b/drivers/vfio/pci/vfio_pci_config.c
@@ -101,7 +101,7 @@ static const u16 pci_ext_cap_length[PCI_EXT_CAP_ID_MAX + 1] = {
 /*
  * Read/Write Permission Bits - one bit for each bit in capability
  * Any field can be read if it exists, but what is read depends on
- * whether the field is 'virtualized', or just pass thru to the
+ * whether the field is 'virtualized', or just pass through to the
  * hardware.  Any virtualized field is also virtualized for writes.
  * Writes are only permitted if they have a 1 bit here.
  */
diff --git a/drivers/vfio/pci/vfio_pci_igd.c b/drivers/vfio/pci/vfio_pci_igd.c
index e66dfb0178ed..228df565e9bc 100644
--- a/drivers/vfio/pci/vfio_pci_igd.c
+++ b/drivers/vfio/pci/vfio_pci_igd.c
@@ -21,6 +21,10 @@
 #define OPREGION_SIZE		(8 * 1024)
 #define OPREGION_PCI_ADDR	0xfc
 
+#define OPREGION_RVDA		0x3ba
+#define OPREGION_RVDS		0x3c2
+#define OPREGION_VERSION	0x16
+
 static size_t vfio_pci_igd_rw(struct vfio_pci_device *vdev, char __user *buf,
 			      size_t count, loff_t *ppos, bool iswrite)
 {
@@ -58,6 +62,7 @@ static int vfio_pci_igd_opregion_init(struct vfio_pci_device *vdev)
 	u32 addr, size;
 	void *base;
 	int ret;
+	u16 version;
 
 	ret = pci_read_config_dword(vdev->pdev, OPREGION_PCI_ADDR, &addr);
 	if (ret)
@@ -83,6 +88,54 @@ static int vfio_pci_igd_opregion_init(struct vfio_pci_device *vdev)
 
 	size *= 1024; /* In KB */
 
+	/*
+	 * Support opregion v2.1+
+	 * When VBT data exceeds 6KB size and cannot be within mailbox #4, then
+	 * the Extended VBT region next to opregion is used to hold the VBT data.
+	 * RVDA (Relative Address of VBT Data from Opregion Base) and RVDS
+	 * (Raw VBT Data Size) from opregion structure member are used to hold the
+	 * address from region base and size of VBT data. RVDA/RVDS are not
+	 * defined before opregion 2.0.
+	 *
+	 * opregion 2.1+: RVDA is unsigned, relative offset from
+	 * opregion base, and should point to the end of opregion.
+	 * otherwise, exposing to userspace to allow read access to everything between
+	 * the OpRegion and VBT is not safe.
+	 * RVDS is defined as size in bytes.
+	 *
+	 * opregion 2.0: rvda is the physical VBT address.
+	 * Since rvda is HPA it cannot be directly used in guest.
+	 * And it should not be practically available for end user,so it is not supported.
+	 */
+	version = le16_to_cpu(*(__le16 *)(base + OPREGION_VERSION));
+	if (version >= 0x0200) {
+		u64 rvda;
+		u32 rvds;
+
+		rvda = le64_to_cpu(*(__le64 *)(base + OPREGION_RVDA));
+		rvds = le32_to_cpu(*(__le32 *)(base + OPREGION_RVDS));
+		if (rvda && rvds) {
+			/* no support for opregion v2.0 with physical VBT address */
+			if (version == 0x0200) {
+				memunmap(base);
+				pci_err(vdev->pdev,
+					"IGD assignment does not support opregion v2.0 with an extended VBT region\n");
+				return -EINVAL;
+			}
+
+			if (rvda != size) {
+				memunmap(base);
+				pci_err(vdev->pdev,
+					"Extended VBT does not follow opregion on version 0x%04x\n",
+					version);
+				return -EINVAL;
+			}
+
+			/* region size for opregion v2.0+: opregion and VBT size. */
+			size += rvds;
+		}
+	}
+
 	if (size != OPREGION_SIZE) {
 		memunmap(base);
 		base = memremap(addr, size, MEMREMAP_WB);
diff --git a/drivers/vfio/pci/vfio_pci_nvlink2.c b/drivers/vfio/pci/vfio_pci_nvlink2.c
deleted file mode 100644
index 9adcf6a8f888..000000000000
--- a/drivers/vfio/pci/vfio_pci_nvlink2.c
+++ /dev/null
@@ -1,490 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * VFIO PCI NVIDIA Whitherspoon GPU support a.k.a. NVLink2.
- *
- * Copyright (C) 2018 IBM Corp.  All rights reserved.
- *     Author: Alexey Kardashevskiy <aik@ozlabs.ru>
- *
- * Register an on-GPU RAM region for cacheable access.
- *
- * Derived from original vfio_pci_igd.c:
- * Copyright (C) 2016 Red Hat, Inc.  All rights reserved.
- *	Author: Alex Williamson <alex.williamson@redhat.com>
- */
-
-#include <linux/io.h>
-#include <linux/pci.h>
-#include <linux/uaccess.h>
-#include <linux/vfio.h>
-#include <linux/sched/mm.h>
-#include <linux/mmu_context.h>
-#include <asm/kvm_ppc.h>
-#include "vfio_pci_private.h"
-
-#define CREATE_TRACE_POINTS
-#include "trace.h"
-
-EXPORT_TRACEPOINT_SYMBOL_GPL(vfio_pci_nvgpu_mmap_fault);
-EXPORT_TRACEPOINT_SYMBOL_GPL(vfio_pci_nvgpu_mmap);
-EXPORT_TRACEPOINT_SYMBOL_GPL(vfio_pci_npu2_mmap);
-
-struct vfio_pci_nvgpu_data {
-	unsigned long gpu_hpa;	/* GPU RAM physical address */
-	unsigned long gpu_tgt;	/* TGT address of corresponding GPU RAM */
-	unsigned long useraddr;	/* GPU RAM userspace address */
-	unsigned long size;	/* Size of the GPU RAM window (usually 128GB) */
-	struct mm_struct *mm;
-	struct mm_iommu_table_group_mem_t *mem;	/* Pre-registered RAM descr. */
-	struct pci_dev *gpdev;
-	struct notifier_block group_notifier;
-};
-
-static size_t vfio_pci_nvgpu_rw(struct vfio_pci_device *vdev,
-		char __user *buf, size_t count, loff_t *ppos, bool iswrite)
-{
-	unsigned int i = VFIO_PCI_OFFSET_TO_INDEX(*ppos) - VFIO_PCI_NUM_REGIONS;
-	struct vfio_pci_nvgpu_data *data = vdev->region[i].data;
-	loff_t pos = *ppos & VFIO_PCI_OFFSET_MASK;
-	loff_t posaligned = pos & PAGE_MASK, posoff = pos & ~PAGE_MASK;
-	size_t sizealigned;
-	void __iomem *ptr;
-
-	if (pos >= vdev->region[i].size)
-		return -EINVAL;
-
-	count = min(count, (size_t)(vdev->region[i].size - pos));
-
-	/*
-	 * We map only a bit of GPU RAM for a short time instead of mapping it
-	 * for the guest lifetime as:
-	 *
-	 * 1) we do not know GPU RAM size, only aperture which is 4-8 times
-	 * bigger than actual RAM size (16/32GB RAM vs. 128GB aperture);
-	 * 2) mapping GPU RAM allows CPU to prefetch and if this happens
-	 * before NVLink bridge is reset (which fences GPU RAM),
-	 * hardware management interrupts (HMI) might happen, this
-	 * will freeze NVLink bridge.
-	 *
-	 * This is not fast path anyway.
-	 */
-	sizealigned = ALIGN(posoff + count, PAGE_SIZE);
-	ptr = ioremap_cache(data->gpu_hpa + posaligned, sizealigned);
-	if (!ptr)
-		return -EFAULT;
-
-	if (iswrite) {
-		if (copy_from_user(ptr + posoff, buf, count))
-			count = -EFAULT;
-		else
-			*ppos += count;
-	} else {
-		if (copy_to_user(buf, ptr + posoff, count))
-			count = -EFAULT;
-		else
-			*ppos += count;
-	}
-
-	iounmap(ptr);
-
-	return count;
-}
-
-static void vfio_pci_nvgpu_release(struct vfio_pci_device *vdev,
-		struct vfio_pci_region *region)
-{
-	struct vfio_pci_nvgpu_data *data = region->data;
-	long ret;
-
-	/* If there were any mappings at all... */
-	if (data->mm) {
-		if (data->mem) {
-			ret = mm_iommu_put(data->mm, data->mem);
-			WARN_ON(ret);
-		}
-
-		mmdrop(data->mm);
-	}
-
-	vfio_unregister_notifier(&data->gpdev->dev, VFIO_GROUP_NOTIFY,
-			&data->group_notifier);
-
-	pnv_npu2_unmap_lpar_dev(data->gpdev);
-
-	kfree(data);
-}
-
-static vm_fault_t vfio_pci_nvgpu_mmap_fault(struct vm_fault *vmf)
-{
-	vm_fault_t ret;
-	struct vm_area_struct *vma = vmf->vma;
-	struct vfio_pci_region *region = vma->vm_private_data;
-	struct vfio_pci_nvgpu_data *data = region->data;
-	unsigned long vmf_off = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
-	unsigned long nv2pg = data->gpu_hpa >> PAGE_SHIFT;
-	unsigned long vm_pgoff = vma->vm_pgoff &
-		((1U << (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT)) - 1);
-	unsigned long pfn = nv2pg + vm_pgoff + vmf_off;
-
-	ret = vmf_insert_pfn(vma, vmf->address, pfn);
-	trace_vfio_pci_nvgpu_mmap_fault(data->gpdev, pfn << PAGE_SHIFT,
-			vmf->address, ret);
-
-	return ret;
-}
-
-static const struct vm_operations_struct vfio_pci_nvgpu_mmap_vmops = {
-	.fault = vfio_pci_nvgpu_mmap_fault,
-};
-
-static int vfio_pci_nvgpu_mmap(struct vfio_pci_device *vdev,
-		struct vfio_pci_region *region, struct vm_area_struct *vma)
-{
-	int ret;
-	struct vfio_pci_nvgpu_data *data = region->data;
-
-	if (data->useraddr)
-		return -EPERM;
-
-	if (vma->vm_end - vma->vm_start > data->size)
-		return -EINVAL;
-
-	vma->vm_private_data = region;
-	vma->vm_flags |= VM_PFNMAP;
-	vma->vm_ops = &vfio_pci_nvgpu_mmap_vmops;
-
-	/*
-	 * Calling mm_iommu_newdev() here once as the region is not
-	 * registered yet and therefore right initialization will happen now.
-	 * Other places will use mm_iommu_find() which returns
-	 * registered @mem and does not go gup().
-	 */
-	data->useraddr = vma->vm_start;
-	data->mm = current->mm;
-
-	mmgrab(data->mm);
-	ret = (int) mm_iommu_newdev(data->mm, data->useraddr,
-			vma_pages(vma), data->gpu_hpa, &data->mem);
-
-	trace_vfio_pci_nvgpu_mmap(vdev->pdev, data->gpu_hpa, data->useraddr,
-			vma->vm_end - vma->vm_start, ret);
-
-	return ret;
-}
-
-static int vfio_pci_nvgpu_add_capability(struct vfio_pci_device *vdev,
-		struct vfio_pci_region *region, struct vfio_info_cap *caps)
-{
-	struct vfio_pci_nvgpu_data *data = region->data;
-	struct vfio_region_info_cap_nvlink2_ssatgt cap = {
-		.header.id = VFIO_REGION_INFO_CAP_NVLINK2_SSATGT,
-		.header.version = 1,
-		.tgt = data->gpu_tgt
-	};
-
-	return vfio_info_add_capability(caps, &cap.header, sizeof(cap));
-}
-
-static const struct vfio_pci_regops vfio_pci_nvgpu_regops = {
-	.rw = vfio_pci_nvgpu_rw,
-	.release = vfio_pci_nvgpu_release,
-	.mmap = vfio_pci_nvgpu_mmap,
-	.add_capability = vfio_pci_nvgpu_add_capability,
-};
-
-static int vfio_pci_nvgpu_group_notifier(struct notifier_block *nb,
-		unsigned long action, void *opaque)
-{
-	struct kvm *kvm = opaque;
-	struct vfio_pci_nvgpu_data *data = container_of(nb,
-			struct vfio_pci_nvgpu_data,
-			group_notifier);
-
-	if (action == VFIO_GROUP_NOTIFY_SET_KVM && kvm &&
-			pnv_npu2_map_lpar_dev(data->gpdev,
-				kvm->arch.lpid, MSR_DR | MSR_PR))
-		return NOTIFY_BAD;
-
-	return NOTIFY_OK;
-}
-
-int vfio_pci_nvdia_v100_nvlink2_init(struct vfio_pci_device *vdev)
-{
-	int ret;
-	u64 reg[2];
-	u64 tgt = 0;
-	struct device_node *npu_node, *mem_node;
-	struct pci_dev *npu_dev;
-	struct vfio_pci_nvgpu_data *data;
-	uint32_t mem_phandle = 0;
-	unsigned long events = VFIO_GROUP_NOTIFY_SET_KVM;
-
-	/*
-	 * PCI config space does not tell us about NVLink presense but
-	 * platform does, use this.
-	 */
-	npu_dev = pnv_pci_get_npu_dev(vdev->pdev, 0);
-	if (!npu_dev)
-		return -ENODEV;
-
-	npu_node = pci_device_to_OF_node(npu_dev);
-	if (!npu_node)
-		return -EINVAL;
-
-	if (of_property_read_u32(npu_node, "memory-region", &mem_phandle))
-		return -ENODEV;
-
-	mem_node = of_find_node_by_phandle(mem_phandle);
-	if (!mem_node)
-		return -EINVAL;
-
-	if (of_property_read_variable_u64_array(mem_node, "reg", reg,
-				ARRAY_SIZE(reg), ARRAY_SIZE(reg)) !=
-			ARRAY_SIZE(reg))
-		return -EINVAL;
-
-	if (of_property_read_u64(npu_node, "ibm,device-tgt-addr", &tgt)) {
-		dev_warn(&vdev->pdev->dev, "No ibm,device-tgt-addr found\n");
-		return -EFAULT;
-	}
-
-	data = kzalloc(sizeof(*data), GFP_KERNEL);
-	if (!data)
-		return -ENOMEM;
-
-	data->gpu_hpa = reg[0];
-	data->gpu_tgt = tgt;
-	data->size = reg[1];
-
-	dev_dbg(&vdev->pdev->dev, "%lx..%lx\n", data->gpu_hpa,
-			data->gpu_hpa + data->size - 1);
-
-	data->gpdev = vdev->pdev;
-	data->group_notifier.notifier_call = vfio_pci_nvgpu_group_notifier;
-
-	ret = vfio_register_notifier(&data->gpdev->dev, VFIO_GROUP_NOTIFY,
-			&events, &data->group_notifier);
-	if (ret)
-		goto free_exit;
-
-	/*
-	 * We have just set KVM, we do not need the listener anymore.
-	 * Also, keeping it registered means that if more than one GPU is
-	 * assigned, we will get several similar notifiers notifying about
-	 * the same device again which does not help with anything.
-	 */
-	vfio_unregister_notifier(&data->gpdev->dev, VFIO_GROUP_NOTIFY,
-			&data->group_notifier);
-
-	ret = vfio_pci_register_dev_region(vdev,
-			PCI_VENDOR_ID_NVIDIA | VFIO_REGION_TYPE_PCI_VENDOR_TYPE,
-			VFIO_REGION_SUBTYPE_NVIDIA_NVLINK2_RAM,
-			&vfio_pci_nvgpu_regops,
-			data->size,
-			VFIO_REGION_INFO_FLAG_READ |
-			VFIO_REGION_INFO_FLAG_WRITE |
-			VFIO_REGION_INFO_FLAG_MMAP,
-			data);
-	if (ret)
-		goto free_exit;
-
-	return 0;
-free_exit:
-	kfree(data);
-
-	return ret;
-}
-
-/*
- * IBM NPU2 bridge
- */
-struct vfio_pci_npu2_data {
-	void *base; /* ATSD register virtual address, for emulated access */
-	unsigned long mmio_atsd; /* ATSD physical address */
-	unsigned long gpu_tgt; /* TGT address of corresponding GPU RAM */
-	unsigned int link_speed; /* The link speed from DT's ibm,nvlink-speed */
-};
-
-static size_t vfio_pci_npu2_rw(struct vfio_pci_device *vdev,
-		char __user *buf, size_t count, loff_t *ppos, bool iswrite)
-{
-	unsigned int i = VFIO_PCI_OFFSET_TO_INDEX(*ppos) - VFIO_PCI_NUM_REGIONS;
-	struct vfio_pci_npu2_data *data = vdev->region[i].data;
-	loff_t pos = *ppos & VFIO_PCI_OFFSET_MASK;
-
-	if (pos >= vdev->region[i].size)
-		return -EINVAL;
-
-	count = min(count, (size_t)(vdev->region[i].size - pos));
-
-	if (iswrite) {
-		if (copy_from_user(data->base + pos, buf, count))
-			return -EFAULT;
-	} else {
-		if (copy_to_user(buf, data->base + pos, count))
-			return -EFAULT;
-	}
-	*ppos += count;
-
-	return count;
-}
-
-static int vfio_pci_npu2_mmap(struct vfio_pci_device *vdev,
-		struct vfio_pci_region *region, struct vm_area_struct *vma)
-{
-	int ret;
-	struct vfio_pci_npu2_data *data = region->data;
-	unsigned long req_len = vma->vm_end - vma->vm_start;
-
-	if (req_len != PAGE_SIZE)
-		return -EINVAL;
-
-	vma->vm_flags |= VM_PFNMAP;
-	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
-
-	ret = remap_pfn_range(vma, vma->vm_start, data->mmio_atsd >> PAGE_SHIFT,
-			req_len, vma->vm_page_prot);
-	trace_vfio_pci_npu2_mmap(vdev->pdev, data->mmio_atsd, vma->vm_start,
-			vma->vm_end - vma->vm_start, ret);
-
-	return ret;
-}
-
-static void vfio_pci_npu2_release(struct vfio_pci_device *vdev,
-		struct vfio_pci_region *region)
-{
-	struct vfio_pci_npu2_data *data = region->data;
-
-	memunmap(data->base);
-	kfree(data);
-}
-
-static int vfio_pci_npu2_add_capability(struct vfio_pci_device *vdev,
-		struct vfio_pci_region *region, struct vfio_info_cap *caps)
-{
-	struct vfio_pci_npu2_data *data = region->data;
-	struct vfio_region_info_cap_nvlink2_ssatgt captgt = {
-		.header.id = VFIO_REGION_INFO_CAP_NVLINK2_SSATGT,
-		.header.version = 1,
-		.tgt = data->gpu_tgt
-	};
-	struct vfio_region_info_cap_nvlink2_lnkspd capspd = {
-		.header.id = VFIO_REGION_INFO_CAP_NVLINK2_LNKSPD,
-		.header.version = 1,
-		.link_speed = data->link_speed
-	};
-	int ret;
-
-	ret = vfio_info_add_capability(caps, &captgt.header, sizeof(captgt));
-	if (ret)
-		return ret;
-
-	return vfio_info_add_capability(caps, &capspd.header, sizeof(capspd));
-}
-
-static const struct vfio_pci_regops vfio_pci_npu2_regops = {
-	.rw = vfio_pci_npu2_rw,
-	.mmap = vfio_pci_npu2_mmap,
-	.release = vfio_pci_npu2_release,
-	.add_capability = vfio_pci_npu2_add_capability,
-};
-
-int vfio_pci_ibm_npu2_init(struct vfio_pci_device *vdev)
-{
-	int ret;
-	struct vfio_pci_npu2_data *data;
-	struct device_node *nvlink_dn;
-	u32 nvlink_index = 0, mem_phandle = 0;
-	struct pci_dev *npdev = vdev->pdev;
-	struct device_node *npu_node = pci_device_to_OF_node(npdev);
-	struct pci_controller *hose = pci_bus_to_host(npdev->bus);
-	u64 mmio_atsd = 0;
-	u64 tgt = 0;
-	u32 link_speed = 0xff;
-
-	/*
-	 * PCI config space does not tell us about NVLink presense but
-	 * platform does, use this.
-	 */
-	if (!pnv_pci_get_gpu_dev(vdev->pdev))
-		return -ENODEV;
-
-	if (of_property_read_u32(npu_node, "memory-region", &mem_phandle))
-		return -ENODEV;
-
-	/*
-	 * NPU2 normally has 8 ATSD registers (for concurrency) and 6 links
-	 * so we can allocate one register per link, using nvlink index as
-	 * a key.
-	 * There is always at least one ATSD register so as long as at least
-	 * NVLink bridge #0 is passed to the guest, ATSD will be available.
-	 */
-	nvlink_dn = of_parse_phandle(npdev->dev.of_node, "ibm,nvlink", 0);
-	if (WARN_ON(of_property_read_u32(nvlink_dn, "ibm,npu-link-index",
-			&nvlink_index)))
-		return -ENODEV;
-
-	if (of_property_read_u64_index(hose->dn, "ibm,mmio-atsd", nvlink_index,
-			&mmio_atsd)) {
-		if (of_property_read_u64_index(hose->dn, "ibm,mmio-atsd", 0,
-				&mmio_atsd)) {
-			dev_warn(&vdev->pdev->dev, "No available ATSD found\n");
-			mmio_atsd = 0;
-		} else {
-			dev_warn(&vdev->pdev->dev,
-				 "Using fallback ibm,mmio-atsd[0] for ATSD.\n");
-		}
-	}
-
-	if (of_property_read_u64(npu_node, "ibm,device-tgt-addr", &tgt)) {
-		dev_warn(&vdev->pdev->dev, "No ibm,device-tgt-addr found\n");
-		return -EFAULT;
-	}
-
-	if (of_property_read_u32(npu_node, "ibm,nvlink-speed", &link_speed)) {
-		dev_warn(&vdev->pdev->dev, "No ibm,nvlink-speed found\n");
-		return -EFAULT;
-	}
-
-	data = kzalloc(sizeof(*data), GFP_KERNEL);
-	if (!data)
-		return -ENOMEM;
-
-	data->mmio_atsd = mmio_atsd;
-	data->gpu_tgt = tgt;
-	data->link_speed = link_speed;
-	if (data->mmio_atsd) {
-		data->base = memremap(data->mmio_atsd, SZ_64K, MEMREMAP_WT);
-		if (!data->base) {
-			ret = -ENOMEM;
-			goto free_exit;
-		}
-	}
-
-	/*
-	 * We want to expose the capability even if this specific NVLink
-	 * did not get its own ATSD register because capabilities
-	 * belong to VFIO regions and normally there will be ATSD register
-	 * assigned to the NVLink bridge.
-	 */
-	ret = vfio_pci_register_dev_region(vdev,
-			PCI_VENDOR_ID_IBM |
-			VFIO_REGION_TYPE_PCI_VENDOR_TYPE,
-			VFIO_REGION_SUBTYPE_IBM_NVLINK2_ATSD,
-			&vfio_pci_npu2_regops,
-			data->mmio_atsd ? PAGE_SIZE : 0,
-			VFIO_REGION_INFO_FLAG_READ |
-			VFIO_REGION_INFO_FLAG_WRITE |
-			VFIO_REGION_INFO_FLAG_MMAP,
-			data);
-	if (ret)
-		goto free_exit;
-
-	return 0;
-
-free_exit:
-	if (data->base)
-		memunmap(data->base);
-	kfree(data);
-
-	return ret;
-}
diff --git a/drivers/vfio/pci/vfio_pci_private.h b/drivers/vfio/pci/vfio_pci_private.h
index 8755a0febd05..5a36272cecbf 100644
--- a/drivers/vfio/pci/vfio_pci_private.h
+++ b/drivers/vfio/pci/vfio_pci_private.h
@@ -200,20 +200,6 @@ static inline int vfio_pci_igd_init(struct vfio_pci_device *vdev)
 	return -ENODEV;
 }
 #endif
-#ifdef CONFIG_VFIO_PCI_NVLINK2
-extern int vfio_pci_nvdia_v100_nvlink2_init(struct vfio_pci_device *vdev);
-extern int vfio_pci_ibm_npu2_init(struct vfio_pci_device *vdev);
-#else
-static inline int vfio_pci_nvdia_v100_nvlink2_init(struct vfio_pci_device *vdev)
-{
-	return -ENODEV;
-}
-
-static inline int vfio_pci_ibm_npu2_init(struct vfio_pci_device *vdev)
-{
-	return -ENODEV;
-}
-#endif
 
 #ifdef CONFIG_S390
 extern int vfio_pci_info_zdev_add_caps(struct vfio_pci_device *vdev,
diff --git a/drivers/vfio/platform/reset/vfio_platform_calxedaxgmac.c b/drivers/vfio/platform/reset/vfio_platform_calxedaxgmac.c
index 09a9453b75c5..63cc7f0b2e4a 100644
--- a/drivers/vfio/platform/reset/vfio_platform_calxedaxgmac.c
+++ b/drivers/vfio/platform/reset/vfio_platform_calxedaxgmac.c
@@ -26,7 +26,7 @@
 #define XGMAC_DMA_CONTROL	0x00000f18	/* Ctrl (Operational Mode) */
 #define XGMAC_DMA_INTR_ENA	0x00000f1c	/* Interrupt Enable */
 
-/* DMA Control registe defines */
+/* DMA Control register defines */
 #define DMA_CONTROL_ST		0x00002000	/* Start/Stop Transmission */
 #define DMA_CONTROL_SR		0x00000002	/* Start/Stop Receive */
 
diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
index 45cbfd4879a5..5fb1bb52b057 100644
--- a/drivers/vfio/vfio_iommu_type1.c
+++ b/drivers/vfio/vfio_iommu_type1.c
@@ -16,7 +16,7 @@
  * IOMMU to support the IOMMU API and have few to no restrictions around
  * the IOVA range that can be mapped.  The Type1 IOMMU is currently
  * optimized for relatively static mappings of a userspace process with
- * userpsace pages pinned into memory.  We also assume devices and IOMMU
+ * userspace pages pinned into memory.  We also assume devices and IOMMU
  * domains are PCI based as the IOMMU API is still centered around a
  * device/bus interface rather than a group interface.
  */
@@ -877,7 +877,7 @@ again:
 
 	/*
 	 * If iommu capable domain exist in the container then all pages are
-	 * already pinned and accounted. Accouting should be done if there is no
+	 * already pinned and accounted. Accounting should be done if there is no
 	 * iommu capable domain in the container.
 	 */
 	do_accounting = !IS_IOMMU_CAP_DOMAIN_IN_CONTAINER(iommu);
@@ -960,7 +960,7 @@ static int vfio_iommu_type1_unpin_pages(void *iommu_data,
 	bool do_accounting;
 	int i;
 
-	if (!iommu || !user_pfn)
+	if (!iommu || !user_pfn || npage <= 0)
 		return -EINVAL;
 
 	/* Supported for v2 version only */
@@ -977,13 +977,13 @@ static int vfio_iommu_type1_unpin_pages(void *iommu_data,
 		iova = user_pfn[i] << PAGE_SHIFT;
 		dma = vfio_find_dma(iommu, iova, PAGE_SIZE);
 		if (!dma)
-			goto unpin_exit;
+			break;
+
 		vfio_unpin_page_external(dma, iova, do_accounting);
 	}
 
-unpin_exit:
 	mutex_unlock(&iommu->lock);
-	return i > npage ? npage : (i > 0 ? i : -EINVAL);
+	return i > 0 ? i : -EINVAL;
 }
 
 static long vfio_sync_unpin(struct vfio_dma *dma, struct vfio_domain *domain,
@@ -2177,7 +2177,7 @@ static int vfio_iommu_resv_exclude(struct list_head *iova,
 			continue;
 		/*
 		 * Insert a new node if current node overlaps with the
-		 * reserve region to exlude that from valid iova range.
+		 * reserve region to exclude that from valid iova range.
 		 * Note that, new node is inserted before the current
 		 * node and finally the current node is deleted keeping
 		 * the list updated and sorted.
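
Aside (not part of the patch): the sizing rules added to vfio_pci_igd.c above can be read as a single pure function. The sketch below is an illustrative distillation only; the helper name and its standalone form are hypothetical, and the offsets correspond to the OPREGION_VERSION/RVDA/RVDS defines introduced in the hunk.

```c
#include <stdint.h>

/*
 * Hypothetical distillation of the OpRegion sizing logic from the
 * vfio_pci_igd.c hunk above: given the OpRegion version word and the
 * RVDA/RVDS fields, return the total number of bytes to expose to
 * userspace, or -1 for layouts the patch rejects.
 */
static long igd_opregion_total_size(uint16_t version, uint64_t rvda,
				    uint32_t rvds, uint32_t opregion_size)
{
	/* Pre-2.0 OpRegions, or 2.0+ without VBT data, have no extension. */
	if (version < 0x0200 || !rvda || !rvds)
		return opregion_size;

	/* v2.0: RVDA is a host physical address and cannot be exposed. */
	if (version == 0x0200)
		return -1;

	/*
	 * v2.1+: RVDA is an offset from the OpRegion base; the VBT must
	 * start exactly where the OpRegion ends, otherwise unrelated host
	 * memory between the two would become readable by userspace.
	 */
	if (rvda != opregion_size)
		return -1;

	return (long)opregion_size + rvds;	/* OpRegion plus raw VBT */
}
```

The design point the patch enforces is the middle two rejections: only a VBT region that is both relative-addressed (v2.1+) and contiguous with the OpRegion can be exposed without leaking anything else to the guest.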