Diffstat (limited to 'drivers/iommu/intel')
-rw-r--r--   drivers/iommu/intel/Kconfig  |  19
-rw-r--r--   drivers/iommu/intel/dmar.c   |   2
-rw-r--r--   drivers/iommu/intel/iommu.c  | 197
-rw-r--r--   drivers/iommu/intel/pasid.c  |  28
-rw-r--r--   drivers/iommu/intel/pasid.h  |  16
-rw-r--r--   drivers/iommu/intel/perf.c   |   2
-rw-r--r--   drivers/iommu/intel/svm.c    |   7
7 files changed, 135 insertions, 136 deletions
diff --git a/drivers/iommu/intel/Kconfig b/drivers/iommu/intel/Kconfig
index 43ebd8af11c5..0ddb77115be7 100644
--- a/drivers/iommu/intel/Kconfig
+++ b/drivers/iommu/intel/Kconfig
@@ -25,9 +25,11 @@ config INTEL_IOMMU
           and include PCI device scope covered by these DMA
           remapping devices.

+if INTEL_IOMMU
+
 config INTEL_IOMMU_DEBUGFS
         bool "Export Intel IOMMU internals in Debugfs"
-        depends on INTEL_IOMMU && IOMMU_DEBUGFS
+        depends on IOMMU_DEBUGFS
         select DMAR_PERF
         help
           !!!WARNING!!!
@@ -41,7 +43,7 @@ config INTEL_IOMMU_DEBUGFS

 config INTEL_IOMMU_SVM
         bool "Support for Shared Virtual Memory with Intel IOMMU"
-        depends on INTEL_IOMMU && X86_64
+        depends on X86_64
         select PCI_PASID
         select PCI_PRI
         select MMU_NOTIFIER
@@ -53,9 +55,8 @@ config INTEL_IOMMU_SVM
           means of a Process Address Space ID (PASID).

 config INTEL_IOMMU_DEFAULT_ON
-        def_bool y
-        prompt "Enable Intel DMA Remapping Devices by default"
-        depends on INTEL_IOMMU
+        bool "Enable Intel DMA Remapping Devices by default"
+        default y
         help
           Selecting this option will enable a DMAR device at boot time if
           one is found. If this option is not selected, DMAR support can
@@ -63,7 +64,7 @@ config INTEL_IOMMU_DEFAULT_ON

 config INTEL_IOMMU_BROKEN_GFX_WA
         bool "Workaround broken graphics drivers (going away soon)"
-        depends on INTEL_IOMMU && BROKEN && X86
+        depends on BROKEN && X86
         help
           Current Graphics drivers tend to use physical address
           for DMA and avoid using DMA APIs. Setting this config
@@ -74,7 +75,7 @@ config INTEL_IOMMU_BROKEN_GFX_WA

 config INTEL_IOMMU_FLOPPY_WA
         def_bool y
-        depends on INTEL_IOMMU && X86
+        depends on X86
         help
           Floppy disk drivers are known to bypass DMA API calls
           thereby failing to work when IOMMU is enabled. This
@@ -83,7 +84,7 @@ config INTEL_IOMMU_FLOPPY_WA

 config INTEL_IOMMU_SCALABLE_MODE_DEFAULT_ON
         bool "Enable Intel IOMMU scalable mode by default"
-        depends on INTEL_IOMMU
+        default y
         help
           Selecting this option will enable by default the scalable mode if
           hardware presents the capability. The scalable mode is defined in
@@ -92,3 +93,5 @@ config INTEL_IOMMU_SCALABLE_MODE_DEFAULT_ON
           is not selected, scalable mode support could also be enabled by
           passing intel_iommu=sm_on to the kernel. If not sure, please use
           the default value.
+
+endif # INTEL_IOMMU
diff --git a/drivers/iommu/intel/dmar.c b/drivers/iommu/intel/dmar.c
index d66f79acd14d..0ec5514c9980 100644
--- a/drivers/iommu/intel/dmar.c
+++ b/drivers/iommu/intel/dmar.c
@@ -149,8 +149,6 @@ dmar_alloc_pci_notify_info(struct pci_dev *dev, unsigned long event)
         } else {
                 info = kzalloc(size, GFP_KERNEL);
                 if (!info) {
-                        pr_warn("Out of memory when allocating notify_info "
-                                "for %s.\n", pci_name(dev));
                         if (dmar_dev_scope_status == 0)
                                 dmar_dev_scope_status = -ENOMEM;
                         return NULL;
diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
index dd22fc7d5176..d75f59ae28e6 100644
--- a/drivers/iommu/intel/iommu.c
+++ b/drivers/iommu/intel/iommu.c
@@ -33,6 +33,7 @@
 #include <linux/iommu.h>
 #include <linux/dma-iommu.h>
 #include <linux/intel-iommu.h>
+#include <linux/intel-svm.h>
 #include <linux/syscore_ops.h>
 #include <linux/tboot.h>
 #include <linux/dmi.h>
@@ -85,24 +86,6 @@
 #define LEVEL_STRIDE            (9)
 #define LEVEL_MASK              (((u64)1 << LEVEL_STRIDE) - 1)

-/*
- * This bitmap is used to advertise the page sizes our hardware support
- * to the IOMMU core, which will then use this information to split
- * physically contiguous memory regions it is mapping into page sizes
- * that we support.
- *
- * Traditionally the IOMMU core just handed us the mappings directly,
- * after making sure the size is an order of a 4KiB page and that the
- * mapping has natural alignment.
- *
- * To retain this behavior, we currently advertise that we support
- * all page sizes that are an order of 4KiB.
- *
- * If at some point we'd like to utilize the IOMMU core's new behavior,
- * we could change this to advertise the real page sizes we support.
- */
-#define INTEL_IOMMU_PGSIZES     (~0xFFFUL)
-
 static inline int agaw_to_level(int agaw)
 {
         return agaw + 2;
@@ -345,23 +328,13 @@ static int intel_iommu_attach_device(struct iommu_domain *domain,
 static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
                                             dma_addr_t iova);

-#ifdef CONFIG_INTEL_IOMMU_DEFAULT_ON
-int dmar_disabled = 0;
-#else
-int dmar_disabled = 1;
-#endif /* CONFIG_INTEL_IOMMU_DEFAULT_ON */
-
-#ifdef CONFIG_INTEL_IOMMU_SCALABLE_MODE_DEFAULT_ON
-int intel_iommu_sm = 1;
-#else
-int intel_iommu_sm;
-#endif /* CONFIG_INTEL_IOMMU_SCALABLE_MODE_DEFAULT_ON */
+int dmar_disabled = !IS_ENABLED(CONFIG_INTEL_IOMMU_DEFAULT_ON);
+int intel_iommu_sm = IS_ENABLED(CONFIG_INTEL_IOMMU_SCALABLE_MODE_DEFAULT_ON);

 int intel_iommu_enabled = 0;
 EXPORT_SYMBOL_GPL(intel_iommu_enabled);

 static int dmar_map_gfx = 1;
-static int intel_iommu_strict;
 static int intel_iommu_superpage = 1;
 static int iommu_identity_mapping;
 static int iommu_skip_te_disable;
@@ -454,14 +427,17 @@ static int __init intel_iommu_setup(char *str)
                         pr_warn("intel_iommu=forcedac deprecated; use iommu.forcedac instead\n");
                         iommu_dma_forcedac = true;
                 } else if (!strncmp(str, "strict", 6)) {
-                        pr_info("Disable batched IOTLB flush\n");
-                        intel_iommu_strict = 1;
+                        pr_warn("intel_iommu=strict deprecated; use iommu.strict=1 instead\n");
+                        iommu_set_dma_strict();
                 } else if (!strncmp(str, "sp_off", 6)) {
                         pr_info("Disable supported super page\n");
                         intel_iommu_superpage = 0;
                 } else if (!strncmp(str, "sm_on", 5)) {
-                        pr_info("Intel-IOMMU: scalable mode supported\n");
+                        pr_info("Enable scalable mode if hardware supports\n");
                         intel_iommu_sm = 1;
+                } else if (!strncmp(str, "sm_off", 6)) {
+                        pr_info("Scalable mode is disallowed\n");
+                        intel_iommu_sm = 0;
                 } else if (!strncmp(str, "tboot_noforce", 13)) {
                         pr_info("Intel-IOMMU: not forcing on after tboot. This could expose security risk for tboot\n");
                         intel_iommu_tboot_noforce = 1;
@@ -601,7 +577,7 @@ struct intel_iommu *domain_get_iommu(struct dmar_domain *domain)
         int iommu_id;

         /* si_domain and vm domain should not get here. */
-        if (WARN_ON(domain->domain.type != IOMMU_DOMAIN_DMA))
+        if (WARN_ON(!iommu_is_dma_domain(&domain->domain)))
                 return NULL;

         for_each_domain_iommu(iommu_id, domain)
@@ -736,6 +712,23 @@ static int domain_update_device_node(struct dmar_domain *domain)

 static void domain_update_iotlb(struct dmar_domain *domain);

+/* Return the super pagesize bitmap if supported. */
+static unsigned long domain_super_pgsize_bitmap(struct dmar_domain *domain)
+{
+        unsigned long bitmap = 0;
+
+        /*
+         * 1-level super page supports page size of 2MiB, 2-level super page
+         * supports page size of both 2MiB and 1GiB.
+         */
+        if (domain->iommu_superpage == 1)
+                bitmap |= SZ_2M;
+        else if (domain->iommu_superpage == 2)
+                bitmap |= SZ_2M | SZ_1G;
+
+        return bitmap;
+}
+
 /* Some capabilities may be different across iommus */
 static void domain_update_iommu_cap(struct dmar_domain *domain)
 {
@@ -762,6 +755,7 @@ static void domain_update_iommu_cap(struct dmar_domain *domain)
         else
                 domain->domain.geometry.aperture_end = __DOMAIN_MAX_ADDR(domain->gaw);

+        domain->domain.pgsize_bitmap |= domain_super_pgsize_bitmap(domain);
         domain_update_iotlb(domain);
 }

@@ -1035,7 +1029,7 @@ static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
                         pteval = ((uint64_t)virt_to_dma_pfn(tmp_page) << VTD_PAGE_SHIFT) | DMA_PTE_READ | DMA_PTE_WRITE;
                         if (domain_use_first_level(domain)) {
                                 pteval |= DMA_FL_PTE_XD | DMA_FL_PTE_US;
-                                if (domain->domain.type == IOMMU_DOMAIN_DMA)
+                                if (iommu_is_dma_domain(&domain->domain))
                                         pteval |= DMA_FL_PTE_ACCESS;
                         }
                         if (cmpxchg64(&pte->val, 0ULL, pteval))
@@ -1548,7 +1542,7 @@ static void iommu_enable_dev_iotlb(struct device_domain_info *info)
         if (info->pri_supported &&
             (info->pasid_enabled ? pci_prg_resp_pasid_required(pdev) : 1) &&
-            !pci_reset_pri(pdev) && !pci_enable_pri(pdev, 32))
+            !pci_reset_pri(pdev) && !pci_enable_pri(pdev, PRQ_DEPTH))
                 info->pri_enabled = 1;
 #endif
         if (info->ats_supported && pci_ats_page_aligned(pdev) &&
@@ -1780,11 +1774,8 @@ static int iommu_init_domains(struct intel_iommu *iommu)
         spin_lock_init(&iommu->lock);

         iommu->domain_ids = kcalloc(nlongs, sizeof(unsigned long), GFP_KERNEL);
-        if (!iommu->domain_ids) {
-                pr_err("%s: Allocating domain id array failed\n",
-                       iommu->name);
+        if (!iommu->domain_ids)
                 return -ENOMEM;
-        }

         size = (ALIGN(ndomains, 256) >> 8) * sizeof(struct dmar_domain **);
         iommu->domains = kzalloc(size, GFP_KERNEL);
@@ -1980,10 +1971,6 @@ static void domain_exit(struct dmar_domain *domain)
         /* Remove associated devices and clear attached or cached domains */
         domain_remove_dev_info(domain);

-        /* destroy iovas */
-        if (domain->domain.type == IOMMU_DOMAIN_DMA)
-                iommu_put_dma_cookie(&domain->domain);
-
         if (domain->pgd) {
                 struct page *freelist;

@@ -2334,9 +2321,9 @@ static int
 __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
                  unsigned long phys_pfn, unsigned long nr_pages, int prot)
 {
+        struct dma_pte *first_pte = NULL, *pte = NULL;
         unsigned int largepage_lvl = 0;
         unsigned long lvl_pages = 0;
-        struct dma_pte *pte = NULL;
         phys_addr_t pteval;
         u64 attr;

@@ -2348,13 +2335,9 @@ __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
         attr = prot & (DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_SNP);
         attr |= DMA_FL_PTE_PRESENT;
         if (domain_use_first_level(domain)) {
-                attr |= DMA_FL_PTE_XD | DMA_FL_PTE_US;
-
-                if (domain->domain.type == IOMMU_DOMAIN_DMA) {
-                        attr |= DMA_FL_PTE_ACCESS;
-                        if (prot & DMA_PTE_WRITE)
-                                attr |= DMA_FL_PTE_DIRTY;
-                }
+                attr |= DMA_FL_PTE_XD | DMA_FL_PTE_US | DMA_FL_PTE_ACCESS;
+                if (prot & DMA_PTE_WRITE)
+                        attr |= DMA_FL_PTE_DIRTY;
         }

         pteval = ((phys_addr_t)phys_pfn << VTD_PAGE_SHIFT) | attr;
@@ -2369,6 +2352,8 @@ __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
                         pte = pfn_to_dma_pte(domain, iov_pfn, &largepage_lvl);
                         if (!pte)
                                 return -ENOMEM;
+                        first_pte = pte;
+
                         /* It is large page*/
                         if (largepage_lvl > 1) {
                                 unsigned long end_pfn;
@@ -2416,14 +2401,14 @@ __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
                  * recalculate 'pte' and switch back to smaller pages for the
                  * end of the mapping, if the trailing size is not enough to
                  * use another superpage (i.e. nr_pages < lvl_pages).
-                 *
-                 * We leave clflush for the leaf pte changes to iotlb_sync_map()
-                 * callback.
                  */
                 pte++;
                 if (!nr_pages || first_pte_in_page(pte) ||
-                    (largepage_lvl > 1 && nr_pages < lvl_pages))
+                    (largepage_lvl > 1 && nr_pages < lvl_pages)) {
+                        domain_flush_cache(domain, first_pte,
+                                           (void *)pte - (void *)first_pte);
                         pte = NULL;
+                }
         }

         return 0;
@@ -3227,7 +3212,6 @@ static int __init init_dmars(void)
         g_iommus = kcalloc(g_num_of_iommus, sizeof(struct intel_iommu *),
                         GFP_KERNEL);
         if (!g_iommus) {
-                pr_err("Allocating global iommu array failed\n");
                 ret = -ENOMEM;
                 goto error;
         }
@@ -4393,9 +4377,9 @@ int __init intel_iommu_init(void)
                  * is likely to be much lower than the overhead of synchronizing
                  * the virtual and physical IOMMU page-tables.
                  */
-                if (!intel_iommu_strict && cap_caching_mode(iommu->cap)) {
-                        pr_warn("IOMMU batching is disabled due to virtualization");
-                        intel_iommu_strict = 1;
+                if (cap_caching_mode(iommu->cap)) {
+                        pr_info_once("IOMMU batching disallowed due to virtualization\n");
+                        iommu_set_dma_strict();
                 }
                 iommu_device_sysfs_add(&iommu->iommu, NULL,
                                        intel_iommu_groups,
@@ -4404,7 +4388,6 @@ int __init intel_iommu_init(void)
         }
         up_read(&dmar_global_lock);

-        iommu_set_dma_strict(intel_iommu_strict);
         bus_set_iommu(&pci_bus_type, &intel_iommu_ops);
         if (si_domain && !hw_pass_through)
                 register_memory_notifier(&intel_iommu_memory_nb);
@@ -4532,6 +4515,7 @@ static struct iommu_domain *intel_iommu_domain_alloc(unsigned type)

         switch (type) {
         case IOMMU_DOMAIN_DMA:
+        case IOMMU_DOMAIN_DMA_FQ:
         case IOMMU_DOMAIN_UNMANAGED:
                 dmar_domain = alloc_domain(0);
                 if (!dmar_domain) {
@@ -4544,10 +4528,6 @@ static struct iommu_domain *intel_iommu_domain_alloc(unsigned type)
                         return NULL;
                 }

-                if (type == IOMMU_DOMAIN_DMA &&
-                    iommu_get_dma_cookie(&dmar_domain->domain))
-                        return NULL;
-
                 domain = &dmar_domain->domain;
                 domain->geometry.aperture_start = 0;
                 domain->geometry.aperture_end =
@@ -5067,6 +5047,28 @@ static int intel_iommu_map(struct iommu_domain *domain,
                                 hpa >> VTD_PAGE_SHIFT, size, prot);
 }

+static int intel_iommu_map_pages(struct iommu_domain *domain,
+                                 unsigned long iova, phys_addr_t paddr,
+                                 size_t pgsize, size_t pgcount,
+                                 int prot, gfp_t gfp, size_t *mapped)
+{
+        unsigned long pgshift = __ffs(pgsize);
+        size_t size = pgcount << pgshift;
+        int ret;
+
+        if (pgsize != SZ_4K && pgsize != SZ_2M && pgsize != SZ_1G)
+                return -EINVAL;
+
+        if (!IS_ALIGNED(iova | paddr, pgsize))
+                return -EINVAL;
+
+        ret = intel_iommu_map(domain, iova, paddr, size, prot, gfp);
+        if (!ret && mapped)
+                *mapped = size;
+
+        return ret;
+}
+
 static size_t intel_iommu_unmap(struct iommu_domain *domain,
                                 unsigned long iova, size_t size,
                                 struct iommu_iotlb_gather *gather)
@@ -5096,6 +5098,17 @@ static size_t intel_iommu_unmap(struct iommu_domain *domain,
         return size;
 }

+static size_t intel_iommu_unmap_pages(struct iommu_domain *domain,
+                                      unsigned long iova,
+                                      size_t pgsize, size_t pgcount,
+                                      struct iommu_iotlb_gather *gather)
+{
+        unsigned long pgshift = __ffs(pgsize);
+        size_t size = pgcount << pgshift;
+
+        return intel_iommu_unmap(domain, iova, size, gather);
+}
+
 static void intel_iommu_tlb_sync(struct iommu_domain *domain,
                                  struct iommu_iotlb_gather *gather)
 {
@@ -5172,12 +5185,8 @@ static void intel_iommu_release_device(struct device *dev)

 static void intel_iommu_probe_finalize(struct device *dev)
 {
-        struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
-
-        if (domain && domain->type == IOMMU_DOMAIN_DMA)
-                iommu_setup_dma_ops(dev, 0, U64_MAX);
-        else
-                set_dma_ops(dev, NULL);
+        set_dma_ops(dev, NULL);
+        iommu_setup_dma_ops(dev, 0, U64_MAX);
 }

 static void intel_iommu_get_resv_regions(struct device *device,
@@ -5532,39 +5541,6 @@ static bool risky_device(struct pci_dev *pdev)
         return false;
 }

-static void clflush_sync_map(struct dmar_domain *domain, unsigned long clf_pfn,
-                             unsigned long clf_pages)
-{
-        struct dma_pte *first_pte = NULL, *pte = NULL;
-        unsigned long lvl_pages = 0;
-        int level = 0;
-
-        while (clf_pages > 0) {
-                if (!pte) {
-                        level = 0;
-                        pte = pfn_to_dma_pte(domain, clf_pfn, &level);
-                        if (WARN_ON(!pte))
-                                return;
-                        first_pte = pte;
-                        lvl_pages = lvl_to_nr_pages(level);
-                }
-
-                if (WARN_ON(!lvl_pages || clf_pages < lvl_pages))
-                        return;
-
-                clf_pages -= lvl_pages;
-                clf_pfn += lvl_pages;
-                pte++;
-
-                if (!clf_pages || first_pte_in_page(pte) ||
-                    (level > 1 && clf_pages < lvl_pages)) {
-                        domain_flush_cache(domain, first_pte,
-                                           (void *)pte - (void *)first_pte);
-                        pte = NULL;
-                }
-        }
-}
-
 static void intel_iommu_iotlb_sync_map(struct iommu_domain *domain,
                                        unsigned long iova, size_t size)
 {
@@ -5574,9 +5550,6 @@ static void intel_iommu_iotlb_sync_map(struct iommu_domain *domain,
         struct intel_iommu *iommu;
         int iommu_id;

-        if (!dmar_domain->iommu_coherency)
-                clflush_sync_map(dmar_domain, pfn, pages);
-
         for_each_domain_iommu(iommu_id, dmar_domain) {
                 iommu = g_iommus[iommu_id];
                 __mapping_notify_one(iommu, dmar_domain, pfn, pages);
@@ -5593,9 +5566,9 @@ const struct iommu_ops intel_iommu_ops = {
         .aux_attach_dev         = intel_iommu_aux_attach_device,
         .aux_detach_dev         = intel_iommu_aux_detach_device,
         .aux_get_pasid          = intel_iommu_aux_get_pasid,
-        .map                    = intel_iommu_map,
+        .map_pages              = intel_iommu_map_pages,
+        .unmap_pages            = intel_iommu_unmap_pages,
         .iotlb_sync_map         = intel_iommu_iotlb_sync_map,
-        .unmap                  = intel_iommu_unmap,
         .flush_iotlb_all        = intel_flush_iotlb_all,
         .iotlb_sync             = intel_iommu_tlb_sync,
         .iova_to_phys           = intel_iommu_iova_to_phys,
@@ -5611,7 +5584,7 @@ const struct iommu_ops intel_iommu_ops = {
         .dev_disable_feat       = intel_iommu_dev_disable_feat,
         .is_attach_deferred     = intel_iommu_is_attach_deferred,
         .def_domain_type        = device_def_domain_type,
-        .pgsize_bitmap          = INTEL_IOMMU_PGSIZES,
+        .pgsize_bitmap          = SZ_4K,
 #ifdef CONFIG_INTEL_IOMMU_SVM
         .cache_invalidate       = intel_iommu_sva_invalidate,
         .sva_bind_gpasid        = intel_svm_bind_gpasid,
@@ -5714,8 +5687,8 @@ static void quirk_calpella_no_shadow_gtt(struct pci_dev *dev)
         } else if (dmar_map_gfx) {
                 /* we have to ensure the gfx device is idle before we flush */
                 pci_info(dev, "Disabling batched IOTLB flush on Ironlake\n");
-                intel_iommu_strict = 1;
-        }
+                iommu_set_dma_strict();
+        }
 }
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0040, quirk_calpella_no_shadow_gtt);
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0044, quirk_calpella_no_shadow_gtt);
diff --git a/drivers/iommu/intel/pasid.c b/drivers/iommu/intel/pasid.c
index c6cf44a6c923..07c390aed1fe 100644
--- a/drivers/iommu/intel/pasid.c
+++ b/drivers/iommu/intel/pasid.c
@@ -511,29 +511,39 @@ void intel_pasid_tear_down_entry(struct intel_iommu *iommu, struct device *dev,
                                  u32 pasid, bool fault_ignore)
 {
         struct pasid_entry *pte;
-        u16 did;
+        u16 did, pgtt;

         pte = intel_pasid_get_entry(dev, pasid);
         if (WARN_ON(!pte))
                 return;

-        if (!(pte->val[0] & PASID_PTE_PRESENT))
+        if (!pasid_pte_is_present(pte))
                 return;

         did = pasid_get_domain_id(pte);
+        pgtt = pasid_pte_get_pgtt(pte);
+
         intel_pasid_clear_entry(dev, pasid, fault_ignore);

         if (!ecap_coherent(iommu->ecap))
                 clflush_cache_range(pte, sizeof(*pte));

         pasid_cache_invalidation_with_pasid(iommu, did, pasid);
-        qi_flush_piotlb(iommu, did, pasid, 0, -1, 0);
+
+        if (pgtt == PASID_ENTRY_PGTT_PT || pgtt == PASID_ENTRY_PGTT_FL_ONLY)
+                qi_flush_piotlb(iommu, did, pasid, 0, -1, 0);
+        else
+                iommu->flush.flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH);

         /* Device IOTLB doesn't need to be flushed in caching mode. */
         if (!cap_caching_mode(iommu->cap))
                 devtlb_invalidation_with_pasid(iommu, dev, pasid);
 }

+/*
+ * This function flushes cache for a newly setup pasid table entry.
+ * Caller of it should not modify the in-use pasid table entries.
+ */
 static void pasid_flush_caches(struct intel_iommu *iommu,
                                struct pasid_entry *pte,
                                u32 pasid, u16 did)
@@ -585,6 +595,10 @@ int intel_pasid_setup_first_level(struct intel_iommu *iommu,
         if (WARN_ON(!pte))
                 return -EINVAL;

+        /* Caller must ensure PASID entry is not in use. */
+        if (pasid_pte_is_present(pte))
+                return -EBUSY;
+
         pasid_clear_entry(pte);

         /* Setup the first level page table pointer: */
@@ -684,6 +698,10 @@ int intel_pasid_setup_second_level(struct intel_iommu *iommu,
                 return -ENODEV;
         }

+        /* Caller must ensure PASID entry is not in use. */
+        if (pasid_pte_is_present(pte))
+                return -EBUSY;
+
         pasid_clear_entry(pte);
         pasid_set_domain_id(pte, did);
         pasid_set_slptr(pte, pgd_val);
@@ -723,6 +741,10 @@ int intel_pasid_setup_pass_through(struct intel_iommu *iommu,
                 return -ENODEV;
         }

+        /* Caller must ensure PASID entry is not in use. */
+        if (pasid_pte_is_present(pte))
+                return -EBUSY;
+
         pasid_clear_entry(pte);
         pasid_set_domain_id(pte, did);
         pasid_set_address_width(pte, iommu->agaw);
diff --git a/drivers/iommu/intel/pasid.h b/drivers/iommu/intel/pasid.h
index 5ff61c3d401f..d5552e2c160d 100644
--- a/drivers/iommu/intel/pasid.h
+++ b/drivers/iommu/intel/pasid.h
@@ -28,12 +28,12 @@
 #define VCMD_CMD_ALLOC                  0x1
 #define VCMD_CMD_FREE                   0x2
 #define VCMD_VRSP_IP                    0x1
-#define VCMD_VRSP_SC(e)                 (((e) >> 1) & 0x3)
+#define VCMD_VRSP_SC(e)                 (((e) & 0xff) >> 1)
 #define VCMD_VRSP_SC_SUCCESS            0
-#define VCMD_VRSP_SC_NO_PASID_AVAIL     2
-#define VCMD_VRSP_SC_INVALID_PASID      2
-#define VCMD_VRSP_RESULT_PASID(e)       (((e) >> 8) & 0xfffff)
-#define VCMD_CMD_OPERAND(e)             ((e) << 8)
+#define VCMD_VRSP_SC_NO_PASID_AVAIL     16
+#define VCMD_VRSP_SC_INVALID_PASID      16
+#define VCMD_VRSP_RESULT_PASID(e)       (((e) >> 16) & 0xfffff)
+#define VCMD_CMD_OPERAND(e)             ((e) << 16)
 /*
  * Domain ID reserved for pasid entries programmed for first-level
  * only and pass-through transfer modes.
@@ -99,6 +99,12 @@ static inline bool pasid_pte_is_present(struct pasid_entry *pte)
         return READ_ONCE(pte->val[0]) & PASID_PTE_PRESENT;
 }

+/* Get PGTT field of a PASID table entry */
+static inline u16 pasid_pte_get_pgtt(struct pasid_entry *pte)
+{
+        return (u16)((READ_ONCE(pte->val[0]) >> 6) & 0x7);
+}
+
 extern unsigned int intel_pasid_max_id;
 int intel_pasid_alloc_table(struct device *dev);
 void intel_pasid_free_table(struct device *dev);
diff --git a/drivers/iommu/intel/perf.c b/drivers/iommu/intel/perf.c
index 73b7ec705552..0e8e03252d92 100644
--- a/drivers/iommu/intel/perf.c
+++ b/drivers/iommu/intel/perf.c
@@ -1,5 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0
-/**
+/*
  * perf.c - performance monitor
  *
  * Copyright (C) 2021 Intel Corporation
diff --git a/drivers/iommu/intel/svm.c b/drivers/iommu/intel/svm.c
index 9b0f22bc0514..2014fe8695ac 100644
--- a/drivers/iommu/intel/svm.c
+++ b/drivers/iommu/intel/svm.c
@@ -31,8 +31,6 @@ static irqreturn_t prq_event_thread(int irq, void *d);
 static void intel_svm_drain_prq(struct device *dev, u32 pasid);
 #define to_intel_svm_dev(handle) container_of(handle, struct intel_svm_dev, sva)

-#define PRQ_ORDER 0
-
 static DEFINE_XARRAY_ALLOC(pasid_private_array);
 static int pasid_private_add(ioasid_t pasid, void *priv)
 {
@@ -675,7 +673,6 @@ static int intel_svm_unbind_mm(struct device *dev, u32 pasid)
                         kfree_rcu(sdev, rcu);

                         if (list_empty(&svm->devs)) {
-                                intel_svm_free_pasid(mm);
                                 if (svm->notifier.ops) {
                                         mmu_notifier_unregister(&svm->notifier, mm);
                                         /* Clear mm's pasid. */
@@ -690,6 +687,8 @@ static int intel_svm_unbind_mm(struct device *dev, u32 pasid)
                                 kfree(svm);
                         }
                 }
+                /* Drop a PASID reference and free it if no reference. */
+                intel_svm_free_pasid(mm);
         }
 out:
         return ret;
@@ -724,8 +723,6 @@ struct page_req_dsc {
         u64 priv_data[2];
 };

-#define PRQ_RING_MASK   ((0x1000 << PRQ_ORDER) - 0x20)
-
 static bool is_canonical_address(u64 addr)
 {
         int shift = 64 - (__VIRTUAL_MASK_SHIFT + 1);
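
Side note (not part of the patch): the pasid.h hunk above changes the virtual command response decoding so that the status code is taken from bits 7:1 and the returned PASID from bits 35:16, instead of bits 2:1 and 27:8 as the old macros assumed. Below is a minimal, stand-alone C sketch of that decoding using the updated macro definitions copied from the diff; the response value is hypothetical and chosen only for illustration.

    /* User-space sketch of the updated VCMD response decoding (macros from pasid.h). */
    #include <stdint.h>
    #include <stdio.h>

    #define VCMD_VRSP_IP                    0x1
    #define VCMD_VRSP_SC(e)                 (((e) & 0xff) >> 1)
    #define VCMD_VRSP_SC_SUCCESS            0
    #define VCMD_VRSP_RESULT_PASID(e)       (((e) >> 16) & 0xfffff)
    #define VCMD_CMD_OPERAND(e)             ((e) << 16)

    int main(void)
    {
            /* Hypothetical response: success status code, PASID 0x42 in bits 35:16. */
            uint64_t vrsp = VCMD_CMD_OPERAND(0x42ULL);

            if (VCMD_VRSP_SC(vrsp) == VCMD_VRSP_SC_SUCCESS)
                    printf("allocated PASID %#llx\n",
                           (unsigned long long)VCMD_VRSP_RESULT_PASID(vrsp));
            else
                    printf("allocation failed, status %llu\n",
                           (unsigned long long)VCMD_VRSP_SC(vrsp));
            return 0;
    }

With the previous definitions, ((e) >> 1) & 0x3 and ((e) >> 8) & 0xfffff, the same response word would be decoded from the wrong bit positions, which is the error the hunk corrects.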