Diffstat (limited to 'drivers/iommu')
-rw-r--r-- | drivers/iommu/Kconfig             |    9
-rw-r--r-- | drivers/iommu/Makefile            |    1
-rw-r--r-- | drivers/iommu/amd_iommu.c         |  414
-rw-r--r-- | drivers/iommu/amd_iommu.h         |   96
-rw-r--r-- | drivers/iommu/amd_iommu_debugfs.c |    5
-rw-r--r-- | drivers/iommu/amd_iommu_init.c    |   13
-rw-r--r-- | drivers/iommu/amd_iommu_proto.h   |   96
-rw-r--r-- | drivers/iommu/amd_iommu_types.h   |   17
-rw-r--r-- | drivers/iommu/amd_iommu_v2.c      |   14
-rw-r--r-- | drivers/iommu/arm-smmu-impl.c     |    8
-rw-r--r-- | drivers/iommu/arm-smmu-qcom.c     |   37
-rw-r--r-- | drivers/iommu/arm-smmu-v3.c       |   84
-rw-r--r-- | drivers/iommu/arm-smmu.c          |   14
-rw-r--r-- | drivers/iommu/arm-smmu.h          |    1
-rw-r--r-- | drivers/iommu/hyperv-iommu.c      |    2
-rw-r--r-- | drivers/iommu/intel-iommu.c       |    9
-rw-r--r-- | drivers/iommu/iommu.c             |   80
-rw-r--r-- | drivers/iommu/msm_iommu.c         |    2
-rw-r--r-- | drivers/iommu/mtk_iommu_v1.c      |   18
-rw-r--r-- | drivers/iommu/omap-iommu.c        |    4
-rw-r--r-- | drivers/iommu/sun50i-iommu.c      | 1023
-rw-r--r-- | drivers/iommu/virtio-iommu.c      |    2
22 files changed, 1538 insertions(+), 411 deletions(-)
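The largest functional change in the amd_iommu.c hunks below replaces the protection_domain's separate mode/pt_root fields with a single atomic64 that packs the page-table root pointer and the paging mode into one value, so that readers can snapshot both consistently with a single atomic64_read(). The following stand-alone C sketch (illustrative only, not kernel code; names such as encode_pgtable, decode_pgtable and SKETCH_PAGE_MASK are made up for this example) shows the packing scheme, assuming a page-aligned root so the low 3 bits are free for the mode:

/*
 * Minimal user-space sketch of the pt_root packing used by
 * amd_iommu_domain_encode_pgtable()/amd_iommu_domain_get_pgtable()
 * in the diff below: paging mode in the lowest 3 bits, page-aligned
 * root pointer in the remaining bits.
 */
#include <stdint.h>
#include <stdio.h>

#define SKETCH_PAGE_MASK (~0xfffULL)

struct domain_pgtable {
	uint64_t *root;
	int mode;
};

static uint64_t encode_pgtable(uint64_t *root, int mode)
{
	/* lowest 3 bits encode the pgtable mode */
	return (uint64_t)(uintptr_t)root | (mode & 7);
}

static void decode_pgtable(uint64_t pt_root, struct domain_pgtable *pgtable)
{
	pgtable->root = (uint64_t *)(uintptr_t)(pt_root & SKETCH_PAGE_MASK);
	pgtable->mode = pt_root & 7;
}

int main(void)
{
	/* page-aligned dummy root, standing in for get_zeroed_page() */
	static uint64_t fake_root[512] __attribute__((aligned(4096)));
	struct domain_pgtable pg;

	decode_pgtable(encode_pgtable(fake_root, 3 /* PAGE_MODE_3_LEVEL */), &pg);
	printf("root=%p mode=%d\n", (void *)pg.root, pg.mode);
	return 0;
}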
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig index 2ab07ce17abb..aca76383f201 100644 --- a/drivers/iommu/Kconfig +++ b/drivers/iommu/Kconfig @@ -303,6 +303,15 @@ config ROCKCHIP_IOMMU Say Y here if you are using a Rockchip SoC that includes an IOMMU device. +config SUN50I_IOMMU + bool "Allwinner H6 IOMMU Support" + depends on ARCH_SUNXI || COMPILE_TEST + select ARM_DMA_USE_IOMMU + select IOMMU_API + select IOMMU_DMA + help + Support for the IOMMU introduced in the Allwinner H6 SoCs. + config TEGRA_IOMMU_GART bool "Tegra GART IOMMU Support" depends on ARCH_TEGRA_2x_SOC diff --git a/drivers/iommu/Makefile b/drivers/iommu/Makefile index 9f33fdb3bb05..57cf4ba5e27c 100644 --- a/drivers/iommu/Makefile +++ b/drivers/iommu/Makefile @@ -29,6 +29,7 @@ obj-$(CONFIG_MTK_IOMMU_V1) += mtk_iommu_v1.o obj-$(CONFIG_OMAP_IOMMU) += omap-iommu.o obj-$(CONFIG_OMAP_IOMMU_DEBUG) += omap-iommu-debug.o obj-$(CONFIG_ROCKCHIP_IOMMU) += rockchip-iommu.o +obj-$(CONFIG_SUN50I_IOMMU) += sun50i-iommu.o obj-$(CONFIG_TEGRA_IOMMU_GART) += tegra-gart.o obj-$(CONFIG_TEGRA_IOMMU_SMMU) += tegra-smmu.o obj-$(CONFIG_EXYNOS_IOMMU) += exynos-iommu.o diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c index 1880811cec33..311ef7105c6d 100644 --- a/drivers/iommu/amd_iommu.c +++ b/drivers/iommu/amd_iommu.c @@ -22,7 +22,6 @@ #include <linux/dma-direct.h> #include <linux/dma-iommu.h> #include <linux/iommu-helper.h> -#include <linux/iommu.h> #include <linux/delay.h> #include <linux/amd-iommu.h> #include <linux/notifier.h> @@ -43,8 +42,7 @@ #include <asm/gart.h> #include <asm/dma.h> -#include "amd_iommu_proto.h" -#include "amd_iommu_types.h" +#include "amd_iommu.h" #include "irq_remapping.h" #define CMD_SET_TYPE(cmd, t) ((cmd)->data[1] |= ((t) << 28)) @@ -71,6 +69,8 @@ */ #define AMD_IOMMU_PGSIZES ((~0xFFFUL) & ~(2ULL << 38)) +#define DEFAULT_PGTABLE_LEVEL PAGE_MODE_3_LEVEL + static DEFINE_SPINLOCK(pd_bitmap_lock); /* List of all available dev_data structures */ @@ -99,8 +99,9 @@ struct iommu_cmd { struct kmem_cache *amd_iommu_irq_cache; static void update_domain(struct protection_domain *domain); -static int protection_domain_init(struct protection_domain *domain); static void detach_device(struct device *dev); +static void update_and_flush_device_table(struct protection_domain *domain, + struct domain_pgtable *pgtable); /**************************************************************************** * @@ -125,7 +126,8 @@ static inline int get_acpihid_device_id(struct device *dev, return -ENODEV; list_for_each_entry(p, &acpihid_map, list) { - if (acpi_dev_hid_uid_match(adev, p->hid, p->uid)) { + if (acpi_dev_hid_uid_match(adev, p->hid, + p->uid[0] ? 
p->uid : NULL)) { if (entry) *entry = p; return p->devid; @@ -151,6 +153,26 @@ static struct protection_domain *to_pdomain(struct iommu_domain *dom) return container_of(dom, struct protection_domain, domain); } +static void amd_iommu_domain_get_pgtable(struct protection_domain *domain, + struct domain_pgtable *pgtable) +{ + u64 pt_root = atomic64_read(&domain->pt_root); + + pgtable->root = (u64 *)(pt_root & PAGE_MASK); + pgtable->mode = pt_root & 7; /* lowest 3 bits encode pgtable mode */ +} + +static u64 amd_iommu_domain_encode_pgtable(u64 *root, int mode) +{ + u64 pt_root; + + /* lowest 3 bits encode pgtable mode */ + pt_root = mode & 7; + pt_root |= (u64)root; + + return pt_root; +} + static struct iommu_dev_data *alloc_dev_data(u16 devid) { struct iommu_dev_data *dev_data; @@ -257,12 +279,6 @@ static struct iommu_dev_data *find_dev_data(u16 devid) return dev_data; } -struct iommu_dev_data *get_dev_data(struct device *dev) -{ - return dev->archdata.iommu; -} -EXPORT_SYMBOL(get_dev_data); - /* * Find or create an IOMMU group for a acpihid device. */ @@ -291,16 +307,15 @@ static struct iommu_group *acpihid_device_group(struct device *dev) static bool pci_iommuv2_capable(struct pci_dev *pdev) { static const int caps[] = { - PCI_EXT_CAP_ID_ATS, PCI_EXT_CAP_ID_PRI, PCI_EXT_CAP_ID_PASID, }; int i, pos; - if (pci_ats_disabled()) + if (!pci_ats_supported(pdev)) return false; - for (i = 0; i < 3; ++i) { + for (i = 0; i < 2; ++i) { pos = pci_find_ext_capability(pdev, caps[i]); if (pos == 0) return false; @@ -313,7 +328,7 @@ static bool pdev_pri_erratum(struct pci_dev *pdev, u32 erratum) { struct iommu_dev_data *dev_data; - dev_data = get_dev_data(&pdev->dev); + dev_data = dev_iommu_priv_get(&pdev->dev); return dev_data->errata & (1 << erratum) ? true : false; } @@ -348,7 +363,7 @@ static int iommu_init_device(struct device *dev) struct iommu_dev_data *dev_data; int devid; - if (dev->archdata.iommu) + if (dev_iommu_priv_get(dev)) return 0; devid = get_device_id(dev); @@ -375,7 +390,7 @@ static int iommu_init_device(struct device *dev) dev_data->iommu_v2 = iommu->is_iommu_v2; } - dev->archdata.iommu = dev_data; + dev_iommu_priv_set(dev, dev_data); return 0; } @@ -397,19 +412,16 @@ static void iommu_ignore_device(struct device *dev) static void amd_iommu_uninit_device(struct device *dev) { struct iommu_dev_data *dev_data; - int devid; - - devid = get_device_id(dev); - if (devid < 0) - return; - dev_data = search_dev_data(devid); + dev_data = dev_iommu_priv_get(dev); if (!dev_data) return; if (dev_data->domain) detach_device(dev); + dev_iommu_priv_set(dev, NULL); + /* * We keep dev_data around for unplugged devices and reuse it when the * device is re-plugged - not doing so would introduce a ton of races. 
@@ -472,7 +484,7 @@ static void amd_iommu_report_page_fault(u16 devid, u16 domain_id, pdev = pci_get_domain_bus_and_slot(0, PCI_BUS_NUM(devid), devid & 0xff); if (pdev) - dev_data = get_dev_data(&pdev->dev); + dev_data = dev_iommu_priv_get(&pdev->dev); if (dev_data && __ratelimit(&dev_data->rs)) { pci_err(pdev, "Event logged [IO_PAGE_FAULT domain=0x%04x address=0x%llx flags=0x%04x]\n", @@ -1369,15 +1381,19 @@ static struct page *free_sub_pt(unsigned long root, int mode, return freelist; } -static void free_pagetable(struct protection_domain *domain) +static void free_pagetable(struct domain_pgtable *pgtable) { - unsigned long root = (unsigned long)domain->pt_root; struct page *freelist = NULL; + unsigned long root; - BUG_ON(domain->mode < PAGE_MODE_NONE || - domain->mode > PAGE_MODE_6_LEVEL); + if (pgtable->mode == PAGE_MODE_NONE) + return; - freelist = free_sub_pt(root, domain->mode, freelist); + BUG_ON(pgtable->mode < PAGE_MODE_NONE || + pgtable->mode > PAGE_MODE_6_LEVEL); + + root = (unsigned long)pgtable->root; + freelist = free_sub_pt(root, pgtable->mode, freelist); free_page_list(freelist); } @@ -1391,24 +1407,39 @@ static bool increase_address_space(struct protection_domain *domain, unsigned long address, gfp_t gfp) { + struct domain_pgtable pgtable; unsigned long flags; - bool ret = false; - u64 *pte; + bool ret = true; + u64 *pte, root; spin_lock_irqsave(&domain->lock, flags); - if (address <= PM_LEVEL_SIZE(domain->mode) || - WARN_ON_ONCE(domain->mode == PAGE_MODE_6_LEVEL)) + amd_iommu_domain_get_pgtable(domain, &pgtable); + + if (address <= PM_LEVEL_SIZE(pgtable.mode)) + goto out; + + ret = false; + if (WARN_ON_ONCE(pgtable.mode == PAGE_MODE_6_LEVEL)) goto out; pte = (void *)get_zeroed_page(gfp); if (!pte) goto out; - *pte = PM_LEVEL_PDE(domain->mode, - iommu_virt_to_phys(domain->pt_root)); - domain->pt_root = pte; - domain->mode += 1; + *pte = PM_LEVEL_PDE(pgtable.mode, iommu_virt_to_phys(pgtable.root)); + + pgtable.root = pte; + pgtable.mode += 1; + update_and_flush_device_table(domain, &pgtable); + domain_flush_complete(domain); + + /* + * Device Table needs to be updated and flushed before the new root can + * be published. + */ + root = amd_iommu_domain_encode_pgtable(pte, pgtable.mode); + atomic64_set(&domain->pt_root, root); ret = true; @@ -1425,16 +1456,29 @@ static u64 *alloc_pte(struct protection_domain *domain, gfp_t gfp, bool *updated) { + struct domain_pgtable pgtable; int level, end_lvl; u64 *pte, *page; BUG_ON(!is_power_of_2(page_size)); - while (address > PM_LEVEL_SIZE(domain->mode)) - *updated = increase_address_space(domain, address, gfp) || *updated; + amd_iommu_domain_get_pgtable(domain, &pgtable); + + while (address > PM_LEVEL_SIZE(pgtable.mode)) { + /* + * Return an error if there is no memory to update the + * page-table. 
+ */ + if (!increase_address_space(domain, address, gfp)) + return NULL; - level = domain->mode - 1; - pte = &domain->pt_root[PM_LEVEL_INDEX(level, address)]; + /* Read new values to check if update was successful */ + amd_iommu_domain_get_pgtable(domain, &pgtable); + } + + + level = pgtable.mode - 1; + pte = &pgtable.root[PM_LEVEL_INDEX(level, address)]; address = PAGE_SIZE_ALIGN(address, page_size); end_lvl = PAGE_SIZE_LEVEL(page_size); @@ -1510,16 +1554,19 @@ static u64 *fetch_pte(struct protection_domain *domain, unsigned long address, unsigned long *page_size) { + struct domain_pgtable pgtable; int level; u64 *pte; *page_size = 0; - if (address > PM_LEVEL_SIZE(domain->mode)) + amd_iommu_domain_get_pgtable(domain, &pgtable); + + if (address > PM_LEVEL_SIZE(pgtable.mode)) return NULL; - level = domain->mode - 1; - pte = &domain->pt_root[PM_LEVEL_INDEX(level, address)]; + level = pgtable.mode - 1; + pte = &pgtable.root[PM_LEVEL_INDEX(level, address)]; *page_size = PTE_LEVEL_PAGE_SIZE(level); while (level > 0) { @@ -1634,7 +1681,13 @@ out: unsigned long flags; spin_lock_irqsave(&dom->lock, flags); - update_domain(dom); + /* + * Flush domain TLB(s) and wait for completion. Any Device-Table + * Updates and flushing already happened in + * increase_address_space(). + */ + domain_flush_tlb_pde(dom); + domain_flush_complete(dom); spin_unlock_irqrestore(&dom->lock, flags); } @@ -1753,78 +1806,18 @@ static void free_gcr3_table(struct protection_domain *domain) free_page((unsigned long)domain->gcr3_tbl); } -/* - * Free a domain, only used if something went wrong in the - * allocation path and we need to free an already allocated page table - */ -static void dma_ops_domain_free(struct protection_domain *domain) -{ - if (!domain) - return; - - iommu_put_dma_cookie(&domain->domain); - - free_pagetable(domain); - - if (domain->id) - domain_id_free(domain->id); - - kfree(domain); -} - -/* - * Allocates a new protection domain usable for the dma_ops functions. 
- * It also initializes the page table and the address allocator data - * structures required for the dma_ops interface - */ -static struct protection_domain *dma_ops_domain_alloc(void) -{ - struct protection_domain *domain; - - domain = kzalloc(sizeof(struct protection_domain), GFP_KERNEL); - if (!domain) - return NULL; - - if (protection_domain_init(domain)) - goto free_domain; - - domain->mode = PAGE_MODE_3_LEVEL; - domain->pt_root = (void *)get_zeroed_page(GFP_KERNEL); - domain->flags = PD_DMA_OPS_MASK; - if (!domain->pt_root) - goto free_domain; - - if (iommu_get_dma_cookie(&domain->domain) == -ENOMEM) - goto free_domain; - - return domain; - -free_domain: - dma_ops_domain_free(domain); - - return NULL; -} - -/* - * little helper function to check whether a given protection domain is a - * dma_ops domain - */ -static bool dma_ops_domain(struct protection_domain *domain) -{ - return domain->flags & PD_DMA_OPS_MASK; -} - static void set_dte_entry(u16 devid, struct protection_domain *domain, + struct domain_pgtable *pgtable, bool ats, bool ppr) { u64 pte_root = 0; u64 flags = 0; u32 old_domid; - if (domain->mode != PAGE_MODE_NONE) - pte_root = iommu_virt_to_phys(domain->pt_root); + if (pgtable->mode != PAGE_MODE_NONE) + pte_root = iommu_virt_to_phys(pgtable->root); - pte_root |= (domain->mode & DEV_ENTRY_MODE_MASK) + pte_root |= (pgtable->mode & DEV_ENTRY_MODE_MASK) << DEV_ENTRY_MODE_SHIFT; pte_root |= DTE_FLAG_IR | DTE_FLAG_IW | DTE_FLAG_V | DTE_FLAG_TV; @@ -1897,6 +1890,7 @@ static void clear_dte_entry(u16 devid) static void do_attach(struct iommu_dev_data *dev_data, struct protection_domain *domain) { + struct domain_pgtable pgtable; struct amd_iommu *iommu; bool ats; @@ -1912,7 +1906,9 @@ static void do_attach(struct iommu_dev_data *dev_data, domain->dev_cnt += 1; /* Update device table */ - set_dte_entry(dev_data->devid, domain, ats, dev_data->iommu_v2); + amd_iommu_domain_get_pgtable(domain, &pgtable); + set_dte_entry(dev_data->devid, domain, &pgtable, + ats, dev_data->iommu_v2); clone_aliases(dev_data->pdev); device_flush_dte(dev_data); @@ -2028,7 +2024,7 @@ static int attach_device(struct device *dev, spin_lock_irqsave(&domain->lock, flags); - dev_data = get_dev_data(dev); + dev_data = dev_iommu_priv_get(dev); spin_lock(&dev_data->lock); @@ -2092,7 +2088,7 @@ static void detach_device(struct device *dev) struct iommu_dev_data *dev_data; unsigned long flags; - dev_data = get_dev_data(dev); + dev_data = dev_iommu_priv_get(dev); domain = dev_data->domain; spin_lock_irqsave(&domain->lock, flags); @@ -2141,7 +2137,7 @@ static struct iommu_device *amd_iommu_probe_device(struct device *dev) iommu = amd_iommu_rlookup_table[devid]; - if (get_dev_data(dev)) + if (dev_iommu_priv_get(dev)) return &iommu->iommu; ret = iommu_init_device(dev); @@ -2171,16 +2167,12 @@ static void amd_iommu_probe_finalize(struct device *dev) static void amd_iommu_release_device(struct device *dev) { + int devid = get_device_id(dev); struct amd_iommu *iommu; - int devid; if (!check_device(dev)) return; - devid = get_device_id(dev); - if (devid < 0) - return; - iommu = amd_iommu_rlookup_table[devid]; amd_iommu_uninit_device(dev); @@ -2221,23 +2213,36 @@ static int amd_iommu_domain_get_attr(struct iommu_domain *domain, * *****************************************************************************/ -static void update_device_table(struct protection_domain *domain) +static void update_device_table(struct protection_domain *domain, + struct domain_pgtable *pgtable) { struct iommu_dev_data *dev_data; 
list_for_each_entry(dev_data, &domain->dev_list, list) { - set_dte_entry(dev_data->devid, domain, dev_data->ats.enabled, - dev_data->iommu_v2); + set_dte_entry(dev_data->devid, domain, pgtable, + dev_data->ats.enabled, dev_data->iommu_v2); clone_aliases(dev_data->pdev); } } +static void update_and_flush_device_table(struct protection_domain *domain, + struct domain_pgtable *pgtable) +{ + update_device_table(domain, pgtable); + domain_flush_devices(domain); +} + static void update_domain(struct protection_domain *domain) { - update_device_table(domain); + struct domain_pgtable pgtable; - domain_flush_devices(domain); + /* Update device table */ + amd_iommu_domain_get_pgtable(domain, &pgtable); + update_and_flush_device_table(domain, &pgtable); + + /* Flush domain TLB(s) and wait for completion */ domain_flush_tlb_pde(domain); + domain_flush_complete(domain); } int __init amd_iommu_init_api(void) @@ -2305,27 +2310,46 @@ static void cleanup_domain(struct protection_domain *domain) static void protection_domain_free(struct protection_domain *domain) { + struct domain_pgtable pgtable; + if (!domain) return; if (domain->id) domain_id_free(domain->id); + amd_iommu_domain_get_pgtable(domain, &pgtable); + atomic64_set(&domain->pt_root, 0); + free_pagetable(&pgtable); + kfree(domain); } -static int protection_domain_init(struct protection_domain *domain) +static int protection_domain_init(struct protection_domain *domain, int mode) { + u64 *pt_root = NULL, root; + + BUG_ON(mode < PAGE_MODE_NONE || mode > PAGE_MODE_6_LEVEL); + spin_lock_init(&domain->lock); domain->id = domain_id_alloc(); if (!domain->id) return -ENOMEM; INIT_LIST_HEAD(&domain->dev_list); + if (mode != PAGE_MODE_NONE) { + pt_root = (void *)get_zeroed_page(GFP_KERNEL); + if (!pt_root) + return -ENOMEM; + } + + root = amd_iommu_domain_encode_pgtable(pt_root, mode); + atomic64_set(&domain->pt_root, root); + return 0; } -static struct protection_domain *protection_domain_alloc(void) +static struct protection_domain *protection_domain_alloc(int mode) { struct protection_domain *domain; @@ -2333,7 +2357,7 @@ static struct protection_domain *protection_domain_alloc(void) if (!domain) return NULL; - if (protection_domain_init(domain)) + if (protection_domain_init(domain, mode)) goto out_err; return domain; @@ -2346,45 +2370,30 @@ out_err: static struct iommu_domain *amd_iommu_domain_alloc(unsigned type) { - struct protection_domain *pdomain; + struct protection_domain *domain; + int mode = DEFAULT_PGTABLE_LEVEL; - switch (type) { - case IOMMU_DOMAIN_UNMANAGED: - pdomain = protection_domain_alloc(); - if (!pdomain) - return NULL; + if (type == IOMMU_DOMAIN_IDENTITY) + mode = PAGE_MODE_NONE; - pdomain->mode = PAGE_MODE_3_LEVEL; - pdomain->pt_root = (void *)get_zeroed_page(GFP_KERNEL); - if (!pdomain->pt_root) { - protection_domain_free(pdomain); - return NULL; - } + domain = protection_domain_alloc(mode); + if (!domain) + return NULL; - pdomain->domain.geometry.aperture_start = 0; - pdomain->domain.geometry.aperture_end = ~0ULL; - pdomain->domain.geometry.force_aperture = true; + domain->domain.geometry.aperture_start = 0; + domain->domain.geometry.aperture_end = ~0ULL; + domain->domain.geometry.force_aperture = true; - break; - case IOMMU_DOMAIN_DMA: - pdomain = dma_ops_domain_alloc(); - if (!pdomain) { - pr_err("Failed to allocate\n"); - return NULL; - } - break; - case IOMMU_DOMAIN_IDENTITY: - pdomain = protection_domain_alloc(); - if (!pdomain) - return NULL; + if (type == IOMMU_DOMAIN_DMA && + iommu_get_dma_cookie(&domain->domain) == 
-ENOMEM) + goto free_domain; - pdomain->mode = PAGE_MODE_NONE; - break; - default: - return NULL; - } + return &domain->domain; - return &pdomain->domain; +free_domain: + protection_domain_free(domain); + + return NULL; } static void amd_iommu_domain_free(struct iommu_domain *dom) @@ -2401,27 +2410,19 @@ static void amd_iommu_domain_free(struct iommu_domain *dom) if (!dom) return; - switch (dom->type) { - case IOMMU_DOMAIN_DMA: - /* Now release the domain */ - dma_ops_domain_free(domain); - break; - default: - if (domain->mode != PAGE_MODE_NONE) - free_pagetable(domain); + if (dom->type == IOMMU_DOMAIN_DMA) + iommu_put_dma_cookie(&domain->domain); - if (domain->flags & PD_IOMMUV2_MASK) - free_gcr3_table(domain); + if (domain->flags & PD_IOMMUV2_MASK) + free_gcr3_table(domain); - protection_domain_free(domain); - break; - } + protection_domain_free(domain); } static void amd_iommu_detach_device(struct iommu_domain *dom, struct device *dev) { - struct iommu_dev_data *dev_data = dev->archdata.iommu; + struct iommu_dev_data *dev_data = dev_iommu_priv_get(dev); struct amd_iommu *iommu; int devid; @@ -2459,7 +2460,7 @@ static int amd_iommu_attach_device(struct iommu_domain *dom, if (!check_device(dev)) return -EINVAL; - dev_data = dev->archdata.iommu; + dev_data = dev_iommu_priv_get(dev); dev_data->defer_attach = false; iommu = amd_iommu_rlookup_table[dev_data->devid]; @@ -2490,10 +2491,12 @@ static int amd_iommu_map(struct iommu_domain *dom, unsigned long iova, gfp_t gfp) { struct protection_domain *domain = to_pdomain(dom); + struct domain_pgtable pgtable; int prot = 0; int ret; - if (domain->mode == PAGE_MODE_NONE) + amd_iommu_domain_get_pgtable(domain, &pgtable); + if (pgtable.mode == PAGE_MODE_NONE) return -EINVAL; if (iommu_prot & IOMMU_READ) @@ -2513,8 +2516,10 @@ static size_t amd_iommu_unmap(struct iommu_domain *dom, unsigned long iova, struct iommu_iotlb_gather *gather) { struct protection_domain *domain = to_pdomain(dom); + struct domain_pgtable pgtable; - if (domain->mode == PAGE_MODE_NONE) + amd_iommu_domain_get_pgtable(domain, &pgtable); + if (pgtable.mode == PAGE_MODE_NONE) return 0; return iommu_unmap_page(domain, iova, page_size); @@ -2525,9 +2530,11 @@ static phys_addr_t amd_iommu_iova_to_phys(struct iommu_domain *dom, { struct protection_domain *domain = to_pdomain(dom); unsigned long offset_mask, pte_pgsize; + struct domain_pgtable pgtable; u64 *pte, __pte; - if (domain->mode == PAGE_MODE_NONE) + amd_iommu_domain_get_pgtable(domain, &pgtable); + if (pgtable.mode == PAGE_MODE_NONE) return iova; pte = fetch_pte(domain, iova, &pte_pgsize); @@ -2609,12 +2616,14 @@ static void amd_iommu_get_resv_regions(struct device *dev, list_add_tail(®ion->list, head); } -static bool amd_iommu_is_attach_deferred(struct iommu_domain *domain, - struct device *dev) +bool amd_iommu_is_attach_deferred(struct iommu_domain *domain, + struct device *dev) { - struct iommu_dev_data *dev_data = dev->archdata.iommu; + struct iommu_dev_data *dev_data = dev_iommu_priv_get(dev); + return dev_data->defer_attach; } +EXPORT_SYMBOL_GPL(amd_iommu_is_attach_deferred); static void amd_iommu_flush_iotlb_all(struct iommu_domain *domain) { @@ -2637,7 +2646,7 @@ static int amd_iommu_def_domain_type(struct device *dev) { struct iommu_dev_data *dev_data; - dev_data = get_dev_data(dev); + dev_data = dev_iommu_priv_get(dev); if (!dev_data) return 0; @@ -2696,18 +2705,22 @@ EXPORT_SYMBOL(amd_iommu_unregister_ppr_notifier); void amd_iommu_domain_direct_map(struct iommu_domain *dom) { struct protection_domain *domain = 
to_pdomain(dom); + struct domain_pgtable pgtable; unsigned long flags; spin_lock_irqsave(&domain->lock, flags); + /* First save pgtable configuration*/ + amd_iommu_domain_get_pgtable(domain, &pgtable); + /* Update data structure */ - domain->mode = PAGE_MODE_NONE; + atomic64_set(&domain->pt_root, 0); /* Make changes visible to IOMMUs */ update_domain(domain); /* Page-table is not visible to IOMMU anymore, so free it */ - free_pagetable(domain); + free_pagetable(&pgtable); spin_unlock_irqrestore(&domain->lock, flags); } @@ -2896,9 +2909,11 @@ static u64 *__get_gcr3_pte(u64 *root, int level, int pasid, bool alloc) static int __set_gcr3(struct protection_domain *domain, int pasid, unsigned long cr3) { + struct domain_pgtable pgtable; u64 *pte; - if (domain->mode != PAGE_MODE_NONE) + amd_iommu_domain_get_pgtable(domain, &pgtable); + if (pgtable.mode != PAGE_MODE_NONE) return -EINVAL; pte = __get_gcr3_pte(domain->gcr3_tbl, domain->glx, pasid, true); @@ -2912,9 +2927,11 @@ static int __set_gcr3(struct protection_domain *domain, int pasid, static int __clear_gcr3(struct protection_domain *domain, int pasid) { + struct domain_pgtable pgtable; u64 *pte; - if (domain->mode != PAGE_MODE_NONE) + amd_iommu_domain_get_pgtable(domain, &pgtable); + if (pgtable.mode != PAGE_MODE_NONE) return -EINVAL; pte = __get_gcr3_pte(domain->gcr3_tbl, domain->glx, pasid, false); @@ -2962,7 +2979,7 @@ int amd_iommu_complete_ppr(struct pci_dev *pdev, int pasid, struct amd_iommu *iommu; struct iommu_cmd cmd; - dev_data = get_dev_data(&pdev->dev); + dev_data = dev_iommu_priv_get(&pdev->dev); iommu = amd_iommu_rlookup_table[dev_data->devid]; build_complete_ppr(&cmd, dev_data->devid, pasid, status, @@ -2975,23 +2992,27 @@ EXPORT_SYMBOL(amd_iommu_complete_ppr); struct iommu_domain *amd_iommu_get_v2_domain(struct pci_dev *pdev) { struct protection_domain *pdomain; - struct iommu_domain *io_domain; + struct iommu_dev_data *dev_data; struct device *dev = &pdev->dev; + struct iommu_domain *io_domain; if (!check_device(dev)) return NULL; - pdomain = get_dev_data(dev)->domain; - if (pdomain == NULL && get_dev_data(dev)->defer_attach) { - get_dev_data(dev)->defer_attach = false; - io_domain = iommu_get_domain_for_dev(dev); + dev_data = dev_iommu_priv_get(&pdev->dev); + pdomain = dev_data->domain; + io_domain = iommu_get_domain_for_dev(dev); + + if (pdomain == NULL && dev_data->defer_attach) { + dev_data->defer_attach = false; pdomain = to_pdomain(io_domain); attach_device(dev, pdomain); } + if (pdomain == NULL) return NULL; - if (!dma_ops_domain(pdomain)) + if (io_domain->type != IOMMU_DOMAIN_DMA) return NULL; /* Only return IOMMUv2 domains */ @@ -3009,7 +3030,7 @@ void amd_iommu_enable_device_erratum(struct pci_dev *pdev, u32 erratum) if (!amd_iommu_v2_supported()) return; - dev_data = get_dev_data(&pdev->dev); + dev_data = dev_iommu_priv_get(&pdev->dev); dev_data->errata |= (1 << erratum); } EXPORT_SYMBOL(amd_iommu_enable_device_erratum); @@ -3028,11 +3049,8 @@ int amd_iommu_device_info(struct pci_dev *pdev, memset(info, 0, sizeof(*info)); - if (!pci_ats_disabled()) { - pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ATS); - if (pos) - info->flags |= AMD_IOMMU_DEVICE_FLAG_ATS_SUP; - } + if (pci_ats_supported(pdev)) + info->flags |= AMD_IOMMU_DEVICE_FLAG_ATS_SUP; pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI); if (pos) diff --git a/drivers/iommu/amd_iommu.h b/drivers/iommu/amd_iommu.h index 12d540d9b59b..f892992c8744 100644 --- a/drivers/iommu/amd_iommu.h +++ b/drivers/iommu/amd_iommu.h @@ -1,9 +1,103 @@ /* 
SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2009-2010 Advanced Micro Devices, Inc. + * Author: Joerg Roedel <jroedel@suse.de> + */ #ifndef AMD_IOMMU_H #define AMD_IOMMU_H -int __init add_special_device(u8 type, u8 id, u16 *devid, bool cmd_line); +#include <linux/iommu.h> + +#include "amd_iommu_types.h" + +extern int amd_iommu_get_num_iommus(void); +extern int amd_iommu_init_dma_ops(void); +extern int amd_iommu_init_passthrough(void); +extern irqreturn_t amd_iommu_int_thread(int irq, void *data); +extern irqreturn_t amd_iommu_int_handler(int irq, void *data); +extern void amd_iommu_apply_erratum_63(u16 devid); +extern void amd_iommu_reset_cmd_buffer(struct amd_iommu *iommu); +extern int amd_iommu_init_devices(void); +extern void amd_iommu_uninit_devices(void); +extern void amd_iommu_init_notifier(void); +extern int amd_iommu_init_api(void); + +#ifdef CONFIG_AMD_IOMMU_DEBUGFS +void amd_iommu_debugfs_setup(struct amd_iommu *iommu); +#else +static inline void amd_iommu_debugfs_setup(struct amd_iommu *iommu) {} +#endif + +/* Needed for interrupt remapping */ +extern int amd_iommu_prepare(void); +extern int amd_iommu_enable(void); +extern void amd_iommu_disable(void); +extern int amd_iommu_reenable(int); +extern int amd_iommu_enable_faulting(void); +extern int amd_iommu_guest_ir; + +/* IOMMUv2 specific functions */ +struct iommu_domain; + +extern bool amd_iommu_v2_supported(void); +extern int amd_iommu_register_ppr_notifier(struct notifier_block *nb); +extern int amd_iommu_unregister_ppr_notifier(struct notifier_block *nb); +extern void amd_iommu_domain_direct_map(struct iommu_domain *dom); +extern int amd_iommu_domain_enable_v2(struct iommu_domain *dom, int pasids); +extern int amd_iommu_flush_page(struct iommu_domain *dom, int pasid, + u64 address); +extern int amd_iommu_flush_tlb(struct iommu_domain *dom, int pasid); +extern int amd_iommu_domain_set_gcr3(struct iommu_domain *dom, int pasid, + unsigned long cr3); +extern int amd_iommu_domain_clear_gcr3(struct iommu_domain *dom, int pasid); +extern struct iommu_domain *amd_iommu_get_v2_domain(struct pci_dev *pdev); + +#ifdef CONFIG_IRQ_REMAP +extern int amd_iommu_create_irq_domain(struct amd_iommu *iommu); +#else +static inline int amd_iommu_create_irq_domain(struct amd_iommu *iommu) +{ + return 0; +} +#endif + +#define PPR_SUCCESS 0x0 +#define PPR_INVALID 0x1 +#define PPR_FAILURE 0xf + +extern int amd_iommu_complete_ppr(struct pci_dev *pdev, int pasid, + int status, int tag); + +static inline bool is_rd890_iommu(struct pci_dev *pdev) +{ + return (pdev->vendor == PCI_VENDOR_ID_ATI) && + (pdev->device == PCI_DEVICE_ID_RD890_IOMMU); +} + +static inline bool iommu_feature(struct amd_iommu *iommu, u64 f) +{ + if (!(iommu->cap & (1 << IOMMU_CAP_EFR))) + return false; + + return !!(iommu->features & f); +} + +static inline u64 iommu_virt_to_phys(void *vaddr) +{ + return (u64)__sme_set(virt_to_phys(vaddr)); +} + +static inline void *iommu_phys_to_virt(unsigned long paddr) +{ + return phys_to_virt(__sme_clr(paddr)); +} + +extern bool translation_pre_enabled(struct amd_iommu *iommu); +extern bool amd_iommu_is_attach_deferred(struct iommu_domain *domain, + struct device *dev); +extern int __init add_special_device(u8 type, u8 id, u16 *devid, + bool cmd_line); #ifdef CONFIG_DMI void amd_iommu_apply_ivrs_quirks(void); diff --git a/drivers/iommu/amd_iommu_debugfs.c b/drivers/iommu/amd_iommu_debugfs.c index c6a5c737ef09..545372fcc72f 100644 --- a/drivers/iommu/amd_iommu_debugfs.c +++ b/drivers/iommu/amd_iommu_debugfs.c @@ -8,10 +8,9 @@ */ 
#include <linux/debugfs.h> -#include <linux/iommu.h> #include <linux/pci.h> -#include "amd_iommu_proto.h" -#include "amd_iommu_types.h" + +#include "amd_iommu.h" static struct dentry *amd_iommu_debugfs; static DEFINE_MUTEX(amd_iommu_debugfs_lock); diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c index 2b9a67ecc6ac..3faff7f80fd2 100644 --- a/drivers/iommu/amd_iommu_init.c +++ b/drivers/iommu/amd_iommu_init.c @@ -18,7 +18,6 @@ #include <linux/msi.h> #include <linux/amd-iommu.h> #include <linux/export.h> -#include <linux/iommu.h> #include <linux/kmemleak.h> #include <linux/mem_encrypt.h> #include <asm/pci-direct.h> @@ -32,9 +31,8 @@ #include <asm/irq_remapping.h> #include <linux/crash_dump.h> + #include "amd_iommu.h" -#include "amd_iommu_proto.h" -#include "amd_iommu_types.h" #include "irq_remapping.h" /* @@ -1329,8 +1327,8 @@ static int __init init_iommu_from_acpi(struct amd_iommu *iommu, } case IVHD_DEV_ACPI_HID: { u16 devid; - u8 hid[ACPIHID_HID_LEN] = {0}; - u8 uid[ACPIHID_UID_LEN] = {0}; + u8 hid[ACPIHID_HID_LEN]; + u8 uid[ACPIHID_UID_LEN]; int ret; if (h->type != 0x40) { @@ -1347,6 +1345,7 @@ static int __init init_iommu_from_acpi(struct amd_iommu *iommu, break; } + uid[0] = '\0'; switch (e->uidf) { case UID_NOT_PRESENT: @@ -1361,8 +1360,8 @@ static int __init init_iommu_from_acpi(struct amd_iommu *iommu, break; case UID_IS_CHARACTER: - memcpy(uid, (u8 *)(&e->uid), ACPIHID_UID_LEN - 1); - uid[ACPIHID_UID_LEN - 1] = '\0'; + memcpy(uid, &e->uid, e->uidl); + uid[e->uidl] = '\0'; break; default: diff --git a/drivers/iommu/amd_iommu_proto.h b/drivers/iommu/amd_iommu_proto.h deleted file mode 100644 index 92c2ba6468a0..000000000000 --- a/drivers/iommu/amd_iommu_proto.h +++ /dev/null @@ -1,96 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * Copyright (C) 2009-2010 Advanced Micro Devices, Inc. 
- * Author: Joerg Roedel <jroedel@suse.de> - */ - -#ifndef _ASM_X86_AMD_IOMMU_PROTO_H -#define _ASM_X86_AMD_IOMMU_PROTO_H - -#include "amd_iommu_types.h" - -extern int amd_iommu_get_num_iommus(void); -extern int amd_iommu_init_dma_ops(void); -extern int amd_iommu_init_passthrough(void); -extern irqreturn_t amd_iommu_int_thread(int irq, void *data); -extern irqreturn_t amd_iommu_int_handler(int irq, void *data); -extern void amd_iommu_apply_erratum_63(u16 devid); -extern void amd_iommu_reset_cmd_buffer(struct amd_iommu *iommu); -extern int amd_iommu_init_devices(void); -extern void amd_iommu_uninit_devices(void); -extern void amd_iommu_init_notifier(void); -extern int amd_iommu_init_api(void); - -#ifdef CONFIG_AMD_IOMMU_DEBUGFS -void amd_iommu_debugfs_setup(struct amd_iommu *iommu); -#else -static inline void amd_iommu_debugfs_setup(struct amd_iommu *iommu) {} -#endif - -/* Needed for interrupt remapping */ -extern int amd_iommu_prepare(void); -extern int amd_iommu_enable(void); -extern void amd_iommu_disable(void); -extern int amd_iommu_reenable(int); -extern int amd_iommu_enable_faulting(void); -extern int amd_iommu_guest_ir; - -/* IOMMUv2 specific functions */ -struct iommu_domain; - -extern bool amd_iommu_v2_supported(void); -extern int amd_iommu_register_ppr_notifier(struct notifier_block *nb); -extern int amd_iommu_unregister_ppr_notifier(struct notifier_block *nb); -extern void amd_iommu_domain_direct_map(struct iommu_domain *dom); -extern int amd_iommu_domain_enable_v2(struct iommu_domain *dom, int pasids); -extern int amd_iommu_flush_page(struct iommu_domain *dom, int pasid, - u64 address); -extern int amd_iommu_flush_tlb(struct iommu_domain *dom, int pasid); -extern int amd_iommu_domain_set_gcr3(struct iommu_domain *dom, int pasid, - unsigned long cr3); -extern int amd_iommu_domain_clear_gcr3(struct iommu_domain *dom, int pasid); -extern struct iommu_domain *amd_iommu_get_v2_domain(struct pci_dev *pdev); - -#ifdef CONFIG_IRQ_REMAP -extern int amd_iommu_create_irq_domain(struct amd_iommu *iommu); -#else -static inline int amd_iommu_create_irq_domain(struct amd_iommu *iommu) -{ - return 0; -} -#endif - -#define PPR_SUCCESS 0x0 -#define PPR_INVALID 0x1 -#define PPR_FAILURE 0xf - -extern int amd_iommu_complete_ppr(struct pci_dev *pdev, int pasid, - int status, int tag); - -static inline bool is_rd890_iommu(struct pci_dev *pdev) -{ - return (pdev->vendor == PCI_VENDOR_ID_ATI) && - (pdev->device == PCI_DEVICE_ID_RD890_IOMMU); -} - -static inline bool iommu_feature(struct amd_iommu *iommu, u64 f) -{ - if (!(iommu->cap & (1 << IOMMU_CAP_EFR))) - return false; - - return !!(iommu->features & f); -} - -static inline u64 iommu_virt_to_phys(void *vaddr) -{ - return (u64)__sme_set(virt_to_phys(vaddr)); -} - -static inline void *iommu_phys_to_virt(unsigned long paddr) -{ - return phys_to_virt(__sme_clr(paddr)); -} - -extern bool translation_pre_enabled(struct amd_iommu *iommu); -extern struct iommu_dev_data *get_dev_data(struct device *dev); -#endif /* _ASM_X86_AMD_IOMMU_PROTO_H */ diff --git a/drivers/iommu/amd_iommu_types.h b/drivers/iommu/amd_iommu_types.h index d0d7b6a0c3d8..30a5d412255a 100644 --- a/drivers/iommu/amd_iommu_types.h +++ b/drivers/iommu/amd_iommu_types.h @@ -395,10 +395,10 @@ #define PD_IOMMUV2_MASK (1UL << 3) /* domain has gcr3 table */ extern bool amd_iommu_dump; -#define DUMP_printk(format, arg...) \ - do { \ - if (amd_iommu_dump) \ - printk(KERN_INFO "AMD-Vi: " format, ## arg); \ +#define DUMP_printk(format, arg...) 
\ + do { \ + if (amd_iommu_dump) \ + pr_info("AMD-Vi: " format, ## arg); \ } while(0); /* global flag if IOMMUs cache non-present entries */ @@ -468,8 +468,7 @@ struct protection_domain { iommu core code */ spinlock_t lock; /* mostly used to lock the page table*/ u16 id; /* the domain id written to the device table */ - int mode; /* paging mode (0-6 levels) */ - u64 *pt_root; /* page table root pointer */ + atomic64_t pt_root; /* pgtable root and pgtable mode */ int glx; /* Number of levels for GCR3 table */ u64 *gcr3_tbl; /* Guest CR3 table */ unsigned long flags; /* flags to find out type of domain */ @@ -477,6 +476,12 @@ struct protection_domain { unsigned dev_iommu[MAX_IOMMUS]; /* per-IOMMU reference count */ }; +/* For decocded pt_root */ +struct domain_pgtable { + int mode; + u64 *root; +}; + /* * Structure where we save information about one hardware AMD IOMMU in the * system. diff --git a/drivers/iommu/amd_iommu_v2.c b/drivers/iommu/amd_iommu_v2.c index d6d85debd01b..c8a7b6b39222 100644 --- a/drivers/iommu/amd_iommu_v2.c +++ b/drivers/iommu/amd_iommu_v2.c @@ -13,13 +13,11 @@ #include <linux/module.h> #include <linux/sched.h> #include <linux/sched/mm.h> -#include <linux/iommu.h> #include <linux/wait.h> #include <linux/pci.h> #include <linux/gfp.h> -#include "amd_iommu_types.h" -#include "amd_iommu_proto.h" +#include "amd_iommu.h" MODULE_LICENSE("GPL v2"); MODULE_AUTHOR("Joerg Roedel <jroedel@suse.de>"); @@ -517,13 +515,12 @@ static int ppr_notifier(struct notifier_block *nb, unsigned long e, void *data) struct amd_iommu_fault *iommu_fault; struct pasid_state *pasid_state; struct device_state *dev_state; + struct pci_dev *pdev = NULL; unsigned long flags; struct fault *fault; bool finish; u16 tag, devid; int ret; - struct iommu_dev_data *dev_data; - struct pci_dev *pdev = NULL; iommu_fault = data; tag = iommu_fault->tag & 0x1ff; @@ -534,12 +531,11 @@ static int ppr_notifier(struct notifier_block *nb, unsigned long e, void *data) devid & 0xff); if (!pdev) return -ENODEV; - dev_data = get_dev_data(&pdev->dev); - /* In kdump kernel pci dev is not initialized yet -> send INVALID */ ret = NOTIFY_DONE; - if (translation_pre_enabled(amd_iommu_rlookup_table[devid]) - && dev_data->defer_attach) { + + /* In kdump kernel pci dev is not initialized yet -> send INVALID */ + if (amd_iommu_is_attach_deferred(NULL, &pdev->dev)) { amd_iommu_complete_ppr(pdev, iommu_fault->pasid, PPR_INVALID, tag); goto out; diff --git a/drivers/iommu/arm-smmu-impl.c b/drivers/iommu/arm-smmu-impl.c index 74d97a886e93..c75b9d957b70 100644 --- a/drivers/iommu/arm-smmu-impl.c +++ b/drivers/iommu/arm-smmu-impl.c @@ -150,6 +150,8 @@ static const struct arm_smmu_impl arm_mmu500_impl = { struct arm_smmu_device *arm_smmu_impl_init(struct arm_smmu_device *smmu) { + const struct device_node *np = smmu->dev->of_node; + /* * We will inevitably have to combine model-specific implementation * quirks with platform-specific integration quirks, but everything @@ -166,11 +168,11 @@ struct arm_smmu_device *arm_smmu_impl_init(struct arm_smmu_device *smmu) break; } - if (of_property_read_bool(smmu->dev->of_node, - "calxeda,smmu-secure-config-access")) + if (of_property_read_bool(np, "calxeda,smmu-secure-config-access")) smmu->impl = &calxeda_impl; - if (of_device_is_compatible(smmu->dev->of_node, "qcom,sdm845-smmu-500")) + if (of_device_is_compatible(np, "qcom,sdm845-smmu-500") || + of_device_is_compatible(np, "qcom,sc7180-smmu-500")) return qcom_smmu_impl_init(smmu); return smmu; diff --git a/drivers/iommu/arm-smmu-qcom.c 
b/drivers/iommu/arm-smmu-qcom.c index 24c071c1d8b0..cf01d0215a39 100644 --- a/drivers/iommu/arm-smmu-qcom.c +++ b/drivers/iommu/arm-smmu-qcom.c @@ -3,6 +3,7 @@ * Copyright (c) 2019, The Linux Foundation. All rights reserved. */ +#include <linux/of_device.h> #include <linux/qcom_scm.h> #include "arm-smmu.h" @@ -11,12 +12,29 @@ struct qcom_smmu { struct arm_smmu_device smmu; }; +static const struct of_device_id qcom_smmu_client_of_match[] = { + { .compatible = "qcom,adreno" }, + { .compatible = "qcom,mdp4" }, + { .compatible = "qcom,mdss" }, + { .compatible = "qcom,sc7180-mdss" }, + { .compatible = "qcom,sc7180-mss-pil" }, + { .compatible = "qcom,sdm845-mdss" }, + { .compatible = "qcom,sdm845-mss-pil" }, + { } +}; + +static int qcom_smmu_def_domain_type(struct device *dev) +{ + const struct of_device_id *match = + of_match_device(qcom_smmu_client_of_match, dev); + + return match ? IOMMU_DOMAIN_IDENTITY : 0; +} + static int qcom_sdm845_smmu500_reset(struct arm_smmu_device *smmu) { int ret; - arm_mmu500_reset(smmu); - /* * To address performance degradation in non-real time clients, * such as USB and UFS, turn off wait-for-safe on sdm845 based boards, @@ -30,8 +48,21 @@ static int qcom_sdm845_smmu500_reset(struct arm_smmu_device *smmu) return ret; } +static int qcom_smmu500_reset(struct arm_smmu_device *smmu) +{ + const struct device_node *np = smmu->dev->of_node; + + arm_mmu500_reset(smmu); + + if (of_device_is_compatible(np, "qcom,sdm845-smmu-500")) + return qcom_sdm845_smmu500_reset(smmu); + + return 0; +} + static const struct arm_smmu_impl qcom_smmu_impl = { - .reset = qcom_sdm845_smmu500_reset, + .def_domain_type = qcom_smmu_def_domain_type, + .reset = qcom_smmu500_reset, }; struct arm_smmu_device *qcom_smmu_impl_init(struct arm_smmu_device *smmu) diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c index 42e1ee7e5197..f578677a5c41 100644 --- a/drivers/iommu/arm-smmu-v3.c +++ b/drivers/iommu/arm-smmu-v3.c @@ -171,6 +171,8 @@ #define ARM_SMMU_PRIQ_IRQ_CFG1 0xd8 #define ARM_SMMU_PRIQ_IRQ_CFG2 0xdc +#define ARM_SMMU_REG_SZ 0xe00 + /* Common MSI config fields */ #define MSI_CFG0_ADDR_MASK GENMASK_ULL(51, 2) #define MSI_CFG2_SH GENMASK(5, 4) @@ -628,6 +630,7 @@ struct arm_smmu_strtab_cfg { struct arm_smmu_device { struct device *dev; void __iomem *base; + void __iomem *page1; #define ARM_SMMU_FEAT_2_LVL_STRTAB (1 << 0) #define ARM_SMMU_FEAT_2_LVL_CDTAB (1 << 1) @@ -664,7 +667,6 @@ struct arm_smmu_device { #define ARM_SMMU_MAX_ASIDS (1 << 16) unsigned int asid_bits; - DECLARE_BITMAP(asid_map, ARM_SMMU_MAX_ASIDS); #define ARM_SMMU_MAX_VMIDS (1 << 16) unsigned int vmid_bits; @@ -724,6 +726,8 @@ struct arm_smmu_option_prop { const char *prop; }; +static DEFINE_XARRAY_ALLOC1(asid_xa); + static struct arm_smmu_option_prop arm_smmu_options[] = { { ARM_SMMU_OPT_SKIP_PREFETCH, "hisilicon,broken-prefetch-cmd" }, { ARM_SMMU_OPT_PAGE0_REGS_ONLY, "cavium,cn9900-broken-page1-regspace"}, @@ -733,9 +737,8 @@ static struct arm_smmu_option_prop arm_smmu_options[] = { static inline void __iomem *arm_smmu_page1_fixup(unsigned long offset, struct arm_smmu_device *smmu) { - if ((offset > SZ_64K) && - (smmu->options & ARM_SMMU_OPT_PAGE0_REGS_ONLY)) - offset -= SZ_64K; + if (offset > SZ_64K) + return smmu->page1 + offset - SZ_64K; return smmu->base + offset; } @@ -1763,6 +1766,14 @@ static void arm_smmu_free_cd_tables(struct arm_smmu_domain *smmu_domain) cdcfg->cdtab = NULL; } +static void arm_smmu_free_asid(struct arm_smmu_ctx_desc *cd) +{ + if (!cd->asid) + return; + + xa_erase(&asid_xa, 
cd->asid); +} + /* Stream table manipulation functions */ static void arm_smmu_write_strtab_l1_desc(__le64 *dst, struct arm_smmu_strtab_l1_desc *desc) @@ -2448,10 +2459,9 @@ static void arm_smmu_domain_free(struct iommu_domain *domain) if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) { struct arm_smmu_s1_cfg *cfg = &smmu_domain->s1_cfg; - if (cfg->cdcfg.cdtab) { + if (cfg->cdcfg.cdtab) arm_smmu_free_cd_tables(smmu_domain); - arm_smmu_bitmap_free(smmu->asid_map, cfg->cd.asid); - } + arm_smmu_free_asid(&cfg->cd); } else { struct arm_smmu_s2_cfg *cfg = &smmu_domain->s2_cfg; if (cfg->vmid) @@ -2466,14 +2476,15 @@ static int arm_smmu_domain_finalise_s1(struct arm_smmu_domain *smmu_domain, struct io_pgtable_cfg *pgtbl_cfg) { int ret; - int asid; + u32 asid; struct arm_smmu_device *smmu = smmu_domain->smmu; struct arm_smmu_s1_cfg *cfg = &smmu_domain->s1_cfg; typeof(&pgtbl_cfg->arm_lpae_s1_cfg.tcr) tcr = &pgtbl_cfg->arm_lpae_s1_cfg.tcr; - asid = arm_smmu_bitmap_alloc(smmu->asid_map, smmu->asid_bits); - if (asid < 0) - return asid; + ret = xa_alloc(&asid_xa, &asid, &cfg->cd, + XA_LIMIT(1, (1 << smmu->asid_bits) - 1), GFP_KERNEL); + if (ret) + return ret; cfg->s1cdmax = master->ssid_bits; @@ -2506,7 +2517,7 @@ static int arm_smmu_domain_finalise_s1(struct arm_smmu_domain *smmu_domain, out_free_cd_tables: arm_smmu_free_cd_tables(smmu_domain); out_free_asid: - arm_smmu_bitmap_free(smmu->asid_map, asid); + arm_smmu_free_asid(&cfg->cd); return ret; } @@ -2652,26 +2663,20 @@ static void arm_smmu_install_ste_for_dev(struct arm_smmu_master *master) } } -#ifdef CONFIG_PCI_ATS static bool arm_smmu_ats_supported(struct arm_smmu_master *master) { - struct pci_dev *pdev; + struct device *dev = master->dev; struct arm_smmu_device *smmu = master->smmu; - struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(master->dev); + struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev); - if (!(smmu->features & ARM_SMMU_FEAT_ATS) || !dev_is_pci(master->dev) || - !(fwspec->flags & IOMMU_FWSPEC_PCI_RC_ATS) || pci_ats_disabled()) + if (!(smmu->features & ARM_SMMU_FEAT_ATS)) return false; - pdev = to_pci_dev(master->dev); - return !pdev->untrusted && pdev->ats_cap; -} -#else -static bool arm_smmu_ats_supported(struct arm_smmu_master *master) -{ - return false; + if (!(fwspec->flags & IOMMU_FWSPEC_PCI_RC_ATS)) + return false; + + return dev_is_pci(dev) && pci_ats_supported(to_pci_dev(dev)); } -#endif static void arm_smmu_enable_ats(struct arm_smmu_master *master) { @@ -2986,13 +2991,11 @@ static void arm_smmu_release_device(struct device *dev) { struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev); struct arm_smmu_master *master; - struct arm_smmu_device *smmu; if (!fwspec || fwspec->ops != &arm_smmu_ops) return; master = dev_iommu_priv_get(dev); - smmu = master->smmu; arm_smmu_detach_dev(master); arm_smmu_disable_pasid(master); kfree(master); @@ -4003,6 +4006,18 @@ err_reset_pci_ops: __maybe_unused; return err; } +static void __iomem *arm_smmu_ioremap(struct device *dev, resource_size_t start, + resource_size_t size) +{ + struct resource res = { + .flags = IORESOURCE_MEM, + .start = start, + .end = start + size - 1, + }; + + return devm_ioremap_resource(dev, &res); +} + static int arm_smmu_device_probe(struct platform_device *pdev) { int irq, ret; @@ -4038,10 +4053,23 @@ static int arm_smmu_device_probe(struct platform_device *pdev) } ioaddr = res->start; - smmu->base = devm_ioremap_resource(dev, res); + /* + * Don't map the IMPLEMENTATION DEFINED regions, since they may contain + * the PMCG registers which are reserved by the 
PMU driver. + */ + smmu->base = arm_smmu_ioremap(dev, ioaddr, ARM_SMMU_REG_SZ); if (IS_ERR(smmu->base)) return PTR_ERR(smmu->base); + if (arm_smmu_resource_size(smmu) > SZ_64K) { + smmu->page1 = arm_smmu_ioremap(dev, ioaddr + SZ_64K, + ARM_SMMU_REG_SZ); + if (IS_ERR(smmu->page1)) + return PTR_ERR(smmu->page1); + } else { + smmu->page1 = smmu->base; + } + /* Interrupt lines */ irq = platform_get_irq_byname_optional(pdev, "combined"); diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c index e622f4e33379..243bc4cb2705 100644 --- a/drivers/iommu/arm-smmu.c +++ b/drivers/iommu/arm-smmu.c @@ -1609,6 +1609,17 @@ static void arm_smmu_get_resv_regions(struct device *dev, iommu_dma_get_resv_regions(dev, head); } +static int arm_smmu_def_domain_type(struct device *dev) +{ + struct arm_smmu_master_cfg *cfg = dev_iommu_priv_get(dev); + const struct arm_smmu_impl *impl = cfg->smmu->impl; + + if (impl && impl->def_domain_type) + return impl->def_domain_type(dev); + + return 0; +} + static struct iommu_ops arm_smmu_ops = { .capable = arm_smmu_capable, .domain_alloc = arm_smmu_domain_alloc, @@ -1627,6 +1638,7 @@ static struct iommu_ops arm_smmu_ops = { .of_xlate = arm_smmu_of_xlate, .get_resv_regions = arm_smmu_get_resv_regions, .put_resv_regions = generic_iommu_put_resv_regions, + .def_domain_type = arm_smmu_def_domain_type, .pgsize_bitmap = -1UL, /* Restricted during device attach */ }; @@ -2244,7 +2256,7 @@ static int arm_smmu_device_remove(struct platform_device *pdev) return -ENODEV; if (!bitmap_empty(smmu->context_map, ARM_SMMU_MAX_CBS)) - dev_err(&pdev->dev, "removing device with active domains!\n"); + dev_notice(&pdev->dev, "disabling translation\n"); arm_smmu_bus_init(NULL); iommu_device_unregister(&smmu->iommu); diff --git a/drivers/iommu/arm-smmu.h b/drivers/iommu/arm-smmu.h index 8d1cd54d82a6..d172c024be61 100644 --- a/drivers/iommu/arm-smmu.h +++ b/drivers/iommu/arm-smmu.h @@ -386,6 +386,7 @@ struct arm_smmu_impl { int (*init_context)(struct arm_smmu_domain *smmu_domain); void (*tlb_sync)(struct arm_smmu_device *smmu, int page, int sync, int status); + int (*def_domain_type)(struct device *dev); }; static inline void __iomem *arm_smmu_page(struct arm_smmu_device *smmu, int n) diff --git a/drivers/iommu/hyperv-iommu.c b/drivers/iommu/hyperv-iommu.c index a386b83e0e34..3c0c67a99c7b 100644 --- a/drivers/iommu/hyperv-iommu.c +++ b/drivers/iommu/hyperv-iommu.c @@ -131,7 +131,7 @@ static int hyperv_irq_remapping_activate(struct irq_domain *domain, return 0; } -static struct irq_domain_ops hyperv_ir_domain_ops = { +static const struct irq_domain_ops hyperv_ir_domain_ops = { .alloc = hyperv_irq_remapping_alloc, .free = hyperv_irq_remapping_free, .activate = hyperv_irq_remapping_activate, diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c index 5767882aa80f..648a785e078a 100644 --- a/drivers/iommu/intel-iommu.c +++ b/drivers/iommu/intel-iommu.c @@ -1432,8 +1432,7 @@ static void iommu_enable_dev_iotlb(struct device_domain_info *info) !pci_reset_pri(pdev) && !pci_enable_pri(pdev, 32)) info->pri_enabled = 1; #endif - if (!pdev->untrusted && info->ats_supported && - pci_ats_page_aligned(pdev) && + if (info->ats_supported && pci_ats_page_aligned(pdev) && !pci_enable_ats(pdev, VTD_PAGE_SHIFT)) { info->ats_enabled = 1; domain_update_iotlb(info->domain); @@ -2542,10 +2541,8 @@ static struct dmar_domain *dmar_insert_one_dev_info(struct intel_iommu *iommu, if (dev && dev_is_pci(dev)) { struct pci_dev *pdev = to_pci_dev(info->dev); - if (!pdev->untrusted && - !pci_ats_disabled() 
&& - ecap_dev_iotlb_support(iommu->ecap) && - pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ATS) && + if (ecap_dev_iotlb_support(iommu->ecap) && + pci_ats_supported(pdev) && dmar_find_matched_atsr_unit(pdev)) info->ats_supported = 1; diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c index 374b34fd6fac..b5ea203f6c68 100644 --- a/drivers/iommu/iommu.c +++ b/drivers/iommu/iommu.c @@ -80,7 +80,8 @@ static bool iommu_cmd_line_dma_api(void) return !!(iommu_cmd_line & IOMMU_CMD_LINE_DMA_API); } -static int iommu_alloc_default_domain(struct device *dev); +static int iommu_alloc_default_domain(struct iommu_group *group, + struct device *dev); static struct iommu_domain *__iommu_domain_alloc(struct bus_type *bus, unsigned type); static int __iommu_attach_device(struct iommu_domain *domain, @@ -251,17 +252,17 @@ int iommu_probe_device(struct device *dev) if (ret) goto err_out; + group = iommu_group_get(dev); + if (!group) + goto err_release; + /* * Try to allocate a default domain - needs support from the * IOMMU driver. There are still some drivers which don't * support default domains, so the return value is not yet * checked. */ - iommu_alloc_default_domain(dev); - - group = iommu_group_get(dev); - if (!group) - goto err_release; + iommu_alloc_default_domain(group, dev); if (group->default_domain) ret = __iommu_attach_device(group->default_domain, dev); @@ -586,7 +587,7 @@ struct iommu_group *iommu_group_alloc(void) NULL, "%d", group->id); if (ret) { ida_simple_remove(&iommu_group_ida, group->id); - kfree(group); + kobject_put(&group->kobj); return ERR_PTR(ret); } @@ -769,6 +770,15 @@ out: return ret; } +static bool iommu_is_attach_deferred(struct iommu_domain *domain, + struct device *dev) +{ + if (domain->ops->is_attach_deferred) + return domain->ops->is_attach_deferred(domain, dev); + + return false; +} + /** * iommu_group_add_device - add a device to an iommu group * @group: the group into which to add the device (reference should be held) @@ -821,7 +831,7 @@ rename: mutex_lock(&group->mutex); list_add_tail(&device->list, &group->devices); - if (group->domain) + if (group->domain && !iommu_is_attach_deferred(group->domain, dev)) ret = __iommu_attach_device(group->domain, dev); mutex_unlock(&group->mutex); if (ret) @@ -1478,15 +1488,11 @@ static int iommu_group_alloc_default_domain(struct bus_type *bus, return 0; } -static int iommu_alloc_default_domain(struct device *dev) +static int iommu_alloc_default_domain(struct iommu_group *group, + struct device *dev) { - struct iommu_group *group; unsigned int type; - group = iommu_group_get(dev); - if (!group) - return -ENODEV; - if (group->default_domain) return 0; @@ -1674,17 +1680,8 @@ static void probe_alloc_default_domain(struct bus_type *bus, static int iommu_group_do_dma_attach(struct device *dev, void *data) { struct iommu_domain *domain = data; - const struct iommu_ops *ops; - int ret; - - ret = __iommu_attach_device(domain, dev); - - ops = domain->ops; - if (ret == 0 && ops->probe_finalize) - ops->probe_finalize(dev); - - return ret; + return __iommu_attach_device(domain, dev); } static int __iommu_group_dma_attach(struct iommu_group *group) @@ -1693,6 +1690,22 @@ static int __iommu_group_dma_attach(struct iommu_group *group) iommu_group_do_dma_attach); } +static int iommu_group_do_probe_finalize(struct device *dev, void *data) +{ + struct iommu_domain *domain = data; + + if (domain->ops->probe_finalize) + domain->ops->probe_finalize(dev); + + return 0; +} + +static void __iommu_group_dma_finalize(struct iommu_group *group) +{ + 
__iommu_group_for_each_dev(group, group->default_domain, + iommu_group_do_probe_finalize); +} + static int iommu_do_create_direct_mappings(struct device *dev, void *data) { struct iommu_group *group = data; @@ -1745,6 +1758,8 @@ int bus_iommu_probe(struct bus_type *bus) if (ret) break; + + __iommu_group_dma_finalize(group); } return ret; @@ -1893,9 +1908,6 @@ static int __iommu_attach_device(struct iommu_domain *domain, struct device *dev) { int ret; - if ((domain->ops->is_attach_deferred != NULL) && - domain->ops->is_attach_deferred(domain, dev)) - return 0; if (unlikely(domain->ops->attach_dev == NULL)) return -ENODEV; @@ -1967,8 +1979,7 @@ EXPORT_SYMBOL_GPL(iommu_sva_unbind_gpasid); static void __iommu_detach_device(struct iommu_domain *domain, struct device *dev) { - if ((domain->ops->is_attach_deferred != NULL) && - domain->ops->is_attach_deferred(domain, dev)) + if (iommu_is_attach_deferred(domain, dev)) return; if (unlikely(domain->ops->detach_dev == NULL)) @@ -2813,17 +2824,6 @@ void iommu_sva_unbind_device(struct iommu_sva *handle) } EXPORT_SYMBOL_GPL(iommu_sva_unbind_device); -int iommu_sva_set_ops(struct iommu_sva *handle, - const struct iommu_sva_ops *sva_ops) -{ - if (handle->ops && handle->ops != sva_ops) - return -EEXIST; - - handle->ops = sva_ops; - return 0; -} -EXPORT_SYMBOL_GPL(iommu_sva_set_ops); - int iommu_sva_get_pasid(struct iommu_sva *handle) { const struct iommu_ops *ops = handle->dev->bus->iommu_ops; diff --git a/drivers/iommu/msm_iommu.c b/drivers/iommu/msm_iommu.c index 10cd4db0710a..3d8a63555c25 100644 --- a/drivers/iommu/msm_iommu.c +++ b/drivers/iommu/msm_iommu.c @@ -34,7 +34,7 @@ __asm__ __volatile__ ( \ /* bitmap of the page sizes currently supported */ #define MSM_IOMMU_PGSIZES (SZ_4K | SZ_64K | SZ_1M | SZ_16M) -DEFINE_SPINLOCK(msm_iommu_lock); +static DEFINE_SPINLOCK(msm_iommu_lock); static LIST_HEAD(qcom_iommu_devices); static struct iommu_ops msm_iommu_ops; diff --git a/drivers/iommu/mtk_iommu_v1.c b/drivers/iommu/mtk_iommu_v1.c index 7bdd74c7cb9f..c9d79cff4d17 100644 --- a/drivers/iommu/mtk_iommu_v1.c +++ b/drivers/iommu/mtk_iommu_v1.c @@ -265,10 +265,13 @@ static int mtk_iommu_attach_device(struct iommu_domain *domain, { struct mtk_iommu_data *data = dev_iommu_priv_get(dev); struct mtk_iommu_domain *dom = to_mtk_domain(domain); + struct dma_iommu_mapping *mtk_mapping; int ret; - if (!data) - return -ENODEV; + /* Only allow the domain created internally. 
*/ + mtk_mapping = data->dev->archdata.iommu; + if (mtk_mapping->domain != domain) + return 0; if (!data->m4u_dom) { data->m4u_dom = dom; @@ -288,9 +291,6 @@ static void mtk_iommu_detach_device(struct iommu_domain *domain, { struct mtk_iommu_data *data = dev_iommu_priv_get(dev); - if (!data) - return; - mtk_iommu_config(data, dev, false); } @@ -416,6 +416,11 @@ static int mtk_iommu_create_mapping(struct device *dev, return 0; } +static int mtk_iommu_def_domain_type(struct device *dev) +{ + return IOMMU_DOMAIN_UNMANAGED; +} + static struct iommu_device *mtk_iommu_probe_device(struct device *dev) { struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev); @@ -464,12 +469,10 @@ static void mtk_iommu_probe_finalize(struct device *dev) static void mtk_iommu_release_device(struct device *dev) { struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev); - struct mtk_iommu_data *data; if (!fwspec || fwspec->ops != &mtk_iommu_ops) return; - data = dev_iommu_priv_get(dev); iommu_fwspec_free(dev); } @@ -525,6 +528,7 @@ static const struct iommu_ops mtk_iommu_ops = { .probe_device = mtk_iommu_probe_device, .probe_finalize = mtk_iommu_probe_finalize, .release_device = mtk_iommu_release_device, + .def_domain_type = mtk_iommu_def_domain_type, .device_group = generic_device_group, .pgsize_bitmap = ~0UL << MT2701_IOMMU_PAGE_SHIFT, }; diff --git a/drivers/iommu/omap-iommu.c b/drivers/iommu/omap-iommu.c index 6699fe6d9e06..c8282cc212cb 100644 --- a/drivers/iommu/omap-iommu.c +++ b/drivers/iommu/omap-iommu.c @@ -1236,6 +1236,7 @@ static int omap_iommu_probe(struct platform_device *pdev) goto out_group; iommu_device_set_ops(&obj->iommu, &omap_iommu_ops); + iommu_device_set_fwnode(&obj->iommu, &of->fwnode); err = iommu_device_register(&obj->iommu); if (err) @@ -1726,6 +1727,9 @@ static struct iommu_group *omap_iommu_device_group(struct device *dev) struct omap_iommu_arch_data *arch_data = dev->archdata.iommu; struct iommu_group *group = ERR_PTR(-EINVAL); + if (!arch_data) + return ERR_PTR(-ENODEV); + if (arch_data->iommu_dev) group = iommu_group_ref_get(arch_data->iommu_dev->group); diff --git a/drivers/iommu/sun50i-iommu.c b/drivers/iommu/sun50i-iommu.c new file mode 100644 index 000000000000..fce605e96aa2 --- /dev/null +++ b/drivers/iommu/sun50i-iommu.c @@ -0,0 +1,1023 @@ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +// Copyright (C) 2016-2018, Allwinner Technology CO., LTD. 
+// Copyright (C) 2019-2020, Cerno + +#include <linux/bitfield.h> +#include <linux/bug.h> +#include <linux/clk.h> +#include <linux/device.h> +#include <linux/dma-direction.h> +#include <linux/dma-iommu.h> +#include <linux/dma-mapping.h> +#include <linux/err.h> +#include <linux/errno.h> +#include <linux/interrupt.h> +#include <linux/iommu.h> +#include <linux/iopoll.h> +#include <linux/ioport.h> +#include <linux/log2.h> +#include <linux/module.h> +#include <linux/of_platform.h> +#include <linux/platform_device.h> +#include <linux/pm.h> +#include <linux/pm_runtime.h> +#include <linux/reset.h> +#include <linux/sizes.h> +#include <linux/slab.h> +#include <linux/spinlock.h> +#include <linux/types.h> + +#define IOMMU_RESET_REG 0x010 +#define IOMMU_ENABLE_REG 0x020 +#define IOMMU_ENABLE_ENABLE BIT(0) + +#define IOMMU_BYPASS_REG 0x030 +#define IOMMU_AUTO_GATING_REG 0x040 +#define IOMMU_AUTO_GATING_ENABLE BIT(0) + +#define IOMMU_WBUF_CTRL_REG 0x044 +#define IOMMU_OOO_CTRL_REG 0x048 +#define IOMMU_4KB_BDY_PRT_CTRL_REG 0x04c +#define IOMMU_TTB_REG 0x050 +#define IOMMU_TLB_ENABLE_REG 0x060 +#define IOMMU_TLB_PREFETCH_REG 0x070 +#define IOMMU_TLB_PREFETCH_MASTER_ENABLE(m) BIT(m) + +#define IOMMU_TLB_FLUSH_REG 0x080 +#define IOMMU_TLB_FLUSH_PTW_CACHE BIT(17) +#define IOMMU_TLB_FLUSH_MACRO_TLB BIT(16) +#define IOMMU_TLB_FLUSH_MICRO_TLB(i) (BIT(i) & GENMASK(5, 0)) + +#define IOMMU_TLB_IVLD_ADDR_REG 0x090 +#define IOMMU_TLB_IVLD_ADDR_MASK_REG 0x094 +#define IOMMU_TLB_IVLD_ENABLE_REG 0x098 +#define IOMMU_TLB_IVLD_ENABLE_ENABLE BIT(0) + +#define IOMMU_PC_IVLD_ADDR_REG 0x0a0 +#define IOMMU_PC_IVLD_ENABLE_REG 0x0a8 +#define IOMMU_PC_IVLD_ENABLE_ENABLE BIT(0) + +#define IOMMU_DM_AUT_CTRL_REG(d) (0x0b0 + ((d) / 2) * 4) +#define IOMMU_DM_AUT_CTRL_RD_UNAVAIL(d, m) (1 << (((d & 1) * 16) + ((m) * 2))) +#define IOMMU_DM_AUT_CTRL_WR_UNAVAIL(d, m) (1 << (((d & 1) * 16) + ((m) * 2) + 1)) + +#define IOMMU_DM_AUT_OVWT_REG 0x0d0 +#define IOMMU_INT_ENABLE_REG 0x100 +#define IOMMU_INT_CLR_REG 0x104 +#define IOMMU_INT_STA_REG 0x108 +#define IOMMU_INT_ERR_ADDR_REG(i) (0x110 + (i) * 4) +#define IOMMU_INT_ERR_ADDR_L1_REG 0x130 +#define IOMMU_INT_ERR_ADDR_L2_REG 0x134 +#define IOMMU_INT_ERR_DATA_REG(i) (0x150 + (i) * 4) +#define IOMMU_L1PG_INT_REG 0x0180 +#define IOMMU_L2PG_INT_REG 0x0184 + +#define IOMMU_INT_INVALID_L2PG BIT(17) +#define IOMMU_INT_INVALID_L1PG BIT(16) +#define IOMMU_INT_MASTER_PERMISSION(m) BIT(m) +#define IOMMU_INT_MASTER_MASK (IOMMU_INT_MASTER_PERMISSION(0) | \ + IOMMU_INT_MASTER_PERMISSION(1) | \ + IOMMU_INT_MASTER_PERMISSION(2) | \ + IOMMU_INT_MASTER_PERMISSION(3) | \ + IOMMU_INT_MASTER_PERMISSION(4) | \ + IOMMU_INT_MASTER_PERMISSION(5)) +#define IOMMU_INT_MASK (IOMMU_INT_INVALID_L1PG | \ + IOMMU_INT_INVALID_L2PG | \ + IOMMU_INT_MASTER_MASK) + +#define PT_ENTRY_SIZE sizeof(u32) + +#define NUM_DT_ENTRIES 4096 +#define DT_SIZE (NUM_DT_ENTRIES * PT_ENTRY_SIZE) + +#define NUM_PT_ENTRIES 256 +#define PT_SIZE (NUM_PT_ENTRIES * PT_ENTRY_SIZE) + +struct sun50i_iommu { + struct iommu_device iommu; + + /* Lock to modify the IOMMU registers */ + spinlock_t iommu_lock; + + struct device *dev; + void __iomem *base; + struct reset_control *reset; + struct clk *clk; + + struct iommu_domain *domain; + struct iommu_group *group; + struct kmem_cache *pt_pool; +}; + +struct sun50i_iommu_domain { + struct iommu_domain domain; + + /* Number of devices attached to the domain */ + refcount_t refcnt; + + /* L1 Page Table */ + u32 *dt; + dma_addr_t dt_dma; + + struct sun50i_iommu *iommu; +}; + +static struct sun50i_iommu_domain 
*to_sun50i_domain(struct iommu_domain *domain)
+{
+        return container_of(domain, struct sun50i_iommu_domain, domain);
+}
+
+static struct sun50i_iommu *sun50i_iommu_from_dev(struct device *dev)
+{
+        return dev_iommu_priv_get(dev);
+}
+
+static u32 iommu_read(struct sun50i_iommu *iommu, u32 offset)
+{
+        return readl(iommu->base + offset);
+}
+
+static void iommu_write(struct sun50i_iommu *iommu, u32 offset, u32 value)
+{
+        writel(value, iommu->base + offset);
+}
+
+/*
+ * The Allwinner H6 IOMMU uses a 2-level page table.
+ *
+ * The first level is the usual Directory Table (DT), that consists of
+ * 4096 4-byte Directory Table Entries (DTE), each pointing to a Page
+ * Table (PT).
+ *
+ * Each PT consists of 256 4-byte Page Table Entries (PTE), each
+ * pointing to a 4kB page of physical memory.
+ *
+ * The IOMMU supports a single DT, pointed by the IOMMU_TTB_REG
+ * register that contains its physical address.
+ */
+
+#define SUN50I_IOVA_DTE_MASK        GENMASK(31, 20)
+#define SUN50I_IOVA_PTE_MASK        GENMASK(19, 12)
+#define SUN50I_IOVA_PAGE_MASK       GENMASK(11, 0)
+
+static u32 sun50i_iova_get_dte_index(dma_addr_t iova)
+{
+        return FIELD_GET(SUN50I_IOVA_DTE_MASK, iova);
+}
+
+static u32 sun50i_iova_get_pte_index(dma_addr_t iova)
+{
+        return FIELD_GET(SUN50I_IOVA_PTE_MASK, iova);
+}
+
+static u32 sun50i_iova_get_page_offset(dma_addr_t iova)
+{
+        return FIELD_GET(SUN50I_IOVA_PAGE_MASK, iova);
+}
+
+/*
+ * Each Directory Table Entry has a Page Table address and a valid
+ * bit:
+ *
+ * +---------------------+-----------+-+
+ * | PT address          | Reserved  |V|
+ * +---------------------+-----------+-+
+ *  31:10 - Page Table address
+ *   9:2  - Reserved
+ *   1:0  - 1 if the entry is valid
+ */
+
+#define SUN50I_DTE_PT_ADDRESS_MASK  GENMASK(31, 10)
+#define SUN50I_DTE_PT_ATTRS         GENMASK(1, 0)
+#define SUN50I_DTE_PT_VALID         1
+
+static phys_addr_t sun50i_dte_get_pt_address(u32 dte)
+{
+        return (phys_addr_t)dte & SUN50I_DTE_PT_ADDRESS_MASK;
+}
+
+static bool sun50i_dte_is_pt_valid(u32 dte)
+{
+        return (dte & SUN50I_DTE_PT_ATTRS) == SUN50I_DTE_PT_VALID;
+}
+
+static u32 sun50i_mk_dte(dma_addr_t pt_dma)
+{
+        return (pt_dma & SUN50I_DTE_PT_ADDRESS_MASK) | SUN50I_DTE_PT_VALID;
+}
+
+/*
+ * Each PTE has a Page address, an authority index and a valid bit:
+ *
+ * +----------------+-----+-----+-----+---+-----+
+ * | Page address   | Rsv | ACI | Rsv | V | Rsv |
+ * +----------------+-----+-----+-----+---+-----+
+ *  31:12 - Page address
+ *  11:8  - Reserved
+ *   7:4  - Authority Control Index
+ *   3:2  - Reserved
+ *     1  - 1 if the entry is valid
+ *     0  - Reserved
+ *
+ * The way permissions work is that the IOMMU has 16 "domains" that
+ * can be configured to give each master either read or write
+ * permissions through the IOMMU_DM_AUT_CTRL_REG registers. Domain 0
+ * seems to be the default domain, and its permissions in the
+ * IOMMU_DM_AUT_CTRL_REG are read-only, so it's not really useful to
+ * enforce any particular permission.
+ *
+ * Each page table entry will then carry a reference to the domain it
+ * is assigned to, so that permissions can actually be enforced on a
+ * per-page basis.
+ *
+ * In order to make it work with the IOMMU framework, we will be using
+ * 4 different domains, starting at 1: RD_WR, RD, WR and NONE
+ * depending on the permission we want to enforce. Each domain will
+ * have each master set up in the same way, since the IOMMU framework
+ * doesn't seem to restrict page access on a per-device basis.
And + * then we will use the relevant domain index when generating the page + * table entry depending on the permissions we want to be enforced. + */ + +enum sun50i_iommu_aci { + SUN50I_IOMMU_ACI_DO_NOT_USE = 0, + SUN50I_IOMMU_ACI_NONE, + SUN50I_IOMMU_ACI_RD, + SUN50I_IOMMU_ACI_WR, + SUN50I_IOMMU_ACI_RD_WR, +}; + +#define SUN50I_PTE_PAGE_ADDRESS_MASK GENMASK(31, 12) +#define SUN50I_PTE_ACI_MASK GENMASK(7, 4) +#define SUN50I_PTE_PAGE_VALID BIT(1) + +static phys_addr_t sun50i_pte_get_page_address(u32 pte) +{ + return (phys_addr_t)pte & SUN50I_PTE_PAGE_ADDRESS_MASK; +} + +static enum sun50i_iommu_aci sun50i_get_pte_aci(u32 pte) +{ + return FIELD_GET(SUN50I_PTE_ACI_MASK, pte); +} + +static bool sun50i_pte_is_page_valid(u32 pte) +{ + return pte & SUN50I_PTE_PAGE_VALID; +} + +static u32 sun50i_mk_pte(phys_addr_t page, int prot) +{ + enum sun50i_iommu_aci aci; + u32 flags = 0; + + if (prot & (IOMMU_READ | IOMMU_WRITE)) + aci = SUN50I_IOMMU_ACI_RD_WR; + else if (prot & IOMMU_READ) + aci = SUN50I_IOMMU_ACI_RD; + else if (prot & IOMMU_WRITE) + aci = SUN50I_IOMMU_ACI_WR; + else + aci = SUN50I_IOMMU_ACI_NONE; + + flags |= FIELD_PREP(SUN50I_PTE_ACI_MASK, aci); + page &= SUN50I_PTE_PAGE_ADDRESS_MASK; + return page | flags | SUN50I_PTE_PAGE_VALID; +} + +static void sun50i_table_flush(struct sun50i_iommu_domain *sun50i_domain, + void *vaddr, unsigned int count) +{ + struct sun50i_iommu *iommu = sun50i_domain->iommu; + dma_addr_t dma = virt_to_phys(vaddr); + size_t size = count * PT_ENTRY_SIZE; + + dma_sync_single_for_device(iommu->dev, dma, size, DMA_TO_DEVICE); +} + +static int sun50i_iommu_flush_all_tlb(struct sun50i_iommu *iommu) +{ + u32 reg; + int ret; + + assert_spin_locked(&iommu->iommu_lock); + + iommu_write(iommu, + IOMMU_TLB_FLUSH_REG, + IOMMU_TLB_FLUSH_PTW_CACHE | + IOMMU_TLB_FLUSH_MACRO_TLB | + IOMMU_TLB_FLUSH_MICRO_TLB(5) | + IOMMU_TLB_FLUSH_MICRO_TLB(4) | + IOMMU_TLB_FLUSH_MICRO_TLB(3) | + IOMMU_TLB_FLUSH_MICRO_TLB(2) | + IOMMU_TLB_FLUSH_MICRO_TLB(1) | + IOMMU_TLB_FLUSH_MICRO_TLB(0)); + + ret = readl_poll_timeout(iommu->base + IOMMU_TLB_FLUSH_REG, + reg, !reg, + 1, 2000); + if (ret) + dev_warn(iommu->dev, "TLB Flush timed out!\n"); + + return ret; +} + +static void sun50i_iommu_flush_iotlb_all(struct iommu_domain *domain) +{ + struct sun50i_iommu_domain *sun50i_domain = to_sun50i_domain(domain); + struct sun50i_iommu *iommu = sun50i_domain->iommu; + unsigned long flags; + + /* + * At boot, we'll have a first call into .flush_iotlb_all right after + * .probe_device, and since we link our (single) domain to our iommu in + * the .attach_device callback, we don't have that pointer set. + * + * It shouldn't really be any trouble to ignore it though since we flush + * all caches as part of the device powerup. 
+ */ + if (!iommu) + return; + + spin_lock_irqsave(&iommu->iommu_lock, flags); + sun50i_iommu_flush_all_tlb(iommu); + spin_unlock_irqrestore(&iommu->iommu_lock, flags); +} + +static void sun50i_iommu_iotlb_sync(struct iommu_domain *domain, + struct iommu_iotlb_gather *gather) +{ + sun50i_iommu_flush_iotlb_all(domain); +} + +static int sun50i_iommu_enable(struct sun50i_iommu *iommu) +{ + struct sun50i_iommu_domain *sun50i_domain; + unsigned long flags; + int ret; + + if (!iommu->domain) + return 0; + + sun50i_domain = to_sun50i_domain(iommu->domain); + + ret = reset_control_deassert(iommu->reset); + if (ret) + return ret; + + ret = clk_prepare_enable(iommu->clk); + if (ret) + goto err_reset_assert; + + spin_lock_irqsave(&iommu->iommu_lock, flags); + + iommu_write(iommu, IOMMU_TTB_REG, sun50i_domain->dt_dma); + iommu_write(iommu, IOMMU_TLB_PREFETCH_REG, + IOMMU_TLB_PREFETCH_MASTER_ENABLE(0) | + IOMMU_TLB_PREFETCH_MASTER_ENABLE(1) | + IOMMU_TLB_PREFETCH_MASTER_ENABLE(2) | + IOMMU_TLB_PREFETCH_MASTER_ENABLE(3) | + IOMMU_TLB_PREFETCH_MASTER_ENABLE(4) | + IOMMU_TLB_PREFETCH_MASTER_ENABLE(5)); + iommu_write(iommu, IOMMU_INT_ENABLE_REG, IOMMU_INT_MASK); + iommu_write(iommu, IOMMU_DM_AUT_CTRL_REG(SUN50I_IOMMU_ACI_NONE), + IOMMU_DM_AUT_CTRL_RD_UNAVAIL(SUN50I_IOMMU_ACI_NONE, 0) | + IOMMU_DM_AUT_CTRL_WR_UNAVAIL(SUN50I_IOMMU_ACI_NONE, 0) | + IOMMU_DM_AUT_CTRL_RD_UNAVAIL(SUN50I_IOMMU_ACI_NONE, 1) | + IOMMU_DM_AUT_CTRL_WR_UNAVAIL(SUN50I_IOMMU_ACI_NONE, 1) | + IOMMU_DM_AUT_CTRL_RD_UNAVAIL(SUN50I_IOMMU_ACI_NONE, 2) | + IOMMU_DM_AUT_CTRL_WR_UNAVAIL(SUN50I_IOMMU_ACI_NONE, 2) | + IOMMU_DM_AUT_CTRL_RD_UNAVAIL(SUN50I_IOMMU_ACI_NONE, 3) | + IOMMU_DM_AUT_CTRL_WR_UNAVAIL(SUN50I_IOMMU_ACI_NONE, 3) | + IOMMU_DM_AUT_CTRL_RD_UNAVAIL(SUN50I_IOMMU_ACI_NONE, 4) | + IOMMU_DM_AUT_CTRL_WR_UNAVAIL(SUN50I_IOMMU_ACI_NONE, 4) | + IOMMU_DM_AUT_CTRL_RD_UNAVAIL(SUN50I_IOMMU_ACI_NONE, 5) | + IOMMU_DM_AUT_CTRL_WR_UNAVAIL(SUN50I_IOMMU_ACI_NONE, 5)); + + iommu_write(iommu, IOMMU_DM_AUT_CTRL_REG(SUN50I_IOMMU_ACI_RD), + IOMMU_DM_AUT_CTRL_WR_UNAVAIL(SUN50I_IOMMU_ACI_RD, 0) | + IOMMU_DM_AUT_CTRL_WR_UNAVAIL(SUN50I_IOMMU_ACI_RD, 1) | + IOMMU_DM_AUT_CTRL_WR_UNAVAIL(SUN50I_IOMMU_ACI_RD, 2) | + IOMMU_DM_AUT_CTRL_WR_UNAVAIL(SUN50I_IOMMU_ACI_RD, 3) | + IOMMU_DM_AUT_CTRL_WR_UNAVAIL(SUN50I_IOMMU_ACI_RD, 4) | + IOMMU_DM_AUT_CTRL_WR_UNAVAIL(SUN50I_IOMMU_ACI_RD, 5)); + + iommu_write(iommu, IOMMU_DM_AUT_CTRL_REG(SUN50I_IOMMU_ACI_WR), + IOMMU_DM_AUT_CTRL_RD_UNAVAIL(SUN50I_IOMMU_ACI_WR, 0) | + IOMMU_DM_AUT_CTRL_RD_UNAVAIL(SUN50I_IOMMU_ACI_WR, 1) | + IOMMU_DM_AUT_CTRL_RD_UNAVAIL(SUN50I_IOMMU_ACI_WR, 2) | + IOMMU_DM_AUT_CTRL_RD_UNAVAIL(SUN50I_IOMMU_ACI_WR, 3) | + IOMMU_DM_AUT_CTRL_RD_UNAVAIL(SUN50I_IOMMU_ACI_WR, 4) | + IOMMU_DM_AUT_CTRL_RD_UNAVAIL(SUN50I_IOMMU_ACI_WR, 5)); + + ret = sun50i_iommu_flush_all_tlb(iommu); + if (ret) { + spin_unlock_irqrestore(&iommu->iommu_lock, flags); + goto err_clk_disable; + } + + iommu_write(iommu, IOMMU_AUTO_GATING_REG, IOMMU_AUTO_GATING_ENABLE); + iommu_write(iommu, IOMMU_ENABLE_REG, IOMMU_ENABLE_ENABLE); + + spin_unlock_irqrestore(&iommu->iommu_lock, flags); + + return 0; + +err_clk_disable: + clk_disable_unprepare(iommu->clk); + +err_reset_assert: + reset_control_assert(iommu->reset); + + return ret; +} + +static void sun50i_iommu_disable(struct sun50i_iommu *iommu) +{ + unsigned long flags; + + spin_lock_irqsave(&iommu->iommu_lock, flags); + + iommu_write(iommu, IOMMU_ENABLE_REG, 0); + iommu_write(iommu, IOMMU_TTB_REG, 0); + + spin_unlock_irqrestore(&iommu->iommu_lock, flags); + + clk_disable_unprepare(iommu->clk); + 
reset_control_assert(iommu->reset); +} + +static void *sun50i_iommu_alloc_page_table(struct sun50i_iommu *iommu, + gfp_t gfp) +{ + dma_addr_t pt_dma; + u32 *page_table; + + page_table = kmem_cache_zalloc(iommu->pt_pool, gfp); + if (!page_table) + return ERR_PTR(-ENOMEM); + + pt_dma = dma_map_single(iommu->dev, page_table, PT_SIZE, DMA_TO_DEVICE); + if (dma_mapping_error(iommu->dev, pt_dma)) { + dev_err(iommu->dev, "Couldn't map L2 Page Table\n"); + kmem_cache_free(iommu->pt_pool, page_table); + return ERR_PTR(-ENOMEM); + } + + /* We rely on the physical address and DMA address being the same */ + WARN_ON(pt_dma != virt_to_phys(page_table)); + + return page_table; +} + +static void sun50i_iommu_free_page_table(struct sun50i_iommu *iommu, + u32 *page_table) +{ + phys_addr_t pt_phys = virt_to_phys(page_table); + + dma_unmap_single(iommu->dev, pt_phys, PT_SIZE, DMA_TO_DEVICE); + kmem_cache_free(iommu->pt_pool, page_table); +} + +static u32 *sun50i_dte_get_page_table(struct sun50i_iommu_domain *sun50i_domain, + dma_addr_t iova, gfp_t gfp) +{ + struct sun50i_iommu *iommu = sun50i_domain->iommu; + u32 *page_table; + u32 *dte_addr; + u32 old_dte; + u32 dte; + + dte_addr = &sun50i_domain->dt[sun50i_iova_get_dte_index(iova)]; + dte = *dte_addr; + if (sun50i_dte_is_pt_valid(dte)) { + phys_addr_t pt_phys = sun50i_dte_get_pt_address(dte); + return (u32 *)phys_to_virt(pt_phys); + } + + page_table = sun50i_iommu_alloc_page_table(iommu, gfp); + if (IS_ERR(page_table)) + return page_table; + + dte = sun50i_mk_dte(virt_to_phys(page_table)); + old_dte = cmpxchg(dte_addr, 0, dte); + if (old_dte) { + phys_addr_t installed_pt_phys = + sun50i_dte_get_pt_address(old_dte); + u32 *installed_pt = phys_to_virt(installed_pt_phys); + u32 *drop_pt = page_table; + + page_table = installed_pt; + dte = old_dte; + sun50i_iommu_free_page_table(iommu, drop_pt); + } + + sun50i_table_flush(sun50i_domain, page_table, PT_SIZE); + sun50i_table_flush(sun50i_domain, dte_addr, 1); + + return page_table; +} + +static int sun50i_iommu_map(struct iommu_domain *domain, unsigned long iova, + phys_addr_t paddr, size_t size, int prot, gfp_t gfp) +{ + struct sun50i_iommu_domain *sun50i_domain = to_sun50i_domain(domain); + struct sun50i_iommu *iommu = sun50i_domain->iommu; + u32 pte_index; + u32 *page_table, *pte_addr; + int ret = 0; + + page_table = sun50i_dte_get_page_table(sun50i_domain, iova, gfp); + if (IS_ERR(page_table)) { + ret = PTR_ERR(page_table); + goto out; + } + + pte_index = sun50i_iova_get_pte_index(iova); + pte_addr = &page_table[pte_index]; + if (unlikely(sun50i_pte_is_page_valid(*pte_addr))) { + phys_addr_t page_phys = sun50i_pte_get_page_address(*pte_addr); + dev_err(iommu->dev, + "iova %pad already mapped to %pa cannot remap to %pa prot: %#x\n", + &iova, &page_phys, &paddr, prot); + ret = -EBUSY; + goto out; + } + + *pte_addr = sun50i_mk_pte(paddr, prot); + sun50i_table_flush(sun50i_domain, pte_addr, 1); + +out: + return ret; +} + +static size_t sun50i_iommu_unmap(struct iommu_domain *domain, unsigned long iova, + size_t size, struct iommu_iotlb_gather *gather) +{ + struct sun50i_iommu_domain *sun50i_domain = to_sun50i_domain(domain); + phys_addr_t pt_phys; + dma_addr_t pte_dma; + u32 *pte_addr; + u32 dte; + + dte = sun50i_domain->dt[sun50i_iova_get_dte_index(iova)]; + if (!sun50i_dte_is_pt_valid(dte)) + return 0; + + pt_phys = sun50i_dte_get_pt_address(dte); + pte_addr = (u32 *)phys_to_virt(pt_phys) + sun50i_iova_get_pte_index(iova); + pte_dma = pt_phys + sun50i_iova_get_pte_index(iova) * PT_ENTRY_SIZE; + + if 
(!sun50i_pte_is_page_valid(*pte_addr)) + return 0; + + memset(pte_addr, 0, sizeof(*pte_addr)); + sun50i_table_flush(sun50i_domain, pte_addr, 1); + + return SZ_4K; +} + +static phys_addr_t sun50i_iommu_iova_to_phys(struct iommu_domain *domain, + dma_addr_t iova) +{ + struct sun50i_iommu_domain *sun50i_domain = to_sun50i_domain(domain); + phys_addr_t pt_phys; + u32 *page_table; + u32 dte, pte; + + dte = sun50i_domain->dt[sun50i_iova_get_dte_index(iova)]; + if (!sun50i_dte_is_pt_valid(dte)) + return 0; + + pt_phys = sun50i_dte_get_pt_address(dte); + page_table = (u32 *)phys_to_virt(pt_phys); + pte = page_table[sun50i_iova_get_pte_index(iova)]; + if (!sun50i_pte_is_page_valid(pte)) + return 0; + + return sun50i_pte_get_page_address(pte) + + sun50i_iova_get_page_offset(iova); +} + +static struct iommu_domain *sun50i_iommu_domain_alloc(unsigned type) +{ + struct sun50i_iommu_domain *sun50i_domain; + + if (type != IOMMU_DOMAIN_DMA && + type != IOMMU_DOMAIN_IDENTITY && + type != IOMMU_DOMAIN_UNMANAGED) + return NULL; + + sun50i_domain = kzalloc(sizeof(*sun50i_domain), GFP_KERNEL); + if (!sun50i_domain) + return NULL; + + if (type == IOMMU_DOMAIN_DMA && + iommu_get_dma_cookie(&sun50i_domain->domain)) + goto err_free_domain; + + sun50i_domain->dt = (u32 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, + get_order(DT_SIZE)); + if (!sun50i_domain->dt) + goto err_put_cookie; + + refcount_set(&sun50i_domain->refcnt, 1); + + sun50i_domain->domain.geometry.aperture_start = 0; + sun50i_domain->domain.geometry.aperture_end = DMA_BIT_MASK(32); + sun50i_domain->domain.geometry.force_aperture = true; + + return &sun50i_domain->domain; + +err_put_cookie: + if (type == IOMMU_DOMAIN_DMA) + iommu_put_dma_cookie(&sun50i_domain->domain); + +err_free_domain: + kfree(sun50i_domain); + + return NULL; +} + +static void sun50i_iommu_domain_free(struct iommu_domain *domain) +{ + struct sun50i_iommu_domain *sun50i_domain = to_sun50i_domain(domain); + + free_pages((unsigned long)sun50i_domain->dt, get_order(DT_SIZE)); + sun50i_domain->dt = NULL; + + iommu_put_dma_cookie(domain); + + kfree(sun50i_domain); +} + +static int sun50i_iommu_attach_domain(struct sun50i_iommu *iommu, + struct sun50i_iommu_domain *sun50i_domain) +{ + iommu->domain = &sun50i_domain->domain; + sun50i_domain->iommu = iommu; + + sun50i_domain->dt_dma = dma_map_single(iommu->dev, sun50i_domain->dt, + DT_SIZE, DMA_TO_DEVICE); + if (dma_mapping_error(iommu->dev, sun50i_domain->dt_dma)) { + dev_err(iommu->dev, "Couldn't map L1 Page Table\n"); + return -ENOMEM; + } + + return sun50i_iommu_enable(iommu); +} + +static void sun50i_iommu_detach_domain(struct sun50i_iommu *iommu, + struct sun50i_iommu_domain *sun50i_domain) +{ + unsigned int i; + + for (i = 0; i < NUM_DT_ENTRIES; i++) { + phys_addr_t pt_phys; + u32 *page_table; + u32 *dte_addr; + u32 dte; + + dte_addr = &sun50i_domain->dt[i]; + dte = *dte_addr; + if (!sun50i_dte_is_pt_valid(dte)) + continue; + + memset(dte_addr, 0, sizeof(*dte_addr)); + sun50i_table_flush(sun50i_domain, dte_addr, 1); + + pt_phys = sun50i_dte_get_pt_address(dte); + page_table = phys_to_virt(pt_phys); + sun50i_iommu_free_page_table(iommu, page_table); + } + + + sun50i_iommu_disable(iommu); + + dma_unmap_single(iommu->dev, virt_to_phys(sun50i_domain->dt), + DT_SIZE, DMA_TO_DEVICE); + + iommu->domain = NULL; +} + +static void sun50i_iommu_detach_device(struct iommu_domain *domain, + struct device *dev) +{ + struct sun50i_iommu_domain *sun50i_domain = to_sun50i_domain(domain); + struct sun50i_iommu *iommu = dev_iommu_priv_get(dev); + + 
dev_dbg(dev, "Detaching from IOMMU domain\n"); + + if (iommu->domain != domain) + return; + + if (refcount_dec_and_test(&sun50i_domain->refcnt)) + sun50i_iommu_detach_domain(iommu, sun50i_domain); +} + +static int sun50i_iommu_attach_device(struct iommu_domain *domain, + struct device *dev) +{ + struct sun50i_iommu_domain *sun50i_domain = to_sun50i_domain(domain); + struct sun50i_iommu *iommu; + + iommu = sun50i_iommu_from_dev(dev); + if (!iommu) + return -ENODEV; + + dev_dbg(dev, "Attaching to IOMMU domain\n"); + + refcount_inc(&sun50i_domain->refcnt); + + if (iommu->domain == domain) + return 0; + + if (iommu->domain) + sun50i_iommu_detach_device(iommu->domain, dev); + + sun50i_iommu_attach_domain(iommu, sun50i_domain); + + return 0; +} + +static struct iommu_device *sun50i_iommu_probe_device(struct device *dev) +{ + struct sun50i_iommu *iommu; + + iommu = sun50i_iommu_from_dev(dev); + if (!iommu) + return ERR_PTR(-ENODEV); + + return &iommu->iommu; +} + +static void sun50i_iommu_release_device(struct device *dev) {} + +static struct iommu_group *sun50i_iommu_device_group(struct device *dev) +{ + struct sun50i_iommu *iommu = sun50i_iommu_from_dev(dev); + + return iommu_group_ref_get(iommu->group); +} + +static int sun50i_iommu_of_xlate(struct device *dev, + struct of_phandle_args *args) +{ + struct platform_device *iommu_pdev = of_find_device_by_node(args->np); + unsigned id = args->args[0]; + + dev_iommu_priv_set(dev, platform_get_drvdata(iommu_pdev)); + + return iommu_fwspec_add_ids(dev, &id, 1); +} + +static const struct iommu_ops sun50i_iommu_ops = { + .pgsize_bitmap = SZ_4K, + .attach_dev = sun50i_iommu_attach_device, + .detach_dev = sun50i_iommu_detach_device, + .device_group = sun50i_iommu_device_group, + .domain_alloc = sun50i_iommu_domain_alloc, + .domain_free = sun50i_iommu_domain_free, + .flush_iotlb_all = sun50i_iommu_flush_iotlb_all, + .iotlb_sync = sun50i_iommu_iotlb_sync, + .iova_to_phys = sun50i_iommu_iova_to_phys, + .map = sun50i_iommu_map, + .of_xlate = sun50i_iommu_of_xlate, + .probe_device = sun50i_iommu_probe_device, + .release_device = sun50i_iommu_release_device, + .unmap = sun50i_iommu_unmap, +}; + +static void sun50i_iommu_report_fault(struct sun50i_iommu *iommu, + unsigned master, phys_addr_t iova, + unsigned prot) +{ + dev_err(iommu->dev, "Page fault for %pad (master %d, dir %s)\n", + &iova, master, (prot == IOMMU_FAULT_WRITE) ? "wr" : "rd"); + + if (iommu->domain) + report_iommu_fault(iommu->domain, iommu->dev, iova, prot); + else + dev_err(iommu->dev, "Page fault while iommu not attached to any domain?\n"); +} + +static phys_addr_t sun50i_iommu_handle_pt_irq(struct sun50i_iommu *iommu, + unsigned addr_reg, + unsigned blame_reg) +{ + phys_addr_t iova; + unsigned master; + u32 blame; + + assert_spin_locked(&iommu->iommu_lock); + + iova = iommu_read(iommu, addr_reg); + blame = iommu_read(iommu, blame_reg); + master = ilog2(blame & IOMMU_INT_MASTER_MASK); + + /* + * If the address is not in the page table, we can't get what + * operation triggered the fault. Assume it's a read + * operation. 
+ */ + sun50i_iommu_report_fault(iommu, master, iova, IOMMU_FAULT_READ); + + return iova; +} + +static phys_addr_t sun50i_iommu_handle_perm_irq(struct sun50i_iommu *iommu) +{ + enum sun50i_iommu_aci aci; + phys_addr_t iova; + unsigned master; + unsigned dir; + u32 blame; + + assert_spin_locked(&iommu->iommu_lock); + + blame = iommu_read(iommu, IOMMU_INT_STA_REG); + master = ilog2(blame & IOMMU_INT_MASTER_MASK); + iova = iommu_read(iommu, IOMMU_INT_ERR_ADDR_REG(master)); + aci = sun50i_get_pte_aci(iommu_read(iommu, + IOMMU_INT_ERR_DATA_REG(master))); + + switch (aci) { + /* + * If we are in the read-only domain, then it means we + * tried to write. + */ + case SUN50I_IOMMU_ACI_RD: + dir = IOMMU_FAULT_WRITE; + break; + + /* + * If we are in the write-only domain, then it means + * we tried to read. + */ + case SUN50I_IOMMU_ACI_WR: + + /* + * If we are in the domain without any permission, we + * can't really tell. Let's default to a read + * operation. + */ + case SUN50I_IOMMU_ACI_NONE: + + /* WTF? */ + case SUN50I_IOMMU_ACI_RD_WR: + default: + dir = IOMMU_FAULT_READ; + break; + } + + /* + * If the address is not in the page table, we can't get what + * operation triggered the fault. Assume it's a read + * operation. + */ + sun50i_iommu_report_fault(iommu, master, iova, dir); + + return iova; +} + +static irqreturn_t sun50i_iommu_irq(int irq, void *dev_id) +{ + struct sun50i_iommu *iommu = dev_id; + phys_addr_t iova; + u32 status; + + spin_lock(&iommu->iommu_lock); + + status = iommu_read(iommu, IOMMU_INT_STA_REG); + if (!(status & IOMMU_INT_MASK)) { + spin_unlock(&iommu->iommu_lock); + return IRQ_NONE; + } + + if (status & IOMMU_INT_INVALID_L2PG) + iova = sun50i_iommu_handle_pt_irq(iommu, + IOMMU_INT_ERR_ADDR_L2_REG, + IOMMU_L2PG_INT_REG); + else if (status & IOMMU_INT_INVALID_L1PG) + iova = sun50i_iommu_handle_pt_irq(iommu, + IOMMU_INT_ERR_ADDR_L1_REG, + IOMMU_L1PG_INT_REG); + else + iova = sun50i_iommu_handle_perm_irq(iommu); + + iommu_write(iommu, IOMMU_INT_CLR_REG, status); + + iommu_write(iommu, IOMMU_RESET_REG, ~status); + iommu_write(iommu, IOMMU_RESET_REG, status); + + spin_unlock(&iommu->iommu_lock); + + return IRQ_HANDLED; +} + +static int sun50i_iommu_probe(struct platform_device *pdev) +{ + struct sun50i_iommu *iommu; + int ret, irq; + + iommu = devm_kzalloc(&pdev->dev, sizeof(*iommu), GFP_KERNEL); + if (!iommu) + return -ENOMEM; + spin_lock_init(&iommu->iommu_lock); + platform_set_drvdata(pdev, iommu); + iommu->dev = &pdev->dev; + + iommu->pt_pool = kmem_cache_create(dev_name(&pdev->dev), + PT_SIZE, PT_SIZE, + SLAB_HWCACHE_ALIGN, + NULL); + if (!iommu->pt_pool) + return -ENOMEM; + + iommu->group = iommu_group_alloc(); + if (IS_ERR(iommu->group)) { + ret = PTR_ERR(iommu->group); + goto err_free_cache; + } + + iommu->base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(iommu->base)) { + ret = PTR_ERR(iommu->base); + goto err_free_group; + } + + irq = platform_get_irq(pdev, 0); + if (irq < 0) { + ret = irq; + goto err_free_group; + } + + iommu->clk = devm_clk_get(&pdev->dev, NULL); + if (IS_ERR(iommu->clk)) { + dev_err(&pdev->dev, "Couldn't get our clock.\n"); + ret = PTR_ERR(iommu->clk); + goto err_free_group; + } + + iommu->reset = devm_reset_control_get(&pdev->dev, NULL); + if (IS_ERR(iommu->reset)) { + dev_err(&pdev->dev, "Couldn't get our reset line.\n"); + ret = PTR_ERR(iommu->reset); + goto err_free_group; + } + + ret = iommu_device_sysfs_add(&iommu->iommu, &pdev->dev, + NULL, dev_name(&pdev->dev)); + if (ret) + goto err_free_group; + + 
iommu_device_set_ops(&iommu->iommu, &sun50i_iommu_ops); + iommu_device_set_fwnode(&iommu->iommu, &pdev->dev.of_node->fwnode); + + ret = iommu_device_register(&iommu->iommu); + if (ret) + goto err_remove_sysfs; + + ret = devm_request_irq(&pdev->dev, irq, sun50i_iommu_irq, 0, + dev_name(&pdev->dev), iommu); + if (ret < 0) + goto err_unregister; + + bus_set_iommu(&platform_bus_type, &sun50i_iommu_ops); + + return 0; + +err_unregister: + iommu_device_unregister(&iommu->iommu); + +err_remove_sysfs: + iommu_device_sysfs_remove(&iommu->iommu); + +err_free_group: + iommu_group_put(iommu->group); + +err_free_cache: + kmem_cache_destroy(iommu->pt_pool); + + return ret; +} + +static const struct of_device_id sun50i_iommu_dt[] = { + { .compatible = "allwinner,sun50i-h6-iommu", }, + { /* sentinel */ }, +}; +MODULE_DEVICE_TABLE(of, sun50i_iommu_dt); + +static struct platform_driver sun50i_iommu_driver = { + .driver = { + .name = "sun50i-iommu", + .of_match_table = sun50i_iommu_dt, + .suppress_bind_attrs = true, + } +}; +builtin_platform_driver_probe(sun50i_iommu_driver, sun50i_iommu_probe); + +MODULE_DESCRIPTION("Allwinner H6 IOMMU driver"); +MODULE_AUTHOR("Maxime Ripard <maxime@cerno.tech>"); +MODULE_AUTHOR("zhuxianbin <zhuxianbin@allwinnertech.com>"); +MODULE_LICENSE("Dual BSD/GPL"); diff --git a/drivers/iommu/virtio-iommu.c b/drivers/iommu/virtio-iommu.c index bda300c2a438..f6f07489a9aa 100644 --- a/drivers/iommu/virtio-iommu.c +++ b/drivers/iommu/virtio-iommu.c @@ -453,7 +453,7 @@ static int viommu_add_resv_mem(struct viommu_endpoint *vdev, if (!region) return -ENOMEM; - list_add(&vdev->resv_regions, ®ion->list); + list_add(®ion->list, &vdev->resv_regions); return 0; } |
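
The sun50i driver above describes a two-level table in which bits 31:20 of an IOVA select one of the 4096 Directory Table Entries, bits 19:12 select one of the 256 Page Table Entries, and bits 11:0 are the offset inside the 4 KiB page. The following standalone C sketch only mirrors the SUN50I_IOVA_*_MASK field layout from the driver comments; the helper constants, the example address and the program itself are illustrative and not part of the patch.

/* Standalone sketch: how a 32-bit IOVA is split by the sun50i layout. */
#include <stdint.h>
#include <stdio.h>

#define DTE_SHIFT   20       /* bits 31:20 -> index into the 4096-entry DT */
#define PTE_SHIFT   12       /* bits 19:12 -> index into the 256-entry PT  */
#define PAGE_OFFSET 0xfffU   /* bits 11:0  -> offset inside the 4 KiB page */

int main(void)
{
        uint32_t iova = 0x4030a123;   /* arbitrary example address */

        unsigned dte_index = iova >> DTE_SHIFT;
        unsigned pte_index = (iova >> PTE_SHIFT) & 0xff;
        unsigned offset    = iova & PAGE_OFFSET;

        printf("dte=%u pte=%u off=0x%x\n", dte_index, pte_index, offset);
        return 0;
}

For the example IOVA 0x4030a123 this prints dte=1027 pte=10 off=0x123, i.e. DT entry 1027 points at a page table whose entry 10 maps the page containing the access.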
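
The IOMMU_DM_AUT_CTRL_REG() and IOMMU_DM_AUT_CTRL_*_UNAVAIL() macros in the patch pack two ACI domains per 32-bit register, with two deny bits (read and write) per master in each 16-bit half. A hedged sketch of that addressing follows, assuming the same 0x0b0 register base and six masters as the patch; the function names are made up for illustration and do not exist in the driver.

/* Sketch: which register offset / bit positions gate a master in an ACI domain. */
#include <stdio.h>

static unsigned dm_aut_ctrl_reg(unsigned d)                 { return 0x0b0 + (d / 2) * 4; }
static unsigned rd_unavail_bit(unsigned d, unsigned m)      { return ((d & 1) * 16) + m * 2; }
static unsigned wr_unavail_bit(unsigned d, unsigned m)      { return rd_unavail_bit(d, m) + 1; }

int main(void)
{
        /* ACI_RD is domain 2: the driver denies writes for every master there. */
        for (unsigned m = 0; m < 6; m++)
                printf("domain 2, master %u: reg 0x%03x, deny-write bit %u\n",
                       m, dm_aut_ctrl_reg(2), wr_unavail_bit(2, m));
        return 0;
}

This reproduces what sun50i_iommu_enable() programs for SUN50I_IOMMU_ACI_RD: register 0x0b4, with the odd bits 1, 3, 5, 7, 9 and 11 set to make writes unavailable for masters 0 through 5.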
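
sun50i_dte_get_page_table() installs a freshly allocated L2 table with cmpxchg() and frees it again if another mapper raced it and won, so no lock is needed around DTE creation. Below is a rough userspace analogue of that pattern using C11 atomics; it is a sketch only, with invented names, and it deliberately skips what the kernel code also does (encoding the DTE valid bit and address, and flushing the tables for the device).

/* Sketch: allocate, try to install atomically, free the loser. */
#include <stdatomic.h>
#include <stdint.h>
#include <stdlib.h>

static uint32_t *get_or_install_pt(_Atomic(uint32_t *) *slot)
{
        uint32_t *pt = atomic_load(slot);
        if (pt)
                return pt;                       /* a page table is already installed */

        uint32_t *new_pt = calloc(256, sizeof(*new_pt));
        if (!new_pt)
                return NULL;

        uint32_t *expected = NULL;
        if (!atomic_compare_exchange_strong(slot, &expected, new_pt)) {
                free(new_pt);                    /* lost the race: use the winner's table */
                return expected;
        }
        return new_pt;
}

int main(void)
{
        static _Atomic(uint32_t *) dt[4096];     /* stand-in for the Directory Table */

        uint32_t *pt = get_or_install_pt(&dt[1027]);
        if (pt)
                pt[10] = 0x41000000 | (4 << 4) | (1 << 1);  /* page address | ACI_RD_WR | valid */
        return 0;
}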