author		Linus Torvalds <torvalds@linux-foundation.org>	2011-10-31 02:46:19 +0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2011-10-31 02:46:19 +0400
commit		0cfdc724390fb9370f27bb9a133eadf69114dd21 (patch)
tree		2abfb0112c46c837c6b42007eadfc389293b7710 /drivers/iommu
parent		b48aeab65e9fc4b0c9757c5fbc1d722544eb8786 (diff)
parent		1abb4ba596a91a839f82e0c9c837b777d574e83d (diff)
download	linux-0cfdc724390fb9370f27bb9a133eadf69114dd21.tar.xz
Merge branch 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu
* 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu: (33 commits)
iommu/core: Remove global iommu_ops and register_iommu
iommu/msm: Use bus_set_iommu instead of register_iommu
iommu/omap: Use bus_set_iommu instead of register_iommu
iommu/vt-d: Use bus_set_iommu instead of register_iommu
iommu/amd: Use bus_set_iommu instead of register_iommu
iommu/core: Use bus->iommu_ops in the iommu-api
iommu/core: Convert iommu_found to iommu_present
iommu/core: Add bus_type parameter to iommu_domain_alloc
Driver core: Add iommu_ops to bus_type
iommu/core: Define iommu_ops and register_iommu only with CONFIG_IOMMU_API
iommu/amd: Fix wrong shift direction
iommu/omap: always provide iommu debug code
iommu/core: let drivers know if an iommu fault handler isn't installed
iommu/core: export iommu_set_fault_handler()
iommu/omap: Fix build error with !IOMMU_SUPPORT
iommu/omap: Migrate to the generic fault report mechanism
iommu/core: Add fault reporting mechanism
iommu/core: Use PAGE_SIZE instead of hard-coded value
iommu/core: use the existing IS_ALIGNED macro
iommu/msm: ->unmap() should return order of unmapped page
...
Fixup trivial conflicts in drivers/iommu/Makefile: "move omap iommu to
dedicated iommu folder" vs "Rename the DMAR and INTR_REMAP config
options" just happened to touch lines next to each other.
Diffstat (limited to 'drivers/iommu')
-rw-r--r--  drivers/iommu/Kconfig             |   19
-rw-r--r--  drivers/iommu/Makefile            |    3
-rw-r--r--  drivers/iommu/amd_iommu.c         |    4
-rw-r--r--  drivers/iommu/intel-iommu.c       |    2
-rw-r--r--  drivers/iommu/iommu.c             |  114
-rw-r--r--  drivers/iommu/msm_iommu.c         |    9
-rw-r--r--  drivers/iommu/omap-iommu-debug.c  |  418
-rw-r--r--  drivers/iommu/omap-iommu.c        | 1245
-rw-r--r--  drivers/iommu/omap-iovmm.c        |  742
9 files changed, 2527 insertions(+), 29 deletions(-)
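
The other user-visible addition in iommu.c below is the generic fault-report path: an IOMMU driver calls report_iommu_fault() from its fault ISR, and a callback installed with iommu_set_fault_handler() gets the faulting address before the driver's own error path runs. A minimal sketch of the consumer side, under the assumption that the handler signature mirrors how report_iommu_fault() is invoked in the OMAP code below (the handler name and return policy are invented for illustration):

#include <linux/device.h>
#include <linux/errno.h>
#include <linux/iommu.h>

/*
 * Illustrative fault handler, not part of the patch. Per the new kernel-doc,
 * return 0 if the fault was handled here; a non-zero value lets the iommu
 * driver fall back to its own reporting (the OMAP driver, for instance, then
 * dumps the faulting pgd/pte).
 */
static int my_iommu_fault(struct iommu_domain *domain, struct device *dev,
			  unsigned long iova, int flags)
{
	dev_err(dev, "unhandled iommu fault at iova 0x%lx (flags 0x%x)\n",
		iova, flags);
	return -ENOSYS;
}

static void my_install_fault_handler(struct iommu_domain *domain)
{
	/* Can be called at any time after iommu_domain_alloc(). */
	iommu_set_fault_handler(domain, my_iommu_fault);
}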
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig index 7d7eaa15e773..5414253b185a 100644 --- a/drivers/iommu/Kconfig +++ b/drivers/iommu/Kconfig @@ -112,4 +112,23 @@ config IRQ_REMAP To use x2apic mode in the CPU's which support x2APIC enhancements or to support platforms with CPU's having > 8 bit APIC ID, say Y. +# OMAP IOMMU support +config OMAP_IOMMU + bool "OMAP IOMMU Support" + depends on ARCH_OMAP + select IOMMU_API + +config OMAP_IOVMM + tristate "OMAP IO Virtual Memory Manager Support" + depends on OMAP_IOMMU + +config OMAP_IOMMU_DEBUG + tristate "Export OMAP IOMMU/IOVMM internals in DebugFS" + depends on OMAP_IOVMM && DEBUG_FS + help + Select this to see extensive information about + the internal state of OMAP IOMMU/IOVMM in debugfs. + + Say N unless you know you need this. + endif # IOMMU_SUPPORT diff --git a/drivers/iommu/Makefile b/drivers/iommu/Makefile index 6394994a2b9d..2f4448794bc7 100644 --- a/drivers/iommu/Makefile +++ b/drivers/iommu/Makefile @@ -4,3 +4,6 @@ obj-$(CONFIG_AMD_IOMMU) += amd_iommu.o amd_iommu_init.o obj-$(CONFIG_DMAR_TABLE) += dmar.o obj-$(CONFIG_INTEL_IOMMU) += iova.o intel-iommu.o obj-$(CONFIG_IRQ_REMAP) += intr_remapping.o +obj-$(CONFIG_OMAP_IOMMU) += omap-iommu.o +obj-$(CONFIG_OMAP_IOVMM) += omap-iovmm.o +obj-$(CONFIG_OMAP_IOMMU_DEBUG) += omap-iommu-debug.o diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c index 0e4227f457af..4ee277a8521a 100644 --- a/drivers/iommu/amd_iommu.c +++ b/drivers/iommu/amd_iommu.c @@ -1283,7 +1283,7 @@ static int alloc_new_range(struct dma_ops_domain *dma_dom, if (!pte || !IOMMU_PTE_PRESENT(*pte)) continue; - dma_ops_reserve_addresses(dma_dom, i << PAGE_SHIFT, 1); + dma_ops_reserve_addresses(dma_dom, i >> PAGE_SHIFT, 1); } update_domain(&dma_dom->domain); @@ -2495,7 +2495,7 @@ static unsigned device_dma_ops_init(void) void __init amd_iommu_init_api(void) { - register_iommu(&amd_iommu_ops); + bus_set_iommu(&pci_bus_type, &amd_iommu_ops); } int __init amd_iommu_init_dma_ops(void) diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c index be1953c239b0..bb161d2fa03c 100644 --- a/drivers/iommu/intel-iommu.c +++ b/drivers/iommu/intel-iommu.c @@ -3642,7 +3642,7 @@ int __init intel_iommu_init(void) init_iommu_pm_ops(); - register_iommu(&intel_iommu_ops); + bus_set_iommu(&pci_bus_type, &intel_iommu_ops); bus_register_notifier(&pci_bus_type, &device_nb); diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c index 6e6b6a11b3ce..2fb2963df553 100644 --- a/drivers/iommu/iommu.c +++ b/drivers/iommu/iommu.c @@ -16,6 +16,8 @@ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ +#include <linux/device.h> +#include <linux/kernel.h> #include <linux/bug.h> #include <linux/types.h> #include <linux/module.h> @@ -23,32 +25,78 @@ #include <linux/errno.h> #include <linux/iommu.h> -static struct iommu_ops *iommu_ops; +static void iommu_bus_init(struct bus_type *bus, struct iommu_ops *ops) +{ +} -void register_iommu(struct iommu_ops *ops) +/** + * bus_set_iommu - set iommu-callbacks for the bus + * @bus: bus. + * @ops: the callbacks provided by the iommu-driver + * + * This function is called by an iommu driver to set the iommu methods + * used for a particular bus. Drivers for devices on that bus can use + * the iommu-api after these ops are registered. + * This special function is needed because IOMMUs are usually devices on + * the bus itself, so the iommu drivers are not initialized when the bus + * is set up. 
With this function the iommu-driver can set the iommu-ops + * afterwards. + */ +int bus_set_iommu(struct bus_type *bus, struct iommu_ops *ops) { - if (iommu_ops) - BUG(); + if (bus->iommu_ops != NULL) + return -EBUSY; + + bus->iommu_ops = ops; + + /* Do IOMMU specific setup for this bus-type */ + iommu_bus_init(bus, ops); - iommu_ops = ops; + return 0; } +EXPORT_SYMBOL_GPL(bus_set_iommu); -bool iommu_found(void) +bool iommu_present(struct bus_type *bus) { - return iommu_ops != NULL; + return bus->iommu_ops != NULL; } -EXPORT_SYMBOL_GPL(iommu_found); +EXPORT_SYMBOL_GPL(iommu_present); -struct iommu_domain *iommu_domain_alloc(void) +/** + * iommu_set_fault_handler() - set a fault handler for an iommu domain + * @domain: iommu domain + * @handler: fault handler + * + * This function should be used by IOMMU users which want to be notified + * whenever an IOMMU fault happens. + * + * The fault handler itself should return 0 on success, and an appropriate + * error code otherwise. + */ +void iommu_set_fault_handler(struct iommu_domain *domain, + iommu_fault_handler_t handler) +{ + BUG_ON(!domain); + + domain->handler = handler; +} +EXPORT_SYMBOL_GPL(iommu_set_fault_handler); + +struct iommu_domain *iommu_domain_alloc(struct bus_type *bus) { struct iommu_domain *domain; int ret; + if (bus == NULL || bus->iommu_ops == NULL) + return NULL; + domain = kmalloc(sizeof(*domain), GFP_KERNEL); if (!domain) return NULL; - ret = iommu_ops->domain_init(domain); + domain->ops = bus->iommu_ops; + + ret = domain->ops->domain_init(domain); if (ret) goto out_free; @@ -63,62 +111,78 @@ EXPORT_SYMBOL_GPL(iommu_domain_alloc); void iommu_domain_free(struct iommu_domain *domain) { - iommu_ops->domain_destroy(domain); + if (likely(domain->ops->domain_destroy != NULL)) + domain->ops->domain_destroy(domain); + kfree(domain); } EXPORT_SYMBOL_GPL(iommu_domain_free); int iommu_attach_device(struct iommu_domain *domain, struct device *dev) { - return iommu_ops->attach_dev(domain, dev); + if (unlikely(domain->ops->attach_dev == NULL)) + return -ENODEV; + + return domain->ops->attach_dev(domain, dev); } EXPORT_SYMBOL_GPL(iommu_attach_device); void iommu_detach_device(struct iommu_domain *domain, struct device *dev) { - iommu_ops->detach_dev(domain, dev); + if (unlikely(domain->ops->detach_dev == NULL)) + return; + + domain->ops->detach_dev(domain, dev); } EXPORT_SYMBOL_GPL(iommu_detach_device); phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, unsigned long iova) { - return iommu_ops->iova_to_phys(domain, iova); + if (unlikely(domain->ops->iova_to_phys == NULL)) + return 0; + + return domain->ops->iova_to_phys(domain, iova); } EXPORT_SYMBOL_GPL(iommu_iova_to_phys); int iommu_domain_has_cap(struct iommu_domain *domain, unsigned long cap) { - return iommu_ops->domain_has_cap(domain, cap); + if (unlikely(domain->ops->domain_has_cap == NULL)) + return 0; + + return domain->ops->domain_has_cap(domain, cap); } EXPORT_SYMBOL_GPL(iommu_domain_has_cap); int iommu_map(struct iommu_domain *domain, unsigned long iova, phys_addr_t paddr, int gfp_order, int prot) { - unsigned long invalid_mask; size_t size; - size = 0x1000UL << gfp_order; - invalid_mask = size - 1; + if (unlikely(domain->ops->map == NULL)) + return -ENODEV; - BUG_ON((iova | paddr) & invalid_mask); + size = PAGE_SIZE << gfp_order; - return iommu_ops->map(domain, iova, paddr, gfp_order, prot); + BUG_ON(!IS_ALIGNED(iova | paddr, size)); + + return domain->ops->map(domain, iova, paddr, gfp_order, prot); } EXPORT_SYMBOL_GPL(iommu_map); int iommu_unmap(struct 
iommu_domain *domain, unsigned long iova, int gfp_order) { - unsigned long invalid_mask; size_t size; - size = 0x1000UL << gfp_order; - invalid_mask = size - 1; + if (unlikely(domain->ops->unmap == NULL)) + return -ENODEV; + + size = PAGE_SIZE << gfp_order; - BUG_ON(iova & invalid_mask); + BUG_ON(!IS_ALIGNED(iova, size)); - return iommu_ops->unmap(domain, iova, gfp_order); + return domain->ops->unmap(domain, iova, gfp_order); } EXPORT_SYMBOL_GPL(iommu_unmap); diff --git a/drivers/iommu/msm_iommu.c b/drivers/iommu/msm_iommu.c index 1a584e077c61..5865dd2e28f9 100644 --- a/drivers/iommu/msm_iommu.c +++ b/drivers/iommu/msm_iommu.c @@ -543,6 +543,13 @@ static int msm_iommu_unmap(struct iommu_domain *domain, unsigned long va, } ret = __flush_iotlb(domain); + + /* + * the IOMMU API requires us to return the order of the unmapped + * page (on success). + */ + if (!ret) + ret = order; fail: spin_unlock_irqrestore(&msm_iommu_lock, flags); return ret; @@ -721,7 +728,7 @@ static void __init setup_iommu_tex_classes(void) static int __init msm_iommu_init(void) { setup_iommu_tex_classes(); - register_iommu(&msm_iommu_ops); + bus_set_iommu(&platform_bus_type, &msm_iommu_ops); return 0; } diff --git a/drivers/iommu/omap-iommu-debug.c b/drivers/iommu/omap-iommu-debug.c new file mode 100644 index 000000000000..9c192e79f806 --- /dev/null +++ b/drivers/iommu/omap-iommu-debug.c @@ -0,0 +1,418 @@ +/* + * omap iommu: debugfs interface + * + * Copyright (C) 2008-2009 Nokia Corporation + * + * Written by Hiroshi DOYU <Hiroshi.DOYU@nokia.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include <linux/err.h> +#include <linux/clk.h> +#include <linux/io.h> +#include <linux/slab.h> +#include <linux/uaccess.h> +#include <linux/platform_device.h> +#include <linux/debugfs.h> + +#include <plat/iommu.h> +#include <plat/iovmm.h> + +#include <plat/iopgtable.h> + +#define MAXCOLUMN 100 /* for short messages */ + +static DEFINE_MUTEX(iommu_debug_lock); + +static struct dentry *iommu_debug_root; + +static ssize_t debug_read_ver(struct file *file, char __user *userbuf, + size_t count, loff_t *ppos) +{ + u32 ver = omap_iommu_arch_version(); + char buf[MAXCOLUMN], *p = buf; + + p += sprintf(p, "H/W version: %d.%d\n", (ver >> 4) & 0xf , ver & 0xf); + + return simple_read_from_buffer(userbuf, count, ppos, buf, p - buf); +} + +static ssize_t debug_read_regs(struct file *file, char __user *userbuf, + size_t count, loff_t *ppos) +{ + struct omap_iommu *obj = file->private_data; + char *p, *buf; + ssize_t bytes; + + buf = kmalloc(count, GFP_KERNEL); + if (!buf) + return -ENOMEM; + p = buf; + + mutex_lock(&iommu_debug_lock); + + bytes = omap_iommu_dump_ctx(obj, p, count); + bytes = simple_read_from_buffer(userbuf, count, ppos, buf, bytes); + + mutex_unlock(&iommu_debug_lock); + kfree(buf); + + return bytes; +} + +static ssize_t debug_read_tlb(struct file *file, char __user *userbuf, + size_t count, loff_t *ppos) +{ + struct omap_iommu *obj = file->private_data; + char *p, *buf; + ssize_t bytes, rest; + + buf = kmalloc(count, GFP_KERNEL); + if (!buf) + return -ENOMEM; + p = buf; + + mutex_lock(&iommu_debug_lock); + + p += sprintf(p, "%8s %8s\n", "cam:", "ram:"); + p += sprintf(p, "-----------------------------------------\n"); + rest = count - (p - buf); + p += omap_dump_tlb_entries(obj, p, rest); + + bytes = simple_read_from_buffer(userbuf, count, ppos, buf, p - buf); + + 
mutex_unlock(&iommu_debug_lock); + kfree(buf); + + return bytes; +} + +static ssize_t debug_write_pagetable(struct file *file, + const char __user *userbuf, size_t count, loff_t *ppos) +{ + struct iotlb_entry e; + struct cr_regs cr; + int err; + struct omap_iommu *obj = file->private_data; + char buf[MAXCOLUMN], *p = buf; + + count = min(count, sizeof(buf)); + + mutex_lock(&iommu_debug_lock); + if (copy_from_user(p, userbuf, count)) { + mutex_unlock(&iommu_debug_lock); + return -EFAULT; + } + + sscanf(p, "%x %x", &cr.cam, &cr.ram); + if (!cr.cam || !cr.ram) { + mutex_unlock(&iommu_debug_lock); + return -EINVAL; + } + + omap_iotlb_cr_to_e(&cr, &e); + err = omap_iopgtable_store_entry(obj, &e); + if (err) + dev_err(obj->dev, "%s: fail to store cr\n", __func__); + + mutex_unlock(&iommu_debug_lock); + return count; +} + +#define dump_ioptable_entry_one(lv, da, val) \ + ({ \ + int __err = 0; \ + ssize_t bytes; \ + const int maxcol = 22; \ + const char *str = "%d: %08x %08x\n"; \ + bytes = snprintf(p, maxcol, str, lv, da, val); \ + p += bytes; \ + len -= bytes; \ + if (len < maxcol) \ + __err = -ENOMEM; \ + __err; \ + }) + +static ssize_t dump_ioptable(struct omap_iommu *obj, char *buf, ssize_t len) +{ + int i; + u32 *iopgd; + char *p = buf; + + spin_lock(&obj->page_table_lock); + + iopgd = iopgd_offset(obj, 0); + for (i = 0; i < PTRS_PER_IOPGD; i++, iopgd++) { + int j, err; + u32 *iopte; + u32 da; + + if (!*iopgd) + continue; + + if (!(*iopgd & IOPGD_TABLE)) { + da = i << IOPGD_SHIFT; + + err = dump_ioptable_entry_one(1, da, *iopgd); + if (err) + goto out; + continue; + } + + iopte = iopte_offset(iopgd, 0); + + for (j = 0; j < PTRS_PER_IOPTE; j++, iopte++) { + if (!*iopte) + continue; + + da = (i << IOPGD_SHIFT) + (j << IOPTE_SHIFT); + err = dump_ioptable_entry_one(2, da, *iopgd); + if (err) + goto out; + } + } +out: + spin_unlock(&obj->page_table_lock); + + return p - buf; +} + +static ssize_t debug_read_pagetable(struct file *file, char __user *userbuf, + size_t count, loff_t *ppos) +{ + struct omap_iommu *obj = file->private_data; + char *p, *buf; + size_t bytes; + + buf = (char *)__get_free_page(GFP_KERNEL); + if (!buf) + return -ENOMEM; + p = buf; + + p += sprintf(p, "L: %8s %8s\n", "da:", "pa:"); + p += sprintf(p, "-----------------------------------------\n"); + + mutex_lock(&iommu_debug_lock); + + bytes = PAGE_SIZE - (p - buf); + p += dump_ioptable(obj, p, bytes); + + bytes = simple_read_from_buffer(userbuf, count, ppos, buf, p - buf); + + mutex_unlock(&iommu_debug_lock); + free_page((unsigned long)buf); + + return bytes; +} + +static ssize_t debug_read_mmap(struct file *file, char __user *userbuf, + size_t count, loff_t *ppos) +{ + struct omap_iommu *obj = file->private_data; + char *p, *buf; + struct iovm_struct *tmp; + int uninitialized_var(i); + ssize_t bytes; + + buf = (char *)__get_free_page(GFP_KERNEL); + if (!buf) + return -ENOMEM; + p = buf; + + p += sprintf(p, "%-3s %-8s %-8s %6s %8s\n", + "No", "start", "end", "size", "flags"); + p += sprintf(p, "-------------------------------------------------\n"); + + mutex_lock(&iommu_debug_lock); + + list_for_each_entry(tmp, &obj->mmap, list) { + size_t len; + const char *str = "%3d %08x-%08x %6x %8x\n"; + const int maxcol = 39; + + len = tmp->da_end - tmp->da_start; + p += snprintf(p, maxcol, str, + i, tmp->da_start, tmp->da_end, len, tmp->flags); + + if (PAGE_SIZE - (p - buf) < maxcol) + break; + i++; + } + + bytes = simple_read_from_buffer(userbuf, count, ppos, buf, p - buf); + + mutex_unlock(&iommu_debug_lock); + free_page((unsigned 
long)buf); + + return bytes; +} + +static ssize_t debug_read_mem(struct file *file, char __user *userbuf, + size_t count, loff_t *ppos) +{ + struct omap_iommu *obj = file->private_data; + char *p, *buf; + struct iovm_struct *area; + ssize_t bytes; + + count = min_t(ssize_t, count, PAGE_SIZE); + + buf = (char *)__get_free_page(GFP_KERNEL); + if (!buf) + return -ENOMEM; + p = buf; + + mutex_lock(&iommu_debug_lock); + + area = omap_find_iovm_area(obj, (u32)ppos); + if (IS_ERR(area)) { + bytes = -EINVAL; + goto err_out; + } + memcpy(p, area->va, count); + p += count; + + bytes = simple_read_from_buffer(userbuf, count, ppos, buf, p - buf); +err_out: + mutex_unlock(&iommu_debug_lock); + free_page((unsigned long)buf); + + return bytes; +} + +static ssize_t debug_write_mem(struct file *file, const char __user *userbuf, + size_t count, loff_t *ppos) +{ + struct omap_iommu *obj = file->private_data; + struct iovm_struct *area; + char *p, *buf; + + count = min_t(size_t, count, PAGE_SIZE); + + buf = (char *)__get_free_page(GFP_KERNEL); + if (!buf) + return -ENOMEM; + p = buf; + + mutex_lock(&iommu_debug_lock); + + if (copy_from_user(p, userbuf, count)) { + count = -EFAULT; + goto err_out; + } + + area = omap_find_iovm_area(obj, (u32)ppos); + if (IS_ERR(area)) { + count = -EINVAL; + goto err_out; + } + memcpy(area->va, p, count); +err_out: + mutex_unlock(&iommu_debug_lock); + free_page((unsigned long)buf); + + return count; +} + +static int debug_open_generic(struct inode *inode, struct file *file) +{ + file->private_data = inode->i_private; + return 0; +} + +#define DEBUG_FOPS(name) \ + static const struct file_operations debug_##name##_fops = { \ + .open = debug_open_generic, \ + .read = debug_read_##name, \ + .write = debug_write_##name, \ + .llseek = generic_file_llseek, \ + }; + +#define DEBUG_FOPS_RO(name) \ + static const struct file_operations debug_##name##_fops = { \ + .open = debug_open_generic, \ + .read = debug_read_##name, \ + .llseek = generic_file_llseek, \ + }; + +DEBUG_FOPS_RO(ver); +DEBUG_FOPS_RO(regs); +DEBUG_FOPS_RO(tlb); +DEBUG_FOPS(pagetable); +DEBUG_FOPS_RO(mmap); +DEBUG_FOPS(mem); + +#define __DEBUG_ADD_FILE(attr, mode) \ + { \ + struct dentry *dent; \ + dent = debugfs_create_file(#attr, mode, parent, \ + obj, &debug_##attr##_fops); \ + if (!dent) \ + return -ENOMEM; \ + } + +#define DEBUG_ADD_FILE(name) __DEBUG_ADD_FILE(name, 600) +#define DEBUG_ADD_FILE_RO(name) __DEBUG_ADD_FILE(name, 400) + +static int iommu_debug_register(struct device *dev, void *data) +{ + struct platform_device *pdev = to_platform_device(dev); + struct omap_iommu *obj = platform_get_drvdata(pdev); + struct dentry *d, *parent; + + if (!obj || !obj->dev) + return -EINVAL; + + d = debugfs_create_dir(obj->name, iommu_debug_root); + if (!d) + return -ENOMEM; + parent = d; + + d = debugfs_create_u8("nr_tlb_entries", 400, parent, + (u8 *)&obj->nr_tlb_entries); + if (!d) + return -ENOMEM; + + DEBUG_ADD_FILE_RO(ver); + DEBUG_ADD_FILE_RO(regs); + DEBUG_ADD_FILE_RO(tlb); + DEBUG_ADD_FILE(pagetable); + DEBUG_ADD_FILE_RO(mmap); + DEBUG_ADD_FILE(mem); + + return 0; +} + +static int __init iommu_debug_init(void) +{ + struct dentry *d; + int err; + + d = debugfs_create_dir("iommu", NULL); + if (!d) + return -ENOMEM; + iommu_debug_root = d; + + err = omap_foreach_iommu_device(d, iommu_debug_register); + if (err) + goto err_out; + return 0; + +err_out: + debugfs_remove_recursive(iommu_debug_root); + return err; +} +module_init(iommu_debug_init) + +static void __exit iommu_debugfs_exit(void) +{ + 
debugfs_remove_recursive(iommu_debug_root); +} +module_exit(iommu_debugfs_exit) + +MODULE_DESCRIPTION("omap iommu: debugfs interface"); +MODULE_AUTHOR("Hiroshi DOYU <Hiroshi.DOYU@nokia.com>"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/iommu/omap-iommu.c b/drivers/iommu/omap-iommu.c new file mode 100644 index 000000000000..8f32b2bf7587 --- /dev/null +++ b/drivers/iommu/omap-iommu.c @@ -0,0 +1,1245 @@ +/* + * omap iommu: tlb and pagetable primitives + * + * Copyright (C) 2008-2010 Nokia Corporation + * + * Written by Hiroshi DOYU <Hiroshi.DOYU@nokia.com>, + * Paul Mundt and Toshihiro Kobayashi + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include <linux/err.h> +#include <linux/module.h> +#include <linux/slab.h> +#include <linux/interrupt.h> +#include <linux/ioport.h> +#include <linux/clk.h> +#include <linux/platform_device.h> +#include <linux/iommu.h> +#include <linux/mutex.h> +#include <linux/spinlock.h> + +#include <asm/cacheflush.h> + +#include <plat/iommu.h> + +#include <plat/iopgtable.h> + +#define for_each_iotlb_cr(obj, n, __i, cr) \ + for (__i = 0; \ + (__i < (n)) && (cr = __iotlb_read_cr((obj), __i), true); \ + __i++) + +/** + * struct omap_iommu_domain - omap iommu domain + * @pgtable: the page table + * @iommu_dev: an omap iommu device attached to this domain. only a single + * iommu device can be attached for now. + * @lock: domain lock, should be taken when attaching/detaching + */ +struct omap_iommu_domain { + u32 *pgtable; + struct omap_iommu *iommu_dev; + spinlock_t lock; +}; + +/* accommodate the difference between omap1 and omap2/3 */ +static const struct iommu_functions *arch_iommu; + +static struct platform_driver omap_iommu_driver; +static struct kmem_cache *iopte_cachep; + +/** + * omap_install_iommu_arch - Install archtecure specific iommu functions + * @ops: a pointer to architecture specific iommu functions + * + * There are several kind of iommu algorithm(tlb, pagetable) among + * omap series. This interface installs such an iommu algorighm. + **/ +int omap_install_iommu_arch(const struct iommu_functions *ops) +{ + if (arch_iommu) + return -EBUSY; + + arch_iommu = ops; + return 0; +} +EXPORT_SYMBOL_GPL(omap_install_iommu_arch); + +/** + * omap_uninstall_iommu_arch - Uninstall archtecure specific iommu functions + * @ops: a pointer to architecture specific iommu functions + * + * This interface uninstalls the iommu algorighm installed previously. 
+ **/ +void omap_uninstall_iommu_arch(const struct iommu_functions *ops) +{ + if (arch_iommu != ops) + pr_err("%s: not your arch\n", __func__); + + arch_iommu = NULL; +} +EXPORT_SYMBOL_GPL(omap_uninstall_iommu_arch); + +/** + * omap_iommu_save_ctx - Save registers for pm off-mode support + * @obj: target iommu + **/ +void omap_iommu_save_ctx(struct omap_iommu *obj) +{ + arch_iommu->save_ctx(obj); +} +EXPORT_SYMBOL_GPL(omap_iommu_save_ctx); + +/** + * omap_iommu_restore_ctx - Restore registers for pm off-mode support + * @obj: target iommu + **/ +void omap_iommu_restore_ctx(struct omap_iommu *obj) +{ + arch_iommu->restore_ctx(obj); +} +EXPORT_SYMBOL_GPL(omap_iommu_restore_ctx); + +/** + * omap_iommu_arch_version - Return running iommu arch version + **/ +u32 omap_iommu_arch_version(void) +{ + return arch_iommu->version; +} +EXPORT_SYMBOL_GPL(omap_iommu_arch_version); + +static int iommu_enable(struct omap_iommu *obj) +{ + int err; + + if (!obj) + return -EINVAL; + + if (!arch_iommu) + return -ENODEV; + + clk_enable(obj->clk); + + err = arch_iommu->enable(obj); + + clk_disable(obj->clk); + return err; +} + +static void iommu_disable(struct omap_iommu *obj) +{ + if (!obj) + return; + + clk_enable(obj->clk); + + arch_iommu->disable(obj); + + clk_disable(obj->clk); +} + +/* + * TLB operations + */ +void omap_iotlb_cr_to_e(struct cr_regs *cr, struct iotlb_entry *e) +{ + BUG_ON(!cr || !e); + + arch_iommu->cr_to_e(cr, e); +} +EXPORT_SYMBOL_GPL(omap_iotlb_cr_to_e); + +static inline int iotlb_cr_valid(struct cr_regs *cr) +{ + if (!cr) + return -EINVAL; + + return arch_iommu->cr_valid(cr); +} + +static inline struct cr_regs *iotlb_alloc_cr(struct omap_iommu *obj, + struct iotlb_entry *e) +{ + if (!e) + return NULL; + + return arch_iommu->alloc_cr(obj, e); +} + +static u32 iotlb_cr_to_virt(struct cr_regs *cr) +{ + return arch_iommu->cr_to_virt(cr); +} + +static u32 get_iopte_attr(struct iotlb_entry *e) +{ + return arch_iommu->get_pte_attr(e); +} + +static u32 iommu_report_fault(struct omap_iommu *obj, u32 *da) +{ + return arch_iommu->fault_isr(obj, da); +} + +static void iotlb_lock_get(struct omap_iommu *obj, struct iotlb_lock *l) +{ + u32 val; + + val = iommu_read_reg(obj, MMU_LOCK); + + l->base = MMU_LOCK_BASE(val); + l->vict = MMU_LOCK_VICT(val); + +} + +static void iotlb_lock_set(struct omap_iommu *obj, struct iotlb_lock *l) +{ + u32 val; + + val = (l->base << MMU_LOCK_BASE_SHIFT); + val |= (l->vict << MMU_LOCK_VICT_SHIFT); + + iommu_write_reg(obj, val, MMU_LOCK); +} + +static void iotlb_read_cr(struct omap_iommu *obj, struct cr_regs *cr) +{ + arch_iommu->tlb_read_cr(obj, cr); +} + +static void iotlb_load_cr(struct omap_iommu *obj, struct cr_regs *cr) +{ + arch_iommu->tlb_load_cr(obj, cr); + + iommu_write_reg(obj, 1, MMU_FLUSH_ENTRY); + iommu_write_reg(obj, 1, MMU_LD_TLB); +} + +/** + * iotlb_dump_cr - Dump an iommu tlb entry into buf + * @obj: target iommu + * @cr: contents of cam and ram register + * @buf: output buffer + **/ +static inline ssize_t iotlb_dump_cr(struct omap_iommu *obj, struct cr_regs *cr, + char *buf) +{ + BUG_ON(!cr || !buf); + + return arch_iommu->dump_cr(obj, cr, buf); +} + +/* only used in iotlb iteration for-loop */ +static struct cr_regs __iotlb_read_cr(struct omap_iommu *obj, int n) +{ + struct cr_regs cr; + struct iotlb_lock l; + + iotlb_lock_get(obj, &l); + l.vict = n; + iotlb_lock_set(obj, &l); + iotlb_read_cr(obj, &cr); + + return cr; +} + +/** + * load_iotlb_entry - Set an iommu tlb entry + * @obj: target iommu + * @e: an iommu tlb entry info + **/ +#ifdef 
PREFETCH_IOTLB +static int load_iotlb_entry(struct omap_iommu *obj, struct iotlb_entry *e) +{ + int err = 0; + struct iotlb_lock l; + struct cr_regs *cr; + + if (!obj || !obj->nr_tlb_entries || !e) + return -EINVAL; + + clk_enable(obj->clk); + + iotlb_lock_get(obj, &l); + if (l.base == obj->nr_tlb_entries) { + dev_warn(obj->dev, "%s: preserve entries full\n", __func__); + err = -EBUSY; + goto out; + } + if (!e->prsvd) { + int i; + struct cr_regs tmp; + + for_each_iotlb_cr(obj, obj->nr_tlb_entries, i, tmp) + if (!iotlb_cr_valid(&tmp)) + break; + + if (i == obj->nr_tlb_entries) { + dev_dbg(obj->dev, "%s: full: no entry\n", __func__); + err = -EBUSY; + goto out; + } + + iotlb_lock_get(obj, &l); + } else { + l.vict = l.base; + iotlb_lock_set(obj, &l); + } + + cr = iotlb_alloc_cr(obj, e); + if (IS_ERR(cr)) { + clk_disable(obj->clk); + return PTR_ERR(cr); + } + + iotlb_load_cr(obj, cr); + kfree(cr); + + if (e->prsvd) + l.base++; + /* increment victim for next tlb load */ + if (++l.vict == obj->nr_tlb_entries) + l.vict = l.base; + iotlb_lock_set(obj, &l); +out: + clk_disable(obj->clk); + return err; +} + +#else /* !PREFETCH_IOTLB */ + +static int load_iotlb_entry(struct omap_iommu *obj, struct iotlb_entry *e) +{ + return 0; +} + +#endif /* !PREFETCH_IOTLB */ + +static int prefetch_iotlb_entry(struct omap_iommu *obj, struct iotlb_entry *e) +{ + return load_iotlb_entry(obj, e); +} + +/** + * flush_iotlb_page - Clear an iommu tlb entry + * @obj: target iommu + * @da: iommu device virtual address + * + * Clear an iommu tlb entry which includes 'da' address. + **/ +static void flush_iotlb_page(struct omap_iommu *obj, u32 da) +{ + int i; + struct cr_regs cr; + + clk_enable(obj->clk); + + for_each_iotlb_cr(obj, obj->nr_tlb_entries, i, cr) { + u32 start; + size_t bytes; + + if (!iotlb_cr_valid(&cr)) + continue; + + start = iotlb_cr_to_virt(&cr); + bytes = iopgsz_to_bytes(cr.cam & 3); + + if ((start <= da) && (da < start + bytes)) { + dev_dbg(obj->dev, "%s: %08x<=%08x(%x)\n", + __func__, start, da, bytes); + iotlb_load_cr(obj, &cr); + iommu_write_reg(obj, 1, MMU_FLUSH_ENTRY); + } + } + clk_disable(obj->clk); + + if (i == obj->nr_tlb_entries) + dev_dbg(obj->dev, "%s: no page for %08x\n", __func__, da); +} + +/** + * flush_iotlb_all - Clear all iommu tlb entries + * @obj: target iommu + **/ +static void flush_iotlb_all(struct omap_iommu *obj) +{ + struct iotlb_lock l; + + clk_enable(obj->clk); + + l.base = 0; + l.vict = 0; + iotlb_lock_set(obj, &l); + + iommu_write_reg(obj, 1, MMU_GFLUSH); + + clk_disable(obj->clk); +} + +#if defined(CONFIG_OMAP_IOMMU_DEBUG) || defined(CONFIG_OMAP_IOMMU_DEBUG_MODULE) + +ssize_t omap_iommu_dump_ctx(struct omap_iommu *obj, char *buf, ssize_t bytes) +{ + if (!obj || !buf) + return -EINVAL; + + clk_enable(obj->clk); + + bytes = arch_iommu->dump_ctx(obj, buf, bytes); + + clk_disable(obj->clk); + + return bytes; +} +EXPORT_SYMBOL_GPL(omap_iommu_dump_ctx); + +static int +__dump_tlb_entries(struct omap_iommu *obj, struct cr_regs *crs, int num) +{ + int i; + struct iotlb_lock saved; + struct cr_regs tmp; + struct cr_regs *p = crs; + + clk_enable(obj->clk); + iotlb_lock_get(obj, &saved); + + for_each_iotlb_cr(obj, num, i, tmp) { + if (!iotlb_cr_valid(&tmp)) + continue; + *p++ = tmp; + } + + iotlb_lock_set(obj, &saved); + clk_disable(obj->clk); + + return p - crs; +} + +/** + * omap_dump_tlb_entries - dump cr arrays to given buffer + * @obj: target iommu + * @buf: output buffer + **/ +size_t omap_dump_tlb_entries(struct omap_iommu *obj, char *buf, ssize_t bytes) +{ + int i, num; + 
struct cr_regs *cr; + char *p = buf; + + num = bytes / sizeof(*cr); + num = min(obj->nr_tlb_entries, num); + + cr = kcalloc(num, sizeof(*cr), GFP_KERNEL); + if (!cr) + return 0; + + num = __dump_tlb_entries(obj, cr, num); + for (i = 0; i < num; i++) + p += iotlb_dump_cr(obj, cr + i, p); + kfree(cr); + + return p - buf; +} +EXPORT_SYMBOL_GPL(omap_dump_tlb_entries); + +int omap_foreach_iommu_device(void *data, int (*fn)(struct device *, void *)) +{ + return driver_for_each_device(&omap_iommu_driver.driver, + NULL, data, fn); +} +EXPORT_SYMBOL_GPL(omap_foreach_iommu_device); + +#endif /* CONFIG_OMAP_IOMMU_DEBUG_MODULE */ + +/* + * H/W pagetable operations + */ +static void flush_iopgd_range(u32 *first, u32 *last) +{ + /* FIXME: L2 cache should be taken care of if it exists */ + do { + asm("mcr p15, 0, %0, c7, c10, 1 @ flush_pgd" + : : "r" (first)); + first += L1_CACHE_BYTES / sizeof(*first); + } while (first <= last); +} + +static void flush_iopte_range(u32 *first, u32 *last) +{ + /* FIXME: L2 cache should be taken care of if it exists */ + do { + asm("mcr p15, 0, %0, c7, c10, 1 @ flush_pte" + : : "r" (first)); + first += L1_CACHE_BYTES / sizeof(*first); + } while (first <= last); +} + +static void iopte_free(u32 *iopte) +{ + /* Note: freed iopte's must be clean ready for re-use */ + kmem_cache_free(iopte_cachep, iopte); +} + +static u32 *iopte_alloc(struct omap_iommu *obj, u32 *iopgd, u32 da) +{ + u32 *iopte; + + /* a table has already existed */ + if (*iopgd) + goto pte_ready; + + /* + * do the allocation outside the page table lock + */ + spin_unlock(&obj->page_table_lock); + iopte = kmem_cache_zalloc(iopte_cachep, GFP_KERNEL); + spin_lock(&obj->page_table_lock); + + if (!*iopgd) { + if (!iopte) + return ERR_PTR(-ENOMEM); + + *iopgd = virt_to_phys(iopte) | IOPGD_TABLE; + flush_iopgd_range(iopgd, iopgd); + + dev_vdbg(obj->dev, "%s: a new pte:%p\n", __func__, iopte); + } else { + /* We raced, free the reduniovant table */ + iopte_free(iopte); + } + +pte_ready: + iopte = iopte_offset(iopgd, da); + + dev_vdbg(obj->dev, + "%s: da:%08x pgd:%p *pgd:%08x pte:%p *pte:%08x\n", + __func__, da, iopgd, *iopgd, iopte, *iopte); + + return iopte; +} + +static int iopgd_alloc_section(struct omap_iommu *obj, u32 da, u32 pa, u32 prot) +{ + u32 *iopgd = iopgd_offset(obj, da); + + if ((da | pa) & ~IOSECTION_MASK) { + dev_err(obj->dev, "%s: %08x:%08x should aligned on %08lx\n", + __func__, da, pa, IOSECTION_SIZE); + return -EINVAL; + } + + *iopgd = (pa & IOSECTION_MASK) | prot | IOPGD_SECTION; + flush_iopgd_range(iopgd, iopgd); + return 0; +} + +static int iopgd_alloc_super(struct omap_iommu *obj, u32 da, u32 pa, u32 prot) +{ + u32 *iopgd = iopgd_offset(obj, da); + int i; + + if ((da | pa) & ~IOSUPER_MASK) { + dev_err(obj->dev, "%s: %08x:%08x should aligned on %08lx\n", + __func__, da, pa, IOSUPER_SIZE); + return -EINVAL; + } + + for (i = 0; i < 16; i++) + *(iopgd + i) = (pa & IOSUPER_MASK) | prot | IOPGD_SUPER; + flush_iopgd_range(iopgd, iopgd + 15); + return 0; +} + +static int iopte_alloc_page(struct omap_iommu *obj, u32 da, u32 pa, u32 prot) +{ + u32 *iopgd = iopgd_offset(obj, da); + u32 *iopte = iopte_alloc(obj, iopgd, da); + + if (IS_ERR(iopte)) + return PTR_ERR(iopte); + + *iopte = (pa & IOPAGE_MASK) | prot | IOPTE_SMALL; + flush_iopte_range(iopte, iopte); + + dev_vdbg(obj->dev, "%s: da:%08x pa:%08x pte:%p *pte:%08x\n", + __func__, da, pa, iopte, *iopte); + + return 0; +} + +static int iopte_alloc_large(struct omap_iommu *obj, u32 da, u32 pa, u32 prot) +{ + u32 *iopgd = iopgd_offset(obj, da); + u32 
*iopte = iopte_alloc(obj, iopgd, da); + int i; + + if ((da | pa) & ~IOLARGE_MASK) { + dev_err(obj->dev, "%s: %08x:%08x should aligned on %08lx\n", + __func__, da, pa, IOLARGE_SIZE); + return -EINVAL; + } + + if (IS_ERR(iopte)) + return PTR_ERR(iopte); + + for (i = 0; i < 16; i++) + *(iopte + i) = (pa & IOLARGE_MASK) | prot | IOPTE_LARGE; + flush_iopte_range(iopte, iopte + 15); + return 0; +} + +static int +iopgtable_store_entry_core(struct omap_iommu *obj, struct iotlb_entry *e) +{ + int (*fn)(struct omap_iommu *, u32, u32, u32); + u32 prot; + int err; + + if (!obj || !e) + return -EINVAL; + + switch (e->pgsz) { + case MMU_CAM_PGSZ_16M: + fn = iopgd_alloc_super; + break; + case MMU_CAM_PGSZ_1M: + fn = iopgd_alloc_section; + break; + case MMU_CAM_PGSZ_64K: + fn = iopte_alloc_large; + break; + case MMU_CAM_PGSZ_4K: + fn = iopte_alloc_page; + break; + default: + fn = NULL; + BUG(); + break; + } + + prot = get_iopte_attr(e); + + spin_lock(&obj->page_table_lock); + err = fn(obj, e->da, e->pa, prot); + spin_unlock(&obj->page_table_lock); + + return err; +} + +/** + * omap_iopgtable_store_entry - Make an iommu pte entry + * @obj: target iommu + * @e: an iommu tlb entry info + **/ +int omap_iopgtable_store_entry(struct omap_iommu *obj, struct iotlb_entry *e) +{ + int err; + + flush_iotlb_page(obj, e->da); + err = iopgtable_store_entry_core(obj, e); + if (!err) + prefetch_iotlb_entry(obj, e); + return err; +} +EXPORT_SYMBOL_GPL(omap_iopgtable_store_entry); + +/** + * iopgtable_lookup_entry - Lookup an iommu pte entry + * @obj: target iommu + * @da: iommu device virtual address + * @ppgd: iommu pgd entry pointer to be returned + * @ppte: iommu pte entry pointer to be returned + **/ +static void +iopgtable_lookup_entry(struct omap_iommu *obj, u32 da, u32 **ppgd, u32 **ppte) +{ + u32 *iopgd, *iopte = NULL; + + iopgd = iopgd_offset(obj, da); + if (!*iopgd) + goto out; + + if (iopgd_is_table(*iopgd)) + iopte = iopte_offset(iopgd, da); +out: + *ppgd = iopgd; + *ppte = iopte; +} + +static size_t iopgtable_clear_entry_core(struct omap_iommu *obj, u32 da) +{ + size_t bytes; + u32 *iopgd = iopgd_offset(obj, da); + int nent = 1; + + if (!*iopgd) + return 0; + + if (iopgd_is_table(*iopgd)) { + int i; + u32 *iopte = iopte_offset(iopgd, da); + + bytes = IOPTE_SIZE; + if (*iopte & IOPTE_LARGE) { + nent *= 16; + /* rewind to the 1st entry */ + iopte = iopte_offset(iopgd, (da & IOLARGE_MASK)); + } + bytes *= nent; + memset(iopte, 0, nent * sizeof(*iopte)); + flush_iopte_range(iopte, iopte + (nent - 1) * sizeof(*iopte)); + + /* + * do table walk to check if this table is necessary or not + */ + iopte = iopte_offset(iopgd, 0); + for (i = 0; i < PTRS_PER_IOPTE; i++) + if (iopte[i]) + goto out; + + iopte_free(iopte); + nent = 1; /* for the next L1 entry */ + } else { + bytes = IOPGD_SIZE; + if ((*iopgd & IOPGD_SUPER) == IOPGD_SUPER) { + nent *= 16; + /* rewind to the 1st entry */ + iopgd = iopgd_offset(obj, (da & IOSUPER_MASK)); + } + bytes *= nent; + } + memset(iopgd, 0, nent * sizeof(*iopgd)); + flush_iopgd_range(iopgd, iopgd + (nent - 1) * sizeof(*iopgd)); +out: + return bytes; +} + +/** + * iopgtable_clear_entry - Remove an iommu pte entry + * @obj: target iommu + * @da: iommu device virtual address + **/ +static size_t iopgtable_clear_entry(struct omap_iommu *obj, u32 da) +{ + size_t bytes; + + spin_lock(&obj->page_table_lock); + + bytes = iopgtable_clear_entry_core(obj, da); + flush_iotlb_page(obj, da); + + spin_unlock(&obj->page_table_lock); + + return bytes; +} + +static void iopgtable_clear_entry_all(struct 
omap_iommu *obj) +{ + int i; + + spin_lock(&obj->page_table_lock); + + for (i = 0; i < PTRS_PER_IOPGD; i++) { + u32 da; + u32 *iopgd; + + da = i << IOPGD_SHIFT; + iopgd = iopgd_offset(obj, da); + + if (!*iopgd) + continue; + + if (iopgd_is_table(*iopgd)) + iopte_free(iopte_offset(iopgd, 0)); + + *iopgd = 0; + flush_iopgd_range(iopgd, iopgd); + } + + flush_iotlb_all(obj); + + spin_unlock(&obj->page_table_lock); +} + +/* + * Device IOMMU generic operations + */ +static irqreturn_t iommu_fault_handler(int irq, void *data) +{ + u32 da, errs; + u32 *iopgd, *iopte; + struct omap_iommu *obj = data; + struct iommu_domain *domain = obj->domain; + + if (!obj->refcount) + return IRQ_NONE; + + clk_enable(obj->clk); + errs = iommu_report_fault(obj, &da); + clk_disable(obj->clk); + if (errs == 0) + return IRQ_HANDLED; + + /* Fault callback or TLB/PTE Dynamic loading */ + if (!report_iommu_fault(domain, obj->dev, da, 0)) + return IRQ_HANDLED; + + iommu_disable(obj); + + iopgd = iopgd_offset(obj, da); + + if (!iopgd_is_table(*iopgd)) { + dev_err(obj->dev, "%s: errs:0x%08x da:0x%08x pgd:0x%p " + "*pgd:px%08x\n", obj->name, errs, da, iopgd, *iopgd); + return IRQ_NONE; + } + + iopte = iopte_offset(iopgd, da); + + dev_err(obj->dev, "%s: errs:0x%08x da:0x%08x pgd:0x%p *pgd:0x%08x " + "pte:0x%p *pte:0x%08x\n", obj->name, errs, da, iopgd, *iopgd, + iopte, *iopte); + + return IRQ_NONE; +} + +static int device_match_by_alias(struct device *dev, void *data) +{ + struct omap_iommu *obj = to_iommu(dev); + const char *name = data; + + pr_debug("%s: %s %s\n", __func__, obj->name, name); + + return strcmp(obj->name, name) == 0; +} + +/** + * omap_find_iommu_device() - find an omap iommu device by name + * @name: name of the iommu device + * + * The generic iommu API requires the caller to provide the device + * he wishes to attach to a certain iommu domain. + * + * Drivers generally should not bother with this as it should just + * be taken care of by the DMA-API using dev_archdata. + * + * This function is provided as an interim solution until the latter + * materializes, and omap3isp is fully migrated to the DMA-API. 
+ */ +struct device *omap_find_iommu_device(const char *name) +{ + return driver_find_device(&omap_iommu_driver.driver, NULL, + (void *)name, + device_match_by_alias); +} +EXPORT_SYMBOL_GPL(omap_find_iommu_device); + +/** + * omap_iommu_attach() - attach iommu device to an iommu domain + * @dev: target omap iommu device + * @iopgd: page table + **/ +static struct omap_iommu *omap_iommu_attach(struct device *dev, u32 *iopgd) +{ + int err = -ENOMEM; + struct omap_iommu *obj = to_iommu(dev); + + spin_lock(&obj->iommu_lock); + + /* an iommu device can only be attached once */ + if (++obj->refcount > 1) { + dev_err(dev, "%s: already attached!\n", obj->name); + err = -EBUSY; + goto err_enable; + } + + obj->iopgd = iopgd; + err = iommu_enable(obj); + if (err) + goto err_enable; + flush_iotlb_all(obj); + + if (!try_module_get(obj->owner)) + goto err_module; + + spin_unlock(&obj->iommu_lock); + + dev_dbg(obj->dev, "%s: %s\n", __func__, obj->name); + return obj; + +err_module: + if (obj->refcount == 1) + iommu_disable(obj); +err_enable: + obj->refcount--; + spin_unlock(&obj->iommu_lock); + return ERR_PTR(err); +} + +/** + * omap_iommu_detach - release iommu device + * @obj: target iommu + **/ +static void omap_iommu_detach(struct omap_iommu *obj) +{ + if (!obj || IS_ERR(obj)) + return; + + spin_lock(&obj->iommu_lock); + + if (--obj->refcount == 0) + iommu_disable(obj); + + module_put(obj->owner); + + obj->iopgd = NULL; + + spin_unlock(&obj->iommu_lock); + + dev_dbg(obj->dev, "%s: %s\n", __func__, obj->name); +} + +/* + * OMAP Device MMU(IOMMU) detection + */ +static int __devinit omap_iommu_probe(struct platform_device *pdev) +{ + int err = -ENODEV; + int irq; + struct omap_iommu *obj; + struct resource *res; + struct iommu_platform_data *pdata = pdev->dev.platform_data; + + if (pdev->num_resources != 2) + return -EINVAL; + + obj = kzalloc(sizeof(*obj) + MMU_REG_SIZE, GFP_KERNEL); + if (!obj) + return -ENOMEM; + + obj->clk = clk_get(&pdev->dev, pdata->clk_name); + if (IS_ERR(obj->clk)) + goto err_clk; + + obj->nr_tlb_entries = pdata->nr_tlb_entries; + obj->name = pdata->name; + obj->dev = &pdev->dev; + obj->ctx = (void *)obj + sizeof(*obj); + obj->da_start = pdata->da_start; + obj->da_end = pdata->da_end; + + spin_lock_init(&obj->iommu_lock); + mutex_init(&obj->mmap_lock); + spin_lock_init(&obj->page_table_lock); + INIT_LIST_HEAD(&obj->mmap); + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) { + err = -ENODEV; + goto err_mem; + } + + res = request_mem_region(res->start, resource_size(res), + dev_name(&pdev->dev)); + if (!res) { + err = -EIO; + goto err_mem; + } + + obj->regbase = ioremap(res->start, resource_size(res)); + if (!obj->regbase) { + err = -ENOMEM; + goto err_ioremap; + } + + irq = platform_get_irq(pdev, 0); + if (irq < 0) { + err = -ENODEV; + goto err_irq; + } + err = request_irq(irq, iommu_fault_handler, IRQF_SHARED, + dev_name(&pdev->dev), obj); + if (err < 0) + goto err_irq; + platform_set_drvdata(pdev, obj); + + dev_info(&pdev->dev, "%s registered\n", obj->name); + return 0; + +err_irq: + iounmap(obj->regbase); +err_ioremap: + release_mem_region(res->start, resource_size(res)); +err_mem: + clk_put(obj->clk); +err_clk: + kfree(obj); + return err; +} + +static int __devexit omap_iommu_remove(struct platform_device *pdev) +{ + int irq; + struct resource *res; + struct omap_iommu *obj = platform_get_drvdata(pdev); + + platform_set_drvdata(pdev, NULL); + + iopgtable_clear_entry_all(obj); + + irq = platform_get_irq(pdev, 0); + free_irq(irq, obj); + res = 
platform_get_resource(pdev, IORESOURCE_MEM, 0); + release_mem_region(res->start, resource_size(res)); + iounmap(obj->regbase); + + clk_put(obj->clk); + dev_info(&pdev->dev, "%s removed\n", obj->name); + kfree(obj); + return 0; +} + +static struct platform_driver omap_iommu_driver = { + .probe = omap_iommu_probe, + .remove = __devexit_p(omap_iommu_remove), + .driver = { + .name = "omap-iommu", + }, +}; + +static void iopte_cachep_ctor(void *iopte) +{ + clean_dcache_area(iopte, IOPTE_TABLE_SIZE); +} + +static int omap_iommu_map(struct iommu_domain *domain, unsigned long da, + phys_addr_t pa, int order, int prot) +{ + struct omap_iommu_domain *omap_domain = domain->priv; + struct omap_iommu *oiommu = omap_domain->iommu_dev; + struct device *dev = oiommu->dev; + size_t bytes = PAGE_SIZE << order; + struct iotlb_entry e; + int omap_pgsz; + u32 ret, flags; + + /* we only support mapping a single iommu page for now */ + omap_pgsz = bytes_to_iopgsz(bytes); + if (omap_pgsz < 0) { + dev_err(dev, "invalid size to map: %d\n", bytes); + return -EINVAL; + } + + dev_dbg(dev, "mapping da 0x%lx to pa 0x%x size 0x%x\n", da, pa, bytes); + + flags = omap_pgsz | prot; + + iotlb_init_entry(&e, da, pa, flags); + + ret = omap_iopgtable_store_entry(oiommu, &e); + if (ret) + dev_err(dev, "omap_iopgtable_store_entry failed: %d\n", ret); + + return ret; +} + +static int omap_iommu_unmap(struct iommu_domain *domain, unsigned long da, + int order) +{ + struct omap_iommu_domain *omap_domain = domain->priv; + struct omap_iommu *oiommu = omap_domain->iommu_dev; + struct device *dev = oiommu->dev; + size_t unmap_size; + + dev_dbg(dev, "unmapping da 0x%lx order %d\n", da, order); + + unmap_size = iopgtable_clear_entry(oiommu, da); + + return unmap_size ? get_order(unmap_size) : -EINVAL; +} + +static int +omap_iommu_attach_dev(struct iommu_domain *domain, struct device *dev) +{ + struct omap_iommu_domain *omap_domain = domain->priv; + struct omap_iommu *oiommu; + int ret = 0; + + spin_lock(&omap_domain->lock); + + /* only a single device is supported per domain for now */ + if (omap_domain->iommu_dev) { + dev_err(dev, "iommu domain is already attached\n"); + ret = -EBUSY; + goto out; + } + + /* get a handle to and enable the omap iommu */ + oiommu = omap_iommu_attach(dev, omap_domain->pgtable); + if (IS_ERR(oiommu)) { + ret = PTR_ERR(oiommu); + dev_err(dev, "can't get omap iommu: %d\n", ret); + goto out; + } + + omap_domain->iommu_dev = oiommu; + oiommu->domain = domain; + +out: + spin_unlock(&omap_domain->lock); + return ret; +} + +static void omap_iommu_detach_dev(struct iommu_domain *domain, + struct device *dev) +{ + struct omap_iommu_domain *omap_domain = domain->priv; + struct omap_iommu *oiommu = to_iommu(dev); + + spin_lock(&omap_domain->lock); + + /* only a single device is supported per domain for now */ + if (omap_domain->iommu_dev != oiommu) { + dev_err(dev, "invalid iommu device\n"); + goto out; + } + + iopgtable_clear_entry_all(oiommu); + + omap_iommu_detach(oiommu); + + omap_domain->iommu_dev = NULL; + +out: + spin_unlock(&omap_domain->lock); +} + +static int omap_iommu_domain_init(struct iommu_domain *domain) +{ + struct omap_iommu_domain *omap_domain; + + omap_domain = kzalloc(sizeof(*omap_domain), GFP_KERNEL); + if (!omap_domain) { + pr_err("kzalloc failed\n"); + goto out; + } + + omap_domain->pgtable = kzalloc(IOPGD_TABLE_SIZE, GFP_KERNEL); + if (!omap_domain->pgtable) { + pr_err("kzalloc failed\n"); + goto fail_nomem; + } + + /* + * should never fail, but please keep this around to ensure + * we keep the 
hardware happy + */ + BUG_ON(!IS_ALIGNED((long)omap_domain->pgtable, IOPGD_TABLE_SIZE)); + + clean_dcache_area(omap_domain->pgtable, IOPGD_TABLE_SIZE); + spin_lock_init(&omap_domain->lock); + + domain->priv = omap_domain; + + return 0; + +fail_nomem: + kfree(omap_domain); +out: + return -ENOMEM; +} + +/* assume device was already detached */ +static void omap_iommu_domain_destroy(struct iommu_domain *domain) +{ + struct omap_iommu_domain *omap_domain = domain->priv; + + domain->priv = NULL; + + kfree(omap_domain->pgtable); + kfree(omap_domain); +} + +static phys_addr_t omap_iommu_iova_to_phys(struct iommu_domain *domain, + unsigned long da) +{ + struct omap_iommu_domain *omap_domain = domain->priv; + struct omap_iommu *oiommu = omap_domain->iommu_dev; + struct device *dev = oiommu->dev; + u32 *pgd, *pte; + phys_addr_t ret = 0; + + iopgtable_lookup_entry(oiommu, da, &pgd, &pte); + + if (pte) { + if (iopte_is_small(*pte)) + ret = omap_iommu_translate(*pte, da, IOPTE_MASK); + else if (iopte_is_large(*pte)) + ret = omap_iommu_translate(*pte, da, IOLARGE_MASK); + else + dev_err(dev, "bogus pte 0x%x", *pte); + } else { + if (iopgd_is_section(*pgd)) + ret = omap_iommu_translate(*pgd, da, IOSECTION_MASK); + else if (iopgd_is_super(*pgd)) + ret = omap_iommu_translate(*pgd, da, IOSUPER_MASK); + else + dev_err(dev, "bogus pgd 0x%x", *pgd); + } + + return ret; +} + +static int omap_iommu_domain_has_cap(struct iommu_domain *domain, + unsigned long cap) +{ + return 0; +} + +static struct iommu_ops omap_iommu_ops = { + .domain_init = omap_iommu_domain_init, + .domain_destroy = omap_iommu_domain_destroy, + .attach_dev = omap_iommu_attach_dev, + .detach_dev = omap_iommu_detach_dev, + .map = omap_iommu_map, + .unmap = omap_iommu_unmap, + .iova_to_phys = omap_iommu_iova_to_phys, + .domain_has_cap = omap_iommu_domain_has_cap, +}; + +static int __init omap_iommu_init(void) +{ + struct kmem_cache *p; + const unsigned long flags = SLAB_HWCACHE_ALIGN; + size_t align = 1 << 10; /* L2 pagetable alignement */ + + p = kmem_cache_create("iopte_cache", IOPTE_TABLE_SIZE, align, flags, + iopte_cachep_ctor); + if (!p) + return -ENOMEM; + iopte_cachep = p; + + bus_set_iommu(&platform_bus_type, &omap_iommu_ops); + + return platform_driver_register(&omap_iommu_driver); +} +module_init(omap_iommu_init); + +static void __exit omap_iommu_exit(void) +{ + kmem_cache_destroy(iopte_cachep); + + platform_driver_unregister(&omap_iommu_driver); +} +module_exit(omap_iommu_exit); + +MODULE_DESCRIPTION("omap iommu: tlb and pagetable primitives"); +MODULE_ALIAS("platform:omap-iommu"); +MODULE_AUTHOR("Hiroshi DOYU, Paul Mundt and Toshihiro Kobayashi"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/iommu/omap-iovmm.c b/drivers/iommu/omap-iovmm.c new file mode 100644 index 000000000000..e8fdb8830f69 --- /dev/null +++ b/drivers/iommu/omap-iovmm.c @@ -0,0 +1,742 @@ +/* + * omap iommu: simple virtual address space management + * + * Copyright (C) 2008-2009 Nokia Corporation + * + * Written by Hiroshi DOYU <Hiroshi.DOYU@nokia.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#include <linux/err.h> +#include <linux/slab.h> +#include <linux/vmalloc.h> +#include <linux/device.h> +#include <linux/scatterlist.h> +#include <linux/iommu.h> + +#include <asm/cacheflush.h> +#include <asm/mach/map.h> + +#include <plat/iommu.h> +#include <plat/iovmm.h> + +#include <plat/iopgtable.h> + +static struct kmem_cache *iovm_area_cachep; + +/* return the offset of the first scatterlist entry in a sg table */ +static unsigned int sgtable_offset(const struct sg_table *sgt) +{ + if (!sgt || !sgt->nents) + return 0; + + return sgt->sgl->offset; +} + +/* return total bytes of sg buffers */ +static size_t sgtable_len(const struct sg_table *sgt) +{ + unsigned int i, total = 0; + struct scatterlist *sg; + + if (!sgt) + return 0; + + for_each_sg(sgt->sgl, sg, sgt->nents, i) { + size_t bytes; + + bytes = sg->length + sg->offset; + + if (!iopgsz_ok(bytes)) { + pr_err("%s: sg[%d] not iommu pagesize(%u %u)\n", + __func__, i, bytes, sg->offset); + return 0; + } + + if (i && sg->offset) { + pr_err("%s: sg[%d] offset not allowed in internal " + "entries\n", __func__, i); + return 0; + } + + total += bytes; + } + + return total; +} +#define sgtable_ok(x) (!!sgtable_len(x)) + +static unsigned max_alignment(u32 addr) +{ + int i; + unsigned pagesize[] = { SZ_16M, SZ_1M, SZ_64K, SZ_4K, }; + for (i = 0; i < ARRAY_SIZE(pagesize) && addr & (pagesize[i] - 1); i++) + ; + return (i < ARRAY_SIZE(pagesize)) ? pagesize[i] : 0; +} + +/* + * calculate the optimal number sg elements from total bytes based on + * iommu superpages + */ +static unsigned sgtable_nents(size_t bytes, u32 da, u32 pa) +{ + unsigned nr_entries = 0, ent_sz; + + if (!IS_ALIGNED(bytes, PAGE_SIZE)) { + pr_err("%s: wrong size %08x\n", __func__, bytes); + return 0; + } + + while (bytes) { + ent_sz = max_alignment(da | pa); + ent_sz = min_t(unsigned, ent_sz, iopgsz_max(bytes)); + nr_entries++; + da += ent_sz; + pa += ent_sz; + bytes -= ent_sz; + } + + return nr_entries; +} + +/* allocate and initialize sg_table header(a kind of 'superblock') */ +static struct sg_table *sgtable_alloc(const size_t bytes, u32 flags, + u32 da, u32 pa) +{ + unsigned int nr_entries; + int err; + struct sg_table *sgt; + + if (!bytes) + return ERR_PTR(-EINVAL); + + if (!IS_ALIGNED(bytes, PAGE_SIZE)) + return ERR_PTR(-EINVAL); + + if (flags & IOVMF_LINEAR) { + nr_entries = sgtable_nents(bytes, da, pa); + if (!nr_entries) + return ERR_PTR(-EINVAL); + } else + nr_entries = bytes / PAGE_SIZE; + + sgt = kzalloc(sizeof(*sgt), GFP_KERNEL); + if (!sgt) + return ERR_PTR(-ENOMEM); + + err = sg_alloc_table(sgt, nr_entries, GFP_KERNEL); + if (err) { + kfree(sgt); + return ERR_PTR(err); + } + + pr_debug("%s: sgt:%p(%d entries)\n", __func__, sgt, nr_entries); + + return sgt; +} + +/* free sg_table header(a kind of superblock) */ +static void sgtable_free(struct sg_table *sgt) +{ + if (!sgt) + return; + + sg_free_table(sgt); + kfree(sgt); + + pr_debug("%s: sgt:%p\n", __func__, sgt); +} + +/* map 'sglist' to a contiguous mpu virtual area and return 'va' */ +static void *vmap_sg(const struct sg_table *sgt) +{ + u32 va; + size_t total; + unsigned int i; + struct scatterlist *sg; + struct vm_struct *new; + const struct mem_type *mtype; + + mtype = get_mem_type(MT_DEVICE); + if (!mtype) + return ERR_PTR(-EINVAL); + + total = sgtable_len(sgt); + if (!total) + return ERR_PTR(-EINVAL); + + new = __get_vm_area(total, VM_IOREMAP, VMALLOC_START, VMALLOC_END); + if (!new) + return ERR_PTR(-ENOMEM); + va = (u32)new->addr; + + for_each_sg(sgt->sgl, sg, sgt->nents, i) { + size_t bytes; + u32 
pa; + int err; + + pa = sg_phys(sg) - sg->offset; + bytes = sg->length + sg->offset; + + BUG_ON(bytes != PAGE_SIZE); + + err = ioremap_page(va, pa, mtype); + if (err) + goto err_out; + + va += bytes; + } + + flush_cache_vmap((unsigned long)new->addr, + (unsigned long)(new->addr + total)); + return new->addr; + +err_out: + WARN_ON(1); /* FIXME: cleanup some mpu mappings */ + vunmap(new->addr); + return ERR_PTR(-EAGAIN); +} + +static inline void vunmap_sg(const void *va) +{ + vunmap(va); +} + +static struct iovm_struct *__find_iovm_area(struct omap_iommu *obj, + const u32 da) +{ + struct iovm_struct *tmp; + + list_for_each_entry(tmp, &obj->mmap, list) { + if ((da >= tmp->da_start) && (da < tmp->da_end)) { + size_t len; + + len = tmp->da_end - tmp->da_start; + + dev_dbg(obj->dev, "%s: %08x-%08x-%08x(%x) %08x\n", + __func__, tmp->da_start, da, tmp->da_end, len, + tmp->flags); + + return tmp; + } + } + + return NULL; +} + +/** + * omap_find_iovm_area - find iovma which includes @da + * @da: iommu device virtual address + * + * Find the existing iovma starting at @da + */ +struct iovm_struct *omap_find_iovm_area(struct omap_iommu *obj, u32 da) +{ + struct iovm_struct *area; + + mutex_lock(&obj->mmap_lock); + area = __find_iovm_area(obj, da); + mutex_unlock(&obj->mmap_lock); + + return area; +} +EXPORT_SYMBOL_GPL(omap_find_iovm_area); + +/* + * This finds the hole(area) which fits the requested address and len + * in iovmas mmap, and returns the new allocated iovma. + */ +static struct iovm_struct *alloc_iovm_area(struct omap_iommu *obj, u32 da, + size_t bytes, u32 flags) +{ + struct iovm_struct *new, *tmp; + u32 start, prev_end, alignment; + + if (!obj || !bytes) + return ERR_PTR(-EINVAL); + + start = da; + alignment = PAGE_SIZE; + + if (~flags & IOVMF_DA_FIXED) { + /* Don't map address 0 */ + start = obj->da_start ? 
obj->da_start : alignment; + + if (flags & IOVMF_LINEAR) + alignment = iopgsz_max(bytes); + start = roundup(start, alignment); + } else if (start < obj->da_start || start > obj->da_end || + obj->da_end - start < bytes) { + return ERR_PTR(-EINVAL); + } + + tmp = NULL; + if (list_empty(&obj->mmap)) + goto found; + + prev_end = 0; + list_for_each_entry(tmp, &obj->mmap, list) { + + if (prev_end > start) + break; + + if (tmp->da_start > start && (tmp->da_start - start) >= bytes) + goto found; + + if (tmp->da_end >= start && ~flags & IOVMF_DA_FIXED) + start = roundup(tmp->da_end + 1, alignment); + + prev_end = tmp->da_end; + } + + if ((start >= prev_end) && (obj->da_end - start >= bytes)) + goto found; + + dev_dbg(obj->dev, "%s: no space to fit %08x(%x) flags: %08x\n", + __func__, da, bytes, flags); + + return ERR_PTR(-EINVAL); + +found: + new = kmem_cache_zalloc(iovm_area_cachep, GFP_KERNEL); + if (!new) + return ERR_PTR(-ENOMEM); + + new->iommu = obj; + new->da_start = start; + new->da_end = start + bytes; + new->flags = flags; + + /* + * keep ascending order of iovmas + */ + if (tmp) + list_add_tail(&new->list, &tmp->list); + else + list_add(&new->list, &obj->mmap); + + dev_dbg(obj->dev, "%s: found %08x-%08x-%08x(%x) %08x\n", + __func__, new->da_start, start, new->da_end, bytes, flags); + + return new; +} + +static void free_iovm_area(struct omap_iommu *obj, struct iovm_struct *area) +{ + size_t bytes; + + BUG_ON(!obj || !area); + + bytes = area->da_end - area->da_start; + + dev_dbg(obj->dev, "%s: %08x-%08x(%x) %08x\n", + __func__, area->da_start, area->da_end, bytes, area->flags); + + list_del(&area->list); + kmem_cache_free(iovm_area_cachep, area); +} + +/** + * omap_da_to_va - convert (d) to (v) + * @obj: objective iommu + * @da: iommu device virtual address + * @va: mpu virtual address + * + * Returns mpu virtual addr which corresponds to a given device virtual addr + */ +void *omap_da_to_va(struct omap_iommu *obj, u32 da) +{ + void *va = NULL; + struct iovm_struct *area; + + mutex_lock(&obj->mmap_lock); + + area = __find_iovm_area(obj, da); + if (!area) { + dev_dbg(obj->dev, "%s: no da area(%08x)\n", __func__, da); + goto out; + } + va = area->va; +out: + mutex_unlock(&obj->mmap_lock); + + return va; +} +EXPORT_SYMBOL_GPL(omap_da_to_va); + +static void sgtable_fill_vmalloc(struct sg_table *sgt, void *_va) +{ + unsigned int i; + struct scatterlist *sg; + void *va = _va; + void *va_end; + + for_each_sg(sgt->sgl, sg, sgt->nents, i) { + struct page *pg; + const size_t bytes = PAGE_SIZE; + + /* + * iommu 'superpage' isn't supported with 'omap_iommu_vmalloc()' + */ + pg = vmalloc_to_page(va); + BUG_ON(!pg); + sg_set_page(sg, pg, bytes, 0); + + va += bytes; + } + + va_end = _va + PAGE_SIZE * i; +} + +static inline void sgtable_drain_vmalloc(struct sg_table *sgt) +{ + /* + * Actually this is not necessary at all, just exists for + * consistency of the code readability. 
+ */
+ BUG_ON(!sgt);
+}
+
+/* create 'da' <-> 'pa' mapping from 'sgt' */
+static int map_iovm_area(struct iommu_domain *domain, struct iovm_struct *new,
+ const struct sg_table *sgt, u32 flags)
+{
+ int err;
+ unsigned int i, j;
+ struct scatterlist *sg;
+ u32 da = new->da_start;
+ int order;
+
+ if (!domain || !sgt)
+ return -EINVAL;
+
+ BUG_ON(!sgtable_ok(sgt));
+
+ for_each_sg(sgt->sgl, sg, sgt->nents, i) {
+ u32 pa;
+ size_t bytes;
+
+ pa = sg_phys(sg) - sg->offset;
+ bytes = sg->length + sg->offset;
+
+ flags &= ~IOVMF_PGSZ_MASK;
+
+ if (bytes_to_iopgsz(bytes) < 0)
+ goto err_out;
+
+ order = get_order(bytes);
+
+ pr_debug("%s: [%d] %08x %08x(%x)\n", __func__,
+ i, da, pa, bytes);
+
+ err = iommu_map(domain, da, pa, order, flags);
+ if (err)
+ goto err_out;
+
+ da += bytes;
+ }
+ return 0;
+
+err_out:
+ da = new->da_start;
+
+ for_each_sg(sgt->sgl, sg, i, j) {
+ size_t bytes;
+
+ bytes = sg->length + sg->offset;
+ order = get_order(bytes);
+
+ /* ignore failures.. we're already handling one */
+ iommu_unmap(domain, da, order);
+
+ da += bytes;
+ }
+ return err;
+}
+
+/* release 'da' <-> 'pa' mapping */
+static void unmap_iovm_area(struct iommu_domain *domain, struct omap_iommu *obj,
+ struct iovm_struct *area)
+{
+ u32 start;
+ size_t total = area->da_end - area->da_start;
+ const struct sg_table *sgt = area->sgt;
+ struct scatterlist *sg;
+ int i, err;
+
+ BUG_ON(!sgtable_ok(sgt));
+ BUG_ON((!total) || !IS_ALIGNED(total, PAGE_SIZE));
+
+ start = area->da_start;
+ for_each_sg(sgt->sgl, sg, sgt->nents, i) {
+ size_t bytes;
+ int order;
+
+ bytes = sg->length + sg->offset;
+ order = get_order(bytes);
+
+ err = iommu_unmap(domain, start, order);
+ if (err < 0)
+ break;
+
+ dev_dbg(obj->dev, "%s: unmap %08x(%x) %08x\n",
+ __func__, start, bytes, area->flags);
+
+ BUG_ON(!IS_ALIGNED(bytes, PAGE_SIZE));
+
+ total -= bytes;
+ start += bytes;
+ }
+ BUG_ON(total);
+}
+
+/* template function for all unmapping */
+static struct sg_table *unmap_vm_area(struct iommu_domain *domain,
+ struct omap_iommu *obj, const u32 da,
+ void (*fn)(const void *), u32 flags)
+{
+ struct sg_table *sgt = NULL;
+ struct iovm_struct *area;
+
+ if (!IS_ALIGNED(da, PAGE_SIZE)) {
+ dev_err(obj->dev, "%s: alignment err(%08x)\n", __func__, da);
+ return NULL;
+ }
+
+ mutex_lock(&obj->mmap_lock);
+
+ area = __find_iovm_area(obj, da);
+ if (!area) {
+ dev_dbg(obj->dev, "%s: no da area(%08x)\n", __func__, da);
+ goto out;
+ }
+
+ if ((area->flags & flags) != flags) {
+ dev_err(obj->dev, "%s: wrong flags(%08x)\n", __func__,
+ area->flags);
+ goto out;
+ }
+ sgt = (struct sg_table *)area->sgt;
+
+ unmap_iovm_area(domain, obj, area);
+
+ fn(area->va);
+
+ dev_dbg(obj->dev, "%s: %08x-%08x-%08x(%x) %08x\n", __func__,
+ area->da_start, da, area->da_end,
+ area->da_end - area->da_start, area->flags);
+
+ free_iovm_area(obj, area);
+out:
+ mutex_unlock(&obj->mmap_lock);
+
+ return sgt;
+}
+
+static u32 map_iommu_region(struct iommu_domain *domain, struct omap_iommu *obj,
+ u32 da, const struct sg_table *sgt, void *va,
+ size_t bytes, u32 flags)
+{
+ int err = -ENOMEM;
+ struct iovm_struct *new;
+
+ mutex_lock(&obj->mmap_lock);
+
+ new = alloc_iovm_area(obj, da, bytes, flags);
+ if (IS_ERR(new)) {
+ err = PTR_ERR(new);
+ goto err_alloc_iovma;
+ }
+ new->va = va;
+ new->sgt = sgt;
+
+ if (map_iovm_area(domain, new, sgt, new->flags))
+ goto err_map;
+
+ mutex_unlock(&obj->mmap_lock);
+
+ dev_dbg(obj->dev, "%s: da:%08x(%x) flags:%08x va:%p\n",
+ __func__, new->da_start, bytes, new->flags, va);
+
+ return new->da_start;
+
+err_map:
+ free_iovm_area(obj, new);
+err_alloc_iovma:
+ mutex_unlock(&obj->mmap_lock);
+ return err;
+}
+
+static inline u32
+__iommu_vmap(struct iommu_domain *domain, struct omap_iommu *obj,
+ u32 da, const struct sg_table *sgt,
+ void *va, size_t bytes, u32 flags)
+{
+ return map_iommu_region(domain, obj, da, sgt, va, bytes, flags);
+}
+
+/**
+ * omap_iommu_vmap - (d)-(p)-(v) address mapper
+ * @domain: iommu domain
+ * @obj: objective iommu
+ * @da: iommu device virtual address
+ * @sgt: address of scatter gather table
+ * @flags: iovma and page property
+ *
+ * Creates a 1-n-1 mapping with the given @sgt and returns @da.
+ * All @sgt elements must be io page size aligned.
+ */
+u32 omap_iommu_vmap(struct iommu_domain *domain, struct omap_iommu *obj, u32 da,
+ const struct sg_table *sgt, u32 flags)
+{
+ size_t bytes;
+ void *va = NULL;
+
+ if (!obj || !obj->dev || !sgt)
+ return -EINVAL;
+
+ bytes = sgtable_len(sgt);
+ if (!bytes)
+ return -EINVAL;
+ bytes = PAGE_ALIGN(bytes);
+
+ if (flags & IOVMF_MMIO) {
+ va = vmap_sg(sgt);
+ if (IS_ERR(va))
+ return PTR_ERR(va);
+ }
+
+ flags |= IOVMF_DISCONT;
+ flags |= IOVMF_MMIO;
+
+ da = __iommu_vmap(domain, obj, da, sgt, va, bytes, flags);
+ if (IS_ERR_VALUE(da))
+ vunmap_sg(va);
+
+ return da + sgtable_offset(sgt);
+}
+EXPORT_SYMBOL_GPL(omap_iommu_vmap);
+
+/**
+ * omap_iommu_vunmap - release virtual mapping obtained by 'omap_iommu_vmap()'
+ * @domain: iommu domain
+ * @obj: objective iommu
+ * @da: iommu device virtual address
+ *
+ * Free the iommu virtually contiguous memory area starting at
+ * @da, which was returned by 'omap_iommu_vmap()'.
+ */
+struct sg_table *
+omap_iommu_vunmap(struct iommu_domain *domain, struct omap_iommu *obj, u32 da)
+{
+ struct sg_table *sgt;
+ /*
+ * 'sgt' is allocated before 'omap_iommu_vmap()' is called.
+ * Just returns 'sgt' to the caller to free
+ */
+ da &= PAGE_MASK;
+ sgt = unmap_vm_area(domain, obj, da, vunmap_sg,
+ IOVMF_DISCONT | IOVMF_MMIO);
+ if (!sgt)
+ dev_dbg(obj->dev, "%s: No sgt\n", __func__);
+ return sgt;
+}
+EXPORT_SYMBOL_GPL(omap_iommu_vunmap);
+
+/**
+ * omap_iommu_vmalloc - (d)-(p)-(v) address allocator and mapper
+ * @domain: iommu domain
+ * @obj: objective iommu
+ * @da: contiguous iommu virtual memory
+ * @bytes: allocation size
+ * @flags: iovma and page property
+ *
+ * Allocates @bytes linearly, creates a 1-n-1 mapping and returns
+ * @da, which might be adjusted if 'IOVMF_DA_FIXED' is not set.
+ */
+u32
+omap_iommu_vmalloc(struct iommu_domain *domain, struct omap_iommu *obj, u32 da,
+ size_t bytes, u32 flags)
+{
+ void *va;
+ struct sg_table *sgt;
+
+ if (!obj || !obj->dev || !bytes)
+ return -EINVAL;
+
+ bytes = PAGE_ALIGN(bytes);
+
+ va = vmalloc(bytes);
+ if (!va)
+ return -ENOMEM;
+
+ flags |= IOVMF_DISCONT;
+ flags |= IOVMF_ALLOC;
+
+ sgt = sgtable_alloc(bytes, flags, da, 0);
+ if (IS_ERR(sgt)) {
+ da = PTR_ERR(sgt);
+ goto err_sgt_alloc;
+ }
+ sgtable_fill_vmalloc(sgt, va);
+
+ da = __iommu_vmap(domain, obj, da, sgt, va, bytes, flags);
+ if (IS_ERR_VALUE(da))
+ goto err_iommu_vmap;
+
+ return da;
+
+err_iommu_vmap:
+ sgtable_drain_vmalloc(sgt);
+ sgtable_free(sgt);
+err_sgt_alloc:
+ vfree(va);
+ return da;
+}
+EXPORT_SYMBOL_GPL(omap_iommu_vmalloc);
+
+/**
+ * omap_iommu_vfree - release memory allocated by 'omap_iommu_vmalloc()'
+ * @domain: iommu domain
+ * @obj: objective iommu
+ * @da: iommu device virtual address
+ *
+ * Frees the iommu virtually contiguous memory area starting at
+ * @da, as obtained from 'omap_iommu_vmalloc()'.
+ */
+void omap_iommu_vfree(struct iommu_domain *domain, struct omap_iommu *obj,
+ const u32 da)
+{
+ struct sg_table *sgt;
+
+ sgt = unmap_vm_area(domain, obj, da, vfree,
+ IOVMF_DISCONT | IOVMF_ALLOC);
+ if (!sgt)
+ dev_dbg(obj->dev, "%s: No sgt\n", __func__);
+ sgtable_free(sgt);
+}
+EXPORT_SYMBOL_GPL(omap_iommu_vfree);
+
+static int __init iovmm_init(void)
+{
+ const unsigned long flags = SLAB_HWCACHE_ALIGN;
+ struct kmem_cache *p;
+
+ p = kmem_cache_create("iovm_area_cache", sizeof(struct iovm_struct), 0,
+ flags, NULL);
+ if (!p)
+ return -ENOMEM;
+ iovm_area_cachep = p;
+
+ return 0;
+}
+module_init(iovmm_init);
+
+static void __exit iovmm_exit(void)
+{
+ kmem_cache_destroy(iovm_area_cachep);
+}
+module_exit(iovmm_exit);
+
+MODULE_DESCRIPTION("omap iommu: simple virtual address space management");
+MODULE_AUTHOR("Hiroshi DOYU <Hiroshi.DOYU@nokia.com>");
+MODULE_LICENSE("GPL v2");
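
For reference, the sketch below shows roughly how a client driver would drive the allocation half of the iovmm API added by this diff (omap_iommu_vmalloc(), omap_da_to_va(), omap_iommu_vfree()). It is illustrative only and not part of the commit: the helper names, the <plat/iommu.h>/<plat/iovmm.h> include paths and the zero flags value are assumptions, and obtaining the struct omap_iommu handle plus the attached iommu_domain (for example via iommu_domain_alloc(&platform_bus_type) and iommu_attach_device(), as introduced elsewhere in this merge) is left to the caller.

/*
 * Illustrative sketch only -- not part of this commit.  A hypothetical
 * client helper that allocates and frees a device-visible buffer with
 * the iovmm API.  The omap_iommu handle and the attached iommu_domain
 * are assumed to be set up elsewhere by the caller.
 */
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/iommu.h>
#include <plat/iommu.h>		/* struct omap_iommu (assumed header path) */
#include <plat/iovmm.h>		/* omap_iommu_vmalloc() and friends (assumed) */

static u32 example_alloc_buffer(struct iommu_domain *domain,
				struct omap_iommu *obj, size_t len)
{
	u32 da;

	/*
	 * da == 0 without IOVMF_DA_FIXED lets alloc_iovm_area() pick a
	 * free device address.  Real callers pass the IOVMF_* attribute
	 * flags they need instead of 0.
	 */
	da = omap_iommu_vmalloc(domain, obj, 0, len, 0);
	if (IS_ERR_VALUE(da))
		return da;	/* negative errno encoded in the return value */

	/* the vmalloc()ed backing memory is also reachable from the CPU */
	pr_info("mapped %zu bytes at da 0x%08x (cpu va %p)\n",
		len, da, omap_da_to_va(obj, da));

	return da;
}

static void example_free_buffer(struct iommu_domain *domain,
				struct omap_iommu *obj, u32 da)
{
	/* tears down the 'da' mapping and vfree()s the backing memory */
	omap_iommu_vfree(domain, obj, da);
}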