Diffstat (limited to 'drivers/iommu/iommu.c')
-rw-r--r-- | drivers/iommu/iommu.c | 354
1 files changed, 252 insertions, 102 deletions
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index 857d4c2fd1a2..847ad47a2dfd 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -18,7 +18,6 @@
 #include <linux/errno.h>
 #include <linux/iommu.h>
 #include <linux/idr.h>
-#include <linux/notifier.h>
 #include <linux/err.h>
 #include <linux/pci.h>
 #include <linux/bitops.h>
@@ -40,14 +39,16 @@ struct iommu_group {
 	struct kobject *devices_kobj;
 	struct list_head devices;
 	struct mutex mutex;
-	struct blocking_notifier_head notifier;
 	void *iommu_data;
 	void (*iommu_data_release)(void *iommu_data);
 	char *name;
 	int id;
 	struct iommu_domain *default_domain;
+	struct iommu_domain *blocking_domain;
 	struct iommu_domain *domain;
 	struct list_head entry;
+	unsigned int owner_cnt;
+	void *owner;
 };
 
 struct group_device {
@@ -82,8 +83,8 @@ static int __iommu_attach_device(struct iommu_domain *domain,
 				 struct device *dev);
 static int __iommu_attach_group(struct iommu_domain *domain,
 				struct iommu_group *group);
-static void __iommu_detach_group(struct iommu_domain *domain,
-				 struct iommu_group *group);
+static int __iommu_group_set_domain(struct iommu_group *group,
+				    struct iommu_domain *new_domain);
 static int iommu_create_device_direct_mappings(struct iommu_group *group,
 					       struct device *dev);
 static struct iommu_group *iommu_group_get_for_dev(struct device *dev);
@@ -294,7 +295,11 @@ int iommu_probe_device(struct device *dev)
 	mutex_lock(&group->mutex);
 	iommu_alloc_default_domain(group, dev);
 
-	if (group->default_domain) {
+	/*
+	 * If device joined an existing group which has been claimed, don't
+	 * attach the default domain.
+	 */
+	if (group->default_domain && !group->owner) {
 		ret = __iommu_attach_device(group->default_domain, dev);
 		if (ret) {
 			mutex_unlock(&group->mutex);
@@ -599,6 +604,8 @@ static void iommu_group_release(struct kobject *kobj)
 
 	if (group->default_domain)
 		iommu_domain_free(group->default_domain);
+	if (group->blocking_domain)
+		iommu_domain_free(group->blocking_domain);
 
 	kfree(group->name);
 	kfree(group);
@@ -633,7 +640,6 @@ struct iommu_group *iommu_group_alloc(void)
 	mutex_init(&group->mutex);
 	INIT_LIST_HEAD(&group->devices);
 	INIT_LIST_HEAD(&group->entry);
-	BLOCKING_INIT_NOTIFIER_HEAD(&group->notifier);
 
 	ret = ida_simple_get(&iommu_group_ida, 0, 0, GFP_KERNEL);
 	if (ret < 0) {
@@ -906,10 +912,6 @@ rename:
 	if (ret)
 		goto err_put_group;
 
-	/* Notify any listeners about change to group. */
-	blocking_notifier_call_chain(&group->notifier,
-				     IOMMU_GROUP_NOTIFY_ADD_DEVICE, dev);
-
 	trace_add_device_to_group(group->id, dev);
 
 	dev_info(dev, "Adding to iommu group %d\n", group->id);
@@ -951,10 +953,6 @@ void iommu_group_remove_device(struct device *dev)
 
 	dev_info(dev, "Removing from iommu group %d\n", group->id);
 
-	/* Pre-notify listeners that a device is being removed. */
-	blocking_notifier_call_chain(&group->notifier,
-				     IOMMU_GROUP_NOTIFY_DEL_DEVICE, dev);
-
 	mutex_lock(&group->mutex);
 	list_for_each_entry(tmp_device, &group->devices, list) {
 		if (tmp_device->dev == dev) {
@@ -1077,36 +1075,6 @@ void iommu_group_put(struct iommu_group *group)
 EXPORT_SYMBOL_GPL(iommu_group_put);
 
 /**
- * iommu_group_register_notifier - Register a notifier for group changes
- * @group: the group to watch
- * @nb: notifier block to signal
- *
- * This function allows iommu group users to track changes in a group.
- * See include/linux/iommu.h for actions sent via this notifier.  Caller
- * should hold a reference to the group throughout notifier registration.
- */
-int iommu_group_register_notifier(struct iommu_group *group,
-				  struct notifier_block *nb)
-{
-	return blocking_notifier_chain_register(&group->notifier, nb);
-}
-EXPORT_SYMBOL_GPL(iommu_group_register_notifier);
-
-/**
- * iommu_group_unregister_notifier - Unregister a notifier
- * @group: the group to watch
- * @nb: notifier block to signal
- *
- * Unregister a previously registered group notifier block.
- */
-int iommu_group_unregister_notifier(struct iommu_group *group,
-				    struct notifier_block *nb)
-{
-	return blocking_notifier_chain_unregister(&group->notifier, nb);
-}
-EXPORT_SYMBOL_GPL(iommu_group_unregister_notifier);
-
-/**
  * iommu_register_device_fault_handler() - Register a device fault handler
  * @dev: the device
  * @handler: the fault handler
@@ -1651,14 +1619,8 @@ static int remove_iommu_group(struct device *dev, void *data)
 static int iommu_bus_notifier(struct notifier_block *nb,
 			      unsigned long action, void *data)
 {
-	unsigned long group_action = 0;
 	struct device *dev = data;
-	struct iommu_group *group;
 
-	/*
-	 * ADD/DEL call into iommu driver ops if provided, which may
-	 * result in ADD/DEL notifiers to group->notifier
-	 */
 	if (action == BUS_NOTIFY_ADD_DEVICE) {
 		int ret;
 
@@ -1669,34 +1631,6 @@ static int iommu_bus_notifier(struct notifier_block *nb,
 		return NOTIFY_OK;
 	}
 
-	/*
-	 * Remaining BUS_NOTIFYs get filtered and republished to the
-	 * group, if anyone is listening
-	 */
-	group = iommu_group_get(dev);
-	if (!group)
-		return 0;
-
-	switch (action) {
-	case BUS_NOTIFY_BIND_DRIVER:
-		group_action = IOMMU_GROUP_NOTIFY_BIND_DRIVER;
-		break;
-	case BUS_NOTIFY_BOUND_DRIVER:
-		group_action = IOMMU_GROUP_NOTIFY_BOUND_DRIVER;
-		break;
-	case BUS_NOTIFY_UNBIND_DRIVER:
-		group_action = IOMMU_GROUP_NOTIFY_UNBIND_DRIVER;
-		break;
-	case BUS_NOTIFY_UNBOUND_DRIVER:
-		group_action = IOMMU_GROUP_NOTIFY_UNBOUND_DRIVER;
-		break;
-	}
-
-	if (group_action)
-		blocking_notifier_call_chain(&group->notifier,
-					     group_action, dev);
-
-	iommu_group_put(group);
 	return 0;
 }
 
@@ -1913,6 +1847,29 @@ bool iommu_present(struct bus_type *bus)
 }
 EXPORT_SYMBOL_GPL(iommu_present);
 
+/**
+ * device_iommu_capable() - check for a general IOMMU capability
+ * @dev: device to which the capability would be relevant, if available
+ * @cap: IOMMU capability
+ *
+ * Return: true if an IOMMU is present and supports the given capability
+ * for the given device, otherwise false.
+ */
+bool device_iommu_capable(struct device *dev, enum iommu_cap cap)
+{
+	const struct iommu_ops *ops;
+
+	if (!dev->iommu || !dev->iommu->iommu_dev)
+		return false;
+
+	ops = dev_iommu_ops(dev);
+	if (!ops->capable)
+		return false;
+
+	return ops->capable(cap);
+}
+EXPORT_SYMBOL_GPL(device_iommu_capable);
+
 bool iommu_capable(struct bus_type *bus, enum iommu_cap cap)
 {
 	if (!bus->iommu_ops || !bus->iommu_ops->capable)
@@ -1983,6 +1940,24 @@ void iommu_domain_free(struct iommu_domain *domain)
 }
 EXPORT_SYMBOL_GPL(iommu_domain_free);
 
+/*
+ * Put the group's domain back to the appropriate core-owned domain - either the
+ * standard kernel-mode DMA configuration or an all-DMA-blocked domain.
+ */
+static void __iommu_group_set_core_domain(struct iommu_group *group)
+{
+	struct iommu_domain *new_domain;
+	int ret;
+
+	if (group->owner)
+		new_domain = group->blocking_domain;
+	else
+		new_domain = group->default_domain;
+
+	ret = __iommu_group_set_domain(group, new_domain);
+	WARN(ret, "iommu driver failed to attach the default/blocking domain");
+}
+
 static int __iommu_attach_device(struct iommu_domain *domain,
 				 struct device *dev)
 {
@@ -2039,9 +2014,6 @@ static void __iommu_detach_device(struct iommu_domain *domain,
 	if (iommu_is_attach_deferred(dev))
 		return;
 
-	if (unlikely(domain->ops->detach_dev == NULL))
-		return;
-
 	domain->ops->detach_dev(domain, dev);
 	trace_detach_device_from_domain(dev);
 }
@@ -2055,12 +2027,10 @@ void iommu_detach_device(struct iommu_domain *domain, struct device *dev)
 		return;
 
 	mutex_lock(&group->mutex);
-	if (iommu_group_device_count(group) != 1) {
-		WARN_ON(1);
+	if (WARN_ON(domain != group->domain) ||
+	    WARN_ON(iommu_group_device_count(group) != 1))
 		goto out_unlock;
-	}
-
-	__iommu_detach_group(domain, group);
+	__iommu_group_set_core_domain(group);
 
 out_unlock:
 	mutex_unlock(&group->mutex);
@@ -2116,7 +2086,8 @@ static int __iommu_attach_group(struct iommu_domain *domain,
 {
 	int ret;
 
-	if (group->default_domain && group->domain != group->default_domain)
+	if (group->domain && group->domain != group->default_domain &&
+	    group->domain != group->blocking_domain)
 		return -EBUSY;
 
 	ret = __iommu_group_for_each_dev(group, domain,
@@ -2148,34 +2119,49 @@ static int iommu_group_do_detach_device(struct device *dev, void *data)
 	return 0;
 }
 
-static void __iommu_detach_group(struct iommu_domain *domain,
-				 struct iommu_group *group)
+static int __iommu_group_set_domain(struct iommu_group *group,
+				    struct iommu_domain *new_domain)
 {
 	int ret;
 
-	if (!group->default_domain) {
-		__iommu_group_for_each_dev(group, domain,
+	if (group->domain == new_domain)
+		return 0;
+
+	/*
+	 * New drivers should support default domains and so the detach_dev() op
+	 * will never be called. Otherwise the NULL domain represents some
+	 * platform specific behavior.
+	 */
+	if (!new_domain) {
+		if (WARN_ON(!group->domain->ops->detach_dev))
+			return -EINVAL;
+		__iommu_group_for_each_dev(group, group->domain,
 					   iommu_group_do_detach_device);
 		group->domain = NULL;
-		return;
+		return 0;
 	}
 
-	if (group->domain == group->default_domain)
-		return;
-
-	/* Detach by re-attaching to the default domain */
-	ret = __iommu_group_for_each_dev(group, group->default_domain,
+	/*
+	 * Changing the domain is done by calling attach_dev() on the new
+	 * domain. This switch does not have to be atomic and DMA can be
+	 * discarded during the transition. DMA must only be able to access
+	 * either new_domain or group->domain, never something else.
+	 *
+	 * Note that this is called in error unwind paths, attaching to a
+	 * domain that has already been attached cannot fail.
+	 */
+	ret = __iommu_group_for_each_dev(group, new_domain,
 					 iommu_group_do_attach_device);
-	if (ret != 0)
-		WARN_ON(1);
-	else
-		group->domain = group->default_domain;
+	if (ret)
+		return ret;
+	group->domain = new_domain;
+	return 0;
 }
 
 void iommu_detach_group(struct iommu_domain *domain, struct iommu_group *group)
 {
 	mutex_lock(&group->mutex);
-	__iommu_detach_group(domain, group);
+	__iommu_group_set_core_domain(group);
 	mutex_unlock(&group->mutex);
 }
 EXPORT_SYMBOL_GPL(iommu_detach_group);
@@ -3102,3 +3088,167 @@ out:
 
 	return ret;
 }
+
+/**
+ * iommu_device_use_default_domain() - Device driver wants to handle device
+ *                                     DMA through the kernel DMA API.
+ * @dev: The device.
+ *
+ * The device driver about to bind @dev wants to do DMA through the kernel
+ * DMA API. Return 0 if it is allowed, otherwise an error.
+ */
+int iommu_device_use_default_domain(struct device *dev)
+{
+	struct iommu_group *group = iommu_group_get(dev);
+	int ret = 0;
+
+	if (!group)
+		return 0;
+
+	mutex_lock(&group->mutex);
+	if (group->owner_cnt) {
+		if (group->domain != group->default_domain ||
+		    group->owner) {
+			ret = -EBUSY;
+			goto unlock_out;
+		}
+	}
+
+	group->owner_cnt++;
+
+unlock_out:
+	mutex_unlock(&group->mutex);
+	iommu_group_put(group);
+
+	return ret;
+}
+
+/**
+ * iommu_device_unuse_default_domain() - Device driver stops handling device
+ *                                       DMA through the kernel DMA API.
+ * @dev: The device.
+ *
+ * The device driver doesn't want to do DMA through kernel DMA API anymore.
+ * It must be called after iommu_device_use_default_domain().
+ */
+void iommu_device_unuse_default_domain(struct device *dev)
+{
+	struct iommu_group *group = iommu_group_get(dev);
+
+	if (!group)
+		return;
+
+	mutex_lock(&group->mutex);
+	if (!WARN_ON(!group->owner_cnt))
+		group->owner_cnt--;
+
+	mutex_unlock(&group->mutex);
+	iommu_group_put(group);
+}
+
+static int __iommu_group_alloc_blocking_domain(struct iommu_group *group)
+{
+	struct group_device *dev =
+		list_first_entry(&group->devices, struct group_device, list);
+
+	if (group->blocking_domain)
+		return 0;
+
+	group->blocking_domain =
+		__iommu_domain_alloc(dev->dev->bus, IOMMU_DOMAIN_BLOCKED);
+	if (!group->blocking_domain) {
+		/*
+		 * For drivers that do not yet understand IOMMU_DOMAIN_BLOCKED
+		 * create an empty domain instead.
+		 */
+		group->blocking_domain = __iommu_domain_alloc(
+			dev->dev->bus, IOMMU_DOMAIN_UNMANAGED);
+		if (!group->blocking_domain)
+			return -EINVAL;
+	}
+	return 0;
+}
+
+/**
+ * iommu_group_claim_dma_owner() - Set DMA ownership of a group
+ * @group: The group.
+ * @owner: Caller specified pointer. Used for exclusive ownership.
+ *
+ * This is to support backward compatibility for vfio which manages
+ * the dma ownership in iommu_group level. New invocations on this
+ * interface should be prohibited.
+ */
+int iommu_group_claim_dma_owner(struct iommu_group *group, void *owner)
+{
+	int ret = 0;
+
+	mutex_lock(&group->mutex);
+	if (group->owner_cnt) {
+		ret = -EPERM;
+		goto unlock_out;
+	} else {
+		if (group->domain && group->domain != group->default_domain) {
+			ret = -EBUSY;
+			goto unlock_out;
+		}
+
+		ret = __iommu_group_alloc_blocking_domain(group);
+		if (ret)
+			goto unlock_out;
+
+		ret = __iommu_group_set_domain(group, group->blocking_domain);
+		if (ret)
+			goto unlock_out;
+		group->owner = owner;
+	}
+
+	group->owner_cnt++;
+unlock_out:
+	mutex_unlock(&group->mutex);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(iommu_group_claim_dma_owner);
+
+/**
+ * iommu_group_release_dma_owner() - Release DMA ownership of a group
+ * @group: The group.
+ *
+ * Release the DMA ownership claimed by iommu_group_claim_dma_owner().
+ */
+void iommu_group_release_dma_owner(struct iommu_group *group)
+{
+	int ret;
+
+	mutex_lock(&group->mutex);
+	if (WARN_ON(!group->owner_cnt || !group->owner))
+		goto unlock_out;
+
+	group->owner_cnt = 0;
+	group->owner = NULL;
+	ret = __iommu_group_set_domain(group, group->default_domain);
+	WARN(ret, "iommu driver failed to attach the default domain");
+
+unlock_out:
+	mutex_unlock(&group->mutex);
}
+EXPORT_SYMBOL_GPL(iommu_group_release_dma_owner);
+
+/**
+ * iommu_group_dma_owner_claimed() - Query group dma ownership status
+ * @group: The group.
+ *
+ * This provides status query on a given group. It is racy and only for
+ * non-binding status reporting.
+ */
+bool iommu_group_dma_owner_claimed(struct iommu_group *group)
+{
+	unsigned int user;
+
+	mutex_lock(&group->mutex);
+	user = group->owner_cnt;
+	mutex_unlock(&group->mutex);
+
+	return user;
+}
+EXPORT_SYMBOL_GPL(iommu_group_dma_owner_claimed);
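For illustration, a consumer of the new device_iommu_capable() interface might look like the sketch below. This is not part of the patch: my_driver_probe() and the choice of IOMMU_CAP_CACHE_COHERENCY are assumptions.

#include <linux/device.h>
#include <linux/iommu.h>

/* Hypothetical probe path: query a capability per device instead of per bus. */
static int my_driver_probe(struct device *dev)
{
	/*
	 * device_iommu_capable() returns false when no IOMMU translates DMA
	 * for this device or its driver has no capable() op, so the caller
	 * can pick a fallback path without further checks.
	 */
	if (!device_iommu_capable(dev, IOMMU_CAP_CACHE_COHERENCY))
		dev_info(dev, "no cache-coherent IOMMU, using fallback path\n");

	return 0;
}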
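The iommu_device_use_default_domain()/iommu_device_unuse_default_domain() pair is intended to bracket driver binding. A minimal sketch of that calling pattern follows; bind_device() and do_driver_bind() are hypothetical stand-ins for the real bind path.

#include <linux/device.h>
#include <linux/iommu.h>

static int do_driver_bind(struct device *dev);	/* hypothetical bind step */

static int bind_device(struct device *dev)
{
	int ret;

	/* Fails with -EBUSY while user space owns the device's group. */
	ret = iommu_device_use_default_domain(dev);
	if (ret)
		return ret;

	ret = do_driver_bind(dev);
	if (ret)
		/* Drop the kernel-DMA claim again on failure. */
		iommu_device_unuse_default_domain(dev);

	return ret;
}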
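Similarly, a vfio-style owner would hold iommu_group_claim_dma_owner() for as long as the group is exposed to user space. A sketch under that assumption; my_container_attach()/my_container_detach() and the owner cookie are hypothetical.

#include <linux/iommu.h>

static int my_container_attach(struct iommu_group *group, void *owner_cookie)
{
	/*
	 * While the claim is held the group sits in the blocking domain (or
	 * an empty UNMANAGED domain for older drivers) and a second claim
	 * fails with -EPERM.
	 */
	return iommu_group_claim_dma_owner(group, owner_cookie);
}

static void my_container_detach(struct iommu_group *group)
{
	/* Re-attaches the group to its default domain and clears the owner. */
	iommu_group_release_dma_owner(group);
}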