author    Jean-Philippe Brucker <jean-philippe@linaro.org>  2021-12-01 20:33:24 +0300
committer Joerg Roedel <jroedel@suse.de>  2021-12-06 17:03:05 +0300
commit    c0c763598960153e10622ff0a802012a073174a0 (patch)
tree      35616d0aab27652c3489f03b9d9804ba2951d31e /drivers/iommu/virtio-iommu.c
parent    5610979415649f3743a7c3de03ab46c9a3bfff16 (diff)
download  linux-c0c763598960153e10622ff0a802012a073174a0.tar.xz
iommu/virtio: Pass end address to viommu_add_mapping()
To support identity mappings, the virtio-iommu driver must be able to
represent full 64-bit ranges internally. Pass (start, end) instead of
(start, size) to viommu_add/del_mapping().

Clean comments. The one about the returned size was never true: when
sweeping the whole address space the returned size will most certainly
be smaller than 2^64.

Reviewed-by: Eric Auger <eric.auger@redhat.com>
Reviewed-by: Kevin Tian <kevin.tian@intel.com>
Signed-off-by: Jean-Philippe Brucker <jean-philippe@linaro.org>
Link: https://lore.kernel.org/r/20211201173323.1045819-5-jean-philippe@linaro.org
Signed-off-by: Joerg Roedel <jroedel@suse.de>
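A small standalone sketch (hypothetical demo code, not part of the patch)
illustrates why the interface moves from (start, size) to an inclusive
(start, end): the size of the full 64-bit IOVA space is 2^64, which does not
fit in a u64 and wraps to 0, whereas an inclusive end of ULLONG_MAX describes
the same range without a special case.

/* Hypothetical userspace demo, not from the patch: shows why an inclusive
 * (start, end) pair can describe the whole 64-bit IOVA space while a
 * (start, size) pair cannot.
 */
#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

int main(void)
{
	/* With (start, size), covering the full space needs size == 2^64,
	 * which overflows a 64-bit integer and wraps to 0. This is why the
	 * old code treated "size 0" as a special case meaning "everything".
	 */
	uint64_t full_size = UINT64_MAX;	/* largest representable size */
	printf("size of the full space wraps to %" PRIu64 "\n", full_size + 1);

	/* With (start, end), end == UINT64_MAX covers the full space with no
	 * special case, matching viommu_del_mappings(vdomain, 0, ULLONG_MAX)
	 * in the patched viommu_domain_free().
	 */
	uint64_t end = UINT64_MAX;
	printf("last address covered: 0x%" PRIx64 "\n", end);

	/* Converting an existing (iova, size) pair, as viommu_map() does. */
	uint64_t iova = 0x1000, size = 0x2000;
	printf("end = iova + size - 1 = 0x%" PRIx64 "\n", iova + size - 1);
	return 0;
}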
Diffstat (limited to 'drivers/iommu/virtio-iommu.c')
-rw-r--r--  drivers/iommu/virtio-iommu.c  31
1 file changed, 15 insertions, 16 deletions
diff --git a/drivers/iommu/virtio-iommu.c b/drivers/iommu/virtio-iommu.c
index 1b3c1f2741c6..2fa370c2659c 100644
--- a/drivers/iommu/virtio-iommu.c
+++ b/drivers/iommu/virtio-iommu.c
@@ -311,8 +311,8 @@ out_unlock:
*
* On success, return the new mapping. Otherwise return NULL.
*/
-static int viommu_add_mapping(struct viommu_domain *vdomain, unsigned long iova,
- phys_addr_t paddr, size_t size, u32 flags)
+static int viommu_add_mapping(struct viommu_domain *vdomain, u64 iova, u64 end,
+ phys_addr_t paddr, u32 flags)
{
unsigned long irqflags;
struct viommu_mapping *mapping;
@@ -323,7 +323,7 @@ static int viommu_add_mapping(struct viommu_domain *vdomain, unsigned long iova,
mapping->paddr = paddr;
mapping->iova.start = iova;
- mapping->iova.last = iova + size - 1;
+ mapping->iova.last = end;
mapping->flags = flags;
spin_lock_irqsave(&vdomain->mappings_lock, irqflags);
@@ -338,26 +338,24 @@ static int viommu_add_mapping(struct viommu_domain *vdomain, unsigned long iova,
*
* @vdomain: the domain
* @iova: start of the range
- * @size: size of the range. A size of 0 corresponds to the entire address
- * space.
+ * @end: end of the range
*
- * On success, returns the number of unmapped bytes (>= size)
+ * On success, returns the number of unmapped bytes
*/
static size_t viommu_del_mappings(struct viommu_domain *vdomain,
- unsigned long iova, size_t size)
+ u64 iova, u64 end)
{
size_t unmapped = 0;
unsigned long flags;
- unsigned long last = iova + size - 1;
struct viommu_mapping *mapping = NULL;
struct interval_tree_node *node, *next;
spin_lock_irqsave(&vdomain->mappings_lock, flags);
- next = interval_tree_iter_first(&vdomain->mappings, iova, last);
+ next = interval_tree_iter_first(&vdomain->mappings, iova, end);
while (next) {
node = next;
mapping = container_of(node, struct viommu_mapping, iova);
- next = interval_tree_iter_next(node, iova, last);
+ next = interval_tree_iter_next(node, iova, end);
/* Trying to split a mapping? */
if (mapping->iova.start < iova)
@@ -656,8 +654,8 @@ static void viommu_domain_free(struct iommu_domain *domain)
{
struct viommu_domain *vdomain = to_viommu_domain(domain);
- /* Free all remaining mappings (size 2^64) */
- viommu_del_mappings(vdomain, 0, 0);
+ /* Free all remaining mappings */
+ viommu_del_mappings(vdomain, 0, ULLONG_MAX);
if (vdomain->viommu)
ida_free(&vdomain->viommu->domain_ids, vdomain->id);
@@ -742,6 +740,7 @@ static int viommu_map(struct iommu_domain *domain, unsigned long iova,
{
int ret;
u32 flags;
+ u64 end = iova + size - 1;
struct virtio_iommu_req_map map;
struct viommu_domain *vdomain = to_viommu_domain(domain);
@@ -752,7 +751,7 @@ static int viommu_map(struct iommu_domain *domain, unsigned long iova,
if (flags & ~vdomain->map_flags)
return -EINVAL;
- ret = viommu_add_mapping(vdomain, iova, paddr, size, flags);
+ ret = viommu_add_mapping(vdomain, iova, end, paddr, flags);
if (ret)
return ret;
@@ -761,7 +760,7 @@ static int viommu_map(struct iommu_domain *domain, unsigned long iova,
.domain = cpu_to_le32(vdomain->id),
.virt_start = cpu_to_le64(iova),
.phys_start = cpu_to_le64(paddr),
- .virt_end = cpu_to_le64(iova + size - 1),
+ .virt_end = cpu_to_le64(end),
.flags = cpu_to_le32(flags),
};
@@ -770,7 +769,7 @@ static int viommu_map(struct iommu_domain *domain, unsigned long iova,
ret = viommu_send_req_sync(vdomain->viommu, &map, sizeof(map));
if (ret)
- viommu_del_mappings(vdomain, iova, size);
+ viommu_del_mappings(vdomain, iova, end);
return ret;
}
@@ -783,7 +782,7 @@ static size_t viommu_unmap(struct iommu_domain *domain, unsigned long iova,
struct virtio_iommu_req_unmap unmap;
struct viommu_domain *vdomain = to_viommu_domain(domain);
- unmapped = viommu_del_mappings(vdomain, iova, size);
+ unmapped = viommu_del_mappings(vdomain, iova, iova + size - 1);
if (unmapped < size)
return 0;