author     Joerg Roedel <joerg.roedel@amd.com>  2010-01-20 19:17:37 +0300
committer  Joerg Roedel <joerg.roedel@amd.com>  2010-03-07 20:01:11 +0300
commit     b146a1c9f7f1feeacf840fa1ba197a99593cea15 (patch)
tree       a224acfd8926b1a315e2280ac3b3ae8acd49ef8e /drivers/pci
parent     67651786948c360c3122b8a17cb1e59209d50880 (diff)
download   linux-b146a1c9f7f1feeacf840fa1ba197a99593cea15.tar.xz
VT-d: Change {un}map_range functions to implement {un}map interface
This patch changes the iommu-api functions for mapping and unmapping page ranges to use the new page-size-based interface. This allows the range-based functions to be removed later.

Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
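For orientation only (not part of this patch), a minimal sketch of how a caller might drive the order-based interface, assuming the generic iommu_map()/iommu_unmap() wrappers introduced elsewhere in this series; example_map_region() and example_unmap_region() are hypothetical helpers:

#include <linux/iommu.h>
#include <asm/page.h>	/* PAGE_SIZE, get_order() */

/* Sketch: translate a byte size into the page order the new callbacks expect. */
static int example_map_region(struct iommu_domain *domain, unsigned long iova,
			      phys_addr_t paddr, size_t size)
{
	/* Round the byte size up to a power-of-two page order. */
	int order = get_order(size);

	/* The driver's ->map callback then maps PAGE_SIZE << order bytes. */
	return iommu_map(domain, iova, paddr, order, IOMMU_READ | IOMMU_WRITE);
}

static int example_unmap_region(struct iommu_domain *domain,
				unsigned long iova, size_t size)
{
	/* The return value is the order the driver actually unmapped. */
	return iommu_unmap(domain, iova, get_order(size));
}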
Diffstat (limited to 'drivers/pci')
-rw-r--r--	drivers/pci/intel-iommu.c | 22
1 file changed, 12 insertions(+), 10 deletions(-)
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
index a714e3db13c1..371dc564e2e4 100644
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -3626,14 +3626,15 @@ static void intel_iommu_detach_device(struct iommu_domain *domain,
 	domain_remove_one_dev_info(dmar_domain, pdev);
 }
 
-static int intel_iommu_map_range(struct iommu_domain *domain,
-				 unsigned long iova, phys_addr_t hpa,
-				 size_t size, int iommu_prot)
+static int intel_iommu_map(struct iommu_domain *domain,
+			   unsigned long iova, phys_addr_t hpa,
+			   int gfp_order, int iommu_prot)
 {
 	struct dmar_domain *dmar_domain = domain->priv;
 	u64 max_addr;
 	int addr_width;
 	int prot = 0;
+	size_t size;
 	int ret;
 
 	if (iommu_prot & IOMMU_READ)
@@ -3643,6 +3644,7 @@ static int intel_iommu_map_range(struct iommu_domain *domain,
 	if ((iommu_prot & IOMMU_CACHE) && dmar_domain->iommu_snooping)
 		prot |= DMA_PTE_SNP;
 
+	size = PAGE_SIZE << gfp_order;
 	max_addr = iova + size;
 	if (dmar_domain->max_addr < max_addr) {
 		int min_agaw;
@@ -3669,19 +3671,19 @@ static int intel_iommu_map_range(struct iommu_domain *domain,
 	return ret;
 }
 
-static void intel_iommu_unmap_range(struct iommu_domain *domain,
-				    unsigned long iova, size_t size)
+static int intel_iommu_unmap(struct iommu_domain *domain,
+			     unsigned long iova, int gfp_order)
 {
 	struct dmar_domain *dmar_domain = domain->priv;
-
-	if (!size)
-		return;
+	size_t size = PAGE_SIZE << gfp_order;
 
 	dma_pte_clear_range(dmar_domain, iova >> VTD_PAGE_SHIFT,
 			    (iova + size - 1) >> VTD_PAGE_SHIFT);
 
 	if (dmar_domain->max_addr == iova + size)
 		dmar_domain->max_addr = iova;
+
+	return gfp_order;
 }
 
 static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
@@ -3714,8 +3716,8 @@ static struct iommu_ops intel_iommu_ops = {
 	.domain_destroy = intel_iommu_domain_destroy,
 	.attach_dev = intel_iommu_attach_device,
 	.detach_dev = intel_iommu_detach_device,
-	.map_range = intel_iommu_map_range,
-	.unmap_range = intel_iommu_unmap_range,
+	.map = intel_iommu_map,
+	.unmap = intel_iommu_unmap,
 	.iova_to_phys = intel_iommu_iova_to_phys,
 	.domain_has_cap = intel_iommu_domain_has_cap,
 };
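As a usage note (illustration only, assuming 4 KiB pages): the region covered by an order-based call is simply the page size shifted by the order, which is the conversion both new callbacks perform internally:

	size_t size = PAGE_SIZE << gfp_order;	/* order 0 -> 4 KiB, order 1 -> 8 KiB, order 2 -> 16 KiB */

Returning gfp_order from intel_iommu_unmap() tells the caller how large a region was actually unmapped, which becomes relevant for drivers that tear down a larger page than the caller asked for.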