author     Joerg Roedel <jroedel@suse.de>            2015-12-21 21:07:50 +0300
committer  Joerg Roedel <jroedel@suse.de>            2015-12-28 19:18:53 +0300
commit     7bfa5bd2708d096c79fc2c9f32de478ade7a013f (patch)
tree       3afa9e8640a67ca1370d67a1bff364cad7151561  /drivers/iommu/amd_iommu.c
parent     266a3bd28f9842bac54f934df8dc9834799efbff (diff)
download   linux-7bfa5bd2708d096c79fc2c9f32de478ade7a013f.tar.xz
iommu/amd: Build io page-tables with cmpxchg64
This allows the page-tables to be built up without holding any
locks. As a consequence, it removes the need to pre-populate the
dma_ops page-tables.
Signed-off-by: Joerg Roedel <jroedel@suse.de>
Diffstat (limited to 'drivers/iommu/amd_iommu.c')
-rw-r--r--   drivers/iommu/amd_iommu.c   16
1 file changed, 13 insertions(+), 3 deletions(-)
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 4c926dadb281..ecdd3f7dfb89 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -1206,11 +1206,21 @@ static u64 *alloc_pte(struct protection_domain *domain,
 	end_lvl = PAGE_SIZE_LEVEL(page_size);
 
 	while (level > end_lvl) {
-		if (!IOMMU_PTE_PRESENT(*pte)) {
+		u64 __pte, __npte;
+
+		__pte = *pte;
+
+		if (!IOMMU_PTE_PRESENT(__pte)) {
 			page = (u64 *)get_zeroed_page(gfp);
 			if (!page)
 				return NULL;
-			*pte = PM_LEVEL_PDE(level, virt_to_phys(page));
+
+			__npte = PM_LEVEL_PDE(level, virt_to_phys(page));
+
+			if (cmpxchg64(pte, __pte, __npte)) {
+				free_page((unsigned long)page);
+				continue;
+			}
 		}
 
 		/* No level skipping support yet */
@@ -1607,7 +1617,7 @@ static unsigned long dma_ops_alloc_addresses(struct device *dev,
 		address = dma_ops_area_alloc(dev, dom, pages, align_mask,
 					     dma_mask);
 
-		if (address == -1 && alloc_new_range(dom, true, GFP_ATOMIC))
+		if (address == -1 && alloc_new_range(dom, false, GFP_ATOMIC))
 			break;
 	}
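The core of the change is a lockless "allocate, publish with compare-and-swap, free on lost race" pattern in alloc_pte(): a zeroed page is allocated speculatively, installed into the parent entry with cmpxchg64(), and given back if another CPU got there first. The sketch below is not kernel code; it is a hypothetical userspace C11 illustration in which atomic_compare_exchange_strong() stands in for cmpxchg64(), calloc() stands in for get_zeroed_page(), and the toy_table/descend names are invented for the example.

```c
/*
 * Hypothetical userspace sketch of the pattern used in alloc_pte():
 * allocate speculatively, publish with a compare-and-swap, free on a
 * lost race. C11 atomics replace cmpxchg64(), calloc() replaces
 * get_zeroed_page().
 */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define ENTRIES_PER_TABLE 512
#define PTE_PRESENT       1ULL	/* stand-in for the IOMMU present bit */

/* One level of a toy page table: 512 pointer-sized entries. */
struct toy_table {
	_Atomic uint64_t entry[ENTRIES_PER_TABLE];
};

/* Encode a pointer to the next-level table as a "present" entry. */
static uint64_t make_pde(struct toy_table *next)
{
	return (uint64_t)(uintptr_t)next | PTE_PRESENT;
}

/*
 * Walk one level down, installing a new table if the slot is empty.
 * No lock is held: if another thread publishes an entry first, the
 * compare-and-swap fails, the local allocation is freed, and the
 * entry that won the race is used on the retry.
 */
static struct toy_table *descend(struct toy_table *parent, unsigned int idx)
{
	for (;;) {
		uint64_t old = atomic_load(&parent->entry[idx]);

		if (old & PTE_PRESENT)
			return (struct toy_table *)(uintptr_t)(old & ~PTE_PRESENT);

		/* Zeroed memory == all entries non-present. */
		struct toy_table *page = calloc(1, sizeof(*page));
		if (!page)
			return NULL;

		uint64_t npte = make_pde(page);

		/* Publish the new table only if the slot is still empty. */
		if (atomic_compare_exchange_strong(&parent->entry[idx], &old, npte))
			return page;

		/* Lost the race: drop our copy and retry this level. */
		free(page);
	}
}

int main(void)
{
	struct toy_table root = { 0 };
	struct toy_table *first  = descend(&root, 42);
	struct toy_table *second = descend(&root, 42);

	printf("same table reused: %s\n", first == second ? "yes" : "no");
	free(first);
	return 0;
}
```

This mirrors the kernel flow above: when cmpxchg64() reports that another CPU already installed an entry, the speculatively allocated page is handed back with free_page() and the walk continues with the entry that won the race, which is why neither a lock nor pre-population of the dma_ops page-tables is required.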