| author | Vasant Hegde <vasant.hegde@amd.com> | 2022-08-25 09:39:31 +0300 |
|---|---|---|
| committer | Joerg Roedel <jroedel@suse.de> | 2022-09-07 17:12:32 +0300 |
| commit | 8cc233dec3137a2d243e6996e425c32032672c88 (patch) | |
| tree | 0133f884323f1ae8fbec479af557edf78c7c5ef3 | |
| parent | 7e18e42e4b280c85b76967a9106a13ca61c16179 (diff) | |
| download | linux-8cc233dec3137a2d243e6996e425c32032672c88.tar.xz | |
iommu/amd/io-pgtable: Implement map_pages io_pgtable_ops callback
Implement the io_pgtable_ops->map_pages() callback for the AMD IOMMU driver.
Also deprecate the io_pgtable_ops->map() callback.
Suggested-by: Robin Murphy <robin.murphy@arm.com>
Signed-off-by: Vasant Hegde <vasant.hegde@amd.com>
Link: https://lore.kernel.org/r/20220825063939.8360-2-vasant.hegde@amd.com
Signed-off-by: Joerg Roedel <jroedel@suse.de>
-rw-r--r-- | drivers/iommu/amd/io_pgtable.c | 59 |
1 file changed, 34 insertions(+), 25 deletions(-)
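Before the diff itself, a minimal userspace sketch of the map_pages() batching contract may help: map pgcount pages of pgsize bytes each starting at iova/paddr, and report progress through *mapped so the caller can unwind a partial mapping on failure. Everything here (map_one(), the sample addresses, main()) is illustrative scaffolding, not kernel API:

```c
#include <stddef.h>
#include <stdio.h>

/*
 * Userspace model of the io_pgtable_ops->map_pages() contract
 * (illustrative only): map 'pgcount' pages of 'pgsize' bytes each,
 * advancing iova/paddr per page, and account every page that was
 * successfully installed in *mapped so a caller can unwind partial
 * progress when an error is returned.
 */
static int map_one(unsigned long iova, unsigned long long paddr, size_t pgsize)
{
	/* Stand-in for the real driver's PTE setup (alloc_pte() etc.). */
	printf("map iova=%#lx -> paddr=%#llx size=%#zx\n", iova, paddr, pgsize);
	return 0; /* 0 on success, negative errno on failure */
}

static int map_pages(unsigned long iova, unsigned long long paddr,
		     size_t pgsize, size_t pgcount, size_t *mapped)
{
	int ret = 0;

	while (pgcount > 0) {
		ret = map_one(iova, paddr, pgsize);
		if (ret)
			break;	/* *mapped tells the caller how far we got */

		iova  += pgsize;
		paddr += pgsize;
		pgcount--;
		if (mapped)
			*mapped += pgsize;
	}
	return ret;
}

int main(void)
{
	size_t mapped = 0;

	/* One call maps four 4 KiB pages instead of four .map() calls. */
	map_pages(0x100000UL, 0x200000ULL, 0x1000, 4, &mapped);
	printf("mapped %zu bytes\n", mapped);
	return 0;
}
```

The point of the batched hook is that the IOMMU core can hand the driver a whole run of same-sized pages in one indirect call, and on error the accumulated *mapped value tells it exactly how many bytes to tear down.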
diff --git a/drivers/iommu/amd/io_pgtable.c b/drivers/iommu/amd/io_pgtable.c
index 7d4b61e5db47..df7799317f66 100644
--- a/drivers/iommu/amd/io_pgtable.c
+++ b/drivers/iommu/amd/io_pgtable.c
@@ -360,8 +360,9 @@ static void free_clear_pte(u64 *pte, u64 pteval, struct list_head *freelist)
  * supporting all features of AMD IOMMU page tables like level skipping
  * and full 64 bit address spaces.
  */
-static int iommu_v1_map_page(struct io_pgtable_ops *ops, unsigned long iova,
-			  phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
+static int iommu_v1_map_pages(struct io_pgtable_ops *ops, unsigned long iova,
+			      phys_addr_t paddr, size_t pgsize, size_t pgcount,
+			      int prot, gfp_t gfp, size_t *mapped)
 {
 	struct protection_domain *dom = io_pgtable_ops_to_domain(ops);
 	LIST_HEAD(freelist);
@@ -369,39 +370,47 @@ static int iommu_v1_map_page(struct io_pgtable_ops *ops, unsigned long iova,
 	u64 __pte, *pte;
 	int ret, i, count;
 
-	BUG_ON(!IS_ALIGNED(iova, size));
-	BUG_ON(!IS_ALIGNED(paddr, size));
+	BUG_ON(!IS_ALIGNED(iova, pgsize));
+	BUG_ON(!IS_ALIGNED(paddr, pgsize));
 
 	ret = -EINVAL;
 	if (!(prot & IOMMU_PROT_MASK))
 		goto out;
 
-	count = PAGE_SIZE_PTE_COUNT(size);
-	pte   = alloc_pte(dom, iova, size, NULL, gfp, &updated);
+	while (pgcount > 0) {
+		count = PAGE_SIZE_PTE_COUNT(pgsize);
+		pte   = alloc_pte(dom, iova, pgsize, NULL, gfp, &updated);
 
-	ret = -ENOMEM;
-	if (!pte)
-		goto out;
+		ret = -ENOMEM;
+		if (!pte)
+			goto out;
 
-	for (i = 0; i < count; ++i)
-		free_clear_pte(&pte[i], pte[i], &freelist);
+		for (i = 0; i < count; ++i)
+			free_clear_pte(&pte[i], pte[i], &freelist);
 
-	if (!list_empty(&freelist))
-		updated = true;
+		if (!list_empty(&freelist))
+			updated = true;
 
-	if (count > 1) {
-		__pte = PAGE_SIZE_PTE(__sme_set(paddr), size);
-		__pte |= PM_LEVEL_ENC(7) | IOMMU_PTE_PR | IOMMU_PTE_FC;
-	} else
-		__pte = __sme_set(paddr) | IOMMU_PTE_PR | IOMMU_PTE_FC;
+		if (count > 1) {
+			__pte = PAGE_SIZE_PTE(__sme_set(paddr), pgsize);
+			__pte |= PM_LEVEL_ENC(7) | IOMMU_PTE_PR | IOMMU_PTE_FC;
+		} else
+			__pte = __sme_set(paddr) | IOMMU_PTE_PR | IOMMU_PTE_FC;
 
-	if (prot & IOMMU_PROT_IR)
-		__pte |= IOMMU_PTE_IR;
-	if (prot & IOMMU_PROT_IW)
-		__pte |= IOMMU_PTE_IW;
+		if (prot & IOMMU_PROT_IR)
+			__pte |= IOMMU_PTE_IR;
+		if (prot & IOMMU_PROT_IW)
+			__pte |= IOMMU_PTE_IW;
 
-	for (i = 0; i < count; ++i)
-		pte[i] = __pte;
+		for (i = 0; i < count; ++i)
+			pte[i] = __pte;
+
+		iova  += pgsize;
+		paddr += pgsize;
+		pgcount--;
+
+		if (mapped)
+			*mapped += pgsize;
+	}
 
 	ret = 0;
 
@@ -514,7 +523,7 @@ static struct io_pgtable *v1_alloc_pgtable(struct io_pgtable_cfg *cfg, void *coo
 	cfg->oas            = IOMMU_OUT_ADDR_BIT_SIZE,
 	cfg->tlb            = &v1_flush_ops;
 
-	pgtable->iop.ops.map          = iommu_v1_map_page;
+	pgtable->iop.ops.map_pages    = iommu_v1_map_pages;
 	pgtable->iop.ops.unmap        = iommu_v1_unmap_page;
 	pgtable->iop.ops.iova_to_phys = iommu_v1_iova_to_phys;
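As a footnote to the count computation above: a small standalone demo, assuming PAGE_SIZE_PTE_COUNT()'s upstream definition of (1ULL << ((__ffs(pgsize) - 12) % 9)) from drivers/iommu/amd/amd_iommu_types.h (the demo scaffolding itself is ours), shows why count exceeds 1 only for AMD's "in-between" page sizes, which is what triggers the PM_LEVEL_ENC(7) encoding in the hunk above:

```c
#include <stdio.h>

/* Userspace stand-in for the kernel's __ffs(): index of lowest set bit. */
static unsigned int lowest_bit(unsigned long long x)
{
	return (unsigned int)__builtin_ctzll(x);
}

/*
 * Assumed to mirror PAGE_SIZE_PTE_COUNT() from
 * drivers/iommu/amd/amd_iommu_types.h: how many identical PTEs one
 * page-table level must replicate to cover 'pgsize'.  Sizes that fall
 * on a level boundary (4K, 2M, 1G) need a single PTE; in-between
 * sizes such as 32K need several.
 */
#define PAGE_SIZE_PTE_COUNT(pgsize) (1ULL << ((lowest_bit(pgsize) - 12) % 9))

int main(void)
{
	unsigned long long sizes[] = {
		1ULL << 12,	/* 4K  -> 1 PTE  */
		1ULL << 15,	/* 32K -> 8 PTEs */
		1ULL << 21,	/* 2M  -> 1 PTE  */
		1ULL << 30,	/* 1G  -> 1 PTE  */
	};

	for (int i = 0; i < 4; i++)
		printf("pgsize %#11llx -> %llu PTE(s)\n",
		       sizes[i], PAGE_SIZE_PTE_COUNT(sizes[i]));
	return 0;
}
```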