| author | Will Deacon <will@kernel.org> | 2019-07-02 18:44:16 +0300 |
|---|---|---|
| committer | Will Deacon <will@kernel.org> | 2019-07-29 19:22:55 +0300 |
| commit | 3445545b2248300319b6965208e77140c960c3fd (patch) | |
| tree | 5e5c54c1f85b904030b86620634bf1fcddf60916 /include/linux/io-pgtable.h | |
| parent | 56f8af5e9d38f120cba2c2adb0786fa2dbc901a4 (diff) | |
| download | linux-3445545b2248300319b6965208e77140c960c3fd.tar.xz | |
iommu/io-pgtable: Introduce tlb_flush_walk() and tlb_flush_leaf()
In preparation for deferring TLB flushes to iommu_tlb_sync(), introduce
two new synchronous invalidation helpers to the io-pgtable API, which
allow the unmap() code to force invalidation in cases where it cannot be
deferred (e.g. when replacing a table with a block or when TLBI_ON_MAP
is set).
Signed-off-by: Will Deacon <will@kernel.org>
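As a rough illustration of how the unmap() path can use the new hooks, here is a minimal sketch (not part of this patch) of a page-table backend forcing a synchronous walk-cache invalidation when a table is collapsed into a block mapping. The example_collapse_table() function is invented for illustration; the struct io_pgtable field names (cfg, cookie) follow include/linux/io-pgtable.h around this series, and the ->tlb_flush_walk() signature is the one added by the diff below.

```c
#include <linux/io-pgtable.h>

/*
 * Illustrative only: a table entry is being replaced by a block entry,
 * so the stale walk-cache state cannot wait for iommu_tlb_sync() and
 * the new synchronous callback is used instead of tlb_add_flush().
 */
static void example_collapse_table(struct io_pgtable *iop, unsigned long iova,
				   size_t size, size_t granule)
{
	/* ... install the new block entry in the page table here ... */

	/*
	 * Force out intermediate ("walk cache") entries covering the
	 * range right now; deferring the invalidation would let the
	 * walker keep using the old table.
	 */
	iop->cfg.tlb->tlb_flush_walk(iova, size, granule, iop->cookie);
}
```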
Diffstat (limited to 'include/linux/io-pgtable.h')
-rw-r--r-- | include/linux/io-pgtable.h | 24 |
1 file changed, 19 insertions, 5 deletions
diff --git a/include/linux/io-pgtable.h b/include/linux/io-pgtable.h
index 6292ea15d674..27275575b305 100644
--- a/include/linux/io-pgtable.h
+++ b/include/linux/io-pgtable.h
@@ -19,17 +19,31 @@ enum io_pgtable_fmt {
 /**
  * struct iommu_flush_ops - IOMMU callbacks for TLB and page table management.
  *
- * @tlb_flush_all: Synchronously invalidate the entire TLB context.
- * @tlb_add_flush: Queue up a TLB invalidation for a virtual address range.
- * @tlb_sync:      Ensure any queued TLB invalidation has taken effect, and
- *                 any corresponding page table updates are visible to the
- *                 IOMMU.
+ * @tlb_flush_all:  Synchronously invalidate the entire TLB context.
+ * @tlb_flush_walk: Synchronously invalidate all intermediate TLB state
+ *                  (sometimes referred to as the "walk cache") for a virtual
+ *                  address range.
+ * @tlb_flush_leaf: Synchronously invalidate all leaf TLB state for a virtual
+ *                  address range.
+ * @tlb_add_flush:  Optional callback to queue up leaf TLB invalidation for a
+ *                  virtual address range. This function exists purely as an
+ *                  optimisation for IOMMUs that cannot batch TLB invalidation
+ *                  operations efficiently and are therefore better suited to
+ *                  issuing them early rather than deferring them until
+ *                  iommu_tlb_sync().
+ * @tlb_sync:       Ensure any queued TLB invalidation has taken effect, and
+ *                  any corresponding page table updates are visible to the
+ *                  IOMMU.
  *
  * Note that these can all be called in atomic context and must therefore
  * not block.
  */
 struct iommu_flush_ops {
 	void (*tlb_flush_all)(void *cookie);
+	void (*tlb_flush_walk)(unsigned long iova, size_t size, size_t granule,
+			       void *cookie);
+	void (*tlb_flush_leaf)(unsigned long iova, size_t size, size_t granule,
+			       void *cookie);
 	void (*tlb_add_flush)(unsigned long iova, size_t size, size_t granule,
 			      bool leaf, void *cookie);
 	void (*tlb_sync)(void *cookie);
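For context, a driver that wants to implement the extended ops might wire them up roughly as below. Only the struct layout and the callback signatures come from this patch; the example_* functions and the hardware-specific invalidation they stand in for are hypothetical.

```c
#include <linux/io-pgtable.h>

/*
 * Hypothetical hardware-specific invalidation; a real driver would
 * issue range-based TLBI commands here and wait for completion.
 */
static void example_inv_range(void *cookie, unsigned long iova, size_t size,
			      size_t granule, bool leaf_only)
{
	/* ... poke invalidation registers / queue commands ... */
}

static void example_tlb_flush_all(void *cookie)
{
	/* ... invalidate the whole TLB context for this domain ... */
}

static void example_tlb_flush_walk(unsigned long iova, size_t size,
				   size_t granule, void *cookie)
{
	/* Drop walk-cache and leaf entries for the range, synchronously. */
	example_inv_range(cookie, iova, size, granule, false);
}

static void example_tlb_flush_leaf(unsigned long iova, size_t size,
				   size_t granule, void *cookie)
{
	/* Drop only leaf entries for the range, synchronously. */
	example_inv_range(cookie, iova, size, granule, true);
}

static const struct iommu_flush_ops example_flush_ops = {
	.tlb_flush_all	= example_tlb_flush_all,
	.tlb_flush_walk	= example_tlb_flush_walk,
	.tlb_flush_leaf	= example_tlb_flush_leaf,
	/*
	 * .tlb_add_flush and .tlb_sync may still be provided by drivers
	 * that prefer to issue leaf invalidations early rather than
	 * batching them until the sync.
	 */
};
```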