author     Keqian Zhu <zhukeqian1@huawei.com>            2021-01-25 05:46:42 +0300
committer  Alex Williamson <alex.williamson@redhat.com>  2021-02-01 23:40:52 +0300
commit     010321565a7d2cd73eeed6f589693ce752e154bd (patch)
tree       ee6a988bdbe26047c719de46b39c59aa41346369 /drivers/vfio
parent     4a19f37a3dd3f29997735e61b25ddad24b8abe73 (diff)
download   linux-010321565a7d2cd73eeed6f589693ce752e154bd.tar.xz
vfio/iommu_type1: Maintain a counter for non_pinned_groups
With this counter, we no longer need to traverse all groups to update the
pinned_page_dirty_scope status of the vfio_iommu: the dirty scope is limited
to pinned pages exactly when num_non_pinned_groups is zero.
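
To illustrate the bookkeeping, here is a minimal standalone sketch (the toy_*
names and the main() driver are illustrative only, not part of this patch or
of the kernel API): the counter moves in lock step with each group's
pinned_page_dirty_scope flag, and the whole-IOMMU scope check becomes a
compare against zero instead of a walk over every group in every domain.

	/*
	 * Toy model of the counting scheme: the counter goes up when a
	 * group attaches, and comes down either when the group proves it
	 * limits dirty pages to pinned pages (first pin) or when a
	 * still-unpinned group detaches.
	 */
	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	struct toy_group {
		bool pinned_page_dirty_scope;	/* set once the group pins pages */
	};

	struct toy_iommu {
		uint64_t num_non_pinned_groups;	/* groups not yet pinned-scope */
	};

	static void toy_attach_group(struct toy_iommu *iommu, struct toy_group *group)
	{
		/* A new group demotes the scope until it declares itself via pinning. */
		group->pinned_page_dirty_scope = false;
		iommu->num_non_pinned_groups++;
	}

	static void toy_pin_pages(struct toy_iommu *iommu, struct toy_group *group)
	{
		/* First pin from this group promotes it; no traversal needed. */
		if (!group->pinned_page_dirty_scope) {
			group->pinned_page_dirty_scope = true;
			iommu->num_non_pinned_groups--;
		}
	}

	static void toy_detach_group(struct toy_iommu *iommu, struct toy_group *group)
	{
		/* A group leaving while still unpinned may promote the remaining scope. */
		if (!group->pinned_page_dirty_scope)
			iommu->num_non_pinned_groups--;
	}

	static bool toy_pinned_scope(const struct toy_iommu *iommu)
	{
		/* O(1) replacement for walking domain_list/group_list. */
		return iommu->num_non_pinned_groups == 0;
	}

	int main(void)
	{
		struct toy_iommu iommu = { 0 };
		struct toy_group a = { 0 }, b = { 0 };

		toy_attach_group(&iommu, &a);
		toy_attach_group(&iommu, &b);
		printf("pinned scope after attach:  %d\n", toy_pinned_scope(&iommu));

		toy_pin_pages(&iommu, &a);
		toy_pin_pages(&iommu, &b);
		printf("pinned scope after pinning: %d\n", toy_pinned_scope(&iommu));

		toy_detach_group(&iommu, &a);
		printf("pinned scope after detach:  %d\n", toy_pinned_scope(&iommu));
		return 0;
	}

In the patch itself every transition is paired the same way: attaching an
IOMMU-backed group increments the counter, the first pin from such a group
decrements it, and detaching a group that never reached pinned scope
decrements it, so the counter can only reach zero when every remaining group
has limited its dirty scope to pinned pages.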
Suggested-by: Alex Williamson <alex.williamson@redhat.com>
Signed-off-by: Keqian Zhu <zhukeqian1@huawei.com>
Signed-off-by: Alex Williamson <alex.williamson@redhat.com>
Diffstat (limited to 'drivers/vfio')
-rw-r--r--   drivers/vfio/vfio_iommu_type1.c   40
1 file changed, 5 insertions(+), 35 deletions(-)
diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
index 78bd28873945..706b88b5d4ee 100644
--- a/drivers/vfio/vfio_iommu_type1.c
+++ b/drivers/vfio/vfio_iommu_type1.c
@@ -70,10 +70,10 @@ struct vfio_iommu {
 	struct blocking_notifier_head notifier;
 	unsigned int		dma_avail;
 	uint64_t		pgsize_bitmap;
+	uint64_t		num_non_pinned_groups;
 	bool			v2;
 	bool			nesting;
 	bool			dirty_page_tracking;
-	bool			pinned_page_dirty_scope;
 };
 
 struct vfio_domain {
@@ -148,7 +148,6 @@ static int put_pfn(unsigned long pfn, int prot);
 static struct vfio_group *vfio_iommu_find_iommu_group(struct vfio_iommu *iommu,
 					       struct iommu_group *iommu_group);
 
-static void update_pinned_page_dirty_scope(struct vfio_iommu *iommu);
 /*
  * This code handles mapping and unmapping of user data buffers
  * into DMA'ble space using the IOMMU
@@ -726,7 +725,7 @@ static int vfio_iommu_type1_pin_pages(void *iommu_data,
 	group = vfio_iommu_find_iommu_group(iommu, iommu_group);
 	if (!group->pinned_page_dirty_scope) {
 		group->pinned_page_dirty_scope = true;
-		update_pinned_page_dirty_scope(iommu);
+		iommu->num_non_pinned_groups--;
 	}
 
 	goto pin_done;
@@ -1004,7 +1003,7 @@ static int update_user_bitmap(u64 __user *bitmap, struct vfio_iommu *iommu,
 	 * mark all pages dirty if any IOMMU capable device is not able
 	 * to report dirty pages and all pages are pinned and mapped.
	 */
-	if (!iommu->pinned_page_dirty_scope && dma->iommu_mapped)
+	if (iommu->num_non_pinned_groups && dma->iommu_mapped)
 		bitmap_set(dma->bitmap, 0, nbits);
 
 	if (shift) {
@@ -1635,33 +1634,6 @@ static struct vfio_group *vfio_iommu_find_iommu_group(struct vfio_iommu *iommu,
 	return group;
 }
 
-static void update_pinned_page_dirty_scope(struct vfio_iommu *iommu)
-{
-	struct vfio_domain *domain;
-	struct vfio_group *group;
-
-	list_for_each_entry(domain, &iommu->domain_list, next) {
-		list_for_each_entry(group, &domain->group_list, next) {
-			if (!group->pinned_page_dirty_scope) {
-				iommu->pinned_page_dirty_scope = false;
-				return;
-			}
-		}
-	}
-
-	if (iommu->external_domain) {
-		domain = iommu->external_domain;
-		list_for_each_entry(group, &domain->group_list, next) {
-			if (!group->pinned_page_dirty_scope) {
-				iommu->pinned_page_dirty_scope = false;
-				return;
-			}
-		}
-	}
-
-	iommu->pinned_page_dirty_scope = true;
-}
-
 static bool vfio_iommu_has_sw_msi(struct list_head *group_resv_regions,
 				  phys_addr_t *base)
 {
@@ -2070,8 +2042,6 @@ static int vfio_iommu_type1_attach_group(void *iommu_data,
 	 * addition of a dirty tracking group.
 	 */
 	group->pinned_page_dirty_scope = true;
-	if (!iommu->pinned_page_dirty_scope)
-		update_pinned_page_dirty_scope(iommu);
 	mutex_unlock(&iommu->lock);
 
 	return 0;
@@ -2201,7 +2171,7 @@ done:
 	 * demotes the iommu scope until it declares itself dirty tracking
 	 * capable via the page pinning interface.
 	 */
-	iommu->pinned_page_dirty_scope = false;
+	iommu->num_non_pinned_groups++;
 	mutex_unlock(&iommu->lock);
 	vfio_iommu_resv_free(&group_resv_regions);
 
@@ -2414,7 +2384,7 @@ detach_group_done:
 	 * to be promoted.
 	 */
 	if (update_dirty_scope) {
-		update_pinned_page_dirty_scope(iommu);
+		iommu->num_non_pinned_groups--;
 		if (iommu->dirty_page_tracking)
 			vfio_iommu_populate_bitmap_full(iommu);
 	}