author		Christian König <christian.koenig@amd.com>	2021-04-12 16:11:47 +0300
committer	Christian König <christian.koenig@amd.com>	2021-06-02 12:07:25 +0300
commit		d3116756a710e3cd51293a9d58b525957ab7e784 (patch)
tree		fde36f419dfcaadc3386320f9170cc8b14a35701 /drivers/gpu/drm/ttm
parent		9450129ed944b3c31c440f5422147103828c2b99 (diff)
download	linux-d3116756a710e3cd51293a9d58b525957ab7e784.tar.xz
drm/ttm: rename bo->mem and make it a pointer
When we want to decouple resource management from buffer management, we need
to be able to handle resources separately.

Add a resource pointer and rename bo->mem so that all code has to be changed
to access the pointer instead.
No functional change.
Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20210430092508.60710-4-christian.koenig@amd.com
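The shape of the change, as a minimal sketch: the real definition of struct ttm_buffer_object lives in include/drm/ttm/ttm_bo_api.h and is not part of this diff, so the fields below are trimmed to what the patch touches; the _mem member name is taken from the initialization in ttm_bo_init_reserved() further down.

/* Sketch only -- not the full struct definition. */
struct ttm_buffer_object {
	/* ... */
	struct ttm_resource *resource;	/* new: all code dereferences this */
	struct ttm_resource _mem;	/* the former bo->mem, kept as backing storage */
	/* ... */
};

/* ttm_bo_init_reserved() points the new pointer at the embedded storage,
 * so behaviour stays the same until resources are allocated separately:
 */
bo->resource = &bo->_mem;
ttm_resource_alloc(bo, &sys_mem, bo->resource);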
Diffstat (limited to 'drivers/gpu/drm/ttm')
-rw-r--r--	drivers/gpu/drm/ttm/ttm_bo.c		37
-rw-r--r--	drivers/gpu/drm/ttm/ttm_bo_util.c	49
-rw-r--r--	drivers/gpu/drm/ttm/ttm_bo_vm.c		22
3 files changed, 56 insertions, 52 deletions
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 51a94fd63bd7..5a7ab4b35b2d 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -58,7 +58,7 @@ static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo,
 	int i, mem_type;
 
 	drm_printf(&p, "No space for %p (%lu pages, %zuK, %zuM)\n",
-		   bo, bo->mem.num_pages, bo->base.size >> 10,
+		   bo, bo->resource->num_pages, bo->base.size >> 10,
 		   bo->base.size >> 20);
 	for (i = 0; i < placement->num_placement; i++) {
 		mem_type = placement->placement[i].mem_type;
@@ -109,7 +109,7 @@ void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo,
 		bdev->funcs->del_from_lru_notify(bo);
 
 	if (bulk && !bo->pin_count) {
-		switch (bo->mem.mem_type) {
+		switch (bo->resource->mem_type) {
 		case TTM_PL_TT:
 			ttm_bo_bulk_move_set_pos(&bulk->tt[bo->priority], bo);
 			break;
@@ -163,11 +163,13 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
 				  struct ttm_operation_ctx *ctx,
 				  struct ttm_place *hop)
 {
+	struct ttm_resource_manager *old_man, *new_man;
 	struct ttm_device *bdev = bo->bdev;
-	struct ttm_resource_manager *old_man = ttm_manager_type(bdev, bo->mem.mem_type);
-	struct ttm_resource_manager *new_man = ttm_manager_type(bdev, mem->mem_type);
 	int ret;
 
+	old_man = ttm_manager_type(bdev, bo->resource->mem_type);
+	new_man = ttm_manager_type(bdev, mem->mem_type);
+
 	ttm_bo_unmap_virtual(bo);
 
 	/*
@@ -200,7 +202,7 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
 	return 0;
 
 out_err:
-	new_man = ttm_manager_type(bdev, bo->mem.mem_type);
+	new_man = ttm_manager_type(bdev, bo->resource->mem_type);
 	if (!new_man->use_tt)
 		ttm_bo_tt_destroy(bo);
 
@@ -221,7 +223,7 @@ static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo)
 		bo->bdev->funcs->delete_mem_notify(bo);
 
 	ttm_bo_tt_destroy(bo);
-	ttm_resource_free(bo, &bo->mem);
+	ttm_resource_free(bo, bo->resource);
 }
 
 static int ttm_bo_individualize_resv(struct ttm_buffer_object *bo)
@@ -417,7 +419,7 @@ static void ttm_bo_release(struct kref *kref)
 			bo->bdev->funcs->release_notify(bo);
 
 		drm_vma_offset_remove(bdev->vma_manager, &bo->base.vma_node);
-		ttm_mem_io_free(bdev, &bo->mem);
+		ttm_mem_io_free(bdev, bo->resource);
 	}
 
 	if (!dma_resv_test_signaled_rcu(bo->base.resv, true) ||
@@ -438,7 +440,7 @@ static void ttm_bo_release(struct kref *kref)
 		 */
 		if (bo->pin_count) {
 			bo->pin_count = 0;
-			ttm_bo_move_to_lru_tail(bo, &bo->mem, NULL);
+			ttm_bo_move_to_lru_tail(bo, bo->resource, NULL);
 		}
 
 		kref_init(&bo->kref);
@@ -534,8 +536,8 @@ bool ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
 	/* Don't evict this BO if it's outside of the
 	 * requested placement range
 	 */
-	if (place->fpfn >= (bo->mem.start + bo->mem.num_pages) ||
-	    (place->lpfn && place->lpfn <= bo->mem.start))
+	if (place->fpfn >= (bo->resource->start + bo->resource->num_pages) ||
+	    (place->lpfn && place->lpfn <= bo->resource->start))
 		return false;
 
 	return true;
@@ -851,7 +853,7 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
 	}
 
 error:
-	if (bo->mem.mem_type == TTM_PL_SYSTEM && !bo->pin_count)
+	if (bo->resource->mem_type == TTM_PL_SYSTEM && !bo->pin_count)
 		ttm_bo_move_to_lru_tail_unlocked(bo);
 
 	return ret;
@@ -987,7 +989,7 @@ int ttm_bo_validate(struct ttm_buffer_object *bo,
 	/*
 	 * Check whether we need to move buffer.
 	 */
-	if (!ttm_bo_mem_compat(placement, &bo->mem, &new_flags)) {
+	if (!ttm_bo_mem_compat(placement, bo->resource, &new_flags)) {
 		ret = ttm_bo_move_buffer(bo, placement, ctx);
 		if (ret)
 			return ret;
@@ -995,7 +997,7 @@ int ttm_bo_validate(struct ttm_buffer_object *bo,
 	/*
 	 * We might need to add a TTM.
 	 */
-	if (bo->mem.mem_type == TTM_PL_SYSTEM) {
+	if (bo->resource->mem_type == TTM_PL_SYSTEM) {
 		ret = ttm_tt_create(bo, true);
 		if (ret)
 			return ret;
@@ -1027,7 +1029,8 @@ int ttm_bo_init_reserved(struct ttm_device *bdev,
 	bo->bdev = bdev;
 	bo->type = type;
 	bo->page_alignment = page_alignment;
-	ttm_resource_alloc(bo, &sys_mem, &bo->mem);
+	bo->resource = &bo->_mem;
+	ttm_resource_alloc(bo, &sys_mem, bo->resource);
 	bo->moving = NULL;
 	bo->pin_count = 0;
 	bo->sg = sg;
@@ -1046,7 +1049,7 @@ int ttm_bo_init_reserved(struct ttm_device *bdev,
 
 	if (bo->type == ttm_bo_type_device || bo->type == ttm_bo_type_sg)
 		ret = drm_vma_offset_add(bdev->vma_manager, &bo->base.vma_node,
-					 bo->mem.num_pages);
+					 bo->resource->num_pages);
 
 	/* passed reservation objects should already be locked,
 	 * since otherwise lockdep will be angered in radeon.
@@ -1108,7 +1111,7 @@ void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
 	struct ttm_device *bdev = bo->bdev;
 
 	drm_vma_node_unmap(&bo->base.vma_node, bdev->dev_mapping);
-	ttm_mem_io_free(bdev, &bo->mem);
+	ttm_mem_io_free(bdev, bo->resource);
 }
 EXPORT_SYMBOL(ttm_bo_unmap_virtual);
 
@@ -1165,7 +1168,7 @@ int ttm_bo_swapout(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx,
 	/*
 	 * Move to system cached
 	 */
-	if (bo->mem.mem_type != TTM_PL_SYSTEM) {
+	if (bo->resource->mem_type != TTM_PL_SYSTEM) {
 		struct ttm_operation_ctx ctx = { false, false };
 		struct ttm_resource evict_mem;
 		struct ttm_place place, hop;
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index ae8b61460724..aedf02a31c70 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -179,7 +179,7 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
 	struct ttm_device *bdev = bo->bdev;
 	struct ttm_resource_manager *man = ttm_manager_type(bdev, new_mem->mem_type);
 	struct ttm_tt *ttm = bo->ttm;
-	struct ttm_resource *old_mem = &bo->mem;
+	struct ttm_resource *old_mem = bo->resource;
 	struct ttm_resource old_copy = *old_mem;
 	void *old_iomap;
 	void *new_iomap;
@@ -365,24 +365,23 @@ static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
 			  unsigned long size,
 			  struct ttm_bo_kmap_obj *map)
 {
-	struct ttm_resource *mem = &bo->mem;
+	struct ttm_resource *mem = bo->resource;
 
-	if (bo->mem.bus.addr) {
+	if (bo->resource->bus.addr) {
 		map->bo_kmap_type = ttm_bo_map_premapped;
-		map->virtual = (void *)(((u8 *)bo->mem.bus.addr) + offset);
+		map->virtual = ((u8 *)bo->resource->bus.addr) + offset;
 	} else {
+		resource_size_t res = bo->resource->bus.offset + offset;
+
 		map->bo_kmap_type = ttm_bo_map_iomap;
 		if (mem->bus.caching == ttm_write_combined)
-			map->virtual = ioremap_wc(bo->mem.bus.offset + offset,
-						  size);
+			map->virtual = ioremap_wc(res, size);
 #ifdef CONFIG_X86
 		else if (mem->bus.caching == ttm_cached)
-			map->virtual = ioremap_cache(bo->mem.bus.offset + offset,
-						     size);
+			map->virtual = ioremap_cache(res, size);
 #endif
 		else
-			map->virtual = ioremap(bo->mem.bus.offset + offset,
-					       size);
+			map->virtual = ioremap(res, size);
 	}
 	return (!map->virtual) ? -ENOMEM : 0;
 }
@@ -392,7 +391,7 @@ static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
 			   unsigned long num_pages,
 			   struct ttm_bo_kmap_obj *map)
 {
-	struct ttm_resource *mem = &bo->mem;
+	struct ttm_resource *mem = bo->resource;
 	struct ttm_operation_ctx ctx = {
 		.interruptible = false,
 		.no_wait_gpu = false
@@ -438,15 +437,15 @@ int ttm_bo_kmap(struct ttm_buffer_object *bo,
 
 	map->virtual = NULL;
 	map->bo = bo;
-	if (num_pages > bo->mem.num_pages)
+	if (num_pages > bo->resource->num_pages)
 		return -EINVAL;
-	if ((start_page + num_pages) > bo->mem.num_pages)
+	if ((start_page + num_pages) > bo->resource->num_pages)
 		return -EINVAL;
 
-	ret = ttm_mem_io_reserve(bo->bdev, &bo->mem);
+	ret = ttm_mem_io_reserve(bo->bdev, bo->resource);
 	if (ret)
 		return ret;
-	if (!bo->mem.bus.is_iomem) {
+	if (!bo->resource->bus.is_iomem) {
 		return ttm_bo_kmap_ttm(bo, start_page, num_pages, map);
 	} else {
 		offset = start_page << PAGE_SHIFT;
@@ -475,7 +474,7 @@ void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
 	default:
 		BUG();
 	}
-	ttm_mem_io_free(map->bo->bdev, &map->bo->mem);
+	ttm_mem_io_free(map->bo->bdev, map->bo->resource);
 	map->virtual = NULL;
 	map->page = NULL;
 }
@@ -483,7 +482,7 @@ EXPORT_SYMBOL(ttm_bo_kunmap);
 
 int ttm_bo_vmap(struct ttm_buffer_object *bo, struct dma_buf_map *map)
 {
-	struct ttm_resource *mem = &bo->mem;
+	struct ttm_resource *mem = bo->resource;
 	int ret;
 
 	ret = ttm_mem_io_reserve(bo->bdev, mem);
@@ -542,7 +541,7 @@ EXPORT_SYMBOL(ttm_bo_vmap);
 
 void ttm_bo_vunmap(struct ttm_buffer_object *bo, struct dma_buf_map *map)
 {
-	struct ttm_resource *mem = &bo->mem;
+	struct ttm_resource *mem = bo->resource;
 
 	if (dma_buf_map_is_null(map))
 		return;
@@ -553,7 +552,7 @@ void ttm_bo_vunmap(struct ttm_buffer_object *bo, struct dma_buf_map *map)
 		iounmap(map->vaddr_iomem);
 	dma_buf_map_clear(map);
 
-	ttm_mem_io_free(bo->bdev, &bo->mem);
+	ttm_mem_io_free(bo->bdev, bo->resource);
 }
 EXPORT_SYMBOL(ttm_bo_vunmap);
 
@@ -567,7 +566,7 @@ static int ttm_bo_wait_free_node(struct ttm_buffer_object *bo,
 
 	if (!dst_use_tt)
 		ttm_bo_tt_destroy(bo);
-	ttm_resource_free(bo, &bo->mem);
+	ttm_resource_free(bo, bo->resource);
 	return 0;
 }
 
@@ -615,7 +614,9 @@ static void ttm_bo_move_pipeline_evict(struct ttm_buffer_object *bo,
 					struct dma_fence *fence)
 {
 	struct ttm_device *bdev = bo->bdev;
-	struct ttm_resource_manager *from = ttm_manager_type(bdev, bo->mem.mem_type);
+	struct ttm_resource_manager *from;
+
+	from = ttm_manager_type(bdev, bo->resource->mem_type);
 
 	/**
 	 * BO doesn't have a TTM we need to bind/unbind. Just remember
@@ -628,7 +629,7 @@ static void ttm_bo_move_pipeline_evict(struct ttm_buffer_object *bo,
 	}
 	spin_unlock(&from->move_lock);
 
-	ttm_resource_free(bo, &bo->mem);
+	ttm_resource_free(bo, bo->resource);
 
 	dma_fence_put(bo->moving);
 	bo->moving = dma_fence_get(fence);
@@ -641,7 +642,7 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
 			      struct ttm_resource *new_mem)
 {
 	struct ttm_device *bdev = bo->bdev;
-	struct ttm_resource_manager *from = ttm_manager_type(bdev, bo->mem.mem_type);
+	struct ttm_resource_manager *from = ttm_manager_type(bdev, bo->resource->mem_type);
 	struct ttm_resource_manager *man = ttm_manager_type(bdev, new_mem->mem_type);
 	int ret = 0;
 
@@ -677,7 +678,7 @@ int ttm_bo_pipeline_gutting(struct ttm_buffer_object *bo)
 	if (ret)
 		ttm_bo_wait(bo, false, false);
 
-	ttm_resource_alloc(bo, &sys_mem, &bo->mem);
+	ttm_resource_alloc(bo, &sys_mem, bo->resource);
 	bo->ttm = NULL;
 
 	dma_resv_unlock(&ghost->base._resv);
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index 9bd15cb39145..61828488ae2b 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -102,7 +102,7 @@ static unsigned long ttm_bo_io_mem_pfn(struct ttm_buffer_object *bo,
 	if (bdev->funcs->io_mem_pfn)
 		return bdev->funcs->io_mem_pfn(bo, page_offset);
 
-	return (bo->mem.bus.offset >> PAGE_SHIFT) + page_offset;
+	return (bo->resource->bus.offset >> PAGE_SHIFT) + page_offset;
 }
 
 /**
@@ -200,10 +200,10 @@ static vm_fault_t ttm_bo_vm_insert_huge(struct vm_fault *vmf,
 
 	/* Fault should not cross bo boundary. */
 	page_offset &= ~(fault_page_size - 1);
-	if (page_offset + fault_page_size > bo->mem.num_pages)
+	if (page_offset + fault_page_size > bo->resource->num_pages)
 		goto out_fallback;
 
-	if (bo->mem.bus.is_iomem)
+	if (bo->resource->bus.is_iomem)
 		pfn = ttm_bo_io_mem_pfn(bo, page_offset);
 	else
 		pfn = page_to_pfn(ttm->pages[page_offset]);
@@ -213,7 +213,7 @@ static vm_fault_t ttm_bo_vm_insert_huge(struct vm_fault *vmf,
 		goto out_fallback;
 
 	/* Check that memory is contiguous. */
-	if (!bo->mem.bus.is_iomem) {
+	if (!bo->resource->bus.is_iomem) {
 		for (i = 1; i < fault_page_size; ++i) {
 			if (page_to_pfn(ttm->pages[page_offset + i]) != pfn + i)
 				goto out_fallback;
@@ -299,7 +299,7 @@ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
 		if (unlikely(ret != 0))
 			return ret;
 
-	err = ttm_mem_io_reserve(bdev, &bo->mem);
+	err = ttm_mem_io_reserve(bdev, bo->resource);
 	if (unlikely(err != 0))
 		return VM_FAULT_SIGBUS;
 
@@ -308,11 +308,11 @@ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
 	page_last = vma_pages(vma) + vma->vm_pgoff -
 		drm_vma_node_start(&bo->base.vma_node);
 
-	if (unlikely(page_offset >= bo->mem.num_pages))
+	if (unlikely(page_offset >= bo->resource->num_pages))
 		return VM_FAULT_SIGBUS;
 
-	prot = ttm_io_prot(bo, &bo->mem, prot);
-	if (!bo->mem.bus.is_iomem) {
+	prot = ttm_io_prot(bo, bo->resource, prot);
+	if (!bo->resource->bus.is_iomem) {
 		struct ttm_operation_ctx ctx = {
 			.interruptible = false,
 			.no_wait_gpu = false,
@@ -337,7 +337,7 @@ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
 	 * first page.
 	 */
 	for (i = 0; i < num_prefault; ++i) {
-		if (bo->mem.bus.is_iomem) {
+		if (bo->resource->bus.is_iomem) {
 			pfn = ttm_bo_io_mem_pfn(bo, page_offset);
 		} else {
 			page = ttm->pages[page_offset];
@@ -521,14 +521,14 @@ int ttm_bo_vm_access(struct vm_area_struct *vma, unsigned long addr,
 			<< PAGE_SHIFT);
 	int ret;
 
-	if (len < 1 || (offset + len) >> PAGE_SHIFT > bo->mem.num_pages)
+	if (len < 1 || (offset + len) >> PAGE_SHIFT > bo->resource->num_pages)
 		return -EIO;
 
 	ret = ttm_bo_reserve(bo, true, false, NULL);
 	if (ret)
 		return ret;
 
-	switch (bo->mem.mem_type) {
+	switch (bo->resource->mem_type) {
 	case TTM_PL_SYSTEM:
 		if (unlikely(bo->ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
 			ret = ttm_tt_swapin(bo->ttm);
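
For driver code picking up this rename, the conversion is purely mechanical: every bo->mem.X access becomes bo->resource->X. An illustrative before/after (a hypothetical driver snippet, not taken from this patch):

/* before: access through the embedded struct */
if (bo->mem.mem_type == TTM_PL_VRAM)
	pages = bo->mem.num_pages;

/* after: the same fields, reached through the new pointer */
if (bo->resource->mem_type == TTM_PL_VRAM)
	pages = bo->resource->num_pages;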