From d3116756a710e3cd51293a9d58b525957ab7e784 Mon Sep 17 00:00:00 2001
From: Christian König
Date: Mon, 12 Apr 2021 15:11:47 +0200
Subject: drm/ttm: rename bo->mem and make it a pointer
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

When we want to decouple resource management from buffer management we
need to be able to handle resources separately.

Add a resource pointer and rename bo->mem so that all code needs to
change to access the pointer instead.

No functional change.

Signed-off-by: Christian König
Reviewed-by: Matthew Auld
Link: https://patchwork.freedesktop.org/patch/msgid/20210430092508.60710-4-christian.koenig@amd.com
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 50 +++++++++++++++----------------
 1 file changed, 25 insertions(+), 25 deletions(-)

(limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_object.c')

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 3b509b0db68f..03c6b63d1d54 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -362,14 +362,14 @@ int amdgpu_bo_create_kernel_at(struct amdgpu_device *adev,
 	if (cpu_addr)
 		amdgpu_bo_kunmap(*bo_ptr);
 
-	ttm_resource_free(&(*bo_ptr)->tbo, &(*bo_ptr)->tbo.mem);
+	ttm_resource_free(&(*bo_ptr)->tbo, (*bo_ptr)->tbo.resource);
 
 	for (i = 0; i < (*bo_ptr)->placement.num_placement; ++i) {
 		(*bo_ptr)->placements[i].fpfn = offset >> PAGE_SHIFT;
 		(*bo_ptr)->placements[i].lpfn = (offset + size) >> PAGE_SHIFT;
 	}
 	r = ttm_bo_mem_space(&(*bo_ptr)->tbo, &(*bo_ptr)->placement,
-			     &(*bo_ptr)->tbo.mem, &ctx);
+			     (*bo_ptr)->tbo.resource, &ctx);
 	if (r)
 		goto error;
 
@@ -573,15 +573,15 @@ int amdgpu_bo_create(struct amdgpu_device *adev,
 		return r;
 
 	if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
-	    bo->tbo.mem.mem_type == TTM_PL_VRAM &&
-	    bo->tbo.mem.start < adev->gmc.visible_vram_size >> PAGE_SHIFT)
+	    bo->tbo.resource->mem_type == TTM_PL_VRAM &&
+	    bo->tbo.resource->start < adev->gmc.visible_vram_size >> PAGE_SHIFT)
 		amdgpu_cs_report_moved_bytes(adev, ctx.bytes_moved,
 					     ctx.bytes_moved);
 	else
 		amdgpu_cs_report_moved_bytes(adev, ctx.bytes_moved, 0);
 
 	if (bp->flags & AMDGPU_GEM_CREATE_VRAM_CLEARED &&
-	    bo->tbo.mem.mem_type == TTM_PL_VRAM) {
+	    bo->tbo.resource->mem_type == TTM_PL_VRAM) {
 		struct dma_fence *fence;
 
 		r = amdgpu_fill_buffer(bo, 0, bo->tbo.base.resv, &fence);
@@ -761,7 +761,7 @@ int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr)
 	if (r < 0)
 		return r;
 
-	r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.mem.num_pages, &bo->kmap);
+	r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.resource->num_pages, &bo->kmap);
 	if (r)
 		return r;
 
@@ -884,8 +884,8 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
 	domain = amdgpu_bo_get_preferred_pin_domain(adev, domain);
 
 	if (bo->tbo.pin_count) {
-		uint32_t mem_type = bo->tbo.mem.mem_type;
-		uint32_t mem_flags = bo->tbo.mem.placement;
+		uint32_t mem_type = bo->tbo.resource->mem_type;
+		uint32_t mem_flags = bo->tbo.resource->placement;
 
 		if (!(domain & amdgpu_mem_type_to_domain(mem_type)))
 			return -EINVAL;
@@ -935,7 +935,7 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
 
 	ttm_bo_pin(&bo->tbo);
 
-	domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
+	domain = amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type);
 	if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
 		atomic64_add(amdgpu_bo_size(bo), &adev->vram_pin_size);
 		atomic64_add(amdgpu_vram_mgr_bo_visible_size(bo),
@@ -987,11 +987,11 @@ void amdgpu_bo_unpin(struct amdgpu_bo *bo)
 	if (bo->tbo.base.import_attach)
 		dma_buf_unpin(bo->tbo.base.import_attach);
 
-	if (bo->tbo.mem.mem_type == TTM_PL_VRAM) {
+	if (bo->tbo.resource->mem_type == TTM_PL_VRAM) {
 		atomic64_sub(amdgpu_bo_size(bo), &adev->vram_pin_size);
 		atomic64_sub(amdgpu_vram_mgr_bo_visible_size(bo),
 			     &adev->visible_pin_size);
-	} else if (bo->tbo.mem.mem_type == TTM_PL_TT) {
+	} else if (bo->tbo.resource->mem_type == TTM_PL_TT) {
 		atomic64_sub(amdgpu_bo_size(bo), &adev->gart_pin_size);
 	}
 }
@@ -1223,7 +1223,7 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
 {
 	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
 	struct amdgpu_bo *abo;
-	struct ttm_resource *old_mem = &bo->mem;
+	struct ttm_resource *old_mem = bo->resource;
 
 	if (!amdgpu_bo_is_amdgpu_bo(bo))
 		return;
@@ -1234,7 +1234,7 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
 	amdgpu_bo_kunmap(abo);
 
 	if (abo->tbo.base.dma_buf && !abo->tbo.base.import_attach &&
-	    bo->mem.mem_type != TTM_PL_SYSTEM)
+	    bo->resource->mem_type != TTM_PL_SYSTEM)
 		dma_buf_move_notify(abo->tbo.base.dma_buf);
 
 	/* remember the eviction */
@@ -1254,7 +1254,7 @@ void amdgpu_bo_get_memory(struct amdgpu_bo *bo, uint64_t *vram_mem,
 {
 	unsigned int domain;
 
-	domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
+	domain = amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type);
 	switch (domain) {
 	case AMDGPU_GEM_DOMAIN_VRAM:
 		*vram_mem += amdgpu_bo_size(bo);
@@ -1296,7 +1296,7 @@ void amdgpu_bo_release_notify(struct ttm_buffer_object *bo)
 	if (bo->base.resv == &bo->base._resv)
 		amdgpu_amdkfd_remove_fence_on_pt_pd_bos(abo);
 
-	if (bo->mem.mem_type != TTM_PL_VRAM || !bo->mem.mm_node ||
+	if (bo->resource->mem_type != TTM_PL_VRAM || !bo->resource->mm_node ||
 	    !(abo->flags & AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE))
 		return;
 
@@ -1333,10 +1333,10 @@ vm_fault_t amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
 	/* Remember that this BO was accessed by the CPU */
 	abo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
 
-	if (bo->mem.mem_type != TTM_PL_VRAM)
+	if (bo->resource->mem_type != TTM_PL_VRAM)
 		return 0;
 
-	offset = bo->mem.start << PAGE_SHIFT;
+	offset = bo->resource->start << PAGE_SHIFT;
 	if ((offset + bo->base.size) <= adev->gmc.visible_vram_size)
 		return 0;
 
@@ -1359,9 +1359,9 @@ vm_fault_t amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
 	else if (unlikely(r))
 		return VM_FAULT_SIGBUS;
 
-	offset = bo->mem.start << PAGE_SHIFT;
+	offset = bo->resource->start << PAGE_SHIFT;
 	/* this should never happen */
-	if (bo->mem.mem_type == TTM_PL_VRAM &&
+	if (bo->resource->mem_type == TTM_PL_VRAM &&
 	    (offset + bo->base.size) > adev->gmc.visible_vram_size)
 		return VM_FAULT_SIGBUS;
 
@@ -1446,11 +1446,11 @@ int amdgpu_bo_sync_wait(struct amdgpu_bo *bo, void *owner, bool intr)
 */
 u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo)
 {
-	WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_SYSTEM);
+	WARN_ON_ONCE(bo->tbo.resource->mem_type == TTM_PL_SYSTEM);
 	WARN_ON_ONCE(!dma_resv_is_locked(bo->tbo.base.resv) &&
 		     !bo->tbo.pin_count && bo->tbo.type != ttm_bo_type_kernel);
-	WARN_ON_ONCE(bo->tbo.mem.start == AMDGPU_BO_INVALID_OFFSET);
-	WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_VRAM &&
+	WARN_ON_ONCE(bo->tbo.resource->start == AMDGPU_BO_INVALID_OFFSET);
+	WARN_ON_ONCE(bo->tbo.resource->mem_type == TTM_PL_VRAM &&
 		     !(bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS));
 
 	return amdgpu_bo_gpu_offset_no_check(bo);
@@ -1468,8 +1468,8 @@ u64 amdgpu_bo_gpu_offset_no_check(struct amdgpu_bo *bo)
 	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
 	uint64_t offset;
 
-	offset = (bo->tbo.mem.start << PAGE_SHIFT) +
-		 amdgpu_ttm_domain_start(adev, bo->tbo.mem.mem_type);
+	offset = (bo->tbo.resource->start << PAGE_SHIFT) +
+		 amdgpu_ttm_domain_start(adev, bo->tbo.resource->mem_type);
 
 	return amdgpu_gmc_sign_extend(offset);
 }
@@ -1522,7 +1522,7 @@ u64 amdgpu_bo_print_info(int id, struct amdgpu_bo *bo, struct seq_file *m)
 	unsigned int pin_count;
 	u64 size;
 
-	domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
+	domain = amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type);
 	switch (domain) {
 	case AMDGPU_GEM_DOMAIN_VRAM:
 		placement = "VRAM";
--
cgit v1.2.3
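The rename above is mechanical: every read of the embedded bo->tbo.mem becomes a
dereference of bo->tbo.resource. A minimal sketch of the resulting access pattern,
reusing the visible-VRAM check from amdgpu_bo_create() in the diff; the helper
itself is hypothetical, the types and fields are the real post-patch ones:

/* Sketch only: hypothetical helper, real TTM/amdgpu types. */
static bool example_bo_in_visible_vram(struct amdgpu_bo *bo,
				       struct amdgpu_device *adev)
{
	struct ttm_resource *res = bo->tbo.resource;	/* was: &bo->tbo.mem */

	/* all placement state now sits behind one pointer dereference */
	return res->mem_type == TTM_PL_VRAM &&
	       res->start < adev->gmc.visible_vram_size >> PAGE_SHIFT;
}

Because nothing is moved yet, this first patch only changes how the state is
reached, which is what makes the later decoupling patches possible.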
From bfa3357ef9abc9d56a2910222d2deeb9f15c91ff Mon Sep 17 00:00:00 2001
From: Christian König
Date: Thu, 15 Apr 2021 09:52:58 +0200
Subject: drm/ttm: allocate resource object instead of embedding it v2
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

To improve the handling we want to establish the resource object as base
class for the backend allocations.

v2: add missing error handling

Signed-off-by: Christian König
Acked-by: Thomas Hellström
Link: https://patchwork.freedesktop.org/patch/msgid/20210602100914.46246-1-christian.koenig@amd.com
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_object.c |  4 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c    | 54 +++++++++----------
 drivers/gpu/drm/nouveau/nouveau_bo.c       |  2 +-
 drivers/gpu/drm/radeon/radeon_ttm.c        |  2 +-
 drivers/gpu/drm/ttm/ttm_bo.c               | 83 ++++++++++--------------------
 drivers/gpu/drm/ttm/ttm_bo_util.c          | 43 ++++++++--------
 drivers/gpu/drm/ttm/ttm_resource.c         | 31 ++++++++---
 drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c |  2 +-
 include/drm/ttm/ttm_bo_api.h               |  1 -
 include/drm/ttm/ttm_bo_driver.h            | 10 ++--
 include/drm/ttm/ttm_resource.h             |  4 +-
 11 files changed, 110 insertions(+), 126 deletions(-)

(limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_object.c')

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 03c6b63d1d54..59723c3d5826 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -362,14 +362,14 @@ int amdgpu_bo_create_kernel_at(struct amdgpu_device *adev,
 	if (cpu_addr)
 		amdgpu_bo_kunmap(*bo_ptr);
 
-	ttm_resource_free(&(*bo_ptr)->tbo, (*bo_ptr)->tbo.resource);
+	ttm_resource_free(&(*bo_ptr)->tbo, &(*bo_ptr)->tbo.resource);
 
 	for (i = 0; i < (*bo_ptr)->placement.num_placement; ++i) {
 		(*bo_ptr)->placements[i].fpfn = offset >> PAGE_SHIFT;
 		(*bo_ptr)->placements[i].lpfn = (offset + size) >> PAGE_SHIFT;
 	}
 	r = ttm_bo_mem_space(&(*bo_ptr)->tbo, &(*bo_ptr)->placement,
-			     (*bo_ptr)->tbo.resource, &ctx);
+			     &(*bo_ptr)->tbo.resource, &ctx);
 	if (r)
 		goto error;
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 663aa7d2e2ea..69db89261650 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -491,7 +491,7 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
 			return r;
 
 		amdgpu_ttm_backend_unbind(bo->bdev, bo->ttm);
-		ttm_resource_free(bo, bo->resource);
+		ttm_resource_free(bo, &bo->resource);
 		ttm_bo_assign_mem(bo, new_mem);
 		goto out;
 	}
@@ -950,9 +950,9 @@ int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo)
 	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
 	struct ttm_operation_ctx ctx = { false, false };
 	struct amdgpu_ttm_tt *gtt = (void *)bo->ttm;
-	struct ttm_resource tmp;
 	struct ttm_placement placement;
 	struct ttm_place placements;
+	struct ttm_resource *tmp;
 	uint64_t addr, flags;
 	int r;
 
@@ -962,37 +962,37 @@ int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo)
 	addr = amdgpu_gmc_agp_addr(bo);
 	if (addr != AMDGPU_BO_INVALID_OFFSET) {
 		bo->resource->start = addr >> PAGE_SHIFT;
-	} else {
+		return 0;
+	}
 
-		/* allocate GART space */
-		placement.num_placement = 1;
-		placement.placement = &placements;
-		placement.num_busy_placement = 1;
-		placement.busy_placement = &placements;
-		placements.fpfn = 0;
-		placements.lpfn = adev->gmc.gart_size >> PAGE_SHIFT;
-		placements.mem_type = TTM_PL_TT;
-		placements.flags = bo->resource->placement;
-
-		r = ttm_bo_mem_space(bo, &placement, &tmp, &ctx);
-		if (unlikely(r))
-			return r;
+	/* allocate GART space */
+	placement.num_placement = 1;
+	placement.placement = &placements;
+	placement.num_busy_placement = 1;
+	placement.busy_placement = &placements;
+	placements.fpfn = 0;
+	placements.lpfn = adev->gmc.gart_size >> PAGE_SHIFT;
+	placements.mem_type = TTM_PL_TT;
+	placements.flags = bo->resource->placement;
 
-		/* compute PTE flags for this buffer object */
-		flags = amdgpu_ttm_tt_pte_flags(adev, bo->ttm, &tmp);
+	r = ttm_bo_mem_space(bo, &placement, &tmp, &ctx);
+	if (unlikely(r))
+		return r;
 
-		/* Bind pages */
-		gtt->offset = (u64)tmp.start << PAGE_SHIFT;
-		r = amdgpu_ttm_gart_bind(adev, bo, flags);
-		if (unlikely(r)) {
-			ttm_resource_free(bo, &tmp);
-			return r;
-		}
+	/* compute PTE flags for this buffer object */
+	flags = amdgpu_ttm_tt_pte_flags(adev, bo->ttm, tmp);
 
-		ttm_resource_free(bo, bo->resource);
-		ttm_bo_assign_mem(bo, &tmp);
+	/* Bind pages */
+	gtt->offset = (u64)tmp->start << PAGE_SHIFT;
+	r = amdgpu_ttm_gart_bind(adev, bo, flags);
+	if (unlikely(r)) {
+		ttm_resource_free(bo, &tmp);
+		return r;
 	}
 
+	ttm_resource_free(bo, &bo->resource);
+	ttm_bo_assign_mem(bo, tmp);
+
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index e688ca77483d..3a0d9b3bf991 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -1009,7 +1009,7 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict,
 	if (old_reg->mem_type == TTM_PL_TT &&
 	    new_reg->mem_type == TTM_PL_SYSTEM) {
 		nouveau_ttm_tt_unbind(bo->bdev, bo->ttm);
-		ttm_resource_free(bo, bo->resource);
+		ttm_resource_free(bo, &bo->resource);
 		ttm_bo_assign_mem(bo, new_reg);
 		goto out;
 	}
 
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index 2507c1741681..cdffa9b65108 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -229,7 +229,7 @@ static int radeon_bo_move(struct ttm_buffer_object *bo, bool evict,
 	if (old_mem->mem_type == TTM_PL_TT &&
 	    new_mem->mem_type == TTM_PL_SYSTEM) {
 		radeon_ttm_tt_unbind(bo->bdev, bo->ttm);
-		ttm_resource_free(bo, bo->resource);
+		ttm_resource_free(bo, &bo->resource);
 		ttm_bo_assign_mem(bo, new_mem);
 		goto out;
 	}
 
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 5a7ab4b35b2d..4ed56520b81d 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -223,7 +223,7 @@ static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo)
 		bo->bdev->funcs->delete_mem_notify(bo);
 
 	ttm_bo_tt_destroy(bo);
-	ttm_resource_free(bo, bo->resource);
+	ttm_resource_free(bo, &bo->resource);
 }
 
 static int ttm_bo_individualize_resv(struct ttm_buffer_object *bo)
@@ -489,7 +489,7 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo,
 			struct ttm_operation_ctx *ctx)
 {
 	struct ttm_device *bdev = bo->bdev;
-	struct ttm_resource evict_mem;
+	struct ttm_resource *evict_mem;
 	struct ttm_placement placement;
 	struct ttm_place hop;
 	int ret = 0;
@@ -519,7 +519,7 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo,
 		goto out;
 	}
 
-	ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, ctx, &hop);
+	ret = ttm_bo_handle_move_mem(bo, evict_mem, true, ctx, &hop);
 	if (unlikely(ret)) {
 		WARN(ret == -EMULTIHOP, "Unexpected multihop in eviction - likely driver bug\n");
 		if (ret != -ERESTARTSYS)
@@ -728,14 +728,15 @@ static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo,
 */
 static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
 				  const struct ttm_place *place,
-				  struct ttm_resource *mem,
+				  struct ttm_resource **mem,
 				  struct ttm_operation_ctx *ctx)
 {
 	struct ttm_device *bdev = bo->bdev;
-	struct ttm_resource_manager *man = ttm_manager_type(bdev, mem->mem_type);
+	struct ttm_resource_manager *man;
 	struct ww_acquire_ctx *ticket;
 	int ret;
 
+	man = ttm_manager_type(bdev, (*mem)->mem_type);
 	ticket = dma_resv_locking_ctx(bo->base.resv);
 	do {
 		ret = ttm_resource_alloc(bo, place, mem);
@@ -749,37 +750,7 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
 		return ret;
 	} while (1);
 
-	return ttm_bo_add_move_fence(bo, man, mem, ctx->no_wait_gpu);
-}
-
-/**
- * ttm_bo_mem_placement - check if placement is compatible
- * @bo: BO to find memory for
- * @place: where to search
- * @mem: the memory object to fill in
- *
- * Check if placement is compatible and fill in mem structure.
- * Returns -EBUSY if placement won't work or negative error code.
- * 0 when placement can be used.
- */
-static int ttm_bo_mem_placement(struct ttm_buffer_object *bo,
-				const struct ttm_place *place,
-				struct ttm_resource *mem)
-{
-	struct ttm_device *bdev = bo->bdev;
-	struct ttm_resource_manager *man;
-
-	man = ttm_manager_type(bdev, place->mem_type);
-	if (!man || !ttm_resource_manager_used(man))
-		return -EBUSY;
-
-	mem->mem_type = place->mem_type;
-	mem->placement = place->flags;
-
-	spin_lock(&bo->bdev->lru_lock);
-	ttm_bo_move_to_lru_tail(bo, mem, NULL);
-	spin_unlock(&bo->bdev->lru_lock);
-	return 0;
+	return ttm_bo_add_move_fence(bo, man, *mem, ctx->no_wait_gpu);
 }
 
 /*
@@ -792,7 +763,7 @@ static int ttm_bo_mem_placement(struct ttm_buffer_object *bo,
 */
 int ttm_bo_mem_space(struct ttm_buffer_object *bo,
 		     struct ttm_placement *placement,
-		     struct ttm_resource *mem,
+		     struct ttm_resource **mem,
 		     struct ttm_operation_ctx *ctx)
 {
 	struct ttm_device *bdev = bo->bdev;
@@ -807,8 +778,8 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
 		const struct ttm_place *place = &placement->placement[i];
 		struct ttm_resource_manager *man;
 
-		ret = ttm_bo_mem_placement(bo, place, mem);
-		if (ret)
+		man = ttm_manager_type(bdev, place->mem_type);
+		if (!man || !ttm_resource_manager_used(man))
 			continue;
 
 		type_found = true;
@@ -818,8 +789,7 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
 		if (unlikely(ret))
 			goto error;
 
-		man = ttm_manager_type(bdev, mem->mem_type);
-		ret = ttm_bo_add_move_fence(bo, man, mem, ctx->no_wait_gpu);
+		ret = ttm_bo_add_move_fence(bo, man, *mem, ctx->no_wait_gpu);
 		if (unlikely(ret)) {
 			ttm_resource_free(bo, mem);
 			if (ret == -EBUSY)
@@ -832,9 +802,10 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
 
 	for (i = 0; i < placement->num_busy_placement; ++i) {
 		const struct ttm_place *place = &placement->busy_placement[i];
+		struct ttm_resource_manager *man;
 
-		ret = ttm_bo_mem_placement(bo, place, mem);
-		if (ret)
+		man = ttm_manager_type(bdev, place->mem_type);
+		if (!man || !ttm_resource_manager_used(man))
 			continue;
 
 		type_found = true;
@@ -861,12 +832,12 @@ error:
 EXPORT_SYMBOL(ttm_bo_mem_space);
 
 static int ttm_bo_bounce_temp_buffer(struct ttm_buffer_object *bo,
-				     struct ttm_resource *mem,
+				     struct ttm_resource **mem,
 				     struct ttm_operation_ctx *ctx,
 				     struct ttm_place *hop)
 {
 	struct ttm_placement hop_placement;
-	struct ttm_resource hop_mem;
+	struct ttm_resource *hop_mem;
 	int ret;
 
 	hop_placement.num_placement = hop_placement.num_busy_placement = 1;
@@ -877,7 +848,7 @@ static int ttm_bo_bounce_temp_buffer(struct ttm_buffer_object *bo,
 	if (ret)
 		return ret;
 	/* move to the bounce domain */
-	ret = ttm_bo_handle_move_mem(bo, &hop_mem, false, ctx, NULL);
+	ret = ttm_bo_handle_move_mem(bo, hop_mem, false, ctx, NULL);
 	if (ret) {
 		ttm_resource_free(bo, &hop_mem);
 		return ret;
@@ -889,14 +860,12 @@ static int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
 			      struct ttm_placement *placement,
 			      struct ttm_operation_ctx *ctx)
 {
+	struct ttm_resource *mem;
 	struct ttm_place hop;
-	struct ttm_resource mem;
 	int ret;
 
 	dma_resv_assert_held(bo->base.resv);
 
-	memset(&hop, 0, sizeof(hop));
-
 	/*
 	 * Determine where to move the buffer.
 	 *
@@ -910,7 +879,7 @@ static int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
 	if (ret)
 		return ret;
 bounce:
-	ret = ttm_bo_handle_move_mem(bo, &mem, false, ctx, &hop);
+	ret = ttm_bo_handle_move_mem(bo, mem, false, ctx, &hop);
 	if (ret == -EMULTIHOP) {
 		ret = ttm_bo_bounce_temp_buffer(bo, &mem, ctx, &hop);
 		if (ret)
@@ -1019,7 +988,7 @@ int ttm_bo_init_reserved(struct ttm_device *bdev,
 {
 	static const struct ttm_place sys_mem = { .mem_type = TTM_PL_SYSTEM };
 	bool locked;
-	int ret = 0;
+	int ret;
 
 	bo->destroy = destroy ? destroy : ttm_bo_default_destroy;
@@ -1029,8 +998,6 @@ int ttm_bo_init_reserved(struct ttm_device *bdev,
 	bo->bdev = bdev;
 	bo->type = type;
 	bo->page_alignment = page_alignment;
-	bo->resource = &bo->_mem;
-	ttm_resource_alloc(bo, &sys_mem, bo->resource);
 	bo->moving = NULL;
 	bo->pin_count = 0;
 	bo->sg = sg;
@@ -1042,6 +1009,12 @@ int ttm_bo_init_reserved(struct ttm_device *bdev,
 	}
 	atomic_inc(&ttm_glob.bo_count);
 
+	ret = ttm_resource_alloc(bo, &sys_mem, &bo->resource);
+	if (unlikely(ret)) {
+		ttm_bo_put(bo);
+		return ret;
+	}
+
 	/*
 	 * For ttm_bo_type_device buffers, allocate
 	 * address space from the device.
@@ -1170,7 +1143,7 @@ int ttm_bo_swapout(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx,
 	 */
 	if (bo->resource->mem_type != TTM_PL_SYSTEM) {
 		struct ttm_operation_ctx ctx = { false, false };
-		struct ttm_resource evict_mem;
+		struct ttm_resource *evict_mem;
 		struct ttm_place place, hop;
 
 		memset(&place, 0, sizeof(place));
@@ -1182,7 +1155,7 @@ int ttm_bo_swapout(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx,
 		if (unlikely(ret))
 			goto out;
 
-		ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, &ctx, &hop);
+		ret = ttm_bo_handle_move_mem(bo, evict_mem, true, &ctx, &hop);
 		if (unlikely(ret != 0)) {
 			WARN(ret == -EMULTIHOP, "Unexpected multihop in swaput - likely driver bug.\n");
 			goto out;
 
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index aedf02a31c70..1b326e70cb02 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -176,16 +176,17 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
 		       struct ttm_operation_ctx *ctx,
 		       struct ttm_resource *new_mem)
 {
+	struct ttm_resource *old_mem = bo->resource;
 	struct ttm_device *bdev = bo->bdev;
-	struct ttm_resource_manager *man = ttm_manager_type(bdev, new_mem->mem_type);
+	struct ttm_resource_manager *man;
 	struct ttm_tt *ttm = bo->ttm;
-	struct ttm_resource *old_mem = bo->resource;
-	struct ttm_resource old_copy = *old_mem;
 	void *old_iomap;
 	void *new_iomap;
 	int ret;
 	unsigned long i;
 
+	man = ttm_manager_type(bdev, new_mem->mem_type);
+
 	ret = ttm_bo_wait_ctx(bo, ctx);
 	if (ret)
 		return ret;
@@ -201,7 +202,7 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
 	 * Single TTM move. NOP.
 	 */
 	if (old_iomap == NULL && new_iomap == NULL)
-		goto out2;
+		goto out1;
 
 	/*
 	 * Don't move nonexistent data. Clear destination instead.
@@ -210,7 +211,7 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
 	    (ttm == NULL || (!ttm_tt_is_populated(ttm) &&
 			     !(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)))) {
 		memset_io(new_iomap, 0, new_mem->num_pages*PAGE_SIZE);
-		goto out2;
+		goto out1;
 	}
 
 	/*
@@ -235,27 +236,25 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
 			ret = ttm_copy_io_page(new_iomap, old_iomap, i);
 		}
 		if (ret)
-			goto out1;
+			break;
 	}
 	mb();
-out2:
-	old_copy = *old_mem;
+out1:
+	ttm_resource_iounmap(bdev, new_mem, new_iomap);
+out:
+	ttm_resource_iounmap(bdev, old_mem, old_iomap);
+
+	if (ret) {
+		ttm_resource_free(bo, &new_mem);
+		return ret;
+	}
 
+	ttm_resource_free(bo, &bo->resource);
 	ttm_bo_assign_mem(bo, new_mem);
 
 	if (!man->use_tt)
 		ttm_bo_tt_destroy(bo);
 
-out1:
-	ttm_resource_iounmap(bdev, old_mem, new_iomap);
-out:
-	ttm_resource_iounmap(bdev, &old_copy, old_iomap);
-
-	/*
-	 * On error, keep the mm node!
-	 */
-	if (!ret)
-		ttm_resource_free(bo, &old_copy);
 	return ret;
 }
 EXPORT_SYMBOL(ttm_bo_move_memcpy);
@@ -566,7 +565,7 @@ static int ttm_bo_wait_free_node(struct ttm_buffer_object *bo,
 	if (!dst_use_tt)
 		ttm_bo_tt_destroy(bo);
-	ttm_resource_free(bo, bo->resource);
+	ttm_resource_free(bo, &bo->resource);
 	return 0;
 }
@@ -629,7 +628,7 @@ static void ttm_bo_move_pipeline_evict(struct ttm_buffer_object *bo,
 	}
 	spin_unlock(&from->move_lock);
 
-	ttm_resource_free(bo, bo->resource);
+	ttm_resource_free(bo, &bo->resource);
 
 	dma_fence_put(bo->moving);
 	bo->moving = dma_fence_get(fence);
@@ -678,11 +677,11 @@ int ttm_bo_pipeline_gutting(struct ttm_buffer_object *bo)
 	if (ret)
 		ttm_bo_wait(bo, false, false);
 
-	ttm_resource_alloc(bo, &sys_mem, bo->resource);
+	ret = ttm_resource_alloc(bo, &sys_mem, &bo->resource);
 	bo->ttm = NULL;
 
 	dma_resv_unlock(&ghost->base._resv);
 	ttm_bo_put(ghost);
 
-	return 0;
+	return ret;
 }
 
diff --git a/drivers/gpu/drm/ttm/ttm_resource.c b/drivers/gpu/drm/ttm/ttm_resource.c
index 59e2b7157e41..65451e1bc303 100644
--- a/drivers/gpu/drm/ttm/ttm_resource.c
+++ b/drivers/gpu/drm/ttm/ttm_resource.c
@@ -27,10 +27,16 @@
 
 int ttm_resource_alloc(struct ttm_buffer_object *bo,
 		       const struct ttm_place *place,
-		       struct ttm_resource *res)
+		       struct ttm_resource **res_ptr)
 {
 	struct ttm_resource_manager *man =
 		ttm_manager_type(bo->bdev, place->mem_type);
+	struct ttm_resource *res;
+	int r;
+
+	res = kmalloc(sizeof(*res), GFP_KERNEL);
+	if (!res)
+		return -ENOMEM;
 
 	res->mm_node = NULL;
 	res->start = 0;
@@ -41,18 +47,27 @@ int ttm_resource_alloc(struct ttm_buffer_object *bo,
 	res->bus.offset = 0;
 	res->bus.is_iomem = false;
 	res->bus.caching = ttm_cached;
+	r = man->func->alloc(man, bo, place, res);
+	if (r) {
+		kfree(res);
+		return r;
+	}
 
-	return man->func->alloc(man, bo, place, res);
+	*res_ptr = res;
+	return 0;
 }
 
-void ttm_resource_free(struct ttm_buffer_object *bo, struct ttm_resource *res)
+void ttm_resource_free(struct ttm_buffer_object *bo, struct ttm_resource **res)
 {
-	struct ttm_resource_manager *man =
-		ttm_manager_type(bo->bdev, res->mem_type);
+	struct ttm_resource_manager *man;
 
-	man->func->free(man, res);
-	res->mm_node = NULL;
-	res->mem_type = TTM_PL_SYSTEM;
+	if (!*res)
+		return;
+
+	man = ttm_manager_type(bo->bdev, (*res)->mem_type);
+	man->func->free(man, *res);
+	kfree(*res);
+	*res = NULL;
 }
 EXPORT_SYMBOL(ttm_resource_free);
 
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
index ed8563ef9a3b..bfcf31bf7e37 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
@@ -741,7 +741,7 @@ static int vmw_move(struct ttm_buffer_object *bo,
 			goto fail;
 
 		vmw_ttm_unbind(bo->bdev, bo->ttm);
-		ttm_resource_free(bo, bo->resource);
+		ttm_resource_free(bo, &bo->resource);
 		ttm_bo_assign_mem(bo, new_mem);
 		return 0;
 	} else {
 
diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h
index 291a339a7e08..f681bbdbc698 100644
--- a/include/drm/ttm/ttm_bo_api.h
+++ b/include/drm/ttm/ttm_bo_api.h
@@ -137,7 +137,6 @@ struct ttm_buffer_object {
 	 */
 
 	struct ttm_resource *resource;
-	struct ttm_resource _mem;
 	struct ttm_tt *ttm;
 	bool deleted;
 
diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h
index 1a9ba0b13622..ead0ef7136c8 100644
--- a/include/drm/ttm/ttm_bo_driver.h
+++ b/include/drm/ttm/ttm_bo_driver.h
@@ -96,7 +96,7 @@ struct ttm_lru_bulk_move {
 */
 int ttm_bo_mem_space(struct ttm_buffer_object *bo,
 		     struct ttm_placement *placement,
-		     struct ttm_resource *mem,
+		     struct ttm_resource **mem,
 		     struct ttm_operation_ctx *ctx);
 
 /**
@@ -188,8 +188,8 @@ ttm_bo_move_to_lru_tail_unlocked(struct ttm_buffer_object *bo)
 static inline void ttm_bo_assign_mem(struct ttm_buffer_object *bo,
 				     struct ttm_resource *new_mem)
 {
-	bo->_mem = *new_mem;
-	new_mem->mm_node = NULL;
+	WARN_ON(bo->resource);
+	bo->resource = new_mem;
 }
 
 /**
@@ -202,9 +202,7 @@ static inline void ttm_bo_assign_mem(struct ttm_buffer_object *bo,
 static inline void ttm_bo_move_null(struct ttm_buffer_object *bo,
 				    struct ttm_resource *new_mem)
 {
-	struct ttm_resource *old_mem = bo->resource;
-
-	WARN_ON(old_mem->mm_node != NULL);
+	ttm_resource_free(bo, &bo->resource);
 	ttm_bo_assign_mem(bo, new_mem);
 }
 
diff --git a/include/drm/ttm/ttm_resource.h b/include/drm/ttm/ttm_resource.h
index 890b9d369519..c17c1a52070d 100644
--- a/include/drm/ttm/ttm_resource.h
+++ b/include/drm/ttm/ttm_resource.h
@@ -225,8 +225,8 @@ ttm_resource_manager_cleanup(struct ttm_resource_manager *man)
 
 int ttm_resource_alloc(struct ttm_buffer_object *bo,
 		       const struct ttm_place *place,
-		       struct ttm_resource *res);
-void ttm_resource_free(struct ttm_buffer_object *bo, struct ttm_resource *res);
+		       struct ttm_resource **res);
+void ttm_resource_free(struct ttm_buffer_object *bo, struct ttm_resource **res);
 
 void ttm_resource_manager_init(struct ttm_resource_manager *man,
 			       unsigned long p_size);
--
cgit v1.2.3
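With the resource heap-allocated, the calling convention visible throughout this
patch is: allocation functions fill in a struct ttm_resource **, and
ttm_resource_free() both frees the object and NULLs the caller's pointer, which
is why callers now pass &bo->resource. A hedged sketch of the resulting move
pattern; example_driver_copy() is hypothetical, the TTM calls match the
signatures introduced here:

static int example_move(struct ttm_buffer_object *bo,
			struct ttm_placement *placement,
			struct ttm_operation_ctx *ctx)
{
	struct ttm_resource *new_mem;	/* no longer embedded in the BO */
	int ret;

	/* ttm_bo_mem_space() now allocates and returns the resource */
	ret = ttm_bo_mem_space(bo, placement, &new_mem, ctx);
	if (ret)
		return ret;

	ret = example_driver_copy(bo, new_mem);	/* hypothetical copy step */
	if (ret) {
		ttm_resource_free(bo, &new_mem);	/* kfree()s and NULLs it */
		return ret;
	}

	/* drop the old resource, then hand the BO the new one */
	ttm_resource_free(bo, &bo->resource);
	ttm_bo_assign_mem(bo, new_mem);
	return 0;
}

The double pointer makes the ownership transfer explicit and lets
ttm_resource_free() be safely called on an already-freed slot.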
From cb1c81467af355829a4a9d8fa3f92ffab355d93c Mon Sep 17 00:00:00 2001
From: Christian König
Date: Fri, 30 Apr 2021 09:48:27 +0200
Subject: drm/ttm: flip the switch for driver allocated resources v2
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Instead of both driver and TTM allocating memory finalize embedding the
ttm_resource object as base into the driver backends.

v2: fix typo in vmwgfx grid mgr and double init in amdgpu_vram_mgr.c

Signed-off-by: Christian König
Reviewed-by: Matthew Auld
Link: https://patchwork.freedesktop.org/patch/msgid/20210602100914.46246-10-christian.koenig@amd.com
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c    | 44 ++++++++-----------
 drivers/gpu/drm/amd/amdgpu/amdgpu_object.c     |  2 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h |  5 ++-
 drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c   | 60 ++++++++++++--------------
 drivers/gpu/drm/drm_gem_vram_helper.c          |  3 +-
 drivers/gpu/drm/nouveau/nouveau_bo.c           |  8 +---
 drivers/gpu/drm/nouveau/nouveau_mem.c          | 11 +++--
 drivers/gpu/drm/nouveau/nouveau_mem.h          | 14 +++---
 drivers/gpu/drm/nouveau/nouveau_ttm.c          | 32 +++++++-------
 drivers/gpu/drm/ttm/ttm_range_manager.c        | 23 ++++------
 drivers/gpu/drm/ttm/ttm_resource.c             | 18 +-------
 drivers/gpu/drm/ttm/ttm_sys_manager.c          | 12 +++---
 drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c  | 24 +++++------
 drivers/gpu/drm/vmwgfx/vmwgfx_thp.c            | 27 ++++++------
 include/drm/ttm/ttm_range_manager.h            |  3 +-
 include/drm/ttm/ttm_resource.h                 | 43 ++++++++----------
 16 files changed, 140 insertions(+), 189 deletions(-)

(limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_object.c')

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
index 29113f72bc39..194f9eecf89c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
@@ -40,8 +40,7 @@ to_gtt_mgr(struct ttm_resource_manager *man)
 static inline struct amdgpu_gtt_node *
 to_amdgpu_gtt_node(struct ttm_resource *res)
 {
-	return container_of(res->mm_node, struct amdgpu_gtt_node,
-			    base.mm_nodes[0]);
+	return container_of(res, struct amdgpu_gtt_node, base.base);
 }
 
 /**
@@ -102,13 +101,13 @@ const struct attribute_group amdgpu_gtt_mgr_attr_group = {
 /**
 * amdgpu_gtt_mgr_has_gart_addr - Check if mem has address space
 *
- * @mem: the mem object to check
+ * @res: the mem object to check
 *
 * Check if a mem object has already address space allocated.
 */
-bool amdgpu_gtt_mgr_has_gart_addr(struct ttm_resource *mem)
+bool amdgpu_gtt_mgr_has_gart_addr(struct ttm_resource *res)
 {
-	struct amdgpu_gtt_node *node = to_amdgpu_gtt_node(mem);
+	struct amdgpu_gtt_node *node = to_amdgpu_gtt_node(res);
 
 	return drm_mm_node_allocated(&node->base.mm_nodes[0]);
 }
@@ -126,19 +125,20 @@ bool amdgpu_gtt_mgr_has_gart_addr(struct ttm_resource *mem)
 static int amdgpu_gtt_mgr_new(struct ttm_resource_manager *man,
 			      struct ttm_buffer_object *tbo,
 			      const struct ttm_place *place,
-			      struct ttm_resource *mem)
+			      struct ttm_resource **res)
 {
 	struct amdgpu_gtt_mgr *mgr = to_gtt_mgr(man);
+	uint32_t num_pages = PFN_UP(tbo->base.size);
 	struct amdgpu_gtt_node *node;
 	int r;
 
 	spin_lock(&mgr->lock);
-	if ((tbo->resource == mem || tbo->resource->mem_type != TTM_PL_TT) &&
-	    atomic64_read(&mgr->available) < mem->num_pages) {
+	if (tbo->resource && tbo->resource->mem_type != TTM_PL_TT &&
+	    atomic64_read(&mgr->available) < num_pages) {
 		spin_unlock(&mgr->lock);
 		return -ENOSPC;
 	}
-	atomic64_sub(mem->num_pages, &mgr->available);
+	atomic64_sub(num_pages, &mgr->available);
 	spin_unlock(&mgr->lock);
 
 	node = kzalloc(struct_size(node, base.mm_nodes, 1), GFP_KERNEL);
@@ -154,29 +154,28 @@ static int amdgpu_gtt_mgr_new(struct ttm_resource_manager *man,
 		spin_lock(&mgr->lock);
 		r = drm_mm_insert_node_in_range(&mgr->mm,
 						&node->base.mm_nodes[0],
-						mem->num_pages,
-						tbo->page_alignment, 0,
-						place->fpfn, place->lpfn,
+						num_pages, tbo->page_alignment,
+						0, place->fpfn, place->lpfn,
 						DRM_MM_INSERT_BEST);
 		spin_unlock(&mgr->lock);
 		if (unlikely(r))
 			goto err_free;
 
-		mem->start = node->base.mm_nodes[0].start;
+		node->base.base.start = node->base.mm_nodes[0].start;
 	} else {
 		node->base.mm_nodes[0].start = 0;
-		node->base.mm_nodes[0].size = mem->num_pages;
-		mem->start = AMDGPU_BO_INVALID_OFFSET;
+		node->base.mm_nodes[0].size = node->base.base.num_pages;
+		node->base.base.start = AMDGPU_BO_INVALID_OFFSET;
 	}
 
-	mem->mm_node = &node->base.mm_nodes[0];
+	*res = &node->base.base;
 	return 0;
 
 err_free:
 	kfree(node);
 
 err_out:
-	atomic64_add(mem->num_pages, &mgr->available);
+	atomic64_add(num_pages, &mgr->available);
 
 	return r;
 }
@@ -190,21 +189,16 @@ err_out:
 * Free the allocated GTT again.
 */
 static void amdgpu_gtt_mgr_del(struct ttm_resource_manager *man,
-			       struct ttm_resource *mem)
+			       struct ttm_resource *res)
 {
+	struct amdgpu_gtt_node *node = to_amdgpu_gtt_node(res);
 	struct amdgpu_gtt_mgr *mgr = to_gtt_mgr(man);
-	struct amdgpu_gtt_node *node;
-
-	if (!mem->mm_node)
-		return;
-
-	node = to_amdgpu_gtt_node(mem);
 
 	spin_lock(&mgr->lock);
 	if (drm_mm_node_allocated(&node->base.mm_nodes[0]))
 		drm_mm_remove_node(&node->base.mm_nodes[0]);
 	spin_unlock(&mgr->lock);
-	atomic64_add(mem->num_pages, &mgr->available);
+	atomic64_add(res->num_pages, &mgr->available);
 
 	kfree(node);
 }
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 59723c3d5826..19c1384a133f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -1296,7 +1296,7 @@ void amdgpu_bo_release_notify(struct ttm_buffer_object *bo)
 	if (bo->base.resv == &bo->base._resv)
 		amdgpu_amdkfd_remove_fence_on_pt_pd_bos(abo);
 
-	if (bo->resource->mem_type != TTM_PL_VRAM || !bo->resource->mm_node ||
+	if (bo->resource->mem_type != TTM_PL_VRAM ||
 	    !(abo->flags & AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE))
 		return;
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h
index 40f2adf305bc..59e0fefb15aa 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h
@@ -28,6 +28,7 @@
 
 #include
 #include
+#include
 
 /* state back for walking over vram_mgr and gtt_mgr allocations */
 struct amdgpu_res_cursor {
@@ -53,7 +54,7 @@ static inline void amdgpu_res_first(struct ttm_resource *res,
 {
 	struct drm_mm_node *node;
 
-	if (!res || !res->mm_node) {
+	if (!res) {
 		cur->start = start;
 		cur->size = size;
 		cur->remaining = size;
@@ -63,7 +64,7 @@ static inline void amdgpu_res_first(struct ttm_resource *res,
 
 	BUG_ON(start + size > res->num_pages << PAGE_SHIFT);
 
-	node = res->mm_node;
+	node = to_ttm_range_mgr_node(res)->mm_nodes;
 	while (start >= node->size << PAGE_SHIFT)
 		start -= node++->size << PAGE_SHIFT;
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
index 5ebfaed37e47..9a6df02477ce 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
@@ -219,19 +219,20 @@ static u64 amdgpu_vram_mgr_vis_size(struct amdgpu_device *adev,
 u64 amdgpu_vram_mgr_bo_visible_size(struct amdgpu_bo *bo)
 {
 	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
-	struct ttm_resource *mem = bo->tbo.resource;
-	struct drm_mm_node *nodes = mem->mm_node;
-	unsigned pages = mem->num_pages;
+	struct ttm_resource *res = bo->tbo.resource;
+	unsigned pages = res->num_pages;
+	struct drm_mm_node *mm;
 	u64 usage;
 
 	if (amdgpu_gmc_vram_full_visible(&adev->gmc))
 		return amdgpu_bo_size(bo);
 
-	if (mem->start >= adev->gmc.visible_vram_size >> PAGE_SHIFT)
+	if (res->start >= adev->gmc.visible_vram_size >> PAGE_SHIFT)
 		return 0;
 
-	for (usage = 0; nodes && pages; pages -= nodes->size, nodes++)
-		usage += amdgpu_vram_mgr_vis_size(adev, nodes);
+	mm = &container_of(res, struct ttm_range_mgr_node, base)->mm_nodes[0];
+	for (usage = 0; pages; pages -= mm->size, mm++)
+		usage += amdgpu_vram_mgr_vis_size(adev, mm);
 
 	return usage;
 }
@@ -367,7 +368,7 @@ static void amdgpu_vram_mgr_virt_start(struct ttm_resource *mem,
 static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
 			       struct ttm_buffer_object *tbo,
 			       const struct ttm_place *place,
-			       struct ttm_resource *mem)
+			       struct ttm_resource **res)
 {
 	unsigned long lpfn, num_nodes, pages_per_node, pages_left, pages;
 	struct amdgpu_vram_mgr *mgr = to_vram_mgr(man);
@@ -388,7 +389,7 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
 	max_bytes -= AMDGPU_VM_RESERVED_VRAM;
 
 	/* bail out quickly if there's likely not enough VRAM for this BO */
-	mem_bytes = (u64)mem->num_pages << PAGE_SHIFT;
+	mem_bytes = tbo->base.size;
 	if (atomic64_add_return(mem_bytes, &mgr->usage) > max_bytes) {
 		r = -ENOSPC;
 		goto error_sub;
@@ -406,7 +407,7 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
 #endif
 		pages_per_node = max_t(uint32_t, pages_per_node,
 				       tbo->page_alignment);
-		num_nodes = DIV_ROUND_UP(mem->num_pages, pages_per_node);
+		num_nodes = DIV_ROUND_UP(PFN_UP(mem_bytes), pages_per_node);
 	}
 
 	node = kvmalloc(struct_size(node, mm_nodes, num_nodes),
@@ -422,8 +423,7 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
 	if (place->flags & TTM_PL_FLAG_TOPDOWN)
 		mode = DRM_MM_INSERT_HIGH;
 
-	mem->start = 0;
-	pages_left = mem->num_pages;
+	pages_left = node->base.num_pages;
 
 	/* Limit maximum size to 2GB due to SG table limitations */
 	pages = min(pages_left, 2UL << (30 - PAGE_SHIFT));
@@ -451,7 +451,7 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
 		}
 
 		vis_usage += amdgpu_vram_mgr_vis_size(adev, &node->mm_nodes[i]);
-		amdgpu_vram_mgr_virt_start(mem, &node->mm_nodes[i]);
+		amdgpu_vram_mgr_virt_start(&node->base, &node->mm_nodes[i]);
 		pages_left -= pages;
 		++i;
 
@@ -461,10 +461,10 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
 	spin_unlock(&mgr->lock);
 
 	if (i == 1)
-		mem->placement |= TTM_PL_FLAG_CONTIGUOUS;
+		node->base.placement |= TTM_PL_FLAG_CONTIGUOUS;
 
 	atomic64_add(vis_usage, &mgr->vis_usage);
-	mem->mm_node = &node->mm_nodes[0];
+	*res = &node->base;
 	return 0;
 
 error_free:
@@ -487,28 +487,22 @@ error_sub:
 * Free the allocated VRAM again.
 */
 static void amdgpu_vram_mgr_del(struct ttm_resource_manager *man,
-				struct ttm_resource *mem)
+				struct ttm_resource *res)
 {
+	struct ttm_range_mgr_node *node = to_ttm_range_mgr_node(res);
 	struct amdgpu_vram_mgr *mgr = to_vram_mgr(man);
 	struct amdgpu_device *adev = to_amdgpu_device(mgr);
-	struct ttm_range_mgr_node *node;
 	uint64_t usage = 0, vis_usage = 0;
-	unsigned pages = mem->num_pages;
-	struct drm_mm_node *nodes;
-
-	if (!mem->mm_node)
-		return;
-
-	node = to_ttm_range_mgr_node(mem);
-	nodes = &node->mm_nodes[0];
+	unsigned i, pages;
 
 	spin_lock(&mgr->lock);
-	while (pages) {
-		pages -= nodes->size;
-		drm_mm_remove_node(nodes);
-		usage += nodes->size << PAGE_SHIFT;
-		vis_usage += amdgpu_vram_mgr_vis_size(adev, nodes);
-		++nodes;
+	for (i = 0, pages = res->num_pages; pages;
+	     pages -= node->mm_nodes[i].size, ++i) {
+		struct drm_mm_node *mm = &node->mm_nodes[i];
+
+		drm_mm_remove_node(mm);
+		usage += mm->size << PAGE_SHIFT;
+		vis_usage += amdgpu_vram_mgr_vis_size(adev, mm);
 	}
 	amdgpu_vram_mgr_do_reserve(man);
 	spin_unlock(&mgr->lock);
@@ -533,7 +527,7 @@ static void amdgpu_vram_mgr_del(struct ttm_resource_manager *man,
 * Allocate and fill a sg table from a VRAM allocation.
 */
 int amdgpu_vram_mgr_alloc_sgt(struct amdgpu_device *adev,
-			      struct ttm_resource *mem,
+			      struct ttm_resource *res,
 			      u64 offset, u64 length,
 			      struct device *dev,
 			      enum dma_data_direction dir,
@@ -549,7 +543,7 @@ int amdgpu_vram_mgr_alloc_sgt(struct amdgpu_device *adev,
 		return -ENOMEM;
 
 	/* Determine the number of DRM_MM nodes to export */
-	amdgpu_res_first(mem, offset, length, &cursor);
+	amdgpu_res_first(res, offset, length, &cursor);
 	while (cursor.remaining) {
 		num_entries++;
 		amdgpu_res_next(&cursor, cursor.size);
@@ -569,7 +563,7 @@ int amdgpu_vram_mgr_alloc_sgt(struct amdgpu_device *adev,
 	 * and the number of bytes from it. Access the following
 	 * DRM_MM node(s) if more buffer needs to exported
 	 */
-	amdgpu_res_first(mem, offset, length, &cursor);
+	amdgpu_res_first(res, offset, length, &cursor);
 	for_each_sgtable_sg((*sgt), sg, i) {
 		phys_addr_t phys = cursor.start + adev->gmc.aper_base;
 		size_t size = cursor.size;
 
diff --git a/drivers/gpu/drm/drm_gem_vram_helper.c b/drivers/gpu/drm/drm_gem_vram_helper.c
index 17a4c5d47b6a..2a1229b8364e 100644
--- a/drivers/gpu/drm/drm_gem_vram_helper.c
+++ b/drivers/gpu/drm/drm_gem_vram_helper.c
@@ -250,7 +250,8 @@ EXPORT_SYMBOL(drm_gem_vram_put);
 static u64 drm_gem_vram_pg_offset(struct drm_gem_vram_object *gbo)
 {
 	/* Keep TTM behavior for now, remove when drivers are audited */
-	if (WARN_ON_ONCE(!gbo->bo.resource->mm_node))
+	if (WARN_ON_ONCE(!gbo->bo.resource ||
+			 gbo->bo.resource->mem_type == TTM_PL_SYSTEM))
 		return 0;
 
 	return gbo->bo.resource->start;
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 3a0d9b3bf991..c3d20bc80022 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -918,12 +918,8 @@ static void nouveau_bo_move_ntfy(struct ttm_buffer_object *bo,
 		}
 	}
 
-	if (new_reg) {
-		if (new_reg->mm_node)
-			nvbo->offset = (new_reg->start << PAGE_SHIFT);
-		else
-			nvbo->offset = 0;
-	}
+	if (new_reg)
+		nvbo->offset = (new_reg->start << PAGE_SHIFT);
 
 }
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c
index a1049e9feee1..0de6549fb875 100644
--- a/drivers/gpu/drm/nouveau/nouveau_mem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_mem.c
@@ -178,25 +178,24 @@ void
 nouveau_mem_del(struct ttm_resource *reg)
 {
 	struct nouveau_mem *mem = nouveau_mem(reg);
 
-	if (!mem)
-		return;
+
 	nouveau_mem_fini(mem);
-	kfree(reg->mm_node);
-	reg->mm_node = NULL;
+	kfree(mem);
 }
 
 int
 nouveau_mem_new(struct nouveau_cli *cli, u8 kind, u8 comp,
-		struct ttm_resource *reg)
+		struct ttm_resource **res)
 {
 	struct nouveau_mem *mem;
 
 	if (!(mem = kzalloc(sizeof(*mem), GFP_KERNEL)))
 		return -ENOMEM;
+
 	mem->cli = cli;
 	mem->kind = kind;
 	mem->comp = comp;
 
-	reg->mm_node = mem;
+	*res = &mem->base;
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.h b/drivers/gpu/drm/nouveau/nouveau_mem.h
index 3a6a1be2ed52..2c01166a90f2 100644
--- a/drivers/gpu/drm/nouveau/nouveau_mem.h
+++ b/drivers/gpu/drm/nouveau/nouveau_mem.h
@@ -6,12 +6,6 @@ struct ttm_tt;
 #include
 #include
 
-static inline struct nouveau_mem *
-nouveau_mem(struct ttm_resource *reg)
-{
-	return reg->mm_node;
-}
-
 struct nouveau_mem {
 	struct ttm_resource base;
 	struct nouveau_cli *cli;
@@ -21,8 +15,14 @@ struct nouveau_mem {
 	struct nvif_vma vma[2];
 };
 
+static inline struct nouveau_mem *
+nouveau_mem(struct ttm_resource *reg)
+{
+	return container_of(reg, struct nouveau_mem, base);
+}
+
 int nouveau_mem_new(struct nouveau_cli *, u8 kind, u8 comp,
-		    struct ttm_resource *);
+		    struct ttm_resource **);
 void nouveau_mem_del(struct ttm_resource *);
 int nouveau_mem_vram(struct ttm_resource *, bool contig, u8 page);
 int nouveau_mem_host(struct ttm_resource *, struct ttm_tt *);
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c
index 1ac2417effc0..f4c2e46b6fe1 100644
--- a/drivers/gpu/drm/nouveau/nouveau_ttm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c
@@ -45,7 +45,7 @@ static int
 nouveau_vram_manager_new(struct ttm_resource_manager *man,
 			 struct ttm_buffer_object *bo,
 			 const struct ttm_place *place,
-			 struct ttm_resource *reg)
+			 struct ttm_resource **res)
 {
 	struct nouveau_bo *nvbo = nouveau_bo(bo);
 	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
@@ -54,15 +54,15 @@ nouveau_vram_manager_new(struct ttm_resource_manager *man,
 	if (drm->client.device.info.ram_size == 0)
 		return -ENOMEM;
 
-	ret = nouveau_mem_new(&drm->master, nvbo->kind, nvbo->comp, reg);
+	ret = nouveau_mem_new(&drm->master, nvbo->kind, nvbo->comp, res);
 	if (ret)
 		return ret;
 
-	ttm_resource_init(bo, place, reg->mm_node);
+	ttm_resource_init(bo, place, *res);
 
-	ret = nouveau_mem_vram(reg, nvbo->contig, nvbo->page);
+	ret = nouveau_mem_vram(*res, nvbo->contig, nvbo->page);
 	if (ret) {
-		nouveau_mem_del(reg);
+		nouveau_mem_del(*res);
 		return ret;
 	}
 
@@ -78,18 +78,18 @@ static int
 nouveau_gart_manager_new(struct ttm_resource_manager *man,
 			 struct ttm_buffer_object *bo,
 			 const struct ttm_place *place,
-			 struct ttm_resource *reg)
+			 struct ttm_resource **res)
 {
 	struct nouveau_bo *nvbo = nouveau_bo(bo);
 	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
 	int ret;
 
-	ret = nouveau_mem_new(&drm->master, nvbo->kind, nvbo->comp, reg);
+	ret = nouveau_mem_new(&drm->master, nvbo->kind, nvbo->comp, res);
 	if (ret)
 		return ret;
 
-	ttm_resource_init(bo, place, reg->mm_node);
-	reg->start = 0;
+	ttm_resource_init(bo, place, *res);
+	(*res)->start = 0;
 	return 0;
 }
@@ -102,27 +102,27 @@ static int
 nv04_gart_manager_new(struct ttm_resource_manager *man,
 		      struct ttm_buffer_object *bo,
 		      const struct ttm_place *place,
-		      struct ttm_resource *reg)
+		      struct ttm_resource **res)
 {
 	struct nouveau_bo *nvbo = nouveau_bo(bo);
 	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
 	struct nouveau_mem *mem;
 	int ret;
 
-	ret = nouveau_mem_new(&drm->master, nvbo->kind, nvbo->comp, reg);
-	mem = nouveau_mem(reg);
+	ret = nouveau_mem_new(&drm->master, nvbo->kind, nvbo->comp, res);
 	if (ret)
 		return ret;
 
+	mem = nouveau_mem(*res);
+	ttm_resource_init(bo, place, *res);
 	ret = nvif_vmm_get(&mem->cli->vmm.vmm, PTES, false, 12, 0,
-			   (long)reg->num_pages << PAGE_SHIFT, &mem->vma[0]);
+			   (long)(*res)->num_pages << PAGE_SHIFT, &mem->vma[0]);
 	if (ret) {
-		nouveau_mem_del(reg);
+		nouveau_mem_del(*res);
 		return ret;
 	}
 
-	reg->start = mem->vma[0].addr >> PAGE_SHIFT;
+	(*res)->start = mem->vma[0].addr >> PAGE_SHIFT;
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/ttm/ttm_range_manager.c b/drivers/gpu/drm/ttm/ttm_range_manager.c
index ce5d07ca384c..c32e1aee2481 100644
--- a/drivers/gpu/drm/ttm/ttm_range_manager.c
+++ b/drivers/gpu/drm/ttm/ttm_range_manager.c
@@ -58,7 +58,7 @@ to_range_manager(struct ttm_resource_manager *man)
 static int ttm_range_man_alloc(struct ttm_resource_manager *man,
 			       struct ttm_buffer_object *bo,
 			       const struct ttm_place *place,
-			       struct ttm_resource *mem)
+			       struct ttm_resource **res)
 {
 	struct ttm_range_manager *rman = to_range_manager(man);
 	struct ttm_range_mgr_node *node;
@@ -83,37 +83,30 @@ static int ttm_range_man_alloc(struct ttm_resource_manager *man,
 
 	spin_lock(&rman->lock);
 	ret = drm_mm_insert_node_in_range(mm, &node->mm_nodes[0],
-					  mem->num_pages, bo->page_alignment, 0,
+					  node->base.num_pages,
+					  bo->page_alignment, 0,
 					  place->fpfn, lpfn, mode);
 	spin_unlock(&rman->lock);
 
-	if (unlikely(ret)) {
+	if (unlikely(ret))
 		kfree(node);
-	} else {
-		mem->mm_node = &node->mm_nodes[0];
-		mem->start = node->mm_nodes[0].start;
-	}
+	else
+		node->base.start = node->mm_nodes[0].start;
 
 	return ret;
 }
 
 static void ttm_range_man_free(struct ttm_resource_manager *man,
-			       struct ttm_resource *mem)
+			       struct ttm_resource *res)
 {
+	struct ttm_range_mgr_node *node = to_ttm_range_mgr_node(res);
 	struct ttm_range_manager *rman = to_range_manager(man);
-	struct ttm_range_mgr_node *node;
-
-	if (!mem->mm_node)
-		return;
-
-	node = to_ttm_range_mgr_node(mem);
 
 	spin_lock(&rman->lock);
 	drm_mm_remove_node(&node->mm_nodes[0]);
 	spin_unlock(&rman->lock);
 
 	kfree(node);
-	mem->mm_node = NULL;
 }
 
 static void ttm_range_man_debug(struct ttm_resource_manager *man,
 
diff --git a/drivers/gpu/drm/ttm/ttm_resource.c b/drivers/gpu/drm/ttm/ttm_resource.c
index 2a51ace17614..2a68145572cc 100644
--- a/drivers/gpu/drm/ttm/ttm_resource.c
+++ b/drivers/gpu/drm/ttm/ttm_resource.c
@@ -29,7 +29,6 @@ void ttm_resource_init(struct ttm_buffer_object *bo,
 		       const struct ttm_place *place,
 		       struct ttm_resource *res)
 {
-	res->mm_node = NULL;
 	res->start = 0;
 	res->num_pages = PFN_UP(bo->base.size);
 	res->mem_type = place->mem_type;
@@ -47,22 +46,8 @@ int ttm_resource_alloc(struct ttm_buffer_object *bo,
 {
 	struct ttm_resource_manager *man =
 		ttm_manager_type(bo->bdev, place->mem_type);
-	struct ttm_resource *res;
-	int r;
-
-	res = kmalloc(sizeof(*res), GFP_KERNEL);
-	if (!res)
-		return -ENOMEM;
-
-	ttm_resource_init(bo, place, res);
-	r = man->func->alloc(man, bo, place, res);
-	if (r) {
-		kfree(res);
-		return r;
-	}
 
-	*res_ptr = res;
-	return 0;
+	return man->func->alloc(man, bo, place, res_ptr);
 }
 
 void ttm_resource_free(struct ttm_buffer_object *bo, struct ttm_resource **res)
@@ -74,7 +59,6 @@ void ttm_resource_free(struct ttm_buffer_object *bo, struct ttm_resource **res)
 
 	man = ttm_manager_type(bo->bdev, (*res)->mem_type);
 	man->func->free(man, *res);
-	kfree(*res);
 	*res = NULL;
 }
 EXPORT_SYMBOL(ttm_resource_free);
 
diff --git a/drivers/gpu/drm/ttm/ttm_sys_manager.c b/drivers/gpu/drm/ttm/ttm_sys_manager.c
index 2b75f493c3c9..63aca52f75e1 100644
--- a/drivers/gpu/drm/ttm/ttm_sys_manager.c
+++ b/drivers/gpu/drm/ttm/ttm_sys_manager.c
@@ -10,20 +10,20 @@ static int ttm_sys_man_alloc(struct ttm_resource_manager *man,
 			     struct ttm_buffer_object *bo,
 			     const struct ttm_place *place,
-			     struct ttm_resource *mem)
+			     struct ttm_resource **res)
 {
-	mem->mm_node = kzalloc(sizeof(*mem), GFP_KERNEL);
-	if (!mem->mm_node)
+	*res = kzalloc(sizeof(**res), GFP_KERNEL);
+	if (!*res)
 		return -ENOMEM;
 
-	ttm_resource_init(bo, place, mem->mm_node);
+	ttm_resource_init(bo, place, *res);
 	return 0;
 }
 
 static void ttm_sys_man_free(struct ttm_resource_manager *man,
-			     struct ttm_resource *mem)
+			     struct ttm_resource *res)
 {
-	kfree(mem->mm_node);
+	kfree(res);
 }
 
 static const struct ttm_resource_manager_func ttm_sys_manager_func = {
 
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
index 82a5e6489810..28ceb749a733 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
@@ -52,16 +52,16 @@ static struct vmwgfx_gmrid_man *to_gmrid_manager(struct ttm_resource_manager *man)
 static int vmw_gmrid_man_get_node(struct ttm_resource_manager *man,
 				  struct ttm_buffer_object *bo,
 				  const struct ttm_place *place,
-				  struct ttm_resource *mem)
+				  struct ttm_resource **res)
 {
 	struct vmwgfx_gmrid_man *gman = to_gmrid_manager(man);
 	int id;
 
-	mem->mm_node = kmalloc(sizeof(*mem), GFP_KERNEL);
-	if (!mem->mm_node)
+	*res = kmalloc(sizeof(**res), GFP_KERNEL);
+	if (!*res)
 		return -ENOMEM;
 
-	ttm_resource_init(bo, place, mem->mm_node);
+	ttm_resource_init(bo, place, *res);
 
 	id = ida_alloc_max(&gman->gmr_ida, gman->max_gmr_ids - 1, GFP_KERNEL);
 	if (id < 0)
@@ -70,34 +70,34 @@ static int vmw_gmrid_man_get_node(struct ttm_resource_manager *man,
 
 	spin_lock(&gman->lock);
 
 	if (gman->max_gmr_pages > 0) {
-		gman->used_gmr_pages += mem->num_pages;
+		gman->used_gmr_pages += (*res)->num_pages;
 		if (unlikely(gman->used_gmr_pages > gman->max_gmr_pages))
 			goto nospace;
 	}
 
-	mem->mm_node = gman;
-	mem->start = id;
+	(*res)->start = id;
 	spin_unlock(&gman->lock);
 	return 0;
 
 nospace:
-	gman->used_gmr_pages -= mem->num_pages;
+	gman->used_gmr_pages -= (*res)->num_pages;
 	spin_unlock(&gman->lock);
 	ida_free(&gman->gmr_ida, id);
+	kfree(*res);
 	return -ENOSPC;
 }
 
 static void vmw_gmrid_man_put_node(struct ttm_resource_manager *man,
-				   struct ttm_resource *mem)
+				   struct ttm_resource *res)
 {
 	struct vmwgfx_gmrid_man *gman = to_gmrid_manager(man);
 
-	ida_free(&gman->gmr_ida, mem->start);
+	ida_free(&gman->gmr_ida, res->start);
 	spin_lock(&gman->lock);
-	gman->used_gmr_pages -= mem->num_pages;
+	gman->used_gmr_pages -= res->num_pages;
 	spin_unlock(&gman->lock);
-	kfree(mem->mm_node);
+	kfree(res);
 }
 
 static const struct ttm_resource_manager_func vmw_gmrid_manager_func;
 
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_thp.c b/drivers/gpu/drm/vmwgfx/vmwgfx_thp.c
index 8765835696ac..2a3d3468e4e0 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_thp.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_thp.c
@@ -51,7 +51,7 @@ static int vmw_thp_insert_aligned(struct ttm_buffer_object *bo,
 static int vmw_thp_get_node(struct ttm_resource_manager *man,
 			    struct ttm_buffer_object *bo,
 			    const struct ttm_place *place,
-			    struct ttm_resource *mem)
+			    struct ttm_resource **res)
 {
 	struct vmw_thp_manager *rman = to_thp_manager(man);
 	struct drm_mm *mm = &rman->mm;
@@ -78,26 +78,27 @@ static int vmw_thp_get_node(struct ttm_resource_manager *man,
 	spin_lock(&rman->lock);
 	if (IS_ENABLED(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)) {
 		align_pages = (HPAGE_PUD_SIZE >> PAGE_SHIFT);
-		if (mem->num_pages >= align_pages) {
+		if (node->base.num_pages >= align_pages) {
 			ret = vmw_thp_insert_aligned(bo, mm, &node->mm_nodes[0],
-						     align_pages, place, mem,
-						     lpfn, mode);
+						     align_pages, place,
+						     &node->base, lpfn, mode);
 			if (!ret)
 				goto found_unlock;
 		}
 	}
 
 	align_pages = (HPAGE_PMD_SIZE >> PAGE_SHIFT);
-	if (mem->num_pages >= align_pages) {
+	if (node->base.num_pages >= align_pages) {
 		ret = vmw_thp_insert_aligned(bo, mm, &node->mm_nodes[0],
-					     align_pages, place, mem, lpfn,
-					     mode);
+					     align_pages, place, &node->base,
+					     lpfn, mode);
 		if (!ret)
 			goto found_unlock;
 	}
 
 	ret = drm_mm_insert_node_in_range(mm, &node->mm_nodes[0],
-					  mem->num_pages, bo->page_alignment, 0,
+					  node->base.num_pages,
+					  bo->page_alignment, 0,
 					  place->fpfn, lpfn, mode);
 found_unlock:
 	spin_unlock(&rman->lock);
@@ -105,20 +106,18 @@ found_unlock:
 	if (unlikely(ret)) {
 		kfree(node);
 	} else {
-		mem->mm_node = &node->mm_nodes[0];
-		mem->start = node->mm_nodes[0].start;
+		node->base.start = node->mm_nodes[0].start;
+		*res = &node->base;
 	}
 
 	return ret;
 }
 
-
 static void vmw_thp_put_node(struct ttm_resource_manager *man,
-			     struct ttm_resource *mem)
+			     struct ttm_resource *res)
 {
+	struct ttm_range_mgr_node *node = to_ttm_range_mgr_node(res);
 	struct vmw_thp_manager *rman = to_thp_manager(man);
-	struct ttm_range_mgr_node * node = mem->mm_node;
 
 	spin_lock(&rman->lock);
 	drm_mm_remove_node(&node->mm_nodes[0]);
 
diff --git a/include/drm/ttm/ttm_range_manager.h b/include/drm/ttm/ttm_range_manager.h
index 983f452ce54b..22b6fa42ac20 100644
--- a/include/drm/ttm/ttm_range_manager.h
+++ b/include/drm/ttm/ttm_range_manager.h
@@ -30,8 +30,7 @@ struct ttm_range_mgr_node {
 static inline struct ttm_range_mgr_node *
 to_ttm_range_mgr_node(struct ttm_resource *res)
 {
-	return container_of(res->mm_node, struct ttm_range_mgr_node,
-			    mm_nodes[0]);
+	return container_of(res, struct ttm_range_mgr_node, base);
 }
 
 int ttm_range_man_init(struct ttm_device *bdev,
 
diff --git a/include/drm/ttm/ttm_resource.h b/include/drm/ttm/ttm_resource.h
index 803e4875d779..4abb95b9fd11 100644
--- a/include/drm/ttm/ttm_resource.h
+++ b/include/drm/ttm/ttm_resource.h
@@ -45,46 +45,38 @@ struct ttm_resource_manager_func {
 	 *
 	 * @man: Pointer to a memory type manager.
 	 * @bo: Pointer to the buffer object we're allocating space for.
-	 * @placement: Placement details.
-	 * @flags: Additional placement flags.
-	 * @mem: Pointer to a struct ttm_resource to be filled in.
+	 * @place: Placement details.
+	 * @res: Resulting pointer to the ttm_resource.
 	 *
 	 * This function should allocate space in the memory type managed
-	 * by @man. Placement details if
-	 * applicable are given by @placement. If successful,
-	 * @mem::mm_node should be set to a non-null value, and
-	 * @mem::start should be set to a value identifying the beginning
+	 * by @man. Placement details if applicable are given by @place. If
+	 * successful, a filled in ttm_resource object should be returned in
+	 * @res. @res::start should be set to a value identifying the beginning
 	 * of the range allocated, and the function should return zero.
-	 * If the memory region accommodate the buffer object, @mem::mm_node
-	 * should be set to NULL, and the function should return 0.
+	 * If the manager can't fulfill the request -ENOSPC should be returned.
 	 * If a system error occurred, preventing the request to be fulfilled,
 	 * the function should return a negative error code.
 	 *
-	 * Note that @mem::mm_node will only be dereferenced by
-	 * struct ttm_resource_manager functions and optionally by the driver,
-	 * which has knowledge of the underlying type.
-	 *
-	 * This function may not be called from within atomic context, so
-	 * an implementation can and must use either a mutex or a spinlock to
-	 * protect any data structures managing the space.
+	 * This function may not be called from within atomic context and needs
+	 * to take care of its own locking to protect any data structures
+	 * managing the space.
 	 */
 	int  (*alloc)(struct ttm_resource_manager *man,
 		      struct ttm_buffer_object *bo,
 		      const struct ttm_place *place,
-		      struct ttm_resource *mem);
+		      struct ttm_resource **res);
 
 	/**
 	 * struct ttm_resource_manager_func member free
 	 *
 	 * @man: Pointer to a memory type manager.
-	 * @mem: Pointer to a struct ttm_resource to be filled in.
+	 * @res: Pointer to a struct ttm_resource to be freed.
 	 *
-	 * This function frees memory type resources previously allocated
-	 * and that are identified by @mem::mm_node and @mem::start. May not
-	 * be called from within atomic context.
+	 * This function frees memory type resources previously allocated.
+	 * May not be called from within atomic context.
 	 */
 	void (*free)(struct ttm_resource_manager *man,
-		     struct ttm_resource *mem);
+		     struct ttm_resource *res);
 
 	/**
 	 * struct ttm_resource_manager_func member debug
@@ -158,9 +150,9 @@ struct ttm_bus_placement {
 /**
 * struct ttm_resource
 *
- * @mm_node: Memory manager node.
- * @size: Requested size of memory region.
- * @num_pages: Actual size of memory region in pages.
+ * @start: Start of the allocation.
+ * @num_pages: Actual size of resource in pages.
+ * @mem_type: Resource type of the allocation.
 * @placement: Placement flags.
 * @bus: Placement on io bus accessible to the CPU
 *
@@ -168,7 +160,6 @@ struct ttm_bus_placement {
 * buffer object.
 */
 struct ttm_resource {
-	void *mm_node;
 	unsigned long start;
 	unsigned long num_pages;
 	uint32_t mem_type;
--
cgit v1.2.3
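This patch removes the untyped mm_node back-pointer and completes the
inheritance model: each backend embeds struct ttm_resource as the first member
and recovers its own type with container_of(), exactly as amdgpu_gtt_node and
nouveau_mem do above. A sketch of the pattern with a made-up backend type; the
TTM manager callbacks match the signatures in this patch:

struct example_node {
	struct ttm_resource base;	/* base class, handed back to TTM */
	struct drm_mm_node mm_node;	/* driver-private payload */
};

static inline struct example_node *to_example_node(struct ttm_resource *res)
{
	return container_of(res, struct example_node, base);
}

static int example_man_alloc(struct ttm_resource_manager *man,
			     struct ttm_buffer_object *bo,
			     const struct ttm_place *place,
			     struct ttm_resource **res)
{
	struct example_node *node = kzalloc(sizeof(*node), GFP_KERNEL);

	if (!node)
		return -ENOMEM;

	ttm_resource_init(bo, place, &node->base);	/* fill the base */
	*res = &node->base;	/* TTM only ever sees the embedded base */
	return 0;
}

static void example_man_free(struct ttm_resource_manager *man,
			     struct ttm_resource *res)
{
	kfree(to_example_node(res));	/* frees base and payload together */
}

One allocation per resource, and the mm_node field in struct ttm_resource can
be deleted because the driver-private state now lives around the base object
instead of behind a void pointer.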
From d3fae3b3daac09961ab871a25093b0ae404282d5 Mon Sep 17 00:00:00 2001
From: Christian König
Date: Wed, 2 Jun 2021 13:01:15 +0200
Subject: dma-buf: drop the _rcu postfix on function names v3
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

The functions can be called both in _rcu context and while holding the
lock.

v2: add some kerneldoc as suggested by Daniel
v3: fix indentation

Signed-off-by: Christian König
Reviewed-by: Jason Ekstrand
Acked-by: Daniel Vetter
Link: https://patchwork.freedesktop.org/patch/msgid/20210602111714.212426-7-christian.koenig@amd.com
---
 drivers/dma-buf/dma-buf.c                         |  3 +--
 drivers/dma-buf/dma-resv.c                        | 32 ++++++++++++-----------
 drivers/gpu/drm/amd/amdgpu/amdgpu_display.c       |  5 ++--
 drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c       |  2 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c           |  3 +--
 drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c           |  5 ++--
 drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c            |  4 +--
 drivers/gpu/drm/amd/amdgpu/amdgpu_object.c        |  4 +--
 drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c           |  5 ++--
 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c            | 11 ++++----
 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c |  5 ++--
 drivers/gpu/drm/drm_gem.c                         |  5 ++--
 drivers/gpu/drm/etnaviv/etnaviv_gem.c             |  6 ++---
 drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c      |  6 ++---
 drivers/gpu/drm/i915/dma_resv_utils.c             |  2 +-
 drivers/gpu/drm/i915/gem/i915_gem_busy.c          |  2 +-
 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c    |  2 +-
 drivers/gpu/drm/i915/gem/i915_gem_userptr.c       |  4 +--
 drivers/gpu/drm/i915/gem/i915_gem_wait.c          |  6 ++---
 drivers/gpu/drm/i915/i915_request.c               |  4 +--
 drivers/gpu/drm/i915/i915_sw_fence.c              |  2 +-
 drivers/gpu/drm/msm/msm_gem.c                     |  3 +--
 drivers/gpu/drm/nouveau/nouveau_gem.c             |  4 +--
 drivers/gpu/drm/panfrost/panfrost_drv.c           |  3 +--
 drivers/gpu/drm/radeon/radeon_gem.c               |  6 ++---
 drivers/gpu/drm/radeon/radeon_mn.c                |  4 +--
 drivers/gpu/drm/ttm/ttm_bo.c                      | 18 ++++++-------
 drivers/gpu/drm/vgem/vgem_fence.c                 |  3 +--
 drivers/gpu/drm/virtio/virtgpu_ioctl.c            |  5 ++--
 drivers/gpu/drm/vmwgfx/vmwgfx_bo.c                |  6 ++---
 include/linux/dma-resv.h                          | 17 ++++--------
 31 files changed, 84 insertions(+), 103 deletions(-)

(limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_object.c')

diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
index d419cf90ee73..511fe0d217a0 100644
--- a/drivers/dma-buf/dma-buf.c
+++ b/drivers/dma-buf/dma-buf.c
@@ -1147,8 +1147,7 @@ static int __dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
 	long ret;

 	/* Wait on any implicit rendering fences */
-	ret = dma_resv_wait_timeout_rcu(resv, write, true,
-					MAX_SCHEDULE_TIMEOUT);
+	ret = dma_resv_wait_timeout(resv, write, true, MAX_SCHEDULE_TIMEOUT);
 	if (ret < 0)
 		return ret;

diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c
index 62e7e055ac62..f26c71747d43 100644
--- a/drivers/dma-buf/dma-resv.c
+++ b/drivers/dma-buf/dma-resv.c
@@ -396,7 +396,7 @@ retry:
 EXPORT_SYMBOL(dma_resv_copy_fences);

 /**
- * dma_resv_get_fences_rcu - Get an object's shared and exclusive
+ * dma_resv_get_fences - Get an object's shared and exclusive
  * fences without update side lock held
  * @obj: the reservation object
  * @pfence_excl: the returned exclusive fence (or NULL)
@@ -408,10 +408,9 @@ EXPORT_SYMBOL(dma_resv_copy_fences);
  * exclusive fence is not specified the fence is put into the array of the
  * shared fences as well. Returns either zero or -ENOMEM.
  */
-int dma_resv_get_fences_rcu(struct dma_resv *obj,
-			    struct dma_fence **pfence_excl,
-			    unsigned int *pshared_count,
-			    struct dma_fence ***pshared)
+int dma_resv_get_fences(struct dma_resv *obj, struct dma_fence **pfence_excl,
+			unsigned int *pshared_count,
+			struct dma_fence ***pshared)
 {
 	struct dma_fence **shared = NULL;
 	struct dma_fence *fence_excl;
@@ -494,23 +493,24 @@ unlock:
 	*pshared = shared;
 	return ret;
 }
-EXPORT_SYMBOL_GPL(dma_resv_get_fences_rcu);
+EXPORT_SYMBOL_GPL(dma_resv_get_fences);

 /**
- * dma_resv_wait_timeout_rcu - Wait on reservation's objects
+ * dma_resv_wait_timeout - Wait on reservation's objects
  * shared and/or exclusive fences.
  * @obj: the reservation object
  * @wait_all: if true, wait on all fences, else wait on just exclusive fence
  * @intr: if true, do interruptible wait
  * @timeout: timeout value in jiffies or zero to return immediately
  *
+ * Callers are not required to hold specific locks, but may hold
+ * dma_resv_lock() already.
  * RETURNS
  * Returns -ERESTARTSYS if interrupted, 0 if the wait timed out, or
  * greater than zero on success.
  */
-long dma_resv_wait_timeout_rcu(struct dma_resv *obj,
-			       bool wait_all, bool intr,
-			       unsigned long timeout)
+long dma_resv_wait_timeout(struct dma_resv *obj, bool wait_all, bool intr,
+			   unsigned long timeout)
 {
 	long ret = timeout ? timeout : 1;
 	unsigned int seq, shared_count;
@@ -582,7 +582,7 @@ unlock_retry:
 	rcu_read_unlock();
 	goto retry;
 }
-EXPORT_SYMBOL_GPL(dma_resv_wait_timeout_rcu);
+EXPORT_SYMBOL_GPL(dma_resv_wait_timeout);

 static inline int dma_resv_test_signaled_single(struct dma_fence *passed_fence)
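The return-value convention documented above is worth spelling out once from the caller's side. A hedged, illustrative helper (wait_idle_example is not kernel code):

#include <linux/dma-resv.h>
#include <linux/errno.h>
#include <linux/jiffies.h>

/* Wait up to 30s for all fences; no lock is required, and per the
 * kerneldoc the caller may also hold dma_resv_lock() already. */
static int wait_idle_example(struct dma_resv *resv)
{
	long ret;

	ret = dma_resv_wait_timeout(resv, true /* wait_all */,
				    true /* intr */, 30 * HZ);
	if (ret < 0)
		return ret;		/* e.g. -ERESTARTSYS if interrupted */
	if (ret == 0)
		return -ETIMEDOUT;	/* the wait timed out */
	return 0;			/* > 0: everything signaled in time */
}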
@@ -602,16 +602,18 @@ static inline int dma_resv_test_signaled_single(struct dma_fence *passed_fence)
 }

 /**
- * dma_resv_test_signaled_rcu - Test if a reservation object's
- * fences have been signaled.
+ * dma_resv_test_signaled - Test if a reservation object's fences have been
+ * signaled.
  * @obj: the reservation object
  * @test_all: if true, test all fences, otherwise only test the exclusive
  * fence
  *
+ * Callers are not required to hold specific locks, but may hold
+ * dma_resv_lock() already.
  * RETURNS
  * true if all fences signaled, else false
  */
-bool dma_resv_test_signaled_rcu(struct dma_resv *obj, bool test_all)
+bool dma_resv_test_signaled(struct dma_resv *obj, bool test_all)
 {
 	unsigned int seq, shared_count;
 	int ret;
@@ -660,7 +662,7 @@ retry:
 	rcu_read_unlock();
 	return ret;
 }
-EXPORT_SYMBOL_GPL(dma_resv_test_signaled_rcu);
+EXPORT_SYMBOL_GPL(dma_resv_test_signaled);

 #if IS_ENABLED(CONFIG_LOCKDEP)
 static int __init dma_resv_lockdep(void)
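One point the kerneldoc leaves implicit: dma_resv_get_fences() hands back reference-counted fences and a kmalloc'ed array, so the caller must drop both, exactly as the amdgpu and i915 call sites in the hunks below do. An illustrative sketch (snapshot_fences_example is invented for this example):

#include <linux/dma-fence.h>
#include <linux/dma-resv.h>
#include <linux/slab.h>

static int snapshot_fences_example(struct dma_resv *resv)
{
	struct dma_fence *excl, **shared;
	unsigned int count, i;
	int r;

	r = dma_resv_get_fences(resv, &excl, &count, &shared);
	if (r)
		return r;	/* zero or -ENOMEM per the kerneldoc */

	/* ... inspect excl and shared[0..count-1] here ... */

	for (i = 0; i < count; ++i)
		dma_fence_put(shared[i]);	/* drop each shared reference */
	kfree(shared);				/* and the array itself */
	dma_fence_put(excl);			/* dma_fence_put(NULL) is a no-op */
	return 0;
}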
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index 49f73b5b89b0..ac7b37dfff5e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -203,9 +203,8 @@ int amdgpu_display_crtc_page_flip_target(struct drm_crtc *crtc,
 		goto unpin;
 	}

-	r = dma_resv_get_fences_rcu(new_abo->tbo.base.resv, &work->excl,
-				    &work->shared_count,
-				    &work->shared);
+	r = dma_resv_get_fences(new_abo->tbo.base.resv, &work->excl,
+				&work->shared_count, &work->shared);
 	if (unlikely(r != 0)) {
 		DRM_ERROR("failed to get fences for buffer\n");
 		goto unpin;

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
index 04caa31056d0..c3053b83b80c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
@@ -52,7 +52,7 @@ __dma_resv_make_exclusive(struct dma_resv *obj)
 	if (!dma_resv_shared_list(obj)) /* no shared fences to convert */
 		return 0;

-	r = dma_resv_get_fences_rcu(obj, NULL, &count, &fences);
+	r = dma_resv_get_fences(obj, NULL, &count, &fences);
 	if (r)
 		return r;

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index 7d5aaf584634..1c3e3b608332 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -526,8 +526,7 @@ int amdgpu_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
 		return -ENOENT;
 	}
 	robj = gem_to_amdgpu_bo(gobj);
-	ret = dma_resv_wait_timeout_rcu(robj->tbo.base.resv, true, true,
-					timeout);
+	ret = dma_resv_wait_timeout(robj->tbo.base.resv, true, true, timeout);

 	/* ret == 0 means not signaled,
 	 * ret > 0 means signaled

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
index b4971e90b98c..df69b1e9e451 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
@@ -112,7 +112,7 @@ void amdgpu_pasid_free_delayed(struct dma_resv *resv,
 	unsigned count;
 	int r;

-	r = dma_resv_get_fences_rcu(resv, NULL, &count, &fences);
+	r = dma_resv_get_fences(resv, NULL, &count, &fences);
 	if (r)
 		goto fallback;

@@ -156,8 +156,7 @@ fallback:
 	/* Not enough memory for the delayed delete, as last resort
 	 * block for all the fences to complete.
 	 */
-	dma_resv_wait_timeout_rcu(resv, true, false,
-				  MAX_SCHEDULE_TIMEOUT);
+	dma_resv_wait_timeout(resv, true, false, MAX_SCHEDULE_TIMEOUT);
 	amdgpu_pasid_free(pasid);
 }

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
index 2741c28ff1b5..d6c54c7f7679 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
@@ -75,8 +75,8 @@ static bool amdgpu_mn_invalidate_gfx(struct mmu_interval_notifier *mni,

 	mmu_interval_set_seq(mni, cur_seq);

-	r = dma_resv_wait_timeout_rcu(bo->tbo.base.resv, true, false,
-				      MAX_SCHEDULE_TIMEOUT);
+	r = dma_resv_wait_timeout(bo->tbo.base.resv, true, false,
+				  MAX_SCHEDULE_TIMEOUT);
 	mutex_unlock(&adev->notifier_lock);
 	if (r <= 0)
 		DRM_ERROR("(%ld) failed to wait for user bo\n", r);

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 19c1384a133f..96447e1d4c9c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -756,8 +756,8 @@ int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr)
 		return 0;
 	}

-	r = dma_resv_wait_timeout_rcu(bo->tbo.base.resv, false, false,
-				      MAX_SCHEDULE_TIMEOUT);
+	r = dma_resv_wait_timeout(bo->tbo.base.resv, false, false,
+				  MAX_SCHEDULE_TIMEOUT);
 	if (r < 0)
 		return r;

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index 82f0542c7792..a692a4570627 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -1126,9 +1126,8 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
 	ib->length_dw = 16;

 	if (direct) {
-		r = dma_resv_wait_timeout_rcu(bo->tbo.base.resv,
-					      true, false,
-					      msecs_to_jiffies(10));
+		r = dma_resv_wait_timeout(bo->tbo.base.resv, true, false,
+					  msecs_to_jiffies(10));
 		if (r == 0)
 			r = -ETIMEDOUT;
 		if (r < 0)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index bcfd4a8d0288..d1a229212e7a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -2022,13 +2022,12 @@ static void amdgpu_vm_prt_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
 	unsigned i, shared_count;
 	int r;

-	r = dma_resv_get_fences_rcu(resv, &excl,
-				    &shared_count, &shared);
+	r = dma_resv_get_fences(resv, &excl, &shared_count, &shared);
 	if (r) {
 		/* Not enough memory to grab the fence list, as last resort
 		 * block for all the fences to complete.
		 */
-		dma_resv_wait_timeout_rcu(resv, true, false,
+		dma_resv_wait_timeout(resv, true, false,
					  MAX_SCHEDULE_TIMEOUT);
 		return;
 	}
@@ -2640,7 +2639,7 @@ bool amdgpu_vm_evictable(struct amdgpu_bo *bo)
 		return true;

 	/* Don't evict VM page tables while they are busy */
-	if (!dma_resv_test_signaled_rcu(bo->tbo.base.resv, true))
+	if (!dma_resv_test_signaled(bo->tbo.base.resv, true))
 		return false;

 	/* Try to block ongoing updates */
@@ -2820,8 +2819,8 @@ void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t min_vm_size,
 */
 long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout)
 {
-	timeout = dma_resv_wait_timeout_rcu(vm->root.base.bo->tbo.base.resv,
-					    true, true, timeout);
+	timeout = dma_resv_wait_timeout(vm->root.base.bo->tbo.base.resv, true,
+					true, timeout);
 	if (timeout <= 0)
 		return timeout;

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 3267eb2e35dd..6dde2873d47b 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -8400,9 +8400,8 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
 		 * deadlock during GPU reset when this fence will not signal
 		 * but we hold reservation lock for the BO.
 		 */
-		r = dma_resv_wait_timeout_rcu(abo->tbo.base.resv, true,
-					      false,
-					      msecs_to_jiffies(5000));
+		r = dma_resv_wait_timeout(abo->tbo.base.resv, true, false,
+					  msecs_to_jiffies(5000));
 		if (unlikely(r <= 0))
 			DRM_ERROR("Waiting for fences timed out!");

diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index 263b4fb03303..d62fb1a3c916 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -770,8 +770,7 @@ long drm_gem_dma_resv_wait(struct drm_file *filep, u32 handle,
 		return -EINVAL;
 	}

-	ret = dma_resv_wait_timeout_rcu(obj->resv, wait_all,
-					true, timeout);
+	ret = dma_resv_wait_timeout(obj->resv, wait_all, true, timeout);
 	if (ret == 0)
 		ret = -ETIME;
 	else if (ret > 0)
@@ -1380,7 +1379,7 @@ int drm_gem_fence_array_add_implicit(struct xarray *fence_array,
 		return drm_gem_fence_array_add(fence_array, fence);
 	}

-	ret = dma_resv_get_fences_rcu(obj->resv, NULL,
+	ret = dma_resv_get_fences(obj->resv, NULL,
						&fence_count, &fences);
 	if (ret || !fence_count)
 		return ret;
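The etnaviv, msm, nouveau, and virtio hunks that follow all implement the same wait-ioctl shape: probe with dma_resv_test_signaled() when the caller asked not to block, otherwise sleep in dma_resv_wait_timeout(). Condensed into one hedged sketch (cpu_prep_example and its parameters are illustrative, not any driver's actual function):

#include <linux/dma-resv.h>
#include <linux/errno.h>

static int cpu_prep_example(struct dma_resv *resv, bool write,
			    bool nosync, unsigned long timeout_jiffies)
{
	long ret;

	if (nosync)	/* non-blocking probe */
		return dma_resv_test_signaled(resv, write) ? 0 : -EBUSY;

	ret = dma_resv_wait_timeout(resv, write, true, timeout_jiffies);
	if (ret == 0)
		return -ETIMEDOUT;
	return ret < 0 ? ret : 0;
}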
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
index 8792d8dd5106..b8fa6ed3dd73 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
@@ -390,14 +390,12 @@ int etnaviv_gem_cpu_prep(struct drm_gem_object *obj, u32 op,
 	}

 	if (op & ETNA_PREP_NOSYNC) {
-		if (!dma_resv_test_signaled_rcu(obj->resv,
-						write))
+		if (!dma_resv_test_signaled(obj->resv, write))
 			return -EBUSY;
 	} else {
 		unsigned long remain = etnaviv_timeout_to_jiffies(timeout);

-		ret = dma_resv_wait_timeout_rcu(obj->resv,
-						write, true, remain);
+		ret = dma_resv_wait_timeout(obj->resv, write, true, remain);
 		if (ret <= 0)
 			return ret == 0 ? -ETIMEDOUT : ret;
 	}

diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
index c942d2a8c252..d53856d7a747 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
@@ -189,9 +189,9 @@ static int submit_fence_sync(struct etnaviv_gem_submit *submit)
 			continue;

 		if (bo->flags & ETNA_SUBMIT_BO_WRITE) {
-			ret = dma_resv_get_fences_rcu(robj, &bo->excl,
-						      &bo->nr_shared,
-						      &bo->shared);
+			ret = dma_resv_get_fences(robj, &bo->excl,
+						  &bo->nr_shared,
+						  &bo->shared);
 			if (ret)
 				return ret;
 		} else {

diff --git a/drivers/gpu/drm/i915/dma_resv_utils.c b/drivers/gpu/drm/i915/dma_resv_utils.c
index 9e508e7d4629..7df91b7e4ca8 100644
--- a/drivers/gpu/drm/i915/dma_resv_utils.c
+++ b/drivers/gpu/drm/i915/dma_resv_utils.c
@@ -10,7 +10,7 @@ void dma_resv_prune(struct dma_resv *resv)
 {
 	if (dma_resv_trylock(resv)) {
-		if (dma_resv_test_signaled_rcu(resv, true))
+		if (dma_resv_test_signaled(resv, true))
 			dma_resv_add_excl_fence(resv, NULL);
 		dma_resv_unlock(resv);
 	}

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_busy.c b/drivers/gpu/drm/i915/gem/i915_gem_busy.c
index 35279dd561f5..6234e17259c1 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_busy.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_busy.c
@@ -105,7 +105,7 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
 	 * Alternatively, we can trade that extra information on read/write
 	 * activity with
 	 *	args->busy =
-	 *		!dma_resv_test_signaled_rcu(obj->resv, true);
+	 *		!dma_resv_test_signaled(obj->resv, true);
 	 * to report the overall busyness. This is what the wait-ioctl does.
 	 *
 	 */

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
index 297143511f99..66789111a24b 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -1481,7 +1481,7 @@ static inline bool use_reloc_gpu(struct i915_vma *vma)
 	if (DBG_FORCE_RELOC)
 		return false;

-	return !dma_resv_test_signaled_rcu(vma->resv, true);
+	return !dma_resv_test_signaled(vma->resv, true);
 }

 static unsigned long vma_phys_addr(struct i915_vma *vma, u32 offset)

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
index a657b99ec760..b5cbbe659a77 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
@@ -85,8 +85,8 @@ static bool i915_gem_userptr_invalidate(struct mmu_interval_notifier *mni,
 		return true;

 	/* we will unbind on next submission, still have userptr pins */
-	r = dma_resv_wait_timeout_rcu(obj->base.resv, true, false,
-				      MAX_SCHEDULE_TIMEOUT);
+	r = dma_resv_wait_timeout(obj->base.resv, true, false,
+				  MAX_SCHEDULE_TIMEOUT);
 	if (r <= 0)
 		drm_err(&i915->drm, "(%ld) failed to wait for idle\n", r);

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_wait.c b/drivers/gpu/drm/i915/gem/i915_gem_wait.c
index c13aeddf5aa7..1e97520c62b2 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_wait.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_wait.c
@@ -45,7 +45,7 @@ i915_gem_object_wait_reservation(struct dma_resv *resv,
 	unsigned int count, i;
 	int ret;

-	ret = dma_resv_get_fences_rcu(resv, &excl, &count, &shared);
+	ret = dma_resv_get_fences(resv, &excl, &count, &shared);
 	if (ret)
 		return ret;

@@ -158,8 +158,8 @@ i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
 		unsigned int count, i;
 		int ret;

-		ret = dma_resv_get_fences_rcu(obj->base.resv,
-					      &excl, &count, &shared);
+		ret = dma_resv_get_fences(obj->base.resv, &excl, &count,
+					  &shared);
 		if (ret)
 			return ret;

diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index c85494f411f4..6cb91f042642 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -1594,8 +1594,8 @@ i915_request_await_object(struct i915_request *to,
 		struct dma_fence **shared;
 		unsigned int count, i;

-		ret = dma_resv_get_fences_rcu(obj->base.resv,
-					      &excl, &count, &shared);
+		ret = dma_resv_get_fences(obj->base.resv, &excl, &count,
+					  &shared);
 		if (ret)
 			return ret;

diff --git a/drivers/gpu/drm/i915/i915_sw_fence.c b/drivers/gpu/drm/i915/i915_sw_fence.c
index 7aaf74552d06..c589a681da77 100644
--- a/drivers/gpu/drm/i915/i915_sw_fence.c
+++ b/drivers/gpu/drm/i915/i915_sw_fence.c
@@ -582,7 +582,7 @@ int i915_sw_fence_await_reservation(struct i915_sw_fence *fence,
 		struct dma_fence **shared;
 		unsigned int count, i;

-		ret = dma_resv_get_fences_rcu(resv, &excl, &count, &shared);
+		ret = dma_resv_get_fences(resv, &excl, &count, &shared);
 		if (ret)
 			return ret;

diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 410a93a7e77f..a94a43de95ef 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -915,8 +915,7 @@ int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
 		op & MSM_PREP_NOSYNC ? 0 : timeout_to_jiffies(timeout);
 	long ret;

-	ret = dma_resv_wait_timeout_rcu(obj->resv, write,
-					true, remain);
+	ret = dma_resv_wait_timeout(obj->resv, write, true, remain);
 	if (ret == 0)
 		return remain == 0 ? -EBUSY : -ETIMEDOUT;
 	else if (ret < 0)

diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index d863e5ed954a..5b27845075a1 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -964,8 +964,8 @@ nouveau_gem_ioctl_cpu_prep(struct drm_device *dev, void *data,
 		return -ENOENT;
 	nvbo = nouveau_gem_object(gem);

-	lret = dma_resv_wait_timeout_rcu(nvbo->bo.base.resv, write, true,
-					 no_wait ? 0 : 30 * HZ);
+	lret = dma_resv_wait_timeout(nvbo->bo.base.resv, write, true,
+				     no_wait ? 0 : 30 * HZ);
 	if (!lret)
 		ret = -EBUSY;
 	else if (lret > 0)

diff --git a/drivers/gpu/drm/panfrost/panfrost_drv.c b/drivers/gpu/drm/panfrost/panfrost_drv.c
index 1596559f3d14..075ec0ef746c 100644
--- a/drivers/gpu/drm/panfrost/panfrost_drv.c
+++ b/drivers/gpu/drm/panfrost/panfrost_drv.c
@@ -312,8 +312,7 @@ panfrost_ioctl_wait_bo(struct drm_device *dev, void *data,
 	if (!gem_obj)
 		return -ENOENT;

-	ret = dma_resv_wait_timeout_rcu(gem_obj->resv, true,
-					true, timeout);
+	ret = dma_resv_wait_timeout(gem_obj->resv, true, true, timeout);
 	if (!ret)
		ret = timeout ? -ETIMEDOUT : -EBUSY;

diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index 3272c33af8fe..458f92a70887 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -161,7 +161,7 @@ static int radeon_gem_set_domain(struct drm_gem_object *gobj,
 	}
 	if (domain == RADEON_GEM_DOMAIN_CPU) {
 		/* Asking for cpu access wait for object idle */
-		r = dma_resv_wait_timeout_rcu(robj->tbo.base.resv, true, true, 30 * HZ);
+		r = dma_resv_wait_timeout(robj->tbo.base.resv, true, true, 30 * HZ);
 		if (!r)
 			r = -EBUSY;

@@ -523,7 +523,7 @@ int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
 	}
 	robj = gem_to_radeon_bo(gobj);

-	r = dma_resv_test_signaled_rcu(robj->tbo.base.resv, true);
+	r = dma_resv_test_signaled(robj->tbo.base.resv, true);
 	if (r == 0)
 		r = -EBUSY;
 	else
@@ -552,7 +552,7 @@ int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
 	}
 	robj = gem_to_radeon_bo(gobj);

-	ret = dma_resv_wait_timeout_rcu(robj->tbo.base.resv, true, true, 30 * HZ);
+	ret = dma_resv_wait_timeout(robj->tbo.base.resv, true, true, 30 * HZ);
 	if (ret == 0)
 		r = -EBUSY;
 	else if (ret < 0)

diff --git a/drivers/gpu/drm/radeon/radeon_mn.c b/drivers/gpu/drm/radeon/radeon_mn.c
index e37c9a57a7c3..9fa88549c89e 100644
--- a/drivers/gpu/drm/radeon/radeon_mn.c
+++ b/drivers/gpu/drm/radeon/radeon_mn.c
@@ -66,8 +66,8 @@ static bool radeon_mn_invalidate(struct mmu_interval_notifier *mn,
 		return true;
 	}

-	r = dma_resv_wait_timeout_rcu(bo->tbo.base.resv, true, false,
-				      MAX_SCHEDULE_TIMEOUT);
+	r = dma_resv_wait_timeout(bo->tbo.base.resv, true, false,
+				  MAX_SCHEDULE_TIMEOUT);
 	if (r <= 0)
 		DRM_ERROR("(%ld) failed to wait for user bo\n", r);

diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index f04a269b7065..7e7284da5630 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -296,7 +296,7 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
 	struct dma_resv *resv = &bo->base._resv;
 	int ret;

-	if (dma_resv_test_signaled_rcu(resv, true))
+	if (dma_resv_test_signaled(resv, true))
 		ret = 0;
 	else
 		ret = -EBUSY;
@@ -308,8 +308,8 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
 			dma_resv_unlock(bo->base.resv);
 		spin_unlock(&bo->bdev->lru_lock);

-		lret = dma_resv_wait_timeout_rcu(resv, true, interruptible,
-						 30 * HZ);
+		lret = dma_resv_wait_timeout(resv, true, interruptible,
+					     30 * HZ);

 		if (lret < 0)
 			return lret;
@@ -411,8 +411,8 @@ static void ttm_bo_release(struct kref *kref)
 			/* Last resort, if we fail to allocate memory for the
 			 * fences block for the BO to become idle
 			 */
-			dma_resv_wait_timeout_rcu(bo->base.resv, true, false,
-						  30 * HZ);
+			dma_resv_wait_timeout(bo->base.resv, true, false,
+					      30 * HZ);
 		}

 		if (bo->bdev->funcs->release_notify)
@@ -422,7 +422,7 @@ static void ttm_bo_release(struct kref *kref)
 		ttm_mem_io_free(bdev, bo->resource);
 	}

-	if (!dma_resv_test_signaled_rcu(bo->base.resv, true) ||
+	if (!dma_resv_test_signaled(bo->base.resv, true) ||
 	    !dma_resv_trylock(bo->base.resv)) {
 		/* The BO is not idle, resurrect it for delayed destroy */
 		ttm_bo_flush_all_fences(bo);
@@ -1094,14 +1094,14 @@ int ttm_bo_wait(struct ttm_buffer_object *bo,
 	long timeout = 15 * HZ;

 	if (no_wait) {
-		if (dma_resv_test_signaled_rcu(bo->base.resv, true))
+		if (dma_resv_test_signaled(bo->base.resv, true))
 			return 0;
 		else
 			return -EBUSY;
 	}

-	timeout = dma_resv_wait_timeout_rcu(bo->base.resv, true,
-					    interruptible, timeout);
+	timeout = dma_resv_wait_timeout(bo->base.resv, true, interruptible,
+					timeout);
 	if (timeout < 0)
		return timeout;

diff --git a/drivers/gpu/drm/vgem/vgem_fence.c b/drivers/gpu/drm/vgem/vgem_fence.c
index 2902dc6e64fa..bd6f75285fd9 100644
--- a/drivers/gpu/drm/vgem/vgem_fence.c
+++ b/drivers/gpu/drm/vgem/vgem_fence.c
@@ -151,8 +151,7 @@ int vgem_fence_attach_ioctl(struct drm_device *dev,

 	/* Check for a conflicting fence */
 	resv = obj->resv;
-	if (!dma_resv_test_signaled_rcu(resv,
-					arg->flags & VGEM_FENCE_WRITE)) {
+	if (!dma_resv_test_signaled(resv, arg->flags & VGEM_FENCE_WRITE)) {
 		ret = -EBUSY;
 		goto err_fence;
 	}

diff --git a/drivers/gpu/drm/virtio/virtgpu_ioctl.c b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
index 669f2ee39515..5c1ad1596889 100644
--- a/drivers/gpu/drm/virtio/virtgpu_ioctl.c
+++ b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
@@ -451,10 +451,9 @@ static int virtio_gpu_wait_ioctl(struct drm_device *dev, void *data,
 		return -ENOENT;

 	if (args->flags & VIRTGPU_WAIT_NOWAIT) {
-		ret = dma_resv_test_signaled_rcu(obj->resv, true);
+		ret = dma_resv_test_signaled(obj->resv, true);
 	} else {
-		ret = dma_resv_wait_timeout_rcu(obj->resv, true, true,
-						timeout);
+		ret = dma_resv_wait_timeout(obj->resv, true, true, timeout);
 	}
 	if (ret == 0)
 		ret = -EBUSY;

diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
index 176b6201ef2b..362f56d5b12b 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
@@ -743,9 +743,9 @@ static int vmw_user_bo_synccpu_grab(struct vmw_user_buffer_object *user_bo,
 	if (flags & drm_vmw_synccpu_allow_cs) {
 		long lret;

-		lret = dma_resv_wait_timeout_rcu
-			(bo->base.resv, true, true,
-			 nonblock ? 0 : MAX_SCHEDULE_TIMEOUT);
+		lret = dma_resv_wait_timeout(bo->base.resv, true, true,
+					     nonblock ? 0 :
+					     MAX_SCHEDULE_TIMEOUT);
 		if (!lret)
 			return -EBUSY;
 		else if (lret < 0)

diff --git a/include/linux/dma-resv.h b/include/linux/dma-resv.h
index 3e0eefcead44..562b885cf9c3 100644
--- a/include/linux/dma-resv.h
+++ b/include/linux/dma-resv.h
@@ -271,19 +271,12 @@ void dma_resv_init(struct dma_resv *obj);
 void dma_resv_fini(struct dma_resv *obj);
 int dma_resv_reserve_shared(struct dma_resv *obj, unsigned int num_fences);
 void dma_resv_add_shared_fence(struct dma_resv *obj, struct dma_fence *fence);
-
 void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence);
-
-int dma_resv_get_fences_rcu(struct dma_resv *obj,
-			    struct dma_fence **pfence_excl,
-			    unsigned *pshared_count,
-			    struct dma_fence ***pshared);
-
+int dma_resv_get_fences(struct dma_resv *obj, struct dma_fence **pfence_excl,
+			unsigned *pshared_count, struct dma_fence ***pshared);
 int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src);
-
-long dma_resv_wait_timeout_rcu(struct dma_resv *obj, bool wait_all, bool intr,
-			       unsigned long timeout);
-
-bool dma_resv_test_signaled_rcu(struct dma_resv *obj, bool test_all);
+long dma_resv_wait_timeout(struct dma_resv *obj, bool wait_all, bool intr,
+			   unsigned long timeout);
+bool dma_resv_test_signaled(struct dma_resv *obj, bool test_all);

 #endif /* _LINUX_RESERVATION_H */
--
cgit v1.2.3
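Finally, the point of the rename in one snippet: the same helpers now read naturally whether or not the reservation lock is held. A hedged sketch modeled on i915's dma_resv_prune() above (prune_if_idle_example is illustrative, not kernel code):

#include <linux/dma-resv.h>

static void prune_if_idle_example(struct dma_resv *resv)
{
	if (dma_resv_trylock(resv)) {
		/* query with the lock held; no separate _rcu variant needed */
		if (dma_resv_test_signaled(resv, true))
			dma_resv_add_excl_fence(resv, NULL);
		dma_resv_unlock(resv);
	}
}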