Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c')
-rw-r--r--   drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | 55
1 file changed, 35 insertions(+), 20 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index 4277125a79ee..4ed9958af94e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -29,6 +29,7 @@
 #include <linux/module.h>
 #include <linux/pagemap.h>
 #include <linux/pci.h>
+#include <linux/dma-buf.h>
 
 #include <drm/amdgpu_drm.h>
 #include <drm/drm_debugfs.h>
@@ -161,16 +162,17 @@ void amdgpu_gem_object_close(struct drm_gem_object *obj,
 
 	struct amdgpu_bo_list_entry vm_pd;
 	struct list_head list, duplicates;
+	struct dma_fence *fence = NULL;
 	struct ttm_validate_buffer tv;
 	struct ww_acquire_ctx ticket;
 	struct amdgpu_bo_va *bo_va;
-	int r;
+	long r;
 
 	INIT_LIST_HEAD(&list);
 	INIT_LIST_HEAD(&duplicates);
 
 	tv.bo = &bo->tbo;
-	tv.num_shared = 1;
+	tv.num_shared = 2;
 	list_add(&tv.head, &list);
 
 	amdgpu_vm_get_pd_bo(vm, &list, &vm_pd);
@@ -178,28 +180,34 @@ void amdgpu_gem_object_close(struct drm_gem_object *obj,
 	r = ttm_eu_reserve_buffers(&ticket, &list, false, &duplicates);
 	if (r) {
 		dev_err(adev->dev, "leaking bo va because "
-			"we fail to reserve bo (%d)\n", r);
+			"we fail to reserve bo (%ld)\n", r);
 		return;
 	}
 	bo_va = amdgpu_vm_bo_find(vm, bo);
-	if (bo_va && --bo_va->ref_count == 0) {
-		amdgpu_vm_bo_rmv(adev, bo_va);
-
-		if (amdgpu_vm_ready(vm)) {
-			struct dma_fence *fence = NULL;
+	if (!bo_va || --bo_va->ref_count)
+		goto out_unlock;
 
-			r = amdgpu_vm_clear_freed(adev, vm, &fence);
-			if (unlikely(r)) {
-				dev_err(adev->dev, "failed to clear page "
-					"tables on GEM object close (%d)\n", r);
-			}
+	amdgpu_vm_bo_rmv(adev, bo_va);
+	if (!amdgpu_vm_ready(vm))
+		goto out_unlock;
 
-			if (fence) {
-				amdgpu_bo_fence(bo, fence, true);
-				dma_fence_put(fence);
-			}
-		}
+	fence = dma_resv_get_excl(bo->tbo.base.resv);
+	if (fence) {
+		amdgpu_bo_fence(bo, fence, true);
+		fence = NULL;
 	}
+
+	r = amdgpu_vm_clear_freed(adev, vm, &fence);
+	if (r || !fence)
+		goto out_unlock;
+
+	amdgpu_bo_fence(bo, fence, true);
+	dma_fence_put(fence);
+
+out_unlock:
+	if (unlikely(r < 0))
+		dev_err(adev->dev, "failed to clear page "
+			"tables on GEM object close (%ld)\n", r);
 	ttm_eu_backoff_reservation(&ticket, &list);
 }
 
@@ -226,7 +234,8 @@ int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
 		      AMDGPU_GEM_CREATE_CPU_GTT_USWC |
 		      AMDGPU_GEM_CREATE_VRAM_CLEARED |
 		      AMDGPU_GEM_CREATE_VM_ALWAYS_VALID |
-		      AMDGPU_GEM_CREATE_EXPLICIT_SYNC))
+		      AMDGPU_GEM_CREATE_EXPLICIT_SYNC |
+		      AMDGPU_GEM_CREATE_ENCRYPTED))
 
 		return -EINVAL;
 
@@ -234,6 +243,11 @@ int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
 	if (args->in.domains & ~AMDGPU_GEM_DOMAIN_MASK)
 		return -EINVAL;
 
+	if (!amdgpu_is_tmz(adev) && (flags & AMDGPU_GEM_CREATE_ENCRYPTED)) {
+		DRM_NOTE_ONCE("Cannot allocate secure buffer since TMZ is disabled\n");
+		return -EINVAL;
+	}
+
 	/* create a gem object to contain this object in */
 	if (args->in.domains & (AMDGPU_GEM_DOMAIN_GDS |
 	    AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA)) {
@@ -854,7 +868,8 @@ static int amdgpu_debugfs_gem_bo_info(int id, void *ptr, void *data)
 
 	attachment = READ_ONCE(bo->tbo.base.import_attach);
 	if (attachment)
-		seq_printf(m, " imported from %p", dma_buf);
+		seq_printf(m, " imported from %p%s", dma_buf,
+			   attachment->peer2peer ? " P2P" : "");
 	else if (dma_buf)
 		seq_printf(m, " exported as %p", dma_buf);
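
For reference, below is a minimal user-space sketch of how the AMDGPU_GEM_CREATE_ENCRYPTED flag validated in the new amdgpu_gem_create_ioctl() hunk would be requested. It uses the existing amdgpu UAPI (union drm_amdgpu_gem_create, DRM_IOCTL_AMDGPU_GEM_CREATE) through libdrm's drmIoctl(); the render-node path, buffer size, and VRAM domain choice are illustrative assumptions, not part of this change.

/*
 * Illustrative only (not part of this patch): ask amdgpu for a TMZ-protected
 * buffer object.  On kernels where TMZ is disabled, the ioctl now fails with
 * -EINVAL, matching the amdgpu_is_tmz() check added above.  The device path
 * and sizes are assumptions for the example; the amdgpu_drm.h include path
 * may vary by distribution (typically resolved via pkg-config libdrm).
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <xf86drm.h>       /* drmIoctl(), from libdrm */
#include <amdgpu_drm.h>    /* amdgpu UAPI definitions */

int main(void)
{
	union drm_amdgpu_gem_create req;
	int fd = open("/dev/dri/renderD128", O_RDWR);   /* assumed amdgpu render node */

	if (fd < 0)
		return 1;

	memset(&req, 0, sizeof(req));
	req.in.bo_size      = 4096;                          /* one page, for the example */
	req.in.alignment    = 4096;
	req.in.domains      = AMDGPU_GEM_DOMAIN_VRAM;
	req.in.domain_flags = AMDGPU_GEM_CREATE_ENCRYPTED;   /* the flag this patch accepts */

	if (drmIoctl(fd, DRM_IOCTL_AMDGPU_GEM_CREATE, &req)) {
		perror("DRM_IOCTL_AMDGPU_GEM_CREATE");       /* EINVAL when TMZ is off */
		close(fd);
		return 1;
	}
	printf("encrypted BO handle: %u\n", req.out.handle);
	close(fd);
	return 0;
}

Built against libdrm headers, this exercises exactly the path added above: with TMZ disabled the ioctl returns -EINVAL and the kernel logs the DRM_NOTE_ONCE message once; with TMZ enabled the buffer is created as a secure (encrypted) BO.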
