Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c')
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c	80
1 file changed, 47 insertions, 33 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index e3281cacc586..a81dfaeeb8c0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -333,7 +333,7 @@ void amdgpu_uvd_free_handles(struct amdgpu_device *adev, struct drm_file *filp)
 	for (i = 0; i < adev->uvd.max_handles; ++i) {
 		uint32_t handle = atomic_read(&adev->uvd.handles[i]);
 		if (handle != 0 && adev->uvd.filp[i] == filp) {
-			struct fence *fence;
+			struct dma_fence *fence;
 
 			r = amdgpu_uvd_get_destroy_msg(ring, handle,
 						       false, &fence);
@@ -342,8 +342,8 @@ void amdgpu_uvd_free_handles(struct amdgpu_device *adev, struct drm_file *filp)
 				continue;
 			}
 
-			fence_wait(fence, false);
-			fence_put(fence);
+			dma_fence_wait(fence, false);
+			dma_fence_put(fence);
 
 			adev->uvd.filp[i] = NULL;
 			atomic_set(&adev->uvd.handles[i], 0);
@@ -360,6 +360,18 @@ static void amdgpu_uvd_force_into_uvd_segment(struct amdgpu_bo *abo)
 	}
 }
 
+static u64 amdgpu_uvd_get_addr_from_ctx(struct amdgpu_uvd_cs_ctx *ctx)
+{
+	uint32_t lo, hi;
+	uint64_t addr;
+
+	lo = amdgpu_get_ib_value(ctx->parser, ctx->ib_idx, ctx->data0);
+	hi = amdgpu_get_ib_value(ctx->parser, ctx->ib_idx, ctx->data1);
+	addr = ((uint64_t)lo) | (((uint64_t)hi) << 32);
+
+	return addr;
+}
+
 /**
  * amdgpu_uvd_cs_pass1 - first parsing round
  *
@@ -372,14 +384,10 @@ static int amdgpu_uvd_cs_pass1(struct amdgpu_uvd_cs_ctx *ctx)
 {
 	struct amdgpu_bo_va_mapping *mapping;
 	struct amdgpu_bo *bo;
-	uint32_t cmd, lo, hi;
-	uint64_t addr;
+	uint32_t cmd;
+	uint64_t addr = amdgpu_uvd_get_addr_from_ctx(ctx);
 	int r = 0;
 
-	lo = amdgpu_get_ib_value(ctx->parser, ctx->ib_idx, ctx->data0);
-	hi = amdgpu_get_ib_value(ctx->parser, ctx->ib_idx, ctx->data1);
-	addr = ((uint64_t)lo) | (((uint64_t)hi) << 32);
-
 	mapping = amdgpu_cs_find_mapping(ctx->parser, addr, &bo);
 	if (mapping == NULL) {
 		DRM_ERROR("Can't find BO for addr 0x%08Lx\n", addr);
@@ -698,18 +706,16 @@ static int amdgpu_uvd_cs_pass2(struct amdgpu_uvd_cs_ctx *ctx)
 {
 	struct amdgpu_bo_va_mapping *mapping;
 	struct amdgpu_bo *bo;
-	uint32_t cmd, lo, hi;
+	uint32_t cmd;
 	uint64_t start, end;
-	uint64_t addr;
+	uint64_t addr = amdgpu_uvd_get_addr_from_ctx(ctx);
 	int r;
 
-	lo = amdgpu_get_ib_value(ctx->parser, ctx->ib_idx, ctx->data0);
-	hi = amdgpu_get_ib_value(ctx->parser, ctx->ib_idx, ctx->data1);
-	addr = ((uint64_t)lo) | (((uint64_t)hi) << 32);
-
 	mapping = amdgpu_cs_find_mapping(ctx->parser, addr, &bo);
-	if (mapping == NULL)
+	if (mapping == NULL) {
+		DRM_ERROR("Can't find BO for addr 0x%08Lx\n", addr);
 		return -EINVAL;
+	}
 
 	start = amdgpu_bo_gpu_offset(bo);
 
@@ -876,6 +882,9 @@ int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser, uint32_t ib_idx)
 	struct amdgpu_ib *ib = &parser->job->ibs[ib_idx];
 	int r;
 
+	parser->job->vm = NULL;
+	ib->gpu_addr = amdgpu_sa_bo_gpu_addr(ib->sa_bo);
+
 	if (ib->length_dw % 16) {
 		DRM_ERROR("UVD IB length (%d) not 16 dwords aligned!\n",
 			  ib->length_dw);
@@ -890,10 +899,13 @@ int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser, uint32_t ib_idx)
 	ctx.buf_sizes = buf_sizes;
 	ctx.ib_idx = ib_idx;
 
-	/* first round, make sure the buffers are actually in the UVD segment */
-	r = amdgpu_uvd_cs_packets(&ctx, amdgpu_uvd_cs_pass1);
-	if (r)
-		return r;
+	/* first round only required on chips without UVD 64 bit address support */
+	if (!parser->adev->uvd.address_64_bit) {
+		/* first round, make sure the buffers are actually in the UVD segment */
+		r = amdgpu_uvd_cs_packets(&ctx, amdgpu_uvd_cs_pass1);
+		if (r)
+			return r;
+	}
 
 	/* second round, patch buffer addresses into the command stream */
 	r = amdgpu_uvd_cs_packets(&ctx, amdgpu_uvd_cs_pass2);
@@ -909,14 +921,14 @@ int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser, uint32_t ib_idx)
 }
 
 static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
-			       bool direct, struct fence **fence)
+			       bool direct, struct dma_fence **fence)
 {
 	struct ttm_validate_buffer tv;
 	struct ww_acquire_ctx ticket;
 	struct list_head head;
 	struct amdgpu_job *job;
 	struct amdgpu_ib *ib;
-	struct fence *f = NULL;
+	struct dma_fence *f = NULL;
 	struct amdgpu_device *adev = ring->adev;
 	uint64_t addr;
 	int i, r;
@@ -931,7 +943,7 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
 	if (r)
 		return r;
 
-	if (!bo->adev->uvd.address_64_bit) {
+	if (!ring->adev->uvd.address_64_bit) {
 		amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_VRAM);
 		amdgpu_uvd_force_into_uvd_segment(bo);
 	}
@@ -960,7 +972,7 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
 
 	if (direct) {
 		r = amdgpu_ib_schedule(ring, 1, ib, NULL, NULL, &f);
-		job->fence = fence_get(f);
+		job->fence = dma_fence_get(f);
 		if (r)
 			goto err_free;
 
@@ -975,9 +987,9 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
 	ttm_eu_fence_buffer_objects(&ticket, &head, f);
 
 	if (fence)
-		*fence = fence_get(f);
+		*fence = dma_fence_get(f);
 	amdgpu_bo_unref(&bo);
-	fence_put(f);
+	dma_fence_put(f);
 
 	return 0;
 
@@ -993,7 +1005,7 @@ err:
    crash the vcpu so just try to emmit a dummy create/destroy msg to
    avoid this */
 int amdgpu_uvd_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
-			      struct fence **fence)
+			      struct dma_fence **fence)
 {
 	struct amdgpu_device *adev = ring->adev;
 	struct amdgpu_bo *bo;
@@ -1002,7 +1014,8 @@ int amdgpu_uvd_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
 
 	r = amdgpu_bo_create(adev, 1024, PAGE_SIZE, true,
 			     AMDGPU_GEM_DOMAIN_VRAM,
-			     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+			     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
+			     AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
 			     NULL, NULL, &bo);
 	if (r)
 		return r;
@@ -1042,7 +1055,7 @@ int amdgpu_uvd_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
 }
 
 int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
-			       bool direct, struct fence **fence)
+			       bool direct, struct dma_fence **fence)
 {
 	struct amdgpu_device *adev = ring->adev;
 	struct amdgpu_bo *bo;
@@ -1051,7 +1064,8 @@ int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
 
 	r = amdgpu_bo_create(adev, 1024, PAGE_SIZE, true,
 			     AMDGPU_GEM_DOMAIN_VRAM,
-			     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+			     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
+			     AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
 			     NULL, NULL, &bo);
 	if (r)
 		return r;
@@ -1128,7 +1142,7 @@ void amdgpu_uvd_ring_end_use(struct amdgpu_ring *ring)
  */
 int amdgpu_uvd_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 {
-	struct fence *fence;
+	struct dma_fence *fence;
 	long r;
 
 	r = amdgpu_uvd_get_create_msg(ring, 1, NULL);
@@ -1143,7 +1157,7 @@ int amdgpu_uvd_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 		goto error;
 	}
 
-	r = fence_wait_timeout(fence, false, timeout);
+	r = dma_fence_wait_timeout(fence, false, timeout);
 	if (r == 0) {
 		DRM_ERROR("amdgpu: IB test timed out.\n");
 		r = -ETIMEDOUT;
@@ -1154,7 +1168,7 @@ int amdgpu_uvd_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 		r = 0;
 	}
 
-	fence_put(fence);
+	dma_fence_put(fence);
 
 error:
 	return r;
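Note: the fence_* to dma_fence_* changes above follow the kernel-wide rename of struct fence to struct dma_fence (the header moved from <linux/fence.h> to <linux/dma-fence.h>); only the type and function prefixes change, not the semantics. A minimal sketch of the consumer-side pattern the patched code relies on -- uvd_wait_on_fence is a hypothetical helper name, not part of this patch:

#include <linux/dma-fence.h>
#include <linux/errno.h>

/*
 * Illustration only: take a reference on a valid dma_fence, wait on it
 * with a timeout, then drop the reference.  dma_fence_wait_timeout()
 * returns a negative error code, 0 on timeout, or the remaining jiffies
 * on success -- the same convention amdgpu_uvd_ring_test_ib() handles
 * above with its r == 0 and r < 0 branches.
 */
static long uvd_wait_on_fence(struct dma_fence *fence, long timeout)
{
	long r;

	dma_fence_get(fence);		/* hold a reference across the wait */
	r = dma_fence_wait_timeout(fence, false, timeout);
	dma_fence_put(fence);		/* drop our reference again */

	if (r == 0)			/* wait timed out */
		return -ETIMEDOUT;

	return r < 0 ? r : 0;		/* error code, or 0 on success */
}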

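UVD command streams carry buffer addresses as two 32-bit dwords, which the new amdgpu_uvd_get_addr_from_ctx() helper recombines once for both parsing passes instead of duplicating the lo/hi logic in pass1 and pass2. A stand-alone illustration of the same combine, written as plain userspace C with hypothetical dword values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* example dwords as they would be read from ctx->data0/ctx->data1 */
	uint32_t lo = 0xdeadb000, hi = 0x00000001;

	/* identical combine to the helper added by this patch */
	uint64_t addr = ((uint64_t)lo) | (((uint64_t)hi) << 32);

	printf("addr = 0x%llx\n", (unsigned long long)addr);	/* 0x1deadb000 */
	return 0;
}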