diff options
| author | Christian König <christian.koenig@amd.com> | 2018-08-17 16:07:13 +0300 | 
|---|---|---|
| committer | Alex Deucher <alexander.deucher@amd.com> | 2018-08-27 19:11:17 +0300 | 
| commit | 9a02ece43ee49efdfad19a3ca90c02d20f491031 (patch) | |
| tree | b3ebe9f6952be60368760167441e436edb20090b /drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | |
| parent | 262b9c392e7dbe264b075fa5ad6a0de5a403da7a (diff) | |
| download | linux-9a02ece43ee49efdfad19a3ca90c02d20f491031.tar.xz | |
drm/amdgpu: cleanup VM handling in the CS a bit
Add a helper function for getting the root PD addr, join the two
VM related functions, and clean up the function name.
No functional change.
Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Huang Rui <ray.huang@amd.com>
Reviewed-by: Junwei Zhang <Jerry.Zhang@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c')
| -rw-r--r-- | drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 160 | 
1 file changed, 74 insertions, 86 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 313ac971eaaf..5b70a30967ec 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -804,8 +804,9 @@ static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error,
 	amdgpu_bo_unref(&parser->uf_entry.robj);
 }
 
-static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p)
+static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
 {
+	struct amdgpu_ring *ring = to_amdgpu_ring(p->entity->rq->sched);
 	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
 	struct amdgpu_device *adev = p->adev;
 	struct amdgpu_vm *vm = &fpriv->vm;
@@ -814,6 +815,71 @@ static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p)
 	struct amdgpu_bo *bo;
 	int r;
 
+	/* Only for UVD/VCE VM emulation */
+	if (ring->funcs->parse_cs || ring->funcs->patch_cs_in_place) {
+		unsigned i, j;
+
+		for (i = 0, j = 0; i < p->nchunks && j < p->job->num_ibs; i++) {
+			struct drm_amdgpu_cs_chunk_ib *chunk_ib;
+			struct amdgpu_bo_va_mapping *m;
+			struct amdgpu_bo *aobj = NULL;
+			struct amdgpu_cs_chunk *chunk;
+			uint64_t offset, va_start;
+			struct amdgpu_ib *ib;
+			uint8_t *kptr;
+
+			chunk = &p->chunks[i];
+			ib = &p->job->ibs[j];
+			chunk_ib = chunk->kdata;
+
+			if (chunk->chunk_id != AMDGPU_CHUNK_ID_IB)
+				continue;
+
+			va_start = chunk_ib->va_start & AMDGPU_VA_HOLE_MASK;
+			r = amdgpu_cs_find_mapping(p, va_start, &aobj, &m);
+			if (r) {
+				DRM_ERROR("IB va_start is invalid\n");
+				return r;
+			}
+
+			if ((va_start + chunk_ib->ib_bytes) >
+			    (m->last + 1) * AMDGPU_GPU_PAGE_SIZE) {
+				DRM_ERROR("IB va_start+ib_bytes is invalid\n");
+				return -EINVAL;
+			}
+
+			/* the IB should be reserved at this point */
+			r = amdgpu_bo_kmap(aobj, (void **)&kptr);
+			if (r) {
+				return r;
+			}
+
+			offset = m->start * AMDGPU_GPU_PAGE_SIZE;
+			kptr += va_start - offset;
+
+			if (ring->funcs->parse_cs) {
+				memcpy(ib->ptr, kptr, chunk_ib->ib_bytes);
+				amdgpu_bo_kunmap(aobj);
+
+				r = amdgpu_ring_parse_cs(ring, p, j);
+				if (r)
+					return r;
+			} else {
+				ib->ptr = (uint32_t *)kptr;
+				r = amdgpu_ring_patch_cs_in_place(ring, p, j);
+				amdgpu_bo_kunmap(aobj);
+				if (r)
+					return r;
+			}
+
+			j++;
+		}
+	}
+
+	if (!p->job->vm)
+		return amdgpu_cs_sync_rings(p);
+
+
 	r = amdgpu_vm_clear_freed(adev, vm, NULL);
 	if (r)
 		return r;
@@ -876,6 +942,12 @@ static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p)
 	if (r)
 		return r;
 
+	r = reservation_object_reserve_shared(vm->root.base.bo->tbo.resv);
+	if (r)
+		return r;
+
+	p->job->vm_pd_addr = amdgpu_bo_gpu_offset(vm->root.base.bo);
+
 	if (amdgpu_vm_debug) {
 		/* Invalidate all BOs to test for userspace bugs */
 		amdgpu_bo_list_for_each_entry(e, p->bo_list) {
@@ -887,90 +959,6 @@ static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p)
 		}
 	}
 
-	return r;
-}
-
-static int amdgpu_cs_ib_vm_chunk(struct amdgpu_device *adev,
-				 struct amdgpu_cs_parser *p)
-{
-	struct amdgpu_ring *ring = to_amdgpu_ring(p->entity->rq->sched);
-	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
-	struct amdgpu_vm *vm = &fpriv->vm;
-	int r;
-
-	/* Only for UVD/VCE VM emulation */
-	if (ring->funcs->parse_cs || ring->funcs->patch_cs_in_place) {
-		unsigned i, j;
-
-		for (i = 0, j = 0; i < p->nchunks && j < p->job->num_ibs; i++) {
-			struct drm_amdgpu_cs_chunk_ib *chunk_ib;
-			struct amdgpu_bo_va_mapping *m;
-			struct amdgpu_bo *aobj = NULL;
-			struct amdgpu_cs_chunk *chunk;
-			uint64_t offset, va_start;
-			struct amdgpu_ib *ib;
-			uint8_t *kptr;
-
-			chunk = &p->chunks[i];
-			ib = &p->job->ibs[j];
-			chunk_ib = chunk->kdata;
-
-			if (chunk->chunk_id != AMDGPU_CHUNK_ID_IB)
-				continue;
-
-			va_start = chunk_ib->va_start & AMDGPU_VA_HOLE_MASK;
-			r = amdgpu_cs_find_mapping(p, va_start, &aobj, &m);
-			if (r) {
-				DRM_ERROR("IB va_start is invalid\n");
-				return r;
-			}
-
-			if ((va_start + chunk_ib->ib_bytes) >
-			    (m->last + 1) * AMDGPU_GPU_PAGE_SIZE) {
-				DRM_ERROR("IB va_start+ib_bytes is invalid\n");
-				return -EINVAL;
-			}
-
-			/* the IB should be reserved at this point */
-			r = amdgpu_bo_kmap(aobj, (void **)&kptr);
-			if (r) {
-				return r;
-			}
-
-			offset = m->start * AMDGPU_GPU_PAGE_SIZE;
-			kptr += va_start - offset;
-
-			if (ring->funcs->parse_cs) {
-				memcpy(ib->ptr, kptr, chunk_ib->ib_bytes);
-				amdgpu_bo_kunmap(aobj);
-
-				r = amdgpu_ring_parse_cs(ring, p, j);
-				if (r)
-					return r;
-			} else {
-				ib->ptr = (uint32_t *)kptr;
-				r = amdgpu_ring_patch_cs_in_place(ring, p, j);
-				amdgpu_bo_kunmap(aobj);
-				if (r)
-					return r;
-			}
-
-			j++;
-		}
-	}
-
-	if (p->job->vm) {
-		p->job->vm_pd_addr = amdgpu_bo_gpu_offset(vm->root.base.bo);
-
-		r = amdgpu_bo_vm_update_pte(p);
-		if (r)
-			return r;
-
-		r = reservation_object_reserve_shared(vm->root.base.bo->tbo.resv);
-		if (r)
-			return r;
-	}
-
 	return amdgpu_cs_sync_rings(p);
 }
 
@@ -1309,7 +1297,7 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
 	for (i = 0; i < parser.job->num_ibs; i++)
 		trace_amdgpu_cs(&parser, i);
 
-	r = amdgpu_cs_ib_vm_chunk(adev, &parser);
+	r = amdgpu_cs_vm_handling(&parser);
 	if (r)
 		goto out;
