diff options
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/mes_userqueue.c')
| -rw-r--r-- | drivers/gpu/drm/amd/amdgpu/mes_userqueue.c | 95 | 
1 file changed, 88 insertions, 7 deletions
| diff --git a/drivers/gpu/drm/amd/amdgpu/mes_userqueue.c b/drivers/gpu/drm/amd/amdgpu/mes_userqueue.c index aee26f80bd53..2db9b2c63693 100644 --- a/drivers/gpu/drm/amd/amdgpu/mes_userqueue.c +++ b/drivers/gpu/drm/amd/amdgpu/mes_userqueue.c @@ -254,6 +254,7 @@ static int mes_userq_mqd_create(struct amdgpu_userq_mgr *uq_mgr,  	struct amdgpu_mqd *mqd_hw_default = &adev->mqds[queue->queue_type];  	struct drm_amdgpu_userq_in *mqd_user = args_in;  	struct amdgpu_mqd_prop *userq_props; +	struct amdgpu_gfx_shadow_info shadow_info;  	int r;  	/* Structure to initialize MQD for userqueue using generic MQD init function */ @@ -263,13 +264,6 @@ static int mes_userq_mqd_create(struct amdgpu_userq_mgr *uq_mgr,  		return -ENOMEM;  	} -	if (!mqd_user->wptr_va || !mqd_user->rptr_va || -	    !mqd_user->queue_va || mqd_user->queue_size == 0) { -		DRM_ERROR("Invalid MQD parameters for userqueue\n"); -		r = -EINVAL; -		goto free_props; -	} -  	r = amdgpu_userq_create_object(uq_mgr, &queue->mqd, mqd_hw_default->mqd_size);  	if (r) {  		DRM_ERROR("Failed to create MQD object for userqueue\n"); @@ -286,6 +280,8 @@ static int mes_userq_mqd_create(struct amdgpu_userq_mgr *uq_mgr,  	userq_props->doorbell_index = queue->doorbell_index;  	userq_props->fence_address = queue->fence_drv->gpu_addr; +	if (adev->gfx.funcs->get_gfx_shadow_info) +		adev->gfx.funcs->get_gfx_shadow_info(adev, &shadow_info, true);  	if (queue->queue_type == AMDGPU_HW_IP_COMPUTE) {  		struct drm_amdgpu_userq_mqd_compute_gfx11 *compute_mqd; @@ -302,6 +298,10 @@ static int mes_userq_mqd_create(struct amdgpu_userq_mgr *uq_mgr,  			goto free_mqd;  		} +		if (amdgpu_userq_input_va_validate(queue->vm, compute_mqd->eop_va, +		    max_t(u32, PAGE_SIZE, AMDGPU_GPU_PAGE_SIZE))) +			goto free_mqd; +  		userq_props->eop_gpu_addr = compute_mqd->eop_va;  		userq_props->hqd_pipe_priority = AMDGPU_GFX_PIPE_PRIO_NORMAL;  		userq_props->hqd_queue_priority = AMDGPU_GFX_QUEUE_PRIORITY_MINIMUM; @@ -329,6 +329,11 @@ static int 
mes_userq_mqd_create(struct amdgpu_userq_mgr *uq_mgr,  		userq_props->csa_addr = mqd_gfx_v11->csa_va;  		userq_props->tmz_queue =  			mqd_user->flags & AMDGPU_USERQ_CREATE_FLAGS_QUEUE_SECURE; + +		if (amdgpu_userq_input_va_validate(queue->vm, mqd_gfx_v11->shadow_va, +		    shadow_info.shadow_size)) +			goto free_mqd; +  		kfree(mqd_gfx_v11);  	} else if (queue->queue_type == AMDGPU_HW_IP_DMA) {  		struct drm_amdgpu_userq_mqd_sdma_gfx11 *mqd_sdma_v11; @@ -346,6 +351,10 @@ static int mes_userq_mqd_create(struct amdgpu_userq_mgr *uq_mgr,  			goto free_mqd;  		} +		if (amdgpu_userq_input_va_validate(queue->vm, mqd_sdma_v11->csa_va, +		    shadow_info.csa_size)) +			goto free_mqd; +  		userq_props->csa_addr = mqd_sdma_v11->csa_va;  		kfree(mqd_sdma_v11);  	} @@ -395,10 +404,82 @@ mes_userq_mqd_destroy(struct amdgpu_userq_mgr *uq_mgr,  	amdgpu_userq_destroy_object(uq_mgr, &queue->mqd);  } +static int mes_userq_preempt(struct amdgpu_userq_mgr *uq_mgr, +				struct amdgpu_usermode_queue *queue) +{ +	struct amdgpu_device *adev = uq_mgr->adev; +	struct mes_suspend_gang_input queue_input; +	struct amdgpu_userq_obj *ctx = &queue->fw_obj; +	signed long timeout = 2100000; /* 2100 ms */ +	u64 fence_gpu_addr; +	u32 fence_offset; +	u64 *fence_ptr; +	int i, r; + +	if (queue->state != AMDGPU_USERQ_STATE_MAPPED) +		return 0; +	r = amdgpu_device_wb_get(adev, &fence_offset); +	if (r) +		return r; + +	fence_gpu_addr = adev->wb.gpu_addr + (fence_offset * 4); +	fence_ptr = (u64 *)&adev->wb.wb[fence_offset]; +	*fence_ptr = 0; + +	memset(&queue_input, 0x0, sizeof(struct mes_suspend_gang_input)); +	queue_input.gang_context_addr = ctx->gpu_addr + AMDGPU_USERQ_PROC_CTX_SZ; +	queue_input.suspend_fence_addr = fence_gpu_addr; +	queue_input.suspend_fence_value = 1; +	amdgpu_mes_lock(&adev->mes); +	r = adev->mes.funcs->suspend_gang(&adev->mes, &queue_input); +	amdgpu_mes_unlock(&adev->mes); +	if (r) { +		DRM_ERROR("Failed to suspend gang: %d\n", r); +		goto out; +	} + +	for (i = 0; i < timeout; i++) 
{ +		if (*fence_ptr == 1) +			goto out; +		udelay(1); +	} +	r = -ETIMEDOUT; + +out: +	amdgpu_device_wb_free(adev, fence_offset); +	return r; +} + +static int mes_userq_restore(struct amdgpu_userq_mgr *uq_mgr, +				struct amdgpu_usermode_queue *queue) +{ +	struct amdgpu_device *adev = uq_mgr->adev; +	struct mes_resume_gang_input queue_input; +	struct amdgpu_userq_obj *ctx = &queue->fw_obj; +	int r; + +	if (queue->state == AMDGPU_USERQ_STATE_HUNG) +		return -EINVAL; +	if (queue->state != AMDGPU_USERQ_STATE_PREEMPTED) +		return 0; + +	memset(&queue_input, 0x0, sizeof(struct mes_resume_gang_input)); +	queue_input.gang_context_addr = ctx->gpu_addr + AMDGPU_USERQ_PROC_CTX_SZ; + +	amdgpu_mes_lock(&adev->mes); +	r = adev->mes.funcs->resume_gang(&adev->mes, &queue_input); +	amdgpu_mes_unlock(&adev->mes); +	if (r) +		dev_err(adev->dev, "Failed to resume queue, err (%d)\n", r); +	return r; +} +  const struct amdgpu_userq_funcs userq_mes_funcs = {  	.mqd_create = mes_userq_mqd_create,  	.mqd_destroy = mes_userq_mqd_destroy,  	.unmap = mes_userq_unmap,  	.map = mes_userq_map,  	.detect_and_reset = mes_userq_detect_and_reset, +	.preempt = mes_userq_preempt, +	.restore = mes_userq_restore,  }; | 
