author		Chunming Zhou <david1.zhou@amd.com>	2015-08-05 14:07:08 +0300
committer	Alex Deucher <alexander.deucher@amd.com>	2015-08-17 23:51:02 +0300
commit		80de5913cf31c86d64547af0715de4822c9b1abe (patch)
tree		4c46bff9a98907bdeaa9fb1d48a6b7f9b19f5b6b /drivers/gpu/drm
parent		47f38501f11fa45d8a7797f1965448c1e20049d4 (diff)
Revert "drm/amdgpu: return new seq_no for amd_sched_push_job"
This reverts commit d1d33da8eb86b8ca41dd9ed95738030df5267b95.
Reviewed-by: Christian König <christian.koenig@amd.com>
Conflicts:
drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
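
For context: the reverted change had made amd_sched_push_job() return the newly allocated virtual sequence number. With this revert, callers reserve the sequence number themselves (via atomic64_inc_return() on the entity's last_queued_v_seq) before pushing the job, and amd_sched_push_job() returns a plain status code. A minimal sketch of the restored caller-side pattern, using the names from the hunks below; the wrapper function itself is hypothetical and omits job setup and error handling:

    /* Sketch only: amd_sched_push_job(), amd_sched_wait_emit(),
     * last_queued_v_seq and ib->sequence are taken from the diff below;
     * submit_with_scheduler() is a hypothetical wrapper. */
    static int submit_with_scheduler(struct amd_gpu_scheduler *sched,
                                     struct amd_context_entity *c_entity,
                                     struct amdgpu_ib *ib, void *job)
    {
            uint64_t v_seq;

            /* Reserve the next virtual sequence number up front... */
            v_seq = atomic64_inc_return(&c_entity->last_queued_v_seq);
            ib->sequence = v_seq;

            /* ...then push; after the revert the return value is a status
             * code, not the sequence number as in the reverted commit. */
            amd_sched_push_job(sched, c_entity, job);

            /* Wait for emission against the locally reserved v_seq. */
            return amd_sched_wait_emit(c_entity, v_seq, false, -1);
    }

Because the sequence is reserved before the push, ib->sequence is valid immediately (amdgpu_cs_ioctl reads it for cs->out.handle right after pushing) rather than depending on the push's return value.
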
Diffstat (limited to 'drivers/gpu/drm')
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c		 5
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c	 9
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c		27
-rw-r--r--	drivers/gpu/drm/amd/scheduler/gpu_scheduler.c	10
-rw-r--r--	drivers/gpu/drm/amd/scheduler/gpu_scheduler.h	 2
5 files changed, 38 insertions, 15 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index aa1bc24b7edb..f72a8583b1a9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -901,6 +901,8 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
 	if (amdgpu_enable_scheduler && parser->num_ibs) {
 		struct amdgpu_ring * ring =
 			amdgpu_cs_parser_get_ring(adev, parser);
+		parser->ibs[parser->num_ibs - 1].sequence = atomic64_inc_return(
+			&parser->ctx->rings[ring->idx].c_entity.last_queued_v_seq);
 		if (ring->is_pte_ring || (parser->bo_list && parser->bo_list->has_userptr)) {
 			r = amdgpu_cs_parser_prepare_job(parser);
 			if (r)
@@ -910,8 +912,7 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
 		parser->ring = ring;
 		parser->run_job = amdgpu_cs_parser_run_job;
 		parser->free_job = amdgpu_cs_parser_free_job;
-		parser->ibs[parser->num_ibs - 1].sequence =
-			amd_sched_push_job(ring->scheduler,
+		amd_sched_push_job(ring->scheduler,
 				   &parser->ctx->rings[ring->idx].c_entity,
 				   parser);
 		cs->out.handle = parser->ibs[parser->num_ibs - 1].sequence;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
index 995901b9e428..0fcf020917d0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
@@ -121,6 +121,7 @@ int amdgpu_sched_ib_submit_kernel_helper(struct amdgpu_device *adev,
 {
 	int r = 0;
 	if (amdgpu_enable_scheduler) {
+		uint64_t v_seq;
 		struct amdgpu_cs_parser *sched_job =
 			amdgpu_cs_parser_create(adev, owner, &adev->kernel_ctx,
 						ibs, 1);
@@ -128,12 +129,16 @@ int amdgpu_sched_ib_submit_kernel_helper(struct amdgpu_device *adev,
 			return -ENOMEM;
 		}
 		sched_job->free_job = free_job;
-		ibs[num_ibs - 1].sequence = amd_sched_push_job(ring->scheduler,
+		v_seq = atomic64_inc_return(&adev->kernel_ctx.rings[ring->idx].c_entity.last_queued_v_seq);
+		ibs[num_ibs - 1].sequence = v_seq;
+		amd_sched_push_job(ring->scheduler,
 				   &adev->kernel_ctx.rings[ring->idx].c_entity,
 				   sched_job);
 		r = amd_sched_wait_emit(
 				   &adev->kernel_ctx.rings[ring->idx].c_entity,
-				   ibs[num_ibs - 1].sequence, false, -1);
+				   v_seq,
+				   false,
+				   -1);
 		if (r)
 			WARN(true, "emit timeout\n");
 	} else
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 78713ae3b158..9d5043c42fc5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -371,6 +371,7 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
 
 	if (amdgpu_enable_scheduler) {
 		int r;
+		uint64_t v_seq;
 		sched_job = amdgpu_cs_parser_create(adev, AMDGPU_FENCE_OWNER_VM,
 						    &adev->kernel_ctx, ib, 1);
 		if(!sched_job)
@@ -378,11 +379,15 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
 		sched_job->job_param.vm.bo = bo;
 		sched_job->run_job = amdgpu_vm_run_job;
 		sched_job->free_job = amdgpu_vm_free_job;
-		ib->sequence = amd_sched_push_job(ring->scheduler,
+		v_seq = atomic64_inc_return(&adev->kernel_ctx.rings[ring->idx].c_entity.last_queued_v_seq);
+		ib->sequence = v_seq;
+		amd_sched_push_job(ring->scheduler,
 				   &adev->kernel_ctx.rings[ring->idx].c_entity,
 				   sched_job);
 		r = amd_sched_wait_emit(&adev->kernel_ctx.rings[ring->idx].c_entity,
-					ib->sequence, false, -1);
+					v_seq,
+					false,
+					-1);
 		if (r)
 			DRM_ERROR("emit timeout\n");
 
@@ -516,6 +521,7 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
 
 	if (amdgpu_enable_scheduler) {
 		int r;
+		uint64_t v_seq;
 		sched_job = amdgpu_cs_parser_create(adev, AMDGPU_FENCE_OWNER_VM,
 						    &adev->kernel_ctx, ib, 1);
@@ -524,11 +530,15 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
 		sched_job->job_param.vm.bo = pd;
 		sched_job->run_job = amdgpu_vm_run_job;
 		sched_job->free_job = amdgpu_vm_free_job;
-		ib->sequence = amd_sched_push_job(ring->scheduler,
+		v_seq = atomic64_inc_return(&adev->kernel_ctx.rings[ring->idx].c_entity.last_queued_v_seq);
+		ib->sequence = v_seq;
+		amd_sched_push_job(ring->scheduler,
 				   &adev->kernel_ctx.rings[ring->idx].c_entity,
 				   sched_job);
 		r = amd_sched_wait_emit(&adev->kernel_ctx.rings[ring->idx].c_entity,
-					ib->sequence, false, -1);
+					v_seq,
+					false,
+					-1);
 		if (r)
 			DRM_ERROR("emit timeout\n");
 	} else {
@@ -862,6 +872,7 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
 
 	if (amdgpu_enable_scheduler) {
 		int r;
+		uint64_t v_seq;
 		sched_job = amdgpu_cs_parser_create(adev, AMDGPU_FENCE_OWNER_VM,
 						    &adev->kernel_ctx, ib, 1);
 		if(!sched_job)
@@ -872,11 +883,15 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
 		sched_job->job_param.vm_mapping.fence = fence;
 		sched_job->run_job = amdgpu_vm_bo_update_mapping_run_job;
 		sched_job->free_job = amdgpu_vm_free_job;
-		ib->sequence = amd_sched_push_job(ring->scheduler,
+		v_seq = atomic64_inc_return(&adev->kernel_ctx.rings[ring->idx].c_entity.last_queued_v_seq);
+		ib->sequence = v_seq;
+		amd_sched_push_job(ring->scheduler,
 				   &adev->kernel_ctx.rings[ring->idx].c_entity,
 				   sched_job);
 		r = amd_sched_wait_emit(&adev->kernel_ctx.rings[ring->idx].c_entity,
-					ib->sequence, false, -1);
+					v_seq,
+					false,
+					-1);
 		if (r)
 			DRM_ERROR("emit timeout\n");
 	} else {
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
index b9aa572980d2..1204b7386b39 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
@@ -289,9 +289,12 @@ int amd_context_entity_fini(struct amd_gpu_scheduler *sched,
  * @sched	The pointer to the scheduler
  * @c_entity	The pointer to amd_context_entity
  * @job		The pointer to job required to submit
- * return the virtual sequence number
+ * return 0 if succeed. -1 if failed.
+ *        -2 indicate queue is full for this client, client should wait untill
+ *           scheduler consum some queued command.
+ *        -1 other fail.
 */
-uint64_t amd_sched_push_job(struct amd_gpu_scheduler *sched,
+int amd_sched_push_job(struct amd_gpu_scheduler *sched,
 			    struct amd_context_entity *c_entity,
 			    void *job)
 {
@@ -305,8 +308,7 @@ uint64_t amd_sched_push_job(struct amd_gpu_scheduler *sched,
 	}
 
 	wake_up_interruptible(&sched->wait_queue);
-
-	return atomic64_inc_return(&c_entity->last_queued_v_seq);
+	return 0;
 }
 
 /**
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
index c46d0854ab75..1a01ac45cd4c 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
@@ -124,7 +124,7 @@ struct amd_gpu_scheduler *amd_sched_create(void *device,
 
 int amd_sched_destroy(struct amd_gpu_scheduler *sched);
 
-uint64_t amd_sched_push_job(struct amd_gpu_scheduler *sched,
+int amd_sched_push_job(struct amd_gpu_scheduler *sched,
 			    struct amd_context_entity *c_entity,
 			    void *job);
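
Per the kerneldoc restored above, a negative return from amd_sched_push_job() signals failure, with -2 meaning the client's queue is full. A purely hypothetical caller honoring that contract might retry (note the restored body shown above always returns 0, so this is illustrative only):

    /* Illustrative only: -2 is the documented "queue full" code, not a
     * symbolic constant from the source. */
    while (amd_sched_push_job(sched, c_entity, job) == -2)
            schedule_timeout_interruptible(1); /* let the scheduler drain */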