author    Daniel Vetter <daniel.vetter@ffwll.ch>    2021-08-05 13:46:55 +0300
committer Daniel Vetter <daniel.vetter@ffwll.ch>    2021-08-30 11:58:47 +0300
commit    da3208e8637ec01014872e17a22aa03a5b8f3376 (patch)
tree      953976f59fc333a551dfc6b9f9d6f08efff1a5a9 /drivers/gpu/drm/v3d/v3d_gem.c
parent    916044fac862323c5a1a5fdaf3edd6d22ae81428 (diff)
download  linux-da3208e8637ec01014872e17a22aa03a5b8f3376.tar.xz
drm/v3d: Use scheduler dependency handling
With the prep work out of the way this isn't tricky anymore.

Aside: The chaining of the various jobs is a bit awkward, with the
possibility of failure in bad places. I think with the
drm_sched_job_init/arm split and maybe preloading the
job->dependencies xarray this should be fixable.

v2: Rebase over renamed function names for adding dependencies.

Reviewed-by: Melissa Wen <mwen@igalia.com> (v1)
Acked-by: Emma Anholt <emma@anholt.net>
Cc: Melissa Wen <melissa.srw@gmail.com>
Signed-off-by: Daniel Vetter <daniel.vetter@intel.com>
Cc: Emma Anholt <emma@anholt.net>
Link: https://patchwork.freedesktop.org/patch/msgid/20210805104705.862416-11-daniel.vetter@ffwll.ch
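For reference, a minimal sketch of the dependency-handling pattern this patch switches to, built from the scheduler helpers that appear in the diff below (drm_sched_job_init(), drm_sched_job_add_dependency(), drm_sched_job_add_implicit_dependencies()). The function and parameter names here are illustrative placeholders, not the actual v3d code:

#include <drm/gpu_scheduler.h>
#include <drm/drm_gem.h>
#include <linux/dma-fence.h>

/* Sketch only (not part of the patch): build up a job's dependencies
 * through the scheduler instead of a driver-private xarray.
 */
static int example_job_init(struct drm_sched_job *sched_job,
			    struct drm_sched_entity *entity,
			    struct dma_fence *in_fence,
			    struct drm_gem_object **bo, int bo_count,
			    void *owner)
{
	int i, ret;

	ret = drm_sched_job_init(sched_job, entity, owner);
	if (ret)
		return ret;

	/* Explicit dependency: the scheduler takes over the fence reference. */
	ret = drm_sched_job_add_dependency(sched_job, in_fence);
	if (ret)
		goto fail;

	/* Implicit dependencies pulled from each BO's reservation object. */
	for (i = 0; i < bo_count; i++) {
		ret = drm_sched_job_add_implicit_dependencies(sched_job,
							      bo[i], true);
		if (ret)
			goto fail;
	}

	return 0;

fail:
	/* Cleanup after init but before arm is allowed, as in the diff below. */
	drm_sched_job_cleanup(sched_job);
	return ret;
}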
Diffstat (limited to 'drivers/gpu/drm/v3d/v3d_gem.c')
-rw-r--r--  drivers/gpu/drm/v3d/v3d_gem.c  26
1 file changed, 9 insertions(+), 17 deletions(-)
diff --git a/drivers/gpu/drm/v3d/v3d_gem.c b/drivers/gpu/drm/v3d/v3d_gem.c
index 42587248c54e..a3529809d547 100644
--- a/drivers/gpu/drm/v3d/v3d_gem.c
+++ b/drivers/gpu/drm/v3d/v3d_gem.c
@@ -259,8 +259,8 @@ v3d_lock_bo_reservations(struct v3d_job *job,
return ret;
for (i = 0; i < job->bo_count; i++) {
- ret = drm_gem_fence_array_add_implicit(&job->deps,
- job->bo[i], true);
+ ret = drm_sched_job_add_implicit_dependencies(&job->base,
+ job->bo[i], true);
if (ret) {
drm_gem_unlock_reservations(job->bo, job->bo_count,
acquire_ctx);
@@ -356,8 +356,6 @@ static void
v3d_job_free(struct kref *ref)
{
struct v3d_job *job = container_of(ref, struct v3d_job, refcount);
- unsigned long index;
- struct dma_fence *fence;
int i;
for (i = 0; i < job->bo_count; i++) {
@@ -366,11 +364,6 @@ v3d_job_free(struct kref *ref)
}
kvfree(job->bo);
- xa_for_each(&job->deps, index, fence) {
- dma_fence_put(fence);
- }
- xa_destroy(&job->deps);
-
dma_fence_put(job->irq_fence);
dma_fence_put(job->done_fence);
@@ -457,7 +450,6 @@ v3d_job_init(struct v3d_dev *v3d, struct drm_file *file_priv,
if (ret < 0)
return ret;
- xa_init_flags(&job->deps, XA_FLAGS_ALLOC);
ret = drm_sched_job_init(&job->base, &v3d_priv->sched_entity[queue],
v3d_priv);
if (ret)
@@ -467,7 +459,7 @@ v3d_job_init(struct v3d_dev *v3d, struct drm_file *file_priv,
if (ret == -EINVAL)
goto fail_job;
- ret = drm_gem_fence_array_add(&job->deps, in_fence);
+ ret = drm_sched_job_add_dependency(&job->base, in_fence);
if (ret)
goto fail_job;
@@ -477,7 +469,6 @@ v3d_job_init(struct v3d_dev *v3d, struct drm_file *file_priv,
fail_job:
drm_sched_job_cleanup(&job->base);
fail:
- xa_destroy(&job->deps);
pm_runtime_put_autosuspend(v3d->drm.dev);
return ret;
}
@@ -640,8 +631,8 @@ v3d_submit_cl_ioctl(struct drm_device *dev, void *data,
v3d_perfmon_get(bin->base.perfmon);
v3d_push_job(&bin->base);
- ret = drm_gem_fence_array_add(&render->base.deps,
- dma_fence_get(bin->base.done_fence));
+ ret = drm_sched_job_add_dependency(&render->base.base,
+ dma_fence_get(bin->base.done_fence));
if (ret)
goto fail_unreserve;
}
@@ -651,7 +642,8 @@ v3d_submit_cl_ioctl(struct drm_device *dev, void *data,
if (clean_job) {
struct dma_fence *render_fence =
dma_fence_get(render->base.done_fence);
- ret = drm_gem_fence_array_add(&clean_job->deps, render_fence);
+ ret = drm_sched_job_add_dependency(&clean_job->base,
+ render_fence);
if (ret)
goto fail_unreserve;
clean_job->perfmon = render->base.perfmon;
@@ -853,8 +845,8 @@ v3d_submit_csd_ioctl(struct drm_device *dev, void *data,
mutex_lock(&v3d->sched_lock);
v3d_push_job(&job->base);
- ret = drm_gem_fence_array_add(&clean_job->deps,
- dma_fence_get(job->base.done_fence));
+ ret = drm_sched_job_add_dependency(&clean_job->base,
+ dma_fence_get(job->base.done_fence));
if (ret)
goto fail_unreserve;
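The submit-ioctl hunks above implement the job chaining the commit message calls awkward: each follow-up job takes an extra reference on the previous job's done_fence and hands it to the scheduler, which consumes that reference. A minimal sketch of the pattern, assuming prev has already been pushed with v3d_push_job() as in the ioctl code above (prev/next are hypothetical v3d_job pointers):

#include "v3d_drv.h"

/* Sketch only: make "next" wait for "prev" to finish on the hardware.
 * drm_sched_job_add_dependency() consumes a fence reference, so an
 * extra reference is taken with dma_fence_get() first.
 */
static int example_chain_jobs(struct v3d_job *prev, struct v3d_job *next)
{
	return drm_sched_job_add_dependency(&next->base,
					    dma_fence_get(prev->done_fence));
}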