author		Rob Clark <robdclark@chromium.org>	2025-06-29 23:13:14 +0300
committer	Rob Clark <robin.clark@oss.qualcomm.com>	2025-07-05 03:48:37 +0300
commit		92395af63a9958615edfa9d4ef1ea72c92a00410 (patch)
tree		644d6e96fd9c92f04f69334acb2cd24ef2bc8374 /drivers/gpu/drm/msm/msm_submitqueue.c
parent		cefb919cfa5359c72325be8c7dc8a245c85c2756 (diff)
download	linux-92395af63a9958615edfa9d4ef1ea72c92a00410.tar.xz
drm/msm: Add VM_BIND submitqueue
This submitqueue type isn't tied to a hw ringbuffer, but instead
executes on the CPU for performing async VM_BIND ops.

Signed-off-by: Rob Clark <robdclark@chromium.org>
Signed-off-by: Rob Clark <robin.clark@oss.qualcomm.com>
Tested-by: Antonino Maniscalco <antomani103@gmail.com>
Reviewed-by: Antonino Maniscalco <antomani103@gmail.com>
Patchwork: https://patchwork.freedesktop.org/patch/661517/
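For illustration, a minimal userspace sketch of creating such a queue through
the existing DRM_IOCTL_MSM_SUBMITQUEUE_NEW ioctl. This uses libdrm's drmIoctl()
and the MSM UAPI header; the helper name is hypothetical, and
MSM_SUBMITQUEUE_VM_BIND is assumed to be visible from the UAPI update in this
series. Per the checks added in the patch below, prio must be 0 and the
context must use a userspace-managed (VM_BIND) VM:

#include <stdint.h>
#include <xf86drm.h>
#include <drm/msm_drm.h>

/* Hypothetical helper: ask the kernel for a VM_BIND submitqueue on an
 * already-open MSM DRM fd.  On success, *queue_id receives the new
 * queue's identifier. */
static int create_vm_bind_queue(int fd, uint32_t *queue_id)
{
	struct drm_msm_submitqueue req = {
		.flags = MSM_SUBMITQUEUE_VM_BIND,	/* CPU-side VM_BIND queue */
		.prio  = 0,				/* must be 0 for VM_BIND queues */
	};
	int ret = drmIoctl(fd, DRM_IOCTL_MSM_SUBMITQUEUE_NEW, &req);

	if (ret)
		return ret;	/* fails (errno == EINVAL) if, e.g., the VM is kernel managed */

	*queue_id = req.id;	/* out field filled in by the kernel */
	return 0;
}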
Diffstat (limited to 'drivers/gpu/drm/msm/msm_submitqueue.c')
-rw-r--r--	drivers/gpu/drm/msm/msm_submitqueue.c	67
1 file changed, 51 insertions(+), 16 deletions(-)
diff --git a/drivers/gpu/drm/msm/msm_submitqueue.c b/drivers/gpu/drm/msm/msm_submitqueue.c
index 8ced49c7557b..8617a82cd6b3 100644
--- a/drivers/gpu/drm/msm/msm_submitqueue.c
+++ b/drivers/gpu/drm/msm/msm_submitqueue.c
@@ -72,6 +72,9 @@ void msm_submitqueue_destroy(struct kref *kref)
	idr_destroy(&queue->fence_idr);
+	if (queue->entity == &queue->_vm_bind_entity[0])
+		drm_sched_entity_destroy(queue->entity);
+
	msm_context_put(queue->ctx);
	kfree(queue);
@@ -102,7 +105,7 @@ struct msm_gpu_submitqueue *msm_submitqueue_get(struct msm_context *ctx,
void msm_submitqueue_close(struct msm_context *ctx)
{
-	struct msm_gpu_submitqueue *entry, *tmp;
+	struct msm_gpu_submitqueue *queue, *tmp;
	if (!ctx)
		return;
@@ -111,10 +114,17 @@ void msm_submitqueue_close(struct msm_context *ctx)
	 * No lock needed in close and there won't
	 * be any more user ioctls coming our way
	 */
-	list_for_each_entry_safe(entry, tmp, &ctx->submitqueues, node) {
-		list_del(&entry->node);
-		msm_submitqueue_put(entry);
+	list_for_each_entry_safe(queue, tmp, &ctx->submitqueues, node) {
+		if (queue->entity == &queue->_vm_bind_entity[0])
+			drm_sched_entity_flush(queue->entity, MAX_WAIT_SCHED_ENTITY_Q_EMPTY);
+		list_del(&queue->node);
+		msm_submitqueue_put(queue);
	}
+
+	if (!ctx->vm)
+		return;
+
+	msm_gem_vm_close(ctx->vm);
}
static struct drm_sched_entity *
@@ -160,8 +170,6 @@ int msm_submitqueue_create(struct drm_device *drm, struct msm_context *ctx,
	struct msm_drm_private *priv = drm->dev_private;
	struct msm_gpu_submitqueue *queue;
	enum drm_sched_priority sched_prio;
-	extern int enable_preemption;
-	bool preemption_supported;
	unsigned ring_nr;
	int ret;
@@ -171,26 +179,53 @@ int msm_submitqueue_create(struct drm_device *drm, struct msm_context *ctx,
	if (!priv->gpu)
		return -ENODEV;
-	preemption_supported = priv->gpu->nr_rings == 1 && enable_preemption != 0;
+	if (flags & MSM_SUBMITQUEUE_VM_BIND) {
+		unsigned sz;
-	if (flags & MSM_SUBMITQUEUE_ALLOW_PREEMPT && preemption_supported)
-		return -EINVAL;
+		/* Not allowed for kernel managed VMs (ie. kernel allocs VA) */
+		if (!msm_context_is_vmbind(ctx))
+			return -EINVAL;
-	ret = msm_gpu_convert_priority(priv->gpu, prio, &ring_nr, &sched_prio);
-	if (ret)
-		return ret;
+		if (prio)
+			return -EINVAL;
+
+		sz = struct_size(queue, _vm_bind_entity, 1);
+		queue = kzalloc(sz, GFP_KERNEL);
+	} else {
+		extern int enable_preemption;
+		bool preemption_supported =
+			priv->gpu->nr_rings == 1 && enable_preemption != 0;
+
+		if (flags & MSM_SUBMITQUEUE_ALLOW_PREEMPT && preemption_supported)
+			return -EINVAL;
-	queue = kzalloc(sizeof(*queue), GFP_KERNEL);
+		ret = msm_gpu_convert_priority(priv->gpu, prio, &ring_nr, &sched_prio);
+		if (ret)
+			return ret;
+
+		queue = kzalloc(sizeof(*queue), GFP_KERNEL);
+	}
	if (!queue)
		return -ENOMEM;
	kref_init(&queue->ref);
	queue->flags = flags;
-	queue->ring_nr = ring_nr;
-	queue->entity = get_sched_entity(ctx, priv->gpu->rb[ring_nr],
-			ring_nr, sched_prio);
+	if (flags & MSM_SUBMITQUEUE_VM_BIND) {
+		struct drm_gpu_scheduler *sched = &to_msm_vm(msm_context_vm(drm, ctx))->sched;
+
+		queue->entity = &queue->_vm_bind_entity[0];
+
+		drm_sched_entity_init(queue->entity, DRM_SCHED_PRIORITY_KERNEL,
+				      &sched, 1, NULL);
+	} else {
+		queue->ring_nr = ring_nr;
+
+		queue->entity = get_sched_entity(ctx, priv->gpu->rb[ring_nr],
+						 ring_nr, sched_prio);
+	}
+
	if (IS_ERR(queue->entity)) {
		ret = PTR_ERR(queue->entity);
		kfree(queue);
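A note on the allocation pattern: the struct_size(queue, _vm_bind_entity, 1)
call above implies that _vm_bind_entity is a flexible array member at the tail
of struct msm_gpu_submitqueue, allocated only for VM_BIND queues; the
queue->entity == &queue->_vm_bind_entity[0] comparison in destroy/close then
doubles as the "is this a VM_BIND queue" test. A reduced, self-contained
sketch of the idiom (the example_* names are hypothetical; struct_size() and
kzalloc() are the real kernel helpers from <linux/overflow.h> and
<linux/slab.h>):

#include <linux/overflow.h>
#include <linux/slab.h>

struct example_entity { int placeholder; };

struct example_queue {
	unsigned flags;
	struct example_entity *entity;		/* ring entity, or &_vm_bind_entity[0] */
	struct example_entity _vm_bind_entity[];	/* tail slot, VM_BIND queues only */
};

static struct example_queue *example_queue_alloc(bool vm_bind)
{
	struct example_queue *q;
	size_t sz;

	/* struct_size() == sizeof(*q) + 1 * sizeof(q->_vm_bind_entity[0]),
	 * with integer-overflow checking; plain queues skip the tail slot
	 * entirely (sizeof a struct with a flexible array member excludes it). */
	sz = vm_bind ? struct_size(q, _vm_bind_entity, 1) : sizeof(*q);

	q = kzalloc(sz, GFP_KERNEL);
	if (q && vm_bind)
		q->entity = &q->_vm_bind_entity[0];	/* mirrors the identity check above */

	return q;
}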