Diffstat (limited to 'drivers/accel/ivpu/ivpu_job.c')
-rw-r--r-- | drivers/accel/ivpu/ivpu_job.c | 105
1 file changed, 78 insertions(+), 27 deletions(-)
diff --git a/drivers/accel/ivpu/ivpu_job.c b/drivers/accel/ivpu/ivpu_job.c
index 673801889c7b..4a8013f669f9 100644
--- a/drivers/accel/ivpu/ivpu_job.c
+++ b/drivers/accel/ivpu/ivpu_job.c
@@ -17,6 +17,7 @@
 #include "ivpu_ipc.h"
 #include "ivpu_job.h"
 #include "ivpu_jsm_msg.h"
+#include "ivpu_mmu.h"
 #include "ivpu_pm.h"
 #include "ivpu_trace.h"
 #include "vpu_boot_api.h"
@@ -83,23 +84,9 @@ static struct ivpu_cmdq *ivpu_cmdq_alloc(struct ivpu_file_priv *file_priv)
 	if (!cmdq)
 		return NULL;
 
-	ret = xa_alloc_cyclic(&vdev->db_xa, &cmdq->db_id, NULL, vdev->db_limit, &vdev->db_next,
-			      GFP_KERNEL);
-	if (ret < 0) {
-		ivpu_err(vdev, "Failed to allocate doorbell id: %d\n", ret);
-		goto err_free_cmdq;
-	}
-
-	ret = xa_alloc_cyclic(&file_priv->cmdq_xa, &cmdq->id, cmdq, file_priv->cmdq_limit,
-			      &file_priv->cmdq_id_next, GFP_KERNEL);
-	if (ret < 0) {
-		ivpu_err(vdev, "Failed to allocate command queue id: %d\n", ret);
-		goto err_erase_db_xa;
-	}
-
 	cmdq->mem = ivpu_bo_create_global(vdev, SZ_4K, DRM_IVPU_BO_WC | DRM_IVPU_BO_MAPPABLE);
 	if (!cmdq->mem)
-		goto err_erase_cmdq_xa;
+		goto err_free_cmdq;
 
 	ret = ivpu_preemption_buffers_create(vdev, file_priv, cmdq);
 	if (ret)
@@ -107,10 +94,6 @@ static struct ivpu_cmdq *ivpu_cmdq_alloc(struct ivpu_file_priv *file_priv)
 
 	return cmdq;
 
-err_erase_cmdq_xa:
-	xa_erase(&file_priv->cmdq_xa, cmdq->id);
-err_erase_db_xa:
-	xa_erase(&vdev->db_xa, cmdq->db_id);
 err_free_cmdq:
 	kfree(cmdq);
 	return NULL;
@@ -234,30 +217,88 @@ static int ivpu_cmdq_fini(struct ivpu_file_priv *file_priv, struct ivpu_cmdq *cm
 	return 0;
 }
 
+static int ivpu_db_id_alloc(struct ivpu_device *vdev, u32 *db_id)
+{
+	int ret;
+	u32 id;
+
+	ret = xa_alloc_cyclic(&vdev->db_xa, &id, NULL, vdev->db_limit, &vdev->db_next, GFP_KERNEL);
+	if (ret < 0)
+		return ret;
+
+	*db_id = id;
+	return 0;
+}
+
+static int ivpu_cmdq_id_alloc(struct ivpu_file_priv *file_priv, u32 *cmdq_id)
+{
+	int ret;
+	u32 id;
+
+	ret = xa_alloc_cyclic(&file_priv->cmdq_xa, &id, NULL, file_priv->cmdq_limit,
+			      &file_priv->cmdq_id_next, GFP_KERNEL);
+	if (ret < 0)
+		return ret;
+
+	*cmdq_id = id;
+	return 0;
+}
+
 static struct ivpu_cmdq *ivpu_cmdq_acquire(struct ivpu_file_priv *file_priv, u8 priority)
 {
+	struct ivpu_device *vdev = file_priv->vdev;
 	struct ivpu_cmdq *cmdq;
-	unsigned long cmdq_id;
+	unsigned long id;
 	int ret;
 
 	lockdep_assert_held(&file_priv->lock);
 
-	xa_for_each(&file_priv->cmdq_xa, cmdq_id, cmdq)
+	xa_for_each(&file_priv->cmdq_xa, id, cmdq)
 		if (cmdq->priority == priority)
 			break;
 
 	if (!cmdq) {
 		cmdq = ivpu_cmdq_alloc(file_priv);
-		if (!cmdq)
+		if (!cmdq) {
+			ivpu_err(vdev, "Failed to allocate command queue\n");
 			return NULL;
+		}
+
+		ret = ivpu_db_id_alloc(vdev, &cmdq->db_id);
+		if (ret) {
+			ivpu_err(file_priv->vdev, "Failed to allocate doorbell ID: %d\n", ret);
+			goto err_free_cmdq;
+		}
+
+		ret = ivpu_cmdq_id_alloc(file_priv, &cmdq->id);
+		if (ret) {
+			ivpu_err(vdev, "Failed to allocate command queue ID: %d\n", ret);
+			goto err_erase_db_id;
+		}
+
 		cmdq->priority = priority;
+		ret = xa_err(xa_store(&file_priv->cmdq_xa, cmdq->id, cmdq, GFP_KERNEL));
+		if (ret) {
+			ivpu_err(vdev, "Failed to store command queue in cmdq_xa: %d\n", ret);
+			goto err_erase_cmdq_id;
+		}
 	}
 
 	ret = ivpu_cmdq_init(file_priv, cmdq, priority);
-	if (ret)
-		return NULL;
+	if (ret) {
+		ivpu_err(vdev, "Failed to initialize command queue: %d\n", ret);
+		goto err_free_cmdq;
+	}
 
 	return cmdq;
+
+err_erase_cmdq_id:
+	xa_erase(&file_priv->cmdq_xa, cmdq->id);
+err_erase_db_id:
+	xa_erase(&vdev->db_xa, cmdq->db_id);
+err_free_cmdq:
+	ivpu_cmdq_free(file_priv, cmdq);
+	return NULL;
 }
 
 void ivpu_cmdq_release_all_locked(struct ivpu_file_priv *file_priv)
@@ -320,12 +361,15 @@ void ivpu_context_abort_locked(struct ivpu_file_priv *file_priv)
 {
 	struct ivpu_device *vdev = file_priv->vdev;
 
 	lockdep_assert_held(&file_priv->lock);
+	ivpu_dbg(vdev, JOB, "Context ID: %u abort\n", file_priv->ctx.id);
 
 	ivpu_cmdq_fini_all(file_priv);
 
 	if (vdev->fw->sched_mode == VPU_SCHEDULING_MODE_OS)
 		ivpu_jsm_context_release(vdev, file_priv->ctx.id);
 
+	ivpu_mmu_disable_ssid_events(vdev, file_priv->ctx.id);
+
 	file_priv->aborted = true;
 }
@@ -606,8 +650,8 @@ static int ivpu_job_submit(struct ivpu_job *job, u8 priority)
 err_erase_xa:
 	xa_erase(&vdev->submitted_jobs_xa, job->job_id);
 err_unlock:
-	mutex_unlock(&vdev->submitted_jobs_lock);
 	mutex_unlock(&file_priv->lock);
+	mutex_unlock(&vdev->submitted_jobs_lock);
 	ivpu_rpm_put(vdev);
 	return ret;
 }
@@ -805,12 +849,12 @@ void ivpu_job_done_consumer_fini(struct ivpu_device *vdev)
 	ivpu_ipc_consumer_del(vdev, &vdev->job_done_consumer);
 }
 
-void ivpu_context_abort_thread_handler(struct work_struct *work)
+void ivpu_context_abort_work_fn(struct work_struct *work)
 {
 	struct ivpu_device *vdev = container_of(work, struct ivpu_device, context_abort_work);
 	struct ivpu_file_priv *file_priv;
-	unsigned long ctx_id;
 	struct ivpu_job *job;
+	unsigned long ctx_id;
 	unsigned long id;
 
 	if (vdev->fw->sched_mode == VPU_SCHEDULING_MODE_HW)
@@ -827,6 +871,13 @@ void ivpu_context_abort_thread_handler(struct work_struct *work)
 	}
 	mutex_unlock(&vdev->context_list_lock);
 
+	/*
+	 * We will not receive new MMU event interrupts until existing events are discarded
+	 * however, we want to discard these events only after aborting the faulty context
+	 * to avoid generating new faults from that context
+	 */
+	ivpu_mmu_discard_events(vdev);
+
 	if (vdev->fw->sched_mode != VPU_SCHEDULING_MODE_HW)
 		return;
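
Note: the rework above pulls ID allocation out of ivpu_cmdq_alloc() and, in ivpu_cmdq_acquire(), reserves the cmdq ID with a NULL entry first, storing the queue pointer in cmdq_xa only after the queue is fully set up. A minimal sketch of that allocate-then-publish xarray pattern follows; the names (my_obj, my_obj_publish) are illustrative only and not part of the driver:

#include <linux/slab.h>
#include <linux/xarray.h>

struct my_obj {
	u32 id;
	/* ... payload ... */
};

static int my_obj_publish(struct xarray *xa, struct xa_limit limit, u32 *next,
			  struct my_obj *obj)
{
	int ret;

	/* Reserve a cyclic ID first; the NULL entry keeps the object invisible. */
	ret = xa_alloc_cyclic(xa, &obj->id, NULL, limit, next, GFP_KERNEL);
	if (ret < 0)
		return ret;

	/* ... finish initializing obj; on failure, xa_erase(xa, obj->id) ... */

	/* Publish only once the object is ready to be looked up. */
	ret = xa_err(xa_store(xa, obj->id, obj, GFP_KERNEL));
	if (ret)
		xa_erase(xa, obj->id);

	return ret;
}

This keeps half-initialized queues out of lookups and gives each failure a single ID to unwind, which is exactly what the err_erase_* labels in the new ivpu_cmdq_acquire() do.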
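The ivpu_job_submit() hunk swaps the two mutex_unlock() calls so the error path releases the locks in the reverse of their acquisition order (assuming, as the reordering implies, that submitted_jobs_lock is taken before file_priv->lock). A generic sketch of that convention, with illustrative lock names:

#include <linux/mutex.h>

static DEFINE_MUTEX(outer_lock); /* e.g. the jobs list lock, taken first */
static DEFINE_MUTEX(inner_lock); /* e.g. the per-file lock, taken second */

static void critical_section(void)
{
	mutex_lock(&outer_lock);
	mutex_lock(&inner_lock);

	/* ... work under both locks ... */

	mutex_unlock(&inner_lock); /* last acquired, first released */
	mutex_unlock(&outer_lock);
}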