 drivers/gpu/drm/nouveau/nouveau_abi16.c |  18
 drivers/gpu/drm/nouveau/nouveau_abi16.h |   2
 drivers/gpu/drm/nouveau/nouveau_drm.c   |  31
 drivers/gpu/drm/nouveau/nouveau_drv.h   |   9
 drivers/gpu/drm/nouveau/nouveau_exec.c  |   7
 drivers/gpu/drm/nouveau/nouveau_exec.h  |   2
 drivers/gpu/drm/nouveau/nouveau_sched.c | 197
 drivers/gpu/drm/nouveau/nouveau_sched.h |  35
 drivers/gpu/drm/nouveau/nouveau_uvmm.c  |  69
 drivers/gpu/drm/nouveau/nouveau_uvmm.h  |   4
 10 files changed, 179 insertions(+), 195 deletions(-)
diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.c b/drivers/gpu/drm/nouveau/nouveau_abi16.c
index 2edd7bb13fae..7542e1787183 100644
--- a/drivers/gpu/drm/nouveau/nouveau_abi16.c
+++ b/drivers/gpu/drm/nouveau/nouveau_abi16.c
@@ -127,21 +127,14 @@ nouveau_abi16_chan_fini(struct nouveau_abi16 *abi16,
{
struct nouveau_abi16_ntfy *ntfy, *temp;
- /* When a client exits without waiting for it's queued up jobs to
- * finish it might happen that we fault the channel. This is due to
- * drm_file_free() calling drm_gem_release() before the postclose()
- * callback. Hence, we can't tear down this scheduler entity before
- * uvmm mappings are unmapped. Currently, we can't detect this case.
- *
- * However, this should be rare and harmless, since the channel isn't
- * needed anymore.
- */
- nouveau_sched_entity_fini(&chan->sched_entity);
+ /* Cancel all jobs from the entity's queue. */
+ drm_sched_entity_fini(&chan->sched.entity);
- /* wait for all activity to stop before cleaning up */
if (chan->chan)
nouveau_channel_idle(chan->chan);
+ nouveau_sched_fini(&chan->sched);
+
/* cleanup notifier state */
list_for_each_entry_safe(ntfy, temp, &chan->notifiers, head) {
nouveau_abi16_ntfy_fini(chan, ntfy);
@@ -344,8 +337,7 @@ nouveau_abi16_ioctl_channel_alloc(ABI16_IOCTL_ARGS)
if (ret)
goto done;
- ret = nouveau_sched_entity_init(&chan->sched_entity, &drm->sched,
- drm->sched_wq);
+ ret = nouveau_sched_init(&chan->sched, drm, drm->sched_wq);
if (ret)
goto done;
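
For reference, the teardown order this file now establishes in nouveau_abi16_chan_fini() condenses to the sketch below (the wrapper name example_chan_teardown() is hypothetical; notifier cleanup and the rest of the function are omitted): the entity is finalized first so queued jobs are cancelled, the hardware channel is idled, and only then is the per-channel scheduler torn down.

    static void example_chan_teardown(struct nouveau_abi16_chan *chan)
    {
            /* Cancel all jobs still pending on the entity's queue. */
            drm_sched_entity_fini(&chan->sched.entity);

            /* Wait for work already submitted to the hardware channel. */
            if (chan->chan)
                    nouveau_channel_idle(chan->chan);

            /* Now the per-channel scheduler can be torn down safely. */
            nouveau_sched_fini(&chan->sched);
    }
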
diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.h b/drivers/gpu/drm/nouveau/nouveau_abi16.h
index 9f538486c10e..1f5e243c0c75 100644
--- a/drivers/gpu/drm/nouveau/nouveau_abi16.h
+++ b/drivers/gpu/drm/nouveau/nouveau_abi16.h
@@ -26,7 +26,7 @@ struct nouveau_abi16_chan {
struct nouveau_bo *ntfy;
struct nouveau_vma *ntfy_vma;
struct nvkm_mm heap;
- struct nouveau_sched_entity sched_entity;
+ struct nouveau_sched sched;
};
struct nouveau_abi16 {
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index f603eaef1560..7e5f19153829 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -201,9 +201,9 @@ nouveau_cli_fini(struct nouveau_cli *cli)
WARN_ON(!list_empty(&cli->worker));
usif_client_fini(cli);
+ nouveau_sched_fini(&cli->sched);
if (uvmm)
nouveau_uvmm_fini(uvmm);
- nouveau_sched_entity_fini(&cli->sched_entity);
nouveau_vmm_fini(&cli->svm);
nouveau_vmm_fini(&cli->vmm);
nvif_mmu_dtor(&cli->mmu);
@@ -310,8 +310,17 @@ nouveau_cli_init(struct nouveau_drm *drm, const char *sname,
cli->mem = &mems[ret];
- ret = nouveau_sched_entity_init(&cli->sched_entity, &drm->sched,
- drm->sched_wq);
+ /* Don't pass in the (shared) sched_wq in order to let
+ * nouveau_sched_init() create a dedicated one for VM_BIND jobs.
+ *
+ * This is required to ensure that, for VM_BIND jobs, free_job() work and
+ * run_job() work can always run concurrently and hence free_job() work
+ * can never stall run_job() work. For EXEC jobs we don't have this
+ * requirement, since an EXEC job's free_job() does not need to take any
+ * locks which are directly or indirectly held for allocations elsewhere.
+ */
+ ret = nouveau_sched_init(&cli->sched, drm, NULL);
if (ret)
goto done;
@@ -582,13 +591,16 @@ nouveau_drm_device_init(struct drm_device *dev)
nvif_parent_ctor(&nouveau_parent, &drm->parent);
drm->master.base.object.parent = &drm->parent;
- ret = nouveau_sched_init(drm);
- if (ret)
+ drm->sched_wq = alloc_workqueue("nouveau_sched_wq_shared", 0,
+ WQ_MAX_ACTIVE);
+ if (!drm->sched_wq) {
+ ret = -ENOMEM;
goto fail_alloc;
+ }
ret = nouveau_cli_init(drm, "DRM-master", &drm->master);
if (ret)
- goto fail_sched;
+ goto fail_wq;
ret = nouveau_cli_init(drm, "DRM", &drm->client);
if (ret)
@@ -658,8 +670,8 @@ fail_ttm:
nouveau_cli_fini(&drm->client);
fail_master:
nouveau_cli_fini(&drm->master);
-fail_sched:
- nouveau_sched_fini(drm);
+fail_wq:
+ destroy_workqueue(drm->sched_wq);
fail_alloc:
nvif_parent_dtor(&drm->parent);
kfree(drm);
@@ -711,10 +723,9 @@ nouveau_drm_device_fini(struct drm_device *dev)
}
mutex_unlock(&drm->clients_lock);
- nouveau_sched_fini(drm);
-
nouveau_cli_fini(&drm->client);
nouveau_cli_fini(&drm->master);
+ destroy_workqueue(drm->sched_wq);
nvif_parent_dtor(&drm->parent);
mutex_destroy(&drm->clients_lock);
kfree(drm);
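
The workqueue comment above distinguishes two ways nouveau_sched_init() is now called. A minimal sketch of both call patterns, drawn only from the hunks in this patch (declarations and error handling trimmed):

    /* EXEC/channel schedulers may share the DRM-wide workqueue ... */
    ret = nouveau_sched_init(&chan->sched, drm, drm->sched_wq);

    /* ... while the per-client scheduler used for VM_BIND passes NULL, so
     * nouveau_sched_init() allocates a dedicated workqueue and free_job()
     * work can never stall run_job() work.
     */
    ret = nouveau_sched_init(&cli->sched, drm, NULL);
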
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index 7f7051df84a6..8a6d94c8b163 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -98,7 +98,7 @@ struct nouveau_cli {
bool disabled;
} uvmm;
- struct nouveau_sched_entity sched_entity;
+ struct nouveau_sched sched;
const struct nvif_mclass *mem;
@@ -258,6 +258,9 @@ struct nouveau_drm {
u64 context_base;
} *runl;
+ /* Workqueue used for channel schedulers. */
+ struct workqueue_struct *sched_wq;
+
/* context for accelerated drm-internal operations */
struct nouveau_channel *cechan;
struct nouveau_channel *channel;
@@ -298,10 +301,6 @@ struct nouveau_drm {
struct mutex lock;
bool component_registered;
} audio;
-
- struct drm_gpu_scheduler sched;
- struct workqueue_struct *sched_wq;
-
};
static inline struct nouveau_drm *
diff --git a/drivers/gpu/drm/nouveau/nouveau_exec.c b/drivers/gpu/drm/nouveau/nouveau_exec.c
index f8ef7004d133..a179eeb6df09 100644
--- a/drivers/gpu/drm/nouveau/nouveau_exec.c
+++ b/drivers/gpu/drm/nouveau/nouveau_exec.c
@@ -165,6 +165,7 @@ nouveau_exec_job_free(struct nouveau_job *job)
{
struct nouveau_exec_job *exec_job = to_nouveau_exec_job(job);
+ nouveau_job_done(job);
nouveau_job_free(job);
kfree(exec_job->fence);
@@ -184,8 +185,6 @@ nouveau_exec_job_timeout(struct nouveau_job *job)
NV_PRINTK(warn, job->cli, "job timeout, channel %d killed!\n",
chan->chid);
- nouveau_sched_entity_fini(job->entity);
-
return DRM_GPU_SCHED_STAT_NOMINAL;
}
@@ -234,7 +233,7 @@ nouveau_exec_job_init(struct nouveau_exec_job **pjob,
job->chan = __args->chan;
- args.sched_entity = __args->sched_entity;
+ args.sched = __args->sched;
args.file_priv = __args->file_priv;
args.in_sync.count = __args->in_sync.count;
@@ -388,7 +387,7 @@ nouveau_exec_ioctl_exec(struct drm_device *dev,
if (ret)
goto out;
- args.sched_entity = &chan16->sched_entity;
+ args.sched = &chan16->sched;
args.file_priv = file_priv;
args.chan = chan;
diff --git a/drivers/gpu/drm/nouveau/nouveau_exec.h b/drivers/gpu/drm/nouveau/nouveau_exec.h
index 5a8fe1208a44..9b3b151facfd 100644
--- a/drivers/gpu/drm/nouveau/nouveau_exec.h
+++ b/drivers/gpu/drm/nouveau/nouveau_exec.h
@@ -8,7 +8,7 @@
struct nouveau_exec_job_args {
struct drm_file *file_priv;
- struct nouveau_sched_entity *sched_entity;
+ struct nouveau_sched *sched;
struct nouveau_channel *chan;
struct {
diff --git a/drivers/gpu/drm/nouveau/nouveau_sched.c b/drivers/gpu/drm/nouveau/nouveau_sched.c
index f69f4cbf251b..0f4583f6fcad 100644
--- a/drivers/gpu/drm/nouveau/nouveau_sched.c
+++ b/drivers/gpu/drm/nouveau/nouveau_sched.c
@@ -12,30 +12,29 @@
#include "nouveau_abi16.h"
#include "nouveau_sched.h"
-/* FIXME
- *
- * We want to make sure that jobs currently executing can't be deferred by
- * other jobs competing for the hardware. Otherwise we might end up with job
- * timeouts just because of too many clients submitting too many jobs. We don't
- * want jobs to time out because of system load, but because of the job being
- * too bulky.
- *
- * For now allow for up to 16 concurrent jobs in flight until we know how many
- * rings the hardware can process in parallel.
- */
-#define NOUVEAU_SCHED_HW_SUBMISSIONS 16
+#define NOUVEAU_SCHED_HW_SUBMISSIONS 1
#define NOUVEAU_SCHED_JOB_TIMEOUT_MS 10000
+/* Starts at 0, since the DRM scheduler interprets these priorities as an
+ * (initial) index into the run-queue array.
+ */
+enum nouveau_sched_priority {
+ NOUVEAU_SCHED_PRIORITY_SINGLE = DRM_SCHED_PRIORITY_MIN,
+ NOUVEAU_SCHED_PRIORITY_COUNT,
+};
+
int
nouveau_job_init(struct nouveau_job *job,
struct nouveau_job_args *args)
{
- struct nouveau_sched_entity *entity = args->sched_entity;
+ struct nouveau_sched *sched = args->sched;
int ret;
+ INIT_LIST_HEAD(&job->entry);
+
job->file_priv = args->file_priv;
job->cli = nouveau_cli(args->file_priv);
- job->entity = entity;
+ job->sched = sched;
job->sync = args->sync;
job->resv_usage = args->resv_usage;
@@ -89,7 +88,7 @@ nouveau_job_init(struct nouveau_job *job,
}
- ret = drm_sched_job_init(&job->base, &entity->base, 1, NULL);
+ ret = drm_sched_job_init(&job->base, &sched->entity, 1, NULL);
if (ret)
goto err_free_chains;
@@ -109,6 +108,27 @@
return ret;
}
void
+nouveau_job_fini(struct nouveau_job *job)
+{
+ dma_fence_put(job->done_fence);
+ drm_sched_job_cleanup(&job->base);
+
+ job->ops->free(job);
+}
+
+void
+nouveau_job_done(struct nouveau_job *job)
+{
+ struct nouveau_sched *sched = job->sched;
+
+ spin_lock(&sched->job.list.lock);
+ list_del(&job->entry);
+ spin_unlock(&sched->job.list.lock);
+
+ wake_up(&sched->job.wq);
+}
+
+void
nouveau_job_free(struct nouveau_job *job)
{
kfree(job->in_sync.data);
@@ -117,13 +137,6 @@ nouveau_job_free(struct nouveau_job *job)
kfree(job->out_sync.chains);
}
-void nouveau_job_fini(struct nouveau_job *job)
-{
- dma_fence_put(job->done_fence);
- drm_sched_job_cleanup(&job->base);
- job->ops->free(job);
-}
-
static int
sync_find_fence(struct nouveau_job *job,
struct drm_nouveau_sync *sync,
@@ -261,7 +274,7 @@ nouveau_job_fence_attach(struct nouveau_job *job)
int
nouveau_job_submit(struct nouveau_job *job)
{
- struct nouveau_sched_entity *entity = to_nouveau_sched_entity(job->base.entity);
+ struct nouveau_sched *sched = job->sched;
struct dma_fence *done_fence = NULL;
struct drm_gpuvm_exec vm_exec = {
.vm = &nouveau_cli_uvmm(job->cli)->base,
@@ -281,7 +294,7 @@ nouveau_job_submit(struct nouveau_job *job)
/* Make sure the job appears on the sched_entity's queue in the same
* order as it was submitted.
*/
- mutex_lock(&entity->mutex);
+ mutex_lock(&sched->mutex);
/* Guarantee we won't fail after the submit() callback returned
* successfully.
@@ -292,33 +305,16 @@ nouveau_job_submit(struct nouveau_job *job)
goto err_cleanup;
}
+ /* Submit was successful; add the job to the scheduler's job list. */
+ spin_lock(&sched->job.list.lock);
+ list_add(&job->entry, &sched->job.list.head);
+ spin_unlock(&sched->job.list.lock);
+
drm_sched_job_arm(&job->base);
job->done_fence = dma_fence_get(&job->base.s_fence->finished);
if (job->sync)
done_fence = dma_fence_get(job->done_fence);
- /* If a sched job depends on a dma-fence from a job from the same GPU
- * scheduler instance, but a different scheduler entity, the GPU
- * scheduler does only wait for the particular job to be scheduled,
- * rather than for the job to fully complete. This is due to the GPU
- * scheduler assuming that there is a scheduler instance per ring.
- * However, the current implementation, in order to avoid arbitrary
- * amounts of kthreads, has a single scheduler instance while scheduler
- * entities represent rings.
- *
- * As a workaround, set the DRM_SCHED_FENCE_DONT_PIPELINE for all
- * out-fences in order to force the scheduler to wait for full job
- * completion for dependent jobs from different entities and same
- * scheduler instance.
- *
- * There is some work in progress [1] to address the issues of firmware
- * schedulers; once it is in-tree the scheduler topology in Nouveau
- * should be re-worked accordingly.
- *
- * [1] https://lore.kernel.org/dri-devel/20230801205103.627779-1-matthew.brost@intel.com/
- */
- set_bit(DRM_SCHED_FENCE_DONT_PIPELINE, &job->done_fence->flags);
-
if (job->ops->armed_submit)
job->ops->armed_submit(job, &vm_exec);
@@ -331,7 +327,7 @@ nouveau_job_submit(struct nouveau_job *job)
drm_sched_entity_push_job(&job->base);
- mutex_unlock(&entity->mutex);
+ mutex_unlock(&sched->mutex);
if (done_fence) {
dma_fence_wait(done_fence, true);
@@ -341,20 +337,13 @@ nouveau_job_submit(struct nouveau_job *job)
return 0;
err_cleanup:
- mutex_unlock(&entity->mutex);
+ mutex_unlock(&sched->mutex);
nouveau_job_fence_attach_cleanup(job);
err:
job->state = NOUVEAU_JOB_SUBMIT_FAILED;
return ret;
}
-bool
-nouveau_sched_entity_qwork(struct nouveau_sched_entity *entity,
- struct work_struct *work)
-{
- return queue_work(entity->sched_wq, work);
-}
-
static struct dma_fence *
nouveau_job_run(struct nouveau_job *job)
{
@@ -404,50 +393,82 @@ nouveau_sched_free_job(struct drm_sched_job *sched_job)
nouveau_job_fini(job);
}
-int nouveau_sched_entity_init(struct nouveau_sched_entity *entity,
- struct drm_gpu_scheduler *sched,
- struct workqueue_struct *sched_wq)
-{
- mutex_init(&entity->mutex);
- spin_lock_init(&entity->job.list.lock);
- INIT_LIST_HEAD(&entity->job.list.head);
- init_waitqueue_head(&entity->job.wq);
-
- entity->sched_wq = sched_wq;
- return drm_sched_entity_init(&entity->base,
- DRM_SCHED_PRIORITY_NORMAL,
- &sched, 1, NULL);
-}
-
-void
-nouveau_sched_entity_fini(struct nouveau_sched_entity *entity)
-{
- drm_sched_entity_destroy(&entity->base);
-}
-
static const struct drm_sched_backend_ops nouveau_sched_ops = {
.run_job = nouveau_sched_run_job,
.timedout_job = nouveau_sched_timedout_job,
.free_job = nouveau_sched_free_job,
};
-int nouveau_sched_init(struct nouveau_drm *drm)
+int
+nouveau_sched_init(struct nouveau_sched *sched, struct nouveau_drm *drm,
+ struct workqueue_struct *wq)
{
- struct drm_gpu_scheduler *sched = &drm->sched;
+ struct drm_gpu_scheduler *drm_sched = &sched->base;
+ struct drm_sched_entity *entity = &sched->entity;
long job_hang_limit = msecs_to_jiffies(NOUVEAU_SCHED_JOB_TIMEOUT_MS);
+ int ret;
- drm->sched_wq = create_singlethread_workqueue("nouveau_sched_wq");
- if (!drm->sched_wq)
- return -ENOMEM;
+ if (!wq) {
+ wq = alloc_workqueue("nouveau_sched_wq_%d", 0, WQ_MAX_ACTIVE,
+ current->pid);
+ if (!wq)
+ return -ENOMEM;
+
+ sched->wq = wq;
+ }
- return drm_sched_init(sched, &nouveau_sched_ops, NULL,
- DRM_SCHED_PRIORITY_COUNT,
- NOUVEAU_SCHED_HW_SUBMISSIONS, 0, job_hang_limit,
- NULL, NULL, "nouveau_sched", drm->dev->dev);
+ ret = drm_sched_init(drm_sched, &nouveau_sched_ops, wq,
+ NOUVEAU_SCHED_PRIORITY_COUNT,
+ NOUVEAU_SCHED_HW_SUBMISSIONS, 0, job_hang_limit,
+ NULL, NULL, "nouveau_sched", drm->dev->dev);
+ if (ret)
+ goto fail_wq;
+
+ /* Using DRM_SCHED_PRIORITY_MIN, since that's what we're required to use
+ * when we want to have a single run-queue only.
+ *
+ * It's not documented, but using any other priority runs into faults,
+ * because the scheduler uses the priority as an array index.
+ *
+ * Can't use NOUVEAU_SCHED_PRIORITY_SINGLE either, because it doesn't
+ * match the enum type expected by drm_sched_entity_init().
+ */
+ ret = drm_sched_entity_init(entity, DRM_SCHED_PRIORITY_MIN,
+ &drm_sched, 1, NULL);
+ if (ret)
+ goto fail_sched;
+
+ mutex_init(&sched->mutex);
+ spin_lock_init(&sched->job.list.lock);
+ INIT_LIST_HEAD(&sched->job.list.head);
+ init_waitqueue_head(&sched->job.wq);
+
+ return 0;
+
+fail_sched:
+ drm_sched_fini(drm_sched);
+fail_wq:
+ if (sched->wq)
+ destroy_workqueue(sched->wq);
+ return ret;
}
-void nouveau_sched_fini(struct nouveau_drm *drm)
+void
+nouveau_sched_fini(struct nouveau_sched *sched)
{
- destroy_workqueue(drm->sched_wq);
- drm_sched_fini(&drm->sched);
+ struct drm_gpu_scheduler *drm_sched = &sched->base;
+ struct drm_sched_entity *entity = &sched->entity;
+
+ rmb(); /* for list_empty to work without lock */
+ wait_event(sched->job.wq, list_empty(&sched->job.list.head));
+
+ drm_sched_entity_fini(entity);
+ drm_sched_fini(drm_sched);
+
+ /* Destroy workqueue after scheduler tear down, otherwise it might still
+ * be in use.
+ */
+ if (sched->wq)
+ destroy_workqueue(sched->wq);
}
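
Taken together, the scheduler now tracks its own jobs: nouveau_job_submit() adds a job to sched->job.list after a successful submit() callback, nouveau_job_done() removes it and wakes sched->job.wq, and nouveau_sched_fini() waits on that waitqueue before tearing down entity and scheduler. A condensed sketch of this lifecycle, assembled from the hunks above (simplified, not a drop-in replacement for the real functions):

    /* nouveau_job_submit(): after the submit() callback succeeded */
    spin_lock(&sched->job.list.lock);
    list_add(&job->entry, &sched->job.list.head);
    spin_unlock(&sched->job.list.lock);

    /* nouveau_job_done(): called from the job's cleanup path */
    spin_lock(&sched->job.list.lock);
    list_del(&job->entry);
    spin_unlock(&sched->job.list.lock);
    wake_up(&sched->job.wq);

    /* nouveau_sched_fini(): block until no job is in flight anymore */
    rmb(); /* for list_empty to work without lock */
    wait_event(sched->job.wq, list_empty(&sched->job.list.head));
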
diff --git a/drivers/gpu/drm/nouveau/nouveau_sched.h b/drivers/gpu/drm/nouveau/nouveau_sched.h
index 0f87697dbc9e..23eff4b0f5b2 100644
--- a/drivers/gpu/drm/nouveau/nouveau_sched.h
+++ b/drivers/gpu/drm/nouveau/nouveau_sched.h
@@ -26,7 +26,7 @@ enum nouveau_job_state {
struct nouveau_job_args {
struct drm_file *file_priv;
- struct nouveau_sched_entity *sched_entity;
+ struct nouveau_sched *sched;
enum dma_resv_usage resv_usage;
bool sync;
@@ -49,7 +49,8 @@ struct nouveau_job {
enum nouveau_job_state state;
- struct nouveau_sched_entity *entity;
+ struct nouveau_sched *sched;
+ struct list_head entry;
struct drm_file *file_priv;
struct nouveau_cli *cli;
@@ -89,20 +90,17 @@ int nouveau_job_ucopy_syncs(struct nouveau_job_args *args,
int nouveau_job_init(struct nouveau_job *job,
struct nouveau_job_args *args);
-void nouveau_job_free(struct nouveau_job *job);
-
-int nouveau_job_submit(struct nouveau_job *job);
void nouveau_job_fini(struct nouveau_job *job);
+int nouveau_job_submit(struct nouveau_job *job);
+void nouveau_job_done(struct nouveau_job *job);
+void nouveau_job_free(struct nouveau_job *job);
-#define to_nouveau_sched_entity(entity) \
- container_of((entity), struct nouveau_sched_entity, base)
-
-struct nouveau_sched_entity {
- struct drm_sched_entity base;
+struct nouveau_sched {
+ struct drm_gpu_scheduler base;
+ struct drm_sched_entity entity;
+ struct workqueue_struct *wq;
struct mutex mutex;
- struct workqueue_struct *sched_wq;
-
struct {
struct {
struct list_head head;
@@ -112,15 +110,8 @@ struct nouveau_sched_entity {
} job;
};
-int nouveau_sched_entity_init(struct nouveau_sched_entity *entity,
- struct drm_gpu_scheduler *sched,
- struct workqueue_struct *sched_wq);
-void nouveau_sched_entity_fini(struct nouveau_sched_entity *entity);
-
-bool nouveau_sched_entity_qwork(struct nouveau_sched_entity *entity,
- struct work_struct *work);
-
-int nouveau_sched_init(struct nouveau_drm *drm);
-void nouveau_sched_fini(struct nouveau_drm *drm);
+int nouveau_sched_init(struct nouveau_sched *sched, struct nouveau_drm *drm,
+ struct workqueue_struct *wq);
+void nouveau_sched_fini(struct nouveau_sched *sched);
#endif
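
With this header change every user embeds a full scheduler/entity pair instead of just an entity, yielding a 1:1 scheduler-to-entity topology. As introduced elsewhere in this patch, both the per-client and the per-channel structures carry one (sketch, unrelated fields omitted):

    struct nouveau_cli {
            /* ... */
            struct nouveau_sched sched;   /* VM_BIND jobs, dedicated wq */
    };

    struct nouveau_abi16_chan {
            /* ... */
            struct nouveau_sched sched;   /* EXEC jobs, shared sched_wq */
    };
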
diff --git a/drivers/gpu/drm/nouveau/nouveau_uvmm.c b/drivers/gpu/drm/nouveau/nouveau_uvmm.c
index 90e6732ee9b5..46493fcb82fd 100644
--- a/drivers/gpu/drm/nouveau/nouveau_uvmm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_uvmm.c
@@ -963,6 +963,12 @@ nouveau_uvmm_bind_job_free(struct kref *kref)
{
struct nouveau_uvmm_bind_job *job =
container_of(kref, struct nouveau_uvmm_bind_job, kref);
+ struct bind_job_op *op, *next;
+
+ list_for_each_op_safe(op, next, &job->ops) {
+ list_del(&op->entry);
+ kfree(op);
+ }
nouveau_job_free(&job->base);
kfree(job);
@@ -1004,14 +1010,16 @@ bind_validate_op(struct nouveau_job *job,
static void
bind_validate_map_sparse(struct nouveau_job *job, u64 addr, u64 range)
{
- struct nouveau_uvmm_bind_job *bind_job;
- struct nouveau_sched_entity *entity = job->entity;
+ struct nouveau_sched *sched = job->sched;
+ struct nouveau_job *__job;
struct bind_job_op *op;
u64 end = addr + range;
again:
- spin_lock(&entity->job.list.lock);
- list_for_each_entry(bind_job, &entity->job.list.head, entry) {
+ spin_lock(&sched->job.list.lock);
+ list_for_each_entry(__job, &sched->job.list.head, entry) {
+ struct nouveau_uvmm_bind_job *bind_job = to_uvmm_bind_job(__job);
+
list_for_each_op(op, &bind_job->ops) {
if (op->op == OP_UNMAP) {
u64 op_addr = op->va.addr;
@@ -1019,7 +1027,7 @@ again:
if (!(end <= op_addr || addr >= op_end)) {
nouveau_uvmm_bind_job_get(bind_job);
- spin_unlock(&entity->job.list.lock);
+ spin_unlock(&sched->job.list.lock);
wait_for_completion(&bind_job->complete);
nouveau_uvmm_bind_job_put(bind_job);
goto again;
@@ -1027,7 +1035,7 @@ again:
}
}
}
- spin_unlock(&entity->job.list.lock);
+ spin_unlock(&sched->job.list.lock);
}
static int
@@ -1183,7 +1191,6 @@ nouveau_uvmm_bind_job_submit(struct nouveau_job *job,
{
struct nouveau_uvmm *uvmm = nouveau_cli_uvmm(job->cli);
struct nouveau_uvmm_bind_job *bind_job = to_uvmm_bind_job(job);
- struct nouveau_sched_entity *entity = job->entity;
struct drm_exec *exec = &vme->exec;
struct bind_job_op *op;
int ret;
@@ -1380,10 +1387,6 @@ nouveau_uvmm_bind_job_submit(struct nouveau_job *job,
}
nouveau_uvmm_unlock(uvmm);
- spin_lock(&entity->job.list.lock);
- list_add(&bind_job->entry, &entity->job.list.head);
- spin_unlock(&entity->job.list.lock);
-
return 0;
unwind_continue:
@@ -1466,14 +1469,11 @@ out:
}
static void
-nouveau_uvmm_bind_job_free_work_fn(struct work_struct *work)
+nouveau_uvmm_bind_job_cleanup(struct nouveau_job *job)
{
- struct nouveau_uvmm_bind_job *bind_job =
- container_of(work, struct nouveau_uvmm_bind_job, work);
- struct nouveau_job *job = &bind_job->base;
+ struct nouveau_uvmm_bind_job *bind_job = to_uvmm_bind_job(job);
struct nouveau_uvmm *uvmm = nouveau_cli_uvmm(job->cli);
- struct nouveau_sched_entity *entity = job->entity;
- struct bind_job_op *op, *next;
+ struct bind_job_op *op;
list_for_each_op(op, &bind_job->ops) {
struct drm_gem_object *obj = op->gem.obj;
@@ -1525,38 +1525,17 @@ nouveau_uvmm_bind_job_free_work_fn(struct work_struct *work)
drm_gem_object_put(obj);
}
- spin_lock(&entity->job.list.lock);
- list_del(&bind_job->entry);
- spin_unlock(&entity->job.list.lock);
-
+ nouveau_job_done(job);
complete_all(&bind_job->complete);
- wake_up(&entity->job.wq);
-
- /* Remove and free ops after removing the bind job from the job list to
- * avoid races against bind_validate_map_sparse().
- */
- list_for_each_op_safe(op, next, &bind_job->ops) {
- list_del(&op->entry);
- kfree(op);
- }
nouveau_uvmm_bind_job_put(bind_job);
}
-static void
-nouveau_uvmm_bind_job_free_qwork(struct nouveau_job *job)
-{
- struct nouveau_uvmm_bind_job *bind_job = to_uvmm_bind_job(job);
- struct nouveau_sched_entity *entity = job->entity;
-
- nouveau_sched_entity_qwork(entity, &bind_job->work);
-}
-
static struct nouveau_job_ops nouveau_bind_job_ops = {
.submit = nouveau_uvmm_bind_job_submit,
.armed_submit = nouveau_uvmm_bind_job_armed_submit,
.run = nouveau_uvmm_bind_job_run,
- .free = nouveau_uvmm_bind_job_free_qwork,
+ .free = nouveau_uvmm_bind_job_cleanup,
};
static int
@@ -1617,7 +1596,6 @@ nouveau_uvmm_bind_job_init(struct nouveau_uvmm_bind_job **pjob,
return ret;
INIT_LIST_HEAD(&job->ops);
- INIT_LIST_HEAD(&job->entry);
for (i = 0; i < __args->op.count; i++) {
ret = bind_job_op_from_uop(&op, &__args->op.s[i]);
@@ -1628,9 +1606,8 @@ nouveau_uvmm_bind_job_init(struct nouveau_uvmm_bind_job **pjob,
}
init_completion(&job->complete);
- INIT_WORK(&job->work, nouveau_uvmm_bind_job_free_work_fn);
- args.sched_entity = __args->sched_entity;
+ args.sched = __args->sched;
args.file_priv = __args->file_priv;
args.in_sync.count = __args->in_sync.count;
@@ -1758,7 +1735,7 @@ nouveau_uvmm_ioctl_vm_bind(struct drm_device *dev,
if (ret)
return ret;
- args.sched_entity = &cli->sched_entity;
+ args.sched = &cli->sched;
args.file_priv = file_priv;
ret = nouveau_uvmm_vm_bind(&args);
@@ -1910,12 +1887,8 @@ nouveau_uvmm_fini(struct nouveau_uvmm *uvmm)
MA_STATE(mas, &uvmm->region_mt, 0, 0);
struct nouveau_uvma_region *reg;
struct nouveau_cli *cli = uvmm->vmm.cli;
- struct nouveau_sched_entity *entity = &cli->sched_entity;
struct drm_gpuva *va, *next;
- rmb(); /* for list_empty to work without lock */
- wait_event(entity->job.wq, list_empty(&entity->job.list.head));
-
nouveau_uvmm_lock(uvmm);
drm_gpuvm_for_each_va_safe(va, next, &uvmm->base) {
struct nouveau_uvma *uvma = uvma_from_va(va);
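
One subtlety worth spelling out: freeing the ops list moved from the cleanup work into nouveau_uvmm_bind_job_free(), the final kref release, because bind_validate_map_sparse() walks the scheduler's job list and dereferences each bind job's ops under the list lock. A condensed sketch of that walk, based on the hunk above (the overlap test is abbreviated to a hypothetical overlaps check):

    again:
            spin_lock(&sched->job.list.lock);
            list_for_each_entry(__job, &sched->job.list.head, entry) {
                    struct nouveau_uvmm_bind_job *bind_job = to_uvmm_bind_job(__job);

                    /* ... inspects bind_job->ops here; if an OP_UNMAP overlaps
                     * the requested range, wait for that job and retry.
                     */
                    if (overlaps) {
                            nouveau_uvmm_bind_job_get(bind_job);
                            spin_unlock(&sched->job.list.lock);
                            wait_for_completion(&bind_job->complete);
                            nouveau_uvmm_bind_job_put(bind_job);
                            goto again;
                    }
            }
            spin_unlock(&sched->job.list.lock);
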
diff --git a/drivers/gpu/drm/nouveau/nouveau_uvmm.h b/drivers/gpu/drm/nouveau/nouveau_uvmm.h
index f0a6d98ace4f..9d3c348581eb 100644
--- a/drivers/gpu/drm/nouveau/nouveau_uvmm.h
+++ b/drivers/gpu/drm/nouveau/nouveau_uvmm.h
@@ -44,8 +44,6 @@ struct nouveau_uvmm_bind_job {
struct nouveau_job base;
struct kref kref;
- struct list_head entry;
- struct work_struct work;
struct completion complete;
/* struct bind_job_op */
@@ -54,7 +52,7 @@ struct nouveau_uvmm_bind_job {
struct nouveau_uvmm_bind_job_args {
struct drm_file *file_priv;
- struct nouveau_sched_entity *sched_entity;
+ struct nouveau_sched *sched;
unsigned int flags;