summaryrefslogtreecommitdiff
path: root/drivers/gpu/drm/xe/xe_sched_job.c
diff options
context:
space:
mode:
authorMatthew Brost <matthew.brost@intel.com>2023-01-13 01:25:14 +0300
committerRodrigo Vivi <rodrigo.vivi@intel.com>2023-12-12 22:06:00 +0300
commit765b65e5bde79a9e8332c58f54a98e20fdb25fc7 (patch)
treeffded2ad171d82adc5de746e5b91b47fa5956512 /drivers/gpu/drm/xe/xe_sched_job.c
parente9d285ff9d4998d20790395adc8a62f283bdb72b (diff)
downloadlinux-765b65e5bde79a9e8332c58f54a98e20fdb25fc7.tar.xz
drm/xe: Take memory ref on kernel job creation
When a job is in flight we may access memory to read the hardware seqno. All user jobs have a VM open, which holds a memory ref, but kernel jobs do not require a VM, so it is possible for them to lack a memory ref. To avoid this, take a memory ref on kernel job creation. Signed-off-by: Matthew Brost <matthew.brost@intel.com> Reviewed-by: Rodrigo Vivi <rodrigo.vivi@intel.com> Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Diffstat (limited to 'drivers/gpu/drm/xe/xe_sched_job.c')
-rw-r--r--drivers/gpu/drm/xe/xe_sched_job.c14
1 file changed, 13 insertions(+), 1 deletion(-)
diff --git a/drivers/gpu/drm/xe/xe_sched_job.c b/drivers/gpu/drm/xe/xe_sched_job.c
index ab81bfe17e8a..d9add0370a98 100644
--- a/drivers/gpu/drm/xe/xe_sched_job.c
+++ b/drivers/gpu/drm/xe/xe_sched_job.c
@@ -8,7 +8,7 @@
#include <linux/dma-fence-array.h>
#include <linux/slab.h>
-#include "xe_device_types.h"
+#include "xe_device.h"
#include "xe_engine.h"
#include "xe_gt.h"
#include "xe_hw_engine_types.h"
@@ -72,6 +72,11 @@ static void job_free(struct xe_sched_job *job)
xe_sched_job_parallel_slab : xe_sched_job_slab, job);
}
+static struct xe_device *job_to_xe(struct xe_sched_job *job)
+{
+ return gt_to_xe(job->engine->gt);
+}
+
struct xe_sched_job *xe_sched_job_create(struct xe_engine *e,
u64 *batch_addr)
{
@@ -149,6 +154,11 @@ struct xe_sched_job *xe_sched_job_create(struct xe_engine *e,
for (i = 0; i < width; ++i)
job->batch_addr[i] = batch_addr[i];
+ /* All other jobs require a VM to be open which has a ref */
+ if (unlikely(e->flags & ENGINE_FLAG_KERNEL))
+ xe_device_mem_access_get(job_to_xe(job));
+ xe_device_assert_mem_access(job_to_xe(job));
+
trace_xe_sched_job_create(job);
return job;
@@ -178,6 +188,8 @@ void xe_sched_job_destroy(struct kref *ref)
struct xe_sched_job *job =
container_of(ref, struct xe_sched_job, refcount);
+ if (unlikely(job->engine->flags & ENGINE_FLAG_KERNEL))
+ xe_device_mem_access_put(job_to_xe(job));
xe_engine_put(job->engine);
dma_fence_put(job->fence);
drm_sched_job_cleanup(&job->drm);