author    Francois Dugast <francois.dugast@intel.com>    2024-06-13 20:03:48 +0300
committer Rodrigo Vivi <rodrigo.vivi@intel.com>          2024-06-17 23:46:52 +0300
commit    731e46c032281601756f08cfa7d8505fe41166a9 (patch)
tree      ea9910c2428f6f9032f3f6b758447b6f66f5a7a1 /drivers
parent    5d7612ae201ec199b46bbf81a36cb4667e29d973 (diff)
drm/xe/exec_queue: Rename xe_exec_queue::compute to xe_exec_queue::lr
The properties of this struct are used in long-running context, so make
that clear by renaming it to lr, in alignment with the rest of the code.

Cc: Matthew Brost <matthew.brost@intel.com>
Signed-off-by: Francois Dugast <francois.dugast@intel.com>
Reviewed-by: Matthew Brost <matthew.brost@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20240613170348.723245-1-francois.dugast@intel.com
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
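The rename is mechanical: the anonymous struct holding the queue's preemption-fence state keeps every field, and only the member name changes from compute to lr. Representative call sites after the patch, taken from the hunks below:

    q->lr.context = dma_fence_context_alloc(1);  /* xe_exec_queue_create_ioctl() */
    spin_lock_init(&q->lr.lock);

    pfence = xe_preempt_fence_create(q, q->lr.context,
                                     ++q->lr.seqno);  /* xe_vm_add_compute_exec_queue() */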
Diffstat (limited to 'drivers')
 drivers/gpu/drm/xe/xe_exec_queue.c       |  6
 drivers/gpu/drm/xe/xe_exec_queue_types.h | 14
 drivers/gpu/drm/xe/xe_preempt_fence.c    |  2
 drivers/gpu/drm/xe/xe_vm.c               | 58
 4 files changed, 40 insertions(+), 40 deletions(-)
diff --git a/drivers/gpu/drm/xe/xe_exec_queue.c b/drivers/gpu/drm/xe/xe_exec_queue.c
index cf45df0328da..0ba37835849b 100644
--- a/drivers/gpu/drm/xe/xe_exec_queue.c
+++ b/drivers/gpu/drm/xe/xe_exec_queue.c
@@ -67,7 +67,7 @@ static struct xe_exec_queue *__xe_exec_queue_alloc(struct xe_device *xe,
q->fence_irq = &gt->fence_irq[hwe->class];
q->ring_ops = gt->ring_ops[hwe->class];
q->ops = gt->exec_queue_ops;
- INIT_LIST_HEAD(&q->compute.link);
+ INIT_LIST_HEAD(&q->lr.link);
INIT_LIST_HEAD(&q->multi_gt_link);
q->sched_props.timeslice_us = hwe->eclass->sched_props.timeslice_us;
@@ -633,8 +633,8 @@ int xe_exec_queue_create_ioctl(struct drm_device *dev, void *data,
return PTR_ERR(q);
if (xe_vm_in_preempt_fence_mode(vm)) {
- q->compute.context = dma_fence_context_alloc(1);
- spin_lock_init(&q->compute.lock);
+ q->lr.context = dma_fence_context_alloc(1);
+ spin_lock_init(&q->lr.lock);
err = xe_vm_add_compute_exec_queue(vm, q);
if (XE_IOCTL_DBG(xe, err))
diff --git a/drivers/gpu/drm/xe/xe_exec_queue_types.h b/drivers/gpu/drm/xe/xe_exec_queue_types.h
index f0c5f82ce7e3..201588ec33c3 100644
--- a/drivers/gpu/drm/xe/xe_exec_queue_types.h
+++ b/drivers/gpu/drm/xe/xe_exec_queue_types.h
@@ -113,19 +113,19 @@ struct xe_exec_queue {
enum xe_exec_queue_priority priority;
} sched_props;
- /** @compute: compute exec queue state */
+ /** @lr: long-running exec queue state */
struct {
- /** @compute.pfence: preemption fence */
+ /** @lr.pfence: preemption fence */
struct dma_fence *pfence;
- /** @compute.context: preemption fence context */
+ /** @lr.context: preemption fence context */
u64 context;
- /** @compute.seqno: preemption fence seqno */
+ /** @lr.seqno: preemption fence seqno */
u32 seqno;
- /** @compute.link: link into VM's list of exec queues */
+ /** @lr.link: link into VM's list of exec queues */
struct list_head link;
- /** @compute.lock: preemption fences lock */
+ /** @lr.lock: preemption fences lock */
spinlock_t lock;
- } compute;
+ } lr;
/** @ops: submission backend exec queue operations */
const struct xe_exec_queue_ops *ops;
diff --git a/drivers/gpu/drm/xe/xe_preempt_fence.c b/drivers/gpu/drm/xe/xe_preempt_fence.c
index 5b243b7feb59..e8b8ae5c6485 100644
--- a/drivers/gpu/drm/xe/xe_preempt_fence.c
+++ b/drivers/gpu/drm/xe/xe_preempt_fence.c
@@ -129,7 +129,7 @@ xe_preempt_fence_arm(struct xe_preempt_fence *pfence, struct xe_exec_queue *q,
list_del_init(&pfence->link);
pfence->q = xe_exec_queue_get(q);
dma_fence_init(&pfence->base, &preempt_fence_ops,
- &q->compute.lock, context, seqno);
+ &q->lr.lock, context, seqno);
return &pfence->base;
}
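This hunk also shows why the spinlock lives in the per-queue state: dma_fence_init() records the lock pointer in the fence, so every preemption fence armed for a queue serializes its signaling on that queue's q->lr.lock. For reference, the core helper's signature from include/linux/dma-fence.h:

    void dma_fence_init(struct dma_fence *fence,
                        const struct dma_fence_ops *ops,
                        spinlock_t *lock, u64 context, u64 seqno);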
diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
index ffda487653d8..61d4d95a5377 100644
--- a/drivers/gpu/drm/xe/xe_vm.c
+++ b/drivers/gpu/drm/xe/xe_vm.c
@@ -83,10 +83,10 @@ static bool preempt_fences_waiting(struct xe_vm *vm)
lockdep_assert_held(&vm->lock);
xe_vm_assert_held(vm);
- list_for_each_entry(q, &vm->preempt.exec_queues, compute.link) {
- if (!q->compute.pfence ||
+ list_for_each_entry(q, &vm->preempt.exec_queues, lr.link) {
+ if (!q->lr.pfence ||
test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
- &q->compute.pfence->flags)) {
+ &q->lr.pfence->flags)) {
return true;
}
}
@@ -129,14 +129,14 @@ static int wait_for_existing_preempt_fences(struct xe_vm *vm)
xe_vm_assert_held(vm);
- list_for_each_entry(q, &vm->preempt.exec_queues, compute.link) {
- if (q->compute.pfence) {
- long timeout = dma_fence_wait(q->compute.pfence, false);
+ list_for_each_entry(q, &vm->preempt.exec_queues, lr.link) {
+ if (q->lr.pfence) {
+ long timeout = dma_fence_wait(q->lr.pfence, false);
if (timeout < 0)
return -ETIME;
- dma_fence_put(q->compute.pfence);
- q->compute.pfence = NULL;
+ dma_fence_put(q->lr.pfence);
+ q->lr.pfence = NULL;
}
}
@@ -148,7 +148,7 @@ static bool xe_vm_is_idle(struct xe_vm *vm)
struct xe_exec_queue *q;
xe_vm_assert_held(vm);
- list_for_each_entry(q, &vm->preempt.exec_queues, compute.link) {
+ list_for_each_entry(q, &vm->preempt.exec_queues, lr.link) {
if (!xe_exec_queue_is_idle(q))
return false;
}
@@ -161,17 +161,17 @@ static void arm_preempt_fences(struct xe_vm *vm, struct list_head *list)
struct list_head *link;
struct xe_exec_queue *q;
- list_for_each_entry(q, &vm->preempt.exec_queues, compute.link) {
+ list_for_each_entry(q, &vm->preempt.exec_queues, lr.link) {
struct dma_fence *fence;
link = list->next;
xe_assert(vm->xe, link != list);
fence = xe_preempt_fence_arm(to_preempt_fence_from_link(link),
- q, q->compute.context,
- ++q->compute.seqno);
- dma_fence_put(q->compute.pfence);
- q->compute.pfence = fence;
+ q, q->lr.context,
+ ++q->lr.seqno);
+ dma_fence_put(q->lr.pfence);
+ q->lr.pfence = fence;
}
}
@@ -191,10 +191,10 @@ static int add_preempt_fences(struct xe_vm *vm, struct xe_bo *bo)
if (err)
goto out_unlock;
- list_for_each_entry(q, &vm->preempt.exec_queues, compute.link)
- if (q->compute.pfence) {
+ list_for_each_entry(q, &vm->preempt.exec_queues, lr.link)
+ if (q->lr.pfence) {
dma_resv_add_fence(bo->ttm.base.resv,
- q->compute.pfence,
+ q->lr.pfence,
DMA_RESV_USAGE_BOOKKEEP);
}
@@ -211,10 +211,10 @@ static void resume_and_reinstall_preempt_fences(struct xe_vm *vm,
lockdep_assert_held(&vm->lock);
xe_vm_assert_held(vm);
- list_for_each_entry(q, &vm->preempt.exec_queues, compute.link) {
+ list_for_each_entry(q, &vm->preempt.exec_queues, lr.link) {
q->ops->resume(q);
- drm_gpuvm_resv_add_fence(&vm->gpuvm, exec, q->compute.pfence,
+ drm_gpuvm_resv_add_fence(&vm->gpuvm, exec, q->lr.pfence,
DMA_RESV_USAGE_BOOKKEEP, DMA_RESV_USAGE_BOOKKEEP);
}
}
@@ -238,16 +238,16 @@ int xe_vm_add_compute_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q)
if (err)
goto out_up_write;
- pfence = xe_preempt_fence_create(q, q->compute.context,
- ++q->compute.seqno);
+ pfence = xe_preempt_fence_create(q, q->lr.context,
+ ++q->lr.seqno);
if (!pfence) {
err = -ENOMEM;
goto out_fini;
}
- list_add(&q->compute.link, &vm->preempt.exec_queues);
+ list_add(&q->lr.link, &vm->preempt.exec_queues);
++vm->preempt.num_exec_queues;
- q->compute.pfence = pfence;
+ q->lr.pfence = pfence;
down_read(&vm->userptr.notifier_lock);
@@ -284,12 +284,12 @@ void xe_vm_remove_compute_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q)
return;
down_write(&vm->lock);
- list_del(&q->compute.link);
+ list_del(&q->lr.link);
--vm->preempt.num_exec_queues;
- if (q->compute.pfence) {
- dma_fence_enable_sw_signaling(q->compute.pfence);
- dma_fence_put(q->compute.pfence);
- q->compute.pfence = NULL;
+ if (q->lr.pfence) {
+ dma_fence_enable_sw_signaling(q->lr.pfence);
+ dma_fence_put(q->lr.pfence);
+ q->lr.pfence = NULL;
}
up_write(&vm->lock);
}
@@ -327,7 +327,7 @@ static void xe_vm_kill(struct xe_vm *vm, bool unlocked)
vm->flags |= XE_VM_FLAG_BANNED;
trace_xe_vm_kill(vm);
- list_for_each_entry(q, &vm->preempt.exec_queues, compute.link)
+ list_for_each_entry(q, &vm->preempt.exec_queues, lr.link)
q->ops->kill(q);
if (unlocked)