summaryrefslogtreecommitdiff
path: root/drivers/gpu
diff options
context:
space:
mode:
author    Matthew Brost <matthew.brost@intel.com>  2023-01-18 07:31:24 +0300
committer Rodrigo Vivi <rodrigo.vivi@intel.com>    2023-12-20 02:27:45 +0300
commit    a9351846d94568d96e7400be343392c58e4f82e6 (patch)
tree      49e5af59b95dae63c7ce4466b0bcb874e714bc1b /drivers/gpu
parent    5b643660875d01c203782a86ac5e3353849bc513 (diff)
download  linux-a9351846d94568d96e7400be343392c58e4f82e6.tar.xz
drm/xe: Break TLB invalidation out into its own file
TLB invalidation is used by more than USM (page faults), so break this code
out into its own file.

Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Reviewed-by: Niranjana Vishwanathapura <niranjana.vishwanathapura@intel.com>
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Diffstat (limited to 'drivers/gpu')
-rw-r--r--  drivers/gpu/drm/xe/Makefile                  |   1
-rw-r--r--  drivers/gpu/drm/xe/xe_gt.c                   |   5
-rw-r--r--  drivers/gpu/drm/xe/xe_gt_debugfs.c           |   1
-rw-r--r--  drivers/gpu/drm/xe/xe_gt_pagefault.c         |  99
-rw-r--r--  drivers/gpu/drm/xe/xe_gt_pagefault.h         |   3
-rw-r--r--  drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c  | 115
-rw-r--r--  drivers/gpu/drm/xe/xe_gt_tlb_invalidation.h  |  19
-rw-r--r--  drivers/gpu/drm/xe/xe_guc_ct.c               |   1
-rw-r--r--  drivers/gpu/drm/xe/xe_vm.c                   |   1
9 files changed, 146 insertions, 99 deletions
diff --git a/drivers/gpu/drm/xe/Makefile b/drivers/gpu/drm/xe/Makefile
index f8da32b550bc..998f7044b047 100644
--- a/drivers/gpu/drm/xe/Makefile
+++ b/drivers/gpu/drm/xe/Makefile
@@ -57,6 +57,7 @@ xe-y += xe_bb.o \
xe_gt_mcr.o \
xe_gt_pagefault.o \
xe_gt_sysfs.o \
+ xe_gt_tlb_invalidation.o \
xe_gt_topology.o \
xe_guc.o \
xe_guc_ads.o \
diff --git a/drivers/gpu/drm/xe/xe_gt.c b/drivers/gpu/drm/xe/xe_gt.c
index 61a6430cb435..96136f130eda 100644
--- a/drivers/gpu/drm/xe/xe_gt.c
+++ b/drivers/gpu/drm/xe/xe_gt.c
@@ -19,6 +19,7 @@
#include "xe_gt_mcr.h"
#include "xe_gt_pagefault.h"
#include "xe_gt_sysfs.h"
+#include "xe_gt_tlb_invalidation.h"
#include "xe_gt_topology.h"
#include "xe_hw_fence.h"
#include "xe_irq.h"
@@ -571,6 +572,10 @@ int xe_gt_init(struct xe_gt *gt)
xe_hw_fence_irq_init(&gt->fence_irq[i]);
}
+ err = xe_gt_tlb_invalidation_init(gt);
+ if (err)
+ return err;
+
err = xe_gt_pagefault_init(gt);
if (err)
return err;
diff --git a/drivers/gpu/drm/xe/xe_gt_debugfs.c b/drivers/gpu/drm/xe/xe_gt_debugfs.c
index cd1888784141..01303bbe073c 100644
--- a/drivers/gpu/drm/xe/xe_gt_debugfs.c
+++ b/drivers/gpu/drm/xe/xe_gt_debugfs.c
@@ -12,6 +12,7 @@
#include "xe_gt_debugfs.h"
#include "xe_gt_mcr.h"
#include "xe_gt_pagefault.h"
+#include "xe_gt_tlb_invalidation.h"
#include "xe_gt_topology.h"
#include "xe_hw_engine.h"
#include "xe_macros.h"
diff --git a/drivers/gpu/drm/xe/xe_gt_pagefault.c b/drivers/gpu/drm/xe/xe_gt_pagefault.c
index 7125113b7390..93a8efe5d0a0 100644
--- a/drivers/gpu/drm/xe/xe_gt_pagefault.c
+++ b/drivers/gpu/drm/xe/xe_gt_pagefault.c
@@ -10,9 +10,10 @@
#include "xe_bo.h"
#include "xe_gt.h"
+#include "xe_gt_pagefault.h"
+#include "xe_gt_tlb_invalidation.h"
#include "xe_guc.h"
#include "xe_guc_ct.h"
-#include "xe_gt_pagefault.h"
#include "xe_migrate.h"
#include "xe_pt.h"
#include "xe_trace.h"
@@ -61,40 +62,6 @@ guc_to_gt(struct xe_guc *guc)
return container_of(guc, struct xe_gt, uc.guc);
}
-static int send_tlb_invalidation(struct xe_guc *guc)
-{
- struct xe_gt *gt = guc_to_gt(guc);
- u32 action[] = {
- XE_GUC_ACTION_TLB_INVALIDATION,
- 0,
- XE_GUC_TLB_INVAL_FULL << XE_GUC_TLB_INVAL_TYPE_SHIFT |
- XE_GUC_TLB_INVAL_MODE_HEAVY << XE_GUC_TLB_INVAL_MODE_SHIFT |
- XE_GUC_TLB_INVAL_FLUSH_CACHE,
- };
- int seqno;
- int ret;
-
- /*
- * XXX: The seqno algorithm relies on TLB invalidation being processed
- * in order which they currently are, if that changes the algorithm will
- * need to be updated.
- */
- mutex_lock(&guc->ct.lock);
- seqno = gt->usm.tlb_invalidation_seqno;
- action[1] = seqno;
- gt->usm.tlb_invalidation_seqno = (gt->usm.tlb_invalidation_seqno + 1) %
- TLB_INVALIDATION_SEQNO_MAX;
- if (!gt->usm.tlb_invalidation_seqno)
- gt->usm.tlb_invalidation_seqno = 1;
- ret = xe_guc_ct_send_locked(&guc->ct, action, ARRAY_SIZE(action),
- G2H_LEN_DW_TLB_INVALIDATE, 1);
- if (!ret)
- ret = seqno;
- mutex_unlock(&guc->ct.lock);
-
- return ret;
-}
-
static bool access_is_atomic(enum access_type access_type)
{
return access_type == ACCESS_TYPE_ATOMIC;
@@ -278,7 +245,7 @@ unlock_vm:
* defer TLB invalidate + fault response to a callback of fence
* too
*/
- ret = send_tlb_invalidation(&gt->uc.guc);
+ ret = xe_gt_tlb_invalidation(gt);
if (ret >= 0)
ret = 0;
}
@@ -433,7 +400,6 @@ int xe_gt_pagefault_init(struct xe_gt *gt)
if (!xe->info.supports_usm)
return 0;
- gt->usm.tlb_invalidation_seqno = 1;
for (i = 0; i < NUM_PF_QUEUE; ++i) {
gt->usm.pf_queue[i].gt = gt;
spin_lock_init(&gt->usm.pf_queue[i].lock);
@@ -482,65 +448,6 @@ void xe_gt_pagefault_reset(struct xe_gt *gt)
}
}
-int xe_gt_tlb_invalidation(struct xe_gt *gt)
-{
- return send_tlb_invalidation(&gt->uc.guc);
-}
-
-static bool tlb_invalidation_seqno_past(struct xe_gt *gt, int seqno)
-{
- if (gt->usm.tlb_invalidation_seqno_recv >= seqno)
- return true;
-
- if (seqno - gt->usm.tlb_invalidation_seqno_recv >
- (TLB_INVALIDATION_SEQNO_MAX / 2))
- return true;
-
- return false;
-}
-
-int xe_gt_tlb_invalidation_wait(struct xe_gt *gt, int seqno)
-{
- struct xe_device *xe = gt_to_xe(gt);
- struct xe_guc *guc = &gt->uc.guc;
- int ret;
-
- /*
- * XXX: See above, this algorithm only works if seqno are always in
- * order
- */
- ret = wait_event_timeout(guc->ct.wq,
- tlb_invalidation_seqno_past(gt, seqno),
- HZ / 5);
- if (!ret) {
- drm_err(&xe->drm, "TLB invalidation time'd out, seqno=%d, recv=%d\n",
- seqno, gt->usm.tlb_invalidation_seqno_recv);
- return -ETIME;
- }
-
- return 0;
-}
-
-int xe_guc_tlb_invalidation_done_handler(struct xe_guc *guc, u32 *msg, u32 len)
-{
- struct xe_gt *gt = guc_to_gt(guc);
- int expected_seqno;
-
- if (unlikely(len != 1))
- return -EPROTO;
-
- /* Sanity check on seqno */
- expected_seqno = (gt->usm.tlb_invalidation_seqno_recv + 1) %
- TLB_INVALIDATION_SEQNO_MAX;
- XE_WARN_ON(expected_seqno != msg[0]);
-
- gt->usm.tlb_invalidation_seqno_recv = msg[0];
- smp_wmb();
- wake_up_all(&guc->ct.wq);
-
- return 0;
-}
-
static int granularity_in_byte(int val)
{
switch (val) {
diff --git a/drivers/gpu/drm/xe/xe_gt_pagefault.h b/drivers/gpu/drm/xe/xe_gt_pagefault.h
index 35f68027cc9c..839c065a5e4c 100644
--- a/drivers/gpu/drm/xe/xe_gt_pagefault.h
+++ b/drivers/gpu/drm/xe/xe_gt_pagefault.h
@@ -13,10 +13,7 @@ struct xe_guc;
int xe_gt_pagefault_init(struct xe_gt *gt);
void xe_gt_pagefault_reset(struct xe_gt *gt);
-int xe_gt_tlb_invalidation(struct xe_gt *gt);
-int xe_gt_tlb_invalidation_wait(struct xe_gt *gt, int seqno);
int xe_guc_pagefault_handler(struct xe_guc *guc, u32 *msg, u32 len);
-int xe_guc_tlb_invalidation_done_handler(struct xe_guc *guc, u32 *msg, u32 len);
int xe_guc_access_counter_notify_handler(struct xe_guc *guc, u32 *msg, u32 len);
#endif /* _XE_GT_PAGEFAULT_ */
diff --git a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c
new file mode 100644
index 000000000000..fea7a557d213
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c
@@ -0,0 +1,115 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#include "xe_gt.h"
+#include "xe_gt_tlb_invalidation.h"
+#include "xe_guc.h"
+#include "xe_guc_ct.h"
+
+static struct xe_gt *
+guc_to_gt(struct xe_guc *guc)
+{
+ return container_of(guc, struct xe_gt, uc.guc);
+}
+
+int xe_gt_tlb_invalidation_init(struct xe_gt *gt)
+{
+ gt->usm.tlb_invalidation_seqno = 1;
+
+ return 0;
+}
+
+static int send_tlb_invalidation(struct xe_guc *guc)
+{
+ struct xe_gt *gt = guc_to_gt(guc);
+ u32 action[] = {
+ XE_GUC_ACTION_TLB_INVALIDATION,
+ 0,
+ XE_GUC_TLB_INVAL_FULL << XE_GUC_TLB_INVAL_TYPE_SHIFT |
+ XE_GUC_TLB_INVAL_MODE_HEAVY << XE_GUC_TLB_INVAL_MODE_SHIFT |
+ XE_GUC_TLB_INVAL_FLUSH_CACHE,
+ };
+ int seqno;
+ int ret;
+
+ /*
+ * XXX: The seqno algorithm relies on TLB invalidation being processed
+ * in order which they currently are, if that changes the algorithm will
+ * need to be updated.
+ */
+ mutex_lock(&guc->ct.lock);
+ seqno = gt->usm.tlb_invalidation_seqno;
+ action[1] = seqno;
+ gt->usm.tlb_invalidation_seqno = (gt->usm.tlb_invalidation_seqno + 1) %
+ TLB_INVALIDATION_SEQNO_MAX;
+ if (!gt->usm.tlb_invalidation_seqno)
+ gt->usm.tlb_invalidation_seqno = 1;
+ ret = xe_guc_ct_send_locked(&guc->ct, action, ARRAY_SIZE(action),
+ G2H_LEN_DW_TLB_INVALIDATE, 1);
+ if (!ret)
+ ret = seqno;
+ mutex_unlock(&guc->ct.lock);
+
+ return ret;
+}
+
+int xe_gt_tlb_invalidation(struct xe_gt *gt)
+{
+ return send_tlb_invalidation(&gt->uc.guc);
+}
+
+static bool tlb_invalidation_seqno_past(struct xe_gt *gt, int seqno)
+{
+ if (gt->usm.tlb_invalidation_seqno_recv >= seqno)
+ return true;
+
+ if (seqno - gt->usm.tlb_invalidation_seqno_recv >
+ (TLB_INVALIDATION_SEQNO_MAX / 2))
+ return true;
+
+ return false;
+}
+
+int xe_gt_tlb_invalidation_wait(struct xe_gt *gt, int seqno)
+{
+ struct xe_device *xe = gt_to_xe(gt);
+ struct xe_guc *guc = &gt->uc.guc;
+ int ret;
+
+ /*
+ * XXX: See above, this algorithm only works if seqno are always in
+ * order
+ */
+ ret = wait_event_timeout(guc->ct.wq,
+ tlb_invalidation_seqno_past(gt, seqno),
+ HZ / 5);
+ if (!ret) {
+ drm_err(&xe->drm, "TLB invalidation time'd out, seqno=%d, recv=%d\n",
+ seqno, gt->usm.tlb_invalidation_seqno_recv);
+ return -ETIME;
+ }
+
+ return 0;
+}
+
+int xe_guc_tlb_invalidation_done_handler(struct xe_guc *guc, u32 *msg, u32 len)
+{
+ struct xe_gt *gt = guc_to_gt(guc);
+ int expected_seqno;
+
+ if (unlikely(len != 1))
+ return -EPROTO;
+
+ /* Sanity check on seqno */
+ expected_seqno = (gt->usm.tlb_invalidation_seqno_recv + 1) %
+ TLB_INVALIDATION_SEQNO_MAX;
+ XE_WARN_ON(expected_seqno != msg[0]);
+
+ gt->usm.tlb_invalidation_seqno_recv = msg[0];
+ smp_wmb();
+ wake_up_all(&guc->ct.wq);
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.h b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.h
new file mode 100644
index 000000000000..f1c3b34b1993
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#ifndef _XE_GT_TLB_INVALIDATION_H_
+#define _XE_GT_TLB_INVALIDATION_H_
+
+#include <linux/types.h>
+
+struct xe_gt;
+struct xe_guc;
+
+int xe_gt_tlb_invalidation_init(struct xe_gt *gt);
+int xe_gt_tlb_invalidation(struct xe_gt *gt);
+int xe_gt_tlb_invalidation_wait(struct xe_gt *gt, int seqno);
+int xe_guc_tlb_invalidation_done_handler(struct xe_guc *guc, u32 *msg, u32 len);
+
+#endif /* _XE_GT_TLB_INVALIDATION_ */
diff --git a/drivers/gpu/drm/xe/xe_guc_ct.c b/drivers/gpu/drm/xe/xe_guc_ct.c
index 6e25c1d5d43e..84d4302d4e72 100644
--- a/drivers/gpu/drm/xe/xe_guc_ct.c
+++ b/drivers/gpu/drm/xe/xe_guc_ct.c
@@ -15,6 +15,7 @@
#include "xe_guc.h"
#include "xe_guc_ct.h"
#include "xe_gt_pagefault.h"
+#include "xe_gt_tlb_invalidation.h"
#include "xe_guc_submit.h"
#include "xe_map.h"
#include "xe_trace.h"
diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
index d47a8617c5b6..c548cd04f9cf 100644
--- a/drivers/gpu/drm/xe/xe_vm.c
+++ b/drivers/gpu/drm/xe/xe_vm.c
@@ -19,6 +19,7 @@
#include "xe_engine.h"
#include "xe_gt.h"
#include "xe_gt_pagefault.h"
+#include "xe_gt_tlb_invalidation.h"
#include "xe_migrate.h"
#include "xe_pm.h"
#include "xe_preempt_fence.h"