Diffstat (limited to 'drivers/gpu/drm/xe/xe_drm_client.c')
-rw-r--r--  drivers/gpu/drm/xe/xe_drm_client.c  85
1 file changed, 59 insertions, 26 deletions
diff --git a/drivers/gpu/drm/xe/xe_drm_client.c b/drivers/gpu/drm/xe/xe_drm_client.c
index 22f0f1a6dfd5..2d4874d2b922 100644
--- a/drivers/gpu/drm/xe/xe_drm_client.c
+++ b/drivers/gpu/drm/xe/xe_drm_client.c
@@ -135,8 +135,8 @@ void xe_drm_client_add_bo(struct xe_drm_client *client,
XE_WARN_ON(bo->client);
XE_WARN_ON(!list_empty(&bo->client_link));
- spin_lock(&client->bos_lock);
bo->client = xe_drm_client_get(client);
+ spin_lock(&client->bos_lock);
list_add_tail(&bo->client_link, &client->bos_list);
spin_unlock(&client->bos_lock);
}
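
A note on the pattern in the hunk above: the reference on the client is now taken before the spinlock, so the critical section covers only the list insertion. Below is a minimal userspace sketch of the same shape, assuming a pthread mutex and C11 atomics stand in for the kernel spinlock and refcount; client_get() and the structures are illustrative only, not the driver's API.

#include <pthread.h>
#include <stdatomic.h>

struct bo;

struct client {
	atomic_int refcount;
	pthread_mutex_t bos_lock;	/* stands in for the kernel spinlock */
	struct bo *bos_head;		/* simple singly linked BO list */
};

struct bo {
	struct client *client;
	struct bo *next;
};

/* Illustrative stand-in for xe_drm_client_get(): take a reference. */
static struct client *client_get(struct client *c)
{
	atomic_fetch_add(&c->refcount, 1);
	return c;
}

static void client_add_bo(struct client *c, struct bo *bo)
{
	/* Take the client reference before the lock... */
	bo->client = client_get(c);

	/* ...so the critical section covers only the list update. */
	pthread_mutex_lock(&c->bos_lock);
	bo->next = c->bos_head;
	c->bos_head = bo;
	pthread_mutex_unlock(&c->bos_lock);
}

int main(void)
{
	struct client c = { .refcount = 1,
			    .bos_lock = PTHREAD_MUTEX_INITIALIZER };
	struct bo bo = { 0 };

	client_add_bo(&c, &bo);
	return 0;
}
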
@@ -261,6 +261,7 @@ static void show_meminfo(struct drm_printer *p, struct drm_file *file)
if (man) {
drm_print_memory_stats(p,
&stats[mem_type],
+ DRM_GEM_OBJECT_ACTIVE |
DRM_GEM_OBJECT_RESIDENT |
(mem_type != XE_PL_SYSTEM ? 0 :
DRM_GEM_OBJECT_PURGEABLE),
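
For reference, the status argument built in the hunk above is a plain bitwise OR that varies with the placement: ACTIVE and RESIDENT are always reported, PURGEABLE only for system memory. A standalone sketch of that composition follows; the flag values and names are made up for illustration, the real ones live in the DRM GEM headers.

#include <stdio.h>

/* Made-up flag values; the real ones live in the DRM GEM headers. */
enum {
	OBJ_ACTIVE	= 1 << 0,
	OBJ_RESIDENT	= 1 << 1,
	OBJ_PURGEABLE	= 1 << 2,
};

enum placement { PL_SYSTEM, PL_VRAM };

static unsigned int status_mask(enum placement pl)
{
	/*
	 * ACTIVE and RESIDENT are always reported; PURGEABLE only for
	 * system memory, mirroring the mem_type != XE_PL_SYSTEM check.
	 */
	return OBJ_ACTIVE | OBJ_RESIDENT |
	       (pl == PL_SYSTEM ? OBJ_PURGEABLE : 0);
}

int main(void)
{
	printf("system: 0x%x, vram: 0x%x\n",
	       status_mask(PL_SYSTEM), status_mask(PL_VRAM));
	return 0;
}
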
@@ -269,6 +270,49 @@ static void show_meminfo(struct drm_printer *p, struct drm_file *file)
}
}
+static struct xe_hw_engine *any_engine(struct xe_device *xe)
+{
+ struct xe_gt *gt;
+ unsigned long gt_id;
+
+ for_each_gt(gt, xe, gt_id) {
+ struct xe_hw_engine *hwe = xe_gt_any_hw_engine(gt);
+
+ if (hwe)
+ return hwe;
+ }
+
+ return NULL;
+}
+
+static bool force_wake_get_any_engine(struct xe_device *xe,
+ struct xe_hw_engine **phwe,
+ unsigned int *pfw_ref)
+{
+ enum xe_force_wake_domains domain;
+ unsigned int fw_ref;
+ struct xe_hw_engine *hwe;
+ struct xe_force_wake *fw;
+
+ hwe = any_engine(xe);
+ if (!hwe)
+ return false;
+
+ domain = xe_hw_engine_to_fw_domain(hwe);
+ fw = gt_to_fw(hwe->gt);
+
+ fw_ref = xe_force_wake_get(fw, domain);
+ if (!xe_force_wake_ref_has_domain(fw_ref, domain)) {
+ xe_force_wake_put(fw, fw_ref);
+ return false;
+ }
+
+ *phwe = hwe;
+ *pfw_ref = fw_ref;
+
+ return true;
+}
+
static void show_run_ticks(struct drm_printer *p, struct drm_file *file)
{
unsigned long class, i, gt_id, capacity[XE_ENGINE_CLASS_MAX] = { };
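
The two helpers added above factor out a try-acquire pattern: pick whichever hardware engine exists, attempt to wake its forcewake domain, and roll back a partial grab so the caller sees either a fully held reference or a clean failure, returned through out-parameters. A self-contained userspace sketch of that shape is below; every type and call in it is a made-up stand-in, not the xe API.

#include <stdbool.h>

/* Made-up stand-ins for the driver types and calls. */
struct engine { unsigned int fw_domain; };

static struct engine sole_engine = { .fw_domain = 0x1 };

static struct engine *find_any_engine(void)
{
	return &sole_engine;		/* would be NULL with no engines */
}

static unsigned int fw_get(unsigned int domain)
{
	return domain;			/* pretend the wake succeeded */
}

static bool fw_ref_has_domain(unsigned int ref, unsigned int domain)
{
	return (ref & domain) == domain;
}

static void fw_put(unsigned int ref)
{
	(void)ref;
}

/*
 * Same shape as force_wake_get_any_engine(): pick any engine, try to
 * wake its domain, and undo a partial grab so the caller only ever
 * sees a fully acquired reference or a clean failure.
 */
static bool get_any_engine(struct engine **out_engine, unsigned int *out_ref)
{
	struct engine *e = find_any_engine();
	unsigned int ref;

	if (!e)
		return false;

	ref = fw_get(e->fw_domain);
	if (!fw_ref_has_domain(ref, e->fw_domain)) {
		fw_put(ref);		/* drop whatever was partially taken */
		return false;
	}

	*out_engine = e;
	*out_ref = ref;
	return true;
}

int main(void)
{
	struct engine *e;
	unsigned int ref;

	return get_any_engine(&e, &ref) ? 0 : 1;
}
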
@@ -280,7 +324,18 @@ static void show_run_ticks(struct drm_printer *p, struct drm_file *file)
u64 gpu_timestamp;
unsigned int fw_ref;
+ /*
+ * Wait for any exec queue going away: their cycles will get updated on
+ * context switch out, so wait for that to happen
+ */
+ wait_var_event(&xef->exec_queue.pending_removal,
+ !atomic_read(&xef->exec_queue.pending_removal));
+
xe_pm_runtime_get(xe);
+ if (!force_wake_get_any_engine(xe, &hwe, &fw_ref)) {
+ xe_pm_runtime_put(xe);
+ return;
+ }
/* Accumulate all the exec queues from this client */
mutex_lock(&xef->exec_queue.lock);
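
The wait added at the top of show_run_ticks() blocks until no exec-queue removal is still in flight, because a queue's cycle counters are only written back when its context switches out. Below is a userspace analog of that wait, assuming a plain counter plus condition variable in place of the kernel's wait_var_event()/wake_up_var(); all names are illustrative.

#include <pthread.h>

/* All names here are illustrative, not the xe_file fields. */
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static int pending_removal;

/* Called when an exec queue starts being torn down. */
static void removal_start(void)
{
	pthread_mutex_lock(&lock);
	pending_removal++;
	pthread_mutex_unlock(&lock);
}

/* Called once its cycles have been written back. */
static void removal_done(void)
{
	pthread_mutex_lock(&lock);
	if (--pending_removal == 0)
		pthread_cond_broadcast(&cond);	/* like wake_up_var() */
	pthread_mutex_unlock(&lock);
}

/* The fdinfo reader blocks here, like the wait_var_event() above. */
static void wait_no_pending_removal(void)
{
	pthread_mutex_lock(&lock);
	while (pending_removal)
		pthread_cond_wait(&cond, &lock);
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	removal_start();
	removal_done();
	wait_no_pending_removal();
	return 0;
}
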
@@ -295,33 +350,11 @@ static void show_run_ticks(struct drm_printer *p, struct drm_file *file)
}
mutex_unlock(&xef->exec_queue.lock);
- /* Get the total GPU cycles */
- for_each_gt(gt, xe, gt_id) {
- enum xe_force_wake_domains fw;
-
- hwe = xe_gt_any_hw_engine(gt);
- if (!hwe)
- continue;
-
- fw = xe_hw_engine_to_fw_domain(hwe);
-
- fw_ref = xe_force_wake_get(gt_to_fw(gt), fw);
- if (!xe_force_wake_ref_has_domain(fw_ref, fw)) {
- hwe = NULL;
- xe_force_wake_put(gt_to_fw(gt), fw_ref);
- break;
- }
-
- gpu_timestamp = xe_hw_engine_read_timestamp(hwe);
- xe_force_wake_put(gt_to_fw(gt), fw_ref);
- break;
- }
+ gpu_timestamp = xe_hw_engine_read_timestamp(hwe);
+ xe_force_wake_put(gt_to_fw(hwe->gt), fw_ref);
xe_pm_runtime_put(xe);
- if (unlikely(!hwe))
- return;
-
for (class = 0; class < XE_ENGINE_CLASS_MAX; class++) {
const char *class_name;
@@ -352,7 +385,7 @@ static void show_run_ticks(struct drm_printer *p, struct drm_file *file)
* @p: The drm_printer ptr
* @file: The drm_file ptr
*
- * This is callabck for drm fdinfo interface. Register this callback
+ * This is callback for drm fdinfo interface. Register this callback
* in drm driver ops for show_fdinfo.
*
* Return: void
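
Taken together, the reworked show_run_ticks() follows a strict acquire/use/release nesting: take the runtime PM reference, then forcewake for one engine, read the timestamp, and unwind in reverse order, with the forcewake failure path dropping the PM reference before returning. A compressed sketch of that ordering using placeholder stubs (none of these are the driver's real functions):

#include <stdbool.h>
#include <stdint.h>

/*
 * Placeholder stubs standing in for runtime PM, forcewake and the
 * timestamp register read; none of these are real xe functions.
 */
static void pm_get(void) { }
static void pm_put(void) { }
static bool fw_get_any(unsigned int *ref) { *ref = 1; return true; }
static void fw_put(unsigned int ref) { (void)ref; }
static uint64_t read_timestamp(void) { return 0; }

static bool sample_gpu_timestamp(uint64_t *ts)
{
	unsigned int fw_ref;

	pm_get();
	if (!fw_get_any(&fw_ref)) {
		pm_put();		/* unwind in reverse order on failure */
		return false;
	}

	/* ... accumulate per-queue cycles here ... */

	*ts = read_timestamp();		/* read while the domain is awake */
	fw_put(fw_ref);
	pm_put();
	return true;
}

int main(void)
{
	uint64_t ts;

	return sample_gpu_timestamp(&ts) ? 0 : 1;
}
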