author     Matthew Brost <matthew.brost@intel.com>    2023-01-30 21:55:35 +0300
committer  Rodrigo Vivi <rodrigo.vivi@intel.com>      2023-12-20 02:27:47 +0300
commit     da3799c975726572066f1c6bc6a6f65cb1f01c84 (patch)
tree       b8f629bd61cfaa8cf0fa04db1d1b9bcc25c3c0a2 /drivers/gpu/drm/xe
parent     74a8b2c6e2d6f17fcd9977de298eff20a46b0af7 (diff)
drm/xe: Use GuC to do GGTT invalidations for the GuC firmware
Only the GuC should be issuing TLB invalidations if it is enabled. Part
of this patch is to sanitize the device on driver unload to ensure we
do not send GuC-based TLB invalidations during driver unload.
Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Reviewed-by: Niranjana Vishwanathapura <niranjana.vishwanathapura@intel.com>
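
As a quick orientation for the change below, here is a minimal, self-contained sketch (plain C, compilable outside the kernel) of the control flow this patch gives xe_ggtt_invalidate(): once GuC submission is enabled, GGTT TLB invalidations are routed through the GuC and waited on by seqno; otherwise the existing MMIO path is used. The mock_* names, the printf stand-ins, and the standalone structure are illustrative only and are not part of the driver.

/*
 * Simplified model of the invalidation-path selection added to
 * xe_ggtt_invalidate(). All mock_* identifiers are stand-ins for the
 * real xe helpers and types.
 */
#include <stdbool.h>
#include <stdio.h>

struct mock_gt {
        bool guc_submission_enabled;  /* mirrors gt->uc.guc.submission_state.enabled */
};

/* Stand-in for xe_gt_tlb_invalidation_guc(): returns a positive seqno. */
static int mock_tlb_invalidation_guc(struct mock_gt *gt)
{
        static int next_seqno = 1;

        (void)gt;
        return next_seqno++;
}

/* Stand-in for xe_gt_tlb_invalidation_wait(). */
static void mock_tlb_invalidation_wait(struct mock_gt *gt, int seqno)
{
        (void)gt;
        printf("wait for GuC TLB invalidation seqno %d\n", seqno);
}

/* Stand-in for the legacy MMIO-based GGTT invalidation. */
static void mock_mmio_invalidate(struct mock_gt *gt)
{
        (void)gt;
        printf("issue MMIO GGTT invalidation\n");
}

static void mock_ggtt_invalidate(struct mock_gt *gt)
{
        if (gt->guc_submission_enabled) {
                /* GuC is up: it owns TLB invalidations, wait on the seqno. */
                int seqno = mock_tlb_invalidation_guc(gt);

                if (seqno > 0)
                        mock_tlb_invalidation_wait(gt, seqno);
        } else {
                /* GuC not running (e.g. after sanitize on unload): MMIO fallback. */
                mock_mmio_invalidate(gt);
        }
}

int main(void)
{
        struct mock_gt gt = { .guc_submission_enabled = true };

        mock_ggtt_invalidate(&gt);      /* GuC-backed path */
        gt.guc_submission_enabled = false;
        mock_ggtt_invalidate(&gt);      /* fallback, as after xe_gt_sanitize() */
        return 0;
}

In the driver itself, the same enabled flag that selects the GuC path here is cleared by xe_gt_sanitize() on unload, suspend and reset, which is what prevents GuC-based invalidations from being issued after the firmware is torn down.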
Diffstat (limited to 'drivers/gpu/drm/xe')
-rw-r--r--   drivers/gpu/drm/xe/xe_device.c              | 14
-rw-r--r--   drivers/gpu/drm/xe/xe_ggtt.c                | 12
-rw-r--r--   drivers/gpu/drm/xe/xe_gt.c                  | 13
-rw-r--r--   drivers/gpu/drm/xe/xe_gt.h                  |  1
-rw-r--r--   drivers/gpu/drm/xe/xe_gt_pagefault.c        |  2
-rw-r--r--   drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c | 43
-rw-r--r--   drivers/gpu/drm/xe/xe_gt_tlb_invalidation.h |  7
-rw-r--r--   drivers/gpu/drm/xe/xe_guc.c                 |  2
-rw-r--r--   drivers/gpu/drm/xe/xe_guc_types.h           |  2
-rw-r--r--   drivers/gpu/drm/xe/xe_pt.c                  |  2
-rw-r--r--   drivers/gpu/drm/xe/xe_uc.c                  |  9
-rw-r--r--   drivers/gpu/drm/xe/xe_uc.h                  |  1
-rw-r--r--   drivers/gpu/drm/xe/xe_vm.c                  |  2
13 files changed, 89 insertions, 21 deletions
diff --git a/drivers/gpu/drm/xe/xe_device.c b/drivers/gpu/drm/xe/xe_device.c
index 98f08cd9d4b0..8fe0324ccef3 100644
--- a/drivers/gpu/drm/xe/xe_device.c
+++ b/drivers/gpu/drm/xe/xe_device.c
@@ -215,6 +215,16 @@ err_put:
         return ERR_PTR(err);
 }
 
+static void xe_device_sanitize(struct drm_device *drm, void *arg)
+{
+        struct xe_device *xe = arg;
+        struct xe_gt *gt;
+        u8 id;
+
+        for_each_gt(gt, xe, id)
+                xe_gt_sanitize(gt);
+}
+
 int xe_device_probe(struct xe_device *xe)
 {
         struct xe_gt *gt;
@@ -274,6 +284,10 @@ int xe_device_probe(struct xe_device *xe)
 
         xe_debugfs_register(xe);
 
+        err = drmm_add_action_or_reset(&xe->drm, xe_device_sanitize, xe);
+        if (err)
+                return err;
+
         return 0;
 
 err_irq_shutdown:
diff --git a/drivers/gpu/drm/xe/xe_ggtt.c b/drivers/gpu/drm/xe/xe_ggtt.c
index baa080cd1133..20450ed8400b 100644
--- a/drivers/gpu/drm/xe/xe_ggtt.c
+++ b/drivers/gpu/drm/xe/xe_ggtt.c
@@ -13,6 +13,7 @@
 #include "xe_device.h"
 #include "xe_bo.h"
 #include "xe_gt.h"
+#include "xe_gt_tlb_invalidation.h"
 #include "xe_map.h"
 #include "xe_mmio.h"
 #include "xe_wopcm.h"
@@ -200,10 +201,17 @@ void xe_ggtt_invalidate(struct xe_gt *gt)
          * therefore flushing WC buffers. Is that really true here?
          */
         xe_mmio_write32(gt, GFX_FLSH_CNTL_GEN6.reg, GFX_FLSH_CNTL_EN);
-        if (xe_device_guc_submission_enabled(gt_to_xe(gt))) {
+
+        if (gt->uc.guc.submission_state.enabled) {
+                int seqno;
+
+                seqno = xe_gt_tlb_invalidation_guc(gt);
+                XE_WARN_ON(seqno <= 0);
+                if (seqno > 0)
+                        xe_gt_tlb_invalidation_wait(gt, seqno);
+        } else if (xe_device_guc_submission_enabled(gt_to_xe(gt))) {
                 struct xe_device *xe = gt_to_xe(gt);
 
-                /* TODO: also use vfunc here */
                 if (xe->info.platform == XE_PVC) {
                         xe_mmio_write32(gt, PVC_GUC_TLB_INV_DESC1.reg,
                                         PVC_GUC_TLB_INV_DESC1_INVALIDATE);
diff --git a/drivers/gpu/drm/xe/xe_gt.c b/drivers/gpu/drm/xe/xe_gt.c
index 28bbb3159531..0e0d5cadb3e7 100644
--- a/drivers/gpu/drm/xe/xe_gt.c
+++ b/drivers/gpu/drm/xe/xe_gt.c
@@ -196,6 +196,15 @@ static int gt_ttm_mgr_init(struct xe_gt *gt)
         return 0;
 }
 
+void xe_gt_sanitize(struct xe_gt *gt)
+{
+        /*
+         * FIXME: if xe_uc_sanitize is called here, on TGL driver will not
+         * reload
+         */
+        gt->uc.guc.submission_state.enabled = false;
+}
+
 static void gt_fini(struct drm_device *drm, void *arg)
 {
         struct xe_gt *gt = arg;
@@ -662,6 +671,8 @@ static int gt_reset(struct xe_gt *gt)
 
         drm_info(&xe->drm, "GT reset started\n");
 
+        xe_gt_sanitize(gt);
+
         xe_device_mem_access_get(gt_to_xe(gt));
         err = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
         if (err)
@@ -742,6 +753,8 @@ int xe_gt_suspend(struct xe_gt *gt)
         if (!xe_device_guc_submission_enabled(gt_to_xe(gt)))
                 return -ENODEV;
 
+        xe_gt_sanitize(gt);
+
         xe_device_mem_access_get(gt_to_xe(gt));
         err = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
         if (err)
diff --git a/drivers/gpu/drm/xe/xe_gt.h b/drivers/gpu/drm/xe/xe_gt.h
index 5dc08a993cfe..5635f2803170 100644
--- a/drivers/gpu/drm/xe/xe_gt.h
+++ b/drivers/gpu/drm/xe/xe_gt.h
@@ -26,6 +26,7 @@ int xe_gt_suspend(struct xe_gt *gt);
 int xe_gt_resume(struct xe_gt *gt);
 void xe_gt_reset_async(struct xe_gt *gt);
 void xe_gt_migrate_wait(struct xe_gt *gt);
+void xe_gt_sanitize(struct xe_gt *gt);
 
 struct xe_gt *xe_find_full_gt(struct xe_gt *gt);
 
diff --git a/drivers/gpu/drm/xe/xe_gt_pagefault.c b/drivers/gpu/drm/xe/xe_gt_pagefault.c
index e1a5a3a70c92..ce79eb48feb8 100644
--- a/drivers/gpu/drm/xe/xe_gt_pagefault.c
+++ b/drivers/gpu/drm/xe/xe_gt_pagefault.c
@@ -240,7 +240,7 @@ unlock_vm:
                 goto retry_userptr;
 
         if (!ret) {
-                ret = xe_gt_tlb_invalidation(gt, NULL, vma);
+                ret = xe_gt_tlb_invalidation_vma(gt, NULL, vma);
                 if (ret >= 0)
                         ret = 0;
         }
diff --git a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c
index 0b37cd09a59a..f6a2dd26cad4 100644
--- a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c
+++ b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c
@@ -135,8 +135,34 @@ static int send_tlb_invalidation(struct xe_guc *guc,
         return ret;
 }
 
+#define MAKE_INVAL_OP(type)        ((type << XE_GUC_TLB_INVAL_TYPE_SHIFT) | \
+                XE_GUC_TLB_INVAL_MODE_HEAVY << XE_GUC_TLB_INVAL_MODE_SHIFT | \
+                XE_GUC_TLB_INVAL_FLUSH_CACHE)
+
 /**
- * xe_gt_tlb_invalidation - Issue a TLB invalidation on this GT
+ * xe_gt_tlb_invalidation_guc - Issue a TLB invalidation on this GT for the GuC
+ * @gt: graphics tile
+ *
+ * Issue a TLB invalidation for the GuC. Completion of TLB is asynchronous and
+ * caller can use seqno + xe_gt_tlb_invalidation_wait to wait for completion.
+ *
+ * Return: Seqno which can be passed to xe_gt_tlb_invalidation_wait on success,
+ * negative error code on error.
+ */
+int xe_gt_tlb_invalidation_guc(struct xe_gt *gt)
+{
+        u32 action[] = {
+                XE_GUC_ACTION_TLB_INVALIDATION,
+                0,  /* seqno, replaced in send_tlb_invalidation */
+                MAKE_INVAL_OP(XE_GUC_TLB_INVAL_GUC),
+        };
+
+        return send_tlb_invalidation(&gt->uc.guc, NULL, action,
+                                     ARRAY_SIZE(action));
+}
+
+/**
+ * xe_gt_tlb_invalidation_vma - Issue a TLB invalidation on this GT for a VMA
  * @gt: graphics tile
  * @fence: invalidation fence which will be signal on TLB invalidation
  * completion, can be NULL
@@ -150,9 +176,9 @@ static int send_tlb_invalidation(struct xe_guc *guc,
  * Return: Seqno which can be passed to xe_gt_tlb_invalidation_wait on success,
  * negative error code on error.
  */
-int xe_gt_tlb_invalidation(struct xe_gt *gt,
-                           struct xe_gt_tlb_invalidation_fence *fence,
-                           struct xe_vma *vma)
+int xe_gt_tlb_invalidation_vma(struct xe_gt *gt,
+                               struct xe_gt_tlb_invalidation_fence *fence,
+                               struct xe_vma *vma)
 {
         struct xe_device *xe = gt_to_xe(gt);
 #define MAX_TLB_INVALIDATION_LEN        7
@@ -161,12 +187,9 @@ int xe_gt_tlb_invalidation(struct xe_gt *gt,
 
         XE_BUG_ON(!vma);
 
+        action[len++] = XE_GUC_ACTION_TLB_INVALIDATION;
+        action[len++] = 0; /* seqno, replaced in send_tlb_invalidation */
         if (!xe->info.has_range_tlb_invalidation) {
-                action[len++] = XE_GUC_ACTION_TLB_INVALIDATION;
-                action[len++] = 0; /* seqno, replaced in send_tlb_invalidation */
-#define MAKE_INVAL_OP(type) ((type << XE_GUC_TLB_INVAL_TYPE_SHIFT) | \
-                XE_GUC_TLB_INVAL_MODE_HEAVY << XE_GUC_TLB_INVAL_MODE_SHIFT | \
-                XE_GUC_TLB_INVAL_FLUSH_CACHE)
                 action[len++] = MAKE_INVAL_OP(XE_GUC_TLB_INVAL_FULL);
         } else {
                 u64 start = vma->start;
@@ -205,8 +228,6 @@ int xe_gt_tlb_invalidation(struct xe_gt *gt,
                 XE_BUG_ON(length & GENMASK(ilog2(SZ_16M) - 1, ilog2(SZ_2M) + 1));
                 XE_BUG_ON(!IS_ALIGNED(start, length));
 
-                action[len++] = XE_GUC_ACTION_TLB_INVALIDATION;
-                action[len++] = 0; /* seqno, replaced in send_tlb_invalidation */
                 action[len++] = MAKE_INVAL_OP(XE_GUC_TLB_INVAL_PAGE_SELECTIVE);
                 action[len++] = vma->vm->usm.asid;
                 action[len++] = lower_32_bits(start);
diff --git a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.h b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.h
index b4c4f717bc8a..b333c1709397 100644
--- a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.h
+++ b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.h
@@ -16,9 +16,10 @@ struct xe_vma;
 
 int xe_gt_tlb_invalidation_init(struct xe_gt *gt);
 void xe_gt_tlb_invalidation_reset(struct xe_gt *gt);
-int xe_gt_tlb_invalidation(struct xe_gt *gt,
-                           struct xe_gt_tlb_invalidation_fence *fence,
-                           struct xe_vma *vma);
+int xe_gt_tlb_invalidation_guc(struct xe_gt *gt);
+int xe_gt_tlb_invalidation_vma(struct xe_gt *gt,
+                               struct xe_gt_tlb_invalidation_fence *fence,
+                               struct xe_vma *vma);
 int xe_gt_tlb_invalidation_wait(struct xe_gt *gt, int seqno);
 
 int xe_guc_tlb_invalidation_done_handler(struct xe_guc *guc, u32 *msg, u32 len);
diff --git a/drivers/gpu/drm/xe/xe_guc.c b/drivers/gpu/drm/xe/xe_guc.c
index 88a3a96da084..5cdfdfd0de40 100644
--- a/drivers/gpu/drm/xe/xe_guc.c
+++ b/drivers/gpu/drm/xe/xe_guc.c
@@ -309,6 +309,7 @@ int xe_guc_init_post_hwconfig(struct xe_guc *guc)
 int xe_guc_post_load_init(struct xe_guc *guc)
 {
         xe_guc_ads_populate_post_load(&guc->ads);
+        guc->submission_state.enabled = true;
 
         return 0;
 }
@@ -795,6 +796,7 @@ void xe_guc_sanitize(struct xe_guc *guc)
 {
         xe_uc_fw_change_status(&guc->fw, XE_UC_FIRMWARE_LOADABLE);
         xe_guc_ct_disable(&guc->ct);
+        guc->submission_state.enabled = false;
 }
 
 int xe_guc_reset_prepare(struct xe_guc *guc)
diff --git a/drivers/gpu/drm/xe/xe_guc_types.h b/drivers/gpu/drm/xe/xe_guc_types.h
index c2a484282ef2..ac7eec28934d 100644
--- a/drivers/gpu/drm/xe/xe_guc_types.h
+++ b/drivers/gpu/drm/xe/xe_guc_types.h
@@ -60,6 +60,8 @@ struct xe_guc {
                         /** @patch: patch version of GuC submission */
                         u32 patch;
                 } version;
+                /** @enabled: submission is enabled */
+                bool enabled;
         } submission_state;
         /** @hwconfig: Hardware config state */
         struct {
diff --git a/drivers/gpu/drm/xe/xe_pt.c b/drivers/gpu/drm/xe/xe_pt.c
index cde75708d843..3333b413686e 100644
--- a/drivers/gpu/drm/xe/xe_pt.c
+++ b/drivers/gpu/drm/xe/xe_pt.c
@@ -1506,7 +1506,7 @@ static void invalidation_fence_work_func(struct work_struct *w)
                 container_of(w, struct invalidation_fence, work);
 
         trace_xe_gt_tlb_invalidation_fence_work_func(&ifence->base);
-        xe_gt_tlb_invalidation(ifence->gt, &ifence->base, ifence->vma);
+        xe_gt_tlb_invalidation_vma(ifence->gt, &ifence->base, ifence->vma);
 }
 
 static int invalidation_fence_init(struct xe_gt *gt,
diff --git a/drivers/gpu/drm/xe/xe_uc.c b/drivers/gpu/drm/xe/xe_uc.c
index 938d14698003..7886c8b85397 100644
--- a/drivers/gpu/drm/xe/xe_uc.c
+++ b/drivers/gpu/drm/xe/xe_uc.c
@@ -88,10 +88,15 @@ static int uc_reset(struct xe_uc *uc)
         return 0;
 }
 
-static int uc_sanitize(struct xe_uc *uc)
+void xe_uc_sanitize(struct xe_uc *uc)
 {
         xe_huc_sanitize(&uc->huc);
         xe_guc_sanitize(&uc->guc);
+}
+
+static int xe_uc_sanitize_reset(struct xe_uc *uc)
+{
+        xe_uc_sanitize(uc);
 
         return uc_reset(uc);
 }
@@ -129,7 +134,7 @@ int xe_uc_init_hw(struct xe_uc *uc)
         if (!xe_device_guc_submission_enabled(uc_to_xe(uc)))
                 return 0;
 
-        ret = uc_sanitize(uc);
+        ret = xe_uc_sanitize_reset(uc);
         if (ret)
                 return ret;
 
diff --git a/drivers/gpu/drm/xe/xe_uc.h b/drivers/gpu/drm/xe/xe_uc.h
index 380e722f95fc..d6efc9ef00d3 100644
--- a/drivers/gpu/drm/xe/xe_uc.h
+++ b/drivers/gpu/drm/xe/xe_uc.h
@@ -17,5 +17,6 @@ void xe_uc_stop_prepare(struct xe_uc *uc);
 int xe_uc_stop(struct xe_uc *uc);
 int xe_uc_start(struct xe_uc *uc);
 int xe_uc_suspend(struct xe_uc *uc);
+void xe_uc_sanitize(struct xe_uc *uc);
 
 #endif
diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
index 8ba548e49add..4bbb0d0b0928 100644
--- a/drivers/gpu/drm/xe/xe_vm.c
+++ b/drivers/gpu/drm/xe/xe_vm.c
@@ -3356,7 +3356,7 @@ int xe_vm_invalidate_vma(struct xe_vma *vma)
                 if (xe_pt_zap_ptes(gt, vma)) {
                         gt_needs_invalidate |= BIT(id);
                         xe_device_wmb(xe);
-                        seqno[id] = xe_gt_tlb_invalidation(gt, NULL, vma);
+                        seqno[id] = xe_gt_tlb_invalidation_vma(gt, NULL, vma);
                         if (seqno[id] < 0)
                                 return seqno[id];
                 }