diff options
| author | Nitin Gote <nitin.r.gote@intel.com> | 2026-03-04 15:38:01 +0300 |
|---|---|---|
| committer | Matthew Auld <matthew.auld@intel.com> | 2026-03-12 12:37:40 +0300 |
| commit | be97fd06458d66a53aefb6d9429db0df734c81c0 (patch) | |
| tree | f053f10366e2eee262d13c855a2ead96eea4548d | |
| parent | 2270bd7124f4d25497d58c293cd40ea014ddaf01 (diff) | |
| download | linux-be97fd06458d66a53aefb6d9429db0df734c81c0.tar.xz | |
drm/xe: add xe_migrate_resolve wrapper and is_vram_resolve support
Introduce an internal __xe_migrate_copy(..., is_vram_resolve) path and
expose a small wrapper xe_migrate_resolve() that calls it with
is_vram_resolve=true.
For resolve/decompression operations we must ensure the copy code uses
the compression PAT index when appropriate; this change centralizes that
behavior and allows callers to schedule a resolve (decompress) operation
via the migrate API.
v3: Fix kernel-doc warnings
v2: (Matt)
- Simplify xe_migrate_resolve(), use single BO/resource;
remove copy_only_ccs argument as it's always false.
Cc: Matthew Brost <matthew.brost@intel.com>
Cc: Matthew Auld <matthew.auld@intel.com>
Reviewed-by: Matthew Brost <matthew.brost@intel.com>
Signed-off-by: Nitin Gote <nitin.r.gote@intel.com>
Signed-off-by: Matthew Auld <matthew.auld@intel.com>
Link: https://patch.msgid.link/20260304123758.3050386-7-nitin.r.gote@intel.com
| -rw-r--r-- | drivers/gpu/drm/xe/xe_migrate.c | 90 | ||||
| -rw-r--r-- | drivers/gpu/drm/xe/xe_migrate.h | 4 |
2 files changed, 67 insertions, 27 deletions
diff --git a/drivers/gpu/drm/xe/xe_migrate.c b/drivers/gpu/drm/xe/xe_migrate.c index 283d55fef6b8..bcd9aa595bcf 100644 --- a/drivers/gpu/drm/xe/xe_migrate.c +++ b/drivers/gpu/drm/xe/xe_migrate.c @@ -860,31 +860,13 @@ static u32 xe_migrate_ccs_copy(struct xe_migrate *m, return flush_flags; } -/** - * xe_migrate_copy() - Copy content of TTM resources. - * @m: The migration context. - * @src_bo: The buffer object @src is currently bound to. - * @dst_bo: If copying between resources created for the same bo, set this to - * the same value as @src_bo. If copying between buffer objects, set it to - * the buffer object @dst is currently bound to. - * @src: The source TTM resource. - * @dst: The dst TTM resource. - * @copy_only_ccs: If true copy only CCS metadata - * - * Copies the contents of @src to @dst: On flat CCS devices, - * the CCS metadata is copied as well if needed, or if not present, - * the CCS metadata of @dst is cleared for security reasons. - * - * Return: Pointer to a dma_fence representing the last copy batch, or - * an error pointer on failure. If there is a failure, any copy operation - * started by the function call has been synced. 
- */ -struct dma_fence *xe_migrate_copy(struct xe_migrate *m, - struct xe_bo *src_bo, - struct xe_bo *dst_bo, - struct ttm_resource *src, - struct ttm_resource *dst, - bool copy_only_ccs) +static struct dma_fence *__xe_migrate_copy(struct xe_migrate *m, + struct xe_bo *src_bo, + struct xe_bo *dst_bo, + struct ttm_resource *src, + struct ttm_resource *dst, + bool copy_only_ccs, + bool is_vram_resolve) { struct xe_gt *gt = m->tile->primary_gt; struct xe_device *xe = gt_to_xe(gt); @@ -905,8 +887,15 @@ struct dma_fence *xe_migrate_copy(struct xe_migrate *m, bool copy_ccs = xe_device_has_flat_ccs(xe) && xe_bo_needs_ccs_pages(src_bo) && xe_bo_needs_ccs_pages(dst_bo); bool copy_system_ccs = copy_ccs && (!src_is_vram || !dst_is_vram); - bool use_comp_pat = type_device && xe_device_has_flat_ccs(xe) && - GRAPHICS_VER(xe) >= 20 && src_is_vram && !dst_is_vram; + + /* + * For decompression operation, always use the compression PAT index. + * Otherwise, only use the compression PAT index for device memory + * when copying from VRAM to system memory. + */ + bool use_comp_pat = is_vram_resolve || (type_device && + xe_device_has_flat_ccs(xe) && + GRAPHICS_VER(xe) >= 20 && src_is_vram && !dst_is_vram); /* Copying CCS between two different BOs is not supported yet. */ if (XE_WARN_ON(copy_ccs && src_bo != dst_bo)) @@ -1066,6 +1055,53 @@ err_sync: } /** + * xe_migrate_copy() - Copy content of TTM resources. + * @m: The migration context. + * @src_bo: The buffer object @src is currently bound to. + * @dst_bo: If copying between resources created for the same bo, set this to + * the same value as @src_bo. If copying between buffer objects, set it to + * the buffer object @dst is currently bound to. + * @src: The source TTM resource. + * @dst: The dst TTM resource. 
+ * @copy_only_ccs: If true copy only CCS metadata + * + * Copies the contents of @src to @dst: On flat CCS devices, + * the CCS metadata is copied as well if needed, or if not present, + * the CCS metadata of @dst is cleared for security reasons. + * + * Return: Pointer to a dma_fence representing the last copy batch, or + * an error pointer on failure. If there is a failure, any copy operation + * started by the function call has been synced. + */ +struct dma_fence *xe_migrate_copy(struct xe_migrate *m, + struct xe_bo *src_bo, + struct xe_bo *dst_bo, + struct ttm_resource *src, + struct ttm_resource *dst, + bool copy_only_ccs) +{ + return __xe_migrate_copy(m, src_bo, dst_bo, src, dst, copy_only_ccs, false); +} + +/** + * xe_migrate_resolve() - Resolve and decompress a buffer object if required. + * @m: The migrate context + * @bo: The buffer object to resolve + * @res: The TTM resource @bo is currently bound to + * + * Wrapper around __xe_migrate_copy() with is_vram_resolve set to true + * to trigger decompression if needed. + * + * Return: A dma_fence that signals on completion, or an ERR_PTR on failure. + */ +struct dma_fence *xe_migrate_resolve(struct xe_migrate *m, + struct xe_bo *bo, + struct ttm_resource *res) +{ + return __xe_migrate_copy(m, bo, bo, res, res, false, true); +} + +/** + * xe_migrate_lrc() - Get the LRC from migrate context. + * @migrate: Migrate context. + * + * diff --git a/drivers/gpu/drm/xe/xe_migrate.h b/drivers/gpu/drm/xe/xe_migrate.h index 1522afb37dcf..169279d9d8c2 100644 --- a/drivers/gpu/drm/xe/xe_migrate.h +++ b/drivers/gpu/drm/xe/xe_migrate.h @@ -132,6 +132,10 @@ struct dma_fence *xe_migrate_copy(struct xe_migrate *m, struct ttm_resource *dst, bool copy_only_ccs); +struct dma_fence *xe_migrate_resolve(struct xe_migrate *m, + struct xe_bo *bo, + struct ttm_resource *res); + int xe_migrate_ccs_rw_copy(struct xe_tile *tile, struct xe_exec_queue *q, struct xe_bo *src_bo, enum xe_sriov_vf_ccs_rw_ctxs read_write); |
