author     fred gao <fred.gao@intel.com>           2017-08-18 10:41:07 +0300
committer  Zhenyu Wang <zhenyuw@linux.intel.com>   2017-09-08 09:21:14 +0300
commit     a3cfdca920b274618d6046d85a474308ee28e5bb
tree       37b523f4a2fbdce4eb74a97b90596edc5e500551 /drivers/gpu/drm/i915/gvt/scheduler.c
parent     0a53bc07f044c4c51eb0dc1386c504db80ca8d00
drm/i915/gvt: Add error handling for intel_gvt_scan_and_shadow_workload
When an error occurs after shadow_indirect_ctx, do the proper cleanup and
roll the shadowed indirect context back to its original state before the
workload is abandoned.
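
The fix follows the usual layered-goto unwind style: each setup step that fails jumps to a label which releases only what was acquired before it, so the workload is left exactly as it was before shadowing started. Below is a minimal, self-contained sketch of that pattern; the acquire_*/release_* helpers are purely illustrative and are not functions from the i915/GVT code.

/* Hypothetical sketch of the layered-goto cleanup this patch applies;
 * none of these helpers exist in drivers/gpu/drm/i915/gvt.
 */
#include <stdio.h>

static int acquire_a(void) { return 0; }	/* e.g. scan/shadow the ring buffer */
static int acquire_b(void) { return 0; }	/* e.g. shadow the indirect (wa) context */
static int acquire_c(void) { return -1; }	/* e.g. pin the shadow context; fails here */

static void release_b(void) { puts("release b"); }
static void release_a(void) { puts("release a"); }

static int shadow_workload(void)
{
	int ret;

	ret = acquire_a();
	if (ret)
		goto err_ret;		/* nothing acquired yet, just return */

	ret = acquire_b();
	if (ret)
		goto err_release_a;	/* undo step a only */

	ret = acquire_c();
	if (ret)
		goto err_release_b;	/* undo steps b and a, in reverse order */

	return 0;

err_release_b:
	release_b();
err_release_a:
	release_a();
err_ret:
	return ret;			/* caller sees the original state */
}

int main(void)
{
	return shadow_workload() ? 1 : 0;
}

Because the labels fall through in reverse acquisition order, each additional resource costs one label and one release call, which is why the single out: label becomes err_unpin/err_shadow/err_scan in the patch below.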
v2:
- split the several intermixed error paths for easier review. (Zhenyu)
v3:
- no return value check for the cleanup functions. (Changbin)
v4:
- expose and reuse the existing release_shadow_wa_ctx. (Zhenyu)
v5:
- move the release function to scheduler.c file. (Zhenyu)
v6:
- move error handling code of intel_gvt_scan_and_shadow_workload
to here. (Zhenyu)
Signed-off-by: fred gao <fred.gao@intel.com>
Signed-off-by: Zhenyu Wang <zhenyuw@linux.intel.com>
Diffstat (limited to 'drivers/gpu/drm/i915/gvt/scheduler.c')
-rw-r--r--   drivers/gpu/drm/i915/gvt/scheduler.c   28
1 file changed, 21 insertions(+), 7 deletions(-)
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index 0e480f59f659..29171961af5e 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -229,6 +229,15 @@ static int copy_workload_to_ring_buffer(struct intel_vgpu_workload *workload)
 	return 0;
 }
 
+void release_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
+{
+	if (!wa_ctx->indirect_ctx.obj)
+		return;
+
+	i915_gem_object_unpin_map(wa_ctx->indirect_ctx.obj);
+	i915_gem_object_put(wa_ctx->indirect_ctx.obj);
+}
+
 /**
  * intel_gvt_scan_and_shadow_workload - audit the workload by scanning and
  * shadow it as well, include ringbuffer,wa_ctx and ctx.
@@ -263,13 +272,13 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
 
 	ret = intel_gvt_scan_and_shadow_ringbuffer(workload);
 	if (ret)
-		goto out;
+		goto err_scan;
 
 	if ((workload->ring_id == RCS) &&
 	    (workload->wa_ctx.indirect_ctx.size != 0)) {
 		ret = intel_gvt_scan_and_shadow_wa_ctx(&workload->wa_ctx);
 		if (ret)
-			goto out;
+			goto err_scan;
 	}
 
 	/* pin shadow context by gvt even the shadow context will be pinned
@@ -283,18 +292,18 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
 	if (IS_ERR(ring)) {
 		ret = PTR_ERR(ring);
 		gvt_vgpu_err("fail to pin shadow context\n");
-		goto out;
+		goto err_shadow;
 	}
 
 	ret = populate_shadow_context(workload);
 	if (ret)
-		goto out;
+		goto err_unpin;
 
 	rq = i915_gem_request_alloc(dev_priv->engine[ring_id], shadow_ctx);
 	if (IS_ERR(rq)) {
 		gvt_vgpu_err("fail to allocate gem request\n");
 		ret = PTR_ERR(rq);
-		goto out;
+		goto err_unpin;
 	}
 
 	gvt_dbg_sched("ring id %d get i915 gem request %p\n", ring_id, rq);
@@ -302,10 +311,15 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
 	workload->req = i915_gem_request_get(rq);
 	ret = copy_workload_to_ring_buffer(workload);
 	if (ret)
-		goto out;
+		goto err_unpin;
 
 	workload->shadowed = true;
+	return 0;
 
-out:
+err_unpin:
+	engine->context_unpin(engine, shadow_ctx);
+err_shadow:
+	release_shadow_wa_ctx(&workload->wa_ctx);
+err_scan:
 	return ret;
 }
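
One detail worth noting in release_shadow_wa_ctx() is the early return when indirect_ctx.obj is NULL: on the err_shadow path the indirect context may never have been shadowed at all (the wa_ctx scan only runs for RCS with a non-zero indirect_ctx.size), so the release has to be a safe no-op in that case. A standalone sketch of that guarded-release idea, with illustrative names that do not come from the kernel:

/* Hypothetical sketch of a guarded release helper; the struct and
 * functions are illustrative stand-ins, not the i915/GVT types.
 */
#include <stdio.h>
#include <stdlib.h>

struct shadow_obj {
	void *obj;	/* NULL until something is actually shadowed */
};

static void release_shadow(struct shadow_obj *s)
{
	if (!s->obj)		/* never shadowed: releasing is a no-op */
		return;

	free(s->obj);		/* stands in for unpin_map + put */
	s->obj = NULL;
}

int main(void)
{
	struct shadow_obj shadowed = { .obj = malloc(16) };
	struct shadow_obj skipped  = { .obj = NULL };

	release_shadow(&shadowed);	/* frees the shadowed object */
	release_shadow(&skipped);	/* no-op, like err_shadow on a non-RCS ring */
	puts("both release paths are safe to take unconditionally");
	return 0;
}

This guard is what lets the error labels call release_shadow_wa_ctx() unconditionally instead of tracking whether the wa_ctx step actually ran.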