Diffstat (limited to 'drivers/gpu/drm/i915')
-rw-r--r--  drivers/gpu/drm/i915/gvt/scheduler.c  | 64
-rw-r--r--  drivers/gpu/drm/i915/gvt/scheduler.h  |  1
-rw-r--r--  drivers/gpu/drm/i915/i915_debugfs.c   | 12
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_gtt.c   | 15
-rw-r--r--  drivers/gpu/drm/i915/i915_gpu_error.c | 23
-rw-r--r--  drivers/gpu/drm/i915/i915_sysfs.c     |  4
-rw-r--r--  drivers/gpu/drm/i915/intel_lrc.c      |  3
-rw-r--r--  drivers/gpu/drm/i915/intel_psr.c      |  6
8 files changed, 89 insertions(+), 39 deletions(-)
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index 1ad8c5e1455d..5567ddc7760f 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -356,6 +356,33 @@ static int set_context_ppgtt_from_shadow(struct intel_vgpu_workload *workload,
 	return 0;
 }
 
+static int
+intel_gvt_workload_req_alloc(struct intel_vgpu_workload *workload)
+{
+	struct intel_vgpu *vgpu = workload->vgpu;
+	struct intel_vgpu_submission *s = &vgpu->submission;
+	struct i915_gem_context *shadow_ctx = s->shadow_ctx;
+	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+	struct intel_engine_cs *engine = dev_priv->engine[workload->ring_id];
+	struct i915_request *rq;
+	int ret = 0;
+
+	lockdep_assert_held(&dev_priv->drm.struct_mutex);
+
+	if (workload->req)
+		goto out;
+
+	rq = i915_request_alloc(engine, shadow_ctx);
+	if (IS_ERR(rq)) {
+		gvt_vgpu_err("fail to allocate gem request\n");
+		ret = PTR_ERR(rq);
+		goto out;
+	}
+	workload->req = i915_request_get(rq);
+out:
+	return ret;
+}
+
 /**
  * intel_gvt_scan_and_shadow_workload - audit the workload by scanning and
  * shadow it as well, include ringbuffer,wa_ctx and ctx.
@@ -372,12 +399,11 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
 	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
 	struct intel_engine_cs *engine = dev_priv->engine[workload->ring_id];
 	struct intel_context *ce;
-	struct i915_request *rq;
 	int ret;
 
 	lockdep_assert_held(&dev_priv->drm.struct_mutex);
 
-	if (workload->req)
+	if (workload->shadow)
 		return 0;
 
 	ret = set_context_ppgtt_from_shadow(workload, shadow_ctx);
@@ -417,22 +443,8 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
 		goto err_shadow;
 	}
 
-	rq = i915_request_alloc(engine, shadow_ctx);
-	if (IS_ERR(rq)) {
-		gvt_vgpu_err("fail to allocate gem request\n");
-		ret = PTR_ERR(rq);
-		goto err_shadow;
-	}
-	workload->req = i915_request_get(rq);
-
-	ret = populate_shadow_context(workload);
-	if (ret)
-		goto err_req;
-
+	workload->shadow = true;
 	return 0;
-err_req:
-	rq = fetch_and_zero(&workload->req);
-	i915_request_put(rq);
 err_shadow:
 	release_shadow_wa_ctx(&workload->wa_ctx);
 err_unpin:
@@ -671,23 +683,31 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
 	mutex_lock(&vgpu->vgpu_lock);
 	mutex_lock(&dev_priv->drm.struct_mutex);
 
+	ret = intel_gvt_workload_req_alloc(workload);
+	if (ret)
+		goto err_req;
+
 	ret = intel_gvt_scan_and_shadow_workload(workload);
 	if (ret)
 		goto out;
 
-	ret = prepare_workload(workload);
+	ret = populate_shadow_context(workload);
+	if (ret) {
+		release_shadow_wa_ctx(&workload->wa_ctx);
+		goto out;
+	}
 
+	ret = prepare_workload(workload);
 out:
-	if (ret)
-		workload->status = ret;
-
 	if (!IS_ERR_OR_NULL(workload->req)) {
 		gvt_dbg_sched("ring id %d submit workload to i915 %p\n",
 			      ring_id, workload->req);
 		i915_request_add(workload->req);
 		workload->dispatched = true;
 	}
-
+err_req:
+	if (ret)
+		workload->status = ret;
 	mutex_unlock(&dev_priv->drm.struct_mutex);
 	mutex_unlock(&vgpu->vgpu_lock);
 	return ret;
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.h b/drivers/gpu/drm/i915/gvt/scheduler.h
index ca5529d0e48e..2065cba59aab 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.h
+++ b/drivers/gpu/drm/i915/gvt/scheduler.h
@@ -83,6 +83,7 @@ struct intel_vgpu_workload {
 	struct i915_request *req;
 	/* if this workload has been dispatched to i915? */
 	bool dispatched;
+	bool shadow;      /* if workload has done shadow of guest request */
 	int status;
 
 	struct intel_vgpu_mm *shadow_mm;
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 38dcee1ca062..40a61ef9aac1 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -984,8 +984,8 @@ static int i915_gpu_info_open(struct inode *inode, struct file *file)
 	intel_runtime_pm_get(i915);
 	gpu = i915_capture_gpu_state(i915);
 	intel_runtime_pm_put(i915);
-	if (!gpu)
-		return -ENOMEM;
+	if (IS_ERR(gpu))
+		return PTR_ERR(gpu);
 
 	file->private_data = gpu;
 	return 0;
@@ -1018,7 +1018,13 @@ i915_error_state_write(struct file *filp,
 
 static int i915_error_state_open(struct inode *inode, struct file *file)
 {
-	file->private_data = i915_first_error_state(inode->i_private);
+	struct i915_gpu_state *error;
+
+	error = i915_first_error_state(inode->i_private);
+	if (IS_ERR(error))
+		return PTR_ERR(error);
+
+	file->private_data = error;
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index add1fe7aeb93..bd17dd1f5da5 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -2075,6 +2075,7 @@ static struct i915_vma *pd_vma_create(struct gen6_hw_ppgtt *ppgtt, int size)
 int gen6_ppgtt_pin(struct i915_hw_ppgtt *base)
 {
 	struct gen6_hw_ppgtt *ppgtt = to_gen6_ppgtt(base);
+	int err;
 
 	/*
 	 * Workaround the limited maximum vma->pin_count and the aliasing_ppgtt
@@ -2090,9 +2091,17 @@ int gen6_ppgtt_pin(struct i915_hw_ppgtt *base)
 	 * allocator works in address space sizes, so it's multiplied by page
 	 * size. We allocate at the top of the GTT to avoid fragmentation.
 	 */
-	return i915_vma_pin(ppgtt->vma,
-			    0, GEN6_PD_ALIGN,
-			    PIN_GLOBAL | PIN_HIGH);
+	err = i915_vma_pin(ppgtt->vma,
+			   0, GEN6_PD_ALIGN,
+			   PIN_GLOBAL | PIN_HIGH);
+	if (err)
+		goto unpin;
+
+	return 0;
+
+unpin:
+	ppgtt->pin_count = 0;
+	return err;
 }
 
 void gen6_ppgtt_unpin(struct i915_hw_ppgtt *base)
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 07465123c166..3f9ce403c755 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -1907,9 +1907,16 @@ i915_capture_gpu_state(struct drm_i915_private *i915)
 {
 	struct i915_gpu_state *error;
 
+	/* Check if GPU capture has been disabled */
+	error = READ_ONCE(i915->gpu_error.first_error);
+	if (IS_ERR(error))
+		return error;
+
 	error = kzalloc(sizeof(*error), GFP_ATOMIC);
-	if (!error)
-		return NULL;
+	if (!error) {
+		i915_disable_error_state(i915, -ENOMEM);
+		return ERR_PTR(-ENOMEM);
+	}
 
 	kref_init(&error->ref);
 	error->i915 = i915;
@@ -1945,11 +1952,8 @@ void i915_capture_error_state(struct drm_i915_private *i915,
 		return;
 
 	error = i915_capture_gpu_state(i915);
-	if (!error) {
-		DRM_DEBUG_DRIVER("out of memory, not capturing error state\n");
-		i915_disable_error_state(i915, -ENOMEM);
+	if (IS_ERR(error))
 		return;
-	}
 
 	i915_error_capture_msg(i915, error, engine_mask, error_msg);
 	DRM_INFO("%s\n", error->error_msg);
@@ -1987,7 +1991,7 @@ i915_first_error_state(struct drm_i915_private *i915)
 
 	spin_lock_irq(&i915->gpu_error.lock);
 	error = i915->gpu_error.first_error;
-	if (error)
+	if (!IS_ERR_OR_NULL(error))
 		i915_gpu_state_get(error);
 	spin_unlock_irq(&i915->gpu_error.lock);
 
@@ -2000,10 +2004,11 @@ void i915_reset_error_state(struct drm_i915_private *i915)
 
 	spin_lock_irq(&i915->gpu_error.lock);
 	error = i915->gpu_error.first_error;
-	i915->gpu_error.first_error = NULL;
+	if (error != ERR_PTR(-ENODEV)) /* if disabled, always disabled */
+		i915->gpu_error.first_error = NULL;
 	spin_unlock_irq(&i915->gpu_error.lock);
 
-	if (!IS_ERR(error))
+	if (!IS_ERR_OR_NULL(error))
 		i915_gpu_state_put(error);
 }
 
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
index 535caebd9813..c0cfe7ae2ba5 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -521,7 +521,9 @@ static ssize_t error_state_read(struct file *filp, struct kobject *kobj,
 	ssize_t ret;
 
 	gpu = i915_first_error_state(i915);
-	if (gpu) {
+	if (IS_ERR(gpu)) {
+		ret = PTR_ERR(gpu);
+	} else if (gpu) {
 		ret = i915_gpu_state_copy_to_buffer(gpu, buf, off, count);
 		i915_gpu_state_put(gpu);
 	} else {
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 4be167dcd209..4796f40a6d4f 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -2244,6 +2244,8 @@ static int logical_ring_init(struct intel_engine_cs *engine)
 	if (ret)
 		return ret;
 
+	intel_engine_init_workarounds(engine);
+
 	if (HAS_LOGICAL_RING_ELSQ(i915)) {
 		execlists->submit_reg = i915->regs +
 			i915_mmio_reg_offset(RING_EXECLIST_SQ_CONTENTS(engine));
@@ -2310,7 +2312,6 @@ int logical_render_ring_init(struct intel_engine_cs *engine)
 	}
 
 	intel_engine_init_whitelist(engine);
-	intel_engine_init_workarounds(engine);
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/i915/intel_psr.c b/drivers/gpu/drm/i915/intel_psr.c
index 419e56342523..f71970df9936 100644
--- a/drivers/gpu/drm/i915/intel_psr.c
+++ b/drivers/gpu/drm/i915/intel_psr.c
@@ -274,10 +274,16 @@ void intel_psr_init_dpcd(struct intel_dp *intel_dp)
 	DRM_DEBUG_KMS("eDP panel supports PSR version %x\n",
 		      intel_dp->psr_dpcd[0]);
 
+	if (drm_dp_has_quirk(&intel_dp->desc, DP_DPCD_QUIRK_NO_PSR)) {
+		DRM_DEBUG_KMS("PSR support not currently available for this panel\n");
+		return;
+	}
+
 	if (!(intel_dp->edp_dpcd[1] & DP_EDP_SET_POWER_CAP)) {
 		DRM_DEBUG_KMS("Panel lacks power state control, PSR cannot be enabled\n");
 		return;
 	}
+
 	dev_priv->psr.sink_support = true;
 	dev_priv->psr.sink_sync_latency =
 		intel_dp_get_sink_sync_latency(intel_dp);