author | Maarten Lankhorst <maarten.lankhorst@linux.intel.com> | 2018-05-11 19:08:10 +0300 |
---|---|---|
committer | Maarten Lankhorst <maarten.lankhorst@linux.intel.com> | 2018-05-11 19:08:10 +0300 |
commit | 94cc2fde365fb4484080ea6675bb1e0c933f8002 (patch) | |
tree | a249c6f6b12ff2dbe39d78bfb050e9c28619bee9 /drivers/gpu/drm/i915/i915_gem.c | |
parent | 900aa8ad21587e909603f471b6cd81fd5338ec45 (diff) | |
parent | 8eb008c80841e3410ef2c043093478ea36bb5ff1 (diff) | |
download | linux-94cc2fde365fb4484080ea6675bb1e0c933f8002.tar.xz |
Merge remote-tracking branch 'drm/drm-next' into drm-misc-next
drm-misc-next is still based on v4.16-rc7, and was getting a bit stale.
Signed-off-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Diffstat (limited to 'drivers/gpu/drm/i915/i915_gem.c')
-rw-r--r-- | drivers/gpu/drm/i915/i915_gem.c | 237 |
1 file changed, 161 insertions, 76 deletions
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 7b5a9d7c9593..4c9d2a6f7d28 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -35,6 +35,7 @@
 #include "intel_drv.h"
 #include "intel_frontbuffer.h"
 #include "intel_mocs.h"
+#include "intel_workarounds.h"
 #include "i915_gemfs.h"
 #include <linux/dma-fence-array.h>
 #include <linux/kthread.h>
@@ -136,6 +137,100 @@ int i915_mutex_lock_interruptible(struct drm_device *dev)
 	return 0;
 }
 
+static u32 __i915_gem_park(struct drm_i915_private *i915)
+{
+	lockdep_assert_held(&i915->drm.struct_mutex);
+	GEM_BUG_ON(i915->gt.active_requests);
+
+	if (!i915->gt.awake)
+		return I915_EPOCH_INVALID;
+
+	GEM_BUG_ON(i915->gt.epoch == I915_EPOCH_INVALID);
+
+	/*
+	 * Be paranoid and flush a concurrent interrupt to make sure
+	 * we don't reactivate any irq tasklets after parking.
+	 *
+	 * FIXME: Note that even though we have waited for execlists to be idle,
+	 * there may still be an in-flight interrupt even though the CSB
+	 * is now empty. synchronize_irq() makes sure that a residual interrupt
+	 * is completed before we continue, but it doesn't prevent the HW from
+	 * raising a spurious interrupt later. To complete the shield we should
+	 * coordinate disabling the CS irq with flushing the interrupts.
+	 */
+	synchronize_irq(i915->drm.irq);
+
+	intel_engines_park(i915);
+	i915_gem_timelines_park(i915);
+
+	i915_pmu_gt_parked(i915);
+
+	i915->gt.awake = false;
+
+	if (INTEL_GEN(i915) >= 6)
+		gen6_rps_idle(i915);
+
+	intel_display_power_put(i915, POWER_DOMAIN_GT_IRQ);
+
+	intel_runtime_pm_put(i915);
+
+	return i915->gt.epoch;
+}
+
+void i915_gem_park(struct drm_i915_private *i915)
+{
+	lockdep_assert_held(&i915->drm.struct_mutex);
+	GEM_BUG_ON(i915->gt.active_requests);
+
+	if (!i915->gt.awake)
+		return;
+
+	/* Defer the actual call to __i915_gem_park() to prevent ping-pongs */
+	mod_delayed_work(i915->wq, &i915->gt.idle_work, msecs_to_jiffies(100));
+}
+
+void i915_gem_unpark(struct drm_i915_private *i915)
+{
+	lockdep_assert_held(&i915->drm.struct_mutex);
+	GEM_BUG_ON(!i915->gt.active_requests);
+
+	if (i915->gt.awake)
+		return;
+
+	intel_runtime_pm_get_noresume(i915);
+
+	/*
+	 * It seems that the DMC likes to transition between the DC states a lot
+	 * when there are no connected displays (no active power domains) during
+	 * command submission.
+	 *
+	 * This activity has negative impact on the performance of the chip with
+	 * huge latencies observed in the interrupt handler and elsewhere.
+	 *
+	 * Work around it by grabbing a GT IRQ power domain whilst there is any
+	 * GT activity, preventing any DC state transitions.
+	 */
+	intel_display_power_get(i915, POWER_DOMAIN_GT_IRQ);
+
+	i915->gt.awake = true;
+	if (unlikely(++i915->gt.epoch == 0)) /* keep 0 as invalid */
+		i915->gt.epoch = 1;
+
+	intel_enable_gt_powersave(i915);
+	i915_update_gfx_val(i915);
+	if (INTEL_GEN(i915) >= 6)
+		gen6_rps_busy(i915);
+	i915_pmu_gt_unparked(i915);
+
+	intel_engines_unpark(i915);
+
+	i915_queue_hangcheck(i915);
+
+	queue_delayed_work(i915->wq,
+			   &i915->gt.retire_work,
+			   round_jiffies_up_relative(HZ));
+}
+
 int
 i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
 			    struct drm_file *file)
@@ -2896,20 +2991,6 @@ i915_gem_find_active_request(struct intel_engine_cs *engine)
 	return active;
 }
 
-static bool engine_stalled(struct intel_engine_cs *engine)
-{
-	if (!engine->hangcheck.stalled)
-		return false;
-
-	/* Check for possible seqno movement after hang declaration */
-	if (engine->hangcheck.seqno != intel_engine_get_seqno(engine)) {
-		DRM_DEBUG_DRIVER("%s pardoned\n", engine->name);
-		return false;
-	}
-
-	return true;
-}
-
 /*
  * Ensure irq handler finishes, and not run again.
  * Also return the active request so that we only search for it once.
@@ -2998,6 +3079,7 @@ int i915_gem_reset_prepare(struct drm_i915_private *dev_priv)
 	}
 
 	i915_gem_revoke_fences(dev_priv);
+	intel_uc_sanitize(dev_priv);
 
 	return err;
 }
@@ -3047,7 +3129,8 @@ static void engine_skip_context(struct i915_request *request)
 /* Returns the request if it was guilty of the hang */
 static struct i915_request *
 i915_gem_reset_request(struct intel_engine_cs *engine,
-		       struct i915_request *request)
+		       struct i915_request *request,
+		       bool stalled)
 {
 	/* The guilty request will get skipped on a hung engine.
 	 *
@@ -3070,7 +3153,15 @@ i915_gem_reset_request(struct intel_engine_cs *engine,
 	 * subsequent hangs.
 	 */
 
-	if (engine_stalled(engine)) {
+	if (i915_request_completed(request)) {
+		GEM_TRACE("%s pardoned global=%d (fence %llx:%d), current %d\n",
+			  engine->name, request->global_seqno,
+			  request->fence.context, request->fence.seqno,
+			  intel_engine_get_seqno(engine));
+		stalled = false;
+	}
+
+	if (stalled) {
 		i915_gem_context_mark_guilty(request->ctx);
 		skip_request(request);
@@ -3101,7 +3192,8 @@ i915_gem_reset_request(struct intel_engine_cs *engine,
 }
 
 void i915_gem_reset_engine(struct intel_engine_cs *engine,
-			   struct i915_request *request)
+			   struct i915_request *request,
+			   bool stalled)
 {
 	/*
 	 * Make sure this write is visible before we re-enable the interrupt
@@ -3111,7 +3203,7 @@ void i915_gem_reset_engine(struct intel_engine_cs *engine,
 	smp_store_mb(engine->irq_posted, 0);
 
 	if (request)
-		request = i915_gem_reset_request(engine, request);
+		request = i915_gem_reset_request(engine, request, stalled);
 
 	if (request) {
 		DRM_DEBUG_DRIVER("resetting %s to restart from tail of request 0x%x\n",
@@ -3122,7 +3214,8 @@ void i915_gem_reset_engine(struct intel_engine_cs *engine,
 	engine->reset_hw(engine, request);
 }
 
-void i915_gem_reset(struct drm_i915_private *dev_priv)
+void i915_gem_reset(struct drm_i915_private *dev_priv,
+		    unsigned int stalled_mask)
 {
 	struct intel_engine_cs *engine;
 	enum intel_engine_id id;
@@ -3134,7 +3227,9 @@ void i915_gem_reset(struct drm_i915_private *dev_priv)
 	for_each_engine(engine, dev_priv, id) {
 		struct i915_gem_context *ctx;
 
-		i915_gem_reset_engine(engine, engine->hangcheck.active_request);
+		i915_gem_reset_engine(engine,
+				      engine->hangcheck.active_request,
+				      stalled_mask & ENGINE_MASK(id));
 		ctx = fetch_and_zero(&engine->last_retired_context);
 		if (ctx)
 			engine->context_unpin(engine, ctx);
@@ -3160,13 +3255,6 @@ void i915_gem_reset(struct drm_i915_private *dev_priv)
 	}
 
 	i915_gem_restore_fences(dev_priv);
-
-	if (dev_priv->gt.awake) {
-		intel_sanitize_gt_powersave(dev_priv);
-		intel_enable_gt_powersave(dev_priv);
-		if (INTEL_GEN(dev_priv) >= 6)
-			gen6_rps_busy(dev_priv);
-	}
 }
 
 void i915_gem_reset_finish_engine(struct intel_engine_cs *engine)
@@ -3192,6 +3280,9 @@ void i915_gem_reset_finish(struct drm_i915_private *dev_priv)
 
 static void nop_submit_request(struct i915_request *request)
 {
+	GEM_TRACE("%s fence %llx:%d -> -EIO\n",
+		  request->engine->name,
+		  request->fence.context, request->fence.seqno);
 	dma_fence_set_error(&request->fence, -EIO);
 
 	i915_request_submit(request);
@@ -3201,6 +3292,9 @@ static void nop_complete_submit_request(struct i915_request *request)
 {
 	unsigned long flags;
 
+	GEM_TRACE("%s fence %llx:%d -> -EIO\n",
+		  request->engine->name,
+		  request->fence.context, request->fence.seqno);
 	dma_fence_set_error(&request->fence, -EIO);
 
 	spin_lock_irqsave(&request->engine->timeline->lock, flags);
@@ -3214,6 +3308,8 @@ void i915_gem_set_wedged(struct drm_i915_private *i915)
 	struct intel_engine_cs *engine;
 	enum intel_engine_id id;
 
+	GEM_TRACE("start\n");
+
 	if (drm_debug & DRM_UT_DRIVER) {
 		struct drm_printer p = drm_debug_printer(__func__);
 
@@ -3237,6 +3333,9 @@ void i915_gem_set_wedged(struct drm_i915_private *i915)
 	}
 	i915->caps.scheduler = 0;
 
+	/* Even if the GPU reset fails, it should still stop the engines */
+	intel_gpu_reset(i915, ALL_ENGINES);
+
 	/*
 	 * Make sure no one is running the old callback before we proceed with
 	 * cancelling requests and resetting the completion tracking. Otherwise
@@ -3278,6 +3377,8 @@ void i915_gem_set_wedged(struct drm_i915_private *i915)
 		i915_gem_reset_finish_engine(engine);
 	}
 
+	GEM_TRACE("end\n");
+
 	wake_up_all(&i915->gpu_error.reset_queue);
 }
 
@@ -3290,7 +3391,10 @@ bool i915_gem_unset_wedged(struct drm_i915_private *i915)
 	if (!test_bit(I915_WEDGED, &i915->gpu_error.flags))
 		return true;
 
-	/* Before unwedging, make sure that all pending operations
+	GEM_TRACE("start\n");
+
+	/*
+	 * Before unwedging, make sure that all pending operations
 	 * are flushed and errored out - we may have requests waiting upon
 	 * third party fences. We marked all inflight requests as EIO, and
 	 * every execbuf since returned EIO, for consistency we want all
@@ -3308,7 +3412,8 @@ bool i915_gem_unset_wedged(struct drm_i915_private *i915)
 		if (!rq)
 			continue;
 
-		/* We can't use our normal waiter as we want to
+		/*
+		 * We can't use our normal waiter as we want to
 		 * avoid recursively trying to handle the current
 		 * reset. The basic dma_fence_default_wait() installs
 		 * a callback for dma_fence_signal(), which is
@@ -3323,8 +3428,11 @@ bool i915_gem_unset_wedged(struct drm_i915_private *i915)
 			return false;
 		}
 	}
+	i915_retire_requests(i915);
+	GEM_BUG_ON(i915->gt.active_requests);
 
-	/* Undo nop_submit_request. We prevent all new i915 requests from
+	/*
+	 * Undo nop_submit_request. We prevent all new i915 requests from
 	 * being queued (by disallowing execbuf whilst wedged) so having
 	 * waited for all active requests above, we know the system is idle
 	 * and do not have to worry about a thread being inside
@@ -3335,6 +3443,8 @@ bool i915_gem_unset_wedged(struct drm_i915_private *i915)
 	intel_engines_reset_default_submission(i915);
 	i915_gem_contexts_lost(i915);
 
+	GEM_TRACE("end\n");
+
 	smp_mb__before_atomic(); /* complete takeover before enabling execbuf */
 	clear_bit(I915_WEDGED, &i915->gpu_error.flags);
 
@@ -3473,36 +3583,9 @@ i915_gem_idle_work_handler(struct work_struct *work)
 	if (new_requests_since_last_retire(dev_priv))
 		goto out_unlock;
 
-	/*
-	 * Be paranoid and flush a concurrent interrupt to make sure
-	 * we don't reactivate any irq tasklets after parking.
-	 *
-	 * FIXME: Note that even though we have waited for execlists to be idle,
-	 * there may still be an in-flight interrupt even though the CSB
-	 * is now empty. synchronize_irq() makes sure that a residual interrupt
-	 * is completed before we continue, but it doesn't prevent the HW from
-	 * raising a spurious interrupt later. To complete the shield we should
-	 * coordinate disabling the CS irq with flushing the interrupts.
-	 */
-	synchronize_irq(dev_priv->drm.irq);
-
-	intel_engines_park(dev_priv);
-	i915_gem_timelines_park(dev_priv);
-
-	i915_pmu_gt_parked(dev_priv);
+	epoch = __i915_gem_park(dev_priv);
 
-	GEM_BUG_ON(!dev_priv->gt.awake);
-	dev_priv->gt.awake = false;
-	epoch = dev_priv->gt.epoch;
-	GEM_BUG_ON(epoch == I915_EPOCH_INVALID);
 	rearm_hangcheck = false;
-
-	if (INTEL_GEN(dev_priv) >= 6)
-		gen6_rps_idle(dev_priv);
-
-	intel_display_power_put(dev_priv, POWER_DOMAIN_GT_IRQ);
-
-	intel_runtime_pm_put(dev_priv);
 
 out_unlock:
 	mutex_unlock(&dev_priv->drm.struct_mutex);
@@ -3666,16 +3749,7 @@ static int wait_for_engines(struct drm_i915_private *i915)
 	if (wait_for(intel_engines_are_idle(i915), I915_IDLE_ENGINES_TIMEOUT)) {
 		dev_err(i915->drm.dev,
 			"Failed to idle engines, declaring wedged!\n");
-		if (drm_debug & DRM_UT_DRIVER) {
-			struct drm_printer p = drm_debug_printer(__func__);
-			struct intel_engine_cs *engine;
-			enum intel_engine_id id;
-
-			for_each_engine(engine, i915, id)
-				intel_engine_dump(engine, &p,
-						  "%s\n", engine->name);
-		}
-
+		GEM_TRACE_DUMP();
 		i915_gem_set_wedged(i915);
 		return -EIO;
 	}
@@ -4088,9 +4162,10 @@ out:
 }
 
 /*
- * Prepare buffer for display plane (scanout, cursors, etc).
- * Can be called from an uninterruptible phase (modesetting) and allows
- * any flushes to be pipelined (for pageflips).
+ * Prepare buffer for display plane (scanout, cursors, etc). Can be called from
+ * an uninterruptible phase (modesetting) and allows any flushes to be pipelined
+ * (for pageflips). We only flush the caches while preparing the buffer for
+ * display, the callers are responsible for frontbuffer flush.
  */
 struct i915_vma *
 i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
@@ -4146,9 +4221,7 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
 
 	vma->display_alignment = max_t(u64, vma->display_alignment, alignment);
 
-	/* Treat this as an end-of-frame, like intel_user_framebuffer_dirty() */
 	__i915_gem_object_flush_for_display(obj);
-	intel_fb_obj_flush(obj, ORIGIN_DIRTYFB);
 
 	/* It should now be out of any other write domains, and we can update
 	 * the domain values for our changes.
@@ -4973,6 +5046,7 @@ int i915_gem_suspend(struct drm_i915_private *dev_priv)
 	 * machines is a good idea, we don't - just in case it leaves the
 	 * machine in an unusable condition.
 	 */
+	intel_uc_sanitize(dev_priv);
 	i915_gem_sanitize(dev_priv);
 
 	intel_runtime_pm_put(dev_priv);
@@ -5118,6 +5192,8 @@ int i915_gem_init_hw(struct drm_i915_private *dev_priv)
 		}
 	}
 
+	intel_gt_workarounds_apply(dev_priv);
+
 	i915_gem_init_swizzling(dev_priv);
 
 	/*
@@ -5140,6 +5216,12 @@ int i915_gem_init_hw(struct drm_i915_private *dev_priv)
 		goto out;
 	}
 
+	ret = intel_wopcm_init_hw(&dev_priv->wopcm);
+	if (ret) {
+		DRM_ERROR("Enabling WOPCM failed (%d)\n", ret);
+		goto out;
+	}
+
 	/* We can't enable contexts until all firmware is loaded */
 	ret = intel_uc_init_hw(dev_priv);
 	if (ret) {
@@ -5297,6 +5379,10 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
 	if (ret)
 		return ret;
 
+	ret = intel_wopcm_init(&dev_priv->wopcm);
+	if (ret)
+		return ret;
+
 	ret = intel_uc_init_misc(dev_priv);
 	if (ret)
 		return ret;
@@ -5478,8 +5564,7 @@ static void i915_gem_init__mm(struct drm_i915_private *i915)
 	INIT_WORK(&i915->mm.free_work, __i915_gem_free_work);
 }
 
-int
-i915_gem_load_init(struct drm_i915_private *dev_priv)
+int i915_gem_init_early(struct drm_i915_private *dev_priv)
 {
 	int err = -ENOMEM;
 
@@ -5554,7 +5639,7 @@ err_out:
 	return err;
 }
 
-void i915_gem_load_cleanup(struct drm_i915_private *dev_priv)
+void i915_gem_cleanup_early(struct drm_i915_private *dev_priv)
 {
 	i915_gem_drain_freed_objects(dev_priv);
 	GEM_BUG_ON(!llist_empty(&dev_priv->mm.free_list));
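A note for readers tracing the new park/unpark code above: i915_gem_unpark() advances a GT wakeup "epoch" and reserves 0 as I915_EPOCH_INVALID so the counter stays meaningful across wraparound. Below is a minimal standalone C sketch of just that counter pattern; the struct and function names are illustrative, not driver code.

```c
/*
 * Standalone sketch (not driver code) of the epoch pattern used by
 * i915_gem_unpark() above: a wakeup counter where 0 is reserved as
 * "invalid", so a caller can tell "was not awake" apart from any
 * real epoch. All names here are hypothetical.
 */
#include <stdio.h>

#define EPOCH_INVALID 0

struct gt_state {
	unsigned int epoch;	/* current wakeup epoch, 0 == invalid */
	int awake;
};

/* Advance the epoch on unpark, skipping the reserved invalid value. */
static void gt_unpark(struct gt_state *gt)
{
	gt->awake = 1;
	if (++gt->epoch == 0)	/* wrapped: keep 0 as invalid */
		gt->epoch = 1;
}

/* On park, report the epoch that just ended, or invalid if not awake. */
static unsigned int gt_park(struct gt_state *gt)
{
	if (!gt->awake)
		return EPOCH_INVALID;
	gt->awake = 0;
	return gt->epoch;
}

int main(void)
{
	struct gt_state gt = { .epoch = EPOCH_INVALID, .awake = 0 };

	gt_unpark(&gt);
	printf("epoch after unpark: %u\n", gt_park(&gt)); /* 1 */
	printf("park while idle: %u\n", gt_park(&gt));    /* 0 */
	return 0;
}
```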
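Similarly, i915_gem_reset() now takes a stalled_mask with one bit per engine, and each engine's reset consults stalled_mask & ENGINE_MASK(id). A toy sketch of that per-engine bitmask bookkeeping follows; ENGINE_MASK mirrors the driver's BIT(id) macro, while the engine ids and main() are invented for illustration.

```c
/*
 * Sketch of the per-engine "stalled" bookkeeping introduced above:
 * hangcheck records one bit per engine in a mask, and the reset path
 * tests that bit to decide whether a request is declared guilty.
 * ENGINE_MASK() mirrors the driver's macro; the rest is hypothetical.
 */
#include <stdio.h>

enum engine_id { RCS = 0, BCS, VCS, VECS, NUM_ENGINES };

#define ENGINE_MASK(id)	(1u << (id))

int main(void)
{
	unsigned int stalled_mask = 0;

	/* pretend hangcheck found RCS and VCS wedged */
	stalled_mask |= ENGINE_MASK(RCS);
	stalled_mask |= ENGINE_MASK(VCS);

	for (enum engine_id id = RCS; id < NUM_ENGINES; id++)
		printf("engine %d stalled: %s\n", id,
		       (stalled_mask & ENGINE_MASK(id)) ? "yes" : "no");
	return 0;
}
```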