Diffstat (limited to 'drivers/gpu/drm/i915/intel_ringbuffer.h')
-rw-r--r-- | drivers/gpu/drm/i915/intel_ringbuffer.h | 139 |
1 file changed, 99 insertions(+), 40 deletions(-)
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 010750e8ee44..f5ffa6d31e82 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -122,7 +122,8 @@ struct intel_engine_hangcheck {
 	int deadlock;
 	struct intel_instdone instdone;
 	struct i915_request *active_request;
-	bool stalled;
+	bool stalled:1;
+	bool wedged:1;
 };
 
 struct intel_ring {
@@ -192,6 +193,11 @@ struct i915_priolist {
 	int priority;
 };
 
+struct st_preempt_hang {
+	struct completion completion;
+	bool inject_hang;
+};
+
 /**
  * struct intel_engine_execlists - execlist submission queue and port state
  *
@@ -291,32 +297,49 @@ struct intel_engine_execlists {
 	/**
 	 * @queue: queue of requests, in priority lists
 	 */
-	struct rb_root queue;
+	struct rb_root_cached queue;
 
 	/**
-	 * @first: leftmost level in priority @queue
+	 * @csb_read: control register for Context Switch buffer
+	 *
+	 * Note this register is always in mmio.
 	 */
-	struct rb_node *first;
+	u32 __iomem *csb_read;
 
 	/**
-	 * @fw_domains: forcewake domains for irq tasklet
+	 * @csb_write: control register for Context Switch buffer
+	 *
+	 * Note this register may be either mmio or HWSP shadow.
 	 */
-	unsigned int fw_domains;
+	u32 *csb_write;
 
 	/**
-	 * @csb_head: context status buffer head
+	 * @csb_status: status array for Context Switch buffer
+	 *
+	 * Note these register may be either mmio or HWSP shadow.
 	 */
-	unsigned int csb_head;
+	u32 *csb_status;
 
 	/**
-	 * @csb_use_mmio: access csb through mmio, instead of hwsp
+	 * @preempt_complete_status: expected CSB upon completing preemption
 	 */
-	bool csb_use_mmio;
+	u32 preempt_complete_status;
 
 	/**
-	 * @preempt_complete_status: expected CSB upon completing preemption
+	 * @csb_write_reset: reset value for CSB write pointer
+	 *
+	 * As the CSB write pointer maybe either in HWSP or as a field
+	 * inside an mmio register, we want to reprogram it slightly
+	 * differently to avoid later confusion.
 	 */
-	u32 preempt_complete_status;
+	u32 csb_write_reset;
+
+	/**
+	 * @csb_head: context status buffer head
+	 */
+	u8 csb_head;
+
+	I915_SELFTEST_DECLARE(struct st_preempt_hang preempt_hang;)
 };
 
 #define INTEL_ENGINE_CS_MAX_NAME 8
@@ -342,11 +365,10 @@ struct intel_engine_cs {
 	struct i915_timeline timeline;
 
 	struct drm_i915_gem_object *default_state;
+	void *pinned_default_state;
 
-	atomic_t irq_count;
 	unsigned long irq_posted;
 #define ENGINE_IRQ_BREADCRUMB 0
-#define ENGINE_IRQ_EXECLIST 1
 
 	/* Rather than have every client wait upon all user interrupts,
 	 * with the herd waking after every interrupt and each doing the
@@ -378,6 +400,7 @@ struct intel_engine_cs {
 
 		unsigned int hangcheck_interrupts;
 		unsigned int irq_enabled;
+		unsigned int irq_count;
 
 		bool irq_armed : 1;
 		I915_SELFTEST_DECLARE(bool mock : 1);
@@ -423,18 +446,22 @@ struct intel_engine_cs {
 	void (*irq_disable)(struct intel_engine_cs *engine);
 
 	int (*init_hw)(struct intel_engine_cs *engine);
-	void (*reset_hw)(struct intel_engine_cs *engine,
-			 struct i915_request *rq);
+
+	struct {
+		struct i915_request *(*prepare)(struct intel_engine_cs *engine);
+		void (*reset)(struct intel_engine_cs *engine,
+			      struct i915_request *rq);
+		void (*finish)(struct intel_engine_cs *engine);
+	} reset;
 
 	void (*park)(struct intel_engine_cs *engine);
 	void (*unpark)(struct intel_engine_cs *engine);
 
 	void (*set_default_submission)(struct intel_engine_cs *engine);
 
-	struct intel_ring *(*context_pin)(struct intel_engine_cs *engine,
-					  struct i915_gem_context *ctx);
-	void (*context_unpin)(struct intel_engine_cs *engine,
-			      struct i915_gem_context *ctx);
+	struct intel_context *(*context_pin)(struct intel_engine_cs *engine,
+					     struct i915_gem_context *ctx);
+
 	int (*request_alloc)(struct i915_request *rq);
 	int (*init_context)(struct i915_request *rq);
 
@@ -550,16 +577,7 @@ struct intel_engine_cs {
 	 * to the kernel context and trash it as the save may not happen
	 * before the hardware is powered down.
 	 */
-	struct i915_gem_context *last_retired_context;
-
-	/* We track the current MI_SET_CONTEXT in order to eliminate
-	 * redudant context switches. This presumes that requests are not
-	 * reordered! Or when they are the tracking is updated along with
-	 * the emission of individual requests into the legacy command
-	 * stream (ring).
-	 */
-	struct i915_gem_context *legacy_active_context;
-	struct i915_hw_ppgtt *legacy_active_ppgtt;
+	struct intel_context *last_retired_context;
 
 	/* status_notifier: list of callbacks for context-switch changes */
 	struct atomic_notifier_head context_status_notifier;
@@ -672,6 +690,12 @@ execlists_clear_active(struct intel_engine_execlists *execlists,
 	__clear_bit(bit, (unsigned long *)&execlists->active);
 }
 
+static inline void
+execlists_clear_all_active(struct intel_engine_execlists *execlists)
+{
+	execlists->active = 0;
+}
+
 static inline bool
 execlists_is_active(const struct intel_engine_execlists *execlists,
 		    unsigned int bit)
@@ -809,6 +833,19 @@ static inline u32 intel_ring_wrap(const struct intel_ring *ring, u32 pos)
 	return pos & (ring->size - 1);
 }
 
+static inline bool
+intel_ring_offset_valid(const struct intel_ring *ring,
+			unsigned int pos)
+{
+	if (pos & -ring->size) /* must be strictly within the ring */
+		return false;
+
+	if (!IS_ALIGNED(pos, 8)) /* must be qword aligned */
+		return false;
+
+	return true;
+}
+
 static inline u32 intel_ring_offset(const struct i915_request *rq, void *addr)
 {
 	/* Don't write ring->size (equivalent to 0) as that hangs some GPUs. */
@@ -820,12 +857,7 @@ static inline u32 intel_ring_offset(const struct i915_request *rq, void *addr)
 static inline void
 assert_ring_tail_valid(const struct intel_ring *ring, unsigned int tail)
 {
-	/* We could combine these into a single tail operation, but keeping
-	 * them as seperate tests will help identify the cause should one
-	 * ever fire.
-	 */
-	GEM_BUG_ON(!IS_ALIGNED(tail, 8));
-	GEM_BUG_ON(tail >= ring->size);
+	GEM_BUG_ON(!intel_ring_offset_valid(ring, tail));
 
 	/*
 	 * "Ring Buffer Use"
@@ -865,14 +897,19 @@ void intel_engine_init_global_seqno(struct intel_engine_cs *engine, u32 seqno);
 
 void intel_engine_setup_common(struct intel_engine_cs *engine);
 int intel_engine_init_common(struct intel_engine_cs *engine);
-int intel_engine_create_scratch(struct intel_engine_cs *engine, int size);
 void intel_engine_cleanup_common(struct intel_engine_cs *engine);
 
+int intel_engine_create_scratch(struct intel_engine_cs *engine,
+				unsigned int size);
+void intel_engine_cleanup_scratch(struct intel_engine_cs *engine);
+
 int intel_init_render_ring_buffer(struct intel_engine_cs *engine);
 int intel_init_bsd_ring_buffer(struct intel_engine_cs *engine);
 int intel_init_blt_ring_buffer(struct intel_engine_cs *engine);
 int intel_init_vebox_ring_buffer(struct intel_engine_cs *engine);
 
+int intel_engine_stop_cs(struct intel_engine_cs *engine);
+
 u64 intel_engine_get_active_head(const struct intel_engine_cs *engine);
 u64 intel_engine_get_last_batch_head(const struct intel_engine_cs *engine);
 
@@ -918,11 +955,10 @@ static inline u32 intel_hws_preempt_done_address(struct intel_engine_cs *engine)
 /* intel_breadcrumbs.c -- user interrupt bottom-half for waiters */
 int intel_engine_init_breadcrumbs(struct intel_engine_cs *engine);
 
-static inline void intel_wait_init(struct intel_wait *wait,
-				   struct i915_request *rq)
+static inline void intel_wait_init(struct intel_wait *wait)
 {
 	wait->tsk = current;
-	wait->request = rq;
+	wait->request = NULL;
 }
 
 static inline void intel_wait_init_for_seqno(struct intel_wait *wait, u32 seqno)
@@ -1042,10 +1078,13 @@ gen8_emit_ggtt_write(u32 *cs, u32 value, u32 gtt_offset)
 	return cs;
 }
 
+void intel_engines_sanitize(struct drm_i915_private *i915);
+
 bool intel_engine_is_idle(struct intel_engine_cs *engine);
 bool intel_engines_are_idle(struct drm_i915_private *dev_priv);
 
 bool intel_engine_has_kernel_context(const struct intel_engine_cs *engine);
+void intel_engine_lost_context(struct intel_engine_cs *engine);
 
 void intel_engines_park(struct drm_i915_private *i915);
 void intel_engines_unpark(struct drm_i915_private *i915);
@@ -1123,4 +1162,24 @@ void intel_disable_engine_stats(struct intel_engine_cs *engine);
 
 ktime_t intel_engine_get_busy_time(struct intel_engine_cs *engine);
 
+#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
+
+static inline bool inject_preempt_hang(struct intel_engine_execlists *execlists)
+{
+	if (!execlists->preempt_hang.inject_hang)
+		return false;
+
+	complete(&execlists->preempt_hang.completion);
+	return true;
+}
+
+#else
+
+static inline bool inject_preempt_hang(struct intel_engine_execlists *execlists)
+{
+	return false;
+}
+
+#endif
+
 #endif /* _INTEL_RINGBUFFER_H_ */
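
For readers tracing the reset rework in the hunk at @@ -423,18 +446,22 @@ above: the single reset_hw() vfunc is replaced by a reset.{prepare,reset,finish} triplet, so a reset can be staged around the request that needs replaying. The sketch below is illustrative only and not part of this patch; handle_engine_reset() and its comments are hypothetical, and it simply shows the intended calling order, with prepare() returning the request (possibly NULL) to hand back to reset().

/*
 * Illustrative sketch only (not from this patch): how a driver-level
 * reset path might drive the new engine->reset.{prepare,reset,finish}
 * vfuncs. handle_engine_reset() is a hypothetical caller.
 */
static void handle_engine_reset(struct intel_engine_cs *engine)
{
	struct i915_request *rq;

	/* Quiesce submission and pick the request to replay (may be NULL). */
	rq = engine->reset.prepare(engine);

	/* Reset the hardware and rewind state to the chosen request. */
	engine->reset.reset(engine, rq);

	/* Re-enable submission once the engine state is sane again. */
	engine->reset.finish(engine);
}

Likewise, the selftest-only inject_preempt_hang() helper added at the end of the file is meant to be checked on the preemption path: when a selftest sets preempt_hang.inject_hang, the caller can signal the completion and skip its normal handling, simulating a preemption that never completes.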