author    | Michał Winiarski <michal.winiarski@intel.com> | 2018-02-26 19:37:59 +0300
committer | Chris Wilson <chris@chris-wilson.co.uk>       | 2018-02-27 13:30:12 +0300
commit    | 5028a4fb7d8d93438e2c6c1b15140ca44b1f647d
tree      | 47a1b08d31374a2e5d50041d273eec09543813ce /drivers/gpu/drm
parent    | ba1c06a5727b59b4ac118a91cc9fe82c123acbab
download  | linux-5028a4fb7d8d93438e2c6c1b15140ca44b1f647d.tar.xz
drm/i915/guc: Fill preempt context once at init time
Since we're inhibiting context save of the preempt context, we're no
longer tracking the position of HEAD/TAIL. With GuC, we're adding a new
breadcrumb for each preemption, which means that the HW will do more and
more breadcrumb writes. Eventually the ring fills up, and we end up
submitting the preempt context with HEAD==TAIL==0, which won't result in
a breadcrumb write but will trigger hangcheck instead.
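As a hedged illustration of the failure mode (standalone C with made-up
names like ring_size and breadcrumb_bytes, not the driver's
identifiers): with context save inhibited HEAD never moves, each
preemption advances TAIL by one fixed-size breadcrumb, and once the
writes add up to the ring size the power-of-two mask wraps TAIL back to
0, making the submission look empty.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint32_t ring_size = 4096;	/* power of two, illustrative */
	const uint32_t breadcrumb_bytes = 8 * sizeof(uint32_t); /* 8 dwords */
	uint32_t head = 0;	/* never updated: context save is inhibited */
	uint32_t tail = 0;

	for (unsigned int i = 1; i <= ring_size / breadcrumb_bytes; i++) {
		tail = (tail + breadcrumb_bytes) & (ring_size - 1);
		if (tail == head)	/* HEAD==TAIL==0: nothing to execute */
			printf("preemption %u: ring looks empty, hangcheck fires\n", i);
	}
	return 0;
}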
Instead of writing a new preempt breadcrumb for each preemption, let's
just fill the ring once at init time (which also saves a couple of
instructions in the tasklet).
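For reference, the constant work-queue tail the patch now submits can be
checked standalone; this mirrors the macros added by the diff below,
with the kernel's u32/u64 swapped for stdint types:

#include <stdint.h>
#include <stdio.h>

/* Same values as the macros added by this patch. */
#define GUC_PREEMPT_BREADCRUMB_DWORDS	0x8
#define GUC_PREEMPT_BREADCRUMB_BYTES \
	(sizeof(uint32_t) * GUC_PREEMPT_BREADCRUMB_DWORDS)

int main(void)
{
	/* guc_wq_item_append() takes the tail in qwords; it is now the
	 * same for every preemption: 8 dwords = 32 bytes = 4 qwords.
	 */
	printf("wq tail = %zu qwords\n",
	       GUC_PREEMPT_BREADCRUMB_BYTES / sizeof(uint64_t));
	return 0;
}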
v2: Assert that context save restore is inhibited, don't assert on ring
alignment. (Chris)
v3: Checkpatch cleanup.
Fixes: 517aaffe0c1b ("drm/i915/execlists: Inhibit context save/restore for the fake preempt context")
Signed-off-by: Michał Winiarski <michal.winiarski@intel.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Cc: Michel Thierry <michel.thierry@intel.com>
Cc: Mika Kuoppala <mika.kuoppala@intel.com>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180226163800.21745-1-michal.winiarski@intel.com
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Diffstat (limited to 'drivers/gpu/drm')
-rw-r--r-- | drivers/gpu/drm/i915/intel_guc_submission.c | 96
1 file changed, 69 insertions(+), 27 deletions(-)
diff --git a/drivers/gpu/drm/i915/intel_guc_submission.c b/drivers/gpu/drm/i915/intel_guc_submission.c
index 586dde579903..8a8ad2fe158d 100644
--- a/drivers/gpu/drm/i915/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/intel_guc_submission.c
@@ -26,8 +26,14 @@
 #include <trace/events/dma_fence.h>
 
 #include "intel_guc_submission.h"
+#include "intel_lrc_reg.h"
 #include "i915_drv.h"
 
+#define GUC_PREEMPT_FINISHED		0x1
+#define GUC_PREEMPT_BREADCRUMB_DWORDS	0x8
+#define GUC_PREEMPT_BREADCRUMB_BYTES	\
+	(sizeof(u32) * GUC_PREEMPT_BREADCRUMB_DWORDS)
+
 /**
  * DOC: GuC-based command submission
  *
@@ -535,8 +541,6 @@ static void flush_ggtt_writes(struct i915_vma *vma)
 		POSTING_READ_FW(GUC_STATUS);
 }
 
-#define GUC_PREEMPT_FINISHED 0x1
-#define GUC_PREEMPT_BREADCRUMB_DWORDS 0x8
 static void inject_preempt_context(struct work_struct *work)
 {
 	struct guc_preempt_work *preempt_work =
@@ -546,37 +550,17 @@ static void inject_preempt_context(struct work_struct *work)
 					     preempt_work[engine->id]);
 	struct intel_guc_client *client = guc->preempt_client;
 	struct guc_stage_desc *stage_desc = __get_stage_desc(client);
-	struct intel_ring *ring = client->owner->engine[engine->id].ring;
 	u32 ctx_desc = lower_32_bits(intel_lr_context_descriptor(client->owner,
 								 engine));
-	u32 *cs = ring->vaddr + ring->tail;
 	u32 data[7];
 
-	if (engine->id == RCS) {
-		cs = gen8_emit_ggtt_write_rcs(cs, GUC_PREEMPT_FINISHED,
-				intel_hws_preempt_done_address(engine));
-	} else {
-		cs = gen8_emit_ggtt_write(cs, GUC_PREEMPT_FINISHED,
-				intel_hws_preempt_done_address(engine));
-		*cs++ = MI_NOOP;
-		*cs++ = MI_NOOP;
-	}
-	*cs++ = MI_USER_INTERRUPT;
-	*cs++ = MI_NOOP;
-
-	GEM_BUG_ON(!IS_ALIGNED(ring->size,
-			       GUC_PREEMPT_BREADCRUMB_DWORDS * sizeof(u32)));
-	GEM_BUG_ON((void *)cs - (ring->vaddr + ring->tail) !=
-		   GUC_PREEMPT_BREADCRUMB_DWORDS * sizeof(u32));
-
-	ring->tail += GUC_PREEMPT_BREADCRUMB_DWORDS * sizeof(u32);
-	ring->tail &= (ring->size - 1);
-
-	flush_ggtt_writes(ring->vma);
-
+	/*
+	 * The ring contains commands to write GUC_PREEMPT_FINISHED into HWSP.
+	 * See guc_fill_preempt_context().
+	 */
 	spin_lock_irq(&client->wq_lock);
 	guc_wq_item_append(client, engine->guc_id, ctx_desc,
-			   ring->tail / sizeof(u64), 0);
+			   GUC_PREEMPT_BREADCRUMB_BYTES / sizeof(u64), 0);
 	spin_unlock_irq(&client->wq_lock);
 
 	/*
@@ -972,6 +956,62 @@ static void guc_client_free(struct intel_guc_client *client)
 	kfree(client);
 }
 
+static inline bool ctx_save_restore_disabled(struct intel_context *ce)
+{
+	u32 sr = ce->lrc_reg_state[CTX_CONTEXT_CONTROL + 1];
+
+#define SR_DISABLED \
+	_MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT | \
+			   CTX_CTRL_ENGINE_CTX_SAVE_INHIBIT)
+
+	return (sr & SR_DISABLED) == SR_DISABLED;
+
+#undef SR_DISABLED
+}
+
+static void guc_fill_preempt_context(struct intel_guc *guc)
+{
+	struct drm_i915_private *dev_priv = guc_to_i915(guc);
+	struct intel_guc_client *client = guc->preempt_client;
+	struct intel_engine_cs *engine;
+	enum intel_engine_id id;
+
+	for_each_engine(engine, dev_priv, id) {
+		struct intel_context *ce = &client->owner->engine[id];
+		u32 addr = intel_hws_preempt_done_address(engine);
+		u32 *cs;
+
+		GEM_BUG_ON(!ce->pin_count);
+
+		/*
+		 * We rely on this context image *not* being saved after
+		 * preemption. This ensures that the RING_HEAD / RING_TAIL
+		 * remain pointing at initial values forever.
+		 */
+		GEM_BUG_ON(!ctx_save_restore_disabled(ce));
+
+		cs = ce->ring->vaddr;
+		if (id == RCS) {
+			cs = gen8_emit_ggtt_write_rcs(cs,
+						      GUC_PREEMPT_FINISHED,
+						      addr);
+		} else {
+			cs = gen8_emit_ggtt_write(cs,
						  GUC_PREEMPT_FINISHED,
+						  addr);
+			*cs++ = MI_NOOP;
+			*cs++ = MI_NOOP;
+		}
+		*cs++ = MI_USER_INTERRUPT;
+		*cs++ = MI_NOOP;
+
+		GEM_BUG_ON((void *)cs - ce->ring->vaddr !=
+			   GUC_PREEMPT_BREADCRUMB_BYTES);
+
+		flush_ggtt_writes(ce->ring->vma);
+	}
+}
+
 static int guc_clients_create(struct intel_guc *guc)
 {
 	struct drm_i915_private *dev_priv = guc_to_i915(guc);
@@ -1002,6 +1042,8 @@ static int guc_clients_create(struct intel_guc *guc)
 		return PTR_ERR(client);
 	}
 	guc->preempt_client = client;
+
+	guc_fill_preempt_context(guc);
 
 	return 0;
 }
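A note on why GUC_PREEMPT_BREADCRUMB_DWORDS is 0x8 on both paths of
guc_fill_preempt_context(). This is a hedged accounting sketch: the
4-dword gen8_emit_ggtt_write (MI_FLUSH_DW qword write) and 6-dword
gen8_emit_ggtt_write_rcs (PIPE_CONTROL) sizes are from the intel_lrc
code of this era; verify against your tree.

#include <stdio.h>

int main(void)
{
	/* RCS: 6-dword PIPE_CONTROL write + MI_USER_INTERRUPT + MI_NOOP */
	int rcs = 6 + 1 + 1;
	/* others: 4-dword MI_FLUSH_DW write + 2 MI_NOOP pad
	 * + MI_USER_INTERRUPT + MI_NOOP
	 */
	int other = 4 + 2 + 1 + 1;

	printf("RCS: %d dwords, non-RCS: %d dwords\n", rcs, other); /* 8, 8 */
	return 0;
}

Both paths land on GUC_PREEMPT_BREADCRUMB_DWORDS, which is what the
GEM_BUG_ON at the end of the loop asserts.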