Diffstat (limited to 'drivers/gpu/drm/i915/gt')
82 files changed, 6586 insertions, 1631 deletions
diff --git a/drivers/gpu/drm/i915/gt/gen6_ppgtt.c b/drivers/gpu/drm/i915/gt/gen6_ppgtt.c index 871fe7bda0e0..1bb766c79dcb 100644 --- a/drivers/gpu/drm/i915/gt/gen6_ppgtt.c +++ b/drivers/gpu/drm/i915/gt/gen6_ppgtt.c @@ -322,7 +322,7 @@ int gen6_ppgtt_pin(struct i915_ppgtt *base, struct i915_gem_ww_ctx *ww) struct gen6_ppgtt *ppgtt = to_gen6_ppgtt(base); int err; - GEM_BUG_ON(!atomic_read(&ppgtt->base.vm.open)); + GEM_BUG_ON(!kref_read(&ppgtt->base.vm.ref)); /* * Workaround the limited maximum vma->pin_count and the aliasing_ppgtt diff --git a/drivers/gpu/drm/i915/gt/gen8_engine_cs.c b/drivers/gpu/drm/i915/gt/gen8_engine_cs.c index b1b9c3fd7bf9..3e13960615bd 100644 --- a/drivers/gpu/drm/i915/gt/gen8_engine_cs.c +++ b/drivers/gpu/drm/i915/gt/gen8_engine_cs.c @@ -5,8 +5,8 @@ #include "gen8_engine_cs.h" #include "i915_drv.h" +#include "intel_engine_regs.h" #include "intel_gpu_commands.h" -#include "intel_gt_regs.h" #include "intel_lrc.h" #include "intel_ring.h" @@ -165,33 +165,9 @@ static u32 preparser_disable(bool state) return MI_ARB_CHECK | 1 << 8 | state; } -static i915_reg_t aux_inv_reg(const struct intel_engine_cs *engine) +u32 *gen12_emit_aux_table_inv(u32 *cs, const i915_reg_t inv_reg) { - static const i915_reg_t vd[] = { - GEN12_VD0_AUX_NV, - GEN12_VD1_AUX_NV, - GEN12_VD2_AUX_NV, - GEN12_VD3_AUX_NV, - }; - - static const i915_reg_t ve[] = { - GEN12_VE0_AUX_NV, - GEN12_VE1_AUX_NV, - }; - - if (engine->class == VIDEO_DECODE_CLASS) - return vd[engine->instance]; - - if (engine->class == VIDEO_ENHANCEMENT_CLASS) - return ve[engine->instance]; - - GEM_BUG_ON("unknown aux_inv reg\n"); - return INVALID_MMIO_REG; -} - -static u32 *gen12_emit_aux_table_inv(const i915_reg_t inv_reg, u32 *cs) -{ - *cs++ = MI_LOAD_REGISTER_IMM(1); + *cs++ = MI_LOAD_REGISTER_IMM(1) | MI_LRI_MMIO_REMAP_EN; *cs++ = i915_mmio_reg_offset(inv_reg); *cs++ = AUX_INV; *cs++ = MI_NOOP; @@ -236,7 +212,7 @@ int gen12_emit_flush_rcs(struct i915_request *rq, u32 mode) if (mode & EMIT_INVALIDATE) { u32 flags = 0; - u32 *cs; + u32 *cs, count; flags |= PIPE_CONTROL_COMMAND_CACHE_INVALIDATE; flags |= PIPE_CONTROL_TLB_INVALIDATE; @@ -254,7 +230,12 @@ int gen12_emit_flush_rcs(struct i915_request *rq, u32 mode) if (engine->class == COMPUTE_CLASS) flags &= ~PIPE_CONTROL_3D_FLAGS; - cs = intel_ring_begin(rq, 8 + 4); + if (!HAS_FLAT_CCS(rq->engine->i915)) + count = 8 + 4; + else + count = 8; + + cs = intel_ring_begin(rq, count); if (IS_ERR(cs)) return PTR_ERR(cs); @@ -267,8 +248,10 @@ int gen12_emit_flush_rcs(struct i915_request *rq, u32 mode) cs = gen8_emit_pipe_control(cs, flags, LRC_PPHWSP_SCRATCH_ADDR); - /* hsdes: 1809175790 */ - cs = gen12_emit_aux_table_inv(GEN12_GFX_CCS_AUX_NV, cs); + if (!HAS_FLAT_CCS(rq->engine->i915)) { + /* hsdes: 1809175790 */ + cs = gen12_emit_aux_table_inv(cs, GEN12_GFX_CCS_AUX_NV); + } *cs++ = preparser_disable(false); intel_ring_advance(rq, cs); @@ -283,12 +266,17 @@ int gen12_emit_flush_xcs(struct i915_request *rq, u32 mode) u32 cmd, *cs; cmd = 4; - if (mode & EMIT_INVALIDATE) + if (mode & EMIT_INVALIDATE) { cmd += 2; - if (mode & EMIT_INVALIDATE) - aux_inv = rq->engine->mask & ~BIT(BCS0); - if (aux_inv) - cmd += 2 * hweight32(aux_inv) + 2; + + if (!HAS_FLAT_CCS(rq->engine->i915) && + (rq->engine->class == VIDEO_DECODE_CLASS || + rq->engine->class == VIDEO_ENHANCEMENT_CLASS)) { + aux_inv = rq->engine->mask & ~BIT(BCS0); + if (aux_inv) + cmd += 4; + } + } cs = intel_ring_begin(rq, cmd); if (IS_ERR(cs)) @@ -319,15 +307,10 @@ int gen12_emit_flush_xcs(struct i915_request *rq, u32 mode) *cs++ = 0; /* value 
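For reference, a standalone sketch of the reworked invalidation helper above: a one-register MI_LOAD_REGISTER_IMM carrying the new MMIO-remap bit, writing AUX_INV into the engine's AUX_NV register and padded with a NOOP. The MI_* encodings and the register offset below are simplified assumptions for illustration, not the driver's headers.

```c
/*
 * Sketch (userspace C, not kernel code) of the reworked helper:
 * LRI one register, with the remap bit, then pad for alignment.
 */
#include <stdint.h>

#define MI_INSTR(op, flags)     (((op) << 23) | (flags))
#define MI_NOOP                 MI_INSTR(0x00, 0)
#define MI_LOAD_REGISTER_IMM(x) MI_INSTR(0x22, 2 * (x) - 1)
#define MI_LRI_MMIO_REMAP_EN    (1u << 17)
#define AUX_INV                 (1u << 0)   /* assumed invalidate bit */

static uint32_t *emit_aux_table_inv(uint32_t *cs, uint32_t inv_reg)
{
	*cs++ = MI_LOAD_REGISTER_IMM(1) | MI_LRI_MMIO_REMAP_EN;
	*cs++ = inv_reg;      /* e.g. the engine's *_AUX_NV offset */
	*cs++ = AUX_INV;
	*cs++ = MI_NOOP;      /* keep the emission qword-aligned */
	return cs;
}
```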
*/ if (aux_inv) { /* hsdes: 1809175790 */ - struct intel_engine_cs *engine; - unsigned int tmp; - - *cs++ = MI_LOAD_REGISTER_IMM(hweight32(aux_inv)); - for_each_engine_masked(engine, rq->engine->gt, aux_inv, tmp) { - *cs++ = i915_mmio_reg_offset(aux_inv_reg(engine)); - *cs++ = AUX_INV; - } - *cs++ = MI_NOOP; + if (rq->engine->class == VIDEO_DECODE_CLASS) + cs = gen12_emit_aux_table_inv(cs, GEN12_VD0_AUX_NV); + else + cs = gen12_emit_aux_table_inv(cs, GEN12_VE0_AUX_NV); } if (mode & EMIT_INVALIDATE) @@ -403,6 +386,59 @@ int gen8_emit_init_breadcrumb(struct i915_request *rq) return 0; } +static int __gen125_emit_bb_start(struct i915_request *rq, + u64 offset, u32 len, + const unsigned int flags, + u32 arb) +{ + struct intel_context *ce = rq->context; + u32 wa_offset = lrc_indirect_bb(ce); + u32 *cs; + + cs = intel_ring_begin(rq, 12); + if (IS_ERR(cs)) + return PTR_ERR(cs); + + *cs++ = MI_ARB_ON_OFF | arb; + + *cs++ = MI_LOAD_REGISTER_MEM_GEN8 | + MI_SRM_LRM_GLOBAL_GTT | + MI_LRI_LRM_CS_MMIO; + *cs++ = i915_mmio_reg_offset(RING_PREDICATE_RESULT(0)); + *cs++ = wa_offset + DG2_PREDICATE_RESULT_WA; + *cs++ = 0; + + *cs++ = MI_BATCH_BUFFER_START_GEN8 | + (flags & I915_DISPATCH_SECURE ? 0 : BIT(8)); + *cs++ = lower_32_bits(offset); + *cs++ = upper_32_bits(offset); + + /* Fixup stray MI_SET_PREDICATE as it prevents us executing the ring */ + *cs++ = MI_BATCH_BUFFER_START_GEN8; + *cs++ = wa_offset + DG2_PREDICATE_RESULT_BB; + *cs++ = 0; + + *cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE; + + intel_ring_advance(rq, cs); + + return 0; +} + +int gen125_emit_bb_start_noarb(struct i915_request *rq, + u64 offset, u32 len, + const unsigned int flags) +{ + return __gen125_emit_bb_start(rq, offset, len, flags, MI_ARB_DISABLE); +} + +int gen125_emit_bb_start(struct i915_request *rq, + u64 offset, u32 len, + const unsigned int flags) +{ + return __gen125_emit_bb_start(rq, offset, len, flags, MI_ARB_ENABLE); +} + int gen8_emit_bb_start_noarb(struct i915_request *rq, u64 offset, u32 len, const unsigned int flags) @@ -601,6 +637,43 @@ static u32 *gen12_emit_preempt_busywait(struct i915_request *rq, u32 *cs) return cs; } +/* Wa_14014475959:dg2 */ +#define CCS_SEMAPHORE_PPHWSP_OFFSET 0x540 +static u32 ccs_semaphore_offset(struct i915_request *rq) +{ + return i915_ggtt_offset(rq->context->state) + + (LRC_PPHWSP_PN * PAGE_SIZE) + CCS_SEMAPHORE_PPHWSP_OFFSET; +} + +/* Wa_14014475959:dg2 */ +static u32 *ccs_emit_wa_busywait(struct i915_request *rq, u32 *cs) +{ + int i; + + *cs++ = MI_ATOMIC_INLINE | MI_ATOMIC_GLOBAL_GTT | MI_ATOMIC_CS_STALL | + MI_ATOMIC_MOVE; + *cs++ = ccs_semaphore_offset(rq); + *cs++ = 0; + *cs++ = 1; + + /* + * When MI_ATOMIC_INLINE_DATA set this command must be 11 DW + (1 NOP) + * to align. 4 DWs above + 8 filler DWs here. 
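The comment above spells out the Wa_14014475959 sizing rule: with MI_ATOMIC_INLINE_DATA set, the command must span 11 dwords plus one NOP of padding, i.e. 4 payload dwords followed by 8 fillers. A minimal standalone sketch of that emission, with simplified MI_* encodings assumed for illustration:

```c
/*
 * Sketch of the atomic-move emission sizing for the CCS switchout
 * workaround: 4 payload dwords + 8 fillers = the mandated 12-dword slot.
 */
#include <assert.h>
#include <stdint.h>

#define MI_INSTR(op, flags)   (((op) << 23) | (flags))
#define MI_ATOMIC_INLINE      (MI_INSTR(0x2f, 9) | (1u << 18)) /* inline data */
#define MI_ATOMIC_GLOBAL_GTT  (1u << 22)
#define MI_ATOMIC_CS_STALL    (1u << 17)
#define MI_ATOMIC_MOVE        (0x4u << 8)

static uint32_t *emit_ccs_wa_atomic(uint32_t *cs, uint32_t semaphore_ggtt)
{
	uint32_t *start = cs;

	*cs++ = MI_ATOMIC_INLINE | MI_ATOMIC_GLOBAL_GTT |
		MI_ATOMIC_CS_STALL | MI_ATOMIC_MOVE;
	*cs++ = semaphore_ggtt;      /* lower address dword */
	*cs++ = 0;                   /* upper address dword */
	*cs++ = 1;                   /* inline operand */

	for (int i = 0; i < 8; i++)  /* filler dwords per the comment above */
		*cs++ = 0;

	assert(cs - start == 12);    /* 11 DW command + 1 NOP of alignment */
	return cs;
}
```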
+ */ + for (i = 0; i < 8; ++i) + *cs++ = 0; + + *cs++ = MI_SEMAPHORE_WAIT | + MI_SEMAPHORE_GLOBAL_GTT | + MI_SEMAPHORE_POLL | + MI_SEMAPHORE_SAD_EQ_SDD; + *cs++ = 0; + *cs++ = ccs_semaphore_offset(rq); + *cs++ = 0; + + return cs; +} + static __always_inline u32* gen12_emit_fini_breadcrumb_tail(struct i915_request *rq, u32 *cs) { @@ -611,6 +684,10 @@ gen12_emit_fini_breadcrumb_tail(struct i915_request *rq, u32 *cs) !intel_uc_uses_guc_submission(&rq->engine->gt->uc)) cs = gen12_emit_preempt_busywait(rq, cs); + /* Wa_14014475959:dg2 */ + if (intel_engine_uses_wa_hold_ccs_switchout(rq->engine)) + cs = ccs_emit_wa_busywait(rq, cs); + rq->tail = intel_ring_offset(rq, cs); assert_ring_tail_valid(rq->ring, rq->tail); diff --git a/drivers/gpu/drm/i915/gt/gen8_engine_cs.h b/drivers/gpu/drm/i915/gt/gen8_engine_cs.h index cc6e21d3662a..32e3d2b831bb 100644 --- a/drivers/gpu/drm/i915/gt/gen8_engine_cs.h +++ b/drivers/gpu/drm/i915/gt/gen8_engine_cs.h @@ -10,7 +10,7 @@ #include <linux/types.h> #include "i915_gem.h" /* GEM_BUG_ON */ - +#include "intel_gt_regs.h" #include "intel_gpu_commands.h" struct i915_request; @@ -31,6 +31,13 @@ int gen8_emit_bb_start(struct i915_request *rq, u64 offset, u32 len, const unsigned int flags); +int gen125_emit_bb_start_noarb(struct i915_request *rq, + u64 offset, u32 len, + const unsigned int flags); +int gen125_emit_bb_start(struct i915_request *rq, + u64 offset, u32 len, + const unsigned int flags); + u32 *gen8_emit_fini_breadcrumb_xcs(struct i915_request *rq, u32 *cs); u32 *gen12_emit_fini_breadcrumb_xcs(struct i915_request *rq, u32 *cs); @@ -38,6 +45,8 @@ u32 *gen8_emit_fini_breadcrumb_rcs(struct i915_request *rq, u32 *cs); u32 *gen11_emit_fini_breadcrumb_rcs(struct i915_request *rq, u32 *cs); u32 *gen12_emit_fini_breadcrumb_rcs(struct i915_request *rq, u32 *cs); +u32 *gen12_emit_aux_table_inv(u32 *cs, const i915_reg_t inv_reg); + static inline u32 * __gen8_emit_pipe_control(u32 *batch, u32 flags0, u32 flags1, u32 offset) { diff --git a/drivers/gpu/drm/i915/gt/gen8_ppgtt.c b/drivers/gpu/drm/i915/gt/gen8_ppgtt.c index f574da00eff1..c7bd5d71b03e 100644 --- a/drivers/gpu/drm/i915/gt/gen8_ppgtt.c +++ b/drivers/gpu/drm/i915/gt/gen8_ppgtt.c @@ -454,11 +454,11 @@ gen8_ppgtt_insert_pte(struct i915_ppgtt *ppgtt, pd = pdp->entry[gen8_pd_index(idx, 2)]; } - clflush_cache_range(vaddr, PAGE_SIZE); + drm_clflush_virt_range(vaddr, PAGE_SIZE); vaddr = px_vaddr(i915_pt_entry(pd, gen8_pd_index(idx, 1))); } } while (1); - clflush_cache_range(vaddr, PAGE_SIZE); + drm_clflush_virt_range(vaddr, PAGE_SIZE); return idx; } @@ -631,7 +631,7 @@ static void gen8_ppgtt_insert_huge(struct i915_address_space *vm, } } while (rem >= page_size && index < I915_PDES); - clflush_cache_range(vaddr, PAGE_SIZE); + drm_clflush_virt_range(vaddr, PAGE_SIZE); /* * Is it safe to mark the 2M block as 64K? 
-- Either we have @@ -647,7 +647,7 @@ static void gen8_ppgtt_insert_huge(struct i915_address_space *vm, I915_GTT_PAGE_SIZE_2M)))) { vaddr = px_vaddr(pd); vaddr[maybe_64K] |= GEN8_PDE_IPS_64K; - clflush_cache_range(vaddr, PAGE_SIZE); + drm_clflush_virt_range(vaddr, PAGE_SIZE); page_size = I915_GTT_PAGE_SIZE_64K; /* @@ -668,7 +668,7 @@ static void gen8_ppgtt_insert_huge(struct i915_address_space *vm, for (i = 1; i < index; i += 16) memset64(vaddr + i, encode, 15); - clflush_cache_range(vaddr, PAGE_SIZE); + drm_clflush_virt_range(vaddr, PAGE_SIZE); } } @@ -722,7 +722,7 @@ static void gen8_ppgtt_insert_entry(struct i915_address_space *vm, vaddr = px_vaddr(pt); vaddr[gen8_pd_index(idx, 0)] = gen8_pte_encode(addr, level, flags); - clflush_cache_range(&vaddr[gen8_pd_index(idx, 0)], sizeof(*vaddr)); + drm_clflush_virt_range(&vaddr[gen8_pd_index(idx, 0)], sizeof(*vaddr)); } static void __xehpsdv_ppgtt_insert_entry_lm(struct i915_address_space *vm, diff --git a/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c b/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c index 209cf265bf74..9dc9dccf7b09 100644 --- a/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c +++ b/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c @@ -4,6 +4,7 @@ */ #include <linux/kthread.h> +#include <linux/string_helpers.h> #include <trace/events/dma_fence.h> #include <uapi/linux/sched/types.h> @@ -512,7 +513,7 @@ void intel_engine_print_breadcrumbs(struct intel_engine_cs *engine, if (!b) return; - drm_printf(p, "IRQ: %s\n", enableddisabled(b->irq_armed)); + drm_printf(p, "IRQ: %s\n", str_enabled_disabled(b->irq_armed)); if (!list_empty(&b->signalers)) print_signals(b, p); } diff --git a/drivers/gpu/drm/i915/gt/intel_context.c b/drivers/gpu/drm/i915/gt/intel_context.c index 5d0ec7c49b6a..4070cb5711d8 100644 --- a/drivers/gpu/drm/i915/gt/intel_context.c +++ b/drivers/gpu/drm/i915/gt/intel_context.c @@ -386,7 +386,7 @@ intel_context_init(struct intel_context *ce, struct intel_engine_cs *engine) ce->ring = NULL; ce->ring_size = SZ_4K; - ewma_runtime_init(&ce->runtime.avg); + ewma_runtime_init(&ce->stats.runtime.avg); ce->vm = i915_vm_get(engine->gt->vm); @@ -400,7 +400,7 @@ intel_context_init(struct intel_context *ce, struct intel_engine_cs *engine) INIT_LIST_HEAD(&ce->guc_state.fences); INIT_LIST_HEAD(&ce->guc_state.requests); - ce->guc_id.id = GUC_INVALID_LRC_ID; + ce->guc_id.id = GUC_INVALID_CONTEXT_ID; INIT_LIST_HEAD(&ce->guc_id.link); INIT_LIST_HEAD(&ce->destroyed_link); @@ -576,6 +576,31 @@ void intel_context_bind_parent_child(struct intel_context *parent, child->parallel.parent = parent; } +u64 intel_context_get_total_runtime_ns(const struct intel_context *ce) +{ + u64 total, active; + + total = ce->stats.runtime.total; + if (ce->ops->flags & COPS_RUNTIME_CYCLES) + total *= ce->engine->gt->clock_period_ns; + + active = READ_ONCE(ce->stats.active); + if (active) + active = intel_context_clock() - active; + + return total + active; +} + +u64 intel_context_get_avg_runtime_ns(struct intel_context *ce) +{ + u64 avg = ewma_runtime_read(&ce->stats.runtime.avg); + + if (ce->ops->flags & COPS_RUNTIME_CYCLES) + avg *= ce->engine->gt->clock_period_ns; + + return avg; +} + #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) #include "selftest_context.c" #endif diff --git a/drivers/gpu/drm/i915/gt/intel_context.h b/drivers/gpu/drm/i915/gt/intel_context.h index d8c74bbf9aae..b7d3214d2cdd 100644 --- a/drivers/gpu/drm/i915/gt/intel_context.h +++ b/drivers/gpu/drm/i915/gt/intel_context.h @@ -351,18 +351,13 @@ intel_context_clear_nopreempt(struct intel_context *ce) 
clear_bit(CONTEXT_NOPREEMPT, &ce->flags); } -static inline u64 intel_context_get_total_runtime_ns(struct intel_context *ce) -{ - const u32 period = ce->engine->gt->clock_period_ns; - - return READ_ONCE(ce->runtime.total) * period; -} +u64 intel_context_get_total_runtime_ns(const struct intel_context *ce); +u64 intel_context_get_avg_runtime_ns(struct intel_context *ce); -static inline u64 intel_context_get_avg_runtime_ns(struct intel_context *ce) +static inline u64 intel_context_clock(void) { - const u32 period = ce->engine->gt->clock_period_ns; - - return mul_u32_u32(ewma_runtime_read(&ce->runtime.avg), period); + /* As we mix CS cycles with CPU clocks, use the raw monotonic clock. */ + return ktime_get_raw_fast_ns(); } #endif /* __INTEL_CONTEXT_H__ */ diff --git a/drivers/gpu/drm/i915/gt/intel_context_types.h b/drivers/gpu/drm/i915/gt/intel_context_types.h index 30cd81ad8911..09f82545789f 100644 --- a/drivers/gpu/drm/i915/gt/intel_context_types.h +++ b/drivers/gpu/drm/i915/gt/intel_context_types.h @@ -35,6 +35,9 @@ struct intel_context_ops { #define COPS_HAS_INFLIGHT_BIT 0 #define COPS_HAS_INFLIGHT BIT(COPS_HAS_INFLIGHT_BIT) +#define COPS_RUNTIME_CYCLES_BIT 1 +#define COPS_RUNTIME_CYCLES BIT(COPS_RUNTIME_CYCLES_BIT) + int (*alloc)(struct intel_context *ce); void (*ban)(struct intel_context *ce, struct i915_request *rq); @@ -134,14 +137,19 @@ struct intel_context { } lrc; u32 tag; /* cookie passed to HW to track this context on submission */ - /* Time on GPU as tracked by the hw. */ - struct { - struct ewma_runtime avg; - u64 total; - u32 last; - I915_SELFTEST_DECLARE(u32 num_underflow); - I915_SELFTEST_DECLARE(u32 max_underflow); - } runtime; + /** stats: Context GPU engine busyness tracking. */ + struct intel_context_stats { + u64 active; + + /* Time on GPU as tracked by the hw. */ + struct { + struct ewma_runtime avg; + u64 total; + u32 last; + I915_SELFTEST_DECLARE(u32 num_underflow); + I915_SELFTEST_DECLARE(u32 max_underflow); + } runtime; + } stats; unsigned int active_count; /* protected by timeline->mutex */ diff --git a/drivers/gpu/drm/i915/gt/intel_engine.h b/drivers/gpu/drm/i915/gt/intel_engine.h index 1c0ab05c3c40..1431f1e9dbee 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine.h +++ b/drivers/gpu/drm/i915/gt/intel_engine.h @@ -4,6 +4,7 @@ #include <asm/cacheflush.h> #include <drm/drm_util.h> +#include <drm/drm_cache.h> #include <linux/hashtable.h> #include <linux/irq_work.h> @@ -143,15 +144,9 @@ intel_write_status_page(struct intel_engine_cs *engine, int reg, u32 value) * of extra paranoia to try and ensure that the HWS takes the value * we give and that it doesn't end up trapped inside the CPU! 
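The new out-of-line getters scale hardware cycle counts by the GT clock period only when the backend reports runtimes in CS cycles (COPS_RUNTIME_CYCLES), and fold in any slice that is still executing. A userspace model of that arithmetic; the struct and parameter names are illustrative, not the driver's types:

```c
/* Model of intel_context_get_total_runtime_ns() from the diff. */
#include <stdbool.h>
#include <stdint.h>

struct ctx_stats {
	uint64_t active;   /* CPU ns when the context went active, 0 if idle */
	uint64_t total;    /* accumulated runtime, in cycles or ns */
};

static uint64_t total_runtime_ns(const struct ctx_stats *s,
				 bool runtime_in_cycles,
				 uint32_t clock_period_ns,
				 uint64_t now_ns)
{
	uint64_t total = s->total;

	if (runtime_in_cycles)
		total *= clock_period_ns;   /* CS cycles -> ns */

	/* Credit the slice still executing on the GPU, if any. */
	return total + (s->active ? now_ns - s->active : 0);
}
```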
*/ - if (static_cpu_has(X86_FEATURE_CLFLUSH)) { - mb(); - clflush(&engine->status_page.addr[reg]); - engine->status_page.addr[reg] = value; - clflush(&engine->status_page.addr[reg]); - mb(); - } else { - WRITE_ONCE(engine->status_page.addr[reg], value); - } + drm_clflush_virt_range(&engine->status_page.addr[reg], sizeof(value)); + WRITE_ONCE(engine->status_page.addr[reg], value); + drm_clflush_virt_range(&engine->status_page.addr[reg], sizeof(value)); } /* diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c index e1aa78b20d2d..14c6ddbbfde8 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c +++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c @@ -3,6 +3,8 @@ * Copyright © 2016 Intel Corporation */ +#include <linux/string_helpers.h> + #include <drm/drm_print.h> #include "gem/i915_gem_context.h" @@ -434,6 +436,11 @@ static int intel_engine_setup(struct intel_gt *gt, enum intel_engine_id id, if (GRAPHICS_VER(i915) == 12 && engine->class == RENDER_CLASS) engine->props.preempt_timeout_ms = 0; + if ((engine->class == COMPUTE_CLASS && !RCS_MASK(engine->gt) && + __ffs(CCS_MASK(engine->gt)) == engine->instance) || + engine->class == RENDER_CLASS) + engine->flags |= I915_ENGINE_FIRST_RENDER_COMPUTE; + /* features common between engines sharing EUs */ if (engine->class == RENDER_CLASS || engine->class == COMPUTE_CLASS) { engine->flags |= I915_ENGINE_HAS_RCS_REG_STATE; @@ -724,12 +731,24 @@ static void populate_logical_ids(struct intel_gt *gt, u8 *logical_ids, static void setup_logical_ids(struct intel_gt *gt, u8 *logical_ids, u8 class) { - int i; - u8 map[MAX_ENGINE_INSTANCE + 1]; + /* + * Logical to physical mapping is needed for proper support + * to split-frame feature. + */ + if (MEDIA_VER(gt->i915) >= 11 && class == VIDEO_DECODE_CLASS) { + const u8 map[] = { 0, 2, 4, 6, 1, 3, 5, 7 }; + + populate_logical_ids(gt, logical_ids, class, + map, ARRAY_SIZE(map)); + } else { + int i; + u8 map[MAX_ENGINE_INSTANCE + 1]; - for (i = 0; i < MAX_ENGINE_INSTANCE + 1; ++i) - map[i] = i; - populate_logical_ids(gt, logical_ids, class, map, ARRAY_SIZE(map)); + for (i = 0; i < MAX_ENGINE_INSTANCE + 1; ++i) + map[i] = i; + populate_logical_ids(gt, logical_ids, class, + map, ARRAY_SIZE(map)); + } } /** @@ -1261,6 +1280,15 @@ static int __intel_engine_stop_cs(struct intel_engine_cs *engine, int err; intel_uncore_write_fw(uncore, mode, _MASKED_BIT_ENABLE(STOP_RING)); + + /* + * Wa_22011802037 : gen12, Prior to doing a reset, ensure CS is + * stopped, set ring stop bit and prefetch disable bit to halt CS + */ + if (GRAPHICS_VER(engine->i915) == 12) + intel_uncore_write_fw(uncore, RING_MODE_GEN7(engine->mmio_base), + _MASKED_BIT_ENABLE(GEN12_GFX_PREFETCH_DISABLE)); + err = __intel_wait_for_register_fw(engine->uncore, mode, MODE_IDLE, MODE_IDLE, fast_timeout_us, @@ -1695,9 +1723,7 @@ static void intel_engine_print_registers(struct intel_engine_cs *engine, drm_printf(m, "\tIPEHR: 0x%08x\n", ENGINE_READ(engine, IPEHR)); } - if (intel_engine_uses_guc(engine)) { - /* nothing to print yet */ - } else if (HAS_EXECLISTS(dev_priv)) { + if (HAS_EXECLISTS(dev_priv) && !intel_engine_uses_guc(engine)) { struct i915_request * const *port, *rq; const u32 *hws = &engine->status_page.addr[I915_HWS_CSB_BUF0_INDEX]; @@ -1706,9 +1732,8 @@ static void intel_engine_print_registers(struct intel_engine_cs *engine, u8 read, write; drm_printf(m, "\tExeclist tasklet queued? %s (%s), preempt? %s, timeslice? 
%s\n", - yesno(test_bit(TASKLET_STATE_SCHED, - &engine->sched_engine->tasklet.state)), - enableddisabled(!atomic_read(&engine->sched_engine->tasklet.count)), + str_yes_no(test_bit(TASKLET_STATE_SCHED, &engine->sched_engine->tasklet.state)), + str_enabled_disabled(!atomic_read(&engine->sched_engine->tasklet.count)), repr_timer(&engine->execlists.preempt), repr_timer(&engine->execlists.timer)); @@ -1969,7 +1994,7 @@ void intel_engine_dump(struct intel_engine_cs *engine, drm_printf(m, "\tAwake? %d\n", atomic_read(&engine->wakeref.count)); drm_printf(m, "\tBarriers?: %s\n", - yesno(!llist_empty(&engine->barrier_tasks))); + str_yes_no(!llist_empty(&engine->barrier_tasks))); drm_printf(m, "\tLatency: %luus\n", ewma__engine_latency_read(&engine->latency)); if (intel_engine_supports_stats(engine)) @@ -2011,7 +2036,7 @@ void intel_engine_dump(struct intel_engine_cs *engine, drm_printf(m, "HWSP:\n"); hexdump(m, engine->status_page.addr, PAGE_SIZE); - drm_printf(m, "Idle? %s\n", yesno(intel_engine_is_idle(engine))); + drm_printf(m, "Idle? %s\n", str_yes_no(intel_engine_is_idle(engine))); intel_engine_print_breadcrumbs(engine, m); } diff --git a/drivers/gpu/drm/i915/gt/intel_engine_regs.h b/drivers/gpu/drm/i915/gt/intel_engine_regs.h index 0bf8b45c9319..75a0c55c5aa5 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_regs.h +++ b/drivers/gpu/drm/i915/gt/intel_engine_regs.h @@ -148,6 +148,7 @@ (REG_FIELD_PREP(CMD_CCTL_WRITE_OVERRIDE_MASK, (write) << 1) | \ REG_FIELD_PREP(CMD_CCTL_READ_OVERRIDE_MASK, (read) << 1)) +#define RING_PREDICATE_RESULT(base) _MMIO((base) + 0x3b8) /* gen12+ */ #define MI_PREDICATE_RESULT_2(base) _MMIO((base) + 0x3bc) #define LOWER_SLICE_ENABLED (1 << 0) #define LOWER_SLICE_DISABLED (0 << 0) @@ -181,6 +182,7 @@ #define GFX_SURFACE_FAULT_ENABLE (1 << 12) #define GFX_REPLAY_MODE (1 << 11) #define GFX_PSMI_GRANULARITY (1 << 10) +#define GEN12_GFX_PREFETCH_DISABLE REG_BIT(10) #define GFX_PPGTT_ENABLE (1 << 9) #define GEN8_GFX_PPGTT_48B (1 << 7) #define GFX_FORWARD_VBLANK_MASK (3 << 5) @@ -192,6 +194,7 @@ #define RING_TIMESTAMP_UDW(base) _MMIO((base) + 0x358 + 4) #define RING_CONTEXT_STATUS_PTR(base) _MMIO((base) + 0x3a0) #define RING_CTX_TIMESTAMP(base) _MMIO((base) + 0x3a8) /* gen8+ */ +#define RING_PREDICATE_RESULT(base) _MMIO((base) + 0x3b8) #define RING_FORCE_TO_NONPRIV(base, i) _MMIO(((base) + 0x4D0) + (i) * 4) #define RING_FORCE_TO_NONPRIV_ADDRESS_MASK REG_GENMASK(25, 2) #define RING_FORCE_TO_NONPRIV_ACCESS_RW (0 << 28) /* CFL+ & Gen11+ */ diff --git a/drivers/gpu/drm/i915/gt/intel_engine_types.h b/drivers/gpu/drm/i915/gt/intel_engine_types.h index 19ff8758e34d..298f2cc7a879 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_types.h +++ b/drivers/gpu/drm/i915/gt/intel_engine_types.h @@ -96,7 +96,9 @@ struct i915_ctx_workarounds { #define I915_MAX_VCS 8 #define I915_MAX_VECS 4 +#define I915_MAX_SFC (I915_MAX_VCS / 2) #define I915_MAX_CCS 4 +#define I915_MAX_RCS 1 /* * Engine IDs definitions. 
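The new I915_MAX_SFC bound (I915_MAX_VCS / 2) reflects that one scaler/format-converter unit is shared by each pair of video-decode engines. A sketch of that pairing under the usual even/odd reading; the helper name is an illustrative assumption, not driver API:

```c
/* VCS/SFC pairing implied by I915_MAX_SFC = I915_MAX_VCS / 2. */
#include <stdio.h>

#define I915_MAX_VCS 8
#define I915_MAX_SFC (I915_MAX_VCS / 2)

static int vcs_to_sfc(int vcs_instance)
{
	return vcs_instance / 2;   /* VCS0/VCS1 -> SFC0, VCS2/VCS3 -> SFC1, ... */
}

int main(void)
{
	for (int vcs = 0; vcs < I915_MAX_VCS; vcs++)
		printf("VCS%d -> SFC%d (of %d)\n", vcs, vcs_to_sfc(vcs), I915_MAX_SFC);
	return 0;
}
```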
@@ -526,6 +528,8 @@ struct intel_engine_cs { #define I915_ENGINE_WANT_FORCED_PREEMPTION BIT(8) #define I915_ENGINE_HAS_RCS_REG_STATE BIT(9) #define I915_ENGINE_HAS_EU_PRIORITY BIT(10) +#define I915_ENGINE_FIRST_RENDER_COMPUTE BIT(11) +#define I915_ENGINE_USES_WA_HOLD_CCS_SWITCHOUT BIT(12) unsigned int flags; /* @@ -626,6 +630,13 @@ intel_engine_has_relative_mmio(const struct intel_engine_cs * const engine) return engine->flags & I915_ENGINE_HAS_RELATIVE_MMIO; } +/* Wa_14014475959:dg2 */ +static inline bool +intel_engine_uses_wa_hold_ccs_switchout(struct intel_engine_cs *engine) +{ + return engine->flags & I915_ENGINE_USES_WA_HOLD_CCS_SWITCHOUT; +} + #define instdone_has_slice(dev_priv___, sseu___, slice___) \ ((GRAPHICS_VER(dev_priv___) == 7 ? 1 : ((sseu___)->slice_mask)) & BIT(slice___)) @@ -643,7 +654,7 @@ intel_engine_has_relative_mmio(const struct intel_engine_cs * const engine) #define for_each_instdone_gslice_dss_xehp(dev_priv_, sseu_, iter_, gslice_, dss_) \ for ((iter_) = 0, (gslice_) = 0, (dss_) = 0; \ - (iter_) < GEN_MAX_SUBSLICES; \ + (iter_) < GEN_SS_MASK_SIZE; \ (iter_)++, (gslice_) = (iter_) / GEN_DSS_PER_GSLICE, \ (dss_) = (iter_) % GEN_DSS_PER_GSLICE) \ for_each_if(intel_sseu_has_subslice((sseu_), 0, (iter_))) diff --git a/drivers/gpu/drm/i915/gt/intel_engine_user.c b/drivers/gpu/drm/i915/gt/intel_engine_user.c index b8c9b6b89003..46a174f8aa00 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_user.c +++ b/drivers/gpu/drm/i915/gt/intel_engine_user.c @@ -47,7 +47,7 @@ static const u8 uabi_classes[] = { [COPY_ENGINE_CLASS] = I915_ENGINE_CLASS_COPY, [VIDEO_DECODE_CLASS] = I915_ENGINE_CLASS_VIDEO, [VIDEO_ENHANCEMENT_CLASS] = I915_ENGINE_CLASS_VIDEO_ENHANCE, - /* TODO: Add COMPUTE_CLASS mapping once ABI is available */ + [COMPUTE_CLASS] = I915_ENGINE_CLASS_COMPUTE, }; static int engine_cmp(void *priv, const struct list_head *A, @@ -193,7 +193,6 @@ static void add_legacy_ring(struct legacy_ring *ring, void intel_engines_driver_register(struct drm_i915_private *i915) { struct legacy_ring ring = {}; - u8 uabi_instances[5] = {}; struct list_head *it, *next; struct rb_node **p, *prev; LIST_HEAD(engines); @@ -214,8 +213,10 @@ void intel_engines_driver_register(struct drm_i915_private *i915) GEM_BUG_ON(engine->class >= ARRAY_SIZE(uabi_classes)); engine->uabi_class = uabi_classes[engine->class]; - GEM_BUG_ON(engine->uabi_class >= ARRAY_SIZE(uabi_instances)); - engine->uabi_instance = uabi_instances[engine->uabi_class]++; + GEM_BUG_ON(engine->uabi_class >= + ARRAY_SIZE(i915->engine_uabi_class_count)); + engine->uabi_instance = + i915->engine_uabi_class_count[engine->uabi_class]++; /* Replace the internal name with the final user facing name */ memcpy(old, engine->name, sizeof(engine->name)); @@ -245,8 +246,8 @@ void intel_engines_driver_register(struct drm_i915_private *i915) int class, inst; int errors = 0; - for (class = 0; class < ARRAY_SIZE(uabi_instances); class++) { - for (inst = 0; inst < uabi_instances[class]; inst++) { + for (class = 0; class < ARRAY_SIZE(i915->engine_uabi_class_count); class++) { + for (inst = 0; inst < i915->engine_uabi_class_count[class]; inst++) { engine = intel_engine_lookup_user(i915, class, inst); if (!engine) { diff --git a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c index 3e0c81f06bd0..86f7a9ac1c39 100644 --- a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c +++ b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c @@ -107,6 +107,7 @@ * */ #include <linux/interrupt.h> +#include 
<linux/string_helpers.h> #include "i915_drv.h" #include "i915_trace.h" @@ -624,8 +625,6 @@ static void __execlists_schedule_out(struct i915_request * const rq, GEM_BUG_ON(test_bit(ccid - 1, &engine->context_tag)); __set_bit(ccid - 1, &engine->context_tag); } - - lrc_update_runtime(ce); intel_engine_context_out(engine); execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_OUT); if (engine->fw_domain && !--engine->fw_active) @@ -1335,11 +1334,11 @@ static void execlists_dequeue(struct intel_engine_cs *engine) } else if (timeslice_expired(engine, last)) { ENGINE_TRACE(engine, "expired:%s last=%llx:%lld, prio=%d, hint=%d, yield?=%s\n", - yesno(timer_expired(&execlists->timer)), + str_yes_no(timer_expired(&execlists->timer)), last->fence.context, last->fence.seqno, rq_prio(last), sched_engine->queue_priority_hint, - yesno(timeslice_yield(execlists, last))); + str_yes_no(timeslice_yield(execlists, last))); /* * Consume this timeslice; ensure we start a new one. @@ -1427,7 +1426,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine) __i915_request_is_complete(rq) ? "!" : __i915_request_has_started(rq) ? "*" : "", - yesno(engine != ve->siblings[0])); + str_yes_no(engine != ve->siblings[0])); WRITE_ONCE(ve->request, NULL); WRITE_ONCE(ve->base.sched_engine->queue_priority_hint, INT_MIN); @@ -1650,12 +1649,6 @@ cancel_port_requests(struct intel_engine_execlists * const execlists, return inactive; } -static void invalidate_csb_entries(const u64 *first, const u64 *last) -{ - clflush((void *)first); - clflush((void *)last); -} - /* * Starting with Gen12, the status has a new format: * @@ -2003,15 +1996,30 @@ process_csb(struct intel_engine_cs *engine, struct i915_request **inactive) * the wash as hardware, working or not, will need to do the * invalidation before. */ - invalidate_csb_entries(&buf[0], &buf[num_entries - 1]); + drm_clflush_virt_range(&buf[0], num_entries * sizeof(buf[0])); /* * We assume that any event reflects a change in context flow * and merits a fresh timeslice. We reinstall the timer after * inspecting the queue to see if we need to resumbit. */ - if (*prev != *execlists->active) /* elide lite-restores */ + if (*prev != *execlists->active) { /* elide lite-restores */ + /* + * Note the inherent discrepancy between the HW runtime, + * recorded as part of the context switch, and the CPU + * adjustment for active contexts. We have to hope that + * the delay in processing the CS event is very small + * and consistent. It works to our advantage to have + * the CPU adjustment _undershoot_ (i.e. start later than) + * the CS timestamp so we never overreport the runtime + * and correct overselves later when updating from HW. 
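The comment above motivates the runtime bracketing added around CSB context switches: stop accounting for the outgoing context, start for the incoming one, taking the CPU timestamp after the event so the CPU view starts later than the CS timestamp and busyness is never overreported. A model of that bracketing; these are illustrative stand-ins, not the driver's lrc_runtime_*() helpers:

```c
/* Model of the stop/start bracketing done in process_csb(). */
#include <stdint.h>

struct ctx {
	uint64_t active;   /* CPU ns when the context went active, 0 if idle */
	uint64_t total;    /* HW-maintained total, refreshed from the context image */
};

static void runtime_start(struct ctx *ce, uint64_t now_ns)
{
	/* Taken after the CS event: starts later than the HW switch-in. */
	ce->active = now_ns;
}

static void runtime_stop(struct ctx *ce)
{
	/* The HW CTX_TIMESTAMP delta lands in ce->total elsewhere. */
	ce->active = 0;
}

static void on_csb_context_switch(struct ctx *prev, struct ctx *next,
				  uint64_t now_ns)
{
	if (prev)
		runtime_stop(prev);
	if (next)
		runtime_start(next, now_ns);
}
```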
+ */ + if (*prev) + lrc_runtime_stop((*prev)->context); + if (*execlists->active) + lrc_runtime_start((*execlists->active)->context); new_timeslice(execlists); + } return inactive; } @@ -2235,11 +2243,11 @@ static struct execlists_capture *capture_regs(struct intel_engine_cs *engine) if (!cap->error) goto err_cap; - cap->error->gt = intel_gt_coredump_alloc(engine->gt, gfp); + cap->error->gt = intel_gt_coredump_alloc(engine->gt, gfp, CORE_DUMP_FLAG_NONE); if (!cap->error->gt) goto err_gpu; - cap->error->gt->engine = intel_engine_coredump_alloc(engine, gfp); + cap->error->gt->engine = intel_engine_coredump_alloc(engine, gfp, CORE_DUMP_FLAG_NONE); if (!cap->error->gt->engine) goto err_gt; @@ -2643,7 +2651,7 @@ unwind: } static const struct intel_context_ops execlists_context_ops = { - .flags = COPS_HAS_INFLIGHT, + .flags = COPS_HAS_INFLIGHT | COPS_RUNTIME_CYCLES, .alloc = execlists_context_alloc, @@ -2787,8 +2795,9 @@ static void reset_csb_pointers(struct intel_engine_cs *engine) /* Check that the GPU does indeed update the CSB entries! */ memset(execlists->csb_status, -1, (reset_value + 1) * sizeof(u64)); - invalidate_csb_entries(&execlists->csb_status[0], - &execlists->csb_status[reset_value]); + drm_clflush_virt_range(execlists->csb_status, + execlists->csb_size * + sizeof(execlists->csb_status)); /* Once more for luck and our trusty paranoia */ ENGINE_WRITE(engine, RING_CONTEXT_STATUS_PTR, @@ -2832,7 +2841,7 @@ static void execlists_sanitize(struct intel_engine_cs *engine) sanitize_hwsp(engine); /* And scrub the dirty cachelines for the HWSP */ - clflush_cache_range(engine->status_page.addr, PAGE_SIZE); + drm_clflush_virt_range(engine->status_page.addr, PAGE_SIZE); intel_engine_reset_pinned_contexts(engine); } @@ -2911,7 +2920,7 @@ static int execlists_resume(struct intel_engine_cs *engine) enable_execlists(engine); - if (engine->class == RENDER_CLASS) + if (engine->flags & I915_ENGINE_FIRST_RENDER_COMPUTE) xehp_enable_ccs_engines(engine); return 0; @@ -2957,9 +2966,8 @@ reset_csb(struct intel_engine_cs *engine, struct i915_request **inactive) { struct intel_engine_execlists * const execlists = &engine->execlists; - mb(); /* paranoia: read the CSB pointers from after the reset */ - clflush(execlists->csb_write); - mb(); + drm_clflush_virt_range(execlists->csb_write, + sizeof(execlists->csb_write[0])); inactive = process_csb(engine, inactive); /* drain preemption events */ @@ -3425,10 +3433,17 @@ logical_ring_default_vfuncs(struct intel_engine_cs *engine) } } - if (intel_engine_has_preemption(engine)) - engine->emit_bb_start = gen8_emit_bb_start; - else - engine->emit_bb_start = gen8_emit_bb_start_noarb; + if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 50)) { + if (intel_engine_has_preemption(engine)) + engine->emit_bb_start = gen125_emit_bb_start; + else + engine->emit_bb_start = gen125_emit_bb_start_noarb; + } else { + if (intel_engine_has_preemption(engine)) + engine->emit_bb_start = gen8_emit_bb_start; + else + engine->emit_bb_start = gen8_emit_bb_start_noarb; + } engine->busyness = execlists_engine_busyness; } @@ -3701,7 +3716,7 @@ virtual_get_sibling(struct intel_engine_cs *engine, unsigned int sibling) } static const struct intel_context_ops virtual_context_ops = { - .flags = COPS_HAS_INFLIGHT, + .flags = COPS_HAS_INFLIGHT | COPS_RUNTIME_CYCLES, .alloc = virtual_context_alloc, diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt.c b/drivers/gpu/drm/i915/gt/intel_ggtt.c index 8850d4e0f9cc..e6b2eb122ad7 100644 --- a/drivers/gpu/drm/i915/gt/intel_ggtt.c +++ 
b/drivers/gpu/drm/i915/gt/intel_ggtt.c @@ -3,21 +3,20 @@ * Copyright © 2020 Intel Corporation */ -#include <linux/agp_backend.h> -#include <linux/stop_machine.h> - +#include <linux/types.h> #include <asm/set_memory.h> #include <asm/smp.h> #include <drm/i915_drm.h> -#include <drm/intel-gtt.h> #include "gem/i915_gem_lmem.h" #include "intel_gt.h" +#include "intel_gt_gmch.h" #include "intel_gt_regs.h" #include "i915_drv.h" #include "i915_scatterlist.h" +#include "i915_utils.h" #include "i915_vgpu.h" #include "intel_gtt.h" @@ -94,28 +93,6 @@ int i915_ggtt_init_hw(struct drm_i915_private *i915) return 0; } -/* - * Certain Gen5 chipsets require idling the GPU before - * unmapping anything from the GTT when VT-d is enabled. - */ -static bool needs_idle_maps(struct drm_i915_private *i915) -{ - /* - * Query intel_iommu to see if we need the workaround. Presumably that - * was loaded first. - */ - if (!intel_vtd_active(i915)) - return false; - - if (GRAPHICS_VER(i915) == 5 && IS_MOBILE(i915)) - return true; - - if (GRAPHICS_VER(i915) == 12) - return true; /* XXX DMAR fault reason 7 */ - - return false; -} - /** * i915_ggtt_suspend_vm - Suspend the memory mappings for a GGTT or DPT VM * @vm: The VM to suspend the mappings for @@ -126,7 +103,7 @@ static bool needs_idle_maps(struct drm_i915_private *i915) void i915_ggtt_suspend_vm(struct i915_address_space *vm) { struct i915_vma *vma, *vn; - int open; + int save_skip_rewrite; drm_WARN_ON(&vm->i915->drm, !vm->is_ggtt && !vm->is_dpt); @@ -135,8 +112,12 @@ retry: mutex_lock(&vm->mutex); - /* Skip rewriting PTE on VMA unbind. */ - open = atomic_xchg(&vm->open, 0); + /* + * Skip rewriting PTE on VMA unbind. + * FIXME: Use an argument to i915_vma_unbind() instead? + */ + save_skip_rewrite = vm->skip_pte_rewrite; + vm->skip_pte_rewrite = true; list_for_each_entry_safe(vma, vn, &vm->bound_list, vm_link) { struct drm_i915_gem_object *obj = vma->obj; @@ -154,16 +135,14 @@ retry: */ i915_gem_object_get(obj); - atomic_set(&vm->open, open); mutex_unlock(&vm->mutex); i915_gem_object_lock(obj, NULL); - open = i915_vma_unbind(vma); + GEM_WARN_ON(i915_vma_unbind(vma)); i915_gem_object_unlock(obj); - - GEM_WARN_ON(open); - i915_gem_object_put(obj); + + vm->skip_pte_rewrite = save_skip_rewrite; goto retry; } @@ -179,7 +158,7 @@ retry: vm->clear_range(vm, 0, vm->total); - atomic_set(&vm->open, open); + vm->skip_pte_rewrite = save_skip_rewrite; mutex_unlock(&vm->mutex); } @@ -202,7 +181,7 @@ void gen6_ggtt_invalidate(struct i915_ggtt *ggtt) spin_unlock_irq(&uncore->lock); } -static void gen8_ggtt_invalidate(struct i915_ggtt *ggtt) +void gen8_ggtt_invalidate(struct i915_ggtt *ggtt) { struct intel_uncore *uncore = ggtt->vm.gt->uncore; @@ -227,11 +206,6 @@ static void guc_ggtt_invalidate(struct i915_ggtt *ggtt) intel_uncore_write_fw(uncore, GEN8_GTCR, GEN8_GTCR_INVALIDATE); } -static void gmch_ggtt_invalidate(struct i915_ggtt *ggtt) -{ - intel_gtt_chipset_flush(); -} - u64 gen8_ggtt_pte_encode(dma_addr_t addr, enum i915_cache_level level, u32 flags) @@ -244,258 +218,7 @@ u64 gen8_ggtt_pte_encode(dma_addr_t addr, return pte; } -static void gen8_set_pte(void __iomem *addr, gen8_pte_t pte) -{ - writeq(pte, addr); -} - -static void gen8_ggtt_insert_page(struct i915_address_space *vm, - dma_addr_t addr, - u64 offset, - enum i915_cache_level level, - u32 flags) -{ - struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm); - gen8_pte_t __iomem *pte = - (gen8_pte_t __iomem *)ggtt->gsm + offset / I915_GTT_PAGE_SIZE; - - gen8_set_pte(pte, gen8_ggtt_pte_encode(addr, level, flags)); - - 
ggtt->invalidate(ggtt); -} - -static void gen8_ggtt_insert_entries(struct i915_address_space *vm, - struct i915_vma_resource *vma_res, - enum i915_cache_level level, - u32 flags) -{ - const gen8_pte_t pte_encode = gen8_ggtt_pte_encode(0, level, flags); - struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm); - gen8_pte_t __iomem *gte; - gen8_pte_t __iomem *end; - struct sgt_iter iter; - dma_addr_t addr; - - /* - * Note that we ignore PTE_READ_ONLY here. The caller must be careful - * not to allow the user to override access to a read only page. - */ - - gte = (gen8_pte_t __iomem *)ggtt->gsm; - gte += vma_res->start / I915_GTT_PAGE_SIZE; - end = gte + vma_res->node_size / I915_GTT_PAGE_SIZE; - - for_each_sgt_daddr(addr, iter, vma_res->bi.pages) - gen8_set_pte(gte++, pte_encode | addr); - GEM_BUG_ON(gte > end); - - /* Fill the allocated but "unused" space beyond the end of the buffer */ - while (gte < end) - gen8_set_pte(gte++, vm->scratch[0]->encode); - - /* - * We want to flush the TLBs only after we're certain all the PTE - * updates have finished. - */ - ggtt->invalidate(ggtt); -} - -static void gen6_ggtt_insert_page(struct i915_address_space *vm, - dma_addr_t addr, - u64 offset, - enum i915_cache_level level, - u32 flags) -{ - struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm); - gen6_pte_t __iomem *pte = - (gen6_pte_t __iomem *)ggtt->gsm + offset / I915_GTT_PAGE_SIZE; - - iowrite32(vm->pte_encode(addr, level, flags), pte); - - ggtt->invalidate(ggtt); -} - -/* - * Binds an object into the global gtt with the specified cache level. - * The object will be accessible to the GPU via commands whose operands - * reference offsets within the global GTT as well as accessible by the GPU - * through the GMADR mapped BAR (i915->mm.gtt->gtt). - */ -static void gen6_ggtt_insert_entries(struct i915_address_space *vm, - struct i915_vma_resource *vma_res, - enum i915_cache_level level, - u32 flags) -{ - struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm); - gen6_pte_t __iomem *gte; - gen6_pte_t __iomem *end; - struct sgt_iter iter; - dma_addr_t addr; - - gte = (gen6_pte_t __iomem *)ggtt->gsm; - gte += vma_res->start / I915_GTT_PAGE_SIZE; - end = gte + vma_res->node_size / I915_GTT_PAGE_SIZE; - - for_each_sgt_daddr(addr, iter, vma_res->bi.pages) - iowrite32(vm->pte_encode(addr, level, flags), gte++); - GEM_BUG_ON(gte > end); - - /* Fill the allocated but "unused" space beyond the end of the buffer */ - while (gte < end) - iowrite32(vm->scratch[0]->encode, gte++); - - /* - * We want to flush the TLBs only after we're certain all the PTE - * updates have finished. 
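The insert helpers being relocated here all share one shape: write every PTE, backfill the allocated-but-unused tail with the scratch PTE, then issue a single TLB invalidate once all writes have landed. A condensed sketch of that pattern; the types and invalidate callback are stand-ins for the driver's GGTT structures:

```c
/* Shape of the batched GGTT insert: write all PTEs, flush once. */
#include <stddef.h>
#include <stdint.h>

typedef uint64_t gen8_pte_t;

struct fake_ggtt {
	gen8_pte_t *gsm;           /* PTE array (ioremapped in the driver) */
	gen8_pte_t scratch_pte;
	void (*invalidate)(struct fake_ggtt *ggtt);
};

static void insert_entries(struct fake_ggtt *ggtt, size_t first,
			   size_t node_entries, const uint64_t *addrs,
			   size_t count, gen8_pte_t pte_flags)
{
	gen8_pte_t *gte = ggtt->gsm + first;
	gen8_pte_t *end = gte + node_entries;

	for (size_t i = 0; i < count; i++)
		*gte++ = pte_flags | addrs[i];

	while (gte < end)              /* unused space beyond the buffer */
		*gte++ = ggtt->scratch_pte;

	ggtt->invalidate(ggtt);        /* one flush after all PTE updates */
}
```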
- */ - ggtt->invalidate(ggtt); -} - -static void nop_clear_range(struct i915_address_space *vm, - u64 start, u64 length) -{ -} - -static void gen8_ggtt_clear_range(struct i915_address_space *vm, - u64 start, u64 length) -{ - struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm); - unsigned int first_entry = start / I915_GTT_PAGE_SIZE; - unsigned int num_entries = length / I915_GTT_PAGE_SIZE; - const gen8_pte_t scratch_pte = vm->scratch[0]->encode; - gen8_pte_t __iomem *gtt_base = - (gen8_pte_t __iomem *)ggtt->gsm + first_entry; - const int max_entries = ggtt_total_entries(ggtt) - first_entry; - int i; - - if (WARN(num_entries > max_entries, - "First entry = %d; Num entries = %d (max=%d)\n", - first_entry, num_entries, max_entries)) - num_entries = max_entries; - - for (i = 0; i < num_entries; i++) - gen8_set_pte(>t_base[i], scratch_pte); -} - -static void bxt_vtd_ggtt_wa(struct i915_address_space *vm) -{ - /* - * Make sure the internal GAM fifo has been cleared of all GTT - * writes before exiting stop_machine(). This guarantees that - * any aperture accesses waiting to start in another process - * cannot back up behind the GTT writes causing a hang. - * The register can be any arbitrary GAM register. - */ - intel_uncore_posting_read_fw(vm->gt->uncore, GFX_FLSH_CNTL_GEN6); -} - -struct insert_page { - struct i915_address_space *vm; - dma_addr_t addr; - u64 offset; - enum i915_cache_level level; -}; - -static int bxt_vtd_ggtt_insert_page__cb(void *_arg) -{ - struct insert_page *arg = _arg; - - gen8_ggtt_insert_page(arg->vm, arg->addr, arg->offset, arg->level, 0); - bxt_vtd_ggtt_wa(arg->vm); - - return 0; -} - -static void bxt_vtd_ggtt_insert_page__BKL(struct i915_address_space *vm, - dma_addr_t addr, - u64 offset, - enum i915_cache_level level, - u32 unused) -{ - struct insert_page arg = { vm, addr, offset, level }; - - stop_machine(bxt_vtd_ggtt_insert_page__cb, &arg, NULL); -} - -struct insert_entries { - struct i915_address_space *vm; - struct i915_vma_resource *vma_res; - enum i915_cache_level level; - u32 flags; -}; - -static int bxt_vtd_ggtt_insert_entries__cb(void *_arg) -{ - struct insert_entries *arg = _arg; - - gen8_ggtt_insert_entries(arg->vm, arg->vma_res, arg->level, arg->flags); - bxt_vtd_ggtt_wa(arg->vm); - - return 0; -} - -static void bxt_vtd_ggtt_insert_entries__BKL(struct i915_address_space *vm, - struct i915_vma_resource *vma_res, - enum i915_cache_level level, - u32 flags) -{ - struct insert_entries arg = { vm, vma_res, level, flags }; - - stop_machine(bxt_vtd_ggtt_insert_entries__cb, &arg, NULL); -} - -static void gen6_ggtt_clear_range(struct i915_address_space *vm, - u64 start, u64 length) -{ - struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm); - unsigned int first_entry = start / I915_GTT_PAGE_SIZE; - unsigned int num_entries = length / I915_GTT_PAGE_SIZE; - gen6_pte_t scratch_pte, __iomem *gtt_base = - (gen6_pte_t __iomem *)ggtt->gsm + first_entry; - const int max_entries = ggtt_total_entries(ggtt) - first_entry; - int i; - - if (WARN(num_entries > max_entries, - "First entry = %d; Num entries = %d (max=%d)\n", - first_entry, num_entries, max_entries)) - num_entries = max_entries; - - scratch_pte = vm->scratch[0]->encode; - for (i = 0; i < num_entries; i++) - iowrite32(scratch_pte, >t_base[i]); -} - -static void i915_ggtt_insert_page(struct i915_address_space *vm, - dma_addr_t addr, - u64 offset, - enum i915_cache_level cache_level, - u32 unused) -{ - unsigned int flags = (cache_level == I915_CACHE_NONE) ? 
- AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY; - - intel_gtt_insert_page(addr, offset >> PAGE_SHIFT, flags); -} - -static void i915_ggtt_insert_entries(struct i915_address_space *vm, - struct i915_vma_resource *vma_res, - enum i915_cache_level cache_level, - u32 unused) -{ - unsigned int flags = (cache_level == I915_CACHE_NONE) ? - AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY; - - intel_gtt_insert_sg_entries(vma_res->bi.pages, vma_res->start >> PAGE_SHIFT, - flags); -} - -static void i915_ggtt_clear_range(struct i915_address_space *vm, - u64 start, u64 length) -{ - intel_gtt_clear_range(start >> PAGE_SHIFT, length >> PAGE_SHIFT); -} - -static void ggtt_bind_vma(struct i915_address_space *vm, +void intel_ggtt_bind_vma(struct i915_address_space *vm, struct i915_vm_pt_stash *stash, struct i915_vma_resource *vma_res, enum i915_cache_level cache_level, @@ -519,7 +242,7 @@ static void ggtt_bind_vma(struct i915_address_space *vm, vma_res->page_sizes_gtt = I915_GTT_PAGE_SIZE; } -static void ggtt_unbind_vma(struct i915_address_space *vm, +void intel_ggtt_unbind_vma(struct i915_address_space *vm, struct i915_vma_resource *vma_res) { vm->clear_range(vm, vma_res->start, vma_res->vma_size); @@ -722,10 +445,10 @@ static int init_aliasing_ppgtt(struct i915_ggtt *ggtt) ggtt->alias = ppgtt; ggtt->vm.bind_async_flags |= ppgtt->vm.bind_async_flags; - GEM_BUG_ON(ggtt->vm.vma_ops.bind_vma != ggtt_bind_vma); + GEM_BUG_ON(ggtt->vm.vma_ops.bind_vma != intel_ggtt_bind_vma); ggtt->vm.vma_ops.bind_vma = aliasing_gtt_bind_vma; - GEM_BUG_ON(ggtt->vm.vma_ops.unbind_vma != ggtt_unbind_vma); + GEM_BUG_ON(ggtt->vm.vma_ops.unbind_vma != intel_ggtt_unbind_vma); ggtt->vm.vma_ops.unbind_vma = aliasing_gtt_unbind_vma; i915_vm_free_pt_stash(&ppgtt->vm, &stash); @@ -748,8 +471,8 @@ static void fini_aliasing_ppgtt(struct i915_ggtt *ggtt) i915_vm_put(&ppgtt->vm); - ggtt->vm.vma_ops.bind_vma = ggtt_bind_vma; - ggtt->vm.vma_ops.unbind_vma = ggtt_unbind_vma; + ggtt->vm.vma_ops.bind_vma = intel_ggtt_bind_vma; + ggtt->vm.vma_ops.unbind_vma = intel_ggtt_unbind_vma; } int i915_init_ggtt(struct drm_i915_private *i915) @@ -773,13 +496,13 @@ static void ggtt_cleanup_hw(struct i915_ggtt *ggtt) { struct i915_vma *vma, *vn; - atomic_set(&ggtt->vm.open, 0); - flush_workqueue(ggtt->vm.i915->wq); i915_gem_drain_freed_objects(ggtt->vm.i915); mutex_lock(&ggtt->vm.mutex); + ggtt->vm.skip_pte_rewrite = true; + list_for_each_entry_safe(vma, vn, &ggtt->vm.bound_list, vm_link) { struct drm_i915_gem_object *obj = vma->obj; bool trylock; @@ -837,364 +560,12 @@ void i915_ggtt_driver_late_release(struct drm_i915_private *i915) dma_resv_fini(&ggtt->vm._resv); } -static unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl) -{ - snb_gmch_ctl >>= SNB_GMCH_GGMS_SHIFT; - snb_gmch_ctl &= SNB_GMCH_GGMS_MASK; - return snb_gmch_ctl << 20; -} - -static unsigned int gen8_get_total_gtt_size(u16 bdw_gmch_ctl) -{ - bdw_gmch_ctl >>= BDW_GMCH_GGMS_SHIFT; - bdw_gmch_ctl &= BDW_GMCH_GGMS_MASK; - if (bdw_gmch_ctl) - bdw_gmch_ctl = 1 << bdw_gmch_ctl; - -#ifdef CONFIG_X86_32 - /* Limit 32b platforms to a 2GB GGTT: 4 << 20 / pte size * I915_GTT_PAGE_SIZE */ - if (bdw_gmch_ctl > 4) - bdw_gmch_ctl = 4; -#endif - - return bdw_gmch_ctl << 20; -} - -static unsigned int chv_get_total_gtt_size(u16 gmch_ctrl) -{ - gmch_ctrl >>= SNB_GMCH_GGMS_SHIFT; - gmch_ctrl &= SNB_GMCH_GGMS_MASK; - - if (gmch_ctrl) - return 1 << (20 + gmch_ctrl); - - return 0; -} - -static unsigned int gen6_gttmmadr_size(struct drm_i915_private *i915) -{ - /* - * GEN6: GTTMMADR size is 4MB and GTTADR starts at 2MB offset - 
* GEN8: GTTMMADR size is 16MB and GTTADR starts at 8MB offset - */ - GEM_BUG_ON(GRAPHICS_VER(i915) < 6); - return (GRAPHICS_VER(i915) < 8) ? SZ_4M : SZ_16M; -} - -static unsigned int gen6_gttadr_offset(struct drm_i915_private *i915) -{ - return gen6_gttmmadr_size(i915) / 2; -} - -static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size) -{ - struct drm_i915_private *i915 = ggtt->vm.i915; - struct pci_dev *pdev = to_pci_dev(i915->drm.dev); - phys_addr_t phys_addr; - u32 pte_flags; - int ret; - - GEM_WARN_ON(pci_resource_len(pdev, 0) != gen6_gttmmadr_size(i915)); - phys_addr = pci_resource_start(pdev, 0) + gen6_gttadr_offset(i915); - - /* - * On BXT+/ICL+ writes larger than 64 bit to the GTT pagetable range - * will be dropped. For WC mappings in general we have 64 byte burst - * writes when the WC buffer is flushed, so we can't use it, but have to - * resort to an uncached mapping. The WC issue is easily caught by the - * readback check when writing GTT PTE entries. - */ - if (IS_GEN9_LP(i915) || GRAPHICS_VER(i915) >= 11) - ggtt->gsm = ioremap(phys_addr, size); - else - ggtt->gsm = ioremap_wc(phys_addr, size); - if (!ggtt->gsm) { - drm_err(&i915->drm, "Failed to map the ggtt page table\n"); - return -ENOMEM; - } - - kref_init(&ggtt->vm.resv_ref); - ret = setup_scratch_page(&ggtt->vm); - if (ret) { - drm_err(&i915->drm, "Scratch setup failed\n"); - /* iounmap will also get called at remove, but meh */ - iounmap(ggtt->gsm); - return ret; - } - - pte_flags = 0; - if (i915_gem_object_is_lmem(ggtt->vm.scratch[0])) - pte_flags |= PTE_LM; - - ggtt->vm.scratch[0]->encode = - ggtt->vm.pte_encode(px_dma(ggtt->vm.scratch[0]), - I915_CACHE_NONE, pte_flags); - - return 0; -} - -static void gen6_gmch_remove(struct i915_address_space *vm) -{ - struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm); - - iounmap(ggtt->gsm); - free_scratch(vm); -} - -static struct resource pci_resource(struct pci_dev *pdev, int bar) +struct resource intel_pci_resource(struct pci_dev *pdev, int bar) { return (struct resource)DEFINE_RES_MEM(pci_resource_start(pdev, bar), pci_resource_len(pdev, bar)); } -static int gen8_gmch_probe(struct i915_ggtt *ggtt) -{ - struct drm_i915_private *i915 = ggtt->vm.i915; - struct pci_dev *pdev = to_pci_dev(i915->drm.dev); - unsigned int size; - u16 snb_gmch_ctl; - - /* TODO: We're not aware of mappable constraints on gen8 yet */ - if (!HAS_LMEM(i915)) { - ggtt->gmadr = pci_resource(pdev, 2); - ggtt->mappable_end = resource_size(&ggtt->gmadr); - } - - pci_read_config_word(pdev, SNB_GMCH_CTRL, &snb_gmch_ctl); - if (IS_CHERRYVIEW(i915)) - size = chv_get_total_gtt_size(snb_gmch_ctl); - else - size = gen8_get_total_gtt_size(snb_gmch_ctl); - - ggtt->vm.alloc_pt_dma = alloc_pt_dma; - ggtt->vm.alloc_scratch_dma = alloc_pt_dma; - ggtt->vm.lmem_pt_obj_flags = I915_BO_ALLOC_PM_EARLY; - - ggtt->vm.total = (size / sizeof(gen8_pte_t)) * I915_GTT_PAGE_SIZE; - ggtt->vm.cleanup = gen6_gmch_remove; - ggtt->vm.insert_page = gen8_ggtt_insert_page; - ggtt->vm.clear_range = nop_clear_range; - if (intel_scanout_needs_vtd_wa(i915)) - ggtt->vm.clear_range = gen8_ggtt_clear_range; - - ggtt->vm.insert_entries = gen8_ggtt_insert_entries; - - /* - * Serialize GTT updates with aperture access on BXT if VT-d is on, - * and always on CHV. 
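The relocated helpers encode the BAR layout from the comment above: GTTADR (the GTT page table) occupies the upper half of GTTMMADR, so its offset is simply half the BAR size, 4 MiB/2 MiB on gen6..7 and 16 MiB/8 MiB on gen8+. A minimal sketch mirroring the removed code:

```c
/* GTTMMADR sizing and the GTTADR offset within it. */
#include <stdint.h>

#define SZ_4M  (4u << 20)
#define SZ_16M (16u << 20)

static uint32_t gttmmadr_size(int graphics_ver)
{
	return graphics_ver < 8 ? SZ_4M : SZ_16M;
}

static uint32_t gttadr_offset(int graphics_ver)
{
	return gttmmadr_size(graphics_ver) / 2;   /* 2 MiB or 8 MiB */
}
```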
- */ - if (intel_vm_no_concurrent_access_wa(i915)) { - ggtt->vm.insert_entries = bxt_vtd_ggtt_insert_entries__BKL; - ggtt->vm.insert_page = bxt_vtd_ggtt_insert_page__BKL; - ggtt->vm.bind_async_flags = - I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND; - } - - ggtt->invalidate = gen8_ggtt_invalidate; - - ggtt->vm.vma_ops.bind_vma = ggtt_bind_vma; - ggtt->vm.vma_ops.unbind_vma = ggtt_unbind_vma; - - ggtt->vm.pte_encode = gen8_ggtt_pte_encode; - - setup_private_pat(ggtt->vm.gt->uncore); - - return ggtt_probe_common(ggtt, size); -} - -static u64 snb_pte_encode(dma_addr_t addr, - enum i915_cache_level level, - u32 flags) -{ - gen6_pte_t pte = GEN6_PTE_ADDR_ENCODE(addr) | GEN6_PTE_VALID; - - switch (level) { - case I915_CACHE_L3_LLC: - case I915_CACHE_LLC: - pte |= GEN6_PTE_CACHE_LLC; - break; - case I915_CACHE_NONE: - pte |= GEN6_PTE_UNCACHED; - break; - default: - MISSING_CASE(level); - } - - return pte; -} - -static u64 ivb_pte_encode(dma_addr_t addr, - enum i915_cache_level level, - u32 flags) -{ - gen6_pte_t pte = GEN6_PTE_ADDR_ENCODE(addr) | GEN6_PTE_VALID; - - switch (level) { - case I915_CACHE_L3_LLC: - pte |= GEN7_PTE_CACHE_L3_LLC; - break; - case I915_CACHE_LLC: - pte |= GEN6_PTE_CACHE_LLC; - break; - case I915_CACHE_NONE: - pte |= GEN6_PTE_UNCACHED; - break; - default: - MISSING_CASE(level); - } - - return pte; -} - -static u64 byt_pte_encode(dma_addr_t addr, - enum i915_cache_level level, - u32 flags) -{ - gen6_pte_t pte = GEN6_PTE_ADDR_ENCODE(addr) | GEN6_PTE_VALID; - - if (!(flags & PTE_READ_ONLY)) - pte |= BYT_PTE_WRITEABLE; - - if (level != I915_CACHE_NONE) - pte |= BYT_PTE_SNOOPED_BY_CPU_CACHES; - - return pte; -} - -static u64 hsw_pte_encode(dma_addr_t addr, - enum i915_cache_level level, - u32 flags) -{ - gen6_pte_t pte = HSW_PTE_ADDR_ENCODE(addr) | GEN6_PTE_VALID; - - if (level != I915_CACHE_NONE) - pte |= HSW_WB_LLC_AGE3; - - return pte; -} - -static u64 iris_pte_encode(dma_addr_t addr, - enum i915_cache_level level, - u32 flags) -{ - gen6_pte_t pte = HSW_PTE_ADDR_ENCODE(addr) | GEN6_PTE_VALID; - - switch (level) { - case I915_CACHE_NONE: - break; - case I915_CACHE_WT: - pte |= HSW_WT_ELLC_LLC_AGE3; - break; - default: - pte |= HSW_WB_ELLC_LLC_AGE3; - break; - } - - return pte; -} - -static int gen6_gmch_probe(struct i915_ggtt *ggtt) -{ - struct drm_i915_private *i915 = ggtt->vm.i915; - struct pci_dev *pdev = to_pci_dev(i915->drm.dev); - unsigned int size; - u16 snb_gmch_ctl; - - ggtt->gmadr = pci_resource(pdev, 2); - ggtt->mappable_end = resource_size(&ggtt->gmadr); - - /* - * 64/512MB is the current min/max we actually know of, but this is - * just a coarse sanity check. 
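Among the code moved out of this file are the per-platform PTE encoders; each sets a valid bit plus cache-control bits chosen from the requested cache level. The SNB variant modelled standalone, with bit values copied from the removed macros (treated as assumptions here):

```c
/* Standalone model of the relocated snb_pte_encode(). */
#include <stdint.h>

#define GEN6_PTE_VALID             (1u << 0)
#define GEN6_PTE_UNCACHED          (1u << 1)
#define GEN6_PTE_CACHE_LLC         (2u << 1)
#define GEN6_PTE_ADDR_ENCODE(addr) ((addr) | (((addr) >> 28) & 0xff0))

enum cache_level { CACHE_NONE, CACHE_LLC, CACHE_L3_LLC };

static uint64_t snb_pte_encode(uint64_t addr, enum cache_level level)
{
	uint64_t pte = GEN6_PTE_ADDR_ENCODE(addr) | GEN6_PTE_VALID;

	switch (level) {
	case CACHE_L3_LLC:
	case CACHE_LLC:
		pte |= GEN6_PTE_CACHE_LLC;
		break;
	case CACHE_NONE:
		pte |= GEN6_PTE_UNCACHED;
		break;
	}
	return pte;
}
```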
- */ - if (ggtt->mappable_end < (64<<20) || ggtt->mappable_end > (512<<20)) { - drm_err(&i915->drm, "Unknown GMADR size (%pa)\n", - &ggtt->mappable_end); - return -ENXIO; - } - - pci_read_config_word(pdev, SNB_GMCH_CTRL, &snb_gmch_ctl); - - size = gen6_get_total_gtt_size(snb_gmch_ctl); - ggtt->vm.total = (size / sizeof(gen6_pte_t)) * I915_GTT_PAGE_SIZE; - - ggtt->vm.alloc_pt_dma = alloc_pt_dma; - ggtt->vm.alloc_scratch_dma = alloc_pt_dma; - - ggtt->vm.clear_range = nop_clear_range; - if (!HAS_FULL_PPGTT(i915) || intel_scanout_needs_vtd_wa(i915)) - ggtt->vm.clear_range = gen6_ggtt_clear_range; - ggtt->vm.insert_page = gen6_ggtt_insert_page; - ggtt->vm.insert_entries = gen6_ggtt_insert_entries; - ggtt->vm.cleanup = gen6_gmch_remove; - - ggtt->invalidate = gen6_ggtt_invalidate; - - if (HAS_EDRAM(i915)) - ggtt->vm.pte_encode = iris_pte_encode; - else if (IS_HASWELL(i915)) - ggtt->vm.pte_encode = hsw_pte_encode; - else if (IS_VALLEYVIEW(i915)) - ggtt->vm.pte_encode = byt_pte_encode; - else if (GRAPHICS_VER(i915) >= 7) - ggtt->vm.pte_encode = ivb_pte_encode; - else - ggtt->vm.pte_encode = snb_pte_encode; - - ggtt->vm.vma_ops.bind_vma = ggtt_bind_vma; - ggtt->vm.vma_ops.unbind_vma = ggtt_unbind_vma; - - return ggtt_probe_common(ggtt, size); -} - -static void i915_gmch_remove(struct i915_address_space *vm) -{ - intel_gmch_remove(); -} - -static int i915_gmch_probe(struct i915_ggtt *ggtt) -{ - struct drm_i915_private *i915 = ggtt->vm.i915; - phys_addr_t gmadr_base; - int ret; - - ret = intel_gmch_probe(i915->bridge_dev, to_pci_dev(i915->drm.dev), NULL); - if (!ret) { - drm_err(&i915->drm, "failed to set up gmch\n"); - return -EIO; - } - - intel_gtt_get(&ggtt->vm.total, &gmadr_base, &ggtt->mappable_end); - - ggtt->gmadr = - (struct resource)DEFINE_RES_MEM(gmadr_base, ggtt->mappable_end); - - ggtt->vm.alloc_pt_dma = alloc_pt_dma; - ggtt->vm.alloc_scratch_dma = alloc_pt_dma; - - if (needs_idle_maps(i915)) { - drm_notice(&i915->drm, - "Flushing DMA requests before IOMMU unmaps; performance may be degraded\n"); - ggtt->do_idle_maps = true; - } - - ggtt->vm.insert_page = i915_ggtt_insert_page; - ggtt->vm.insert_entries = i915_ggtt_insert_entries; - ggtt->vm.clear_range = i915_ggtt_clear_range; - ggtt->vm.cleanup = i915_gmch_remove; - - ggtt->invalidate = gmch_ggtt_invalidate; - - ggtt->vm.vma_ops.bind_vma = ggtt_bind_vma; - ggtt->vm.vma_ops.unbind_vma = ggtt_unbind_vma; - - if (unlikely(ggtt->do_idle_maps)) - drm_notice(&i915->drm, - "Applying Ironlake quirks for intel_iommu\n"); - - return 0; -} - static int ggtt_probe_hw(struct i915_ggtt *ggtt, struct intel_gt *gt) { struct drm_i915_private *i915 = gt->i915; @@ -1206,11 +577,11 @@ static int ggtt_probe_hw(struct i915_ggtt *ggtt, struct intel_gt *gt) dma_resv_init(&ggtt->vm._resv); if (GRAPHICS_VER(i915) <= 5) - ret = i915_gmch_probe(ggtt); + ret = intel_gt_gmch_gen5_probe(ggtt); else if (GRAPHICS_VER(i915) < 8) - ret = gen6_gmch_probe(ggtt); + ret = intel_gt_gmch_gen6_probe(ggtt); else - ret = gen8_gmch_probe(ggtt); + ret = intel_gt_gmch_gen8_probe(ggtt); if (ret) { dma_resv_fini(&ggtt->vm._resv); return ret; @@ -1256,7 +627,7 @@ int i915_ggtt_probe_hw(struct drm_i915_private *i915) if (ret) return ret; - if (intel_vtd_active(i915)) + if (i915_vtd_active(i915)) drm_info(&i915->drm, "VT-d active for gfx access\n"); return 0; @@ -1264,10 +635,7 @@ int i915_ggtt_probe_hw(struct drm_i915_private *i915) int i915_ggtt_enable_hw(struct drm_i915_private *i915) { - if (GRAPHICS_VER(i915) < 6 && !intel_enable_gtt()) - return -EIO; - - return 0; + return 
intel_gt_gmch_gen5_enable_hw(i915); } void i915_ggtt_enable_guc(struct i915_ggtt *ggtt) @@ -1307,16 +675,12 @@ bool i915_ggtt_resume_vm(struct i915_address_space *vm) { struct i915_vma *vma; bool write_domain_objs = false; - int open; drm_WARN_ON(&vm->i915->drm, !vm->is_ggtt && !vm->is_dpt); /* First fill our portion of the GTT with scratch pages */ vm->clear_range(vm, 0, vm->total); - /* Skip rewriting PTE on VMA unbind. */ - open = atomic_xchg(&vm->open, 0); - /* clflush objects bound into the GGTT and rebind them. */ list_for_each_entry(vma, &vm->bound_list, vm_link) { struct drm_i915_gem_object *obj = vma->obj; @@ -1333,8 +697,6 @@ bool i915_ggtt_resume_vm(struct i915_address_space *vm) } } - atomic_set(&vm->open, open); - return write_domain_objs; } diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c b/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c index 76880fb8fc19..6ebda3d65086 100644 --- a/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c +++ b/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c @@ -3,6 +3,8 @@ * Copyright © 2008-2015 Intel Corporation */ +#include <linux/highmem.h> + #include "i915_drv.h" #include "i915_reg.h" #include "i915_scatterlist.h" diff --git a/drivers/gpu/drm/i915/gt/intel_gpu_commands.h b/drivers/gpu/drm/i915/gt/intel_gpu_commands.h index d112ffd56418..556bca3be804 100644 --- a/drivers/gpu/drm/i915/gt/intel_gpu_commands.h +++ b/drivers/gpu/drm/i915/gt/intel_gpu_commands.h @@ -39,6 +39,8 @@ #define MI_GLOBAL_GTT (1<<22) #define MI_NOOP MI_INSTR(0, 0) +#define MI_SET_PREDICATE MI_INSTR(0x01, 0) +#define MI_SET_PREDICATE_DISABLE (0 << 0) #define MI_USER_INTERRUPT MI_INSTR(0x02, 0) #define MI_WAIT_FOR_EVENT MI_INSTR(0x03, 0) #define MI_WAIT_FOR_OVERLAY_FLIP (1<<16) @@ -134,6 +136,13 @@ #define MI_MEM_VIRTUAL (1 << 22) /* 945,g33,965 */ #define MI_USE_GGTT (1 << 22) /* g4x+ */ #define MI_STORE_DWORD_INDEX MI_INSTR(0x21, 1) +#define MI_ATOMIC MI_INSTR(0x2f, 1) +#define MI_ATOMIC_INLINE (MI_INSTR(0x2f, 9) | MI_ATOMIC_INLINE_DATA) +#define MI_ATOMIC_GLOBAL_GTT (1 << 22) +#define MI_ATOMIC_INLINE_DATA (1 << 18) +#define MI_ATOMIC_CS_STALL (1 << 17) +#define MI_ATOMIC_MOVE (0x4 << 8) + /* * Official intel docs are somewhat sloppy concerning MI_LOAD_REGISTER_IMM: * - Always issue a MI_NOOP _before_ the MI_LOAD_REGISTER_IMM - otherwise hw @@ -144,6 +153,7 @@ #define MI_LOAD_REGISTER_IMM(x) MI_INSTR(0x22, 2*(x)-1) /* Gen11+. addr = base + (ctx_restore ? 
offset & GENMASK(12,2) : offset) */ #define MI_LRI_LRM_CS_MMIO REG_BIT(19) +#define MI_LRI_MMIO_REMAP_EN REG_BIT(17) #define MI_LRI_FORCE_POSTED (1<<12) #define MI_LOAD_REGISTER_IMM_MAX_REGS (126) #define MI_STORE_REGISTER_MEM MI_INSTR(0x24, 1) @@ -153,8 +163,10 @@ #define MI_FLUSH_DW_PROTECTED_MEM_EN (1 << 22) #define MI_FLUSH_DW_STORE_INDEX (1<<21) #define MI_INVALIDATE_TLB (1<<18) +#define MI_FLUSH_DW_CCS (1<<16) #define MI_FLUSH_DW_OP_STOREDW (1<<14) #define MI_FLUSH_DW_OP_MASK (3<<14) +#define MI_FLUSH_DW_LLC (1<<9) #define MI_FLUSH_DW_NOTIFY (1<<8) #define MI_INVALIDATE_BSD (1<<7) #define MI_FLUSH_DW_USE_GTT (1<<2) @@ -203,8 +215,27 @@ #define GFX_OP_DRAWRECT_INFO ((0x3<<29)|(0x1d<<24)|(0x80<<16)|(0x3)) #define GFX_OP_DRAWRECT_INFO_I965 ((0x7900<<16)|0x2) +#define XY_CTRL_SURF_INSTR_SIZE 5 +#define MI_FLUSH_DW_SIZE 3 +#define XY_CTRL_SURF_COPY_BLT ((2 << 29) | (0x48 << 22) | 3) +#define SRC_ACCESS_TYPE_SHIFT 21 +#define DST_ACCESS_TYPE_SHIFT 20 +#define CCS_SIZE_MASK 0x3FF +#define CCS_SIZE_SHIFT 8 +#define XY_CTRL_SURF_MOCS_MASK GENMASK(31, 25) +#define NUM_CCS_BYTES_PER_BLOCK 256 +#define NUM_BYTES_PER_CCS_BYTE 256 +#define NUM_CCS_BLKS_PER_XFER 1024 +#define INDIRECT_ACCESS 0 +#define DIRECT_ACCESS 1 + #define COLOR_BLT_CMD (2 << 29 | 0x40 << 22 | (5 - 2)) #define XY_COLOR_BLT_CMD (2 << 29 | 0x50 << 22) +#define XY_FAST_COLOR_BLT_CMD (2 << 29 | 0x44 << 22) +#define XY_FAST_COLOR_BLT_DEPTH_32 (2 << 19) +#define XY_FAST_COLOR_BLT_DW 16 +#define XY_FAST_COLOR_BLT_MOCS_MASK GENMASK(27, 21) +#define XY_FAST_COLOR_BLT_MEM_TYPE_SHIFT 31 #define SRC_COPY_BLT_CMD (2 << 29 | 0x43 << 22) #define GEN9_XY_FAST_COPY_BLT_CMD (2 << 29 | 0x42 << 22) #define XY_SRC_COPY_BLT_CMD (2 << 29 | 0x53 << 22) diff --git a/drivers/gpu/drm/i915/gt/intel_gsc.c b/drivers/gpu/drm/i915/gt/intel_gsc.c new file mode 100644 index 000000000000..0e494028b81d --- /dev/null +++ b/drivers/gpu/drm/i915/gt/intel_gsc.c @@ -0,0 +1,224 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright(c) 2019-2022, Intel Corporation. All rights reserved. + */ + +#include <linux/irq.h> +#include <linux/mei_aux.h> +#include "i915_drv.h" +#include "i915_reg.h" +#include "gt/intel_gsc.h" +#include "gt/intel_gt.h" + +#define GSC_BAR_LENGTH 0x00000FFC + +static void gsc_irq_mask(struct irq_data *d) +{ + /* generic irq handling */ +} + +static void gsc_irq_unmask(struct irq_data *d) +{ + /* generic irq handling */ +} + +static struct irq_chip gsc_irq_chip = { + .name = "gsc_irq_chip", + .irq_mask = gsc_irq_mask, + .irq_unmask = gsc_irq_unmask, +}; + +static int gsc_irq_init(int irq) +{ + irq_set_chip_and_handler_name(irq, &gsc_irq_chip, + handle_simple_irq, "gsc_irq_handler"); + + return irq_set_chip_data(irq, NULL); +} + +struct gsc_def { + const char *name; + unsigned long bar; + size_t bar_size; +}; + +/* gsc resources and definitions (HECI1 and HECI2) */ +static const struct gsc_def gsc_def_dg1[] = { + { + /* HECI1 not yet implemented. 
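The new XY_CTRL_SURF constants imply the flat-CCS coverage ratio: one CCS byte covers NUM_BYTES_PER_CCS_BYTE (256) bytes of main surface, and a single control-surface blit moves at most NUM_CCS_BLKS_PER_XFER blocks of NUM_CCS_BYTES_PER_BLOCK bytes. A sizing sketch under that reading:

```c
/* Flat-CCS sizing from the new blitter constants. */
#include <stdint.h>
#include <stdio.h>

#define NUM_CCS_BYTES_PER_BLOCK 256
#define NUM_BYTES_PER_CCS_BYTE  256
#define NUM_CCS_BLKS_PER_XFER   1024

static uint64_t ccs_bytes_for(uint64_t main_surface_bytes)
{
	return main_surface_bytes / NUM_BYTES_PER_CCS_BYTE;
}

static uint64_t ctrl_surf_blits_for(uint64_t main_surface_bytes)
{
	uint64_t blocks = (ccs_bytes_for(main_surface_bytes) +
			   NUM_CCS_BYTES_PER_BLOCK - 1) / NUM_CCS_BYTES_PER_BLOCK;

	return (blocks + NUM_CCS_BLKS_PER_XFER - 1) / NUM_CCS_BLKS_PER_XFER;
}

int main(void)
{
	uint64_t sz = 64ull << 20;   /* 64 MiB example buffer */

	printf("%llu CCS bytes, %llu control-surface blits\n",
	       (unsigned long long)ccs_bytes_for(sz),
	       (unsigned long long)ctrl_surf_blits_for(sz));
	return 0;
}
```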
*/ + }, + { + .name = "mei-gscfi", + .bar = DG1_GSC_HECI2_BASE, + .bar_size = GSC_BAR_LENGTH, + } }; + +static const struct gsc_def gsc_def_dg2[] = { + { + .name = "mei-gsc", + .bar = DG2_GSC_HECI1_BASE, + .bar_size = GSC_BAR_LENGTH, + }, + { + .name = "mei-gscfi", + .bar = DG2_GSC_HECI2_BASE, + .bar_size = GSC_BAR_LENGTH, + } }; + +static void gsc_release_dev(struct device *dev) +{ + struct auxiliary_device *aux_dev = to_auxiliary_dev(dev); + struct mei_aux_device *adev = auxiliary_dev_to_mei_aux_dev(aux_dev); + + kfree(adev); +} + +static void gsc_destroy_one(struct intel_gsc_intf *intf) +{ + if (intf->adev) { + auxiliary_device_delete(&intf->adev->aux_dev); + auxiliary_device_uninit(&intf->adev->aux_dev); + intf->adev = NULL; + } + if (intf->irq >= 0) + irq_free_desc(intf->irq); + intf->irq = -1; +} + +static void gsc_init_one(struct drm_i915_private *i915, + struct intel_gsc_intf *intf, + unsigned int intf_id) +{ + struct pci_dev *pdev = to_pci_dev(i915->drm.dev); + struct mei_aux_device *adev; + struct auxiliary_device *aux_dev; + const struct gsc_def *def; + int ret; + + intf->irq = -1; + intf->id = intf_id; + + if (intf_id == 0 && !HAS_HECI_PXP(i915)) + return; + + if (IS_DG1(i915)) { + def = &gsc_def_dg1[intf_id]; + } else if (IS_DG2(i915)) { + def = &gsc_def_dg2[intf_id]; + } else { + drm_warn_once(&i915->drm, "Unknown platform\n"); + return; + } + + if (!def->name) { + drm_warn_once(&i915->drm, "HECI%d is not implemented!\n", intf_id + 1); + return; + } + + intf->irq = irq_alloc_desc(0); + if (intf->irq < 0) { + drm_err(&i915->drm, "gsc irq error %d\n", intf->irq); + return; + } + + ret = gsc_irq_init(intf->irq); + if (ret < 0) { + drm_err(&i915->drm, "gsc irq init failed %d\n", ret); + goto fail; + } + + adev = kzalloc(sizeof(*adev), GFP_KERNEL); + if (!adev) + goto fail; + + adev->irq = intf->irq; + adev->bar.parent = &pdev->resource[0]; + adev->bar.start = def->bar + pdev->resource[0].start; + adev->bar.end = adev->bar.start + def->bar_size - 1; + adev->bar.flags = IORESOURCE_MEM; + adev->bar.desc = IORES_DESC_NONE; + + aux_dev = &adev->aux_dev; + aux_dev->name = def->name; + aux_dev->id = (pci_domain_nr(pdev->bus) << 16) | + PCI_DEVID(pdev->bus->number, pdev->devfn); + aux_dev->dev.parent = &pdev->dev; + aux_dev->dev.release = gsc_release_dev; + + ret = auxiliary_device_init(aux_dev); + if (ret < 0) { + drm_err(&i915->drm, "gsc aux init failed %d\n", ret); + kfree(adev); + goto fail; + } + + ret = auxiliary_device_add(aux_dev); + if (ret < 0) { + drm_err(&i915->drm, "gsc aux add failed %d\n", ret); + /* adev will be freed with the put_device() and .release sequence */ + auxiliary_device_uninit(aux_dev); + goto fail; + } + intf->adev = adev; + + return; +fail: + gsc_destroy_one(intf); +} + +static void gsc_irq_handler(struct intel_gt *gt, unsigned int intf_id) +{ + int ret; + + if (intf_id >= INTEL_GSC_NUM_INTERFACES) { + drm_warn_once(&gt->i915->drm, "GSC irq: intf_id %d is out of range", intf_id); + return; + } + + if (!HAS_HECI_GSC(gt->i915)) { + drm_warn_once(&gt->i915->drm, "GSC irq: not supported"); + return; + } + + if (gt->gsc.intf[intf_id].irq < 0) { + drm_err_ratelimited(&gt->i915->drm, "GSC irq: irq not set"); + return; + } + + ret = generic_handle_irq(gt->gsc.intf[intf_id].irq); + if (ret) + drm_err_ratelimited(&gt->i915->drm, "error handling GSC irq: %d\n", ret); +} + +void intel_gsc_irq_handler(struct intel_gt *gt, u32 iir) +{ + if (iir & GSC_IRQ_INTF(0)) + gsc_irq_handler(gt, 0); + if (iir & GSC_IRQ_INTF(1)) + gsc_irq_handler(gt, 1); +}
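The dispatch above relies on the GSC_IRQ_INTF() encoding defined in intel_gsc.h below: interface bits grow downward from bit 15, so interface 0 (HECI1) maps to bit 15 and interface 1 (HECI2) to bit 14. A minimal stand-alone model of that IIR decoding, not part of the patch (the printf merely stands in for gsc_irq_handler()):

#include <stdio.h>

#define BIT(n) (1u << (n))
#define GSC_IRQ_INTF(_x) BIT(15 - (_x)) /* same encoding as intel_gsc.h */

static void model_gsc_irq_handler(unsigned int intf_id)
{
	printf("dispatch HECI%u (IIR bit %u)\n", intf_id + 1, 15 - intf_id);
}

int main(void)
{
	unsigned int iir = BIT(15) | BIT(14); /* both interfaces pending */

	if (iir & GSC_IRQ_INTF(0))
		model_gsc_irq_handler(0);
	if (iir & GSC_IRQ_INTF(1))
		model_gsc_irq_handler(1);
	return 0;
}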
+ +void intel_gsc_init(struct intel_gsc *gsc, struct drm_i915_private *i915) +{ + unsigned int i; + + if (!HAS_HECI_GSC(i915)) + return; + + for (i = 0; i < INTEL_GSC_NUM_INTERFACES; i++) + gsc_init_one(i915, &gsc->intf[i], i); +} + +void intel_gsc_fini(struct intel_gsc *gsc) +{ + struct intel_gt *gt = gsc_to_gt(gsc); + unsigned int i; + + if (!HAS_HECI_GSC(gt->i915)) + return; + + for (i = 0; i < INTEL_GSC_NUM_INTERFACES; i++) + gsc_destroy_one(&gsc->intf[i]); +} diff --git a/drivers/gpu/drm/i915/gt/intel_gsc.h b/drivers/gpu/drm/i915/gt/intel_gsc.h new file mode 100644 index 000000000000..68582f912b21 --- /dev/null +++ b/drivers/gpu/drm/i915/gt/intel_gsc.h @@ -0,0 +1,37 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright(c) 2019-2022, Intel Corporation. All rights reserved. + */ +#ifndef __INTEL_GSC_DEV_H__ +#define __INTEL_GSC_DEV_H__ + +#include <linux/types.h> + +struct drm_i915_private; struct intel_gt; struct mei_aux_device; + +#define INTEL_GSC_NUM_INTERFACES 2 +/* + * The HECI1 bit corresponds to bit15 and HECI2 to bit14. + * The reason for this is to allow growth for more interfaces in the future. + */ +#define GSC_IRQ_INTF(_x) BIT(15 - (_x)) + +/** + * struct intel_gsc - graphics security controller + * @intf : gsc interface + */ +struct intel_gsc { + struct intel_gsc_intf { + struct mei_aux_device *adev; + int irq; + unsigned int id; + } intf[INTEL_GSC_NUM_INTERFACES]; +}; + +void intel_gsc_init(struct intel_gsc *gsc, struct drm_i915_private *dev_priv); +void intel_gsc_fini(struct intel_gsc *gsc); +void intel_gsc_irq_handler(struct intel_gt *gt, u32 iir); + +#endif /* __INTEL_GSC_DEV_H__ */ diff --git a/drivers/gpu/drm/i915/gt/intel_gt.c b/drivers/gpu/drm/i915/gt/intel_gt.c index 8a2483ccbfb9..53307ca0eed0 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt.c +++ b/drivers/gpu/drm/i915/gt/intel_gt.c @@ -4,7 +4,6 @@ */ #include <drm/drm_managed.h> -#include <drm/intel-gtt.h> #include "gem/i915_gem_internal.h" #include "gem/i915_gem_lmem.h" @@ -17,6 +16,7 @@ #include "intel_gt_buffer_pool.h" #include "intel_gt_clock_utils.h" #include "intel_gt_debugfs.h" +#include "intel_gt_gmch.h" #include "intel_gt_pm.h" #include "intel_gt_regs.h" #include "intel_gt_requests.h" @@ -26,10 +26,11 @@ #include "intel_rc6.h" #include "intel_renderstate.h" #include "intel_rps.h" +#include "intel_gt_sysfs.h" #include "intel_uncore.h" #include "shmem_utils.h" -void __intel_gt_init_early(struct intel_gt *gt, struct drm_i915_private *i915) +static void __intel_gt_init_early(struct intel_gt *gt) { spin_lock_init(&gt->irq_lock); @@ -51,17 +52,23 @@ void __intel_gt_init_early(struct intel_gt *gt, struct drm_i915_private *i915) intel_rps_init_early(&gt->rps); } -void intel_gt_init_early(struct intel_gt *gt, struct drm_i915_private *i915) +/* Preliminary initialization of Tile 0 */ +void intel_root_gt_init_early(struct drm_i915_private *i915) { + struct intel_gt *gt = to_gt(i915); + gt->i915 = i915; gt->uncore = &i915->uncore; + + __intel_gt_init_early(gt); } -int intel_gt_probe_lmem(struct intel_gt *gt) +static int intel_gt_probe_lmem(struct intel_gt *gt) { struct drm_i915_private *i915 = gt->i915; + unsigned int instance = gt->info.id; + int id = INTEL_REGION_LMEM_0 + instance; struct intel_memory_region *mem; - int id; int err; mem = intel_gt_setup_lmem(gt); @@ -76,9 +83,8 @@ int intel_gt_probe_lmem(struct intel_gt *gt) return err; } - id = INTEL_REGION_LMEM; - mem->id = id; + mem->instance = instance; intel_memory_region_set_name(mem, "local%u", mem->instance); @@ -96,6 +102,12 @@ int intel_gt_assign_ggtt(struct intel_gt *gt) return gt->ggtt ? 0 : -ENOMEM; }
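Several of the reworked paths below (intel_gt_driver_late_release_all() and the tile probe/release helpers) iterate tiles with the new for_each_gt() macro that this patch adds to intel_gt.h further down; it visits only array slots that were actually populated. A stand-alone sketch of that iteration idiom, not part of the patch, with simplified types and a hypothetical MAX_GT:

#include <stdio.h>
#include <stddef.h>

#define MAX_GT 4 /* stands in for I915_MAX_GT */

struct model_gt { int id; };

/* Mirrors for_each_gt(): skip NULL slots, keep id in step with the index.
 * The empty-if form mimics for_each_if() to avoid dangling-else pitfalls. */
#define for_each_model_gt(gt__, tiles__, id__) \
	for ((id__) = 0; (id__) < MAX_GT; (id__)++) \
		if (!((gt__) = (tiles__)[(id__)])) {} else

int main(void)
{
	struct model_gt gt0 = { 0 }, gt2 = { 2 };
	struct model_gt *tiles[MAX_GT] = { &gt0, NULL, &gt2, NULL };
	struct model_gt *gt;
	unsigned int id;

	for_each_model_gt(gt, tiles, id)
		printf("tile %u present (id=%d)\n", id, gt->id);
	return 0;
}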
+static const char * const intel_steering_types[] = { + "L3BANK", + "MSLICE", + "LNCF", +}; + static const struct intel_mmio_range icl_l3bank_steering_table[] = { { 0x00B100, 0x00B3FF }, {}, @@ -439,14 +451,17 @@ void intel_gt_chipset_flush(struct intel_gt *gt) { wmb(); if (GRAPHICS_VER(gt->i915) < 6) - intel_gtt_chipset_flush(); + intel_gt_gmch_gen5_chipset_flush(gt); } void intel_gt_driver_register(struct intel_gt *gt) { + intel_gsc_init(&gt->gsc, gt->i915); + intel_rps_driver_register(&gt->rps); intel_gt_debugfs_register(gt); + intel_gt_sysfs_register(gt); } static int intel_gt_init_scratch(struct intel_gt *gt, unsigned int size) @@ -712,6 +727,11 @@ int intel_gt_init(struct intel_gt *gt) if (err) goto err_uc_init; + err = intel_gt_init_hwconfig(gt); + if (err) + drm_err(&gt->i915->drm, "Failed to retrieve hwconfig table: %pe\n", + ERR_PTR(err)); + err = __engines_record_defaults(gt); if (err) goto err_gt; @@ -766,6 +786,7 @@ void intel_gt_driver_unregister(struct intel_gt *gt) intel_wakeref_t wakeref; intel_rps_driver_unregister(&gt->rps); + intel_gsc_fini(&gt->gsc); intel_pxp_fini(&gt->pxp); @@ -793,18 +814,24 @@ void intel_gt_driver_release(struct intel_gt *gt) intel_gt_pm_fini(gt); intel_gt_fini_scratch(gt); intel_gt_fini_buffer_pool(gt); + intel_gt_fini_hwconfig(gt); } -void intel_gt_driver_late_release(struct intel_gt *gt) +void intel_gt_driver_late_release_all(struct drm_i915_private *i915) { + struct intel_gt *gt; + unsigned int id; + /* We need to wait for inflight RCU frees to release their grip */ rcu_barrier(); - intel_uc_driver_late_release(&gt->uc); - intel_gt_fini_requests(gt); - intel_gt_fini_reset(gt); - intel_gt_fini_timelines(gt); - intel_engines_free(gt); + for_each_gt(gt, i915, id) { + intel_uc_driver_late_release(&gt->uc); + intel_gt_fini_requests(gt); + intel_gt_fini_reset(gt); + intel_gt_fini_timelines(gt); + intel_engines_free(gt); + } } /** @@ -913,6 +940,35 @@ u32 intel_gt_read_register_fw(struct intel_gt *gt, i915_reg_t reg) return intel_uncore_read_fw(gt->uncore, reg); } +/** + * intel_gt_get_valid_steering_for_reg - get a valid steering for a register + * @gt: GT structure + * @reg: register for which the steering is required + * @sliceid: return variable for slice steering + * @subsliceid: return variable for subslice steering + * + * This function returns a slice/subslice pair that is guaranteed to work for + * read steering of the given register. Note that a value will be returned even + * if the register is not replicated and therefore does not actually require + * steering.
+ */ +void intel_gt_get_valid_steering_for_reg(struct intel_gt *gt, i915_reg_t reg, + u8 *sliceid, u8 *subsliceid) +{ + int type; + + for (type = 0; type < NUM_STEERING_TYPES; type++) { + if (intel_gt_reg_needs_read_steering(gt, reg, type)) { + intel_gt_get_valid_steering(gt, type, sliceid, + subsliceid); + return; + } + } + + *sliceid = gt->default_steering.groupid; + *subsliceid = gt->default_steering.instanceid; +} + u32 intel_gt_read_register(struct intel_gt *gt, i915_reg_t reg) { int type; @@ -932,6 +988,145 @@ u32 intel_gt_read_register(struct intel_gt *gt, i915_reg_t reg) return intel_uncore_read(gt->uncore, reg); } +static void report_steering_type(struct drm_printer *p, + struct intel_gt *gt, + enum intel_steering_type type, + bool dump_table) +{ + const struct intel_mmio_range *entry; + u8 slice, subslice; + + BUILD_BUG_ON(ARRAY_SIZE(intel_steering_types) != NUM_STEERING_TYPES); + + if (!gt->steering_table[type]) { + drm_printf(p, "%s steering: uses default steering\n", + intel_steering_types[type]); + return; + } + + intel_gt_get_valid_steering(gt, type, &slice, &subslice); + drm_printf(p, "%s steering: sliceid=0x%x, subsliceid=0x%x\n", + intel_steering_types[type], slice, subslice); + + if (!dump_table) + return; + + for (entry = gt->steering_table[type]; entry->end; entry++) + drm_printf(p, "\t0x%06x - 0x%06x\n", entry->start, entry->end); +} + +void intel_gt_report_steering(struct drm_printer *p, struct intel_gt *gt, + bool dump_table) +{ + drm_printf(p, "Default steering: sliceid=0x%x, subsliceid=0x%x\n", + gt->default_steering.groupid, + gt->default_steering.instanceid); + + if (HAS_MSLICES(gt->i915)) { + report_steering_type(p, gt, MSLICE, dump_table); + report_steering_type(p, gt, LNCF, dump_table); + } +} + +static int intel_gt_tile_setup(struct intel_gt *gt, phys_addr_t phys_addr) +{ + int ret; + + if (!gt_is_root(gt)) { + struct intel_uncore_mmio_debug *mmio_debug; + struct intel_uncore *uncore; + + uncore = kzalloc(sizeof(*uncore), GFP_KERNEL); + if (!uncore) + return -ENOMEM; + + mmio_debug = kzalloc(sizeof(*mmio_debug), GFP_KERNEL); + if (!mmio_debug) { + kfree(uncore); + return -ENOMEM; + } + + gt->uncore = uncore; + gt->uncore->debug = mmio_debug; + + __intel_gt_init_early(gt); + } + + intel_uncore_init_early(gt->uncore, gt); + + ret = intel_uncore_setup_mmio(gt->uncore, phys_addr); + if (ret) + return ret; + + gt->phys_addr = phys_addr; + + return 0; +} + +static void +intel_gt_tile_cleanup(struct intel_gt *gt) +{ + intel_uncore_cleanup_mmio(gt->uncore); + + if (!gt_is_root(gt)) { + kfree(gt->uncore->debug); + kfree(gt->uncore); + kfree(gt); + } +} + +int intel_gt_probe_all(struct drm_i915_private *i915) +{ + struct pci_dev *pdev = to_pci_dev(i915->drm.dev); + struct intel_gt *gt = &i915->gt0; + phys_addr_t phys_addr; + unsigned int mmio_bar; + int ret; + + mmio_bar = GRAPHICS_VER(i915) == 2 ? 
1 : 0; + phys_addr = pci_resource_start(pdev, mmio_bar); + + /* + * We always have at least one primary GT on any device + * and it has been already initialized early during probe + * in i915_driver_probe() + */ + ret = intel_gt_tile_setup(gt, phys_addr); + if (ret) + return ret; + + i915->gt[0] = gt; + + /* TODO: add more tiles */ + return 0; +} + +int intel_gt_tiles_init(struct drm_i915_private *i915) +{ + struct intel_gt *gt; + unsigned int id; + int ret; + + for_each_gt(gt, i915, id) { + ret = intel_gt_probe_lmem(gt); + if (ret) + return ret; + } + + return 0; +} + +void intel_gt_release_all(struct drm_i915_private *i915) +{ + struct intel_gt *gt; + unsigned int id; + + for_each_gt(gt, i915, id) { + intel_gt_tile_cleanup(gt); + i915->gt[id] = NULL; + } +} + void intel_gt_info_print(const struct intel_gt_info *info, struct drm_printer *p) { @@ -980,6 +1175,7 @@ void intel_gt_invalidate_tlbs(struct intel_gt *gt) [VIDEO_DECODE_CLASS] = GEN12_VD_TLB_INV_CR, [VIDEO_ENHANCEMENT_CLASS] = GEN12_VE_TLB_INV_CR, [COPY_ENGINE_CLASS] = GEN12_BLT_TLB_INV_CR, + [COMPUTE_CLASS] = GEN12_COMPCTX_TLB_INV_CR, }; struct drm_i915_private *i915 = gt->i915; struct intel_uncore *uncore = gt->uncore; diff --git a/drivers/gpu/drm/i915/gt/intel_gt.h b/drivers/gpu/drm/i915/gt/intel_gt.h index 0f571c8ee22b..44c6cb63ccbc 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt.h +++ b/drivers/gpu/drm/i915/gt/intel_gt.h @@ -13,12 +13,24 @@ struct drm_i915_private; struct drm_printer; +struct insert_entries { + struct i915_address_space *vm; + struct i915_vma_resource *vma_res; + enum i915_cache_level level; + u32 flags; +}; + #define GT_TRACE(gt, fmt, ...) do { \ const struct intel_gt *gt__ __maybe_unused = (gt); \ GEM_TRACE("%s " fmt, dev_name(gt__->i915->drm.dev), \ ##__VA_ARGS__); \ } while (0) +static inline bool gt_is_root(struct intel_gt *gt) +{ + return !gt->info.id; +} + static inline struct intel_gt *uc_to_gt(struct intel_uc *uc) { return container_of(uc, struct intel_gt, uc); @@ -34,10 +46,13 @@ static inline struct intel_gt *huc_to_gt(struct intel_huc *huc) return container_of(huc, struct intel_gt, uc.huc); } -void intel_gt_init_early(struct intel_gt *gt, struct drm_i915_private *i915); -void __intel_gt_init_early(struct intel_gt *gt, struct drm_i915_private *i915); +static inline struct intel_gt *gsc_to_gt(struct intel_gsc *gsc) +{ + return container_of(gsc, struct intel_gt, gsc); +} + +void intel_root_gt_init_early(struct drm_i915_private *i915); int intel_gt_assign_ggtt(struct intel_gt *gt); -int intel_gt_probe_lmem(struct intel_gt *gt); int intel_gt_init_mmio(struct intel_gt *gt); int __must_check intel_gt_init_hw(struct intel_gt *gt); int intel_gt_init(struct intel_gt *gt); @@ -47,7 +62,7 @@ void intel_gt_driver_unregister(struct intel_gt *gt); void intel_gt_driver_remove(struct intel_gt *gt); void intel_gt_driver_release(struct intel_gt *gt); -void intel_gt_driver_late_release(struct intel_gt *gt); +void intel_gt_driver_late_release_all(struct drm_i915_private *i915); int intel_gt_wait_for_idle(struct intel_gt *gt, long timeout); @@ -84,9 +99,25 @@ static inline bool intel_gt_needs_read_steering(struct intel_gt *gt, return gt->steering_table[type]; } +void intel_gt_get_valid_steering_for_reg(struct intel_gt *gt, i915_reg_t reg, + u8 *sliceid, u8 *subsliceid); + u32 intel_gt_read_register_fw(struct intel_gt *gt, i915_reg_t reg); u32 intel_gt_read_register(struct intel_gt *gt, i915_reg_t reg); +void intel_gt_report_steering(struct drm_printer *p, struct intel_gt *gt, + bool dump_table); + +int 
intel_gt_probe_all(struct drm_i915_private *i915); +int intel_gt_tiles_init(struct drm_i915_private *i915); +void intel_gt_release_all(struct drm_i915_private *i915); + +#define for_each_gt(gt__, i915__, id__) \ + for ((id__) = 0; \ + (id__) < I915_MAX_GT; \ + (id__)++) \ + for_each_if(((gt__) = (i915__)->gt[(id__)])) + void intel_gt_info_print(const struct intel_gt_info *info, struct drm_printer *p); @@ -94,4 +125,6 @@ void intel_gt_watchdog_work(struct work_struct *work); void intel_gt_invalidate_tlbs(struct intel_gt *gt); +struct resource intel_pci_resource(struct pci_dev *pdev, int bar); + #endif /* __INTEL_GT_H__ */ diff --git a/drivers/gpu/drm/i915/gt/intel_gt_clock_utils.c b/drivers/gpu/drm/i915/gt/intel_gt_clock_utils.c index 0db822c3b7e5..d5d1b04dbcad 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_clock_utils.c +++ b/drivers/gpu/drm/i915/gt/intel_gt_clock_utils.c @@ -161,6 +161,10 @@ void intel_gt_init_clock_frequency(struct intel_gt *gt) if (gt->clock_frequency) gt->clock_period_ns = intel_gt_clock_interval_to_ns(gt, 1); + /* Icelake appears to use another fixed frequency for CTX_TIMESTAMP */ + if (GRAPHICS_VER(gt->i915) == 11) + gt->clock_period_ns = NSEC_PER_SEC / 13750000; + GT_TRACE(gt, "Using clock frequency: %dkHz, period: %dns, wrap: %lldms\n", gt->clock_frequency / 1000, diff --git a/drivers/gpu/drm/i915/gt/intel_gt_debugfs.c b/drivers/gpu/drm/i915/gt/intel_gt_debugfs.c index f103664b71d4..d886fdc2c694 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_debugfs.c +++ b/drivers/gpu/drm/i915/gt/intel_gt_debugfs.c @@ -6,6 +6,7 @@ #include <linux/debugfs.h> #include "i915_drv.h" +#include "intel_gt.h" #include "intel_gt_debugfs.h" #include "intel_gt_engines_debugfs.h" #include "intel_gt_pm_debugfs.h" @@ -29,7 +30,7 @@ int intel_gt_debugfs_reset_show(struct intel_gt *gt, u64 *val) } } -int intel_gt_debugfs_reset_store(struct intel_gt *gt, u64 val) +void intel_gt_debugfs_reset_store(struct intel_gt *gt, u64 val) { /* Flush any previous reset before applying for a new one */ wait_event(gt->reset.queue, @@ -37,7 +38,6 @@ int intel_gt_debugfs_reset_store(struct intel_gt *gt, u64 val) intel_gt_handle_error(gt, val, I915_ERROR_CAPTURE, "Manually reset engine mask to %llx", val); - return 0; } /* @@ -51,16 +51,30 @@ static int __intel_gt_debugfs_reset_show(void *data, u64 *val) static int __intel_gt_debugfs_reset_store(void *data, u64 val) { - return intel_gt_debugfs_reset_store(data, val); + intel_gt_debugfs_reset_store(data, val); + + return 0; } DEFINE_SIMPLE_ATTRIBUTE(reset_fops, __intel_gt_debugfs_reset_show, __intel_gt_debugfs_reset_store, "%llu\n"); +static int steering_show(struct seq_file *m, void *data) +{ + struct drm_printer p = drm_seq_file_printer(m); + struct intel_gt *gt = m->private; + + intel_gt_report_steering(&p, gt, true); + + return 0; +} +DEFINE_INTEL_GT_DEBUGFS_ATTRIBUTE(steering); + static void gt_debugfs_register(struct intel_gt *gt, struct dentry *root) { static const struct intel_gt_debugfs_file files[] = { { "reset", &reset_fops, NULL }, + { "steering", &steering_fops }, }; intel_gt_debugfs_register_files(root, files, ARRAY_SIZE(files), gt); diff --git a/drivers/gpu/drm/i915/gt/intel_gt_debugfs.h b/drivers/gpu/drm/i915/gt/intel_gt_debugfs.h index 17e79b735cfe..e4110eebf093 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_debugfs.h +++ b/drivers/gpu/drm/i915/gt/intel_gt_debugfs.h @@ -48,6 +48,6 @@ void intel_gt_debugfs_register_files(struct dentry *root, /* functions that need to be accessed by the upper level non-gt interfaces */ int 
intel_gt_debugfs_reset_show(struct intel_gt *gt, u64 *val); -int intel_gt_debugfs_reset_store(struct intel_gt *gt, u64 val); +void intel_gt_debugfs_reset_store(struct intel_gt *gt, u64 val); #endif /* INTEL_GT_DEBUGFS_H */ diff --git a/drivers/gpu/drm/i915/gt/intel_gt_gmch.c b/drivers/gpu/drm/i915/gt/intel_gt_gmch.c new file mode 100644 index 000000000000..18e488672d1b --- /dev/null +++ b/drivers/gpu/drm/i915/gt/intel_gt_gmch.c @@ -0,0 +1,654 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright © 2022 Intel Corporation + */ + +#include <drm/intel-gtt.h> +#include <drm/i915_drm.h> + +#include <linux/agp_backend.h> +#include <linux/stop_machine.h> + +#include "i915_drv.h" +#include "intel_gt_gmch.h" +#include "intel_gt_regs.h" +#include "intel_gt.h" +#include "i915_utils.h" + +#include "gen8_ppgtt.h" + +struct insert_page { + struct i915_address_space *vm; + dma_addr_t addr; + u64 offset; + enum i915_cache_level level; +}; + +static void gen8_set_pte(void __iomem *addr, gen8_pte_t pte) +{ + writeq(pte, addr); +} + +static void nop_clear_range(struct i915_address_space *vm, + u64 start, u64 length) +{ +} + +static u64 snb_pte_encode(dma_addr_t addr, + enum i915_cache_level level, + u32 flags) +{ + gen6_pte_t pte = GEN6_PTE_ADDR_ENCODE(addr) | GEN6_PTE_VALID; + + switch (level) { + case I915_CACHE_L3_LLC: + case I915_CACHE_LLC: + pte |= GEN6_PTE_CACHE_LLC; + break; + case I915_CACHE_NONE: + pte |= GEN6_PTE_UNCACHED; + break; + default: + MISSING_CASE(level); + } + + return pte; +} + +static u64 ivb_pte_encode(dma_addr_t addr, + enum i915_cache_level level, + u32 flags) +{ + gen6_pte_t pte = GEN6_PTE_ADDR_ENCODE(addr) | GEN6_PTE_VALID; + + switch (level) { + case I915_CACHE_L3_LLC: + pte |= GEN7_PTE_CACHE_L3_LLC; + break; + case I915_CACHE_LLC: + pte |= GEN6_PTE_CACHE_LLC; + break; + case I915_CACHE_NONE: + pte |= GEN6_PTE_UNCACHED; + break; + default: + MISSING_CASE(level); + } + + return pte; +} + +static u64 byt_pte_encode(dma_addr_t addr, + enum i915_cache_level level, + u32 flags) +{ + gen6_pte_t pte = GEN6_PTE_ADDR_ENCODE(addr) | GEN6_PTE_VALID; + + if (!(flags & PTE_READ_ONLY)) + pte |= BYT_PTE_WRITEABLE; + + if (level != I915_CACHE_NONE) + pte |= BYT_PTE_SNOOPED_BY_CPU_CACHES; + + return pte; +} + +static u64 hsw_pte_encode(dma_addr_t addr, + enum i915_cache_level level, + u32 flags) +{ + gen6_pte_t pte = HSW_PTE_ADDR_ENCODE(addr) | GEN6_PTE_VALID; + + if (level != I915_CACHE_NONE) + pte |= HSW_WB_LLC_AGE3; + + return pte; +} + +static u64 iris_pte_encode(dma_addr_t addr, + enum i915_cache_level level, + u32 flags) +{ + gen6_pte_t pte = HSW_PTE_ADDR_ENCODE(addr) | GEN6_PTE_VALID; + + switch (level) { + case I915_CACHE_NONE: + break; + case I915_CACHE_WT: + pte |= HSW_WT_ELLC_LLC_AGE3; + break; + default: + pte |= HSW_WB_ELLC_LLC_AGE3; + break; + } + + return pte; +} + +static void gen5_ggtt_insert_page(struct i915_address_space *vm, + dma_addr_t addr, + u64 offset, + enum i915_cache_level cache_level, + u32 unused) +{ + unsigned int flags = (cache_level == I915_CACHE_NONE) ? 
+ AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY; + + intel_gtt_insert_page(addr, offset >> PAGE_SHIFT, flags); +} + +static void gen6_ggtt_insert_page(struct i915_address_space *vm, + dma_addr_t addr, + u64 offset, + enum i915_cache_level level, + u32 flags) +{ + struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm); + gen6_pte_t __iomem *pte = + (gen6_pte_t __iomem *)ggtt->gsm + offset / I915_GTT_PAGE_SIZE; + + iowrite32(vm->pte_encode(addr, level, flags), pte); + + ggtt->invalidate(ggtt); +} + +static void gen8_ggtt_insert_page(struct i915_address_space *vm, + dma_addr_t addr, + u64 offset, + enum i915_cache_level level, + u32 flags) +{ + struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm); + gen8_pte_t __iomem *pte = + (gen8_pte_t __iomem *)ggtt->gsm + offset / I915_GTT_PAGE_SIZE; + + gen8_set_pte(pte, gen8_ggtt_pte_encode(addr, level, flags)); + + ggtt->invalidate(ggtt); +} + +static void gen5_ggtt_insert_entries(struct i915_address_space *vm, + struct i915_vma_resource *vma_res, + enum i915_cache_level cache_level, + u32 unused) +{ + unsigned int flags = (cache_level == I915_CACHE_NONE) ? + AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY; + + intel_gtt_insert_sg_entries(vma_res->bi.pages, vma_res->start >> PAGE_SHIFT, + flags); +} + +/* + * Binds an object into the global gtt with the specified cache level. + * The object will be accessible to the GPU via commands whose operands + * reference offsets within the global GTT as well as accessible by the GPU + * through the GMADR mapped BAR (i915->mm.gtt->gtt). + */ +static void gen6_ggtt_insert_entries(struct i915_address_space *vm, + struct i915_vma_resource *vma_res, + enum i915_cache_level level, + u32 flags) +{ + struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm); + gen6_pte_t __iomem *gte; + gen6_pte_t __iomem *end; + struct sgt_iter iter; + dma_addr_t addr; + + gte = (gen6_pte_t __iomem *)ggtt->gsm; + gte += vma_res->start / I915_GTT_PAGE_SIZE; + end = gte + vma_res->node_size / I915_GTT_PAGE_SIZE; + + for_each_sgt_daddr(addr, iter, vma_res->bi.pages) + iowrite32(vm->pte_encode(addr, level, flags), gte++); + GEM_BUG_ON(gte > end); + + /* Fill the allocated but "unused" space beyond the end of the buffer */ + while (gte < end) + iowrite32(vm->scratch[0]->encode, gte++); + + /* + * We want to flush the TLBs only after we're certain all the PTE + * updates have finished. + */ + ggtt->invalidate(ggtt); +} + +static void gen8_ggtt_insert_entries(struct i915_address_space *vm, + struct i915_vma_resource *vma_res, + enum i915_cache_level level, + u32 flags) +{ + const gen8_pte_t pte_encode = gen8_ggtt_pte_encode(0, level, flags); + struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm); + gen8_pte_t __iomem *gte; + gen8_pte_t __iomem *end; + struct sgt_iter iter; + dma_addr_t addr; + + /* + * Note that we ignore PTE_READ_ONLY here. The caller must be careful + * not to allow the user to override access to a read only page. + */ + + gte = (gen8_pte_t __iomem *)ggtt->gsm; + gte += vma_res->start / I915_GTT_PAGE_SIZE; + end = gte + vma_res->node_size / I915_GTT_PAGE_SIZE; + + for_each_sgt_daddr(addr, iter, vma_res->bi.pages) + gen8_set_pte(gte++, pte_encode | addr); + GEM_BUG_ON(gte > end); + + /* Fill the allocated but "unused" space beyond the end of the buffer */ + while (gte < end) + gen8_set_pte(gte++, vm->scratch[0]->encode); + + /* + * We want to flush the TLBs only after we're certain all the PTE + * updates have finished. 
+ */ + ggtt->invalidate(ggtt); +} + +static void bxt_vtd_ggtt_wa(struct i915_address_space *vm) +{ + /* + * Make sure the internal GAM fifo has been cleared of all GTT + * writes before exiting stop_machine(). This guarantees that + * any aperture accesses waiting to start in another process + * cannot back up behind the GTT writes causing a hang. + * The register can be any arbitrary GAM register. + */ + intel_uncore_posting_read_fw(vm->gt->uncore, GFX_FLSH_CNTL_GEN6); +} + +static int bxt_vtd_ggtt_insert_page__cb(void *_arg) +{ + struct insert_page *arg = _arg; + + gen8_ggtt_insert_page(arg->vm, arg->addr, arg->offset, arg->level, 0); + bxt_vtd_ggtt_wa(arg->vm); + + return 0; +} + +static void bxt_vtd_ggtt_insert_page__BKL(struct i915_address_space *vm, + dma_addr_t addr, + u64 offset, + enum i915_cache_level level, + u32 unused) +{ + struct insert_page arg = { vm, addr, offset, level }; + + stop_machine(bxt_vtd_ggtt_insert_page__cb, &arg, NULL); +} + +static int bxt_vtd_ggtt_insert_entries__cb(void *_arg) +{ + struct insert_entries *arg = _arg; + + gen8_ggtt_insert_entries(arg->vm, arg->vma_res, arg->level, arg->flags); + bxt_vtd_ggtt_wa(arg->vm); + + return 0; +} + +static void bxt_vtd_ggtt_insert_entries__BKL(struct i915_address_space *vm, + struct i915_vma_resource *vma_res, + enum i915_cache_level level, + u32 flags) +{ + struct insert_entries arg = { vm, vma_res, level, flags }; + + stop_machine(bxt_vtd_ggtt_insert_entries__cb, &arg, NULL); +} + +void intel_gt_gmch_gen5_chipset_flush(struct intel_gt *gt) +{ + intel_gtt_chipset_flush(); +} + +static void gmch_ggtt_invalidate(struct i915_ggtt *ggtt) +{ + intel_gtt_chipset_flush(); +} + +static void gen5_ggtt_clear_range(struct i915_address_space *vm, + u64 start, u64 length) +{ + intel_gtt_clear_range(start >> PAGE_SHIFT, length >> PAGE_SHIFT); +} + +static void gen6_ggtt_clear_range(struct i915_address_space *vm, + u64 start, u64 length) +{ + struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm); + unsigned int first_entry = start / I915_GTT_PAGE_SIZE; + unsigned int num_entries = length / I915_GTT_PAGE_SIZE; + gen6_pte_t scratch_pte, __iomem *gtt_base = + (gen6_pte_t __iomem *)ggtt->gsm + first_entry; + const int max_entries = ggtt_total_entries(ggtt) - first_entry; + int i; + + if (WARN(num_entries > max_entries, + "First entry = %d; Num entries = %d (max=%d)\n", + first_entry, num_entries, max_entries)) + num_entries = max_entries; + + scratch_pte = vm->scratch[0]->encode; + for (i = 0; i < num_entries; i++) + iowrite32(scratch_pte, &gtt_base[i]); +} + +static void gen8_ggtt_clear_range(struct i915_address_space *vm, + u64 start, u64 length) +{ + struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm); + unsigned int first_entry = start / I915_GTT_PAGE_SIZE; + unsigned int num_entries = length / I915_GTT_PAGE_SIZE; + const gen8_pte_t scratch_pte = vm->scratch[0]->encode; + gen8_pte_t __iomem *gtt_base = + (gen8_pte_t __iomem *)ggtt->gsm + first_entry; + const int max_entries = ggtt_total_entries(ggtt) - first_entry; + int i; + + if (WARN(num_entries > max_entries, + "First entry = %d; Num entries = %d (max=%d)\n", + first_entry, num_entries, max_entries)) + num_entries = max_entries; + + for (i = 0; i < num_entries; i++) + gen8_set_pte(&gtt_base[i], scratch_pte); +} + +static void gen5_gmch_remove(struct i915_address_space *vm) +{ + intel_gmch_remove(); +} + +static void gen6_gmch_remove(struct i915_address_space *vm) +{ + struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm); + + iounmap(ggtt->gsm); + free_scratch(vm); +} + +/* + * Certain Gen5 chipsets require
idling the GPU before + * unmapping anything from the GTT when VT-d is enabled. + */ +static bool needs_idle_maps(struct drm_i915_private *i915) +{ + /* + * Query intel_iommu to see if we need the workaround. Presumably that + * was loaded first. + */ + if (!i915_vtd_active(i915)) + return false; + + if (GRAPHICS_VER(i915) == 5 && IS_MOBILE(i915)) + return true; + + if (GRAPHICS_VER(i915) == 12) + return true; /* XXX DMAR fault reason 7 */ + + return false; +} + +static unsigned int gen6_gttmmadr_size(struct drm_i915_private *i915) +{ + /* + * GEN6: GTTMMADR size is 4MB and GTTADR starts at 2MB offset + * GEN8: GTTMMADR size is 16MB and GTTADR starts at 8MB offset + */ + GEM_BUG_ON(GRAPHICS_VER(i915) < 6); + return (GRAPHICS_VER(i915) < 8) ? SZ_4M : SZ_16M; +} + +static unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl) +{ + snb_gmch_ctl >>= SNB_GMCH_GGMS_SHIFT; + snb_gmch_ctl &= SNB_GMCH_GGMS_MASK; + return snb_gmch_ctl << 20; +} + +static unsigned int gen8_get_total_gtt_size(u16 bdw_gmch_ctl) +{ + bdw_gmch_ctl >>= BDW_GMCH_GGMS_SHIFT; + bdw_gmch_ctl &= BDW_GMCH_GGMS_MASK; + if (bdw_gmch_ctl) + bdw_gmch_ctl = 1 << bdw_gmch_ctl; + +#ifdef CONFIG_X86_32 + /* Limit 32b platforms to a 2GB GGTT: 4 << 20 / pte size * I915_GTT_PAGE_SIZE */ + if (bdw_gmch_ctl > 4) + bdw_gmch_ctl = 4; +#endif + + return bdw_gmch_ctl << 20; +} + +static unsigned int gen6_gttadr_offset(struct drm_i915_private *i915) +{ + return gen6_gttmmadr_size(i915) / 2; +} + +static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size) +{ + struct drm_i915_private *i915 = ggtt->vm.i915; + struct pci_dev *pdev = to_pci_dev(i915->drm.dev); + phys_addr_t phys_addr; + u32 pte_flags; + int ret; + + GEM_WARN_ON(pci_resource_len(pdev, 0) != gen6_gttmmadr_size(i915)); + phys_addr = pci_resource_start(pdev, 0) + gen6_gttadr_offset(i915); + + /* + * On BXT+/ICL+ writes larger than 64 bit to the GTT pagetable range + * will be dropped. For WC mappings in general we have 64 byte burst + * writes when the WC buffer is flushed, so we can't use it, but have to + * resort to an uncached mapping. The WC issue is easily caught by the + * readback check when writing GTT PTE entries. 
+ */ + if (IS_GEN9_LP(i915) || GRAPHICS_VER(i915) >= 11) + ggtt->gsm = ioremap(phys_addr, size); + else + ggtt->gsm = ioremap_wc(phys_addr, size); + if (!ggtt->gsm) { + drm_err(&i915->drm, "Failed to map the ggtt page table\n"); + return -ENOMEM; + } + + kref_init(&ggtt->vm.resv_ref); + ret = setup_scratch_page(&ggtt->vm); + if (ret) { + drm_err(&i915->drm, "Scratch setup failed\n"); + /* iounmap will also get called at remove, but meh */ + iounmap(ggtt->gsm); + return ret; + } + + pte_flags = 0; + if (i915_gem_object_is_lmem(ggtt->vm.scratch[0])) + pte_flags |= PTE_LM; + + ggtt->vm.scratch[0]->encode = + ggtt->vm.pte_encode(px_dma(ggtt->vm.scratch[0]), + I915_CACHE_NONE, pte_flags); + + return 0; +} + +int intel_gt_gmch_gen5_probe(struct i915_ggtt *ggtt) +{ + struct drm_i915_private *i915 = ggtt->vm.i915; + phys_addr_t gmadr_base; + int ret; + + ret = intel_gmch_probe(i915->bridge_dev, to_pci_dev(i915->drm.dev), NULL); + if (!ret) { + drm_err(&i915->drm, "failed to set up gmch\n"); + return -EIO; + } + + intel_gtt_get(&ggtt->vm.total, &gmadr_base, &ggtt->mappable_end); + + ggtt->gmadr = + (struct resource)DEFINE_RES_MEM(gmadr_base, ggtt->mappable_end); + + ggtt->vm.alloc_pt_dma = alloc_pt_dma; + ggtt->vm.alloc_scratch_dma = alloc_pt_dma; + + if (needs_idle_maps(i915)) { + drm_notice(&i915->drm, + "Flushing DMA requests before IOMMU unmaps; performance may be degraded\n"); + ggtt->do_idle_maps = true; + } + + ggtt->vm.insert_page = gen5_ggtt_insert_page; + ggtt->vm.insert_entries = gen5_ggtt_insert_entries; + ggtt->vm.clear_range = gen5_ggtt_clear_range; + ggtt->vm.cleanup = gen5_gmch_remove; + + ggtt->invalidate = gmch_ggtt_invalidate; + + ggtt->vm.vma_ops.bind_vma = intel_ggtt_bind_vma; + ggtt->vm.vma_ops.unbind_vma = intel_ggtt_unbind_vma; + + if (unlikely(ggtt->do_idle_maps)) + drm_notice(&i915->drm, + "Applying Ironlake quirks for intel_iommu\n"); + + return 0; +} + +int intel_gt_gmch_gen6_probe(struct i915_ggtt *ggtt) +{ + struct drm_i915_private *i915 = ggtt->vm.i915; + struct pci_dev *pdev = to_pci_dev(i915->drm.dev); + unsigned int size; + u16 snb_gmch_ctl; + + ggtt->gmadr = intel_pci_resource(pdev, 2); + ggtt->mappable_end = resource_size(&ggtt->gmadr); + + /* + * 64/512MB is the current min/max we actually know of, but this is + * just a coarse sanity check. 
+ */ + if (ggtt->mappable_end < (64<<20) || ggtt->mappable_end > (512<<20)) { + drm_err(&i915->drm, "Unknown GMADR size (%pa)\n", + &ggtt->mappable_end); + return -ENXIO; + } + + pci_read_config_word(pdev, SNB_GMCH_CTRL, &snb_gmch_ctl); + + size = gen6_get_total_gtt_size(snb_gmch_ctl); + ggtt->vm.total = (size / sizeof(gen6_pte_t)) * I915_GTT_PAGE_SIZE; + + ggtt->vm.alloc_pt_dma = alloc_pt_dma; + ggtt->vm.alloc_scratch_dma = alloc_pt_dma; + + ggtt->vm.clear_range = nop_clear_range; + if (!HAS_FULL_PPGTT(i915) || intel_scanout_needs_vtd_wa(i915)) + ggtt->vm.clear_range = gen6_ggtt_clear_range; + ggtt->vm.insert_page = gen6_ggtt_insert_page; + ggtt->vm.insert_entries = gen6_ggtt_insert_entries; + ggtt->vm.cleanup = gen6_gmch_remove; + + ggtt->invalidate = gen6_ggtt_invalidate; + + if (HAS_EDRAM(i915)) + ggtt->vm.pte_encode = iris_pte_encode; + else if (IS_HASWELL(i915)) + ggtt->vm.pte_encode = hsw_pte_encode; + else if (IS_VALLEYVIEW(i915)) + ggtt->vm.pte_encode = byt_pte_encode; + else if (GRAPHICS_VER(i915) >= 7) + ggtt->vm.pte_encode = ivb_pte_encode; + else + ggtt->vm.pte_encode = snb_pte_encode; + + ggtt->vm.vma_ops.bind_vma = intel_ggtt_bind_vma; + ggtt->vm.vma_ops.unbind_vma = intel_ggtt_unbind_vma; + + return ggtt_probe_common(ggtt, size); +} + +static unsigned int chv_get_total_gtt_size(u16 gmch_ctrl) +{ + gmch_ctrl >>= SNB_GMCH_GGMS_SHIFT; + gmch_ctrl &= SNB_GMCH_GGMS_MASK; + + if (gmch_ctrl) + return 1 << (20 + gmch_ctrl); + + return 0; +} + +int intel_gt_gmch_gen8_probe(struct i915_ggtt *ggtt) +{ + struct drm_i915_private *i915 = ggtt->vm.i915; + struct pci_dev *pdev = to_pci_dev(i915->drm.dev); + unsigned int size; + u16 snb_gmch_ctl; + + /* TODO: We're not aware of mappable constraints on gen8 yet */ + if (!HAS_LMEM(i915)) { + ggtt->gmadr = intel_pci_resource(pdev, 2); + ggtt->mappable_end = resource_size(&ggtt->gmadr); + } + + pci_read_config_word(pdev, SNB_GMCH_CTRL, &snb_gmch_ctl); + if (IS_CHERRYVIEW(i915)) + size = chv_get_total_gtt_size(snb_gmch_ctl); + else + size = gen8_get_total_gtt_size(snb_gmch_ctl); + + ggtt->vm.alloc_pt_dma = alloc_pt_dma; + ggtt->vm.alloc_scratch_dma = alloc_pt_dma; + ggtt->vm.lmem_pt_obj_flags = I915_BO_ALLOC_PM_EARLY; + + ggtt->vm.total = (size / sizeof(gen8_pte_t)) * I915_GTT_PAGE_SIZE; + ggtt->vm.cleanup = gen6_gmch_remove; + ggtt->vm.insert_page = gen8_ggtt_insert_page; + ggtt->vm.clear_range = nop_clear_range; + if (intel_scanout_needs_vtd_wa(i915)) + ggtt->vm.clear_range = gen8_ggtt_clear_range; + + ggtt->vm.insert_entries = gen8_ggtt_insert_entries; + + /* + * Serialize GTT updates with aperture access on BXT if VT-d is on, + * and always on CHV. 
+ */ + if (intel_vm_no_concurrent_access_wa(i915)) { + ggtt->vm.insert_entries = bxt_vtd_ggtt_insert_entries__BKL; + ggtt->vm.insert_page = bxt_vtd_ggtt_insert_page__BKL; + ggtt->vm.bind_async_flags = + I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND; + } + + ggtt->invalidate = gen8_ggtt_invalidate; + + ggtt->vm.vma_ops.bind_vma = intel_ggtt_bind_vma; + ggtt->vm.vma_ops.unbind_vma = intel_ggtt_unbind_vma; + + ggtt->vm.pte_encode = gen8_ggtt_pte_encode; + + setup_private_pat(ggtt->vm.gt->uncore); + + return ggtt_probe_common(ggtt, size); +} + +int intel_gt_gmch_gen5_enable_hw(struct drm_i915_private *i915) +{ + if (GRAPHICS_VER(i915) < 6 && !intel_enable_gtt()) + return -EIO; + + return 0; +} diff --git a/drivers/gpu/drm/i915/gt/intel_gt_gmch.h b/drivers/gpu/drm/i915/gt/intel_gt_gmch.h new file mode 100644 index 000000000000..75ed55c1f30a --- /dev/null +++ b/drivers/gpu/drm/i915/gt/intel_gt_gmch.h @@ -0,0 +1,46 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2022 Intel Corporation + */ + +#ifndef __INTEL_GT_GMCH_H__ +#define __INTEL_GT_GMCH_H__ + +#include "intel_gtt.h" + +/* For x86 platforms */ +#if IS_ENABLED(CONFIG_X86) +void intel_gt_gmch_gen5_chipset_flush(struct intel_gt *gt); +int intel_gt_gmch_gen6_probe(struct i915_ggtt *ggtt); +int intel_gt_gmch_gen8_probe(struct i915_ggtt *ggtt); +int intel_gt_gmch_gen5_probe(struct i915_ggtt *ggtt); +int intel_gt_gmch_gen5_enable_hw(struct drm_i915_private *i915); + +/* Stubs for non-x86 platforms */ +#else +static inline void intel_gt_gmch_gen5_chipset_flush(struct intel_gt *gt) +{ +} +static inline int intel_gt_gmch_gen5_probe(struct i915_ggtt *ggtt) +{ + /* No HW should be probed for this case yet, return fail */ + return -ENODEV; +} +static inline int intel_gt_gmch_gen6_probe(struct i915_ggtt *ggtt) +{ + /* No HW should be probed for this case yet, return fail */ + return -ENODEV; +} +static inline int intel_gt_gmch_gen8_probe(struct i915_ggtt *ggtt) +{ + /* No HW should be probed for this case yet, return fail */ + return -ENODEV; +} +static inline int intel_gt_gmch_gen5_enable_hw(struct drm_i915_private *i915) +{ + /* No HW should be enabled for this case yet, return fail */ + return -ENODEV; +} +#endif + +#endif /* __INTEL_GT_GMCH_H__ */ diff --git a/drivers/gpu/drm/i915/gt/intel_gt_irq.c b/drivers/gpu/drm/i915/gt/intel_gt_irq.c index e443ac4c8059..88b4becfcb17 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_irq.c +++ b/drivers/gpu/drm/i915/gt/intel_gt_irq.c @@ -68,6 +68,9 @@ gen11_other_irq_handler(struct intel_gt *gt, const u8 instance, if (instance == OTHER_KCR_INSTANCE) return intel_pxp_irq_handler(&gt->pxp, iir); + if (instance == OTHER_GSC_INSTANCE) + return intel_gsc_irq_handler(gt, iir); + WARN_ONCE(1, "unhandled other interrupt instance=0x%x, iir=0x%x\n", instance, iir); }
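gen11_other_irq_handler() dispatches on the instance number decoded from the IIR; with this patch, instance 6 (OTHER_GSC_INSTANCE, added to intel_gt_regs.h below) is routed to the GSC. A stand-alone sketch of that instance-based routing, not part of the patch, with printf stubs standing in for the real handlers:

#include <stdio.h>

/* Instance IDs as defined in intel_gt_regs.h */
#define OTHER_GUC_INSTANCE  0
#define OTHER_GTPM_INSTANCE 1
#define OTHER_KCR_INSTANCE  4
#define OTHER_GSC_INSTANCE  6

static void route_other_irq(unsigned int instance, unsigned int iir)
{
	switch (instance) {
	case OTHER_GUC_INSTANCE:
		printf("GuC handler, iir=0x%x\n", iir);
		break;
	case OTHER_KCR_INSTANCE:
		printf("PXP/KCR handler, iir=0x%x\n", iir);
		break;
	case OTHER_GSC_INSTANCE:
		printf("GSC handler, iir=0x%x\n", iir); /* new with this patch */
		break;
	default:
		printf("unhandled instance=0x%x, iir=0x%x\n", instance, iir);
		break;
	}
}

int main(void)
{
	route_other_irq(OTHER_GSC_INSTANCE, 0x8000); /* hypothetical IIR value */
	return 0;
}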
@@ -184,6 +187,8 @@ void gen11_gt_irq_reset(struct intel_gt *gt) intel_uncore_write(uncore, GEN11_VCS_VECS_INTR_ENABLE, 0); if (CCS_MASK(gt)) intel_uncore_write(uncore, GEN12_CCS_RSVD_INTR_ENABLE, 0); + if (HAS_HECI_GSC(gt->i915)) + intel_uncore_write(uncore, GEN11_GUNIT_CSME_INTR_ENABLE, 0); /* Restore masks irqs on RCS, BCS, VCS and VECS engines. */ intel_uncore_write(uncore, GEN11_RCS0_RSVD_INTR_MASK, ~0); @@ -201,6 +206,8 @@ void gen11_gt_irq_reset(struct intel_gt *gt) intel_uncore_write(uncore, GEN12_CCS0_CCS1_INTR_MASK, ~0); if (HAS_ENGINE(gt, CCS2) || HAS_ENGINE(gt, CCS3)) intel_uncore_write(uncore, GEN12_CCS2_CCS3_INTR_MASK, ~0); + if (HAS_HECI_GSC(gt->i915)) + intel_uncore_write(uncore, GEN11_GUNIT_CSME_INTR_MASK, ~0); intel_uncore_write(uncore, GEN11_GPM_WGBOXPERF_INTR_ENABLE, 0); intel_uncore_write(uncore, GEN11_GPM_WGBOXPERF_INTR_MASK, ~0); @@ -215,6 +222,7 @@ void gen11_gt_irq_postinstall(struct intel_gt *gt) { struct intel_uncore *uncore = gt->uncore; u32 irqs = GT_RENDER_USER_INTERRUPT; + const u32 gsc_mask = GSC_IRQ_INTF(0) | GSC_IRQ_INTF(1); u32 dmask; u32 smask; @@ -233,6 +241,9 @@ void gen11_gt_irq_postinstall(struct intel_gt *gt) intel_uncore_write(uncore, GEN11_VCS_VECS_INTR_ENABLE, dmask); if (CCS_MASK(gt)) intel_uncore_write(uncore, GEN12_CCS_RSVD_INTR_ENABLE, smask); + if (HAS_HECI_GSC(gt->i915)) + intel_uncore_write(uncore, GEN11_GUNIT_CSME_INTR_ENABLE, + gsc_mask); /* Unmask irqs on RCS, BCS, VCS and VECS engines. */ intel_uncore_write(uncore, GEN11_RCS0_RSVD_INTR_MASK, ~smask); @@ -250,6 +261,8 @@ void gen11_gt_irq_postinstall(struct intel_gt *gt) intel_uncore_write(uncore, GEN12_CCS0_CCS1_INTR_MASK, ~dmask); if (HAS_ENGINE(gt, CCS2) || HAS_ENGINE(gt, CCS3)) intel_uncore_write(uncore, GEN12_CCS2_CCS3_INTR_MASK, ~dmask); + if (HAS_HECI_GSC(gt->i915)) + intel_uncore_write(uncore, GEN11_GUNIT_CSME_INTR_MASK, ~gsc_mask); /* * RPS interrupts will get enabled/disabled on demand when RPS itself diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm.c b/drivers/gpu/drm/i915/gt/intel_gt_pm.c index c0fa41e4c803..f553e2173bda 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_pm.c +++ b/drivers/gpu/drm/i915/gt/intel_gt_pm.c @@ -3,6 +3,7 @@ * Copyright © 2019 Intel Corporation */ +#include <linux/string_helpers.h> #include <linux/suspend.h> #include "i915_drv.h"
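The PM files below switch from the driver-local yesno() helper to str_yes_no() from the newly included <linux/string_helpers.h>. The kernel helper simply maps a boolean to "yes"/"no"; a stand-alone equivalent for reference:

#include <stdbool.h>
#include <stdio.h>

/* Equivalent of the kernel's str_yes_no() from <linux/string_helpers.h> */
static inline const char *str_yes_no(bool v)
{
	return v ? "yes" : "no";
}

int main(void)
{
	bool enabled = true; /* hypothetical register-derived state */

	printf("HW control enabled: %s\n", str_yes_no(enabled));
	return 0;
}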
@@ -128,7 +129,14 @@ static const struct intel_wakeref_ops wf_ops = { void intel_gt_pm_init_early(struct intel_gt *gt) { - intel_wakeref_init(&gt->wakeref, gt->uncore->rpm, &wf_ops); + /* + * We access the runtime_pm structure via gt->i915 here rather than + * gt->uncore as we do elsewhere in the file because gt->uncore is not + * yet initialized for all tiles at this point in the driver startup. + * runtime_pm is per-device rather than per-tile, so this is still the + * correct structure. + */ + intel_wakeref_init(&gt->wakeref, &gt->i915->runtime_pm, &wf_ops); seqcount_mutex_init(&gt->stats.lock, &gt->wakeref.mutex); } @@ -157,7 +165,7 @@ static void gt_sanitize(struct intel_gt *gt, bool force) enum intel_engine_id id; intel_wakeref_t wakeref; - GT_TRACE(gt, "force:%s", yesno(force)); + GT_TRACE(gt, "force:%s", str_yes_no(force)); /* Use a raw wakeref to avoid calling intel_display_power_get early */ wakeref = intel_runtime_pm_get(gt->uncore->rpm); @@ -174,15 +182,16 @@ static void gt_sanitize(struct intel_gt *gt, bool force) if (intel_gt_is_wedged(gt)) intel_gt_unset_wedged(gt); - for_each_engine(engine, gt, id) + /* For GuC mode, ensure submission is disabled before stopping ring */ + intel_uc_reset_prepare(&gt->uc); + + for_each_engine(engine, gt, id) { if (engine->reset.prepare) engine->reset.prepare(engine); - intel_uc_reset_prepare(&gt->uc); - - for_each_engine(engine, gt, id) if (engine->sanitize) engine->sanitize(engine); + } if (reset_engines(gt) || force) { for_each_engine(engine, gt, id) diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.c b/drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.c index 37765919fe32..0c6b9eb724ae 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.c +++ b/drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.c @@ -5,6 +5,7 @@ */ #include <linux/seq_file.h> +#include <linux/string_helpers.h> #include "i915_drv.h" #include "i915_reg.h" @@ -23,38 +24,38 @@ #include "intel_uncore.h" #include "vlv_sideband.h" -int intel_gt_pm_debugfs_forcewake_user_open(struct intel_gt *gt) +void intel_gt_pm_debugfs_forcewake_user_open(struct intel_gt *gt) { atomic_inc(&gt->user_wakeref); intel_gt_pm_get(gt); if (GRAPHICS_VER(gt->i915) >= 6) intel_uncore_forcewake_user_get(gt->uncore); - - return 0; } -int intel_gt_pm_debugfs_forcewake_user_release(struct intel_gt *gt) +void intel_gt_pm_debugfs_forcewake_user_release(struct intel_gt *gt) { if (GRAPHICS_VER(gt->i915) >= 6) intel_uncore_forcewake_user_put(gt->uncore); intel_gt_pm_put(gt); atomic_dec(&gt->user_wakeref); - - return 0; } static int forcewake_user_open(struct inode *inode, struct file *file) { struct intel_gt *gt = inode->i_private; - return intel_gt_pm_debugfs_forcewake_user_open(gt); + intel_gt_pm_debugfs_forcewake_user_open(gt); + + return 0; } static int forcewake_user_release(struct inode *inode, struct file *file) { struct intel_gt *gt = inode->i_private; - return intel_gt_pm_debugfs_forcewake_user_release(gt); + intel_gt_pm_debugfs_forcewake_user_release(gt); + + return 0; } static const struct file_operations forcewake_user_fops = { @@ -105,14 +106,14 @@ static int vlv_drpc(struct seq_file *m) rcctl1 = intel_uncore_read(uncore, GEN6_RC_CONTROL); seq_printf(m, "RC6 Enabled: %s\n", - yesno(rcctl1 & (GEN7_RC_CTL_TO_MODE | + str_yes_no(rcctl1 & (GEN7_RC_CTL_TO_MODE | GEN6_RC_CTL_EI_MODE(1)))); seq_printf(m, "Render Power Well: %s\n", (pw_status & VLV_GTLC_PW_RENDER_STATUS_MASK) ? "Up" : "Down"); seq_printf(m, "Media Power Well: %s\n", (pw_status & VLV_GTLC_PW_MEDIA_STATUS_MASK) ?
"Up" : "Down"); - print_rc6_res(m, "Render RC6 residency since boot:", VLV_GT_RENDER_RC6); + print_rc6_res(m, "Render RC6 residency since boot:", GEN6_GT_GFX_RC6); print_rc6_res(m, "Media RC6 residency since boot:", VLV_GT_MEDIA_RC6); return fw_domains_show(m, NULL); @@ -140,19 +141,19 @@ static int gen6_drpc(struct seq_file *m) snb_pcode_read(i915, GEN6_PCODE_READ_RC6VIDS, &rc6vids, NULL); seq_printf(m, "RC1e Enabled: %s\n", - yesno(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE)); + str_yes_no(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE)); seq_printf(m, "RC6 Enabled: %s\n", - yesno(rcctl1 & GEN6_RC_CTL_RC6_ENABLE)); + str_yes_no(rcctl1 & GEN6_RC_CTL_RC6_ENABLE)); if (GRAPHICS_VER(i915) >= 9) { seq_printf(m, "Render Well Gating Enabled: %s\n", - yesno(gen9_powergate_enable & GEN9_RENDER_PG_ENABLE)); + str_yes_no(gen9_powergate_enable & GEN9_RENDER_PG_ENABLE)); seq_printf(m, "Media Well Gating Enabled: %s\n", - yesno(gen9_powergate_enable & GEN9_MEDIA_PG_ENABLE)); + str_yes_no(gen9_powergate_enable & GEN9_MEDIA_PG_ENABLE)); } seq_printf(m, "Deep RC6 Enabled: %s\n", - yesno(rcctl1 & GEN6_RC_CTL_RC6p_ENABLE)); + str_yes_no(rcctl1 & GEN6_RC_CTL_RC6p_ENABLE)); seq_printf(m, "Deepest RC6 Enabled: %s\n", - yesno(rcctl1 & GEN6_RC_CTL_RC6pp_ENABLE)); + str_yes_no(rcctl1 & GEN6_RC_CTL_RC6pp_ENABLE)); seq_puts(m, "Current RC state: "); switch (gt_core_status & GEN6_RCn_MASK) { case GEN6_RC0: @@ -176,7 +177,7 @@ static int gen6_drpc(struct seq_file *m) } seq_printf(m, "Core Power Down: %s\n", - yesno(gt_core_status & GEN6_CORE_CPD_STATE_MASK)); + str_yes_no(gt_core_status & GEN6_CORE_CPD_STATE_MASK)); if (GRAPHICS_VER(i915) >= 9) { seq_printf(m, "Render Power Well: %s\n", (gen9_powergate_status & @@ -216,16 +217,17 @@ static int ilk_drpc(struct seq_file *m) rstdbyctl = intel_uncore_read(uncore, RSTDBYCTL); crstandvid = intel_uncore_read16(uncore, CRSTANDVID); - seq_printf(m, "HD boost: %s\n", yesno(rgvmodectl & MEMMODE_BOOST_EN)); + seq_printf(m, "HD boost: %s\n", + str_yes_no(rgvmodectl & MEMMODE_BOOST_EN)); seq_printf(m, "Boost freq: %d\n", (rgvmodectl & MEMMODE_BOOST_FREQ_MASK) >> MEMMODE_BOOST_FREQ_SHIFT); seq_printf(m, "HW control enabled: %s\n", - yesno(rgvmodectl & MEMMODE_HWIDLE_EN)); + str_yes_no(rgvmodectl & MEMMODE_HWIDLE_EN)); seq_printf(m, "SW control enabled: %s\n", - yesno(rgvmodectl & MEMMODE_SWMODE_EN)); + str_yes_no(rgvmodectl & MEMMODE_SWMODE_EN)); seq_printf(m, "Gated voltage change: %s\n", - yesno(rgvmodectl & MEMMODE_RCLK_GATE)); + str_yes_no(rgvmodectl & MEMMODE_RCLK_GATE)); seq_printf(m, "Starting frequency: P%d\n", (rgvmodectl & MEMMODE_FSTART_MASK) >> MEMMODE_FSTART_SHIFT); seq_printf(m, "Max P-state: P%d\n", @@ -234,7 +236,7 @@ static int ilk_drpc(struct seq_file *m) seq_printf(m, "RS1 VID: %d\n", (crstandvid & 0x3f)); seq_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f)); seq_printf(m, "Render standby enabled: %s\n", - yesno(!(rstdbyctl & RCX_SW_EXIT))); + str_yes_no(!(rstdbyctl & RCX_SW_EXIT))); seq_puts(m, "Current RS state: "); switch (rstdbyctl & RSX_STATUS_MASK) { case RSX_STATUS_ON: @@ -307,12 +309,11 @@ void intel_gt_pm_frequency_dump(struct intel_gt *gt, struct drm_printer *p) rpmodectl = intel_uncore_read(uncore, GEN6_RP_CONTROL); drm_printf(p, "Video Turbo Mode: %s\n", - yesno(rpmodectl & GEN6_RP_MEDIA_TURBO)); + str_yes_no(rpmodectl & GEN6_RP_MEDIA_TURBO)); drm_printf(p, "HW control enabled: %s\n", - yesno(rpmodectl & GEN6_RP_ENABLE)); + str_yes_no(rpmodectl & GEN6_RP_ENABLE)); drm_printf(p, "SW control enabled: %s\n", - yesno((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) == - 
GEN6_RP_MEDIA_SW_MODE)); + str_yes_no((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) == GEN6_RP_MEDIA_SW_MODE)); vlv_punit_get(i915); freq_sts = vlv_punit_read(i915, PUNIT_REG_GPU_FREQ_STS); @@ -341,17 +342,16 @@ void intel_gt_pm_frequency_dump(struct intel_gt *gt, struct drm_printer *p) } else if (GRAPHICS_VER(i915) >= 6) { u32 rp_state_limits; u32 gt_perf_status; - u32 rp_state_cap; + struct intel_rps_freq_caps caps; u32 rpmodectl, rpinclimit, rpdeclimit; u32 rpstat, cagf, reqf; u32 rpcurupei, rpcurup, rpprevup; u32 rpcurdownei, rpcurdown, rpprevdown; u32 rpupei, rpupt, rpdownei, rpdownt; u32 pm_ier, pm_imr, pm_isr, pm_iir, pm_mask; - int max_freq; rp_state_limits = intel_uncore_read(uncore, GEN6_RP_STATE_LIMITS); - rp_state_cap = intel_rps_read_state_cap(rps); + gen6_rps_get_freq_caps(rps, &caps); if (IS_GEN9_LP(i915)) gt_perf_status = intel_uncore_read(uncore, BXT_GT_PERF_STATUS); else @@ -417,12 +417,11 @@ void intel_gt_pm_frequency_dump(struct intel_gt *gt, struct drm_printer *p) pm_mask = intel_uncore_read(uncore, GEN6_PMINTRMSK); drm_printf(p, "Video Turbo Mode: %s\n", - yesno(rpmodectl & GEN6_RP_MEDIA_TURBO)); + str_yes_no(rpmodectl & GEN6_RP_MEDIA_TURBO)); drm_printf(p, "HW control enabled: %s\n", - yesno(rpmodectl & GEN6_RP_ENABLE)); + str_yes_no(rpmodectl & GEN6_RP_ENABLE)); drm_printf(p, "SW control enabled: %s\n", - yesno((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) == - GEN6_RP_MEDIA_SW_MODE)); + str_yes_no((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) == GEN6_RP_MEDIA_SW_MODE)); drm_printf(p, "PM IER=0x%08x IMR=0x%08x, MASK=0x%08x\n", pm_ier, pm_imr, pm_mask); @@ -474,25 +473,12 @@ void intel_gt_pm_frequency_dump(struct intel_gt *gt, struct drm_printer *p) drm_printf(p, "RP DOWN THRESHOLD: %d (%lldns)\n", rpdownt, intel_gt_pm_interval_to_ns(gt, rpdownt)); - max_freq = (IS_GEN9_LP(i915) ? rp_state_cap >> 0 : - rp_state_cap >> 16) & 0xff; - max_freq *= (IS_GEN9_BC(i915) || - GRAPHICS_VER(i915) >= 11 ? GEN9_FREQ_SCALER : 1); drm_printf(p, "Lowest (RPN) frequency: %dMHz\n", - intel_gpu_freq(rps, max_freq)); - - max_freq = (rp_state_cap & 0xff00) >> 8; - max_freq *= (IS_GEN9_BC(i915) || - GRAPHICS_VER(i915) >= 11 ? GEN9_FREQ_SCALER : 1); + intel_gpu_freq(rps, caps.min_freq)); drm_printf(p, "Nominal (RP1) frequency: %dMHz\n", - intel_gpu_freq(rps, max_freq)); - - max_freq = (IS_GEN9_LP(i915) ? rp_state_cap >> 16 : - rp_state_cap >> 0) & 0xff; - max_freq *= (IS_GEN9_BC(i915) || - GRAPHICS_VER(i915) >= 11 ? GEN9_FREQ_SCALER : 1); + intel_gpu_freq(rps, caps.rp1_freq)); drm_printf(p, "Max non-overclocked (RP0) frequency: %dMHz\n", - intel_gpu_freq(rps, max_freq)); + intel_gpu_freq(rps, caps.rp0_freq)); drm_printf(p, "Max overclocked frequency: %dMHz\n", intel_gpu_freq(rps, rps->max_freq)); @@ -542,7 +528,7 @@ static int llc_show(struct seq_file *m, void *data) intel_wakeref_t wakeref; int gpu_freq, ia_freq; - seq_printf(m, "LLC: %s\n", yesno(HAS_LLC(i915))); + seq_printf(m, "LLC: %s\n", str_yes_no(HAS_LLC(i915))); seq_printf(m, "%s: %uMB\n", edram ? "eDRAM" : "eLLC", i915->edram_size_mb); @@ -604,10 +590,12 @@ static int rps_boost_show(struct seq_file *m, void *data) struct drm_i915_private *i915 = gt->i915; struct intel_rps *rps = &gt->rps; - seq_printf(m, "RPS enabled? %s\n", yesno(intel_rps_is_enabled(rps))); - seq_printf(m, "RPS active? %s\n", yesno(intel_rps_is_active(rps))); + seq_printf(m, "RPS enabled? %s\n", + str_yes_no(intel_rps_is_enabled(rps))); + seq_printf(m, "RPS active? %s\n", + str_yes_no(intel_rps_is_active(rps))); seq_printf(m, "GPU busy?
%s, %llums\n", - yesno(gt->awake), + str_yes_no(gt->awake), ktime_to_ms(intel_gt_get_awake_time(gt))); seq_printf(m, "Boosts outstanding? %d\n", atomic_read(&rps->num_waiters)); diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.h b/drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.h index a8457887ec65..0ace8c2da0ac 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.h +++ b/drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.h @@ -14,7 +14,7 @@ void intel_gt_pm_debugfs_register(struct intel_gt *gt, struct dentry *root); void intel_gt_pm_frequency_dump(struct intel_gt *gt, struct drm_printer *m); /* functions that need to be accessed by the upper level non-gt interfaces */ -int intel_gt_pm_debugfs_forcewake_user_open(struct intel_gt *gt); -int intel_gt_pm_debugfs_forcewake_user_release(struct intel_gt *gt); +void intel_gt_pm_debugfs_forcewake_user_open(struct intel_gt *gt); +void intel_gt_pm_debugfs_forcewake_user_release(struct intel_gt *gt); #endif /* INTEL_GT_PM_DEBUGFS_H */ diff --git a/drivers/gpu/drm/i915/gt/intel_gt_regs.h b/drivers/gpu/drm/i915/gt/intel_gt_regs.h index 19cd34f24263..a0a49c16babd 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_regs.h +++ b/drivers/gpu/drm/i915/gt/intel_gt_regs.h @@ -46,6 +46,7 @@ #define GEN8_MCR_SLICE_MASK GEN8_MCR_SLICE(3) #define GEN8_MCR_SUBSLICE(subslice) (((subslice) & 3) << 24) #define GEN8_MCR_SUBSLICE_MASK GEN8_MCR_SUBSLICE(3) +#define GEN11_MCR_MULTICAST REG_BIT(31) #define GEN11_MCR_SLICE(slice) (((slice) & 0xf) << 27) #define GEN11_MCR_SLICE_MASK GEN11_MCR_SLICE(0xf) #define GEN11_MCR_SUBSLICE(subslice) (((subslice) & 0x7) << 24) @@ -840,6 +841,24 @@ #define CTC_SHIFT_PARAMETER_SHIFT 1 #define CTC_SHIFT_PARAMETER_MASK (0x3 << CTC_SHIFT_PARAMETER_SHIFT) +/* GPM MSG_IDLE */ +#define MSG_IDLE_CS _MMIO(0x8000) +#define MSG_IDLE_VCS0 _MMIO(0x8004) +#define MSG_IDLE_VCS1 _MMIO(0x8008) +#define MSG_IDLE_BCS _MMIO(0x800C) +#define MSG_IDLE_VECS0 _MMIO(0x8010) +#define MSG_IDLE_VCS2 _MMIO(0x80C0) +#define MSG_IDLE_VCS3 _MMIO(0x80C4) +#define MSG_IDLE_VCS4 _MMIO(0x80C8) +#define MSG_IDLE_VCS5 _MMIO(0x80CC) +#define MSG_IDLE_VCS6 _MMIO(0x80D0) +#define MSG_IDLE_VCS7 _MMIO(0x80D4) +#define MSG_IDLE_VECS1 _MMIO(0x80D8) +#define MSG_IDLE_VECS2 _MMIO(0x80DC) +#define MSG_IDLE_VECS3 _MMIO(0x80E0) +#define MSG_IDLE_FW_MASK REG_GENMASK(13, 9) +#define MSG_IDLE_FW_SHIFT 9 + #define FORCEWAKE_MEDIA_GEN9 _MMIO(0xa270) #define FORCEWAKE_RENDER_GEN9 _MMIO(0xa278) @@ -988,6 +1007,7 @@ #define GEN12_VD_TLB_INV_CR _MMIO(0xcedc) #define GEN12_VE_TLB_INV_CR _MMIO(0xcee0) #define GEN12_BLT_TLB_INV_CR _MMIO(0xcee4) +#define GEN12_COMPCTX_TLB_INV_CR _MMIO(0xcf04) #define GEN12_MERT_MOD_CTRL _MMIO(0xcf28) #define RENDER_MOD_CTRL _MMIO(0xcf2c) @@ -1087,6 +1107,7 @@ #define EU_PERF_CNTL3 _MMIO(0xe758) #define LSC_CHICKEN_BIT_0 _MMIO(0xe7c8) +#define DISABLE_D8_D16_COASLESCE REG_BIT(30) #define FORCE_1_SUB_MESSAGE_PER_FRAGMENT REG_BIT(15) #define LSC_CHICKEN_BIT_0_UDW _MMIO(0xe7c8 + 4) #define DIS_CHAIN_2XSIMD8 REG_BIT(55 - 32) @@ -1440,7 +1461,6 @@ #define VLV_MEDIA_RC6_COUNT_EN (1 << 1) #define VLV_RENDER_RC6_COUNT_EN (1 << 0) #define GEN6_GT_GFX_RC6 _MMIO(0x138108) -#define VLV_GT_RENDER_RC6 _MMIO(0x138108) #define VLV_GT_MEDIA_RC6 _MMIO(0x13810c) #define GEN6_GT_GFX_RC6p _MMIO(0x13810c) @@ -1483,6 +1503,7 @@ #define OTHER_GUC_INSTANCE 0 #define OTHER_GTPM_INSTANCE 1 #define OTHER_KCR_INSTANCE 4 +#define OTHER_GSC_INSTANCE 6 #define GEN11_IIR_REG_SELECTOR(x) _MMIO(0x190070 + ((x) * 4)) diff --git a/drivers/gpu/drm/i915/gt/intel_gt_sysfs.c 
b/drivers/gpu/drm/i915/gt/intel_gt_sysfs.c new file mode 100644 index 000000000000..8ec8bc660c8c --- /dev/null +++ b/drivers/gpu/drm/i915/gt/intel_gt_sysfs.c @@ -0,0 +1,122 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright © 2022 Intel Corporation + */ + +#include <drm/drm_device.h> +#include <linux/device.h> +#include <linux/kobject.h> +#include <linux/printk.h> +#include <linux/sysfs.h> + +#include "i915_drv.h" +#include "i915_sysfs.h" +#include "intel_gt.h" +#include "intel_gt_sysfs.h" +#include "intel_gt_sysfs_pm.h" +#include "intel_gt_types.h" +#include "intel_rc6.h" + +bool is_object_gt(struct kobject *kobj) +{ + return !strncmp(kobj->name, "gt", 2); +} + +static struct intel_gt *kobj_to_gt(struct kobject *kobj) +{ + return container_of(kobj, struct kobj_gt, base)->gt; } + +struct intel_gt *intel_gt_sysfs_get_drvdata(struct device *dev, + const char *name) +{ + struct kobject *kobj = &dev->kobj; + + /* + * We need to know whether the interface was called from a gt/ + * subdirectory or from the parent directory, because that also + * determines the type of the private data: when called from gt/ + * the private data is a "struct intel_gt *", otherwise it is a + * "struct drm_i915_private *". + */ + if (!is_object_gt(kobj)) { + struct drm_i915_private *i915 = kdev_minor_to_i915(dev); + + return to_gt(i915); + } + + return kobj_to_gt(kobj); } + +static struct kobject *gt_get_parent_obj(struct intel_gt *gt) +{ + return &gt->i915->drm.primary->kdev->kobj; } + +static ssize_t id_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct intel_gt *gt = intel_gt_sysfs_get_drvdata(dev, attr->attr.name); + + return sysfs_emit(buf, "%u\n", gt->info.id); } +static DEVICE_ATTR_RO(id); + +static struct attribute *id_attrs[] = { + &dev_attr_id.attr, + NULL, +}; +ATTRIBUTE_GROUPS(id); + +static void kobj_gt_release(struct kobject *kobj) +{ + kfree(kobj); } + +static struct kobj_type kobj_gt_type = { + .release = kobj_gt_release, + .sysfs_ops = &kobj_sysfs_ops, + .default_groups = id_groups, +};
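kobj_to_gt() above recovers the wrapping kobj_gt from its embedded kobject via container_of(). A stand-alone illustration of that pointer arithmetic, not part of the patch, using simplified stand-in structs:

#include <stddef.h>
#include <stdio.h>

/* Same arithmetic as the kernel macro: walk back from a member pointer
 * to the structure that contains it. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct model_kobject { const char *name; };

struct model_kobj_gt {
	struct model_kobject base; /* embedded, as in struct kobj_gt */
	int gt_id;
};

int main(void)
{
	struct model_kobj_gt kg = { .base = { "gt0" }, .gt_id = 0 };
	struct model_kobject *kobj = &kg.base;

	struct model_kobj_gt *owner =
		container_of(kobj, struct model_kobj_gt, base);

	printf("%s -> gt_id=%d\n", kobj->name, owner->gt_id);
	return 0;
}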
+ */ + if (gt_is_root(gt)) + intel_gt_sysfs_pm_init(gt, gt_get_parent_obj(gt)); + + kg = kzalloc(sizeof(*kg), GFP_KERNEL); + if (!kg) + goto exit_fail; + + kobject_init(&kg->base, &kobj_gt_type); + kg->gt = gt; + + /* xfer ownership to sysfs tree */ + if (kobject_add(&kg->base, gt->i915->sysfs_gt, "gt%d", gt->info.id)) + goto exit_kobj_put; + + intel_gt_sysfs_pm_init(gt, &kg->base); + + return; + +exit_kobj_put: + kobject_put(&kg->base); + +exit_fail: + drm_warn(&gt->i915->drm, + "failed to initialize gt%d sysfs root\n", gt->info.id); +} diff --git a/drivers/gpu/drm/i915/gt/intel_gt_sysfs.h b/drivers/gpu/drm/i915/gt/intel_gt_sysfs.h new file mode 100644 index 000000000000..9471b26752cf --- /dev/null +++ b/drivers/gpu/drm/i915/gt/intel_gt_sysfs.h @@ -0,0 +1,34 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2022 Intel Corporation + */ + +#ifndef __SYSFS_GT_H__ +#define __SYSFS_GT_H__ + +#include <linux/ctype.h> +#include <linux/kobject.h> + +#include "i915_gem.h" /* GEM_BUG_ON() */ + +struct intel_gt; + +struct kobj_gt { + struct kobject base; + struct intel_gt *gt; +}; + +bool is_object_gt(struct kobject *kobj); + +struct drm_i915_private *kobj_to_i915(struct kobject *kobj); + +struct kobject * +intel_gt_create_kobj(struct intel_gt *gt, + struct kobject *dir, + const char *name); + +void intel_gt_sysfs_register(struct intel_gt *gt); +struct intel_gt *intel_gt_sysfs_get_drvdata(struct device *dev, + const char *name); + +#endif /* __SYSFS_GT_H__ */ diff --git a/drivers/gpu/drm/i915/gt/intel_gt_sysfs_pm.c b/drivers/gpu/drm/i915/gt/intel_gt_sysfs_pm.c new file mode 100644 index 000000000000..f76b6cf8040e --- /dev/null +++ b/drivers/gpu/drm/i915/gt/intel_gt_sysfs_pm.c @@ -0,0 +1,602 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright © 2022 Intel Corporation + */ + +#include <drm/drm_device.h> +#include <linux/sysfs.h> +#include <linux/printk.h> + +#include "i915_drv.h" +#include "i915_reg.h" +#include "i915_sysfs.h" +#include "intel_gt.h" +#include "intel_gt_regs.h" +#include "intel_gt_sysfs.h" +#include "intel_gt_sysfs_pm.h" +#include "intel_rc6.h" +#include "intel_rps.h" + +enum intel_gt_sysfs_op { + INTEL_GT_SYSFS_MIN = 0, + INTEL_GT_SYSFS_MAX, +}; + +static int +sysfs_gt_attribute_w_func(struct device *dev, struct device_attribute *attr, + int (func)(struct intel_gt *gt, u32 val), u32 val) +{ + struct intel_gt *gt; + int ret; + + if (!is_object_gt(&dev->kobj)) { + int i; + struct drm_i915_private *i915 = kdev_minor_to_i915(dev); + + for_each_gt(gt, i915, i) { + ret = func(gt, val); + if (ret) + break; + } + } else { + gt = intel_gt_sysfs_get_drvdata(dev, attr->attr.name); + ret = func(gt, val); + } + + return ret; +} + +static u32 +sysfs_gt_attribute_r_func(struct device *dev, struct device_attribute *attr, + u32 (func)(struct intel_gt *gt), + enum intel_gt_sysfs_op op) +{ + struct intel_gt *gt; + u32 ret; + + ret = (op == INTEL_GT_SYSFS_MAX) ?
0 : (u32) -1; + + if (!is_object_gt(&dev->kobj)) { + int i; + struct drm_i915_private *i915 = kdev_minor_to_i915(dev); + + for_each_gt(gt, i915, i) { + u32 val = func(gt); + + switch (op) { + case INTEL_GT_SYSFS_MIN: + if (val < ret) + ret = val; + break; + + case INTEL_GT_SYSFS_MAX: + if (val > ret) + ret = val; + break; + } + } + } else { + gt = intel_gt_sysfs_get_drvdata(dev, attr->attr.name); + ret = func(gt); + } + + return ret; +} + +/* RC6 interfaces will show the minimum RC6 residency value */ +#define sysfs_gt_attribute_r_min_func(d, a, f) \ + sysfs_gt_attribute_r_func(d, a, f, INTEL_GT_SYSFS_MIN) + +/* Frequency interfaces will show the maximum frequency value */ +#define sysfs_gt_attribute_r_max_func(d, a, f) \ + sysfs_gt_attribute_r_func(d, a, f, INTEL_GT_SYSFS_MAX) + +#ifdef CONFIG_PM +static u32 get_residency(struct intel_gt *gt, i915_reg_t reg) +{ + intel_wakeref_t wakeref; + u64 res = 0; + + with_intel_runtime_pm(gt->uncore->rpm, wakeref) + res = intel_rc6_residency_us(&gt->rc6, reg); + + return DIV_ROUND_CLOSEST_ULL(res, 1000); +} + +static ssize_t rc6_enable_show(struct device *dev, + struct device_attribute *attr, + char *buff) +{ + struct intel_gt *gt = intel_gt_sysfs_get_drvdata(dev, attr->attr.name); + u8 mask = 0; + + if (HAS_RC6(gt->i915)) + mask |= BIT(0); + if (HAS_RC6p(gt->i915)) + mask |= BIT(1); + if (HAS_RC6pp(gt->i915)) + mask |= BIT(2); + + return sysfs_emit(buff, "%x\n", mask); +} + +static u32 __rc6_residency_ms_show(struct intel_gt *gt) +{ + return get_residency(gt, GEN6_GT_GFX_RC6); +} + +static ssize_t rc6_residency_ms_show(struct device *dev, + struct device_attribute *attr, + char *buff) +{ + u32 rc6_residency = sysfs_gt_attribute_r_min_func(dev, attr, + __rc6_residency_ms_show); + + return sysfs_emit(buff, "%u\n", rc6_residency); +} + +static u32 __rc6p_residency_ms_show(struct intel_gt *gt) +{ + return get_residency(gt, GEN6_GT_GFX_RC6p); +} + +static ssize_t rc6p_residency_ms_show(struct device *dev, + struct device_attribute *attr, + char *buff) +{ + u32 rc6p_residency = sysfs_gt_attribute_r_min_func(dev, attr, + __rc6p_residency_ms_show); + + return sysfs_emit(buff, "%u\n", rc6p_residency); +} + +static u32 __rc6pp_residency_ms_show(struct intel_gt *gt) +{ + return get_residency(gt, GEN6_GT_GFX_RC6pp); +} + +static ssize_t rc6pp_residency_ms_show(struct device *dev, + struct device_attribute *attr, + char *buff) +{ + u32 rc6pp_residency = sysfs_gt_attribute_r_min_func(dev, attr, + __rc6pp_residency_ms_show); + + return sysfs_emit(buff, "%u\n", rc6pp_residency); +} + +static u32 __media_rc6_residency_ms_show(struct intel_gt *gt) +{ + return get_residency(gt, VLV_GT_MEDIA_RC6); +} + +static ssize_t media_rc6_residency_ms_show(struct device *dev, + struct device_attribute *attr, + char *buff) +{ + u32 rc6_residency = sysfs_gt_attribute_r_min_func(dev, attr, + __media_rc6_residency_ms_show); + + return sysfs_emit(buff, "%u\n", rc6_residency); +} + +static DEVICE_ATTR_RO(rc6_enable); +static DEVICE_ATTR_RO(rc6_residency_ms); +static DEVICE_ATTR_RO(rc6p_residency_ms); +static DEVICE_ATTR_RO(rc6pp_residency_ms); +static DEVICE_ATTR_RO(media_rc6_residency_ms); + +static struct attribute *rc6_attrs[] = { + &dev_attr_rc6_enable.attr, + &dev_attr_rc6_residency_ms.attr, + NULL +}; + +static struct attribute *rc6p_attrs[] = { + &dev_attr_rc6p_residency_ms.attr, + &dev_attr_rc6pp_residency_ms.attr, + NULL +}; + +static struct attribute *media_rc6_attrs[] = { + &dev_attr_media_rc6_residency_ms.attr, + NULL +}; + +static const struct attribute_group
rc6_attr_group[] = { + { .attrs = rc6_attrs, }, + { .name = power_group_name, .attrs = rc6_attrs, }, +}; + +static const struct attribute_group rc6p_attr_group[] = { + { .attrs = rc6p_attrs, }, + { .name = power_group_name, .attrs = rc6p_attrs, }, +}; + +static const struct attribute_group media_rc6_attr_group[] = { + { .attrs = media_rc6_attrs, }, + { .name = power_group_name, .attrs = media_rc6_attrs, }, +}; + +static int __intel_gt_sysfs_create_group(struct kobject *kobj, + const struct attribute_group *grp) +{ + return is_object_gt(kobj) ? + sysfs_create_group(kobj, &grp[0]) : + sysfs_merge_group(kobj, &grp[1]); +} + +static void intel_sysfs_rc6_init(struct intel_gt *gt, struct kobject *kobj) +{ + int ret; + + if (!HAS_RC6(gt->i915)) + return; + + ret = __intel_gt_sysfs_create_group(kobj, rc6_attr_group); + if (ret) + drm_warn(&gt->i915->drm, + "failed to create gt%u RC6 sysfs files (%pe)\n", + gt->info.id, ERR_PTR(ret)); + + /* + * cannot use the is_visible() attribute because + * the upper object inherits from the parent group. + */ + if (HAS_RC6p(gt->i915)) { + ret = __intel_gt_sysfs_create_group(kobj, rc6p_attr_group); + if (ret) + drm_warn(&gt->i915->drm, + "failed to create gt%u RC6p sysfs files (%pe)\n", + gt->info.id, ERR_PTR(ret)); + } + + if (IS_VALLEYVIEW(gt->i915) || IS_CHERRYVIEW(gt->i915)) { + ret = __intel_gt_sysfs_create_group(kobj, media_rc6_attr_group); + if (ret) + drm_warn(&gt->i915->drm, + "failed to create gt%u media RC6 sysfs files (%pe)\n", + gt->info.id, ERR_PTR(ret)); + } +} +#else +static void intel_sysfs_rc6_init(struct intel_gt *gt, struct kobject *kobj) +{ +} +#endif /* CONFIG_PM */ + +static u32 __act_freq_mhz_show(struct intel_gt *gt) +{ + return intel_rps_read_actual_frequency(&gt->rps); +} + +static ssize_t act_freq_mhz_show(struct device *dev, + struct device_attribute *attr, char *buff) +{ + u32 actual_freq = sysfs_gt_attribute_r_max_func(dev, attr, + __act_freq_mhz_show); + + return sysfs_emit(buff, "%u\n", actual_freq); +} + +static u32 __cur_freq_mhz_show(struct intel_gt *gt) +{ + return intel_rps_get_requested_frequency(&gt->rps); +} + +static ssize_t cur_freq_mhz_show(struct device *dev, + struct device_attribute *attr, char *buff) +{ + u32 cur_freq = sysfs_gt_attribute_r_max_func(dev, attr, + __cur_freq_mhz_show); + + return sysfs_emit(buff, "%u\n", cur_freq); +} + +static u32 __boost_freq_mhz_show(struct intel_gt *gt) +{ + return intel_rps_get_boost_frequency(&gt->rps); +} + +static ssize_t boost_freq_mhz_show(struct device *dev, + struct device_attribute *attr, + char *buff) +{ + u32 boost_freq = sysfs_gt_attribute_r_max_func(dev, attr, + __boost_freq_mhz_show); + + return sysfs_emit(buff, "%u\n", boost_freq); +} + +static int __boost_freq_mhz_store(struct intel_gt *gt, u32 val) +{ + return intel_rps_set_boost_frequency(&gt->rps, val); +} + +static ssize_t boost_freq_mhz_store(struct device *dev, + struct device_attribute *attr, + const char *buff, size_t count) +{ + ssize_t ret; + u32 val; + + ret = kstrtou32(buff, 0, &val); + if (ret) + return ret; + + return sysfs_gt_attribute_w_func(dev, attr, + __boost_freq_mhz_store, val) ?: count; +} + +static u32 __rp0_freq_mhz_show(struct intel_gt *gt) +{ + return intel_rps_get_rp0_frequency(&gt->rps); +} + +static ssize_t RP0_freq_mhz_show(struct device *dev, + struct device_attribute *attr, char *buff) +{ + u32 rp0_freq = sysfs_gt_attribute_r_max_func(dev, attr, + __rp0_freq_mhz_show); + + return sysfs_emit(buff, "%u\n", rp0_freq); +} + +static u32 __rp1_freq_mhz_show(struct intel_gt *gt) +{ + return
intel_rps_get_rp1_frequency(&gt->rps); +} + +static ssize_t RP1_freq_mhz_show(struct device *dev, + struct device_attribute *attr, char *buff) +{ + u32 rp1_freq = sysfs_gt_attribute_r_max_func(dev, attr, + __rp1_freq_mhz_show); + + return sysfs_emit(buff, "%u\n", rp1_freq); +} + +static u32 __rpn_freq_mhz_show(struct intel_gt *gt) +{ + return intel_rps_get_rpn_frequency(&gt->rps); +} + +static ssize_t RPn_freq_mhz_show(struct device *dev, + struct device_attribute *attr, char *buff) +{ + u32 rpn_freq = sysfs_gt_attribute_r_max_func(dev, attr, + __rpn_freq_mhz_show); + + return sysfs_emit(buff, "%u\n", rpn_freq); +} + +static u32 __max_freq_mhz_show(struct intel_gt *gt) +{ + return intel_rps_get_max_frequency(&gt->rps); +} + +static ssize_t max_freq_mhz_show(struct device *dev, + struct device_attribute *attr, char *buff) +{ + u32 max_freq = sysfs_gt_attribute_r_max_func(dev, attr, + __max_freq_mhz_show); + + return sysfs_emit(buff, "%u\n", max_freq); +} + +static int __set_max_freq(struct intel_gt *gt, u32 val) +{ + return intel_rps_set_max_frequency(&gt->rps, val); +} + +static ssize_t max_freq_mhz_store(struct device *dev, + struct device_attribute *attr, + const char *buff, size_t count) +{ + int ret; + u32 val; + + ret = kstrtou32(buff, 0, &val); + if (ret) + return ret; + + ret = sysfs_gt_attribute_w_func(dev, attr, __set_max_freq, val); + + return ret ?: count; +} + +static u32 __min_freq_mhz_show(struct intel_gt *gt) +{ + return intel_rps_get_min_frequency(&gt->rps); +} + +static ssize_t min_freq_mhz_show(struct device *dev, + struct device_attribute *attr, char *buff) +{ + u32 min_freq = sysfs_gt_attribute_r_min_func(dev, attr, + __min_freq_mhz_show); + + return sysfs_emit(buff, "%u\n", min_freq); +} + +static int __set_min_freq(struct intel_gt *gt, u32 val) +{ + return intel_rps_set_min_frequency(&gt->rps, val); +} + +static ssize_t min_freq_mhz_store(struct device *dev, + struct device_attribute *attr, + const char *buff, size_t count) +{ + int ret; + u32 val; + + ret = kstrtou32(buff, 0, &val); + if (ret) + return ret; + + ret = sysfs_gt_attribute_w_func(dev, attr, __set_min_freq, val); + + return ret ?: count; +} + +static u32 __vlv_rpe_freq_mhz_show(struct intel_gt *gt) +{ + struct intel_rps *rps = &gt->rps; + + return intel_gpu_freq(rps, rps->efficient_freq); +} + +static ssize_t vlv_rpe_freq_mhz_show(struct device *dev, + struct device_attribute *attr, char *buff) +{ + u32 rpe_freq = sysfs_gt_attribute_r_max_func(dev, attr, + __vlv_rpe_freq_mhz_show); + + return sysfs_emit(buff, "%u\n", rpe_freq); +} + +#define INTEL_GT_RPS_SYSFS_ATTR(_name, _mode, _show, _store) \ + static struct device_attribute dev_attr_gt_##_name = __ATTR(gt_##_name, _mode, _show, _store); \ + static struct device_attribute dev_attr_rps_##_name = __ATTR(rps_##_name, _mode, _show, _store) + +#define INTEL_GT_RPS_SYSFS_ATTR_RO(_name) \ + INTEL_GT_RPS_SYSFS_ATTR(_name, 0444, _name##_show, NULL) +#define INTEL_GT_RPS_SYSFS_ATTR_RW(_name) \ + INTEL_GT_RPS_SYSFS_ATTR(_name, 0644, _name##_show, _name##_store) + +/* The below macros generate static structures */ +INTEL_GT_RPS_SYSFS_ATTR_RO(act_freq_mhz); +INTEL_GT_RPS_SYSFS_ATTR_RO(cur_freq_mhz); +INTEL_GT_RPS_SYSFS_ATTR_RW(boost_freq_mhz); +INTEL_GT_RPS_SYSFS_ATTR_RO(RP0_freq_mhz); +INTEL_GT_RPS_SYSFS_ATTR_RO(RP1_freq_mhz); +INTEL_GT_RPS_SYSFS_ATTR_RO(RPn_freq_mhz); +INTEL_GT_RPS_SYSFS_ATTR_RW(max_freq_mhz); +INTEL_GT_RPS_SYSFS_ATTR_RW(min_freq_mhz); + +static DEVICE_ATTR_RO(vlv_rpe_freq_mhz); + +#define GEN6_ATTR(s) { \ + &dev_attr_##s##_act_freq_mhz.attr, \ +
&dev_attr_##s##_cur_freq_mhz.attr, \ + &dev_attr_##s##_boost_freq_mhz.attr, \ + &dev_attr_##s##_max_freq_mhz.attr, \ + &dev_attr_##s##_min_freq_mhz.attr, \ + &dev_attr_##s##_RP0_freq_mhz.attr, \ + &dev_attr_##s##_RP1_freq_mhz.attr, \ + &dev_attr_##s##_RPn_freq_mhz.attr, \ + NULL, \ + } + +#define GEN6_RPS_ATTR GEN6_ATTR(rps) +#define GEN6_GT_ATTR GEN6_ATTR(gt) + +static const struct attribute * const gen6_rps_attrs[] = GEN6_RPS_ATTR; +static const struct attribute * const gen6_gt_attrs[] = GEN6_GT_ATTR; + +static ssize_t punit_req_freq_mhz_show(struct device *dev, + struct device_attribute *attr, + char *buff) +{ + struct intel_gt *gt = intel_gt_sysfs_get_drvdata(dev, attr->attr.name); + u32 preq = intel_rps_read_punit_req_frequency(&gt->rps); + + return sysfs_emit(buff, "%u\n", preq); +} + +struct intel_gt_bool_throttle_attr { + struct attribute attr; + ssize_t (*show)(struct device *dev, struct device_attribute *attr, + char *buf); + i915_reg_t reg32; + u32 mask; +}; + +static ssize_t throttle_reason_bool_show(struct device *dev, + struct device_attribute *attr, + char *buff) +{ + struct intel_gt *gt = intel_gt_sysfs_get_drvdata(dev, attr->attr.name); + struct intel_gt_bool_throttle_attr *t_attr = + (struct intel_gt_bool_throttle_attr *) attr; + bool val = rps_read_mask_mmio(&gt->rps, t_attr->reg32, t_attr->mask); + + return sysfs_emit(buff, "%u\n", val); +} + +#define INTEL_GT_RPS_BOOL_ATTR_RO(sysfs_func__, mask__) \ +struct intel_gt_bool_throttle_attr attr_##sysfs_func__ = { \ + .attr = { .name = __stringify(sysfs_func__), .mode = 0444 }, \ + .show = throttle_reason_bool_show, \ + .reg32 = GT0_PERF_LIMIT_REASONS, \ + .mask = mask__, \ +} + +static DEVICE_ATTR_RO(punit_req_freq_mhz); +static INTEL_GT_RPS_BOOL_ATTR_RO(throttle_reason_status, GT0_PERF_LIMIT_REASONS_MASK); +static INTEL_GT_RPS_BOOL_ATTR_RO(throttle_reason_pl1, POWER_LIMIT_1_MASK); +static INTEL_GT_RPS_BOOL_ATTR_RO(throttle_reason_pl2, POWER_LIMIT_2_MASK); +static INTEL_GT_RPS_BOOL_ATTR_RO(throttle_reason_pl4, POWER_LIMIT_4_MASK); +static INTEL_GT_RPS_BOOL_ATTR_RO(throttle_reason_thermal, THERMAL_LIMIT_MASK); +static INTEL_GT_RPS_BOOL_ATTR_RO(throttle_reason_prochot, PROCHOT_MASK); +static INTEL_GT_RPS_BOOL_ATTR_RO(throttle_reason_ratl, RATL_MASK); +static INTEL_GT_RPS_BOOL_ATTR_RO(throttle_reason_vr_thermalert, VR_THERMALERT_MASK); +static INTEL_GT_RPS_BOOL_ATTR_RO(throttle_reason_vr_tdc, VR_TDC_MASK); + +static const struct attribute *freq_attrs[] = { + &dev_attr_punit_req_freq_mhz.attr, + &attr_throttle_reason_status.attr, + &attr_throttle_reason_pl1.attr, + &attr_throttle_reason_pl2.attr, + &attr_throttle_reason_pl4.attr, + &attr_throttle_reason_thermal.attr, + &attr_throttle_reason_prochot.attr, + &attr_throttle_reason_ratl.attr, + &attr_throttle_reason_vr_thermalert.attr, + &attr_throttle_reason_vr_tdc.attr, + NULL +}; + +static int intel_sysfs_rps_init(struct intel_gt *gt, struct kobject *kobj, + const struct attribute * const *attrs) +{ + int ret; + + if (GRAPHICS_VER(gt->i915) < 6) + return 0; + + ret = sysfs_create_files(kobj, attrs); + if (ret) + return ret; + + if (IS_VALLEYVIEW(gt->i915) || IS_CHERRYVIEW(gt->i915)) + ret = sysfs_create_file(kobj, &dev_attr_vlv_rpe_freq_mhz.attr); + + return ret; +} + +void intel_gt_sysfs_pm_init(struct intel_gt *gt, struct kobject *kobj) +{ + int ret; + + intel_sysfs_rc6_init(gt, kobj); + + ret = is_object_gt(kobj) ?
+ intel_sysfs_rps_init(gt, kobj, gen6_rps_attrs) : + intel_sysfs_rps_init(gt, kobj, gen6_gt_attrs); + if (ret) + drm_warn(&gt->i915->drm, + "failed to create gt%u RPS sysfs files (%pe)\n", + gt->info.id, ERR_PTR(ret)); + + /* end of the legacy interfaces */ + if (!is_object_gt(kobj)) + return; + + ret = sysfs_create_files(kobj, freq_attrs); + if (ret) + drm_warn(&gt->i915->drm, + "failed to create gt%u throttle sysfs files (%pe)\n", + gt->info.id, ERR_PTR(ret)); +} diff --git a/drivers/gpu/drm/i915/gt/intel_gt_sysfs_pm.h b/drivers/gpu/drm/i915/gt/intel_gt_sysfs_pm.h new file mode 100644 index 000000000000..f567105a4a89 --- /dev/null +++ b/drivers/gpu/drm/i915/gt/intel_gt_sysfs_pm.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2022 Intel Corporation + */ + +#ifndef __SYSFS_GT_PM_H__ +#define __SYSFS_GT_PM_H__ + +#include <linux/kobject.h> + +#include "intel_gt_types.h" + +void intel_gt_sysfs_pm_init(struct intel_gt *gt, struct kobject *kobj); + +#endif /* __SYSFS_GT_PM_H__ */ diff --git a/drivers/gpu/drm/i915/gt/intel_gt_types.h b/drivers/gpu/drm/i915/gt/intel_gt_types.h index f20687796490..b06611c1d4ad 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_types.h +++ b/drivers/gpu/drm/i915/gt/intel_gt_types.h @@ -16,10 +16,12 @@ #include <linux/workqueue.h> #include "uc/intel_uc.h" +#include "intel_gsc.h" #include "i915_vma.h" #include "intel_engine_types.h" #include "intel_gt_buffer_pool_types.h" +#include "intel_hwconfig.h" #include "intel_llc_types.h" #include "intel_reset_types.h" #include "intel_rc6_types.h" @@ -72,6 +74,7 @@ struct intel_gt { struct i915_ggtt *ggtt; struct intel_uc uc; + struct intel_gsc gsc; struct mutex tlb_invalidate_lock; @@ -182,7 +185,19 @@ struct intel_gt { const struct intel_mmio_range *steering_table[NUM_STEERING_TYPES]; + struct { + u8 groupid; + u8 instanceid; + } default_steering; + + /* + * Base of the per-tile GTTMMADR, from which we can derive the + * MMIO and the GGTT.
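 + * (For the root tile this is simply the start of the GTTMMADR BAR; + * an illustrative note, not spelled out in this patch.)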
+ */ + phys_addr_t phys_addr; + struct intel_gt_info { + unsigned int id; + intel_engine_mask_t engine_mask; u32 l3bank_mask; @@ -199,6 +214,9 @@ struct intel_gt { struct sseu_dev_info sseu; unsigned long mslice_mask; + + /** @hwconfig: hardware configuration data */ + struct intel_hwconfig hwconfig; } info; struct { diff --git a/drivers/gpu/drm/i915/gt/intel_gtt.c b/drivers/gpu/drm/i915/gt/intel_gtt.c index a5f5b2dda332..b67831833c9a 100644 --- a/drivers/gpu/drm/i915/gt/intel_gtt.c +++ b/drivers/gpu/drm/i915/gt/intel_gtt.c @@ -13,10 +13,22 @@ #include "gem/i915_gem_internal.h" #include "gem/i915_gem_lmem.h" #include "i915_trace.h" +#include "i915_utils.h" #include "intel_gt.h" #include "intel_gt_regs.h" #include "intel_gtt.h" + +static bool intel_ggtt_update_needs_vtd_wa(struct drm_i915_private *i915) +{ + return IS_BROXTON(i915) && i915_vtd_active(i915); +} + +bool intel_vm_no_concurrent_access_wa(struct drm_i915_private *i915) +{ + return IS_CHERRYVIEW(i915) || intel_ggtt_update_needs_vtd_wa(i915); +} + struct drm_i915_gem_object *alloc_pt_lmem(struct i915_address_space *vm, int sz) { struct drm_i915_gem_object *obj; @@ -97,32 +109,52 @@ int map_pt_dma_locked(struct i915_address_space *vm, struct drm_i915_gem_object return 0; } -void __i915_vm_close(struct i915_address_space *vm) +static void clear_vm_list(struct list_head *list) { struct i915_vma *vma, *vn; - if (!atomic_dec_and_mutex_lock(&vm->open, &vm->mutex)) - return; - - list_for_each_entry_safe(vma, vn, &vm->bound_list, vm_link) { + list_for_each_entry_safe(vma, vn, list, vm_link) { struct drm_i915_gem_object *obj = vma->obj; - if (!kref_get_unless_zero(&obj->base.refcount)) { + if (!i915_gem_object_get_rcu(obj)) { /* - * Unbind the dying vma to ensure the bound_list + * Object is dying, but has not yet cleared its + * vma list. + * Unbind the dying vma to ensure our list * is completely drained. We leave the destruction to - * the object destructor. + * the object destructor to avoid the vma + * disappearing under it. */ atomic_and(~I915_VMA_PIN_MASK, &vma->flags); WARN_ON(__i915_vma_unbind(vma)); - continue; + + /* Remove from the unbound list */ + list_del_init(&vma->vm_link); + + /* + * Delay the vm and vm mutex freeing until the + * object is done with destruction. + */ + i915_vm_resv_get(vma->vm); + vma->vm_ddestroy = true; + } else { + i915_vma_destroy_locked(vma); + i915_gem_object_put(obj); } - /* Keep the obj (and hence the vma) alive as _we_ destroy it */ - i915_vma_destroy_locked(vma); - i915_gem_object_put(obj); } +} + +static void __i915_vm_close(struct i915_address_space *vm) +{ + mutex_lock(&vm->mutex); + + clear_vm_list(&vm->bound_list); + clear_vm_list(&vm->unbound_list); + + /* Check for must-fix unanticipated side-effects */ GEM_BUG_ON(!list_empty(&vm->bound_list)); + GEM_BUG_ON(!list_empty(&vm->unbound_list)); mutex_unlock(&vm->mutex); } @@ -144,7 +176,6 @@ int i915_vm_lock_objects(struct i915_address_space *vm, void i915_address_space_fini(struct i915_address_space *vm) { drm_mm_takedown(&vm->mm); - mutex_destroy(&vm->mutex); } /** @@ -152,7 +183,8 @@ void i915_address_space_fini(struct i915_address_space *vm) * @kref: Pointer to the &i915_address_space.resv_ref member. * * This function is called when the last lock sharer no longer shares the - * &i915_address_space._resv lock. 
+ * &i915_address_space._resv lock, and also if a racing vma destruction + * has delayed the freeing of the vm. */ void i915_vm_resv_release(struct kref *kref) { @@ -160,6 +192,8 @@ container_of(kref, typeof(*vm), resv_ref); dma_resv_fini(&vm->_resv); + mutex_destroy(&vm->mutex); + kfree(vm); } @@ -168,6 +202,8 @@ static void __i915_vm_release(struct work_struct *work) { struct i915_address_space *vm = container_of(work, struct i915_address_space, release_work); + __i915_vm_close(vm); + /* Synchronize async unbinds. */ i915_vma_resource_bind_dep_sync_all(vm); @@ -201,7 +237,6 @@ void i915_address_space_init(struct i915_address_space *vm, int subclass) vm->pending_unbind = RB_ROOT_CACHED; INIT_WORK(&vm->release_work, __i915_vm_release); - atomic_set(&vm->open, 1); /* * The vm->mutex must be reclaim safe (for use in the shrinker). @@ -246,6 +281,7 @@ void i915_address_space_init(struct i915_address_space *vm, int subclass) vm->mm.head_node.color = I915_COLOR_UNEVICTABLE; INIT_LIST_HEAD(&vm->bound_list); + INIT_LIST_HEAD(&vm->unbound_list); } void *__px_vaddr(struct drm_i915_gem_object *p) @@ -274,7 +310,7 @@ fill_page_dma(struct drm_i915_gem_object *p, const u64 val, unsigned int count) void *vaddr = __px_vaddr(p); memset64(vaddr, val, count); - clflush_cache_range(vaddr, PAGE_SIZE); + drm_clflush_virt_range(vaddr, PAGE_SIZE); } static void poison_scratch_page(struct drm_i915_gem_object *scratch) diff --git a/drivers/gpu/drm/i915/gt/intel_gtt.h b/drivers/gpu/drm/i915/gt/intel_gtt.h index 9d83c2d3959c..a40d928b3888 100644 --- a/drivers/gpu/drm/i915/gt/intel_gtt.h +++ b/drivers/gpu/drm/i915/gt/intel_gtt.h @@ -240,15 +240,6 @@ struct i915_address_space { unsigned int bind_async_flags; - /* - * Each active user context has its own address space (in full-ppgtt). - * Since the vm may be shared between multiple contexts, we count how - * many contexts keep us "open". Once open hits zero, we are closed - * and do not allow any new attachments, and proceed to shutdown our - * vma and page directories. - */ - atomic_t open; - struct mutex mutex; /* protects vma and our lists */ struct kref resv_ref; /* kref to keep the reservation lock alive. */ @@ -263,6 +254,11 @@ struct i915_address_space { */ struct list_head bound_list; + /** + * List of vmas not yet bound or evicted. + */ + struct list_head unbound_list; + /* Global GTT */ bool is_ggtt:1; @@ -272,6 +268,9 @@ struct i915_address_space { /* Some systems support read-only mappings for GGTT and/or PPGTT */ bool has_read_only:1; + /* Skip pte rewrite on unbind for suspend. Protected by @mutex */ + bool skip_pte_rewrite:1; + u8 top; u8 pd_shift; u8 scratch_order; @@ -383,6 +382,8 @@ struct i915_ppgtt { #define i915_is_dpt(vm) ((vm)->is_dpt) #define i915_is_ggtt_or_dpt(vm) (i915_is_ggtt(vm) || i915_is_dpt(vm)) +bool intel_vm_no_concurrent_access_wa(struct drm_i915_private *i915); + int __must_check i915_vm_lock_objects(struct i915_address_space *vm, struct i915_gem_ww_ctx *ww); @@ -446,6 +447,17 @@ i915_vm_get(struct i915_address_space *vm) return vm; } +static inline struct i915_address_space * +i915_vm_tryget(struct i915_address_space *vm) +{ + return kref_get_unless_zero(&vm->ref) ? vm : NULL; +} + +static inline void assert_vm_alive(struct i915_address_space *vm) +{ + GEM_BUG_ON(!kref_read(&vm->ref)); +} + /** * i915_vm_resv_get - Obtain a reference on the vm's reservation lock * @vm: The vm whose reservation lock we want to share.
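With the open count gone, code that previously pinned a vm against closure via i915_vm_tryopen()/i915_vm_close() (removed in the next hunk) moves to plain references. A minimal sketch of the replacement pattern, assuming a hypothetical caller holding ctx_vm and choosing -ENOENT for the failure path:

	struct i915_address_space *vm;

	vm = i915_vm_tryget(ctx_vm);	/* fails once the last ref is gone */
	if (!vm)
		return -ENOENT;

	assert_vm_alive(vm);	/* the vm stays alive while we hold the ref */
	/* ... use the vm; __i915_vm_release() now drains the vma lists ... */

	i915_vm_put(vm);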
@@ -476,34 +488,6 @@ static inline void i915_vm_resv_put(struct i915_address_space *vm) kref_put(&vm->resv_ref, i915_vm_resv_release); } -static inline struct i915_address_space * -i915_vm_open(struct i915_address_space *vm) -{ - GEM_BUG_ON(!atomic_read(&vm->open)); - atomic_inc(&vm->open); - return i915_vm_get(vm); -} - -static inline bool -i915_vm_tryopen(struct i915_address_space *vm) -{ - if (atomic_add_unless(&vm->open, 1, 0)) - return i915_vm_get(vm); - - return false; -} - -void __i915_vm_close(struct i915_address_space *vm); - -static inline void -i915_vm_close(struct i915_address_space *vm) -{ - GEM_BUG_ON(!atomic_read(&vm->open)); - __i915_vm_close(vm); - - i915_vm_put(vm); -} - void i915_address_space_init(struct i915_address_space *vm, int subclass); void i915_address_space_fini(struct i915_address_space *vm); @@ -565,6 +549,14 @@ i915_page_dir_dma_addr(const struct i915_ppgtt *ppgtt, const unsigned int n) void ppgtt_init(struct i915_ppgtt *ppgtt, struct intel_gt *gt, unsigned long lmem_pt_obj_flags); +void intel_ggtt_bind_vma(struct i915_address_space *vm, + struct i915_vm_pt_stash *stash, + struct i915_vma_resource *vma_res, + enum i915_cache_level cache_level, + u32 flags); +void intel_ggtt_unbind_vma(struct i915_address_space *vm, + struct i915_vma_resource *vma_res); + int i915_ggtt_probe_hw(struct drm_i915_private *i915); int i915_ggtt_init_hw(struct drm_i915_private *i915); int i915_ggtt_enable_hw(struct drm_i915_private *i915); @@ -635,6 +627,7 @@ release_pd_entry(struct i915_page_directory * const pd, struct i915_page_table * const pt, const struct drm_i915_gem_object * const scratch); void gen6_ggtt_invalidate(struct i915_ggtt *ggtt); +void gen8_ggtt_invalidate(struct i915_ggtt *ggtt); void ppgtt_bind_vma(struct i915_address_space *vm, struct i915_vm_pt_stash *stash, diff --git a/drivers/gpu/drm/i915/gt/intel_hwconfig.h b/drivers/gpu/drm/i915/gt/intel_hwconfig.h new file mode 100644 index 000000000000..322290780b67 --- /dev/null +++ b/drivers/gpu/drm/i915/gt/intel_hwconfig.h @@ -0,0 +1,21 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2022 Intel Corporation + */ + +#ifndef _INTEL_HWCONFIG_H_ +#define _INTEL_HWCONFIG_H_ + +#include <linux/types.h> + +struct intel_gt; + +struct intel_hwconfig { + u32 size; + void *ptr; +}; + +int intel_gt_init_hwconfig(struct intel_gt *gt); +void intel_gt_fini_hwconfig(struct intel_gt *gt); + +#endif /* _INTEL_HWCONFIG_H_ */ diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c index 07bef7128fdb..eec73c66406c 100644 --- a/drivers/gpu/drm/i915/gt/intel_lrc.c +++ b/drivers/gpu/drm/i915/gt/intel_lrc.c @@ -778,7 +778,7 @@ static void init_common_regs(u32 * const regs, CTX_CTRL_RS_CTX_ENABLE); regs[CTX_CONTEXT_CONTROL] = ctl; - regs[CTX_TIMESTAMP] = ce->runtime.last; + regs[CTX_TIMESTAMP] = ce->stats.runtime.last; } static void init_wa_bb_regs(u32 * const regs, @@ -904,6 +904,24 @@ check_redzone(const void *vaddr, const struct intel_engine_cs *engine) engine->name); } +static u32 context_wa_bb_offset(const struct intel_context *ce) +{ + return PAGE_SIZE * ce->wa_bb_page; +} + +static u32 *context_indirect_bb(const struct intel_context *ce) +{ + void *ptr; + + GEM_BUG_ON(!ce->wa_bb_page); + + ptr = ce->lrc_reg_state; + ptr -= LRC_STATE_OFFSET; /* back to start of context image */ + ptr += context_wa_bb_offset(ce); + + return ptr; +} + void lrc_init_state(struct intel_context *ce, struct intel_engine_cs *engine, void *state) @@ -922,6 +940,10 @@ void lrc_init_state(struct intel_context *ce, /* 
Clear the ppHWSP (inc. per-context counters) */ memset(state, 0, PAGE_SIZE); + /* Clear the indirect wa and storage */ + if (ce->wa_bb_page) + memset(state + context_wa_bb_offset(ce), 0, PAGE_SIZE); + /* * The second page of the context object contains some registers which * must be set up prior to the first execution. @@ -929,6 +951,35 @@ void lrc_init_state(struct intel_context *ce, __lrc_init_regs(state + LRC_STATE_OFFSET, ce, engine, inhibit); } +u32 lrc_indirect_bb(const struct intel_context *ce) +{ + return i915_ggtt_offset(ce->state) + context_wa_bb_offset(ce); +} + +static u32 *setup_predicate_disable_wa(const struct intel_context *ce, u32 *cs) +{ + /* If predication is active, this will be noop'ed */ + *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT | (4 - 2); + *cs++ = lrc_indirect_bb(ce) + DG2_PREDICATE_RESULT_WA; + *cs++ = 0; + *cs++ = 0; /* No predication */ + + /* predicated end, only terminates if SET_PREDICATE_RESULT:0 is clear */ + *cs++ = MI_BATCH_BUFFER_END | BIT(15); + *cs++ = MI_SET_PREDICATE | MI_SET_PREDICATE_DISABLE; + + /* Instructions are no longer predicated (disabled), we can proceed */ + *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT | (4 - 2); + *cs++ = lrc_indirect_bb(ce) + DG2_PREDICATE_RESULT_WA; + *cs++ = 0; + *cs++ = 1; /* enable predication before the next BB */ + + *cs++ = MI_BATCH_BUFFER_END; + GEM_BUG_ON(offset_in_page(cs) > DG2_PREDICATE_RESULT_WA); + + return cs; +} + static struct i915_vma * __lrc_alloc_state(struct intel_context *ce, struct intel_engine_cs *engine) { @@ -1208,6 +1259,10 @@ gen12_emit_indirect_ctx_rcs(const struct intel_context *ce, u32 *cs) IS_DG2_G11(ce->engine->i915)) cs = gen8_emit_pipe_control(cs, PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE, 0); + /* hsdes: 1809175790 */ + if (!HAS_FLAT_CCS(ce->engine->i915)) + cs = gen12_emit_aux_table_inv(cs, GEN12_GFX_CCS_AUX_NV); + return cs; } @@ -1225,25 +1280,15 @@ gen12_emit_indirect_ctx_xcs(const struct intel_context *ce, u32 *cs) PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE, 0); - return cs; -} - -static u32 context_wa_bb_offset(const struct intel_context *ce) -{ - return PAGE_SIZE * ce->wa_bb_page; -} - -static u32 *context_indirect_bb(const struct intel_context *ce) -{ - void *ptr; - - GEM_BUG_ON(!ce->wa_bb_page); - - ptr = ce->lrc_reg_state; - ptr -= LRC_STATE_OFFSET; /* back to start of context image */ - ptr += context_wa_bb_offset(ce); + /* hsdes: 1809175790 */ + if (!HAS_FLAT_CCS(ce->engine->i915)) { + if (ce->engine->class == VIDEO_DECODE_CLASS) + cs = gen12_emit_aux_table_inv(cs, GEN12_VD0_AUX_NV); + else if (ce->engine->class == VIDEO_ENHANCEMENT_CLASS) + cs = gen12_emit_aux_table_inv(cs, GEN12_VE0_AUX_NV); + } - return ptr; + return cs; } static void @@ -1259,9 +1304,11 @@ setup_indirect_ctx_bb(const struct intel_context *ce, while ((unsigned long)cs % CACHELINE_BYTES) *cs++ = MI_NOOP; + GEM_BUG_ON(cs - start > DG2_PREDICATE_RESULT_BB / sizeof(*start)); + setup_predicate_disable_wa(ce, start + DG2_PREDICATE_RESULT_BB / sizeof(*start)); + lrc_setup_indirect_ctx(ce->lrc_reg_state, engine, - i915_ggtt_offset(ce->state) + - context_wa_bb_offset(ce), + lrc_indirect_bb(ce), (cs - start) * sizeof(*cs)); } @@ -1722,11 +1769,12 @@ err: } } -static void st_update_runtime_underflow(struct intel_context *ce, s32 dt) +static void st_runtime_underflow(struct intel_context_stats *stats, s32 dt) { #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) - ce->runtime.num_underflow++; - ce->runtime.max_underflow = max_t(u32, ce->runtime.max_underflow, -dt); + stats->runtime.num_underflow++; + 
stats->runtime.max_underflow = + max_t(u32, stats->runtime.max_underflow, -dt); #endif } @@ -1743,25 +1791,25 @@ static u32 lrc_get_runtime(const struct intel_context *ce) void lrc_update_runtime(struct intel_context *ce) { + struct intel_context_stats *stats = &ce->stats; u32 old; s32 dt; - if (intel_context_is_barrier(ce)) + old = stats->runtime.last; + stats->runtime.last = lrc_get_runtime(ce); + dt = stats->runtime.last - old; + if (!dt) return; - old = ce->runtime.last; - ce->runtime.last = lrc_get_runtime(ce); - dt = ce->runtime.last - old; - if (unlikely(dt < 0)) { CE_TRACE(ce, "runtime underflow: last=%u, new=%u, delta=%d\n", - old, ce->runtime.last, dt); - st_update_runtime_underflow(ce, dt); + old, stats->runtime.last, dt); + st_runtime_underflow(stats, dt); return; } - ewma_runtime_add(&ce->runtime.avg, dt); - ce->runtime.total += dt; + ewma_runtime_add(&stats->runtime.avg, dt); + stats->runtime.total += dt; } #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.h b/drivers/gpu/drm/i915/gt/intel_lrc.h index 6e4f9f58fca5..31be734010db 100644 --- a/drivers/gpu/drm/i915/gt/intel_lrc.h +++ b/drivers/gpu/drm/i915/gt/intel_lrc.h @@ -11,9 +11,10 @@ #include <linux/bitfield.h> #include <linux/types.h> +#include "intel_context.h" + struct drm_i915_gem_object; struct i915_gem_ww_ctx; -struct intel_context; struct intel_engine_cs; struct intel_ring; struct kref; @@ -120,4 +121,33 @@ static inline u32 lrc_desc_priority(int prio) return GEN12_CTX_PRIORITY_NORMAL; } +static inline void lrc_runtime_start(struct intel_context *ce) +{ + struct intel_context_stats *stats = &ce->stats; + + if (intel_context_is_barrier(ce)) + return; + + if (stats->active) + return; + + WRITE_ONCE(stats->active, intel_context_clock()); +} + +static inline void lrc_runtime_stop(struct intel_context *ce) +{ + struct intel_context_stats *stats = &ce->stats; + + if (!stats->active) + return; + + lrc_update_runtime(ce); + WRITE_ONCE(stats->active, 0); +} + +#define DG2_PREDICATE_RESULT_WA (PAGE_SIZE - sizeof(u64)) +#define DG2_PREDICATE_RESULT_BB (2048) + +u32 lrc_indirect_bb(const struct intel_context *ce); + #endif /* __INTEL_LRC_H__ */ diff --git a/drivers/gpu/drm/i915/gt/intel_migrate.c b/drivers/gpu/drm/i915/gt/intel_migrate.c index 20444d6ceb3c..2c35324b5f68 100644 --- a/drivers/gpu/drm/i915/gt/intel_migrate.c +++ b/drivers/gpu/drm/i915/gt/intel_migrate.c @@ -17,6 +17,8 @@ struct insert_pte_data { #define CHUNK_SZ SZ_8M /* ~1ms at 8GiB/s preemption delay */ +#define GET_CCS_BYTES(i915, size) (HAS_FLAT_CCS(i915) ? \ + DIV_ROUND_UP(size, NUM_BYTES_PER_CCS_BYTE) : 0) static bool engine_supports_migration(struct intel_engine_cs *engine) { if (!engine) @@ -467,6 +469,128 @@ static bool wa_1209644611_applies(int ver, u32 size) return height % 4 == 3 && height <= 8; } +/** + * DOC: Flat-CCS - Memory compression for Local memory + * + * On Xe-HP and later devices, we use dedicated compression control state (CCS) + * stored in local memory for each surface, to support the 3D and media + * compression formats. + * + * The memory required for the CCS of the entire local memory is 1/256 of the + * local memory size. So before the kernel boot, the required memory is reserved + * for the CCS data and a secure register will be programmed with the CCS base + * address. + * + * Flat CCS data needs to be cleared when a lmem object is allocated. + * And CCS data can be copied in and out of CCS region through + * XY_CTRL_SURF_COPY_BLT. CPU can't access the CCS data directly. 
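 + * + * As a worked example of the 1/256 ratio above (illustrative numbers, + * not from this patch): a 64 MB lmem surface carries 64 MB / 256 = + * 256 KB of CCS state, which must follow the surface on every + * migration between lmem and smem.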
+ * + * I915 supports Flat-CCS on lmem only objects. When an object has smem in + * its preference list, on memory pressure, i915 needs to migrate the lmem + * content into smem. If the lmem object is Flat-CCS compressed by userspace, + * then i915 needs to decompress it. But i915 lacks the required information + * for such decompression. Hence i915 supports Flat-CCS only on lmem only + * objects. + * + * When we exhaust the lmem, Flat-CCS capable objects' lmem backing memory can + * be temporarily evicted to smem, along with the auxiliary CCS state, where + * it can be potentially swapped-out at a later point, if required. + * If userspace later touches the evicted pages, then we always move + * the backing memory back to lmem, which includes restoring the saved CCS state, + * and potentially performing any required swap-in. + * + * For the migration of lmem objects with smem in their placement list, + * such as {lmem, smem}, the objects are treated as non Flat-CCS capable + * objects. + */ + +static inline u32 *i915_flush_dw(u32 *cmd, u32 flags) +{ + *cmd++ = MI_FLUSH_DW | flags; + *cmd++ = 0; + *cmd++ = 0; + + return cmd; +} + +static u32 calc_ctrl_surf_instr_size(struct drm_i915_private *i915, int size) +{ + u32 num_cmds, num_blks, total_size; + + if (!GET_CCS_BYTES(i915, size)) + return 0; + + /* + * XY_CTRL_SURF_COPY_BLT transfers CCS in 256 byte + * blocks. One XY_CTRL_SURF_COPY_BLT command can + * transfer up to 1024 blocks. + */ + num_blks = DIV_ROUND_UP(GET_CCS_BYTES(i915, size), + NUM_CCS_BYTES_PER_BLOCK); + num_cmds = DIV_ROUND_UP(num_blks, NUM_CCS_BLKS_PER_XFER); + total_size = XY_CTRL_SURF_INSTR_SIZE * num_cmds; + + /* + * Adding a flush before and after XY_CTRL_SURF_COPY_BLT + */ + total_size += 2 * MI_FLUSH_DW_SIZE; + + return total_size; +} + +static int emit_copy_ccs(struct i915_request *rq, + u32 dst_offset, u8 dst_access, + u32 src_offset, u8 src_access, int size) +{ + struct drm_i915_private *i915 = rq->engine->i915; + int mocs = rq->engine->gt->mocs.uc_index << 1; + u32 num_ccs_blks, ccs_ring_size; + u32 *cs; + + ccs_ring_size = calc_ctrl_surf_instr_size(i915, size); + WARN_ON(!ccs_ring_size); + + cs = intel_ring_begin(rq, round_up(ccs_ring_size, 2)); + if (IS_ERR(cs)) + return PTR_ERR(cs); + + num_ccs_blks = DIV_ROUND_UP(GET_CCS_BYTES(i915, size), + NUM_CCS_BYTES_PER_BLOCK); + GEM_BUG_ON(num_ccs_blks > NUM_CCS_BLKS_PER_XFER); + cs = i915_flush_dw(cs, MI_FLUSH_DW_LLC | MI_FLUSH_DW_CCS); + + /* + * The XY_CTRL_SURF_COPY_BLT instruction is used to copy the CCS + * data in and out of the CCS region. + * + * We can copy at most 1024 blocks of 256 bytes using one + * XY_CTRL_SURF_COPY_BLT instruction. + * + * In case we need to copy more than 1024 blocks, we need to add + * another instruction to the same batch buffer. + * + * 1024 blocks of 256 bytes of CCS represent a total 256KB of CCS. + * + * 256 KB of CCS represents 256 * 256 KB = 64 MB of LMEM.
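 + * + * With the 8M CHUNK_SZ used in this file that is 8 MB / 256 = 32 KB + * of CCS, i.e. 128 blocks, so a single chunk always fits in one + * XY_CTRL_SURF_COPY_BLT (illustrative arithmetic).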
+ */ + *cs++ = XY_CTRL_SURF_COPY_BLT | + src_access << SRC_ACCESS_TYPE_SHIFT | + dst_access << DST_ACCESS_TYPE_SHIFT | + ((num_ccs_blks - 1) & CCS_SIZE_MASK) << CCS_SIZE_SHIFT; + *cs++ = src_offset; + *cs++ = rq->engine->instance | + FIELD_PREP(XY_CTRL_SURF_MOCS_MASK, mocs); + *cs++ = dst_offset; + *cs++ = rq->engine->instance | + FIELD_PREP(XY_CTRL_SURF_MOCS_MASK, mocs); + + cs = i915_flush_dw(cs, MI_FLUSH_DW_LLC | MI_FLUSH_DW_CCS); + if (ccs_ring_size & 1) + *cs++ = MI_NOOP; + + intel_ring_advance(rq, cs); + + return 0; +} + static int emit_copy(struct i915_request *rq, u32 dst_offset, u32 src_offset, int size) { @@ -514,6 +638,57 @@ static int emit_copy(struct i915_request *rq, return 0; } +static int scatter_list_length(struct scatterlist *sg) +{ + int len = 0; + + while (sg && sg_dma_len(sg)) { + len += sg_dma_len(sg); + sg = sg_next(sg); + } + + return len; +} + +static void +calculate_chunk_sz(struct drm_i915_private *i915, bool src_is_lmem, + int *src_sz, u32 bytes_to_cpy, u32 ccs_bytes_to_cpy) +{ + if (ccs_bytes_to_cpy) { + if (!src_is_lmem) + /* + * When CHUNK_SZ is passed, all pages up to CHUNK_SZ + * will be taken for the blt. On Flat-CCS capable + * platforms the smem object will have more pages than + * required for main memory, hence limit it to the + * required size for main memory. + */ + *src_sz = min_t(int, bytes_to_cpy, CHUNK_SZ); + } else { /* ccs handling is not required */ + *src_sz = CHUNK_SZ; + } +} + +static void get_ccs_sg_sgt(struct sgt_dma *it, u32 bytes_to_cpy) +{ + u32 len; + + do { + GEM_BUG_ON(!it->sg || !sg_dma_len(it->sg)); + len = it->max - it->dma; + if (len > bytes_to_cpy) { + it->dma += bytes_to_cpy; + break; + } + + bytes_to_cpy -= len; + + it->sg = __sg_next(it->sg); + it->dma = sg_dma_address(it->sg); + it->max = it->dma + sg_dma_len(it->sg); + } while (bytes_to_cpy); +} + int intel_context_migrate_copy(struct intel_context *ce, const struct i915_deps *deps, @@ -525,17 +700,67 @@ intel_context_migrate_copy(struct intel_context *ce, bool dst_is_lmem, struct i915_request **out) { - struct sgt_dma it_src = sg_sgt(src), it_dst = sg_sgt(dst); + struct sgt_dma it_src = sg_sgt(src), it_dst = sg_sgt(dst), it_ccs; + struct drm_i915_private *i915 = ce->engine->i915; + u32 ccs_bytes_to_cpy = 0, bytes_to_cpy; + enum i915_cache_level ccs_cache_level; + u32 src_offset, dst_offset; + u8 src_access, dst_access; struct i915_request *rq; + int src_sz, dst_sz; + bool ccs_is_src; int err; GEM_BUG_ON(ce->vm != ce->engine->gt->migrate.context->vm); + GEM_BUG_ON(IS_DGFX(ce->engine->i915) && (!src_is_lmem && !dst_is_lmem)); *out = NULL; GEM_BUG_ON(ce->ring->size < SZ_64K); + src_sz = scatter_list_length(src); + bytes_to_cpy = src_sz; + + if (HAS_FLAT_CCS(i915) && src_is_lmem ^ dst_is_lmem) { + src_access = !src_is_lmem && dst_is_lmem; + dst_access = !src_access; + + dst_sz = scatter_list_length(dst); + if (src_is_lmem) { + it_ccs = it_dst; + ccs_cache_level = dst_cache_level; + ccs_is_src = false; + } else if (dst_is_lmem) { + bytes_to_cpy = dst_sz; + it_ccs = it_src; + ccs_cache_level = src_cache_level; + ccs_is_src = true; + } + + /* + * When an eviction of the ccs data is needed, smem will have + * the extra pages for the ccs data. + * + * TO-DO: Want to move the size mismatch check to a WARN_ON, + * but still we have some requests of smem->lmem with same size. + * Need to fix it. + */ + ccs_bytes_to_cpy = src_sz != dst_sz ?
GET_CCS_BYTES(i915, bytes_to_cpy) : 0; + if (ccs_bytes_to_cpy) + get_ccs_sg_sgt(&it_ccs, bytes_to_cpy); + } + + src_offset = 0; + dst_offset = CHUNK_SZ; + if (HAS_64K_PAGES(ce->engine->i915)) { + src_offset = 0; + dst_offset = 0; + if (src_is_lmem) + src_offset = CHUNK_SZ; + if (dst_is_lmem) + dst_offset = 2 * CHUNK_SZ; + } + do { - u32 src_offset, dst_offset; int len; rq = i915_request_create(ce); @@ -563,22 +788,16 @@ intel_context_migrate_copy(struct intel_context *ce, if (err) goto out_rq; - src_offset = 0; - dst_offset = CHUNK_SZ; - if (HAS_64K_PAGES(ce->engine->i915)) { - GEM_BUG_ON(!src_is_lmem && !dst_is_lmem); - - src_offset = 0; - dst_offset = 0; - if (src_is_lmem) - src_offset = CHUNK_SZ; - if (dst_is_lmem) - dst_offset = 2 * CHUNK_SZ; - } + calculate_chunk_sz(i915, src_is_lmem, &src_sz, + bytes_to_cpy, ccs_bytes_to_cpy); len = emit_pte(rq, &it_src, src_cache_level, src_is_lmem, - src_offset, CHUNK_SZ); - if (len <= 0) { + src_offset, src_sz); + if (!len) { + err = -EINVAL; + goto out_rq; + } + if (len < 0) { err = len; goto out_rq; } @@ -596,7 +815,44 @@ intel_context_migrate_copy(struct intel_context *ce, if (err) goto out_rq; - err = emit_copy(rq, dst_offset, src_offset, len); + err = emit_copy(rq, dst_offset, src_offset, len); + if (err) + goto out_rq; + + bytes_to_cpy -= len; + + if (ccs_bytes_to_cpy) { + int ccs_sz; + + err = rq->engine->emit_flush(rq, EMIT_INVALIDATE); + if (err) + goto out_rq; + + ccs_sz = GET_CCS_BYTES(i915, len); + err = emit_pte(rq, &it_ccs, ccs_cache_level, false, + ccs_is_src ? src_offset : dst_offset, + ccs_sz); + if (err < 0) + goto out_rq; + if (err < ccs_sz) { + err = -EINVAL; + goto out_rq; + } + + err = rq->engine->emit_flush(rq, EMIT_INVALIDATE); + if (err) + goto out_rq; + + err = emit_copy_ccs(rq, dst_offset, dst_access, + src_offset, src_access, len); + if (err) + goto out_rq; + + err = rq->engine->emit_flush(rq, EMIT_INVALIDATE); + if (err) + goto out_rq; + ccs_bytes_to_cpy -= ccs_sz; + } /* Arbitration is re-enabled between requests. */ out_rq: @@ -604,9 +860,26 @@ out_rq: i915_request_put(*out); *out = i915_request_get(rq); i915_request_add(rq); - if (err || !it_src.sg || !sg_dma_len(it_src.sg)) + + if (err) break; + if (!bytes_to_cpy && !ccs_bytes_to_cpy) { + if (src_is_lmem) + WARN_ON(it_src.sg && sg_dma_len(it_src.sg)); + else + WARN_ON(it_dst.sg && sg_dma_len(it_dst.sg)); + break; + } + + if (WARN_ON(!it_src.sg || !sg_dma_len(it_src.sg) || + !it_dst.sg || !sg_dma_len(it_dst.sg) || + (ccs_bytes_to_cpy && (!it_ccs.sg || + !sg_dma_len(it_ccs.sg))))) { + err = -EINVAL; + break; + } + cond_resched(); } while (1); @@ -614,35 +887,65 @@ out_ce: return err; } -static int emit_clear(struct i915_request *rq, u64 offset, int size, u32 value) +static int emit_clear(struct i915_request *rq, u32 offset, int size, + u32 value, bool is_lmem) { - const int ver = GRAPHICS_VER(rq->engine->i915); + struct drm_i915_private *i915 = rq->engine->i915; + int mocs = rq->engine->gt->mocs.uc_index << 1; + const int ver = GRAPHICS_VER(i915); + int ring_sz; u32 *cs; GEM_BUG_ON(size >> PAGE_SHIFT > S16_MAX); - offset += (u64)rq->engine->instance << 32; + if (HAS_FLAT_CCS(i915) && ver >= 12) + ring_sz = XY_FAST_COLOR_BLT_DW; + else if (ver >= 8) + ring_sz = 8; + else + ring_sz = 6; - cs = intel_ring_begin(rq, ver >= 8 ? 
8 : 6); + cs = intel_ring_begin(rq, ring_sz); if (IS_ERR(cs)) return PTR_ERR(cs); - if (ver >= 8) { + if (HAS_FLAT_CCS(i915) && ver >= 12) { + *cs++ = XY_FAST_COLOR_BLT_CMD | XY_FAST_COLOR_BLT_DEPTH_32 | + (XY_FAST_COLOR_BLT_DW - 2); + *cs++ = FIELD_PREP(XY_FAST_COLOR_BLT_MOCS_MASK, mocs) | + (PAGE_SIZE - 1); + *cs++ = 0; + *cs++ = size >> PAGE_SHIFT << 16 | PAGE_SIZE / 4; + *cs++ = offset; + *cs++ = rq->engine->instance; + *cs++ = !is_lmem << XY_FAST_COLOR_BLT_MEM_TYPE_SHIFT; + /* BG7 */ + *cs++ = value; + *cs++ = 0; + *cs++ = 0; + *cs++ = 0; + /* BG11 */ + *cs++ = 0; + *cs++ = 0; + /* BG13 */ + *cs++ = 0; + *cs++ = 0; + *cs++ = 0; + } else if (ver >= 8) { *cs++ = XY_COLOR_BLT_CMD | BLT_WRITE_RGBA | (7 - 2); *cs++ = BLT_DEPTH_32 | BLT_ROP_COLOR_COPY | PAGE_SIZE; *cs++ = 0; *cs++ = size >> PAGE_SHIFT << 16 | PAGE_SIZE / 4; - *cs++ = lower_32_bits(offset); - *cs++ = upper_32_bits(offset); + *cs++ = offset; + *cs++ = rq->engine->instance; *cs++ = value; *cs++ = MI_NOOP; } else { - GEM_BUG_ON(upper_32_bits(offset)); *cs++ = XY_COLOR_BLT_CMD | BLT_WRITE_RGBA | (6 - 2); *cs++ = BLT_DEPTH_32 | BLT_ROP_COLOR_COPY | PAGE_SIZE; *cs++ = 0; *cs++ = size >> PAGE_SHIFT << 16 | PAGE_SIZE / 4; - *cs++ = lower_32_bits(offset); + *cs++ = offset; *cs++ = value; } @@ -659,8 +962,10 @@ intel_context_migrate_clear(struct intel_context *ce, u32 value, struct i915_request **out) { + struct drm_i915_private *i915 = ce->engine->i915; struct sgt_dma it = sg_sgt(sg); struct i915_request *rq; + u32 offset; int err; GEM_BUG_ON(ce->vm != ce->engine->gt->migrate.context->vm); @@ -668,8 +973,11 @@ intel_context_migrate_clear(struct intel_context *ce, GEM_BUG_ON(ce->ring->size < SZ_64K); + offset = 0; + if (HAS_64K_PAGES(i915) && is_lmem) + offset = CHUNK_SZ; + do { - u32 offset; int len; rq = i915_request_create(ce); @@ -697,10 +1005,6 @@ intel_context_migrate_clear(struct intel_context *ce, if (err) goto out_rq; - offset = 0; - if (HAS_64K_PAGES(ce->engine->i915) && is_lmem) - offset = CHUNK_SZ; - len = emit_pte(rq, &it, cache_level, is_lmem, offset, CHUNK_SZ); if (len <= 0) { err = len; @@ -711,7 +1015,22 @@ intel_context_migrate_clear(struct intel_context *ce, if (err) goto out_rq; - err = emit_clear(rq, offset, len, value); + err = emit_clear(rq, offset, len, value, is_lmem); + if (err) + goto out_rq; + + if (HAS_FLAT_CCS(i915) && is_lmem && !value) { + /* + * copy the content of memory into corresponding + * ccs surface + */ + err = emit_copy_ccs(rq, offset, INDIRECT_ACCESS, offset, + DIRECT_ACCESS, len); + if (err) + goto out_rq; + } + + err = rq->engine->emit_flush(rq, EMIT_INVALIDATE); /* Arbitration is re-enabled between requests. 
*/ out_rq: diff --git a/drivers/gpu/drm/i915/gt/intel_ppgtt.c b/drivers/gpu/drm/i915/gt/intel_ppgtt.c index d91e2beb7517..d8b94d638559 100644 --- a/drivers/gpu/drm/i915/gt/intel_ppgtt.c +++ b/drivers/gpu/drm/i915/gt/intel_ppgtt.c @@ -91,7 +91,7 @@ write_dma_entry(struct drm_i915_gem_object * const pdma, u64 * const vaddr = __px_vaddr(pdma); vaddr[idx] = encoded_entry; - clflush_cache_range(&vaddr[idx], sizeof(u64)); + drm_clflush_virt_range(&vaddr[idx], sizeof(u64)); } void diff --git a/drivers/gpu/drm/i915/gt/intel_rc6.c b/drivers/gpu/drm/i915/gt/intel_rc6.c index 6df359c534fe..b4770690e794 100644 --- a/drivers/gpu/drm/i915/gt/intel_rc6.c +++ b/drivers/gpu/drm/i915/gt/intel_rc6.c @@ -4,7 +4,9 @@ */ #include <linux/pm_runtime.h> +#include <linux/string_helpers.h> +#include "gem/i915_gem_region.h" #include "i915_drv.h" #include "i915_reg.h" #include "i915_vgpu.h" @@ -324,9 +326,10 @@ static int vlv_rc6_init(struct intel_rc6 *rc6) resource_size_t pcbr_offset; pcbr_offset = (pcbr & ~4095) - i915->dsm.start; - pctx = i915_gem_object_create_stolen_for_preallocated(i915, - pcbr_offset, - pctx_size); + pctx = i915_gem_object_create_region_at(i915->mm.stolen_region, + pcbr_offset, + pctx_size, + 0); if (IS_ERR(pctx)) return PTR_ERR(pctx); @@ -430,8 +433,8 @@ static bool bxt_check_bios_rc6_setup(struct intel_rc6 *rc6) rc_sw_target >>= RC_SW_TARGET_STATE_SHIFT; drm_dbg(&i915->drm, "BIOS enabled RC states: " "HW_CTRL %s HW_RC6 %s SW_TARGET_STATE %x\n", - onoff(rc_ctl & GEN6_RC_CTL_HW_ENABLE), - onoff(rc_ctl & GEN6_RC_CTL_RC6_ENABLE), + str_on_off(rc_ctl & GEN6_RC_CTL_HW_ENABLE), + str_on_off(rc_ctl & GEN6_RC_CTL_RC6_ENABLE), rc_sw_target); if (!(intel_uncore_read(uncore, RC6_LOCATION) & RC6_CTX_IN_DRAM)) { diff --git a/drivers/gpu/drm/i915/gt/intel_region_lmem.c b/drivers/gpu/drm/i915/gt/intel_region_lmem.c index 6cecfdae07ad..f5111c0a0060 100644 --- a/drivers/gpu/drm/i915/gt/intel_region_lmem.c +++ b/drivers/gpu/drm/i915/gt/intel_region_lmem.c @@ -93,6 +93,7 @@ static struct intel_memory_region *setup_lmem(struct intel_gt *gt) struct intel_memory_region *mem; resource_size_t min_page_size; resource_size_t io_start; + resource_size_t io_size; resource_size_t lmem_size; int err; @@ -122,9 +123,14 @@ static struct intel_memory_region *setup_lmem(struct intel_gt *gt) lmem_size = intel_uncore_read64(&i915->uncore, GEN12_GSMBASE); } + if (i915->params.lmem_size > 0) { + lmem_size = min_t(resource_size_t, lmem_size, + mul_u32_u32(i915->params.lmem_size, SZ_1M)); + } io_start = pci_resource_start(pdev, 2); - if (GEM_WARN_ON(lmem_size > pci_resource_len(pdev, 2))) + io_size = min(pci_resource_len(pdev, 2), lmem_size); + if (!io_size) return ERR_PTR(-ENODEV); min_page_size = HAS_64K_PAGES(i915) ? I915_GTT_PAGE_SIZE_64K : @@ -134,7 +140,7 @@ static struct intel_memory_region *setup_lmem(struct intel_gt *gt) lmem_size, min_page_size, io_start, - lmem_size, + io_size, INTEL_MEMORY_LOCAL, 0, &intel_region_lmem_ops); diff --git a/drivers/gpu/drm/i915/gt/intel_reset.c b/drivers/gpu/drm/i915/gt/intel_reset.c index b7c6d4462ec5..a5338c3fde7a 100644 --- a/drivers/gpu/drm/i915/gt/intel_reset.c +++ b/drivers/gpu/drm/i915/gt/intel_reset.c @@ -5,6 +5,7 @@ #include <linux/sched/mm.h> #include <linux/stop_machine.h> +#include <linux/string_helpers.h> #include "display/intel_display.h" #include "display/intel_overlay.h" @@ -137,7 +138,7 @@ void __i915_request_reset(struct i915_request *rq, bool guilty) { bool banned = false; - RQ_TRACE(rq, "guilty? %s\n", yesno(guilty)); + RQ_TRACE(rq, "guilty? 
%s\n", str_yes_no(guilty)); GEM_BUG_ON(__i915_request_is_complete(rq)); rcu_read_lock(); /* protect the GEM context */ @@ -771,14 +772,15 @@ static intel_engine_mask_t reset_prepare(struct intel_gt *gt) intel_engine_mask_t awake = 0; enum intel_engine_id id; + /* For GuC mode, ensure submission is disabled before stopping ring */ + intel_uc_reset_prepare(&gt->uc); + for_each_engine(engine, gt, id) { if (intel_engine_pm_get_if_awake(engine)) awake |= engine->mask; reset_prepare_engine(engine); } - intel_uc_reset_prepare(&gt->uc); - return awake; } @@ -1318,7 +1320,7 @@ void intel_gt_handle_error(struct intel_gt *gt, engine_mask &= gt->info.engine_mask; if (flags & I915_ERROR_CAPTURE) { - i915_capture_error_state(gt, engine_mask); + i915_capture_error_state(gt, engine_mask, CORE_DUMP_FLAG_NONE); intel_gt_clear_error_registers(gt, engine_mask); } diff --git a/drivers/gpu/drm/i915/gt/intel_ring_submission.c b/drivers/gpu/drm/i915/gt/intel_ring_submission.c index 6d7ec3bf1f32..5423bfd301ad 100644 --- a/drivers/gpu/drm/i915/gt/intel_ring_submission.c +++ b/drivers/gpu/drm/i915/gt/intel_ring_submission.c @@ -767,7 +767,7 @@ static int mi_set_context(struct i915_request *rq, if (GRAPHICS_VER(i915) == 7) { if (num_engines) { struct intel_engine_cs *signaller; - i915_reg_t last_reg = {}; /* keep gcc quiet */ + i915_reg_t last_reg = INVALID_MMIO_REG; /* keep gcc quiet */ *cs++ = MI_LOAD_REGISTER_IMM(num_engines); for_each_engine(signaller, engine->gt, id) { diff --git a/drivers/gpu/drm/i915/gt/intel_rps.c b/drivers/gpu/drm/i915/gt/intel_rps.c index c8124101aada..3476a11f294c 100644 --- a/drivers/gpu/drm/i915/gt/intel_rps.c +++ b/drivers/gpu/drm/i915/gt/intel_rps.c @@ -3,6 +3,8 @@ * Copyright © 2019 Intel Corporation */ +#include <linux/string_helpers.h> + #include <drm/i915_drm.h> #include "i915_drv.h" @@ -772,7 +774,8 @@ static void gen6_rps_set_thresholds(struct intel_rps *rps, u8 val) void intel_rps_mark_interactive(struct intel_rps *rps, bool interactive) { - GT_TRACE(rps_to_gt(rps), "mark interactive: %s\n", yesno(interactive)); + GT_TRACE(rps_to_gt(rps), "mark interactive: %s\n", + str_yes_no(interactive)); mutex_lock(&rps->power.mutex); if (interactive) { @@ -1067,23 +1070,66 @@ int intel_rps_set(struct intel_rps *rps, u8 val) return 0; } -static void gen6_rps_init(struct intel_rps *rps) +static u32 intel_rps_read_state_cap(struct intel_rps *rps) { struct drm_i915_private *i915 = rps_to_i915(rps); - u32 rp_state_cap = intel_rps_read_state_cap(rps); + struct intel_uncore *uncore = rps_to_uncore(rps); - /* All of these values are in units of 50MHz */ + if (IS_XEHPSDV(i915)) + return intel_uncore_read(uncore, XEHPSDV_RP_STATE_CAP); + else if (IS_GEN9_LP(i915)) + return intel_uncore_read(uncore, BXT_RP_STATE_CAP); + else + return intel_uncore_read(uncore, GEN6_RP_STATE_CAP); +} + +/** + * gen6_rps_get_freq_caps - Get freq caps exposed by HW + * @rps: the intel_rps structure + * @caps: returned freq caps + * + * Returned "caps" frequencies should be converted to MHz using + * intel_gpu_freq() + */ +void gen6_rps_get_freq_caps(struct intel_rps *rps, struct intel_rps_freq_caps *caps) +{ + struct drm_i915_private *i915 = rps_to_i915(rps); + u32 rp_state_cap; + + rp_state_cap = intel_rps_read_state_cap(rps); /* static values from HW: RP0 > RP1 > RPn (min_freq) */ if (IS_GEN9_LP(i915)) { - rps->rp0_freq = (rp_state_cap >> 16) & 0xff; - rps->rp1_freq = (rp_state_cap >> 8) & 0xff; - rps->min_freq = (rp_state_cap >> 0) & 0xff; + caps->rp0_freq = (rp_state_cap >> 16) & 0xff; + caps->rp1_freq = (rp_state_cap >>
8) & 0xff; + caps->min_freq = (rp_state_cap >> 0) & 0xff; } else { - rps->rp0_freq = (rp_state_cap >> 0) & 0xff; - rps->rp1_freq = (rp_state_cap >> 8) & 0xff; - rps->min_freq = (rp_state_cap >> 16) & 0xff; + caps->rp0_freq = (rp_state_cap >> 0) & 0xff; + caps->rp1_freq = (rp_state_cap >> 8) & 0xff; + caps->min_freq = (rp_state_cap >> 16) & 0xff; + } + + if (IS_GEN9_BC(i915) || GRAPHICS_VER(i915) >= 11) { + /* + * In this case rp_state_cap register reports frequencies in + * units of 50 MHz. Convert these to the actual "hw unit", i.e. + * units of 16.67 MHz + */ + caps->rp0_freq *= GEN9_FREQ_SCALER; + caps->rp1_freq *= GEN9_FREQ_SCALER; + caps->min_freq *= GEN9_FREQ_SCALER; } +} + +static void gen6_rps_init(struct intel_rps *rps) +{ + struct drm_i915_private *i915 = rps_to_i915(rps); + struct intel_rps_freq_caps caps; + + gen6_rps_get_freq_caps(rps, &caps); + rps->rp0_freq = caps.rp0_freq; + rps->rp1_freq = caps.rp1_freq; + rps->min_freq = caps.min_freq; /* hw_max = RP0 until we check for overclocking */ rps->max_freq = rps->rp0_freq; @@ -1092,26 +1138,18 @@ static void gen6_rps_init(struct intel_rps *rps) if (IS_HASWELL(i915) || IS_BROADWELL(i915) || IS_GEN9_BC(i915) || GRAPHICS_VER(i915) >= 11) { u32 ddcc_status = 0; + u32 mult = 1; + if (IS_GEN9_BC(i915) || GRAPHICS_VER(i915) >= 11) + mult = GEN9_FREQ_SCALER; if (snb_pcode_read(i915, HSW_PCODE_DYNAMIC_DUTY_CYCLE_CONTROL, &ddcc_status, NULL) == 0) rps->efficient_freq = - clamp_t(u8, - (ddcc_status >> 8) & 0xff, + clamp_t(u32, + ((ddcc_status >> 8) & 0xff) * mult, rps->min_freq, rps->max_freq); } - - if (IS_GEN9_BC(i915) || GRAPHICS_VER(i915) >= 11) { - /* Store the frequency values in 16.66 MHZ units, which is - * the natural hardware unit for SKL - */ - rps->rp0_freq *= GEN9_FREQ_SCALER; - rps->rp1_freq *= GEN9_FREQ_SCALER; - rps->min_freq *= GEN9_FREQ_SCALER; - rps->max_freq *= GEN9_FREQ_SCALER; - rps->efficient_freq *= GEN9_FREQ_SCALER; - } } static bool rps_reset(struct intel_rps *rps) @@ -1279,7 +1317,8 @@ static bool chv_rps_enable(struct intel_rps *rps) drm_WARN_ONCE(&i915->drm, (val & GPLLENABLE) == 0, "GPLL not enabled\n"); - drm_dbg(&i915->drm, "GPLL enabled? %s\n", yesno(val & GPLLENABLE)); + drm_dbg(&i915->drm, "GPLL enabled? %s\n", + str_yes_no(val & GPLLENABLE)); drm_dbg(&i915->drm, "GPU status: 0x%08x\n", val); return rps_reset(rps); @@ -1380,7 +1419,8 @@ static bool vlv_rps_enable(struct intel_rps *rps) drm_WARN_ONCE(&i915->drm, (val & GPLLENABLE) == 0, "GPLL not enabled\n"); - drm_dbg(&i915->drm, "GPLL enabled? %s\n", yesno(val & GPLLENABLE)); + drm_dbg(&i915->drm, "GPLL enabled? 
%s\n", + str_yes_no(val & GPLLENABLE)); drm_dbg(&i915->drm, "GPU status: 0x%08x\n", val); return rps_reset(rps); @@ -1772,7 +1812,7 @@ static void rps_work(struct work_struct *work) GT_TRACE(gt, "pm_iir:%x, client_boost:%s, last:%d, cur:%x, min:%x, max:%x\n", - pm_iir, yesno(client_boost), + pm_iir, str_yes_no(client_boost), adj, new_freq, min, max); if (client_boost && new_freq < rps->boost_freq) { @@ -2214,19 +2254,6 @@ int intel_rps_set_min_frequency(struct intel_rps *rps, u32 val) return set_min_freq(rps, val); } -u32 intel_rps_read_state_cap(struct intel_rps *rps) -{ - struct drm_i915_private *i915 = rps_to_i915(rps); - struct intel_uncore *uncore = rps_to_uncore(rps); - - if (IS_XEHPSDV(i915)) - return intel_uncore_read(uncore, XEHPSDV_RP_STATE_CAP); - else if (IS_GEN9_LP(i915)) - return intel_uncore_read(uncore, BXT_RP_STATE_CAP); - else - return intel_uncore_read(uncore, GEN6_RP_STATE_CAP); -} - static void intel_rps_set_manual(struct intel_rps *rps, bool enable) { struct intel_uncore *uncore = rps_to_uncore(rps); @@ -2239,18 +2266,18 @@ static void intel_rps_set_manual(struct intel_rps *rps, bool enable) void intel_rps_raise_unslice(struct intel_rps *rps) { struct intel_uncore *uncore = rps_to_uncore(rps); - u32 rp0_unslice_req; mutex_lock(&rps->lock); if (rps_uses_slpc(rps)) { /* RP limits have not been initialized yet for SLPC path */ - rp0_unslice_req = ((intel_rps_read_state_cap(rps) >> 0) - & 0xff) * GEN9_FREQ_SCALER; + struct intel_rps_freq_caps caps; + + gen6_rps_get_freq_caps(rps, &caps); intel_rps_set_manual(rps, true); intel_uncore_write(uncore, GEN6_RPNSWREQ, - ((rp0_unslice_req << + ((caps.rp0_freq << GEN9_SW_REQ_UNSLICE_RATIO_SHIFT) | GEN9_IGNORE_SLICE_RATIO)); intel_rps_set_manual(rps, false); @@ -2264,18 +2291,18 @@ void intel_rps_raise_unslice(struct intel_rps *rps) void intel_rps_lower_unslice(struct intel_rps *rps) { struct intel_uncore *uncore = rps_to_uncore(rps); - u32 rpn_unslice_req; mutex_lock(&rps->lock); if (rps_uses_slpc(rps)) { /* RP limits have not been initialized yet for SLPC path */ - rpn_unslice_req = ((intel_rps_read_state_cap(rps) >> 16) - & 0xff) * GEN9_FREQ_SCALER; + struct intel_rps_freq_caps caps; + + gen6_rps_get_freq_caps(rps, &caps); intel_rps_set_manual(rps, true); intel_uncore_write(uncore, GEN6_RPNSWREQ, - ((rpn_unslice_req << + ((caps.min_freq << GEN9_SW_REQ_UNSLICE_RATIO_SHIFT) | GEN9_IGNORE_SLICE_RATIO)); intel_rps_set_manual(rps, false); @@ -2286,6 +2313,24 @@ void intel_rps_lower_unslice(struct intel_rps *rps) mutex_unlock(&rps->lock); } +static u32 rps_read_mmio(struct intel_rps *rps, i915_reg_t reg32) +{ + struct intel_gt *gt = rps_to_gt(rps); + intel_wakeref_t wakeref; + u32 val; + + with_intel_runtime_pm(gt->uncore->rpm, wakeref) + val = intel_uncore_read(gt->uncore, reg32); + + return val; +} + +bool rps_read_mask_mmio(struct intel_rps *rps, + i915_reg_t reg32, u32 mask) +{ + return rps_read_mmio(rps, reg32) & mask; +} + /* External interface for intel_ips.ko */ static struct drm_i915_private __rcu *ips_mchdev; diff --git a/drivers/gpu/drm/i915/gt/intel_rps.h b/drivers/gpu/drm/i915/gt/intel_rps.h index c6d76a3d1331..1e8d56491308 100644 --- a/drivers/gpu/drm/i915/gt/intel_rps.h +++ b/drivers/gpu/drm/i915/gt/intel_rps.h @@ -7,6 +7,7 @@ #define INTEL_RPS_H #include "intel_rps_types.h" +#include "i915_reg_defs.h" struct i915_request; @@ -44,10 +45,13 @@ u32 intel_rps_get_rp1_frequency(struct intel_rps *rps); u32 intel_rps_get_rpn_frequency(struct intel_rps *rps); u32 intel_rps_read_punit_req(struct intel_rps *rps); u32 
intel_rps_read_punit_req_frequency(struct intel_rps *rps); -u32 intel_rps_read_state_cap(struct intel_rps *rps); +void gen6_rps_get_freq_caps(struct intel_rps *rps, struct intel_rps_freq_caps *caps); void intel_rps_raise_unslice(struct intel_rps *rps); void intel_rps_lower_unslice(struct intel_rps *rps); +u32 intel_rps_read_throttle_reason(struct intel_rps *rps); +bool rps_read_mask_mmio(struct intel_rps *rps, i915_reg_t reg32, u32 mask); + void gen5_rps_irq_handler(struct intel_rps *rps); void gen6_rps_irq_handler(struct intel_rps *rps, u32 pm_iir); void gen11_rps_irq_handler(struct intel_rps *rps, u32 pm_iir); diff --git a/drivers/gpu/drm/i915/gt/intel_rps_types.h b/drivers/gpu/drm/i915/gt/intel_rps_types.h index 3941d8551f52..9173ec75f2b8 100644 --- a/drivers/gpu/drm/i915/gt/intel_rps_types.h +++ b/drivers/gpu/drm/i915/gt/intel_rps_types.h @@ -37,6 +37,21 @@ enum { INTEL_RPS_TIMER, }; +/** + * struct intel_rps_freq_caps - rps freq capabilities + * @rp0_freq: non-overclocked max frequency + * @rp1_freq: "less than" RP0 power/frequency + * @min_freq: aka RPn, minimum frequency + * + * Freq caps exposed by HW, values are in "hw units" and intel_gpu_freq() + * should be used to convert to MHz + */ +struct intel_rps_freq_caps { + u8 rp0_freq; + u8 rp1_freq; + u8 min_freq; +}; + struct intel_rps { struct mutex lock; /* protects enabling and the worker */ diff --git a/drivers/gpu/drm/i915/gt/intel_sseu.c b/drivers/gpu/drm/i915/gt/intel_sseu.c index 4ac0bbaf0c31..fdd25691beda 100644 --- a/drivers/gpu/drm/i915/gt/intel_sseu.c +++ b/drivers/gpu/drm/i915/gt/intel_sseu.c @@ -3,6 +3,8 @@ * Copyright © 2019 Intel Corporation */ +#include <linux/string_helpers.h> + #include "i915_drv.h" #include "intel_engine_regs.h" #include "intel_gt_regs.h" @@ -33,8 +35,8 @@ intel_sseu_subslice_total(const struct sseu_dev_info *sseu) } static u32 -_intel_sseu_get_subslices(const struct sseu_dev_info *sseu, - const u8 *subslice_mask, u8 slice) +sseu_get_subslices(const struct sseu_dev_info *sseu, + const u8 *subslice_mask, u8 slice) { int i, offset = slice * sseu->ss_stride; u32 mask = 0; @@ -49,12 +51,17 @@ _intel_sseu_get_subslices(const struct sseu_dev_info *sseu, u32 intel_sseu_get_subslices(const struct sseu_dev_info *sseu, u8 slice) { - return _intel_sseu_get_subslices(sseu, sseu->subslice_mask, slice); + return sseu_get_subslices(sseu, sseu->subslice_mask, slice); +} + +static u32 sseu_get_geometry_subslices(const struct sseu_dev_info *sseu) +{ + return sseu_get_subslices(sseu, sseu->geometry_subslice_mask, 0); } u32 intel_sseu_get_compute_subslices(const struct sseu_dev_info *sseu) { - return _intel_sseu_get_subslices(sseu, sseu->compute_subslice_mask, 0); + return sseu_get_subslices(sseu, sseu->compute_subslice_mask, 0); } void intel_sseu_set_subslices(struct sseu_dev_info *sseu, int slice, @@ -711,22 +718,18 @@ void intel_sseu_dump(const struct sseu_dev_info *sseu, struct drm_printer *p) drm_printf(p, "EU total: %u\n", sseu->eu_total); drm_printf(p, "EU per subslice: %u\n", sseu->eu_per_subslice); drm_printf(p, "has slice power gating: %s\n", - yesno(sseu->has_slice_pg)); + str_yes_no(sseu->has_slice_pg)); drm_printf(p, "has subslice power gating: %s\n", - yesno(sseu->has_subslice_pg)); - drm_printf(p, "has EU power gating: %s\n", yesno(sseu->has_eu_pg)); + str_yes_no(sseu->has_subslice_pg)); + drm_printf(p, "has EU power gating: %s\n", + str_yes_no(sseu->has_eu_pg)); } -void intel_sseu_print_topology(const struct sseu_dev_info *sseu, - struct drm_printer *p) +static void sseu_print_hsw_topology(const 
struct sseu_dev_info *sseu, + struct drm_printer *p) { int s, ss; - if (sseu->max_slices == 0) { - drm_printf(p, "Unavailable\n"); - return; - } - for (s = 0; s < sseu->max_slices; s++) { drm_printf(p, "slice%d: %u subslice(s) (0x%08x):\n", s, intel_sseu_subslices_per_slice(sseu, s), @@ -741,6 +744,36 @@ void intel_sseu_print_topology(const struct sseu_dev_info *sseu, } } +static void sseu_print_xehp_topology(const struct sseu_dev_info *sseu, + struct drm_printer *p) +{ + u32 g_dss_mask = sseu_get_geometry_subslices(sseu); + u32 c_dss_mask = intel_sseu_get_compute_subslices(sseu); + int dss; + + for (dss = 0; dss < sseu->max_subslices; dss++) { + u16 enabled_eus = sseu_get_eus(sseu, 0, dss); + + drm_printf(p, "DSS_%02d: G:%3s C:%3s, %2u EUs (0x%04hx)\n", dss, + str_yes_no(g_dss_mask & BIT(dss)), + str_yes_no(c_dss_mask & BIT(dss)), + hweight16(enabled_eus), enabled_eus); + } +} + +void intel_sseu_print_topology(struct drm_i915_private *i915, + const struct sseu_dev_info *sseu, + struct drm_printer *p) +{ + if (sseu->max_slices == 0) { + drm_printf(p, "Unavailable\n"); + } else if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50)) { + sseu_print_xehp_topology(sseu, p); + } else { + sseu_print_hsw_topology(sseu, p); + } +} + u16 intel_slicemask_from_dssmask(u64 dss_mask, int dss_per_slice) { u16 slice_mask = 0; diff --git a/drivers/gpu/drm/i915/gt/intel_sseu.h b/drivers/gpu/drm/i915/gt/intel_sseu.h index 8a79cd8eaab4..5c078df4729c 100644 --- a/drivers/gpu/drm/i915/gt/intel_sseu.h +++ b/drivers/gpu/drm/i915/gt/intel_sseu.h @@ -15,26 +15,49 @@ struct drm_i915_private; struct intel_gt; struct drm_printer; -#define GEN_MAX_SLICES (3) /* SKL upper bound */ -#define GEN_MAX_SUBSLICES (32) /* XEHPSDV upper bound */ -#define GEN_SSEU_STRIDE(max_entries) DIV_ROUND_UP(max_entries, BITS_PER_BYTE) -#define GEN_MAX_SUBSLICE_STRIDE GEN_SSEU_STRIDE(GEN_MAX_SUBSLICES) -#define GEN_MAX_EUS (16) /* TGL upper bound */ -#define GEN_MAX_EU_STRIDE GEN_SSEU_STRIDE(GEN_MAX_EUS) +/* + * Maximum number of slices on older platforms. Slices no longer exist + * starting on Xe_HP ("gslices," "cslices," etc. are a different concept and + * are not expressed through fusing). + */ +#define GEN_MAX_HSW_SLICES 3 + +/* + * Maximum number of subslices that can exist within a HSW-style slice. This + * is only relevant to pre-Xe_HP platforms (Xe_HP and beyond use the + * GEN_MAX_DSS value below). + */ +#define GEN_MAX_SS_PER_HSW_SLICE 6 + +/* Maximum number of DSS on newer platforms (Xe_HP and beyond). */ +#define GEN_MAX_DSS 32 + +/* Maximum number of EUs that can exist within a subslice or DSS. */ +#define GEN_MAX_EUS_PER_SS 16 + +#define SSEU_MAX(a, b) ((a) > (b) ? 
(a) : (b)) + +/* The maximum number of bits needed to express each subslice/DSS independently */ +#define GEN_SS_MASK_SIZE SSEU_MAX(GEN_MAX_DSS, \ + GEN_MAX_HSW_SLICES * GEN_MAX_SS_PER_HSW_SLICE) + +#define GEN_SSEU_STRIDE(max_entries) DIV_ROUND_UP(max_entries, BITS_PER_BYTE) +#define GEN_MAX_SUBSLICE_STRIDE GEN_SSEU_STRIDE(GEN_SS_MASK_SIZE) +#define GEN_MAX_EU_STRIDE GEN_SSEU_STRIDE(GEN_MAX_EUS_PER_SS) #define GEN_DSS_PER_GSLICE 4 #define GEN_DSS_PER_CSLICE 8 #define GEN_DSS_PER_MSLICE 8 -#define GEN_MAX_GSLICES (GEN_MAX_SUBSLICES / GEN_DSS_PER_GSLICE) -#define GEN_MAX_CSLICES (GEN_MAX_SUBSLICES / GEN_DSS_PER_CSLICE) +#define GEN_MAX_GSLICES (GEN_MAX_DSS / GEN_DSS_PER_GSLICE) +#define GEN_MAX_CSLICES (GEN_MAX_DSS / GEN_DSS_PER_CSLICE) struct sseu_dev_info { u8 slice_mask; - u8 subslice_mask[GEN_MAX_SLICES * GEN_MAX_SUBSLICE_STRIDE]; - u8 geometry_subslice_mask[GEN_MAX_SLICES * GEN_MAX_SUBSLICE_STRIDE]; - u8 compute_subslice_mask[GEN_MAX_SLICES * GEN_MAX_SUBSLICE_STRIDE]; - u8 eu_mask[GEN_MAX_SLICES * GEN_MAX_SUBSLICES * GEN_MAX_EU_STRIDE]; + u8 subslice_mask[GEN_SS_MASK_SIZE]; + u8 geometry_subslice_mask[GEN_SS_MASK_SIZE]; + u8 compute_subslice_mask[GEN_SS_MASK_SIZE]; + u8 eu_mask[GEN_SS_MASK_SIZE * GEN_MAX_EU_STRIDE]; u16 eu_total; u8 eu_per_subslice; u8 min_eu_in_pool; @@ -116,7 +139,8 @@ u32 intel_sseu_make_rpcs(struct intel_gt *gt, const struct intel_sseu *req_sseu); void intel_sseu_dump(const struct sseu_dev_info *sseu, struct drm_printer *p); -void intel_sseu_print_topology(const struct sseu_dev_info *sseu, +void intel_sseu_print_topology(struct drm_i915_private *i915, + const struct sseu_dev_info *sseu, struct drm_printer *p); u16 intel_slicemask_from_dssmask(u64 dss_mask, int dss_per_slice); diff --git a/drivers/gpu/drm/i915/gt/intel_sseu_debugfs.c b/drivers/gpu/drm/i915/gt/intel_sseu_debugfs.c index 903626f106ea..2d5d011e01db 100644 --- a/drivers/gpu/drm/i915/gt/intel_sseu_debugfs.c +++ b/drivers/gpu/drm/i915/gt/intel_sseu_debugfs.c @@ -4,6 +4,8 @@ * Copyright © 2020 Intel Corporation */ +#include <linux/string_helpers.h> + #include "i915_drv.h" #include "intel_gt_debugfs.h" #include "intel_gt_regs.h" @@ -226,16 +228,16 @@ static void i915_print_sseu_info(struct seq_file *m, if (!is_available_info) return; - seq_printf(m, " Has Pooled EU: %s\n", yesno(has_pooled_eu)); + seq_printf(m, " Has Pooled EU: %s\n", str_yes_no(has_pooled_eu)); if (has_pooled_eu) seq_printf(m, " Min EU in pool: %u\n", sseu->min_eu_in_pool); seq_printf(m, " Has Slice Power Gating: %s\n", - yesno(sseu->has_slice_pg)); + str_yes_no(sseu->has_slice_pg)); seq_printf(m, " Has Subslice Power Gating: %s\n", - yesno(sseu->has_subslice_pg)); + str_yes_no(sseu->has_subslice_pg)); seq_printf(m, " Has EU Power Gating: %s\n", - yesno(sseu->has_eu_pg)); + str_yes_no(sseu->has_eu_pg)); } /* @@ -246,7 +248,7 @@ int intel_sseu_status(struct seq_file *m, struct intel_gt *gt) { struct drm_i915_private *i915 = gt->i915; const struct intel_gt_info *info = >->info; - struct sseu_dev_info sseu; + struct sseu_dev_info *sseu; intel_wakeref_t wakeref; if (GRAPHICS_VER(i915) < 8) @@ -256,23 +258,29 @@ int intel_sseu_status(struct seq_file *m, struct intel_gt *gt) i915_print_sseu_info(m, true, HAS_POOLED_EU(i915), &info->sseu); seq_puts(m, "SSEU Device Status\n"); - memset(&sseu, 0, sizeof(sseu)); - intel_sseu_set_info(&sseu, info->sseu.max_slices, + + sseu = kzalloc(sizeof(*sseu), GFP_KERNEL); + if (!sseu) + return -ENOMEM; + + intel_sseu_set_info(sseu, info->sseu.max_slices, info->sseu.max_subslices, 
info->sseu.max_eus_per_subslice); with_intel_runtime_pm(&i915->runtime_pm, wakeref) { if (IS_CHERRYVIEW(i915)) - cherryview_sseu_device_status(gt, &sseu); + cherryview_sseu_device_status(gt, sseu); else if (IS_BROADWELL(i915)) - bdw_sseu_device_status(gt, &sseu); + bdw_sseu_device_status(gt, sseu); else if (GRAPHICS_VER(i915) == 9) - gen9_sseu_device_status(gt, &sseu); + gen9_sseu_device_status(gt, sseu); else if (GRAPHICS_VER(i915) >= 11) - gen11_sseu_device_status(gt, &sseu); + gen11_sseu_device_status(gt, sseu); } - i915_print_sseu_info(m, false, HAS_POOLED_EU(i915), &sseu); + i915_print_sseu_info(m, false, HAS_POOLED_EU(i915), sseu); + + kfree(sseu); return 0; } @@ -285,22 +293,22 @@ static int sseu_status_show(struct seq_file *m, void *unused) } DEFINE_INTEL_GT_DEBUGFS_ATTRIBUTE(sseu_status); -static int rcs_topology_show(struct seq_file *m, void *unused) +static int sseu_topology_show(struct seq_file *m, void *unused) { struct intel_gt *gt = m->private; struct drm_printer p = drm_seq_file_printer(m); - intel_sseu_print_topology(>->info.sseu, &p); + intel_sseu_print_topology(gt->i915, >->info.sseu, &p); return 0; } -DEFINE_INTEL_GT_DEBUGFS_ATTRIBUTE(rcs_topology); +DEFINE_INTEL_GT_DEBUGFS_ATTRIBUTE(sseu_topology); void intel_sseu_debugfs_register(struct intel_gt *gt, struct dentry *root) { static const struct intel_gt_debugfs_file files[] = { { "sseu_status", &sseu_status_fops, NULL }, - { "rcs_topology", &rcs_topology_fops, NULL }, + { "sseu_topology", &sseu_topology_fops, NULL }, }; intel_gt_debugfs_register_files(root, files, ARRAY_SIZE(files), gt); diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.c b/drivers/gpu/drm/i915/gt/intel_workarounds.c index c014b40d2e9f..a05c4b99b3fb 100644 --- a/drivers/gpu/drm/i915/gt/intel_workarounds.c +++ b/drivers/gpu/drm/i915/gt/intel_workarounds.c @@ -1072,9 +1072,15 @@ static void __set_mcr_steering(struct i915_wa_list *wal, static void __add_mcr_wa(struct intel_gt *gt, struct i915_wa_list *wal, unsigned int slice, unsigned int subslice) { - drm_dbg(>->i915->drm, "MCR slice=0x%x, subslice=0x%x\n", slice, subslice); + struct drm_printer p = drm_debug_printer("MCR Steering:"); __set_mcr_steering(wal, GEN8_MCR_SELECTOR, slice, subslice); + + gt->default_steering.groupid = slice; + gt->default_steering.instanceid = subslice; + + if (drm_debug_enabled(DRM_UT_DRIVER)) + intel_gt_report_steering(&p, gt, false); } static void @@ -2188,11 +2194,15 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal) */ wa_write_or(wal, GEN7_FF_THREAD_MODE, GEN12_FF_TESSELATION_DOP_GATE_DISABLE); + } + if (IS_ALDERLAKE_P(i915) || IS_DG2(i915) || IS_ALDERLAKE_S(i915) || + IS_DG1(i915) || IS_ROCKETLAKE(i915) || IS_TIGERLAKE(i915)) { /* * Wa_1606700617:tgl,dg1,adl-p * Wa_22010271021:tgl,rkl,dg1,adl-s,adl-p * Wa_14010826681:tgl,dg1,rkl,adl-p + * Wa_18019627453:dg2 */ wa_masked_en(wal, GEN9_CS_DEBUG_MODE1, @@ -2310,7 +2320,7 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal) FF_DOP_CLOCK_GATE_DISABLE); } - if (IS_GRAPHICS_VER(i915, 9, 12)) { + if (HAS_PERCTX_PREEMPT_CTRL(i915)) { /* FtrPerCtxtPreemptionGranularityControl:skl,bxt,kbl,cfl,cnl,icl,tgl */ wa_masked_en(wal, GEN7_FF_SLICE_CS_CHICKEN1, @@ -2618,6 +2628,11 @@ general_render_compute_wa_init(struct intel_engine_cs *engine, struct i915_wa_li wa_write_or(wal, GEN12_GAMCNTRL_CTRL, INVALIDATION_BROADCAST_MODE_DIS | GLOBAL_INVALIDATION_MODE); } + + if (IS_DG2(i915)) { + /* Wa_22014226127:dg2 */ + wa_write_or(wal, LSC_CHICKEN_BIT_0, DISABLE_D8_D16_COASLESCE); + } } 
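The wa_write_or() and wa_masked_en() calls in the workaround hunks above are replayed onto the hardware in two different ways: an "OR" entry is applied as a read-modify-write of a normal register, while a "masked" register carries per-bit write enables in its upper 16 bits, so the stored value can be written directly without a read. The minimal, self-contained C sketch below illustrates that distinction; struct wa_entry and wa_apply_value() are illustrative stand-ins, not the driver's actual i915_wa_list machinery.

#include <stdint.h>
#include <stdbool.h>

struct wa_entry {
	uint32_t offset; /* mmio offset of the target register */
	uint32_t clr;    /* bits cleared when applying */
	uint32_t set;    /* bits set when applying */
	bool masked;     /* HW masked register: bits [31:16] are write enables */
};

/* Value to write back when replaying one workaround entry. */
static uint32_t wa_apply_value(const struct wa_entry *wa, uint32_t old)
{
	if (wa->masked)
		/* the stored value already encodes its enable bits, no read needed */
		return wa->set;

	/* wa_write_or()-style entry: read-modify-write */
	return (old & ~wa->clr) | wa->set;
}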
static void @@ -2633,7 +2648,7 @@ engine_init_workarounds(struct intel_engine_cs *engine, struct i915_wa_list *wal * to a single RCS/CCS engine's workaround list since * they're reset as part of the general render domain reset. */ - if (engine->class == RENDER_CLASS) + if (engine->flags & I915_ENGINE_FIRST_RENDER_COMPUTE) general_render_compute_wa_init(engine, wal); if (engine->class == RENDER_CLASS) diff --git a/drivers/gpu/drm/i915/gt/selftest_execlists.c b/drivers/gpu/drm/i915/gt/selftest_execlists.c index 72d5faab8f9a..09f8cd2d0e2c 100644 --- a/drivers/gpu/drm/i915/gt/selftest_execlists.c +++ b/drivers/gpu/drm/i915/gt/selftest_execlists.c @@ -1736,15 +1736,9 @@ static int live_preempt(void *arg) enum intel_engine_id id; int err = -ENOMEM; - if (igt_spinner_init(&spin_hi, gt)) - return -ENOMEM; - - if (igt_spinner_init(&spin_lo, gt)) - goto err_spin_hi; - ctx_hi = kernel_context(gt->i915, NULL); if (!ctx_hi) - goto err_spin_lo; + return -ENOMEM; ctx_hi->sched.priority = I915_CONTEXT_MAX_USER_PRIORITY; ctx_lo = kernel_context(gt->i915, NULL); @@ -1752,6 +1746,12 @@ static int live_preempt(void *arg) goto err_ctx_hi; ctx_lo->sched.priority = I915_CONTEXT_MIN_USER_PRIORITY; + if (igt_spinner_init(&spin_hi, gt)) + goto err_ctx_lo; + + if (igt_spinner_init(&spin_lo, gt)) + goto err_spin_hi; + for_each_engine(engine, gt, id) { struct igt_live_test t; struct i915_request *rq; @@ -1761,14 +1761,14 @@ static int live_preempt(void *arg) if (igt_live_test_begin(&t, gt->i915, __func__, engine->name)) { err = -EIO; - goto err_ctx_lo; + goto err_spin_lo; } rq = spinner_create_request(&spin_lo, ctx_lo, engine, MI_ARB_CHECK); if (IS_ERR(rq)) { err = PTR_ERR(rq); - goto err_ctx_lo; + goto err_spin_lo; } i915_request_add(rq); @@ -1777,7 +1777,7 @@ static int live_preempt(void *arg) GEM_TRACE_DUMP(); intel_gt_set_wedged(gt); err = -EIO; - goto err_ctx_lo; + goto err_spin_lo; } rq = spinner_create_request(&spin_hi, ctx_hi, engine, @@ -1785,7 +1785,7 @@ static int live_preempt(void *arg) if (IS_ERR(rq)) { igt_spinner_end(&spin_lo); err = PTR_ERR(rq); - goto err_ctx_lo; + goto err_spin_lo; } i915_request_add(rq); @@ -1794,7 +1794,7 @@ static int live_preempt(void *arg) GEM_TRACE_DUMP(); intel_gt_set_wedged(gt); err = -EIO; - goto err_ctx_lo; + goto err_spin_lo; } igt_spinner_end(&spin_hi); @@ -1802,19 +1802,19 @@ static int live_preempt(void *arg) if (igt_live_test_end(&t)) { err = -EIO; - goto err_ctx_lo; + goto err_spin_lo; } } err = 0; -err_ctx_lo: - kernel_context_close(ctx_lo); -err_ctx_hi: - kernel_context_close(ctx_hi); err_spin_lo: igt_spinner_fini(&spin_lo); err_spin_hi: igt_spinner_fini(&spin_hi); +err_ctx_lo: + kernel_context_close(ctx_lo); +err_ctx_hi: + kernel_context_close(ctx_hi); return err; } @@ -1828,20 +1828,20 @@ static int live_late_preempt(void *arg) enum intel_engine_id id; int err = -ENOMEM; - if (igt_spinner_init(&spin_hi, gt)) - return -ENOMEM; - - if (igt_spinner_init(&spin_lo, gt)) - goto err_spin_hi; - ctx_hi = kernel_context(gt->i915, NULL); if (!ctx_hi) - goto err_spin_lo; + return -ENOMEM; ctx_lo = kernel_context(gt->i915, NULL); if (!ctx_lo) goto err_ctx_hi; + if (igt_spinner_init(&spin_hi, gt)) + goto err_ctx_lo; + + if (igt_spinner_init(&spin_lo, gt)) + goto err_spin_hi; + /* Make sure ctx_lo stays before ctx_hi until we trigger preemption. 
*/ ctx_lo->sched.priority = 1; @@ -1854,14 +1854,14 @@ static int live_late_preempt(void *arg) if (igt_live_test_begin(&t, gt->i915, __func__, engine->name)) { err = -EIO; - goto err_ctx_lo; + goto err_spin_lo; } rq = spinner_create_request(&spin_lo, ctx_lo, engine, MI_ARB_CHECK); if (IS_ERR(rq)) { err = PTR_ERR(rq); - goto err_ctx_lo; + goto err_spin_lo; } i915_request_add(rq); @@ -1875,7 +1875,7 @@ static int live_late_preempt(void *arg) if (IS_ERR(rq)) { igt_spinner_end(&spin_lo); err = PTR_ERR(rq); - goto err_ctx_lo; + goto err_spin_lo; } i915_request_add(rq); @@ -1898,19 +1898,19 @@ static int live_late_preempt(void *arg) if (igt_live_test_end(&t)) { err = -EIO; - goto err_ctx_lo; + goto err_spin_lo; } } err = 0; -err_ctx_lo: - kernel_context_close(ctx_lo); -err_ctx_hi: - kernel_context_close(ctx_hi); err_spin_lo: igt_spinner_fini(&spin_lo); err_spin_hi: igt_spinner_fini(&spin_hi); +err_ctx_lo: + kernel_context_close(ctx_lo); +err_ctx_hi: + kernel_context_close(ctx_hi); return err; err_wedged: @@ -1918,7 +1918,7 @@ err_wedged: igt_spinner_end(&spin_lo); intel_gt_set_wedged(gt); err = -EIO; - goto err_ctx_lo; + goto err_spin_lo; } struct preempt_client { @@ -3382,12 +3382,9 @@ static int live_preempt_timeout(void *arg) if (!intel_has_reset_engine(gt)) return 0; - if (igt_spinner_init(&spin_lo, gt)) - return -ENOMEM; - ctx_hi = kernel_context(gt->i915, NULL); if (!ctx_hi) - goto err_spin_lo; + return -ENOMEM; ctx_hi->sched.priority = I915_CONTEXT_MAX_USER_PRIORITY; ctx_lo = kernel_context(gt->i915, NULL); @@ -3395,6 +3392,9 @@ static int live_preempt_timeout(void *arg) goto err_ctx_hi; ctx_lo->sched.priority = I915_CONTEXT_MIN_USER_PRIORITY; + if (igt_spinner_init(&spin_lo, gt)) + goto err_ctx_lo; + for_each_engine(engine, gt, id) { unsigned long saved_timeout; struct i915_request *rq; @@ -3406,21 +3406,21 @@ static int live_preempt_timeout(void *arg) MI_NOOP); /* preemption disabled */ if (IS_ERR(rq)) { err = PTR_ERR(rq); - goto err_ctx_lo; + goto err_spin_lo; } i915_request_add(rq); if (!igt_wait_for_spinner(&spin_lo, rq)) { intel_gt_set_wedged(gt); err = -EIO; - goto err_ctx_lo; + goto err_spin_lo; } rq = igt_request_alloc(ctx_hi, engine); if (IS_ERR(rq)) { igt_spinner_end(&spin_lo); err = PTR_ERR(rq); - goto err_ctx_lo; + goto err_spin_lo; } /* Flush the previous CS ack before changing timeouts */ @@ -3440,7 +3440,7 @@ static int live_preempt_timeout(void *arg) intel_gt_set_wedged(gt); i915_request_put(rq); err = -ETIME; - goto err_ctx_lo; + goto err_spin_lo; } igt_spinner_end(&spin_lo); @@ -3448,12 +3448,12 @@ static int live_preempt_timeout(void *arg) } err = 0; +err_spin_lo: + igt_spinner_fini(&spin_lo); err_ctx_lo: kernel_context_close(ctx_lo); err_ctx_hi: kernel_context_close(ctx_hi); -err_spin_lo: - igt_spinner_fini(&spin_lo); return err; } diff --git a/drivers/gpu/drm/i915/gt/selftest_lrc.c b/drivers/gpu/drm/i915/gt/selftest_lrc.c index 21c29d315cc0..8b2c11dbe354 100644 --- a/drivers/gpu/drm/i915/gt/selftest_lrc.c +++ b/drivers/gpu/drm/i915/gt/selftest_lrc.c @@ -128,6 +128,27 @@ static int context_flush(struct intel_context *ce, long timeout) return err; } +static int get_lri_mask(struct intel_engine_cs *engine, u32 lri) +{ + if ((lri & MI_LRI_LRM_CS_MMIO) == 0) + return ~0u; + + if (GRAPHICS_VER(engine->i915) < 12) + return 0xfff; + + switch (engine->class) { + default: + case RENDER_CLASS: + case COMPUTE_CLASS: + return 0x07ff; + case COPY_ENGINE_CLASS: + return 0x0fff; + case VIDEO_DECODE_CLASS: + case VIDEO_ENHANCEMENT_CLASS: + return 0x3fff; + } +} + static int 
live_lrc_layout(void *arg) { struct intel_gt *gt = arg; @@ -167,6 +188,7 @@ static int live_lrc_layout(void *arg) dw = 0; do { u32 lri = READ_ONCE(hw[dw]); + u32 lri_mask; if (lri == 0) { dw++; @@ -194,6 +216,18 @@ break; } + /* + * When bit 19 of MI_LOAD_REGISTER_IMM instruction + * opcode is set on Gen12+ devices, HW does not + * care about certain register address offsets, and + * instead checks the following for valid address + * ranges on specific engines: + * RCS && CCS: BITS(0 - 10) + * BCS: BITS(0 - 11) + * VECS && VCS: BITS(0 - 13) + */ + lri_mask = get_lri_mask(engine, lri); + lri &= 0x7f; lri++; dw++; @@ -201,7 +235,7 @@ while (lri) { u32 offset = READ_ONCE(hw[dw]); - if (offset != lrc[dw]) { + if ((offset ^ lrc[dw]) & lri_mask) { pr_err("%s: Different registers found at dword %d, expected %x, found %x\n", engine->name, dw, offset, lrc[dw]); err = -EINVAL; @@ -911,6 +945,19 @@ create_user_vma(struct i915_address_space *vm, unsigned long size) return vma; } +static u32 safe_poison(u32 offset, u32 poison) +{ + /* + * Do not enable predication as it will nop all subsequent commands, + * not only disabling the tests (by preventing all the other SRM) but + * also preventing the arbitration events at the end of the request. + */ + if (offset == i915_mmio_reg_offset(RING_PREDICATE_RESULT(0))) + poison &= ~REG_BIT(0); + + return poison; +} + static struct i915_vma * store_context(struct intel_context *ce, struct i915_vma *scratch) { @@ -1120,7 +1167,9 @@ static struct i915_vma *load_context(struct intel_context *ce, u32 poison) *cs++ = MI_LOAD_REGISTER_IMM(len); while (len--) { *cs++ = hw[dw]; - *cs++ = poison; + *cs++ = safe_poison(hw[dw] & get_lri_mask(ce->engine, + MI_LRI_LRM_CS_MMIO), + poison); dw += 2; } } while (dw < PAGE_SIZE / sizeof(u32) && @@ -1753,8 +1802,8 @@ static int __live_pphwsp_runtime(struct intel_engine_cs *engine) if (IS_ERR(ce)) return PTR_ERR(ce); - ce->runtime.num_underflow = 0; - ce->runtime.max_underflow = 0; + ce->stats.runtime.num_underflow = 0; + ce->stats.runtime.max_underflow = 0; do { unsigned int loop = 1024; @@ -1792,11 +1841,11 @@ static int __live_pphwsp_runtime(struct intel_engine_cs *engine) intel_context_get_avg_runtime_ns(ce)); err = 0; - if (ce->runtime.num_underflow) { + if (ce->stats.runtime.num_underflow) { pr_err("%s: pphwsp underflow %u time(s), max %u cycles!\n", engine->name, - ce->runtime.num_underflow, - ce->runtime.max_underflow); + ce->stats.runtime.num_underflow, + ce->stats.runtime.max_underflow); GEM_TRACE_DUMP(); err = -EOVERFLOW; } diff --git a/drivers/gpu/drm/i915/gt/selftest_migrate.c b/drivers/gpu/drm/i915/gt/selftest_migrate.c index c9c4f391c5cc..2b0c87999949 100644 --- a/drivers/gpu/drm/i915/gt/selftest_migrate.c +++ b/drivers/gpu/drm/i915/gt/selftest_migrate.c @@ -132,6 +132,124 @@ err_free_src: return err; } +static int intel_context_copy_ccs(struct intel_context *ce, + const struct i915_deps *deps, + struct scatterlist *sg, + enum i915_cache_level cache_level, + bool write_to_ccs, + struct i915_request **out) +{ + u8 src_access = write_to_ccs ? DIRECT_ACCESS : INDIRECT_ACCESS; + u8 dst_access = write_to_ccs ? 
INDIRECT_ACCESS : DIRECT_ACCESS; + struct sgt_dma it = sg_sgt(sg); + struct i915_request *rq; + u32 offset; + int err; + + GEM_BUG_ON(ce->vm != ce->engine->gt->migrate.context->vm); + *out = NULL; + + GEM_BUG_ON(ce->ring->size < SZ_64K); + + offset = 0; + if (HAS_64K_PAGES(ce->engine->i915)) + offset = CHUNK_SZ; + + do { + int len; + + rq = i915_request_create(ce); + if (IS_ERR(rq)) { + err = PTR_ERR(rq); + goto out_ce; + } + + if (deps) { + err = i915_request_await_deps(rq, deps); + if (err) + goto out_rq; + + if (rq->engine->emit_init_breadcrumb) { + err = rq->engine->emit_init_breadcrumb(rq); + if (err) + goto out_rq; + } + + deps = NULL; + } + + /* The PTE updates + clear must not be interrupted. */ + err = emit_no_arbitration(rq); + if (err) + goto out_rq; + + len = emit_pte(rq, &it, cache_level, true, offset, CHUNK_SZ); + if (len <= 0) { + err = len; + goto out_rq; + } + + err = rq->engine->emit_flush(rq, EMIT_INVALIDATE); + if (err) + goto out_rq; + + err = emit_copy_ccs(rq, offset, dst_access, + offset, src_access, len); + if (err) + goto out_rq; + + err = rq->engine->emit_flush(rq, EMIT_INVALIDATE); + + /* Arbitration is re-enabled between requests. */ +out_rq: + if (*out) + i915_request_put(*out); + *out = i915_request_get(rq); + i915_request_add(rq); + if (err || !it.sg || !sg_dma_len(it.sg)) + break; + + cond_resched(); + } while (1); + +out_ce: + return err; +} + +static int +intel_migrate_ccs_copy(struct intel_migrate *m, + struct i915_gem_ww_ctx *ww, + const struct i915_deps *deps, + struct scatterlist *sg, + enum i915_cache_level cache_level, + bool write_to_ccs, + struct i915_request **out) +{ + struct intel_context *ce; + int err; + + *out = NULL; + if (!m->context) + return -ENODEV; + + ce = intel_migrate_create_context(m); + if (IS_ERR(ce)) + ce = intel_context_get(m->context); + GEM_BUG_ON(IS_ERR(ce)); + + err = intel_context_pin_ww(ce, ww); + if (err) + goto out; + + err = intel_context_copy_ccs(ce, deps, sg, cache_level, + write_to_ccs, out); + + intel_context_unpin(ce); +out: + intel_context_put(ce); + return err; +} + static int clear(struct intel_migrate *migrate, int (*fn)(struct intel_migrate *migrate, struct i915_gem_ww_ctx *ww, @@ -144,7 +262,8 @@ static int clear(struct intel_migrate *migrate, struct drm_i915_gem_object *obj; struct i915_request *rq; struct i915_gem_ww_ctx ww; - u32 *vaddr; + u32 *vaddr, val = 0; + bool ccs_cap = false; int err = 0; int i; @@ -152,7 +271,15 @@ static int clear(struct intel_migrate *migrate, if (IS_ERR(obj)) return 0; + /* Consider the rounded up memory too */ + sz = obj->base.size; + + if (HAS_FLAT_CCS(i915) && i915_gem_object_is_lmem(obj)) + ccs_cap = true; + for_i915_gem_ww(&ww, err, true) { + int ccs_bytes, ccs_bytes_per_chunk; + err = i915_gem_object_lock(obj, &ww); if (err) continue; @@ -167,44 +294,114 @@ static int clear(struct intel_migrate *migrate, vaddr[i] = ~i; i915_gem_object_flush_map(obj); - err = fn(migrate, &ww, obj, sz, &rq); - if (!err) - continue; + if (ccs_cap && !val) { + /* Write the obj data into ccs surface */ + err = intel_migrate_ccs_copy(migrate, &ww, NULL, + obj->mm.pages->sgl, + obj->cache_level, + true, &rq); + if (rq && !err) { + if (i915_request_wait(rq, 0, HZ) < 0) { + pr_err("%ps timed out, size: %u\n", + fn, sz); + err = -ETIME; + } + i915_request_put(rq); + rq = NULL; + } + if (err) + continue; + } - if (err != -EDEADLK && err != -EINTR && err != -ERESTARTSYS) - pr_err("%ps failed, size: %u\n", fn, sz); - if (rq) { - i915_request_wait(rq, 0, HZ); + err = fn(migrate, &ww, obj, val, &rq); + 
if (rq && !err) { + if (i915_request_wait(rq, 0, HZ) < 0) { + pr_err("%ps timed out, size: %u\n", fn, sz); + err = -ETIME; + } i915_request_put(rq); + rq = NULL; } - i915_gem_object_unpin_map(obj); - } - if (err) - goto err_out; + if (err) + continue; - if (rq) { - if (i915_request_wait(rq, 0, HZ) < 0) { - pr_err("%ps timed out, size: %u\n", fn, sz); - err = -ETIME; + i915_gem_object_flush_map(obj); + + /* Verify the set/clear of the obj mem */ + for (i = 0; !err && i < sz / PAGE_SIZE; i++) { + int x = i * 1024 + + i915_prandom_u32_max_state(1024, prng); + + if (vaddr[x] != val) { + pr_err("%ps failed, (%u != %u), offset: %zu\n", + fn, vaddr[x], val, x * sizeof(u32)); + igt_hexdump(vaddr + i * 1024, 4096); + err = -EINVAL; + } } - i915_request_put(rq); - } + if (err) + continue; - for (i = 0; !err && i < sz / PAGE_SIZE; i++) { - int x = i * 1024 + i915_prandom_u32_max_state(1024, prng); + if (ccs_cap && !val) { + for (i = 0; i < sz / sizeof(u32); i++) + vaddr[i] = ~i; + i915_gem_object_flush_map(obj); + + err = intel_migrate_ccs_copy(migrate, &ww, NULL, + obj->mm.pages->sgl, + obj->cache_level, + false, &rq); + if (rq && !err) { + if (i915_request_wait(rq, 0, HZ) < 0) { + pr_err("%ps timed out, size: %u\n", + fn, sz); + err = -ETIME; + } + i915_request_put(rq); + rq = NULL; + } + if (err) + continue; + + ccs_bytes = GET_CCS_BYTES(i915, sz); + ccs_bytes_per_chunk = GET_CCS_BYTES(i915, CHUNK_SZ); + i915_gem_object_flush_map(obj); + + for (i = 0; !err && i < DIV_ROUND_UP(ccs_bytes, PAGE_SIZE); i++) { + int offset = ((i * PAGE_SIZE) / + ccs_bytes_per_chunk) * CHUNK_SZ / sizeof(u32); + int ccs_bytes_left = (ccs_bytes - i * PAGE_SIZE) / sizeof(u32); + int x = i915_prandom_u32_max_state(min_t(int, 1024, + ccs_bytes_left), prng); + + if (vaddr[offset + x]) { + pr_err("%ps ccs clearing failed, offset: %ld/%d\n", + fn, i * PAGE_SIZE + x * sizeof(u32), ccs_bytes); + igt_hexdump(vaddr + offset, + min_t(int, 4096, + ccs_bytes_left * sizeof(u32))); + err = -EINVAL; + } + } + + if (err) + continue; + } + i915_gem_object_unpin_map(obj); + } - if (vaddr[x] != sz) { - pr_err("%ps failed, size: %u, offset: %zu\n", - fn, sz, x * sizeof(u32)); - igt_hexdump(vaddr + i * 1024, 4096); - err = -EINVAL; + if (err) { + if (err != -EDEADLK && err != -EINTR && err != -ERESTARTSYS) + pr_err("%ps failed, size: %u\n", fn, sz); + if (rq && err != -EINVAL) { + i915_request_wait(rq, 0, HZ); + i915_request_put(rq); } + + i915_gem_object_unpin_map(obj); } - i915_gem_object_unpin_map(obj); -err_out: i915_gem_object_put(obj); - return err; } @@ -621,13 +818,15 @@ static int perf_copy_blt(void *arg) for (i = 0; i < ARRAY_SIZE(sizes); i++) { struct drm_i915_gem_object *src, *dst; + size_t sz; int err; src = create_init_lmem_internal(gt, sizes[i], true); if (IS_ERR(src)) return PTR_ERR(src); - dst = create_init_lmem_internal(gt, sizes[i], false); + sz = src->base.size; + dst = create_init_lmem_internal(gt, sz, false); if (IS_ERR(dst)) { err = PTR_ERR(dst); goto err_src; @@ -640,7 +839,7 @@ static int perf_copy_blt(void *arg) dst->mm.pages->sgl, I915_CACHE_NONE, i915_gem_object_is_lmem(dst), - sizes[i]); + sz); i915_gem_object_unlock(dst); i915_gem_object_put(dst); diff --git a/drivers/gpu/drm/i915/gt/selftest_timeline.c b/drivers/gpu/drm/i915/gt/selftest_timeline.c index 0410c402f2a3..522d0190509c 100644 --- a/drivers/gpu/drm/i915/gt/selftest_timeline.c +++ b/drivers/gpu/drm/i915/gt/selftest_timeline.c @@ -4,6 +4,7 @@ */ #include <linux/prime_numbers.h> +#include <linux/string_helpers.h> #include "intel_context.h" #include 
"intel_engine_heartbeat.h" @@ -209,7 +210,7 @@ static int __igt_sync(struct intel_timeline *tl, if (__intel_timeline_sync_is_later(tl, ctx, p->seqno) != p->expected) { pr_err("%s: %s(ctx=%llu, seqno=%u) expected passed %s but failed\n", - name, p->name, ctx, p->seqno, yesno(p->expected)); + name, p->name, ctx, p->seqno, str_yes_no(p->expected)); return -EINVAL; } diff --git a/drivers/gpu/drm/i915/gt/uc/abi/guc_actions_abi.h b/drivers/gpu/drm/i915/gt/uc/abi/guc_actions_abi.h index 7afdadc7656f..4ef9990ed7f8 100644 --- a/drivers/gpu/drm/i915/gt/uc/abi/guc_actions_abi.h +++ b/drivers/gpu/drm/i915/gt/uc/abi/guc_actions_abi.h @@ -50,7 +50,7 @@ #define HOST2GUC_SELF_CFG_REQUEST_MSG_LEN (GUC_HXG_REQUEST_MSG_MIN_LEN + 3u) #define HOST2GUC_SELF_CFG_REQUEST_MSG_0_MBZ GUC_HXG_REQUEST_MSG_0_DATA0 -#define HOST2GUC_SELF_CFG_REQUEST_MSG_1_KLV_KEY (0xffff << 16) +#define HOST2GUC_SELF_CFG_REQUEST_MSG_1_KLV_KEY (0xffffU << 16) #define HOST2GUC_SELF_CFG_REQUEST_MSG_1_KLV_LEN (0xffff << 0) #define HOST2GUC_SELF_CFG_REQUEST_MSG_2_VALUE32 GUC_HXG_REQUEST_MSG_n_DATAn #define HOST2GUC_SELF_CFG_REQUEST_MSG_3_VALUE64 GUC_HXG_REQUEST_MSG_n_DATAn @@ -122,17 +122,14 @@ enum intel_guc_action { INTEL_GUC_ACTION_SCHED_CONTEXT_MODE_DONE = 0x1002, INTEL_GUC_ACTION_SCHED_ENGINE_MODE_SET = 0x1003, INTEL_GUC_ACTION_SCHED_ENGINE_MODE_DONE = 0x1004, - INTEL_GUC_ACTION_SET_CONTEXT_PRIORITY = 0x1005, - INTEL_GUC_ACTION_SET_CONTEXT_EXECUTION_QUANTUM = 0x1006, - INTEL_GUC_ACTION_SET_CONTEXT_PREEMPTION_TIMEOUT = 0x1007, INTEL_GUC_ACTION_CONTEXT_RESET_NOTIFICATION = 0x1008, INTEL_GUC_ACTION_ENGINE_FAILURE_NOTIFICATION = 0x1009, + INTEL_GUC_ACTION_HOST2GUC_UPDATE_CONTEXT_POLICIES = 0x100B, INTEL_GUC_ACTION_SETUP_PC_GUCRC = 0x3004, INTEL_GUC_ACTION_AUTHENTICATE_HUC = 0x4000, + INTEL_GUC_ACTION_GET_HWCONFIG = 0x4100, INTEL_GUC_ACTION_REGISTER_CONTEXT = 0x4502, INTEL_GUC_ACTION_DEREGISTER_CONTEXT = 0x4503, - INTEL_GUC_ACTION_REGISTER_COMMAND_TRANSPORT_BUFFER = 0x4505, - INTEL_GUC_ACTION_DEREGISTER_COMMAND_TRANSPORT_BUFFER = 0x4506, INTEL_GUC_ACTION_DEREGISTER_CONTEXT_DONE = 0x4600, INTEL_GUC_ACTION_REGISTER_CONTEXT_MULTI_LRC = 0x4601, INTEL_GUC_ACTION_CLIENT_SOFT_RESET = 0x5507, @@ -173,4 +170,11 @@ enum intel_guc_sleep_state_status { #define GUC_LOG_CONTROL_VERBOSITY_MASK (0xF << GUC_LOG_CONTROL_VERBOSITY_SHIFT) #define GUC_LOG_CONTROL_DEFAULT_LOGGING (1 << 8) +enum intel_guc_state_capture_event_status { + INTEL_GUC_STATE_CAPTURE_EVENT_STATUS_SUCCESS = 0x0, + INTEL_GUC_STATE_CAPTURE_EVENT_STATUS_NOSPACE = 0x1, +}; + +#define INTEL_GUC_STATE_CAPTURE_EVENT_STATUS_MASK 0x000000FF + #endif /* _ABI_GUC_ACTIONS_ABI_H */ diff --git a/drivers/gpu/drm/i915/gt/uc/abi/guc_communication_ctb_abi.h b/drivers/gpu/drm/i915/gt/uc/abi/guc_communication_ctb_abi.h index c9086a600bce..df83c1cc7c7a 100644 --- a/drivers/gpu/drm/i915/gt/uc/abi/guc_communication_ctb_abi.h +++ b/drivers/gpu/drm/i915/gt/uc/abi/guc_communication_ctb_abi.h @@ -82,7 +82,7 @@ static_assert(sizeof(struct guc_ct_buffer_desc) == 64); #define GUC_CTB_HDR_LEN 1u #define GUC_CTB_MSG_MIN_LEN GUC_CTB_HDR_LEN #define GUC_CTB_MSG_MAX_LEN 256u -#define GUC_CTB_MSG_0_FENCE (0xffff << 16) +#define GUC_CTB_MSG_0_FENCE (0xffffU << 16) #define GUC_CTB_MSG_0_FORMAT (0xf << 12) #define GUC_CTB_FORMAT_HXG 0u #define GUC_CTB_MSG_0_RESERVED (0xf << 8) diff --git a/drivers/gpu/drm/i915/gt/uc/abi/guc_errors_abi.h b/drivers/gpu/drm/i915/gt/uc/abi/guc_errors_abi.h index c20658ee85a5..8085fb181274 100644 --- a/drivers/gpu/drm/i915/gt/uc/abi/guc_errors_abi.h +++ 
b/drivers/gpu/drm/i915/gt/uc/abi/guc_errors_abi.h @@ -8,6 +8,10 @@ enum intel_guc_response_status { INTEL_GUC_RESPONSE_STATUS_SUCCESS = 0x0, + INTEL_GUC_RESPONSE_NOT_SUPPORTED = 0x20, + INTEL_GUC_RESPONSE_NO_ATTRIBUTE_TABLE = 0x201, + INTEL_GUC_RESPONSE_NO_DECRYPTION_KEY = 0x202, + INTEL_GUC_RESPONSE_DECRYPTION_FAILED = 0x204, INTEL_GUC_RESPONSE_STATUS_GENERIC_FAIL = 0xF000, }; diff --git a/drivers/gpu/drm/i915/gt/uc/abi/guc_klvs_abi.h b/drivers/gpu/drm/i915/gt/uc/abi/guc_klvs_abi.h index f0814a57c191..4a59478c3b5c 100644 --- a/drivers/gpu/drm/i915/gt/uc/abi/guc_klvs_abi.h +++ b/drivers/gpu/drm/i915/gt/uc/abi/guc_klvs_abi.h @@ -6,6 +6,8 @@ #ifndef _ABI_GUC_KLVS_ABI_H #define _ABI_GUC_KLVS_ABI_H +#include <linux/types.h> + /** * DOC: GuC KLV * @@ -79,4 +81,17 @@ #define GUC_KLV_SELF_CFG_G2H_CTB_SIZE_KEY 0x0907 #define GUC_KLV_SELF_CFG_G2H_CTB_SIZE_LEN 1u +/* + * Per context scheduling policy update keys. + */ +enum { + GUC_CONTEXT_POLICIES_KLV_ID_EXECUTION_QUANTUM = 0x2001, + GUC_CONTEXT_POLICIES_KLV_ID_PREEMPTION_TIMEOUT = 0x2002, + GUC_CONTEXT_POLICIES_KLV_ID_SCHEDULING_PRIORITY = 0x2003, + GUC_CONTEXT_POLICIES_KLV_ID_PREEMPT_TO_IDLE_ON_QUANTUM_EXPIRY = 0x2004, + GUC_CONTEXT_POLICIES_KLV_ID_SLPM_GT_FREQUENCY = 0x2005, + + GUC_CONTEXT_POLICIES_KLV_NUM_IDS = 5, +}; + #endif /* _ABI_GUC_KLVS_ABI_H */ diff --git a/drivers/gpu/drm/i915/gt/uc/abi/guc_messages_abi.h b/drivers/gpu/drm/i915/gt/uc/abi/guc_messages_abi.h index 29ac823acd4c..7d5ba4d97d70 100644 --- a/drivers/gpu/drm/i915/gt/uc/abi/guc_messages_abi.h +++ b/drivers/gpu/drm/i915/gt/uc/abi/guc_messages_abi.h @@ -40,7 +40,7 @@ */ #define GUC_HXG_MSG_MIN_LEN 1u -#define GUC_HXG_MSG_0_ORIGIN (0x1 << 31) +#define GUC_HXG_MSG_0_ORIGIN (0x1U << 31) #define GUC_HXG_ORIGIN_HOST 0u #define GUC_HXG_ORIGIN_GUC 1u #define GUC_HXG_MSG_0_TYPE (0x7 << 28) diff --git a/drivers/gpu/drm/i915/gt/uc/guc_capture_fwif.h b/drivers/gpu/drm/i915/gt/uc/guc_capture_fwif.h new file mode 100644 index 000000000000..3624abfd22d1 --- /dev/null +++ b/drivers/gpu/drm/i915/gt/uc/guc_capture_fwif.h @@ -0,0 +1,218 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2021-2022 Intel Corporation + */ + +#ifndef _INTEL_GUC_CAPTURE_FWIF_H +#define _INTEL_GUC_CAPTURE_FWIF_H + +#include <linux/types.h> +#include "intel_guc_fwif.h" + +struct intel_guc; +struct file; + +/** + * struct __guc_capture_bufstate + * + * Book-keeping structure used to track read and write pointers + * as we extract error capture data from the GuC-log-buffer's + * error-capture region as a stream of dwords. + */ +struct __guc_capture_bufstate { + u32 size; + void *data; + u32 rd; + u32 wr; +}; + +/** + * struct __guc_capture_parsed_output - extracted error capture node + * + * A single unit of extracted error-capture output data grouped together + * at an engine-instance level. We keep these nodes in a linked list. + * See cachelist and outlist below. + */ +struct __guc_capture_parsed_output { + /* + * A single set of 3 capture lists: a global-list + * an engine-class-list and an engine-instance list. 
+ * outlist in __guc_capture_parsed_output will keep + * a linked list of these nodes that will eventually + * be detached from outlist and attached to + * i915_gpu_coredump in response to a context reset + */ + struct list_head link; + bool is_partial; + u32 eng_class; + u32 eng_inst; + u32 guc_id; + u32 lrca; + struct gcap_reg_list_info { + u32 vfid; + u32 num_regs; + struct guc_mmio_reg *regs; + } reginfo[GUC_CAPTURE_LIST_TYPE_MAX]; +#define GCAP_PARSED_REGLIST_INDEX_GLOBAL BIT(GUC_CAPTURE_LIST_TYPE_GLOBAL) +#define GCAP_PARSED_REGLIST_INDEX_ENGCLASS BIT(GUC_CAPTURE_LIST_TYPE_ENGINE_CLASS) +#define GCAP_PARSED_REGLIST_INDEX_ENGINST BIT(GUC_CAPTURE_LIST_TYPE_ENGINE_INSTANCE) +}; + +/** + * struct guc_debug_capture_list_header / struct guc_debug_capture_list + * + * As part of ADS registration, these header structures (followed by + * an array of 'struct guc_mmio_reg' entries) are used to register with + * GuC microkernel the list of registers we want it to dump out prior + * to an engine reset. + */ +struct guc_debug_capture_list_header { + u32 info; +#define GUC_CAPTURELISTHDR_NUMDESCR GENMASK(15, 0) +} __packed; + +struct guc_debug_capture_list { + struct guc_debug_capture_list_header header; + struct guc_mmio_reg regs[0]; +} __packed; + +/** + * struct __guc_mmio_reg_descr / struct __guc_mmio_reg_descr_group + * + * intel_guc_capture module uses these structures to maintain static + * tables (per unique platform) that consist of lists of registers + * (offsets, names, flags,...) that are used at the ADS registration + * time as well as during runtime processing and reporting of error- + * capture states generated by GuC just prior to engine reset events. + */ +struct __guc_mmio_reg_descr { + i915_reg_t reg; + u32 flags; + u32 mask; + const char *regname; +}; + +struct __guc_mmio_reg_descr_group { + const struct __guc_mmio_reg_descr *list; + u32 num_regs; + u32 owner; /* see enum guc_capture_owner */ + u32 type; /* see enum guc_capture_type */ + u32 engine; /* as per MAX_ENGINE_CLASS */ + struct __guc_mmio_reg_descr *extlist; /* only used for steered registers */ +}; + +/** + * struct guc_state_capture_header_t / struct guc_state_capture_t / + * guc_state_capture_group_header_t / guc_state_capture_group_t + * + * Prior to resetting engines that have hung or faulted, GuC microkernel + * reports the engine error-state (register values that were read) by + * logging them into the shared GuC log buffer using this hierarchy + * of structures. 
+ */ +struct guc_state_capture_header_t { + u32 owner; +#define CAP_HDR_CAPTURE_VFID GENMASK(7, 0) + u32 info; +#define CAP_HDR_CAPTURE_TYPE GENMASK(3, 0) /* see enum guc_capture_type */ +#define CAP_HDR_ENGINE_CLASS GENMASK(7, 4) /* see GUC_MAX_ENGINE_CLASSES */ +#define CAP_HDR_ENGINE_INSTANCE GENMASK(11, 8) + u32 lrca; /* if type-instance, LRCA (address) that hung, else set to ~0 */ + u32 guc_id; /* if type-instance, context index of hung context, else set to ~0 */ + u32 num_mmios; +#define CAP_HDR_NUM_MMIOS GENMASK(9, 0) +} __packed; + +struct guc_state_capture_t { + struct guc_state_capture_header_t header; + struct guc_mmio_reg mmio_entries[0]; +} __packed; + +enum guc_capture_group_types { + GUC_STATE_CAPTURE_GROUP_TYPE_FULL, + GUC_STATE_CAPTURE_GROUP_TYPE_PARTIAL, + GUC_STATE_CAPTURE_GROUP_TYPE_MAX, +}; + +struct guc_state_capture_group_header_t { + u32 owner; +#define CAP_GRP_HDR_CAPTURE_VFID GENMASK(7, 0) + u32 info; +#define CAP_GRP_HDR_NUM_CAPTURES GENMASK(7, 0) +#define CAP_GRP_HDR_CAPTURE_TYPE GENMASK(15, 8) /* guc_capture_group_types */ +} __packed; + +/* this is the top level structure where an error-capture dump starts */ +struct guc_state_capture_group_t { + struct guc_state_capture_group_header_t grp_header; + struct guc_state_capture_t capture_entries[0]; +} __packed; + +/** + * struct __guc_capture_ads_cache + * + * A structure to cache register lists that were populated and registered + * with GuC at startup during ADS registration. This allows much quicker + * GuC resets without re-parsing all the tables for the given gt. + */ +struct __guc_capture_ads_cache { + bool is_valid; + void *ptr; + size_t size; + int status; +}; + +/** + * struct intel_guc_state_capture + * + * Internal context of the intel_guc_capture module. + */ +struct intel_guc_state_capture { + /** + * @reglists: static table of register lists used for error-capture state. + */ + const struct __guc_mmio_reg_descr_group *reglists; + + /** + * @extlists: allocated table of steered register lists used for error-capture state. + * + * NOTE: steered registers have multiple instances depending on the HW configuration + * (slices or dual-sub-slices) and thus depend on HW fuses discovered at startup + */ + struct __guc_mmio_reg_descr_group *extlists; + + /** + * @ads_cache: cached register lists that are ADS format ready + */ + struct __guc_capture_ads_cache ads_cache[GUC_CAPTURE_LIST_INDEX_MAX] + [GUC_CAPTURE_LIST_TYPE_MAX] + [GUC_MAX_ENGINE_CLASSES]; + void *ads_null_cache; + + /** + * @cachelist: Pool of pre-allocated nodes for error capture output + * + * We need this pool of pre-allocated nodes because we cannot + * dynamically allocate new nodes when receiving the G2H notification + * because the event handlers for all G2H event-processing are called + * by the ct processing worker queue and when that queue is being + * processed, there is no absolute guarantee that we are not in the + * midst of a GT reset operation (which doesn't allow allocations). + */ + struct list_head cachelist; +#define PREALLOC_NODES_MAX_COUNT (3 * GUC_MAX_ENGINE_CLASSES * GUC_MAX_INSTANCES_PER_CLASS) +#define PREALLOC_NODES_DEFAULT_NUMREGS 64 + int max_mmio_per_node; + + /** + * @outlist: List of parsed error-capture output nodes + * + * A linked list of parsed GuC error-capture output data before + * reporting with formatting via i915_gpu_coredump. 
Each node in this linked list shall + * contain a single engine-capture including global, engine-class and + * engine-instance register dumps as per guc_capture_parsed_output_node + */ + struct list_head outlist; +}; + +#endif /* _INTEL_GUC_CAPTURE_FWIF_H */ diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.c b/drivers/gpu/drm/i915/gt/uc/intel_guc.c index 447a976c9f25..2c4ad4a65089 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.c @@ -9,8 +9,9 @@ #include "gt/intel_gt_pm_irq.h" #include "gt/intel_gt_regs.h" #include "intel_guc.h" -#include "intel_guc_slpc.h" #include "intel_guc_ads.h" +#include "intel_guc_capture.h" +#include "intel_guc_slpc.h" #include "intel_guc_submission.h" #include "i915_drv.h" #include "i915_irq.h" @@ -291,6 +292,41 @@ static u32 guc_ctl_wa_flags(struct intel_guc *guc) GRAPHICS_VER_FULL(gt->i915) < IP_VER(12, 50)) flags |= GUC_WA_POLLCS; + /* Wa_16011759253:dg2_g10:a0 */ + if (IS_DG2_GRAPHICS_STEP(gt->i915, G10, STEP_A0, STEP_B0)) + flags |= GUC_WA_GAM_CREDITS; + + /* Wa_14014475959:dg2 */ + if (IS_DG2(gt->i915)) + flags |= GUC_WA_HOLD_CCS_SWITCHOUT; + + /* + * Wa_14012197797:dg2_g10:a0,dg2_g11:a0 + * Wa_22011391025:dg2_g10,dg2_g11,dg2_g12 + * + * The same WA bit is used for both and 22011391025 is applicable to + * all DG2. + */ + if (IS_DG2(gt->i915)) + flags |= GUC_WA_DUAL_QUEUE; + + /* Wa_22011802037: graphics version 12 */ + if (GRAPHICS_VER(gt->i915) == 12) + flags |= GUC_WA_PRE_PARSER; + + /* Wa_16011777198:dg2 */ + if (IS_DG2_GRAPHICS_STEP(gt->i915, G10, STEP_A0, STEP_C0) || + IS_DG2_GRAPHICS_STEP(gt->i915, G11, STEP_A0, STEP_B0)) + flags |= GUC_WA_RCS_RESET_BEFORE_RC6; + + /* + * Wa_22012727170:dg2_g10[a0-c0), dg2_g11[a0..) + * Wa_22012727685:dg2_g11[a0..) + */ + if (IS_DG2_GRAPHICS_STEP(gt->i915, G10, STEP_A0, STEP_C0) || + IS_DG2_GRAPHICS_STEP(gt->i915, G11, STEP_A0, STEP_FOREVER)) + flags |= GUC_WA_CONTEXT_ISOLATION; + return flags; } @@ -362,9 +398,14 @@ int intel_guc_init(struct intel_guc *guc) if (ret) goto err_fw; - ret = intel_guc_ads_create(guc); + ret = intel_guc_capture_init(guc); if (ret) goto err_log; + + ret = intel_guc_ads_create(guc); + if (ret) + goto err_capture; + GEM_BUG_ON(!guc->ads_vma); ret = intel_guc_ct_init(&guc->ct); @@ -403,6 +444,8 @@ err_ct: intel_guc_ct_fini(&guc->ct); err_ads: intel_guc_ads_destroy(guc); +err_capture: + intel_guc_capture_destroy(guc); err_log: intel_guc_log_destroy(&guc->log); err_fw: @@ -430,6 +473,7 @@ void intel_guc_fini(struct intel_guc *guc) intel_guc_ct_fini(&guc->ct); intel_guc_ads_destroy(guc); + intel_guc_capture_destroy(guc); intel_guc_log_destroy(&guc->log); intel_uc_fw_fini(&guc->fw); } diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.h b/drivers/gpu/drm/i915/gt/uc/intel_guc.h index 2488d1197f3e..966e69a8b1c1 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc.h +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.h @@ -10,18 +10,19 @@ #include <linux/iosys-map.h> #include <linux/xarray.h> -#include "intel_uncore.h" +#include "intel_guc_ct.h" #include "intel_guc_fw.h" #include "intel_guc_fwif.h" -#include "intel_guc_ct.h" #include "intel_guc_log.h" #include "intel_guc_reg.h" #include "intel_guc_slpc_types.h" #include "intel_uc_fw.h" +#include "intel_uncore.h" #include "i915_utils.h" #include "i915_vma.h" struct __guc_ads_blob; +struct intel_guc_state_capture; /** * struct intel_guc - Top level structure of GuC. 
@@ -38,6 +39,8 @@ struct intel_guc { struct intel_guc_ct ct; /** @slpc: sub-structure containing SLPC related data and objects */ struct intel_guc_slpc slpc; + /** @capture: the error-state-capture module's data and objects */ + struct intel_guc_state_capture *capture; /** @sched_engine: Global engine used to submit requests to GuC */ struct i915_sched_engine *sched_engine; @@ -138,6 +141,8 @@ struct intel_guc { bool submission_supported; /** @submission_selected: tracks whether the user enabled GuC submission */ bool submission_selected; + /** @submission_initialized: tracks whether GuC submission has been initialised */ + bool submission_initialized; /** * @rc_supported: tracks whether we support GuC rc on the current platform */ @@ -160,14 +165,11 @@ struct intel_guc { struct guc_mmio_reg *ads_regset; /** @ads_golden_ctxt_size: size of the golden contexts in the ADS */ u32 ads_golden_ctxt_size; + /** @ads_capture_size: size of register lists in the ADS used for error capture */ + u32 ads_capture_size; /** @ads_engine_usage_size: size of engine usage in the ADS */ u32 ads_engine_usage_size; - /** @lrc_desc_pool: object allocated to hold the GuC LRC descriptor pool */ - struct i915_vma *lrc_desc_pool; - /** @lrc_desc_pool_vaddr: contents of the GuC LRC descriptor pool */ - void *lrc_desc_pool_vaddr; - /** * @context_lookup: used to resolve intel_context from guc_id, if a * context is present in this structure it is registered with the GuC @@ -431,6 +433,9 @@ int intel_guc_engine_failure_process_msg(struct intel_guc *guc, int intel_guc_error_capture_process_msg(struct intel_guc *guc, const u32 *msg, u32 len); +struct intel_engine_cs * +intel_guc_lookup_engine(struct intel_guc *guc, u8 guc_class, u8 instance); + void intel_guc_find_hung_context(struct intel_engine_cs *engine); int intel_guc_global_policies_update(struct intel_guc *guc); diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c index 92cb88248391..3eabf4cf8eec 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c @@ -11,6 +11,7 @@ #include "gt/intel_lrc.h" #include "gt/shmem_utils.h" #include "intel_guc_ads.h" +#include "intel_guc_capture.h" #include "intel_guc_fwif.h" #include "intel_uc.h" #include "i915_drv.h" @@ -86,8 +87,7 @@ static u32 guc_ads_golden_ctxt_size(struct intel_guc *guc) static u32 guc_ads_capture_size(struct intel_guc *guc) { - /* FIXME: Allocate a proper capture list */ - return PAGE_ALIGN(PAGE_SIZE); + return PAGE_ALIGN(guc->ads_capture_size); } static u32 guc_ads_private_data_size(struct intel_guc *guc) @@ -276,15 +276,24 @@ __mmio_reg_add(struct temp_regset *regset, struct guc_mmio_reg *reg) return slot; } -static long __must_check guc_mmio_reg_add(struct temp_regset *regset, - u32 offset, u32 flags) +#define GUC_REGSET_STEERING(group, instance) ( \ + FIELD_PREP(GUC_REGSET_STEERING_GROUP, (group)) | \ + FIELD_PREP(GUC_REGSET_STEERING_INSTANCE, (instance)) | \ + GUC_REGSET_NEEDS_STEERING \ +) + +static long __must_check guc_mmio_reg_add(struct intel_gt *gt, + struct temp_regset *regset, + i915_reg_t reg, u32 flags) { u32 count = regset->storage_used - (regset->registers - regset->storage); - struct guc_mmio_reg reg = { + u32 offset = i915_mmio_reg_offset(reg); + struct guc_mmio_reg entry = { .offset = offset, .flags = flags, }; struct guc_mmio_reg *slot; + u8 group, inst; /* * The mmio list is built using separate lists within the driver. 
@@ -292,11 +301,22 @@ static long __must_check guc_mmio_reg_add(struct temp_regset *regset, * register more than once. Do not consider this an error; silently * move on if the register is already in the list. */ - if (bsearch(®, regset->registers, count, - sizeof(reg), guc_mmio_reg_cmp)) + if (bsearch(&entry, regset->registers, count, + sizeof(entry), guc_mmio_reg_cmp)) return 0; - slot = __mmio_reg_add(regset, ®); + /* + * The GuC doesn't have a default steering, so we need to explicitly + * steer all registers that need steering. However, we do not keep track + * of all the steering ranges, only of those that have a chance of using + * a non-default steering from the i915 pov. Instead of adding such + * tracking, it is easier to just program the default steering for all + * regs that don't need a non-default one. + */ + intel_gt_get_valid_steering_for_reg(gt, reg, &group, &inst); + entry.flags |= GUC_REGSET_STEERING(group, inst); + + slot = __mmio_reg_add(regset, &entry); if (IS_ERR(slot)) return PTR_ERR(slot); @@ -311,14 +331,16 @@ static long __must_check guc_mmio_reg_add(struct temp_regset *regset, return 0; } -#define GUC_MMIO_REG_ADD(regset, reg, masked) \ - guc_mmio_reg_add(regset, \ - i915_mmio_reg_offset((reg)), \ +#define GUC_MMIO_REG_ADD(gt, regset, reg, masked) \ + guc_mmio_reg_add(gt, \ + regset, \ + (reg), \ (masked) ? GUC_REGSET_MASKED : 0) static int guc_mmio_regset_init(struct temp_regset *regset, struct intel_engine_cs *engine) { + struct intel_gt *gt = engine->gt; const u32 base = engine->mmio_base; struct i915_wa_list *wal = &engine->wa_list; struct i915_wa *wa; @@ -331,26 +353,26 @@ static int guc_mmio_regset_init(struct temp_regset *regset, */ regset->registers = regset->storage + regset->storage_used; - ret |= GUC_MMIO_REG_ADD(regset, RING_MODE_GEN7(base), true); - ret |= GUC_MMIO_REG_ADD(regset, RING_HWS_PGA(base), false); - ret |= GUC_MMIO_REG_ADD(regset, RING_IMR(base), false); + ret |= GUC_MMIO_REG_ADD(gt, regset, RING_MODE_GEN7(base), true); + ret |= GUC_MMIO_REG_ADD(gt, regset, RING_HWS_PGA(base), false); + ret |= GUC_MMIO_REG_ADD(gt, regset, RING_IMR(base), false); - if (engine->class == RENDER_CLASS && + if ((engine->flags & I915_ENGINE_FIRST_RENDER_COMPUTE) && CCS_MASK(engine->gt)) - ret |= GUC_MMIO_REG_ADD(regset, GEN12_RCU_MODE, true); + ret |= GUC_MMIO_REG_ADD(gt, regset, GEN12_RCU_MODE, true); for (i = 0, wa = wal->list; i < wal->count; i++, wa++) - ret |= GUC_MMIO_REG_ADD(regset, wa->reg, wa->masked_reg); + ret |= GUC_MMIO_REG_ADD(gt, regset, wa->reg, wa->masked_reg); /* Be extra paranoid and include all whitelist registers. */ for (i = 0; i < RING_MAX_NONPRIV_SLOTS; i++) - ret |= GUC_MMIO_REG_ADD(regset, + ret |= GUC_MMIO_REG_ADD(gt, regset, RING_FORCE_TO_NONPRIV(base, i), false); /* add in local MOCS registers */ for (i = 0; i < GEN9_LNCFCMOCS_REG_COUNT; i++) - ret |= GUC_MMIO_REG_ADD(regset, GEN9_LNCFCMOCS(i), false); + ret |= GUC_MMIO_REG_ADD(gt, regset, GEN9_LNCFCMOCS(i), false); return ret ? 
-1 : 0; } @@ -433,7 +455,7 @@ static void guc_mmio_reg_state_init(struct intel_guc *guc) static void fill_engine_enable_masks(struct intel_gt *gt, struct iosys_map *info_map) { - info_map_write(info_map, engine_enabled_masks[GUC_RENDER_CLASS], 1); + info_map_write(info_map, engine_enabled_masks[GUC_RENDER_CLASS], RCS_MASK(gt)); info_map_write(info_map, engine_enabled_masks[GUC_COMPUTE_CLASS], CCS_MASK(gt)); info_map_write(info_map, engine_enabled_masks[GUC_BLITTER_CLASS], 1); info_map_write(info_map, engine_enabled_masks[GUC_VIDEO_CLASS], VDBOX_MASK(gt)); @@ -589,24 +611,119 @@ static void guc_init_golden_context(struct intel_guc *guc) GEM_BUG_ON(guc->ads_golden_ctxt_size != total_size); } -static void guc_capture_list_init(struct intel_guc *guc) +static int +guc_capture_prep_lists(struct intel_guc *guc) { + struct intel_gt *gt = guc_to_gt(guc); + struct drm_i915_private *i915 = guc_to_gt(guc)->i915; + u32 ads_ggtt, capture_offset, null_ggtt, total_size = 0; + struct guc_gt_system_info local_info; + struct iosys_map info_map; + bool ads_is_mapped; + size_t size = 0; + void *ptr; int i, j; - u32 addr_ggtt, offset; - offset = guc_ads_capture_offset(guc); - addr_ggtt = intel_guc_ggtt_offset(guc, guc->ads_vma) + offset; + ads_is_mapped = !iosys_map_is_null(&guc->ads_map); + if (ads_is_mapped) { + capture_offset = guc_ads_capture_offset(guc); + ads_ggtt = intel_guc_ggtt_offset(guc, guc->ads_vma); + info_map = IOSYS_MAP_INIT_OFFSET(&guc->ads_map, + offsetof(struct __guc_ads_blob, system_info)); + } else { + memset(&local_info, 0, sizeof(local_info)); + iosys_map_set_vaddr(&info_map, &local_info); + fill_engine_enable_masks(gt, &info_map); + } - /* FIXME: Populate a proper capture list */ + /* first, set aside the first page for a capture_list with zero descriptors */ + total_size = PAGE_SIZE; + if (ads_is_mapped) { + if (!intel_guc_capture_getnullheader(guc, &ptr, &size)) + iosys_map_memcpy_to(&guc->ads_map, capture_offset, ptr, size); + null_ggtt = ads_ggtt + capture_offset; + capture_offset += PAGE_SIZE; + } for (i = 0; i < GUC_CAPTURE_LIST_INDEX_MAX; i++) { for (j = 0; j < GUC_MAX_ENGINE_CLASSES; j++) { - ads_blob_write(guc, ads.capture_instance[i][j], addr_ggtt); - ads_blob_write(guc, ads.capture_class[i][j], addr_ggtt); - } - ads_blob_write(guc, ads.capture_global[i], addr_ggtt); + /* null list if we don't have said engine or list */ + if (!info_map_read(&info_map, engine_enabled_masks[j])) { + if (ads_is_mapped) { + ads_blob_write(guc, ads.capture_class[i][j], null_ggtt); + ads_blob_write(guc, ads.capture_instance[i][j], null_ggtt); + } + continue; + } + if (intel_guc_capture_getlistsize(guc, i, + GUC_CAPTURE_LIST_TYPE_ENGINE_CLASS, + j, &size)) { + if (ads_is_mapped) + ads_blob_write(guc, ads.capture_class[i][j], null_ggtt); + goto engine_instance_list; + } + total_size += size; + if (ads_is_mapped) { + if (total_size > guc->ads_capture_size || + intel_guc_capture_getlist(guc, i, + GUC_CAPTURE_LIST_TYPE_ENGINE_CLASS, + j, &ptr)) { + ads_blob_write(guc, ads.capture_class[i][j], null_ggtt); + continue; + } + ads_blob_write(guc, ads.capture_class[i][j], ads_ggtt + + capture_offset); + iosys_map_memcpy_to(&guc->ads_map, capture_offset, ptr, size); + capture_offset += size; + } +engine_instance_list: + if (intel_guc_capture_getlistsize(guc, i, + GUC_CAPTURE_LIST_TYPE_ENGINE_INSTANCE, + j, &size)) { + if (ads_is_mapped) + ads_blob_write(guc, ads.capture_instance[i][j], null_ggtt); + continue; + } + total_size += size; + if (ads_is_mapped) { + if (total_size > guc->ads_capture_size || + 
intel_guc_capture_getlist(guc, i, + GUC_CAPTURE_LIST_TYPE_ENGINE_INSTANCE, + j, &ptr)) { + ads_blob_write(guc, ads.capture_instance[i][j], null_ggtt); + continue; + } + ads_blob_write(guc, ads.capture_instance[i][j], ads_ggtt + + capture_offset); + iosys_map_memcpy_to(&guc->ads_map, capture_offset, ptr, size); + capture_offset += size; + } + } + if (intel_guc_capture_getlistsize(guc, i, GUC_CAPTURE_LIST_TYPE_GLOBAL, 0, &size)) { + if (ads_is_mapped) + ads_blob_write(guc, ads.capture_global[i], null_ggtt); + continue; + } + total_size += size; + if (ads_is_mapped) { + if (total_size > guc->ads_capture_size || + intel_guc_capture_getlist(guc, i, GUC_CAPTURE_LIST_TYPE_GLOBAL, 0, + &ptr)) { + ads_blob_write(guc, ads.capture_global[i], null_ggtt); + continue; + } + ads_blob_write(guc, ads.capture_global[i], ads_ggtt + capture_offset); + iosys_map_memcpy_to(&guc->ads_map, capture_offset, ptr, size); + capture_offset += size; + } } + + if (guc->ads_capture_size && guc->ads_capture_size != PAGE_ALIGN(total_size)) + drm_warn(&i915->drm, "GuC->ADS->Capture alloc size changed from %d to %d\n", + guc->ads_capture_size, PAGE_ALIGN(total_size)); + + return PAGE_ALIGN(total_size); } static void __guc_ads_init(struct intel_guc *guc) @@ -644,8 +761,8 @@ static void __guc_ads_init(struct intel_guc *guc) base = intel_guc_ggtt_offset(guc, guc->ads_vma); - /* Capture list for hang debug */ - guc_capture_list_init(guc); + /* Lists for error capture debug */ + guc_capture_prep_lists(guc); /* ADS */ ads_blob_write(guc, ads.scheduler_policies, base + @@ -693,6 +810,12 @@ int intel_guc_ads_create(struct intel_guc *guc) return ret; guc->ads_golden_ctxt_size = ret; + /* Likewise the capture lists: */ + ret = guc_capture_prep_lists(guc); + if (ret < 0) + return ret; + guc->ads_capture_size = ret; + /* Now the total size can be determined: */ size = guc_ads_blob_size(guc); diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c new file mode 100644 index 000000000000..c4e25966d3e9 --- /dev/null +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c @@ -0,0 +1,1657 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright © 2021-2022 Intel Corporation + */ + +#include <linux/types.h> + +#include <drm/drm_print.h> + +#include "gt/intel_engine_regs.h" +#include "gt/intel_gt.h" +#include "gt/intel_gt_regs.h" +#include "gt/intel_lrc.h" +#include "guc_capture_fwif.h" +#include "intel_guc_capture.h" +#include "intel_guc_fwif.h" +#include "i915_drv.h" +#include "i915_gpu_error.h" +#include "i915_irq.h" +#include "i915_memcpy.h" +#include "i915_reg.h" + +/* + * Define all device tables of GuC error capture register lists + * NOTE: For engine-registers, GuC only needs the register offsets + * from the engine-mmio-base + */ +#define COMMON_BASE_GLOBAL \ + { FORCEWAKE_MT, 0, 0, "FORCEWAKE" } + +#define COMMON_GEN9BASE_GLOBAL \ + { GEN8_FAULT_TLB_DATA0, 0, 0, "GEN8_FAULT_TLB_DATA0" }, \ + { GEN8_FAULT_TLB_DATA1, 0, 0, "GEN8_FAULT_TLB_DATA1" }, \ + { ERROR_GEN6, 0, 0, "ERROR_GEN6" }, \ + { DONE_REG, 0, 0, "DONE_REG" }, \ + { HSW_GTT_CACHE_EN, 0, 0, "HSW_GTT_CACHE_EN" } + +#define COMMON_GEN12BASE_GLOBAL \ + { GEN12_FAULT_TLB_DATA0, 0, 0, "GEN12_FAULT_TLB_DATA0" }, \ + { GEN12_FAULT_TLB_DATA1, 0, 0, "GEN12_FAULT_TLB_DATA1" }, \ + { GEN12_AUX_ERR_DBG, 0, 0, "AUX_ERR_DBG" }, \ + { GEN12_GAM_DONE, 0, 0, "GAM_DONE" }, \ + { GEN12_RING_FAULT_REG, 0, 0, "FAULT_REG" } + +#define COMMON_BASE_ENGINE_INSTANCE \ + { RING_PSMI_CTL(0), 0, 0, "RC PSMI" }, \ + { RING_ESR(0), 0, 0, "ESR" }, \ + { 
RING_DMA_FADD(0), 0, 0, "RING_DMA_FADD_LDW" }, \ + { RING_DMA_FADD_UDW(0), 0, 0, "RING_DMA_FADD_UDW" }, \ + { RING_IPEIR(0), 0, 0, "IPEIR" }, \ + { RING_IPEHR(0), 0, 0, "IPEHR" }, \ + { RING_INSTPS(0), 0, 0, "INSTPS" }, \ + { RING_BBADDR(0), 0, 0, "RING_BBADDR_LOW32" }, \ + { RING_BBADDR_UDW(0), 0, 0, "RING_BBADDR_UP32" }, \ + { RING_BBSTATE(0), 0, 0, "BB_STATE" }, \ + { CCID(0), 0, 0, "CCID" }, \ + { RING_ACTHD(0), 0, 0, "ACTHD_LDW" }, \ + { RING_ACTHD_UDW(0), 0, 0, "ACTHD_UDW" }, \ + { RING_INSTPM(0), 0, 0, "INSTPM" }, \ + { RING_INSTDONE(0), 0, 0, "INSTDONE" }, \ + { RING_NOPID(0), 0, 0, "RING_NOPID" }, \ + { RING_START(0), 0, 0, "START" }, \ + { RING_HEAD(0), 0, 0, "HEAD" }, \ + { RING_TAIL(0), 0, 0, "TAIL" }, \ + { RING_CTL(0), 0, 0, "CTL" }, \ + { RING_MI_MODE(0), 0, 0, "MODE" }, \ + { RING_CONTEXT_CONTROL(0), 0, 0, "RING_CONTEXT_CONTROL" }, \ + { RING_HWS_PGA(0), 0, 0, "HWS" }, \ + { RING_MODE_GEN7(0), 0, 0, "GFX_MODE" }, \ + { GEN8_RING_PDP_LDW(0, 0), 0, 0, "PDP0_LDW" }, \ + { GEN8_RING_PDP_UDW(0, 0), 0, 0, "PDP0_UDW" }, \ + { GEN8_RING_PDP_LDW(0, 1), 0, 0, "PDP1_LDW" }, \ + { GEN8_RING_PDP_UDW(0, 1), 0, 0, "PDP1_UDW" }, \ + { GEN8_RING_PDP_LDW(0, 2), 0, 0, "PDP2_LDW" }, \ + { GEN8_RING_PDP_UDW(0, 2), 0, 0, "PDP2_UDW" }, \ + { GEN8_RING_PDP_LDW(0, 3), 0, 0, "PDP3_LDW" }, \ + { GEN8_RING_PDP_UDW(0, 3), 0, 0, "PDP3_UDW" } + +#define COMMON_BASE_HAS_EU \ + { EIR, 0, 0, "EIR" } + +#define COMMON_BASE_RENDER \ + { GEN7_SC_INSTDONE, 0, 0, "GEN7_SC_INSTDONE" } + +#define COMMON_GEN12BASE_RENDER \ + { GEN12_SC_INSTDONE_EXTRA, 0, 0, "GEN12_SC_INSTDONE_EXTRA" }, \ + { GEN12_SC_INSTDONE_EXTRA2, 0, 0, "GEN12_SC_INSTDONE_EXTRA2" } + +#define COMMON_GEN12BASE_VEC \ + { GEN12_SFC_DONE(0), 0, 0, "SFC_DONE[0]" }, \ + { GEN12_SFC_DONE(1), 0, 0, "SFC_DONE[1]" }, \ + { GEN12_SFC_DONE(2), 0, 0, "SFC_DONE[2]" }, \ + { GEN12_SFC_DONE(3), 0, 0, "SFC_DONE[3]" } + +/* XE_LPD - Global */ +static const struct __guc_mmio_reg_descr xe_lpd_global_regs[] = { + COMMON_BASE_GLOBAL, + COMMON_GEN9BASE_GLOBAL, + COMMON_GEN12BASE_GLOBAL, +}; + +/* XE_LPD - Render / Compute Per-Class */ +static const struct __guc_mmio_reg_descr xe_lpd_rc_class_regs[] = { + COMMON_BASE_HAS_EU, + COMMON_BASE_RENDER, + COMMON_GEN12BASE_RENDER, +}; + +/* GEN9/XE_LPD - Render / Compute Per-Engine-Instance */ +static const struct __guc_mmio_reg_descr xe_lpd_rc_inst_regs[] = { + COMMON_BASE_ENGINE_INSTANCE, +}; + +/* GEN9/XE_LPD - Media Decode/Encode Per-Engine-Instance */ +static const struct __guc_mmio_reg_descr xe_lpd_vd_inst_regs[] = { + COMMON_BASE_ENGINE_INSTANCE, +}; + +/* XE_LPD - Video Enhancement Per-Class */ +static const struct __guc_mmio_reg_descr xe_lpd_vec_class_regs[] = { + COMMON_GEN12BASE_VEC, +}; + +/* GEN9/XE_LPD - Video Enhancement Per-Engine-Instance */ +static const struct __guc_mmio_reg_descr xe_lpd_vec_inst_regs[] = { + COMMON_BASE_ENGINE_INSTANCE, +}; + +/* GEN9/XE_LPD - Blitter Per-Engine-Instance */ +static const struct __guc_mmio_reg_descr xe_lpd_blt_inst_regs[] = { + COMMON_BASE_ENGINE_INSTANCE, +}; + +/* GEN9 - Global */ +static const struct __guc_mmio_reg_descr default_global_regs[] = { + COMMON_BASE_GLOBAL, + COMMON_GEN9BASE_GLOBAL, +}; + +static const struct __guc_mmio_reg_descr default_rc_class_regs[] = { + COMMON_BASE_HAS_EU, + COMMON_BASE_RENDER, +}; + +/* + * Empty lists: + * GEN9/XE_LPD - Blitter Per-Class + * GEN9/XE_LPD - Media Decode/Encode Per-Class + * GEN9 - VEC Class + */ +static const struct __guc_mmio_reg_descr empty_regs_list[] = { +}; + +#define TO_GCAP_DEF_OWNER(x) (GUC_CAPTURE_LIST_INDEX_##x) 
+#define TO_GCAP_DEF_TYPE(x) (GUC_CAPTURE_LIST_TYPE_##x) +#define MAKE_REGLIST(regslist, regsowner, regstype, class) \ + { \ + regslist, \ + ARRAY_SIZE(regslist), \ + TO_GCAP_DEF_OWNER(regsowner), \ + TO_GCAP_DEF_TYPE(regstype), \ + class, \ + NULL, \ + } + +/* List of lists */ +static struct __guc_mmio_reg_descr_group default_lists[] = { + MAKE_REGLIST(default_global_regs, PF, GLOBAL, 0), + MAKE_REGLIST(default_rc_class_regs, PF, ENGINE_CLASS, GUC_RENDER_CLASS), + MAKE_REGLIST(xe_lpd_rc_inst_regs, PF, ENGINE_INSTANCE, GUC_RENDER_CLASS), + MAKE_REGLIST(empty_regs_list, PF, ENGINE_CLASS, GUC_VIDEO_CLASS), + MAKE_REGLIST(xe_lpd_vd_inst_regs, PF, ENGINE_INSTANCE, GUC_VIDEO_CLASS), + MAKE_REGLIST(empty_regs_list, PF, ENGINE_CLASS, GUC_VIDEOENHANCE_CLASS), + MAKE_REGLIST(xe_lpd_vec_inst_regs, PF, ENGINE_INSTANCE, GUC_VIDEOENHANCE_CLASS), + MAKE_REGLIST(empty_regs_list, PF, ENGINE_CLASS, GUC_BLITTER_CLASS), + MAKE_REGLIST(xe_lpd_blt_inst_regs, PF, ENGINE_INSTANCE, GUC_BLITTER_CLASS), + {} +}; + +static const struct __guc_mmio_reg_descr_group xe_lpd_lists[] = { + MAKE_REGLIST(xe_lpd_global_regs, PF, GLOBAL, 0), + MAKE_REGLIST(xe_lpd_rc_class_regs, PF, ENGINE_CLASS, GUC_RENDER_CLASS), + MAKE_REGLIST(xe_lpd_rc_inst_regs, PF, ENGINE_INSTANCE, GUC_RENDER_CLASS), + MAKE_REGLIST(empty_regs_list, PF, ENGINE_CLASS, GUC_VIDEO_CLASS), + MAKE_REGLIST(xe_lpd_vd_inst_regs, PF, ENGINE_INSTANCE, GUC_VIDEO_CLASS), + MAKE_REGLIST(xe_lpd_vec_class_regs, PF, ENGINE_CLASS, GUC_VIDEOENHANCE_CLASS), + MAKE_REGLIST(xe_lpd_vec_inst_regs, PF, ENGINE_INSTANCE, GUC_VIDEOENHANCE_CLASS), + MAKE_REGLIST(empty_regs_list, PF, ENGINE_CLASS, GUC_BLITTER_CLASS), + MAKE_REGLIST(xe_lpd_blt_inst_regs, PF, ENGINE_INSTANCE, GUC_BLITTER_CLASS), + {} +}; + +static const struct __guc_mmio_reg_descr_group * +guc_capture_get_one_list(const struct __guc_mmio_reg_descr_group *reglists, + u32 owner, u32 type, u32 id) +{ + int i; + + if (!reglists) + return NULL; + + for (i = 0; reglists[i].list; ++i) { + if (reglists[i].owner == owner && reglists[i].type == type && + (reglists[i].engine == id || reglists[i].type == GUC_CAPTURE_LIST_TYPE_GLOBAL)) + return &reglists[i]; + } + + return NULL; +} + +static struct __guc_mmio_reg_descr_group * +guc_capture_get_one_ext_list(struct __guc_mmio_reg_descr_group *reglists, + u32 owner, u32 type, u32 id) +{ + int i; + + if (!reglists) + return NULL; + + for (i = 0; reglists[i].extlist; ++i) { + if (reglists[i].owner == owner && reglists[i].type == type && + (reglists[i].engine == id || reglists[i].type == GUC_CAPTURE_LIST_TYPE_GLOBAL)) + return &reglists[i]; + } + + return NULL; +} + +static void guc_capture_free_extlists(struct __guc_mmio_reg_descr_group *reglists) +{ + int i = 0; + + if (!reglists) + return; + + while (reglists[i].extlist) + kfree(reglists[i++].extlist); +} + +struct __ext_steer_reg { + const char *name; + i915_reg_t reg; +}; + +static const struct __ext_steer_reg xe_extregs[] = { + {"GEN7_SAMPLER_INSTDONE", GEN7_SAMPLER_INSTDONE}, + {"GEN7_ROW_INSTDONE", GEN7_ROW_INSTDONE} +}; + +static void __fill_ext_reg(struct __guc_mmio_reg_descr *ext, + const struct __ext_steer_reg *extlist, + int slice_id, int subslice_id) +{ + ext->reg = extlist->reg; + ext->flags = FIELD_PREP(GUC_REGSET_STEERING_GROUP, slice_id); + ext->flags |= FIELD_PREP(GUC_REGSET_STEERING_INSTANCE, subslice_id); + ext->regname = extlist->name; +} + +static int +__alloc_ext_regs(struct __guc_mmio_reg_descr_group *newlist, + const struct __guc_mmio_reg_descr_group *rootlist, int num_regs) +{ + struct __guc_mmio_reg_descr *list; + +
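/* one flat array: every steered instance of each extension register gets its own descriptor slot */ +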
list = kcalloc(num_regs, sizeof(struct __guc_mmio_reg_descr), GFP_KERNEL); + if (!list) + return -ENOMEM; + + newlist->extlist = list; + newlist->num_regs = num_regs; + newlist->owner = rootlist->owner; + newlist->engine = rootlist->engine; + newlist->type = rootlist->type; + + return 0; +} + +static void +guc_capture_alloc_steered_lists_xe_lpd(struct intel_guc *guc, + const struct __guc_mmio_reg_descr_group *lists) +{ + struct intel_gt *gt = guc_to_gt(guc); + struct drm_i915_private *i915 = guc_to_gt(guc)->i915; + int slice, subslice, i, num_steer_regs, num_tot_regs = 0; + const struct __guc_mmio_reg_descr_group *list; + struct __guc_mmio_reg_descr_group *extlists; + struct __guc_mmio_reg_descr *extarray; + struct sseu_dev_info *sseu; + + /* In XE_LPD we only have steered registers for the render-class */ + list = guc_capture_get_one_list(lists, GUC_CAPTURE_LIST_INDEX_PF, + GUC_CAPTURE_LIST_TYPE_ENGINE_CLASS, GUC_RENDER_CLASS); + /* skip if extlists was previously allocated */ + if (!list || guc->capture->extlists) + return; + + num_steer_regs = ARRAY_SIZE(xe_extregs); + + sseu = &gt->info.sseu; + for_each_instdone_slice_subslice(i915, sseu, slice, subslice) + num_tot_regs += num_steer_regs; + + if (!num_tot_regs) + return; + + /* allocate an extra for an end marker */ + extlists = kcalloc(2, sizeof(struct __guc_mmio_reg_descr_group), GFP_KERNEL); + if (!extlists) + return; + + if (__alloc_ext_regs(&extlists[0], list, num_tot_regs)) { + kfree(extlists); + return; + } + + extarray = extlists[0].extlist; + for_each_instdone_slice_subslice(i915, sseu, slice, subslice) { + for (i = 0; i < num_steer_regs; ++i) { + __fill_ext_reg(extarray, &xe_extregs[i], slice, subslice); + ++extarray; + } + } + + guc->capture->extlists = extlists; +} + +static const struct __ext_steer_reg xehpg_extregs[] = { + {"XEHPG_INSTDONE_GEOM_SVG", XEHPG_INSTDONE_GEOM_SVG} +}; + +static bool __has_xehpg_extregs(u32 ipver) +{ + return (ipver >= IP_VER(12, 55)); +} + +static void +guc_capture_alloc_steered_lists_xe_hpg(struct intel_guc *guc, + const struct __guc_mmio_reg_descr_group *lists, + u32 ipver) +{ + struct intel_gt *gt = guc_to_gt(guc); + struct drm_i915_private *i915 = guc_to_gt(guc)->i915; + struct sseu_dev_info *sseu; + int slice, subslice, i, iter, num_steer_regs, num_tot_regs = 0; + const struct __guc_mmio_reg_descr_group *list; + struct __guc_mmio_reg_descr_group *extlists; + struct __guc_mmio_reg_descr *extarray; + + /* In XE_LP / HPG we only have render-class steering registers during error-capture */ + list = guc_capture_get_one_list(lists, GUC_CAPTURE_LIST_INDEX_PF, + GUC_CAPTURE_LIST_TYPE_ENGINE_CLASS, GUC_RENDER_CLASS); + /* skip if extlists was previously allocated */ + if (!list || guc->capture->extlists) + return; + + num_steer_regs = ARRAY_SIZE(xe_extregs); + if (__has_xehpg_extregs(ipver)) + num_steer_regs += ARRAY_SIZE(xehpg_extregs); + + sseu = &gt->info.sseu; + for_each_instdone_gslice_dss_xehp(i915, sseu, iter, slice, subslice) { + num_tot_regs += num_steer_regs; + } + + if (!num_tot_regs) + return; + + /* allocate an extra for an end marker */ + extlists = kcalloc(2, sizeof(struct __guc_mmio_reg_descr_group), GFP_KERNEL); + if (!extlists) + return; + + if (__alloc_ext_regs(&extlists[0], list, num_tot_regs)) { + kfree(extlists); + return; + } + + extarray = extlists[0].extlist; + for_each_instdone_gslice_dss_xehp(i915, sseu, iter, slice, subslice) { + for (i = 0; i < ARRAY_SIZE(xe_extregs); ++i) { + __fill_ext_reg(extarray, &xe_extregs[i], slice, subslice); + ++extarray; + } + if
(__has_xehpg_extregs(ipver)) { + for (i = 0; i < ARRAY_SIZE(xehpg_extregs); ++i) { + __fill_ext_reg(extarray, &xehpg_extregs[i], slice, subslice); + ++extarray; + } + } + } + + drm_dbg(&i915->drm, "GuC-capture found %d-ext-regs.\n", num_tot_regs); + guc->capture->extlists = extlists; +} + +static const struct __guc_mmio_reg_descr_group * +guc_capture_get_device_reglist(struct intel_guc *guc) +{ + struct drm_i915_private *i915 = guc_to_gt(guc)->i915; + + if (GRAPHICS_VER(i915) > 11) { + /* + * For certain engine classes, there are slice and subslice + * level registers requiring steering. We allocate and populate + * these at init time based on hw config and add them as an + * extension list at the end of the pre-populated render list. + */ + if (IS_DG2(i915)) + guc_capture_alloc_steered_lists_xe_hpg(guc, xe_lpd_lists, IP_VER(12, 55)); + else if (IS_XEHPSDV(i915)) + guc_capture_alloc_steered_lists_xe_hpg(guc, xe_lpd_lists, IP_VER(12, 50)); + else + guc_capture_alloc_steered_lists_xe_lpd(guc, xe_lpd_lists); + + return xe_lpd_lists; + } + + /* if GuC submission is enabled on a non-POR platform, just use a common baseline */ + return default_lists; +} + +static const char * +__stringify_owner(u32 owner) +{ + switch (owner) { + case GUC_CAPTURE_LIST_INDEX_PF: + return "PF"; + case GUC_CAPTURE_LIST_INDEX_VF: + return "VF"; + default: + return "unknown"; + } + + return ""; +} + +static const char * +__stringify_type(u32 type) +{ + switch (type) { + case GUC_CAPTURE_LIST_TYPE_GLOBAL: + return "Global"; + case GUC_CAPTURE_LIST_TYPE_ENGINE_CLASS: + return "Class"; + case GUC_CAPTURE_LIST_TYPE_ENGINE_INSTANCE: + return "Instance"; + default: + return "unknown"; + } + + return ""; +} + +static const char * +__stringify_engclass(u32 class) +{ + switch (class) { + case GUC_RENDER_CLASS: + return "Render"; + case GUC_VIDEO_CLASS: + return "Video"; + case GUC_VIDEOENHANCE_CLASS: + return "VideoEnhance"; + case GUC_BLITTER_CLASS: + return "Blitter"; + case GUC_COMPUTE_CLASS: + return "Compute"; + default: + return "unknown"; + } + + return ""; +} + +static void +guc_capture_warn_with_list_info(struct drm_i915_private *i915, char *msg, + u32 owner, u32 type, u32 classid) +{ + if (type == GUC_CAPTURE_LIST_TYPE_GLOBAL) + drm_dbg(&i915->drm, "GuC-capture: %s for %s %s-Registers.\n", msg, + __stringify_owner(owner), __stringify_type(type)); + else + drm_dbg(&i915->drm, "GuC-capture: %s for %s %s-Registers on %s-Engine\n", msg, + __stringify_owner(owner), __stringify_type(type), + __stringify_engclass(classid)); +} + +static int +guc_capture_list_init(struct intel_guc *guc, u32 owner, u32 type, u32 classid, + struct guc_mmio_reg *ptr, u16 num_entries) +{ + u32 i = 0, j = 0; + struct drm_i915_private *i915 = guc_to_gt(guc)->i915; + const struct __guc_mmio_reg_descr_group *reglists = guc->capture->reglists; + struct __guc_mmio_reg_descr_group *extlists = guc->capture->extlists; + const struct __guc_mmio_reg_descr_group *match; + struct __guc_mmio_reg_descr_group *matchext; + + if (!reglists) + return -ENODEV; + + match = guc_capture_get_one_list(reglists, owner, type, classid); + if (!match) { + guc_capture_warn_with_list_info(i915, "Missing register list init", owner, type, + classid); + return -ENODATA; + } + + for (i = 0; i < num_entries && i < match->num_regs; ++i) { + ptr[i].offset = match->list[i].reg.reg; + ptr[i].value = 0xDEADF00D; + ptr[i].flags = match->list[i].flags; + ptr[i].mask = match->list[i].mask; + } + + matchext = guc_capture_get_one_ext_list(extlists, owner, type, classid); + if (matchext) { + for
(i = match->num_regs, j = 0; i < num_entries && + i < (match->num_regs + matchext->num_regs) && + j < matchext->num_regs; ++i, ++j) { + ptr[i].offset = matchext->extlist[j].reg.reg; + ptr[i].value = 0xDEADF00D; + ptr[i].flags = matchext->extlist[j].flags; + ptr[i].mask = matchext->extlist[j].mask; + } + } + if (i < num_entries) + drm_dbg(&i915->drm, "GuC-capture: Init reglist short %d out %d.\n", + (int)i, (int)num_entries); + + return 0; +} + +static int +guc_cap_list_num_regs(struct intel_guc_state_capture *gc, u32 owner, u32 type, u32 classid) +{ + const struct __guc_mmio_reg_descr_group *match; + struct __guc_mmio_reg_descr_group *matchext; + int num_regs; + + match = guc_capture_get_one_list(gc->reglists, owner, type, classid); + if (!match) + return 0; + + num_regs = match->num_regs; + + matchext = guc_capture_get_one_ext_list(gc->extlists, owner, type, classid); + if (matchext) + num_regs += matchext->num_regs; + + return num_regs; +} + +int +intel_guc_capture_getlistsize(struct intel_guc *guc, u32 owner, u32 type, u32 classid, + size_t *size) +{ + struct drm_i915_private *i915 = guc_to_gt(guc)->i915; + struct intel_guc_state_capture *gc = guc->capture; + struct __guc_capture_ads_cache *cache = &gc->ads_cache[owner][type][classid]; + int num_regs; + + if (!gc->reglists) + return -ENODEV; + + if (cache->is_valid) { + *size = cache->size; + return cache->status; + } + + num_regs = guc_cap_list_num_regs(gc, owner, type, classid); + if (!num_regs) { + guc_capture_warn_with_list_info(i915, "Missing register list size", + owner, type, classid); + return -ENODATA; + } + + *size = PAGE_ALIGN((sizeof(struct guc_debug_capture_list)) + + (num_regs * sizeof(struct guc_mmio_reg))); + + return 0; +} + +static void guc_capture_create_prealloc_nodes(struct intel_guc *guc); + +int +intel_guc_capture_getlist(struct intel_guc *guc, u32 owner, u32 type, u32 classid, + void **outptr) +{ + struct intel_guc_state_capture *gc = guc->capture; + struct __guc_capture_ads_cache *cache = &gc->ads_cache[owner][type][classid]; + struct drm_i915_private *i915 = guc_to_gt(guc)->i915; + struct guc_debug_capture_list *listnode; + int ret, num_regs; + u8 *caplist, *tmp; + size_t size = 0; + + if (!gc->reglists) + return -ENODEV; + + if (cache->is_valid) { + *outptr = cache->ptr; + return cache->status; + } + + /* + * ADS population of input registers is a good + * time to pre-allocate cachelist output nodes + */ + guc_capture_create_prealloc_nodes(guc); + + ret = intel_guc_capture_getlistsize(guc, owner, type, classid, &size); + if (ret) { + cache->is_valid = true; + cache->ptr = NULL; + cache->size = 0; + cache->status = ret; + return ret; + } + + caplist = kzalloc(size, GFP_KERNEL); + if (!caplist) { + drm_dbg(&i915->drm, "GuC-capture: failed to alloc cached caplist"); + return -ENOMEM; + } + + /* populate capture list header */ + tmp = caplist; + num_regs = guc_cap_list_num_regs(guc->capture, owner, type, classid); + listnode = (struct guc_debug_capture_list *)tmp; + listnode->header.info = FIELD_PREP(GUC_CAPTURELISTHDR_NUMDESCR, (u32)num_regs); + + /* populate list of register descriptor */ + tmp += sizeof(struct guc_debug_capture_list); + guc_capture_list_init(guc, owner, type, classid, (struct guc_mmio_reg *)tmp, num_regs); + + /* cache this list */ + cache->is_valid = true; + cache->ptr = caplist; + cache->size = size; + cache->status = 0; + + *outptr = caplist; + + return 0; +} + +int +intel_guc_capture_getnullheader(struct intel_guc *guc, + void **outptr, size_t *size) +{ + struct intel_guc_state_capture *gc 
= guc->capture; + struct drm_i915_private *i915 = guc_to_gt(guc)->i915; + int tmp = sizeof(u32) * 4; + void *null_header; + + if (gc->ads_null_cache) { + *outptr = gc->ads_null_cache; + *size = tmp; + return 0; + } + + null_header = kzalloc(tmp, GFP_KERNEL); + if (!null_header) { + drm_dbg(&i915->drm, "GuC-capture: failed to alloc cached nulllist"); + return -ENOMEM; + } + + gc->ads_null_cache = null_header; + *outptr = null_header; + *size = tmp; + + return 0; +} + +#define GUC_CAPTURE_OVERBUFFER_MULTIPLIER 3 + +int +intel_guc_capture_output_min_size_est(struct intel_guc *guc) +{ + struct intel_gt *gt = guc_to_gt(guc); + struct intel_engine_cs *engine; + enum intel_engine_id id; + int worst_min_size = 0, num_regs = 0; + size_t tmp = 0; + + if (!guc->capture) + return -ENODEV; + + /* + * If every single engine-instance suffered a failure in quick succession but + * were all unrelated, then a burst of multiple error-capture events would dump + * registers for every one engine instance, one at a time. In this case, GuC + * would even dump the global-registers repeatedly. + * + * For each engine instance, there would be 1 x guc_state_capture_group_t output + * followed by 3 x guc_state_capture_t lists. The latter is how the register + * dumps are split across different register types (where the '3' are global vs class + * vs instance). Finally, let's multiply the whole thing by 3x (just so we are + * not limited to just 1 round of data in a worst case full register dump log) + * + * NOTE: intel_guc_log that allocates the log buffer would round this size up to + * a power of two. + */ + + for_each_engine(engine, gt, id) { + worst_min_size += sizeof(struct guc_state_capture_group_header_t) + + (3 * sizeof(struct guc_state_capture_header_t)); + + if (!intel_guc_capture_getlistsize(guc, 0, GUC_CAPTURE_LIST_TYPE_GLOBAL, 0, &tmp)) + num_regs += tmp; + + if (!intel_guc_capture_getlistsize(guc, 0, GUC_CAPTURE_LIST_TYPE_ENGINE_CLASS, + engine->class, &tmp)) { + num_regs += tmp; + } + if (!intel_guc_capture_getlistsize(guc, 0, GUC_CAPTURE_LIST_TYPE_ENGINE_INSTANCE, + engine->class, &tmp)) { + num_regs += tmp; + } + } + + worst_min_size += (num_regs * sizeof(struct guc_mmio_reg)); + + return (worst_min_size * GUC_CAPTURE_OVERBUFFER_MULTIPLIER); +} + +/* + * KMD Init time flows: + * -------------------- + * --> alloc A: GuC input capture regs lists (registered to GuC via ADS). + * intel_guc_ads acquires the register lists by calling + * intel_guc_capture_list_size and intel_guc_capture_list_get 'n' times, + * where n = 1 for global-reg-list + + * num_engine_classes for class-reg-list + + * num_engine_classes for instance-reg-list + * (since all instances of the same engine-class type + * have an identical engine-instance register-list). + * ADS module also calls separately for PF vs VF. 
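+ * For example (hypothetical counts): a part with 4 engine classes would + * need n = 1 + 4 + 4 = 9 such acquisitions per owner.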
+ * + * --> alloc B: GuC output capture buf (registered via guc_init_params(log_param)) + * Size = #define CAPTURE_BUFFER_SIZE (warns if too small) + * Note: 'x 3' to hold multiple capture groups + * + * GUC Runtime notify capture: + * -------------------------- + * --> G2H STATE_CAPTURE_NOTIFICATION + * L--> intel_guc_capture_process + * L--> Loop through B (head..tail) and for each engine instance's + * err-state-captured register-list we find, we alloc 'C': + * --> alloc C: A capture-output-node structure that includes misc capture info along + * with 3 register list dumps (global, engine-class and engine-instance) + * This node is created from a pre-allocated list of blank nodes in + * guc->capture->cachelist and populated with the error-capture + * data from GuC and then it's added into guc->capture->outlist linked + * list. This list is used for matchup and printout by i915_gpu_coredump + * and err_print_gt (when the user invokes the error capture sysfs). + * + * GUC --> notify context reset: + * ----------------------------- + * --> G2H CONTEXT RESET + * L--> guc_handle_context_reset --> i915_capture_error_state + * L--> i915_gpu_coredump(..IS_GUC_CAPTURE) --> gt_record_engines + * --> capture_engine(..IS_GUC_CAPTURE) + * L--> intel_guc_capture_get_matching_node is where we + * detach C from the internal linked list and add it into + * the intel_engine_coredump struct (if the context and + * engine of the event notification matches a node + * in the linked list). + * + * User Sysfs / Debugfs + * -------------------- + * --> i915_gpu_coredump_copy_to_buffer-> + * L--> err_print_to_sgl --> err_print_gt + * L--> error_print_guc_captures + * L--> intel_guc_capture_print_node prints the + * register list values of the attached node + * on the error-engine-dump being reported. + * L--> i915_reset_error_state ... -->__i915_gpu_coredump_free + * L--> ... cleanup_gt --> + * L--> intel_guc_capture_free_node returns the + * capture-output-node back to the internal + * cachelist for reuse. + * + */ + +static int guc_capture_buf_cnt(struct __guc_capture_bufstate *buf) +{ + if (buf->wr >= buf->rd) + return (buf->wr - buf->rd); + return (buf->size - buf->rd) + buf->wr; +} + +static int guc_capture_buf_cnt_to_end(struct __guc_capture_bufstate *buf) +{ + if (buf->rd > buf->wr) + return (buf->size - buf->rd); + return (buf->wr - buf->rd); +} + +/* + * GuC's error-capture output is a ring buffer populated in a byte-stream fashion: + * + * The GuC Log buffer region for error-capture is managed like a ring buffer. + * The GuC firmware dumps error capture logs into this ring in a byte-stream flow. + * Additionally, both now and for the foreseeable future, all packed error- + * capture output structures are dword aligned. + * + * That said, if the GuC firmware is in the midst of writing a structure that is larger + * than one dword but the tail end of the err-capture buffer-region has less space left, + * we would need to extract that structure one dword at a time straddled across the end, + * onto the start of the ring. + * + * The function below, guc_capture_log_remove_dw, is a helper for that. All callers of this + * function would typically do a straight-up memcpy from the ring contents and will only + * call this helper if their structure-extraction is straddling across the end of the + * ring. GuC firmware does not add any padding. The reason for the no-padding is to ease + * scalability for future expansion of output data types without requiring a redesign + * of the flow controls.
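+ * + * A hypothetical worked example: with size = 0x1000, rd = 0xff8 and wr = 0x10, + * the 16-byte guc_mmio_reg being extracted straddles the wrap, so the caller + * falls back to four guc_capture_log_remove_dw calls: dwords are read at + * 0xff8 and 0xffc, rd then resets to 0, and the last two dwords come from + * the start of the ring.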
+ */ +static int +guc_capture_log_remove_dw(struct intel_guc *guc, struct __guc_capture_bufstate *buf, + u32 *dw) +{ + struct drm_i915_private *i915 = guc_to_gt(guc)->i915; + int tries = 2; + int avail = 0; + u32 *src_data; + + if (!guc_capture_buf_cnt(buf)) + return 0; + + while (tries--) { + avail = guc_capture_buf_cnt_to_end(buf); + if (avail >= sizeof(u32)) { + src_data = (u32 *)(buf->data + buf->rd); + *dw = *src_data; + buf->rd += 4; + return 4; + } + if (avail) + drm_dbg(&i915->drm, "GuC-Cap-Logs not dword aligned, skipping.\n"); + buf->rd = 0; + } + + return 0; +} + +static bool +guc_capture_data_extracted(struct __guc_capture_bufstate *b, + int size, void *dest) +{ + if (guc_capture_buf_cnt_to_end(b) >= size) { + memcpy(dest, (b->data + b->rd), size); + b->rd += size; + return true; + } + return false; +} + +static int +guc_capture_log_get_group_hdr(struct intel_guc *guc, struct __guc_capture_bufstate *buf, + struct guc_state_capture_group_header_t *ghdr) +{ + int read = 0; + int fullsize = sizeof(struct guc_state_capture_group_header_t); + + if (fullsize > guc_capture_buf_cnt(buf)) + return -1; + + if (guc_capture_data_extracted(buf, fullsize, (void *)ghdr)) + return 0; + + read += guc_capture_log_remove_dw(guc, buf, &ghdr->owner); + read += guc_capture_log_remove_dw(guc, buf, &ghdr->info); + if (read != fullsize) + return -1; + + return 0; +} + +static int +guc_capture_log_get_data_hdr(struct intel_guc *guc, struct __guc_capture_bufstate *buf, + struct guc_state_capture_header_t *hdr) +{ + int read = 0; + int fullsize = sizeof(struct guc_state_capture_header_t); + + if (fullsize > guc_capture_buf_cnt(buf)) + return -1; + + if (guc_capture_data_extracted(buf, fullsize, (void *)hdr)) + return 0; + + read += guc_capture_log_remove_dw(guc, buf, &hdr->owner); + read += guc_capture_log_remove_dw(guc, buf, &hdr->info); + read += guc_capture_log_remove_dw(guc, buf, &hdr->lrca); + read += guc_capture_log_remove_dw(guc, buf, &hdr->guc_id); + read += guc_capture_log_remove_dw(guc, buf, &hdr->num_mmios); + if (read != fullsize) + return -1; + + return 0; +} + +static int +guc_capture_log_get_register(struct intel_guc *guc, struct __guc_capture_bufstate *buf, + struct guc_mmio_reg *reg) +{ + int read = 0; + int fullsize = sizeof(struct guc_mmio_reg); + + if (fullsize > guc_capture_buf_cnt(buf)) + return -1; + + if (guc_capture_data_extracted(buf, fullsize, (void *)reg)) + return 0; + + read += guc_capture_log_remove_dw(guc, buf, &reg->offset); + read += guc_capture_log_remove_dw(guc, buf, &reg->value); + read += guc_capture_log_remove_dw(guc, buf, &reg->flags); + read += guc_capture_log_remove_dw(guc, buf, &reg->mask); + if (read != fullsize) + return -1; + + return 0; +} + +static void +guc_capture_delete_one_node(struct intel_guc *guc, struct __guc_capture_parsed_output *node) +{ + int i; + + for (i = 0; i < GUC_CAPTURE_LIST_TYPE_MAX; ++i) + kfree(node->reginfo[i].regs); + list_del(&node->link); + kfree(node); +} + +static void +guc_capture_delete_prealloc_nodes(struct intel_guc *guc) +{ + struct __guc_capture_parsed_output *n, *ntmp; + + /* + * NOTE: At the end of driver operation, we must assume that we + * have prealloc nodes in both the cachelist as well as outlist + * if unclaimed error capture events occurred prior to shutdown.
+ */ + list_for_each_entry_safe(n, ntmp, &guc->capture->outlist, link) + guc_capture_delete_one_node(guc, n); + + list_for_each_entry_safe(n, ntmp, &guc->capture->cachelist, link) + guc_capture_delete_one_node(guc, n); +} + +static void +guc_capture_add_node_to_list(struct __guc_capture_parsed_output *node, + struct list_head *list) +{ + list_add_tail(&node->link, list); +} + +static void +guc_capture_add_node_to_outlist(struct intel_guc_state_capture *gc, + struct __guc_capture_parsed_output *node) +{ + guc_capture_add_node_to_list(node, &gc->outlist); +} + +static void +guc_capture_add_node_to_cachelist(struct intel_guc_state_capture *gc, + struct __guc_capture_parsed_output *node) +{ + guc_capture_add_node_to_list(node, &gc->cachelist); +} + +static void +guc_capture_init_node(struct intel_guc *guc, struct __guc_capture_parsed_output *node) +{ + struct guc_mmio_reg *tmp[GUC_CAPTURE_LIST_TYPE_MAX]; + int i; + + for (i = 0; i < GUC_CAPTURE_LIST_TYPE_MAX; ++i) { + tmp[i] = node->reginfo[i].regs; + memset(tmp[i], 0, sizeof(struct guc_mmio_reg) * + guc->capture->max_mmio_per_node); + } + memset(node, 0, sizeof(*node)); + for (i = 0; i < GUC_CAPTURE_LIST_TYPE_MAX; ++i) + node->reginfo[i].regs = tmp[i]; + + INIT_LIST_HEAD(&node->link); +} + +static struct __guc_capture_parsed_output * +guc_capture_get_prealloc_node(struct intel_guc *guc) +{ + struct __guc_capture_parsed_output *found = NULL; + + if (!list_empty(&guc->capture->cachelist)) { + struct __guc_capture_parsed_output *n, *ntmp; + + /* get first avail node from the cache list */ + list_for_each_entry_safe(n, ntmp, &guc->capture->cachelist, link) { + found = n; + list_del(&n->link); + break; + } + } else { + struct __guc_capture_parsed_output *n, *ntmp; + + /* traverse down and steal back the oldest node already allocated */ + list_for_each_entry_safe(n, ntmp, &guc->capture->outlist, link) { + found = n; + } + if (found) + list_del(&found->link); + } + if (found) + guc_capture_init_node(guc, found); + + return found; +} + +static struct __guc_capture_parsed_output * +guc_capture_alloc_one_node(struct intel_guc *guc) +{ + struct __guc_capture_parsed_output *new; + int i; + + new = kzalloc(sizeof(*new), GFP_KERNEL); + if (!new) + return NULL; + + for (i = 0; i < GUC_CAPTURE_LIST_TYPE_MAX; ++i) { + new->reginfo[i].regs = kcalloc(guc->capture->max_mmio_per_node, + sizeof(struct guc_mmio_reg), GFP_KERNEL); + if (!new->reginfo[i].regs) { + while (i) + kfree(new->reginfo[--i].regs); + kfree(new); + return NULL; + } + } + guc_capture_init_node(guc, new); + + return new; +} + +static struct __guc_capture_parsed_output * +guc_capture_clone_node(struct intel_guc *guc, struct __guc_capture_parsed_output *original, + u32 keep_reglist_mask) +{ + struct __guc_capture_parsed_output *new; + int i; + + new = guc_capture_get_prealloc_node(guc); + if (!new) + return NULL; + if (!original) + return new; + + new->is_partial = original->is_partial; + + /* copy reg-lists that we want to clone */ + for (i = 0; i < GUC_CAPTURE_LIST_TYPE_MAX; ++i) { + if (keep_reglist_mask & BIT(i)) { + GEM_BUG_ON(original->reginfo[i].num_regs > + guc->capture->max_mmio_per_node); + + memcpy(new->reginfo[i].regs, original->reginfo[i].regs, + original->reginfo[i].num_regs * sizeof(struct guc_mmio_reg)); + + new->reginfo[i].num_regs = original->reginfo[i].num_regs; + new->reginfo[i].vfid = original->reginfo[i].vfid; + + if (i == GUC_CAPTURE_LIST_TYPE_ENGINE_CLASS) { + new->eng_class = original->eng_class; + } else if (i == GUC_CAPTURE_LIST_TYPE_ENGINE_INSTANCE) { + new->eng_inst = 
original->eng_inst; + new->guc_id = original->guc_id; + new->lrca = original->lrca; + } + } + } + + return new; +} + +static void +__guc_capture_create_prealloc_nodes(struct intel_guc *guc) +{ + struct __guc_capture_parsed_output *node = NULL; + struct drm_i915_private *i915 = guc_to_gt(guc)->i915; + int i; + + for (i = 0; i < PREALLOC_NODES_MAX_COUNT; ++i) { + node = guc_capture_alloc_one_node(guc); + if (!node) { + drm_warn(&i915->drm, "GuC Capture pre-alloc-cache failure\n"); + /* don't free the priors, use what we got and cleanup at shutdown */ + return; + } + guc_capture_add_node_to_cachelist(guc->capture, node); + } +} + +static int +guc_get_max_reglist_count(struct intel_guc *guc) +{ + int i, j, k, tmp, maxregcount = 0; + + for (i = 0; i < GUC_CAPTURE_LIST_INDEX_MAX; ++i) { + for (j = 0; j < GUC_CAPTURE_LIST_TYPE_MAX; ++j) { + for (k = 0; k < GUC_MAX_ENGINE_CLASSES; ++k) { + if (j == GUC_CAPTURE_LIST_TYPE_GLOBAL && k > 0) + continue; + + tmp = guc_cap_list_num_regs(guc->capture, i, j, k); + if (tmp > maxregcount) + maxregcount = tmp; + } + } + } + if (!maxregcount) + maxregcount = PREALLOC_NODES_DEFAULT_NUMREGS; + + return maxregcount; +} + +static void +guc_capture_create_prealloc_nodes(struct intel_guc *guc) +{ + /* skip if we've already done the pre-alloc */ + if (guc->capture->max_mmio_per_node) + return; + + guc->capture->max_mmio_per_node = guc_get_max_reglist_count(guc); + __guc_capture_create_prealloc_nodes(guc); +} + +static int +guc_capture_extract_reglists(struct intel_guc *guc, struct __guc_capture_bufstate *buf) +{ + struct drm_i915_private *i915 = guc_to_gt(guc)->i915; + struct guc_state_capture_group_header_t ghdr = {0}; + struct guc_state_capture_header_t hdr = {0}; + struct __guc_capture_parsed_output *node = NULL; + struct guc_mmio_reg *regs = NULL; + int i, numlists, numregs, ret = 0; + enum guc_capture_type datatype; + struct guc_mmio_reg tmp; + bool is_partial = false; + + i = guc_capture_buf_cnt(buf); + if (!i) + return -ENODATA; + if (i % sizeof(u32)) { + drm_warn(&i915->drm, "GuC Capture new entries unaligned\n"); + ret = -EIO; + goto bailout; + } + + /* first get the capture group header */ + if (guc_capture_log_get_group_hdr(guc, buf, &ghdr)) { + ret = -EIO; + goto bailout; + } + /* + * we would typically expect a layout as below, with anywhere from 3 to n + * captures in a group, where n > 3 if multiple dependent engine instances + * are being reset together. + * ____________________________________________ + * | Capture Group | + * | ________________________________________ | + * | | Capture Group Header: | | + * | | - num_captures = 5 | | + * | |______________________________________| | + * | ________________________________________ | + * | | Capture1: | | + * | | Hdr: GLOBAL, numregs=a | | + * | | ____________________________________ | | + * | | | Reglist | | | + * | | | - reg1, reg2, ... rega | | | + * | | |__________________________________| | | + * | |______________________________________| | + * | ________________________________________ | + * | | Capture2: | | + * | | Hdr: CLASS=RENDER/COMPUTE, numregs=b| | + * | | ____________________________________ | | + * | | | Reglist | | | + * | | | - reg1, reg2, ... regb | | | + * | | |__________________________________| | | + * | |______________________________________| | + * | ________________________________________ | + * | | Capture3: | | + * | | Hdr: INSTANCE=RCS, numregs=c | | + * | | ____________________________________ | | + * | | | Reglist | | | + * | | | - reg1, reg2, ...
regc | | | + * | | |__________________________________| | | + * | |______________________________________| | + * | ________________________________________ | + * | | Capture4: | | + * | | Hdr: CLASS=RENDER/COMPUTE, numregs=d| | + * | | ____________________________________ | | + * | | | Reglist | | | + * | | | - reg1, reg2, ... regd | | | + * | | |__________________________________| | | + * | |______________________________________| | + * | ________________________________________ | + * | | Capture5: | | + * | | Hdr: INSTANCE=CCS0, numregs=e | | + * | | ____________________________________ | | + * | | | Reglist | | | + * | | | - reg1, reg2, ... rege | | | + * | | |__________________________________| | | + * | |______________________________________| | + * |__________________________________________| + */ + is_partial = FIELD_GET(CAP_GRP_HDR_CAPTURE_TYPE, ghdr.info); + numlists = FIELD_GET(CAP_GRP_HDR_NUM_CAPTURES, ghdr.info); + + while (numlists--) { + if (guc_capture_log_get_data_hdr(guc, buf, &hdr)) { + ret = -EIO; + break; + } + + datatype = FIELD_GET(CAP_HDR_CAPTURE_TYPE, hdr.info); + if (datatype > GUC_CAPTURE_LIST_TYPE_ENGINE_INSTANCE) { + /* unknown capture type - skip over to next capture set */ + numregs = FIELD_GET(CAP_HDR_NUM_MMIOS, hdr.num_mmios); + while (numregs--) { + if (guc_capture_log_get_register(guc, buf, &tmp)) { + ret = -EIO; + break; + } + } + continue; + } else if (node) { + /* + * Based on the current capture type and what we have so far, + * decide if we should add the current node into the internal + * linked list for match-up when i915_gpu_coredump calls later + * (and alloc a blank node for the next set of reglists) + * or continue with the same node or clone the current node + * but only retain the global or class registers (such as the + * case of dependent engine resets). 
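+ * + * For example, the five-capture group above (GLOBAL, CLASS, INSTANCE, CLASS, + * INSTANCE) would produce two output nodes, with the second one cloned from + * the first and carrying a duplicate of the global register list.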
+ */ + if (datatype == GUC_CAPTURE_LIST_TYPE_GLOBAL) { + guc_capture_add_node_to_outlist(guc->capture, node); + node = NULL; + } else if (datatype == GUC_CAPTURE_LIST_TYPE_ENGINE_CLASS && + node->reginfo[GUC_CAPTURE_LIST_TYPE_ENGINE_CLASS].num_regs) { + /* Add to list, clone node and duplicate global list */ + guc_capture_add_node_to_outlist(guc->capture, node); + node = guc_capture_clone_node(guc, node, + GCAP_PARSED_REGLIST_INDEX_GLOBAL); + } else if (datatype == GUC_CAPTURE_LIST_TYPE_ENGINE_INSTANCE && + node->reginfo[GUC_CAPTURE_LIST_TYPE_ENGINE_INSTANCE].num_regs) { + /* Add to list, clone node and duplicate global + class lists */ + guc_capture_add_node_to_outlist(guc->capture, node); + node = guc_capture_clone_node(guc, node, + (GCAP_PARSED_REGLIST_INDEX_GLOBAL | + GCAP_PARSED_REGLIST_INDEX_ENGCLASS)); + } + } + + if (!node) { + node = guc_capture_get_prealloc_node(guc); + if (!node) { + ret = -ENOMEM; + break; + } + if (datatype != GUC_CAPTURE_LIST_TYPE_GLOBAL) + drm_dbg(&i915->drm, "GuC Capture missing global dump: %08x!\n", + datatype); + } + node->is_partial = is_partial; + node->reginfo[datatype].vfid = FIELD_GET(CAP_HDR_CAPTURE_VFID, hdr.owner); + switch (datatype) { + case GUC_CAPTURE_LIST_TYPE_ENGINE_INSTANCE: + node->eng_class = FIELD_GET(CAP_HDR_ENGINE_CLASS, hdr.info); + node->eng_inst = FIELD_GET(CAP_HDR_ENGINE_INSTANCE, hdr.info); + node->lrca = hdr.lrca; + node->guc_id = hdr.guc_id; + break; + case GUC_CAPTURE_LIST_TYPE_ENGINE_CLASS: + node->eng_class = FIELD_GET(CAP_HDR_ENGINE_CLASS, hdr.info); + break; + default: + break; + } + + numregs = FIELD_GET(CAP_HDR_NUM_MMIOS, hdr.num_mmios); + if (numregs > guc->capture->max_mmio_per_node) { + drm_dbg(&i915->drm, "GuC Capture list extraction clipped by prealloc!\n"); + numregs = guc->capture->max_mmio_per_node; + } + node->reginfo[datatype].num_regs = numregs; + regs = node->reginfo[datatype].regs; + i = 0; + while (numregs--) { + if (guc_capture_log_get_register(guc, buf, ®s[i++])) { + ret = -EIO; + break; + } + } + } + +bailout: + if (node) { + /* If we have data, add to linked list for match-up when i915_gpu_coredump calls */ + for (i = GUC_CAPTURE_LIST_TYPE_GLOBAL; i < GUC_CAPTURE_LIST_TYPE_MAX; ++i) { + if (node->reginfo[i].regs) { + guc_capture_add_node_to_outlist(guc->capture, node); + node = NULL; + break; + } + } + if (node) /* else return it back to cache list */ + guc_capture_add_node_to_cachelist(guc->capture, node); + } + return ret; +} + +static int __guc_capture_flushlog_complete(struct intel_guc *guc) +{ + u32 action[] = { + INTEL_GUC_ACTION_LOG_BUFFER_FILE_FLUSH_COMPLETE, + GUC_CAPTURE_LOG_BUFFER + }; + + return intel_guc_send(guc, action, ARRAY_SIZE(action)); +} + +static void __guc_capture_process_output(struct intel_guc *guc) +{ + unsigned int buffer_size, read_offset, write_offset, full_count; + struct intel_uc *uc = container_of(guc, typeof(*uc), guc); + struct drm_i915_private *i915 = guc_to_gt(guc)->i915; + struct guc_log_buffer_state log_buf_state_local; + struct guc_log_buffer_state *log_buf_state; + struct __guc_capture_bufstate buf; + void *src_data = NULL; + bool new_overflow; + int ret; + + log_buf_state = guc->log.buf_addr + + (sizeof(struct guc_log_buffer_state) * GUC_CAPTURE_LOG_BUFFER); + src_data = guc->log.buf_addr + intel_guc_get_log_buffer_offset(GUC_CAPTURE_LOG_BUFFER); + + /* + * Make a copy of the state structure, inside GuC log buffer + * (which is uncached mapped), on the stack to avoid reading + * from it multiple times. 
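+ * A single snapshot also means the offsets used below cannot change + * underneath us mid-parse.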
+ */ + memcpy(&log_buf_state_local, log_buf_state, sizeof(struct guc_log_buffer_state)); + buffer_size = intel_guc_get_log_buffer_size(GUC_CAPTURE_LOG_BUFFER); + read_offset = log_buf_state_local.read_ptr; + write_offset = log_buf_state_local.sampled_write_ptr; + full_count = log_buf_state_local.buffer_full_cnt; + + /* Bookkeeping stuff */ + guc->log.stats[GUC_CAPTURE_LOG_BUFFER].flush += log_buf_state_local.flush_to_file; + new_overflow = intel_guc_check_log_buf_overflow(&guc->log, GUC_CAPTURE_LOG_BUFFER, + full_count); + + /* Now copy the actual logs. */ + if (unlikely(new_overflow)) { + /* copy the whole buffer in case of overflow */ + read_offset = 0; + write_offset = buffer_size; + } else if (unlikely((read_offset > buffer_size) || + (write_offset > buffer_size))) { + drm_err(&i915->drm, "invalid GuC log capture buffer state!\n"); + /* copy whole buffer as offsets are unreliable */ + read_offset = 0; + write_offset = buffer_size; + } + + buf.size = buffer_size; + buf.rd = read_offset; + buf.wr = write_offset; + buf.data = src_data; + + if (!uc->reset_in_progress) { + do { + ret = guc_capture_extract_reglists(guc, &buf); + } while (ret >= 0); + } + + /* Update the state of log buffer err-cap state */ + log_buf_state->read_ptr = write_offset; + log_buf_state->flush_to_file = 0; + __guc_capture_flushlog_complete(guc); +} + +#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR) + +static const char * +guc_capture_reg_to_str(const struct intel_guc *guc, u32 owner, u32 type, + u32 class, u32 id, u32 offset, u32 *is_ext) +{ + const struct __guc_mmio_reg_descr_group *reglists = guc->capture->reglists; + struct __guc_mmio_reg_descr_group *extlists = guc->capture->extlists; + const struct __guc_mmio_reg_descr_group *match; + struct __guc_mmio_reg_descr_group *matchext; + int j; + + *is_ext = 0; + if (!reglists) + return NULL; + + match = guc_capture_get_one_list(reglists, owner, type, id); + if (!match) + return NULL; + + for (j = 0; j < match->num_regs; ++j) { + if (offset == match->list[j].reg.reg) + return match->list[j].regname; + } + if (extlists) { + matchext = guc_capture_get_one_ext_list(extlists, owner, type, id); + if (!matchext) + return NULL; + for (j = 0; j < matchext->num_regs; ++j) { + if (offset == matchext->extlist[j].reg.reg) { + *is_ext = 1; + return matchext->extlist[j].regname; + } + } + } + + return NULL; +} + +#ifdef CONFIG_DRM_I915_DEBUG_GUC +#define __out(a, ...) \ + do { \ + drm_warn((&(a)->i915->drm), __VA_ARGS__); \ + i915_error_printf((a), __VA_ARGS__); \ + } while (0) +#else +#define __out(a, ...) 
\ + i915_error_printf(a, __VA_ARGS__) +#endif + +#define GCAP_PRINT_INTEL_ENG_INFO(ebuf, eng) \ + do { \ + __out(ebuf, " i915-Eng-Name: %s command stream\n", \ + (eng)->name); \ + __out(ebuf, " i915-Eng-Inst-Class: 0x%02x\n", (eng)->class); \ + __out(ebuf, " i915-Eng-Inst-Id: 0x%02x\n", (eng)->instance); \ + __out(ebuf, " i915-Eng-LogicalMask: 0x%08x\n", \ + (eng)->logical_mask); \ + } while (0) + +#define GCAP_PRINT_GUC_INST_INFO(ebuf, node) \ + do { \ + __out(ebuf, " GuC-Engine-Inst-Id: 0x%08x\n", \ + (node)->eng_inst); \ + __out(ebuf, " GuC-Context-Id: 0x%08x\n", (node)->guc_id); \ + __out(ebuf, " LRCA: 0x%08x\n", (node)->lrca); \ + } while (0) + +int intel_guc_capture_print_engine_node(struct drm_i915_error_state_buf *ebuf, + const struct intel_engine_coredump *ee) +{ + const char *grptype[GUC_STATE_CAPTURE_GROUP_TYPE_MAX] = { + "full-capture", + "partial-capture" + }; + const char *datatype[GUC_CAPTURE_LIST_TYPE_MAX] = { + "Global", + "Engine-Class", + "Engine-Instance" + }; + struct intel_guc_state_capture *cap; + struct __guc_capture_parsed_output *node; + struct intel_engine_cs *eng; + struct guc_mmio_reg *regs; + struct intel_guc *guc; + const char *str; + int numregs, i, j; + u32 is_ext; + + if (!ebuf || !ee) + return -EINVAL; + cap = ee->capture; + if (!cap || !ee->engine) + return -ENODEV; + + guc = &ee->engine->gt->uc.guc; + + __out(ebuf, "global --- GuC Error Capture on %s command stream:\n", + ee->engine->name); + + node = ee->guc_capture_node; + if (!node) { + __out(ebuf, " No matching ee-node\n"); + return 0; + } + + __out(ebuf, "Coverage: %s\n", grptype[node->is_partial]); + + for (i = GUC_CAPTURE_LIST_TYPE_GLOBAL; i < GUC_CAPTURE_LIST_TYPE_MAX; ++i) { + __out(ebuf, " RegListType: %s\n", + datatype[i % GUC_CAPTURE_LIST_TYPE_MAX]); + __out(ebuf, " Owner-Id: %d\n", node->reginfo[i].vfid); + + switch (i) { + case GUC_CAPTURE_LIST_TYPE_GLOBAL: + default: + break; + case GUC_CAPTURE_LIST_TYPE_ENGINE_CLASS: + __out(ebuf, " GuC-Eng-Class: %d\n", node->eng_class); + __out(ebuf, " i915-Eng-Class: %d\n", + guc_class_to_engine_class(node->eng_class)); + break; + case GUC_CAPTURE_LIST_TYPE_ENGINE_INSTANCE: + eng = intel_guc_lookup_engine(guc, node->eng_class, node->eng_inst); + if (eng) + GCAP_PRINT_INTEL_ENG_INFO(ebuf, eng); + else + __out(ebuf, " i915-Eng-Lookup Fail!\n"); + GCAP_PRINT_GUC_INST_INFO(ebuf, node); + break; + } + + numregs = node->reginfo[i].num_regs; + __out(ebuf, " NumRegs: %d\n", numregs); + j = 0; + while (numregs--) { + regs = node->reginfo[i].regs; + str = guc_capture_reg_to_str(guc, GUC_CAPTURE_LIST_INDEX_PF, i, + node->eng_class, 0, regs[j].offset, &is_ext); + if (!str) + __out(ebuf, " REG-0x%08x", regs[j].offset); + else + __out(ebuf, " %s", str); + if (is_ext) + __out(ebuf, "[%ld][%ld]", + FIELD_GET(GUC_REGSET_STEERING_GROUP, regs[j].flags), + FIELD_GET(GUC_REGSET_STEERING_INSTANCE, regs[j].flags)); + __out(ebuf, ": 0x%08x\n", regs[j].value); + ++j; + } + } + return 0; +} + +#endif //CONFIG_DRM_I915_CAPTURE_ERROR + +void intel_guc_capture_free_node(struct intel_engine_coredump *ee) +{ + if (!ee || !ee->guc_capture_node) + return; + + guc_capture_add_node_to_cachelist(ee->capture, ee->guc_capture_node); + ee->capture = NULL; + ee->guc_capture_node = NULL; +} + +void intel_guc_capture_get_matching_node(struct intel_gt *gt, + struct intel_engine_coredump *ee, + struct intel_context *ce) +{ + struct __guc_capture_parsed_output *n, *ntmp; + struct drm_i915_private *i915; + struct intel_guc *guc; + + if (!gt || !ee || !ce) + return; + + i915 = gt->i915; + guc = 
&gt->uc.guc; + if (!guc->capture) + return; + + GEM_BUG_ON(ee->guc_capture_node); + /* + * Look for a matching GuC reported error capture node from + * the internal output link-list based on lrca, guc-id and engine + * identification. + */ + list_for_each_entry_safe(n, ntmp, &guc->capture->outlist, link) { + if (n->eng_inst == GUC_ID_TO_ENGINE_INSTANCE(ee->engine->guc_id) && + n->eng_class == GUC_ID_TO_ENGINE_CLASS(ee->engine->guc_id) && + n->guc_id && n->guc_id == ce->guc_id.id && + (n->lrca & CTX_GTT_ADDRESS_MASK) && (n->lrca & CTX_GTT_ADDRESS_MASK) == + (ce->lrc.lrca & CTX_GTT_ADDRESS_MASK)) { + list_del(&n->link); + ee->guc_capture_node = n; + ee->capture = guc->capture; + return; + } + } + drm_dbg(&i915->drm, "GuC capture can't match ee to node\n"); +} + +void intel_guc_capture_process(struct intel_guc *guc) +{ + if (guc->capture) + __guc_capture_process_output(guc); +} + +static void +guc_capture_free_ads_cache(struct intel_guc_state_capture *gc) +{ + int i, j, k; + struct __guc_capture_ads_cache *cache; + + for (i = 0; i < GUC_CAPTURE_LIST_INDEX_MAX; ++i) { + for (j = 0; j < GUC_CAPTURE_LIST_TYPE_MAX; ++j) { + for (k = 0; k < GUC_MAX_ENGINE_CLASSES; ++k) { + cache = &gc->ads_cache[i][j][k]; + if (cache->is_valid) + kfree(cache->ptr); + } + } + } + kfree(gc->ads_null_cache); +} + +void intel_guc_capture_destroy(struct intel_guc *guc) +{ + if (!guc->capture) + return; + + guc_capture_free_ads_cache(guc->capture); + + guc_capture_delete_prealloc_nodes(guc); + + guc_capture_free_extlists(guc->capture->extlists); + kfree(guc->capture->extlists); + + kfree(guc->capture); + guc->capture = NULL; +} + +int intel_guc_capture_init(struct intel_guc *guc) +{ + guc->capture = kzalloc(sizeof(*guc->capture), GFP_KERNEL); + if (!guc->capture) + return -ENOMEM; + + guc->capture->reglists = guc_capture_get_device_reglist(guc); + + INIT_LIST_HEAD(&guc->capture->outlist); + INIT_LIST_HEAD(&guc->capture->cachelist); + + return 0; +} diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_capture.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_capture.h new file mode 100644 index 000000000000..d3d7bd0b6db6 --- /dev/null +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_capture.h @@ -0,0 +1,33 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2021-2021 Intel Corporation + */ + +#ifndef _INTEL_GUC_CAPTURE_H +#define _INTEL_GUC_CAPTURE_H + +#include <linux/types.h> + +struct drm_i915_error_state_buf; +struct guc_gt_system_info; +struct intel_engine_coredump; +struct intel_context; +struct intel_gt; +struct intel_guc; + +void intel_guc_capture_free_node(struct intel_engine_coredump *ee); +int intel_guc_capture_print_engine_node(struct drm_i915_error_state_buf *m, + const struct intel_engine_coredump *ee); +void intel_guc_capture_get_matching_node(struct intel_gt *gt, struct intel_engine_coredump *ee, + struct intel_context *ce); +void intel_guc_capture_process(struct intel_guc *guc); +int intel_guc_capture_output_min_size_est(struct intel_guc *guc); +int intel_guc_capture_getlist(struct intel_guc *guc, u32 owner, u32 type, u32 classid, + void **outptr); +int intel_guc_capture_getlistsize(struct intel_guc *guc, u32 owner, u32 type, u32 classid, + size_t *size); +int intel_guc_capture_getnullheader(struct intel_guc *guc, void **outptr, size_t *size); +void intel_guc_capture_destroy(struct intel_guc *guc); +int intel_guc_capture_init(struct intel_guc *guc); + +#endif /* _INTEL_GUC_CAPTURE_H */ diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c index 2f7fc87a78e1..f01325cd1b62
100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c @@ -6,6 +6,7 @@ #include <linux/circ_buf.h> #include <linux/ktime.h> #include <linux/time64.h> +#include <linux/string_helpers.h> #include <linux/timekeeping.h> #include "i915_drv.h" @@ -170,7 +171,7 @@ static int ct_control_enable(struct intel_guc_ct *ct, bool enable) GUC_CTB_CONTROL_ENABLE : GUC_CTB_CONTROL_DISABLE); if (unlikely(err)) CT_PROBE_ERROR(ct, "Failed to control/%s CTB (%pe)\n", - enabledisable(enable), ERR_PTR(err)); + str_enable_disable(enable), ERR_PTR(err)); return err; } @@ -1202,7 +1203,7 @@ void intel_guc_ct_event_handler(struct intel_guc_ct *ct) void intel_guc_ct_print_info(struct intel_guc_ct *ct, struct drm_printer *p) { - drm_printf(p, "CT %s\n", enableddisabled(ct->enabled)); + drm_printf(p, "CT %s\n", str_enabled_disabled(ct->enabled)); if (!ct->enabled) return; diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h index 4b300b6cc0f9..42cb7a9a6199 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h @@ -32,8 +32,8 @@ #define GUC_CLIENT_PRIORITY_NORMAL 3 #define GUC_CLIENT_PRIORITY_NUM 4 -#define GUC_MAX_LRC_DESCRIPTORS 65535 -#define GUC_INVALID_LRC_ID GUC_MAX_LRC_DESCRIPTORS +#define GUC_MAX_CONTEXT_ID 65535 +#define GUC_INVALID_CONTEXT_ID GUC_MAX_CONTEXT_ID #define GUC_RENDER_ENGINE 0 #define GUC_VIDEO_ENGINE 1 @@ -98,7 +98,13 @@ #define GUC_LOG_BUF_ADDR_SHIFT 12 #define GUC_CTL_WA 1 -#define GUC_WA_POLLCS BIT(18) +#define GUC_WA_GAM_CREDITS BIT(10) +#define GUC_WA_DUAL_QUEUE BIT(11) +#define GUC_WA_RCS_RESET_BEFORE_RC6 BIT(13) +#define GUC_WA_CONTEXT_ISOLATION BIT(15) +#define GUC_WA_PRE_PARSER BIT(14) +#define GUC_WA_HOLD_CCS_SWITCHOUT BIT(17) +#define GUC_WA_POLLCS BIT(18) #define GUC_CTL_FEATURE 2 #define GUC_CTL_ENABLE_SLPC BIT(2) @@ -197,54 +203,45 @@ struct guc_wq_item { u32 fence_id; } __packed; -struct guc_process_desc { - u32 stage_id; - u64 db_base_addr; +struct guc_sched_wq_desc { u32 head; u32 tail; u32 error_offset; - u64 wq_base_addr; - u32 wq_size_bytes; u32 wq_status; - u32 engine_presence; - u32 priority; - u32 reserved[36]; + u32 reserved[28]; } __packed; +/* Helper for context registration H2G */ +struct guc_ctxt_registration_info { + u32 flags; + u32 context_idx; + u32 engine_class; + u32 engine_submit_mask; + u32 wq_desc_lo; + u32 wq_desc_hi; + u32 wq_base_lo; + u32 wq_base_hi; + u32 wq_size; + u32 hwlrca_lo; + u32 hwlrca_hi; +}; #define CONTEXT_REGISTRATION_FLAG_KMD BIT(0) -#define CONTEXT_POLICY_DEFAULT_EXECUTION_QUANTUM_US 1000000 -#define CONTEXT_POLICY_DEFAULT_PREEMPTION_TIME_US 500000 +/* 32-bit KLV structure as used by policy updates and others */ +struct guc_klv_generic_dw_t { + u32 kl; + u32 value; +} __packed; -/* Preempt to idle on quantum expiry */ -#define CONTEXT_POLICY_FLAG_PREEMPT_TO_IDLE BIT(0) +/* Format of the UPDATE_CONTEXT_POLICIES H2G data packet */ +struct guc_update_context_policy_header { + u32 action; + u32 ctx_id; +} __packed; -/* - * GuC Context registration descriptor. - * FIXME: This is only required to exist during context registration. - * The current 1:1 between guc_lrc_desc and LRCs for the lifetime of the LRC - * is not required. 
- */ -struct guc_lrc_desc { - u32 hw_context_desc; - u32 slpm_perf_mode_hint; /* SPLC v1 only */ - u32 slpm_freq_hint; - u32 engine_submit_mask; /* In logical space */ - u8 engine_class; - u8 reserved0[3]; - u32 priority; - u32 process_desc; - u32 wq_addr; - u32 wq_size; - u32 context_flags; /* CONTEXT_REGISTRATION_* */ - /* Time for one workload to execute. (in micro seconds) */ - u32 execution_quantum; - /* Time to wait for a preemption request to complete before issuing a - * reset. (in micro seconds). - */ - u32 preemption_timeout; - u32 policy_flags; /* CONTEXT_POLICY_* */ - u32 reserved1[19]; +struct guc_update_context_policy { + struct guc_update_context_policy_header header; + struct guc_klv_generic_dw_t klv[GUC_CONTEXT_POLICIES_KLV_NUM_IDS]; } __packed; #define GUC_POWER_UNSPECIFIED 0 @@ -285,10 +282,13 @@ struct guc_mmio_reg { u32 offset; u32 value; u32 flags; - u32 mask; #define GUC_REGSET_MASKED BIT(0) +#define GUC_REGSET_NEEDS_STEERING BIT(1) #define GUC_REGSET_MASKED_WITH_VALUE BIT(2) #define GUC_REGSET_RESTORE_ONLY BIT(3) +#define GUC_REGSET_STEERING_GROUP GENMASK(15, 12) +#define GUC_REGSET_STEERING_INSTANCE GENMASK(23, 20) + u32 mask; } __packed; /* GuC register sets */ @@ -311,6 +311,14 @@ enum { GUC_CAPTURE_LIST_INDEX_MAX = 2, }; +/*Register-types of GuC capture register lists */ +enum guc_capture_type { + GUC_CAPTURE_LIST_TYPE_GLOBAL = 0, + GUC_CAPTURE_LIST_TYPE_ENGINE_CLASS, + GUC_CAPTURE_LIST_TYPE_ENGINE_INSTANCE, + GUC_CAPTURE_LIST_TYPE_MAX, +}; + /* GuC Additional Data Struct */ struct guc_ads { struct guc_mmio_reg_set reg_state_list[GUC_MAX_ENGINE_CLASSES][GUC_MAX_INSTANCES_PER_CLASS]; diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_hwconfig.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_hwconfig.c new file mode 100644 index 000000000000..79c66b6b51a3 --- /dev/null +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_hwconfig.c @@ -0,0 +1,164 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright © 2022 Intel Corporation + */ + +#include "gt/intel_gt.h" +#include "gt/intel_hwconfig.h" +#include "i915_drv.h" +#include "i915_memcpy.h" + +/* + * GuC has a blob containing hardware configuration information (HWConfig). + * This is formatted as a simple and flexible KLV (Key/Length/Value) table. + * + * For example, a minimal version could be: + * enum device_attr { + * ATTR_SOME_VALUE = 0, + * ATTR_SOME_MASK = 1, + * }; + * + * static const u32 hwconfig[] = { + * ATTR_SOME_VALUE, + * 1, // Value Length in DWords + * 8, // Value + * + * ATTR_SOME_MASK, + * 3, + * 0x00FFFFFFFF, 0xFFFFFFFF, 0xFF000000, + * }; + * + * The attribute ids are defined in a hardware spec. + */ + +static int __guc_action_get_hwconfig(struct intel_guc *guc, + u32 ggtt_offset, u32 ggtt_size) +{ + u32 action[] = { + INTEL_GUC_ACTION_GET_HWCONFIG, + lower_32_bits(ggtt_offset), + upper_32_bits(ggtt_offset), + ggtt_size, + }; + int ret; + + ret = intel_guc_send_mmio(guc, action, ARRAY_SIZE(action), NULL, 0); + if (ret == -ENXIO) + return -ENOENT; + + return ret; +} + +static int guc_hwconfig_discover_size(struct intel_guc *guc, struct intel_hwconfig *hwconfig) +{ + int ret; + + /* + * Sending a query with zero offset and size will return the + * size of the blob. 
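+ * For example (hypothetical value): for a 4 KiB table the action returns + * 4096, which is then stored as hwconfig->size.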
+ +static int __guc_action_get_hwconfig(struct intel_guc *guc, + u32 ggtt_offset, u32 ggtt_size) +{ + u32 action[] = { + INTEL_GUC_ACTION_GET_HWCONFIG, + lower_32_bits(ggtt_offset), + upper_32_bits(ggtt_offset), + ggtt_size, + }; + int ret; + + ret = intel_guc_send_mmio(guc, action, ARRAY_SIZE(action), NULL, 0); + if (ret == -ENXIO) + return -ENOENT; + + return ret; +} + +static int guc_hwconfig_discover_size(struct intel_guc *guc, struct intel_hwconfig *hwconfig) +{ + int ret; + + /* + * Sending a query with zero offset and size will return the + * size of the blob. + */ + ret = __guc_action_get_hwconfig(guc, 0, 0); + if (ret < 0) + return ret; + + if (ret == 0) + return -EINVAL; + + hwconfig->size = ret; + return 0; +} + +static int guc_hwconfig_fill_buffer(struct intel_guc *guc, struct intel_hwconfig *hwconfig) +{ + struct i915_vma *vma; + u32 ggtt_offset; + void *vaddr; + int ret; + + GEM_BUG_ON(!hwconfig->size); + + ret = intel_guc_allocate_and_map_vma(guc, hwconfig->size, &vma, &vaddr); + if (ret) + return ret; + + ggtt_offset = intel_guc_ggtt_offset(guc, vma); + + ret = __guc_action_get_hwconfig(guc, ggtt_offset, hwconfig->size); + if (ret >= 0) + memcpy(hwconfig->ptr, vaddr, hwconfig->size); + + i915_vma_unpin_and_release(&vma, I915_VMA_RELEASE_MAP); + + return ret; +} + +static bool has_table(struct drm_i915_private *i915) +{ + if (IS_ALDERLAKE_P(i915)) + return true; + if (IS_DG2(i915)) + return true; + + return false; +} + +/** + * guc_hwconfig_init - Initialize the HWConfig + * + * Retrieve the HWConfig table from the GuC and save it locally. + * It can then be queried on demand by other users later on. + */ +static int guc_hwconfig_init(struct intel_gt *gt) +{ + struct intel_hwconfig *hwconfig = &gt->info.hwconfig; + struct intel_guc *guc = &gt->uc.guc; + int ret; + + if (!has_table(gt->i915)) + return 0; + + ret = guc_hwconfig_discover_size(guc, hwconfig); + if (ret) + return ret; + + hwconfig->ptr = kmalloc(hwconfig->size, GFP_KERNEL); + if (!hwconfig->ptr) { + hwconfig->size = 0; + return -ENOMEM; + } + + ret = guc_hwconfig_fill_buffer(guc, hwconfig); + if (ret < 0) { + intel_gt_fini_hwconfig(gt); + return ret; + } + + return 0; +} + +/** + * intel_gt_init_hwconfig - Initialize the HWConfig if available + * + * Retrieve the HWConfig table if available on the current platform. + */ +int intel_gt_init_hwconfig(struct intel_gt *gt) +{ + if (!intel_uc_uses_guc(&gt->uc)) + return 0; + + return guc_hwconfig_init(gt); +} + +/** + * intel_gt_fini_hwconfig - Finalize the HWConfig + * + * Free up the memory allocation holding the table. + */ +void intel_gt_fini_hwconfig(struct intel_gt *gt) +{ + struct intel_hwconfig *hwconfig = &gt->info.hwconfig; + + kfree(hwconfig->ptr); + hwconfig->size = 0; + hwconfig->ptr = NULL; +}
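The zero offset/size convention gives the usual two-step flow: probe for the size, allocate, then fetch. A hedged user-space analogue, with the GuC action stubbed out (fake_get_hwconfig and its blob are invented for this sketch):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Stand-in for the real H2G query: size 0 reports the blob size in bytes,
 * otherwise it copies the blob into dst. Entirely invented for the sketch. */
static int fake_get_hwconfig(void *dst, uint32_t size)
{
	static const uint32_t blob[] = { 0, 1, 8 };

	if (size == 0)
		return (int)sizeof(blob);
	memcpy(dst, blob, size < sizeof(blob) ? size : sizeof(blob));
	return 0;
}

int main(void)
{
	int size = fake_get_hwconfig(NULL, 0);	/* step 1: discover the size */
	void *buf;

	if (size <= 0)
		return 1;	/* a zero-sized table is treated as an error */
	buf = malloc(size);
	if (!buf)
		return 1;
	if (fake_get_hwconfig(buf, (uint32_t)size)) {	/* step 2: fill */
		free(buf);
		return 1;
	}
	printf("fetched %d bytes of hwconfig\n", size);
	free(buf);
	return 0;
}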
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c index b53f61f3101f..78d2989fe917 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c @@ -4,14 +4,16 @@ */ #include <linux/debugfs.h> +#include <linux/string_helpers.h> #include "gt/intel_gt.h" #include "i915_drv.h" #include "i915_irq.h" #include "i915_memcpy.h" +#include "intel_guc_capture.h" #include "intel_guc_log.h" -static void guc_log_capture_logs(struct intel_guc_log *log); +static void guc_log_copy_debuglogs_for_relay(struct intel_guc_log *log); /** * DOC: GuC firmware log @@ -25,7 +27,8 @@ static void guc_log_capture_logs(struct intel_guc_log *log); static int guc_action_flush_log_complete(struct intel_guc *guc) { u32 action[] = { - INTEL_GUC_ACTION_LOG_BUFFER_FILE_FLUSH_COMPLETE + INTEL_GUC_ACTION_LOG_BUFFER_FILE_FLUSH_COMPLETE, + GUC_DEBUG_LOG_BUFFER }; return intel_guc_send(guc, action, ARRAY_SIZE(action)); } @@ -136,7 +139,7 @@ static void guc_move_to_next_buf(struct intel_guc_log *log) smp_wmb(); /* All data has been written, so now move the offset of sub buffer. */ - relay_reserve(log->relay.channel, log->vma->obj->base.size); + relay_reserve(log->relay.channel, log->vma->obj->base.size - CAPTURE_BUFFER_SIZE); /* Switch to the next sub buffer */ relay_flush(log->relay.channel); @@ -156,9 +159,9 @@ static void *guc_get_write_buffer(struct intel_guc_log *log) return relay_reserve(log->relay.channel, 0); } -static bool guc_check_log_buf_overflow(struct intel_guc_log *log, - enum guc_log_buffer_type type, - unsigned int full_cnt) +bool intel_guc_check_log_buf_overflow(struct intel_guc_log *log, + enum guc_log_buffer_type type, + unsigned int full_cnt) { unsigned int prev_full_cnt = log->stats[type].sampled_overflow; bool overflow = false; @@ -181,7 +184,7 @@ static bool guc_check_log_buf_overflow(struct intel_guc_log *log, return overflow; } -static unsigned int guc_get_log_buffer_size(enum guc_log_buffer_type type) +unsigned int intel_guc_get_log_buffer_size(enum guc_log_buffer_type type) { switch (type) { case GUC_DEBUG_LOG_BUFFER: @@ -197,7 +200,21 @@ static unsigned int guc_get_log_buffer_size(enum guc_log_buffer_type type) return 0; } -static void guc_read_update_log_buffer(struct intel_guc_log *log) +size_t intel_guc_get_log_buffer_offset(enum guc_log_buffer_type type) +{ + enum guc_log_buffer_type i; + size_t offset = PAGE_SIZE; /* for the log_buffer_states */ + + for (i = GUC_DEBUG_LOG_BUFFER; i < GUC_MAX_LOG_BUFFER; ++i) { + if (i == type) + break; + offset += intel_guc_get_log_buffer_size(i); + } + + return offset; +} + +static void _guc_log_copy_debuglogs_for_relay(struct intel_guc_log *log) { unsigned int buffer_size, read_offset, write_offset, bytes_to_copy, full_cnt; struct guc_log_buffer_state *log_buf_state, *log_buf_snapshot_state; @@ -212,7 +229,8 @@ static void guc_read_update_log_buffer(struct intel_guc_log *log) goto out_unlock; /* Get the pointer to shared GuC log buffer */ - log_buf_state = src_data = log->relay.buf_addr; + src_data = log->buf_addr; + log_buf_state = src_data; /* Get the pointer to local buffer to store the logs */ log_buf_snapshot_state = dst_data = guc_get_write_buffer(log); @@ -222,7 +240,7 @@ static void guc_read_update_log_buffer(struct intel_guc_log *log) * Use rate limiting to avoid a deluge of messages; logs might be * getting consumed by the user at a slow rate. 
*/ - DRM_ERROR_RATELIMITED("no sub-buffer to capture logs\n"); + DRM_ERROR_RATELIMITED("no sub-buffer to copy general logs\n"); log->relay.full_count++; goto out_unlock; @@ -232,7 +250,8 @@ static void guc_read_update_log_buffer(struct intel_guc_log *log) src_data += PAGE_SIZE; dst_data += PAGE_SIZE; - for (type = GUC_DEBUG_LOG_BUFFER; type < GUC_MAX_LOG_BUFFER; type++) { + /* For relay logging, we exclude error state capture */ + for (type = GUC_DEBUG_LOG_BUFFER; type <= GUC_CRASH_DUMP_LOG_BUFFER; type++) { /* * Make a copy of the state structure, inside GuC log buffer * (which is uncached mapped), on the stack to avoid reading @@ -240,14 +259,14 @@ static void guc_read_update_log_buffer(struct intel_guc_log *log) */ memcpy(&log_buf_state_local, log_buf_state, sizeof(struct guc_log_buffer_state)); - buffer_size = guc_get_log_buffer_size(type); + buffer_size = intel_guc_get_log_buffer_size(type); read_offset = log_buf_state_local.read_ptr; write_offset = log_buf_state_local.sampled_write_ptr; full_cnt = log_buf_state_local.buffer_full_cnt; /* Bookkeeping stuff */ log->stats[type].flush += log_buf_state_local.flush_to_file; - new_overflow = guc_check_log_buf_overflow(log, type, full_cnt); + new_overflow = intel_guc_check_log_buf_overflow(log, type, full_cnt); /* Update the state of shared log buffer */ log_buf_state->read_ptr = write_offset; @@ -300,49 +319,43 @@ out_unlock: mutex_unlock(&log->relay.lock); } -static void capture_logs_work(struct work_struct *work) +static void copy_debug_logs_work(struct work_struct *work) { struct intel_guc_log *log = container_of(work, struct intel_guc_log, relay.flush_work); - guc_log_capture_logs(log); + guc_log_copy_debuglogs_for_relay(log); } -static int guc_log_map(struct intel_guc_log *log) +static int guc_log_relay_map(struct intel_guc_log *log) { - void *vaddr; - lockdep_assert_held(&log->relay.lock); - if (!log->vma) + if (!log->vma || !log->buf_addr) return -ENODEV; /* - * Create a WC (Uncached for read) vmalloc mapping of log - * buffer pages, so that we can directly get the data - * (up-to-date) from memory. 
+ * WC vmalloc mapping of log buffer pages was done at + * GuC Log Init time, but let's keep a ref for book-keeping */ - vaddr = i915_gem_object_pin_map_unlocked(log->vma->obj, I915_MAP_WC); - if (IS_ERR(vaddr)) - return PTR_ERR(vaddr); - - log->relay.buf_addr = vaddr; + i915_gem_object_get(log->vma->obj); + log->relay.buf_in_use = true; return 0; } -static void guc_log_unmap(struct intel_guc_log *log) +static void guc_log_relay_unmap(struct intel_guc_log *log) { lockdep_assert_held(&log->relay.lock); - i915_gem_object_unpin_map(log->vma->obj); - log->relay.buf_addr = NULL; + i915_gem_object_put(log->vma->obj); + log->relay.buf_in_use = false; } void intel_guc_log_init_early(struct intel_guc_log *log) { mutex_init(&log->relay.lock); - INIT_WORK(&log->relay.flush_work, capture_logs_work); + INIT_WORK(&log->relay.flush_work, copy_debug_logs_work); log->relay.started = false; } @@ -357,8 +370,11 @@ static int guc_log_relay_create(struct intel_guc_log *log) lockdep_assert_held(&log->relay.lock); GEM_BUG_ON(!log->vma); - /* Keep the size of sub buffers same as shared log buffer */ - subbuf_size = log->vma->size; + /* + * Keep the size of sub buffers same as the shared log buffer, + * but GuC log events exclude the error-state-capture logs + */ + subbuf_size = log->vma->size - CAPTURE_BUFFER_SIZE; /* * Store up to 8 snapshots, which is large enough to buffer sufficient @@ -393,13 +409,13 @@ static void guc_log_relay_destroy(struct intel_guc_log *log) log->relay.channel = NULL; } -static void guc_log_capture_logs(struct intel_guc_log *log) +static void guc_log_copy_debuglogs_for_relay(struct intel_guc_log *log) { struct intel_guc *guc = log_to_guc(log); struct drm_i915_private *dev_priv = guc_to_gt(guc)->i915; intel_wakeref_t wakeref; - guc_read_update_log_buffer(log); + _guc_log_copy_debuglogs_for_relay(log); /* * Generally device is expected to be active only at this @@ -439,6 +455,7 @@ int intel_guc_log_create(struct intel_guc_log *log) { struct intel_guc *guc = log_to_guc(log); struct i915_vma *vma; + void *vaddr; u32 guc_log_size; int ret; @@ -446,23 +463,28 @@ /* * GuC Log buffer Layout + * (this ordering must follow "enum guc_log_buffer_type" definition) * * +===============================+ 00B - * | Crash dump state header | - * +-------------------------------+ 32B * | Debug state header | + * +-------------------------------+ 32B + * | Crash dump state header | * +-------------------------------+ 64B * | Capture state header | * +-------------------------------+ 96B * | | * +===============================+ PAGE_SIZE (4KB) - * | Crash Dump logs | - * +===============================+ + CRASH_SIZE * | Debug logs | * +===============================+ + DEBUG_SIZE + * | Crash Dump logs | + * +===============================+ + CRASH_SIZE * | Capture logs | * +===============================+ + CAPTURE_SIZE */
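Given that the section order must follow enum guc_log_buffer_type, the offset of each section is simply the 4K state page plus the sizes of every preceding section, which is what intel_guc_get_log_buffer_offset() above computes. A small standalone sketch with stand-in sizes (the real CRASH/DEBUG/CAPTURE_BUFFER_SIZE values depend on configuration):

#include <stdio.h>

enum buf_type { DEBUG_BUF, CRASH_BUF, CAPTURE_BUF, MAX_BUF };

int main(void)
{
	/* Stand-in sizes; the driver's actual values differ by config */
	static const unsigned int size[MAX_BUF] = {
		[DEBUG_BUF] = 8u << 20,
		[CRASH_BUF] = 1u << 20,
		[CAPTURE_BUF] = 4u << 20,
	};
	unsigned int offset = 4096; /* state headers live in the first page */
	int i;

	for (i = 0; i < MAX_BUF; i++) {
		printf("section %d at offset %u, size %u\n", i, offset, size[i]);
		offset += size[i];
	}
	printf("total BO size: %u\n", offset);
	return 0;
}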
+ if (intel_guc_capture_output_min_size_est(guc) > CAPTURE_BUFFER_SIZE) + DRM_WARN("GuC log buffer for state_capture may be too small. %d < %d\n", + CAPTURE_BUFFER_SIZE, intel_guc_capture_output_min_size_est(guc)); + guc_log_size = PAGE_SIZE + CRASH_BUFFER_SIZE + DEBUG_BUFFER_SIZE + CAPTURE_BUFFER_SIZE; @@ -473,23 +495,35 @@ int intel_guc_log_create(struct intel_guc_log *log) } log->vma = vma; + /* + * Create a WC (Uncached for read) vmalloc mapping up front for immediate access to + * data from memory during critical events such as error capture + */ + vaddr = i915_gem_object_pin_map_unlocked(log->vma->obj, I915_MAP_WC); + if (IS_ERR(vaddr)) { + ret = PTR_ERR(vaddr); + i915_vma_unpin_and_release(&log->vma, 0); + goto err; + } + log->buf_addr = vaddr; log->level = __get_default_log_level(log); DRM_DEBUG_DRIVER("guc_log_level=%d (%s, verbose:%s, verbosity:%d)\n", log->level, str_enabled_disabled(log->level), str_yes_no(GUC_LOG_LEVEL_IS_VERBOSE(log->level)), GUC_LOG_LEVEL_TO_VERBOSITY(log->level)); return 0; err: - DRM_ERROR("Failed to allocate GuC log buffer. %d\n", ret); + DRM_ERROR("Failed to allocate or map GuC log buffer. %d\n", ret); return ret; } void intel_guc_log_destroy(struct intel_guc_log *log) { - i915_vma_unpin_and_release(&log->vma, 0); + log->buf_addr = NULL; + i915_vma_unpin_and_release(&log->vma, I915_VMA_RELEASE_MAP); } int intel_guc_log_set_level(struct intel_guc_log *log, u32 level) @@ -534,7 +568,7 @@ out_unlock: bool intel_guc_log_relay_created(const struct intel_guc_log *log) { - return log->relay.buf_addr; + return log->buf_addr; } int intel_guc_log_relay_open(struct intel_guc_log *log) @@ -565,7 +599,7 @@ int intel_guc_log_relay_open(struct intel_guc_log *log) if (ret) goto out_unlock; - ret = guc_log_map(log); + ret = guc_log_relay_map(log); if (ret) goto out_relay; @@ -615,8 +649,8 @@ void intel_guc_log_relay_flush(struct intel_guc_log *log) with_intel_runtime_pm(guc_to_gt(guc)->uncore->rpm, wakeref) guc_action_flush_log(guc); - /* GuC would have updated log buffer by now, so capture it */ - guc_log_capture_logs(log); + /* GuC would have updated log buffer by now, so copy it */ + guc_log_copy_debuglogs_for_relay(log); } /* @@ -645,7 +679,7 @@ void intel_guc_log_relay_close(struct intel_guc_log *log) mutex_lock(&log->relay.lock); GEM_BUG_ON(!intel_guc_log_relay_created(log)); - guc_log_unmap(log); + guc_log_relay_unmap(log); guc_log_relay_destroy(log); mutex_unlock(&log->relay.lock); } diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_log.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_log.h index d7e1b6471fed..18007e639be9 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_log.h +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_log.h @@ -49,8 +49,9 @@ struct intel_guc; struct intel_guc_log { u32 level; struct i915_vma *vma; + void *buf_addr; struct { - void *buf_addr; + bool buf_in_use; bool started; struct work_struct flush_work; struct rchan *channel; @@ -66,6 +67,10 @@ struct intel_guc_log { }; void intel_guc_log_init_early(struct intel_guc_log *log); +bool intel_guc_check_log_buf_overflow(struct intel_guc_log *log, enum guc_log_buffer_type type, + unsigned int full_cnt); +unsigned int intel_guc_get_log_buffer_size(enum guc_log_buffer_type type); +size_t intel_guc_get_log_buffer_offset(enum guc_log_buffer_type type); int intel_guc_log_create(struct intel_guc_log *log); void intel_guc_log_destroy(struct intel_guc_log *log);
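On the overflow helper now exported in this header: buffer_full_cnt is a narrow hardware counter, so a wrap is detected when the newly sampled count is below the previous one, and the counter's modulus is added back (the driver adds 16, consistent with a 4-bit counter). A sketch of that arithmetic under the same 4-bit assumption:

#include <stdio.h>

/* Returns overflows seen since the last sample; 'bits' is the counter width. */
static unsigned int count_overflows(unsigned int prev, unsigned int now,
				    unsigned int bits)
{
	unsigned int n = now - prev;

	if (now < prev)		/* the hardware counter wrapped */
		n += 1u << bits;
	return n;
}

int main(void)
{
	printf("%u\n", count_overflows(3, 7, 4));  /* 4 */
	printf("%u\n", count_overflows(14, 2, 4)); /* 4 again, via one wrap */
	return 0;
}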
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_rc.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_rc.c index fc805d466d99..e00661fb0853 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_rc.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_rc.c @@ -3,6 +3,8 @@ * Copyright © 2021 Intel Corporation */ +#include <linux/string_helpers.h> + #include "intel_guc_rc.h" #include "gt/intel_gt.h" #include "i915_drv.h" @@ -59,12 +61,12 @@ static int __guc_rc_control(struct intel_guc *guc, bool enable) ret = guc_action_control_gucrc(guc, enable); if (ret) { drm_err(drm, "Failed to %s GuC RC (%pe)\n", - enabledisable(enable), ERR_PTR(ret)); + str_enable_disable(enable), ERR_PTR(ret)); return ret; } drm_info(&gt->i915->drm, "GuC RC: %s\n", - enableddisabled(enable)); + str_enabled_disabled(enable)); return 0; } diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_reg.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_reg.h index 66027a42cda9..ad570fa002a6 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_reg.h +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_reg.h @@ -28,7 +28,7 @@ #define GS_MIA_HALT_REQUESTED (0x02 << GS_MIA_SHIFT) #define GS_MIA_ISR_ENTRY (0x04 << GS_MIA_SHIFT) #define GS_AUTH_STATUS_SHIFT 30 -#define GS_AUTH_STATUS_MASK (0x03 << GS_AUTH_STATUS_SHIFT) +#define GS_AUTH_STATUS_MASK (0x03U << GS_AUTH_STATUS_SHIFT) #define GS_AUTH_STATUS_BAD (0x01 << GS_AUTH_STATUS_SHIFT) #define GS_AUTH_STATUS_GOOD (0x02 << GS_AUTH_STATUS_SHIFT) diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c index ac749ab11035..1db833da42df 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c @@ -4,6 +4,7 @@ */ #include <drm/drm_cache.h> +#include <linux/string_helpers.h> #include "i915_drv.h" #include "i915_reg.h" @@ -152,8 +153,8 @@ static int slpc_query_task_state(struct intel_guc_slpc *slpc) ret = guc_action_slpc_query(guc, offset); if (unlikely(ret)) - drm_err(&i915->drm, "Failed to query task state (%pe)\n", - ERR_PTR(ret)); + i915_probe_error(i915, "Failed to query task state (%pe)\n", + ERR_PTR(ret)); drm_clflush_virt_range(slpc->vaddr, SLPC_PAGE_SIZE_BYTES); @@ -170,8 +171,8 @@ static int slpc_set_param(struct intel_guc_slpc *slpc, u8 id, u32 value) ret = guc_action_slpc_set_param(guc, id, value); if (ret) - drm_err(&i915->drm, "Failed to set param %d to %u (%pe)\n", - id, value, ERR_PTR(ret)); + i915_probe_error(i915, "Failed to set param %d to %u (%pe)\n", + id, value, ERR_PTR(ret)); return ret; } @@ -211,8 +212,8 @@ static int slpc_force_min_freq(struct intel_guc_slpc *slpc, u32 freq) SLPC_PARAM_GLOBAL_MIN_GT_UNSLICE_FREQ_MHZ, freq); if (ret) - drm_err(&i915->drm, "Unable to force min freq to %u: %d", - freq, ret); + i915_probe_error(i915, "Unable to force min freq to %u: %d", + freq, ret); } return ret; @@ -247,9 +248,9 @@ int intel_guc_slpc_init(struct intel_guc_slpc *slpc) err = intel_guc_allocate_and_map_vma(guc, size, &slpc->vma, (void **)&slpc->vaddr); if (unlikely(err)) { - drm_err(&i915->drm, - "Failed to allocate SLPC struct (err=%pe)\n", - ERR_PTR(err)); + i915_probe_error(i915, + "Failed to allocate SLPC struct (err=%pe)\n", + ERR_PTR(err)); return err; } @@ -316,15 +317,15 @@ static int slpc_reset(struct intel_guc_slpc *slpc) ret = guc_action_slpc_reset(guc, offset); if (unlikely(ret < 0)) { - drm_err(&i915->drm, "SLPC reset action failed (%pe)\n", - ERR_PTR(ret)); + i915_probe_error(i915, "SLPC reset action failed (%pe)\n", + ERR_PTR(ret)); return ret; } if (!ret) { if (wait_for(slpc_is_running(slpc), SLPC_RESET_TIMEOUT_MS)) { - drm_err(&i915->drm, "SLPC not enabled! State = %s\n", - slpc_get_state_string(slpc)); + i915_probe_error(i915, "SLPC not enabled! 
State = %s\n", + slpc_get_state_string(slpc)); return -EIO; } } @@ -581,16 +582,12 @@ static int slpc_use_fused_rp0(struct intel_guc_slpc *slpc) static void slpc_get_rp_values(struct intel_guc_slpc *slpc) { struct intel_rps *rps = &slpc_to_gt(slpc)->rps; - u32 rp_state_cap; + struct intel_rps_freq_caps caps; - rp_state_cap = intel_rps_read_state_cap(rps); - - slpc->rp0_freq = REG_FIELD_GET(RP0_CAP_MASK, rp_state_cap) * - GT_FREQUENCY_MULTIPLIER; - slpc->rp1_freq = REG_FIELD_GET(RP1_CAP_MASK, rp_state_cap) * - GT_FREQUENCY_MULTIPLIER; - slpc->min_freq = REG_FIELD_GET(RPN_CAP_MASK, rp_state_cap) * - GT_FREQUENCY_MULTIPLIER; + gen6_rps_get_freq_caps(rps, &caps); + slpc->rp0_freq = intel_gpu_freq(rps, caps.rp0_freq); + slpc->rp1_freq = intel_gpu_freq(rps, caps.rp1_freq); + slpc->min_freq = intel_gpu_freq(rps, caps.min_freq); if (!slpc->boost_freq) slpc->boost_freq = slpc->rp0_freq; @@ -620,8 +617,8 @@ int intel_guc_slpc_enable(struct intel_guc_slpc *slpc) ret = slpc_reset(slpc); if (unlikely(ret < 0)) { - drm_err(&i915->drm, "SLPC Reset event returned (%pe)\n", - ERR_PTR(ret)); + i915_probe_error(i915, "SLPC Reset event returned (%pe)\n", + ERR_PTR(ret)); return ret; } @@ -636,24 +633,24 @@ int intel_guc_slpc_enable(struct intel_guc_slpc *slpc) /* Ignore efficient freq and set min to platform min */ ret = slpc_ignore_eff_freq(slpc, true); if (unlikely(ret)) { - drm_err(&i915->drm, "Failed to set SLPC min to RPn (%pe)\n", - ERR_PTR(ret)); + i915_probe_error(i915, "Failed to set SLPC min to RPn (%pe)\n", + ERR_PTR(ret)); return ret; } /* Set SLPC max limit to RP0 */ ret = slpc_use_fused_rp0(slpc); if (unlikely(ret)) { - drm_err(&i915->drm, "Failed to set SLPC max to RP0 (%pe)\n", - ERR_PTR(ret)); + i915_probe_error(i915, "Failed to set SLPC max to RP0 (%pe)\n", + ERR_PTR(ret)); return ret; } /* Revert SLPC min/max to softlimits if necessary */ ret = slpc_set_softlimits(slpc); if (unlikely(ret)) { - drm_err(&i915->drm, "Failed to set SLPC softlimits (%pe)\n", - ERR_PTR(ret)); + i915_probe_error(i915, "Failed to set SLPC softlimits (%pe)\n", + ERR_PTR(ret)); return ret; } @@ -719,7 +716,7 @@ int intel_guc_slpc_print_info(struct intel_guc_slpc *slpc, struct drm_printer *p drm_printf(p, "\tSLPC state: %s\n", slpc_get_state_string(slpc)); drm_printf(p, "\tGTPERF task active: %s\n", - yesno(slpc_tasks->status & SLPC_GTPERF_TASK_ENABLED)); + str_yes_no(slpc_tasks->status & SLPC_GTPERF_TASK_ENABLED)); drm_printf(p, "\tMax freq: %u MHz\n", slpc_decode_max_freq(slpc)); drm_printf(p, "\tMin freq: %u MHz\n", diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c index 28f9aac0201d..1726f0f19901 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c @@ -25,6 +25,7 @@ #include "gt/intel_ring.h" #include "intel_guc_ads.h" +#include "intel_guc_capture.h" #include "intel_guc_submission.h" #include "i915_drv.h" @@ -161,7 +162,8 @@ guc_create_parallel(struct intel_engine_cs **engines, #define SCHED_STATE_ENABLED BIT(4) #define SCHED_STATE_PENDING_ENABLE BIT(5) #define SCHED_STATE_REGISTERED BIT(6) -#define SCHED_STATE_BLOCKED_SHIFT 7 +#define SCHED_STATE_POLICY_REQUIRED BIT(7) +#define SCHED_STATE_BLOCKED_SHIFT 8 #define SCHED_STATE_BLOCKED BIT(SCHED_STATE_BLOCKED_SHIFT) #define SCHED_STATE_BLOCKED_MASK (0xfff << SCHED_STATE_BLOCKED_SHIFT) @@ -300,6 +302,23 @@ static inline void clr_context_registered(struct intel_context *ce) ce->guc_state.sched_state &= ~SCHED_STATE_REGISTERED; } +static 
inline bool context_policy_required(struct intel_context *ce) +{ + return ce->guc_state.sched_state & SCHED_STATE_POLICY_REQUIRED; +} + +static inline void set_context_policy_required(struct intel_context *ce) +{ + lockdep_assert_held(&ce->guc_state.lock); + ce->guc_state.sched_state |= SCHED_STATE_POLICY_REQUIRED; +} + +static inline void clr_context_policy_required(struct intel_context *ce) +{ + lockdep_assert_held(&ce->guc_state.lock); + ce->guc_state.sched_state &= ~SCHED_STATE_POLICY_REQUIRED; +} + static inline u32 context_blocked(struct intel_context *ce) { return (ce->guc_state.sched_state & SCHED_STATE_BLOCKED_MASK) >> @@ -351,12 +370,12 @@ request_to_scheduling_context(struct i915_request *rq) static inline bool context_guc_id_invalid(struct intel_context *ce) { - return ce->guc_id.id == GUC_INVALID_LRC_ID; + return ce->guc_id.id == GUC_INVALID_CONTEXT_ID; } static inline void set_context_guc_id_invalid(struct intel_context *ce) { - ce->guc_id.id = GUC_INVALID_LRC_ID; + ce->guc_id.id = GUC_INVALID_CONTEXT_ID; } static inline struct intel_guc *ce_to_guc(struct intel_context *ce) @@ -395,12 +414,12 @@ struct sync_semaphore { }; struct parent_scratch { - struct guc_process_desc pdesc; + struct guc_sched_wq_desc wq_desc; struct sync_semaphore go; struct sync_semaphore join[MAX_ENGINE_INSTANCE + 1]; - u8 unused[WQ_OFFSET - sizeof(struct guc_process_desc) - + u8 unused[WQ_OFFSET - sizeof(struct guc_sched_wq_desc) - sizeof(struct sync_semaphore) * (MAX_ENGINE_INSTANCE + 2)]; u32 wq[WQ_SIZE / sizeof(u32)]; @@ -437,15 +456,15 @@ __get_parent_scratch(struct intel_context *ce) LRC_STATE_OFFSET) / sizeof(u32))); } -static struct guc_process_desc * -__get_process_desc(struct intel_context *ce) +static struct guc_sched_wq_desc * +__get_wq_desc(struct intel_context *ce) { struct parent_scratch *ps = __get_parent_scratch(ce); - return &ps->pdesc; + return &ps->wq_desc; } -static u32 *get_wq_pointer(struct guc_process_desc *desc, +static u32 *get_wq_pointer(struct guc_sched_wq_desc *wq_desc, struct intel_context *ce, u32 wqi_size) { @@ -457,7 +476,7 @@ static u32 *get_wq_pointer(struct guc_process_desc *desc, #define AVAILABLE_SPACE \ CIRC_SPACE(ce->parallel.guc.wqi_tail, ce->parallel.guc.wqi_head, WQ_SIZE) if (wqi_size > AVAILABLE_SPACE) { - ce->parallel.guc.wqi_head = READ_ONCE(desc->head); + ce->parallel.guc.wqi_head = READ_ONCE(wq_desc->head); if (wqi_size > AVAILABLE_SPACE) return NULL; @@ -467,75 +486,27 @@ static u32 *get_wq_pointer(struct guc_process_desc *desc, return &__get_parent_scratch(ce)->wq[ce->parallel.guc.wqi_tail / sizeof(u32)]; } -static struct guc_lrc_desc *__get_lrc_desc(struct intel_guc *guc, u32 index) -{ - struct guc_lrc_desc *base = guc->lrc_desc_pool_vaddr; - - GEM_BUG_ON(index >= GUC_MAX_LRC_DESCRIPTORS); - - return &base[index]; -} - static inline struct intel_context *__get_context(struct intel_guc *guc, u32 id) { struct intel_context *ce = xa_load(&guc->context_lookup, id); - GEM_BUG_ON(id >= GUC_MAX_LRC_DESCRIPTORS); + GEM_BUG_ON(id >= GUC_MAX_CONTEXT_ID); return ce; } -static int guc_lrc_desc_pool_create(struct intel_guc *guc) -{ - u32 size; - int ret; - - size = PAGE_ALIGN(sizeof(struct guc_lrc_desc) * - GUC_MAX_LRC_DESCRIPTORS); - ret = intel_guc_allocate_and_map_vma(guc, size, &guc->lrc_desc_pool, - (void **)&guc->lrc_desc_pool_vaddr); - if (ret) - return ret; - - return 0; -} - -static void guc_lrc_desc_pool_destroy(struct intel_guc *guc) -{ - guc->lrc_desc_pool_vaddr = NULL; - i915_vma_unpin_and_release(&guc->lrc_desc_pool, I915_VMA_RELEASE_MAP); -} - static 
inline bool guc_submission_initialized(struct intel_guc *guc) { - return !!guc->lrc_desc_pool_vaddr; -} - -static inline void reset_lrc_desc(struct intel_guc *guc, u32 id) -{ - if (likely(guc_submission_initialized(guc))) { - struct guc_lrc_desc *desc = __get_lrc_desc(guc, id); - unsigned long flags; - - memset(desc, 0, sizeof(*desc)); - - /* - * xarray API doesn't have xa_erase_irqsave wrapper, so calling - * the lower level functions directly. - */ - xa_lock_irqsave(&guc->context_lookup, flags); - __xa_erase(&guc->context_lookup, id); - xa_unlock_irqrestore(&guc->context_lookup, flags); - } + return guc->submission_initialized; } -static inline bool lrc_desc_registered(struct intel_guc *guc, u32 id) +static inline bool ctx_id_mapped(struct intel_guc *guc, u32 id) { return __get_context(guc, id); } -static inline void set_lrc_desc_registered(struct intel_guc *guc, u32 id, - struct intel_context *ce) +static inline void set_ctx_id_mapping(struct intel_guc *guc, u32 id, + struct intel_context *ce) { unsigned long flags; @@ -548,6 +519,22 @@ static inline void set_lrc_desc_registered(struct intel_guc *guc, u32 id, xa_unlock_irqrestore(&guc->context_lookup, flags); } +static inline void clr_ctx_id_mapping(struct intel_guc *guc, u32 id) +{ + unsigned long flags; + + if (unlikely(!guc_submission_initialized(guc))) + return; + + /* + * xarray API doesn't have xa_erase_irqsave wrapper, so calling + * the lower level functions directly. + */ + xa_lock_irqsave(&guc->context_lookup, flags); + __xa_erase(&guc->context_lookup, id); + xa_unlock_irqrestore(&guc->context_lookup, flags); +} + static void decr_outstanding_submission_g2h(struct intel_guc *guc) { if (atomic_dec_and_test(&guc->outstanding_submission_g2h)) @@ -624,7 +611,8 @@ int intel_guc_wait_for_idle(struct intel_guc *guc, long timeout) true, timeout); } -static int guc_lrc_desc_pin(struct intel_context *ce, bool loop); +static int guc_context_policy_init(struct intel_context *ce, bool loop); +static int try_context_registration(struct intel_context *ce, bool loop); static int __guc_add_request(struct intel_guc *guc, struct i915_request *rq) { @@ -650,6 +638,12 @@ static int __guc_add_request(struct intel_guc *guc, struct i915_request *rq) GEM_BUG_ON(!atomic_read(&ce->guc_id.ref)); GEM_BUG_ON(context_guc_id_invalid(ce)); + if (context_policy_required(ce)) { + err = guc_context_policy_init(ce, false); + if (err) + return err; + } + spin_lock(&ce->guc_state.lock); /* @@ -743,7 +737,7 @@ static u32 wq_space_until_wrap(struct intel_context *ce) return (WQ_SIZE - ce->parallel.guc.wqi_tail); } -static void write_wqi(struct guc_process_desc *desc, +static void write_wqi(struct guc_sched_wq_desc *wq_desc, struct intel_context *ce, u32 wqi_size) { @@ -756,13 +750,13 @@ static void write_wqi(struct guc_process_desc *desc, ce->parallel.guc.wqi_tail = (ce->parallel.guc.wqi_tail + wqi_size) & (WQ_SIZE - 1); - WRITE_ONCE(desc->tail, ce->parallel.guc.wqi_tail); + WRITE_ONCE(wq_desc->tail, ce->parallel.guc.wqi_tail); } static int guc_wq_noop_append(struct intel_context *ce) { - struct guc_process_desc *desc = __get_process_desc(ce); - u32 *wqi = get_wq_pointer(desc, ce, wq_space_until_wrap(ce)); + struct guc_sched_wq_desc *wq_desc = __get_wq_desc(ce); + u32 *wqi = get_wq_pointer(wq_desc, ce, wq_space_until_wrap(ce)); u32 len_dw = wq_space_until_wrap(ce) / sizeof(u32) - 1; if (!wqi) @@ -781,7 +775,7 @@ static int __guc_wq_item_append(struct i915_request *rq) { struct intel_context *ce = request_to_scheduling_context(rq); struct intel_context *child; - 
struct guc_process_desc *desc = __get_process_desc(ce); + struct guc_sched_wq_desc *wq_desc = __get_wq_desc(ce); unsigned int wqi_size = (ce->parallel.number_children + 4) * sizeof(u32); u32 *wqi; @@ -792,7 +786,7 @@ static int __guc_wq_item_append(struct i915_request *rq) GEM_BUG_ON(!atomic_read(&ce->guc_id.ref)); GEM_BUG_ON(context_guc_id_invalid(ce)); GEM_BUG_ON(context_wait_for_deregister_to_register(ce)); - GEM_BUG_ON(!lrc_desc_registered(ce_to_guc(ce), ce->guc_id.id)); + GEM_BUG_ON(!ctx_id_mapped(ce_to_guc(ce), ce->guc_id.id)); /* Insert NOOP if this work queue item will wrap the tail pointer. */ if (wqi_size > wq_space_until_wrap(ce)) { @@ -801,7 +795,7 @@ static int __guc_wq_item_append(struct i915_request *rq) return ret; } - wqi = get_wq_pointer(desc, ce, wqi_size); + wqi = get_wq_pointer(wq_desc, ce, wqi_size); if (!wqi) return -EBUSY; @@ -816,7 +810,7 @@ static int __guc_wq_item_append(struct i915_request *rq) for_each_child(ce, child) *wqi++ = child->ring->tail / sizeof(u64); - write_wqi(desc, ce, wqi_size); + write_wqi(wq_desc, ce, wqi_size); return 0; } @@ -920,9 +914,9 @@ register_context: if (submit) { struct intel_context *ce = request_to_scheduling_context(last); - if (unlikely(!lrc_desc_registered(guc, ce->guc_id.id) && + if (unlikely(!ctx_id_mapped(guc, ce->guc_id.id) && !intel_context_is_banned(ce))) { - ret = guc_lrc_desc_pin(ce, false); + ret = try_context_registration(ce, false); if (unlikely(ret == -EPIPE)) { goto deadlk; } else if (ret == -EBUSY) { @@ -1206,20 +1200,6 @@ static u32 gpm_timestamp_shift(struct intel_gt *gt) return 3 - shift; } -static u64 gpm_timestamp(struct intel_gt *gt) -{ - u32 lo, hi, old_hi, loop = 0; - - hi = intel_uncore_read(gt->uncore, MISC_STATUS1); - do { - lo = intel_uncore_read(gt->uncore, MISC_STATUS0); - old_hi = hi; - hi = intel_uncore_read(gt->uncore, MISC_STATUS1); - } while (old_hi != hi && loop++ < 2); - - return ((u64)hi << 32) | lo; -} - static void guc_update_pm_timestamp(struct intel_guc *guc, ktime_t *now) { struct intel_gt *gt = guc_to_gt(guc); @@ -1229,7 +1209,8 @@ static void guc_update_pm_timestamp(struct intel_guc *guc, ktime_t *now) lockdep_assert_held(&guc->timestamp.lock); gt_stamp_hi = upper_32_bits(guc->timestamp.gt_stamp); - gpm_ts = gpm_timestamp(gt) >> guc->timestamp.shift; + gpm_ts = intel_uncore_read64_2x32(gt->uncore, MISC_STATUS0, + MISC_STATUS1) >> guc->timestamp.shift; gt_stamp_lo = lower_32_bits(gpm_ts); *now = ktime_get(); @@ -1546,6 +1527,89 @@ static void guc_reset_state(struct intel_context *ce, u32 head, bool scrub) lrc_update_regs(ce, engine, head); } +static u32 __cs_pending_mi_force_wakes(struct intel_engine_cs *engine) +{ + static const i915_reg_t _reg[I915_NUM_ENGINES] = { + [RCS0] = MSG_IDLE_CS, + [BCS0] = MSG_IDLE_BCS, + [VCS0] = MSG_IDLE_VCS0, + [VCS1] = MSG_IDLE_VCS1, + [VCS2] = MSG_IDLE_VCS2, + [VCS3] = MSG_IDLE_VCS3, + [VCS4] = MSG_IDLE_VCS4, + [VCS5] = MSG_IDLE_VCS5, + [VCS6] = MSG_IDLE_VCS6, + [VCS7] = MSG_IDLE_VCS7, + [VECS0] = MSG_IDLE_VECS0, + [VECS1] = MSG_IDLE_VECS1, + [VECS2] = MSG_IDLE_VECS2, + [VECS3] = MSG_IDLE_VECS3, + [CCS0] = MSG_IDLE_CS, + [CCS1] = MSG_IDLE_CS, + [CCS2] = MSG_IDLE_CS, + [CCS3] = MSG_IDLE_CS, + }; + u32 val; + + if (!_reg[engine->id].reg) + return 0; + + val = intel_uncore_read(engine->uncore, _reg[engine->id]); + + /* pending forcewake in bits[13:9], qualified by the valid mask in bits[29:25] */ + return (val & (val >> 16) & MSG_IDLE_FW_MASK) >> MSG_IDLE_FW_SHIFT; +}
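The mask-against-itself trick in __cs_pending_mi_force_wakes() above can be checked in isolation: shifting the register right by 16 lines the valid bits (29:25) up with the pending bits (13:9) before masking. A standalone sketch with a made-up register value:

#include <stdint.h>
#include <stdio.h>

#define FW_MASK  0x3e00u /* bits 13:9, as in MSG_IDLE_FW_MASK */
#define FW_SHIFT 9

static uint32_t pending_fw(uint32_t val)
{
	/* the valid bits sit 16 above the pending bits, so fold them down */
	return (val & (val >> 16) & FW_MASK) >> FW_SHIFT;
}

int main(void)
{
	uint32_t val = (0x1fu << 25) | (0x05u << 9); /* all valid, two pending */

	printf("pending forcewake mask: 0x%02x\n",
	       (unsigned int)pending_fw(val)); /* prints 0x05 */
	return 0;
}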
+ +static void __gpm_wait_for_fw_complete(struct intel_gt *gt, u32 fw_mask) +{ + int ret; + + /* Ensure GPM receives fw up/down after CS is stopped */ + udelay(1); + + /* Wait for forcewake request to complete in GPM */ + ret = __intel_wait_for_register_fw(gt->uncore, + GEN9_PWRGT_DOMAIN_STATUS, + fw_mask, fw_mask, 5000, 0, NULL); + + /* Ensure CS receives fw ack from GPM */ + udelay(1); + + if (ret) + GT_TRACE(gt, "Failed to complete pending forcewake %d\n", ret); +} + +/* + * Wa_22011802037:gen12: In addition to stopping the cs, we need to wait for any + * pending MI_FORCE_WAKEUP requests that the CS has initiated to complete. The + * pending status is indicated by bits[13:9] (masked by bits[29:25]) in the + * MSG_IDLE register. There's one MSG_IDLE register per reset domain. Since we + * are concerned only with the gt reset here, we use a logical OR of pending + * forcewakeups from all reset domains and then wait for them to complete by + * querying PWRGT_DOMAIN_STATUS. + */ +static void guc_engine_reset_prepare(struct intel_engine_cs *engine) +{ + u32 fw_pending; + + if (GRAPHICS_VER(engine->i915) != 12) + return; + + /* + * Wa_22011802037 + * TODO: Occasionally trying to stop the cs times out, but does not + * adversely affect functionality. The timeout is set as a config + * parameter that defaults to 100ms. Assuming that this timeout is + * sufficient for any pending MI_FORCEWAKEs to complete, ignore the + * timeout returned here until it is root caused. + */ + intel_engine_stop_cs(engine); + + fw_pending = __cs_pending_mi_force_wakes(engine); + if (fw_pending) + __gpm_wait_for_fw_complete(engine->gt, fw_pending); +} + static void guc_reset_nop(struct intel_engine_cs *engine) { } @@ -1804,20 +1868,10 @@ static void reset_fail_worker_func(struct work_struct *w); int intel_guc_submission_init(struct intel_guc *guc) { struct intel_gt *gt = guc_to_gt(guc); - int ret; - if (guc->lrc_desc_pool) + if (guc->submission_initialized) return 0; - ret = guc_lrc_desc_pool_create(guc); - if (ret) - return ret; - /* - * Keep static analysers happy, let them know that we allocated the - * vma after testing that it didn't exist earlier. 
- */ - GEM_BUG_ON(!guc->lrc_desc_pool); - guc->submission_state.guc_ids_bitmap = bitmap_zalloc(NUMBER_MULTI_LRC_GUC_ID(guc), GFP_KERNEL); if (!guc->submission_state.guc_ids_bitmap) @@ -1825,19 +1879,20 @@ int intel_guc_submission_init(struct intel_guc *guc) guc->timestamp.ping_delay = (POLL_TIME_CLKS / gt->clock_frequency + 1) * HZ; guc->timestamp.shift = gpm_timestamp_shift(gt); + guc->submission_initialized = true; return 0; } void intel_guc_submission_fini(struct intel_guc *guc) { - if (!guc->lrc_desc_pool) + if (!guc->submission_initialized) return; guc_flush_destroyed_contexts(guc); - guc_lrc_desc_pool_destroy(guc); i915_sched_engine_put(guc->sched_engine); bitmap_free(guc->submission_state.guc_ids_bitmap); + guc->submission_initialized = false; } static inline void queue_request(struct i915_sched_engine *sched_engine, @@ -1884,7 +1939,7 @@ static bool need_tasklet(struct intel_guc *guc, struct i915_request *rq) return submission_disabled(guc) || guc->stalled_request || !i915_sched_engine_is_empty(sched_engine) || - !lrc_desc_registered(guc, ce->guc_id.id); + !ctx_id_mapped(guc, ce->guc_id.id); } static void guc_submit_request(struct i915_request *rq) @@ -1941,7 +1996,7 @@ static void __release_guc_id(struct intel_guc *guc, struct intel_context *ce) else ida_simple_remove(&guc->submission_state.guc_ids, ce->guc_id.id); - reset_lrc_desc(guc, ce->guc_id.id); + clr_ctx_id_mapping(guc, ce->guc_id.id); set_context_guc_id_invalid(ce); } if (!list_empty(&ce->guc_id.link)) @@ -2094,65 +2149,96 @@ static void unpin_guc_id(struct intel_guc *guc, struct intel_context *ce) static int __guc_action_register_multi_lrc(struct intel_guc *guc, struct intel_context *ce, - u32 guc_id, - u32 offset, + struct guc_ctxt_registration_info *info, bool loop) { struct intel_context *child; - u32 action[4 + MAX_ENGINE_INSTANCE]; + u32 action[13 + (MAX_ENGINE_INSTANCE * 2)]; int len = 0; + u32 next_id; GEM_BUG_ON(ce->parallel.number_children > MAX_ENGINE_INSTANCE); action[len++] = INTEL_GUC_ACTION_REGISTER_CONTEXT_MULTI_LRC; - action[len++] = guc_id; + action[len++] = info->flags; + action[len++] = info->context_idx; + action[len++] = info->engine_class; + action[len++] = info->engine_submit_mask; + action[len++] = info->wq_desc_lo; + action[len++] = info->wq_desc_hi; + action[len++] = info->wq_base_lo; + action[len++] = info->wq_base_hi; + action[len++] = info->wq_size; action[len++] = ce->parallel.number_children + 1; - action[len++] = offset; + action[len++] = info->hwlrca_lo; + action[len++] = info->hwlrca_hi; + + next_id = info->context_idx + 1; for_each_child(ce, child) { - offset += sizeof(struct guc_lrc_desc); - action[len++] = offset; + GEM_BUG_ON(next_id++ != child->guc_id.id); + + /* + * NB: GuC interface supports 64 bit LRCA even though i915/HW + * only supports 32 bit currently. 
+ */ + action[len++] = lower_32_bits(child->lrc.lrca); + action[len++] = upper_32_bits(child->lrc.lrca); } + GEM_BUG_ON(len > ARRAY_SIZE(action)); + return guc_submission_send_busy_loop(guc, action, len, 0, loop); } static int __guc_action_register_context(struct intel_guc *guc, - u32 guc_id, - u32 offset, + struct guc_ctxt_registration_info *info, bool loop) { u32 action[] = { INTEL_GUC_ACTION_REGISTER_CONTEXT, - guc_id, - offset, + info->flags, + info->context_idx, + info->engine_class, + info->engine_submit_mask, + info->wq_desc_lo, + info->wq_desc_hi, + info->wq_base_lo, + info->wq_base_hi, + info->wq_size, + info->hwlrca_lo, + info->hwlrca_hi, }; return guc_submission_send_busy_loop(guc, action, ARRAY_SIZE(action), 0, loop); } +static void prepare_context_registration_info(struct intel_context *ce, + struct guc_ctxt_registration_info *info); + static int register_context(struct intel_context *ce, bool loop) { + struct guc_ctxt_registration_info info; struct intel_guc *guc = ce_to_guc(ce); - u32 offset = intel_guc_ggtt_offset(guc, guc->lrc_desc_pool) + - ce->guc_id.id * sizeof(struct guc_lrc_desc); int ret; GEM_BUG_ON(intel_context_is_child(ce)); trace_intel_context_register(ce); + prepare_context_registration_info(ce, &info); + if (intel_context_is_parent(ce)) - ret = __guc_action_register_multi_lrc(guc, ce, ce->guc_id.id, - offset, loop); + ret = __guc_action_register_multi_lrc(guc, ce, &info, loop); else - ret = __guc_action_register_context(guc, ce->guc_id.id, offset, - loop); + ret = __guc_action_register_context(guc, &info, loop); if (likely(!ret)) { unsigned long flags; spin_lock_irqsave(&ce->guc_state.lock, flags); set_context_registered(ce); spin_unlock_irqrestore(&ce->guc_state.lock, flags); + + guc_context_policy_init(ce, loop); } return ret; @@ -2202,33 +2288,120 @@ static inline u32 get_children_join_value(struct intel_context *ce, return __get_parent_scratch(ce)->join[child_index].semaphore; } -static void guc_context_policy_init(struct intel_engine_cs *engine, - struct guc_lrc_desc *desc) +struct context_policy { + u32 count; + struct guc_update_context_policy h2g; +}; + +static u32 __guc_context_policy_action_size(struct context_policy *policy) +{ + size_t bytes = sizeof(policy->h2g.header) + + (sizeof(policy->h2g.klv[0]) * policy->count); + + return bytes / sizeof(u32); +} + +static void __guc_context_policy_start_klv(struct context_policy *policy, u16 guc_id) { - desc->policy_flags = 0; + policy->h2g.header.action = INTEL_GUC_ACTION_HOST2GUC_UPDATE_CONTEXT_POLICIES; + policy->h2g.header.ctx_id = guc_id; + policy->count = 0; +} - if (engine->flags & I915_ENGINE_WANT_FORCED_PREEMPTION) - desc->policy_flags |= CONTEXT_POLICY_FLAG_PREEMPT_TO_IDLE; +#define MAKE_CONTEXT_POLICY_ADD(func, id) \ +static void __guc_context_policy_add_##func(struct context_policy *policy, u32 data) \ +{ \ + GEM_BUG_ON(policy->count >= GUC_CONTEXT_POLICIES_KLV_NUM_IDS); \ + policy->h2g.klv[policy->count].kl = \ + FIELD_PREP(GUC_KLV_0_KEY, GUC_CONTEXT_POLICIES_KLV_ID_##id) | \ + FIELD_PREP(GUC_KLV_0_LEN, 1); \ + policy->h2g.klv[policy->count].value = data; \ + policy->count++; \ +} + +MAKE_CONTEXT_POLICY_ADD(execution_quantum, EXECUTION_QUANTUM) +MAKE_CONTEXT_POLICY_ADD(preemption_timeout, PREEMPTION_TIMEOUT) +MAKE_CONTEXT_POLICY_ADD(priority, SCHEDULING_PRIORITY) +MAKE_CONTEXT_POLICY_ADD(preempt_to_idle, PREEMPT_TO_IDLE_ON_QUANTUM_EXPIRY) + +#undef MAKE_CONTEXT_POLICY_ADD + +static int __guc_context_set_context_policies(struct intel_guc *guc, + struct context_policy *policy, + bool loop) +{ + 
return guc_submission_send_busy_loop(guc, (u32 *)&policy->h2g, + __guc_context_policy_action_size(policy), + 0, loop); +} + +static int guc_context_policy_init(struct intel_context *ce, bool loop) +{ + struct intel_engine_cs *engine = ce->engine; + struct intel_guc *guc = &engine->gt->uc.guc; + struct context_policy policy; + u32 execution_quantum; + u32 preemption_timeout; + bool missing = false; + unsigned long flags; + int ret; /* NB: For both of these, zero means disabled. */ - desc->execution_quantum = engine->props.timeslice_duration_ms * 1000; - desc->preemption_timeout = engine->props.preempt_timeout_ms * 1000; + execution_quantum = engine->props.timeslice_duration_ms * 1000; + preemption_timeout = engine->props.preempt_timeout_ms * 1000; + + __guc_context_policy_start_klv(&policy, ce->guc_id.id); + + __guc_context_policy_add_priority(&policy, ce->guc_state.prio); + __guc_context_policy_add_execution_quantum(&policy, execution_quantum); + __guc_context_policy_add_preemption_timeout(&policy, preemption_timeout); + + if (engine->flags & I915_ENGINE_WANT_FORCED_PREEMPTION) + __guc_context_policy_add_preempt_to_idle(&policy, 1); + + ret = __guc_context_set_context_policies(guc, &policy, loop); + missing = ret != 0; + + if (!missing && intel_context_is_parent(ce)) { + struct intel_context *child; + + for_each_child(ce, child) { + __guc_context_policy_start_klv(&policy, child->guc_id.id); + + if (engine->flags & I915_ENGINE_WANT_FORCED_PREEMPTION) + __guc_context_policy_add_preempt_to_idle(&policy, 1); + + child->guc_state.prio = ce->guc_state.prio; + __guc_context_policy_add_priority(&policy, ce->guc_state.prio); + __guc_context_policy_add_execution_quantum(&policy, execution_quantum); + __guc_context_policy_add_preemption_timeout(&policy, preemption_timeout); + + ret = __guc_context_set_context_policies(guc, &policy, loop); + if (ret) { + missing = true; + break; + } + } + } + + spin_lock_irqsave(&ce->guc_state.lock, flags); + if (missing) + set_context_policy_required(ce); + else + clr_context_policy_required(ce); + spin_unlock_irqrestore(&ce->guc_state.lock, flags); + + return ret; }
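The KLV entries built above pack the key and a one-dword length into a single u32 (key in the upper 16 bits, length in the lower 16, per GUC_KLV_0_KEY/GUC_KLV_0_LEN). A hedged sketch of assembling such an update packet; the action opcode and KLV keys below are placeholders, not the real GuC ABI values:

#include <stdint.h>
#include <stdio.h>

#define KLV_KEY_SHIFT 16
#define MY_H2G_UPDATE_POLICIES 0x5103u /* placeholder opcode */
#define MY_KLV_EXEC_QUANTUM    0x0101u /* placeholder KLV key */
#define MY_KLV_PREEMPT_TIMEOUT 0x0102u /* placeholder KLV key */

struct klv_dw { uint32_t kl; uint32_t value; };

static void add_klv(struct klv_dw *klv, unsigned int *count,
		    uint32_t key, uint32_t value)
{
	klv[*count].kl = (key << KLV_KEY_SHIFT) | 1; /* length: 1 dword */
	klv[*count].value = value;
	(*count)++;
}

int main(void)
{
	uint32_t header[2] = { MY_H2G_UPDATE_POLICIES, 42 /* ctx id */ };
	struct klv_dw klv[4];
	unsigned int n = 0;

	add_klv(klv, &n, MY_KLV_EXEC_QUANTUM, 1000000);   /* us */
	add_klv(klv, &n, MY_KLV_PREEMPT_TIMEOUT, 500000); /* us */

	/* total H2G payload in dwords, like __guc_context_policy_action_size() */
	printf("action 0x%x for ctx %u: %zu dwords\n",
	       (unsigned int)header[0], (unsigned int)header[1],
	       (sizeof(header) + n * sizeof(klv[0])) / sizeof(uint32_t));
	return 0;
}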
-static int guc_lrc_desc_pin(struct intel_context *ce, bool loop) +static void prepare_context_registration_info(struct intel_context *ce, + struct guc_ctxt_registration_info *info) { struct intel_engine_cs *engine = ce->engine; - struct intel_runtime_pm *runtime_pm = engine->uncore->rpm; struct intel_guc *guc = &engine->gt->uc.guc; - u32 desc_idx = ce->guc_id.id; - struct guc_lrc_desc *desc; - bool context_registered; - intel_wakeref_t wakeref; - struct intel_context *child; - int ret = 0; + u32 ctx_id = ce->guc_id.id; GEM_BUG_ON(!engine->mask); - GEM_BUG_ON(!sched_state_is_init(ce)); /* * Ensure LRC + CT vmas are in the same region as write barrier is done @@ -2237,55 +2410,63 @@ static int guc_lrc_desc_pin(struct intel_context *ce, bool loop) GEM_BUG_ON(i915_gem_object_is_lmem(guc->ct.vma->obj) != i915_gem_object_is_lmem(ce->ring->vma->obj)); - context_registered = lrc_desc_registered(guc, desc_idx); - - reset_lrc_desc(guc, desc_idx); - set_lrc_desc_registered(guc, desc_idx, ce); - - desc = __get_lrc_desc(guc, desc_idx); - desc->engine_class = engine_class_to_guc_class(engine->class); - desc->engine_submit_mask = engine->logical_mask; - desc->hw_context_desc = ce->lrc.lrca; - desc->priority = ce->guc_state.prio; - desc->context_flags = CONTEXT_REGISTRATION_FLAG_KMD; - guc_context_policy_init(engine, desc); + memset(info, 0, sizeof(*info)); + info->context_idx = ctx_id; + info->engine_class = engine_class_to_guc_class(engine->class); + info->engine_submit_mask = engine->logical_mask; + /* + * NB: GuC interface supports 64 bit LRCA even though i915/HW + * only supports 32 bit currently. + */ + info->hwlrca_lo = lower_32_bits(ce->lrc.lrca); + info->hwlrca_hi = upper_32_bits(ce->lrc.lrca); + info->flags = CONTEXT_REGISTRATION_FLAG_KMD; /* * If context is a parent, we need to register a work queue descriptor * for its work queue and register all child contexts. */ if (intel_context_is_parent(ce)) { - struct guc_process_desc *pdesc; + struct guc_sched_wq_desc *wq_desc; + u64 wq_desc_offset, wq_base_offset; ce->parallel.guc.wqi_tail = 0; ce->parallel.guc.wqi_head = 0; - desc->process_desc = i915_ggtt_offset(ce->state) + - __get_parent_scratch_offset(ce); - desc->wq_addr = i915_ggtt_offset(ce->state) + - __get_wq_offset(ce); - desc->wq_size = WQ_SIZE; - - pdesc = __get_process_desc(ce); - memset(pdesc, 0, sizeof(*(pdesc))); - pdesc->stage_id = ce->guc_id.id; - pdesc->wq_base_addr = desc->wq_addr; - pdesc->wq_size_bytes = desc->wq_size; - pdesc->wq_status = WQ_STATUS_ACTIVE; + wq_desc_offset = i915_ggtt_offset(ce->state) + + __get_parent_scratch_offset(ce); + wq_base_offset = i915_ggtt_offset(ce->state) + + __get_wq_offset(ce); + info->wq_desc_lo = lower_32_bits(wq_desc_offset); + info->wq_desc_hi = upper_32_bits(wq_desc_offset); + info->wq_base_lo = lower_32_bits(wq_base_offset); + info->wq_base_hi = upper_32_bits(wq_base_offset); + info->wq_size = WQ_SIZE; - for_each_child(ce, child) { - desc = __get_lrc_desc(guc, child->guc_id.id); - - desc->engine_class = - engine_class_to_guc_class(engine->class); - desc->hw_context_desc = child->lrc.lrca; - desc->priority = ce->guc_state.prio; - desc->context_flags = CONTEXT_REGISTRATION_FLAG_KMD; - guc_context_policy_init(engine, desc); - } + wq_desc = __get_wq_desc(ce); + memset(wq_desc, 0, sizeof(*wq_desc)); + wq_desc->wq_status = WQ_STATUS_ACTIVE; clear_children_join_go_memory(ce); } +} + +static int try_context_registration(struct intel_context *ce, bool loop) +{ + struct intel_engine_cs *engine = ce->engine; + struct intel_runtime_pm *runtime_pm = engine->uncore->rpm; + struct intel_guc *guc = &engine->gt->uc.guc; + intel_wakeref_t wakeref; + u32 ctx_id = ce->guc_id.id; + bool context_registered; + int ret = 0; + + GEM_BUG_ON(!sched_state_is_init(ce)); + + context_registered = ctx_id_mapped(guc, ctx_id); + + clr_ctx_id_mapping(guc, ctx_id); + set_ctx_id_mapping(guc, ctx_id, ce); /* * The context_lookup xarray is used to determine if the hardware @@ -2311,7 +2492,7 @@ static int guc_lrc_desc_pin(struct intel_context *ce, bool loop) } spin_unlock_irqrestore(&ce->guc_state.lock, flags); if (unlikely(disabled)) { - reset_lrc_desc(guc, desc_idx); + clr_ctx_id_mapping(guc, ctx_id); return 0; /* Will get registered later */ } @@ -2327,9 +2508,9 @@ static int guc_lrc_desc_pin(struct intel_context *ce, bool loop) with_intel_runtime_pm(runtime_pm, wakeref) ret = register_context(ce, loop); if (unlikely(ret == -EBUSY)) { - reset_lrc_desc(guc, desc_idx); + clr_ctx_id_mapping(guc, ctx_id); } else if (unlikely(ret == -ENODEV)) { - reset_lrc_desc(guc, desc_idx); + clr_ctx_id_mapping(guc, ctx_id); ret = 0; /* Will get registered later */ } } @@ -2419,7 +2600,7 @@ static void __guc_context_sched_disable(struct intel_guc *guc, GUC_CONTEXT_DISABLE }; - GEM_BUG_ON(guc_id == GUC_INVALID_LRC_ID); + GEM_BUG_ON(guc_id == GUC_INVALID_CONTEXT_ID); GEM_BUG_ON(intel_context_is_child(ce)); trace_intel_context_sched_disable(ce); @@ -2516,7 +2697,7 @@ static bool
context_cant_unblock(struct intel_context *ce) return (ce->guc_state.sched_state & SCHED_STATE_NO_UNBLOCK) || context_guc_id_invalid(ce) || - !lrc_desc_registered(ce_to_guc(ce), ce->guc_id.id) || + !ctx_id_mapped(ce_to_guc(ce), ce->guc_id.id) || !intel_context_is_pinned(ce); } @@ -2580,13 +2761,11 @@ static void __guc_context_set_preemption_timeout(struct intel_guc *guc, u16 guc_id, u32 preemption_timeout) { - u32 action[] = { - INTEL_GUC_ACTION_SET_CONTEXT_PREEMPTION_TIMEOUT, - guc_id, - preemption_timeout - }; + struct context_policy policy; - intel_guc_send_busy_loop(guc, action, ARRAY_SIZE(action), 0, true); + __guc_context_policy_start_klv(&policy, guc_id); + __guc_context_policy_add_preemption_timeout(&policy, preemption_timeout); + __guc_context_set_context_policies(guc, &policy, true); } static void guc_context_ban(struct intel_context *ce, struct i915_request *rq) @@ -2686,7 +2865,7 @@ static inline void guc_lrc_desc_unpin(struct intel_context *ce) bool disabled; GEM_BUG_ON(!intel_gt_pm_is_awake(gt)); - GEM_BUG_ON(!lrc_desc_registered(guc, ce->guc_id.id)); + GEM_BUG_ON(!ctx_id_mapped(guc, ce->guc_id.id)); GEM_BUG_ON(ce != __get_context(guc, ce->guc_id.id)); GEM_BUG_ON(context_enabled(ce)); @@ -2803,7 +2982,7 @@ static void guc_context_destroy(struct kref *kref) */ spin_lock_irqsave(&guc->submission_state.lock, flags); destroy = submission_disabled(guc) || context_guc_id_invalid(ce) || - !lrc_desc_registered(guc, ce->guc_id.id); + !ctx_id_mapped(guc, ce->guc_id.id); if (likely(!destroy)) { if (!list_empty(&ce->guc_id.link)) list_del_init(&ce->guc_id.link); @@ -2831,16 +3010,20 @@ static int guc_context_alloc(struct intel_context *ce) return lrc_alloc(ce, ce->engine); } +static void __guc_context_set_prio(struct intel_guc *guc, + struct intel_context *ce) +{ + struct context_policy policy; + + __guc_context_policy_start_klv(&policy, ce->guc_id.id); + __guc_context_policy_add_priority(&policy, ce->guc_state.prio); + __guc_context_set_context_policies(guc, &policy, true); +} + static void guc_context_set_prio(struct intel_guc *guc, struct intel_context *ce, u8 prio) { - u32 action[] = { - INTEL_GUC_ACTION_SET_CONTEXT_PRIORITY, - ce->guc_id.id, - prio, - }; - GEM_BUG_ON(prio < GUC_CLIENT_PRIORITY_KMD_HIGH || prio > GUC_CLIENT_PRIORITY_NORMAL); lockdep_assert_held(&ce->guc_state.lock); @@ -2851,9 +3034,9 @@ static void guc_context_set_prio(struct intel_guc *guc, return; } - guc_submission_send_busy_loop(guc, action, ARRAY_SIZE(action), 0, true); - ce->guc_state.prio = prio; + __guc_context_set_prio(guc, ce); + trace_intel_context_set_prio(ce); } @@ -3046,7 +3229,7 @@ static void guc_signal_context_fence(struct intel_context *ce) static bool context_needs_register(struct intel_context *ce, bool new_guc_id) { return (new_guc_id || test_bit(CONTEXT_LRCA_DIRTY, &ce->flags) || - !lrc_desc_registered(ce_to_guc(ce), ce->guc_id.id)) && + !ctx_id_mapped(ce_to_guc(ce), ce->guc_id.id)) && !submission_disabled(ce_to_guc(ce)); } @@ -3123,7 +3306,7 @@ static int guc_request_alloc(struct i915_request *rq) if (unlikely(ret < 0)) return ret; if (context_needs_register(ce, !!ret)) { - ret = guc_lrc_desc_pin(ce, true); + ret = try_context_registration(ce, true); if (unlikely(ret)) { /* unwind */ if (ret == -EPIPE) { disable_submission(guc); @@ -3560,7 +3743,7 @@ static void guc_sanitize(struct intel_engine_cs *engine) sanitize_hwsp(engine); /* And scrub the dirty cachelines for the HWSP */ - clflush_cache_range(engine->status_page.addr, PAGE_SIZE); + drm_clflush_virt_range(engine->status_page.addr, 
PAGE_SIZE); intel_engine_reset_pinned_contexts(engine); } @@ -3595,7 +3778,7 @@ static int guc_resume(struct intel_engine_cs *engine) setup_hwsp(engine); start_engine(engine); - if (engine->class == RENDER_CLASS) + if (engine->flags & I915_ENGINE_FIRST_RENDER_COMPUTE) xehp_enable_ccs_engines(engine); return 0; @@ -3614,9 +3797,17 @@ static void guc_set_default_submission(struct intel_engine_cs *engine) static inline void guc_kernel_context_pin(struct intel_guc *guc, struct intel_context *ce) { + /* + * Note: we purposefully do not check the returns below because + * the registration can only fail if a reset is just starting. + * This is called at the end of reset so presumably another reset + * isn't happening and even if it did this code would be run again. + */ + if (context_guc_id_invalid(ce)) pin_guc_id(guc, ce); - guc_lrc_desc_pin(ce, true); + + try_context_registration(ce, true); } static inline void guc_init_lrc_mapping(struct intel_guc *guc) @@ -3634,13 +3825,7 @@ static inline void guc_init_lrc_mapping(struct intel_guc *guc) * Also, after a reset of the GuC we want to make sure that the * information shared with GuC is properly reset. The kernel LRCs are * not attached to the gem_context, so they need to be added separately. - * - * Note: we purposefully do not check the return of guc_lrc_desc_pin, - * because that function can only fail if a reset is just starting. This - * is at the end of reset so presumably another reset isn't happening - * and even it did this code would be run again. */ - for_each_engine(engine, gt, id) { struct intel_context *ce; @@ -3680,7 +3865,7 @@ static void guc_default_vfuncs(struct intel_engine_cs *engine) engine->sched_engine->schedule = i915_schedule; - engine->reset.prepare = guc_reset_nop; + engine->reset.prepare = guc_engine_reset_prepare; engine->reset.rewind = guc_rewind_nop; engine->reset.cancel = guc_reset_nop; engine->reset.finish = guc_reset_nop; @@ -3699,6 +3884,10 @@ static void guc_default_vfuncs(struct intel_engine_cs *engine) engine->flags |= I915_ENGINE_HAS_PREEMPTION; engine->flags |= I915_ENGINE_HAS_TIMESLICES; + /* Wa_14014475959:dg2 */ + if (IS_DG2(engine->i915) && engine->class == COMPUTE_CLASS) + engine->flags |= I915_ENGINE_USES_WA_HOLD_CCS_SWITCHOUT; + /* * TODO: GuC supports timeslicing and semaphores as well, but they're * handled by the firmware so some minor tweaks are required before * enabling. */ engine->emit_bb_start = gen8_emit_bb_start; + if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 50)) + engine->emit_bb_start = gen125_emit_bb_start; } static void rcs_submission_override(struct intel_engine_cs *engine) @@ -3835,32 +4026,32 @@ void intel_guc_submission_init_early(struct intel_guc *guc) spin_lock_init(&guc->timestamp.lock); INIT_DELAYED_WORK(&guc->timestamp.work, guc_timestamp_ping); - guc->submission_state.num_guc_ids = GUC_MAX_LRC_DESCRIPTORS; + guc->submission_state.num_guc_ids = GUC_MAX_CONTEXT_ID; guc->submission_supported = __guc_submission_supported(guc); guc->submission_selected = __guc_submission_selected(guc); } static inline struct intel_context * -g2h_context_lookup(struct intel_guc *guc, u32 desc_idx) +g2h_context_lookup(struct intel_guc *guc, u32 ctx_id) { struct intel_context *ce; - if (unlikely(desc_idx >= GUC_MAX_LRC_DESCRIPTORS)) { + if (unlikely(ctx_id >= GUC_MAX_CONTEXT_ID)) { drm_err(&guc_to_gt(guc)->i915->drm, - "Invalid desc_idx %u", desc_idx); + "Invalid ctx_id %u\n", ctx_id); return NULL; } - ce = 
__get_context(guc, desc_idx); + ce = __get_context(guc, ctx_id); if (unlikely(!ce)) { drm_err(&guc_to_gt(guc)->i915->drm, - "Context is NULL, desc_idx %u", desc_idx); + "Context is NULL, ctx_id %u\n", ctx_id); return NULL; } if (unlikely(intel_context_is_child(ce))) { drm_err(&guc_to_gt(guc)->i915->drm, - "Context is child, desc_idx %u", desc_idx); + "Context is child, ctx_id %u\n", ctx_id); return NULL; } @@ -3872,14 +4063,15 @@ int intel_guc_deregister_done_process_msg(struct intel_guc *guc, u32 len) { struct intel_context *ce; - u32 desc_idx = msg[0]; + u32 ctx_id; if (unlikely(len < 1)) { - drm_err(&guc_to_gt(guc)->i915->drm, "Invalid length %u", len); + drm_err(&guc_to_gt(guc)->i915->drm, "Invalid length %u\n", len); return -EPROTO; } + ctx_id = msg[0]; - ce = g2h_context_lookup(guc, desc_idx); + ce = g2h_context_lookup(guc, ctx_id); if (unlikely(!ce)) return -EPROTO; @@ -3923,14 +4115,15 @@ int intel_guc_sched_done_process_msg(struct intel_guc *guc, { struct intel_context *ce; unsigned long flags; - u32 desc_idx = msg[0]; + u32 ctx_id; if (unlikely(len < 2)) { - drm_err(&guc_to_gt(guc)->i915->drm, "Invalid length %u", len); + drm_err(&guc_to_gt(guc)->i915->drm, "Invalid length %u\n", len); return -EPROTO; } + ctx_id = msg[0]; - ce = g2h_context_lookup(guc, desc_idx); + ce = g2h_context_lookup(guc, ctx_id); if (unlikely(!ce)) return -EPROTO; @@ -3938,8 +4131,8 @@ int intel_guc_sched_done_process_msg(struct intel_guc *guc, (!context_pending_enable(ce) && !context_pending_disable(ce)))) { drm_err(&guc_to_gt(guc)->i915->drm, - "Bad context sched_state 0x%x, desc_idx %u", - ce->guc_state.sched_state, desc_idx); + "Bad context sched_state 0x%x, ctx_id %u\n", + ce->guc_state.sched_state, ctx_id); return -EPROTO; } @@ -4005,7 +4198,7 @@ static void capture_error_state(struct intel_guc *guc, intel_engine_set_hung_context(engine, ce); with_intel_runtime_pm(&i915->runtime_pm, wakeref) - i915_capture_error_state(gt, engine->mask); + i915_capture_error_state(gt, engine->mask, CORE_DUMP_FLAG_IS_GUC_CAPTURE); atomic_inc(&i915->gpu_error.reset_engine_count[engine->uabi_class]); } @@ -4037,14 +4230,14 @@ int intel_guc_context_reset_process_msg(struct intel_guc *guc, { struct intel_context *ce; unsigned long flags; - int desc_idx; + int ctx_id; if (unlikely(len != 1)) { drm_err(&guc_to_gt(guc)->i915->drm, "Invalid length %u", len); return -EPROTO; } - desc_idx = msg[0]; + ctx_id = msg[0]; /* * The context lookup uses the xarray but lookups only require an RCU lock @@ -4053,7 +4246,7 @@ int intel_guc_context_reset_process_msg(struct intel_guc *guc, * asynchronously until the reset is done. 
@@ -4037,14 +4230,14 @@ int intel_guc_context_reset_process_msg(struct intel_guc *guc,
 {
 	struct intel_context *ce;
 	unsigned long flags;
-	int desc_idx;
+	int ctx_id;
 
 	if (unlikely(len != 1)) {
 		drm_err(&guc_to_gt(guc)->i915->drm, "Invalid length %u", len);
 		return -EPROTO;
 	}
 
-	desc_idx = msg[0];
+	ctx_id = msg[0];
 
 	/*
 	 * The context lookup uses the xarray but lookups only require an RCU lock
@@ -4053,7 +4246,7 @@ int intel_guc_context_reset_process_msg(struct intel_guc *guc,
 	 * asynchronously until the reset is done.
 	 */
 	xa_lock_irqsave(&guc->context_lookup, flags);
-	ce = g2h_context_lookup(guc, desc_idx);
+	ce = g2h_context_lookup(guc, ctx_id);
 	if (ce)
 		intel_context_get(ce);
 	xa_unlock_irqrestore(&guc->context_lookup, flags);
@@ -4070,23 +4263,24 @@ int intel_guc_context_reset_process_msg(struct intel_guc *guc,
 int intel_guc_error_capture_process_msg(struct intel_guc *guc,
 					const u32 *msg, u32 len)
 {
-	int status;
+	u32 status;
 
 	if (unlikely(len != 1)) {
 		drm_dbg(&guc_to_gt(guc)->i915->drm, "Invalid length %u", len);
 		return -EPROTO;
 	}
 
-	status = msg[0];
-	drm_info(&guc_to_gt(guc)->i915->drm, "Got error capture: status = %d", status);
+	status = msg[0] & INTEL_GUC_STATE_CAPTURE_EVENT_STATUS_MASK;
+	if (status == INTEL_GUC_STATE_CAPTURE_EVENT_STATUS_NOSPACE)
+		drm_warn(&guc_to_gt(guc)->i915->drm, "G2H-Error capture no space");
 
-	/* FIXME: Do something with the capture */
+	intel_guc_capture_process(guc);
 
 	return 0;
 }
 
-static struct intel_engine_cs *
-guc_lookup_engine(struct intel_guc *guc, u8 guc_class, u8 instance)
+struct intel_engine_cs *
+intel_guc_lookup_engine(struct intel_guc *guc, u8 guc_class, u8 instance)
 {
 	struct intel_gt *gt = guc_to_gt(guc);
 	u8 engine_class = guc_class_to_engine_class(guc_class);
@@ -4135,7 +4329,7 @@ int intel_guc_engine_failure_process_msg(struct intel_guc *guc,
 	instance = msg[1];
 	reason = msg[2];
 
-	engine = guc_lookup_engine(guc, guc_class, instance);
+	engine = intel_guc_lookup_engine(guc, guc_class, instance);
 	if (unlikely(!engine)) {
 		drm_err(&gt->i915->drm,
 			"Invalid engine %d:%d", guc_class, instance);
@@ -4333,17 +4527,17 @@ void intel_guc_submission_print_context_info(struct intel_guc *guc,
 		guc_log_context_priority(p, ce);
 
 		if (intel_context_is_parent(ce)) {
-			struct guc_process_desc *desc = __get_process_desc(ce);
+			struct guc_sched_wq_desc *wq_desc = __get_wq_desc(ce);
 			struct intel_context *child;
 
 			drm_printf(p, "\t\tNumber children: %u\n",
 				   ce->parallel.number_children);
 			drm_printf(p, "\t\tWQI Head: %u\n",
-				   READ_ONCE(desc->head));
+				   READ_ONCE(wq_desc->head));
 			drm_printf(p, "\t\tWQI Tail: %u\n",
-				   READ_ONCE(desc->tail));
+				   READ_ONCE(wq_desc->tail));
 			drm_printf(p, "\t\tWQI Status: %u\n\n",
-				   READ_ONCE(desc->wq_status));
+				   READ_ONCE(wq_desc->wq_status));
 
 			if (ce->engine->emit_bb_start ==
 			    emit_bb_start_parent_no_preempt_mid_batch) {
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc.c b/drivers/gpu/drm/i915/gt/uc/intel_uc.c
index 8eb34de2f20c..e8f099360e01 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc.c
@@ -3,6 +3,8 @@
  * Copyright © 2016-2019 Intel Corporation
  */
 
+#include <linux/string_helpers.h>
+
 #include "gt/intel_gt.h"
 #include "gt/intel_reset.h"
 #include "intel_guc.h"
@@ -78,10 +80,10 @@ static void __confirm_options(struct intel_uc *uc)
 	drm_dbg(&i915->drm,
 		"enable_guc=%d (guc:%s submission:%s huc:%s slpc:%s)\n",
 		i915->params.enable_guc,
-		yesno(intel_uc_wants_guc(uc)),
-		yesno(intel_uc_wants_guc_submission(uc)),
-		yesno(intel_uc_wants_huc(uc)),
-		yesno(intel_uc_wants_guc_slpc(uc)));
+		str_yes_no(intel_uc_wants_guc(uc)),
+		str_yes_no(intel_uc_wants_guc_submission(uc)),
+		str_yes_no(intel_uc_wants_huc(uc)),
+		str_yes_no(intel_uc_wants_guc_slpc(uc)));
 
 	if (i915->params.enable_guc == 0) {
 		GEM_BUG_ON(intel_uc_wants_guc(uc));
@@ -522,9 +524,9 @@ static int __uc_init_hw(struct intel_uc *uc)
 	}
 
 	drm_info(&i915->drm, "GuC submission %s\n",
-		 enableddisabled(intel_uc_uses_guc_submission(uc)));
+		 str_enabled_disabled(intel_uc_uses_guc_submission(uc)));
 	drm_info(&i915->drm, "GuC SLPC %s\n",
-		 enableddisabled(intel_uc_uses_guc_slpc(uc)));
+		 str_enabled_disabled(intel_uc_uses_guc_slpc(uc)));
 
 	return 0;
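The yesno()/enableddisabled() conversions in this file (and in the debugfs file that follows) move from driver-local helpers to the generic ones in <linux/string_helpers.h>. To a first approximation those helpers reduce to the following; this is a simplified sketch that ignores the NULL-tolerant variants the kernel header also provides.

	#include <stdbool.h>

	static inline const char *str_yes_no(bool v)
	{
		return v ? "yes" : "no";
	}

	static inline const char *str_enabled_disabled(bool v)
	{
		return v ? "enabled" : "disabled";
	}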
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_debugfs.c b/drivers/gpu/drm/i915/gt/uc/intel_uc_debugfs.c
index c2f7924295e7..284d6fbc2d08 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc_debugfs.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_debugfs.c
@@ -4,6 +4,8 @@
  */
 
 #include <linux/debugfs.h>
+#include <linux/string_helpers.h>
+
 #include <drm/drm_print.h>
 
 #include "gt/intel_gt_debugfs.h"
@@ -18,17 +20,17 @@ static int uc_usage_show(struct seq_file *m, void *data)
 	struct drm_printer p = drm_seq_file_printer(m);
 
 	drm_printf(&p, "[guc] supported:%s wanted:%s used:%s\n",
-		   yesno(intel_uc_supports_guc(uc)),
-		   yesno(intel_uc_wants_guc(uc)),
-		   yesno(intel_uc_uses_guc(uc)));
+		   str_yes_no(intel_uc_supports_guc(uc)),
+		   str_yes_no(intel_uc_wants_guc(uc)),
+		   str_yes_no(intel_uc_uses_guc(uc)));
 	drm_printf(&p, "[huc] supported:%s wanted:%s used:%s\n",
-		   yesno(intel_uc_supports_huc(uc)),
-		   yesno(intel_uc_wants_huc(uc)),
-		   yesno(intel_uc_uses_huc(uc)));
+		   str_yes_no(intel_uc_supports_huc(uc)),
+		   str_yes_no(intel_uc_wants_huc(uc)),
+		   str_yes_no(intel_uc_uses_huc(uc)));
 	drm_printf(&p, "[submission] supported:%s wanted:%s used:%s\n",
-		   yesno(intel_uc_supports_guc_submission(uc)),
-		   yesno(intel_uc_wants_guc_submission(uc)),
-		   yesno(intel_uc_uses_guc_submission(uc)));
+		   str_yes_no(intel_uc_supports_guc_submission(uc)),
+		   str_yes_no(intel_uc_wants_guc_submission(uc)),
+		   str_yes_no(intel_uc_uses_guc_submission(uc)));
 
 	return 0;
 }
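The intel_uc_fw.c diff that follows rewrites the firmware table, which is built with an X-macro: each fw_def() row is expanded several times with different per-column macros to stamp out firmware path strings and related definitions from a single list. A generic illustration of the pattern, with all names invented for the example:

	#include <stddef.h>
	#include <stdio.h>

	/* One row per platform/firmware pair; expanded below. */
	#define TOY_FW_TABLE(entry) \
		entry(platform_a, 70, 1, 1) \
		entry(platform_b, 70, 1, 2)

	/* Expansion 1: build the firmware path strings via token pasting
	 * and string concatenation. */
	#define TOY_AS_PATH(name, major, minor, patch) \
		#name "_guc_" #major "." #minor "." #patch ".bin",

	static const char * const toy_fw_paths[] = {
		TOY_FW_TABLE(TOY_AS_PATH)
	};

	int main(void)
	{
		for (size_t i = 0; i < sizeof(toy_fw_paths) / sizeof(toy_fw_paths[0]); i++)
			puts(toy_fw_paths[i]);	/* e.g. "platform_a_guc_70.1.1.bin" */
		return 0;
	}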
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
index c88113044494..d078f884b5e3 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
@@ -5,6 +5,7 @@
 
 #include <linux/bitfield.h>
 #include <linux/firmware.h>
+#include <linux/highmem.h>
 
 #include <drm/drm_cache.h>
 #include <drm/drm_print.h>
@@ -52,21 +53,22 @@ void intel_uc_fw_change_status(struct intel_uc_fw *uc_fw,
  * firmware as TGL.
  */
 #define INTEL_GUC_FIRMWARE_DEFS(fw_def, guc_def) \
-	fw_def(ALDERLAKE_P,  0, guc_def(adlp, 69, 0, 3)) \
-	fw_def(ALDERLAKE_S,  0, guc_def(tgl,  69, 0, 3)) \
-	fw_def(DG1,          0, guc_def(dg1,  69, 0, 3)) \
-	fw_def(ROCKETLAKE,   0, guc_def(tgl,  69, 0, 3)) \
-	fw_def(TIGERLAKE,    0, guc_def(tgl,  69, 0, 3)) \
-	fw_def(JASPERLAKE,   0, guc_def(ehl,  69, 0, 3)) \
-	fw_def(ELKHARTLAKE,  0, guc_def(ehl,  69, 0, 3)) \
-	fw_def(ICELAKE,      0, guc_def(icl,  69, 0, 3)) \
-	fw_def(COMETLAKE,    5, guc_def(cml,  69, 0, 3)) \
-	fw_def(COMETLAKE,    0, guc_def(kbl,  69, 0, 3)) \
-	fw_def(COFFEELAKE,   0, guc_def(kbl,  69, 0, 3)) \
-	fw_def(GEMINILAKE,   0, guc_def(glk,  69, 0, 3)) \
-	fw_def(KABYLAKE,     0, guc_def(kbl,  69, 0, 3)) \
-	fw_def(BROXTON,      0, guc_def(bxt,  69, 0, 3)) \
-	fw_def(SKYLAKE,      0, guc_def(skl,  69, 0, 3))
+	fw_def(DG2,          0, guc_def(dg2,  70, 1, 2)) \
+	fw_def(ALDERLAKE_P,  0, guc_def(adlp, 70, 1, 1)) \
+	fw_def(ALDERLAKE_S,  0, guc_def(tgl,  70, 1, 1)) \
+	fw_def(DG1,          0, guc_def(dg1,  70, 1, 1)) \
+	fw_def(ROCKETLAKE,   0, guc_def(tgl,  70, 1, 1)) \
+	fw_def(TIGERLAKE,    0, guc_def(tgl,  70, 1, 1)) \
+	fw_def(JASPERLAKE,   0, guc_def(ehl,  70, 1, 1)) \
+	fw_def(ELKHARTLAKE,  0, guc_def(ehl,  70, 1, 1)) \
+	fw_def(ICELAKE,      0, guc_def(icl,  70, 1, 1)) \
+	fw_def(COMETLAKE,    5, guc_def(cml,  70, 1, 1)) \
+	fw_def(COMETLAKE,    0, guc_def(kbl,  70, 1, 1)) \
+	fw_def(COFFEELAKE,   0, guc_def(kbl,  70, 1, 1)) \
+	fw_def(GEMINILAKE,   0, guc_def(glk,  70, 1, 1)) \
+	fw_def(KABYLAKE,     0, guc_def(kbl,  70, 1, 1)) \
+	fw_def(BROXTON,      0, guc_def(bxt,  70, 1, 1)) \
+	fw_def(SKYLAKE,      0, guc_def(skl,  70, 1, 1))
 
 #define INTEL_HUC_FIRMWARE_DEFS(fw_def, huc_def) \
 	fw_def(ALDERLAKE_P,  0, huc_def(tgl,  7, 9, 3)) \
diff --git a/drivers/gpu/drm/i915/gt/uc/selftest_guc.c b/drivers/gpu/drm/i915/gt/uc/selftest_guc.c
index a115894d5896..1df71d0796ae 100644
--- a/drivers/gpu/drm/i915/gt/uc/selftest_guc.c
+++ b/drivers/gpu/drm/i915/gt/uc/selftest_guc.c
@@ -148,7 +148,7 @@ static int intel_guc_steal_guc_ids(void *arg)
 	struct i915_request *spin_rq = NULL, *rq, *last = NULL;
 	int number_guc_id_stolen = guc->number_guc_id_stolen;
 
-	ce = kzalloc(sizeof(*ce) * GUC_MAX_LRC_DESCRIPTORS, GFP_KERNEL);
+	ce = kcalloc(GUC_MAX_CONTEXT_ID, sizeof(*ce), GFP_KERNEL);
 	if (!ce) {
 		pr_err("Context array allocation failed\n");
 		return -ENOMEM;
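One last note on the selftest hunk: kzalloc(sizeof(*ce) * N, ...) becomes kcalloc(N, sizeof(*ce), ...), which fails cleanly if the multiplication would overflow rather than allocating a short buffer. Roughly what kcalloc() buys, as a kernel-style sketch assuming the check_mul_overflow() helper from <linux/overflow.h>:

	#include <linux/overflow.h>
	#include <linux/slab.h>

	/* Approximation of kcalloc(): the n * size computation is
	 * overflow-checked instead of silently wrapping. */
	static inline void *toy_kcalloc(size_t n, size_t size, gfp_t flags)
	{
		size_t bytes;

		if (check_mul_overflow(n, size, &bytes))
			return NULL;	/* would have wrapped: refuse */

		return kzalloc(bytes, flags);
	}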