Diffstat (limited to 'drivers/gpu/drm')
22 files changed, 194 insertions, 158 deletions
diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c index 70980f82a15b..f191d7b66b1d 100644 --- a/drivers/gpu/drm/i915/i915_cmd_parser.c +++ b/drivers/gpu/drm/i915/i915_cmd_parser.c @@ -1308,10 +1308,11 @@ int intel_engine_cmd_parser(struct intel_engine_cs *engine, int i915_cmd_parser_get_version(struct drm_i915_private *dev_priv) { struct intel_engine_cs *engine; + enum intel_engine_id id; bool active = false; /* If the command parser is not enabled, report 0 - unsupported */ - for_each_engine(engine, dev_priv) { + for_each_engine(engine, dev_priv, id) { if (intel_engine_needs_cmd_parser(engine)) { active = true; break; diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 2e312e0f2670..b20c1ccbd427 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -150,7 +150,7 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj) obj->base.size / 1024, obj->base.read_domains, obj->base.write_domain); - for_each_engine_id(engine, dev_priv, id) + for_each_engine(engine, dev_priv, id) seq_printf(m, "%x ", i915_gem_active_get_seqno(&obj->last_read[id], &obj->base.dev->struct_mutex)); @@ -323,11 +323,12 @@ static void print_batch_pool_stats(struct seq_file *m, struct drm_i915_gem_object *obj; struct file_stats stats; struct intel_engine_cs *engine; + enum intel_engine_id id; int j; memset(&stats, 0, sizeof(stats)); - for_each_engine(engine, dev_priv) { + for_each_engine(engine, dev_priv, id) { for (j = 0; j < ARRAY_SIZE(engine->batch_pool.cache_list); j++) { list_for_each_entry(obj, &engine->batch_pool.cache_list[j], @@ -596,6 +597,7 @@ static int i915_gem_batch_pool_info(struct seq_file *m, void *data) struct drm_device *dev = &dev_priv->drm; struct drm_i915_gem_object *obj; struct intel_engine_cs *engine; + enum intel_engine_id id; int total = 0; int ret, j; @@ -603,7 +605,7 @@ static int i915_gem_batch_pool_info(struct seq_file *m, void *data) if (ret) return ret; - for_each_engine(engine, dev_priv) { + for_each_engine(engine, dev_priv, id) { for (j = 0; j < ARRAY_SIZE(engine->batch_pool.cache_list); j++) { int count; @@ -655,8 +657,9 @@ static int i915_gem_request_info(struct seq_file *m, void *data) { struct drm_i915_private *dev_priv = node_to_i915(m->private); struct drm_device *dev = &dev_priv->drm; - struct intel_engine_cs *engine; struct drm_i915_gem_request *req; + struct intel_engine_cs *engine; + enum intel_engine_id id; int ret, any; ret = mutex_lock_interruptible(&dev->struct_mutex); @@ -664,7 +667,7 @@ static int i915_gem_request_info(struct seq_file *m, void *data) return ret; any = 0; - for_each_engine(engine, dev_priv) { + for_each_engine(engine, dev_priv, id) { int count; count = 0; @@ -710,8 +713,9 @@ static int i915_gem_seqno_info(struct seq_file *m, void *data) { struct drm_i915_private *dev_priv = node_to_i915(m->private); struct intel_engine_cs *engine; + enum intel_engine_id id; - for_each_engine(engine, dev_priv) + for_each_engine(engine, dev_priv, id) i915_ring_seqno_info(m, engine); return 0; @@ -722,6 +726,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data) { struct drm_i915_private *dev_priv = node_to_i915(m->private); struct intel_engine_cs *engine; + enum intel_engine_id id; int i, pipe; intel_runtime_pm_get(dev_priv); @@ -890,7 +895,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data) seq_printf(m, "Graphics Interrupt mask: %08x\n", I915_READ(GTIMR)); } - for_each_engine(engine, dev_priv) { + 
for_each_engine(engine, dev_priv, id) { if (INTEL_GEN(dev_priv) >= 6) { seq_printf(m, "Graphics Interrupt mask (%s): %08x\n", @@ -938,7 +943,7 @@ static int i915_hws_info(struct seq_file *m, void *data) const u32 *hws; int i; - engine = &dev_priv->engine[(uintptr_t)node->info_ent->data]; + engine = dev_priv->engine[(uintptr_t)node->info_ent->data]; hws = engine->status_page.page_addr; if (hws == NULL) return 0; @@ -1329,12 +1334,12 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused) intel_runtime_pm_get(dev_priv); - for_each_engine_id(engine, dev_priv, id) { + for_each_engine(engine, dev_priv, id) { acthd[id] = intel_engine_get_active_head(engine); seqno[id] = intel_engine_get_seqno(engine); } - intel_engine_get_instdone(&dev_priv->engine[RCS], &instdone); + intel_engine_get_instdone(dev_priv->engine[RCS], &instdone); intel_runtime_pm_put(dev_priv); @@ -1345,7 +1350,7 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused) } else seq_printf(m, "Hangcheck inactive\n"); - for_each_engine_id(engine, dev_priv, id) { + for_each_engine(engine, dev_priv, id) { struct intel_breadcrumbs *b = &engine->breadcrumbs; struct rb_node *rb; @@ -1944,6 +1949,7 @@ static int i915_context_status(struct seq_file *m, void *unused) struct drm_device *dev = &dev_priv->drm; struct intel_engine_cs *engine; struct i915_gem_context *ctx; + enum intel_engine_id id; int ret; ret = mutex_lock_interruptible(&dev->struct_mutex); @@ -1970,7 +1976,7 @@ static int i915_context_status(struct seq_file *m, void *unused) seq_putc(m, ctx->remap_slice ? 'R' : 'r'); seq_putc(m, '\n'); - for_each_engine(engine, dev_priv) { + for_each_engine(engine, dev_priv, id) { struct intel_context *ce = &ctx->engine[engine->id]; seq_printf(m, "%s: ", engine->name); @@ -2037,6 +2043,7 @@ static int i915_dump_lrc(struct seq_file *m, void *unused) struct drm_device *dev = &dev_priv->drm; struct intel_engine_cs *engine; struct i915_gem_context *ctx; + enum intel_engine_id id; int ret; if (!i915.enable_execlists) { @@ -2049,7 +2056,7 @@ static int i915_dump_lrc(struct seq_file *m, void *unused) return ret; list_for_each_entry(ctx, &dev_priv->context_list, link) - for_each_engine(engine, dev_priv) + for_each_engine(engine, dev_priv, id) i915_dump_lrc_obj(m, ctx, engine); mutex_unlock(&dev->struct_mutex); @@ -2158,14 +2165,15 @@ static int per_file_ctx(int id, void *ptr, void *data) static void gen8_ppgtt_info(struct seq_file *m, struct drm_i915_private *dev_priv) { - struct intel_engine_cs *engine; struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt; + struct intel_engine_cs *engine; + enum intel_engine_id id; int i; if (!ppgtt) return; - for_each_engine(engine, dev_priv) { + for_each_engine(engine, dev_priv, id) { seq_printf(m, "%s\n", engine->name); for (i = 0; i < 4; i++) { u64 pdp = I915_READ(GEN8_RING_PDP_UDW(engine, i)); @@ -2180,11 +2188,12 @@ static void gen6_ppgtt_info(struct seq_file *m, struct drm_i915_private *dev_priv) { struct intel_engine_cs *engine; + enum intel_engine_id id; if (IS_GEN6(dev_priv)) seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(GFX_MODE)); - for_each_engine(engine, dev_priv) { + for_each_engine(engine, dev_priv, id) { seq_printf(m, "%s\n", engine->name); if (IS_GEN7(dev_priv)) seq_printf(m, "GFX_MODE: 0x%08x\n", @@ -2253,9 +2262,10 @@ out_unlock: static int count_irq_waiters(struct drm_i915_private *i915) { struct intel_engine_cs *engine; + enum intel_engine_id id; int count = 0; - for_each_engine(engine, i915) + for_each_engine(engine, i915, id) count += 
intel_engine_has_waiter(engine); return count; @@ -2418,7 +2428,7 @@ static void i915_guc_client_info(struct seq_file *m, seq_printf(m, "\tFailed doorbell: %u\n", client->b_fail); seq_printf(m, "\tLast submission result: %d\n", client->retcode); - for_each_engine_id(engine, dev_priv, id) { + for_each_engine(engine, dev_priv, id) { u64 submissions = client->submissions[id]; tot += submissions; seq_printf(m, "\tSubmissions: %llu %s\n", @@ -2461,7 +2471,7 @@ static int i915_guc_info(struct seq_file *m, void *data) seq_printf(m, "GuC last action error code: %d\n", guc.action_err); seq_printf(m, "\nGuC submissions:\n"); - for_each_engine_id(engine, dev_priv, id) { + for_each_engine(engine, dev_priv, id) { u64 submissions = guc.submissions[id]; total += submissions; seq_printf(m, "\t%-24s: %10llu, last seqno 0x%08x\n", @@ -3082,8 +3092,9 @@ static int i915_engine_info(struct seq_file *m, void *unused) { struct drm_i915_private *dev_priv = node_to_i915(m->private); struct intel_engine_cs *engine; + enum intel_engine_id id; - for_each_engine(engine, dev_priv) { + for_each_engine(engine, dev_priv, id) { struct intel_breadcrumbs *b = &engine->breadcrumbs; struct drm_i915_gem_request *rq; struct rb_node *rb; @@ -3231,7 +3242,7 @@ static int i915_semaphore_status(struct seq_file *m, void *unused) page = i915_gem_object_get_page(dev_priv->semaphore->obj, 0); seqno = (uint64_t *)kmap_atomic(page); - for_each_engine_id(engine, dev_priv, id) { + for_each_engine(engine, dev_priv, id) { uint64_t offset; seq_printf(m, "%s\n", engine->name); @@ -3256,7 +3267,7 @@ static int i915_semaphore_status(struct seq_file *m, void *unused) kunmap_atomic(seqno); } else { seq_puts(m, " Last signal:"); - for_each_engine(engine, dev_priv) + for_each_engine(engine, dev_priv, id) for (j = 0; j < num_rings; j++) seq_printf(m, "0x%08x\n", I915_READ(engine->semaphore.mbox.signal[j])); @@ -3264,7 +3275,7 @@ static int i915_semaphore_status(struct seq_file *m, void *unused) } seq_puts(m, "\nSync seqno:\n"); - for_each_engine(engine, dev_priv) { + for_each_engine(engine, dev_priv, id) { for (j = 0; j < num_rings; j++) seq_printf(m, " 0x%08x ", engine->semaphore.sync_seqno[j]); @@ -3320,7 +3331,7 @@ static int i915_wa_registers(struct seq_file *m, void *unused) intel_runtime_pm_get(dev_priv); seq_printf(m, "Workarounds applied: %d\n", workarounds->count); - for_each_engine_id(engine, dev_priv, id) + for_each_engine(engine, dev_priv, id) seq_printf(m, "HW whitelist count for %s: %d\n", engine->name, workarounds->hw_whitelist_count[id]); for (i = 0; i < workarounds->count; ++i) { diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index e9b3bfcb347a..8c3d4761dfa0 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -255,16 +255,16 @@ static int i915_getparam(struct drm_device *dev, void *data, value = dev_priv->overlay ? 
1 : 0; break; case I915_PARAM_HAS_BSD: - value = intel_engine_initialized(&dev_priv->engine[VCS]); + value = !!dev_priv->engine[VCS]; break; case I915_PARAM_HAS_BLT: - value = intel_engine_initialized(&dev_priv->engine[BCS]); + value = !!dev_priv->engine[BCS]; break; case I915_PARAM_HAS_VEBOX: - value = intel_engine_initialized(&dev_priv->engine[VECS]); + value = !!dev_priv->engine[VECS]; break; case I915_PARAM_HAS_BSD2: - value = intel_engine_initialized(&dev_priv->engine[VCS2]); + value = !!dev_priv->engine[VCS2]; break; case I915_PARAM_HAS_EXEC_CONSTANTS: value = INTEL_GEN(dev_priv) >= 4; @@ -1707,10 +1707,11 @@ int i915_resume_switcheroo(struct drm_device *dev) static void disable_engines_irq(struct drm_i915_private *dev_priv) { struct intel_engine_cs *engine; + enum intel_engine_id id; /* Ensure irq handler finishes, and not run again. */ disable_irq(dev_priv->drm.irq); - for_each_engine(engine, dev_priv) + for_each_engine(engine, dev_priv, id) tasklet_kill(&engine->irq_tasklet); } diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 6360e807c6ba..9e830b58b06b 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -1808,7 +1808,7 @@ struct drm_i915_private { struct pci_dev *bridge_dev; struct i915_gem_context *kernel_context; - struct intel_engine_cs engine[I915_NUM_ENGINES]; + struct intel_engine_cs *engine[I915_NUM_ENGINES]; struct i915_vma *semaphore; u32 next_seqno; @@ -2125,19 +2125,11 @@ static inline struct drm_i915_private *guc_to_i915(struct intel_guc *guc) } /* Simple iterator over all initialised engines */ -#define for_each_engine(engine__, dev_priv__) \ - for ((engine__) = &(dev_priv__)->engine[0]; \ - (engine__) < &(dev_priv__)->engine[I915_NUM_ENGINES]; \ - (engine__)++) \ - for_each_if (intel_engine_initialized(engine__)) - -/* Iterator with engine_id */ -#define for_each_engine_id(engine__, dev_priv__, id__) \ - for ((engine__) = &(dev_priv__)->engine[0], (id__) = 0; \ - (engine__) < &(dev_priv__)->engine[I915_NUM_ENGINES]; \ - (engine__)++) \ - for_each_if (((id__) = (engine__)->id, \ - intel_engine_initialized(engine__))) +#define for_each_engine(engine__, dev_priv__, id__) \ + for ((id__) = 0; \ + (id__) < I915_NUM_ENGINES; \ + (id__)++) \ + for_each_if ((engine__) = (dev_priv__)->engine[(id__)]) #define __mask_next_bit(mask) ({ \ int __idx = ffs(mask) - 1; \ @@ -2148,7 +2140,7 @@ static inline struct drm_i915_private *guc_to_i915(struct intel_guc *guc) /* Iterator over subset of engines selected by mask */ #define for_each_engine_masked(engine__, dev_priv__, mask__, tmp__) \ for (tmp__ = mask__ & INTEL_INFO(dev_priv__)->ring_mask; \ - tmp__ ? (engine__ = &(dev_priv__)->engine[__mask_next_bit(tmp__)]), 1 : 0; ) + tmp__ ? 
(engine__ = (dev_priv__)->engine[__mask_next_bit(tmp__)]), 1 : 0; ) enum hdmi_force_audio { HDMI_AUDIO_OFF_DVI = -2, /* no aux data for HDMI-DVI converter */ diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 77dc0590ecc7..fb460cc2857c 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -2626,10 +2626,11 @@ static void i915_gem_reset_engine(struct intel_engine_cs *engine) void i915_gem_reset(struct drm_i915_private *dev_priv) { struct intel_engine_cs *engine; + enum intel_engine_id id; i915_gem_retire_requests(dev_priv); - for_each_engine(engine, dev_priv) + for_each_engine(engine, dev_priv, id) i915_gem_reset_engine(engine); i915_gem_restore_fences(&dev_priv->drm); @@ -2677,12 +2678,13 @@ static void i915_gem_cleanup_engine(struct intel_engine_cs *engine) void i915_gem_set_wedged(struct drm_i915_private *dev_priv) { struct intel_engine_cs *engine; + enum intel_engine_id id; lockdep_assert_held(&dev_priv->drm.struct_mutex); set_bit(I915_WEDGED, &dev_priv->gpu_error.flags); i915_gem_context_lost(dev_priv); - for_each_engine(engine, dev_priv) + for_each_engine(engine, dev_priv, id) i915_gem_cleanup_engine(engine); mod_delayed_work(dev_priv->wq, &dev_priv->gt.idle_work, 0); @@ -2721,6 +2723,7 @@ i915_gem_idle_work_handler(struct work_struct *work) container_of(work, typeof(*dev_priv), gt.idle_work.work); struct drm_device *dev = &dev_priv->drm; struct intel_engine_cs *engine; + enum intel_engine_id id; bool rearm_hangcheck; if (!READ_ONCE(dev_priv->gt.awake)) @@ -2743,7 +2746,7 @@ i915_gem_idle_work_handler(struct work_struct *work) if (dev_priv->gt.active_engines) goto out_unlock; - for_each_engine(engine, dev_priv) + for_each_engine(engine, dev_priv, id) i915_gem_batch_pool_fini(&engine->batch_pool); GEM_BUG_ON(!dev_priv->gt.awake); @@ -2936,9 +2939,10 @@ int i915_gem_wait_for_idle(struct drm_i915_private *dev_priv, unsigned int flags) { struct intel_engine_cs *engine; + enum intel_engine_id id; int ret; - for_each_engine(engine, dev_priv) { + for_each_engine(engine, dev_priv, id) { if (engine->last_context == NULL) continue; @@ -3181,7 +3185,7 @@ i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj) */ wmb(); if (INTEL_GEN(dev_priv) >= 6 && !HAS_LLC(dev_priv)) - POSTING_READ(RING_ACTHD(dev_priv->engine[RCS].mmio_base)); + POSTING_READ(RING_ACTHD(dev_priv->engine[RCS]->mmio_base)); intel_fb_obj_flush(obj, false, write_origin(obj, I915_GEM_DOMAIN_GTT)); @@ -4416,6 +4420,7 @@ i915_gem_init_hw(struct drm_device *dev) { struct drm_i915_private *dev_priv = to_i915(dev); struct intel_engine_cs *engine; + enum intel_engine_id id; int ret; /* Double layer security blanket, see i915_gem_init() */ @@ -4459,7 +4464,7 @@ i915_gem_init_hw(struct drm_device *dev) } /* Need to do basic initialisation of all rings first: */ - for_each_engine(engine, dev_priv) { + for_each_engine(engine, dev_priv, id) { ret = engine->init_hw(engine); if (ret) goto out; @@ -4558,17 +4563,12 @@ i915_gem_cleanup_engines(struct drm_device *dev) { struct drm_i915_private *dev_priv = to_i915(dev); struct intel_engine_cs *engine; + enum intel_engine_id id; - for_each_engine(engine, dev_priv) + for_each_engine(engine, dev_priv, id) dev_priv->gt.cleanup_engine(engine); } -static void -init_engine_lists(struct intel_engine_cs *engine) -{ - INIT_LIST_HEAD(&engine->request_list); -} - void i915_gem_load_init_fences(struct drm_i915_private *dev_priv) { @@ -4605,7 +4605,6 @@ void i915_gem_load_init(struct drm_device *dev) { struct drm_i915_private 
*dev_priv = to_i915(dev); - int i; dev_priv->objects = kmem_cache_create("i915_gem_object", @@ -4629,8 +4628,6 @@ i915_gem_load_init(struct drm_device *dev) INIT_LIST_HEAD(&dev_priv->mm.unbound_list); INIT_LIST_HEAD(&dev_priv->mm.bound_list); INIT_LIST_HEAD(&dev_priv->mm.fence_list); - for (i = 0; i < I915_NUM_ENGINES; i++) - init_engine_lists(&dev_priv->engine[i]); INIT_DELAYED_WORK(&dev_priv->gt.retire_work, i915_gem_retire_work_handler); INIT_DELAYED_WORK(&dev_priv->gt.idle_work, diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c index df10f4e95736..481ec43f8180 100644 --- a/drivers/gpu/drm/i915/i915_gem_context.c +++ b/drivers/gpu/drm/i915/i915_gem_context.c @@ -474,10 +474,11 @@ int i915_gem_context_init(struct drm_device *dev) void i915_gem_context_lost(struct drm_i915_private *dev_priv) { struct intel_engine_cs *engine; + enum intel_engine_id id; lockdep_assert_held(&dev_priv->drm.struct_mutex); - for_each_engine(engine, dev_priv) { + for_each_engine(engine, dev_priv, id) { if (engine->last_context) { i915_gem_context_unpin(engine->last_context, engine); engine->last_context = NULL; @@ -492,13 +493,13 @@ void i915_gem_context_lost(struct drm_i915_private *dev_priv) if (!i915_gem_context_is_default(ctx)) continue; - for_each_engine(engine, dev_priv) + for_each_engine(engine, dev_priv, id) ctx->engine[engine->id].initialised = false; ctx->remap_slice = ALL_L3_SLICES(dev_priv); } - for_each_engine(engine, dev_priv) { + for_each_engine(engine, dev_priv, id) { struct intel_context *kce = &dev_priv->kernel_context->engine[engine->id]; @@ -563,6 +564,7 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags) struct drm_i915_private *dev_priv = req->i915; struct intel_ring *ring = req->ring; struct intel_engine_cs *engine = req->engine; + enum intel_engine_id id; u32 flags = hw_flags | MI_MM_SPACE_GTT; const int num_rings = /* Use an extended w/a on ivb+ if signalling from other rings */ @@ -605,7 +607,7 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags) intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(num_rings)); - for_each_engine(signaller, dev_priv) { + for_each_engine(signaller, dev_priv, id) { if (signaller == engine) continue; @@ -634,7 +636,7 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags) intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(num_rings)); - for_each_engine(signaller, dev_priv) { + for_each_engine(signaller, dev_priv, id) { if (signaller == engine) continue; @@ -929,8 +931,9 @@ int i915_switch_context(struct drm_i915_gem_request *req) int i915_gem_switch_to_kernel_context(struct drm_i915_private *dev_priv) { struct intel_engine_cs *engine; + enum intel_engine_id id; - for_each_engine(engine, dev_priv) { + for_each_engine(engine, dev_priv, id) { struct drm_i915_gem_request *req; int ret; diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c index 5b6f81c1dbca..b5e9e669f50f 100644 --- a/drivers/gpu/drm/i915/i915_gem_evict.c +++ b/drivers/gpu/drm/i915/i915_gem_evict.c @@ -37,8 +37,9 @@ static bool gpu_is_idle(struct drm_i915_private *dev_priv) { struct intel_engine_cs *engine; + enum intel_engine_id id; - for_each_engine(engine, dev_priv) { + for_each_engine(engine, dev_priv, id) { if (intel_engine_is_active(engine)) return false; } diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c index 72c7c1855e70..fa4d27ca0c18 100644 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ 
b/drivers/gpu/drm/i915/i915_gem_execbuffer.c @@ -1599,12 +1599,12 @@ eb_select_engine(struct drm_i915_private *dev_priv, return NULL; } - engine = &dev_priv->engine[_VCS(bsd_idx)]; + engine = dev_priv->engine[_VCS(bsd_idx)]; } else { - engine = &dev_priv->engine[user_ring_map[user_ring_id]]; + engine = dev_priv->engine[user_ring_map[user_ring_id]]; } - if (!intel_engine_initialized(engine)) { + if (!engine) { DRM_DEBUG("execbuf with invalid ring: %u\n", user_ring_id); return NULL; } diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index 2d846aa39ca5..0a45063b465f 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -1728,8 +1728,9 @@ static void gen8_ppgtt_enable(struct drm_device *dev) { struct drm_i915_private *dev_priv = to_i915(dev); struct intel_engine_cs *engine; + enum intel_engine_id id; - for_each_engine(engine, dev_priv) { + for_each_engine(engine, dev_priv, id) { u32 four_level = USES_FULL_48BIT_PPGTT(dev) ? GEN8_GFX_PPGTT_48B : 0; I915_WRITE(RING_MODE_GEN7(engine), _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE | four_level)); @@ -1741,6 +1742,7 @@ static void gen7_ppgtt_enable(struct drm_device *dev) struct drm_i915_private *dev_priv = to_i915(dev); struct intel_engine_cs *engine; uint32_t ecochk, ecobits; + enum intel_engine_id id; ecobits = I915_READ(GAC_ECO_BITS); I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_PPGTT_CACHE64B); @@ -1754,7 +1756,7 @@ static void gen7_ppgtt_enable(struct drm_device *dev) } I915_WRITE(GAM_ECOCHK, ecochk); - for_each_engine(engine, dev_priv) { + for_each_engine(engine, dev_priv, id) { /* GFX_MODE is per-ring on gen7+ */ I915_WRITE(RING_MODE_GEN7(engine), _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE)); @@ -2239,11 +2241,12 @@ static bool needs_idle_maps(struct drm_i915_private *dev_priv) void i915_check_and_clear_faults(struct drm_i915_private *dev_priv) { struct intel_engine_cs *engine; + enum intel_engine_id id; if (INTEL_INFO(dev_priv)->gen < 6) return; - for_each_engine(engine, dev_priv) { + for_each_engine(engine, dev_priv, id) { u32 fault_reg; fault_reg = I915_READ(RING_FAULT_REG(engine)); if (fault_reg & RING_FAULT_VALID) { @@ -2260,7 +2263,10 @@ void i915_check_and_clear_faults(struct drm_i915_private *dev_priv) fault_reg & ~RING_FAULT_VALID); } } - POSTING_READ(RING_FAULT_REG(&dev_priv->engine[RCS])); + + /* Engine specific init may not have been done till this point. 
*/ + if (dev_priv->engine[RCS]) + POSTING_READ(RING_FAULT_REG(dev_priv->engine[RCS])); } static void i915_ggtt_flush(struct drm_i915_private *dev_priv) diff --git a/drivers/gpu/drm/i915/i915_gem_request.c b/drivers/gpu/drm/i915/i915_gem_request.c index 8832f8ec1583..74ede1f53372 100644 --- a/drivers/gpu/drm/i915/i915_gem_request.c +++ b/drivers/gpu/drm/i915/i915_gem_request.c @@ -256,10 +256,11 @@ static int i915_gem_check_wedge(struct drm_i915_private *dev_priv) static int i915_gem_init_seqno(struct drm_i915_private *dev_priv, u32 seqno) { struct intel_engine_cs *engine; + enum intel_engine_id id; int ret; /* Carefully retire all requests without writing to the rings */ - for_each_engine(engine, dev_priv) { + for_each_engine(engine, dev_priv, id) { ret = intel_engine_idle(engine, I915_WAIT_INTERRUPTIBLE | I915_WAIT_LOCKED); @@ -276,7 +277,7 @@ static int i915_gem_init_seqno(struct drm_i915_private *dev_priv, u32 seqno) } /* Finally reset hw state */ - for_each_engine(engine, dev_priv) + for_each_engine(engine, dev_priv, id) intel_engine_init_seqno(engine, seqno); return 0; diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c index cd38948107e7..8d4b9eb8718a 100644 --- a/drivers/gpu/drm/i915/i915_gpu_error.c +++ b/drivers/gpu/drm/i915/i915_gpu_error.c @@ -632,7 +632,7 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m, len += scnprintf(buf + len, sizeof(buf), "%s%s", first ? "" : ", ", - dev_priv->engine[j].name); + dev_priv->engine[j]->name); first = 0; } scnprintf(buf + len, sizeof(buf), ")"); @@ -650,7 +650,7 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m, obj = ee->batchbuffer; if (obj) { - err_puts(m, dev_priv->engine[i].name); + err_puts(m, dev_priv->engine[i]->name); if (ee->pid != -1) err_printf(m, " (submitted by %s [%d])", ee->comm, @@ -658,12 +658,12 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m, err_printf(m, " --- gtt_offset = 0x%08x %08x\n", upper_32_bits(obj->gtt_offset), lower_32_bits(obj->gtt_offset)); - print_error_obj(m, &dev_priv->engine[i], NULL, obj); + print_error_obj(m, dev_priv->engine[i], NULL, obj); } if (ee->num_requests) { err_printf(m, "%s --- %d requests\n", - dev_priv->engine[i].name, + dev_priv->engine[i]->name, ee->num_requests); for (j = 0; j < ee->num_requests; j++) error_print_request(m, " ", &ee->requests[j]); @@ -671,10 +671,10 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m, if (IS_ERR(ee->waiters)) { err_printf(m, "%s --- ? 
waiters [unable to acquire spinlock]\n", - dev_priv->engine[i].name); + dev_priv->engine[i]->name); } else if (ee->num_waiters) { err_printf(m, "%s --- %d waiters\n", - dev_priv->engine[i].name, + dev_priv->engine[i]->name, ee->num_waiters); for (j = 0; j < ee->num_waiters; j++) { err_printf(m, " seqno 0x%08x for %s [%d]\n", @@ -684,19 +684,19 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m, } } - print_error_obj(m, &dev_priv->engine[i], + print_error_obj(m, dev_priv->engine[i], "ringbuffer", ee->ringbuffer); - print_error_obj(m, &dev_priv->engine[i], + print_error_obj(m, dev_priv->engine[i], "HW Status", ee->hws_page); - print_error_obj(m, &dev_priv->engine[i], + print_error_obj(m, dev_priv->engine[i], "HW context", ee->ctx); - print_error_obj(m, &dev_priv->engine[i], + print_error_obj(m, dev_priv->engine[i], "WA context", ee->wa_ctx); - print_error_obj(m, &dev_priv->engine[i], + print_error_obj(m, dev_priv->engine[i], "WA batchbuffer", ee->wa_batchbuffer); } @@ -977,7 +977,7 @@ static void gen8_record_semaphore_state(struct drm_i915_error_state *error, if (!error->semaphore) return; - for_each_engine_id(to, dev_priv, id) { + for_each_engine(to, dev_priv, id) { int idx; u16 signal_offset; u32 *tmp; @@ -1247,14 +1247,14 @@ static void i915_gem_record_rings(struct drm_i915_private *dev_priv, i915_error_object_create(dev_priv, dev_priv->semaphore); for (i = 0; i < I915_NUM_ENGINES; i++) { - struct intel_engine_cs *engine = &dev_priv->engine[i]; + struct intel_engine_cs *engine = dev_priv->engine[i]; struct drm_i915_error_engine *ee = &error->engine[i]; struct drm_i915_gem_request *request; ee->pid = -1; ee->engine_id = -1; - if (!intel_engine_initialized(engine)) + if (!engine) continue; ee->engine_id = i; diff --git a/drivers/gpu/drm/i915/i915_guc_submission.c b/drivers/gpu/drm/i915/i915_guc_submission.c index 3106dcc06fe9..a1f76c8f8cde 100644 --- a/drivers/gpu/drm/i915/i915_guc_submission.c +++ b/drivers/gpu/drm/i915/i915_guc_submission.c @@ -917,6 +917,7 @@ static void guc_addon_create(struct intel_guc *guc) struct guc_policies *policies; struct guc_mmio_reg_state *reg_state; struct intel_engine_cs *engine; + enum intel_engine_id id; struct page *page; u32 size; @@ -944,10 +945,10 @@ static void guc_addon_create(struct intel_guc *guc) * so its address won't change after we've told the GuC where * to find it. 
*/ - engine = &dev_priv->engine[RCS]; + engine = dev_priv->engine[RCS]; ads->golden_context_lrca = engine->status_page.ggtt_offset; - for_each_engine(engine, dev_priv) + for_each_engine(engine, dev_priv, id) ads->eng_state_size[engine->guc_id] = intel_lr_context_size(engine); /* GuC scheduling policies */ @@ -960,7 +961,7 @@ static void guc_addon_create(struct intel_guc *guc) /* MMIO reg state */ reg_state = (void *)policies + sizeof(struct guc_policies); - for_each_engine(engine, dev_priv) { + for_each_engine(engine, dev_priv, id) { reg_state->mmio_white_list[engine->guc_id].mmio_start = engine->mmio_base + GUC_MMIO_WHITE_LIST_START; @@ -1014,9 +1015,10 @@ int i915_guc_submission_init(struct drm_i915_private *dev_priv) int i915_guc_submission_enable(struct drm_i915_private *dev_priv) { struct intel_guc *guc = &dev_priv->guc; + struct drm_i915_gem_request *request; struct i915_guc_client *client; struct intel_engine_cs *engine; - struct drm_i915_gem_request *request; + enum intel_engine_id id; /* client for execbuf submission */ client = guc_client_alloc(dev_priv, @@ -1033,7 +1035,7 @@ int i915_guc_submission_enable(struct drm_i915_private *dev_priv) guc_init_doorbell_hw(guc); /* Take over from manual control of ELSP (execlists) */ - for_each_engine(engine, dev_priv) { + for_each_engine(engine, dev_priv, id) { engine->submit_request = i915_guc_submit; /* Replay the current set of previously submitted requests */ diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 91077e426c77..89380c830cc0 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -1058,8 +1058,9 @@ static u32 vlv_wa_c0_ei(struct drm_i915_private *dev_priv, u32 pm_iir) static bool any_waiters(struct drm_i915_private *dev_priv) { struct intel_engine_cs *engine; + enum intel_engine_id id; - for_each_engine(engine, dev_priv) + for_each_engine(engine, dev_priv, id) if (intel_engine_has_waiter(engine)) return true; @@ -1257,20 +1258,20 @@ static void ilk_gt_irq_handler(struct drm_i915_private *dev_priv, u32 gt_iir) { if (gt_iir & GT_RENDER_USER_INTERRUPT) - notify_ring(&dev_priv->engine[RCS]); + notify_ring(dev_priv->engine[RCS]); if (gt_iir & ILK_BSD_USER_INTERRUPT) - notify_ring(&dev_priv->engine[VCS]); + notify_ring(dev_priv->engine[VCS]); } static void snb_gt_irq_handler(struct drm_i915_private *dev_priv, u32 gt_iir) { if (gt_iir & GT_RENDER_USER_INTERRUPT) - notify_ring(&dev_priv->engine[RCS]); + notify_ring(dev_priv->engine[RCS]); if (gt_iir & GT_BSD_USER_INTERRUPT) - notify_ring(&dev_priv->engine[VCS]); + notify_ring(dev_priv->engine[VCS]); if (gt_iir & GT_BLT_USER_INTERRUPT) - notify_ring(&dev_priv->engine[BCS]); + notify_ring(dev_priv->engine[BCS]); if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT | GT_BSD_CS_ERROR_INTERRUPT | @@ -1340,21 +1341,21 @@ static void gen8_gt_irq_handler(struct drm_i915_private *dev_priv, u32 gt_iir[4]) { if (gt_iir[0]) { - gen8_cs_irq_handler(&dev_priv->engine[RCS], + gen8_cs_irq_handler(dev_priv->engine[RCS], gt_iir[0], GEN8_RCS_IRQ_SHIFT); - gen8_cs_irq_handler(&dev_priv->engine[BCS], + gen8_cs_irq_handler(dev_priv->engine[BCS], gt_iir[0], GEN8_BCS_IRQ_SHIFT); } if (gt_iir[1]) { - gen8_cs_irq_handler(&dev_priv->engine[VCS], + gen8_cs_irq_handler(dev_priv->engine[VCS], gt_iir[1], GEN8_VCS1_IRQ_SHIFT); - gen8_cs_irq_handler(&dev_priv->engine[VCS2], + gen8_cs_irq_handler(dev_priv->engine[VCS2], gt_iir[1], GEN8_VCS2_IRQ_SHIFT); } if (gt_iir[3]) - gen8_cs_irq_handler(&dev_priv->engine[VECS], + gen8_cs_irq_handler(dev_priv->engine[VECS], 
gt_iir[3], GEN8_VECS_IRQ_SHIFT); if (gt_iir[2] & dev_priv->pm_rps_events) @@ -1598,7 +1599,7 @@ static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir) if (HAS_VEBOX(dev_priv)) { if (pm_iir & PM_VEBOX_USER_INTERRUPT) - notify_ring(&dev_priv->engine[VECS]); + notify_ring(dev_priv->engine[VECS]); if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT) DRM_DEBUG("Command parser error, pm_iir 0x%08x\n", pm_iir); @@ -2588,7 +2589,7 @@ static void i915_report_and_clear_eir(struct drm_i915_private *dev_priv) pr_err("render error detected, EIR: 0x%08x\n", eir); - intel_engine_get_instdone(&dev_priv->engine[RCS], &instdone); + intel_engine_get_instdone(dev_priv->engine[RCS], &instdone); if (IS_G4X(dev_priv)) { if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) { @@ -2833,9 +2834,10 @@ semaphore_wait_to_signaller_ring(struct intel_engine_cs *engine, u32 ipehr, { struct drm_i915_private *dev_priv = engine->i915; struct intel_engine_cs *signaller; + enum intel_engine_id id; if (INTEL_GEN(dev_priv) >= 8) { - for_each_engine(signaller, dev_priv) { + for_each_engine(signaller, dev_priv, id) { if (engine == signaller) continue; @@ -2845,7 +2847,7 @@ semaphore_wait_to_signaller_ring(struct intel_engine_cs *engine, u32 ipehr, } else { u32 sync_bits = ipehr & MI_SEMAPHORE_SYNC_MASK; - for_each_engine(signaller, dev_priv) { + for_each_engine(signaller, dev_priv, id) { if(engine == signaller) continue; @@ -2966,8 +2968,9 @@ static int semaphore_passed(struct intel_engine_cs *engine) static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv) { struct intel_engine_cs *engine; + enum intel_engine_id id; - for_each_engine(engine, dev_priv) + for_each_engine(engine, dev_priv, id) engine->hangcheck.deadlock = 0; } @@ -3094,6 +3097,7 @@ static void i915_hangcheck_elapsed(struct work_struct *work) container_of(work, typeof(*dev_priv), gpu_error.hangcheck_work.work); struct intel_engine_cs *engine; + enum intel_engine_id id; unsigned int hung = 0, stuck = 0; int busy_count = 0; #define BUSY 1 @@ -3113,7 +3117,7 @@ static void i915_hangcheck_elapsed(struct work_struct *work) */ intel_uncore_arm_unclaimed_mmio_detection(dev_priv); - for_each_engine(engine, dev_priv) { + for_each_engine(engine, dev_priv, id) { bool busy = intel_engine_has_waiter(engine); u64 acthd; u32 seqno; @@ -4004,7 +4008,7 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg) new_iir = I915_READ16(IIR); /* Flush posted writes */ if (iir & I915_USER_INTERRUPT) - notify_ring(&dev_priv->engine[RCS]); + notify_ring(dev_priv->engine[RCS]); for_each_pipe(dev_priv, pipe) { int plane = pipe; @@ -4201,7 +4205,7 @@ static irqreturn_t i915_irq_handler(int irq, void *arg) new_iir = I915_READ(IIR); /* Flush posted writes */ if (iir & I915_USER_INTERRUPT) - notify_ring(&dev_priv->engine[RCS]); + notify_ring(dev_priv->engine[RCS]); for_each_pipe(dev_priv, pipe) { int plane = pipe; @@ -4433,9 +4437,9 @@ static irqreturn_t i965_irq_handler(int irq, void *arg) new_iir = I915_READ(IIR); /* Flush posted writes */ if (iir & I915_USER_INTERRUPT) - notify_ring(&dev_priv->engine[RCS]); + notify_ring(dev_priv->engine[RCS]); if (iir & I915_BSD_USER_INTERRUPT) - notify_ring(&dev_priv->engine[VCS]); + notify_ring(dev_priv->engine[VCS]); for_each_pipe(dev_priv, pipe) { if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS && diff --git a/drivers/gpu/drm/i915/intel_breadcrumbs.c b/drivers/gpu/drm/i915/intel_breadcrumbs.c index 495611b7068d..23fc1042fed4 100644 --- a/drivers/gpu/drm/i915/intel_breadcrumbs.c +++ 
b/drivers/gpu/drm/i915/intel_breadcrumbs.c @@ -621,6 +621,7 @@ void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine) unsigned int intel_kick_waiters(struct drm_i915_private *i915) { struct intel_engine_cs *engine; + enum intel_engine_id id; unsigned int mask = 0; /* To avoid the task_struct disappearing beneath us as we wake up @@ -628,7 +629,7 @@ unsigned int intel_kick_waiters(struct drm_i915_private *i915) * RCU lock, i.e. as we call wake_up_process() we must be holding the * rcu_read_lock(). */ - for_each_engine(engine, i915) + for_each_engine(engine, i915, id) if (unlikely(intel_engine_wakeup(engine))) mask |= intel_engine_flag(engine); @@ -638,9 +639,10 @@ unsigned int intel_kick_waiters(struct drm_i915_private *i915) unsigned int intel_kick_signalers(struct drm_i915_private *i915) { struct intel_engine_cs *engine; + enum intel_engine_id id; unsigned int mask = 0; - for_each_engine(engine, i915) { + for_each_engine(engine, i915, id) { if (unlikely(READ_ONCE(engine->breadcrumbs.first_signal))) { wake_up_process(engine->breadcrumbs.signaler); mask |= intel_engine_flag(engine); diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index cfcb03f82016..645a779b7ea4 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -12243,19 +12243,19 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, work->flip_count = I915_READ(PIPE_FLIPCOUNT_G4X(pipe)) + 1; if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) { - engine = &dev_priv->engine[BCS]; + engine = dev_priv->engine[BCS]; if (fb->modifier[0] != old_fb->modifier[0]) /* vlv: DISPLAY_FLIP fails to change tiling */ engine = NULL; } else if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) { - engine = &dev_priv->engine[BCS]; + engine = dev_priv->engine[BCS]; } else if (INTEL_INFO(dev)->gen >= 7) { engine = i915_gem_active_get_engine(&obj->last_write, &obj->base.dev->struct_mutex); if (engine == NULL || engine->id != RCS) - engine = &dev_priv->engine[BCS]; + engine = dev_priv->engine[BCS]; } else { - engine = &dev_priv->engine[RCS]; + engine = dev_priv->engine[RCS]; } mmio_flip = use_mmio_flip(engine, obj); diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c index 1d597feba97f..fba6edd9f819 100644 --- a/drivers/gpu/drm/i915/intel_engine_cs.c +++ b/drivers/gpu/drm/i915/intel_engine_cs.c @@ -82,12 +82,17 @@ static const struct engine_info { }, }; -static struct intel_engine_cs * +static int intel_engine_setup(struct drm_i915_private *dev_priv, enum intel_engine_id id) { const struct engine_info *info = &intel_engines[id]; - struct intel_engine_cs *engine = &dev_priv->engine[id]; + struct intel_engine_cs *engine; + + GEM_BUG_ON(dev_priv->engine[id]); + engine = kzalloc(sizeof(*engine), GFP_KERNEL); + if (!engine) + return -ENOMEM; engine->id = id; engine->i915 = dev_priv; @@ -97,7 +102,8 @@ intel_engine_setup(struct drm_i915_private *dev_priv, engine->mmio_base = info->mmio_base; engine->irq_shift = info->irq_shift; - return engine; + dev_priv->engine[id] = engine; + return 0; } /** @@ -112,6 +118,8 @@ int intel_engines_init(struct drm_device *dev) struct intel_device_info *device_info = mkwrite_device_info(dev_priv); unsigned int mask = 0; int (*init)(struct intel_engine_cs *engine); + struct intel_engine_cs *engine; + enum intel_engine_id id; unsigned int i; int ret; @@ -131,7 +139,11 @@ int intel_engines_init(struct drm_device *dev) if (!init) continue; - ret = init(intel_engine_setup(dev_priv, i)); + ret = 
intel_engine_setup(dev_priv, i); + if (ret) + goto cleanup; + + ret = init(dev_priv->engine[i]); if (ret) goto cleanup; @@ -151,11 +163,11 @@ int intel_engines_init(struct drm_device *dev) return 0; cleanup: - for (i = 0; i < I915_NUM_ENGINES; i++) { + for_each_engine(engine, dev_priv, id) { if (i915.enable_execlists) - intel_logical_ring_cleanup(&dev_priv->engine[i]); + intel_logical_ring_cleanup(engine); else - intel_engine_cleanup(&dev_priv->engine[i]); + intel_engine_cleanup(engine); } return ret; diff --git a/drivers/gpu/drm/i915/intel_guc_loader.c b/drivers/gpu/drm/i915/intel_guc_loader.c index 7ace96be82a8..95c6751d598d 100644 --- a/drivers/gpu/drm/i915/intel_guc_loader.c +++ b/drivers/gpu/drm/i915/intel_guc_loader.c @@ -100,12 +100,13 @@ const char *intel_guc_fw_status_repr(enum intel_guc_fw_status status) static void guc_interrupts_release(struct drm_i915_private *dev_priv) { struct intel_engine_cs *engine; + enum intel_engine_id id; int irqs; /* tell all command streamers NOT to forward interrupts or vblank to GuC */ irqs = _MASKED_FIELD(GFX_FORWARD_VBLANK_MASK, GFX_FORWARD_VBLANK_NEVER); irqs |= _MASKED_BIT_DISABLE(GFX_INTERRUPT_STEERING); - for_each_engine(engine, dev_priv) + for_each_engine(engine, dev_priv, id) I915_WRITE(RING_MODE_GEN7(engine), irqs); /* route all GT interrupts to the host */ @@ -117,12 +118,13 @@ static void guc_interrupts_release(struct drm_i915_private *dev_priv) static void guc_interrupts_capture(struct drm_i915_private *dev_priv) { struct intel_engine_cs *engine; + enum intel_engine_id id; int irqs; u32 tmp; /* tell all command streamers to forward interrupts (but not vblank) to GuC */ irqs = _MASKED_BIT_ENABLE(GFX_INTERRUPT_STEERING); - for_each_engine(engine, dev_priv) + for_each_engine(engine, dev_priv, id) I915_WRITE(RING_MODE_GEN7(engine), irqs); /* route USER_INTERRUPT to Host, all others are sent to GuC. */ diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c index 10fcea57e4dd..bc86585b9fbb 100644 --- a/drivers/gpu/drm/i915/intel_lrc.c +++ b/drivers/gpu/drm/i915/intel_lrc.c @@ -1648,9 +1648,6 @@ void intel_logical_ring_cleanup(struct intel_engine_cs *engine) { struct drm_i915_private *dev_priv; - if (!intel_engine_initialized(engine)) - return; - /* * Tasklet cannot be active at this point due intel_mark_active/idle * so this is just for documentation. @@ -1677,13 +1674,16 @@ void intel_logical_ring_cleanup(struct intel_engine_cs *engine) lrc_destroy_wa_ctx_obj(engine); engine->i915 = NULL; + dev_priv->engine[engine->id] = NULL; + kfree(engine); } void intel_execlists_enable_submission(struct drm_i915_private *dev_priv) { struct intel_engine_cs *engine; + enum intel_engine_id id; - for_each_engine(engine, dev_priv) + for_each_engine(engine, dev_priv, id) engine->submit_request = execlists_submit_request; } @@ -2151,6 +2151,7 @@ void intel_lr_context_resume(struct drm_i915_private *dev_priv) { struct intel_engine_cs *engine; struct i915_gem_context *ctx; + enum intel_engine_id id; /* Because we emit WA_TAIL_DWORDS there may be a disparity * between our bookkeeping in ce->ring->head and ce->ring->tail and @@ -2163,7 +2164,7 @@ void intel_lr_context_resume(struct drm_i915_private *dev_priv) * simplicity, we just zero everything out. 
*/ list_for_each_entry(ctx, &dev_priv->context_list, link) { - for_each_engine(engine, dev_priv) { + for_each_engine(engine, dev_priv, id) { struct intel_context *ce = &ctx->engine[engine->id]; u32 *reg; diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c index 8c411bfc3b3f..25bcd4a178d3 100644 --- a/drivers/gpu/drm/i915/intel_overlay.c +++ b/drivers/gpu/drm/i915/intel_overlay.c @@ -233,7 +233,7 @@ static int intel_overlay_do_wait_request(struct intel_overlay *overlay, static struct drm_i915_gem_request *alloc_request(struct intel_overlay *overlay) { struct drm_i915_private *dev_priv = overlay->i915; - struct intel_engine_cs *engine = &dev_priv->engine[RCS]; + struct intel_engine_cs *engine = dev_priv->engine[RCS]; return i915_gem_request_alloc(engine, dev_priv->kernel_context); } diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 38081387802d..15f21c6eb4f4 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -5367,6 +5367,7 @@ static void gen9_enable_rps(struct drm_i915_private *dev_priv) static void gen9_enable_rc6(struct drm_i915_private *dev_priv) { struct intel_engine_cs *engine; + enum intel_engine_id id; uint32_t rc6_mask = 0; /* 1a: Software RC state - RC0 */ @@ -5388,7 +5389,7 @@ static void gen9_enable_rc6(struct drm_i915_private *dev_priv) I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 54 << 16); I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */ I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */ - for_each_engine(engine, dev_priv) + for_each_engine(engine, dev_priv, id) I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10); if (HAS_GUC(dev_priv)) @@ -5433,6 +5434,7 @@ static void gen9_enable_rc6(struct drm_i915_private *dev_priv) static void gen8_enable_rps(struct drm_i915_private *dev_priv) { struct intel_engine_cs *engine; + enum intel_engine_id id; uint32_t rc6_mask = 0; /* 1a: Software RC state - RC0 */ @@ -5449,7 +5451,7 @@ static void gen8_enable_rps(struct drm_i915_private *dev_priv) I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16); I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */ I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */ - for_each_engine(engine, dev_priv) + for_each_engine(engine, dev_priv, id) I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10); I915_WRITE(GEN6_RC_SLEEP, 0); if (IS_BROADWELL(dev_priv)) @@ -5509,6 +5511,7 @@ static void gen8_enable_rps(struct drm_i915_private *dev_priv) static void gen6_enable_rps(struct drm_i915_private *dev_priv) { struct intel_engine_cs *engine; + enum intel_engine_id id; u32 rc6vids, rc6_mask = 0; u32 gtfifodbg; int rc6_mode; @@ -5542,7 +5545,7 @@ static void gen6_enable_rps(struct drm_i915_private *dev_priv) I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); - for_each_engine(engine, dev_priv) + for_each_engine(engine, dev_priv, id) I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10); I915_WRITE(GEN6_RC_SLEEP, 0); @@ -5991,6 +5994,7 @@ static void valleyview_cleanup_gt_powersave(struct drm_i915_private *dev_priv) static void cherryview_enable_rps(struct drm_i915_private *dev_priv) { struct intel_engine_cs *engine; + enum intel_engine_id id; u32 gtfifodbg, val, rc6_mode = 0, pcbr; WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); @@ -6017,7 +6021,7 @@ static void cherryview_enable_rps(struct drm_i915_private *dev_priv) I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */ I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */ - 
for_each_engine(engine, dev_priv) + for_each_engine(engine, dev_priv, id) I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10); I915_WRITE(GEN6_RC_SLEEP, 0); @@ -6079,6 +6083,7 @@ static void cherryview_enable_rps(struct drm_i915_private *dev_priv) static void valleyview_enable_rps(struct drm_i915_private *dev_priv) { struct intel_engine_cs *engine; + enum intel_engine_id id; u32 gtfifodbg, val, rc6_mode = 0; WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); @@ -6118,7 +6123,7 @@ static void valleyview_enable_rps(struct drm_i915_private *dev_priv) I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); - for_each_engine(engine, dev_priv) + for_each_engine(engine, dev_priv, id) I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10); I915_WRITE(GEN6_RC6_THRESHOLD, 0x557); @@ -6801,7 +6806,7 @@ static void __intel_autoenable_gt_powersave(struct work_struct *work) if (READ_ONCE(dev_priv->rps.enabled)) goto out; - rcs = &dev_priv->engine[RCS]; + rcs = dev_priv->engine[RCS]; if (rcs->last_context) goto out; diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index 729f373782e2..e107455b0168 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -1228,7 +1228,7 @@ static int gen8_rcs_signal(struct drm_i915_gem_request *req) if (ret) return ret; - for_each_engine_id(waiter, dev_priv, id) { + for_each_engine(waiter, dev_priv, id) { u64 gtt_offset = req->engine->semaphore.signal_ggtt[id]; if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID) continue; @@ -1265,7 +1265,7 @@ static int gen8_xcs_signal(struct drm_i915_gem_request *req) if (ret) return ret; - for_each_engine_id(waiter, dev_priv, id) { + for_each_engine(waiter, dev_priv, id) { u64 gtt_offset = req->engine->semaphore.signal_ggtt[id]; if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID) continue; @@ -1292,6 +1292,7 @@ static int gen6_signal(struct drm_i915_gem_request *req) struct intel_ring *ring = req->ring; struct drm_i915_private *dev_priv = req->i915; struct intel_engine_cs *engine; + enum intel_engine_id id; int ret, num_rings; num_rings = INTEL_INFO(dev_priv)->num_rings; @@ -1299,7 +1300,7 @@ static int gen6_signal(struct drm_i915_gem_request *req) if (ret) return ret; - for_each_engine(engine, dev_priv) { + for_each_engine(engine, dev_priv, id) { i915_reg_t mbox_reg; if (!(BIT(engine->hw_id) & GEN6_SEMAPHORES_MASK)) @@ -2091,9 +2092,6 @@ void intel_engine_cleanup(struct intel_engine_cs *engine) { struct drm_i915_private *dev_priv; - if (!intel_engine_initialized(engine)) - return; - dev_priv = engine->i915; if (engine->buffer) { @@ -2120,13 +2118,16 @@ void intel_engine_cleanup(struct intel_engine_cs *engine) intel_ring_context_unpin(dev_priv->kernel_context, engine); engine->i915 = NULL; + dev_priv->engine[engine->id] = NULL; + kfree(engine); } void intel_legacy_submission_resume(struct drm_i915_private *dev_priv) { struct intel_engine_cs *engine; + enum intel_engine_id id; - for_each_engine(engine, dev_priv) { + for_each_engine(engine, dev_priv, id) { engine->buffer->head = engine->buffer->tail; engine->buffer->last_retired_head = -1; } diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h index 36eff9765cc2..32b2e6332ccf 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.h +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h @@ -395,12 +395,6 @@ struct intel_engine_cs { u32 (*get_cmd_length_mask)(u32 cmd_header); }; -static inline bool -intel_engine_initialized(const struct intel_engine_cs *engine) 
-{ - return engine->i915 != NULL; -} - static inline unsigned intel_engine_flag(const struct intel_engine_cs *engine) { @@ -421,7 +415,7 @@ intel_engine_sync_index(struct intel_engine_cs *engine, * vcs2 -> 0 = rcs, 1 = vcs, 2 = bcs, 3 = vecs; */ - idx = (other - engine) - 1; + idx = (other->id - engine->id) - 1; if (idx < 0) idx += I915_NUM_ENGINES;
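The hunks above all implement the same three mechanical changes: dev_priv->engine[] becomes an array of pointers that intel_engine_setup() now kzallocs on demand, for_each_engine() gains an id parameter and skips NULL slots (absorbing the old for_each_engine_id()), and presence checks such as intel_engine_initialized() collapse into plain NULL tests. The sketch below is a standalone user-space mock of that idiom, not the kernel code: the structs are stripped to the fields the pattern needs, engine_setup()/engine_cleanup() are illustrative names, and calloc/free stand in for kzalloc(GFP_KERNEL)/kfree; only the shape of the macro, the NULL presence checks and the setup/teardown ordering mirror the diff.

```c
/*
 * Standalone mock of the engine-pointer-array idiom from this diff.
 * Not the kernel code: structs are stripped down, for_each_if() is
 * written out locally, and calloc/free stand in for kzalloc/kfree.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

enum intel_engine_id { RCS, VCS, BCS, VECS, VCS2, I915_NUM_ENGINES };

struct intel_engine_cs {
	enum intel_engine_id id;
	const char *name;
};

struct drm_i915_private {
	/* was: struct intel_engine_cs engine[I915_NUM_ENGINES]; */
	struct intel_engine_cs *engine[I915_NUM_ENGINES];
};

#define for_each_if(condition) if (!(condition)) {} else

/* New form: the caller supplies an id loop variable and NULL slots are
 * skipped, so the old for_each_engine_id() is no longer needed. */
#define for_each_engine(engine__, dev_priv__, id__) \
	for ((id__) = 0; (id__) < I915_NUM_ENGINES; (id__)++) \
		for_each_if ((engine__) = (dev_priv__)->engine[(id__)])

/* Mirrors intel_engine_setup(): allocate the engine and publish it in
 * the pointer array; a NULL slot simply means "engine not present". */
static int engine_setup(struct drm_i915_private *i915,
			enum intel_engine_id id, const char *name)
{
	struct intel_engine_cs *engine;

	if (i915->engine[id])		/* GEM_BUG_ON() upstream */
		return -1;

	engine = calloc(1, sizeof(*engine));
	if (!engine)
		return -1;		/* -ENOMEM upstream */

	engine->id = id;
	engine->name = name;
	i915->engine[id] = engine;
	return 0;
}

/* Mirrors the cleanup paths: clear the slot, then free the engine. */
static void engine_cleanup(struct drm_i915_private *i915,
			   struct intel_engine_cs *engine)
{
	i915->engine[engine->id] = NULL;
	free(engine);
}

int main(void)
{
	struct drm_i915_private i915;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	memset(&i915, 0, sizeof(i915));
	engine_setup(&i915, RCS, "rcs0");
	engine_setup(&i915, VCS, "vcs0");

	/* Presence check: a NULL test replaces intel_engine_initialized(). */
	printf("has BSD: %d\n", !!i915.engine[VCS]);
	printf("has BSD2: %d\n", !!i915.engine[VCS2]);

	for_each_engine(engine, &i915, id)
		printf("engine %d: %s\n", id, engine->name);

	engine_cleanup(&i915, i915.engine[VCS]);
	engine_cleanup(&i915, i915.engine[RCS]);
	return 0;
}
```

The one behavioural subtlety the diff itself calls out is that lookups must now tolerate NULL slots before engine init has run: the POSTING_READ(RING_FAULT_REG(...)) in i915_check_and_clear_faults() is guarded by "if (dev_priv->engine[RCS])", and the cleanup paths clear dev_priv->engine[engine->id] before kfree(engine) so iteration never sees a freed engine.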