From 25c26f18ea796a56830c2e356f2b3e0c929b0a6f Mon Sep 17 00:00:00 2001
From: Chris Wilson
Date: Tue, 19 May 2020 14:08:02 +0100
Subject: drm/i915/selftests: Measure dispatch latency

A useful metric of the system's health is how fast we can tell the GPU
to do various actions, so measure our latency.

v2: Refactor all the instruction building into emitters.
v3: Mark the error handling, if not perfect, at least consistent.

Signed-off-by: Chris Wilson
Cc: Mika Kuoppala
Cc: Joonas Lahtinen
Reviewed-by: Mika Kuoppala
Link: https://patchwork.freedesktop.org/patch/msgid/20200519130802.4067-1-chris@chris-wilson.co.uk
---
 drivers/gpu/drm/i915/selftests/i915_request.c | 823 ++++++++++++++++++++++++++
 1 file changed, 823 insertions(+)

(limited to 'drivers/gpu/drm/i915/selftests/i915_request.c')

diff --git a/drivers/gpu/drm/i915/selftests/i915_request.c b/drivers/gpu/drm/i915/selftests/i915_request.c
index 6014e8dfcbb1..92c628f18c60 100644
--- a/drivers/gpu/drm/i915/selftests/i915_request.c
+++ b/drivers/gpu/drm/i915/selftests/i915_request.c
@@ -24,16 +24,20 @@
 #include
 #include
+#include

 #include "gem/i915_gem_pm.h"
 #include "gem/selftests/mock_context.h"

+#include "gt/intel_engine_heartbeat.h"
 #include "gt/intel_engine_pm.h"
 #include "gt/intel_engine_user.h"
 #include "gt/intel_gt.h"
+#include "gt/intel_gt_requests.h"

 #include "i915_random.h"
 #include "i915_selftest.h"
+#include "igt_flush_test.h"
 #include "igt_live_test.h"
 #include "igt_spinner.h"
 #include "lib_sw_fence.h"
@@ -1524,6 +1528,824 @@ struct perf_series {
 	struct intel_context *ce[];
 };

+static int cmp_u32(const void *A, const void *B)
+{
+	const u32 *a = A, *b = B;
+
+	return *a - *b;
+}
+
+static u32 trifilter(u32 *a)
+{
+	u64 sum;
+
+#define TF_COUNT 5
+	sort(a, TF_COUNT, sizeof(*a), cmp_u32, NULL);
+
+	sum = mul_u32_u32(a[2], 2);
+	sum += a[1];
+	sum += a[3];
+
+	GEM_BUG_ON(sum > U32_MAX);
+	return sum;
+#define TF_BIAS 2
+}
+
+static u64 cycles_to_ns(struct intel_engine_cs *engine, u32 cycles)
+{
+	u64 ns = i915_cs_timestamp_ticks_to_ns(engine->i915, cycles);
+
+	return DIV_ROUND_CLOSEST(ns, 1 << TF_BIAS);
+}
+
+static u32 *emit_timestamp_store(u32 *cs, struct intel_context *ce, u32 offset)
+{
+	*cs++ = MI_STORE_REGISTER_MEM_GEN8 | MI_USE_GGTT;
+	*cs++ = i915_mmio_reg_offset(RING_TIMESTAMP((ce->engine->mmio_base)));
+	*cs++ = offset;
+	*cs++ = 0;
+
+	return cs;
+}
+
+static u32 *emit_store_dw(u32 *cs, u32 offset, u32 value)
+{
+	*cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
+	*cs++ = offset;
+	*cs++ = 0;
+	*cs++ = value;
+
+	return cs;
+}
+
+static u32 *emit_semaphore_poll(u32 *cs, u32 mode, u32 value, u32 offset)
+{
+	*cs++ = MI_SEMAPHORE_WAIT |
+		MI_SEMAPHORE_GLOBAL_GTT |
+		MI_SEMAPHORE_POLL |
+		mode;
+	*cs++ = value;
+	*cs++ = offset;
+	*cs++ = 0;
+
+	return cs;
+}
+
+static u32 *emit_semaphore_poll_until(u32 *cs, u32 offset, u32 value)
+{
+	return emit_semaphore_poll(cs, MI_SEMAPHORE_SAD_EQ_SDD, value, offset);
+}
+
+static void semaphore_set(u32 *sema, u32 value)
+{
+	WRITE_ONCE(*sema, value);
+	wmb(); /* flush the update to the cache, and beyond */
+}
+
+static u32 *hwsp_scratch(const struct intel_context *ce)
+{
+	return memset32(ce->engine->status_page.addr + 1000, 0, 21);
+}
+
+static u32 hwsp_offset(const struct intel_context *ce, u32 *dw)
+{
+	return (i915_ggtt_offset(ce->engine->status_page.vma) +
+		offset_in_page(dw));
+}
+
+static int measure_semaphore_response(struct intel_context *ce)
+{
+	u32 *sema = hwsp_scratch(ce);
+	const u32 offset = hwsp_offset(ce, sema);
+	u32 elapsed[TF_COUNT], cycles;
+	struct i915_request *rq;
+	u32 *cs;
+	int err;
+	int i;
+
+	/*
+	 * Measure how many cycles it takes for the HW to detect the change
+	 * in a semaphore value.
+	 *
+	 *    A: read CS_TIMESTAMP from CPU
+	 *    poke semaphore
+	 *    B: read CS_TIMESTAMP on GPU
+	 *
+	 * Semaphore latency: B - A
+	 */
+
+	semaphore_set(sema, -1);
+
+	rq = i915_request_create(ce);
+	if (IS_ERR(rq))
+		return PTR_ERR(rq);
+
+	cs = intel_ring_begin(rq, 4 + 12 * ARRAY_SIZE(elapsed));
+	if (IS_ERR(cs)) {
+		i915_request_add(rq);
+		err = PTR_ERR(cs);
+		goto err;
+	}
+
+	cs = emit_store_dw(cs, offset, 0);
+	for (i = 1; i <= ARRAY_SIZE(elapsed); i++) {
+		cs = emit_semaphore_poll_until(cs, offset, i);
+		cs = emit_timestamp_store(cs, ce, offset + i * sizeof(u32));
+		cs = emit_store_dw(cs, offset, 0);
+	}
+
+	intel_ring_advance(rq, cs);
+	i915_request_add(rq);
+
+	if (wait_for(READ_ONCE(*sema) == 0, 50)) {
+		err = -EIO;
+		goto err;
+	}
+
+	for (i = 1; i <= ARRAY_SIZE(elapsed); i++) {
+		preempt_disable();
+		cycles = ENGINE_READ_FW(ce->engine, RING_TIMESTAMP);
+		semaphore_set(sema, i);
+		preempt_enable();
+
+		if (wait_for(READ_ONCE(*sema) == 0, 50)) {
+			err = -EIO;
+			goto err;
+		}
+
+		elapsed[i - 1] = sema[i] - cycles;
+	}
+
+	cycles = trifilter(elapsed);
+	pr_info("%s: semaphore response %d cycles, %lluns\n",
+		ce->engine->name, cycles >> TF_BIAS,
+		cycles_to_ns(ce->engine, cycles));
+
+	return intel_gt_wait_for_idle(ce->engine->gt, HZ);
+
+err:
+	intel_gt_set_wedged(ce->engine->gt);
+	return err;
+}
+
+static int measure_idle_dispatch(struct intel_context *ce)
+{
+	u32 *sema = hwsp_scratch(ce);
+	const u32 offset = hwsp_offset(ce, sema);
+	u32 elapsed[TF_COUNT], cycles;
+	u32 *cs;
+	int err;
+	int i;
+
+	/*
+	 * Measure how long it takes for us to submit a request while the
+	 * engine is idle, but is resting in our context.
+	 *
+	 *    A: read CS_TIMESTAMP from CPU
+	 *    submit request
+	 *    B: read CS_TIMESTAMP on GPU
+	 *
+	 * Submission latency: B - A
+	 */
+
+	for (i = 0; i < ARRAY_SIZE(elapsed); i++) {
+		struct i915_request *rq;
+
+		err = intel_gt_wait_for_idle(ce->engine->gt, HZ / 2);
+		if (err)
+			return err;
+
+		rq = i915_request_create(ce);
+		if (IS_ERR(rq)) {
+			err = PTR_ERR(rq);
+			goto err;
+		}
+
+		cs = intel_ring_begin(rq, 4);
+		if (IS_ERR(cs)) {
+			i915_request_add(rq);
+			err = PTR_ERR(cs);
+			goto err;
+		}
+
+		cs = emit_timestamp_store(cs, ce, offset + i * sizeof(u32));
+
+		intel_ring_advance(rq, cs);
+
+		preempt_disable();
+		local_bh_disable();
+		elapsed[i] = ENGINE_READ_FW(ce->engine, RING_TIMESTAMP);
+		i915_request_add(rq);
+		local_bh_enable();
+		preempt_enable();
+	}
+
+	err = intel_gt_wait_for_idle(ce->engine->gt, HZ / 2);
+	if (err)
+		goto err;
+
+	for (i = 0; i < ARRAY_SIZE(elapsed); i++)
+		elapsed[i] = sema[i] - elapsed[i];
+
+	cycles = trifilter(elapsed);
+	pr_info("%s: idle dispatch latency %d cycles, %lluns\n",
+		ce->engine->name, cycles >> TF_BIAS,
+		cycles_to_ns(ce->engine, cycles));
+
+	return intel_gt_wait_for_idle(ce->engine->gt, HZ);
+
+err:
+	intel_gt_set_wedged(ce->engine->gt);
+	return err;
+}
+
+static int measure_busy_dispatch(struct intel_context *ce)
+{
+	u32 *sema = hwsp_scratch(ce);
+	const u32 offset = hwsp_offset(ce, sema);
+	u32 elapsed[TF_COUNT + 1], cycles;
+	u32 *cs;
+	int err;
+	int i;
+
+	/*
+	 * Measure how long it takes for us to submit a request while the
+	 * engine is busy, polling on a semaphore in our context. With
+	 * direct submission, this will include the cost of a lite restore.
+ * + * A: read CS_TIMESTAMP from CPU + * submit request + * B: read CS_TIMESTAMP on GPU + * + * Submission latency: B - A + */ + + for (i = 1; i <= ARRAY_SIZE(elapsed); i++) { + struct i915_request *rq; + + rq = i915_request_create(ce); + if (IS_ERR(rq)) { + err = PTR_ERR(rq); + goto err; + } + + cs = intel_ring_begin(rq, 12); + if (IS_ERR(cs)) { + i915_request_add(rq); + err = PTR_ERR(cs); + goto err; + } + + cs = emit_store_dw(cs, offset + i * sizeof(u32), -1); + cs = emit_semaphore_poll_until(cs, offset, i); + cs = emit_timestamp_store(cs, ce, offset + i * sizeof(u32)); + + intel_ring_advance(rq, cs); + + if (i > 1 && wait_for(READ_ONCE(sema[i - 1]), 500)) { + err = -EIO; + goto err; + } + + preempt_disable(); + local_bh_disable(); + elapsed[i - 1] = ENGINE_READ_FW(ce->engine, RING_TIMESTAMP); + i915_request_add(rq); + local_bh_enable(); + semaphore_set(sema, i - 1); + preempt_enable(); + } + + wait_for(READ_ONCE(sema[i - 1]), 500); + semaphore_set(sema, i - 1); + + for (i = 1; i <= TF_COUNT; i++) { + GEM_BUG_ON(sema[i] == -1); + elapsed[i - 1] = sema[i] - elapsed[i]; + } + + cycles = trifilter(elapsed); + pr_info("%s: busy dispatch latency %d cycles, %lluns\n", + ce->engine->name, cycles >> TF_BIAS, + cycles_to_ns(ce->engine, cycles)); + + return intel_gt_wait_for_idle(ce->engine->gt, HZ); + +err: + intel_gt_set_wedged(ce->engine->gt); + return err; +} + +static int plug(struct intel_engine_cs *engine, u32 *sema, u32 mode, int value) +{ + const u32 offset = + i915_ggtt_offset(engine->status_page.vma) + + offset_in_page(sema); + struct i915_request *rq; + u32 *cs; + + rq = i915_request_create(engine->kernel_context); + if (IS_ERR(rq)) + return PTR_ERR(rq); + + cs = intel_ring_begin(rq, 4); + if (IS_ERR(cs)) { + i915_request_add(rq); + return PTR_ERR(cs); + } + + cs = emit_semaphore_poll(cs, mode, value, offset); + + intel_ring_advance(rq, cs); + i915_request_add(rq); + + return 0; +} + +static int measure_inter_request(struct intel_context *ce) +{ + u32 *sema = hwsp_scratch(ce); + const u32 offset = hwsp_offset(ce, sema); + u32 elapsed[TF_COUNT + 1], cycles; + struct i915_sw_fence *submit; + int i, err; + + /* + * Measure how long it takes to advance from one request into the + * next. Between each request we flush the GPU caches to memory, + * update the breadcrumbs, and then invalidate those caches. + * We queue up all the requests to be submitted in one batch so + * it should be one set of contiguous measurements. 
+ * + * A: read CS_TIMESTAMP on GPU + * advance request + * B: read CS_TIMESTAMP on GPU + * + * Request latency: B - A + */ + + err = plug(ce->engine, sema, MI_SEMAPHORE_SAD_NEQ_SDD, 0); + if (err) + return err; + + submit = heap_fence_create(GFP_KERNEL); + if (!submit) { + semaphore_set(sema, 1); + return -ENOMEM; + } + + intel_engine_flush_submission(ce->engine); + for (i = 1; i <= ARRAY_SIZE(elapsed); i++) { + struct i915_request *rq; + u32 *cs; + + rq = i915_request_create(ce); + if (IS_ERR(rq)) { + err = PTR_ERR(rq); + goto err_submit; + } + + err = i915_sw_fence_await_sw_fence_gfp(&rq->submit, + submit, + GFP_KERNEL); + if (err < 0) { + i915_request_add(rq); + goto err_submit; + } + + cs = intel_ring_begin(rq, 4); + if (IS_ERR(cs)) { + i915_request_add(rq); + err = PTR_ERR(cs); + goto err_submit; + } + + cs = emit_timestamp_store(cs, ce, offset + i * sizeof(u32)); + + intel_ring_advance(rq, cs); + i915_request_add(rq); + } + local_bh_disable(); + i915_sw_fence_commit(submit); + local_bh_enable(); + intel_engine_flush_submission(ce->engine); + heap_fence_put(submit); + + semaphore_set(sema, 1); + err = intel_gt_wait_for_idle(ce->engine->gt, HZ / 2); + if (err) + goto err; + + for (i = 1; i <= TF_COUNT; i++) + elapsed[i - 1] = sema[i + 1] - sema[i]; + + cycles = trifilter(elapsed); + pr_info("%s: inter-request latency %d cycles, %lluns\n", + ce->engine->name, cycles >> TF_BIAS, + cycles_to_ns(ce->engine, cycles)); + + return intel_gt_wait_for_idle(ce->engine->gt, HZ); + +err_submit: + i915_sw_fence_commit(submit); + heap_fence_put(submit); + semaphore_set(sema, 1); +err: + intel_gt_set_wedged(ce->engine->gt); + return err; +} + +static int measure_context_switch(struct intel_context *ce) +{ + u32 *sema = hwsp_scratch(ce); + const u32 offset = hwsp_offset(ce, sema); + struct i915_request *fence = NULL; + u32 elapsed[TF_COUNT + 1], cycles; + int i, j, err; + u32 *cs; + + /* + * Measure how long it takes to advance from one request in one + * context to a request in another context. This allows us to + * measure how long the context save/restore take, along with all + * the inter-context setup we require. 
+ * + * A: read CS_TIMESTAMP on GPU + * switch context + * B: read CS_TIMESTAMP on GPU + * + * Context switch latency: B - A + */ + + err = plug(ce->engine, sema, MI_SEMAPHORE_SAD_NEQ_SDD, 0); + if (err) + return err; + + for (i = 1; i <= ARRAY_SIZE(elapsed); i++) { + struct intel_context *arr[] = { + ce, ce->engine->kernel_context + }; + u32 addr = offset + ARRAY_SIZE(arr) * i * sizeof(u32); + + for (j = 0; j < ARRAY_SIZE(arr); j++) { + struct i915_request *rq; + + rq = i915_request_create(arr[j]); + if (IS_ERR(rq)) { + err = PTR_ERR(rq); + goto err_fence; + } + + if (fence) { + err = i915_request_await_dma_fence(rq, + &fence->fence); + if (err) { + i915_request_add(rq); + goto err_fence; + } + } + + cs = intel_ring_begin(rq, 4); + if (IS_ERR(cs)) { + i915_request_add(rq); + err = PTR_ERR(cs); + goto err_fence; + } + + cs = emit_timestamp_store(cs, ce, addr); + addr += sizeof(u32); + + intel_ring_advance(rq, cs); + + i915_request_put(fence); + fence = i915_request_get(rq); + + i915_request_add(rq); + } + } + i915_request_put(fence); + intel_engine_flush_submission(ce->engine); + + semaphore_set(sema, 1); + err = intel_gt_wait_for_idle(ce->engine->gt, HZ / 2); + if (err) + goto err; + + for (i = 1; i <= TF_COUNT; i++) + elapsed[i - 1] = sema[2 * i + 2] - sema[2 * i + 1]; + + cycles = trifilter(elapsed); + pr_info("%s: context switch latency %d cycles, %lluns\n", + ce->engine->name, cycles >> TF_BIAS, + cycles_to_ns(ce->engine, cycles)); + + return intel_gt_wait_for_idle(ce->engine->gt, HZ); + +err_fence: + i915_request_put(fence); + semaphore_set(sema, 1); +err: + intel_gt_set_wedged(ce->engine->gt); + return err; +} + +static int measure_preemption(struct intel_context *ce) +{ + u32 *sema = hwsp_scratch(ce); + const u32 offset = hwsp_offset(ce, sema); + u32 elapsed[TF_COUNT], cycles; + u32 *cs; + int err; + int i; + + /* + * We measure two latencies while triggering preemption. The first + * latency is how long it takes for us to submit a preempting request. + * The second latency is how it takes for us to return from the + * preemption back to the original context. 
+ * + * A: read CS_TIMESTAMP from CPU + * submit preemption + * B: read CS_TIMESTAMP on GPU (in preempting context) + * context switch + * C: read CS_TIMESTAMP on GPU (in original context) + * + * Preemption dispatch latency: B - A + * Preemption switch latency: C - B + */ + + if (!intel_engine_has_preemption(ce->engine)) + return 0; + + for (i = 1; i <= ARRAY_SIZE(elapsed); i++) { + u32 addr = offset + 2 * i * sizeof(u32); + struct i915_request *rq; + + rq = i915_request_create(ce); + if (IS_ERR(rq)) { + err = PTR_ERR(rq); + goto err; + } + + cs = intel_ring_begin(rq, 12); + if (IS_ERR(cs)) { + i915_request_add(rq); + err = PTR_ERR(cs); + goto err; + } + + cs = emit_store_dw(cs, addr, -1); + cs = emit_semaphore_poll_until(cs, offset, i); + cs = emit_timestamp_store(cs, ce, addr + sizeof(u32)); + + intel_ring_advance(rq, cs); + i915_request_add(rq); + + if (wait_for(READ_ONCE(sema[2 * i]) == -1, 500)) { + err = -EIO; + goto err; + } + + rq = i915_request_create(ce->engine->kernel_context); + if (IS_ERR(rq)) { + err = PTR_ERR(rq); + goto err; + } + + cs = intel_ring_begin(rq, 8); + if (IS_ERR(cs)) { + i915_request_add(rq); + err = PTR_ERR(cs); + goto err; + } + + cs = emit_timestamp_store(cs, ce, addr); + cs = emit_store_dw(cs, offset, i); + + intel_ring_advance(rq, cs); + rq->sched.attr.priority = I915_PRIORITY_BARRIER; + + elapsed[i - 1] = ENGINE_READ_FW(ce->engine, RING_TIMESTAMP); + i915_request_add(rq); + } + + if (wait_for(READ_ONCE(sema[2 * i - 2]) != -1, 500)) { + err = -EIO; + goto err; + } + + for (i = 1; i <= TF_COUNT; i++) + elapsed[i - 1] = sema[2 * i + 0] - elapsed[i - 1]; + + cycles = trifilter(elapsed); + pr_info("%s: preemption dispatch latency %d cycles, %lluns\n", + ce->engine->name, cycles >> TF_BIAS, + cycles_to_ns(ce->engine, cycles)); + + for (i = 1; i <= TF_COUNT; i++) + elapsed[i - 1] = sema[2 * i + 1] - sema[2 * i + 0]; + + cycles = trifilter(elapsed); + pr_info("%s: preemption switch latency %d cycles, %lluns\n", + ce->engine->name, cycles >> TF_BIAS, + cycles_to_ns(ce->engine, cycles)); + + return intel_gt_wait_for_idle(ce->engine->gt, HZ); + +err: + intel_gt_set_wedged(ce->engine->gt); + return err; +} + +struct signal_cb { + struct dma_fence_cb base; + bool seen; +}; + +static void signal_cb(struct dma_fence *fence, struct dma_fence_cb *cb) +{ + struct signal_cb *s = container_of(cb, typeof(*s), base); + + smp_store_mb(s->seen, true); /* be safe, be strong */ +} + +static int measure_completion(struct intel_context *ce) +{ + u32 *sema = hwsp_scratch(ce); + const u32 offset = hwsp_offset(ce, sema); + u32 elapsed[TF_COUNT], cycles; + u32 *cs; + int err; + int i; + + /* + * Measure how long it takes for the signal (interrupt) to be + * sent from the GPU to be processed by the CPU. 
+ * + * A: read CS_TIMESTAMP on GPU + * signal + * B: read CS_TIMESTAMP from CPU + * + * Completion latency: B - A + */ + + for (i = 1; i <= ARRAY_SIZE(elapsed); i++) { + struct signal_cb cb = { .seen = false }; + struct i915_request *rq; + + rq = i915_request_create(ce); + if (IS_ERR(rq)) { + err = PTR_ERR(rq); + goto err; + } + + cs = intel_ring_begin(rq, 12); + if (IS_ERR(cs)) { + i915_request_add(rq); + err = PTR_ERR(cs); + goto err; + } + + cs = emit_store_dw(cs, offset + i * sizeof(u32), -1); + cs = emit_semaphore_poll_until(cs, offset, i); + cs = emit_timestamp_store(cs, ce, offset + i * sizeof(u32)); + + intel_ring_advance(rq, cs); + + dma_fence_add_callback(&rq->fence, &cb.base, signal_cb); + + local_bh_disable(); + i915_request_add(rq); + local_bh_enable(); + + if (wait_for(READ_ONCE(sema[i]) == -1, 50)) { + err = -EIO; + goto err; + } + + preempt_disable(); + semaphore_set(sema, i); + while (!READ_ONCE(cb.seen)) + cpu_relax(); + + elapsed[i - 1] = ENGINE_READ_FW(ce->engine, RING_TIMESTAMP); + preempt_enable(); + } + + err = intel_gt_wait_for_idle(ce->engine->gt, HZ / 2); + if (err) + goto err; + + for (i = 0; i < ARRAY_SIZE(elapsed); i++) { + GEM_BUG_ON(sema[i + 1] == -1); + elapsed[i] = elapsed[i] - sema[i + 1]; + } + + cycles = trifilter(elapsed); + pr_info("%s: completion latency %d cycles, %lluns\n", + ce->engine->name, cycles >> TF_BIAS, + cycles_to_ns(ce->engine, cycles)); + + return intel_gt_wait_for_idle(ce->engine->gt, HZ); + +err: + intel_gt_set_wedged(ce->engine->gt); + return err; +} + +static void rps_pin(struct intel_gt *gt) +{ + /* Pin the frequency to max */ + atomic_inc(>->rps.num_waiters); + intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_ALL); + + mutex_lock(>->rps.lock); + intel_rps_set(>->rps, gt->rps.max_freq); + mutex_unlock(>->rps.lock); +} + +static void rps_unpin(struct intel_gt *gt) +{ + intel_uncore_forcewake_put(gt->uncore, FORCEWAKE_ALL); + atomic_dec(>->rps.num_waiters); +} + +static void engine_heartbeat_disable(struct intel_engine_cs *engine) +{ + engine->props.heartbeat_interval_ms = 0; + + intel_engine_pm_get(engine); + intel_engine_park_heartbeat(engine); +} + +static void engine_heartbeat_enable(struct intel_engine_cs *engine) +{ + intel_engine_pm_put(engine); + + engine->props.heartbeat_interval_ms = + engine->defaults.heartbeat_interval_ms; +} + +static int perf_request_latency(void *arg) +{ + struct drm_i915_private *i915 = arg; + struct intel_engine_cs *engine; + struct pm_qos_request qos; + int err = 0; + + if (INTEL_GEN(i915) < 8) /* per-engine CS timestamp, semaphores */ + return 0; + + cpu_latency_qos_add_request(&qos, 0); /* disable cstates */ + + for_each_uabi_engine(engine, i915) { + struct intel_context *ce; + + ce = intel_context_create(engine); + if (IS_ERR(ce)) + goto out; + + err = intel_context_pin(ce); + if (err) { + intel_context_put(ce); + goto out; + } + + engine_heartbeat_disable(engine); + rps_pin(engine->gt); + + if (err == 0) + err = measure_semaphore_response(ce); + if (err == 0) + err = measure_idle_dispatch(ce); + if (err == 0) + err = measure_busy_dispatch(ce); + if (err == 0) + err = measure_inter_request(ce); + if (err == 0) + err = measure_context_switch(ce); + if (err == 0) + err = measure_preemption(ce); + if (err == 0) + err = measure_completion(ce); + + rps_unpin(engine->gt); + engine_heartbeat_enable(engine); + + intel_context_unpin(ce); + intel_context_put(ce); + if (err) + goto out; + } + +out: + if (igt_flush_test(i915)) + err = -EIO; + + cpu_latency_qos_remove_request(&qos); + return err; +} + static 
int s_sync0(void *arg) { struct perf_series *ps = arg; @@ -2042,6 +2864,7 @@ static int perf_parallel_engines(void *arg) int i915_request_perf_selftests(struct drm_i915_private *i915) { static const struct i915_subtest tests[] = { + SUBTEST(perf_request_latency), SUBTEST(perf_series_engines), SUBTEST(perf_parallel_engines), }; -- cgit v1.2.3 From 1b90e4a43b7444fc4159c6dd338a44e8808e8718 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Wed, 17 Jun 2020 14:09:15 +0100 Subject: drm/i915/selftests: Enable selftesting of busy-stats A couple of very simple tests to ensure that the basic properties of per-engine busyness accounting [0% and 100% busy] are faithful. Signed-off-by: Chris Wilson Reviewed-by: Mika Kuoppala Cc: Tvrtko Ursulin Link: https://patchwork.freedesktop.org/patch/msgid/20200617130916.15261-1-chris@chris-wilson.co.uk --- .../gpu/drm/i915/gt/selftest_engine_heartbeat.c | 47 +++++----- .../gpu/drm/i915/gt/selftest_engine_heartbeat.h | 14 +++ drivers/gpu/drm/i915/gt/selftest_engine_pm.c | 103 +++++++++++++++++++++ drivers/gpu/drm/i915/gt/selftest_hangcheck.c | 29 ++---- drivers/gpu/drm/i915/gt/selftest_lrc.c | 79 +++++++--------- drivers/gpu/drm/i915/gt/selftest_rps.c | 68 ++++++-------- drivers/gpu/drm/i915/gt/selftest_timeline.c | 21 +---- drivers/gpu/drm/i915/selftests/i915_request.c | 21 +---- 8 files changed, 212 insertions(+), 170 deletions(-) create mode 100644 drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.h (limited to 'drivers/gpu/drm/i915/selftests/i915_request.c') diff --git a/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c b/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c index 697114dd1f47..f3034c613bc0 100644 --- a/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c +++ b/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c @@ -10,6 +10,7 @@ #include "intel_gt_requests.h" #include "i915_selftest.h" +#include "selftest_engine_heartbeat.h" static int timeline_sync(struct intel_timeline *tl) { @@ -142,24 +143,6 @@ out: return err; } -static void engine_heartbeat_disable(struct intel_engine_cs *engine, - unsigned long *saved) -{ - *saved = engine->props.heartbeat_interval_ms; - engine->props.heartbeat_interval_ms = 0; - - intel_engine_pm_get(engine); - intel_engine_park_heartbeat(engine); -} - -static void engine_heartbeat_enable(struct intel_engine_cs *engine, - unsigned long saved) -{ - intel_engine_pm_put(engine); - - engine->props.heartbeat_interval_ms = saved; -} - static int live_idle_flush(void *arg) { struct intel_gt *gt = arg; @@ -170,11 +153,9 @@ static int live_idle_flush(void *arg) /* Check that we can flush the idle barriers */ for_each_engine(engine, gt, id) { - unsigned long heartbeat; - - engine_heartbeat_disable(engine, &heartbeat); + st_engine_heartbeat_disable(engine); err = __live_idle_pulse(engine, intel_engine_flush_barriers); - engine_heartbeat_enable(engine, heartbeat); + st_engine_heartbeat_enable(engine); if (err) break; } @@ -192,11 +173,9 @@ static int live_idle_pulse(void *arg) /* Check that heartbeat pulses flush the idle barriers */ for_each_engine(engine, gt, id) { - unsigned long heartbeat; - - engine_heartbeat_disable(engine, &heartbeat); + st_engine_heartbeat_disable(engine); err = __live_idle_pulse(engine, intel_engine_pulse); - engine_heartbeat_enable(engine, heartbeat); + st_engine_heartbeat_enable(engine); if (err && err != -ENODEV) break; @@ -394,3 +373,19 @@ int intel_heartbeat_live_selftests(struct drm_i915_private *i915) i915_modparams.enable_hangcheck = saved_hangcheck; return err; } + +void 
st_engine_heartbeat_disable(struct intel_engine_cs *engine) +{ + engine->props.heartbeat_interval_ms = 0; + + intel_engine_pm_get(engine); + intel_engine_park_heartbeat(engine); +} + +void st_engine_heartbeat_enable(struct intel_engine_cs *engine) +{ + intel_engine_pm_put(engine); + + engine->props.heartbeat_interval_ms = + engine->defaults.heartbeat_interval_ms; +} diff --git a/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.h b/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.h new file mode 100644 index 000000000000..cd27113d5400 --- /dev/null +++ b/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.h @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2020 Intel Corporation + */ + +#ifndef SELFTEST_ENGINE_HEARTBEAT_H +#define SELFTEST_ENGINE_HEARTBEAT_H + +struct intel_engine_cs; + +void st_engine_heartbeat_disable(struct intel_engine_cs *engine); +void st_engine_heartbeat_enable(struct intel_engine_cs *engine); + +#endif /* SELFTEST_ENGINE_HEARTBEAT_H */ diff --git a/drivers/gpu/drm/i915/gt/selftest_engine_pm.c b/drivers/gpu/drm/i915/gt/selftest_engine_pm.c index cbf6b0735272..dd54dcb5cca2 100644 --- a/drivers/gpu/drm/i915/gt/selftest_engine_pm.c +++ b/drivers/gpu/drm/i915/gt/selftest_engine_pm.c @@ -6,7 +6,109 @@ #include "i915_selftest.h" #include "selftest_engine.h" +#include "selftest_engine_heartbeat.h" #include "selftests/igt_atomic.h" +#include "selftests/igt_flush_test.h" +#include "selftests/igt_spinner.h" + +static int live_engine_busy_stats(void *arg) +{ + struct intel_gt *gt = arg; + struct intel_engine_cs *engine; + enum intel_engine_id id; + struct igt_spinner spin; + int err = 0; + + /* + * Check that if an engine supports busy-stats, they tell the truth. + */ + + if (igt_spinner_init(&spin, gt)) + return -ENOMEM; + + GEM_BUG_ON(intel_gt_pm_is_awake(gt)); + for_each_engine(engine, gt, id) { + struct i915_request *rq; + ktime_t de; + u64 dt; + + if (!intel_engine_supports_stats(engine)) + continue; + + if (!intel_engine_can_store_dword(engine)) + continue; + + if (intel_gt_pm_wait_for_idle(gt)) { + err = -EBUSY; + break; + } + + st_engine_heartbeat_disable(engine); + + ENGINE_TRACE(engine, "measuring idle time\n"); + preempt_disable(); + dt = ktime_to_ns(ktime_get()); + de = intel_engine_get_busy_time(engine); + udelay(100); + de = ktime_sub(intel_engine_get_busy_time(engine), de); + dt = ktime_to_ns(ktime_get()) - dt; + preempt_enable(); + if (de < 0 || de > 10) { + pr_err("%s: reported %lldns [%d%%] busyness while sleeping [for %lldns]\n", + engine->name, + de, (int)div64_u64(100 * de, dt), dt); + GEM_TRACE_DUMP(); + err = -EINVAL; + goto end; + } + + /* 100% busy */ + rq = igt_spinner_create_request(&spin, + engine->kernel_context, + MI_NOOP); + if (IS_ERR(rq)) { + err = PTR_ERR(rq); + goto end; + } + i915_request_add(rq); + + if (!igt_wait_for_spinner(&spin, rq)) { + intel_gt_set_wedged(engine->gt); + err = -ETIME; + goto end; + } + + ENGINE_TRACE(engine, "measuring busy time\n"); + preempt_disable(); + dt = ktime_to_ns(ktime_get()); + de = intel_engine_get_busy_time(engine); + udelay(100); + de = ktime_sub(intel_engine_get_busy_time(engine), de); + dt = ktime_to_ns(ktime_get()) - dt; + preempt_enable(); + if (100 * de < 95 * dt || 95 * de > 100 * dt) { + pr_err("%s: reported %lldns [%d%%] busyness while spinning [for %lldns]\n", + engine->name, + de, (int)div64_u64(100 * de, dt), dt); + GEM_TRACE_DUMP(); + err = -EINVAL; + goto end; + } + +end: + st_engine_heartbeat_enable(engine); + igt_spinner_end(&spin); + if (igt_flush_test(gt->i915)) + 
err = -EIO; + if (err) + break; + } + + igt_spinner_fini(&spin); + if (igt_flush_test(gt->i915)) + err = -EIO; + return err; +} static int live_engine_pm(void *arg) { @@ -77,6 +179,7 @@ static int live_engine_pm(void *arg) int live_engine_pm_selftests(struct intel_gt *gt) { static const struct i915_subtest tests[] = { + SUBTEST(live_engine_busy_stats), SUBTEST(live_engine_pm), }; diff --git a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c index 7461936d549d..fb5ebf930ab2 100644 --- a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c +++ b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c @@ -29,6 +29,7 @@ #include "intel_gt.h" #include "intel_engine_heartbeat.h" #include "intel_engine_pm.h" +#include "selftest_engine_heartbeat.h" #include "i915_selftest.h" #include "selftests/i915_random.h" @@ -310,22 +311,6 @@ static bool wait_until_running(struct hang *h, struct i915_request *rq) 1000)); } -static void engine_heartbeat_disable(struct intel_engine_cs *engine) -{ - engine->props.heartbeat_interval_ms = 0; - - intel_engine_pm_get(engine); - intel_engine_park_heartbeat(engine); -} - -static void engine_heartbeat_enable(struct intel_engine_cs *engine) -{ - intel_engine_pm_put(engine); - - engine->props.heartbeat_interval_ms = - engine->defaults.heartbeat_interval_ms; -} - static int igt_hang_sanitycheck(void *arg) { struct intel_gt *gt = arg; @@ -482,7 +467,7 @@ static int igt_reset_nop_engine(void *arg) reset_engine_count = i915_reset_engine_count(global, engine); count = 0; - engine_heartbeat_disable(engine); + st_engine_heartbeat_disable(engine); set_bit(I915_RESET_ENGINE + id, >->reset.flags); do { int i; @@ -540,7 +525,7 @@ static int igt_reset_nop_engine(void *arg) } } while (time_before(jiffies, end_time)); clear_bit(I915_RESET_ENGINE + id, >->reset.flags); - engine_heartbeat_enable(engine); + st_engine_heartbeat_enable(engine); pr_info("%s(%s): %d resets\n", __func__, engine->name, count); @@ -590,7 +575,7 @@ static int __igt_reset_engine(struct intel_gt *gt, bool active) reset_count = i915_reset_count(global); reset_engine_count = i915_reset_engine_count(global, engine); - engine_heartbeat_disable(engine); + st_engine_heartbeat_disable(engine); set_bit(I915_RESET_ENGINE + id, >->reset.flags); do { if (active) { @@ -642,7 +627,7 @@ static int __igt_reset_engine(struct intel_gt *gt, bool active) } } while (time_before(jiffies, end_time)); clear_bit(I915_RESET_ENGINE + id, >->reset.flags); - engine_heartbeat_enable(engine); + st_engine_heartbeat_enable(engine); if (err) break; @@ -841,7 +826,7 @@ static int __igt_reset_engines(struct intel_gt *gt, yield(); /* start all threads before we begin */ - engine_heartbeat_disable(engine); + st_engine_heartbeat_disable(engine); set_bit(I915_RESET_ENGINE + id, >->reset.flags); do { struct i915_request *rq = NULL; @@ -931,7 +916,7 @@ static int __igt_reset_engines(struct intel_gt *gt, } } while (time_before(jiffies, end_time)); clear_bit(I915_RESET_ENGINE + id, >->reset.flags); - engine_heartbeat_enable(engine); + st_engine_heartbeat_enable(engine); pr_info("i915_reset_engine(%s:%s): %lu resets\n", engine->name, test_name, count); diff --git a/drivers/gpu/drm/i915/gt/selftest_lrc.c b/drivers/gpu/drm/i915/gt/selftest_lrc.c index 58e4e9aafe94..daa4aabab9a7 100644 --- a/drivers/gpu/drm/i915/gt/selftest_lrc.c +++ b/drivers/gpu/drm/i915/gt/selftest_lrc.c @@ -9,6 +9,7 @@ #include "gem/i915_gem_pm.h" #include "gt/intel_engine_heartbeat.h" #include "gt/intel_reset.h" +#include "gt/selftest_engine_heartbeat.h" #include 
"i915_selftest.h" #include "selftests/i915_random.h" @@ -51,22 +52,6 @@ static struct i915_vma *create_scratch(struct intel_gt *gt) return vma; } -static void engine_heartbeat_disable(struct intel_engine_cs *engine) -{ - engine->props.heartbeat_interval_ms = 0; - - intel_engine_pm_get(engine); - intel_engine_park_heartbeat(engine); -} - -static void engine_heartbeat_enable(struct intel_engine_cs *engine) -{ - intel_engine_pm_put(engine); - - engine->props.heartbeat_interval_ms = - engine->defaults.heartbeat_interval_ms; -} - static bool is_active(struct i915_request *rq) { if (i915_request_is_active(rq)) @@ -234,7 +219,7 @@ static int live_unlite_restore(struct intel_gt *gt, int prio) err = -EIO; break; } - engine_heartbeat_disable(engine); + st_engine_heartbeat_disable(engine); for (n = 0; n < ARRAY_SIZE(ce); n++) { struct intel_context *tmp; @@ -342,7 +327,7 @@ err_ce: intel_context_put(ce[n]); } - engine_heartbeat_enable(engine); + st_engine_heartbeat_enable(engine); if (igt_live_test_end(&t)) err = -EIO; if (err) @@ -396,7 +381,7 @@ static int live_unlite_ring(void *arg) err = -EIO; break; } - engine_heartbeat_disable(engine); + st_engine_heartbeat_disable(engine); for (n = 0; n < ARRAY_SIZE(ce); n++) { struct intel_context *tmp; @@ -502,7 +487,7 @@ err_ce: intel_context_unpin(ce[n]); intel_context_put(ce[n]); } - engine_heartbeat_enable(engine); + st_engine_heartbeat_enable(engine); if (igt_live_test_end(&t)) err = -EIO; if (err) @@ -621,7 +606,7 @@ static int live_hold_reset(void *arg) break; } - engine_heartbeat_disable(engine); + st_engine_heartbeat_disable(engine); rq = igt_spinner_create_request(&spin, ce, MI_ARB_CHECK); if (IS_ERR(rq)) { @@ -681,7 +666,7 @@ static int live_hold_reset(void *arg) i915_request_put(rq); out: - engine_heartbeat_enable(engine); + st_engine_heartbeat_enable(engine); intel_context_put(ce); if (err) break; @@ -728,7 +713,7 @@ static int live_error_interrupt(void *arg) const struct error_phase *p; int err = 0; - engine_heartbeat_disable(engine); + st_engine_heartbeat_disable(engine); for (p = phases; p->error[0] != GOOD; p++) { struct i915_request *client[ARRAY_SIZE(phases->error)]; @@ -827,7 +812,7 @@ out: } } - engine_heartbeat_enable(engine); + st_engine_heartbeat_enable(engine); if (err) { intel_gt_set_wedged(gt); return err; @@ -1042,9 +1027,9 @@ static int live_timeslice_preempt(void *arg) memset(vaddr, 0, PAGE_SIZE); - engine_heartbeat_disable(engine); + st_engine_heartbeat_disable(engine); err = slice_semaphore_queue(engine, vma, 5); - engine_heartbeat_enable(engine); + st_engine_heartbeat_enable(engine); if (err) goto err_pin; @@ -1166,7 +1151,7 @@ static int live_timeslice_rewind(void *arg) * Expect execution/evaluation order XZY */ - engine_heartbeat_disable(engine); + st_engine_heartbeat_disable(engine); timeslice = xchg(&engine->props.timeslice_duration_ms, 1); slot = memset32(engine->status_page.addr + 1000, 0, 4); @@ -1261,7 +1246,7 @@ err: wmb(); engine->props.timeslice_duration_ms = timeslice; - engine_heartbeat_enable(engine); + st_engine_heartbeat_enable(engine); for (i = 0; i < 3; i++) i915_request_put(rq[i]); if (igt_flush_test(gt->i915)) @@ -1353,7 +1338,7 @@ static int live_timeslice_queue(void *arg) if (!intel_engine_has_preemption(engine)) continue; - engine_heartbeat_disable(engine); + st_engine_heartbeat_disable(engine); memset(vaddr, 0, PAGE_SIZE); /* ELSP[0]: semaphore wait */ @@ -1414,7 +1399,7 @@ static int live_timeslice_queue(void *arg) err_rq: i915_request_put(rq); err_heartbeat: - engine_heartbeat_enable(engine); + 
st_engine_heartbeat_enable(engine); if (err) break; } @@ -1460,7 +1445,7 @@ static int live_timeslice_nopreempt(void *arg) break; } - engine_heartbeat_disable(engine); + st_engine_heartbeat_disable(engine); timeslice = xchg(&engine->props.timeslice_duration_ms, 1); /* Create an unpreemptible spinner */ @@ -1529,7 +1514,7 @@ out_spin: igt_spinner_end(&spin); out_heartbeat: xchg(&engine->props.timeslice_duration_ms, timeslice); - engine_heartbeat_enable(engine); + st_engine_heartbeat_enable(engine); if (err) break; @@ -2433,7 +2418,7 @@ static int live_suppress_self_preempt(void *arg) if (igt_flush_test(gt->i915)) goto err_wedged; - engine_heartbeat_disable(engine); + st_engine_heartbeat_disable(engine); engine->execlists.preempt_hang.count = 0; rq_a = spinner_create_request(&a.spin, @@ -2441,14 +2426,14 @@ static int live_suppress_self_preempt(void *arg) MI_NOOP); if (IS_ERR(rq_a)) { err = PTR_ERR(rq_a); - engine_heartbeat_enable(engine); + st_engine_heartbeat_enable(engine); goto err_client_b; } i915_request_add(rq_a); if (!igt_wait_for_spinner(&a.spin, rq_a)) { pr_err("First client failed to start\n"); - engine_heartbeat_enable(engine); + st_engine_heartbeat_enable(engine); goto err_wedged; } @@ -2460,7 +2445,7 @@ static int live_suppress_self_preempt(void *arg) MI_NOOP); if (IS_ERR(rq_b)) { err = PTR_ERR(rq_b); - engine_heartbeat_enable(engine); + st_engine_heartbeat_enable(engine); goto err_client_b; } i915_request_add(rq_b); @@ -2471,7 +2456,7 @@ static int live_suppress_self_preempt(void *arg) if (!igt_wait_for_spinner(&b.spin, rq_b)) { pr_err("Second client failed to start\n"); - engine_heartbeat_enable(engine); + st_engine_heartbeat_enable(engine); goto err_wedged; } @@ -2485,12 +2470,12 @@ static int live_suppress_self_preempt(void *arg) engine->name, engine->execlists.preempt_hang.count, depth); - engine_heartbeat_enable(engine); + st_engine_heartbeat_enable(engine); err = -EINVAL; goto err_client_b; } - engine_heartbeat_enable(engine); + st_engine_heartbeat_enable(engine); if (igt_flush_test(gt->i915)) goto err_wedged; } @@ -2902,7 +2887,7 @@ static int live_preempt_ring(void *arg) if (!intel_engine_can_store_dword(engine)) continue; - engine_heartbeat_disable(engine); + st_engine_heartbeat_disable(engine); for (n = 0; n <= 3; n++) { err = __live_preempt_ring(engine, &spin, @@ -2911,7 +2896,7 @@ static int live_preempt_ring(void *arg) break; } - engine_heartbeat_enable(engine); + st_engine_heartbeat_enable(engine); if (err) break; } @@ -4568,7 +4553,7 @@ static int reset_virtual_engine(struct intel_gt *gt, } for (n = 0; n < nsibling; n++) - engine_heartbeat_disable(siblings[n]); + st_engine_heartbeat_disable(siblings[n]); rq = igt_spinner_create_request(&spin, ve, MI_ARB_CHECK); if (IS_ERR(rq)) { @@ -4639,7 +4624,7 @@ out_rq: i915_request_put(rq); out_heartbeat: for (n = 0; n < nsibling; n++) - engine_heartbeat_enable(siblings[n]); + st_engine_heartbeat_enable(siblings[n]); intel_context_put(ve); out_spin: @@ -5314,7 +5299,7 @@ static int live_lrc_gpr(void *arg) return PTR_ERR(scratch); for_each_engine(engine, gt, id) { - engine_heartbeat_disable(engine); + st_engine_heartbeat_disable(engine); err = __live_lrc_gpr(engine, scratch, false); if (err) @@ -5325,7 +5310,7 @@ static int live_lrc_gpr(void *arg) goto err; err: - engine_heartbeat_enable(engine); + st_engine_heartbeat_enable(engine); if (igt_flush_test(gt->i915)) err = -EIO; if (err) @@ -5474,7 +5459,7 @@ static int live_lrc_timestamp(void *arg) for_each_engine(data.engine, gt, id) { int i, err = 0; - 
engine_heartbeat_disable(data.engine); + st_engine_heartbeat_disable(data.engine); for (i = 0; i < ARRAY_SIZE(data.ce); i++) { struct intel_context *tmp; @@ -5507,7 +5492,7 @@ static int live_lrc_timestamp(void *arg) } err: - engine_heartbeat_enable(data.engine); + st_engine_heartbeat_enable(data.engine); for (i = 0; i < ARRAY_SIZE(data.ce); i++) { if (!data.ce[i]) break; diff --git a/drivers/gpu/drm/i915/gt/selftest_rps.c b/drivers/gpu/drm/i915/gt/selftest_rps.c index 5049c3dd08a6..bb753f0c12eb 100644 --- a/drivers/gpu/drm/i915/gt/selftest_rps.c +++ b/drivers/gpu/drm/i915/gt/selftest_rps.c @@ -12,6 +12,7 @@ #include "intel_gt_clock_utils.h" #include "intel_gt_pm.h" #include "intel_rc6.h" +#include "selftest_engine_heartbeat.h" #include "selftest_rps.h" #include "selftests/igt_flush_test.h" #include "selftests/igt_spinner.h" @@ -20,22 +21,6 @@ /* Try to isolate the impact of cstates from determing frequency response */ #define CPU_LATENCY 0 /* -1 to disable pm_qos, 0 to disable cstates */ -static void engine_heartbeat_disable(struct intel_engine_cs *engine) -{ - engine->props.heartbeat_interval_ms = 0; - - intel_engine_pm_get(engine); - intel_engine_park_heartbeat(engine); -} - -static void engine_heartbeat_enable(struct intel_engine_cs *engine) -{ - intel_engine_pm_put(engine); - - engine->props.heartbeat_interval_ms = - engine->defaults.heartbeat_interval_ms; -} - static void dummy_rps_work(struct work_struct *wrk) { } @@ -249,13 +234,13 @@ int live_rps_clock_interval(void *arg) if (!intel_engine_can_store_dword(engine)) continue; - engine_heartbeat_disable(engine); + st_engine_heartbeat_disable(engine); rq = igt_spinner_create_request(&spin, engine->kernel_context, MI_NOOP); if (IS_ERR(rq)) { - engine_heartbeat_enable(engine); + st_engine_heartbeat_enable(engine); err = PTR_ERR(rq); break; } @@ -266,7 +251,7 @@ int live_rps_clock_interval(void *arg) pr_err("%s: RPS spinner did not start\n", engine->name); igt_spinner_end(&spin); - engine_heartbeat_enable(engine); + st_engine_heartbeat_enable(engine); intel_gt_set_wedged(engine->gt); err = -EIO; break; @@ -322,7 +307,7 @@ int live_rps_clock_interval(void *arg) intel_uncore_forcewake_put(gt->uncore, FORCEWAKE_ALL); igt_spinner_end(&spin); - engine_heartbeat_enable(engine); + st_engine_heartbeat_enable(engine); if (err == 0) { u64 time = intel_gt_pm_interval_to_ns(gt, cycles); @@ -408,7 +393,7 @@ int live_rps_control(void *arg) if (!intel_engine_can_store_dword(engine)) continue; - engine_heartbeat_disable(engine); + st_engine_heartbeat_disable(engine); rq = igt_spinner_create_request(&spin, engine->kernel_context, @@ -424,7 +409,7 @@ int live_rps_control(void *arg) pr_err("%s: RPS spinner did not start\n", engine->name); igt_spinner_end(&spin); - engine_heartbeat_enable(engine); + st_engine_heartbeat_enable(engine); intel_gt_set_wedged(engine->gt); err = -EIO; break; @@ -434,7 +419,7 @@ int live_rps_control(void *arg) pr_err("%s: could not set minimum frequency [%x], only %x!\n", engine->name, rps->min_freq, read_cagf(rps)); igt_spinner_end(&spin); - engine_heartbeat_enable(engine); + st_engine_heartbeat_enable(engine); show_pstate_limits(rps); err = -EINVAL; break; @@ -451,7 +436,7 @@ int live_rps_control(void *arg) pr_err("%s: could not restore minimum frequency [%x], only %x!\n", engine->name, rps->min_freq, read_cagf(rps)); igt_spinner_end(&spin); - engine_heartbeat_enable(engine); + st_engine_heartbeat_enable(engine); show_pstate_limits(rps); err = -EINVAL; break; @@ -466,7 +451,7 @@ int live_rps_control(void *arg) min_dt = 
ktime_sub(ktime_get(), min_dt); igt_spinner_end(&spin); - engine_heartbeat_enable(engine); + st_engine_heartbeat_enable(engine); pr_info("%s: range:[%x:%uMHz, %x:%uMHz] limit:[%x:%uMHz], %x:%x response %lluns:%lluns\n", engine->name, @@ -637,14 +622,14 @@ int live_rps_frequency_cs(void *arg) int freq; } min, max; - engine_heartbeat_disable(engine); + st_engine_heartbeat_disable(engine); vma = create_spin_counter(engine, engine->kernel_context->vm, false, &cancel, &cntr); if (IS_ERR(vma)) { err = PTR_ERR(vma); - engine_heartbeat_enable(engine); + st_engine_heartbeat_enable(engine); break; } @@ -725,7 +710,7 @@ err_vma: i915_vma_unpin(vma); i915_vma_put(vma); - engine_heartbeat_enable(engine); + st_engine_heartbeat_enable(engine); if (igt_flush_test(gt->i915)) err = -EIO; if (err) @@ -779,14 +764,14 @@ int live_rps_frequency_srm(void *arg) int freq; } min, max; - engine_heartbeat_disable(engine); + st_engine_heartbeat_disable(engine); vma = create_spin_counter(engine, engine->kernel_context->vm, true, &cancel, &cntr); if (IS_ERR(vma)) { err = PTR_ERR(vma); - engine_heartbeat_enable(engine); + st_engine_heartbeat_enable(engine); break; } @@ -866,7 +851,7 @@ err_vma: i915_vma_unpin(vma); i915_vma_put(vma); - engine_heartbeat_enable(engine); + st_engine_heartbeat_enable(engine); if (igt_flush_test(gt->i915)) err = -EIO; if (err) @@ -1061,11 +1046,11 @@ int live_rps_interrupt(void *arg) intel_gt_pm_wait_for_idle(engine->gt); GEM_BUG_ON(intel_rps_is_active(rps)); - engine_heartbeat_disable(engine); + st_engine_heartbeat_disable(engine); err = __rps_up_interrupt(rps, engine, &spin); - engine_heartbeat_enable(engine); + st_engine_heartbeat_enable(engine); if (err) goto out; @@ -1074,13 +1059,13 @@ int live_rps_interrupt(void *arg) /* Keep the engine awake but idle and check for DOWN */ if (pm_events & GEN6_PM_RP_DOWN_THRESHOLD) { - engine_heartbeat_disable(engine); + st_engine_heartbeat_disable(engine); intel_rc6_disable(>->rc6); err = __rps_down_interrupt(rps, engine); intel_rc6_enable(>->rc6); - engine_heartbeat_enable(engine); + st_engine_heartbeat_enable(engine); if (err) goto out; } @@ -1165,13 +1150,13 @@ int live_rps_power(void *arg) if (!intel_engine_can_store_dword(engine)) continue; - engine_heartbeat_disable(engine); + st_engine_heartbeat_disable(engine); rq = igt_spinner_create_request(&spin, engine->kernel_context, MI_NOOP); if (IS_ERR(rq)) { - engine_heartbeat_enable(engine); + st_engine_heartbeat_enable(engine); err = PTR_ERR(rq); break; } @@ -1182,7 +1167,7 @@ int live_rps_power(void *arg) pr_err("%s: RPS spinner did not start\n", engine->name); igt_spinner_end(&spin); - engine_heartbeat_enable(engine); + st_engine_heartbeat_enable(engine); intel_gt_set_wedged(engine->gt); err = -EIO; break; @@ -1195,7 +1180,7 @@ int live_rps_power(void *arg) min.power = measure_power_at(rps, &min.freq); igt_spinner_end(&spin); - engine_heartbeat_enable(engine); + st_engine_heartbeat_enable(engine); pr_info("%s: min:%llumW @ %uMHz, max:%llumW @ %uMHz\n", engine->name, @@ -1252,6 +1237,11 @@ int live_rps_dynamic(void *arg) if (igt_spinner_init(&spin, gt)) return -ENOMEM; + if (intel_rps_has_interrupts(rps)) + pr_info("RPS has interrupt support\n"); + if (intel_rps_uses_timer(rps)) + pr_info("RPS has timer support\n"); + for_each_engine(engine, gt, id) { struct i915_request *rq; struct { diff --git a/drivers/gpu/drm/i915/gt/selftest_timeline.c b/drivers/gpu/drm/i915/gt/selftest_timeline.c index b2aad7ef046a..fcdee951579b 100644 --- a/drivers/gpu/drm/i915/gt/selftest_timeline.c +++ 
b/drivers/gpu/drm/i915/gt/selftest_timeline.c @@ -12,6 +12,7 @@ #include "intel_gt.h" #include "intel_gt_requests.h" #include "intel_ring.h" +#include "selftest_engine_heartbeat.h" #include "../selftests/i915_random.h" #include "../i915_selftest.h" @@ -751,22 +752,6 @@ out_free: return err; } -static void engine_heartbeat_disable(struct intel_engine_cs *engine) -{ - engine->props.heartbeat_interval_ms = 0; - - intel_engine_pm_get(engine); - intel_engine_park_heartbeat(engine); -} - -static void engine_heartbeat_enable(struct intel_engine_cs *engine) -{ - intel_engine_pm_put(engine); - - engine->props.heartbeat_interval_ms = - engine->defaults.heartbeat_interval_ms; -} - static int live_hwsp_rollover_kernel(void *arg) { struct intel_gt *gt = arg; @@ -785,7 +770,7 @@ static int live_hwsp_rollover_kernel(void *arg) struct i915_request *rq[3] = {}; int i; - engine_heartbeat_disable(engine); + st_engine_heartbeat_disable(engine); if (intel_gt_wait_for_idle(gt, HZ / 2)) { err = -EIO; goto out; @@ -836,7 +821,7 @@ static int live_hwsp_rollover_kernel(void *arg) out: for (i = 0; i < ARRAY_SIZE(rq); i++) i915_request_put(rq[i]); - engine_heartbeat_enable(engine); + st_engine_heartbeat_enable(engine); if (err) break; } diff --git a/drivers/gpu/drm/i915/selftests/i915_request.c b/drivers/gpu/drm/i915/selftests/i915_request.c index 92c628f18c60..06d18aae070b 100644 --- a/drivers/gpu/drm/i915/selftests/i915_request.c +++ b/drivers/gpu/drm/i915/selftests/i915_request.c @@ -34,6 +34,7 @@ #include "gt/intel_engine_user.h" #include "gt/intel_gt.h" #include "gt/intel_gt_requests.h" +#include "gt/selftest_engine_heartbeat.h" #include "i915_random.h" #include "i915_selftest.h" @@ -2270,22 +2271,6 @@ static void rps_unpin(struct intel_gt *gt) atomic_dec(>->rps.num_waiters); } -static void engine_heartbeat_disable(struct intel_engine_cs *engine) -{ - engine->props.heartbeat_interval_ms = 0; - - intel_engine_pm_get(engine); - intel_engine_park_heartbeat(engine); -} - -static void engine_heartbeat_enable(struct intel_engine_cs *engine) -{ - intel_engine_pm_put(engine); - - engine->props.heartbeat_interval_ms = - engine->defaults.heartbeat_interval_ms; -} - static int perf_request_latency(void *arg) { struct drm_i915_private *i915 = arg; @@ -2311,7 +2296,7 @@ static int perf_request_latency(void *arg) goto out; } - engine_heartbeat_disable(engine); + st_engine_heartbeat_disable(engine); rps_pin(engine->gt); if (err == 0) @@ -2330,7 +2315,7 @@ static int perf_request_latency(void *arg) err = measure_completion(ce); rps_unpin(engine->gt); - engine_heartbeat_enable(engine); + st_engine_heartbeat_enable(engine); intel_context_unpin(ce); intel_context_put(ce); -- cgit v1.2.3 From 810b7ee3008ab2ac94791f75857815484c2f9cce Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Wed, 17 Jun 2020 14:09:16 +0100 Subject: drm/i915/gt: Always report the sample time for busy-stats Return the monotonic timestamp (ktime_get()) at the time of sampling the busy-time. This is used in preference to taking ktime_get() separately before or after the read seqlock as there can be some large variance in reported timestamps. For selftests trying to ascertain that we are reporting accurate to within a few microseconds, even a small delay leads to the test failing. 
Signed-off-by: Chris Wilson Cc: Tvrtko Ursulin Reviewed-by: Tvrtko Ursulin Link: https://patchwork.freedesktop.org/patch/msgid/20200617130916.15261-2-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gt/intel_engine.h | 3 +- drivers/gpu/drm/i915/gt/intel_engine_cs.c | 12 ++--- drivers/gpu/drm/i915/gt/intel_rps.c | 9 ++-- drivers/gpu/drm/i915/gt/selftest_engine_pm.c | 18 ++++---- drivers/gpu/drm/i915/i915_pmu.c | 5 ++- drivers/gpu/drm/i915/selftests/i915_request.c | 63 +++++++++++++++++---------- 6 files changed, 66 insertions(+), 44 deletions(-) (limited to 'drivers/gpu/drm/i915/selftests/i915_request.c') diff --git a/drivers/gpu/drm/i915/gt/intel_engine.h b/drivers/gpu/drm/i915/gt/intel_engine.h index 791897f8d847..a9249a23903a 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine.h +++ b/drivers/gpu/drm/i915/gt/intel_engine.h @@ -334,7 +334,8 @@ void intel_engine_dump(struct intel_engine_cs *engine, struct drm_printer *m, const char *header, ...); -ktime_t intel_engine_get_busy_time(struct intel_engine_cs *engine); +ktime_t intel_engine_get_busy_time(struct intel_engine_cs *engine, + ktime_t *now); struct i915_request * intel_engine_find_active_request(struct intel_engine_cs *engine); diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c index 045179c65c44..c62b3cbdbbf9 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c +++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c @@ -1595,7 +1595,8 @@ void intel_engine_dump(struct intel_engine_cs *engine, intel_engine_print_breadcrumbs(engine, m); } -static ktime_t __intel_engine_get_busy_time(struct intel_engine_cs *engine) +static ktime_t __intel_engine_get_busy_time(struct intel_engine_cs *engine, + ktime_t *now) { ktime_t total = engine->stats.total; @@ -1603,9 +1604,9 @@ static ktime_t __intel_engine_get_busy_time(struct intel_engine_cs *engine) * If the engine is executing something at the moment * add it to the total. */ + *now = ktime_get(); if (atomic_read(&engine->stats.active)) - total = ktime_add(total, - ktime_sub(ktime_get(), engine->stats.start)); + total = ktime_add(total, ktime_sub(*now, engine->stats.start)); return total; } @@ -1613,17 +1614,18 @@ static ktime_t __intel_engine_get_busy_time(struct intel_engine_cs *engine) /** * intel_engine_get_busy_time() - Return current accumulated engine busyness * @engine: engine to report on + * @now: monotonic timestamp of sampling * * Returns accumulated time @engine was busy since engine stats were enabled. 
*/ -ktime_t intel_engine_get_busy_time(struct intel_engine_cs *engine) +ktime_t intel_engine_get_busy_time(struct intel_engine_cs *engine, ktime_t *now) { unsigned int seq; ktime_t total; do { seq = read_seqbegin(&engine->stats.lock); - total = __intel_engine_get_busy_time(engine); + total = __intel_engine_get_busy_time(engine, now); } while (read_seqretry(&engine->stats.lock, seq)); return total; diff --git a/drivers/gpu/drm/i915/gt/intel_rps.c b/drivers/gpu/drm/i915/gt/intel_rps.c index 2f59fc6df3c2..bdece932592b 100644 --- a/drivers/gpu/drm/i915/gt/intel_rps.c +++ b/drivers/gpu/drm/i915/gt/intel_rps.c @@ -53,13 +53,13 @@ static void rps_timer(struct timer_list *t) struct intel_engine_cs *engine; enum intel_engine_id id; s64 max_busy[3] = {}; - ktime_t dt, last; + ktime_t dt, timestamp, last; for_each_engine(engine, rps_to_gt(rps), id) { s64 busy; int i; - dt = intel_engine_get_busy_time(engine); + dt = intel_engine_get_busy_time(engine, ×tamp); last = engine->stats.rps; engine->stats.rps = dt; @@ -70,15 +70,14 @@ static void rps_timer(struct timer_list *t) } } - dt = ktime_get(); last = rps->pm_timestamp; - rps->pm_timestamp = dt; + rps->pm_timestamp = timestamp; if (intel_rps_is_active(rps)) { s64 busy; int i; - dt = ktime_sub(dt, last); + dt = ktime_sub(timestamp, last); /* * Our goal is to evaluate each engine independently, so we run diff --git a/drivers/gpu/drm/i915/gt/selftest_engine_pm.c b/drivers/gpu/drm/i915/gt/selftest_engine_pm.c index dd54dcb5cca2..b08fc5390e8a 100644 --- a/drivers/gpu/drm/i915/gt/selftest_engine_pm.c +++ b/drivers/gpu/drm/i915/gt/selftest_engine_pm.c @@ -29,8 +29,8 @@ static int live_engine_busy_stats(void *arg) GEM_BUG_ON(intel_gt_pm_is_awake(gt)); for_each_engine(engine, gt, id) { struct i915_request *rq; - ktime_t de; - u64 dt; + ktime_t de, dt; + ktime_t t[2]; if (!intel_engine_supports_stats(engine)) continue; @@ -47,12 +47,11 @@ static int live_engine_busy_stats(void *arg) ENGINE_TRACE(engine, "measuring idle time\n"); preempt_disable(); - dt = ktime_to_ns(ktime_get()); - de = intel_engine_get_busy_time(engine); + de = intel_engine_get_busy_time(engine, &t[0]); udelay(100); - de = ktime_sub(intel_engine_get_busy_time(engine), de); - dt = ktime_to_ns(ktime_get()) - dt; + de = ktime_sub(intel_engine_get_busy_time(engine, &t[1]), de); preempt_enable(); + dt = ktime_sub(t[1], t[0]); if (de < 0 || de > 10) { pr_err("%s: reported %lldns [%d%%] busyness while sleeping [for %lldns]\n", engine->name, @@ -80,12 +79,11 @@ static int live_engine_busy_stats(void *arg) ENGINE_TRACE(engine, "measuring busy time\n"); preempt_disable(); - dt = ktime_to_ns(ktime_get()); - de = intel_engine_get_busy_time(engine); + de = intel_engine_get_busy_time(engine, &t[0]); udelay(100); - de = ktime_sub(intel_engine_get_busy_time(engine), de); - dt = ktime_to_ns(ktime_get()) - dt; + de = ktime_sub(intel_engine_get_busy_time(engine, &t[1]), de); preempt_enable(); + dt = ktime_sub(t[1], t[0]); if (100 * de < 95 * dt || 95 * de > 100 * dt) { pr_err("%s: reported %lldns [%d%%] busyness while spinning [for %lldns]\n", engine->name, diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c index 802837de1767..28bc5f13ae52 100644 --- a/drivers/gpu/drm/i915/i915_pmu.c +++ b/drivers/gpu/drm/i915/i915_pmu.c @@ -565,7 +565,10 @@ static u64 __i915_pmu_event_read(struct perf_event *event) /* Do nothing */ } else if (sample == I915_SAMPLE_BUSY && intel_engine_supports_stats(engine)) { - val = ktime_to_ns(intel_engine_get_busy_time(engine)); + ktime_t unused; + + val = 
ktime_to_ns(intel_engine_get_busy_time(engine, + &unused)); } else { val = engine->pmu.sample[sample].cur; } diff --git a/drivers/gpu/drm/i915/selftests/i915_request.c b/drivers/gpu/drm/i915/selftests/i915_request.c index 06d18aae070b..9271aad7f779 100644 --- a/drivers/gpu/drm/i915/selftests/i915_request.c +++ b/drivers/gpu/drm/i915/selftests/i915_request.c @@ -2492,9 +2492,11 @@ static int perf_series_engines(void *arg) intel_engine_pm_get(p->engine); if (intel_engine_supports_stats(p->engine)) - p->busy = intel_engine_get_busy_time(p->engine) + 1; + p->busy = intel_engine_get_busy_time(p->engine, + &p->time) + 1; + else + p->time = ktime_get(); p->runtime = -intel_context_get_total_runtime_ns(ce); - p->time = ktime_get(); } err = (*fn)(ps); @@ -2505,13 +2507,15 @@ static int perf_series_engines(void *arg) struct perf_stats *p = &stats[idx]; struct intel_context *ce = ps->ce[idx]; int integer, decimal; - u64 busy, dt; + u64 busy, dt, now; - p->time = ktime_sub(ktime_get(), p->time); - if (p->busy) { - p->busy = ktime_sub(intel_engine_get_busy_time(p->engine), + if (p->busy) + p->busy = ktime_sub(intel_engine_get_busy_time(p->engine, + &now), p->busy - 1); - } + else + now = ktime_get(); + p->time = ktime_sub(now, p->time); err = switch_to_kernel_sync(ce, err); p->runtime += intel_context_get_total_runtime_ns(ce); @@ -2571,13 +2575,14 @@ static int p_sync0(void *arg) return err; } - busy = false; if (intel_engine_supports_stats(engine)) { - p->busy = intel_engine_get_busy_time(engine); + p->busy = intel_engine_get_busy_time(engine, &p->time); busy = true; + } else { + p->time = ktime_get(); + busy = false; } - p->time = ktime_get(); count = 0; do { struct i915_request *rq; @@ -2600,11 +2605,15 @@ static int p_sync0(void *arg) count++; } while (!__igt_timeout(end_time, NULL)); - p->time = ktime_sub(ktime_get(), p->time); if (busy) { - p->busy = ktime_sub(intel_engine_get_busy_time(engine), + ktime_t now; + + p->busy = ktime_sub(intel_engine_get_busy_time(engine, &now), p->busy); + p->time = ktime_sub(now, p->time); + } else { + p->time = ktime_sub(ktime_get(), p->time); } err = switch_to_kernel_sync(ce, err); @@ -2637,13 +2646,14 @@ static int p_sync1(void *arg) return err; } - busy = false; if (intel_engine_supports_stats(engine)) { - p->busy = intel_engine_get_busy_time(engine); + p->busy = intel_engine_get_busy_time(engine, &p->time); busy = true; + } else { + p->time = ktime_get(); + busy = false; } - p->time = ktime_get(); count = 0; do { struct i915_request *rq; @@ -2668,11 +2678,15 @@ static int p_sync1(void *arg) count++; } while (!__igt_timeout(end_time, NULL)); i915_request_put(prev); - p->time = ktime_sub(ktime_get(), p->time); if (busy) { - p->busy = ktime_sub(intel_engine_get_busy_time(engine), + ktime_t now; + + p->busy = ktime_sub(intel_engine_get_busy_time(engine, &now), p->busy); + p->time = ktime_sub(now, p->time); + } else { + p->time = ktime_sub(ktime_get(), p->time); } err = switch_to_kernel_sync(ce, err); @@ -2704,14 +2718,15 @@ static int p_many(void *arg) return err; } - busy = false; if (intel_engine_supports_stats(engine)) { - p->busy = intel_engine_get_busy_time(engine); + p->busy = intel_engine_get_busy_time(engine, &p->time); busy = true; + } else { + p->time = ktime_get(); + busy = false; } count = 0; - p->time = ktime_get(); do { struct i915_request *rq; @@ -2724,11 +2739,15 @@ static int p_many(void *arg) i915_request_add(rq); count++; } while (!__igt_timeout(end_time, NULL)); - p->time = ktime_sub(ktime_get(), p->time); if (busy) { - p->busy = 
ktime_sub(intel_engine_get_busy_time(engine), + ktime_t now; + + p->busy = ktime_sub(intel_engine_get_busy_time(engine, &now), p->busy); + p->time = ktime_sub(now, p->time); + } else { + p->time = ktime_sub(ktime_get(), p->time); } err = switch_to_kernel_sync(ce, err); -- cgit v1.2.3 From 792592e72aba4162c35cd3155d4d5b99d5fb5762 Mon Sep 17 00:00:00 2001 From: Daniele Ceraolo Spurio Date: Tue, 7 Jul 2020 17:39:47 -0700 Subject: drm/i915: Move the engine mask to intel_gt_info Since the engines belong to the GT, move the runtime-updated list of available engines to the intel_gt struct. The original mask has been renamed to indicate it contains the maximum engine list that can be found on a matching device. In preparation for other info being moved to the gt in follow up patches (sseu), introduce an intel_gt_info structure to group all gt-related runtime info. v2: s/max_engine_mask/platform_engine_mask (tvrtko), fix selftest Signed-off-by: Daniele Ceraolo Spurio Cc: Tvrtko Ursulin Cc: Andi Shyti Cc: Venkata Sandeep Dhanalakota Reviewed-by: Tvrtko Ursulin #v1 Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20200708003952.21831-5-daniele.ceraolospurio@intel.com --- drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c | 3 +- drivers/gpu/drm/i915/gt/intel_engine_cs.c | 13 ++++---- drivers/gpu/drm/i915/gt/intel_gt.c | 6 ++++ drivers/gpu/drm/i915/gt/intel_gt.h | 4 +++ drivers/gpu/drm/i915/gt/intel_gt_types.h | 8 +++++ drivers/gpu/drm/i915/gt/intel_reset.c | 6 ++-- drivers/gpu/drm/i915/gt/intel_ring_submission.c | 2 +- drivers/gpu/drm/i915/gt/selftest_lrc.c | 8 ++--- drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c | 2 +- drivers/gpu/drm/i915/gvt/handlers.c | 2 +- drivers/gpu/drm/i915/i915_debugfs.c | 2 ++ drivers/gpu/drm/i915/i915_drv.c | 1 + drivers/gpu/drm/i915/i915_drv.h | 6 ++-- drivers/gpu/drm/i915/i915_gpu_error.c | 23 ++++++++----- drivers/gpu/drm/i915/i915_gpu_error.h | 3 ++ drivers/gpu/drm/i915/i915_pci.c | 42 ++++++++++++------------ drivers/gpu/drm/i915/intel_device_info.c | 1 - drivers/gpu/drm/i915/intel_device_info.h | 7 +--- drivers/gpu/drm/i915/intel_uncore.c | 2 +- drivers/gpu/drm/i915/selftests/i915_request.c | 2 +- drivers/gpu/drm/i915/selftests/mock_gem_device.c | 3 +- 21 files changed, 84 insertions(+), 62 deletions(-) (limited to 'drivers/gpu/drm/i915/selftests/i915_request.c') diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c index dd15a799f9d6..6b4ec66cb558 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c @@ -1980,8 +1980,7 @@ static int eb_submit(struct i915_execbuffer *eb, struct i915_vma *batch) static int num_vcs_engines(const struct drm_i915_private *i915) { - return hweight64(INTEL_INFO(i915)->engine_mask & - GENMASK_ULL(VCS0 + I915_MAX_VCS - 1, VCS0)); + return hweight64(VDBOX_MASK(&i915->gt)); } /* diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c index 04114de15fe3..fca3c2348e5e 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c +++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c @@ -370,7 +370,7 @@ static void __setup_engine_capabilities(struct intel_engine_cs *engine) * instances. 
*/ if ((INTEL_GEN(i915) >= 11 && - RUNTIME_INFO(i915)->vdbox_sfc_access & engine->mask) || + engine->gt->info.vdbox_sfc_access & engine->mask) || (INTEL_GEN(i915) >= 9 && engine->instance == 0)) engine->uabi_capabilities |= I915_VIDEO_AND_ENHANCE_CLASS_CAPABILITY_SFC; @@ -463,7 +463,7 @@ void intel_engines_free(struct intel_gt *gt) static intel_engine_mask_t init_engine_mask(struct intel_gt *gt) { struct drm_i915_private *i915 = gt->i915; - struct intel_device_info *info = mkwrite_device_info(i915); + struct intel_gt_info *info = &gt->info; struct intel_uncore *uncore = gt->uncore; unsigned int logical_vdbox = 0; unsigned int i; @@ -471,6 +471,8 @@ static intel_engine_mask_t init_engine_mask(struct intel_gt *gt) u16 vdbox_mask; u16 vebox_mask; + info->engine_mask = INTEL_INFO(i915)->platform_engine_mask; + if (INTEL_GEN(i915) < 11) return info->engine_mask; @@ -498,7 +500,7 @@ static intel_engine_mask_t init_engine_mask(struct intel_gt *gt) * In TGL each VDBOX has access to an SFC. */ if (INTEL_GEN(i915) >= 12 || logical_vdbox++ % 2 == 0) - RUNTIME_INFO(i915)->vdbox_sfc_access |= BIT(i); + gt->info.vdbox_sfc_access |= BIT(i); } drm_dbg(&i915->drm, "vdbox enable: %04x, instances: %04lx\n", vdbox_mask, VDBOX_MASK(gt)); @@ -531,7 +533,6 @@ static intel_engine_mask_t init_engine_mask(struct intel_gt *gt) int intel_engines_init_mmio(struct intel_gt *gt) { struct drm_i915_private *i915 = gt->i915; - struct intel_device_info *device_info = mkwrite_device_info(i915); const unsigned int engine_mask = init_engine_mask(gt); unsigned int mask = 0; unsigned int i; @@ -561,9 +562,9 @@ int intel_engines_init_mmio(struct intel_gt *gt) * engines. */ if (drm_WARN_ON(&i915->drm, mask != engine_mask)) - device_info->engine_mask = mask; + gt->info.engine_mask = mask; - RUNTIME_INFO(i915)->num_engines = hweight32(mask); + gt->info.num_engines = hweight32(mask); intel_gt_check_and_clear_faults(gt); diff --git a/drivers/gpu/drm/i915/gt/intel_gt.c b/drivers/gpu/drm/i915/gt/intel_gt.c index 876f78759095..6a268c6d6a6f 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt.c +++ b/drivers/gpu/drm/i915/gt/intel_gt.c @@ -642,3 +642,9 @@ void intel_gt_driver_late_release(struct intel_gt *gt) intel_gt_fini_timelines(gt); intel_engines_free(gt); } + +void intel_gt_info_print(const struct intel_gt_info *info, + struct drm_printer *p) +{ + drm_printf(p, "available engines: %x\n", info->engine_mask); +} diff --git a/drivers/gpu/drm/i915/gt/intel_gt.h b/drivers/gpu/drm/i915/gt/intel_gt.h index 982957ca4e62..908fc5dea885 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt.h +++ b/drivers/gpu/drm/i915/gt/intel_gt.h @@ -11,6 +11,7 @@ #include "intel_reset.h" struct drm_i915_private; +struct drm_printer; #define GT_TRACE(gt, fmt, ...)
do { \ const struct intel_gt *gt__ __maybe_unused = (gt); \ @@ -72,4 +73,7 @@ static inline bool intel_gt_is_wedged(const struct intel_gt *gt) return unlikely(test_bit(I915_WEDGED, &gt->reset.flags)); } +void intel_gt_info_print(const struct intel_gt_info *info, + struct drm_printer *p); + #endif /* __INTEL_GT_H__ */ diff --git a/drivers/gpu/drm/i915/gt/intel_gt_types.h b/drivers/gpu/drm/i915/gt/intel_gt_types.h index 0cc1d6b185dc..bb7551867c00 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_types.h +++ b/drivers/gpu/drm/i915/gt/intel_gt_types.h @@ -109,6 +109,14 @@ struct intel_gt { struct intel_gt_buffer_pool buffer_pool; struct i915_vma *scratch; + + struct intel_gt_info { + intel_engine_mask_t engine_mask; + u8 num_engines; + + /* Media engine access to SFC per instance */ + u8 vdbox_sfc_access; + } info; }; enum intel_gt_scratch_field { diff --git a/drivers/gpu/drm/i915/gt/intel_reset.c b/drivers/gpu/drm/i915/gt/intel_reset.c index 121bf39a6f3e..46a5ceffc22f 100644 --- a/drivers/gpu/drm/i915/gt/intel_reset.c +++ b/drivers/gpu/drm/i915/gt/intel_reset.c @@ -342,7 +342,7 @@ static int gen6_reset_engines(struct intel_gt *gt, static int gen11_lock_sfc(struct intel_engine_cs *engine, u32 *hw_mask) { struct intel_uncore *uncore = engine->uncore; - u8 vdbox_sfc_access = RUNTIME_INFO(engine->i915)->vdbox_sfc_access; + u8 vdbox_sfc_access = engine->gt->info.vdbox_sfc_access; i915_reg_t sfc_forced_lock, sfc_forced_lock_ack; u32 sfc_forced_lock_bit, sfc_forced_lock_ack_bit; i915_reg_t sfc_usage; @@ -417,7 +417,7 @@ static int gen11_lock_sfc(struct intel_engine_cs *engine, u32 *hw_mask) static void gen11_unlock_sfc(struct intel_engine_cs *engine) { struct intel_uncore *uncore = engine->uncore; - u8 vdbox_sfc_access = RUNTIME_INFO(engine->i915)->vdbox_sfc_access; + u8 vdbox_sfc_access = engine->gt->info.vdbox_sfc_access; i915_reg_t sfc_forced_lock; u32 sfc_forced_lock_bit; @@ -1246,7 +1246,7 @@ void intel_gt_handle_error(struct intel_gt *gt, */ wakeref = intel_runtime_pm_get(gt->uncore->rpm); - engine_mask &= INTEL_INFO(gt->i915)->engine_mask; + engine_mask &= gt->info.engine_mask; if (flags & I915_ERROR_CAPTURE) { i915_capture_error_state(gt->i915); diff --git a/drivers/gpu/drm/i915/gt/intel_ring_submission.c b/drivers/gpu/drm/i915/gt/intel_ring_submission.c index 68a08486fc87..b09b83deecef 100644 --- a/drivers/gpu/drm/i915/gt/intel_ring_submission.c +++ b/drivers/gpu/drm/i915/gt/intel_ring_submission.c @@ -649,7 +649,7 @@ static inline int mi_set_context(struct i915_request *rq, struct drm_i915_private *i915 = engine->i915; enum intel_engine_id id; const int num_engines = - IS_HASWELL(i915) ? RUNTIME_INFO(i915)->num_engines - 1 : 0; + IS_HASWELL(i915) ?
engine->gt->info.num_engines - 1 : 0; bool force_restore = false; int len; u32 *cs; diff --git a/drivers/gpu/drm/i915/gt/selftest_lrc.c b/drivers/gpu/drm/i915/gt/selftest_lrc.c index daa4aabab9a7..3fc5de961280 100644 --- a/drivers/gpu/drm/i915/gt/selftest_lrc.c +++ b/drivers/gpu/drm/i915/gt/selftest_lrc.c @@ -963,7 +963,7 @@ slice_semaphore_queue(struct intel_engine_cs *outer, goto out; if (i915_request_wait(head, 0, - 2 * RUNTIME_INFO(outer->i915)->num_engines * (count + 2) * (count + 3)) < 0) { + 2 * outer->gt->info.num_engines * (count + 2) * (count + 3)) < 0) { pr_err("Failed to slice along semaphore chain of length (%d, %d)!\n", count, n); GEM_TRACE_DUMP(); @@ -3569,8 +3569,7 @@ static int smoke_crescendo(struct preempt_smoke *smoke, unsigned int flags) } pr_info("Submitted %lu crescendo:%x requests across %d engines and %d contexts\n", - count, flags, - RUNTIME_INFO(smoke->gt->i915)->num_engines, smoke->ncontext); + count, flags, smoke->gt->info.num_engines, smoke->ncontext); return 0; } @@ -3597,8 +3596,7 @@ static int smoke_random(struct preempt_smoke *smoke, unsigned int flags) } while (count < smoke->ncontext && !__igt_timeout(end_time, NULL)); pr_info("Submitted %lu random:%x requests across %d engines and %d contexts\n", - count, flags, - RUNTIME_INFO(smoke->gt->i915)->num_engines, smoke->ncontext); + count, flags, smoke->gt->info.num_engines, smoke->ncontext); return 0; } diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c index fbdd6b0677db..c10ae1660e53 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c @@ -106,7 +106,7 @@ static void __guc_ads_init(struct intel_guc *guc) blob->system_info.vdbox_enable_mask = VDBOX_MASK(gt); blob->system_info.vebox_enable_mask = VEBOX_MASK(gt); - blob->system_info.vdbox_sfc_support_mask = RUNTIME_INFO(dev_priv)->vdbox_sfc_access; + blob->system_info.vdbox_sfc_support_mask = gt->info.vdbox_sfc_access; base = intel_guc_ggtt_offset(guc, guc->ads_vma); diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c index 686013f4a2f8..b58177f0858a 100644 --- a/drivers/gpu/drm/i915/gvt/handlers.c +++ b/drivers/gpu/drm/i915/gvt/handlers.c @@ -347,7 +347,7 @@ static int gdrst_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, gvt_dbg_mmio("vgpu%d: request GUC Reset\n", vgpu->id); vgpu_vreg_t(vgpu, GUC_STATUS) |= GS_MIA_IN_RESET; } - engine_mask &= INTEL_INFO(vgpu->gvt->gt->i915)->engine_mask; + engine_mask &= vgpu->gvt->gt->info.engine_mask; } /* vgpu_lock already hold by emulate mmio r/w */ diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 94ed442910d6..41ca8ff2aa16 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -34,6 +34,7 @@ #include "gem/i915_gem_context.h" #include "gt/intel_gt_buffer_pool.h" #include "gt/intel_gt_clock_utils.h" +#include "gt/intel_gt.h" #include "gt/intel_gt_pm.h" #include "gt/intel_gt_requests.h" #include "gt/intel_reset.h" @@ -61,6 +62,7 @@ static int i915_capabilities(struct seq_file *m, void *data) intel_device_info_print_static(INTEL_INFO(i915), &p); intel_device_info_print_runtime(RUNTIME_INFO(i915), &p); + intel_gt_info_print(&i915->gt.info, &p); intel_driver_caps_print(&i915->caps, &p); kernel_param_lock(THIS_MODULE); diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 611287353420..67789df42be8 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ 
b/drivers/gpu/drm/i915/i915_drv.c @@ -886,6 +886,7 @@ static void i915_welcome_messages(struct drm_i915_private *dev_priv) intel_device_info_print_static(INTEL_INFO(dev_priv), &p); intel_device_info_print_runtime(RUNTIME_INFO(dev_priv), &p); + intel_gt_info_print(&dev_priv->gt.info, &p); } if (IS_ENABLED(CONFIG_DRM_I915_DEBUG)) diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index b3968beb7048..21bb9f7cc452 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -1256,7 +1256,7 @@ static inline struct drm_i915_private *pdev_to_i915(struct pci_dev *pdev) /* Iterator over subset of engines selected by mask */ #define for_each_engine_masked(engine__, gt__, mask__, tmp__) \ - for ((tmp__) = (mask__) & INTEL_INFO((gt__)->i915)->engine_mask; \ + for ((tmp__) = (mask__) & (gt__)->info.engine_mask; \ (tmp__) ? \ ((engine__) = (gt__)->engine[__mask_next_bit(tmp__)]), 1 : \ 0;) @@ -1563,12 +1563,12 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915, #define IS_GEN9_BC(dev_priv) (IS_GEN(dev_priv, 9) && !IS_LP(dev_priv)) #define __HAS_ENGINE(engine_mask, id) ((engine_mask) & BIT(id)) -#define HAS_ENGINE(gt, id) __HAS_ENGINE(INTEL_INFO((gt)->i915)->engine_mask, id) +#define HAS_ENGINE(gt, id) __HAS_ENGINE((gt)->info.engine_mask, id) #define ENGINE_INSTANCES_MASK(gt, first, count) ({ \ unsigned int first__ = (first); \ unsigned int count__ = (count); \ - (INTEL_INFO((gt)->i915)->engine_mask & \ + ((gt)->info.engine_mask & \ GENMASK(first__ + count__ - 1, first__)) >> first__; \ }) #define VDBOX_MASK(gt) \ diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c index 866166ada10e..9cb9aa39c33d 100644 --- a/drivers/gpu/drm/i915/i915_gpu_error.c +++ b/drivers/gpu/drm/i915/i915_gpu_error.c @@ -42,6 +42,7 @@ #include "gem/i915_gem_context.h" #include "gem/i915_gem_lmem.h" +#include "gt/intel_gt.h" #include "gt/intel_gt_pm.h" #include "i915_drv.h" @@ -619,16 +620,15 @@ static void print_error_vma(struct drm_i915_error_state_buf *m, } static void err_print_capabilities(struct drm_i915_error_state_buf *m, - const struct intel_device_info *info, - const struct intel_runtime_info *runtime, - const struct intel_driver_caps *caps) + struct i915_gpu_coredump *error) { struct drm_printer p = i915_error_printer(m); - intel_device_info_print_static(info, &p); - intel_device_info_print_runtime(runtime, &p); - intel_device_info_print_topology(&runtime->sseu, &p); - intel_driver_caps_print(caps, &p); + intel_device_info_print_static(&error->device_info, &p); + intel_device_info_print_runtime(&error->runtime_info, &p); + intel_device_info_print_topology(&error->runtime_info.sseu, &p); + intel_gt_info_print(&error->gt->info, &p); + intel_driver_caps_print(&error->driver_caps, &p); } static void err_print_params(struct drm_i915_error_state_buf *m, @@ -798,8 +798,7 @@ static void __err_print_to_sgl(struct drm_i915_error_state_buf *m, if (error->display) intel_display_print_error_state(m, error->display); - err_print_capabilities(m, &error->device_info, &error->runtime_info, - &error->driver_caps); + err_print_capabilities(m, error); err_print_params(m, &error->params); } @@ -1630,6 +1629,11 @@ static void gt_record_regs(struct intel_gt_coredump *gt) gt->pgtbl_er = intel_uncore_read(uncore, PGTBL_ER); } +static void gt_record_info(struct intel_gt_coredump *gt) +{ + memcpy(&gt->info, &gt->_gt->info, sizeof(struct intel_gt_info)); +} + /* * Generate a semi-unique error code.
The code is not meant to have meaning, The * code's only purpose is to try to prevent false duplicated bug reports by @@ -1808,6 +1812,7 @@ struct i915_gpu_coredump *i915_gpu_coredump(struct drm_i915_private *i915) return ERR_PTR(-ENOMEM); } + gt_record_info(error->gt); gt_record_engines(error->gt, compress); if (INTEL_INFO(i915)->has_gt_uc) diff --git a/drivers/gpu/drm/i915/i915_gpu_error.h b/drivers/gpu/drm/i915/i915_gpu_error.h index 76b80fbfb7e9..0220b0992808 100644 --- a/drivers/gpu/drm/i915/i915_gpu_error.h +++ b/drivers/gpu/drm/i915/i915_gpu_error.h @@ -15,6 +15,7 @@ #include #include "gt/intel_engine.h" +#include "gt/intel_gt_types.h" #include "gt/uc/intel_uc_fw.h" #include "intel_device_info.h" @@ -118,6 +119,8 @@ struct intel_gt_coredump { bool awake; bool simulated; + struct intel_gt_info info; + /* Generic register state */ u32 eir; u32 pgtbl_er; diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c index 0be3b66ce666..db916fff3f0d 100644 --- a/drivers/gpu/drm/i915/i915_pci.c +++ b/drivers/gpu/drm/i915/i915_pci.c @@ -168,7 +168,7 @@ .gpu_reset_clobbers_display = true, \ .hws_needs_physical = 1, \ .unfenced_needs_alignment = 1, \ - .engine_mask = BIT(RCS0), \ + .platform_engine_mask = BIT(RCS0), \ .has_snoop = true, \ .has_coherent_ggtt = false, \ .dma_mask_size = 32, \ @@ -188,7 +188,7 @@ .gpu_reset_clobbers_display = true, \ .hws_needs_physical = 1, \ .unfenced_needs_alignment = 1, \ - .engine_mask = BIT(RCS0), \ + .platform_engine_mask = BIT(RCS0), \ .has_snoop = true, \ .has_coherent_ggtt = false, \ .dma_mask_size = 32, \ @@ -226,7 +226,7 @@ static const struct intel_device_info i865g_info = { .cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B), \ .display.has_gmch = 1, \ .gpu_reset_clobbers_display = true, \ - .engine_mask = BIT(RCS0), \ + .platform_engine_mask = BIT(RCS0), \ .has_snoop = true, \ .has_coherent_ggtt = true, \ .dma_mask_size = 32, \ @@ -317,7 +317,7 @@ static const struct intel_device_info pnv_m_info = { .display.has_hotplug = 1, \ .display.has_gmch = 1, \ .gpu_reset_clobbers_display = true, \ - .engine_mask = BIT(RCS0), \ + .platform_engine_mask = BIT(RCS0), \ .has_snoop = true, \ .has_coherent_ggtt = true, \ .dma_mask_size = 36, \ @@ -349,7 +349,7 @@ static const struct intel_device_info i965gm_info = { static const struct intel_device_info g45_info = { GEN4_FEATURES, PLATFORM(INTEL_G45), - .engine_mask = BIT(RCS0) | BIT(VCS0), + .platform_engine_mask = BIT(RCS0) | BIT(VCS0), .gpu_reset_clobbers_display = false, }; @@ -359,7 +359,7 @@ static const struct intel_device_info gm45_info = { .is_mobile = 1, .display.has_fbc = 1, .display.supports_tv = 1, - .engine_mask = BIT(RCS0) | BIT(VCS0), + .platform_engine_mask = BIT(RCS0) | BIT(VCS0), .gpu_reset_clobbers_display = false, }; @@ -368,7 +368,7 @@ static const struct intel_device_info gm45_info = { .pipe_mask = BIT(PIPE_A) | BIT(PIPE_B), \ .cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B), \ .display.has_hotplug = 1, \ - .engine_mask = BIT(RCS0) | BIT(VCS0), \ + .platform_engine_mask = BIT(RCS0) | BIT(VCS0), \ .has_snoop = true, \ .has_coherent_ggtt = true, \ /* ilk does support rc6, but we do not implement [power] contexts */ \ @@ -398,7 +398,7 @@ static const struct intel_device_info ilk_m_info = { .cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B), \ .display.has_hotplug = 1, \ .display.has_fbc = 1, \ - .engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0), \ + .platform_engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0), \ .has_coherent_ggtt = true, \ .has_llc 
= 1, \ .has_rc6 = 1, \ @@ -449,7 +449,7 @@ static const struct intel_device_info snb_m_gt2_info = { .cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | BIT(TRANSCODER_C), \ .display.has_hotplug = 1, \ .display.has_fbc = 1, \ - .engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0), \ + .platform_engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0), \ .has_coherent_ggtt = true, \ .has_llc = 1, \ .has_rc6 = 1, \ @@ -520,7 +520,7 @@ static const struct intel_device_info vlv_info = { .ppgtt_size = 31, .has_snoop = true, .has_coherent_ggtt = false, - .engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0), + .platform_engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0), .display_mmio_offset = VLV_DISPLAY_BASE, I9XX_PIPE_OFFSETS, I9XX_CURSOR_OFFSETS, @@ -531,7 +531,7 @@ static const struct intel_device_info vlv_info = { #define G75_FEATURES \ GEN7_FEATURES, \ - .engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0), \ + .platform_engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0), \ .cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | \ BIT(TRANSCODER_C) | BIT(TRANSCODER_EDP), \ .display.has_ddi = 1, \ @@ -598,7 +598,7 @@ static const struct intel_device_info bdw_rsvd_info = { static const struct intel_device_info bdw_gt3_info = { BDW_PLATFORM, .gt = 3, - .engine_mask = + .platform_engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS1), }; @@ -609,7 +609,7 @@ static const struct intel_device_info chv_info = { .cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | BIT(TRANSCODER_C), .display.has_hotplug = 1, .is_lp = 1, - .engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0), + .platform_engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0), .has_64bit_reloc = 1, .has_runtime_pm = 1, .has_rc6 = 1, @@ -662,7 +662,7 @@ static const struct intel_device_info skl_gt2_info = { #define SKL_GT3_PLUS_PLATFORM \ SKL_PLATFORM, \ - .engine_mask = \ + .platform_engine_mask = \ BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS1) @@ -681,7 +681,7 @@ static const struct intel_device_info skl_gt4_info = { .is_lp = 1, \ .num_supported_dbuf_slices = 1, \ .display.has_hotplug = 1, \ - .engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0), \ + .platform_engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0), \ .pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C), \ .cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | \ BIT(TRANSCODER_C) | BIT(TRANSCODER_EDP) | \ @@ -744,7 +744,7 @@ static const struct intel_device_info kbl_gt2_info = { static const struct intel_device_info kbl_gt3_info = { KBL_PLATFORM, .gt = 3, - .engine_mask = + .platform_engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS1), }; @@ -765,7 +765,7 @@ static const struct intel_device_info cfl_gt2_info = { static const struct intel_device_info cfl_gt3_info = { CFL_PLATFORM, .gt = 3, - .engine_mask = + .platform_engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS1), }; @@ -834,7 +834,7 @@ static const struct intel_device_info cnl_info = { static const struct intel_device_info icl_info = { GEN11_FEATURES, PLATFORM(INTEL_ICELAKE), - .engine_mask = + .platform_engine_mask = BIT(RCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS0) | BIT(VCS2), }; @@ -842,7 +842,7 @@ static const struct intel_device_info ehl_info = { GEN11_FEATURES, PLATFORM(INTEL_ELKHARTLAKE), .require_force_probe = 1, - .engine_mask = BIT(RCS0) | BIT(BCS0) | BIT(VCS0) | BIT(VECS0), + .platform_engine_mask = BIT(RCS0) | BIT(BCS0) | BIT(VCS0) | BIT(VECS0), .ppgtt_size = 36, }; 
@@ -878,7 +878,7 @@ static const struct intel_device_info tgl_info = { GEN12_FEATURES, PLATFORM(INTEL_TIGERLAKE), .display.has_modular_fia = 1, - .engine_mask = + .platform_engine_mask = BIT(RCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS0) | BIT(VCS2), }; @@ -891,7 +891,7 @@ static const struct intel_device_info rkl_info = { BIT(TRANSCODER_C), .require_force_probe = 1, .display.has_psr_hw_tracking = 0, - .engine_mask = + .platform_engine_mask = BIT(RCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS0), }; diff --git a/drivers/gpu/drm/i915/intel_device_info.c b/drivers/gpu/drm/i915/intel_device_info.c index 92ebea35c752..a362a66fce11 100644 --- a/drivers/gpu/drm/i915/intel_device_info.c +++ b/drivers/gpu/drm/i915/intel_device_info.c @@ -92,7 +92,6 @@ static const char *iommu_name(void) void intel_device_info_print_static(const struct intel_device_info *info, struct drm_printer *p) { - drm_printf(p, "engines: %x\n", info->engine_mask); drm_printf(p, "gen: %d\n", info->gen); drm_printf(p, "gt: %d\n", info->gt); drm_printf(p, "iommu: %s\n", iommu_name()); diff --git a/drivers/gpu/drm/i915/intel_device_info.h b/drivers/gpu/drm/i915/intel_device_info.h index fa60fdc1d75a..b010b6728432 100644 --- a/drivers/gpu/drm/i915/intel_device_info.h +++ b/drivers/gpu/drm/i915/intel_device_info.h @@ -157,7 +157,7 @@ struct intel_device_info { u8 gen; u8 gt; /* GT number, 0 if undefined */ - intel_engine_mask_t engine_mask; /* Engines supported by the HW */ + intel_engine_mask_t platform_engine_mask; /* Engines supported by the HW */ enum intel_platform platform; @@ -219,8 +219,6 @@ struct intel_runtime_info { u8 num_sprites[I915_MAX_PIPES]; u8 num_scalers[I915_MAX_PIPES]; - u8 num_engines; - /* Slice/subslice/EU info */ struct sseu_dev_info sseu; @@ -228,9 +226,6 @@ struct intel_runtime_info { u32 cs_timestamp_frequency_hz; u32 cs_timestamp_period_ns; - - /* Media engine access to SFC per instance */ - u8 vdbox_sfc_access; }; struct intel_driver_caps { diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c index 83e576cff161..f5edee17902a 100644 --- a/drivers/gpu/drm/i915/intel_uncore.c +++ b/drivers/gpu/drm/i915/intel_uncore.c @@ -1530,7 +1530,7 @@ static int intel_uncore_fw_domains_init(struct intel_uncore *uncore) if (INTEL_GEN(i915) >= 11) { /* we'll prune the domains of missing engines later */ - intel_engine_mask_t emask = INTEL_INFO(i915)->engine_mask; + intel_engine_mask_t emask = INTEL_INFO(i915)->platform_engine_mask; int i; uncore->funcs.force_wake_get = fw_domains_get_with_fallback; diff --git a/drivers/gpu/drm/i915/selftests/i915_request.c b/drivers/gpu/drm/i915/selftests/i915_request.c index 9271aad7f779..57dd6f5122ee 100644 --- a/drivers/gpu/drm/i915/selftests/i915_request.c +++ b/drivers/gpu/drm/i915/selftests/i915_request.c @@ -1454,7 +1454,7 @@ out_flush: idx++; } pr_info("Completed %lu waits for %lu fences across %d engines and %d cpus\n", - num_waits, num_fences, RUNTIME_INFO(i915)->num_engines, ncpus); + num_waits, num_fences, idx, ncpus); ret = igt_live_test_end(&live) ?: ret; out_contexts: diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_device.c b/drivers/gpu/drm/i915/selftests/mock_gem_device.c index 9b105b811f1f..9a46be05425a 100644 --- a/drivers/gpu/drm/i915/selftests/mock_gem_device.c +++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.c @@ -190,7 +190,8 @@ struct drm_i915_private *mock_gem_device(void) mock_init_ggtt(i915, &i915->ggtt); i915->gt.vm = i915_vm_get(&i915->ggtt.vm); - mkwrite_device_info(i915)->engine_mask = BIT(0); + 
mkwrite_device_info(i915)->platform_engine_mask = BIT(0); + i915->gt.info.engine_mask = BIT(0); i915->gt.engine[RCS0] = mock_engine(i915, "mock", RCS0); if (!i915->gt.engine[RCS0]) -- cgit v1.2.3
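
For reference, after the second patch above the authoritative engine list lives in gt->info rather than in intel_device_info, so runtime queries go through the GT. The helper below is a minimal sketch of that usage, not part of either patch: the function name is hypothetical, and it assumes the usual i915 headers ("gt/intel_gt.h", "i915_drv.h", drm/drm_print.h) for intel_gt_info_print(), for_each_engine() and drm_printf().

static void sketch_dump_gt_engines(struct intel_gt *gt, struct drm_printer *p)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	/* prints "available engines: %x" from the fuse-adjusted gt->info.engine_mask */
	intel_gt_info_print(&gt->info, p);

	/* walk the engines that were initialised from the runtime mask */
	for_each_engine(engine, gt, id)
		drm_printf(p, "\t%s: class %d, instance %d\n",
			   engine->name, engine->class, engine->instance);
}

Note that HAS_ENGINE() and for_each_engine_masked() now consult gt->info.engine_mask (see the i915_drv.h hunk above), while INTEL_INFO(i915)->platform_engine_mask keeps only the immutable per-platform list and gt->info.num_engines replaces the old RUNTIME_INFO() count.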