author    Linus Torvalds <torvalds@linux-foundation.org>  2020-06-03 01:04:15 +0300
committer Linus Torvalds <torvalds@linux-foundation.org>  2020-06-03 01:04:15 +0300
commit    faa392181a0bd42c5478175cef601adeecdc91b6 (patch)
tree      e020e1142e34786676d0cd40f539bccdbb66099e /drivers/gpu/drm/i915/selftests/i915_request.c
parent    cfa3b8068b09f25037146bfd5eed041b78878bee (diff)
parent    9ca1f474cea0edc14a1d7ec933e5472c0ff115d3 (diff)
Merge tag 'drm-next-2020-06-02' of git://anongit.freedesktop.org/drm/drm
Pull drm updates from Dave Airlie:
 "Highlights:

   - Core DRM had a lot of refactoring around managed drm resources to make drivers simpler.
   - Intel Tigerlake support is on by default
   - amdgpu now supports p2p PCI buffer sharing and encrypted GPU memory

  Details:

  core:
   - uapi: error out EBUSY when existing master
   - uapi: rework SET/DROP MASTER permission handling
   - remove drm_pci.h - drm_pci* are now legacy
   - introduced managed DRM resources
   - subclassing support for drm_framebuffer
   - simple encoder helper
   - edid improvements
   - vblank + writeback documentation improved
   - drm/mm - optimise tree searches
   - port drivers to use devm_drm_dev_alloc

  dma-buf:
   - add flag for p2p buffer support

  mst:
   - ACT timeout improvements
   - remove drm_dp_mst_has_audio
   - don't use 2nd TX slot - spec recommends against it

  bridge:
   - dw-hdmi various improvements
   - chrontel ch7033 support
   - fix stack issues with old gcc

  hdmi:
   - add unpack function for drm infoframe

  fbdev:
   - misc fbdev driver fixes

  i915:
   - uapi: global sseu pinning
   - uapi: OA buffer polling
   - uapi: remove generated perf code
   - uapi: per-engine default property values in sysfs
   - Tigerlake GEN12 enabled.
   - Lots of gem refactoring
   - Tigerlake enablement patches
   - move to drm_device logging
   - Icelake gamma HW readout
   - push MST link retrain to hotplug work
   - bandwidth atomic helpers
   - ICL fixes
   - RPS/GT refactoring
   - Cherryview full-ppgtt support
   - i915 locking guidelines documented
   - require linear fb stride to be 512 multiple on gen9
   - Tigerlake SAGV support

  amdgpu:
   - uapi: encrypted GPU memory handling
   - uapi: add MEM_SYNC IB flag
   - p2p dma-buf support
   - export VRAM dma-bufs
   - FRU chip access support
   - RAS/SR-IOV updates
   - Powerplay locking fixes
   - VCN DPG (powergating) enablement
   - GFX10 clockgating fixes
   - DC fixes
   - GPU reset fixes
   - navi SDMA fix
   - expose FP16 for modesetting
   - DP 1.4 compliance fixes
   - gfx10 soft recovery
   - Improved Critical Thermal Faults handling
   - resizable BAR on gmc10

  amdkfd:
   - uapi: GWS resource management
   - track GPU memory per process
   - report PCI domain in topology

  radeon:
   - safe reg list generator fixes

  nouveau:
   - HD audio fixes on recent systems
   - vGPU detection (fail probe if we're on one, for now)
   - Interlaced mode fixes (mostly avoidance on Turing, which doesn't support it)
   - SVM improvements/fixes
   - NVIDIA format modifier support
   - Misc other fixes.

  adv7511:
   - HDMI SPDIF support

  ast:
   - allocate crtc state size
   - fix double assignment
   - fix suspend

  bochs:
   - drop connector register

  cirrus:
   - move to tiny drivers.

  exynos:
   - fix imported dma-buf mapping
   - enable runtime PM
   - fixes and cleanups

  mediatek:
   - DPI pin mode swap
   - config mipi_tx current/impedance

  lima:
   - devfreq + cooling device support
   - task handling improvements
   - runtime PM support

  pl111:
   - vexpress init improvements
   - fix module auto-load

  rcar-du:
   - DT bindings conversion to YAML
   - Planes zpos sanity check and fix
   - MAINTAINERS entry for LVDS panel driver

  mcde:
   - fix return value

  mgag200:
   - use managed config init

  stm:
   - read endpoints from DT

  vboxvideo:
   - use PCI managed functions
   - drop WC mtrr

  vkms:
   - enable cursor by default

  rockchip:
   - afbc support

  virtio:
   - various cleanups

  qxl:
   - fix cursor notify port

  hisilicon:
   - 128-byte stride alignment fix

  sun4i:
   - improved format handling"

* tag 'drm-next-2020-06-02' of git://anongit.freedesktop.org/drm/drm: (1401 commits)
  drm/amd/display: Fix potential integer wraparound resulting in a hang
  drm/amd/display: drop cursor position check in atomic test
  drm/amdgpu: fix device attribute node create failed with multi gpu
  drm/nouveau: use correct conflicting framebuffer API
  drm/vblank: Fix -Wformat compile warnings on some arches
  drm/amdgpu: Sync with VM root BO when switching VM to CPU update mode
  drm/amd/display: Handle GPU reset for DC block
  drm/amdgpu: add apu flags (v2)
  drm/amd/powerpay: Disable gfxoff when setting manual mode on picasso and raven
  drm/amdgpu: fix pm sysfs node handling (v2)
  drm/amdgpu: move gpu_info parsing after common early init
  drm/amdgpu: move discovery gfx config fetching
  drm/nouveau/dispnv50: fix runtime pm imbalance on error
  drm/nouveau: fix runtime pm imbalance on error
  drm/nouveau: fix runtime pm imbalance on error
  drm/nouveau/debugfs: fix runtime pm imbalance on error
  drm/nouveau/nouveau/hmm: fix migrate zero page to GPU
  drm/nouveau/nouveau/hmm: fix nouveau_dmem_chunk allocations
  drm/nouveau/kms/nv50-: Share DP SST mode_valid() handling with MST
  drm/nouveau/kms/nv50-: Move 8BPC limit for MST into nv50_mstc_get_modes()
  ...
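One of the core themes above is the managed-resource work ("port drivers to use devm_drm_dev_alloc"). As a minimal sketch only -- assuming the devm_drm_dev_alloc(parent, driver, type, member) helper from drm_drv.h, with my_device and my_drm_driver as hypothetical names -- a driver embeds its drm_device and lets devres tie its lifetime to the parent device:

    /* Hypothetical driver struct; the "my_*" names are illustrative. */
    struct my_device {
            struct drm_device drm;
            void __iomem *regs;
    };

    static int my_probe(struct platform_device *pdev)
    {
            struct my_device *priv;

            /* Allocates my_device with the embedded drm_device initialised;
             * the final drm_dev_put() is registered as a devres action, so
             * no explicit free is needed on the error paths below. */
            priv = devm_drm_dev_alloc(&pdev->dev, &my_drm_driver,
                                      struct my_device, drm);
            if (IS_ERR(priv))
                    return PTR_ERR(priv);

            return drm_dev_register(&priv->drm, 0);
    }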
Diffstat (limited to 'drivers/gpu/drm/i915/selftests/i915_request.c')
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_request.c | 623
1 file changed, 602 insertions, 21 deletions
diff --git a/drivers/gpu/drm/i915/selftests/i915_request.c b/drivers/gpu/drm/i915/selftests/i915_request.c
index f89d9c42f1fa..6014e8dfcbb1 100644
--- a/drivers/gpu/drm/i915/selftests/i915_request.c
+++ b/drivers/gpu/drm/i915/selftests/i915_request.c
@@ -23,11 +23,13 @@
*/
#include <linux/prime_numbers.h>
+#include <linux/pm_qos.h>
#include "gem/i915_gem_pm.h"
#include "gem/selftests/mock_context.h"
#include "gt/intel_engine_pm.h"
+#include "gt/intel_engine_user.h"
#include "gt/intel_gt.h"
#include "i915_random.h"
@@ -51,6 +53,11 @@ static unsigned int num_uabi_engines(struct drm_i915_private *i915)
return count;
}
+static struct intel_engine_cs *rcs0(struct drm_i915_private *i915)
+{
+ return intel_engine_lookup_user(i915, I915_ENGINE_CLASS_RENDER, 0);
+}
+
static int igt_add_request(void *arg)
{
struct drm_i915_private *i915 = arg;
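With the i915->engine[RCS0] array indexing gone, the mock selftests resolve engines through the uabi (class, instance) namespace; rcs0() above wraps intel_engine_lookup_user(). The same lookup works for any engine class -- a sketch, with the video engine chosen purely for illustration:

    struct intel_engine_cs *engine;

    /* First instance of the video (BSD) class; NULL if the hardware lacks it. */
    engine = intel_engine_lookup_user(i915, I915_ENGINE_CLASS_VIDEO, 0);
    if (!engine)
            return -ENODEV;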
@@ -58,7 +65,7 @@ static int igt_add_request(void *arg)
/* Basic preliminary test to create a request and let it loose! */
- request = mock_request(i915->engine[RCS0]->kernel_context, HZ / 10);
+ request = mock_request(rcs0(i915)->kernel_context, HZ / 10);
if (!request)
return -ENOMEM;
@@ -76,7 +83,7 @@ static int igt_wait_request(void *arg)
/* Submit a request, then wait upon it */
- request = mock_request(i915->engine[RCS0]->kernel_context, T);
+ request = mock_request(rcs0(i915)->kernel_context, T);
if (!request)
return -ENOMEM;
@@ -145,7 +152,7 @@ static int igt_fence_wait(void *arg)
/* Submit a request, treat it as a fence and wait upon it */
- request = mock_request(i915->engine[RCS0]->kernel_context, T);
+ request = mock_request(rcs0(i915)->kernel_context, T);
if (!request)
return -ENOMEM;
@@ -420,7 +427,7 @@ static int mock_breadcrumbs_smoketest(void *arg)
{
struct drm_i915_private *i915 = arg;
struct smoketest t = {
- .engine = i915->engine[RCS0],
+ .engine = rcs0(i915),
.ncontexts = 1024,
.max_batch = 1024,
.request_alloc = __mock_request_alloc
@@ -809,10 +816,12 @@ static int recursive_batch_resolve(struct i915_vma *batch)
return PTR_ERR(cmd);
*cmd = MI_BATCH_BUFFER_END;
- intel_gt_chipset_flush(batch->vm->gt);
+ __i915_gem_object_flush_map(batch->obj, 0, sizeof(*cmd));
i915_gem_object_unpin_map(batch->obj);
+ intel_gt_chipset_flush(batch->vm->gt);
+
return 0;
}
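The reordering above fixes the flush sequence for a write-combining CPU map: the WC write must be flushed out of the CPU domain while the map is still pinned, and only then is the chipset flushed. A condensed sketch of the ordering this hunk establishes (the error-unwind path of live_sequential_engines below gets the same treatment):

    cmd = i915_gem_object_pin_map(obj, I915_MAP_WC);
    if (IS_ERR(cmd))
            return PTR_ERR(cmd);

    *cmd = MI_BATCH_BUFFER_END;                         /* CPU write via WC */
    __i915_gem_object_flush_map(obj, 0, sizeof(*cmd));  /* flush while mapped */
    i915_gem_object_unpin_map(obj);

    intel_gt_chipset_flush(gt);                         /* global flush last */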
@@ -858,13 +867,6 @@ static int live_all_engines(void *arg)
goto out_request;
}
- err = engine->emit_bb_start(request[idx],
- batch->node.start,
- batch->node.size,
- 0);
- GEM_BUG_ON(err);
- request[idx]->batch = batch;
-
i915_vma_lock(batch);
err = i915_request_await_object(request[idx], batch->obj, 0);
if (err == 0)
@@ -872,6 +874,13 @@ static int live_all_engines(void *arg)
i915_vma_unlock(batch);
GEM_BUG_ON(err);
+ err = engine->emit_bb_start(request[idx],
+ batch->node.start,
+ batch->node.size,
+ 0);
+ GEM_BUG_ON(err);
+ request[idx]->batch = batch;
+
i915_request_get(request[idx]);
i915_request_add(request[idx]);
idx++;
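These two hunks move emit_bb_start() after the vma lock/await block, so the request's fence dependencies on the batch object are recorded before any commands referencing it are emitted; the identical reordering is applied to live_sequential_engines in the next hunk. Schematically (the unchanged line hidden between the hunks is presumably i915_vma_move_to_active(); that name is an assumption, not shown in this diff):

    i915_vma_lock(batch);
    err = i915_request_await_object(rq, batch->obj, false);
    if (err == 0)
            err = i915_vma_move_to_active(batch, rq, 0);  /* assumed elided line */
    i915_vma_unlock(batch);

    if (err == 0)
            err = engine->emit_bb_start(rq, batch->node.start,
                                        batch->node.size, 0);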
@@ -986,13 +995,6 @@ static int live_sequential_engines(void *arg)
}
}
- err = engine->emit_bb_start(request[idx],
- batch->node.start,
- batch->node.size,
- 0);
- GEM_BUG_ON(err);
- request[idx]->batch = batch;
-
i915_vma_lock(batch);
err = i915_request_await_object(request[idx],
batch->obj, false);
@@ -1001,6 +1003,13 @@ static int live_sequential_engines(void *arg)
i915_vma_unlock(batch);
GEM_BUG_ON(err);
+ err = engine->emit_bb_start(request[idx],
+ batch->node.start,
+ batch->node.size,
+ 0);
+ GEM_BUG_ON(err);
+ request[idx]->batch = batch;
+
i915_request_get(request[idx]);
i915_request_add(request[idx]);
@@ -1053,9 +1062,12 @@ out_request:
I915_MAP_WC);
if (!IS_ERR(cmd)) {
*cmd = MI_BATCH_BUFFER_END;
- intel_gt_chipset_flush(engine->gt);
+ __i915_gem_object_flush_map(request[idx]->batch->obj,
+ 0, sizeof(*cmd));
i915_gem_object_unpin_map(request[idx]->batch->obj);
+
+ intel_gt_chipset_flush(engine->gt);
}
i915_vma_put(request[idx]->batch);
@@ -1233,7 +1245,7 @@ static int live_parallel_engines(void *arg)
struct igt_live_test t;
unsigned int idx;
- snprintf(name, sizeof(name), "%pS", fn);
+ snprintf(name, sizeof(name), "%ps", *fn);
err = igt_live_test_begin(&t, i915, __func__, name);
if (err)
break;
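The format-string fix is twofold. In this loop fn is a pointer into the static func[] array, so "%pS" applied to fn would resolve the address of the array slot (a data symbol plus offset) rather than the test function; *fn is the function pointer itself, and lowercase "%ps" prints the bare symbol name without the +offset/size suffix, giving clean test names. A standalone sketch of the idiom:

    static int (* const handlers[])(void *) = { s_sync0, s_many, NULL };
    int (* const *h)(void *);

    for (h = handlers; *h; h++)
            pr_info("running %ps\n", *h);   /* prints "s_sync0", "s_many" */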
@@ -1470,3 +1482,572 @@ int i915_request_live_selftests(struct drm_i915_private *i915)
return i915_subtests(tests, i915);
}
+
+static int switch_to_kernel_sync(struct intel_context *ce, int err)
+{
+ struct i915_request *rq;
+ struct dma_fence *fence;
+
+ rq = intel_engine_create_kernel_request(ce->engine);
+ if (IS_ERR(rq))
+ return PTR_ERR(rq);
+
+ fence = i915_active_fence_get(&ce->timeline->last_request);
+ if (fence) {
+ i915_request_await_dma_fence(rq, fence);
+ dma_fence_put(fence);
+ }
+
+ rq = i915_request_get(rq);
+ i915_request_add(rq);
+ if (i915_request_wait(rq, 0, HZ / 2) < 0 && !err)
+ err = -ETIME;
+ i915_request_put(rq);
+
+ while (!err && !intel_engine_is_idle(ce->engine))
+ intel_engine_flush_submission(ce->engine);
+
+ return err;
+}
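switch_to_kernel_sync() flushes a context off the hardware: it submits a kernel-context request that awaits ce's last request, waits up to half a second, then spins flushing submission until the engine idles. The perf bodies below call it before sampling the context runtime, since intel_context_get_total_runtime_ns() is only stable once the context has been switched out:

    err = switch_to_kernel_sync(ce, err);
    p->runtime = intel_context_get_total_runtime_ns(ce);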
+
+struct perf_stats {
+ struct intel_engine_cs *engine;
+ unsigned long count;
+ ktime_t time;
+ ktime_t busy;
+ u64 runtime;
+};
+
+struct perf_series {
+ struct drm_i915_private *i915;
+ unsigned int nengines;
+ struct intel_context *ce[];
+};
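perf_series ends in a C99 flexible array of contexts, one per uabi engine, which is why the allocation later uses struct_size(): it computes sizeof(*ps) + nengines * sizeof(ps->ce[0]) with overflow checking, saturating to SIZE_MAX so the kzalloc() fails rather than under-allocating:

    ps = kzalloc(struct_size(ps, ce, nengines), GFP_KERNEL);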
+
+static int s_sync0(void *arg)
+{
+ struct perf_series *ps = arg;
+ IGT_TIMEOUT(end_time);
+ unsigned int idx = 0;
+ int err = 0;
+
+ GEM_BUG_ON(!ps->nengines);
+ do {
+ struct i915_request *rq;
+
+ rq = i915_request_create(ps->ce[idx]);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ break;
+ }
+
+ i915_request_get(rq);
+ i915_request_add(rq);
+
+ if (i915_request_wait(rq, 0, HZ / 5) < 0)
+ err = -ETIME;
+ i915_request_put(rq);
+ if (err)
+ break;
+
+ if (++idx == ps->nengines)
+ idx = 0;
+ } while (!__igt_timeout(end_time, NULL));
+
+ return err;
+}
+
+static int s_sync1(void *arg)
+{
+ struct perf_series *ps = arg;
+ struct i915_request *prev = NULL;
+ IGT_TIMEOUT(end_time);
+ unsigned int idx = 0;
+ int err = 0;
+
+ GEM_BUG_ON(!ps->nengines);
+ do {
+ struct i915_request *rq;
+
+ rq = i915_request_create(ps->ce[idx]);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ break;
+ }
+
+ i915_request_get(rq);
+ i915_request_add(rq);
+
+ if (prev && i915_request_wait(prev, 0, HZ / 5) < 0)
+ err = -ETIME;
+ i915_request_put(prev);
+ prev = rq;
+ if (err)
+ break;
+
+ if (++idx == ps->nengines)
+ idx = 0;
+ } while (!__igt_timeout(end_time, NULL));
+ i915_request_put(prev);
+
+ return err;
+}
+
+static int s_many(void *arg)
+{
+ struct perf_series *ps = arg;
+ IGT_TIMEOUT(end_time);
+ unsigned int idx = 0;
+
+ GEM_BUG_ON(!ps->nengines);
+ do {
+ struct i915_request *rq;
+
+ rq = i915_request_create(ps->ce[idx]);
+ if (IS_ERR(rq))
+ return PTR_ERR(rq);
+
+ i915_request_add(rq);
+
+ if (++idx == ps->nengines)
+ idx = 0;
+ } while (!__igt_timeout(end_time, NULL));
+
+ return 0;
+}
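The three series bodies probe different submission regimes across the same set of contexts: s_sync0 waits on each request before emitting the next (pure round-trip latency), s_sync1 keeps exactly one request in flight by waiting on the previous one (depth-1 pipelining), and s_many submits without ever waiting (raw throughput, bounded only by the IGT timeout).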
+
+static int perf_series_engines(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ static int (* const func[])(void *arg) = {
+ s_sync0,
+ s_sync1,
+ s_many,
+ NULL,
+ };
+ const unsigned int nengines = num_uabi_engines(i915);
+ struct intel_engine_cs *engine;
+ int (* const *fn)(void *arg);
+ struct pm_qos_request qos;
+ struct perf_stats *stats;
+ struct perf_series *ps;
+ unsigned int idx;
+ int err = 0;
+
+ stats = kcalloc(nengines, sizeof(*stats), GFP_KERNEL);
+ if (!stats)
+ return -ENOMEM;
+
+ ps = kzalloc(struct_size(ps, ce, nengines), GFP_KERNEL);
+ if (!ps) {
+ kfree(stats);
+ return -ENOMEM;
+ }
+
+ cpu_latency_qos_add_request(&qos, 0); /* disable cstates */
+
+ ps->i915 = i915;
+ ps->nengines = nengines;
+
+ idx = 0;
+ for_each_uabi_engine(engine, i915) {
+ struct intel_context *ce;
+
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce)) {
+ err = PTR_ERR(ce);
+ goto out;
+ }
+
+ err = intel_context_pin(ce);
+ if (err) {
+ intel_context_put(ce);
+ goto out;
+ }
+
+ ps->ce[idx++] = ce;
+ }
+ GEM_BUG_ON(idx != ps->nengines);
+
+ for (fn = func; *fn && !err; fn++) {
+ char name[KSYM_NAME_LEN];
+ struct igt_live_test t;
+
+ snprintf(name, sizeof(name), "%ps", *fn);
+ err = igt_live_test_begin(&t, i915, __func__, name);
+ if (err)
+ break;
+
+ for (idx = 0; idx < nengines; idx++) {
+ struct perf_stats *p =
+ memset(&stats[idx], 0, sizeof(stats[idx]));
+ struct intel_context *ce = ps->ce[idx];
+
+ p->engine = ps->ce[idx]->engine;
+ intel_engine_pm_get(p->engine);
+
+ if (intel_engine_supports_stats(p->engine))
+ p->busy = intel_engine_get_busy_time(p->engine) + 1;
+ p->runtime = -intel_context_get_total_runtime_ns(ce);
+ p->time = ktime_get();
+ }
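Two encoding tricks in this setup loop: p->busy is stored as the busy time plus 1 so that a legitimate zero reading still records "stats supported" (the sentinel is subtracted when the delta is taken after the run), and p->runtime is seeded with the negated total runtime so the later += leaves exactly the runtime accrued during the test body.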
+
+ err = (*fn)(ps);
+ if (igt_live_test_end(&t))
+ err = -EIO;
+
+ for (idx = 0; idx < nengines; idx++) {
+ struct perf_stats *p = &stats[idx];
+ struct intel_context *ce = ps->ce[idx];
+ int integer, decimal;
+ u64 busy, dt;
+
+ p->time = ktime_sub(ktime_get(), p->time);
+ if (p->busy) {
+ p->busy = ktime_sub(intel_engine_get_busy_time(p->engine),
+ p->busy - 1);
+ }
+
+ err = switch_to_kernel_sync(ce, err);
+ p->runtime += intel_context_get_total_runtime_ns(ce);
+ intel_engine_pm_put(p->engine);
+
+ busy = 100 * ktime_to_ns(p->busy);
+ dt = ktime_to_ns(p->time);
+ if (dt) {
+ integer = div64_u64(busy, dt);
+ busy -= integer * dt;
+ decimal = div64_u64(100 * busy, dt);
+ } else {
+ integer = 0;
+ decimal = 0;
+ }
+
+ pr_info("%s %5s: { seqno:%d, busy:%d.%02d%%, runtime:%lldms, walltime:%lldms }\n",
+ name, p->engine->name, ce->timeline->seqno,
+ integer, decimal,
+ div_u64(p->runtime, 1000 * 1000),
+ div_u64(ktime_to_ns(p->time), 1000 * 1000));
+ }
+ }
+
+out:
+ for (idx = 0; idx < nengines; idx++) {
+ if (IS_ERR_OR_NULL(ps->ce[idx]))
+ break;
+
+ intel_context_unpin(ps->ce[idx]);
+ intel_context_put(ps->ce[idx]);
+ }
+ kfree(ps);
+
+ cpu_latency_qos_remove_request(&qos);
+ kfree(stats);
+ return err;
+}
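Two details of the measurement above are worth spelling out. First, cpu_latency_qos_add_request(&qos, 0) holds the CPUs out of deep C-states for the duration, so idle-exit latency does not inflate the request round-trip times; the request is dropped again on cleanup. Second, the busy percentage is fixed-point with two decimals. Worked through with assumed sample values busy = 1234567 ns over dt = 2000000 ns:

    busy    = 100 * 1234567           = 123456700
    integer = 123456700 / 2000000     = 61
    busy   -= 61 * 2000000            /* remainder 1456700 */
    decimal = 100 * 1456700 / 2000000 = 72      ->  "61.72%"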
+
+static int p_sync0(void *arg)
+{
+ struct perf_stats *p = arg;
+ struct intel_engine_cs *engine = p->engine;
+ struct intel_context *ce;
+ IGT_TIMEOUT(end_time);
+ unsigned long count;
+ bool busy;
+ int err = 0;
+
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce))
+ return PTR_ERR(ce);
+
+ err = intel_context_pin(ce);
+ if (err) {
+ intel_context_put(ce);
+ return err;
+ }
+
+ busy = false;
+ if (intel_engine_supports_stats(engine)) {
+ p->busy = intel_engine_get_busy_time(engine);
+ busy = true;
+ }
+
+ p->time = ktime_get();
+ count = 0;
+ do {
+ struct i915_request *rq;
+
+ rq = i915_request_create(ce);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ break;
+ }
+
+ i915_request_get(rq);
+ i915_request_add(rq);
+
+ err = 0;
+ if (i915_request_wait(rq, 0, HZ / 5) < 0)
+ err = -ETIME;
+ i915_request_put(rq);
+ if (err)
+ break;
+
+ count++;
+ } while (!__igt_timeout(end_time, NULL));
+ p->time = ktime_sub(ktime_get(), p->time);
+
+ if (busy) {
+ p->busy = ktime_sub(intel_engine_get_busy_time(engine),
+ p->busy);
+ }
+
+ err = switch_to_kernel_sync(ce, err);
+ p->runtime = intel_context_get_total_runtime_ns(ce);
+ p->count = count;
+
+ intel_context_unpin(ce);
+ intel_context_put(ce);
+ return err;
+}
+
+static int p_sync1(void *arg)
+{
+ struct perf_stats *p = arg;
+ struct intel_engine_cs *engine = p->engine;
+ struct i915_request *prev = NULL;
+ struct intel_context *ce;
+ IGT_TIMEOUT(end_time);
+ unsigned long count;
+ bool busy;
+ int err = 0;
+
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce))
+ return PTR_ERR(ce);
+
+ err = intel_context_pin(ce);
+ if (err) {
+ intel_context_put(ce);
+ return err;
+ }
+
+ busy = false;
+ if (intel_engine_supports_stats(engine)) {
+ p->busy = intel_engine_get_busy_time(engine);
+ busy = true;
+ }
+
+ p->time = ktime_get();
+ count = 0;
+ do {
+ struct i915_request *rq;
+
+ rq = i915_request_create(ce);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ break;
+ }
+
+ i915_request_get(rq);
+ i915_request_add(rq);
+
+ err = 0;
+ if (prev && i915_request_wait(prev, 0, HZ / 5) < 0)
+ err = -ETIME;
+ i915_request_put(prev);
+ prev = rq;
+ if (err)
+ break;
+
+ count++;
+ } while (!__igt_timeout(end_time, NULL));
+ i915_request_put(prev);
+ p->time = ktime_sub(ktime_get(), p->time);
+
+ if (busy) {
+ p->busy = ktime_sub(intel_engine_get_busy_time(engine),
+ p->busy);
+ }
+
+ err = switch_to_kernel_sync(ce, err);
+ p->runtime = intel_context_get_total_runtime_ns(ce);
+ p->count = count;
+
+ intel_context_unpin(ce);
+ intel_context_put(ce);
+ return err;
+}
+
+static int p_many(void *arg)
+{
+ struct perf_stats *p = arg;
+ struct intel_engine_cs *engine = p->engine;
+ struct intel_context *ce;
+ IGT_TIMEOUT(end_time);
+ unsigned long count;
+ int err = 0;
+ bool busy;
+
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce))
+ return PTR_ERR(ce);
+
+ err = intel_context_pin(ce);
+ if (err) {
+ intel_context_put(ce);
+ return err;
+ }
+
+ busy = false;
+ if (intel_engine_supports_stats(engine)) {
+ p->busy = intel_engine_get_busy_time(engine);
+ busy = true;
+ }
+
+ count = 0;
+ p->time = ktime_get();
+ do {
+ struct i915_request *rq;
+
+ rq = i915_request_create(ce);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ break;
+ }
+
+ i915_request_add(rq);
+ count++;
+ } while (!__igt_timeout(end_time, NULL));
+ p->time = ktime_sub(ktime_get(), p->time);
+
+ if (busy) {
+ p->busy = ktime_sub(intel_engine_get_busy_time(engine),
+ p->busy);
+ }
+
+ err = switch_to_kernel_sync(ce, err);
+ p->runtime = intel_context_get_total_runtime_ns(ce);
+ p->count = count;
+
+ intel_context_unpin(ce);
+ intel_context_put(ce);
+ return err;
+}
+
+static int perf_parallel_engines(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ static int (* const func[])(void *arg) = {
+ p_sync0,
+ p_sync1,
+ p_many,
+ NULL,
+ };
+ const unsigned int nengines = num_uabi_engines(i915);
+ struct intel_engine_cs *engine;
+ int (* const *fn)(void *arg);
+ struct pm_qos_request qos;
+ struct {
+ struct perf_stats p;
+ struct task_struct *tsk;
+ } *engines;
+ int err = 0;
+
+ engines = kcalloc(nengines, sizeof(*engines), GFP_KERNEL);
+ if (!engines)
+ return -ENOMEM;
+
+ cpu_latency_qos_add_request(&qos, 0);
+
+ for (fn = func; *fn; fn++) {
+ char name[KSYM_NAME_LEN];
+ struct igt_live_test t;
+ unsigned int idx;
+
+ snprintf(name, sizeof(name), "%ps", *fn);
+ err = igt_live_test_begin(&t, i915, __func__, name);
+ if (err)
+ break;
+
+ atomic_set(&i915->selftest.counter, nengines);
+
+ idx = 0;
+ for_each_uabi_engine(engine, i915) {
+ intel_engine_pm_get(engine);
+
+ memset(&engines[idx].p, 0, sizeof(engines[idx].p));
+ engines[idx].p.engine = engine;
+
+ engines[idx].tsk = kthread_run(*fn, &engines[idx].p,
+ "igt:%s", engine->name);
+ if (IS_ERR(engines[idx].tsk)) {
+ err = PTR_ERR(engines[idx].tsk);
+ intel_engine_pm_put(engine);
+ break;
+ }
+ get_task_struct(engines[idx++].tsk);
+ }
+
+ yield(); /* start all threads before we kthread_stop() */
+
+ idx = 0;
+ for_each_uabi_engine(engine, i915) {
+ int status;
+
+ if (IS_ERR(engines[idx].tsk))
+ break;
+
+ status = kthread_stop(engines[idx].tsk);
+ if (status && !err)
+ err = status;
+
+ intel_engine_pm_put(engine);
+ put_task_struct(engines[idx++].tsk);
+ }
+
+ if (igt_live_test_end(&t))
+ err = -EIO;
+ if (err)
+ break;
+
+ idx = 0;
+ for_each_uabi_engine(engine, i915) {
+ struct perf_stats *p = &engines[idx].p;
+ u64 busy = 100 * ktime_to_ns(p->busy);
+ u64 dt = ktime_to_ns(p->time);
+ int integer, decimal;
+
+ if (dt) {
+ integer = div64_u64(busy, dt);
+ busy -= integer * dt;
+ decimal = div64_u64(100 * busy, dt);
+ } else {
+ integer = 0;
+ decimal = 0;
+ }
+
+ GEM_BUG_ON(engine != p->engine);
+ pr_info("%s %5s: { count:%lu, busy:%d.%02d%%, runtime:%lldms, walltime:%lldms }\n",
+ name, engine->name, p->count, integer, decimal,
+ div_u64(p->runtime, 1000 * 1000),
+ div_u64(ktime_to_ns(p->time), 1000 * 1000));
+ idx++;
+ }
+ }
+
+ cpu_latency_qos_remove_request(&qos);
+ kfree(engines);
+ return err;
+}
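The thread handling here relies on two kthread properties: the workers run to their IGT timeout rather than polling kthread_should_stop(), and get_task_struct() keeps each task_struct valid so kthread_stop() can still reap the return value even if the thread has already exited. The bare pattern:

    struct task_struct *tsk;
    int ret;

    tsk = kthread_run(fn, data, "igt:%s", engine->name);
    if (IS_ERR(tsk))
            return PTR_ERR(tsk);
    get_task_struct(tsk);     /* hold a reference across kthread_stop() */

    /* ... workload runs until its own timeout ... */

    ret = kthread_stop(tsk);  /* waits for fn() and returns its value */
    put_task_struct(tsk);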
+
+int i915_request_perf_selftests(struct drm_i915_private *i915)
+{
+ static const struct i915_subtest tests[] = {
+ SUBTEST(perf_series_engines),
+ SUBTEST(perf_parallel_engines),
+ };
+
+ if (intel_gt_is_wedged(&i915->gt))
+ return 0;
+
+ return i915_subtests(tests, i915);
+}
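For this entry point to be exercised, it presumably also needs a matching selftest(...) entry in the perf selftest table (drivers/gpu/drm/i915/selftests/i915_perf_selftests.h in this era of the driver); that registration is not part of this file's diff, and the exact line below is an assumption:

    /* i915_perf_selftests.h (assumed) */
    selftest(request, i915_request_perf_selftests)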