Diffstat (limited to 'drivers/gpu/drm/i915/selftests')
18 files changed, 840 insertions, 235 deletions
diff --git a/drivers/gpu/drm/i915/selftests/huge_pages.c b/drivers/gpu/drm/i915/selftests/huge_pages.c index 5cc8101bb2b1..2ea69394f428 100644 --- a/drivers/gpu/drm/i915/selftests/huge_pages.c +++ b/drivers/gpu/drm/i915/selftests/huge_pages.c @@ -27,6 +27,7 @@ #include <linux/prime_numbers.h> #include "mock_drm.h" +#include "i915_random.h" static const unsigned int page_sizes[] = { I915_GTT_PAGE_SIZE_2M, @@ -989,17 +990,9 @@ static int gpu_write(struct i915_vma *vma, i915_vma_unpin(batch); i915_vma_close(batch); - err = rq->engine->emit_flush(rq, EMIT_INVALIDATE); - if (err) - goto err_request; - - err = i915_switch_context(rq); - if (err) - goto err_request; - - err = rq->engine->emit_bb_start(rq, - batch->node.start, batch->node.size, - flags); + err = engine->emit_bb_start(rq, + batch->node.start, batch->node.size, + flags); if (err) goto err_request; @@ -1047,19 +1040,78 @@ static int cpu_check(struct drm_i915_gem_object *obj, u32 dword, u32 val) return err; } +static int __igt_write_huge(struct i915_gem_context *ctx, + struct intel_engine_cs *engine, + struct drm_i915_gem_object *obj, + u64 size, u64 offset, + u32 dword, u32 val) +{ + struct drm_i915_private *i915 = to_i915(obj->base.dev); + struct i915_address_space *vm = ctx->ppgtt ? &ctx->ppgtt->base : &i915->ggtt.base; + unsigned int flags = PIN_USER | PIN_OFFSET_FIXED; + struct i915_vma *vma; + int err; + + vma = i915_vma_instance(obj, vm, NULL); + if (IS_ERR(vma)) + return PTR_ERR(vma); + + err = i915_vma_unbind(vma); + if (err) + goto out_vma_close; + + err = i915_vma_pin(vma, size, 0, flags | offset); + if (err) { + /* + * The ggtt may have some pages reserved so + * refrain from erroring out. + */ + if (err == -ENOSPC && i915_is_ggtt(vm)) + err = 0; + + goto out_vma_close; + } + + err = igt_check_page_sizes(vma); + if (err) + goto out_vma_unpin; + + err = gpu_write(vma, ctx, engine, dword, val); + if (err) { + pr_err("gpu-write failed at offset=%llx\n", offset); + goto out_vma_unpin; + } + + err = cpu_check(obj, dword, val); + if (err) { + pr_err("cpu-check failed at offset=%llx\n", offset); + goto out_vma_unpin; + } + +out_vma_unpin: + i915_vma_unpin(vma); +out_vma_close: + i915_vma_close(vma); + + return err; +} + static int igt_write_huge(struct i915_gem_context *ctx, struct drm_i915_gem_object *obj) { struct drm_i915_private *i915 = to_i915(obj->base.dev); struct i915_address_space *vm = ctx->ppgtt ? &ctx->ppgtt->base : &i915->ggtt.base; + static struct intel_engine_cs *engines[I915_NUM_ENGINES]; struct intel_engine_cs *engine; - struct i915_vma *vma; - unsigned int flags = PIN_USER | PIN_OFFSET_FIXED; + I915_RND_STATE(prng); + IGT_TIMEOUT(end_time); unsigned int max_page_size; unsigned int id; u64 max; u64 num; u64 size; + int *order; + int i, n; int err = 0; GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj)); @@ -1071,78 +1123,56 @@ static int igt_write_huge(struct i915_gem_context *ctx, max_page_size = rounddown_pow_of_two(obj->mm.page_sizes.sg); max = div_u64((vm->total - size), max_page_size); - vma = i915_vma_instance(obj, vm, NULL); - if (IS_ERR(vma)) - return PTR_ERR(vma); - + n = 0; for_each_engine(engine, i915, id) { - IGT_TIMEOUT(end_time); - if (!intel_engine_can_store_dword(engine)) { - pr_info("store-dword-imm not supported on engine=%u\n", - id); + pr_info("store-dword-imm not supported on engine=%u\n", id); continue; } + engines[n++] = engine; + } - /* - * Try various offsets until we timeout -- we want to avoid - * issues hidden by effectively always using offset = 0. 
- */ - for_each_prime_number_from(num, 0, max) { - u64 offset = num * max_page_size; - u32 dword; - - err = i915_vma_unbind(vma); - if (err) - goto out_vma_close; - - err = i915_vma_pin(vma, size, max_page_size, flags | offset); - if (err) { - /* - * The ggtt may have some pages reserved so - * refrain from erroring out. - */ - if (err == -ENOSPC && i915_is_ggtt(vm)) { - err = 0; - continue; - } - - goto out_vma_close; - } + if (!n) + return 0; - err = igt_check_page_sizes(vma); - if (err) - goto out_vma_unpin; + /* + * To keep things interesting when alternating between engines in our + * randomized order, let's also make feeding to the same engine a few + * times in succession a possibility by enlarging the permutation array. + */ + order = i915_random_order(n * I915_NUM_ENGINES, &prng); + if (!order) + return -ENOMEM; - dword = offset_in_page(num) / 4; + /* + * Try various offsets in an ascending/descending fashion until we + * time out -- we want to avoid issues hidden by effectively always using + * offset = 0. + */ + i = 0; + for_each_prime_number_from(num, 0, max) { + u64 offset_low = num * max_page_size; + u64 offset_high = (max - num) * max_page_size; + u32 dword = offset_in_page(num) / 4; - err = gpu_write(vma, ctx, engine, dword, num + 1); - if (err) { - pr_err("gpu-write failed at offset=%llx", offset); - goto out_vma_unpin; - } + engine = engines[order[i] % n]; + i = (i + 1) % (n * I915_NUM_ENGINES); - err = cpu_check(obj, dword, num + 1); - if (err) { - pr_err("cpu-check failed at offset=%llx", offset); - goto out_vma_unpin; - } + err = __igt_write_huge(ctx, engine, obj, size, offset_low, dword, num + 1); + if (err) + break; - i915_vma_unpin(vma); + err = __igt_write_huge(ctx, engine, obj, size, offset_high, dword, num + 1); + if (err) + break; - if (num > 0 && - igt_timeout(end_time, - "%s timed out on engine=%u at offset=%llx, max_page_size=%x\n", - __func__, id, offset, max_page_size)) - break; - } + if (igt_timeout(end_time, + "%s timed out on engine=%u, offset_low=%llx offset_high=%llx, max_page_size=%x\n", + __func__, engine->id, offset_low, offset_high, max_page_size)) + break; } -out_vma_unpin: - if (i915_vma_is_pinned(vma)) - i915_vma_unpin(vma); -out_vma_close: - i915_vma_close(vma); + kfree(order); return err; } @@ -1159,6 +1189,9 @@ static int igt_ppgtt_exhaust_huge(void *arg) int n, i; int err = -ENODEV; + if (supported == I915_GTT_PAGE_SIZE_4K) + return 0; + /* * Sanity check creating objects with a varying mix of page sizes -- * ensuring that our writes lands in the right place. @@ -1604,7 +1637,7 @@ static int igt_shrink_thp(void *arg) * shmem to truncate our pages. 
*/ i915_gem_shrink_all(i915); - if (!IS_ERR_OR_NULL(obj->mm.pages)) { + if (i915_gem_object_has_pages(obj)) { pr_err("shrink-all didn't truncate the pages\n"); err = -EINVAL; goto out_close; @@ -1716,6 +1749,7 @@ int i915_gem_huge_page_live_selftests(struct drm_i915_private *dev_priv) return PTR_ERR(file); mutex_lock(&dev_priv->drm.struct_mutex); + intel_runtime_pm_get(dev_priv); ctx = live_context(dev_priv, file); if (IS_ERR(ctx)) { @@ -1726,6 +1760,7 @@ int i915_gem_huge_page_live_selftests(struct drm_i915_private *dev_priv) err = i915_subtests(tests, ctx); out_unlock: + intel_runtime_pm_put(dev_priv); mutex_unlock(&dev_priv->drm.struct_mutex); mock_file_free(dev_priv, file); diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_coherency.c b/drivers/gpu/drm/i915/selftests/i915_gem_coherency.c index 35d778d70626..7a0d1e17c1ad 100644 --- a/drivers/gpu/drm/i915/selftests/i915_gem_coherency.c +++ b/drivers/gpu/drm/i915/selftests/i915_gem_coherency.c @@ -33,7 +33,7 @@ static int cpu_set(struct drm_i915_gem_object *obj, { unsigned int needs_clflush; struct page *page; - typeof(v) *map; + u32 *map; int err; err = i915_gem_obj_prepare_shmem_write(obj, &needs_clflush); @@ -59,7 +59,7 @@ static int cpu_get(struct drm_i915_gem_object *obj, { unsigned int needs_clflush; struct page *page; - typeof(v) map; + u32 *map; int err; err = i915_gem_obj_prepare_shmem_read(obj, &needs_clflush); @@ -82,7 +82,7 @@ static int gtt_set(struct drm_i915_gem_object *obj, u32 v) { struct i915_vma *vma; - typeof(v) *map; + u32 __iomem *map; int err; err = i915_gem_object_set_to_gtt_domain(obj, true); @@ -98,7 +98,7 @@ static int gtt_set(struct drm_i915_gem_object *obj, if (IS_ERR(map)) return PTR_ERR(map); - map[offset / sizeof(*map)] = v; + iowrite32(v, &map[offset / sizeof(*map)]); i915_vma_unpin_iomap(vma); return 0; @@ -109,7 +109,7 @@ static int gtt_get(struct drm_i915_gem_object *obj, u32 *v) { struct i915_vma *vma; - typeof(v) map; + u32 __iomem *map; int err; err = i915_gem_object_set_to_gtt_domain(obj, false); @@ -125,7 +125,7 @@ static int gtt_get(struct drm_i915_gem_object *obj, if (IS_ERR(map)) return PTR_ERR(map); - *v = map[offset / sizeof(*map)]; + *v = ioread32(&map[offset / sizeof(*map)]); i915_vma_unpin_iomap(vma); return 0; @@ -135,7 +135,7 @@ static int wc_set(struct drm_i915_gem_object *obj, unsigned long offset, u32 v) { - typeof(v) *map; + u32 *map; int err; err = i915_gem_object_set_to_wc_domain(obj, true); @@ -156,7 +156,7 @@ static int wc_get(struct drm_i915_gem_object *obj, unsigned long offset, u32 *v) { - typeof(v) map; + u32 *map; int err; err = i915_gem_object_set_to_wc_domain(obj, false); diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/selftests/i915_gem_context.c index def5052862ae..56a803d11916 100644 --- a/drivers/gpu/drm/i915/selftests/i915_gem_context.c +++ b/drivers/gpu/drm/i915/selftests/i915_gem_context.c @@ -158,14 +158,6 @@ static int gpu_fill(struct drm_i915_gem_object *obj, goto err_batch; } - err = engine->emit_flush(rq, EMIT_INVALIDATE); - if (err) - goto err_request; - - err = i915_switch_context(rq); - if (err) - goto err_request; - flags = 0; if (INTEL_GEN(vm->i915) <= 5) flags |= I915_DISPATCH_SECURE; @@ -272,6 +264,23 @@ out_unmap: return err; } +static int file_add_object(struct drm_file *file, + struct drm_i915_gem_object *obj) +{ + int err; + + GEM_BUG_ON(obj->base.handle_count); + + /* tie the object to the drm_file for easy reaping */ + err = idr_alloc(&file->object_idr, &obj->base, 1, 0, GFP_KERNEL); + if (err < 0) + return 
err; + + i915_gem_object_get(obj); + obj->base.handle_count++; + return 0; +} + static struct drm_i915_gem_object * create_test_object(struct i915_gem_context *ctx, struct drm_file *file, @@ -281,7 +290,6 @@ create_test_object(struct i915_gem_context *ctx, struct i915_address_space *vm = ctx->ppgtt ? &ctx->ppgtt->base : &ctx->i915->ggtt.base; u64 size; - u32 handle; int err; size = min(vm->total / 2, 1024ull * DW_PER_PAGE * PAGE_SIZE); @@ -291,8 +299,7 @@ create_test_object(struct i915_gem_context *ctx, if (IS_ERR(obj)) return obj; - /* tie the handle to the drm_file for easy reaping */ - err = drm_gem_handle_create(file, &obj->base, &handle); + err = file_add_object(file, obj); i915_gem_object_put(obj); if (err) return ERR_PTR(err); @@ -325,7 +332,7 @@ static int igt_ctx_exec(void *arg) LIST_HEAD(objects); unsigned long ncontexts, ndwords, dw; bool first_shared_gtt = true; - int err; + int err = -ENODEV; /* Create a few different contexts (with different mm) and write * through each ctx/mm using the GPU making sure those writes end @@ -369,7 +376,9 @@ static int igt_ctx_exec(void *arg) } } + intel_runtime_pm_get(i915); err = gpu_fill(obj, ctx, engine, dw); + intel_runtime_pm_put(i915); if (err) { pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) in ctx %u [full-ppgtt? %s], err=%d\n", ndwords, dw, max_dwords(obj), diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c index f463105ff48d..e1ddad635d73 100644 --- a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c +++ b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c @@ -355,6 +355,7 @@ static int igt_evict_contexts(void *arg) return 0; mutex_lock(&i915->drm.struct_mutex); + intel_runtime_pm_get(i915); /* Reserve a block so that we know we have enough to fit a few rq */ memset(&hole, 0, sizeof(hole)); @@ -463,6 +464,7 @@ out_locked: } if (drm_mm_node_allocated(&hole)) drm_mm_remove_node(&hole); + intel_runtime_pm_put(i915); mutex_unlock(&i915->drm.struct_mutex); return err; diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c index 9da0c9f99916..4a28d713a7d8 100644 --- a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c @@ -216,13 +216,21 @@ static int lowlevel_hole(struct drm_i915_private *i915, hole_size = (hole_end - hole_start) >> size; if (hole_size > KMALLOC_MAX_SIZE / sizeof(u32)) hole_size = KMALLOC_MAX_SIZE / sizeof(u32); - count = hole_size; + count = hole_size >> 1; + if (!count) { + pr_debug("%s: hole is too small [%llx - %llx] >> %d: %lld\n", + __func__, hole_start, hole_end, size, hole_size); + break; + } + do { - count >>= 1; order = i915_random_order(count, &prng); - } while (!order && count); - if (!order) - break; + if (order) + break; + } while (count >>= 1); + if (!count) + return -ENOMEM; + GEM_BUG_ON(!order); GEM_BUG_ON(count * BIT_ULL(size) > vm->total); GEM_BUG_ON(hole_start + count * BIT_ULL(size) > hole_end); @@ -267,7 +275,9 @@ static int lowlevel_hole(struct drm_i915_private *i915, mock_vma.node.size = BIT_ULL(size); mock_vma.node.start = addr; + intel_runtime_pm_get(i915); vm->insert_entries(vm, &mock_vma, I915_CACHE_NONE, 0); + intel_runtime_pm_put(i915); } count = n; @@ -697,18 +707,26 @@ static int drunk_hole(struct drm_i915_private *i915, unsigned int *order, count, n; struct i915_vma *vma; u64 hole_size; - int err; + int err = -ENODEV; hole_size = (hole_end - hole_start) >> size; if (hole_size > KMALLOC_MAX_SIZE / sizeof(u32)) hole_size = 
KMALLOC_MAX_SIZE / sizeof(u32); - count = hole_size; + count = hole_size >> 1; + if (!count) { + pr_debug("%s: hole is too small [%llx - %llx] >> %d: %lld\n", + __func__, hole_start, hole_end, size, hole_size); + break; + } + do { - count >>= 1; order = i915_random_order(count, &prng); - } while (!order && count); - if (!order) - break; + if (order) + break; + } while (count >>= 1); + if (!count) + return -ENOMEM; + GEM_BUG_ON(!order); /* Ignore allocation failures (i.e. don't report them as * a test failure) as we are purposefully allocating very @@ -956,7 +974,7 @@ static int exercise_ggtt(struct drm_i915_private *i915, u64 hole_start, hole_end, last = 0; struct drm_mm_node *node; IGT_TIMEOUT(end_time); - int err; + int err = 0; mutex_lock(&i915->drm.struct_mutex); restart: @@ -1047,6 +1065,7 @@ static int igt_ggtt_page(void *arg) goto out_remove; } + intel_runtime_pm_get(i915); for (n = 0; n < count; n++) { u64 offset = tmp.start + order[n] * PAGE_SIZE; u32 __iomem *vaddr; @@ -1055,7 +1074,7 @@ static int igt_ggtt_page(void *arg) i915_gem_object_get_dma_address(obj, 0), offset, I915_CACHE_NONE, 0); - vaddr = io_mapping_map_atomic_wc(&ggtt->mappable, offset); + vaddr = io_mapping_map_atomic_wc(&ggtt->iomap, offset); iowrite32(n, vaddr + n); io_mapping_unmap_atomic(vaddr); @@ -1073,7 +1092,7 @@ static int igt_ggtt_page(void *arg) i915_gem_object_get_dma_address(obj, 0), offset, I915_CACHE_NONE, 0); - vaddr = io_mapping_map_atomic_wc(&ggtt->mappable, offset); + vaddr = io_mapping_map_atomic_wc(&ggtt->iomap, offset); val = ioread32(vaddr + n); io_mapping_unmap_atomic(vaddr); @@ -1086,6 +1105,7 @@ static int igt_ggtt_page(void *arg) break; } } + intel_runtime_pm_put(i915); kfree(order); out_remove: @@ -1160,7 +1180,7 @@ static int igt_gtt_reserve(void *arg) struct drm_i915_gem_object *obj, *on; LIST_HEAD(objects); u64 total; - int err; + int err = -ENODEV; /* i915_gem_gtt_reserve() tries to reserve the precise range * for the node, and evicts if it has to. So our test checks that @@ -1351,7 +1371,7 @@ static int igt_gtt_insert(void *arg) }, *ii; LIST_HEAD(objects); u64 total; - int err; + int err = -ENODEV; /* i915_gem_gtt_insert() tries to allocate some free space in the GTT * to the node, evicting if required. diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_object.c b/drivers/gpu/drm/i915/selftests/i915_gem_object.c index 1b8774a42e48..f32aa6bb79e2 100644 --- a/drivers/gpu/drm/i915/selftests/i915_gem_object.c +++ b/drivers/gpu/drm/i915/selftests/i915_gem_object.c @@ -317,6 +317,7 @@ static int igt_partial_tiling(void *arg) } mutex_lock(&i915->drm.struct_mutex); + intel_runtime_pm_get(i915); if (1) { IGT_TIMEOUT(end); @@ -418,6 +419,7 @@ next_tiling: ; } out_unlock: + intel_runtime_pm_put(i915); mutex_unlock(&i915->drm.struct_mutex); i915_gem_object_unpin_pages(obj); out: diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_request.c b/drivers/gpu/drm/i915/selftests/i915_gem_request.c index a999161e8db1..647bf2bbd799 100644 --- a/drivers/gpu/drm/i915/selftests/i915_gem_request.c +++ b/drivers/gpu/drm/i915/selftests/i915_gem_request.c @@ -332,7 +332,7 @@ static int live_nop_request(void *arg) struct intel_engine_cs *engine; struct live_test t; unsigned int id; - int err; + int err = -ENODEV; /* Submit various sized batches of empty requests, to each engine * (individually), and wait for the batch to complete. 
We can check @@ -459,14 +459,6 @@ empty_request(struct intel_engine_cs *engine, if (IS_ERR(request)) return request; - err = engine->emit_flush(request, EMIT_INVALIDATE); - if (err) - goto out_request; - - err = i915_switch_context(request); - if (err) - goto out_request; - err = engine->emit_bb_start(request, batch->node.start, batch->node.size, @@ -675,12 +667,6 @@ static int live_all_engines(void *arg) goto out_request; } - err = engine->emit_flush(request[id], EMIT_INVALIDATE); - GEM_BUG_ON(err); - - err = i915_switch_context(request[id]); - GEM_BUG_ON(err); - err = engine->emit_bb_start(request[id], batch->node.start, batch->node.size, @@ -797,12 +783,6 @@ static int live_sequential_engines(void *arg) } } - err = engine->emit_flush(request[id], EMIT_INVALIDATE); - GEM_BUG_ON(err); - - err = i915_switch_context(request[id]); - GEM_BUG_ON(err); - err = engine->emit_bb_start(request[id], batch->node.start, batch->node.size, diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_timeline.c b/drivers/gpu/drm/i915/selftests/i915_gem_timeline.c index 4795877abe56..3000e6a7d82d 100644 --- a/drivers/gpu/drm/i915/selftests/i915_gem_timeline.c +++ b/drivers/gpu/drm/i915/selftests/i915_gem_timeline.c @@ -79,7 +79,7 @@ static int igt_sync(void *arg) }, *p; struct intel_timeline *tl; int order, offset; - int ret; + int ret = -ENODEV; tl = mock_timeline(0); if (!tl) diff --git a/drivers/gpu/drm/i915/selftests/i915_live_selftests.h b/drivers/gpu/drm/i915/selftests/i915_live_selftests.h index d7dd98a6acad..088f45bc6199 100644 --- a/drivers/gpu/drm/i915/selftests/i915_live_selftests.h +++ b/drivers/gpu/drm/i915/selftests/i915_live_selftests.h @@ -20,3 +20,4 @@ selftest(evict, i915_gem_evict_live_selftests) selftest(hugepages, i915_gem_huge_page_live_selftests) selftest(contexts, i915_gem_context_live_selftests) selftest(hangcheck, intel_hangcheck_live_selftests) +selftest(guc, intel_guc_live_selftest) diff --git a/drivers/gpu/drm/i915/selftests/i915_random.c b/drivers/gpu/drm/i915/selftests/i915_random.c index b85872cc7fbe..2088ae57aa89 100644 --- a/drivers/gpu/drm/i915/selftests/i915_random.c +++ b/drivers/gpu/drm/i915/selftests/i915_random.c @@ -57,7 +57,7 @@ unsigned int *i915_random_order(unsigned int count, struct rnd_state *state) { unsigned int *order, i; - order = kmalloc_array(count, sizeof(*order), GFP_KERNEL); + order = kmalloc_array(count, sizeof(*order), GFP_KERNEL | __GFP_NOWARN); if (!order) return order; diff --git a/drivers/gpu/drm/i915/selftests/i915_syncmap.c b/drivers/gpu/drm/i915/selftests/i915_syncmap.c index bcab3d00a785..47f4ae18a1ef 100644 --- a/drivers/gpu/drm/i915/selftests/i915_syncmap.c +++ b/drivers/gpu/drm/i915/selftests/i915_syncmap.c @@ -333,7 +333,7 @@ static int igt_syncmap_join_below(void *arg) { struct i915_syncmap *sync; unsigned int step, order, idx; - int err; + int err = -ENODEV; i915_syncmap_init(&sync); @@ -402,7 +402,7 @@ static int igt_syncmap_neighbours(void *arg) I915_RND_STATE(prng); IGT_TIMEOUT(end_time); struct i915_syncmap *sync; - int err; + int err = -ENODEV; /* * Each leaf holds KSYNCMAP seqno. 
Check that when we create KSYNCMAP @@ -447,7 +447,7 @@ static int igt_syncmap_compact(void *arg) { struct i915_syncmap *sync; unsigned int idx, order; - int err; + int err = -ENODEV; i915_syncmap_init(&sync); diff --git a/drivers/gpu/drm/i915/selftests/i915_vma.c b/drivers/gpu/drm/i915/selftests/i915_vma.c index 2e86ec136b35..eb89e301b602 100644 --- a/drivers/gpu/drm/i915/selftests/i915_vma.c +++ b/drivers/gpu/drm/i915/selftests/i915_vma.c @@ -150,7 +150,7 @@ static int igt_vma_create(void *arg) IGT_TIMEOUT(end_time); LIST_HEAD(contexts); LIST_HEAD(objects); - int err; + int err = -ENOMEM; /* Exercise creating many vma amonst many objections, checking the * vma creation and lookup routines. diff --git a/drivers/gpu/drm/i915/selftests/intel_guc.c b/drivers/gpu/drm/i915/selftests/intel_guc.c new file mode 100644 index 000000000000..3f9016466dea --- /dev/null +++ b/drivers/gpu/drm/i915/selftests/intel_guc.c @@ -0,0 +1,353 @@ +/* + * Copyright © 2017 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + * + */ + +#include "../i915_selftest.h" + +/* max doorbell number + negative test for each client type */ +#define ATTEMPTS (GUC_NUM_DOORBELLS + GUC_CLIENT_PRIORITY_NUM) + +static struct intel_guc_client *clients[ATTEMPTS]; + +static bool available_dbs(struct intel_guc *guc, u32 priority) +{ + unsigned long offset; + unsigned long end; + u16 id; + + /* first half is used for normal priority, second half for high */ + offset = 0; + end = GUC_NUM_DOORBELLS / 2; + if (priority <= GUC_CLIENT_PRIORITY_HIGH) { + offset = end; + end += offset; + } + + id = find_next_zero_bit(guc->doorbell_bitmap, end, offset); + if (id < end) + return true; + + return false; +} + +static int check_all_doorbells(struct intel_guc *guc) +{ + u16 db_id; + + pr_info_once("Max number of doorbells: %d", GUC_NUM_DOORBELLS); + for (db_id = 0; db_id < GUC_NUM_DOORBELLS; ++db_id) { + if (!doorbell_ok(guc, db_id)) { + pr_err("doorbell %d, not ok\n", db_id); + return -EIO; + } + } + + return 0; +} + +/* + * Basic client sanity check, handy to validate create_clients. + */ +static int validate_client(struct intel_guc_client *client, + int client_priority, + bool is_preempt_client) +{ + struct drm_i915_private *dev_priv = guc_to_i915(client->guc); + struct i915_gem_context *ctx_owner = is_preempt_client ? 
+ dev_priv->preempt_context : dev_priv->kernel_context; + + if (client->owner != ctx_owner || + client->engines != INTEL_INFO(dev_priv)->ring_mask || + client->priority != client_priority || + client->doorbell_id == GUC_DOORBELL_INVALID) + return -EINVAL; + else + return 0; +} + +static bool client_doorbell_in_sync(struct intel_guc_client *client) +{ + return doorbell_ok(client->guc, client->doorbell_id); +} + +/* + * Check that we're able to synchronize guc_clients with their doorbells + * + * We're creating clients and reserving doorbells once, at module load. During + * module lifetime, GuC, doorbell HW, and i915 state may go out of sync due to + * GuC being reset. In other words - GuC clients are still around, but the + * status of their doorbells may be incorrect. This is the reason behind + * validating that the doorbell status expected by the driver matches what the + * GuC/HW have. + */ +static int igt_guc_clients(void *args) +{ + struct drm_i915_private *dev_priv = args; + struct intel_guc *guc; + int err = 0; + + GEM_BUG_ON(!HAS_GUC(dev_priv)); + mutex_lock(&dev_priv->drm.struct_mutex); + + guc = &dev_priv->guc; + if (!guc) { + pr_err("No guc object!\n"); + err = -EINVAL; + goto unlock; + } + + err = check_all_doorbells(guc); + if (err) + goto unlock; + + /* + * Get rid of clients created during driver load because the test will + * recreate them. + */ + guc_clients_destroy(guc); + if (guc->execbuf_client || guc->preempt_client) { + pr_err("guc_clients_destroy lied!\n"); + err = -EINVAL; + goto unlock; + } + + err = guc_clients_create(guc); + if (err) { + pr_err("Failed to create clients\n"); + goto unlock; + } + GEM_BUG_ON(!guc->execbuf_client); + GEM_BUG_ON(!guc->preempt_client); + + err = validate_client(guc->execbuf_client, + GUC_CLIENT_PRIORITY_KMD_NORMAL, false); + if (err) { + pr_err("execbuf client validation failed\n"); + goto out; + } + + err = validate_client(guc->preempt_client, + GUC_CLIENT_PRIORITY_KMD_HIGH, true); + if (err) { + pr_err("preempt client validation failed\n"); + goto out; + } + + /* each client should now have reserved a doorbell */ + if (!has_doorbell(guc->execbuf_client) || + !has_doorbell(guc->preempt_client)) { + pr_err("guc_clients_create didn't reserve doorbells\n"); + err = -EINVAL; + goto out; + } + + /* Now create the doorbells */ + guc_clients_doorbell_init(guc); + + /* each client should now have received a doorbell */ + if (!client_doorbell_in_sync(guc->execbuf_client) || + !client_doorbell_in_sync(guc->preempt_client)) { + pr_err("failed to initialize the doorbells\n"); + err = -EINVAL; + goto out; + } + + /* + * Basic test - an attempt to reallocate a valid doorbell to the + * client it is currently assigned should not cause a failure. + */ + err = guc_clients_doorbell_init(guc); + if (err) + goto out; + + /* + * Negative test - a client with no doorbell (invalid db id). + * After destroying the doorbell, the db id is changed to + * GUC_DOORBELL_INVALID and the firmware will reject any attempt to + * allocate a doorbell with an invalid id (db has to be reserved before + * allocation). 
+ */ + destroy_doorbell(guc->execbuf_client); + if (client_doorbell_in_sync(guc->execbuf_client)) { + pr_err("destroy db did not work\n"); + err = -EINVAL; + goto out; + } + + unreserve_doorbell(guc->execbuf_client); + err = guc_clients_doorbell_init(guc); + if (err != -EIO) { + pr_err("unexpected (err = %d)", err); + goto out; + } + + if (!available_dbs(guc, guc->execbuf_client->priority)) { + pr_err("doorbell not available when it should be\n"); + err = -EIO; + goto out; + } + + /* clean after test */ + err = reserve_doorbell(guc->execbuf_client); + if (err) { + pr_err("failed to reserve the doorbell back\n"); + } + err = create_doorbell(guc->execbuf_client); + if (err) { + pr_err("recreate doorbell failed\n"); + goto out; + } + +out: + /* + * Leave clean state for other tests, plus the driver always destroys the + * clients during unload. + */ + destroy_doorbell(guc->execbuf_client); + destroy_doorbell(guc->preempt_client); + guc_clients_destroy(guc); + guc_clients_create(guc); + guc_clients_doorbell_init(guc); +unlock: + mutex_unlock(&dev_priv->drm.struct_mutex); + return err; +} + +/* + * Create as many clients as the number of doorbells. Note that there are + * already client(s)/doorbell(s) created during driver load, but this test + * creates its own and does not interact with the existing ones. + */ +static int igt_guc_doorbells(void *arg) +{ + struct drm_i915_private *dev_priv = arg; + struct intel_guc *guc; + int i, err = 0; + u16 db_id; + + GEM_BUG_ON(!HAS_GUC(dev_priv)); + mutex_lock(&dev_priv->drm.struct_mutex); + + guc = &dev_priv->guc; + if (!guc) { + pr_err("No guc object!\n"); + err = -EINVAL; + goto unlock; + } + + err = check_all_doorbells(guc); + if (err) + goto unlock; + + for (i = 0; i < ATTEMPTS; i++) { + clients[i] = guc_client_alloc(dev_priv, + INTEL_INFO(dev_priv)->ring_mask, + i % GUC_CLIENT_PRIORITY_NUM, + dev_priv->kernel_context); + + if (!clients[i]) { + pr_err("[%d] No guc client\n", i); + err = -EINVAL; + goto out; + } + + if (IS_ERR(clients[i])) { + if (PTR_ERR(clients[i]) != -ENOSPC) { + pr_err("[%d] unexpected error\n", i); + err = PTR_ERR(clients[i]); + goto out; + } + + if (available_dbs(guc, i % GUC_CLIENT_PRIORITY_NUM)) { + pr_err("[%d] non-db related alloc fail\n", i); + err = -EINVAL; + goto out; + } + + /* expected, ran out of dbs for this client type */ + continue; + } + + /* + * The check below is only valid because we keep a doorbell + * assigned during the whole life of the client. 
+ */ + if (clients[i]->stage_id >= GUC_NUM_DOORBELLS) { + pr_err("[%d] more clients than doorbells (%d >= %d)\n", + i, clients[i]->stage_id, GUC_NUM_DOORBELLS); + err = -EINVAL; + goto out; + } + + err = validate_client(clients[i], + i % GUC_CLIENT_PRIORITY_NUM, false); + if (err) { + pr_err("[%d] client_alloc sanity check failed!\n", i); + err = -EINVAL; + goto out; + } + + db_id = clients[i]->doorbell_id; + + err = create_doorbell(clients[i]); + if (err) { + pr_err("[%d] Failed to create a doorbell\n", i); + goto out; + } + + /* doorbell id shouldn't change, we are holding the mutex */ + if (db_id != clients[i]->doorbell_id) { + pr_err("[%d] doorbell id changed (%d != %d)\n", + i, db_id, clients[i]->doorbell_id); + err = -EINVAL; + goto out; + } + + err = check_all_doorbells(guc); + if (err) + goto out; + } + +out: + for (i = 0; i < ATTEMPTS; i++) + if (!IS_ERR_OR_NULL(clients[i])) { + destroy_doorbell(clients[i]); + guc_client_free(clients[i]); + } +unlock: + mutex_unlock(&dev_priv->drm.struct_mutex); + return err; +} + +int intel_guc_live_selftest(struct drm_i915_private *dev_priv) +{ + static const struct i915_subtest tests[] = { + SUBTEST(igt_guc_clients), + SUBTEST(igt_guc_doorbells), + }; + + if (!USES_GUC_SUBMISSION(dev_priv)) + return 0; + + return i915_subtests(tests, dev_priv); +} diff --git a/drivers/gpu/drm/i915/selftests/intel_hangcheck.c b/drivers/gpu/drm/i915/selftests/intel_hangcheck.c index 71ce06680d66..d1f91a533afa 100644 --- a/drivers/gpu/drm/i915/selftests/intel_hangcheck.c +++ b/drivers/gpu/drm/i915/selftests/intel_hangcheck.c @@ -114,14 +114,6 @@ static int emit_recurse_batch(struct hang *h, if (err) goto unpin_vma; - err = rq->engine->emit_flush(rq, EMIT_INVALIDATE); - if (err) - goto unpin_hws; - - err = i915_switch_context(rq); - if (err) - goto unpin_hws; - i915_vma_move_to_active(vma, rq, 0); if (!i915_gem_object_has_active_reference(vma->obj)) { i915_gem_object_get(vma->obj); @@ -140,6 +132,12 @@ static int emit_recurse_batch(struct hang *h, *batch++ = lower_32_bits(hws_address(hws, rq)); *batch++ = upper_32_bits(hws_address(hws, rq)); *batch++ = rq->fence.seqno; + *batch++ = MI_ARB_CHECK; + + memset(batch, 0, 1024); + batch += 1024 / sizeof(*batch); + + *batch++ = MI_ARB_CHECK; *batch++ = MI_BATCH_BUFFER_START | 1 << 8 | 1; *batch++ = lower_32_bits(vma->node.start); *batch++ = upper_32_bits(vma->node.start); @@ -148,6 +146,12 @@ static int emit_recurse_batch(struct hang *h, *batch++ = 0; *batch++ = lower_32_bits(hws_address(hws, rq)); *batch++ = rq->fence.seqno; + *batch++ = MI_ARB_CHECK; + + memset(batch, 0, 1024); + batch += 1024 / sizeof(*batch); + + *batch++ = MI_ARB_CHECK; *batch++ = MI_BATCH_BUFFER_START | 1 << 8; *batch++ = lower_32_bits(vma->node.start); } else if (INTEL_GEN(i915) >= 4) { @@ -155,12 +159,24 @@ static int emit_recurse_batch(struct hang *h, *batch++ = 0; *batch++ = lower_32_bits(hws_address(hws, rq)); *batch++ = rq->fence.seqno; + *batch++ = MI_ARB_CHECK; + + memset(batch, 0, 1024); + batch += 1024 / sizeof(*batch); + + *batch++ = MI_ARB_CHECK; *batch++ = MI_BATCH_BUFFER_START | 2 << 6; *batch++ = lower_32_bits(vma->node.start); } else { *batch++ = MI_STORE_DWORD_IMM; *batch++ = lower_32_bits(hws_address(hws, rq)); *batch++ = rq->fence.seqno; + *batch++ = MI_ARB_CHECK; + + memset(batch, 0, 1024); + batch += 1024 / sizeof(*batch); + + *batch++ = MI_ARB_CHECK; *batch++ = MI_BATCH_BUFFER_START | 2 << 6 | 1; *batch++ = lower_32_bits(vma->node.start); } @@ -173,7 +189,6 @@ static int emit_recurse_batch(struct hang *h, err = 
rq->engine->emit_bb_start(rq, vma->node.start, PAGE_SIZE, flags); -unpin_hws: i915_vma_unpin(hws); unpin_vma: i915_vma_unpin(vma); @@ -243,6 +258,16 @@ static void hang_fini(struct hang *h) i915_gem_wait_for_idle(h->i915, I915_WAIT_LOCKED); } +static bool wait_for_hang(struct hang *h, struct drm_i915_gem_request *rq) +{ + return !(wait_for_us(i915_seqno_passed(hws_seqno(h, rq), + rq->fence.seqno), + 10) && + wait_for(i915_seqno_passed(hws_seqno(h, rq), + rq->fence.seqno), + 1000)); +} + static int igt_hang_sanitycheck(void *arg) { struct drm_i915_private *i915 = arg; @@ -305,6 +330,9 @@ static void global_reset_lock(struct drm_i915_private *i915) struct intel_engine_cs *engine; enum intel_engine_id id; + pr_debug("%s: current gpu_error=%08lx\n", + __func__, i915->gpu_error.flags); + while (test_and_set_bit(I915_RESET_BACKOFF, &i915->gpu_error.flags)) wait_event(i915->gpu_error.reset_queue, !test_bit(I915_RESET_BACKOFF, @@ -362,54 +390,128 @@ static int igt_global_reset(void *arg) return err; } -static int igt_reset_engine(void *arg) +static int __igt_reset_engine(struct drm_i915_private *i915, bool active) { - struct drm_i915_private *i915 = arg; struct intel_engine_cs *engine; enum intel_engine_id id; - unsigned int reset_count, reset_engine_count; + struct hang h; int err = 0; - /* Check that we can issue a global GPU and engine reset */ + /* Check that we can issue an engine reset on an idle engine (no-op) */ if (!intel_has_reset_engine(i915)) return 0; + if (active) { + mutex_lock(&i915->drm.struct_mutex); + err = hang_init(&h, i915); + mutex_unlock(&i915->drm.struct_mutex); + if (err) + return err; + } + for_each_engine(engine, i915, id) { - set_bit(I915_RESET_ENGINE + engine->id, &i915->gpu_error.flags); + unsigned int reset_count, reset_engine_count; + IGT_TIMEOUT(end_time); + + if (active && !intel_engine_can_store_dword(engine)) + continue; + reset_count = i915_reset_count(&i915->gpu_error); reset_engine_count = i915_reset_engine_count(&i915->gpu_error, engine); - err = i915_reset_engine(engine, I915_RESET_QUIET); - if (err) { - pr_err("i915_reset_engine failed\n"); - break; - } + set_bit(I915_RESET_ENGINE + id, &i915->gpu_error.flags); + do { + if (active) { + struct drm_i915_gem_request *rq; + + mutex_lock(&i915->drm.struct_mutex); + rq = hang_create_request(&h, engine, + i915->kernel_context); + if (IS_ERR(rq)) { + err = PTR_ERR(rq); + mutex_unlock(&i915->drm.struct_mutex); + break; + } + + i915_gem_request_get(rq); + __i915_add_request(rq, true); + mutex_unlock(&i915->drm.struct_mutex); + + if (!wait_for_hang(&h, rq)) { + struct drm_printer p = drm_info_printer(i915->drm.dev); + + pr_err("%s: Failed to start request %x, at %x\n", + __func__, rq->fence.seqno, hws_seqno(&h, rq)); + intel_engine_dump(engine, &p, + "%s\n", engine->name); + + i915_gem_request_put(rq); + err = -EIO; + break; + } - if (i915_reset_count(&i915->gpu_error) != reset_count) { - pr_err("Full GPU reset recorded! (engine reset expected)\n"); - err = -EINVAL; - break; - } + i915_gem_request_put(rq); + } + + engine->hangcheck.stalled = true; + engine->hangcheck.seqno = + intel_engine_get_seqno(engine); + + err = i915_reset_engine(engine, I915_RESET_QUIET); + if (err) { + pr_err("i915_reset_engine failed\n"); + break; + } + + if (i915_reset_count(&i915->gpu_error) != reset_count) { + pr_err("Full GPU reset recorded! 
(engine reset expected)\n"); + err = -EINVAL; + break; + } + + reset_engine_count += active; + if (i915_reset_engine_count(&i915->gpu_error, engine) != + reset_engine_count) { + pr_err("%s engine reset %srecorded!\n", + engine->name, active ? "not " : ""); + err = -EINVAL; + break; + } + + engine->hangcheck.stalled = false; + } while (time_before(jiffies, end_time)); + clear_bit(I915_RESET_ENGINE + id, &i915->gpu_error.flags); - if (i915_reset_engine_count(&i915->gpu_error, engine) == - reset_engine_count) { - pr_err("No %s engine reset recorded!\n", engine->name); - err = -EINVAL; + if (err) break; - } - clear_bit(I915_RESET_ENGINE + engine->id, - &i915->gpu_error.flags); + cond_resched(); } if (i915_terminally_wedged(&i915->gpu_error)) err = -EIO; + if (active) { + mutex_lock(&i915->drm.struct_mutex); + hang_fini(&h); + mutex_unlock(&i915->drm.struct_mutex); + } + return err; } +static int igt_reset_idle_engine(void *arg) +{ + return __igt_reset_engine(arg, false); +} + +static int igt_reset_active_engine(void *arg) +{ + return __igt_reset_engine(arg, true); +} + static int active_engine(void *data) { struct intel_engine_cs *engine = data; @@ -471,11 +573,12 @@ err_file: return err; } -static int igt_reset_active_engines(void *arg) +static int __igt_reset_engine_others(struct drm_i915_private *i915, + bool active) { - struct drm_i915_private *i915 = arg; - struct intel_engine_cs *engine, *active; + struct intel_engine_cs *engine, *other; enum intel_engine_id id, tmp; + struct hang h; int err = 0; /* Check that issuing a reset on one engine does not interfere @@ -485,24 +588,36 @@ static int igt_reset_active_engines(void *arg) if (!intel_has_reset_engine(i915)) return 0; + if (active) { + mutex_lock(&i915->drm.struct_mutex); + err = hang_init(&h, i915); + mutex_unlock(&i915->drm.struct_mutex); + if (err) + return err; + } + for_each_engine(engine, i915, id) { - struct task_struct *threads[I915_NUM_ENGINES]; + struct task_struct *threads[I915_NUM_ENGINES] = {}; unsigned long resets[I915_NUM_ENGINES]; unsigned long global = i915_reset_count(&i915->gpu_error); + unsigned long count = 0; IGT_TIMEOUT(end_time); + if (active && !intel_engine_can_store_dword(engine)) + continue; + memset(threads, 0, sizeof(threads)); - for_each_engine(active, i915, tmp) { + for_each_engine(other, i915, tmp) { struct task_struct *tsk; - if (active == engine) - continue; - resets[tmp] = i915_reset_engine_count(&i915->gpu_error, - active); + other); + + if (other == engine) + continue; - tsk = kthread_run(active_engine, active, - "igt/%s", active->name); + tsk = kthread_run(active_engine, other, + "igt/%s", other->name); if (IS_ERR(tsk)) { err = PTR_ERR(tsk); goto unwind; @@ -512,20 +627,70 @@ static int igt_reset_active_engines(void *arg) get_task_struct(tsk); } - set_bit(I915_RESET_ENGINE + engine->id, &i915->gpu_error.flags); + set_bit(I915_RESET_ENGINE + id, &i915->gpu_error.flags); do { + if (active) { + struct drm_i915_gem_request *rq; + + mutex_lock(&i915->drm.struct_mutex); + rq = hang_create_request(&h, engine, + i915->kernel_context); + if (IS_ERR(rq)) { + err = PTR_ERR(rq); + mutex_unlock(&i915->drm.struct_mutex); + break; + } + + i915_gem_request_get(rq); + __i915_add_request(rq, true); + mutex_unlock(&i915->drm.struct_mutex); + + if (!wait_for_hang(&h, rq)) { + struct drm_printer p = drm_info_printer(i915->drm.dev); + + pr_err("%s: Failed to start request %x, at %x\n", + __func__, rq->fence.seqno, hws_seqno(&h, rq)); + intel_engine_dump(engine, &p, + "%s\n", engine->name); + + i915_gem_request_put(rq); 
+ err = -EIO; + break; + } + + i915_gem_request_put(rq); + } + + engine->hangcheck.stalled = true; + engine->hangcheck.seqno = + intel_engine_get_seqno(engine); + err = i915_reset_engine(engine, I915_RESET_QUIET); if (err) { - pr_err("i915_reset_engine(%s) failed, err=%d\n", - engine->name, err); + pr_err("i915_reset_engine(%s:%s) failed, err=%d\n", + engine->name, active ? "active" : "idle", err); break; } + + engine->hangcheck.stalled = false; + count++; } while (time_before(jiffies, end_time)); - clear_bit(I915_RESET_ENGINE + engine->id, - &i915->gpu_error.flags); + clear_bit(I915_RESET_ENGINE + id, &i915->gpu_error.flags); + pr_info("i915_reset_engine(%s:%s): %lu resets\n", + engine->name, active ? "active" : "idle", count); + + if (i915_reset_engine_count(&i915->gpu_error, engine) - + resets[engine->id] != (active ? count : 0)) { + pr_err("i915_reset_engine(%s:%s): reset %lu times, but reported %lu\n", + engine->name, active ? "active" : "idle", count, + i915_reset_engine_count(&i915->gpu_error, + engine) - resets[engine->id]); + if (!err) + err = -EINVAL; + } unwind: - for_each_engine(active, i915, tmp) { + for_each_engine(other, i915, tmp) { int ret; if (!threads[tmp]) @@ -533,27 +698,29 @@ unwind: ret = kthread_stop(threads[tmp]); if (ret) { - pr_err("kthread for active engine %s failed, err=%d\n", - active->name, ret); + pr_err("kthread for other engine %s failed, err=%d\n", + other->name, ret); if (!err) err = ret; } put_task_struct(threads[tmp]); if (resets[tmp] != i915_reset_engine_count(&i915->gpu_error, - active)) { + other)) { pr_err("Innocent engine %s was reset (count=%ld)\n", - active->name, + other->name, i915_reset_engine_count(&i915->gpu_error, - active) - resets[tmp]); - err = -EIO; + other) - resets[tmp]); + if (!err) + err = -EINVAL; } } if (global != i915_reset_count(&i915->gpu_error)) { pr_err("Global reset (count=%ld)!\n", i915_reset_count(&i915->gpu_error) - global); - err = -EIO; + if (!err) + err = -EINVAL; } if (err) @@ -565,9 +732,25 @@ unwind: if (i915_terminally_wedged(&i915->gpu_error)) err = -EIO; + if (active) { + mutex_lock(&i915->drm.struct_mutex); + hang_fini(&h); + mutex_unlock(&i915->drm.struct_mutex); + } + return err; } +static int igt_reset_idle_engine_others(void *arg) +{ + return __igt_reset_engine_others(arg, false); +} + +static int igt_reset_active_engine_others(void *arg) +{ + return __igt_reset_engine_others(arg, true); +} + static u32 fake_hangcheck(struct drm_i915_gem_request *rq) { u32 reset_count; @@ -583,16 +766,6 @@ static u32 fake_hangcheck(struct drm_i915_gem_request *rq) return reset_count; } -static bool wait_for_hang(struct hang *h, struct drm_i915_gem_request *rq) -{ - return !(wait_for_us(i915_seqno_passed(hws_seqno(h, rq), - rq->fence.seqno), - 10) && - wait_for(i915_seqno_passed(hws_seqno(h, rq), - rq->fence.seqno), - 1000)); -} - static int igt_wait_reset(void *arg) { struct drm_i915_private *i915 = arg; @@ -626,9 +799,9 @@ static int igt_wait_reset(void *arg) if (!wait_for_hang(&h, rq)) { struct drm_printer p = drm_info_printer(i915->drm.dev); - pr_err("Failed to start request %x, at %x\n", - rq->fence.seqno, hws_seqno(&h, rq)); - intel_engine_dump(rq->engine, &p); + pr_err("%s: Failed to start request %x, at %x\n", + __func__, rq->fence.seqno, hws_seqno(&h, rq)); + intel_engine_dump(rq->engine, &p, "%s\n", rq->engine->name); i915_reset(i915, 0); i915_gem_set_wedged(i915); @@ -721,9 +894,10 @@ static int igt_reset_queue(void *arg) if (!wait_for_hang(&h, prev)) { struct drm_printer p = drm_info_printer(i915->drm.dev); - 
pr_err("Failed to start request %x, at %x\n", - prev->fence.seqno, hws_seqno(&h, prev)); - intel_engine_dump(rq->engine, &p); + pr_err("%s: Failed to start request %x, at %x\n", + __func__, prev->fence.seqno, hws_seqno(&h, prev)); + intel_engine_dump(prev->engine, &p, + "%s\n", prev->engine->name); i915_gem_request_put(rq); i915_gem_request_put(prev); @@ -827,9 +1001,9 @@ static int igt_handle_error(void *arg) if (!wait_for_hang(&h, rq)) { struct drm_printer p = drm_info_printer(i915->drm.dev); - pr_err("Failed to start request %x, at %x\n", - rq->fence.seqno, hws_seqno(&h, rq)); - intel_engine_dump(rq->engine, &p); + pr_err("%s: Failed to start request %x, at %x\n", + __func__, rq->fence.seqno, hws_seqno(&h, rq)); + intel_engine_dump(rq->engine, &p, "%s\n", rq->engine->name); i915_reset(i915, 0); i915_gem_set_wedged(i915); @@ -872,21 +1046,26 @@ int intel_hangcheck_live_selftests(struct drm_i915_private *i915) static const struct i915_subtest tests[] = { SUBTEST(igt_global_reset), /* attempt to recover GPU first */ SUBTEST(igt_hang_sanitycheck), - SUBTEST(igt_reset_engine), - SUBTEST(igt_reset_active_engines), + SUBTEST(igt_reset_idle_engine), + SUBTEST(igt_reset_active_engine), + SUBTEST(igt_reset_idle_engine_others), + SUBTEST(igt_reset_active_engine_others), SUBTEST(igt_wait_reset), SUBTEST(igt_reset_queue), SUBTEST(igt_handle_error), }; + bool saved_hangcheck; int err; if (!intel_has_gpu_reset(i915)) return 0; intel_runtime_pm_get(i915); + saved_hangcheck = fetch_and_zero(&i915_modparams.enable_hangcheck); err = i915_subtests(tests, i915); + i915_modparams.enable_hangcheck = saved_hangcheck; intel_runtime_pm_put(i915); return err; diff --git a/drivers/gpu/drm/i915/selftests/intel_uncore.c b/drivers/gpu/drm/i915/selftests/intel_uncore.c index 3cac22eb47ce..2f6367643171 100644 --- a/drivers/gpu/drm/i915/selftests/intel_uncore.c +++ b/drivers/gpu/drm/i915/selftests/intel_uncore.c @@ -120,10 +120,10 @@ static int intel_uncore_check_forcewake_domains(struct drm_i915_private *dev_pri !IS_CHERRYVIEW(dev_priv)) return 0; - if (IS_VALLEYVIEW(dev_priv)) /* XXX system lockup! */ - return 0; - - if (IS_BROADWELL(dev_priv)) /* XXX random GPU hang afterwards! */ + /* + * This test may lockup the machine or cause GPU hangs afterwards. 
+ */ + if (!IS_ENABLED(CONFIG_DRM_I915_SELFTEST_BROKEN)) return 0; valid = kzalloc(BITS_TO_LONGS(FW_RANGE) * sizeof(*valid), @@ -148,7 +148,10 @@ static int intel_uncore_check_forcewake_domains(struct drm_i915_private *dev_pri for_each_set_bit(offset, valid, FW_RANGE) { i915_reg_t reg = { offset }; + iosf_mbi_punit_acquire(); intel_uncore_forcewake_reset(dev_priv, false); + iosf_mbi_punit_release(); + check_for_unclaimed_mmio(dev_priv); (void)I915_READ(reg); diff --git a/drivers/gpu/drm/i915/selftests/mock_engine.c b/drivers/gpu/drm/i915/selftests/mock_engine.c index 331c2b09869e..55c0e2c15782 100644 --- a/drivers/gpu/drm/i915/selftests/mock_engine.c +++ b/drivers/gpu/drm/i915/selftests/mock_engine.c @@ -32,6 +32,13 @@ static struct mock_request *first_request(struct mock_engine *engine) link); } +static void advance(struct mock_engine *engine, + struct mock_request *request) +{ + list_del_init(&request->link); + mock_seqno_advance(&engine->base, request->base.global_seqno); +} + static void hw_delay_complete(struct timer_list *t) { struct mock_engine *engine = from_timer(engine, t, hw_delay); @@ -39,15 +46,23 @@ static void hw_delay_complete(struct timer_list *t) spin_lock(&engine->hw_lock); - request = first_request(engine); - if (request) { - list_del_init(&request->link); - mock_seqno_advance(&engine->base, request->base.global_seqno); - } - + /* Timer fired, first request is complete */ request = first_request(engine); if (request) - mod_timer(&engine->hw_delay, jiffies + request->delay); + advance(engine, request); + + /* + * Also immediately signal any subsequent 0-delay requests, but + * requeue the timer for the next delayed request. + */ + while ((request = first_request(engine))) { + if (request->delay) { + mod_timer(&engine->hw_delay, jiffies + request->delay); + break; + } + + advance(engine, request); + } spin_unlock(&engine->hw_lock); } @@ -98,16 +113,22 @@ static void mock_submit_request(struct drm_i915_gem_request *request) spin_lock_irq(&engine->hw_lock); list_add_tail(&mock->link, &engine->hw_queue); - if (mock->link.prev == &engine->hw_queue) - mod_timer(&engine->hw_delay, jiffies + mock->delay); + if (mock->link.prev == &engine->hw_queue) { + if (mock->delay) + mod_timer(&engine->hw_delay, jiffies + mock->delay); + else + advance(engine, mock); + } spin_unlock_irq(&engine->hw_lock); } static struct intel_ring *mock_ring(struct intel_engine_cs *engine) { - const unsigned long sz = roundup_pow_of_two(sizeof(struct intel_ring)); + const unsigned long sz = PAGE_SIZE / 2; struct intel_ring *ring; + BUILD_BUG_ON(MIN_SPACE_FOR_ADD_REQUEST > sz); + ring = kzalloc(sizeof(*ring) + sz, GFP_KERNEL); if (!ring) return NULL; diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_device.c b/drivers/gpu/drm/i915/selftests/mock_gem_device.c index 04eb9362f4f8..1bc61f3f76fc 100644 --- a/drivers/gpu/drm/i915/selftests/mock_gem_device.c +++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.c @@ -85,6 +85,8 @@ static void mock_device_release(struct drm_device *dev) i915_gemfs_fini(i915); + drm_mode_config_cleanup(&i915->drm); + drm_dev_fini(&i915->drm); put_device(&i915->drm.pdev->dev); } @@ -179,20 +181,15 @@ struct drm_i915_private *mock_gem_device(void) I915_GTT_PAGE_SIZE_64K | I915_GTT_PAGE_SIZE_2M; - spin_lock_init(&i915->mm.object_stat_lock); mock_uncore_init(i915); + i915_gem_init__mm(i915); init_waitqueue_head(&i915->gpu_error.wait_queue); init_waitqueue_head(&i915->gpu_error.reset_queue); i915->wq = alloc_ordered_workqueue("mock", 0); if (!i915->wq) - goto put_device; - - 
INIT_WORK(&i915->mm.free_work, __i915_gem_free_work); - init_llist_head(&i915->mm.free_list); - INIT_LIST_HEAD(&i915->mm.unbound_list); - INIT_LIST_HEAD(&i915->mm.bound_list); + goto err_drv; mock_init_contexts(i915); @@ -271,6 +268,9 @@ err_objects: kmem_cache_destroy(i915->objects); err_wq: destroy_workqueue(i915->wq); +err_drv: + drm_mode_config_cleanup(&i915->drm); + drm_dev_fini(&i915->drm); put_device: put_device(&pdev->dev); err: diff --git a/drivers/gpu/drm/i915/selftests/mock_gtt.c b/drivers/gpu/drm/i915/selftests/mock_gtt.c index 336e1afb250f..e96873f96116 100644 --- a/drivers/gpu/drm/i915/selftests/mock_gtt.c +++ b/drivers/gpu/drm/i915/selftests/mock_gtt.c @@ -110,8 +110,8 @@ void mock_init_ggtt(struct drm_i915_private *i915) ggtt->base.i915 = i915; - ggtt->mappable_base = 0; - ggtt->mappable_end = 2048 * PAGE_SIZE; + ggtt->gmadr = (struct resource) DEFINE_RES_MEM(0, 2048 * PAGE_SIZE); + ggtt->mappable_end = resource_size(&ggtt->gmadr); ggtt->base.total = 4096 * PAGE_SIZE; ggtt->base.clear_range = nop_clear_range;
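The enlarged-permutation trick in the igt_write_huge() hunk above is worth unpacking: rather than shuffling only the n engines that can store a dword, the test shuffles n * I915_NUM_ENGINES indices and reduces each entry modulo n, so the visit order stays random while back-to-back submissions to the same engine remain possible. The following is a minimal standalone userspace C sketch of that idea; shuffle(), MAX_ENGINES, and the rand()-based PRNG are illustrative stand-ins, not the kernel's I915_RND_STATE/i915_random_order() API.

#include <stdio.h>
#include <stdlib.h>
#include <time.h>

#define MAX_ENGINES 8 /* stand-in for I915_NUM_ENGINES */

/* Fisher-Yates shuffle: every permutation of order[] is equally likely */
static void shuffle(unsigned int *order, unsigned int count)
{
	unsigned int i;

	for (i = count - 1; i > 0; i--) {
		unsigned int j = (unsigned int)rand() % (i + 1);
		unsigned int tmp = order[i];

		order[i] = order[j];
		order[j] = tmp;
	}
}

int main(void)
{
	const unsigned int n = 3; /* pretend three engines can store a dword */
	const unsigned int count = n * MAX_ENGINES;
	unsigned int order[3 * MAX_ENGINES];
	unsigned int i;

	srand((unsigned int)time(NULL));

	for (i = 0; i < count; i++)
		order[i] = i;
	shuffle(order, count);

	/*
	 * order[i] % n selects the engine, as the selftest does; because
	 * the shuffled array is n * MAX_ENGINES entries long, the same
	 * engine can come up several times in succession.
	 */
	for (i = 0; i < count; i++)
		printf("iteration %2u -> engine %u\n", i, order[i] % n);

	return 0;
}

A plain permutation of just the n engines would never dispatch to the same engine twice in a row, which could hide bugs in back-to-back submission; oversizing the array and folding it with a modulo restores that case without giving up the random ordering.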