Diffstat (limited to 'drivers/gpu/drm/i915/selftests/i915_request.c')
-rw-r--r--	drivers/gpu/drm/i915/selftests/i915_request.c | 211
1 file changed, 206 insertions(+), 5 deletions(-)
diff --git a/drivers/gpu/drm/i915/selftests/i915_request.c b/drivers/gpu/drm/i915/selftests/i915_request.c
index d2a678a2497e..ee8e753d98ce 100644
--- a/drivers/gpu/drm/i915/selftests/i915_request.c
+++ b/drivers/gpu/drm/i915/selftests/i915_request.c
@@ -609,6 +609,206 @@ static int live_nop_request(void *arg)
 	return err;
 }
 
+static int __cancel_inactive(struct intel_engine_cs *engine)
+{
+	struct intel_context *ce;
+	struct igt_spinner spin;
+	struct i915_request *rq;
+	int err = 0;
+
+	if (igt_spinner_init(&spin, engine->gt))
+		return -ENOMEM;
+
+	ce = intel_context_create(engine);
+	if (IS_ERR(ce)) {
+		err = PTR_ERR(ce);
+		goto out_spin;
+	}
+
+	rq = igt_spinner_create_request(&spin, ce, MI_ARB_CHECK);
+	if (IS_ERR(rq)) {
+		err = PTR_ERR(rq);
+		goto out_ce;
+	}
+
+	pr_debug("%s: Cancelling inactive request\n", engine->name);
+	i915_request_cancel(rq, -EINTR);
+	i915_request_get(rq);
+	i915_request_add(rq);
+
+	if (i915_request_wait(rq, 0, HZ / 5) < 0) {
+		struct drm_printer p = drm_info_printer(engine->i915->drm.dev);
+
+		pr_err("%s: Failed to cancel inactive request\n", engine->name);
+		intel_engine_dump(engine, &p, "%s\n", engine->name);
+		err = -ETIME;
+		goto out_rq;
+	}
+
+	if (rq->fence.error != -EINTR) {
+		pr_err("%s: fence not cancelled (%u)\n",
+		       engine->name, rq->fence.error);
+		err = -EINVAL;
+	}
+
+out_rq:
+	i915_request_put(rq);
+out_ce:
+	intel_context_put(ce);
+out_spin:
+	igt_spinner_fini(&spin);
+	if (err)
+		pr_err("%s: %s error %d\n", __func__, engine->name, err);
+	return err;
+}
+
+static int __cancel_active(struct intel_engine_cs *engine)
+{
+	struct intel_context *ce;
+	struct igt_spinner spin;
+	struct i915_request *rq;
+	int err = 0;
+
+	if (igt_spinner_init(&spin, engine->gt))
+		return -ENOMEM;
+
+	ce = intel_context_create(engine);
+	if (IS_ERR(ce)) {
+		err = PTR_ERR(ce);
+		goto out_spin;
+	}
+
+	rq = igt_spinner_create_request(&spin, ce, MI_ARB_CHECK);
+	if (IS_ERR(rq)) {
+		err = PTR_ERR(rq);
+		goto out_ce;
+	}
+
+	pr_debug("%s: Cancelling active request\n", engine->name);
+	i915_request_get(rq);
+	i915_request_add(rq);
+	if (!igt_wait_for_spinner(&spin, rq)) {
+		struct drm_printer p = drm_info_printer(engine->i915->drm.dev);
+
+		pr_err("Failed to start spinner on %s\n", engine->name);
+		intel_engine_dump(engine, &p, "%s\n", engine->name);
+		err = -ETIME;
+		goto out_rq;
+	}
+	i915_request_cancel(rq, -EINTR);
+
+	if (i915_request_wait(rq, 0, HZ / 5) < 0) {
+		struct drm_printer p = drm_info_printer(engine->i915->drm.dev);
+
+		pr_err("%s: Failed to cancel active request\n", engine->name);
+		intel_engine_dump(engine, &p, "%s\n", engine->name);
+		err = -ETIME;
+		goto out_rq;
+	}
+
+	if (rq->fence.error != -EINTR) {
+		pr_err("%s: fence not cancelled (%u)\n",
+		       engine->name, rq->fence.error);
+		err = -EINVAL;
+	}
+
+out_rq:
+	i915_request_put(rq);
+out_ce:
+	intel_context_put(ce);
+out_spin:
+	igt_spinner_fini(&spin);
+	if (err)
+		pr_err("%s: %s error %d\n", __func__, engine->name, err);
+	return err;
+}
+
+static int __cancel_completed(struct intel_engine_cs *engine)
+{
+	struct intel_context *ce;
+	struct igt_spinner spin;
+	struct i915_request *rq;
+	int err = 0;
+
+	if (igt_spinner_init(&spin, engine->gt))
+		return -ENOMEM;
+
+	ce = intel_context_create(engine);
+	if (IS_ERR(ce)) {
+		err = PTR_ERR(ce);
+		goto out_spin;
+	}
+
+	rq = igt_spinner_create_request(&spin, ce, MI_ARB_CHECK);
+	if (IS_ERR(rq)) {
+		err = PTR_ERR(rq);
+		goto out_ce;
+	}
+	igt_spinner_end(&spin);
+	i915_request_get(rq);
+	i915_request_add(rq);
+
+	if (i915_request_wait(rq, 0, HZ / 5) < 0) {
+		err = -ETIME;
+		goto out_rq;
+	}
+
+	pr_debug("%s: Cancelling completed request\n", engine->name);
+	i915_request_cancel(rq, -EINTR);
+	if (rq->fence.error) {
+		pr_err("%s: fence not cancelled (%u)\n",
+		       engine->name, rq->fence.error);
+		err = -EINVAL;
+	}
+
+out_rq:
+	i915_request_put(rq);
+out_ce:
+	intel_context_put(ce);
+out_spin:
+	igt_spinner_fini(&spin);
+	if (err)
+		pr_err("%s: %s error %d\n", __func__, engine->name, err);
+	return err;
+}
+
+static int live_cancel_request(void *arg)
+{
+	struct drm_i915_private *i915 = arg;
+	struct intel_engine_cs *engine;
+
+	/*
+	 * Check cancellation of requests. We expect to be able to immediately
+	 * cancel active requests, even if they are currently on the GPU.
+	 */
+
+	for_each_uabi_engine(engine, i915) {
+		struct igt_live_test t;
+		int err, err2;
+
+		if (!intel_engine_has_preemption(engine))
+			continue;
+
+		err = igt_live_test_begin(&t, i915, __func__, engine->name);
+		if (err)
+			return err;
+
+		err = __cancel_inactive(engine);
+		if (err == 0)
+			err = __cancel_active(engine);
+		if (err == 0)
+			err = __cancel_completed(engine);
+
+		err2 = igt_live_test_end(&t);
+		if (err)
+			return err;
+		if (err2)
+			return err2;
+	}
+
+	return 0;
+}
+
 static struct i915_vma *empty_batch(struct drm_i915_private *i915)
 {
 	struct drm_i915_gem_object *obj;
@@ -620,7 +820,7 @@ static struct i915_vma *empty_batch(struct drm_i915_private *i915)
 	if (IS_ERR(obj))
 		return ERR_CAST(obj);
 
-	cmd = i915_gem_object_pin_map(obj, I915_MAP_WB);
+	cmd = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WB);
 	if (IS_ERR(cmd)) {
 		err = PTR_ERR(cmd);
 		goto err;
@@ -782,7 +982,7 @@ static struct i915_vma *recursive_batch(struct drm_i915_private *i915)
 	if (err)
 		goto err;
 
-	cmd = i915_gem_object_pin_map(obj, I915_MAP_WC);
+	cmd = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WC);
 	if (IS_ERR(cmd)) {
 		err = PTR_ERR(cmd);
 		goto err;
@@ -817,7 +1017,7 @@ static int recursive_batch_resolve(struct i915_vma *batch)
 {
 	u32 *cmd;
 
-	cmd = i915_gem_object_pin_map(batch->obj, I915_MAP_WC);
+	cmd = i915_gem_object_pin_map_unlocked(batch->obj, I915_MAP_WC);
 	if (IS_ERR(cmd))
 		return PTR_ERR(cmd);
 
@@ -1070,8 +1270,8 @@ out_request:
 		if (!request[idx])
 			break;
 
-		cmd = i915_gem_object_pin_map(request[idx]->batch->obj,
-					      I915_MAP_WC);
+		cmd = i915_gem_object_pin_map_unlocked(request[idx]->batch->obj,
+						       I915_MAP_WC);
 		if (!IS_ERR(cmd)) {
 			*cmd = MI_BATCH_BUFFER_END;
 
@@ -1486,6 +1686,7 @@ int i915_request_live_selftests(struct drm_i915_private *i915)
 		SUBTEST(live_sequential_engines),
 		SUBTEST(live_parallel_engines),
 		SUBTEST(live_empty_request),
+		SUBTEST(live_cancel_request),
 		SUBTEST(live_breadcrumbs_smoketest),
 	};
 
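All three new __cancel_*() helpers follow the same skeleton and differ only in when i915_request_cancel() is issued. Below is a condensed sketch of that shared flow; __cancel_sketch() is a hypothetical name, and the IS_ERR() unwinding, engine dumps and pr_debug noise of the real helpers are elided, so this is illustrative rather than a drop-in replacement for the code above:

static int __cancel_sketch(struct intel_engine_cs *engine)
{
	struct igt_spinner spin;
	struct intel_context *ce;
	struct i915_request *rq;
	int err = 0;

	if (igt_spinner_init(&spin, engine->gt))	/* spinner keeps the request busy on the GPU */
		return -ENOMEM;

	ce = intel_context_create(engine);		/* IS_ERR() unwinding elided in this sketch */
	rq = igt_spinner_create_request(&spin, ce, MI_ARB_CHECK);

	i915_request_get(rq);
	i915_request_add(rq);

	/*
	 * The subtests differ only in when they cancel: __cancel_inactive()
	 * cancels before i915_request_add(), __cancel_active() after
	 * igt_wait_for_spinner(), and __cancel_completed() after
	 * igt_spinner_end() plus a wait (where fence.error must stay 0).
	 */
	i915_request_cancel(rq, -EINTR);

	if (i915_request_wait(rq, 0, HZ / 5) < 0)
		err = -ETIME;		/* cancellation must not leave the request hanging */
	else if (rq->fence.error != -EINTR)
		err = -EINVAL;		/* fence must carry the cancellation status */

	i915_request_put(rq);
	intel_context_put(ce);
	igt_spinner_fini(&spin);
	return err;
}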
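The remaining hunks switch the selftest from i915_gem_object_pin_map() to i915_gem_object_pin_map_unlocked(). The locked variant expects the caller to already hold the object lock, while the _unlocked variant is a thin wrapper that takes and drops the lock around the call; roughly, as a sketch of the helper around this kernel version (not quoted from this patch):

void *i915_gem_object_pin_map_unlocked(struct drm_i915_gem_object *obj,
				       enum i915_map_type type)
{
	void *ret;

	i915_gem_object_lock(obj, NULL);	/* single-object lock, no ww context */
	ret = i915_gem_object_pin_map(obj, type);
	i915_gem_object_unlock(obj);

	return ret;
}

That wrapping is why the selftest call sites can stay unchanged apart from the function name.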
