// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright © 2018 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
#include <linux/slab.h>
#include "vmwgfx_validation.h"
#include "vmwgfx_drv.h"

/**
 * struct vmw_validation_bo_node - Buffer object validation metadata.
 * @base: Metadata used for TTM reservation and validation.
 * @hash: A hash entry used for the duplicate detection hash table.
 * @as_mob: Validate as mob.
 * @cpu_blit: Validate for cpu blit access.
 *
 * Bit fields are used since these structures are allocated and freed in
 * large numbers and space conservation is desired.
 */
struct vmw_validation_bo_node {
	struct ttm_validate_buffer base;
	struct drm_hash_item hash;
	u32 as_mob : 1;
	u32 cpu_blit : 1;
};

/**
 * struct vmw_validation_res_node - Resource validation metadata.
 * @head: List head for the resource validation list.
 * @hash: A hash entry used for the duplicate detection hash table.
 * @res: Reference counted resource pointer.
 * @new_backup: Non ref-counted pointer to new backup buffer to be assigned
 * to a resource.
 * @new_backup_offset: Offset into the new backup mob for resources that can
 * share MOBs.
 * @no_buffer_needed: Kernel does not need to allocate a MOB during validation;
 * the command stream provides a mob bind operation.
 * @switching_backup: The validation process is switching backup MOB.
 * @first_usage: True iff the resource has been seen only once in the current
 * validation batch.
 * @reserved: Whether the resource is currently reserved by this process.
 * @private: Optionally additional memory for caller-private data.
 *
 * Bit fields are used since these structures are allocated and freed in
 * large numbers and space conservation is desired.
 */
struct vmw_validation_res_node {
	struct list_head head;
	struct drm_hash_item hash;
	struct vmw_resource *res;
	struct vmw_buffer_object *new_backup;
	unsigned long new_backup_offset;
	u32 no_buffer_needed : 1;
	u32 switching_backup : 1;
	u32 first_usage : 1;
	u32 reserved : 1;
	unsigned long private[];
};
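
/*
 * Illustrative sketch (not driver code): the trailing private[] storage
 * is what vmw_validation_add_resource() returns through its @p_node
 * argument, and vmw_validation_res_switch_backup() later recovers the
 * node from that opaque pointer:
 *
 *	void *priv;
 *
 *	ret = vmw_validation_add_resource(ctx, res, size, &priv, NULL);
 *	...
 *	node = container_of(priv, struct vmw_validation_res_node, private);
 */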

/**
 * vmw_validation_mem_alloc - Allocate kernel memory from the validation
 * context based allocator
 * @ctx: The validation context
 * @size: The number of bytes to allocate.
 *
 * The memory allocated may not exceed PAGE_SIZE, and the returned
 * address is aligned to sizeof(long). All memory allocated this way is
 * reclaimed after validation when calling any of the exported functions:
 * vmw_validation_unref_lists()
 * vmw_validation_revert()
 * vmw_validation_done()
 *
 * Return: Pointer to the allocated memory on success. NULL on failure.
 */
void *vmw_validation_mem_alloc(struct vmw_validation_context *ctx,
			       unsigned int size)
{
	void *addr;

	size = vmw_validation_align(size);
	if (size > PAGE_SIZE)
		return NULL;

	if (ctx->mem_size_left < size) {
		struct page *page = alloc_page(GFP_KERNEL | __GFP_ZERO);

		if (!page)
			return NULL;

		list_add_tail(&page->lru, &ctx->page_list);
		ctx->page_address = page_address(page);
		ctx->mem_size_left = PAGE_SIZE;
	}

	addr = (void *) (ctx->page_address + (PAGE_SIZE - ctx->mem_size_left));
	ctx->mem_size_left -= size;

	return addr;
}
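
/*
 * Usage sketch (illustrative only; the caller shown is hypothetical):
 * validation nodes are carved out of the context's page list, so there
 * is never a matching kfree(); everything is reclaimed in one go by
 * vmw_validation_mem_free() at context teardown:
 *
 *	struct vmw_validation_bo_node *node =
 *		vmw_validation_mem_alloc(ctx, sizeof(*node));
 *	if (!node)
 *		return -ENOMEM;
 *	// node is zero-initialized since pages are allocated __GFP_ZERO.
 */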

/**
 * vmw_validation_mem_free - Free all memory allocated using
 * vmw_validation_mem_alloc()
 * @ctx: The validation context
 *
 * All memory previously allocated for this context using
 * vmw_validation_mem_alloc() is freed.
 */
static void vmw_validation_mem_free(struct vmw_validation_context *ctx)
{
	struct page *entry, *next;

	list_for_each_entry_safe(entry, next, &ctx->page_list, lru) {
		list_del_init(&entry->lru);
		__free_page(entry);
	}

	ctx->mem_size_left = 0;
}

/**
 * vmw_validation_find_bo_dup - Find a duplicate buffer object entry in the
 * validation context's lists.
 * @ctx: The validation context to search.
 * @vbo: The buffer object to search for.
 *
 * Return: Pointer to the struct vmw_validation_bo_node referencing the
 * duplicate, or NULL if none found.
 */
static struct vmw_validation_bo_node *
vmw_validation_find_bo_dup(struct vmw_validation_context *ctx,
			   struct vmw_buffer_object *vbo)
{
	struct vmw_validation_bo_node *bo_node = NULL;

	if (!ctx->merge_dups)
		return NULL;

	if (ctx->ht) {
		struct drm_hash_item *hash;

		if (!drm_ht_find_item(ctx->ht, (unsigned long) vbo, &hash))
			bo_node = container_of(hash, typeof(*bo_node), hash);
	} else {
		struct vmw_validation_bo_node *entry;

		list_for_each_entry(entry, &ctx->bo_list, base.head) {
			if (entry->base.bo == &vbo->base) {
				bo_node = entry;
				break;
			}
		}
	}

	return bo_node;
}

/**
 * vmw_validation_find_res_dup - Find a duplicate resource entry in the
 * validation context's lists.
 * @ctx: The validation context to search.
 * @res: The resource to search for.
 *
 * Return: Pointer to the struct vmw_validation_res_node referencing the
 * duplicate, or NULL if none found.
 */
static struct vmw_validation_res_node *
vmw_validation_find_res_dup(struct vmw_validation_context *ctx,
			    struct vmw_resource *res)
{
	struct vmw_validation_res_node *res_node = NULL;

	if (!ctx->merge_dups)
		return NULL;

	if (ctx->ht) {
		struct drm_hash_item *hash;

		if (!drm_ht_find_item(ctx->ht, (unsigned long) res, &hash))
			res_node = container_of(hash, typeof(*res_node), hash);
	} else {
		struct vmw_validation_res_node *entry;

		list_for_each_entry(entry, &ctx->resource_ctx_list, head) {
			if (entry->res == res) {
				res_node = entry;
				goto out;
			}
		}

		list_for_each_entry(entry, &ctx->resource_list, head) {
			if (entry->res == res) {
				res_node = entry;
				break;
			}
		}
	}
out:
	return res_node;
}

/**
 * vmw_validation_add_bo - Add a buffer object to the validation context.
 * @ctx: The validation context.
 * @vbo: The buffer object.
 * @as_mob: Validate as mob, otherwise suitable for GMR operations.
 * @cpu_blit: Validate in a page-mappable location.
 *
 * Return: Zero on success, negative error code otherwise.
 */
int vmw_validation_add_bo(struct vmw_validation_context *ctx,
			  struct vmw_buffer_object *vbo,
			  bool as_mob,
			  bool cpu_blit)
{
	struct vmw_validation_bo_node *bo_node;

	bo_node = vmw_validation_find_bo_dup(ctx, vbo);
	if (bo_node) {
		if (bo_node->as_mob != as_mob ||
		    bo_node->cpu_blit != cpu_blit) {
			DRM_ERROR("Inconsistent buffer usage.\n");
			return -EINVAL;
		}
	} else {
		struct ttm_validate_buffer *val_buf;
		int ret;

		bo_node = vmw_validation_mem_alloc(ctx, sizeof(*bo_node));
		if (!bo_node)
			return -ENOMEM;

		if (ctx->ht) {
			bo_node->hash.key = (unsigned long) vbo;
			ret = drm_ht_insert_item(ctx->ht, &bo_node->hash);
			if (ret) {
				DRM_ERROR("Failed to initialize a buffer "
					  "validation entry.\n");
				return ret;
			}
		}
		val_buf = &bo_node->base;
		val_buf->bo = ttm_bo_get_unless_zero(&vbo->base);
		if (!val_buf->bo)
			return -ESRCH;
		val_buf->num_shared = 0;
		list_add_tail(&val_buf->head, &ctx->bo_list);
		bo_node->as_mob = as_mob;
		bo_node->cpu_blit = cpu_blit;
	}

	return 0;
}

/**
 * vmw_validation_add_resource - Add a resource to the validation context.
 * @ctx: The validation context.
 * @res: The resource.
 * @priv_size: Size of private, additional metadata.
 * @p_node: Output pointer of additional metadata address.
 * @first_usage: Whether this was the first time this resource was seen.
 *
 * Return: Zero on success, negative error code otherwise.
 */
int vmw_validation_add_resource(struct vmw_validation_context *ctx,
				struct vmw_resource *res,
				size_t priv_size,
				void **p_node,
				bool *first_usage)
{
	struct vmw_validation_res_node *node;
	int ret;

	node = vmw_validation_find_res_dup(ctx, res);
	if (node) {
		node->first_usage = 0;
		goto out_fill;
	}

	node = vmw_validation_mem_alloc(ctx, sizeof(*node) + priv_size);
	if (!node) {
		DRM_ERROR("Failed to allocate a resource validation "
			  "entry.\n");
		return -ENOMEM;
	}

	if (ctx->ht) {
		node->hash.key = (unsigned long) res;
		ret = drm_ht_insert_item(ctx->ht, &node->hash);
		if (ret) {
			DRM_ERROR("Failed to initialize a resource validation "
				  "entry.\n");
			return ret;
		}
	}
	node->res = vmw_resource_reference_unless_doomed(res);
	if (!node->res)
		return -ESRCH;

	node->first_usage = 1;
	if (!res->dev_priv->has_mob) {
		list_add_tail(&node->head, &ctx->resource_list);
	} else {
		switch (vmw_res_type(res)) {
		case vmw_res_context:
		case vmw_res_dx_context:
			list_add(&node->head, &ctx->resource_ctx_list);
			break;
		case vmw_res_cotable:
			list_add_tail(&node->head, &ctx->resource_ctx_list);
			break;
		default:
			list_add_tail(&node->head, &ctx->resource_list);
			break;
		}
	}

out_fill:
	if (first_usage)
		*first_usage = node->first_usage;
	if (p_node)
		*p_node = &node->private;

	return 0;
}
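
/*
 * Usage sketch (illustrative; struct my_res_info and its needs_setup
 * field are hypothetical caller-side names): @priv_size reserves
 * trailing per-resource storage and @first_usage tells the caller
 * whether one-time setup work is needed:
 *
 *	struct my_res_info *info;
 *	bool first;
 *	int ret;
 *
 *	ret = vmw_validation_add_resource(ctx, res, sizeof(*info),
 *					  (void **)&info, &first);
 *	if (!ret && first)
 *		info->needs_setup = true;
 */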

/**
 * vmw_validation_res_switch_backup - Register a backup MOB switch during
 * validation.
 * @ctx: The validation context.
 * @val_private: The additional metadata pointer returned when the
 * resource was registered with the validation context. Used to identify
 * the resource.
 * @vbo: The new backup buffer object MOB. This buffer object needs to have
 * already been registered with the validation context.
 * @backup_offset: Offset into the new backup MOB.
 */
void vmw_validation_res_switch_backup(struct vmw_validation_context *ctx,
				      void *val_private,
				      struct vmw_buffer_object *vbo,
				      unsigned long backup_offset)
{
	struct vmw_validation_res_node *val;

	val = container_of(val_private, typeof(*val), private);

	val->switching_backup = 1;
	if (val->first_usage)
		val->no_buffer_needed = 1;

	val->new_backup = vbo;
	val->new_backup_offset = backup_offset;
}

/**
 * vmw_validation_res_reserve - Reserve all resources registered with this
 * validation context.
 * @ctx: The validation context.
 * @intr: Use interruptible waits when possible.
 *
 * Return: Zero on success, -ERESTARTSYS if interrupted. Negative error
 * code on failure.
 */
int vmw_validation_res_reserve(struct vmw_validation_context *ctx,
			       bool intr)
{
	struct vmw_validation_res_node *val;
	int ret = 0;

	list_splice_init(&ctx->resource_ctx_list, &ctx->resource_list);

	list_for_each_entry(val, &ctx->resource_list, head) {
		struct vmw_resource *res = val->res;

		ret = vmw_resource_reserve(res, intr, val->no_buffer_needed);
		if (ret)
			goto out_unreserve;

		val->reserved = 1;
		if (res->backup) {
			struct vmw_buffer_object *vbo = res->backup;

			ret = vmw_validation_add_bo(ctx, vbo,
						    vmw_resource_needs_backup(res),
						    false);
			if (ret)
				goto out_unreserve;
		}
	}

	return 0;

out_unreserve:
	vmw_validation_res_unreserve(ctx, true);
	return ret;
}

/**
 * vmw_validation_res_unreserve - Unreserve all reserved resources
 * registered with this validation context.
 * @ctx: The validation context.
 * @backoff: Whether this is a backoff- or a commit-type operation. This
 * is used to determine whether to switch backup MOBs or not.
 */
void vmw_validation_res_unreserve(struct vmw_validation_context *ctx,
				 bool backoff)
{
	struct vmw_validation_res_node *val;

	list_splice_init(&ctx->resource_ctx_list, &ctx->resource_list);

	list_for_each_entry(val, &ctx->resource_list, head) {
		if (val->reserved)
			vmw_resource_unreserve(val->res,
					       !backoff &&
					       val->switching_backup,
					       val->new_backup,
					       val->new_backup_offset);
	}
}

/**
 * vmw_validation_bo_validate_single - Validate a single buffer object.
 * @bo: The TTM buffer object base.
 * @interruptible: Whether to perform waits interruptible if possible.
 * @validate_as_mob: Whether to validate in MOB memory.
 *
 * Return: Zero on success, -ERESTARTSYS if interrupted. Negative error
 * code on failure.
 */
int vmw_validation_bo_validate_single(struct ttm_buffer_object *bo,
				      bool interruptible,
				      bool validate_as_mob)
{
	struct vmw_buffer_object *vbo =
		container_of(bo, struct vmw_buffer_object, base);
	struct ttm_operation_ctx ctx = {
		.interruptible = interruptible,
		.no_wait_gpu = false
	};
	int ret;

	if (vbo->pin_count > 0)
		return 0;

	if (validate_as_mob)
		return ttm_bo_validate(bo, &vmw_mob_placement, &ctx);

	/*
	 * Put BO in VRAM if there is space, otherwise as a GMR.
	 * If there is no space in VRAM and GMR ids are all used up,
	 * start evicting GMRs to make room. If the DMA buffer can't be
	 * used as a GMR, this will return -ENOMEM.
	 */

	ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, &ctx);
	if (ret == 0 || ret == -ERESTARTSYS)
		return ret;

	/*
	 * If that failed, try VRAM again, this time evicting
	 * previous contents.
	 */

	ret = ttm_bo_validate(bo, &vmw_vram_placement, &ctx);
	return ret;
}

/**
 * vmw_validation_bo_validate - Validate all buffer objects registered with
 * the validation context.
 * @ctx: The validation context.
 * @intr: Whether to perform waits interruptible if possible.
 *
 * Return: Zero on success, -ERESTARTSYS if interrupted,
 * negative error code on failure.
 */
int vmw_validation_bo_validate(struct vmw_validation_context *ctx, bool intr)
{
	struct vmw_validation_bo_node *entry;
	int ret;

	list_for_each_entry(entry, &ctx->bo_list, base.head) {
		if (entry->cpu_blit) {
			struct ttm_operation_ctx ttm_ctx = {
				.interruptible = intr,
				.no_wait_gpu = false
			};

			ret = ttm_bo_validate(entry->base.bo,
					      &vmw_nonfixed_placement,
					      &ttm_ctx);
		} else {
			ret = vmw_validation_bo_validate_single(entry->base.bo,
								intr,
								entry->as_mob);
		}
		if (ret)
			return ret;
	}
	return 0;
}

/**
 * vmw_validation_res_validate - Validate all resources registered with the
 * validation context.
 * @ctx: The validation context.
 * @intr: Whether to perform waits interruptible if possible.
 *
 * Before this function is called, all resource backup buffers must have
 * been validated.
 *
 * Return: Zero on success, -ERESTARTSYS if interrupted,
 * negative error code on failure.
 */
int vmw_validation_res_validate(struct vmw_validation_context *ctx, bool intr)
{
	struct vmw_validation_res_node *val;
	int ret;

	list_for_each_entry(val, &ctx->resource_list, head) {
		struct vmw_resource *res = val->res;
		struct vmw_buffer_object *backup = res->backup;

		ret = vmw_resource_validate(res, intr);
		if (ret) {
			if (ret != -ERESTARTSYS)
				DRM_ERROR("Failed to validate resource.\n");
			return ret;
		}

		/* Check if the resource switched backup buffer */
		if (backup && res->backup && (backup != res->backup)) {
			struct vmw_buffer_object *vbo = res->backup;

			ret = vmw_validation_add_bo(ctx, vbo,
						    vmw_resource_needs_backup(res),
						    false);
			if (ret)
				return ret;
		}
	}
	return 0;
}

/**
 * vmw_validation_drop_ht - Reset the hash table used for duplicate finding
 * and unregister it from this validation context.
 * @ctx: The validation context.
 *
 * The hash table used for duplicate finding is an expensive resource and
 * may be protected by mutexes that may cause deadlocks during resource
 * unreferencing if held. After resource- and buffer object registering,
 * there is no longer any use for this hash table, so allow freeing it
 * either to shorten any mutex locking time, or before resources and
 * buffer objects are freed during validation context cleanup.
 */
void vmw_validation_drop_ht(struct vmw_validation_context *ctx)
{
	struct vmw_validation_bo_node *entry;
	struct vmw_validation_res_node *val;

	if (!ctx->ht)
		return;

	list_for_each_entry(entry, &ctx->bo_list, base.head)
		(void) drm_ht_remove_item(ctx->ht, &entry->hash);

	list_for_each_entry(val, &ctx->resource_list, head)
		(void) drm_ht_remove_item(ctx->ht, &val->hash);

	list_for_each_entry(val, &ctx->resource_ctx_list, head)
		(void) drm_ht_remove_item(ctx->ht, &val->hash);

	ctx->ht = NULL;
}

/**
 * vmw_validation_unref_lists - Unregister previously registered buffer
 * objects and resources.
 * @ctx: The validation context.
 *
 * Note that this function may cause buffer object- and resource destructors
 * to be invoked.
 */
void vmw_validation_unref_lists(struct vmw_validation_context *ctx)
{
	struct vmw_validation_bo_node *entry;
	struct vmw_validation_res_node *val;

	list_for_each_entry(entry, &ctx->bo_list, base.head)
		ttm_bo_unref(&entry->base.bo);

	list_splice_init(&ctx->resource_ctx_list, &ctx->resource_list);
	list_for_each_entry(val, &ctx->resource_list, head)
		vmw_resource_unreference(&val->res);

	/*
	 * No need to detach each list entry since they are all freed with
	 * vmw_validation_mem_free(). Just make them inaccessible.
	 */
	INIT_LIST_HEAD(&ctx->bo_list);
	INIT_LIST_HEAD(&ctx->resource_list);

	vmw_validation_mem_free(ctx);
}

/**
 * vmw_validation_prepare - Prepare a validation context for command
 * submission.
 * @ctx: The validation context.
 * @mutex: The mutex used to protect resource reservation.
 * @intr: Whether to perform waits interruptible if possible.
 *
 * Note that the single reservation mutex @mutex is an unfortunate
 * construct. Ideally resource reservation should be moved to per-resource
 * ww_mutexes.
 * If this function doesn't return zero to indicate success, all resources
 * are left unreserved but still referenced.
 * Return: Zero on success, -ERESTARTSYS if interrupted, negative error code
 * on error.
 */
int vmw_validation_prepare(struct vmw_validation_context *ctx,
			   struct mutex *mutex,
			   bool intr)
{
	int ret = 0;

	if (mutex) {
		if (intr)
			ret = mutex_lock_interruptible(mutex);
		else
			mutex_lock(mutex);
		if (ret)
			return -ERESTARTSYS;
	}

	ctx->res_mutex = mutex;
	ret = vmw_validation_res_reserve(ctx, intr);
	if (ret)
		goto out_no_res_reserve;

	ret = vmw_validation_bo_reserve(ctx, intr);
	if (ret)
		goto out_no_bo_reserve;

	ret = vmw_validation_bo_validate(ctx, intr);
	if (ret)
		goto out_no_validate;

	ret = vmw_validation_res_validate(ctx, intr);
	if (ret)
		goto out_no_validate;

	return 0;

out_no_validate:
	vmw_validation_bo_backoff(ctx);
out_no_bo_reserve:
	vmw_validation_res_unreserve(ctx, true);
out_no_res_reserve:
	if (mutex)
		mutex_unlock(mutex);

	return ret;
}
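
/*
 * Typical call sequence (hedged sketch; submit_commands() is a
 * placeholder, not part of this file):
 *
 *	ret = vmw_validation_prepare(ctx, res_mutex, true);
 *	if (ret)
 *		return ret;	// everything unreserved, still referenced
 *	ret = submit_commands();
 *	if (ret)
 *		vmw_validation_revert(ctx);
 *	else
 *		vmw_validation_done(ctx, fence);
 */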

/**
 * vmw_validation_revert - Revert validation actions if command submission
 * failed.
 *
 * @ctx: The validation context.
 *
 * The caller still needs to unref resources after a call to this function.
 */
void vmw_validation_revert(struct vmw_validation_context *ctx)
{
	vmw_validation_bo_backoff(ctx);
	vmw_validation_res_unreserve(ctx, true);
	if (ctx->res_mutex)
		mutex_unlock(ctx->res_mutex);
	vmw_validation_unref_lists(ctx);
}

/**
 * vmw_validation_done - Commit validation actions after command submission
 * success.
 * @ctx: The validation context.
 * @fence: Fence with which to fence all buffer objects taking part in the
 * command submission.
 *
 * The caller does NOT need to unref resources after a call to this function.
 */
void vmw_validation_done(struct vmw_validation_context *ctx,
			 struct vmw_fence_obj *fence)
{
	vmw_validation_bo_fence(ctx, fence);
	vmw_validation_res_unreserve(ctx, false);
	if (ctx->res_mutex)
		mutex_unlock(ctx->res_mutex);
	vmw_validation_unref_lists(ctx);
}

/**
 * vmw_validation_preload_bo - Preload the validation memory allocator for a
 * call to vmw_validation_add_bo().
 * @ctx: Pointer to the validation context.
 *
 * Iff this function returns successfully, the next call to
 * vmw_validation_add_bo() is guaranteed not to sleep. An error is not fatal
 * but voids the guarantee.
 *
 * Returns: Zero if successful, %-ENOMEM otherwise.
 */
int vmw_validation_preload_bo(struct vmw_validation_context *ctx)
{
	unsigned int size = sizeof(struct vmw_validation_bo_node);

	if (!vmw_validation_mem_alloc(ctx, size))
		return -ENOMEM;

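	/*
	 * Hand the allocation straight back to the context: the page
	 * backing it stays on the page list, so the next allocation of
	 * this size is served from it without sleeping.
	 */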
	ctx->mem_size_left += size;
	return 0;
}
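
/*
 * A minimal sketch of why preloading matters (assumed caller; the
 * spinlock and lookup helper are hypothetical): it allows the
 * subsequent add to happen in an atomic section, since the allocator
 * is then guaranteed not to sleep:
 *
 *	ret = vmw_validation_preload_bo(ctx);	// may allocate and sleep
 *	if (ret)
 *		return ret;
 *	spin_lock(&lookup_lock);
 *	vbo = my_lookup_bo(key);
 *	ret = vmw_validation_add_bo(ctx, vbo, false, false);
 *	spin_unlock(&lookup_lock);
 */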

/**
 * vmw_validation_preload_res - Preload the validation memory allocator for a
 * call to vmw_validation_add_resource().
 * @ctx: Pointer to the validation context.
 * @size: Size of the validation node extra data. See below.
 *
 * Iff this function returns successfully, the next call to
 * vmw_validation_add_resource() with the same or smaller @size is
 * guaranteed not to sleep. An error is not fatal but voids the guarantee.
 *
 * Returns: Zero if successful, %-ENOMEM otherwise.
 */
int vmw_validation_preload_res(struct vmw_validation_context *ctx,
			       unsigned int size)
{
	size = vmw_validation_align(sizeof(struct vmw_validation_res_node) +
				    size) +
		vmw_validation_align(sizeof(struct vmw_validation_bo_node));
	if (!vmw_validation_mem_alloc(ctx, size))
		return -ENOMEM;

	ctx->mem_size_left += size;
	return 0;
}