/*
* Copyright 2020 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Christian König
*/
#ifndef _TTM_RESOURCE_H_
#define _TTM_RESOURCE_H_
#include <linux/types.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/iosys-map.h>
#include <linux/dma-fence.h>
#include <drm/drm_print.h>
#include <drm/ttm/ttm_caching.h>
#include <drm/ttm/ttm_kmap_iter.h>
#define TTM_MAX_BO_PRIORITY 4U
#define TTM_NUM_MEM_TYPES 8
struct ttm_device;
struct ttm_resource_manager;
struct ttm_resource;
struct ttm_place;
struct ttm_buffer_object;
struct ttm_placement;
struct iosys_map;
struct io_mapping;
struct sg_table;
struct scatterlist;
/**
* enum ttm_lru_item_type - enumerate ttm_lru_item subclasses
*/
enum ttm_lru_item_type {
/** @TTM_LRU_RESOURCE: The resource subclass */
TTM_LRU_RESOURCE,
/** @TTM_LRU_HITCH: The iterator hitch subclass */
TTM_LRU_HITCH
};
/**
* struct ttm_lru_item - The TTM lru list node base class
* @link: The list link
* @type: The subclass type
*/
struct ttm_lru_item {
struct list_head link;
enum ttm_lru_item_type type;
};
/**
* ttm_lru_item_init() - initialize a struct ttm_lru_item
* @item: The item to initialize
* @type: The subclass type
*/
static inline void ttm_lru_item_init(struct ttm_lru_item *item,
enum ttm_lru_item_type type)
{
item->type = type;
INIT_LIST_HEAD(&item->link);
}
/**
 * ttm_lru_item_is_res() - check whether a struct ttm_lru_item is a resource
 * @item: The item to check
 */
static inline bool ttm_lru_item_is_res(const struct ttm_lru_item *item)
{
	return item->type == TTM_LRU_RESOURCE;
}
struct ttm_resource_manager_func {
/**
* struct ttm_resource_manager_func member alloc
*
* @man: Pointer to a memory type manager.
* @bo: Pointer to the buffer object we're allocating space for.
* @place: Placement details.
* @res: Resulting pointer to the ttm_resource.
*
* This function should allocate space in the memory type managed
* by @man. Placement details if applicable are given by @place. If
* successful, a filled in ttm_resource object should be returned in
* @res. @res::start should be set to a value identifying the beginning
* of the range allocated, and the function should return zero.
 * If the manager can't fulfill the request, -ENOSPC should be returned.
 * If a system error prevents the request from being fulfilled, the
 * function should return a negative error code.
*
* This function may not be called from within atomic context and needs
* to take care of its own locking to protect any data structures
* managing the space.
*/
int (*alloc)(struct ttm_resource_manager *man,
struct ttm_buffer_object *bo,
const struct ttm_place *place,
struct ttm_resource **res);
/**
* struct ttm_resource_manager_func member free
*
* @man: Pointer to a memory type manager.
* @res: Pointer to a struct ttm_resource to be freed.
*
* This function frees memory type resources previously allocated.
* May not be called from within atomic context.
*/
void (*free)(struct ttm_resource_manager *man,
struct ttm_resource *res);
/**
* struct ttm_resource_manager_func member intersects
*
* @man: Pointer to a memory type manager.
* @res: Pointer to a struct ttm_resource to be checked.
* @place: Placement to check against.
* @size: Size of the check.
*
* Test if @res intersects with @place + @size. Used to judge if
 * evictions are valuable or not.
*/
bool (*intersects)(struct ttm_resource_manager *man,
struct ttm_resource *res,
const struct ttm_place *place,
size_t size);
/**
* struct ttm_resource_manager_func member compatible
*
* @man: Pointer to a memory type manager.
* @res: Pointer to a struct ttm_resource to be checked.
* @place: Placement to check against.
* @size: Size of the check.
*
 * Test if @res is compatible with @place + @size. Used to check
 * whether the backing store needs to be moved or not.
*/
bool (*compatible)(struct ttm_resource_manager *man,
struct ttm_resource *res,
const struct ttm_place *place,
size_t size);
/**
* struct ttm_resource_manager_func member debug
*
* @man: Pointer to a memory type manager.
 * @printer: The &struct drm_printer to use for the printout.
*
* This function is called to print out the state of the memory
* type manager to aid debugging of out-of-memory conditions.
* It may not be called from within atomic context.
*/
void (*debug)(struct ttm_resource_manager *man,
struct drm_printer *printer);
};
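/*
 * Example (illustrative sketch only, not part of the TTM API): a minimal
 * manager implementation in the style of a system-memory manager that does
 * not manage an address range (so @res::start is left at 0). The "example_"
 * names are made up; a real driver would typically also track its own
 * allocator state behind its own locking in alloc/free.
 *
 *	static int example_man_alloc(struct ttm_resource_manager *man,
 *				     struct ttm_buffer_object *bo,
 *				     const struct ttm_place *place,
 *				     struct ttm_resource **res)
 *	{
 *		*res = kzalloc(sizeof(**res), GFP_KERNEL);
 *		if (!*res)
 *			return -ENOMEM;
 *
 *		ttm_resource_init(bo, place, *res);
 *		return 0;
 *	}
 *
 *	static void example_man_free(struct ttm_resource_manager *man,
 *				     struct ttm_resource *res)
 *	{
 *		ttm_resource_fini(man, res);
 *		kfree(res);
 *	}
 *
 *	static const struct ttm_resource_manager_func example_man_func = {
 *		.alloc = example_man_alloc,
 *		.free = example_man_free,
 *	};
 */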
/**
* struct ttm_resource_manager
*
 * @use_type: Whether the memory type is enabled.
 * @use_tt: Whether a TT object should be used for the backing store.
* @size: Size of the managed region.
* @bdev: ttm device this manager belongs to
 * @func: Pointer to the manager's function table,
 * see &struct ttm_resource_manager_func above.
* @move_lock: lock for move fence
* @move: The fence of the last pipelined move operation.
* @lru: The lru list for this memory type.
*
* This structure is used to identify and manage memory types for a device.
*/
struct ttm_resource_manager {
/*
* No protection. Constant from start.
*/
bool use_type;
bool use_tt;
struct ttm_device *bdev;
uint64_t size;
const struct ttm_resource_manager_func *func;
spinlock_t move_lock;
/*
* Protected by @move_lock.
*/
struct dma_fence *move;
/*
* Protected by the bdev->lru_lock.
*/
struct list_head lru[TTM_MAX_BO_PRIORITY];
/**
* @usage: How much of the resources are used, protected by the
* bdev->lru_lock.
*/
uint64_t usage;
};
/**
* struct ttm_bus_placement
*
* @addr: mapped virtual address
 * @offset: physical address
 * @is_iomem: is this io memory?
* @caching: See enum ttm_caching
*
* Structure indicating the bus placement of an object.
*/
struct ttm_bus_placement {
void *addr;
phys_addr_t offset;
bool is_iomem;
enum ttm_caching caching;
};
/**
* struct ttm_resource
*
* @start: Start of the allocation.
* @size: Actual size of resource in bytes.
* @mem_type: Resource type of the allocation.
* @placement: Placement flags.
* @bus: Placement on io bus accessible to the CPU
* @bo: weak reference to the BO, protected by ttm_device::lru_lock
*
* Structure indicating the placement and space resources used by a
* buffer object.
*/
struct ttm_resource {
unsigned long start;
size_t size;
uint32_t mem_type;
uint32_t placement;
struct ttm_bus_placement bus;
struct ttm_buffer_object *bo;
/**
* @lru: Least recently used list, see &ttm_resource_manager.lru
*/
struct ttm_lru_item lru;
};
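/*
 * Example (illustrative sketch only): how a driver might describe CPU
 * access to a VRAM resource, e.g. from its io_mem_reserve() callback. The
 * vram_base parameter and the assumption that @start is in page units are
 * driver specific and not mandated by TTM.
 *
 *	static void example_fill_bus(struct ttm_resource *mem,
 *				     phys_addr_t vram_base)
 *	{
 *		mem->bus.offset = vram_base + (mem->start << PAGE_SHIFT);
 *		mem->bus.is_iomem = true;
 *		mem->bus.caching = ttm_write_combined;
 *		mem->bus.addr = NULL;
 *	}
 */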
/**
* ttm_lru_item_to_res() - Downcast a struct ttm_lru_item to a struct ttm_resource
* @item: The struct ttm_lru_item to downcast
*
* Return: Pointer to the embedding struct ttm_resource
*/
static inline struct ttm_resource *
ttm_lru_item_to_res(struct ttm_lru_item *item)
{
return container_of(item, struct ttm_resource, lru);
}
/**
* struct ttm_lru_bulk_move_pos
*
* @first: first res in the bulk move range
* @last: last res in the bulk move range
*
* Range of resources for a lru bulk move.
*/
struct ttm_lru_bulk_move_pos {
struct ttm_resource *first;
struct ttm_resource *last;
};
/**
* struct ttm_lru_bulk_move
 * @pos: first/last lru entry for resources in each domain/priority
* @cursor_list: The list of cursors currently traversing any of
* the sublists of @pos. Protected by the ttm device's lru_lock.
*
* Container for the current bulk move state. Should be used with
* ttm_lru_bulk_move_init() and ttm_bo_set_bulk_move().
* All BOs in a bulk_move structure need to share the same reservation object to
* ensure that the bulk as a whole is locked for eviction even if only one BO of
* the bulk is evicted.
*/
struct ttm_lru_bulk_move {
struct ttm_lru_bulk_move_pos pos[TTM_NUM_MEM_TYPES][TTM_MAX_BO_PRIORITY];
struct list_head cursor_list;
};
/**
* struct ttm_resource_cursor
* @man: The resource manager currently being iterated over
* @hitch: A hitch list node inserted before the next resource
* to iterate over.
* @bulk_link: A list link for the list of cursors traversing the
* bulk sublist of @bulk. Protected by the ttm device's lru_lock.
 * @bulk: Pointer to the struct ttm_lru_bulk_move into whose subrange @hitch
 * is inserted. NULL if none. Never dereference this pointer since
* the struct ttm_lru_bulk_move object pointed to might have been
* freed. The pointer is only for comparison.
* @mem_type: The memory type of the LRU list being traversed.
* This field is valid iff @bulk != NULL.
* @priority: the current priority
*
* Cursor to iterate over the resources in a manager.
*/
struct ttm_resource_cursor {
struct ttm_resource_manager *man;
struct ttm_lru_item hitch;
struct list_head bulk_link;
struct ttm_lru_bulk_move *bulk;
unsigned int mem_type;
unsigned int priority;
};
void ttm_resource_cursor_fini(struct ttm_resource_cursor *cursor);
/**
* struct ttm_kmap_iter_iomap - Specialization for a struct io_mapping +
* struct sg_table backed struct ttm_resource.
* @base: Embedded struct ttm_kmap_iter providing the usage interface.
* @iomap: struct io_mapping representing the underlying linear io_memory.
* @st: sg_table into @iomap, representing the memory of the struct ttm_resource.
* @start: Offset that needs to be subtracted from @st to make
* sg_dma_address(st->sgl) - @start == 0 for @iomap start.
* @cache: Scatterlist traversal cache for fast lookups.
* @cache.sg: Pointer to the currently cached scatterlist segment.
* @cache.i: First index of @sg. PAGE_SIZE granularity.
* @cache.end: Last index + 1 of @sg. PAGE_SIZE granularity.
* @cache.offs: First offset into @iomap of @sg. PAGE_SIZE granularity.
*/
struct ttm_kmap_iter_iomap {
struct ttm_kmap_iter base;
struct io_mapping *iomap;
struct sg_table *st;
resource_size_t start;
struct {
struct scatterlist *sg;
pgoff_t i;
pgoff_t end;
pgoff_t offs;
} cache;
};
/**
* struct ttm_kmap_iter_linear_io - Iterator specialization for linear io
* @base: The base iterator
* @dmap: Points to the starting address of the region
* @needs_unmap: Whether we need to unmap on fini
*/
struct ttm_kmap_iter_linear_io {
struct ttm_kmap_iter base;
struct iosys_map dmap;
bool needs_unmap;
};
/**
* ttm_resource_manager_set_used
*
* @man: A memory manager object.
* @used: usage state to set.
*
 * Set the manager's in-use flag. If disabled, the manager is no longer
* used for object placement.
*/
static inline void
ttm_resource_manager_set_used(struct ttm_resource_manager *man, bool used)
{
int i;
for (i = 0; i < TTM_MAX_BO_PRIORITY; i++)
WARN_ON(!list_empty(&man->lru[i]));
man->use_type = used;
}
/**
* ttm_resource_manager_used
*
* @man: Manager to get used state for
*
* Get the in use flag for a manager.
* Returns:
 * true if used, false if not.
*/
static inline bool ttm_resource_manager_used(struct ttm_resource_manager *man)
{
return man->use_type;
}
/**
* ttm_resource_manager_cleanup
*
* @man: A memory manager object.
*
 * Clean up the move fence of the memory manager object.
*/
static inline void
ttm_resource_manager_cleanup(struct ttm_resource_manager *man)
{
dma_fence_put(man->move);
man->move = NULL;
}
void ttm_lru_bulk_move_init(struct ttm_lru_bulk_move *bulk);
void ttm_lru_bulk_move_tail(struct ttm_lru_bulk_move *bulk);
void ttm_lru_bulk_move_fini(struct ttm_device *bdev,
struct ttm_lru_bulk_move *bulk);
void ttm_resource_add_bulk_move(struct ttm_resource *res,
struct ttm_buffer_object *bo);
void ttm_resource_del_bulk_move(struct ttm_resource *res,
struct ttm_buffer_object *bo);
void ttm_resource_move_to_lru_tail(struct ttm_resource *res);
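/*
 * Example (illustrative sketch only) of the typical bulk-move flow from a
 * driver: group the BOs that share one reservation object into a bulk, then
 * bump the whole group on the LRU in constant time at submission time.
 * ttm_bo_set_bulk_move() lives in ttm_bo.h, and taking bdev->lru_lock
 * around ttm_lru_bulk_move_tail() is assumed here; check the current
 * locking rules in ttm_resource.c.
 *
 *	ttm_lru_bulk_move_init(&bulk);
 *	ttm_bo_set_bulk_move(bo, &bulk);	(for each BO in the group)
 *
 *	spin_lock(&bdev->lru_lock);		(e.g. at command submission)
 *	ttm_lru_bulk_move_tail(&bulk);
 *	spin_unlock(&bdev->lru_lock);
 *
 *	ttm_lru_bulk_move_fini(bdev, &bulk);	(before freeing the struct)
 */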
void ttm_resource_init(struct ttm_buffer_object *bo,
const struct ttm_place *place,
struct ttm_resource *res);
void ttm_resource_fini(struct ttm_resource_manager *man,
struct ttm_resource *res);
int ttm_resource_alloc(struct ttm_buffer_object *bo,
const struct ttm_place *place,
struct ttm_resource **res);
void ttm_resource_free(struct ttm_buffer_object *bo, struct ttm_resource **res);
bool ttm_resource_intersects(struct ttm_device *bdev,
struct ttm_resource *res,
const struct ttm_place *place,
size_t size);
bool ttm_resource_compatible(struct ttm_resource *res,
struct ttm_placement *placement,
bool evicting);
void ttm_resource_set_bo(struct ttm_resource *res,
struct ttm_buffer_object *bo);
void ttm_resource_manager_init(struct ttm_resource_manager *man,
struct ttm_device *bdev,
uint64_t size);
int ttm_resource_manager_evict_all(struct ttm_device *bdev,
struct ttm_resource_manager *man);
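/*
 * Example (illustrative sketch only) of the usual manager lifecycle as seen
 * from a driver. example_man_func refers to the sketch next to struct
 * ttm_resource_manager_func above; hooking @man up to the device (e.g. via
 * ttm_set_driver_manager() in ttm_device.h) and error handling are omitted.
 *
 *	ttm_resource_manager_init(man, bdev, size);
 *	man->func = &example_man_func;
 *	ttm_resource_manager_set_used(man, true);
 *
 *	and on teardown, in this order:
 *
 *	ttm_resource_manager_set_used(man, false);
 *	ttm_resource_manager_evict_all(bdev, man);
 *	ttm_resource_manager_cleanup(man);
 */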
uint64_t ttm_resource_manager_usage(struct ttm_resource_manager *man);
void ttm_resource_manager_debug(struct ttm_resource_manager *man,
struct drm_printer *p);
struct ttm_resource *
ttm_resource_manager_first(struct ttm_resource_manager *man,
struct ttm_resource_cursor *cursor);
struct ttm_resource *
ttm_resource_manager_next(struct ttm_resource_cursor *cursor);
struct ttm_resource *
ttm_lru_first_res_or_null(struct list_head *head);
/**
* ttm_resource_manager_for_each_res - iterate over all resources
* @man: the resource manager
* @cursor: struct ttm_resource_cursor for the current position
* @res: the current resource
*
* Iterate over all the evictable resources in a resource manager.
*/
#define ttm_resource_manager_for_each_res(man, cursor, res) \
for (res = ttm_resource_manager_first(man, cursor); res; \
res = ttm_resource_manager_next(cursor))
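/*
 * Example (illustrative sketch only) of iterating a manager's LRU with a
 * cursor. The walk must be done with the ttm device's lru_lock held; where
 * exactly ttm_resource_cursor_fini() has to be called relative to that lock
 * is an assumption here and should be checked against ttm_resource.c.
 *
 *	struct ttm_resource_cursor cursor;
 *	struct ttm_resource *res;
 *
 *	spin_lock(&bdev->lru_lock);
 *	ttm_resource_manager_for_each_res(man, &cursor, res) {
 *		(inspect res, e.g. res->bo, res->mem_type, res->size)
 *	}
 *	ttm_resource_cursor_fini(&cursor);
 *	spin_unlock(&bdev->lru_lock);
 */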
struct ttm_kmap_iter *
ttm_kmap_iter_iomap_init(struct ttm_kmap_iter_iomap *iter_io,
struct io_mapping *iomap,
struct sg_table *st,
resource_size_t start);
struct ttm_kmap_iter_linear_io;
struct ttm_kmap_iter *
ttm_kmap_iter_linear_io_init(struct ttm_kmap_iter_linear_io *iter_io,
struct ttm_device *bdev,
struct ttm_resource *mem);
void ttm_kmap_iter_linear_io_fini(struct ttm_kmap_iter_linear_io *iter_io,
struct ttm_device *bdev,
struct ttm_resource *mem);
void ttm_resource_manager_create_debugfs(struct ttm_resource_manager *man,
					 struct dentry *parent,
const char *name);
#endif