author      Dave Airlie <airlied@redhat.com>    2020-08-04 05:56:31 +0300
committer   Dave Airlie <airlied@redhat.com>    2020-08-06 06:12:40 +0300
commit      9de59bc201496f28bb8835c2bcbae3ddb186b548 (patch)
tree        e8468f8c884392df77c2948ffec75a9eba97ffb1 /drivers
parent      80938c28ee67bad05b2982410440b5a2d200a2f5 (diff)
download    linux-9de59bc201496f28bb8835c2bcbae3ddb186b548.tar.xz
drm/ttm: rename ttm_mem_type_manager -> ttm_resource_manager.
This name makes a lot more sense, since these are about managing
driver resources rather than just memory ranges.
Acked-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Ben Skeggs <bskeggs@redhat.com>
Signed-off-by: Dave Airlie <airlied@redhat.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200804025632.3868079-59-airlied@gmail.com
Diffstat (limited to 'drivers')
21 files changed, 176 insertions, 176 deletions
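
The diff below applies the rename mechanically across the drivers. For orientation, here is a minimal sketch of the driver-side init/teardown pattern as it looks with the renamed helpers, modeled on the amdgpu GTT manager and nouveau paths touched by this patch. The mydrv_* names, the kzalloc-based allocation and the include line are illustrative assumptions and not code from the patch; real drivers also hook up a ttm_resource_manager_func table and, in amdgpu's case, sysfs files.

    /*
     * Sketch of the manager lifecycle using the renamed TTM helpers.
     * All ttm_resource_manager_* calls below appear in this diff; the
     * surrounding driver scaffolding is hypothetical.
     */
    #include <linux/slab.h>
    #include <drm/ttm/ttm_bo_driver.h>  /* struct ttm_resource_manager as of this series */

    static int mydrv_gtt_mgr_init(struct ttm_bo_device *bdev, uint64_t gtt_size)
    {
    	struct ttm_resource_manager *man;

    	man = kzalloc(sizeof(*man), GFP_KERNEL);
    	if (!man)
    		return -ENOMEM;

    	man->use_tt = true;
    	man->available_caching = TTM_PL_MASK_CACHING;
    	man->default_caching = TTM_PL_FLAG_CACHED;

    	/* was ttm_mem_type_manager_init() before this patch */
    	ttm_resource_manager_init(man, gtt_size >> PAGE_SHIFT);

    	ttm_set_driver_manager(bdev, TTM_PL_TT, man);
    	ttm_resource_manager_set_used(man, true);
    	return 0;
    }

    static void mydrv_gtt_mgr_fini(struct ttm_bo_device *bdev)
    {
    	struct ttm_resource_manager *man = ttm_manager_type(bdev, TTM_PL_TT);

    	/* mirror order used by amdgpu/nouveau teardown in this diff */
    	ttm_resource_manager_set_used(man, false);
    	if (ttm_resource_manager_force_list_clean(bdev, man))
    		return;

    	ttm_resource_manager_cleanup(man);
    	ttm_set_driver_manager(bdev, TTM_PL_TT, NULL);
    	kfree(man);
    }
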
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c index ba4d11e8a960..e2b4d3fc601d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c @@ -517,7 +517,7 @@ out_put: uint64_t amdgpu_amdkfd_get_vram_usage(struct kgd_dev *kgd) { struct amdgpu_device *adev = (struct amdgpu_device *)kgd; - struct ttm_mem_type_manager *vram_man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM); + struct ttm_resource_manager *vram_man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM); return amdgpu_vram_mgr_usage(vram_man); } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index 5ef7b3b7c9af..65b67c82a4b9 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -299,7 +299,7 @@ static void amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev, { s64 time_us, increment_us; u64 free_vram, total_vram, used_vram; - struct ttm_mem_type_manager *vram_man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM); + struct ttm_resource_manager *vram_man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM); /* Allow a maximum of 200 accumulated ms. This is basically per-IB * throttling. * diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c index e9de6f9538c0..b9050b7221d5 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c @@ -25,13 +25,13 @@ #include "amdgpu.h" struct amdgpu_gtt_mgr { - struct ttm_mem_type_manager manager; + struct ttm_resource_manager manager; struct drm_mm mm; spinlock_t lock; atomic64_t available; }; -static inline struct amdgpu_gtt_mgr *to_gtt_mgr(struct ttm_mem_type_manager *man) +static inline struct amdgpu_gtt_mgr *to_gtt_mgr(struct ttm_resource_manager *man) { return container_of(man, struct amdgpu_gtt_mgr, manager); } @@ -54,7 +54,7 @@ static ssize_t amdgpu_mem_info_gtt_total_show(struct device *dev, { struct drm_device *ddev = dev_get_drvdata(dev); struct amdgpu_device *adev = ddev->dev_private; - struct ttm_mem_type_manager *man = ttm_manager_type(&adev->mman.bdev, TTM_PL_TT); + struct ttm_resource_manager *man = ttm_manager_type(&adev->mman.bdev, TTM_PL_TT); return snprintf(buf, PAGE_SIZE, "%llu\n", man->size * PAGE_SIZE); } @@ -72,7 +72,7 @@ static ssize_t amdgpu_mem_info_gtt_used_show(struct device *dev, { struct drm_device *ddev = dev_get_drvdata(dev); struct amdgpu_device *adev = ddev->dev_private; - struct ttm_mem_type_manager *man = ttm_manager_type(&adev->mman.bdev, TTM_PL_TT); + struct ttm_resource_manager *man = ttm_manager_type(&adev->mman.bdev, TTM_PL_TT); return snprintf(buf, PAGE_SIZE, "%llu\n", amdgpu_gtt_mgr_usage(man)); } @@ -82,7 +82,7 @@ static DEVICE_ATTR(mem_info_gtt_total, S_IRUGO, static DEVICE_ATTR(mem_info_gtt_used, S_IRUGO, amdgpu_mem_info_gtt_used_show, NULL); -static const struct ttm_mem_type_manager_func amdgpu_gtt_mgr_func; +static const struct ttm_resource_manager_func amdgpu_gtt_mgr_func; /** * amdgpu_gtt_mgr_init - init GTT manager and DRM MM * @@ -93,7 +93,7 @@ static const struct ttm_mem_type_manager_func amdgpu_gtt_mgr_func; */ int amdgpu_gtt_mgr_init(struct amdgpu_device *adev, uint64_t gtt_size) { - struct ttm_mem_type_manager *man; + struct ttm_resource_manager *man; struct amdgpu_gtt_mgr *mgr; uint64_t start, size; int ret; @@ -108,7 +108,7 @@ int amdgpu_gtt_mgr_init(struct amdgpu_device *adev, uint64_t gtt_size) man->available_caching = TTM_PL_MASK_CACHING; 
man->default_caching = TTM_PL_FLAG_CACHED; - ttm_mem_type_manager_init(man, gtt_size >> PAGE_SHIFT); + ttm_resource_manager_init(man, gtt_size >> PAGE_SHIFT); start = AMDGPU_GTT_MAX_TRANSFER_SIZE * AMDGPU_GTT_NUM_TRANSFER_WINDOWS; size = (adev->gmc.gart_size >> PAGE_SHIFT) - start; @@ -128,7 +128,7 @@ int amdgpu_gtt_mgr_init(struct amdgpu_device *adev, uint64_t gtt_size) } ttm_set_driver_manager(&adev->mman.bdev, TTM_PL_TT, &mgr->manager); - ttm_mem_type_manager_set_used(man, true); + ttm_resource_manager_set_used(man, true); return 0; } @@ -142,13 +142,13 @@ int amdgpu_gtt_mgr_init(struct amdgpu_device *adev, uint64_t gtt_size) */ void amdgpu_gtt_mgr_fini(struct amdgpu_device *adev) { - struct ttm_mem_type_manager *man = ttm_manager_type(&adev->mman.bdev, TTM_PL_TT); + struct ttm_resource_manager *man = ttm_manager_type(&adev->mman.bdev, TTM_PL_TT); struct amdgpu_gtt_mgr *mgr = to_gtt_mgr(man); int ret; - ttm_mem_type_manager_set_used(man, false); + ttm_resource_manager_set_used(man, false); - ret = ttm_mem_type_manager_force_list_clean(&adev->mman.bdev, man); + ret = ttm_resource_manager_force_list_clean(&adev->mman.bdev, man); if (ret) return; @@ -159,7 +159,7 @@ void amdgpu_gtt_mgr_fini(struct amdgpu_device *adev) device_remove_file(adev->dev, &dev_attr_mem_info_gtt_total); device_remove_file(adev->dev, &dev_attr_mem_info_gtt_used); - ttm_mem_type_manager_cleanup(man); + ttm_resource_manager_cleanup(man); ttm_set_driver_manager(&adev->mman.bdev, TTM_PL_TT, NULL); kfree(mgr); } @@ -186,7 +186,7 @@ bool amdgpu_gtt_mgr_has_gart_addr(struct ttm_mem_reg *mem) * * Dummy, allocate the node but no space for it yet. */ -static int amdgpu_gtt_mgr_new(struct ttm_mem_type_manager *man, +static int amdgpu_gtt_mgr_new(struct ttm_resource_manager *man, struct ttm_buffer_object *tbo, const struct ttm_place *place, struct ttm_mem_reg *mem) @@ -251,7 +251,7 @@ err_out: * * Free the allocated GTT again. */ -static void amdgpu_gtt_mgr_del(struct ttm_mem_type_manager *man, +static void amdgpu_gtt_mgr_del(struct ttm_resource_manager *man, struct ttm_mem_reg *mem) { struct amdgpu_gtt_mgr *mgr = to_gtt_mgr(man); @@ -274,7 +274,7 @@ static void amdgpu_gtt_mgr_del(struct ttm_mem_type_manager *man, * * Return how many bytes are used in the GTT domain */ -uint64_t amdgpu_gtt_mgr_usage(struct ttm_mem_type_manager *man) +uint64_t amdgpu_gtt_mgr_usage(struct ttm_resource_manager *man) { struct amdgpu_gtt_mgr *mgr = to_gtt_mgr(man); s64 result = man->size - atomic64_read(&mgr->available); @@ -282,7 +282,7 @@ uint64_t amdgpu_gtt_mgr_usage(struct ttm_mem_type_manager *man) return (result > 0 ? result : 0) * PAGE_SIZE; } -int amdgpu_gtt_mgr_recover(struct ttm_mem_type_manager *man) +int amdgpu_gtt_mgr_recover(struct ttm_resource_manager *man) { struct amdgpu_gtt_mgr *mgr = to_gtt_mgr(man); struct amdgpu_gtt_node *node; @@ -309,7 +309,7 @@ int amdgpu_gtt_mgr_recover(struct ttm_mem_type_manager *man) * * Dump the table content using printk. 
*/ -static void amdgpu_gtt_mgr_debug(struct ttm_mem_type_manager *man, +static void amdgpu_gtt_mgr_debug(struct ttm_resource_manager *man, struct drm_printer *printer) { struct amdgpu_gtt_mgr *mgr = to_gtt_mgr(man); @@ -323,7 +323,7 @@ static void amdgpu_gtt_mgr_debug(struct ttm_mem_type_manager *man, amdgpu_gtt_mgr_usage(man) >> 20); } -static const struct ttm_mem_type_manager_func amdgpu_gtt_mgr_func = { +static const struct ttm_resource_manager_func amdgpu_gtt_mgr_func = { .get_node = amdgpu_gtt_mgr_new, .put_node = amdgpu_gtt_mgr_del, .debug = amdgpu_gtt_mgr_debug diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c index 134cca1af744..fff9c013f337 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c @@ -616,9 +616,9 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file } case AMDGPU_INFO_MEMORY: { struct drm_amdgpu_memory_info mem; - struct ttm_mem_type_manager *vram_man = + struct ttm_resource_manager *vram_man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM); - struct ttm_mem_type_manager *gtt_man = + struct ttm_resource_manager *gtt_man = ttm_manager_type(&adev->mman.bdev, TTM_PL_TT); memset(&mem, 0, sizeof(mem)); mem.vram.total_heap_size = adev->gmc.real_vram_size; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c index ced418cba2f7..ce98df5b0c21 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c @@ -442,7 +442,7 @@ void amdgpu_bo_free_kernel(struct amdgpu_bo **bo, u64 *gpu_addr, static bool amdgpu_bo_validate_size(struct amdgpu_device *adev, unsigned long size, u32 domain) { - struct ttm_mem_type_manager *man = NULL; + struct ttm_resource_manager *man = NULL; /* * If GTT is part of requested domains the check must succeed to diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index 555695854076..2fc0214d9a95 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -2012,7 +2012,7 @@ void amdgpu_ttm_fini(struct amdgpu_device *adev) */ void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable) { - struct ttm_mem_type_manager *man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM); + struct ttm_resource_manager *man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM); uint64_t size; int r; @@ -2234,7 +2234,7 @@ static int amdgpu_mm_dump_table(struct seq_file *m, void *data) unsigned ttm_pl = (uintptr_t)node->info_ent->data; struct drm_device *dev = node->minor->dev; struct amdgpu_device *adev = dev->dev_private; - struct ttm_mem_type_manager *man = ttm_manager_type(&adev->mman.bdev, ttm_pl); + struct ttm_resource_manager *man = ttm_manager_type(&adev->mman.bdev, ttm_pl); struct drm_printer p = drm_seq_file_printer(m); man->func->debug(man, &p); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h index c01fdb3f0458..3db29ae1f802 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h @@ -73,8 +73,8 @@ int amdgpu_vram_mgr_init(struct amdgpu_device *adev); void amdgpu_vram_mgr_fini(struct amdgpu_device *adev); bool amdgpu_gtt_mgr_has_gart_addr(struct ttm_mem_reg *mem); -uint64_t amdgpu_gtt_mgr_usage(struct ttm_mem_type_manager *man); -int amdgpu_gtt_mgr_recover(struct ttm_mem_type_manager *man); +uint64_t amdgpu_gtt_mgr_usage(struct ttm_resource_manager *man); +int 
amdgpu_gtt_mgr_recover(struct ttm_resource_manager *man); u64 amdgpu_vram_mgr_bo_visible_size(struct amdgpu_bo *bo); int amdgpu_vram_mgr_alloc_sgt(struct amdgpu_device *adev, @@ -86,8 +86,8 @@ void amdgpu_vram_mgr_free_sgt(struct amdgpu_device *adev, struct device *dev, enum dma_data_direction dir, struct sg_table *sgt); -uint64_t amdgpu_vram_mgr_usage(struct ttm_mem_type_manager *man); -uint64_t amdgpu_vram_mgr_vis_usage(struct ttm_mem_type_manager *man); +uint64_t amdgpu_vram_mgr_usage(struct ttm_resource_manager *man); +uint64_t amdgpu_vram_mgr_vis_usage(struct ttm_resource_manager *man); int amdgpu_ttm_init(struct amdgpu_device *adev); void amdgpu_ttm_late_init(struct amdgpu_device *adev); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c index 03a6248f0c4e..6f888a63f22d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c @@ -29,7 +29,7 @@ #include "atom.h" struct amdgpu_vram_mgr { - struct ttm_mem_type_manager manager; + struct ttm_resource_manager manager; struct drm_mm mm; spinlock_t lock; atomic64_t usage; @@ -37,7 +37,7 @@ struct amdgpu_vram_mgr { struct amdgpu_device *adev; }; -static inline struct amdgpu_vram_mgr *to_vram_mgr(struct ttm_mem_type_manager *man) +static inline struct amdgpu_vram_mgr *to_vram_mgr(struct ttm_resource_manager *man) { return container_of(man, struct amdgpu_vram_mgr, manager); } @@ -89,7 +89,7 @@ static ssize_t amdgpu_mem_info_vram_used_show(struct device *dev, { struct drm_device *ddev = dev_get_drvdata(dev); struct amdgpu_device *adev = ddev->dev_private; - struct ttm_mem_type_manager *man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM); + struct ttm_resource_manager *man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM); return snprintf(buf, PAGE_SIZE, "%llu\n", amdgpu_vram_mgr_usage(man)); } @@ -107,7 +107,7 @@ static ssize_t amdgpu_mem_info_vis_vram_used_show(struct device *dev, { struct drm_device *ddev = dev_get_drvdata(dev); struct amdgpu_device *adev = ddev->dev_private; - struct ttm_mem_type_manager *man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM); + struct ttm_resource_manager *man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM); return snprintf(buf, PAGE_SIZE, "%llu\n", amdgpu_vram_mgr_vis_usage(man)); } @@ -165,7 +165,7 @@ static const struct attribute *amdgpu_vram_mgr_attributes[] = { NULL }; -static const struct ttm_mem_type_manager_func amdgpu_vram_mgr_func; +static const struct ttm_resource_manager_func amdgpu_vram_mgr_func; /** * amdgpu_vram_mgr_init - init VRAM manager and DRM MM @@ -177,7 +177,7 @@ static const struct ttm_mem_type_manager_func amdgpu_vram_mgr_func; */ int amdgpu_vram_mgr_init(struct amdgpu_device *adev) { - struct ttm_mem_type_manager *man; + struct ttm_resource_manager *man; struct amdgpu_vram_mgr *mgr; int ret; @@ -190,7 +190,7 @@ int amdgpu_vram_mgr_init(struct amdgpu_device *adev) man->available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC; man->default_caching = TTM_PL_FLAG_WC; - ttm_mem_type_manager_init(man, adev->gmc.real_vram_size >> PAGE_SHIFT); + ttm_resource_manager_init(man, adev->gmc.real_vram_size >> PAGE_SHIFT); man->func = &amdgpu_vram_mgr_func; @@ -205,7 +205,7 @@ int amdgpu_vram_mgr_init(struct amdgpu_device *adev) DRM_ERROR("Failed to register sysfs\n"); ttm_set_driver_manager(&adev->mman.bdev, TTM_PL_VRAM, &mgr->manager); - ttm_mem_type_manager_set_used(man, true); + ttm_resource_manager_set_used(man, true); return 0; } @@ -219,13 +219,13 @@ int amdgpu_vram_mgr_init(struct 
amdgpu_device *adev) */ void amdgpu_vram_mgr_fini(struct amdgpu_device *adev) { - struct ttm_mem_type_manager *man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM); + struct ttm_resource_manager *man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM); struct amdgpu_vram_mgr *mgr = to_vram_mgr(man); int ret; - ttm_mem_type_manager_set_used(man, false); + ttm_resource_manager_set_used(man, false); - ret = ttm_mem_type_manager_force_list_clean(&adev->mman.bdev, man); + ret = ttm_resource_manager_force_list_clean(&adev->mman.bdev, man); if (ret) return; @@ -235,7 +235,7 @@ void amdgpu_vram_mgr_fini(struct amdgpu_device *adev) sysfs_remove_files(&adev->dev->kobj, amdgpu_vram_mgr_attributes); - ttm_mem_type_manager_cleanup(man); + ttm_resource_manager_cleanup(man); ttm_set_driver_manager(&adev->mman.bdev, TTM_PL_VRAM, NULL); kfree(mgr); } @@ -321,7 +321,7 @@ static void amdgpu_vram_mgr_virt_start(struct ttm_mem_reg *mem, * * Allocate VRAM for the given BO. */ -static int amdgpu_vram_mgr_new(struct ttm_mem_type_manager *man, +static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man, struct ttm_buffer_object *tbo, const struct ttm_place *place, struct ttm_mem_reg *mem) @@ -441,7 +441,7 @@ error: * * Free the allocated VRAM again. */ -static void amdgpu_vram_mgr_del(struct ttm_mem_type_manager *man, +static void amdgpu_vram_mgr_del(struct ttm_resource_manager *man, struct ttm_mem_reg *mem) { struct amdgpu_vram_mgr *mgr = to_vram_mgr(man); @@ -575,7 +575,7 @@ void amdgpu_vram_mgr_free_sgt(struct amdgpu_device *adev, * * Returns how many bytes are used in this domain. */ -uint64_t amdgpu_vram_mgr_usage(struct ttm_mem_type_manager *man) +uint64_t amdgpu_vram_mgr_usage(struct ttm_resource_manager *man) { struct amdgpu_vram_mgr *mgr = to_vram_mgr(man); @@ -589,7 +589,7 @@ uint64_t amdgpu_vram_mgr_usage(struct ttm_mem_type_manager *man) * * Returns how many bytes are used in the visible part of VRAM */ -uint64_t amdgpu_vram_mgr_vis_usage(struct ttm_mem_type_manager *man) +uint64_t amdgpu_vram_mgr_vis_usage(struct ttm_resource_manager *man) { struct amdgpu_vram_mgr *mgr = to_vram_mgr(man); @@ -604,7 +604,7 @@ uint64_t amdgpu_vram_mgr_vis_usage(struct ttm_mem_type_manager *man) * * Dump the table content using printk. 
*/ -static void amdgpu_vram_mgr_debug(struct ttm_mem_type_manager *man, +static void amdgpu_vram_mgr_debug(struct ttm_resource_manager *man, struct drm_printer *printer) { struct amdgpu_vram_mgr *mgr = to_vram_mgr(man); @@ -618,7 +618,7 @@ static void amdgpu_vram_mgr_debug(struct ttm_mem_type_manager *man, amdgpu_vram_mgr_vis_usage(man) >> 20); } -static const struct ttm_mem_type_manager_func amdgpu_vram_mgr_func = { +static const struct ttm_resource_manager_func amdgpu_vram_mgr_func = { .get_node = amdgpu_vram_mgr_new, .put_node = amdgpu_vram_mgr_del, .debug = amdgpu_vram_mgr_debug diff --git a/drivers/gpu/drm/drm_gem_vram_helper.c b/drivers/gpu/drm/drm_gem_vram_helper.c index 2187787f397e..e3660d00987d 100644 --- a/drivers/gpu/drm/drm_gem_vram_helper.c +++ b/drivers/gpu/drm/drm_gem_vram_helper.c @@ -1075,10 +1075,10 @@ static int drm_vram_mm_debugfs(struct seq_file *m, void *data) { struct drm_info_node *node = (struct drm_info_node *) m->private; struct drm_vram_mm *vmm = node->minor->dev->vram_mm; - struct ttm_mem_type_manager *man = ttm_manager_type(&vmm->bdev, TTM_PL_VRAM); + struct ttm_resource_manager *man = ttm_manager_type(&vmm->bdev, TTM_PL_VRAM); struct drm_printer p = drm_seq_file_printer(m); - ttm_mem_type_manager_debug(man, &p); + ttm_resource_manager_debug(man, &p); return 0; } diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c index 84387c810540..78b5a87b9855 100644 --- a/drivers/gpu/drm/nouveau/nouveau_ttm.c +++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c @@ -32,13 +32,13 @@ #include <core/tegra.h> static void -nouveau_manager_del(struct ttm_mem_type_manager *man, struct ttm_mem_reg *reg) +nouveau_manager_del(struct ttm_resource_manager *man, struct ttm_mem_reg *reg) { nouveau_mem_del(reg); } static int -nouveau_vram_manager_new(struct ttm_mem_type_manager *man, +nouveau_vram_manager_new(struct ttm_resource_manager *man, struct ttm_buffer_object *bo, const struct ttm_place *place, struct ttm_mem_reg *reg) @@ -63,13 +63,13 @@ nouveau_vram_manager_new(struct ttm_mem_type_manager *man, return 0; } -const struct ttm_mem_type_manager_func nouveau_vram_manager = { +const struct ttm_resource_manager_func nouveau_vram_manager = { .get_node = nouveau_vram_manager_new, .put_node = nouveau_manager_del, }; static int -nouveau_gart_manager_new(struct ttm_mem_type_manager *man, +nouveau_gart_manager_new(struct ttm_resource_manager *man, struct ttm_buffer_object *bo, const struct ttm_place *place, struct ttm_mem_reg *reg) @@ -86,13 +86,13 @@ nouveau_gart_manager_new(struct ttm_mem_type_manager *man, return 0; } -const struct ttm_mem_type_manager_func nouveau_gart_manager = { +const struct ttm_resource_manager_func nouveau_gart_manager = { .get_node = nouveau_gart_manager_new, .put_node = nouveau_manager_del, }; static int -nv04_gart_manager_new(struct ttm_mem_type_manager *man, +nv04_gart_manager_new(struct ttm_resource_manager *man, struct ttm_buffer_object *bo, const struct ttm_place *place, struct ttm_mem_reg *reg) @@ -118,7 +118,7 @@ nv04_gart_manager_new(struct ttm_mem_type_manager *man, return 0; } -const struct ttm_mem_type_manager_func nv04_gart_manager = { +const struct ttm_resource_manager_func nv04_gart_manager = { .get_node = nv04_gart_manager_new, .put_node = nouveau_manager_del, }; @@ -160,7 +160,7 @@ nouveau_ttm_init_vram(struct nouveau_drm *drm) if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA) { /* Some BARs do not support being ioremapped WC */ const u8 type = mmu->type[drm->ttm.type_vram].type; - struct 
ttm_mem_type_manager *man = kzalloc(sizeof(*man), GFP_KERNEL); + struct ttm_resource_manager *man = kzalloc(sizeof(*man), GFP_KERNEL); if (!man) return -ENOMEM; @@ -175,10 +175,10 @@ nouveau_ttm_init_vram(struct nouveau_drm *drm) man->func = &nouveau_vram_manager; man->use_io_reserve_lru = true; - ttm_mem_type_manager_init(man, + ttm_resource_manager_init(man, drm->gem.vram_available >> PAGE_SHIFT); ttm_set_driver_manager(&drm->ttm.bdev, TTM_PL_VRAM, man); - ttm_mem_type_manager_set_used(man, true); + ttm_resource_manager_set_used(man, true); return 0; } else { return ttm_range_man_init(&drm->ttm.bdev, TTM_PL_VRAM, @@ -191,12 +191,12 @@ nouveau_ttm_init_vram(struct nouveau_drm *drm) static void nouveau_ttm_fini_vram(struct nouveau_drm *drm) { - struct ttm_mem_type_manager *man = ttm_manager_type(&drm->ttm.bdev, TTM_PL_VRAM); + struct ttm_resource_manager *man = ttm_manager_type(&drm->ttm.bdev, TTM_PL_VRAM); if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA) { - ttm_mem_type_manager_set_used(man, false); - ttm_mem_type_manager_force_list_clean(&drm->ttm.bdev, man); - ttm_mem_type_manager_cleanup(man); + ttm_resource_manager_set_used(man, false); + ttm_resource_manager_force_list_clean(&drm->ttm.bdev, man); + ttm_resource_manager_cleanup(man); ttm_set_driver_manager(&drm->ttm.bdev, TTM_PL_VRAM, NULL); kfree(man); } else @@ -206,10 +206,10 @@ nouveau_ttm_fini_vram(struct nouveau_drm *drm) static int nouveau_ttm_init_gtt(struct nouveau_drm *drm) { - struct ttm_mem_type_manager *man; + struct ttm_resource_manager *man; unsigned long size_pages = drm->gem.gart_available >> PAGE_SHIFT; unsigned available_caching, default_caching; - const struct ttm_mem_type_manager_func *func = NULL; + const struct ttm_resource_manager_func *func = NULL; if (drm->agp.bridge) { available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC; @@ -237,24 +237,24 @@ nouveau_ttm_init_gtt(struct nouveau_drm *drm) man->available_caching = available_caching; man->default_caching = default_caching; man->use_tt = true; - ttm_mem_type_manager_init(man, size_pages); + ttm_resource_manager_init(man, size_pages); ttm_set_driver_manager(&drm->ttm.bdev, TTM_PL_TT, man); - ttm_mem_type_manager_set_used(man, true); + ttm_resource_manager_set_used(man, true); return 0; } static void nouveau_ttm_fini_gtt(struct nouveau_drm *drm) { - struct ttm_mem_type_manager *man = ttm_manager_type(&drm->ttm.bdev, TTM_PL_TT); + struct ttm_resource_manager *man = ttm_manager_type(&drm->ttm.bdev, TTM_PL_TT); if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA && drm->agp.bridge) ttm_range_man_fini(&drm->ttm.bdev, TTM_PL_TT); else { - ttm_mem_type_manager_set_used(man, false); - ttm_mem_type_manager_force_list_clean(&drm->ttm.bdev, man); - ttm_mem_type_manager_cleanup(man); + ttm_resource_manager_set_used(man, false); + ttm_resource_manager_force_list_clean(&drm->ttm.bdev, man); + ttm_resource_manager_cleanup(man); ttm_set_driver_manager(&drm->ttm.bdev, TTM_PL_TT, NULL); kfree(man); } diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.h b/drivers/gpu/drm/nouveau/nouveau_ttm.h index 085280754b3e..eaf25461cd91 100644 --- a/drivers/gpu/drm/nouveau/nouveau_ttm.h +++ b/drivers/gpu/drm/nouveau/nouveau_ttm.h @@ -8,9 +8,9 @@ nouveau_bdev(struct ttm_bo_device *bd) return container_of(bd, struct nouveau_drm, ttm.bdev); } -extern const struct ttm_mem_type_manager_func nouveau_vram_manager; -extern const struct ttm_mem_type_manager_func nouveau_gart_manager; -extern const struct ttm_mem_type_manager_func nv04_gart_manager; +extern const struct 
ttm_resource_manager_func nouveau_vram_manager; +extern const struct ttm_resource_manager_func nouveau_gart_manager; +extern const struct ttm_resource_manager_func nv04_gart_manager; struct ttm_tt *nouveau_sgdma_create_ttm(struct ttm_buffer_object *bo, u32 page_flags); diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c index 7b9f7a94332a..727049046014 100644 --- a/drivers/gpu/drm/qxl/qxl_ttm.c +++ b/drivers/gpu/drm/qxl/qxl_ttm.c @@ -275,10 +275,10 @@ void qxl_ttm_fini(struct qxl_device *qdev) static int qxl_mm_dump_table(struct seq_file *m, void *data) { struct drm_info_node *node = (struct drm_info_node *)m->private; - struct ttm_mem_type_manager *man = (struct ttm_mem_type_manager *)node->info_ent->data; + struct ttm_resource_manager *man = (struct ttm_resource_manager *)node->info_ent->data; struct drm_printer p = drm_seq_file_printer(m); - ttm_mem_type_manager_debug(man, &p); + ttm_resource_manager_debug(man, &p); return 0; } #endif diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c index 3ec028dba739..7f5dfe04789e 100644 --- a/drivers/gpu/drm/radeon/radeon_gem.c +++ b/drivers/gpu/drm/radeon/radeon_gem.c @@ -224,7 +224,7 @@ int radeon_gem_info_ioctl(struct drm_device *dev, void *data, { struct radeon_device *rdev = dev->dev_private; struct drm_radeon_gem_info *args = data; - struct ttm_mem_type_manager *man; + struct ttm_resource_manager *man; man = ttm_manager_type(&rdev->mman.bdev, TTM_PL_VRAM); diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c index 5f536de3986d..21a01737b1be 100644 --- a/drivers/gpu/drm/radeon/radeon_ttm.c +++ b/drivers/gpu/drm/radeon/radeon_ttm.c @@ -838,7 +838,7 @@ void radeon_ttm_fini(struct radeon_device *rdev) * isn't running */ void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size) { - struct ttm_mem_type_manager *man; + struct ttm_resource_manager *man; if (!rdev->mman.initialized) return; @@ -897,7 +897,7 @@ static int radeon_mm_dump_table(struct seq_file *m, void *data) unsigned ttm_pl = *(int*)node->info_ent->data; struct drm_device *dev = node->minor->dev; struct radeon_device *rdev = dev->dev_private; - struct ttm_mem_type_manager *man = ttm_manager_type(&rdev->mman.bdev, ttm_pl); + struct ttm_resource_manager *man = ttm_manager_type(&rdev->mman.bdev, ttm_pl); struct drm_printer p = drm_seq_file_printer(m); man->func->debug(man, &p); diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index e0188250b6ec..ff68f25ddbd4 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c @@ -77,7 +77,7 @@ static inline int ttm_mem_type_from_place(const struct ttm_place *place, return 0; } -void ttm_mem_type_manager_debug(struct ttm_mem_type_manager *man, +void ttm_resource_manager_debug(struct ttm_resource_manager *man, struct drm_printer *p) { drm_printf(p, " use_type: %d\n", man->use_type); @@ -88,14 +88,14 @@ void ttm_mem_type_manager_debug(struct ttm_mem_type_manager *man, if (man->func && man->func->debug) (*man->func->debug)(man, p); } -EXPORT_SYMBOL(ttm_mem_type_manager_debug); +EXPORT_SYMBOL(ttm_resource_manager_debug); static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo, struct ttm_placement *placement) { struct drm_printer p = drm_debug_printer(TTM_PFX); int i, ret, mem_type; - struct ttm_mem_type_manager *man; + struct ttm_resource_manager *man; drm_printf(&p, "No space for %p (%lu pages, %luK, %luM)\n", bo, bo->mem.num_pages, bo->mem.size >> 10, @@ -108,7 +108,7 @@ static void 
ttm_bo_mem_space_debug(struct ttm_buffer_object *bo, drm_printf(&p, " placement[%d]=0x%08X (%d)\n", i, placement->placement[i].flags, mem_type); man = ttm_manager_type(bo->bdev, mem_type); - ttm_mem_type_manager_debug(man, &p); + ttm_resource_manager_debug(man, &p); } } @@ -148,7 +148,7 @@ static void ttm_bo_add_mem_to_lru(struct ttm_buffer_object *bo, struct ttm_mem_reg *mem) { struct ttm_bo_device *bdev = bo->bdev; - struct ttm_mem_type_manager *man; + struct ttm_resource_manager *man; if (!list_empty(&bo->lru)) return; @@ -223,7 +223,7 @@ void ttm_bo_bulk_move_lru_tail(struct ttm_lru_bulk_move *bulk) for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) { struct ttm_lru_bulk_move_pos *pos = &bulk->tt[i]; - struct ttm_mem_type_manager *man; + struct ttm_resource_manager *man; if (!pos->first) continue; @@ -238,7 +238,7 @@ void ttm_bo_bulk_move_lru_tail(struct ttm_lru_bulk_move *bulk) for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) { struct ttm_lru_bulk_move_pos *pos = &bulk->vram[i]; - struct ttm_mem_type_manager *man; + struct ttm_resource_manager *man; if (!pos->first) continue; @@ -272,8 +272,8 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx) { struct ttm_bo_device *bdev = bo->bdev; - struct ttm_mem_type_manager *old_man = ttm_manager_type(bdev, bo->mem.mem_type); - struct ttm_mem_type_manager *new_man = ttm_manager_type(bdev, mem->mem_type); + struct ttm_resource_manager *old_man = ttm_manager_type(bdev, bo->mem.mem_type); + struct ttm_resource_manager *new_man = ttm_manager_type(bdev, mem->mem_type); int ret; ret = ttm_mem_io_lock(old_man, true); @@ -551,7 +551,7 @@ static void ttm_bo_release(struct kref *kref) struct ttm_buffer_object *bo = container_of(kref, struct ttm_buffer_object, kref); struct ttm_bo_device *bdev = bo->bdev; - struct ttm_mem_type_manager *man = ttm_manager_type(bdev, bo->mem.mem_type); + struct ttm_resource_manager *man = ttm_manager_type(bdev, bo->mem.mem_type); size_t acc_size = bo->acc_size; int ret; @@ -768,7 +768,7 @@ static int ttm_mem_evict_wait_busy(struct ttm_buffer_object *busy_bo, } static int ttm_mem_evict_first(struct ttm_bo_device *bdev, - struct ttm_mem_type_manager *man, + struct ttm_resource_manager *man, const struct ttm_place *place, struct ttm_operation_ctx *ctx, struct ww_acquire_ctx *ticket) @@ -843,7 +843,7 @@ static int ttm_bo_mem_get(struct ttm_buffer_object *bo, const struct ttm_place *place, struct ttm_mem_reg *mem) { - struct ttm_mem_type_manager *man = ttm_manager_type(bo->bdev, mem->mem_type); + struct ttm_resource_manager *man = ttm_manager_type(bo->bdev, mem->mem_type); mem->mm_node = NULL; if (!man->func || !man->func->get_node) @@ -854,7 +854,7 @@ static int ttm_bo_mem_get(struct ttm_buffer_object *bo, void ttm_bo_mem_put(struct ttm_buffer_object *bo, struct ttm_mem_reg *mem) { - struct ttm_mem_type_manager *man = ttm_manager_type(bo->bdev, mem->mem_type); + struct ttm_resource_manager *man = ttm_manager_type(bo->bdev, mem->mem_type); if (!man->func || !man->func->put_node) return; @@ -869,7 +869,7 @@ EXPORT_SYMBOL(ttm_bo_mem_put); * Add the last move fence to the BO and reserve a new shared slot. 
*/ static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo, - struct ttm_mem_type_manager *man, + struct ttm_resource_manager *man, struct ttm_mem_reg *mem, bool no_wait_gpu) { @@ -909,7 +909,7 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx) { struct ttm_bo_device *bdev = bo->bdev; - struct ttm_mem_type_manager *man = ttm_manager_type(bdev, mem->mem_type); + struct ttm_resource_manager *man = ttm_manager_type(bdev, mem->mem_type); struct ww_acquire_ctx *ticket; int ret; @@ -929,7 +929,7 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo, return ttm_bo_add_move_fence(bo, man, mem, ctx->no_wait_gpu); } -static uint32_t ttm_bo_select_caching(struct ttm_mem_type_manager *man, +static uint32_t ttm_bo_select_caching(struct ttm_resource_manager *man, uint32_t cur_placement, uint32_t proposed_placement) { @@ -954,7 +954,7 @@ static uint32_t ttm_bo_select_caching(struct ttm_mem_type_manager *man, return result; } -static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man, +static bool ttm_bo_mt_compatible(struct ttm_resource_manager *man, uint32_t mem_type, const struct ttm_place *place, uint32_t *masked_placement) @@ -991,7 +991,7 @@ static int ttm_bo_mem_placement(struct ttm_buffer_object *bo, { struct ttm_bo_device *bdev = bo->bdev; uint32_t mem_type = TTM_PL_SYSTEM; - struct ttm_mem_type_manager *man; + struct ttm_resource_manager *man; uint32_t cur_flags = 0; int ret; @@ -1000,7 +1000,7 @@ static int ttm_bo_mem_placement(struct ttm_buffer_object *bo, return ret; man = ttm_manager_type(bdev, mem_type); - if (!man || !ttm_mem_type_manager_used(man)) + if (!man || !ttm_resource_manager_used(man)) return -EBUSY; if (!ttm_bo_mt_compatible(man, mem_type, place, &cur_flags)) @@ -1047,7 +1047,7 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo, for (i = 0; i < placement->num_placement; ++i) { const struct ttm_place *place = &placement->placement[i]; - struct ttm_mem_type_manager *man; + struct ttm_resource_manager *man; ret = ttm_bo_mem_placement(bo, place, mem, ctx); if (ret == -EBUSY) @@ -1404,8 +1404,8 @@ int ttm_bo_create(struct ttm_bo_device *bdev, } EXPORT_SYMBOL(ttm_bo_create); -int ttm_mem_type_manager_force_list_clean(struct ttm_bo_device *bdev, - struct ttm_mem_type_manager *man) +int ttm_resource_manager_force_list_clean(struct ttm_bo_device *bdev, + struct ttm_resource_manager *man) { struct ttm_operation_ctx ctx = { .interruptible = false, @@ -1447,12 +1447,12 @@ int ttm_mem_type_manager_force_list_clean(struct ttm_bo_device *bdev, return 0; } -EXPORT_SYMBOL(ttm_mem_type_manager_force_list_clean); +EXPORT_SYMBOL(ttm_resource_manager_force_list_clean); int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type) { - struct ttm_mem_type_manager *man = ttm_manager_type(bdev, mem_type); + struct ttm_resource_manager *man = ttm_manager_type(bdev, mem_type); if (mem_type == 0 || mem_type >= TTM_NUM_MEM_TYPES) { pr_err("Illegal memory manager memory type %u\n", mem_type); @@ -1464,11 +1464,11 @@ int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type) return 0; } - return ttm_mem_type_manager_force_list_clean(bdev, man); + return ttm_resource_manager_force_list_clean(bdev, man); } EXPORT_SYMBOL(ttm_bo_evict_mm); -void ttm_mem_type_manager_init(struct ttm_mem_type_manager *man, +void ttm_resource_manager_init(struct ttm_resource_manager *man, unsigned long p_size) { unsigned i; @@ -1483,7 +1483,7 @@ void ttm_mem_type_manager_init(struct ttm_mem_type_manager *man, INIT_LIST_HEAD(&man->lru[i]); 
man->move = NULL; } -EXPORT_SYMBOL(ttm_mem_type_manager_init); +EXPORT_SYMBOL(ttm_resource_manager_init); static void ttm_bo_global_kobj_release(struct kobject *kobj) { @@ -1550,10 +1550,10 @@ int ttm_bo_device_release(struct ttm_bo_device *bdev) struct ttm_bo_global *glob = &ttm_bo_glob; int ret = 0; unsigned i; - struct ttm_mem_type_manager *man; + struct ttm_resource_manager *man; man = ttm_manager_type(bdev, TTM_PL_SYSTEM); - ttm_mem_type_manager_set_used(man, false); + ttm_resource_manager_set_used(man, false); ttm_set_driver_manager(bdev, TTM_PL_SYSTEM, NULL); mutex_lock(&ttm_global_mutex); @@ -1580,7 +1580,7 @@ EXPORT_SYMBOL(ttm_bo_device_release); static void ttm_bo_init_sysman(struct ttm_bo_device *bdev) { - struct ttm_mem_type_manager *man = &bdev->sysman; + struct ttm_resource_manager *man = &bdev->sysman; /* * Initialize the system memory buffer type. @@ -1590,9 +1590,9 @@ static void ttm_bo_init_sysman(struct ttm_bo_device *bdev) man->available_caching = TTM_PL_MASK_CACHING; man->default_caching = TTM_PL_FLAG_CACHED; - ttm_mem_type_manager_init(man, 0); + ttm_resource_manager_init(man, 0); ttm_set_driver_manager(bdev, TTM_PL_SYSTEM, man); - ttm_mem_type_manager_set_used(man, true); + ttm_resource_manager_set_used(man, true); } int ttm_bo_device_init(struct ttm_bo_device *bdev, @@ -1643,7 +1643,7 @@ void ttm_bo_unmap_virtual_locked(struct ttm_buffer_object *bo) void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo) { struct ttm_bo_device *bdev = bo->bdev; - struct ttm_mem_type_manager *man = ttm_manager_type(bdev, bo->mem.mem_type); + struct ttm_resource_manager *man = ttm_manager_type(bdev, bo->mem.mem_type); ttm_mem_io_lock(man, false); ttm_bo_unmap_virtual_locked(bo); diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c index 879c8ded0cd8..8ef0de8e36c5 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_util.c +++ b/drivers/gpu/drm/ttm/ttm_bo_util.c @@ -91,7 +91,7 @@ int ttm_bo_move_ttm(struct ttm_buffer_object *bo, } EXPORT_SYMBOL(ttm_bo_move_ttm); -int ttm_mem_io_lock(struct ttm_mem_type_manager *man, bool interruptible) +int ttm_mem_io_lock(struct ttm_resource_manager *man, bool interruptible) { if (likely(!man->use_io_reserve_lru)) return 0; @@ -103,7 +103,7 @@ int ttm_mem_io_lock(struct ttm_mem_type_manager *man, bool interruptible) return 0; } -void ttm_mem_io_unlock(struct ttm_mem_type_manager *man) +void ttm_mem_io_unlock(struct ttm_resource_manager *man) { if (likely(!man->use_io_reserve_lru)) return; @@ -111,7 +111,7 @@ void ttm_mem_io_unlock(struct ttm_mem_type_manager *man) mutex_unlock(&man->io_reserve_mutex); } -static int ttm_mem_io_evict(struct ttm_mem_type_manager *man) +static int ttm_mem_io_evict(struct ttm_resource_manager *man) { struct ttm_buffer_object *bo; @@ -129,7 +129,7 @@ static int ttm_mem_io_evict(struct ttm_mem_type_manager *man) int ttm_mem_io_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem) { - struct ttm_mem_type_manager *man = ttm_manager_type(bdev, mem->mem_type); + struct ttm_resource_manager *man = ttm_manager_type(bdev, mem->mem_type); int ret; if (mem->bus.io_reserved_count++) @@ -162,7 +162,7 @@ void ttm_mem_io_free(struct ttm_bo_device *bdev, int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo) { - struct ttm_mem_type_manager *man = ttm_manager_type(bo->bdev, bo->mem.mem_type); + struct ttm_resource_manager *man = ttm_manager_type(bo->bdev, bo->mem.mem_type); struct ttm_mem_reg *mem = &bo->mem; int ret; @@ -195,7 +195,7 @@ static int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg 
*mem, void **virtual) { - struct ttm_mem_type_manager *man = ttm_manager_type(bdev, mem->mem_type); + struct ttm_resource_manager *man = ttm_manager_type(bdev, mem->mem_type); int ret; void *addr; @@ -230,7 +230,7 @@ static void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem, void *virtual) { - struct ttm_mem_type_manager *man; + struct ttm_resource_manager *man; man = ttm_manager_type(bdev, mem->mem_type); @@ -303,7 +303,7 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem) { struct ttm_bo_device *bdev = bo->bdev; - struct ttm_mem_type_manager *man = ttm_manager_type(bdev, new_mem->mem_type); + struct ttm_resource_manager *man = ttm_manager_type(bdev, new_mem->mem_type); struct ttm_tt *ttm = bo->ttm; struct ttm_mem_reg *old_mem = &bo->mem; struct ttm_mem_reg old_copy = *old_mem; @@ -570,7 +570,7 @@ int ttm_bo_kmap(struct ttm_buffer_object *bo, unsigned long start_page, unsigned long num_pages, struct ttm_bo_kmap_obj *map) { - struct ttm_mem_type_manager *man = + struct ttm_resource_manager *man = ttm_manager_type(bo->bdev, bo->mem.mem_type); unsigned long offset, size; int ret; @@ -600,7 +600,7 @@ EXPORT_SYMBOL(ttm_bo_kmap); void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map) { struct ttm_buffer_object *bo = map->bo; - struct ttm_mem_type_manager *man = + struct ttm_resource_manager *man = ttm_manager_type(bo->bdev, bo->mem.mem_type); if (!map->virtual) @@ -634,7 +634,7 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem) { struct ttm_bo_device *bdev = bo->bdev; - struct ttm_mem_type_manager *man = ttm_manager_type(bdev, new_mem->mem_type); + struct ttm_resource_manager *man = ttm_manager_type(bdev, new_mem->mem_type); struct ttm_mem_reg *old_mem = &bo->mem; int ret; struct ttm_buffer_object *ghost_obj; @@ -697,8 +697,8 @@ int ttm_bo_pipeline_move(struct ttm_buffer_object *bo, struct ttm_bo_device *bdev = bo->bdev; struct ttm_mem_reg *old_mem = &bo->mem; - struct ttm_mem_type_manager *from = ttm_manager_type(bdev, old_mem->mem_type); - struct ttm_mem_type_manager *to = ttm_manager_type(bdev, new_mem->mem_type); + struct ttm_resource_manager *from = ttm_manager_type(bdev, old_mem->mem_type); + struct ttm_resource_manager *to = ttm_manager_type(bdev, new_mem->mem_type); int ret; diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c index db4e21d11967..ba2e8bd198ad 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_vm.c +++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c @@ -281,7 +281,7 @@ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf, pgoff_t i; vm_fault_t ret = VM_FAULT_NOPAGE; unsigned long address = vmf->address; - struct ttm_mem_type_manager *man = + struct ttm_resource_manager *man = ttm_manager_type(bdev, bo->mem.mem_type); /* diff --git a/drivers/gpu/drm/ttm/ttm_range_manager.c b/drivers/gpu/drm/ttm/ttm_range_manager.c index 7fddc74b3827..df62177cd913 100644 --- a/drivers/gpu/drm/ttm/ttm_range_manager.c +++ b/drivers/gpu/drm/ttm/ttm_range_manager.c @@ -44,17 +44,17 @@ */ struct ttm_range_manager { - struct ttm_mem_type_manager manager; + struct ttm_resource_manager manager; struct drm_mm mm; spinlock_t lock; }; -static inline struct ttm_range_manager *to_range_manager(struct ttm_mem_type_manager *man) +static inline struct ttm_range_manager *to_range_manager(struct ttm_resource_manager *man) { return container_of(man, struct ttm_range_manager, manager); } -static int ttm_range_man_get_node(struct ttm_mem_type_manager *man, +static int ttm_range_man_get_node(struct 
ttm_resource_manager *man, struct ttm_buffer_object *bo, const struct ttm_place *place, struct ttm_mem_reg *mem) @@ -95,7 +95,7 @@ static int ttm_range_man_get_node(struct ttm_mem_type_manager *man, return ret; } -static void ttm_range_man_put_node(struct ttm_mem_type_manager *man, +static void ttm_range_man_put_node(struct ttm_resource_manager *man, struct ttm_mem_reg *mem) { struct ttm_range_manager *rman = to_range_manager(man); @@ -110,7 +110,7 @@ static void ttm_range_man_put_node(struct ttm_mem_type_manager *man, } } -static const struct ttm_mem_type_manager_func ttm_range_manager_func; +static const struct ttm_resource_manager_func ttm_range_manager_func; int ttm_range_man_init(struct ttm_bo_device *bdev, unsigned type, @@ -119,7 +119,7 @@ int ttm_range_man_init(struct ttm_bo_device *bdev, bool use_tt, unsigned long p_size) { - struct ttm_mem_type_manager *man; + struct ttm_resource_manager *man; struct ttm_range_manager *rman; rman = kzalloc(sizeof(*rman), GFP_KERNEL); @@ -133,13 +133,13 @@ int ttm_range_man_init(struct ttm_bo_device *bdev, man->func = &ttm_range_manager_func; - ttm_mem_type_manager_init(man, p_size); + ttm_resource_manager_init(man, p_size); drm_mm_init(&rman->mm, 0, p_size); spin_lock_init(&rman->lock); ttm_set_driver_manager(bdev, type, &rman->manager); - ttm_mem_type_manager_set_used(man, true); + ttm_resource_manager_set_used(man, true); return 0; } EXPORT_SYMBOL(ttm_range_man_init); @@ -147,14 +147,14 @@ EXPORT_SYMBOL(ttm_range_man_init); int ttm_range_man_fini(struct ttm_bo_device *bdev, unsigned type) { - struct ttm_mem_type_manager *man = ttm_manager_type(bdev, type); + struct ttm_resource_manager *man = ttm_manager_type(bdev, type); struct ttm_range_manager *rman = to_range_manager(man); struct drm_mm *mm = &rman->mm; int ret; - ttm_mem_type_manager_set_used(man, false); + ttm_resource_manager_set_used(man, false); - ret = ttm_mem_type_manager_force_list_clean(bdev, man); + ret = ttm_resource_manager_force_list_clean(bdev, man); if (ret) return ret; @@ -163,14 +163,14 @@ int ttm_range_man_fini(struct ttm_bo_device *bdev, drm_mm_takedown(mm); spin_unlock(&rman->lock); - ttm_mem_type_manager_cleanup(man); + ttm_resource_manager_cleanup(man); ttm_set_driver_manager(bdev, type, NULL); kfree(rman); return 0; } EXPORT_SYMBOL(ttm_range_man_fini); -static void ttm_range_man_debug(struct ttm_mem_type_manager *man, +static void ttm_range_man_debug(struct ttm_resource_manager *man, struct drm_printer *printer) { struct ttm_range_manager *rman = to_range_manager(man); @@ -180,7 +180,7 @@ static void ttm_range_man_debug(struct ttm_mem_type_manager *man, spin_unlock(&rman->lock); } -static const struct ttm_mem_type_manager_func ttm_range_manager_func = { +static const struct ttm_resource_manager_func ttm_range_manager_func = { .get_node = ttm_range_man_get_node, .put_node = ttm_range_man_put_node, .debug = ttm_range_man_debug diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c index a7b3c8ee7f21..a68ae0204bf5 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c @@ -630,7 +630,7 @@ static int vmw_vram_manager_init(struct vmw_private *dev_priv) TTM_PL_FLAG_CACHED, TTM_PL_FLAG_CACHED, false, dev_priv->vram_size >> PAGE_SHIFT); #endif - ttm_mem_type_manager_set_used(ttm_manager_type(&dev_priv->bdev, TTM_PL_VRAM), false); + ttm_resource_manager_set_used(ttm_manager_type(&dev_priv->bdev, TTM_PL_VRAM), false); return ret; } @@ -1189,12 +1189,12 @@ static void vmw_master_drop(struct drm_device *dev, */ 
static void __vmw_svga_enable(struct vmw_private *dev_priv) { - struct ttm_mem_type_manager *man = ttm_manager_type(&dev_priv->bdev, TTM_PL_VRAM); + struct ttm_resource_manager *man = ttm_manager_type(&dev_priv->bdev, TTM_PL_VRAM); spin_lock(&dev_priv->svga_lock); - if (!ttm_mem_type_manager_used(man)) { + if (!ttm_resource_manager_used(man)) { vmw_write(dev_priv, SVGA_REG_ENABLE, SVGA_REG_ENABLE); - ttm_mem_type_manager_set_used(man, true); + ttm_resource_manager_set_used(man, true); } spin_unlock(&dev_priv->svga_lock); } @@ -1220,11 +1220,11 @@ void vmw_svga_enable(struct vmw_private *dev_priv) */ static void __vmw_svga_disable(struct vmw_private *dev_priv) { - struct ttm_mem_type_manager *man = ttm_manager_type(&dev_priv->bdev, TTM_PL_VRAM); + struct ttm_resource_manager *man = ttm_manager_type(&dev_priv->bdev, TTM_PL_VRAM); spin_lock(&dev_priv->svga_lock); - if (ttm_mem_type_manager_used(man)) { - ttm_mem_type_manager_set_used(man, false); + if (ttm_resource_manager_used(man)) { + ttm_resource_manager_set_used(man, false); vmw_write(dev_priv, SVGA_REG_ENABLE, SVGA_REG_ENABLE_HIDE | SVGA_REG_ENABLE_ENABLE); @@ -1241,7 +1241,7 @@ static void __vmw_svga_disable(struct vmw_private *dev_priv) */ void vmw_svga_disable(struct vmw_private *dev_priv) { - struct ttm_mem_type_manager *man = ttm_manager_type(&dev_priv->bdev, TTM_PL_VRAM); + struct ttm_resource_manager *man = ttm_manager_type(&dev_priv->bdev, TTM_PL_VRAM); /* * Disabling SVGA will turn off device modesetting capabilities, so * notify KMS about that so that it doesn't cache atomic state that @@ -1257,8 +1257,8 @@ void vmw_svga_disable(struct vmw_private *dev_priv) vmw_kms_lost_device(dev_priv->dev); ttm_write_lock(&dev_priv->reservation_sem, false); spin_lock(&dev_priv->svga_lock); - if (ttm_mem_type_manager_used(man)) { - ttm_mem_type_manager_set_used(man, false); + if (ttm_resource_manager_used(man)) { + ttm_resource_manager_set_used(man, false); spin_unlock(&dev_priv->svga_lock); if (ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM)) DRM_ERROR("Failed evicting VRAM buffers.\n"); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c index ca5037184814..c8fe6e9cf092 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c @@ -37,7 +37,7 @@ #include <linux/kernel.h> struct vmwgfx_gmrid_man { - struct ttm_mem_type_manager manager; + struct ttm_resource_manager manager; spinlock_t lock; struct ida gmr_ida; uint32_t max_gmr_ids; @@ -45,12 +45,12 @@ struct vmwgfx_gmrid_man { uint32_t used_gmr_pages; }; -static struct vmwgfx_gmrid_man *to_gmrid_manager(struct ttm_mem_type_manager *man) +static struct vmwgfx_gmrid_man *to_gmrid_manager(struct ttm_resource_manager *man) { return container_of(man, struct vmwgfx_gmrid_man, manager); } -static int vmw_gmrid_man_get_node(struct ttm_mem_type_manager *man, +static int vmw_gmrid_man_get_node(struct ttm_resource_manager *man, struct ttm_buffer_object *bo, const struct ttm_place *place, struct ttm_mem_reg *mem) @@ -84,7 +84,7 @@ nospace: return -ENOSPC; } -static void vmw_gmrid_man_put_node(struct ttm_mem_type_manager *man, +static void vmw_gmrid_man_put_node(struct ttm_resource_manager *man, struct ttm_mem_reg *mem) { struct vmwgfx_gmrid_man *gman = to_gmrid_manager(man); @@ -98,11 +98,11 @@ static void vmw_gmrid_man_put_node(struct ttm_mem_type_manager *man, } } -static const struct ttm_mem_type_manager_func vmw_gmrid_manager_func; +static const struct ttm_resource_manager_func 
vmw_gmrid_manager_func; int vmw_gmrid_man_init(struct vmw_private *dev_priv, int type) { - struct ttm_mem_type_manager *man; + struct ttm_resource_manager *man; struct vmwgfx_gmrid_man *gman = kzalloc(sizeof(*gman), GFP_KERNEL); @@ -116,7 +116,7 @@ int vmw_gmrid_man_init(struct vmw_private *dev_priv, int type) man->default_caching = TTM_PL_FLAG_CACHED; /* TODO: This is most likely not correct */ man->use_tt = true; - ttm_mem_type_manager_init(man, 0); + ttm_resource_manager_init(man, 0); spin_lock_init(&gman->lock); gman->used_gmr_pages = 0; ida_init(&gman->gmr_ida); @@ -134,20 +134,20 @@ int vmw_gmrid_man_init(struct vmw_private *dev_priv, int type) BUG(); } ttm_set_driver_manager(&dev_priv->bdev, type, &gman->manager); - ttm_mem_type_manager_set_used(man, true); + ttm_resource_manager_set_used(man, true); return 0; } void vmw_gmrid_man_fini(struct vmw_private *dev_priv, int type) { - struct ttm_mem_type_manager *man = ttm_manager_type(&dev_priv->bdev, type); + struct ttm_resource_manager *man = ttm_manager_type(&dev_priv->bdev, type); struct vmwgfx_gmrid_man *gman = to_gmrid_manager(man); - ttm_mem_type_manager_set_used(man, false); + ttm_resource_manager_set_used(man, false); - ttm_mem_type_manager_force_list_clean(&dev_priv->bdev, man); + ttm_resource_manager_force_list_clean(&dev_priv->bdev, man); - ttm_mem_type_manager_cleanup(man); + ttm_resource_manager_cleanup(man); ttm_set_driver_manager(&dev_priv->bdev, type, NULL); ida_destroy(&gman->gmr_ida); @@ -155,7 +155,7 @@ void vmw_gmrid_man_fini(struct vmw_private *dev_priv, int type) } -static const struct ttm_mem_type_manager_func vmw_gmrid_manager_func = { +static const struct ttm_resource_manager_func vmw_gmrid_manager_func = { .get_node = vmw_gmrid_man_get_node, .put_node = vmw_gmrid_man_put_node, }; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_thp.c b/drivers/gpu/drm/vmwgfx/vmwgfx_thp.c index 4110e8309188..6cac7b091205 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_thp.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_thp.c @@ -16,12 +16,12 @@ * @lock: Manager lock. 
*/ struct vmw_thp_manager { - struct ttm_mem_type_manager manager; + struct ttm_resource_manager manager; struct drm_mm mm; spinlock_t lock; }; -static struct vmw_thp_manager *to_thp_manager(struct ttm_mem_type_manager *man) +static struct vmw_thp_manager *to_thp_manager(struct ttm_resource_manager *man) { return container_of(man, struct vmw_thp_manager, manager); } @@ -44,7 +44,7 @@ static int vmw_thp_insert_aligned(struct drm_mm *mm, struct drm_mm_node *node, return -ENOSPC; } -static int vmw_thp_get_node(struct ttm_mem_type_manager *man, +static int vmw_thp_get_node(struct ttm_resource_manager *man, struct ttm_buffer_object *bo, const struct ttm_place *place, struct ttm_mem_reg *mem) @@ -106,7 +106,7 @@ found_unlock: -static void vmw_thp_put_node(struct ttm_mem_type_manager *man, +static void vmw_thp_put_node(struct ttm_resource_manager *man, struct ttm_mem_reg *mem) { struct vmw_thp_manager *rman = to_thp_manager(man); @@ -123,7 +123,7 @@ static void vmw_thp_put_node(struct ttm_mem_type_manager *man, int vmw_thp_init(struct vmw_private *dev_priv) { - struct ttm_mem_type_manager *man; + struct ttm_resource_manager *man; struct vmw_thp_manager *rman; rman = kzalloc(sizeof(*rman), GFP_KERNEL); @@ -134,39 +134,39 @@ int vmw_thp_init(struct vmw_private *dev_priv) man->available_caching = TTM_PL_FLAG_CACHED; man->default_caching = TTM_PL_FLAG_CACHED; - ttm_mem_type_manager_init(man, + ttm_resource_manager_init(man, dev_priv->vram_size >> PAGE_SHIFT); drm_mm_init(&rman->mm, 0, man->size); spin_lock_init(&rman->lock); ttm_set_driver_manager(&dev_priv->bdev, TTM_PL_VRAM, &rman->manager); - ttm_mem_type_manager_set_used(man, true); + ttm_resource_manager_set_used(man, true); return 0; } void vmw_thp_fini(struct vmw_private *dev_priv) { - struct ttm_mem_type_manager *man = ttm_manager_type(&dev_priv->bdev, TTM_PL_VRAM); + struct ttm_resource_manager *man = ttm_manager_type(&dev_priv->bdev, TTM_PL_VRAM); struct vmw_thp_manager *rman = to_thp_manager(man); struct drm_mm *mm = &rman->mm; int ret; - ttm_mem_type_manager_set_used(man, false); + ttm_resource_manager_set_used(man, false); - ret = ttm_mem_type_manager_force_list_clean(&dev_priv->bdev, man); + ret = ttm_resource_manager_force_list_clean(&dev_priv->bdev, man); if (ret) return; spin_lock(&rman->lock); drm_mm_clean(mm); drm_mm_takedown(mm); spin_unlock(&rman->lock); - ttm_mem_type_manager_cleanup(man); + ttm_resource_manager_cleanup(man); ttm_set_driver_manager(&dev_priv->bdev, TTM_PL_VRAM, NULL); kfree(rman); } -static void vmw_thp_debug(struct ttm_mem_type_manager *man, +static void vmw_thp_debug(struct ttm_resource_manager *man, struct drm_printer *printer) { struct vmw_thp_manager *rman = to_thp_manager(man); @@ -176,7 +176,7 @@ static void vmw_thp_debug(struct ttm_mem_type_manager *man, spin_unlock(&rman->lock); } -const struct ttm_mem_type_manager_func vmw_thp_func = { +const struct ttm_resource_manager_func vmw_thp_func = { .get_node = vmw_thp_get_node, .put_node = vmw_thp_put_node, .debug = vmw_thp_debug |