Diffstat (limited to 'drivers/gpu/drm/drm_gem_vram_helper.c')
-rw-r--r-- | drivers/gpu/drm/drm_gem_vram_helper.c | 735
1 file changed, 615 insertions, 120 deletions
diff --git a/drivers/gpu/drm/drm_gem_vram_helper.c b/drivers/gpu/drm/drm_gem_vram_helper.c index fd751078bae1..666cb4c22bb9 100644 --- a/drivers/gpu/drm/drm_gem_vram_helper.c +++ b/drivers/gpu/drm/drm_gem_vram_helper.c @@ -1,10 +1,15 @@ // SPDX-License-Identifier: GPL-2.0-or-later -#include <drm/drm_gem_vram_helper.h> +#include <drm/drm_debugfs.h> #include <drm/drm_device.h> +#include <drm/drm_file.h> +#include <drm/drm_framebuffer.h> +#include <drm/drm_gem_ttm_helper.h> +#include <drm/drm_gem_vram_helper.h> #include <drm/drm_mode.h> +#include <drm/drm_plane.h> #include <drm/drm_prime.h> -#include <drm/drm_vram_mm_helper.h> +#include <drm/drm_simple_kms_helper.h> #include <drm/ttm/ttm_page_alloc.h> static const struct drm_gem_object_funcs drm_gem_vram_object_funcs; @@ -14,6 +19,11 @@ static const struct drm_gem_object_funcs drm_gem_vram_object_funcs; * * This library provides a GEM buffer object that is backed by video RAM * (VRAM). It can be used for framebuffer devices with dedicated memory. + * + * The data structure &struct drm_vram_mm and its helpers implement a memory + * manager for simple framebuffer devices with dedicated video memory. Buffer + * objects are either placed in video RAM or evicted to system memory. The rsp. + * buffer object is provided by &struct drm_gem_vram_object. */ /* @@ -26,6 +36,10 @@ static void drm_gem_vram_cleanup(struct drm_gem_vram_object *gbo) * TTM buffer object in 'bo' has already been cleaned * up; only release the GEM object. */ + + WARN_ON(gbo->kmap_use_count); + WARN_ON(gbo->kmap.virtual); + drm_gem_object_release(&gbo->bo.base); } @@ -47,6 +61,7 @@ static void drm_gem_vram_placement(struct drm_gem_vram_object *gbo, { unsigned int i; unsigned int c = 0; + u32 invariant_flags = pl_flag & TTM_PL_FLAG_TOPDOWN; gbo->placement.placement = gbo->placements; gbo->placement.busy_placement = gbo->placements; @@ -54,15 +69,18 @@ static void drm_gem_vram_placement(struct drm_gem_vram_object *gbo, if (pl_flag & TTM_PL_FLAG_VRAM) gbo->placements[c++].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED | - TTM_PL_FLAG_VRAM; + TTM_PL_FLAG_VRAM | + invariant_flags; if (pl_flag & TTM_PL_FLAG_SYSTEM) gbo->placements[c++].flags = TTM_PL_MASK_CACHING | - TTM_PL_FLAG_SYSTEM; + TTM_PL_FLAG_SYSTEM | + invariant_flags; if (!c) gbo->placements[c++].flags = TTM_PL_MASK_CACHING | - TTM_PL_FLAG_SYSTEM; + TTM_PL_FLAG_SYSTEM | + invariant_flags; gbo->placement.num_placement = c; gbo->placement.num_busy_placement = c; @@ -82,8 +100,7 @@ static int drm_gem_vram_init(struct drm_device *dev, int ret; size_t acc_size; - if (!gbo->bo.base.funcs) - gbo->bo.base.funcs = &drm_gem_vram_object_funcs; + gbo->bo.base.funcs = &drm_gem_vram_object_funcs; ret = drm_gem_object_init(dev, &gbo->bo.base, size); if (ret) @@ -192,30 +209,12 @@ s64 drm_gem_vram_offset(struct drm_gem_vram_object *gbo) } EXPORT_SYMBOL(drm_gem_vram_offset); -/** - * drm_gem_vram_pin() - Pins a GEM VRAM object in a region. - * @gbo: the GEM VRAM object - * @pl_flag: a bitmask of possible memory regions - * - * Pinning a buffer object ensures that it is not evicted from - * a memory region. A pinned buffer object has to be unpinned before - * it can be pinned to another region. If the pl_flag argument is 0, - * the buffer is pinned at its current location (video RAM or system - * memory). - * - * Returns: - * 0 on success, or - * a negative error code otherwise. 
- */ -int drm_gem_vram_pin(struct drm_gem_vram_object *gbo, unsigned long pl_flag) +static int drm_gem_vram_pin_locked(struct drm_gem_vram_object *gbo, + unsigned long pl_flag) { int i, ret; struct ttm_operation_ctx ctx = { false, false }; - ret = ttm_bo_reserve(&gbo->bo, true, false, NULL); - if (ret < 0) - return ret; - if (gbo->pin_count) goto out; @@ -227,62 +226,123 @@ int drm_gem_vram_pin(struct drm_gem_vram_object *gbo, unsigned long pl_flag) ret = ttm_bo_validate(&gbo->bo, &gbo->placement, &ctx); if (ret < 0) - goto err_ttm_bo_unreserve; + return ret; out: ++gbo->pin_count; - ttm_bo_unreserve(&gbo->bo); return 0; - -err_ttm_bo_unreserve: - ttm_bo_unreserve(&gbo->bo); - return ret; } -EXPORT_SYMBOL(drm_gem_vram_pin); /** - * drm_gem_vram_unpin() - Unpins a GEM VRAM object + * drm_gem_vram_pin() - Pins a GEM VRAM object in a region. * @gbo: the GEM VRAM object + * @pl_flag: a bitmask of possible memory regions + * + * Pinning a buffer object ensures that it is not evicted from + * a memory region. A pinned buffer object has to be unpinned before + * it can be pinned to another region. If the pl_flag argument is 0, + * the buffer is pinned at its current location (video RAM or system + * memory). + * + * Small buffer objects, such as cursor images, can lead to memory + * fragmentation if they are pinned in the middle of video RAM. This + * is especially a problem on devices with only a small amount of + * video RAM. Fragmentation can prevent the primary framebuffer from + * fitting in, even though there's enough memory overall. The modifier + * DRM_GEM_VRAM_PL_FLAG_TOPDOWN marks the buffer object to be pinned + * at the high end of the memory region to avoid fragmentation. * * Returns: * 0 on success, or * a negative error code otherwise. */ -int drm_gem_vram_unpin(struct drm_gem_vram_object *gbo) +int drm_gem_vram_pin(struct drm_gem_vram_object *gbo, unsigned long pl_flag) { - int i, ret; - struct ttm_operation_ctx ctx = { false, false }; + int ret; ret = ttm_bo_reserve(&gbo->bo, true, false, NULL); - if (ret < 0) + if (ret) return ret; + ret = drm_gem_vram_pin_locked(gbo, pl_flag); + ttm_bo_unreserve(&gbo->bo); + + return ret; +} +EXPORT_SYMBOL(drm_gem_vram_pin); + +static int drm_gem_vram_unpin_locked(struct drm_gem_vram_object *gbo) +{ + int i, ret; + struct ttm_operation_ctx ctx = { false, false }; if (WARN_ON_ONCE(!gbo->pin_count)) - goto out; + return 0; --gbo->pin_count; if (gbo->pin_count) - goto out; + return 0; for (i = 0; i < gbo->placement.num_placement ; ++i) gbo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT; ret = ttm_bo_validate(&gbo->bo, &gbo->placement, &ctx); if (ret < 0) - goto err_ttm_bo_unreserve; - -out: - ttm_bo_unreserve(&gbo->bo); + return ret; return 0; +} -err_ttm_bo_unreserve: +/** + * drm_gem_vram_unpin() - Unpins a GEM VRAM object + * @gbo: the GEM VRAM object + * + * Returns: + * 0 on success, or + * a negative error code otherwise. 
+ */ +int drm_gem_vram_unpin(struct drm_gem_vram_object *gbo) +{ + int ret; + + ret = ttm_bo_reserve(&gbo->bo, true, false, NULL); + if (ret) + return ret; + ret = drm_gem_vram_unpin_locked(gbo); ttm_bo_unreserve(&gbo->bo); + return ret; } EXPORT_SYMBOL(drm_gem_vram_unpin); +static void *drm_gem_vram_kmap_locked(struct drm_gem_vram_object *gbo, + bool map, bool *is_iomem) +{ + int ret; + struct ttm_bo_kmap_obj *kmap = &gbo->kmap; + + if (gbo->kmap_use_count > 0) + goto out; + + if (kmap->virtual || !map) + goto out; + + ret = ttm_bo_kmap(&gbo->bo, 0, gbo->bo.num_pages, kmap); + if (ret) + return ERR_PTR(ret); + +out: + if (!kmap->virtual) { + if (is_iomem) + *is_iomem = false; + return NULL; /* not mapped; don't increment ref */ + } + ++gbo->kmap_use_count; + if (is_iomem) + return ttm_kmap_obj_virtual(kmap, is_iomem); + return kmap->virtual; +} + /** * drm_gem_vram_kmap() - Maps a GEM VRAM object into kernel address space * @gbo: the GEM VRAM object @@ -304,43 +364,121 @@ void *drm_gem_vram_kmap(struct drm_gem_vram_object *gbo, bool map, bool *is_iomem) { int ret; - struct ttm_bo_kmap_obj *kmap = &gbo->kmap; - - if (kmap->virtual || !map) - goto out; + void *virtual; - ret = ttm_bo_kmap(&gbo->bo, 0, gbo->bo.num_pages, kmap); + ret = ttm_bo_reserve(&gbo->bo, true, false, NULL); if (ret) return ERR_PTR(ret); + virtual = drm_gem_vram_kmap_locked(gbo, map, is_iomem); + ttm_bo_unreserve(&gbo->bo); -out: - if (!is_iomem) - return kmap->virtual; - if (!kmap->virtual) { - *is_iomem = false; - return NULL; - } - return ttm_kmap_obj_virtual(kmap, is_iomem); + return virtual; } EXPORT_SYMBOL(drm_gem_vram_kmap); +static void drm_gem_vram_kunmap_locked(struct drm_gem_vram_object *gbo) +{ + if (WARN_ON_ONCE(!gbo->kmap_use_count)) + return; + if (--gbo->kmap_use_count > 0) + return; + + /* + * Permanently mapping and unmapping buffers adds overhead from + * updating the page tables and creates debugging output. Therefore, + * we delay the actual unmap operation until the BO gets evicted + * from memory. See drm_gem_vram_bo_driver_move_notify(). + */ +} + /** * drm_gem_vram_kunmap() - Unmaps a GEM VRAM object * @gbo: the GEM VRAM object */ void drm_gem_vram_kunmap(struct drm_gem_vram_object *gbo) { - struct ttm_bo_kmap_obj *kmap = &gbo->kmap; + int ret; - if (!kmap->virtual) + ret = ttm_bo_reserve(&gbo->bo, false, false, NULL); + if (WARN_ONCE(ret, "ttm_bo_reserve_failed(): ret=%d\n", ret)) return; - - ttm_bo_kunmap(kmap); - kmap->virtual = NULL; + drm_gem_vram_kunmap_locked(gbo); + ttm_bo_unreserve(&gbo->bo); } EXPORT_SYMBOL(drm_gem_vram_kunmap); /** + * drm_gem_vram_vmap() - Pins and maps a GEM VRAM object into kernel address + * space + * @gbo: The GEM VRAM object to map + * + * The vmap function pins a GEM VRAM object to its current location, either + * system or video memory, and maps its buffer into kernel address space. + * As pinned object cannot be relocated, you should avoid pinning objects + * permanently. Call drm_gem_vram_vunmap() with the returned address to + * unmap and unpin the GEM VRAM object. + * + * If you have special requirements for the pinning or mapping operations, + * call drm_gem_vram_pin() and drm_gem_vram_kmap() directly. + * + * Returns: + * The buffer's virtual address on success, or + * an ERR_PTR()-encoded error code otherwise. 
+ */ +void *drm_gem_vram_vmap(struct drm_gem_vram_object *gbo) +{ + int ret; + void *base; + + ret = ttm_bo_reserve(&gbo->bo, true, false, NULL); + if (ret) + return ERR_PTR(ret); + + ret = drm_gem_vram_pin_locked(gbo, 0); + if (ret) + goto err_ttm_bo_unreserve; + base = drm_gem_vram_kmap_locked(gbo, true, NULL); + if (IS_ERR(base)) { + ret = PTR_ERR(base); + goto err_drm_gem_vram_unpin_locked; + } + + ttm_bo_unreserve(&gbo->bo); + + return base; + +err_drm_gem_vram_unpin_locked: + drm_gem_vram_unpin_locked(gbo); +err_ttm_bo_unreserve: + ttm_bo_unreserve(&gbo->bo); + return ERR_PTR(ret); +} +EXPORT_SYMBOL(drm_gem_vram_vmap); + +/** + * drm_gem_vram_vunmap() - Unmaps and unpins a GEM VRAM object + * @gbo: The GEM VRAM object to unmap + * @vaddr: The mapping's base address as returned by drm_gem_vram_vmap() + * + * A call to drm_gem_vram_vunmap() unmaps and unpins a GEM VRAM buffer. See + * the documentation for drm_gem_vram_vmap() for more information. + */ +void drm_gem_vram_vunmap(struct drm_gem_vram_object *gbo, void *vaddr) +{ + int ret; + + ret = ttm_bo_reserve(&gbo->bo, false, false, NULL); + if (WARN_ONCE(ret, "ttm_bo_reserve_failed(): ret=%d\n", ret)) + return; + + drm_gem_vram_kunmap_locked(gbo); + drm_gem_vram_unpin_locked(gbo); + + ttm_bo_unreserve(&gbo->bo); +} +EXPORT_SYMBOL(drm_gem_vram_vunmap); + +/** * drm_gem_vram_fill_create_dumb() - \ Helper for implementing &struct drm_driver.dumb_create * @file: the DRM file @@ -410,59 +548,27 @@ static bool drm_is_gem_vram(struct ttm_buffer_object *bo) return (bo->destroy == ttm_buffer_object_destroy); } -/** - * drm_gem_vram_bo_driver_evict_flags() - \ - Implements &struct ttm_bo_driver.evict_flags - * @bo: TTM buffer object. Refers to &struct drm_gem_vram_object.bo - * @pl: TTM placement information. - */ -void drm_gem_vram_bo_driver_evict_flags(struct ttm_buffer_object *bo, - struct ttm_placement *pl) +static void drm_gem_vram_bo_driver_evict_flags(struct drm_gem_vram_object *gbo, + struct ttm_placement *pl) { - struct drm_gem_vram_object *gbo; - - /* TTM may pass BOs that are not GEM VRAM BOs. */ - if (!drm_is_gem_vram(bo)) - return; - - gbo = drm_gem_vram_of_bo(bo); drm_gem_vram_placement(gbo, TTM_PL_FLAG_SYSTEM); *pl = gbo->placement; } -EXPORT_SYMBOL(drm_gem_vram_bo_driver_evict_flags); -/** - * drm_gem_vram_bo_driver_verify_access() - \ - Implements &struct ttm_bo_driver.verify_access - * @bo: TTM buffer object. Refers to &struct drm_gem_vram_object.bo - * @filp: File pointer. - * - * Returns: - * 0 on success, or - * a negative errno code otherwise. - */ -int drm_gem_vram_bo_driver_verify_access(struct ttm_buffer_object *bo, - struct file *filp) +static void drm_gem_vram_bo_driver_move_notify(struct drm_gem_vram_object *gbo, + bool evict, + struct ttm_mem_reg *new_mem) { - struct drm_gem_vram_object *gbo = drm_gem_vram_of_bo(bo); + struct ttm_bo_kmap_obj *kmap = &gbo->kmap; - return drm_vma_node_verify_access(&gbo->bo.base.vma_node, - filp->private_data); -} -EXPORT_SYMBOL(drm_gem_vram_bo_driver_verify_access); + if (WARN_ON_ONCE(gbo->kmap_use_count)) + return; -/* - * drm_gem_vram_mm_funcs - Functions for &struct drm_vram_mm - * - * Most users of @struct drm_gem_vram_object will also use - * @struct drm_vram_mm. This instance of &struct drm_vram_mm_funcs - * can be used to connect both. 
- */ -const struct drm_vram_mm_funcs drm_gem_vram_mm_funcs = { - .evict_flags = drm_gem_vram_bo_driver_evict_flags, - .verify_access = drm_gem_vram_bo_driver_verify_access -}; -EXPORT_SYMBOL(drm_gem_vram_mm_funcs); + if (!kmap->virtual) + return; + ttm_bo_kunmap(kmap); + kmap->virtual = NULL; +} /* * Helpers for struct drm_gem_object_funcs @@ -544,6 +650,129 @@ int drm_gem_vram_driver_dumb_mmap_offset(struct drm_file *file, EXPORT_SYMBOL(drm_gem_vram_driver_dumb_mmap_offset); /* + * Helpers for struct drm_plane_helper_funcs + */ + +/** + * drm_gem_vram_plane_helper_prepare_fb() - \ + * Implements &struct drm_plane_helper_funcs.prepare_fb + * @plane: a DRM plane + * @new_state: the plane's new state + * + * During plane updates, this function pins the GEM VRAM + * objects of the plane's new framebuffer to VRAM. Call + * drm_gem_vram_plane_helper_cleanup_fb() to unpin them. + * + * Returns: + * 0 on success, or + * a negative errno code otherwise. + */ +int +drm_gem_vram_plane_helper_prepare_fb(struct drm_plane *plane, + struct drm_plane_state *new_state) +{ + size_t i; + struct drm_gem_vram_object *gbo; + int ret; + + if (!new_state->fb) + return 0; + + for (i = 0; i < ARRAY_SIZE(new_state->fb->obj); ++i) { + if (!new_state->fb->obj[i]) + continue; + gbo = drm_gem_vram_of_gem(new_state->fb->obj[i]); + ret = drm_gem_vram_pin(gbo, DRM_GEM_VRAM_PL_FLAG_VRAM); + if (ret) + goto err_drm_gem_vram_unpin; + } + + return 0; + +err_drm_gem_vram_unpin: + while (i) { + --i; + gbo = drm_gem_vram_of_gem(new_state->fb->obj[i]); + drm_gem_vram_unpin(gbo); + } + return ret; +} +EXPORT_SYMBOL(drm_gem_vram_plane_helper_prepare_fb); + +/** + * drm_gem_vram_plane_helper_cleanup_fb() - \ + * Implements &struct drm_plane_helper_funcs.cleanup_fb + * @plane: a DRM plane + * @old_state: the plane's old state + * + * During plane updates, this function unpins the GEM VRAM + * objects of the plane's old framebuffer from VRAM. Complements + * drm_gem_vram_plane_helper_prepare_fb(). + */ +void +drm_gem_vram_plane_helper_cleanup_fb(struct drm_plane *plane, + struct drm_plane_state *old_state) +{ + size_t i; + struct drm_gem_vram_object *gbo; + + if (!old_state->fb) + return; + + for (i = 0; i < ARRAY_SIZE(old_state->fb->obj); ++i) { + if (!old_state->fb->obj[i]) + continue; + gbo = drm_gem_vram_of_gem(old_state->fb->obj[i]); + drm_gem_vram_unpin(gbo); + } +} +EXPORT_SYMBOL(drm_gem_vram_plane_helper_cleanup_fb); + +/* + * Helpers for struct drm_simple_display_pipe_funcs + */ + +/** + * drm_gem_vram_simple_display_pipe_prepare_fb() - \ + * Implements &struct drm_simple_display_pipe_funcs.prepare_fb + * @pipe: a simple display pipe + * @new_state: the plane's new state + * + * During plane updates, this function pins the GEM VRAM + * objects of the plane's new framebuffer to VRAM. Call + * drm_gem_vram_simple_display_pipe_cleanup_fb() to unpin them. + * + * Returns: + * 0 on success, or + * a negative errno code otherwise. + */ +int drm_gem_vram_simple_display_pipe_prepare_fb( + struct drm_simple_display_pipe *pipe, + struct drm_plane_state *new_state) +{ + return drm_gem_vram_plane_helper_prepare_fb(&pipe->plane, new_state); +} +EXPORT_SYMBOL(drm_gem_vram_simple_display_pipe_prepare_fb); + +/** + * drm_gem_vram_simple_display_pipe_cleanup_fb() - \ + * Implements &struct drm_simple_display_pipe_funcs.cleanup_fb + * @pipe: a simple display pipe + * @old_state: the plane's old state + * + * During plane updates, this function unpins the GEM VRAM + * objects of the plane's old framebuffer from VRAM. 
Complements + * drm_gem_vram_simple_display_pipe_prepare_fb(). + */ +void drm_gem_vram_simple_display_pipe_cleanup_fb( + struct drm_simple_display_pipe *pipe, + struct drm_plane_state *old_state) +{ + drm_gem_vram_plane_helper_cleanup_fb(&pipe->plane, old_state); +} +EXPORT_SYMBOL(drm_gem_vram_simple_display_pipe_cleanup_fb); + +/* * PRIME helpers */ @@ -595,17 +824,11 @@ static void drm_gem_vram_object_unpin(struct drm_gem_object *gem) static void *drm_gem_vram_object_vmap(struct drm_gem_object *gem) { struct drm_gem_vram_object *gbo = drm_gem_vram_of_gem(gem); - int ret; void *base; - ret = drm_gem_vram_pin(gbo, 0); - if (ret) - return NULL; - base = drm_gem_vram_kmap(gbo, true, NULL); - if (IS_ERR(base)) { - drm_gem_vram_unpin(gbo); + base = drm_gem_vram_vmap(gbo); + if (IS_ERR(base)) return NULL; - } return base; } @@ -620,8 +843,7 @@ static void drm_gem_vram_object_vunmap(struct drm_gem_object *gem, { struct drm_gem_vram_object *gbo = drm_gem_vram_of_gem(gem); - drm_gem_vram_kunmap(gbo); - drm_gem_vram_unpin(gbo); + drm_gem_vram_vunmap(gbo, vaddr); } /* @@ -633,5 +855,278 @@ static const struct drm_gem_object_funcs drm_gem_vram_object_funcs = { .pin = drm_gem_vram_object_pin, .unpin = drm_gem_vram_object_unpin, .vmap = drm_gem_vram_object_vmap, - .vunmap = drm_gem_vram_object_vunmap + .vunmap = drm_gem_vram_object_vunmap, + .mmap = drm_gem_ttm_mmap, + .print_info = drm_gem_ttm_print_info, +}; + +/* + * VRAM memory manager + */ + +/* + * TTM TT + */ + +static void backend_func_destroy(struct ttm_tt *tt) +{ + ttm_tt_fini(tt); + kfree(tt); +} + +static struct ttm_backend_func backend_func = { + .destroy = backend_func_destroy +}; + +/* + * TTM BO device + */ + +static struct ttm_tt *bo_driver_ttm_tt_create(struct ttm_buffer_object *bo, + uint32_t page_flags) +{ + struct ttm_tt *tt; + int ret; + + tt = kzalloc(sizeof(*tt), GFP_KERNEL); + if (!tt) + return NULL; + + tt->func = &backend_func; + + ret = ttm_tt_init(tt, bo, page_flags); + if (ret < 0) + goto err_ttm_tt_init; + + return tt; + +err_ttm_tt_init: + kfree(tt); + return NULL; +} + +static int bo_driver_init_mem_type(struct ttm_bo_device *bdev, uint32_t type, + struct ttm_mem_type_manager *man) +{ + switch (type) { + case TTM_PL_SYSTEM: + man->flags = TTM_MEMTYPE_FLAG_MAPPABLE; + man->available_caching = TTM_PL_MASK_CACHING; + man->default_caching = TTM_PL_FLAG_CACHED; + break; + case TTM_PL_VRAM: + man->func = &ttm_bo_manager_func; + man->flags = TTM_MEMTYPE_FLAG_FIXED | + TTM_MEMTYPE_FLAG_MAPPABLE; + man->available_caching = TTM_PL_FLAG_UNCACHED | + TTM_PL_FLAG_WC; + man->default_caching = TTM_PL_FLAG_WC; + break; + default: + return -EINVAL; + } + return 0; +} + +static void bo_driver_evict_flags(struct ttm_buffer_object *bo, + struct ttm_placement *placement) +{ + struct drm_gem_vram_object *gbo; + + /* TTM may pass BOs that are not GEM VRAM BOs. */ + if (!drm_is_gem_vram(bo)) + return; + + gbo = drm_gem_vram_of_bo(bo); + + drm_gem_vram_bo_driver_evict_flags(gbo, placement); +} + +static void bo_driver_move_notify(struct ttm_buffer_object *bo, + bool evict, + struct ttm_mem_reg *new_mem) +{ + struct drm_gem_vram_object *gbo; + + /* TTM may pass BOs that are not GEM VRAM BOs. 
*/ + if (!drm_is_gem_vram(bo)) + return; + + gbo = drm_gem_vram_of_bo(bo); + + drm_gem_vram_bo_driver_move_notify(gbo, evict, new_mem); +} + +static int bo_driver_io_mem_reserve(struct ttm_bo_device *bdev, + struct ttm_mem_reg *mem) +{ + struct ttm_mem_type_manager *man = bdev->man + mem->mem_type; + struct drm_vram_mm *vmm = drm_vram_mm_of_bdev(bdev); + + if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE)) + return -EINVAL; + + mem->bus.addr = NULL; + mem->bus.size = mem->num_pages << PAGE_SHIFT; + + switch (mem->mem_type) { + case TTM_PL_SYSTEM: /* nothing to do */ + mem->bus.offset = 0; + mem->bus.base = 0; + mem->bus.is_iomem = false; + break; + case TTM_PL_VRAM: + mem->bus.offset = mem->start << PAGE_SHIFT; + mem->bus.base = vmm->vram_base; + mem->bus.is_iomem = true; + break; + default: + return -EINVAL; + } + + return 0; +} + +static void bo_driver_io_mem_free(struct ttm_bo_device *bdev, + struct ttm_mem_reg *mem) +{ } + +static struct ttm_bo_driver bo_driver = { + .ttm_tt_create = bo_driver_ttm_tt_create, + .ttm_tt_populate = ttm_pool_populate, + .ttm_tt_unpopulate = ttm_pool_unpopulate, + .init_mem_type = bo_driver_init_mem_type, + .eviction_valuable = ttm_bo_eviction_valuable, + .evict_flags = bo_driver_evict_flags, + .move_notify = bo_driver_move_notify, + .io_mem_reserve = bo_driver_io_mem_reserve, + .io_mem_free = bo_driver_io_mem_free, +}; + +/* + * struct drm_vram_mm + */ + +#if defined(CONFIG_DEBUG_FS) +static int drm_vram_mm_debugfs(struct seq_file *m, void *data) +{ + struct drm_info_node *node = (struct drm_info_node *) m->private; + struct drm_vram_mm *vmm = node->minor->dev->vram_mm; + struct drm_mm *mm = vmm->bdev.man[TTM_PL_VRAM].priv; + struct drm_printer p = drm_seq_file_printer(m); + + spin_lock(&ttm_bo_glob.lru_lock); + drm_mm_print(mm, &p); + spin_unlock(&ttm_bo_glob.lru_lock); + return 0; +} + +static const struct drm_info_list drm_vram_mm_debugfs_list[] = { + { "vram-mm", drm_vram_mm_debugfs, 0, NULL }, }; +#endif + +/** + * drm_vram_mm_debugfs_init() - Register VRAM MM debugfs file. + * + * @minor: drm minor device. + * + * Returns: + * 0 on success, or + * a negative error code otherwise. + */ +int drm_vram_mm_debugfs_init(struct drm_minor *minor) +{ + int ret = 0; + +#if defined(CONFIG_DEBUG_FS) + ret = drm_debugfs_create_files(drm_vram_mm_debugfs_list, + ARRAY_SIZE(drm_vram_mm_debugfs_list), + minor->debugfs_root, minor); +#endif + return ret; +} +EXPORT_SYMBOL(drm_vram_mm_debugfs_init); + +static int drm_vram_mm_init(struct drm_vram_mm *vmm, struct drm_device *dev, + uint64_t vram_base, size_t vram_size) +{ + int ret; + + vmm->vram_base = vram_base; + vmm->vram_size = vram_size; + + ret = ttm_bo_device_init(&vmm->bdev, &bo_driver, + dev->anon_inode->i_mapping, + dev->vma_offset_manager, + true); + if (ret) + return ret; + + ret = ttm_bo_init_mm(&vmm->bdev, TTM_PL_VRAM, vram_size >> PAGE_SHIFT); + if (ret) + return ret; + + return 0; +} + +static void drm_vram_mm_cleanup(struct drm_vram_mm *vmm) +{ + ttm_bo_device_release(&vmm->bdev); +} + +/* + * Helpers for integration with struct drm_device + */ + +/** + * drm_vram_helper_alloc_mm - Allocates a device's instance of \ + &struct drm_vram_mm + * @dev: the DRM device + * @vram_base: the base address of the video memory + * @vram_size: the size of the video memory in bytes + * + * Returns: + * The new instance of &struct drm_vram_mm on success, or + * an ERR_PTR()-encoded errno code otherwise. 
+ */ +struct drm_vram_mm *drm_vram_helper_alloc_mm( + struct drm_device *dev, uint64_t vram_base, size_t vram_size) +{ + int ret; + + if (WARN_ON(dev->vram_mm)) + return dev->vram_mm; + + dev->vram_mm = kzalloc(sizeof(*dev->vram_mm), GFP_KERNEL); + if (!dev->vram_mm) + return ERR_PTR(-ENOMEM); + + ret = drm_vram_mm_init(dev->vram_mm, dev, vram_base, vram_size); + if (ret) + goto err_kfree; + + return dev->vram_mm; + +err_kfree: + kfree(dev->vram_mm); + dev->vram_mm = NULL; + return ERR_PTR(ret); +} +EXPORT_SYMBOL(drm_vram_helper_alloc_mm); + +/** + * drm_vram_helper_release_mm - Releases a device's instance of \ + &struct drm_vram_mm + * @dev: the DRM device + */ +void drm_vram_helper_release_mm(struct drm_device *dev) +{ + if (!dev->vram_mm) + return; + + drm_vram_mm_cleanup(dev->vram_mm); + kfree(dev->vram_mm); + dev->vram_mm = NULL; +} +EXPORT_SYMBOL(drm_vram_helper_release_mm); |
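
The patch turns the VRAM helpers into a self-contained memory manager (drm_vram_helper_alloc_mm()/drm_vram_helper_release_mm()) and adds drm_gem_vram_vmap()/drm_gem_vram_vunmap() plus plane and simple-display-pipe hooks. The following is a minimal usage sketch, not code from this patch: the example_* functions, the memcpy-based fill, and the simplified error handling are assumptions made for illustration, while the drm_vram_helper_*, drm_gem_vram_* calls and DRM_GEM_VRAM_PL_FLAG_* flags are the interfaces touched by this diff.

    /*
     * Hypothetical driver glue; the example_* names are illustrative only.
     * Error handling is simplified.
     */
    #include <linux/err.h>
    #include <linux/string.h>

    #include <drm/drm_device.h>
    #include <drm/drm_gem_vram_helper.h>

    /* Driver load/unload: set up the VRAM memory manager behind dev->vram_mm. */
    static int example_vram_init(struct drm_device *dev, u64 base, size_t size)
    {
            struct drm_vram_mm *vmm = drm_vram_helper_alloc_mm(dev, base, size);

            return PTR_ERR_OR_ZERO(vmm);
    }

    static void example_vram_fini(struct drm_device *dev)
    {
            drm_vram_helper_release_mm(dev);
    }

    /* Short-lived CPU access: drm_gem_vram_vmap() pins and maps in one step. */
    static int example_fill(struct drm_gem_object *gem, const void *src, size_t len)
    {
            struct drm_gem_vram_object *gbo = drm_gem_vram_of_gem(gem);
            void *vaddr = drm_gem_vram_vmap(gbo);

            if (IS_ERR(vaddr))
                    return PTR_ERR(vaddr);
            /* Simplification: a real driver may need memcpy_toio() for I/O memory. */
            memcpy(vaddr, src, len);
            drm_gem_vram_vunmap(gbo, vaddr);
            return 0;
    }

    /*
     * Special placement: pin a small BO (e.g. a cursor image) at the top of
     * VRAM to avoid fragmenting the middle of the region, then map it.
     */
    static int example_pin_cursor(struct drm_gem_vram_object *gbo, void **vaddr)
    {
            int ret = drm_gem_vram_pin(gbo, DRM_GEM_VRAM_PL_FLAG_VRAM |
                                            DRM_GEM_VRAM_PL_FLAG_TOPDOWN);

            if (ret)
                    return ret;
            *vaddr = drm_gem_vram_kmap(gbo, true, NULL);
            if (IS_ERR(*vaddr)) {
                    drm_gem_vram_unpin(gbo);
                    return PTR_ERR(*vaddr);
            }
            return 0;
    }

Note that drm_gem_vram_vmap() already pins the buffer at its current location under a single TTM reservation, so the explicit pin/kmap pair is only needed when, as in the cursor sketch, a specific placement such as DRM_GEM_VRAM_PL_FLAG_TOPDOWN is required.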