Diffstat (limited to 'drivers/gpu/drm/ttm/ttm_bo_util.c')
-rw-r--r-- | drivers/gpu/drm/ttm/ttm_bo_util.c | 351
1 files changed, 292 insertions, 59 deletions
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index d939925efa81..acbbca9d5c92 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -29,6 +29,8 @@
  * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
  */
 
+#include <linux/export.h>
+#include <linux/swap.h>
 #include <linux/vmalloc.h>
 
 #include <drm/ttm/ttm_bo.h>
@@ -37,6 +39,8 @@
 
 #include <drm/drm_cache.h>
 
+#include "ttm_bo_internal.h"
+
 struct ttm_transfer_obj {
 	struct ttm_buffer_object base;
 	struct ttm_buffer_object *bo;
@@ -254,6 +258,13 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
 	ret = dma_resv_trylock(&fbo->base.base._resv);
 	WARN_ON(!ret);
 
+	ret = dma_resv_reserve_fences(&fbo->base.base._resv, 1);
+	if (ret) {
+		dma_resv_unlock(&fbo->base.base._resv);
+		kfree(fbo);
+		return ret;
+	}
+
 	if (fbo->base.resource) {
 		ttm_resource_set_bo(fbo->base.resource, &fbo->base);
 		bo->resource = NULL;
@@ -262,12 +273,6 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
 		fbo->base.bulk_move = NULL;
 	}
 
-	ret = dma_resv_reserve_fences(&fbo->base.base._resv, 1);
-	if (ret) {
-		kfree(fbo);
-		return ret;
-	}
-
 	ttm_bo_get(bo);
 	fbo->bo = bo;
 
@@ -378,6 +383,32 @@ static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
 }
 
 /**
+ * ttm_bo_kmap_try_from_panic
+ *
+ * @bo: The buffer object
+ * @page: The page to map
+ *
+ * Sets up a kernel virtual mapping using kmap_local_page_try_from_panic().
+ * This should only be called from the panic handler, if you make sure the bo
+ * is the one being displayed, so is properly allocated, and protected.
+ *
+ * Returns the vaddr, that you can use to write to the bo, and that you should
+ * pass to kunmap_local() when you're done with this page, or NULL if the bo
+ * is in iomem.
+ */
+void *ttm_bo_kmap_try_from_panic(struct ttm_buffer_object *bo, unsigned long page)
+{
+	if (page + 1 > PFN_UP(bo->resource->size))
+		return NULL;
+
+	if (!bo->resource->bus.is_iomem && bo->ttm->pages && bo->ttm->pages[page])
+		return kmap_local_page_try_from_panic(bo->ttm->pages[page]);
+
+	return NULL;
+}
+EXPORT_SYMBOL(ttm_bo_kmap_try_from_panic);
+
+/**
  * ttm_bo_kmap
  *
  * @bo: The buffer object.
@@ -769,16 +800,15 @@ error_destroy_tt:
 	return ret;
 }
 
-static bool ttm_lru_walk_trylock(struct ttm_lru_walk *walk,
-				 struct ttm_buffer_object *bo,
-				 bool *needs_unlock)
+static bool ttm_lru_walk_trylock(struct ttm_bo_lru_cursor *curs,
+				 struct ttm_buffer_object *bo)
 {
-	struct ttm_operation_ctx *ctx = walk->ctx;
+	struct ttm_operation_ctx *ctx = curs->arg->ctx;
 
-	*needs_unlock = false;
+	curs->needs_unlock = false;
 
 	if (dma_resv_trylock(bo->base.resv)) {
-		*needs_unlock = true;
+		curs->needs_unlock = true;
 		return true;
 	}
 
@@ -790,27 +820,27 @@ static bool ttm_lru_walk_trylock(struct ttm_lru_walk *walk,
 	return false;
 }
 
-static int ttm_lru_walk_ticketlock(struct ttm_lru_walk *walk,
-				   struct ttm_buffer_object *bo,
-				   bool *needs_unlock)
+static int ttm_lru_walk_ticketlock(struct ttm_bo_lru_cursor *curs,
+				   struct ttm_buffer_object *bo)
 {
+	struct ttm_lru_walk_arg *arg = curs->arg;
 	struct dma_resv *resv = bo->base.resv;
 	int ret;
 
-	if (walk->ctx->interruptible)
-		ret = dma_resv_lock_interruptible(resv, walk->ticket);
+	if (arg->ctx->interruptible)
+		ret = dma_resv_lock_interruptible(resv, arg->ticket);
 	else
-		ret = dma_resv_lock(resv, walk->ticket);
+		ret = dma_resv_lock(resv, arg->ticket);
 
 	if (!ret) {
-		*needs_unlock = true;
+		curs->needs_unlock = true;
 		/*
 		 * Only a single ticketlock per loop. Ticketlocks are prone
 		 * to return -EDEADLK causing the eviction to fail, so
 		 * after waiting for the ticketlock, revert back to
 		 * trylocking for this walk.
 		 */
-		walk->ticket = NULL;
+		arg->ticket = NULL;
	} else if (ret == -EDEADLK) {
 		/* Caller needs to exit the ww transaction. */
 		ret = -ENOSPC;
@@ -819,12 +849,6 @@ static int ttm_lru_walk_ticketlock(struct ttm_lru_walk *walk,
 	return ret;
 }
 
-static void ttm_lru_walk_unlock(struct ttm_buffer_object *bo, bool locked)
-{
-	if (locked)
-		dma_resv_unlock(bo->base.resv);
-}
-
 /**
  * ttm_lru_walk_for_evict() - Perform a LRU list walk, with actions taken on
  * valid items.
@@ -859,40 +883,118 @@ static void ttm_lru_walk_unlock(struct ttm_buffer_object *bo, bool locked)
 s64 ttm_lru_walk_for_evict(struct ttm_lru_walk *walk, struct ttm_device *bdev,
 			   struct ttm_resource_manager *man, s64 target)
 {
-	struct ttm_resource_cursor cursor;
-	struct ttm_resource *res;
+	struct ttm_bo_lru_cursor cursor;
+	struct ttm_buffer_object *bo;
 	s64 progress = 0;
 	s64 lret;
 
-	spin_lock(&bdev->lru_lock);
-	ttm_resource_manager_for_each_res(man, &cursor, res) {
-		struct ttm_buffer_object *bo = res->bo;
-		bool bo_needs_unlock = false;
+	ttm_bo_lru_for_each_reserved_guarded(&cursor, man, &walk->arg, bo) {
+		lret = walk->ops->process_bo(walk, bo);
+		if (lret == -EBUSY || lret == -EALREADY)
+			lret = 0;
+		progress = (lret < 0) ? lret : progress + lret;
+		if (progress < 0 || progress >= target)
+			break;
+	}
+	if (IS_ERR(bo))
+		return PTR_ERR(bo);
+
+	return progress;
+}
+EXPORT_SYMBOL(ttm_lru_walk_for_evict);
+
+static void ttm_bo_lru_cursor_cleanup_bo(struct ttm_bo_lru_cursor *curs)
+{
+	struct ttm_buffer_object *bo = curs->bo;
+
+	if (bo) {
+		if (curs->needs_unlock)
+			dma_resv_unlock(bo->base.resv);
+		ttm_bo_put(bo);
+		curs->bo = NULL;
+	}
+}
+
+/**
+ * ttm_bo_lru_cursor_fini() - Stop using a struct ttm_bo_lru_cursor
+ * and clean up any iteration it was used for.
+ * @curs: The cursor.
+ */
+void ttm_bo_lru_cursor_fini(struct ttm_bo_lru_cursor *curs)
+{
+	spinlock_t *lru_lock = &curs->res_curs.man->bdev->lru_lock;
+
+	ttm_bo_lru_cursor_cleanup_bo(curs);
+	spin_lock(lru_lock);
+	ttm_resource_cursor_fini(&curs->res_curs);
+	spin_unlock(lru_lock);
+}
+EXPORT_SYMBOL(ttm_bo_lru_cursor_fini);
+
+/**
+ * ttm_bo_lru_cursor_init() - Initialize a struct ttm_bo_lru_cursor
+ * @curs: The ttm_bo_lru_cursor to initialize.
+ * @man: The ttm resource_manager whose LRU lists to iterate over.
+ * @arg: The ttm_lru_walk_arg to govern the walk.
+ *
+ * Initialize a struct ttm_bo_lru_cursor.
+ *
+ * Return: Pointer to @curs. The function does not fail.
+ */
+struct ttm_bo_lru_cursor *
+ttm_bo_lru_cursor_init(struct ttm_bo_lru_cursor *curs,
+		       struct ttm_resource_manager *man,
+		       struct ttm_lru_walk_arg *arg)
+{
+	memset(curs, 0, sizeof(*curs));
+	ttm_resource_cursor_init(&curs->res_curs, man);
+	curs->arg = arg;
+
+	return curs;
+}
+EXPORT_SYMBOL(ttm_bo_lru_cursor_init);
+
+static struct ttm_buffer_object *
+__ttm_bo_lru_cursor_next(struct ttm_bo_lru_cursor *curs)
+{
+	spinlock_t *lru_lock = &curs->res_curs.man->bdev->lru_lock;
+	struct ttm_resource *res = NULL;
+	struct ttm_buffer_object *bo;
+	struct ttm_lru_walk_arg *arg = curs->arg;
+	bool first = !curs->bo;
+
+	ttm_bo_lru_cursor_cleanup_bo(curs);
+
+	spin_lock(lru_lock);
+	for (;;) {
+		int mem_type, ret = 0;
 		bool bo_locked = false;
-		int mem_type;
 
-		/*
-		 * Attempt a trylock before taking a reference on the bo,
-		 * since if we do it the other way around, and the trylock fails,
-		 * we need to drop the lru lock to put the bo.
-		 */
-		if (ttm_lru_walk_trylock(walk, bo, &bo_needs_unlock))
+		if (first) {
+			res = ttm_resource_manager_first(&curs->res_curs);
+			first = false;
+		} else {
+			res = ttm_resource_manager_next(&curs->res_curs);
+		}
+		if (!res)
+			break;
+
+		bo = res->bo;
+		if (ttm_lru_walk_trylock(curs, bo))
 			bo_locked = true;
-		else if (!walk->ticket || walk->ctx->no_wait_gpu ||
-			 walk->trylock_only)
+		else if (!arg->ticket || arg->ctx->no_wait_gpu || arg->trylock_only)
 			continue;
 
 		if (!ttm_bo_get_unless_zero(bo)) {
-			ttm_lru_walk_unlock(bo, bo_needs_unlock);
+			if (curs->needs_unlock)
+				dma_resv_unlock(bo->base.resv);
 			continue;
 		}
 
 		mem_type = res->mem_type;
-		spin_unlock(&bdev->lru_lock);
-
-		lret = 0;
+		spin_unlock(lru_lock);
 		if (!bo_locked)
-			lret = ttm_lru_walk_ticketlock(walk, bo, &bo_needs_unlock);
+			ret = ttm_lru_walk_ticketlock(curs, bo);
 
 		/*
 		 * Note that in between the release of the lru lock and the
@@ -901,21 +1003,152 @@ s64 ttm_lru_walk_for_evict(struct ttm_lru_walk *walk, struct ttm_device *bdev,
 		 * acquisition of the object lock, the object might again be
 		 * freed and allocated again with a different memory type.
 		 * In that case, just skip it.
 		 */
-		if (!lret && bo->resource && bo->resource->mem_type == mem_type)
-			lret = walk->ops->process_bo(walk, bo);
+		curs->bo = bo;
+		if (!ret && bo->resource && bo->resource->mem_type == mem_type)
+			return bo;
 
-		ttm_lru_walk_unlock(bo, bo_needs_unlock);
-		ttm_bo_put(bo);
-		if (lret == -EBUSY || lret == -EALREADY)
-			lret = 0;
-		progress = (lret < 0) ? lret : progress + lret;
+		ttm_bo_lru_cursor_cleanup_bo(curs);
+		if (ret && ret != -EALREADY)
+			return ERR_PTR(ret);
 
-		spin_lock(&bdev->lru_lock);
-		if (progress < 0 || progress >= target)
-			break;
+		spin_lock(lru_lock);
 	}
-	ttm_resource_cursor_fini(&cursor);
-	spin_unlock(&bdev->lru_lock);
 
-	return progress;
+	spin_unlock(lru_lock);
+
+	return res ? bo : NULL;
+}
+
+/**
+ * ttm_bo_lru_cursor_next() - Continue iterating a manager's LRU lists
+ * to find and lock buffer object.
+ * @curs: The cursor initialized using ttm_bo_lru_cursor_init() and
+ * ttm_bo_lru_cursor_first().
+ *
+ * Return: A pointer to a locked and reference-counted buffer object,
+ * or NULL if none could be found and looping should be terminated.
+ */
+struct ttm_buffer_object *ttm_bo_lru_cursor_next(struct ttm_bo_lru_cursor *curs)
+{
+	return __ttm_bo_lru_cursor_next(curs);
+}
+EXPORT_SYMBOL(ttm_bo_lru_cursor_next);
+
+/**
+ * ttm_bo_lru_cursor_first() - Start iterating a manager's LRU lists
+ * to find and lock buffer object.
+ * @curs: The cursor initialized using ttm_bo_lru_cursor_init().
+ *
+ * Return: A pointer to a locked and reference-counted buffer object,
+ * or NULL if none could be found and looping should be terminated.
+ */
+struct ttm_buffer_object *ttm_bo_lru_cursor_first(struct ttm_bo_lru_cursor *curs)
+{
+	ttm_bo_lru_cursor_cleanup_bo(curs);
+	return __ttm_bo_lru_cursor_next(curs);
+}
+EXPORT_SYMBOL(ttm_bo_lru_cursor_first);
+
+/**
+ * ttm_bo_shrink() - Helper to shrink a ttm buffer object.
+ * @ctx: The struct ttm_operation_ctx used for the shrinking operation.
+ * @bo: The buffer object.
+ * @flags: Flags governing the shrinking behaviour.
+ *
+ * The function uses the ttm_tt_back_up functionality to back up or
+ * purge a struct ttm_tt. If the bo is not in system, it's first
+ * moved there.
+ *
+ * Return: The number of pages shrunken or purged, or
+ * negative error code on failure.
+ */
+long ttm_bo_shrink(struct ttm_operation_ctx *ctx, struct ttm_buffer_object *bo,
+		   const struct ttm_bo_shrink_flags flags)
+{
+	static const struct ttm_place sys_placement_flags = {
+		.fpfn = 0,
+		.lpfn = 0,
+		.mem_type = TTM_PL_SYSTEM,
+		.flags = 0,
+	};
+	static struct ttm_placement sys_placement = {
+		.num_placement = 1,
+		.placement = &sys_placement_flags,
+	};
+	struct ttm_tt *tt = bo->ttm;
+	long lret;
+
+	dma_resv_assert_held(bo->base.resv);
+
+	if (flags.allow_move && bo->resource->mem_type != TTM_PL_SYSTEM) {
+		int ret = ttm_bo_validate(bo, &sys_placement, ctx);
+
+		/* Consider -ENOMEM and -ENOSPC non-fatal. */
+		if (ret) {
+			if (ret == -ENOMEM || ret == -ENOSPC)
+				ret = -EBUSY;
+			return ret;
+		}
+	}
+
+	ttm_bo_unmap_virtual(bo);
+	lret = ttm_bo_wait_ctx(bo, ctx);
+	if (lret < 0)
+		return lret;
+
+	if (bo->bulk_move) {
+		spin_lock(&bo->bdev->lru_lock);
+		ttm_resource_del_bulk_move(bo->resource, bo);
+		spin_unlock(&bo->bdev->lru_lock);
+	}
+
+	lret = ttm_tt_backup(bo->bdev, tt, (struct ttm_backup_flags)
+			     {.purge = flags.purge,
+			      .writeback = flags.writeback});
+
+	if (lret <= 0 && bo->bulk_move) {
+		spin_lock(&bo->bdev->lru_lock);
+		ttm_resource_add_bulk_move(bo->resource, bo);
+		spin_unlock(&bo->bdev->lru_lock);
+	}
+
+	if (lret < 0 && lret != -EINTR)
+		return -EBUSY;
+
+	return lret;
+}
+EXPORT_SYMBOL(ttm_bo_shrink);
+
+/**
+ * ttm_bo_shrink_suitable() - Whether a bo is suitable for shinking
+ * @ctx: The struct ttm_operation_ctx governing the shrinking.
+ * @bo: The candidate for shrinking.
+ *
+ * Check whether the object, given the information available to TTM,
+ * is suitable for shinking, This function can and should be used
+ * before attempting to shrink an object.
+ *
+ * Return: true if suitable. false if not.
+ */
+bool ttm_bo_shrink_suitable(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx)
+{
+	return bo->ttm && ttm_tt_is_populated(bo->ttm) && !bo->pin_count &&
+		(!ctx->no_wait_gpu ||
+		 dma_resv_test_signaled(bo->base.resv, DMA_RESV_USAGE_BOOKKEEP));
+}
+EXPORT_SYMBOL(ttm_bo_shrink_suitable);
+
+/**
+ * ttm_bo_shrink_avoid_wait() - Whether to avoid waiting for GPU
+ * during shrinking
+ *
+ * In some situations, like direct reclaim, waiting (in particular gpu waiting)
+ * should be avoided since it may stall a system that could otherwise make progress
+ * shrinking something else less time consuming.
+ *
+ * Return: true if gpu waiting should be avoided, false if not.
+ */
+bool ttm_bo_shrink_avoid_wait(void)
+{
+	return !current_is_kswapd();
 }
+EXPORT_SYMBOL(ttm_bo_shrink_avoid_wait);
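Usage note: ttm_bo_kmap_try_from_panic() avoids any allocation or locking, so a panic-path caller only has to walk the displayed bo page by page and unmap each page with kunmap_local(). Below is a minimal hedged sketch of such a caller, assuming the usual TTM headers; example_panic_fill() and the byte-fill loop are hypothetical and only illustrate the contract stated in the kernel-doc above (the bo must be the one being displayed, properly allocated and protected).

/*
 * Hedged sketch, not part of the diff: scribble into a displayed bo
 * from a panic handler, one page at a time.
 */
static void example_panic_fill(struct ttm_buffer_object *bo, u8 value)
{
	unsigned long npages = PFN_UP(bo->resource->size);
	unsigned long i;

	for (i = 0; i < npages; i++) {
		void *vaddr = ttm_bo_kmap_try_from_panic(bo, i);

		if (!vaddr)	/* iomem or missing page: nothing to write */
			continue;
		memset(vaddr, value, PAGE_SIZE);
		kunmap_local(vaddr);
	}
}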
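Usage note: the ttm_bo_lru_cursor functions form a first/next iteration pair around a per-bo action, which is how the rewritten ttm_lru_walk_for_evict() consumes them via the ttm_bo_lru_for_each_reserved_guarded() macro. A hedged sketch of driving the cursor directly follows; example_walk_lru() and example_process_one() are hypothetical, while the cursor calls, struct ttm_lru_walk_arg and the IS_ERR() handling mirror the diff.

/*
 * Hedged sketch, not part of the diff: iterate one resource manager's
 * LRU lists with the cursor API. Each bo returned is locked and
 * reference-counted; ttm_bo_lru_cursor_fini() releases the last one.
 */
static s64 example_walk_lru(struct ttm_resource_manager *man,
			    struct ttm_operation_ctx *ctx,
			    struct ww_acquire_ctx *ticket)
{
	struct ttm_lru_walk_arg arg = { .ctx = ctx, .ticket = ticket };
	struct ttm_bo_lru_cursor cursor;
	struct ttm_buffer_object *bo;
	s64 progress = 0;

	ttm_bo_lru_cursor_init(&cursor, man, &arg);
	for (bo = ttm_bo_lru_cursor_first(&cursor);
	     bo && !IS_ERR(bo);
	     bo = ttm_bo_lru_cursor_next(&cursor)) {
		s64 lret = example_process_one(bo, ctx);	/* hypothetical callback */

		if (lret < 0) {
			progress = lret;
			break;
		}
		progress += lret;
	}
	if (IS_ERR(bo))
		progress = PTR_ERR(bo);
	ttm_bo_lru_cursor_fini(&cursor);

	return progress;
}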
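Usage note: ttm_bo_shrink() is meant to be combined with the two predicates at the end of the diff, with ttm_bo_shrink_suitable() filtering candidates and ttm_bo_shrink_avoid_wait() deciding whether GPU waits are acceptable in the current reclaim context. A hedged sketch of that combination for one bo whose reservation the caller already holds; example_try_shrink_bo() and the purge/writeback policy are illustrative only.

/*
 * Hedged sketch, not part of the diff: shrink a single, already
 * reserved bo (bo->base.resv held, as ttm_bo_shrink() asserts).
 */
static long example_try_shrink_bo(struct ttm_buffer_object *bo, bool purge)
{
	struct ttm_operation_ctx ctx = {
		.interruptible = false,
		/* Skip GPU waits in direct reclaim, allow them under kswapd. */
		.no_wait_gpu = ttm_bo_shrink_avoid_wait(),
	};

	if (!ttm_bo_shrink_suitable(bo, &ctx))
		return 0;

	return ttm_bo_shrink(&ctx, bo, (struct ttm_bo_shrink_flags) {
					.purge = purge,
					.writeback = !purge,
					.allow_move = true,
				       });
}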