Diffstat (limited to 'drivers/gpu/drm/ttm')
-rw-r--r--  drivers/gpu/drm/ttm/Makefile                |   3
-rw-r--r--  drivers/gpu/drm/ttm/ttm_agp_backend.c       |  45
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo.c                | 513
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_util.c           | 402
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_vm.c             |  46
-rw-r--r--  drivers/gpu/drm/ttm/ttm_execbuf_util.c      |  12
-rw-r--r--  drivers/gpu/drm/ttm/ttm_memory.c            |   9
-rw-r--r--  drivers/gpu/drm/ttm/ttm_page_alloc.c        |   6
-rw-r--r--  drivers/gpu/drm/ttm/ttm_page_alloc_dma.c    |   6
-rw-r--r--  drivers/gpu/drm/ttm/ttm_range_manager.c (renamed from drivers/gpu/drm/ttm/ttm_bo_manager.c) |  84
-rw-r--r--  drivers/gpu/drm/ttm/ttm_resource.c          | 146
-rw-r--r--  drivers/gpu/drm/ttm/ttm_tt.c                |  93
12 files changed, 542 insertions(+), 823 deletions(-)
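
The patch below replaces struct ttm_mem_reg with struct ttm_resource, folds the fixed per-type bdev->man[] array into per-device ttm_resource_manager pointers looked up via ttm_manager_type(), and moves manager setup out of ttm_bo_init_mm() into the drivers, with the renamed range manager exporting ttm_range_man_init()/ttm_range_man_fini(). The following is a minimal sketch of the driver-side setup this enables; the my_driver_* wrappers are hypothetical, while the ttm_range_man_*(), ttm_manager_type() and ttm_resource_manager_debug() calls and their signatures are taken from the hunks below.

#include <drm/drm_print.h>
#include <drm/ttm/ttm_bo_driver.h>

static int my_driver_vram_init(struct ttm_bo_device *bdev,
			       unsigned long vram_pages)
{
	/* use_tt = false: VRAM is a fixed region with no system-page
	 * backing to populate and bind */
	return ttm_range_man_init(bdev, TTM_PL_VRAM, false, vram_pages);
}

static int my_driver_vram_fini(struct ttm_bo_device *bdev)
{
	/* evicts any remaining BOs through
	 * ttm_resource_manager_force_list_clean() before takedown */
	return ttm_range_man_fini(bdev, TTM_PL_VRAM);
}

static void my_driver_vram_debug(struct ttm_bo_device *bdev,
				 struct drm_printer *p)
{
	/* managers are now reached through ttm_manager_type(),
	 * not a fixed bdev->man[] array */
	ttm_resource_manager_debug(ttm_manager_type(bdev, TTM_PL_VRAM), p);
}
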
diff --git a/drivers/gpu/drm/ttm/Makefile b/drivers/gpu/drm/ttm/Makefile index caea2a099496..90c0da88cc98 100644 --- a/drivers/gpu/drm/ttm/Makefile +++ b/drivers/gpu/drm/ttm/Makefile @@ -4,7 +4,8 @@ ttm-y := ttm_memory.o ttm_tt.o ttm_bo.o \ ttm_bo_util.o ttm_bo_vm.o ttm_module.o \ - ttm_execbuf_util.o ttm_page_alloc.o ttm_bo_manager.o + ttm_execbuf_util.o ttm_page_alloc.o ttm_range_manager.o \ + ttm_resource.o ttm-$(CONFIG_AGP) += ttm_agp_backend.o ttm-$(CONFIG_DRM_TTM_DMA_PAGE_POOL) += ttm_page_alloc_dma.o diff --git a/drivers/gpu/drm/ttm/ttm_agp_backend.c b/drivers/gpu/drm/ttm/ttm_agp_backend.c index 38f1351140e2..a98fd795b752 100644 --- a/drivers/gpu/drm/ttm/ttm_agp_backend.c +++ b/drivers/gpu/drm/ttm/ttm_agp_backend.c @@ -48,7 +48,7 @@ struct ttm_agp_backend { struct agp_bridge_data *bridge; }; -static int ttm_agp_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem) +int ttm_agp_bind(struct ttm_tt *ttm, struct ttm_resource *bo_mem) { struct ttm_agp_backend *agp_be = container_of(ttm, struct ttm_agp_backend, ttm); struct page *dummy_read_page = ttm_bo_glob.dummy_read_page; @@ -57,6 +57,9 @@ static int ttm_agp_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem) int ret, cached = (bo_mem->placement & TTM_PL_FLAG_CACHED); unsigned i; + if (agp_be->mem) + return 0; + mem = agp_allocate_memory(agp_be->bridge, ttm->num_pages, AGP_USER_MEMORY); if (unlikely(mem == NULL)) return -ENOMEM; @@ -81,8 +84,9 @@ static int ttm_agp_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem) return ret; } +EXPORT_SYMBOL(ttm_agp_bind); -static void ttm_agp_unbind(struct ttm_tt *ttm) +void ttm_agp_unbind(struct ttm_tt *ttm) { struct ttm_agp_backend *agp_be = container_of(ttm, struct ttm_agp_backend, ttm); @@ -95,8 +99,20 @@ static void ttm_agp_unbind(struct ttm_tt *ttm) agp_be->mem = NULL; } } +EXPORT_SYMBOL(ttm_agp_unbind); + +bool ttm_agp_is_bound(struct ttm_tt *ttm) +{ + struct ttm_agp_backend *agp_be = container_of(ttm, struct ttm_agp_backend, ttm); + + if (!ttm) + return false; + + return (agp_be->mem != NULL); +} +EXPORT_SYMBOL(ttm_agp_is_bound); -static void ttm_agp_destroy(struct ttm_tt *ttm) +void ttm_agp_destroy(struct ttm_tt *ttm) { struct ttm_agp_backend *agp_be = container_of(ttm, struct ttm_agp_backend, ttm); @@ -105,12 +121,7 @@ static void ttm_agp_destroy(struct ttm_tt *ttm) ttm_tt_fini(ttm); kfree(agp_be); } - -static struct ttm_backend_func ttm_agp_func = { - .bind = ttm_agp_bind, - .unbind = ttm_agp_unbind, - .destroy = ttm_agp_destroy, -}; +EXPORT_SYMBOL(ttm_agp_destroy); struct ttm_tt *ttm_agp_tt_create(struct ttm_buffer_object *bo, struct agp_bridge_data *bridge, @@ -124,7 +135,6 @@ struct ttm_tt *ttm_agp_tt_create(struct ttm_buffer_object *bo, agp_be->mem = NULL; agp_be->bridge = bridge; - agp_be->ttm.func = &ttm_agp_func; if (ttm_tt_init(&agp_be->ttm, bo, page_flags)) { kfree(agp_be); @@ -134,18 +144,3 @@ struct ttm_tt *ttm_agp_tt_create(struct ttm_buffer_object *bo, return &agp_be->ttm; } EXPORT_SYMBOL(ttm_agp_tt_create); - -int ttm_agp_tt_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx) -{ - if (ttm->state != tt_unpopulated) - return 0; - - return ttm_pool_populate(ttm, ctx); -} -EXPORT_SYMBOL(ttm_agp_tt_populate); - -void ttm_agp_tt_unpopulate(struct ttm_tt *ttm) -{ - ttm_pool_unpopulate(ttm); -} -EXPORT_SYMBOL(ttm_agp_tt_unpopulate); diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index cc6a4e7551e3..70b3bee27850 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c @@ -64,51 +64,22 @@ static void 
ttm_bo_default_destroy(struct ttm_buffer_object *bo) kfree(bo); } -static inline int ttm_mem_type_from_place(const struct ttm_place *place, - uint32_t *mem_type) -{ - int pos; - - pos = ffs(place->flags & TTM_PL_MASK_MEM); - if (unlikely(!pos)) - return -EINVAL; - - *mem_type = pos - 1; - return 0; -} - -static void ttm_mem_type_debug(struct ttm_bo_device *bdev, struct drm_printer *p, - int mem_type) -{ - struct ttm_mem_type_manager *man = &bdev->man[mem_type]; - - drm_printf(p, " has_type: %d\n", man->has_type); - drm_printf(p, " use_type: %d\n", man->use_type); - drm_printf(p, " flags: 0x%08X\n", man->flags); - drm_printf(p, " size: %llu\n", man->size); - drm_printf(p, " available_caching: 0x%08X\n", man->available_caching); - drm_printf(p, " default_caching: 0x%08X\n", man->default_caching); - if (mem_type != TTM_PL_SYSTEM) - (*man->func->debug)(man, p); -} - static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo, struct ttm_placement *placement) { struct drm_printer p = drm_debug_printer(TTM_PFX); - int i, ret, mem_type; + struct ttm_resource_manager *man; + int i, mem_type; drm_printf(&p, "No space for %p (%lu pages, %luK, %luM)\n", bo, bo->mem.num_pages, bo->mem.size >> 10, bo->mem.size >> 20); for (i = 0; i < placement->num_placement; i++) { - ret = ttm_mem_type_from_place(&placement->placement[i], - &mem_type); - if (ret) - return; + mem_type = placement->placement[i].mem_type; drm_printf(&p, " placement[%d]=0x%08X (%d)\n", i, placement->placement[i].flags, mem_type); - ttm_mem_type_debug(bo->bdev, &p, mem_type); + man = ttm_manager_type(bo->bdev, mem_type); + ttm_resource_manager_debug(man, &p); } } @@ -138,17 +109,11 @@ static struct kobj_type ttm_bo_glob_kobj_type = { .default_attrs = ttm_bo_global_attrs }; - -static inline uint32_t ttm_bo_type_flags(unsigned type) -{ - return 1 << (type); -} - static void ttm_bo_add_mem_to_lru(struct ttm_buffer_object *bo, - struct ttm_mem_reg *mem) + struct ttm_resource *mem) { struct ttm_bo_device *bdev = bo->bdev; - struct ttm_mem_type_manager *man; + struct ttm_resource_manager *man; if (!list_empty(&bo->lru)) return; @@ -156,10 +121,10 @@ static void ttm_bo_add_mem_to_lru(struct ttm_buffer_object *bo, if (mem->placement & TTM_PL_FLAG_NO_EVICT) return; - man = &bdev->man[mem->mem_type]; + man = ttm_manager_type(bdev, mem->mem_type); list_add_tail(&bo->lru, &man->lru[bo->priority]); - if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED) && bo->ttm && + if (man->use_tt && bo->ttm && !(bo->ttm->page_flags & (TTM_PAGE_FLAG_SG | TTM_PAGE_FLAG_SWAPPED))) { list_add_tail(&bo->swap, &ttm_bo_glob.swap_lru[bo->priority]); @@ -223,7 +188,7 @@ void ttm_bo_bulk_move_lru_tail(struct ttm_lru_bulk_move *bulk) for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) { struct ttm_lru_bulk_move_pos *pos = &bulk->tt[i]; - struct ttm_mem_type_manager *man; + struct ttm_resource_manager *man; if (!pos->first) continue; @@ -231,14 +196,14 @@ void ttm_bo_bulk_move_lru_tail(struct ttm_lru_bulk_move *bulk) dma_resv_assert_held(pos->first->base.resv); dma_resv_assert_held(pos->last->base.resv); - man = &pos->first->bdev->man[TTM_PL_TT]; + man = ttm_manager_type(pos->first->bdev, TTM_PL_TT); list_bulk_move_tail(&man->lru[i], &pos->first->lru, &pos->last->lru); } for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) { struct ttm_lru_bulk_move_pos *pos = &bulk->vram[i]; - struct ttm_mem_type_manager *man; + struct ttm_resource_manager *man; if (!pos->first) continue; @@ -246,7 +211,7 @@ void ttm_bo_bulk_move_lru_tail(struct ttm_lru_bulk_move *bulk) dma_resv_assert_held(pos->first->base.resv); 
dma_resv_assert_held(pos->last->base.resv); - man = &pos->first->bdev->man[TTM_PL_VRAM]; + man = ttm_manager_type(pos->first->bdev, TTM_PL_VRAM); list_bulk_move_tail(&man->lru[i], &pos->first->lru, &pos->last->lru); } @@ -268,38 +233,38 @@ void ttm_bo_bulk_move_lru_tail(struct ttm_lru_bulk_move *bulk) EXPORT_SYMBOL(ttm_bo_bulk_move_lru_tail); static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo, - struct ttm_mem_reg *mem, bool evict, + struct ttm_resource *mem, bool evict, struct ttm_operation_ctx *ctx) { struct ttm_bo_device *bdev = bo->bdev; - struct ttm_mem_type_manager *old_man = &bdev->man[bo->mem.mem_type]; - struct ttm_mem_type_manager *new_man = &bdev->man[mem->mem_type]; + struct ttm_resource_manager *old_man = ttm_manager_type(bdev, bo->mem.mem_type); + struct ttm_resource_manager *new_man = ttm_manager_type(bdev, mem->mem_type); int ret; - ret = ttm_mem_io_lock(old_man, true); - if (unlikely(ret != 0)) - goto out_err; - ttm_bo_unmap_virtual_locked(bo); - ttm_mem_io_unlock(old_man); + ttm_bo_unmap_virtual(bo); /* * Create and bind a ttm if required. */ - if (!(new_man->flags & TTM_MEMTYPE_FLAG_FIXED)) { - if (bo->ttm == NULL) { - bool zero = !(old_man->flags & TTM_MEMTYPE_FLAG_FIXED); - ret = ttm_tt_create(bo, zero); - if (ret) - goto out_err; - } + if (new_man->use_tt) { + /* Zero init the new TTM structure if the old location should + * have used one as well. + */ + ret = ttm_tt_create(bo, old_man->use_tt); + if (ret) + goto out_err; ret = ttm_tt_set_placement_caching(bo->ttm, mem->placement); if (ret) goto out_err; if (mem->mem_type != TTM_PL_SYSTEM) { - ret = ttm_tt_bind(bo->ttm, mem, ctx); + ret = ttm_tt_populate(bdev, bo->ttm, ctx); + if (ret) + goto out_err; + + ret = ttm_bo_tt_bind(bo, mem); if (ret) goto out_err; } @@ -315,8 +280,7 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo, if (bdev->driver->move_notify) bdev->driver->move_notify(bo, evict, mem); - if (!(old_man->flags & TTM_MEMTYPE_FLAG_FIXED) && - !(new_man->flags & TTM_MEMTYPE_FLAG_FIXED)) + if (old_man->use_tt && new_man->use_tt) ret = ttm_bo_move_ttm(bo, ctx, mem); else if (bdev->driver->move) ret = bdev->driver->move(bo, evict, ctx, mem); @@ -334,17 +298,13 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo, } moved: - bo->evicted = false; - ctx->bytes_moved += bo->num_pages << PAGE_SHIFT; return 0; out_err: - new_man = &bdev->man[bo->mem.mem_type]; - if (new_man->flags & TTM_MEMTYPE_FLAG_FIXED) { - ttm_tt_destroy(bo->ttm); - bo->ttm = NULL; - } + new_man = ttm_manager_type(bdev, bo->mem.mem_type); + if (!new_man->use_tt) + ttm_bo_tt_destroy(bo); return ret; } @@ -362,9 +322,8 @@ static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo) if (bo->bdev->driver->move_notify) bo->bdev->driver->move_notify(bo, false, NULL); - ttm_tt_destroy(bo->ttm); - bo->ttm = NULL; - ttm_bo_mem_put(bo, &bo->mem); + ttm_bo_tt_destroy(bo); + ttm_resource_free(bo, &bo->mem); } static int ttm_bo_individualize_resv(struct ttm_buffer_object *bo) @@ -552,7 +511,6 @@ static void ttm_bo_release(struct kref *kref) struct ttm_buffer_object *bo = container_of(kref, struct ttm_buffer_object, kref); struct ttm_bo_device *bdev = bo->bdev; - struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type]; size_t acc_size = bo->acc_size; int ret; @@ -570,9 +528,7 @@ static void ttm_bo_release(struct kref *kref) bo->bdev->driver->release_notify(bo); drm_vma_offset_remove(bdev->vma_manager, &bo->base.vma_node); - ttm_mem_io_lock(man, false); - ttm_mem_io_free_vm(bo); - ttm_mem_io_unlock(man); + 
ttm_mem_io_free(bdev, &bo->mem); } if (!dma_resv_test_signaled_rcu(bo->base.resv, true) || @@ -643,7 +599,7 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx) { struct ttm_bo_device *bdev = bo->bdev; - struct ttm_mem_reg evict_mem; + struct ttm_resource evict_mem; struct ttm_placement placement; int ret = 0; @@ -654,17 +610,16 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bdev->driver->evict_flags(bo, &placement); if (!placement.num_placement && !placement.num_busy_placement) { - ret = ttm_bo_pipeline_gutting(bo); - if (ret) - return ret; + ttm_bo_wait(bo, false, false); + ttm_bo_cleanup_memtype_use(bo); return ttm_tt_create(bo, false); } evict_mem = bo->mem; evict_mem.mm_node = NULL; - evict_mem.bus.io_reserved_vm = false; - evict_mem.bus.io_reserved_count = 0; + evict_mem.bus.offset = 0; + evict_mem.bus.addr = NULL; ret = ttm_bo_mem_space(bo, &placement, &evict_mem, ctx); if (ret) { @@ -680,10 +635,8 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, if (unlikely(ret)) { if (ret != -ERESTARTSYS) pr_err("Buffer eviction failed\n"); - ttm_bo_mem_put(bo, &evict_mem); - goto out; + ttm_resource_free(bo, &evict_mem); } - bo->evicted = true; out: return ret; } @@ -769,14 +722,13 @@ static int ttm_mem_evict_wait_busy(struct ttm_buffer_object *busy_bo, return r == -EDEADLK ? -EBUSY : r; } -static int ttm_mem_evict_first(struct ttm_bo_device *bdev, - uint32_t mem_type, - const struct ttm_place *place, - struct ttm_operation_ctx *ctx, - struct ww_acquire_ctx *ticket) +int ttm_mem_evict_first(struct ttm_bo_device *bdev, + struct ttm_resource_manager *man, + const struct ttm_place *place, + struct ttm_operation_ctx *ctx, + struct ww_acquire_ctx *ticket) { struct ttm_buffer_object *bo = NULL, *busy_bo = NULL; - struct ttm_mem_type_manager *man = &bdev->man[mem_type]; bool locked = false; unsigned i; int ret; @@ -842,38 +794,12 @@ static int ttm_mem_evict_first(struct ttm_bo_device *bdev, return ret; } -static int ttm_bo_mem_get(struct ttm_buffer_object *bo, - const struct ttm_place *place, - struct ttm_mem_reg *mem) -{ - struct ttm_mem_type_manager *man = &bo->bdev->man[mem->mem_type]; - - mem->mm_node = NULL; - if (!man->func || !man->func->get_node) - return 0; - - return man->func->get_node(man, bo, place, mem); -} - -void ttm_bo_mem_put(struct ttm_buffer_object *bo, struct ttm_mem_reg *mem) -{ - struct ttm_mem_type_manager *man = &bo->bdev->man[mem->mem_type]; - - if (!man->func || !man->func->put_node) - return; - - man->func->put_node(man, mem); - mem->mm_node = NULL; - mem->mem_type = TTM_PL_SYSTEM; -} -EXPORT_SYMBOL(ttm_bo_mem_put); - /** * Add the last move fence to the BO and reserve a new shared slot. 
*/ static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo, - struct ttm_mem_type_manager *man, - struct ttm_mem_reg *mem, + struct ttm_resource_manager *man, + struct ttm_resource *mem, bool no_wait_gpu) { struct dma_fence *fence; @@ -910,22 +836,22 @@ static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo, */ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo, const struct ttm_place *place, - struct ttm_mem_reg *mem, + struct ttm_resource *mem, struct ttm_operation_ctx *ctx) { struct ttm_bo_device *bdev = bo->bdev; - struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type]; + struct ttm_resource_manager *man = ttm_manager_type(bdev, mem->mem_type); struct ww_acquire_ctx *ticket; int ret; ticket = dma_resv_locking_ctx(bo->base.resv); do { - ret = ttm_bo_mem_get(bo, place, mem); + ret = ttm_resource_alloc(bo, place, mem); if (likely(!ret)) break; if (unlikely(ret != -ENOSPC)) return ret; - ret = ttm_mem_evict_first(bdev, mem->mem_type, place, ctx, + ret = ttm_mem_evict_first(bdev, man, place, ctx, ticket); if (unlikely(ret != 0)) return ret; @@ -934,7 +860,7 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo, return ttm_bo_add_move_fence(bo, man, mem, ctx->no_wait_gpu); } -static uint32_t ttm_bo_select_caching(struct ttm_mem_type_manager *man, +static uint32_t ttm_bo_select_caching(struct ttm_resource_manager *man, uint32_t cur_placement, uint32_t proposed_placement) { @@ -947,8 +873,6 @@ static uint32_t ttm_bo_select_caching(struct ttm_mem_type_manager *man, if ((cur_placement & caching) != 0) result |= (cur_placement & caching); - else if ((man->default_caching & caching) != 0) - result |= man->default_caching; else if ((TTM_PL_FLAG_CACHED & caching) != 0) result |= TTM_PL_FLAG_CACHED; else if ((TTM_PL_FLAG_WC & caching) != 0) @@ -959,25 +883,6 @@ static uint32_t ttm_bo_select_caching(struct ttm_mem_type_manager *man, return result; } -static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man, - uint32_t mem_type, - const struct ttm_place *place, - uint32_t *masked_placement) -{ - uint32_t cur_flags = ttm_bo_type_flags(mem_type); - - if ((cur_flags & place->flags & TTM_PL_MASK_MEM) == 0) - return false; - - if ((place->flags & man->available_caching) == 0) - return false; - - cur_flags |= (place->flags & man->available_caching); - - *masked_placement = cur_flags; - return true; -} - /** * ttm_bo_mem_placement - check if placement is compatible * @bo: BO to find memory for @@ -991,34 +896,22 @@ static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man, */ static int ttm_bo_mem_placement(struct ttm_buffer_object *bo, const struct ttm_place *place, - struct ttm_mem_reg *mem, + struct ttm_resource *mem, struct ttm_operation_ctx *ctx) { struct ttm_bo_device *bdev = bo->bdev; - uint32_t mem_type = TTM_PL_SYSTEM; - struct ttm_mem_type_manager *man; + struct ttm_resource_manager *man; uint32_t cur_flags = 0; - int ret; - - ret = ttm_mem_type_from_place(place, &mem_type); - if (ret) - return ret; - - man = &bdev->man[mem_type]; - if (!man->has_type || !man->use_type) - return -EBUSY; - if (!ttm_bo_mt_compatible(man, mem_type, place, &cur_flags)) + man = ttm_manager_type(bdev, place->mem_type); + if (!man || !ttm_resource_manager_used(man)) return -EBUSY; - cur_flags = ttm_bo_select_caching(man, bo->mem.placement, cur_flags); - /* - * Use the access and other non-mapping-related flag bits from - * the memory placement flags to the current flags - */ - ttm_flag_masked(&cur_flags, place->flags, ~TTM_PL_MASK_MEMTYPE); + cur_flags = 
ttm_bo_select_caching(man, bo->mem.placement, + place->flags); + cur_flags |= place->flags & ~TTM_PL_MASK_CACHING; - mem->mem_type = mem_type; + mem->mem_type = place->mem_type; mem->placement = cur_flags; spin_lock(&ttm_bo_glob.lru_lock); @@ -1039,7 +932,7 @@ static int ttm_bo_mem_placement(struct ttm_buffer_object *bo, */ int ttm_bo_mem_space(struct ttm_buffer_object *bo, struct ttm_placement *placement, - struct ttm_mem_reg *mem, + struct ttm_resource *mem, struct ttm_operation_ctx *ctx) { struct ttm_bo_device *bdev = bo->bdev; @@ -1052,25 +945,23 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo, for (i = 0; i < placement->num_placement; ++i) { const struct ttm_place *place = &placement->placement[i]; - struct ttm_mem_type_manager *man; + struct ttm_resource_manager *man; ret = ttm_bo_mem_placement(bo, place, mem, ctx); - if (ret == -EBUSY) - continue; if (ret) - goto error; + continue; type_found = true; - ret = ttm_bo_mem_get(bo, place, mem); + ret = ttm_resource_alloc(bo, place, mem); if (ret == -ENOSPC) continue; if (unlikely(ret)) goto error; - man = &bdev->man[mem->mem_type]; + man = ttm_manager_type(bdev, mem->mem_type); ret = ttm_bo_add_move_fence(bo, man, mem, ctx->no_wait_gpu); if (unlikely(ret)) { - ttm_bo_mem_put(bo, mem); + ttm_resource_free(bo, mem); if (ret == -EBUSY) continue; @@ -1083,10 +974,8 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo, const struct ttm_place *place = &placement->busy_placement[i]; ret = ttm_bo_mem_placement(bo, place, mem, ctx); - if (ret == -EBUSY) - continue; if (ret) - goto error; + continue; type_found = true; ret = ttm_bo_mem_force_space(bo, place, mem, ctx); @@ -1105,9 +994,7 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo, error: if (bo->mem.mem_type == TTM_PL_SYSTEM && !list_empty(&bo->lru)) { - spin_lock(&ttm_bo_glob.lru_lock); - ttm_bo_move_to_lru_tail(bo, NULL); - spin_unlock(&ttm_bo_glob.lru_lock); + ttm_bo_move_to_lru_tail_unlocked(bo); } return ret; @@ -1119,15 +1006,15 @@ static int ttm_bo_move_buffer(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx) { int ret = 0; - struct ttm_mem_reg mem; + struct ttm_resource mem; dma_resv_assert_held(bo->base.resv); mem.num_pages = bo->num_pages; mem.size = mem.num_pages << PAGE_SHIFT; mem.page_alignment = bo->mem.page_alignment; - mem.bus.io_reserved_vm = false; - mem.bus.io_reserved_count = 0; + mem.bus.offset = 0; + mem.bus.addr = NULL; mem.mm_node = NULL; /* @@ -1139,13 +1026,13 @@ static int ttm_bo_move_buffer(struct ttm_buffer_object *bo, ret = ttm_bo_handle_move_mem(bo, &mem, false, ctx); out_unlock: if (ret) - ttm_bo_mem_put(bo, &mem); + ttm_resource_free(bo, &mem); return ret; } static bool ttm_bo_places_compat(const struct ttm_place *places, unsigned num_placement, - struct ttm_mem_reg *mem, + struct ttm_resource *mem, uint32_t *new_flags) { unsigned i; @@ -1159,7 +1046,7 @@ static bool ttm_bo_places_compat(const struct ttm_place *places, *new_flags = heap->flags; if ((*new_flags & mem->placement & TTM_PL_MASK_CACHING) && - (*new_flags & mem->placement & TTM_PL_MASK_MEM) && + (mem->mem_type == heap->mem_type) && (!(*new_flags & TTM_PL_FLAG_CONTIGUOUS) || (mem->placement & TTM_PL_FLAG_CONTIGUOUS))) return true; @@ -1168,7 +1055,7 @@ static bool ttm_bo_places_compat(const struct ttm_place *places, } bool ttm_bo_mem_compat(struct ttm_placement *placement, - struct ttm_mem_reg *mem, + struct ttm_resource *mem, uint32_t *new_flags) { if (ttm_bo_places_compat(placement->placement, placement->num_placement, @@ -1214,17 +1101,13 @@ int ttm_bo_validate(struct 
ttm_buffer_object *bo, if (ret) return ret; } else { - /* - * Use the access and other non-mapping-related flag bits from - * the compatible memory placement flags to the active flags - */ - ttm_flag_masked(&bo->mem.placement, new_flags, - ~TTM_PL_MASK_MEMTYPE); + bo->mem.placement &= TTM_PL_MASK_CACHING; + bo->mem.placement |= new_flags & ~TTM_PL_MASK_CACHING; } /* * We might need to add a TTM. */ - if (bo->mem.mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) { + if (bo->mem.mem_type == TTM_PL_SYSTEM) { ret = ttm_tt_create(bo, true); if (ret) return ret; @@ -1276,7 +1159,6 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev, INIT_LIST_HEAD(&bo->lru); INIT_LIST_HEAD(&bo->ddestroy); INIT_LIST_HEAD(&bo->swap); - INIT_LIST_HEAD(&bo->io_reserve_lru); bo->bdev = bdev; bo->type = type; bo->num_pages = num_pages; @@ -1285,10 +1167,10 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev, bo->mem.num_pages = bo->num_pages; bo->mem.mm_node = NULL; bo->mem.page_alignment = page_alignment; - bo->mem.bus.io_reserved_vm = false; - bo->mem.bus.io_reserved_count = 0; + bo->mem.bus.offset = 0; + bo->mem.bus.addr = NULL; bo->moving = NULL; - bo->mem.placement = (TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED); + bo->mem.placement = TTM_PL_FLAG_CACHED; bo->acc_size = acc_size; bo->sg = sg; if (resv) { @@ -1335,9 +1217,7 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev, return ret; } - spin_lock(&ttm_bo_glob.lru_lock); - ttm_bo_move_to_lru_tail(bo, NULL); - spin_unlock(&ttm_bo_glob.lru_lock); + ttm_bo_move_to_lru_tail_unlocked(bo); return ret; } @@ -1371,9 +1251,9 @@ int ttm_bo_init(struct ttm_bo_device *bdev, } EXPORT_SYMBOL(ttm_bo_init); -size_t ttm_bo_acc_size(struct ttm_bo_device *bdev, - unsigned long bo_size, - unsigned struct_size) +static size_t ttm_bo_acc_size(struct ttm_bo_device *bdev, + unsigned long bo_size, + unsigned struct_size) { unsigned npages = (PAGE_ALIGN(bo_size)) >> PAGE_SHIFT; size_t size = 0; @@ -1383,7 +1263,6 @@ size_t ttm_bo_acc_size(struct ttm_bo_device *bdev, size += ttm_round_pot(sizeof(struct ttm_tt)); return size; } -EXPORT_SYMBOL(ttm_bo_acc_size); size_t ttm_bo_dma_acc_size(struct ttm_bo_device *bdev, unsigned long bo_size, @@ -1426,144 +1305,24 @@ int ttm_bo_create(struct ttm_bo_device *bdev, } EXPORT_SYMBOL(ttm_bo_create); -static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev, - unsigned mem_type) -{ - struct ttm_operation_ctx ctx = { - .interruptible = false, - .no_wait_gpu = false, - .flags = TTM_OPT_FLAG_FORCE_ALLOC - }; - struct ttm_mem_type_manager *man = &bdev->man[mem_type]; - struct ttm_bo_global *glob = &ttm_bo_glob; - struct dma_fence *fence; - int ret; - unsigned i; - - /* - * Can't use standard list traversal since we're unlocking. 
- */ - - spin_lock(&glob->lru_lock); - for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) { - while (!list_empty(&man->lru[i])) { - spin_unlock(&glob->lru_lock); - ret = ttm_mem_evict_first(bdev, mem_type, NULL, &ctx, - NULL); - if (ret) - return ret; - spin_lock(&glob->lru_lock); - } - } - spin_unlock(&glob->lru_lock); - - spin_lock(&man->move_lock); - fence = dma_fence_get(man->move); - spin_unlock(&man->move_lock); - - if (fence) { - ret = dma_fence_wait(fence, false); - dma_fence_put(fence); - if (ret) - return ret; - } - - return 0; -} - -int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type) -{ - struct ttm_mem_type_manager *man; - int ret = -EINVAL; - - if (mem_type >= TTM_NUM_MEM_TYPES) { - pr_err("Illegal memory type %d\n", mem_type); - return ret; - } - man = &bdev->man[mem_type]; - - if (!man->has_type) { - pr_err("Trying to take down uninitialized memory manager type %u\n", - mem_type); - return ret; - } - - man->use_type = false; - man->has_type = false; - - ret = 0; - if (mem_type > 0) { - ret = ttm_bo_force_list_clean(bdev, mem_type); - if (ret) { - pr_err("Cleanup eviction failed\n"); - return ret; - } - - ret = (*man->func->takedown)(man); - } - - dma_fence_put(man->move); - man->move = NULL; - - return ret; -} -EXPORT_SYMBOL(ttm_bo_clean_mm); - int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type) { - struct ttm_mem_type_manager *man = &bdev->man[mem_type]; + struct ttm_resource_manager *man = ttm_manager_type(bdev, mem_type); if (mem_type == 0 || mem_type >= TTM_NUM_MEM_TYPES) { pr_err("Illegal memory manager memory type %u\n", mem_type); return -EINVAL; } - if (!man->has_type) { + if (!man) { pr_err("Memory type %u has not been initialized\n", mem_type); return 0; } - return ttm_bo_force_list_clean(bdev, mem_type); + return ttm_resource_manager_force_list_clean(bdev, man); } EXPORT_SYMBOL(ttm_bo_evict_mm); -int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type, - unsigned long p_size) -{ - int ret; - struct ttm_mem_type_manager *man; - unsigned i; - - BUG_ON(type >= TTM_NUM_MEM_TYPES); - man = &bdev->man[type]; - BUG_ON(man->has_type); - man->use_io_reserve_lru = false; - mutex_init(&man->io_reserve_mutex); - spin_lock_init(&man->move_lock); - INIT_LIST_HEAD(&man->io_reserve_lru); - - ret = bdev->driver->init_mem_type(bdev, type, man); - if (ret) - return ret; - man->bdev = bdev; - - if (type != TTM_PL_SYSTEM) { - ret = (*man->func->init)(man, p_size); - if (ret) - return ret; - } - man->has_type = true; - man->use_type = true; - man->size = p_size; - - for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) - INIT_LIST_HEAD(&man->lru[i]); - man->move = NULL; - - return 0; -} -EXPORT_SYMBOL(ttm_bo_init_mm); - static void ttm_bo_global_kobj_release(struct kobject *kobj) { struct ttm_bo_global *glob = @@ -1628,21 +1387,12 @@ int ttm_bo_device_release(struct ttm_bo_device *bdev) { struct ttm_bo_global *glob = &ttm_bo_glob; int ret = 0; - unsigned i = TTM_NUM_MEM_TYPES; - struct ttm_mem_type_manager *man; - - while (i--) { - man = &bdev->man[i]; - if (man->has_type) { - man->use_type = false; - if ((i != TTM_PL_SYSTEM) && ttm_bo_clean_mm(bdev, i)) { - ret = -EBUSY; - pr_err("DRM memory manager type %d is not clean\n", - i); - } - man->has_type = false; - } - } + unsigned i; + struct ttm_resource_manager *man; + + man = ttm_manager_type(bdev, TTM_PL_SYSTEM); + ttm_resource_manager_set_used(man, false); + ttm_set_driver_manager(bdev, TTM_PL_SYSTEM, NULL); mutex_lock(&ttm_global_mutex); list_del(&bdev->device_list); @@ -1655,7 +1405,7 @@ int 
ttm_bo_device_release(struct ttm_bo_device *bdev) spin_lock(&glob->lru_lock); for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) - if (list_empty(&bdev->man[0].lru[0])) + if (list_empty(&man->lru[0])) pr_debug("Swap list %d was clean\n", i); spin_unlock(&glob->lru_lock); @@ -1666,6 +1416,21 @@ int ttm_bo_device_release(struct ttm_bo_device *bdev) } EXPORT_SYMBOL(ttm_bo_device_release); +static void ttm_bo_init_sysman(struct ttm_bo_device *bdev) +{ + struct ttm_resource_manager *man = &bdev->sysman; + + /* + * Initialize the system memory buffer type. + * Other types need to be driver / IOCTL initialized. + */ + man->use_tt = true; + + ttm_resource_manager_init(man, 0); + ttm_set_driver_manager(bdev, TTM_PL_SYSTEM, man); + ttm_resource_manager_set_used(man, true); +} + int ttm_bo_device_init(struct ttm_bo_device *bdev, struct ttm_bo_driver *driver, struct address_space *mapping, @@ -1684,15 +1449,7 @@ int ttm_bo_device_init(struct ttm_bo_device *bdev, bdev->driver = driver; - memset(bdev->man, 0, sizeof(bdev->man)); - - /* - * Initialize the system memory buffer type. - * Other types need to be driver / IOCTL initialized. - */ - ret = ttm_bo_init_mm(bdev, TTM_PL_SYSTEM, 0); - if (unlikely(ret != 0)) - goto out_no_sys; + ttm_bo_init_sysman(bdev); bdev->vma_manager = vma_manager; INIT_DELAYED_WORK(&bdev->wq, ttm_bo_delayed_workqueue); @@ -1704,9 +1461,6 @@ int ttm_bo_device_init(struct ttm_bo_device *bdev, mutex_unlock(&ttm_global_mutex); return 0; -out_no_sys: - ttm_bo_global_release(); - return ret; } EXPORT_SYMBOL(ttm_bo_device_init); @@ -1714,25 +1468,13 @@ EXPORT_SYMBOL(ttm_bo_device_init); * buffer object vm functions. */ -void ttm_bo_unmap_virtual_locked(struct ttm_buffer_object *bo) -{ - struct ttm_bo_device *bdev = bo->bdev; - - drm_vma_node_unmap(&bo->base.vma_node, bdev->dev_mapping); - ttm_mem_io_free_vm(bo); -} - void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo) { struct ttm_bo_device *bdev = bo->bdev; - struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type]; - ttm_mem_io_lock(man, false); - ttm_bo_unmap_virtual_locked(bo); - ttm_mem_io_unlock(man); + drm_vma_node_unmap(&bo->base.vma_node, bdev->dev_mapping); + ttm_mem_io_free(bdev, &bo->mem); } - - EXPORT_SYMBOL(ttm_bo_unmap_virtual); int ttm_bo_wait(struct ttm_buffer_object *bo, @@ -1812,11 +1554,11 @@ int ttm_bo_swapout(struct ttm_bo_global *glob, struct ttm_operation_ctx *ctx) if (bo->mem.mem_type != TTM_PL_SYSTEM || bo->ttm->caching_state != tt_cached) { struct ttm_operation_ctx ctx = { false, false }; - struct ttm_mem_reg evict_mem; + struct ttm_resource evict_mem; evict_mem = bo->mem; evict_mem.mm_node = NULL; - evict_mem.placement = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED; + evict_mem.placement = TTM_PL_FLAG_CACHED; evict_mem.mem_type = TTM_PL_SYSTEM; ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, &ctx); @@ -1842,7 +1584,7 @@ int ttm_bo_swapout(struct ttm_bo_global *glob, struct ttm_operation_ctx *ctx) if (bo->bdev->driver->swap_notify) bo->bdev->driver->swap_notify(bo); - ret = ttm_tt_swapout(bo->ttm, bo->persistent_swap_storage); + ret = ttm_tt_swapout(bo->bdev, bo->ttm, bo->persistent_swap_storage); out: /** @@ -1867,3 +1609,22 @@ void ttm_bo_swapout_all(void) while (ttm_bo_swapout(&ttm_bo_glob, &ctx) == 0); } EXPORT_SYMBOL(ttm_bo_swapout_all); + +void ttm_bo_tt_destroy(struct ttm_buffer_object *bo) +{ + if (bo->ttm == NULL) + return; + + ttm_tt_destroy(bo->bdev, bo->ttm); + bo->ttm = NULL; +} + +int ttm_bo_tt_bind(struct ttm_buffer_object *bo, struct ttm_resource *mem) +{ + return 
bo->bdev->driver->ttm_tt_bind(bo->bdev, bo->ttm, mem); +} + +void ttm_bo_tt_unbind(struct ttm_buffer_object *bo) +{ + bo->bdev->driver->ttm_tt_unbind(bo->bdev, bo->ttm); +} diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c index e6c8bd254055..fb2a25f8408f 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_util.c +++ b/drivers/gpu/drm/ttm/ttm_bo_util.c @@ -47,15 +47,15 @@ struct ttm_transfer_obj { void ttm_bo_free_old_node(struct ttm_buffer_object *bo) { - ttm_bo_mem_put(bo, &bo->mem); + ttm_resource_free(bo, &bo->mem); } int ttm_bo_move_ttm(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx, - struct ttm_mem_reg *new_mem) + struct ttm_resource *new_mem) { struct ttm_tt *ttm = bo->ttm; - struct ttm_mem_reg *old_mem = &bo->mem; + struct ttm_resource *old_mem = &bo->mem; int ret; if (old_mem->mem_type != TTM_PL_SYSTEM) { @@ -67,10 +67,8 @@ int ttm_bo_move_ttm(struct ttm_buffer_object *bo, return ret; } - ttm_tt_unbind(ttm); + ttm_bo_tt_unbind(bo); ttm_bo_free_old_node(bo); - ttm_flag_masked(&old_mem->placement, TTM_PL_FLAG_SYSTEM, - TTM_PL_MASK_MEM); old_mem->mem_type = TTM_PL_SYSTEM; } @@ -79,146 +77,70 @@ int ttm_bo_move_ttm(struct ttm_buffer_object *bo, return ret; if (new_mem->mem_type != TTM_PL_SYSTEM) { - ret = ttm_tt_bind(ttm, new_mem, ctx); + + ret = ttm_tt_populate(bo->bdev, ttm, ctx); if (unlikely(ret != 0)) return ret; - } - *old_mem = *new_mem; - new_mem->mm_node = NULL; + ret = ttm_bo_tt_bind(bo, new_mem); + if (unlikely(ret != 0)) + return ret; + } + ttm_bo_assign_mem(bo, new_mem); return 0; } EXPORT_SYMBOL(ttm_bo_move_ttm); -int ttm_mem_io_lock(struct ttm_mem_type_manager *man, bool interruptible) -{ - if (likely(!man->use_io_reserve_lru)) - return 0; - - if (interruptible) - return mutex_lock_interruptible(&man->io_reserve_mutex); - - mutex_lock(&man->io_reserve_mutex); - return 0; -} - -void ttm_mem_io_unlock(struct ttm_mem_type_manager *man) -{ - if (likely(!man->use_io_reserve_lru)) - return; - - mutex_unlock(&man->io_reserve_mutex); -} - -static int ttm_mem_io_evict(struct ttm_mem_type_manager *man) -{ - struct ttm_buffer_object *bo; - - bo = list_first_entry_or_null(&man->io_reserve_lru, - struct ttm_buffer_object, - io_reserve_lru); - if (!bo) - return -ENOSPC; - - list_del_init(&bo->io_reserve_lru); - ttm_bo_unmap_virtual_locked(bo); - return 0; -} - int ttm_mem_io_reserve(struct ttm_bo_device *bdev, - struct ttm_mem_reg *mem) + struct ttm_resource *mem) { - struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type]; - int ret; - - if (mem->bus.io_reserved_count++) + if (mem->bus.offset || mem->bus.addr) return 0; + mem->bus.is_iomem = false; if (!bdev->driver->io_mem_reserve) return 0; -retry: - ret = bdev->driver->io_mem_reserve(bdev, mem); - if (ret == -ENOSPC) { - ret = ttm_mem_io_evict(man); - if (ret == 0) - goto retry; - } - return ret; + return bdev->driver->io_mem_reserve(bdev, mem); } void ttm_mem_io_free(struct ttm_bo_device *bdev, - struct ttm_mem_reg *mem) + struct ttm_resource *mem) { - if (--mem->bus.io_reserved_count) + if (!mem->bus.offset && !mem->bus.addr) return; - if (!bdev->driver->io_mem_free) - return; + if (bdev->driver->io_mem_free) + bdev->driver->io_mem_free(bdev, mem); - bdev->driver->io_mem_free(bdev, mem); + mem->bus.offset = 0; + mem->bus.addr = NULL; } -int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo) -{ - struct ttm_mem_type_manager *man = &bo->bdev->man[bo->mem.mem_type]; - struct ttm_mem_reg *mem = &bo->mem; - int ret; - - if (mem->bus.io_reserved_vm) - return 0; - - ret = 
ttm_mem_io_reserve(bo->bdev, mem); - if (unlikely(ret != 0)) - return ret; - mem->bus.io_reserved_vm = true; - if (man->use_io_reserve_lru) - list_add_tail(&bo->io_reserve_lru, - &man->io_reserve_lru); - return 0; -} - -void ttm_mem_io_free_vm(struct ttm_buffer_object *bo) -{ - struct ttm_mem_reg *mem = &bo->mem; - - if (!mem->bus.io_reserved_vm) - return; - - mem->bus.io_reserved_vm = false; - list_del_init(&bo->io_reserve_lru); - ttm_mem_io_free(bo->bdev, mem); -} - -static int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, - struct ttm_mem_reg *mem, +static int ttm_resource_ioremap(struct ttm_bo_device *bdev, + struct ttm_resource *mem, void **virtual) { - struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type]; int ret; void *addr; *virtual = NULL; - (void) ttm_mem_io_lock(man, false); ret = ttm_mem_io_reserve(bdev, mem); - ttm_mem_io_unlock(man); if (ret || !mem->bus.is_iomem) return ret; if (mem->bus.addr) { addr = mem->bus.addr; } else { + size_t bus_size = (size_t)mem->num_pages << PAGE_SHIFT; + if (mem->placement & TTM_PL_FLAG_WC) - addr = ioremap_wc(mem->bus.base + mem->bus.offset, - mem->bus.size); + addr = ioremap_wc(mem->bus.offset, bus_size); else - addr = ioremap(mem->bus.base + mem->bus.offset, - mem->bus.size); + addr = ioremap(mem->bus.offset, bus_size); if (!addr) { - (void) ttm_mem_io_lock(man, false); ttm_mem_io_free(bdev, mem); - ttm_mem_io_unlock(man); return -ENOMEM; } } @@ -226,19 +148,13 @@ static int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, return 0; } -static void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev, - struct ttm_mem_reg *mem, +static void ttm_resource_iounmap(struct ttm_bo_device *bdev, + struct ttm_resource *mem, void *virtual) { - struct ttm_mem_type_manager *man; - - man = &bdev->man[mem->mem_type]; - if (virtual && mem->bus.addr == NULL) iounmap(virtual); - (void) ttm_mem_io_lock(man, false); ttm_mem_io_free(bdev, mem); - ttm_mem_io_unlock(man); } static int ttm_copy_io_page(void *dst, void *src, unsigned long page) @@ -300,13 +216,13 @@ static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst, int ttm_bo_move_memcpy(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx, - struct ttm_mem_reg *new_mem) + struct ttm_resource *new_mem) { struct ttm_bo_device *bdev = bo->bdev; - struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type]; + struct ttm_resource_manager *man = ttm_manager_type(bdev, new_mem->mem_type); struct ttm_tt *ttm = bo->ttm; - struct ttm_mem_reg *old_mem = &bo->mem; - struct ttm_mem_reg old_copy = *old_mem; + struct ttm_resource *old_mem = &bo->mem; + struct ttm_resource old_copy = *old_mem; void *old_iomap; void *new_iomap; int ret; @@ -319,10 +235,10 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo, if (ret) return ret; - ret = ttm_mem_reg_ioremap(bdev, old_mem, &old_iomap); + ret = ttm_resource_ioremap(bdev, old_mem, &old_iomap); if (ret) return ret; - ret = ttm_mem_reg_ioremap(bdev, new_mem, &new_iomap); + ret = ttm_resource_ioremap(bdev, new_mem, &new_iomap); if (ret) goto out; @@ -336,7 +252,7 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo, * Don't move nonexistent data. Clear destination instead. */ if (old_iomap == NULL && - (ttm == NULL || (ttm->state == tt_unpopulated && + (ttm == NULL || (!ttm_tt_is_populated(ttm) && !(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)))) { memset_io(new_iomap, 0, new_mem->num_pages*PAGE_SIZE); goto out2; @@ -346,7 +262,7 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo, * TTM might be null for moves within the same region. 
*/ if (ttm) { - ret = ttm_tt_populate(ttm, ctx); + ret = ttm_tt_populate(bdev, ttm, ctx); if (ret) goto out1; } @@ -381,24 +297,22 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo, mb(); out2: old_copy = *old_mem; - *old_mem = *new_mem; - new_mem->mm_node = NULL; - if (man->flags & TTM_MEMTYPE_FLAG_FIXED) { - ttm_tt_destroy(ttm); - bo->ttm = NULL; - } + ttm_bo_assign_mem(bo, new_mem); + + if (!man->use_tt) + ttm_bo_tt_destroy(bo); out1: - ttm_mem_reg_iounmap(bdev, old_mem, new_iomap); + ttm_resource_iounmap(bdev, old_mem, new_iomap); out: - ttm_mem_reg_iounmap(bdev, &old_copy, old_iomap); + ttm_resource_iounmap(bdev, &old_copy, old_iomap); /* * On error, keep the mm node! */ if (!ret) - ttm_bo_mem_put(bo, &old_copy); + ttm_resource_free(bo, &old_copy); return ret; } EXPORT_SYMBOL(ttm_bo_move_memcpy); @@ -452,7 +366,6 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo, INIT_LIST_HEAD(&fbo->base.ddestroy); INIT_LIST_HEAD(&fbo->base.lru); INIT_LIST_HEAD(&fbo->base.swap); - INIT_LIST_HEAD(&fbo->base.io_reserve_lru); fbo->base.moving = NULL; drm_vma_node_reset(&fbo->base.base.vma_node); @@ -502,7 +415,7 @@ static int ttm_bo_ioremap(struct ttm_buffer_object *bo, unsigned long size, struct ttm_bo_kmap_obj *map) { - struct ttm_mem_reg *mem = &bo->mem; + struct ttm_resource *mem = &bo->mem; if (bo->mem.bus.addr) { map->bo_kmap_type = ttm_bo_map_premapped; @@ -510,12 +423,10 @@ static int ttm_bo_ioremap(struct ttm_buffer_object *bo, } else { map->bo_kmap_type = ttm_bo_map_iomap; if (mem->placement & TTM_PL_FLAG_WC) - map->virtual = ioremap_wc(bo->mem.bus.base + - bo->mem.bus.offset + offset, + map->virtual = ioremap_wc(bo->mem.bus.offset + offset, size); else - map->virtual = ioremap(bo->mem.bus.base + - bo->mem.bus.offset + offset, + map->virtual = ioremap(bo->mem.bus.offset + offset, size); } return (!map->virtual) ? 
-ENOMEM : 0; @@ -526,7 +437,7 @@ static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo, unsigned long num_pages, struct ttm_bo_kmap_obj *map) { - struct ttm_mem_reg *mem = &bo->mem; + struct ttm_resource *mem = &bo->mem; struct ttm_operation_ctx ctx = { .interruptible = false, .no_wait_gpu = false @@ -537,7 +448,7 @@ static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo, BUG_ON(!ttm); - ret = ttm_tt_populate(ttm, &ctx); + ret = ttm_tt_populate(bo->bdev, ttm, &ctx); if (ret) return ret; @@ -567,8 +478,6 @@ int ttm_bo_kmap(struct ttm_buffer_object *bo, unsigned long start_page, unsigned long num_pages, struct ttm_bo_kmap_obj *map) { - struct ttm_mem_type_manager *man = - &bo->bdev->man[bo->mem.mem_type]; unsigned long offset, size; int ret; @@ -579,9 +488,7 @@ int ttm_bo_kmap(struct ttm_buffer_object *bo, if (start_page > bo->num_pages) return -EINVAL; - (void) ttm_mem_io_lock(man, false); ret = ttm_mem_io_reserve(bo->bdev, &bo->mem); - ttm_mem_io_unlock(man); if (ret) return ret; if (!bo->mem.bus.is_iomem) { @@ -596,10 +503,6 @@ EXPORT_SYMBOL(ttm_bo_kmap); void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map) { - struct ttm_buffer_object *bo = map->bo; - struct ttm_mem_type_manager *man = - &bo->bdev->man[bo->mem.mem_type]; - if (!map->virtual) return; switch (map->bo_kmap_type) { @@ -617,167 +520,116 @@ void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map) default: BUG(); } - (void) ttm_mem_io_lock(man, false); ttm_mem_io_free(map->bo->bdev, &map->bo->mem); - ttm_mem_io_unlock(man); map->virtual = NULL; map->page = NULL; } EXPORT_SYMBOL(ttm_bo_kunmap); -int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo, - struct dma_fence *fence, - bool evict, - struct ttm_mem_reg *new_mem) +static int ttm_bo_wait_free_node(struct ttm_buffer_object *bo, + bool dst_use_tt) { - struct ttm_bo_device *bdev = bo->bdev; - struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type]; - struct ttm_mem_reg *old_mem = &bo->mem; int ret; - struct ttm_buffer_object *ghost_obj; - - dma_resv_add_excl_fence(bo->base.resv, fence); - if (evict) { - ret = ttm_bo_wait(bo, false, false); - if (ret) - return ret; - - if (man->flags & TTM_MEMTYPE_FLAG_FIXED) { - ttm_tt_destroy(bo->ttm); - bo->ttm = NULL; - } - ttm_bo_free_old_node(bo); - } else { - /** - * This should help pipeline ordinary buffer moves. - * - * Hang old buffer memory on a new buffer object, - * and leave it to be released when the GPU - * operation has completed. - */ - - dma_fence_put(bo->moving); - bo->moving = dma_fence_get(fence); - - ret = ttm_buffer_object_transfer(bo, &ghost_obj); - if (ret) - return ret; - - dma_resv_add_excl_fence(&ghost_obj->base._resv, fence); - - /** - * If we're not moving to fixed memory, the TTM object - * needs to stay alive. Otherwhise hang it on the ghost - * bo to be unbound and destroyed. 
- */ - - if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED)) - ghost_obj->ttm = NULL; - else - bo->ttm = NULL; - - dma_resv_unlock(&ghost_obj->base._resv); - ttm_bo_put(ghost_obj); - } - - *old_mem = *new_mem; - new_mem->mm_node = NULL; + ret = ttm_bo_wait(bo, false, false); + if (ret) + return ret; + if (!dst_use_tt) + ttm_bo_tt_destroy(bo); + ttm_bo_free_old_node(bo); return 0; } -EXPORT_SYMBOL(ttm_bo_move_accel_cleanup); -int ttm_bo_pipeline_move(struct ttm_buffer_object *bo, - struct dma_fence *fence, bool evict, - struct ttm_mem_reg *new_mem) +static int ttm_bo_move_to_ghost(struct ttm_buffer_object *bo, + struct dma_fence *fence, + bool dst_use_tt) { - struct ttm_bo_device *bdev = bo->bdev; - struct ttm_mem_reg *old_mem = &bo->mem; - - struct ttm_mem_type_manager *from = &bdev->man[old_mem->mem_type]; - struct ttm_mem_type_manager *to = &bdev->man[new_mem->mem_type]; - + struct ttm_buffer_object *ghost_obj; int ret; - dma_resv_add_excl_fence(bo->base.resv, fence); - - if (!evict) { - struct ttm_buffer_object *ghost_obj; - - /** - * This should help pipeline ordinary buffer moves. - * - * Hang old buffer memory on a new buffer object, - * and leave it to be released when the GPU - * operation has completed. - */ - - dma_fence_put(bo->moving); - bo->moving = dma_fence_get(fence); + /** + * This should help pipeline ordinary buffer moves. + * + * Hang old buffer memory on a new buffer object, + * and leave it to be released when the GPU + * operation has completed. + */ - ret = ttm_buffer_object_transfer(bo, &ghost_obj); - if (ret) - return ret; + dma_fence_put(bo->moving); + bo->moving = dma_fence_get(fence); - dma_resv_add_excl_fence(&ghost_obj->base._resv, fence); + ret = ttm_buffer_object_transfer(bo, &ghost_obj); + if (ret) + return ret; - /** - * If we're not moving to fixed memory, the TTM object - * needs to stay alive. Otherwhise hang it on the ghost - * bo to be unbound and destroyed. - */ + dma_resv_add_excl_fence(&ghost_obj->base._resv, fence); - if (!(to->flags & TTM_MEMTYPE_FLAG_FIXED)) - ghost_obj->ttm = NULL; - else - bo->ttm = NULL; + /** + * If we're not moving to fixed memory, the TTM object + * needs to stay alive. Otherwhise hang it on the ghost + * bo to be unbound and destroyed. + */ - dma_resv_unlock(&ghost_obj->base._resv); - ttm_bo_put(ghost_obj); + if (dst_use_tt) + ghost_obj->ttm = NULL; + else + bo->ttm = NULL; - } else if (from->flags & TTM_MEMTYPE_FLAG_FIXED) { + dma_resv_unlock(&ghost_obj->base._resv); + ttm_bo_put(ghost_obj); + return 0; +} - /** - * BO doesn't have a TTM we need to bind/unbind. Just remember - * this eviction and free up the allocation - */ +static void ttm_bo_move_pipeline_evict(struct ttm_buffer_object *bo, + struct dma_fence *fence) +{ + struct ttm_bo_device *bdev = bo->bdev; + struct ttm_resource_manager *from = ttm_manager_type(bdev, bo->mem.mem_type); - spin_lock(&from->move_lock); - if (!from->move || dma_fence_is_later(fence, from->move)) { - dma_fence_put(from->move); - from->move = dma_fence_get(fence); - } - spin_unlock(&from->move_lock); + /** + * BO doesn't have a TTM we need to bind/unbind. 
Just remember + * this eviction and free up the allocation + */ + spin_lock(&from->move_lock); + if (!from->move || dma_fence_is_later(fence, from->move)) { + dma_fence_put(from->move); + from->move = dma_fence_get(fence); + } + spin_unlock(&from->move_lock); - ttm_bo_free_old_node(bo); + ttm_bo_free_old_node(bo); - dma_fence_put(bo->moving); - bo->moving = dma_fence_get(fence); + dma_fence_put(bo->moving); + bo->moving = dma_fence_get(fence); +} - } else { - /** - * Last resort, wait for the move to be completed. - * - * Should never happen in pratice. - */ +int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo, + struct dma_fence *fence, + bool evict, + bool pipeline, + struct ttm_resource *new_mem) +{ + struct ttm_bo_device *bdev = bo->bdev; + struct ttm_resource_manager *from = ttm_manager_type(bdev, bo->mem.mem_type); + struct ttm_resource_manager *man = ttm_manager_type(bdev, new_mem->mem_type); + int ret = 0; - ret = ttm_bo_wait(bo, false, false); - if (ret) - return ret; + dma_resv_add_excl_fence(bo->base.resv, fence); + if (!evict) + ret = ttm_bo_move_to_ghost(bo, fence, man->use_tt); + else if (!from->use_tt && pipeline) + ttm_bo_move_pipeline_evict(bo, fence); + else + ret = ttm_bo_wait_free_node(bo, man->use_tt); - if (to->flags & TTM_MEMTYPE_FLAG_FIXED) { - ttm_tt_destroy(bo->ttm); - bo->ttm = NULL; - } - ttm_bo_free_old_node(bo); - } + if (ret) + return ret; - *old_mem = *new_mem; - new_mem->mm_node = NULL; + ttm_bo_assign_mem(bo, new_mem); return 0; } -EXPORT_SYMBOL(ttm_bo_pipeline_move); +EXPORT_SYMBOL(ttm_bo_move_accel_cleanup); int ttm_bo_pipeline_gutting(struct ttm_buffer_object *bo) { diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c index 4732dcc80e11..98a006fc30a5 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_vm.c +++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c @@ -101,8 +101,7 @@ static unsigned long ttm_bo_io_mem_pfn(struct ttm_buffer_object *bo, if (bdev->driver->io_mem_pfn) return bdev->driver->io_mem_pfn(bo, page_offset); - return ((bo->mem.bus.base + bo->mem.bus.offset) >> PAGE_SHIFT) - + page_offset; + return (bo->mem.bus.offset >> PAGE_SHIFT) + page_offset; } /** @@ -281,8 +280,6 @@ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf, pgoff_t i; vm_fault_t ret = VM_FAULT_NOPAGE; unsigned long address = vmf->address; - struct ttm_mem_type_manager *man = - &bdev->man[bo->mem.mem_type]; /* * Refuse to fault imported pages. 
This should be handled @@ -308,9 +305,7 @@ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf, } if (bo->moving != moving) { - spin_lock(&ttm_bo_glob.lru_lock); - ttm_bo_move_to_lru_tail(bo, NULL); - spin_unlock(&ttm_bo_glob.lru_lock); + ttm_bo_move_to_lru_tail_unlocked(bo); } dma_fence_put(moving); } @@ -323,24 +318,17 @@ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf, if (unlikely(ret != 0)) return ret; - err = ttm_mem_io_lock(man, true); + err = ttm_mem_io_reserve(bdev, &bo->mem); if (unlikely(err != 0)) - return VM_FAULT_NOPAGE; - err = ttm_mem_io_reserve_vm(bo); - if (unlikely(err != 0)) { - ret = VM_FAULT_SIGBUS; - goto out_io_unlock; - } + return VM_FAULT_SIGBUS; page_offset = ((address - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff - drm_vma_node_start(&bo->base.vma_node); page_last = vma_pages(vma) + vma->vm_pgoff - drm_vma_node_start(&bo->base.vma_node); - if (unlikely(page_offset >= bo->num_pages)) { - ret = VM_FAULT_SIGBUS; - goto out_io_unlock; - } + if (unlikely(page_offset >= bo->num_pages)) + return VM_FAULT_SIGBUS; prot = ttm_io_prot(bo->mem.placement, prot); if (!bo->mem.bus.is_iomem) { @@ -352,21 +340,17 @@ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf, }; ttm = bo->ttm; - if (ttm_tt_populate(bo->ttm, &ctx)) { - ret = VM_FAULT_OOM; - goto out_io_unlock; - } + if (ttm_tt_populate(bdev, bo->ttm, &ctx)) + return VM_FAULT_OOM; } else { /* Iomem should not be marked encrypted */ prot = pgprot_decrypted(prot); } /* We don't prefault on huge faults. Yet. */ - if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && fault_page_size != 1) { - ret = ttm_bo_vm_insert_huge(vmf, bo, page_offset, - fault_page_size, prot); - goto out_io_unlock; - } + if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && fault_page_size != 1) + return ttm_bo_vm_insert_huge(vmf, bo, page_offset, + fault_page_size, prot); /* * Speculatively prefault a number of pages. 
Only error on @@ -378,8 +362,7 @@ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf, } else { page = ttm->pages[page_offset]; if (unlikely(!page && i == 0)) { - ret = VM_FAULT_OOM; - goto out_io_unlock; + return VM_FAULT_OOM; } else if (unlikely(!page)) { break; } @@ -406,7 +389,7 @@ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf, /* Never error on prefaulted PTEs */ if (unlikely((ret & VM_FAULT_ERROR))) { if (i == 0) - goto out_io_unlock; + return VM_FAULT_NOPAGE; else break; } @@ -415,9 +398,6 @@ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf, if (unlikely(++page_offset >= page_last)) break; } - ret = VM_FAULT_NOPAGE; -out_io_unlock: - ttm_mem_io_unlock(man); return ret; } EXPORT_SYMBOL(ttm_bo_vm_fault_reserved); diff --git a/drivers/gpu/drm/ttm/ttm_execbuf_util.c b/drivers/gpu/drm/ttm/ttm_execbuf_util.c index 1797f04c0534..8a8f1a6a83a6 100644 --- a/drivers/gpu/drm/ttm/ttm_execbuf_util.c +++ b/drivers/gpu/drm/ttm/ttm_execbuf_util.c @@ -93,7 +93,7 @@ int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket, list_for_each_entry(entry, list, head) { struct ttm_buffer_object *bo = entry->bo; - ret = __ttm_bo_reserve(bo, intr, (ticket == NULL), ticket); + ret = ttm_bo_reserve(bo, intr, (ticket == NULL), ticket); if (ret == -EALREADY && dups) { struct ttm_validate_buffer *safe = entry; entry = list_prev_entry(entry, head); @@ -119,13 +119,7 @@ int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket, ttm_eu_backoff_reservation_reverse(list, entry); if (ret == -EDEADLK) { - if (intr) { - ret = dma_resv_lock_slow_interruptible(bo->base.resv, - ticket); - } else { - dma_resv_lock_slow(bo->base.resv, ticket); - ret = 0; - } + ret = ttm_bo_reserve_slowpath(bo, intr, ticket); } if (!ret && entry->num_shared) @@ -133,8 +127,6 @@ int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket, entry->num_shared); if (unlikely(ret != 0)) { - if (ret == -EINTR) - ret = -ERESTARTSYS; if (ticket) { ww_acquire_done(ticket); ww_acquire_fini(ticket); diff --git a/drivers/gpu/drm/ttm/ttm_memory.c b/drivers/gpu/drm/ttm/ttm_memory.c index acd63b70d814..89d50f38c0f2 100644 --- a/drivers/gpu/drm/ttm/ttm_memory.c +++ b/drivers/gpu/drm/ttm/ttm_memory.c @@ -259,7 +259,7 @@ static bool ttm_zones_above_swap_target(struct ttm_mem_global *glob, return false; } -/** +/* * At this point we only support a single shrink callback. * Extend this if needed, perhaps using a linked list of callbacks. 
* Note that this function is reentrant: @@ -554,7 +554,6 @@ ttm_check_under_lowerlimit(struct ttm_mem_global *glob, return false; } -EXPORT_SYMBOL(ttm_check_under_lowerlimit); static int ttm_mem_global_reserve(struct ttm_mem_global *glob, struct ttm_mem_zone *single_zone, @@ -682,9 +681,3 @@ size_t ttm_round_pot(size_t size) return 0; } EXPORT_SYMBOL(ttm_round_pot); - -uint64_t ttm_get_kernel_zone_memory_size(struct ttm_mem_global *glob) -{ - return glob->zone_kernel->max_mem; -} -EXPORT_SYMBOL(ttm_get_kernel_zone_memory_size); diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c index b40a4678c296..14660f723f71 100644 --- a/drivers/gpu/drm/ttm/ttm_page_alloc.c +++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c @@ -1044,7 +1044,7 @@ ttm_pool_unpopulate_helper(struct ttm_tt *ttm, unsigned mem_count_update) put_pages: ttm_put_pages(ttm->pages, ttm->num_pages, ttm->page_flags, ttm->caching_state); - ttm->state = tt_unpopulated; + ttm_tt_set_unpopulated(ttm); } int ttm_pool_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx) @@ -1053,7 +1053,7 @@ int ttm_pool_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx) unsigned i; int ret; - if (ttm->state != tt_unpopulated) + if (ttm_tt_is_populated(ttm)) return 0; if (ttm_check_under_lowerlimit(mem_glob, ttm->num_pages, ctx)) @@ -1083,7 +1083,7 @@ int ttm_pool_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx) } } - ttm->state = tt_unbound; + ttm_tt_set_populated(ttm); return 0; } EXPORT_SYMBOL(ttm_pool_populate); diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c index faefaaef7909..5e2df11685e7 100644 --- a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c +++ b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c @@ -894,7 +894,7 @@ int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev, unsigned i; int ret; - if (ttm->state != tt_unpopulated) + if (ttm_tt_is_populated(ttm)) return 0; if (ttm_check_under_lowerlimit(mem_glob, num_pages, ctx)) @@ -982,7 +982,7 @@ skip_huge: } } - ttm->state = tt_unbound; + ttm_tt_set_populated(ttm); return 0; } EXPORT_SYMBOL_GPL(ttm_dma_populate); @@ -1076,7 +1076,7 @@ void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev) /* shrink pool if necessary (only on !is_cached pools)*/ if (npages) ttm_dma_page_pool_free(pool, npages, false); - ttm->state = tt_unpopulated; + ttm_tt_set_unpopulated(ttm); } EXPORT_SYMBOL_GPL(ttm_dma_unpopulate); diff --git a/drivers/gpu/drm/ttm/ttm_bo_manager.c b/drivers/gpu/drm/ttm/ttm_range_manager.c index facd3049c3aa..1da0e277c511 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_manager.c +++ b/drivers/gpu/drm/ttm/ttm_range_manager.c @@ -44,16 +44,22 @@ */ struct ttm_range_manager { + struct ttm_resource_manager manager; struct drm_mm mm; spinlock_t lock; }; -static int ttm_bo_man_get_node(struct ttm_mem_type_manager *man, +static inline struct ttm_range_manager *to_range_manager(struct ttm_resource_manager *man) +{ + return container_of(man, struct ttm_range_manager, manager); +} + +static int ttm_range_man_alloc(struct ttm_resource_manager *man, struct ttm_buffer_object *bo, const struct ttm_place *place, - struct ttm_mem_reg *mem) + struct ttm_resource *mem) { - struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv; + struct ttm_range_manager *rman = to_range_manager(man); struct drm_mm *mm = &rman->mm; struct drm_mm_node *node; enum drm_mm_insert_mode mode; @@ -89,10 +95,10 @@ static int ttm_bo_man_get_node(struct ttm_mem_type_manager *man, return ret; } -static void 
ttm_bo_man_put_node(struct ttm_mem_type_manager *man, - struct ttm_mem_reg *mem) +static void ttm_range_man_free(struct ttm_resource_manager *man, + struct ttm_resource *mem) { - struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv; + struct ttm_range_manager *rman = to_range_manager(man); if (mem->mm_node) { spin_lock(&rman->lock); @@ -104,53 +110,73 @@ static void ttm_bo_man_put_node(struct ttm_mem_type_manager *man, } } -static int ttm_bo_man_init(struct ttm_mem_type_manager *man, - unsigned long p_size) +static const struct ttm_resource_manager_func ttm_range_manager_func; + +int ttm_range_man_init(struct ttm_bo_device *bdev, + unsigned type, bool use_tt, + unsigned long p_size) { + struct ttm_resource_manager *man; struct ttm_range_manager *rman; rman = kzalloc(sizeof(*rman), GFP_KERNEL); if (!rman) return -ENOMEM; + man = &rman->manager; + man->use_tt = use_tt; + + man->func = &ttm_range_manager_func; + + ttm_resource_manager_init(man, p_size); + drm_mm_init(&rman->mm, 0, p_size); spin_lock_init(&rman->lock); - man->priv = rman; + + ttm_set_driver_manager(bdev, type, &rman->manager); + ttm_resource_manager_set_used(man, true); return 0; } +EXPORT_SYMBOL(ttm_range_man_init); -static int ttm_bo_man_takedown(struct ttm_mem_type_manager *man) +int ttm_range_man_fini(struct ttm_bo_device *bdev, + unsigned type) { - struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv; + struct ttm_resource_manager *man = ttm_manager_type(bdev, type); + struct ttm_range_manager *rman = to_range_manager(man); struct drm_mm *mm = &rman->mm; + int ret; + + ttm_resource_manager_set_used(man, false); + + ret = ttm_resource_manager_force_list_clean(bdev, man); + if (ret) + return ret; spin_lock(&rman->lock); - if (drm_mm_clean(mm)) { - drm_mm_takedown(mm); - spin_unlock(&rman->lock); - kfree(rman); - man->priv = NULL; - return 0; - } + drm_mm_clean(mm); + drm_mm_takedown(mm); spin_unlock(&rman->lock); - return -EBUSY; + + ttm_resource_manager_cleanup(man); + ttm_set_driver_manager(bdev, type, NULL); + kfree(rman); + return 0; } +EXPORT_SYMBOL(ttm_range_man_fini); -static void ttm_bo_man_debug(struct ttm_mem_type_manager *man, - struct drm_printer *printer) +static void ttm_range_man_debug(struct ttm_resource_manager *man, + struct drm_printer *printer) { - struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv; + struct ttm_range_manager *rman = to_range_manager(man); spin_lock(&rman->lock); drm_mm_print(&rman->mm, printer); spin_unlock(&rman->lock); } -const struct ttm_mem_type_manager_func ttm_bo_manager_func = { - .init = ttm_bo_man_init, - .takedown = ttm_bo_man_takedown, - .get_node = ttm_bo_man_get_node, - .put_node = ttm_bo_man_put_node, - .debug = ttm_bo_man_debug +static const struct ttm_resource_manager_func ttm_range_manager_func = { + .alloc = ttm_range_man_alloc, + .free = ttm_range_man_free, + .debug = ttm_range_man_debug }; -EXPORT_SYMBOL(ttm_bo_manager_func); diff --git a/drivers/gpu/drm/ttm/ttm_resource.c b/drivers/gpu/drm/ttm/ttm_resource.c new file mode 100644 index 000000000000..b325b9264203 --- /dev/null +++ b/drivers/gpu/drm/ttm/ttm_resource.c @@ -0,0 +1,146 @@ +/* + * Copyright 2020 Advanced Micro Devices, Inc. 
diff --git a/drivers/gpu/drm/ttm/ttm_resource.c b/drivers/gpu/drm/ttm/ttm_resource.c
new file mode 100644
index 000000000000..b325b9264203
--- /dev/null
+++ b/drivers/gpu/drm/ttm/ttm_resource.c
@@ -0,0 +1,146 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Christian König
+ */
+
+#include <drm/ttm/ttm_resource.h>
+#include <drm/ttm/ttm_bo_driver.h>
+
+int ttm_resource_alloc(struct ttm_buffer_object *bo,
+		       const struct ttm_place *place,
+		       struct ttm_resource *res)
+{
+	struct ttm_resource_manager *man =
+		ttm_manager_type(bo->bdev, res->mem_type);
+
+	res->mm_node = NULL;
+	if (!man->func || !man->func->alloc)
+		return 0;
+
+	return man->func->alloc(man, bo, place, res);
+}
+
+void ttm_resource_free(struct ttm_buffer_object *bo, struct ttm_resource *res)
+{
+	struct ttm_resource_manager *man =
+		ttm_manager_type(bo->bdev, res->mem_type);
+
+	if (man->func && man->func->free)
+		man->func->free(man, res);
+
+	res->mm_node = NULL;
+	res->mem_type = TTM_PL_SYSTEM;
+}
+EXPORT_SYMBOL(ttm_resource_free);
+
+/**
+ * ttm_resource_manager_init
+ *
+ * @man: memory manager object to init
+ * @p_size: size managed area in pages.
+ *
+ * Initialise core parts of a manager object.
+ */
+void ttm_resource_manager_init(struct ttm_resource_manager *man,
+			       unsigned long p_size)
+{
+	unsigned i;
+
+	spin_lock_init(&man->move_lock);
+	man->size = p_size;
+
+	for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i)
+		INIT_LIST_HEAD(&man->lru[i]);
+	man->move = NULL;
+}
+EXPORT_SYMBOL(ttm_resource_manager_init);
+
+/*
+ * ttm_resource_manager_force_list_clean
+ *
+ * @bdev - device to use
+ * @man - manager to use
+ *
+ * Force all the objects out of a memory manager until clean.
+ * Part of memory manager cleanup sequence.
+ */
+int ttm_resource_manager_force_list_clean(struct ttm_bo_device *bdev,
+					  struct ttm_resource_manager *man)
+{
+	struct ttm_operation_ctx ctx = {
+		.interruptible = false,
+		.no_wait_gpu = false,
+		.flags = TTM_OPT_FLAG_FORCE_ALLOC
+	};
+	struct ttm_bo_global *glob = &ttm_bo_glob;
+	struct dma_fence *fence;
+	int ret;
+	unsigned i;
+
+	/*
+	 * Can't use standard list traversal since we're unlocking.
+	 */
+
+	spin_lock(&glob->lru_lock);
+	for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
+		while (!list_empty(&man->lru[i])) {
+			spin_unlock(&glob->lru_lock);
+			ret = ttm_mem_evict_first(bdev, man, NULL, &ctx,
+						  NULL);
+			if (ret)
+				return ret;
+			spin_lock(&glob->lru_lock);
+		}
+	}
+	spin_unlock(&glob->lru_lock);
+
+	spin_lock(&man->move_lock);
+	fence = dma_fence_get(man->move);
+	spin_unlock(&man->move_lock);
+
+	if (fence) {
+		ret = dma_fence_wait(fence, false);
+		dma_fence_put(fence);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(ttm_resource_manager_force_list_clean);
+
+/**
+ * ttm_resource_manager_debug
+ *
+ * @man: manager type to dump.
+ * @p: printer to use for debug.
+ */
+void ttm_resource_manager_debug(struct ttm_resource_manager *man,
+				struct drm_printer *p)
+{
+	drm_printf(p, "  use_type: %d\n", man->use_type);
+	drm_printf(p, "  use_tt: %d\n", man->use_tt);
+	drm_printf(p, "  size: %llu\n", man->size);
+	if (man->func && man->func->debug)
+		(*man->func->debug)(man, p);
+}
+EXPORT_SYMBOL(ttm_resource_manager_debug);
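The new ttm_resource.c gives drivers a documented seam: implement a ttm_resource_manager_func table and register the embedded manager yourself, exactly as the range manager above now does. A sketch under the same assumptions; every my_-prefixed name is hypothetical, and a real alloc hook must fill res->mm_node and res->start the way ttm_range_man_alloc() does:

	struct my_manager {
		struct ttm_resource_manager manager;
		/* driver-private allocator state goes here */
	};

	static const struct ttm_resource_manager_func my_manager_func = {
		.alloc = my_man_alloc,	/* grab space, set res->mm_node/start */
		.free  = my_man_free,	/* return space, clear res->mm_node */
		.debug = my_man_debug,	/* optional, feeds ttm_resource_manager_debug() */
	};

	static int my_manager_register(struct ttm_bo_device *bdev, unsigned type,
				       unsigned long p_size)
	{
		struct my_manager *mgr = kzalloc(sizeof(*mgr), GFP_KERNEL);

		if (!mgr)
			return -ENOMEM;

		mgr->manager.func = &my_manager_func;
		ttm_resource_manager_init(&mgr->manager, p_size);

		/* publish to TTM, then mark the type usable for allocations */
		ttm_set_driver_manager(bdev, type, &mgr->manager);
		ttm_resource_manager_set_used(&mgr->manager, true);
		return 0;
	}

Because ttm_resource_alloc() returns 0 when a manager provides no alloc hook, system memory keeps working with a NULL func table; only placement types that actually carve out space need the callbacks.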
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index 3437711ddb43..f43fa69a1e65 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -50,6 +50,9 @@ int ttm_tt_create(struct ttm_buffer_object *bo, bool zero_alloc)
 
 	dma_resv_assert_held(bo->base.resv);
 
+	if (bo->ttm)
+		return 0;
+
 	if (bdev->need_dma32)
 		page_flags |= TTM_PAGE_FLAG_DMA32;
 
@@ -67,7 +70,6 @@ int ttm_tt_create(struct ttm_buffer_object *bo, bool zero_alloc)
 		page_flags |= TTM_PAGE_FLAG_SG;
 		break;
 	default:
-		bo->ttm = NULL;
 		pr_err("Illegal buffer object type\n");
 		return -EINVAL;
 	}
@@ -154,7 +156,7 @@ static int ttm_tt_set_caching(struct ttm_tt *ttm,
 	if (ttm->caching_state == c_state)
 		return 0;
 
-	if (ttm->state == tt_unpopulated) {
+	if (!ttm_tt_is_populated(ttm)) {
 		/* Change caching but don't populate */
 		ttm->caching_state = c_state;
 		return 0;
@@ -205,33 +207,31 @@ int ttm_tt_set_placement_caching(struct ttm_tt *ttm, uint32_t placement)
 }
 EXPORT_SYMBOL(ttm_tt_set_placement_caching);
 
-void ttm_tt_destroy(struct ttm_tt *ttm)
+void ttm_tt_destroy_common(struct ttm_bo_device *bdev, struct ttm_tt *ttm)
 {
-	if (ttm == NULL)
-		return;
-
-	ttm_tt_unbind(ttm);
-
-	if (ttm->state == tt_unbound)
-		ttm_tt_unpopulate(ttm);
+	ttm_tt_unpopulate(bdev, ttm);
 
 	if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTENT_SWAP) &&
 	    ttm->swap_storage)
 		fput(ttm->swap_storage);
 
 	ttm->swap_storage = NULL;
-	ttm->func->destroy(ttm);
+}
+EXPORT_SYMBOL(ttm_tt_destroy_common);
+
+void ttm_tt_destroy(struct ttm_bo_device *bdev, struct ttm_tt *ttm)
+{
+	bdev->driver->ttm_tt_destroy(bdev, ttm);
 }
 
 static void ttm_tt_init_fields(struct ttm_tt *ttm,
 			       struct ttm_buffer_object *bo,
 			       uint32_t page_flags)
 {
-	ttm->bdev = bo->bdev;
 	ttm->num_pages = bo->num_pages;
 	ttm->caching_state = tt_cached;
 	ttm->page_flags = page_flags;
-	ttm->state = tt_unpopulated;
+	ttm_tt_set_unpopulated(ttm);
 	ttm->swap_storage = NULL;
 	ttm->sg = bo->sg;
 }
@@ -306,39 +306,6 @@ void ttm_dma_tt_fini(struct ttm_dma_tt *ttm_dma)
 }
 EXPORT_SYMBOL(ttm_dma_tt_fini);
 
-void ttm_tt_unbind(struct ttm_tt *ttm)
-{
-	if (ttm->state == tt_bound) {
-		ttm->func->unbind(ttm);
-		ttm->state = tt_unbound;
-	}
-}
-
-int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem,
-		struct ttm_operation_ctx *ctx)
-{
-	int ret = 0;
-
-	if (!ttm)
-		return -EINVAL;
-
-	if (ttm->state == tt_bound)
-		return 0;
-
-	ret = ttm_tt_populate(ttm, ctx);
-	if (ret)
-		return ret;
-
-	ret = ttm->func->bind(ttm, bo_mem);
-	if (unlikely(ret != 0))
-		return ret;
-
-	ttm->state = tt_bound;
-
-	return 0;
-}
-EXPORT_SYMBOL(ttm_tt_bind);
-
 int ttm_tt_swapin(struct ttm_tt *ttm)
 {
 	struct address_space *swap_space;
@@ -381,7 +348,8 @@ out_err:
 	return ret;
 }
 
-int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistent_swap_storage)
+int ttm_tt_swapout(struct ttm_bo_device *bdev,
+		   struct ttm_tt *ttm, struct file *persistent_swap_storage)
 {
 	struct address_space *swap_space;
 	struct file *swap_storage;
@@ -390,7 +358,6 @@ int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistent_swap_storage)
 	int i;
 	int ret = -ENOMEM;
 
-	BUG_ON(ttm->state != tt_unbound && ttm->state != tt_unpopulated);
 	BUG_ON(ttm->caching_state != tt_cached);
 
 	if (!persistent_swap_storage) {
@@ -427,7 +394,7 @@ int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistent_swap_storage)
 		put_page(to_page);
 	}
 
-	ttm_tt_unpopulate(ttm);
+	ttm_tt_unpopulate(bdev, ttm);
 	ttm->swap_storage = swap_storage;
 	ttm->page_flags |= TTM_PAGE_FLAG_SWAPPED;
 	if (persistent_swap_storage)
@@ -441,7 +408,7 @@ out_err:
 	return ret;
 }
 
-static void ttm_tt_add_mapping(struct ttm_tt *ttm)
+static void ttm_tt_add_mapping(struct ttm_bo_device *bdev, struct ttm_tt *ttm)
 {
 	pgoff_t i;
 
@@ -449,24 +416,29 @@ static void ttm_tt_add_mapping(struct ttm_tt *ttm)
 		return;
 
 	for (i = 0; i < ttm->num_pages; ++i)
-		ttm->pages[i]->mapping = ttm->bdev->dev_mapping;
+		ttm->pages[i]->mapping = bdev->dev_mapping;
 }
 
-int ttm_tt_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
+int ttm_tt_populate(struct ttm_bo_device *bdev,
+		    struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
 {
 	int ret;
 
-	if (ttm->state != tt_unpopulated)
+	if (!ttm)
+		return -EINVAL;
+
+	if (ttm_tt_is_populated(ttm))
 		return 0;
 
-	if (ttm->bdev->driver->ttm_tt_populate)
-		ret = ttm->bdev->driver->ttm_tt_populate(ttm, ctx);
+	if (bdev->driver->ttm_tt_populate)
+		ret = bdev->driver->ttm_tt_populate(bdev, ttm, ctx);
 	else
 		ret = ttm_pool_populate(ttm, ctx);
 	if (!ret)
-		ttm_tt_add_mapping(ttm);
+		ttm_tt_add_mapping(bdev, ttm);
 	return ret;
 }
+EXPORT_SYMBOL(ttm_tt_populate);
 
 static void ttm_tt_clear_mapping(struct ttm_tt *ttm)
 {
@@ -482,14 +454,15 @@ static void ttm_tt_clear_mapping(struct ttm_tt *ttm)
 	}
 }
 
-void ttm_tt_unpopulate(struct ttm_tt *ttm)
+void ttm_tt_unpopulate(struct ttm_bo_device *bdev,
+		       struct ttm_tt *ttm)
 {
-	if (ttm->state == tt_unpopulated)
+	if (!ttm_tt_is_populated(ttm))
 		return;
 
 	ttm_tt_clear_mapping(ttm);
-	if (ttm->bdev->driver->ttm_tt_unpopulate)
-		ttm->bdev->driver->ttm_tt_unpopulate(ttm);
+	if (bdev->driver->ttm_tt_unpopulate)
+		bdev->driver->ttm_tt_unpopulate(bdev, ttm);
 	else
 		ttm_pool_unpopulate(ttm);
 }
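With ttm_tt_bind()/ttm_tt_unbind() removed and ttm_tt_destroy() reduced to a dispatch through bdev->driver->ttm_tt_destroy, binding state and final teardown become entirely the driver's responsibility, with ttm_tt_destroy_common() exported for the shared part. A plausible shape for such a hook; my_tt, my_gart_unbind() and the embedded layout are assumptions for illustration, not taken from this diff:

	struct my_tt {
		struct ttm_tt ttm;
		/* driver binding state, e.g. a GART offset */
	};

	static void my_ttm_tt_destroy(struct ttm_bo_device *bdev, struct ttm_tt *ttm)
	{
		struct my_tt *tt = container_of(ttm, struct my_tt, ttm);

		/* binding is driver-owned now: tear it down before the
		 * common path runs */
		my_gart_unbind(tt);

		/* unpopulates the TT and drops any swap storage */
		ttm_tt_destroy_common(bdev, ttm);

		ttm_tt_fini(ttm);
		kfree(tt);
	}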