author     Jerome Glisse <jglisse@redhat.com>        2011-11-03 07:59:28 +0400
committer  Dave Airlie <airlied@redhat.com>          2011-12-06 14:39:24 +0400
commit     b1e5f172325547270f35e7d1e42416a606e1dbd2 (patch)
tree       03fc21fd5f74add89441308008b45987d09cfbc6 /drivers/gpu/drm/ttm
parent     649bf3ca77343e3be1e0af8e21356fa569b1abd9 (diff)
download   linux-b1e5f172325547270f35e7d1e42416a606e1dbd2.tar.xz
drm/ttm: introduce callback for ttm_tt populate & unpopulate V4
Move the page allocation and freeing into driver callbacks and
provide TTM helper functions for those.
The most intrusive change is that we now only ever fully populate an
object; this simplifies some of the code that was designed around
page-fault-time population.
V2 Rebase on top of memory accounting overhaul
V3 New rebase on top of more memory accounting changes
V4 Rebase on top of no memory account changes (where/when is my
DeLorean when I need it?)
Signed-off-by: Jerome Glisse <jglisse@redhat.com>
Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Reviewed-by: Thomas Hellstrom <thellstrom@vmware.com>
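
For drivers that are happy with the generic page pool, the two new callbacks can simply be wired to the ttm_pool_populate()/ttm_pool_unpopulate() helpers exported by this patch. A minimal sketch follows; the "foo" driver name and glue functions are hypothetical, only the helpers and the two ttm_bo_driver callbacks come from this change:

/*
 * Illustrative only -- not part of this patch. A hypothetical "foo"
 * driver that is happy with the generic page pool can forward the
 * new callbacks straight to the exported helpers.
 */
#include "ttm/ttm_bo_driver.h"
#include "ttm/ttm_page_alloc.h"

static int foo_ttm_tt_populate(struct ttm_tt *ttm)
{
	/* allocate, account and (if needed) swap in every page */
	return ttm_pool_populate(ttm);
}

static void foo_ttm_tt_unpopulate(struct ttm_tt *ttm)
{
	/* return every allocated page to the pool */
	ttm_pool_unpopulate(ttm);
}

static struct ttm_bo_driver foo_bo_driver = {
	.ttm_tt_populate   = foo_ttm_tt_populate,
	.ttm_tt_unpopulate = foo_ttm_tt_unpopulate,
	/* ... remaining ttm_bo_driver callbacks ... */
};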
Diffstat (limited to 'drivers/gpu/drm/ttm')
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_util.c    | 31
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_vm.c      |  9
-rw-r--r--  drivers/gpu/drm/ttm/ttm_page_alloc.c | 57
-rw-r--r--  drivers/gpu/drm/ttm/ttm_tt.c         | 91
4 files changed, 86 insertions(+), 102 deletions(-)
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 082fcaea583f..60f204d67dbb 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -244,7 +244,7 @@ static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
 				unsigned long page,
 				pgprot_t prot)
 {
-	struct page *d = ttm_tt_get_page(ttm, page);
+	struct page *d = ttm->pages[page];
 	void *dst;
 
 	if (!d)
@@ -281,7 +281,7 @@ static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
 				unsigned long page,
 				pgprot_t prot)
 {
-	struct page *s = ttm_tt_get_page(ttm, page);
+	struct page *s = ttm->pages[page];
 	void *src;
 
 	if (!s)
@@ -342,6 +342,12 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
 	if (old_iomap == NULL && ttm == NULL)
 		goto out2;
 
+	if (ttm->state == tt_unpopulated) {
+		ret = ttm->bdev->driver->ttm_tt_populate(ttm);
+		if (ret)
+			goto out1;
+	}
+
 	add = 0;
 	dir = 1;
 
@@ -502,10 +508,16 @@ static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
 {
 	struct ttm_mem_reg *mem = &bo->mem; pgprot_t prot;
 	struct ttm_tt *ttm = bo->ttm;
-	struct page *d;
-	int i;
+	int ret;
 
 	BUG_ON(!ttm);
+
+	if (ttm->state == tt_unpopulated) {
+		ret = ttm->bdev->driver->ttm_tt_populate(ttm);
+		if (ret)
+			return ret;
+	}
+
 	if (num_pages == 1 && (mem->placement & TTM_PL_FLAG_CACHED)) {
 		/*
 		 * We're mapping a single page, and the desired
@@ -513,18 +525,9 @@ static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
 		 */
 		map->bo_kmap_type = ttm_bo_map_kmap;
-		map->page = ttm_tt_get_page(ttm, start_page);
+		map->page = ttm->pages[start_page];
 		map->virtual = kmap(map->page);
 	} else {
-		/*
-		 * Populate the part we're mapping;
-		 */
-		for (i = start_page; i < start_page + num_pages; ++i) {
-			d = ttm_tt_get_page(ttm, i);
-			if (!d)
-				return -ENOMEM;
-		}
-
 		/*
 		 * We need to use vmap to get the desired page protection
 		 * or to make the buffer object look contiguous.
 		 */
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index 221b924acebe..54412848de88 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -174,18 +174,23 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 		vma->vm_page_prot = (bo->mem.placement & TTM_PL_FLAG_CACHED) ?
 		    vm_get_page_prot(vma->vm_flags) :
 		    ttm_io_prot(bo->mem.placement, vma->vm_page_prot);
+
+		/* Allocate all page at once, most common usage */
+		if (ttm->bdev->driver->ttm_tt_populate(ttm)) {
+			retval = VM_FAULT_OOM;
+			goto out_io_unlock;
+		}
 	}
 
 	/*
 	 * Speculatively prefault a number of pages. Only error on
 	 * first page.
 	 */
-
 	for (i = 0; i < TTM_BO_VM_NUM_PREFAULT; ++i) {
 		if (bo->mem.bus.is_iomem)
 			pfn = ((bo->mem.bus.base + bo->mem.bus.offset) >> PAGE_SHIFT) + page_offset;
 		else {
-			page = ttm_tt_get_page(ttm, page_offset);
+			page = ttm->pages[page_offset];
 			if (unlikely(!page && i == 0)) {
 				retval = VM_FAULT_OOM;
 				goto out_io_unlock;
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
index 0f3e6d2395b3..8d6267e434ab 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
@@ -855,6 +855,63 @@ void ttm_page_alloc_fini(void)
 	_manager = NULL;
 }
 
+int ttm_pool_populate(struct ttm_tt *ttm)
+{
+	struct ttm_mem_global *mem_glob = ttm->glob->mem_glob;
+	unsigned i;
+	int ret;
+
+	if (ttm->state != tt_unpopulated)
+		return 0;
+
+	for (i = 0; i < ttm->num_pages; ++i) {
+		ret = ttm_get_pages(&ttm->pages[i], ttm->page_flags,
+				    ttm->caching_state, 1,
+				    &ttm->dma_address[i]);
+		if (ret != 0) {
+			ttm_pool_unpopulate(ttm);
+			return -ENOMEM;
+		}
+
+		ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i],
+						false, false);
+		if (unlikely(ret != 0)) {
+			ttm_pool_unpopulate(ttm);
+			return -ENOMEM;
+		}
+	}
+
+	if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
+		ret = ttm_tt_swapin(ttm);
+		if (unlikely(ret != 0)) {
+			ttm_pool_unpopulate(ttm);
+			return ret;
+		}
+	}
+
+	ttm->state = tt_unbound;
+	return 0;
+}
+EXPORT_SYMBOL(ttm_pool_populate);
+
+void ttm_pool_unpopulate(struct ttm_tt *ttm)
+{
+	unsigned i;
+
+	for (i = 0; i < ttm->num_pages; ++i) {
+		if (ttm->pages[i]) {
+			ttm_mem_global_free_page(ttm->glob->mem_glob,
+						 ttm->pages[i]);
+			ttm_put_pages(&ttm->pages[i], 1,
+				      ttm->page_flags,
+				      ttm->caching_state,
+				      ttm->dma_address);
+		}
+	}
+	ttm->state = tt_unpopulated;
+}
+EXPORT_SYMBOL(ttm_pool_unpopulate);
+
 int ttm_page_alloc_debugfs(struct seq_file *m, void *data)
 {
 	struct ttm_page_pool *p;
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index fbc90dce1de8..77f0e6f79f30 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -43,8 +43,6 @@
 #include "ttm/ttm_placement.h"
 #include "ttm/ttm_page_alloc.h"
 
-static int ttm_tt_swapin(struct ttm_tt *ttm);
-
 /**
  * Allocates storage for pointers to the pages that back the ttm.
  */
@@ -63,69 +61,6 @@ static void ttm_tt_free_page_directory(struct ttm_tt *ttm)
 	ttm->dma_address = NULL;
 }
 
-static struct page *__ttm_tt_get_page(struct ttm_tt *ttm, int index)
-{
-	struct page *p;
-	struct ttm_mem_global *mem_glob = ttm->glob->mem_glob;
-	int ret;
-
-	if (NULL == (p = ttm->pages[index])) {
-
-		ret = ttm_get_pages(&p, ttm->page_flags, ttm->caching_state, 1,
-				    &ttm->dma_address[index]);
-		if (ret != 0)
-			return NULL;
-
-		ret = ttm_mem_global_alloc_page(mem_glob, p, false, false);
-		if (unlikely(ret != 0))
-			goto out_err;
-
-		ttm->pages[index] = p;
-	}
-	return p;
-out_err:
-	ttm_put_pages(&p, 1, ttm->page_flags,
-		      ttm->caching_state, &ttm->dma_address[index]);
-	return NULL;
-}
-
-struct page *ttm_tt_get_page(struct ttm_tt *ttm, int index)
-{
-	int ret;
-
-	if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
-		ret = ttm_tt_swapin(ttm);
-		if (unlikely(ret != 0))
-			return NULL;
-	}
-	return __ttm_tt_get_page(ttm, index);
-}
-
-int ttm_tt_populate(struct ttm_tt *ttm)
-{
-	struct page *page;
-	unsigned long i;
-	int ret;
-
-	if (ttm->state != tt_unpopulated)
-		return 0;
-
-	if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
-		ret = ttm_tt_swapin(ttm);
-		if (unlikely(ret != 0))
-			return ret;
-	}
-
-	for (i = 0; i < ttm->num_pages; ++i) {
-		page = __ttm_tt_get_page(ttm, i);
-		if (!page)
-			return -ENOMEM;
-	}
-	ttm->state = tt_unbound;
-	return 0;
-}
-EXPORT_SYMBOL(ttm_tt_populate);
-
 #ifdef CONFIG_X86
 static inline int ttm_tt_set_page_caching(struct page *p,
 					  enum ttm_caching_state c_old,
@@ -227,21 +162,6 @@ int ttm_tt_set_placement_caching(struct ttm_tt *ttm, uint32_t placement)
 }
 EXPORT_SYMBOL(ttm_tt_set_placement_caching);
 
-static void ttm_tt_free_alloced_pages(struct ttm_tt *ttm)
-{
-	unsigned i;
-
-	for (i = 0; i < ttm->num_pages; ++i) {
-		if (ttm->pages[i]) {
-			ttm_mem_global_free_page(ttm->glob->mem_glob,
-						 ttm->pages[i]);
-			ttm_put_pages(&ttm->pages[i], 1, ttm->page_flags,
-				      ttm->caching_state, &ttm->dma_address[i]);
-		}
-	}
-	ttm->state = tt_unpopulated;
-}
-
 void ttm_tt_destroy(struct ttm_tt *ttm)
 {
 	if (unlikely(ttm == NULL))
@@ -252,7 +172,7 @@ void ttm_tt_destroy(struct ttm_tt *ttm)
 	}
 
 	if (likely(ttm->pages != NULL)) {
-		ttm_tt_free_alloced_pages(ttm);
+		ttm->bdev->driver->ttm_tt_unpopulate(ttm);
 		ttm_tt_free_page_directory(ttm);
 	}
 
@@ -307,7 +227,7 @@ int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
 	if (ttm->state == tt_bound)
 		return 0;
 
-	ret = ttm_tt_populate(ttm);
+	ret = ttm->bdev->driver->ttm_tt_populate(ttm);
 	if (ret)
 		return ret;
 
@@ -321,7 +241,7 @@ int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
 }
 EXPORT_SYMBOL(ttm_tt_bind);
 
-static int ttm_tt_swapin(struct ttm_tt *ttm)
+int ttm_tt_swapin(struct ttm_tt *ttm)
 {
 	struct address_space *swap_space;
 	struct file *swap_storage;
@@ -343,7 +263,7 @@ static int ttm_tt_swapin(struct ttm_tt *ttm)
 			ret = PTR_ERR(from_page);
 			goto out_err;
 		}
-		to_page = __ttm_tt_get_page(ttm, i);
+		to_page = ttm->pages[i];
 		if (unlikely(to_page == NULL))
 			goto out_err;
 
@@ -364,7 +284,6 @@ static int ttm_tt_swapin(struct ttm_tt *ttm)
 
 	return 0;
 out_err:
-	ttm_tt_free_alloced_pages(ttm);
 	return ret;
 }
 
@@ -416,7 +335,7 @@ int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistent_swap_storage)
 		page_cache_release(to_page);
 	}
 
-	ttm_tt_free_alloced_pages(ttm);
+	ttm->bdev->driver->ttm_tt_unpopulate(ttm);
 	ttm->swap_storage = swap_storage;
 	ttm->page_flags |= TTM_PAGE_FLAG_SWAPPED;
 	if (persistent_swap_storage)