Diffstat (limited to 'drivers/gpu/drm/ttm')
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo.c              |  17
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_util.c         |  11
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_vm.c           |  63
-rw-r--r--  drivers/gpu/drm/ttm/ttm_page_alloc.c      |  62
-rw-r--r--  drivers/gpu/drm/ttm/ttm_page_alloc_dma.c  |  59
-rw-r--r--  drivers/gpu/drm/ttm/ttm_tt.c              |  25

6 files changed, 68 insertions(+), 169 deletions(-)
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 5d8688e522d1..7c484729f9b2 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -287,12 +287,9 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
 
         if (ret) {
                 if (bdev->driver->move_notify) {
-                        struct ttm_mem_reg tmp_mem = *mem;
-                        *mem = bo->mem;
-                        bo->mem = tmp_mem;
+                        swap(*mem, bo->mem);
                         bdev->driver->move_notify(bo, false, mem);
-                        bo->mem = *mem;
-                        *mem = tmp_mem;
+                        swap(*mem, bo->mem);
                 }
 
                 goto out_err;
@@ -590,12 +587,18 @@ static void ttm_bo_release(struct kref *kref)
         kref_put(&bo->list_kref, ttm_bo_release_list);
 }
 
+void ttm_bo_put(struct ttm_buffer_object *bo)
+{
+        kref_put(&bo->kref, ttm_bo_release);
+}
+EXPORT_SYMBOL(ttm_bo_put);
+
 void ttm_bo_unref(struct ttm_buffer_object **p_bo)
 {
         struct ttm_buffer_object *bo = *p_bo;
 
         *p_bo = NULL;
-        kref_put(&bo->kref, ttm_bo_release);
+        ttm_bo_put(bo);
 }
 EXPORT_SYMBOL(ttm_bo_unref);
 
@@ -1201,7 +1204,7 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
                 if (!resv)
                         ttm_bo_unreserve(bo);
 
-                ttm_bo_unref(&bo);
+                ttm_bo_put(bo);
                 return ret;
         }
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index f2c167702eef..046a6dda690a 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -463,7 +463,7 @@ static void ttm_transfered_destroy(struct ttm_buffer_object *bo)
         struct ttm_transfer_obj *fbo;
 
         fbo = container_of(bo, struct ttm_transfer_obj, base);
-        ttm_bo_unref(&fbo->bo);
+        ttm_bo_put(fbo->bo);
         kfree(fbo);
 }
 
@@ -492,8 +492,9 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
         if (!fbo)
                 return -ENOMEM;
 
+        ttm_bo_get(bo);
         fbo->base = *bo;
-        fbo->bo = ttm_bo_reference(bo);
+        fbo->bo = bo;
 
         /**
          * Fix up members that we shouldn't copy directly:
@@ -730,7 +731,7 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
                 bo->ttm = NULL;
 
                 ttm_bo_unreserve(ghost_obj);
-                ttm_bo_unref(&ghost_obj);
+                ttm_bo_put(ghost_obj);
         }
 
         *old_mem = *new_mem;
@@ -786,7 +787,7 @@ int ttm_bo_pipeline_move(struct ttm_buffer_object *bo,
                 bo->ttm = NULL;
 
                 ttm_bo_unreserve(ghost_obj);
-                ttm_bo_unref(&ghost_obj);
+                ttm_bo_put(ghost_obj);
 
         } else if (from->flags & TTM_MEMTYPE_FLAG_FIXED) {
 
@@ -851,7 +852,7 @@ int ttm_bo_pipeline_gutting(struct ttm_buffer_object *bo)
         bo->ttm = NULL;
 
         ttm_bo_unreserve(ghost);
-        ttm_bo_unref(&ghost);
+        ttm_bo_put(ghost);
 
         return 0;
 }
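
The two files above introduce the new reference-counting pair: ttm_bo_put() (defined in ttm_bo.c) drops a buffer-object reference directly, and callers move from ttm_bo_reference()/ttm_bo_unref() to ttm_bo_get()/ttm_bo_put(). A minimal usage sketch of the migration follows; it assumes ttm_bo_get() is the kref_get() counterpart declared next to ttm_bo_put() (its definition is not part of this diff), and hold_bo_old()/hold_bo_new() are illustrative helpers only.

    #include <drm/ttm/ttm_bo_api.h>

    /* Old style: the reference is taken and dropped through pointer
     * helpers, and ttm_bo_unref() clears the caller's pointer. */
    static void hold_bo_old(struct ttm_buffer_object *bo)
    {
            struct ttm_buffer_object *ref = ttm_bo_reference(bo);

            /* ... use ref ... */
            ttm_bo_unref(&ref);     /* ref is NULL afterwards */
    }

    /* New style: plain get/put; the caller keeps control of its pointer. */
    static void hold_bo_new(struct ttm_buffer_object *bo)
    {
            ttm_bo_get(bo);

            /* ... use bo ... */
            ttm_bo_put(bo);
    }

The benefit mirrors kref_get()/kref_put() itself: no double indirection, and no surprise NULLing of the caller's pointer.
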
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index c7ece7613a6a..6fe91c1b692d 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -44,10 +44,11 @@
 
 #define TTM_BO_VM_NUM_PREFAULT 16
 
-static int ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo,
+static vm_fault_t ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo,
                                 struct vm_fault *vmf)
 {
-        int ret = 0;
+        vm_fault_t ret = 0;
+        int err = 0;
 
         if (likely(!bo->moving))
                 goto out_unlock;
@@ -67,20 +68,20 @@ static int ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo,
                 if (vmf->flags & FAULT_FLAG_RETRY_NOWAIT)
                         goto out_unlock;
 
-                ttm_bo_reference(bo);
+                ttm_bo_get(bo);
                 up_read(&vmf->vma->vm_mm->mmap_sem);
                 (void) dma_fence_wait(bo->moving, true);
                 ttm_bo_unreserve(bo);
-                ttm_bo_unref(&bo);
+                ttm_bo_put(bo);
                 goto out_unlock;
         }
 
         /*
          * Ordinary wait.
          */
-        ret = dma_fence_wait(bo->moving, true);
-        if (unlikely(ret != 0)) {
-                ret = (ret != -ERESTARTSYS) ? VM_FAULT_SIGBUS :
+        err = dma_fence_wait(bo->moving, true);
+        if (unlikely(err != 0)) {
+                ret = (err != -ERESTARTSYS) ? VM_FAULT_SIGBUS :
                         VM_FAULT_NOPAGE;
                 goto out_unlock;
         }
@@ -105,7 +106,7 @@ static unsigned long ttm_bo_io_mem_pfn(struct ttm_buffer_object *bo,
                 + page_offset;
 }
 
-static int ttm_bo_vm_fault(struct vm_fault *vmf)
+static vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf)
 {
         struct vm_area_struct *vma = vmf->vma;
         struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
@@ -116,8 +117,9 @@ static int ttm_bo_vm_fault(struct vm_fault *vmf)
         unsigned long pfn;
         struct ttm_tt *ttm = NULL;
         struct page *page;
-        int ret;
+        int err;
         int i;
+        vm_fault_t ret = VM_FAULT_NOPAGE;
         unsigned long address = vmf->address;
         struct ttm_mem_type_manager *man =
                 &bdev->man[bo->mem.mem_type];
@@ -129,17 +131,17 @@ static int ttm_bo_vm_fault(struct vm_fault *vmf)
          * for reserve, and if it fails, retry the fault after waiting
          * for the buffer to become unreserved.
          */
-        ret = ttm_bo_reserve(bo, true, true, NULL);
-        if (unlikely(ret != 0)) {
-                if (ret != -EBUSY)
+        err = ttm_bo_reserve(bo, true, true, NULL);
+        if (unlikely(err != 0)) {
+                if (err != -EBUSY)
                         return VM_FAULT_NOPAGE;
 
                 if (vmf->flags & FAULT_FLAG_ALLOW_RETRY) {
                         if (!(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
-                                ttm_bo_reference(bo);
+                                ttm_bo_get(bo);
                                 up_read(&vmf->vma->vm_mm->mmap_sem);
                                 (void) ttm_bo_wait_unreserved(bo);
-                                ttm_bo_unref(&bo);
+                                ttm_bo_put(bo);
                         }
 
                         return VM_FAULT_RETRY;
@@ -163,8 +165,8 @@ static int ttm_bo_vm_fault(struct vm_fault *vmf)
         }
 
         if (bdev->driver->fault_reserve_notify) {
-                ret = bdev->driver->fault_reserve_notify(bo);
-                switch (ret) {
+                err = bdev->driver->fault_reserve_notify(bo);
+                switch (err) {
                 case 0:
                         break;
                 case -EBUSY:
@@ -192,13 +194,13 @@ static int ttm_bo_vm_fault(struct vm_fault *vmf)
                 goto out_unlock;
         }
 
-        ret = ttm_mem_io_lock(man, true);
-        if (unlikely(ret != 0)) {
+        err = ttm_mem_io_lock(man, true);
+        if (unlikely(err != 0)) {
                 ret = VM_FAULT_NOPAGE;
                 goto out_unlock;
         }
-        ret = ttm_mem_io_reserve_vm(bo);
-        if (unlikely(ret != 0)) {
+        err = ttm_mem_io_reserve_vm(bo);
+        if (unlikely(err != 0)) {
                 ret = VM_FAULT_SIGBUS;
                 goto out_io_unlock;
         }
@@ -266,23 +268,20 @@ static int ttm_bo_vm_fault(struct vm_fault *vmf)
                 }
 
                 if (vma->vm_flags & VM_MIXEDMAP)
-                        ret = vm_insert_mixed(&cvma, address,
+                        ret = vmf_insert_mixed(&cvma, address,
                                         __pfn_to_pfn_t(pfn, PFN_DEV));
                 else
-                        ret = vm_insert_pfn(&cvma, address, pfn);
+                        ret = vmf_insert_pfn(&cvma, address, pfn);
 
                 /*
                  * Somebody beat us to this PTE or prefaulting to
                  * an already populated PTE, or prefaulting error.
                  */
-                if (unlikely((ret == -EBUSY) || (ret != 0 && i > 0)))
+                if (unlikely((ret == VM_FAULT_NOPAGE && i > 0)))
                         break;
-                else if (unlikely(ret != 0)) {
-                        ret =
-                            (ret == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS;
+                else if (unlikely(ret & VM_FAULT_ERROR))
                         goto out_io_unlock;
-                }
 
                 address += PAGE_SIZE;
                 if (unlikely(++page_offset >= page_last))
@@ -303,14 +302,14 @@ static void ttm_bo_vm_open(struct vm_area_struct *vma)
 
         WARN_ON(bo->bdev->dev_mapping != vma->vm_file->f_mapping);
 
-        (void)ttm_bo_reference(bo);
+        ttm_bo_get(bo);
 }
 
 static void ttm_bo_vm_close(struct vm_area_struct *vma)
 {
         struct ttm_buffer_object *bo = (struct ttm_buffer_object *)vma->vm_private_data;
 
-        ttm_bo_unref(&bo);
+        ttm_bo_put(bo);
         vma->vm_private_data = NULL;
 }
 
@@ -462,7 +461,7 @@ int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
         vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
         return 0;
 out_unref:
-        ttm_bo_unref(&bo);
+        ttm_bo_put(bo);
         return ret;
 }
 EXPORT_SYMBOL(ttm_bo_mmap);
@@ -472,8 +471,10 @@ int ttm_fbdev_mmap(struct vm_area_struct *vma, struct ttm_buffer_object *bo)
         if (vma->vm_pgoff != 0)
                 return -EACCES;
 
+        ttm_bo_get(bo);
+
         vma->vm_ops = &ttm_bo_vm_ops;
-        vma->vm_private_data = ttm_bo_reference(bo);
+        vma->vm_private_data = bo;
         vma->vm_flags |= VM_MIXEDMAP;
         vma->vm_flags |= VM_IO | VM_DONTEXPAND;
         return 0;
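
The ttm_bo_vm.c conversion above is the standard vm_fault_t split: errno-valued results from helpers stay in a plain int (err), the value handed back to the fault-handling core is a vm_fault_t (ret), and vmf_insert_pfn()/vmf_insert_mixed() already return VM_FAULT_* codes rather than errnos. A condensed sketch of the same pattern, where my_fault() and my_setup() are hypothetical stand-ins and not code from this diff:

    #include <linux/mm.h>

    static int my_setup(void);  /* hypothetical helper, returns 0 or -errno */

    static vm_fault_t my_fault(struct vm_fault *vmf)
    {
            unsigned long pfn = vmf->pgoff;  /* placeholder pfn for the sketch */
            vm_fault_t ret;
            int err;

            err = my_setup();  /* errno domain */
            if (err)
                    return (err != -ERESTARTSYS) ? VM_FAULT_SIGBUS :
                            VM_FAULT_NOPAGE;

            /* vmf_insert_pfn() hands back VM_FAULT_* codes directly, so
             * the result is tested against VM_FAULT_ERROR, not against 0. */
            ret = vmf_insert_pfn(vmf->vma, vmf->address, pfn);
            if (ret & VM_FAULT_ERROR)
                    return ret;

            return VM_FAULT_NOPAGE;
    }

Keeping the two domains in separate variables is what lets the compiler's vm_fault_t type checking catch accidental errno/fault-code mixing.
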
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
index 6e2d1300b457..f841accc2c00 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
@@ -47,13 +47,7 @@
 
 #include <drm/ttm/ttm_bo_driver.h>
 #include <drm/ttm/ttm_page_alloc.h>
-
-#if IS_ENABLED(CONFIG_AGP)
-#include <asm/agp.h>
-#endif
-#ifdef CONFIG_X86
-#include <asm/set_memory.h>
-#endif
+#include <drm/ttm/ttm_set_memory.h>
 
 #define NUM_PAGES_TO_ALLOC      (PAGE_SIZE/sizeof(struct page *))
 #define SMALL_ALLOCATION        16
@@ -222,52 +216,6 @@ static struct kobj_type ttm_pool_kobj_type = {
 
 static struct ttm_pool_manager *_manager;
 
-#ifndef CONFIG_X86
-static int set_pages_wb(struct page *page, int numpages)
-{
-#if IS_ENABLED(CONFIG_AGP)
-        int i;
-
-        for (i = 0; i < numpages; i++)
-                unmap_page_from_agp(page++);
-#endif
-        return 0;
-}
-
-static int set_pages_array_wb(struct page **pages, int addrinarray)
-{
-#if IS_ENABLED(CONFIG_AGP)
-        int i;
-
-        for (i = 0; i < addrinarray; i++)
-                unmap_page_from_agp(pages[i]);
-#endif
-        return 0;
-}
-
-static int set_pages_array_wc(struct page **pages, int addrinarray)
-{
-#if IS_ENABLED(CONFIG_AGP)
-        int i;
-
-        for (i = 0; i < addrinarray; i++)
-                map_page_into_agp(pages[i]);
-#endif
-        return 0;
-}
-
-static int set_pages_array_uc(struct page **pages, int addrinarray)
-{
-#if IS_ENABLED(CONFIG_AGP)
-        int i;
-
-        for (i = 0; i < addrinarray; i++)
-                map_page_into_agp(pages[i]);
-#endif
-        return 0;
-}
-#endif
-
 /**
  * Select the right pool or requested caching state and ttm flags.
  */
@@ -302,13 +250,13 @@ static void ttm_pages_put(struct page *pages[], unsigned npages,
         unsigned int i, pages_nr = (1 << order);
 
         if (order == 0) {
-                if (set_pages_array_wb(pages, npages))
+                if (ttm_set_pages_array_wb(pages, npages))
                         pr_err("Failed to set %d pages to wb!\n", npages);
         }
 
         for (i = 0; i < npages; ++i) {
                 if (order > 0) {
-                        if (set_pages_wb(pages[i], pages_nr))
+                        if (ttm_set_pages_wb(pages[i], pages_nr))
                                 pr_err("Failed to set %d pages to wb!\n", pages_nr);
                 }
                 __free_pages(pages[i], order);
@@ -498,12 +446,12 @@ static int ttm_set_pages_caching(struct page **pages,
         /* Set page caching */
         switch (cstate) {
         case tt_uncached:
-                r = set_pages_array_uc(pages, cpages);
+                r = ttm_set_pages_array_uc(pages, cpages);
                 if (r)
                         pr_err("Failed to set %d pages to uc!\n", cpages);
                 break;
         case tt_wc:
-                r = set_pages_array_wc(pages, cpages);
+                r = ttm_set_pages_array_wc(pages, cpages);
                 if (r)
                         pr_err("Failed to set %d pages to wc!\n", cpages);
                 break;
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
index 3f14c1cc0789..507be7ac1165 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
@@ -50,12 +50,7 @@
 #include <linux/kthread.h>
 #include <drm/ttm/ttm_bo_driver.h>
 #include <drm/ttm/ttm_page_alloc.h>
-#if IS_ENABLED(CONFIG_AGP)
-#include <asm/agp.h>
-#endif
-#ifdef CONFIG_X86
-#include <asm/set_memory.h>
-#endif
+#include <drm/ttm/ttm_set_memory.h>
 
 #define NUM_PAGES_TO_ALLOC      (PAGE_SIZE/sizeof(struct page *))
 #define SMALL_ALLOCATION        4
@@ -268,54 +263,19 @@ static struct kobj_type ttm_pool_kobj_type = {
         .default_attrs = ttm_pool_attrs,
 };
 
-#ifndef CONFIG_X86
-static int set_pages_array_wb(struct page **pages, int addrinarray)
-{
-#if IS_ENABLED(CONFIG_AGP)
-        int i;
-
-        for (i = 0; i < addrinarray; i++)
-                unmap_page_from_agp(pages[i]);
-#endif
-        return 0;
-}
-
-static int set_pages_array_wc(struct page **pages, int addrinarray)
-{
-#if IS_ENABLED(CONFIG_AGP)
-        int i;
-
-        for (i = 0; i < addrinarray; i++)
-                map_page_into_agp(pages[i]);
-#endif
-        return 0;
-}
-
-static int set_pages_array_uc(struct page **pages, int addrinarray)
-{
-#if IS_ENABLED(CONFIG_AGP)
-        int i;
-
-        for (i = 0; i < addrinarray; i++)
-                map_page_into_agp(pages[i]);
-#endif
-        return 0;
-}
-#endif /* for !CONFIG_X86 */
-
 static int ttm_set_pages_caching(struct dma_pool *pool,
                                  struct page **pages, unsigned cpages)
 {
         int r = 0;
         /* Set page caching */
         if (pool->type & IS_UC) {
-                r = set_pages_array_uc(pages, cpages);
+                r = ttm_set_pages_array_uc(pages, cpages);
                 if (r)
                         pr_err("%s: Failed to set %d pages to uc!\n",
                                pool->dev_name, cpages);
         }
         if (pool->type & IS_WC) {
-                r = set_pages_array_wc(pages, cpages);
+                r = ttm_set_pages_array_wc(pages, cpages);
                 if (r)
                         pr_err("%s: Failed to set %d pages to wc!\n",
                                pool->dev_name, cpages);
@@ -389,17 +349,14 @@ static void ttm_pool_update_free_locked(struct dma_pool *pool,
 static void ttm_dma_page_put(struct dma_pool *pool, struct dma_page *d_page)
 {
         struct page *page = d_page->p;
-        unsigned i, num_pages;
+        unsigned num_pages;
 
         /* Don't set WB on WB page pool. */
         if (!(pool->type & IS_CACHED)) {
                 num_pages = pool->size / PAGE_SIZE;
-                for (i = 0; i < num_pages; ++i, ++page) {
-                        if (set_pages_array_wb(&page, 1)) {
-                                pr_err("%s: Failed to set %d pages to wb!\n",
-                                       pool->dev_name, 1);
-                        }
-                }
+                if (ttm_set_pages_wb(page, num_pages))
+                        pr_err("%s: Failed to set %d pages to wb!\n",
+                               pool->dev_name, num_pages);
         }
 
         list_del(&d_page->page_list);
@@ -420,7 +377,7 @@ static void ttm_dma_pages_put(struct dma_pool *pool, struct list_head *d_pages,
 
         /* Don't set WB on WB page pool. */
         if (npages && !(pool->type & IS_CACHED) &&
-            set_pages_array_wb(pages, npages))
+            ttm_set_pages_array_wb(pages, npages))
                 pr_err("%s: Failed to set %d pages to wb!\n",
                        pool->dev_name, npages);
 
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index a1e543972ca7..e3a0691582ff 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -38,9 +38,7 @@
 #include <drm/drm_cache.h>
 #include <drm/ttm/ttm_bo_driver.h>
 #include <drm/ttm/ttm_page_alloc.h>
-#ifdef CONFIG_X86
-#include <asm/set_memory.h>
-#endif
+#include <drm/ttm/ttm_set_memory.h>
 
 /**
  * Allocates a ttm structure for the given BO.
@@ -115,10 +113,9 @@ static int ttm_sg_tt_alloc_page_directory(struct ttm_dma_tt *ttm)
         return 0;
 }
 
-#ifdef CONFIG_X86
-static inline int ttm_tt_set_page_caching(struct page *p,
-                                          enum ttm_caching_state c_old,
-                                          enum ttm_caching_state c_new)
+static int ttm_tt_set_page_caching(struct page *p,
+                                   enum ttm_caching_state c_old,
+                                   enum ttm_caching_state c_new)
 {
         int ret = 0;
 
@@ -129,26 +126,18 @@ static inline int ttm_tt_set_page_caching(struct page *p,
                 /* p isn't in the default caching state, set it to
                  * writeback first to free its current memtype. */
 
-                ret = set_pages_wb(p, 1);
+                ret = ttm_set_pages_wb(p, 1);
                 if (ret)
                         return ret;
         }
 
         if (c_new == tt_wc)
-                ret = set_memory_wc((unsigned long) page_address(p), 1);
+                ret = ttm_set_pages_wc(p, 1);
         else if (c_new == tt_uncached)
-                ret = set_pages_uc(p, 1);
+                ret = ttm_set_pages_uc(p, 1);
 
         return ret;
 }
-#else /* CONFIG_X86 */
-static inline int ttm_tt_set_page_caching(struct page *p,
-                                          enum ttm_caching_state c_old,
-                                          enum ttm_caching_state c_new)
-{
-        return 0;
-}
-#endif /* CONFIG_X86 */
 
 /*
  * Change caching policy for the linear kernel map
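
The four files above now take their caching-attribute helpers from the shared <drm/ttm/ttm_set_memory.h> header instead of carrying private #ifdef CONFIG_X86 fallbacks. The header itself is outside this diffstat (limited to drivers/gpu/drm/ttm), so the following is only a plausible sketch of one wrapper, reconstructed from the call-site renames and the per-file fallbacks deleted above: a pass-through to the x86 set_pages_array_wb(), with the AGP unmap loop as the non-x86 fallback.

    /* Assumed shape of one helper in <drm/ttm/ttm_set_memory.h>. */
    #ifdef CONFIG_X86
    #include <asm/set_memory.h>

    static inline int ttm_set_pages_array_wb(struct page **pages, int addrinarray)
    {
            /* On x86, defer to the arch's real attribute-changing helper. */
            return set_pages_array_wb(pages, addrinarray);
    }
    #else /* !CONFIG_X86 */
    #if IS_ENABLED(CONFIG_AGP)
    #include <asm/agp.h>
    #endif

    static inline int ttm_set_pages_array_wb(struct page **pages, int addrinarray)
    {
    #if IS_ENABLED(CONFIG_AGP)
            int i;

            /* Mirror of the fallback removed from the files above. */
            for (i = 0; i < addrinarray; i++)
                    unmap_page_from_agp(pages[i]);
    #endif
            return 0;
    }
    #endif /* CONFIG_X86 */

Centralizing the wrappers removes three near-identical copies of the AGP fallback and lets ttm_tt.c drop its x86-only ttm_tt_set_page_caching() variant entirely.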