Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c')
-rw-r--r-- | drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 101
1 file changed, 77 insertions, 24 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 9ccdd189d717..0f42b1a24446 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -494,29 +494,32 @@ static void amdgpu_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_re
 /*
  * TTM backend functions.
  */
+struct amdgpu_ttm_gup_task_list {
+        struct list_head        list;
+        struct task_struct      *task;
+};
+
 struct amdgpu_ttm_tt {
-        struct ttm_dma_tt               ttm;
-        struct amdgpu_device            *adev;
-        u64                             offset;
-        uint64_t                        userptr;
-        struct mm_struct                *usermm;
-        uint32_t                        userflags;
+        struct ttm_dma_tt       ttm;
+        struct amdgpu_device    *adev;
+        u64                     offset;
+        uint64_t                userptr;
+        struct mm_struct        *usermm;
+        uint32_t                userflags;
+        spinlock_t              guptasklock;
+        struct list_head        guptasks;
+        atomic_t                mmu_invalidations;
 };
 
-/* prepare the sg table with the user pages */
-static int amdgpu_ttm_tt_pin_userptr(struct ttm_tt *ttm)
+int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages)
 {
-        struct amdgpu_device *adev = amdgpu_get_adev(ttm->bdev);
         struct amdgpu_ttm_tt *gtt = (void *)ttm;
-        unsigned pinned = 0, nents;
-        int r;
-
         int write = !(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
-        enum dma_data_direction direction = write ?
-                DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
+        unsigned pinned = 0;
+        int r;
 
         if (gtt->userflags & AMDGPU_GEM_USERPTR_ANONONLY) {
-                /* check that we only pin down anonymous memory
+                /* check that we only use anonymous memory
                    to prevent problems with writeback */
                 unsigned long end = gtt->userptr + ttm->num_pages * PAGE_SIZE;
                 struct vm_area_struct *vma;
@@ -529,10 +532,21 @@ static int amdgpu_ttm_tt_pin_userptr(struct ttm_tt *ttm)
         do {
                 unsigned num_pages = ttm->num_pages - pinned;
                 uint64_t userptr = gtt->userptr + pinned * PAGE_SIZE;
-                struct page **pages = ttm->pages + pinned;
+                struct page **p = pages + pinned;
+                struct amdgpu_ttm_gup_task_list guptask;
+
+                guptask.task = current;
+                spin_lock(&gtt->guptasklock);
+                list_add(&guptask.list, &gtt->guptasks);
+                spin_unlock(&gtt->guptasklock);
 
                 r = get_user_pages(current, current->mm, userptr, num_pages,
-                                   write, 0, pages, NULL);
+                                   write, 0, p, NULL);
+
+                spin_lock(&gtt->guptasklock);
+                list_del(&guptask.list);
+                spin_unlock(&gtt->guptasklock);
+
                 if (r < 0)
                         goto release_pages;
 
@@ -540,6 +554,25 @@ static int amdgpu_ttm_tt_pin_userptr(struct ttm_tt *ttm)
 
         } while (pinned < ttm->num_pages);
 
+        return 0;
+
+release_pages:
+        release_pages(pages, pinned, 0);
+        return r;
+}
+
+/* prepare the sg table with the user pages */
+static int amdgpu_ttm_tt_pin_userptr(struct ttm_tt *ttm)
+{
+        struct amdgpu_device *adev = amdgpu_get_adev(ttm->bdev);
+        struct amdgpu_ttm_tt *gtt = (void *)ttm;
+        unsigned nents;
+        int r;
+
+        int write = !(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
+        enum dma_data_direction direction = write ?
+                DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
+
         r = sg_alloc_table_from_pages(ttm->sg, ttm->pages, ttm->num_pages, 0,
                                       ttm->num_pages << PAGE_SHIFT,
                                       GFP_KERNEL);
@@ -558,9 +591,6 @@ static int amdgpu_ttm_tt_pin_userptr(struct ttm_tt *ttm)
 
 release_sg:
         kfree(ttm->sg);
-
-release_pages:
-        release_pages(ttm->pages, pinned, 0);
         return r;
 }
 
@@ -783,6 +813,10 @@ int amdgpu_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr,
         gtt->userptr = addr;
         gtt->usermm = current->mm;
         gtt->userflags = flags;
+        spin_lock_init(&gtt->guptasklock);
+        INIT_LIST_HEAD(&gtt->guptasks);
+        atomic_set(&gtt->mmu_invalidations, 0);
+
         return 0;
 }
 
@@ -800,21 +834,40 @@ bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start,
                                   unsigned long end)
 {
         struct amdgpu_ttm_tt *gtt = (void *)ttm;
+        struct amdgpu_ttm_gup_task_list *entry;
         unsigned long size;
 
-        if (gtt == NULL)
-                return false;
-
-        if (gtt->ttm.ttm.state != tt_bound || !gtt->userptr)
+        if (gtt == NULL || !gtt->userptr)
                 return false;
 
         size = (unsigned long)gtt->ttm.ttm.num_pages * PAGE_SIZE;
         if (gtt->userptr > end || gtt->userptr + size <= start)
                 return false;
 
+        spin_lock(&gtt->guptasklock);
+        list_for_each_entry(entry, &gtt->guptasks, list) {
+                if (entry->task == current) {
+                        spin_unlock(&gtt->guptasklock);
+                        return false;
+                }
+        }
+        spin_unlock(&gtt->guptasklock);
+
+        atomic_inc(&gtt->mmu_invalidations);
+
         return true;
 }
 
+bool amdgpu_ttm_tt_userptr_invalidated(struct ttm_tt *ttm,
+                                       int *last_invalidated)
+{
+        struct amdgpu_ttm_tt *gtt = (void *)ttm;
+        int prev_invalidated = *last_invalidated;
+
+        *last_invalidated = atomic_read(&gtt->mmu_invalidations);
+        return prev_invalidated != *last_invalidated;
+}
+
 bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm)
 {
         struct amdgpu_ttm_tt *gtt = (void *)ttm;
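
The helpers added above are only the TTM side of the change; their callers live elsewhere in the driver and are not part of this file. As a rough, illustrative sketch (not taken from the patch), the snippet below shows how a hypothetical caller could combine amdgpu_ttm_tt_get_user_pages() with amdgpu_ttm_tt_userptr_invalidated(): snapshot the invalidation counter, pin the pages, and retry if an MMU notifier invalidated the range in the meantime. The function name my_pin_userptr_pages and the retry bound are invented for the example.

/* Illustrative sketch only -- not part of the patch. Assumes a ttm_tt
 * that was initialized with amdgpu_ttm_tt_set_userptr(). */
static int my_pin_userptr_pages(struct ttm_tt *ttm, struct page **pages)
{
        int invalidated = 0;
        int tries = 10;         /* arbitrary retry bound for the example */
        int r;

        /* Seed the snapshot of gtt->mmu_invalidations. */
        amdgpu_ttm_tt_userptr_invalidated(ttm, &invalidated);

        do {
                /* Pin the user pages; each gup call is registered on
                 * gtt->guptasks so the MMU notifier path can tell our own
                 * page-table activity apart from a real invalidation. */
                r = amdgpu_ttm_tt_get_user_pages(ttm, pages);
                if (r)
                        return r;

                /* No notifier fired since the snapshot: pages are usable. */
                if (!amdgpu_ttm_tt_userptr_invalidated(ttm, &invalidated))
                        return 0;

                /* The range was invalidated while we pinned it: drop the
                 * pages and try again. */
                release_pages(pages, ttm->num_pages, 0);
        } while (--tries);

        return -EAGAIN;
}

The sketch also shows what the guptasks list buys: amdgpu_ttm_tt_affect_userptr() returns false when the invalidation comes from the task that is currently inside get_user_pages(), so a pin in progress does not count its own page-table activity as a concurrent invalidation.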