Diffstat (limited to 'drivers/gpu/drm/etnaviv/etnaviv_gem.c')
-rw-r--r-- | drivers/gpu/drm/etnaviv/etnaviv_gem.c | 197 |
1 file changed, 40 insertions, 157 deletions
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
index daee3f1196df..fcc969fa0e69 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
@@ -24,6 +24,9 @@
 #include "etnaviv_gpu.h"
 #include "etnaviv_mmu.h"
 
+static struct lock_class_key etnaviv_shm_lock_class;
+static struct lock_class_key etnaviv_userptr_lock_class;
+
 static void etnaviv_gem_scatter_map(struct etnaviv_gem_object *etnaviv_obj)
 {
         struct drm_device *dev = etnaviv_obj->base.dev;
@@ -583,7 +586,7 @@ void etnaviv_gem_free_object(struct drm_gem_object *obj)
         kfree(etnaviv_obj);
 }
 
-int etnaviv_gem_obj_add(struct drm_device *dev, struct drm_gem_object *obj)
+void etnaviv_gem_obj_add(struct drm_device *dev, struct drm_gem_object *obj)
 {
         struct etnaviv_drm_private *priv = dev->dev_private;
         struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
@@ -591,8 +594,6 @@ int etnaviv_gem_obj_add(struct drm_device *dev, struct drm_gem_object *obj)
         mutex_lock(&priv->gem_lock);
         list_add_tail(&etnaviv_obj->gem_node, &priv->gem_list);
         mutex_unlock(&priv->gem_lock);
-
-        return 0;
 }
 
 static int etnaviv_gem_new_impl(struct drm_device *dev, u32 size, u32 flags,
@@ -640,8 +641,9 @@ static int etnaviv_gem_new_impl(struct drm_device *dev, u32 size, u32 flags,
         return 0;
 }
 
-static struct drm_gem_object *__etnaviv_gem_new(struct drm_device *dev,
-                u32 size, u32 flags)
+/* convenience method to construct a GEM buffer object, and userspace handle */
+int etnaviv_gem_new_handle(struct drm_device *dev, struct drm_file *file,
+        u32 size, u32 flags, u32 *handle)
 {
         struct drm_gem_object *obj = NULL;
         int ret;
@@ -653,6 +655,8 @@ static struct drm_gem_object *__etnaviv_gem_new(struct drm_device *dev,
         if (ret)
                 goto fail;
 
+        lockdep_set_class(&to_etnaviv_bo(obj)->lock, &etnaviv_shm_lock_class);
+
         ret = drm_gem_object_init(dev, obj, size);
         if (ret == 0) {
                 struct address_space *mapping;
@@ -660,7 +664,7 @@ static struct drm_gem_object *__etnaviv_gem_new(struct drm_device *dev,
                 /*
                  * Our buffers are kept pinned, so allocating them
                  * from the MOVABLE zone is a really bad idea, and
-                 * conflicts with CMA. See coments above new_inode()
+                 * conflicts with CMA. See comments above new_inode()
                  * why this is required _and_ expected if you're
                  * going to pin these pages.
                  */
@@ -672,33 +676,12 @@ static struct drm_gem_object *__etnaviv_gem_new(struct drm_device *dev,
         if (ret)
                 goto fail;
 
-        return obj;
-
-fail:
-        drm_gem_object_put_unlocked(obj);
-        return ERR_PTR(ret);
-}
-
-/* convenience method to construct a GEM buffer object, and userspace handle */
-int etnaviv_gem_new_handle(struct drm_device *dev, struct drm_file *file,
-        u32 size, u32 flags, u32 *handle)
-{
-        struct drm_gem_object *obj;
-        int ret;
-
-        obj = __etnaviv_gem_new(dev, size, flags);
-        if (IS_ERR(obj))
-                return PTR_ERR(obj);
-
-        ret = etnaviv_gem_obj_add(dev, obj);
-        if (ret < 0) {
-                drm_gem_object_put_unlocked(obj);
-                return ret;
-        }
+        etnaviv_gem_obj_add(dev, obj);
 
         ret = drm_gem_handle_create(file, obj, handle);
 
         /* drop reference from allocate - handle holds it now */
+fail:
         drm_gem_object_put_unlocked(obj);
 
         return ret;
@@ -722,139 +705,41 @@ int etnaviv_gem_new_private(struct drm_device *dev, size_t size, u32 flags,
         return 0;
 }
 
-struct get_pages_work {
-        struct work_struct work;
-        struct mm_struct *mm;
-        struct task_struct *task;
-        struct etnaviv_gem_object *etnaviv_obj;
-};
-
-static struct page **etnaviv_gem_userptr_do_get_pages(
-        struct etnaviv_gem_object *etnaviv_obj, struct mm_struct *mm, struct task_struct *task)
-{
-        int ret = 0, pinned, npages = etnaviv_obj->base.size >> PAGE_SHIFT;
-        struct page **pvec;
-        uintptr_t ptr;
-        unsigned int flags = 0;
-
-        pvec = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
-        if (!pvec)
-                return ERR_PTR(-ENOMEM);
-
-        if (!etnaviv_obj->userptr.ro)
-                flags |= FOLL_WRITE;
-
-        pinned = 0;
-        ptr = etnaviv_obj->userptr.ptr;
-
-        down_read(&mm->mmap_sem);
-        while (pinned < npages) {
-                ret = get_user_pages_remote(task, mm, ptr, npages - pinned,
-                                            flags, pvec + pinned, NULL, NULL);
-                if (ret < 0)
-                        break;
-
-                ptr += ret * PAGE_SIZE;
-                pinned += ret;
-        }
-        up_read(&mm->mmap_sem);
-
-        if (ret < 0) {
-                release_pages(pvec, pinned);
-                kvfree(pvec);
-                return ERR_PTR(ret);
-        }
-
-        return pvec;
-}
-
-static void __etnaviv_gem_userptr_get_pages(struct work_struct *_work)
-{
-        struct get_pages_work *work = container_of(_work, typeof(*work), work);
-        struct etnaviv_gem_object *etnaviv_obj = work->etnaviv_obj;
-        struct page **pvec;
-
-        pvec = etnaviv_gem_userptr_do_get_pages(etnaviv_obj, work->mm, work->task);
-
-        mutex_lock(&etnaviv_obj->lock);
-        if (IS_ERR(pvec)) {
-                etnaviv_obj->userptr.work = ERR_CAST(pvec);
-        } else {
-                etnaviv_obj->userptr.work = NULL;
-                etnaviv_obj->pages = pvec;
-        }
-
-        mutex_unlock(&etnaviv_obj->lock);
-        drm_gem_object_put_unlocked(&etnaviv_obj->base);
-
-        mmput(work->mm);
-        put_task_struct(work->task);
-        kfree(work);
-}
-
 static int etnaviv_gem_userptr_get_pages(struct etnaviv_gem_object *etnaviv_obj)
 {
         struct page **pvec = NULL;
-        struct get_pages_work *work;
-        struct mm_struct *mm;
-        int ret, pinned, npages = etnaviv_obj->base.size >> PAGE_SHIFT;
-
-        if (etnaviv_obj->userptr.work) {
-                if (IS_ERR(etnaviv_obj->userptr.work)) {
-                        ret = PTR_ERR(etnaviv_obj->userptr.work);
-                        etnaviv_obj->userptr.work = NULL;
-                } else {
-                        ret = -EAGAIN;
-                }
-                return ret;
-        }
+        struct etnaviv_gem_userptr *userptr = &etnaviv_obj->userptr;
+        int ret, pinned = 0, npages = etnaviv_obj->base.size >> PAGE_SHIFT;
 
-        mm = get_task_mm(etnaviv_obj->userptr.task);
-        pinned = 0;
-        if (mm == current->mm) {
-                pvec = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
-                if (!pvec) {
-                        mmput(mm);
-                        return -ENOMEM;
-                }
-
-                pinned = __get_user_pages_fast(etnaviv_obj->userptr.ptr, npages,
-                                               !etnaviv_obj->userptr.ro, pvec);
-                if (pinned < 0) {
-                        kvfree(pvec);
-                        mmput(mm);
-                        return pinned;
-                }
-
-                if (pinned == npages) {
-                        etnaviv_obj->pages = pvec;
-                        mmput(mm);
-                        return 0;
-                }
-        }
+        might_lock_read(&current->mm->mmap_sem);
 
-        release_pages(pvec, pinned);
-        kvfree(pvec);
+        if (userptr->mm != current->mm)
+                return -EPERM;
 
-        work = kmalloc(sizeof(*work), GFP_KERNEL);
-        if (!work) {
-                mmput(mm);
+        pvec = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
+        if (!pvec)
                 return -ENOMEM;
-        }
 
-        get_task_struct(current);
-        drm_gem_object_get(&etnaviv_obj->base);
+        do {
+                unsigned num_pages = npages - pinned;
+                uint64_t ptr = userptr->ptr + pinned * PAGE_SIZE;
+                struct page **pages = pvec + pinned;
 
-        work->mm = mm;
-        work->task = current;
-        work->etnaviv_obj = etnaviv_obj;
+                ret = get_user_pages_fast(ptr, num_pages,
+                                          !userptr->ro ? FOLL_WRITE : 0, pages);
+                if (ret < 0) {
+                        release_pages(pvec, pinned);
+                        kvfree(pvec);
+                        return ret;
+                }
+
+                pinned += ret;
 
-        etnaviv_obj->userptr.work = &work->work;
-        INIT_WORK(&work->work, __etnaviv_gem_userptr_get_pages);
+        } while (pinned < npages);
 
-        etnaviv_queue_work(etnaviv_obj->base.dev, &work->work);
+        etnaviv_obj->pages = pvec;
 
-        return -EAGAIN;
+        return 0;
 }
 
 static void etnaviv_gem_userptr_release(struct etnaviv_gem_object *etnaviv_obj)
@@ -870,7 +755,6 @@ static void etnaviv_gem_userptr_release(struct etnaviv_gem_object *etnaviv_obj)
                 release_pages(etnaviv_obj->pages, npages);
                 kvfree(etnaviv_obj->pages);
         }
-        put_task_struct(etnaviv_obj->userptr.task);
 }
 
 static int etnaviv_gem_userptr_mmap_obj(struct etnaviv_gem_object *etnaviv_obj,
@@ -897,17 +781,16 @@ int etnaviv_gem_new_userptr(struct drm_device *dev, struct drm_file *file,
         if (ret)
                 return ret;
 
+        lockdep_set_class(&etnaviv_obj->lock, &etnaviv_userptr_lock_class);
+
         etnaviv_obj->userptr.ptr = ptr;
-        etnaviv_obj->userptr.task = current;
+        etnaviv_obj->userptr.mm = current->mm;
         etnaviv_obj->userptr.ro = !(flags & ETNA_USERPTR_WRITE);
-        get_task_struct(current);
 
-        ret = etnaviv_gem_obj_add(dev, &etnaviv_obj->base);
-        if (ret)
-                goto unreference;
+        etnaviv_gem_obj_add(dev, &etnaviv_obj->base);
 
         ret = drm_gem_handle_create(file, &etnaviv_obj->base, handle);
-unreference:
+
         /* drop reference from allocate - handle holds it now */
         drm_gem_object_put_unlocked(&etnaviv_obj->base);
         return ret;
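
For readability, the reworked etnaviv_gem_userptr_get_pages() is reassembled below from the + lines and unchanged context of the hunks above, since its new body is spread across several interleaved +/- runs. Indentation is approximated and the two comments are added here for explanation; this is a reading aid, not a verbatim copy of the tree.

static int etnaviv_gem_userptr_get_pages(struct etnaviv_gem_object *etnaviv_obj)
{
        struct page **pvec = NULL;
        struct etnaviv_gem_userptr *userptr = &etnaviv_obj->userptr;
        int ret, pinned = 0, npages = etnaviv_obj->base.size >> PAGE_SHIFT;

        might_lock_read(&current->mm->mmap_sem);

        /* Pinning is only allowed from the mm that created the userptr BO. */
        if (userptr->mm != current->mm)
                return -EPERM;

        pvec = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
        if (!pvec)
                return -ENOMEM;

        /* Pin the whole range synchronously; no worker, no -EAGAIN retry. */
        do {
                unsigned num_pages = npages - pinned;
                uint64_t ptr = userptr->ptr + pinned * PAGE_SIZE;
                struct page **pages = pvec + pinned;

                ret = get_user_pages_fast(ptr, num_pages,
                                          !userptr->ro ? FOLL_WRITE : 0, pages);
                if (ret < 0) {
                        release_pages(pvec, pinned);
                        kvfree(pvec);
                        return ret;
                }

                pinned += ret;

        } while (pinned < npages);

        etnaviv_obj->pages = pvec;

        return 0;
}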
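The visible change for userspace is that the pages backing a userptr BO are now pinned directly with get_user_pages_fast() in the mm recorded at creation time, instead of through the removed get_pages worker, so the object is only usable from the process that created it (the -EPERM check above). A minimal sketch of exercising that path follows; the include path, the render-node name and the absence of error cleanup are assumptions of this sketch, not part of the patch.

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <unistd.h>

#include <libdrm/etnaviv_drm.h>   /* assumed include path for the etnaviv UAPI header */

int main(void)
{
        /* Assumed device node; pick the etnaviv render node on your board. */
        int fd = open("/dev/dri/renderD128", O_RDWR);
        if (fd < 0) {
                perror("open");
                return 1;
        }

        /* The user pointer and size must be page aligned. */
        size_t size = 16 * 4096;
        void *buf = aligned_alloc(4096, size);
        if (!buf)
                return 1;

        struct drm_etnaviv_gem_userptr req = {
                .user_ptr  = (uintptr_t)buf,
                .user_size = size,
                .flags     = ETNA_USERPTR_READ | ETNA_USERPTR_WRITE,
        };

        /*
         * Creates the userptr BO; with this diff applied its pages are later
         * pinned via get_user_pages_fast() in this process' mm, so the BO
         * cannot be set up on behalf of another process.
         */
        if (ioctl(fd, DRM_IOCTL_ETNAVIV_GEM_USERPTR, &req) < 0) {
                perror("DRM_IOCTL_ETNAVIV_GEM_USERPTR");
                return 1;
        }

        printf("userptr BO handle: %u\n", req.handle);

        close(fd);
        return 0;
}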