Diffstat (limited to 'drivers/gpu/drm/msm/msm_gem.c')
-rw-r--r--	drivers/gpu/drm/msm/msm_gem.c	425
1 file changed, 282 insertions, 143 deletions
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 465dab942afa..65f35544c1ec 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -26,6 +26,9 @@
 #include "msm_gpu.h"
 #include "msm_mmu.h"
 
+static void msm_gem_vunmap_locked(struct drm_gem_object *obj);
+
+
 static dma_addr_t physaddr(struct drm_gem_object *obj)
 {
 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
@@ -41,8 +44,7 @@ static bool use_pages(struct drm_gem_object *obj)
 }
 
 /* allocate pages from VRAM carveout, used when no IOMMU: */
-static struct page **get_pages_vram(struct drm_gem_object *obj,
-		int npages)
+static struct page **get_pages_vram(struct drm_gem_object *obj, int npages)
 {
 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 	struct msm_drm_private *priv = obj->dev->dev_private;
@@ -54,7 +56,9 @@ static struct page **get_pages_vram(struct drm_gem_object *obj,
 	if (!p)
 		return ERR_PTR(-ENOMEM);
 
+	spin_lock(&priv->vram.lock);
 	ret = drm_mm_insert_node(&priv->vram.mm, msm_obj->vram_node, npages);
+	spin_unlock(&priv->vram.lock);
 	if (ret) {
 		kvfree(p);
 		return ERR_PTR(ret);
@@ -69,7 +73,6 @@ static struct page **get_pages_vram(struct drm_gem_object *obj,
 	return p;
 }
 
-/* called with dev->struct_mutex held */
 static struct page **get_pages(struct drm_gem_object *obj)
 {
 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
@@ -109,6 +112,18 @@ static struct page **get_pages(struct drm_gem_object *obj)
 	return msm_obj->pages;
 }
 
+static void put_pages_vram(struct drm_gem_object *obj)
+{
+	struct msm_gem_object *msm_obj = to_msm_bo(obj);
+	struct msm_drm_private *priv = obj->dev->dev_private;
+
+	spin_lock(&priv->vram.lock);
+	drm_mm_remove_node(msm_obj->vram_node);
+	spin_unlock(&priv->vram.lock);
+
+	kvfree(msm_obj->pages);
+}
+
 static void put_pages(struct drm_gem_object *obj)
 {
 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
@@ -125,10 +140,8 @@ static void put_pages(struct drm_gem_object *obj)
 
 		if (use_pages(obj))
 			drm_gem_put_pages(obj, msm_obj->pages, true, false);
-		else {
-			drm_mm_remove_node(msm_obj->vram_node);
-			kvfree(msm_obj->pages);
-		}
+		else
+			put_pages_vram(obj);
 
 		msm_obj->pages = NULL;
 	}
@@ -136,11 +149,18 @@ static void put_pages(struct drm_gem_object *obj)
 
 struct page **msm_gem_get_pages(struct drm_gem_object *obj)
 {
-	struct drm_device *dev = obj->dev;
+	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 	struct page **p;
-	mutex_lock(&dev->struct_mutex);
+
+	mutex_lock(&msm_obj->lock);
+
+	if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
+		mutex_unlock(&msm_obj->lock);
+		return ERR_PTR(-EBUSY);
+	}
+
 	p = get_pages(obj);
-	mutex_unlock(&dev->struct_mutex);
+	mutex_unlock(&msm_obj->lock);
 	return p;
 }
 
@@ -195,28 +215,25 @@ int msm_gem_fault(struct vm_fault *vmf)
 {
 	struct vm_area_struct *vma = vmf->vma;
 	struct drm_gem_object *obj = vma->vm_private_data;
-	struct drm_device *dev = obj->dev;
-	struct msm_drm_private *priv = dev->dev_private;
+	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 	struct page **pages;
 	unsigned long pfn;
 	pgoff_t pgoff;
 	int ret;
 
-	/* This should only happen if userspace tries to pass a mmap'd
-	 * but unfaulted gem bo vaddr into submit ioctl, triggering
-	 * a page fault while struct_mutex is already held. This is
-	 * not a valid use-case so just bail.
-	 */
-	if (priv->struct_mutex_task == current)
-		return VM_FAULT_SIGBUS;
-
-	/* Make sure we don't parallel update on a fault, nor move or remove
-	 * something from beneath our feet
+	/*
+	 * vm_ops.open/drm_gem_mmap_obj and close get and put
+	 * a reference on obj. So, we dont need to hold one here.
 	 */
-	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	ret = mutex_lock_interruptible(&msm_obj->lock);
 	if (ret)
 		goto out;
 
+	if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
+		mutex_unlock(&msm_obj->lock);
+		return VM_FAULT_SIGBUS;
+	}
+
 	/* make sure we have pages attached now */
 	pages = get_pages(obj);
 	if (IS_ERR(pages)) {
@@ -235,7 +252,7 @@ int msm_gem_fault(struct vm_fault *vmf)
 	ret = vm_insert_mixed(vma, vmf->address, __pfn_to_pfn_t(pfn, PFN_DEV));
 
 out_unlock:
-	mutex_unlock(&dev->struct_mutex);
+	mutex_unlock(&msm_obj->lock);
 out:
 	switch (ret) {
 	case -EAGAIN:
@@ -259,9 +276,10 @@ out:
 static uint64_t mmap_offset(struct drm_gem_object *obj)
 {
 	struct drm_device *dev = obj->dev;
+	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 	int ret;
 
-	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+	WARN_ON(!mutex_is_locked(&msm_obj->lock));
 
 	/* Make it mmapable */
 	ret = drm_gem_create_mmap_offset(obj);
@@ -277,95 +295,140 @@ static uint64_t mmap_offset(struct drm_gem_object *obj)
 uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj)
 {
 	uint64_t offset;
-	mutex_lock(&obj->dev->struct_mutex);
+	struct msm_gem_object *msm_obj = to_msm_bo(obj);
+
+	mutex_lock(&msm_obj->lock);
 	offset = mmap_offset(obj);
-	mutex_unlock(&obj->dev->struct_mutex);
+	mutex_unlock(&msm_obj->lock);
 	return offset;
 }
 
-static void
-put_iova(struct drm_gem_object *obj)
+static struct msm_gem_vma *add_vma(struct drm_gem_object *obj,
+		struct msm_gem_address_space *aspace)
 {
-	struct drm_device *dev = obj->dev;
-	struct msm_drm_private *priv = obj->dev->dev_private;
 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
-	int id;
+	struct msm_gem_vma *vma;
 
-	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+	WARN_ON(!mutex_is_locked(&msm_obj->lock));
 
-	for (id = 0; id < ARRAY_SIZE(msm_obj->domain); id++) {
-		if (!priv->aspace[id])
-			continue;
-		msm_gem_unmap_vma(priv->aspace[id],
-				&msm_obj->domain[id], msm_obj->sgt);
-	}
+	vma = kzalloc(sizeof(*vma), GFP_KERNEL);
+	if (!vma)
+		return ERR_PTR(-ENOMEM);
+
+	vma->aspace = aspace;
+
+	list_add_tail(&vma->list, &msm_obj->vmas);
+
+	return vma;
 }
 
-/* should be called under struct_mutex.. although it can be called
- * from atomic context without struct_mutex to acquire an extra
- * iova ref if you know one is already held.
- *
- * That means when I do eventually need to add support for unpinning
- * the refcnt counter needs to be atomic_t.
- */
-int msm_gem_get_iova_locked(struct drm_gem_object *obj, int id,
-		uint64_t *iova)
+static struct msm_gem_vma *lookup_vma(struct drm_gem_object *obj,
+		struct msm_gem_address_space *aspace)
 {
 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
-	int ret = 0;
+	struct msm_gem_vma *vma;
 
-	if (!msm_obj->domain[id].iova) {
-		struct msm_drm_private *priv = obj->dev->dev_private;
-		struct page **pages = get_pages(obj);
+	WARN_ON(!mutex_is_locked(&msm_obj->lock));
 
-		if (IS_ERR(pages))
-			return PTR_ERR(pages);
-
-		if (iommu_present(&platform_bus_type)) {
-			ret = msm_gem_map_vma(priv->aspace[id], &msm_obj->domain[id],
-					msm_obj->sgt, obj->size >> PAGE_SHIFT);
-		} else {
-			msm_obj->domain[id].iova = physaddr(obj);
-		}
+	list_for_each_entry(vma, &msm_obj->vmas, list) {
+		if (vma->aspace == aspace)
+			return vma;
 	}
 
-	if (!ret)
-		*iova = msm_obj->domain[id].iova;
+	return NULL;
+}
 
-	return ret;
+static void del_vma(struct msm_gem_vma *vma)
+{
+	if (!vma)
+		return;
+
+	list_del(&vma->list);
+	kfree(vma);
+}
+
+/* Called with msm_obj->lock locked */
+static void
+put_iova(struct drm_gem_object *obj)
+{
+	struct msm_gem_object *msm_obj = to_msm_bo(obj);
+	struct msm_gem_vma *vma, *tmp;
+
+	WARN_ON(!mutex_is_locked(&msm_obj->lock));
+
+	list_for_each_entry_safe(vma, tmp, &msm_obj->vmas, list) {
+		msm_gem_unmap_vma(vma->aspace, vma, msm_obj->sgt);
+		del_vma(vma);
	}
 }
 
 /* get iova, taking a reference. Should have a matching put */
-int msm_gem_get_iova(struct drm_gem_object *obj, int id, uint64_t *iova)
+int msm_gem_get_iova(struct drm_gem_object *obj,
+		struct msm_gem_address_space *aspace, uint64_t *iova)
 {
 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
-	int ret;
+	struct msm_gem_vma *vma;
+	int ret = 0;
 
-	/* this is safe right now because we don't unmap until the
-	 * bo is deleted:
-	 */
-	if (msm_obj->domain[id].iova) {
-		*iova = msm_obj->domain[id].iova;
-		return 0;
+	mutex_lock(&msm_obj->lock);
+
+	if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
+		mutex_unlock(&msm_obj->lock);
+		return -EBUSY;
+	}
+
+	vma = lookup_vma(obj, aspace);
+
+	if (!vma) {
+		struct page **pages;
+
+		vma = add_vma(obj, aspace);
+		if (IS_ERR(vma))
+			return PTR_ERR(vma);
+
+		pages = get_pages(obj);
+		if (IS_ERR(pages)) {
+			ret = PTR_ERR(pages);
+			goto fail;
+		}
+
+		ret = msm_gem_map_vma(aspace, vma, msm_obj->sgt,
+				obj->size >> PAGE_SHIFT);
+		if (ret)
+			goto fail;
 	}
 
-	mutex_lock(&obj->dev->struct_mutex);
-	ret = msm_gem_get_iova_locked(obj, id, iova);
-	mutex_unlock(&obj->dev->struct_mutex);
+	*iova = vma->iova;
+
+	mutex_unlock(&msm_obj->lock);
+	return 0;
+
+fail:
+	del_vma(vma);
+
+	mutex_unlock(&msm_obj->lock);
 	return ret;
 }
 
 /* get iova without taking a reference, used in places where you have
  * already done a 'msm_gem_get_iova()'.
  */
-uint64_t msm_gem_iova(struct drm_gem_object *obj, int id)
+uint64_t msm_gem_iova(struct drm_gem_object *obj,
+		struct msm_gem_address_space *aspace)
 {
 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
-	WARN_ON(!msm_obj->domain[id].iova);
-	return msm_obj->domain[id].iova;
+	struct msm_gem_vma *vma;
+
+	mutex_lock(&msm_obj->lock);
+	vma = lookup_vma(obj, aspace);
+	mutex_unlock(&msm_obj->lock);
+	WARN_ON(!vma);
+
+	return vma ? vma->iova : 0;
 }
 
-void msm_gem_put_iova(struct drm_gem_object *obj, int id)
+void msm_gem_put_iova(struct drm_gem_object *obj,
+		struct msm_gem_address_space *aspace)
 {
 	// XXX TODO ..
 	// NOTE: probably don't need a _locked() version.. we wouldn't
@@ -405,45 +468,57 @@ fail:
 	return ret;
 }
 
-void *msm_gem_get_vaddr_locked(struct drm_gem_object *obj)
+void *msm_gem_get_vaddr(struct drm_gem_object *obj)
 {
 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
-	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
+	int ret = 0;
+
+	mutex_lock(&msm_obj->lock);
+
+	if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
+		mutex_unlock(&msm_obj->lock);
+		return ERR_PTR(-EBUSY);
+	}
+
+	/* increment vmap_count *before* vmap() call, so shrinker can
+	 * check vmap_count (is_vunmapable()) outside of msm_obj->lock.
+	 * This guarantees that we won't try to msm_gem_vunmap() this
+	 * same object from within the vmap() call (while we already
+	 * hold msm_obj->lock)
+	 */
+	msm_obj->vmap_count++;
+
 	if (!msm_obj->vaddr) {
 		struct page **pages = get_pages(obj);
 
-		if (IS_ERR(pages))
-			return ERR_CAST(pages);
+		if (IS_ERR(pages)) {
+			ret = PTR_ERR(pages);
+			goto fail;
+		}
 
 		msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
 				VM_MAP, pgprot_writecombine(PAGE_KERNEL));
-		if (msm_obj->vaddr == NULL)
-			return ERR_PTR(-ENOMEM);
+		if (msm_obj->vaddr == NULL) {
+			ret = -ENOMEM;
+			goto fail;
+		}
 	}
-	msm_obj->vmap_count++;
+
+	mutex_unlock(&msm_obj->lock);
 	return msm_obj->vaddr;
-}
 
-void *msm_gem_get_vaddr(struct drm_gem_object *obj)
-{
-	void *ret;
-	mutex_lock(&obj->dev->struct_mutex);
-	ret = msm_gem_get_vaddr_locked(obj);
-	mutex_unlock(&obj->dev->struct_mutex);
-	return ret;
+fail:
+	msm_obj->vmap_count--;
+	mutex_unlock(&msm_obj->lock);
+	return ERR_PTR(ret);
 }
 
-void msm_gem_put_vaddr_locked(struct drm_gem_object *obj)
+void msm_gem_put_vaddr(struct drm_gem_object *obj)
 {
 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
-	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
+
+	mutex_lock(&msm_obj->lock);
 	WARN_ON(msm_obj->vmap_count < 1);
 	msm_obj->vmap_count--;
-}
-
-void msm_gem_put_vaddr(struct drm_gem_object *obj)
-{
-	mutex_lock(&obj->dev->struct_mutex);
-	msm_gem_put_vaddr_locked(obj);
-	mutex_unlock(&obj->dev->struct_mutex);
+	mutex_unlock(&msm_obj->lock);
 }
 
 /* Update madvise status, returns true if not purged, else
@@ -453,15 +528,21 @@ int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv)
 {
 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 
+	mutex_lock(&msm_obj->lock);
+
 	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
 
 	if (msm_obj->madv != __MSM_MADV_PURGED)
 		msm_obj->madv = madv;
 
-	return (msm_obj->madv != __MSM_MADV_PURGED);
+	madv = msm_obj->madv;
+
+	mutex_unlock(&msm_obj->lock);
+
+	return (madv != __MSM_MADV_PURGED);
 }
 
-void msm_gem_purge(struct drm_gem_object *obj)
+void msm_gem_purge(struct drm_gem_object *obj, enum msm_gem_lock subclass)
 {
 	struct drm_device *dev = obj->dev;
 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
@@ -470,9 +551,11 @@ void msm_gem_purge(struct drm_gem_object *obj)
 	WARN_ON(!is_purgeable(msm_obj));
 	WARN_ON(obj->import_attach);
 
+	mutex_lock_nested(&msm_obj->lock, subclass);
+
 	put_iova(obj);
 
-	msm_gem_vunmap(obj);
+	msm_gem_vunmap_locked(obj);
 
 	put_pages(obj);
 
@@ -490,12 +573,16 @@ void msm_gem_purge(struct drm_gem_object *obj)
 
 	invalidate_mapping_pages(file_inode(obj->filp)->i_mapping,
 			0, (loff_t)-1);
+
+	mutex_unlock(&msm_obj->lock);
 }
 
-void msm_gem_vunmap(struct drm_gem_object *obj)
+static void msm_gem_vunmap_locked(struct drm_gem_object *obj)
 {
 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 
+	WARN_ON(!mutex_is_locked(&msm_obj->lock));
+
 	if (!msm_obj->vaddr || WARN_ON(!is_vunmapable(msm_obj)))
 		return;
 
@@ -503,6 +590,15 @@ void msm_gem_vunmap(struct drm_gem_object *obj)
 	msm_obj->vaddr = NULL;
 }
 
+void msm_gem_vunmap(struct drm_gem_object *obj, enum msm_gem_lock subclass)
+{
+	struct msm_gem_object *msm_obj = to_msm_bo(obj);
+
+	mutex_lock_nested(&msm_obj->lock, subclass);
+	msm_gem_vunmap_locked(obj);
+	mutex_unlock(&msm_obj->lock);
+}
+
 /* must be called before _move_to_active().. */
 int msm_gem_sync_object(struct drm_gem_object *obj,
 		struct msm_fence_context *fctx, bool exclusive)
@@ -619,13 +715,12 @@ void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 	struct reservation_object *robj = msm_obj->resv;
 	struct reservation_object_list *fobj;
-	struct msm_drm_private *priv = obj->dev->dev_private;
 	struct dma_fence *fence;
+	struct msm_gem_vma *vma;
 	uint64_t off = drm_vma_node_start(&obj->vma_node);
 	const char *madv;
-	unsigned id;
 
-	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
+	mutex_lock(&msm_obj->lock);
 
 	switch (msm_obj->madv) {
 	case __MSM_MADV_PURGED:
@@ -645,8 +740,9 @@ void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
 			obj->name, kref_read(&obj->refcount),
 			off, msm_obj->vaddr);
 
-	for (id = 0; id < priv->num_aspaces; id++)
-		seq_printf(m, " %08llx", msm_obj->domain[id].iova);
+	/* FIXME: we need to print the address space here too */
+	list_for_each_entry(vma, &msm_obj->vmas, list)
+		seq_printf(m, " %08llx", vma->iova);
 
 	seq_printf(m, " %zu%s\n", obj->size, madv);
 
@@ -665,6 +761,8 @@ void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
 	if (fence)
 		describe_fence(fence, "Exclusive", m);
 	rcu_read_unlock();
+
+	mutex_unlock(&msm_obj->lock);
 }
 
 void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
@@ -697,6 +795,8 @@ void msm_gem_free_object(struct drm_gem_object *obj)
 
 	list_del(&msm_obj->mm_list);
 
+	mutex_lock(&msm_obj->lock);
+
 	put_iova(obj);
 
 	if (obj->import_attach) {
@@ -711,7 +811,7 @@ void msm_gem_free_object(struct drm_gem_object *obj)
 
 		drm_prime_gem_destroy(obj, msm_obj->sgt);
 	} else {
-		msm_gem_vunmap(obj);
+		msm_gem_vunmap_locked(obj);
 		put_pages(obj);
 	}
 
@@ -720,6 +820,7 @@ void msm_gem_free_object(struct drm_gem_object *obj)
 
 	drm_gem_object_release(obj);
 
+	mutex_unlock(&msm_obj->lock);
 	kfree(msm_obj);
 }
 
@@ -730,14 +831,8 @@ int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
 	struct drm_gem_object *obj;
 	int ret;
 
-	ret = mutex_lock_interruptible(&dev->struct_mutex);
-	if (ret)
-		return ret;
-
 	obj = msm_gem_new(dev, size, flags);
 
-	mutex_unlock(&dev->struct_mutex);
-
 	if (IS_ERR(obj))
 		return PTR_ERR(obj);
 
@@ -752,11 +847,11 @@ int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
 static int msm_gem_new_impl(struct drm_device *dev,
 		uint32_t size, uint32_t flags,
 		struct reservation_object *resv,
-		struct drm_gem_object **obj)
+		struct drm_gem_object **obj,
+		bool struct_mutex_locked)
 {
 	struct msm_drm_private *priv = dev->dev_private;
 	struct msm_gem_object *msm_obj;
-	bool use_vram = false;
 
 	switch (flags & MSM_BO_CACHE_MASK) {
 	case MSM_BO_UNCACHED:
@@ -769,20 +864,11 @@ static int msm_gem_new_impl(struct drm_device *dev,
 		return -EINVAL;
 	}
 
-	if (!iommu_present(&platform_bus_type))
-		use_vram = true;
-	else if ((flags & MSM_BO_STOLEN) && priv->vram.size)
-		use_vram = true;
-
-	if (WARN_ON(use_vram && !priv->vram.size))
-		return -EINVAL;
-
 	msm_obj = kzalloc(sizeof(*msm_obj), GFP_KERNEL);
 	if (!msm_obj)
 		return -ENOMEM;
 
-	if (use_vram)
-		msm_obj->vram_node = &msm_obj->domain[0].node;
+	mutex_init(&msm_obj->lock);
 
 	msm_obj->flags = flags;
 	msm_obj->madv = MSM_MADV_WILLNEED;
@@ -795,48 +881,96 @@ static int msm_gem_new_impl(struct drm_device *dev,
 	}
 
 	INIT_LIST_HEAD(&msm_obj->submit_entry);
-	list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
+	INIT_LIST_HEAD(&msm_obj->vmas);
+
+	if (struct_mutex_locked) {
+		WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+		list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
+	} else {
+		mutex_lock(&dev->struct_mutex);
+		list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
+		mutex_unlock(&dev->struct_mutex);
+	}
 
 	*obj = &msm_obj->base;
 
 	return 0;
 }
 
-struct drm_gem_object *msm_gem_new(struct drm_device *dev,
-		uint32_t size, uint32_t flags)
+static struct drm_gem_object *_msm_gem_new(struct drm_device *dev,
+		uint32_t size, uint32_t flags, bool struct_mutex_locked)
 {
+	struct msm_drm_private *priv = dev->dev_private;
 	struct drm_gem_object *obj = NULL;
+	bool use_vram = false;
 	int ret;
 
-	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
-
 	size = PAGE_ALIGN(size);
 
+	if (!iommu_present(&platform_bus_type))
+		use_vram = true;
+	else if ((flags & MSM_BO_STOLEN) && priv->vram.size)
+		use_vram = true;
+
+	if (WARN_ON(use_vram && !priv->vram.size))
+		return ERR_PTR(-EINVAL);
+
 	/* Disallow zero sized objects as they make the underlying
 	 * infrastructure grumpy
 	 */
 	if (size == 0)
 		return ERR_PTR(-EINVAL);
 
-	ret = msm_gem_new_impl(dev, size, flags, NULL, &obj);
+	ret = msm_gem_new_impl(dev, size, flags, NULL, &obj, struct_mutex_locked);
 	if (ret)
 		goto fail;
 
-	if (use_pages(obj)) {
+	if (use_vram) {
+		struct msm_gem_vma *vma;
+		struct page **pages;
+
+		vma = add_vma(obj, NULL);
+		if (IS_ERR(vma)) {
+			ret = PTR_ERR(vma);
+			goto fail;
+		}
+
+		to_msm_bo(obj)->vram_node = &vma->node;
+
+		drm_gem_private_object_init(dev, obj, size);
+
+		pages = get_pages(obj);
+		if (IS_ERR(pages)) {
+			ret = PTR_ERR(pages);
+			goto fail;
+		}
+
+		vma->iova = physaddr(obj);
+	} else {
 		ret = drm_gem_object_init(dev, obj, size);
 		if (ret)
 			goto fail;
-	} else {
-		drm_gem_private_object_init(dev, obj, size);
 	}
 
 	return obj;
 
 fail:
-	drm_gem_object_unreference(obj);
+	drm_gem_object_unreference_unlocked(obj);
 	return ERR_PTR(ret);
 }
 
+struct drm_gem_object *msm_gem_new_locked(struct drm_device *dev,
+		uint32_t size, uint32_t flags)
+{
+	return _msm_gem_new(dev, size, flags, true);
+}
+
+struct drm_gem_object *msm_gem_new(struct drm_device *dev,
+		uint32_t size, uint32_t flags)
+{
+	return _msm_gem_new(dev, size, flags, false);
+}
+
 struct drm_gem_object *msm_gem_import(struct drm_device *dev,
 		struct dma_buf *dmabuf, struct sg_table *sgt)
 {
@@ -853,7 +987,7 @@ struct drm_gem_object *msm_gem_import(struct drm_device *dev,
 
 	size = PAGE_ALIGN(dmabuf->size);
 
-	ret = msm_gem_new_impl(dev, size, MSM_BO_WC, dmabuf->resv, &obj);
+	ret = msm_gem_new_impl(dev, size, MSM_BO_WC, dmabuf->resv, &obj, false);
 	if (ret)
 		goto fail;
 
@@ -862,17 +996,22 @@ struct drm_gem_object *msm_gem_import(struct drm_device *dev,
 	npages = size / PAGE_SIZE;
 
 	msm_obj = to_msm_bo(obj);
+	mutex_lock(&msm_obj->lock);
 	msm_obj->sgt = sgt;
 	msm_obj->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
 	if (!msm_obj->pages) {
+		mutex_unlock(&msm_obj->lock);
 		ret = -ENOMEM;
 		goto fail;
 	}
 
 	ret = drm_prime_sg_to_page_addr_arrays(sgt, msm_obj->pages, NULL, npages);
-	if (ret)
+	if (ret) {
+		mutex_unlock(&msm_obj->lock);
 		goto fail;
+	}
 
+	mutex_unlock(&msm_obj->lock);
 	return obj;
 
 fail:
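Caller-side sketch (illustrative only, not part of the diff above; the helper name example_pin_and_map and the PAGE_SIZE-sized allocation are invented for illustration). It shows the usage pattern this change enables: a buffer is pinned into an explicit msm_gem_address_space instead of an integer id, and the caller no longer holds dev->struct_mutex, since msm_gem_new(), msm_gem_get_iova(), msm_gem_get_vaddr() and msm_gem_put_vaddr() now serialize on the per-object msm_obj->lock internally.

#include "msm_drv.h"
#include "msm_gem.h"

/* Hypothetical helper, shown only to illustrate the reworked API. */
static int example_pin_and_map(struct drm_device *dev,
		struct msm_gem_address_space *aspace)
{
	struct drm_gem_object *obj;
	uint64_t iova;
	void *vaddr;
	int ret;

	/* No struct_mutex held here: msm_gem_new() only takes it briefly
	 * around the inactive_list insertion.
	 */
	obj = msm_gem_new(dev, PAGE_SIZE, MSM_BO_WC);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	/* Pin into the given address space; serialized by msm_obj->lock. */
	ret = msm_gem_get_iova(obj, aspace, &iova);
	if (ret)
		goto out;

	/* CPU mapping; returns ERR_PTR(-EBUSY) if the object was madvised
	 * DONTNEED in the meantime.
	 */
	vaddr = msm_gem_get_vaddr(obj);
	if (IS_ERR(vaddr)) {
		ret = PTR_ERR(vaddr);
		goto out;
	}

	/* ... use iova for the GPU and vaddr for the CPU ... */

	msm_gem_put_vaddr(obj);
out:
	drm_gem_object_unreference_unlocked(obj);
	return ret;
}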