author    Dave Airlie <airlied@redhat.com>  2023-03-14 05:17:27 +0300
committer Dave Airlie <airlied@redhat.com>  2023-03-14 05:18:54 +0300
commit    faf0d83e103e38e8bf7cc4e56da1a2edb9dfdf74
tree      5b0b838b0a7ac085d408e68207fe2748e2360357 /drivers/gpu/drm/drm_gem_shmem_helper.c
parent    eeac8ede17557680855031c6f305ece2378af326
parent    9228742caf899fa72230dd8da19ca4c7528badb8
Merge tag 'drm-misc-next-2023-03-07' of git://anongit.freedesktop.org/drm/drm-misc into drm-next
drm-misc-next for v6.4-rc1:
Note: only changes since the pull request from 2023-02-23 are included here.
UAPI Changes:
- Convert rockchip bindings to YAML.
- Constify kobj_type structure in dma-buf.
- FBDEV cmdline parser fixes and other small fbdev fixes for mode parsing.
Cross-subsystem Changes:
- Add Neil Armstrong as Linaro maintainer.
- Actually signal the private stub dma-fence.
Core Changes:
- Add a function for adding a syncobj dependency to sched_job and use it in
panfrost and v3d (see the sketch after this list).
- Improve DisplayID 2.0 topology parsing and EDID parsing in general.
- Add a GEM eviction function and callback for generic GEM shrinker purposes.
- Prepare to convert the shmem helper to use the GEM reservation lock instead
of its own locking (the actual conversion commit has been reverted for now).
- Move the suballocator from radeon and amdgpu drivers to core in preparation
for Xe.
- Assorted small fixes and documentation.
- Fixes to HPD polling.
- Assorted small fixes in simpledrm, bridge, accel, shmem-helper,
and the selftest of format-helper.
- Remove the dummy resource when a TTM BO is created and during pipelined
gutting; fix all drivers to accept a NULL ttm_bo->resource.
- Prevent moving of pinned BOs in the TTM core.
- Set drm panel-bridge orientation before connector is registered.
- Remove dumb_destroy callback.
- Add documentation for the GEM_CLOSE, PRIME_HANDLE_TO_FD, PRIME_FD_TO_HANDLE,
and GETFB2 ioctls.
- Add atomic enable_plane callback, use it in ast, mgag200, tidss.
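A hedged sketch of the syncobj helper mentioned above: resolving a userspace
syncobj handle into a scheduler dependency, roughly as the panfrost and v3d
submit paths now do. The helper name is the one added by this series;
example_add_in_fence and its parameters are illustrative, so check the exact
signature against include/drm/gpu_scheduler.h.

#include <drm/drm_file.h>
#include <drm/gpu_scheduler.h>

/*
 * Sketch only: record the fence behind a userspace syncobj handle as a
 * dependency of @job; the scheduler waits for it before running the job.
 */
static int example_add_in_fence(struct drm_sched_job *job,
				struct drm_file *file_priv,
				u32 syncobj_handle)
{
	/* point 0 selects the binary (non-timeline) syncobj payload. */
	return drm_sched_job_add_syncobj_dependency(job, file_priv,
						    syncobj_handle, 0);
}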
Driver Changes:
- Use drm_gem_objects_lookup in vc4 (see the sketch after this list).
- Assorted small fixes to virtio, ast, bridge/tc358762, meson, nouveau.
- Allow virtio KMS to be disabled and compiled out.
- Add Radxa 8/10HD and Samsung AMS495QA01 panels.
- Fix ivpu compiler errors.
- Assorted fixes to drm/panel, malidp, rockchip, ivpu, amdgpu, vgem,
nouveau, vc4.
- Assorted cleanups, simplifications and fixes to vmwgfx.
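For the vc4 lookup change above, a hedged sketch of what the shared helper
does. drm_gem_objects_lookup is the core helper vc4 switches to;
example_lookup_bos and its parameters are illustrative.

#include <linux/kernel.h>
#include <drm/drm_gem.h>

/*
 * Sketch only: bulk-resolve an array of GEM handles from userspace.
 * On success the helper allocates *objs and takes a reference on each
 * object; the caller drops them with drm_gem_object_put() when done.
 */
static int example_lookup_bos(struct drm_file *file_priv,
			      u64 user_handles, u32 count,
			      struct drm_gem_object ***objs)
{
	return drm_gem_objects_lookup(file_priv,
				      u64_to_user_ptr(user_handles),
				      count, objs);
}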
Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/ac1f5186-54bb-02f4-ac56-907f5b76f3de@linux.intel.com
Diffstat (limited to 'drivers/gpu/drm/drm_gem_shmem_helper.c')
 drivers/gpu/drm/drm_gem_shmem_helper.c | 65
 1 file changed, 36 insertions(+), 29 deletions(-)
diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c
index 75185a960fc4..9b0d540ff4a8 100644
--- a/drivers/gpu/drm/drm_gem_shmem_helper.c
+++ b/drivers/gpu/drm/drm_gem_shmem_helper.c
@@ -141,7 +141,7 @@ void drm_gem_shmem_free(struct drm_gem_shmem_object *shmem)
 {
 	struct drm_gem_object *obj = &shmem->base;
 
-	WARN_ON(shmem->vmap_use_count);
+	drm_WARN_ON(obj->dev, shmem->vmap_use_count);
 
 	if (obj->import_attach) {
 		drm_prime_gem_destroy(obj, shmem->sgt);
@@ -156,7 +156,7 @@ void drm_gem_shmem_free(struct drm_gem_shmem_object *shmem)
 			drm_gem_shmem_put_pages(shmem);
 	}
 
-	WARN_ON(shmem->pages_use_count);
+	drm_WARN_ON(obj->dev, shmem->pages_use_count);
 
 	drm_gem_object_release(obj);
 	mutex_destroy(&shmem->pages_lock);
@@ -175,7 +175,8 @@ static int drm_gem_shmem_get_pages_locked(struct drm_gem_shmem_object *shmem)
 
 	pages = drm_gem_get_pages(obj);
 	if (IS_ERR(pages)) {
-		DRM_DEBUG_KMS("Failed to get pages (%ld)\n", PTR_ERR(pages));
+		drm_dbg_kms(obj->dev, "Failed to get pages (%ld)\n",
+			    PTR_ERR(pages));
 		shmem->pages_use_count = 0;
 		return PTR_ERR(pages);
 	}
@@ -207,9 +208,10 @@ static int drm_gem_shmem_get_pages_locked(struct drm_gem_shmem_object *shmem)
  */
 int drm_gem_shmem_get_pages(struct drm_gem_shmem_object *shmem)
 {
+	struct drm_gem_object *obj = &shmem->base;
 	int ret;
 
-	WARN_ON(shmem->base.import_attach);
+	drm_WARN_ON(obj->dev, obj->import_attach);
 
 	ret = mutex_lock_interruptible(&shmem->pages_lock);
 	if (ret)
@@ -225,7 +227,7 @@ static void drm_gem_shmem_put_pages_locked(struct drm_gem_shmem_object *shmem)
 {
 	struct drm_gem_object *obj = &shmem->base;
 
-	if (WARN_ON_ONCE(!shmem->pages_use_count))
+	if (drm_WARN_ON_ONCE(obj->dev, !shmem->pages_use_count))
 		return;
 
 	if (--shmem->pages_use_count > 0)
@@ -268,7 +270,9 @@ EXPORT_SYMBOL(drm_gem_shmem_put_pages);
  */
 int drm_gem_shmem_pin(struct drm_gem_shmem_object *shmem)
 {
-	WARN_ON(shmem->base.import_attach);
+	struct drm_gem_object *obj = &shmem->base;
+
+	drm_WARN_ON(obj->dev, obj->import_attach);
 
 	return drm_gem_shmem_get_pages(shmem);
 }
@@ -283,7 +287,9 @@ EXPORT_SYMBOL(drm_gem_shmem_pin);
  */
 void drm_gem_shmem_unpin(struct drm_gem_shmem_object *shmem)
 {
-	WARN_ON(shmem->base.import_attach);
+	struct drm_gem_object *obj = &shmem->base;
+
+	drm_WARN_ON(obj->dev, obj->import_attach);
 
 	drm_gem_shmem_put_pages(shmem);
 }
@@ -295,24 +301,22 @@ static int drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem,
 	struct drm_gem_object *obj = &shmem->base;
 	int ret = 0;
 
-	if (shmem->vmap_use_count++ > 0) {
-		iosys_map_set_vaddr(map, shmem->vaddr);
-		return 0;
-	}
-
 	if (obj->import_attach) {
 		ret = dma_buf_vmap(obj->import_attach->dmabuf, map);
 		if (!ret) {
-			if (WARN_ON(map->is_iomem)) {
+			if (drm_WARN_ON(obj->dev, map->is_iomem)) {
 				dma_buf_vunmap(obj->import_attach->dmabuf, map);
-				ret = -EIO;
-				goto err_put_pages;
+				return -EIO;
 			}
-			shmem->vaddr = map->vaddr;
 		}
 	} else {
 		pgprot_t prot = PAGE_KERNEL;
 
+		if (shmem->vmap_use_count++ > 0) {
+			iosys_map_set_vaddr(map, shmem->vaddr);
+			return 0;
+		}
+
 		ret = drm_gem_shmem_get_pages(shmem);
 		if (ret)
 			goto err_zero_use;
@@ -328,7 +332,7 @@ static int drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem,
 	}
 
 	if (ret) {
-		DRM_DEBUG_KMS("Failed to vmap pages, error %d\n", ret);
+		drm_dbg_kms(obj->dev, "Failed to vmap pages, error %d\n", ret);
 		goto err_put_pages;
 	}
 
@@ -378,15 +382,15 @@ static void drm_gem_shmem_vunmap_locked(struct drm_gem_shmem_object *shmem,
 {
 	struct drm_gem_object *obj = &shmem->base;
 
-	if (WARN_ON_ONCE(!shmem->vmap_use_count))
-		return;
-
-	if (--shmem->vmap_use_count > 0)
-		return;
-
 	if (obj->import_attach) {
 		dma_buf_vunmap(obj->import_attach->dmabuf, map);
 	} else {
+		if (drm_WARN_ON_ONCE(obj->dev, !shmem->vmap_use_count))
+			return;
+
+		if (--shmem->vmap_use_count > 0)
+			return;
+
 		vunmap(shmem->vaddr);
 		drm_gem_shmem_put_pages(shmem);
 	}
@@ -461,7 +465,7 @@ void drm_gem_shmem_purge_locked(struct drm_gem_shmem_object *shmem)
 	struct drm_gem_object *obj = &shmem->base;
 	struct drm_device *dev = obj->dev;
 
-	WARN_ON(!drm_gem_shmem_is_purgeable(shmem));
+	drm_WARN_ON(obj->dev, !drm_gem_shmem_is_purgeable(shmem));
 
 	dma_unmap_sgtable(dev->dev, shmem->sgt, DMA_BIDIRECTIONAL, 0);
 	sg_free_table(shmem->sgt);
@@ -550,7 +554,7 @@ static vm_fault_t drm_gem_shmem_fault(struct vm_fault *vmf)
 	mutex_lock(&shmem->pages_lock);
 
 	if (page_offset >= num_pages ||
-	    WARN_ON_ONCE(!shmem->pages) ||
+	    drm_WARN_ON_ONCE(obj->dev, !shmem->pages) ||
 	    shmem->madv < 0) {
 		ret = VM_FAULT_SIGBUS;
 	} else {
@@ -569,7 +573,7 @@ static void drm_gem_shmem_vm_open(struct vm_area_struct *vma)
 	struct drm_gem_object *obj = vma->vm_private_data;
 	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
 
-	WARN_ON(shmem->base.import_attach);
+	drm_WARN_ON(obj->dev, obj->import_attach);
 
 	mutex_lock(&shmem->pages_lock);
 
@@ -578,7 +582,7 @@ static void drm_gem_shmem_vm_open(struct vm_area_struct *vma)
 	 * mmap'd, vm_open() just grabs an additional reference for the new
 	 * mm the vma is getting copied into (ie. on fork()).
 	 */
-	if (!WARN_ON_ONCE(!shmem->pages_use_count))
+	if (!drm_WARN_ON_ONCE(obj->dev, !shmem->pages_use_count))
 		shmem->pages_use_count++;
 
 	mutex_unlock(&shmem->pages_lock);
@@ -648,6 +652,9 @@ EXPORT_SYMBOL_GPL(drm_gem_shmem_mmap);
 void drm_gem_shmem_print_info(const struct drm_gem_shmem_object *shmem,
 			      struct drm_printer *p, unsigned int indent)
 {
+	if (shmem->base.import_attach)
+		return;
+
 	drm_printf_indent(p, indent, "pages_use_count=%u\n", shmem->pages_use_count);
 	drm_printf_indent(p, indent, "vmap_use_count=%u\n", shmem->vmap_use_count);
 	drm_printf_indent(p, indent, "vaddr=%p\n", shmem->vaddr);
@@ -672,7 +679,7 @@ struct sg_table *drm_gem_shmem_get_sg_table(struct drm_gem_shmem_object *shmem)
 {
 	struct drm_gem_object *obj = &shmem->base;
 
-	WARN_ON(shmem->base.import_attach);
+	drm_WARN_ON(obj->dev, obj->import_attach);
 
 	return drm_prime_pages_to_sg(obj->dev, shmem->pages, obj->size >> PAGE_SHIFT);
 }
@@ -687,7 +694,7 @@ static struct sg_table *drm_gem_shmem_get_pages_sgt_locked(struct drm_gem_shmem_
 	if (shmem->sgt)
 		return shmem->sgt;
 
-	WARN_ON(obj->import_attach);
+	drm_WARN_ON(obj->dev, obj->import_attach);
 
 	ret = drm_gem_shmem_get_pages_locked(shmem);
 	if (ret)
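The recurring change in the diff above is mechanical: plain WARN_ON()/
WARN_ON_ONCE() and DRM_DEBUG_KMS() calls become their device-aware drm_*
counterparts, so warnings and debug messages name the DRM device they came
from, which matters on systems with more than one GPU. A minimal before/after
sketch, using only helpers that appear in the hunks (example_free_check is a
hypothetical function):

#include <linux/bug.h>
#include <drm/drm_print.h>
#include <drm/drm_gem_shmem_helper.h>

static void example_free_check(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;

	/* Before: the splat does not say which device it belongs to. */
	WARN_ON(shmem->vmap_use_count);

	/* After: the warning is prefixed with obj->dev's device name. */
	drm_WARN_ON(obj->dev, shmem->vmap_use_count);
}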