author		Linus Torvalds <torvalds@linux-foundation.org>	2012-06-01 05:01:18 +0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2012-06-01 05:01:18 +0400
commit		9fdadb2cbaf4b482dfd6086e8bd3d2db071a1702 (patch)
tree		4a6e11e3379ee93c1992cfd595872dded1fd2fac /drivers
parent		76f901eb4659779ecacd0e4eba49f55442daef53 (diff)
parent		63bc620b45af8c743ac291c8725933278c712692 (diff)
download	linux-9fdadb2cbaf4b482dfd6086e8bd3d2db071a1702.tar.xz
Merge branch 'drm-prime-vmap' of git://people.freedesktop.org/~airlied/linux
Pull drm prime mmap/vmap code from Dave Airlie:
"As mentioned previously these are the extra bits of drm that relied on
the dma-buf pull to work, the first three just stub out the mmap
interface, and the next set provide vmap export to i915/radeon/nouveau
and vmap import to udl."
* 'drm-prime-vmap' of git://people.freedesktop.org/~airlied/linux:
radeon: add radeon prime vmap support.
nouveau: add vmap support to nouveau prime support
udl: support vmapping imported dma-bufs
i915: add dma-buf vmap support for exporting vmapped buffer
radeon: add stub dma-buf mmap functionality
nouveau: add stub dma-buf mmap functionality.
i915: add stub dma-buf mmap callback.
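The common thread in these patches is the pair of new dma_buf_ops callbacks, .vmap and .vunmap, which let an importing driver ask the exporting driver for a contiguous kernel-virtual mapping of a shared buffer. Below is a minimal sketch of the importer-side call sequence, assuming the 3.5-era dma-buf API (four-argument dma_buf_begin/end_cpu_access, NULL return from dma_buf_vmap on failure); the helper name and the memset standing in for real CPU access are illustrative only, while udl_gem_vmap() in the diff below is the in-tree equivalent.

#include <linux/dma-buf.h>
#include <linux/dma-direction.h>
#include <linux/err.h>
#include <linux/string.h>

static int example_cpu_fill(struct dma_buf *buf, size_t size)
{
	void *vaddr;
	int ret;

	/* Tell the exporter the CPU is about to touch the buffer. */
	ret = dma_buf_begin_cpu_access(buf, 0, size, DMA_BIDIRECTIONAL);
	if (ret)
		return ret;

	/* Calls into the exporter's new .vmap hook. */
	vaddr = dma_buf_vmap(buf);
	if (!vaddr) {
		dma_buf_end_cpu_access(buf, 0, size, DMA_BIDIRECTIONAL);
		return -ENOMEM;
	}

	memset(vaddr, 0, size);	/* stand-in for real CPU access */

	/* Calls into the exporter's new .vunmap hook. */
	dma_buf_vunmap(buf, vaddr);
	dma_buf_end_cpu_access(buf, 0, size, DMA_BIDIRECTIONAL);
	return 0;
}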
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/gpu/drm/i915/i915_drv.h		 3
-rw-r--r--	drivers/gpu/drm/i915/i915_gem_dmabuf.c	61
-rw-r--r--	drivers/gpu/drm/nouveau/nouveau_drv.h	 3
-rw-r--r--	drivers/gpu/drm/nouveau/nouveau_prime.c	45
-rw-r--r--	drivers/gpu/drm/radeon/radeon.h		 3
-rw-r--r--	drivers/gpu/drm/radeon/radeon_prime.c	44
-rw-r--r--	drivers/gpu/drm/udl/udl_fb.c		13
-rw-r--r--	drivers/gpu/drm/udl/udl_gem.c		25
8 files changed, 192 insertions, 5 deletions
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 377c21f531e4..c9cfc67c2cf5 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -942,6 +942,9 @@ struct drm_i915_gem_object {
 
 	/* prime dma-buf support */
 	struct sg_table *sg_table;
+	void *dma_buf_vmapping;
+	int vmapping_count;
+
 	/**
 	 * Used for performing relocations during execbuffer insertion.
 	 */
diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
index 8e269178d6a5..aa308e1337db 100644
--- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
@@ -74,6 +74,59 @@ static void i915_gem_dmabuf_release(struct dma_buf *dma_buf)
 	}
 }
 
+static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf)
+{
+	struct drm_i915_gem_object *obj = dma_buf->priv;
+	struct drm_device *dev = obj->base.dev;
+	int ret;
+
+	ret = i915_mutex_lock_interruptible(dev);
+	if (ret)
+		return ERR_PTR(ret);
+
+	if (obj->dma_buf_vmapping) {
+		obj->vmapping_count++;
+		goto out_unlock;
+	}
+
+	if (!obj->pages) {
+		ret = i915_gem_object_get_pages_gtt(obj, __GFP_NORETRY | __GFP_NOWARN);
+		if (ret) {
+			mutex_unlock(&dev->struct_mutex);
+			return ERR_PTR(ret);
+		}
+	}
+
+	obj->dma_buf_vmapping = vmap(obj->pages, obj->base.size / PAGE_SIZE, 0, PAGE_KERNEL);
+	if (!obj->dma_buf_vmapping) {
+		DRM_ERROR("failed to vmap object\n");
+		goto out_unlock;
+	}
+
+	obj->vmapping_count = 1;
+out_unlock:
+	mutex_unlock(&dev->struct_mutex);
+	return obj->dma_buf_vmapping;
+}
+
+static void i915_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
+{
+	struct drm_i915_gem_object *obj = dma_buf->priv;
+	struct drm_device *dev = obj->base.dev;
+	int ret;
+
+	ret = i915_mutex_lock_interruptible(dev);
+	if (ret)
+		return;
+
+	--obj->vmapping_count;
+	if (obj->vmapping_count == 0) {
+		vunmap(obj->dma_buf_vmapping);
+		obj->dma_buf_vmapping = NULL;
+	}
+	mutex_unlock(&dev->struct_mutex);
+}
+
 static void *i915_gem_dmabuf_kmap_atomic(struct dma_buf *dma_buf, unsigned long page_num)
 {
 	return NULL;
@@ -93,6 +146,11 @@ static void i915_gem_dmabuf_kunmap(struct dma_buf *dma_buf, unsigned long page_n
 
 }
 
+static int i915_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
+{
+	return -EINVAL;
+}
+
 static const struct dma_buf_ops i915_dmabuf_ops = {
 	.map_dma_buf = i915_gem_map_dma_buf,
 	.unmap_dma_buf = i915_gem_unmap_dma_buf,
@@ -101,6 +159,9 @@ static const struct dma_buf_ops i915_dmabuf_ops = {
 	.kmap_atomic = i915_gem_dmabuf_kmap_atomic,
 	.kunmap = i915_gem_dmabuf_kunmap,
 	.kunmap_atomic = i915_gem_dmabuf_kunmap_atomic,
+	.mmap = i915_gem_dmabuf_mmap,
+	.vmap = i915_gem_dmabuf_vmap,
+	.vunmap = i915_gem_dmabuf_vunmap,
 };
 
 struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index 634d222c93de..8613cb23808c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -123,6 +123,9 @@ struct nouveau_bo {
 
 	struct drm_gem_object *gem;
 	int pin_refcnt;
+
+	struct ttm_bo_kmap_obj dma_buf_vmap;
+	int vmapping_count;
 };
 
 #define nouveau_bo_tile_layout(nvbo) \
diff --git a/drivers/gpu/drm/nouveau/nouveau_prime.c b/drivers/gpu/drm/nouveau/nouveau_prime.c
index c58aab7370c5..a89240e5fb29 100644
--- a/drivers/gpu/drm/nouveau/nouveau_prime.c
+++ b/drivers/gpu/drm/nouveau/nouveau_prime.c
@@ -61,6 +61,48 @@ static void nouveau_gem_kunmap(struct dma_buf *dma_buf, unsigned long page_num,
 
 }
 
+static int nouveau_gem_prime_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
+{
+	return -EINVAL;
+}
+
+static void *nouveau_gem_prime_vmap(struct dma_buf *dma_buf)
+{
+	struct nouveau_bo *nvbo = dma_buf->priv;
+	struct drm_device *dev = nvbo->gem->dev;
+	int ret;
+
+	mutex_lock(&dev->struct_mutex);
+	if (nvbo->vmapping_count) {
+		nvbo->vmapping_count++;
+		goto out_unlock;
+	}
+
+	ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.num_pages,
+			  &nvbo->dma_buf_vmap);
+	if (ret) {
+		mutex_unlock(&dev->struct_mutex);
+		return ERR_PTR(ret);
+	}
+	nvbo->vmapping_count = 1;
+out_unlock:
+	mutex_unlock(&dev->struct_mutex);
+	return nvbo->dma_buf_vmap.virtual;
+}
+
+static void nouveau_gem_prime_vunmap(struct dma_buf *dma_buf, void *vaddr)
+{
+	struct nouveau_bo *nvbo = dma_buf->priv;
+	struct drm_device *dev = nvbo->gem->dev;
+
+	mutex_lock(&dev->struct_mutex);
+	nvbo->vmapping_count--;
+	if (nvbo->vmapping_count == 0) {
+		ttm_bo_kunmap(&nvbo->dma_buf_vmap);
+	}
+	mutex_unlock(&dev->struct_mutex);
+}
+
 static const struct dma_buf_ops nouveau_dmabuf_ops =  {
 	.map_dma_buf = nouveau_gem_map_dma_buf,
 	.unmap_dma_buf = nouveau_gem_unmap_dma_buf,
@@ -69,6 +111,9 @@ static const struct dma_buf_ops nouveau_dmabuf_ops =  {
 	.kmap_atomic = nouveau_gem_kmap_atomic,
 	.kunmap = nouveau_gem_kunmap,
 	.kunmap_atomic = nouveau_gem_kunmap_atomic,
+	.mmap = nouveau_gem_prime_mmap,
+	.vmap = nouveau_gem_prime_vmap,
+	.vunmap = nouveau_gem_prime_vunmap,
 };
 
 static int
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 492654f8ee74..2e24022b389a 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -346,6 +346,9 @@ struct radeon_bo {
 	/* Constant after initialization */
 	struct radeon_device	*rdev;
 	struct drm_gem_object	gem_base;
+
+	struct ttm_bo_kmap_obj	dma_buf_vmap;
+	int			vmapping_count;
 };
 #define gem_to_radeon_bo(gobj) container_of((gobj), struct radeon_bo, gem_base)
diff --git a/drivers/gpu/drm/radeon/radeon_prime.c b/drivers/gpu/drm/radeon/radeon_prime.c
index b8f835d8ecb4..8ddab4c76710 100644
--- a/drivers/gpu/drm/radeon/radeon_prime.c
+++ b/drivers/gpu/drm/radeon/radeon_prime.c
@@ -85,6 +85,47 @@ static void radeon_gem_kunmap(struct dma_buf *dma_buf, unsigned long page_num, v
 
 }
 
+static int radeon_gem_prime_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
+{
+	return -EINVAL;
+}
+
+static void *radeon_gem_prime_vmap(struct dma_buf *dma_buf)
+{
+	struct radeon_bo *bo = dma_buf->priv;
+	struct drm_device *dev = bo->rdev->ddev;
+	int ret;
+
+	mutex_lock(&dev->struct_mutex);
+	if (bo->vmapping_count) {
+		bo->vmapping_count++;
+		goto out_unlock;
+	}
+
+	ret = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages,
+			  &bo->dma_buf_vmap);
+	if (ret) {
+		mutex_unlock(&dev->struct_mutex);
+		return ERR_PTR(ret);
+	}
+	bo->vmapping_count = 1;
+out_unlock:
+	mutex_unlock(&dev->struct_mutex);
+	return bo->dma_buf_vmap.virtual;
+}
+
+static void radeon_gem_prime_vunmap(struct dma_buf *dma_buf, void *vaddr)
+{
+	struct radeon_bo *bo = dma_buf->priv;
+	struct drm_device *dev = bo->rdev->ddev;
+
+	mutex_lock(&dev->struct_mutex);
+	bo->vmapping_count--;
+	if (bo->vmapping_count == 0) {
+		ttm_bo_kunmap(&bo->dma_buf_vmap);
+	}
+	mutex_unlock(&dev->struct_mutex);
+}
 const static struct dma_buf_ops radeon_dmabuf_ops =  {
 	.map_dma_buf = radeon_gem_map_dma_buf,
 	.unmap_dma_buf = radeon_gem_unmap_dma_buf,
@@ -93,6 +134,9 @@ const static struct dma_buf_ops radeon_dmabuf_ops =  {
 	.kmap_atomic = radeon_gem_kmap_atomic,
 	.kunmap = radeon_gem_kunmap,
 	.kunmap_atomic = radeon_gem_kunmap_atomic,
+	.mmap = radeon_gem_prime_mmap,
+	.vmap = radeon_gem_prime_vmap,
+	.vunmap = radeon_gem_prime_vunmap,
 };
 
 static int radeon_prime_create(struct drm_device *dev,
diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c
index a029ee39b0c5..ce9a61179925 100644
--- a/drivers/gpu/drm/udl/udl_fb.c
+++ b/drivers/gpu/drm/udl/udl_fb.c
@@ -156,8 +156,17 @@ int udl_handle_damage(struct udl_framebuffer *fb, int x, int y,
 	if (!fb->active_16)
 		return 0;
 
-	if (!fb->obj->vmapping)
-		udl_gem_vmap(fb->obj);
+	if (!fb->obj->vmapping) {
+		ret = udl_gem_vmap(fb->obj);
+		if (ret == -ENOMEM) {
+			DRM_ERROR("failed to vmap fb\n");
+			return 0;
+		}
+		if (!fb->obj->vmapping) {
+			DRM_ERROR("failed to vmapping\n");
+			return 0;
+		}
+	}
 
 	start_cycles = get_cycles();
 
diff --git a/drivers/gpu/drm/udl/udl_gem.c b/drivers/gpu/drm/udl/udl_gem.c
index 97acc9c6c95b..7bd65bdd15a8 100644
--- a/drivers/gpu/drm/udl/udl_gem.c
+++ b/drivers/gpu/drm/udl/udl_gem.c
@@ -180,6 +180,18 @@ int udl_gem_vmap(struct udl_gem_object *obj)
 	int page_count = obj->base.size / PAGE_SIZE;
 	int ret;
 
+	if (obj->base.import_attach) {
+		ret = dma_buf_begin_cpu_access(obj->base.import_attach->dmabuf,
+					       0, obj->base.size, DMA_BIDIRECTIONAL);
+		if (ret)
+			return -EINVAL;
+
+		obj->vmapping = dma_buf_vmap(obj->base.import_attach->dmabuf);
+		if (!obj->vmapping)
+			return -ENOMEM;
+		return 0;
+	}
+
 	ret = udl_gem_get_pages(obj, GFP_KERNEL);
 	if (ret)
 		return ret;
@@ -192,6 +204,13 @@ int udl_gem_vmap(struct udl_gem_object *obj)
 
 void udl_gem_vunmap(struct udl_gem_object *obj)
 {
+	if (obj->base.import_attach) {
+		dma_buf_vunmap(obj->base.import_attach->dmabuf, obj->vmapping);
+		dma_buf_end_cpu_access(obj->base.import_attach->dmabuf, 0,
+				       obj->base.size, DMA_BIDIRECTIONAL);
+		return;
+	}
+
 	if (obj->vmapping)
 		vunmap(obj->vmapping);
 
@@ -202,12 +221,12 @@ void udl_gem_free_object(struct drm_gem_object *gem_obj)
 {
 	struct udl_gem_object *obj = to_udl_bo(gem_obj);
 
-	if (gem_obj->import_attach)
-		drm_prime_gem_destroy(gem_obj, obj->sg);
-
 	if (obj->vmapping)
 		udl_gem_vunmap(obj);
 
+	if (gem_obj->import_attach)
+		drm_prime_gem_destroy(gem_obj, obj->sg);
+
 	if (obj->pages)
 		udl_gem_put_pages(obj);
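A note on the exporter side: i915, radeon and nouveau all adopt the same reference-counted caching scheme under the device's struct_mutex, so the first dma_buf_vmap() call builds the kernel mapping, later callers share it, and the last dma_buf_vunmap() tears it down. Distilled into generic form below; the struct, field and helper names are placeholders rather than any driver's actual API, and the backing page array is assumed to be already populated.

#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/vmalloc.h>

struct vmap_cache {
	struct mutex lock;	/* plays the role of dev->struct_mutex */
	struct page **pages;	/* backing pages, assumed populated */
	unsigned int npages;
	void *vaddr;		/* cached kernel mapping */
	int count;		/* outstanding vmap users */
};

static void *vmap_cache_get(struct vmap_cache *vc)
{
	mutex_lock(&vc->lock);
	if (!vc->count) {
		/* First user builds the mapping: vmap() here, where
		 * radeon and nouveau use ttm_bo_kmap() instead. */
		vc->vaddr = vmap(vc->pages, vc->npages, 0, PAGE_KERNEL);
		if (!vc->vaddr) {
			mutex_unlock(&vc->lock);
			return NULL;
		}
	}
	vc->count++;
	mutex_unlock(&vc->lock);
	return vc->vaddr;
}

static void vmap_cache_put(struct vmap_cache *vc)
{
	mutex_lock(&vc->lock);
	if (--vc->count == 0) {
		/* Last user gone: drop the mapping. */
		vunmap(vc->vaddr);
		vc->vaddr = NULL;
	}
	mutex_unlock(&vc->lock);
}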