author		Thomas Hellstrom <thellstrom@vmware.com>	2018-06-19 16:02:16 +0300
committer	Thomas Hellstrom <thellstrom@vmware.com>	2018-07-03 21:33:30 +0300
commit		f1d34bfd70b1b4543a139ea28bad4c001c5f413d (patch)
tree		0d3fb3ee166a2d81f4f7e7e2338dd3c625929554 /drivers
parent		07c13bb78c8b8a9cb6ee169659528945038d5e85 (diff)
drm/vmwgfx: Replace vmw_dma_buffer with vmw_buffer_object
Initially, VMware buffer objects were only used as DMA buffers, so the name
DMA buffer was a natural one. However, they are now also used as dumb
buffers and as MOBs backing guest-backed objects, so renaming them to
buffer objects is the logical step, particularly since there is already a
dma-buf subsystem in the kernel where a DMA buffer means something
completely different.
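For context, the clash is with the kernel-wide dma-buf sharing framework. A minimal sketch of the two unrelated things that both answered to "DMA buffer" (the vmw_buffer_object fields are taken from the vmwgfx_drv.h hunk below; the dma-buf side is the generic kernel API, shown only for contrast):

	#include <linux/dma-buf.h>	/* struct dma_buf: cross-device buffer sharing handle */

	/* vmwgfx-private object after this patch; just a TTM-managed allocation. */
	struct vmw_buffer_object {
		struct ttm_buffer_object base;
		struct list_head res_list;
		s32 pin_count;
		/* ... */
	};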
This also renames the user-space API structures and IOCTL names
correspondingly, but the old names remain defined for now and the ABI
hasn't changed.
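The diffstat below is limited to 'drivers', so the user-space header side of that statement is not visible on this page. Purely as an illustration of the pattern (names mirror the commit, the value is assumed, not taken from this patch), an ABI-preserving rename in a uapi header usually comes down to aliasing defines:

	/* Hypothetical sketch only; the real defines live in
	 * include/uapi/drm/vmwgfx_drm.h, outside this diffstat. */
	#define DRM_VMW_ALLOC_BO	1			/* new IOCTL name, assumed value */
	#define DRM_VMW_ALLOC_DMABUF	DRM_VMW_ALLOC_BO	/* old name kept, ABI unchanged */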
There are a couple of minor style changes to make checkpatch happy.
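Both kinds of checkpatch fix can be seen in the hunks below, e.g. bare 'unsigned' becoming 'unsigned int' and the space before a function-pointer parameter list being dropped:

	-				unsigned flags, unsigned color,
	+				unsigned int flags, unsigned int color,

	-			   void (*bo_free) (struct ttm_buffer_object *bo));
	+		       void (*bo_free)(struct ttm_buffer_object *bo));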
Signed-off-by: Thomas Hellstrom <thellstrom@vmware.com>
Reviewed-by: Sinclair Yeh <syeh@vmware.com>
Reviewed-by: Deepak Rawat <drawat@vmware.com>
Diffstat (limited to 'drivers')
19 files changed, 567 insertions(+), 568 deletions(-)
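Reduced to a single call site, the rename looks like this (functions and types exactly as they appear in the hunks below; allocation and error handling elided):

	/* before */
	struct vmw_dma_buffer *vbo;
	ret = vmw_dmabuf_init(dev_priv, vbo, PAGE_SIZE,
			      &vmw_sys_ne_placement, false,
			      &vmw_dmabuf_bo_free);
	vmw_dmabuf_unreference(&vbo);

	/* after */
	struct vmw_buffer_object *vbo;
	ret = vmw_bo_init(dev_priv, vbo, PAGE_SIZE,
			  &vmw_sys_ne_placement, false,
			  &vmw_bo_bo_free);
	vmw_bo_unreference(&vbo);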
diff --git a/drivers/gpu/drm/vmwgfx/Makefile b/drivers/gpu/drm/vmwgfx/Makefile
index 794cc9d5c9b0..09b2aa08363e 100644
--- a/drivers/gpu/drm/vmwgfx/Makefile
+++ b/drivers/gpu/drm/vmwgfx/Makefile
@@ -1,9 +1,9 @@
 # SPDX-License-Identifier: GPL-2.0
 vmwgfx-y := vmwgfx_execbuf.o vmwgfx_gmr.o vmwgfx_kms.o vmwgfx_drv.o \
-	    vmwgfx_fb.o vmwgfx_ioctl.o vmwgfx_resource.o vmwgfx_buffer.o \
+	    vmwgfx_fb.o vmwgfx_ioctl.o vmwgfx_resource.o vmwgfx_ttm_buffer.o \
	    vmwgfx_fifo.o vmwgfx_irq.o vmwgfx_ldu.o vmwgfx_ttm_glue.o \
	    vmwgfx_overlay.o vmwgfx_marker.o vmwgfx_gmrid_manager.o \
-	    vmwgfx_fence.o vmwgfx_dmabuf.o vmwgfx_scrn.o vmwgfx_context.o \
+	    vmwgfx_fence.o vmwgfx_bo.o vmwgfx_scrn.o vmwgfx_context.o \
	    vmwgfx_surface.o vmwgfx_prime.o vmwgfx_mob.o vmwgfx_shader.o \
	    vmwgfx_cmdbuf_res.o vmwgfx_cmdbuf.o vmwgfx_stdu.o \
	    vmwgfx_cotable.o vmwgfx_so.o vmwgfx_binding.o vmwgfx_msg.o \
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
index d59d9dd16ebc..f26f658cccdb 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
@@ -32,7 +32,7 @@
 /**
- * vmw_dmabuf_pin_in_placement - Validate a buffer to placement.
+ * vmw_bo_pin_in_placement - Validate a buffer to placement.
  *
  * @dev_priv:  Driver private.
  * @buf:  DMA buffer to move.
@@ -42,10 +42,10 @@
  * Returns
  * -ERESTARTSYS if interrupted by a signal.
  */
-int vmw_dmabuf_pin_in_placement(struct vmw_private *dev_priv,
-				struct vmw_dma_buffer *buf,
-				struct ttm_placement *placement,
-				bool interruptible)
+int vmw_bo_pin_in_placement(struct vmw_private *dev_priv,
+			    struct vmw_buffer_object *buf,
+			    struct ttm_placement *placement,
+			    bool interruptible)
 {
 	struct ttm_operation_ctx ctx = {interruptible, false };
 	struct ttm_buffer_object *bo = &buf->base;
@@ -79,7 +79,7 @@ err:
 }

 /**
- * vmw_dmabuf_pin_in_vram_or_gmr - Move a buffer to vram or gmr.
+ * vmw_bo_pin_in_vram_or_gmr - Move a buffer to vram or gmr.
  *
  * This function takes the reservation_sem in write mode.
  * Flushes and unpins the query bo to avoid failures.
@@ -92,9 +92,9 @@ err:
  * Returns
  * -ERESTARTSYS if interrupted by a signal.
  */
-int vmw_dmabuf_pin_in_vram_or_gmr(struct vmw_private *dev_priv,
-				  struct vmw_dma_buffer *buf,
-				  bool interruptible)
+int vmw_bo_pin_in_vram_or_gmr(struct vmw_private *dev_priv,
+			      struct vmw_buffer_object *buf,
+			      bool interruptible)
 {
 	struct ttm_operation_ctx ctx = {interruptible, false };
 	struct ttm_buffer_object *bo = &buf->base;
@@ -134,7 +134,7 @@ err:
 }

 /**
- * vmw_dmabuf_pin_in_vram - Move a buffer to vram.
+ * vmw_bo_pin_in_vram - Move a buffer to vram.
  *
  * This function takes the reservation_sem in write mode.
  * Flushes and unpins the query bo to avoid failures.
@@ -146,16 +146,16 @@ err:
  * Returns
  * -ERESTARTSYS if interrupted by a signal.
  */
-int vmw_dmabuf_pin_in_vram(struct vmw_private *dev_priv,
-			   struct vmw_dma_buffer *buf,
-			   bool interruptible)
+int vmw_bo_pin_in_vram(struct vmw_private *dev_priv,
+		       struct vmw_buffer_object *buf,
+		       bool interruptible)
 {
-	return vmw_dmabuf_pin_in_placement(dev_priv, buf, &vmw_vram_placement,
-					   interruptible);
+	return vmw_bo_pin_in_placement(dev_priv, buf, &vmw_vram_placement,
+				       interruptible);
 }

 /**
- * vmw_dmabuf_pin_in_start_of_vram - Move a buffer to start of vram.
+ * vmw_bo_pin_in_start_of_vram - Move a buffer to start of vram.
  *
  * This function takes the reservation_sem in write mode.
  * Flushes and unpins the query bo to avoid failures.
@@ -167,9 +167,9 @@ int vmw_dmabuf_pin_in_vram(struct vmw_private *dev_priv,
  * Returns
  * -ERESTARTSYS if interrupted by a signal.
  */
-int vmw_dmabuf_pin_in_start_of_vram(struct vmw_private *dev_priv,
-				    struct vmw_dma_buffer *buf,
-				    bool interruptible)
+int vmw_bo_pin_in_start_of_vram(struct vmw_private *dev_priv,
+				struct vmw_buffer_object *buf,
+				bool interruptible)
 {
 	struct ttm_operation_ctx ctx = {interruptible, false };
 	struct ttm_buffer_object *bo = &buf->base;
@@ -226,7 +226,7 @@ err_unlock:
 }

 /**
- * vmw_dmabuf_unpin - Unpin the buffer given buffer, does not move the buffer.
+ * vmw_bo_unpin - Unpin the buffer given buffer, does not move the buffer.
  *
  * This function takes the reservation_sem in write mode.
  *
@@ -237,9 +237,9 @@ err_unlock:
  * Returns
  * -ERESTARTSYS if interrupted by a signal.
  */
-int vmw_dmabuf_unpin(struct vmw_private *dev_priv,
-		     struct vmw_dma_buffer *buf,
-		     bool interruptible)
+int vmw_bo_unpin(struct vmw_private *dev_priv,
+		 struct vmw_buffer_object *buf,
+		 bool interruptible)
 {
 	struct ttm_buffer_object *bo = &buf->base;
 	int ret;
@@ -288,7 +288,7 @@ void vmw_bo_get_guest_ptr(const struct ttm_buffer_object *bo,
  * @pin: Whether to pin or unpin.
  *
  */
-void vmw_bo_pin_reserved(struct vmw_dma_buffer *vbo, bool pin)
+void vmw_bo_pin_reserved(struct vmw_buffer_object *vbo, bool pin)
 {
 	struct ttm_operation_ctx ctx = { false, true };
 	struct ttm_place pl;
@@ -326,14 +326,14 @@ void vmw_bo_pin_reserved(struct vmw_dma_buffer *vbo, bool pin)

 /*
- * vmw_dma_buffer_unmap - Tear down a cached buffer object map.
+ * vmw_buffer_object_unmap - Tear down a cached buffer object map.
  *
  * @vbo: The buffer object whose map we are tearing down.
  *
  * This function tears down a cached map set up using
- * vmw_dma_buffer_map_and_cache().
+ * vmw_buffer_object_map_and_cache().
  */
-void vmw_dma_buffer_unmap(struct vmw_dma_buffer *vbo)
+void vmw_buffer_object_unmap(struct vmw_buffer_object *vbo)
 {
 	if (vbo->map.bo == NULL)
 		return;
@@ -343,7 +343,7 @@ void vmw_dma_buffer_unmap(struct vmw_dma_buffer *vbo)

 /*
- * vmw_dma_buffer_map_and_cache - Map a buffer object and cache the map
+ * vmw_buffer_object_map_and_cache - Map a buffer object and cache the map
  *
  * @vbo: The buffer object to map
  * Return: A kernel virtual address or NULL if mapping failed.
@@ -357,7 +357,7 @@ void vmw_dma_buffer_unmap(struct vmw_dma_buffer *vbo)
  * 3) Buffer object destruction
  *
  */
-void *vmw_dma_buffer_map_and_cache(struct vmw_dma_buffer *vbo)
+void *vmw_buffer_object_map_and_cache(struct vmw_buffer_object *vbo)
 {
 	struct ttm_buffer_object *bo = &vbo->base;
 	bool not_used;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
index 3767ac335aca..ff8acc74786c 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
@@ -38,7 +38,7 @@ struct vmw_user_context {
 	struct vmw_cmdbuf_res_manager *man;
 	struct vmw_resource *cotables[SVGA_COTABLE_DX10_MAX];
 	spinlock_t cotable_lock;
-	struct vmw_dma_buffer *dx_query_mob;
+	struct vmw_buffer_object *dx_query_mob;
 };

 static void vmw_user_context_free(struct vmw_resource *res);
@@ -900,7 +900,7 @@ vmw_context_binding_state(struct vmw_resource *ctx)
  * specified in the parameter.  0 otherwise.
  */
 int vmw_context_bind_dx_query(struct vmw_resource *ctx_res,
-			      struct vmw_dma_buffer *mob)
+			      struct vmw_buffer_object *mob)
 {
 	struct vmw_user_context *uctx =
 		container_of(ctx_res, struct vmw_user_context, res);
@@ -908,7 +908,7 @@ int vmw_context_bind_dx_query(struct vmw_resource *ctx_res,
 	if (mob == NULL) {
 		if (uctx->dx_query_mob) {
 			uctx->dx_query_mob->dx_query_ctx = NULL;
-			vmw_dmabuf_unreference(&uctx->dx_query_mob);
+			vmw_bo_unreference(&uctx->dx_query_mob);
 			uctx->dx_query_mob = NULL;
 		}

@@ -922,7 +922,7 @@ int vmw_context_bind_dx_query(struct vmw_resource *ctx_res,
 	mob->dx_query_ctx = ctx_res;

 	if (!uctx->dx_query_mob)
-		uctx->dx_query_mob = vmw_dmabuf_reference(mob);
+		uctx->dx_query_mob = vmw_bo_reference(mob);

 	return 0;
 }
@@ -932,7 +932,7 @@ int vmw_context_bind_dx_query(struct vmw_resource *ctx_res,
  *
  * @ctx_res: The context resource
  */
-struct vmw_dma_buffer *
+struct vmw_buffer_object *
 vmw_context_get_dx_query_mob(struct vmw_resource *ctx_res)
 {
 	struct vmw_user_context *uctx =
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
index cbf54ea7b4c0..1052cd3cb700 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
@@ -390,7 +390,7 @@ static int vmw_cotable_resize(struct vmw_resource *res, size_t new_size)
 	struct ttm_operation_ctx ctx = { false, false };
 	struct vmw_private *dev_priv = res->dev_priv;
 	struct vmw_cotable *vcotbl = vmw_cotable(res);
-	struct vmw_dma_buffer *buf, *old_buf = res->backup;
+	struct vmw_buffer_object *buf, *old_buf = res->backup;
 	struct ttm_buffer_object *bo, *old_bo = &res->backup->base;
 	size_t old_size = res->backup_size;
 	size_t old_size_read_back = vcotbl->size_read_back;
@@ -415,8 +415,8 @@ static int vmw_cotable_resize(struct vmw_resource *res, size_t new_size)
 	if (!buf)
 		return -ENOMEM;

-	ret = vmw_dmabuf_init(dev_priv, buf, new_size, &vmw_mob_ne_placement,
-			      true, vmw_dmabuf_bo_free);
+	ret = vmw_bo_init(dev_priv, buf, new_size, &vmw_mob_ne_placement,
+			  true, vmw_bo_bo_free);
 	if (ret) {
 		DRM_ERROR("Failed initializing new cotable MOB.\n");
 		return ret;
@@ -482,7 +482,7 @@ static int vmw_cotable_resize(struct vmw_resource *res, size_t new_size)

 	/* Let go of the old mob. */
 	list_del(&res->mob_head);
 	list_add_tail(&res->mob_head, &buf->res_list);
-	vmw_dmabuf_unreference(&old_buf);
+	vmw_bo_unreference(&old_buf);
 	res->id = vcotbl->type;

 	return 0;
@@ -491,7 +491,7 @@ out_map_new:
 	ttm_bo_kunmap(&old_map);
 out_wait:
 	ttm_bo_unreserve(bo);
-	vmw_dmabuf_unreference(&buf);
+	vmw_bo_unreference(&buf);

 	return ret;
 }
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 09cc721160c4..4f18304226bc 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -153,9 +153,9 @@ static const struct drm_ioctl_desc vmw_ioctls[] = {
 	VMW_IOCTL_DEF(VMW_GET_PARAM, vmw_getparam_ioctl,
		      DRM_AUTH | DRM_RENDER_ALLOW),
-	VMW_IOCTL_DEF(VMW_ALLOC_DMABUF, vmw_dmabuf_alloc_ioctl,
+	VMW_IOCTL_DEF(VMW_ALLOC_DMABUF, vmw_bo_alloc_ioctl,
		      DRM_AUTH | DRM_RENDER_ALLOW),
-	VMW_IOCTL_DEF(VMW_UNREF_DMABUF, vmw_dmabuf_unref_ioctl,
+	VMW_IOCTL_DEF(VMW_UNREF_DMABUF, vmw_bo_unref_ioctl,
		      DRM_RENDER_ALLOW),
 	VMW_IOCTL_DEF(VMW_CURSOR_BYPASS,
		      vmw_kms_cursor_bypass_ioctl,
@@ -219,7 +219,7 @@ static const struct drm_ioctl_desc vmw_ioctls[] = {
		      vmw_gb_surface_reference_ioctl,
		      DRM_AUTH | DRM_RENDER_ALLOW),
 	VMW_IOCTL_DEF(VMW_SYNCCPU,
-		      vmw_user_dmabuf_synccpu_ioctl,
+		      vmw_user_bo_synccpu_ioctl,
		      DRM_RENDER_ALLOW),
 	VMW_IOCTL_DEF(VMW_CREATE_EXTENDED_CONTEXT,
		      vmw_extended_context_define_ioctl,
@@ -321,7 +321,7 @@ static void vmw_print_capabilities(uint32_t capabilities)
 static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv)
 {
 	int ret;
-	struct vmw_dma_buffer *vbo;
+	struct vmw_buffer_object *vbo;
 	struct ttm_bo_kmap_obj map;
 	volatile SVGA3dQueryResult *result;
 	bool dummy;
@@ -335,9 +335,9 @@ static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv)
 	if (!vbo)
 		return -ENOMEM;

-	ret = vmw_dmabuf_init(dev_priv, vbo, PAGE_SIZE,
-			      &vmw_sys_ne_placement, false,
-			      &vmw_dmabuf_bo_free);
+	ret = vmw_bo_init(dev_priv, vbo, PAGE_SIZE,
+			  &vmw_sys_ne_placement, false,
+			  &vmw_bo_bo_free);
 	if (unlikely(ret != 0))
 		return ret;

@@ -358,7 +358,7 @@ static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv)
 	if (unlikely(ret != 0)) {
 		DRM_ERROR("Dummy query buffer map failed.\n");
-		vmw_dmabuf_unreference(&vbo);
+		vmw_bo_unreference(&vbo);
 	} else
 		dev_priv->dummy_query_bo = vbo;

@@ -460,7 +460,7 @@ static void vmw_release_device_early(struct vmw_private *dev_priv)
 	BUG_ON(dev_priv->pinned_bo != NULL);

-	vmw_dmabuf_unreference(&dev_priv->dummy_query_bo);
+	vmw_bo_unreference(&dev_priv->dummy_query_bo);
 	if (dev_priv->cman)
 		vmw_cmdbuf_remove_pool(dev_priv->cman);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 5fcbe1620d50..25c2f668ad6c 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -86,7 +86,7 @@ struct vmw_fpriv {
 	bool gb_aware;
 };

-struct vmw_dma_buffer {
+struct vmw_buffer_object {
 	struct ttm_buffer_object base;
 	struct list_head res_list;
 	s32 pin_count;
@@ -120,7 +120,7 @@ struct vmw_resource {
 	unsigned long backup_size;
 	bool res_dirty; /* Protected by backup buffer reserved */
 	bool backup_dirty; /* Protected by backup buffer reserved */
-	struct vmw_dma_buffer *backup;
+	struct vmw_buffer_object *backup;
 	unsigned long backup_offset;
 	unsigned long pin_count; /* Protected by resource reserved */
 	const struct vmw_res_func *func;
@@ -304,7 +304,7 @@ struct vmw_sw_context{
 	uint32_t cmd_bounce_size;
 	struct list_head resource_list;
 	struct list_head ctx_resource_list; /* For contexts and cotables */
-	struct vmw_dma_buffer *cur_query_bo;
+	struct vmw_buffer_object *cur_query_bo;
 	struct list_head res_relocations;
 	uint32_t *buf_start;
 	struct vmw_res_cache_entry res_cache[vmw_res_max];
@@ -315,7 +315,7 @@ struct vmw_sw_context{
 	bool staged_bindings_inuse;
 	struct list_head staged_cmd_res;
 	struct vmw_resource_val_node *dx_ctx_node;
-	struct vmw_dma_buffer *dx_query_mob;
+	struct vmw_buffer_object *dx_query_mob;
 	struct vmw_resource *dx_query_ctx;
 	struct vmw_cmdbuf_res_manager *man;
 };
@@ -513,8 +513,8 @@ struct vmw_private {
	 * are protected by the cmdbuf mutex.
	 */

-	struct vmw_dma_buffer *dummy_query_bo;
-	struct vmw_dma_buffer *pinned_bo;
+	struct vmw_buffer_object *dummy_query_bo;
+	struct vmw_buffer_object *pinned_bo;
 	uint32_t query_cid;
 	uint32_t query_cid_valid;
 	bool dummy_query_bo_pinned;
@@ -623,43 +623,43 @@ extern int vmw_user_lookup_handle(struct vmw_private *dev_priv,
				  struct ttm_object_file *tfile,
				  uint32_t handle,
				  struct vmw_surface **out_surf,
-				  struct vmw_dma_buffer **out_buf);
+				  struct vmw_buffer_object **out_buf);
 extern int vmw_user_resource_lookup_handle(
	struct vmw_private *dev_priv,
	struct ttm_object_file *tfile,
	uint32_t handle,
	const struct vmw_user_resource_conv *converter,
	struct vmw_resource **p_res);
-extern void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo);
-extern int vmw_dmabuf_init(struct vmw_private *dev_priv,
-			   struct vmw_dma_buffer *vmw_bo,
-			   size_t size, struct ttm_placement *placement,
-			   bool interuptable,
-			   void (*bo_free) (struct ttm_buffer_object *bo));
-extern int vmw_user_dmabuf_verify_access(struct ttm_buffer_object *bo,
-					 struct ttm_object_file *tfile);
-extern int vmw_user_dmabuf_alloc(struct vmw_private *dev_priv,
-				 struct ttm_object_file *tfile,
-				 uint32_t size,
-				 bool shareable,
-				 uint32_t *handle,
-				 struct vmw_dma_buffer **p_dma_buf,
-				 struct ttm_base_object **p_base);
-extern int vmw_user_dmabuf_reference(struct ttm_object_file *tfile,
-				     struct vmw_dma_buffer *dma_buf,
-				     uint32_t *handle);
-extern int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
-				  struct drm_file *file_priv);
-extern int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data,
-				  struct drm_file *file_priv);
-extern int vmw_user_dmabuf_synccpu_ioctl(struct drm_device *dev, void *data,
-					 struct drm_file *file_priv);
-extern uint32_t vmw_dmabuf_validate_node(struct ttm_buffer_object *bo,
-					 uint32_t cur_validate_node);
-extern void vmw_dmabuf_validate_clear(struct ttm_buffer_object *bo);
-extern int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
-				  uint32_t id, struct vmw_dma_buffer **out,
-				  struct ttm_base_object **base);
+extern void vmw_bo_bo_free(struct ttm_buffer_object *bo);
+extern int vmw_bo_init(struct vmw_private *dev_priv,
+		       struct vmw_buffer_object *vmw_bo,
+		       size_t size, struct ttm_placement *placement,
+		       bool interuptable,
+		       void (*bo_free)(struct ttm_buffer_object *bo));
+extern int vmw_user_bo_verify_access(struct ttm_buffer_object *bo,
+				     struct ttm_object_file *tfile);
+extern int vmw_user_bo_alloc(struct vmw_private *dev_priv,
+			     struct ttm_object_file *tfile,
+			     uint32_t size,
+			     bool shareable,
+			     uint32_t *handle,
+			     struct vmw_buffer_object **p_dma_buf,
+			     struct ttm_base_object **p_base);
+extern int vmw_user_bo_reference(struct ttm_object_file *tfile,
+				 struct vmw_buffer_object *dma_buf,
+				 uint32_t *handle);
+extern int vmw_bo_alloc_ioctl(struct drm_device *dev, void *data,
+			      struct drm_file *file_priv);
+extern int vmw_bo_unref_ioctl(struct drm_device *dev, void *data,
+			      struct drm_file *file_priv);
+extern int vmw_user_bo_synccpu_ioctl(struct drm_device *dev, void *data,
+				     struct drm_file *file_priv);
+extern uint32_t vmw_bo_validate_node(struct ttm_buffer_object *bo,
+				     uint32_t cur_validate_node);
+extern void vmw_bo_validate_clear(struct ttm_buffer_object *bo);
+extern int vmw_user_bo_lookup(struct ttm_object_file *tfile,
+			      uint32_t id, struct vmw_buffer_object **out,
+			      struct ttm_base_object **base);
 extern int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file_priv);
 extern int vmw_stream_unref_ioctl(struct drm_device *dev, void *data,
@@ -670,43 +670,43 @@ extern int vmw_user_stream_lookup(struct vmw_private *dev_priv,
				  struct vmw_resource **out);
 extern void vmw_resource_unreserve(struct vmw_resource *res,
				   bool switch_backup,
-				   struct vmw_dma_buffer *new_backup,
+				   struct vmw_buffer_object *new_backup,
				   unsigned long new_backup_offset);
 extern void vmw_resource_move_notify(struct ttm_buffer_object *bo,
				     struct ttm_mem_reg *mem);
 extern void vmw_query_move_notify(struct ttm_buffer_object *bo,
				  struct ttm_mem_reg *mem);
 extern void vmw_resource_swap_notify(struct ttm_buffer_object *bo);
-extern int vmw_query_readback_all(struct vmw_dma_buffer *dx_query_mob);
+extern int vmw_query_readback_all(struct vmw_buffer_object *dx_query_mob);
 extern void vmw_fence_single_bo(struct ttm_buffer_object *bo,
				struct vmw_fence_obj *fence);
 extern void vmw_resource_evict_all(struct vmw_private *dev_priv);

 /**
- * DMA buffer helper routines - vmwgfx_dmabuf.c
+ * Buffer object helper functions - vmwgfx_bo.c
  */
-extern int vmw_dmabuf_pin_in_placement(struct vmw_private *vmw_priv,
-				       struct vmw_dma_buffer *bo,
-				       struct ttm_placement *placement,
+extern int vmw_bo_pin_in_placement(struct vmw_private *vmw_priv,
+				   struct vmw_buffer_object *bo,
+				   struct ttm_placement *placement,
+				   bool interruptible);
+extern int vmw_bo_pin_in_vram(struct vmw_private *dev_priv,
+			      struct vmw_buffer_object *buf,
+			      bool interruptible);
+extern int vmw_bo_pin_in_vram_or_gmr(struct vmw_private *dev_priv,
+				     struct vmw_buffer_object *buf,
+				     bool interruptible);
+extern int vmw_bo_pin_in_start_of_vram(struct vmw_private *vmw_priv,
+				       struct vmw_buffer_object *bo,
				       bool interruptible);
-extern int vmw_dmabuf_pin_in_vram(struct vmw_private *dev_priv,
-				  struct vmw_dma_buffer *buf,
-				  bool interruptible);
-extern int vmw_dmabuf_pin_in_vram_or_gmr(struct vmw_private *dev_priv,
-					 struct vmw_dma_buffer *buf,
-					 bool interruptible);
-extern int vmw_dmabuf_pin_in_start_of_vram(struct vmw_private *vmw_priv,
-					   struct vmw_dma_buffer *bo,
-					   bool interruptible);
-extern int vmw_dmabuf_unpin(struct vmw_private *vmw_priv,
-			    struct vmw_dma_buffer *bo,
-			    bool interruptible);
+extern int vmw_bo_unpin(struct vmw_private *vmw_priv,
+			struct vmw_buffer_object *bo,
+			bool interruptible);
 extern void vmw_bo_get_guest_ptr(const struct ttm_buffer_object *buf,
				 SVGAGuestPtr *ptr);
-extern void vmw_bo_pin_reserved(struct vmw_dma_buffer *bo, bool pin);
-extern void *vmw_dma_buffer_map_and_cache(struct vmw_dma_buffer *vbo);
-extern void vmw_dma_buffer_unmap(struct vmw_dma_buffer *vbo);
+extern void vmw_bo_pin_reserved(struct vmw_buffer_object *bo, bool pin);
+extern void *vmw_buffer_object_map_and_cache(struct vmw_buffer_object *vbo);
+extern void vmw_buffer_object_unmap(struct vmw_buffer_object *vbo);

 /**
  * Misc Ioctl functionality - vmwgfx_ioctl.c
@@ -758,7 +758,7 @@ extern void vmw_ttm_global_release(struct vmw_private *dev_priv);
 extern int vmw_mmap(struct file *filp, struct vm_area_struct *vma);

 /**
- * TTM buffer object driver - vmwgfx_buffer.c
+ * TTM buffer object driver - vmwgfx_ttm_buffer.c
  */

 extern const size_t vmw_tt_size;
@@ -1041,8 +1041,8 @@ vmw_context_binding_state(struct vmw_resource *ctx);
 extern void vmw_dx_context_scrub_cotables(struct vmw_resource *ctx,
					  bool readback);
 extern int vmw_context_bind_dx_query(struct vmw_resource *ctx_res,
-				     struct vmw_dma_buffer *mob);
-extern struct vmw_dma_buffer *
+				     struct vmw_buffer_object *mob);
+extern struct vmw_buffer_object *
 vmw_context_get_dx_query_mob(struct vmw_resource *ctx_res);


@@ -1243,9 +1243,9 @@ static inline struct vmw_surface *vmw_surface_reference(struct vmw_surface *srf)
 	return srf;
 }

-static inline void vmw_dmabuf_unreference(struct vmw_dma_buffer **buf)
+static inline void vmw_bo_unreference(struct vmw_buffer_object **buf)
 {
-	struct vmw_dma_buffer *tmp_buf = *buf;
+	struct vmw_buffer_object *tmp_buf = *buf;

 	*buf = NULL;
 	if (tmp_buf != NULL) {
@@ -1255,7 +1255,8 @@ static inline void vmw_dmabuf_unreference(struct vmw_dma_buffer **buf)
 	}
 }

-static inline struct vmw_dma_buffer *vmw_dmabuf_reference(struct vmw_dma_buffer *buf)
+static inline struct vmw_buffer_object *
+vmw_bo_reference(struct vmw_buffer_object *buf)
 {
 	if (ttm_bo_reference(&buf->base))
 		return buf;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index c9d5cc237124..a8b194655c40 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -92,7 +92,7 @@ struct vmw_resource_val_node {
 	struct list_head head;
 	struct drm_hash_item hash;
 	struct vmw_resource *res;
-	struct vmw_dma_buffer *new_backup;
+	struct vmw_buffer_object *new_backup;
 	struct vmw_ctx_binding_state *staged_bindings;
 	unsigned long new_backup_offset;
 	u32 first_usage : 1;
@@ -126,9 +126,9 @@ static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
 static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGAMobId *id,
-				 struct vmw_dma_buffer **vmw_bo_p);
+				 struct vmw_buffer_object **vmw_bo_p);
 static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
-				   struct vmw_dma_buffer *vbo,
+				   struct vmw_buffer_object *vbo,
				   bool validate_as_mob,
				   uint32_t *p_val_node);
 /**
@@ -185,7 +185,7 @@ static void vmw_resources_unreserve(struct vmw_sw_context *sw_context,
 		}
 		vmw_resource_unreserve(res, switch_backup, val->new_backup,
				       val->new_backup_offset);
-		vmw_dmabuf_unreference(&val->new_backup);
+		vmw_bo_unreference(&val->new_backup);
 	}
 }

@@ -423,7 +423,7 @@ static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
 	}

 	if (dev_priv->has_dx && vmw_res_type(ctx) == vmw_res_dx_context) {
-		struct vmw_dma_buffer *dx_query_mob;
+		struct vmw_buffer_object *dx_query_mob;

 		dx_query_mob = vmw_context_get_dx_query_mob(ctx);
 		if (dx_query_mob)
@@ -544,7 +544,7 @@ static int vmw_cmd_ok(struct vmw_private *dev_priv,
  * submission is reached.
  */
 static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
-				   struct vmw_dma_buffer *vbo,
+				   struct vmw_buffer_object *vbo,
				   bool validate_as_mob,
				   uint32_t *p_val_node)
 {
@@ -616,7 +616,7 @@ static int vmw_resources_reserve(struct vmw_sw_context *sw_context)
 			return ret;

 		if (res->backup) {
-			struct vmw_dma_buffer *vbo = res->backup;
+			struct vmw_buffer_object *vbo = res->backup;

 			ret = vmw_bo_to_validate_list
				(sw_context, vbo,
@@ -628,7 +628,7 @@ static int vmw_resources_reserve(struct vmw_sw_context *sw_context)
 	}

 	if (sw_context->dx_query_mob) {
-		struct vmw_dma_buffer *expected_dx_query_mob;
+		struct vmw_buffer_object *expected_dx_query_mob;

 		expected_dx_query_mob =
			vmw_context_get_dx_query_mob(sw_context->dx_query_ctx);
@@ -657,7 +657,7 @@ static int vmw_resources_validate(struct vmw_sw_context *sw_context)

 	list_for_each_entry(val, &sw_context->resource_list, head) {
 		struct vmw_resource *res = val->res;
-		struct vmw_dma_buffer *backup = res->backup;
+		struct vmw_buffer_object *backup = res->backup;

 		ret = vmw_resource_validate(res);
 		if (unlikely(ret != 0)) {
@@ -668,7 +668,7 @@ static int vmw_resources_validate(struct vmw_sw_context *sw_context)

 		/* Check if the resource switched backup buffer */
 		if (backup && res->backup && (backup != res->backup)) {
-			struct vmw_dma_buffer *vbo = res->backup;
+			struct vmw_buffer_object *vbo = res->backup;

 			ret = vmw_bo_to_validate_list
				(sw_context, vbo,
@@ -821,7 +821,7 @@ out_no_reloc:
 static int vmw_rebind_all_dx_query(struct vmw_resource *ctx_res)
 {
 	struct vmw_private *dev_priv = ctx_res->dev_priv;
-	struct vmw_dma_buffer *dx_query_mob;
+	struct vmw_buffer_object *dx_query_mob;
 	struct {
 		SVGA3dCmdHeader header;
 		SVGA3dCmdDXBindAllQuery body;
@@ -1152,7 +1152,7 @@ static int vmw_cmd_present_check(struct vmw_private *dev_priv,
  * command batch.
  */
 static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
-				       struct vmw_dma_buffer *new_query_bo,
+				       struct vmw_buffer_object *new_query_bo,
				       struct vmw_sw_context *sw_context)
 {
 	struct vmw_res_cache_entry *ctx_entry =
@@ -1234,7 +1234,7 @@ static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv,
 	if (dev_priv->pinned_bo != sw_context->cur_query_bo) {
 		if (dev_priv->pinned_bo) {
 			vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
-			vmw_dmabuf_unreference(&dev_priv->pinned_bo);
+			vmw_bo_unreference(&dev_priv->pinned_bo);
 		}

 		if (!sw_context->needs_post_query_barrier) {
@@ -1256,7 +1256,7 @@ static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv,
 			dev_priv->query_cid = sw_context->last_query_ctx->id;
 			dev_priv->query_cid_valid = true;
 			dev_priv->pinned_bo =
-				vmw_dmabuf_reference(sw_context->cur_query_bo);
+				vmw_bo_reference(sw_context->cur_query_bo);
 		}
 	}
 }
@@ -1282,15 +1282,14 @@ static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv,
 static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGAMobId *id,
-				 struct vmw_dma_buffer **vmw_bo_p)
+				 struct vmw_buffer_object **vmw_bo_p)
 {
-	struct vmw_dma_buffer *vmw_bo = NULL;
+	struct vmw_buffer_object *vmw_bo = NULL;
 	uint32_t handle = *id;
 	struct vmw_relocation *reloc;
 	int ret;

-	ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo,
-				     NULL);
+	ret = vmw_user_bo_lookup(sw_context->fp->tfile, handle, &vmw_bo, NULL);
 	if (unlikely(ret != 0)) {
 		DRM_ERROR("Could not find or use MOB buffer.\n");
 		ret = -EINVAL;
@@ -1316,7 +1315,7 @@ static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
 	return 0;

 out_no_reloc:
-	vmw_dmabuf_unreference(&vmw_bo);
+	vmw_bo_unreference(&vmw_bo);
 	*vmw_bo_p = NULL;
 	return ret;
 }
@@ -1343,15 +1342,14 @@ out_no_reloc:
 static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGAGuestPtr *ptr,
-				   struct vmw_dma_buffer **vmw_bo_p)
+				   struct vmw_buffer_object **vmw_bo_p)
 {
-	struct vmw_dma_buffer *vmw_bo = NULL;
+	struct vmw_buffer_object *vmw_bo = NULL;
 	uint32_t handle = ptr->gmrId;
 	struct vmw_relocation *reloc;
 	int ret;

-	ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo,
-				     NULL);
+	ret = vmw_user_bo_lookup(sw_context->fp->tfile, handle, &vmw_bo, NULL);
 	if (unlikely(ret != 0)) {
 		DRM_ERROR("Could not find or use GMR region.\n");
 		ret = -EINVAL;
@@ -1376,7 +1374,7 @@ static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
 	return 0;

 out_no_reloc:
-	vmw_dmabuf_unreference(&vmw_bo);
+	vmw_bo_unreference(&vmw_bo);
 	*vmw_bo_p = NULL;
 	return ret;
 }
@@ -1447,7 +1445,7 @@ static int vmw_cmd_dx_bind_query(struct vmw_private *dev_priv,
 		SVGA3dCmdDXBindQuery q;
 	} *cmd;

-	struct vmw_dma_buffer *vmw_bo;
+	struct vmw_buffer_object *vmw_bo;
 	int ret;

@@ -1466,7 +1464,7 @@ static int vmw_cmd_dx_bind_query(struct vmw_private *dev_priv,
 	sw_context->dx_query_mob = vmw_bo;
 	sw_context->dx_query_ctx = sw_context->dx_ctx_node->res;

-	vmw_dmabuf_unreference(&vmw_bo);
+	vmw_bo_unreference(&vmw_bo);

 	return ret;
 }
@@ -1549,7 +1547,7 @@ static int vmw_cmd_end_gb_query(struct vmw_private *dev_priv,
				struct vmw_sw_context *sw_context,
				SVGA3dCmdHeader *header)
 {
-	struct vmw_dma_buffer *vmw_bo;
+	struct vmw_buffer_object *vmw_bo;
 	struct vmw_query_cmd {
 		SVGA3dCmdHeader header;
 		SVGA3dCmdEndGBQuery q;
@@ -1569,7 +1567,7 @@ static int vmw_cmd_end_gb_query(struct vmw_private *dev_priv,

 	ret = vmw_query_bo_switch_prepare(dev_priv, vmw_bo, sw_context);

-	vmw_dmabuf_unreference(&vmw_bo);
+	vmw_bo_unreference(&vmw_bo);
 	return ret;
 }
@@ -1584,7 +1582,7 @@ static int vmw_cmd_end_query(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     SVGA3dCmdHeader *header)
 {
-	struct vmw_dma_buffer *vmw_bo;
+	struct vmw_buffer_object *vmw_bo;
 	struct vmw_query_cmd {
 		SVGA3dCmdHeader header;
 		SVGA3dCmdEndQuery q;
@@ -1623,7 +1621,7 @@ static int vmw_cmd_end_query(struct vmw_private *dev_priv,

 	ret = vmw_query_bo_switch_prepare(dev_priv, vmw_bo, sw_context);

-	vmw_dmabuf_unreference(&vmw_bo);
+	vmw_bo_unreference(&vmw_bo);
 	return ret;
 }

@@ -1638,7 +1636,7 @@ static int vmw_cmd_wait_gb_query(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGA3dCmdHeader *header)
 {
-	struct vmw_dma_buffer *vmw_bo;
+	struct vmw_buffer_object *vmw_bo;
 	struct vmw_query_cmd {
 		SVGA3dCmdHeader header;
 		SVGA3dCmdWaitForGBQuery q;
@@ -1656,7 +1654,7 @@ static int vmw_cmd_wait_gb_query(struct vmw_private *dev_priv,
 	if (unlikely(ret != 0))
 		return ret;

-	vmw_dmabuf_unreference(&vmw_bo);
+	vmw_bo_unreference(&vmw_bo);

 	return 0;
 }

@@ -1671,7 +1669,7 @@ static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
			      struct vmw_sw_context *sw_context,
			      SVGA3dCmdHeader *header)
 {
-	struct vmw_dma_buffer *vmw_bo;
+	struct vmw_buffer_object *vmw_bo;
 	struct vmw_query_cmd {
 		SVGA3dCmdHeader header;
 		SVGA3dCmdWaitForQuery q;
@@ -1708,7 +1706,7 @@ static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
 	if (unlikely(ret != 0))
 		return ret;

-	vmw_dmabuf_unreference(&vmw_bo);
+	vmw_bo_unreference(&vmw_bo);

 	return 0;
 }

@@ -1716,7 +1714,7 @@ static int vmw_cmd_dma(struct vmw_private *dev_priv,
		       struct vmw_sw_context *sw_context,
		       SVGA3dCmdHeader *header)
 {
-	struct vmw_dma_buffer *vmw_bo = NULL;
+	struct vmw_buffer_object *vmw_bo = NULL;
 	struct vmw_surface *srf = NULL;
 	struct vmw_dma_cmd {
 		SVGA3dCmdHeader header;
@@ -1768,7 +1766,7 @@ static int vmw_cmd_dma(struct vmw_private *dev_priv,
			     header);

 out_no_surface:
-	vmw_dmabuf_unreference(&vmw_bo);
+	vmw_bo_unreference(&vmw_bo);
 	return ret;
 }

@@ -1887,7 +1885,7 @@ static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv,
				      struct vmw_sw_context *sw_context,
				      void *buf)
 {
-	struct vmw_dma_buffer *vmw_bo;
+	struct vmw_buffer_object *vmw_bo;
 	int ret;

 	struct {
@@ -1901,7 +1899,7 @@ static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv,
 	if (unlikely(ret != 0))
 		return ret;

-	vmw_dmabuf_unreference(&vmw_bo);
+	vmw_bo_unreference(&vmw_bo);

 	return ret;
 }

@@ -1928,7 +1926,7 @@ static int vmw_cmd_res_switch_backup(struct vmw_private *dev_priv,
				     uint32_t *buf_id,
				     unsigned long backup_offset)
 {
-	struct vmw_dma_buffer *dma_buf;
+	struct vmw_buffer_object *dma_buf;
 	int ret;

 	ret = vmw_translate_mob_ptr(dev_priv, sw_context, buf_id, &dma_buf);
@@ -1939,7 +1937,7 @@ static int vmw_cmd_res_switch_backup(struct vmw_private *dev_priv,
 	if (val_node->first_usage)
 		val_node->no_buffer_needed = true;

-	vmw_dmabuf_unreference(&val_node->new_backup);
+	vmw_bo_unreference(&val_node->new_backup);
 	val_node->new_backup = dma_buf;
 	val_node->new_backup_offset = backup_offset;

@@ -3701,8 +3699,8 @@ int vmw_validate_single_buffer(struct vmw_private *dev_priv,
			       bool interruptible,
			       bool validate_as_mob)
 {
-	struct vmw_dma_buffer *vbo = container_of(bo, struct vmw_dma_buffer,
-						  base);
+	struct vmw_buffer_object *vbo =
+		container_of(bo, struct vmw_buffer_object, base);
 	struct ttm_operation_ctx ctx = { interruptible, true };
 	int ret;

@@ -4423,7 +4421,7 @@ void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
 	ttm_bo_unref(&query_val.bo);
 	ttm_bo_unref(&pinned_val.bo);
-	vmw_dmabuf_unreference(&dev_priv->pinned_bo);
+	vmw_bo_unreference(&dev_priv->pinned_bo);

out_unlock:
 	return;
@@ -4432,7 +4430,7 @@ out_no_emit:
out_no_reserve:
 	ttm_bo_unref(&query_val.bo);
 	ttm_bo_unref(&pinned_val.bo);
-	vmw_dmabuf_unreference(&dev_priv->pinned_bo);
+	vmw_bo_unreference(&dev_priv->pinned_bo);
 }

 /**
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
index 9b7e0aca5f84..dcde4985c574 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
@@ -42,7 +42,7 @@ struct vmw_fb_par {
 	void *vmalloc;

 	struct mutex bo_mutex;
-	struct vmw_dma_buffer *vmw_bo;
+	struct vmw_buffer_object *vmw_bo;
 	unsigned bo_size;
 	struct drm_framebuffer *set_fb;
 	struct drm_display_mode *set_mode;
@@ -184,7 +184,7 @@ static void vmw_fb_dirty_flush(struct work_struct *work)
 	struct drm_clip_rect clip;
 	struct drm_framebuffer *cur_fb;
 	u8 *src_ptr, *dst_ptr;
-	struct vmw_dma_buffer *vbo = par->vmw_bo;
+	struct vmw_buffer_object *vbo = par->vmw_bo;
 	void *virtual;

 	if (!READ_ONCE(par->dirty.active))
@@ -197,7 +197,7 @@ static void vmw_fb_dirty_flush(struct work_struct *work)

 	(void) ttm_read_lock(&vmw_priv->reservation_sem, false);
 	(void) ttm_bo_reserve(&vbo->base, false, false, NULL);
-	virtual = vmw_dma_buffer_map_and_cache(vbo);
+	virtual = vmw_buffer_object_map_and_cache(vbo);
 	if (!virtual)
 		goto out_unreserve;

@@ -391,9 +391,9 @@ static void vmw_fb_imageblit(struct fb_info *info, const struct fb_image *image)
  */

 static int vmw_fb_create_bo(struct vmw_private *vmw_priv,
-			    size_t size, struct vmw_dma_buffer **out)
+			    size_t size, struct vmw_buffer_object **out)
 {
-	struct vmw_dma_buffer *vmw_bo;
+	struct vmw_buffer_object *vmw_bo;
 	int ret;

 	(void) ttm_write_lock(&vmw_priv->reservation_sem, false);
@@ -404,10 +404,10 @@ static int vmw_fb_create_bo(struct vmw_private *vmw_priv,
 		goto err_unlock;
 	}

-	ret = vmw_dmabuf_init(vmw_priv, vmw_bo, size,
+	ret = vmw_bo_init(vmw_priv, vmw_bo, size,
			      &vmw_sys_placement,
			      false,
-			      &vmw_dmabuf_bo_free);
+			      &vmw_bo_bo_free);
 	if (unlikely(ret != 0))
 		goto err_unlock; /* init frees the buffer on failure */

@@ -491,7 +491,7 @@ static int vmw_fb_kms_detach(struct vmw_fb_par *par,
 	}

 	if (par->vmw_bo && detach_bo && unref_bo)
-		vmw_dmabuf_unreference(&par->vmw_bo);
+		vmw_bo_unreference(&par->vmw_bo);

 	return 0;
 }
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
index c5e8eae0dbe2..5e0c8f775c92 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
@@ -377,8 +377,8 @@ int vmw_present_readback_ioctl(struct drm_device *dev, void *data,
 	}

 	vfb = vmw_framebuffer_to_vfb(fb);
-	if (!vfb->dmabuf) {
-		DRM_ERROR("Framebuffer not dmabuf backed.\n");
+	if (!vfb->bo) {
+		DRM_ERROR("Framebuffer not buffer backed.\n");
 		ret = -EINVAL;
 		goto out_no_ttm_lock;
 	}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index ef96ba7432ad..7a32be0cef14 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -85,10 +85,10 @@ static int vmw_cursor_update_image(struct vmw_private *dev_priv,
 	return 0;
 }

-static int vmw_cursor_update_dmabuf(struct vmw_private *dev_priv,
-				    struct vmw_dma_buffer *dmabuf,
-				    u32 width, u32 height,
-				    u32 hotspotX, u32 hotspotY)
+static int vmw_cursor_update_bo(struct vmw_private *dev_priv,
+				struct vmw_buffer_object *bo,
+				u32 width, u32 height,
+				u32 hotspotX, u32 hotspotY)
 {
 	struct ttm_bo_kmap_obj map;
 	unsigned long kmap_offset;
@@ -100,13 +100,13 @@ static int vmw_cursor_update_dmabuf(struct vmw_private *dev_priv,
 	kmap_offset = 0;
 	kmap_num = (width*height*4 + PAGE_SIZE - 1) >> PAGE_SHIFT;

-	ret = ttm_bo_reserve(&dmabuf->base, true, false, NULL);
+	ret = ttm_bo_reserve(&bo->base, true, false, NULL);
 	if (unlikely(ret != 0)) {
 		DRM_ERROR("reserve failed\n");
 		return -EINVAL;
 	}

-	ret = ttm_bo_kmap(&dmabuf->base, kmap_offset, kmap_num, &map);
+	ret = ttm_bo_kmap(&bo->base, kmap_offset, kmap_num, &map);
 	if (unlikely(ret != 0))
 		goto err_unreserve;

@@ -116,7 +116,7 @@ static int vmw_cursor_update_dmabuf(struct vmw_private *dev_priv,
 	ttm_bo_kunmap(&map);
err_unreserve:
-	ttm_bo_unreserve(&dmabuf->base);
+	ttm_bo_unreserve(&bo->base);

 	return ret;
 }
@@ -352,13 +352,13 @@ vmw_du_cursor_plane_prepare_fb(struct drm_plane *plane,
 	if (vps->surf)
 		vmw_surface_unreference(&vps->surf);

-	if (vps->dmabuf)
-		vmw_dmabuf_unreference(&vps->dmabuf);
+	if (vps->bo)
+		vmw_bo_unreference(&vps->bo);

 	if (fb) {
-		if (vmw_framebuffer_to_vfb(fb)->dmabuf) {
-			vps->dmabuf = vmw_framebuffer_to_vfbd(fb)->buffer;
-			vmw_dmabuf_reference(vps->dmabuf);
+		if (vmw_framebuffer_to_vfb(fb)->bo) {
+			vps->bo = vmw_framebuffer_to_vfbd(fb)->buffer;
+			vmw_bo_reference(vps->bo);
 		} else {
 			vps->surf = vmw_framebuffer_to_vfbs(fb)->surface;
 			vmw_surface_reference(vps->surf);
@@ -390,7 +390,7 @@ vmw_du_cursor_plane_atomic_update(struct drm_plane *plane,
 	}

 	du->cursor_surface = vps->surf;
-	du->cursor_dmabuf = vps->dmabuf;
+	du->cursor_bo = vps->bo;

 	if (vps->surf) {
 		du->cursor_age = du->cursor_surface->snooper.age;
@@ -399,11 +399,11 @@ vmw_du_cursor_plane_atomic_update(struct drm_plane *plane,
					      vps->surf->snooper.image,
					      64, 64, hotspot_x, hotspot_y);

-	} else if (vps->dmabuf) {
-		ret = vmw_cursor_update_dmabuf(dev_priv, vps->dmabuf,
-					       plane->state->crtc_w,
-					       plane->state->crtc_h,
-					       hotspot_x, hotspot_y);
+	} else if (vps->bo) {
+		ret = vmw_cursor_update_bo(dev_priv, vps->bo,
+					   plane->state->crtc_w,
+					   plane->state->crtc_h,
+					   hotspot_x, hotspot_y);

 	} else {
 		vmw_cursor_update_position(dev_priv, false, 0, 0);
 		return;
@@ -519,7 +519,7 @@ int vmw_du_cursor_plane_atomic_check(struct drm_plane *plane,
 		ret = -EINVAL;
 	}

-	if (!vmw_framebuffer_to_vfb(fb)->dmabuf)
+	if (!vmw_framebuffer_to_vfb(fb)->bo)
 		surface = vmw_framebuffer_to_vfbs(fb)->surface;

 	if (surface && !surface->snooper.image) {
@@ -687,8 +687,8 @@ vmw_du_plane_duplicate_state(struct drm_plane *plane)
 	if (vps->surf)
 		(void) vmw_surface_reference(vps->surf);

-	if (vps->dmabuf)
-		(void) vmw_dmabuf_reference(vps->dmabuf);
+	if (vps->bo)
+		(void) vmw_bo_reference(vps->bo);

 	state = &vps->base;

@@ -745,8 +745,8 @@ vmw_du_plane_destroy_state(struct drm_plane *plane,
 	if (vps->surf)
 		vmw_surface_unreference(&vps->surf);

-	if (vps->dmabuf)
-		vmw_dmabuf_unreference(&vps->dmabuf);
+	if (vps->bo)
+		vmw_bo_unreference(&vps->bo);

 	drm_atomic_helper_plane_destroy_state(plane, state);
 }
@@ -902,12 +902,12 @@ static int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer,

 /**
  * vmw_kms_readback - Perform a readback from the screen system to
- * a dma-buffer backed framebuffer.
+ * a buffer-object backed framebuffer.
  *
  * @dev_priv: Pointer to the device private structure.
  * @file_priv: Pointer to a struct drm_file identifying the caller.
  *             Must be set to NULL if @user_fence_rep is NULL.
- * @vfb: Pointer to the dma-buffer backed framebuffer.
+ * @vfb: Pointer to the buffer-object backed framebuffer.
  * @user_fence_rep: User-space provided structure for fence information.
  *                  Must be set to non-NULL if @file_priv is non-NULL.
  * @vclips: Array of clip rects.
@@ -951,7 +951,7 @@ static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
					   struct vmw_framebuffer **out,
					   const struct drm_mode_fb_cmd2
					   *mode_cmd,
-					   bool is_dmabuf_proxy)
+					   bool is_bo_proxy)

 {
 	struct drm_device *dev = dev_priv->dev;
@@ -1019,7 +1019,7 @@ static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
 	drm_helper_mode_fill_fb_struct(dev, &vfbs->base.base, mode_cmd);
 	vfbs->surface = vmw_surface_reference(surface);
 	vfbs->base.user_handle = mode_cmd->handles[0];
-	vfbs->is_dmabuf_proxy = is_dmabuf_proxy;
+	vfbs->is_bo_proxy = is_bo_proxy;

 	*out = &vfbs->base;

@@ -1038,30 +1038,30 @@ out_err1:
 }

 /*
- * Dmabuf framebuffer code
+ * Buffer-object framebuffer code
  */

-static void vmw_framebuffer_dmabuf_destroy(struct drm_framebuffer *framebuffer)
+static void vmw_framebuffer_bo_destroy(struct drm_framebuffer *framebuffer)
 {
-	struct vmw_framebuffer_dmabuf *vfbd =
+	struct vmw_framebuffer_bo *vfbd =
		vmw_framebuffer_to_vfbd(framebuffer);

 	drm_framebuffer_cleanup(framebuffer);
-	vmw_dmabuf_unreference(&vfbd->buffer);
+	vmw_bo_unreference(&vfbd->buffer);
 	if (vfbd->base.user_obj)
 		ttm_base_object_unref(&vfbd->base.user_obj);

 	kfree(vfbd);
 }

-static int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer *framebuffer,
-					struct drm_file *file_priv,
-					unsigned flags, unsigned color,
-					struct drm_clip_rect *clips,
-					unsigned num_clips)
+static int vmw_framebuffer_bo_dirty(struct drm_framebuffer *framebuffer,
+				    struct drm_file *file_priv,
+				    unsigned int flags, unsigned int color,
+				    struct drm_clip_rect *clips,
+				    unsigned int num_clips)
 {
 	struct vmw_private *dev_priv = vmw_priv(framebuffer->dev);
-	struct vmw_framebuffer_dmabuf *vfbd =
+	struct vmw_framebuffer_bo *vfbd =
		vmw_framebuffer_to_vfbd(framebuffer);
 	struct drm_clip_rect norect;
 	int ret, increment = 1;
@@ -1092,13 +1092,13 @@ static int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer *framebuffer,
					      true, true, NULL);
 		break;
 	case vmw_du_screen_object:
-		ret = vmw_kms_sou_do_dmabuf_dirty(dev_priv, &vfbd->base,
-						  clips, NULL, num_clips,
-						  increment, true, NULL, NULL);
+		ret = vmw_kms_sou_do_bo_dirty(dev_priv, &vfbd->base,
+					      clips, NULL, num_clips,
+					      increment, true, NULL, NULL);
 		break;
 	case vmw_du_legacy:
-		ret = vmw_kms_ldu_do_dmabuf_dirty(dev_priv, &vfbd->base, 0, 0,
-						  clips, num_clips, increment);
+		ret = vmw_kms_ldu_do_bo_dirty(dev_priv, &vfbd->base, 0, 0,
+					      clips, num_clips, increment);
 		break;
 	default:
 		ret = -EINVAL;
@@ -1114,23 +1114,23 @@ static int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer *framebuffer,
 	return ret;
 }

-static const struct drm_framebuffer_funcs vmw_framebuffer_dmabuf_funcs = {
-	.destroy = vmw_framebuffer_dmabuf_destroy,
-	.dirty = vmw_framebuffer_dmabuf_dirty,
+static const struct drm_framebuffer_funcs vmw_framebuffer_bo_funcs = {
+	.destroy = vmw_framebuffer_bo_destroy,
+	.dirty = vmw_framebuffer_bo_dirty,
 };

 /**
- * Pin the dmabuffer in a location suitable for access by the
+ * Pin the bofer in a location suitable for access by the
  * display system.
  */
 static int vmw_framebuffer_pin(struct vmw_framebuffer *vfb)
 {
 	struct vmw_private *dev_priv = vmw_priv(vfb->base.dev);
-	struct vmw_dma_buffer *buf;
+	struct vmw_buffer_object *buf;
 	struct ttm_placement *placement;
 	int ret;

-	buf = vfb->dmabuf ? vmw_framebuffer_to_vfbd(&vfb->base)->buffer :
+	buf = vfb->bo ?  vmw_framebuffer_to_vfbd(&vfb->base)->buffer :
		vmw_framebuffer_to_vfbs(&vfb->base)->surface->res.backup;

 	if (!buf)
@@ -1139,12 +1139,12 @@ static int vmw_framebuffer_pin(struct vmw_framebuffer *vfb)
 	switch (dev_priv->active_display_unit) {
 	case vmw_du_legacy:
 		vmw_overlay_pause_all(dev_priv);
-		ret = vmw_dmabuf_pin_in_start_of_vram(dev_priv, buf, false);
+		ret = vmw_bo_pin_in_start_of_vram(dev_priv, buf, false);
 		vmw_overlay_resume_all(dev_priv);
 		break;
 	case vmw_du_screen_object:
 	case vmw_du_screen_target:
-		if (vfb->dmabuf) {
+		if (vfb->bo) {
 			if (dev_priv->capabilities & SVGA_CAP_3D) {
				/*
				 * Use surface DMA to get content to
@@ -1160,8 +1160,7 @@ static int vmw_framebuffer_pin(struct vmw_framebuffer *vfb)
 			placement = &vmw_mob_placement;
 		}

-		return vmw_dmabuf_pin_in_placement(dev_priv, buf, placement,
-						   false);
+		return vmw_bo_pin_in_placement(dev_priv, buf, placement, false);
 	default:
 		return -EINVAL;
 	}
@@ -1172,36 +1171,36 @@ static int vmw_framebuffer_pin(struct vmw_framebuffer *vfb)
 static int vmw_framebuffer_unpin(struct vmw_framebuffer *vfb)
 {
 	struct vmw_private *dev_priv = vmw_priv(vfb->base.dev);
-	struct vmw_dma_buffer *buf;
+	struct vmw_buffer_object *buf;

-	buf = vfb->dmabuf ? vmw_framebuffer_to_vfbd(&vfb->base)->buffer :
+	buf = vfb->bo ?  vmw_framebuffer_to_vfbd(&vfb->base)->buffer :
		vmw_framebuffer_to_vfbs(&vfb->base)->surface->res.backup;

 	if (WARN_ON(!buf))
 		return 0;

-	return vmw_dmabuf_unpin(dev_priv, buf, false);
+	return vmw_bo_unpin(dev_priv, buf, false);
 }

 /**
- * vmw_create_dmabuf_proxy - create a proxy surface for the DMA buf
+ * vmw_create_bo_proxy - create a proxy surface for the buffer object
  *
  * @dev: DRM device
  * @mode_cmd: parameters for the new surface
- * @dmabuf_mob: MOB backing the DMA buf
+ * @bo_mob: MOB backing the buffer object
  * @srf_out: newly created surface
  *
- * When the content FB is a DMA buf, we create a surface as a proxy to the
+ * When the content FB is a buffer object, we create a surface as a proxy to the
  * same buffer.  This way we can do a surface copy rather than a surface DMA.
  * This is a more efficient approach
  *
  * RETURNS:
  * 0 on success, error code otherwise
  */
-static int vmw_create_dmabuf_proxy(struct drm_device *dev,
-				   const struct drm_mode_fb_cmd2 *mode_cmd,
-				   struct vmw_dma_buffer *dmabuf_mob,
-				   struct vmw_surface **srf_out)
+static int vmw_create_bo_proxy(struct drm_device *dev,
+			       const struct drm_mode_fb_cmd2 *mode_cmd,
+			       struct vmw_buffer_object *bo_mob,
+			       struct vmw_surface **srf_out)
 {
 	uint32_t format;
 	struct drm_vmw_size content_base_size = {0};
@@ -1258,8 +1257,8 @@ static int vmw_create_dmabuf_proxy(struct drm_device *dev,
 	/* Reserve and switch the backing mob. */
 	mutex_lock(&res->dev_priv->cmdbuf_mutex);
 	(void) vmw_resource_reserve(res, false, true);
-	vmw_dmabuf_unreference(&res->backup);
-	res->backup = vmw_dmabuf_reference(dmabuf_mob);
+	vmw_bo_unreference(&res->backup);
+	res->backup = vmw_bo_reference(bo_mob);
 	res->backup_offset = 0;
 	vmw_resource_unreserve(res, false, NULL, 0);
 	mutex_unlock(&res->dev_priv->cmdbuf_mutex);
@@ -1269,21 +1268,21 @@



-static int vmw_kms_new_framebuffer_dmabuf(struct vmw_private *dev_priv,
-					  struct vmw_dma_buffer *dmabuf,
-					  struct vmw_framebuffer **out,
-					  const struct drm_mode_fb_cmd2
-					  *mode_cmd)
+static int vmw_kms_new_framebuffer_bo(struct vmw_private *dev_priv,
+				      struct vmw_buffer_object *bo,
+				      struct vmw_framebuffer **out,
+				      const struct drm_mode_fb_cmd2
+				      *mode_cmd)

 {
 	struct drm_device *dev = dev_priv->dev;
-	struct vmw_framebuffer_dmabuf *vfbd;
+	struct vmw_framebuffer_bo *vfbd;
 	unsigned int requested_size;
 	struct drm_format_name_buf format_name;
 	int ret;

 	requested_size = mode_cmd->height * mode_cmd->pitches[0];
-	if (unlikely(requested_size > dmabuf->base.num_pages * PAGE_SIZE)) {
+	if (unlikely(requested_size > bo->base.num_pages * PAGE_SIZE)) {
 		DRM_ERROR("Screen buffer object size is too small "
			  "for requested mode.\n");
 		return -EINVAL;
@@ -1312,20 +1311,20 @@ static int vmw_kms_new_framebuffer_dmabuf(struct vmw_private *dev_priv,
 	}

 	drm_helper_mode_fill_fb_struct(dev, &vfbd->base.base, mode_cmd);
-	vfbd->base.dmabuf = true;
-	vfbd->buffer = vmw_dmabuf_reference(dmabuf);
+	vfbd->base.bo = true;
+	vfbd->buffer = vmw_bo_reference(bo);
 	vfbd->base.user_handle = mode_cmd->handles[0];
 	*out = &vfbd->base;

 	ret = drm_framebuffer_init(dev, &vfbd->base.base,
-				   &vmw_framebuffer_dmabuf_funcs);
+				   &vmw_framebuffer_bo_funcs);
 	if (ret)
 		goto out_err2;

 	return 0;

out_err2:
-	vmw_dmabuf_unreference(&dmabuf);
+	vmw_bo_unreference(&bo);
 	kfree(vfbd);
out_err1:
 	return ret;
@@ -1354,57 +1353,57 @@ vmw_kms_srf_ok(struct vmw_private *dev_priv, uint32_t width, uint32_t height)
  * vmw_kms_new_framebuffer - Create a new framebuffer.
  *
  * @dev_priv: Pointer to device private struct.
- * @dmabuf: Pointer to dma buffer to wrap the kms framebuffer around.
- * Either @dmabuf or @surface must be NULL.
+ * @bo: Pointer to buffer object to wrap the kms framebuffer around.
+ * Either @bo or @surface must be NULL.
  * @surface: Pointer to a surface to wrap the kms framebuffer around.
- * Either @dmabuf or @surface must be NULL.
- * @only_2d: No presents will occur to this dma buffer based framebuffer. This
- * Helps the code to do some important optimizations.
+ * Either @bo or @surface must be NULL.
+ * @only_2d: No presents will occur to this buffer object based framebuffer.
+ * This helps the code to do some important optimizations.
  * @mode_cmd: Frame-buffer metadata.
  */
 struct vmw_framebuffer *
 vmw_kms_new_framebuffer(struct vmw_private *dev_priv,
-			struct vmw_dma_buffer *dmabuf,
+			struct vmw_buffer_object *bo,
			struct vmw_surface *surface,
			bool only_2d,
			const struct drm_mode_fb_cmd2 *mode_cmd)
 {
 	struct vmw_framebuffer *vfb = NULL;
-	bool is_dmabuf_proxy = false;
+	bool is_bo_proxy = false;
 	int ret;

	/*
	 * We cannot use the SurfaceDMA command in an non-accelerated VM,
-	 * therefore, wrap the DMA buf in a surface so we can use the
+	 * therefore, wrap the buffer object in a surface so we can use the
	 * SurfaceCopy command.
	 */
 	if (vmw_kms_srf_ok(dev_priv, mode_cmd->width, mode_cmd->height)  &&
-	    dmabuf && only_2d &&
+	    bo && only_2d &&
	    mode_cmd->width > 64 &&  /* Don't create a proxy for cursor */
	    dev_priv->active_display_unit == vmw_du_screen_target) {
-		ret = vmw_create_dmabuf_proxy(dev_priv->dev, mode_cmd,
-					      dmabuf, &surface);
+		ret = vmw_create_bo_proxy(dev_priv->dev, mode_cmd,
+					  bo, &surface);
 		if (ret)
 			return ERR_PTR(ret);

-		is_dmabuf_proxy = true;
+		is_bo_proxy = true;
 	}

 	/* Create the new framebuffer depending one what we have */
 	if (surface) {
 		ret = vmw_kms_new_framebuffer_surface(dev_priv, surface, &vfb,
						      mode_cmd,
-						      is_dmabuf_proxy);
+						      is_bo_proxy);
		/*
-		 * vmw_create_dmabuf_proxy() adds a reference that is no longer
+		 * vmw_create_bo_proxy() adds a reference that is no longer
		 * needed
		 */
-		if (is_dmabuf_proxy)
+		if (is_bo_proxy)
 			vmw_surface_unreference(&surface);
-	} else if (dmabuf) {
-		ret = vmw_kms_new_framebuffer_dmabuf(dev_priv, dmabuf, &vfb,
-						     mode_cmd);
+	} else if (bo) {
+		ret = vmw_kms_new_framebuffer_bo(dev_priv, bo, &vfb,
+						 mode_cmd);
 	} else {
 		BUG();
 	}
@@ -1430,7 +1429,7 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
 	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
 	struct vmw_framebuffer *vfb = NULL;
 	struct vmw_surface *surface = NULL;
-	struct vmw_dma_buffer *bo = NULL;
+	struct vmw_buffer_object *bo = NULL;
 	struct ttm_base_object *user_obj;
 	int ret;

@@ -1466,7 +1465,7 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
	 * End conditioned code.
	 */

-	/* returns either a dmabuf or surface */
+	/* returns either a bo or surface */
 	ret = vmw_user_lookup_handle(dev_priv, tfile,
				     mode_cmd->handles[0],
				     &surface, &bo);
@@ -1494,7 +1493,7 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
err_out:
 	/* vmw_user_lookup_handle takes one ref so does new_fb */
 	if (bo)
-		vmw_dmabuf_unreference(&bo);
+		vmw_bo_unreference(&bo);
 	if (surface)
 		vmw_surface_unreference(&surface);

@@ -2427,7 +2426,7 @@ int vmw_kms_helper_dirty(struct vmw_private *dev_priv,
  * interrupted by a signal.
  */
 int vmw_kms_helper_buffer_prepare(struct vmw_private *dev_priv,
-				  struct vmw_dma_buffer *buf,
+				  struct vmw_buffer_object *buf,
				  bool interruptible,
				  bool validate_as_mob,
				  bool for_cpu_blit)
@@ -2459,7 +2458,7 @@ int vmw_kms_helper_buffer_prepare(struct vmw_private *dev_priv,
  * Helper to be used if an error forces the caller to undo the actions of
  * vmw_kms_helper_buffer_prepare.
  */
-void vmw_kms_helper_buffer_revert(struct vmw_dma_buffer *buf)
+void vmw_kms_helper_buffer_revert(struct vmw_buffer_object *buf)
 {
 	if (buf)
 		ttm_bo_unreserve(&buf->base);
@@ -2482,7 +2481,7 @@ void vmw_kms_helper_buffer_revert(struct vmw_dma_buffer *buf)
  */
 void vmw_kms_helper_buffer_finish(struct vmw_private *dev_priv,
				  struct drm_file *file_priv,
-				  struct vmw_dma_buffer *buf,
+				  struct vmw_buffer_object *buf,
				  struct vmw_fence_obj **out_fence,
				  struct drm_vmw_fence_rep __user *
				  user_fence_rep)
@@ -2522,7 +2521,7 @@ void vmw_kms_helper_resource_revert(struct vmw_validation_ctx *ctx)
 	struct vmw_resource *res = ctx->res;

 	vmw_kms_helper_buffer_revert(ctx->buf);
-	vmw_dmabuf_unreference(&ctx->buf);
+	vmw_bo_unreference(&ctx->buf);
 	vmw_resource_unreserve(res, false, NULL, 0);
 	mutex_unlock(&res->dev_priv->cmdbuf_mutex);
 }
@@ -2567,7 +2566,7 @@ int vmw_kms_helper_resource_prepare(struct vmw_resource *res,
 		if (ret)
 			goto out_unreserve;

-		ctx->buf = vmw_dmabuf_reference(res->backup);
+		ctx->buf = vmw_bo_reference(res->backup);
 	}
 	ret = vmw_resource_validate(res);
 	if (ret)
@@ -2600,7 +2599,7 @@ void vmw_kms_helper_resource_finish(struct vmw_validation_ctx *ctx,
 		vmw_kms_helper_buffer_finish(res->dev_priv, NULL, ctx->buf,
					     out_fence, NULL);

-	vmw_dmabuf_unreference(&ctx->buf);
+	vmw_bo_unreference(&ctx->buf);
 	vmw_resource_unreserve(res, false, NULL, 0);
 	mutex_unlock(&res->dev_priv->cmdbuf_mutex);
 }
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
index 6b7c012719f1..ff1caed38f94 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
@@ -90,7 +90,7 @@ struct vmw_kms_dirty {
 #define vmw_framebuffer_to_vfbs(x) \
	container_of(x, struct vmw_framebuffer_surface, base.base)
 #define vmw_framebuffer_to_vfbd(x) \
-	container_of(x, struct vmw_framebuffer_dmabuf, base.base)
+	container_of(x, struct vmw_framebuffer_bo, base.base)

 /**
  * Base class for framebuffers
@@ -102,7 +102,7 @@ struct vmw_framebuffer {
 	struct drm_framebuffer base;
 	int (*pin)(struct vmw_framebuffer *fb);
 	int (*unpin)(struct vmw_framebuffer *fb);
-	bool dmabuf;
+	bool bo;
 	struct ttm_base_object *user_obj;
 	uint32_t user_handle;
 };
@@ -117,15 +117,15 @@ struct vmw_clip_rect {
 struct vmw_framebuffer_surface {
 	struct vmw_framebuffer base;
 	struct vmw_surface *surface;
-	struct vmw_dma_buffer *buffer;
+	struct vmw_buffer_object *buffer;
 	struct list_head head;
-	bool is_dmabuf_proxy;  /* true if this is proxy surface for DMA buf */
+	bool is_bo_proxy;  /* true if this is proxy surface for DMA buf */
 };

-struct vmw_framebuffer_dmabuf {
+struct vmw_framebuffer_bo {
 	struct vmw_framebuffer base;
-	struct vmw_dma_buffer *buffer;
+	struct vmw_buffer_object *buffer;
 };

@@ -161,18 +161,18 @@ struct vmw_crtc_state {
  *
  * @base DRM plane object
  * @surf Display surface for STDU
- * @dmabuf display dmabuf for SOU
+ * @bo display bo for SOU
  * @content_fb_type Used by STDU.
- * @dmabuf_size Size of the dmabuf, used by Screen Object Display Unit + * @bo_size Size of the bo, used by Screen Object Display Unit * @pinned pin count for STDU display surface */ struct vmw_plane_state { struct drm_plane_state base; struct vmw_surface *surf; - struct vmw_dma_buffer *dmabuf; + struct vmw_buffer_object *bo; int content_fb_type; - unsigned long dmabuf_size; + unsigned long bo_size; int pinned; @@ -209,7 +209,7 @@ struct vmw_display_unit { struct drm_plane cursor; struct vmw_surface *cursor_surface; - struct vmw_dma_buffer *cursor_dmabuf; + struct vmw_buffer_object *cursor_bo; size_t cursor_age; int cursor_x; @@ -243,7 +243,7 @@ struct vmw_display_unit { struct vmw_validation_ctx { struct vmw_resource *res; - struct vmw_dma_buffer *buf; + struct vmw_buffer_object *buf; }; #define vmw_crtc_to_du(x) \ @@ -291,14 +291,14 @@ int vmw_kms_helper_dirty(struct vmw_private *dev_priv, struct vmw_kms_dirty *dirty); int vmw_kms_helper_buffer_prepare(struct vmw_private *dev_priv, - struct vmw_dma_buffer *buf, + struct vmw_buffer_object *buf, bool interruptible, bool validate_as_mob, bool for_cpu_blit); -void vmw_kms_helper_buffer_revert(struct vmw_dma_buffer *buf); +void vmw_kms_helper_buffer_revert(struct vmw_buffer_object *buf); void vmw_kms_helper_buffer_finish(struct vmw_private *dev_priv, struct drm_file *file_priv, - struct vmw_dma_buffer *buf, + struct vmw_buffer_object *buf, struct vmw_fence_obj **out_fence, struct drm_vmw_fence_rep __user * user_fence_rep); @@ -316,7 +316,7 @@ int vmw_kms_readback(struct vmw_private *dev_priv, uint32_t num_clips); struct vmw_framebuffer * vmw_kms_new_framebuffer(struct vmw_private *dev_priv, - struct vmw_dma_buffer *dmabuf, + struct vmw_buffer_object *bo, struct vmw_surface *surface, bool only_2d, const struct drm_mode_fb_cmd2 *mode_cmd); @@ -384,11 +384,11 @@ void vmw_du_connector_destroy_state(struct drm_connector *connector, */ int vmw_kms_ldu_init_display(struct vmw_private *dev_priv); int vmw_kms_ldu_close_display(struct vmw_private *dev_priv); -int vmw_kms_ldu_do_dmabuf_dirty(struct vmw_private *dev_priv, - struct vmw_framebuffer *framebuffer, - unsigned flags, unsigned color, - struct drm_clip_rect *clips, - unsigned num_clips, int increment); +int vmw_kms_ldu_do_bo_dirty(struct vmw_private *dev_priv, + struct vmw_framebuffer *framebuffer, + unsigned int flags, unsigned int color, + struct drm_clip_rect *clips, + unsigned int num_clips, int increment); int vmw_kms_update_proxy(struct vmw_resource *res, const struct drm_clip_rect *clips, unsigned num_clips, @@ -408,14 +408,14 @@ int vmw_kms_sou_do_surface_dirty(struct vmw_private *dev_priv, unsigned num_clips, int inc, struct vmw_fence_obj **out_fence, struct drm_crtc *crtc); -int vmw_kms_sou_do_dmabuf_dirty(struct vmw_private *dev_priv, - struct vmw_framebuffer *framebuffer, - struct drm_clip_rect *clips, - struct drm_vmw_rect *vclips, - unsigned num_clips, int increment, - bool interruptible, - struct vmw_fence_obj **out_fence, - struct drm_crtc *crtc); +int vmw_kms_sou_do_bo_dirty(struct vmw_private *dev_priv, + struct vmw_framebuffer *framebuffer, + struct drm_clip_rect *clips, + struct drm_vmw_rect *vclips, + unsigned int num_clips, int increment, + bool interruptible, + struct vmw_fence_obj **out_fence, + struct drm_crtc *crtc); int vmw_kms_sou_readback(struct vmw_private *dev_priv, struct drm_file *file_priv, struct vmw_framebuffer *vfb, diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c index 4a5907e3f560..a2dd9a829219 100644 --- 
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
index 4a5907e3f560..a2dd9a829219 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
@@ -547,11 +547,11 @@ int vmw_kms_ldu_close_display(struct vmw_private *dev_priv)
 }


-int vmw_kms_ldu_do_dmabuf_dirty(struct vmw_private *dev_priv,
-                                struct vmw_framebuffer *framebuffer,
-                                unsigned flags, unsigned color,
-                                struct drm_clip_rect *clips,
-                                unsigned num_clips, int increment)
+int vmw_kms_ldu_do_bo_dirty(struct vmw_private *dev_priv,
+                            struct vmw_framebuffer *framebuffer,
+                            unsigned int flags, unsigned int color,
+                            struct drm_clip_rect *clips,
+                            unsigned int num_clips, int increment)
 {
         size_t fifo_size;
         int i;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
index 222c9c2123a1..09420ef19ecb 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
@@ -38,7 +38,7 @@
 #define VMW_OVERLAY_CAP_MASK (SVGA_FIFO_CAP_VIDEO | SVGA_FIFO_CAP_ESCAPE)

 struct vmw_stream {
-        struct vmw_dma_buffer *buf;
+        struct vmw_buffer_object *buf;
         bool claimed;
         bool paused;
         struct drm_vmw_control_stream_arg saved;
@@ -94,7 +94,7 @@ static inline void fill_flush(struct vmw_escape_video_flush *cmd,
  * -ERESTARTSYS if interrupted by a signal.
  */
 static int vmw_overlay_send_put(struct vmw_private *dev_priv,
-                                struct vmw_dma_buffer *buf,
+                                struct vmw_buffer_object *buf,
                                 struct drm_vmw_control_stream_arg *arg,
                                 bool interruptible)
 {
@@ -225,16 +225,16 @@ static int vmw_overlay_send_stop(struct vmw_private *dev_priv,
  * used with GMRs instead of being locked to vram.
  */
 static int vmw_overlay_move_buffer(struct vmw_private *dev_priv,
-                                   struct vmw_dma_buffer *buf,
+                                   struct vmw_buffer_object *buf,
                                    bool pin, bool inter)
 {
         if (!pin)
-                return vmw_dmabuf_unpin(dev_priv, buf, inter);
+                return vmw_bo_unpin(dev_priv, buf, inter);

         if (dev_priv->active_display_unit == vmw_du_legacy)
-                return vmw_dmabuf_pin_in_vram(dev_priv, buf, inter);
+                return vmw_bo_pin_in_vram(dev_priv, buf, inter);

-        return vmw_dmabuf_pin_in_vram_or_gmr(dev_priv, buf, inter);
+        return vmw_bo_pin_in_vram_or_gmr(dev_priv, buf, inter);
 }

 /**
@@ -278,7 +278,7 @@ static int vmw_overlay_stop(struct vmw_private *dev_priv,
         }

         if (!pause) {
-                vmw_dmabuf_unreference(&stream->buf);
+                vmw_bo_unreference(&stream->buf);
                 stream->paused = false;
         } else {
                 stream->paused = true;
@@ -297,7 +297,7 @@ static int vmw_overlay_stop(struct vmw_private *dev_priv,
  * -ERESTARTSYS if interrupted.
  */
 static int vmw_overlay_update_stream(struct vmw_private *dev_priv,
-                                     struct vmw_dma_buffer *buf,
+                                     struct vmw_buffer_object *buf,
                                      struct drm_vmw_control_stream_arg *arg,
                                      bool interruptible)
 {
@@ -347,7 +347,7 @@ static int vmw_overlay_update_stream(struct vmw_private *dev_priv,
         }

         if (stream->buf != buf)
-                stream->buf = vmw_dmabuf_reference(buf);
+                stream->buf = vmw_bo_reference(buf);
         stream->saved = *arg;
         /* stream is no longer stopped/paused */
         stream->paused = false;
@@ -466,7 +466,7 @@ int vmw_overlay_ioctl(struct drm_device *dev, void *data,
         struct vmw_overlay *overlay = dev_priv->overlay_priv;
         struct drm_vmw_control_stream_arg *arg =
             (struct drm_vmw_control_stream_arg *)data;
-        struct vmw_dma_buffer *buf;
+        struct vmw_buffer_object *buf;
         struct vmw_resource *res;
         int ret;

@@ -484,13 +484,13 @@ int vmw_overlay_ioctl(struct drm_device *dev, void *data,
                 goto out_unlock;
         }

-        ret = vmw_user_dmabuf_lookup(tfile, arg->handle, &buf, NULL);
+        ret = vmw_user_bo_lookup(tfile, arg->handle, &buf, NULL);
         if (ret)
                 goto out_unlock;

         ret = vmw_overlay_update_stream(dev_priv, buf, arg, true);

-        vmw_dmabuf_unreference(&buf);
+        vmw_bo_unreference(&buf);

 out_unlock:
         mutex_unlock(&overlay->mutex);
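The overlay ioctl above shows the canonical object lifetime after the rename:
resolve a user handle to a refcounted buffer object, use it, then drop the
reference. A condensed sketch of that pattern, using only the two renamed
helpers from this patch (example_use_handle() is hypothetical):

        /* Illustrative sketch of the handle -> object pattern. */
        static int example_use_handle(struct ttm_object_file *tfile,
                                      uint32_t handle)
        {
                struct vmw_buffer_object *buf;
                int ret;

                ret = vmw_user_bo_lookup(tfile, handle, &buf, NULL);
                if (ret)
                        return ret;

                /* ... operate on buf->base, the embedded ttm_buffer_object ... */

                vmw_bo_unreference(&buf);       /* drops the lookup reference */
                return 0;
        }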
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index 6b3a942b18df..5aaf9ac65cba 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -35,9 +35,9 @@

 #define VMW_RES_EVICT_ERR_COUNT 10

-struct vmw_user_dma_buffer {
+struct vmw_user_buffer_object {
         struct ttm_prime_object prime;
-        struct vmw_dma_buffer dma;
+        struct vmw_buffer_object vbo;
 };

 struct vmw_bo_user_rep {
@@ -45,17 +45,18 @@ struct vmw_bo_user_rep {
         uint64_t map_handle;
 };

-static inline struct vmw_dma_buffer *
-vmw_dma_buffer(struct ttm_buffer_object *bo)
+static inline struct vmw_buffer_object *
+vmw_buffer_object(struct ttm_buffer_object *bo)
 {
-        return container_of(bo, struct vmw_dma_buffer, base);
+        return container_of(bo, struct vmw_buffer_object, base);
 }

-static inline struct vmw_user_dma_buffer *
-vmw_user_dma_buffer(struct ttm_buffer_object *bo)
+static inline struct vmw_user_buffer_object *
+vmw_user_buffer_object(struct ttm_buffer_object *bo)
 {
-        struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
-        return container_of(vmw_bo, struct vmw_user_dma_buffer, dma);
+        struct vmw_buffer_object *vmw_bo = vmw_buffer_object(bo);
+
+        return container_of(vmw_bo, struct vmw_user_buffer_object, vbo);
 }

 struct vmw_resource *vmw_resource_reference(struct vmw_resource *res)
@@ -116,7 +117,7 @@ static void vmw_resource_release(struct kref *kref)
                 res->backup_dirty = false;
                 list_del_init(&res->mob_head);
                 ttm_bo_unreserve(bo);
-                vmw_dmabuf_unreference(&res->backup);
+                vmw_bo_unreference(&res->backup);
         }

         if (likely(res->hw_destroy != NULL)) {
@@ -287,7 +288,7 @@ out_bad_resource:
 }

 /**
- * Helper function that looks either a surface or dmabuf.
+ * Helper function that looks up either a surface or a bo.
  *
  * The pointers pointed at by out_surf and out_buf need to be NULL.
  */
@@ -295,7 +296,7 @@ int vmw_user_lookup_handle(struct vmw_private *dev_priv,
                            struct ttm_object_file *tfile,
                            uint32_t handle,
                            struct vmw_surface **out_surf,
-                           struct vmw_dma_buffer **out_buf)
+                           struct vmw_buffer_object **out_buf)
 {
         struct vmw_resource *res;
         int ret;
@@ -311,7 +312,7 @@ int vmw_user_lookup_handle(struct vmw_private *dev_priv,
         }

         *out_surf = NULL;
-        ret = vmw_user_dmabuf_lookup(tfile, handle, out_buf, NULL);
+        ret = vmw_user_bo_lookup(tfile, handle, out_buf, NULL);
         return ret;
 }

@@ -320,14 +321,14 @@ int vmw_user_lookup_handle(struct vmw_private *dev_priv,
  */

 /**
- * vmw_dmabuf_acc_size - Calculate the pinned memory usage of buffers
+ * vmw_bo_acc_size - Calculate the pinned memory usage of buffers
  *
  * @dev_priv: Pointer to a struct vmw_private identifying the device.
  * @size: The requested buffer size.
  * @user: Whether this is an ordinary dma buffer or a user dma buffer.
  */
-static size_t vmw_dmabuf_acc_size(struct vmw_private *dev_priv, size_t size,
-                                  bool user)
+static size_t vmw_bo_acc_size(struct vmw_private *dev_priv, size_t size,
+                              bool user)
 {
         static size_t struct_size, user_struct_size;
         size_t num_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
@@ -337,9 +338,9 @@ static size_t vmw_dmabuf_acc_size(struct vmw_private *dev_priv, size_t size,
                 size_t backend_size = ttm_round_pot(vmw_tt_size);

                 struct_size = backend_size +
-                        ttm_round_pot(sizeof(struct vmw_dma_buffer));
+                        ttm_round_pot(sizeof(struct vmw_buffer_object));
                 user_struct_size = backend_size +
-                        ttm_round_pot(sizeof(struct vmw_user_dma_buffer));
+                        ttm_round_pot(sizeof(struct vmw_user_buffer_object));
         }

         if (dev_priv->map_mode == vmw_dma_alloc_coherent)
@@ -350,36 +351,36 @@ static size_t vmw_dmabuf_acc_size(struct vmw_private *dev_priv, size_t size,
                 page_array_size;
 }

-void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo)
+void vmw_bo_bo_free(struct ttm_buffer_object *bo)
 {
-        struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
+        struct vmw_buffer_object *vmw_bo = vmw_buffer_object(bo);

-        vmw_dma_buffer_unmap(vmw_bo);
+        vmw_buffer_object_unmap(vmw_bo);
         kfree(vmw_bo);
 }

-static void vmw_user_dmabuf_destroy(struct ttm_buffer_object *bo)
+static void vmw_user_bo_destroy(struct ttm_buffer_object *bo)
 {
-        struct vmw_user_dma_buffer *vmw_user_bo = vmw_user_dma_buffer(bo);
+        struct vmw_user_buffer_object *vmw_user_bo = vmw_user_buffer_object(bo);

-        vmw_dma_buffer_unmap(&vmw_user_bo->dma);
+        vmw_buffer_object_unmap(&vmw_user_bo->vbo);
         ttm_prime_object_kfree(vmw_user_bo, prime);
 }

-int vmw_dmabuf_init(struct vmw_private *dev_priv,
-                    struct vmw_dma_buffer *vmw_bo,
-                    size_t size, struct ttm_placement *placement,
-                    bool interruptible,
-                    void (*bo_free) (struct ttm_buffer_object *bo))
+int vmw_bo_init(struct vmw_private *dev_priv,
+                struct vmw_buffer_object *vmw_bo,
+                size_t size, struct ttm_placement *placement,
+                bool interruptible,
+                void (*bo_free)(struct ttm_buffer_object *bo))
 {
         struct ttm_bo_device *bdev = &dev_priv->bdev;
         size_t acc_size;
         int ret;
-        bool user = (bo_free == &vmw_user_dmabuf_destroy);
+        bool user = (bo_free == &vmw_user_bo_destroy);

-        BUG_ON(!bo_free && (!user && (bo_free != vmw_dmabuf_bo_free)));
+        WARN_ON_ONCE(!bo_free && (!user && (bo_free != vmw_bo_bo_free)));

-        acc_size = vmw_dmabuf_acc_size(dev_priv, size, user);
+        acc_size = vmw_bo_acc_size(dev_priv, size, user);
         memset(vmw_bo, 0, sizeof(*vmw_bo));

         INIT_LIST_HEAD(&vmw_bo->res_list);
@@ -391,9 +392,9 @@ int vmw_dmabuf_init(struct vmw_private *dev_priv,
         return ret;
 }

-static void vmw_user_dmabuf_release(struct ttm_base_object **p_base)
+static void vmw_user_bo_release(struct ttm_base_object **p_base)
 {
-        struct vmw_user_dma_buffer *vmw_user_bo;
+        struct vmw_user_buffer_object *vmw_user_bo;
         struct ttm_base_object *base = *p_base;
         struct ttm_buffer_object *bo;

@@ -402,21 +403,22 @@ static void vmw_user_dmabuf_release(struct ttm_base_object **p_base)
         if (unlikely(base == NULL))
                 return;

-        vmw_user_bo = container_of(base, struct vmw_user_dma_buffer,
+        vmw_user_bo = container_of(base, struct vmw_user_buffer_object,
                                    prime.base);
-        bo = &vmw_user_bo->dma.base;
+        bo = &vmw_user_bo->vbo.base;
         ttm_bo_unref(&bo);
 }

-static void vmw_user_dmabuf_ref_obj_release(struct ttm_base_object *base,
-                                            enum ttm_ref_type ref_type)
+static void vmw_user_bo_ref_obj_release(struct ttm_base_object *base,
+                                        enum ttm_ref_type ref_type)
 {
-        struct vmw_user_dma_buffer *user_bo;
-        user_bo = container_of(base, struct vmw_user_dma_buffer, prime.base);
+        struct vmw_user_buffer_object *user_bo;
+
+        user_bo = container_of(base, struct vmw_user_buffer_object, prime.base);

         switch (ref_type) {
         case TTM_REF_SYNCCPU_WRITE:
-                ttm_bo_synccpu_write_release(&user_bo->dma.base);
+                ttm_bo_synccpu_write_release(&user_bo->vbo.base);
                 break;
         default:
                 BUG();
@@ -424,7 +426,7 @@ static void vmw_user_dmabuf_ref_obj_release(struct ttm_base_object *base,
 }

 /**
- * vmw_user_dmabuf_alloc - Allocate a user dma buffer
+ * vmw_user_bo_alloc - Allocate a user dma buffer
 *
  * @dev_priv: Pointer to a struct device private.
  * @tfile: Pointer to a struct ttm_object_file on which to register the user
  * object.
  * @size: Size of the dma buffer.
  * @shareable: Boolean whether the buffer is shareable with other open files.
  * @handle: Pointer to where the handle value should be assigned.
- * @p_dma_buf: Pointer to where the refcounted struct vmw_dma_buffer pointer
+ * @p_vbo: Pointer to where the refcounted struct vmw_buffer_object pointer
  * should be assigned.
  */
-int vmw_user_dmabuf_alloc(struct vmw_private *dev_priv,
-                          struct ttm_object_file *tfile,
-                          uint32_t size,
-                          bool shareable,
-                          uint32_t *handle,
-                          struct vmw_dma_buffer **p_dma_buf,
-                          struct ttm_base_object **p_base)
+int vmw_user_bo_alloc(struct vmw_private *dev_priv,
+                      struct ttm_object_file *tfile,
+                      uint32_t size,
+                      bool shareable,
+                      uint32_t *handle,
+                      struct vmw_buffer_object **p_vbo,
+                      struct ttm_base_object **p_base)
 {
-        struct vmw_user_dma_buffer *user_bo;
+        struct vmw_user_buffer_object *user_bo;
         struct ttm_buffer_object *tmp;
         int ret;

@@ -453,28 +455,28 @@ int vmw_user_dmabuf_alloc(struct vmw_private *dev_priv,
                 return -ENOMEM;
         }

-        ret = vmw_dmabuf_init(dev_priv, &user_bo->dma, size,
-                              (dev_priv->has_mob) ?
-                              &vmw_sys_placement :
-                              &vmw_vram_sys_placement, true,
-                              &vmw_user_dmabuf_destroy);
+        ret = vmw_bo_init(dev_priv, &user_bo->vbo, size,
+                          (dev_priv->has_mob) ?
+                          &vmw_sys_placement :
+                          &vmw_vram_sys_placement, true,
+                          &vmw_user_bo_destroy);
         if (unlikely(ret != 0))
                 return ret;

-        tmp = ttm_bo_reference(&user_bo->dma.base);
+        tmp = ttm_bo_reference(&user_bo->vbo.base);
         ret = ttm_prime_object_init(tfile,
                                     size,
                                     &user_bo->prime,
                                     shareable,
                                     ttm_buffer_type,
-                                    &vmw_user_dmabuf_release,
-                                    &vmw_user_dmabuf_ref_obj_release);
+                                    &vmw_user_bo_release,
+                                    &vmw_user_bo_ref_obj_release);
         if (unlikely(ret != 0)) {
                 ttm_bo_unref(&tmp);
                 goto out_no_base_object;
         }

-        *p_dma_buf = &user_bo->dma;
+        *p_vbo = &user_bo->vbo;
         if (p_base) {
                 *p_base = &user_bo->prime.base;
                 kref_get(&(*p_base)->refcount);
@@ -486,21 +488,21 @@ out_no_base_object:
 }

 /**
- * vmw_user_dmabuf_verify_access - verify access permissions on this
+ * vmw_user_bo_verify_access - verify access permissions on this
  * buffer object.
  *
  * @bo: Pointer to the buffer object being accessed
  * @tfile: Identifying the caller.
  */
-int vmw_user_dmabuf_verify_access(struct ttm_buffer_object *bo,
+int vmw_user_bo_verify_access(struct ttm_buffer_object *bo,
                                   struct ttm_object_file *tfile)
 {
-        struct vmw_user_dma_buffer *vmw_user_bo;
+        struct vmw_user_buffer_object *vmw_user_bo;

-        if (unlikely(bo->destroy != vmw_user_dmabuf_destroy))
+        if (unlikely(bo->destroy != vmw_user_bo_destroy))
                 return -EPERM;

-        vmw_user_bo = vmw_user_dma_buffer(bo);
+        vmw_user_bo = vmw_user_buffer_object(bo);

         /* Check that the caller has opened the object. */
         if (likely(ttm_ref_object_exists(tfile, &vmw_user_bo->prime.base)))
@@ -511,7 +513,7 @@ int vmw_user_dmabuf_verify_access(struct ttm_buffer_object *bo,
 }

 /**
- * vmw_user_dmabuf_synccpu_grab - Grab a struct vmw_user_dma_buffer for cpu
+ * vmw_user_bo_synccpu_grab - Grab a struct vmw_user_buffer_object for cpu
  * access, idling previous GPU operations on the buffer and optionally
  * blocking it for further command submissions.
  *
@@ -521,11 +523,11 @@ int vmw_user_dmabuf_verify_access(struct ttm_buffer_object *bo,
  *
  * A blocking grab will be automatically released when @tfile is closed.
  */
-static int vmw_user_dmabuf_synccpu_grab(struct vmw_user_dma_buffer *user_bo,
-                                        struct ttm_object_file *tfile,
-                                        uint32_t flags)
+static int vmw_user_bo_synccpu_grab(struct vmw_user_buffer_object *user_bo,
+                                    struct ttm_object_file *tfile,
+                                    uint32_t flags)
 {
-        struct ttm_buffer_object *bo = &user_bo->dma.base;
+        struct ttm_buffer_object *bo = &user_bo->vbo.base;
         bool existed;
         int ret;

@@ -550,20 +552,20 @@ static int vmw_user_dmabuf_synccpu_grab(struct vmw_user_dma_buffer *user_bo,
         ret = ttm_ref_object_add(tfile, &user_bo->prime.base,
                                  TTM_REF_SYNCCPU_WRITE, &existed, false);
         if (ret != 0 || existed)
-                ttm_bo_synccpu_write_release(&user_bo->dma.base);
+                ttm_bo_synccpu_write_release(&user_bo->vbo.base);

         return ret;
 }

 /**
- * vmw_user_dmabuf_synccpu_release - Release a previous grab for CPU access,
+ * vmw_user_bo_synccpu_release - Release a previous grab for CPU access,
  * and unblock command submission on the buffer if blocked.
  *
  * @handle: Handle identifying the buffer object.
  * @tfile: Identifying the caller.
  * @flags: Flags indicating the type of release.
  */
-static int vmw_user_dmabuf_synccpu_release(uint32_t handle,
+static int vmw_user_bo_synccpu_release(uint32_t handle,
                                            struct ttm_object_file *tfile,
                                            uint32_t flags)
 {
@@ -575,7 +577,7 @@ static int vmw_user_dmabuf_synccpu_release(uint32_t handle,
 }

 /**
- * vmw_user_dmabuf_synccpu_release - ioctl function implementing the synccpu
+ * vmw_user_bo_synccpu_ioctl - ioctl function implementing the synccpu
  * functionality.
  *
  * @dev: Identifies the drm device.
@@ -585,13 +587,13 @@ static int vmw_user_dmabuf_synccpu_release(uint32_t handle,
  * This function checks the ioctl arguments for validity and calls the
  * relevant synccpu functions.
  */
-int vmw_user_dmabuf_synccpu_ioctl(struct drm_device *dev, void *data,
-                                  struct drm_file *file_priv)
+int vmw_user_bo_synccpu_ioctl(struct drm_device *dev, void *data,
+                              struct drm_file *file_priv)
 {
         struct drm_vmw_synccpu_arg *arg =
                 (struct drm_vmw_synccpu_arg *) data;
-        struct vmw_dma_buffer *dma_buf;
-        struct vmw_user_dma_buffer *user_bo;
+        struct vmw_buffer_object *vbo;
+        struct vmw_user_buffer_object *user_bo;
         struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
         struct ttm_base_object *buffer_base;
         int ret;
@@ -606,15 +608,15 @@ int vmw_user_dmabuf_synccpu_ioctl(struct drm_device *dev, void *data,

         switch (arg->op) {
         case drm_vmw_synccpu_grab:
-                ret = vmw_user_dmabuf_lookup(tfile, arg->handle, &dma_buf,
+                ret = vmw_user_bo_lookup(tfile, arg->handle, &vbo,
                                          &buffer_base);
                 if (unlikely(ret != 0))
                         return ret;

-                user_bo = container_of(dma_buf, struct vmw_user_dma_buffer,
-                                       dma);
-                ret = vmw_user_dmabuf_synccpu_grab(user_bo, tfile, arg->flags);
-                vmw_dmabuf_unreference(&dma_buf);
+                user_bo = container_of(vbo, struct vmw_user_buffer_object,
+                                       vbo);
+                ret = vmw_user_bo_synccpu_grab(user_bo, tfile, arg->flags);
+                vmw_bo_unreference(&vbo);
                 ttm_base_object_unref(&buffer_base);
                 if (unlikely(ret != 0 &&
                              ret != -ERESTARTSYS &&
                              ret != -EBUSY)) {
@@ -624,8 +626,8 @@ int vmw_user_dmabuf_synccpu_ioctl(struct drm_device *dev, void *data,
                 }
                 break;
         case drm_vmw_synccpu_release:
-                ret = vmw_user_dmabuf_synccpu_release(arg->handle, tfile,
-                                                      arg->flags);
+                ret = vmw_user_bo_synccpu_release(arg->handle, tfile,
+                                                  arg->flags);
                 if (unlikely(ret != 0)) {
                         DRM_ERROR("Failed synccpu release on handle 0x%08x.\n",
                                   (unsigned int) arg->handle);
@@ -640,15 +642,15 @@ int vmw_user_dmabuf_synccpu_ioctl(struct drm_device *dev, void *data,
         return 0;
 }

-int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
-                           struct drm_file *file_priv)
+int vmw_bo_alloc_ioctl(struct drm_device *dev, void *data,
+                       struct drm_file *file_priv)
 {
         struct vmw_private *dev_priv = vmw_priv(dev);
         union drm_vmw_alloc_dmabuf_arg *arg =
             (union drm_vmw_alloc_dmabuf_arg *)data;
         struct drm_vmw_alloc_dmabuf_req *req = &arg->req;
         struct drm_vmw_dmabuf_rep *rep = &arg->rep;
-        struct vmw_dma_buffer *dma_buf;
+        struct vmw_buffer_object *vbo;
         uint32_t handle;
         int ret;

@@ -656,27 +658,27 @@ int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
         if (unlikely(ret != 0))
                 return ret;

-        ret = vmw_user_dmabuf_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
-                                    req->size, false, &handle, &dma_buf,
-                                    NULL);
+        ret = vmw_user_bo_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
+                                req->size, false, &handle, &vbo,
+                                NULL);
         if (unlikely(ret != 0))
-                goto out_no_dmabuf;
+                goto out_no_bo;

         rep->handle = handle;
-        rep->map_handle = drm_vma_node_offset_addr(&dma_buf->base.vma_node);
+        rep->map_handle = drm_vma_node_offset_addr(&vbo->base.vma_node);
         rep->cur_gmr_id = handle;
         rep->cur_gmr_offset = 0;

-        vmw_dmabuf_unreference(&dma_buf);
+        vmw_bo_unreference(&vbo);

-out_no_dmabuf:
+out_no_bo:
         ttm_read_unlock(&dev_priv->reservation_sem);

         return ret;
 }

-int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data,
-                           struct drm_file *file_priv)
+int vmw_bo_unref_ioctl(struct drm_device *dev, void *data,
+                       struct drm_file *file_priv)
 {
         struct drm_vmw_unref_dmabuf_arg *arg =
             (struct drm_vmw_unref_dmabuf_arg *)data;

@@ -686,11 +688,11 @@ int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data,
                                  TTM_REF_USAGE);
 }

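As the commit message notes, the ioctl argument structures deliberately keep
their old dmabuf names, so existing userspace keeps working. A userspace-side
sketch of the alloc/unref pair, assuming libdrm's drmCommandWriteRead()/
drmCommandWrite() and the unchanged vmwgfx UAPI header (this snippet is not
part of the patch):

        #include <xf86drm.h>
        #include "vmwgfx_drm.h"

        /* Allocate a buffer object and return its handle. */
        static int example_alloc_bo(int fd, uint32_t size, uint32_t *handle)
        {
                union drm_vmw_alloc_dmabuf_arg arg = { .req = { .size = size } };
                int ret;

                ret = drmCommandWriteRead(fd, DRM_VMW_ALLOC_DMABUF,
                                          &arg, sizeof(arg));
                if (ret)
                        return ret;

                *handle = arg.rep.handle;
                return 0;
        }

        /* Drop the usage reference that the alloc ioctl created. */
        static int example_free_bo(int fd, uint32_t handle)
        {
                struct drm_vmw_unref_dmabuf_arg arg = { .handle = handle };

                return drmCommandWrite(fd, DRM_VMW_UNREF_DMABUF,
                                       &arg, sizeof(arg));
        }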
-int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
-                           uint32_t handle, struct vmw_dma_buffer **out,
+int vmw_user_bo_lookup(struct ttm_object_file *tfile,
+                       uint32_t handle, struct vmw_buffer_object **out,
                            struct ttm_base_object **p_base)
 {
-        struct vmw_user_dma_buffer *vmw_user_bo;
+        struct vmw_user_buffer_object *vmw_user_bo;
         struct ttm_base_object *base;

         base = ttm_base_object_lookup(tfile, handle);
@@ -707,28 +709,28 @@ int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
                 return -EINVAL;
         }

-        vmw_user_bo = container_of(base, struct vmw_user_dma_buffer,
+        vmw_user_bo = container_of(base, struct vmw_user_buffer_object,
                                    prime.base);
-        (void)ttm_bo_reference(&vmw_user_bo->dma.base);
+        (void)ttm_bo_reference(&vmw_user_bo->vbo.base);
         if (p_base)
                 *p_base = base;
         else
                 ttm_base_object_unref(&base);
-        *out = &vmw_user_bo->dma;
+        *out = &vmw_user_bo->vbo;

         return 0;
 }

-int vmw_user_dmabuf_reference(struct ttm_object_file *tfile,
-                              struct vmw_dma_buffer *dma_buf,
+int vmw_user_bo_reference(struct ttm_object_file *tfile,
+                          struct vmw_buffer_object *vbo,
                               uint32_t *handle)
 {
-        struct vmw_user_dma_buffer *user_bo;
+        struct vmw_user_buffer_object *user_bo;

-        if (dma_buf->base.destroy != vmw_user_dmabuf_destroy)
+        if (vbo->base.destroy != vmw_user_bo_destroy)
                 return -EINVAL;

-        user_bo = container_of(dma_buf, struct vmw_user_dma_buffer, dma);
+        user_bo = container_of(vbo, struct vmw_user_buffer_object, vbo);

         *handle = user_bo->prime.base.hash.key;
         return ttm_ref_object_add(tfile, &user_bo->prime.base,
@@ -743,7 +745,7 @@ int vmw_user_dmabuf_reference(struct ttm_object_file *tfile,
  * @args: Pointer to a struct drm_mode_create_dumb structure
  *
  * This is a driver callback for the core drm create_dumb functionality.
- * Note that this is very similar to the vmw_dmabuf_alloc ioctl, except
+ * Note that this is very similar to the vmw_bo_alloc ioctl, except
  * that the arguments have a different format.
  */
 int vmw_dumb_create(struct drm_file *file_priv,
@@ -751,7 +753,7 @@ int vmw_dumb_create(struct drm_file *file_priv,
                     struct drm_mode_create_dumb *args)
 {
         struct vmw_private *dev_priv = vmw_priv(dev);
-        struct vmw_dma_buffer *dma_buf;
+        struct vmw_buffer_object *vbo;
         int ret;

         args->pitch = args->width * ((args->bpp + 7) / 8);
@@ -761,14 +763,14 @@ int vmw_dumb_create(struct drm_file *file_priv,
         if (unlikely(ret != 0))
                 return ret;

-        ret = vmw_user_dmabuf_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
+        ret = vmw_user_bo_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
                                     args->size, false, &args->handle,
-                                    &dma_buf, NULL);
+                                    &vbo, NULL);
         if (unlikely(ret != 0))
-                goto out_no_dmabuf;
+                goto out_no_bo;

-        vmw_dmabuf_unreference(&dma_buf);
-out_no_dmabuf:
+        vmw_bo_unreference(&vbo);
+out_no_bo:
         ttm_read_unlock(&dev_priv->reservation_sem);
         return ret;
 }
@@ -788,15 +790,15 @@ int vmw_dumb_map_offset(struct drm_file *file_priv,
                         uint64_t *offset)
 {
         struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
-        struct vmw_dma_buffer *out_buf;
+        struct vmw_buffer_object *out_buf;
         int ret;

-        ret = vmw_user_dmabuf_lookup(tfile, handle, &out_buf, NULL);
+        ret = vmw_user_bo_lookup(tfile, handle, &out_buf, NULL);
         if (ret != 0)
                 return -EINVAL;

         *offset = drm_vma_node_offset_addr(&out_buf->base.vma_node);
-        vmw_dmabuf_unreference(&out_buf);
+        vmw_bo_unreference(&out_buf);
         return 0;
 }

@@ -829,7 +831,7 @@ static int vmw_resource_buf_alloc(struct vmw_resource *res,
 {
         unsigned long size =
                 (res->backup_size + PAGE_SIZE - 1) & PAGE_MASK;
-        struct vmw_dma_buffer *backup;
+        struct vmw_buffer_object *backup;
         int ret;

         if (likely(res->backup)) {
@@ -841,16 +843,16 @@ static int vmw_resource_buf_alloc(struct vmw_resource *res,
         if (unlikely(!backup))
                 return -ENOMEM;

-        ret = vmw_dmabuf_init(res->dev_priv, backup, res->backup_size,
+        ret = vmw_bo_init(res->dev_priv, backup, res->backup_size,
                               res->func->backup_placement,
                               interruptible,
-                              &vmw_dmabuf_bo_free);
+                              &vmw_bo_bo_free);
         if (unlikely(ret != 0))
-                goto out_no_dmabuf;
+                goto out_no_bo;

         res->backup = backup;

-out_no_dmabuf:
+out_no_bo:
         return ret;
 }

@@ -919,7 +921,7 @@ out_bind_failed:
  */
 void vmw_resource_unreserve(struct vmw_resource *res,
                             bool switch_backup,
-                            struct vmw_dma_buffer *new_backup,
+                            struct vmw_buffer_object *new_backup,
                             unsigned long new_backup_offset)
 {
         struct vmw_private *dev_priv = res->dev_priv;
@@ -931,11 +933,11 @@ void vmw_resource_unreserve(struct vmw_resource *res,
         if (res->backup) {
                 lockdep_assert_held(&res->backup->base.resv->lock.base);
                 list_del_init(&res->mob_head);
-                vmw_dmabuf_unreference(&res->backup);
+                vmw_bo_unreference(&res->backup);
         }

         if (new_backup) {
-                res->backup = vmw_dmabuf_reference(new_backup);
+                res->backup = vmw_bo_reference(new_backup);
                 lockdep_assert_held(&new_backup->base.resv->lock.base);
                 list_add_tail(&res->mob_head, &new_backup->res_list);
         } else {
@@ -1007,7 +1009,7 @@ out_no_validate:
 out_no_reserve:
         ttm_bo_unref(&val_buf->bo);
         if (backup_dirty)
-                vmw_dmabuf_unreference(&res->backup);
+                vmw_bo_unreference(&res->backup);

         return ret;
 }
@@ -1171,7 +1173,7 @@ int vmw_resource_validate(struct vmw_resource *res)
                 goto out_no_validate;
         else if (!res->func->needs_backup && res->backup) {
                 list_del_init(&res->mob_head);
-                vmw_dmabuf_unreference(&res->backup);
+                vmw_bo_unreference(&res->backup);
         }

         return 0;
@@ -1230,22 +1232,22 @@ void vmw_fence_single_bo(struct ttm_buffer_object *bo,
 void vmw_resource_move_notify(struct ttm_buffer_object *bo,
                               struct ttm_mem_reg *mem)
 {
-        struct vmw_dma_buffer *dma_buf;
+        struct vmw_buffer_object *vbo;

         if (mem == NULL)
                 return;

-        if (bo->destroy != vmw_dmabuf_bo_free &&
-            bo->destroy != vmw_user_dmabuf_destroy)
+        if (bo->destroy != vmw_bo_bo_free &&
+            bo->destroy != vmw_user_bo_destroy)
                 return;

-        dma_buf = container_of(bo, struct vmw_dma_buffer, base);
+        vbo = container_of(bo, struct vmw_buffer_object, base);

         /*
          * Kill any cached kernel maps before move. An optimization could
          * be to do this iff source or destination memory type is VRAM.
          */
-        vmw_dma_buffer_unmap(dma_buf);
+        vmw_buffer_object_unmap(vbo);

         if (mem->mem_type != VMW_PL_MOB) {
                 struct vmw_resource *res, *n;
@@ -1254,7 +1256,7 @@ void vmw_resource_move_notify(struct ttm_buffer_object *bo,
                 val_buf.bo = bo;
                 val_buf.shared = false;

-                list_for_each_entry_safe(res, n, &dma_buf->res_list, mob_head) {
+                list_for_each_entry_safe(res, n, &vbo->res_list, mob_head) {
                         if (unlikely(res->func->unbind == NULL))
                                 continue;

@@ -1277,12 +1279,12 @@ void vmw_resource_move_notify(struct ttm_buffer_object *bo,
  */
 void vmw_resource_swap_notify(struct ttm_buffer_object *bo)
 {
-        if (bo->destroy != vmw_dmabuf_bo_free &&
-            bo->destroy != vmw_user_dmabuf_destroy)
+        if (bo->destroy != vmw_bo_bo_free &&
+            bo->destroy != vmw_user_bo_destroy)
                 return;

         /* Kill any cached kernel maps before swapout */
-        vmw_dma_buffer_unmap(vmw_dma_buffer(bo));
+        vmw_buffer_object_unmap(vmw_buffer_object(bo));
 }


@@ -1294,7 +1296,7 @@ void vmw_resource_swap_notify(struct ttm_buffer_object *bo)
  * Read back cached states from the device if they exist. This function
  * assumes binding_mutex is held.
  */
-int vmw_query_readback_all(struct vmw_dma_buffer *dx_query_mob)
+int vmw_query_readback_all(struct vmw_buffer_object *dx_query_mob)
 {
         struct vmw_resource *dx_query_ctx;
         struct vmw_private *dev_priv;
@@ -1344,7 +1346,7 @@ int vmw_query_readback_all(struct vmw_dma_buffer *dx_query_mob)
 void vmw_query_move_notify(struct ttm_buffer_object *bo,
                            struct ttm_mem_reg *mem)
 {
-        struct vmw_dma_buffer *dx_query_mob;
+        struct vmw_buffer_object *dx_query_mob;
         struct ttm_bo_device *bdev = bo->bdev;
         struct vmw_private *dev_priv;

@@ -1353,7 +1355,7 @@ void vmw_query_move_notify(struct ttm_buffer_object *bo,

         mutex_lock(&dev_priv->binding_mutex);

-        dx_query_mob = container_of(bo, struct vmw_dma_buffer, base);
+        dx_query_mob = container_of(bo, struct vmw_buffer_object, base);
         if (mem == NULL || !dx_query_mob || !dx_query_mob->dx_query_ctx) {
                 mutex_unlock(&dev_priv->binding_mutex);
                 return;
@@ -1481,7 +1483,7 @@ int vmw_resource_pin(struct vmw_resource *res, bool interruptible)
                 goto out_no_reserve;

         if (res->pin_count == 0) {
-                struct vmw_dma_buffer *vbo = NULL;
+                struct vmw_buffer_object *vbo = NULL;

                 if (res->backup) {
                         vbo = res->backup;
@@ -1539,7 +1541,7 @@ void vmw_resource_unpin(struct vmw_resource *res)
         WARN_ON(res->pin_count == 0);

         if (--res->pin_count == 0 && res->backup) {
-                struct vmw_dma_buffer *vbo = res->backup;
+                struct vmw_buffer_object *vbo = res->backup;

                 (void) ttm_bo_reserve(&vbo->base, false, false, NULL);
                 vmw_bo_pin_reserved(vbo, false);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
index 9798640cbfcd..74dfd4621b7e 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
@@ -66,7 +66,7 @@ struct vmw_kms_sou_readback_blit {
         SVGAFifoCmdBlitScreenToGMRFB body;
 };

-struct vmw_kms_sou_dmabuf_blit {
+struct vmw_kms_sou_bo_blit {
         uint32 header;
         SVGAFifoCmdBlitGMRFBToScreen body;
 };
@@ -83,7 +83,7 @@ struct vmw_screen_object_unit {
         struct vmw_display_unit base;

         unsigned long buffer_size; /**< Size of allocated buffer */
-        struct vmw_dma_buffer *buffer; /**< Backing store buffer */
+        struct vmw_buffer_object *buffer; /**< Backing store buffer */

         bool defined;
 };
@@ -240,8 +240,8 @@ static void vmw_sou_crtc_mode_set_nofb(struct drm_crtc *crtc)
         }

         if (vfb) {
-                sou->buffer = vps->dmabuf;
-                sou->buffer_size = vps->dmabuf_size;
+                sou->buffer = vps->bo;
+                sou->buffer_size = vps->bo_size;

                 ret = vmw_sou_fifo_create(dev_priv, sou, crtc->x, crtc->y,
                                           &crtc->mode);
@@ -408,10 +408,10 @@ vmw_sou_primary_plane_cleanup_fb(struct drm_plane *plane,
         struct drm_crtc *crtc = plane->state->crtc ?
                 plane->state->crtc : old_state->crtc;

-        if (vps->dmabuf)
-                vmw_dmabuf_unpin(vmw_priv(crtc->dev), vps->dmabuf, false);
-        vmw_dmabuf_unreference(&vps->dmabuf);
-        vps->dmabuf_size = 0;
+        if (vps->bo)
+                vmw_bo_unpin(vmw_priv(crtc->dev), vps->bo, false);
+        vmw_bo_unreference(&vps->bo);
+        vps->bo_size = 0;

         vmw_du_plane_cleanup_fb(plane, old_state);
 }
@@ -440,8 +440,8 @@ vmw_sou_primary_plane_prepare_fb(struct drm_plane *plane,


         if (!new_fb) {
-                vmw_dmabuf_unreference(&vps->dmabuf);
-                vps->dmabuf_size = 0;
+                vmw_bo_unreference(&vps->bo);
+                vps->bo_size = 0;

                 return 0;
         }
@@ -449,22 +449,22 @@ vmw_sou_primary_plane_prepare_fb(struct drm_plane *plane,
         size = new_state->crtc_w * new_state->crtc_h * 4;
         dev_priv = vmw_priv(crtc->dev);

-        if (vps->dmabuf) {
-                if (vps->dmabuf_size == size) {
+        if (vps->bo) {
+                if (vps->bo_size == size) {
                         /*
                          * Note that this might temporarily up the pin-count
                          * to 2, until cleanup_fb() is called.
                          */
-                        return vmw_dmabuf_pin_in_vram(dev_priv, vps->dmabuf,
+                        return vmw_bo_pin_in_vram(dev_priv, vps->bo,
                                                       true);
                 }

-                vmw_dmabuf_unreference(&vps->dmabuf);
-                vps->dmabuf_size = 0;
+                vmw_bo_unreference(&vps->bo);
+                vps->bo_size = 0;
         }

-        vps->dmabuf = kzalloc(sizeof(*vps->dmabuf), GFP_KERNEL);
-        if (!vps->dmabuf)
+        vps->bo = kzalloc(sizeof(*vps->bo), GFP_KERNEL);
+        if (!vps->bo)
                 return -ENOMEM;

         vmw_svga_enable(dev_priv);
@@ -473,22 +473,22 @@ vmw_sou_primary_plane_prepare_fb(struct drm_plane *plane,
          * resume the overlays, this is preferred to failing to alloc.
          */
         vmw_overlay_pause_all(dev_priv);
-        ret = vmw_dmabuf_init(dev_priv, vps->dmabuf, size,
+        ret = vmw_bo_init(dev_priv, vps->bo, size,
                               &vmw_vram_ne_placement,
-                              false, &vmw_dmabuf_bo_free);
+                              false, &vmw_bo_bo_free);
         vmw_overlay_resume_all(dev_priv);
         if (ret) {
-                vps->dmabuf = NULL; /* vmw_dmabuf_init frees on error */
+                vps->bo = NULL; /* vmw_bo_init frees on error */
                 return ret;
         }

-        vps->dmabuf_size = size;
+        vps->bo_size = size;

         /*
          * TTM already thinks the buffer is pinned, but make sure the
          * pin_count is upped.
          */
-        return vmw_dmabuf_pin_in_vram(dev_priv, vps->dmabuf, true);
+        return vmw_bo_pin_in_vram(dev_priv, vps->bo, true);
 }


@@ -512,10 +512,10 @@ vmw_sou_primary_plane_atomic_update(struct drm_plane *plane,
                 vclips.w = crtc->mode.hdisplay;
                 vclips.h = crtc->mode.vdisplay;

-                if (vfb->dmabuf)
-                        ret = vmw_kms_sou_do_dmabuf_dirty(dev_priv, vfb, NULL,
-                                                          &vclips, 1, 1, true,
-                                                          &fence, crtc);
+                if (vfb->bo)
+                        ret = vmw_kms_sou_do_bo_dirty(dev_priv, vfb, NULL,
+                                                      &vclips, 1, 1, true,
+                                                      &fence, crtc);
                 else
                         ret = vmw_kms_sou_do_surface_dirty(dev_priv, vfb, NULL,
                                                            &vclips, NULL, 0, 0,
@@ -775,11 +775,11 @@ int vmw_kms_sou_init_display(struct vmw_private *dev_priv)
         return 0;
 }

-static int do_dmabuf_define_gmrfb(struct vmw_private *dev_priv,
+static int do_bo_define_gmrfb(struct vmw_private *dev_priv,
                                   struct vmw_framebuffer *framebuffer)
 {
-        struct vmw_dma_buffer *buf =
-                container_of(framebuffer, struct vmw_framebuffer_dmabuf,
+        struct vmw_buffer_object *buf =
+                container_of(framebuffer, struct vmw_framebuffer_bo,
                              base)->buffer;
         int depth = framebuffer->base.format->depth;
         struct {
@@ -970,13 +970,13 @@ int vmw_kms_sou_do_surface_dirty(struct vmw_private *dev_priv,
 }

 /**
- * vmw_sou_dmabuf_fifo_commit - Callback to submit a set of readback clips.
+ * vmw_sou_bo_fifo_commit - Callback to submit a set of readback clips.
  *
  * @dirty: The closure structure.
  *
  * Commits a previously built command buffer of readback clips.
  */
-static void vmw_sou_dmabuf_fifo_commit(struct vmw_kms_dirty *dirty)
+static void vmw_sou_bo_fifo_commit(struct vmw_kms_dirty *dirty)
 {
         if (!dirty->num_hits) {
                 vmw_fifo_commit(dirty->dev_priv, 0);
@@ -984,20 +984,20 @@ static void vmw_sou_dmabuf_fifo_commit(struct vmw_kms_dirty *dirty)
         }

         vmw_fifo_commit(dirty->dev_priv,
-                        sizeof(struct vmw_kms_sou_dmabuf_blit) *
+                        sizeof(struct vmw_kms_sou_bo_blit) *
                         dirty->num_hits);
 }

 /**
- * vmw_sou_dmabuf_clip - Callback to encode a readback cliprect.
+ * vmw_sou_bo_clip - Callback to encode a readback cliprect.
  *
  * @dirty: The closure structure
  *
  * Encodes a BLIT_GMRFB_TO_SCREEN cliprect.
  */
-static void vmw_sou_dmabuf_clip(struct vmw_kms_dirty *dirty)
+static void vmw_sou_bo_clip(struct vmw_kms_dirty *dirty)
 {
-        struct vmw_kms_sou_dmabuf_blit *blit = dirty->cmd;
+        struct vmw_kms_sou_bo_blit *blit = dirty->cmd;

         blit += dirty->num_hits;
         blit->header = SVGA_CMD_BLIT_GMRFB_TO_SCREEN;
@@ -1012,10 +1012,10 @@ static void vmw_sou_dmabuf_clip(struct vmw_kms_dirty *dirty)
 }

 /**
- * vmw_kms_do_dmabuf_dirty - Dirty part of a dma-buffer backed framebuffer
+ * vmw_kms_do_bo_dirty - Dirty part of a buffer-object backed framebuffer
  *
  * @dev_priv: Pointer to the device private structure.
- * @framebuffer: Pointer to the dma-buffer backed framebuffer.
+ * @framebuffer: Pointer to the buffer-object backed framebuffer.
  * @clips: Array of clip rects.
  * @vclips: Alternate array of clip rects. Either @clips or @vclips must
  * be NULL.
@@ -1025,12 +1025,12 @@ static void vmw_sou_dmabuf_clip(struct vmw_kms_dirty *dirty)
  * @out_fence: If non-NULL, will return a ref-counted pointer to a
  * struct vmw_fence_obj. The returned fence pointer may be NULL in which
  * case the device has already synchronized.
- * @crtc: If crtc is passed, perform dmabuf dirty on that crtc only.
+ * @crtc: If crtc is passed, perform bo dirty on that crtc only.
  *
  * Returns 0 on success, negative error code on failure. -ERESTARTSYS if
  * interrupted.
  */
-int vmw_kms_sou_do_dmabuf_dirty(struct vmw_private *dev_priv,
+int vmw_kms_sou_do_bo_dirty(struct vmw_private *dev_priv,
                                 struct vmw_framebuffer *framebuffer,
                                 struct drm_clip_rect *clips,
                                 struct drm_vmw_rect *vclips,
@@ -1039,8 +1039,8 @@ int vmw_kms_sou_do_dmabuf_dirty(struct vmw_private *dev_priv,
                                 struct vmw_fence_obj **out_fence,
                                 struct drm_crtc *crtc)
 {
-        struct vmw_dma_buffer *buf =
-                container_of(framebuffer, struct vmw_framebuffer_dmabuf,
+        struct vmw_buffer_object *buf =
+                container_of(framebuffer, struct vmw_framebuffer_bo,
                              base)->buffer;
         struct vmw_kms_dirty dirty;
         int ret;
@@ -1050,14 +1050,14 @@ int vmw_kms_sou_do_dmabuf_dirty(struct vmw_private *dev_priv,
         if (ret)
                 return ret;

-        ret = do_dmabuf_define_gmrfb(dev_priv, framebuffer);
+        ret = do_bo_define_gmrfb(dev_priv, framebuffer);
         if (unlikely(ret != 0))
                 goto out_revert;

         dirty.crtc = crtc;
-        dirty.fifo_commit = vmw_sou_dmabuf_fifo_commit;
-        dirty.clip = vmw_sou_dmabuf_clip;
-        dirty.fifo_reserve_size = sizeof(struct vmw_kms_sou_dmabuf_blit) *
+        dirty.fifo_commit = vmw_sou_bo_fifo_commit;
+        dirty.clip = vmw_sou_bo_clip;
+        dirty.fifo_reserve_size = sizeof(struct vmw_kms_sou_bo_blit) *
                 num_clips;
         ret = vmw_kms_helper_dirty(dev_priv, framebuffer, clips, vclips,
                                    0, 0, num_clips, increment, &dirty);
@@ -1116,12 +1116,12 @@ static void vmw_sou_readback_clip(struct vmw_kms_dirty *dirty)

 /**
  * vmw_kms_sou_readback - Perform a readback from the screen object system to
- * a dma-buffer backed framebuffer.
+ * a buffer-object backed framebuffer.
  *
  * @dev_priv: Pointer to the device private structure.
  * @file_priv: Pointer to a struct drm_file identifying the caller.
  * Must be set to NULL if @user_fence_rep is NULL.
- * @vfb: Pointer to the dma-buffer backed framebuffer.
+ * @vfb: Pointer to the buffer-object backed framebuffer.
  * @user_fence_rep: User-space provided structure for fence information.
  * Must be set to non-NULL if @file_priv is non-NULL.
  * @vclips: Array of clip rects.
@@ -1139,8 +1139,8 @@ int vmw_kms_sou_readback(struct vmw_private *dev_priv,
                          uint32_t num_clips,
                          struct drm_crtc *crtc)
 {
-        struct vmw_dma_buffer *buf =
-                container_of(vfb, struct vmw_framebuffer_dmabuf, base)->buffer;
+        struct vmw_buffer_object *buf =
+                container_of(vfb, struct vmw_framebuffer_bo, base)->buffer;
         struct vmw_kms_dirty dirty;
         int ret;

@@ -1149,7 +1149,7 @@ int vmw_kms_sou_readback(struct vmw_private *dev_priv,
         if (ret)
                 return ret;

-        ret = do_dmabuf_define_gmrfb(dev_priv, vfb);
+        ret = do_bo_define_gmrfb(dev_priv, vfb);
         if (unlikely(ret != 0))
                 goto out_revert;

diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
index 73b8e9a16368..f6c939f3ff5e 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
@@ -159,7 +159,7 @@ static int vmw_gb_shader_init(struct vmw_private *dev_priv,
                               SVGA3dShaderType type,
                               uint8_t num_input_sig,
                               uint8_t num_output_sig,
-                              struct vmw_dma_buffer *byte_code,
+                              struct vmw_buffer_object *byte_code,
                               void (*res_free) (struct vmw_resource *res))
 {
         struct vmw_shader *shader = vmw_res_to_shader(res);
@@ -178,7 +178,7 @@ static int vmw_gb_shader_init(struct vmw_private *dev_priv,
         res->backup_size = size;
         if (byte_code) {
-                res->backup = vmw_dmabuf_reference(byte_code);
+                res->backup = vmw_bo_reference(byte_code);
                 res->backup_offset = offset;
         }
         shader->size = size;
@@ -723,7 +723,7 @@ int vmw_shader_destroy_ioctl(struct drm_device *dev, void *data,
 }

 static int vmw_user_shader_alloc(struct vmw_private *dev_priv,
-                                 struct vmw_dma_buffer *buffer,
+                                 struct vmw_buffer_object *buffer,
                                  size_t shader_size,
                                  size_t offset,
                                  SVGA3dShaderType shader_type,
@@ -801,7 +801,7 @@ out:


 static struct vmw_resource *vmw_shader_alloc(struct vmw_private *dev_priv,
-                                             struct vmw_dma_buffer *buffer,
+                                             struct vmw_buffer_object *buffer,
                                              size_t shader_size,
                                              size_t offset,
                                              SVGA3dShaderType shader_type)
@@ -862,12 +862,12 @@ static int vmw_shader_define(struct drm_device *dev, struct drm_file *file_priv,
 {
         struct vmw_private *dev_priv = vmw_priv(dev);
         struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
-        struct vmw_dma_buffer *buffer = NULL;
+        struct vmw_buffer_object *buffer = NULL;
         SVGA3dShaderType shader_type;
         int ret;

         if (buffer_handle != SVGA3D_INVALID_ID) {
-                ret = vmw_user_dmabuf_lookup(tfile, buffer_handle,
+                ret = vmw_user_bo_lookup(tfile, buffer_handle,
                                              &buffer, NULL);
                 if (unlikely(ret != 0)) {
                         DRM_ERROR("Could not find buffer for shader "
@@ -906,7 +906,7 @@ static int vmw_shader_define(struct drm_device *dev, struct drm_file *file_priv,
         ttm_read_unlock(&dev_priv->reservation_sem);

 out_bad_arg:
-        vmw_dmabuf_unreference(&buffer);
+        vmw_bo_unreference(&buffer);
         return ret;
 }

@@ -983,7 +983,7 @@ int vmw_compat_shader_add(struct vmw_private *dev_priv,
                           struct list_head *list)
 {
         struct ttm_operation_ctx ctx = { false, true };
-        struct vmw_dma_buffer *buf;
+        struct vmw_buffer_object *buf;
         struct ttm_bo_kmap_obj map;
         bool is_iomem;
         int ret;
@@ -997,8 +997,8 @@ int vmw_compat_shader_add(struct vmw_private *dev_priv,
         if (unlikely(!buf))
                 return -ENOMEM;

-        ret = vmw_dmabuf_init(dev_priv, buf, size, &vmw_sys_ne_placement,
-                              true, vmw_dmabuf_bo_free);
+        ret = vmw_bo_init(dev_priv, buf, size, &vmw_sys_ne_placement,
+                          true, vmw_bo_bo_free);
         if (unlikely(ret != 0))
                 goto out;

@@ -1031,7 +1031,7 @@ int vmw_compat_shader_add(struct vmw_private *dev_priv,
                                                  res, list);
         vmw_resource_unreference(&res);
 no_reserve:
-        vmw_dmabuf_unreference(&buf);
+        vmw_bo_unreference(&buf);
 out:
         return ret;
 }
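vmw_compat_shader_add() above illustrates how shader bytecode reaches the
device after the rename: initialize a buffer object, kmap it, and copy the
bytecode in. A condensed sketch of that pattern, using only calls visible in
this diff plus TTM's standard reserve/kmap helpers; example_upload_bytecode()
is hypothetical and its error paths are trimmed:

        static int example_upload_bytecode(struct vmw_private *dev_priv,
                                           struct vmw_buffer_object *buf,
                                           const void *bytecode, size_t size)
        {
                struct ttm_bo_kmap_obj map;
                bool is_iomem;
                int ret;

                /* vmw_bo_init() frees @buf itself on failure. */
                ret = vmw_bo_init(dev_priv, buf, size, &vmw_sys_ne_placement,
                                  true, vmw_bo_bo_free);
                if (ret)
                        return ret;

                ret = ttm_bo_reserve(&buf->base, false, true, NULL);
                if (ret)
                        return ret;

                /* Map the backing pages and copy the bytecode across. */
                ret = ttm_bo_kmap(&buf->base, 0,
                                  PAGE_ALIGN(size) >> PAGE_SHIFT, &map);
                if (!ret) {
                        memcpy(ttm_kmap_obj_virtual(&map, &is_iomem),
                               bytecode, size);
                        ttm_bo_kunmap(&map);
                }
                ttm_bo_unreserve(&buf->base);
                return ret;
        }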
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
index 152e96cb1c01..537df9034008 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
@@ -44,7 +44,7 @@
 enum stdu_content_type {
         SAME_AS_DISPLAY = 0,
         SEPARATE_SURFACE,
-        SEPARATE_DMA
+        SEPARATE_BO
 };

 /**
@@ -58,7 +58,7 @@ enum stdu_content_type {
  * @bottom: Bottom side of bounding box.
  * @fb_left: Left side of the framebuffer/content bounding box
  * @fb_top: Top of the framebuffer/content bounding box
- * @buf: DMA buffer when DMA-ing between buffer and screen targets.
+ * @buf: buffer object when DMA-ing between buffer and screen targets.
  * @sid: Surface ID when copying between surface and screen targets.
  */
 struct vmw_stdu_dirty {
@@ -68,7 +68,7 @@ struct vmw_stdu_dirty {
         s32 fb_left, fb_top;
         u32 pitch;
         union {
-                struct vmw_dma_buffer *buf;
+                struct vmw_buffer_object *buf;
                 u32 sid;
         };
 };
@@ -508,14 +508,14 @@ static int vmw_stdu_crtc_page_flip(struct drm_crtc *crtc,


 /**
- * vmw_stdu_dmabuf_clip - Callback to encode a suface DMA command cliprect
+ * vmw_stdu_bo_clip - Callback to encode a surface DMA command cliprect
  *
  * @dirty: The closure structure.
  *
  * Encodes a surface DMA command cliprect and updates the bounding box
  * for the DMA.
  */
-static void vmw_stdu_dmabuf_clip(struct vmw_kms_dirty *dirty)
+static void vmw_stdu_bo_clip(struct vmw_kms_dirty *dirty)
 {
         struct vmw_stdu_dirty *ddirty = container_of(dirty,
                                                      struct vmw_stdu_dirty,
                                                      base);
@@ -543,14 +543,14 @@ static void vmw_stdu_dmabuf_clip(struct vmw_kms_dirty *dirty)
 }

 /**
- * vmw_stdu_dmabuf_fifo_commit - Callback to fill in and submit a DMA command.
+ * vmw_stdu_bo_fifo_commit - Callback to fill in and submit a DMA command.
  *
  * @dirty: The closure structure.
  *
  * Fills in the missing fields in a DMA command, and optionally encodes
  * a screen target update command, depending on transfer direction.
  */
-static void vmw_stdu_dmabuf_fifo_commit(struct vmw_kms_dirty *dirty)
+static void vmw_stdu_bo_fifo_commit(struct vmw_kms_dirty *dirty)
 {
         struct vmw_stdu_dirty *ddirty = container_of(dirty,
                                                      struct vmw_stdu_dirty,
                                                      base);
@@ -594,13 +594,13 @@ static void vmw_stdu_dmabuf_fifo_commit(struct vmw_kms_dirty *dirty)


 /**
- * vmw_stdu_dmabuf_cpu_clip - Callback to encode a CPU blit
+ * vmw_stdu_bo_cpu_clip - Callback to encode a CPU blit
  *
  * @dirty: The closure structure.
  *
  * This function calculates the bounding box for all the incoming clips.
  */
-static void vmw_stdu_dmabuf_cpu_clip(struct vmw_kms_dirty *dirty)
+static void vmw_stdu_bo_cpu_clip(struct vmw_kms_dirty *dirty)
 {
         struct vmw_stdu_dirty *ddirty = container_of(dirty,
                                                      struct vmw_stdu_dirty,
                                                      base);
@@ -624,14 +624,14 @@ static void vmw_stdu_dmabuf_cpu_clip(struct vmw_kms_dirty *dirty)


 /**
- * vmw_stdu_dmabuf_cpu_commit - Callback to do a CPU blit from DMAbuf
+ * vmw_stdu_bo_cpu_commit - Callback to do a CPU blit from a buffer object
  *
  * @dirty: The closure structure.
  *
  * For the special case when we cannot create a proxy surface in a
  * 2D VM, we have to do a CPU blit ourselves.
  */
-static void vmw_stdu_dmabuf_cpu_commit(struct vmw_kms_dirty *dirty)
+static void vmw_stdu_bo_cpu_commit(struct vmw_kms_dirty *dirty)
 {
         struct vmw_stdu_dirty *ddirty = container_of(dirty,
                                                      struct vmw_stdu_dirty,
                                                      base);
@@ -652,7 +652,7 @@ static void vmw_stdu_dmabuf_cpu_commit(struct vmw_kms_dirty *dirty)
         if (width == 0 || height == 0)
                 return;

-        /* Assume we are blitting from Guest (dmabuf) to Host (display_srf) */
+        /* Assume we are blitting from Guest (bo) to Host (display_srf) */
         dst_pitch = stdu->display_srf->base_size.width * stdu->cpp;
         dst_bo = &stdu->display_srf->res.backup->base;
         dst_offset = ddirty->top * dst_pitch + ddirty->left * stdu->cpp;
@@ -712,13 +712,13 @@ out_cleanup:
 }

 /**
- * vmw_kms_stdu_dma - Perform a DMA transfer between a dma-buffer backed
+ * vmw_kms_stdu_dma - Perform a DMA transfer between a buffer-object backed
  * framebuffer and the screen target system.
  *
  * @dev_priv: Pointer to the device private structure.
  * @file_priv: Pointer to a struct drm-file identifying the caller. May be
  * set to NULL, but then @user_fence_rep must also be set to NULL.
- * @vfb: Pointer to the dma-buffer backed framebuffer.
+ * @vfb: Pointer to the buffer-object backed framebuffer.
  * @clips: Array of clip rects. Either @clips or @vclips must be NULL.
  * @vclips: Alternate array of clip rects. Either @clips or @vclips must
  * be NULL.
@@ -747,8 +747,8 @@ int vmw_kms_stdu_dma(struct vmw_private *dev_priv,
                      bool interruptible,
                      struct drm_crtc *crtc)
 {
-        struct vmw_dma_buffer *buf =
-                container_of(vfb, struct vmw_framebuffer_dmabuf, base)->buffer;
+        struct vmw_buffer_object *buf =
+                container_of(vfb, struct vmw_framebuffer_bo, base)->buffer;
         struct vmw_stdu_dirty ddirty;
         int ret;
         bool cpu_blit = !(dev_priv->capabilities & SVGA_CAP_3D);
@@ -770,8 +770,8 @@ int vmw_kms_stdu_dma(struct vmw_private *dev_priv,
         ddirty.fb_left = ddirty.fb_top = S32_MAX;
         ddirty.pitch = vfb->base.pitches[0];
         ddirty.buf = buf;
-        ddirty.base.fifo_commit = vmw_stdu_dmabuf_fifo_commit;
-        ddirty.base.clip = vmw_stdu_dmabuf_clip;
+        ddirty.base.fifo_commit = vmw_stdu_bo_fifo_commit;
+        ddirty.base.clip = vmw_stdu_bo_clip;
         ddirty.base.fifo_reserve_size = sizeof(struct vmw_stdu_dma) +
                 num_clips * sizeof(SVGA3dCopyBox) +
                 sizeof(SVGA3dCmdSurfaceDMASuffix);
@@ -780,8 +780,8 @@ int vmw_kms_stdu_dma(struct vmw_private *dev_priv,


         if (cpu_blit) {
-                ddirty.base.fifo_commit = vmw_stdu_dmabuf_cpu_commit;
-                ddirty.base.clip = vmw_stdu_dmabuf_cpu_clip;
+                ddirty.base.fifo_commit = vmw_stdu_bo_cpu_commit;
+                ddirty.base.clip = vmw_stdu_bo_cpu_clip;
                 ddirty.base.fifo_reserve_size = 0;
         }

@@ -927,7 +927,7 @@ int vmw_kms_stdu_surface_dirty(struct vmw_private *dev_priv,
         if (ret)
                 return ret;

-        if (vfbs->is_dmabuf_proxy) {
+        if (vfbs->is_bo_proxy) {
                 ret = vmw_kms_update_proxy(srf, clips, num_clips, inc);
                 if (ret)
                         goto out_finish;
@@ -1075,7 +1075,7 @@ vmw_stdu_primary_plane_cleanup_fb(struct drm_plane *plane,
  * @new_state: info on the new plane state, including the FB
  *
  * This function allocates a new display surface if the content is
- * backed by a DMA. The display surface is pinned here, and it'll
+ * backed by a buffer object. The display surface is pinned here, and it'll
  * be unpinned in .cleanup_fb()
  *
  * Returns 0 on success
@@ -1105,13 +1105,13 @@ vmw_stdu_primary_plane_prepare_fb(struct drm_plane *plane,
         }

         vfb = vmw_framebuffer_to_vfb(new_fb);
-        new_vfbs = (vfb->dmabuf) ? NULL : vmw_framebuffer_to_vfbs(new_fb);
+        new_vfbs = (vfb->bo) ? NULL : vmw_framebuffer_to_vfbs(new_fb);

         if (new_vfbs && new_vfbs->surface->base_size.width == hdisplay &&
             new_vfbs->surface->base_size.height == vdisplay)
                 new_content_type = SAME_AS_DISPLAY;
-        else if (vfb->dmabuf)
-                new_content_type = SEPARATE_DMA;
+        else if (vfb->bo)
+                new_content_type = SEPARATE_BO;
         else
                 new_content_type = SEPARATE_SURFACE;

@@ -1124,10 +1124,10 @@ vmw_stdu_primary_plane_prepare_fb(struct drm_plane *plane,
                 display_base_size.depth  = 1;

                 /*
-                 * If content buffer is a DMA buf, then we have to construct
-                 * surface info
+                 * If content buffer is a buffer object, then we have to
+                 * construct surface info
                  */
-                if (new_content_type == SEPARATE_DMA) {
+                if (new_content_type == SEPARATE_BO) {

                         switch (new_fb->format->cpp[0]*8) {
                         case 32:
@@ -1212,12 +1212,12 @@ vmw_stdu_primary_plane_prepare_fb(struct drm_plane *plane,
         vps->content_fb_type = new_content_type;

         /*
-         * This should only happen if the DMA buf is too large to create a
+         * This should only happen if the buffer object is too large to create a
          * proxy surface for.
-         * If we are a 2D VM with a DMA buffer then we have to use CPU blit
+         * If we are a 2D VM with a buffer object then we have to use CPU blit
          * so cache these mappings
          */
-        if (vps->content_fb_type == SEPARATE_DMA &&
+        if (vps->content_fb_type == SEPARATE_BO &&
             !(dev_priv->capabilities & SVGA_CAP_3D))
                 vps->cpp = new_fb->pitches[0] / new_fb->width;

@@ -1276,7 +1276,7 @@ vmw_stdu_primary_plane_atomic_update(struct drm_plane *plane,
                 if (ret)
                         DRM_ERROR("Failed to bind surface to STDU.\n");

-                if (vfb->dmabuf)
+                if (vfb->bo)
                         ret = vmw_kms_stdu_dma(dev_priv, NULL, vfb, NULL, NULL,
                                                &vclips, 1, 1, true, false,
                                                crtc);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
index b236c48bf265..2b2e8aa7114a 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
@@ -842,12 +842,12 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
         if (dev_priv->has_mob && req->shareable) {
                 uint32_t backup_handle;

-                ret = vmw_user_dmabuf_alloc(dev_priv, tfile,
-                                            res->backup_size,
-                                            true,
-                                            &backup_handle,
-                                            &res->backup,
-                                            &user_srf->backup_base);
+                ret = vmw_user_bo_alloc(dev_priv, tfile,
+                                        res->backup_size,
+                                        true,
+                                        &backup_handle,
+                                        &res->backup,
+                                        &user_srf->backup_base);
                 if (unlikely(ret != 0)) {
                         vmw_resource_unreference(&res);
                         goto out_unlock;
@@ -1317,14 +1317,14 @@ int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data,

         if (req->buffer_handle != SVGA3D_INVALID_ID) {
-                ret = vmw_user_dmabuf_lookup(tfile, req->buffer_handle,
-                                             &res->backup,
-                                             &user_srf->backup_base);
+                ret = vmw_user_bo_lookup(tfile, req->buffer_handle,
+                                         &res->backup,
+                                         &user_srf->backup_base);
                 if (ret == 0) {
                         if (res->backup->base.num_pages * PAGE_SIZE <
                             res->backup_size) {
                                 DRM_ERROR("Surface backup buffer is too small.\n");
-                                vmw_dmabuf_unreference(&res->backup);
+                                vmw_bo_unreference(&res->backup);
                                 ret = -EINVAL;
                                 goto out_unlock;
                         } else {
@@ -1332,13 +1332,13 @@ int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data,
                         }
                 }
         } else if (req->drm_surface_flags & drm_vmw_surface_flag_create_buffer)
-                ret = vmw_user_dmabuf_alloc(dev_priv, tfile,
-                                            res->backup_size,
-                                            req->drm_surface_flags &
-                                            drm_vmw_surface_flag_shareable,
-                                            &backup_handle,
-                                            &res->backup,
-                                            &user_srf->backup_base);
+                ret = vmw_user_bo_alloc(dev_priv, tfile,
+                                        res->backup_size,
+                                        req->drm_surface_flags &
+                                        drm_vmw_surface_flag_shareable,
+                                        &backup_handle,
+                                        &res->backup,
+                                        &user_srf->backup_base);

         if (unlikely(ret != 0)) {
                 vmw_resource_unreference(&res);
@@ -1414,8 +1414,7 @@ int vmw_gb_surface_reference_ioctl(struct drm_device *dev, void *data,
         }

         mutex_lock(&dev_priv->cmdbuf_mutex); /* Protect res->backup */
-        ret = vmw_user_dmabuf_reference(tfile, srf->res.backup,
-                                        &backup_handle);
+        ret = vmw_user_bo_reference(tfile, srf->res.backup, &backup_handle);
         mutex_unlock(&dev_priv->cmdbuf_mutex);

         if (unlikely(ret != 0)) {
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
index 21111fd091f9..0931f43913b1 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
@@ -798,7 +798,7 @@ static int vmw_verify_access(struct ttm_buffer_object *bo, struct file *filp)
         struct ttm_object_file *tfile =
                 vmw_fpriv((struct drm_file *)filp->private_data)->tfile;

-        return vmw_user_dmabuf_verify_access(bo, tfile);
+        return vmw_user_bo_verify_access(bo, tfile);
 }

 static int vmw_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)