commit 9354eafd893f45320a37da360e1728104e49cc2f
Author:    Dave Airlie <airlied@redhat.com>  2014-01-20 04:03:27 +0400
Committer: Dave Airlie <airlied@redhat.com>  2014-01-20 04:03:27 +0400
tree       8cd82ac2ff70ea3a9fd97b432f10c880b1d97a4c
parent     53dac830537b51df555ba5e7ebb236705b7eaa7c
parent     1985f99987ff04e1bb0405101dd8e25cf1b6b037
Merge tag 'vmwgfx-next-2014-01-17' of git://people.freedesktop.org/~thomash/linux into drm-next
Pull request of 2014-01-17

Pull request for 3.14. One not-so-urgent fix, one huge device update. The pull request corresponds to the patches sent out on dri-devel, except:

[PATCH 02/33]: review tag typo pointed out by Matt Turner fixed.
[PATCH 04/33]: dropped. The new surface formats are never used.

The upcoming VMware SVGA2 hardware version 11 will introduce the concept of "guest-backed objects", or resources. The device will in principle get all of its memory from the guest, which has big advantages from the device's point of view. This means that vmwgfx contexts, shaders and surfaces need to be backed by guest memory in the form of buffer objects called MOBs, presumably short for MemoryOBjects, which are bound to the device in a special way.

This patch series introduces guest-backed object support. Some new IOCTLs are added to allocate these new guest-backed objects and to optionally provide them with a backing MOB. An update to the gallium driver accompanies this series; it will presumably be pushed in the near term to a separate mesa branch before being merged to master.

* tag 'vmwgfx-next-2014-01-17' of git://people.freedesktop.org/~thomash/linux: (33 commits)
  drm/vmwgfx: Invalidate surface on non-readback unbind
  drm/vmwgfx: Silence the device command verifier
  drm/vmwgfx: Implement 64-bit Otable- and MOB binding v2
  drm/vmwgfx: Fix surface framebuffer check for guest-backed surfaces
  drm/vmwgfx: Update otable definitions
  drm/vmwgfx: Use the linux DMA api also for MOBs
  drm/vmwgfx: Ditch the vmw_dummy_query_bo_prepare function
  drm/vmwgfx: Persistent tracking of context bindings
  drm/vmwgfx: Track context bindings and scrub them upon exiting execbuf
  drm/vmwgfx: Block the BIND_SHADERCONSTS command
  drm/vmwgfx: Add a parameter to get max MOB memory size
  drm/vmwgfx: Implement a buffer object synccpu ioctl.
  drm/vmwgfx: Make sure that the multisampling is off
  drm/vmwgfx: Extend the command verifier to handle guest-backed on / off
  drm/vmwgfx: Fix up the vmwgfx_drv.h header for new files
  drm/vmwgfx: Enable 3D for new hardware version
  drm/vmwgfx: Add new unused (by user-space) commands to the verifier
  drm/vmwgfx: Validate guest-backed shader const commands
  drm/vmwgfx: Add guest-backed shaders
  drm/vmwgfx: Hook up guest-backed surfaces
  ...
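As a usage illustration, a minimal user-space sketch of the new synccpu interface follows (the kernel-side handler is vmw_user_dmabuf_synccpu_ioctl in the diff below). The op and flag names are taken from the diff itself; the DRM_VMW_SYNCCPU command index and the exact layout of struct drm_vmw_synccpu_arg are assumed to come from the matching vmwgfx_drm.h update in this series, and drmCommandWrite() is the generic libdrm entry point:

#include <stdint.h>
#include <string.h>
#include <xf86drm.h>
#include "vmwgfx_drm.h"	/* assumed: uapi header as updated by this series */

/* Grab a buffer object for CPU read/write access, idling the GPU and
 * blocking further command submission on the buffer until released.
 * Passing drm_vmw_synccpu_allow_cs instead would only idle the GPU
 * without blocking submission, and needs no matching release. */
static int bo_cpu_grab(int fd, uint32_t handle)
{
	struct drm_vmw_synccpu_arg arg;

	memset(&arg, 0, sizeof(arg));
	arg.handle = handle;
	arg.op = drm_vmw_synccpu_grab;
	arg.flags = drm_vmw_synccpu_read | drm_vmw_synccpu_write;

	return drmCommandWrite(fd, DRM_VMW_SYNCCPU, &arg, sizeof(arg));
}

/* Release a previous blocking grab, unblocking command submission. */
static int bo_cpu_release(int fd, uint32_t handle)
{
	struct drm_vmw_synccpu_arg arg;

	memset(&arg, 0, sizeof(arg));
	arg.handle = handle;
	arg.op = drm_vmw_synccpu_release;
	arg.flags = drm_vmw_synccpu_read | drm_vmw_synccpu_write;

	return drmCommandWrite(fd, DRM_VMW_SYNCCPU, &arg, sizeof(arg));
}

Per the kernel-side kerneldoc, a blocking grab that is never released is cleaned up automatically when the file is closed.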
Diffstat (limited to 'drivers/gpu/drm/vmwgfx/vmwgfx_resource.c')
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_resource.c | 195
1 file changed, 192 insertions(+), 3 deletions(-)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index 9b5ea2ac7ddf..6fdd82d42f65 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -215,6 +215,7 @@ int vmw_resource_init(struct vmw_private *dev_priv, struct vmw_resource *res,
res->func = func;
INIT_LIST_HEAD(&res->lru_head);
INIT_LIST_HEAD(&res->mob_head);
+ INIT_LIST_HEAD(&res->binding_head);
res->id = -1;
res->backup = NULL;
res->backup_offset = 0;
@@ -441,6 +442,21 @@ static void vmw_user_dmabuf_release(struct ttm_base_object **p_base)
ttm_bo_unref(&bo);
}
+static void vmw_user_dmabuf_ref_obj_release(struct ttm_base_object *base,
+ enum ttm_ref_type ref_type)
+{
+ struct vmw_user_dma_buffer *user_bo;
+ user_bo = container_of(base, struct vmw_user_dma_buffer, prime.base);
+
+ switch (ref_type) {
+ case TTM_REF_SYNCCPU_WRITE:
+ ttm_bo_synccpu_write_release(&user_bo->dma.base);
+ break;
+ default:
+ BUG();
+ }
+}
+
/**
* vmw_user_dmabuf_alloc - Allocate a user dma buffer
*
@@ -471,6 +487,8 @@ int vmw_user_dmabuf_alloc(struct vmw_private *dev_priv,
}
ret = vmw_dmabuf_init(dev_priv, &user_bo->dma, size,
+ (dev_priv->has_mob) ?
+ &vmw_sys_placement :
&vmw_vram_sys_placement, true,
&vmw_user_dmabuf_destroy);
if (unlikely(ret != 0))
@@ -482,7 +500,8 @@ int vmw_user_dmabuf_alloc(struct vmw_private *dev_priv,
&user_bo->prime,
shareable,
ttm_buffer_type,
- &vmw_user_dmabuf_release, NULL);
+ &vmw_user_dmabuf_release,
+ &vmw_user_dmabuf_ref_obj_release);
if (unlikely(ret != 0)) {
ttm_bo_unref(&tmp);
goto out_no_base_object;
@@ -515,6 +534,130 @@ int vmw_user_dmabuf_verify_access(struct ttm_buffer_object *bo,
vmw_user_bo->prime.base.shareable) ? 0 : -EPERM;
}
+/**
+ * vmw_user_dmabuf_synccpu_grab - Grab a struct vmw_user_dma_buffer for cpu
+ * access, idling previous GPU operations on the buffer and optionally
+ * blocking further command submissions on it.
+ *
+ * @user_bo: Pointer to the buffer object being grabbed for CPU access
+ * @tfile: Identifying the caller.
+ * @flags: Flags indicating how the grab should be performed.
+ *
+ * A blocking grab will be automatically released when @tfile is closed.
+ */
+static int vmw_user_dmabuf_synccpu_grab(struct vmw_user_dma_buffer *user_bo,
+ struct ttm_object_file *tfile,
+ uint32_t flags)
+{
+ struct ttm_buffer_object *bo = &user_bo->dma.base;
+ bool existed;
+ int ret;
+
+ if (flags & drm_vmw_synccpu_allow_cs) {
+ struct ttm_bo_device *bdev = bo->bdev;
+
+ spin_lock(&bdev->fence_lock);
+ ret = ttm_bo_wait(bo, false, true,
+ !!(flags & drm_vmw_synccpu_dontblock));
+ spin_unlock(&bdev->fence_lock);
+ return ret;
+ }
+
+ ret = ttm_bo_synccpu_write_grab
+ (bo, !!(flags & drm_vmw_synccpu_dontblock));
+ if (unlikely(ret != 0))
+ return ret;
+
+ ret = ttm_ref_object_add(tfile, &user_bo->prime.base,
+ TTM_REF_SYNCCPU_WRITE, &existed);
+ if (ret != 0 || existed)
+ ttm_bo_synccpu_write_release(&user_bo->dma.base);
+
+ return ret;
+}
+
+/**
+ * vmw_user_dmabuf_synccpu_release - Release a previous grab for CPU access,
+ * and unblock command submission on the buffer if blocked.
+ *
+ * @handle: Handle identifying the buffer object.
+ * @tfile: Identifying the caller.
+ * @flags: Flags indicating the type of release.
+ */
+static int vmw_user_dmabuf_synccpu_release(uint32_t handle,
+ struct ttm_object_file *tfile,
+ uint32_t flags)
+{
+ if (!(flags & drm_vmw_synccpu_allow_cs))
+ return ttm_ref_object_base_unref(tfile, handle,
+ TTM_REF_SYNCCPU_WRITE);
+
+ return 0;
+}
+
+/**
+ * vmw_user_dmabuf_synccpu_ioctl - ioctl function implementing the synccpu
+ * functionality.
+ *
+ * @dev: Identifies the drm device.
+ * @data: Pointer to the ioctl argument.
+ * @file_priv: Identifies the caller.
+ *
+ * This function checks the ioctl arguments for validity and calls the
+ * relevant synccpu functions.
+ */
+int vmw_user_dmabuf_synccpu_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_vmw_synccpu_arg *arg =
+ (struct drm_vmw_synccpu_arg *) data;
+ struct vmw_dma_buffer *dma_buf;
+ struct vmw_user_dma_buffer *user_bo;
+ struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+ int ret;
+
+ if ((arg->flags & (drm_vmw_synccpu_read | drm_vmw_synccpu_write)) == 0
+ || (arg->flags & ~(drm_vmw_synccpu_read | drm_vmw_synccpu_write |
+ drm_vmw_synccpu_dontblock |
+ drm_vmw_synccpu_allow_cs)) != 0) {
+ DRM_ERROR("Illegal synccpu flags.\n");
+ return -EINVAL;
+ }
+
+ switch (arg->op) {
+ case drm_vmw_synccpu_grab:
+ ret = vmw_user_dmabuf_lookup(tfile, arg->handle, &dma_buf);
+ if (unlikely(ret != 0))
+ return ret;
+
+ user_bo = container_of(dma_buf, struct vmw_user_dma_buffer,
+ dma);
+ ret = vmw_user_dmabuf_synccpu_grab(user_bo, tfile, arg->flags);
+ vmw_dmabuf_unreference(&dma_buf);
+ if (unlikely(ret != 0 && ret != -ERESTARTSYS &&
+ ret != -EBUSY)) {
+ DRM_ERROR("Failed synccpu grab on handle 0x%08x.\n",
+ (unsigned int) arg->handle);
+ return ret;
+ }
+ break;
+ case drm_vmw_synccpu_release:
+ ret = vmw_user_dmabuf_synccpu_release(arg->handle, tfile,
+ arg->flags);
+ if (unlikely(ret != 0)) {
+ DRM_ERROR("Failed synccpu release on handle 0x%08x.\n",
+ (unsigned int) arg->handle);
+ return ret;
+ }
+ break;
+ default:
+ DRM_ERROR("Invalid synccpu operation.\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
@@ -591,7 +734,8 @@ int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
}
int vmw_user_dmabuf_reference(struct ttm_object_file *tfile,
- struct vmw_dma_buffer *dma_buf)
+ struct vmw_dma_buffer *dma_buf,
+ uint32_t *handle)
{
struct vmw_user_dma_buffer *user_bo;
@@ -599,6 +743,8 @@ int vmw_user_dmabuf_reference(struct ttm_object_file *tfile,
return -EINVAL;
user_bo = container_of(dma_buf, struct vmw_user_dma_buffer, dma);
+
+ *handle = user_bo->prime.base.hash.key;
return ttm_ref_object_add(tfile, &user_bo->prime.base,
TTM_REF_USAGE, NULL);
}
@@ -1291,11 +1437,54 @@ void vmw_fence_single_bo(struct ttm_buffer_object *bo,
* @mem: The struct ttm_mem_reg indicating to what memory
* region the move is taking place.
*
- * For now does nothing.
+ * Evicts the Guest Backed hardware resource if the backup
+ * buffer is being moved out of MOB memory.
+ * Note that this function should not race with the resource
+ * validation code as long as it accesses only members of struct
+ * resource that remain static while bo::res is !NULL and
+ * while we have @bo reserved. struct resource::backup is *not* a
+ * static member. The resource validation code will take care
+ * to set @bo::res to NULL, while having @bo reserved, when the
+ * buffer is no longer bound to the resource, so @bo::res can be
+ * used to determine whether there is a need to unbind and whether
+ * it is safe to unbind.
*/
void vmw_resource_move_notify(struct ttm_buffer_object *bo,
struct ttm_mem_reg *mem)
{
+ struct vmw_dma_buffer *dma_buf;
+
+ if (mem == NULL)
+ return;
+
+ if (bo->destroy != vmw_dmabuf_bo_free &&
+ bo->destroy != vmw_user_dmabuf_destroy)
+ return;
+
+ dma_buf = container_of(bo, struct vmw_dma_buffer, base);
+
+ if (mem->mem_type != VMW_PL_MOB) {
+ struct vmw_resource *res, *n;
+ struct ttm_bo_device *bdev = bo->bdev;
+ struct ttm_validate_buffer val_buf;
+
+ val_buf.bo = bo;
+
+ list_for_each_entry_safe(res, n, &dma_buf->res_list, mob_head) {
+
+ if (unlikely(res->func->unbind == NULL))
+ continue;
+
+ (void) res->func->unbind(res, true, &val_buf);
+ res->backup_dirty = true;
+ res->res_dirty = false;
+ list_del_init(&res->mob_head);
+ }
+
+ spin_lock(&bdev->fence_lock);
+ (void) ttm_bo_wait(bo, false, false, false);
+ spin_unlock(&bdev->fence_lock);
+ }
}
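For reference, the unbind contract that vmw_resource_move_notify relies on: res->func->unbind() receives the resource, a readback flag, and a struct ttm_validate_buffer naming the reserved backup buffer. The sketch below is purely schematic and not part of this patch (the real implementations are added elsewhere in this series for contexts, shaders and surfaces); the fencing helpers vmw_execbuf_fence_commands() and vmw_fence_single_bo() are assumed from the driver's existing code, and the device command itself is elided:

static int vmw_example_unbind(struct vmw_resource *res,
			      bool readback,
			      struct ttm_validate_buffer *val_buf)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct vmw_fence_obj *fence = NULL;

	/* 1) Submit the device command detaching the resource from its
	 *    MOB; with @readback true, the device writes the resource
	 *    contents back into the backup buffer first (command
	 *    encoding omitted here). */

	/* 2) Fence the backup buffer so it cannot be moved or destroyed
	 *    before the device has finished the unbind/readback. */
	(void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
	vmw_fence_single_bo(val_buf->bo, fence);
	if (likely(fence != NULL))
		vmw_fence_obj_unreference(&fence);

	return 0;
}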
/**