author    Dave Airlie <airlied@redhat.com>  2015-08-17 09:03:48 +0300
committer Dave Airlie <airlied@redhat.com>  2015-08-17 09:03:48 +0300
commit    294947a5c7f6d228b70fcc51a89527e74a38a2c5 (patch)
tree      d0f79f3978a1a129e164ce576a771e00550fb985  /drivers/gpu/drm/vmwgfx/vmwgfx_context.c
parent    6406e45cc6f4976ace2b6d23b76bb5f07541e68f (diff)
parent    54fbde8a94a8a78547597215c9e4be590d075ee0 (diff)
download  linux-294947a5c7f6d228b70fcc51a89527e74a38a2c5.tar.xz
Merge branch 'vmwgfx-next' of git://people.freedesktop.org/~thomash/linux into drm-next
A couple of fixes from the previous pull request, as well as GL3 support. There is one drm core change: an export of a previously private function. This is take 2 of implementing screen targets, this time with the fbdev code adjusted accordingly. There is also an implementation of register-driven command buffers, which overrides the FIFO ring for command processing; it is needed for our upcoming hardware revision.

* 'vmwgfx-next' of git://people.freedesktop.org/~thomash/linux: (35 commits)
  drm/vmwgfx: Fix copyright headers
  drm/vmwgfx: Add DX query support. Various fixes.
  drm/vmwgfx: Add command parser support for a couple of DX commands
  drm/vmwgfx: Command parser fixes for DX
  drm/vmwgfx: Initial DX support
  drm/vmwgfx: Update device includes for DX device functionality
  drm: export the DRM permission check code
  drm/vmwgfx: Fix crash when unloading vmwgfx v2
  drm/vmwgfx: Fix framebuffer creation on older hardware
  drm/vmwgfx: Fixed topology boundary checking for Screen Targets
  drm/vmwgfx: Fix an uninitialized value
  drm/vmwgfx: Fix compiler warning with 32-bit dma_addr_t
  drm/vmwgfx: Kill a bunch of sparse warnings
  drm/vmwgfx: Fix kms preferred mode sorting
  drm/vmwgfx: Reinstate the legacy display system dirty callback
  drm/vmwgfx: Implement fbdev on kms v2
  drm/vmwgfx: Add a kernel interface to create a framebuffer v2
  drm/vmwgfx: Avoid cmdbuf alloc sleeping if !TASK_RUNNING
  drm/vmwgfx: Convert screen targets to new helpers v3
  drm/vmwgfx: Convert screen objects to the new helpers
  ...
Diffstat (limited to 'drivers/gpu/drm/vmwgfx/vmwgfx_context.c')
 -rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_context.c | 784
 1 file changed, 400 insertions(+), 384 deletions(-)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
index 44e6ecba3de7..443d1ed00de7 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
@@ -1,6 +1,6 @@
/**************************************************************************
*
- * Copyright © 2009-2012 VMware, Inc., Palo Alto, CA., USA
+ * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -27,19 +27,19 @@
#include "vmwgfx_drv.h"
#include "vmwgfx_resource_priv.h"
+#include "vmwgfx_binding.h"
#include "ttm/ttm_placement.h"
struct vmw_user_context {
struct ttm_base_object base;
struct vmw_resource res;
- struct vmw_ctx_binding_state cbs;
+ struct vmw_ctx_binding_state *cbs;
struct vmw_cmdbuf_res_manager *man;
+ struct vmw_resource *cotables[SVGA_COTABLE_DX10_MAX];
+ spinlock_t cotable_lock;
+ struct vmw_dma_buffer *dx_query_mob;
};
-
-
-typedef int (*vmw_scrub_func)(struct vmw_ctx_bindinfo *, bool);
-
static void vmw_user_context_free(struct vmw_resource *res);
static struct vmw_resource *
vmw_user_context_base_to_res(struct ttm_base_object *base);
@@ -51,12 +51,14 @@ static int vmw_gb_context_unbind(struct vmw_resource *res,
bool readback,
struct ttm_validate_buffer *val_buf);
static int vmw_gb_context_destroy(struct vmw_resource *res);
-static int vmw_context_scrub_shader(struct vmw_ctx_bindinfo *bi, bool rebind);
-static int vmw_context_scrub_render_target(struct vmw_ctx_bindinfo *bi,
- bool rebind);
-static int vmw_context_scrub_texture(struct vmw_ctx_bindinfo *bi, bool rebind);
-static void vmw_context_binding_state_scrub(struct vmw_ctx_binding_state *cbs);
-static void vmw_context_binding_state_kill(struct vmw_ctx_binding_state *cbs);
+static int vmw_dx_context_create(struct vmw_resource *res);
+static int vmw_dx_context_bind(struct vmw_resource *res,
+ struct ttm_validate_buffer *val_buf);
+static int vmw_dx_context_unbind(struct vmw_resource *res,
+ bool readback,
+ struct ttm_validate_buffer *val_buf);
+static int vmw_dx_context_destroy(struct vmw_resource *res);
+
static uint64_t vmw_user_context_size;
static const struct vmw_user_resource_conv user_context_conv = {
@@ -93,15 +95,38 @@ static const struct vmw_res_func vmw_gb_context_func = {
.unbind = vmw_gb_context_unbind
};
-static const vmw_scrub_func vmw_scrub_funcs[vmw_ctx_binding_max] = {
- [vmw_ctx_binding_shader] = vmw_context_scrub_shader,
- [vmw_ctx_binding_rt] = vmw_context_scrub_render_target,
- [vmw_ctx_binding_tex] = vmw_context_scrub_texture };
+static const struct vmw_res_func vmw_dx_context_func = {
+ .res_type = vmw_res_dx_context,
+ .needs_backup = true,
+ .may_evict = true,
+ .type_name = "dx contexts",
+ .backup_placement = &vmw_mob_placement,
+ .create = vmw_dx_context_create,
+ .destroy = vmw_dx_context_destroy,
+ .bind = vmw_dx_context_bind,
+ .unbind = vmw_dx_context_unbind
+};
/**
* Context management:
*/
+static void vmw_context_cotables_unref(struct vmw_user_context *uctx)
+{
+ struct vmw_resource *res;
+ int i;
+
+ for (i = 0; i < SVGA_COTABLE_DX10_MAX; ++i) {
+ spin_lock(&uctx->cotable_lock);
+ res = uctx->cotables[i];
+ uctx->cotables[i] = NULL;
+ spin_unlock(&uctx->cotable_lock);
+
+ if (res)
+ vmw_resource_unreference(&res);
+ }
+}
+
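The helper above detaches each cotable pointer under cotable_lock but drops the reference only after the lock is released, keeping the potentially sleeping vmw_resource_unreference() out of the critical section. A condensed sketch of that idiom, with hypothetical names (not part of the patch):

    /*
     * Sketch of the detach-under-lock, release-outside-lock idiom.
     * 'parent', 'slot' and release() are hypothetical.
     */
    static void detach_and_release(struct parent *parent)
    {
            struct obj *obj;

            spin_lock(&parent->slot_lock);
            obj = parent->slot;             /* snapshot under the lock */
            parent->slot = NULL;            /* readers now see NULL */
            spin_unlock(&parent->slot_lock);

            if (obj)
                    release(obj);           /* may sleep; lock is dropped */
    }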
static void vmw_hw_context_destroy(struct vmw_resource *res)
{
struct vmw_user_context *uctx =
@@ -113,17 +138,19 @@ static void vmw_hw_context_destroy(struct vmw_resource *res)
} *cmd;
- if (res->func->destroy == vmw_gb_context_destroy) {
+ if (res->func->destroy == vmw_gb_context_destroy ||
+ res->func->destroy == vmw_dx_context_destroy) {
mutex_lock(&dev_priv->cmdbuf_mutex);
vmw_cmdbuf_res_man_destroy(uctx->man);
mutex_lock(&dev_priv->binding_mutex);
- (void) vmw_context_binding_state_kill(&uctx->cbs);
- (void) vmw_gb_context_destroy(res);
+ vmw_binding_state_kill(uctx->cbs);
+ (void) res->func->destroy(res);
mutex_unlock(&dev_priv->binding_mutex);
if (dev_priv->pinned_bo != NULL &&
!dev_priv->query_cid_valid)
__vmw_execbuf_release_pinned_bo(dev_priv, NULL);
mutex_unlock(&dev_priv->cmdbuf_mutex);
+ vmw_context_cotables_unref(uctx);
return;
}
@@ -135,25 +162,29 @@ static void vmw_hw_context_destroy(struct vmw_resource *res)
return;
}
- cmd->header.id = cpu_to_le32(SVGA_3D_CMD_CONTEXT_DESTROY);
- cmd->header.size = cpu_to_le32(sizeof(cmd->body));
- cmd->body.cid = cpu_to_le32(res->id);
+ cmd->header.id = SVGA_3D_CMD_CONTEXT_DESTROY;
+ cmd->header.size = sizeof(cmd->body);
+ cmd->body.cid = res->id;
vmw_fifo_commit(dev_priv, sizeof(*cmd));
- vmw_3d_resource_dec(dev_priv, false);
+ vmw_fifo_resource_dec(dev_priv);
}
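The legacy path above uses the reserve/fill/commit FIFO idiom that recurs throughout this file. A self-contained sketch of the idiom (the command struct mirrors the anonymous ones in the patch; the helper name is hypothetical):

    /* Sketch: reserve space for one header + body, fill it in, then
     * commit exactly as much as was reserved. */
    static int send_context_destroy(struct vmw_private *dev_priv, u32 cid)
    {
            struct {
                    SVGA3dCmdHeader header;
                    SVGA3dCmdDestroyContext body;
            } *cmd;

            cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
            if (unlikely(cmd == NULL))
                    return -ENOMEM;

            cmd->header.id = SVGA_3D_CMD_CONTEXT_DESTROY;
            cmd->header.size = sizeof(cmd->body);
            cmd->body.cid = cid;
            vmw_fifo_commit(dev_priv, sizeof(*cmd));
            return 0;
    }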
static int vmw_gb_context_init(struct vmw_private *dev_priv,
+ bool dx,
struct vmw_resource *res,
- void (*res_free) (struct vmw_resource *res))
+ void (*res_free)(struct vmw_resource *res))
{
- int ret;
+ int ret, i;
struct vmw_user_context *uctx =
container_of(res, struct vmw_user_context, res);
+ res->backup_size = (dx ? sizeof(SVGADXContextMobFormat) :
+ SVGA3D_CONTEXT_DATA_SIZE);
ret = vmw_resource_init(dev_priv, res, true,
- res_free, &vmw_gb_context_func);
- res->backup_size = SVGA3D_CONTEXT_DATA_SIZE;
+ res_free,
+ dx ? &vmw_dx_context_func :
+ &vmw_gb_context_func);
if (unlikely(ret != 0))
goto out_err;
@@ -166,12 +197,32 @@ static int vmw_gb_context_init(struct vmw_private *dev_priv,
}
}
- memset(&uctx->cbs, 0, sizeof(uctx->cbs));
- INIT_LIST_HEAD(&uctx->cbs.list);
+ uctx->cbs = vmw_binding_state_alloc(dev_priv);
+ if (IS_ERR(uctx->cbs)) {
+ ret = PTR_ERR(uctx->cbs);
+ goto out_err;
+ }
+
+ spin_lock_init(&uctx->cotable_lock);
+
+ if (dx) {
+ for (i = 0; i < SVGA_COTABLE_DX10_MAX; ++i) {
+ uctx->cotables[i] = vmw_cotable_alloc(dev_priv,
+ &uctx->res, i);
+ if (unlikely(uctx->cotables[i] == NULL)) {
+ ret = -ENOMEM;
+ goto out_cotables;
+ }
+ }
+ }
+
+
vmw_resource_activate(res, vmw_hw_context_destroy);
return 0;
+out_cotables:
+ vmw_context_cotables_unref(uctx);
out_err:
if (res_free)
res_free(res);
@@ -182,7 +233,8 @@ out_err:
static int vmw_context_init(struct vmw_private *dev_priv,
struct vmw_resource *res,
- void (*res_free) (struct vmw_resource *res))
+ void (*res_free)(struct vmw_resource *res),
+ bool dx)
{
int ret;
@@ -192,7 +244,7 @@ static int vmw_context_init(struct vmw_private *dev_priv,
} *cmd;
if (dev_priv->has_mob)
- return vmw_gb_context_init(dev_priv, res, res_free);
+ return vmw_gb_context_init(dev_priv, dx, res, res_free);
ret = vmw_resource_init(dev_priv, res, false,
res_free, &vmw_legacy_context_func);
@@ -215,12 +267,12 @@ static int vmw_context_init(struct vmw_private *dev_priv,
return -ENOMEM;
}
- cmd->header.id = cpu_to_le32(SVGA_3D_CMD_CONTEXT_DEFINE);
- cmd->header.size = cpu_to_le32(sizeof(cmd->body));
- cmd->body.cid = cpu_to_le32(res->id);
+ cmd->header.id = SVGA_3D_CMD_CONTEXT_DEFINE;
+ cmd->header.size = sizeof(cmd->body);
+ cmd->body.cid = res->id;
vmw_fifo_commit(dev_priv, sizeof(*cmd));
- (void) vmw_3d_resource_inc(dev_priv, false);
+ vmw_fifo_resource_inc(dev_priv);
vmw_resource_activate(res, vmw_hw_context_destroy);
return 0;
@@ -232,19 +284,10 @@ out_early:
return ret;
}
-struct vmw_resource *vmw_context_alloc(struct vmw_private *dev_priv)
-{
- struct vmw_resource *res = kmalloc(sizeof(*res), GFP_KERNEL);
- int ret;
-
- if (unlikely(res == NULL))
- return NULL;
-
- ret = vmw_context_init(dev_priv, res, NULL);
-
- return (ret == 0) ? res : NULL;
-}
+/*
+ * GB context.
+ */
static int vmw_gb_context_create(struct vmw_resource *res)
{
@@ -281,7 +324,7 @@ static int vmw_gb_context_create(struct vmw_resource *res)
cmd->header.size = sizeof(cmd->body);
cmd->body.cid = res->id;
vmw_fifo_commit(dev_priv, sizeof(*cmd));
- (void) vmw_3d_resource_inc(dev_priv, false);
+ vmw_fifo_resource_inc(dev_priv);
return 0;
@@ -309,7 +352,6 @@ static int vmw_gb_context_bind(struct vmw_resource *res,
"binding.\n");
return -ENOMEM;
}
-
cmd->header.id = SVGA_3D_CMD_BIND_GB_CONTEXT;
cmd->header.size = sizeof(cmd->body);
cmd->body.cid = res->id;
@@ -346,7 +388,7 @@ static int vmw_gb_context_unbind(struct vmw_resource *res,
BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
mutex_lock(&dev_priv->binding_mutex);
- vmw_context_binding_state_scrub(&uctx->cbs);
+ vmw_binding_state_scrub(uctx->cbs);
submit_size = sizeof(*cmd2) + (readback ? sizeof(*cmd1) : 0);
@@ -414,7 +456,231 @@ static int vmw_gb_context_destroy(struct vmw_resource *res)
if (dev_priv->query_cid == res->id)
dev_priv->query_cid_valid = false;
vmw_resource_release_id(res);
- vmw_3d_resource_dec(dev_priv, false);
+ vmw_fifo_resource_dec(dev_priv);
+
+ return 0;
+}
+
+/*
+ * DX context.
+ */
+
+static int vmw_dx_context_create(struct vmw_resource *res)
+{
+ struct vmw_private *dev_priv = res->dev_priv;
+ int ret;
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDXDefineContext body;
+ } *cmd;
+
+ if (likely(res->id != -1))
+ return 0;
+
+ ret = vmw_resource_alloc_id(res);
+ if (unlikely(ret != 0)) {
+ DRM_ERROR("Failed to allocate a context id.\n");
+ goto out_no_id;
+ }
+
+ if (unlikely(res->id >= VMWGFX_NUM_DXCONTEXT)) {
+ ret = -EBUSY;
+ goto out_no_fifo;
+ }
+
+ cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+ if (unlikely(cmd == NULL)) {
+ DRM_ERROR("Failed reserving FIFO space for context "
+ "creation.\n");
+ ret = -ENOMEM;
+ goto out_no_fifo;
+ }
+
+ cmd->header.id = SVGA_3D_CMD_DX_DEFINE_CONTEXT;
+ cmd->header.size = sizeof(cmd->body);
+ cmd->body.cid = res->id;
+ vmw_fifo_commit(dev_priv, sizeof(*cmd));
+ vmw_fifo_resource_inc(dev_priv);
+
+ return 0;
+
+out_no_fifo:
+ vmw_resource_release_id(res);
+out_no_id:
+ return ret;
+}
+
+static int vmw_dx_context_bind(struct vmw_resource *res,
+ struct ttm_validate_buffer *val_buf)
+{
+ struct vmw_private *dev_priv = res->dev_priv;
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDXBindContext body;
+ } *cmd;
+ struct ttm_buffer_object *bo = val_buf->bo;
+
+ BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
+
+ cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+ if (unlikely(cmd == NULL)) {
+ DRM_ERROR("Failed reserving FIFO space for context "
+ "binding.\n");
+ return -ENOMEM;
+ }
+
+ cmd->header.id = SVGA_3D_CMD_DX_BIND_CONTEXT;
+ cmd->header.size = sizeof(cmd->body);
+ cmd->body.cid = res->id;
+ cmd->body.mobid = bo->mem.start;
+ cmd->body.validContents = res->backup_dirty;
+ res->backup_dirty = false;
+ vmw_fifo_commit(dev_priv, sizeof(*cmd));
+
+
+ return 0;
+}
+
+/**
+ * vmw_dx_context_scrub_cotables - Scrub all bindings and
+ * cotables from a context
+ *
+ * @ctx: Pointer to the context resource
+ * @readback: Whether to save the cotable contents on scrubbing.
+ *
+ * COtables must be unbound before their context, but unbinding requires
+ * the backup buffer being reserved, whereas scrubbing does not.
+ * This function scrubs all cotables of a context, potentially reading back
+ * the contents into their backup buffers. However, scrubbing cotables
+ * also makes the device context invalid, so scrub all bindings first so
+ * that doesn't have to be done later with an invalid context.
+ */
+void vmw_dx_context_scrub_cotables(struct vmw_resource *ctx,
+ bool readback)
+{
+ struct vmw_user_context *uctx =
+ container_of(ctx, struct vmw_user_context, res);
+ int i;
+
+ vmw_binding_state_scrub(uctx->cbs);
+ for (i = 0; i < SVGA_COTABLE_DX10_MAX; ++i) {
+ struct vmw_resource *res;
+
+ /* Avoid racing with ongoing cotable destruction. */
+ spin_lock(&uctx->cotable_lock);
+ res = uctx->cotables[vmw_cotable_scrub_order[i]];
+ if (res)
+ res = vmw_resource_reference_unless_doomed(res);
+ spin_unlock(&uctx->cotable_lock);
+ if (!res)
+ continue;
+
+ WARN_ON(vmw_cotable_scrub(res, readback));
+ vmw_resource_unreference(&res);
+ }
+}
+
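vmw_resource_reference_unless_doomed() is what makes the lookup under cotable_lock safe here: it takes a reference only if the resource's refcount has not already dropped to zero. In plain kref terms the helper behaves roughly like the sketch below (hypothetical type; not the driver's actual implementation):

    #include <linux/kref.h>

    struct foo {
            struct kref kref;
    };

    /* Return a referenced object, or NULL if the refcount already hit
     * zero, i.e. destruction is in flight and the object is "doomed". */
    static struct foo *foo_get_unless_doomed(struct foo *f)
    {
            return kref_get_unless_zero(&f->kref) ? f : NULL;
    }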
+static int vmw_dx_context_unbind(struct vmw_resource *res,
+ bool readback,
+ struct ttm_validate_buffer *val_buf)
+{
+ struct vmw_private *dev_priv = res->dev_priv;
+ struct ttm_buffer_object *bo = val_buf->bo;
+ struct vmw_fence_obj *fence;
+ struct vmw_user_context *uctx =
+ container_of(res, struct vmw_user_context, res);
+
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDXReadbackContext body;
+ } *cmd1;
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDXBindContext body;
+ } *cmd2;
+ uint32_t submit_size;
+ uint8_t *cmd;
+
+
+ BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
+
+ mutex_lock(&dev_priv->binding_mutex);
+ vmw_dx_context_scrub_cotables(res, readback);
+
+ if (uctx->dx_query_mob && uctx->dx_query_mob->dx_query_ctx &&
+ readback) {
+ WARN_ON(uctx->dx_query_mob->dx_query_ctx != res);
+ if (vmw_query_readback_all(uctx->dx_query_mob))
+ DRM_ERROR("Failed to read back query states\n");
+ }
+
+ submit_size = sizeof(*cmd2) + (readback ? sizeof(*cmd1) : 0);
+
+ cmd = vmw_fifo_reserve(dev_priv, submit_size);
+ if (unlikely(cmd == NULL)) {
+ DRM_ERROR("Failed reserving FIFO space for context "
+ "unbinding.\n");
+ mutex_unlock(&dev_priv->binding_mutex);
+ return -ENOMEM;
+ }
+
+ cmd2 = (void *) cmd;
+ if (readback) {
+ cmd1 = (void *) cmd;
+ cmd1->header.id = SVGA_3D_CMD_DX_READBACK_CONTEXT;
+ cmd1->header.size = sizeof(cmd1->body);
+ cmd1->body.cid = res->id;
+ cmd2 = (void *) (&cmd1[1]);
+ }
+ cmd2->header.id = SVGA_3D_CMD_DX_BIND_CONTEXT;
+ cmd2->header.size = sizeof(cmd2->body);
+ cmd2->body.cid = res->id;
+ cmd2->body.mobid = SVGA3D_INVALID_ID;
+
+ vmw_fifo_commit(dev_priv, submit_size);
+ mutex_unlock(&dev_priv->binding_mutex);
+
+ /*
+ * Create a fence object and fence the backup buffer.
+ */
+
+ (void) vmw_execbuf_fence_commands(NULL, dev_priv,
+ &fence, NULL);
+
+ vmw_fence_single_bo(bo, fence);
+
+ if (likely(fence != NULL))
+ vmw_fence_obj_unreference(&fence);
+
+ return 0;
+}
+
+static int vmw_dx_context_destroy(struct vmw_resource *res)
+{
+ struct vmw_private *dev_priv = res->dev_priv;
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDXDestroyContext body;
+ } *cmd;
+
+ if (likely(res->id == -1))
+ return 0;
+
+ cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+ if (unlikely(cmd == NULL)) {
+ DRM_ERROR("Failed reserving FIFO space for context "
+ "destruction.\n");
+ return -ENOMEM;
+ }
+
+ cmd->header.id = SVGA_3D_CMD_DX_DESTROY_CONTEXT;
+ cmd->header.size = sizeof(cmd->body);
+ cmd->body.cid = res->id;
+ vmw_fifo_commit(dev_priv, sizeof(*cmd));
+ if (dev_priv->query_cid == res->id)
+ dev_priv->query_cid_valid = false;
+ vmw_resource_release_id(res);
+ vmw_fifo_resource_dec(dev_priv);
return 0;
}
@@ -435,6 +701,11 @@ static void vmw_user_context_free(struct vmw_resource *res)
container_of(res, struct vmw_user_context, res);
struct vmw_private *dev_priv = res->dev_priv;
+ if (ctx->cbs)
+ vmw_binding_state_free(ctx->cbs);
+
+ (void) vmw_context_bind_dx_query(res, NULL);
+
ttm_base_object_kfree(ctx, base);
ttm_mem_global_free(vmw_mem_glob(dev_priv),
vmw_user_context_size);
@@ -465,8 +736,8 @@ int vmw_context_destroy_ioctl(struct drm_device *dev, void *data,
return ttm_ref_object_base_unref(tfile, arg->cid, TTM_REF_USAGE);
}
-int vmw_context_define_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+static int vmw_context_define(struct drm_device *dev, void *data,
+ struct drm_file *file_priv, bool dx)
{
struct vmw_private *dev_priv = vmw_priv(dev);
struct vmw_user_context *ctx;
@@ -476,6 +747,10 @@ int vmw_context_define_ioctl(struct drm_device *dev, void *data,
struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
int ret;
+ if (!dev_priv->has_dx && dx) {
+ DRM_ERROR("DX contexts not supported by device.\n");
+ return -EINVAL;
+ }
/*
* Approximate idr memory usage with 128 bytes. It will be limited
@@ -516,7 +791,7 @@ int vmw_context_define_ioctl(struct drm_device *dev, void *data,
* From here on, the destructor takes over resource freeing.
*/
- ret = vmw_context_init(dev_priv, res, vmw_user_context_free);
+ ret = vmw_context_init(dev_priv, res, vmw_user_context_free, dx);
if (unlikely(ret != 0))
goto out_unlock;
@@ -535,387 +810,128 @@ out_err:
out_unlock:
ttm_read_unlock(&dev_priv->reservation_sem);
return ret;
-
-}
-
-/**
- * vmw_context_scrub_shader - scrub a shader binding from a context.
- *
- * @bi: single binding information.
- * @rebind: Whether to issue a bind instead of scrub command.
- */
-static int vmw_context_scrub_shader(struct vmw_ctx_bindinfo *bi, bool rebind)
-{
- struct vmw_private *dev_priv = bi->ctx->dev_priv;
- struct {
- SVGA3dCmdHeader header;
- SVGA3dCmdSetShader body;
- } *cmd;
-
- cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
- if (unlikely(cmd == NULL)) {
- DRM_ERROR("Failed reserving FIFO space for shader "
- "unbinding.\n");
- return -ENOMEM;
- }
-
- cmd->header.id = SVGA_3D_CMD_SET_SHADER;
- cmd->header.size = sizeof(cmd->body);
- cmd->body.cid = bi->ctx->id;
- cmd->body.type = bi->i1.shader_type;
- cmd->body.shid = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
- vmw_fifo_commit(dev_priv, sizeof(*cmd));
-
- return 0;
-}
-
-/**
- * vmw_context_scrub_render_target - scrub a render target binding
- * from a context.
- *
- * @bi: single binding information.
- * @rebind: Whether to issue a bind instead of scrub command.
- */
-static int vmw_context_scrub_render_target(struct vmw_ctx_bindinfo *bi,
- bool rebind)
-{
- struct vmw_private *dev_priv = bi->ctx->dev_priv;
- struct {
- SVGA3dCmdHeader header;
- SVGA3dCmdSetRenderTarget body;
- } *cmd;
-
- cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
- if (unlikely(cmd == NULL)) {
- DRM_ERROR("Failed reserving FIFO space for render target "
- "unbinding.\n");
- return -ENOMEM;
- }
-
- cmd->header.id = SVGA_3D_CMD_SETRENDERTARGET;
- cmd->header.size = sizeof(cmd->body);
- cmd->body.cid = bi->ctx->id;
- cmd->body.type = bi->i1.rt_type;
- cmd->body.target.sid = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
- cmd->body.target.face = 0;
- cmd->body.target.mipmap = 0;
- vmw_fifo_commit(dev_priv, sizeof(*cmd));
-
- return 0;
}
-/**
- * vmw_context_scrub_texture - scrub a texture binding from a context.
- *
- * @bi: single binding information.
- * @rebind: Whether to issue a bind instead of scrub command.
- *
- * TODO: Possibly complement this function with a function that takes
- * a list of texture bindings and combines them to a single command.
- */
-static int vmw_context_scrub_texture(struct vmw_ctx_bindinfo *bi,
- bool rebind)
-{
- struct vmw_private *dev_priv = bi->ctx->dev_priv;
- struct {
- SVGA3dCmdHeader header;
- struct {
- SVGA3dCmdSetTextureState c;
- SVGA3dTextureState s1;
- } body;
- } *cmd;
-
- cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
- if (unlikely(cmd == NULL)) {
- DRM_ERROR("Failed reserving FIFO space for texture "
- "unbinding.\n");
- return -ENOMEM;
- }
-
-
- cmd->header.id = SVGA_3D_CMD_SETTEXTURESTATE;
- cmd->header.size = sizeof(cmd->body);
- cmd->body.c.cid = bi->ctx->id;
- cmd->body.s1.stage = bi->i1.texture_stage;
- cmd->body.s1.name = SVGA3D_TS_BIND_TEXTURE;
- cmd->body.s1.value = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
- vmw_fifo_commit(dev_priv, sizeof(*cmd));
-
- return 0;
-}
-
-/**
- * vmw_context_binding_drop: Stop tracking a context binding
- *
- * @cb: Pointer to binding tracker storage.
- *
- * Stops tracking a context binding, and re-initializes its storage.
- * Typically used when the context binding is replaced with a binding to
- * another (or the same, for that matter) resource.
- */
-static void vmw_context_binding_drop(struct vmw_ctx_binding *cb)
+int vmw_context_define_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
{
- list_del(&cb->ctx_list);
- if (!list_empty(&cb->res_list))
- list_del(&cb->res_list);
- cb->bi.ctx = NULL;
+ return vmw_context_define(dev, data, file_priv, false);
}
-/**
- * vmw_context_binding_add: Start tracking a context binding
- *
- * @cbs: Pointer to the context binding state tracker.
- * @bi: Information about the binding to track.
- *
- * Performs basic checks on the binding to make sure arguments are within
- * bounds and then starts tracking the binding in the context binding
- * state structure @cbs.
- */
-int vmw_context_binding_add(struct vmw_ctx_binding_state *cbs,
- const struct vmw_ctx_bindinfo *bi)
+int vmw_extended_context_define_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
{
- struct vmw_ctx_binding *loc;
-
- switch (bi->bt) {
- case vmw_ctx_binding_rt:
- if (unlikely((unsigned)bi->i1.rt_type >= SVGA3D_RT_MAX)) {
- DRM_ERROR("Illegal render target type %u.\n",
- (unsigned) bi->i1.rt_type);
- return -EINVAL;
- }
- loc = &cbs->render_targets[bi->i1.rt_type];
- break;
- case vmw_ctx_binding_tex:
- if (unlikely((unsigned)bi->i1.texture_stage >=
- SVGA3D_NUM_TEXTURE_UNITS)) {
- DRM_ERROR("Illegal texture/sampler unit %u.\n",
- (unsigned) bi->i1.texture_stage);
- return -EINVAL;
- }
- loc = &cbs->texture_units[bi->i1.texture_stage];
- break;
- case vmw_ctx_binding_shader:
- if (unlikely((unsigned)bi->i1.shader_type >=
- SVGA3D_SHADERTYPE_MAX)) {
- DRM_ERROR("Illegal shader type %u.\n",
- (unsigned) bi->i1.shader_type);
- return -EINVAL;
- }
- loc = &cbs->shaders[bi->i1.shader_type];
- break;
+ union drm_vmw_extended_context_arg *arg = (typeof(arg)) data;
+ struct drm_vmw_context_arg *rep = &arg->rep;
+
+ switch (arg->req) {
+ case drm_vmw_context_legacy:
+ return vmw_context_define(dev, rep, file_priv, false);
+ case drm_vmw_context_dx:
+ return vmw_context_define(dev, rep, file_priv, true);
default:
- BUG();
- }
-
- if (loc->bi.ctx != NULL)
- vmw_context_binding_drop(loc);
-
- loc->bi = *bi;
- loc->bi.scrubbed = false;
- list_add_tail(&loc->ctx_list, &cbs->list);
- INIT_LIST_HEAD(&loc->res_list);
-
- return 0;
-}
-
-/**
- * vmw_context_binding_transfer: Transfer a context binding tracking entry.
- *
- * @cbs: Pointer to the persistent context binding state tracker.
- * @bi: Information about the binding to track.
- *
- */
-static void vmw_context_binding_transfer(struct vmw_ctx_binding_state *cbs,
- const struct vmw_ctx_bindinfo *bi)
-{
- struct vmw_ctx_binding *loc;
-
- switch (bi->bt) {
- case vmw_ctx_binding_rt:
- loc = &cbs->render_targets[bi->i1.rt_type];
break;
- case vmw_ctx_binding_tex:
- loc = &cbs->texture_units[bi->i1.texture_stage];
- break;
- case vmw_ctx_binding_shader:
- loc = &cbs->shaders[bi->i1.shader_type];
- break;
- default:
- BUG();
- }
-
- if (loc->bi.ctx != NULL)
- vmw_context_binding_drop(loc);
-
- if (bi->res != NULL) {
- loc->bi = *bi;
- list_add_tail(&loc->ctx_list, &cbs->list);
- list_add_tail(&loc->res_list, &bi->res->binding_head);
}
+ return -EINVAL;
}
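From userspace, this handler is reached through the DRM_VMW_CREATE_EXTENDED_CONTEXT command from vmwgfx_drm.h. A sketch of a caller using libdrm's drmCommandWriteRead() wrapper (illustrative; error handling shortened):

    #include <string.h>
    #include <stdint.h>
    #include <xf86drm.h>
    #include "vmwgfx_drm.h"

    /* Create a DX context; the kernel returns -EINVAL when the device
     * lacks DX support (has_dx is false). */
    static int create_dx_context(int fd, int32_t *cid)
    {
            union drm_vmw_extended_context_arg arg;
            int ret;

            memset(&arg, 0, sizeof(arg));
            arg.req = drm_vmw_context_dx;   /* or drm_vmw_context_legacy */

            ret = drmCommandWriteRead(fd, DRM_VMW_CREATE_EXTENDED_CONTEXT,
                                      &arg, sizeof(arg));
            if (ret)
                    return ret;

            *cid = arg.rep.cid;             /* context id for later use */
            return 0;
    }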
/**
- * vmw_context_binding_kill - Kill a binding on the device
- * and stop tracking it.
- *
- * @cb: Pointer to binding tracker storage.
- *
- * Emits FIFO commands to scrub a binding represented by @cb.
- * Then stops tracking the binding and re-initializes its storage.
- */
-static void vmw_context_binding_kill(struct vmw_ctx_binding *cb)
-{
- if (!cb->bi.scrubbed) {
- (void) vmw_scrub_funcs[cb->bi.bt](&cb->bi, false);
- cb->bi.scrubbed = true;
- }
- vmw_context_binding_drop(cb);
-}
-
-/**
- * vmw_context_binding_state_kill - Kill all bindings associated with a
- * struct vmw_ctx_binding state structure, and re-initialize the structure.
+ * vmw_context_binding_list - Return a list of context bindings
*
- * @cbs: Pointer to the context binding state tracker.
+ * @ctx: The context resource
*
- * Emits commands to scrub all bindings associated with the
- * context binding state tracker. Then re-initializes the whole structure.
+ * Returns the current list of bindings of the given context. Note that
+ * this list becomes stale as soon as the dev_priv::binding_mutex is unlocked.
*/
-static void vmw_context_binding_state_kill(struct vmw_ctx_binding_state *cbs)
+struct list_head *vmw_context_binding_list(struct vmw_resource *ctx)
{
- struct vmw_ctx_binding *entry, *next;
+ struct vmw_user_context *uctx =
+ container_of(ctx, struct vmw_user_context, res);
- list_for_each_entry_safe(entry, next, &cbs->list, ctx_list)
- vmw_context_binding_kill(entry);
+ return vmw_binding_state_list(uctx->cbs);
}
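The returned list is only stable while dev_priv->binding_mutex is held, so a traversal is expected to look roughly like the sketch below (struct vmw_ctx_bindinfo and its ctx_list member come from vmwgfx_binding.h; treat the member names as assumptions):

    struct vmw_ctx_bindinfo *entry;

    mutex_lock(&dev_priv->binding_mutex);
    list_for_each_entry(entry, vmw_context_binding_list(ctx), ctx_list) {
            /* Inspect the binding; do not drop the mutex while iterating. */
    }
    mutex_unlock(&dev_priv->binding_mutex);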
-/**
- * vmw_context_binding_state_scrub - Scrub all bindings associated with a
- * struct vmw_ctx_binding state structure.
- *
- * @cbs: Pointer to the context binding state tracker.
- *
- * Emits commands to scrub all bindings associated with the
- * context binding state tracker.
- */
-static void vmw_context_binding_state_scrub(struct vmw_ctx_binding_state *cbs)
+struct vmw_cmdbuf_res_manager *vmw_context_res_man(struct vmw_resource *ctx)
{
- struct vmw_ctx_binding *entry;
-
- list_for_each_entry(entry, &cbs->list, ctx_list) {
- if (!entry->bi.scrubbed) {
- (void) vmw_scrub_funcs[entry->bi.bt](&entry->bi, false);
- entry->bi.scrubbed = true;
- }
- }
+ return container_of(ctx, struct vmw_user_context, res)->man;
}
-/**
- * vmw_context_binding_res_list_kill - Kill all bindings on a
- * resource binding list
- *
- * @head: list head of resource binding list
- *
- * Kills all bindings associated with a specific resource. Typically
- * called before the resource is destroyed.
- */
-void vmw_context_binding_res_list_kill(struct list_head *head)
+struct vmw_resource *vmw_context_cotable(struct vmw_resource *ctx,
+ SVGACOTableType cotable_type)
{
- struct vmw_ctx_binding *entry, *next;
+ if (cotable_type >= SVGA_COTABLE_DX10_MAX)
+ return ERR_PTR(-EINVAL);
- list_for_each_entry_safe(entry, next, head, res_list)
- vmw_context_binding_kill(entry);
+ return vmw_resource_reference
+ (container_of(ctx, struct vmw_user_context, res)->
+ cotables[cotable_type]);
}
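The accessor returns a counted reference, or ERR_PTR(-EINVAL) for an out-of-range table type, so callers pair it with vmw_resource_unreference(). A sketch, using SVGA_COTABLE_RTVIEW as an arbitrary example type:

    struct vmw_resource *cotable;

    cotable = vmw_context_cotable(ctx, SVGA_COTABLE_RTVIEW);
    if (IS_ERR(cotable))
            return PTR_ERR(cotable);

    /* ... validate or scrub the cotable ... */

    vmw_resource_unreference(&cotable);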
/**
- * vmw_context_binding_res_list_scrub - Scrub all bindings on a
- * resource binding list
+ * vmw_context_binding_state -
+ * Return a pointer to a context binding state structure
*
- * @head: list head of resource binding list
+ * @ctx: The context resource
*
- * Scrub all bindings associated with a specific resource. Typically
- * called before the resource is evicted.
+ * Returns the current state of bindings of the given context. Note that
+ * this state becomes stale as soon as the dev_priv::binding_mutex is unlocked.
*/
-void vmw_context_binding_res_list_scrub(struct list_head *head)
+struct vmw_ctx_binding_state *
+vmw_context_binding_state(struct vmw_resource *ctx)
{
- struct vmw_ctx_binding *entry;
-
- list_for_each_entry(entry, head, res_list) {
- if (!entry->bi.scrubbed) {
- (void) vmw_scrub_funcs[entry->bi.bt](&entry->bi, false);
- entry->bi.scrubbed = true;
- }
- }
+ return container_of(ctx, struct vmw_user_context, res)->cbs;
}
/**
- * vmw_context_binding_state_transfer - Commit staged binding info
+ * vmw_context_bind_dx_query -
+ * Sets query MOB for the context. If @mob is NULL, then this function will
+ * remove the association between the MOB and the context. This function
+ * assumes the binding_mutex is held.
*
- * @ctx: Pointer to context to commit the staged binding info to.
- * @from: Staged binding info built during execbuf.
+ * @ctx_res: The context resource
+ * @mob: a reference to the query MOB
*
- * Transfers binding info from a temporary structure to the persistent
- * structure in the context. This can be done once commands
+ * Returns -EINVAL if a MOB has already been set and does not match the one
+ * specified in the parameter. 0 otherwise.
*/
-void vmw_context_binding_state_transfer(struct vmw_resource *ctx,
- struct vmw_ctx_binding_state *from)
+int vmw_context_bind_dx_query(struct vmw_resource *ctx_res,
+ struct vmw_dma_buffer *mob)
{
struct vmw_user_context *uctx =
- container_of(ctx, struct vmw_user_context, res);
- struct vmw_ctx_binding *entry, *next;
-
- list_for_each_entry_safe(entry, next, &from->list, ctx_list)
- vmw_context_binding_transfer(&uctx->cbs, &entry->bi);
-}
+ container_of(ctx_res, struct vmw_user_context, res);
-/**
- * vmw_context_rebind_all - Rebind all scrubbed bindings of a context
- *
- * @ctx: The context resource
- *
- * Walks through the context binding list and rebinds all scrubbed
- * resources.
- */
-int vmw_context_rebind_all(struct vmw_resource *ctx)
-{
- struct vmw_ctx_binding *entry;
- struct vmw_user_context *uctx =
- container_of(ctx, struct vmw_user_context, res);
- struct vmw_ctx_binding_state *cbs = &uctx->cbs;
- int ret;
+ if (mob == NULL) {
+ if (uctx->dx_query_mob) {
+ uctx->dx_query_mob->dx_query_ctx = NULL;
+ vmw_dmabuf_unreference(&uctx->dx_query_mob);
+ uctx->dx_query_mob = NULL;
+ }
- list_for_each_entry(entry, &cbs->list, ctx_list) {
- if (likely(!entry->bi.scrubbed))
- continue;
+ return 0;
+ }
- if (WARN_ON(entry->bi.res == NULL || entry->bi.res->id ==
- SVGA3D_INVALID_ID))
- continue;
+ /* Can only have one MOB per context for queries */
+ if (uctx->dx_query_mob && uctx->dx_query_mob != mob)
+ return -EINVAL;
- ret = vmw_scrub_funcs[entry->bi.bt](&entry->bi, true);
- if (unlikely(ret != 0))
- return ret;
+ mob->dx_query_ctx = ctx_res;
- entry->bi.scrubbed = false;
- }
+ if (!uctx->dx_query_mob)
+ uctx->dx_query_mob = vmw_dmabuf_reference(mob);
return 0;
}
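Per the comment above, callers must hold binding_mutex across the call. A minimal sketch of the expected pairing (illustrative):

    mutex_lock(&dev_priv->binding_mutex);

    /* Associate the query MOB with the context; returns 0, or -EINVAL
     * if a different MOB is already bound. */
    ret = vmw_context_bind_dx_query(ctx_res, mob);

    /* ... issue DX query commands against the context ... */

    /* Passing NULL drops the association and the MOB reference. */
    (void) vmw_context_bind_dx_query(ctx_res, NULL);

    mutex_unlock(&dev_priv->binding_mutex);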
/**
- * vmw_context_binding_list - Return a list of context bindings
- *
- * @ctx: The context resource
+ * vmw_context_get_dx_query_mob - Returns non-counted reference to DX query mob
*
- * Returns the current list of bindings of the given context. Note that
- * this list becomes stale as soon as the dev_priv::binding_mutex is unlocked.
+ * @ctx_res: The context resource
*/
-struct list_head *vmw_context_binding_list(struct vmw_resource *ctx)
+struct vmw_dma_buffer *
+vmw_context_get_dx_query_mob(struct vmw_resource *ctx_res)
{
- return &(container_of(ctx, struct vmw_user_context, res)->cbs.list);
-}
+ struct vmw_user_context *uctx =
+ container_of(ctx_res, struct vmw_user_context, res);
-struct vmw_cmdbuf_res_manager *vmw_context_res_man(struct vmw_resource *ctx)
-{
- return container_of(ctx, struct vmw_user_context, res)->man;
+ return uctx->dx_query_mob;
}