path: root/drivers/gpu/drm/vmwgfx
Diffstat (limited to 'drivers/gpu/drm/vmwgfx')
-rw-r--r--  drivers/gpu/drm/vmwgfx/Makefile                |    2
-rw-r--r--  drivers/gpu/drm/vmwgfx/svga3d_reg.h            |  742
-rw-r--r--  drivers/gpu/drm/vmwgfx/svga_reg.h              |   10
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c         |  182
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_context.c        |  633
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c         |    8
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.c            |  218
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.h            |  248
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c        | 1172
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_fence.c          |    9
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c           |  107
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c            |  160
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c  |   15
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c          |  105
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_irq.c            |    2
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_kms.c            |   38
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_mob.c            |  653
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_resource.c       |  206
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_shader.c         |  810
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_surface.c        |  475
20 files changed, 5374 insertions(+), 421 deletions(-)
diff --git a/drivers/gpu/drm/vmwgfx/Makefile b/drivers/gpu/drm/vmwgfx/Makefile
index 9f8b690bcf52..458cdf6d81e8 100644
--- a/drivers/gpu/drm/vmwgfx/Makefile
+++ b/drivers/gpu/drm/vmwgfx/Makefile
@@ -6,6 +6,6 @@ vmwgfx-y := vmwgfx_execbuf.o vmwgfx_gmr.o vmwgfx_kms.o vmwgfx_drv.o \
vmwgfx_fifo.o vmwgfx_irq.o vmwgfx_ldu.o vmwgfx_ttm_glue.o \
vmwgfx_overlay.o vmwgfx_marker.o vmwgfx_gmrid_manager.o \
vmwgfx_fence.o vmwgfx_dmabuf.o vmwgfx_scrn.o vmwgfx_context.o \
- vmwgfx_surface.o vmwgfx_prime.o
+ vmwgfx_surface.o vmwgfx_prime.o vmwgfx_mob.o vmwgfx_shader.o
obj-$(CONFIG_DRM_VMWGFX) := vmwgfx.o
diff --git a/drivers/gpu/drm/vmwgfx/svga3d_reg.h b/drivers/gpu/drm/vmwgfx/svga3d_reg.h
index d0e085ee8249..b645647b7776 100644
--- a/drivers/gpu/drm/vmwgfx/svga3d_reg.h
+++ b/drivers/gpu/drm/vmwgfx/svga3d_reg.h
@@ -34,6 +34,8 @@
#include "svga_reg.h"
+typedef uint32 PPN;
+typedef __le64 PPN64;
/*
* 3D Hardware Version
@@ -71,6 +73,9 @@ typedef uint32 SVGA3dBool; /* 32-bit Bool definition */
#define SVGA3D_MAX_CONTEXT_IDS 256
#define SVGA3D_MAX_SURFACE_IDS (32 * 1024)
+#define SVGA3D_NUM_TEXTURE_UNITS 32
+#define SVGA3D_NUM_LIGHTS 8
+
/*
* Surface formats.
*
@@ -81,6 +86,7 @@ typedef uint32 SVGA3dBool; /* 32-bit Bool definition */
*/
typedef enum SVGA3dSurfaceFormat {
+ SVGA3D_FORMAT_MIN = 0,
SVGA3D_FORMAT_INVALID = 0,
SVGA3D_X8R8G8B8 = 1,
@@ -134,12 +140,6 @@ typedef enum SVGA3dSurfaceFormat {
SVGA3D_RG_S10E5 = 35,
SVGA3D_RG_S23E8 = 36,
- /*
- * Any surface can be used as a buffer object, but SVGA3D_BUFFER is
- * the most efficient format to use when creating new surfaces
- * expressly for index or vertex data.
- */
-
SVGA3D_BUFFER = 37,
SVGA3D_Z_D24X8 = 38,
@@ -159,15 +159,114 @@ typedef enum SVGA3dSurfaceFormat {
/* Video format with alpha */
SVGA3D_AYUV = 45,
+ SVGA3D_R32G32B32A32_TYPELESS = 46,
+ SVGA3D_R32G32B32A32_FLOAT = 25,
+ SVGA3D_R32G32B32A32_UINT = 47,
+ SVGA3D_R32G32B32A32_SINT = 48,
+ SVGA3D_R32G32B32_TYPELESS = 49,
+ SVGA3D_R32G32B32_FLOAT = 50,
+ SVGA3D_R32G32B32_UINT = 51,
+ SVGA3D_R32G32B32_SINT = 52,
+ SVGA3D_R16G16B16A16_TYPELESS = 53,
+ SVGA3D_R16G16B16A16_FLOAT = 24,
+ SVGA3D_R16G16B16A16_UNORM = 41,
+ SVGA3D_R16G16B16A16_UINT = 54,
+ SVGA3D_R16G16B16A16_SNORM = 55,
+ SVGA3D_R16G16B16A16_SINT = 56,
+ SVGA3D_R32G32_TYPELESS = 57,
+ SVGA3D_R32G32_FLOAT = 36,
+ SVGA3D_R32G32_UINT = 58,
+ SVGA3D_R32G32_SINT = 59,
+ SVGA3D_R32G8X24_TYPELESS = 60,
+ SVGA3D_D32_FLOAT_S8X24_UINT = 61,
+ SVGA3D_R32_FLOAT_X8X24_TYPELESS = 62,
+ SVGA3D_X32_TYPELESS_G8X24_UINT = 63,
+ SVGA3D_R10G10B10A2_TYPELESS = 64,
+ SVGA3D_R10G10B10A2_UNORM = 26,
+ SVGA3D_R10G10B10A2_UINT = 65,
+ SVGA3D_R11G11B10_FLOAT = 66,
+ SVGA3D_R8G8B8A8_TYPELESS = 67,
+ SVGA3D_R8G8B8A8_UNORM = 68,
+ SVGA3D_R8G8B8A8_UNORM_SRGB = 69,
+ SVGA3D_R8G8B8A8_UINT = 70,
+ SVGA3D_R8G8B8A8_SNORM = 28,
+ SVGA3D_R8G8B8A8_SINT = 71,
+ SVGA3D_R16G16_TYPELESS = 72,
+ SVGA3D_R16G16_FLOAT = 35,
+ SVGA3D_R16G16_UNORM = 40,
+ SVGA3D_R16G16_UINT = 73,
+ SVGA3D_R16G16_SNORM = 39,
+ SVGA3D_R16G16_SINT = 74,
+ SVGA3D_R32_TYPELESS = 75,
+ SVGA3D_D32_FLOAT = 76,
+ SVGA3D_R32_FLOAT = 34,
+ SVGA3D_R32_UINT = 77,
+ SVGA3D_R32_SINT = 78,
+ SVGA3D_R24G8_TYPELESS = 79,
+ SVGA3D_D24_UNORM_S8_UINT = 80,
+ SVGA3D_R24_UNORM_X8_TYPELESS = 81,
+ SVGA3D_X24_TYPELESS_G8_UINT = 82,
+ SVGA3D_R8G8_TYPELESS = 83,
+ SVGA3D_R8G8_UNORM = 84,
+ SVGA3D_R8G8_UINT = 85,
+ SVGA3D_R8G8_SNORM = 27,
+ SVGA3D_R8G8_SINT = 86,
+ SVGA3D_R16_TYPELESS = 87,
+ SVGA3D_R16_FLOAT = 33,
+ SVGA3D_D16_UNORM = 8,
+ SVGA3D_R16_UNORM = 88,
+ SVGA3D_R16_UINT = 89,
+ SVGA3D_R16_SNORM = 90,
+ SVGA3D_R16_SINT = 91,
+ SVGA3D_R8_TYPELESS = 92,
+ SVGA3D_R8_UNORM = 93,
+ SVGA3D_R8_UINT = 94,
+ SVGA3D_R8_SNORM = 95,
+ SVGA3D_R8_SINT = 96,
+ SVGA3D_A8_UNORM = 32,
+ SVGA3D_R1_UNORM = 97,
+ SVGA3D_R9G9B9E5_SHAREDEXP = 98,
+ SVGA3D_R8G8_B8G8_UNORM = 99,
+ SVGA3D_G8R8_G8B8_UNORM = 100,
+ SVGA3D_BC1_TYPELESS = 101,
+ SVGA3D_BC1_UNORM = 15,
+ SVGA3D_BC1_UNORM_SRGB = 102,
+ SVGA3D_BC2_TYPELESS = 103,
+ SVGA3D_BC2_UNORM = 17,
+ SVGA3D_BC2_UNORM_SRGB = 104,
+ SVGA3D_BC3_TYPELESS = 105,
+ SVGA3D_BC3_UNORM = 19,
+ SVGA3D_BC3_UNORM_SRGB = 106,
+ SVGA3D_BC4_TYPELESS = 107,
SVGA3D_BC4_UNORM = 108,
+ SVGA3D_BC4_SNORM = 109,
+ SVGA3D_BC5_TYPELESS = 110,
SVGA3D_BC5_UNORM = 111,
+ SVGA3D_BC5_SNORM = 112,
+ SVGA3D_B5G6R5_UNORM = 3,
+ SVGA3D_B5G5R5A1_UNORM = 5,
+ SVGA3D_B8G8R8A8_UNORM = 2,
+ SVGA3D_B8G8R8X8_UNORM = 1,
+ SVGA3D_R10G10B10_XR_BIAS_A2_UNORM = 113,
+ SVGA3D_B8G8R8A8_TYPELESS = 114,
+ SVGA3D_B8G8R8A8_UNORM_SRGB = 115,
+ SVGA3D_B8G8R8X8_TYPELESS = 116,
+ SVGA3D_B8G8R8X8_UNORM_SRGB = 117,
/* Advanced D3D9 depth formats. */
SVGA3D_Z_DF16 = 118,
SVGA3D_Z_DF24 = 119,
SVGA3D_Z_D24S8_INT = 120,
- SVGA3D_FORMAT_MAX
+ /* Planar video formats. */
+ SVGA3D_YV12 = 121,
+
+ /* Shader constant formats. */
+ SVGA3D_SURFACE_SHADERCONST_FLOAT = 122,
+ SVGA3D_SURFACE_SHADERCONST_INT = 123,
+ SVGA3D_SURFACE_SHADERCONST_BOOL = 124,
+
+ SVGA3D_FORMAT_MAX = 125,
} SVGA3dSurfaceFormat;
typedef uint32 SVGA3dColor; /* a, r, g, b */
@@ -957,15 +1056,21 @@ typedef enum {
} SVGA3dCubeFace;
typedef enum {
+ SVGA3D_SHADERTYPE_INVALID = 0,
+ SVGA3D_SHADERTYPE_MIN = 1,
SVGA3D_SHADERTYPE_VS = 1,
SVGA3D_SHADERTYPE_PS = 2,
- SVGA3D_SHADERTYPE_MAX
+ SVGA3D_SHADERTYPE_MAX = 3,
+ SVGA3D_SHADERTYPE_GS = 3,
} SVGA3dShaderType;
+#define SVGA3D_NUM_SHADERTYPE (SVGA3D_SHADERTYPE_MAX - SVGA3D_SHADERTYPE_MIN)
+
typedef enum {
SVGA3D_CONST_TYPE_FLOAT = 0,
SVGA3D_CONST_TYPE_INT = 1,
SVGA3D_CONST_TYPE_BOOL = 2,
+ SVGA3D_CONST_TYPE_MAX
} SVGA3dShaderConstType;
#define SVGA3D_MAX_SURFACE_FACES 6
@@ -1056,9 +1161,74 @@ typedef enum {
#define SVGA_3D_CMD_GENERATE_MIPMAPS SVGA_3D_CMD_BASE + 31
#define SVGA_3D_CMD_ACTIVATE_SURFACE SVGA_3D_CMD_BASE + 40
#define SVGA_3D_CMD_DEACTIVATE_SURFACE SVGA_3D_CMD_BASE + 41
-#define SVGA_3D_CMD_MAX SVGA_3D_CMD_BASE + 42
-
-#define SVGA_3D_CMD_FUTURE_MAX 2000
+#define SVGA_3D_CMD_SCREEN_DMA 1082
+#define SVGA_3D_CMD_SET_UNITY_SURFACE_COOKIE 1083
+#define SVGA_3D_CMD_OPEN_CONTEXT_SURFACE 1084
+
+#define SVGA_3D_CMD_LOGICOPS_BITBLT 1085
+#define SVGA_3D_CMD_LOGICOPS_TRANSBLT 1086
+#define SVGA_3D_CMD_LOGICOPS_STRETCHBLT 1087
+#define SVGA_3D_CMD_LOGICOPS_COLORFILL 1088
+#define SVGA_3D_CMD_LOGICOPS_ALPHABLEND 1089
+#define SVGA_3D_CMD_LOGICOPS_CLEARTYPEBLEND 1090
+
+#define SVGA_3D_CMD_SET_OTABLE_BASE 1091
+#define SVGA_3D_CMD_READBACK_OTABLE 1092
+
+#define SVGA_3D_CMD_DEFINE_GB_MOB 1093
+#define SVGA_3D_CMD_DESTROY_GB_MOB 1094
+#define SVGA_3D_CMD_REDEFINE_GB_MOB 1095
+#define SVGA_3D_CMD_UPDATE_GB_MOB_MAPPING 1096
+
+#define SVGA_3D_CMD_DEFINE_GB_SURFACE 1097
+#define SVGA_3D_CMD_DESTROY_GB_SURFACE 1098
+#define SVGA_3D_CMD_BIND_GB_SURFACE 1099
+#define SVGA_3D_CMD_COND_BIND_GB_SURFACE 1100
+#define SVGA_3D_CMD_UPDATE_GB_IMAGE 1101
+#define SVGA_3D_CMD_UPDATE_GB_SURFACE 1102
+#define SVGA_3D_CMD_READBACK_GB_IMAGE 1103
+#define SVGA_3D_CMD_READBACK_GB_SURFACE 1104
+#define SVGA_3D_CMD_INVALIDATE_GB_IMAGE 1105
+#define SVGA_3D_CMD_INVALIDATE_GB_SURFACE 1106
+
+#define SVGA_3D_CMD_DEFINE_GB_CONTEXT 1107
+#define SVGA_3D_CMD_DESTROY_GB_CONTEXT 1108
+#define SVGA_3D_CMD_BIND_GB_CONTEXT 1109
+#define SVGA_3D_CMD_READBACK_GB_CONTEXT 1110
+#define SVGA_3D_CMD_INVALIDATE_GB_CONTEXT 1111
+
+#define SVGA_3D_CMD_DEFINE_GB_SHADER 1112
+#define SVGA_3D_CMD_DESTROY_GB_SHADER 1113
+#define SVGA_3D_CMD_BIND_GB_SHADER 1114
+
+#define SVGA_3D_CMD_SET_OTABLE_BASE64 1115
+
+#define SVGA_3D_CMD_BEGIN_GB_QUERY 1116
+#define SVGA_3D_CMD_END_GB_QUERY 1117
+#define SVGA_3D_CMD_WAIT_FOR_GB_QUERY 1118
+
+#define SVGA_3D_CMD_NOP 1119
+
+#define SVGA_3D_CMD_ENABLE_GART 1120
+#define SVGA_3D_CMD_DISABLE_GART 1121
+#define SVGA_3D_CMD_MAP_MOB_INTO_GART 1122
+#define SVGA_3D_CMD_UNMAP_GART_RANGE 1123
+
+#define SVGA_3D_CMD_DEFINE_GB_SCREENTARGET 1124
+#define SVGA_3D_CMD_DESTROY_GB_SCREENTARGET 1125
+#define SVGA_3D_CMD_BIND_GB_SCREENTARGET 1126
+#define SVGA_3D_CMD_UPDATE_GB_SCREENTARGET 1127
+
+#define SVGA_3D_CMD_READBACK_GB_IMAGE_PARTIAL 1128
+#define SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL 1129
+
+#define SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE 1130
+
+#define SVGA_3D_CMD_DEFINE_GB_MOB64 1135
+#define SVGA_3D_CMD_REDEFINE_GB_MOB64 1136
+
+#define SVGA_3D_CMD_MAX 1142
+#define SVGA_3D_CMD_FUTURE_MAX 3000
/*
* Common substructures used in multiple FIFO commands:
@@ -1750,6 +1920,495 @@ struct {
/*
+ * Guest-backed surface definitions.
+ */
+
+typedef uint32 SVGAMobId;
+
+typedef enum SVGAMobFormat {
+ SVGA3D_MOBFMT_INVALID = SVGA3D_INVALID_ID,
+ SVGA3D_MOBFMT_PTDEPTH_0 = 0,
+ SVGA3D_MOBFMT_PTDEPTH_1 = 1,
+ SVGA3D_MOBFMT_PTDEPTH_2 = 2,
+ SVGA3D_MOBFMT_RANGE = 3,
+ SVGA3D_MOBFMT_PTDEPTH64_0 = 4,
+ SVGA3D_MOBFMT_PTDEPTH64_1 = 5,
+ SVGA3D_MOBFMT_PTDEPTH64_2 = 6,
+ SVGA3D_MOBFMT_MAX,
+} SVGAMobFormat;
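/*
 * Illustrative sketch, not part of the patch: one way a driver could
 * pick a page-table depth for a MOB backed by num_pages data pages.
 * Assumes 4 KiB pages and 32-bit PPNs, so one page-table page holds
 * PAGE_SIZE / sizeof(PPN) == 1024 entries; the 64-bit formats follow
 * the same pattern with PPN64 entries.
 */
static inline SVGAMobFormat example_mob_pt_depth(unsigned long num_pages)
{
	const unsigned long ppns_per_page = PAGE_SIZE / sizeof(PPN);

	if (num_pages <= 1)
		return SVGA3D_MOBFMT_PTDEPTH_0;	/* base is the data PPN */
	if (num_pages <= ppns_per_page)
		return SVGA3D_MOBFMT_PTDEPTH_1;	/* one page of PPNs */
	if (num_pages <= ppns_per_page * ppns_per_page)
		return SVGA3D_MOBFMT_PTDEPTH_2;	/* two levels of PPNs */
	return SVGA3D_MOBFMT_INVALID;		/* too large to describe */
}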
+
+/*
+ * Sizes of opaque types.
+ */
+
+#define SVGA3D_OTABLE_MOB_ENTRY_SIZE 16
+#define SVGA3D_OTABLE_CONTEXT_ENTRY_SIZE 8
+#define SVGA3D_OTABLE_SURFACE_ENTRY_SIZE 64
+#define SVGA3D_OTABLE_SHADER_ENTRY_SIZE 16
+#define SVGA3D_OTABLE_SCREEN_TARGET_ENTRY_SIZE 64
+#define SVGA3D_CONTEXT_DATA_SIZE 16384
+
+/*
+ * SVGA3dCmdSetOTableBase --
+ *
+ * This command allows the guest to specify the base PPN of the
+ * specified object table.
+ */
+
+typedef enum {
+ SVGA_OTABLE_MOB = 0,
+ SVGA_OTABLE_MIN = 0,
+ SVGA_OTABLE_SURFACE = 1,
+ SVGA_OTABLE_CONTEXT = 2,
+ SVGA_OTABLE_SHADER = 3,
+ SVGA_OTABLE_SCREEN_TARGET = 4,
+ SVGA_OTABLE_DX9_MAX = 5,
+ SVGA_OTABLE_MAX = 8
+} SVGAOTableType;
+
+typedef
+struct {
+ SVGAOTableType type;
+ PPN baseAddress;
+ uint32 sizeInBytes;
+ uint32 validSizeInBytes;
+ SVGAMobFormat ptDepth;
+}
+__attribute__((__packed__))
+SVGA3dCmdSetOTableBase; /* SVGA_3D_CMD_SET_OTABLE_BASE */
+
+typedef
+struct {
+ SVGAOTableType type;
+ PPN64 baseAddress;
+ uint32 sizeInBytes;
+ uint32 validSizeInBytes;
+ SVGAMobFormat ptDepth;
+}
+__attribute__((__packed__))
+SVGA3dCmdSetOTableBase64; /* SVGA_3D_CMD_SET_OTABLE_BASE64 */
+
+typedef
+struct {
+ SVGAOTableType type;
+}
+__attribute__((__packed__))
+SVGA3dCmdReadbackOTable; /* SVGA_3D_CMD_READBACK_OTABLE */
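/*
 * Illustrative sketch, not part of the patch: each object table is a
 * MOB-backed array with one fixed-size entry per object id, so the
 * sizeInBytes passed in the commands above is simply max_ids times the
 * entry size.  The id limits used here are hypothetical placeholders,
 * not values defined by this series.
 */
#define EXAMPLE_NUM_MOB_IDS	(16 * 1024)
#define EXAMPLE_NUM_SURFACE_IDS	(32 * 1024)

static inline uint32 example_otable_bytes(SVGAOTableType type)
{
	switch (type) {
	case SVGA_OTABLE_MOB:
		return EXAMPLE_NUM_MOB_IDS * SVGA3D_OTABLE_MOB_ENTRY_SIZE;
	case SVGA_OTABLE_SURFACE:
		return EXAMPLE_NUM_SURFACE_IDS * SVGA3D_OTABLE_SURFACE_ENTRY_SIZE;
	default:
		return 0;
	}
}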
+
+/*
+ * Define a memory object (Mob) in the OTable.
+ */
+
+typedef
+struct SVGA3dCmdDefineGBMob {
+ SVGAMobId mobid;
+ SVGAMobFormat ptDepth;
+ PPN base;
+ uint32 sizeInBytes;
+}
+__attribute__((__packed__))
+SVGA3dCmdDefineGBMob; /* SVGA_3D_CMD_DEFINE_GB_MOB */
+
+
+/*
+ * Destroys an object in the OTable.
+ */
+
+typedef
+struct SVGA3dCmdDestroyGBMob {
+ SVGAMobId mobid;
+}
+__attribute__((__packed__))
+SVGA3dCmdDestroyGBMob; /* SVGA_3D_CMD_DESTROY_GB_MOB */
+
+/*
+ * Redefine an object in the OTable.
+ */
+
+typedef
+struct SVGA3dCmdRedefineGBMob {
+ SVGAMobId mobid;
+ SVGAMobFormat ptDepth;
+ PPN base;
+ uint32 sizeInBytes;
+}
+__attribute__((__packed__))
+SVGA3dCmdRedefineGBMob; /* SVGA_3D_CMD_REDEFINE_GB_MOB */
+
+/*
+ * Define a memory object (Mob) in the OTable with a PPN64 base.
+ */
+
+typedef
+struct SVGA3dCmdDefineGBMob64 {
+ SVGAMobId mobid;
+ SVGAMobFormat ptDepth;
+ PPN64 base;
+ uint32 sizeInBytes;
+}
+__attribute__((__packed__))
+SVGA3dCmdDefineGBMob64; /* SVGA_3D_CMD_DEFINE_GB_MOB64 */
+
+/*
+ * Redefine an object in the OTable with PPN64 base.
+ */
+
+typedef
+struct SVGA3dCmdRedefineGBMob64 {
+ SVGAMobId mobid;
+ SVGAMobFormat ptDepth;
+ PPN64 base;
+ uint32 sizeInBytes;
+}
+__attribute__((__packed__))
+SVGA3dCmdRedefineGBMob64; /* SVGA_3D_CMD_REDEFINE_GB_MOB64 */
+
+/*
+ * Notification that the page tables have been modified.
+ */
+
+typedef
+struct SVGA3dCmdUpdateGBMobMapping {
+ SVGAMobId mobid;
+}
+__attribute__((__packed__))
+SVGA3dCmdUpdateGBMobMapping; /* SVGA_3D_CMD_UPDATE_GB_MOB_MAPPING */
+
+/*
+ * Define a guest-backed surface.
+ */
+
+typedef
+struct SVGA3dCmdDefineGBSurface {
+ uint32 sid;
+ SVGA3dSurfaceFlags surfaceFlags;
+ SVGA3dSurfaceFormat format;
+ uint32 numMipLevels;
+ uint32 multisampleCount;
+ SVGA3dTextureFilter autogenFilter;
+ SVGA3dSize size;
+} SVGA3dCmdDefineGBSurface; /* SVGA_3D_CMD_DEFINE_GB_SURFACE */
+
+/*
+ * Destroy a guest-backed surface.
+ */
+
+typedef
+struct SVGA3dCmdDestroyGBSurface {
+ uint32 sid;
+} SVGA3dCmdDestroyGBSurface; /* SVGA_3D_CMD_DESTROY_GB_SURFACE */
+
+/*
+ * Bind a guest-backed surface to an object.
+ */
+
+typedef
+struct SVGA3dCmdBindGBSurface {
+ uint32 sid;
+ SVGAMobId mobid;
+} SVGA3dCmdBindGBSurface; /* SVGA_3D_CMD_BIND_GB_SURFACE */
+
+/*
+ * Conditionally bind a mob to a guest backed surface if testMobid
+ * matches the currently bound mob. Optionally issue a readback on
+ * the surface while it is still bound to the old mobid if the mobid
+ * is changed by this command.
+ */
+
+#define SVGA3D_COND_BIND_GB_SURFACE_FLAG_READBACK (1 << 0)
+
+typedef
+struct{
+ uint32 sid;
+ SVGAMobId testMobid;
+ SVGAMobId mobid;
+ uint32 flags;
+}
+SVGA3dCmdCondBindGBSurface; /* SVGA_3D_CMD_COND_BIND_GB_SURFACE */
+
+/*
+ * Update an image in a guest-backed surface.
+ * (Inform the device that the guest-contents have been updated.)
+ */
+
+typedef
+struct SVGA3dCmdUpdateGBImage {
+ SVGA3dSurfaceImageId image;
+ SVGA3dBox box;
+} SVGA3dCmdUpdateGBImage; /* SVGA_3D_CMD_UPDATE_GB_IMAGE */
+
+/*
+ * Update an entire guest-backed surface.
+ * (Inform the device that the guest-contents have been updated.)
+ */
+
+typedef
+struct SVGA3dCmdUpdateGBSurface {
+ uint32 sid;
+} SVGA3dCmdUpdateGBSurface; /* SVGA_3D_CMD_UPDATE_GB_SURFACE */
+
+/*
+ * Readback an image in a guest-backed surface.
+ * (Request the device to flush the dirty contents into the guest.)
+ */
+
+typedef
+struct SVGA3dCmdReadbackGBImage {
+ SVGA3dSurfaceImageId image;
+} SVGA3dCmdReadbackGBImage; /* SVGA_3D_CMD_READBACK_GB_IMAGE*/
+
+/*
+ * Readback an entire guest-backed surface.
+ * (Request the device to flush the dirty contents into the guest.)
+ */
+
+typedef
+struct SVGA3dCmdReadbackGBSurface {
+ uint32 sid;
+} SVGA3dCmdReadbackGBSurface; /* SVGA_3D_CMD_READBACK_GB_SURFACE */
+
+/*
+ * Readback a sub rect of an image in a guest-backed surface. After
+ * issuing this command the driver is required to issue an update call
+ * of the same region before issuing any other commands that reference
+ * this surface or rendering is not guaranteed.
+ */
+
+typedef
+struct SVGA3dCmdReadbackGBImagePartial {
+ SVGA3dSurfaceImageId image;
+ SVGA3dBox box;
+ uint32 invertBox;
+}
+SVGA3dCmdReadbackGBImagePartial; /* SVGA_3D_CMD_READBACK_GB_IMAGE_PARTIAL */
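/*
 * Illustrative sketch, not part of the patch: per the comment above, a
 * partial readback is paired with an update of the same region before
 * the surface is referenced again.  This assumes the
 * vmw_fifo_reserve()/vmw_fifo_commit() helpers and struct vmw_private
 * used elsewhere in this series.
 */
static int example_readback_then_update(struct vmw_private *dev_priv,
					SVGA3dSurfaceImageId image,
					SVGA3dBox box)
{
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdReadbackGBImagePartial body;
	} *cmd1;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdUpdateGBImage body;
	} *cmd2;
	void *cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd1) + sizeof(*cmd2));

	if (cmd == NULL)
		return -ENOMEM;

	cmd1 = cmd;
	cmd1->header.id = SVGA_3D_CMD_READBACK_GB_IMAGE_PARTIAL;
	cmd1->header.size = sizeof(cmd1->body);
	cmd1->body.image = image;
	cmd1->body.box = box;
	cmd1->body.invertBox = 0;

	cmd2 = (void *)(cmd1 + 1);
	cmd2->header.id = SVGA_3D_CMD_UPDATE_GB_IMAGE;
	cmd2->header.size = sizeof(cmd2->body);
	cmd2->body.image = image;
	cmd2->body.box = box;

	vmw_fifo_commit(dev_priv, sizeof(*cmd1) + sizeof(*cmd2));
	return 0;
}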
+
+/*
+ * Invalidate an image in a guest-backed surface.
+ * (Notify the device that the contents can be lost.)
+ */
+
+typedef
+struct SVGA3dCmdInvalidateGBImage {
+ SVGA3dSurfaceImageId image;
+} SVGA3dCmdInvalidateGBImage; /* SVGA_3D_CMD_INVALIDATE_GB_IMAGE */
+
+/*
+ * Invalidate an entire guest-backed surface.
+ * (Notify the device that the contents of all images can be lost.)
+ */
+
+typedef
+struct SVGA3dCmdInvalidateGBSurface {
+ uint32 sid;
+} SVGA3dCmdInvalidateGBSurface; /* SVGA_3D_CMD_INVALIDATE_GB_SURFACE */
+
+/*
+ * Invalidate a sub rect of an image in a guest-backed surface. After
+ * issuing this command the driver is required to issue an update call
+ * of the same region before issuing any other commands that reference
+ * this surface or rendering is not guaranteed.
+ */
+
+typedef
+struct SVGA3dCmdInvalidateGBImagePartial {
+ SVGA3dSurfaceImageId image;
+ SVGA3dBox box;
+ uint32 invertBox;
+}
+SVGA3dCmdInvalidateGBImagePartial; /* SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL */
+
+/*
+ * Define a guest-backed context.
+ */
+
+typedef
+struct SVGA3dCmdDefineGBContext {
+ uint32 cid;
+} SVGA3dCmdDefineGBContext; /* SVGA_3D_CMD_DEFINE_GB_CONTEXT */
+
+/*
+ * Destroy a guest-backed context.
+ */
+
+typedef
+struct SVGA3dCmdDestroyGBContext {
+ uint32 cid;
+} SVGA3dCmdDestroyGBContext; /* SVGA_3D_CMD_DESTROY_GB_CONTEXT */
+
+/*
+ * Bind a guest-backed context.
+ *
+ * validContents should be set to 0 for new contexts,
+ * and 1 if this is an old context which is getting paged
+ * back on to the device.
+ *
+ * For new contexts, it is recommended that the driver
+ * issue commands to initialize all interesting state
+ * prior to rendering.
+ */
+
+typedef
+struct SVGA3dCmdBindGBContext {
+ uint32 cid;
+ SVGAMobId mobid;
+ uint32 validContents;
+} SVGA3dCmdBindGBContext; /* SVGA_3D_CMD_BIND_GB_CONTEXT */
+
+/*
+ * Readback a guest-backed context.
+ * (Request that the device flush the contents back into guest memory.)
+ */
+
+typedef
+struct SVGA3dCmdReadbackGBContext {
+ uint32 cid;
+} SVGA3dCmdReadbackGBContext; /* SVGA_3D_CMD_READBACK_GB_CONTEXT */
+
+/*
+ * Invalidate a guest-backed context.
+ */
+typedef
+struct SVGA3dCmdInvalidateGBContext {
+ uint32 cid;
+} SVGA3dCmdInvalidateGBContext; /* SVGA_3D_CMD_INVALIDATE_GB_CONTEXT */
+
+/*
+ * Define a guest-backed shader.
+ */
+
+typedef
+struct SVGA3dCmdDefineGBShader {
+ uint32 shid;
+ SVGA3dShaderType type;
+ uint32 sizeInBytes;
+} SVGA3dCmdDefineGBShader; /* SVGA_3D_CMD_DEFINE_GB_SHADER */
+
+/*
+ * Bind a guest-backed shader.
+ */
+
+typedef struct SVGA3dCmdBindGBShader {
+ uint32 shid;
+ SVGAMobId mobid;
+ uint32 offsetInBytes;
+} SVGA3dCmdBindGBShader; /* SVGA_3D_CMD_BIND_GB_SHADER */
+
+/*
+ * Destroy a guest-backed shader.
+ */
+
+typedef struct SVGA3dCmdDestroyGBShader {
+ uint32 shid;
+} SVGA3dCmdDestroyGBShader; /* SVGA_3D_CMD_DESTROY_GB_SHADER */
+
+typedef
+struct {
+ uint32 cid;
+ uint32 regStart;
+ SVGA3dShaderType shaderType;
+ SVGA3dShaderConstType constType;
+
+ /*
+ * Followed by a variable number of shader constants.
+ *
+ * Note that FLOAT and INT constants are 4-dwords in length, while
+ * BOOL constants are 1-dword in length.
+ */
+} SVGA3dCmdSetGBShaderConstInline;
+/* SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE */
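/*
 * Illustrative sketch, not part of the patch: computing the body size
 * of an inline shader-constant command from the constant count, using
 * the dword sizes stated in the comment above (FLOAT/INT: 4 dwords,
 * BOOL: 1 dword).
 */
static inline uint32 example_gb_const_cmd_bytes(SVGA3dShaderConstType type,
						uint32 num_consts)
{
	uint32 dwords_per_const =
		(type == SVGA3D_CONST_TYPE_BOOL) ? 1 : 4;

	return sizeof(SVGA3dCmdSetGBShaderConstInline) +
		num_consts * dwords_per_const * sizeof(uint32);
}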
+
+typedef
+struct {
+ uint32 cid;
+ SVGA3dQueryType type;
+} SVGA3dCmdBeginGBQuery; /* SVGA_3D_CMD_BEGIN_GB_QUERY */
+
+typedef
+struct {
+ uint32 cid;
+ SVGA3dQueryType type;
+ SVGAMobId mobid;
+ uint32 offset;
+} SVGA3dCmdEndGBQuery; /* SVGA_3D_CMD_END_GB_QUERY */
+
+
+/*
+ * SVGA_3D_CMD_WAIT_FOR_GB_QUERY --
+ *
+ * The semantics of this command are identical to the
+ * SVGA_3D_CMD_WAIT_FOR_QUERY except that the results are written
+ * to a Mob instead of a GMR.
+ */
+
+typedef
+struct {
+ uint32 cid;
+ SVGA3dQueryType type;
+ SVGAMobId mobid;
+ uint32 offset;
+} SVGA3dCmdWaitForGBQuery; /* SVGA_3D_CMD_WAIT_FOR_GB_QUERY */
+
+typedef
+struct {
+ SVGAMobId mobid;
+ uint32 fbOffset;
+ uint32 initalized;
+}
+SVGA3dCmdEnableGart; /* SVGA_3D_CMD_ENABLE_GART */
+
+typedef
+struct {
+ SVGAMobId mobid;
+ uint32 gartOffset;
+}
+SVGA3dCmdMapMobIntoGart; /* SVGA_3D_CMD_MAP_MOB_INTO_GART */
+
+
+typedef
+struct {
+ uint32 gartOffset;
+ uint32 numPages;
+}
+SVGA3dCmdUnmapGartRange; /* SVGA_3D_CMD_UNMAP_GART_RANGE */
+
+
+/*
+ * Screen Targets
+ */
+#define SVGA_STFLAG_PRIMARY (1 << 0)
+
+typedef
+struct {
+ uint32 stid;
+ uint32 width;
+ uint32 height;
+ int32 xRoot;
+ int32 yRoot;
+ uint32 flags;
+}
+SVGA3dCmdDefineGBScreenTarget; /* SVGA_3D_CMD_DEFINE_GB_SCREENTARGET */
+
+typedef
+struct {
+ uint32 stid;
+}
+SVGA3dCmdDestroyGBScreenTarget; /* SVGA_3D_CMD_DESTROY_GB_SCREENTARGET */
+
+typedef
+struct {
+ uint32 stid;
+ SVGA3dSurfaceImageId image;
+}
+SVGA3dCmdBindGBScreenTarget; /* SVGA_3D_CMD_BIND_GB_SCREENTARGET */
+
+typedef
+struct {
+ uint32 stid;
+ SVGA3dBox box;
+}
+SVGA3dCmdUpdateGBScreenTarget; /* SVGA_3D_CMD_UPDATE_GB_SCREENTARGET */
+
+/*
* Capability query index.
*
* Notes:
@@ -1879,10 +2538,41 @@ typedef enum {
SVGA3D_DEVCAP_SURFACEFMT_BC5_UNORM = 83,
/*
- * Don't add new caps into the previous section; the values in this
- * enumeration must not change. You can put new values right before
- * SVGA3D_DEVCAP_MAX.
+ * Deprecated.
+ */
+ SVGA3D_DEVCAP_VGPU10 = 84,
+
+ /*
+ * This contains several SVGA_3D_CAPS_VIDEO_DECODE elements
+ * ored together, one for every type of video decoding supported.
+ */
+ SVGA3D_DEVCAP_VIDEO_DECODE = 85,
+
+ /*
+ * This contains several SVGA_3D_CAPS_VIDEO_PROCESS elements
+ * ored together, one for every type of video processing supported.
+ */
+ SVGA3D_DEVCAP_VIDEO_PROCESS = 86,
+
+ SVGA3D_DEVCAP_LINE_AA = 87, /* boolean */
+ SVGA3D_DEVCAP_LINE_STIPPLE = 88, /* boolean */
+ SVGA3D_DEVCAP_MAX_LINE_WIDTH = 89, /* float */
+ SVGA3D_DEVCAP_MAX_AA_LINE_WIDTH = 90, /* float */
+
+ SVGA3D_DEVCAP_SURFACEFMT_YV12 = 91,
+
+ /*
+ * Does the host support the SVGA logic ops commands?
+ */
+ SVGA3D_DEVCAP_LOGICOPS = 92,
+
+ /*
+ * What support does the host have for screen targets?
+ *
+ * See the SVGA3D_SCREENTARGET_CAP bits below.
*/
+ SVGA3D_DEVCAP_SCREENTARGETS = 93,
+
SVGA3D_DEVCAP_MAX /* This must be the last index. */
} SVGA3dDevCapIndex;
@@ -1893,4 +2583,28 @@ typedef union {
float f;
} SVGA3dDevCapResult;
+typedef enum {
+ SVGA3DCAPS_RECORD_UNKNOWN = 0,
+ SVGA3DCAPS_RECORD_DEVCAPS_MIN = 0x100,
+ SVGA3DCAPS_RECORD_DEVCAPS = 0x100,
+ SVGA3DCAPS_RECORD_DEVCAPS_MAX = 0x1ff,
+} SVGA3dCapsRecordType;
+
+typedef
+struct SVGA3dCapsRecordHeader {
+ uint32 length;
+ SVGA3dCapsRecordType type;
+}
+SVGA3dCapsRecordHeader;
+
+typedef
+struct SVGA3dCapsRecord {
+ SVGA3dCapsRecordHeader header;
+ uint32 data[1];
+}
+SVGA3dCapsRecord;
+
+
+typedef uint32 SVGA3dCapPair[2];
+
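/*
 * Illustrative sketch, not part of the patch: walking a caps record
 * list.  It assumes header.length is counted in 32-bit words, includes
 * the header itself, and that the list ends with a zero-length record;
 * treat those assumptions as hypothetical rather than guaranteed here.
 */
static inline const SVGA3dCapsRecord *
example_find_caps_record(const uint32 *caps, SVGA3dCapsRecordType type)
{
	const SVGA3dCapsRecord *rec = (const SVGA3dCapsRecord *)caps;

	while (rec->header.length != 0) {
		if (rec->header.type == type)
			return rec;
		rec = (const SVGA3dCapsRecord *)
			((const uint32 *)rec + rec->header.length);
	}
	return NULL;
}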
#endif /* _SVGA3D_REG_H_ */
diff --git a/drivers/gpu/drm/vmwgfx/svga_reg.h b/drivers/gpu/drm/vmwgfx/svga_reg.h
index 01f63cb49678..71defa4d2d75 100644
--- a/drivers/gpu/drm/vmwgfx/svga_reg.h
+++ b/drivers/gpu/drm/vmwgfx/svga_reg.h
@@ -169,7 +169,10 @@ enum {
SVGA_REG_TRACES = 45, /* Enable trace-based updates even when FIFO is on */
SVGA_REG_GMRS_MAX_PAGES = 46, /* Maximum number of 4KB pages for all GMRs */
SVGA_REG_MEMORY_SIZE = 47, /* Total dedicated device memory excluding FIFO */
- SVGA_REG_TOP = 48, /* Must be 1 more than the last register */
+ SVGA_REG_MAX_PRIMARY_BOUNDING_BOX_MEM = 50, /* Max primary memory */
+ SVGA_REG_SUGGESTED_GBOBJECT_MEM_SIZE_KB = 51, /* Suggested limit on mob mem */
+ SVGA_REG_DEV_CAP = 52, /* Write dev cap index, read value */
+ SVGA_REG_TOP = 53, /* Must be 1 more than the last register */
SVGA_PALETTE_BASE = 1024, /* Base of SVGA color map */
/* Next 768 (== 256*3) registers exist for colormap */
@@ -431,7 +434,10 @@ struct SVGASignedPoint {
#define SVGA_CAP_TRACES 0x00200000
#define SVGA_CAP_GMR2 0x00400000
#define SVGA_CAP_SCREEN_OBJECT_2 0x00800000
-
+#define SVGA_CAP_COMMAND_BUFFERS 0x01000000
+#define SVGA_CAP_DEAD1 0x02000000
+#define SVGA_CAP_CMD_BUFFERS_2 0x04000000
+#define SVGA_CAP_GBOBJECTS 0x08000000
/*
* FIFO register indices.
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
index 0489c6152482..6327cfc36805 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
@@ -40,6 +40,10 @@ static uint32_t vram_ne_placement_flags = TTM_PL_FLAG_VRAM |
static uint32_t sys_placement_flags = TTM_PL_FLAG_SYSTEM |
TTM_PL_FLAG_CACHED;
+static uint32_t sys_ne_placement_flags = TTM_PL_FLAG_SYSTEM |
+ TTM_PL_FLAG_CACHED |
+ TTM_PL_FLAG_NO_EVICT;
+
static uint32_t gmr_placement_flags = VMW_PL_FLAG_GMR |
TTM_PL_FLAG_CACHED;
@@ -47,6 +51,9 @@ static uint32_t gmr_ne_placement_flags = VMW_PL_FLAG_GMR |
TTM_PL_FLAG_CACHED |
TTM_PL_FLAG_NO_EVICT;
+static uint32_t mob_placement_flags = VMW_PL_FLAG_MOB |
+ TTM_PL_FLAG_CACHED;
+
struct ttm_placement vmw_vram_placement = {
.fpfn = 0,
.lpfn = 0,
@@ -116,16 +123,26 @@ struct ttm_placement vmw_sys_placement = {
.busy_placement = &sys_placement_flags
};
+struct ttm_placement vmw_sys_ne_placement = {
+ .fpfn = 0,
+ .lpfn = 0,
+ .num_placement = 1,
+ .placement = &sys_ne_placement_flags,
+ .num_busy_placement = 1,
+ .busy_placement = &sys_ne_placement_flags
+};
+
static uint32_t evictable_placement_flags[] = {
TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED,
TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED,
- VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED
+ VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED,
+ VMW_PL_FLAG_MOB | TTM_PL_FLAG_CACHED
};
struct ttm_placement vmw_evictable_placement = {
.fpfn = 0,
.lpfn = 0,
- .num_placement = 3,
+ .num_placement = 4,
.placement = evictable_placement_flags,
.num_busy_placement = 1,
.busy_placement = &sys_placement_flags
@@ -140,10 +157,21 @@ struct ttm_placement vmw_srf_placement = {
.busy_placement = gmr_vram_placement_flags
};
+struct ttm_placement vmw_mob_placement = {
+ .fpfn = 0,
+ .lpfn = 0,
+ .num_placement = 1,
+ .num_busy_placement = 1,
+ .placement = &mob_placement_flags,
+ .busy_placement = &mob_placement_flags
+};
+
struct vmw_ttm_tt {
struct ttm_dma_tt dma_ttm;
struct vmw_private *dev_priv;
int gmr_id;
+ struct vmw_mob *mob;
+ int mem_type;
struct sg_table sgt;
struct vmw_sg_table vsgt;
uint64_t sg_alloc_size;
@@ -244,6 +272,7 @@ void vmw_piter_start(struct vmw_piter *viter, const struct vmw_sg_table *vsgt,
viter->dma_address = &__vmw_piter_dma_addr;
viter->page = &__vmw_piter_non_sg_page;
viter->addrs = vsgt->addrs;
+ viter->pages = vsgt->pages;
break;
case vmw_dma_map_populate:
case vmw_dma_map_bind:
@@ -424,6 +453,63 @@ static void vmw_ttm_unmap_dma(struct vmw_ttm_tt *vmw_tt)
vmw_tt->mapped = false;
}
+
+/**
+ * vmw_bo_map_dma - Make sure buffer object pages are visible to the device
+ *
+ * @bo: Pointer to a struct ttm_buffer_object
+ *
+ * Wrapper around vmw_ttm_map_dma, that takes a TTM buffer object pointer
+ * instead of a pointer to a struct vmw_ttm_backend as argument.
+ * Note that the buffer object must be either pinned or reserved before
+ * calling this function.
+ */
+int vmw_bo_map_dma(struct ttm_buffer_object *bo)
+{
+ struct vmw_ttm_tt *vmw_tt =
+ container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm.ttm);
+
+ return vmw_ttm_map_dma(vmw_tt);
+}
+
+
+/**
+ * vmw_bo_unmap_dma - Tear down the device mapping of buffer object pages
+ *
+ * @bo: Pointer to a struct ttm_buffer_object
+ *
+ * Wrapper around vmw_ttm_unmap_dma, that takes a TTM buffer object pointer
+ * instead of a pointer to a struct vmw_ttm_backend as argument.
+ */
+void vmw_bo_unmap_dma(struct ttm_buffer_object *bo)
+{
+ struct vmw_ttm_tt *vmw_tt =
+ container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm.ttm);
+
+ vmw_ttm_unmap_dma(vmw_tt);
+}
+
+
+/**
+ * vmw_bo_sg_table - Return a struct vmw_sg_table object for a
+ * TTM buffer object
+ *
+ * @bo: Pointer to a struct ttm_buffer_object
+ *
+ * Returns a pointer to a struct vmw_sg_table object. The object should
+ * not be freed after use.
+ * Note that for the device addresses to be valid, the buffer object must
+ * either be reserved or pinned.
+ */
+const struct vmw_sg_table *vmw_bo_sg_table(struct ttm_buffer_object *bo)
+{
+ struct vmw_ttm_tt *vmw_tt =
+ container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm.ttm);
+
+ return &vmw_tt->vsgt;
+}
+
+
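/*
 * Illustrative sketch, not part of the patch: consuming the helpers
 * above.  vmw_piter_next() and vmw_piter_dma_addr() are assumed to be
 * the iterator accessors declared next to vmw_piter_start() in
 * vmwgfx_drv.h.
 */
static void example_dump_bo_dma_addrs(struct ttm_buffer_object *bo)
{
	const struct vmw_sg_table *vsgt;
	struct vmw_piter iter;
	unsigned long i;

	/* The bo must be reserved or pinned by the caller. */
	if (vmw_bo_map_dma(bo))
		return;

	vsgt = vmw_bo_sg_table(bo);
	vmw_piter_start(&iter, vsgt, 0);
	for (i = 0; i < bo->num_pages; ++i) {
		dma_addr_t addr = vmw_piter_dma_addr(&iter);

		pr_debug("page %lu -> dma addr %pad\n", i, &addr);
		vmw_piter_next(&iter);
	}
}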
static int vmw_ttm_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
{
struct vmw_ttm_tt *vmw_be =
@@ -435,9 +521,27 @@ static int vmw_ttm_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
return ret;
vmw_be->gmr_id = bo_mem->start;
+ vmw_be->mem_type = bo_mem->mem_type;
+
+ switch (bo_mem->mem_type) {
+ case VMW_PL_GMR:
+ return vmw_gmr_bind(vmw_be->dev_priv, &vmw_be->vsgt,
+ ttm->num_pages, vmw_be->gmr_id);
+ case VMW_PL_MOB:
+ if (unlikely(vmw_be->mob == NULL)) {
+ vmw_be->mob =
+ vmw_mob_create(ttm->num_pages);
+ if (unlikely(vmw_be->mob == NULL))
+ return -ENOMEM;
+ }
- return vmw_gmr_bind(vmw_be->dev_priv, &vmw_be->vsgt,
- ttm->num_pages, vmw_be->gmr_id);
+ return vmw_mob_bind(vmw_be->dev_priv, vmw_be->mob,
+ &vmw_be->vsgt, ttm->num_pages,
+ vmw_be->gmr_id);
+ default:
+ BUG();
+ }
+ return 0;
}
static int vmw_ttm_unbind(struct ttm_tt *ttm)
@@ -445,7 +549,16 @@ static int vmw_ttm_unbind(struct ttm_tt *ttm)
struct vmw_ttm_tt *vmw_be =
container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);
- vmw_gmr_unbind(vmw_be->dev_priv, vmw_be->gmr_id);
+ switch (vmw_be->mem_type) {
+ case VMW_PL_GMR:
+ vmw_gmr_unbind(vmw_be->dev_priv, vmw_be->gmr_id);
+ break;
+ case VMW_PL_MOB:
+ vmw_mob_unbind(vmw_be->dev_priv, vmw_be->mob);
+ break;
+ default:
+ BUG();
+ }
if (vmw_be->dev_priv->map_mode == vmw_dma_map_bind)
vmw_ttm_unmap_dma(vmw_be);
@@ -453,6 +566,7 @@ static int vmw_ttm_unbind(struct ttm_tt *ttm)
return 0;
}
+
static void vmw_ttm_destroy(struct ttm_tt *ttm)
{
struct vmw_ttm_tt *vmw_be =
@@ -463,9 +577,14 @@ static void vmw_ttm_destroy(struct ttm_tt *ttm)
ttm_dma_tt_fini(&vmw_be->dma_ttm);
else
ttm_tt_fini(ttm);
+
+ if (vmw_be->mob)
+ vmw_mob_destroy(vmw_be->mob);
+
kfree(vmw_be);
}
+
static int vmw_ttm_populate(struct ttm_tt *ttm)
{
struct vmw_ttm_tt *vmw_tt =
@@ -500,6 +619,12 @@ static void vmw_ttm_unpopulate(struct ttm_tt *ttm)
struct vmw_private *dev_priv = vmw_tt->dev_priv;
struct ttm_mem_global *glob = vmw_mem_glob(dev_priv);
+
+ if (vmw_tt->mob) {
+ vmw_mob_destroy(vmw_tt->mob);
+ vmw_tt->mob = NULL;
+ }
+
vmw_ttm_unmap_dma(vmw_tt);
if (dev_priv->map_mode == vmw_dma_alloc_coherent) {
size_t size =
@@ -517,7 +642,7 @@ static struct ttm_backend_func vmw_ttm_func = {
.destroy = vmw_ttm_destroy,
};
-struct ttm_tt *vmw_ttm_tt_create(struct ttm_bo_device *bdev,
+static struct ttm_tt *vmw_ttm_tt_create(struct ttm_bo_device *bdev,
unsigned long size, uint32_t page_flags,
struct page *dummy_read_page)
{
@@ -530,6 +655,7 @@ struct ttm_tt *vmw_ttm_tt_create(struct ttm_bo_device *bdev,
vmw_be->dma_ttm.ttm.func = &vmw_ttm_func;
vmw_be->dev_priv = container_of(bdev, struct vmw_private, bdev);
+ vmw_be->mob = NULL;
if (vmw_be->dev_priv->map_mode == vmw_dma_alloc_coherent)
ret = ttm_dma_tt_init(&vmw_be->dma_ttm, bdev, size, page_flags,
@@ -546,12 +672,12 @@ out_no_init:
return NULL;
}
-int vmw_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
+static int vmw_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
{
return 0;
}
-int vmw_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
+static int vmw_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
struct ttm_mem_type_manager *man)
{
switch (type) {
@@ -571,6 +697,7 @@ int vmw_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
man->default_caching = TTM_PL_FLAG_CACHED;
break;
case VMW_PL_GMR:
+ case VMW_PL_MOB:
/*
* "Guest Memory Regions" is an aperture like feature with
* one slot per bo. There is an upper limit of the number of
@@ -589,7 +716,7 @@ int vmw_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
return 0;
}
-void vmw_evict_flags(struct ttm_buffer_object *bo,
+static void vmw_evict_flags(struct ttm_buffer_object *bo,
struct ttm_placement *placement)
{
*placement = vmw_sys_placement;
@@ -618,6 +745,7 @@ static int vmw_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg
switch (mem->mem_type) {
case TTM_PL_SYSTEM:
case VMW_PL_GMR:
+ case VMW_PL_MOB:
return 0;
case TTM_PL_VRAM:
mem->bus.offset = mem->start << PAGE_SHIFT;
@@ -677,6 +805,38 @@ static int vmw_sync_obj_wait(void *sync_obj, bool lazy, bool interruptible)
VMW_FENCE_WAIT_TIMEOUT);
}
+/**
+ * vmw_move_notify - TTM move_notify_callback
+ *
+ * @bo: The TTM buffer object about to move.
+ * @mem: The struct ttm_mem_reg indicating to what memory
+ * region the move is taking place.
+ *
+ * Calls move_notify for all subsystems needing it.
+ * (currently only resources).
+ */
+static void vmw_move_notify(struct ttm_buffer_object *bo,
+ struct ttm_mem_reg *mem)
+{
+ vmw_resource_move_notify(bo, mem);
+}
+
+
+/**
+ * vmw_swap_notify - TTM swap_notify callback
+ *
+ * @bo: The TTM buffer object about to be swapped out.
+ */
+static void vmw_swap_notify(struct ttm_buffer_object *bo)
+{
+ struct ttm_bo_device *bdev = bo->bdev;
+
+ spin_lock(&bdev->fence_lock);
+ ttm_bo_wait(bo, false, false, false);
+ spin_unlock(&bdev->fence_lock);
+}
+
+
struct ttm_bo_driver vmw_bo_driver = {
.ttm_tt_create = &vmw_ttm_tt_create,
.ttm_tt_populate = &vmw_ttm_populate,
@@ -691,8 +851,8 @@ struct ttm_bo_driver vmw_bo_driver = {
.sync_obj_flush = vmw_sync_obj_flush,
.sync_obj_unref = vmw_sync_obj_unref,
.sync_obj_ref = vmw_sync_obj_ref,
- .move_notify = NULL,
- .swap_notify = NULL,
+ .move_notify = vmw_move_notify,
+ .swap_notify = vmw_swap_notify,
.fault_reserve_notify = &vmw_ttm_fault_reserve_notify,
.io_mem_reserve = &vmw_ttm_io_mem_reserve,
.io_mem_free = &vmw_ttm_io_mem_free,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
index 00ae0925aca8..9426c53fb483 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
@@ -32,12 +32,30 @@
struct vmw_user_context {
struct ttm_base_object base;
struct vmw_resource res;
+ struct vmw_ctx_binding_state cbs;
};
+
+
+typedef int (*vmw_scrub_func)(struct vmw_ctx_bindinfo *, bool);
+
static void vmw_user_context_free(struct vmw_resource *res);
static struct vmw_resource *
vmw_user_context_base_to_res(struct ttm_base_object *base);
+static int vmw_gb_context_create(struct vmw_resource *res);
+static int vmw_gb_context_bind(struct vmw_resource *res,
+ struct ttm_validate_buffer *val_buf);
+static int vmw_gb_context_unbind(struct vmw_resource *res,
+ bool readback,
+ struct ttm_validate_buffer *val_buf);
+static int vmw_gb_context_destroy(struct vmw_resource *res);
+static int vmw_context_scrub_shader(struct vmw_ctx_bindinfo *bi, bool rebind);
+static int vmw_context_scrub_render_target(struct vmw_ctx_bindinfo *bi,
+ bool rebind);
+static int vmw_context_scrub_texture(struct vmw_ctx_bindinfo *bi, bool rebind);
+static void vmw_context_binding_state_scrub(struct vmw_ctx_binding_state *cbs);
+static void vmw_context_binding_state_kill(struct vmw_ctx_binding_state *cbs);
static uint64_t vmw_user_context_size;
static const struct vmw_user_resource_conv user_context_conv = {
@@ -62,6 +80,23 @@ static const struct vmw_res_func vmw_legacy_context_func = {
.unbind = NULL
};
+static const struct vmw_res_func vmw_gb_context_func = {
+ .res_type = vmw_res_context,
+ .needs_backup = true,
+ .may_evict = true,
+ .type_name = "guest backed contexts",
+ .backup_placement = &vmw_mob_placement,
+ .create = vmw_gb_context_create,
+ .destroy = vmw_gb_context_destroy,
+ .bind = vmw_gb_context_bind,
+ .unbind = vmw_gb_context_unbind
+};
+
+static const vmw_scrub_func vmw_scrub_funcs[vmw_ctx_binding_max] = {
+ [vmw_ctx_binding_shader] = vmw_context_scrub_shader,
+ [vmw_ctx_binding_rt] = vmw_context_scrub_render_target,
+ [vmw_ctx_binding_tex] = vmw_context_scrub_texture };
+
/**
* Context management:
*/
@@ -76,6 +111,20 @@ static void vmw_hw_context_destroy(struct vmw_resource *res)
} *cmd;
+ if (res->func->destroy == vmw_gb_context_destroy) {
+ mutex_lock(&dev_priv->cmdbuf_mutex);
+ mutex_lock(&dev_priv->binding_mutex);
+ (void) vmw_context_binding_state_kill
+ (&container_of(res, struct vmw_user_context, res)->cbs);
+ (void) vmw_gb_context_destroy(res);
+ if (dev_priv->pinned_bo != NULL &&
+ !dev_priv->query_cid_valid)
+ __vmw_execbuf_release_pinned_bo(dev_priv, NULL);
+ mutex_unlock(&dev_priv->binding_mutex);
+ mutex_unlock(&dev_priv->cmdbuf_mutex);
+ return;
+ }
+
vmw_execbuf_release_pinned_bo(dev_priv);
cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
if (unlikely(cmd == NULL)) {
@@ -92,6 +141,33 @@ static void vmw_hw_context_destroy(struct vmw_resource *res)
vmw_3d_resource_dec(dev_priv, false);
}
+static int vmw_gb_context_init(struct vmw_private *dev_priv,
+ struct vmw_resource *res,
+ void (*res_free) (struct vmw_resource *res))
+{
+ int ret;
+ struct vmw_user_context *uctx =
+ container_of(res, struct vmw_user_context, res);
+
+ ret = vmw_resource_init(dev_priv, res, true,
+ res_free, &vmw_gb_context_func);
+ res->backup_size = SVGA3D_CONTEXT_DATA_SIZE;
+
+ if (unlikely(ret != 0)) {
+ if (res_free)
+ res_free(res);
+ else
+ kfree(res);
+ return ret;
+ }
+
+ memset(&uctx->cbs, 0, sizeof(uctx->cbs));
+ INIT_LIST_HEAD(&uctx->cbs.list);
+
+ vmw_resource_activate(res, vmw_hw_context_destroy);
+ return 0;
+}
+
static int vmw_context_init(struct vmw_private *dev_priv,
struct vmw_resource *res,
void (*res_free) (struct vmw_resource *res))
@@ -103,6 +179,9 @@ static int vmw_context_init(struct vmw_private *dev_priv,
SVGA3dCmdDefineContext body;
} *cmd;
+ if (dev_priv->has_mob)
+ return vmw_gb_context_init(dev_priv, res, res_free);
+
ret = vmw_resource_init(dev_priv, res, false,
res_free, &vmw_legacy_context_func);
@@ -154,6 +233,180 @@ struct vmw_resource *vmw_context_alloc(struct vmw_private *dev_priv)
return (ret == 0) ? res : NULL;
}
+
+static int vmw_gb_context_create(struct vmw_resource *res)
+{
+ struct vmw_private *dev_priv = res->dev_priv;
+ int ret;
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDefineGBContext body;
+ } *cmd;
+
+ if (likely(res->id != -1))
+ return 0;
+
+ ret = vmw_resource_alloc_id(res);
+ if (unlikely(ret != 0)) {
+ DRM_ERROR("Failed to allocate a context id.\n");
+ goto out_no_id;
+ }
+
+ if (unlikely(res->id >= VMWGFX_NUM_GB_CONTEXT)) {
+ ret = -EBUSY;
+ goto out_no_fifo;
+ }
+
+ cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+ if (unlikely(cmd == NULL)) {
+ DRM_ERROR("Failed reserving FIFO space for context "
+ "creation.\n");
+ ret = -ENOMEM;
+ goto out_no_fifo;
+ }
+
+ cmd->header.id = SVGA_3D_CMD_DEFINE_GB_CONTEXT;
+ cmd->header.size = sizeof(cmd->body);
+ cmd->body.cid = res->id;
+ vmw_fifo_commit(dev_priv, sizeof(*cmd));
+ (void) vmw_3d_resource_inc(dev_priv, false);
+
+ return 0;
+
+out_no_fifo:
+ vmw_resource_release_id(res);
+out_no_id:
+ return ret;
+}
+
+static int vmw_gb_context_bind(struct vmw_resource *res,
+ struct ttm_validate_buffer *val_buf)
+{
+ struct vmw_private *dev_priv = res->dev_priv;
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdBindGBContext body;
+ } *cmd;
+ struct ttm_buffer_object *bo = val_buf->bo;
+
+ BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
+
+ cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+ if (unlikely(cmd == NULL)) {
+ DRM_ERROR("Failed reserving FIFO space for context "
+ "binding.\n");
+ return -ENOMEM;
+ }
+
+ cmd->header.id = SVGA_3D_CMD_BIND_GB_CONTEXT;
+ cmd->header.size = sizeof(cmd->body);
+ cmd->body.cid = res->id;
+ cmd->body.mobid = bo->mem.start;
+ cmd->body.validContents = res->backup_dirty;
+ res->backup_dirty = false;
+ vmw_fifo_commit(dev_priv, sizeof(*cmd));
+
+ return 0;
+}
+
+static int vmw_gb_context_unbind(struct vmw_resource *res,
+ bool readback,
+ struct ttm_validate_buffer *val_buf)
+{
+ struct vmw_private *dev_priv = res->dev_priv;
+ struct ttm_buffer_object *bo = val_buf->bo;
+ struct vmw_fence_obj *fence;
+ struct vmw_user_context *uctx =
+ container_of(res, struct vmw_user_context, res);
+
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdReadbackGBContext body;
+ } *cmd1;
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdBindGBContext body;
+ } *cmd2;
+ uint32_t submit_size;
+ uint8_t *cmd;
+
+
+ BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
+
+ mutex_lock(&dev_priv->binding_mutex);
+ vmw_context_binding_state_scrub(&uctx->cbs);
+
+ submit_size = sizeof(*cmd2) + (readback ? sizeof(*cmd1) : 0);
+
+ cmd = vmw_fifo_reserve(dev_priv, submit_size);
+ if (unlikely(cmd == NULL)) {
+ DRM_ERROR("Failed reserving FIFO space for context "
+ "unbinding.\n");
+ mutex_unlock(&dev_priv->binding_mutex);
+ return -ENOMEM;
+ }
+
+ cmd2 = (void *) cmd;
+ if (readback) {
+ cmd1 = (void *) cmd;
+ cmd1->header.id = SVGA_3D_CMD_READBACK_GB_CONTEXT;
+ cmd1->header.size = sizeof(cmd1->body);
+ cmd1->body.cid = res->id;
+ cmd2 = (void *) (&cmd1[1]);
+ }
+ cmd2->header.id = SVGA_3D_CMD_BIND_GB_CONTEXT;
+ cmd2->header.size = sizeof(cmd2->body);
+ cmd2->body.cid = res->id;
+ cmd2->body.mobid = SVGA3D_INVALID_ID;
+
+ vmw_fifo_commit(dev_priv, submit_size);
+ mutex_unlock(&dev_priv->binding_mutex);
+
+ /*
+ * Create a fence object and fence the backup buffer.
+ */
+
+ (void) vmw_execbuf_fence_commands(NULL, dev_priv,
+ &fence, NULL);
+
+ vmw_fence_single_bo(bo, fence);
+
+ if (likely(fence != NULL))
+ vmw_fence_obj_unreference(&fence);
+
+ return 0;
+}
+
+static int vmw_gb_context_destroy(struct vmw_resource *res)
+{
+ struct vmw_private *dev_priv = res->dev_priv;
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDestroyGBContext body;
+ } *cmd;
+
+ if (likely(res->id == -1))
+ return 0;
+
+ cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+ if (unlikely(cmd == NULL)) {
+ DRM_ERROR("Failed reserving FIFO space for context "
+ "destruction.\n");
+ return -ENOMEM;
+ }
+
+ cmd->header.id = SVGA_3D_CMD_DESTROY_GB_CONTEXT;
+ cmd->header.size = sizeof(cmd->body);
+ cmd->body.cid = res->id;
+ vmw_fifo_commit(dev_priv, sizeof(*cmd));
+ if (dev_priv->query_cid == res->id)
+ dev_priv->query_cid_valid = false;
+ vmw_resource_release_id(res);
+ vmw_3d_resource_dec(dev_priv, false);
+
+ return 0;
+}
+
/**
* User-space context management:
*/
@@ -272,3 +525,383 @@ out_unlock:
return ret;
}
+
+/**
+ * vmw_context_scrub_shader - scrub a shader binding from a context.
+ *
+ * @bi: single binding information.
+ * @rebind: Whether to issue a bind instead of scrub command.
+ */
+static int vmw_context_scrub_shader(struct vmw_ctx_bindinfo *bi, bool rebind)
+{
+ struct vmw_private *dev_priv = bi->ctx->dev_priv;
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdSetShader body;
+ } *cmd;
+
+ cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+ if (unlikely(cmd == NULL)) {
+ DRM_ERROR("Failed reserving FIFO space for shader "
+ "unbinding.\n");
+ return -ENOMEM;
+ }
+
+ cmd->header.id = SVGA_3D_CMD_SET_SHADER;
+ cmd->header.size = sizeof(cmd->body);
+ cmd->body.cid = bi->ctx->id;
+ cmd->body.type = bi->i1.shader_type;
+ cmd->body.shid =
+ cpu_to_le32((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
+ vmw_fifo_commit(dev_priv, sizeof(*cmd));
+
+ return 0;
+}
+
+/**
+ * vmw_context_scrub_render_target - scrub a render target binding
+ * from a context.
+ *
+ * @bi: single binding information.
+ * @rebind: Whether to issue a bind instead of scrub command.
+ */
+static int vmw_context_scrub_render_target(struct vmw_ctx_bindinfo *bi,
+ bool rebind)
+{
+ struct vmw_private *dev_priv = bi->ctx->dev_priv;
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdSetRenderTarget body;
+ } *cmd;
+
+ cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+ if (unlikely(cmd == NULL)) {
+ DRM_ERROR("Failed reserving FIFO space for render target "
+ "unbinding.\n");
+ return -ENOMEM;
+ }
+
+ cmd->header.id = SVGA_3D_CMD_SETRENDERTARGET;
+ cmd->header.size = sizeof(cmd->body);
+ cmd->body.cid = bi->ctx->id;
+ cmd->body.type = bi->i1.rt_type;
+ cmd->body.target.sid =
+ cpu_to_le32((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
+ cmd->body.target.face = 0;
+ cmd->body.target.mipmap = 0;
+ vmw_fifo_commit(dev_priv, sizeof(*cmd));
+
+ return 0;
+}
+
+/**
+ * vmw_context_scrub_texture - scrub a texture binding from a context.
+ *
+ * @bi: single binding information.
+ * @rebind: Whether to issue a bind instead of scrub command.
+ *
+ * TODO: Possibly complement this function with a function that takes
+ * a list of texture bindings and combines them to a single command.
+ */
+static int vmw_context_scrub_texture(struct vmw_ctx_bindinfo *bi,
+ bool rebind)
+{
+ struct vmw_private *dev_priv = bi->ctx->dev_priv;
+ struct {
+ SVGA3dCmdHeader header;
+ struct {
+ SVGA3dCmdSetTextureState c;
+ SVGA3dTextureState s1;
+ } body;
+ } *cmd;
+
+ cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+ if (unlikely(cmd == NULL)) {
+ DRM_ERROR("Failed reserving FIFO space for texture "
+ "unbinding.\n");
+ return -ENOMEM;
+ }
+
+
+ cmd->header.id = SVGA_3D_CMD_SETTEXTURESTATE;
+ cmd->header.size = sizeof(cmd->body);
+ cmd->body.c.cid = bi->ctx->id;
+ cmd->body.s1.stage = bi->i1.texture_stage;
+ cmd->body.s1.name = SVGA3D_TS_BIND_TEXTURE;
+ cmd->body.s1.value =
+ cpu_to_le32((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
+ vmw_fifo_commit(dev_priv, sizeof(*cmd));
+
+ return 0;
+}
+
+/**
+ * vmw_context_binding_drop: Stop tracking a context binding
+ *
+ * @cb: Pointer to binding tracker storage.
+ *
+ * Stops tracking a context binding, and re-initializes its storage.
+ * Typically used when the context binding is replaced with a binding to
+ * another (or the same, for that matter) resource.
+ */
+static void vmw_context_binding_drop(struct vmw_ctx_binding *cb)
+{
+ list_del(&cb->ctx_list);
+ if (!list_empty(&cb->res_list))
+ list_del(&cb->res_list);
+ cb->bi.ctx = NULL;
+}
+
+/**
+ * vmw_context_binding_add: Start tracking a context binding
+ *
+ * @cbs: Pointer to the context binding state tracker.
+ * @bi: Information about the binding to track.
+ *
+ * Performs basic checks on the binding to make sure arguments are within
+ * bounds and then starts tracking the binding in the context binding
+ * state structure @cbs.
+ */
+int vmw_context_binding_add(struct vmw_ctx_binding_state *cbs,
+ const struct vmw_ctx_bindinfo *bi)
+{
+ struct vmw_ctx_binding *loc;
+
+ switch (bi->bt) {
+ case vmw_ctx_binding_rt:
+ if (unlikely((unsigned)bi->i1.rt_type >= SVGA3D_RT_MAX)) {
+ DRM_ERROR("Illegal render target type %u.\n",
+ (unsigned) bi->i1.rt_type);
+ return -EINVAL;
+ }
+ loc = &cbs->render_targets[bi->i1.rt_type];
+ break;
+ case vmw_ctx_binding_tex:
+ if (unlikely((unsigned)bi->i1.texture_stage >=
+ SVGA3D_NUM_TEXTURE_UNITS)) {
+ DRM_ERROR("Illegal texture/sampler unit %u.\n",
+ (unsigned) bi->i1.texture_stage);
+ return -EINVAL;
+ }
+ loc = &cbs->texture_units[bi->i1.texture_stage];
+ break;
+ case vmw_ctx_binding_shader:
+ if (unlikely((unsigned)bi->i1.shader_type >=
+ SVGA3D_SHADERTYPE_MAX)) {
+ DRM_ERROR("Illegal shader type %u.\n",
+ (unsigned) bi->i1.shader_type);
+ return -EINVAL;
+ }
+ loc = &cbs->shaders[bi->i1.shader_type];
+ break;
+ default:
+ BUG();
+ }
+
+ if (loc->bi.ctx != NULL)
+ vmw_context_binding_drop(loc);
+
+ loc->bi = *bi;
+ loc->bi.scrubbed = false;
+ list_add_tail(&loc->ctx_list, &cbs->list);
+ INIT_LIST_HEAD(&loc->res_list);
+
+ return 0;
+}
+
+/**
+ * vmw_context_binding_transfer: Transfer a context binding tracking entry.
+ *
+ * @cbs: Pointer to the persistent context binding state tracker.
+ * @bi: Information about the binding to track.
+ *
+ */
+static void vmw_context_binding_transfer(struct vmw_ctx_binding_state *cbs,
+ const struct vmw_ctx_bindinfo *bi)
+{
+ struct vmw_ctx_binding *loc;
+
+ switch (bi->bt) {
+ case vmw_ctx_binding_rt:
+ loc = &cbs->render_targets[bi->i1.rt_type];
+ break;
+ case vmw_ctx_binding_tex:
+ loc = &cbs->texture_units[bi->i1.texture_stage];
+ break;
+ case vmw_ctx_binding_shader:
+ loc = &cbs->shaders[bi->i1.shader_type];
+ break;
+ default:
+ BUG();
+ }
+
+ if (loc->bi.ctx != NULL)
+ vmw_context_binding_drop(loc);
+
+ if (bi->res != NULL) {
+ loc->bi = *bi;
+ list_add_tail(&loc->ctx_list, &cbs->list);
+ list_add_tail(&loc->res_list, &bi->res->binding_head);
+ }
+}
+
+/**
+ * vmw_context_binding_kill - Kill a binding on the device
+ * and stop tracking it.
+ *
+ * @cb: Pointer to binding tracker storage.
+ *
+ * Emits FIFO commands to scrub a binding represented by @cb.
+ * Then stops tracking the binding and re-initializes its storage.
+ */
+static void vmw_context_binding_kill(struct vmw_ctx_binding *cb)
+{
+ if (!cb->bi.scrubbed) {
+ (void) vmw_scrub_funcs[cb->bi.bt](&cb->bi, false);
+ cb->bi.scrubbed = true;
+ }
+ vmw_context_binding_drop(cb);
+}
+
+/**
+ * vmw_context_binding_state_kill - Kill all bindings associated with a
+ * struct vmw_ctx_binding state structure, and re-initialize the structure.
+ *
+ * @cbs: Pointer to the context binding state tracker.
+ *
+ * Emits commands to scrub all bindings associated with the
+ * context binding state tracker. Then re-initializes the whole structure.
+ */
+static void vmw_context_binding_state_kill(struct vmw_ctx_binding_state *cbs)
+{
+ struct vmw_ctx_binding *entry, *next;
+
+ list_for_each_entry_safe(entry, next, &cbs->list, ctx_list)
+ vmw_context_binding_kill(entry);
+}
+
+/**
+ * vmw_context_binding_state_scrub - Scrub all bindings associated with a
+ * struct vmw_ctx_binding state structure.
+ *
+ * @cbs: Pointer to the context binding state tracker.
+ *
+ * Emits commands to scrub all bindings associated with the
+ * context binding state tracker.
+ */
+static void vmw_context_binding_state_scrub(struct vmw_ctx_binding_state *cbs)
+{
+ struct vmw_ctx_binding *entry;
+
+ list_for_each_entry(entry, &cbs->list, ctx_list) {
+ if (!entry->bi.scrubbed) {
+ (void) vmw_scrub_funcs[entry->bi.bt](&entry->bi, false);
+ entry->bi.scrubbed = true;
+ }
+ }
+}
+
+/**
+ * vmw_context_binding_res_list_kill - Kill all bindings on a
+ * resource binding list
+ *
+ * @head: list head of resource binding list
+ *
+ * Kills all bindings associated with a specific resource. Typically
+ * called before the resource is destroyed.
+ */
+void vmw_context_binding_res_list_kill(struct list_head *head)
+{
+ struct vmw_ctx_binding *entry, *next;
+
+ list_for_each_entry_safe(entry, next, head, res_list)
+ vmw_context_binding_kill(entry);
+}
+
+/**
+ * vmw_context_binding_res_list_scrub - Scrub all bindings on a
+ * resource binding list
+ *
+ * @head: list head of resource binding list
+ *
+ * Scrub all bindings associated with a specific resource. Typically
+ * called before the resource is evicted.
+ */
+void vmw_context_binding_res_list_scrub(struct list_head *head)
+{
+ struct vmw_ctx_binding *entry;
+
+ list_for_each_entry(entry, head, res_list) {
+ if (!entry->bi.scrubbed) {
+ (void) vmw_scrub_funcs[entry->bi.bt](&entry->bi, false);
+ entry->bi.scrubbed = true;
+ }
+ }
+}
+
+/**
+ * vmw_context_binding_state_transfer - Commit staged binding info
+ *
+ * @ctx: Pointer to context to commit the staged binding info to.
+ * @from: Staged binding info built during execbuf.
+ *
+ * Transfers binding info from a temporary structure to the persistent
+ * structure in the context. This can be done once the commands referencing
+ * the bindings have been submitted to the device.
+ */
+void vmw_context_binding_state_transfer(struct vmw_resource *ctx,
+ struct vmw_ctx_binding_state *from)
+{
+ struct vmw_user_context *uctx =
+ container_of(ctx, struct vmw_user_context, res);
+ struct vmw_ctx_binding *entry, *next;
+
+ list_for_each_entry_safe(entry, next, &from->list, ctx_list)
+ vmw_context_binding_transfer(&uctx->cbs, &entry->bi);
+}
+
+/**
+ * vmw_context_rebind_all - Rebind all scrubbed bindings of a context
+ *
+ * @ctx: The context resource
+ *
+ * Walks through the context binding list and rebinds all scrubbed
+ * resources.
+ */
+int vmw_context_rebind_all(struct vmw_resource *ctx)
+{
+ struct vmw_ctx_binding *entry;
+ struct vmw_user_context *uctx =
+ container_of(ctx, struct vmw_user_context, res);
+ struct vmw_ctx_binding_state *cbs = &uctx->cbs;
+ int ret;
+
+ list_for_each_entry(entry, &cbs->list, ctx_list) {
+ if (likely(!entry->bi.scrubbed))
+ continue;
+
+ if (WARN_ON(entry->bi.res == NULL || entry->bi.res->id ==
+ SVGA3D_INVALID_ID))
+ continue;
+
+ ret = vmw_scrub_funcs[entry->bi.bt](&entry->bi, true);
+ if (unlikely(ret != 0))
+ return ret;
+
+ entry->bi.scrubbed = false;
+ }
+
+ return 0;
+}
+
+/**
+ * vmw_context_binding_list - Return a list of context bindings
+ *
+ * @ctx: The context resource
+ *
+ * Returns the current list of bindings of the given context. Note that
+ * this list becomes stale as soon as the dev_priv::binding_mutex is unlocked.
+ */
+struct list_head *vmw_context_binding_list(struct vmw_resource *ctx)
+{
+ return &(container_of(ctx, struct vmw_user_context, res)->cbs.list);
+}
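/*
 * Illustrative sketch, not part of the patch: how the binding-tracking
 * API above is meant to be driven from command submission.  Bindings
 * parsed from the command stream are staged in a local
 * vmw_ctx_binding_state and only committed to the context with
 * vmw_context_binding_state_transfer() once the commands have been
 * handed to the device.  The vmw_ctx_bindinfo field names used here
 * follow their use earlier in this file.
 */
static int example_stage_shader_binding(struct vmw_ctx_binding_state *staged,
					struct vmw_resource *ctx,
					struct vmw_resource *shader,
					SVGA3dShaderType shader_type)
{
	struct vmw_ctx_bindinfo bi;

	memset(&bi, 0, sizeof(bi));
	bi.ctx = ctx;
	bi.res = shader;
	bi.bt = vmw_ctx_binding_shader;
	bi.i1.shader_type = shader_type;

	return vmw_context_binding_add(staged, &bi);
}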
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c
index d4e54fcc0acd..a75840211b3c 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c
@@ -290,8 +290,7 @@ void vmw_bo_get_guest_ptr(const struct ttm_buffer_object *bo,
/**
* vmw_bo_pin - Pin or unpin a buffer object without moving it.
*
- * @bo: The buffer object. Must be reserved, and present either in VRAM
- * or GMR memory.
+ * @bo: The buffer object. Must be reserved.
* @pin: Whether to pin or unpin.
*
*/
@@ -303,10 +302,9 @@ void vmw_bo_pin(struct ttm_buffer_object *bo, bool pin)
int ret;
lockdep_assert_held(&bo->resv->lock.base);
- BUG_ON(old_mem_type != TTM_PL_VRAM &&
- old_mem_type != VMW_PL_GMR);
- pl_flags = TTM_PL_FLAG_VRAM | VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED;
+ pl_flags = TTM_PL_FLAG_VRAM | VMW_PL_FLAG_GMR | VMW_PL_FLAG_MOB
+ | TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED;
if (pin)
pl_flags |= TTM_PL_FLAG_NO_EVICT;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index c7a549694e59..3bdc0adc656d 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -112,6 +112,21 @@
#define DRM_IOCTL_VMW_UPDATE_LAYOUT \
DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UPDATE_LAYOUT, \
struct drm_vmw_update_layout_arg)
+#define DRM_IOCTL_VMW_CREATE_SHADER \
+ DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_SHADER, \
+ struct drm_vmw_shader_create_arg)
+#define DRM_IOCTL_VMW_UNREF_SHADER \
+ DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_SHADER, \
+ struct drm_vmw_shader_arg)
+#define DRM_IOCTL_VMW_GB_SURFACE_CREATE \
+ DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_CREATE, \
+ union drm_vmw_gb_surface_create_arg)
+#define DRM_IOCTL_VMW_GB_SURFACE_REF \
+ DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_REF, \
+ union drm_vmw_gb_surface_reference_arg)
+#define DRM_IOCTL_VMW_SYNCCPU \
+ DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_SYNCCPU, \
+ struct drm_vmw_synccpu_arg)
/**
* The core DRM version of this macro doesn't account for
@@ -177,6 +192,21 @@ static const struct drm_ioctl_desc vmw_ioctls[] = {
VMW_IOCTL_DEF(VMW_UPDATE_LAYOUT,
vmw_kms_update_layout_ioctl,
DRM_MASTER | DRM_UNLOCKED),
+ VMW_IOCTL_DEF(VMW_CREATE_SHADER,
+ vmw_shader_define_ioctl,
+ DRM_AUTH | DRM_UNLOCKED),
+ VMW_IOCTL_DEF(VMW_UNREF_SHADER,
+ vmw_shader_destroy_ioctl,
+ DRM_AUTH | DRM_UNLOCKED),
+ VMW_IOCTL_DEF(VMW_GB_SURFACE_CREATE,
+ vmw_gb_surface_define_ioctl,
+ DRM_AUTH | DRM_UNLOCKED),
+ VMW_IOCTL_DEF(VMW_GB_SURFACE_REF,
+ vmw_gb_surface_reference_ioctl,
+ DRM_AUTH | DRM_UNLOCKED),
+ VMW_IOCTL_DEF(VMW_SYNCCPU,
+ vmw_user_dmabuf_synccpu_ioctl,
+ DRM_AUTH | DRM_UNLOCKED),
};
static struct pci_device_id vmw_pci_id_list[] = {
@@ -189,6 +219,7 @@ static int enable_fbdev = IS_ENABLED(CONFIG_DRM_VMWGFX_FBCON);
static int vmw_force_iommu;
static int vmw_restrict_iommu;
static int vmw_force_coherent;
+static int vmw_restrict_dma_mask;
static int vmw_probe(struct pci_dev *, const struct pci_device_id *);
static void vmw_master_init(struct vmw_master *);
@@ -203,6 +234,8 @@ MODULE_PARM_DESC(restrict_iommu, "Try to limit IOMMU usage for TTM pages");
module_param_named(restrict_iommu, vmw_restrict_iommu, int, 0600);
MODULE_PARM_DESC(force_coherent, "Force coherent TTM pages");
module_param_named(force_coherent, vmw_force_coherent, int, 0600);
+MODULE_PARM_DESC(restrict_dma_mask, "Restrict DMA mask to 44 bits with IOMMU");
+module_param_named(restrict_dma_mask, vmw_restrict_dma_mask, int, 0600);
static void vmw_print_capabilities(uint32_t capabilities)
@@ -240,38 +273,52 @@ static void vmw_print_capabilities(uint32_t capabilities)
DRM_INFO(" GMR2.\n");
if (capabilities & SVGA_CAP_SCREEN_OBJECT_2)
DRM_INFO(" Screen Object 2.\n");
+ if (capabilities & SVGA_CAP_COMMAND_BUFFERS)
+ DRM_INFO(" Command Buffers.\n");
+ if (capabilities & SVGA_CAP_CMD_BUFFERS_2)
+ DRM_INFO(" Command Buffers 2.\n");
+ if (capabilities & SVGA_CAP_GBOBJECTS)
+ DRM_INFO(" Guest Backed Resources.\n");
}
-
/**
- * vmw_execbuf_prepare_dummy_query - Initialize a query result structure at
- * the start of a buffer object.
+ * vmw_dummy_query_bo_create - create a bo to hold a dummy query result
*
- * @dev_priv: The device private structure.
+ * @dev_priv: A device private structure.
*
- * This function will idle the buffer using an uninterruptible wait, then
- * map the first page and initialize a pending occlusion query result structure,
- * Finally it will unmap the buffer.
+ * This function creates a small buffer object that holds the query
+ * result for dummy queries emitted as query barriers.
+ * The function will then map the first page and initialize a pending
+ * occlusion query result structure. Finally, it will unmap the buffer.
+ * No interruptible waits are done within this function.
*
- * TODO: Since we're only mapping a single page, we should optimize the map
- * to use kmap_atomic / iomap_atomic.
+ * Returns an error if bo creation or initialization fails.
*/
-static void vmw_dummy_query_bo_prepare(struct vmw_private *dev_priv)
+static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv)
{
+ int ret;
+ struct ttm_buffer_object *bo;
struct ttm_bo_kmap_obj map;
volatile SVGA3dQueryResult *result;
bool dummy;
- int ret;
- struct ttm_bo_device *bdev = &dev_priv->bdev;
- struct ttm_buffer_object *bo = dev_priv->dummy_query_bo;
- ttm_bo_reserve(bo, false, false, false, 0);
- spin_lock(&bdev->fence_lock);
- ret = ttm_bo_wait(bo, false, false, false);
- spin_unlock(&bdev->fence_lock);
+ /*
+ * Create the bo as pinned, so that a tryreserve will
+ * immediately succeed. This is because we're the only
+ * user of the bo currently.
+ */
+ ret = ttm_bo_create(&dev_priv->bdev,
+ PAGE_SIZE,
+ ttm_bo_type_device,
+ &vmw_sys_ne_placement,
+ 0, false, NULL,
+ &bo);
+
if (unlikely(ret != 0))
- (void) vmw_fallback_wait(dev_priv, false, true, 0, false,
- 10*HZ);
+ return ret;
+
+ ret = ttm_bo_reserve(bo, false, true, false, 0);
+ BUG_ON(ret != 0);
ret = ttm_bo_kmap(bo, 0, 1, &map);
if (likely(ret == 0)) {
@@ -280,34 +327,19 @@ static void vmw_dummy_query_bo_prepare(struct vmw_private *dev_priv)
result->state = SVGA3D_QUERYSTATE_PENDING;
result->result32 = 0xff;
ttm_bo_kunmap(&map);
- } else
- DRM_ERROR("Dummy query buffer map failed.\n");
+ }
+ vmw_bo_pin(bo, false);
ttm_bo_unreserve(bo);
-}
+ if (unlikely(ret != 0)) {
+ DRM_ERROR("Dummy query buffer map failed.\n");
+ ttm_bo_unref(&bo);
+ } else
+ dev_priv->dummy_query_bo = bo;
-/**
- * vmw_dummy_query_bo_create - create a bo to hold a dummy query result
- *
- * @dev_priv: A device private structure.
- *
- * This function creates a small buffer object that holds the query
- * result for dummy queries emitted as query barriers.
- * No interruptible waits are done within this function.
- *
- * Returns an error if bo creation fails.
- */
-static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv)
-{
- return ttm_bo_create(&dev_priv->bdev,
- PAGE_SIZE,
- ttm_bo_type_device,
- &vmw_vram_sys_placement,
- 0, false, NULL,
- &dev_priv->dummy_query_bo);
+ return ret;
}
-
static int vmw_request_device(struct vmw_private *dev_priv)
{
int ret;
@@ -318,14 +350,24 @@ static int vmw_request_device(struct vmw_private *dev_priv)
return ret;
}
vmw_fence_fifo_up(dev_priv->fman);
+ if (dev_priv->has_mob) {
+ ret = vmw_otables_setup(dev_priv);
+ if (unlikely(ret != 0)) {
+ DRM_ERROR("Unable to initialize "
+ "guest Memory OBjects.\n");
+ goto out_no_mob;
+ }
+ }
ret = vmw_dummy_query_bo_create(dev_priv);
if (unlikely(ret != 0))
goto out_no_query_bo;
- vmw_dummy_query_bo_prepare(dev_priv);
return 0;
out_no_query_bo:
+ if (dev_priv->has_mob)
+ vmw_otables_takedown(dev_priv);
+out_no_mob:
vmw_fence_fifo_down(dev_priv->fman);
vmw_fifo_release(dev_priv, &dev_priv->fifo);
return ret;
@@ -341,10 +383,13 @@ static void vmw_release_device(struct vmw_private *dev_priv)
BUG_ON(dev_priv->pinned_bo != NULL);
ttm_bo_unref(&dev_priv->dummy_query_bo);
+ if (dev_priv->has_mob)
+ vmw_otables_takedown(dev_priv);
vmw_fence_fifo_down(dev_priv->fman);
vmw_fifo_release(dev_priv, &dev_priv->fifo);
}
+
/**
* Increase the 3d resource refcount.
 * If the count was previously zero, initialize the fifo, switching to svga
@@ -510,6 +555,33 @@ out_fixup:
return 0;
}
+/**
+ * vmw_dma_masks - set required page and DMA masks
+ *
+ * @dev_priv: Pointer to a struct vmw_private.
+ *
+ * With 32-bit we can only handle 32-bit PFNs. Optionally apply the same
+ * restriction for 64-bit systems.
+ */
+#ifdef CONFIG_INTEL_IOMMU
+static int vmw_dma_masks(struct vmw_private *dev_priv)
+{
+ struct drm_device *dev = dev_priv->dev;
+
+ if (intel_iommu_enabled &&
+ (sizeof(unsigned long) == 4 || vmw_restrict_dma_mask)) {
+ DRM_INFO("Restricting DMA addresses to 44 bits.\n");
+ return dma_set_mask(dev->dev, DMA_BIT_MASK(44));
+ }
+ return 0;
+}
+#else
+static int vmw_dma_masks(struct vmw_private *dev_priv)
+{
+ return 0;
+}
+#endif
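(The 44-bit figure presumably follows from the device describing guest memory with 32-bit page frame numbers: 2^32 pages of 4 KiB each cover exactly 2^44 bytes, so addresses above that are unreachable through the descriptors even when an IOMMU could otherwise remap them.)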
+
static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
{
struct vmw_private *dev_priv;
@@ -532,6 +604,7 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
mutex_init(&dev_priv->hw_mutex);
mutex_init(&dev_priv->cmdbuf_mutex);
mutex_init(&dev_priv->release_mutex);
+ mutex_init(&dev_priv->binding_mutex);
rwlock_init(&dev_priv->resource_lock);
for (i = vmw_res_context; i < vmw_res_max; ++i) {
@@ -578,14 +651,9 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
vmw_get_initial_size(dev_priv);
- if (dev_priv->capabilities & SVGA_CAP_GMR) {
- dev_priv->max_gmr_descriptors =
- vmw_read(dev_priv,
- SVGA_REG_GMR_MAX_DESCRIPTOR_LENGTH);
+ if (dev_priv->capabilities & SVGA_CAP_GMR2) {
dev_priv->max_gmr_ids =
vmw_read(dev_priv, SVGA_REG_GMR_MAX_IDS);
- }
- if (dev_priv->capabilities & SVGA_CAP_GMR2) {
dev_priv->max_gmr_pages =
vmw_read(dev_priv, SVGA_REG_GMRS_MAX_PAGES);
dev_priv->memory_size =
@@ -598,23 +666,42 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
*/
dev_priv->memory_size = 512*1024*1024;
}
+ dev_priv->max_mob_pages = 0;
+ if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) {
+ uint64_t mem_size =
+ vmw_read(dev_priv,
+ SVGA_REG_SUGGESTED_GBOBJECT_MEM_SIZE_KB);
+
+ dev_priv->max_mob_pages = mem_size * 1024 / PAGE_SIZE;
+ dev_priv->prim_bb_mem =
+ vmw_read(dev_priv,
+ SVGA_REG_MAX_PRIMARY_BOUNDING_BOX_MEM);
+ } else
+ dev_priv->prim_bb_mem = dev_priv->vram_size;
+
+ ret = vmw_dma_masks(dev_priv);
+ if (unlikely(ret != 0)) {
+ mutex_unlock(&dev_priv->hw_mutex);
+ goto out_err0;
+ }
+
+ if (unlikely(dev_priv->prim_bb_mem < dev_priv->vram_size))
+ dev_priv->prim_bb_mem = dev_priv->vram_size;
mutex_unlock(&dev_priv->hw_mutex);
vmw_print_capabilities(dev_priv->capabilities);
- if (dev_priv->capabilities & SVGA_CAP_GMR) {
+ if (dev_priv->capabilities & SVGA_CAP_GMR2) {
DRM_INFO("Max GMR ids is %u\n",
(unsigned)dev_priv->max_gmr_ids);
- DRM_INFO("Max GMR descriptors is %u\n",
- (unsigned)dev_priv->max_gmr_descriptors);
- }
- if (dev_priv->capabilities & SVGA_CAP_GMR2) {
DRM_INFO("Max number of GMR pages is %u\n",
(unsigned)dev_priv->max_gmr_pages);
DRM_INFO("Max dedicated hypervisor surface memory is %u kiB\n",
(unsigned)dev_priv->memory_size / 1024);
}
+ DRM_INFO("Maximum display memory size is %u kiB\n",
+ dev_priv->prim_bb_mem / 1024);
DRM_INFO("VRAM at 0x%08x size is %u kiB\n",
dev_priv->vram_start, dev_priv->vram_size / 1024);
DRM_INFO("MMIO at 0x%08x size is %u kiB\n",
@@ -649,12 +736,22 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
dev_priv->has_gmr = true;
if (((dev_priv->capabilities & (SVGA_CAP_GMR | SVGA_CAP_GMR2)) == 0) ||
refuse_dma || ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_GMR,
- dev_priv->max_gmr_ids) != 0) {
+ VMW_PL_GMR) != 0) {
DRM_INFO("No GMR memory available. "
"Graphics memory resources are very limited.\n");
dev_priv->has_gmr = false;
}
+ if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) {
+ dev_priv->has_mob = true;
+ if (ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_MOB,
+ VMW_PL_MOB) != 0) {
+ DRM_INFO("No MOB memory available. "
+ "3D will be disabled.\n");
+ dev_priv->has_mob = false;
+ }
+ }
+
dev_priv->mmio_mtrr = arch_phys_wc_add(dev_priv->mmio_start,
dev_priv->mmio_size);
@@ -757,6 +854,8 @@ out_err4:
iounmap(dev_priv->mmio_virt);
out_err3:
arch_phys_wc_del(dev_priv->mmio_mtrr);
+ if (dev_priv->has_mob)
+ (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB);
if (dev_priv->has_gmr)
(void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
(void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
@@ -801,6 +900,8 @@ static int vmw_driver_unload(struct drm_device *dev)
ttm_object_device_release(&dev_priv->tdev);
iounmap(dev_priv->mmio_virt);
arch_phys_wc_del(dev_priv->mmio_mtrr);
+ if (dev_priv->has_mob)
+ (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB);
if (dev_priv->has_gmr)
(void)ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
(void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
@@ -840,6 +941,7 @@ static void vmw_postclose(struct drm_device *dev,
drm_master_put(&vmw_fp->locked_master);
}
+ vmw_compat_shader_man_destroy(vmw_fp->shman);
ttm_object_file_release(&vmw_fp->tfile);
kfree(vmw_fp);
}
@@ -859,11 +961,17 @@ static int vmw_driver_open(struct drm_device *dev, struct drm_file *file_priv)
if (unlikely(vmw_fp->tfile == NULL))
goto out_no_tfile;
+ vmw_fp->shman = vmw_compat_shader_man_create(dev_priv);
+ if (IS_ERR(vmw_fp->shman))
+ goto out_no_shman;
+
file_priv->driver_priv = vmw_fp;
dev_priv->bdev.dev_mapping = dev->dev_mapping;
return 0;
+out_no_shman:
+ ttm_object_file_release(&vmw_fp->tfile);
out_no_tfile:
kfree(vmw_fp);
return ret;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 20890ad8408b..ecaa302a6154 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -40,9 +40,9 @@
#include <drm/ttm/ttm_module.h>
#include "vmwgfx_fence.h"
-#define VMWGFX_DRIVER_DATE "20120209"
+#define VMWGFX_DRIVER_DATE "20121114"
#define VMWGFX_DRIVER_MAJOR 2
-#define VMWGFX_DRIVER_MINOR 4
+#define VMWGFX_DRIVER_MINOR 5
#define VMWGFX_DRIVER_PATCHLEVEL 0
#define VMWGFX_FILE_PAGE_OFFSET 0x00100000
#define VMWGFX_FIFO_STATIC_SIZE (1024*1024)
@@ -50,19 +50,39 @@
#define VMWGFX_MAX_VALIDATIONS 2048
#define VMWGFX_MAX_DISPLAYS 16
#define VMWGFX_CMD_BOUNCE_INIT_SIZE 32768
+#define VMWGFX_ENABLE_SCREEN_TARGET_OTABLE 0
+
+/*
+ * Perhaps we should have sysfs entries for these.
+ */
+#define VMWGFX_NUM_GB_CONTEXT 256
+#define VMWGFX_NUM_GB_SHADER 20000
+#define VMWGFX_NUM_GB_SURFACE 32768
+#define VMWGFX_NUM_GB_SCREEN_TARGET VMWGFX_MAX_DISPLAYS
+#define VMWGFX_NUM_MOB (VMWGFX_NUM_GB_CONTEXT +\
+ VMWGFX_NUM_GB_SHADER +\
+ VMWGFX_NUM_GB_SURFACE +\
+ VMWGFX_NUM_GB_SCREEN_TARGET)
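(For reference: with the defaults above and VMWGFX_MAX_DISPLAYS at 16, VMWGFX_NUM_MOB works out to 256 + 20000 + 32768 + 16 = 53040, i.e. the number of MOB ids the driver budgets for.)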
#define VMW_PL_GMR TTM_PL_PRIV0
#define VMW_PL_FLAG_GMR TTM_PL_FLAG_PRIV0
+#define VMW_PL_MOB TTM_PL_PRIV1
+#define VMW_PL_FLAG_MOB TTM_PL_FLAG_PRIV1
#define VMW_RES_CONTEXT ttm_driver_type0
#define VMW_RES_SURFACE ttm_driver_type1
#define VMW_RES_STREAM ttm_driver_type2
#define VMW_RES_FENCE ttm_driver_type3
+#define VMW_RES_SHADER ttm_driver_type4
+
+struct vmw_compat_shader_manager;
struct vmw_fpriv {
struct drm_master *locked_master;
struct ttm_object_file *tfile;
struct list_head fence_events;
+ bool gb_aware;
+ struct vmw_compat_shader_manager *shman;
};
struct vmw_dma_buffer {
@@ -82,6 +102,7 @@ struct vmw_dma_buffer {
struct vmw_validate_buffer {
struct ttm_validate_buffer base;
struct drm_hash_item hash;
+ bool validate_as_mob;
};
struct vmw_res_func;
@@ -98,6 +119,7 @@ struct vmw_resource {
const struct vmw_res_func *func;
struct list_head lru_head; /* Protected by the resource lock */
struct list_head mob_head; /* Protected by @backup reserved */
+ struct list_head binding_head; /* Protected by binding_mutex */
void (*res_free) (struct vmw_resource *res);
void (*hw_destroy) (struct vmw_resource *res);
};
@@ -106,6 +128,7 @@ enum vmw_res_type {
vmw_res_context,
vmw_res_surface,
vmw_res_stream,
+ vmw_res_shader,
vmw_res_max
};
@@ -154,6 +177,7 @@ struct vmw_fifo_state {
};
struct vmw_relocation {
+ SVGAMobId *mob_loc;
SVGAGuestPtr *location;
uint32_t index;
};
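(A sketch of how the two relocation fields are meant to be consumed once the buffer's final placement is known, modeled on the relocation-apply path elsewhere in this driver; reloc and bo are placeholders, not verbatim driver code:)

	switch (bo->mem.mem_type) {
	case VMW_PL_MOB:
		*reloc->mob_loc = bo->mem.start;	/* MOB id */
		break;
	case VMW_PL_GMR:
		reloc->location->gmrId = bo->mem.start;	/* GMR id */
		break;
	case TTM_PL_VRAM:
		reloc->location->gmrId = SVGA_GMR_FRAMEBUFFER;
		reloc->location->offset += bo->offset;
		break;
	default:
		break;
	}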
@@ -229,11 +253,77 @@ struct vmw_piter {
struct page *(*page)(struct vmw_piter *);
};
+/*
+ * enum vmw_ctx_binding_type - abstract resource to context binding types
+ */
+enum vmw_ctx_binding_type {
+ vmw_ctx_binding_shader,
+ vmw_ctx_binding_rt,
+ vmw_ctx_binding_tex,
+ vmw_ctx_binding_max
+};
+
+/**
+ * struct vmw_ctx_bindinfo - structure representing a single context binding
+ *
+ * @ctx: Pointer to the context structure. NULL means the binding is not
+ * active.
+ * @res: Non ref-counted pointer to the bound resource.
+ * @bt: The binding type.
+ * @scrubbed: Whether the binding has been scrubbed from the device but is
+ * still tracked so that it can be rebound.
+ * @i1: Union of information needed to unbind.
+ */
+struct vmw_ctx_bindinfo {
+ struct vmw_resource *ctx;
+ struct vmw_resource *res;
+ enum vmw_ctx_binding_type bt;
+ bool scrubbed;
+ union {
+ SVGA3dShaderType shader_type;
+ SVGA3dRenderTargetType rt_type;
+ uint32 texture_stage;
+ } i1;
+};
+
+/**
+ * struct vmw_ctx_binding - structure representing a single context binding
+ * - suitable for tracking in a context
+ *
+ * @ctx_list: List head for context.
+ * @res_list: List head for bound resource.
+ * @bi: Binding info
+ */
+struct vmw_ctx_binding {
+ struct list_head ctx_list;
+ struct list_head res_list;
+ struct vmw_ctx_bindinfo bi;
+};
+
+
+/**
+ * struct vmw_ctx_binding_state - context binding state
+ *
+ * @list: linked list of individual bindings.
+ * @render_targets: Render target bindings.
+ * @texture_units: Texture units/samplers bindings.
+ * @shaders: Shader bindings.
+ *
+ * Note that this structure also provides storage space for the individual
+ * struct vmw_ctx_binding objects, so that no dynamic allocation is needed
+ * for individual bindings.
+ *
+ */
+struct vmw_ctx_binding_state {
+ struct list_head list;
+ struct vmw_ctx_binding render_targets[SVGA3D_RT_MAX];
+ struct vmw_ctx_binding texture_units[SVGA3D_NUM_TEXTURE_UNITS];
+ struct vmw_ctx_binding shaders[SVGA3D_SHADERTYPE_MAX];
+};
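(Illustration only: staging a render-target binding with these structures, using vmw_context_binding_add() declared later in this header; ctx_res, surf_res and staged are placeholder variables:)

	struct vmw_ctx_bindinfo bi;

	bi.ctx = ctx_res;			/* the owning context resource */
	bi.res = surf_res;			/* surface bound as render target */
	bi.bt = vmw_ctx_binding_rt;
	bi.i1.rt_type = SVGA3D_RT_COLOR0;	/* example render-target slot */
	ret = vmw_context_binding_add(staged, &bi);	/* staged: binding state */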
+
struct vmw_sw_context{
struct drm_open_hash res_ht;
bool res_ht_initialized;
 bool kernel; /**< is the call made from the kernel */
- struct ttm_object_file *tfile;
+ struct vmw_fpriv *fp;
struct list_head validate_nodes;
struct vmw_relocation relocs[VMWGFX_MAX_RELOCATIONS];
uint32_t cur_reloc;
@@ -250,6 +340,8 @@ struct vmw_sw_context{
struct vmw_resource *last_query_ctx;
bool needs_post_query_barrier;
struct vmw_resource *error_resource;
+ struct vmw_ctx_binding_state staged_bindings;
+ struct list_head staged_shaders;
};
struct vmw_legacy_display;
@@ -281,6 +373,7 @@ struct vmw_private {
unsigned int io_start;
uint32_t vram_start;
uint32_t vram_size;
+ uint32_t prim_bb_mem;
uint32_t mmio_start;
uint32_t mmio_size;
uint32_t fb_max_width;
@@ -290,11 +383,12 @@ struct vmw_private {
__le32 __iomem *mmio_virt;
int mmio_mtrr;
uint32_t capabilities;
- uint32_t max_gmr_descriptors;
uint32_t max_gmr_ids;
uint32_t max_gmr_pages;
+ uint32_t max_mob_pages;
uint32_t memory_size;
bool has_gmr;
+ bool has_mob;
struct mutex hw_mutex;
/*
@@ -370,6 +464,7 @@ struct vmw_private {
struct vmw_sw_context ctx;
struct mutex cmdbuf_mutex;
+ struct mutex binding_mutex;
/**
* Operating mode.
@@ -415,6 +510,12 @@ struct vmw_private {
* DMA mapping stuff.
*/
enum vmw_dma_map_mode map_mode;
+
+ /*
+ * Guest Backed stuff
+ */
+ struct ttm_buffer_object *otable_bo;
+ struct vmw_otable *otables;
};
static inline struct vmw_surface *vmw_res_to_srf(struct vmw_resource *res)
@@ -471,23 +572,14 @@ extern void vmw_gmr_unbind(struct vmw_private *dev_priv, int gmr_id);
* Resource utilities - vmwgfx_resource.c
*/
struct vmw_user_resource_conv;
-extern const struct vmw_user_resource_conv *user_surface_converter;
-extern const struct vmw_user_resource_conv *user_context_converter;
-extern struct vmw_resource *vmw_context_alloc(struct vmw_private *dev_priv);
extern void vmw_resource_unreference(struct vmw_resource **p_res);
extern struct vmw_resource *vmw_resource_reference(struct vmw_resource *res);
+extern struct vmw_resource *
+vmw_resource_reference_unless_doomed(struct vmw_resource *res);
extern int vmw_resource_validate(struct vmw_resource *res);
extern int vmw_resource_reserve(struct vmw_resource *res, bool no_backup);
extern bool vmw_resource_needs_backup(const struct vmw_resource *res);
-extern int vmw_context_destroy_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-extern int vmw_context_define_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-extern int vmw_context_check(struct vmw_private *dev_priv,
- struct ttm_object_file *tfile,
- int id,
- struct vmw_resource **p_res);
extern int vmw_user_lookup_handle(struct vmw_private *dev_priv,
struct ttm_object_file *tfile,
uint32_t handle,
@@ -499,18 +591,6 @@ extern int vmw_user_resource_lookup_handle(
uint32_t handle,
const struct vmw_user_resource_conv *converter,
struct vmw_resource **p_res);
-extern void vmw_surface_res_free(struct vmw_resource *res);
-extern int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-extern int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-extern int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-extern int vmw_surface_check(struct vmw_private *dev_priv,
- struct ttm_object_file *tfile,
- uint32_t handle, int *id);
-extern int vmw_surface_validate(struct vmw_private *dev_priv,
- struct vmw_surface *srf);
extern void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo);
extern int vmw_dmabuf_init(struct vmw_private *dev_priv,
struct vmw_dma_buffer *vmw_bo,
@@ -519,10 +599,21 @@ extern int vmw_dmabuf_init(struct vmw_private *dev_priv,
void (*bo_free) (struct ttm_buffer_object *bo));
extern int vmw_user_dmabuf_verify_access(struct ttm_buffer_object *bo,
struct ttm_object_file *tfile);
+extern int vmw_user_dmabuf_alloc(struct vmw_private *dev_priv,
+ struct ttm_object_file *tfile,
+ uint32_t size,
+ bool shareable,
+ uint32_t *handle,
+ struct vmw_dma_buffer **p_dma_buf);
+extern int vmw_user_dmabuf_reference(struct ttm_object_file *tfile,
+ struct vmw_dma_buffer *dma_buf,
+ uint32_t *handle);
extern int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
+extern int vmw_user_dmabuf_synccpu_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
extern uint32_t vmw_dmabuf_validate_node(struct ttm_buffer_object *bo,
uint32_t cur_validate_node);
extern void vmw_dmabuf_validate_clear(struct ttm_buffer_object *bo);
@@ -622,10 +713,16 @@ extern struct ttm_placement vmw_vram_sys_placement;
extern struct ttm_placement vmw_vram_gmr_placement;
extern struct ttm_placement vmw_vram_gmr_ne_placement;
extern struct ttm_placement vmw_sys_placement;
+extern struct ttm_placement vmw_sys_ne_placement;
extern struct ttm_placement vmw_evictable_placement;
extern struct ttm_placement vmw_srf_placement;
+extern struct ttm_placement vmw_mob_placement;
extern struct ttm_bo_driver vmw_bo_driver;
extern int vmw_dma_quiescent(struct drm_device *dev);
+extern int vmw_bo_map_dma(struct ttm_buffer_object *bo);
+extern void vmw_bo_unmap_dma(struct ttm_buffer_object *bo);
+extern const struct vmw_sg_table *
+vmw_bo_sg_table(struct ttm_buffer_object *bo);
extern void vmw_piter_start(struct vmw_piter *viter,
const struct vmw_sg_table *vsgt,
unsigned long p_offs);
@@ -701,7 +798,7 @@ extern void vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
 * IRQs and waiting - vmwgfx_irq.c
*/
-extern irqreturn_t vmw_irq_handler(DRM_IRQ_ARGS);
+extern irqreturn_t vmw_irq_handler(int irq, void *arg);
extern int vmw_wait_seqno(struct vmw_private *dev_priv, bool lazy,
uint32_t seqno, bool interruptible,
unsigned long timeout);
@@ -832,6 +929,101 @@ extern int vmw_prime_handle_to_fd(struct drm_device *dev,
uint32_t handle, uint32_t flags,
int *prime_fd);
+/*
+ * MemoryOBject management - vmwgfx_mob.c
+ */
+struct vmw_mob;
+extern int vmw_mob_bind(struct vmw_private *dev_priv, struct vmw_mob *mob,
+ const struct vmw_sg_table *vsgt,
+ unsigned long num_data_pages, int32_t mob_id);
+extern void vmw_mob_unbind(struct vmw_private *dev_priv,
+ struct vmw_mob *mob);
+extern void vmw_mob_destroy(struct vmw_mob *mob);
+extern struct vmw_mob *vmw_mob_create(unsigned long data_pages);
+extern int vmw_otables_setup(struct vmw_private *dev_priv);
+extern void vmw_otables_takedown(struct vmw_private *dev_priv);
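(A sketch of the intended MOB lifecycle, pieced together from the declarations above; bo, mob_id and dev_priv are placeholders and error handling is trimmed:)

	const struct vmw_sg_table *vsgt = vmw_bo_sg_table(bo);
	struct vmw_mob *mob = vmw_mob_create(bo->num_pages);
	int ret;

	if (unlikely(mob == NULL))
		return -ENOMEM;

	ret = vmw_mob_bind(dev_priv, mob, vsgt, bo->num_pages, mob_id);
	if (unlikely(ret != 0)) {
		vmw_mob_destroy(mob);
		return ret;
	}

	/* ... the device can now reach the buffer through mob_id ... */

	vmw_mob_unbind(dev_priv, mob);
	vmw_mob_destroy(mob);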
+
+/*
+ * Context management - vmwgfx_context.c
+ */
+
+extern const struct vmw_user_resource_conv *user_context_converter;
+
+extern struct vmw_resource *vmw_context_alloc(struct vmw_private *dev_priv);
+
+extern int vmw_context_check(struct vmw_private *dev_priv,
+ struct ttm_object_file *tfile,
+ int id,
+ struct vmw_resource **p_res);
+extern int vmw_context_define_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+extern int vmw_context_destroy_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+extern int vmw_context_binding_add(struct vmw_ctx_binding_state *cbs,
+ const struct vmw_ctx_bindinfo *ci);
+extern void
+vmw_context_binding_state_transfer(struct vmw_resource *res,
+ struct vmw_ctx_binding_state *cbs);
+extern void vmw_context_binding_res_list_kill(struct list_head *head);
+extern void vmw_context_binding_res_list_scrub(struct list_head *head);
+extern int vmw_context_rebind_all(struct vmw_resource *ctx);
+extern struct list_head *vmw_context_binding_list(struct vmw_resource *ctx);
+
+/*
+ * Surface management - vmwgfx_surface.c
+ */
+
+extern const struct vmw_user_resource_conv *user_surface_converter;
+
+extern void vmw_surface_res_free(struct vmw_resource *res);
+extern int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+extern int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+extern int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+extern int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+extern int vmw_gb_surface_reference_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+extern int vmw_surface_check(struct vmw_private *dev_priv,
+ struct ttm_object_file *tfile,
+ uint32_t handle, int *id);
+extern int vmw_surface_validate(struct vmw_private *dev_priv,
+ struct vmw_surface *srf);
+
+/*
+ * Shader management - vmwgfx_shader.c
+ */
+
+extern const struct vmw_user_resource_conv *user_shader_converter;
+
+extern int vmw_shader_define_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+extern int vmw_shader_destroy_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+extern int vmw_compat_shader_lookup(struct vmw_compat_shader_manager *man,
+ SVGA3dShaderType shader_type,
+ u32 *user_key);
+extern void vmw_compat_shaders_commit(struct vmw_compat_shader_manager *man,
+ struct list_head *list);
+extern void vmw_compat_shaders_revert(struct vmw_compat_shader_manager *man,
+ struct list_head *list);
+extern int vmw_compat_shader_remove(struct vmw_compat_shader_manager *man,
+ u32 user_key,
+ SVGA3dShaderType shader_type,
+ struct list_head *list);
+extern int vmw_compat_shader_add(struct vmw_compat_shader_manager *man,
+ u32 user_key, const void *bytecode,
+ SVGA3dShaderType shader_type,
+ size_t size,
+ struct ttm_object_file *tfile,
+ struct list_head *list);
+extern struct vmw_compat_shader_manager *
+vmw_compat_shader_man_create(struct vmw_private *dev_priv);
+extern void
+vmw_compat_shader_man_destroy(struct vmw_compat_shader_manager *man);
+
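(A sketch of the per-batch flow this API implies, assuming the execbuf code adds, looks up and removes compat shaders while parsing legacy shader commands and then commits or reverts the whole staged list; shman, bytecode, bytecode_size, tfile and user_shid are placeholders:)

	LIST_HEAD(staged);
	u32 key = user_shid;
	int ret;

	ret = vmw_compat_shader_add(shman, key, bytecode,
				    SVGA3D_SHADERTYPE_PS, bytecode_size,
				    tfile, &staged);
	if (ret == 0)
		ret = vmw_compat_shader_lookup(shman, SVGA3D_SHADERTYPE_PS,
					       &key);
	if (ret)
		vmw_compat_shaders_revert(shman, &staged);
	else
		vmw_compat_shaders_commit(shman, &staged);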
/**
* Inline helper functions
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index 599f6469a1eb..269b85cc875a 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -54,6 +54,8 @@ struct vmw_resource_relocation {
* @res: Ref-counted pointer to the resource.
* @switch_backup: Boolean whether to switch backup buffer on unreserve.
* @new_backup: Refcounted pointer to the new backup buffer.
+ * @staged_bindings: If @res is a context, tracks bindings set up during
+ * the command batch. Otherwise NULL.
 * @new_backup_offset: New backup buffer offset if @new_backup is non-NULL.
* @first_usage: Set to true the first time the resource is referenced in
* the command stream.
@@ -65,12 +67,32 @@ struct vmw_resource_val_node {
struct drm_hash_item hash;
struct vmw_resource *res;
struct vmw_dma_buffer *new_backup;
+ struct vmw_ctx_binding_state *staged_bindings;
unsigned long new_backup_offset;
bool first_usage;
bool no_buffer_needed;
};
/**
+ * struct vmw_cmd_entry - Describe a command for the verifier
+ *
+ *
+ * @func: Callback that performs validation of the command.
+ * @gb_disable: Whether disabled if guest-backed objects are available.
+ * @gb_enable: Whether enabled iff guest-backed objects are available.
+ */
+struct vmw_cmd_entry {
+ int (*func) (struct vmw_private *, struct vmw_sw_context *,
+ SVGA3dCmdHeader *);
+ bool user_allow;
+ bool gb_disable;
+ bool gb_enable;
+};
+
+#define VMW_CMD_DEF(_cmd, _func, _user_allow, _gb_disable, _gb_enable) \
+ [(_cmd) - SVGA_3D_CMD_BASE] = {(_func), (_user_allow),\
+ (_gb_disable), (_gb_enable)}
+
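(How the verifier is expected to consume these entries, roughly; the actual dispatch lives in vmw_cmd_check(), which is not part of this hunk, so treat this as an assumed sketch:)

	const struct vmw_cmd_entry *entry;

	if (cmd_id < SVGA_3D_CMD_BASE || cmd_id >= SVGA_3D_CMD_MAX)
		return -EINVAL;
	entry = &vmw_cmd_entries[cmd_id - SVGA_3D_CMD_BASE];
	if (entry->func == NULL)
		return -EINVAL;
	if (!sw_context->kernel && !entry->user_allow)
		return -EPERM;		/* not allowed from the execbuf ioctl */
	if (dev_priv->has_mob && entry->gb_disable)
		return -EINVAL;		/* legacy command on a guest-backed device */
	if (!dev_priv->has_mob && entry->gb_enable)
		return -EINVAL;		/* guest-backed command without MOB support */
	return entry->func(dev_priv, sw_context, header);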
+/**
 * vmw_resource_list_unreserve - unreserve resources previously reserved for
* command submission.
*
@@ -87,6 +109,18 @@ static void vmw_resource_list_unreserve(struct list_head *list,
struct vmw_dma_buffer *new_backup =
backoff ? NULL : val->new_backup;
+ /*
+ * Transfer staged context bindings to the
+ * persistent context binding tracker.
+ */
+ if (unlikely(val->staged_bindings)) {
+ if (!backoff) {
+ vmw_context_binding_state_transfer
+ (val->res, val->staged_bindings);
+ }
+ kfree(val->staged_bindings);
+ val->staged_bindings = NULL;
+ }
vmw_resource_unreserve(res, new_backup,
val->new_backup_offset);
vmw_dmabuf_unreference(&val->new_backup);
@@ -146,6 +180,44 @@ static int vmw_resource_val_add(struct vmw_sw_context *sw_context,
}
/**
+ * vmw_resource_context_res_add - Put resources previously bound to a context on
+ * the validation list
+ *
+ * @dev_priv: Pointer to a device private structure
+ * @sw_context: Pointer to a software context used for this command submission
+ * @ctx: Pointer to the context resource
+ *
+ * This function puts all resources that were previously bound to @ctx on
+ * the resource validation list. This is part of the context state reemission
+ */
+static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ struct vmw_resource *ctx)
+{
+ struct list_head *binding_list;
+ struct vmw_ctx_binding *entry;
+ int ret = 0;
+ struct vmw_resource *res;
+
+ mutex_lock(&dev_priv->binding_mutex);
+ binding_list = vmw_context_binding_list(ctx);
+
+ list_for_each_entry(entry, binding_list, ctx_list) {
+ res = vmw_resource_reference_unless_doomed(entry->bi.res);
+ if (unlikely(res == NULL))
+ continue;
+
+ ret = vmw_resource_val_add(sw_context, entry->bi.res, NULL);
+ vmw_resource_unreference(&res);
+ if (unlikely(ret != 0))
+ break;
+ }
+
+ mutex_unlock(&dev_priv->binding_mutex);
+ return ret;
+}
+
+/**
* vmw_resource_relocation_add - Add a relocation to the relocation list
*
* @list: Pointer to head of relocation list.
@@ -201,8 +273,12 @@ static void vmw_resource_relocations_apply(uint32_t *cb,
{
struct vmw_resource_relocation *rel;
- list_for_each_entry(rel, list, head)
- cb[rel->offset] = rel->res->id;
+ list_for_each_entry(rel, list, head) {
+ if (likely(rel->res != NULL))
+ cb[rel->offset] = rel->res->id;
+ else
+ cb[rel->offset] = SVGA_3D_CMD_NOP;
+ }
}
static int vmw_cmd_invalid(struct vmw_private *dev_priv,
@@ -224,6 +300,7 @@ static int vmw_cmd_ok(struct vmw_private *dev_priv,
*
* @sw_context: The software context used for this command submission batch.
* @bo: The buffer object to add.
+ * @validate_as_mob: Validate this buffer as a MOB.
* @p_val_node: If non-NULL Will be updated with the validate node number
* on return.
*
@@ -232,6 +309,7 @@ static int vmw_cmd_ok(struct vmw_private *dev_priv,
*/
static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
struct ttm_buffer_object *bo,
+ bool validate_as_mob,
uint32_t *p_val_node)
{
uint32_t val_node;
@@ -244,6 +322,10 @@ static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
&hash) == 0)) {
vval_buf = container_of(hash, struct vmw_validate_buffer,
hash);
+ if (unlikely(vval_buf->validate_as_mob != validate_as_mob)) {
+ DRM_ERROR("Inconsistent buffer usage.\n");
+ return -EINVAL;
+ }
val_buf = &vval_buf->base;
val_node = vval_buf - sw_context->val_bufs;
} else {
@@ -266,6 +348,7 @@ static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
val_buf->bo = ttm_bo_reference(bo);
val_buf->reserved = false;
list_add_tail(&val_buf->head, &sw_context->validate_nodes);
+ vval_buf->validate_as_mob = validate_as_mob;
}
sw_context->fence_flags |= DRM_VMW_FENCE_FLAG_EXEC;
@@ -302,7 +385,8 @@ static int vmw_resources_reserve(struct vmw_sw_context *sw_context)
struct ttm_buffer_object *bo = &res->backup->base;
ret = vmw_bo_to_validate_list
- (sw_context, bo, NULL);
+ (sw_context, bo,
+ vmw_resource_needs_backup(res), NULL);
if (unlikely(ret != 0))
return ret;
@@ -339,22 +423,27 @@ static int vmw_resources_validate(struct vmw_sw_context *sw_context)
}
/**
- * vmw_cmd_res_check - Check that a resource is present and if so, put it
+ * vmw_cmd_compat_res_check - Check that a resource is present and if so, put it
* on the resource validate list unless it's already there.
*
* @dev_priv: Pointer to a device private structure.
* @sw_context: Pointer to the software context.
* @res_type: Resource type.
 * @converter: User-space visible type specific information.
- * @id: Pointer to the location in the command buffer currently being
+ * @id: user-space resource id handle.
+ * @id_loc: Pointer to the location in the command buffer currently being
* parsed from where the user-space resource id handle is located.
+ * @p_val: Pointer to pointer to resource validation node. Populated
+ * on exit.
*/
-static int vmw_cmd_res_check(struct vmw_private *dev_priv,
- struct vmw_sw_context *sw_context,
- enum vmw_res_type res_type,
- const struct vmw_user_resource_conv *converter,
- uint32_t *id,
- struct vmw_resource_val_node **p_val)
+static int
+vmw_cmd_compat_res_check(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ enum vmw_res_type res_type,
+ const struct vmw_user_resource_conv *converter,
+ uint32_t id,
+ uint32_t *id_loc,
+ struct vmw_resource_val_node **p_val)
{
struct vmw_res_cache_entry *rcache =
&sw_context->res_cache[res_type];
@@ -362,15 +451,22 @@ static int vmw_cmd_res_check(struct vmw_private *dev_priv,
struct vmw_resource_val_node *node;
int ret;
- if (*id == SVGA3D_INVALID_ID)
+ if (id == SVGA3D_INVALID_ID) {
+ if (p_val)
+ *p_val = NULL;
+ if (res_type == vmw_res_context) {
+ DRM_ERROR("Illegal context invalid id.\n");
+ return -EINVAL;
+ }
return 0;
+ }
/*
* Fastpath in case of repeated commands referencing the same
* resource
*/
- if (likely(rcache->valid && *id == rcache->handle)) {
+ if (likely(rcache->valid && id == rcache->handle)) {
const struct vmw_resource *res = rcache->res;
rcache->node->first_usage = false;
@@ -379,28 +475,28 @@ static int vmw_cmd_res_check(struct vmw_private *dev_priv,
return vmw_resource_relocation_add
(&sw_context->res_relocations, res,
- id - sw_context->buf_start);
+ id_loc - sw_context->buf_start);
}
ret = vmw_user_resource_lookup_handle(dev_priv,
- sw_context->tfile,
- *id,
+ sw_context->fp->tfile,
+ id,
converter,
&res);
if (unlikely(ret != 0)) {
DRM_ERROR("Could not find or use resource 0x%08x.\n",
- (unsigned) *id);
+ (unsigned) id);
dump_stack();
return ret;
}
rcache->valid = true;
rcache->res = res;
- rcache->handle = *id;
+ rcache->handle = id;
ret = vmw_resource_relocation_add(&sw_context->res_relocations,
res,
- id - sw_context->buf_start);
+ id_loc - sw_context->buf_start);
if (unlikely(ret != 0))
goto out_no_reloc;
@@ -411,6 +507,22 @@ static int vmw_cmd_res_check(struct vmw_private *dev_priv,
rcache->node = node;
if (p_val)
*p_val = node;
+
+ if (dev_priv->has_mob && node->first_usage &&
+ res_type == vmw_res_context) {
+ ret = vmw_resource_context_res_add(dev_priv, sw_context, res);
+ if (unlikely(ret != 0))
+ goto out_no_reloc;
+ node->staged_bindings =
+ kzalloc(sizeof(*node->staged_bindings), GFP_KERNEL);
+ if (node->staged_bindings == NULL) {
+ DRM_ERROR("Failed to allocate context binding "
+ "information.\n");
+ goto out_no_reloc;
+ }
+ INIT_LIST_HEAD(&node->staged_bindings->list);
+ }
+
vmw_resource_unreference(&res);
return 0;
@@ -422,6 +534,59 @@ out_no_reloc:
}
/**
+ * vmw_cmd_res_check - Check that a resource is present and if so, put it
+ * on the resource validate list unless it's already there.
+ *
+ * @dev_priv: Pointer to a device private structure.
+ * @sw_context: Pointer to the software context.
+ * @res_type: Resource type.
+ * @converter: User-space visible type specific information.
+ * @id_loc: Pointer to the location in the command buffer currently being
+ * parsed from where the user-space resource id handle is located.
+ * @p_val: Pointer to pointer to resource validation node. Populated
+ * on exit.
+ */
+static int
+vmw_cmd_res_check(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ enum vmw_res_type res_type,
+ const struct vmw_user_resource_conv *converter,
+ uint32_t *id_loc,
+ struct vmw_resource_val_node **p_val)
+{
+ return vmw_cmd_compat_res_check(dev_priv, sw_context, res_type,
+ converter, *id_loc, id_loc, p_val);
+}
+
+/**
+ * vmw_rebind_contexts - Rebind all resources previously bound to
+ * referenced contexts.
+ *
+ * @sw_context: Pointer to the software context.
+ *
+ * Rebind context binding points that have been scrubbed because of eviction.
+ */
+static int vmw_rebind_contexts(struct vmw_sw_context *sw_context)
+{
+ struct vmw_resource_val_node *val;
+ int ret;
+
+ list_for_each_entry(val, &sw_context->resource_list, head) {
+ if (likely(!val->staged_bindings))
+ continue;
+
+ ret = vmw_context_rebind_all(val->res);
+ if (unlikely(ret != 0)) {
+ if (ret != -ERESTARTSYS)
+ DRM_ERROR("Failed to rebind context.\n");
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+/**
* vmw_cmd_cid_check - Check a command header for valid context information.
*
* @dev_priv: Pointer to a device private structure.
@@ -453,17 +618,35 @@ static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv,
SVGA3dCmdHeader header;
SVGA3dCmdSetRenderTarget body;
} *cmd;
+ struct vmw_resource_val_node *ctx_node;
+ struct vmw_resource_val_node *res_node;
int ret;
- ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
+ cmd = container_of(header, struct vmw_sid_cmd, header);
+
+ ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
+ user_context_converter, &cmd->body.cid,
+ &ctx_node);
if (unlikely(ret != 0))
return ret;
- cmd = container_of(header, struct vmw_sid_cmd, header);
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
user_surface_converter,
- &cmd->body.target.sid, NULL);
- return ret;
+ &cmd->body.target.sid, &res_node);
+ if (unlikely(ret != 0))
+ return ret;
+
+ if (dev_priv->has_mob) {
+ struct vmw_ctx_bindinfo bi;
+
+ bi.ctx = ctx_node->res;
+ bi.res = res_node ? res_node->res : NULL;
+ bi.bt = vmw_ctx_binding_rt;
+ bi.i1.rt_type = cmd->body.type;
+ return vmw_context_binding_add(ctx_node->staged_bindings, &bi);
+ }
+
+ return 0;
}
static int vmw_cmd_surface_copy_check(struct vmw_private *dev_priv,
@@ -519,11 +702,6 @@ static int vmw_cmd_blt_surf_screen_check(struct vmw_private *dev_priv,
cmd = container_of(header, struct vmw_sid_cmd, header);
- if (unlikely(!sw_context->kernel)) {
- DRM_ERROR("Kernel only SVGA3d command: %u.\n", cmd->header.id);
- return -EPERM;
- }
-
return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
user_surface_converter,
&cmd->body.srcImage.sid, NULL);
@@ -541,11 +719,6 @@ static int vmw_cmd_present_check(struct vmw_private *dev_priv,
cmd = container_of(header, struct vmw_sid_cmd, header);
- if (unlikely(!sw_context->kernel)) {
- DRM_ERROR("Kernel only SVGA3d command: %u.\n", cmd->header.id);
- return -EPERM;
- }
-
return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
user_surface_converter, &cmd->body.sid,
NULL);
@@ -586,7 +759,7 @@ static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
sw_context->needs_post_query_barrier = true;
ret = vmw_bo_to_validate_list(sw_context,
sw_context->cur_query_bo,
- NULL);
+ dev_priv->has_mob, NULL);
if (unlikely(ret != 0))
return ret;
}
@@ -594,7 +767,7 @@ static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
ret = vmw_bo_to_validate_list(sw_context,
dev_priv->dummy_query_bo,
- NULL);
+ dev_priv->has_mob, NULL);
if (unlikely(ret != 0))
return ret;
@@ -672,6 +845,66 @@ static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv,
}
/**
+ * vmw_translate_mob_pointer - Prepare to translate a user-space buffer
+ * handle to a MOB id.
+ *
+ * @dev_priv: Pointer to a device private structure.
+ * @sw_context: The software context used for this command batch validation.
+ * @id: Pointer to the user-space handle to be translated.
+ * @vmw_bo_p: Points to a location that, on successful return, will carry
+ * a reference-counted pointer to the DMA buffer identified by the
+ * user-space handle in @id.
+ *
+ * This function saves information needed to translate a user-space buffer
+ * handle to a MOB id. The translation does not take place immediately, but
+ * during a call to vmw_apply_relocations(). This function builds a relocation
+ * list and a list of buffers to validate. The former needs to be freed using
+ * either vmw_apply_relocations() or vmw_free_relocations(). The latter
+ * needs to be freed using vmw_clear_validations.
+ */
+static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ SVGAMobId *id,
+ struct vmw_dma_buffer **vmw_bo_p)
+{
+ struct vmw_dma_buffer *vmw_bo = NULL;
+ struct ttm_buffer_object *bo;
+ uint32_t handle = *id;
+ struct vmw_relocation *reloc;
+ int ret;
+
+ ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo);
+ if (unlikely(ret != 0)) {
+ DRM_ERROR("Could not find or use MOB buffer.\n");
+ return -EINVAL;
+ }
+ bo = &vmw_bo->base;
+
+ if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) {
+ DRM_ERROR("Max number relocations per submission"
+ " exceeded\n");
+ ret = -EINVAL;
+ goto out_no_reloc;
+ }
+
+ reloc = &sw_context->relocs[sw_context->cur_reloc++];
+ reloc->mob_loc = id;
+ reloc->location = NULL;
+
+ ret = vmw_bo_to_validate_list(sw_context, bo, true, &reloc->index);
+ if (unlikely(ret != 0))
+ goto out_no_reloc;
+
+ *vmw_bo_p = vmw_bo;
+ return 0;
+
+out_no_reloc:
+ vmw_dmabuf_unreference(&vmw_bo);
+ *vmw_bo_p = NULL;
+ return ret;
+}
+
+/**
* vmw_translate_guest_pointer - Prepare to translate a user-space buffer
* handle to a valid SVGAGuestPtr
*
@@ -701,7 +934,7 @@ static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
struct vmw_relocation *reloc;
int ret;
- ret = vmw_user_dmabuf_lookup(sw_context->tfile, handle, &vmw_bo);
+ ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo);
if (unlikely(ret != 0)) {
DRM_ERROR("Could not find or use GMR region.\n");
return -EINVAL;
@@ -718,7 +951,7 @@ static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
reloc = &sw_context->relocs[sw_context->cur_reloc++];
reloc->location = ptr;
- ret = vmw_bo_to_validate_list(sw_context, bo, &reloc->index);
+ ret = vmw_bo_to_validate_list(sw_context, bo, false, &reloc->index);
if (unlikely(ret != 0))
goto out_no_reloc;
@@ -732,6 +965,30 @@ out_no_reloc:
}
/**
+ * vmw_cmd_begin_gb_query - validate a SVGA_3D_CMD_BEGIN_GB_QUERY command.
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context used for this command submission.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_begin_gb_query(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ SVGA3dCmdHeader *header)
+{
+ struct vmw_begin_gb_query_cmd {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdBeginGBQuery q;
+ } *cmd;
+
+ cmd = container_of(header, struct vmw_begin_gb_query_cmd,
+ header);
+
+ return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
+ user_context_converter, &cmd->q.cid,
+ NULL);
+}
+
+/**
* vmw_cmd_begin_query - validate a SVGA_3D_CMD_BEGIN_QUERY command.
*
* @dev_priv: Pointer to a device private struct.
@@ -750,12 +1007,64 @@ static int vmw_cmd_begin_query(struct vmw_private *dev_priv,
cmd = container_of(header, struct vmw_begin_query_cmd,
header);
+ if (unlikely(dev_priv->has_mob)) {
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdBeginGBQuery q;
+ } gb_cmd;
+
+ BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));
+
+ gb_cmd.header.id = SVGA_3D_CMD_BEGIN_GB_QUERY;
+ gb_cmd.header.size = cmd->header.size;
+ gb_cmd.q.cid = cmd->q.cid;
+ gb_cmd.q.type = cmd->q.type;
+
+ memcpy(cmd, &gb_cmd, sizeof(*cmd));
+ return vmw_cmd_begin_gb_query(dev_priv, sw_context, header);
+ }
+
return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
user_context_converter, &cmd->q.cid,
NULL);
}
/**
+ * vmw_cmd_end_gb_query - validate a SVGA_3D_CMD_END_GB_QUERY command.
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context used for this command submission.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_end_gb_query(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ SVGA3dCmdHeader *header)
+{
+ struct vmw_dma_buffer *vmw_bo;
+ struct vmw_query_cmd {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdEndGBQuery q;
+ } *cmd;
+ int ret;
+
+ cmd = container_of(header, struct vmw_query_cmd, header);
+ ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
+ if (unlikely(ret != 0))
+ return ret;
+
+ ret = vmw_translate_mob_ptr(dev_priv, sw_context,
+ &cmd->q.mobid,
+ &vmw_bo);
+ if (unlikely(ret != 0))
+ return ret;
+
+ ret = vmw_query_bo_switch_prepare(dev_priv, &vmw_bo->base, sw_context);
+
+ vmw_dmabuf_unreference(&vmw_bo);
+ return ret;
+}
+
+/**
* vmw_cmd_end_query - validate a SVGA_3D_CMD_END_QUERY command.
*
* @dev_priv: Pointer to a device private struct.
@@ -774,6 +1083,25 @@ static int vmw_cmd_end_query(struct vmw_private *dev_priv,
int ret;
cmd = container_of(header, struct vmw_query_cmd, header);
+ if (dev_priv->has_mob) {
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdEndGBQuery q;
+ } gb_cmd;
+
+ BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));
+
+ gb_cmd.header.id = SVGA_3D_CMD_END_GB_QUERY;
+ gb_cmd.header.size = cmd->header.size;
+ gb_cmd.q.cid = cmd->q.cid;
+ gb_cmd.q.type = cmd->q.type;
+ gb_cmd.q.mobid = cmd->q.guestResult.gmrId;
+ gb_cmd.q.offset = cmd->q.guestResult.offset;
+
+ memcpy(cmd, &gb_cmd, sizeof(*cmd));
+ return vmw_cmd_end_gb_query(dev_priv, sw_context, header);
+ }
+
ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
if (unlikely(ret != 0))
return ret;
@@ -790,7 +1118,40 @@ static int vmw_cmd_end_query(struct vmw_private *dev_priv,
return ret;
}
-/*
+/**
+ * vmw_cmd_wait_gb_query - validate a SVGA_3D_CMD_WAIT_GB_QUERY command.
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context used for this command submission.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_wait_gb_query(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ SVGA3dCmdHeader *header)
+{
+ struct vmw_dma_buffer *vmw_bo;
+ struct vmw_query_cmd {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdWaitForGBQuery q;
+ } *cmd;
+ int ret;
+
+ cmd = container_of(header, struct vmw_query_cmd, header);
+ ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
+ if (unlikely(ret != 0))
+ return ret;
+
+ ret = vmw_translate_mob_ptr(dev_priv, sw_context,
+ &cmd->q.mobid,
+ &vmw_bo);
+ if (unlikely(ret != 0))
+ return ret;
+
+ vmw_dmabuf_unreference(&vmw_bo);
+ return 0;
+}
+
+/**
* vmw_cmd_wait_query - validate a SVGA_3D_CMD_WAIT_QUERY command.
*
* @dev_priv: Pointer to a device private struct.
@@ -809,6 +1170,25 @@ static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
int ret;
cmd = container_of(header, struct vmw_query_cmd, header);
+ if (dev_priv->has_mob) {
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdWaitForGBQuery q;
+ } gb_cmd;
+
+ BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));
+
+ gb_cmd.header.id = SVGA_3D_CMD_WAIT_FOR_GB_QUERY;
+ gb_cmd.header.size = cmd->header.size;
+ gb_cmd.q.cid = cmd->q.cid;
+ gb_cmd.q.type = cmd->q.type;
+ gb_cmd.q.mobid = cmd->q.guestResult.gmrId;
+ gb_cmd.q.offset = cmd->q.guestResult.offset;
+
+ memcpy(cmd, &gb_cmd, sizeof(*cmd));
+ return vmw_cmd_wait_gb_query(dev_priv, sw_context, header);
+ }
+
ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
if (unlikely(ret != 0))
return ret;
@@ -853,7 +1233,8 @@ static int vmw_cmd_dma(struct vmw_private *dev_priv,
srf = vmw_res_to_srf(sw_context->res_cache[vmw_res_surface].res);
- vmw_kms_cursor_snoop(srf, sw_context->tfile, &vmw_bo->base, header);
+ vmw_kms_cursor_snoop(srf, sw_context->fp->tfile, &vmw_bo->base,
+ header);
out_no_surface:
vmw_dmabuf_unreference(&vmw_bo);
@@ -921,15 +1302,22 @@ static int vmw_cmd_tex_state(struct vmw_private *dev_priv,
struct vmw_tex_state_cmd {
SVGA3dCmdHeader header;
SVGA3dCmdSetTextureState state;
- };
+ } *cmd;
SVGA3dTextureState *last_state = (SVGA3dTextureState *)
((unsigned long) header + header->size + sizeof(header));
SVGA3dTextureState *cur_state = (SVGA3dTextureState *)
((unsigned long) header + sizeof(struct vmw_tex_state_cmd));
+ struct vmw_resource_val_node *ctx_node;
+ struct vmw_resource_val_node *res_node;
int ret;
- ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
+ cmd = container_of(header, struct vmw_tex_state_cmd,
+ header);
+
+ ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
+ user_context_converter, &cmd->state.cid,
+ &ctx_node);
if (unlikely(ret != 0))
return ret;
@@ -939,9 +1327,20 @@ static int vmw_cmd_tex_state(struct vmw_private *dev_priv,
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
user_surface_converter,
- &cur_state->value, NULL);
+ &cur_state->value, &res_node);
if (unlikely(ret != 0))
return ret;
+
+ if (dev_priv->has_mob) {
+ struct vmw_ctx_bindinfo bi;
+
+ bi.ctx = ctx_node->res;
+ bi.res = res_node ? res_node->res : NULL;
+ bi.bt = vmw_ctx_binding_tex;
+ bi.i1.texture_stage = cur_state->stage;
+ vmw_context_binding_add(ctx_node->staged_bindings,
+ &bi);
+ }
}
return 0;
@@ -971,6 +1370,314 @@ static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv,
}
/**
+ * vmw_cmd_switch_backup - Utility function to handle backup buffer switching
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @res_type: The resource type.
+ * @converter: Information about user-space binding for this resource type.
+ * @res_id: Pointer to the user-space resource handle in the command stream.
+ * @buf_id: Pointer to the user-space backup buffer handle in the command
+ * stream.
+ * @backup_offset: Offset of backup into MOB.
+ *
+ * This function prepares for registering a switch of backup buffers
+ * in the resource metadata just prior to unreserving.
+ */
+static int vmw_cmd_switch_backup(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ enum vmw_res_type res_type,
+ const struct vmw_user_resource_conv
+ *converter,
+ uint32_t *res_id,
+ uint32_t *buf_id,
+ unsigned long backup_offset)
+{
+ int ret;
+ struct vmw_dma_buffer *dma_buf;
+ struct vmw_resource_val_node *val_node;
+
+ ret = vmw_cmd_res_check(dev_priv, sw_context, res_type,
+ converter, res_id, &val_node);
+ if (unlikely(ret != 0))
+ return ret;
+
+ ret = vmw_translate_mob_ptr(dev_priv, sw_context, buf_id, &dma_buf);
+ if (unlikely(ret != 0))
+ return ret;
+
+ if (val_node->first_usage)
+ val_node->no_buffer_needed = true;
+
+ vmw_dmabuf_unreference(&val_node->new_backup);
+ val_node->new_backup = dma_buf;
+ val_node->new_backup_offset = backup_offset;
+
+ return 0;
+}
+
+/**
+ * vmw_cmd_bind_gb_surface - Validate an SVGA_3D_CMD_BIND_GB_SURFACE
+ * command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_bind_gb_surface(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ SVGA3dCmdHeader *header)
+{
+ struct vmw_bind_gb_surface_cmd {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdBindGBSurface body;
+ } *cmd;
+
+ cmd = container_of(header, struct vmw_bind_gb_surface_cmd, header);
+
+ return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_surface,
+ user_surface_converter,
+ &cmd->body.sid, &cmd->body.mobid,
+ 0);
+}
+
+/**
+ * vmw_cmd_update_gb_image - Validate an SVGA_3D_CMD_UPDATE_GB_IMAGE
+ * command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_update_gb_image(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ SVGA3dCmdHeader *header)
+{
+ struct vmw_gb_surface_cmd {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdUpdateGBImage body;
+ } *cmd;
+
+ cmd = container_of(header, struct vmw_gb_surface_cmd, header);
+
+ return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+ user_surface_converter,
+ &cmd->body.image.sid, NULL);
+}
+
+/**
+ * vmw_cmd_update_gb_surface - Validate an SVGA_3D_CMD_UPDATE_GB_SURFACE
+ * command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_update_gb_surface(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ SVGA3dCmdHeader *header)
+{
+ struct vmw_gb_surface_cmd {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdUpdateGBSurface body;
+ } *cmd;
+
+ cmd = container_of(header, struct vmw_gb_surface_cmd, header);
+
+ return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+ user_surface_converter,
+ &cmd->body.sid, NULL);
+}
+
+/**
+ * vmw_cmd_readback_gb_image - Validate an SVGA_3D_CMD_READBACK_GB_IMAGE
+ * command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_readback_gb_image(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ SVGA3dCmdHeader *header)
+{
+ struct vmw_gb_surface_cmd {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdReadbackGBImage body;
+ } *cmd;
+
+ cmd = container_of(header, struct vmw_gb_surface_cmd, header);
+
+ return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+ user_surface_converter,
+ &cmd->body.image.sid, NULL);
+}
+
+/**
+ * vmw_cmd_readback_gb_surface - Validate an SVGA_3D_CMD_READBACK_GB_SURFACE
+ * command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_readback_gb_surface(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ SVGA3dCmdHeader *header)
+{
+ struct vmw_gb_surface_cmd {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdReadbackGBSurface body;
+ } *cmd;
+
+ cmd = container_of(header, struct vmw_gb_surface_cmd, header);
+
+ return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+ user_surface_converter,
+ &cmd->body.sid, NULL);
+}
+
+/**
+ * vmw_cmd_invalidate_gb_image - Validate an SVGA_3D_CMD_INVALIDATE_GB_IMAGE
+ * command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_invalidate_gb_image(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ SVGA3dCmdHeader *header)
+{
+ struct vmw_gb_surface_cmd {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdInvalidateGBImage body;
+ } *cmd;
+
+ cmd = container_of(header, struct vmw_gb_surface_cmd, header);
+
+ return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+ user_surface_converter,
+ &cmd->body.image.sid, NULL);
+}
+
+/**
+ * vmw_cmd_invalidate_gb_surface - Validate an
+ * SVGA_3D_CMD_INVALIDATE_GB_SURFACE command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_invalidate_gb_surface(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ SVGA3dCmdHeader *header)
+{
+ struct vmw_gb_surface_cmd {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdInvalidateGBSurface body;
+ } *cmd;
+
+ cmd = container_of(header, struct vmw_gb_surface_cmd, header);
+
+ return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+ user_surface_converter,
+ &cmd->body.sid, NULL);
+}
+
+
+/**
+ * vmw_cmd_shader_define - Validate an SVGA_3D_CMD_SHADER_DEFINE
+ * command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_shader_define(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ SVGA3dCmdHeader *header)
+{
+ struct vmw_shader_define_cmd {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDefineShader body;
+ } *cmd;
+ int ret;
+ size_t size;
+
+ cmd = container_of(header, struct vmw_shader_define_cmd,
+ header);
+
+ ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
+ user_context_converter, &cmd->body.cid,
+ NULL);
+ if (unlikely(ret != 0))
+ return ret;
+
+ if (unlikely(!dev_priv->has_mob))
+ return 0;
+
+ size = cmd->header.size - sizeof(cmd->body);
+ ret = vmw_compat_shader_add(sw_context->fp->shman,
+ cmd->body.shid, cmd + 1,
+ cmd->body.type, size,
+ sw_context->fp->tfile,
+ &sw_context->staged_shaders);
+ if (unlikely(ret != 0))
+ return ret;
+
+ return vmw_resource_relocation_add(&sw_context->res_relocations,
+ NULL, &cmd->header.id -
+ sw_context->buf_start);
+}
+
+/**
+ * vmw_cmd_shader_destroy - Validate an SVGA_3D_CMD_SHADER_DESTROY
+ * command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_shader_destroy(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ SVGA3dCmdHeader *header)
+{
+ struct vmw_shader_destroy_cmd {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDestroyShader body;
+ } *cmd;
+ int ret;
+
+ cmd = container_of(header, struct vmw_shader_destroy_cmd,
+ header);
+
+ ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
+ user_context_converter, &cmd->body.cid,
+ NULL);
+ if (unlikely(ret != 0))
+ return ret;
+
+ if (unlikely(!dev_priv->has_mob))
+ return 0;
+
+ ret = vmw_compat_shader_remove(sw_context->fp->shman,
+ cmd->body.shid,
+ cmd->body.type,
+ &sw_context->staged_shaders);
+ if (unlikely(ret != 0))
+ return ret;
+
+ return vmw_resource_relocation_add(&sw_context->res_relocations,
+ NULL, &cmd->header.id -
+ sw_context->buf_start);
+}
+
+/**
* vmw_cmd_set_shader - Validate an SVGA_3D_CMD_SET_SHADER
* command
*
@@ -986,18 +1693,105 @@ static int vmw_cmd_set_shader(struct vmw_private *dev_priv,
SVGA3dCmdHeader header;
SVGA3dCmdSetShader body;
} *cmd;
+ struct vmw_resource_val_node *ctx_node;
int ret;
cmd = container_of(header, struct vmw_set_shader_cmd,
header);
- ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
+ ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
+ user_context_converter, &cmd->body.cid,
+ &ctx_node);
if (unlikely(ret != 0))
return ret;
+ if (dev_priv->has_mob) {
+ struct vmw_ctx_bindinfo bi;
+ struct vmw_resource_val_node *res_node;
+ u32 shid = cmd->body.shid;
+
+ if (shid != SVGA3D_INVALID_ID)
+ (void) vmw_compat_shader_lookup(sw_context->fp->shman,
+ cmd->body.type,
+ &shid);
+
+ ret = vmw_cmd_compat_res_check(dev_priv, sw_context,
+ vmw_res_shader,
+ user_shader_converter,
+ shid,
+ &cmd->body.shid, &res_node);
+ if (unlikely(ret != 0))
+ return ret;
+
+ bi.ctx = ctx_node->res;
+ bi.res = res_node ? res_node->res : NULL;
+ bi.bt = vmw_ctx_binding_shader;
+ bi.i1.shader_type = cmd->body.type;
+ return vmw_context_binding_add(ctx_node->staged_bindings, &bi);
+ }
+
return 0;
}
+/**
+ * vmw_cmd_set_shader_const - Validate an SVGA_3D_CMD_SET_SHADER_CONST
+ * command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_set_shader_const(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ SVGA3dCmdHeader *header)
+{
+ struct vmw_set_shader_const_cmd {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdSetShaderConst body;
+ } *cmd;
+ int ret;
+
+ cmd = container_of(header, struct vmw_set_shader_const_cmd,
+ header);
+
+ ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
+ user_context_converter, &cmd->body.cid,
+ NULL);
+ if (unlikely(ret != 0))
+ return ret;
+
+ if (dev_priv->has_mob)
+ header->id = SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE;
+
+ return 0;
+}
+
+/**
+ * vmw_cmd_bind_gb_shader - Validate an SVGA_3D_CMD_BIND_GB_SHADER
+ * command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_bind_gb_shader(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ SVGA3dCmdHeader *header)
+{
+ struct vmw_bind_gb_shader_cmd {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdBindGBShader body;
+ } *cmd;
+
+ cmd = container_of(header, struct vmw_bind_gb_shader_cmd,
+ header);
+
+ return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_shader,
+ user_shader_converter,
+ &cmd->body.shid, &cmd->body.mobid,
+ cmd->body.offsetInBytes);
+}
+
static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
void *buf, uint32_t *size)
@@ -1041,50 +1835,173 @@ static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv,
return 0;
}
-typedef int (*vmw_cmd_func) (struct vmw_private *,
- struct vmw_sw_context *,
- SVGA3dCmdHeader *);
-
-#define VMW_CMD_DEF(cmd, func) \
- [cmd - SVGA_3D_CMD_BASE] = func
-
-static vmw_cmd_func vmw_cmd_funcs[SVGA_3D_CMD_MAX] = {
- VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE, &vmw_cmd_invalid),
- VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DESTROY, &vmw_cmd_invalid),
- VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_COPY, &vmw_cmd_surface_copy_check),
- VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_STRETCHBLT, &vmw_cmd_stretch_blt_check),
- VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DMA, &vmw_cmd_dma),
- VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DEFINE, &vmw_cmd_invalid),
- VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DESTROY, &vmw_cmd_invalid),
- VMW_CMD_DEF(SVGA_3D_CMD_SETTRANSFORM, &vmw_cmd_cid_check),
- VMW_CMD_DEF(SVGA_3D_CMD_SETZRANGE, &vmw_cmd_cid_check),
- VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERSTATE, &vmw_cmd_cid_check),
+static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
+ VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE, &vmw_cmd_invalid,
+ false, false, false),
+ VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DESTROY, &vmw_cmd_invalid,
+ false, false, false),
+ VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_COPY, &vmw_cmd_surface_copy_check,
+ true, false, false),
+ VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_STRETCHBLT, &vmw_cmd_stretch_blt_check,
+ true, false, false),
+ VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DMA, &vmw_cmd_dma,
+ true, false, false),
+ VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DEFINE, &vmw_cmd_invalid,
+ false, false, false),
+ VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DESTROY, &vmw_cmd_invalid,
+ false, false, false),
+ VMW_CMD_DEF(SVGA_3D_CMD_SETTRANSFORM, &vmw_cmd_cid_check,
+ true, false, false),
+ VMW_CMD_DEF(SVGA_3D_CMD_SETZRANGE, &vmw_cmd_cid_check,
+ true, false, false),
+ VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERSTATE, &vmw_cmd_cid_check,
+ true, false, false),
VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERTARGET,
- &vmw_cmd_set_render_target_check),
- VMW_CMD_DEF(SVGA_3D_CMD_SETTEXTURESTATE, &vmw_cmd_tex_state),
- VMW_CMD_DEF(SVGA_3D_CMD_SETMATERIAL, &vmw_cmd_cid_check),
- VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTDATA, &vmw_cmd_cid_check),
- VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTENABLED, &vmw_cmd_cid_check),
- VMW_CMD_DEF(SVGA_3D_CMD_SETVIEWPORT, &vmw_cmd_cid_check),
- VMW_CMD_DEF(SVGA_3D_CMD_SETCLIPPLANE, &vmw_cmd_cid_check),
- VMW_CMD_DEF(SVGA_3D_CMD_CLEAR, &vmw_cmd_cid_check),
- VMW_CMD_DEF(SVGA_3D_CMD_PRESENT, &vmw_cmd_present_check),
- VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DEFINE, &vmw_cmd_cid_check),
- VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY, &vmw_cmd_cid_check),
- VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_set_shader),
- VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST, &vmw_cmd_cid_check),
- VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_draw),
- VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check),
- VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY, &vmw_cmd_begin_query),
- VMW_CMD_DEF(SVGA_3D_CMD_END_QUERY, &vmw_cmd_end_query),
- VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_QUERY, &vmw_cmd_wait_query),
- VMW_CMD_DEF(SVGA_3D_CMD_PRESENT_READBACK, &vmw_cmd_ok),
+ &vmw_cmd_set_render_target_check, true, false, false),
+ VMW_CMD_DEF(SVGA_3D_CMD_SETTEXTURESTATE, &vmw_cmd_tex_state,
+ true, false, false),
+ VMW_CMD_DEF(SVGA_3D_CMD_SETMATERIAL, &vmw_cmd_cid_check,
+ true, false, false),
+ VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTDATA, &vmw_cmd_cid_check,
+ true, false, false),
+ VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTENABLED, &vmw_cmd_cid_check,
+ true, false, false),
+ VMW_CMD_DEF(SVGA_3D_CMD_SETVIEWPORT, &vmw_cmd_cid_check,
+ true, false, false),
+ VMW_CMD_DEF(SVGA_3D_CMD_SETCLIPPLANE, &vmw_cmd_cid_check,
+ true, false, false),
+ VMW_CMD_DEF(SVGA_3D_CMD_CLEAR, &vmw_cmd_cid_check,
+ true, false, false),
+ VMW_CMD_DEF(SVGA_3D_CMD_PRESENT, &vmw_cmd_present_check,
+ false, false, false),
+ VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DEFINE, &vmw_cmd_shader_define,
+ true, false, false),
+ VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY, &vmw_cmd_shader_destroy,
+ true, false, false),
+ VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_set_shader,
+ true, false, false),
+ VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST, &vmw_cmd_set_shader_const,
+ true, false, false),
+ VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_draw,
+ true, false, false),
+ VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check,
+ true, false, false),
+ VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY, &vmw_cmd_begin_query,
+ true, false, false),
+ VMW_CMD_DEF(SVGA_3D_CMD_END_QUERY, &vmw_cmd_end_query,
+ true, false, false),
+ VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_QUERY, &vmw_cmd_wait_query,
+ true, false, false),
+ VMW_CMD_DEF(SVGA_3D_CMD_PRESENT_READBACK, &vmw_cmd_ok,
+ true, false, false),
VMW_CMD_DEF(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN,
- &vmw_cmd_blt_surf_screen_check),
- VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE_V2, &vmw_cmd_invalid),
- VMW_CMD_DEF(SVGA_3D_CMD_GENERATE_MIPMAPS, &vmw_cmd_invalid),
- VMW_CMD_DEF(SVGA_3D_CMD_ACTIVATE_SURFACE, &vmw_cmd_invalid),
- VMW_CMD_DEF(SVGA_3D_CMD_DEACTIVATE_SURFACE, &vmw_cmd_invalid),
+ &vmw_cmd_blt_surf_screen_check, false, false, false),
+ VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE_V2, &vmw_cmd_invalid,
+ false, false, false),
+ VMW_CMD_DEF(SVGA_3D_CMD_GENERATE_MIPMAPS, &vmw_cmd_invalid,
+ false, false, false),
+ VMW_CMD_DEF(SVGA_3D_CMD_ACTIVATE_SURFACE, &vmw_cmd_invalid,
+ false, false, false),
+ VMW_CMD_DEF(SVGA_3D_CMD_DEACTIVATE_SURFACE, &vmw_cmd_invalid,
+ false, false, false),
+ VMW_CMD_DEF(SVGA_3D_CMD_SCREEN_DMA, &vmw_cmd_invalid,
+ false, false, false),
+ VMW_CMD_DEF(SVGA_3D_CMD_SET_UNITY_SURFACE_COOKIE, &vmw_cmd_invalid,
+ false, false, false),
+ VMW_CMD_DEF(SVGA_3D_CMD_OPEN_CONTEXT_SURFACE, &vmw_cmd_invalid,
+ false, false, false),
+ VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_BITBLT, &vmw_cmd_invalid,
+ false, false, false),
+ VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_TRANSBLT, &vmw_cmd_invalid,
+ false, false, false),
+ VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_STRETCHBLT, &vmw_cmd_invalid,
+ false, false, false),
+ VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_COLORFILL, &vmw_cmd_invalid,
+ false, false, false),
+ VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_ALPHABLEND, &vmw_cmd_invalid,
+ false, false, false),
+ VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_CLEARTYPEBLEND, &vmw_cmd_invalid,
+ false, false, false),
+ VMW_CMD_DEF(SVGA_3D_CMD_SET_OTABLE_BASE, &vmw_cmd_invalid,
+ false, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_READBACK_OTABLE, &vmw_cmd_invalid,
+ false, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_MOB, &vmw_cmd_invalid,
+ false, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_MOB, &vmw_cmd_invalid,
+ false, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_REDEFINE_GB_MOB, &vmw_cmd_invalid,
+ false, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_MOB_MAPPING, &vmw_cmd_invalid,
+ false, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SURFACE, &vmw_cmd_invalid,
+ false, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SURFACE, &vmw_cmd_invalid,
+ false, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SURFACE, &vmw_cmd_bind_gb_surface,
+ true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_COND_BIND_GB_SURFACE, &vmw_cmd_invalid,
+ false, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_IMAGE, &vmw_cmd_update_gb_image,
+ true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_SURFACE,
+ &vmw_cmd_update_gb_surface, true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_IMAGE,
+ &vmw_cmd_readback_gb_image, true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_SURFACE,
+ &vmw_cmd_readback_gb_surface, true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE,
+ &vmw_cmd_invalidate_gb_image, true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_SURFACE,
+ &vmw_cmd_invalidate_gb_surface, true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_CONTEXT, &vmw_cmd_invalid,
+ false, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_CONTEXT, &vmw_cmd_invalid,
+ false, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_CONTEXT, &vmw_cmd_invalid,
+ false, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_CONTEXT, &vmw_cmd_invalid,
+ false, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_CONTEXT, &vmw_cmd_invalid,
+ false, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SHADER, &vmw_cmd_invalid,
+ false, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SHADER, &vmw_cmd_bind_gb_shader,
+ true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SHADER, &vmw_cmd_invalid,
+ false, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_SET_OTABLE_BASE64, &vmw_cmd_invalid,
+ false, false, false),
+ VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_GB_QUERY, &vmw_cmd_begin_gb_query,
+ true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_END_GB_QUERY, &vmw_cmd_end_gb_query,
+ true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_GB_QUERY, &vmw_cmd_wait_gb_query,
+ true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_NOP, &vmw_cmd_ok,
+ true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_ENABLE_GART, &vmw_cmd_invalid,
+ false, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DISABLE_GART, &vmw_cmd_invalid,
+ false, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_MAP_MOB_INTO_GART, &vmw_cmd_invalid,
+ false, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_UNMAP_GART_RANGE, &vmw_cmd_invalid,
+ false, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SCREENTARGET, &vmw_cmd_invalid,
+ false, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SCREENTARGET, &vmw_cmd_invalid,
+ false, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SCREENTARGET, &vmw_cmd_invalid,
+ false, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_SCREENTARGET, &vmw_cmd_invalid,
+ false, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_IMAGE_PARTIAL, &vmw_cmd_invalid,
+ false, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL, &vmw_cmd_invalid,
+ false, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE, &vmw_cmd_cid_check,
+ true, false, true)
};
static int vmw_cmd_check(struct vmw_private *dev_priv,
@@ -1095,6 +2012,8 @@ static int vmw_cmd_check(struct vmw_private *dev_priv,
uint32_t size_remaining = *size;
SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf;
int ret;
+ const struct vmw_cmd_entry *entry;
+ bool gb = dev_priv->capabilities & SVGA_CAP_GBOBJECTS;
cmd_id = le32_to_cpu(((uint32_t *)buf)[0]);
/* Handle any none 3D commands */
@@ -1107,18 +2026,40 @@ static int vmw_cmd_check(struct vmw_private *dev_priv,
cmd_id -= SVGA_3D_CMD_BASE;
if (unlikely(*size > size_remaining))
- goto out_err;
+ goto out_invalid;
if (unlikely(cmd_id >= SVGA_3D_CMD_MAX - SVGA_3D_CMD_BASE))
- goto out_err;
+ goto out_invalid;
+
+ entry = &vmw_cmd_entries[cmd_id];
+ if (unlikely(!entry->user_allow && !sw_context->kernel))
+ goto out_privileged;
+
+ if (unlikely(entry->gb_disable && gb))
+ goto out_old;
+
+ if (unlikely(entry->gb_enable && !gb))
+ goto out_new;
- ret = vmw_cmd_funcs[cmd_id](dev_priv, sw_context, header);
+ ret = entry->func(dev_priv, sw_context, header);
if (unlikely(ret != 0))
- goto out_err;
+ goto out_invalid;
return 0;
-out_err:
- DRM_ERROR("Illegal / Invalid SVGA3D command: %d\n",
+out_invalid:
+ DRM_ERROR("Invalid SVGA3D command: %d\n",
+ cmd_id + SVGA_3D_CMD_BASE);
+ return -EINVAL;
+out_privileged:
+ DRM_ERROR("Privileged SVGA3D command: %d\n",
+ cmd_id + SVGA_3D_CMD_BASE);
+ return -EPERM;
+out_old:
+ DRM_ERROR("Deprecated (disallowed) SVGA3D command: %d\n",
+ cmd_id + SVGA_3D_CMD_BASE);
+ return -EINVAL;
+out_new:
+ DRM_ERROR("SVGA3D command: %d not supported by virtual hardware.\n",
cmd_id + SVGA_3D_CMD_BASE);
return -EINVAL;
}
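
The per-command table plus the checks in vmw_cmd_check replace the old bare function-pointer array: every entry now carries a validator and three flags (user-allowed, disallowed on guest-backed hardware, requires guest-backed hardware). A stand-alone sketch of the same dispatch-and-gate shape, with hypothetical command ids and error codes:

#include <stdbool.h>
#include <stdio.h>

#define CMD_BASE 1000
#define CMD_MAX  1003

struct sw_context { bool kernel; };

typedef int (*cmd_func)(struct sw_context *);

struct cmd_entry {
	cmd_func func;
	bool user_allow;	/* usable from unprivileged user-space */
	bool gb_disable;	/* disallowed when guest-backed objects are on */
	bool gb_enable;		/* requires guest-backed objects */
};

static int cmd_ok(struct sw_context *sw) { (void)sw; return 0; }

#define CMD_DEF(id, f, user, gbd, gbe) \
	[(id) - CMD_BASE] = { (f), (user), (gbd), (gbe) }

static const struct cmd_entry cmd_entries[CMD_MAX - CMD_BASE] = {
	CMD_DEF(1000, cmd_ok, true,  false, false),
	CMD_DEF(1001, cmd_ok, false, false, true),	/* kernel-only, GB-only */
	CMD_DEF(1002, cmd_ok, true,  true,  false),	/* legacy-only */
};

static int cmd_check(struct sw_context *sw, unsigned int cmd_id, bool gb)
{
	const struct cmd_entry *entry;

	if (cmd_id < CMD_BASE || cmd_id >= CMD_MAX)
		return -1;			/* invalid */
	entry = &cmd_entries[cmd_id - CMD_BASE];
	if (!entry->user_allow && !sw->kernel)
		return -2;			/* privileged */
	if (entry->gb_disable && gb)
		return -3;			/* deprecated on GB hardware */
	if (entry->gb_enable && !gb)
		return -4;			/* needs GB hardware */
	return entry->func(sw);
}

int main(void)
{
	struct sw_context sw = { .kernel = false };

	printf("%d %d %d\n",
	       cmd_check(&sw, 1000, true),	/* 0: allowed */
	       cmd_check(&sw, 1001, true),	/* -2: privileged */
	       cmd_check(&sw, 1002, true));	/* -3: deprecated */
	return 0;
}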
@@ -1174,6 +2115,9 @@ static void vmw_apply_relocations(struct vmw_sw_context *sw_context)
case VMW_PL_GMR:
reloc->location->gmrId = bo->mem.start;
break;
+ case VMW_PL_MOB:
+ *reloc->mob_loc = bo->mem.start;
+ break;
default:
BUG();
}
@@ -1198,6 +2142,8 @@ static void vmw_resource_list_unreference(struct list_head *list)
list_for_each_entry_safe(val, val_next, list, head) {
list_del_init(&val->head);
vmw_resource_unreference(&val->res);
+ if (unlikely(val->staged_bindings))
+ kfree(val->staged_bindings);
kfree(val);
}
}
@@ -1224,7 +2170,8 @@ static void vmw_clear_validations(struct vmw_sw_context *sw_context)
}
static int vmw_validate_single_buffer(struct vmw_private *dev_priv,
- struct ttm_buffer_object *bo)
+ struct ttm_buffer_object *bo,
+ bool validate_as_mob)
{
int ret;
@@ -1238,6 +2185,9 @@ static int vmw_validate_single_buffer(struct vmw_private *dev_priv,
dev_priv->dummy_query_bo_pinned))
return 0;
+ if (validate_as_mob)
+ return ttm_bo_validate(bo, &vmw_mob_placement, true, false);
+
/**
* Put BO in VRAM if there is space, otherwise as a GMR.
* If there is no space in VRAM and GMR ids are all used up,
@@ -1259,7 +2209,6 @@ static int vmw_validate_single_buffer(struct vmw_private *dev_priv,
return ret;
}
-
static int vmw_validate_buffers(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context)
{
@@ -1267,7 +2216,8 @@ static int vmw_validate_buffers(struct vmw_private *dev_priv,
int ret;
list_for_each_entry(entry, &sw_context->validate_nodes, base.head) {
- ret = vmw_validate_single_buffer(dev_priv, entry->base.bo);
+ ret = vmw_validate_single_buffer(dev_priv, entry->base.bo,
+ entry->validate_as_mob);
if (unlikely(ret != 0))
return ret;
}
@@ -1461,7 +2411,7 @@ int vmw_execbuf_process(struct drm_file *file_priv,
} else
sw_context->kernel = true;
- sw_context->tfile = vmw_fpriv(file_priv)->tfile;
+ sw_context->fp = vmw_fpriv(file_priv);
sw_context->cur_reloc = 0;
sw_context->cur_val_buf = 0;
sw_context->fence_flags = 0;
@@ -1478,16 +2428,17 @@ int vmw_execbuf_process(struct drm_file *file_priv,
goto out_unlock;
sw_context->res_ht_initialized = true;
}
+ INIT_LIST_HEAD(&sw_context->staged_shaders);
INIT_LIST_HEAD(&resource_list);
ret = vmw_cmd_check_all(dev_priv, sw_context, kernel_commands,
command_size);
if (unlikely(ret != 0))
- goto out_err;
+ goto out_err_nores;
ret = vmw_resources_reserve(sw_context);
if (unlikely(ret != 0))
- goto out_err;
+ goto out_err_nores;
ret = ttm_eu_reserve_buffers(&ticket, &sw_context->validate_nodes);
if (unlikely(ret != 0))
@@ -1509,11 +2460,23 @@ int vmw_execbuf_process(struct drm_file *file_priv,
goto out_err;
}
+ ret = mutex_lock_interruptible(&dev_priv->binding_mutex);
+ if (unlikely(ret != 0)) {
+ ret = -ERESTARTSYS;
+ goto out_err;
+ }
+
+ if (dev_priv->has_mob) {
+ ret = vmw_rebind_contexts(sw_context);
+ if (unlikely(ret != 0))
+ goto out_err;
+ }
+
cmd = vmw_fifo_reserve(dev_priv, command_size);
if (unlikely(cmd == NULL)) {
DRM_ERROR("Failed reserving fifo space for commands.\n");
ret = -ENOMEM;
- goto out_err;
+ goto out_unlock_binding;
}
vmw_apply_relocations(sw_context);
@@ -1538,6 +2501,8 @@ int vmw_execbuf_process(struct drm_file *file_priv,
DRM_ERROR("Fence submission error. Syncing.\n");
vmw_resource_list_unreserve(&sw_context->resource_list, false);
+ mutex_unlock(&dev_priv->binding_mutex);
+
ttm_eu_fence_buffer_objects(&ticket, &sw_context->validate_nodes,
(void *) fence);
@@ -1558,6 +2523,8 @@ int vmw_execbuf_process(struct drm_file *file_priv,
}
list_splice_init(&sw_context->resource_list, &resource_list);
+ vmw_compat_shaders_commit(sw_context->fp->shman,
+ &sw_context->staged_shaders);
mutex_unlock(&dev_priv->cmdbuf_mutex);
/*
@@ -1568,11 +2535,14 @@ int vmw_execbuf_process(struct drm_file *file_priv,
return 0;
+out_unlock_binding:
+ mutex_unlock(&dev_priv->binding_mutex);
out_err:
- vmw_resource_relocations_free(&sw_context->res_relocations);
- vmw_free_relocations(sw_context);
ttm_eu_backoff_reservation(&ticket, &sw_context->validate_nodes);
+out_err_nores:
vmw_resource_list_unreserve(&sw_context->resource_list, true);
+ vmw_resource_relocations_free(&sw_context->res_relocations);
+ vmw_free_relocations(sw_context);
vmw_clear_validations(sw_context);
if (unlikely(dev_priv->pinned_bo != NULL &&
!dev_priv->query_cid_valid))
@@ -1581,6 +2551,8 @@ out_unlock:
list_splice_init(&sw_context->resource_list, &resource_list);
error_resource = sw_context->error_resource;
sw_context->error_resource = NULL;
+ vmw_compat_shaders_revert(sw_context->fp->shman,
+ &sw_context->staged_shaders);
mutex_unlock(&dev_priv->cmdbuf_mutex);
/*
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
index c62d20e8a6f1..436b013b4231 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
@@ -271,7 +271,7 @@ void vmw_fence_obj_unreference(struct vmw_fence_obj **fence_p)
spin_unlock_irq(&fman->lock);
}
-void vmw_fences_perform_actions(struct vmw_fence_manager *fman,
+static void vmw_fences_perform_actions(struct vmw_fence_manager *fman,
struct list_head *list)
{
struct vmw_fence_action *action, *next_action;
@@ -897,7 +897,7 @@ static void vmw_event_fence_action_cleanup(struct vmw_fence_action *action)
* Note that the action callbacks may be executed before this function
* returns.
*/
-void vmw_fence_obj_add_action(struct vmw_fence_obj *fence,
+static void vmw_fence_obj_add_action(struct vmw_fence_obj *fence,
struct vmw_fence_action *action)
{
struct vmw_fence_manager *fman = fence->fman;
@@ -993,7 +993,7 @@ struct vmw_event_fence_pending {
struct drm_vmw_event_fence event;
};
-int vmw_event_fence_action_create(struct drm_file *file_priv,
+static int vmw_event_fence_action_create(struct drm_file *file_priv,
struct vmw_fence_obj *fence,
uint32_t flags,
uint64_t user_data,
@@ -1080,7 +1080,8 @@ int vmw_fence_event_ioctl(struct drm_device *dev, void *data,
*/
if (arg->handle) {
struct ttm_base_object *base =
- ttm_base_object_lookup(vmw_fp->tfile, arg->handle);
+ ttm_base_object_lookup_for_ref(dev_priv->tdev,
+ arg->handle);
if (unlikely(base == NULL)) {
DRM_ERROR("Fence event invalid fence object handle "
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
index 3eb148667d63..6ccd993e26bf 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
@@ -35,6 +35,23 @@ bool vmw_fifo_have_3d(struct vmw_private *dev_priv)
uint32_t fifo_min, hwversion;
const struct vmw_fifo_state *fifo = &dev_priv->fifo;
+ if (!(dev_priv->capabilities & SVGA_CAP_3D))
+ return false;
+
+ if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) {
+ uint32_t result;
+
+ if (!dev_priv->has_mob)
+ return false;
+
+ mutex_lock(&dev_priv->hw_mutex);
+ vmw_write(dev_priv, SVGA_REG_DEV_CAP, SVGA3D_DEVCAP_3D);
+ result = vmw_read(dev_priv, SVGA_REG_DEV_CAP);
+ mutex_unlock(&dev_priv->hw_mutex);
+
+ return (result != 0);
+ }
+
if (!(dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO))
return false;
@@ -511,24 +528,16 @@ out_err:
}
/**
- * vmw_fifo_emit_dummy_query - emits a dummy query to the fifo.
+ * vmw_fifo_emit_dummy_legacy_query - emits a dummy query to the fifo using
+ * legacy query commands.
*
* @dev_priv: The device private structure.
* @cid: The hardware context id used for the query.
*
- * This function is used to emit a dummy occlusion query with
- * no primitives rendered between query begin and query end.
- * It's used to provide a query barrier, in order to know that when
- * this query is finished, all preceding queries are also finished.
- *
- * A Query results structure should have been initialized at the start
- * of the dev_priv->dummy_query_bo buffer object. And that buffer object
- * must also be either reserved or pinned when this function is called.
- *
- * Returns -ENOMEM on failure to reserve fifo space.
+ * See the vmw_fifo_emit_dummy_query documentation.
*/
-int vmw_fifo_emit_dummy_query(struct vmw_private *dev_priv,
- uint32_t cid)
+static int vmw_fifo_emit_dummy_legacy_query(struct vmw_private *dev_priv,
+ uint32_t cid)
{
/*
* A query wait without a preceding query end will
@@ -566,3 +575,75 @@ int vmw_fifo_emit_dummy_query(struct vmw_private *dev_priv,
return 0;
}
+
+/**
+ * vmw_fifo_emit_dummy_gb_query - emits a dummy query to the fifo using
+ * guest-backed resource query commands.
+ *
+ * @dev_priv: The device private structure.
+ * @cid: The hardware context id used for the query.
+ *
+ * See the vmw_fifo_emit_dummy_query documentation.
+ */
+static int vmw_fifo_emit_dummy_gb_query(struct vmw_private *dev_priv,
+ uint32_t cid)
+{
+ /*
+ * A query wait without a preceding query end will
+ * actually finish all queries for this cid
+ * without writing to the query result structure.
+ */
+
+ struct ttm_buffer_object *bo = dev_priv->dummy_query_bo;
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdWaitForGBQuery body;
+ } *cmd;
+
+ cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+
+ if (unlikely(cmd == NULL)) {
+ DRM_ERROR("Out of fifo space for dummy query.\n");
+ return -ENOMEM;
+ }
+
+ cmd->header.id = SVGA_3D_CMD_WAIT_FOR_GB_QUERY;
+ cmd->header.size = sizeof(cmd->body);
+ cmd->body.cid = cid;
+ cmd->body.type = SVGA3D_QUERYTYPE_OCCLUSION;
+ BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
+ cmd->body.mobid = bo->mem.start;
+ cmd->body.offset = 0;
+
+ vmw_fifo_commit(dev_priv, sizeof(*cmd));
+
+ return 0;
+}
+
+
+/**
+ * vmw_fifo_emit_dummy_query - emits a dummy query to the fifo using
+ * appropriate resource query commands.
+ *
+ * @dev_priv: The device private structure.
+ * @cid: The hardware context id used for the query.
+ *
+ * This function is used to emit a dummy occlusion query with
+ * no primitives rendered between query begin and query end.
+ * It's used to provide a query barrier, in order to know that when
+ * this query is finished, all preceding queries are also finished.
+ *
+ * A Query results structure should have been initialized at the start
+ * of the dev_priv->dummy_query_bo buffer object. And that buffer object
+ * must also be either reserved or pinned when this function is called.
+ *
+ * Returns -ENOMEM on failure to reserve fifo space.
+ */
+int vmw_fifo_emit_dummy_query(struct vmw_private *dev_priv,
+ uint32_t cid)
+{
+ if (dev_priv->has_mob)
+ return vmw_fifo_emit_dummy_gb_query(dev_priv, cid);
+
+ return vmw_fifo_emit_dummy_legacy_query(dev_priv, cid);
+}
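
vmw_fifo_emit_dummy_query is now just a capability switch over the two emit paths, so callers stay unchanged whether the device uses legacy or guest-backed queries. A sketch of that wrapper shape with hypothetical names:

#include <stdbool.h>
#include <stdio.h>

struct device_private { bool has_mob; };

static int emit_dummy_legacy_query(struct device_private *dev, unsigned int cid)
{
	(void)dev;
	printf("legacy WAIT_FOR_QUERY, cid %u\n", cid);
	return 0;
}

static int emit_dummy_gb_query(struct device_private *dev, unsigned int cid)
{
	(void)dev;
	printf("WAIT_FOR_GB_QUERY, cid %u\n", cid);
	return 0;
}

/* Single entry point; callers never need to know which path was taken. */
static int emit_dummy_query(struct device_private *dev, unsigned int cid)
{
	if (dev->has_mob)
		return emit_dummy_gb_query(dev, cid);
	return emit_dummy_legacy_query(dev, cid);
}

int main(void)
{
	struct device_private legacy = { .has_mob = false };
	struct device_private gb = { .has_mob = true };

	emit_dummy_query(&legacy, 3);
	emit_dummy_query(&gb, 3);
	return 0;
}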
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
index 6ef0b035becb..61d8d803199f 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
@@ -125,181 +125,27 @@ static void vmw_gmr2_unbind(struct vmw_private *dev_priv,
}
-static void vmw_gmr_free_descriptors(struct device *dev, dma_addr_t desc_dma,
- struct list_head *desc_pages)
-{
- struct page *page, *next;
- struct svga_guest_mem_descriptor *page_virtual;
- unsigned int desc_per_page = PAGE_SIZE /
- sizeof(struct svga_guest_mem_descriptor) - 1;
-
- if (list_empty(desc_pages))
- return;
-
- list_for_each_entry_safe(page, next, desc_pages, lru) {
- list_del_init(&page->lru);
-
- if (likely(desc_dma != DMA_ADDR_INVALID)) {
- dma_unmap_page(dev, desc_dma, PAGE_SIZE,
- DMA_TO_DEVICE);
- }
-
- page_virtual = kmap_atomic(page);
- desc_dma = (dma_addr_t)
- le32_to_cpu(page_virtual[desc_per_page].ppn) <<
- PAGE_SHIFT;
- kunmap_atomic(page_virtual);
-
- __free_page(page);
- }
-}
-
-/**
- * FIXME: Adjust to the ttm lowmem / highmem storage to minimize
- * the number of used descriptors.
- *
- */
-
-static int vmw_gmr_build_descriptors(struct device *dev,
- struct list_head *desc_pages,
- struct vmw_piter *iter,
- unsigned long num_pages,
- dma_addr_t *first_dma)
-{
- struct page *page;
- struct svga_guest_mem_descriptor *page_virtual = NULL;
- struct svga_guest_mem_descriptor *desc_virtual = NULL;
- unsigned int desc_per_page;
- unsigned long prev_pfn;
- unsigned long pfn;
- int ret;
- dma_addr_t desc_dma;
-
- desc_per_page = PAGE_SIZE /
- sizeof(struct svga_guest_mem_descriptor) - 1;
-
- while (likely(num_pages != 0)) {
- page = alloc_page(__GFP_HIGHMEM);
- if (unlikely(page == NULL)) {
- ret = -ENOMEM;
- goto out_err;
- }
-
- list_add_tail(&page->lru, desc_pages);
- page_virtual = kmap_atomic(page);
- desc_virtual = page_virtual - 1;
- prev_pfn = ~(0UL);
-
- while (likely(num_pages != 0)) {
- pfn = vmw_piter_dma_addr(iter) >> PAGE_SHIFT;
-
- if (pfn != prev_pfn + 1) {
-
- if (desc_virtual - page_virtual ==
- desc_per_page - 1)
- break;
-
- (++desc_virtual)->ppn = cpu_to_le32(pfn);
- desc_virtual->num_pages = cpu_to_le32(1);
- } else {
- uint32_t tmp =
- le32_to_cpu(desc_virtual->num_pages);
- desc_virtual->num_pages = cpu_to_le32(tmp + 1);
- }
- prev_pfn = pfn;
- --num_pages;
- vmw_piter_next(iter);
- }
-
- (++desc_virtual)->ppn = DMA_PAGE_INVALID;
- desc_virtual->num_pages = cpu_to_le32(0);
- kunmap_atomic(page_virtual);
- }
-
- desc_dma = 0;
- list_for_each_entry_reverse(page, desc_pages, lru) {
- page_virtual = kmap_atomic(page);
- page_virtual[desc_per_page].ppn = cpu_to_le32
- (desc_dma >> PAGE_SHIFT);
- kunmap_atomic(page_virtual);
- desc_dma = dma_map_page(dev, page, 0, PAGE_SIZE,
- DMA_TO_DEVICE);
-
- if (unlikely(dma_mapping_error(dev, desc_dma)))
- goto out_err;
- }
- *first_dma = desc_dma;
-
- return 0;
-out_err:
- vmw_gmr_free_descriptors(dev, DMA_ADDR_INVALID, desc_pages);
- return ret;
-}
-
-static void vmw_gmr_fire_descriptors(struct vmw_private *dev_priv,
- int gmr_id, dma_addr_t desc_dma)
-{
- mutex_lock(&dev_priv->hw_mutex);
-
- vmw_write(dev_priv, SVGA_REG_GMR_ID, gmr_id);
- wmb();
- vmw_write(dev_priv, SVGA_REG_GMR_DESCRIPTOR, desc_dma >> PAGE_SHIFT);
- mb();
-
- mutex_unlock(&dev_priv->hw_mutex);
-
-}
-
int vmw_gmr_bind(struct vmw_private *dev_priv,
const struct vmw_sg_table *vsgt,
unsigned long num_pages,
int gmr_id)
{
- struct list_head desc_pages;
- dma_addr_t desc_dma = 0;
- struct device *dev = dev_priv->dev->dev;
struct vmw_piter data_iter;
- int ret;
vmw_piter_start(&data_iter, vsgt, 0);
if (unlikely(!vmw_piter_next(&data_iter)))
return 0;
- if (likely(dev_priv->capabilities & SVGA_CAP_GMR2))
- return vmw_gmr2_bind(dev_priv, &data_iter, num_pages, gmr_id);
-
- if (unlikely(!(dev_priv->capabilities & SVGA_CAP_GMR)))
- return -EINVAL;
-
- if (vsgt->num_regions > dev_priv->max_gmr_descriptors)
+ if (unlikely(!(dev_priv->capabilities & SVGA_CAP_GMR2)))
return -EINVAL;
- INIT_LIST_HEAD(&desc_pages);
-
- ret = vmw_gmr_build_descriptors(dev, &desc_pages, &data_iter,
- num_pages, &desc_dma);
- if (unlikely(ret != 0))
- return ret;
-
- vmw_gmr_fire_descriptors(dev_priv, gmr_id, desc_dma);
- vmw_gmr_free_descriptors(dev, desc_dma, &desc_pages);
-
- return 0;
+ return vmw_gmr2_bind(dev_priv, &data_iter, num_pages, gmr_id);
}
void vmw_gmr_unbind(struct vmw_private *dev_priv, int gmr_id)
{
- if (likely(dev_priv->capabilities & SVGA_CAP_GMR2)) {
+ if (likely(dev_priv->capabilities & SVGA_CAP_GMR2))
vmw_gmr2_unbind(dev_priv, gmr_id);
- return;
- }
-
- mutex_lock(&dev_priv->hw_mutex);
- vmw_write(dev_priv, SVGA_REG_GMR_ID, gmr_id);
- wmb();
- vmw_write(dev_priv, SVGA_REG_GMR_DESCRIPTOR, 0);
- mb();
- mutex_unlock(&dev_priv->hw_mutex);
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
index c5c054ae9056..b1273e8e9a69 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
@@ -125,10 +125,21 @@ static int vmw_gmrid_man_init(struct ttm_mem_type_manager *man,
return -ENOMEM;
spin_lock_init(&gman->lock);
- gman->max_gmr_pages = dev_priv->max_gmr_pages;
gman->used_gmr_pages = 0;
ida_init(&gman->gmr_ida);
- gman->max_gmr_ids = p_size;
+
+ switch (p_size) {
+ case VMW_PL_GMR:
+ gman->max_gmr_ids = dev_priv->max_gmr_ids;
+ gman->max_gmr_pages = dev_priv->max_gmr_pages;
+ break;
+ case VMW_PL_MOB:
+ gman->max_gmr_ids = VMWGFX_NUM_MOB;
+ gman->max_gmr_pages = dev_priv->max_mob_pages;
+ break;
+ default:
+ BUG();
+ }
man->priv = (void *) gman;
return 0;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
index a51f48e3e917..f9881f9e62bd 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
@@ -29,12 +29,18 @@
#include <drm/vmwgfx_drm.h>
#include "vmwgfx_kms.h"
+struct svga_3d_compat_cap {
+ SVGA3dCapsRecordHeader header;
+ SVGA3dCapPair pairs[SVGA3D_DEVCAP_MAX];
+};
+
int vmw_getparam_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct vmw_private *dev_priv = vmw_priv(dev);
struct drm_vmw_getparam_arg *param =
(struct drm_vmw_getparam_arg *)data;
+ struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
switch (param->param) {
case DRM_VMW_PARAM_NUM_STREAMS:
@@ -53,13 +59,18 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data,
param->value = dev_priv->fifo.capabilities;
break;
case DRM_VMW_PARAM_MAX_FB_SIZE:
- param->value = dev_priv->vram_size;
+ param->value = dev_priv->prim_bb_mem;
break;
case DRM_VMW_PARAM_FIFO_HW_VERSION:
{
__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
const struct vmw_fifo_state *fifo = &dev_priv->fifo;
+ if ((dev_priv->capabilities & SVGA_CAP_GBOBJECTS)) {
+ param->value = SVGA3D_HWVERSION_WS8_B1;
+ break;
+ }
+
param->value =
ioread32(fifo_mem +
((fifo->capabilities &
@@ -68,6 +79,29 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data,
SVGA_FIFO_3D_HWVERSION));
break;
}
+ case DRM_VMW_PARAM_MAX_SURF_MEMORY:
+ if ((dev_priv->capabilities & SVGA_CAP_GBOBJECTS) &&
+ !vmw_fp->gb_aware)
+ param->value = dev_priv->max_mob_pages * PAGE_SIZE / 2;
+ else
+ param->value = dev_priv->memory_size;
+ break;
+ case DRM_VMW_PARAM_3D_CAPS_SIZE:
+ if ((dev_priv->capabilities & SVGA_CAP_GBOBJECTS) &&
+ vmw_fp->gb_aware)
+ param->value = SVGA3D_DEVCAP_MAX * sizeof(uint32_t);
+ else if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS)
+ param->value = sizeof(struct svga_3d_compat_cap) +
+ sizeof(uint32_t);
+ else
+ param->value = (SVGA_FIFO_3D_CAPS_LAST -
+ SVGA_FIFO_3D_CAPS + 1) *
+ sizeof(uint32_t);
+ break;
+ case DRM_VMW_PARAM_MAX_MOB_MEMORY:
+ vmw_fp->gb_aware = true;
+ param->value = dev_priv->max_mob_pages * PAGE_SIZE;
+ break;
default:
DRM_ERROR("Illegal vmwgfx get param request: %d\n",
param->param);
@@ -77,6 +111,38 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data,
return 0;
}
+static int vmw_fill_compat_cap(struct vmw_private *dev_priv, void *bounce,
+ size_t size)
+{
+ struct svga_3d_compat_cap *compat_cap =
+ (struct svga_3d_compat_cap *) bounce;
+ unsigned int i;
+ size_t pair_offset = offsetof(struct svga_3d_compat_cap, pairs);
+ unsigned int max_size;
+
+ if (size < pair_offset)
+ return -EINVAL;
+
+ max_size = (size - pair_offset) / sizeof(SVGA3dCapPair);
+
+ if (max_size > SVGA3D_DEVCAP_MAX)
+ max_size = SVGA3D_DEVCAP_MAX;
+
+ compat_cap->header.length =
+ (pair_offset + max_size * sizeof(SVGA3dCapPair)) / sizeof(u32);
+ compat_cap->header.type = SVGA3DCAPS_RECORD_DEVCAPS;
+
+ mutex_lock(&dev_priv->hw_mutex);
+ for (i = 0; i < max_size; ++i) {
+ vmw_write(dev_priv, SVGA_REG_DEV_CAP, i);
+ compat_cap->pairs[i][0] = i;
+ compat_cap->pairs[i][1] = vmw_read(dev_priv, SVGA_REG_DEV_CAP);
+ }
+ mutex_unlock(&dev_priv->hw_mutex);
+
+ return 0;
+}
+
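
vmw_fill_compat_cap sizes the returned pair array purely from the bounce buffer: subtract the record header (offsetof the pairs member), divide by the pair size, and clamp to the device cap count. A runnable sketch of that arithmetic, with a made-up DEVCAP_MAX and pair layout:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define DEVCAP_MAX 256			/* hypothetical cap count */

typedef uint32_t cap_pair[2];		/* {index, value}, like SVGA3dCapPair */

struct compat_cap {
	struct { uint32_t length; uint32_t type; } header;
	cap_pair pairs[DEVCAP_MAX];
};

/* How many pairs fit in a bounce buffer of 'size' bytes? */
static size_t compat_cap_pairs(size_t size)
{
	size_t pair_offset = offsetof(struct compat_cap, pairs);
	size_t max_size;

	if (size < pair_offset)
		return 0;
	max_size = (size - pair_offset) / sizeof(cap_pair);
	return max_size > DEVCAP_MAX ? DEVCAP_MAX : max_size;
}

int main(void)
{
	printf("%zu pairs fit in 100 bytes\n", compat_cap_pairs(100));
	printf("%zu pairs fit in 64 KiB\n", compat_cap_pairs(64 * 1024));
	return 0;
}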
int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
@@ -89,29 +155,58 @@ int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data,
void __user *buffer = (void __user *)((unsigned long)(arg->buffer));
void *bounce;
int ret;
+ bool gb_objects = !!(dev_priv->capabilities & SVGA_CAP_GBOBJECTS);
+ struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
if (unlikely(arg->pad64 != 0)) {
DRM_ERROR("Illegal GET_3D_CAP argument.\n");
return -EINVAL;
}
- size = (SVGA_FIFO_3D_CAPS_LAST - SVGA_FIFO_3D_CAPS + 1) << 2;
+ if (gb_objects && vmw_fp->gb_aware)
+ size = SVGA3D_DEVCAP_MAX * sizeof(uint32_t);
+ else if (gb_objects)
+ size = sizeof(struct svga_3d_compat_cap) + sizeof(uint32_t);
+ else
+ size = (SVGA_FIFO_3D_CAPS_LAST - SVGA_FIFO_3D_CAPS + 1) *
+ sizeof(uint32_t);
if (arg->max_size < size)
size = arg->max_size;
- bounce = vmalloc(size);
+ bounce = vzalloc(size);
if (unlikely(bounce == NULL)) {
DRM_ERROR("Failed to allocate bounce buffer for 3D caps.\n");
return -ENOMEM;
}
- fifo_mem = dev_priv->mmio_virt;
- memcpy_fromio(bounce, &fifo_mem[SVGA_FIFO_3D_CAPS], size);
+ if (gb_objects && vmw_fp->gb_aware) {
+ int i, num;
+ uint32_t *bounce32 = (uint32_t *) bounce;
+
+ num = size / sizeof(uint32_t);
+ if (num > SVGA3D_DEVCAP_MAX)
+ num = SVGA3D_DEVCAP_MAX;
+
+ mutex_lock(&dev_priv->hw_mutex);
+ for (i = 0; i < num; ++i) {
+ vmw_write(dev_priv, SVGA_REG_DEV_CAP, i);
+ *bounce32++ = vmw_read(dev_priv, SVGA_REG_DEV_CAP);
+ }
+ mutex_unlock(&dev_priv->hw_mutex);
+ } else if (gb_objects) {
+ ret = vmw_fill_compat_cap(dev_priv, bounce, size);
+ if (unlikely(ret != 0))
+ goto out_err;
+ } else {
+ fifo_mem = dev_priv->mmio_virt;
+ memcpy_fromio(bounce, &fifo_mem[SVGA_FIFO_3D_CAPS], size);
+ }
ret = copy_to_user(buffer, bounce, size);
if (ret)
ret = -EFAULT;
+out_err:
vfree(bounce);
if (unlikely(ret != 0))
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
index 4640adbcaf91..0c423766c441 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
@@ -30,7 +30,7 @@
#define VMW_FENCE_WRAP (1 << 24)
-irqreturn_t vmw_irq_handler(DRM_IRQ_ARGS)
+irqreturn_t vmw_irq_handler(int irq, void *arg)
{
struct drm_device *dev = (struct drm_device *)arg;
struct vmw_private *dev_priv = vmw_priv(dev);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index 03f1c2038631..8a650413dea5 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -40,7 +40,7 @@ struct vmw_clip_rect {
* Clip @num_rects number of @rects against @clip storing the
* results in @out_rects and the number of passed rects in @out_num.
*/
-void vmw_clip_cliprects(struct drm_clip_rect *rects,
+static void vmw_clip_cliprects(struct drm_clip_rect *rects,
int num_rects,
struct vmw_clip_rect clip,
SVGASignedRect *out_rects,
@@ -423,7 +423,7 @@ struct vmw_framebuffer_surface {
struct drm_master *master;
};
-void vmw_framebuffer_surface_destroy(struct drm_framebuffer *framebuffer)
+static void vmw_framebuffer_surface_destroy(struct drm_framebuffer *framebuffer)
{
struct vmw_framebuffer_surface *vfbs =
vmw_framebuffer_to_vfbs(framebuffer);
@@ -589,7 +589,7 @@ out_free_tmp:
return ret;
}
-int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer,
+static int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer,
struct drm_file *file_priv,
unsigned flags, unsigned color,
struct drm_clip_rect *clips,
@@ -609,9 +609,13 @@ int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer,
if (!dev_priv->sou_priv)
return -EINVAL;
+ drm_modeset_lock_all(dev_priv->dev);
+
ret = ttm_read_lock(&vmaster->lock, true);
- if (unlikely(ret != 0))
+ if (unlikely(ret != 0)) {
+ drm_modeset_unlock_all(dev_priv->dev);
return ret;
+ }
if (!num_clips) {
num_clips = 1;
@@ -629,6 +633,9 @@ int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer,
clips, num_clips, inc, NULL);
ttm_read_unlock(&vmaster->lock);
+
+ drm_modeset_unlock_all(dev_priv->dev);
+
return 0;
}
@@ -665,9 +672,9 @@ static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
if (unlikely(surface->mip_levels[0] != 1 ||
surface->num_sizes != 1 ||
- surface->sizes[0].width < mode_cmd->width ||
- surface->sizes[0].height < mode_cmd->height ||
- surface->sizes[0].depth != 1)) {
+ surface->base_size.width < mode_cmd->width ||
+ surface->base_size.height < mode_cmd->height ||
+ surface->base_size.depth != 1)) {
DRM_ERROR("Incompatible surface dimensions "
"for requested mode.\n");
return -EINVAL;
@@ -754,7 +761,7 @@ struct vmw_framebuffer_dmabuf {
struct vmw_dma_buffer *buffer;
};
-void vmw_framebuffer_dmabuf_destroy(struct drm_framebuffer *framebuffer)
+static void vmw_framebuffer_dmabuf_destroy(struct drm_framebuffer *framebuffer)
{
struct vmw_framebuffer_dmabuf *vfbd =
vmw_framebuffer_to_vfbd(framebuffer);
@@ -940,7 +947,7 @@ static int do_dmabuf_dirty_sou(struct drm_file *file_priv,
return ret;
}
-int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer *framebuffer,
+static int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer *framebuffer,
struct drm_file *file_priv,
unsigned flags, unsigned color,
struct drm_clip_rect *clips,
@@ -953,9 +960,13 @@ int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer *framebuffer,
struct drm_clip_rect norect;
int ret, increment = 1;
+ drm_modeset_lock_all(dev_priv->dev);
+
ret = ttm_read_lock(&vmaster->lock, true);
- if (unlikely(ret != 0))
+ if (unlikely(ret != 0)) {
+ drm_modeset_unlock_all(dev_priv->dev);
return ret;
+ }
if (!num_clips) {
num_clips = 1;
@@ -979,6 +990,9 @@ int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer *framebuffer,
}
ttm_read_unlock(&vmaster->lock);
+
+ drm_modeset_unlock_all(dev_priv->dev);
+
return ret;
}
@@ -1631,7 +1645,7 @@ bool vmw_kms_validate_mode_vram(struct vmw_private *dev_priv,
uint32_t pitch,
uint32_t height)
{
- return ((u64) pitch * (u64) height) < (u64) dev_priv->vram_size;
+ return ((u64) pitch * (u64) height) < (u64) dev_priv->prim_bb_mem;
}
@@ -1663,7 +1677,7 @@ void vmw_disable_vblank(struct drm_device *dev, int crtc)
* Small shared kms functions.
*/
-int vmw_du_update_layout(struct vmw_private *dev_priv, unsigned num,
+static int vmw_du_update_layout(struct vmw_private *dev_priv, unsigned num,
struct drm_vmw_rect *rects)
{
struct drm_device *dev = dev_priv->dev;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c b/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
new file mode 100644
index 000000000000..d4a5a19cb8c3
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
@@ -0,0 +1,653 @@
+/**************************************************************************
+ *
+ * Copyright © 2012 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "vmwgfx_drv.h"
+
+/*
+ * If we set up the screen target otable, screen objects stop working.
+ */
+
+#define VMW_OTABLE_SETUP_SUB ((VMWGFX_ENABLE_SCREEN_TARGET_OTABLE) ? 0 : 1)
+
+#ifdef CONFIG_64BIT
+#define VMW_PPN_SIZE 8
+#define VMW_MOBFMT_PTDEPTH_0 SVGA3D_MOBFMT_PTDEPTH64_0
+#define VMW_MOBFMT_PTDEPTH_1 SVGA3D_MOBFMT_PTDEPTH64_1
+#define VMW_MOBFMT_PTDEPTH_2 SVGA3D_MOBFMT_PTDEPTH64_2
+#else
+#define VMW_PPN_SIZE 4
+#define VMW_MOBFMT_PTDEPTH_0 SVGA3D_MOBFMT_PTDEPTH_0
+#define VMW_MOBFMT_PTDEPTH_1 SVGA3D_MOBFMT_PTDEPTH_1
+#define VMW_MOBFMT_PTDEPTH_2 SVGA3D_MOBFMT_PTDEPTH_2
+#endif
+
+/*
+ * struct vmw_mob - Structure containing page table and metadata for a
+ * Guest Memory OBject.
+ *
+ * @pt_bo: Buffer object holding the page table pages, if any.
+ * @num_pages: Number of pages that make up the page table.
+ * @pt_level: The indirection level of the page table, 0-2.
+ * @pt_root_page: DMA address of the level 0 page of the page table.
+ * @id: Device id of the mob.
+ */
+struct vmw_mob {
+ struct ttm_buffer_object *pt_bo;
+ unsigned long num_pages;
+ unsigned pt_level;
+ dma_addr_t pt_root_page;
+ uint32_t id;
+};
+
+/*
+ * struct vmw_otable - Guest Memory OBject table metadata
+ *
+ * @size: Size of the table (page-aligned).
+ * @page_table: Pointer to a struct vmw_mob holding the page table.
+ */
+struct vmw_otable {
+ unsigned long size;
+ struct vmw_mob *page_table;
+};
+
+static int vmw_mob_pt_populate(struct vmw_private *dev_priv,
+ struct vmw_mob *mob);
+static void vmw_mob_pt_setup(struct vmw_mob *mob,
+ struct vmw_piter data_iter,
+ unsigned long num_data_pages);
+
+/*
+ * vmw_setup_otable_base - Issue an object table base setup command to
+ * the device
+ *
+ * @dev_priv: Pointer to a device private structure
+ * @type: Type of object table base
+ * @offset: Start of table offset into dev_priv::otable_bo
+ * @otable: Pointer to otable metadata.
+ *
+ * This function returns -ENOMEM if it fails to reserve fifo space,
+ * and may block waiting for fifo space.
+ */
+static int vmw_setup_otable_base(struct vmw_private *dev_priv,
+ SVGAOTableType type,
+ unsigned long offset,
+ struct vmw_otable *otable)
+{
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdSetOTableBase64 body;
+ } *cmd;
+ struct vmw_mob *mob;
+ const struct vmw_sg_table *vsgt;
+ struct vmw_piter iter;
+ int ret;
+
+ BUG_ON(otable->page_table != NULL);
+
+ vsgt = vmw_bo_sg_table(dev_priv->otable_bo);
+ vmw_piter_start(&iter, vsgt, offset >> PAGE_SHIFT);
+ WARN_ON(!vmw_piter_next(&iter));
+
+ mob = vmw_mob_create(otable->size >> PAGE_SHIFT);
+ if (unlikely(mob == NULL)) {
+ DRM_ERROR("Failed creating OTable page table.\n");
+ return -ENOMEM;
+ }
+
+ if (otable->size <= PAGE_SIZE) {
+ mob->pt_level = VMW_MOBFMT_PTDEPTH_0;
+ mob->pt_root_page = vmw_piter_dma_addr(&iter);
+ } else if (vsgt->num_regions == 1) {
+ mob->pt_level = SVGA3D_MOBFMT_RANGE;
+ mob->pt_root_page = vmw_piter_dma_addr(&iter);
+ } else {
+ ret = vmw_mob_pt_populate(dev_priv, mob);
+ if (unlikely(ret != 0))
+ goto out_no_populate;
+
+ vmw_mob_pt_setup(mob, iter, otable->size >> PAGE_SHIFT);
+ mob->pt_level += VMW_MOBFMT_PTDEPTH_1 - SVGA3D_MOBFMT_PTDEPTH_1;
+ }
+
+ cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+ if (unlikely(cmd == NULL)) {
+ DRM_ERROR("Failed reserving FIFO space for OTable setup.\n");
+ ret = -ENOMEM;
+ goto out_no_fifo;
+ }
+
+ memset(cmd, 0, sizeof(*cmd));
+ cmd->header.id = SVGA_3D_CMD_SET_OTABLE_BASE64;
+ cmd->header.size = sizeof(cmd->body);
+ cmd->body.type = type;
+ cmd->body.baseAddress = cpu_to_le64(mob->pt_root_page >> PAGE_SHIFT);
+ cmd->body.sizeInBytes = otable->size;
+ cmd->body.validSizeInBytes = 0;
+ cmd->body.ptDepth = mob->pt_level;
+
+ /*
+	 * The device doesn't support a page-table depth of 2 here, but
+	 * the otable size is determined at compile-time, so this BUG
+	 * shouldn't trigger randomly.
+ */
+ BUG_ON(mob->pt_level == VMW_MOBFMT_PTDEPTH_2);
+
+ vmw_fifo_commit(dev_priv, sizeof(*cmd));
+ otable->page_table = mob;
+
+ return 0;
+
+out_no_fifo:
+out_no_populate:
+ vmw_mob_destroy(mob);
+ return ret;
+}
+
+/*
+ * vmw_takedown_otable_base - Issue an object table base takedown command
+ * to the device
+ *
+ * @dev_priv: Pointer to a device private structure
+ * @type: Type of object table base
+ * @otable: Pointer to the otable metadata.
+ *
+ */
+static void vmw_takedown_otable_base(struct vmw_private *dev_priv,
+ SVGAOTableType type,
+ struct vmw_otable *otable)
+{
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdSetOTableBase body;
+ } *cmd;
+ struct ttm_buffer_object *bo;
+
+ if (otable->page_table == NULL)
+ return;
+
+ bo = otable->page_table->pt_bo;
+ cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+	if (unlikely(cmd == NULL)) {
+		DRM_ERROR("Failed reserving FIFO space for OTable "
+			  "takedown.\n");
+	} else {
+		memset(cmd, 0, sizeof(*cmd));
+		cmd->header.id = SVGA_3D_CMD_SET_OTABLE_BASE;
+		cmd->header.size = sizeof(cmd->body);
+		cmd->body.type = type;
+		cmd->body.baseAddress = 0;
+		cmd->body.sizeInBytes = 0;
+		cmd->body.validSizeInBytes = 0;
+		cmd->body.ptDepth = SVGA3D_MOBFMT_INVALID;
+		vmw_fifo_commit(dev_priv, sizeof(*cmd));
+	}
+
+ if (bo) {
+ int ret;
+
+ ret = ttm_bo_reserve(bo, false, true, false, NULL);
+ BUG_ON(ret != 0);
+
+ vmw_fence_single_bo(bo, NULL);
+ ttm_bo_unreserve(bo);
+ }
+
+ vmw_mob_destroy(otable->page_table);
+ otable->page_table = NULL;
+}
+
+/*
+ * vmw_otables_setup - Set up guest backed memory object tables
+ *
+ * @dev_priv: Pointer to a device private structure
+ *
+ * Takes care of the device guest backed surface initialization by
+ * setting up the guest backed memory object tables.
+ * Returns 0 on success and various error codes on failure. A successful return
+ * means the object tables can be taken down using the vmw_otables_takedown
+ * function.
+ */
+int vmw_otables_setup(struct vmw_private *dev_priv)
+{
+ unsigned long offset;
+ unsigned long bo_size;
+ struct vmw_otable *otables;
+ SVGAOTableType i;
+ int ret;
+
+ otables = kzalloc(SVGA_OTABLE_DX9_MAX * sizeof(*otables),
+ GFP_KERNEL);
+ if (unlikely(otables == NULL)) {
+ DRM_ERROR("Failed to allocate space for otable "
+ "metadata.\n");
+ return -ENOMEM;
+ }
+
+ otables[SVGA_OTABLE_MOB].size =
+ VMWGFX_NUM_MOB * SVGA3D_OTABLE_MOB_ENTRY_SIZE;
+ otables[SVGA_OTABLE_SURFACE].size =
+ VMWGFX_NUM_GB_SURFACE * SVGA3D_OTABLE_SURFACE_ENTRY_SIZE;
+ otables[SVGA_OTABLE_CONTEXT].size =
+ VMWGFX_NUM_GB_CONTEXT * SVGA3D_OTABLE_CONTEXT_ENTRY_SIZE;
+ otables[SVGA_OTABLE_SHADER].size =
+ VMWGFX_NUM_GB_SHADER * SVGA3D_OTABLE_SHADER_ENTRY_SIZE;
+ otables[SVGA_OTABLE_SCREEN_TARGET].size =
+ VMWGFX_NUM_GB_SCREEN_TARGET *
+ SVGA3D_OTABLE_SCREEN_TARGET_ENTRY_SIZE;
+
+ bo_size = 0;
+ for (i = 0; i < SVGA_OTABLE_DX9_MAX; ++i) {
+ otables[i].size =
+ (otables[i].size + PAGE_SIZE - 1) & PAGE_MASK;
+ bo_size += otables[i].size;
+ }
+
+ ret = ttm_bo_create(&dev_priv->bdev, bo_size,
+ ttm_bo_type_device,
+ &vmw_sys_ne_placement,
+ 0, false, NULL,
+ &dev_priv->otable_bo);
+
+ if (unlikely(ret != 0))
+ goto out_no_bo;
+
+ ret = ttm_bo_reserve(dev_priv->otable_bo, false, true, false, NULL);
+ BUG_ON(ret != 0);
+ ret = vmw_bo_driver.ttm_tt_populate(dev_priv->otable_bo->ttm);
+ if (unlikely(ret != 0))
+ goto out_unreserve;
+ ret = vmw_bo_map_dma(dev_priv->otable_bo);
+ if (unlikely(ret != 0))
+ goto out_unreserve;
+
+ ttm_bo_unreserve(dev_priv->otable_bo);
+
+ offset = 0;
+ for (i = 0; i < SVGA_OTABLE_DX9_MAX - VMW_OTABLE_SETUP_SUB; ++i) {
+ ret = vmw_setup_otable_base(dev_priv, i, offset,
+ &otables[i]);
+ if (unlikely(ret != 0))
+ goto out_no_setup;
+ offset += otables[i].size;
+ }
+
+ dev_priv->otables = otables;
+ return 0;
+
+out_unreserve:
+ ttm_bo_unreserve(dev_priv->otable_bo);
+out_no_setup:
+ for (i = 0; i < SVGA_OTABLE_DX9_MAX - VMW_OTABLE_SETUP_SUB; ++i)
+ vmw_takedown_otable_base(dev_priv, i, &otables[i]);
+
+ ttm_bo_unref(&dev_priv->otable_bo);
+out_no_bo:
+ kfree(otables);
+ return ret;
+}
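
vmw_otables_setup rounds every object table up to a page boundary, adds the sizes into a single buffer object, and then hands each table a sequential offset into that object. A sketch of the align-and-pack step with made-up table sizes:

#include <stdio.h>

#define PAGE_SIZE 4096UL
#define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

int main(void)
{
	/* hypothetical raw table sizes in bytes */
	unsigned long sizes[] = { 10000, 300, 123456, 4096 };
	unsigned long offset = 0;
	unsigned long i;

	for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); ++i) {
		unsigned long aligned = PAGE_ALIGN(sizes[i]);

		printf("table %lu: size %lu -> %lu, offset %lu\n",
		       i, sizes[i], aligned, offset);
		offset += aligned;	/* next table starts on a page boundary */
	}
	printf("backing buffer object needs %lu bytes\n", offset);
	return 0;
}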
+
+
+/*
+ * vmw_otables_takedown - Take down guest backed memory object tables
+ *
+ * @dev_priv: Pointer to a device private structure
+ *
+ * Take down the Guest Memory Object tables.
+ */
+void vmw_otables_takedown(struct vmw_private *dev_priv)
+{
+ SVGAOTableType i;
+ struct ttm_buffer_object *bo = dev_priv->otable_bo;
+ int ret;
+
+ for (i = 0; i < SVGA_OTABLE_DX9_MAX - VMW_OTABLE_SETUP_SUB; ++i)
+ vmw_takedown_otable_base(dev_priv, i,
+ &dev_priv->otables[i]);
+
+ ret = ttm_bo_reserve(bo, false, true, false, NULL);
+ BUG_ON(ret != 0);
+
+ vmw_fence_single_bo(bo, NULL);
+ ttm_bo_unreserve(bo);
+
+ ttm_bo_unref(&dev_priv->otable_bo);
+ kfree(dev_priv->otables);
+ dev_priv->otables = NULL;
+}
+
+
+/*
+ * vmw_mob_calculate_pt_pages - Calculate the number of page table pages
+ * needed for a guest backed memory object.
+ *
+ * @data_pages: Number of data pages in the memory object buffer.
+ */
+static unsigned long vmw_mob_calculate_pt_pages(unsigned long data_pages)
+{
+ unsigned long data_size = data_pages * PAGE_SIZE;
+ unsigned long tot_size = 0;
+
+ while (likely(data_size > PAGE_SIZE)) {
+ data_size = DIV_ROUND_UP(data_size, PAGE_SIZE);
+ data_size *= VMW_PPN_SIZE;
+ tot_size += (data_size + PAGE_SIZE - 1) & PAGE_MASK;
+ }
+
+ return tot_size >> PAGE_SHIFT;
+}
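
The loop in vmw_mob_calculate_pt_pages adds one page-table level per iteration: the data size is divided by the page size, multiplied by the entry size, and the page-aligned result of each level is accumulated. The same computation as a stand-alone program, with the page and entry sizes hard-coded to the 64-bit case:

#include <stdio.h>

#define PAGE_SIZE  4096UL
#define PAGE_SHIFT 12
#define PPN_SIZE   8UL	/* 64-bit page-table entries, as with CONFIG_64BIT */

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

static unsigned long calculate_pt_pages(unsigned long data_pages)
{
	unsigned long data_size = data_pages * PAGE_SIZE;
	unsigned long tot_size = 0;

	/* Each iteration adds one page-table level until it fits in a page. */
	while (data_size > PAGE_SIZE) {
		data_size = DIV_ROUND_UP(data_size, PAGE_SIZE);
		data_size *= PPN_SIZE;
		tot_size += (data_size + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1);
	}
	return tot_size >> PAGE_SHIFT;
}

int main(void)
{
	printf("1 data page       -> %lu PT pages\n", calculate_pt_pages(1));		/* 0 */
	printf("512 data pages    -> %lu PT pages\n", calculate_pt_pages(512));		/* 1 */
	printf("262144 data pages -> %lu PT pages\n", calculate_pt_pages(1UL << 18));	/* 513 */
	return 0;
}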
+
+/*
+ * vmw_mob_create - Create a mob, but don't populate it.
+ *
+ * @data_pages: Number of data pages of the underlying buffer object.
+ */
+struct vmw_mob *vmw_mob_create(unsigned long data_pages)
+{
+ struct vmw_mob *mob = kzalloc(sizeof(*mob), GFP_KERNEL);
+
+ if (unlikely(mob == NULL))
+ return NULL;
+
+ mob->num_pages = vmw_mob_calculate_pt_pages(data_pages);
+
+ return mob;
+}
+
+/*
+ * vmw_mob_pt_populate - Populate the mob pagetable
+ *
+ * @mob: Pointer to the mob whose pagetable we want to populate.
+ *
+ * This function allocates memory to be used for the pagetable, and
+ * adjusts TTM memory accounting accordingly. Returns -ENOMEM if
+ * memory resources aren't sufficient. The allocation may cause TTM
+ * buffer objects to be swapped out via the TTM memory accounting
+ * machinery.
+ */
+static int vmw_mob_pt_populate(struct vmw_private *dev_priv,
+ struct vmw_mob *mob)
+{
+ int ret;
+ BUG_ON(mob->pt_bo != NULL);
+
+ ret = ttm_bo_create(&dev_priv->bdev, mob->num_pages * PAGE_SIZE,
+ ttm_bo_type_device,
+ &vmw_sys_ne_placement,
+ 0, false, NULL, &mob->pt_bo);
+ if (unlikely(ret != 0))
+ return ret;
+
+ ret = ttm_bo_reserve(mob->pt_bo, false, true, false, NULL);
+
+ BUG_ON(ret != 0);
+ ret = vmw_bo_driver.ttm_tt_populate(mob->pt_bo->ttm);
+ if (unlikely(ret != 0))
+ goto out_unreserve;
+ ret = vmw_bo_map_dma(mob->pt_bo);
+ if (unlikely(ret != 0))
+ goto out_unreserve;
+
+ ttm_bo_unreserve(mob->pt_bo);
+
+ return 0;
+
+out_unreserve:
+ ttm_bo_unreserve(mob->pt_bo);
+ ttm_bo_unref(&mob->pt_bo);
+
+ return ret;
+}
+
+/**
+ * vmw_mob_assign_ppn - Assign a value to a page table entry
+ *
+ * @addr: Pointer to pointer to page table entry.
+ * @val: The DMA address whose page frame number is written to the entry.
+ *
+ * Assigns a value to a page table entry pointed to by *@addr and increments
+ * *@addr according to the page table entry size.
+ */
+#if (VMW_PPN_SIZE == 8)
+static void vmw_mob_assign_ppn(__le32 **addr, dma_addr_t val)
+{
+ *((__le64 *) *addr) = cpu_to_le64(val >> PAGE_SHIFT);
+ *addr += 2;
+}
+#else
+static void vmw_mob_assign_ppn(__le32 **addr, dma_addr_t val)
+{
+ *(*addr)++ = cpu_to_le32(val >> PAGE_SHIFT);
+}
+#endif
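
Both variants of vmw_mob_assign_ppn write one little-endian page frame number and advance the caller's 32-bit cursor by exactly one entry, which is two __le32 slots in the 64-bit case. A user-space sketch of the same cursor handling, using memcpy instead of the kernel's cpu_to_le helpers and an assumed page shift:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define PAGE_SHIFT 12
#define PPN_SIZE   8	/* set to 4 to model 32-bit page-table entries */

/* Write one page-table entry and advance the 32-bit cursor by one entry. */
static void assign_ppn(uint32_t **addr, uint64_t dma_addr)
{
#if (PPN_SIZE == 8)
	uint64_t ppn = dma_addr >> PAGE_SHIFT;	/* kernel: cpu_to_le64() */

	memcpy(*addr, &ppn, sizeof(ppn));
	*addr += 2;
#else
	*(*addr)++ = (uint32_t)(dma_addr >> PAGE_SHIFT);	/* cpu_to_le32() */
#endif
}

int main(void)
{
	uint32_t page[1024] = { 0 };	/* one 4 KiB page-table page */
	uint32_t *cursor = page;
	uint64_t dma[] = { 0x1000, 0x2000, 0xabc000 };
	unsigned int i;

	for (i = 0; i < 3; ++i)
		assign_ppn(&cursor, dma[i]);

	printf("wrote %ld 32-bit slots for 3 entries\n", (long)(cursor - page));
	return 0;
}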
+
+/*
+ * vmw_mob_build_pt - Build a pagetable
+ *
+ * @data_iter: Page iterator over the underlying buffer object's data pages.
+ * @num_data_pages: Number of buffer object data pages.
+ * @pt_iter: Page iterator over the page table pages.
+ *
+ * Returns the number of page table pages actually used.
+ * Uses atomic kmaps of highmem pages to avoid TLB thrashing.
+ */
+static unsigned long vmw_mob_build_pt(struct vmw_piter *data_iter,
+ unsigned long num_data_pages,
+ struct vmw_piter *pt_iter)
+{
+ unsigned long pt_size = num_data_pages * VMW_PPN_SIZE;
+ unsigned long num_pt_pages = DIV_ROUND_UP(pt_size, PAGE_SIZE);
+ unsigned long pt_page;
+ __le32 *addr, *save_addr;
+ unsigned long i;
+ struct page *page;
+
+ for (pt_page = 0; pt_page < num_pt_pages; ++pt_page) {
+ page = vmw_piter_page(pt_iter);
+
+ save_addr = addr = kmap_atomic(page);
+
+ for (i = 0; i < PAGE_SIZE / VMW_PPN_SIZE; ++i) {
+ vmw_mob_assign_ppn(&addr,
+ vmw_piter_dma_addr(data_iter));
+ if (unlikely(--num_data_pages == 0))
+ break;
+ WARN_ON(!vmw_piter_next(data_iter));
+ }
+ kunmap_atomic(save_addr);
+ vmw_piter_next(pt_iter);
+ }
+
+ return num_pt_pages;
+}
+
+/*
+ * vmw_mob_pt_setup - Set up a multilevel mob pagetable
+ *
+ * @mob: Pointer to a mob whose page table needs setting up.
+ * @data_iter: Page iterator over the buffer object's data pages.
+ * @num_data_pages: Number of buffer object data pages.
+ *
+ * Uses tail recursion to set up a multilevel mob page table.
+ */
+static void vmw_mob_pt_setup(struct vmw_mob *mob,
+ struct vmw_piter data_iter,
+ unsigned long num_data_pages)
+{
+ unsigned long num_pt_pages = 0;
+ struct ttm_buffer_object *bo = mob->pt_bo;
+ struct vmw_piter save_pt_iter;
+ struct vmw_piter pt_iter;
+ const struct vmw_sg_table *vsgt;
+ int ret;
+
+ ret = ttm_bo_reserve(bo, false, true, false, NULL);
+ BUG_ON(ret != 0);
+
+ vsgt = vmw_bo_sg_table(bo);
+ vmw_piter_start(&pt_iter, vsgt, 0);
+ BUG_ON(!vmw_piter_next(&pt_iter));
+ mob->pt_level = 0;
+ while (likely(num_data_pages > 1)) {
+ ++mob->pt_level;
+ BUG_ON(mob->pt_level > 2);
+ save_pt_iter = pt_iter;
+ num_pt_pages = vmw_mob_build_pt(&data_iter, num_data_pages,
+ &pt_iter);
+ data_iter = save_pt_iter;
+ num_data_pages = num_pt_pages;
+ }
+
+ mob->pt_root_page = vmw_piter_dma_addr(&save_pt_iter);
+ ttm_bo_unreserve(bo);
+}
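
vmw_mob_pt_setup builds the table bottom-up: each pass packs the previous level's page addresses into fresh page-table pages and pt_level counts the passes, stopping once a single page describes everything. A sketch of just that level arithmetic, assuming 512 entries per page (64-bit entries in 4 KiB pages):

#include <stdio.h>

#define PAGE_SIZE 4096UL
#define PPN_SIZE  8UL
#define ENTRIES_PER_PAGE (PAGE_SIZE / PPN_SIZE)	/* 512 */

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned long num_data_pages = 100000;	/* ~400 MiB backing buffer */
	unsigned long pages = num_data_pages;
	unsigned int pt_level = 0;

	/* Each pass mirrors one call of vmw_mob_build_pt(). */
	while (pages > 1) {
		++pt_level;
		pages = DIV_ROUND_UP(pages, ENTRIES_PER_PAGE);
		printf("level %u: %lu page-table page(s)\n", pt_level, pages);
	}
	printf("page-table depth: %u\n", pt_level);
	return 0;
}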
+
+/*
+ * vmw_mob_destroy - Destroy a mob, unpopulating first if necessary.
+ *
+ * @mob: Pointer to a mob to destroy.
+ */
+void vmw_mob_destroy(struct vmw_mob *mob)
+{
+ if (mob->pt_bo)
+ ttm_bo_unref(&mob->pt_bo);
+ kfree(mob);
+}
+
+/*
+ * vmw_mob_unbind - Hide a mob from the device.
+ *
+ * @dev_priv: Pointer to a device private.
+ * @mob: Pointer to the mob to unbind.
+ */
+void vmw_mob_unbind(struct vmw_private *dev_priv,
+ struct vmw_mob *mob)
+{
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDestroyGBMob body;
+ } *cmd;
+ int ret;
+ struct ttm_buffer_object *bo = mob->pt_bo;
+
+ if (bo) {
+ ret = ttm_bo_reserve(bo, false, true, false, NULL);
+ /*
+	 * No one else should be using this buffer.
+ */
+ BUG_ON(ret != 0);
+ }
+
+ cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+	if (unlikely(cmd == NULL)) {
+		DRM_ERROR("Failed reserving FIFO space for Memory "
+			  "Object unbinding.\n");
+	} else {
+		cmd->header.id = SVGA_3D_CMD_DESTROY_GB_MOB;
+		cmd->header.size = sizeof(cmd->body);
+		cmd->body.mobid = mob->id;
+		vmw_fifo_commit(dev_priv, sizeof(*cmd));
+	}
+ if (bo) {
+ vmw_fence_single_bo(bo, NULL);
+ ttm_bo_unreserve(bo);
+ }
+ vmw_3d_resource_dec(dev_priv, false);
+}
+
+/*
+ * vmw_mob_bind - Make a mob visible to the device after first
+ * populating it if necessary.
+ *
+ * @dev_priv: Pointer to a device private.
+ * @mob: Pointer to the mob we're making visible.
+ * @vsgt: Pointer to a struct vmw_sg_table describing the data pages of
+ * the underlying buffer object.
+ * @num_data_pages: Number of data pages of the underlying buffer
+ * object.
+ * @mob_id: Device id of the mob to bind
+ *
+ * This function is intended to be interfaced with the ttm_tt backend
+ * code.
+ */
+int vmw_mob_bind(struct vmw_private *dev_priv,
+ struct vmw_mob *mob,
+ const struct vmw_sg_table *vsgt,
+ unsigned long num_data_pages,
+ int32_t mob_id)
+{
+ int ret;
+ bool pt_set_up = false;
+ struct vmw_piter data_iter;
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDefineGBMob64 body;
+ } *cmd;
+
+ mob->id = mob_id;
+ vmw_piter_start(&data_iter, vsgt, 0);
+ if (unlikely(!vmw_piter_next(&data_iter)))
+ return 0;
+
+ if (likely(num_data_pages == 1)) {
+ mob->pt_level = VMW_MOBFMT_PTDEPTH_0;
+ mob->pt_root_page = vmw_piter_dma_addr(&data_iter);
+ } else if (vsgt->num_regions == 1) {
+ mob->pt_level = SVGA3D_MOBFMT_RANGE;
+ mob->pt_root_page = vmw_piter_dma_addr(&data_iter);
+ } else if (unlikely(mob->pt_bo == NULL)) {
+ ret = vmw_mob_pt_populate(dev_priv, mob);
+ if (unlikely(ret != 0))
+ return ret;
+
+ vmw_mob_pt_setup(mob, data_iter, num_data_pages);
+ pt_set_up = true;
+ mob->pt_level += VMW_MOBFMT_PTDEPTH_1 - SVGA3D_MOBFMT_PTDEPTH_1;
+ }
+
+ (void) vmw_3d_resource_inc(dev_priv, false);
+
+ cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+ if (unlikely(cmd == NULL)) {
+ DRM_ERROR("Failed reserving FIFO space for Memory "
+ "Object binding.\n");
+ goto out_no_cmd_space;
+ }
+
+ cmd->header.id = SVGA_3D_CMD_DEFINE_GB_MOB64;
+ cmd->header.size = sizeof(cmd->body);
+ cmd->body.mobid = mob_id;
+ cmd->body.ptDepth = mob->pt_level;
+ cmd->body.base = cpu_to_le64(mob->pt_root_page >> PAGE_SHIFT);
+ cmd->body.sizeInBytes = num_data_pages * PAGE_SIZE;
+
+ vmw_fifo_commit(dev_priv, sizeof(*cmd));
+
+ return 0;
+
+out_no_cmd_space:
+ vmw_3d_resource_dec(dev_priv, false);
+ if (pt_set_up)
+ ttm_bo_unref(&mob->pt_bo);
+
+ return -ENOMEM;
+}
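
vmw_mob_bind, like vmw_setup_otable_base above, picks the page-table format from the backing layout: a single data page needs no table at all (depth 0), one physically contiguous region can use the range format, and everything else gets a real multilevel table. A sketch of that decision with hypothetical format ids:

#include <stdio.h>

/* hypothetical format ids, standing in for the SVGA3D_MOBFMT_* values */
enum mob_format { FMT_PTDEPTH_0, FMT_RANGE, FMT_MULTILEVEL };

static enum mob_format pick_format(unsigned long num_data_pages,
				   unsigned long num_regions)
{
	if (num_data_pages == 1)
		return FMT_PTDEPTH_0;	/* root "table" is the data page */
	if (num_regions == 1)
		return FMT_RANGE;	/* one contiguous run, no table */
	return FMT_MULTILEVEL;		/* build a real page table */
}

int main(void)
{
	printf("%d %d %d\n",
	       pick_format(1, 1),	/* 0 */
	       pick_format(1000, 1),	/* 1 */
	       pick_format(1000, 37));	/* 2 */
	return 0;
}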
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index 9b5ea2ac7ddf..2aa4bc6a4d60 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -88,6 +88,11 @@ struct vmw_resource *vmw_resource_reference(struct vmw_resource *res)
return res;
}
+struct vmw_resource *
+vmw_resource_reference_unless_doomed(struct vmw_resource *res)
+{
+ return kref_get_unless_zero(&res->kref) ? res : NULL;
+}
/**
* vmw_resource_release_id - release a resource id to the id manager.
@@ -136,8 +141,12 @@ static void vmw_resource_release(struct kref *kref)
vmw_dmabuf_unreference(&res->backup);
}
- if (likely(res->hw_destroy != NULL))
+ if (likely(res->hw_destroy != NULL)) {
res->hw_destroy(res);
+ mutex_lock(&dev_priv->binding_mutex);
+ vmw_context_binding_res_list_kill(&res->binding_head);
+ mutex_unlock(&dev_priv->binding_mutex);
+ }
id = res->id;
if (res->res_free != NULL)
@@ -215,6 +224,7 @@ int vmw_resource_init(struct vmw_private *dev_priv, struct vmw_resource *res,
res->func = func;
INIT_LIST_HEAD(&res->lru_head);
INIT_LIST_HEAD(&res->mob_head);
+ INIT_LIST_HEAD(&res->binding_head);
res->id = -1;
res->backup = NULL;
res->backup_offset = 0;
@@ -441,6 +451,21 @@ static void vmw_user_dmabuf_release(struct ttm_base_object **p_base)
ttm_bo_unref(&bo);
}
+static void vmw_user_dmabuf_ref_obj_release(struct ttm_base_object *base,
+ enum ttm_ref_type ref_type)
+{
+ struct vmw_user_dma_buffer *user_bo;
+ user_bo = container_of(base, struct vmw_user_dma_buffer, prime.base);
+
+ switch (ref_type) {
+ case TTM_REF_SYNCCPU_WRITE:
+ ttm_bo_synccpu_write_release(&user_bo->dma.base);
+ break;
+ default:
+ BUG();
+ }
+}
+
/**
* vmw_user_dmabuf_alloc - Allocate a user dma buffer
*
@@ -471,6 +496,8 @@ int vmw_user_dmabuf_alloc(struct vmw_private *dev_priv,
}
ret = vmw_dmabuf_init(dev_priv, &user_bo->dma, size,
+ (dev_priv->has_mob) ?
+ &vmw_sys_placement :
&vmw_vram_sys_placement, true,
&vmw_user_dmabuf_destroy);
if (unlikely(ret != 0))
@@ -482,7 +509,8 @@ int vmw_user_dmabuf_alloc(struct vmw_private *dev_priv,
&user_bo->prime,
shareable,
ttm_buffer_type,
- &vmw_user_dmabuf_release, NULL);
+ &vmw_user_dmabuf_release,
+ &vmw_user_dmabuf_ref_obj_release);
if (unlikely(ret != 0)) {
ttm_bo_unref(&tmp);
goto out_no_base_object;
@@ -515,6 +543,130 @@ int vmw_user_dmabuf_verify_access(struct ttm_buffer_object *bo,
vmw_user_bo->prime.base.shareable) ? 0 : -EPERM;
}
+/**
+ * vmw_user_dmabuf_synccpu_grab - Grab a struct vmw_user_dma_buffer for cpu
+ * access, idling previous GPU operations on the buffer and optionally
+ * blocking it for further command submissions.
+ *
+ * @user_bo: Pointer to the buffer object being grabbed for CPU access
+ * @tfile: Identifying the caller.
+ * @flags: Flags indicating how the grab should be performed.
+ *
+ * A blocking grab will be automatically released when @tfile is closed.
+ */
+static int vmw_user_dmabuf_synccpu_grab(struct vmw_user_dma_buffer *user_bo,
+ struct ttm_object_file *tfile,
+ uint32_t flags)
+{
+ struct ttm_buffer_object *bo = &user_bo->dma.base;
+ bool existed;
+ int ret;
+
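+	/*
+	 * If further command submission is to remain allowed, only wait
+	 * for the buffer to become idle instead of taking a CPU write grab.
+	 */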
+ if (flags & drm_vmw_synccpu_allow_cs) {
+ struct ttm_bo_device *bdev = bo->bdev;
+
+ spin_lock(&bdev->fence_lock);
+ ret = ttm_bo_wait(bo, false, true,
+ !!(flags & drm_vmw_synccpu_dontblock));
+ spin_unlock(&bdev->fence_lock);
+ return ret;
+ }
+
+ ret = ttm_bo_synccpu_write_grab
+ (bo, !!(flags & drm_vmw_synccpu_dontblock));
+ if (unlikely(ret != 0))
+ return ret;
+
+ ret = ttm_ref_object_add(tfile, &user_bo->prime.base,
+ TTM_REF_SYNCCPU_WRITE, &existed);
+ if (ret != 0 || existed)
+ ttm_bo_synccpu_write_release(&user_bo->dma.base);
+
+ return ret;
+}
+
+/**
+ * vmw_user_dmabuf_synccpu_release - Release a previous grab for CPU access,
+ * and unblock command submission on the buffer if blocked.
+ *
+ * @handle: Handle identifying the buffer object.
+ * @tfile: Identifying the caller.
+ * @flags: Flags indicating the type of release.
+ */
+static int vmw_user_dmabuf_synccpu_release(uint32_t handle,
+ struct ttm_object_file *tfile,
+ uint32_t flags)
+{
+ if (!(flags & drm_vmw_synccpu_allow_cs))
+ return ttm_ref_object_base_unref(tfile, handle,
+ TTM_REF_SYNCCPU_WRITE);
+
+ return 0;
+}
+
+/**
+ * vmw_user_dmabuf_synccpu_ioctl - ioctl function implementing the synccpu
+ * functionality.
+ *
+ * @dev: Identifies the drm device.
+ * @data: Pointer to the ioctl argument.
+ * @file_priv: Identifies the caller.
+ *
+ * This function checks the ioctl arguments for validity and calls the
+ * relevant synccpu functions.
+ */
+int vmw_user_dmabuf_synccpu_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_vmw_synccpu_arg *arg =
+ (struct drm_vmw_synccpu_arg *) data;
+ struct vmw_dma_buffer *dma_buf;
+ struct vmw_user_dma_buffer *user_bo;
+ struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+ int ret;
+
+ if ((arg->flags & (drm_vmw_synccpu_read | drm_vmw_synccpu_write)) == 0
+ || (arg->flags & ~(drm_vmw_synccpu_read | drm_vmw_synccpu_write |
+ drm_vmw_synccpu_dontblock |
+ drm_vmw_synccpu_allow_cs)) != 0) {
+ DRM_ERROR("Illegal synccpu flags.\n");
+ return -EINVAL;
+ }
+
+ switch (arg->op) {
+ case drm_vmw_synccpu_grab:
+ ret = vmw_user_dmabuf_lookup(tfile, arg->handle, &dma_buf);
+ if (unlikely(ret != 0))
+ return ret;
+
+ user_bo = container_of(dma_buf, struct vmw_user_dma_buffer,
+ dma);
+ ret = vmw_user_dmabuf_synccpu_grab(user_bo, tfile, arg->flags);
+ vmw_dmabuf_unreference(&dma_buf);
+ if (unlikely(ret != 0 && ret != -ERESTARTSYS &&
+ ret != -EBUSY)) {
+ DRM_ERROR("Failed synccpu grab on handle 0x%08x.\n",
+ (unsigned int) arg->handle);
+ return ret;
+ }
+ break;
+ case drm_vmw_synccpu_release:
+ ret = vmw_user_dmabuf_synccpu_release(arg->handle, tfile,
+ arg->flags);
+ if (unlikely(ret != 0)) {
+ DRM_ERROR("Failed synccpu release on handle 0x%08x.\n",
+ (unsigned int) arg->handle);
+ return ret;
+ }
+ break;
+ default:
+ DRM_ERROR("Invalid synccpu operation.\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
@@ -591,7 +743,8 @@ int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
}
int vmw_user_dmabuf_reference(struct ttm_object_file *tfile,
- struct vmw_dma_buffer *dma_buf)
+ struct vmw_dma_buffer *dma_buf,
+ uint32_t *handle)
{
struct vmw_user_dma_buffer *user_bo;
@@ -599,6 +752,8 @@ int vmw_user_dmabuf_reference(struct ttm_object_file *tfile,
return -EINVAL;
user_bo = container_of(dma_buf, struct vmw_user_dma_buffer, dma);
+
+ *handle = user_bo->prime.base.hash.key;
return ttm_ref_object_add(tfile, &user_bo->prime.base,
TTM_REF_USAGE, NULL);
}
@@ -1291,11 +1446,54 @@ void vmw_fence_single_bo(struct ttm_buffer_object *bo,
 * @mem: The struct ttm_mem_reg indicating to what memory
* region the move is taking place.
*
- * For now does nothing.
+ * Evicts the Guest Backed hardware resource if the backup
+ * buffer is being moved out of MOB memory.
+ * Note that this function should not race with the resource
+ * validation code as long as it accesses only members of struct
+ * vmw_resource that remain static while bo::res is !NULL and
+ * while we have @bo reserved. struct vmw_resource::backup is *not* a
+ * static member. The resource validation code will take care
+ * to set @bo::res to NULL, while having @bo reserved, when the
+ * buffer is no longer bound to the resource, so @bo::res can be
+ * used to determine whether there is a need to unbind and whether
+ * it is safe to unbind.
*/
void vmw_resource_move_notify(struct ttm_buffer_object *bo,
struct ttm_mem_reg *mem)
{
+ struct vmw_dma_buffer *dma_buf;
+
+ if (mem == NULL)
+ return;
+
+ if (bo->destroy != vmw_dmabuf_bo_free &&
+ bo->destroy != vmw_user_dmabuf_destroy)
+ return;
+
+ dma_buf = container_of(bo, struct vmw_dma_buffer, base);
+
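+	/*
+	 * When the buffer moves out of MOB memory, unbind all resources
+	 * backed by it and wait for the unbind commands to complete before
+	 * the move proceeds.
+	 */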
+ if (mem->mem_type != VMW_PL_MOB) {
+ struct vmw_resource *res, *n;
+ struct ttm_bo_device *bdev = bo->bdev;
+ struct ttm_validate_buffer val_buf;
+
+ val_buf.bo = bo;
+
+ list_for_each_entry_safe(res, n, &dma_buf->res_list, mob_head) {
+
+ if (unlikely(res->func->unbind == NULL))
+ continue;
+
+ (void) res->func->unbind(res, true, &val_buf);
+ res->backup_dirty = true;
+ res->res_dirty = false;
+ list_del_init(&res->mob_head);
+ }
+
+ spin_lock(&bdev->fence_lock);
+ (void) ttm_bo_wait(bo, false, false, false);
+ spin_unlock(&bdev->fence_lock);
+ }
}
/**
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
new file mode 100644
index 000000000000..217d941b8176
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
@@ -0,0 +1,810 @@
+/**************************************************************************
+ *
+ * Copyright © 2009-2012 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "vmwgfx_drv.h"
+#include "vmwgfx_resource_priv.h"
+#include "ttm/ttm_placement.h"
+
+#define VMW_COMPAT_SHADER_HT_ORDER 12
+
+struct vmw_shader {
+ struct vmw_resource res;
+ SVGA3dShaderType type;
+ uint32_t size;
+};
+
+struct vmw_user_shader {
+ struct ttm_base_object base;
+ struct vmw_shader shader;
+};
+
+/**
+ * enum vmw_compat_shader_state - Staging state for compat shaders
+ */
+enum vmw_compat_shader_state {
+ VMW_COMPAT_COMMITED,
+ VMW_COMPAT_ADD,
+ VMW_COMPAT_DEL
+};
+
+/**
+ * struct vmw_compat_shader - Metadata for compat shaders.
+ *
+ * @handle: The TTM handle of the guest backed shader.
+ * @tfile: The struct ttm_object_file the guest backed shader is registered
+ * with.
+ * @hash: Hash item for lookup.
+ * @head: List head for staging lists or the compat shader manager list.
+ * @state: Staging state.
+ *
+ * The structure is protected by the cmdbuf lock.
+ */
+struct vmw_compat_shader {
+ u32 handle;
+ struct ttm_object_file *tfile;
+ struct drm_hash_item hash;
+ struct list_head head;
+ enum vmw_compat_shader_state state;
+};
+
+/**
+ * struct vmw_compat_shader_manager - Compat shader manager.
+ *
+ * @shaders: Hash table containing staged and committed compat shaders
+ * @list: List of committed shaders.
+ * @dev_priv: Pointer to a device private structure.
+ *
+ * @shaders and @list are protected by the cmdbuf mutex for now.
+ */
+struct vmw_compat_shader_manager {
+ struct drm_open_hash shaders;
+ struct list_head list;
+ struct vmw_private *dev_priv;
+};
+
+static void vmw_user_shader_free(struct vmw_resource *res);
+static struct vmw_resource *
+vmw_user_shader_base_to_res(struct ttm_base_object *base);
+
+static int vmw_gb_shader_create(struct vmw_resource *res);
+static int vmw_gb_shader_bind(struct vmw_resource *res,
+ struct ttm_validate_buffer *val_buf);
+static int vmw_gb_shader_unbind(struct vmw_resource *res,
+ bool readback,
+ struct ttm_validate_buffer *val_buf);
+static int vmw_gb_shader_destroy(struct vmw_resource *res);
+
+static uint64_t vmw_user_shader_size;
+
+static const struct vmw_user_resource_conv user_shader_conv = {
+ .object_type = VMW_RES_SHADER,
+ .base_obj_to_res = vmw_user_shader_base_to_res,
+ .res_free = vmw_user_shader_free
+};
+
+const struct vmw_user_resource_conv *user_shader_converter =
+ &user_shader_conv;
+
+
+static const struct vmw_res_func vmw_gb_shader_func = {
+ .res_type = vmw_res_shader,
+ .needs_backup = true,
+ .may_evict = true,
+ .type_name = "guest backed shaders",
+ .backup_placement = &vmw_mob_placement,
+ .create = vmw_gb_shader_create,
+ .destroy = vmw_gb_shader_destroy,
+ .bind = vmw_gb_shader_bind,
+ .unbind = vmw_gb_shader_unbind
+};
+
+/**
+ * Shader management:
+ */
+
+static inline struct vmw_shader *
+vmw_res_to_shader(struct vmw_resource *res)
+{
+ return container_of(res, struct vmw_shader, res);
+}
+
+static void vmw_hw_shader_destroy(struct vmw_resource *res)
+{
+ (void) vmw_gb_shader_destroy(res);
+}
+
+static int vmw_gb_shader_init(struct vmw_private *dev_priv,
+ struct vmw_resource *res,
+ uint32_t size,
+ uint64_t offset,
+ SVGA3dShaderType type,
+ struct vmw_dma_buffer *byte_code,
+ void (*res_free) (struct vmw_resource *res))
+{
+ struct vmw_shader *shader = vmw_res_to_shader(res);
+ int ret;
+
+ ret = vmw_resource_init(dev_priv, res, true,
+ res_free, &vmw_gb_shader_func);
+
+
+ if (unlikely(ret != 0)) {
+ if (res_free)
+ res_free(res);
+ else
+ kfree(res);
+ return ret;
+ }
+
+ res->backup_size = size;
+ if (byte_code) {
+ res->backup = vmw_dmabuf_reference(byte_code);
+ res->backup_offset = offset;
+ }
+ shader->size = size;
+ shader->type = type;
+
+ vmw_resource_activate(res, vmw_hw_shader_destroy);
+ return 0;
+}
+
+static int vmw_gb_shader_create(struct vmw_resource *res)
+{
+ struct vmw_private *dev_priv = res->dev_priv;
+ struct vmw_shader *shader = vmw_res_to_shader(res);
+ int ret;
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDefineGBShader body;
+ } *cmd;
+
+ if (likely(res->id != -1))
+ return 0;
+
+ ret = vmw_resource_alloc_id(res);
+ if (unlikely(ret != 0)) {
+ DRM_ERROR("Failed to allocate a shader id.\n");
+ goto out_no_id;
+ }
+
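+	/* The device supports only a limited number of guest-backed shader ids. */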
+ if (unlikely(res->id >= VMWGFX_NUM_GB_SHADER)) {
+ ret = -EBUSY;
+ goto out_no_fifo;
+ }
+
+ cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+ if (unlikely(cmd == NULL)) {
+ DRM_ERROR("Failed reserving FIFO space for shader "
+ "creation.\n");
+ ret = -ENOMEM;
+ goto out_no_fifo;
+ }
+
+ cmd->header.id = SVGA_3D_CMD_DEFINE_GB_SHADER;
+ cmd->header.size = sizeof(cmd->body);
+ cmd->body.shid = res->id;
+ cmd->body.type = shader->type;
+ cmd->body.sizeInBytes = shader->size;
+ vmw_fifo_commit(dev_priv, sizeof(*cmd));
+ (void) vmw_3d_resource_inc(dev_priv, false);
+
+ return 0;
+
+out_no_fifo:
+ vmw_resource_release_id(res);
+out_no_id:
+ return ret;
+}
+
+static int vmw_gb_shader_bind(struct vmw_resource *res,
+ struct ttm_validate_buffer *val_buf)
+{
+ struct vmw_private *dev_priv = res->dev_priv;
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdBindGBShader body;
+ } *cmd;
+ struct ttm_buffer_object *bo = val_buf->bo;
+
+ BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
+
+ cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+ if (unlikely(cmd == NULL)) {
+ DRM_ERROR("Failed reserving FIFO space for shader "
+ "binding.\n");
+ return -ENOMEM;
+ }
+
+ cmd->header.id = SVGA_3D_CMD_BIND_GB_SHADER;
+ cmd->header.size = sizeof(cmd->body);
+ cmd->body.shid = res->id;
+ cmd->body.mobid = bo->mem.start;
+ cmd->body.offsetInBytes = 0;
+ res->backup_dirty = false;
+ vmw_fifo_commit(dev_priv, sizeof(*cmd));
+
+ return 0;
+}
+
+static int vmw_gb_shader_unbind(struct vmw_resource *res,
+ bool readback,
+ struct ttm_validate_buffer *val_buf)
+{
+ struct vmw_private *dev_priv = res->dev_priv;
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdBindGBShader body;
+ } *cmd;
+ struct vmw_fence_obj *fence;
+
+ BUG_ON(res->backup->base.mem.mem_type != VMW_PL_MOB);
+
+ cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+ if (unlikely(cmd == NULL)) {
+ DRM_ERROR("Failed reserving FIFO space for shader "
+ "unbinding.\n");
+ return -ENOMEM;
+ }
+
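+	/* Binding the shader to the invalid MOB id detaches it from its backup. */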
+ cmd->header.id = SVGA_3D_CMD_BIND_GB_SHADER;
+ cmd->header.size = sizeof(cmd->body);
+ cmd->body.shid = res->id;
+ cmd->body.mobid = SVGA3D_INVALID_ID;
+ cmd->body.offsetInBytes = 0;
+ vmw_fifo_commit(dev_priv, sizeof(*cmd));
+
+ /*
+ * Create a fence object and fence the backup buffer.
+ */
+
+ (void) vmw_execbuf_fence_commands(NULL, dev_priv,
+ &fence, NULL);
+
+ vmw_fence_single_bo(val_buf->bo, fence);
+
+ if (likely(fence != NULL))
+ vmw_fence_obj_unreference(&fence);
+
+ return 0;
+}
+
+static int vmw_gb_shader_destroy(struct vmw_resource *res)
+{
+ struct vmw_private *dev_priv = res->dev_priv;
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDestroyGBShader body;
+ } *cmd;
+
+ if (likely(res->id == -1))
+ return 0;
+
+ mutex_lock(&dev_priv->binding_mutex);
+ vmw_context_binding_res_list_scrub(&res->binding_head);
+
+ cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+ if (unlikely(cmd == NULL)) {
+ DRM_ERROR("Failed reserving FIFO space for shader "
+ "destruction.\n");
+ mutex_unlock(&dev_priv->binding_mutex);
+ return -ENOMEM;
+ }
+
+ cmd->header.id = SVGA_3D_CMD_DESTROY_GB_SHADER;
+ cmd->header.size = sizeof(cmd->body);
+ cmd->body.shid = res->id;
+ vmw_fifo_commit(dev_priv, sizeof(*cmd));
+ mutex_unlock(&dev_priv->binding_mutex);
+ vmw_resource_release_id(res);
+ vmw_3d_resource_dec(dev_priv, false);
+
+ return 0;
+}
+
+/**
+ * User-space shader management:
+ */
+
+static struct vmw_resource *
+vmw_user_shader_base_to_res(struct ttm_base_object *base)
+{
+ return &(container_of(base, struct vmw_user_shader, base)->
+ shader.res);
+}
+
+static void vmw_user_shader_free(struct vmw_resource *res)
+{
+ struct vmw_user_shader *ushader =
+ container_of(res, struct vmw_user_shader, shader.res);
+ struct vmw_private *dev_priv = res->dev_priv;
+
+ ttm_base_object_kfree(ushader, base);
+ ttm_mem_global_free(vmw_mem_glob(dev_priv),
+ vmw_user_shader_size);
+}
+
+/**
+ * This function is called when user space has no more references on the
+ * base object. It releases the base-object's reference on the resource object.
+ */
+
+static void vmw_user_shader_base_release(struct ttm_base_object **p_base)
+{
+ struct ttm_base_object *base = *p_base;
+ struct vmw_resource *res = vmw_user_shader_base_to_res(base);
+
+ *p_base = NULL;
+ vmw_resource_unreference(&res);
+}
+
+int vmw_shader_destroy_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_vmw_shader_arg *arg = (struct drm_vmw_shader_arg *)data;
+ struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+
+ return ttm_ref_object_base_unref(tfile, arg->handle,
+ TTM_REF_USAGE);
+}
+
+int vmw_shader_alloc(struct vmw_private *dev_priv,
+ struct vmw_dma_buffer *buffer,
+ size_t shader_size,
+ size_t offset,
+ SVGA3dShaderType shader_type,
+ struct ttm_object_file *tfile,
+ u32 *handle)
+{
+ struct vmw_user_shader *ushader;
+ struct vmw_resource *res, *tmp;
+ int ret;
+
+ /*
+ * Approximate idr memory usage with 128 bytes. It will be limited
+	 * by the maximum number of shaders anyway.
+ */
+ if (unlikely(vmw_user_shader_size == 0))
+ vmw_user_shader_size =
+ ttm_round_pot(sizeof(struct vmw_user_shader)) + 128;
+
+ ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
+ vmw_user_shader_size,
+ false, true);
+ if (unlikely(ret != 0)) {
+ if (ret != -ERESTARTSYS)
+ DRM_ERROR("Out of graphics memory for shader "
+ "creation.\n");
+ goto out;
+ }
+
+ ushader = kzalloc(sizeof(*ushader), GFP_KERNEL);
+ if (unlikely(ushader == NULL)) {
+ ttm_mem_global_free(vmw_mem_glob(dev_priv),
+ vmw_user_shader_size);
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ res = &ushader->shader.res;
+ ushader->base.shareable = false;
+ ushader->base.tfile = NULL;
+
+ /*
+ * From here on, the destructor takes over resource freeing.
+ */
+
+ ret = vmw_gb_shader_init(dev_priv, res, shader_size,
+ offset, shader_type, buffer,
+ vmw_user_shader_free);
+ if (unlikely(ret != 0))
+ goto out;
+
+ tmp = vmw_resource_reference(res);
+ ret = ttm_base_object_init(tfile, &ushader->base, false,
+ VMW_RES_SHADER,
+ &vmw_user_shader_base_release, NULL);
+
+ if (unlikely(ret != 0)) {
+ vmw_resource_unreference(&tmp);
+ goto out_err;
+ }
+
+ if (handle)
+ *handle = ushader->base.hash.key;
+out_err:
+ vmw_resource_unreference(&res);
+out:
+ return ret;
+}
+
+
+int vmw_shader_define_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct vmw_private *dev_priv = vmw_priv(dev);
+ struct drm_vmw_shader_create_arg *arg =
+ (struct drm_vmw_shader_create_arg *)data;
+ struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+ struct vmw_master *vmaster = vmw_master(file_priv->master);
+ struct vmw_dma_buffer *buffer = NULL;
+ SVGA3dShaderType shader_type;
+ int ret;
+
+ if (arg->buffer_handle != SVGA3D_INVALID_ID) {
+ ret = vmw_user_dmabuf_lookup(tfile, arg->buffer_handle,
+ &buffer);
+ if (unlikely(ret != 0)) {
+ DRM_ERROR("Could not find buffer for shader "
+ "creation.\n");
+ return ret;
+ }
+
+ if ((u64)buffer->base.num_pages * PAGE_SIZE <
+ (u64)arg->size + (u64)arg->offset) {
+ DRM_ERROR("Illegal buffer- or shader size.\n");
+ ret = -EINVAL;
+ goto out_bad_arg;
+ }
+ }
+
+ switch (arg->shader_type) {
+ case drm_vmw_shader_type_vs:
+ shader_type = SVGA3D_SHADERTYPE_VS;
+ break;
+ case drm_vmw_shader_type_ps:
+ shader_type = SVGA3D_SHADERTYPE_PS;
+ break;
+ case drm_vmw_shader_type_gs:
+ shader_type = SVGA3D_SHADERTYPE_GS;
+ break;
+ default:
+ DRM_ERROR("Illegal shader type.\n");
+ ret = -EINVAL;
+ goto out_bad_arg;
+ }
+
+ ret = ttm_read_lock(&vmaster->lock, true);
+ if (unlikely(ret != 0))
+ goto out_bad_arg;
+
+ ret = vmw_shader_alloc(dev_priv, buffer, arg->size, arg->offset,
+ shader_type, tfile, &arg->shader_handle);
+
+ ttm_read_unlock(&vmaster->lock);
+out_bad_arg:
+ vmw_dmabuf_unreference(&buffer);
+ return ret;
+}
+
+/**
+ * vmw_compat_shader_lookup - Look up a compat shader
+ *
+ * @man: Pointer to the compat shader manager.
+ * @shader_type: The shader type, that combined with the user_key identifies
+ * the shader.
+ * @user_key: On entry, points to the user key identifying the shader.
+ * On successful exit, it will contain the guest-backed shader's TTM handle.
+ *
+ * Returns 0 on success. Non-zero on failure, in which case the value pointed
+ * to by @user_key is unmodified.
+ */
+int vmw_compat_shader_lookup(struct vmw_compat_shader_manager *man,
+ SVGA3dShaderType shader_type,
+ u32 *user_key)
+{
+ struct drm_hash_item *hash;
+ int ret;
+ unsigned long key = *user_key | (shader_type << 24);
+
+ ret = drm_ht_find_item(&man->shaders, key, &hash);
+ if (unlikely(ret != 0))
+ return ret;
+
+ *user_key = drm_hash_entry(hash, struct vmw_compat_shader,
+ hash)->handle;
+
+ return 0;
+}
+
+/**
+ * vmw_compat_shader_free - Free a compat shader.
+ *
+ * @man: Pointer to the compat shader manager.
+ * @entry: Pointer to a struct vmw_compat_shader.
+ *
+ * Frees a struct vmw_compat_shader entry and drops its reference to the
+ * guest backed shader.
+ */
+static void vmw_compat_shader_free(struct vmw_compat_shader_manager *man,
+ struct vmw_compat_shader *entry)
+{
+ list_del(&entry->head);
+ WARN_ON(drm_ht_remove_item(&man->shaders, &entry->hash));
+ WARN_ON(ttm_ref_object_base_unref(entry->tfile, entry->handle,
+ TTM_REF_USAGE));
+ kfree(entry);
+}
+
+/**
+ * vmw_compat_shaders_commit - Commit a list of compat shader actions.
+ *
+ * @man: Pointer to the compat shader manager.
+ * @list: Caller's list of compat shader actions.
+ *
+ * This function commits a list of compat shader additions or removals.
+ * It is typically called when the execbuf ioctl call triggering these
+ * actions has committed the FIFO contents to the device.
+ */
+void vmw_compat_shaders_commit(struct vmw_compat_shader_manager *man,
+ struct list_head *list)
+{
+ struct vmw_compat_shader *entry, *next;
+
+ list_for_each_entry_safe(entry, next, list, head) {
+ list_del(&entry->head);
+ switch (entry->state) {
+ case VMW_COMPAT_ADD:
+ entry->state = VMW_COMPAT_COMMITED;
+ list_add_tail(&entry->head, &man->list);
+ break;
+ case VMW_COMPAT_DEL:
+ ttm_ref_object_base_unref(entry->tfile, entry->handle,
+ TTM_REF_USAGE);
+ kfree(entry);
+ break;
+ default:
+ BUG();
+ break;
+ }
+ }
+}
+
+/**
+ * vmw_compat_shaders_revert - Revert a list of compat shader actions
+ *
+ * @man: Pointer to the compat shader manager.
+ * @list: Caller's list of compat shader actions.
+ *
+ * This function reverts a list of compat shader additions or removals.
+ * It is typically called when the execbuf ioctl call triggering these
+ * actions failed for some reason, and the command stream was never
+ * submitted.
+ */
+void vmw_compat_shaders_revert(struct vmw_compat_shader_manager *man,
+ struct list_head *list)
+{
+ struct vmw_compat_shader *entry, *next;
+ int ret;
+
+ list_for_each_entry_safe(entry, next, list, head) {
+ switch (entry->state) {
+ case VMW_COMPAT_ADD:
+ vmw_compat_shader_free(man, entry);
+ break;
+ case VMW_COMPAT_DEL:
+ ret = drm_ht_insert_item(&man->shaders, &entry->hash);
+ list_del(&entry->head);
+ list_add_tail(&entry->head, &man->list);
+ entry->state = VMW_COMPAT_COMMITED;
+ break;
+ default:
+ BUG();
+ break;
+ }
+ }
+}
+
+/**
+ * vmw_compat_shader_remove - Stage a compat shader for removal.
+ *
+ * @man: Pointer to the compat shader manager
+ * @user_key: The key that is used to identify the shader. The key is
+ * unique to the shader type.
+ * @shader_type: Shader type.
+ * @list: Caller's list of staged shader actions.
+ *
+ * This function stages a compat shader for removal and removes the key from
+ * the shader manager's hash table. If the shader was previously only staged
+ * for addition it is completely removed (But the execbuf code may keep a
+ * reference if it was bound to a context between addition and removal). If
+ * it was previously commited to the manager, it is staged for removal.
+ */
+int vmw_compat_shader_remove(struct vmw_compat_shader_manager *man,
+ u32 user_key, SVGA3dShaderType shader_type,
+ struct list_head *list)
+{
+ struct vmw_compat_shader *entry;
+ struct drm_hash_item *hash;
+ int ret;
+
+ ret = drm_ht_find_item(&man->shaders, user_key | (shader_type << 24),
+ &hash);
+ if (likely(ret != 0))
+ return -EINVAL;
+
+ entry = drm_hash_entry(hash, struct vmw_compat_shader, hash);
+
+ switch (entry->state) {
+ case VMW_COMPAT_ADD:
+ vmw_compat_shader_free(man, entry);
+ break;
+ case VMW_COMPAT_COMMITED:
+ (void) drm_ht_remove_item(&man->shaders, &entry->hash);
+ list_del(&entry->head);
+ entry->state = VMW_COMPAT_DEL;
+ list_add_tail(&entry->head, list);
+ break;
+ default:
+ BUG();
+ break;
+ }
+
+ return 0;
+}
+
+/**
+ * vmw_compat_shader_add - Create a compat shader and add the
+ * key to the manager
+ *
+ * @man: Pointer to the compat shader manager
+ * @user_key: The key that is used to identify the shader. The key is
+ * unique to the shader type.
+ * @bytecode: Pointer to the bytecode of the shader.
+ * @shader_type: Shader type.
+ * @tfile: Pointer to a struct ttm_object_file that the guest-backed shader is
+ * to be created with.
+ * @list: Caller's list of staged shader actions.
+ *
+ * Note that only the key is added to the shader manager's hash table.
+ * The shader is not yet added to the shader manager's list of shaders.
+ */
+int vmw_compat_shader_add(struct vmw_compat_shader_manager *man,
+ u32 user_key, const void *bytecode,
+ SVGA3dShaderType shader_type,
+ size_t size,
+ struct ttm_object_file *tfile,
+ struct list_head *list)
+{
+ struct vmw_dma_buffer *buf;
+ struct ttm_bo_kmap_obj map;
+ bool is_iomem;
+ struct vmw_compat_shader *compat;
+ u32 handle;
+ int ret;
+
+ if (user_key > ((1 << 24) - 1) || (unsigned) shader_type > 16)
+ return -EINVAL;
+
+ /* Allocate and pin a DMA buffer */
+ buf = kzalloc(sizeof(*buf), GFP_KERNEL);
+ if (unlikely(buf == NULL))
+ return -ENOMEM;
+
+ ret = vmw_dmabuf_init(man->dev_priv, buf, size, &vmw_sys_ne_placement,
+ true, vmw_dmabuf_bo_free);
+ if (unlikely(ret != 0))
+ goto out;
+
+ ret = ttm_bo_reserve(&buf->base, false, true, false, NULL);
+ if (unlikely(ret != 0))
+ goto no_reserve;
+
+ /* Map and copy shader bytecode. */
+ ret = ttm_bo_kmap(&buf->base, 0, PAGE_ALIGN(size) >> PAGE_SHIFT,
+ &map);
+ if (unlikely(ret != 0)) {
+ ttm_bo_unreserve(&buf->base);
+ goto no_reserve;
+ }
+
+ memcpy(ttm_kmap_obj_virtual(&map, &is_iomem), bytecode, size);
+ WARN_ON(is_iomem);
+
+ ttm_bo_kunmap(&map);
+ ret = ttm_bo_validate(&buf->base, &vmw_sys_placement, false, true);
+ WARN_ON(ret != 0);
+ ttm_bo_unreserve(&buf->base);
+
+ /* Create a guest-backed shader container backed by the dma buffer */
+ ret = vmw_shader_alloc(man->dev_priv, buf, size, 0, shader_type,
+ tfile, &handle);
+ vmw_dmabuf_unreference(&buf);
+ if (unlikely(ret != 0))
+ goto no_reserve;
+ /*
+ * Create a compat shader structure and stage it for insertion
+ * in the manager
+ */
+ compat = kzalloc(sizeof(*compat), GFP_KERNEL);
+	if (compat == NULL) {
+		ret = -ENOMEM;
+		goto no_compat;
+	}
+
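+	/* The shader type is encoded in the top byte of the hash key. */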
+ compat->hash.key = user_key | (shader_type << 24);
+ ret = drm_ht_insert_item(&man->shaders, &compat->hash);
+ if (unlikely(ret != 0))
+ goto out_invalid_key;
+
+ compat->state = VMW_COMPAT_ADD;
+ compat->handle = handle;
+ compat->tfile = tfile;
+ list_add_tail(&compat->head, list);
+
+ return 0;
+
+out_invalid_key:
+ kfree(compat);
+no_compat:
+ ttm_ref_object_base_unref(tfile, handle, TTM_REF_USAGE);
+no_reserve:
+out:
+ return ret;
+}
+
+/**
+ * vmw_compat_shader_man_create - Create a compat shader manager
+ *
+ * @dev_priv: Pointer to a device private structure.
+ *
+ * Typically done at file open time. If successful returns a pointer to a
+ * compat shader manager. Otherwise returns an error pointer.
+ */
+struct vmw_compat_shader_manager *
+vmw_compat_shader_man_create(struct vmw_private *dev_priv)
+{
+ struct vmw_compat_shader_manager *man;
+ int ret;
+
+	man = kzalloc(sizeof(*man), GFP_KERNEL);
+	if (man == NULL)
+		return ERR_PTR(-ENOMEM);
+
+	man->dev_priv = dev_priv;
+ INIT_LIST_HEAD(&man->list);
+ ret = drm_ht_create(&man->shaders, VMW_COMPAT_SHADER_HT_ORDER);
+ if (ret == 0)
+ return man;
+
+ kfree(man);
+ return ERR_PTR(ret);
+}
+
+/**
+ * vmw_compat_shader_man_destroy - Destroy a compat shader manager
+ *
+ * @man: Pointer to the shader manager to destroy.
+ *
+ * Typically done at file close time.
+ */
+void vmw_compat_shader_man_destroy(struct vmw_compat_shader_manager *man)
+{
+ struct vmw_compat_shader *entry, *next;
+
+ mutex_lock(&man->dev_priv->cmdbuf_mutex);
+ list_for_each_entry_safe(entry, next, &man->list, head)
+ vmw_compat_shader_free(man, entry);
+
+ mutex_unlock(&man->dev_priv->cmdbuf_mutex);
+ kfree(man);
+}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
index 7de2ea8bd553..82468d902915 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
@@ -41,7 +41,6 @@ struct vmw_user_surface {
struct ttm_prime_object prime;
struct vmw_surface srf;
uint32_t size;
- uint32_t backup_handle;
};
/**
@@ -68,6 +67,14 @@ static int vmw_legacy_srf_unbind(struct vmw_resource *res,
struct ttm_validate_buffer *val_buf);
static int vmw_legacy_srf_create(struct vmw_resource *res);
static int vmw_legacy_srf_destroy(struct vmw_resource *res);
+static int vmw_gb_surface_create(struct vmw_resource *res);
+static int vmw_gb_surface_bind(struct vmw_resource *res,
+ struct ttm_validate_buffer *val_buf);
+static int vmw_gb_surface_unbind(struct vmw_resource *res,
+ bool readback,
+ struct ttm_validate_buffer *val_buf);
+static int vmw_gb_surface_destroy(struct vmw_resource *res);
+
static const struct vmw_user_resource_conv user_surface_conv = {
.object_type = VMW_RES_SURFACE,
@@ -93,6 +100,18 @@ static const struct vmw_res_func vmw_legacy_surface_func = {
.unbind = &vmw_legacy_srf_unbind
};
+static const struct vmw_res_func vmw_gb_surface_func = {
+ .res_type = vmw_res_surface,
+ .needs_backup = true,
+ .may_evict = true,
+ .type_name = "guest backed surfaces",
+ .backup_placement = &vmw_mob_placement,
+ .create = vmw_gb_surface_create,
+ .destroy = vmw_gb_surface_destroy,
+ .bind = vmw_gb_surface_bind,
+ .unbind = vmw_gb_surface_unbind
+};
+
/**
* struct vmw_surface_dma - SVGA3D DMA command
*/
@@ -291,6 +310,11 @@ static void vmw_hw_surface_destroy(struct vmw_resource *res)
struct vmw_surface *srf;
void *cmd;
+ if (res->func->destroy == vmw_gb_surface_destroy) {
+ (void) vmw_gb_surface_destroy(res);
+ return;
+ }
+
if (res->id != -1) {
cmd = vmw_fifo_reserve(dev_priv, vmw_surface_destroy_size());
@@ -549,12 +573,15 @@ static int vmw_surface_init(struct vmw_private *dev_priv,
struct vmw_resource *res = &srf->res;
BUG_ON(res_free == NULL);
- (void) vmw_3d_resource_inc(dev_priv, false);
+ if (!dev_priv->has_mob)
+ (void) vmw_3d_resource_inc(dev_priv, false);
ret = vmw_resource_init(dev_priv, res, true, res_free,
+ (dev_priv->has_mob) ? &vmw_gb_surface_func :
&vmw_legacy_surface_func);
if (unlikely(ret != 0)) {
- vmw_3d_resource_dec(dev_priv, false);
+ if (!dev_priv->has_mob)
+ vmw_3d_resource_dec(dev_priv, false);
res_free(res);
return ret;
}
@@ -750,7 +777,7 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
srf->base_size = *srf->sizes;
srf->autogen_filter = SVGA3D_TEX_FILTER_NONE;
- srf->multisample_count = 1;
+ srf->multisample_count = 0;
cur_bo_offset = 0;
cur_offset = srf->offsets;
@@ -843,6 +870,7 @@ out_unlock:
int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
+ struct vmw_private *dev_priv = vmw_priv(dev);
union drm_vmw_surface_reference_arg *arg =
(union drm_vmw_surface_reference_arg *)data;
struct drm_vmw_surface_arg *req = &arg->req;
@@ -854,7 +882,7 @@ int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
struct ttm_base_object *base;
int ret = -EINVAL;
- base = ttm_base_object_lookup(tfile, req->sid);
+ base = ttm_base_object_lookup_for_ref(dev_priv->tdev, req->sid);
if (unlikely(base == NULL)) {
DRM_ERROR("Could not find surface to reference.\n");
return -EINVAL;
@@ -880,8 +908,8 @@ int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
rep->size_addr;
if (user_sizes)
- ret = copy_to_user(user_sizes, srf->sizes,
- srf->num_sizes * sizeof(*srf->sizes));
+ ret = copy_to_user(user_sizes, &srf->base_size,
+ sizeof(srf->base_size));
if (unlikely(ret != 0)) {
DRM_ERROR("copy_to_user failed %p %u\n",
user_sizes, srf->num_sizes);
@@ -893,3 +921,436 @@ out_no_reference:
return ret;
}
+
+/**
+ * vmw_surface_define_encode - Encode a surface_define command.
+ *
+ * @srf: Pointer to a struct vmw_surface object.
+ * @cmd_space: Pointer to memory area in which the commands should be encoded.
+ */
+static int vmw_gb_surface_create(struct vmw_resource *res)
+{
+ struct vmw_private *dev_priv = res->dev_priv;
+ struct vmw_surface *srf = vmw_res_to_srf(res);
+ uint32_t cmd_len, submit_len;
+ int ret;
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDefineGBSurface body;
+ } *cmd;
+
+ if (likely(res->id != -1))
+ return 0;
+
+ (void) vmw_3d_resource_inc(dev_priv, false);
+ ret = vmw_resource_alloc_id(res);
+ if (unlikely(ret != 0)) {
+ DRM_ERROR("Failed to allocate a surface id.\n");
+ goto out_no_id;
+ }
+
+ if (unlikely(res->id >= VMWGFX_NUM_GB_SURFACE)) {
+ ret = -EBUSY;
+ goto out_no_fifo;
+ }
+
+ cmd_len = sizeof(cmd->body);
+ submit_len = sizeof(*cmd);
+ cmd = vmw_fifo_reserve(dev_priv, submit_len);
+ if (unlikely(cmd == NULL)) {
+ DRM_ERROR("Failed reserving FIFO space for surface "
+ "creation.\n");
+ ret = -ENOMEM;
+ goto out_no_fifo;
+ }
+
+ cmd->header.id = SVGA_3D_CMD_DEFINE_GB_SURFACE;
+ cmd->header.size = cmd_len;
+ cmd->body.sid = srf->res.id;
+ cmd->body.surfaceFlags = srf->flags;
+ cmd->body.format = cpu_to_le32(srf->format);
+ cmd->body.numMipLevels = srf->mip_levels[0];
+ cmd->body.multisampleCount = srf->multisample_count;
+ cmd->body.autogenFilter = srf->autogen_filter;
+ cmd->body.size.width = srf->base_size.width;
+ cmd->body.size.height = srf->base_size.height;
+ cmd->body.size.depth = srf->base_size.depth;
+ vmw_fifo_commit(dev_priv, submit_len);
+
+ return 0;
+
+out_no_fifo:
+ vmw_resource_release_id(res);
+out_no_id:
+ vmw_3d_resource_dec(dev_priv, false);
+ return ret;
+}
+
+
+static int vmw_gb_surface_bind(struct vmw_resource *res,
+ struct ttm_validate_buffer *val_buf)
+{
+ struct vmw_private *dev_priv = res->dev_priv;
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdBindGBSurface body;
+ } *cmd1;
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdUpdateGBSurface body;
+ } *cmd2;
+ uint32_t submit_size;
+ struct ttm_buffer_object *bo = val_buf->bo;
+
+ BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
+
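+	/*
+	 * If the backup buffer holds newer data than the surface, follow the
+	 * bind command with an update command.
+	 */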
+ submit_size = sizeof(*cmd1) + (res->backup_dirty ? sizeof(*cmd2) : 0);
+
+ cmd1 = vmw_fifo_reserve(dev_priv, submit_size);
+ if (unlikely(cmd1 == NULL)) {
+ DRM_ERROR("Failed reserving FIFO space for surface "
+ "binding.\n");
+ return -ENOMEM;
+ }
+
+ cmd1->header.id = SVGA_3D_CMD_BIND_GB_SURFACE;
+ cmd1->header.size = sizeof(cmd1->body);
+ cmd1->body.sid = res->id;
+ cmd1->body.mobid = bo->mem.start;
+ if (res->backup_dirty) {
+ cmd2 = (void *) &cmd1[1];
+ cmd2->header.id = SVGA_3D_CMD_UPDATE_GB_SURFACE;
+ cmd2->header.size = sizeof(cmd2->body);
+ cmd2->body.sid = res->id;
+ res->backup_dirty = false;
+ }
+ vmw_fifo_commit(dev_priv, submit_size);
+
+ return 0;
+}
+
+static int vmw_gb_surface_unbind(struct vmw_resource *res,
+ bool readback,
+ struct ttm_validate_buffer *val_buf)
+{
+ struct vmw_private *dev_priv = res->dev_priv;
+ struct ttm_buffer_object *bo = val_buf->bo;
+ struct vmw_fence_obj *fence;
+
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdReadbackGBSurface body;
+ } *cmd1;
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdInvalidateGBSurface body;
+ } *cmd2;
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdBindGBSurface body;
+ } *cmd3;
+ uint32_t submit_size;
+ uint8_t *cmd;
+
+
+ BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
+
+ submit_size = sizeof(*cmd3) + (readback ? sizeof(*cmd1) : sizeof(*cmd2));
+ cmd = vmw_fifo_reserve(dev_priv, submit_size);
+ if (unlikely(cmd == NULL)) {
+ DRM_ERROR("Failed reserving FIFO space for surface "
+ "unbinding.\n");
+ return -ENOMEM;
+ }
+
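+	/*
+	 * Either read the surface contents back into the backup buffer or
+	 * just invalidate them, then detach the surface from its MOB.
+	 */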
+ if (readback) {
+ cmd1 = (void *) cmd;
+ cmd1->header.id = SVGA_3D_CMD_READBACK_GB_SURFACE;
+ cmd1->header.size = sizeof(cmd1->body);
+ cmd1->body.sid = res->id;
+ cmd3 = (void *) &cmd1[1];
+ } else {
+ cmd2 = (void *) cmd;
+ cmd2->header.id = SVGA_3D_CMD_INVALIDATE_GB_SURFACE;
+ cmd2->header.size = sizeof(cmd2->body);
+ cmd2->body.sid = res->id;
+ cmd3 = (void *) &cmd2[1];
+ }
+
+ cmd3->header.id = SVGA_3D_CMD_BIND_GB_SURFACE;
+ cmd3->header.size = sizeof(cmd3->body);
+ cmd3->body.sid = res->id;
+ cmd3->body.mobid = SVGA3D_INVALID_ID;
+
+ vmw_fifo_commit(dev_priv, submit_size);
+
+ /*
+ * Create a fence object and fence the backup buffer.
+ */
+
+ (void) vmw_execbuf_fence_commands(NULL, dev_priv,
+ &fence, NULL);
+
+ vmw_fence_single_bo(val_buf->bo, fence);
+
+ if (likely(fence != NULL))
+ vmw_fence_obj_unreference(&fence);
+
+ return 0;
+}
+
+static int vmw_gb_surface_destroy(struct vmw_resource *res)
+{
+ struct vmw_private *dev_priv = res->dev_priv;
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDestroyGBSurface body;
+ } *cmd;
+
+ if (likely(res->id == -1))
+ return 0;
+
+ mutex_lock(&dev_priv->binding_mutex);
+ vmw_context_binding_res_list_scrub(&res->binding_head);
+
+ cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+ if (unlikely(cmd == NULL)) {
+ DRM_ERROR("Failed reserving FIFO space for surface "
+ "destruction.\n");
+ mutex_unlock(&dev_priv->binding_mutex);
+ return -ENOMEM;
+ }
+
+ cmd->header.id = SVGA_3D_CMD_DESTROY_GB_SURFACE;
+ cmd->header.size = sizeof(cmd->body);
+ cmd->body.sid = res->id;
+ vmw_fifo_commit(dev_priv, sizeof(*cmd));
+ mutex_unlock(&dev_priv->binding_mutex);
+ vmw_resource_release_id(res);
+ vmw_3d_resource_dec(dev_priv, false);
+
+ return 0;
+}
+
+/**
+ * vmw_gb_surface_define_ioctl - Ioctl function implementing
+ * the user surface define functionality.
+ *
+ * @dev: Pointer to a struct drm_device.
+ * @data: Pointer to data copied from / to user-space.
+ * @file_priv: Pointer to a drm file private structure.
+ */
+int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct vmw_private *dev_priv = vmw_priv(dev);
+ struct vmw_user_surface *user_srf;
+ struct vmw_surface *srf;
+ struct vmw_resource *res;
+ struct vmw_resource *tmp;
+ union drm_vmw_gb_surface_create_arg *arg =
+ (union drm_vmw_gb_surface_create_arg *)data;
+ struct drm_vmw_gb_surface_create_req *req = &arg->req;
+ struct drm_vmw_gb_surface_create_rep *rep = &arg->rep;
+ struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+ int ret;
+ uint32_t size;
+ struct vmw_master *vmaster = vmw_master(file_priv->master);
+ const struct svga3d_surface_desc *desc;
+ uint32_t backup_handle;
+
+ if (unlikely(vmw_user_surface_size == 0))
+ vmw_user_surface_size = ttm_round_pot(sizeof(*user_srf)) +
+ 128;
+
+ size = vmw_user_surface_size + 128;
+
+ desc = svga3dsurface_get_desc(req->format);
+ if (unlikely(desc->block_desc == SVGA3DBLOCKDESC_NONE)) {
+ DRM_ERROR("Invalid surface format for surface creation.\n");
+ return -EINVAL;
+ }
+
+ ret = ttm_read_lock(&vmaster->lock, true);
+ if (unlikely(ret != 0))
+ return ret;
+
+ ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
+ size, false, true);
+ if (unlikely(ret != 0)) {
+ if (ret != -ERESTARTSYS)
+ DRM_ERROR("Out of graphics memory for surface"
+ " creation.\n");
+ goto out_unlock;
+ }
+
+ user_srf = kzalloc(sizeof(*user_srf), GFP_KERNEL);
+ if (unlikely(user_srf == NULL)) {
+ ret = -ENOMEM;
+ goto out_no_user_srf;
+ }
+
+ srf = &user_srf->srf;
+ res = &srf->res;
+
+ srf->flags = req->svga3d_flags;
+ srf->format = req->format;
+ srf->scanout = req->drm_surface_flags & drm_vmw_surface_flag_scanout;
+ srf->mip_levels[0] = req->mip_levels;
+ srf->num_sizes = 1;
+ srf->sizes = NULL;
+ srf->offsets = NULL;
+ user_srf->size = size;
+ srf->base_size = req->base_size;
+ srf->autogen_filter = SVGA3D_TEX_FILTER_NONE;
+ srf->multisample_count = req->multisample_count;
+ res->backup_size = svga3dsurface_get_serialized_size
+ (srf->format, srf->base_size, srf->mip_levels[0],
+ srf->flags & SVGA3D_SURFACE_CUBEMAP);
+
+ user_srf->prime.base.shareable = false;
+ user_srf->prime.base.tfile = NULL;
+
+ /**
+ * From this point, the generic resource management functions
+ * destroy the object on failure.
+ */
+
+ ret = vmw_surface_init(dev_priv, srf, vmw_user_surface_free);
+ if (unlikely(ret != 0))
+ goto out_unlock;
+
+ if (req->buffer_handle != SVGA3D_INVALID_ID) {
+ ret = vmw_user_dmabuf_lookup(tfile, req->buffer_handle,
+ &res->backup);
+ } else if (req->drm_surface_flags &
+ drm_vmw_surface_flag_create_buffer)
+ ret = vmw_user_dmabuf_alloc(dev_priv, tfile,
+ res->backup_size,
+ req->drm_surface_flags &
+ drm_vmw_surface_flag_shareable,
+ &backup_handle,
+ &res->backup);
+
+ if (unlikely(ret != 0)) {
+ vmw_resource_unreference(&res);
+ goto out_unlock;
+ }
+
+ tmp = vmw_resource_reference(&srf->res);
+ ret = ttm_prime_object_init(tfile, res->backup_size, &user_srf->prime,
+ req->drm_surface_flags &
+ drm_vmw_surface_flag_shareable,
+ VMW_RES_SURFACE,
+ &vmw_user_surface_base_release, NULL);
+
+ if (unlikely(ret != 0)) {
+ vmw_resource_unreference(&tmp);
+ vmw_resource_unreference(&res);
+ goto out_unlock;
+ }
+
+ rep->handle = user_srf->prime.base.hash.key;
+ rep->backup_size = res->backup_size;
+ if (res->backup) {
+ rep->buffer_map_handle =
+ drm_vma_node_offset_addr(&res->backup->base.vma_node);
+ rep->buffer_size = res->backup->base.num_pages * PAGE_SIZE;
+ rep->buffer_handle = backup_handle;
+ } else {
+ rep->buffer_map_handle = 0;
+ rep->buffer_size = 0;
+ rep->buffer_handle = SVGA3D_INVALID_ID;
+ }
+
+ vmw_resource_unreference(&res);
+
+ ttm_read_unlock(&vmaster->lock);
+ return 0;
+out_no_user_srf:
+ ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
+out_unlock:
+ ttm_read_unlock(&vmaster->lock);
+ return ret;
+}
+
+/**
+ * vmw_gb_surface_reference_ioctl - Ioctl function implementing
+ * the user surface reference functionality.
+ *
+ * @dev: Pointer to a struct drm_device.
+ * @data: Pointer to data copied from / to user-space.
+ * @file_priv: Pointer to a drm file private structure.
+ */
+int vmw_gb_surface_reference_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct vmw_private *dev_priv = vmw_priv(dev);
+ union drm_vmw_gb_surface_reference_arg *arg =
+ (union drm_vmw_gb_surface_reference_arg *)data;
+ struct drm_vmw_surface_arg *req = &arg->req;
+ struct drm_vmw_gb_surface_ref_rep *rep = &arg->rep;
+ struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+ struct vmw_surface *srf;
+ struct vmw_user_surface *user_srf;
+ struct ttm_base_object *base;
+ uint32_t backup_handle;
+ int ret = -EINVAL;
+
+ base = ttm_base_object_lookup_for_ref(dev_priv->tdev, req->sid);
+ if (unlikely(base == NULL)) {
+ DRM_ERROR("Could not find surface to reference.\n");
+ return -EINVAL;
+ }
+
+ if (unlikely(ttm_base_object_type(base) != VMW_RES_SURFACE))
+ goto out_bad_resource;
+
+ user_srf = container_of(base, struct vmw_user_surface, prime.base);
+ srf = &user_srf->srf;
+ if (srf->res.backup == NULL) {
+ DRM_ERROR("Shared GB surface is missing a backup buffer.\n");
+ goto out_bad_resource;
+ }
+
+ ret = ttm_ref_object_add(tfile, &user_srf->prime.base,
+ TTM_REF_USAGE, NULL);
+ if (unlikely(ret != 0)) {
+ DRM_ERROR("Could not add a reference to a GB surface.\n");
+ goto out_bad_resource;
+ }
+
+ mutex_lock(&dev_priv->cmdbuf_mutex); /* Protect res->backup */
+ ret = vmw_user_dmabuf_reference(tfile, srf->res.backup,
+ &backup_handle);
+ mutex_unlock(&dev_priv->cmdbuf_mutex);
+
+ if (unlikely(ret != 0)) {
+ DRM_ERROR("Could not add a reference to a GB surface "
+ "backup buffer.\n");
+ (void) ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
+ req->sid,
+ TTM_REF_USAGE);
+ goto out_bad_resource;
+ }
+
+ rep->creq.svga3d_flags = srf->flags;
+ rep->creq.format = srf->format;
+ rep->creq.mip_levels = srf->mip_levels[0];
+ rep->creq.drm_surface_flags = 0;
+ rep->creq.multisample_count = srf->multisample_count;
+ rep->creq.autogen_filter = srf->autogen_filter;
+ rep->creq.buffer_handle = backup_handle;
+ rep->creq.base_size = srf->base_size;
+ rep->crep.handle = user_srf->prime.base.hash.key;
+ rep->crep.backup_size = srf->res.backup_size;
+ rep->crep.buffer_handle = backup_handle;
+ rep->crep.buffer_map_handle =
+ drm_vma_node_offset_addr(&srf->res.backup->base.vma_node);
+ rep->crep.buffer_size = srf->res.backup->base.num_pages * PAGE_SIZE;
+
+out_bad_resource:
+ ttm_base_object_unref(&base);
+
+ return ret;
+}