Diffstat (limited to 'drivers/gpu/drm/vmwgfx')
58 files changed, 4043 insertions, 2759 deletions
diff --git a/drivers/gpu/drm/vmwgfx/Kconfig b/drivers/gpu/drm/vmwgfx/Kconfig
index 8c308dac99c5..6b28a326f8bb 100644
--- a/drivers/gpu/drm/vmwgfx/Kconfig
+++ b/drivers/gpu/drm/vmwgfx/Kconfig
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
config DRM_VMWGFX
	tristate "DRM driver for VMware Virtual GPU"
	depends on DRM && PCI && X86 && MMU
diff --git a/drivers/gpu/drm/vmwgfx/Makefile b/drivers/gpu/drm/vmwgfx/Makefile
index 794cc9d5c9b0..09b2aa08363e 100644
--- a/drivers/gpu/drm/vmwgfx/Makefile
+++ b/drivers/gpu/drm/vmwgfx/Makefile
@@ -1,9 +1,9 @@
# SPDX-License-Identifier: GPL-2.0
vmwgfx-y := vmwgfx_execbuf.o vmwgfx_gmr.o vmwgfx_kms.o vmwgfx_drv.o \
-	    vmwgfx_fb.o vmwgfx_ioctl.o vmwgfx_resource.o vmwgfx_buffer.o \
+	    vmwgfx_fb.o vmwgfx_ioctl.o vmwgfx_resource.o vmwgfx_ttm_buffer.o \
	    vmwgfx_fifo.o vmwgfx_irq.o vmwgfx_ldu.o vmwgfx_ttm_glue.o \
	    vmwgfx_overlay.o vmwgfx_marker.o vmwgfx_gmrid_manager.o \
-	    vmwgfx_fence.o vmwgfx_dmabuf.o vmwgfx_scrn.o vmwgfx_context.o \
+	    vmwgfx_fence.o vmwgfx_bo.o vmwgfx_scrn.o vmwgfx_context.o \
	    vmwgfx_surface.o vmwgfx_prime.o vmwgfx_mob.o vmwgfx_shader.o \
	    vmwgfx_cmdbuf_res.o vmwgfx_cmdbuf.o vmwgfx_stdu.o \
	    vmwgfx_cotable.o vmwgfx_so.o vmwgfx_binding.o vmwgfx_msg.o \
diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga3d_caps.h b/drivers/gpu/drm/vmwgfx/device_include/svga3d_caps.h
index 9ce2466a5d00..69c4253fbfbb 100644
--- a/drivers/gpu/drm/vmwgfx/device_include/svga3d_caps.h
+++ b/drivers/gpu/drm/vmwgfx/device_include/svga3d_caps.h
@@ -1,5 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**********************************************************
- * Copyright 2007-2015 VMware, Inc. All rights reserved.
+ * Copyright 2007-2015 VMware, Inc.
 *
 * Permission is hereby granted, free of charge, to any person
 * obtaining a copy of this software and associated documentation
diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga3d_cmd.h b/drivers/gpu/drm/vmwgfx/device_include/svga3d_cmd.h
index 2dfd57c5f463..9cbba0e8ce6a 100644
--- a/drivers/gpu/drm/vmwgfx/device_include/svga3d_cmd.h
+++ b/drivers/gpu/drm/vmwgfx/device_include/svga3d_cmd.h
@@ -1,5 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**********************************************************
- * Copyright 1998-2015 VMware, Inc. All rights reserved.
+ * Copyright 1998-2015 VMware, Inc.
 *
 * Permission is hereby granted, free of charge, to any person
 * obtaining a copy of this software and associated documentation
@@ -46,10 +47,10 @@
 * the SVGA3D protocol and remain reserved; they should not be used in the
 * future.
 *
- * IDs between 1040 and 1999 (inclusive) are available for use by the
+ * IDs between 1040 and 2999 (inclusive) are available for use by the
 * current SVGA3D protocol.
 *
- * FIFO clients other than SVGA3D should stay below 1000, or at 2000
+ * FIFO clients other than SVGA3D should stay below 1000, or at 3000
 * and up.
 */
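A minimal sketch of what the widened ID partition above implies for a guest-side FIFO encoder; this is illustrative only (not part of the patch), using plain C99 types and a made-up helper name:

	#include <stdbool.h>
	#include <stdint.h>

	/*
	 * Per the comment above: SVGA3D now owns FIFO command IDs
	 * 1040..2999 (inclusive); other FIFO clients must stay below
	 * 1000 or at 3000 and up.
	 */
	static inline bool svga3d_owns_fifo_cmd(uint32_t cmd_id)
	{
		return cmd_id >= 1040 && cmd_id <= 2999;
	}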
@@ -89,19 +90,19 @@ typedef enum {
   SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN = 1069,
   SVGA_3D_CMD_SURFACE_DEFINE_V2 = 1070,
   SVGA_3D_CMD_GENERATE_MIPMAPS = 1071,
-   SVGA_3D_CMD_VIDEO_CREATE_DECODER = 1072,
-   SVGA_3D_CMD_VIDEO_DESTROY_DECODER = 1073,
-   SVGA_3D_CMD_VIDEO_CREATE_PROCESSOR = 1074,
-   SVGA_3D_CMD_VIDEO_DESTROY_PROCESSOR = 1075,
-   SVGA_3D_CMD_VIDEO_DECODE_START_FRAME = 1076,
-   SVGA_3D_CMD_VIDEO_DECODE_RENDER = 1077,
-   SVGA_3D_CMD_VIDEO_DECODE_END_FRAME = 1078,
-   SVGA_3D_CMD_VIDEO_PROCESS_FRAME = 1079,
+   SVGA_3D_CMD_DEAD4 = 1072,
+   SVGA_3D_CMD_DEAD5 = 1073,
+   SVGA_3D_CMD_DEAD6 = 1074,
+   SVGA_3D_CMD_DEAD7 = 1075,
+   SVGA_3D_CMD_DEAD8 = 1076,
+   SVGA_3D_CMD_DEAD9 = 1077,
+   SVGA_3D_CMD_DEAD10 = 1078,
+   SVGA_3D_CMD_DEAD11 = 1079,
   SVGA_3D_CMD_ACTIVATE_SURFACE = 1080,
   SVGA_3D_CMD_DEACTIVATE_SURFACE = 1081,
   SVGA_3D_CMD_SCREEN_DMA = 1082,
-   SVGA_3D_CMD_SET_UNITY_SURFACE_COOKIE = 1083,
-   SVGA_3D_CMD_OPEN_CONTEXT_SURFACE = 1084,
+   SVGA_3D_CMD_DEAD1 = 1083,
+   SVGA_3D_CMD_DEAD2 = 1084,
   SVGA_3D_CMD_LOGICOPS_BITBLT = 1085,
   SVGA_3D_CMD_LOGICOPS_TRANSBLT = 1086,
@@ -217,7 +218,7 @@ typedef enum {
   SVGA_3D_CMD_DX_CLEAR_DEPTHSTENCIL_VIEW = 1177,
   SVGA_3D_CMD_DX_PRED_COPY_REGION = 1178,
   SVGA_3D_CMD_DX_PRED_COPY = 1179,
-   SVGA_3D_CMD_DX_STRETCHBLT = 1180,
+   SVGA_3D_CMD_DX_PRESENTBLT = 1180,
   SVGA_3D_CMD_DX_GENMIPS = 1181,
   SVGA_3D_CMD_DX_UPDATE_SUBRESOURCE = 1182,
   SVGA_3D_CMD_DX_READBACK_SUBRESOURCE = 1183,
@@ -254,7 +255,7 @@ typedef enum {
   SVGA_3D_CMD_DX_READBACK_ALL_QUERY = 1214,
   SVGA_3D_CMD_DX_PRED_TRANSFER_FROM_BUFFER = 1215,
   SVGA_3D_CMD_DX_MOB_FENCE_64 = 1216,
-   SVGA_3D_CMD_DX_BIND_SHADER_ON_CONTEXT = 1217,
+   SVGA_3D_CMD_DX_BIND_ALL_SHADER = 1217,
   SVGA_3D_CMD_DX_HINT = 1218,
   SVGA_3D_CMD_DX_BUFFER_UPDATE = 1219,
   SVGA_3D_CMD_DX_SET_VS_CONSTANT_BUFFER_OFFSET = 1220,
@@ -262,17 +263,47 @@ typedef enum {
   SVGA_3D_CMD_DX_SET_GS_CONSTANT_BUFFER_OFFSET = 1222,

   /*
-    * Reserve some IDs to be used for the DX11 shader types.
+    * Reserve some IDs to be used for the SM5 shader types.
    */
   SVGA_3D_CMD_DX_RESERVED1 = 1223,
   SVGA_3D_CMD_DX_RESERVED2 = 1224,
   SVGA_3D_CMD_DX_RESERVED3 = 1225,
-   SVGA_3D_CMD_DX_MAX = 1226,
-   SVGA_3D_CMD_MAX = 1226,
+   SVGA_3D_CMD_DX_COND_BIND_ALL_SHADER = 1226,
+   SVGA_3D_CMD_DX_MAX = 1227,
+
+   SVGA_3D_CMD_SCREEN_COPY = 1227,
+
+   /*
+    * Reserve some IDs to be used for video.
+    */
+   SVGA_3D_CMD_VIDEO_RESERVED1 = 1228,
+   SVGA_3D_CMD_VIDEO_RESERVED2 = 1229,
+   SVGA_3D_CMD_VIDEO_RESERVED3 = 1230,
+   SVGA_3D_CMD_VIDEO_RESERVED4 = 1231,
+   SVGA_3D_CMD_VIDEO_RESERVED5 = 1232,
+   SVGA_3D_CMD_VIDEO_RESERVED6 = 1233,
+   SVGA_3D_CMD_VIDEO_RESERVED7 = 1234,
+   SVGA_3D_CMD_VIDEO_RESERVED8 = 1235,
+
+   SVGA_3D_CMD_GROW_OTABLE = 1236,
+   SVGA_3D_CMD_DX_GROW_COTABLE = 1237,
+   SVGA_3D_CMD_INTRA_SURFACE_COPY = 1238,
+
+   SVGA_3D_CMD_DEFINE_GB_SURFACE_V3 = 1239,
+
+   SVGA_3D_CMD_DX_RESOLVE_COPY = 1240,
+   SVGA_3D_CMD_DX_PRED_RESOLVE_COPY = 1241,
+   SVGA_3D_CMD_DX_PRED_CONVERT_REGION = 1242,
+   SVGA_3D_CMD_DX_PRED_CONVERT = 1243,
+   SVGA_3D_CMD_WHOLE_SURFACE_COPY = 1244,
+
+   SVGA_3D_CMD_MAX = 1245,
   SVGA_3D_CMD_FUTURE_MAX = 3000
} SVGAFifo3dCmdId;

+#define SVGA_NUM_3D_CMD (SVGA_3D_CMD_MAX - SVGA_3D_CMD_BASE)
+
/*
 * FIFO command format definitions:
 */
@@ -301,7 +332,7 @@ typedef
#include "vmware_pack_begin.h"
struct {
   uint32 sid;
-   SVGA3dSurfaceFlags surfaceFlags;
+   SVGA3dSurface1Flags surfaceFlags;
   SVGA3dSurfaceFormat format;
   /*
    * If surfaceFlags has SVGA3D_SURFACE_CUBEMAP bit set, all SVGA3dSurfaceFace
@@ -327,7 +358,7 @@ typedef
#include "vmware_pack_begin.h"
struct {
   uint32 sid;
-   SVGA3dSurfaceFlags surfaceFlags;
+   SVGA3dSurface1Flags surfaceFlags;
   SVGA3dSurfaceFormat format;
   /*
    * If surfaceFlags has SVGA3D_SURFACE_CUBEMAP bit set, all SVGA3dSurfaceFace
@@ -459,6 +490,28 @@ struct {
#include "vmware_pack_end.h"
SVGA3dCmdSurfaceCopy;   /* SVGA_3D_CMD_SURFACE_COPY */

+/*
+ * Perform a surface copy within the same image.
+ * The src/dest boxes are allowed to overlap.
+ */
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   SVGA3dSurfaceImageId surface;
+   SVGA3dCopyBox box;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdIntraSurfaceCopy;   /* SVGA_3D_CMD_INTRA_SURFACE_COPY */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32 srcSid;
+   uint32 destSid;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdWholeSurfaceCopy;   /* SVGA_3D_CMD_WHOLE_SURFACE_COPY */
+
typedef
#include "vmware_pack_begin.h"
struct {
@@ -772,6 +825,17 @@ struct {
#include "vmware_pack_end.h"
SVGA3dVertexElement;

+/*
+ * Should the vertex element respect the stream value?  The high bit of the
+ * stream should be set to indicate that the stream should be respected.  If
+ * the high bit is not set, the stream will be ignored and replaced by the index
+ * of the position of the currently considered vertex element.
+ *
+ * All guests should set this bit and correctly specify the stream going
+ * forward.
+ */
+#define SVGA3D_VERTEX_ELEMENT_RESPECT_STREAM (1 << 7)
+
typedef
#include "vmware_pack_begin.h"
struct {
@@ -1102,8 +1166,6 @@ struct {
#include "vmware_pack_end.h"
SVGA3dCmdGenerateMipmaps;   /* SVGA_3D_CMD_GENERATE_MIPMAPS */

-
-
typedef
#include "vmware_pack_begin.h"
struct {
@@ -1147,38 +1209,6 @@ struct SVGA3dCmdScreenDMA {
SVGA3dCmdScreenDMA;   /* SVGA_3D_CMD_SCREEN_DMA */

/*
- * Set Unity Surface Cookie
- *
- * Associates the supplied cookie with the surface id for use with
- * Unity.  This cookie is a hint from guest to host, there is no way
- * for the guest to readback the cookie and the host is free to drop
- * the cookie association at will.  The default value for the cookie
- * on all surfaces is 0.
- */
-
-typedef
-#include "vmware_pack_begin.h"
-struct SVGA3dCmdSetUnitySurfaceCookie {
-   uint32 sid;
-   uint64 cookie;
-}
-#include "vmware_pack_end.h"
-SVGA3dCmdSetUnitySurfaceCookie;   /* SVGA_3D_CMD_SET_UNITY_SURFACE_COOKIE */
-
-/*
- * Open a context-specific surface in a non-context-specific manner.
- */
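An illustrative use of the SVGA3D_VERTEX_ELEMENT_RESPECT_STREAM bit introduced in the hunk above; a sketch only (not part of the patch), assuming the stream value fits in the low bits as the comment describes:

	#include <stdint.h>

	/*
	 * Per the comment above, a stream value is only honored when the
	 * high bit is set; otherwise the device substitutes the element's
	 * own index for the stream.
	 */
	static inline uint32_t svga3d_encode_stream(uint32_t stream)
	{
		return stream | SVGA3D_VERTEX_ELEMENT_RESPECT_STREAM;
	}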
-
-typedef
-#include "vmware_pack_begin.h"
-struct SVGA3dCmdOpenContextSurface {
-   uint32 sid;
-}
-#include "vmware_pack_end.h"
-SVGA3dCmdOpenContextSurface;   /* SVGA_3D_CMD_OPEN_CONTEXT_SURFACE */
-
-
/*
 * Logic ops
 */
@@ -1324,7 +1354,7 @@ typedef
#include "vmware_pack_begin.h"
struct {
   SVGA3dSurfaceFormat format;
-   SVGA3dSurfaceFlags surfaceFlags;
+   SVGA3dSurface1Flags surface1Flags;
   uint32 numMipLevels;
   uint32 multisampleCount;
   SVGA3dTextureFilter autogenFilter;
@@ -1332,7 +1362,11 @@ struct {
   SVGAMobId mobid;
   uint32 arraySize;
   uint32 mobPitch;
-   uint32 pad[5];
+   SVGA3dSurface2Flags surface2Flags;
+   uint8 multisamplePattern;
+   uint8 qualityLevel;
+   uint8 pad0[2];
+   uint32 pad1[3];
}
#include "vmware_pack_end.h"
SVGAOTableSurfaceEntry;
@@ -1360,7 +1394,8 @@ struct {
SVGAOTableShaderEntry;
#define SVGA3D_OTABLE_SHADER_ENTRY_SIZE (sizeof(SVGAOTableShaderEntry))

-#define SVGA_STFLAG_PRIMARY (1 << 0)
+#define SVGA_STFLAG_PRIMARY  (1 << 0)
+#define SVGA_STFLAG_RESERVED (1 << 1) /* Added with cap SVGA_CAP_HP_CMD_QUEUE */
typedef uint32 SVGAScreenTargetFlags;

typedef
@@ -1528,6 +1563,25 @@ struct {
#include "vmware_pack_end.h"
SVGA3dCmdSetOTableBase64;   /* SVGA_3D_CMD_SET_OTABLE_BASE64 */

+/*
+ * Guests using SVGA_3D_CMD_GROW_OTABLE are promising that
+ * the new OTable contains the same contents as the old one, except possibly
+ * for some new invalid entries at the end.
+ *
+ * (Otherwise, guests should use one of the SetOTableBase commands.)
+ */
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   SVGAOTableType type;
+   PPN64 baseAddress;
+   uint32 sizeInBytes;
+   uint32 validSizeInBytes;
+   SVGAMobFormat ptDepth;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdGrowOTable;   /* SVGA_3D_CMD_GROW_OTABLE */
+
typedef
#include "vmware_pack_begin.h"
struct {
@@ -1615,7 +1669,7 @@ typedef
#include "vmware_pack_begin.h"
struct SVGA3dCmdDefineGBSurface {
   uint32 sid;
-   SVGA3dSurfaceFlags surfaceFlags;
+   SVGA3dSurface1Flags surfaceFlags;
   SVGA3dSurfaceFormat format;
   uint32 numMipLevels;
   uint32 multisampleCount;
@@ -1626,6 +1680,45 @@ struct SVGA3dCmdDefineGBSurface {
SVGA3dCmdDefineGBSurface;   /* SVGA_3D_CMD_DEFINE_GB_SURFACE */

/*
+ * Defines a guest-backed surface, adding the arraySize field.
+ */
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDefineGBSurface_v2 {
+   uint32 sid;
+   SVGA3dSurface1Flags surfaceFlags;
+   SVGA3dSurfaceFormat format;
+   uint32 numMipLevels;
+   uint32 multisampleCount;
+   SVGA3dTextureFilter autogenFilter;
+   SVGA3dSize size;
+   uint32 arraySize;
+   uint32 pad;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDefineGBSurface_v2;   /* SVGA_3D_CMD_DEFINE_GB_SURFACE_V2 */
+
+/*
+ * Defines a guest-backed surface, adding the larger flags.
+ */
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDefineGBSurface_v3 {
+   uint32 sid;
+   SVGA3dSurfaceAllFlags surfaceFlags;
+   SVGA3dSurfaceFormat format;
+   uint32 numMipLevels;
+   uint32 multisampleCount;
+   SVGA3dMSPattern multisamplePattern;
+   SVGA3dMSQualityLevel qualityLevel;
+   SVGA3dTextureFilter autogenFilter;
+   SVGA3dSize size;
+   uint32 arraySize;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDefineGBSurface_v3;   /* SVGA_3D_CMD_DEFINE_GB_SURFACE_V3 */
+
+/*
 * Destroy a guest-backed surface.
 */
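A minimal sketch of how a guest might encode the SVGA_3D_CMD_GROW_OTABLE introduced above, assuming the SVGA3dCmdHeader { id, size } framing that svga3d_cmd.h uses for FIFO 3D commands; submit_fifo() and the size/address arguments are hypothetical:

	static void grow_surface_otable(PPN64 new_base_ppn, uint32 new_size,
					uint32 old_valid_size)
	{
		struct {
			SVGA3dCmdHeader header;
			SVGA3dCmdGrowOTable body;
		} cmd = {
			.header = { SVGA_3D_CMD_GROW_OTABLE,
				    sizeof(SVGA3dCmdGrowOTable) },
			.body = {
				.type = SVGA_OTABLE_SURFACE,
				.baseAddress = new_base_ppn,
				.sizeInBytes = new_size,
				/* Bytes the host may assume are unchanged. */
				.validSizeInBytes = old_valid_size,
				.ptDepth = SVGA3D_MOBFMT_PTDEPTH64_1,
			},
		};

		/*
		 * Per the comment above, the new backing must reproduce the
		 * old contents (plus trailing invalid entries); otherwise
		 * SVGA_3D_CMD_SET_OTABLE_BASE64 must be used instead.
		 */
		submit_fifo(&cmd, sizeof(cmd));   /* hypothetical helper */
	}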
 */

@@ -1672,7 +1765,7 @@ SVGA3dCmdBindGBSurfaceWithPitch;   /* SVGA_3D_CMD_BIND_GB_SURFACE_WITH_PITCH */

typedef
#include "vmware_pack_begin.h"
-struct{
+struct SVGA3dCmdCondBindGBSurface {
   uint32 sid;
   SVGAMobId testMobid;
   SVGAMobId mobid;
@@ -2066,6 +2159,26 @@ struct {
   uint32 mobOffset;
}
#include "vmware_pack_end.h"
-SVGA3dCmdGBMobFence;   /* SVGA_3D_CMD_GB_MOB_FENCE*/
+SVGA3dCmdGBMobFence;   /* SVGA_3D_CMD_GB_MOB_FENCE */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32 stid;
+   SVGA3dSurfaceImageId dest;
+
+   uint32 statusMobId;
+   uint32 statusMobOffset;
+
+   /* Reserved fields */
+   uint32 mustBeInvalidId;
+   uint32 mustBeZero;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdScreenCopy;   /* SVGA_3D_CMD_SCREEN_COPY */
+
+#define SVGA_SCREEN_COPY_STATUS_FAILURE 0x00
+#define SVGA_SCREEN_COPY_STATUS_SUCCESS 0x01
+#define SVGA_SCREEN_COPY_STATUS_INVALID 0xFFFFFFFF

#endif /* _SVGA3D_CMD_H_ */
diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga3d_devcaps.h b/drivers/gpu/drm/vmwgfx/device_include/svga3d_devcaps.h
index c18b663f360f..f256560049bf 100644
--- a/drivers/gpu/drm/vmwgfx/device_include/svga3d_devcaps.h
+++ b/drivers/gpu/drm/vmwgfx/device_include/svga3d_devcaps.h
@@ -1,5 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**********************************************************
- * Copyright 1998-2015 VMware, Inc. All rights reserved.
+ * Copyright 1998-2015 VMware, Inc.
 *
 * Permission is hereby granted, free of charge, to any person
 * obtaining a copy of this software and associated documentation
@@ -229,9 +230,9 @@ typedef enum {
   SVGA3D_DEVCAP_DEAD2 = 94,

   /*
-    * Does the device support the DX commands?
+    * Does the device support DXContexts?
    */
-   SVGA3D_DEVCAP_DX = 95,
+   SVGA3D_DEVCAP_DXCONTEXT = 95,

   /*
    * What is the maximum size of a texture array?
@@ -241,21 +242,47 @@ typedef enum {
   SVGA3D_DEVCAP_MAX_TEXTURE_ARRAY_SIZE = 96,

   /*
-    * What is the maximum number of vertex buffers that can
-    * be used in the DXContext inputAssembly?
+    * What is the maximum number of vertex buffers or vertex input registers
+    * that can be expected to work correctly with a DXContext?
+    *
+    * The guest is allowed to set up to SVGA3D_DX_MAX_VERTEXBUFFERS, but
+    * anything in excess of this cap is not guaranteed to render correctly.
+    *
+    * Similarly, the guest can set up to SVGA3D_DX_MAX_VERTEXINPUTREGISTERS
+    * input registers without the SVGA3D_DEVCAP_SM4_1 cap, or
+    * SVGA3D_DX_SM41_MAX_VERTEXINPUTREGISTERS with the SVGA3D_DEVCAP_SM4_1,
+    * but only the registers up to this cap value are guaranteed to render
+    * correctly.
+    *
+    * If guest-drivers are able to expose a lower-limit, it's recommended
+    * that they clamp to this value.  Otherwise, the host will make a
+    * best-effort on a case-by-case basis if guests exceed this.
    */
   SVGA3D_DEVCAP_DX_MAX_VERTEXBUFFERS = 97,

   /*
-    * What is the maximum number of constant buffers
-    * that can be expected to work correctly with a
-    * DX context?
+    * What is the maximum number of constant buffers that can be expected to
+    * work correctly with a DX context?
+    *
+    * The guest is allowed to set up to SVGA3D_DX_MAX_CONSTBUFFERS, but
+    * anything in excess of this cap is not guaranteed to render correctly.
+    *
+    * If guest-drivers are able to expose a lower-limit, it's recommended
+    * that they clamp to this value.  Otherwise, the host will make a
+    * best-effort on a case-by-case basis if guests exceed this.
    */
   SVGA3D_DEVCAP_DX_MAX_CONSTANT_BUFFERS = 98,

   /*
    * Does the device support provoking vertex control?
-    * If zero, the first vertex will always be the provoking vertex.
+    *
+    * If this cap is present, the provokingVertexLast field in the
+    * rasterizer state is enabled.  (Guests can then set it to FALSE,
+    * meaning that the first vertex is the provoking vertex, or TRUE,
+    * meaning that the last vertex is the provoking vertex.)
+    *
+    * If this cap is FALSE, then guests should set the provokingVertexLast
+    * to FALSE, otherwise rendering behavior is undefined.
    */
   SVGA3D_DEVCAP_DX_PROVOKING_VERTEX = 99,
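A sketch of the clamping the devcap comments above recommend; illustrative only. vmw_devcap_get() is a hypothetical cap-query helper (the real driver reads caps through its register interface), while min_t() is the usual kernel macro:

	static u32 vmw_exposed_vertexbuffers(void)
	{
		/* Hypothetical helper returning the host's cap value. */
		u32 hw_max = vmw_devcap_get(SVGA3D_DEVCAP_DX_MAX_VERTEXBUFFERS);

		/*
		 * Clamp what the driver advertises to the protocol limit;
		 * anything beyond the cap is only best-effort on the host.
		 */
		return min_t(u32, hw_max, SVGA3D_DX_MAX_VERTEXBUFFERS);
	}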
@@ -281,7 +308,7 @@ typedef enum {
   SVGA3D_DEVCAP_DXFMT_BUMPU8V8 = 119,
   SVGA3D_DEVCAP_DXFMT_BUMPL6V5U5 = 120,
   SVGA3D_DEVCAP_DXFMT_BUMPX8L8V8U8 = 121,
-   SVGA3D_DEVCAP_DXFMT_BUMPL8V8U8 = 122,
+   SVGA3D_DEVCAP_DXFMT_FORMAT_DEAD1 = 122,
   SVGA3D_DEVCAP_DXFMT_ARGB_S10E5 = 123,
   SVGA3D_DEVCAP_DXFMT_ARGB_S23E8 = 124,
   SVGA3D_DEVCAP_DXFMT_A2R10G10B10 = 125,
@@ -320,8 +347,8 @@ typedef enum {
   SVGA3D_DEVCAP_DXFMT_R32G32_SINT = 158,
   SVGA3D_DEVCAP_DXFMT_R32G8X24_TYPELESS = 159,
   SVGA3D_DEVCAP_DXFMT_D32_FLOAT_S8X24_UINT = 160,
-   SVGA3D_DEVCAP_DXFMT_R32_FLOAT_X8X24_TYPELESS = 161,
-   SVGA3D_DEVCAP_DXFMT_X32_TYPELESS_G8X24_UINT = 162,
+   SVGA3D_DEVCAP_DXFMT_R32_FLOAT_X8X24 = 161,
+   SVGA3D_DEVCAP_DXFMT_X32_G8X24_UINT = 162,
   SVGA3D_DEVCAP_DXFMT_R10G10B10A2_TYPELESS = 163,
   SVGA3D_DEVCAP_DXFMT_R10G10B10A2_UINT = 164,
   SVGA3D_DEVCAP_DXFMT_R11G11B10_FLOAT = 165,
@@ -339,8 +366,8 @@ typedef enum {
   SVGA3D_DEVCAP_DXFMT_R32_SINT = 177,
   SVGA3D_DEVCAP_DXFMT_R24G8_TYPELESS = 178,
   SVGA3D_DEVCAP_DXFMT_D24_UNORM_S8_UINT = 179,
-   SVGA3D_DEVCAP_DXFMT_R24_UNORM_X8_TYPELESS = 180,
-   SVGA3D_DEVCAP_DXFMT_X24_TYPELESS_G8_UINT = 181,
+   SVGA3D_DEVCAP_DXFMT_R24_UNORM_X8 = 180,
+   SVGA3D_DEVCAP_DXFMT_X24_G8_UINT = 181,
   SVGA3D_DEVCAP_DXFMT_R8G8_TYPELESS = 182,
   SVGA3D_DEVCAP_DXFMT_R8G8_UNORM = 183,
   SVGA3D_DEVCAP_DXFMT_R8G8_UINT = 184,
@@ -404,6 +431,17 @@ typedef enum {
   SVGA3D_DEVCAP_DXFMT_BC4_UNORM = 242,
   SVGA3D_DEVCAP_DXFMT_BC5_UNORM = 243,

+   /*
+    * Advertises shaderModel 4.1 support, independent blend-states,
+    * cube-map arrays, and a higher vertex input registers limit.
+    *
+    * (See documentation on SVGA3D_DEVCAP_DX_MAX_VERTEXBUFFERS.)
+    */
+   SVGA3D_DEVCAP_SM41 = 244,
+
+   SVGA3D_DEVCAP_MULTISAMPLE_2X = 245,
+   SVGA3D_DEVCAP_MULTISAMPLE_4X = 246,
+
   SVGA3D_DEVCAP_MAX   /* This must be the last index. */
} SVGA3dDevCapIndex;

@@ -419,9 +457,7 @@ typedef enum {
 * MIPS: Does the format support mip levels?
 * ARRAY: Does the format support texture arrays?
 * VOLUME: Does the format support having volume?
- * MULTISAMPLE_2: Does the format support 2x multisample?
- * MULTISAMPLE_4: Does the format support 4x multisample?
- * MULTISAMPLE_8: Does the format support 8x multisample?
+ * MULTISAMPLE: Does the format support multisample?
 */
#define SVGA3D_DXFMT_SUPPORTED (1 << 0)
#define SVGA3D_DXFMT_SHADER_SAMPLE (1 << 1)
@@ -432,20 +468,8 @@ typedef enum {
#define SVGA3D_DXFMT_ARRAY (1 << 6)
#define SVGA3D_DXFMT_VOLUME (1 << 7)
#define SVGA3D_DXFMT_DX_VERTEX_BUFFER (1 << 8)
-#define SVGADX_DXFMT_MULTISAMPLE_2 (1 << 9)
-#define SVGADX_DXFMT_MULTISAMPLE_4 (1 << 10)
-#define SVGADX_DXFMT_MULTISAMPLE_8 (1 << 11)
-#define SVGADX_DXFMT_MAX (1 << 12)
-
-/*
- * Convenience mask for any multisample capability.
- *
- * The multisample bits imply both load and render capability.
- */
-#define SVGA3D_DXFMT_MULTISAMPLE ( \
-   SVGADX_DXFMT_MULTISAMPLE_2 | \
-   SVGADX_DXFMT_MULTISAMPLE_4 | \
-   SVGADX_DXFMT_MULTISAMPLE_8 )
+#define SVGA3D_DXFMT_MULTISAMPLE (1 << 9)
+#define SVGA3D_DXFMT_MAX (1 << 10)

typedef union {
   Bool b;
diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga3d_dx.h b/drivers/gpu/drm/vmwgfx/device_include/svga3d_dx.h
index 8c5ae608cfb4..7a49c94df221 100644
--- a/drivers/gpu/drm/vmwgfx/device_include/svga3d_dx.h
+++ b/drivers/gpu/drm/vmwgfx/device_include/svga3d_dx.h
@@ -1,5 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**********************************************************
- * Copyright 2012-2015 VMware, Inc. All rights reserved.
+ * Copyright 2012-2015 VMware, Inc.
 *
 * Permission is hereby granted, free of charge, to any person
 * obtaining a copy of this software and associated documentation
@@ -56,6 +57,16 @@ typedef uint32 SVGA3dInputClassification;
#define SVGA3D_RESOURCE_TYPE_MAX 7
typedef uint32 SVGA3dResourceType;

+#define SVGA3D_COLOR_WRITE_ENABLE_RED   (1 << 0)
+#define SVGA3D_COLOR_WRITE_ENABLE_GREEN (1 << 1)
+#define SVGA3D_COLOR_WRITE_ENABLE_BLUE  (1 << 2)
+#define SVGA3D_COLOR_WRITE_ENABLE_ALPHA (1 << 3)
+#define SVGA3D_COLOR_WRITE_ENABLE_ALL   (SVGA3D_COLOR_WRITE_ENABLE_RED | \
+                                         SVGA3D_COLOR_WRITE_ENABLE_GREEN | \
+                                         SVGA3D_COLOR_WRITE_ENABLE_BLUE | \
+                                         SVGA3D_COLOR_WRITE_ENABLE_ALPHA)
+typedef uint8 SVGA3dColorWriteEnable;
+
#define SVGA3D_DEPTH_WRITE_MASK_ZERO 0
#define SVGA3D_DEPTH_WRITE_MASK_ALL 1
typedef uint8 SVGA3dDepthWriteMask;
@@ -88,17 +99,28 @@ typedef uint8 SVGA3dCullMode;
#define SVGA3D_COMPARISON_MAX 9
typedef uint8 SVGA3dComparisonFunc;

+/*
+ * SVGA3D_MULTISAMPLE_RAST_DISABLE disables MSAA for all primitives.
+ * SVGA3D_MULTISAMPLE_RAST_DISABLE_LINE, which is supported in SM41,
+ * disables MSAA for lines only.
+ */
+#define SVGA3D_MULTISAMPLE_RAST_DISABLE      0
+#define SVGA3D_MULTISAMPLE_RAST_ENABLE       1
+#define SVGA3D_MULTISAMPLE_RAST_DX_MAX       1
+#define SVGA3D_MULTISAMPLE_RAST_DISABLE_LINE 2
+#define SVGA3D_MULTISAMPLE_RAST_MAX          2
+typedef uint8 SVGA3dMultisampleRastEnable;
+
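A sketch of choosing a legal multisampleEnable value for the rasterizer state using the values just defined; illustrative only, with has_sm41 and disable_lines_only as hypothetical flags (SM4.1 availability would come from SVGA3D_DEVCAP_SM41):

	/*
	 * SVGA3D_MULTISAMPLE_RAST_DISABLE_LINE is only valid when the
	 * host advertises SM4.1, per the comment above.
	 */
	SVGA3dMultisampleRastEnable rast =
		(disable_lines_only && has_sm41) ?
			SVGA3D_MULTISAMPLE_RAST_DISABLE_LINE :
			SVGA3D_MULTISAMPLE_RAST_DISABLE;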
#define SVGA3D_DX_MAX_VERTEXBUFFERS 32
+#define SVGA3D_DX_MAX_VERTEXINPUTREGISTERS 16
+#define SVGA3D_DX_SM41_MAX_VERTEXINPUTREGISTERS 32
#define SVGA3D_DX_MAX_SOTARGETS 4
#define SVGA3D_DX_MAX_SRVIEWS 128
#define SVGA3D_DX_MAX_CONSTBUFFERS 16
#define SVGA3D_DX_MAX_SAMPLERS 16

-/* Id limits */
-static const uint32 SVGA3dBlendObjectCountPerContext = 4096;
-static const uint32 SVGA3dDepthStencilObjectCountPerContext = 4096;
+#define SVGA3D_DX_MAX_CONSTBUF_BINDING_SIZE (4096 * 4 * (uint32)sizeof(uint32))

-typedef uint32 SVGA3dSurfaceId;
typedef uint32 SVGA3dShaderResourceViewId;
typedef uint32 SVGA3dRenderTargetViewId;
typedef uint32 SVGA3dDepthStencilViewId;
@@ -194,20 +216,6 @@ SVGA3dCmdDXInvalidateContext;   /* SVGA_3D_CMD_DX_INVALIDATE_CONTEXT */

typedef
#include "vmware_pack_begin.h"
-struct SVGA3dReplyFormatData {
-   uint32 formatSupport;
-   uint32 msaa2xQualityLevels:5;
-   uint32 msaa4xQualityLevels:5;
-   uint32 msaa8xQualityLevels:5;
-   uint32 msaa16xQualityLevels:5;
-   uint32 msaa32xQualityLevels:5;
-   uint32 pad:7;
-}
-#include "vmware_pack_end.h"
-SVGA3dReplyFormatData;
-
-typedef
-#include "vmware_pack_begin.h"
struct SVGA3dCmdDXSetSingleConstantBuffer {
   uint32 slot;
   SVGA3dShaderType type;
@@ -624,6 +632,28 @@ SVGA3dCmdDXPredCopy;   /* SVGA_3D_CMD_DX_PRED_COPY */

typedef
#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXPredConvertRegion {
+   SVGA3dSurfaceId dstSid;
+   uint32 dstSubResource;
+   SVGA3dBox destBox;
+   SVGA3dSurfaceId srcSid;
+   uint32 srcSubResource;
+   SVGA3dBox srcBox;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXPredConvertRegion;   /* SVGA_3D_CMD_DX_PRED_CONVERT_REGION */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXPredConvert {
+   SVGA3dSurfaceId dstSid;
+   SVGA3dSurfaceId srcSid;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXPredConvert;   /* SVGA_3D_CMD_DX_PRED_CONVERT */
+
+typedef
+#include "vmware_pack_begin.h"
struct SVGA3dCmdDXBufferCopy {
   SVGA3dSurfaceId dest;
   SVGA3dSurfaceId src;
@@ -635,23 +665,57 @@ struct SVGA3dCmdDXBufferCopy {
SVGA3dCmdDXBufferCopy;   /* SVGA_3D_CMD_DX_BUFFER_COPY */

-typedef uint32 SVGA3dDXStretchBltMode;
-#define SVGADX_STRETCHBLT_LINEAR         (1 << 0)
-#define SVGADX_STRETCHBLT_FORCE_SRC_SRGB (1 << 1)
+/*
+ * Perform a surface copy between a multisample, and a non-multisampled
+ * surface.
+ */
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   SVGA3dSurfaceId dstSid;
+   uint32 dstSubResource;
+   SVGA3dSurfaceId srcSid;
+   uint32 srcSubResource;
+   SVGA3dSurfaceFormat copyFormat;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXResolveCopy;   /* SVGA_3D_CMD_DX_RESOLVE_COPY */
+
+/*
+ * Perform a predicated surface copy between a multisample, and a
+ * non-multisampled surface.
+ */
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   SVGA3dSurfaceId dstSid;
+   uint32 dstSubResource;
+   SVGA3dSurfaceId srcSid;
+   uint32 srcSubResource;
+   SVGA3dSurfaceFormat copyFormat;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXPredResolveCopy;   /* SVGA_3D_CMD_DX_PRED_RESOLVE_COPY */
+
+typedef uint32 SVGA3dDXPresentBltMode;
+#define SVGADX_PRESENTBLT_LINEAR           (1 << 0)
+#define SVGADX_PRESENTBLT_FORCE_SRC_SRGB   (1 << 1)
+#define SVGADX_PRESENTBLT_FORCE_SRC_XRBIAS (1 << 2)
+#define SVGADX_PRESENTBLT_MODE_MAX         (1 << 3)

typedef
#include "vmware_pack_begin.h"
-struct SVGA3dCmdDXStretchBlt {
+struct SVGA3dCmdDXPresentBlt {
   SVGA3dSurfaceId srcSid;
   uint32 srcSubResource;
   SVGA3dSurfaceId dstSid;
   uint32 destSubResource;
   SVGA3dBox boxSrc;
   SVGA3dBox boxDest;
-   SVGA3dDXStretchBltMode mode;
+   SVGA3dDXPresentBltMode mode;
}
#include "vmware_pack_end.h"
-SVGA3dCmdDXStretchBlt;   /* SVGA_3D_CMD_DX_STRETCHBLT */
+SVGA3dCmdDXPresentBlt;   /* SVGA_3D_CMD_DX_PRESENTBLT */

typedef
#include "vmware_pack_begin.h"
@@ -662,26 +726,6 @@ struct SVGA3dCmdDXGenMips {
SVGA3dCmdDXGenMips;   /* SVGA_3D_CMD_DX_GENMIPS */

/*
- * Defines a resource/DX surface.  Resources share the surfaceId namespace.
- *
- */
-typedef
-#include "vmware_pack_begin.h"
-struct SVGA3dCmdDefineGBSurface_v2 {
-   uint32 sid;
-   SVGA3dSurfaceFlags surfaceFlags;
-   SVGA3dSurfaceFormat format;
-   uint32 numMipLevels;
-   uint32 multisampleCount;
-   SVGA3dTextureFilter autogenFilter;
-   SVGA3dSize size;
-   uint32 arraySize;
-   uint32 pad;
-}
-#include "vmware_pack_end.h"
-SVGA3dCmdDefineGBSurface_v2;   /* SVGA_3D_CMD_DEFINE_GB_SURFACE_V2 */
-
-/*
 * Update a sub-resource in a guest-backed resource.
 * (Inform the device that the guest-contents have been updated.)
 */
@@ -724,7 +768,8 @@ SVGA3dCmdDXInvalidateSubResource;   /* SVGA_3D_CMD_DX_INVALIDATE_SUBRESOURCE */

/*
 * Raw byte wise transfer from a buffer surface into another surface
- * of the requested box.
+ * of the requested box.  Supported if 3d is enabled and SVGA_CAP_DX
+ * is set.  This command does not take a context.
 */
typedef
#include "vmware_pack_begin.h"
@@ -773,6 +818,93 @@ struct SVGA3dCmdDXSurfaceCopyAndReadback {
SVGA3dCmdDXSurfaceCopyAndReadback;
/* SVGA_3D_CMD_DX_SURFACE_COPY_AND_READBACK */

+/*
+ * SVGA_DX_HINT_NONE: Does nothing.
+ *
+ * SVGA_DX_HINT_PREFETCH_OBJECT:
+ * SVGA_DX_HINT_PREEVICT_OBJECT:
+ *      Consumes a SVGAObjectRef, and hints that the host should consider
+ *      fetching/evicting the specified object.
+ *
+ *      An id of SVGA3D_INVALID_ID can be used if the guest isn't sure
+ *      what object was affected.  (For instance, if the guest knows that
+ *      it is about to evict a DXShader, but doesn't know precisely which one,
+ *      the device can still use this to help limit its search, or track
+ *      how many page-outs have happened.)
+ *
+ * SVGA_DX_HINT_PREFETCH_COBJECT:
+ * SVGA_DX_HINT_PREEVICT_COBJECT:
+ *      Same as the above, except they consume an SVGACObjectRef.
+ */
+typedef uint32 SVGADXHintId;
+#define SVGA_DX_HINT_NONE             0
+#define SVGA_DX_HINT_PREFETCH_OBJECT  1
+#define SVGA_DX_HINT_PREEVICT_OBJECT  2
+#define SVGA_DX_HINT_PREFETCH_COBJECT 3
+#define SVGA_DX_HINT_PREEVICT_COBJECT 4
+#define SVGA_DX_HINT_MAX              5
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGAObjectRef {
+   SVGAOTableType type;
+   uint32 id;
+}
+#include "vmware_pack_end.h"
+SVGAObjectRef;
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGACObjectRef {
+   SVGACOTableType type;
+   uint32 cid;
+   uint32 id;
+}
+#include "vmware_pack_end.h"
+SVGACObjectRef;
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXHint {
+   SVGADXHintId hintId;
+
+   /*
+    * Followed by variable sized data depending on the hintId.
+    */
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXHint;
+/* SVGA_3D_CMD_DX_HINT */
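A sketch of encoding a prefetch hint for a context object using the types just defined; illustrative only, assuming the SVGA3dCmdHeader framing from svga3d_cmd.h, with cid and shader_id as hypothetical ids:

	struct {
		SVGA3dCmdHeader header;   /* SVGA_3D_CMD_DX_HINT */
		SVGA3dCmdDXHint hint;
		SVGACObjectRef ref;       /* variable-sized payload */
	} cmd = {
		.header = { SVGA_3D_CMD_DX_HINT,
			    sizeof(SVGA3dCmdDXHint) + sizeof(SVGACObjectRef) },
		.hint = { .hintId = SVGA_DX_HINT_PREFETCH_COBJECT },
		/* COBJECT hints consume an SVGACObjectRef, per the comment. */
		.ref = { .type = SVGA_COTABLE_DXSHADER,
			 .cid = cid, .id = shader_id },
	};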
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXBufferUpdate {
+   SVGA3dSurfaceId sid;
+   uint32 x;
+   uint32 width;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXBufferUpdate;
+/* SVGA_3D_CMD_DX_BUFFER_UPDATE */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXSetConstantBufferOffset {
+   uint32 slot;
+   uint32 offsetInBytes;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXSetConstantBufferOffset;
+
+typedef SVGA3dCmdDXSetConstantBufferOffset SVGA3dCmdDXSetVSConstantBufferOffset;
+/* SVGA_3D_CMD_DX_SET_VS_CONSTANT_BUFFER_OFFSET */
+
+typedef SVGA3dCmdDXSetConstantBufferOffset SVGA3dCmdDXSetPSConstantBufferOffset;
+/* SVGA_3D_CMD_DX_SET_PS_CONSTANT_BUFFER_OFFSET */
+
+typedef SVGA3dCmdDXSetConstantBufferOffset SVGA3dCmdDXSetGSConstantBufferOffset;
+/* SVGA_3D_CMD_DX_SET_GS_CONSTANT_BUFFER_OFFSET */
+

typedef
#include "vmware_pack_begin.h"
@@ -789,7 +921,7 @@ struct {
         uint32 firstArraySlice;
         uint32 mipLevels;
         uint32 arraySize;
-      } tex;
+      } tex;   /* 1d, 2d, 3d, cube */
      struct {
         uint32 firstElement;
         uint32 numElements;
@@ -844,6 +976,7 @@ struct SVGA3dRenderTargetViewDesc {
      struct {
         uint32 firstElement;
         uint32 numElements;
+         uint32 padding0;
      } buffer;
      struct {
         uint32 mipSlice;
@@ -964,9 +1097,6 @@ SVGA3dInputElementDesc;
typedef
#include "vmware_pack_begin.h"
struct {
-   /*
-    * XXX: How many of these can there be?
-    */
   uint32 elid;
   uint32 numDescs;
   SVGA3dInputElementDesc desc[32];
@@ -1007,7 +1137,7 @@ struct SVGA3dDXBlendStatePerRT {
      uint8 srcBlendAlpha;
      uint8 destBlendAlpha;
      uint8 blendOpAlpha;
-      uint8 renderTargetWriteMask;
+      SVGA3dColorWriteEnable renderTargetWriteMask;
      uint8 logicOpEnable;
      uint8 logicOp;
      uint16 pad0;
@@ -1125,7 +1255,7 @@ struct {
   float slopeScaledDepthBias;
   uint8 depthClipEnable;
   uint8 scissorEnable;
-   uint8 multisampleEnable;
+   SVGA3dMultisampleRastEnable multisampleEnable;
   uint8 antialiasedLineEnable;
   float lineWidth;
   uint8 lineStippleEnable;
@@ -1152,7 +1282,7 @@ struct SVGA3dCmdDXDefineRasterizerState {
   float slopeScaledDepthBias;
   uint8 depthClipEnable;
   uint8 scissorEnable;
-   uint8 multisampleEnable;
+   SVGA3dMultisampleRastEnable multisampleEnable;
   uint8 antialiasedLineEnable;
   float lineWidth;
   uint8 lineStippleEnable;
@@ -1222,21 +1352,6 @@ struct SVGA3dCmdDXDestroySamplerState {
#include "vmware_pack_end.h"
SVGA3dCmdDXDestroySamplerState;   /* SVGA_3D_CMD_DX_DESTROY_SAMPLER_STATE */

-/*
- */
-typedef
-#include "vmware_pack_begin.h"
-struct SVGA3dSignatureEntry {
-   uint8 systemValue;
-   uint8 reg;   /* register is a reserved word */
-   uint16 mask;
-   uint8 registerComponentType;
-   uint8 minPrecision;
-   uint16 pad0;
-}
-#include "vmware_pack_end.h"
-SVGA3dSignatureEntry;
-
typedef
#include "vmware_pack_begin.h"
struct SVGA3dCmdDXDefineShader {
@@ -1254,12 +1369,7 @@ struct SVGACOTableDXShaderEntry {
   uint32 sizeInBytes;
   uint32 offsetInBytes;
   SVGAMobId mobid;
-   uint32 numInputSignatureEntries;
-   uint32 numOutputSignatureEntries;
-
-   uint32 numPatchConstantSignatureEntries;
-
-   uint32 pad;
+   uint32 pad[4];
}
#include "vmware_pack_end.h"
SVGACOTableDXShaderEntry;
@@ -1283,6 +1393,25 @@ struct SVGA3dCmdDXBindShader {
#include "vmware_pack_end.h"
SVGA3dCmdDXBindShader;   /* SVGA_3D_CMD_DX_BIND_SHADER */

+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXBindAllShader {
+   uint32 cid;
+   SVGAMobId mobid;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXBindAllShader;   /* SVGA_3D_CMD_DX_BIND_ALL_SHADER */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXCondBindAllShader {
+   uint32 cid;
+   SVGAMobId testMobid;
+   SVGAMobId mobid;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXCondBindAllShader;   /* SVGA_3D_CMD_DX_COND_BIND_ALL_SHADER */
+
/*
 * The maximum number of streamout decl's in each streamout entry.
 */
@@ -1356,7 +1485,6 @@ SVGA3dCmdDXMobFence64;   /* SVGA_3D_CMD_DX_MOB_FENCE_64 */
 *
 * This command allows the guest to bind a mob to a context-object table.
 */
-
typedef
#include "vmware_pack_begin.h"
struct SVGA3dCmdDXSetCOTable {
@@ -1368,6 +1496,26 @@ struct SVGA3dCmdDXSetCOTable {
#include "vmware_pack_end.h"
SVGA3dCmdDXSetCOTable;   /* SVGA_3D_CMD_DX_SET_COTABLE */

+/*
+ * Guests using SVGA_3D_CMD_DX_GROW_COTABLE are promising that
+ * the new COTable contains the same contents as the old one, except possibly
+ * for some new invalid entries at the end.
+ *
+ * If there is an old cotable mob bound, it also has to still be valid.
+ *
+ * (Otherwise, guests should use the DXSetCOTableBase command.)
+ */
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXGrowCOTable {
+   uint32 cid;
+   uint32 mobid;
+   SVGACOTableType type;
+   uint32 validSizeInBytes;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXGrowCOTable;   /* SVGA_3D_CMD_DX_GROW_COTABLE */
+
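A sketch of filling in the per-context grow command just defined; illustrative only, with cid, new_mobid and old_size as hypothetical values. Unlike SVGA3dCmdDXSetCOTable, the grow variant promises the host that the new mob reproduces the old contents, and the old cotable mob must stay valid until the command is processed:

	SVGA3dCmdDXGrowCOTable body = {
		.cid = cid,                      /* DX context to grow */
		.mobid = new_mobid,              /* new, larger backing mob */
		.type = SVGA_COTABLE_DXSHADER,   /* which context table */
		/* Bytes carried over unchanged from the old cotable. */
		.validSizeInBytes = old_size,
	};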
typedef
#include "vmware_pack_begin.h"
struct SVGA3dCmdDXReadbackCOTable {
@@ -1471,7 +1619,7 @@ struct SVGADXContextMobFormat {
   SVGA3dQueryId queryID[SVGA3D_MAX_QUERY];

   SVGA3dCOTableData cotables[SVGA_COTABLE_MAX];
-   uint32 pad7[381];
+   uint32 pad7[380];
}
#include "vmware_pack_end.h"
SVGADXContextMobFormat;
diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga3d_limits.h b/drivers/gpu/drm/vmwgfx/device_include/svga3d_limits.h
index a1c36877ad55..b22a67f15660 100644
--- a/drivers/gpu/drm/vmwgfx/device_include/svga3d_limits.h
+++ b/drivers/gpu/drm/vmwgfx/device_include/svga3d_limits.h
@@ -1,5 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**********************************************************
- * Copyright 2007-2015 VMware, Inc. All rights reserved.
+ * Copyright 2007-2015 VMware, Inc.
 *
 * Permission is hereby granted, free of charge, to any person
 * obtaining a copy of this software and associated documentation
@@ -62,7 +63,9 @@
 * Maximum size in dwords of shader text the SVGA device will allow.
 * Currently 8 MB.
 */
-#define SVGA3D_MAX_SHADER_MEMORY (8 * 1024 * 1024 / sizeof(uint32))
+#define SVGA3D_MAX_SHADER_MEMORY_BYTES (8 * 1024 * 1024)
+#define SVGA3D_MAX_SHADER_MEMORY (SVGA3D_MAX_SHADER_MEMORY_BYTES / \
+                                  sizeof(uint32))

#define SVGA3D_MAX_CLIP_PLANES 6
diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga3d_reg.h b/drivers/gpu/drm/vmwgfx/device_include/svga3d_reg.h
index b44ce648f592..bdfc404c91e3 100644
--- a/drivers/gpu/drm/vmwgfx/device_include/svga3d_reg.h
+++ b/drivers/gpu/drm/vmwgfx/device_include/svga3d_reg.h
@@ -1,5 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**********************************************************
- * Copyright 1998-2015 VMware, Inc. All rights reserved.
+ * Copyright 1998-2015 VMware, Inc.
 *
 * Permission is hereby granted, free of charge, to any person
 * obtaining a copy of this software and associated documentation
diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga3d_surfacedefs.h b/drivers/gpu/drm/vmwgfx/device_include/svga3d_surfacedefs.h
index babe7cb84fc2..f2bfd3d80598 100644
--- a/drivers/gpu/drm/vmwgfx/device_include/svga3d_surfacedefs.h
+++ b/drivers/gpu/drm/vmwgfx/device_include/svga3d_surfacedefs.h
@@ -1,7 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
 *
- * Copyright © 2008-2015 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
+ * Copyright 2008-2015 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
@@ -25,189 +25,355 @@
 *
 **************************************************************************/

-#include <linux/kernel.h>
-
-#ifdef __KERNEL__
-
-#include <drm/vmwgfx_drm.h>
-#define surf_size_struct struct drm_vmw_size
-
-#else /* __KERNEL__ */
+/*
+ * svga3d_surfacedefs.h --
+ *
+ *      Surface definitions and inlineable utilities for SVGA3d.
+ */

-#ifndef ARRAY_SIZE
-#define ARRAY_SIZE(_A) (sizeof(_A) / sizeof((_A)[0]))
-#endif /* ARRAY_SIZE */
+#ifndef _SVGA3D_SURFACEDEFS_H_
+#define _SVGA3D_SURFACEDEFS_H_

-#define max_t(type, x, y) ((x) > (y) ? (x) : (y))
-#define surf_size_struct SVGA3dSize
-#define u32 uint32
+#define INCLUDE_ALLOW_USERLEVEL
+#define INCLUDE_ALLOW_MODULE
+#include "includeCheck.h"

-#endif /* __KERNEL__ */
+#include <linux/kernel.h>
+#include <drm/vmwgfx_drm.h>

#include "svga3d_reg.h"

+#define surf_size_struct struct drm_vmw_size
+
/*
- * enum svga3d_block_desc describes the active data channels in a block.
- *
- * There can be at-most four active channels in a block:
- *    1. Red, bump W, luminance and depth are stored in the first channel.
- *    2. Green, bump V and stencil are stored in the second channel.
- *    3. Blue and bump U are stored in the third channel.
- *    4. Alpha and bump Q are stored in the fourth channel.
- *
- * Block channels can be used to store compressed and buffer data:
- *    1. For compressed formats, only the data channel is used and its size
- *       is equal to that of a singular block in the compression scheme.
- *    2. For buffer formats, only the data channel is used and its size is
- *       exactly one byte in length.
- *    3. In each case the bit depth represent the size of a singular block.
- *
- * Note: Compressed and IEEE formats do not use the bitMask structure.
+ * enum svga3d_block_desc - describes generic properties about formats.
 */
-
enum svga3d_block_desc {
-   SVGA3DBLOCKDESC_NONE = 0,            /* No channels are active */
-   SVGA3DBLOCKDESC_BLUE = 1 << 0,       /* Block with red channel
-                                           data */
-   SVGA3DBLOCKDESC_U = 1 << 0,          /* Block with bump U channel
-                                           data */
-   SVGA3DBLOCKDESC_UV_VIDEO = 1 << 7,   /* Block with alternating video
-                                           U and V */
-   SVGA3DBLOCKDESC_GREEN = 1 << 1,      /* Block with green channel
-                                           data */
-   SVGA3DBLOCKDESC_V = 1 << 1,          /* Block with bump V channel
-                                           data */
-   SVGA3DBLOCKDESC_STENCIL = 1 << 1,    /* Block with a stencil
-                                           channel */
-   SVGA3DBLOCKDESC_RED = 1 << 2,        /* Block with blue channel
-                                           data */
-   SVGA3DBLOCKDESC_W = 1 << 2,          /* Block with bump W channel
-                                           data */
-   SVGA3DBLOCKDESC_LUMINANCE = 1 << 2,  /* Block with luminance channel
-                                           data */
-   SVGA3DBLOCKDESC_Y = 1 << 2,          /* Block with video luminance
-                                           data */
-   SVGA3DBLOCKDESC_DEPTH = 1 << 2,      /* Block with depth channel */
-   SVGA3DBLOCKDESC_ALPHA = 1 << 3,      /* Block with an alpha
-                                           channel */
-   SVGA3DBLOCKDESC_Q = 1 << 3,          /* Block with bump Q channel
-                                           data */
-   SVGA3DBLOCKDESC_BUFFER = 1 << 4,     /* Block stores 1 byte of
-                                           data */
-   SVGA3DBLOCKDESC_COMPRESSED = 1 << 5, /* Block stores n bytes of
-                                           data depending on the
-                                           compression method used */
-   SVGA3DBLOCKDESC_IEEE_FP = 1 << 6,    /* Block stores data in an IEEE
-                                           floating point
-                                           representation in
-                                           all channels */
-   SVGA3DBLOCKDESC_PLANAR_YUV = 1 << 8, /* Three separate blocks store
-                                           data. */
-   SVGA3DBLOCKDESC_U_VIDEO = 1 << 9,    /* Block with U video data */
-   SVGA3DBLOCKDESC_V_VIDEO = 1 << 10,   /* Block with V video data */
-   SVGA3DBLOCKDESC_EXP = 1 << 11,       /* Shared exponent */
-   SVGA3DBLOCKDESC_SRGB = 1 << 12,      /* Data is in sRGB format */
-   SVGA3DBLOCKDESC_2PLANAR_YUV = 1 << 13, /* 2 planes of Y, UV,
-                                             e.g., NV12. */
-   SVGA3DBLOCKDESC_3PLANAR_YUV = 1 << 14, /* 3 planes of separate
-                                             Y, U, V, e.g., YV12. */
-
-   SVGA3DBLOCKDESC_RG = SVGA3DBLOCKDESC_RED |
-                        SVGA3DBLOCKDESC_GREEN,
-   SVGA3DBLOCKDESC_RGB = SVGA3DBLOCKDESC_RG |
-                         SVGA3DBLOCKDESC_BLUE,
-   SVGA3DBLOCKDESC_RGB_SRGB = SVGA3DBLOCKDESC_RGB |
-                              SVGA3DBLOCKDESC_SRGB,
-   SVGA3DBLOCKDESC_RGBA = SVGA3DBLOCKDESC_RGB |
-                          SVGA3DBLOCKDESC_ALPHA,
-   SVGA3DBLOCKDESC_RGBA_SRGB = SVGA3DBLOCKDESC_RGBA |
-                               SVGA3DBLOCKDESC_SRGB,
+   /* Nothing special can be said about this format.
+    */
+   SVGA3DBLOCKDESC_NONE = 0,
+
+   /* Format contains Blue/U data */
+   SVGA3DBLOCKDESC_BLUE = 1 << 0,
+   SVGA3DBLOCKDESC_W = 1 << 0,
+   SVGA3DBLOCKDESC_BUMP_L = 1 << 0,
+
+   /* Format contains Green/V data */
+   SVGA3DBLOCKDESC_GREEN = 1 << 1,
+   SVGA3DBLOCKDESC_V = 1 << 1,
+
+   /* Format contains Red/W/Luminance data */
+   SVGA3DBLOCKDESC_RED = 1 << 2,
+   SVGA3DBLOCKDESC_U = 1 << 2,
+   SVGA3DBLOCKDESC_LUMINANCE = 1 << 2,
+
+   /* Format contains Alpha/Q data */
+   SVGA3DBLOCKDESC_ALPHA = 1 << 3,
+   SVGA3DBLOCKDESC_Q = 1 << 3,
+
+   /* Format is a buffer */
+   SVGA3DBLOCKDESC_BUFFER = 1 << 4,
+
+   /* Format is compressed */
+   SVGA3DBLOCKDESC_COMPRESSED = 1 << 5,
+
+   /* Format uses IEEE floating point */
+   SVGA3DBLOCKDESC_FP = 1 << 6,
+
+   /* Three separate blocks store data. */
+   SVGA3DBLOCKDESC_PLANAR_YUV = 1 << 7,
+
+   /* 2 planes of Y, UV, e.g., NV12. */
+   SVGA3DBLOCKDESC_2PLANAR_YUV = 1 << 8,
+
+   /* 3 planes of separate Y, U, V, e.g., YV12. */
+   SVGA3DBLOCKDESC_3PLANAR_YUV = 1 << 9,
+
+   /* Block with a stencil channel */
+   SVGA3DBLOCKDESC_STENCIL = 1 << 11,
+
+   /* Typeless format */
+   SVGA3DBLOCKDESC_TYPELESS = 1 << 12,
+
+   /* Channels are signed integers */
+   SVGA3DBLOCKDESC_SINT = 1 << 13,
+
+   /* Channels are unsigned integers */
+   SVGA3DBLOCKDESC_UINT = 1 << 14,
+
+   /* Channels are normalized (when sampling) */
+   SVGA3DBLOCKDESC_NORM = 1 << 15,
+
+   /* Channels are in SRGB */
+   SVGA3DBLOCKDESC_SRGB = 1 << 16,
+
+   /* Shared exponent */
+   SVGA3DBLOCKDESC_EXP = 1 << 17,
+
+   /* Format contains color data. */
+   SVGA3DBLOCKDESC_COLOR = 1 << 18,
+   /* Format contains depth data. */
+   SVGA3DBLOCKDESC_DEPTH = 1 << 19,
+   /* Format contains bump data. */
+   SVGA3DBLOCKDESC_BUMP = 1 << 20,
+
+   /* Format contains YUV video data. */
+   SVGA3DBLOCKDESC_YUV_VIDEO = 1 << 21,
+
+   /* For mixed unsigned/signed formats. */
+   SVGA3DBLOCKDESC_MIXED = 1 << 22,
+
+   /* For distinguishing CxV8U8. */
+   SVGA3DBLOCKDESC_CX = 1 << 23,
+
+   /* Different compressed format groups.
+    */
+   SVGA3DBLOCKDESC_BC1 = 1 << 24,
+   SVGA3DBLOCKDESC_BC2 = 1 << 25,
+   SVGA3DBLOCKDESC_BC3 = 1 << 26,
+   SVGA3DBLOCKDESC_BC4 = 1 << 27,
+   SVGA3DBLOCKDESC_BC5 = 1 << 28,
+
+   SVGA3DBLOCKDESC_A_UINT = SVGA3DBLOCKDESC_ALPHA |
+                            SVGA3DBLOCKDESC_UINT |
+                            SVGA3DBLOCKDESC_COLOR,
+   SVGA3DBLOCKDESC_A_UNORM = SVGA3DBLOCKDESC_A_UINT |
+                             SVGA3DBLOCKDESC_NORM,
+   SVGA3DBLOCKDESC_R_UINT = SVGA3DBLOCKDESC_RED |
+                            SVGA3DBLOCKDESC_UINT |
+                            SVGA3DBLOCKDESC_COLOR,
+   SVGA3DBLOCKDESC_R_UNORM = SVGA3DBLOCKDESC_R_UINT |
+                             SVGA3DBLOCKDESC_NORM,
+   SVGA3DBLOCKDESC_R_SINT = SVGA3DBLOCKDESC_RED |
+                            SVGA3DBLOCKDESC_SINT |
+                            SVGA3DBLOCKDESC_COLOR,
+   SVGA3DBLOCKDESC_R_SNORM = SVGA3DBLOCKDESC_R_SINT |
+                             SVGA3DBLOCKDESC_NORM,
+   SVGA3DBLOCKDESC_G_UINT = SVGA3DBLOCKDESC_GREEN |
+                            SVGA3DBLOCKDESC_UINT |
+                            SVGA3DBLOCKDESC_COLOR,
+   SVGA3DBLOCKDESC_RG_UINT = SVGA3DBLOCKDESC_RED |
+                             SVGA3DBLOCKDESC_GREEN |
+                             SVGA3DBLOCKDESC_UINT |
+                             SVGA3DBLOCKDESC_COLOR,
+   SVGA3DBLOCKDESC_RG_UNORM = SVGA3DBLOCKDESC_RG_UINT |
+                              SVGA3DBLOCKDESC_NORM,
+   SVGA3DBLOCKDESC_RG_SINT = SVGA3DBLOCKDESC_RED |
+                             SVGA3DBLOCKDESC_GREEN |
+                             SVGA3DBLOCKDESC_SINT |
+                             SVGA3DBLOCKDESC_COLOR,
+   SVGA3DBLOCKDESC_RG_SNORM = SVGA3DBLOCKDESC_RG_SINT |
+                              SVGA3DBLOCKDESC_NORM,
+   SVGA3DBLOCKDESC_RGB_UINT = SVGA3DBLOCKDESC_RED |
+                              SVGA3DBLOCKDESC_GREEN |
+                              SVGA3DBLOCKDESC_BLUE |
+                              SVGA3DBLOCKDESC_UINT |
+                              SVGA3DBLOCKDESC_COLOR,
+   SVGA3DBLOCKDESC_RGB_SINT = SVGA3DBLOCKDESC_RED |
+                              SVGA3DBLOCKDESC_GREEN |
+                              SVGA3DBLOCKDESC_BLUE |
+                              SVGA3DBLOCKDESC_SINT |
+                              SVGA3DBLOCKDESC_COLOR,
+   SVGA3DBLOCKDESC_RGB_UNORM = SVGA3DBLOCKDESC_RGB_UINT |
+                               SVGA3DBLOCKDESC_NORM,
+   SVGA3DBLOCKDESC_RGB_UNORM_SRGB = SVGA3DBLOCKDESC_RGB_UNORM |
+                                    SVGA3DBLOCKDESC_SRGB,
+   SVGA3DBLOCKDESC_RGBA_UINT = SVGA3DBLOCKDESC_RED |
+                               SVGA3DBLOCKDESC_GREEN |
+                               SVGA3DBLOCKDESC_BLUE |
+                               SVGA3DBLOCKDESC_ALPHA |
+                               SVGA3DBLOCKDESC_UINT |
+                               SVGA3DBLOCKDESC_COLOR,
+   SVGA3DBLOCKDESC_RGBA_UNORM = SVGA3DBLOCKDESC_RGBA_UINT |
+                                SVGA3DBLOCKDESC_NORM,
+   SVGA3DBLOCKDESC_RGBA_UNORM_SRGB = SVGA3DBLOCKDESC_RGBA_UNORM |
+                                     SVGA3DBLOCKDESC_SRGB,
+   SVGA3DBLOCKDESC_RGBA_SINT = SVGA3DBLOCKDESC_RED |
+                               SVGA3DBLOCKDESC_GREEN |
+                               SVGA3DBLOCKDESC_BLUE |
+                               SVGA3DBLOCKDESC_ALPHA |
+                               SVGA3DBLOCKDESC_SINT |
+                               SVGA3DBLOCKDESC_COLOR,
+   SVGA3DBLOCKDESC_RGBA_SNORM = SVGA3DBLOCKDESC_RGBA_SINT |
+                                SVGA3DBLOCKDESC_NORM,
+   SVGA3DBLOCKDESC_RGBA_FP = SVGA3DBLOCKDESC_RED |
+                             SVGA3DBLOCKDESC_GREEN |
+                             SVGA3DBLOCKDESC_BLUE |
+                             SVGA3DBLOCKDESC_ALPHA |
+                             SVGA3DBLOCKDESC_FP |
+                             SVGA3DBLOCKDESC_COLOR,
   SVGA3DBLOCKDESC_UV = SVGA3DBLOCKDESC_U |
-                        SVGA3DBLOCKDESC_V,
+                        SVGA3DBLOCKDESC_V |
+                        SVGA3DBLOCKDESC_BUMP,
   SVGA3DBLOCKDESC_UVL = SVGA3DBLOCKDESC_UV |
-                         SVGA3DBLOCKDESC_LUMINANCE,
+                         SVGA3DBLOCKDESC_BUMP_L |
+                         SVGA3DBLOCKDESC_MIXED |
+                         SVGA3DBLOCKDESC_BUMP,
   SVGA3DBLOCKDESC_UVW = SVGA3DBLOCKDESC_UV |
-                         SVGA3DBLOCKDESC_W,
+                         SVGA3DBLOCKDESC_W |
+                         SVGA3DBLOCKDESC_BUMP,
   SVGA3DBLOCKDESC_UVWA = SVGA3DBLOCKDESC_UVW |
-                          SVGA3DBLOCKDESC_ALPHA,
+                          SVGA3DBLOCKDESC_ALPHA |
+                          SVGA3DBLOCKDESC_MIXED |
+                          SVGA3DBLOCKDESC_BUMP,
   SVGA3DBLOCKDESC_UVWQ = SVGA3DBLOCKDESC_U |
-                          SVGA3DBLOCKDESC_V |
-                          SVGA3DBLOCKDESC_W |
-                          SVGA3DBLOCKDESC_Q,
-   SVGA3DBLOCKDESC_LA = SVGA3DBLOCKDESC_LUMINANCE |
-                        SVGA3DBLOCKDESC_ALPHA,
+                          SVGA3DBLOCKDESC_V |
+                          SVGA3DBLOCKDESC_W |
+                          SVGA3DBLOCKDESC_Q |
+                          SVGA3DBLOCKDESC_BUMP,
+   SVGA3DBLOCKDESC_L_UNORM = SVGA3DBLOCKDESC_LUMINANCE |
+                             SVGA3DBLOCKDESC_UINT |
+                             SVGA3DBLOCKDESC_NORM |
+                             SVGA3DBLOCKDESC_COLOR,
+   SVGA3DBLOCKDESC_LA_UNORM = SVGA3DBLOCKDESC_LUMINANCE |
+                              SVGA3DBLOCKDESC_ALPHA |
+                              SVGA3DBLOCKDESC_UINT |
+                              SVGA3DBLOCKDESC_NORM |
+                              SVGA3DBLOCKDESC_COLOR,
   SVGA3DBLOCKDESC_R_FP = SVGA3DBLOCKDESC_RED |
-                          SVGA3DBLOCKDESC_IEEE_FP,
+                          SVGA3DBLOCKDESC_FP |
+                          SVGA3DBLOCKDESC_COLOR,
   SVGA3DBLOCKDESC_RG_FP = SVGA3DBLOCKDESC_R_FP |
-                           SVGA3DBLOCKDESC_GREEN,
+                           SVGA3DBLOCKDESC_GREEN |
+                           SVGA3DBLOCKDESC_COLOR,
   SVGA3DBLOCKDESC_RGB_FP = SVGA3DBLOCKDESC_RG_FP |
-                            SVGA3DBLOCKDESC_BLUE,
-   SVGA3DBLOCKDESC_RGBA_FP = SVGA3DBLOCKDESC_RGB_FP |
-                             SVGA3DBLOCKDESC_ALPHA,
-   SVGA3DBLOCKDESC_DS = SVGA3DBLOCKDESC_DEPTH |
-                        SVGA3DBLOCKDESC_STENCIL,
-   SVGA3DBLOCKDESC_YUV = SVGA3DBLOCKDESC_UV_VIDEO |
-                         SVGA3DBLOCKDESC_Y,
+                            SVGA3DBLOCKDESC_BLUE |
+                            SVGA3DBLOCKDESC_COLOR,
+   SVGA3DBLOCKDESC_YUV = SVGA3DBLOCKDESC_YUV_VIDEO |
+                         SVGA3DBLOCKDESC_COLOR,
   SVGA3DBLOCKDESC_AYUV = SVGA3DBLOCKDESC_ALPHA |
-                          SVGA3DBLOCKDESC_Y |
-                          SVGA3DBLOCKDESC_U_VIDEO |
-                          SVGA3DBLOCKDESC_V_VIDEO,
-   SVGA3DBLOCKDESC_RGBE = SVGA3DBLOCKDESC_RGB |
-                          SVGA3DBLOCKDESC_EXP,
-   SVGA3DBLOCKDESC_COMPRESSED_SRGB = SVGA3DBLOCKDESC_COMPRESSED |
-                                     SVGA3DBLOCKDESC_SRGB,
-   SVGA3DBLOCKDESC_NV12 = SVGA3DBLOCKDESC_PLANAR_YUV |
-                          SVGA3DBLOCKDESC_2PLANAR_YUV,
-   SVGA3DBLOCKDESC_YV12 = SVGA3DBLOCKDESC_PLANAR_YUV |
-                          SVGA3DBLOCKDESC_3PLANAR_YUV,
+                          SVGA3DBLOCKDESC_YUV_VIDEO |
+                          SVGA3DBLOCKDESC_COLOR,
+   SVGA3DBLOCKDESC_RGB_EXP = SVGA3DBLOCKDESC_RED |
+                             SVGA3DBLOCKDESC_GREEN |
+                             SVGA3DBLOCKDESC_BLUE |
+                             SVGA3DBLOCKDESC_EXP |
+                             SVGA3DBLOCKDESC_COLOR,
+
+   SVGA3DBLOCKDESC_COMP_TYPELESS = SVGA3DBLOCKDESC_COMPRESSED |
+                                   SVGA3DBLOCKDESC_TYPELESS,
+   SVGA3DBLOCKDESC_COMP_UNORM = SVGA3DBLOCKDESC_COMPRESSED |
+                                SVGA3DBLOCKDESC_UINT |
+                                SVGA3DBLOCKDESC_NORM |
+                                SVGA3DBLOCKDESC_COLOR,
+   SVGA3DBLOCKDESC_COMP_SNORM = SVGA3DBLOCKDESC_COMPRESSED |
+                                SVGA3DBLOCKDESC_SINT |
+                                SVGA3DBLOCKDESC_NORM |
+                                SVGA3DBLOCKDESC_COLOR,
+   SVGA3DBLOCKDESC_COMP_UNORM_SRGB = SVGA3DBLOCKDESC_COMP_UNORM |
+                                     SVGA3DBLOCKDESC_SRGB,
+   SVGA3DBLOCKDESC_BC1_COMP_TYPELESS = SVGA3DBLOCKDESC_BC1 |
+                                       SVGA3DBLOCKDESC_COMP_TYPELESS,
+   SVGA3DBLOCKDESC_BC1_COMP_UNORM = SVGA3DBLOCKDESC_BC1 |
+                                    SVGA3DBLOCKDESC_COMP_UNORM,
+   SVGA3DBLOCKDESC_BC1_COMP_UNORM_SRGB = SVGA3DBLOCKDESC_BC1_COMP_UNORM |
+                                         SVGA3DBLOCKDESC_SRGB,
+   SVGA3DBLOCKDESC_BC2_COMP_TYPELESS = SVGA3DBLOCKDESC_BC2 |
+                                       SVGA3DBLOCKDESC_COMP_TYPELESS,
+   SVGA3DBLOCKDESC_BC2_COMP_UNORM = SVGA3DBLOCKDESC_BC2 |
+                                    SVGA3DBLOCKDESC_COMP_UNORM,
+   SVGA3DBLOCKDESC_BC2_COMP_UNORM_SRGB = SVGA3DBLOCKDESC_BC2_COMP_UNORM |
+                                         SVGA3DBLOCKDESC_SRGB,
+   SVGA3DBLOCKDESC_BC3_COMP_TYPELESS = SVGA3DBLOCKDESC_BC3 |
+                                       SVGA3DBLOCKDESC_COMP_TYPELESS,
+   SVGA3DBLOCKDESC_BC3_COMP_UNORM = SVGA3DBLOCKDESC_BC3 |
+                                    SVGA3DBLOCKDESC_COMP_UNORM,
+   SVGA3DBLOCKDESC_BC3_COMP_UNORM_SRGB = SVGA3DBLOCKDESC_BC3_COMP_UNORM |
+                                         SVGA3DBLOCKDESC_SRGB,
+   SVGA3DBLOCKDESC_BC4_COMP_TYPELESS = SVGA3DBLOCKDESC_BC4 |
+                                       SVGA3DBLOCKDESC_COMP_TYPELESS,
+   SVGA3DBLOCKDESC_BC4_COMP_UNORM = SVGA3DBLOCKDESC_BC4 |
+                                    SVGA3DBLOCKDESC_COMP_UNORM,
+   SVGA3DBLOCKDESC_BC4_COMP_SNORM = SVGA3DBLOCKDESC_BC4 |
+                                    SVGA3DBLOCKDESC_COMP_SNORM,
+   SVGA3DBLOCKDESC_BC5_COMP_TYPELESS = SVGA3DBLOCKDESC_BC5 |
+                                       SVGA3DBLOCKDESC_COMP_TYPELESS,
+   SVGA3DBLOCKDESC_BC5_COMP_UNORM = SVGA3DBLOCKDESC_BC5 |
+                                    SVGA3DBLOCKDESC_COMP_UNORM,
+   SVGA3DBLOCKDESC_BC5_COMP_SNORM = SVGA3DBLOCKDESC_BC5 |
+                                    SVGA3DBLOCKDESC_COMP_SNORM,
+
+   SVGA3DBLOCKDESC_NV12 = SVGA3DBLOCKDESC_YUV_VIDEO |
+                          SVGA3DBLOCKDESC_PLANAR_YUV |
+                          SVGA3DBLOCKDESC_2PLANAR_YUV |
+                          SVGA3DBLOCKDESC_COLOR,
+   SVGA3DBLOCKDESC_YV12 = SVGA3DBLOCKDESC_YUV_VIDEO |
+                          SVGA3DBLOCKDESC_PLANAR_YUV |
+                          SVGA3DBLOCKDESC_3PLANAR_YUV |
+                          SVGA3DBLOCKDESC_COLOR,
+
+   SVGA3DBLOCKDESC_DEPTH_UINT = SVGA3DBLOCKDESC_DEPTH |
+                                SVGA3DBLOCKDESC_UINT,
+   SVGA3DBLOCKDESC_DEPTH_UNORM =
+                                SVGA3DBLOCKDESC_DEPTH_UINT |
+                                SVGA3DBLOCKDESC_NORM,
+   SVGA3DBLOCKDESC_DS = SVGA3DBLOCKDESC_DEPTH |
+                        SVGA3DBLOCKDESC_STENCIL,
+   SVGA3DBLOCKDESC_DS_UINT = SVGA3DBLOCKDESC_DEPTH |
+                             SVGA3DBLOCKDESC_STENCIL |
+                             SVGA3DBLOCKDESC_UINT,
+   SVGA3DBLOCKDESC_DS_UNORM = SVGA3DBLOCKDESC_DS_UINT |
+                              SVGA3DBLOCKDESC_NORM,
+   SVGA3DBLOCKDESC_DEPTH_FP = SVGA3DBLOCKDESC_DEPTH |
+                              SVGA3DBLOCKDESC_FP,
+
+   SVGA3DBLOCKDESC_UV_UINT = SVGA3DBLOCKDESC_UV |
+                             SVGA3DBLOCKDESC_UINT,
+   SVGA3DBLOCKDESC_UV_SNORM = SVGA3DBLOCKDESC_UV |
+                              SVGA3DBLOCKDESC_SINT |
+                              SVGA3DBLOCKDESC_NORM,
+   SVGA3DBLOCKDESC_UVCX_SNORM = SVGA3DBLOCKDESC_UV_SNORM |
+                                SVGA3DBLOCKDESC_CX,
+   SVGA3DBLOCKDESC_UVWQ_SNORM = SVGA3DBLOCKDESC_UVWQ |
+                                SVGA3DBLOCKDESC_SINT |
+                                SVGA3DBLOCKDESC_NORM,
};

-/*
- * SVGA3dSurfaceDesc describes the actual pixel data.
- *
- * This structure provides the following information:
- *    1. Block description.
- *    2. Dimensions of a block in the surface.
- *    3. Size of block in bytes.
- *    4. Bit depth of the pixel data.
- *    5. Channel bit depths and masks (if applicable).
- */
struct svga3d_channel_def {
   union {
      u8 blue;
-      u8 u;
+      u8 w_bump;
+      u8 l_bump;
      u8 uv_video;
      u8 u_video;
   };
   union {
      u8 green;
-      u8 v;
      u8 stencil;
+      u8 v_bump;
      u8 v_video;
   };
   union {
      u8 red;
-      u8 w;
+      u8 u_bump;
      u8 luminance;
-      u8 y;
+      u8 y_video;
      u8 depth;
      u8 data;
   };
   union {
      u8 alpha;
-      u8 q;
+      u8 q_bump;
      u8 exp;
   };
};

+/*
+ * struct svga3d_surface_desc - describes the actual pixel data.
+ *
+ * @format: Format
+ * @block_desc: Block description
+ * @block_size: Dimensions in pixels of a block
+ * @bytes_per_block: Size of block in bytes
+ * @pitch_bytes_per_block: Size of a block in bytes for purposes of pitch
+ * @bit_depth: Channel bit depths
+ * @bit_offset: Channel bit masks (in bits offset from the start of the
+ *              pointer)
+ */
struct svga3d_surface_desc {
   SVGA3dSurfaceFormat format;
   enum svga3d_block_desc block_desc;
+
   surf_size_struct block_size;
   u32 bytes_per_block;
   u32 pitch_bytes_per_block;
-   u32 total_bit_depth;
   struct svga3d_channel_def bit_depth;
   struct svga3d_channel_def bit_offset;
};

@@ -215,729 +381,728 @@ struct svga3d_surface_desc {
static const struct svga3d_surface_desc svga3d_surface_descs[] = {
   {SVGA3D_FORMAT_INVALID, SVGA3DBLOCKDESC_NONE,
    {1, 1, 1}, 0, 0,
-   0, {{0}, {0}, {0}, {0}},
+   {{0}, {0}, {0}, {0}},
    {{0}, {0}, {0}, {0}}},

-   {SVGA3D_X8R8G8B8, SVGA3DBLOCKDESC_RGB,
+   {SVGA3D_X8R8G8B8, SVGA3DBLOCKDESC_RGB_UNORM,
    {1, 1, 1}, 4, 4,
-   24, {{8}, {8}, {8}, {0}},
+   {{8}, {8}, {8}, {0}},
    {{0}, {8}, {16}, {24}}},

-   {SVGA3D_A8R8G8B8, SVGA3DBLOCKDESC_RGBA,
+   {SVGA3D_A8R8G8B8, SVGA3DBLOCKDESC_RGBA_UNORM,
    {1, 1, 1}, 4, 4,
-   32, {{8}, {8}, {8}, {8}},
+   {{8}, {8}, {8}, {8}},
    {{0}, {8}, {16}, {24}}},

-   {SVGA3D_R5G6B5, SVGA3DBLOCKDESC_RGB,
+   {SVGA3D_R5G6B5, SVGA3DBLOCKDESC_RGB_UNORM,
    {1, 1, 1}, 2, 2,
-   16, {{5}, {6}, {5}, {0}},
+   {{5}, {6}, {5}, {0}},
    {{0}, {5}, {11}, {0}}},

-   {SVGA3D_X1R5G5B5, SVGA3DBLOCKDESC_RGB,
+   {SVGA3D_X1R5G5B5, SVGA3DBLOCKDESC_RGB_UNORM,
    {1, 1, 1}, 2, 2,
-   15, {{5}, {5}, {5}, {0}},
+   {{5}, {5}, {5}, {0}},
    {{0}, {5}, {10}, {0}}},

-   {SVGA3D_A1R5G5B5, SVGA3DBLOCKDESC_RGBA,
+   {SVGA3D_A1R5G5B5, SVGA3DBLOCKDESC_RGBA_UNORM,
    {1, 1, 1}, 2, 2,
-   16, {{5}, {5}, {5}, {1}},
+   {{5}, {5}, {5}, {1}},
    {{0}, {5}, {10}, {15}}},

-   {SVGA3D_A4R4G4B4, SVGA3DBLOCKDESC_RGBA,
+   {SVGA3D_A4R4G4B4, SVGA3DBLOCKDESC_RGBA_UNORM,
    {1, 1, 1}, 2, 2,
-   16, {{4}, {4}, {4}, {4}},
+   {{4}, {4}, {4}, {4}},
    {{0}, {4}, {8}, {12}}},

-   {SVGA3D_Z_D32, SVGA3DBLOCKDESC_DEPTH,
+   {SVGA3D_Z_D32, SVGA3DBLOCKDESC_DEPTH_UNORM,
    {1, 1, 1}, 4, 4,
-   32, {{0}, {0}, {32}, {0}},
+   {{0}, {0}, {32}, {0}},
    {{0}, {0}, {0}, {0}}},
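A sketch of the block math these tables drive, now that total_bit_depth is gone and the per-block fields carry the sizing; illustrative only, mirroring the struct fields above (DIV_ROUND_UP is the kernel macro). For example, UYVY below uses a {2, 1, 1} block of 4 bytes, i.e. two pixels per 4-byte block:

	/* Row pitch in bytes for a mip of width w pixels. */
	static inline u32 svga3d_pitch(const struct svga3d_surface_desc *desc,
				       u32 w)
	{
		/* Round partial blocks up; compressed formats need this. */
		u32 blocks = DIV_ROUND_UP(w, desc->block_size.width);

		return blocks * desc->pitch_bytes_per_block;
	}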
-   {SVGA3D_Z_D16, SVGA3DBLOCKDESC_DEPTH,
+   {SVGA3D_Z_D16, SVGA3DBLOCKDESC_DEPTH_UNORM,
    {1, 1, 1}, 2, 2,
-   16, {{0}, {0}, {16}, {0}},
+   {{0}, {0}, {16}, {0}},
    {{0}, {0}, {0}, {0}}},

-   {SVGA3D_Z_D24S8, SVGA3DBLOCKDESC_DS,
+   {SVGA3D_Z_D24S8, SVGA3DBLOCKDESC_DS_UNORM,
    {1, 1, 1}, 4, 4,
-   32, {{0}, {8}, {24}, {0}},
-   {{0}, {24}, {0}, {0}}},
+   {{0}, {8}, {24}, {0}},
+   {{0}, {0}, {8}, {0}}},

-   {SVGA3D_Z_D15S1, SVGA3DBLOCKDESC_DS,
+   {SVGA3D_Z_D15S1, SVGA3DBLOCKDESC_DS_UNORM,
    {1, 1, 1}, 2, 2,
-   16, {{0}, {1}, {15}, {0}},
-   {{0}, {15}, {0}, {0}}},
+   {{0}, {1}, {15}, {0}},
+   {{0}, {0}, {1}, {0}}},

-   {SVGA3D_LUMINANCE8, SVGA3DBLOCKDESC_LUMINANCE,
+   {SVGA3D_LUMINANCE8, SVGA3DBLOCKDESC_L_UNORM,
    {1, 1, 1}, 1, 1,
-   8, {{0}, {0}, {8}, {0}},
+   {{0}, {0}, {8}, {0}},
    {{0}, {0}, {0}, {0}}},

-   {SVGA3D_LUMINANCE4_ALPHA4, SVGA3DBLOCKDESC_LA,
-   {1 , 1, 1}, 1, 1,
-   8, {{0}, {0}, {4}, {4}},
+   {SVGA3D_LUMINANCE4_ALPHA4, SVGA3DBLOCKDESC_LA_UNORM,
+   {1, 1, 1}, 1, 1,
+   {{0}, {0}, {4}, {4}},
    {{0}, {0}, {0}, {4}}},

-   {SVGA3D_LUMINANCE16, SVGA3DBLOCKDESC_LUMINANCE,
+   {SVGA3D_LUMINANCE16, SVGA3DBLOCKDESC_L_UNORM,
    {1, 1, 1}, 2, 2,
-   16, {{0}, {0}, {16}, {0}},
+   {{0}, {0}, {16}, {0}},
    {{0}, {0}, {0}, {0}}},

-   {SVGA3D_LUMINANCE8_ALPHA8, SVGA3DBLOCKDESC_LA,
+   {SVGA3D_LUMINANCE8_ALPHA8, SVGA3DBLOCKDESC_LA_UNORM,
    {1, 1, 1}, 2, 2,
-   16, {{0}, {0}, {8}, {8}},
+   {{0}, {0}, {8}, {8}},
    {{0}, {0}, {0}, {8}}},

-   {SVGA3D_DXT1, SVGA3DBLOCKDESC_COMPRESSED,
+   {SVGA3D_DXT1, SVGA3DBLOCKDESC_BC1_COMP_UNORM,
    {4, 4, 1}, 8, 8,
-   64, {{0}, {0}, {64}, {0}},
+   {{0}, {0}, {64}, {0}},
    {{0}, {0}, {0}, {0}}},

-   {SVGA3D_DXT2, SVGA3DBLOCKDESC_COMPRESSED,
+   {SVGA3D_DXT2, SVGA3DBLOCKDESC_BC2_COMP_UNORM,
    {4, 4, 1}, 16, 16,
-   128, {{0}, {0}, {128}, {0}},
+   {{0}, {0}, {128}, {0}},
    {{0}, {0}, {0}, {0}}},

-   {SVGA3D_DXT3, SVGA3DBLOCKDESC_COMPRESSED,
+   {SVGA3D_DXT3, SVGA3DBLOCKDESC_BC2_COMP_UNORM,
    {4, 4, 1}, 16, 16,
-   128, {{0}, {0}, {128}, {0}},
+   {{0}, {0}, {128}, {0}},
    {{0}, {0}, {0}, {0}}},

-   {SVGA3D_DXT4, SVGA3DBLOCKDESC_COMPRESSED,
+   {SVGA3D_DXT4, SVGA3DBLOCKDESC_BC3_COMP_UNORM,
    {4, 4, 1}, 16, 16,
-   128, {{0}, {0}, {128}, {0}},
+   {{0}, {0}, {128}, {0}},
    {{0}, {0}, {0}, {0}}},

-   {SVGA3D_DXT5, SVGA3DBLOCKDESC_COMPRESSED,
+   {SVGA3D_DXT5, SVGA3DBLOCKDESC_BC3_COMP_UNORM,
    {4, 4, 1}, 16, 16,
-   128, {{0}, {0}, {128}, {0}},
+   {{0}, {0}, {128}, {0}},
    {{0}, {0}, {0}, {0}}},

-   {SVGA3D_BUMPU8V8, SVGA3DBLOCKDESC_UV,
+   {SVGA3D_BUMPU8V8, SVGA3DBLOCKDESC_UV_SNORM,
    {1, 1, 1}, 2, 2,
-   16, {{0}, {0}, {8}, {8}},
-   {{0}, {0}, {0}, {8}}},
+   {{0}, {8}, {8}, {0}},
+   {{0}, {8}, {0}, {0}}},

   {SVGA3D_BUMPL6V5U5, SVGA3DBLOCKDESC_UVL,
    {1, 1, 1}, 2, 2,
-   16, {{5}, {5}, {6}, {0}},
-   {{11}, {6}, {0}, {0}}},
+   {{6}, {5}, {5}, {0}},
+   {{10}, {5}, {0}, {0}}},

   {SVGA3D_BUMPX8L8V8U8, SVGA3DBLOCKDESC_UVL,
    {1, 1, 1}, 4, 4,
-   32, {{8}, {8}, {8}, {0}},
+   {{8}, {8}, {8}, {0}},
    {{16}, {8}, {0}, {0}}},

-   {SVGA3D_BUMPL8V8U8, SVGA3DBLOCKDESC_UVL,
+   {SVGA3D_FORMAT_DEAD1, SVGA3DBLOCKDESC_UVL,
    {1, 1, 1}, 3, 3,
-   24, {{8}, {8}, {8}, {0}},
+   {{8}, {8}, {8}, {0}},
    {{16}, {8}, {0}, {0}}},

   {SVGA3D_ARGB_S10E5, SVGA3DBLOCKDESC_RGBA_FP,
    {1, 1, 1}, 8, 8,
-   64, {{16}, {16}, {16}, {16}},
+   {{16}, {16}, {16}, {16}},
    {{32}, {16}, {0}, {48}}},

   {SVGA3D_ARGB_S23E8, SVGA3DBLOCKDESC_RGBA_FP,
    {1, 1, 1}, 16, 16,
-   128, {{32}, {32}, {32}, {32}},
+   {{32}, {32}, {32}, {32}},
    {{64}, {32}, {0}, {96}}},

-   {SVGA3D_A2R10G10B10, SVGA3DBLOCKDESC_RGBA,
+   {SVGA3D_A2R10G10B10, SVGA3DBLOCKDESC_RGBA_UNORM,
    {1, 1, 1}, 4, 4,
-   32, {{10}, {10}, {10}, {2}},
+   {{10}, {10}, {10}, {2}},
    {{0}, {10}, {20}, {30}}},
-   {SVGA3D_V8U8, SVGA3DBLOCKDESC_UV,
+   {SVGA3D_V8U8, SVGA3DBLOCKDESC_UV_SNORM,
    {1, 1, 1}, 2, 2,
-   16, {{8}, {8}, {0}, {0}},
-   {{8}, {0}, {0}, {0}}},
+   {{0}, {8}, {8}, {0}},
+   {{0}, {8}, {0}, {0}}},

-   {SVGA3D_Q8W8V8U8, SVGA3DBLOCKDESC_UVWQ,
+   {SVGA3D_Q8W8V8U8, SVGA3DBLOCKDESC_UVWQ_SNORM,
    {1, 1, 1}, 4, 4,
-   32, {{8}, {8}, {8}, {8}},
-   {{24}, {16}, {8}, {0}}},
+   {{8}, {8}, {8}, {8}},
+   {{16}, {8}, {0}, {24}}},

-   {SVGA3D_CxV8U8, SVGA3DBLOCKDESC_UV,
+   {SVGA3D_CxV8U8, SVGA3DBLOCKDESC_UVCX_SNORM,
    {1, 1, 1}, 2, 2,
-   16, {{8}, {8}, {0}, {0}},
-   {{8}, {0}, {0}, {0}}},
+   {{0}, {8}, {8}, {0}},
+   {{0}, {8}, {0}, {0}}},

   {SVGA3D_X8L8V8U8, SVGA3DBLOCKDESC_UVL,
    {1, 1, 1}, 4, 4,
-   24, {{8}, {8}, {8}, {0}},
+   {{8}, {8}, {8}, {0}},
    {{16}, {8}, {0}, {0}}},

   {SVGA3D_A2W10V10U10, SVGA3DBLOCKDESC_UVWA,
    {1, 1, 1}, 4, 4,
-   32, {{10}, {10}, {10}, {2}},
-   {{0}, {10}, {20}, {30}}},
+   {{10}, {10}, {10}, {2}},
+   {{20}, {10}, {0}, {30}}},

-   {SVGA3D_ALPHA8, SVGA3DBLOCKDESC_ALPHA,
+   {SVGA3D_ALPHA8, SVGA3DBLOCKDESC_A_UNORM,
    {1, 1, 1}, 1, 1,
-   8, {{0}, {0}, {0}, {8}},
+   {{0}, {0}, {0}, {8}},
    {{0}, {0}, {0}, {0}}},

   {SVGA3D_R_S10E5, SVGA3DBLOCKDESC_R_FP,
    {1, 1, 1}, 2, 2,
-   16, {{0}, {0}, {16}, {0}},
+   {{0}, {0}, {16}, {0}},
    {{0}, {0}, {0}, {0}}},

   {SVGA3D_R_S23E8, SVGA3DBLOCKDESC_R_FP,
    {1, 1, 1}, 4, 4,
-   32, {{0}, {0}, {32}, {0}},
+   {{0}, {0}, {32}, {0}},
    {{0}, {0}, {0}, {0}}},

   {SVGA3D_RG_S10E5, SVGA3DBLOCKDESC_RG_FP,
    {1, 1, 1}, 4, 4,
-   32, {{0}, {16}, {16}, {0}},
+   {{0}, {16}, {16}, {0}},
    {{0}, {16}, {0}, {0}}},

   {SVGA3D_RG_S23E8, SVGA3DBLOCKDESC_RG_FP,
    {1, 1, 1}, 8, 8,
-   64, {{0}, {32}, {32}, {0}},
+   {{0}, {32}, {32}, {0}},
    {{0}, {32}, {0}, {0}}},

   {SVGA3D_BUFFER, SVGA3DBLOCKDESC_BUFFER,
    {1, 1, 1}, 1, 1,
-   8, {{0}, {0}, {8}, {0}},
+   {{0}, {0}, {8}, {0}},
    {{0}, {0}, {0}, {0}}},

-   {SVGA3D_Z_D24X8, SVGA3DBLOCKDESC_DEPTH,
+   {SVGA3D_Z_D24X8, SVGA3DBLOCKDESC_DEPTH_UNORM,
    {1, 1, 1}, 4, 4,
-   32, {{0}, {0}, {24}, {0}},
-   {{0}, {24}, {0}, {0}}},
+   {{0}, {0}, {24}, {0}},
+   {{0}, {0}, {8}, {0}}},

-   {SVGA3D_V16U16, SVGA3DBLOCKDESC_UV,
+   {SVGA3D_V16U16, SVGA3DBLOCKDESC_UV_SNORM,
    {1, 1, 1}, 4, 4,
-   32, {{16}, {16}, {0}, {0}},
-   {{16}, {0}, {0}, {0}}},
+   {{0}, {16}, {16}, {0}},
+   {{0}, {16}, {0}, {0}}},

-   {SVGA3D_G16R16, SVGA3DBLOCKDESC_RG,
+   {SVGA3D_G16R16, SVGA3DBLOCKDESC_RG_UNORM,
    {1, 1, 1}, 4, 4,
-   32, {{0}, {16}, {16}, {0}},
-   {{0}, {0}, {16}, {0}}},
+   {{0}, {16}, {16}, {0}},
+   {{0}, {16}, {0}, {0}}},

-   {SVGA3D_A16B16G16R16, SVGA3DBLOCKDESC_RGBA,
+   {SVGA3D_A16B16G16R16, SVGA3DBLOCKDESC_RGBA_UNORM,
    {1, 1, 1}, 8, 8,
-   64, {{16}, {16}, {16}, {16}},
+   {{16}, {16}, {16}, {16}},
    {{32}, {16}, {0}, {48}}},

   {SVGA3D_UYVY, SVGA3DBLOCKDESC_YUV,
-   {1, 1, 1}, 2, 2,
-   16, {{8}, {0}, {8}, {0}},
+   {2, 1, 1}, 4, 4,
+   {{8}, {0}, {8}, {0}},
    {{0}, {0}, {8}, {0}}},

   {SVGA3D_YUY2, SVGA3DBLOCKDESC_YUV,
-   {1, 1, 1}, 2, 2,
-   16, {{8}, {0}, {8}, {0}},
+   {2, 1, 1}, 4, 4,
+   {{8}, {0}, {8}, {0}},
    {{8}, {0}, {0}, {0}}},

   {SVGA3D_NV12, SVGA3DBLOCKDESC_NV12,
    {2, 2, 1}, 6, 2,
-   48, {{0}, {0}, {48}, {0}},
+   {{0}, {0}, {48}, {0}},
    {{0}, {0}, {0}, {0}}},

   {SVGA3D_AYUV, SVGA3DBLOCKDESC_AYUV,
    {1, 1, 1}, 4, 4,
-   32, {{8}, {8}, {8}, {8}},
+   {{8}, {8}, {8}, {8}},
    {{0}, {8}, {16}, {24}}},

-   {SVGA3D_R32G32B32A32_TYPELESS, SVGA3DBLOCKDESC_RGBA,
+   {SVGA3D_R32G32B32A32_TYPELESS, SVGA3DBLOCKDESC_TYPELESS,
    {1, 1, 1}, 16, 16,
-   128, {{32}, {32}, {32}, {32}},
+   {{32}, {32}, {32}, {32}},
    {{64}, {32}, {0}, {96}}},

-   {SVGA3D_R32G32B32A32_UINT, SVGA3DBLOCKDESC_RGBA,
+   {SVGA3D_R32G32B32A32_UINT, SVGA3DBLOCKDESC_RGBA_UINT,
    {1, 1, 1}, 16, 16,
-   128, {{32}, {32}, {32}, {32}},
{32}, {32}}, {{64}, {32}, {0}, {96}}}, - {SVGA3D_R32G32B32A32_SINT, SVGA3DBLOCKDESC_UVWQ, + {SVGA3D_R32G32B32A32_SINT, SVGA3DBLOCKDESC_RGBA_SINT, {1, 1, 1}, 16, 16, - 128, {{32}, {32}, {32}, {32}}, + {{32}, {32}, {32}, {32}}, {{64}, {32}, {0}, {96}}}, - {SVGA3D_R32G32B32_TYPELESS, SVGA3DBLOCKDESC_RGB, + {SVGA3D_R32G32B32_TYPELESS, SVGA3DBLOCKDESC_TYPELESS, {1, 1, 1}, 12, 12, - 96, {{32}, {32}, {32}, {0}}, + {{32}, {32}, {32}, {0}}, {{64}, {32}, {0}, {0}}}, {SVGA3D_R32G32B32_FLOAT, SVGA3DBLOCKDESC_RGB_FP, {1, 1, 1}, 12, 12, - 96, {{32}, {32}, {32}, {0}}, + {{32}, {32}, {32}, {0}}, {{64}, {32}, {0}, {0}}}, - {SVGA3D_R32G32B32_UINT, SVGA3DBLOCKDESC_RGB, + {SVGA3D_R32G32B32_UINT, SVGA3DBLOCKDESC_RGB_UINT, {1, 1, 1}, 12, 12, - 96, {{32}, {32}, {32}, {0}}, + {{32}, {32}, {32}, {0}}, {{64}, {32}, {0}, {0}}}, - {SVGA3D_R32G32B32_SINT, SVGA3DBLOCKDESC_UVW, + {SVGA3D_R32G32B32_SINT, SVGA3DBLOCKDESC_RGB_SINT, {1, 1, 1}, 12, 12, - 96, {{32}, {32}, {32}, {0}}, + {{32}, {32}, {32}, {0}}, {{64}, {32}, {0}, {0}}}, - {SVGA3D_R16G16B16A16_TYPELESS, SVGA3DBLOCKDESC_RGBA, + {SVGA3D_R16G16B16A16_TYPELESS, SVGA3DBLOCKDESC_TYPELESS, {1, 1, 1}, 8, 8, - 64, {{16}, {16}, {16}, {16}}, + {{16}, {16}, {16}, {16}}, {{32}, {16}, {0}, {48}}}, - {SVGA3D_R16G16B16A16_UINT, SVGA3DBLOCKDESC_RGBA, + {SVGA3D_R16G16B16A16_UINT, SVGA3DBLOCKDESC_RGBA_UINT, {1, 1, 1}, 8, 8, - 64, {{16}, {16}, {16}, {16}}, + {{16}, {16}, {16}, {16}}, {{32}, {16}, {0}, {48}}}, - {SVGA3D_R16G16B16A16_SNORM, SVGA3DBLOCKDESC_UVWQ, + {SVGA3D_R16G16B16A16_SNORM, SVGA3DBLOCKDESC_RGBA_SNORM, {1, 1, 1}, 8, 8, - 64, {{16}, {16}, {16}, {16}}, + {{16}, {16}, {16}, {16}}, {{32}, {16}, {0}, {48}}}, - {SVGA3D_R16G16B16A16_SINT, SVGA3DBLOCKDESC_UVWQ, + {SVGA3D_R16G16B16A16_SINT, SVGA3DBLOCKDESC_RGBA_SINT, {1, 1, 1}, 8, 8, - 64, {{16}, {16}, {16}, {16}}, + {{16}, {16}, {16}, {16}}, {{32}, {16}, {0}, {48}}}, - {SVGA3D_R32G32_TYPELESS, SVGA3DBLOCKDESC_RG, + {SVGA3D_R32G32_TYPELESS, SVGA3DBLOCKDESC_TYPELESS, {1, 1, 1}, 8, 8, - 64, {{0}, {32}, {32}, {0}}, + {{0}, {32}, {32}, {0}}, {{0}, {32}, {0}, {0}}}, - {SVGA3D_R32G32_UINT, SVGA3DBLOCKDESC_RG, + {SVGA3D_R32G32_UINT, SVGA3DBLOCKDESC_RG_UINT, {1, 1, 1}, 8, 8, - 64, {{0}, {32}, {32}, {0}}, + {{0}, {32}, {32}, {0}}, {{0}, {32}, {0}, {0}}}, - {SVGA3D_R32G32_SINT, SVGA3DBLOCKDESC_UV, + {SVGA3D_R32G32_SINT, SVGA3DBLOCKDESC_RG_SINT, {1, 1, 1}, 8, 8, - 64, {{0}, {32}, {32}, {0}}, + {{0}, {32}, {32}, {0}}, {{0}, {32}, {0}, {0}}}, - {SVGA3D_R32G8X24_TYPELESS, SVGA3DBLOCKDESC_RG, + {SVGA3D_R32G8X24_TYPELESS, SVGA3DBLOCKDESC_TYPELESS, {1, 1, 1}, 8, 8, - 64, {{0}, {8}, {32}, {0}}, + {{0}, {8}, {32}, {0}}, {{0}, {32}, {0}, {0}}}, {SVGA3D_D32_FLOAT_S8X24_UINT, SVGA3DBLOCKDESC_DS, {1, 1, 1}, 8, 8, - 64, {{0}, {8}, {32}, {0}}, + {{0}, {8}, {32}, {0}}, {{0}, {32}, {0}, {0}}}, - {SVGA3D_R32_FLOAT_X8X24_TYPELESS, SVGA3DBLOCKDESC_R_FP, + {SVGA3D_R32_FLOAT_X8X24, SVGA3DBLOCKDESC_R_FP, {1, 1, 1}, 8, 8, - 64, {{0}, {0}, {32}, {0}}, + {{0}, {0}, {32}, {0}}, {{0}, {0}, {0}, {0}}}, - {SVGA3D_X32_TYPELESS_G8X24_UINT, SVGA3DBLOCKDESC_GREEN, + {SVGA3D_X32_G8X24_UINT, SVGA3DBLOCKDESC_G_UINT, {1, 1, 1}, 8, 8, - 64, {{0}, {8}, {0}, {0}}, + {{0}, {8}, {0}, {0}}, {{0}, {32}, {0}, {0}}}, - {SVGA3D_R10G10B10A2_TYPELESS, SVGA3DBLOCKDESC_RGBA, + {SVGA3D_R10G10B10A2_TYPELESS, SVGA3DBLOCKDESC_TYPELESS, {1, 1, 1}, 4, 4, - 32, {{10}, {10}, {10}, {2}}, - {{0}, {10}, {20}, {30}}}, + {{10}, {10}, {10}, {2}}, + {{20}, {10}, {0}, {30}}}, - {SVGA3D_R10G10B10A2_UINT, SVGA3DBLOCKDESC_RGBA, + {SVGA3D_R10G10B10A2_UINT, SVGA3DBLOCKDESC_RGBA_UINT, {1, 1, 1}, 4, 4, - 32, 
{{10}, {10}, {10}, {2}}, - {{0}, {10}, {20}, {30}}}, + {{10}, {10}, {10}, {2}}, + {{20}, {10}, {0}, {30}}}, {SVGA3D_R11G11B10_FLOAT, SVGA3DBLOCKDESC_RGB_FP, {1, 1, 1}, 4, 4, - 32, {{10}, {11}, {11}, {0}}, - {{0}, {10}, {21}, {0}}}, + {{10}, {11}, {11}, {0}}, + {{22}, {11}, {0}, {0}}}, - {SVGA3D_R8G8B8A8_TYPELESS, SVGA3DBLOCKDESC_RGBA, + {SVGA3D_R8G8B8A8_TYPELESS, SVGA3DBLOCKDESC_TYPELESS, {1, 1, 1}, 4, 4, - 32, {{8}, {8}, {8}, {8}}, + {{8}, {8}, {8}, {8}}, {{16}, {8}, {0}, {24}}}, - {SVGA3D_R8G8B8A8_UNORM, SVGA3DBLOCKDESC_RGBA, + {SVGA3D_R8G8B8A8_UNORM, SVGA3DBLOCKDESC_RGBA_UNORM, {1, 1, 1}, 4, 4, - 32, {{8}, {8}, {8}, {8}}, + {{8}, {8}, {8}, {8}}, {{16}, {8}, {0}, {24}}}, - {SVGA3D_R8G8B8A8_UNORM_SRGB, SVGA3DBLOCKDESC_RGBA_SRGB, + {SVGA3D_R8G8B8A8_UNORM_SRGB, SVGA3DBLOCKDESC_RGBA_UNORM_SRGB, {1, 1, 1}, 4, 4, - 32, {{8}, {8}, {8}, {8}}, + {{8}, {8}, {8}, {8}}, {{16}, {8}, {0}, {24}}}, - {SVGA3D_R8G8B8A8_UINT, SVGA3DBLOCKDESC_RGBA, + {SVGA3D_R8G8B8A8_UINT, SVGA3DBLOCKDESC_RGBA_UINT, {1, 1, 1}, 4, 4, - 32, {{8}, {8}, {8}, {8}}, + {{8}, {8}, {8}, {8}}, {{16}, {8}, {0}, {24}}}, - {SVGA3D_R8G8B8A8_SINT, SVGA3DBLOCKDESC_RGBA, + {SVGA3D_R8G8B8A8_SINT, SVGA3DBLOCKDESC_RGBA_SINT, {1, 1, 1}, 4, 4, - 32, {{8}, {8}, {8}, {8}}, + {{8}, {8}, {8}, {8}}, {{16}, {8}, {0}, {24}}}, - {SVGA3D_R16G16_TYPELESS, SVGA3DBLOCKDESC_RG, + {SVGA3D_R16G16_TYPELESS, SVGA3DBLOCKDESC_TYPELESS, {1, 1, 1}, 4, 4, - 32, {{0}, {16}, {16}, {0}}, + {{0}, {16}, {16}, {0}}, {{0}, {16}, {0}, {0}}}, - {SVGA3D_R16G16_UINT, SVGA3DBLOCKDESC_RG_FP, + {SVGA3D_R16G16_UINT, SVGA3DBLOCKDESC_RG_UINT, {1, 1, 1}, 4, 4, - 32, {{0}, {16}, {16}, {0}}, + {{0}, {16}, {16}, {0}}, {{0}, {16}, {0}, {0}}}, - {SVGA3D_R16G16_SINT, SVGA3DBLOCKDESC_UV, + {SVGA3D_R16G16_SINT, SVGA3DBLOCKDESC_RG_SINT, {1, 1, 1}, 4, 4, - 32, {{0}, {16}, {16}, {0}}, + {{0}, {16}, {16}, {0}}, {{0}, {16}, {0}, {0}}}, - {SVGA3D_R32_TYPELESS, SVGA3DBLOCKDESC_RED, + {SVGA3D_R32_TYPELESS, SVGA3DBLOCKDESC_TYPELESS, {1, 1, 1}, 4, 4, - 32, {{0}, {0}, {32}, {0}}, + {{0}, {0}, {32}, {0}}, {{0}, {0}, {0}, {0}}}, - {SVGA3D_D32_FLOAT, SVGA3DBLOCKDESC_DEPTH, + {SVGA3D_D32_FLOAT, SVGA3DBLOCKDESC_DEPTH_FP, {1, 1, 1}, 4, 4, - 32, {{0}, {0}, {32}, {0}}, + {{0}, {0}, {32}, {0}}, {{0}, {0}, {0}, {0}}}, - {SVGA3D_R32_UINT, SVGA3DBLOCKDESC_RED, + {SVGA3D_R32_UINT, SVGA3DBLOCKDESC_R_UINT, {1, 1, 1}, 4, 4, - 32, {{0}, {0}, {32}, {0}}, + {{0}, {0}, {32}, {0}}, {{0}, {0}, {0}, {0}}}, - {SVGA3D_R32_SINT, SVGA3DBLOCKDESC_RED, + {SVGA3D_R32_SINT, SVGA3DBLOCKDESC_R_SINT, {1, 1, 1}, 4, 4, - 32, {{0}, {0}, {32}, {0}}, + {{0}, {0}, {32}, {0}}, {{0}, {0}, {0}, {0}}}, - {SVGA3D_R24G8_TYPELESS, SVGA3DBLOCKDESC_RG, + {SVGA3D_R24G8_TYPELESS, SVGA3DBLOCKDESC_TYPELESS, {1, 1, 1}, 4, 4, - 32, {{0}, {8}, {24}, {0}}, + {{0}, {8}, {24}, {0}}, {{0}, {24}, {0}, {0}}}, - {SVGA3D_D24_UNORM_S8_UINT, SVGA3DBLOCKDESC_DS, + {SVGA3D_D24_UNORM_S8_UINT, SVGA3DBLOCKDESC_DS_UNORM, {1, 1, 1}, 4, 4, - 32, {{0}, {8}, {24}, {0}}, + {{0}, {8}, {24}, {0}}, {{0}, {24}, {0}, {0}}}, - {SVGA3D_R24_UNORM_X8_TYPELESS, SVGA3DBLOCKDESC_RED, + {SVGA3D_R24_UNORM_X8, SVGA3DBLOCKDESC_R_UNORM, {1, 1, 1}, 4, 4, - 32, {{0}, {0}, {24}, {0}}, + {{0}, {0}, {24}, {0}}, {{0}, {0}, {0}, {0}}}, - {SVGA3D_X24_TYPELESS_G8_UINT, SVGA3DBLOCKDESC_GREEN, + {SVGA3D_X24_G8_UINT, SVGA3DBLOCKDESC_G_UINT, {1, 1, 1}, 4, 4, - 32, {{0}, {8}, {0}, {0}}, + {{0}, {8}, {0}, {0}}, {{0}, {24}, {0}, {0}}}, - {SVGA3D_R8G8_TYPELESS, SVGA3DBLOCKDESC_RG, + {SVGA3D_R8G8_TYPELESS, SVGA3DBLOCKDESC_TYPELESS, {1, 1, 1}, 2, 2, - 16, {{0}, {8}, {8}, {0}}, + {{0}, {8}, {8}, {0}}, {{0}, {8}, 
{0}, {0}}}, - {SVGA3D_R8G8_UNORM, SVGA3DBLOCKDESC_RG, + {SVGA3D_R8G8_UNORM, SVGA3DBLOCKDESC_RG_UNORM, {1, 1, 1}, 2, 2, - 16, {{0}, {8}, {8}, {0}}, + {{0}, {8}, {8}, {0}}, {{0}, {8}, {0}, {0}}}, - {SVGA3D_R8G8_UINT, SVGA3DBLOCKDESC_RG, + {SVGA3D_R8G8_UINT, SVGA3DBLOCKDESC_RG_UINT, {1, 1, 1}, 2, 2, - 16, {{0}, {8}, {8}, {0}}, + {{0}, {8}, {8}, {0}}, {{0}, {8}, {0}, {0}}}, - {SVGA3D_R8G8_SINT, SVGA3DBLOCKDESC_UV, + {SVGA3D_R8G8_SINT, SVGA3DBLOCKDESC_RG_SINT, {1, 1, 1}, 2, 2, - 16, {{0}, {8}, {8}, {0}}, + {{0}, {8}, {8}, {0}}, {{0}, {8}, {0}, {0}}}, - {SVGA3D_R16_TYPELESS, SVGA3DBLOCKDESC_RED, + {SVGA3D_R16_TYPELESS, SVGA3DBLOCKDESC_TYPELESS, {1, 1, 1}, 2, 2, - 16, {{0}, {0}, {16}, {0}}, + {{0}, {0}, {16}, {0}}, {{0}, {0}, {0}, {0}}}, - {SVGA3D_R16_UNORM, SVGA3DBLOCKDESC_RED, + {SVGA3D_R16_UNORM, SVGA3DBLOCKDESC_R_UNORM, {1, 1, 1}, 2, 2, - 16, {{0}, {0}, {16}, {0}}, + {{0}, {0}, {16}, {0}}, {{0}, {0}, {0}, {0}}}, - {SVGA3D_R16_UINT, SVGA3DBLOCKDESC_RED, + {SVGA3D_R16_UINT, SVGA3DBLOCKDESC_R_UINT, {1, 1, 1}, 2, 2, - 16, {{0}, {0}, {16}, {0}}, + {{0}, {0}, {16}, {0}}, {{0}, {0}, {0}, {0}}}, - {SVGA3D_R16_SNORM, SVGA3DBLOCKDESC_U, + {SVGA3D_R16_SNORM, SVGA3DBLOCKDESC_R_SNORM, {1, 1, 1}, 2, 2, - 16, {{0}, {0}, {16}, {0}}, + {{0}, {0}, {16}, {0}}, {{0}, {0}, {0}, {0}}}, - {SVGA3D_R16_SINT, SVGA3DBLOCKDESC_U, + {SVGA3D_R16_SINT, SVGA3DBLOCKDESC_R_SINT, {1, 1, 1}, 2, 2, - 16, {{0}, {0}, {16}, {0}}, + {{0}, {0}, {16}, {0}}, {{0}, {0}, {0}, {0}}}, - {SVGA3D_R8_TYPELESS, SVGA3DBLOCKDESC_RED, + {SVGA3D_R8_TYPELESS, SVGA3DBLOCKDESC_TYPELESS, {1, 1, 1}, 1, 1, - 8, {{0}, {0}, {8}, {0}}, + {{0}, {0}, {8}, {0}}, {{0}, {0}, {0}, {0}}}, - {SVGA3D_R8_UNORM, SVGA3DBLOCKDESC_RED, + {SVGA3D_R8_UNORM, SVGA3DBLOCKDESC_R_UNORM, {1, 1, 1}, 1, 1, - 8, {{0}, {0}, {8}, {0}}, + {{0}, {0}, {8}, {0}}, {{0}, {0}, {0}, {0}}}, - {SVGA3D_R8_UINT, SVGA3DBLOCKDESC_RED, + {SVGA3D_R8_UINT, SVGA3DBLOCKDESC_R_UINT, {1, 1, 1}, 1, 1, - 8, {{0}, {0}, {8}, {0}}, + {{0}, {0}, {8}, {0}}, {{0}, {0}, {0}, {0}}}, - {SVGA3D_R8_SNORM, SVGA3DBLOCKDESC_U, + {SVGA3D_R8_SNORM, SVGA3DBLOCKDESC_R_SNORM, {1, 1, 1}, 1, 1, - 8, {{0}, {0}, {8}, {0}}, + {{0}, {0}, {8}, {0}}, {{0}, {0}, {0}, {0}}}, - {SVGA3D_R8_SINT, SVGA3DBLOCKDESC_U, + {SVGA3D_R8_SINT, SVGA3DBLOCKDESC_R_SINT, {1, 1, 1}, 1, 1, - 8, {{0}, {0}, {8}, {0}}, + {{0}, {0}, {8}, {0}}, {{0}, {0}, {0}, {0}}}, - {SVGA3D_P8, SVGA3DBLOCKDESC_RED, + {SVGA3D_P8, SVGA3DBLOCKDESC_NONE, {1, 1, 1}, 1, 1, - 8, {{0}, {0}, {8}, {0}}, + {{0}, {0}, {8}, {0}}, {{0}, {0}, {0}, {0}}}, - {SVGA3D_R9G9B9E5_SHAREDEXP, SVGA3DBLOCKDESC_RGBE, + {SVGA3D_R9G9B9E5_SHAREDEXP, SVGA3DBLOCKDESC_RGB_EXP, {1, 1, 1}, 4, 4, - 32, {{9}, {9}, {9}, {5}}, + {{9}, {9}, {9}, {5}}, {{18}, {9}, {0}, {27}}}, - {SVGA3D_R8G8_B8G8_UNORM, SVGA3DBLOCKDESC_RG, - {1, 1, 1}, 2, 2, - 16, {{0}, {8}, {8}, {0}}, - {{0}, {8}, {0}, {0}}}, + {SVGA3D_R8G8_B8G8_UNORM, SVGA3DBLOCKDESC_NONE, + {2, 1, 1}, 4, 4, + {{0}, {8}, {8}, {0}}, + {{0}, {0}, {8}, {0}}}, - {SVGA3D_G8R8_G8B8_UNORM, SVGA3DBLOCKDESC_RG, - {1, 1, 1}, 2, 2, - 16, {{0}, {8}, {8}, {0}}, + {SVGA3D_G8R8_G8B8_UNORM, SVGA3DBLOCKDESC_NONE, + {2, 1, 1}, 4, 4, + {{0}, {8}, {8}, {0}}, {{0}, {8}, {0}, {0}}}, - {SVGA3D_BC1_TYPELESS, SVGA3DBLOCKDESC_COMPRESSED, + {SVGA3D_BC1_TYPELESS, SVGA3DBLOCKDESC_BC1_COMP_TYPELESS, {4, 4, 1}, 8, 8, - 64, {{0}, {0}, {64}, {0}}, + {{0}, {0}, {64}, {0}}, {{0}, {0}, {0}, {0}}}, - {SVGA3D_BC1_UNORM_SRGB, SVGA3DBLOCKDESC_COMPRESSED_SRGB, + {SVGA3D_BC1_UNORM_SRGB, SVGA3DBLOCKDESC_BC1_COMP_UNORM_SRGB, {4, 4, 1}, 8, 8, - 64, {{0}, {0}, {64}, {0}}, + {{0}, {0}, {64}, {0}}, {{0}, 
{0}, {0}, {0}}}, - {SVGA3D_BC2_TYPELESS, SVGA3DBLOCKDESC_COMPRESSED, + {SVGA3D_BC2_TYPELESS, SVGA3DBLOCKDESC_BC2_COMP_TYPELESS, {4, 4, 1}, 16, 16, - 128, {{0}, {0}, {128}, {0}}, + {{0}, {0}, {128}, {0}}, {{0}, {0}, {0}, {0}}}, - {SVGA3D_BC2_UNORM_SRGB, SVGA3DBLOCKDESC_COMPRESSED_SRGB, + {SVGA3D_BC2_UNORM_SRGB, SVGA3DBLOCKDESC_BC2_COMP_UNORM_SRGB, {4, 4, 1}, 16, 16, - 128, {{0}, {0}, {128}, {0}}, + {{0}, {0}, {128}, {0}}, {{0}, {0}, {0}, {0}}}, - {SVGA3D_BC3_TYPELESS, SVGA3DBLOCKDESC_COMPRESSED, + {SVGA3D_BC3_TYPELESS, SVGA3DBLOCKDESC_BC3_COMP_TYPELESS, {4, 4, 1}, 16, 16, - 128, {{0}, {0}, {128}, {0}}, + {{0}, {0}, {128}, {0}}, {{0}, {0}, {0}, {0}}}, - {SVGA3D_BC3_UNORM_SRGB, SVGA3DBLOCKDESC_COMPRESSED_SRGB, + {SVGA3D_BC3_UNORM_SRGB, SVGA3DBLOCKDESC_BC3_COMP_UNORM_SRGB, {4, 4, 1}, 16, 16, - 128, {{0}, {0}, {128}, {0}}, + {{0}, {0}, {128}, {0}}, {{0}, {0}, {0}, {0}}}, - {SVGA3D_BC4_TYPELESS, SVGA3DBLOCKDESC_COMPRESSED, + {SVGA3D_BC4_TYPELESS, SVGA3DBLOCKDESC_BC4_COMP_TYPELESS, {4, 4, 1}, 8, 8, - 64, {{0}, {0}, {64}, {0}}, + {{0}, {0}, {64}, {0}}, {{0}, {0}, {0}, {0}}}, - {SVGA3D_ATI1, SVGA3DBLOCKDESC_COMPRESSED, + {SVGA3D_ATI1, SVGA3DBLOCKDESC_BC4_COMP_UNORM, {4, 4, 1}, 8, 8, - 64, {{0}, {0}, {64}, {0}}, + {{0}, {0}, {64}, {0}}, {{0}, {0}, {0}, {0}}}, - {SVGA3D_BC4_SNORM, SVGA3DBLOCKDESC_COMPRESSED, + {SVGA3D_BC4_SNORM, SVGA3DBLOCKDESC_BC4_COMP_SNORM, {4, 4, 1}, 8, 8, - 64, {{0}, {0}, {64}, {0}}, + {{0}, {0}, {64}, {0}}, {{0}, {0}, {0}, {0}}}, - {SVGA3D_BC5_TYPELESS, SVGA3DBLOCKDESC_COMPRESSED, + {SVGA3D_BC5_TYPELESS, SVGA3DBLOCKDESC_BC5_COMP_TYPELESS, {4, 4, 1}, 16, 16, - 128, {{0}, {0}, {128}, {0}}, + {{0}, {0}, {128}, {0}}, {{0}, {0}, {0}, {0}}}, - {SVGA3D_ATI2, SVGA3DBLOCKDESC_COMPRESSED, + {SVGA3D_ATI2, SVGA3DBLOCKDESC_BC5_COMP_UNORM, {4, 4, 1}, 16, 16, - 128, {{0}, {0}, {128}, {0}}, + {{0}, {0}, {128}, {0}}, {{0}, {0}, {0}, {0}}}, - {SVGA3D_BC5_SNORM, SVGA3DBLOCKDESC_COMPRESSED, + {SVGA3D_BC5_SNORM, SVGA3DBLOCKDESC_BC5_COMP_SNORM, {4, 4, 1}, 16, 16, - 128, {{0}, {0}, {128}, {0}}, + {{0}, {0}, {128}, {0}}, {{0}, {0}, {0}, {0}}}, - {SVGA3D_R10G10B10_XR_BIAS_A2_UNORM, SVGA3DBLOCKDESC_RGBA, + {SVGA3D_R10G10B10_XR_BIAS_A2_UNORM, SVGA3DBLOCKDESC_RGBA_UNORM, {1, 1, 1}, 4, 4, - 32, {{10}, {10}, {10}, {2}}, - {{0}, {10}, {20}, {30}}}, + {{10}, {10}, {10}, {2}}, + {{20}, {10}, {0}, {30}}}, - {SVGA3D_B8G8R8A8_TYPELESS, SVGA3DBLOCKDESC_RGBA, + {SVGA3D_B8G8R8A8_TYPELESS, SVGA3DBLOCKDESC_TYPELESS, {1, 1, 1}, 4, 4, - 32, {{8}, {8}, {8}, {8}}, + {{8}, {8}, {8}, {8}}, {{0}, {8}, {16}, {24}}}, - {SVGA3D_B8G8R8A8_UNORM_SRGB, SVGA3DBLOCKDESC_RGBA_SRGB, + {SVGA3D_B8G8R8A8_UNORM_SRGB, SVGA3DBLOCKDESC_RGBA_UNORM_SRGB, {1, 1, 1}, 4, 4, - 32, {{8}, {8}, {8}, {8}}, + {{8}, {8}, {8}, {8}}, {{0}, {8}, {16}, {24}}}, - {SVGA3D_B8G8R8X8_TYPELESS, SVGA3DBLOCKDESC_RGB, + {SVGA3D_B8G8R8X8_TYPELESS, SVGA3DBLOCKDESC_TYPELESS, {1, 1, 1}, 4, 4, - 24, {{8}, {8}, {8}, {0}}, + {{8}, {8}, {8}, {0}}, {{0}, {8}, {16}, {24}}}, - {SVGA3D_B8G8R8X8_UNORM_SRGB, SVGA3DBLOCKDESC_RGB_SRGB, + {SVGA3D_B8G8R8X8_UNORM_SRGB, SVGA3DBLOCKDESC_RGB_UNORM_SRGB, {1, 1, 1}, 4, 4, - 24, {{8}, {8}, {8}, {0}}, + {{8}, {8}, {8}, {0}}, {{0}, {8}, {16}, {24}}}, - {SVGA3D_Z_DF16, SVGA3DBLOCKDESC_DEPTH, + {SVGA3D_Z_DF16, SVGA3DBLOCKDESC_DEPTH_UNORM, {1, 1, 1}, 2, 2, - 16, {{0}, {0}, {16}, {0}}, + {{0}, {0}, {16}, {0}}, {{0}, {0}, {0}, {0}}}, - {SVGA3D_Z_DF24, SVGA3DBLOCKDESC_DEPTH, + {SVGA3D_Z_DF24, SVGA3DBLOCKDESC_DEPTH_UNORM, {1, 1, 1}, 4, 4, - 32, {{0}, {8}, {24}, {0}}, - {{0}, {24}, {0}, {0}}}, + {{0}, {0}, {24}, {0}}, + {{0}, {0}, {8}, {0}}}, - 
{SVGA3D_Z_D24S8_INT, SVGA3DBLOCKDESC_DS, + {SVGA3D_Z_D24S8_INT, SVGA3DBLOCKDESC_DS_UNORM, {1, 1, 1}, 4, 4, - 32, {{0}, {8}, {24}, {0}}, - {{0}, {24}, {0}, {0}}}, + {{0}, {8}, {24}, {0}}, + {{0}, {0}, {8}, {0}}}, {SVGA3D_YV12, SVGA3DBLOCKDESC_YV12, {2, 2, 1}, 6, 2, - 48, {{0}, {0}, {48}, {0}}, + {{0}, {0}, {48}, {0}}, {{0}, {0}, {0}, {0}}}, {SVGA3D_R32G32B32A32_FLOAT, SVGA3DBLOCKDESC_RGBA_FP, {1, 1, 1}, 16, 16, - 128, {{32}, {32}, {32}, {32}}, + {{32}, {32}, {32}, {32}}, {{64}, {32}, {0}, {96}}}, {SVGA3D_R16G16B16A16_FLOAT, SVGA3DBLOCKDESC_RGBA_FP, {1, 1, 1}, 8, 8, - 64, {{16}, {16}, {16}, {16}}, + {{16}, {16}, {16}, {16}}, {{32}, {16}, {0}, {48}}}, - {SVGA3D_R16G16B16A16_UNORM, SVGA3DBLOCKDESC_RGBA, + {SVGA3D_R16G16B16A16_UNORM, SVGA3DBLOCKDESC_RGBA_UNORM, {1, 1, 1}, 8, 8, - 64, {{16}, {16}, {16}, {16}}, + {{16}, {16}, {16}, {16}}, {{32}, {16}, {0}, {48}}}, {SVGA3D_R32G32_FLOAT, SVGA3DBLOCKDESC_RG_FP, {1, 1, 1}, 8, 8, - 64, {{0}, {32}, {32}, {0}}, + {{0}, {32}, {32}, {0}}, {{0}, {32}, {0}, {0}}}, - {SVGA3D_R10G10B10A2_UNORM, SVGA3DBLOCKDESC_RGBA, + {SVGA3D_R10G10B10A2_UNORM, SVGA3DBLOCKDESC_RGBA_UNORM, {1, 1, 1}, 4, 4, - 32, {{10}, {10}, {10}, {2}}, - {{0}, {10}, {20}, {30}}}, + {{10}, {10}, {10}, {2}}, + {{20}, {10}, {0}, {30}}}, - {SVGA3D_R8G8B8A8_SNORM, SVGA3DBLOCKDESC_RGBA, + {SVGA3D_R8G8B8A8_SNORM, SVGA3DBLOCKDESC_RGBA_SNORM, {1, 1, 1}, 4, 4, - 32, {{8}, {8}, {8}, {8}}, - {{24}, {16}, {8}, {0}}}, + {{8}, {8}, {8}, {8}}, + {{16}, {8}, {0}, {24}}}, {SVGA3D_R16G16_FLOAT, SVGA3DBLOCKDESC_RG_FP, {1, 1, 1}, 4, 4, - 32, {{0}, {16}, {16}, {0}}, + {{0}, {16}, {16}, {0}}, {{0}, {16}, {0}, {0}}}, - {SVGA3D_R16G16_UNORM, SVGA3DBLOCKDESC_RG, + {SVGA3D_R16G16_UNORM, SVGA3DBLOCKDESC_RG_UNORM, {1, 1, 1}, 4, 4, - 32, {{0}, {16}, {16}, {0}}, - {{0}, {0}, {16}, {0}}}, + {{0}, {16}, {16}, {0}}, + {{0}, {16}, {0}, {0}}}, - {SVGA3D_R16G16_SNORM, SVGA3DBLOCKDESC_RG, + {SVGA3D_R16G16_SNORM, SVGA3DBLOCKDESC_RG_SNORM, {1, 1, 1}, 4, 4, - 32, {{16}, {16}, {0}, {0}}, - {{16}, {0}, {0}, {0}}}, + {{0}, {16}, {16}, {0}}, + {{0}, {16}, {0}, {0}}}, {SVGA3D_R32_FLOAT, SVGA3DBLOCKDESC_R_FP, {1, 1, 1}, 4, 4, - 32, {{0}, {0}, {32}, {0}}, + {{0}, {0}, {32}, {0}}, {{0}, {0}, {0}, {0}}}, - {SVGA3D_R8G8_SNORM, SVGA3DBLOCKDESC_RG, + {SVGA3D_R8G8_SNORM, SVGA3DBLOCKDESC_RG_SNORM, {1, 1, 1}, 2, 2, - 16, {{8}, {8}, {0}, {0}}, - {{8}, {0}, {0}, {0}}}, + {{0}, {8}, {8}, {0}}, + {{0}, {8}, {0}, {0}}}, {SVGA3D_R16_FLOAT, SVGA3DBLOCKDESC_R_FP, {1, 1, 1}, 2, 2, - 16, {{0}, {0}, {16}, {0}}, + {{0}, {0}, {16}, {0}}, {{0}, {0}, {0}, {0}}}, - {SVGA3D_D16_UNORM, SVGA3DBLOCKDESC_DEPTH, + {SVGA3D_D16_UNORM, SVGA3DBLOCKDESC_DEPTH_UNORM, {1, 1, 1}, 2, 2, - 16, {{0}, {0}, {16}, {0}}, + {{0}, {0}, {16}, {0}}, {{0}, {0}, {0}, {0}}}, - {SVGA3D_A8_UNORM, SVGA3DBLOCKDESC_ALPHA, + {SVGA3D_A8_UNORM, SVGA3DBLOCKDESC_A_UNORM, {1, 1, 1}, 1, 1, - 8, {{0}, {0}, {0}, {8}}, + {{0}, {0}, {0}, {8}}, {{0}, {0}, {0}, {0}}}, - {SVGA3D_BC1_UNORM, SVGA3DBLOCKDESC_COMPRESSED, + {SVGA3D_BC1_UNORM, SVGA3DBLOCKDESC_BC1_COMP_UNORM, {4, 4, 1}, 8, 8, - 64, {{0}, {0}, {64}, {0}}, + {{0}, {0}, {64}, {0}}, {{0}, {0}, {0}, {0}}}, - {SVGA3D_BC2_UNORM, SVGA3DBLOCKDESC_COMPRESSED, + {SVGA3D_BC2_UNORM, SVGA3DBLOCKDESC_BC2_COMP_UNORM, {4, 4, 1}, 16, 16, - 128, {{0}, {0}, {128}, {0}}, + {{0}, {0}, {128}, {0}}, {{0}, {0}, {0}, {0}}}, - {SVGA3D_BC3_UNORM, SVGA3DBLOCKDESC_COMPRESSED, + {SVGA3D_BC3_UNORM, SVGA3DBLOCKDESC_BC3_COMP_UNORM, {4, 4, 1}, 16, 16, - 128, {{0}, {0}, {128}, {0}}, + {{0}, {0}, {128}, {0}}, {{0}, {0}, {0}, {0}}}, - {SVGA3D_B5G6R5_UNORM, SVGA3DBLOCKDESC_RGB, + 
+   {SVGA3D_B5G6R5_UNORM, SVGA3DBLOCKDESC_RGB_UNORM,
    {1, 1, 1}, 2, 2,
-   16, {{5}, {6}, {5}, {0}},
+   {{5}, {6}, {5}, {0}},
    {{0}, {5}, {11}, {0}}},
-   {SVGA3D_B5G5R5A1_UNORM, SVGA3DBLOCKDESC_RGBA,
+   {SVGA3D_B5G5R5A1_UNORM, SVGA3DBLOCKDESC_RGBA_UNORM,
    {1, 1, 1}, 2, 2,
-   16, {{5}, {5}, {5}, {1}},
+   {{5}, {5}, {5}, {1}},
    {{0}, {5}, {10}, {15}}},
-   {SVGA3D_B8G8R8A8_UNORM, SVGA3DBLOCKDESC_RGBA,
+   {SVGA3D_B8G8R8A8_UNORM, SVGA3DBLOCKDESC_RGBA_UNORM,
    {1, 1, 1}, 4, 4,
-   32, {{8}, {8}, {8}, {8}},
+   {{8}, {8}, {8}, {8}},
    {{0}, {8}, {16}, {24}}},
-   {SVGA3D_B8G8R8X8_UNORM, SVGA3DBLOCKDESC_RGB,
+   {SVGA3D_B8G8R8X8_UNORM, SVGA3DBLOCKDESC_RGB_UNORM,
    {1, 1, 1}, 4, 4,
-   24, {{8}, {8}, {8}, {0}},
+   {{8}, {8}, {8}, {0}},
    {{0}, {8}, {16}, {24}}},
-   {SVGA3D_BC4_UNORM, SVGA3DBLOCKDESC_COMPRESSED,
+   {SVGA3D_BC4_UNORM, SVGA3DBLOCKDESC_BC4_COMP_UNORM,
    {4, 4, 1}, 8, 8,
-   64, {{0}, {0}, {64}, {0}},
+   {{0}, {0}, {64}, {0}},
    {{0}, {0}, {0}, {0}}},
-   {SVGA3D_BC5_UNORM, SVGA3DBLOCKDESC_COMPRESSED,
+   {SVGA3D_BC5_UNORM, SVGA3DBLOCKDESC_BC5_COMP_UNORM,
    {4, 4, 1}, 16, 16,
-   128, {{0}, {0}, {128}, {0}},
+   {{0}, {0}, {128}, {0}},
    {{0}, {0}, {0}, {0}}},
-
 };
 
 static inline u32 clamped_umul32(u32 a, u32 b)
@@ -946,6 +1111,10 @@ static inline u32 clamped_umul32(u32 a, u32 b)
 	return (tmp > (uint64_t) ((u32) -1)) ? (u32) -1 : tmp;
 }
 
+/**
+ * svga3dsurface_get_desc - Look up the appropriate SVGA3dSurfaceDesc for the
+ * given format.
+ */
 static inline const struct svga3d_surface_desc *
 svga3dsurface_get_desc(SVGA3dSurfaceFormat format)
 {
@@ -955,23 +1124,10 @@ svga3dsurface_get_desc(SVGA3dSurfaceFormat format)
 	return &svga3d_surface_descs[SVGA3D_FORMAT_INVALID];
 }
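As an editorial aside, a worked example may help make the descriptor table above concrete. The SVGA3D_DXT1 entry declares {4, 4, 1} blocks at 8 bytes per block, and those constants drive the pitch/size helpers that appear later in this header. This is an illustrative sketch only, not part of the patch; the variable names are invented, but the helper signatures are the ones visible in this file:

	/* Illustrative only: sizing a 64x64 SVGA3D_DXT1 image. */
	const struct svga3d_surface_desc *desc =
		svga3dsurface_get_desc(SVGA3D_DXT1);
	surf_size_struct size = { .width = 64, .height = 64, .depth = 1 };

	/* 64/4 = 16 blocks per row, 16 blocks * 8 bytes = 128 bytes of pitch. */
	u32 pitch = svga3dsurface_calculate_pitch(desc, &size);

	/* 16 rows of blocks * 128 bytes = 2048 bytes for the whole image. */
	u32 bytes = svga3dsurface_get_image_buffer_size(desc, &size, pitch);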
-/*
- *----------------------------------------------------------------------
- *
- * svga3dsurface_get_mip_size --
- *
- *      Given a base level size and the mip level, compute the size of
- *      the mip level.
- *
- * Results:
- *      See above.
- *
- * Side effects:
- *      None.
- *
- *----------------------------------------------------------------------
+/**
+ * svga3dsurface_get_mip_size - Given a base level size and the mip level,
+ * compute the size of the mip level.
  */
-
 static inline surf_size_struct
 svga3dsurface_get_mip_size(surf_size_struct base_level, u32 mip_level)
 {
@@ -1018,28 +1174,17 @@ svga3dsurface_calculate_pitch(const struct svga3d_surface_desc *desc,
 	return pitch;
 }
 
-/*
- *-----------------------------------------------------------------------------
- *
- * svga3dsurface_get_image_buffer_size --
- *
- *      Return the number of bytes of buffer space required to store
- *      one image of a surface, optionally using the specified pitch.
- *
- *      If pitch is zero, it is assumed that rows are tightly packed.
+/**
+ * svga3dsurface_get_image_buffer_size - Calculates image buffer size.
  *
- *      This function is overflow-safe. If the result would have
- *      overflowed, instead we return MAX_UINT32.
+ * Return the number of bytes of buffer space required to store one image of a
+ * surface, optionally using the specified pitch.
  *
- * Results:
- *      Byte count.
+ * If pitch is zero, it is assumed that rows are tightly packed.
  *
- * Side effects:
- *      None.
- *
- *-----------------------------------------------------------------------------
+ * This function is overflow-safe. If the result would have overflowed, instead
+ * we return MAX_UINT32.
  */
-
 static inline u32
 svga3dsurface_get_image_buffer_size(const struct svga3d_surface_desc *desc,
 				    const surf_size_struct *size,
@@ -1067,6 +1212,9 @@ svga3dsurface_get_image_buffer_size(const struct svga3d_surface_desc *desc,
 	return total_size;
 }
 
+/**
+ * svga3dsurface_get_serialized_size - Get the serialized size for the image.
+ */
 static inline u32
 svga3dsurface_get_serialized_size(SVGA3dSurfaceFormat format,
 				  surf_size_struct base_level_size,
@@ -1087,6 +1235,26 @@ svga3dsurface_get_serialized_size(SVGA3dSurfaceFormat format,
 	return total_size * num_layers;
 }
 
+/**
+ * svga3dsurface_get_serialized_size_extended - Returns the number of bytes
+ * required for a surface with given parameters. Support for sample count.
+ */
+static inline u32
+svga3dsurface_get_serialized_size_extended(SVGA3dSurfaceFormat format,
+					   surf_size_struct base_level_size,
+					   u32 num_mip_levels,
+					   u32 num_layers,
+					   u32 num_samples)
+{
+	uint64_t total_size =
+		svga3dsurface_get_serialized_size(format,
+						  base_level_size,
+						  num_mip_levels,
+						  num_layers);
+	total_size *= max_t(u32, 1, num_samples);
+
+	return min_t(uint64_t, total_size, (uint64_t)U32_MAX);
+}
 
 /**
  * svga3dsurface_get_pixel_offset - Compute the offset (in bytes) to a pixel
@@ -1206,3 +1374,5 @@ svga3dsurface_is_screen_target_format(SVGA3dSurfaceFormat format)
 	}
 	return svga3dsurface_is_dx_screen_target_format(format);
 }
+
+#endif /* _SVGA3D_SURFACEDEFS_H_ */
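The new svga3dsurface_get_serialized_size_extended() helper widens the multiply to 64 bits and clamps with min_t(), so a caller can treat U32_MAX as an overflow sentinel rather than risking a wrapped size. A hedged usage sketch (the surface parameters are invented for illustration):

	surf_size_struct base = { .width = 1024, .height = 1024, .depth = 1 };
	u32 bytes = svga3dsurface_get_serialized_size_extended(
			SVGA3D_R8G8B8A8_UNORM, base,
			11,	/* num_mip_levels */
			1,	/* num_layers */
			4);	/* num_samples */

	if (bytes == MAX_UINT32)	/* clamped: the real size overflowed */
		return -EINVAL;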
diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga3d_types.h b/drivers/gpu/drm/vmwgfx/device_include/svga3d_types.h
index 27b33ba88430..308370665a8e 100644
--- a/drivers/gpu/drm/vmwgfx/device_include/svga3d_types.h
+++ b/drivers/gpu/drm/vmwgfx/device_include/svga3d_types.h
@@ -1,5 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
 /**********************************************************
- * Copyright 2012-2015 VMware, Inc. All rights reserved.
+ * Copyright 2012-2015 VMware, Inc.
 *
 * Permission is hereby granted, free of charge, to any person
 * obtaining a copy of this software and associated documentation
@@ -44,9 +45,21 @@
 
 #define SVGA3D_INVALID_ID         ((uint32)-1)
 
+typedef uint8 SVGABool8;   /* 8-bit Bool definition */
 typedef uint32 SVGA3dBool; /* 32-bit Bool definition */
 typedef uint32 SVGA3dColor; /* a, r, g, b */
 
+typedef uint32 SVGA3dSurfaceId;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+	uint32 numerator;
+	uint32 denominator;
+}
+#include "vmware_pack_end.h"
+SVGA3dFraction64;
+
 typedef
 #include "vmware_pack_begin.h"
 struct SVGA3dCopyRect {
@@ -145,7 +158,7 @@ typedef enum SVGA3dSurfaceFormat {
 	SVGA3D_BUMPU8V8 = 20,
 	SVGA3D_BUMPL6V5U5 = 21,
 	SVGA3D_BUMPX8L8V8U8 = 22,
-	SVGA3D_BUMPL8V8U8 = 23,
+	SVGA3D_FORMAT_DEAD1 = 23,
 
 	SVGA3D_ARGB_S10E5 = 24, /* 16-bit floating-point ARGB */
 	SVGA3D_ARGB_S23E8 = 25, /* 32-bit floating-point ARGB */
@@ -204,8 +217,8 @@ typedef enum SVGA3dSurfaceFormat {
 	SVGA3D_R32G32_SINT = 59,
 	SVGA3D_R32G8X24_TYPELESS = 60,
 	SVGA3D_D32_FLOAT_S8X24_UINT = 61,
-	SVGA3D_R32_FLOAT_X8X24_TYPELESS = 62,
-	SVGA3D_X32_TYPELESS_G8X24_UINT = 63,
+	SVGA3D_R32_FLOAT_X8X24 = 62,
+	SVGA3D_X32_G8X24_UINT = 63,
 	SVGA3D_R10G10B10A2_TYPELESS = 64,
 	SVGA3D_R10G10B10A2_UINT = 65,
 	SVGA3D_R11G11B10_FLOAT = 66,
@@ -223,8 +236,8 @@ typedef enum SVGA3dSurfaceFormat {
 	SVGA3D_R32_SINT = 78,
 	SVGA3D_R24G8_TYPELESS = 79,
 	SVGA3D_D24_UNORM_S8_UINT = 80,
-	SVGA3D_R24_UNORM_X8_TYPELESS = 81,
-	SVGA3D_X24_TYPELESS_G8_UINT = 82,
+	SVGA3D_R24_UNORM_X8 = 81,
+	SVGA3D_X24_G8_UINT = 82,
 	SVGA3D_R8G8_TYPELESS = 83,
 	SVGA3D_R8G8_UNORM = 84,
 	SVGA3D_R8G8_UINT = 85,
@@ -296,92 +309,114 @@ typedef enum SVGA3dSurfaceFormat {
 	SVGA3D_FORMAT_MAX
 } SVGA3dSurfaceFormat;
 
-typedef enum SVGA3dSurfaceFlags {
-	SVGA3D_SURFACE_CUBEMAP = (1 << 0),
+/*
+ * SVGA3d Surface Flags --
+ */
+#define SVGA3D_SURFACE_CUBEMAP (1 << 0)
 
-	/*
-	 * HINT flags are not enforced by the device but are useful for
-	 * performance.
-	 */
-	SVGA3D_SURFACE_HINT_STATIC = (1 << 1),
-	SVGA3D_SURFACE_HINT_DYNAMIC = (1 << 2),
-	SVGA3D_SURFACE_HINT_INDEXBUFFER = (1 << 3),
-	SVGA3D_SURFACE_HINT_VERTEXBUFFER = (1 << 4),
-	SVGA3D_SURFACE_HINT_TEXTURE = (1 << 5),
-	SVGA3D_SURFACE_HINT_RENDERTARGET = (1 << 6),
-	SVGA3D_SURFACE_HINT_DEPTHSTENCIL = (1 << 7),
-	SVGA3D_SURFACE_HINT_WRITEONLY = (1 << 8),
-	SVGA3D_SURFACE_MASKABLE_ANTIALIAS = (1 << 9),
-	SVGA3D_SURFACE_AUTOGENMIPMAPS = (1 << 10),
-	SVGA3D_SURFACE_DECODE_RENDERTARGET = (1 << 11),
+/*
+ * HINT flags are not enforced by the device but are useful for
+ * performance.
+ */
+#define SVGA3D_SURFACE_HINT_STATIC (CONST64U(1) << 1)
+#define SVGA3D_SURFACE_HINT_DYNAMIC (CONST64U(1) << 2)
+#define SVGA3D_SURFACE_HINT_INDEXBUFFER (CONST64U(1) << 3)
+#define SVGA3D_SURFACE_HINT_VERTEXBUFFER (CONST64U(1) << 4)
+#define SVGA3D_SURFACE_HINT_TEXTURE (CONST64U(1) << 5)
+#define SVGA3D_SURFACE_HINT_RENDERTARGET (CONST64U(1) << 6)
+#define SVGA3D_SURFACE_HINT_DEPTHSTENCIL (CONST64U(1) << 7)
+#define SVGA3D_SURFACE_HINT_WRITEONLY (CONST64U(1) << 8)
+#define SVGA3D_SURFACE_MASKABLE_ANTIALIAS (CONST64U(1) << 9)
+#define SVGA3D_SURFACE_AUTOGENMIPMAPS (CONST64U(1) << 10)
+
+#define SVGA3D_SURFACE_DECODE_RENDERTARGET (CONST64U(1) << 11)
 
-	/*
-	 * Is this surface using a base-level pitch for it's mob backing?
-	 *
-	 * This flag is not intended to be set by guest-drivers, but is instead
-	 * set by the device when the surface is bound to a mob with a specified
-	 * pitch.
-	 */
-	SVGA3D_SURFACE_MOB_PITCH = (1 << 12),
+/*
+ * Is this surface using a base-level pitch for its mob backing?
+ *
+ * This flag is not intended to be set by guest-drivers, but is instead
+ * set by the device when the surface is bound to a mob with a specified
+ * pitch.
+ */
+#define SVGA3D_SURFACE_MOB_PITCH (CONST64U(1) << 12)
 
-	SVGA3D_SURFACE_INACTIVE = (1 << 13),
-	SVGA3D_SURFACE_HINT_RT_LOCKABLE = (1 << 14),
-	SVGA3D_SURFACE_VOLUME = (1 << 15),
+#define SVGA3D_SURFACE_INACTIVE (CONST64U(1) << 13)
+#define SVGA3D_SURFACE_HINT_RT_LOCKABLE (CONST64U(1) << 14)
+#define SVGA3D_SURFACE_VOLUME (CONST64U(1) << 15)
 
-	/*
-	 * Required to be set on a surface to bind it to a screen target.
-	 */
-	SVGA3D_SURFACE_SCREENTARGET = (1 << 16),
+/*
+ * Required to be set on a surface to bind it to a screen target.
+ */
+#define SVGA3D_SURFACE_SCREENTARGET (CONST64U(1) << 16)
 
-	/*
-	 * Align images in the guest-backing mob to 16-bytes.
-	 */
-	SVGA3D_SURFACE_ALIGN16 = (1 << 17),
+/*
+ * Align images in the guest-backing mob to 16-bytes.
+ */
+#define SVGA3D_SURFACE_ALIGN16 (CONST64U(1) << 17)
 
-	SVGA3D_SURFACE_1D = (1 << 18),
-	SVGA3D_SURFACE_ARRAY = (1 << 19),
+#define SVGA3D_SURFACE_1D (CONST64U(1) << 18)
+#define SVGA3D_SURFACE_ARRAY (CONST64U(1) << 19)
 
-	/*
-	 * Bind flags.
-	 * These are enforced for any surface defined with DefineGBSurface_v2.
-	 */
-	SVGA3D_SURFACE_BIND_VERTEX_BUFFER = (1 << 20),
-	SVGA3D_SURFACE_BIND_INDEX_BUFFER = (1 << 21),
-	SVGA3D_SURFACE_BIND_CONSTANT_BUFFER = (1 << 22),
-	SVGA3D_SURFACE_BIND_SHADER_RESOURCE = (1 << 23),
-	SVGA3D_SURFACE_BIND_RENDER_TARGET = (1 << 24),
-	SVGA3D_SURFACE_BIND_DEPTH_STENCIL = (1 << 25),
-	SVGA3D_SURFACE_BIND_STREAM_OUTPUT = (1 << 26),
+/*
+ * Bind flags.
+ * These are enforced for any surface defined with DefineGBSurface_v2.
+ */
+#define SVGA3D_SURFACE_BIND_VERTEX_BUFFER (CONST64U(1) << 20)
+#define SVGA3D_SURFACE_BIND_INDEX_BUFFER (CONST64U(1) << 21)
+#define SVGA3D_SURFACE_BIND_CONSTANT_BUFFER (CONST64U(1) << 22)
+#define SVGA3D_SURFACE_BIND_SHADER_RESOURCE (CONST64U(1) << 23)
+#define SVGA3D_SURFACE_BIND_RENDER_TARGET (CONST64U(1) << 24)
+#define SVGA3D_SURFACE_BIND_DEPTH_STENCIL (CONST64U(1) << 25)
+#define SVGA3D_SURFACE_BIND_STREAM_OUTPUT (CONST64U(1) << 26)
 
-	/*
-	 * A note on staging flags:
-	 *
-	 * The STAGING flags notes that the surface will not be used directly by the
-	 * drawing pipeline, i.e. that it will not be bound to any bind point.
-	 * Staging surfaces may be used by copy operations to move data in and out
-	 * of other surfaces.
-	 *
-	 * The HINT_INDIRECT_UPDATE flag suggests that the surface will receive
-	 * updates indirectly, i.e. the surface will not be updated directly, but
-	 * will receive copies from staging surfaces.
-	 */
-	SVGA3D_SURFACE_STAGING_UPLOAD = (1 << 27),
-	SVGA3D_SURFACE_STAGING_DOWNLOAD = (1 << 28),
-	SVGA3D_SURFACE_HINT_INDIRECT_UPDATE = (1 << 29),
+/*
+ * The STAGING flags note that the surface will not be used directly by the
+ * drawing pipeline, i.e. that it will not be bound to any bind point.
+ * Staging surfaces may be used by copy operations to move data in and out
+ * of other surfaces. No bind flags may be set on surfaces with this flag.
+ *
+ * The HINT_INDIRECT_UPDATE flag suggests that the surface will receive
+ * updates indirectly, i.e. the surface will not be updated directly, but
+ * will receive copies from staging surfaces.
+ */
+#define SVGA3D_SURFACE_STAGING_UPLOAD (CONST64U(1) << 27)
+#define SVGA3D_SURFACE_STAGING_DOWNLOAD (CONST64U(1) << 28)
+#define SVGA3D_SURFACE_HINT_INDIRECT_UPDATE (CONST64U(1) << 29)
 
-	/*
-	 * Setting this flag allow this surface to be used with the
-	 * SVGA_3D_CMD_DX_TRANSFER_FROM_BUFFER command.  It is only valid for
-	 * buffer surfaces, an no bind flags are allowed to be set on surfaces
-	 * with this flag.
-	 */
-	SVGA3D_SURFACE_TRANSFER_FROM_BUFFER = (1 << 30),
+/*
+ * Setting this flag allows this surface to be used with the
+ * SVGA_3D_CMD_DX_TRANSFER_FROM_BUFFER command.  It is only valid for
+ * buffer surfaces, and no bind flags are allowed to be set on surfaces
+ * with this flag.
+ */
+#define SVGA3D_SURFACE_TRANSFER_FROM_BUFFER (CONST64U(1) << 30)
 
-	/*
-	 * Marker for the last defined bit.
-	 */
-	SVGA3D_SURFACE_FLAG_MAX = (1 << 31),
-} SVGA3dSurfaceFlags;
+/*
+ * Reserved for video operations.
+ */
+#define SVGA3D_SURFACE_RESERVED1 (CONST64U(1) << 31)
+
+/*
+ * Specifies that a surface is multisample, and therefore requires the full
+ * mob-backing to store all the samples.
+ */
+#define SVGA3D_SURFACE_MULTISAMPLE (CONST64U(1) << 32)
+
+#define SVGA3D_SURFACE_FLAG_MAX (CONST64U(1) << 33)
+
+/*
+ * Surface flags types:
+ *
+ * SVGA3dSurface1Flags:  Lower 32-bits of flags.
+ * SVGA3dSurface2Flags:  Upper 32-bits of flags.
+ * SVGA3dSurfaceAllFlags: Full 64-bits of flags.
+ */
+typedef uint32 SVGA3dSurface1Flags;
+typedef uint32 SVGA3dSurface2Flags;
+typedef uint64 SVGA3dSurfaceAllFlags;
+
+#define SVGA3D_SURFACE_FLAGS1_MASK ((uint64_t)MAX_UINT32)
+#define SVGA3D_SURFACE_FLAGS2_MASK (MAX_UINT64 & ~SVGA3D_SURFACE_FLAGS1_MASK)
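Because the surface flags now span 64 bits while some command paths still carry a 32-bit flags word, the two masks above exist to split a full flag set into its halves. A minimal sketch of the intended use, assuming nothing beyond the typedefs and masks defined above (the flag combination is arbitrary, and how the halves travel on the wire is up to the individual commands):

	SVGA3dSurfaceAllFlags all = SVGA3D_SURFACE_HINT_TEXTURE |
				    SVGA3D_SURFACE_BIND_SHADER_RESOURCE |
				    SVGA3D_SURFACE_MULTISAMPLE;

	/* The lower 32 bits keep the legacy SVGA3dSurfaceFlags meaning... */
	SVGA3dSurface1Flags flags1 =
		(SVGA3dSurface1Flags)(all & SVGA3D_SURFACE_FLAGS1_MASK);

	/* ...and new bits such as MULTISAMPLE (bit 32) live in the upper half. */
	SVGA3dSurface2Flags flags2 =
		(SVGA3dSurface2Flags)((all & SVGA3D_SURFACE_FLAGS2_MASK) >> 32);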
 #define SVGA3D_SURFACE_HB_DISALLOWED_MASK        \
 	(  SVGA3D_SURFACE_MOB_PITCH    |          \
@@ -392,29 +427,41 @@ typedef enum SVGA3dSurfaceFlags {
 	   SVGA3D_SURFACE_STAGING_UPLOAD |        \
 	   SVGA3D_SURFACE_STAGING_DOWNLOAD |      \
 	   SVGA3D_SURFACE_HINT_INDIRECT_UPDATE |  \
-	   SVGA3D_SURFACE_TRANSFER_FROM_BUFFER    \
+	   SVGA3D_SURFACE_TRANSFER_FROM_BUFFER |  \
+	   SVGA3D_SURFACE_MULTISAMPLE             \
+	)
+
+#define SVGA3D_SURFACE_HB_PRESENT_DISALLOWED_MASK \
+	(  SVGA3D_SURFACE_1D |                    \
+	   SVGA3D_SURFACE_MULTISAMPLE             \
 	)
 
 #define SVGA3D_SURFACE_2D_DISALLOWED_MASK         \
 	(  SVGA3D_SURFACE_CUBEMAP |               \
 	   SVGA3D_SURFACE_MASKABLE_ANTIALIAS |    \
 	   SVGA3D_SURFACE_AUTOGENMIPMAPS |        \
-	   SVGA3D_SURFACE_DECODE_RENDERTARGET |   \
 	   SVGA3D_SURFACE_VOLUME |                \
 	   SVGA3D_SURFACE_1D |                    \
-	   SVGA3D_SURFACE_ARRAY |                 \
 	   SVGA3D_SURFACE_BIND_VERTEX_BUFFER |    \
 	   SVGA3D_SURFACE_BIND_INDEX_BUFFER |     \
 	   SVGA3D_SURFACE_BIND_CONSTANT_BUFFER |  \
 	   SVGA3D_SURFACE_BIND_DEPTH_STENCIL |    \
 	   SVGA3D_SURFACE_BIND_STREAM_OUTPUT |    \
-	   SVGA3D_SURFACE_TRANSFER_FROM_BUFFER    \
+	   SVGA3D_SURFACE_TRANSFER_FROM_BUFFER |  \
+	   SVGA3D_SURFACE_MULTISAMPLE             \
+	)
+
+#define SVGA3D_SURFACE_BASICOPS_DISALLOWED_MASK   \
+	(  SVGA3D_SURFACE_CUBEMAP |               \
+	   SVGA3D_SURFACE_AUTOGENMIPMAPS |        \
+	   SVGA3D_SURFACE_VOLUME |                \
+	   SVGA3D_SURFACE_1D |                    \
+	   SVGA3D_SURFACE_MULTISAMPLE             \
 	)
 
 #define SVGA3D_SURFACE_SCREENTARGET_DISALLOWED_MASK \
 	(  SVGA3D_SURFACE_CUBEMAP |               \
 	   SVGA3D_SURFACE_AUTOGENMIPMAPS |        \
-	   SVGA3D_SURFACE_DECODE_RENDERTARGET |   \
 	   SVGA3D_SURFACE_VOLUME |                \
 	   SVGA3D_SURFACE_1D |                    \
 	   SVGA3D_SURFACE_BIND_VERTEX_BUFFER |    \
@@ -426,12 +473,36 @@ typedef enum SVGA3dSurfaceFlags {
 	   SVGA3D_SURFACE_STAGING_UPLOAD |        \
 	   SVGA3D_SURFACE_STAGING_DOWNLOAD |      \
 	   SVGA3D_SURFACE_HINT_INDIRECT_UPDATE |  \
-	   SVGA3D_SURFACE_TRANSFER_FROM_BUFFER    \
+	   SVGA3D_SURFACE_TRANSFER_FROM_BUFFER |  \
+	   SVGA3D_SURFACE_MULTISAMPLE             \
+	)
+
+#define SVGA3D_SURFACE_BUFFER_DISALLOWED_MASK     \
+	(  SVGA3D_SURFACE_CUBEMAP |               \
+	   SVGA3D_SURFACE_AUTOGENMIPMAPS |        \
+	   SVGA3D_SURFACE_VOLUME |                \
+	   SVGA3D_SURFACE_1D |                    \
+	   SVGA3D_SURFACE_MASKABLE_ANTIALIAS |    \
+	   SVGA3D_SURFACE_ARRAY |                 \
+	   SVGA3D_SURFACE_MULTISAMPLE |           \
+	   SVGA3D_SURFACE_MOB_PITCH               \
+	)
+
+#define SVGA3D_SURFACE_MULTISAMPLE_DISALLOWED_MASK \
+	(  SVGA3D_SURFACE_CUBEMAP |               \
+	   SVGA3D_SURFACE_AUTOGENMIPMAPS |        \
+	   SVGA3D_SURFACE_VOLUME |                \
+	   SVGA3D_SURFACE_1D |                    \
+	   SVGA3D_SURFACE_SCREENTARGET |          \
+	   SVGA3D_SURFACE_MOB_PITCH               \
 	)
 
 #define SVGA3D_SURFACE_DX_ONLY_MASK               \
 	(  SVGA3D_SURFACE_BIND_STREAM_OUTPUT |    \
+	   SVGA3D_SURFACE_STAGING_UPLOAD |        \
+	   SVGA3D_SURFACE_STAGING_DOWNLOAD |      \
 	   SVGA3D_SURFACE_TRANSFER_FROM_BUFFER    \
+	)
 
 #define SVGA3D_SURFACE_STAGING_MASK               \
 	(  SVGA3D_SURFACE_STAGING_UPLOAD |        \
@@ -487,7 +558,7 @@ typedef enum {
 
    /*
     * Indicates that this format can be converted to any RGB format for which
-   * SVGA3DFORMAT_OP_MEMBEROFGROUP_ARGB is specified
+   * SVGA3DFORMAT_OP_MEMBEROFGROUP_ARGB is specified.
    */
    SVGA3DFORMAT_OP_CONVERT_TO_ARGB = 0x00002000,
 
@@ -498,22 +569,22 @@ typedef enum {
 
    /*
    * Indicates that this format can be read as an SRGB texture (meaning that the
-   * sampler will linearize the looked up data)
+   * sampler will linearize the looked up data).
    */
    SVGA3DFORMAT_OP_SRGBREAD = 0x00008000,
 
    /*
-   * Indicates that this format can be used in the bumpmap instructions
+   * Indicates that this format can be used in the bumpmap instructions.
    */
    SVGA3DFORMAT_OP_BUMPMAP = 0x00010000,
 
    /*
-   * Indicates that this format can be sampled by the displacement map sampler
+   * Indicates that this format can be sampled by the displacement map sampler.
    */
    SVGA3DFORMAT_OP_DMAP = 0x00020000,
 
    /*
-   * Indicates that this format cannot be used with texture filtering
+   * Indicates that this format cannot be used with texture filtering.
    */
    SVGA3DFORMAT_OP_NOFILTER = 0x00040000,
 
@@ -530,18 +601,18 @@ typedef enum {
    SVGA3DFORMAT_OP_SRGBWRITE = 0x00100000,
 
    /*
-   * Indicates that this format cannot be used with alpha blending
+   * Indicates that this format cannot be used with alpha blending.
    */
    SVGA3DFORMAT_OP_NOALPHABLEND = 0x00200000,
 
    /*
    * Indicates that the device can auto-generate sublevels for resources
-   * of this format
+   * of this format.
    */
    SVGA3DFORMAT_OP_AUTOGENMIPMAP = 0x00400000,
 
    /*
-   * Indicates that this format can be used by vertex texture sampler
+   * Indicates that this format can be used by vertex texture sampler.
    */
    SVGA3DFORMAT_OP_VERTEXTEXTURE = 0x00800000,
 
@@ -1501,7 +1572,6 @@ union SVGADXQueryResultUnion {
 #include "vmware_pack_end.h"
 SVGADXQueryResultUnion;
 
-
 typedef enum {
 	SVGA3D_QUERYSTATE_PENDING = 0,      /* Query is not finished yet */
 	SVGA3D_QUERYSTATE_SUCCEEDED = 1,    /* Completed successfully */
@@ -1533,9 +1603,9 @@ typedef struct {
 	union {
 		struct {
-			uint16 function;       /* SVGA3dFogFunction */
-			uint8 type;            /* SVGA3dFogType */
-			uint8 base;            /* SVGA3dFogBase */
+			uint16 function;     /* SVGA3dFogFunction */
+			uint8 type;          /* SVGA3dFogType */
+			uint8 base;          /* SVGA3dFogBase */
 		};
 		uint32 uintValue;
 	};
@@ -1547,19 +1617,27 @@
 SVGA3dFogMode;
 
 /*
 * Uniquely identify one image (a 1D/2D/3D array) from a surface. This
 * is a surface ID as well as face/mipmap indices.
 */
-
 typedef
 #include "vmware_pack_begin.h"
 struct SVGA3dSurfaceImageId {
-	uint32 sid;
-	uint32 face;
-	uint32 mipmap;
+	uint32 sid;
+	uint32 face;
+	uint32 mipmap;
 }
 #include "vmware_pack_end.h"
 SVGA3dSurfaceImageId;
 
 typedef
 #include "vmware_pack_begin.h"
+struct SVGA3dSubSurfaceId {
+	uint32 sid;
+	uint32 subResourceId;
+}
+#include "vmware_pack_end.h"
+SVGA3dSubSurfaceId;
+
+typedef
+#include "vmware_pack_begin.h"
 struct {
 	uint32 width;
 	uint32 height;
@@ -1582,13 +1660,18 @@ typedef enum {
 	SVGA_OTABLE_DX9_MAX = 5,
 
 	SVGA_OTABLE_DXCONTEXT = 5,
-	SVGA_OTABLE_MAX = 6
-} SVGAOTableType;
+	SVGA_OTABLE_DX_MAX = 6,
 
-/*
- * Deprecated.
- */
-#define SVGA_OTABLE_COUNT 4
+	SVGA_OTABLE_RESERVED1 = 6,
+	SVGA_OTABLE_RESERVED2 = 7,
+
+	/*
+	 * Additions to this table need to be tied to HW-version features and
+	 * checkpointed accordingly.
+	 */
+	SVGA_OTABLE_DEVEL_MAX = 8,
+	SVGA_OTABLE_MAX = 8
+} SVGAOTableType;
 
 typedef enum {
 	SVGA_COTABLE_MIN = 0,
@@ -1605,7 +1688,7 @@ typedef enum {
 	SVGA_COTABLE_DXSHADER = 10,
 	SVGA_COTABLE_DX10_MAX = 11,
 	SVGA_COTABLE_UAVIEW = 11,
-	SVGA_COTABLE_MAX
+	SVGA_COTABLE_MAX = 12,
 } SVGACOTableType;
 
 /*
@@ -1626,8 +1709,37 @@ typedef enum SVGAMobFormat {
 	SVGA3D_MOBFMT_PREDX_MAX = 7,
 	SVGA3D_MOBFMT_EMPTY = 7,
 	SVGA3D_MOBFMT_MAX,
+
+	/*
+	 * This isn't actually used by the guest, but is a mob-format used
+	 * internally by the SVGA device (and is therefore not binary compatible).
+	 */
+	SVGA3D_MOBFMT_HB,
 } SVGAMobFormat;
 
 #define SVGA3D_MOB_EMPTY_BASE 1
 
+/*
+ * Multisample pattern types.
+ */
+
+typedef enum SVGA3dMSPattern {
+	SVGA3D_MS_PATTERN_NONE = 0,
+	SVGA3D_MS_PATTERN_MIN = 0,
+	SVGA3D_MS_PATTERN_STANDARD = 1,
+	SVGA3D_MS_PATTERN_CENTER = 2,
+	SVGA3D_MS_PATTERN_MAX = 3,
+} SVGA3dMSPattern;
+
+/*
+ * Precision settings for each sample.
+ */
+
+typedef enum SVGA3dMSQualityLevel {
+	SVGA3D_MS_QUALITY_NONE = 0,
+	SVGA3D_MS_QUALITY_MIN = 0,
+	SVGA3D_MS_QUALITY_FULL = 1,
+	SVGA3D_MS_QUALITY_MAX = 2,
+} SVGA3dMSQualityLevel;
+
 #endif /* _SVGA3D_TYPES_H_ */
diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga_escape.h b/drivers/gpu/drm/vmwgfx/device_include/svga_escape.h
index 884b1d1fb85f..acb41e28e46f 100644
--- a/drivers/gpu/drm/vmwgfx/device_include/svga_escape.h
+++ b/drivers/gpu/drm/vmwgfx/device_include/svga_escape.h
@@ -1,5 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
 /**********************************************************
- * Copyright 2007-2015 VMware, Inc. All rights reserved.
+ * Copyright 2007-2015 VMware, Inc.
 *
 * Permission is hereby granted, free of charge, to any person
 * obtaining a copy of this software and associated documentation
diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga_overlay.h b/drivers/gpu/drm/vmwgfx/device_include/svga_overlay.h
index faf6d9b2b891..e5385146e7fc 100644
--- a/drivers/gpu/drm/vmwgfx/device_include/svga_overlay.h
+++ b/drivers/gpu/drm/vmwgfx/device_include/svga_overlay.h
@@ -1,5 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
 /**********************************************************
- * Copyright 2007-2015 VMware, Inc. All rights reserved.
+ * Copyright 2007-2015 VMware, Inc.
 *
 * Permission is hereby granted, free of charge, to any person
 * obtaining a copy of this software and associated documentation
diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga_reg.h b/drivers/gpu/drm/vmwgfx/device_include/svga_reg.h
index 88e72bf9a534..056f54b35d73 100644
--- a/drivers/gpu/drm/vmwgfx/device_include/svga_reg.h
+++ b/drivers/gpu/drm/vmwgfx/device_include/svga_reg.h
@@ -1,5 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
 /**********************************************************
- * Copyright 1998-2015 VMware, Inc. All rights reserved.
+ * Copyright 1998-2015 VMware, Inc.
 *
 * Permission is hereby granted, free of charge, to any person
 * obtaining a copy of this software and associated documentation
@@ -63,16 +64,26 @@ typedef uint32 SVGAMobId;
 #define SVGA_MAX_BITS_PER_PIXEL         32
 #define SVGA_MAX_DEPTH                  24
 #define SVGA_MAX_DISPLAYS               10
+#define SVGA_MAX_SCREEN_SIZE            8192
+#define SVGA_SCREEN_ROOT_LIMIT          (SVGA_MAX_SCREEN_SIZE * SVGA_MAX_DISPLAYS)
+
 
 /*
  * Legal values for the SVGA_REG_CURSOR_ON register in old-fashioned
  * cursor bypass mode. This is still supported, but no new guest
  * drivers should use it.
  */
-#define SVGA_CURSOR_ON_HIDE            0x0   /* Must be 0 to maintain backward compatibility */
-#define SVGA_CURSOR_ON_SHOW            0x1   /* Must be 1 to maintain backward compatibility */
-#define SVGA_CURSOR_ON_REMOVE_FROM_FB  0x2   /* Remove the cursor from the framebuffer because we need to see what's under it */
-#define SVGA_CURSOR_ON_RESTORE_TO_FB   0x3   /* Put the cursor back in the framebuffer so the user can see it */
+#define SVGA_CURSOR_ON_HIDE            0x0
+#define SVGA_CURSOR_ON_SHOW            0x1
+
+/*
+ * Remove the cursor from the framebuffer
+ * because we need to see what's under it
+ */
+#define SVGA_CURSOR_ON_REMOVE_FROM_FB  0x2
+
+/* Put the cursor back in the framebuffer so the user can see it */
+#define SVGA_CURSOR_ON_RESTORE_TO_FB   0x3
 
 /*
  * The maximum framebuffer size that can be traced for guests unless the
@@ -101,7 +112,10 @@
 #define SVGA_VERSION_0     0
 #define SVGA_ID_0          SVGA_MAKE_ID(SVGA_VERSION_0)
 
-/* "Invalid" value for all SVGA IDs. (Version ID, screen object ID, surface ID...) */
+/*
+ * "Invalid" value for all SVGA IDs.
+ * (Version ID, screen object ID, surface ID...)
+ */
 #define SVGA_ID_INVALID    0xFFFFFFFF
 
 /* Port offsets, relative to BAR0 */
@@ -154,7 +168,7 @@ enum {
    SVGA_REG_CONFIG_DONE = 20,         /* Set when memory area configured */
    SVGA_REG_SYNC = 21,                /* See "FIFO Synchronization Registers" */
    SVGA_REG_BUSY = 22,                /* See "FIFO Synchronization Registers" */
-   SVGA_REG_GUEST_ID = 23,            /* Set guest OS identifier */
+   SVGA_REG_GUEST_ID = 23,            /* (Deprecated) */
    SVGA_REG_CURSOR_ID = 24,           /* (Deprecated) */
    SVGA_REG_CURSOR_X = 25,            /* (Deprecated) */
    SVGA_REG_CURSOR_Y = 26,            /* (Deprecated) */
@@ -186,7 +200,14 @@ enum {
    SVGA_REG_MEMORY_SIZE = 47,       /* Total dedicated device memory excluding FIFO */
    SVGA_REG_COMMAND_LOW = 48,       /* Lower 32 bits and submits commands */
    SVGA_REG_COMMAND_HIGH = 49,      /* Upper 32 bits of command buffer PA */
-   SVGA_REG_MAX_PRIMARY_BOUNDING_BOX_MEM = 50,   /* Max primary memory */
+
+   /*
+    * Max primary memory.
+    * See SVGA_CAP_NO_BB_RESTRICTION.
+    */
+   SVGA_REG_MAX_PRIMARY_MEM = 50,
+   SVGA_REG_MAX_PRIMARY_BOUNDING_BOX_MEM = 50,
+
    SVGA_REG_SUGGESTED_GBOBJECT_MEM_SIZE_KB = 51, /* Suggested limit on mob mem */
    SVGA_REG_DEV_CAP = 52,           /* Write dev cap index, read value */
    SVGA_REG_CMD_PREPEND_LOW = 53,
@@ -194,7 +215,10 @@ enum {
    SVGA_REG_SCREENTARGET_MAX_WIDTH = 55,
    SVGA_REG_SCREENTARGET_MAX_HEIGHT = 56,
    SVGA_REG_MOB_MAX_SIZE = 57,
-   SVGA_REG_TOP = 58,               /* Must be 1 more than the last register */
+   SVGA_REG_BLANK_SCREEN_TARGETS = 58,
+   SVGA_REG_CAP2 = 59,
+   SVGA_REG_DEVEL_CAP = 60,
+   SVGA_REG_TOP = 61,               /* Must be 1 more than the last register */
 
    SVGA_PALETTE_BASE = 1024,        /* Base of SVGA color map */
    /* Next 768 (== 256*3) registers exist for colormap */
@@ -392,6 +416,7 @@ typedef enum {
    SVGA_CB_CONTEXT_0      = 0x0,
    SVGA_CB_CONTEXT_1      = 0x1, /* Supported with SVGA_CAP_HP_CMD_QUEUE */
    SVGA_CB_CONTEXT_MAX    = 0x2,
+   SVGA_CB_CONTEXT_HP_MAX = 0x2,
 } SVGACBContext;
 
 
@@ -448,6 +473,18 @@ typedef enum {
     * due to an error.  No IRQ is raised.
     */
    SVGA_CB_STATUS_SUBMISSION_ERROR = 6,
+
+   /*
+    * Written by the host when the host finished a
+    * SVGA_DC_CMD_ASYNC_STOP_QUEUE request for this command buffer
+    * queue.  The offset of the first byte not processed is stored in
+    * the errorOffset field of the command buffer header.  All guest
+    * visible side effects of commands till that point are guaranteed
+    * to be finished before this is written.  The
+    * SVGA_IRQFLAG_COMMAND_BUFFER IRQ is raised as long as the
+    * SVGA_CB_FLAG_NO_IRQ is not set.
+    */
+   SVGA_CB_STATUS_PARTIAL_COMPLETE = 7,
 } SVGACBStatus;
 
 typedef enum {
@@ -460,8 +497,8 @@ typedef enum {
 typedef
 #include "vmware_pack_begin.h"
 struct {
-   volatile SVGACBStatus status;
-   volatile uint32 errorOffset;
+   volatile SVGACBStatus status; /* Modified by device. */
+   volatile uint32 errorOffset;  /* Modified by device. */
    uint64 id;
    SVGACBFlags flags;
    uint32 length;
@@ -472,7 +509,9 @@ struct {
          uint32 mobOffset;
       } mob;
    } ptr;
-   uint32 offset; /* Valid if CMD_BUFFERS_2 cap set, must be zero otherwise */
+   uint32 offset; /* Valid if CMD_BUFFERS_2 cap set, must be zero otherwise,
+                   * modified by device.
+                   */
    uint32 dxContext; /* Valid if DX_CONTEXT flag set, must be zero otherwise */
    uint32 mustBeZero[6];
 }
@@ -483,20 +522,26 @@ typedef enum {
    SVGA_DC_CMD_NOP                   = 0,
    SVGA_DC_CMD_START_STOP_CONTEXT    = 1,
    SVGA_DC_CMD_PREEMPT               = 2,
-   SVGA_DC_CMD_MAX                   = 3,
-   SVGA_DC_CMD_FORCE_UINT            = MAX_UINT32,
+   SVGA_DC_CMD_START_QUEUE           = 3, /* Requires SVGA_CAP_HP_CMD_QUEUE */
+   SVGA_DC_CMD_ASYNC_STOP_QUEUE      = 4, /* Requires SVGA_CAP_HP_CMD_QUEUE */
+   SVGA_DC_CMD_EMPTY_CONTEXT_QUEUE   = 5, /* Requires SVGA_CAP_HP_CMD_QUEUE */
+   SVGA_DC_CMD_MAX                   = 6,
 } SVGADeviceContextCmdId;
 
-typedef struct {
+/*
+ * Starts or stops both SVGA_CB_CONTEXT_0 and SVGA_CB_CONTEXT_1.
+ */
+
+typedef struct SVGADCCmdStartStop {
    uint32 enable;
-   SVGACBContext context;
+   SVGACBContext context; /* Must be zero */
 } SVGADCCmdStartStop;
 
 /*
  * SVGADCCmdPreempt --
  *
  * This command allows the guest to request that all command buffers
- * on the specified context be preempted that can be.  After execution
+ * on SVGA_CB_CONTEXT_0 be preempted that can be.  After execution
  * of this command all command buffers that were preempted will
  * already have SVGA_CB_STATUS_PREEMPTED written into the status
  * field.  The device might still be processing a command buffer,
@@ -506,12 +551,69 @@
  * command buffer header set to zero.
  */
 
-typedef struct {
-   SVGACBContext context;
+typedef struct SVGADCCmdPreempt {
+   SVGACBContext context; /* Must be zero */
    uint32 ignoreIDZero;
 } SVGADCCmdPreempt;
 
 /*
+ * Starts the requested command buffer processing queue.  Valid only
+ * if the SVGA_CAP_HP_CMD_QUEUE cap is set.
+ *
+ * For a command queue to be considered runnable it must be enabled
+ * and any corresponding higher priority queues must also be enabled.
+ * For example in order for command buffers to be processed on
+ * SVGA_CB_CONTEXT_0 both SVGA_CB_CONTEXT_0 and SVGA_CB_CONTEXT_1 must
+ * be enabled.  But for commands to be runnable on SVGA_CB_CONTEXT_1
+ * only that queue must be enabled.
+ */
+
+typedef struct SVGADCCmdStartQueue {
+   SVGACBContext context;
+} SVGADCCmdStartQueue;
+
+/*
+ * Requests the SVGA device to stop processing the requested command
+ * buffer queue as soon as possible.  The guest knows the stop has
+ * completed when one of the following happens.
+ *
+ * 1) A command buffer status of SVGA_CB_STATUS_PARTIAL_COMPLETE is returned
+ * 2) A command buffer error is encountered which would stop the queue
+ *    regardless of the async stop request.
+ * 3) All command buffers that have been submitted complete successfully.
+ * 4) The stop completes synchronously if no command buffers are
+ *    active on the queue when it is issued.
+ *
+ * If the command queue is not in a runnable state there is no
+ * guarantee this async stop will finish.  For instance if the high
+ * priority queue is not enabled and a stop is requested on the low
+ * priority queue, the high priority queue must be reenabled to
+ * guarantee that the async stop will finish.
+ *
+ * This command along with SVGA_DC_CMD_EMPTY_CONTEXT_QUEUE can be used
+ * to implement mid command buffer preemption.
+ *
+ * Valid only if the SVGA_CAP_HP_CMD_QUEUE cap is set.
+ */
+
+typedef struct SVGADCCmdAsyncStopQueue {
+   SVGACBContext context;
+} SVGADCCmdAsyncStopQueue;
+
+/*
+ * Requests the SVGA device to throw away any full command buffers on
+ * the requested command queue that have not been started.  For a
+ * driver to know which command buffers were thrown away, it should
+ * only issue this command when the queue is stopped, for whatever
+ * reason.
+ */
+
+typedef struct SVGADCCmdEmptyQueue {
+   SVGACBContext context;
+} SVGADCCmdEmptyQueue;
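Taken together, the three queue commands above describe a recipe for mid-command-buffer preemption, sketched below. Only the ordering follows from the comments in this header; the example_* submit and wait helpers are hypothetical stand-ins, since how device-context commands are actually delivered is outside this file:

	SVGADCCmdAsyncStopQueue stop  = { .context = SVGA_CB_CONTEXT_0 };
	SVGADCCmdEmptyQueue     empty = { .context = SVGA_CB_CONTEXT_0 };
	SVGADCCmdStartQueue     start = { .context = SVGA_CB_CONTEXT_0 };

	example_submit_dc_cmd(dev, SVGA_DC_CMD_ASYNC_STOP_QUEUE,
			      &stop, sizeof(stop));
	/* Wait for SVGA_CB_STATUS_PARTIAL_COMPLETE, an error, or completion. */
	example_wait_for_queue_stop(dev);
	/* The queue is stopped; unstarted buffers can now be dropped safely. */
	example_submit_dc_cmd(dev, SVGA_DC_CMD_EMPTY_CONTEXT_QUEUE,
			      &empty, sizeof(empty));
	example_submit_dc_cmd(dev, SVGA_DC_CMD_START_QUEUE,
			      &start, sizeof(start));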
+
+
+/*
  * SVGAGMRImageFormat --
  *
  *    This is a packed representation of the source 2D image format
@@ -536,7 +638,7 @@ typedef struct SVGAGMRImageFormat {
       struct {
          uint32 bitsPerPixel : 8;
          uint32 colorDepth   : 8;
-         uint32 reserved     : 16;  /* Must be zero */
+         uint32 reserved     : 16; /* Must be zero */
       };
 
       uint32 value;
@@ -672,8 +774,36 @@ SVGASignedPoint;
  * SVGA_CAP_GBOBJECTS --
  *    Enable guest-backed objects and surfaces.
  *
- * SVGA_CAP_CMD_BUFFERS_3 --
- *    Enable support for command buffers in a mob.
+ * SVGA_CAP_DX --
+ *    Enable support for DX commands, and command buffers in a mob.
+ *
+ * SVGA_CAP_HP_CMD_QUEUE --
+ *    Enable support for the high priority command queue, and the
+ *    ScreenCopy command.
+ *
+ * SVGA_CAP_NO_BB_RESTRICTION --
+ *    Allow ScreenTargets to be defined without regard to the 32-bpp
+ *    bounding-box memory restrictions, i.e.:
+ *
+ *    The summed memory usage of all screens (assuming they were defined as
+ *    32-bpp) must always be less than the value of the
+ *    SVGA_REG_MAX_PRIMARY_MEM register.
+ *
+ *    If this cap is not present, the 32-bpp bounding box around all screens
+ *    must additionally be under the value of the SVGA_REG_MAX_PRIMARY_MEM
+ *    register.
+ *
+ *    If the cap is present, the bounding box restriction is lifted (and only
+ *    the screen-sum limit applies).
+ *
+ *    (Note that this is a slight lie... there is still a sanity limit on any
+ *     dimension of the topology to be less than SVGA_SCREEN_ROOT_LIMIT, even
+ *     when SVGA_CAP_NO_BB_RESTRICTION is present, but that should be
+ *     large enough to express any possible topology without holes between
+ *     monitors.)
+ *
+ * SVGA_CAP_CAP2_REGISTER --
+ *    If this cap is present, the SVGA_REG_CAP2 register is supported.
  */
 
 #define SVGA_CAP_NONE               0x00000000
@@ -699,8 +829,30 @@ SVGASignedPoint;
 #define SVGA_CAP_GBOBJECTS          0x08000000
 #define SVGA_CAP_DX                 0x10000000
 #define SVGA_CAP_HP_CMD_QUEUE       0x20000000
+#define SVGA_CAP_NO_BB_RESTRICTION  0x40000000
+#define SVGA_CAP_CAP2_REGISTER      0x80000000
 
-#define SVGA_CAP_CMD_RESERVED       0x80000000
+/*
+ * The SVGA_REG_CAP2 register is an additional set of SVGA capability bits.
+ *
+ * SVGA_CAP2_GROW_OTABLE --
+ *      Allow the GrowOTable/DXGrowCOTable commands.
+ *
+ * SVGA_CAP2_INTRA_SURFACE_COPY --
+ *      Allow the IntraSurfaceCopy command.
+ *
+ * SVGA_CAP2_DX2 --
+ *      Allow the DefineGBSurface_v3 and WholeSurfaceCopy commands.
+ *
+ * SVGA_CAP2_RESERVED --
+ *      Reserve the last bit for extending the SVGA capabilities to some
+ *      future mechanisms.
+ */
+#define SVGA_CAP2_NONE               0x00000000
+#define SVGA_CAP2_GROW_OTABLE        0x00000001
+#define SVGA_CAP2_INTRA_SURFACE_COPY 0x00000002
+#define SVGA_CAP2_DX2                0x00000004
+#define SVGA_CAP2_RESERVED           0x80000000
 
 
 /*
@@ -722,7 +874,8 @@ typedef enum {
    SVGABackdoorCapDeviceCaps = 0,
    SVGABackdoorCapFifoCaps = 1,
    SVGABackdoorCap3dHWVersion = 2,
-   SVGABackdoorCapMax = 3,
+   SVGABackdoorCapDeviceCaps2 = 3,
+   SVGABackdoorCapMax = 4,
 } SVGABackdoorCapType;
 
 
@@ -1914,16 +2067,6 @@ SVGAFifoCmdRemapGMR2;
 #define SVGA_VRAM_SIZE_W2K          (64 * 1024 * 1024) /* 64 MB */
 
-/*
- * To simplify autoDetect display configuration, support a minimum of
- * two 1920x1200 monitors, 32bpp, side-by-side, optionally rotated:
- *   numDisplays = 2
- *   maxWidth = numDisplay * 1920 = 3840
- *   maxHeight = rotated width of single monitor = 1920
- *   vramSize = maxWidth * maxHeight * 4 = 29491200
- */
-#define SVGA_VRAM_SIZE_AUTODETECT   (32 * 1024 * 1024)
-
 #if defined(VMX86_SERVER)
 #define SVGA_VRAM_SIZE               (4 * 1024 * 1024)
 #define SVGA_VRAM_SIZE_3D           (64 * 1024 * 1024)
diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga_types.h b/drivers/gpu/drm/vmwgfx/device_include/svga_types.h
index 2e8ba4df8de9..350bbc6fab02 100644
--- a/drivers/gpu/drm/vmwgfx/device_include/svga_types.h
+++ b/drivers/gpu/drm/vmwgfx/device_include/svga_types.h
@@ -1,5 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
 /**********************************************************
- * Copyright 2015 VMware, Inc. All rights reserved.
+ * Copyright 2015 VMware, Inc.
 *
 * Permission is hereby granted, free of charge, to any person
 * obtaining a copy of this software and associated documentation
@@ -40,7 +41,10 @@ typedef uint64 PPN64;
 
 typedef bool Bool;
 
+#define MAX_UINT64 U64_MAX
 #define MAX_UINT32 U32_MAX
 #define MAX_UINT16 U16_MAX
 
+#define CONST64U(x) x##ULL
+
 #endif
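Since SVGA_REG_CAP2 only exists when SVGA_CAP_CAP2_REGISTER is advertised, a driver has to gate the second register read on the first. A sketch of the probe-time pattern, using the vmw_read() accessor and dev_priv driver-private pointer from vmwgfx_drv.h (the local variable names are illustrative):

	uint32 capabilities  = vmw_read(dev_priv, SVGA_REG_CAPABILITIES);
	uint32 capabilities2 = SVGA_CAP2_NONE;

	if (capabilities & SVGA_CAP_CAP2_REGISTER)
		capabilities2 = vmw_read(dev_priv, SVGA_REG_CAP2);

	/* Only then is it safe to test CAP2 features, e.g. IntraSurfaceCopy. */
	bool has_intra_copy =
		(capabilities2 & SVGA_CAP2_INTRA_SURFACE_COPY) != 0;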
diff --git a/drivers/gpu/drm/vmwgfx/device_include/vmware_pack_begin.h b/drivers/gpu/drm/vmwgfx/device_include/vmware_pack_begin.h
index 7e7b0ce34aa2..75308bd0d970 100644
--- a/drivers/gpu/drm/vmwgfx/device_include/vmware_pack_begin.h
+++ b/drivers/gpu/drm/vmwgfx/device_include/vmware_pack_begin.h
@@ -1,25 +1,2 @@
-/**********************************************************
- * Copyright 2015 VMware, Inc. All rights reserved.
- *
- * Permission is hereby granted, free of charge, to any person
- * obtaining a copy of this software and associated documentation
- * files (the "Software"), to deal in the Software without
- * restriction, including without limitation the rights to use, copy,
- * modify, merge, publish, distribute, sublicense, and/or sell copies
- * of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be
- * included in all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- **********************************************************/
+/* SPDX-License-Identifier: GPL-2.0 */
 #include <linux/compiler.h>
diff --git a/drivers/gpu/drm/vmwgfx/device_include/vmware_pack_end.h b/drivers/gpu/drm/vmwgfx/device_include/vmware_pack_end.h
index e2e440ed3d44..e93d6f28b68c 100644
--- a/drivers/gpu/drm/vmwgfx/device_include/vmware_pack_end.h
+++ b/drivers/gpu/drm/vmwgfx/device_include/vmware_pack_end.h
@@ -1,25 +1,2 @@
-/**********************************************************
- * Copyright 2015 VMware, Inc. All rights reserved.
- *
- * Permission is hereby granted, free of charge, to any person
- * obtaining a copy of this software and associated documentation
- * files (the "Software"), to deal in the Software without
- * restriction, including without limitation the rights to use, copy,
- * modify, merge, publish, distribute, sublicense, and/or sell copies
- * of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be
- * included in all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- **********************************************************/
+/* SPDX-License-Identifier: GPL-2.0 */
 __packed
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_binding.c b/drivers/gpu/drm/vmwgfx/vmwgfx_binding.c
index 55d32ae43aa4..0b9ee7fb45d6 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_binding.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_binding.c
@@ -1,7 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
 /**************************************************************************
  *
- * Copyright © 2015 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
+ * Copyright 2015 VMware, Inc., Palo Alto, CA., USA
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_binding.h b/drivers/gpu/drm/vmwgfx/vmwgfx_binding.h
index bf2e77ad5a20..6a2a9d69043b 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_binding.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_binding.h
@@ -1,7 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
 /**************************************************************************
  *
- * Copyright © 2015 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
+ * Copyright 2015 VMware, Inc., Palo Alto, CA., USA
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c b/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c
index e8c94b19db7b..fc6673cde289 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c
@@ -1,6 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
 /**************************************************************************
  *
- * Copyright © 2017 VMware, Inc., Palo Alto, CA., USA
+ * Copyright 2017 VMware, Inc., Palo Alto, CA., USA
  * All Rights Reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
new file mode 100644
index 000000000000..2dda03345761
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
@@ -0,0 +1,1123 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/**************************************************************************
+ *
+ * Copyright © 2011-2018 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include <drm/ttm/ttm_placement.h>
+
+#include <drm/drmP.h>
+#include "vmwgfx_drv.h"
+#include "drm/ttm/ttm_object.h"
+
+
+/**
+ * struct vmw_user_buffer_object - User-space-visible buffer object
+ *
+ * @prime: The prime object providing user visibility.
+ * @vbo: The struct vmw_buffer_object
+ */
+struct vmw_user_buffer_object {
+	struct ttm_prime_object prime;
+	struct vmw_buffer_object vbo;
+};
+
+
+/**
+ * vmw_buffer_object - Convert a struct ttm_buffer_object to a struct
+ * vmw_buffer_object.
+ *
+ * @bo: Pointer to the TTM buffer object.
+ * Return: Pointer to the struct vmw_buffer_object embedding the
+ * TTM buffer object.
+ */
+static struct vmw_buffer_object *
+vmw_buffer_object(struct ttm_buffer_object *bo)
+{
+	return container_of(bo, struct vmw_buffer_object, base);
+}
+
+
+/**
+ * vmw_user_buffer_object - Convert a struct ttm_buffer_object to a struct
+ * vmw_user_buffer_object.
+ *
+ * @bo: Pointer to the TTM buffer object.
+ * Return: Pointer to the struct vmw_user_buffer_object embedding the TTM
+ * buffer object.
+ */
+static struct vmw_user_buffer_object *
+vmw_user_buffer_object(struct ttm_buffer_object *bo)
+{
+	struct vmw_buffer_object *vmw_bo = vmw_buffer_object(bo);
+
+	return container_of(vmw_bo, struct vmw_user_buffer_object, vbo);
+}
+
+
+/**
+ * vmw_bo_pin_in_placement - Validate a buffer to placement.
+ *
+ * @dev_priv: Driver private.
+ * @buf: DMA buffer to move.
+ * @placement: The placement to pin it.
+ * @interruptible: Use interruptible wait.
+ * Return: Zero on success, Negative error code on failure. In particular,
+ * -ERESTARTSYS if interrupted by a signal.
+ */
+int vmw_bo_pin_in_placement(struct vmw_private *dev_priv,
+			    struct vmw_buffer_object *buf,
+			    struct ttm_placement *placement,
+			    bool interruptible)
+{
+	struct ttm_operation_ctx ctx = {interruptible, false };
+	struct ttm_buffer_object *bo = &buf->base;
+	int ret;
+	uint32_t new_flags;
+
+	ret = ttm_write_lock(&dev_priv->reservation_sem, interruptible);
+	if (unlikely(ret != 0))
+		return ret;
+
+	vmw_execbuf_release_pinned_bo(dev_priv);
+
+	ret = ttm_bo_reserve(bo, interruptible, false, NULL);
+	if (unlikely(ret != 0))
+		goto err;
+
+	if (buf->pin_count > 0)
+		ret = ttm_bo_mem_compat(placement, &bo->mem,
+					&new_flags) == true ? 0 : -EINVAL;
+	else
+		ret = ttm_bo_validate(bo, placement, &ctx);
+
+	if (!ret)
+		vmw_bo_pin_reserved(buf, true);
+
+	ttm_bo_unreserve(bo);
+
+err:
+	ttm_write_unlock(&dev_priv->reservation_sem);
+	return ret;
+}
+
+
+/**
+ * vmw_bo_pin_in_vram_or_gmr - Move a buffer to vram or gmr.
+ *
+ * This function takes the reservation_sem in write mode.
+ * Flushes and unpins the query bo to avoid failures.
+ *
+ * @dev_priv: Driver private.
+ * @buf: DMA buffer to move.
+ * @interruptible: Use interruptible wait.
+ * Return: Zero on success, Negative error code on failure. In particular,
+ * -ERESTARTSYS if interrupted by a signal.
+ */
+int vmw_bo_pin_in_vram_or_gmr(struct vmw_private *dev_priv,
+			      struct vmw_buffer_object *buf,
+			      bool interruptible)
+{
+	struct ttm_operation_ctx ctx = {interruptible, false };
+	struct ttm_buffer_object *bo = &buf->base;
+	int ret;
+	uint32_t new_flags;
+
+	ret = ttm_write_lock(&dev_priv->reservation_sem, interruptible);
+	if (unlikely(ret != 0))
+		return ret;
+
+	vmw_execbuf_release_pinned_bo(dev_priv);
+
+	ret = ttm_bo_reserve(bo, interruptible, false, NULL);
+	if (unlikely(ret != 0))
+		goto err;
+
+	if (buf->pin_count > 0) {
+		ret = ttm_bo_mem_compat(&vmw_vram_gmr_placement, &bo->mem,
+					&new_flags) == true ?
0 : -EINVAL;
+		goto out_unreserve;
+	}
+
+	ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, &ctx);
+	if (likely(ret == 0) || ret == -ERESTARTSYS)
+		goto out_unreserve;
+
+	ret = ttm_bo_validate(bo, &vmw_vram_placement, &ctx);
+
+out_unreserve:
+	if (!ret)
+		vmw_bo_pin_reserved(buf, true);
+
+	ttm_bo_unreserve(bo);
+err:
+	ttm_write_unlock(&dev_priv->reservation_sem);
+	return ret;
+}
+
+
+/**
+ * vmw_bo_pin_in_vram - Move a buffer to vram.
+ *
+ * This function takes the reservation_sem in write mode.
+ * Flushes and unpins the query bo to avoid failures.
+ *
+ * @dev_priv: Driver private.
+ * @buf: DMA buffer to move.
+ * @interruptible: Use interruptible wait.
+ * Return: Zero on success, Negative error code on failure. In particular,
+ * -ERESTARTSYS if interrupted by a signal.
+ */
+int vmw_bo_pin_in_vram(struct vmw_private *dev_priv,
+		       struct vmw_buffer_object *buf,
+		       bool interruptible)
+{
+	return vmw_bo_pin_in_placement(dev_priv, buf, &vmw_vram_placement,
+				       interruptible);
+}
+
+
+/**
+ * vmw_bo_pin_in_start_of_vram - Move a buffer to start of vram.
+ *
+ * This function takes the reservation_sem in write mode.
+ * Flushes and unpins the query bo to avoid failures.
+ *
+ * @dev_priv: Driver private.
+ * @buf: DMA buffer to pin.
+ * @interruptible: Use interruptible wait.
+ * Return: Zero on success, Negative error code on failure. In particular,
+ * -ERESTARTSYS if interrupted by a signal.
+ */
+int vmw_bo_pin_in_start_of_vram(struct vmw_private *dev_priv,
+				struct vmw_buffer_object *buf,
+				bool interruptible)
+{
+	struct ttm_operation_ctx ctx = {interruptible, false };
+	struct ttm_buffer_object *bo = &buf->base;
+	struct ttm_placement placement;
+	struct ttm_place place;
+	int ret = 0;
+	uint32_t new_flags;
+
+	place = vmw_vram_placement.placement[0];
+	place.lpfn = bo->num_pages;
+	placement.num_placement = 1;
+	placement.placement = &place;
+	placement.num_busy_placement = 1;
+	placement.busy_placement = &place;
+
+	ret = ttm_write_lock(&dev_priv->reservation_sem, interruptible);
+	if (unlikely(ret != 0))
+		return ret;
+
+	vmw_execbuf_release_pinned_bo(dev_priv);
+	ret = ttm_bo_reserve(bo, interruptible, false, NULL);
+	if (unlikely(ret != 0))
+		goto err_unlock;
+
+	/*
+	 * Is this buffer already in vram but not at the start of it?
+	 * In that case, evict it first because TTM isn't good at handling
+	 * that situation.
+	 */
+	if (bo->mem.mem_type == TTM_PL_VRAM &&
+	    bo->mem.start < bo->num_pages &&
+	    bo->mem.start > 0 &&
+	    buf->pin_count == 0) {
+		ctx.interruptible = false;
+		(void) ttm_bo_validate(bo, &vmw_sys_placement, &ctx);
+	}
+
+	if (buf->pin_count > 0)
+		ret = ttm_bo_mem_compat(&placement, &bo->mem,
+					&new_flags) == true ? 0 : -EINVAL;
+	else
+		ret = ttm_bo_validate(bo, &placement, &ctx);
+
+	/* For some reason we didn't end up at the start of vram */
+	WARN_ON(ret == 0 && bo->offset != 0);
+	if (!ret)
+		vmw_bo_pin_reserved(buf, true);
+
+	ttm_bo_unreserve(bo);
+err_unlock:
+	ttm_write_unlock(&dev_priv->reservation_sem);
+
+	return ret;
+}
+
+
+/**
+ * vmw_bo_unpin - Unpin the given buffer without moving it.
+ *
+ * This function takes the reservation_sem in read mode.
+ *
+ * @dev_priv: Driver private.
+ * @buf: DMA buffer to unpin.
+ * @interruptible: Use interruptible wait.
+ * Return: Zero on success, Negative error code on failure.
In particular,
+ * -ERESTARTSYS if interrupted by a signal.
+ */
+int vmw_bo_unpin(struct vmw_private *dev_priv,
+		 struct vmw_buffer_object *buf,
+		 bool interruptible)
+{
+	struct ttm_buffer_object *bo = &buf->base;
+	int ret;
+
+	ret = ttm_read_lock(&dev_priv->reservation_sem, interruptible);
+	if (unlikely(ret != 0))
+		return ret;
+
+	ret = ttm_bo_reserve(bo, interruptible, false, NULL);
+	if (unlikely(ret != 0))
+		goto err;
+
+	vmw_bo_pin_reserved(buf, false);
+
+	ttm_bo_unreserve(bo);
+
+err:
+	ttm_read_unlock(&dev_priv->reservation_sem);
+	return ret;
+}
+
+/**
+ * vmw_bo_get_guest_ptr - Get the guest ptr representing the current placement
+ * of a buffer.
+ *
+ * @bo: Pointer to a struct ttm_buffer_object. Must be pinned or reserved.
+ * @ptr: SVGAGuestPtr returning the result.
+ */
+void vmw_bo_get_guest_ptr(const struct ttm_buffer_object *bo,
+			  SVGAGuestPtr *ptr)
+{
+	if (bo->mem.mem_type == TTM_PL_VRAM) {
+		ptr->gmrId = SVGA_GMR_FRAMEBUFFER;
+		ptr->offset = bo->offset;
+	} else {
+		ptr->gmrId = bo->mem.start;
+		ptr->offset = 0;
+	}
+}
+
+
+/**
+ * vmw_bo_pin_reserved - Pin or unpin a buffer object without moving it.
+ *
+ * @vbo: The buffer object. Must be reserved.
+ * @pin: Whether to pin or unpin.
+ *
+ */
+void vmw_bo_pin_reserved(struct vmw_buffer_object *vbo, bool pin)
+{
+	struct ttm_operation_ctx ctx = { false, true };
+	struct ttm_place pl;
+	struct ttm_placement placement;
+	struct ttm_buffer_object *bo = &vbo->base;
+	uint32_t old_mem_type = bo->mem.mem_type;
+	int ret;
+
+	lockdep_assert_held(&bo->resv->lock.base);
+
+	if (pin) {
+		if (vbo->pin_count++ > 0)
+			return;
+	} else {
+		WARN_ON(vbo->pin_count <= 0);
+		if (--vbo->pin_count > 0)
+			return;
+	}
+
+	pl.fpfn = 0;
+	pl.lpfn = 0;
+	pl.flags = TTM_PL_FLAG_VRAM | VMW_PL_FLAG_GMR | VMW_PL_FLAG_MOB
+		| TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED;
+	if (pin)
+		pl.flags |= TTM_PL_FLAG_NO_EVICT;
+
+	memset(&placement, 0, sizeof(placement));
+	placement.num_placement = 1;
+	placement.placement = &pl;
+
+	ret = ttm_bo_validate(bo, &placement, &ctx);
+
+	BUG_ON(ret != 0 || bo->mem.mem_type != old_mem_type);
+}
+
+
+/**
+ * vmw_bo_map_and_cache - Map a buffer object and cache the map
+ *
+ * @vbo: The buffer object to map
+ * Return: A kernel virtual address or NULL if mapping failed.
+ *
+ * This function maps a buffer object into the kernel address space, or
+ * returns the virtual kernel address of an already existing map. The virtual
+ * address remains valid as long as the buffer object is pinned or reserved.
+ * The cached map is torn down on either
+ * 1) Buffer object move
+ * 2) Buffer object swapout
+ * 3) Buffer object destruction
+ *
+ */
+void *vmw_bo_map_and_cache(struct vmw_buffer_object *vbo)
+{
+	struct ttm_buffer_object *bo = &vbo->base;
+	bool not_used;
+	void *virtual;
+	int ret;
+
+	virtual = ttm_kmap_obj_virtual(&vbo->map, &not_used);
+	if (virtual)
+		return virtual;
+
+	ret = ttm_bo_kmap(bo, 0, bo->num_pages, &vbo->map);
+	if (ret)
+		DRM_ERROR("Buffer object map failed: %d.\n", ret);
+
+	return ttm_kmap_obj_virtual(&vbo->map, &not_used);
+}
+
+
+/**
+ * vmw_bo_unmap - Tear down a cached buffer object map.
+ *
+ * @vbo: The buffer object whose map we are tearing down.
+ *
+ * This function tears down a cached map set up using
+ * vmw_bo_map_and_cache().
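+ *
+ * A minimal usage sketch (hypothetical caller; assumes @vbo is already
+ * pinned or reserved so the cached map stays valid):
+ *
+ *	u32 *blob = vmw_bo_map_and_cache(vbo);
+ *
+ *	if (blob)
+ *		blob[0] = 0;
+ *	vmw_bo_unmap(vbo);
+ *
+ * Unmapping here is optional, since the cached map is also torn down on
+ * move, swapout and destruction, as described above.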
+ */
+void vmw_bo_unmap(struct vmw_buffer_object *vbo)
+{
+	if (vbo->map.bo == NULL)
+		return;
+
+	ttm_bo_kunmap(&vbo->map);
+}
+
+
+/**
+ * vmw_bo_acc_size - Calculate the pinned memory usage of buffers
+ *
+ * @dev_priv: Pointer to a struct vmw_private identifying the device.
+ * @size: The requested buffer size.
+ * @user: Whether this is an ordinary buffer object or a user buffer object.
+ */
+static size_t vmw_bo_acc_size(struct vmw_private *dev_priv, size_t size,
+			      bool user)
+{
+	static size_t struct_size, user_struct_size;
+	size_t num_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
+	size_t page_array_size = ttm_round_pot(num_pages * sizeof(void *));
+
+	if (unlikely(struct_size == 0)) {
+		size_t backend_size = ttm_round_pot(vmw_tt_size);
+
+		struct_size = backend_size +
+			ttm_round_pot(sizeof(struct vmw_buffer_object));
+		user_struct_size = backend_size +
+			ttm_round_pot(sizeof(struct vmw_user_buffer_object));
+	}
+
+	if (dev_priv->map_mode == vmw_dma_alloc_coherent)
+		page_array_size +=
+			ttm_round_pot(num_pages * sizeof(dma_addr_t));
+
+	return ((user) ? user_struct_size : struct_size) +
+		page_array_size;
+}
+
+
+/**
+ * vmw_bo_bo_free - vmw buffer object destructor
+ *
+ * @bo: Pointer to the embedded struct ttm_buffer_object
+ */
+void vmw_bo_bo_free(struct ttm_buffer_object *bo)
+{
+	struct vmw_buffer_object *vmw_bo = vmw_buffer_object(bo);
+
+	vmw_bo_unmap(vmw_bo);
+	kfree(vmw_bo);
+}
+
+
+/**
+ * vmw_user_bo_destroy - vmw user buffer object destructor
+ *
+ * @bo: Pointer to the embedded struct ttm_buffer_object
+ */
+static void vmw_user_bo_destroy(struct ttm_buffer_object *bo)
+{
+	struct vmw_user_buffer_object *vmw_user_bo = vmw_user_buffer_object(bo);
+
+	vmw_bo_unmap(&vmw_user_bo->vbo);
+	ttm_prime_object_kfree(vmw_user_bo, prime);
+}
+
+
+/**
+ * vmw_bo_init - Initialize a vmw buffer object
+ *
+ * @dev_priv: Pointer to the device private struct.
+ * @vmw_bo: Pointer to the struct vmw_buffer_object to initialize.
+ * @size: Buffer object size in bytes.
+ * @placement: Initial placement.
+ * @interruptible: Whether waits should be performed interruptibly.
+ * @bo_free: The buffer object destructor.
+ * Return: Zero on success, negative error code on error.
+ *
+ * Note that on error, the code will free the buffer object.
+ */
+int vmw_bo_init(struct vmw_private *dev_priv,
+		struct vmw_buffer_object *vmw_bo,
+		size_t size, struct ttm_placement *placement,
+		bool interruptible,
+		void (*bo_free)(struct ttm_buffer_object *bo))
+{
+	struct ttm_bo_device *bdev = &dev_priv->bdev;
+	size_t acc_size;
+	int ret;
+	bool user = (bo_free == &vmw_user_bo_destroy);
+
+	WARN_ON_ONCE(!bo_free && (!user && (bo_free != vmw_bo_bo_free)));
+
+	acc_size = vmw_bo_acc_size(dev_priv, size, user);
+	memset(vmw_bo, 0, sizeof(*vmw_bo));
+
+	INIT_LIST_HEAD(&vmw_bo->res_list);
+
+	ret = ttm_bo_init(bdev, &vmw_bo->base, size,
+			  ttm_bo_type_device, placement,
+			  0, interruptible, acc_size,
+			  NULL, NULL, bo_free);
+	return ret;
+}
+
+
+/**
+ * vmw_user_bo_release - TTM reference base object release callback for
+ * vmw user buffer objects
+ *
+ * @p_base: The TTM base object pointer about to be unreferenced.
+ *
+ * Clears the TTM base object pointer and drops the reference the
+ * base object has on the underlying struct vmw_buffer_object.
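+ *
+ * Rough lifetime sketch (inferred from this file, not normative): the
+ * base object holds one reference on the embedded TTM buffer object,
+ * so dropping it here lets the final unref run the destructor:
+ *
+ *	vmw_user_bo_release()
+ *	  -> ttm_bo_unref(&vbo->base)
+ *	       -> vmw_user_bo_destroy()	(when the refcount hits zero)
+ *	            -> ttm_prime_object_kfree()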
+ */
+static void vmw_user_bo_release(struct ttm_base_object **p_base)
+{
+	struct vmw_user_buffer_object *vmw_user_bo;
+	struct ttm_base_object *base = *p_base;
+	struct ttm_buffer_object *bo;
+
+	*p_base = NULL;
+
+	if (unlikely(base == NULL))
+		return;
+
+	vmw_user_bo = container_of(base, struct vmw_user_buffer_object,
+				   prime.base);
+	bo = &vmw_user_bo->vbo.base;
+	ttm_bo_unref(&bo);
+}
+
+
+/**
+ * vmw_user_bo_ref_obj_release - TTM synccpu reference object release callback
+ * for vmw user buffer objects
+ *
+ * @base: Pointer to the TTM base object
+ * @ref_type: Reference type of the reference reaching zero.
+ *
+ * Called when user-space drops its last synccpu reference on the buffer
+ * object, either explicitly or as part of a cleanup file close.
+ */
+static void vmw_user_bo_ref_obj_release(struct ttm_base_object *base,
+					enum ttm_ref_type ref_type)
+{
+	struct vmw_user_buffer_object *user_bo;
+
+	user_bo = container_of(base, struct vmw_user_buffer_object, prime.base);
+
+	switch (ref_type) {
+	case TTM_REF_SYNCCPU_WRITE:
+		ttm_bo_synccpu_write_release(&user_bo->vbo.base);
+		break;
+	default:
+		WARN_ONCE(true, "Undefined buffer object reference release.\n");
+	}
+}
+
+
+/**
+ * vmw_user_bo_alloc - Allocate a user buffer object
+ *
+ * @dev_priv: Pointer to a struct vmw_private identifying the device.
+ * @tfile: Pointer to a struct ttm_object_file on which to register the user
+ * object.
+ * @size: Size of the buffer object.
+ * @shareable: Boolean whether the buffer is shareable with other open files.
+ * @handle: Pointer to where the handle value should be assigned.
+ * @p_vbo: Pointer to where the refcounted struct vmw_buffer_object pointer
+ * should be assigned.
+ * @p_base: Pointer to where a refcounted pointer to the TTM base object
+ * should be assigned, or NULL if none is wanted.
+ * Return: Zero on success, negative error code on error.
+ */
+int vmw_user_bo_alloc(struct vmw_private *dev_priv,
+		      struct ttm_object_file *tfile,
+		      uint32_t size,
+		      bool shareable,
+		      uint32_t *handle,
+		      struct vmw_buffer_object **p_vbo,
+		      struct ttm_base_object **p_base)
+{
+	struct vmw_user_buffer_object *user_bo;
+	struct ttm_buffer_object *tmp;
+	int ret;
+
+	user_bo = kzalloc(sizeof(*user_bo), GFP_KERNEL);
+	if (unlikely(!user_bo)) {
+		DRM_ERROR("Failed to allocate a buffer.\n");
+		return -ENOMEM;
+	}
+
+	ret = vmw_bo_init(dev_priv, &user_bo->vbo, size,
+			  (dev_priv->has_mob) ?
+			  &vmw_sys_placement :
+			  &vmw_vram_sys_placement, true,
+			  &vmw_user_bo_destroy);
+	if (unlikely(ret != 0))
+		return ret;
+
+	tmp = ttm_bo_reference(&user_bo->vbo.base);
+	ret = ttm_prime_object_init(tfile,
+				    size,
+				    &user_bo->prime,
+				    shareable,
+				    ttm_buffer_type,
+				    &vmw_user_bo_release,
+				    &vmw_user_bo_ref_obj_release);
+	if (unlikely(ret != 0)) {
+		ttm_bo_unref(&tmp);
+		goto out_no_base_object;
+	}
+
+	*p_vbo = &user_bo->vbo;
+	if (p_base) {
+		*p_base = &user_bo->prime.base;
+		kref_get(&(*p_base)->refcount);
+	}
+	*handle = user_bo->prime.base.hash.key;
+
+out_no_base_object:
+	return ret;
+}
+
+
+/**
+ * vmw_user_bo_verify_access - verify access permissions on this
+ * buffer object.
+ *
+ * @bo: Pointer to the buffer object being accessed
+ * @tfile: Identifying the caller.
+ */
+int vmw_user_bo_verify_access(struct ttm_buffer_object *bo,
+			      struct ttm_object_file *tfile)
+{
+	struct vmw_user_buffer_object *vmw_user_bo;
+
+	if (unlikely(bo->destroy != vmw_user_bo_destroy))
+		return -EPERM;
+
+	vmw_user_bo = vmw_user_buffer_object(bo);
+
+	/* Check that the caller has opened the object.
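+	 * (presumably via a TTM_REF_USAGE reference registered when the
+	 * handle was opened, which is what ttm_ref_object_exists() checks).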
*/ + if (likely(ttm_ref_object_exists(tfile, &vmw_user_bo->prime.base))) + return 0; + + DRM_ERROR("Could not grant buffer access.\n"); + return -EPERM; +} + + +/** + * vmw_user_bo_synccpu_grab - Grab a struct vmw_user_buffer_object for cpu + * access, idling previous GPU operations on the buffer and optionally + * blocking it for further command submissions. + * + * @user_bo: Pointer to the buffer object being grabbed for CPU access + * @tfile: Identifying the caller. + * @flags: Flags indicating how the grab should be performed. + * Return: Zero on success, Negative error code on error. In particular, + * -EBUSY will be returned if a dontblock operation is requested and the + * buffer object is busy, and -ERESTARTSYS will be returned if a wait is + * interrupted by a signal. + * + * A blocking grab will be automatically released when @tfile is closed. + */ +static int vmw_user_bo_synccpu_grab(struct vmw_user_buffer_object *user_bo, + struct ttm_object_file *tfile, + uint32_t flags) +{ + struct ttm_buffer_object *bo = &user_bo->vbo.base; + bool existed; + int ret; + + if (flags & drm_vmw_synccpu_allow_cs) { + bool nonblock = !!(flags & drm_vmw_synccpu_dontblock); + long lret; + + lret = reservation_object_wait_timeout_rcu + (bo->resv, true, true, + nonblock ? 0 : MAX_SCHEDULE_TIMEOUT); + if (!lret) + return -EBUSY; + else if (lret < 0) + return lret; + return 0; + } + + ret = ttm_bo_synccpu_write_grab + (bo, !!(flags & drm_vmw_synccpu_dontblock)); + if (unlikely(ret != 0)) + return ret; + + ret = ttm_ref_object_add(tfile, &user_bo->prime.base, + TTM_REF_SYNCCPU_WRITE, &existed, false); + if (ret != 0 || existed) + ttm_bo_synccpu_write_release(&user_bo->vbo.base); + + return ret; +} + +/** + * vmw_user_bo_synccpu_release - Release a previous grab for CPU access, + * and unblock command submission on the buffer if blocked. + * + * @handle: Handle identifying the buffer object. + * @tfile: Identifying the caller. + * @flags: Flags indicating the type of release. + */ +static int vmw_user_bo_synccpu_release(uint32_t handle, + struct ttm_object_file *tfile, + uint32_t flags) +{ + if (!(flags & drm_vmw_synccpu_allow_cs)) + return ttm_ref_object_base_unref(tfile, handle, + TTM_REF_SYNCCPU_WRITE); + + return 0; +} + + +/** + * vmw_user_bo_synccpu_ioctl - ioctl function implementing the synccpu + * functionality. + * + * @dev: Identifies the drm device. + * @data: Pointer to the ioctl argument. + * @file_priv: Identifies the caller. + * Return: Zero on success, negative error code on error. + * + * This function checks the ioctl arguments for validity and calls the + * relevant synccpu functions. 
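+ *
+ * Illustrative user-space pairing (a sketch, not normative; @fd and
+ * @bo_handle are hypothetical caller state, fields follow struct
+ * drm_vmw_synccpu_arg):
+ *
+ *	struct drm_vmw_synccpu_arg arg = { 0 };
+ *
+ *	arg.handle = bo_handle;
+ *	arg.op = drm_vmw_synccpu_grab;
+ *	arg.flags = drm_vmw_synccpu_read | drm_vmw_synccpu_write;
+ *	drmCommandWrite(fd, DRM_VMW_SYNCCPU, &arg, sizeof(arg));
+ *	... CPU reads/writes the buffer contents ...
+ *	arg.op = drm_vmw_synccpu_release;
+ *	drmCommandWrite(fd, DRM_VMW_SYNCCPU, &arg, sizeof(arg));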
+ */ +int vmw_user_bo_synccpu_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + struct drm_vmw_synccpu_arg *arg = + (struct drm_vmw_synccpu_arg *) data; + struct vmw_buffer_object *vbo; + struct vmw_user_buffer_object *user_bo; + struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; + struct ttm_base_object *buffer_base; + int ret; + + if ((arg->flags & (drm_vmw_synccpu_read | drm_vmw_synccpu_write)) == 0 + || (arg->flags & ~(drm_vmw_synccpu_read | drm_vmw_synccpu_write | + drm_vmw_synccpu_dontblock | + drm_vmw_synccpu_allow_cs)) != 0) { + DRM_ERROR("Illegal synccpu flags.\n"); + return -EINVAL; + } + + switch (arg->op) { + case drm_vmw_synccpu_grab: + ret = vmw_user_bo_lookup(tfile, arg->handle, &vbo, + &buffer_base); + if (unlikely(ret != 0)) + return ret; + + user_bo = container_of(vbo, struct vmw_user_buffer_object, + vbo); + ret = vmw_user_bo_synccpu_grab(user_bo, tfile, arg->flags); + vmw_bo_unreference(&vbo); + ttm_base_object_unref(&buffer_base); + if (unlikely(ret != 0 && ret != -ERESTARTSYS && + ret != -EBUSY)) { + DRM_ERROR("Failed synccpu grab on handle 0x%08x.\n", + (unsigned int) arg->handle); + return ret; + } + break; + case drm_vmw_synccpu_release: + ret = vmw_user_bo_synccpu_release(arg->handle, tfile, + arg->flags); + if (unlikely(ret != 0)) { + DRM_ERROR("Failed synccpu release on handle 0x%08x.\n", + (unsigned int) arg->handle); + return ret; + } + break; + default: + DRM_ERROR("Invalid synccpu operation.\n"); + return -EINVAL; + } + + return 0; +} + + +/** + * vmw_bo_alloc_ioctl - ioctl function implementing the buffer object + * allocation functionality. + * + * @dev: Identifies the drm device. + * @data: Pointer to the ioctl argument. + * @file_priv: Identifies the caller. + * Return: Zero on success, negative error code on error. + * + * This function checks the ioctl arguments for validity and allocates a + * struct vmw_user_buffer_object bo. + */ +int vmw_bo_alloc_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + struct vmw_private *dev_priv = vmw_priv(dev); + union drm_vmw_alloc_dmabuf_arg *arg = + (union drm_vmw_alloc_dmabuf_arg *)data; + struct drm_vmw_alloc_dmabuf_req *req = &arg->req; + struct drm_vmw_dmabuf_rep *rep = &arg->rep; + struct vmw_buffer_object *vbo; + uint32_t handle; + int ret; + + ret = ttm_read_lock(&dev_priv->reservation_sem, true); + if (unlikely(ret != 0)) + return ret; + + ret = vmw_user_bo_alloc(dev_priv, vmw_fpriv(file_priv)->tfile, + req->size, false, &handle, &vbo, + NULL); + if (unlikely(ret != 0)) + goto out_no_bo; + + rep->handle = handle; + rep->map_handle = drm_vma_node_offset_addr(&vbo->base.vma_node); + rep->cur_gmr_id = handle; + rep->cur_gmr_offset = 0; + + vmw_bo_unreference(&vbo); + +out_no_bo: + ttm_read_unlock(&dev_priv->reservation_sem); + + return ret; +} + + +/** + * vmw_bo_unref_ioctl - Generic handle close ioctl. + * + * @dev: Identifies the drm device. + * @data: Pointer to the ioctl argument. + * @file_priv: Identifies the caller. + * Return: Zero on success, negative error code on error. + * + * This function checks the ioctl arguments for validity and closes a + * handle to a TTM base object, optionally freeing the object. 
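+ *
+ * Illustrative pairing with vmw_bo_alloc_ioctl() above (user-space
+ * sketch, error handling omitted; @fd is a hypothetical open drm fd):
+ *
+ *	union drm_vmw_alloc_dmabuf_arg alloc = { .req = { .size = 65536 } };
+ *	struct drm_vmw_unref_dmabuf_arg unref = { 0 };
+ *
+ *	drmCommandWriteRead(fd, DRM_VMW_ALLOC_DMABUF, &alloc, sizeof(alloc));
+ *	unref.handle = alloc.rep.handle;
+ *	drmCommandWrite(fd, DRM_VMW_UNREF_DMABUF, &unref, sizeof(unref));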
+ */
+int vmw_bo_unref_ioctl(struct drm_device *dev, void *data,
+		       struct drm_file *file_priv)
+{
+	struct drm_vmw_unref_dmabuf_arg *arg =
+		(struct drm_vmw_unref_dmabuf_arg *)data;
+
+	return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
+					 arg->handle,
+					 TTM_REF_USAGE);
+}
+
+
+/**
+ * vmw_user_bo_lookup - Look up a vmw user buffer object from a handle.
+ *
+ * @tfile: The TTM object file the handle is registered with.
+ * @handle: The user buffer object handle
+ * @out: Pointer to where a pointer to the embedded
+ * struct vmw_buffer_object should be placed.
+ * @p_base: Pointer to where a pointer to the TTM base object should be
+ * placed, or NULL if no such pointer is required.
+ * Return: Zero on success, Negative error code on error.
+ *
+ * Both the output base object pointer and the vmw buffer object pointer
+ * will be refcounted.
+ */
+int vmw_user_bo_lookup(struct ttm_object_file *tfile,
+		       uint32_t handle, struct vmw_buffer_object **out,
+		       struct ttm_base_object **p_base)
+{
+	struct vmw_user_buffer_object *vmw_user_bo;
+	struct ttm_base_object *base;
+
+	base = ttm_base_object_lookup(tfile, handle);
+	if (unlikely(base == NULL)) {
+		DRM_ERROR("Invalid buffer object handle 0x%08lx.\n",
+			  (unsigned long)handle);
+		return -ESRCH;
+	}
+
+	if (unlikely(ttm_base_object_type(base) != ttm_buffer_type)) {
+		ttm_base_object_unref(&base);
+		DRM_ERROR("Invalid buffer object handle 0x%08lx.\n",
+			  (unsigned long)handle);
+		return -EINVAL;
+	}
+
+	vmw_user_bo = container_of(base, struct vmw_user_buffer_object,
+				   prime.base);
+	(void)ttm_bo_reference(&vmw_user_bo->vbo.base);
+	if (p_base)
+		*p_base = base;
+	else
+		ttm_base_object_unref(&base);
+	*out = &vmw_user_bo->vbo;
+
+	return 0;
+}
+
+
+/**
+ * vmw_user_bo_reference - Open a handle to a vmw user buffer object.
+ *
+ * @tfile: The TTM object file to register the handle with.
+ * @vbo: The embedded vmw buffer object.
+ * @handle: Pointer to where the new handle should be placed.
+ * Return: Zero on success, Negative error code on error.
+ */
+int vmw_user_bo_reference(struct ttm_object_file *tfile,
+			  struct vmw_buffer_object *vbo,
+			  uint32_t *handle)
+{
+	struct vmw_user_buffer_object *user_bo;
+
+	if (vbo->base.destroy != vmw_user_bo_destroy)
+		return -EINVAL;
+
+	user_bo = container_of(vbo, struct vmw_user_buffer_object, vbo);
+
+	*handle = user_bo->prime.base.hash.key;
+	return ttm_ref_object_add(tfile, &user_bo->prime.base,
+				  TTM_REF_USAGE, NULL, false);
+}
+
+
+/**
+ * vmw_bo_fence_single - Utility function to fence a single TTM buffer
+ * object without unreserving it.
+ *
+ * @bo: Pointer to the struct ttm_buffer_object to fence.
+ * @fence: Pointer to the fence. If NULL, this function will
+ * insert a fence into the command stream.
+ *
+ * Contrary to the ttm_eu version of this function, it takes only
+ * a single buffer object instead of a list, and it also doesn't
+ * unreserve the buffer object, which needs to be done separately.
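+ *
+ * Typical kernel-side call sequence (sketch; assumes the caller just
+ * submitted commands referencing @bo):
+ *
+ *	ttm_bo_reserve(bo, false, false, NULL);
+ *	... submit commands referencing bo ...
+ *	vmw_bo_fence_single(bo, NULL);
+ *	ttm_bo_unreserve(bo);
+ *
+ * where a NULL @fence asks this function to create and attach one itself.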
+ */
+void vmw_bo_fence_single(struct ttm_buffer_object *bo,
+			 struct vmw_fence_obj *fence)
+{
+	struct ttm_bo_device *bdev = bo->bdev;
+
+	struct vmw_private *dev_priv =
+		container_of(bdev, struct vmw_private, bdev);
+
+	if (fence == NULL) {
+		vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
+		reservation_object_add_excl_fence(bo->resv, &fence->base);
+		dma_fence_put(&fence->base);
+	} else
+		reservation_object_add_excl_fence(bo->resv, &fence->base);
+}
+
+
+/**
+ * vmw_dumb_create - Create a dumb kms buffer
+ *
+ * @file_priv: Pointer to a struct drm_file identifying the caller.
+ * @dev: Pointer to the drm device.
+ * @args: Pointer to a struct drm_mode_create_dumb structure.
+ * Return: Zero on success, negative error code on failure.
+ *
+ * This is a driver callback for the core drm create_dumb functionality.
+ * Note that this is very similar to the vmw_bo_alloc ioctl, except
+ * that the arguments have a different format.
+ */
+int vmw_dumb_create(struct drm_file *file_priv,
+		    struct drm_device *dev,
+		    struct drm_mode_create_dumb *args)
+{
+	struct vmw_private *dev_priv = vmw_priv(dev);
+	struct vmw_buffer_object *vbo;
+	int ret;
+
+	args->pitch = args->width * ((args->bpp + 7) / 8);
+	args->size = args->pitch * args->height;
+
+	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
+	if (unlikely(ret != 0))
+		return ret;
+
+	ret = vmw_user_bo_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
+				args->size, false, &args->handle,
+				&vbo, NULL);
+	if (unlikely(ret != 0))
+		goto out_no_bo;
+
+	vmw_bo_unreference(&vbo);
+out_no_bo:
+	ttm_read_unlock(&dev_priv->reservation_sem);
+	return ret;
+}
+
+
+/**
+ * vmw_dumb_map_offset - Return the address space offset of a dumb buffer
+ *
+ * @file_priv: Pointer to a struct drm_file identifying the caller.
+ * @dev: Pointer to the drm device.
+ * @handle: Handle identifying the dumb buffer.
+ * @offset: The address space offset returned.
+ * Return: Zero on success, negative error code on failure.
+ *
+ * This is a driver callback for the core drm dumb_map_offset functionality.
+ */
+int vmw_dumb_map_offset(struct drm_file *file_priv,
+			struct drm_device *dev, uint32_t handle,
+			uint64_t *offset)
+{
+	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+	struct vmw_buffer_object *out_buf;
+	int ret;
+
+	ret = vmw_user_bo_lookup(tfile, handle, &out_buf, NULL);
+	if (ret != 0)
+		return -EINVAL;
+
+	*offset = drm_vma_node_offset_addr(&out_buf->base.vma_node);
+	vmw_bo_unreference(&out_buf);
+	return 0;
+}
+
+
+/**
+ * vmw_dumb_destroy - Destroy a dumb buffer
+ *
+ * @file_priv: Pointer to a struct drm_file identifying the caller.
+ * @dev: Pointer to the drm device.
+ * @handle: Handle identifying the dumb buffer.
+ * Return: Zero on success, negative error code on failure.
+ *
+ * This is a driver callback for the core drm dumb_destroy functionality.
+ */
+int vmw_dumb_destroy(struct drm_file *file_priv,
+		     struct drm_device *dev,
+		     uint32_t handle)
+{
+	return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
+					 handle, TTM_REF_USAGE);
+}
+
+
+/**
+ * vmw_bo_swap_notify - swapout notify callback.
+ *
+ * @bo: The buffer object to be swapped out.
+ */
+void vmw_bo_swap_notify(struct ttm_buffer_object *bo)
+{
+	/* Is @bo embedded in a struct vmw_buffer_object?
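+	 * TTM gives us no type field, so the destroy callback is the only
+	 * way to tell; bail out below if it is neither of ours.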
*/ + if (bo->destroy != vmw_bo_bo_free && + bo->destroy != vmw_user_bo_destroy) + return; + + /* Kill any cached kernel maps before swapout */ + vmw_bo_unmap(vmw_buffer_object(bo)); +} + + +/** + * vmw_bo_move_notify - TTM move_notify_callback + * + * @bo: The TTM buffer object about to move. + * @mem: The struct ttm_mem_reg indicating to what memory + * region the move is taking place. + * + * Detaches cached maps and device bindings that require that the + * buffer doesn't move. + */ +void vmw_bo_move_notify(struct ttm_buffer_object *bo, + struct ttm_mem_reg *mem) +{ + struct vmw_buffer_object *vbo; + + if (mem == NULL) + return; + + /* Make sure @bo is embedded in a struct vmw_buffer_object? */ + if (bo->destroy != vmw_bo_bo_free && + bo->destroy != vmw_user_bo_destroy) + return; + + vbo = container_of(bo, struct vmw_buffer_object, base); + + /* + * Kill any cached kernel maps before move to or from VRAM. + * With other types of moves, the underlying pages stay the same, + * and the map can be kept. + */ + if (mem->mem_type == TTM_PL_VRAM || bo->mem.mem_type == TTM_PL_VRAM) + vmw_bo_unmap(vbo); + + /* + * If we're moving a backup MOB out of MOB placement, then make sure we + * read back all resource content first, and unbind the MOB from + * the resource. + */ + if (mem->mem_type != VMW_PL_MOB && bo->mem.mem_type == VMW_PL_MOB) + vmw_resource_unbind_list(vbo); +} diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c index 9f45d5004cae..e7e4655d3f36 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c @@ -1,7 +1,7 @@ +// SPDX-License-Identifier: GPL-2.0 OR MIT /************************************************************************** * - * Copyright © 2015 VMware, Inc., Palo Alto, CA., USA - * All Rights Reserved. + * Copyright 2015 VMware, Inc., Palo Alto, CA., USA * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c index 36c7b6c839c0..3b75af9bf85f 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c @@ -1,7 +1,7 @@ +// SPDX-License-Identifier: GPL-2.0 OR MIT /************************************************************************** * - * Copyright © 2014-2015 VMware, Inc., Palo Alto, CA., USA - * All Rights Reserved. + * Copyright 2014-2015 VMware, Inc., Palo Alto, CA., USA * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c index 3767ac335aca..7c3cb8efd11a 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c @@ -1,7 +1,7 @@ +// SPDX-License-Identifier: GPL-2.0 OR MIT /************************************************************************** * - * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA - * All Rights Reserved. 
+ * Copyright 2009-2015 VMware, Inc., Palo Alto, CA., USA * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the @@ -38,7 +38,7 @@ struct vmw_user_context { struct vmw_cmdbuf_res_manager *man; struct vmw_resource *cotables[SVGA_COTABLE_DX10_MAX]; spinlock_t cotable_lock; - struct vmw_dma_buffer *dx_query_mob; + struct vmw_buffer_object *dx_query_mob; }; static void vmw_user_context_free(struct vmw_resource *res); @@ -424,7 +424,7 @@ static int vmw_gb_context_unbind(struct vmw_resource *res, (void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL); - vmw_fence_single_bo(bo, fence); + vmw_bo_fence_single(bo, fence); if (likely(fence != NULL)) vmw_fence_obj_unreference(&fence); @@ -648,7 +648,7 @@ static int vmw_dx_context_unbind(struct vmw_resource *res, (void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL); - vmw_fence_single_bo(bo, fence); + vmw_bo_fence_single(bo, fence); if (likely(fence != NULL)) vmw_fence_obj_unreference(&fence); @@ -900,7 +900,7 @@ vmw_context_binding_state(struct vmw_resource *ctx) * specified in the parameter. 0 otherwise. */ int vmw_context_bind_dx_query(struct vmw_resource *ctx_res, - struct vmw_dma_buffer *mob) + struct vmw_buffer_object *mob) { struct vmw_user_context *uctx = container_of(ctx_res, struct vmw_user_context, res); @@ -908,7 +908,7 @@ int vmw_context_bind_dx_query(struct vmw_resource *ctx_res, if (mob == NULL) { if (uctx->dx_query_mob) { uctx->dx_query_mob->dx_query_ctx = NULL; - vmw_dmabuf_unreference(&uctx->dx_query_mob); + vmw_bo_unreference(&uctx->dx_query_mob); uctx->dx_query_mob = NULL; } @@ -922,7 +922,7 @@ int vmw_context_bind_dx_query(struct vmw_resource *ctx_res, mob->dx_query_ctx = ctx_res; if (!uctx->dx_query_mob) - uctx->dx_query_mob = vmw_dmabuf_reference(mob); + uctx->dx_query_mob = vmw_bo_reference(mob); return 0; } @@ -932,7 +932,7 @@ int vmw_context_bind_dx_query(struct vmw_resource *ctx_res, * * @ctx_res: The context resource */ -struct vmw_dma_buffer * +struct vmw_buffer_object * vmw_context_get_dx_query_mob(struct vmw_resource *ctx_res) { struct vmw_user_context *uctx = diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c index cbf54ea7b4c0..1d45714e1d5a 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c @@ -1,7 +1,7 @@ +// SPDX-License-Identifier: GPL-2.0 OR MIT /************************************************************************** * - * Copyright © 2014-2015 VMware, Inc., Palo Alto, CA., USA - * All Rights Reserved. 
+ * Copyright 2014-2015 VMware, Inc., Palo Alto, CA., USA * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the @@ -324,7 +324,7 @@ static int vmw_cotable_unbind(struct vmw_resource *res, vmw_dx_context_scrub_cotables(vcotbl->ctx, readback); mutex_unlock(&dev_priv->binding_mutex); (void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL); - vmw_fence_single_bo(bo, fence); + vmw_bo_fence_single(bo, fence); if (likely(fence != NULL)) vmw_fence_obj_unreference(&fence); @@ -367,7 +367,7 @@ static int vmw_cotable_readback(struct vmw_resource *res) } (void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL); - vmw_fence_single_bo(&res->backup->base, fence); + vmw_bo_fence_single(&res->backup->base, fence); vmw_fence_obj_unreference(&fence); return 0; @@ -390,7 +390,7 @@ static int vmw_cotable_resize(struct vmw_resource *res, size_t new_size) struct ttm_operation_ctx ctx = { false, false }; struct vmw_private *dev_priv = res->dev_priv; struct vmw_cotable *vcotbl = vmw_cotable(res); - struct vmw_dma_buffer *buf, *old_buf = res->backup; + struct vmw_buffer_object *buf, *old_buf = res->backup; struct ttm_buffer_object *bo, *old_bo = &res->backup->base; size_t old_size = res->backup_size; size_t old_size_read_back = vcotbl->size_read_back; @@ -415,8 +415,8 @@ static int vmw_cotable_resize(struct vmw_resource *res, size_t new_size) if (!buf) return -ENOMEM; - ret = vmw_dmabuf_init(dev_priv, buf, new_size, &vmw_mob_ne_placement, - true, vmw_dmabuf_bo_free); + ret = vmw_bo_init(dev_priv, buf, new_size, &vmw_mob_ne_placement, + true, vmw_bo_bo_free); if (ret) { DRM_ERROR("Failed initializing new cotable MOB.\n"); return ret; @@ -482,7 +482,7 @@ static int vmw_cotable_resize(struct vmw_resource *res, size_t new_size) /* Let go of the old mob. */ list_del(&res->mob_head); list_add_tail(&res->mob_head, &buf->res_list); - vmw_dmabuf_unreference(&old_buf); + vmw_bo_unreference(&old_buf); res->id = vcotbl->type; return 0; @@ -491,7 +491,7 @@ out_map_new: ttm_bo_kunmap(&old_map); out_wait: ttm_bo_unreserve(bo); - vmw_dmabuf_unreference(&buf); + vmw_bo_unreference(&buf); return ret; } diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c deleted file mode 100644 index d59d9dd16ebc..000000000000 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c +++ /dev/null @@ -1,376 +0,0 @@ -/************************************************************************** - * - * Copyright © 2011-2015 VMware, Inc., Palo Alto, CA., USA - * All Rights Reserved. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the - * "Software"), to deal in the Software without restriction, including - * without limitation the rights to use, copy, modify, merge, publish, - * distribute, sub license, and/or sell copies of the Software, and to - * permit persons to whom the Software is furnished to do so, subject to - * the following conditions: - * - * The above copyright notice and this permission notice (including the - * next paragraph) shall be included in all copies or substantial portions - * of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 
IN NO EVENT SHALL - * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, - * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR - * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE - * USE OR OTHER DEALINGS IN THE SOFTWARE. - * - **************************************************************************/ - -#include <drm/ttm/ttm_placement.h> - -#include <drm/drmP.h> -#include "vmwgfx_drv.h" - - -/** - * vmw_dmabuf_pin_in_placement - Validate a buffer to placement. - * - * @dev_priv: Driver private. - * @buf: DMA buffer to move. - * @placement: The placement to pin it. - * @interruptible: Use interruptible wait. - * - * Returns - * -ERESTARTSYS if interrupted by a signal. - */ -int vmw_dmabuf_pin_in_placement(struct vmw_private *dev_priv, - struct vmw_dma_buffer *buf, - struct ttm_placement *placement, - bool interruptible) -{ - struct ttm_operation_ctx ctx = {interruptible, false }; - struct ttm_buffer_object *bo = &buf->base; - int ret; - uint32_t new_flags; - - ret = ttm_write_lock(&dev_priv->reservation_sem, interruptible); - if (unlikely(ret != 0)) - return ret; - - vmw_execbuf_release_pinned_bo(dev_priv); - - ret = ttm_bo_reserve(bo, interruptible, false, NULL); - if (unlikely(ret != 0)) - goto err; - - if (buf->pin_count > 0) - ret = ttm_bo_mem_compat(placement, &bo->mem, - &new_flags) == true ? 0 : -EINVAL; - else - ret = ttm_bo_validate(bo, placement, &ctx); - - if (!ret) - vmw_bo_pin_reserved(buf, true); - - ttm_bo_unreserve(bo); - -err: - ttm_write_unlock(&dev_priv->reservation_sem); - return ret; -} - -/** - * vmw_dmabuf_pin_in_vram_or_gmr - Move a buffer to vram or gmr. - * - * This function takes the reservation_sem in write mode. - * Flushes and unpins the query bo to avoid failures. - * - * @dev_priv: Driver private. - * @buf: DMA buffer to move. - * @pin: Pin buffer if true. - * @interruptible: Use interruptible wait. - * - * Returns - * -ERESTARTSYS if interrupted by a signal. - */ -int vmw_dmabuf_pin_in_vram_or_gmr(struct vmw_private *dev_priv, - struct vmw_dma_buffer *buf, - bool interruptible) -{ - struct ttm_operation_ctx ctx = {interruptible, false }; - struct ttm_buffer_object *bo = &buf->base; - int ret; - uint32_t new_flags; - - ret = ttm_write_lock(&dev_priv->reservation_sem, interruptible); - if (unlikely(ret != 0)) - return ret; - - vmw_execbuf_release_pinned_bo(dev_priv); - - ret = ttm_bo_reserve(bo, interruptible, false, NULL); - if (unlikely(ret != 0)) - goto err; - - if (buf->pin_count > 0) { - ret = ttm_bo_mem_compat(&vmw_vram_gmr_placement, &bo->mem, - &new_flags) == true ? 0 : -EINVAL; - goto out_unreserve; - } - - ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, &ctx); - if (likely(ret == 0) || ret == -ERESTARTSYS) - goto out_unreserve; - - ret = ttm_bo_validate(bo, &vmw_vram_placement, &ctx); - -out_unreserve: - if (!ret) - vmw_bo_pin_reserved(buf, true); - - ttm_bo_unreserve(bo); -err: - ttm_write_unlock(&dev_priv->reservation_sem); - return ret; -} - -/** - * vmw_dmabuf_pin_in_vram - Move a buffer to vram. - * - * This function takes the reservation_sem in write mode. - * Flushes and unpins the query bo to avoid failures. - * - * @dev_priv: Driver private. - * @buf: DMA buffer to move. - * @interruptible: Use interruptible wait. - * - * Returns - * -ERESTARTSYS if interrupted by a signal. 
- */ -int vmw_dmabuf_pin_in_vram(struct vmw_private *dev_priv, - struct vmw_dma_buffer *buf, - bool interruptible) -{ - return vmw_dmabuf_pin_in_placement(dev_priv, buf, &vmw_vram_placement, - interruptible); -} - -/** - * vmw_dmabuf_pin_in_start_of_vram - Move a buffer to start of vram. - * - * This function takes the reservation_sem in write mode. - * Flushes and unpins the query bo to avoid failures. - * - * @dev_priv: Driver private. - * @buf: DMA buffer to pin. - * @interruptible: Use interruptible wait. - * - * Returns - * -ERESTARTSYS if interrupted by a signal. - */ -int vmw_dmabuf_pin_in_start_of_vram(struct vmw_private *dev_priv, - struct vmw_dma_buffer *buf, - bool interruptible) -{ - struct ttm_operation_ctx ctx = {interruptible, false }; - struct ttm_buffer_object *bo = &buf->base; - struct ttm_placement placement; - struct ttm_place place; - int ret = 0; - uint32_t new_flags; - - place = vmw_vram_placement.placement[0]; - place.lpfn = bo->num_pages; - placement.num_placement = 1; - placement.placement = &place; - placement.num_busy_placement = 1; - placement.busy_placement = &place; - - ret = ttm_write_lock(&dev_priv->reservation_sem, interruptible); - if (unlikely(ret != 0)) - return ret; - - vmw_execbuf_release_pinned_bo(dev_priv); - ret = ttm_bo_reserve(bo, interruptible, false, NULL); - if (unlikely(ret != 0)) - goto err_unlock; - - /* - * Is this buffer already in vram but not at the start of it? - * In that case, evict it first because TTM isn't good at handling - * that situation. - */ - if (bo->mem.mem_type == TTM_PL_VRAM && - bo->mem.start < bo->num_pages && - bo->mem.start > 0 && - buf->pin_count == 0) { - ctx.interruptible = false; - (void) ttm_bo_validate(bo, &vmw_sys_placement, &ctx); - } - - if (buf->pin_count > 0) - ret = ttm_bo_mem_compat(&placement, &bo->mem, - &new_flags) == true ? 0 : -EINVAL; - else - ret = ttm_bo_validate(bo, &placement, &ctx); - - /* For some reason we didn't end up at the start of vram */ - WARN_ON(ret == 0 && bo->offset != 0); - if (!ret) - vmw_bo_pin_reserved(buf, true); - - ttm_bo_unreserve(bo); -err_unlock: - ttm_write_unlock(&dev_priv->reservation_sem); - - return ret; -} - -/** - * vmw_dmabuf_unpin - Unpin the buffer given buffer, does not move the buffer. - * - * This function takes the reservation_sem in write mode. - * - * @dev_priv: Driver private. - * @buf: DMA buffer to unpin. - * @interruptible: Use interruptible wait. - * - * Returns - * -ERESTARTSYS if interrupted by a signal. - */ -int vmw_dmabuf_unpin(struct vmw_private *dev_priv, - struct vmw_dma_buffer *buf, - bool interruptible) -{ - struct ttm_buffer_object *bo = &buf->base; - int ret; - - ret = ttm_read_lock(&dev_priv->reservation_sem, interruptible); - if (unlikely(ret != 0)) - return ret; - - ret = ttm_bo_reserve(bo, interruptible, false, NULL); - if (unlikely(ret != 0)) - goto err; - - vmw_bo_pin_reserved(buf, false); - - ttm_bo_unreserve(bo); - -err: - ttm_read_unlock(&dev_priv->reservation_sem); - return ret; -} - -/** - * vmw_bo_get_guest_ptr - Get the guest ptr representing the current placement - * of a buffer. - * - * @bo: Pointer to a struct ttm_buffer_object. Must be pinned or reserved. - * @ptr: SVGAGuestPtr returning the result. 
- */ -void vmw_bo_get_guest_ptr(const struct ttm_buffer_object *bo, - SVGAGuestPtr *ptr) -{ - if (bo->mem.mem_type == TTM_PL_VRAM) { - ptr->gmrId = SVGA_GMR_FRAMEBUFFER; - ptr->offset = bo->offset; - } else { - ptr->gmrId = bo->mem.start; - ptr->offset = 0; - } -} - - -/** - * vmw_bo_pin_reserved - Pin or unpin a buffer object without moving it. - * - * @vbo: The buffer object. Must be reserved. - * @pin: Whether to pin or unpin. - * - */ -void vmw_bo_pin_reserved(struct vmw_dma_buffer *vbo, bool pin) -{ - struct ttm_operation_ctx ctx = { false, true }; - struct ttm_place pl; - struct ttm_placement placement; - struct ttm_buffer_object *bo = &vbo->base; - uint32_t old_mem_type = bo->mem.mem_type; - int ret; - - lockdep_assert_held(&bo->resv->lock.base); - - if (pin) { - if (vbo->pin_count++ > 0) - return; - } else { - WARN_ON(vbo->pin_count <= 0); - if (--vbo->pin_count > 0) - return; - } - - pl.fpfn = 0; - pl.lpfn = 0; - pl.flags = TTM_PL_FLAG_VRAM | VMW_PL_FLAG_GMR | VMW_PL_FLAG_MOB - | TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED; - if (pin) - pl.flags |= TTM_PL_FLAG_NO_EVICT; - - memset(&placement, 0, sizeof(placement)); - placement.num_placement = 1; - placement.placement = &pl; - - ret = ttm_bo_validate(bo, &placement, &ctx); - - BUG_ON(ret != 0 || bo->mem.mem_type != old_mem_type); -} - - -/* - * vmw_dma_buffer_unmap - Tear down a cached buffer object map. - * - * @vbo: The buffer object whose map we are tearing down. - * - * This function tears down a cached map set up using - * vmw_dma_buffer_map_and_cache(). - */ -void vmw_dma_buffer_unmap(struct vmw_dma_buffer *vbo) -{ - if (vbo->map.bo == NULL) - return; - - ttm_bo_kunmap(&vbo->map); -} - - -/* - * vmw_dma_buffer_map_and_cache - Map a buffer object and cache the map - * - * @vbo: The buffer object to map - * Return: A kernel virtual address or NULL if mapping failed. - * - * This function maps a buffer object into the kernel address space, or - * returns the virtual kernel address of an already existing map. The virtual - * address remains valid as long as the buffer object is pinned or reserved. - * The cached map is torn down on either - * 1) Buffer object move - * 2) Buffer object swapout - * 3) Buffer object destruction - * - */ -void *vmw_dma_buffer_map_and_cache(struct vmw_dma_buffer *vbo) -{ - struct ttm_buffer_object *bo = &vbo->base; - bool not_used; - void *virtual; - int ret; - - virtual = ttm_kmap_obj_virtual(&vbo->map, ¬_used); - if (virtual) - return virtual; - - ret = ttm_bo_kmap(bo, 0, bo->num_pages, &vbo->map); - if (ret) - DRM_ERROR("Buffer object map failed: %d.\n", ret); - - return ttm_kmap_obj_virtual(&vbo->map, ¬_used); -} diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c index 09cc721160c4..bb6dbbe18835 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c @@ -1,7 +1,7 @@ +// SPDX-License-Identifier: GPL-2.0 OR MIT /************************************************************************** * - * Copyright © 2009-2016 VMware, Inc., Palo Alto, CA., USA - * All Rights Reserved. 
+ * Copyright 2009-2016 VMware, Inc., Palo Alto, CA., USA * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the @@ -137,6 +137,12 @@ #define DRM_IOCTL_VMW_CREATE_EXTENDED_CONTEXT \ DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_EXTENDED_CONTEXT, \ struct drm_vmw_context_arg) +#define DRM_IOCTL_VMW_GB_SURFACE_CREATE_EXT \ + DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_CREATE_EXT, \ + union drm_vmw_gb_surface_create_ext_arg) +#define DRM_IOCTL_VMW_GB_SURFACE_REF_EXT \ + DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_REF_EXT, \ + union drm_vmw_gb_surface_reference_ext_arg) /** * The core DRM version of this macro doesn't account for @@ -153,9 +159,9 @@ static const struct drm_ioctl_desc vmw_ioctls[] = { VMW_IOCTL_DEF(VMW_GET_PARAM, vmw_getparam_ioctl, DRM_AUTH | DRM_RENDER_ALLOW), - VMW_IOCTL_DEF(VMW_ALLOC_DMABUF, vmw_dmabuf_alloc_ioctl, + VMW_IOCTL_DEF(VMW_ALLOC_DMABUF, vmw_bo_alloc_ioctl, DRM_AUTH | DRM_RENDER_ALLOW), - VMW_IOCTL_DEF(VMW_UNREF_DMABUF, vmw_dmabuf_unref_ioctl, + VMW_IOCTL_DEF(VMW_UNREF_DMABUF, vmw_bo_unref_ioctl, DRM_RENDER_ALLOW), VMW_IOCTL_DEF(VMW_CURSOR_BYPASS, vmw_kms_cursor_bypass_ioctl, @@ -219,11 +225,17 @@ static const struct drm_ioctl_desc vmw_ioctls[] = { vmw_gb_surface_reference_ioctl, DRM_AUTH | DRM_RENDER_ALLOW), VMW_IOCTL_DEF(VMW_SYNCCPU, - vmw_user_dmabuf_synccpu_ioctl, + vmw_user_bo_synccpu_ioctl, DRM_RENDER_ALLOW), VMW_IOCTL_DEF(VMW_CREATE_EXTENDED_CONTEXT, vmw_extended_context_define_ioctl, DRM_AUTH | DRM_RENDER_ALLOW), + VMW_IOCTL_DEF(VMW_GB_SURFACE_CREATE_EXT, + vmw_gb_surface_define_ext_ioctl, + DRM_AUTH | DRM_RENDER_ALLOW), + VMW_IOCTL_DEF(VMW_GB_SURFACE_REF_EXT, + vmw_gb_surface_reference_ext_ioctl, + DRM_AUTH | DRM_RENDER_ALLOW), }; static const struct pci_device_id vmw_pci_id_list[] = { @@ -258,6 +270,15 @@ MODULE_PARM_DESC(assume_16bpp, "Assume 16-bpp when filtering modes"); module_param_named(assume_16bpp, vmw_assume_16bpp, int, 0600); +static void vmw_print_capabilities2(uint32_t capabilities2) +{ + DRM_INFO("Capabilities2:\n"); + if (capabilities2 & SVGA_CAP2_GROW_OTABLE) + DRM_INFO(" Grow oTable.\n"); + if (capabilities2 & SVGA_CAP2_INTRA_SURFACE_COPY) + DRM_INFO(" IntraSurface copy.\n"); +} + static void vmw_print_capabilities(uint32_t capabilities) { DRM_INFO("Capabilities:\n"); @@ -321,7 +342,7 @@ static void vmw_print_capabilities(uint32_t capabilities) static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv) { int ret; - struct vmw_dma_buffer *vbo; + struct vmw_buffer_object *vbo; struct ttm_bo_kmap_obj map; volatile SVGA3dQueryResult *result; bool dummy; @@ -335,9 +356,9 @@ static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv) if (!vbo) return -ENOMEM; - ret = vmw_dmabuf_init(dev_priv, vbo, PAGE_SIZE, - &vmw_sys_ne_placement, false, - &vmw_dmabuf_bo_free); + ret = vmw_bo_init(dev_priv, vbo, PAGE_SIZE, + &vmw_sys_ne_placement, false, + &vmw_bo_bo_free); if (unlikely(ret != 0)) return ret; @@ -358,7 +379,7 @@ static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv) if (unlikely(ret != 0)) { DRM_ERROR("Dummy query buffer map failed.\n"); - vmw_dmabuf_unreference(&vbo); + vmw_bo_unreference(&vbo); } else dev_priv->dummy_query_bo = vbo; @@ -460,7 +481,7 @@ static void vmw_release_device_early(struct vmw_private *dev_priv) BUG_ON(dev_priv->pinned_bo != NULL); - vmw_dmabuf_unreference(&dev_priv->dummy_query_bo); + vmw_bo_unreference(&dev_priv->dummy_query_bo); if (dev_priv->cman) vmw_cmdbuf_remove_pool(dev_priv->cman); @@ -644,6 
+665,7 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) mutex_init(&dev_priv->cmdbuf_mutex); mutex_init(&dev_priv->release_mutex); mutex_init(&dev_priv->binding_mutex); + mutex_init(&dev_priv->requested_layout_mutex); mutex_init(&dev_priv->global_kms_state_mutex); rwlock_init(&dev_priv->resource_lock); ttm_lock_init(&dev_priv->reservation_sem); @@ -683,6 +705,12 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) } dev_priv->capabilities = vmw_read(dev_priv, SVGA_REG_CAPABILITIES); + + if (dev_priv->capabilities & SVGA_CAP_CAP2_REGISTER) { + dev_priv->capabilities2 = vmw_read(dev_priv, SVGA_REG_CAP2); + } + + ret = vmw_dma_select_mode(dev_priv); if (unlikely(ret != 0)) { DRM_INFO("Restricting capabilities due to IOMMU setup.\n"); @@ -751,6 +779,8 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) } vmw_print_capabilities(dev_priv->capabilities); + if (dev_priv->capabilities & SVGA_CAP_CAP2_REGISTER) + vmw_print_capabilities2(dev_priv->capabilities2); ret = vmw_dma_masks(dev_priv); if (unlikely(ret != 0)) @@ -883,7 +913,7 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) if (dev_priv->has_mob) { spin_lock(&dev_priv->cap_lock); - vmw_write(dev_priv, SVGA_REG_DEV_CAP, SVGA3D_DEVCAP_DX); + vmw_write(dev_priv, SVGA_REG_DEV_CAP, SVGA3D_DEVCAP_DXCONTEXT); dev_priv->has_dx = !!vmw_read(dev_priv, SVGA_REG_DEV_CAP); spin_unlock(&dev_priv->cap_lock); } @@ -898,9 +928,23 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) if (ret) goto out_no_fifo; + if (dev_priv->has_dx) { + /* + * SVGA_CAP2_DX2 (DefineGBSurface_v3) is needed for SM4_1 + * support + */ + if ((dev_priv->capabilities2 & SVGA_CAP2_DX2) != 0) { + vmw_write(dev_priv, SVGA_REG_DEV_CAP, + SVGA3D_DEVCAP_SM41); + dev_priv->has_sm4_1 = vmw_read(dev_priv, + SVGA_REG_DEV_CAP); + } + } + DRM_INFO("DX: %s\n", dev_priv->has_dx ? "yes." : "no."); - DRM_INFO("Atomic: %s\n", - (dev->driver->driver_features & DRIVER_ATOMIC) ? "yes" : "no"); + DRM_INFO("Atomic: %s\n", (dev->driver->driver_features & DRIVER_ATOMIC) + ? "yes." : "no."); + DRM_INFO("SM4_1: %s\n", dev_priv->has_sm4_1 ? "yes." : "no."); snprintf(host_log, sizeof(host_log), "vmwgfx: %s-%s", VMWGFX_REPO, VMWGFX_GIT_VERSION); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h index 5fcbe1620d50..1abe21758b0d 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h @@ -1,7 +1,7 @@ +/* SPDX-License-Identifier: GPL-2.0 OR MIT */ /************************************************************************** * - * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA - * All Rights Reserved. 
+ * Copyright 2009-2015 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
@@ -43,10 +43,10 @@
 #include <linux/sync_file.h>
 
 #define VMWGFX_DRIVER_NAME "vmwgfx"
-#define VMWGFX_DRIVER_DATE "20180322"
+#define VMWGFX_DRIVER_DATE "20180704"
 #define VMWGFX_DRIVER_MAJOR 2
-#define VMWGFX_DRIVER_MINOR 14
-#define VMWGFX_DRIVER_PATCHLEVEL 1
+#define VMWGFX_DRIVER_MINOR 15
+#define VMWGFX_DRIVER_PATCHLEVEL 0
 #define VMWGFX_FILE_PAGE_OFFSET 0x00100000
 #define VMWGFX_FIFO_STATIC_SIZE (1024*1024)
 #define VMWGFX_MAX_RELOCATIONS 2048
@@ -83,10 +83,10 @@ struct vmw_fpriv {
 	struct drm_master *locked_master;
 	struct ttm_object_file *tfile;
-	bool gb_aware;
+	bool gb_aware; /* user-space is guest-backed aware */
 };
 
-struct vmw_dma_buffer {
+struct vmw_buffer_object {
 	struct ttm_buffer_object base;
 	struct list_head res_list;
 	s32 pin_count;
@@ -120,7 +120,7 @@ struct vmw_resource {
 	unsigned long backup_size;
 	bool res_dirty; /* Protected by backup buffer reserved */
 	bool backup_dirty; /* Protected by backup buffer reserved */
-	struct vmw_dma_buffer *backup;
+	struct vmw_buffer_object *backup;
 	unsigned long backup_offset;
 	unsigned long pin_count; /* Protected by resource reserved */
 	const struct vmw_res_func *func;
@@ -166,7 +166,7 @@ struct vmw_surface_offset;
 
 struct vmw_surface {
 	struct vmw_resource res;
-	uint32_t flags;
+	SVGA3dSurfaceAllFlags flags;
 	uint32_t format;
 	uint32_t mip_levels[DRM_VMW_MAX_SURFACE_FACES];
 	struct drm_vmw_size base_size;
@@ -180,6 +180,8 @@ struct vmw_surface {
 	SVGA3dTextureFilter autogen_filter;
 	uint32_t multisample_count;
 	struct list_head view_list;
+	SVGA3dMSPattern multisample_pattern;
+	SVGA3dMSQualityLevel quality_level;
 };
 
 struct vmw_marker_queue {
@@ -304,7 +306,7 @@ struct vmw_sw_context{
 	uint32_t cmd_bounce_size;
 	struct list_head resource_list;
 	struct list_head ctx_resource_list; /* For contexts and cotables */
-	struct vmw_dma_buffer *cur_query_bo;
+	struct vmw_buffer_object *cur_query_bo;
 	struct list_head res_relocations;
 	uint32_t *buf_start;
 	struct vmw_res_cache_entry res_cache[vmw_res_max];
@@ -315,7 +317,7 @@ struct vmw_sw_context{
 	bool staged_bindings_inuse;
 	struct list_head staged_cmd_res;
 	struct vmw_resource_val_node *dx_ctx_node;
-	struct vmw_dma_buffer *dx_query_mob;
+	struct vmw_buffer_object *dx_query_mob;
 	struct vmw_resource *dx_query_ctx;
 	struct vmw_cmdbuf_res_manager *man;
 };
@@ -386,6 +388,7 @@ struct vmw_private {
 	uint32_t initial_height;
 	u32 *mmio_virt;
 	uint32_t capabilities;
+	uint32_t capabilities2;
 	uint32_t max_gmr_ids;
 	uint32_t max_gmr_pages;
 	uint32_t max_mob_pages;
@@ -397,6 +400,7 @@ struct vmw_private {
 	spinlock_t cap_lock;
 	bool has_dx;
 	bool assume_16bpp;
+	bool has_sm4_1;
 
 	/*
	 * VGA registers.
@@ -412,6 +416,15 @@ struct vmw_private {
 	uint32_t num_displays;
 
 	/*
+	 * Currently requested_layout_mutex is used to protect the GUI
+	 * positioning state in the display unit. With that use case, this
+	 * mutex is only taken during the layout ioctl and atomic
+	 * check_modeset. Other display unit state can be protected by this
+	 * mutex, but that needs careful consideration.
+	 */
+	struct mutex requested_layout_mutex;
+
+	/*
 	 * Framebuffer info.
 	 */
@@ -513,8 +526,8 @@ struct vmw_private {
	 * are protected by the cmdbuf mutex.
*/ - struct vmw_dma_buffer *dummy_query_bo; - struct vmw_dma_buffer *pinned_bo; + struct vmw_buffer_object *dummy_query_bo; + struct vmw_buffer_object *pinned_bo; uint32_t query_cid; uint32_t query_cid_valid; bool dummy_query_bo_pinned; @@ -623,43 +636,13 @@ extern int vmw_user_lookup_handle(struct vmw_private *dev_priv, struct ttm_object_file *tfile, uint32_t handle, struct vmw_surface **out_surf, - struct vmw_dma_buffer **out_buf); + struct vmw_buffer_object **out_buf); extern int vmw_user_resource_lookup_handle( struct vmw_private *dev_priv, struct ttm_object_file *tfile, uint32_t handle, const struct vmw_user_resource_conv *converter, struct vmw_resource **p_res); -extern void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo); -extern int vmw_dmabuf_init(struct vmw_private *dev_priv, - struct vmw_dma_buffer *vmw_bo, - size_t size, struct ttm_placement *placement, - bool interuptable, - void (*bo_free) (struct ttm_buffer_object *bo)); -extern int vmw_user_dmabuf_verify_access(struct ttm_buffer_object *bo, - struct ttm_object_file *tfile); -extern int vmw_user_dmabuf_alloc(struct vmw_private *dev_priv, - struct ttm_object_file *tfile, - uint32_t size, - bool shareable, - uint32_t *handle, - struct vmw_dma_buffer **p_dma_buf, - struct ttm_base_object **p_base); -extern int vmw_user_dmabuf_reference(struct ttm_object_file *tfile, - struct vmw_dma_buffer *dma_buf, - uint32_t *handle); -extern int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv); -extern int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv); -extern int vmw_user_dmabuf_synccpu_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv); -extern uint32_t vmw_dmabuf_validate_node(struct ttm_buffer_object *bo, - uint32_t cur_validate_node); -extern void vmw_dmabuf_validate_clear(struct ttm_buffer_object *bo); -extern int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile, - uint32_t id, struct vmw_dma_buffer **out, - struct ttm_base_object **base); extern int vmw_stream_claim_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); extern int vmw_stream_unref_ioctl(struct drm_device *dev, void *data, @@ -670,43 +653,70 @@ extern int vmw_user_stream_lookup(struct vmw_private *dev_priv, struct vmw_resource **out); extern void vmw_resource_unreserve(struct vmw_resource *res, bool switch_backup, - struct vmw_dma_buffer *new_backup, + struct vmw_buffer_object *new_backup, unsigned long new_backup_offset); -extern void vmw_resource_move_notify(struct ttm_buffer_object *bo, - struct ttm_mem_reg *mem); extern void vmw_query_move_notify(struct ttm_buffer_object *bo, struct ttm_mem_reg *mem); -extern void vmw_resource_swap_notify(struct ttm_buffer_object *bo); -extern int vmw_query_readback_all(struct vmw_dma_buffer *dx_query_mob); -extern void vmw_fence_single_bo(struct ttm_buffer_object *bo, - struct vmw_fence_obj *fence); +extern int vmw_query_readback_all(struct vmw_buffer_object *dx_query_mob); extern void vmw_resource_evict_all(struct vmw_private *dev_priv); - +extern void vmw_resource_unbind_list(struct vmw_buffer_object *vbo); /** - * DMA buffer helper routines - vmwgfx_dmabuf.c + * Buffer object helper functions - vmwgfx_bo.c */ -extern int vmw_dmabuf_pin_in_placement(struct vmw_private *vmw_priv, - struct vmw_dma_buffer *bo, - struct ttm_placement *placement, +extern int vmw_bo_pin_in_placement(struct vmw_private *vmw_priv, + struct vmw_buffer_object *bo, + struct ttm_placement *placement, + bool interruptible); +extern int 
vmw_bo_pin_in_vram(struct vmw_private *dev_priv, + struct vmw_buffer_object *buf, + bool interruptible); +extern int vmw_bo_pin_in_vram_or_gmr(struct vmw_private *dev_priv, + struct vmw_buffer_object *buf, + bool interruptible); +extern int vmw_bo_pin_in_start_of_vram(struct vmw_private *vmw_priv, + struct vmw_buffer_object *bo, bool interruptible); -extern int vmw_dmabuf_pin_in_vram(struct vmw_private *dev_priv, - struct vmw_dma_buffer *buf, - bool interruptible); -extern int vmw_dmabuf_pin_in_vram_or_gmr(struct vmw_private *dev_priv, - struct vmw_dma_buffer *buf, - bool interruptible); -extern int vmw_dmabuf_pin_in_start_of_vram(struct vmw_private *vmw_priv, - struct vmw_dma_buffer *bo, - bool interruptible); -extern int vmw_dmabuf_unpin(struct vmw_private *vmw_priv, - struct vmw_dma_buffer *bo, - bool interruptible); +extern int vmw_bo_unpin(struct vmw_private *vmw_priv, + struct vmw_buffer_object *bo, + bool interruptible); extern void vmw_bo_get_guest_ptr(const struct ttm_buffer_object *buf, SVGAGuestPtr *ptr); -extern void vmw_bo_pin_reserved(struct vmw_dma_buffer *bo, bool pin); -extern void *vmw_dma_buffer_map_and_cache(struct vmw_dma_buffer *vbo); -extern void vmw_dma_buffer_unmap(struct vmw_dma_buffer *vbo); +extern void vmw_bo_pin_reserved(struct vmw_buffer_object *bo, bool pin); +extern void vmw_bo_bo_free(struct ttm_buffer_object *bo); +extern int vmw_bo_init(struct vmw_private *dev_priv, + struct vmw_buffer_object *vmw_bo, + size_t size, struct ttm_placement *placement, + bool interruptible, + void (*bo_free)(struct ttm_buffer_object *bo)); +extern int vmw_user_bo_verify_access(struct ttm_buffer_object *bo, + struct ttm_object_file *tfile); +extern int vmw_user_bo_alloc(struct vmw_private *dev_priv, + struct ttm_object_file *tfile, + uint32_t size, + bool shareable, + uint32_t *handle, + struct vmw_buffer_object **p_dma_buf, + struct ttm_base_object **p_base); +extern int vmw_user_bo_reference(struct ttm_object_file *tfile, + struct vmw_buffer_object *dma_buf, + uint32_t *handle); +extern int vmw_bo_alloc_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); +extern int vmw_bo_unref_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); +extern int vmw_user_bo_synccpu_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); +extern int vmw_user_bo_lookup(struct ttm_object_file *tfile, + uint32_t id, struct vmw_buffer_object **out, + struct ttm_base_object **base); +extern void vmw_bo_fence_single(struct ttm_buffer_object *bo, + struct vmw_fence_obj *fence); +extern void *vmw_bo_map_and_cache(struct vmw_buffer_object *vbo); +extern void vmw_bo_unmap(struct vmw_buffer_object *vbo); +extern void vmw_bo_move_notify(struct ttm_buffer_object *bo, + struct ttm_mem_reg *mem); +extern void vmw_bo_swap_notify(struct ttm_buffer_object *bo); /** * Misc Ioctl functionality - vmwgfx_ioctl.c @@ -758,7 +768,7 @@ extern void vmw_ttm_global_release(struct vmw_private *dev_priv); extern int vmw_mmap(struct file *filp, struct vm_area_struct *vma); /** - * TTM buffer object driver - vmwgfx_buffer.c + * TTM buffer object driver - vmwgfx_ttm_buffer.c */ extern const size_t vmw_tt_size; @@ -1041,8 +1051,8 @@ vmw_context_binding_state(struct vmw_resource *ctx); extern void vmw_dx_context_scrub_cotables(struct vmw_resource *ctx, bool readback); extern int vmw_context_bind_dx_query(struct vmw_resource *ctx_res, - struct vmw_dma_buffer *mob); -extern struct vmw_dma_buffer * + struct vmw_buffer_object *mob); +extern struct vmw_buffer_object *
vmw_context_get_dx_query_mob(struct vmw_resource *ctx_res); @@ -1070,14 +1080,22 @@ extern int vmw_surface_validate(struct vmw_private *dev_priv, struct vmw_surface *srf); int vmw_surface_gb_priv_define(struct drm_device *dev, uint32_t user_accounting_size, - uint32_t svga3d_flags, + SVGA3dSurfaceAllFlags svga3d_flags, SVGA3dSurfaceFormat format, bool for_scanout, uint32_t num_mip_levels, uint32_t multisample_count, uint32_t array_size, struct drm_vmw_size size, + SVGA3dMSPattern multisample_pattern, + SVGA3dMSQualityLevel quality_level, struct vmw_surface **srf_out); +extern int vmw_gb_surface_define_ext_ioctl(struct drm_device *dev, + void *data, + struct drm_file *file_priv); +extern int vmw_gb_surface_reference_ext_ioctl(struct drm_device *dev, + void *data, + struct drm_file *file_priv); /* * Shader management - vmwgfx_shader.c @@ -1224,6 +1242,11 @@ int vmw_bo_cpu_blit(struct ttm_buffer_object *dst, u32 w, u32 h, struct vmw_diff_cpy *diff); +/* Host messaging - vmwgfx_msg.c: */ +int vmw_host_get_guestinfo(const char *guest_info_param, + char *buffer, size_t *length); +int vmw_host_log(const char *log); + /** * Inline helper functions */ @@ -1243,9 +1266,9 @@ static inline struct vmw_surface *vmw_surface_reference(struct vmw_surface *srf) return srf; } -static inline void vmw_dmabuf_unreference(struct vmw_dma_buffer **buf) +static inline void vmw_bo_unreference(struct vmw_buffer_object **buf) { - struct vmw_dma_buffer *tmp_buf = *buf; + struct vmw_buffer_object *tmp_buf = *buf; *buf = NULL; if (tmp_buf != NULL) { @@ -1255,7 +1278,8 @@ static inline void vmw_dmabuf_unreference(struct vmw_dma_buffer **buf) } } -static inline struct vmw_dma_buffer *vmw_dmabuf_reference(struct vmw_dma_buffer *buf) +static inline struct vmw_buffer_object * +vmw_bo_reference(struct vmw_buffer_object *buf) { if (ttm_bo_reference(&buf->base)) return buf; @@ -1302,10 +1326,4 @@ static inline void vmw_mmio_write(u32 value, u32 *addr) { WRITE_ONCE(*addr, value); } - -/** - * Add vmw_msg module function - */ -extern int vmw_host_log(const char *log); - #endif diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c index c9d5cc237124..1f134570b759 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c @@ -1,7 +1,7 @@ +// SPDX-License-Identifier: GPL-2.0 OR MIT /************************************************************************** * - * Copyright © 2009 - 2015 VMware, Inc., Palo Alto, CA., USA - * All Rights Reserved.
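The host messaging prototypes above replace the lone vmw_host_log() declaration that used to sit at the bottom of the header. A usage sketch under stated assumptions: the zero-on-success return convention and the "ToolsVersion" guestinfo key are illustrative assumptions, not taken from this diff:

char value[64];
size_t len = sizeof(value);

/* Fetch a guestinfo key from the host, then log back through it. */
if (vmw_host_get_guestinfo("ToolsVersion", value, &len) == 0)
	vmw_host_log("vmwgfx: host guestinfo query succeeded");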
+ * Copyright 2009 - 2015 VMware, Inc., Palo Alto, CA., USA * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the @@ -92,7 +92,7 @@ struct vmw_resource_val_node { struct list_head head; struct drm_hash_item hash; struct vmw_resource *res; - struct vmw_dma_buffer *new_backup; + struct vmw_buffer_object *new_backup; struct vmw_ctx_binding_state *staged_bindings; unsigned long new_backup_offset; u32 first_usage : 1; @@ -126,9 +126,9 @@ static int vmw_resource_context_res_add(struct vmw_private *dev_priv, static int vmw_translate_mob_ptr(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context, SVGAMobId *id, - struct vmw_dma_buffer **vmw_bo_p); + struct vmw_buffer_object **vmw_bo_p); static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context, - struct vmw_dma_buffer *vbo, + struct vmw_buffer_object *vbo, bool validate_as_mob, uint32_t *p_val_node); /** @@ -185,7 +185,7 @@ static void vmw_resources_unreserve(struct vmw_sw_context *sw_context, } vmw_resource_unreserve(res, switch_backup, val->new_backup, val->new_backup_offset); - vmw_dmabuf_unreference(&val->new_backup); + vmw_bo_unreference(&val->new_backup); } } @@ -423,7 +423,7 @@ static int vmw_resource_context_res_add(struct vmw_private *dev_priv, } if (dev_priv->has_dx && vmw_res_type(ctx) == vmw_res_dx_context) { - struct vmw_dma_buffer *dx_query_mob; + struct vmw_buffer_object *dx_query_mob; dx_query_mob = vmw_context_get_dx_query_mob(ctx); if (dx_query_mob) @@ -544,7 +544,7 @@ static int vmw_cmd_ok(struct vmw_private *dev_priv, * submission is reached. */ static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context, - struct vmw_dma_buffer *vbo, + struct vmw_buffer_object *vbo, bool validate_as_mob, uint32_t *p_val_node) { @@ -616,7 +616,7 @@ static int vmw_resources_reserve(struct vmw_sw_context *sw_context) return ret; if (res->backup) { - struct vmw_dma_buffer *vbo = res->backup; + struct vmw_buffer_object *vbo = res->backup; ret = vmw_bo_to_validate_list (sw_context, vbo, @@ -628,7 +628,7 @@ static int vmw_resources_reserve(struct vmw_sw_context *sw_context) } if (sw_context->dx_query_mob) { - struct vmw_dma_buffer *expected_dx_query_mob; + struct vmw_buffer_object *expected_dx_query_mob; expected_dx_query_mob = vmw_context_get_dx_query_mob(sw_context->dx_query_ctx); @@ -657,7 +657,7 @@ static int vmw_resources_validate(struct vmw_sw_context *sw_context) list_for_each_entry(val, &sw_context->resource_list, head) { struct vmw_resource *res = val->res; - struct vmw_dma_buffer *backup = res->backup; + struct vmw_buffer_object *backup = res->backup; ret = vmw_resource_validate(res); if (unlikely(ret != 0)) { @@ -668,7 +668,7 @@ static int vmw_resources_validate(struct vmw_sw_context *sw_context) /* Check if the resource switched backup buffer */ if (backup && res->backup && (backup != res->backup)) { - struct vmw_dma_buffer *vbo = res->backup; + struct vmw_buffer_object *vbo = res->backup; ret = vmw_bo_to_validate_list (sw_context, vbo, @@ -821,7 +821,7 @@ out_no_reloc: static int vmw_rebind_all_dx_query(struct vmw_resource *ctx_res) { struct vmw_private *dev_priv = ctx_res->dev_priv; - struct vmw_dma_buffer *dx_query_mob; + struct vmw_buffer_object *dx_query_mob; struct { SVGA3dCmdHeader header; SVGA3dCmdDXBindAllQuery body; @@ -1152,7 +1152,7 @@ static int vmw_cmd_present_check(struct vmw_private *dev_priv, * command batch. 
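One detail of the vmw_dmabuf_* to vmw_bo_* rename worth spelling out: vmw_bo_unreference() clears the caller's pointer before dropping the reference, so the lookup/release pairs in the hunks below all follow the shape sketched here (tfile and handle stand in for the caller's ttm_object_file and user handle):

struct vmw_buffer_object *vmw_bo = NULL;
int ret;

ret = vmw_user_bo_lookup(tfile, handle, &vmw_bo, NULL);
if (unlikely(ret != 0))
	return ret;		/* lookup failed, no reference held */

/* ... validate or relocate vmw_bo ... */

vmw_bo_unreference(&vmw_bo);	/* drops the ref, vmw_bo becomes NULL */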
*/ static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv, - struct vmw_dma_buffer *new_query_bo, + struct vmw_buffer_object *new_query_bo, struct vmw_sw_context *sw_context) { struct vmw_res_cache_entry *ctx_entry = @@ -1234,7 +1234,7 @@ static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv, if (dev_priv->pinned_bo != sw_context->cur_query_bo) { if (dev_priv->pinned_bo) { vmw_bo_pin_reserved(dev_priv->pinned_bo, false); - vmw_dmabuf_unreference(&dev_priv->pinned_bo); + vmw_bo_unreference(&dev_priv->pinned_bo); } if (!sw_context->needs_post_query_barrier) { @@ -1256,7 +1256,7 @@ static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv, dev_priv->query_cid = sw_context->last_query_ctx->id; dev_priv->query_cid_valid = true; dev_priv->pinned_bo = - vmw_dmabuf_reference(sw_context->cur_query_bo); + vmw_bo_reference(sw_context->cur_query_bo); } } } @@ -1282,15 +1282,14 @@ static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv, static int vmw_translate_mob_ptr(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context, SVGAMobId *id, - struct vmw_dma_buffer **vmw_bo_p) + struct vmw_buffer_object **vmw_bo_p) { - struct vmw_dma_buffer *vmw_bo = NULL; + struct vmw_buffer_object *vmw_bo = NULL; uint32_t handle = *id; struct vmw_relocation *reloc; int ret; - ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo, - NULL); + ret = vmw_user_bo_lookup(sw_context->fp->tfile, handle, &vmw_bo, NULL); if (unlikely(ret != 0)) { DRM_ERROR("Could not find or use MOB buffer.\n"); ret = -EINVAL; @@ -1316,7 +1315,7 @@ static int vmw_translate_mob_ptr(struct vmw_private *dev_priv, return 0; out_no_reloc: - vmw_dmabuf_unreference(&vmw_bo); + vmw_bo_unreference(&vmw_bo); *vmw_bo_p = NULL; return ret; } @@ -1343,15 +1342,14 @@ out_no_reloc: static int vmw_translate_guest_ptr(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context, SVGAGuestPtr *ptr, - struct vmw_dma_buffer **vmw_bo_p) + struct vmw_buffer_object **vmw_bo_p) { - struct vmw_dma_buffer *vmw_bo = NULL; + struct vmw_buffer_object *vmw_bo = NULL; uint32_t handle = ptr->gmrId; struct vmw_relocation *reloc; int ret; - ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo, - NULL); + ret = vmw_user_bo_lookup(sw_context->fp->tfile, handle, &vmw_bo, NULL); if (unlikely(ret != 0)) { DRM_ERROR("Could not find or use GMR region.\n"); ret = -EINVAL; @@ -1376,7 +1374,7 @@ static int vmw_translate_guest_ptr(struct vmw_private *dev_priv, return 0; out_no_reloc: - vmw_dmabuf_unreference(&vmw_bo); + vmw_bo_unreference(&vmw_bo); *vmw_bo_p = NULL; return ret; } @@ -1447,7 +1445,7 @@ static int vmw_cmd_dx_bind_query(struct vmw_private *dev_priv, SVGA3dCmdDXBindQuery q; } *cmd; - struct vmw_dma_buffer *vmw_bo; + struct vmw_buffer_object *vmw_bo; int ret; @@ -1466,7 +1464,7 @@ static int vmw_cmd_dx_bind_query(struct vmw_private *dev_priv, sw_context->dx_query_mob = vmw_bo; sw_context->dx_query_ctx = sw_context->dx_ctx_node->res; - vmw_dmabuf_unreference(&vmw_bo); + vmw_bo_unreference(&vmw_bo); return ret; } @@ -1549,7 +1547,7 @@ static int vmw_cmd_end_gb_query(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context, SVGA3dCmdHeader *header) { - struct vmw_dma_buffer *vmw_bo; + struct vmw_buffer_object *vmw_bo; struct vmw_query_cmd { SVGA3dCmdHeader header; SVGA3dCmdEndGBQuery q; @@ -1569,7 +1567,7 @@ static int vmw_cmd_end_gb_query(struct vmw_private *dev_priv, ret = vmw_query_bo_switch_prepare(dev_priv, vmw_bo, sw_context); - vmw_dmabuf_unreference(&vmw_bo); + 
vmw_bo_unreference(&vmw_bo); return ret; } @@ -1584,7 +1582,7 @@ static int vmw_cmd_end_query(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context, SVGA3dCmdHeader *header) { - struct vmw_dma_buffer *vmw_bo; + struct vmw_buffer_object *vmw_bo; struct vmw_query_cmd { SVGA3dCmdHeader header; SVGA3dCmdEndQuery q; @@ -1623,7 +1621,7 @@ static int vmw_cmd_end_query(struct vmw_private *dev_priv, ret = vmw_query_bo_switch_prepare(dev_priv, vmw_bo, sw_context); - vmw_dmabuf_unreference(&vmw_bo); + vmw_bo_unreference(&vmw_bo); return ret; } @@ -1638,7 +1636,7 @@ static int vmw_cmd_wait_gb_query(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context, SVGA3dCmdHeader *header) { - struct vmw_dma_buffer *vmw_bo; + struct vmw_buffer_object *vmw_bo; struct vmw_query_cmd { SVGA3dCmdHeader header; SVGA3dCmdWaitForGBQuery q; @@ -1656,7 +1654,7 @@ static int vmw_cmd_wait_gb_query(struct vmw_private *dev_priv, if (unlikely(ret != 0)) return ret; - vmw_dmabuf_unreference(&vmw_bo); + vmw_bo_unreference(&vmw_bo); return 0; } @@ -1671,7 +1669,7 @@ static int vmw_cmd_wait_query(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context, SVGA3dCmdHeader *header) { - struct vmw_dma_buffer *vmw_bo; + struct vmw_buffer_object *vmw_bo; struct vmw_query_cmd { SVGA3dCmdHeader header; SVGA3dCmdWaitForQuery q; @@ -1708,7 +1706,7 @@ static int vmw_cmd_wait_query(struct vmw_private *dev_priv, if (unlikely(ret != 0)) return ret; - vmw_dmabuf_unreference(&vmw_bo); + vmw_bo_unreference(&vmw_bo); return 0; } @@ -1716,7 +1714,7 @@ static int vmw_cmd_dma(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context, SVGA3dCmdHeader *header) { - struct vmw_dma_buffer *vmw_bo = NULL; + struct vmw_buffer_object *vmw_bo = NULL; struct vmw_surface *srf = NULL; struct vmw_dma_cmd { SVGA3dCmdHeader header; @@ -1768,7 +1766,7 @@ static int vmw_cmd_dma(struct vmw_private *dev_priv, header); out_no_surface: - vmw_dmabuf_unreference(&vmw_bo); + vmw_bo_unreference(&vmw_bo); return ret; } @@ -1887,7 +1885,7 @@ static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context, void *buf) { - struct vmw_dma_buffer *vmw_bo; + struct vmw_buffer_object *vmw_bo; int ret; struct { @@ -1901,7 +1899,7 @@ static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv, if (unlikely(ret != 0)) return ret; - vmw_dmabuf_unreference(&vmw_bo); + vmw_bo_unreference(&vmw_bo); return ret; } @@ -1928,7 +1926,7 @@ static int vmw_cmd_res_switch_backup(struct vmw_private *dev_priv, uint32_t *buf_id, unsigned long backup_offset) { - struct vmw_dma_buffer *dma_buf; + struct vmw_buffer_object *dma_buf; int ret; ret = vmw_translate_mob_ptr(dev_priv, sw_context, buf_id, &dma_buf); @@ -1939,7 +1937,7 @@ static int vmw_cmd_res_switch_backup(struct vmw_private *dev_priv, if (val_node->first_usage) val_node->no_buffer_needed = true; - vmw_dmabuf_unreference(&val_node->new_backup); + vmw_bo_unreference(&val_node->new_backup); val_node->new_backup = dma_buf; val_node->new_backup_offset = backup_offset; @@ -3118,6 +3116,32 @@ static int vmw_cmd_dx_transfer_from_buffer(struct vmw_private *dev_priv, &cmd->body.destSid, NULL); } +/** + * vmw_cmd_intra_surface_copy - + * Validate an SVGA_3D_CMD_INTRA_SURFACE_COPY command + * + * @dev_priv: Pointer to a device private struct. + * @sw_context: The software context being used for this batch. + * @header: Pointer to the command header in the command stream. 
+ */ +static int vmw_cmd_intra_surface_copy(struct vmw_private *dev_priv, + struct vmw_sw_context *sw_context, + SVGA3dCmdHeader *header) +{ + struct { + SVGA3dCmdHeader header; + SVGA3dCmdIntraSurfaceCopy body; + } *cmd = container_of(header, typeof(*cmd), header); + + if (!(dev_priv->capabilities2 & SVGA_CAP2_INTRA_SURFACE_COPY)) + return -EINVAL; + + return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, + user_surface_converter, + &cmd->body.surface.sid, NULL); +} + + static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context, void *buf, uint32_t *size) @@ -3232,9 +3256,9 @@ static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = { false, false, false), VMW_CMD_DEF(SVGA_3D_CMD_SCREEN_DMA, &vmw_cmd_invalid, false, false, false), - VMW_CMD_DEF(SVGA_3D_CMD_SET_UNITY_SURFACE_COOKIE, &vmw_cmd_invalid, + VMW_CMD_DEF(SVGA_3D_CMD_DEAD1, &vmw_cmd_invalid, false, false, false), - VMW_CMD_DEF(SVGA_3D_CMD_OPEN_CONTEXT_SURFACE, &vmw_cmd_invalid, + VMW_CMD_DEF(SVGA_3D_CMD_DEAD2, &vmw_cmd_invalid, false, false, false), VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_BITBLT, &vmw_cmd_invalid, false, false, false), @@ -3473,6 +3497,8 @@ static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = { VMW_CMD_DEF(SVGA_3D_CMD_DX_TRANSFER_FROM_BUFFER, &vmw_cmd_dx_transfer_from_buffer, true, false, true), + VMW_CMD_DEF(SVGA_3D_CMD_INTRA_SURFACE_COPY, &vmw_cmd_intra_surface_copy, + true, false, true), }; bool vmw_cmd_describe(const void *buf, u32 *size, char const **cmd) @@ -3701,8 +3727,8 @@ int vmw_validate_single_buffer(struct vmw_private *dev_priv, bool interruptible, bool validate_as_mob) { - struct vmw_dma_buffer *vbo = container_of(bo, struct vmw_dma_buffer, - base); + struct vmw_buffer_object *vbo = + container_of(bo, struct vmw_buffer_object, base); struct ttm_operation_ctx ctx = { interruptible, true }; int ret; @@ -4423,7 +4449,7 @@ void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv, ttm_bo_unref(&query_val.bo); ttm_bo_unref(&pinned_val.bo); - vmw_dmabuf_unreference(&dev_priv->pinned_bo); + vmw_bo_unreference(&dev_priv->pinned_bo); out_unlock: return; @@ -4432,7 +4458,7 @@ out_no_emit: out_no_reserve: ttm_bo_unref(&query_val.bo); ttm_bo_unref(&pinned_val.bo); - vmw_dmabuf_unreference(&dev_priv->pinned_bo); + vmw_bo_unreference(&dev_priv->pinned_bo); } /** diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c index 54e300365a5c..b913a56f3426 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c @@ -42,7 +42,7 @@ struct vmw_fb_par { void *vmalloc; struct mutex bo_mutex; - struct vmw_dma_buffer *vmw_bo; + struct vmw_buffer_object *vmw_bo; unsigned bo_size; struct drm_framebuffer *set_fb; struct drm_display_mode *set_mode; @@ -184,7 +184,7 @@ static void vmw_fb_dirty_flush(struct work_struct *work) struct drm_clip_rect clip; struct drm_framebuffer *cur_fb; u8 *src_ptr, *dst_ptr; - struct vmw_dma_buffer *vbo = par->vmw_bo; + struct vmw_buffer_object *vbo = par->vmw_bo; void *virtual; if (!READ_ONCE(par->dirty.active)) @@ -197,7 +197,7 @@ static void vmw_fb_dirty_flush(struct work_struct *work) (void) ttm_read_lock(&vmw_priv->reservation_sem, false); (void) ttm_bo_reserve(&vbo->base, false, false, NULL); - virtual = vmw_dma_buffer_map_and_cache(vbo); + virtual = vmw_bo_map_and_cache(vbo); if (!virtual) goto out_unreserve; @@ -391,9 +391,9 @@ static void vmw_fb_imageblit(struct fb_info *info, const struct fb_image *image) */ static int vmw_fb_create_bo(struct vmw_private 
*vmw_priv, - size_t size, struct vmw_dma_buffer **out) + size_t size, struct vmw_buffer_object **out) { - struct vmw_dma_buffer *vmw_bo; + struct vmw_buffer_object *vmw_bo; int ret; (void) ttm_write_lock(&vmw_priv->reservation_sem, false); @@ -404,10 +404,10 @@ static int vmw_fb_create_bo(struct vmw_private *vmw_priv, goto err_unlock; } - ret = vmw_dmabuf_init(vmw_priv, vmw_bo, size, + ret = vmw_bo_init(vmw_priv, vmw_bo, size, &vmw_sys_placement, false, - &vmw_dmabuf_bo_free); + &vmw_bo_bo_free); if (unlikely(ret != 0)) goto err_unlock; /* init frees the buffer on failure */ @@ -439,38 +439,13 @@ static int vmw_fb_compute_depth(struct fb_var_screeninfo *var, static int vmwgfx_set_config_internal(struct drm_mode_set *set) { struct drm_crtc *crtc = set->crtc; - struct drm_framebuffer *fb; - struct drm_crtc *tmp; - struct drm_device *dev = set->crtc->dev; struct drm_modeset_acquire_ctx ctx; int ret; drm_modeset_acquire_init(&ctx, 0); restart: - /* - * NOTE: ->set_config can also disable other crtcs (if we steal all - * connectors from it), hence we need to refcount the fbs across all - * crtcs. Atomic modeset will have saner semantics ... - */ - drm_for_each_crtc(tmp, dev) - tmp->primary->old_fb = tmp->primary->fb; - - fb = set->fb; - ret = crtc->funcs->set_config(set, &ctx); - if (ret == 0) { - crtc->primary->crtc = crtc; - crtc->primary->fb = fb; - } - - drm_for_each_crtc(tmp, dev) { - if (tmp->primary->fb) - drm_framebuffer_get(tmp->primary->fb); - if (tmp->primary->old_fb) - drm_framebuffer_put(tmp->primary->old_fb); - tmp->primary->old_fb = NULL; - } if (ret == -EDEADLK) { drm_modeset_backoff(&ctx); @@ -516,7 +491,7 @@ static int vmw_fb_kms_detach(struct vmw_fb_par *par, } if (par->vmw_bo && detach_bo && unref_bo) - vmw_dmabuf_unreference(&par->vmw_bo); + vmw_bo_unreference(&par->vmw_bo); return 0; } diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c index 9ed544f8958f..3d546d409334 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c @@ -1,7 +1,7 @@ +// SPDX-License-Identifier: GPL-2.0 OR MIT /************************************************************************** * - * Copyright © 2011-2014 VMware, Inc., Palo Alto, CA., USA - * All Rights Reserved. + * Copyright 2011-2014 VMware, Inc., Palo Alto, CA., USA * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the @@ -175,7 +175,6 @@ static long vmw_fence_wait(struct dma_fence *f, bool intr, signed long timeout) struct vmw_private *dev_priv = fman->dev_priv; struct vmwgfx_wait_cb cb; long ret = timeout; - unsigned long irq_flags; if (likely(vmw_fence_obj_signaled(fence))) return timeout; @@ -183,7 +182,7 @@ static long vmw_fence_wait(struct dma_fence *f, bool intr, signed long timeout) vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC); vmw_seqno_waiter_add(dev_priv); - spin_lock_irqsave(f->lock, irq_flags); + spin_lock(f->lock); if (intr && signal_pending(current)) { ret = -ERESTARTSYS; @@ -194,30 +193,45 @@ static long vmw_fence_wait(struct dma_fence *f, bool intr, signed long timeout) cb.task = current; list_add(&cb.base.node, &f->cb_list); - while (ret > 0) { + for (;;) { __vmw_fences_update(fman); - if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &f->flags)) - break; + /* + * We can use the barrier free __set_current_state() since + * DMA_FENCE_FLAG_SIGNALED_BIT + wakeup is protected by the + * fence spinlock. 
+ */ if (intr) __set_current_state(TASK_INTERRUPTIBLE); else __set_current_state(TASK_UNINTERRUPTIBLE); - spin_unlock_irqrestore(f->lock, irq_flags); - ret = schedule_timeout(ret); + if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &f->flags)) { + if (ret == 0 && timeout > 0) + ret = 1; + break; + } - spin_lock_irqsave(f->lock, irq_flags); - if (ret > 0 && intr && signal_pending(current)) + if (intr && signal_pending(current)) { ret = -ERESTARTSYS; - } + break; + } + if (ret == 0) + break; + + spin_unlock(f->lock); + + ret = schedule_timeout(ret); + + spin_lock(f->lock); + } + __set_current_state(TASK_RUNNING); if (!list_empty(&cb.base.node)) list_del(&cb.base.node); - __set_current_state(TASK_RUNNING); out: - spin_unlock_irqrestore(f->lock, irq_flags); + spin_unlock(f->lock); vmw_seqno_waiter_remove(dev_priv); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h index 20224dba9d8e..c9382933c2b9 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h @@ -1,7 +1,7 @@ +/* SPDX-License-Identifier: GPL-2.0 OR MIT */ /************************************************************************** * - * Copyright © 2011-2012 VMware, Inc., Palo Alto, CA., USA - * All Rights Reserved. + * Copyright 2011-2012 VMware, Inc., Palo Alto, CA., USA * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c index a1c68e6a689e..d0fd147ef75f 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c @@ -1,7 +1,7 @@ +// SPDX-License-Identifier: GPL-2.0 OR MIT /************************************************************************** * - * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA - * All Rights Reserved. + * Copyright 2009-2015 VMware, Inc., Palo Alto, CA., USA * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c index 66ffa1d4759c..007a0cc7f232 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c @@ -1,7 +1,7 @@ +// SPDX-License-Identifier: GPL-2.0 OR MIT /************************************************************************** * - * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA - * All Rights Reserved. + * Copyright 2009-2015 VMware, Inc., Palo Alto, CA., USA * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c index f2f9d88131f2..ddb1e9365a3e 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c @@ -1,7 +1,7 @@ +// SPDX-License-Identifier: GPL-2.0 OR MIT /************************************************************************** * - * Copyright (c) 2007-2010 VMware, Inc., Palo Alto, CA., USA - * All Rights Reserved. 
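The reworked vmw_fence_wait() above is the classic prepare-to-wait loop. Condensed to its shape (a sketch only: the __vmw_fences_update() call and the callback that delivers the wakeup are omitted), it works because DMA_FENCE_FLAG_SIGNALED_BIT and the wakeup are both published under the fence spinlock f->lock, which is what makes the barrier-free __set_current_state() safe:

spin_lock(f->lock);
for (;;) {
	__set_current_state(intr ? TASK_INTERRUPTIBLE
				 : TASK_UNINTERRUPTIBLE);
	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &f->flags))
		break;				/* signaled */
	if (intr && signal_pending(current)) {
		ret = -ERESTARTSYS;		/* interrupted */
		break;
	}
	if (ret == 0)
		break;				/* timed out */
	spin_unlock(f->lock);
	ret = schedule_timeout(ret);		/* sleep until wakeup */
	spin_lock(f->lock);
}
__set_current_state(TASK_RUNNING);
spin_unlock(f->lock);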
+ * Copyright 2007-2010 VMware, Inc., Palo Alto, CA., USA * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c index c5e8eae0dbe2..172a6ba6539c 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c @@ -1,7 +1,7 @@ +// SPDX-License-Identifier: GPL-2.0 OR MIT /************************************************************************** * - * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA - * All Rights Reserved. + * Copyright 2009-2015 VMware, Inc., Palo Alto, CA., USA * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the @@ -56,6 +56,9 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data, case DRM_VMW_PARAM_HW_CAPS: param->value = dev_priv->capabilities; break; + case DRM_VMW_PARAM_HW_CAPS2: + param->value = dev_priv->capabilities2; + break; case DRM_VMW_PARAM_FIFO_CAPS: param->value = dev_priv->fifo.capabilities; break; @@ -113,6 +116,9 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data, case DRM_VMW_PARAM_DX: param->value = dev_priv->has_dx; break; + case DRM_VMW_PARAM_SM4_1: + param->value = dev_priv->has_sm4_1; + break; default: return -EINVAL; } @@ -122,15 +128,12 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data, static u32 vmw_mask_multisample(unsigned int cap, u32 fmt_value) { - /* If the header is updated, update the format test as well! */ - BUILD_BUG_ON(SVGA3D_DEVCAP_DXFMT_BC5_UNORM + 1 != SVGA3D_DEVCAP_MAX); - - if (cap >= SVGA3D_DEVCAP_DXFMT_X8R8G8B8 && - cap <= SVGA3D_DEVCAP_DXFMT_BC5_UNORM) - fmt_value &= ~(SVGADX_DXFMT_MULTISAMPLE_2 | - SVGADX_DXFMT_MULTISAMPLE_4 | - SVGADX_DXFMT_MULTISAMPLE_8); - else if (cap == SVGA3D_DEVCAP_MULTISAMPLE_MASKABLESAMPLES) + /* + * A version of user-space exists which uses MULTISAMPLE_MASKABLESAMPLES + * to check the sample count supported by the virtual device. Since there + * never was support for a multisample count on the backing MOB, return 0. + */ + if (cap == SVGA3D_DEVCAP_MULTISAMPLE_MASKABLESAMPLES) return 0; return fmt_value; @@ -377,8 +380,8 @@ int vmw_present_readback_ioctl(struct drm_device *dev, void *data, } vfb = vmw_framebuffer_to_vfb(fb); - if (!vfb->dmabuf) { - DRM_ERROR("Framebuffer not dmabuf backed.\n"); + if (!vfb->bo) { + DRM_ERROR("Framebuffer not buffer backed.\n"); ret = -EINVAL; goto out_no_ttm_lock; } diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c index b9239ba067c4..c3ad4478266b 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c @@ -1,7 +1,7 @@ +// SPDX-License-Identifier: GPL-2.0 OR MIT /************************************************************************** * - * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA - * All Rights Reserved.
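With DRM_VMW_PARAM_HW_CAPS2 and DRM_VMW_PARAM_SM4_1 wired into vmw_getparam_ioctl() above, user-space can feature-detect before taking SM4_1 paths. A hedged user-space sketch: struct drm_vmw_getparam_arg and the DRM_VMW_GETPARAM command number come from the vmwgfx UAPI header, while the DRM_IOCTL_VMW_GETPARAM encoding below is assembled locally, as user-space drivers typically do:

#include <sys/ioctl.h>
#include <drm/drm.h>
#include <drm/vmwgfx_drm.h>

#define DRM_IOCTL_VMW_GETPARAM					\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GETPARAM,		\
		 struct drm_vmw_getparam_arg)

static int vmw_has_sm4_1(int fd)
{
	struct drm_vmw_getparam_arg arg = {
		.param = DRM_VMW_PARAM_SM4_1,
	};

	/* Older kernels reject the unknown parameter with -EINVAL. */
	if (ioctl(fd, DRM_IOCTL_VMW_GETPARAM, &arg) != 0)
		return 0;

	return arg.value != 0;
}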
+ * Copyright 2009-2015 VMware, Inc., Palo Alto, CA., USA * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c index 01f2dc9e6f52..23beff5d8e3c 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c @@ -1,7 +1,7 @@ +// SPDX-License-Identifier: GPL-2.0 OR MIT /************************************************************************** * - * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA - * All Rights Reserved. + * Copyright 2009-2015 VMware, Inc., Palo Alto, CA., USA * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the @@ -85,10 +85,10 @@ static int vmw_cursor_update_image(struct vmw_private *dev_priv, return 0; } -static int vmw_cursor_update_dmabuf(struct vmw_private *dev_priv, - struct vmw_dma_buffer *dmabuf, - u32 width, u32 height, - u32 hotspotX, u32 hotspotY) +static int vmw_cursor_update_bo(struct vmw_private *dev_priv, + struct vmw_buffer_object *bo, + u32 width, u32 height, + u32 hotspotX, u32 hotspotY) { struct ttm_bo_kmap_obj map; unsigned long kmap_offset; @@ -100,13 +100,13 @@ static int vmw_cursor_update_dmabuf(struct vmw_private *dev_priv, kmap_offset = 0; kmap_num = (width*height*4 + PAGE_SIZE - 1) >> PAGE_SHIFT; - ret = ttm_bo_reserve(&dmabuf->base, true, false, NULL); + ret = ttm_bo_reserve(&bo->base, true, false, NULL); if (unlikely(ret != 0)) { DRM_ERROR("reserve failed\n"); return -EINVAL; } - ret = ttm_bo_kmap(&dmabuf->base, kmap_offset, kmap_num, &map); + ret = ttm_bo_kmap(&bo->base, kmap_offset, kmap_num, &map); if (unlikely(ret != 0)) goto err_unreserve; @@ -116,7 +116,7 @@ static int vmw_cursor_update_dmabuf(struct vmw_private *dev_priv, ttm_bo_kunmap(&map); err_unreserve: - ttm_bo_unreserve(&dmabuf->base); + ttm_bo_unreserve(&bo->base); return ret; } @@ -352,13 +352,13 @@ vmw_du_cursor_plane_prepare_fb(struct drm_plane *plane, if (vps->surf) vmw_surface_unreference(&vps->surf); - if (vps->dmabuf) - vmw_dmabuf_unreference(&vps->dmabuf); + if (vps->bo) + vmw_bo_unreference(&vps->bo); if (fb) { - if (vmw_framebuffer_to_vfb(fb)->dmabuf) { - vps->dmabuf = vmw_framebuffer_to_vfbd(fb)->buffer; - vmw_dmabuf_reference(vps->dmabuf); + if (vmw_framebuffer_to_vfb(fb)->bo) { + vps->bo = vmw_framebuffer_to_vfbd(fb)->buffer; + vmw_bo_reference(vps->bo); } else { vps->surf = vmw_framebuffer_to_vfbs(fb)->surface; vmw_surface_reference(vps->surf); @@ -390,7 +390,7 @@ vmw_du_cursor_plane_atomic_update(struct drm_plane *plane, } du->cursor_surface = vps->surf; - du->cursor_dmabuf = vps->dmabuf; + du->cursor_bo = vps->bo; if (vps->surf) { du->cursor_age = du->cursor_surface->snooper.age; @@ -399,11 +399,11 @@ vmw_du_cursor_plane_atomic_update(struct drm_plane *plane, vps->surf->snooper.image, 64, 64, hotspot_x, hotspot_y); - } else if (vps->dmabuf) { - ret = vmw_cursor_update_dmabuf(dev_priv, vps->dmabuf, - plane->state->crtc_w, - plane->state->crtc_h, - hotspot_x, hotspot_y); + } else if (vps->bo) { + ret = vmw_cursor_update_bo(dev_priv, vps->bo, + plane->state->crtc_w, + plane->state->crtc_h, + hotspot_x, hotspot_y); } else { vmw_cursor_update_position(dev_priv, false, 0, 0); return; @@ -519,7 +519,7 @@ int vmw_du_cursor_plane_atomic_check(struct drm_plane *plane, ret = -EINVAL; } - if (!vmw_framebuffer_to_vfb(fb)->dmabuf) + if (!vmw_framebuffer_to_vfb(fb)->bo) surface = 
vmw_framebuffer_to_vfbs(fb)->surface; if (surface && !surface->snooper.image) { @@ -535,9 +535,9 @@ int vmw_du_crtc_atomic_check(struct drm_crtc *crtc, struct drm_crtc_state *new_state) { struct vmw_display_unit *du = vmw_crtc_to_du(new_state->crtc); - int connector_mask = 1 << drm_connector_index(&du->connector); + int connector_mask = drm_connector_mask(&du->connector); bool has_primary = new_state->plane_mask & - BIT(drm_plane_index(crtc->primary)); + drm_plane_mask(crtc->primary); /* We always want to have an active plane with an active CRTC */ if (has_primary != new_state->enable) @@ -687,8 +687,8 @@ vmw_du_plane_duplicate_state(struct drm_plane *plane) if (vps->surf) (void) vmw_surface_reference(vps->surf); - if (vps->dmabuf) - (void) vmw_dmabuf_reference(vps->dmabuf); + if (vps->bo) + (void) vmw_bo_reference(vps->bo); state = &vps->base; @@ -745,8 +745,8 @@ vmw_du_plane_destroy_state(struct drm_plane *plane, if (vps->surf) vmw_surface_unreference(&vps->surf); - if (vps->dmabuf) - vmw_dmabuf_unreference(&vps->dmabuf); + if (vps->bo) + vmw_bo_unreference(&vps->bo); drm_atomic_helper_plane_destroy_state(plane, state); } @@ -902,12 +902,12 @@ static int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer, /** * vmw_kms_readback - Perform a readback from the screen system to - * a dma-buffer backed framebuffer. + * a buffer-object backed framebuffer. * * @dev_priv: Pointer to the device private structure. * @file_priv: Pointer to a struct drm_file identifying the caller. * Must be set to NULL if @user_fence_rep is NULL. - * @vfb: Pointer to the dma-buffer backed framebuffer. + * @vfb: Pointer to the buffer-object backed framebuffer. * @user_fence_rep: User-space provided structure for fence information. * Must be set to non-NULL if @file_priv is non-NULL. * @vclips: Array of clip rects. 
@@ -951,7 +951,7 @@ static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv, struct vmw_framebuffer **out, const struct drm_mode_fb_cmd2 *mode_cmd, - bool is_dmabuf_proxy) + bool is_bo_proxy) { struct drm_device *dev = dev_priv->dev; @@ -1019,7 +1019,7 @@ static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv, drm_helper_mode_fill_fb_struct(dev, &vfbs->base.base, mode_cmd); vfbs->surface = vmw_surface_reference(surface); vfbs->base.user_handle = mode_cmd->handles[0]; - vfbs->is_dmabuf_proxy = is_dmabuf_proxy; + vfbs->is_bo_proxy = is_bo_proxy; *out = &vfbs->base; @@ -1038,30 +1038,30 @@ out_err1: } /* - * Dmabuf framebuffer code + * Buffer-object framebuffer code */ -static void vmw_framebuffer_dmabuf_destroy(struct drm_framebuffer *framebuffer) +static void vmw_framebuffer_bo_destroy(struct drm_framebuffer *framebuffer) { - struct vmw_framebuffer_dmabuf *vfbd = + struct vmw_framebuffer_bo *vfbd = vmw_framebuffer_to_vfbd(framebuffer); drm_framebuffer_cleanup(framebuffer); - vmw_dmabuf_unreference(&vfbd->buffer); + vmw_bo_unreference(&vfbd->buffer); if (vfbd->base.user_obj) ttm_base_object_unref(&vfbd->base.user_obj); kfree(vfbd); } -static int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer *framebuffer, - struct drm_file *file_priv, - unsigned flags, unsigned color, - struct drm_clip_rect *clips, - unsigned num_clips) +static int vmw_framebuffer_bo_dirty(struct drm_framebuffer *framebuffer, + struct drm_file *file_priv, + unsigned int flags, unsigned int color, + struct drm_clip_rect *clips, + unsigned int num_clips) { struct vmw_private *dev_priv = vmw_priv(framebuffer->dev); - struct vmw_framebuffer_dmabuf *vfbd = + struct vmw_framebuffer_bo *vfbd = vmw_framebuffer_to_vfbd(framebuffer); struct drm_clip_rect norect; int ret, increment = 1; @@ -1092,13 +1092,13 @@ static int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer *framebuffer, true, true, NULL); break; case vmw_du_screen_object: - ret = vmw_kms_sou_do_dmabuf_dirty(dev_priv, &vfbd->base, - clips, NULL, num_clips, - increment, true, NULL, NULL); + ret = vmw_kms_sou_do_bo_dirty(dev_priv, &vfbd->base, + clips, NULL, num_clips, + increment, true, NULL, NULL); break; case vmw_du_legacy: - ret = vmw_kms_ldu_do_dmabuf_dirty(dev_priv, &vfbd->base, 0, 0, - clips, num_clips, increment); + ret = vmw_kms_ldu_do_bo_dirty(dev_priv, &vfbd->base, 0, 0, + clips, num_clips, increment); break; default: ret = -EINVAL; @@ -1114,23 +1114,23 @@ static int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer *framebuffer, return ret; } -static const struct drm_framebuffer_funcs vmw_framebuffer_dmabuf_funcs = { - .destroy = vmw_framebuffer_dmabuf_destroy, - .dirty = vmw_framebuffer_dmabuf_dirty, +static const struct drm_framebuffer_funcs vmw_framebuffer_bo_funcs = { + .destroy = vmw_framebuffer_bo_destroy, + .dirty = vmw_framebuffer_bo_dirty, }; /** - * Pin the dmabuffer in a location suitable for access by the + * Pin the buffer object in a location suitable for access by the * display system. */ static int vmw_framebuffer_pin(struct vmw_framebuffer *vfb) { struct vmw_private *dev_priv = vmw_priv(vfb->base.dev); - struct vmw_dma_buffer *buf; + struct vmw_buffer_object *buf; struct ttm_placement *placement; int ret; - buf = vfb->dmabuf ? vmw_framebuffer_to_vfbd(&vfb->base)->buffer : + buf = vfb->bo ?
vmw_framebuffer_to_vfbd(&vfb->base)->buffer : vmw_framebuffer_to_vfbs(&vfb->base)->surface->res.backup; if (!buf) @@ -1139,12 +1139,12 @@ static int vmw_framebuffer_pin(struct vmw_framebuffer *vfb) switch (dev_priv->active_display_unit) { case vmw_du_legacy: vmw_overlay_pause_all(dev_priv); - ret = vmw_dmabuf_pin_in_start_of_vram(dev_priv, buf, false); + ret = vmw_bo_pin_in_start_of_vram(dev_priv, buf, false); vmw_overlay_resume_all(dev_priv); break; case vmw_du_screen_object: case vmw_du_screen_target: - if (vfb->dmabuf) { + if (vfb->bo) { if (dev_priv->capabilities & SVGA_CAP_3D) { /* * Use surface DMA to get content to @@ -1160,8 +1160,7 @@ static int vmw_framebuffer_pin(struct vmw_framebuffer *vfb) placement = &vmw_mob_placement; } - return vmw_dmabuf_pin_in_placement(dev_priv, buf, placement, - false); + return vmw_bo_pin_in_placement(dev_priv, buf, placement, false); default: return -EINVAL; } @@ -1172,36 +1171,36 @@ static int vmw_framebuffer_unpin(struct vmw_framebuffer *vfb) { struct vmw_private *dev_priv = vmw_priv(vfb->base.dev); - struct vmw_dma_buffer *buf; + struct vmw_buffer_object *buf; - buf = vfb->dmabuf ? vmw_framebuffer_to_vfbd(&vfb->base)->buffer : + buf = vfb->bo ? vmw_framebuffer_to_vfbd(&vfb->base)->buffer : vmw_framebuffer_to_vfbs(&vfb->base)->surface->res.backup; if (WARN_ON(!buf)) return 0; - return vmw_dmabuf_unpin(dev_priv, buf, false); + return vmw_bo_unpin(dev_priv, buf, false); } /** - * vmw_create_dmabuf_proxy - create a proxy surface for the DMA buf + * vmw_create_bo_proxy - create a proxy surface for the buffer object * * @dev: DRM device * @mode_cmd: parameters for the new surface - * @dmabuf_mob: MOB backing the DMA buf + * @bo_mob: MOB backing the buffer object * @srf_out: newly created surface * - * When the content FB is a DMA buf, we create a surface as a proxy to the + * When the content FB is a buffer object, we create a surface as a proxy to the * same buffer. This way we can do a surface copy rather than a surface DMA. * This is a more efficient approach. * * RETURNS: * 0 on success, error code otherwise */ -static int vmw_create_dmabuf_proxy(struct drm_device *dev, - const struct drm_mode_fb_cmd2 *mode_cmd, - struct vmw_dma_buffer *dmabuf_mob, - struct vmw_surface **srf_out) +static int vmw_create_bo_proxy(struct drm_device *dev, + const struct drm_mode_fb_cmd2 *mode_cmd, + struct vmw_buffer_object *bo_mob, + struct vmw_surface **srf_out) { uint32_t format; struct drm_vmw_size content_base_size = {0}; @@ -1239,15 +1238,17 @@ static int vmw_create_dmabuf_proxy(struct drm_device *dev, content_base_size.depth = 1; ret = vmw_surface_gb_priv_define(dev, - 0, /* kernel visible only */ - 0, /* flags */ - format, - true, /* can be a scanout buffer */ - 1, /* num of mip levels */ - 0, - 0, - content_base_size, - srf_out); + 0, /* kernel visible only */ + 0, /* flags */ + format, + true, /* can be a scanout buffer */ + 1, /* num of mip levels */ + 0, + 0, + content_base_size, + SVGA3D_MS_PATTERN_NONE, + SVGA3D_MS_QUALITY_NONE, + srf_out); if (ret) { DRM_ERROR("Failed to allocate proxy content buffer\n"); return ret; } @@ -1258,8 +1259,8 @@ static int vmw_create_dmabuf_proxy(struct drm_device *dev, /* Reserve and switch the backing mob.
*/ mutex_lock(&res->dev_priv->cmdbuf_mutex); (void) vmw_resource_reserve(res, false, true); - vmw_dmabuf_unreference(&res->backup); - res->backup = vmw_dmabuf_reference(dmabuf_mob); + vmw_bo_unreference(&res->backup); + res->backup = vmw_bo_reference(bo_mob); res->backup_offset = 0; vmw_resource_unreserve(res, false, NULL, 0); mutex_unlock(&res->dev_priv->cmdbuf_mutex); @@ -1269,21 +1270,21 @@ static int vmw_create_dmabuf_proxy(struct drm_device *dev, -static int vmw_kms_new_framebuffer_dmabuf(struct vmw_private *dev_priv, - struct vmw_dma_buffer *dmabuf, - struct vmw_framebuffer **out, - const struct drm_mode_fb_cmd2 - *mode_cmd) +static int vmw_kms_new_framebuffer_bo(struct vmw_private *dev_priv, + struct vmw_buffer_object *bo, + struct vmw_framebuffer **out, + const struct drm_mode_fb_cmd2 + *mode_cmd) { struct drm_device *dev = dev_priv->dev; - struct vmw_framebuffer_dmabuf *vfbd; + struct vmw_framebuffer_bo *vfbd; unsigned int requested_size; struct drm_format_name_buf format_name; int ret; requested_size = mode_cmd->height * mode_cmd->pitches[0]; - if (unlikely(requested_size > dmabuf->base.num_pages * PAGE_SIZE)) { + if (unlikely(requested_size > bo->base.num_pages * PAGE_SIZE)) { DRM_ERROR("Screen buffer object size is too small " "for requested mode.\n"); return -EINVAL; } @@ -1312,20 +1313,20 @@ static int vmw_kms_new_framebuffer_dmabuf(struct vmw_private *dev_priv, } drm_helper_mode_fill_fb_struct(dev, &vfbd->base.base, mode_cmd); - vfbd->base.dmabuf = true; - vfbd->buffer = vmw_dmabuf_reference(dmabuf); + vfbd->base.bo = true; + vfbd->buffer = vmw_bo_reference(bo); vfbd->base.user_handle = mode_cmd->handles[0]; *out = &vfbd->base; ret = drm_framebuffer_init(dev, &vfbd->base.base, - &vmw_framebuffer_dmabuf_funcs); + &vmw_framebuffer_bo_funcs); if (ret) goto out_err2; return 0; out_err2: - vmw_dmabuf_unreference(&dmabuf); + vmw_bo_unreference(&bo); kfree(vfbd); out_err1: return ret; @@ -1354,57 +1355,57 @@ vmw_kms_srf_ok(struct vmw_private *dev_priv, uint32_t width, uint32_t height) * vmw_kms_new_framebuffer - Create a new framebuffer. * * @dev_priv: Pointer to device private struct. - * @dmabuf: Pointer to dma buffer to wrap the kms framebuffer around. - * Either @dmabuf or @surface must be NULL. + * @bo: Pointer to buffer object to wrap the kms framebuffer around. + * Either @bo or @surface must be NULL. * @surface: Pointer to a surface to wrap the kms framebuffer around. - * Either @dmabuf or @surface must be NULL. - * @only_2d: No presents will occur to this dma buffer based framebuffer. This - * Helps the code to do some important optimizations. + * Either @bo or @surface must be NULL. + * @only_2d: No presents will occur to this buffer object based framebuffer. + * This helps the code to do some important optimizations. + * @mode_cmd: Frame-buffer metadata. */ struct vmw_framebuffer * vmw_kms_new_framebuffer(struct vmw_private *dev_priv, - struct vmw_dma_buffer *dmabuf, + struct vmw_buffer_object *bo, struct vmw_surface *surface, bool only_2d, const struct drm_mode_fb_cmd2 *mode_cmd) { struct vmw_framebuffer *vfb = NULL; - bool is_dmabuf_proxy = false; + bool is_bo_proxy = false; int ret; /* * We cannot use the SurfaceDMA command in a non-accelerated VM, - * therefore, wrap the DMA buf in a surface so we can use the + * therefore, wrap the buffer object in a surface so we can use the * SurfaceCopy command.
*/ if (vmw_kms_srf_ok(dev_priv, mode_cmd->width, mode_cmd->height) && - dmabuf && only_2d && + bo && only_2d && mode_cmd->width > 64 && /* Don't create a proxy for cursor */ dev_priv->active_display_unit == vmw_du_screen_target) { - ret = vmw_create_dmabuf_proxy(dev_priv->dev, mode_cmd, - dmabuf, &surface); + ret = vmw_create_bo_proxy(dev_priv->dev, mode_cmd, + bo, &surface); if (ret) return ERR_PTR(ret); - is_dmabuf_proxy = true; + is_bo_proxy = true; } /* Create the new framebuffer depending on what we have */ if (surface) { ret = vmw_kms_new_framebuffer_surface(dev_priv, surface, &vfb, mode_cmd, - is_dmabuf_proxy); + is_bo_proxy); /* - * vmw_create_dmabuf_proxy() adds a reference that is no longer + * vmw_create_bo_proxy() adds a reference that is no longer * needed */ - if (is_dmabuf_proxy) + if (is_bo_proxy) vmw_surface_unreference(&surface); - } else if (dmabuf) { - ret = vmw_kms_new_framebuffer_dmabuf(dev_priv, dmabuf, &vfb, - mode_cmd); + } else if (bo) { + ret = vmw_kms_new_framebuffer_bo(dev_priv, bo, &vfb, + mode_cmd); } else { BUG(); } @@ -1430,23 +1431,10 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev, struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; struct vmw_framebuffer *vfb = NULL; struct vmw_surface *surface = NULL; - struct vmw_dma_buffer *bo = NULL; + struct vmw_buffer_object *bo = NULL; struct ttm_base_object *user_obj; int ret; - /** - * This code should be conditioned on Screen Objects not being used. - * If screen objects are used, we can allocate a GMR to hold the - * requested framebuffer. - */ - - if (!vmw_kms_validate_mode_vram(dev_priv, - mode_cmd->pitches[0], - mode_cmd->height)) { - DRM_ERROR("Requested mode exceed bounding box limit.\n"); - return ERR_PTR(-ENOMEM); - } - /* * Take a reference on the user object of the resource * backing the kms fb. This ensures that user-space handle @@ -1466,7 +1454,7 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev, * End conditioned code. */ - /* returns either a dmabuf or surface */ + /* returns either a bo or surface */ ret = vmw_user_lookup_handle(dev_priv, tfile, mode_cmd->handles[0], &surface, &bo); @@ -1494,7 +1482,7 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev, err_out: /* vmw_user_lookup_handle takes one ref so does new_fb */ if (bo) - vmw_dmabuf_unreference(&bo); + vmw_bo_unreference(&bo); if (surface) vmw_surface_unreference(&surface); @@ -1508,7 +1496,168 @@ err_out: return &vfb->base; } +/** + * vmw_kms_check_display_memory - Validates display memory required for a + * topology + * @dev: DRM device + * @num_rects: number of drm_rect in rects + * @rects: array of drm_rect representing the topology to validate indexed by + * crtc index. + * + * Returns: + * 0 on success otherwise negative error code + */ +static int vmw_kms_check_display_memory(struct drm_device *dev, + uint32_t num_rects, + struct drm_rect *rects) +{ + struct vmw_private *dev_priv = vmw_priv(dev); + struct drm_mode_config *mode_config = &dev->mode_config; + struct drm_rect bounding_box = {0}; + u64 total_pixels = 0, pixel_mem, bb_mem; + int i; + + for (i = 0; i < num_rects; i++) { + /* + * Currently this check limits the topology to the max + * texture/screentarget size. This should change in the future + * when user-space supports multiple framebuffers with a topology.
+ */ + if (rects[i].x1 < 0 || rects[i].y1 < 0 || + rects[i].x2 > mode_config->max_width || + rects[i].y2 > mode_config->max_height) { + DRM_ERROR("Invalid GUI layout.\n"); + return -EINVAL; + } + /* Bounding box upper left is at (0,0). */ + if (rects[i].x2 > bounding_box.x2) + bounding_box.x2 = rects[i].x2; + + if (rects[i].y2 > bounding_box.y2) + bounding_box.y2 = rects[i].y2; + + total_pixels += (u64) drm_rect_width(&rects[i]) * + (u64) drm_rect_height(&rects[i]); + } + + /* Virtual SVGA device primary limits are always in 32-bpp. */ + pixel_mem = total_pixels * 4; + + /* + * For HV10 and below, prim_bb_mem is the VRAM size. When + * SVGA_REG_MAX_PRIMARY_BOUNDING_BOX_MEM is not present, the VRAM size + * is the limit on the primary bounding box. + */ + if (pixel_mem > dev_priv->prim_bb_mem) { + DRM_ERROR("Combined output size too large.\n"); + return -EINVAL; + } + + /* SVGA_CAP_NO_BB_RESTRICTION is available for STDU only. */ + if (dev_priv->active_display_unit != vmw_du_screen_target || + !(dev_priv->capabilities & SVGA_CAP_NO_BB_RESTRICTION)) { + bb_mem = (u64) bounding_box.x2 * bounding_box.y2 * 4; + + if (bb_mem > dev_priv->prim_bb_mem) { + DRM_ERROR("Topology is beyond supported limits.\n"); + return -EINVAL; + } + } + + return 0; +} + +/** + * vmw_kms_check_topology - Validates topology in drm_atomic_state + * @dev: DRM device + * @state: the driver state object + * + * Returns: + * 0 on success otherwise negative error code + */ +static int vmw_kms_check_topology(struct drm_device *dev, + struct drm_atomic_state *state) +{ + struct vmw_private *dev_priv = vmw_priv(dev); + struct drm_crtc_state *old_crtc_state, *new_crtc_state; + struct drm_rect *rects; + struct drm_crtc *crtc; + uint32_t i; + int ret = 0; + + rects = kcalloc(dev->mode_config.num_crtc, sizeof(struct drm_rect), + GFP_KERNEL); + if (!rects) + return -ENOMEM; + + mutex_lock(&dev_priv->requested_layout_mutex); + + drm_for_each_crtc(crtc, dev) { + struct vmw_display_unit *du = vmw_crtc_to_du(crtc); + struct drm_crtc_state *crtc_state = crtc->state; + + i = drm_crtc_index(crtc); + + if (crtc_state && crtc_state->enable) { + rects[i].x1 = du->gui_x; + rects[i].y1 = du->gui_y; + rects[i].x2 = du->gui_x + crtc_state->mode.hdisplay; + rects[i].y2 = du->gui_y + crtc_state->mode.vdisplay; + } + } + + /* Determine change to topology due to new atomic state */ + for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, + new_crtc_state, i) { + struct vmw_display_unit *du = vmw_crtc_to_du(crtc); + struct drm_connector *connector; + struct drm_connector_state *conn_state; + struct vmw_connector_state *vmw_conn_state; + + if (!new_crtc_state->enable && old_crtc_state->enable) { + rects[i].x1 = 0; + rects[i].y1 = 0; + rects[i].x2 = 0; + rects[i].y2 = 0; + continue; + } + + if (!du->pref_active) { + ret = -EINVAL; + goto clean; + } + + /* + * For vmwgfx each crtc has only one connector attached and it + * is not changed, so we don't really need to check + * crtc->connector_mask and iterate over it.
+ */ + connector = &du->connector; + conn_state = drm_atomic_get_connector_state(state, connector); + if (IS_ERR(conn_state)) { + ret = PTR_ERR(conn_state); + goto clean; + } + + vmw_conn_state = vmw_connector_state_to_vcs(conn_state); + vmw_conn_state->gui_x = du->gui_x; + vmw_conn_state->gui_y = du->gui_y; + + rects[i].x1 = du->gui_x; + rects[i].y1 = du->gui_y; + rects[i].x2 = du->gui_x + new_crtc_state->mode.hdisplay; + rects[i].y2 = du->gui_y + new_crtc_state->mode.vdisplay; + } + + ret = vmw_kms_check_display_memory(dev, dev->mode_config.num_crtc, + rects); + +clean: + mutex_unlock(&dev_priv->requested_layout_mutex); + kfree(rects); + return ret; +} /** * vmw_kms_atomic_check_modeset- validate state object for modeset changes @@ -1520,36 +1669,39 @@ err_out: * us to assign a value to mode->crtc_clock so that * drm_calc_timestamping_constants() won't throw an error message * - * RETURNS + * Returns: * Zero for success or -errno */ static int vmw_kms_atomic_check_modeset(struct drm_device *dev, struct drm_atomic_state *state) { - struct drm_crtc_state *crtc_state; struct drm_crtc *crtc; - struct vmw_private *dev_priv = vmw_priv(dev); - int i; - - for_each_new_crtc_in_state(state, crtc, crtc_state, i) { - unsigned long requested_bb_mem = 0; + struct drm_crtc_state *crtc_state; + bool need_modeset = false; + int i, ret; - if (dev_priv->active_display_unit == vmw_du_screen_target) { - if (crtc->primary->fb) { - int cpp = crtc->primary->fb->pitches[0] / - crtc->primary->fb->width; + ret = drm_atomic_helper_check(dev, state); + if (ret) + return ret; - requested_bb_mem += crtc->mode.hdisplay * cpp * - crtc->mode.vdisplay; - } + if (!state->allow_modeset) + return ret; - if (requested_bb_mem > dev_priv->prim_bb_mem) - return -EINVAL; - } + /* + * The legacy path does not set allow_modeset properly like + * @drm_atomic_helper_update_plane does. This would result in + * unnecessary calls to vmw_kms_check_topology, hence this extra + * set of checks. + */ + for_each_new_crtc_in_state(state, crtc, crtc_state, i) { + if (drm_atomic_crtc_needs_modeset(crtc_state)) + need_modeset = true; } - return drm_atomic_helper_check(dev, state); + if (need_modeset) + return vmw_kms_check_topology(dev, state); + + return ret; } static const struct drm_mode_config_funcs vmw_kms_funcs = { @@ -1841,40 +1993,49 @@ void vmw_disable_vblank(struct drm_device *dev, unsigned int pipe) { } - -/* - * Small shared kms functions. +/** + * vmw_du_update_layout - Update the display unit with topology from the + * resolution plugin and generate a DRM uevent + * @dev_priv: device private + * @num_rects: number of drm_rect in rects + * @rects: topology to update */ - -static int vmw_du_update_layout(struct vmw_private *dev_priv, unsigned num, - struct drm_vmw_rect *rects) +static int vmw_du_update_layout(struct vmw_private *dev_priv, + unsigned int num_rects, struct drm_rect *rects) { struct drm_device *dev = dev_priv->dev; struct vmw_display_unit *du; struct drm_connector *con; + struct drm_connector_list_iter conn_iter; - mutex_lock(&dev->mode_config.mutex); - -#if 0 - { - unsigned int i; - - DRM_INFO("%s: new layout ", __func__); - for (i = 0; i < num; i++) - DRM_INFO("(%i, %i %ux%u) ", rects[i].x, rects[i].y, - rects[i].w, rects[i].h); - DRM_INFO("\n"); + /* + * Currently only gui_x/y is protected with requested_layout_mutex.
 
 static const struct drm_mode_config_funcs vmw_kms_funcs = {
@@ -1841,40 +1993,49 @@ void vmw_disable_vblank(struct drm_device *dev, unsigned int pipe)
 {
 }
 
-
-/*
- * Small shared kms functions.
+/**
+ * vmw_du_update_layout - Update the display units with the topology from the
+ * resolution plugin and generate a DRM uevent
+ * @dev_priv: device private
+ * @num_rects: number of drm_rect in rects
+ * @rects: topology to update
  */
-
-static int vmw_du_update_layout(struct vmw_private *dev_priv, unsigned num,
-				struct drm_vmw_rect *rects)
+static int vmw_du_update_layout(struct vmw_private *dev_priv,
+				unsigned int num_rects, struct drm_rect *rects)
 {
 	struct drm_device *dev = dev_priv->dev;
 	struct vmw_display_unit *du;
 	struct drm_connector *con;
+	struct drm_connector_list_iter conn_iter;
 
-	mutex_lock(&dev->mode_config.mutex);
-
-#if 0
-	{
-		unsigned int i;
-
-		DRM_INFO("%s: new layout ", __func__);
-		for (i = 0; i < num; i++)
-			DRM_INFO("(%i, %i %ux%u) ", rects[i].x, rects[i].y,
-				 rects[i].w, rects[i].h);
-		DRM_INFO("\n");
+	/*
+	 * Currently only gui_x/y is protected with requested_layout_mutex.
+	 */
+	mutex_lock(&dev_priv->requested_layout_mutex);
+	drm_connector_list_iter_begin(dev, &conn_iter);
+	drm_for_each_connector_iter(con, &conn_iter) {
+		du = vmw_connector_to_du(con);
+		if (num_rects > du->unit) {
+			du->pref_width = drm_rect_width(&rects[du->unit]);
+			du->pref_height = drm_rect_height(&rects[du->unit]);
+			du->pref_active = true;
+			du->gui_x = rects[du->unit].x1;
+			du->gui_y = rects[du->unit].y1;
+		} else {
+			du->pref_width = 800;
+			du->pref_height = 600;
+			du->pref_active = false;
+			du->gui_x = 0;
+			du->gui_y = 0;
+		}
 	}
-#endif
+	drm_connector_list_iter_end(&conn_iter);
+	mutex_unlock(&dev_priv->requested_layout_mutex);
 
+	mutex_lock(&dev->mode_config.mutex);
 	list_for_each_entry(con, &dev->mode_config.connector_list, head) {
 		du = vmw_connector_to_du(con);
-		if (num > du->unit) {
-			du->pref_width = rects[du->unit].w;
-			du->pref_height = rects[du->unit].h;
-			du->pref_active = true;
-			du->gui_x = rects[du->unit].x;
-			du->gui_y = rects[du->unit].y;
+		if (num_rects > du->unit) {
 			drm_object_property_set_value
 			  (&con->base, dev->mode_config.suggested_x_property,
 			   du->gui_x);
@@ -1882,9 +2043,6 @@ static int vmw_du_update_layout(struct vmw_private *dev_priv, unsigned num,
 			  (&con->base, dev->mode_config.suggested_y_property,
 			   du->gui_y);
 		} else {
-			du->pref_width = 800;
-			du->pref_height = 600;
-			du->pref_active = false;
 			drm_object_property_set_value
 			  (&con->base, dev->mode_config.suggested_x_property,
 			   0);
@@ -1894,8 +2052,8 @@ static int vmw_du_update_layout(struct vmw_private *dev_priv, unsigned num,
 		}
 		con->status = vmw_du_connector_detect(con, true);
 	}
-
 	mutex_unlock(&dev->mode_config.mutex);
+
+	drm_sysfs_hotplug_event(dev);
 
 	return 0;
 
@@ -2110,7 +2268,7 @@ int vmw_du_connector_fill_modes(struct drm_connector *connector,
 		drm_mode_probed_add(connector, mode);
 	}
 
-	drm_mode_connector_list_update(connector);
+	drm_connector_list_update(connector);
 	/* Move the prefered mode first, help apps pick the right mode. */
 	drm_mode_sort(&connector->modes);
 
@@ -2195,7 +2353,25 @@ vmw_du_connector_atomic_get_property(struct drm_connector *connector,
 	return 0;
 }
 
-
+/**
+ * vmw_kms_update_layout_ioctl - Handler for DRM_VMW_UPDATE_LAYOUT ioctl
+ * @dev: drm device for the ioctl
+ * @data: data pointer for the ioctl
+ * @file_priv: drm file for the ioctl call
+ *
+ * Update the preferred topology of the display units as per the ioctl
+ * request. The topology is expressed as an array of drm_vmw_rect, e.g.
+ * [0 0 640 480] [640 0 800 600] [0 480 640 480]
+ *
+ * NOTE:
+ * The x and y offsets (upper left) in drm_vmw_rect cannot be less than 0.
+ * Besides the device limits on the topology, x + w and y + h (lower right)
+ * cannot be greater than INT_MAX, so a topology beyond these limits is
+ * rejected with an error.
+ *
+ * Returns:
+ * Zero on success, negative errno on failure.
+ */
 int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
 				struct drm_file *file_priv)
 {
@@ -2204,15 +2380,12 @@ int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
 		(struct drm_vmw_update_layout_arg *)data;
 	void __user *user_rects;
 	struct drm_vmw_rect *rects;
+	struct drm_rect *drm_rects;
 	unsigned rects_size;
-	int ret;
-	int i;
-	u64 total_pixels = 0;
-	struct drm_mode_config *mode_config = &dev->mode_config;
-	struct drm_vmw_rect bounding_box = {0};
+	int ret, i;
 
 	if (!arg->num_outputs) {
-		struct drm_vmw_rect def_rect = {0, 0, 800, 600};
+		struct drm_rect def_rect = {0, 0, 800, 600};
 		vmw_du_update_layout(dev_priv, 1, &def_rect);
 		return 0;
 	}
@@ -2231,52 +2404,29 @@ int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
 		goto out_free;
 	}
 
-	for (i = 0; i < arg->num_outputs; ++i) {
-		if (rects[i].x < 0 ||
-		    rects[i].y < 0 ||
-		    rects[i].x + rects[i].w > mode_config->max_width ||
-		    rects[i].y + rects[i].h > mode_config->max_height) {
-			DRM_ERROR("Invalid GUI layout.\n");
-			ret = -EINVAL;
-			goto out_free;
-		}
-
-		/*
-		 * bounding_box.w and bunding_box.h are used as
-		 * lower-right coordinates
-		 */
-		if (rects[i].x + rects[i].w > bounding_box.w)
-			bounding_box.w = rects[i].x + rects[i].w;
-
-		if (rects[i].y + rects[i].h > bounding_box.h)
-			bounding_box.h = rects[i].y + rects[i].h;
+	drm_rects = (struct drm_rect *)rects;
 
-		total_pixels += (u64) rects[i].w * (u64) rects[i].h;
-	}
-
-	if (dev_priv->active_display_unit == vmw_du_screen_target) {
-		/*
-		 * For Screen Targets, the limits for a toplogy are:
-		 *      1. Bounding box (assuming 32bpp) must be < prim_bb_mem
-		 *      2. Total pixels (assuming 32bpp) must be < prim_bb_mem
-		 */
-		u64 bb_mem = (u64) bounding_box.w * bounding_box.h * 4;
-		u64 pixel_mem = total_pixels * 4;
+	for (i = 0; i < arg->num_outputs; i++) {
+		struct drm_vmw_rect curr_rect;
 
-		if (bb_mem > dev_priv->prim_bb_mem) {
-			DRM_ERROR("Topology is beyond supported limits.\n");
-			ret = -EINVAL;
+		/* Verify user-space rects for overflow as the kernel uses drm_rect */
+		if ((rects[i].x + rects[i].w > INT_MAX) ||
+		    (rects[i].y + rects[i].h > INT_MAX)) {
+			ret = -ERANGE;
 			goto out_free;
 		}
 
-		if (pixel_mem > dev_priv->prim_bb_mem) {
-			DRM_ERROR("Combined output size too large\n");
-			ret = -EINVAL;
-			goto out_free;
-		}
+		curr_rect = rects[i];
+		drm_rects[i].x1 = curr_rect.x;
+		drm_rects[i].y1 = curr_rect.y;
+		drm_rects[i].x2 = curr_rect.x + curr_rect.w;
+		drm_rects[i].y2 = curr_rect.y + curr_rect.h;
 	}
 
-	vmw_du_update_layout(dev_priv, arg->num_outputs, rects);
+	ret = vmw_kms_check_display_memory(dev, arg->num_outputs, drm_rects);
+
+	if (ret == 0)
+		vmw_du_update_layout(dev_priv, arg->num_outputs, drm_rects);
 
 out_free:
 	kfree(rects);
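From user space this path is reached through the DRM_VMW_UPDATE_LAYOUT command. The snippet below is a hypothetical caller, not part of the patch; it assumes the UAPI definitions from <drm/vmwgfx_drm.h> (struct drm_vmw_rect and struct drm_vmw_update_layout_arg with its num_outputs and rects fields) and drmCommandWrite() from libdrm.

#include <stdint.h>
#include <xf86drm.h>
#include <drm/vmwgfx_drm.h>

/* Ask the driver for two side-by-side 1920x1080 screens. */
static int set_layout_side_by_side(int fd)
{
	struct drm_vmw_rect rects[2] = {
		{ .x = 0,    .y = 0, .w = 1920, .h = 1080 },
		{ .x = 1920, .y = 0, .w = 1920, .h = 1080 },
	};
	struct drm_vmw_update_layout_arg arg = {
		.num_outputs = 2,
		.rects = (uintptr_t)rects,
	};

	/*
	 * The driver converts each drm_vmw_rect to a drm_rect and returns
	 * -ERANGE if a lower-right corner would overflow an int, or
	 * -EINVAL if the topology exceeds the device memory limits.
	 */
	return drmCommandWrite(fd, DRM_VMW_UPDATE_LAYOUT, &arg, sizeof(arg));
}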
@@ -2322,9 +2472,10 @@ int vmw_kms_helper_dirty(struct vmw_private *dev_priv,
 	} else {
 		list_for_each_entry(crtc, &dev_priv->dev->mode_config.crtc_list,
 				    head) {
-			if (crtc->primary->fb != &framebuffer->base)
-				continue;
-			units[num_units++] = vmw_crtc_to_du(crtc);
+			struct drm_plane *plane = crtc->primary;
+
+			if (plane->state->fb == &framebuffer->base)
+				units[num_units++] = vmw_crtc_to_du(crtc);
 		}
 	}
 
@@ -2422,7 +2573,7 @@ int vmw_kms_helper_dirty(struct vmw_private *dev_priv,
  * interrupted by a signal.
*/ int vmw_kms_helper_buffer_prepare(struct vmw_private *dev_priv, - struct vmw_dma_buffer *buf, + struct vmw_buffer_object *buf, bool interruptible, bool validate_as_mob, bool for_cpu_blit) @@ -2454,7 +2605,7 @@ int vmw_kms_helper_buffer_prepare(struct vmw_private *dev_priv, * Helper to be used if an error forces the caller to undo the actions of * vmw_kms_helper_buffer_prepare. */ -void vmw_kms_helper_buffer_revert(struct vmw_dma_buffer *buf) +void vmw_kms_helper_buffer_revert(struct vmw_buffer_object *buf) { if (buf) ttm_bo_unreserve(&buf->base); @@ -2477,7 +2628,7 @@ void vmw_kms_helper_buffer_revert(struct vmw_dma_buffer *buf) */ void vmw_kms_helper_buffer_finish(struct vmw_private *dev_priv, struct drm_file *file_priv, - struct vmw_dma_buffer *buf, + struct vmw_buffer_object *buf, struct vmw_fence_obj **out_fence, struct drm_vmw_fence_rep __user * user_fence_rep) @@ -2489,7 +2640,7 @@ void vmw_kms_helper_buffer_finish(struct vmw_private *dev_priv, ret = vmw_execbuf_fence_commands(file_priv, dev_priv, &fence, file_priv ? &handle : NULL); if (buf) - vmw_fence_single_bo(&buf->base, fence); + vmw_bo_fence_single(&buf->base, fence); if (file_priv) vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv), ret, user_fence_rep, fence, @@ -2517,7 +2668,7 @@ void vmw_kms_helper_resource_revert(struct vmw_validation_ctx *ctx) struct vmw_resource *res = ctx->res; vmw_kms_helper_buffer_revert(ctx->buf); - vmw_dmabuf_unreference(&ctx->buf); + vmw_bo_unreference(&ctx->buf); vmw_resource_unreserve(res, false, NULL, 0); mutex_unlock(&res->dev_priv->cmdbuf_mutex); } @@ -2562,7 +2713,7 @@ int vmw_kms_helper_resource_prepare(struct vmw_resource *res, if (ret) goto out_unreserve; - ctx->buf = vmw_dmabuf_reference(res->backup); + ctx->buf = vmw_bo_reference(res->backup); } ret = vmw_resource_validate(res); if (ret) @@ -2595,7 +2746,7 @@ void vmw_kms_helper_resource_finish(struct vmw_validation_ctx *ctx, vmw_kms_helper_buffer_finish(res->dev_priv, NULL, ctx->buf, out_fence, NULL); - vmw_dmabuf_unreference(&ctx->buf); + vmw_bo_unreference(&ctx->buf); vmw_resource_unreserve(res, false, NULL, 0); mutex_unlock(&res->dev_priv->cmdbuf_mutex); } @@ -2806,6 +2957,7 @@ void vmw_kms_update_implicit_fb(struct vmw_private *dev_priv, struct drm_crtc *crtc) { struct vmw_display_unit *du = vmw_crtc_to_du(crtc); + struct drm_plane *plane = crtc->primary; struct vmw_framebuffer *vfb; mutex_lock(&dev_priv->global_kms_state_mutex); @@ -2813,7 +2965,7 @@ void vmw_kms_update_implicit_fb(struct vmw_private *dev_priv, if (!du->is_implicit) goto out_unlock; - vfb = vmw_framebuffer_to_vfb(crtc->primary->fb); + vfb = vmw_framebuffer_to_vfb(plane->state->fb); WARN_ON_ONCE(dev_priv->num_implicit != 1 && dev_priv->implicit_fb != vfb); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h index 6b7c012719f1..31311298ec0b 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h @@ -1,7 +1,7 @@ +/* SPDX-License-Identifier: GPL-2.0 OR MIT */ /************************************************************************** * - * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA - * All Rights Reserved. 
+ * Copyright 2009-2015 VMware, Inc., Palo Alto, CA., USA
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the
@@ -90,7 +90,7 @@ struct vmw_kms_dirty {
 #define vmw_framebuffer_to_vfbs(x) \
 	container_of(x, struct vmw_framebuffer_surface, base.base)
 #define vmw_framebuffer_to_vfbd(x) \
-	container_of(x, struct vmw_framebuffer_dmabuf, base.base)
+	container_of(x, struct vmw_framebuffer_bo, base.base)
 
 /**
  * Base class for framebuffers
@@ -102,7 +102,7 @@ struct vmw_framebuffer {
 	struct drm_framebuffer base;
 	int (*pin)(struct vmw_framebuffer *fb);
 	int (*unpin)(struct vmw_framebuffer *fb);
-	bool dmabuf;
+	bool bo;
 	struct ttm_base_object *user_obj;
 	uint32_t user_handle;
 };
@@ -117,15 +117,15 @@ struct vmw_clip_rect {
 struct vmw_framebuffer_surface {
 	struct vmw_framebuffer base;
 	struct vmw_surface *surface;
-	struct vmw_dma_buffer *buffer;
+	struct vmw_buffer_object *buffer;
 	struct list_head head;
-	bool is_dmabuf_proxy;  /* true if this is proxy surface for DMA buf */
+	bool is_bo_proxy;  /* true if this is proxy surface for DMA buf */
 };
 
 
-struct vmw_framebuffer_dmabuf {
+struct vmw_framebuffer_bo {
 	struct vmw_framebuffer base;
-	struct vmw_dma_buffer *buffer;
+	struct vmw_buffer_object *buffer;
 };
 
 
@@ -161,18 +161,18 @@ struct vmw_crtc_state {
  *
  * @base DRM plane object
  * @surf Display surface for STDU
- * @dmabuf display dmabuf for SOU
+ * @bo display bo for SOU
  * @content_fb_type Used by STDU.
- * @dmabuf_size Size of the dmabuf, used by Screen Object Display Unit
+ * @bo_size Size of the bo, used by Screen Object Display Unit
  * @pinned pin count for STDU display surface
  */
 struct vmw_plane_state {
 	struct drm_plane_state base;
 	struct vmw_surface *surf;
-	struct vmw_dma_buffer *dmabuf;
+	struct vmw_buffer_object *bo;
 
 	int content_fb_type;
-	unsigned long dmabuf_size;
+	unsigned long bo_size;
 
 	int pinned;
 
@@ -192,6 +192,24 @@ struct vmw_connector_state {
 	struct drm_connector_state base;
 
 	bool is_implicit;
+
+	/**
+	 * @gui_x:
+	 *
+	 * vmwgfx connector property representing the x position of this display
+	 * unit (a connector is synonymous with a display unit) in the overall
+	 * topology, i.e. what the device expects as xRoot when creating a screen.
+	 */
+	int gui_x;
+
+	/**
+	 * @gui_y:
+	 *
+	 * vmwgfx connector property representing the y position of this display
+	 * unit (a connector is synonymous with a display unit) in the overall
+	 * topology, i.e. what the device expects as yRoot when creating a screen.
+ */ + int gui_y; }; /** @@ -209,7 +227,7 @@ struct vmw_display_unit { struct drm_plane cursor; struct vmw_surface *cursor_surface; - struct vmw_dma_buffer *cursor_dmabuf; + struct vmw_buffer_object *cursor_bo; size_t cursor_age; int cursor_x; @@ -243,7 +261,7 @@ struct vmw_display_unit { struct vmw_validation_ctx { struct vmw_resource *res; - struct vmw_dma_buffer *buf; + struct vmw_buffer_object *buf; }; #define vmw_crtc_to_du(x) \ @@ -291,14 +309,14 @@ int vmw_kms_helper_dirty(struct vmw_private *dev_priv, struct vmw_kms_dirty *dirty); int vmw_kms_helper_buffer_prepare(struct vmw_private *dev_priv, - struct vmw_dma_buffer *buf, + struct vmw_buffer_object *buf, bool interruptible, bool validate_as_mob, bool for_cpu_blit); -void vmw_kms_helper_buffer_revert(struct vmw_dma_buffer *buf); +void vmw_kms_helper_buffer_revert(struct vmw_buffer_object *buf); void vmw_kms_helper_buffer_finish(struct vmw_private *dev_priv, struct drm_file *file_priv, - struct vmw_dma_buffer *buf, + struct vmw_buffer_object *buf, struct vmw_fence_obj **out_fence, struct drm_vmw_fence_rep __user * user_fence_rep); @@ -316,7 +334,7 @@ int vmw_kms_readback(struct vmw_private *dev_priv, uint32_t num_clips); struct vmw_framebuffer * vmw_kms_new_framebuffer(struct vmw_private *dev_priv, - struct vmw_dma_buffer *dmabuf, + struct vmw_buffer_object *bo, struct vmw_surface *surface, bool only_2d, const struct drm_mode_fb_cmd2 *mode_cmd); @@ -384,11 +402,11 @@ void vmw_du_connector_destroy_state(struct drm_connector *connector, */ int vmw_kms_ldu_init_display(struct vmw_private *dev_priv); int vmw_kms_ldu_close_display(struct vmw_private *dev_priv); -int vmw_kms_ldu_do_dmabuf_dirty(struct vmw_private *dev_priv, - struct vmw_framebuffer *framebuffer, - unsigned flags, unsigned color, - struct drm_clip_rect *clips, - unsigned num_clips, int increment); +int vmw_kms_ldu_do_bo_dirty(struct vmw_private *dev_priv, + struct vmw_framebuffer *framebuffer, + unsigned int flags, unsigned int color, + struct drm_clip_rect *clips, + unsigned int num_clips, int increment); int vmw_kms_update_proxy(struct vmw_resource *res, const struct drm_clip_rect *clips, unsigned num_clips, @@ -408,14 +426,14 @@ int vmw_kms_sou_do_surface_dirty(struct vmw_private *dev_priv, unsigned num_clips, int inc, struct vmw_fence_obj **out_fence, struct drm_crtc *crtc); -int vmw_kms_sou_do_dmabuf_dirty(struct vmw_private *dev_priv, - struct vmw_framebuffer *framebuffer, - struct drm_clip_rect *clips, - struct drm_vmw_rect *vclips, - unsigned num_clips, int increment, - bool interruptible, - struct vmw_fence_obj **out_fence, - struct drm_crtc *crtc); +int vmw_kms_sou_do_bo_dirty(struct vmw_private *dev_priv, + struct vmw_framebuffer *framebuffer, + struct drm_clip_rect *clips, + struct drm_vmw_rect *vclips, + unsigned int num_clips, int increment, + bool interruptible, + struct vmw_fence_obj **out_fence, + struct drm_crtc *crtc); int vmw_kms_sou_readback(struct vmw_private *dev_priv, struct drm_file *file_priv, struct vmw_framebuffer *vfb, diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c index 4a5907e3f560..723578117191 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c @@ -1,7 +1,7 @@ +// SPDX-License-Identifier: GPL-2.0 OR MIT /************************************************************************** * - * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA - * All Rights Reserved. 
+ * Copyright 2009-2015 VMware, Inc., Palo Alto, CA., USA * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the @@ -438,7 +438,7 @@ static int vmw_ldu_init(struct vmw_private *dev_priv, unsigned unit) goto err_free_connector; } - (void) drm_mode_connector_attach_encoder(connector, encoder); + (void) drm_connector_attach_encoder(connector, encoder); encoder->possible_crtcs = (1 << unit); encoder->possible_clones = 0; @@ -547,11 +547,11 @@ int vmw_kms_ldu_close_display(struct vmw_private *dev_priv) } -int vmw_kms_ldu_do_dmabuf_dirty(struct vmw_private *dev_priv, - struct vmw_framebuffer *framebuffer, - unsigned flags, unsigned color, - struct drm_clip_rect *clips, - unsigned num_clips, int increment) +int vmw_kms_ldu_do_bo_dirty(struct vmw_private *dev_priv, + struct vmw_framebuffer *framebuffer, + unsigned int flags, unsigned int color, + struct drm_clip_rect *clips, + unsigned int num_clips, int increment) { size_t fifo_size; int i; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c b/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c index efd1ffd68185..e53bc639a754 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c @@ -1,7 +1,7 @@ +// SPDX-License-Identifier: GPL-2.0 OR MIT /************************************************************************** * - * Copyright (C) 2010 VMware, Inc., Palo Alto, CA., USA - * All Rights Reserved. + * Copyright 2010 VMware, Inc., Palo Alto, CA., USA * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c b/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c index d07c585e3c1d..7ed179d30ec5 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c @@ -1,7 +1,7 @@ +// SPDX-License-Identifier: GPL-2.0 OR MIT /************************************************************************** * - * Copyright © 2012-2015 VMware, Inc., Palo Alto, CA., USA - * All Rights Reserved. + * Copyright 2012-2015 VMware, Inc., Palo Alto, CA., USA * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the @@ -225,7 +225,7 @@ static void vmw_takedown_otable_base(struct vmw_private *dev_priv, ret = ttm_bo_reserve(bo, false, true, NULL); BUG_ON(ret != 0); - vmw_fence_single_bo(bo, NULL); + vmw_bo_fence_single(bo, NULL); ttm_bo_unreserve(bo); } @@ -362,7 +362,7 @@ static void vmw_otable_batch_takedown(struct vmw_private *dev_priv, ret = ttm_bo_reserve(bo, false, true, NULL); BUG_ON(ret != 0); - vmw_fence_single_bo(bo, NULL); + vmw_bo_fence_single(bo, NULL); ttm_bo_unreserve(bo); ttm_bo_unref(&batch->otable_bo); @@ -620,7 +620,7 @@ void vmw_mob_unbind(struct vmw_private *dev_priv, vmw_fifo_commit(dev_priv, sizeof(*cmd)); } if (bo) { - vmw_fence_single_bo(bo, NULL); + vmw_bo_fence_single(bo, NULL); ttm_bo_unreserve(bo); } vmw_fifo_resource_dec(dev_priv); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c index 21d746bdc922..8b9270f31409 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c @@ -1,6 +1,6 @@ +// SPDX-License-Identifier: GPL-2.0 OR MIT /* - * Copyright © 2016 VMware, Inc., Palo Alto, CA., USA - * All Rights Reserved. 
+ * Copyright 2016 VMware, Inc., Palo Alto, CA., USA * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the @@ -31,6 +31,7 @@ #include <linux/frame.h> #include <asm/hypervisor.h> #include <drm/drmP.h> +#include "vmwgfx_drv.h" #include "vmwgfx_msg.h" @@ -234,7 +235,7 @@ static int vmw_recv_msg(struct rpc_channel *channel, void **msg, if ((HIGH_WORD(ecx) & MESSAGE_STATUS_SUCCESS) == 0 || (HIGH_WORD(ecx) & MESSAGE_STATUS_HB) == 0) { - DRM_ERROR("Failed to get reply size\n"); + DRM_ERROR("Failed to get reply size for host message.\n"); return -EINVAL; } @@ -245,7 +246,7 @@ static int vmw_recv_msg(struct rpc_channel *channel, void **msg, reply_len = ebx; reply = kzalloc(reply_len + 1, GFP_KERNEL); if (!reply) { - DRM_ERROR("Cannot allocate memory for reply\n"); + DRM_ERROR("Cannot allocate memory for host message reply.\n"); return -ENOMEM; } @@ -338,7 +339,8 @@ int vmw_host_get_guestinfo(const char *guest_info_param, msg = kasprintf(GFP_KERNEL, "info-get %s", guest_info_param); if (!msg) { - DRM_ERROR("Cannot allocate memory to get %s", guest_info_param); + DRM_ERROR("Cannot allocate memory to get guest info \"%s\".", + guest_info_param); return -ENOMEM; } @@ -374,7 +376,7 @@ out_msg: out_open: *length = 0; kfree(msg); - DRM_ERROR("Failed to get %s", guest_info_param); + DRM_ERROR("Failed to get guest info \"%s\".", guest_info_param); return -EINVAL; } @@ -403,7 +405,7 @@ int vmw_host_log(const char *log) msg = kasprintf(GFP_KERNEL, "log %s", log); if (!msg) { - DRM_ERROR("Cannot allocate memory for log message\n"); + DRM_ERROR("Cannot allocate memory for host log message.\n"); return -ENOMEM; } @@ -422,7 +424,7 @@ out_msg: vmw_close_channel(&channel); out_open: kfree(msg); - DRM_ERROR("Failed to send log\n"); + DRM_ERROR("Failed to send host log message.\n"); return -EINVAL; } diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.h b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.h index 8545488aa0cf..4907e50fb20a 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.h @@ -1,16 +1,29 @@ -/* - * Copyright (C) 2016, VMware, Inc. +/* SPDX-License-Identifier: GPL-2.0+ OR MIT */ +/************************************************************************** + * + * Copyright 2016 VMware, Inc., Palo Alto, CA., USA + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 
IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or - * NON INFRINGEMENT. See the GNU General Public License for more - * details. + ************************************************************************** * * Based on code from vmware.c and vmmouse.c. * Author: diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c index 222c9c2123a1..9f1b9d289bec 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c @@ -1,7 +1,7 @@ +// SPDX-License-Identifier: GPL-2.0 OR MIT /************************************************************************** * - * Copyright © 2009-2014 VMware, Inc., Palo Alto, CA., USA - * All Rights Reserved. + * Copyright 2009-2014 VMware, Inc., Palo Alto, CA., USA * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the @@ -38,7 +38,7 @@ #define VMW_OVERLAY_CAP_MASK (SVGA_FIFO_CAP_VIDEO | SVGA_FIFO_CAP_ESCAPE) struct vmw_stream { - struct vmw_dma_buffer *buf; + struct vmw_buffer_object *buf; bool claimed; bool paused; struct drm_vmw_control_stream_arg saved; @@ -94,7 +94,7 @@ static inline void fill_flush(struct vmw_escape_video_flush *cmd, * -ERESTARTSYS if interrupted by a signal. */ static int vmw_overlay_send_put(struct vmw_private *dev_priv, - struct vmw_dma_buffer *buf, + struct vmw_buffer_object *buf, struct drm_vmw_control_stream_arg *arg, bool interruptible) { @@ -225,16 +225,16 @@ static int vmw_overlay_send_stop(struct vmw_private *dev_priv, * used with GMRs instead of being locked to vram. */ static int vmw_overlay_move_buffer(struct vmw_private *dev_priv, - struct vmw_dma_buffer *buf, + struct vmw_buffer_object *buf, bool pin, bool inter) { if (!pin) - return vmw_dmabuf_unpin(dev_priv, buf, inter); + return vmw_bo_unpin(dev_priv, buf, inter); if (dev_priv->active_display_unit == vmw_du_legacy) - return vmw_dmabuf_pin_in_vram(dev_priv, buf, inter); + return vmw_bo_pin_in_vram(dev_priv, buf, inter); - return vmw_dmabuf_pin_in_vram_or_gmr(dev_priv, buf, inter); + return vmw_bo_pin_in_vram_or_gmr(dev_priv, buf, inter); } /** @@ -278,7 +278,7 @@ static int vmw_overlay_stop(struct vmw_private *dev_priv, } if (!pause) { - vmw_dmabuf_unreference(&stream->buf); + vmw_bo_unreference(&stream->buf); stream->paused = false; } else { stream->paused = true; @@ -297,7 +297,7 @@ static int vmw_overlay_stop(struct vmw_private *dev_priv, * -ERESTARTSYS if interrupted. 
*/ static int vmw_overlay_update_stream(struct vmw_private *dev_priv, - struct vmw_dma_buffer *buf, + struct vmw_buffer_object *buf, struct drm_vmw_control_stream_arg *arg, bool interruptible) { @@ -347,7 +347,7 @@ static int vmw_overlay_update_stream(struct vmw_private *dev_priv, } if (stream->buf != buf) - stream->buf = vmw_dmabuf_reference(buf); + stream->buf = vmw_bo_reference(buf); stream->saved = *arg; /* stream is no longer stopped/paused */ stream->paused = false; @@ -466,7 +466,7 @@ int vmw_overlay_ioctl(struct drm_device *dev, void *data, struct vmw_overlay *overlay = dev_priv->overlay_priv; struct drm_vmw_control_stream_arg *arg = (struct drm_vmw_control_stream_arg *)data; - struct vmw_dma_buffer *buf; + struct vmw_buffer_object *buf; struct vmw_resource *res; int ret; @@ -484,13 +484,13 @@ int vmw_overlay_ioctl(struct drm_device *dev, void *data, goto out_unlock; } - ret = vmw_user_dmabuf_lookup(tfile, arg->handle, &buf, NULL); + ret = vmw_user_bo_lookup(tfile, arg->handle, &buf, NULL); if (ret) goto out_unlock; ret = vmw_overlay_update_stream(dev_priv, buf, arg, true); - vmw_dmabuf_unreference(&buf); + vmw_bo_unreference(&buf); out_unlock: mutex_unlock(&overlay->mutex); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_prime.c b/drivers/gpu/drm/vmwgfx/vmwgfx_prime.c index 0d42a46521fc..0861c821a7fe 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_prime.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_prime.c @@ -1,7 +1,7 @@ +// SPDX-License-Identifier: GPL-2.0 OR MIT /************************************************************************** * - * Copyright © 2013 VMware, Inc., Palo Alto, CA., USA - * All Rights Reserved. + * Copyright 2013 VMware, Inc., Palo Alto, CA., USA * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the @@ -40,7 +40,6 @@ */ static int vmw_prime_map_attach(struct dma_buf *dma_buf, - struct device *target_dev, struct dma_buf_attachment *attach) { return -ENOSYS; @@ -72,17 +71,6 @@ static void vmw_prime_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr) { } -static void *vmw_prime_dmabuf_kmap_atomic(struct dma_buf *dma_buf, - unsigned long page_num) -{ - return NULL; -} - -static void vmw_prime_dmabuf_kunmap_atomic(struct dma_buf *dma_buf, - unsigned long page_num, void *addr) -{ - -} static void *vmw_prime_dmabuf_kmap(struct dma_buf *dma_buf, unsigned long page_num) { @@ -109,9 +97,7 @@ const struct dma_buf_ops vmw_prime_dmabuf_ops = { .unmap_dma_buf = vmw_prime_unmap_dma_buf, .release = NULL, .map = vmw_prime_dmabuf_kmap, - .map_atomic = vmw_prime_dmabuf_kmap_atomic, .unmap = vmw_prime_dmabuf_kunmap, - .unmap_atomic = vmw_prime_dmabuf_kunmap_atomic, .mmap = vmw_prime_dmabuf_mmap, .vmap = vmw_prime_dmabuf_vmap, .vunmap = vmw_prime_dmabuf_vunmap, diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_reg.h b/drivers/gpu/drm/vmwgfx/vmwgfx_reg.h index dce798053a96..e99f6cdbb091 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_reg.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_reg.h @@ -1,7 +1,7 @@ +/* SPDX-License-Identifier: GPL-2.0 OR MIT */ /************************************************************************** * - * Copyright © 2009-2014 VMware, Inc., Palo Alto, CA., USA - * All Rights Reserved. 
+ * Copyright 2009-2014 VMware, Inc., Palo Alto, CA., USA
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index 6b3a942b18df..92003ea5a219 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -1,7 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
 /**************************************************************************
  *
- * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
+ * Copyright 2009-2015 VMware, Inc., Palo Alto, CA., USA
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the
@@ -27,7 +27,6 @@
 
 #include "vmwgfx_drv.h"
 #include <drm/vmwgfx_drm.h>
-#include <drm/ttm/ttm_object.h>
 #include <drm/ttm/ttm_placement.h>
 #include <drm/drmP.h>
 #include "vmwgfx_resource_priv.h"
@@ -35,29 +34,6 @@
 
 #define VMW_RES_EVICT_ERR_COUNT 10
 
-struct vmw_user_dma_buffer {
-	struct ttm_prime_object prime;
-	struct vmw_dma_buffer dma;
-};
-
-struct vmw_bo_user_rep {
-	uint32_t handle;
-	uint64_t map_handle;
-};
-
-static inline struct vmw_dma_buffer *
-vmw_dma_buffer(struct ttm_buffer_object *bo)
-{
-	return container_of(bo, struct vmw_dma_buffer, base);
-}
-
-static inline struct vmw_user_dma_buffer *
-vmw_user_dma_buffer(struct ttm_buffer_object *bo)
-{
-	struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
-	return container_of(vmw_bo, struct vmw_user_dma_buffer, dma);
-}
-
 struct vmw_resource *vmw_resource_reference(struct vmw_resource *res)
 {
 	kref_get(&res->kref);
@@ -116,7 +92,7 @@ static void vmw_resource_release(struct kref *kref)
 			res->backup_dirty = false;
 			list_del_init(&res->mob_head);
 			ttm_bo_unreserve(bo);
-			vmw_dmabuf_unreference(&res->backup);
+			vmw_bo_unreference(&res->backup);
 	}
 
 	if (likely(res->hw_destroy != NULL)) {
@@ -287,7 +263,7 @@ out_bad_resource:
 }
 
 /**
- * Helper function that looks either a surface or dmabuf.
+ * Helper function that looks up either a surface or a bo.
  *
  * The pointer this pointed at by out_surf and out_buf needs to be null.
  */
@@ -295,7 +271,7 @@ int vmw_user_lookup_handle(struct vmw_private *dev_priv,
 			   struct ttm_object_file *tfile,
 			   uint32_t handle,
 			   struct vmw_surface **out_surf,
-			   struct vmw_dma_buffer **out_buf)
+			   struct vmw_buffer_object **out_buf)
 {
 	struct vmw_resource *res;
 	int ret;
@@ -311,513 +287,11 @@ int vmw_user_lookup_handle(struct vmw_private *dev_priv,
 	}
 
 	*out_surf = NULL;
-	ret = vmw_user_dmabuf_lookup(tfile, handle, out_buf, NULL);
+	ret = vmw_user_bo_lookup(tfile, handle, out_buf, NULL);
 	return ret;
 }
 
 /**
- * Buffer management.
- */
-
-/**
- * vmw_dmabuf_acc_size - Calculate the pinned memory usage of buffers
- *
- * @dev_priv: Pointer to a struct vmw_private identifying the device.
- * @size: The requested buffer size.
- * @user: Whether this is an ordinary dma buffer or a user dma buffer.
- */ -static size_t vmw_dmabuf_acc_size(struct vmw_private *dev_priv, size_t size, - bool user) -{ - static size_t struct_size, user_struct_size; - size_t num_pages = PAGE_ALIGN(size) >> PAGE_SHIFT; - size_t page_array_size = ttm_round_pot(num_pages * sizeof(void *)); - - if (unlikely(struct_size == 0)) { - size_t backend_size = ttm_round_pot(vmw_tt_size); - - struct_size = backend_size + - ttm_round_pot(sizeof(struct vmw_dma_buffer)); - user_struct_size = backend_size + - ttm_round_pot(sizeof(struct vmw_user_dma_buffer)); - } - - if (dev_priv->map_mode == vmw_dma_alloc_coherent) - page_array_size += - ttm_round_pot(num_pages * sizeof(dma_addr_t)); - - return ((user) ? user_struct_size : struct_size) + - page_array_size; -} - -void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo) -{ - struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo); - - vmw_dma_buffer_unmap(vmw_bo); - kfree(vmw_bo); -} - -static void vmw_user_dmabuf_destroy(struct ttm_buffer_object *bo) -{ - struct vmw_user_dma_buffer *vmw_user_bo = vmw_user_dma_buffer(bo); - - vmw_dma_buffer_unmap(&vmw_user_bo->dma); - ttm_prime_object_kfree(vmw_user_bo, prime); -} - -int vmw_dmabuf_init(struct vmw_private *dev_priv, - struct vmw_dma_buffer *vmw_bo, - size_t size, struct ttm_placement *placement, - bool interruptible, - void (*bo_free) (struct ttm_buffer_object *bo)) -{ - struct ttm_bo_device *bdev = &dev_priv->bdev; - size_t acc_size; - int ret; - bool user = (bo_free == &vmw_user_dmabuf_destroy); - - BUG_ON(!bo_free && (!user && (bo_free != vmw_dmabuf_bo_free))); - - acc_size = vmw_dmabuf_acc_size(dev_priv, size, user); - memset(vmw_bo, 0, sizeof(*vmw_bo)); - - INIT_LIST_HEAD(&vmw_bo->res_list); - - ret = ttm_bo_init(bdev, &vmw_bo->base, size, - ttm_bo_type_device, placement, - 0, interruptible, acc_size, - NULL, NULL, bo_free); - return ret; -} - -static void vmw_user_dmabuf_release(struct ttm_base_object **p_base) -{ - struct vmw_user_dma_buffer *vmw_user_bo; - struct ttm_base_object *base = *p_base; - struct ttm_buffer_object *bo; - - *p_base = NULL; - - if (unlikely(base == NULL)) - return; - - vmw_user_bo = container_of(base, struct vmw_user_dma_buffer, - prime.base); - bo = &vmw_user_bo->dma.base; - ttm_bo_unref(&bo); -} - -static void vmw_user_dmabuf_ref_obj_release(struct ttm_base_object *base, - enum ttm_ref_type ref_type) -{ - struct vmw_user_dma_buffer *user_bo; - user_bo = container_of(base, struct vmw_user_dma_buffer, prime.base); - - switch (ref_type) { - case TTM_REF_SYNCCPU_WRITE: - ttm_bo_synccpu_write_release(&user_bo->dma.base); - break; - default: - BUG(); - } -} - -/** - * vmw_user_dmabuf_alloc - Allocate a user dma buffer - * - * @dev_priv: Pointer to a struct device private. - * @tfile: Pointer to a struct ttm_object_file on which to register the user - * object. - * @size: Size of the dma buffer. - * @shareable: Boolean whether the buffer is shareable with other open files. - * @handle: Pointer to where the handle value should be assigned. - * @p_dma_buf: Pointer to where the refcounted struct vmw_dma_buffer pointer - * should be assigned. 
- */ -int vmw_user_dmabuf_alloc(struct vmw_private *dev_priv, - struct ttm_object_file *tfile, - uint32_t size, - bool shareable, - uint32_t *handle, - struct vmw_dma_buffer **p_dma_buf, - struct ttm_base_object **p_base) -{ - struct vmw_user_dma_buffer *user_bo; - struct ttm_buffer_object *tmp; - int ret; - - user_bo = kzalloc(sizeof(*user_bo), GFP_KERNEL); - if (unlikely(!user_bo)) { - DRM_ERROR("Failed to allocate a buffer.\n"); - return -ENOMEM; - } - - ret = vmw_dmabuf_init(dev_priv, &user_bo->dma, size, - (dev_priv->has_mob) ? - &vmw_sys_placement : - &vmw_vram_sys_placement, true, - &vmw_user_dmabuf_destroy); - if (unlikely(ret != 0)) - return ret; - - tmp = ttm_bo_reference(&user_bo->dma.base); - ret = ttm_prime_object_init(tfile, - size, - &user_bo->prime, - shareable, - ttm_buffer_type, - &vmw_user_dmabuf_release, - &vmw_user_dmabuf_ref_obj_release); - if (unlikely(ret != 0)) { - ttm_bo_unref(&tmp); - goto out_no_base_object; - } - - *p_dma_buf = &user_bo->dma; - if (p_base) { - *p_base = &user_bo->prime.base; - kref_get(&(*p_base)->refcount); - } - *handle = user_bo->prime.base.hash.key; - -out_no_base_object: - return ret; -} - -/** - * vmw_user_dmabuf_verify_access - verify access permissions on this - * buffer object. - * - * @bo: Pointer to the buffer object being accessed - * @tfile: Identifying the caller. - */ -int vmw_user_dmabuf_verify_access(struct ttm_buffer_object *bo, - struct ttm_object_file *tfile) -{ - struct vmw_user_dma_buffer *vmw_user_bo; - - if (unlikely(bo->destroy != vmw_user_dmabuf_destroy)) - return -EPERM; - - vmw_user_bo = vmw_user_dma_buffer(bo); - - /* Check that the caller has opened the object. */ - if (likely(ttm_ref_object_exists(tfile, &vmw_user_bo->prime.base))) - return 0; - - DRM_ERROR("Could not grant buffer access.\n"); - return -EPERM; -} - -/** - * vmw_user_dmabuf_synccpu_grab - Grab a struct vmw_user_dma_buffer for cpu - * access, idling previous GPU operations on the buffer and optionally - * blocking it for further command submissions. - * - * @user_bo: Pointer to the buffer object being grabbed for CPU access - * @tfile: Identifying the caller. - * @flags: Flags indicating how the grab should be performed. - * - * A blocking grab will be automatically released when @tfile is closed. - */ -static int vmw_user_dmabuf_synccpu_grab(struct vmw_user_dma_buffer *user_bo, - struct ttm_object_file *tfile, - uint32_t flags) -{ - struct ttm_buffer_object *bo = &user_bo->dma.base; - bool existed; - int ret; - - if (flags & drm_vmw_synccpu_allow_cs) { - bool nonblock = !!(flags & drm_vmw_synccpu_dontblock); - long lret; - - lret = reservation_object_wait_timeout_rcu(bo->resv, true, true, - nonblock ? 0 : MAX_SCHEDULE_TIMEOUT); - if (!lret) - return -EBUSY; - else if (lret < 0) - return lret; - return 0; - } - - ret = ttm_bo_synccpu_write_grab - (bo, !!(flags & drm_vmw_synccpu_dontblock)); - if (unlikely(ret != 0)) - return ret; - - ret = ttm_ref_object_add(tfile, &user_bo->prime.base, - TTM_REF_SYNCCPU_WRITE, &existed, false); - if (ret != 0 || existed) - ttm_bo_synccpu_write_release(&user_bo->dma.base); - - return ret; -} - -/** - * vmw_user_dmabuf_synccpu_release - Release a previous grab for CPU access, - * and unblock command submission on the buffer if blocked. - * - * @handle: Handle identifying the buffer object. - * @tfile: Identifying the caller. - * @flags: Flags indicating the type of release. 
- */ -static int vmw_user_dmabuf_synccpu_release(uint32_t handle, - struct ttm_object_file *tfile, - uint32_t flags) -{ - if (!(flags & drm_vmw_synccpu_allow_cs)) - return ttm_ref_object_base_unref(tfile, handle, - TTM_REF_SYNCCPU_WRITE); - - return 0; -} - -/** - * vmw_user_dmabuf_synccpu_release - ioctl function implementing the synccpu - * functionality. - * - * @dev: Identifies the drm device. - * @data: Pointer to the ioctl argument. - * @file_priv: Identifies the caller. - * - * This function checks the ioctl arguments for validity and calls the - * relevant synccpu functions. - */ -int vmw_user_dmabuf_synccpu_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv) -{ - struct drm_vmw_synccpu_arg *arg = - (struct drm_vmw_synccpu_arg *) data; - struct vmw_dma_buffer *dma_buf; - struct vmw_user_dma_buffer *user_bo; - struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; - struct ttm_base_object *buffer_base; - int ret; - - if ((arg->flags & (drm_vmw_synccpu_read | drm_vmw_synccpu_write)) == 0 - || (arg->flags & ~(drm_vmw_synccpu_read | drm_vmw_synccpu_write | - drm_vmw_synccpu_dontblock | - drm_vmw_synccpu_allow_cs)) != 0) { - DRM_ERROR("Illegal synccpu flags.\n"); - return -EINVAL; - } - - switch (arg->op) { - case drm_vmw_synccpu_grab: - ret = vmw_user_dmabuf_lookup(tfile, arg->handle, &dma_buf, - &buffer_base); - if (unlikely(ret != 0)) - return ret; - - user_bo = container_of(dma_buf, struct vmw_user_dma_buffer, - dma); - ret = vmw_user_dmabuf_synccpu_grab(user_bo, tfile, arg->flags); - vmw_dmabuf_unreference(&dma_buf); - ttm_base_object_unref(&buffer_base); - if (unlikely(ret != 0 && ret != -ERESTARTSYS && - ret != -EBUSY)) { - DRM_ERROR("Failed synccpu grab on handle 0x%08x.\n", - (unsigned int) arg->handle); - return ret; - } - break; - case drm_vmw_synccpu_release: - ret = vmw_user_dmabuf_synccpu_release(arg->handle, tfile, - arg->flags); - if (unlikely(ret != 0)) { - DRM_ERROR("Failed synccpu release on handle 0x%08x.\n", - (unsigned int) arg->handle); - return ret; - } - break; - default: - DRM_ERROR("Invalid synccpu operation.\n"); - return -EINVAL; - } - - return 0; -} - -int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv) -{ - struct vmw_private *dev_priv = vmw_priv(dev); - union drm_vmw_alloc_dmabuf_arg *arg = - (union drm_vmw_alloc_dmabuf_arg *)data; - struct drm_vmw_alloc_dmabuf_req *req = &arg->req; - struct drm_vmw_dmabuf_rep *rep = &arg->rep; - struct vmw_dma_buffer *dma_buf; - uint32_t handle; - int ret; - - ret = ttm_read_lock(&dev_priv->reservation_sem, true); - if (unlikely(ret != 0)) - return ret; - - ret = vmw_user_dmabuf_alloc(dev_priv, vmw_fpriv(file_priv)->tfile, - req->size, false, &handle, &dma_buf, - NULL); - if (unlikely(ret != 0)) - goto out_no_dmabuf; - - rep->handle = handle; - rep->map_handle = drm_vma_node_offset_addr(&dma_buf->base.vma_node); - rep->cur_gmr_id = handle; - rep->cur_gmr_offset = 0; - - vmw_dmabuf_unreference(&dma_buf); - -out_no_dmabuf: - ttm_read_unlock(&dev_priv->reservation_sem); - - return ret; -} - -int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv) -{ - struct drm_vmw_unref_dmabuf_arg *arg = - (struct drm_vmw_unref_dmabuf_arg *)data; - - return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile, - arg->handle, - TTM_REF_USAGE); -} - -int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile, - uint32_t handle, struct vmw_dma_buffer **out, - struct ttm_base_object **p_base) -{ - struct vmw_user_dma_buffer *vmw_user_bo; - 
struct ttm_base_object *base; - - base = ttm_base_object_lookup(tfile, handle); - if (unlikely(base == NULL)) { - pr_err("Invalid buffer object handle 0x%08lx\n", - (unsigned long)handle); - return -ESRCH; - } - - if (unlikely(ttm_base_object_type(base) != ttm_buffer_type)) { - ttm_base_object_unref(&base); - pr_err("Invalid buffer object handle 0x%08lx\n", - (unsigned long)handle); - return -EINVAL; - } - - vmw_user_bo = container_of(base, struct vmw_user_dma_buffer, - prime.base); - (void)ttm_bo_reference(&vmw_user_bo->dma.base); - if (p_base) - *p_base = base; - else - ttm_base_object_unref(&base); - *out = &vmw_user_bo->dma; - - return 0; -} - -int vmw_user_dmabuf_reference(struct ttm_object_file *tfile, - struct vmw_dma_buffer *dma_buf, - uint32_t *handle) -{ - struct vmw_user_dma_buffer *user_bo; - - if (dma_buf->base.destroy != vmw_user_dmabuf_destroy) - return -EINVAL; - - user_bo = container_of(dma_buf, struct vmw_user_dma_buffer, dma); - - *handle = user_bo->prime.base.hash.key; - return ttm_ref_object_add(tfile, &user_bo->prime.base, - TTM_REF_USAGE, NULL, false); -} - -/** - * vmw_dumb_create - Create a dumb kms buffer - * - * @file_priv: Pointer to a struct drm_file identifying the caller. - * @dev: Pointer to the drm device. - * @args: Pointer to a struct drm_mode_create_dumb structure - * - * This is a driver callback for the core drm create_dumb functionality. - * Note that this is very similar to the vmw_dmabuf_alloc ioctl, except - * that the arguments have a different format. - */ -int vmw_dumb_create(struct drm_file *file_priv, - struct drm_device *dev, - struct drm_mode_create_dumb *args) -{ - struct vmw_private *dev_priv = vmw_priv(dev); - struct vmw_dma_buffer *dma_buf; - int ret; - - args->pitch = args->width * ((args->bpp + 7) / 8); - args->size = args->pitch * args->height; - - ret = ttm_read_lock(&dev_priv->reservation_sem, true); - if (unlikely(ret != 0)) - return ret; - - ret = vmw_user_dmabuf_alloc(dev_priv, vmw_fpriv(file_priv)->tfile, - args->size, false, &args->handle, - &dma_buf, NULL); - if (unlikely(ret != 0)) - goto out_no_dmabuf; - - vmw_dmabuf_unreference(&dma_buf); -out_no_dmabuf: - ttm_read_unlock(&dev_priv->reservation_sem); - return ret; -} - -/** - * vmw_dumb_map_offset - Return the address space offset of a dumb buffer - * - * @file_priv: Pointer to a struct drm_file identifying the caller. - * @dev: Pointer to the drm device. - * @handle: Handle identifying the dumb buffer. - * @offset: The address space offset returned. - * - * This is a driver callback for the core drm dumb_map_offset functionality. - */ -int vmw_dumb_map_offset(struct drm_file *file_priv, - struct drm_device *dev, uint32_t handle, - uint64_t *offset) -{ - struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; - struct vmw_dma_buffer *out_buf; - int ret; - - ret = vmw_user_dmabuf_lookup(tfile, handle, &out_buf, NULL); - if (ret != 0) - return -EINVAL; - - *offset = drm_vma_node_offset_addr(&out_buf->base.vma_node); - vmw_dmabuf_unreference(&out_buf); - return 0; -} - -/** - * vmw_dumb_destroy - Destroy a dumb boffer - * - * @file_priv: Pointer to a struct drm_file identifying the caller. - * @dev: Pointer to the drm device. - * @handle: Handle identifying the dumb buffer. - * - * This is a driver callback for the core drm dumb_destroy functionality. 
- */
-int vmw_dumb_destroy(struct drm_file *file_priv,
-		     struct drm_device *dev,
-		     uint32_t handle)
-{
-	return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
-					 handle, TTM_REF_USAGE);
-}
-
-/**
  * vmw_resource_buf_alloc - Allocate a backup buffer for a resource.
  *
  * @res: The resource for which to allocate a backup buffer.
@@ -829,7 +303,7 @@ static int vmw_resource_buf_alloc(struct vmw_resource *res,
 {
 	unsigned long size =
 		(res->backup_size + PAGE_SIZE - 1) & PAGE_MASK;
-	struct vmw_dma_buffer *backup;
+	struct vmw_buffer_object *backup;
 	int ret;
 
 	if (likely(res->backup)) {
@@ -841,16 +315,16 @@ static int vmw_resource_buf_alloc(struct vmw_resource *res,
 	if (unlikely(!backup))
 		return -ENOMEM;
 
-	ret = vmw_dmabuf_init(res->dev_priv, backup, res->backup_size,
+	ret = vmw_bo_init(res->dev_priv, backup, res->backup_size,
 			      res->func->backup_placement,
 			      interruptible,
-			      &vmw_dmabuf_bo_free);
+			      &vmw_bo_bo_free);
 	if (unlikely(ret != 0))
-		goto out_no_dmabuf;
+		goto out_no_bo;
 
 	res->backup = backup;
 
-out_no_dmabuf:
+out_no_bo:
 	return ret;
 }
 
@@ -919,7 +393,7 @@ out_bind_failed:
  */
 void vmw_resource_unreserve(struct vmw_resource *res,
 			    bool switch_backup,
-			    struct vmw_dma_buffer *new_backup,
+			    struct vmw_buffer_object *new_backup,
 			    unsigned long new_backup_offset)
 {
 	struct vmw_private *dev_priv = res->dev_priv;
@@ -931,11 +405,11 @@ void vmw_resource_unreserve(struct vmw_resource *res,
 	if (res->backup) {
 		lockdep_assert_held(&res->backup->base.resv->lock.base);
 		list_del_init(&res->mob_head);
-		vmw_dmabuf_unreference(&res->backup);
+		vmw_bo_unreference(&res->backup);
 	}
 
 	if (new_backup) {
-		res->backup = vmw_dmabuf_reference(new_backup);
+		res->backup = vmw_bo_reference(new_backup);
 		lockdep_assert_held(&new_backup->base.resv->lock.base);
 		list_add_tail(&res->mob_head, &new_backup->res_list);
 	} else {
@@ -959,6 +433,7 @@ void vmw_resource_unreserve(struct vmw_resource *res,
  * for a resource and in that case, allocate
  * one, reserve and validate it.
  *
+ * @ticket: The ww acquire context to use, or NULL if trylocking.
  * @res: The resource for which to allocate a backup buffer.
  * @interruptible: Whether any sleeps during allocation should be
  * performed while interruptible.
@@ -966,7 +441,8 @@ void vmw_resource_unreserve(struct vmw_resource *res,
  * reserved and validated backup buffer.
  */
 static int
-vmw_resource_check_buffer(struct vmw_resource *res,
+vmw_resource_check_buffer(struct ww_acquire_ctx *ticket,
+			  struct vmw_resource *res,
			  bool interruptible,
			  struct ttm_validate_buffer *val_buf)
 {
@@ -985,7 +461,7 @@ vmw_resource_check_buffer(struct vmw_resource *res,
 	val_buf->bo = ttm_bo_reference(&res->backup->base);
 	val_buf->shared = false;
 	list_add_tail(&val_buf->head, &val_list);
-	ret = ttm_eu_reserve_buffers(NULL, &val_list, interruptible, NULL);
+	ret = ttm_eu_reserve_buffers(ticket, &val_list, interruptible, NULL);
 	if (unlikely(ret != 0))
 		goto out_no_reserve;
 
@@ -1003,11 +479,11 @@ vmw_resource_check_buffer(struct vmw_resource *res,
 	return 0;
 
 out_no_validate:
-	ttm_eu_backoff_reservation(NULL, &val_list);
+	ttm_eu_backoff_reservation(ticket, &val_list);
 
 out_no_reserve:
 	ttm_bo_unref(&val_buf->bo);
 	if (backup_dirty)
-		vmw_dmabuf_unreference(&res->backup);
+		vmw_bo_unreference(&res->backup);
 
 	return ret;
 }
@@ -1050,10 +526,12 @@ int vmw_resource_reserve(struct vmw_resource *res, bool interruptible,
  * vmw_resource_backoff_reservation - Unreserve and unreference a
  * backup buffer
  *.
+ * @ticket: The ww acquire ctx used for reservation.
  * @val_buf: Backup buffer information.
*/ static void -vmw_resource_backoff_reservation(struct ttm_validate_buffer *val_buf) +vmw_resource_backoff_reservation(struct ww_acquire_ctx *ticket, + struct ttm_validate_buffer *val_buf) { struct list_head val_list; @@ -1062,7 +540,7 @@ vmw_resource_backoff_reservation(struct ttm_validate_buffer *val_buf) INIT_LIST_HEAD(&val_list); list_add_tail(&val_buf->head, &val_list); - ttm_eu_backoff_reservation(NULL, &val_list); + ttm_eu_backoff_reservation(ticket, &val_list); ttm_bo_unref(&val_buf->bo); } @@ -1070,10 +548,12 @@ vmw_resource_backoff_reservation(struct ttm_validate_buffer *val_buf) * vmw_resource_do_evict - Evict a resource, and transfer its data * to a backup buffer. * + * @ticket: The ww acquire ticket to use, or NULL if trylocking. * @res: The resource to evict. * @interruptible: Whether to wait interruptible. */ -static int vmw_resource_do_evict(struct vmw_resource *res, bool interruptible) +static int vmw_resource_do_evict(struct ww_acquire_ctx *ticket, + struct vmw_resource *res, bool interruptible) { struct ttm_validate_buffer val_buf; const struct vmw_res_func *func = res->func; @@ -1083,7 +563,7 @@ static int vmw_resource_do_evict(struct vmw_resource *res, bool interruptible) val_buf.bo = NULL; val_buf.shared = false; - ret = vmw_resource_check_buffer(res, interruptible, &val_buf); + ret = vmw_resource_check_buffer(ticket, res, interruptible, &val_buf); if (unlikely(ret != 0)) return ret; @@ -1098,7 +578,7 @@ static int vmw_resource_do_evict(struct vmw_resource *res, bool interruptible) res->backup_dirty = true; res->res_dirty = false; out_no_unbind: - vmw_resource_backoff_reservation(&val_buf); + vmw_resource_backoff_reservation(ticket, &val_buf); return ret; } @@ -1152,7 +632,8 @@ int vmw_resource_validate(struct vmw_resource *res) write_unlock(&dev_priv->resource_lock); - ret = vmw_resource_do_evict(evict_res, true); + /* Trylock backup buffers with a NULL ticket. */ + ret = vmw_resource_do_evict(NULL, evict_res, true); if (unlikely(ret != 0)) { write_lock(&dev_priv->resource_lock); list_add_tail(&evict_res->lru_head, lru_list); @@ -1171,7 +652,7 @@ int vmw_resource_validate(struct vmw_resource *res) goto out_no_validate; else if (!res->func->needs_backup && res->backup) { list_del_init(&res->mob_head); - vmw_dmabuf_unreference(&res->backup); + vmw_bo_unreference(&res->backup); } return 0; @@ -1180,109 +661,39 @@ out_no_validate: return ret; } -/** - * vmw_fence_single_bo - Utility function to fence a single TTM buffer - * object without unreserving it. - * - * @bo: Pointer to the struct ttm_buffer_object to fence. - * @fence: Pointer to the fence. If NULL, this function will - * insert a fence into the command stream.. - * - * Contrary to the ttm_eu version of this function, it takes only - * a single buffer object instead of a list, and it also doesn't - * unreserve the buffer object, which needs to be done separately. - */ -void vmw_fence_single_bo(struct ttm_buffer_object *bo, - struct vmw_fence_obj *fence) -{ - struct ttm_bo_device *bdev = bo->bdev; - - struct vmw_private *dev_priv = - container_of(bdev, struct vmw_private, bdev); - - if (fence == NULL) { - vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL); - reservation_object_add_excl_fence(bo->resv, &fence->base); - dma_fence_put(&fence->base); - } else - reservation_object_add_excl_fence(bo->resv, &fence->base); -} /** - * vmw_resource_move_notify - TTM move_notify_callback + * vmw_resource_unbind_list * - * @bo: The TTM buffer object about to move. 
- * @mem: The struct ttm_mem_reg indicating to what memory - * region the move is taking place. + * @vbo: Pointer to the current backing MOB. * * Evicts the Guest Backed hardware resource if the backup * buffer is being moved out of MOB memory. - * Note that this function should not race with the resource - * validation code as long as it accesses only members of struct - * resource that remain static while bo::res is !NULL and - * while we have @bo reserved. struct resource::backup is *not* a - * static member. The resource validation code will take care - * to set @bo::res to NULL, while having @bo reserved when the - * buffer is no longer bound to the resource, so @bo:res can be - * used to determine whether there is a need to unbind and whether - * it is safe to unbind. + * Note that this function will not race with the resource + * validation code, since resource validation and eviction + * both require the backup buffer to be reserved. */ -void vmw_resource_move_notify(struct ttm_buffer_object *bo, - struct ttm_mem_reg *mem) +void vmw_resource_unbind_list(struct vmw_buffer_object *vbo) { - struct vmw_dma_buffer *dma_buf; - - if (mem == NULL) - return; - - if (bo->destroy != vmw_dmabuf_bo_free && - bo->destroy != vmw_user_dmabuf_destroy) - return; - - dma_buf = container_of(bo, struct vmw_dma_buffer, base); - - /* - * Kill any cached kernel maps before move. An optimization could - * be to do this iff source or destination memory type is VRAM. - */ - vmw_dma_buffer_unmap(dma_buf); - if (mem->mem_type != VMW_PL_MOB) { - struct vmw_resource *res, *n; - struct ttm_validate_buffer val_buf; + struct vmw_resource *res, *next; + struct ttm_validate_buffer val_buf = { + .bo = &vbo->base, + .shared = false + }; - val_buf.bo = bo; - val_buf.shared = false; + lockdep_assert_held(&vbo->base.resv->lock.base); + list_for_each_entry_safe(res, next, &vbo->res_list, mob_head) { + if (!res->func->unbind) + continue; - list_for_each_entry_safe(res, n, &dma_buf->res_list, mob_head) { - - if (unlikely(res->func->unbind == NULL)) - continue; - - (void) res->func->unbind(res, true, &val_buf); - res->backup_dirty = true; - res->res_dirty = false; - list_del_init(&res->mob_head); - } - - (void) ttm_bo_wait(bo, false, false); + (void) res->func->unbind(res, true, &val_buf); + res->backup_dirty = true; + res->res_dirty = false; + list_del_init(&res->mob_head); } -} - - -/** - * vmw_resource_swap_notify - swapout notify callback. - * - * @bo: The buffer object to be swapped out. - */ -void vmw_resource_swap_notify(struct ttm_buffer_object *bo) -{ - if (bo->destroy != vmw_dmabuf_bo_free && - bo->destroy != vmw_user_dmabuf_destroy) - return; - /* Kill any cached kernel maps before swapout */ - vmw_dma_buffer_unmap(vmw_dma_buffer(bo)); + (void) ttm_bo_wait(&vbo->base, false, false); } @@ -1294,7 +705,7 @@ void vmw_resource_swap_notify(struct ttm_buffer_object *bo) * Read back cached states from the device if they exist. This function * assumings binding_mutex is held. 
*/ -int vmw_query_readback_all(struct vmw_dma_buffer *dx_query_mob) +int vmw_query_readback_all(struct vmw_buffer_object *dx_query_mob) { struct vmw_resource *dx_query_ctx; struct vmw_private *dev_priv; @@ -1344,7 +755,7 @@ int vmw_query_readback_all(struct vmw_dma_buffer *dx_query_mob) void vmw_query_move_notify(struct ttm_buffer_object *bo, struct ttm_mem_reg *mem) { - struct vmw_dma_buffer *dx_query_mob; + struct vmw_buffer_object *dx_query_mob; struct ttm_bo_device *bdev = bo->bdev; struct vmw_private *dev_priv; @@ -1353,7 +764,7 @@ void vmw_query_move_notify(struct ttm_buffer_object *bo, mutex_lock(&dev_priv->binding_mutex); - dx_query_mob = container_of(bo, struct vmw_dma_buffer, base); + dx_query_mob = container_of(bo, struct vmw_buffer_object, base); if (mem == NULL || !dx_query_mob || !dx_query_mob->dx_query_ctx) { mutex_unlock(&dev_priv->binding_mutex); return; @@ -1368,7 +779,7 @@ void vmw_query_move_notify(struct ttm_buffer_object *bo, /* Create a fence and attach the BO to it */ (void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL); - vmw_fence_single_bo(bo, fence); + vmw_bo_fence_single(bo, fence); if (fence != NULL) vmw_fence_obj_unreference(&fence); @@ -1405,6 +816,7 @@ static void vmw_resource_evict_type(struct vmw_private *dev_priv, struct vmw_resource *evict_res; unsigned err_count = 0; int ret; + struct ww_acquire_ctx ticket; do { write_lock(&dev_priv->resource_lock); @@ -1418,7 +830,8 @@ static void vmw_resource_evict_type(struct vmw_private *dev_priv, list_del_init(&evict_res->lru_head); write_unlock(&dev_priv->resource_lock); - ret = vmw_resource_do_evict(evict_res, false); + /* Wait lock backup buffers with a ticket. */ + ret = vmw_resource_do_evict(&ticket, evict_res, false); if (unlikely(ret != 0)) { write_lock(&dev_priv->resource_lock); list_add_tail(&evict_res->lru_head, lru_list); @@ -1481,7 +894,7 @@ int vmw_resource_pin(struct vmw_resource *res, bool interruptible) goto out_no_reserve; if (res->pin_count == 0) { - struct vmw_dma_buffer *vbo = NULL; + struct vmw_buffer_object *vbo = NULL; if (res->backup) { vbo = res->backup; @@ -1539,7 +952,7 @@ void vmw_resource_unpin(struct vmw_resource *res) WARN_ON(res->pin_count == 0); if (--res->pin_count == 0 && res->backup) { - struct vmw_dma_buffer *vbo = res->backup; + struct vmw_buffer_object *vbo = res->backup; (void) ttm_bo_reserve(&vbo->base, false, false, NULL); vmw_bo_pin_reserved(vbo, false); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource_priv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_resource_priv.h index ac05968a832b..a8c1c5ebd71d 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource_priv.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource_priv.h @@ -1,7 +1,7 @@ +/* SPDX-License-Identifier: GPL-2.0 OR MIT */ /************************************************************************** * - * Copyright © 2012-2014 VMware, Inc., Palo Alto, CA., USA - * All Rights Reserved. + * Copyright 2012-2014 VMware, Inc., Palo Alto, CA., USA * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c index 3d667e903beb..ad0de7f0cd60 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c @@ -1,7 +1,7 @@ +// SPDX-License-Identifier: GPL-2.0 OR MIT /************************************************************************** * - * Copyright © 2011-2015 VMware, Inc., Palo Alto, CA., USA - * All Rights Reserved. 
+ * Copyright 2011-2015 VMware, Inc., Palo Alto, CA., USA * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the @@ -66,7 +66,7 @@ struct vmw_kms_sou_readback_blit { SVGAFifoCmdBlitScreenToGMRFB body; }; -struct vmw_kms_sou_dmabuf_blit { +struct vmw_kms_sou_bo_blit { uint32 header; SVGAFifoCmdBlitGMRFBToScreen body; }; @@ -83,7 +83,7 @@ struct vmw_screen_object_unit { struct vmw_display_unit base; unsigned long buffer_size; /**< Size of allocated buffer */ - struct vmw_dma_buffer *buffer; /**< Backing store buffer */ + struct vmw_buffer_object *buffer; /**< Backing store buffer */ bool defined; }; @@ -109,7 +109,7 @@ static void vmw_sou_crtc_destroy(struct drm_crtc *crtc) */ static int vmw_sou_fifo_create(struct vmw_private *dev_priv, struct vmw_screen_object_unit *sou, - uint32_t x, uint32_t y, + int x, int y, struct drm_display_mode *mode) { size_t fifo_size; @@ -139,13 +139,8 @@ static int vmw_sou_fifo_create(struct vmw_private *dev_priv, (sou->base.unit == 0 ? SVGA_SCREEN_IS_PRIMARY : 0); cmd->obj.size.width = mode->hdisplay; cmd->obj.size.height = mode->vdisplay; - if (sou->base.is_implicit) { - cmd->obj.root.x = x; - cmd->obj.root.y = y; - } else { - cmd->obj.root.x = sou->base.gui_x; - cmd->obj.root.y = sou->base.gui_y; - } + cmd->obj.root.x = x; + cmd->obj.root.y = y; sou->base.set_gui_x = cmd->obj.root.x; sou->base.set_gui_y = cmd->obj.root.y; @@ -222,12 +217,11 @@ static void vmw_sou_crtc_mode_set_nofb(struct drm_crtc *crtc) struct vmw_plane_state *vps; int ret; - - sou = vmw_crtc_to_sou(crtc); + sou = vmw_crtc_to_sou(crtc); dev_priv = vmw_priv(crtc->dev); - ps = crtc->primary->state; - fb = ps->fb; - vps = vmw_plane_state_to_vps(ps); + ps = crtc->primary->state; + fb = ps->fb; + vps = vmw_plane_state_to_vps(ps); vfb = (fb) ? vmw_framebuffer_to_vfb(fb) : NULL; @@ -240,11 +234,25 @@ static void vmw_sou_crtc_mode_set_nofb(struct drm_crtc *crtc) } if (vfb) { - sou->buffer = vps->dmabuf; - sou->buffer_size = vps->dmabuf_size; + struct drm_connector_state *conn_state; + struct vmw_connector_state *vmw_conn_state; + int x, y; + + sou->buffer = vps->bo; + sou->buffer_size = vps->bo_size; + + if (sou->base.is_implicit) { + x = crtc->x; + y = crtc->y; + } else { + conn_state = sou->base.connector.state; + vmw_conn_state = vmw_connector_state_to_vcs(conn_state); + + x = vmw_conn_state->gui_x; + y = vmw_conn_state->gui_y; + } - ret = vmw_sou_fifo_create(dev_priv, sou, crtc->x, crtc->y, - &crtc->mode); + ret = vmw_sou_fifo_create(dev_priv, sou, x, y, &crtc->mode); if (ret) DRM_ERROR("Failed to define Screen Object %dx%d\n", crtc->x, crtc->y); @@ -408,10 +416,10 @@ vmw_sou_primary_plane_cleanup_fb(struct drm_plane *plane, struct drm_crtc *crtc = plane->state->crtc ? 
plane->state->crtc : old_state->crtc; - if (vps->dmabuf) - vmw_dmabuf_unpin(vmw_priv(crtc->dev), vps->dmabuf, false); - vmw_dmabuf_unreference(&vps->dmabuf); - vps->dmabuf_size = 0; + if (vps->bo) + vmw_bo_unpin(vmw_priv(crtc->dev), vps->bo, false); + vmw_bo_unreference(&vps->bo); + vps->bo_size = 0; vmw_du_plane_cleanup_fb(plane, old_state); } @@ -440,8 +448,8 @@ vmw_sou_primary_plane_prepare_fb(struct drm_plane *plane, if (!new_fb) { - vmw_dmabuf_unreference(&vps->dmabuf); - vps->dmabuf_size = 0; + vmw_bo_unreference(&vps->bo); + vps->bo_size = 0; return 0; } @@ -449,22 +457,22 @@ vmw_sou_primary_plane_prepare_fb(struct drm_plane *plane, size = new_state->crtc_w * new_state->crtc_h * 4; dev_priv = vmw_priv(crtc->dev); - if (vps->dmabuf) { - if (vps->dmabuf_size == size) { + if (vps->bo) { + if (vps->bo_size == size) { /* * Note that this might temporarily up the pin-count * to 2, until cleanup_fb() is called. */ - return vmw_dmabuf_pin_in_vram(dev_priv, vps->dmabuf, + return vmw_bo_pin_in_vram(dev_priv, vps->bo, true); } - vmw_dmabuf_unreference(&vps->dmabuf); - vps->dmabuf_size = 0; + vmw_bo_unreference(&vps->bo); + vps->bo_size = 0; } - vps->dmabuf = kzalloc(sizeof(*vps->dmabuf), GFP_KERNEL); - if (!vps->dmabuf) + vps->bo = kzalloc(sizeof(*vps->bo), GFP_KERNEL); + if (!vps->bo) return -ENOMEM; vmw_svga_enable(dev_priv); @@ -473,22 +481,22 @@ vmw_sou_primary_plane_prepare_fb(struct drm_plane *plane, * resume the overlays, this is preferred to failing to alloc. */ vmw_overlay_pause_all(dev_priv); - ret = vmw_dmabuf_init(dev_priv, vps->dmabuf, size, + ret = vmw_bo_init(dev_priv, vps->bo, size, &vmw_vram_ne_placement, - false, &vmw_dmabuf_bo_free); + false, &vmw_bo_bo_free); vmw_overlay_resume_all(dev_priv); if (ret) { - vps->dmabuf = NULL; /* vmw_dmabuf_init frees on error */ + vps->bo = NULL; /* vmw_bo_init frees on error */ return ret; } - vps->dmabuf_size = size; + vps->bo_size = size; /* * TTM already thinks the buffer is pinned, but make sure the * pin_count is upped. 
*/ - return vmw_dmabuf_pin_in_vram(dev_priv, vps->dmabuf, true); + return vmw_bo_pin_in_vram(dev_priv, vps->bo, true); } @@ -512,10 +520,10 @@ vmw_sou_primary_plane_atomic_update(struct drm_plane *plane, vclips.w = crtc->mode.hdisplay; vclips.h = crtc->mode.vdisplay; - if (vfb->dmabuf) - ret = vmw_kms_sou_do_dmabuf_dirty(dev_priv, vfb, NULL, - &vclips, 1, 1, true, - &fence, crtc); + if (vfb->bo) + ret = vmw_kms_sou_do_bo_dirty(dev_priv, vfb, NULL, + &vclips, 1, 1, true, + &fence, crtc); else ret = vmw_kms_sou_do_surface_dirty(dev_priv, vfb, NULL, &vclips, NULL, 0, 0, @@ -527,8 +535,6 @@ vmw_sou_primary_plane_atomic_update(struct drm_plane *plane, */ if (ret != 0) DRM_ERROR("Failed to update screen.\n"); - - crtc->primary->fb = plane->state->fb; } else { /* * When disabling a plane, CRTC and FB should always be NULL @@ -697,7 +703,7 @@ static int vmw_sou_init(struct vmw_private *dev_priv, unsigned unit) goto err_free_connector; } - (void) drm_mode_connector_attach_encoder(connector, encoder); + (void) drm_connector_attach_encoder(connector, encoder); encoder->possible_crtcs = (1 << unit); encoder->possible_clones = 0; @@ -777,11 +783,11 @@ int vmw_kms_sou_init_display(struct vmw_private *dev_priv) return 0; } -static int do_dmabuf_define_gmrfb(struct vmw_private *dev_priv, +static int do_bo_define_gmrfb(struct vmw_private *dev_priv, struct vmw_framebuffer *framebuffer) { - struct vmw_dma_buffer *buf = - container_of(framebuffer, struct vmw_framebuffer_dmabuf, + struct vmw_buffer_object *buf = + container_of(framebuffer, struct vmw_framebuffer_bo, base)->buffer; int depth = framebuffer->base.format->depth; struct { @@ -972,13 +978,13 @@ int vmw_kms_sou_do_surface_dirty(struct vmw_private *dev_priv, } /** - * vmw_sou_dmabuf_fifo_commit - Callback to submit a set of readback clips. + * vmw_sou_bo_fifo_commit - Callback to submit a set of readback clips. * * @dirty: The closure structure. * * Commits a previously built command buffer of readback clips. */ -static void vmw_sou_dmabuf_fifo_commit(struct vmw_kms_dirty *dirty) +static void vmw_sou_bo_fifo_commit(struct vmw_kms_dirty *dirty) { if (!dirty->num_hits) { vmw_fifo_commit(dirty->dev_priv, 0); @@ -986,20 +992,20 @@ static void vmw_sou_dmabuf_fifo_commit(struct vmw_kms_dirty *dirty) } vmw_fifo_commit(dirty->dev_priv, - sizeof(struct vmw_kms_sou_dmabuf_blit) * + sizeof(struct vmw_kms_sou_bo_blit) * dirty->num_hits); } /** - * vmw_sou_dmabuf_clip - Callback to encode a readback cliprect. + * vmw_sou_bo_clip - Callback to encode a readback cliprect. * * @dirty: The closure structure * * Encodes a BLIT_GMRFB_TO_SCREEN cliprect. */ -static void vmw_sou_dmabuf_clip(struct vmw_kms_dirty *dirty) +static void vmw_sou_bo_clip(struct vmw_kms_dirty *dirty) { - struct vmw_kms_sou_dmabuf_blit *blit = dirty->cmd; + struct vmw_kms_sou_bo_blit *blit = dirty->cmd; blit += dirty->num_hits; blit->header = SVGA_CMD_BLIT_GMRFB_TO_SCREEN; @@ -1014,10 +1020,10 @@ static void vmw_sou_dmabuf_clip(struct vmw_kms_dirty *dirty) } /** - * vmw_kms_do_dmabuf_dirty - Dirty part of a dma-buffer backed framebuffer + * vmw_kms_do_bo_dirty - Dirty part of a buffer-object backed framebuffer * * @dev_priv: Pointer to the device private structure. - * @framebuffer: Pointer to the dma-buffer backed framebuffer. + * @framebuffer: Pointer to the buffer-object backed framebuffer. * @clips: Array of clip rects. * @vclips: Alternate array of clip rects. Either @clips or @vclips must * be NULL. 
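The prepare_fb change above settles into a reuse-or-reallocate policy: if a buffer object is already attached and its size still matches the new framebuffer, it is merely re-pinned (temporarily raising the pin count to 2 until cleanup_fb() runs); only on a size mismatch is it released and reallocated. A standalone sketch of that policy, with malloc standing in for vmw_bo_init and every name hypothetical:

#include <stdlib.h>

struct backing {		/* stand-in for a pinned buffer object */
	size_t size;
	void *mem;
};

/* Reuse @old when the size still matches (the driver would just re-pin
 * it); otherwise release it and allocate a replacement. */
static struct backing *prepare_backing(struct backing *old, size_t size)
{
	struct backing *bo;

	if (old && old->size == size)
		return old;

	if (old) {
		free(old->mem);
		free(old);
	}

	bo = calloc(1, sizeof(*bo));
	if (!bo)
		return NULL;

	bo->mem = malloc(size);
	if (!bo->mem) {
		free(bo);
		return NULL;
	}
	bo->size = size;
	return bo;
}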
@@ -1027,12 +1033,12 @@ static void vmw_sou_dmabuf_clip(struct vmw_kms_dirty *dirty) * @out_fence: If non-NULL, will return a ref-counted pointer to a * struct vmw_fence_obj. The returned fence pointer may be NULL in which * case the device has already synchronized. - * @crtc: If crtc is passed, perform dmabuf dirty on that crtc only. + * @crtc: If crtc is passed, perform bo dirty on that crtc only. * * Returns 0 on success, negative error code on failure. -ERESTARTSYS if * interrupted. */ -int vmw_kms_sou_do_dmabuf_dirty(struct vmw_private *dev_priv, +int vmw_kms_sou_do_bo_dirty(struct vmw_private *dev_priv, struct vmw_framebuffer *framebuffer, struct drm_clip_rect *clips, struct drm_vmw_rect *vclips, @@ -1041,8 +1047,8 @@ int vmw_kms_sou_do_dmabuf_dirty(struct vmw_private *dev_priv, struct vmw_fence_obj **out_fence, struct drm_crtc *crtc) { - struct vmw_dma_buffer *buf = - container_of(framebuffer, struct vmw_framebuffer_dmabuf, + struct vmw_buffer_object *buf = + container_of(framebuffer, struct vmw_framebuffer_bo, base)->buffer; struct vmw_kms_dirty dirty; int ret; @@ -1052,14 +1058,14 @@ int vmw_kms_sou_do_dmabuf_dirty(struct vmw_private *dev_priv, if (ret) return ret; - ret = do_dmabuf_define_gmrfb(dev_priv, framebuffer); + ret = do_bo_define_gmrfb(dev_priv, framebuffer); if (unlikely(ret != 0)) goto out_revert; dirty.crtc = crtc; - dirty.fifo_commit = vmw_sou_dmabuf_fifo_commit; - dirty.clip = vmw_sou_dmabuf_clip; - dirty.fifo_reserve_size = sizeof(struct vmw_kms_sou_dmabuf_blit) * + dirty.fifo_commit = vmw_sou_bo_fifo_commit; + dirty.clip = vmw_sou_bo_clip; + dirty.fifo_reserve_size = sizeof(struct vmw_kms_sou_bo_blit) * num_clips; ret = vmw_kms_helper_dirty(dev_priv, framebuffer, clips, vclips, 0, 0, num_clips, increment, &dirty); @@ -1118,12 +1124,12 @@ static void vmw_sou_readback_clip(struct vmw_kms_dirty *dirty) /** * vmw_kms_sou_readback - Perform a readback from the screen object system to - * a dma-buffer backed framebuffer. + * a buffer-object backed framebuffer. * * @dev_priv: Pointer to the device private structure. * @file_priv: Pointer to a struct drm_file identifying the caller. * Must be set to NULL if @user_fence_rep is NULL. - * @vfb: Pointer to the dma-buffer backed framebuffer. + * @vfb: Pointer to the buffer-object backed framebuffer. * @user_fence_rep: User-space provided structure for fence information. * Must be set to non-NULL if @file_priv is non-NULL. * @vclips: Array of clip rects. @@ -1141,8 +1147,8 @@ int vmw_kms_sou_readback(struct vmw_private *dev_priv, uint32_t num_clips, struct drm_crtc *crtc) { - struct vmw_dma_buffer *buf = - container_of(vfb, struct vmw_framebuffer_dmabuf, base)->buffer; + struct vmw_buffer_object *buf = + container_of(vfb, struct vmw_framebuffer_bo, base)->buffer; struct vmw_kms_dirty dirty; int ret; @@ -1151,7 +1157,7 @@ int vmw_kms_sou_readback(struct vmw_private *dev_priv, if (ret) return ret; - ret = do_dmabuf_define_gmrfb(dev_priv, vfb); + ret = do_bo_define_gmrfb(dev_priv, vfb); if (unlikely(ret != 0)) goto out_revert; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c index 73b8e9a16368..fe4842ca3b6e 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c @@ -1,7 +1,7 @@ +// SPDX-License-Identifier: GPL-2.0 OR MIT /************************************************************************** * - * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA - * All Rights Reserved. 
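The renamed dirty helpers above all follow the same closure protocol: the caller reserves worst-case FIFO space for num_clips commands, vmw_kms_helper_dirty() calls .clip once per intersecting cliprect (which encodes a command and bumps num_hits), and .fifo_commit submits only the bytes actually encoded. A hedged miniature of that contract, with all types and callbacks illustrative rather than the driver's own:

/* Hypothetical miniature of the vmw_kms_dirty closure contract. */
struct example_dirty {
	unsigned int num_hits;		/* commands actually encoded */
	size_t per_clip_cmd_size;	/* worst-case bytes per cliprect */
	void (*clip)(struct example_dirty *d, int x1, int y1, int x2, int y2);
	void (*commit)(struct example_dirty *d);
};

static void example_run_dirty(struct example_dirty *d,
			      const int (*rects)[4], unsigned int n)
{
	unsigned int i;

	/* Reserve n * per_clip_cmd_size up front; .commit later submits
	 * only num_hits * per_clip_cmd_size. */
	for (i = 0; i < n; i++)
		d->clip(d, rects[i][0], rects[i][1],
			rects[i][2], rects[i][3]);
	d->commit(d);
}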
+ * Copyright 2009-2015 VMware, Inc., Palo Alto, CA., USA * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the @@ -159,7 +159,7 @@ static int vmw_gb_shader_init(struct vmw_private *dev_priv, SVGA3dShaderType type, uint8_t num_input_sig, uint8_t num_output_sig, - struct vmw_dma_buffer *byte_code, + struct vmw_buffer_object *byte_code, void (*res_free) (struct vmw_resource *res)) { struct vmw_shader *shader = vmw_res_to_shader(res); @@ -178,7 +178,7 @@ static int vmw_gb_shader_init(struct vmw_private *dev_priv, res->backup_size = size; if (byte_code) { - res->backup = vmw_dmabuf_reference(byte_code); + res->backup = vmw_bo_reference(byte_code); res->backup_offset = offset; } shader->size = size; @@ -306,7 +306,7 @@ static int vmw_gb_shader_unbind(struct vmw_resource *res, (void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL); - vmw_fence_single_bo(val_buf->bo, fence); + vmw_bo_fence_single(val_buf->bo, fence); if (likely(fence != NULL)) vmw_fence_obj_unreference(&fence); @@ -537,7 +537,7 @@ static int vmw_dx_shader_unbind(struct vmw_resource *res, (void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL); - vmw_fence_single_bo(val_buf->bo, fence); + vmw_bo_fence_single(val_buf->bo, fence); if (likely(fence != NULL)) vmw_fence_obj_unreference(&fence); @@ -723,7 +723,7 @@ int vmw_shader_destroy_ioctl(struct drm_device *dev, void *data, } static int vmw_user_shader_alloc(struct vmw_private *dev_priv, - struct vmw_dma_buffer *buffer, + struct vmw_buffer_object *buffer, size_t shader_size, size_t offset, SVGA3dShaderType shader_type, @@ -801,7 +801,7 @@ out: static struct vmw_resource *vmw_shader_alloc(struct vmw_private *dev_priv, - struct vmw_dma_buffer *buffer, + struct vmw_buffer_object *buffer, size_t shader_size, size_t offset, SVGA3dShaderType shader_type) @@ -862,12 +862,12 @@ static int vmw_shader_define(struct drm_device *dev, struct drm_file *file_priv, { struct vmw_private *dev_priv = vmw_priv(dev); struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; - struct vmw_dma_buffer *buffer = NULL; + struct vmw_buffer_object *buffer = NULL; SVGA3dShaderType shader_type; int ret; if (buffer_handle != SVGA3D_INVALID_ID) { - ret = vmw_user_dmabuf_lookup(tfile, buffer_handle, + ret = vmw_user_bo_lookup(tfile, buffer_handle, &buffer, NULL); if (unlikely(ret != 0)) { DRM_ERROR("Could not find buffer for shader " @@ -906,7 +906,7 @@ static int vmw_shader_define(struct drm_device *dev, struct drm_file *file_priv, ttm_read_unlock(&dev_priv->reservation_sem); out_bad_arg: - vmw_dmabuf_unreference(&buffer); + vmw_bo_unreference(&buffer); return ret; } @@ -983,7 +983,7 @@ int vmw_compat_shader_add(struct vmw_private *dev_priv, struct list_head *list) { struct ttm_operation_ctx ctx = { false, true }; - struct vmw_dma_buffer *buf; + struct vmw_buffer_object *buf; struct ttm_bo_kmap_obj map; bool is_iomem; int ret; @@ -997,8 +997,8 @@ int vmw_compat_shader_add(struct vmw_private *dev_priv, if (unlikely(!buf)) return -ENOMEM; - ret = vmw_dmabuf_init(dev_priv, buf, size, &vmw_sys_ne_placement, - true, vmw_dmabuf_bo_free); + ret = vmw_bo_init(dev_priv, buf, size, &vmw_sys_ne_placement, + true, vmw_bo_bo_free); if (unlikely(ret != 0)) goto out; @@ -1031,7 +1031,7 @@ int vmw_compat_shader_add(struct vmw_private *dev_priv, res, list); vmw_resource_unreference(&res); no_reserve: - vmw_dmabuf_unreference(&buf); + vmw_bo_unreference(&buf); out: return ret; } diff --git 
a/drivers/gpu/drm/vmwgfx/vmwgfx_simple_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_simple_resource.c index a0cb310665cc..6ebc5affde14 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_simple_resource.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_simple_resource.c @@ -1,7 +1,7 @@ +// SPDX-License-Identifier: GPL-2.0 OR MIT /************************************************************************** * - * Copyright © 2016 VMware, Inc., Palo Alto, CA., USA - * All Rights Reserved. + * Copyright 2016 VMware, Inc., Palo Alto, CA., USA * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_so.c b/drivers/gpu/drm/vmwgfx/vmwgfx_so.c index d3573c37c436..e9b6b7baa009 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_so.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_so.c @@ -1,6 +1,6 @@ +// SPDX-License-Identifier: GPL-2.0 OR MIT /************************************************************************** - * Copyright © 2014-2015 VMware, Inc., Palo Alto, CA., USA - * All Rights Reserved. + * Copyright 2014-2015 VMware, Inc., Palo Alto, CA., USA * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_so.h b/drivers/gpu/drm/vmwgfx/vmwgfx_so.h index 268738387b5e..b80c7252f2fd 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_so.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_so.h @@ -1,6 +1,6 @@ +/* SPDX-License-Identifier: GPL-2.0 OR MIT */ /************************************************************************** - * Copyright © 2014-2015 VMware, Inc., Palo Alto, CA., USA - * All Rights Reserved. + * Copyright 2014-2015 VMware, Inc., Palo Alto, CA., USA * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c index 67331f01ef32..93f6b96ca7bb 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c @@ -1,7 +1,7 @@ +// SPDX-License-Identifier: GPL-2.0 OR MIT /****************************************************************************** * - * COPYRIGHT © 2014-2015 VMware, Inc., Palo Alto, CA., USA - * All Rights Reserved. + * COPYRIGHT (C) 2014-2015 VMware, Inc., Palo Alto, CA., USA * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the @@ -44,7 +44,7 @@ enum stdu_content_type { SAME_AS_DISPLAY = 0, SEPARATE_SURFACE, - SEPARATE_DMA + SEPARATE_BO }; /** @@ -58,7 +58,7 @@ enum stdu_content_type { * @bottom: Bottom side of bounding box. * @fb_left: Left side of the framebuffer/content bounding box * @fb_top: Top of the framebuffer/content bounding box - * @buf: DMA buffer when DMA-ing between buffer and screen targets. + * @buf: buffer object when DMA-ing between buffer and screen targets. * @sid: Surface ID when copying between surface and screen targets. */ struct vmw_stdu_dirty { @@ -68,7 +68,7 @@ struct vmw_stdu_dirty { s32 fb_left, fb_top; u32 pitch; union { - struct vmw_dma_buffer *buf; + struct vmw_buffer_object *buf; u32 sid; }; }; @@ -178,13 +178,9 @@ static int vmw_stdu_define_st(struct vmw_private *dev_priv, cmd->body.height = mode->vdisplay; cmd->body.flags = (0 == cmd->body.stid) ? 
SVGA_STFLAG_PRIMARY : 0; cmd->body.dpi = 0; - if (stdu->base.is_implicit) { - cmd->body.xRoot = crtc_x; - cmd->body.yRoot = crtc_y; - } else { - cmd->body.xRoot = stdu->base.gui_x; - cmd->body.yRoot = stdu->base.gui_y; - } + cmd->body.xRoot = crtc_x; + cmd->body.yRoot = crtc_y; + stdu->base.set_gui_x = cmd->body.xRoot; stdu->base.set_gui_y = cmd->body.yRoot; @@ -374,11 +370,14 @@ static void vmw_stdu_crtc_mode_set_nofb(struct drm_crtc *crtc) { struct vmw_private *dev_priv; struct vmw_screen_target_display_unit *stdu; - int ret; - + struct drm_connector_state *conn_state; + struct vmw_connector_state *vmw_conn_state; + int x, y, ret; - stdu = vmw_crtc_to_stdu(crtc); + stdu = vmw_crtc_to_stdu(crtc); dev_priv = vmw_priv(crtc->dev); + conn_state = stdu->base.connector.state; + vmw_conn_state = vmw_connector_state_to_vcs(conn_state); if (stdu->defined) { ret = vmw_stdu_bind_st(dev_priv, stdu, NULL); @@ -397,8 +396,16 @@ static void vmw_stdu_crtc_mode_set_nofb(struct drm_crtc *crtc) if (!crtc->state->enable) return; + if (stdu->base.is_implicit) { + x = crtc->x; + y = crtc->y; + } else { + x = vmw_conn_state->gui_x; + y = vmw_conn_state->gui_y; + } + vmw_svga_enable(dev_priv); - ret = vmw_stdu_define_st(dev_priv, stdu, &crtc->mode, crtc->x, crtc->y); + ret = vmw_stdu_define_st(dev_priv, stdu, &crtc->mode, x, y); if (ret) DRM_ERROR("Failed to define Screen Target of size %dx%d\n", @@ -414,6 +421,7 @@ static void vmw_stdu_crtc_helper_prepare(struct drm_crtc *crtc) static void vmw_stdu_crtc_atomic_enable(struct drm_crtc *crtc, struct drm_crtc_state *old_state) { + struct drm_plane_state *plane_state = crtc->primary->state; struct vmw_private *dev_priv; struct vmw_screen_target_display_unit *stdu; struct vmw_framebuffer *vfb; @@ -422,7 +430,7 @@ static void vmw_stdu_crtc_atomic_enable(struct drm_crtc *crtc, stdu = vmw_crtc_to_stdu(crtc); dev_priv = vmw_priv(crtc->dev); - fb = crtc->primary->fb; + fb = plane_state->fb; vfb = (fb) ? vmw_framebuffer_to_vfb(fb) : NULL; @@ -507,14 +515,14 @@ static int vmw_stdu_crtc_page_flip(struct drm_crtc *crtc, /** - * vmw_stdu_dmabuf_clip - Callback to encode a suface DMA command cliprect + * vmw_stdu_bo_clip - Callback to encode a surface DMA command cliprect * * @dirty: The closure structure. * * Encodes a surface DMA command cliprect and updates the bounding box * for the DMA. */ -static void vmw_stdu_dmabuf_clip(struct vmw_kms_dirty *dirty) +static void vmw_stdu_bo_clip(struct vmw_kms_dirty *dirty) { struct vmw_stdu_dirty *ddirty = container_of(dirty, struct vmw_stdu_dirty, base); @@ -542,14 +550,14 @@ static void vmw_stdu_dmabuf_clip(struct vmw_kms_dirty *dirty) } /** - * vmw_stdu_dmabuf_fifo_commit - Callback to fill in and submit a DMA command. + * vmw_stdu_bo_fifo_commit - Callback to fill in and submit a DMA command. * * @dirty: The closure structure. * * Fills in the missing fields in a DMA command, and optionally encodes * a screen target update command, depending on transfer direction. */ -static void vmw_stdu_dmabuf_fifo_commit(struct vmw_kms_dirty *dirty) +static void vmw_stdu_bo_fifo_commit(struct vmw_kms_dirty *dirty) { struct vmw_stdu_dirty *ddirty = container_of(dirty, struct vmw_stdu_dirty, base); @@ -593,13 +601,13 @@ static void vmw_stdu_dmabuf_fifo_commit(struct vmw_kms_dirty *dirty) /** - * vmw_stdu_dmabuf_cpu_clip - Callback to encode a CPU blit + * vmw_stdu_bo_cpu_clip - Callback to encode a CPU blit * * @dirty: The closure structure. * * This function calculates the bounding box for all the incoming clips.
*/ -static void vmw_stdu_dmabuf_cpu_clip(struct vmw_kms_dirty *dirty) +static void vmw_stdu_bo_cpu_clip(struct vmw_kms_dirty *dirty) { struct vmw_stdu_dirty *ddirty = container_of(dirty, struct vmw_stdu_dirty, base); @@ -623,14 +631,14 @@ static void vmw_stdu_dmabuf_cpu_clip(struct vmw_kms_dirty *dirty) /** - * vmw_stdu_dmabuf_cpu_commit - Callback to do a CPU blit from DMAbuf + * vmw_stdu_bo_cpu_commit - Callback to do a CPU blit from buffer object * * @dirty: The closure structure. * * For the special case when we cannot create a proxy surface in a * 2D VM, we have to do a CPU blit ourselves. */ -static void vmw_stdu_dmabuf_cpu_commit(struct vmw_kms_dirty *dirty) +static void vmw_stdu_bo_cpu_commit(struct vmw_kms_dirty *dirty) { struct vmw_stdu_dirty *ddirty = container_of(dirty, struct vmw_stdu_dirty, base); @@ -651,7 +659,7 @@ static void vmw_stdu_dmabuf_cpu_commit(struct vmw_kms_dirty *dirty) if (width == 0 || height == 0) return; - /* Assume we are blitting from Guest (dmabuf) to Host (display_srf) */ + /* Assume we are blitting from Guest (bo) to Host (display_srf) */ dst_pitch = stdu->display_srf->base_size.width * stdu->cpp; dst_bo = &stdu->display_srf->res.backup->base; dst_offset = ddirty->top * dst_pitch + ddirty->left * stdu->cpp; @@ -711,13 +719,13 @@ out_cleanup: } /** - * vmw_kms_stdu_dma - Perform a DMA transfer between a dma-buffer backed + * vmw_kms_stdu_dma - Perform a DMA transfer between a buffer-object backed * framebuffer and the screen target system. * * @dev_priv: Pointer to the device private structure. * @file_priv: Pointer to a struct drm-file identifying the caller. May be * set to NULL, but then @user_fence_rep must also be set to NULL. - * @vfb: Pointer to the dma-buffer backed framebuffer. + * @vfb: Pointer to the buffer-object backed framebuffer. * @clips: Array of clip rects. Either @clips or @vclips must be NULL. * @vclips: Alternate array of clip rects. Either @clips or @vclips must * be NULL. 
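The CPU-blit fallback above reduces each copy to pitch arithmetic: a rectangle's byte offset in a linear buffer is top * pitch + left * cpp, computed once per buffer and then advanced a full pitch per row, which is the same offset formula vmw_stdu_bo_cpu_commit() uses. A standalone sketch of that math (all names hypothetical):

#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* Copy a width x height pixel rectangle between two linear buffers;
 * @cpp is bytes per pixel, and each buffer has its own row pitch. */
static void blit_rect(uint8_t *dst, size_t dst_pitch,
		      const uint8_t *src, size_t src_pitch,
		      unsigned int left, unsigned int top,
		      unsigned int width, unsigned int height,
		      unsigned int cpp)
{
	size_t dst_off = (size_t)top * dst_pitch + (size_t)left * cpp;
	size_t src_off = (size_t)top * src_pitch + (size_t)left * cpp;
	unsigned int row;

	for (row = 0; row < height; row++) {
		memcpy(dst + dst_off, src + src_off, (size_t)width * cpp);
		dst_off += dst_pitch;	/* next row in each buffer */
		src_off += src_pitch;
	}
}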
@@ -746,8 +754,8 @@ int vmw_kms_stdu_dma(struct vmw_private *dev_priv, bool interruptible, struct drm_crtc *crtc) { - struct vmw_dma_buffer *buf = - container_of(vfb, struct vmw_framebuffer_dmabuf, base)->buffer; + struct vmw_buffer_object *buf = + container_of(vfb, struct vmw_framebuffer_bo, base)->buffer; struct vmw_stdu_dirty ddirty; int ret; bool cpu_blit = !(dev_priv->capabilities & SVGA_CAP_3D); @@ -769,8 +777,8 @@ int vmw_kms_stdu_dma(struct vmw_private *dev_priv, ddirty.fb_left = ddirty.fb_top = S32_MAX; ddirty.pitch = vfb->base.pitches[0]; ddirty.buf = buf; - ddirty.base.fifo_commit = vmw_stdu_dmabuf_fifo_commit; - ddirty.base.clip = vmw_stdu_dmabuf_clip; + ddirty.base.fifo_commit = vmw_stdu_bo_fifo_commit; + ddirty.base.clip = vmw_stdu_bo_clip; ddirty.base.fifo_reserve_size = sizeof(struct vmw_stdu_dma) + num_clips * sizeof(SVGA3dCopyBox) + sizeof(SVGA3dCmdSurfaceDMASuffix); @@ -779,8 +787,8 @@ int vmw_kms_stdu_dma(struct vmw_private *dev_priv, if (cpu_blit) { - ddirty.base.fifo_commit = vmw_stdu_dmabuf_cpu_commit; - ddirty.base.clip = vmw_stdu_dmabuf_cpu_clip; + ddirty.base.fifo_commit = vmw_stdu_bo_cpu_commit; + ddirty.base.clip = vmw_stdu_bo_cpu_clip; ddirty.base.fifo_reserve_size = 0; } @@ -926,7 +934,7 @@ int vmw_kms_stdu_surface_dirty(struct vmw_private *dev_priv, if (ret) return ret; - if (vfbs->is_dmabuf_proxy) { + if (vfbs->is_bo_proxy) { ret = vmw_kms_update_proxy(srf, clips, num_clips, inc); if (ret) goto out_finish; @@ -1074,7 +1082,7 @@ vmw_stdu_primary_plane_cleanup_fb(struct drm_plane *plane, * @new_state: info on the new plane state, including the FB * * This function allocates a new display surface if the content is - * backed by a DMA. The display surface is pinned here, and it'll + * backed by a buffer object. The display surface is pinned here, and it'll * be unpinned in .cleanup_fb() * * Returns 0 on success @@ -1104,13 +1112,13 @@ vmw_stdu_primary_plane_prepare_fb(struct drm_plane *plane, } vfb = vmw_framebuffer_to_vfb(new_fb); - new_vfbs = (vfb->dmabuf) ? NULL : vmw_framebuffer_to_vfbs(new_fb); + new_vfbs = (vfb->bo) ? 
NULL : vmw_framebuffer_to_vfbs(new_fb); if (new_vfbs && new_vfbs->surface->base_size.width == hdisplay && new_vfbs->surface->base_size.height == vdisplay) new_content_type = SAME_AS_DISPLAY; - else if (vfb->dmabuf) - new_content_type = SEPARATE_DMA; + else if (vfb->bo) + new_content_type = SEPARATE_BO; else new_content_type = SEPARATE_SURFACE; @@ -1123,10 +1131,10 @@ vmw_stdu_primary_plane_prepare_fb(struct drm_plane *plane, display_base_size.depth = 1; /* - * If content buffer is a DMA buf, then we have to construct - * surface info + * If content buffer is a buffer object, then we have to + * construct surface info */ - if (new_content_type == SEPARATE_DMA) { + if (new_content_type == SEPARATE_BO) { switch (new_fb->format->cpp[0]*8) { case 32: @@ -1149,6 +1157,9 @@ vmw_stdu_primary_plane_prepare_fb(struct drm_plane *plane, content_srf.flags = 0; content_srf.mip_levels[0] = 1; content_srf.multisample_count = 0; + content_srf.multisample_pattern = + SVGA3D_MS_PATTERN_NONE; + content_srf.quality_level = SVGA3D_MS_QUALITY_NONE; } else { content_srf = *new_vfbs->surface; } @@ -1177,6 +1188,8 @@ vmw_stdu_primary_plane_prepare_fb(struct drm_plane *plane, content_srf.multisample_count, 0, display_base_size, + content_srf.multisample_pattern, + content_srf.quality_level, &vps->surf); if (ret != 0) { DRM_ERROR("Couldn't allocate STDU surface.\n"); @@ -1211,12 +1224,12 @@ vmw_stdu_primary_plane_prepare_fb(struct drm_plane *plane, vps->content_fb_type = new_content_type; /* - * This should only happen if the DMA buf is too large to create a + * This should only happen if the buffer object is too large to create a * proxy surface for. - * If we are a 2D VM with a DMA buffer then we have to use CPU blit + * If we are a 2D VM with a buffer object then we have to use CPU blit * so cache these mappings */ - if (vps->content_fb_type == SEPARATE_DMA && + if (vps->content_fb_type == SEPARATE_BO && !(dev_priv->capabilities & SVGA_CAP_3D)) vps->cpp = new_fb->pitches[0] / new_fb->width; @@ -1275,7 +1288,7 @@ vmw_stdu_primary_plane_atomic_update(struct drm_plane *plane, if (ret) DRM_ERROR("Failed to bind surface to STDU.\n"); - if (vfb->dmabuf) + if (vfb->bo) ret = vmw_kms_stdu_dma(dev_priv, NULL, vfb, NULL, NULL, &vclips, 1, 1, true, false, crtc); @@ -1285,8 +1298,6 @@ vmw_stdu_primary_plane_atomic_update(struct drm_plane *plane, 1, 1, NULL, crtc); if (ret) DRM_ERROR("Failed to update STDU.\n"); - - crtc->primary->fb = plane->state->fb; } else { crtc = old_state->crtc; stdu = vmw_crtc_to_stdu(crtc); @@ -1487,7 +1498,7 @@ static int vmw_stdu_init(struct vmw_private *dev_priv, unsigned unit) goto err_free_connector; } - (void) drm_mode_connector_attach_encoder(connector, encoder); + (void) drm_connector_attach_encoder(connector, encoder); encoder->possible_crtcs = (1 << unit); encoder->possible_clones = 0; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c index b236c48bf265..e125233e074b 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c @@ -1,7 +1,7 @@ +// SPDX-License-Identifier: GPL-2.0 OR MIT /************************************************************************** * - * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA - * All Rights Reserved. 
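The screen-target prepare_fb above boils down to a three-way classification: a surface-backed framebuffer whose size matches the display is scanned out directly (SAME_AS_DISPLAY); a buffer-object backed framebuffer needs either a proxy surface or, on 2D-only devices, a CPU blit (SEPARATE_BO); everything else is blitted surface-to-surface (SEPARATE_SURFACE). Reduced to a hedged standalone sketch:

enum content_type {	/* mirrors stdu_content_type above */
	SAME_AS_DISPLAY = 0,
	SEPARATE_SURFACE,
	SEPARATE_BO,
};

/* Classify a framebuffer for the screen-target path; the parameters
 * are illustrative stand-ins for the driver's state. */
static enum content_type classify_fb(int bo_backed,
				     unsigned int fb_w, unsigned int fb_h,
				     unsigned int disp_w, unsigned int disp_h)
{
	if (!bo_backed && fb_w == disp_w && fb_h == disp_h)
		return SAME_AS_DISPLAY;	/* scan out the surface directly */
	if (bo_backed)
		return SEPARATE_BO;	/* proxy surface or CPU blit */
	return SEPARATE_SURFACE;	/* surface-to-surface blit */
}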
+ * Copyright 2009-2015 VMware, Inc., Palo Alto, CA., USA * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the @@ -33,6 +33,10 @@ #include "vmwgfx_binding.h" #include "device_include/svga3d_surfacedefs.h" +#define SVGA3D_FLAGS_64(upper32, lower32) (((uint64_t)upper32 << 32) | lower32) +#define SVGA3D_FLAGS_UPPER_32(svga3d_flags) (svga3d_flags >> 32) +#define SVGA3D_FLAGS_LOWER_32(svga3d_flags) \ + (svga3d_flags & ((uint64_t)U32_MAX)) /** * struct vmw_user_surface - User-space visible surface resource @@ -81,7 +85,16 @@ static int vmw_gb_surface_unbind(struct vmw_resource *res, bool readback, struct ttm_validate_buffer *val_buf); static int vmw_gb_surface_destroy(struct vmw_resource *res); - +static int +vmw_gb_surface_define_internal(struct drm_device *dev, + struct drm_vmw_gb_surface_create_ext_req *req, + struct drm_vmw_gb_surface_create_rep *rep, + struct drm_file *file_priv); +static int +vmw_gb_surface_reference_internal(struct drm_device *dev, + struct drm_vmw_surface_arg *req, + struct drm_vmw_gb_surface_ref_ext_rep *rep, + struct drm_file *file_priv); static const struct vmw_user_resource_conv user_surface_conv = { .object_type = VMW_RES_SURFACE, @@ -224,7 +237,12 @@ static void vmw_surface_define_encode(const struct vmw_surface *srf, cmd->header.id = SVGA_3D_CMD_SURFACE_DEFINE; cmd->header.size = cmd_len; cmd->body.sid = srf->res.id; + /* + * Downcast of surfaceFlags, which was upcast when received from + * user-space, since the driver internally stores flags as 64 bit. + * The legacy surface define supports only a 32-bit flag. + */ + cmd->body.surfaceFlags = (SVGA3dSurface1Flags)srf->flags; - cmd->body.surfaceFlags = srf->flags; cmd->body.format = srf->format; for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) cmd->body.face[i].numMipLevels = srf->mip_levels[i]; @@ -468,7 +486,7 @@ static int vmw_legacy_srf_dma(struct vmw_resource *res, (void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL); - vmw_fence_single_bo(val_buf->bo, fence); + vmw_bo_fence_single(val_buf->bo, fence); if (likely(fence != NULL)) vmw_fence_obj_unreference(&fence); @@ -760,7 +778,8 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data, srf = &user_srf->srf; res = &srf->res; - srf->flags = req->flags; + /* Driver internally stores as 64-bit flags */ + srf->flags = (SVGA3dSurfaceAllFlags)req->flags; srf->format = req->format; srf->scanout = req->scanout; @@ -785,6 +804,8 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data, srf->base_size = *srf->sizes; srf->autogen_filter = SVGA3D_TEX_FILTER_NONE; srf->multisample_count = 0; + srf->multisample_pattern = SVGA3D_MS_PATTERN_NONE; + srf->quality_level = SVGA3D_MS_QUALITY_NONE; cur_bo_offset = 0; cur_offset = srf->offsets; @@ -842,12 +863,12 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data, if (dev_priv->has_mob && req->shareable) { uint32_t backup_handle; - ret = vmw_user_dmabuf_alloc(dev_priv, tfile, - res->backup_size, - true, - &backup_handle, - &res->backup, - &user_srf->backup_base); + ret = vmw_user_bo_alloc(dev_priv, tfile, + res->backup_size, + true, + &backup_handle, + &res->backup, + &user_srf->backup_base); if (unlikely(ret != 0)) { vmw_resource_unreference(&res); goto out_unlock; @@ -990,7 +1011,8 @@ int vmw_surface_reference_ioctl(struct drm_device *dev, void *data, user_srf = container_of(base, struct vmw_user_surface, prime.base); srf = &user_srf->srf; - rep->flags = srf->flags; + /* Downcast of flags when sending back to
user space */ + rep->flags = (uint32_t)srf->flags; rep->format = srf->format; memcpy(rep->mip_levels, srf->mip_levels, sizeof(srf->mip_levels)); user_sizes = (struct drm_vmw_size __user *)(unsigned long) @@ -1031,6 +1053,10 @@ static int vmw_gb_surface_create(struct vmw_resource *res) SVGA3dCmdHeader header; SVGA3dCmdDefineGBSurface_v2 body; } *cmd2; + struct { + SVGA3dCmdHeader header; + SVGA3dCmdDefineGBSurface_v3 body; + } *cmd3; if (likely(res->id != -1)) return 0; @@ -1047,7 +1073,11 @@ static int vmw_gb_surface_create(struct vmw_resource *res) goto out_no_fifo; } - if (srf->array_size > 0) { + if (dev_priv->has_sm4_1 && srf->array_size > 0) { + cmd_id = SVGA_3D_CMD_DEFINE_GB_SURFACE_V3; + cmd_len = sizeof(cmd3->body); + submit_len = sizeof(*cmd3); + } else if (srf->array_size > 0) { /* has_dx checked on creation time. */ cmd_id = SVGA_3D_CMD_DEFINE_GB_SURFACE_V2; cmd_len = sizeof(cmd2->body); @@ -1060,6 +1090,7 @@ static int vmw_gb_surface_create(struct vmw_resource *res) cmd = vmw_fifo_reserve(dev_priv, submit_len); cmd2 = (typeof(cmd2))cmd; + cmd3 = (typeof(cmd3))cmd; if (unlikely(!cmd)) { DRM_ERROR("Failed reserving FIFO space for surface " "creation.\n"); @@ -1067,12 +1098,27 @@ static int vmw_gb_surface_create(struct vmw_resource *res) goto out_no_fifo; } - if (srf->array_size > 0) { + if (dev_priv->has_sm4_1 && srf->array_size > 0) { + cmd3->header.id = cmd_id; + cmd3->header.size = cmd_len; + cmd3->body.sid = srf->res.id; + cmd3->body.surfaceFlags = srf->flags; + cmd3->body.format = srf->format; + cmd3->body.numMipLevels = srf->mip_levels[0]; + cmd3->body.multisampleCount = srf->multisample_count; + cmd3->body.multisamplePattern = srf->multisample_pattern; + cmd3->body.qualityLevel = srf->quality_level; + cmd3->body.autogenFilter = srf->autogen_filter; + cmd3->body.size.width = srf->base_size.width; + cmd3->body.size.height = srf->base_size.height; + cmd3->body.size.depth = srf->base_size.depth; + cmd3->body.arraySize = srf->array_size; + } else if (srf->array_size > 0) { cmd2->header.id = cmd_id; cmd2->header.size = cmd_len; cmd2->body.sid = srf->res.id; cmd2->body.surfaceFlags = srf->flags; - cmd2->body.format = cpu_to_le32(srf->format); + cmd2->body.format = srf->format; cmd2->body.numMipLevels = srf->mip_levels[0]; cmd2->body.multisampleCount = srf->multisample_count; cmd2->body.autogenFilter = srf->autogen_filter; @@ -1085,7 +1131,7 @@ static int vmw_gb_surface_create(struct vmw_resource *res) cmd->header.size = cmd_len; cmd->body.sid = srf->res.id; cmd->body.surfaceFlags = srf->flags; - cmd->body.format = cpu_to_le32(srf->format); + cmd->body.format = srf->format; cmd->body.numMipLevels = srf->mip_levels[0]; cmd->body.multisampleCount = srf->multisample_count; cmd->body.autogenFilter = srf->autogen_filter; @@ -1210,7 +1256,7 @@ static int vmw_gb_surface_unbind(struct vmw_resource *res, (void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL); - vmw_fence_single_bo(val_buf->bo, fence); + vmw_bo_fence_single(val_buf->bo, fence); if (likely(fence != NULL)) vmw_fence_obj_unreference(&fence); @@ -1256,194 +1302,55 @@ static int vmw_gb_surface_destroy(struct vmw_resource *res) /** * vmw_gb_surface_define_ioctl - Ioctl function implementing - * the user surface define functionality. + * the user surface define functionality. * - * @dev: Pointer to a struct drm_device. - * @data: Pointer to data copied from / to user-space. - * @file_priv: Pointer to a drm file private structure. + * @dev: Pointer to a struct drm_device. 
+ * @data: Pointer to data copied from / to user-space. + * @file_priv: Pointer to a drm file private structure. */ int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { - struct vmw_private *dev_priv = vmw_priv(dev); - struct vmw_user_surface *user_srf; - struct vmw_surface *srf; - struct vmw_resource *res; - struct vmw_resource *tmp; union drm_vmw_gb_surface_create_arg *arg = (union drm_vmw_gb_surface_create_arg *)data; - struct drm_vmw_gb_surface_create_req *req = &arg->req; struct drm_vmw_gb_surface_create_rep *rep = &arg->rep; - struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; - int ret; - uint32_t size; - uint32_t backup_handle = 0; - - if (req->multisample_count != 0) - return -EINVAL; - - if (req->mip_levels > DRM_VMW_MAX_MIP_LEVELS) - return -EINVAL; + struct drm_vmw_gb_surface_create_ext_req req_ext; - if (unlikely(vmw_user_surface_size == 0)) - vmw_user_surface_size = ttm_round_pot(sizeof(*user_srf)) + - 128; - - size = vmw_user_surface_size + 128; - - /* Define a surface based on the parameters. */ - ret = vmw_surface_gb_priv_define(dev, - size, - req->svga3d_flags, - req->format, - req->drm_surface_flags & drm_vmw_surface_flag_scanout, - req->mip_levels, - req->multisample_count, - req->array_size, - req->base_size, - &srf); - if (unlikely(ret != 0)) - return ret; - - user_srf = container_of(srf, struct vmw_user_surface, srf); - if (drm_is_primary_client(file_priv)) - user_srf->master = drm_master_get(file_priv->master); + req_ext.base = arg->req; + req_ext.version = drm_vmw_gb_surface_v1; + req_ext.svga3d_flags_upper_32_bits = 0; + req_ext.multisample_pattern = SVGA3D_MS_PATTERN_NONE; + req_ext.quality_level = SVGA3D_MS_QUALITY_NONE; + req_ext.must_be_zero = 0; - ret = ttm_read_lock(&dev_priv->reservation_sem, true); - if (unlikely(ret != 0)) - return ret; - - res = &user_srf->srf.res; - - - if (req->buffer_handle != SVGA3D_INVALID_ID) { - ret = vmw_user_dmabuf_lookup(tfile, req->buffer_handle, - &res->backup, - &user_srf->backup_base); - if (ret == 0) { - if (res->backup->base.num_pages * PAGE_SIZE < - res->backup_size) { - DRM_ERROR("Surface backup buffer is too small.\n"); - vmw_dmabuf_unreference(&res->backup); - ret = -EINVAL; - goto out_unlock; - } else { - backup_handle = req->buffer_handle; - } - } - } else if (req->drm_surface_flags & drm_vmw_surface_flag_create_buffer) - ret = vmw_user_dmabuf_alloc(dev_priv, tfile, - res->backup_size, - req->drm_surface_flags & - drm_vmw_surface_flag_shareable, - &backup_handle, - &res->backup, - &user_srf->backup_base); - - if (unlikely(ret != 0)) { - vmw_resource_unreference(&res); - goto out_unlock; - } - - tmp = vmw_resource_reference(res); - ret = ttm_prime_object_init(tfile, res->backup_size, &user_srf->prime, - req->drm_surface_flags & - drm_vmw_surface_flag_shareable, - VMW_RES_SURFACE, - &vmw_user_surface_base_release, NULL); - - if (unlikely(ret != 0)) { - vmw_resource_unreference(&tmp); - vmw_resource_unreference(&res); - goto out_unlock; - } - - rep->handle = user_srf->prime.base.hash.key; - rep->backup_size = res->backup_size; - if (res->backup) { - rep->buffer_map_handle = - drm_vma_node_offset_addr(&res->backup->base.vma_node); - rep->buffer_size = res->backup->base.num_pages * PAGE_SIZE; - rep->buffer_handle = backup_handle; - } else { - rep->buffer_map_handle = 0; - rep->buffer_size = 0; - rep->buffer_handle = SVGA3D_INVALID_ID; - } - - vmw_resource_unreference(&res); - -out_unlock: - ttm_read_unlock(&dev_priv->reservation_sem); - return ret; + return 
vmw_gb_surface_define_internal(dev, &req_ext, rep, file_priv); } /** * vmw_gb_surface_reference_ioctl - Ioctl function implementing - * the user surface reference functionality. + * the user surface reference functionality. * - * @dev: Pointer to a struct drm_device. - * @data: Pointer to data copied from / to user-space. - * @file_priv: Pointer to a drm file private structure. + * @dev: Pointer to a struct drm_device. + * @data: Pointer to data copied from / to user-space. + * @file_priv: Pointer to a drm file private structure. */ int vmw_gb_surface_reference_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { - struct vmw_private *dev_priv = vmw_priv(dev); union drm_vmw_gb_surface_reference_arg *arg = (union drm_vmw_gb_surface_reference_arg *)data; struct drm_vmw_surface_arg *req = &arg->req; struct drm_vmw_gb_surface_ref_rep *rep = &arg->rep; - struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; - struct vmw_surface *srf; - struct vmw_user_surface *user_srf; - struct ttm_base_object *base; - uint32_t backup_handle; - int ret = -EINVAL; + struct drm_vmw_gb_surface_ref_ext_rep rep_ext; + int ret; + + ret = vmw_gb_surface_reference_internal(dev, req, &rep_ext, file_priv); - ret = vmw_surface_handle_reference(dev_priv, file_priv, req->sid, - req->handle_type, &base); if (unlikely(ret != 0)) return ret; - user_srf = container_of(base, struct vmw_user_surface, prime.base); - srf = &user_srf->srf; - if (!srf->res.backup) { - DRM_ERROR("Shared GB surface is missing a backup buffer.\n"); - goto out_bad_resource; - } - - mutex_lock(&dev_priv->cmdbuf_mutex); /* Protect res->backup */ - ret = vmw_user_dmabuf_reference(tfile, srf->res.backup, - &backup_handle); - mutex_unlock(&dev_priv->cmdbuf_mutex); - - if (unlikely(ret != 0)) { - DRM_ERROR("Could not add a reference to a GB surface " - "backup buffer.\n"); - (void) ttm_ref_object_base_unref(tfile, base->hash.key, - TTM_REF_USAGE); - goto out_bad_resource; - } - - rep->creq.svga3d_flags = srf->flags; - rep->creq.format = srf->format; - rep->creq.mip_levels = srf->mip_levels[0]; - rep->creq.drm_surface_flags = 0; - rep->creq.multisample_count = srf->multisample_count; - rep->creq.autogen_filter = srf->autogen_filter; - rep->creq.array_size = srf->array_size; - rep->creq.buffer_handle = backup_handle; - rep->creq.base_size = srf->base_size; - rep->crep.handle = user_srf->prime.base.hash.key; - rep->crep.backup_size = srf->res.backup_size; - rep->crep.buffer_handle = backup_handle; - rep->crep.buffer_map_handle = - drm_vma_node_offset_addr(&srf->res.backup->base.vma_node); - rep->crep.buffer_size = srf->res.backup->base.num_pages * PAGE_SIZE; - -out_bad_resource: - ttm_base_object_unref(&base); + rep->creq = rep_ext.creq.base; + rep->crep = rep_ext.crep; return ret; } @@ -1461,6 +1368,8 @@ out_bad_resource: * @multisample_count: * @array_size: Surface array size. * @size: width, height, depth of the surface requested + * @multisample_pattern: Multisampling pattern when msaa is supported + * @quality_level: Precision settings * @user_srf_out: allocated user_srf. Set to NULL on failure.
* * GB surfaces allocated by this function will not have a user mode handle, and @@ -1470,13 +1379,15 @@ out_bad_resource: */ int vmw_surface_gb_priv_define(struct drm_device *dev, uint32_t user_accounting_size, - uint32_t svga3d_flags, + SVGA3dSurfaceAllFlags svga3d_flags, SVGA3dSurfaceFormat format, bool for_scanout, uint32_t num_mip_levels, uint32_t multisample_count, uint32_t array_size, struct drm_vmw_size size, + SVGA3dMSPattern multisample_pattern, + SVGA3dMSQualityLevel quality_level, struct vmw_surface **srf_out) { struct vmw_private *dev_priv = vmw_priv(dev); @@ -1487,7 +1398,8 @@ int vmw_surface_gb_priv_define(struct drm_device *dev, }; struct vmw_surface *srf; int ret; - u32 num_layers; + u32 num_layers = 1; + u32 sample_count = 1; *srf_out = NULL; @@ -1562,19 +1474,23 @@ int vmw_surface_gb_priv_define(struct drm_device *dev, srf->autogen_filter = SVGA3D_TEX_FILTER_NONE; srf->array_size = array_size; srf->multisample_count = multisample_count; + srf->multisample_pattern = multisample_pattern; + srf->quality_level = quality_level; if (array_size) num_layers = array_size; else if (svga3d_flags & SVGA3D_SURFACE_CUBEMAP) num_layers = SVGA3D_MAX_SURFACE_FACES; - else - num_layers = 1; + + if (srf->flags & SVGA3D_SURFACE_MULTISAMPLE) + sample_count = srf->multisample_count; srf->res.backup_size = - svga3dsurface_get_serialized_size(srf->format, - srf->base_size, - srf->mip_levels[0], - num_layers); + svga3dsurface_get_serialized_size_extended(srf->format, + srf->base_size, + srf->mip_levels[0], + num_layers, + sample_count); if (srf->flags & SVGA3D_SURFACE_BIND_STREAM_OUTPUT) srf->res.backup_size += sizeof(SVGA3dDXSOState); @@ -1599,3 +1515,266 @@ out_unlock: ttm_read_unlock(&dev_priv->reservation_sem); return ret; } + +/** + * vmw_gb_surface_define_ext_ioctl - Ioctl function implementing + * the user surface define functionality. + * + * @dev: Pointer to a struct drm_device. + * @data: Pointer to data copied from / to user-space. + * @file_priv: Pointer to a drm file private structure. + */ +int vmw_gb_surface_define_ext_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + union drm_vmw_gb_surface_create_ext_arg *arg = + (union drm_vmw_gb_surface_create_ext_arg *)data; + struct drm_vmw_gb_surface_create_ext_req *req = &arg->req; + struct drm_vmw_gb_surface_create_rep *rep = &arg->rep; + + return vmw_gb_surface_define_internal(dev, req, rep, file_priv); +} + +/** + * vmw_gb_surface_reference_ext_ioctl - Ioctl function implementing + * the user surface reference functionality. + * + * @dev: Pointer to a struct drm_device. + * @data: Pointer to data copied from / to user-space. + * @file_priv: Pointer to a drm file private structure. + */ +int vmw_gb_surface_reference_ext_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + union drm_vmw_gb_surface_reference_ext_arg *arg = + (union drm_vmw_gb_surface_reference_ext_arg *)data; + struct drm_vmw_surface_arg *req = &arg->req; + struct drm_vmw_gb_surface_ref_ext_rep *rep = &arg->rep; + + return vmw_gb_surface_reference_internal(dev, req, rep, file_priv); +} + +/** + * vmw_gb_surface_define_internal - Ioctl function implementing + * the user surface define functionality. + * + * @dev: Pointer to a struct drm_device. + * @req: Request argument from user-space. + * @rep: Response argument to user-space. + * @file_priv: Pointer to a drm file private structure. 
+ */ +static int +vmw_gb_surface_define_internal(struct drm_device *dev, + struct drm_vmw_gb_surface_create_ext_req *req, + struct drm_vmw_gb_surface_create_rep *rep, + struct drm_file *file_priv) +{ + struct vmw_private *dev_priv = vmw_priv(dev); + struct vmw_user_surface *user_srf; + struct vmw_surface *srf; + struct vmw_resource *res; + struct vmw_resource *tmp; + struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; + int ret; + uint32_t size; + uint32_t backup_handle = 0; + SVGA3dSurfaceAllFlags svga3d_flags_64 = + SVGA3D_FLAGS_64(req->svga3d_flags_upper_32_bits, + req->base.svga3d_flags); + + if (!dev_priv->has_sm4_1) { + /* + * If SM4_1 is not supported then we cannot send 64-bit flags + * to the device. + */ + if (req->svga3d_flags_upper_32_bits != 0) + return -EINVAL; + + if (req->base.multisample_count != 0) + return -EINVAL; + + if (req->multisample_pattern != SVGA3D_MS_PATTERN_NONE) + return -EINVAL; + + if (req->quality_level != SVGA3D_MS_QUALITY_NONE) + return -EINVAL; + } + + if ((svga3d_flags_64 & SVGA3D_SURFACE_MULTISAMPLE) && + req->base.multisample_count == 0) + return -EINVAL; + + if (req->base.mip_levels > DRM_VMW_MAX_MIP_LEVELS) + return -EINVAL; + + if (unlikely(vmw_user_surface_size == 0)) + vmw_user_surface_size = ttm_round_pot(sizeof(*user_srf)) + + 128; + + size = vmw_user_surface_size + 128; + + /* Define a surface based on the parameters. */ + ret = vmw_surface_gb_priv_define(dev, + size, + svga3d_flags_64, + req->base.format, + req->base.drm_surface_flags & + drm_vmw_surface_flag_scanout, + req->base.mip_levels, + req->base.multisample_count, + req->base.array_size, + req->base.base_size, + req->multisample_pattern, + req->quality_level, + &srf); + if (unlikely(ret != 0)) + return ret; + + user_srf = container_of(srf, struct vmw_user_surface, srf); + if (drm_is_primary_client(file_priv)) + user_srf->master = drm_master_get(file_priv->master); + + ret = ttm_read_lock(&dev_priv->reservation_sem, true); + if (unlikely(ret != 0)) + return ret; + + res = &user_srf->srf.res; + + if (req->base.buffer_handle != SVGA3D_INVALID_ID) { + ret = vmw_user_bo_lookup(tfile, req->base.buffer_handle, + &res->backup, + &user_srf->backup_base); + if (ret == 0) { + if (res->backup->base.num_pages * PAGE_SIZE < + res->backup_size) { + DRM_ERROR("Surface backup buffer too small.\n"); + vmw_bo_unreference(&res->backup); + ret = -EINVAL; + goto out_unlock; + } else { + backup_handle = req->base.buffer_handle; + } + } + } else if (req->base.drm_surface_flags & + drm_vmw_surface_flag_create_buffer) + ret = vmw_user_bo_alloc(dev_priv, tfile, + res->backup_size, + req->base.drm_surface_flags & + drm_vmw_surface_flag_shareable, + &backup_handle, + &res->backup, + &user_srf->backup_base); + + if (unlikely(ret != 0)) { + vmw_resource_unreference(&res); + goto out_unlock; + } + + tmp = vmw_resource_reference(res); + ret = ttm_prime_object_init(tfile, res->backup_size, &user_srf->prime, + req->base.drm_surface_flags & + drm_vmw_surface_flag_shareable, + VMW_RES_SURFACE, + &vmw_user_surface_base_release, NULL); + + if (unlikely(ret != 0)) { + vmw_resource_unreference(&tmp); + vmw_resource_unreference(&res); + goto out_unlock; + } + + rep->handle = user_srf->prime.base.hash.key; + rep->backup_size = res->backup_size; + if (res->backup) { + rep->buffer_map_handle = + drm_vma_node_offset_addr(&res->backup->base.vma_node); + rep->buffer_size = res->backup->base.num_pages * PAGE_SIZE; + rep->buffer_handle = backup_handle; + } else { + rep->buffer_map_handle = 0; + rep->buffer_size = 0; +
rep->buffer_handle = SVGA3D_INVALID_ID; + } + + vmw_resource_unreference(&res); + +out_unlock: + ttm_read_unlock(&dev_priv->reservation_sem); + return ret; +} + +/** + * vmw_gb_surface_reference_internal - Ioctl function implementing + * the user surface reference functionality. + * + * @dev: Pointer to a struct drm_device. + * @req: Pointer to user-space request surface arg. + * @rep: Pointer to response to user-space. + * @file_priv: Pointer to a drm file private structure. + */ +static int +vmw_gb_surface_reference_internal(struct drm_device *dev, + struct drm_vmw_surface_arg *req, + struct drm_vmw_gb_surface_ref_ext_rep *rep, + struct drm_file *file_priv) +{ + struct vmw_private *dev_priv = vmw_priv(dev); + struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; + struct vmw_surface *srf; + struct vmw_user_surface *user_srf; + struct ttm_base_object *base; + uint32_t backup_handle; + int ret = -EINVAL; + + ret = vmw_surface_handle_reference(dev_priv, file_priv, req->sid, + req->handle_type, &base); + if (unlikely(ret != 0)) + return ret; + + user_srf = container_of(base, struct vmw_user_surface, prime.base); + srf = &user_srf->srf; + if (!srf->res.backup) { + DRM_ERROR("Shared GB surface is missing a backup buffer.\n"); + goto out_bad_resource; + } + + mutex_lock(&dev_priv->cmdbuf_mutex); /* Protect res->backup */ + ret = vmw_user_bo_reference(tfile, srf->res.backup, &backup_handle); + mutex_unlock(&dev_priv->cmdbuf_mutex); + + if (unlikely(ret != 0)) { + DRM_ERROR("Could not add a reference to a GB surface " + "backup buffer.\n"); + (void) ttm_ref_object_base_unref(tfile, base->hash.key, + TTM_REF_USAGE); + goto out_bad_resource; + } + + rep->creq.base.svga3d_flags = SVGA3D_FLAGS_LOWER_32(srf->flags); + rep->creq.base.format = srf->format; + rep->creq.base.mip_levels = srf->mip_levels[0]; + rep->creq.base.drm_surface_flags = 0; + rep->creq.base.multisample_count = srf->multisample_count; + rep->creq.base.autogen_filter = srf->autogen_filter; + rep->creq.base.array_size = srf->array_size; + rep->creq.base.buffer_handle = backup_handle; + rep->creq.base.base_size = srf->base_size; + rep->crep.handle = user_srf->prime.base.hash.key; + rep->crep.backup_size = srf->res.backup_size; + rep->crep.buffer_handle = backup_handle; + rep->crep.buffer_map_handle = + drm_vma_node_offset_addr(&srf->res.backup->base.vma_node); + rep->crep.buffer_size = srf->res.backup->base.num_pages * PAGE_SIZE; + + rep->creq.version = drm_vmw_gb_surface_v1; + rep->creq.svga3d_flags_upper_32_bits = + SVGA3D_FLAGS_UPPER_32(srf->flags); + rep->creq.multisample_pattern = srf->multisample_pattern; + rep->creq.quality_level = srf->quality_level; + rep->creq.must_be_zero = 0; + +out_bad_resource: + ttm_base_object_unref(&base); + + return ret; +} diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c index 21111fd091f9..31786b200afc 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c @@ -1,7 +1,7 @@ +// SPDX-License-Identifier: GPL-2.0 OR MIT /************************************************************************** * - * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA - * All Rights Reserved. 
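The reference path above hands the 64-bit surface flags back to user-space split across the legacy 32-bit field and the new svga3d_flags_upper_32_bits field, using the SVGA3D_FLAGS_* helpers introduced at the top of this file. A quick standalone round-trip check (userspace C; the macro bodies are copied from the patch, and U32_MAX is redefined locally):

#include <assert.h>
#include <stdint.h>

#define U32_MAX ((uint32_t)~0U)		/* local stand-in for the kernel's */
#define SVGA3D_FLAGS_64(upper32, lower32) \
	(((uint64_t)upper32 << 32) | lower32)
#define SVGA3D_FLAGS_UPPER_32(svga3d_flags) (svga3d_flags >> 32)
#define SVGA3D_FLAGS_LOWER_32(svga3d_flags) \
	(svga3d_flags & ((uint64_t)U32_MAX))

int main(void)
{
	uint64_t flags = SVGA3D_FLAGS_64(0x00000004u, 0x80000001u);

	/* Splitting and recombining must be lossless. */
	assert(SVGA3D_FLAGS_UPPER_32(flags) == 0x00000004u);
	assert(SVGA3D_FLAGS_LOWER_32(flags) == 0x80000001u);
	assert(SVGA3D_FLAGS_64(SVGA3D_FLAGS_UPPER_32(flags),
			       SVGA3D_FLAGS_LOWER_32(flags)) == flags);
	return 0;
}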
+ * Copyright 2009-2015 VMware, Inc., Palo Alto, CA., USA * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the @@ -798,7 +798,7 @@ static int vmw_verify_access(struct ttm_buffer_object *bo, struct file *filp) struct ttm_object_file *tfile = vmw_fpriv((struct drm_file *)filp->private_data)->tfile; - return vmw_user_dmabuf_verify_access(bo, tfile); + return vmw_user_bo_verify_access(bo, tfile); } static int vmw_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem) @@ -852,7 +852,7 @@ static void vmw_move_notify(struct ttm_buffer_object *bo, bool evict, struct ttm_mem_reg *mem) { - vmw_resource_move_notify(bo, mem); + vmw_bo_move_notify(bo, mem); vmw_query_move_notify(bo, mem); } @@ -864,7 +864,7 @@ static void vmw_move_notify(struct ttm_buffer_object *bo, */ static void vmw_swap_notify(struct ttm_buffer_object *bo) { - vmw_resource_swap_notify(bo); + vmw_bo_swap_notify(bo); (void) ttm_bo_wait(bo, false, false); } diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c index e771091d2cd3..7b1e5a5cbd2c 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c @@ -1,7 +1,7 @@ +// SPDX-License-Identifier: GPL-2.0 OR MIT /************************************************************************** * - * Copyright © 2009-2011 VMware, Inc., Palo Alto, CA., USA - * All Rights Reserved. + * Copyright 2009-2011 VMware, Inc., Palo Alto, CA., USA * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_va.c b/drivers/gpu/drm/vmwgfx/vmwgfx_va.c index b4162fd78600..ebc1d83c34b4 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_va.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_va.c @@ -1,7 +1,7 @@ +// SPDX-License-Identifier: GPL-2.0 OR MIT /************************************************************************** * - * Copyright © 2012-2016 VMware, Inc., Palo Alto, CA., USA - * All Rights Reserved. + * Copyright 2012-2016 VMware, Inc., Palo Alto, CA., USA * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the |
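One last note on the sizing change made in vmw_surface_gb_priv_define() earlier in this patch: when SVGA3D_SURFACE_MULTISAMPLE is set, the backup size is now computed with the surface's sample count, since every sample needs its own backing storage. The sketch below is a rough, hedged approximation of why the size scales with sample count; the real helper, svga3dsurface_get_serialized_size_extended(), additionally accounts for mip chains, array layers, and per-format block sizes.

#include <stdint.h>

/* Illustrative only: approximate backing-store size of a simple
 * uncompressed surface with one mip level. */
static uint64_t approx_backup_size(uint32_t width, uint32_t height,
				   uint32_t depth, uint32_t cpp,
				   uint32_t num_layers,
				   uint32_t sample_count)
{
	uint64_t per_layer = (uint64_t)width * height * depth * cpp;

	if (sample_count == 0)
		sample_count = 1;	/* non-MSAA surfaces hold one sample */

	return per_layer * num_layers * sample_count;
}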