path: root/drivers/gpu/drm/vmwgfx
author     Linus Torvalds <torvalds@linux-foundation.org>  2015-09-04 15:49:32 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2015-09-04 15:49:32 -0700
commit     f377ea88b862bf7151be96d276f4cb740f8e1c41 (patch)
tree       6205913431c012e285316281b6221a20d4a92128 /drivers/gpu/drm/vmwgfx
parent     51e771c0d25b43d0f12b2c7c01939942becbbe28 (diff)
parent     73bf1b7be7aab60d7c651402441dd0b0b4991098 (diff)
download   blackbird-op-linux-f377ea88b862bf7151be96d276f4cb740f8e1c41.tar.gz
           blackbird-op-linux-f377ea88b862bf7151be96d276f4cb740f8e1c41.zip
Merge branch 'drm-next' of git://people.freedesktop.org/~airlied/linux
Pull drm updates from Dave Airlie:
 "This is the main pull request for the drm for 4.3. Nouveau is
  probably the biggest amount of changes in here, since it missed 4.2.
  Highlights below, along with the usual bunch of fixes. All stuff
  outside drm should have applicable acks.

  Highlights:

  - new drivers:
       freescale dcu kms driver

  - core:
       more atomic fixes
       disable some dri1 interfaces on kms drivers
       drop fb panic handling, this was just getting more broken, as
         more locking was required.
       new core fbdev Kconfig support - instead of each driver
         enable/disabling it
       struct_mutex cleanups

  - panel:
       more new panels
       cleanup Kconfig

  - i915:
       Skylake support enabled by default
       legacy modesetting using atomic infrastructure
       Skylake fixes
       GEN9 workarounds

  - amdgpu:
       Fiji support
       CGS support for amdgpu
       Initial GPU scheduler - off by default
       Lots of bug fixes and optimisations.

  - radeon:
       DP fixes
       misc fixes

  - amdkfd:
       Add Carrizo support for amdkfd using amdgpu.

  - nouveau:
       long pending cleanup to complete driver, fully bisectable which
         makes it larger
       perfmon work
       more reclocking improvements
       maxwell displayport fixes

  - vmwgfx:
       new DX device support, supports OpenGL 3.3
       screen targets support

  - mgag200:
       G200eW support
       G200e new revision support

  - msm:
       dragonboard 410c support, msm8x94 support, msm8x74v1 support
       yuv format support
       dma plane support
       mdp5 rotation
       initial hdcp

  - sti:
       atomic support

  - exynos:
       lots of cleanups
       atomic modesetting/pageflipping support
       render node support

  - tegra:
       tegra210 support (dc, dsi, dp/hdmi)
       dpms with atomic modesetting support

  - atmel:
       support for 3 more atmel SoCs
       new input formats, PRIME support.

  - dwhdmi:
       preparing to add audio support

  - rockchip:
       yuv plane support"

* 'drm-next' of git://people.freedesktop.org/~airlied/linux: (1369 commits)
  drm/amdgpu: rename gmc_v8_0_init_compute_vmid
  drm/amdgpu: fix vce3 instance handling
  drm/amdgpu: remove ib test for the second VCE Ring
  drm/amdgpu: properly enable VM fault interrupts
  drm/amdgpu: fix warning in scheduler
  drm/amdgpu: fix buffer placement under memory pressure
  drm/amdgpu/cz: fix cz_dpm_update_low_memory_pstate logic
  drm/amdgpu: fix typo in dce11 watermark setup
  drm/amdgpu: fix typo in dce10 watermark setup
  drm/amdgpu: use top down allocation for non-CPU accessible vram
  drm/amdgpu: be explicit about cpu vram access for driver BOs (v2)
  drm/amdgpu: set MEC doorbell range for Fiji
  drm/amdgpu: implement burst NOP for SDMA
  drm/amdgpu: add insert_nop ring func and default implementation
  drm/amdgpu: add amdgpu_get_sdma_instance helper function
  drm/amdgpu: add AMDGPU_MAX_SDMA_INSTANCES
  drm/amdgpu: add burst_nop flag for sdma
  drm/amdgpu: add count field for the SDMA NOP packet v2
  drm/amdgpu: use PT for VM sync on unmap
  drm/amdgpu: make wait_event uninterruptible in push_job
  ...
Diffstat (limited to 'drivers/gpu/drm/vmwgfx')
-rw-r--r--  drivers/gpu/drm/vmwgfx/Makefile | 3
-rw-r--r--  drivers/gpu/drm/vmwgfx/device_include/includeCheck.h | 3
-rw-r--r--  drivers/gpu/drm/vmwgfx/device_include/svga3d_caps.h | 110
-rw-r--r--  drivers/gpu/drm/vmwgfx/device_include/svga3d_cmd.h | 2071
-rw-r--r--  drivers/gpu/drm/vmwgfx/device_include/svga3d_devcaps.h | 457
-rw-r--r--  drivers/gpu/drm/vmwgfx/device_include/svga3d_dx.h | 1487
-rw-r--r--  drivers/gpu/drm/vmwgfx/device_include/svga3d_limits.h | 99
-rw-r--r--  drivers/gpu/drm/vmwgfx/device_include/svga3d_reg.h | 50
-rw-r--r--  drivers/gpu/drm/vmwgfx/device_include/svga3d_surfacedefs.h | 1204
-rw-r--r--  drivers/gpu/drm/vmwgfx/device_include/svga3d_types.h | 1633
-rw-r--r--  drivers/gpu/drm/vmwgfx/device_include/svga_escape.h (renamed from drivers/gpu/drm/vmwgfx/svga_escape.h) | 2
-rw-r--r--  drivers/gpu/drm/vmwgfx/device_include/svga_overlay.h (renamed from drivers/gpu/drm/vmwgfx/svga_overlay.h) | 10
-rw-r--r--  drivers/gpu/drm/vmwgfx/device_include/svga_reg.h (renamed from drivers/gpu/drm/vmwgfx/svga_reg.h) | 664
-rw-r--r--  drivers/gpu/drm/vmwgfx/device_include/svga_types.h | 46
-rw-r--r--  drivers/gpu/drm/vmwgfx/device_include/vm_basic_types.h | 21
-rw-r--r--  drivers/gpu/drm/vmwgfx/device_include/vmware_pack_begin.h | 25
-rw-r--r--  drivers/gpu/drm/vmwgfx/device_include/vmware_pack_end.h | 25
-rw-r--r--  drivers/gpu/drm/vmwgfx/svga3d_reg.h | 2627
-rw-r--r--  drivers/gpu/drm/vmwgfx/svga3d_surfacedefs.h | 912
-rw-r--r--  drivers/gpu/drm/vmwgfx/svga_types.h | 45
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_binding.c | 1294
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_binding.h | 209
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c | 24
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c | 1303
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c | 26
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_context.c | 786
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c | 662
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c | 184
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | 508
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.h | 335
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c | 1935
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_fb.c | 575
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_fence.c | 10
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_fence.h | 2
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c | 145
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c | 2
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c | 18
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_irq.c | 47
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_kms.c | 1654
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_kms.h | 194
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c | 49
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_mob.c | 212
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c | 16
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_reg.h | 12
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_resource.c | 277
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_resource_priv.h | 14
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c | 556
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_shader.c | 500
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_so.c | 555
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_so.h | 160
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c | 1266
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_surface.c | 315
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c | 2
53 files changed, 19164 insertions, 6177 deletions
diff --git a/drivers/gpu/drm/vmwgfx/Makefile b/drivers/gpu/drm/vmwgfx/Makefile
index ce0ab951f507..d281575bbe11 100644
--- a/drivers/gpu/drm/vmwgfx/Makefile
+++ b/drivers/gpu/drm/vmwgfx/Makefile
@@ -7,6 +7,7 @@ vmwgfx-y := vmwgfx_execbuf.o vmwgfx_gmr.o vmwgfx_kms.o vmwgfx_drv.o \
vmwgfx_overlay.o vmwgfx_marker.o vmwgfx_gmrid_manager.o \
vmwgfx_fence.o vmwgfx_dmabuf.o vmwgfx_scrn.o vmwgfx_context.o \
vmwgfx_surface.o vmwgfx_prime.o vmwgfx_mob.o vmwgfx_shader.o \
- vmwgfx_cmdbuf_res.o \
+ vmwgfx_cmdbuf_res.o vmwgfx_cmdbuf.o vmwgfx_stdu.o \
+ vmwgfx_cotable.o vmwgfx_so.o vmwgfx_binding.o
obj-$(CONFIG_DRM_VMWGFX) := vmwgfx.o
diff --git a/drivers/gpu/drm/vmwgfx/device_include/includeCheck.h b/drivers/gpu/drm/vmwgfx/device_include/includeCheck.h
new file mode 100644
index 000000000000..8cce7f15b6eb
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/device_include/includeCheck.h
@@ -0,0 +1,3 @@
+/*
+ * Intentionally empty file.
+ */
diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga3d_caps.h b/drivers/gpu/drm/vmwgfx/device_include/svga3d_caps.h
new file mode 100644
index 000000000000..9ce2466a5d00
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/device_include/svga3d_caps.h
@@ -0,0 +1,110 @@
+/**********************************************************
+ * Copyright 2007-2015 VMware, Inc. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy,
+ * modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ **********************************************************/
+
+/*
+ * svga3d_caps.h --
+ *
+ * Definitions for SVGA3D hardware capabilities. Capabilities
+ * are used to query for optional rendering features during
+ * driver initialization. The capability data is stored as a very
+ * basic key/value dictionary within the "FIFO register" memory
+ * area at the beginning of BAR2.
+ *
+ * Note that these definitions are only for 3D capabilities.
+ * The SVGA device also has "device capabilities" and "FIFO
+ * capabilities", which are non-3D-specific and are stored as
+ * bitfields rather than key/value pairs.
+ */
+
+#ifndef _SVGA3D_CAPS_H_
+#define _SVGA3D_CAPS_H_
+
+#define INCLUDE_ALLOW_MODULE
+#define INCLUDE_ALLOW_USERLEVEL
+
+#include "includeCheck.h"
+
+#include "svga_reg.h"
+
+#define SVGA_FIFO_3D_CAPS_SIZE (SVGA_FIFO_3D_CAPS_LAST - \
+ SVGA_FIFO_3D_CAPS + 1)
+
+
+/*
+ * SVGA3dCapsRecordType
+ *
+ * Record types that can be found in the caps block.
+ * Related record types are grouped together numerically so that
+ * SVGA3dCaps_FindRecord() can be applied on a range of record
+ * types.
+ */
+
+typedef enum {
+ SVGA3DCAPS_RECORD_UNKNOWN = 0,
+ SVGA3DCAPS_RECORD_DEVCAPS_MIN = 0x100,
+ SVGA3DCAPS_RECORD_DEVCAPS = 0x100,
+ SVGA3DCAPS_RECORD_DEVCAPS_MAX = 0x1ff,
+} SVGA3dCapsRecordType;
+
+
+/*
+ * SVGA3dCapsRecordHeader
+ *
+ * Header field leading each caps block record. Contains the offset (in
+ * register words, NOT bytes) to the next caps block record (or the end
+ * of the caps block records, which will be a zero word) and the record type
+ * as defined above.
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCapsRecordHeader {
+ uint32 length;
+ SVGA3dCapsRecordType type;
+}
+#include "vmware_pack_end.h"
+SVGA3dCapsRecordHeader;
+
+
+/*
+ * SVGA3dCapsRecord
+ *
+ * Caps block record; "data" is a placeholder for the actual data structure
+ * contained within the record.
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCapsRecord {
+ SVGA3dCapsRecordHeader header;
+ uint32 data[1];
+}
+#include "vmware_pack_end.h"
+SVGA3dCapsRecord;
+
+
+typedef uint32 SVGA3dCapPair[2];
+
+#endif
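
The length field gives everything needed to walk the caps block: it is the
offset, in 32-bit register words, from the start of one record to the next,
and a zero word terminates the list. A minimal sketch of the lookup that the
comment above attributes to SVGA3dCaps_FindRecord() (editor's illustration
only, not part of this commit):

static const SVGA3dCapsRecord *
svga3d_caps_find_record(const uint32 *caps, SVGA3dCapsRecordType type)
{
	const SVGA3dCapsRecord *record = (const SVGA3dCapsRecord *)caps;

	/* A zero length word marks the end of the caps block records. */
	while (record->header.length != 0) {
		if (record->header.type == type)
			return record;
		/* header.length is the offset to the next record, in words. */
		record = (const SVGA3dCapsRecord *)
			 ((const uint32 *)record + record->header.length);
	}
	return NULL;
}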
diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga3d_cmd.h b/drivers/gpu/drm/vmwgfx/device_include/svga3d_cmd.h
new file mode 100644
index 000000000000..2dfd57c5f463
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/device_include/svga3d_cmd.h
@@ -0,0 +1,2071 @@
+/**********************************************************
+ * Copyright 1998-2015 VMware, Inc. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy,
+ * modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ **********************************************************/
+
+/*
+ * svga3d_cmd.h --
+ *
+ * SVGA 3d hardware cmd definitions
+ */
+
+#ifndef _SVGA3D_CMD_H_
+#define _SVGA3D_CMD_H_
+
+#define INCLUDE_ALLOW_MODULE
+#define INCLUDE_ALLOW_USERLEVEL
+#define INCLUDE_ALLOW_VMCORE
+
+#include "includeCheck.h"
+#include "svga3d_types.h"
+
+/*
+ * Identifiers for commands in the command FIFO.
+ *
+ * IDs between 1000 and 1039 (inclusive) were used by obsolete versions of
+ * the SVGA3D protocol and remain reserved; they should not be used in the
+ * future.
+ *
+ * IDs between 1040 and 1999 (inclusive) are available for use by the
+ * current SVGA3D protocol.
+ *
+ * FIFO clients other than SVGA3D should stay below 1000, or at 2000
+ * and up.
+ */
+
+typedef enum {
+ SVGA_3D_CMD_LEGACY_BASE = 1000,
+ SVGA_3D_CMD_BASE = 1040,
+
+ SVGA_3D_CMD_SURFACE_DEFINE = 1040,
+ SVGA_3D_CMD_SURFACE_DESTROY = 1041,
+ SVGA_3D_CMD_SURFACE_COPY = 1042,
+ SVGA_3D_CMD_SURFACE_STRETCHBLT = 1043,
+ SVGA_3D_CMD_SURFACE_DMA = 1044,
+ SVGA_3D_CMD_CONTEXT_DEFINE = 1045,
+ SVGA_3D_CMD_CONTEXT_DESTROY = 1046,
+ SVGA_3D_CMD_SETTRANSFORM = 1047,
+ SVGA_3D_CMD_SETZRANGE = 1048,
+ SVGA_3D_CMD_SETRENDERSTATE = 1049,
+ SVGA_3D_CMD_SETRENDERTARGET = 1050,
+ SVGA_3D_CMD_SETTEXTURESTATE = 1051,
+ SVGA_3D_CMD_SETMATERIAL = 1052,
+ SVGA_3D_CMD_SETLIGHTDATA = 1053,
+ SVGA_3D_CMD_SETLIGHTENABLED = 1054,
+ SVGA_3D_CMD_SETVIEWPORT = 1055,
+ SVGA_3D_CMD_SETCLIPPLANE = 1056,
+ SVGA_3D_CMD_CLEAR = 1057,
+ SVGA_3D_CMD_PRESENT = 1058,
+ SVGA_3D_CMD_SHADER_DEFINE = 1059,
+ SVGA_3D_CMD_SHADER_DESTROY = 1060,
+ SVGA_3D_CMD_SET_SHADER = 1061,
+ SVGA_3D_CMD_SET_SHADER_CONST = 1062,
+ SVGA_3D_CMD_DRAW_PRIMITIVES = 1063,
+ SVGA_3D_CMD_SETSCISSORRECT = 1064,
+ SVGA_3D_CMD_BEGIN_QUERY = 1065,
+ SVGA_3D_CMD_END_QUERY = 1066,
+ SVGA_3D_CMD_WAIT_FOR_QUERY = 1067,
+ SVGA_3D_CMD_PRESENT_READBACK = 1068,
+ SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN = 1069,
+ SVGA_3D_CMD_SURFACE_DEFINE_V2 = 1070,
+ SVGA_3D_CMD_GENERATE_MIPMAPS = 1071,
+ SVGA_3D_CMD_VIDEO_CREATE_DECODER = 1072,
+ SVGA_3D_CMD_VIDEO_DESTROY_DECODER = 1073,
+ SVGA_3D_CMD_VIDEO_CREATE_PROCESSOR = 1074,
+ SVGA_3D_CMD_VIDEO_DESTROY_PROCESSOR = 1075,
+ SVGA_3D_CMD_VIDEO_DECODE_START_FRAME = 1076,
+ SVGA_3D_CMD_VIDEO_DECODE_RENDER = 1077,
+ SVGA_3D_CMD_VIDEO_DECODE_END_FRAME = 1078,
+ SVGA_3D_CMD_VIDEO_PROCESS_FRAME = 1079,
+ SVGA_3D_CMD_ACTIVATE_SURFACE = 1080,
+ SVGA_3D_CMD_DEACTIVATE_SURFACE = 1081,
+ SVGA_3D_CMD_SCREEN_DMA = 1082,
+ SVGA_3D_CMD_SET_UNITY_SURFACE_COOKIE = 1083,
+ SVGA_3D_CMD_OPEN_CONTEXT_SURFACE = 1084,
+
+ SVGA_3D_CMD_LOGICOPS_BITBLT = 1085,
+ SVGA_3D_CMD_LOGICOPS_TRANSBLT = 1086,
+ SVGA_3D_CMD_LOGICOPS_STRETCHBLT = 1087,
+ SVGA_3D_CMD_LOGICOPS_COLORFILL = 1088,
+ SVGA_3D_CMD_LOGICOPS_ALPHABLEND = 1089,
+ SVGA_3D_CMD_LOGICOPS_CLEARTYPEBLEND = 1090,
+
+ SVGA_3D_CMD_SET_OTABLE_BASE = 1091,
+ SVGA_3D_CMD_READBACK_OTABLE = 1092,
+
+ SVGA_3D_CMD_DEFINE_GB_MOB = 1093,
+ SVGA_3D_CMD_DESTROY_GB_MOB = 1094,
+ SVGA_3D_CMD_DEAD3 = 1095,
+ SVGA_3D_CMD_UPDATE_GB_MOB_MAPPING = 1096,
+
+ SVGA_3D_CMD_DEFINE_GB_SURFACE = 1097,
+ SVGA_3D_CMD_DESTROY_GB_SURFACE = 1098,
+ SVGA_3D_CMD_BIND_GB_SURFACE = 1099,
+ SVGA_3D_CMD_COND_BIND_GB_SURFACE = 1100,
+ SVGA_3D_CMD_UPDATE_GB_IMAGE = 1101,
+ SVGA_3D_CMD_UPDATE_GB_SURFACE = 1102,
+ SVGA_3D_CMD_READBACK_GB_IMAGE = 1103,
+ SVGA_3D_CMD_READBACK_GB_SURFACE = 1104,
+ SVGA_3D_CMD_INVALIDATE_GB_IMAGE = 1105,
+ SVGA_3D_CMD_INVALIDATE_GB_SURFACE = 1106,
+
+ SVGA_3D_CMD_DEFINE_GB_CONTEXT = 1107,
+ SVGA_3D_CMD_DESTROY_GB_CONTEXT = 1108,
+ SVGA_3D_CMD_BIND_GB_CONTEXT = 1109,
+ SVGA_3D_CMD_READBACK_GB_CONTEXT = 1110,
+ SVGA_3D_CMD_INVALIDATE_GB_CONTEXT = 1111,
+
+ SVGA_3D_CMD_DEFINE_GB_SHADER = 1112,
+ SVGA_3D_CMD_DESTROY_GB_SHADER = 1113,
+ SVGA_3D_CMD_BIND_GB_SHADER = 1114,
+
+ SVGA_3D_CMD_SET_OTABLE_BASE64 = 1115,
+
+ SVGA_3D_CMD_BEGIN_GB_QUERY = 1116,
+ SVGA_3D_CMD_END_GB_QUERY = 1117,
+ SVGA_3D_CMD_WAIT_FOR_GB_QUERY = 1118,
+
+ SVGA_3D_CMD_NOP = 1119,
+
+ SVGA_3D_CMD_ENABLE_GART = 1120,
+ SVGA_3D_CMD_DISABLE_GART = 1121,
+ SVGA_3D_CMD_MAP_MOB_INTO_GART = 1122,
+ SVGA_3D_CMD_UNMAP_GART_RANGE = 1123,
+
+ SVGA_3D_CMD_DEFINE_GB_SCREENTARGET = 1124,
+ SVGA_3D_CMD_DESTROY_GB_SCREENTARGET = 1125,
+ SVGA_3D_CMD_BIND_GB_SCREENTARGET = 1126,
+ SVGA_3D_CMD_UPDATE_GB_SCREENTARGET = 1127,
+
+ SVGA_3D_CMD_READBACK_GB_IMAGE_PARTIAL = 1128,
+ SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL = 1129,
+
+ SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE = 1130,
+
+ SVGA_3D_CMD_GB_SCREEN_DMA = 1131,
+ SVGA_3D_CMD_BIND_GB_SURFACE_WITH_PITCH = 1132,
+ SVGA_3D_CMD_GB_MOB_FENCE = 1133,
+ SVGA_3D_CMD_DEFINE_GB_SURFACE_V2 = 1134,
+ SVGA_3D_CMD_DEFINE_GB_MOB64 = 1135,
+ SVGA_3D_CMD_REDEFINE_GB_MOB64 = 1136,
+ SVGA_3D_CMD_NOP_ERROR = 1137,
+
+ SVGA_3D_CMD_SET_VERTEX_STREAMS = 1138,
+ SVGA_3D_CMD_SET_VERTEX_DECLS = 1139,
+ SVGA_3D_CMD_SET_VERTEX_DIVISORS = 1140,
+ SVGA_3D_CMD_DRAW = 1141,
+ SVGA_3D_CMD_DRAW_INDEXED = 1142,
+
+ /*
+ * DX10 Commands
+ */
+ SVGA_3D_CMD_DX_MIN = 1143,
+ SVGA_3D_CMD_DX_DEFINE_CONTEXT = 1143,
+ SVGA_3D_CMD_DX_DESTROY_CONTEXT = 1144,
+ SVGA_3D_CMD_DX_BIND_CONTEXT = 1145,
+ SVGA_3D_CMD_DX_READBACK_CONTEXT = 1146,
+ SVGA_3D_CMD_DX_INVALIDATE_CONTEXT = 1147,
+ SVGA_3D_CMD_DX_SET_SINGLE_CONSTANT_BUFFER = 1148,
+ SVGA_3D_CMD_DX_SET_SHADER_RESOURCES = 1149,
+ SVGA_3D_CMD_DX_SET_SHADER = 1150,
+ SVGA_3D_CMD_DX_SET_SAMPLERS = 1151,
+ SVGA_3D_CMD_DX_DRAW = 1152,
+ SVGA_3D_CMD_DX_DRAW_INDEXED = 1153,
+ SVGA_3D_CMD_DX_DRAW_INSTANCED = 1154,
+ SVGA_3D_CMD_DX_DRAW_INDEXED_INSTANCED = 1155,
+ SVGA_3D_CMD_DX_DRAW_AUTO = 1156,
+ SVGA_3D_CMD_DX_SET_INPUT_LAYOUT = 1157,
+ SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS = 1158,
+ SVGA_3D_CMD_DX_SET_INDEX_BUFFER = 1159,
+ SVGA_3D_CMD_DX_SET_TOPOLOGY = 1160,
+ SVGA_3D_CMD_DX_SET_RENDERTARGETS = 1161,
+ SVGA_3D_CMD_DX_SET_BLEND_STATE = 1162,
+ SVGA_3D_CMD_DX_SET_DEPTHSTENCIL_STATE = 1163,
+ SVGA_3D_CMD_DX_SET_RASTERIZER_STATE = 1164,
+ SVGA_3D_CMD_DX_DEFINE_QUERY = 1165,
+ SVGA_3D_CMD_DX_DESTROY_QUERY = 1166,
+ SVGA_3D_CMD_DX_BIND_QUERY = 1167,
+ SVGA_3D_CMD_DX_SET_QUERY_OFFSET = 1168,
+ SVGA_3D_CMD_DX_BEGIN_QUERY = 1169,
+ SVGA_3D_CMD_DX_END_QUERY = 1170,
+ SVGA_3D_CMD_DX_READBACK_QUERY = 1171,
+ SVGA_3D_CMD_DX_SET_PREDICATION = 1172,
+ SVGA_3D_CMD_DX_SET_SOTARGETS = 1173,
+ SVGA_3D_CMD_DX_SET_VIEWPORTS = 1174,
+ SVGA_3D_CMD_DX_SET_SCISSORRECTS = 1175,
+ SVGA_3D_CMD_DX_CLEAR_RENDERTARGET_VIEW = 1176,
+ SVGA_3D_CMD_DX_CLEAR_DEPTHSTENCIL_VIEW = 1177,
+ SVGA_3D_CMD_DX_PRED_COPY_REGION = 1178,
+ SVGA_3D_CMD_DX_PRED_COPY = 1179,
+ SVGA_3D_CMD_DX_STRETCHBLT = 1180,
+ SVGA_3D_CMD_DX_GENMIPS = 1181,
+ SVGA_3D_CMD_DX_UPDATE_SUBRESOURCE = 1182,
+ SVGA_3D_CMD_DX_READBACK_SUBRESOURCE = 1183,
+ SVGA_3D_CMD_DX_INVALIDATE_SUBRESOURCE = 1184,
+ SVGA_3D_CMD_DX_DEFINE_SHADERRESOURCE_VIEW = 1185,
+ SVGA_3D_CMD_DX_DESTROY_SHADERRESOURCE_VIEW = 1186,
+ SVGA_3D_CMD_DX_DEFINE_RENDERTARGET_VIEW = 1187,
+ SVGA_3D_CMD_DX_DESTROY_RENDERTARGET_VIEW = 1188,
+ SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_VIEW = 1189,
+ SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_VIEW = 1190,
+ SVGA_3D_CMD_DX_DEFINE_ELEMENTLAYOUT = 1191,
+ SVGA_3D_CMD_DX_DESTROY_ELEMENTLAYOUT = 1192,
+ SVGA_3D_CMD_DX_DEFINE_BLEND_STATE = 1193,
+ SVGA_3D_CMD_DX_DESTROY_BLEND_STATE = 1194,
+ SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_STATE = 1195,
+ SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_STATE = 1196,
+ SVGA_3D_CMD_DX_DEFINE_RASTERIZER_STATE = 1197,
+ SVGA_3D_CMD_DX_DESTROY_RASTERIZER_STATE = 1198,
+ SVGA_3D_CMD_DX_DEFINE_SAMPLER_STATE = 1199,
+ SVGA_3D_CMD_DX_DESTROY_SAMPLER_STATE = 1200,
+ SVGA_3D_CMD_DX_DEFINE_SHADER = 1201,
+ SVGA_3D_CMD_DX_DESTROY_SHADER = 1202,
+ SVGA_3D_CMD_DX_BIND_SHADER = 1203,
+ SVGA_3D_CMD_DX_DEFINE_STREAMOUTPUT = 1204,
+ SVGA_3D_CMD_DX_DESTROY_STREAMOUTPUT = 1205,
+ SVGA_3D_CMD_DX_SET_STREAMOUTPUT = 1206,
+ SVGA_3D_CMD_DX_SET_COTABLE = 1207,
+ SVGA_3D_CMD_DX_READBACK_COTABLE = 1208,
+ SVGA_3D_CMD_DX_BUFFER_COPY = 1209,
+ SVGA_3D_CMD_DX_TRANSFER_FROM_BUFFER = 1210,
+ SVGA_3D_CMD_DX_SURFACE_COPY_AND_READBACK = 1211,
+ SVGA_3D_CMD_DX_MOVE_QUERY = 1212,
+ SVGA_3D_CMD_DX_BIND_ALL_QUERY = 1213,
+ SVGA_3D_CMD_DX_READBACK_ALL_QUERY = 1214,
+ SVGA_3D_CMD_DX_PRED_TRANSFER_FROM_BUFFER = 1215,
+ SVGA_3D_CMD_DX_MOB_FENCE_64 = 1216,
+ SVGA_3D_CMD_DX_BIND_SHADER_ON_CONTEXT = 1217,
+ SVGA_3D_CMD_DX_HINT = 1218,
+ SVGA_3D_CMD_DX_BUFFER_UPDATE = 1219,
+ SVGA_3D_CMD_DX_SET_VS_CONSTANT_BUFFER_OFFSET = 1220,
+ SVGA_3D_CMD_DX_SET_PS_CONSTANT_BUFFER_OFFSET = 1221,
+ SVGA_3D_CMD_DX_SET_GS_CONSTANT_BUFFER_OFFSET = 1222,
+
+ /*
+ * Reserve some IDs to be used for the DX11 shader types.
+ */
+ SVGA_3D_CMD_DX_RESERVED1 = 1223,
+ SVGA_3D_CMD_DX_RESERVED2 = 1224,
+ SVGA_3D_CMD_DX_RESERVED3 = 1225,
+
+ SVGA_3D_CMD_DX_MAX = 1226,
+ SVGA_3D_CMD_MAX = 1226,
+ SVGA_3D_CMD_FUTURE_MAX = 3000
+} SVGAFifo3dCmdId;
+
+/*
+ * FIFO command format definitions:
+ */
+
+/*
+ * The data size header following cmdNum for every 3d command
+ */
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ uint32 id;
+ uint32 size;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdHeader;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ uint32 numMipLevels;
+}
+#include "vmware_pack_end.h"
+SVGA3dSurfaceFace;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ uint32 sid;
+ SVGA3dSurfaceFlags surfaceFlags;
+ SVGA3dSurfaceFormat format;
+ /*
+ * If surfaceFlags has the SVGA3D_SURFACE_CUBEMAP bit set, all
+ * SVGA3dSurfaceFace structures must have the same value in the
+ * numMipLevels field. Otherwise, all but the first SVGA3dSurfaceFace
+ * structure must have numMipLevels set to 0.
+ */
+ SVGA3dSurfaceFace face[SVGA3D_MAX_SURFACE_FACES];
+ /*
+ * Followed by an SVGA3dSize structure for each mip level in each face.
+ *
+ * A note on surface sizes: Sizes are always specified in pixels,
+ * even if the true surface size is not a multiple of the minimum
+ * block size of the surface's format. For example, a 3x3x1 DXT1
+ * compressed texture would actually be stored as a 4x4x1 image in
+ * memory.
+ */
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDefineSurface; /* SVGA_3D_CMD_SURFACE_DEFINE */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ uint32 sid;
+ SVGA3dSurfaceFlags surfaceFlags;
+ SVGA3dSurfaceFormat format;
+ /*
+ * If surfaceFlags has the SVGA3D_SURFACE_CUBEMAP bit set, all
+ * SVGA3dSurfaceFace structures must have the same value in the
+ * numMipLevels field. Otherwise, all but the first SVGA3dSurfaceFace
+ * structure must have numMipLevels set to 0.
+ */
+ SVGA3dSurfaceFace face[SVGA3D_MAX_SURFACE_FACES];
+ uint32 multisampleCount;
+ SVGA3dTextureFilter autogenFilter;
+ /*
+ * Followed by an SVGA3dSize structure for each mip level in each face.
+ *
+ * A note on surface sizes: Sizes are always specified in pixels,
+ * even if the true surface size is not a multiple of the minimum
+ * block size of the surface's format. For example, a 3x3x1 DXT1
+ * compressed texture would actually be stored as a 4x4x1 image in
+ * memory.
+ */
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDefineSurface_v2; /* SVGA_3D_CMD_SURFACE_DEFINE_V2 */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ uint32 sid;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDestroySurface; /* SVGA_3D_CMD_SURFACE_DESTROY */
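
Every FIFO 3D command is an SVGA3dCmdHeader immediately followed by
header.size bytes of command body. A hedged sketch of that framing for the
command above (editor's illustration only; 'fifo' stands in for however the
driver reserves FIFO space):

static void encode_destroy_surface(void *fifo, uint32 sid)
{
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDestroySurface body;
	} *cmd = fifo;

	cmd->header.id = SVGA_3D_CMD_SURFACE_DESTROY;
	cmd->header.size = sizeof(cmd->body); /* body size, header excluded */
	cmd->body.sid = sid;
}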
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ uint32 cid;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDefineContext; /* SVGA_3D_CMD_CONTEXT_DEFINE */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ uint32 cid;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDestroyContext; /* SVGA_3D_CMD_CONTEXT_DESTROY */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ uint32 cid;
+ SVGA3dClearFlag clearFlag;
+ uint32 color;
+ float depth;
+ uint32 stencil;
+ /* Followed by variable number of SVGA3dRect structures */
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdClear; /* SVGA_3D_CMD_CLEAR */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ SVGA3dLightType type;
+ SVGA3dBool inWorldSpace;
+ float diffuse[4];
+ float specular[4];
+ float ambient[4];
+ float position[4];
+ float direction[4];
+ float range;
+ float falloff;
+ float attenuation0;
+ float attenuation1;
+ float attenuation2;
+ float theta;
+ float phi;
+}
+#include "vmware_pack_end.h"
+SVGA3dLightData;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ uint32 sid;
+ /* Followed by variable number of SVGA3dCopyRect structures */
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdPresent; /* SVGA_3D_CMD_PRESENT */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ SVGA3dRenderStateName state;
+ union {
+ uint32 uintValue;
+ float floatValue;
+ };
+}
+#include "vmware_pack_end.h"
+SVGA3dRenderState;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ uint32 cid;
+ /* Followed by variable number of SVGA3dRenderState structures */
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdSetRenderState; /* SVGA_3D_CMD_SETRENDERSTATE */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ uint32 cid;
+ SVGA3dRenderTargetType type;
+ SVGA3dSurfaceImageId target;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdSetRenderTarget; /* SVGA_3D_CMD_SETRENDERTARGET */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ SVGA3dSurfaceImageId src;
+ SVGA3dSurfaceImageId dest;
+ /* Followed by variable number of SVGA3dCopyBox structures */
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdSurfaceCopy; /* SVGA_3D_CMD_SURFACE_COPY */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ SVGA3dSurfaceImageId src;
+ SVGA3dSurfaceImageId dest;
+ SVGA3dBox boxSrc;
+ SVGA3dBox boxDest;
+ SVGA3dStretchBltMode mode;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdSurfaceStretchBlt; /* SVGA_3D_CMD_SURFACE_STRETCHBLT */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ /*
+ * If the discard flag is present in a surface DMA operation, the host may
+ * discard the contents of the current mipmap level and face of the target
+ * surface before applying the surface DMA contents.
+ */
+ uint32 discard : 1;
+
+ /*
+ * If the unsynchronized flag is present, the host may perform this upload
+ * without syncing to pending reads on this surface.
+ */
+ uint32 unsynchronized : 1;
+
+ /*
+ * Guests *MUST* set the reserved bits to 0 before submitting the command
+ * suffix as future flags may occupy these bits.
+ */
+ uint32 reserved : 30;
+}
+#include "vmware_pack_end.h"
+SVGA3dSurfaceDMAFlags;
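
Because future flags may occupy the reserved bits, a guest should
zero-initialize the whole flags word before setting individual bits. A short
sketch (editor's illustration only):

static SVGA3dSurfaceDMAFlags surface_dma_discard_flags(void)
{
	SVGA3dSurfaceDMAFlags flags = { 0 }; /* zeroes 'reserved', as required */

	flags.discard = 1; /* host may drop the current mip/face contents */
	return flags;
}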
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ SVGAGuestImage guest;
+ SVGA3dSurfaceImageId host;
+ SVGA3dTransferType transfer;
+ /*
+ * Followed by variable number of SVGA3dCopyBox structures. For consistency
+ * in all clipping logic and coordinate translation, we define the
+ * "source" in each copyBox as the guest image and the
+ * "destination" as the host image, regardless of transfer
+ * direction.
+ *
+ * For efficiency, the SVGA3D device is free to copy more data than
+ * specified. For example, it may round copy boxes outwards such
+ * that they lie on particular alignment boundaries.
+ */
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdSurfaceDMA; /* SVGA_3D_CMD_SURFACE_DMA */
+
+/*
+ * SVGA3dCmdSurfaceDMASuffix --
+ *
+ * This is a command suffix that will appear after a SurfaceDMA command in
+ * the FIFO. It contains some extra information that hosts may use to
+ * optimize performance or protect the guest. This suffix exists to preserve
+ * backwards compatibility while also allowing for new functionality to be
+ * implemented.
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ uint32 suffixSize;
+
+ /*
+ * The maximum offset is used to determine the maximum offset from the
+ * guestPtr base address that will be accessed or written to during this
+ * surfaceDMA. If the suffix is supported, the host will respect this
+ * boundary while performing surface DMAs.
+ *
+ * Defaults to MAX_UINT32
+ */
+ uint32 maximumOffset;
+
+ /*
+ * A set of flags that describes optimizations that the host may perform
+ * while performing this surface DMA operation. The guest should never rely
+ * on behaviour that is different when these flags are set for correctness.
+ *
+ * Defaults to 0
+ */
+ SVGA3dSurfaceDMAFlags flags;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdSurfaceDMASuffix;
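
On the wire, then, a SURFACE_DMA submission is the fixed body, a
variable-length array of SVGA3dCopyBox structures, and finally the suffix,
whose suffixSize lets the host locate it. A sketch of the size computation
(editor's illustration only; SVGA3dCopyBox is defined in svga3d_types.h):

static uint32 surface_dma_body_size(uint32 num_copy_boxes)
{
	/* header.size for SVGA_3D_CMD_SURFACE_DMA covers all three parts. */
	return sizeof(SVGA3dCmdSurfaceDMA) +
	       num_copy_boxes * sizeof(SVGA3dCopyBox) +
	       sizeof(SVGA3dCmdSurfaceDMASuffix);
}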
+
+/*
+ * SVGA_3D_CMD_DRAW_PRIMITIVES --
+ *
+ * This command is the SVGA3D device's generic drawing entry point.
+ * It can draw multiple ranges of primitives, optionally using an
+ * index buffer, using an arbitrary collection of vertex buffers.
+ *
+ * Each SVGA3dVertexDecl defines a distinct vertex array to bind
+ * during this draw call. The declarations specify which surface
+ * the vertex data lives in, what that vertex data is used for,
+ * and how to interpret it.
+ *
+ * Each SVGA3dPrimitiveRange defines a collection of primitives
+ * to render using the same vertex arrays. An index buffer is
+ * optional.
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ /*
+ * A range hint is an optional specification for the range of indices
+ * in an SVGA3dArray that will be used. If 'last' is zero, it is assumed
+ * that the entire array will be used.
+ *
+ * These are only hints. The SVGA3D device may use them for
+ * performance optimization if possible, but it's also allowed to
+ * ignore these values.
+ */
+ uint32 first;
+ uint32 last;
+}
+#include "vmware_pack_end.h"
+SVGA3dArrayRangeHint;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ /*
+ * Define the origin and shape of a vertex or index array. Both
+ * 'offset' and 'stride' are in bytes. The provided surface will be
+ * reinterpreted as a flat array of bytes in the same format used
+ * by surface DMA operations. To avoid unnecessary conversions, the
+ * surface should be created with the SVGA3D_BUFFER format.
+ *
+ * Index 0 in the array starts 'offset' bytes into the surface.
+ * Index 1 begins at byte 'offset + stride', etc. Array indices may
+ * not be negative.
+ */
+ uint32 surfaceId;
+ uint32 offset;
+ uint32 stride;
+}
+#include "vmware_pack_end.h"
+SVGA3dArray;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ /*
+ * Describe a vertex array's data type, and define how it is to be
+ * used by the fixed function pipeline or the vertex shader. It
+ * isn't useful to have two VertexDecls with the same
+ * VertexArrayIdentity in one draw call.
+ */
+ SVGA3dDeclType type;
+ SVGA3dDeclMethod method;
+ SVGA3dDeclUsage usage;
+ uint32 usageIndex;
+}
+#include "vmware_pack_end.h"
+SVGA3dVertexArrayIdentity;
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dVertexDecl {
+ SVGA3dVertexArrayIdentity identity;
+ SVGA3dArray array;
+ SVGA3dArrayRangeHint rangeHint;
+}
+#include "vmware_pack_end.h"
+SVGA3dVertexDecl;
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dPrimitiveRange {
+ /*
+ * Define a group of primitives to render, from sequential indices.
+ *
+ * The value of 'primitiveType' and 'primitiveCount' imply the
+ * total number of vertices that will be rendered.
+ */
+ SVGA3dPrimitiveType primType;
+ uint32 primitiveCount;
+
+ /*
+ * Optional index buffer. If indexArray.surfaceId is
+ * SVGA3D_INVALID_ID, we render without an index buffer. Rendering
+ * without an index buffer is identical to rendering with an index
+ * buffer containing the sequence [0, 1, 2, 3, ...].
+ *
+ * If an index buffer is in use, indexWidth specifies the width in
+ * bytes of each index value. It must be less than or equal to
+ * indexArray.stride.
+ *
+ * (Currently, the SVGA3D device requires index buffers to be tightly
+ * packed. In other words, indexWidth == indexArray.stride)
+ */
+ SVGA3dArray indexArray;
+ uint32 indexWidth;
+
+ /*
+ * Optional index bias. This number is added to all indices from
+ * indexArray before they are used as vertex array indices. This
+ * can be used in multiple ways:
+ *
+ * - When not using an indexArray, this bias can be used to
+ * specify where in the vertex arrays to begin rendering.
+ *
+ * - A positive number here is equivalent to increasing the
+ * offset in each vertex array.
+ *
+ * - A negative number can be used to render using a small
+ * vertex array and an index buffer that contains large
+ * values. This may be used by some applications that
+ * crop a vertex buffer without modifying their index
+ * buffer.
+ *
+ * Note that rendering with a negative bias value may be slower and
+ * use more memory than rendering with a positive or zero bias.
+ */
+ int32 indexBias;
+}
+#include "vmware_pack_end.h"
+SVGA3dPrimitiveRange;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ uint32 cid;
+ uint32 numVertexDecls;
+ uint32 numRanges;
+
+ /*
+ * There are two variable size arrays after the
+ * SVGA3dCmdDrawPrimitives structure. In order,
+ * they are:
+ *
+ * 1. SVGA3dVertexDecl, quantity 'numVertexDecls', but no more than
+ * SVGA3D_MAX_VERTEX_ARRAYS;
+ * 2. SVGA3dPrimitiveRange, quantity 'numRanges', but no more than
+ * SVGA3D_MAX_DRAW_PRIMITIVE_RANGES;
+ * 3. Optionally, SVGA3dVertexDivisor, quantity 'numVertexDecls' (contains
+ * the frequency divisor for the corresponding vertex decl).
+ */
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDrawPrimitives; /* SVGA_3D_CMD_DRAW_PRIMITIVES */
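
The two (optionally three) trailing arrays make this another variable-length
command, and the body size follows directly from the counts in the fixed
part. A sketch with the optional divisor array omitted (editor's illustration
only):

static uint32 draw_primitives_body_size(uint32 num_vertex_decls,
					uint32 num_ranges)
{
	return sizeof(SVGA3dCmdDrawPrimitives) +
	       num_vertex_decls * sizeof(SVGA3dVertexDecl) +
	       num_ranges * sizeof(SVGA3dPrimitiveRange);
}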
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ uint32 cid;
+
+ uint32 primitiveCount; /* How many primitives to render */
+ uint32 startVertexLocation; /* Which vertex to start rendering at. */
+
+ uint8 primitiveType; /* SVGA3dPrimitiveType */
+ uint8 padding[3];
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDraw;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ uint32 cid;
+
+ uint8 primitiveType; /* SVGA3dPrimitiveType */
+
+ uint32 indexBufferSid; /* Valid index buffer sid. */
+ uint32 indexBufferOffset; /* Byte offset into the index buffer, almost */
+ /* always 0 for DX9 guests, non-zero for OpenGL */
+ /* guests. We can't represent non-multiple of */
+ /* stride offsets in D3D9Renderer... */
+ uint8 indexBufferStride; /* Allowable values = 1, 2, or 4 */
+
+ int32 baseVertexLocation; /* Bias applied to the index when selecting a */
+ /* vertex from the streams, may be negative */
+
+ uint32 primitiveCount; /* How many primitives to render */
+ uint32 pad0;
+ uint16 pad1;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDrawIndexed;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ /*
+ * Describe a vertex array's data type, and define how it is to be
+ * used by the fixed function pipeline or the vertex shader. It
+ * isn't useful to have two VertexDecls with the same
+ * VertexArrayIdentity in one draw call.
+ */
+ uint16 streamOffset;
+ uint8 stream;
+ uint8 type; /* SVGA3dDeclType */
+ uint8 method; /* SVGA3dDeclMethod */
+ uint8 usage; /* SVGA3dDeclUsage */
+ uint8 usageIndex;
+ uint8 padding;
+
+}
+#include "vmware_pack_end.h"
+SVGA3dVertexElement;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ uint32 cid;
+
+ uint32 numElements;
+
+ /*
+ * Followed by numElements SVGA3dVertexElement structures.
+ *
+ * If numElements < SVGA3D_MAX_VERTEX_ARRAYS, the remaining elements
+ * are cleared and will not be used by following draws.
+ */
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdSetVertexDecls;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ uint32 sid;
+ uint32 stride;
+ uint32 offset;
+}
+#include "vmware_pack_end.h"
+SVGA3dVertexStream;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ uint32 cid;
+
+ uint32 numStreams;
+ /*
+ * Followed by numStreams SVGA3dVertexStream structures.
+ *
+ * If numStreams < SVGA3D_MAX_VERTEX_ARRAYS, the remaining streams
+ * are cleared and will not be used by following draws.
+ */
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdSetVertexStreams;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ uint32 cid;
+ uint32 numDivisors;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdSetVertexDivisors;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ uint32 stage;
+ SVGA3dTextureStateName name;
+ union {
+ uint32 value;
+ float floatValue;
+ };
+}
+#include "vmware_pack_end.h"
+SVGA3dTextureState;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ uint32 cid;
+ /* Followed by variable number of SVGA3dTextureState structures */
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdSetTextureState; /* SVGA_3D_CMD_SETTEXTURESTATE */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ uint32 cid;
+ SVGA3dTransformType type;
+ float matrix[16];
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdSetTransform; /* SVGA_3D_CMD_SETTRANSFORM */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ float min;
+ float max;
+}
+#include "vmware_pack_end.h"
+SVGA3dZRange;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ uint32 cid;
+ SVGA3dZRange zRange;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdSetZRange; /* SVGA_3D_CMD_SETZRANGE */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ float diffuse[4];
+ float ambient[4];
+ float specular[4];
+ float emissive[4];
+ float shininess;
+}
+#include "vmware_pack_end.h"
+SVGA3dMaterial;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ uint32 cid;
+ SVGA3dFace face;
+ SVGA3dMaterial material;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdSetMaterial; /* SVGA_3D_CMD_SETMATERIAL */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ uint32 cid;
+ uint32 index;
+ SVGA3dLightData data;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdSetLightData; /* SVGA_3D_CMD_SETLIGHTDATA */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ uint32 cid;
+ uint32 index;
+ uint32 enabled;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdSetLightEnabled; /* SVGA_3D_CMD_SETLIGHTENABLED */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ uint32 cid;
+ SVGA3dRect rect;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdSetViewport; /* SVGA_3D_CMD_SETVIEWPORT */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ uint32 cid;
+ SVGA3dRect rect;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdSetScissorRect; /* SVGA_3D_CMD_SETSCISSORRECT */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ uint32 cid;
+ uint32 index;
+ float plane[4];
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdSetClipPlane; /* SVGA_3D_CMD_SETCLIPPLANE */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ uint32 cid;
+ uint32 shid;
+ SVGA3dShaderType type;
+ /* Followed by variable number of DWORDs for shader bytecode */
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDefineShader; /* SVGA_3D_CMD_SHADER_DEFINE */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ uint32 cid;
+ uint32 shid;
+ SVGA3dShaderType type;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDestroyShader; /* SVGA_3D_CMD_SHADER_DESTROY */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ uint32 cid;
+ uint32 reg; /* register number */
+ SVGA3dShaderType type;
+ SVGA3dShaderConstType ctype;
+ uint32 values[4];
+
+ /*
+ * Followed by a variable number of additional values.
+ */
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdSetShaderConst; /* SVGA_3D_CMD_SET_SHADER_CONST */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ uint32 cid;
+ SVGA3dShaderType type;
+ uint32 shid;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdSetShader; /* SVGA_3D_CMD_SET_SHADER */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ uint32 cid;
+ SVGA3dQueryType type;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdBeginQuery; /* SVGA_3D_CMD_BEGIN_QUERY */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ uint32 cid;
+ SVGA3dQueryType type;
+ SVGAGuestPtr guestResult; /* Points to an SVGA3dQueryResult structure */
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdEndQuery; /* SVGA_3D_CMD_END_QUERY */
+
+
+/*
+ * SVGA3D_CMD_WAIT_FOR_QUERY --
+ *
+ * Will read the SVGA3dQueryResult structure pointed to by guestResult,
+ * and if the state member is set to anything other than
+ * SVGA3D_QUERYSTATE_PENDING, this command will always be a no-op.
+ *
+ * Otherwise, in addition to the query explicitly waited for,
+ * all queries with the same type and issued with the same cid, for which
+ * an SVGA_3D_CMD_END_QUERY command has previously been sent, will
+ * be finished after execution of this command.
+ *
+ * A query will be identified by the gmrId and offset of the guestResult
+ * member. If the device can't find an SVGA_3D_CMD_END_QUERY that has
+ * been sent previously with an identical gmrId and offset, it will
+ * effectively end all queries with an identical type issued with the
+ * same cid, and the SVGA3dQueryResult structure pointed to by
+ * guestResult will not be written to. This property can be used to
+ * implement a query barrier for a given cid and query type.
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ uint32 cid; /* Same parameters passed to END_QUERY */
+ SVGA3dQueryType type;
+ SVGAGuestPtr guestResult;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdWaitForQuery; /* SVGA_3D_CMD_WAIT_FOR_QUERY */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ uint32 totalSize; /* Set by guest before query is ended. */
+ SVGA3dQueryState state; /* Set by host or guest. See SVGA3dQueryState. */
+ union { /* Set by host on exit from PENDING state */
+ uint32 result32;
+ uint32 queryCookie; /* May be used to identify which QueryGetData this
+ result corresponds to. */
+ };
+}
+#include "vmware_pack_end.h"
+SVGA3dQueryResult;
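
Combined with the WAIT_FOR_QUERY semantics above, the guest-side pattern is:
initialize state to SVGA3D_QUERYSTATE_PENDING, issue END_QUERY (and
optionally WAIT_FOR_QUERY), then watch for the host to flip the state. A
hedged sketch (editor's illustration only; a real driver sleeps on an
interrupt or fence rather than spinning):

static uint32 poll_query_result(volatile SVGA3dQueryResult *result)
{
	/* The host sets 'state' on exit from PENDING, after which the union
	 * holds the result. SVGA3D_QUERYSTATE_PENDING is from svga3d_types.h. */
	while (result->state == SVGA3D_QUERYSTATE_PENDING)
		cpu_relax(); /* busy-wait for illustration only */

	return result->result32;
}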
+
+
+/*
+ * SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN --
+ *
+ * This is a blit from an SVGA3D surface to a Screen Object.
+ * This blit must be directed at a specific screen.
+ *
+ * The blit copies from a rectangular region of an SVGA3D surface
+ * image to a rectangular region of a screen.
+ *
+ * This command takes an optional variable-length list of clipping
+ * rectangles after the body of the command. If no rectangles are
+ * specified, there is no clipping region. The entire destRect is
+ * drawn to. If one or more rectangles are included, they describe
+ * a clipping region. The clip rectangle coordinates are measured
+ * relative to the top-left corner of destRect.
+ *
+ * The srcImage must be from mip=0 face=0.
+ *
+ * This supports scaling if the src and dest are of different sizes.
+ *
+ * Availability:
+ * SVGA_FIFO_CAP_SCREEN_OBJECT
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ SVGA3dSurfaceImageId srcImage;
+ SVGASignedRect srcRect;
+ uint32 destScreenId; /* Screen Object ID */
+ SVGASignedRect destRect;
+ /* Clipping: zero or more SVGASignedRects follow */
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdBlitSurfaceToScreen; /* SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ uint32 sid;
+ SVGA3dTextureFilter filter;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdGenerateMipmaps; /* SVGA_3D_CMD_GENERATE_MIPMAPS */
+
+
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ uint32 sid;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdActivateSurface; /* SVGA_3D_CMD_ACTIVATE_SURFACE */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ uint32 sid;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDeactivateSurface; /* SVGA_3D_CMD_DEACTIVATE_SURFACE */
+
+/*
+ * Screen DMA command
+ *
+ * Available with SVGA_FIFO_CAP_SCREEN_OBJECT_2. The SVGA_CAP_3D device
+ * cap bit is not required.
+ *
+ * - refBuffer and destBuffer are 32bit BGRX; refBuffer and destBuffer may
+ * be different, but the guest must ensure that refBuffer holds exactly the
+ * contents that were in place when the host received the previous screen
+ * DMA command.
+ *
+ * - changeMap is generated by lib/blit, and it contains at least the
+ * changes since the last received screen DMA.
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdScreenDMA {
+ uint32 screenId;
+ SVGAGuestImage refBuffer;
+ SVGAGuestImage destBuffer;
+ SVGAGuestImage changeMap;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdScreenDMA; /* SVGA_3D_CMD_SCREEN_DMA */
+
+/*
+ * Set Unity Surface Cookie
+ *
+ * Associates the supplied cookie with the surface id for use with
+ * Unity. This cookie is a hint from guest to host; there is no way
+ * for the guest to read back the cookie, and the host is free to drop
+ * the cookie association at will. The default value for the cookie
+ * on all surfaces is 0.
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdSetUnitySurfaceCookie {
+ uint32 sid;
+ uint64 cookie;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdSetUnitySurfaceCookie; /* SVGA_3D_CMD_SET_UNITY_SURFACE_COOKIE */
+
+/*
+ * Open a context-specific surface in a non-context-specific manner.
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdOpenContextSurface {
+ uint32 sid;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdOpenContextSurface; /* SVGA_3D_CMD_OPEN_CONTEXT_SURFACE */
+
+
+/*
+ * Logic ops
+ */
+
+#define SVGA3D_LOTRANSBLT_HONORALPHA (0x01)
+#define SVGA3D_LOSTRETCHBLT_MIRRORX (0x01)
+#define SVGA3D_LOSTRETCHBLT_MIRRORY (0x02)
+#define SVGA3D_LOALPHABLEND_SRCHASALPHA (0x01)
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdLogicOpsBitBlt {
+ /*
+ * All LogicOps surfaces are one-level
+ * surfaces so mipmap & face should always
+ * be zero.
+ */
+ SVGA3dSurfaceImageId src;
+ SVGA3dSurfaceImageId dst;
+ SVGA3dLogicOp logicOp;
+ /* Followed by variable number of SVGA3dCopyBox structures */
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdLogicOpsBitBlt; /* SVGA_3D_CMD_LOGICOPS_BITBLT */
+
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdLogicOpsTransBlt {
+ /*
+ * All LogicOps surfaces are one-level
+ * surfaces so mipmap & face should always
+ * be zero.
+ */
+ SVGA3dSurfaceImageId src;
+ SVGA3dSurfaceImageId dst;
+ uint32 color;
+ uint32 flags;
+ SVGA3dBox srcBox;
+ SVGA3dBox dstBox;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdLogicOpsTransBlt; /* SVGA_3D_CMD_LOGICOPS_TRANSBLT */
+
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdLogicOpsStretchBlt {
+ /*
+ * All LogicOps surfaces are one-level
+ * surfaces so mipmap & face should always
+ * be zero.
+ */
+ SVGA3dSurfaceImageId src;
+ SVGA3dSurfaceImageId dst;
+ uint16 mode;
+ uint16 flags;
+ SVGA3dBox srcBox;
+ SVGA3dBox dstBox;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdLogicOpsStretchBlt; /* SVGA_3D_CMD_LOGICOPS_STRETCHBLT */
+
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdLogicOpsColorFill {
+ /*
+ * All LogicOps surfaces are one-level
+ * surfaces so mipmap & face should always
+ * be zero.
+ */
+ SVGA3dSurfaceImageId dst;
+ uint32 color;
+ SVGA3dLogicOp logicOp;
+ /* Followed by variable number of SVGA3dRect structures. */
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdLogicOpsColorFill; /* SVGA_3D_CMD_LOGICOPS_COLORFILL */
+
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdLogicOpsAlphaBlend {
+ /*
+ * All LogicOps surfaces are one-level
+ * surfaces so mipmap & face should always
+ * be zero.
+ */
+ SVGA3dSurfaceImageId src;
+ SVGA3dSurfaceImageId dst;
+ uint32 alphaVal;
+ uint32 flags;
+ SVGA3dBox srcBox;
+ SVGA3dBox dstBox;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdLogicOpsAlphaBlend; /* SVGA_3D_CMD_LOGICOPS_ALPHABLEND */
+
+#define SVGA3D_CLEARTYPE_INVALID_GAMMA_INDEX 0xFFFFFFFF
+
+#define SVGA3D_CLEARTYPE_GAMMA_WIDTH 512
+#define SVGA3D_CLEARTYPE_GAMMA_HEIGHT 16
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdLogicOpsClearTypeBlend {
+ /*
+ * All LogicOps surfaces are one-level
+ * surfaces so mipmap & face should always
+ * be zero.
+ */
+ SVGA3dSurfaceImageId tmp;
+ SVGA3dSurfaceImageId dst;
+ SVGA3dSurfaceImageId gammaSurf;
+ SVGA3dSurfaceImageId alphaSurf;
+ uint32 gamma;
+ uint32 color;
+ uint32 color2;
+ int32 alphaOffsetX;
+ int32 alphaOffsetY;
+ /* Followed by variable number of SVGA3dBox structures */
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdLogicOpsClearTypeBlend; /* SVGA_3D_CMD_LOGICOPS_CLEARTYPEBLEND */
+
+
+/*
+ * Guest-backed objects definitions.
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ SVGAMobFormat ptDepth;
+ uint32 sizeInBytes;
+ PPN64 base;
+}
+#include "vmware_pack_end.h"
+SVGAOTableMobEntry;
+#define SVGA3D_OTABLE_MOB_ENTRY_SIZE (sizeof(SVGAOTableMobEntry))
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ SVGA3dSurfaceFormat format;
+ SVGA3dSurfaceFlags surfaceFlags;
+ uint32 numMipLevels;
+ uint32 multisampleCount;
+ SVGA3dTextureFilter autogenFilter;
+ SVGA3dSize size;
+ SVGAMobId mobid;
+ uint32 arraySize;
+ uint32 mobPitch;
+ uint32 pad[5];
+}
+#include "vmware_pack_end.h"
+SVGAOTableSurfaceEntry;
+#define SVGA3D_OTABLE_SURFACE_ENTRY_SIZE (sizeof(SVGAOTableSurfaceEntry))
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ uint32 cid;
+ SVGAMobId mobid;
+}
+#include "vmware_pack_end.h"
+SVGAOTableContextEntry;
+#define SVGA3D_OTABLE_CONTEXT_ENTRY_SIZE (sizeof(SVGAOTableContextEntry))
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ SVGA3dShaderType type;
+ uint32 sizeInBytes;
+ uint32 offsetInBytes;
+ SVGAMobId mobid;
+}
+#include "vmware_pack_end.h"
+SVGAOTableShaderEntry;
+#define SVGA3D_OTABLE_SHADER_ENTRY_SIZE (sizeof(SVGAOTableShaderEntry))
+
+#define SVGA_STFLAG_PRIMARY (1 << 0)
+typedef uint32 SVGAScreenTargetFlags;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ SVGA3dSurfaceImageId image;
+ uint32 width;
+ uint32 height;
+ int32 xRoot;
+ int32 yRoot;
+ SVGAScreenTargetFlags flags;
+ uint32 dpi;
+ uint32 pad[7];
+}
+#include "vmware_pack_end.h"
+SVGAOTableScreenTargetEntry;
+#define SVGA3D_OTABLE_SCREEN_TARGET_ENTRY_SIZE \
+ (sizeof(SVGAOTableScreenTargetEntry))
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ float value[4];
+}
+#include "vmware_pack_end.h"
+SVGA3dShaderConstFloat;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ int32 value[4];
+}
+#include "vmware_pack_end.h"
+SVGA3dShaderConstInt;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ uint32 value;
+}
+#include "vmware_pack_end.h"
+SVGA3dShaderConstBool;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ uint16 streamOffset;
+ uint8 stream;
+ uint8 type;
+ uint8 methodUsage;
+ uint8 usageIndex;
+}
+#include "vmware_pack_end.h"
+SVGAGBVertexElement;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ uint32 sid;
+ uint16 stride;
+ uint32 offset;
+}
+#include "vmware_pack_end.h"
+SVGAGBVertexStream;
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ SVGA3dRect viewport;
+ SVGA3dRect scissorRect;
+ SVGA3dZRange zRange;
+
+ SVGA3dSurfaceImageId renderTargets[SVGA3D_RT_MAX];
+ SVGAGBVertexElement decl1[4];
+
+ uint32 renderStates[SVGA3D_RS_MAX];
+ SVGAGBVertexElement decl2[18];
+ uint32 pad0[2];
+
+ struct {
+ SVGA3dFace face;
+ SVGA3dMaterial material;
+ } material;
+
+ float clipPlanes[SVGA3D_NUM_CLIPPLANES][4];
+ float matrices[SVGA3D_TRANSFORM_MAX][16];
+
+ SVGA3dBool lightEnabled[SVGA3D_NUM_LIGHTS];
+ SVGA3dLightData lightData[SVGA3D_NUM_LIGHTS];
+
+ /*
+ * Shaders currently bound
+ */
+ uint32 shaders[SVGA3D_NUM_SHADERTYPE_PREDX];
+ SVGAGBVertexElement decl3[10];
+ uint32 pad1[3];
+
+ uint32 occQueryActive;
+ uint32 occQueryValue;
+
+ /*
+ * Int/Bool Shader constants
+ */
+ SVGA3dShaderConstInt pShaderIValues[SVGA3D_CONSTINTREG_MAX];
+ SVGA3dShaderConstInt vShaderIValues[SVGA3D_CONSTINTREG_MAX];
+ uint16 pShaderBValues;
+ uint16 vShaderBValues;
+
+
+ SVGAGBVertexStream streams[SVGA3D_MAX_VERTEX_ARRAYS];
+ SVGA3dVertexDivisor divisors[SVGA3D_MAX_VERTEX_ARRAYS];
+ uint32 numVertexDecls;
+ uint32 numVertexStreams;
+ uint32 numVertexDivisors;
+ uint32 pad2[30];
+
+ /*
+ * Texture Stages
+ *
+ * SVGA3D_TS_INVALID through SVGA3D_TS_CONSTANT are in the
+ * textureStages array.
+ * SVGA3D_TS_COLOR_KEY is in tsColorKey.
+ */
+ uint32 tsColorKey[SVGA3D_NUM_TEXTURE_UNITS];
+ uint32 textureStages[SVGA3D_NUM_TEXTURE_UNITS][SVGA3D_TS_CONSTANT + 1];
+ uint32 tsColorKeyEnable[SVGA3D_NUM_TEXTURE_UNITS];
+
+ /*
+ * Float Shader constants.
+ */
+ SVGA3dShaderConstFloat pShaderFValues[SVGA3D_CONSTREG_MAX];
+ SVGA3dShaderConstFloat vShaderFValues[SVGA3D_CONSTREG_MAX];
+}
+#include "vmware_pack_end.h"
+SVGAGBContextData;
+#define SVGA3D_CONTEXT_DATA_SIZE (sizeof(SVGAGBContextData))
+
+/*
+ * SVGA3dCmdSetOTableBase --
+ *
+ * This command allows the guest to specify the base PPN of the
+ * specified object table.
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ SVGAOTableType type;
+ PPN baseAddress;
+ uint32 sizeInBytes;
+ uint32 validSizeInBytes;
+ SVGAMobFormat ptDepth;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdSetOTableBase; /* SVGA_3D_CMD_SET_OTABLE_BASE */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ SVGAOTableType type;
+ PPN64 baseAddress;
+ uint32 sizeInBytes;
+ uint32 validSizeInBytes;
+ SVGAMobFormat ptDepth;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdSetOTableBase64; /* SVGA_3D_CMD_SET_OTABLE_BASE64 */
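
A hedged example of programming the MOB object table with the 64-bit variant
(editor's illustration only; SVGA_OTABLE_MOB and SVGA3D_MOBFMT_PTDEPTH64_1
are taken from svga3d_types.h, and the field values are placeholders):

static void set_mob_otable_base(PPN64 otable_base_ppn, uint32 num_entries)
{
	SVGA3dCmdSetOTableBase64 cmd = {
		.type             = SVGA_OTABLE_MOB,
		.baseAddress      = otable_base_ppn,
		.sizeInBytes      = num_entries * SVGA3D_OTABLE_MOB_ENTRY_SIZE,
		.validSizeInBytes = 0, /* nothing populated yet */
		.ptDepth          = SVGA3D_MOBFMT_PTDEPTH64_1,
	};

	/* ... submit behind an SVGA3dCmdHeader, as sketched earlier ... */
	(void)cmd;
}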
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ SVGAOTableType type;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdReadbackOTable; /* SVGA_3D_CMD_READBACK_OTABLE */
+
+/*
+ * Define a memory object (Mob) in the OTable.
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDefineGBMob {
+ SVGAMobId mobid;
+ SVGAMobFormat ptDepth;
+ PPN base;
+ uint32 sizeInBytes;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDefineGBMob; /* SVGA_3D_CMD_DEFINE_GB_MOB */
+
+
+/*
+ * Destroys an object in the OTable.
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDestroyGBMob {
+ SVGAMobId mobid;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDestroyGBMob; /* SVGA_3D_CMD_DESTROY_GB_MOB */
+
+
+/*
+ * Define a memory object (Mob) in the OTable with a PPN64 base.
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDefineGBMob64 {
+ SVGAMobId mobid;
+ SVGAMobFormat ptDepth;
+ PPN64 base;
+ uint32 sizeInBytes;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDefineGBMob64; /* SVGA_3D_CMD_DEFINE_GB_MOB64 */
+
+/*
+ * Redefine an object in the OTable with PPN64 base.
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdRedefineGBMob64 {
+ SVGAMobId mobid;
+ SVGAMobFormat ptDepth;
+ PPN64 base;
+ uint32 sizeInBytes;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdRedefineGBMob64; /* SVGA_3D_CMD_REDEFINE_GB_MOB64 */
+
+/*
+ * Notification that the page tables have been modified.
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdUpdateGBMobMapping {
+ SVGAMobId mobid;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdUpdateGBMobMapping; /* SVGA_3D_CMD_UPDATE_GB_MOB_MAPPING */
+
+/*
+ * Define a guest-backed surface.
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDefineGBSurface {
+ uint32 sid;
+ SVGA3dSurfaceFlags surfaceFlags;
+ SVGA3dSurfaceFormat format;
+ uint32 numMipLevels;
+ uint32 multisampleCount;
+ SVGA3dTextureFilter autogenFilter;
+ SVGA3dSize size;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDefineGBSurface; /* SVGA_3D_CMD_DEFINE_GB_SURFACE */
+
+/*
+ * Destroy a guest-backed surface.
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDestroyGBSurface {
+ uint32 sid;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDestroyGBSurface; /* SVGA_3D_CMD_DESTROY_GB_SURFACE */
+
+/*
+ * Bind a guest-backed surface to a mob.
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdBindGBSurface {
+ uint32 sid;
+ SVGAMobId mobid;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdBindGBSurface; /* SVGA_3D_CMD_BIND_GB_SURFACE */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdBindGBSurfaceWithPitch {
+ uint32 sid;
+ SVGAMobId mobid;
+ uint32 baseLevelPitch;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdBindGBSurfaceWithPitch; /* SVGA_3D_CMD_BIND_GB_SURFACE_WITH_PITCH */
+
+/*
+ * Conditionally bind a mob to a guest-backed surface if testMobid
+ * matches the currently bound mob. Optionally issue a
+ * readback/update on the surface while it is still bound to the old
+ * mobid if the mobid is changed by this command.
+ */
+
+#define SVGA3D_COND_BIND_GB_SURFACE_FLAG_READBACK (1 << 0)
+#define SVGA3D_COND_BIND_GB_SURFACE_FLAG_UPDATE (1 << 1)
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ uint32 sid;
+ SVGAMobId testMobid;
+ SVGAMobId mobid;
+ uint32 flags;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdCondBindGBSurface; /* SVGA_3D_CMD_COND_BIND_GB_SURFACE */
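
A hedged usage sketch (editor's illustration only): swap a surface from
old_mobid to new_mobid only if old_mobid is still the bound mob, asking the
device to flush the old contents back first.

static void swap_surface_mob(uint32 sid, SVGAMobId old_mobid,
			     SVGAMobId new_mobid)
{
	SVGA3dCmdCondBindGBSurface cmd = {
		.sid       = sid,
		.testMobid = old_mobid,
		.mobid     = new_mobid,
		.flags     = SVGA3D_COND_BIND_GB_SURFACE_FLAG_READBACK,
	};

	/* ... submit behind an SVGA3dCmdHeader ... */
	(void)cmd;
}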
+
+/*
+ * Update an image in a guest-backed surface.
+ * (Inform the device that the guest-contents have been updated.)
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdUpdateGBImage {
+ SVGA3dSurfaceImageId image;
+ SVGA3dBox box;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdUpdateGBImage; /* SVGA_3D_CMD_UPDATE_GB_IMAGE */
+
+/*
+ * Update an entire guest-backed surface.
+ * (Inform the device that the guest-contents have been updated.)
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdUpdateGBSurface {
+ uint32 sid;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdUpdateGBSurface; /* SVGA_3D_CMD_UPDATE_GB_SURFACE */
+
+/*
+ * Readback an image in a guest-backed surface.
+ * (Request the device to flush the dirty contents into the guest.)
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdReadbackGBImage {
+ SVGA3dSurfaceImageId image;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdReadbackGBImage; /* SVGA_3D_CMD_READBACK_GB_IMAGE */
+
+/*
+ * Readback an entire guest-backed surface.
+ * (Request the device to flush the dirty contents into the guest.)
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdReadbackGBSurface {
+ uint32 sid;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdReadbackGBSurface; /* SVGA_3D_CMD_READBACK_GB_SURFACE */
+
+/*
+ * Readback a sub rect of an image in a guest-backed surface. After
+ * issuing this command the driver is required to issue an update call
+ * of the same region before issuing any other commands that reference
+ * this surface, or rendering is not guaranteed.
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdReadbackGBImagePartial {
+ SVGA3dSurfaceImageId image;
+ SVGA3dBox box;
+ uint32 invertBox;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdReadbackGBImagePartial; /* SVGA_3D_CMD_READBACK_GB_IMAGE_PARTIAL */
+
+
+/*
+ * Invalidate an image in a guest-backed surface.
+ * (Notify the device that the contents can be lost.)
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdInvalidateGBImage {
+ SVGA3dSurfaceImageId image;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdInvalidateGBImage; /* SVGA_3D_CMD_INVALIDATE_GB_IMAGE */
+
+/*
+ * Invalidate an entire guest-backed surface.
+ * (Notify the device that the contents of all images can be lost.)
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdInvalidateGBSurface {
+ uint32 sid;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdInvalidateGBSurface; /* SVGA_3D_CMD_INVALIDATE_GB_SURFACE */
+
+/*
+ * Invalidate a sub rect of an image in a guest-backed surface. After
+ * issuing this command the driver is required to issue an update call
+ * of the same region before issuing any other commands that reference
+ * this surface, or rendering is not guaranteed.
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdInvalidateGBImagePartial {
+ SVGA3dSurfaceImageId image;
+ SVGA3dBox box;
+ uint32 invertBox;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdInvalidateGBImagePartial; /* SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL */
+
+
+/*
+ * Define a guest-backed context.
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDefineGBContext {
+ uint32 cid;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDefineGBContext; /* SVGA_3D_CMD_DEFINE_GB_CONTEXT */
+
+/*
+ * Destroy a guest-backed context.
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDestroyGBContext {
+ uint32 cid;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDestroyGBContext; /* SVGA_3D_CMD_DESTROY_GB_CONTEXT */
+
+/*
+ * Bind a guest-backed context.
+ *
+ * validContents should be set to 0 for new contexts,
+ * and 1 if this is an old context which is getting paged
+ * back on to the device.
+ *
+ * For new contexts, it is recommended that the driver
+ * issue commands to initialize all interesting state
+ * prior to rendering.
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdBindGBContext {
+ uint32 cid;
+ SVGAMobId mobid;
+ uint32 validContents;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdBindGBContext; /* SVGA_3D_CMD_BIND_GB_CONTEXT */
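+
+/*
+ * Editorial sketch (not part of the original header): the two
+ * validContents cases described above.  cid/mobid are hypothetical.
+ *
+ *   SVGA3dCmdBindGBContext bind;
+ *   bind.cid           = cid;
+ *   bind.mobid         = mobid;
+ *   bind.validContents = 0;  // new context: initialize all interesting
+ *                            // state before rendering
+ *
+ * A context being paged back onto the device would instead pass
+ * validContents = 1, with the mob holding its previously saved state.
+ */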
+
+/*
+ * Readback a guest-backed context.
+ * (Request that the device flush the contents back into guest memory.)
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdReadbackGBContext {
+ uint32 cid;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdReadbackGBContext; /* SVGA_3D_CMD_READBACK_GB_CONTEXT */
+
+/*
+ * Invalidate a guest-backed context.
+ */
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdInvalidateGBContext {
+ uint32 cid;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdInvalidateGBContext; /* SVGA_3D_CMD_INVALIDATE_GB_CONTEXT */
+
+/*
+ * Define a guest-backed shader.
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDefineGBShader {
+ uint32 shid;
+ SVGA3dShaderType type;
+ uint32 sizeInBytes;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDefineGBShader; /* SVGA_3D_CMD_DEFINE_GB_SHADER */
+
+/*
+ * Bind a guest-backed shader.
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdBindGBShader {
+ uint32 shid;
+ SVGAMobId mobid;
+ uint32 offsetInBytes;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdBindGBShader; /* SVGA_3D_CMD_BIND_GB_SHADER */
+
+/*
+ * Destroy a guest-backed shader.
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDestroyGBShader {
+ uint32 shid;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDestroyGBShader; /* SVGA_3D_CMD_DESTROY_GB_SHADER */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ uint32 cid;
+ uint32 regStart;
+ SVGA3dShaderType shaderType;
+ SVGA3dShaderConstType constType;
+
+ /*
+ * Followed by a variable number of shader constants.
+ *
+    * Note that FLOAT and INT constants are 4 dwords in length, while
+    * BOOL constants are 1 dword in length.
+ */
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdSetGBShaderConstInline; /* SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE */
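+
+/*
+ * Editorial sketch (not part of the original header): sizing the
+ * variable-length constant data that follows the fixed header above.
+ * For n constants starting at regStart:
+ *
+ *   FLOAT/INT:  n * 4 * sizeof(uint32) bytes of constant data
+ *   BOOL:       n * 1 * sizeof(uint32) bytes of constant data
+ *
+ * so a command setting 8 FLOAT constants carries
+ * sizeof(SVGA3dCmdSetGBShaderConstInline) + 128 bytes in total.
+ */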
+
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ uint32 cid;
+ SVGA3dQueryType type;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdBeginGBQuery; /* SVGA_3D_CMD_BEGIN_GB_QUERY */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ uint32 cid;
+ SVGA3dQueryType type;
+ SVGAMobId mobid;
+ uint32 offset;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdEndGBQuery; /* SVGA_3D_CMD_END_GB_QUERY */
+
+
+/*
+ * SVGA_3D_CMD_WAIT_FOR_GB_QUERY --
+ *
+ * The semantics of this command are identical to the
+ * SVGA_3D_CMD_WAIT_FOR_QUERY except that the results are written
+ * to a Mob instead of a GMR.
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ uint32 cid;
+ SVGA3dQueryType type;
+ SVGAMobId mobid;
+ uint32 offset;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdWaitForGBQuery; /* SVGA_3D_CMD_WAIT_FOR_GB_QUERY */
+
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ SVGAMobId mobid;
+ uint32 mustBeZero;
+ uint32 initialized;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdEnableGart; /* SVGA_3D_CMD_ENABLE_GART */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ SVGAMobId mobid;
+ uint32 gartOffset;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdMapMobIntoGart; /* SVGA_3D_CMD_MAP_MOB_INTO_GART */
+
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ uint32 gartOffset;
+ uint32 numPages;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdUnmapGartRange; /* SVGA_3D_CMD_UNMAP_GART_RANGE */
+
+
+/*
+ * Screen Targets
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ uint32 stid;
+ uint32 width;
+ uint32 height;
+ int32 xRoot;
+ int32 yRoot;
+ SVGAScreenTargetFlags flags;
+
+ /*
+    * The physical DPI at which the guest expects this screen to be displayed.
+ *
+ * Guests which are not DPI-aware should set this to zero.
+ */
+ uint32 dpi;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDefineGBScreenTarget; /* SVGA_3D_CMD_DEFINE_GB_SCREENTARGET */
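+
+/*
+ * Editorial sketch (not part of the original header): a non-DPI-aware
+ * guest defining a 1024x768 screen target at the origin, following the
+ * dpi recommendation above.  The stid value is hypothetical.
+ *
+ *   SVGA3dCmdDefineGBScreenTarget st;
+ *   st.stid   = 0;
+ *   st.width  = 1024;
+ *   st.height = 768;
+ *   st.xRoot  = 0;
+ *   st.yRoot  = 0;
+ *   st.flags  = 0;
+ *   st.dpi    = 0;  // not DPI-aware
+ */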
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ uint32 stid;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDestroyGBScreenTarget; /* SVGA_3D_CMD_DESTROY_GB_SCREENTARGET */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ uint32 stid;
+ SVGA3dSurfaceImageId image;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdBindGBScreenTarget; /* SVGA_3D_CMD_BIND_GB_SCREENTARGET */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ uint32 stid;
+ SVGA3dRect rect;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdUpdateGBScreenTarget; /* SVGA_3D_CMD_UPDATE_GB_SCREENTARGET */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdGBScreenDMA {
+ uint32 screenId;
+ uint32 dead;
+ SVGAMobId destMobID;
+ uint32 destPitch;
+ SVGAMobId changeMapMobID;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdGBScreenDMA; /* SVGA_3D_CMD_GB_SCREEN_DMA */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ uint32 value;
+ uint32 mobId;
+ uint32 mobOffset;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdGBMobFence; /* SVGA_3D_CMD_GB_MOB_FENCE */
+
+#endif /* _SVGA3D_CMD_H_ */
diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga3d_devcaps.h b/drivers/gpu/drm/vmwgfx/device_include/svga3d_devcaps.h
new file mode 100644
index 000000000000..c18b663f360f
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/device_include/svga3d_devcaps.h
@@ -0,0 +1,457 @@
+/**********************************************************
+ * Copyright 1998-2015 VMware, Inc. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy,
+ * modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ **********************************************************/
+
+/*
+ * svga3d_devcaps.h --
+ *
+ * SVGA 3d caps definitions
+ */
+
+#ifndef _SVGA3D_DEVCAPS_H_
+#define _SVGA3D_DEVCAPS_H_
+
+#define INCLUDE_ALLOW_MODULE
+#define INCLUDE_ALLOW_USERLEVEL
+#define INCLUDE_ALLOW_VMCORE
+
+#include "includeCheck.h"
+
+/*
+ * 3D Hardware Version
+ *
+ * The hardware version is stored in the SVGA_FIFO_3D_HWVERSION fifo
+ * register. It is set by the host and read by the guest. This lets
+ * us make new guest drivers which are backwards-compatible with old
+ * SVGA hardware revisions. It does not let us support old guest
+ * drivers. Good enough for now.
+ */
+
+#define SVGA3D_MAKE_HWVERSION(major, minor) (((major) << 16) | ((minor) & 0xFF))
+#define SVGA3D_MAJOR_HWVERSION(version) ((version) >> 16)
+#define SVGA3D_MINOR_HWVERSION(version) ((version) & 0xFF)
+
+typedef enum {
+ SVGA3D_HWVERSION_WS5_RC1 = SVGA3D_MAKE_HWVERSION(0, 1),
+ SVGA3D_HWVERSION_WS5_RC2 = SVGA3D_MAKE_HWVERSION(0, 2),
+ SVGA3D_HWVERSION_WS51_RC1 = SVGA3D_MAKE_HWVERSION(0, 3),
+ SVGA3D_HWVERSION_WS6_B1 = SVGA3D_MAKE_HWVERSION(1, 1),
+ SVGA3D_HWVERSION_FUSION_11 = SVGA3D_MAKE_HWVERSION(1, 4),
+ SVGA3D_HWVERSION_WS65_B1 = SVGA3D_MAKE_HWVERSION(2, 0),
+ SVGA3D_HWVERSION_WS8_B1 = SVGA3D_MAKE_HWVERSION(2, 1),
+ SVGA3D_HWVERSION_CURRENT = SVGA3D_HWVERSION_WS8_B1,
+} SVGA3dHardwareVersion;
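+
+/*
+ * Editorial note (not part of the original header): a worked example of
+ * the encoding produced by the macros above.
+ *
+ *   SVGA3D_MAKE_HWVERSION(2, 1)        == 0x00020001 (SVGA3D_HWVERSION_WS8_B1)
+ *   SVGA3D_MAJOR_HWVERSION(0x00020001) == 2
+ *   SVGA3D_MINOR_HWVERSION(0x00020001) == 1
+ */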
+
+/*
+ * DevCap indexes.
+ */
+
+typedef enum {
+ SVGA3D_DEVCAP_INVALID = ((uint32)-1),
+ SVGA3D_DEVCAP_3D = 0,
+ SVGA3D_DEVCAP_MAX_LIGHTS = 1,
+
+ /*
+ * SVGA3D_DEVCAP_MAX_TEXTURES reflects the maximum number of
+ * fixed-function texture units available. Each of these units
+    * works in both FFP and Shader modes, and supports texture
+ * transforms and texture coordinates. The host may have additional
+ * texture image units that are only usable with shaders.
+ */
+ SVGA3D_DEVCAP_MAX_TEXTURES = 2,
+ SVGA3D_DEVCAP_MAX_CLIP_PLANES = 3,
+ SVGA3D_DEVCAP_VERTEX_SHADER_VERSION = 4,
+ SVGA3D_DEVCAP_VERTEX_SHADER = 5,
+ SVGA3D_DEVCAP_FRAGMENT_SHADER_VERSION = 6,
+ SVGA3D_DEVCAP_FRAGMENT_SHADER = 7,
+ SVGA3D_DEVCAP_MAX_RENDER_TARGETS = 8,
+ SVGA3D_DEVCAP_S23E8_TEXTURES = 9,
+ SVGA3D_DEVCAP_S10E5_TEXTURES = 10,
+ SVGA3D_DEVCAP_MAX_FIXED_VERTEXBLEND = 11,
+ SVGA3D_DEVCAP_D16_BUFFER_FORMAT = 12,
+ SVGA3D_DEVCAP_D24S8_BUFFER_FORMAT = 13,
+ SVGA3D_DEVCAP_D24X8_BUFFER_FORMAT = 14,
+ SVGA3D_DEVCAP_QUERY_TYPES = 15,
+ SVGA3D_DEVCAP_TEXTURE_GRADIENT_SAMPLING = 16,
+ SVGA3D_DEVCAP_MAX_POINT_SIZE = 17,
+ SVGA3D_DEVCAP_MAX_SHADER_TEXTURES = 18,
+ SVGA3D_DEVCAP_MAX_TEXTURE_WIDTH = 19,
+ SVGA3D_DEVCAP_MAX_TEXTURE_HEIGHT = 20,
+ SVGA3D_DEVCAP_MAX_VOLUME_EXTENT = 21,
+ SVGA3D_DEVCAP_MAX_TEXTURE_REPEAT = 22,
+ SVGA3D_DEVCAP_MAX_TEXTURE_ASPECT_RATIO = 23,
+ SVGA3D_DEVCAP_MAX_TEXTURE_ANISOTROPY = 24,
+ SVGA3D_DEVCAP_MAX_PRIMITIVE_COUNT = 25,
+ SVGA3D_DEVCAP_MAX_VERTEX_INDEX = 26,
+ SVGA3D_DEVCAP_MAX_VERTEX_SHADER_INSTRUCTIONS = 27,
+ SVGA3D_DEVCAP_MAX_FRAGMENT_SHADER_INSTRUCTIONS = 28,
+ SVGA3D_DEVCAP_MAX_VERTEX_SHADER_TEMPS = 29,
+ SVGA3D_DEVCAP_MAX_FRAGMENT_SHADER_TEMPS = 30,
+ SVGA3D_DEVCAP_TEXTURE_OPS = 31,
+ SVGA3D_DEVCAP_SURFACEFMT_X8R8G8B8 = 32,
+ SVGA3D_DEVCAP_SURFACEFMT_A8R8G8B8 = 33,
+ SVGA3D_DEVCAP_SURFACEFMT_A2R10G10B10 = 34,
+ SVGA3D_DEVCAP_SURFACEFMT_X1R5G5B5 = 35,
+ SVGA3D_DEVCAP_SURFACEFMT_A1R5G5B5 = 36,
+ SVGA3D_DEVCAP_SURFACEFMT_A4R4G4B4 = 37,
+ SVGA3D_DEVCAP_SURFACEFMT_R5G6B5 = 38,
+ SVGA3D_DEVCAP_SURFACEFMT_LUMINANCE16 = 39,
+ SVGA3D_DEVCAP_SURFACEFMT_LUMINANCE8_ALPHA8 = 40,
+ SVGA3D_DEVCAP_SURFACEFMT_ALPHA8 = 41,
+ SVGA3D_DEVCAP_SURFACEFMT_LUMINANCE8 = 42,
+ SVGA3D_DEVCAP_SURFACEFMT_Z_D16 = 43,
+ SVGA3D_DEVCAP_SURFACEFMT_Z_D24S8 = 44,
+ SVGA3D_DEVCAP_SURFACEFMT_Z_D24X8 = 45,
+ SVGA3D_DEVCAP_SURFACEFMT_DXT1 = 46,
+ SVGA3D_DEVCAP_SURFACEFMT_DXT2 = 47,
+ SVGA3D_DEVCAP_SURFACEFMT_DXT3 = 48,
+ SVGA3D_DEVCAP_SURFACEFMT_DXT4 = 49,
+ SVGA3D_DEVCAP_SURFACEFMT_DXT5 = 50,
+ SVGA3D_DEVCAP_SURFACEFMT_BUMPX8L8V8U8 = 51,
+ SVGA3D_DEVCAP_SURFACEFMT_A2W10V10U10 = 52,
+ SVGA3D_DEVCAP_SURFACEFMT_BUMPU8V8 = 53,
+ SVGA3D_DEVCAP_SURFACEFMT_Q8W8V8U8 = 54,
+ SVGA3D_DEVCAP_SURFACEFMT_CxV8U8 = 55,
+ SVGA3D_DEVCAP_SURFACEFMT_R_S10E5 = 56,
+ SVGA3D_DEVCAP_SURFACEFMT_R_S23E8 = 57,
+ SVGA3D_DEVCAP_SURFACEFMT_RG_S10E5 = 58,
+ SVGA3D_DEVCAP_SURFACEFMT_RG_S23E8 = 59,
+ SVGA3D_DEVCAP_SURFACEFMT_ARGB_S10E5 = 60,
+ SVGA3D_DEVCAP_SURFACEFMT_ARGB_S23E8 = 61,
+
+ /*
+ * There is a hole in our devcap definitions for
+ * historical reasons.
+ *
+ * Define a constant just for completeness.
+ */
+ SVGA3D_DEVCAP_MISSING62 = 62,
+
+ SVGA3D_DEVCAP_MAX_VERTEX_SHADER_TEXTURES = 63,
+
+ /*
+ * Note that MAX_SIMULTANEOUS_RENDER_TARGETS is a maximum count of color
+ * render targets. This does not include the depth or stencil targets.
+ */
+ SVGA3D_DEVCAP_MAX_SIMULTANEOUS_RENDER_TARGETS = 64,
+
+ SVGA3D_DEVCAP_SURFACEFMT_V16U16 = 65,
+ SVGA3D_DEVCAP_SURFACEFMT_G16R16 = 66,
+ SVGA3D_DEVCAP_SURFACEFMT_A16B16G16R16 = 67,
+ SVGA3D_DEVCAP_SURFACEFMT_UYVY = 68,
+ SVGA3D_DEVCAP_SURFACEFMT_YUY2 = 69,
+ SVGA3D_DEVCAP_MULTISAMPLE_NONMASKABLESAMPLES = 70,
+ SVGA3D_DEVCAP_MULTISAMPLE_MASKABLESAMPLES = 71,
+ SVGA3D_DEVCAP_ALPHATOCOVERAGE = 72,
+ SVGA3D_DEVCAP_SUPERSAMPLE = 73,
+ SVGA3D_DEVCAP_AUTOGENMIPMAPS = 74,
+ SVGA3D_DEVCAP_SURFACEFMT_NV12 = 75,
+ SVGA3D_DEVCAP_SURFACEFMT_AYUV = 76,
+
+ /*
+ * This is the maximum number of SVGA context IDs that the guest
+ * can define using SVGA_3D_CMD_CONTEXT_DEFINE.
+ */
+ SVGA3D_DEVCAP_MAX_CONTEXT_IDS = 77,
+
+ /*
+ * This is the maximum number of SVGA surface IDs that the guest
+ * can define using SVGA_3D_CMD_SURFACE_DEFINE*.
+ */
+ SVGA3D_DEVCAP_MAX_SURFACE_IDS = 78,
+
+ SVGA3D_DEVCAP_SURFACEFMT_Z_DF16 = 79,
+ SVGA3D_DEVCAP_SURFACEFMT_Z_DF24 = 80,
+ SVGA3D_DEVCAP_SURFACEFMT_Z_D24S8_INT = 81,
+
+ SVGA3D_DEVCAP_SURFACEFMT_ATI1 = 82,
+ SVGA3D_DEVCAP_SURFACEFMT_ATI2 = 83,
+
+ /*
+ * Deprecated.
+ */
+ SVGA3D_DEVCAP_DEAD1 = 84,
+
+ /*
+ * This contains several SVGA_3D_CAPS_VIDEO_DECODE elements
+    * ORed together, one for every type of video decoding supported.
+ */
+ SVGA3D_DEVCAP_VIDEO_DECODE = 85,
+
+ /*
+ * This contains several SVGA_3D_CAPS_VIDEO_PROCESS elements
+    * ORed together, one for every type of video processing supported.
+ */
+ SVGA3D_DEVCAP_VIDEO_PROCESS = 86,
+
+ SVGA3D_DEVCAP_LINE_AA = 87, /* boolean */
+ SVGA3D_DEVCAP_LINE_STIPPLE = 88, /* boolean */
+ SVGA3D_DEVCAP_MAX_LINE_WIDTH = 89, /* float */
+ SVGA3D_DEVCAP_MAX_AA_LINE_WIDTH = 90, /* float */
+
+ SVGA3D_DEVCAP_SURFACEFMT_YV12 = 91,
+
+ /*
+ * Does the host support the SVGA logic ops commands?
+ */
+ SVGA3D_DEVCAP_LOGICOPS = 92,
+
+ /*
+ * Are TS_CONSTANT, TS_COLOR_KEY, and TS_COLOR_KEY_ENABLE supported?
+ */
+ SVGA3D_DEVCAP_TS_COLOR_KEY = 93, /* boolean */
+
+ /*
+ * Deprecated.
+ */
+ SVGA3D_DEVCAP_DEAD2 = 94,
+
+ /*
+ * Does the device support the DX commands?
+ */
+ SVGA3D_DEVCAP_DX = 95,
+
+ /*
+ * What is the maximum size of a texture array?
+ *
+ * (Even if this cap is zero, cubemaps are still allowed.)
+ */
+ SVGA3D_DEVCAP_MAX_TEXTURE_ARRAY_SIZE = 96,
+
+ /*
+ * What is the maximum number of vertex buffers that can
+ * be used in the DXContext inputAssembly?
+ */
+ SVGA3D_DEVCAP_DX_MAX_VERTEXBUFFERS = 97,
+
+ /*
+ * What is the maximum number of constant buffers
+ * that can be expected to work correctly with a
+ * DX context?
+ */
+ SVGA3D_DEVCAP_DX_MAX_CONSTANT_BUFFERS = 98,
+
+ /*
+ * Does the device support provoking vertex control?
+ * If zero, the first vertex will always be the provoking vertex.
+ */
+ SVGA3D_DEVCAP_DX_PROVOKING_VERTEX = 99,
+
+ SVGA3D_DEVCAP_DXFMT_X8R8G8B8 = 100,
+ SVGA3D_DEVCAP_DXFMT_A8R8G8B8 = 101,
+ SVGA3D_DEVCAP_DXFMT_R5G6B5 = 102,
+ SVGA3D_DEVCAP_DXFMT_X1R5G5B5 = 103,
+ SVGA3D_DEVCAP_DXFMT_A1R5G5B5 = 104,
+ SVGA3D_DEVCAP_DXFMT_A4R4G4B4 = 105,
+ SVGA3D_DEVCAP_DXFMT_Z_D32 = 106,
+ SVGA3D_DEVCAP_DXFMT_Z_D16 = 107,
+ SVGA3D_DEVCAP_DXFMT_Z_D24S8 = 108,
+ SVGA3D_DEVCAP_DXFMT_Z_D15S1 = 109,
+ SVGA3D_DEVCAP_DXFMT_LUMINANCE8 = 110,
+ SVGA3D_DEVCAP_DXFMT_LUMINANCE4_ALPHA4 = 111,
+ SVGA3D_DEVCAP_DXFMT_LUMINANCE16 = 112,
+ SVGA3D_DEVCAP_DXFMT_LUMINANCE8_ALPHA8 = 113,
+ SVGA3D_DEVCAP_DXFMT_DXT1 = 114,
+ SVGA3D_DEVCAP_DXFMT_DXT2 = 115,
+ SVGA3D_DEVCAP_DXFMT_DXT3 = 116,
+ SVGA3D_DEVCAP_DXFMT_DXT4 = 117,
+ SVGA3D_DEVCAP_DXFMT_DXT5 = 118,
+ SVGA3D_DEVCAP_DXFMT_BUMPU8V8 = 119,
+ SVGA3D_DEVCAP_DXFMT_BUMPL6V5U5 = 120,
+ SVGA3D_DEVCAP_DXFMT_BUMPX8L8V8U8 = 121,
+ SVGA3D_DEVCAP_DXFMT_BUMPL8V8U8 = 122,
+ SVGA3D_DEVCAP_DXFMT_ARGB_S10E5 = 123,
+ SVGA3D_DEVCAP_DXFMT_ARGB_S23E8 = 124,
+ SVGA3D_DEVCAP_DXFMT_A2R10G10B10 = 125,
+ SVGA3D_DEVCAP_DXFMT_V8U8 = 126,
+ SVGA3D_DEVCAP_DXFMT_Q8W8V8U8 = 127,
+ SVGA3D_DEVCAP_DXFMT_CxV8U8 = 128,
+ SVGA3D_DEVCAP_DXFMT_X8L8V8U8 = 129,
+ SVGA3D_DEVCAP_DXFMT_A2W10V10U10 = 130,
+ SVGA3D_DEVCAP_DXFMT_ALPHA8 = 131,
+ SVGA3D_DEVCAP_DXFMT_R_S10E5 = 132,
+ SVGA3D_DEVCAP_DXFMT_R_S23E8 = 133,
+ SVGA3D_DEVCAP_DXFMT_RG_S10E5 = 134,
+ SVGA3D_DEVCAP_DXFMT_RG_S23E8 = 135,
+ SVGA3D_DEVCAP_DXFMT_BUFFER = 136,
+ SVGA3D_DEVCAP_DXFMT_Z_D24X8 = 137,
+ SVGA3D_DEVCAP_DXFMT_V16U16 = 138,
+ SVGA3D_DEVCAP_DXFMT_G16R16 = 139,
+ SVGA3D_DEVCAP_DXFMT_A16B16G16R16 = 140,
+ SVGA3D_DEVCAP_DXFMT_UYVY = 141,
+ SVGA3D_DEVCAP_DXFMT_YUY2 = 142,
+ SVGA3D_DEVCAP_DXFMT_NV12 = 143,
+ SVGA3D_DEVCAP_DXFMT_AYUV = 144,
+ SVGA3D_DEVCAP_DXFMT_R32G32B32A32_TYPELESS = 145,
+ SVGA3D_DEVCAP_DXFMT_R32G32B32A32_UINT = 146,
+ SVGA3D_DEVCAP_DXFMT_R32G32B32A32_SINT = 147,
+ SVGA3D_DEVCAP_DXFMT_R32G32B32_TYPELESS = 148,
+ SVGA3D_DEVCAP_DXFMT_R32G32B32_FLOAT = 149,
+ SVGA3D_DEVCAP_DXFMT_R32G32B32_UINT = 150,
+ SVGA3D_DEVCAP_DXFMT_R32G32B32_SINT = 151,
+ SVGA3D_DEVCAP_DXFMT_R16G16B16A16_TYPELESS = 152,
+ SVGA3D_DEVCAP_DXFMT_R16G16B16A16_UINT = 153,
+ SVGA3D_DEVCAP_DXFMT_R16G16B16A16_SNORM = 154,
+ SVGA3D_DEVCAP_DXFMT_R16G16B16A16_SINT = 155,
+ SVGA3D_DEVCAP_DXFMT_R32G32_TYPELESS = 156,
+ SVGA3D_DEVCAP_DXFMT_R32G32_UINT = 157,
+ SVGA3D_DEVCAP_DXFMT_R32G32_SINT = 158,
+ SVGA3D_DEVCAP_DXFMT_R32G8X24_TYPELESS = 159,
+ SVGA3D_DEVCAP_DXFMT_D32_FLOAT_S8X24_UINT = 160,
+ SVGA3D_DEVCAP_DXFMT_R32_FLOAT_X8X24_TYPELESS = 161,
+ SVGA3D_DEVCAP_DXFMT_X32_TYPELESS_G8X24_UINT = 162,
+ SVGA3D_DEVCAP_DXFMT_R10G10B10A2_TYPELESS = 163,
+ SVGA3D_DEVCAP_DXFMT_R10G10B10A2_UINT = 164,
+ SVGA3D_DEVCAP_DXFMT_R11G11B10_FLOAT = 165,
+ SVGA3D_DEVCAP_DXFMT_R8G8B8A8_TYPELESS = 166,
+ SVGA3D_DEVCAP_DXFMT_R8G8B8A8_UNORM = 167,
+ SVGA3D_DEVCAP_DXFMT_R8G8B8A8_UNORM_SRGB = 168,
+ SVGA3D_DEVCAP_DXFMT_R8G8B8A8_UINT = 169,
+ SVGA3D_DEVCAP_DXFMT_R8G8B8A8_SINT = 170,
+ SVGA3D_DEVCAP_DXFMT_R16G16_TYPELESS = 171,
+ SVGA3D_DEVCAP_DXFMT_R16G16_UINT = 172,
+ SVGA3D_DEVCAP_DXFMT_R16G16_SINT = 173,
+ SVGA3D_DEVCAP_DXFMT_R32_TYPELESS = 174,
+ SVGA3D_DEVCAP_DXFMT_D32_FLOAT = 175,
+ SVGA3D_DEVCAP_DXFMT_R32_UINT = 176,
+ SVGA3D_DEVCAP_DXFMT_R32_SINT = 177,
+ SVGA3D_DEVCAP_DXFMT_R24G8_TYPELESS = 178,
+ SVGA3D_DEVCAP_DXFMT_D24_UNORM_S8_UINT = 179,
+ SVGA3D_DEVCAP_DXFMT_R24_UNORM_X8_TYPELESS = 180,
+ SVGA3D_DEVCAP_DXFMT_X24_TYPELESS_G8_UINT = 181,
+ SVGA3D_DEVCAP_DXFMT_R8G8_TYPELESS = 182,
+ SVGA3D_DEVCAP_DXFMT_R8G8_UNORM = 183,
+ SVGA3D_DEVCAP_DXFMT_R8G8_UINT = 184,
+ SVGA3D_DEVCAP_DXFMT_R8G8_SINT = 185,
+ SVGA3D_DEVCAP_DXFMT_R16_TYPELESS = 186,
+ SVGA3D_DEVCAP_DXFMT_R16_UNORM = 187,
+ SVGA3D_DEVCAP_DXFMT_R16_UINT = 188,
+ SVGA3D_DEVCAP_DXFMT_R16_SNORM = 189,
+ SVGA3D_DEVCAP_DXFMT_R16_SINT = 190,
+ SVGA3D_DEVCAP_DXFMT_R8_TYPELESS = 191,
+ SVGA3D_DEVCAP_DXFMT_R8_UNORM = 192,
+ SVGA3D_DEVCAP_DXFMT_R8_UINT = 193,
+ SVGA3D_DEVCAP_DXFMT_R8_SNORM = 194,
+ SVGA3D_DEVCAP_DXFMT_R8_SINT = 195,
+ SVGA3D_DEVCAP_DXFMT_P8 = 196,
+ SVGA3D_DEVCAP_DXFMT_R9G9B9E5_SHAREDEXP = 197,
+ SVGA3D_DEVCAP_DXFMT_R8G8_B8G8_UNORM = 198,
+ SVGA3D_DEVCAP_DXFMT_G8R8_G8B8_UNORM = 199,
+ SVGA3D_DEVCAP_DXFMT_BC1_TYPELESS = 200,
+ SVGA3D_DEVCAP_DXFMT_BC1_UNORM_SRGB = 201,
+ SVGA3D_DEVCAP_DXFMT_BC2_TYPELESS = 202,
+ SVGA3D_DEVCAP_DXFMT_BC2_UNORM_SRGB = 203,
+ SVGA3D_DEVCAP_DXFMT_BC3_TYPELESS = 204,
+ SVGA3D_DEVCAP_DXFMT_BC3_UNORM_SRGB = 205,
+ SVGA3D_DEVCAP_DXFMT_BC4_TYPELESS = 206,
+ SVGA3D_DEVCAP_DXFMT_ATI1 = 207,
+ SVGA3D_DEVCAP_DXFMT_BC4_SNORM = 208,
+ SVGA3D_DEVCAP_DXFMT_BC5_TYPELESS = 209,
+ SVGA3D_DEVCAP_DXFMT_ATI2 = 210,
+ SVGA3D_DEVCAP_DXFMT_BC5_SNORM = 211,
+ SVGA3D_DEVCAP_DXFMT_R10G10B10_XR_BIAS_A2_UNORM = 212,
+ SVGA3D_DEVCAP_DXFMT_B8G8R8A8_TYPELESS = 213,
+ SVGA3D_DEVCAP_DXFMT_B8G8R8A8_UNORM_SRGB = 214,
+ SVGA3D_DEVCAP_DXFMT_B8G8R8X8_TYPELESS = 215,
+ SVGA3D_DEVCAP_DXFMT_B8G8R8X8_UNORM_SRGB = 216,
+ SVGA3D_DEVCAP_DXFMT_Z_DF16 = 217,
+ SVGA3D_DEVCAP_DXFMT_Z_DF24 = 218,
+ SVGA3D_DEVCAP_DXFMT_Z_D24S8_INT = 219,
+ SVGA3D_DEVCAP_DXFMT_YV12 = 220,
+ SVGA3D_DEVCAP_DXFMT_R32G32B32A32_FLOAT = 221,
+ SVGA3D_DEVCAP_DXFMT_R16G16B16A16_FLOAT = 222,
+ SVGA3D_DEVCAP_DXFMT_R16G16B16A16_UNORM = 223,
+ SVGA3D_DEVCAP_DXFMT_R32G32_FLOAT = 224,
+ SVGA3D_DEVCAP_DXFMT_R10G10B10A2_UNORM = 225,
+ SVGA3D_DEVCAP_DXFMT_R8G8B8A8_SNORM = 226,
+ SVGA3D_DEVCAP_DXFMT_R16G16_FLOAT = 227,
+ SVGA3D_DEVCAP_DXFMT_R16G16_UNORM = 228,
+ SVGA3D_DEVCAP_DXFMT_R16G16_SNORM = 229,
+ SVGA3D_DEVCAP_DXFMT_R32_FLOAT = 230,
+ SVGA3D_DEVCAP_DXFMT_R8G8_SNORM = 231,
+ SVGA3D_DEVCAP_DXFMT_R16_FLOAT = 232,
+ SVGA3D_DEVCAP_DXFMT_D16_UNORM = 233,
+ SVGA3D_DEVCAP_DXFMT_A8_UNORM = 234,
+ SVGA3D_DEVCAP_DXFMT_BC1_UNORM = 235,
+ SVGA3D_DEVCAP_DXFMT_BC2_UNORM = 236,
+ SVGA3D_DEVCAP_DXFMT_BC3_UNORM = 237,
+ SVGA3D_DEVCAP_DXFMT_B5G6R5_UNORM = 238,
+ SVGA3D_DEVCAP_DXFMT_B5G5R5A1_UNORM = 239,
+ SVGA3D_DEVCAP_DXFMT_B8G8R8A8_UNORM = 240,
+ SVGA3D_DEVCAP_DXFMT_B8G8R8X8_UNORM = 241,
+ SVGA3D_DEVCAP_DXFMT_BC4_UNORM = 242,
+ SVGA3D_DEVCAP_DXFMT_BC5_UNORM = 243,
+
+ SVGA3D_DEVCAP_MAX /* This must be the last index. */
+} SVGA3dDevCapIndex;
+
+/*
+ * Bit definitions for DXFMT devcaps
+ *
+ * SUPPORTED: Can the format be defined?
+ * SHADER_SAMPLE: Can the format be sampled from a shader?
+ * COLOR_RENDERTARGET: Can the format be a color render target?
+ * DEPTH_RENDERTARGET: Can the format be a depth render target?
+ * BLENDABLE: Is the format blendable?
+ * MIPS: Does the format support mip levels?
+ * ARRAY: Does the format support texture arrays?
+ * VOLUME: Does the format support having volume?
+ * DX_VERTEX_BUFFER: Can the format be used as a DX vertex buffer?
+ * MULTISAMPLE_2: Does the format support 2x multisample?
+ * MULTISAMPLE_4: Does the format support 4x multisample?
+ * MULTISAMPLE_8: Does the format support 8x multisample?
+ */
+#define SVGA3D_DXFMT_SUPPORTED (1 << 0)
+#define SVGA3D_DXFMT_SHADER_SAMPLE (1 << 1)
+#define SVGA3D_DXFMT_COLOR_RENDERTARGET (1 << 2)
+#define SVGA3D_DXFMT_DEPTH_RENDERTARGET (1 << 3)
+#define SVGA3D_DXFMT_BLENDABLE (1 << 4)
+#define SVGA3D_DXFMT_MIPS (1 << 5)
+#define SVGA3D_DXFMT_ARRAY (1 << 6)
+#define SVGA3D_DXFMT_VOLUME (1 << 7)
+#define SVGA3D_DXFMT_DX_VERTEX_BUFFER (1 << 8)
+#define SVGADX_DXFMT_MULTISAMPLE_2 (1 << 9)
+#define SVGADX_DXFMT_MULTISAMPLE_4 (1 << 10)
+#define SVGADX_DXFMT_MULTISAMPLE_8 (1 << 11)
+#define SVGADX_DXFMT_MAX (1 << 12)
+
+/*
+ * Convenience mask for any multisample capability.
+ *
+ * The multisample bits imply both load and render capability.
+ */
+#define SVGA3D_DXFMT_MULTISAMPLE ( \
+ SVGADX_DXFMT_MULTISAMPLE_2 | \
+ SVGADX_DXFMT_MULTISAMPLE_4 | \
+ SVGADX_DXFMT_MULTISAMPLE_8 )
+
+typedef union {
+ Bool b;
+ uint32 u;
+ int32 i;
+ float f;
+} SVGA3dDevCapResult;
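+
+/*
+ * Editorial sketch (not part of the original header): interpreting a
+ * DXFMT devcap result.  How the cap is queried from the device is
+ * outside the scope of this header; "result" is a hypothetical value
+ * returned for, say, SVGA3D_DEVCAP_DXFMT_R8G8B8A8_UNORM.
+ *
+ *   SVGA3dDevCapResult result;
+ *   Bool renderable = (result.u & SVGA3D_DXFMT_SUPPORTED) &&
+ *                     (result.u & SVGA3D_DXFMT_COLOR_RENDERTARGET);
+ *   Bool anyMsaa    = (result.u & SVGA3D_DXFMT_MULTISAMPLE) != 0;
+ */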
+
+#endif /* _SVGA3D_DEVCAPS_H_ */
diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga3d_dx.h b/drivers/gpu/drm/vmwgfx/device_include/svga3d_dx.h
new file mode 100644
index 000000000000..8c5ae608cfb4
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/device_include/svga3d_dx.h
@@ -0,0 +1,1487 @@
+/**********************************************************
+ * Copyright 2012-2015 VMware, Inc. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy,
+ * modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ **********************************************************/
+
+/*
+ * svga3d_dx.h --
+ *
+ * SVGA 3d hardware definitions for DX10 support.
+ */
+
+#ifndef _SVGA3D_DX_H_
+#define _SVGA3D_DX_H_
+
+#define INCLUDE_ALLOW_MODULE
+#define INCLUDE_ALLOW_USERLEVEL
+#define INCLUDE_ALLOW_VMCORE
+#include "includeCheck.h"
+
+#include "svga3d_limits.h"
+
+#define SVGA3D_INPUT_MIN 0
+#define SVGA3D_INPUT_PER_VERTEX_DATA 0
+#define SVGA3D_INPUT_PER_INSTANCE_DATA 1
+#define SVGA3D_INPUT_MAX 2
+typedef uint32 SVGA3dInputClassification;
+
+#define SVGA3D_RESOURCE_TYPE_MIN 1
+#define SVGA3D_RESOURCE_BUFFER 1
+#define SVGA3D_RESOURCE_TEXTURE1D 2
+#define SVGA3D_RESOURCE_TEXTURE2D 3
+#define SVGA3D_RESOURCE_TEXTURE3D 4
+#define SVGA3D_RESOURCE_TEXTURECUBE 5
+#define SVGA3D_RESOURCE_TYPE_DX10_MAX 6
+#define SVGA3D_RESOURCE_BUFFEREX 6
+#define SVGA3D_RESOURCE_TYPE_MAX 7
+typedef uint32 SVGA3dResourceType;
+
+#define SVGA3D_DEPTH_WRITE_MASK_ZERO 0
+#define SVGA3D_DEPTH_WRITE_MASK_ALL 1
+typedef uint8 SVGA3dDepthWriteMask;
+
+#define SVGA3D_FILTER_MIP_LINEAR (1 << 0)
+#define SVGA3D_FILTER_MAG_LINEAR (1 << 2)
+#define SVGA3D_FILTER_MIN_LINEAR (1 << 4)
+#define SVGA3D_FILTER_ANISOTROPIC (1 << 6)
+#define SVGA3D_FILTER_COMPARE (1 << 7)
+typedef uint32 SVGA3dFilter;
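+
+/*
+ * Editorial note (not part of the original header): the filter bits
+ * compose by ORing.  For example, a trilinear sampler filter would be:
+ *
+ *   SVGA3dFilter trilinear = SVGA3D_FILTER_MIN_LINEAR |
+ *                            SVGA3D_FILTER_MAG_LINEAR |
+ *                            SVGA3D_FILTER_MIP_LINEAR;
+ */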
+
+#define SVGA3D_CULL_INVALID 0
+#define SVGA3D_CULL_MIN 1
+#define SVGA3D_CULL_NONE 1
+#define SVGA3D_CULL_FRONT 2
+#define SVGA3D_CULL_BACK 3
+#define SVGA3D_CULL_MAX 4
+typedef uint8 SVGA3dCullMode;
+
+#define SVGA3D_COMPARISON_INVALID 0
+#define SVGA3D_COMPARISON_MIN 1
+#define SVGA3D_COMPARISON_NEVER 1
+#define SVGA3D_COMPARISON_LESS 2
+#define SVGA3D_COMPARISON_EQUAL 3
+#define SVGA3D_COMPARISON_LESS_EQUAL 4
+#define SVGA3D_COMPARISON_GREATER 5
+#define SVGA3D_COMPARISON_NOT_EQUAL 6
+#define SVGA3D_COMPARISON_GREATER_EQUAL 7
+#define SVGA3D_COMPARISON_ALWAYS 8
+#define SVGA3D_COMPARISON_MAX 9
+typedef uint8 SVGA3dComparisonFunc;
+
+#define SVGA3D_DX_MAX_VERTEXBUFFERS 32
+#define SVGA3D_DX_MAX_SOTARGETS 4
+#define SVGA3D_DX_MAX_SRVIEWS 128
+#define SVGA3D_DX_MAX_CONSTBUFFERS 16
+#define SVGA3D_DX_MAX_SAMPLERS 16
+
+/* Id limits */
+static const uint32 SVGA3dBlendObjectCountPerContext = 4096;
+static const uint32 SVGA3dDepthStencilObjectCountPerContext = 4096;
+
+typedef uint32 SVGA3dSurfaceId;
+typedef uint32 SVGA3dShaderResourceViewId;
+typedef uint32 SVGA3dRenderTargetViewId;
+typedef uint32 SVGA3dDepthStencilViewId;
+
+typedef uint32 SVGA3dShaderId;
+typedef uint32 SVGA3dElementLayoutId;
+typedef uint32 SVGA3dSamplerId;
+typedef uint32 SVGA3dBlendStateId;
+typedef uint32 SVGA3dDepthStencilStateId;
+typedef uint32 SVGA3dRasterizerStateId;
+typedef uint32 SVGA3dQueryId;
+typedef uint32 SVGA3dStreamOutputId;
+
+typedef union {
+ struct {
+ float r;
+ float g;
+ float b;
+ float a;
+ };
+
+ float value[4];
+} SVGA3dRGBAFloat;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ uint32 cid;
+ SVGAMobId mobid;
+}
+#include "vmware_pack_end.h"
+SVGAOTableDXContextEntry;
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXDefineContext {
+ uint32 cid;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXDefineContext; /* SVGA_3D_CMD_DX_DEFINE_CONTEXT */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXDestroyContext {
+ uint32 cid;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXDestroyContext; /* SVGA_3D_CMD_DX_DESTROY_CONTEXT */
+
+/*
+ * Bind a DX context.
+ *
+ * validContents should be set to 0 for new contexts,
+ * and 1 if this is an old context which is getting paged
+ * back on to the device.
+ *
+ * For new contexts, it is recommended that the driver
+ * issue commands to initialize all interesting state
+ * prior to rendering.
+ */
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXBindContext {
+ uint32 cid;
+ SVGAMobId mobid;
+ uint32 validContents;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXBindContext; /* SVGA_3D_CMD_DX_BIND_CONTEXT */
+
+/*
+ * Readback a DX context.
+ * (Request that the device flush the contents back into guest memory.)
+ */
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXReadbackContext {
+ uint32 cid;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXReadbackContext; /* SVGA_3D_CMD_DX_READBACK_CONTEXT */
+
+/*
+ * Invalidate a guest-backed context.
+ */
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXInvalidateContext {
+ uint32 cid;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXInvalidateContext; /* SVGA_3D_CMD_DX_INVALIDATE_CONTEXT */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dReplyFormatData {
+ uint32 formatSupport;
+ uint32 msaa2xQualityLevels:5;
+ uint32 msaa4xQualityLevels:5;
+ uint32 msaa8xQualityLevels:5;
+ uint32 msaa16xQualityLevels:5;
+ uint32 msaa32xQualityLevels:5;
+ uint32 pad:7;
+}
+#include "vmware_pack_end.h"
+SVGA3dReplyFormatData;
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXSetSingleConstantBuffer {
+ uint32 slot;
+ SVGA3dShaderType type;
+ SVGA3dSurfaceId sid;
+ uint32 offsetInBytes;
+ uint32 sizeInBytes;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXSetSingleConstantBuffer;
+/* SVGA_3D_CMD_DX_SET_SINGLE_CONSTANT_BUFFER */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXSetShaderResources {
+ uint32 startView;
+ SVGA3dShaderType type;
+
+ /*
+ * Followed by a variable number of SVGA3dShaderResourceViewId's.
+ */
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXSetShaderResources; /* SVGA_3D_CMD_DX_SET_SHADER_RESOURCES */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXSetShader {
+ SVGA3dShaderId shaderId;
+ SVGA3dShaderType type;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXSetShader; /* SVGA_3D_CMD_DX_SET_SHADER */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXSetSamplers {
+ uint32 startSampler;
+ SVGA3dShaderType type;
+
+ /*
+ * Followed by a variable number of SVGA3dSamplerId's.
+ */
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXSetSamplers; /* SVGA_3D_CMD_DX_SET_SAMPLERS */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXDraw {
+ uint32 vertexCount;
+ uint32 startVertexLocation;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXDraw; /* SVGA_3D_CMD_DX_DRAW */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXDrawIndexed {
+ uint32 indexCount;
+ uint32 startIndexLocation;
+ int32 baseVertexLocation;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXDrawIndexed; /* SVGA_3D_CMD_DX_DRAW_INDEXED */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXDrawInstanced {
+ uint32 vertexCountPerInstance;
+ uint32 instanceCount;
+ uint32 startVertexLocation;
+ uint32 startInstanceLocation;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXDrawInstanced; /* SVGA_3D_CMD_DX_DRAW_INSTANCED */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXDrawIndexedInstanced {
+ uint32 indexCountPerInstance;
+ uint32 instanceCount;
+ uint32 startIndexLocation;
+ int32 baseVertexLocation;
+ uint32 startInstanceLocation;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXDrawIndexedInstanced; /* SVGA_3D_CMD_DX_DRAW_INDEXED_INSTANCED */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXDrawAuto {
+ uint32 pad0;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXDrawAuto; /* SVGA_3D_CMD_DX_DRAW_AUTO */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXSetInputLayout {
+ SVGA3dElementLayoutId elementLayoutId;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXSetInputLayout; /* SVGA_3D_CMD_DX_SET_INPUT_LAYOUT */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dVertexBuffer {
+ SVGA3dSurfaceId sid;
+ uint32 stride;
+ uint32 offset;
+}
+#include "vmware_pack_end.h"
+SVGA3dVertexBuffer;
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXSetVertexBuffers {
+ uint32 startBuffer;
+ /* Followed by a variable number of SVGA3dVertexBuffer's. */
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXSetVertexBuffers; /* SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXSetIndexBuffer {
+ SVGA3dSurfaceId sid;
+ SVGA3dSurfaceFormat format;
+ uint32 offset;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXSetIndexBuffer; /* SVGA_3D_CMD_DX_SET_INDEX_BUFFER */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXSetTopology {
+ SVGA3dPrimitiveType topology;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXSetTopology; /* SVGA_3D_CMD_DX_SET_TOPOLOGY */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXSetRenderTargets {
+ SVGA3dDepthStencilViewId depthStencilViewId;
+ /* Followed by a variable number of SVGA3dRenderTargetViewId's. */
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXSetRenderTargets; /* SVGA_3D_CMD_DX_SET_RENDERTARGETS */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXSetBlendState {
+ SVGA3dBlendStateId blendId;
+ float blendFactor[4];
+ uint32 sampleMask;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXSetBlendState; /* SVGA_3D_CMD_DX_SET_BLEND_STATE */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXSetDepthStencilState {
+ SVGA3dDepthStencilStateId depthStencilId;
+ uint32 stencilRef;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXSetDepthStencilState; /* SVGA_3D_CMD_DX_SET_DEPTHSTENCIL_STATE */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXSetRasterizerState {
+ SVGA3dRasterizerStateId rasterizerId;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXSetRasterizerState; /* SVGA_3D_CMD_DX_SET_RASTERIZER_STATE */
+
+#define SVGA3D_DXQUERY_FLAG_PREDICATEHINT (1 << 0)
+typedef uint32 SVGA3dDXQueryFlags;
+
+/*
+ * The SVGADXQueryDeviceState and SVGADXQueryDeviceBits are used by the device
+ * to track query state transitions, but are not intended to be used by the
+ * driver.
+ */
+#define SVGADX_QDSTATE_INVALID ((uint8)-1) /* Query has no state */
+#define SVGADX_QDSTATE_MIN 0
+#define SVGADX_QDSTATE_IDLE 0 /* Query hasn't started yet */
+#define SVGADX_QDSTATE_ACTIVE 1 /* Query is actively gathering data */
+#define SVGADX_QDSTATE_PENDING 2 /* Query is waiting for results */
+#define SVGADX_QDSTATE_FINISHED 3 /* Query has completed */
+#define SVGADX_QDSTATE_MAX 4
+typedef uint8 SVGADXQueryDeviceState;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ SVGA3dQueryTypeUint8 type;
+ uint16 pad0;
+ SVGADXQueryDeviceState state;
+ SVGA3dDXQueryFlags flags;
+ SVGAMobId mobid;
+ uint32 offset;
+}
+#include "vmware_pack_end.h"
+SVGACOTableDXQueryEntry;
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXDefineQuery {
+ SVGA3dQueryId queryId;
+ SVGA3dQueryType type;
+ SVGA3dDXQueryFlags flags;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXDefineQuery; /* SVGA_3D_CMD_DX_DEFINE_QUERY */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXDestroyQuery {
+ SVGA3dQueryId queryId;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXDestroyQuery; /* SVGA_3D_CMD_DX_DESTROY_QUERY */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXBindQuery {
+ SVGA3dQueryId queryId;
+ SVGAMobId mobid;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXBindQuery; /* SVGA_3D_CMD_DX_BIND_QUERY */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXSetQueryOffset {
+ SVGA3dQueryId queryId;
+ uint32 mobOffset;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXSetQueryOffset; /* SVGA_3D_CMD_DX_SET_QUERY_OFFSET */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXBeginQuery {
+ SVGA3dQueryId queryId;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXBeginQuery; /* SVGA_3D_CMD_DX_QUERY_BEGIN */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXEndQuery {
+ SVGA3dQueryId queryId;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXEndQuery; /* SVGA_3D_CMD_DX_QUERY_END */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXReadbackQuery {
+ SVGA3dQueryId queryId;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXReadbackQuery; /* SVGA_3D_CMD_DX_READBACK_QUERY */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXMoveQuery {
+ SVGA3dQueryId queryId;
+ SVGAMobId mobid;
+ uint32 mobOffset;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXMoveQuery; /* SVGA_3D_CMD_DX_MOVE_QUERY */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXBindAllQuery {
+ uint32 cid;
+ SVGAMobId mobid;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXBindAllQuery; /* SVGA_3D_CMD_DX_BIND_ALL_QUERY */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXReadbackAllQuery {
+ uint32 cid;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXReadbackAllQuery; /* SVGA_3D_CMD_DX_READBACK_ALL_QUERY */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXSetPredication {
+ SVGA3dQueryId queryId;
+ uint32 predicateValue;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXSetPredication; /* SVGA_3D_CMD_DX_SET_PREDICATION */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dDXSOState {
+   uint32 offset;       /* Starting offset */
+   uint32 intOffset;    /* Internal offset */
+   uint32 vertexCount;  /* Vertices written */
+   uint32 sizeInBytes;  /* Max bytes to write */
+}
+#include "vmware_pack_end.h"
+SVGA3dDXSOState;
+
+/* Set the offset field to this value to append SO values to the buffer */
+#define SVGA3D_DX_SO_OFFSET_APPEND ((uint32) ~0u)
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dSoTarget {
+ SVGA3dSurfaceId sid;
+ uint32 offset;
+ uint32 sizeInBytes;
+}
+#include "vmware_pack_end.h"
+SVGA3dSoTarget;
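+
+/*
+ * Editorial sketch (not part of the original header): a stream-output
+ * target that keeps appending where the previous draw left off, using
+ * the APPEND sentinel above.  buffer_sid/buffer_size are hypothetical.
+ *
+ *   SVGA3dSoTarget so;
+ *   so.sid         = buffer_sid;
+ *   so.offset      = SVGA3D_DX_SO_OFFSET_APPEND;
+ *   so.sizeInBytes = buffer_size;
+ */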
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXSetSOTargets {
+ uint32 pad0;
+ /* Followed by a variable number of SVGA3dSoTarget's. */
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXSetSOTargets; /* SVGA_3D_CMD_DX_SET_SOTARGETS */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dViewport {
+ float x;
+ float y;
+ float width;
+ float height;
+ float minDepth;
+ float maxDepth;
+}
+#include "vmware_pack_end.h"
+SVGA3dViewport;
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXSetViewports {
+ uint32 pad0;
+ /* Followed by a variable number of SVGA3dViewport's. */
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXSetViewports; /* SVGA_3D_CMD_DX_SET_VIEWPORTS */
+
+#define SVGA3D_DX_MAX_VIEWPORTS 16
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXSetScissorRects {
+ uint32 pad0;
+ /* Followed by a variable number of SVGASignedRect's. */
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXSetScissorRects; /* SVGA_3D_CMD_DX_SET_SCISSORRECTS */
+
+#define SVGA3D_DX_MAX_SCISSORRECTS 16
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXClearRenderTargetView {
+ SVGA3dRenderTargetViewId renderTargetViewId;
+ SVGA3dRGBAFloat rgba;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXClearRenderTargetView; /* SVGA_3D_CMD_DX_CLEAR_RENDERTARGET_VIEW */
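+
+/*
+ * Editorial sketch (not part of the original header): clearing a render
+ * target view to opaque black via the SVGA3dRGBAFloat union defined
+ * earlier in this header.  rtvId is hypothetical.
+ *
+ *   SVGA3dCmdDXClearRenderTargetView clear;
+ *   clear.renderTargetViewId = rtvId;
+ *   clear.rgba.r = 0.0f;
+ *   clear.rgba.g = 0.0f;
+ *   clear.rgba.b = 0.0f;
+ *   clear.rgba.a = 1.0f;
+ */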
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXClearDepthStencilView {
+ uint16 flags;
+ uint16 stencil;
+ SVGA3dDepthStencilViewId depthStencilViewId;
+ float depth;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXClearDepthStencilView; /* SVGA_3D_CMD_DX_CLEAR_DEPTHSTENCIL_VIEW */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXPredCopyRegion {
+ SVGA3dSurfaceId dstSid;
+ uint32 dstSubResource;
+ SVGA3dSurfaceId srcSid;
+ uint32 srcSubResource;
+ SVGA3dCopyBox box;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXPredCopyRegion;
+/* SVGA_3D_CMD_DX_PRED_COPY_REGION */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXPredCopy {
+ SVGA3dSurfaceId dstSid;
+ SVGA3dSurfaceId srcSid;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXPredCopy; /* SVGA_3D_CMD_DX_PRED_COPY */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXBufferCopy {
+ SVGA3dSurfaceId dest;
+ SVGA3dSurfaceId src;
+ uint32 destX;
+ uint32 srcX;
+ uint32 width;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXBufferCopy;
+/* SVGA_3D_CMD_DX_BUFFER_COPY */
+
+typedef uint32 SVGA3dDXStretchBltMode;
+#define SVGADX_STRETCHBLT_LINEAR (1 << 0)
+#define SVGADX_STRETCHBLT_FORCE_SRC_SRGB (1 << 1)
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXStretchBlt {
+ SVGA3dSurfaceId srcSid;
+ uint32 srcSubResource;
+ SVGA3dSurfaceId dstSid;
+ uint32 destSubResource;
+ SVGA3dBox boxSrc;
+ SVGA3dBox boxDest;
+ SVGA3dDXStretchBltMode mode;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXStretchBlt; /* SVGA_3D_CMD_DX_STRETCHBLT */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXGenMips {
+ SVGA3dShaderResourceViewId shaderResourceViewId;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXGenMips; /* SVGA_3D_CMD_DX_GENMIPS */
+
+/*
+ * Defines a resource/DX surface. Resources share the surfaceId namespace.
+ */
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDefineGBSurface_v2 {
+ uint32 sid;
+ SVGA3dSurfaceFlags surfaceFlags;
+ SVGA3dSurfaceFormat format;
+ uint32 numMipLevels;
+ uint32 multisampleCount;
+ SVGA3dTextureFilter autogenFilter;
+ SVGA3dSize size;
+ uint32 arraySize;
+ uint32 pad;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDefineGBSurface_v2; /* SVGA_3D_CMD_DEFINE_GB_SURFACE_V2 */
+
+/*
+ * Update a sub-resource in a guest-backed resource.
+ * (Inform the device that the guest contents have been updated.)
+ */
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXUpdateSubResource {
+ SVGA3dSurfaceId sid;
+ uint32 subResource;
+ SVGA3dBox box;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXUpdateSubResource; /* SVGA_3D_CMD_DX_UPDATE_SUBRESOURCE */
+
+/*
+ * Readback a subresource in a guest-backed resource.
+ * (Request the device to flush the dirty contents into the guest.)
+ */
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXReadbackSubResource {
+ SVGA3dSurfaceId sid;
+ uint32 subResource;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXReadbackSubResource; /* SVGA_3D_CMD_DX_READBACK_SUBRESOURCE */
+
+/*
+ * Invalidate an image in a guest-backed surface.
+ * (Notify the device that the contents can be lost.)
+ */
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXInvalidateSubResource {
+ SVGA3dSurfaceId sid;
+ uint32 subResource;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXInvalidateSubResource; /* SVGA_3D_CMD_DX_INVALIDATE_SUBRESOURCE */
+
+
+/*
+ * Raw byte-wise transfer from a buffer surface into another surface,
+ * filling the requested destination box.
+ */
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXTransferFromBuffer {
+ SVGA3dSurfaceId srcSid;
+ uint32 srcOffset;
+ uint32 srcPitch;
+ uint32 srcSlicePitch;
+ SVGA3dSurfaceId destSid;
+ uint32 destSubResource;
+ SVGA3dBox destBox;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXTransferFromBuffer; /* SVGA_3D_CMD_DX_TRANSFER_FROM_BUFFER */
+
+
+/*
+ * Raw byte-wise transfer from a buffer surface into another surface,
+ * filling the requested destination box. Supported if
+ * SVGA3D_DEVCAP_DXCONTEXT is set. The context is implied from the
+ * command buffer header.
+ */
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXPredTransferFromBuffer {
+ SVGA3dSurfaceId srcSid;
+ uint32 srcOffset;
+ uint32 srcPitch;
+ uint32 srcSlicePitch;
+ SVGA3dSurfaceId destSid;
+ uint32 destSubResource;
+ SVGA3dBox destBox;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXPredTransferFromBuffer;
+/* SVGA_3D_CMD_DX_PRED_TRANSFER_FROM_BUFFER */
+
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXSurfaceCopyAndReadback {
+ SVGA3dSurfaceId srcSid;
+ SVGA3dSurfaceId destSid;
+ SVGA3dCopyBox box;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXSurfaceCopyAndReadback;
+/* SVGA_3D_CMD_DX_SURFACE_COPY_AND_READBACK */
+
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ union {
+ struct {
+ uint32 firstElement;
+ uint32 numElements;
+ uint32 pad0;
+ uint32 pad1;
+ } buffer;
+ struct {
+ uint32 mostDetailedMip;
+ uint32 firstArraySlice;
+ uint32 mipLevels;
+ uint32 arraySize;
+ } tex;
+ struct {
+ uint32 firstElement;
+ uint32 numElements;
+ uint32 flags;
+ uint32 pad0;
+ } bufferex;
+ };
+}
+#include "vmware_pack_end.h"
+SVGA3dShaderResourceViewDesc;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ SVGA3dSurfaceId sid;
+ SVGA3dSurfaceFormat format;
+ SVGA3dResourceType resourceDimension;
+ SVGA3dShaderResourceViewDesc desc;
+ uint32 pad;
+}
+#include "vmware_pack_end.h"
+SVGACOTableDXSRViewEntry;
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXDefineShaderResourceView {
+ SVGA3dShaderResourceViewId shaderResourceViewId;
+
+ SVGA3dSurfaceId sid;
+ SVGA3dSurfaceFormat format;
+ SVGA3dResourceType resourceDimension;
+
+ SVGA3dShaderResourceViewDesc desc;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXDefineShaderResourceView;
+/* SVGA_3D_CMD_DX_DEFINE_SHADERRESOURCE_VIEW */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXDestroyShaderResourceView {
+ SVGA3dShaderResourceViewId shaderResourceViewId;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXDestroyShaderResourceView;
+/* SVGA_3D_CMD_DX_DESTROY_SHADERRESOURCE_VIEW */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dRenderTargetViewDesc {
+ union {
+ struct {
+ uint32 firstElement;
+ uint32 numElements;
+ } buffer;
+ struct {
+ uint32 mipSlice;
+ uint32 firstArraySlice;
+ uint32 arraySize;
+ } tex; /* 1d, 2d, cube */
+ struct {
+ uint32 mipSlice;
+ uint32 firstW;
+ uint32 wSize;
+ } tex3D;
+ };
+}
+#include "vmware_pack_end.h"
+SVGA3dRenderTargetViewDesc;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ SVGA3dSurfaceId sid;
+ SVGA3dSurfaceFormat format;
+ SVGA3dResourceType resourceDimension;
+ SVGA3dRenderTargetViewDesc desc;
+ uint32 pad[2];
+}
+#include "vmware_pack_end.h"
+SVGACOTableDXRTViewEntry;
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXDefineRenderTargetView {
+ SVGA3dRenderTargetViewId renderTargetViewId;
+
+ SVGA3dSurfaceId sid;
+ SVGA3dSurfaceFormat format;
+ SVGA3dResourceType resourceDimension;
+
+ SVGA3dRenderTargetViewDesc desc;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXDefineRenderTargetView;
+/* SVGA_3D_CMD_DX_DEFINE_RENDERTARGET_VIEW */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXDestroyRenderTargetView {
+ SVGA3dRenderTargetViewId renderTargetViewId;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXDestroyRenderTargetView;
+/* SVGA_3D_CMD_DX_DESTROY_RENDERTARGET_VIEW */
+
+#define SVGA3D_DXDSVIEW_CREATE_READ_ONLY_DEPTH 0x01
+#define SVGA3D_DXDSVIEW_CREATE_READ_ONLY_STENCIL 0x02
+#define SVGA3D_DXDSVIEW_CREATE_FLAG_MASK 0x03
+typedef uint8 SVGA3DCreateDSViewFlags;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ SVGA3dSurfaceId sid;
+ SVGA3dSurfaceFormat format;
+ SVGA3dResourceType resourceDimension;
+ uint32 mipSlice;
+ uint32 firstArraySlice;
+ uint32 arraySize;
+ SVGA3DCreateDSViewFlags flags;
+ uint8 pad0;
+ uint16 pad1;
+ uint32 pad2;
+}
+#include "vmware_pack_end.h"
+SVGACOTableDXDSViewEntry;
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXDefineDepthStencilView {
+ SVGA3dDepthStencilViewId depthStencilViewId;
+
+ SVGA3dSurfaceId sid;
+ SVGA3dSurfaceFormat format;
+ SVGA3dResourceType resourceDimension;
+ uint32 mipSlice;
+ uint32 firstArraySlice;
+ uint32 arraySize;
+ SVGA3DCreateDSViewFlags flags;
+ uint8 pad0;
+ uint16 pad1;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXDefineDepthStencilView;
+/* SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_VIEW */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXDestroyDepthStencilView {
+ SVGA3dDepthStencilViewId depthStencilViewId;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXDestroyDepthStencilView;
+/* SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_VIEW */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dInputElementDesc {
+ uint32 inputSlot;
+ uint32 alignedByteOffset;
+ SVGA3dSurfaceFormat format;
+ SVGA3dInputClassification inputSlotClass;
+ uint32 instanceDataStepRate;
+ uint32 inputRegister;
+}
+#include "vmware_pack_end.h"
+SVGA3dInputElementDesc;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ /*
+ * XXX: How many of these can there be?
+ */
+ uint32 elid;
+ uint32 numDescs;
+ SVGA3dInputElementDesc desc[32];
+ uint32 pad[62];
+}
+#include "vmware_pack_end.h"
+SVGACOTableDXElementLayoutEntry;
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXDefineElementLayout {
+ SVGA3dElementLayoutId elementLayoutId;
+ /* Followed by a variable number of SVGA3dInputElementDesc's. */
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXDefineElementLayout;
+/* SVGA_3D_CMD_DX_DEFINE_ELEMENTLAYOUT */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXDestroyElementLayout {
+ SVGA3dElementLayoutId elementLayoutId;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXDestroyElementLayout;
+/* SVGA_3D_CMD_DX_DESTROY_ELEMENTLAYOUT */
+
+
+#define SVGA3D_DX_MAX_RENDER_TARGETS 8
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dDXBlendStatePerRT {
+ uint8 blendEnable;
+ uint8 srcBlend;
+ uint8 destBlend;
+ uint8 blendOp;
+ uint8 srcBlendAlpha;
+ uint8 destBlendAlpha;
+ uint8 blendOpAlpha;
+ uint8 renderTargetWriteMask;
+ uint8 logicOpEnable;
+ uint8 logicOp;
+ uint16 pad0;
+}
+#include "vmware_pack_end.h"
+SVGA3dDXBlendStatePerRT;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ uint8 alphaToCoverageEnable;
+ uint8 independentBlendEnable;
+ uint16 pad0;
+ SVGA3dDXBlendStatePerRT perRT[SVGA3D_MAX_RENDER_TARGETS];
+ uint32 pad1[7];
+}
+#include "vmware_pack_end.h"
+SVGACOTableDXBlendStateEntry;
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXDefineBlendState {
+ SVGA3dBlendStateId blendId;
+ uint8 alphaToCoverageEnable;
+ uint8 independentBlendEnable;
+ uint16 pad0;
+ SVGA3dDXBlendStatePerRT perRT[SVGA3D_MAX_RENDER_TARGETS];
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXDefineBlendState; /* SVGA_3D_CMD_DX_DEFINE_BLEND_STATE */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXDestroyBlendState {
+ SVGA3dBlendStateId blendId;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXDestroyBlendState; /* SVGA_3D_CMD_DX_DESTROY_BLEND_STATE */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ uint8 depthEnable;
+ SVGA3dDepthWriteMask depthWriteMask;
+ SVGA3dComparisonFunc depthFunc;
+ uint8 stencilEnable;
+ uint8 frontEnable;
+ uint8 backEnable;
+ uint8 stencilReadMask;
+ uint8 stencilWriteMask;
+
+ uint8 frontStencilFailOp;
+ uint8 frontStencilDepthFailOp;
+ uint8 frontStencilPassOp;
+ SVGA3dComparisonFunc frontStencilFunc;
+
+ uint8 backStencilFailOp;
+ uint8 backStencilDepthFailOp;
+ uint8 backStencilPassOp;
+ SVGA3dComparisonFunc backStencilFunc;
+}
+#include "vmware_pack_end.h"
+SVGACOTableDXDepthStencilEntry;
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXDefineDepthStencilState {
+ SVGA3dDepthStencilStateId depthStencilId;
+
+ uint8 depthEnable;
+ SVGA3dDepthWriteMask depthWriteMask;
+ SVGA3dComparisonFunc depthFunc;
+ uint8 stencilEnable;
+ uint8 frontEnable;
+ uint8 backEnable;
+ uint8 stencilReadMask;
+ uint8 stencilWriteMask;
+
+ uint8 frontStencilFailOp;
+ uint8 frontStencilDepthFailOp;
+ uint8 frontStencilPassOp;
+ SVGA3dComparisonFunc frontStencilFunc;
+
+ uint8 backStencilFailOp;
+ uint8 backStencilDepthFailOp;
+ uint8 backStencilPassOp;
+ SVGA3dComparisonFunc backStencilFunc;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXDefineDepthStencilState;
+/* SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_STATE */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXDestroyDepthStencilState {
+ SVGA3dDepthStencilStateId depthStencilId;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXDestroyDepthStencilState;
+/* SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_STATE */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ uint8 fillMode;
+ SVGA3dCullMode cullMode;
+ uint8 frontCounterClockwise;
+ uint8 provokingVertexLast;
+ int32 depthBias;
+ float depthBiasClamp;
+ float slopeScaledDepthBias;
+ uint8 depthClipEnable;
+ uint8 scissorEnable;
+ uint8 multisampleEnable;
+ uint8 antialiasedLineEnable;
+ float lineWidth;
+ uint8 lineStippleEnable;
+ uint8 lineStippleFactor;
+ uint16 lineStipplePattern;
+ uint32 forcedSampleCount;
+}
+#include "vmware_pack_end.h"
+SVGACOTableDXRasterizerStateEntry;
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXDefineRasterizerState {
+ SVGA3dRasterizerStateId rasterizerId;
+
+ uint8 fillMode;
+ SVGA3dCullMode cullMode;
+ uint8 frontCounterClockwise;
+ uint8 provokingVertexLast;
+ int32 depthBias;
+ float depthBiasClamp;
+ float slopeScaledDepthBias;
+ uint8 depthClipEnable;
+ uint8 scissorEnable;
+ uint8 multisampleEnable;
+ uint8 antialiasedLineEnable;
+ float lineWidth;
+ uint8 lineStippleEnable;
+ uint8 lineStippleFactor;
+ uint16 lineStipplePattern;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXDefineRasterizerState;
+/* SVGA_3D_CMD_DX_DEFINE_RASTERIZER_STATE */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXDestroyRasterizerState {
+ SVGA3dRasterizerStateId rasterizerId;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXDestroyRasterizerState;
+/* SVGA_3D_CMD_DX_DESTROY_RASTERIZER_STATE */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ SVGA3dFilter filter;
+ uint8 addressU;
+ uint8 addressV;
+ uint8 addressW;
+ uint8 pad0;
+ float mipLODBias;
+ uint8 maxAnisotropy;
+ SVGA3dComparisonFunc comparisonFunc;
+ uint16 pad1;
+ SVGA3dRGBAFloat borderColor;
+ float minLOD;
+ float maxLOD;
+ uint32 pad2[6];
+}
+#include "vmware_pack_end.h"
+SVGACOTableDXSamplerEntry;
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXDefineSamplerState {
+ SVGA3dSamplerId samplerId;
+ SVGA3dFilter filter;
+ uint8 addressU;
+ uint8 addressV;
+ uint8 addressW;
+ uint8 pad0;
+ float mipLODBias;
+ uint8 maxAnisotropy;
+ SVGA3dComparisonFunc comparisonFunc;
+ uint16 pad1;
+ SVGA3dRGBAFloat borderColor;
+ float minLOD;
+ float maxLOD;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXDefineSamplerState; /* SVGA_3D_CMD_DX_DEFINE_SAMPLER_STATE */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXDestroySamplerState {
+ SVGA3dSamplerId samplerId;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXDestroySamplerState; /* SVGA_3D_CMD_DX_DESTROY_SAMPLER_STATE */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dSignatureEntry {
+ uint8 systemValue;
+ uint8 reg; /* register is a reserved word */
+ uint16 mask;
+ uint8 registerComponentType;
+ uint8 minPrecision;
+ uint16 pad0;
+}
+#include "vmware_pack_end.h"
+SVGA3dSignatureEntry;
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXDefineShader {
+ SVGA3dShaderId shaderId;
+ SVGA3dShaderType type;
+ uint32 sizeInBytes; /* Number of bytes of shader text. */
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXDefineShader; /* SVGA_3D_CMD_DX_DEFINE_SHADER */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGACOTableDXShaderEntry {
+ SVGA3dShaderType type;
+ uint32 sizeInBytes;
+ uint32 offsetInBytes;
+ SVGAMobId mobid;
+ uint32 numInputSignatureEntries;
+ uint32 numOutputSignatureEntries;
+
+ uint32 numPatchConstantSignatureEntries;
+
+ uint32 pad;
+}
+#include "vmware_pack_end.h"
+SVGACOTableDXShaderEntry;
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXDestroyShader {
+ SVGA3dShaderId shaderId;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXDestroyShader; /* SVGA_3D_CMD_DX_DESTROY_SHADER */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXBindShader {
+ uint32 cid;
+ uint32 shid;
+ SVGAMobId mobid;
+ uint32 offsetInBytes;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXBindShader; /* SVGA_3D_CMD_DX_BIND_SHADER */
+
+/*
+ * The maximum number of streamout declarations in each streamout entry.
+ */
+#define SVGA3D_MAX_STREAMOUT_DECLS 64
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dStreamOutputDeclarationEntry {
+ uint32 outputSlot;
+ uint32 registerIndex;
+ uint8 registerMask;
+ uint8 pad0;
+ uint16 pad1;
+ uint32 stream;
+}
+#include "vmware_pack_end.h"
+SVGA3dStreamOutputDeclarationEntry;
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGACOTableDXStreamOutputEntry {
+ uint32 numOutputStreamEntries;
+ SVGA3dStreamOutputDeclarationEntry decl[SVGA3D_MAX_STREAMOUT_DECLS];
+ uint32 streamOutputStrideInBytes[SVGA3D_DX_MAX_SOTARGETS];
+ uint32 rasterizedStream;
+ uint32 pad[250];
+}
+#include "vmware_pack_end.h"
+SVGACOTableDXStreamOutputEntry;
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXDefineStreamOutput {
+ SVGA3dStreamOutputId soid;
+ uint32 numOutputStreamEntries;
+ SVGA3dStreamOutputDeclarationEntry decl[SVGA3D_MAX_STREAMOUT_DECLS];
+ uint32 streamOutputStrideInBytes[SVGA3D_DX_MAX_SOTARGETS];
+ uint32 rasterizedStream;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXDefineStreamOutput; /* SVGA_3D_CMD_DX_DEFINE_STREAMOUTPUT */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXDestroyStreamOutput {
+ SVGA3dStreamOutputId soid;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXDestroyStreamOutput; /* SVGA_3D_CMD_DX_DESTROY_STREAMOUTPUT */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXSetStreamOutput {
+ SVGA3dStreamOutputId soid;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXSetStreamOutput; /* SVGA_3D_CMD_DX_SET_STREAMOUTPUT */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ uint64 value;
+ uint32 mobId;
+ uint32 mobOffset;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXMobFence64; /* SVGA_3D_CMD_DX_MOB_FENCE_64 */
+
+/*
+ * SVGA3dCmdDXSetCOTable --
+ *
+ * This command allows the guest to bind a mob to a context-object table.
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXSetCOTable {
+ uint32 cid;
+ uint32 mobid;
+ SVGACOTableType type;
+ uint32 validSizeInBytes;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXSetCOTable; /* SVGA_3D_CMD_DX_SET_COTABLE */
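For orientation, here is a hedged sketch (not part of this patch) of how a guest driver might emit SVGA3dCmdDXSetCOTable, prefixing the body with the generic SVGA3dCmdHeader from svga3d_cmd.h. emit_cmd() is a hypothetical stand-in for the driver's real command-buffer submission path:

	/* Hypothetical sketch -- emit_cmd() is an assumed helper, not a real API. */
	extern void emit_cmd(const void *cmd, uint32 size);

	static void bind_cotable_mob(uint32 cid, SVGAMobId mobid)
	{
		struct {
			SVGA3dCmdHeader header;
			SVGA3dCmdDXSetCOTable body;
		} cmd;

		memset(&cmd, 0, sizeof(cmd));   /* needs linux/string.h (or string.h) */
		cmd.header.id = SVGA_3D_CMD_DX_SET_COTABLE;
		cmd.header.size = sizeof(cmd.body);
		cmd.body.cid = cid;                            /* DX context to modify */
		cmd.body.mobid = mobid;                        /* mob backing the table */
		cmd.body.type = SVGA_COTABLE_RASTERIZERSTATE;  /* which table to back */
		cmd.body.validSizeInBytes = 0;                 /* table starts with no valid entries */
		emit_cmd(&cmd, sizeof(cmd));
	}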
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXReadbackCOTable {
+ uint32 cid;
+ SVGACOTableType type;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXReadbackCOTable; /* SVGA_3D_CMD_DX_READBACK_COTABLE */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCOTableData {
+ uint32 mobid;
+}
+#include "vmware_pack_end.h"
+SVGA3dCOTableData;
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dBufferBinding {
+ uint32 bufferId;
+ uint32 stride;
+ uint32 offset;
+}
+#include "vmware_pack_end.h"
+SVGA3dBufferBinding;
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dConstantBufferBinding {
+ uint32 sid;
+ uint32 offsetInBytes;
+ uint32 sizeInBytes;
+}
+#include "vmware_pack_end.h"
+SVGA3dConstantBufferBinding;
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGADXInputAssemblyMobFormat {
+ uint32 layoutId;
+ SVGA3dBufferBinding vertexBuffers[SVGA3D_DX_MAX_VERTEXBUFFERS];
+ uint32 indexBufferSid;
+ uint32 pad;
+ uint32 indexBufferOffset;
+ uint32 indexBufferFormat;
+ uint32 topology;
+}
+#include "vmware_pack_end.h"
+SVGADXInputAssemblyMobFormat;
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGADXContextMobFormat {
+ SVGADXInputAssemblyMobFormat inputAssembly;
+
+ struct {
+ uint32 blendStateId;
+ uint32 blendFactor[4];
+ uint32 sampleMask;
+ uint32 depthStencilStateId;
+ uint32 stencilRef;
+ uint32 rasterizerStateId;
+ uint32 depthStencilViewId;
+ uint32 renderTargetViewIds[SVGA3D_MAX_SIMULTANEOUS_RENDER_TARGETS];
+ uint32 unorderedAccessViewIds[SVGA3D_MAX_UAVIEWS];
+ } renderState;
+
+ struct {
+ uint32 targets[SVGA3D_DX_MAX_SOTARGETS];
+ uint32 soid;
+ } streamOut;
+ uint32 pad0[11];
+
+ uint8 numViewports;
+ uint8 numScissorRects;
+ uint16 pad1[1];
+
+ uint32 pad2[3];
+
+ SVGA3dViewport viewports[SVGA3D_DX_MAX_VIEWPORTS];
+ uint32 pad3[32];
+
+ SVGASignedRect scissorRects[SVGA3D_DX_MAX_SCISSORRECTS];
+ uint32 pad4[64];
+
+ struct {
+ uint32 queryID;
+ uint32 value;
+ } predication;
+ uint32 pad5[2];
+
+ struct {
+ uint32 shaderId;
+ SVGA3dConstantBufferBinding constantBuffers[SVGA3D_DX_MAX_CONSTBUFFERS];
+ uint32 shaderResources[SVGA3D_DX_MAX_SRVIEWS];
+ uint32 samplers[SVGA3D_DX_MAX_SAMPLERS];
+ } shaderState[SVGA3D_NUM_SHADERTYPE];
+ uint32 pad6[26];
+
+ SVGA3dQueryId queryID[SVGA3D_MAX_QUERY];
+
+ SVGA3dCOTableData cotables[SVGA_COTABLE_MAX];
+ uint32 pad7[381];
+}
+#include "vmware_pack_end.h"
+SVGADXContextMobFormat;
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXTempSetContext {
+ uint32 dxcid;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXTempSetContext; /* SVGA_3D_CMD_DX_TEMP_SET_CONTEXT */
+
+#endif /* _SVGA3D_DX_H_ */
diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga3d_limits.h b/drivers/gpu/drm/vmwgfx/device_include/svga3d_limits.h
new file mode 100644
index 000000000000..a1c36877ad55
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/device_include/svga3d_limits.h
@@ -0,0 +1,99 @@
+/**********************************************************
+ * Copyright 2007-2015 VMware, Inc. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy,
+ * modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ **********************************************************/
+
+/*
+ * svga3d_limits.h --
+ *
+ * SVGA 3d hardware limits
+ */
+
+#ifndef _SVGA3D_LIMITS_H_
+#define _SVGA3D_LIMITS_H_
+
+#define INCLUDE_ALLOW_MODULE
+#define INCLUDE_ALLOW_USERLEVEL
+#define INCLUDE_ALLOW_VMCORE
+
+#include "includeCheck.h"
+
+#define SVGA3D_NUM_CLIPPLANES 6
+#define SVGA3D_MAX_RENDER_TARGETS 8
+#define SVGA3D_MAX_SIMULTANEOUS_RENDER_TARGETS (SVGA3D_MAX_RENDER_TARGETS)
+#define SVGA3D_MAX_UAVIEWS 8
+#define SVGA3D_MAX_CONTEXT_IDS 256
+#define SVGA3D_MAX_SURFACE_IDS (32 * 1024)
+
+/*
+ * Maximum ID a shader can be assigned on a given context.
+ */
+#define SVGA3D_MAX_SHADERIDS 5000
+/*
+ * Maximum number of shaders of a given type that can be defined
+ * (including all contexts).
+ */
+#define SVGA3D_MAX_SIMULTANEOUS_SHADERS 20000
+
+#define SVGA3D_NUM_TEXTURE_UNITS 32
+#define SVGA3D_NUM_LIGHTS 8
+
+/*
+ * Maximum size in dwords of shader text the SVGA device will allow.
+ * Currently 8 MB.
+ */
+#define SVGA3D_MAX_SHADER_MEMORY (8 * 1024 * 1024 / sizeof(uint32))
+
+#define SVGA3D_MAX_CLIP_PLANES 6
+
+/*
+ * This is the limit to the number of fixed-function texture
+ * transforms and texture coordinates we can support. It does *not*
+ * correspond to the number of texture image units (samplers) we
+ * support!
+ */
+#define SVGA3D_MAX_TEXTURE_COORDS 8
+
+/*
+ * Number of faces in a cubemap.
+ */
+#define SVGA3D_MAX_SURFACE_FACES 6
+
+/*
+ * Maximum number of array indexes in a GB surface (with DX enabled).
+ */
+#define SVGA3D_MAX_SURFACE_ARRAYSIZE 512
+
+/*
+ * The maximum number of vertex arrays we're guaranteed to support in
+ * SVGA_3D_CMD_DRAWPRIMITIVES.
+ */
+#define SVGA3D_MAX_VERTEX_ARRAYS 32
+
+/*
+ * The maximum number of primitive ranges we're guaranteed to support
+ * in SVGA_3D_CMD_DRAWPRIMITIVES.
+ */
+#define SVGA3D_MAX_DRAW_PRIMITIVE_RANGES 32
+
+#endif /* _SVGA3D_LIMITS_H_ */
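A quick illustration of how these limits are meant to be consumed (a hedged sketch, not code from this patch): SVGA3D_MAX_SHADER_MEMORY is expressed in 32-bit dwords, so a byte count has to be converted before it can be compared against the limit.

	/* Hypothetical validation helper; shader_size_ok() is not a real driver function. */
	static inline bool shader_size_ok(uint32 size_in_bytes)
	{
		/* The limit is 8 MB expressed in dwords, so divide the byte count
		 * by sizeof(uint32) before comparing.
		 */
		return size_in_bytes / sizeof(uint32) <= SVGA3D_MAX_SHADER_MEMORY;
	}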
diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga3d_reg.h b/drivers/gpu/drm/vmwgfx/device_include/svga3d_reg.h
new file mode 100644
index 000000000000..b44ce648f592
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/device_include/svga3d_reg.h
@@ -0,0 +1,50 @@
+/**********************************************************
+ * Copyright 1998-2015 VMware, Inc. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy,
+ * modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ **********************************************************/
+
+/*
+ * svga3d_reg.h --
+ *
+ * SVGA 3d hardware definitions
+ */
+
+#ifndef _SVGA3D_REG_H_
+#define _SVGA3D_REG_H_
+
+#define INCLUDE_ALLOW_MODULE
+#define INCLUDE_ALLOW_USERLEVEL
+#define INCLUDE_ALLOW_VMCORE
+
+#include "includeCheck.h"
+
+#include "svga_reg.h"
+
+#include "svga3d_types.h"
+#include "svga3d_limits.h"
+#include "svga3d_cmd.h"
+#include "svga3d_dx.h"
+#include "svga3d_devcaps.h"
+
+
+#endif /* _SVGA3D_REG_H_ */
diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga3d_surfacedefs.h b/drivers/gpu/drm/vmwgfx/device_include/svga3d_surfacedefs.h
new file mode 100644
index 000000000000..58704f0a4607
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/device_include/svga3d_surfacedefs.h
@@ -0,0 +1,1204 @@
+/**************************************************************************
+ *
+ * Copyright © 2008-2015 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifdef __KERNEL__
+
+#include <drm/vmwgfx_drm.h>
+#define surf_size_struct struct drm_vmw_size
+
+#else /* __KERNEL__ */
+
+#ifndef ARRAY_SIZE
+#define ARRAY_SIZE(_A) (sizeof(_A) / sizeof((_A)[0]))
+#endif /* ARRAY_SIZE */
+
+#define DIV_ROUND_UP(x, y) (((x) + (y) - 1) / (y))
+#define max_t(type, x, y) ((x) > (y) ? (x) : (y))
+#define surf_size_struct SVGA3dSize
+#define u32 uint32
+
+#endif /* __KERNEL__ */
+
+#include "svga3d_reg.h"
+
+/*
+ * enum svga3d_block_desc describes the active data channels in a block.
+ *
+ * There can be at most four active channels in a block:
+ *   1. Blue and bump U are stored in the first channel.
+ *   2. Green, bump V and stencil are stored in the second channel.
+ *   3. Red, bump W, luminance and depth are stored in the third channel.
+ *   4. Alpha and bump Q are stored in the fourth channel.
+ *
+ * Block channels can be used to store compressed and buffer data:
+ * 1. For compressed formats, only the data channel is used and its size
+ * is equal to that of a singular block in the compression scheme.
+ * 2. For buffer formats, only the data channel is used and its size is
+ * exactly one byte in length.
+ * 3. In each case the bit depth represents the size of a singular block.
+ *
+ * Note: Compressed and IEEE formats do not use the bitMask structure.
+ */
+
+enum svga3d_block_desc {
+ SVGA3DBLOCKDESC_NONE = 0, /* No channels are active */
+ SVGA3DBLOCKDESC_BLUE = 1 << 0, /* Block with blue channel
+ data */
+ SVGA3DBLOCKDESC_U = 1 << 0, /* Block with bump U channel
+ data */
+ SVGA3DBLOCKDESC_UV_VIDEO = 1 << 7, /* Block with alternating video
+ U and V */
+ SVGA3DBLOCKDESC_GREEN = 1 << 1, /* Block with green channel
+ data */
+ SVGA3DBLOCKDESC_V = 1 << 1, /* Block with bump V channel
+ data */
+ SVGA3DBLOCKDESC_STENCIL = 1 << 1, /* Block with a stencil
+ channel */
+ SVGA3DBLOCKDESC_RED = 1 << 2, /* Block with red channel
+ data */
+ SVGA3DBLOCKDESC_W = 1 << 2, /* Block with bump W channel
+ data */
+ SVGA3DBLOCKDESC_LUMINANCE = 1 << 2, /* Block with luminance channel
+ data */
+ SVGA3DBLOCKDESC_Y = 1 << 2, /* Block with video luminance
+ data */
+ SVGA3DBLOCKDESC_DEPTH = 1 << 2, /* Block with depth channel */
+ SVGA3DBLOCKDESC_ALPHA = 1 << 3, /* Block with an alpha
+ channel */
+ SVGA3DBLOCKDESC_Q = 1 << 3, /* Block with bump Q channel
+ data */
+ SVGA3DBLOCKDESC_BUFFER = 1 << 4, /* Block stores 1 byte of
+ data */
+ SVGA3DBLOCKDESC_COMPRESSED = 1 << 5, /* Block stores n bytes of
+ data depending on the
+ compression method used */
+ SVGA3DBLOCKDESC_IEEE_FP = 1 << 6, /* Block stores data in an IEEE
+ floating point
+ representation in
+ all channels */
+ SVGA3DBLOCKDESC_PLANAR_YUV = 1 << 8, /* Three separate blocks store
+ data. */
+ SVGA3DBLOCKDESC_U_VIDEO = 1 << 9, /* Block with U video data */
+ SVGA3DBLOCKDESC_V_VIDEO = 1 << 10, /* Block with V video data */
+ SVGA3DBLOCKDESC_EXP = 1 << 11, /* Shared exponent */
+ SVGA3DBLOCKDESC_SRGB = 1 << 12, /* Data is in sRGB format */
+ SVGA3DBLOCKDESC_2PLANAR_YUV = 1 << 13, /* 2 planes of Y, UV,
+ e.g., NV12. */
+ SVGA3DBLOCKDESC_3PLANAR_YUV = 1 << 14, /* 3 planes of separate
+ Y, U, V, e.g., YV12. */
+
+ SVGA3DBLOCKDESC_RG = SVGA3DBLOCKDESC_RED |
+ SVGA3DBLOCKDESC_GREEN,
+ SVGA3DBLOCKDESC_RGB = SVGA3DBLOCKDESC_RG |
+ SVGA3DBLOCKDESC_BLUE,
+ SVGA3DBLOCKDESC_RGB_SRGB = SVGA3DBLOCKDESC_RGB |
+ SVGA3DBLOCKDESC_SRGB,
+ SVGA3DBLOCKDESC_RGBA = SVGA3DBLOCKDESC_RGB |
+ SVGA3DBLOCKDESC_ALPHA,
+ SVGA3DBLOCKDESC_RGBA_SRGB = SVGA3DBLOCKDESC_RGBA |
+ SVGA3DBLOCKDESC_SRGB,
+ SVGA3DBLOCKDESC_UV = SVGA3DBLOCKDESC_U |
+ SVGA3DBLOCKDESC_V,
+ SVGA3DBLOCKDESC_UVL = SVGA3DBLOCKDESC_UV |
+ SVGA3DBLOCKDESC_LUMINANCE,
+ SVGA3DBLOCKDESC_UVW = SVGA3DBLOCKDESC_UV |
+ SVGA3DBLOCKDESC_W,
+ SVGA3DBLOCKDESC_UVWA = SVGA3DBLOCKDESC_UVW |
+ SVGA3DBLOCKDESC_ALPHA,
+ SVGA3DBLOCKDESC_UVWQ = SVGA3DBLOCKDESC_U |
+ SVGA3DBLOCKDESC_V |
+ SVGA3DBLOCKDESC_W |
+ SVGA3DBLOCKDESC_Q,
+ SVGA3DBLOCKDESC_LA = SVGA3DBLOCKDESC_LUMINANCE |
+ SVGA3DBLOCKDESC_ALPHA,
+ SVGA3DBLOCKDESC_R_FP = SVGA3DBLOCKDESC_RED |
+ SVGA3DBLOCKDESC_IEEE_FP,
+ SVGA3DBLOCKDESC_RG_FP = SVGA3DBLOCKDESC_R_FP |
+ SVGA3DBLOCKDESC_GREEN,
+ SVGA3DBLOCKDESC_RGB_FP = SVGA3DBLOCKDESC_RG_FP |
+ SVGA3DBLOCKDESC_BLUE,
+ SVGA3DBLOCKDESC_RGBA_FP = SVGA3DBLOCKDESC_RGB_FP |
+ SVGA3DBLOCKDESC_ALPHA,
+ SVGA3DBLOCKDESC_DS = SVGA3DBLOCKDESC_DEPTH |
+ SVGA3DBLOCKDESC_STENCIL,
+ SVGA3DBLOCKDESC_YUV = SVGA3DBLOCKDESC_UV_VIDEO |
+ SVGA3DBLOCKDESC_Y,
+ SVGA3DBLOCKDESC_AYUV = SVGA3DBLOCKDESC_ALPHA |
+ SVGA3DBLOCKDESC_Y |
+ SVGA3DBLOCKDESC_U_VIDEO |
+ SVGA3DBLOCKDESC_V_VIDEO,
+ SVGA3DBLOCKDESC_RGBE = SVGA3DBLOCKDESC_RGB |
+ SVGA3DBLOCKDESC_EXP,
+ SVGA3DBLOCKDESC_COMPRESSED_SRGB = SVGA3DBLOCKDESC_COMPRESSED |
+ SVGA3DBLOCKDESC_SRGB,
+ SVGA3DBLOCKDESC_NV12 = SVGA3DBLOCKDESC_PLANAR_YUV |
+ SVGA3DBLOCKDESC_2PLANAR_YUV,
+ SVGA3DBLOCKDESC_YV12 = SVGA3DBLOCKDESC_PLANAR_YUV |
+ SVGA3DBLOCKDESC_3PLANAR_YUV,
+};
+
+/*
+ * SVGA3dSurfaceDesc describes the actual pixel data.
+ *
+ * This structure provides the following information:
+ * 1. Block description.
+ * 2. Dimensions of a block in the surface.
+ * 3. Size of block in bytes.
+ * 4. Bit depth of the pixel data.
+ * 5. Channel bit depths and masks (if applicable).
+ */
+struct svga3d_channel_def {
+ union {
+ u8 blue;
+ u8 u;
+ u8 uv_video;
+ u8 u_video;
+ };
+ union {
+ u8 green;
+ u8 v;
+ u8 stencil;
+ u8 v_video;
+ };
+ union {
+ u8 red;
+ u8 w;
+ u8 luminance;
+ u8 y;
+ u8 depth;
+ u8 data;
+ };
+ union {
+ u8 alpha;
+ u8 q;
+ u8 exp;
+ };
+};
+
+struct svga3d_surface_desc {
+ SVGA3dSurfaceFormat format;
+ enum svga3d_block_desc block_desc;
+ surf_size_struct block_size;
+ u32 bytes_per_block;
+ u32 pitch_bytes_per_block;
+
+ u32 total_bit_depth;
+ struct svga3d_channel_def bit_depth;
+ struct svga3d_channel_def bit_offset;
+};
+
+static const struct svga3d_surface_desc svga3d_surface_descs[] = {
+ {SVGA3D_FORMAT_INVALID, SVGA3DBLOCKDESC_NONE,
+ {1, 1, 1}, 0, 0,
+ 0, {{0}, {0}, {0}, {0}},
+ {{0}, {0}, {0}, {0}}},
+
+ {SVGA3D_X8R8G8B8, SVGA3DBLOCKDESC_RGB,
+ {1, 1, 1}, 4, 4,
+ 24, {{8}, {8}, {8}, {0}},
+ {{0}, {8}, {16}, {24}}},
+
+ {SVGA3D_A8R8G8B8, SVGA3DBLOCKDESC_RGBA,
+ {1, 1, 1}, 4, 4,
+ 32, {{8}, {8}, {8}, {8}},
+ {{0}, {8}, {16}, {24}}},
+
+ {SVGA3D_R5G6B5, SVGA3DBLOCKDESC_RGB,
+ {1, 1, 1}, 2, 2,
+ 16, {{5}, {6}, {5}, {0}},
+ {{0}, {5}, {11}, {0}}},
+
+ {SVGA3D_X1R5G5B5, SVGA3DBLOCKDESC_RGB,
+ {1, 1, 1}, 2, 2,
+ 15, {{5}, {5}, {5}, {0}},
+ {{0}, {5}, {10}, {0}}},
+
+ {SVGA3D_A1R5G5B5, SVGA3DBLOCKDESC_RGBA,
+ {1, 1, 1}, 2, 2,
+ 16, {{5}, {5}, {5}, {1}},
+ {{0}, {5}, {10}, {15}}},
+
+ {SVGA3D_A4R4G4B4, SVGA3DBLOCKDESC_RGBA,
+ {1, 1, 1}, 2, 2,
+ 16, {{4}, {4}, {4}, {4}},
+ {{0}, {4}, {8}, {12}}},
+
+ {SVGA3D_Z_D32, SVGA3DBLOCKDESC_DEPTH,
+ {1, 1, 1}, 4, 4,
+ 32, {{0}, {0}, {32}, {0}},
+ {{0}, {0}, {0}, {0}}},
+
+ {SVGA3D_Z_D16, SVGA3DBLOCKDESC_DEPTH,
+ {1, 1, 1}, 2, 2,
+ 16, {{0}, {0}, {16}, {0}},
+ {{0}, {0}, {0}, {0}}},
+
+ {SVGA3D_Z_D24S8, SVGA3DBLOCKDESC_DS,
+ {1, 1, 1}, 4, 4,
+ 32, {{0}, {8}, {24}, {0}},
+ {{0}, {24}, {0}, {0}}},
+
+ {SVGA3D_Z_D15S1, SVGA3DBLOCKDESC_DS,
+ {1, 1, 1}, 2, 2,
+ 16, {{0}, {1}, {15}, {0}},
+ {{0}, {15}, {0}, {0}}},
+
+ {SVGA3D_LUMINANCE8, SVGA3DBLOCKDESC_LUMINANCE,
+ {1, 1, 1}, 1, 1,
+ 8, {{0}, {0}, {8}, {0}},
+ {{0}, {0}, {0}, {0}}},
+
+ {SVGA3D_LUMINANCE4_ALPHA4, SVGA3DBLOCKDESC_LA,
+ {1, 1, 1}, 1, 1,
+ 8, {{0}, {0}, {4}, {4}},
+ {{0}, {0}, {0}, {4}}},
+
+ {SVGA3D_LUMINANCE16, SVGA3DBLOCKDESC_LUMINANCE,
+ {1, 1, 1}, 2, 2,
+ 16, {{0}, {0}, {16}, {0}},
+ {{0}, {0}, {0}, {0}}},
+
+ {SVGA3D_LUMINANCE8_ALPHA8, SVGA3DBLOCKDESC_LA,
+ {1, 1, 1}, 2, 2,
+ 16, {{0}, {0}, {8}, {8}},
+ {{0}, {0}, {0}, {8}}},
+
+ {SVGA3D_DXT1, SVGA3DBLOCKDESC_COMPRESSED,
+ {4, 4, 1}, 8, 8,
+ 64, {{0}, {0}, {64}, {0}},
+ {{0}, {0}, {0}, {0}}},
+
+ {SVGA3D_DXT2, SVGA3DBLOCKDESC_COMPRESSED,
+ {4, 4, 1}, 16, 16,
+ 128, {{0}, {0}, {128}, {0}},
+ {{0}, {0}, {0}, {0}}},
+
+ {SVGA3D_DXT3, SVGA3DBLOCKDESC_COMPRESSED,
+ {4, 4, 1}, 16, 16,
+ 128, {{0}, {0}, {128}, {0}},
+ {{0}, {0}, {0}, {0}}},
+
+ {SVGA3D_DXT4, SVGA3DBLOCKDESC_COMPRESSED,
+ {4, 4, 1}, 16, 16,
+ 128, {{0}, {0}, {128}, {0}},
+ {{0}, {0}, {0}, {0}}},
+
+ {SVGA3D_DXT5, SVGA3DBLOCKDESC_COMPRESSED,
+ {4, 4, 1}, 16, 16,
+ 128, {{0}, {0}, {128}, {0}},
+ {{0}, {0}, {0}, {0}}},
+
+ {SVGA3D_BUMPU8V8, SVGA3DBLOCKDESC_UV,
+ {1, 1, 1}, 2, 2,
+ 16, {{0}, {0}, {8}, {8}},
+ {{0}, {0}, {0}, {8}}},
+
+ {SVGA3D_BUMPL6V5U5, SVGA3DBLOCKDESC_UVL,
+ {1, 1, 1}, 2, 2,
+ 16, {{5}, {5}, {6}, {0}},
+ {{11}, {6}, {0}, {0}}},
+
+ {SVGA3D_BUMPX8L8V8U8, SVGA3DBLOCKDESC_UVL,
+ {1, 1, 1}, 4, 4,
+ 32, {{8}, {8}, {8}, {0}},
+ {{16}, {8}, {0}, {0}}},
+
+ {SVGA3D_BUMPL8V8U8, SVGA3DBLOCKDESC_UVL,
+ {1, 1, 1}, 3, 3,
+ 24, {{8}, {8}, {8}, {0}},
+ {{16}, {8}, {0}, {0}}},
+
+ {SVGA3D_ARGB_S10E5, SVGA3DBLOCKDESC_RGBA_FP,
+ {1, 1, 1}, 8, 8,
+ 64, {{16}, {16}, {16}, {16}},
+ {{32}, {16}, {0}, {48}}},
+
+ {SVGA3D_ARGB_S23E8, SVGA3DBLOCKDESC_RGBA_FP,
+ {1, 1, 1}, 16, 16,
+ 128, {{32}, {32}, {32}, {32}},
+ {{64}, {32}, {0}, {96}}},
+
+ {SVGA3D_A2R10G10B10, SVGA3DBLOCKDESC_RGBA,
+ {1, 1, 1}, 4, 4,
+ 32, {{10}, {10}, {10}, {2}},
+ {{0}, {10}, {20}, {30}}},
+
+ {SVGA3D_V8U8, SVGA3DBLOCKDESC_UV,
+ {1, 1, 1}, 2, 2,
+ 16, {{8}, {8}, {0}, {0}},
+ {{8}, {0}, {0}, {0}}},
+
+ {SVGA3D_Q8W8V8U8, SVGA3DBLOCKDESC_UVWQ,
+ {1, 1, 1}, 4, 4,
+ 32, {{8}, {8}, {8}, {8}},
+ {{24}, {16}, {8}, {0}}},
+
+ {SVGA3D_CxV8U8, SVGA3DBLOCKDESC_UV,
+ {1, 1, 1}, 2, 2,
+ 16, {{8}, {8}, {0}, {0}},
+ {{8}, {0}, {0}, {0}}},
+
+ {SVGA3D_X8L8V8U8, SVGA3DBLOCKDESC_UVL,
+ {1, 1, 1}, 4, 4,
+ 24, {{8}, {8}, {8}, {0}},
+ {{16}, {8}, {0}, {0}}},
+
+ {SVGA3D_A2W10V10U10, SVGA3DBLOCKDESC_UVWA,
+ {1, 1, 1}, 4, 4,
+ 32, {{10}, {10}, {10}, {2}},
+ {{0}, {10}, {20}, {30}}},
+
+ {SVGA3D_ALPHA8, SVGA3DBLOCKDESC_ALPHA,
+ {1, 1, 1}, 1, 1,
+ 8, {{0}, {0}, {0}, {8}},
+ {{0}, {0}, {0}, {0}}},
+
+ {SVGA3D_R_S10E5, SVGA3DBLOCKDESC_R_FP,
+ {1, 1, 1}, 2, 2,
+ 16, {{0}, {0}, {16}, {0}},
+ {{0}, {0}, {0}, {0}}},
+
+ {SVGA3D_R_S23E8, SVGA3DBLOCKDESC_R_FP,
+ {1, 1, 1}, 4, 4,
+ 32, {{0}, {0}, {32}, {0}},
+ {{0}, {0}, {0}, {0}}},
+
+ {SVGA3D_RG_S10E5, SVGA3DBLOCKDESC_RG_FP,
+ {1, 1, 1}, 4, 4,
+ 32, {{0}, {16}, {16}, {0}},
+ {{0}, {16}, {0}, {0}}},
+
+ {SVGA3D_RG_S23E8, SVGA3DBLOCKDESC_RG_FP,
+ {1, 1, 1}, 8, 8,
+ 64, {{0}, {32}, {32}, {0}},
+ {{0}, {32}, {0}, {0}}},
+
+ {SVGA3D_BUFFER, SVGA3DBLOCKDESC_BUFFER,
+ {1, 1, 1}, 1, 1,
+ 8, {{0}, {0}, {8}, {0}},
+ {{0}, {0}, {0}, {0}}},
+
+ {SVGA3D_Z_D24X8, SVGA3DBLOCKDESC_DEPTH,
+ {1, 1, 1}, 4, 4,
+ 32, {{0}, {0}, {24}, {0}},
+ {{0}, {24}, {0}, {0}}},
+
+ {SVGA3D_V16U16, SVGA3DBLOCKDESC_UV,
+ {1, 1, 1}, 4, 4,
+ 32, {{16}, {16}, {0}, {0}},
+ {{16}, {0}, {0}, {0}}},
+
+ {SVGA3D_G16R16, SVGA3DBLOCKDESC_RG,
+ {1, 1, 1}, 4, 4,
+ 32, {{0}, {16}, {16}, {0}},
+ {{0}, {0}, {16}, {0}}},
+
+ {SVGA3D_A16B16G16R16, SVGA3DBLOCKDESC_RGBA,
+ {1, 1, 1}, 8, 8,
+ 64, {{16}, {16}, {16}, {16}},
+ {{32}, {16}, {0}, {48}}},
+
+ {SVGA3D_UYVY, SVGA3DBLOCKDESC_YUV,
+ {1, 1, 1}, 2, 2,
+ 16, {{8}, {0}, {8}, {0}},
+ {{0}, {0}, {8}, {0}}},
+
+ {SVGA3D_YUY2, SVGA3DBLOCKDESC_YUV,
+ {1, 1, 1}, 2, 2,
+ 16, {{8}, {0}, {8}, {0}},
+ {{8}, {0}, {0}, {0}}},
+
+ {SVGA3D_NV12, SVGA3DBLOCKDESC_NV12,
+ {2, 2, 1}, 6, 2,
+ 48, {{0}, {0}, {48}, {0}},
+ {{0}, {0}, {0}, {0}}},
+
+ {SVGA3D_AYUV, SVGA3DBLOCKDESC_AYUV,
+ {1, 1, 1}, 4, 4,
+ 32, {{8}, {8}, {8}, {8}},
+ {{0}, {8}, {16}, {24}}},
+
+ {SVGA3D_R32G32B32A32_TYPELESS, SVGA3DBLOCKDESC_RGBA,
+ {1, 1, 1}, 16, 16,
+ 128, {{32}, {32}, {32}, {32}},
+ {{64}, {32}, {0}, {96}}},
+
+ {SVGA3D_R32G32B32A32_UINT, SVGA3DBLOCKDESC_RGBA,
+ {1, 1, 1}, 16, 16,
+ 128, {{32}, {32}, {32}, {32}},
+ {{64}, {32}, {0}, {96}}},
+
+ {SVGA3D_R32G32B32A32_SINT, SVGA3DBLOCKDESC_UVWQ,
+ {1, 1, 1}, 16, 16,
+ 128, {{32}, {32}, {32}, {32}},
+ {{64}, {32}, {0}, {96}}},
+
+ {SVGA3D_R32G32B32_TYPELESS, SVGA3DBLOCKDESC_RGB,
+ {1, 1, 1}, 12, 12,
+ 96, {{32}, {32}, {32}, {0}},
+ {{64}, {32}, {0}, {0}}},
+
+ {SVGA3D_R32G32B32_FLOAT, SVGA3DBLOCKDESC_RGB_FP,
+ {1, 1, 1}, 12, 12,
+ 96, {{32}, {32}, {32}, {0}},
+ {{64}, {32}, {0}, {0}}},
+
+ {SVGA3D_R32G32B32_UINT, SVGA3DBLOCKDESC_RGB,
+ {1, 1, 1}, 12, 12,
+ 96, {{32}, {32}, {32}, {0}},
+ {{64}, {32}, {0}, {0}}},
+
+ {SVGA3D_R32G32B32_SINT, SVGA3DBLOCKDESC_UVW,
+ {1, 1, 1}, 12, 12,
+ 96, {{32}, {32}, {32}, {0}},
+ {{64}, {32}, {0}, {0}}},
+
+ {SVGA3D_R16G16B16A16_TYPELESS, SVGA3DBLOCKDESC_RGBA,
+ {1, 1, 1}, 8, 8,
+ 64, {{16}, {16}, {16}, {16}},
+ {{32}, {16}, {0}, {48}}},
+
+ {SVGA3D_R16G16B16A16_UINT, SVGA3DBLOCKDESC_RGBA,
+ {1, 1, 1}, 8, 8,
+ 64, {{16}, {16}, {16}, {16}},
+ {{32}, {16}, {0}, {48}}},
+
+ {SVGA3D_R16G16B16A16_SNORM, SVGA3DBLOCKDESC_UVWQ,
+ {1, 1, 1}, 8, 8,
+ 64, {{16}, {16}, {16}, {16}},
+ {{32}, {16}, {0}, {48}}},
+
+ {SVGA3D_R16G16B16A16_SINT, SVGA3DBLOCKDESC_UVWQ,
+ {1, 1, 1}, 8, 8,
+ 64, {{16}, {16}, {16}, {16}},
+ {{32}, {16}, {0}, {48}}},
+
+ {SVGA3D_R32G32_TYPELESS, SVGA3DBLOCKDESC_RG,
+ {1, 1, 1}, 8, 8,
+ 64, {{0}, {32}, {32}, {0}},
+ {{0}, {32}, {0}, {0}}},
+
+ {SVGA3D_R32G32_UINT, SVGA3DBLOCKDESC_RG,
+ {1, 1, 1}, 8, 8,
+ 64, {{0}, {32}, {32}, {0}},
+ {{0}, {32}, {0}, {0}}},
+
+ {SVGA3D_R32G32_SINT, SVGA3DBLOCKDESC_UV,
+ {1, 1, 1}, 8, 8,
+ 64, {{0}, {32}, {32}, {0}},
+ {{0}, {32}, {0}, {0}}},
+
+ {SVGA3D_R32G8X24_TYPELESS, SVGA3DBLOCKDESC_RG,
+ {1, 1, 1}, 8, 8,
+ 64, {{0}, {8}, {32}, {0}},
+ {{0}, {32}, {0}, {0}}},
+
+ {SVGA3D_D32_FLOAT_S8X24_UINT, SVGA3DBLOCKDESC_DS,
+ {1, 1, 1}, 8, 8,
+ 64, {{0}, {8}, {32}, {0}},
+ {{0}, {32}, {0}, {0}}},
+
+ {SVGA3D_R32_FLOAT_X8X24_TYPELESS, SVGA3DBLOCKDESC_R_FP,
+ {1, 1, 1}, 8, 8,
+ 64, {{0}, {0}, {32}, {0}},
+ {{0}, {0}, {0}, {0}}},
+
+ {SVGA3D_X32_TYPELESS_G8X24_UINT, SVGA3DBLOCKDESC_GREEN,
+ {1, 1, 1}, 8, 8,
+ 64, {{0}, {8}, {0}, {0}},
+ {{0}, {32}, {0}, {0}}},
+
+ {SVGA3D_R10G10B10A2_TYPELESS, SVGA3DBLOCKDESC_RGBA,
+ {1, 1, 1}, 4, 4,
+ 32, {{10}, {10}, {10}, {2}},
+ {{0}, {10}, {20}, {30}}},
+
+ {SVGA3D_R10G10B10A2_UINT, SVGA3DBLOCKDESC_RGBA,
+ {1, 1, 1}, 4, 4,
+ 32, {{10}, {10}, {10}, {2}},
+ {{0}, {10}, {20}, {30}}},
+
+ {SVGA3D_R11G11B10_FLOAT, SVGA3DBLOCKDESC_RGB_FP,
+ {1, 1, 1}, 4, 4,
+ 32, {{10}, {11}, {11}, {0}},
+ {{0}, {10}, {21}, {0}}},
+
+ {SVGA3D_R8G8B8A8_TYPELESS, SVGA3DBLOCKDESC_RGBA,
+ {1, 1, 1}, 4, 4,
+ 32, {{8}, {8}, {8}, {8}},
+ {{16}, {8}, {0}, {24}}},
+
+ {SVGA3D_R8G8B8A8_UNORM, SVGA3DBLOCKDESC_RGBA,
+ {1, 1, 1}, 4, 4,
+ 32, {{8}, {8}, {8}, {8}},
+ {{16}, {8}, {0}, {24}}},
+
+ {SVGA3D_R8G8B8A8_UNORM_SRGB, SVGA3DBLOCKDESC_RGBA_SRGB,
+ {1, 1, 1}, 4, 4,
+ 32, {{8}, {8}, {8}, {8}},
+ {{16}, {8}, {0}, {24}}},
+
+ {SVGA3D_R8G8B8A8_UINT, SVGA3DBLOCKDESC_RGBA,
+ {1, 1, 1}, 4, 4,
+ 32, {{8}, {8}, {8}, {8}},
+ {{16}, {8}, {0}, {24}}},
+
+ {SVGA3D_R8G8B8A8_SINT, SVGA3DBLOCKDESC_RGBA,
+ {1, 1, 1}, 4, 4,
+ 32, {{8}, {8}, {8}, {8}},
+ {{16}, {8}, {0}, {24}}},
+
+ {SVGA3D_R16G16_TYPELESS, SVGA3DBLOCKDESC_RG,
+ {1, 1, 1}, 4, 4,
+ 32, {{0}, {16}, {16}, {0}},
+ {{0}, {16}, {0}, {0}}},
+
+ {SVGA3D_R16G16_UINT, SVGA3DBLOCKDESC_RG_FP,
+ {1, 1, 1}, 4, 4,
+ 32, {{0}, {16}, {16}, {0}},
+ {{0}, {16}, {0}, {0}}},
+
+ {SVGA3D_R16G16_SINT, SVGA3DBLOCKDESC_UV,
+ {1, 1, 1}, 4, 4,
+ 32, {{0}, {16}, {16}, {0}},
+ {{0}, {16}, {0}, {0}}},
+
+ {SVGA3D_R32_TYPELESS, SVGA3DBLOCKDESC_RED,
+ {1, 1, 1}, 4, 4,
+ 32, {{0}, {0}, {32}, {0}},
+ {{0}, {0}, {0}, {0}}},
+
+ {SVGA3D_D32_FLOAT, SVGA3DBLOCKDESC_DEPTH,
+ {1, 1, 1}, 4, 4,
+ 32, {{0}, {0}, {32}, {0}},
+ {{0}, {0}, {0}, {0}}},
+
+ {SVGA3D_R32_UINT, SVGA3DBLOCKDESC_RED,
+ {1, 1, 1}, 4, 4,
+ 32, {{0}, {0}, {32}, {0}},
+ {{0}, {0}, {0}, {0}}},
+
+ {SVGA3D_R32_SINT, SVGA3DBLOCKDESC_RED,
+ {1, 1, 1}, 4, 4,
+ 32, {{0}, {0}, {32}, {0}},
+ {{0}, {0}, {0}, {0}}},
+
+ {SVGA3D_R24G8_TYPELESS, SVGA3DBLOCKDESC_RG,
+ {1, 1, 1}, 4, 4,
+ 32, {{0}, {8}, {24}, {0}},
+ {{0}, {24}, {0}, {0}}},
+
+ {SVGA3D_D24_UNORM_S8_UINT, SVGA3DBLOCKDESC_DS,
+ {1, 1, 1}, 4, 4,
+ 32, {{0}, {8}, {24}, {0}},
+ {{0}, {24}, {0}, {0}}},
+
+ {SVGA3D_R24_UNORM_X8_TYPELESS, SVGA3DBLOCKDESC_RED,
+ {1, 1, 1}, 4, 4,
+ 32, {{0}, {0}, {24}, {0}},
+ {{0}, {0}, {0}, {0}}},
+
+ {SVGA3D_X24_TYPELESS_G8_UINT, SVGA3DBLOCKDESC_GREEN,
+ {1, 1, 1}, 4, 4,
+ 32, {{0}, {8}, {0}, {0}},
+ {{0}, {24}, {0}, {0}}},
+
+ {SVGA3D_R8G8_TYPELESS, SVGA3DBLOCKDESC_RG,
+ {1, 1, 1}, 2, 2,
+ 16, {{0}, {8}, {8}, {0}},
+ {{0}, {8}, {0}, {0}}},
+
+ {SVGA3D_R8G8_UNORM, SVGA3DBLOCKDESC_RG,
+ {1, 1, 1}, 2, 2,
+ 16, {{0}, {8}, {8}, {0}},
+ {{0}, {8}, {0}, {0}}},
+
+ {SVGA3D_R8G8_UINT, SVGA3DBLOCKDESC_RG,
+ {1, 1, 1}, 2, 2,
+ 16, {{0}, {8}, {8}, {0}},
+ {{0}, {8}, {0}, {0}}},
+
+ {SVGA3D_R8G8_SINT, SVGA3DBLOCKDESC_UV,
+ {1, 1, 1}, 2, 2,
+ 16, {{0}, {8}, {8}, {0}},
+ {{0}, {8}, {0}, {0}}},
+
+ {SVGA3D_R16_TYPELESS, SVGA3DBLOCKDESC_RED,
+ {1, 1, 1}, 2, 2,
+ 16, {{0}, {0}, {16}, {0}},
+ {{0}, {0}, {0}, {0}}},
+
+ {SVGA3D_R16_UNORM, SVGA3DBLOCKDESC_RED,
+ {1, 1, 1}, 2, 2,
+ 16, {{0}, {0}, {16}, {0}},
+ {{0}, {0}, {0}, {0}}},
+
+ {SVGA3D_R16_UINT, SVGA3DBLOCKDESC_RED,
+ {1, 1, 1}, 2, 2,
+ 16, {{0}, {0}, {16}, {0}},
+ {{0}, {0}, {0}, {0}}},
+
+ {SVGA3D_R16_SNORM, SVGA3DBLOCKDESC_U,
+ {1, 1, 1}, 2, 2,
+ 16, {{0}, {0}, {16}, {0}},
+ {{0}, {0}, {0}, {0}}},
+
+ {SVGA3D_R16_SINT, SVGA3DBLOCKDESC_U,
+ {1, 1, 1}, 2, 2,
+ 16, {{0}, {0}, {16}, {0}},
+ {{0}, {0}, {0}, {0}}},
+
+ {SVGA3D_R8_TYPELESS, SVGA3DBLOCKDESC_RED,
+ {1, 1, 1}, 1, 1,
+ 8, {{0}, {0}, {8}, {0}},
+ {{0}, {0}, {0}, {0}}},
+
+ {SVGA3D_R8_UNORM, SVGA3DBLOCKDESC_RED,
+ {1, 1, 1}, 1, 1,
+ 8, {{0}, {0}, {8}, {0}},
+ {{0}, {0}, {0}, {0}}},
+
+ {SVGA3D_R8_UINT, SVGA3DBLOCKDESC_RED,
+ {1, 1, 1}, 1, 1,
+ 8, {{0}, {0}, {8}, {0}},
+ {{0}, {0}, {0}, {0}}},
+
+ {SVGA3D_R8_SNORM, SVGA3DBLOCKDESC_U,
+ {1, 1, 1}, 1, 1,
+ 8, {{0}, {0}, {8}, {0}},
+ {{0}, {0}, {0}, {0}}},
+
+ {SVGA3D_R8_SINT, SVGA3DBLOCKDESC_U,
+ {1, 1, 1}, 1, 1,
+ 8, {{0}, {0}, {8}, {0}},
+ {{0}, {0}, {0}, {0}}},
+
+ {SVGA3D_P8, SVGA3DBLOCKDESC_RED,
+ {1, 1, 1}, 1, 1,
+ 8, {{0}, {0}, {8}, {0}},
+ {{0}, {0}, {0}, {0}}},
+
+ {SVGA3D_R9G9B9E5_SHAREDEXP, SVGA3DBLOCKDESC_RGBE,
+ {1, 1, 1}, 4, 4,
+ 32, {{9}, {9}, {9}, {5}},
+ {{18}, {9}, {0}, {27}}},
+
+ {SVGA3D_R8G8_B8G8_UNORM, SVGA3DBLOCKDESC_RG,
+ {1, 1, 1}, 2, 2,
+ 16, {{0}, {8}, {8}, {0}},
+ {{0}, {8}, {0}, {0}}},
+
+ {SVGA3D_G8R8_G8B8_UNORM, SVGA3DBLOCKDESC_RG,
+ {1, 1, 1}, 2, 2,
+ 16, {{0}, {8}, {8}, {0}},
+ {{0}, {8}, {0}, {0}}},
+
+ {SVGA3D_BC1_TYPELESS, SVGA3DBLOCKDESC_COMPRESSED,
+ {4, 4, 1}, 8, 8,
+ 64, {{0}, {0}, {64}, {0}},
+ {{0}, {0}, {0}, {0}}},
+
+ {SVGA3D_BC1_UNORM_SRGB, SVGA3DBLOCKDESC_COMPRESSED_SRGB,
+ {4, 4, 1}, 8, 8,
+ 64, {{0}, {0}, {64}, {0}},
+ {{0}, {0}, {0}, {0}}},
+
+ {SVGA3D_BC2_TYPELESS, SVGA3DBLOCKDESC_COMPRESSED,
+ {4, 4, 1}, 16, 16,
+ 128, {{0}, {0}, {128}, {0}},
+ {{0}, {0}, {0}, {0}}},
+
+ {SVGA3D_BC2_UNORM_SRGB, SVGA3DBLOCKDESC_COMPRESSED_SRGB,
+ {4, 4, 1}, 16, 16,
+ 128, {{0}, {0}, {128}, {0}},
+ {{0}, {0}, {0}, {0}}},
+
+ {SVGA3D_BC3_TYPELESS, SVGA3DBLOCKDESC_COMPRESSED,
+ {4, 4, 1}, 16, 16,
+ 128, {{0}, {0}, {128}, {0}},
+ {{0}, {0}, {0}, {0}}},
+
+ {SVGA3D_BC3_UNORM_SRGB, SVGA3DBLOCKDESC_COMPRESSED_SRGB,
+ {4, 4, 1}, 16, 16,
+ 128, {{0}, {0}, {128}, {0}},
+ {{0}, {0}, {0}, {0}}},
+
+ {SVGA3D_BC4_TYPELESS, SVGA3DBLOCKDESC_COMPRESSED,
+ {4, 4, 1}, 8, 8,
+ 64, {{0}, {0}, {64}, {0}},
+ {{0}, {0}, {0}, {0}}},
+
+ {SVGA3D_ATI1, SVGA3DBLOCKDESC_COMPRESSED,
+ {4, 4, 1}, 8, 8,
+ 64, {{0}, {0}, {64}, {0}},
+ {{0}, {0}, {0}, {0}}},
+
+ {SVGA3D_BC4_SNORM, SVGA3DBLOCKDESC_COMPRESSED,
+ {4, 4, 1}, 8, 8,
+ 64, {{0}, {0}, {64}, {0}},
+ {{0}, {0}, {0}, {0}}},
+
+ {SVGA3D_BC5_TYPELESS, SVGA3DBLOCKDESC_COMPRESSED,
+ {4, 4, 1}, 16, 16,
+ 128, {{0}, {0}, {128}, {0}},
+ {{0}, {0}, {0}, {0}}},
+
+ {SVGA3D_ATI2, SVGA3DBLOCKDESC_COMPRESSED,
+ {4, 4, 1}, 16, 16,
+ 128, {{0}, {0}, {128}, {0}},
+ {{0}, {0}, {0}, {0}}},
+
+ {SVGA3D_BC5_SNORM, SVGA3DBLOCKDESC_COMPRESSED,
+ {4, 4, 1}, 16, 16,
+ 128, {{0}, {0}, {128}, {0}},
+ {{0}, {0}, {0}, {0}}},
+
+ {SVGA3D_R10G10B10_XR_BIAS_A2_UNORM, SVGA3DBLOCKDESC_RGBA,
+ {1, 1, 1}, 4, 4,
+ 32, {{10}, {10}, {10}, {2}},
+ {{0}, {10}, {20}, {30}}},
+
+ {SVGA3D_B8G8R8A8_TYPELESS, SVGA3DBLOCKDESC_RGBA,
+ {1, 1, 1}, 4, 4,
+ 32, {{8}, {8}, {8}, {8}},
+ {{0}, {8}, {16}, {24}}},
+
+ {SVGA3D_B8G8R8A8_UNORM_SRGB, SVGA3DBLOCKDESC_RGBA_SRGB,
+ {1, 1, 1}, 4, 4,
+ 32, {{8}, {8}, {8}, {8}},
+ {{0}, {8}, {16}, {24}}},
+
+ {SVGA3D_B8G8R8X8_TYPELESS, SVGA3DBLOCKDESC_RGB,
+ {1, 1, 1}, 4, 4,
+ 24, {{8}, {8}, {8}, {0}},
+ {{0}, {8}, {16}, {24}}},
+
+ {SVGA3D_B8G8R8X8_UNORM_SRGB, SVGA3DBLOCKDESC_RGB_SRGB,
+ {1, 1, 1}, 4, 4,
+ 24, {{8}, {8}, {8}, {0}},
+ {{0}, {8}, {16}, {24}}},
+
+ {SVGA3D_Z_DF16, SVGA3DBLOCKDESC_DEPTH,
+ {1, 1, 1}, 2, 2,
+ 16, {{0}, {0}, {16}, {0}},
+ {{0}, {0}, {0}, {0}}},
+
+ {SVGA3D_Z_DF24, SVGA3DBLOCKDESC_DEPTH,
+ {1, 1, 1}, 4, 4,
+ 32, {{0}, {8}, {24}, {0}},
+ {{0}, {24}, {0}, {0}}},
+
+ {SVGA3D_Z_D24S8_INT, SVGA3DBLOCKDESC_DS,
+ {1, 1, 1}, 4, 4,
+ 32, {{0}, {8}, {24}, {0}},
+ {{0}, {24}, {0}, {0}}},
+
+ {SVGA3D_YV12, SVGA3DBLOCKDESC_YV12,
+ {2, 2, 1}, 6, 2,
+ 48, {{0}, {0}, {48}, {0}},
+ {{0}, {0}, {0}, {0}}},
+
+ {SVGA3D_R32G32B32A32_FLOAT, SVGA3DBLOCKDESC_RGBA_FP,
+ {1, 1, 1}, 16, 16,
+ 128, {{32}, {32}, {32}, {32}},
+ {{64}, {32}, {0}, {96}}},
+
+ {SVGA3D_R16G16B16A16_FLOAT, SVGA3DBLOCKDESC_RGBA_FP,
+ {1, 1, 1}, 8, 8,
+ 64, {{16}, {16}, {16}, {16}},
+ {{32}, {16}, {0}, {48}}},
+
+ {SVGA3D_R16G16B16A16_UNORM, SVGA3DBLOCKDESC_RGBA,
+ {1, 1, 1}, 8, 8,
+ 64, {{16}, {16}, {16}, {16}},
+ {{32}, {16}, {0}, {48}}},
+
+ {SVGA3D_R32G32_FLOAT, SVGA3DBLOCKDESC_RG_FP,
+ {1, 1, 1}, 8, 8,
+ 64, {{0}, {32}, {32}, {0}},
+ {{0}, {32}, {0}, {0}}},
+
+ {SVGA3D_R10G10B10A2_UNORM, SVGA3DBLOCKDESC_RGBA,
+ {1, 1, 1}, 4, 4,
+ 32, {{10}, {10}, {10}, {2}},
+ {{0}, {10}, {20}, {30}}},
+
+ {SVGA3D_R8G8B8A8_SNORM, SVGA3DBLOCKDESC_RGBA,
+ {1, 1, 1}, 4, 4,
+ 32, {{8}, {8}, {8}, {8}},
+ {{24}, {16}, {8}, {0}}},
+
+ {SVGA3D_R16G16_FLOAT, SVGA3DBLOCKDESC_RG_FP,
+ {1, 1, 1}, 4, 4,
+ 32, {{0}, {16}, {16}, {0}},
+ {{0}, {16}, {0}, {0}}},
+
+ {SVGA3D_R16G16_UNORM, SVGA3DBLOCKDESC_RG,
+ {1, 1, 1}, 4, 4,
+ 32, {{0}, {16}, {16}, {0}},
+ {{0}, {0}, {16}, {0}}},
+
+ {SVGA3D_R16G16_SNORM, SVGA3DBLOCKDESC_RG,
+ {1, 1, 1}, 4, 4,
+ 32, {{16}, {16}, {0}, {0}},
+ {{16}, {0}, {0}, {0}}},
+
+ {SVGA3D_R32_FLOAT, SVGA3DBLOCKDESC_R_FP,
+ {1, 1, 1}, 4, 4,
+ 32, {{0}, {0}, {32}, {0}},
+ {{0}, {0}, {0}, {0}}},
+
+ {SVGA3D_R8G8_SNORM, SVGA3DBLOCKDESC_RG,
+ {1, 1, 1}, 2, 2,
+ 16, {{8}, {8}, {0}, {0}},
+ {{8}, {0}, {0}, {0}}},
+
+ {SVGA3D_R16_FLOAT, SVGA3DBLOCKDESC_R_FP,
+ {1, 1, 1}, 2, 2,
+ 16, {{0}, {0}, {16}, {0}},
+ {{0}, {0}, {0}, {0}}},
+
+ {SVGA3D_D16_UNORM, SVGA3DBLOCKDESC_DEPTH,
+ {1, 1, 1}, 2, 2,
+ 16, {{0}, {0}, {16}, {0}},
+ {{0}, {0}, {0}, {0}}},
+
+ {SVGA3D_A8_UNORM, SVGA3DBLOCKDESC_ALPHA,
+ {1, 1, 1}, 1, 1,
+ 8, {{0}, {0}, {0}, {8}},
+ {{0}, {0}, {0}, {0}}},
+
+ {SVGA3D_BC1_UNORM, SVGA3DBLOCKDESC_COMPRESSED,
+ {4, 4, 1}, 8, 8,
+ 64, {{0}, {0}, {64}, {0}},
+ {{0}, {0}, {0}, {0}}},
+
+ {SVGA3D_BC2_UNORM, SVGA3DBLOCKDESC_COMPRESSED,
+ {4, 4, 1}, 16, 16,
+ 128, {{0}, {0}, {128}, {0}},
+ {{0}, {0}, {0}, {0}}},
+
+ {SVGA3D_BC3_UNORM, SVGA3DBLOCKDESC_COMPRESSED,
+ {4, 4, 1}, 16, 16,
+ 128, {{0}, {0}, {128}, {0}},
+ {{0}, {0}, {0}, {0}}},
+
+ {SVGA3D_B5G6R5_UNORM, SVGA3DBLOCKDESC_RGB,
+ {1, 1, 1}, 2, 2,
+ 16, {{5}, {6}, {5}, {0}},
+ {{0}, {5}, {11}, {0}}},
+
+ {SVGA3D_B5G5R5A1_UNORM, SVGA3DBLOCKDESC_RGBA,
+ {1, 1, 1}, 2, 2,
+ 16, {{5}, {5}, {5}, {1}},
+ {{0}, {5}, {10}, {15}}},
+
+ {SVGA3D_B8G8R8A8_UNORM, SVGA3DBLOCKDESC_RGBA,
+ {1, 1, 1}, 4, 4,
+ 32, {{8}, {8}, {8}, {8}},
+ {{0}, {8}, {16}, {24}}},
+
+ {SVGA3D_B8G8R8X8_UNORM, SVGA3DBLOCKDESC_RGB,
+ {1, 1, 1}, 4, 4,
+ 24, {{8}, {8}, {8}, {0}},
+ {{0}, {8}, {16}, {24}}},
+
+ {SVGA3D_BC4_UNORM, SVGA3DBLOCKDESC_COMPRESSED,
+ {4, 4, 1}, 8, 8,
+ 64, {{0}, {0}, {64}, {0}},
+ {{0}, {0}, {0}, {0}}},
+
+ {SVGA3D_BC5_UNORM, SVGA3DBLOCKDESC_COMPRESSED,
+ {4, 4, 1}, 16, 16,
+ 128, {{0}, {0}, {128}, {0}},
+ {{0}, {0}, {0}, {0}}},
+
+};
+
+static inline u32 clamped_umul32(u32 a, u32 b)
+{
+ uint64_t tmp = (uint64_t) a*b;
+ return (tmp > (uint64_t) ((u32) -1)) ? (u32) -1 : tmp;
+}
+
+static inline const struct svga3d_surface_desc *
+svga3dsurface_get_desc(SVGA3dSurfaceFormat format)
+{
+ if (format < ARRAY_SIZE(svga3d_surface_descs))
+ return &svga3d_surface_descs[format];
+
+ return &svga3d_surface_descs[SVGA3D_FORMAT_INVALID];
+}
+
+/*
+ *----------------------------------------------------------------------
+ *
+ * svga3dsurface_get_mip_size --
+ *
+ * Given a base level size and the mip level, compute the size of
+ * the mip level.
+ *
+ * Results:
+ * See above.
+ *
+ * Side effects:
+ * None.
+ *
+ *----------------------------------------------------------------------
+ */
+
+static inline surf_size_struct
+svga3dsurface_get_mip_size(surf_size_struct base_level, u32 mip_level)
+{
+ surf_size_struct size;
+
+ size.width = max_t(u32, base_level.width >> mip_level, 1);
+ size.height = max_t(u32, base_level.height >> mip_level, 1);
+ size.depth = max_t(u32, base_level.depth >> mip_level, 1);
+ return size;
+}
+
+static inline void
+svga3dsurface_get_size_in_blocks(const struct svga3d_surface_desc *desc,
+ const surf_size_struct *pixel_size,
+ surf_size_struct *block_size)
+{
+ block_size->width = DIV_ROUND_UP(pixel_size->width,
+ desc->block_size.width);
+ block_size->height = DIV_ROUND_UP(pixel_size->height,
+ desc->block_size.height);
+ block_size->depth = DIV_ROUND_UP(pixel_size->depth,
+ desc->block_size.depth);
+}
+
+static inline bool
+svga3dsurface_is_planar_surface(const struct svga3d_surface_desc *desc)
+{
+ return (desc->block_desc & SVGA3DBLOCKDESC_PLANAR_YUV) != 0;
+}
+
+static inline u32
+svga3dsurface_calculate_pitch(const struct svga3d_surface_desc *desc,
+ const surf_size_struct *size)
+{
+ u32 pitch;
+ surf_size_struct blocks;
+
+ svga3dsurface_get_size_in_blocks(desc, size, &blocks);
+
+ pitch = blocks.width * desc->pitch_bytes_per_block;
+
+ return pitch;
+}
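A hedged usage example, with values that follow from the format table above: DXT1 uses 4x4-pixel blocks of 8 bytes each, so a 256-pixel-wide image is 64 blocks across and has a pitch of 64 * 8 = 512 bytes.

	/* Illustrative only; field names follow the kernel-side struct drm_vmw_size. */
	const struct svga3d_surface_desc *desc = svga3dsurface_get_desc(SVGA3D_DXT1);
	surf_size_struct size = { .width = 256, .height = 256, .depth = 1 };
	u32 pitch = svga3dsurface_calculate_pitch(desc, &size);   /* 512 */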
+
+/*
+ *-----------------------------------------------------------------------------
+ *
+ * svga3dsurface_get_image_buffer_size --
+ *
+ * Return the number of bytes of buffer space required to store
+ * one image of a surface, optionally using the specified pitch.
+ *
+ * If pitch is zero, it is assumed that rows are tightly packed.
+ *
+ * This function is overflow-safe. If the result would have
+ * overflowed, instead we return MAX_UINT32.
+ *
+ * Results:
+ * Byte count.
+ *
+ * Side effects:
+ * None.
+ *
+ *-----------------------------------------------------------------------------
+ */
+
+static inline u32
+svga3dsurface_get_image_buffer_size(const struct svga3d_surface_desc *desc,
+ const surf_size_struct *size,
+ u32 pitch)
+{
+ surf_size_struct image_blocks;
+ u32 slice_size, total_size;
+
+ svga3dsurface_get_size_in_blocks(desc, size, &image_blocks);
+
+ if (svga3dsurface_is_planar_surface(desc)) {
+ total_size = clamped_umul32(image_blocks.width,
+ image_blocks.height);
+ total_size = clamped_umul32(total_size, image_blocks.depth);
+ total_size = clamped_umul32(total_size, desc->bytes_per_block);
+ return total_size;
+ }
+
+ if (pitch == 0)
+ pitch = svga3dsurface_calculate_pitch(desc, size);
+
+ slice_size = clamped_umul32(image_blocks.height, pitch);
+ total_size = clamped_umul32(slice_size, image_blocks.depth);
+
+ return total_size;
+}
+
+static inline u32
+svga3dsurface_get_serialized_size(SVGA3dSurfaceFormat format,
+ surf_size_struct base_level_size,
+ u32 num_mip_levels,
+ u32 num_layers)
+{
+ const struct svga3d_surface_desc *desc = svga3dsurface_get_desc(format);
+ u32 total_size = 0;
+ u32 mip;
+
+ for (mip = 0; mip < num_mip_levels; mip++) {
+ surf_size_struct size =
+ svga3dsurface_get_mip_size(base_level_size, mip);
+ total_size += svga3dsurface_get_image_buffer_size(desc,
+ &size, 0);
+ }
+
+ return total_size * num_layers;
+}
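As a worked example (illustrative, not from the patch): a 256x256 A8R8G8B8 texture with a full 9-level mip chain and one layer serializes to 4 * (256^2 + 128^2 + ... + 1^2) = 349524 bytes.

	surf_size_struct base = { .width = 256, .height = 256, .depth = 1 };
	u32 bytes = svga3dsurface_get_serialized_size(SVGA3D_A8R8G8B8, base, 9, 1);
	/* bytes == 349524 */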
+
+
+/**
+ * svga3dsurface_get_pixel_offset - Compute the offset (in bytes) to a pixel
+ * in an image (or volume).
+ *
+ * @format: The surface format.
+ * @width: The image width in pixels.
+ * @height: The image height in pixels.
+ * @x: X coordinate of the pixel.
+ * @y: Y coordinate of the pixel.
+ * @z: Z coordinate (slice) of the pixel.
+ */
+static inline u32
+svga3dsurface_get_pixel_offset(SVGA3dSurfaceFormat format,
+ u32 width, u32 height,
+ u32 x, u32 y, u32 z)
+{
+ const struct svga3d_surface_desc *desc = svga3dsurface_get_desc(format);
+ const u32 bw = desc->block_size.width, bh = desc->block_size.height;
+ const u32 bd = desc->block_size.depth;
+ const u32 rowstride = DIV_ROUND_UP(width, bw) * desc->bytes_per_block;
+ const u32 imgstride = DIV_ROUND_UP(height, bh) * rowstride;
+ const u32 offset = (z / bd * imgstride +
+ y / bh * rowstride +
+ x / bw * desc->bytes_per_block);
+ return offset;
+}
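A worked example (illustrative): for pixel (10, 5, 0) in a 64x64 DXT1 image, the 4x4 block geometry gives rowstride = (64/4) * 8 = 128 bytes, so the function returns (5/4) * 128 + (10/4) * 8 = 144 -- the offset of the compressed block containing the pixel, since individual pixels of a compressed format have no byte address of their own.

	u32 off = svga3dsurface_get_pixel_offset(SVGA3D_DXT1, 64, 64, 10, 5, 0);
	/* off == 144 */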
+
+
+static inline u32
+svga3dsurface_get_image_offset(SVGA3dSurfaceFormat format,
+ surf_size_struct baseLevelSize,
+ u32 numMipLevels,
+ u32 face,
+ u32 mip)
+{
+ u32 offset;
+ u32 mipChainBytes;
+ u32 mipChainBytesToLevel;
+ u32 i;
+ const struct svga3d_surface_desc *desc;
+ surf_size_struct mipSize;
+ u32 bytes;
+
+ desc = svga3dsurface_get_desc(format);
+
+ mipChainBytes = 0;
+ mipChainBytesToLevel = 0;
+ for (i = 0; i < numMipLevels; i++) {
+ mipSize = svga3dsurface_get_mip_size(baseLevelSize, i);
+ bytes = svga3dsurface_get_image_buffer_size(desc, &mipSize, 0);
+ mipChainBytes += bytes;
+ if (i < mip)
+ mipChainBytesToLevel += bytes;
+ }
+
+ offset = mipChainBytes * face + mipChainBytesToLevel;
+
+ return offset;
+}
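Another worked example (illustrative): in a 64x64 A8R8G8B8 surface with a 7-level mip chain, one mip chain occupies 4 * (64^2 + 32^2 + ... + 1^2) = 21844 bytes, so the image for face 2, mip 0 of a cubemap starts at 2 * 21844 = 43688 bytes.

	surf_size_struct base = { .width = 64, .height = 64, .depth = 1 };
	u32 off = svga3dsurface_get_image_offset(SVGA3D_A8R8G8B8, base, 7, 2, 0);
	/* off == 43688 */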
+
+
+/**
+ * svga3dsurface_is_gb_screen_target_format - Is the specified format usable as
+ * a ScreenTarget?
+ * (with just the GBObjects cap-bit
+ * set)
+ * @format: format to be queried
+ *
+ * RETURNS:
+ * true if queried format is valid for screen targets
+ */
+static inline bool
+svga3dsurface_is_gb_screen_target_format(SVGA3dSurfaceFormat format)
+{
+ return (format == SVGA3D_X8R8G8B8 ||
+ format == SVGA3D_A8R8G8B8 ||
+ format == SVGA3D_R5G6B5 ||
+ format == SVGA3D_X1R5G5B5 ||
+ format == SVGA3D_A1R5G5B5 ||
+ format == SVGA3D_P8);
+}
+
+
+/**
+ * svga3dsurface_is_dx_screen_target_format - Is the specified format usable as
+ * a ScreenTarget?
+ * (with DX10 enabled)
+ *
+ * @format: format to be queried
+ *
+ * Results:
+ * true if queried format is valid for screen targets
+ */
+static inline bool
+svga3dsurface_is_dx_screen_target_format(SVGA3dSurfaceFormat format)
+{
+ return (format == SVGA3D_R8G8B8A8_UNORM ||
+ format == SVGA3D_B8G8R8A8_UNORM ||
+ format == SVGA3D_B8G8R8X8_UNORM);
+}
+
+
+/**
+ * svga3dsurface_is_screen_target_format - Is the specified format usable as a
+ * ScreenTarget?
+ * (for some combination of caps)
+ *
+ * @format: format to be queried
+ *
+ * Results:
+ * true if queried format is valid for screen targets
+ */
+static inline bool
+svga3dsurface_is_screen_target_format(SVGA3dSurfaceFormat format)
+{
+ return svga3dsurface_is_gb_screen_target_format(format) ||
+        svga3dsurface_is_dx_screen_target_format(format);
+}
diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga3d_types.h b/drivers/gpu/drm/vmwgfx/device_include/svga3d_types.h
new file mode 100644
index 000000000000..27b33ba88430
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/device_include/svga3d_types.h
@@ -0,0 +1,1633 @@
+/**********************************************************
+ * Copyright 2012-2015 VMware, Inc. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy,
+ * modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ **********************************************************/
+
+/*
+ * svga3d_types.h --
+ *
+ * SVGA 3d hardware definitions for basic types
+ */
+
+#ifndef _SVGA3D_TYPES_H_
+#define _SVGA3D_TYPES_H_
+
+#define INCLUDE_ALLOW_MODULE
+#define INCLUDE_ALLOW_USERLEVEL
+#define INCLUDE_ALLOW_VMCORE
+
+#include "includeCheck.h"
+
+/*
+ * Generic Types
+ */
+
+#define SVGA3D_INVALID_ID ((uint32)-1)
+
+typedef uint32 SVGA3dBool; /* 32-bit Bool definition */
+typedef uint32 SVGA3dColor; /* a, r, g, b */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCopyRect {
+ uint32 x;
+ uint32 y;
+ uint32 w;
+ uint32 h;
+ uint32 srcx;
+ uint32 srcy;
+}
+#include "vmware_pack_end.h"
+SVGA3dCopyRect;
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCopyBox {
+ uint32 x;
+ uint32 y;
+ uint32 z;
+ uint32 w;
+ uint32 h;
+ uint32 d;
+ uint32 srcx;
+ uint32 srcy;
+ uint32 srcz;
+}
+#include "vmware_pack_end.h"
+SVGA3dCopyBox;
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dRect {
+ uint32 x;
+ uint32 y;
+ uint32 w;
+ uint32 h;
+}
+#include "vmware_pack_end.h"
+SVGA3dRect;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ uint32 x;
+ uint32 y;
+ uint32 z;
+ uint32 w;
+ uint32 h;
+ uint32 d;
+}
+#include "vmware_pack_end.h"
+SVGA3dBox;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ uint32 x;
+ uint32 y;
+ uint32 z;
+}
+#include "vmware_pack_end.h"
+SVGA3dPoint;
+
+/*
+ * Surface formats.
+ */
+typedef enum SVGA3dSurfaceFormat {
+ SVGA3D_FORMAT_INVALID = 0,
+
+ SVGA3D_X8R8G8B8 = 1,
+ SVGA3D_FORMAT_MIN = 1,
+
+ SVGA3D_A8R8G8B8 = 2,
+
+ SVGA3D_R5G6B5 = 3,
+ SVGA3D_X1R5G5B5 = 4,
+ SVGA3D_A1R5G5B5 = 5,
+ SVGA3D_A4R4G4B4 = 6,
+
+ SVGA3D_Z_D32 = 7,
+ SVGA3D_Z_D16 = 8,
+ SVGA3D_Z_D24S8 = 9,
+ SVGA3D_Z_D15S1 = 10,
+
+ SVGA3D_LUMINANCE8 = 11,
+ SVGA3D_LUMINANCE4_ALPHA4 = 12,
+ SVGA3D_LUMINANCE16 = 13,
+ SVGA3D_LUMINANCE8_ALPHA8 = 14,
+
+ SVGA3D_DXT1 = 15,
+ SVGA3D_DXT2 = 16,
+ SVGA3D_DXT3 = 17,
+ SVGA3D_DXT4 = 18,
+ SVGA3D_DXT5 = 19,
+
+ SVGA3D_BUMPU8V8 = 20,
+ SVGA3D_BUMPL6V5U5 = 21,
+ SVGA3D_BUMPX8L8V8U8 = 22,
+ SVGA3D_BUMPL8V8U8 = 23,
+
+ SVGA3D_ARGB_S10E5 = 24, /* 16-bit floating-point ARGB */
+ SVGA3D_ARGB_S23E8 = 25, /* 32-bit floating-point ARGB */
+
+ SVGA3D_A2R10G10B10 = 26,
+
+ /* signed formats */
+ SVGA3D_V8U8 = 27,
+ SVGA3D_Q8W8V8U8 = 28,
+ SVGA3D_CxV8U8 = 29,
+
+ /* mixed formats */
+ SVGA3D_X8L8V8U8 = 30,
+ SVGA3D_A2W10V10U10 = 31,
+
+ SVGA3D_ALPHA8 = 32,
+
+ /* Single- and dual-component floating point formats */
+ SVGA3D_R_S10E5 = 33,
+ SVGA3D_R_S23E8 = 34,
+ SVGA3D_RG_S10E5 = 35,
+ SVGA3D_RG_S23E8 = 36,
+
+ SVGA3D_BUFFER = 37,
+
+ SVGA3D_Z_D24X8 = 38,
+
+ SVGA3D_V16U16 = 39,
+
+ SVGA3D_G16R16 = 40,
+ SVGA3D_A16B16G16R16 = 41,
+
+ /* Packed Video formats */
+ SVGA3D_UYVY = 42,
+ SVGA3D_YUY2 = 43,
+
+ /* Planar video formats */
+ SVGA3D_NV12 = 44,
+
+ /* Video format with alpha */
+ SVGA3D_AYUV = 45,
+
+ SVGA3D_R32G32B32A32_TYPELESS = 46,
+ SVGA3D_R32G32B32A32_UINT = 47,
+ SVGA3D_R32G32B32A32_SINT = 48,
+ SVGA3D_R32G32B32_TYPELESS = 49,
+ SVGA3D_R32G32B32_FLOAT = 50,
+ SVGA3D_R32G32B32_UINT = 51,
+ SVGA3D_R32G32B32_SINT = 52,
+ SVGA3D_R16G16B16A16_TYPELESS = 53,
+ SVGA3D_R16G16B16A16_UINT = 54,
+ SVGA3D_R16G16B16A16_SNORM = 55,
+ SVGA3D_R16G16B16A16_SINT = 56,
+ SVGA3D_R32G32_TYPELESS = 57,
+ SVGA3D_R32G32_UINT = 58,
+ SVGA3D_R32G32_SINT = 59,
+ SVGA3D_R32G8X24_TYPELESS = 60,
+ SVGA3D_D32_FLOAT_S8X24_UINT = 61,
+ SVGA3D_R32_FLOAT_X8X24_TYPELESS = 62,
+ SVGA3D_X32_TYPELESS_G8X24_UINT = 63,
+ SVGA3D_R10G10B10A2_TYPELESS = 64,
+ SVGA3D_R10G10B10A2_UINT = 65,
+ SVGA3D_R11G11B10_FLOAT = 66,
+ SVGA3D_R8G8B8A8_TYPELESS = 67,
+ SVGA3D_R8G8B8A8_UNORM = 68,
+ SVGA3D_R8G8B8A8_UNORM_SRGB = 69,
+ SVGA3D_R8G8B8A8_UINT = 70,
+ SVGA3D_R8G8B8A8_SINT = 71,
+ SVGA3D_R16G16_TYPELESS = 72,
+ SVGA3D_R16G16_UINT = 73,
+ SVGA3D_R16G16_SINT = 74,
+ SVGA3D_R32_TYPELESS = 75,
+ SVGA3D_D32_FLOAT = 76,
+ SVGA3D_R32_UINT = 77,
+ SVGA3D_R32_SINT = 78,
+ SVGA3D_R24G8_TYPELESS = 79,
+ SVGA3D_D24_UNORM_S8_UINT = 80,
+ SVGA3D_R24_UNORM_X8_TYPELESS = 81,
+ SVGA3D_X24_TYPELESS_G8_UINT = 82,
+ SVGA3D_R8G8_TYPELESS = 83,
+ SVGA3D_R8G8_UNORM = 84,
+ SVGA3D_R8G8_UINT = 85,
+ SVGA3D_R8G8_SINT = 86,
+ SVGA3D_R16_TYPELESS = 87,
+ SVGA3D_R16_UNORM = 88,
+ SVGA3D_R16_UINT = 89,
+ SVGA3D_R16_SNORM = 90,
+ SVGA3D_R16_SINT = 91,
+ SVGA3D_R8_TYPELESS = 92,
+ SVGA3D_R8_UNORM = 93,
+ SVGA3D_R8_UINT = 94,
+ SVGA3D_R8_SNORM = 95,
+ SVGA3D_R8_SINT = 96,
+ SVGA3D_P8 = 97,
+ SVGA3D_R9G9B9E5_SHAREDEXP = 98,
+ SVGA3D_R8G8_B8G8_UNORM = 99,
+ SVGA3D_G8R8_G8B8_UNORM = 100,
+ SVGA3D_BC1_TYPELESS = 101,
+ SVGA3D_BC1_UNORM_SRGB = 102,
+ SVGA3D_BC2_TYPELESS = 103,
+ SVGA3D_BC2_UNORM_SRGB = 104,
+ SVGA3D_BC3_TYPELESS = 105,
+ SVGA3D_BC3_UNORM_SRGB = 106,
+ SVGA3D_BC4_TYPELESS = 107,
+ SVGA3D_ATI1 = 108, /* DX9-specific BC4_UNORM */
+ SVGA3D_BC4_SNORM = 109,
+ SVGA3D_BC5_TYPELESS = 110,
+ SVGA3D_ATI2 = 111, /* DX9-specific BC5_UNORM */
+ SVGA3D_BC5_SNORM = 112,
+ SVGA3D_R10G10B10_XR_BIAS_A2_UNORM = 113,
+ SVGA3D_B8G8R8A8_TYPELESS = 114,
+ SVGA3D_B8G8R8A8_UNORM_SRGB = 115,
+ SVGA3D_B8G8R8X8_TYPELESS = 116,
+ SVGA3D_B8G8R8X8_UNORM_SRGB = 117,
+
+ /* Advanced depth formats. */
+ SVGA3D_Z_DF16 = 118,
+ SVGA3D_Z_DF24 = 119,
+ SVGA3D_Z_D24S8_INT = 120,
+
+ /* Planar video formats. */
+ SVGA3D_YV12 = 121,
+
+ SVGA3D_R32G32B32A32_FLOAT = 122,
+ SVGA3D_R16G16B16A16_FLOAT = 123,
+ SVGA3D_R16G16B16A16_UNORM = 124,
+ SVGA3D_R32G32_FLOAT = 125,
+ SVGA3D_R10G10B10A2_UNORM = 126,
+ SVGA3D_R8G8B8A8_SNORM = 127,
+ SVGA3D_R16G16_FLOAT = 128,
+ SVGA3D_R16G16_UNORM = 129,
+ SVGA3D_R16G16_SNORM = 130,
+ SVGA3D_R32_FLOAT = 131,
+ SVGA3D_R8G8_SNORM = 132,
+ SVGA3D_R16_FLOAT = 133,
+ SVGA3D_D16_UNORM = 134,
+ SVGA3D_A8_UNORM = 135,
+ SVGA3D_BC1_UNORM = 136,
+ SVGA3D_BC2_UNORM = 137,
+ SVGA3D_BC3_UNORM = 138,
+ SVGA3D_B5G6R5_UNORM = 139,
+ SVGA3D_B5G5R5A1_UNORM = 140,
+ SVGA3D_B8G8R8A8_UNORM = 141,
+ SVGA3D_B8G8R8X8_UNORM = 142,
+ SVGA3D_BC4_UNORM = 143,
+ SVGA3D_BC5_UNORM = 144,
+
+ SVGA3D_FORMAT_MAX
+} SVGA3dSurfaceFormat;
+
+typedef enum SVGA3dSurfaceFlags {
+ SVGA3D_SURFACE_CUBEMAP = (1 << 0),
+
+ /*
+ * HINT flags are not enforced by the device but are useful for
+ * performance.
+ */
+ SVGA3D_SURFACE_HINT_STATIC = (1 << 1),
+ SVGA3D_SURFACE_HINT_DYNAMIC = (1 << 2),
+ SVGA3D_SURFACE_HINT_INDEXBUFFER = (1 << 3),
+ SVGA3D_SURFACE_HINT_VERTEXBUFFER = (1 << 4),
+ SVGA3D_SURFACE_HINT_TEXTURE = (1 << 5),
+ SVGA3D_SURFACE_HINT_RENDERTARGET = (1 << 6),
+ SVGA3D_SURFACE_HINT_DEPTHSTENCIL = (1 << 7),
+ SVGA3D_SURFACE_HINT_WRITEONLY = (1 << 8),
+ SVGA3D_SURFACE_MASKABLE_ANTIALIAS = (1 << 9),
+ SVGA3D_SURFACE_AUTOGENMIPMAPS = (1 << 10),
+ SVGA3D_SURFACE_DECODE_RENDERTARGET = (1 << 11),
+
+ /*
+ * Is this surface using a base-level pitch for its mob backing?
+ *
+ * This flag is not intended to be set by guest-drivers, but is instead
+ * set by the device when the surface is bound to a mob with a specified
+ * pitch.
+ */
+ SVGA3D_SURFACE_MOB_PITCH = (1 << 12),
+
+ SVGA3D_SURFACE_INACTIVE = (1 << 13),
+ SVGA3D_SURFACE_HINT_RT_LOCKABLE = (1 << 14),
+ SVGA3D_SURFACE_VOLUME = (1 << 15),
+
+ /*
+ * Required to be set on a surface to bind it to a screen target.
+ */
+ SVGA3D_SURFACE_SCREENTARGET = (1 << 16),
+
+ /*
+ * Align images in the guest-backing mob to 16-byte boundaries.
+ */
+ SVGA3D_SURFACE_ALIGN16 = (1 << 17),
+
+ SVGA3D_SURFACE_1D = (1 << 18),
+ SVGA3D_SURFACE_ARRAY = (1 << 19),
+
+ /*
+ * Bind flags.
+ * These are enforced for any surface defined with DefineGBSurface_v2.
+ */
+ SVGA3D_SURFACE_BIND_VERTEX_BUFFER = (1 << 20),
+ SVGA3D_SURFACE_BIND_INDEX_BUFFER = (1 << 21),
+ SVGA3D_SURFACE_BIND_CONSTANT_BUFFER = (1 << 22),
+ SVGA3D_SURFACE_BIND_SHADER_RESOURCE = (1 << 23),
+ SVGA3D_SURFACE_BIND_RENDER_TARGET = (1 << 24),
+ SVGA3D_SURFACE_BIND_DEPTH_STENCIL = (1 << 25),
+ SVGA3D_SURFACE_BIND_STREAM_OUTPUT = (1 << 26),
+
+ /*
+ * A note on staging flags:
+ *
+ * The STAGING flags note that the surface will not be used directly by the
+ * drawing pipeline, i.e. that it will not be bound to any bind point.
+ * Staging surfaces may be used by copy operations to move data in and out
+ * of other surfaces.
+ *
+ * The HINT_INDIRECT_UPDATE flag suggests that the surface will receive
+ * updates indirectly, i.e. the surface will not be updated directly, but
+ * will receive copies from staging surfaces.
+ */
+ SVGA3D_SURFACE_STAGING_UPLOAD = (1 << 27),
+ SVGA3D_SURFACE_STAGING_DOWNLOAD = (1 << 28),
+ SVGA3D_SURFACE_HINT_INDIRECT_UPDATE = (1 << 29),
+
+ /*
+ * Setting this flag allows this surface to be used with the
+ * SVGA_3D_CMD_DX_TRANSFER_FROM_BUFFER command. It is only valid for
+ * buffer surfaces, and no bind flags are allowed to be set on surfaces
+ * with this flag.
+ */
+ SVGA3D_SURFACE_TRANSFER_FROM_BUFFER = (1 << 30),
+
+ /*
+ * Marker for the last defined bit.
+ */
+ SVGA3D_SURFACE_FLAG_MAX = (1 << 31),
+} SVGA3dSurfaceFlags;
+
+#define SVGA3D_SURFACE_HB_DISALLOWED_MASK \
+ ( SVGA3D_SURFACE_MOB_PITCH | \
+ SVGA3D_SURFACE_SCREENTARGET | \
+ SVGA3D_SURFACE_ALIGN16 | \
+ SVGA3D_SURFACE_BIND_CONSTANT_BUFFER | \
+ SVGA3D_SURFACE_BIND_STREAM_OUTPUT | \
+ SVGA3D_SURFACE_STAGING_UPLOAD | \
+ SVGA3D_SURFACE_STAGING_DOWNLOAD | \
+ SVGA3D_SURFACE_HINT_INDIRECT_UPDATE | \
+ SVGA3D_SURFACE_TRANSFER_FROM_BUFFER \
+ )
+
+#define SVGA3D_SURFACE_2D_DISALLOWED_MASK \
+ ( SVGA3D_SURFACE_CUBEMAP | \
+ SVGA3D_SURFACE_MASKABLE_ANTIALIAS | \
+ SVGA3D_SURFACE_AUTOGENMIPMAPS | \
+ SVGA3D_SURFACE_DECODE_RENDERTARGET | \
+ SVGA3D_SURFACE_VOLUME | \
+ SVGA3D_SURFACE_1D | \
+ SVGA3D_SURFACE_ARRAY | \
+ SVGA3D_SURFACE_BIND_VERTEX_BUFFER | \
+ SVGA3D_SURFACE_BIND_INDEX_BUFFER | \
+ SVGA3D_SURFACE_BIND_CONSTANT_BUFFER | \
+ SVGA3D_SURFACE_BIND_DEPTH_STENCIL | \
+ SVGA3D_SURFACE_BIND_STREAM_OUTPUT | \
+ SVGA3D_SURFACE_TRANSFER_FROM_BUFFER \
+ )
+
+#define SVGA3D_SURFACE_SCREENTARGET_DISALLOWED_MASK \
+ ( SVGA3D_SURFACE_CUBEMAP | \
+ SVGA3D_SURFACE_AUTOGENMIPMAPS | \
+ SVGA3D_SURFACE_DECODE_RENDERTARGET | \
+ SVGA3D_SURFACE_VOLUME | \
+ SVGA3D_SURFACE_1D | \
+ SVGA3D_SURFACE_BIND_VERTEX_BUFFER | \
+ SVGA3D_SURFACE_BIND_INDEX_BUFFER | \
+ SVGA3D_SURFACE_BIND_CONSTANT_BUFFER | \
+ SVGA3D_SURFACE_BIND_DEPTH_STENCIL | \
+ SVGA3D_SURFACE_BIND_STREAM_OUTPUT | \
+ SVGA3D_SURFACE_INACTIVE | \
+ SVGA3D_SURFACE_STAGING_UPLOAD | \
+ SVGA3D_SURFACE_STAGING_DOWNLOAD | \
+ SVGA3D_SURFACE_HINT_INDIRECT_UPDATE | \
+ SVGA3D_SURFACE_TRANSFER_FROM_BUFFER \
+ )
+
+#define SVGA3D_SURFACE_DX_ONLY_MASK \
+ ( SVGA3D_SURFACE_BIND_STREAM_OUTPUT | \
+ SVGA3D_SURFACE_TRANSFER_FROM_BUFFER \
+ )
+
+#define SVGA3D_SURFACE_STAGING_MASK \
+ ( SVGA3D_SURFACE_STAGING_UPLOAD | \
+ SVGA3D_SURFACE_STAGING_DOWNLOAD \
+ )
+
+#define SVGA3D_SURFACE_BIND_MASK \
+ ( SVGA3D_SURFACE_BIND_VERTEX_BUFFER | \
+ SVGA3D_SURFACE_BIND_INDEX_BUFFER | \
+ SVGA3D_SURFACE_BIND_CONSTANT_BUFFER | \
+ SVGA3D_SURFACE_BIND_SHADER_RESOURCE | \
+ SVGA3D_SURFACE_BIND_RENDER_TARGET | \
+ SVGA3D_SURFACE_BIND_DEPTH_STENCIL | \
+ SVGA3D_SURFACE_BIND_STREAM_OUTPUT \
+ )
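These masks compose naturally for validation. A hedged sketch (staging_flags_valid() is not a real driver helper) of the rule stated in the staging-flags note above -- a staging surface must not carry any bind flags:

	static inline bool staging_flags_valid(SVGA3dSurfaceFlags flags)
	{
		/* Per the staging note above: staging surfaces are never
		 * bound to a pipeline bind point.
		 */
		if (flags & SVGA3D_SURFACE_STAGING_MASK)
			return (flags & SVGA3D_SURFACE_BIND_MASK) == 0;
		return true;
	}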
+
+typedef enum {
+ SVGA3DFORMAT_OP_TEXTURE = 0x00000001,
+ SVGA3DFORMAT_OP_VOLUMETEXTURE = 0x00000002,
+ SVGA3DFORMAT_OP_CUBETEXTURE = 0x00000004,
+ SVGA3DFORMAT_OP_OFFSCREEN_RENDERTARGET = 0x00000008,
+ SVGA3DFORMAT_OP_SAME_FORMAT_RENDERTARGET = 0x00000010,
+ SVGA3DFORMAT_OP_ZSTENCIL = 0x00000040,
+ SVGA3DFORMAT_OP_ZSTENCIL_WITH_ARBITRARY_COLOR_DEPTH = 0x00000080,
+
+/*
+ * This format can be used as a render target if the current display mode
+ * is of the same depth when the alpha channel is ignored, e.g. if the device
+ * can render to A8R8G8B8 when the display mode is X8R8G8B8, then the
+ * format op list entry for A8R8G8B8 should have this cap.
+ */
+ SVGA3DFORMAT_OP_SAME_FORMAT_UP_TO_ALPHA_RENDERTARGET = 0x00000100,
+
+/*
+ * This format contains DirectDraw support (including Flip). This flag
+ * should not be set on alpha formats.
+ */
+ SVGA3DFORMAT_OP_DISPLAYMODE = 0x00000400,
+
+/*
+ * The rasterizer can provide some level of Direct3D support in this format
+ * and implies that the driver can create a Context in this mode (for some
+ * render target format). When this flag is set, the SVGA3DFORMAT_OP_DISPLAYMODE
+ * flag must also be set.
+ */
+ SVGA3DFORMAT_OP_3DACCELERATION = 0x00000800,
+
+/*
+ * This is set for a private format when the driver has put the bpp in
+ * the structure.
+ */
+ SVGA3DFORMAT_OP_PIXELSIZE = 0x00001000,
+
+/*
+ * Indicates that this format can be converted to any RGB format for which
+ * SVGA3DFORMAT_OP_MEMBEROFGROUP_ARGB is specified
+ */
+ SVGA3DFORMAT_OP_CONVERT_TO_ARGB = 0x00002000,
+
+/*
+ * Indicates that this format can be used to create offscreen plain surfaces.
+ */
+ SVGA3DFORMAT_OP_OFFSCREENPLAIN = 0x00004000,
+
+/*
+ * Indicates that this format can be read as an SRGB texture (meaning that the
+ * sampler will linearize the looked up data)
+ */
+ SVGA3DFORMAT_OP_SRGBREAD = 0x00008000,
+
+/*
+ * Indicates that this format can be used in the bumpmap instructions
+ */
+ SVGA3DFORMAT_OP_BUMPMAP = 0x00010000,
+
+/*
+ * Indicates that this format can be sampled by the displacement map sampler
+ */
+ SVGA3DFORMAT_OP_DMAP = 0x00020000,
+
+/*
+ * Indicates that this format cannot be used with texture filtering
+ */
+ SVGA3DFORMAT_OP_NOFILTER = 0x00040000,
+
+/*
+ * Indicates that format conversions are supported to this RGB format if
+ * SVGA3DFORMAT_OP_CONVERT_TO_ARGB is specified in the source format.
+ */
+ SVGA3DFORMAT_OP_MEMBEROFGROUP_ARGB = 0x00080000,
+
+/*
+ * Indicates that this format can be written as an SRGB target
+ * (meaning that the pixel pipe will DE-linearize data on output to this
+ * format).
+ */
+ SVGA3DFORMAT_OP_SRGBWRITE = 0x00100000,
+
+/*
+ * Indicates that this format cannot be used with alpha blending
+ */
+ SVGA3DFORMAT_OP_NOALPHABLEND = 0x00200000,
+
+/*
+ * Indicates that the device can auto-generate sublevels for resources
+ * of this format.
+ */
+ SVGA3DFORMAT_OP_AUTOGENMIPMAP = 0x00400000,
+
+/*
+ * Indicates that this format can be used by vertex texture sampler
+ */
+ SVGA3DFORMAT_OP_VERTEXTEXTURE = 0x00800000,
+
+/*
+ * Indicates that this format supports neither texture coordinate
+ * wrap modes, nor mipmapping.
+ */
+ SVGA3DFORMAT_OP_NOTEXCOORDWRAPNORMIP = 0x01000000
+} SVGA3dFormatOp;
+
+#define SVGA3D_FORMAT_POSITIVE \
+ (SVGA3DFORMAT_OP_TEXTURE | \
+ SVGA3DFORMAT_OP_VOLUMETEXTURE | \
+ SVGA3DFORMAT_OP_CUBETEXTURE | \
+ SVGA3DFORMAT_OP_OFFSCREEN_RENDERTARGET | \
+ SVGA3DFORMAT_OP_SAME_FORMAT_RENDERTARGET | \
+ SVGA3DFORMAT_OP_ZSTENCIL | \
+ SVGA3DFORMAT_OP_ZSTENCIL_WITH_ARBITRARY_COLOR_DEPTH | \
+ SVGA3DFORMAT_OP_SAME_FORMAT_UP_TO_ALPHA_RENDERTARGET | \
+ SVGA3DFORMAT_OP_DISPLAYMODE | \
+ SVGA3DFORMAT_OP_3DACCELERATION | \
+ SVGA3DFORMAT_OP_PIXELSIZE | \
+ SVGA3DFORMAT_OP_CONVERT_TO_ARGB | \
+ SVGA3DFORMAT_OP_OFFSCREENPLAIN | \
+ SVGA3DFORMAT_OP_SRGBREAD | \
+ SVGA3DFORMAT_OP_BUMPMAP | \
+ SVGA3DFORMAT_OP_DMAP | \
+ SVGA3DFORMAT_OP_MEMBEROFGROUP_ARGB | \
+ SVGA3DFORMAT_OP_SRGBWRITE | \
+ SVGA3DFORMAT_OP_AUTOGENMIPMAP | \
+ SVGA3DFORMAT_OP_VERTEXTEXTURE)
+
+#define SVGA3D_FORMAT_NEGATIVE \
+ (SVGA3DFORMAT_OP_NOFILTER | \
+ SVGA3DFORMAT_OP_NOALPHABLEND | \
+ SVGA3DFORMAT_OP_NOTEXCOORDWRAPNORMIP)
+
+/*
+ * This structure is a bitfield conversion of SVGA3DFORMAT_OP_*;
+ * entries must be located at the same bit positions.
+ */
+typedef union {
+ uint32 value;
+ struct {
+ uint32 texture : 1;
+ uint32 volumeTexture : 1;
+ uint32 cubeTexture : 1;
+ uint32 offscreenRenderTarget : 1;
+ uint32 sameFormatRenderTarget : 1;
+ uint32 unknown1 : 1;
+ uint32 zStencil : 1;
+ uint32 zStencilArbitraryDepth : 1;
+ uint32 sameFormatUpToAlpha : 1;
+ uint32 unknown2 : 1;
+ uint32 displayMode : 1;
+ uint32 acceleration3d : 1;
+ uint32 pixelSize : 1;
+ uint32 convertToARGB : 1;
+ uint32 offscreenPlain : 1;
+ uint32 sRGBRead : 1;
+ uint32 bumpMap : 1;
+ uint32 dmap : 1;
+ uint32 noFilter : 1;
+ uint32 memberOfGroupARGB : 1;
+ uint32 sRGBWrite : 1;
+ uint32 noAlphaBlend : 1;
+ uint32 autoGenMipMap : 1;
+ uint32 vertexTexture : 1;
+ uint32 noTexCoordWrapNorMip : 1;
+ };
+} SVGA3dSurfaceFormatCaps;
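+
+/*
+ * Illustrative sketch, not part of the device headers: because the
+ * bitfield above mirrors the SVGA3DFORMAT_OP_* bit positions, a caps
+ * word built from the enum can be read back through the named bits.
+ */
+static inline SVGA3dSurfaceFormatCaps
+svga3dFormatCapsExample(void)
+{
+   SVGA3dSurfaceFormatCaps caps;
+
+   caps.value = SVGA3DFORMAT_OP_TEXTURE | SVGA3DFORMAT_OP_SRGBREAD;
+   /* By layout, caps.texture == 1 and caps.sRGBRead == 1 now hold. */
+   return caps;
+}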
+
+/*
+ * SVGA_3D_CMD_SETRENDERSTATE Types. All value types
+ * must fit in a uint32.
+ */
+
+typedef enum {
+ SVGA3D_RS_INVALID = 0,
+ SVGA3D_RS_MIN = 1,
+ SVGA3D_RS_ZENABLE = 1, /* SVGA3dBool */
+ SVGA3D_RS_ZWRITEENABLE = 2, /* SVGA3dBool */
+ SVGA3D_RS_ALPHATESTENABLE = 3, /* SVGA3dBool */
+ SVGA3D_RS_DITHERENABLE = 4, /* SVGA3dBool */
+ SVGA3D_RS_BLENDENABLE = 5, /* SVGA3dBool */
+ SVGA3D_RS_FOGENABLE = 6, /* SVGA3dBool */
+ SVGA3D_RS_SPECULARENABLE = 7, /* SVGA3dBool */
+ SVGA3D_RS_STENCILENABLE = 8, /* SVGA3dBool */
+ SVGA3D_RS_LIGHTINGENABLE = 9, /* SVGA3dBool */
+ SVGA3D_RS_NORMALIZENORMALS = 10, /* SVGA3dBool */
+ SVGA3D_RS_POINTSPRITEENABLE = 11, /* SVGA3dBool */
+ SVGA3D_RS_POINTSCALEENABLE = 12, /* SVGA3dBool */
+ SVGA3D_RS_STENCILREF = 13, /* uint32 */
+ SVGA3D_RS_STENCILMASK = 14, /* uint32 */
+ SVGA3D_RS_STENCILWRITEMASK = 15, /* uint32 */
+ SVGA3D_RS_FOGSTART = 16, /* float */
+ SVGA3D_RS_FOGEND = 17, /* float */
+ SVGA3D_RS_FOGDENSITY = 18, /* float */
+ SVGA3D_RS_POINTSIZE = 19, /* float */
+ SVGA3D_RS_POINTSIZEMIN = 20, /* float */
+ SVGA3D_RS_POINTSIZEMAX = 21, /* float */
+ SVGA3D_RS_POINTSCALE_A = 22, /* float */
+ SVGA3D_RS_POINTSCALE_B = 23, /* float */
+ SVGA3D_RS_POINTSCALE_C = 24, /* float */
+ SVGA3D_RS_FOGCOLOR = 25, /* SVGA3dColor */
+ SVGA3D_RS_AMBIENT = 26, /* SVGA3dColor */
+ SVGA3D_RS_CLIPPLANEENABLE = 27, /* SVGA3dClipPlanes */
+ SVGA3D_RS_FOGMODE = 28, /* SVGA3dFogMode */
+ SVGA3D_RS_FILLMODE = 29, /* SVGA3dFillMode */
+ SVGA3D_RS_SHADEMODE = 30, /* SVGA3dShadeMode */
+ SVGA3D_RS_LINEPATTERN = 31, /* SVGA3dLinePattern */
+ SVGA3D_RS_SRCBLEND = 32, /* SVGA3dBlendOp */
+ SVGA3D_RS_DSTBLEND = 33, /* SVGA3dBlendOp */
+ SVGA3D_RS_BLENDEQUATION = 34, /* SVGA3dBlendEquation */
+ SVGA3D_RS_CULLMODE = 35, /* SVGA3dFace */
+ SVGA3D_RS_ZFUNC = 36, /* SVGA3dCmpFunc */
+ SVGA3D_RS_ALPHAFUNC = 37, /* SVGA3dCmpFunc */
+ SVGA3D_RS_STENCILFUNC = 38, /* SVGA3dCmpFunc */
+ SVGA3D_RS_STENCILFAIL = 39, /* SVGA3dStencilOp */
+ SVGA3D_RS_STENCILZFAIL = 40, /* SVGA3dStencilOp */
+ SVGA3D_RS_STENCILPASS = 41, /* SVGA3dStencilOp */
+ SVGA3D_RS_ALPHAREF = 42, /* float (0.0 .. 1.0) */
+ SVGA3D_RS_FRONTWINDING = 43, /* SVGA3dFrontWinding */
+ SVGA3D_RS_COORDINATETYPE = 44, /* SVGA3dCoordinateType */
+ SVGA3D_RS_ZBIAS = 45, /* float */
+ SVGA3D_RS_RANGEFOGENABLE = 46, /* SVGA3dBool */
+ SVGA3D_RS_COLORWRITEENABLE = 47, /* SVGA3dColorMask */
+ SVGA3D_RS_VERTEXMATERIALENABLE = 48, /* SVGA3dBool */
+ SVGA3D_RS_DIFFUSEMATERIALSOURCE = 49, /* SVGA3dVertexMaterial */
+ SVGA3D_RS_SPECULARMATERIALSOURCE = 50, /* SVGA3dVertexMaterial */
+ SVGA3D_RS_AMBIENTMATERIALSOURCE = 51, /* SVGA3dVertexMaterial */
+ SVGA3D_RS_EMISSIVEMATERIALSOURCE = 52, /* SVGA3dVertexMaterial */
+ SVGA3D_RS_TEXTUREFACTOR = 53, /* SVGA3dColor */
+ SVGA3D_RS_LOCALVIEWER = 54, /* SVGA3dBool */
+ SVGA3D_RS_SCISSORTESTENABLE = 55, /* SVGA3dBool */
+ SVGA3D_RS_BLENDCOLOR = 56, /* SVGA3dColor */
+ SVGA3D_RS_STENCILENABLE2SIDED = 57, /* SVGA3dBool */
+ SVGA3D_RS_CCWSTENCILFUNC = 58, /* SVGA3dCmpFunc */
+ SVGA3D_RS_CCWSTENCILFAIL = 59, /* SVGA3dStencilOp */
+ SVGA3D_RS_CCWSTENCILZFAIL = 60, /* SVGA3dStencilOp */
+ SVGA3D_RS_CCWSTENCILPASS = 61, /* SVGA3dStencilOp */
+ SVGA3D_RS_VERTEXBLEND = 62, /* SVGA3dVertexBlendFlags */
+ SVGA3D_RS_SLOPESCALEDEPTHBIAS = 63, /* float */
+ SVGA3D_RS_DEPTHBIAS = 64, /* float */
+
+
+ /*
+ * Output Gamma Level
+ *
+ * Output gamma affects the gamma curve of colors that are output from the
+ * rendering pipeline. A value of 1.0 specifies a linear color space. If the
+ * value is <= 0.0, gamma correction is ignored and linear color space is
+ * used.
+ */
+
+ SVGA3D_RS_OUTPUTGAMMA = 65, /* float */
+ SVGA3D_RS_ZVISIBLE = 66, /* SVGA3dBool */
+ SVGA3D_RS_LASTPIXEL = 67, /* SVGA3dBool */
+ SVGA3D_RS_CLIPPING = 68, /* SVGA3dBool */
+ SVGA3D_RS_WRAP0 = 69, /* SVGA3dWrapFlags */
+ SVGA3D_RS_WRAP1 = 70, /* SVGA3dWrapFlags */
+ SVGA3D_RS_WRAP2 = 71, /* SVGA3dWrapFlags */
+ SVGA3D_RS_WRAP3 = 72, /* SVGA3dWrapFlags */
+ SVGA3D_RS_WRAP4 = 73, /* SVGA3dWrapFlags */
+ SVGA3D_RS_WRAP5 = 74, /* SVGA3dWrapFlags */
+ SVGA3D_RS_WRAP6 = 75, /* SVGA3dWrapFlags */
+ SVGA3D_RS_WRAP7 = 76, /* SVGA3dWrapFlags */
+ SVGA3D_RS_WRAP8 = 77, /* SVGA3dWrapFlags */
+ SVGA3D_RS_WRAP9 = 78, /* SVGA3dWrapFlags */
+ SVGA3D_RS_WRAP10 = 79, /* SVGA3dWrapFlags */
+ SVGA3D_RS_WRAP11 = 80, /* SVGA3dWrapFlags */
+ SVGA3D_RS_WRAP12 = 81, /* SVGA3dWrapFlags */
+ SVGA3D_RS_WRAP13 = 82, /* SVGA3dWrapFlags */
+ SVGA3D_RS_WRAP14 = 83, /* SVGA3dWrapFlags */
+ SVGA3D_RS_WRAP15 = 84, /* SVGA3dWrapFlags */
+ SVGA3D_RS_MULTISAMPLEANTIALIAS = 85, /* SVGA3dBool */
+ SVGA3D_RS_MULTISAMPLEMASK = 86, /* uint32 */
+ SVGA3D_RS_INDEXEDVERTEXBLENDENABLE = 87, /* SVGA3dBool */
+ SVGA3D_RS_TWEENFACTOR = 88, /* float */
+ SVGA3D_RS_ANTIALIASEDLINEENABLE = 89, /* SVGA3dBool */
+ SVGA3D_RS_COLORWRITEENABLE1 = 90, /* SVGA3dColorMask */
+ SVGA3D_RS_COLORWRITEENABLE2 = 91, /* SVGA3dColorMask */
+ SVGA3D_RS_COLORWRITEENABLE3 = 92, /* SVGA3dColorMask */
+ SVGA3D_RS_SEPARATEALPHABLENDENABLE = 93, /* SVGA3dBool */
+ SVGA3D_RS_SRCBLENDALPHA = 94, /* SVGA3dBlendOp */
+ SVGA3D_RS_DSTBLENDALPHA = 95, /* SVGA3dBlendOp */
+ SVGA3D_RS_BLENDEQUATIONALPHA = 96, /* SVGA3dBlendEquation */
+ SVGA3D_RS_TRANSPARENCYANTIALIAS = 97, /* SVGA3dTransparencyAntialiasType */
+ SVGA3D_RS_LINEWIDTH = 98, /* float */
+ SVGA3D_RS_MAX
+} SVGA3dRenderStateName;
+
+typedef enum {
+ SVGA3D_TRANSPARENCYANTIALIAS_NORMAL = 0,
+ SVGA3D_TRANSPARENCYANTIALIAS_ALPHATOCOVERAGE = 1,
+ SVGA3D_TRANSPARENCYANTIALIAS_SUPERSAMPLE = 2,
+ SVGA3D_TRANSPARENCYANTIALIAS_MAX
+} SVGA3dTransparencyAntialiasType;
+
+typedef enum {
+ SVGA3D_VERTEXMATERIAL_NONE = 0, /* Use the value in the current material */
+ SVGA3D_VERTEXMATERIAL_DIFFUSE = 1, /* Use the value in the diffuse component */
+ SVGA3D_VERTEXMATERIAL_SPECULAR = 2, /* Use the value in the specular component */
+ SVGA3D_VERTEXMATERIAL_MAX = 3,
+} SVGA3dVertexMaterial;
+
+typedef enum {
+ SVGA3D_FILLMODE_INVALID = 0,
+ SVGA3D_FILLMODE_MIN = 1,
+ SVGA3D_FILLMODE_POINT = 1,
+ SVGA3D_FILLMODE_LINE = 2,
+ SVGA3D_FILLMODE_FILL = 3,
+ SVGA3D_FILLMODE_MAX
+} SVGA3dFillModeType;
+
+
+typedef
+#include "vmware_pack_begin.h"
+union {
+ struct {
+ uint16 mode; /* SVGA3dFillModeType */
+ uint16 face; /* SVGA3dFace */
+ };
+ uint32 uintValue;
+}
+#include "vmware_pack_end.h"
+SVGA3dFillMode;
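+
+/*
+ * Hedged sketch, not part of the device headers: an SVGA3D_RS_FILLMODE
+ * render state value packs the fill mode and the face selection into a
+ * single uint32 via the union above.
+ */
+static inline uint32
+svga3dFillModeValue(uint16 mode, /* SVGA3dFillModeType */
+                    uint16 face) /* SVGA3dFace, defined below */
+{
+   SVGA3dFillMode fm;
+
+   fm.mode = mode;
+   fm.face = face;
+   return fm.uintValue;
+}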
+
+typedef enum {
+ SVGA3D_SHADEMODE_INVALID = 0,
+ SVGA3D_SHADEMODE_FLAT = 1,
+ SVGA3D_SHADEMODE_SMOOTH = 2,
+ SVGA3D_SHADEMODE_PHONG = 3, /* Not supported */
+ SVGA3D_SHADEMODE_MAX
+} SVGA3dShadeMode;
+
+typedef
+#include "vmware_pack_begin.h"
+union {
+ struct {
+ uint16 repeat;
+ uint16 pattern;
+ };
+ uint32 uintValue;
+}
+#include "vmware_pack_end.h"
+SVGA3dLinePattern;
+
+typedef enum {
+ SVGA3D_BLENDOP_INVALID = 0,
+ SVGA3D_BLENDOP_MIN = 1,
+ SVGA3D_BLENDOP_ZERO = 1,
+ SVGA3D_BLENDOP_ONE = 2,
+ SVGA3D_BLENDOP_SRCCOLOR = 3,
+ SVGA3D_BLENDOP_INVSRCCOLOR = 4,
+ SVGA3D_BLENDOP_SRCALPHA = 5,
+ SVGA3D_BLENDOP_INVSRCALPHA = 6,
+ SVGA3D_BLENDOP_DESTALPHA = 7,
+ SVGA3D_BLENDOP_INVDESTALPHA = 8,
+ SVGA3D_BLENDOP_DESTCOLOR = 9,
+ SVGA3D_BLENDOP_INVDESTCOLOR = 10,
+ SVGA3D_BLENDOP_SRCALPHASAT = 11,
+ SVGA3D_BLENDOP_BLENDFACTOR = 12,
+ SVGA3D_BLENDOP_INVBLENDFACTOR = 13,
+ SVGA3D_BLENDOP_SRC1COLOR = 14,
+ SVGA3D_BLENDOP_INVSRC1COLOR = 15,
+ SVGA3D_BLENDOP_SRC1ALPHA = 16,
+ SVGA3D_BLENDOP_INVSRC1ALPHA = 17,
+ SVGA3D_BLENDOP_BLENDFACTORALPHA = 18,
+ SVGA3D_BLENDOP_INVBLENDFACTORALPHA = 19,
+ SVGA3D_BLENDOP_MAX
+} SVGA3dBlendOp;
+
+typedef enum {
+ SVGA3D_BLENDEQ_INVALID = 0,
+ SVGA3D_BLENDEQ_MIN = 1,
+ SVGA3D_BLENDEQ_ADD = 1,
+ SVGA3D_BLENDEQ_SUBTRACT = 2,
+ SVGA3D_BLENDEQ_REVSUBTRACT = 3,
+ SVGA3D_BLENDEQ_MINIMUM = 4,
+ SVGA3D_BLENDEQ_MAXIMUM = 5,
+ SVGA3D_BLENDEQ_MAX
+} SVGA3dBlendEquation;
+
+typedef enum {
+ SVGA3D_DX11_LOGICOP_MIN = 0,
+ SVGA3D_DX11_LOGICOP_CLEAR = 0,
+ SVGA3D_DX11_LOGICOP_SET = 1,
+ SVGA3D_DX11_LOGICOP_COPY = 2,
+ SVGA3D_DX11_LOGICOP_COPY_INVERTED = 3,
+ SVGA3D_DX11_LOGICOP_NOOP = 4,
+ SVGA3D_DX11_LOGICOP_INVERT = 5,
+ SVGA3D_DX11_LOGICOP_AND = 6,
+ SVGA3D_DX11_LOGICOP_NAND = 7,
+ SVGA3D_DX11_LOGICOP_OR = 8,
+ SVGA3D_DX11_LOGICOP_NOR = 9,
+ SVGA3D_DX11_LOGICOP_XOR = 10,
+ SVGA3D_DX11_LOGICOP_EQUIV = 11,
+ SVGA3D_DX11_LOGICOP_AND_REVERSE = 12,
+ SVGA3D_DX11_LOGICOP_AND_INVERTED = 13,
+ SVGA3D_DX11_LOGICOP_OR_REVERSE = 14,
+ SVGA3D_DX11_LOGICOP_OR_INVERTED = 15,
+ SVGA3D_DX11_LOGICOP_MAX
+} SVGA3dDX11LogicOp;
+
+typedef enum {
+ SVGA3D_FRONTWINDING_INVALID = 0,
+ SVGA3D_FRONTWINDING_CW = 1,
+ SVGA3D_FRONTWINDING_CCW = 2,
+ SVGA3D_FRONTWINDING_MAX
+} SVGA3dFrontWinding;
+
+typedef enum {
+ SVGA3D_FACE_INVALID = 0,
+ SVGA3D_FACE_NONE = 1,
+ SVGA3D_FACE_MIN = 1,
+ SVGA3D_FACE_FRONT = 2,
+ SVGA3D_FACE_BACK = 3,
+ SVGA3D_FACE_FRONT_BACK = 4,
+ SVGA3D_FACE_MAX
+} SVGA3dFace;
+
+/*
+ * The order and the values should not be changed
+ */
+
+typedef enum {
+ SVGA3D_CMP_INVALID = 0,
+ SVGA3D_CMP_NEVER = 1,
+ SVGA3D_CMP_LESS = 2,
+ SVGA3D_CMP_EQUAL = 3,
+ SVGA3D_CMP_LESSEQUAL = 4,
+ SVGA3D_CMP_GREATER = 5,
+ SVGA3D_CMP_NOTEQUAL = 6,
+ SVGA3D_CMP_GREATEREQUAL = 7,
+ SVGA3D_CMP_ALWAYS = 8,
+ SVGA3D_CMP_MAX
+} SVGA3dCmpFunc;
+
+/*
+ * SVGA3D_FOGFUNC_* specifies the fog equation, or PER_VERTEX which allows
+ * the fog factor to be specified in the alpha component of the specular
+ * (a.k.a. secondary) vertex color.
+ */
+typedef enum {
+ SVGA3D_FOGFUNC_INVALID = 0,
+ SVGA3D_FOGFUNC_EXP = 1,
+ SVGA3D_FOGFUNC_EXP2 = 2,
+ SVGA3D_FOGFUNC_LINEAR = 3,
+ SVGA3D_FOGFUNC_PER_VERTEX = 4
+} SVGA3dFogFunction;
+
+/*
+ * SVGA3D_FOGTYPE_* specifies if fog factors are computed on a per-vertex
+ * or per-pixel basis.
+ */
+typedef enum {
+ SVGA3D_FOGTYPE_INVALID = 0,
+ SVGA3D_FOGTYPE_VERTEX = 1,
+ SVGA3D_FOGTYPE_PIXEL = 2,
+ SVGA3D_FOGTYPE_MAX = 3
+} SVGA3dFogType;
+
+/*
+ * SVGA3D_FOGBASE_* selects depth or range-based fog. Depth-based fog is
+ * computed using the eye Z value of each pixel (or vertex), whereas range-
+ * based fog is computed using the actual distance (range) to the eye.
+ */
+typedef enum {
+ SVGA3D_FOGBASE_INVALID = 0,
+ SVGA3D_FOGBASE_DEPTHBASED = 1,
+ SVGA3D_FOGBASE_RANGEBASED = 2,
+ SVGA3D_FOGBASE_MAX = 3
+} SVGA3dFogBase;
+
+typedef enum {
+ SVGA3D_STENCILOP_INVALID = 0,
+ SVGA3D_STENCILOP_MIN = 1,
+ SVGA3D_STENCILOP_KEEP = 1,
+ SVGA3D_STENCILOP_ZERO = 2,
+ SVGA3D_STENCILOP_REPLACE = 3,
+ SVGA3D_STENCILOP_INCRSAT = 4,
+ SVGA3D_STENCILOP_DECRSAT = 5,
+ SVGA3D_STENCILOP_INVERT = 6,
+ SVGA3D_STENCILOP_INCR = 7,
+ SVGA3D_STENCILOP_DECR = 8,
+ SVGA3D_STENCILOP_MAX
+} SVGA3dStencilOp;
+
+typedef enum {
+ SVGA3D_CLIPPLANE_0 = (1 << 0),
+ SVGA3D_CLIPPLANE_1 = (1 << 1),
+ SVGA3D_CLIPPLANE_2 = (1 << 2),
+ SVGA3D_CLIPPLANE_3 = (1 << 3),
+ SVGA3D_CLIPPLANE_4 = (1 << 4),
+ SVGA3D_CLIPPLANE_5 = (1 << 5),
+} SVGA3dClipPlanes;
+
+typedef enum {
+ SVGA3D_CLEAR_COLOR = 0x1,
+ SVGA3D_CLEAR_DEPTH = 0x2,
+ SVGA3D_CLEAR_STENCIL = 0x4,
+
+ /*
+ * Hint only, must be used together with SVGA3D_CLEAR_COLOR. If
+ * SVGA3D_CLEAR_DEPTH or SVGA3D_CLEAR_STENCIL bit is set, this
+ * bit will be ignored.
+ */
+ SVGA3D_CLEAR_COLORFILL = 0x8
+} SVGA3dClearFlag;
+
+typedef enum {
+ SVGA3D_RT_DEPTH = 0,
+ SVGA3D_RT_MIN = 0,
+ SVGA3D_RT_STENCIL = 1,
+ SVGA3D_RT_COLOR0 = 2,
+ SVGA3D_RT_COLOR1 = 3,
+ SVGA3D_RT_COLOR2 = 4,
+ SVGA3D_RT_COLOR3 = 5,
+ SVGA3D_RT_COLOR4 = 6,
+ SVGA3D_RT_COLOR5 = 7,
+ SVGA3D_RT_COLOR6 = 8,
+ SVGA3D_RT_COLOR7 = 9,
+ SVGA3D_RT_MAX,
+ SVGA3D_RT_INVALID = ((uint32)-1),
+} SVGA3dRenderTargetType;
+
+#define SVGA3D_MAX_RT_COLOR (SVGA3D_RT_COLOR7 - SVGA3D_RT_COLOR0 + 1)
+
+typedef
+#include "vmware_pack_begin.h"
+union {
+ struct {
+ uint32 red : 1;
+ uint32 green : 1;
+ uint32 blue : 1;
+ uint32 alpha : 1;
+ };
+ uint32 uintValue;
+}
+#include "vmware_pack_end.h"
+SVGA3dColorMask;
+
+typedef enum {
+ SVGA3D_VBLEND_DISABLE = 0,
+ SVGA3D_VBLEND_1WEIGHT = 1,
+ SVGA3D_VBLEND_2WEIGHT = 2,
+ SVGA3D_VBLEND_3WEIGHT = 3,
+ SVGA3D_VBLEND_MAX = 4,
+} SVGA3dVertexBlendFlags;
+
+typedef enum {
+ SVGA3D_WRAPCOORD_0 = 1 << 0,
+ SVGA3D_WRAPCOORD_1 = 1 << 1,
+ SVGA3D_WRAPCOORD_2 = 1 << 2,
+ SVGA3D_WRAPCOORD_3 = 1 << 3,
+ SVGA3D_WRAPCOORD_ALL = 0xF,
+} SVGA3dWrapFlags;
+
+/*
+ * SVGA_3D_CMD_TEXTURESTATE Types. All value types
+ * must fit in a uint32.
+ */
+
+typedef enum {
+ SVGA3D_TS_INVALID = 0,
+ SVGA3D_TS_MIN = 1,
+ SVGA3D_TS_BIND_TEXTURE = 1, /* SVGA3dSurfaceId */
+ SVGA3D_TS_COLOROP = 2, /* SVGA3dTextureCombiner */
+ SVGA3D_TS_COLORARG1 = 3, /* SVGA3dTextureArgData */
+ SVGA3D_TS_COLORARG2 = 4, /* SVGA3dTextureArgData */
+ SVGA3D_TS_ALPHAOP = 5, /* SVGA3dTextureCombiner */
+ SVGA3D_TS_ALPHAARG1 = 6, /* SVGA3dTextureArgData */
+ SVGA3D_TS_ALPHAARG2 = 7, /* SVGA3dTextureArgData */
+ SVGA3D_TS_ADDRESSU = 8, /* SVGA3dTextureAddress */
+ SVGA3D_TS_ADDRESSV = 9, /* SVGA3dTextureAddress */
+ SVGA3D_TS_MIPFILTER = 10, /* SVGA3dTextureFilter */
+ SVGA3D_TS_MAGFILTER = 11, /* SVGA3dTextureFilter */
+ SVGA3D_TS_MINFILTER = 12, /* SVGA3dTextureFilter */
+ SVGA3D_TS_BORDERCOLOR = 13, /* SVGA3dColor */
+ SVGA3D_TS_TEXCOORDINDEX = 14, /* uint32 */
+ SVGA3D_TS_TEXTURETRANSFORMFLAGS = 15, /* SVGA3dTexTransformFlags */
+ SVGA3D_TS_TEXCOORDGEN = 16, /* SVGA3dTextureCoordGen */
+ SVGA3D_TS_BUMPENVMAT00 = 17, /* float */
+ SVGA3D_TS_BUMPENVMAT01 = 18, /* float */
+ SVGA3D_TS_BUMPENVMAT10 = 19, /* float */
+ SVGA3D_TS_BUMPENVMAT11 = 20, /* float */
+ SVGA3D_TS_TEXTURE_MIPMAP_LEVEL = 21, /* uint32 */
+ SVGA3D_TS_TEXTURE_LOD_BIAS = 22, /* float */
+ SVGA3D_TS_TEXTURE_ANISOTROPIC_LEVEL = 23, /* uint32 */
+ SVGA3D_TS_ADDRESSW = 24, /* SVGA3dTextureAddress */
+
+
+ /*
+ * Sampler Gamma Level
+ *
+ * Sampler gamma affects the color of samples taken from the sampler. A
+ * value of 1.0 will produce linear samples. If the value is <= 0.0 the
+ * gamma value is ignored and a linear space is used.
+ */
+
+ SVGA3D_TS_GAMMA = 25, /* float */
+ SVGA3D_TS_BUMPENVLSCALE = 26, /* float */
+ SVGA3D_TS_BUMPENVLOFFSET = 27, /* float */
+ SVGA3D_TS_COLORARG0 = 28, /* SVGA3dTextureArgData */
+ SVGA3D_TS_ALPHAARG0 = 29, /* SVGA3dTextureArgData */
+ SVGA3D_TS_PREGB_MAX = 30, /* Max value before GBObjects */
+ SVGA3D_TS_CONSTANT = 30, /* SVGA3dColor */
+ SVGA3D_TS_COLOR_KEY_ENABLE = 31, /* SVGA3dBool */
+ SVGA3D_TS_COLOR_KEY = 32, /* SVGA3dColor */
+ SVGA3D_TS_MAX
+} SVGA3dTextureStateName;
+
+typedef enum {
+ SVGA3D_TC_INVALID = 0,
+ SVGA3D_TC_DISABLE = 1,
+ SVGA3D_TC_SELECTARG1 = 2,
+ SVGA3D_TC_SELECTARG2 = 3,
+ SVGA3D_TC_MODULATE = 4,
+ SVGA3D_TC_ADD = 5,
+ SVGA3D_TC_ADDSIGNED = 6,
+ SVGA3D_TC_SUBTRACT = 7,
+ SVGA3D_TC_BLENDTEXTUREALPHA = 8,
+ SVGA3D_TC_BLENDDIFFUSEALPHA = 9,
+ SVGA3D_TC_BLENDCURRENTALPHA = 10,
+ SVGA3D_TC_BLENDFACTORALPHA = 11,
+ SVGA3D_TC_MODULATE2X = 12,
+ SVGA3D_TC_MODULATE4X = 13,
+ SVGA3D_TC_DSDT = 14,
+ SVGA3D_TC_DOTPRODUCT3 = 15,
+ SVGA3D_TC_BLENDTEXTUREALPHAPM = 16,
+ SVGA3D_TC_ADDSIGNED2X = 17,
+ SVGA3D_TC_ADDSMOOTH = 18,
+ SVGA3D_TC_PREMODULATE = 19,
+ SVGA3D_TC_MODULATEALPHA_ADDCOLOR = 20,
+ SVGA3D_TC_MODULATECOLOR_ADDALPHA = 21,
+ SVGA3D_TC_MODULATEINVALPHA_ADDCOLOR = 22,
+ SVGA3D_TC_MODULATEINVCOLOR_ADDALPHA = 23,
+ SVGA3D_TC_BUMPENVMAPLUMINANCE = 24,
+ SVGA3D_TC_MULTIPLYADD = 25,
+ SVGA3D_TC_LERP = 26,
+ SVGA3D_TC_MAX
+} SVGA3dTextureCombiner;
+
+#define SVGA3D_TC_CAP_BIT(svga3d_tc_op) (svga3d_tc_op ? (1 << (svga3d_tc_op - 1)) : 0)
+
+typedef enum {
+ SVGA3D_TEX_ADDRESS_INVALID = 0,
+ SVGA3D_TEX_ADDRESS_MIN = 1,
+ SVGA3D_TEX_ADDRESS_WRAP = 1,
+ SVGA3D_TEX_ADDRESS_MIRROR = 2,
+ SVGA3D_TEX_ADDRESS_CLAMP = 3,
+ SVGA3D_TEX_ADDRESS_BORDER = 4,
+ SVGA3D_TEX_ADDRESS_MIRRORONCE = 5,
+ SVGA3D_TEX_ADDRESS_EDGE = 6,
+ SVGA3D_TEX_ADDRESS_MAX
+} SVGA3dTextureAddress;
+
+/*
+ * SVGA3D_TEX_FILTER_NONE as the minification filter means mipmapping is
+ * disabled, and the rasterizer should use the magnification filter instead.
+ */
+typedef enum {
+ SVGA3D_TEX_FILTER_NONE = 0,
+ SVGA3D_TEX_FILTER_MIN = 0,
+ SVGA3D_TEX_FILTER_NEAREST = 1,
+ SVGA3D_TEX_FILTER_LINEAR = 2,
+ SVGA3D_TEX_FILTER_ANISOTROPIC = 3,
+ SVGA3D_TEX_FILTER_FLATCUBIC = 4, /* Deprecated, not implemented */
+ SVGA3D_TEX_FILTER_GAUSSIANCUBIC = 5, /* Deprecated, not implemented */
+ SVGA3D_TEX_FILTER_PYRAMIDALQUAD = 6, /* Not currently implemented */
+ SVGA3D_TEX_FILTER_GAUSSIANQUAD = 7, /* Not currently implemented */
+ SVGA3D_TEX_FILTER_MAX
+} SVGA3dTextureFilter;
+
+typedef enum {
+ SVGA3D_TEX_TRANSFORM_OFF = 0,
+ SVGA3D_TEX_TRANSFORM_S = (1 << 0),
+ SVGA3D_TEX_TRANSFORM_T = (1 << 1),
+ SVGA3D_TEX_TRANSFORM_R = (1 << 2),
+ SVGA3D_TEX_TRANSFORM_Q = (1 << 3),
+ SVGA3D_TEX_PROJECTED = (1 << 15),
+} SVGA3dTexTransformFlags;
+
+typedef enum {
+ SVGA3D_TEXCOORD_GEN_OFF = 0,
+ SVGA3D_TEXCOORD_GEN_EYE_POSITION = 1,
+ SVGA3D_TEXCOORD_GEN_EYE_NORMAL = 2,
+ SVGA3D_TEXCOORD_GEN_REFLECTIONVECTOR = 3,
+ SVGA3D_TEXCOORD_GEN_SPHERE = 4,
+ SVGA3D_TEXCOORD_GEN_MAX
+} SVGA3dTextureCoordGen;
+
+/*
+ * Texture argument constants for texture combiner
+ */
+typedef enum {
+ SVGA3D_TA_INVALID = 0,
+ SVGA3D_TA_TFACTOR = 1,
+ SVGA3D_TA_PREVIOUS = 2,
+ SVGA3D_TA_DIFFUSE = 3,
+ SVGA3D_TA_TEXTURE = 4,
+ SVGA3D_TA_SPECULAR = 5,
+ SVGA3D_TA_CONSTANT = 6,
+ SVGA3D_TA_MAX
+} SVGA3dTextureArgData;
+
+#define SVGA3D_TM_MASK_LEN 4
+
+/* Modifiers for texture argument constants defined above. */
+typedef enum {
+ SVGA3D_TM_NONE = 0,
+ SVGA3D_TM_ALPHA = (1 << SVGA3D_TM_MASK_LEN),
+ SVGA3D_TM_ONE_MINUS = (2 << SVGA3D_TM_MASK_LEN),
+} SVGA3dTextureArgModifier;
+
+/*
+ * Vertex declarations
+ *
+ * Notes:
+ *
+ * SVGA3D_DECLUSAGE_POSITIONT is for pre-transformed vertices. If you
+ * draw with any POSITIONT vertex arrays, the programmable vertex
+ * pipeline will be implicitly disabled. Drawing will take place as if
+ * no vertex shader was bound.
+ */
+
+typedef enum {
+ SVGA3D_DECLUSAGE_POSITION = 0,
+ SVGA3D_DECLUSAGE_BLENDWEIGHT,
+ SVGA3D_DECLUSAGE_BLENDINDICES,
+ SVGA3D_DECLUSAGE_NORMAL,
+ SVGA3D_DECLUSAGE_PSIZE,
+ SVGA3D_DECLUSAGE_TEXCOORD,
+ SVGA3D_DECLUSAGE_TANGENT,
+ SVGA3D_DECLUSAGE_BINORMAL,
+ SVGA3D_DECLUSAGE_TESSFACTOR,
+ SVGA3D_DECLUSAGE_POSITIONT,
+ SVGA3D_DECLUSAGE_COLOR,
+ SVGA3D_DECLUSAGE_FOG,
+ SVGA3D_DECLUSAGE_DEPTH,
+ SVGA3D_DECLUSAGE_SAMPLE,
+ SVGA3D_DECLUSAGE_MAX
+} SVGA3dDeclUsage;
+
+typedef enum {
+ SVGA3D_DECLMETHOD_DEFAULT = 0,
+ SVGA3D_DECLMETHOD_PARTIALU,
+ SVGA3D_DECLMETHOD_PARTIALV,
+ SVGA3D_DECLMETHOD_CROSSUV, /* Normal */
+ SVGA3D_DECLMETHOD_UV,
+ SVGA3D_DECLMETHOD_LOOKUP, /* Lookup a displacement map */
+ SVGA3D_DECLMETHOD_LOOKUPPRESAMPLED, /* Lookup a pre-sampled displacement */
+ /* map */
+} SVGA3dDeclMethod;
+
+typedef enum {
+ SVGA3D_DECLTYPE_FLOAT1 = 0,
+ SVGA3D_DECLTYPE_FLOAT2 = 1,
+ SVGA3D_DECLTYPE_FLOAT3 = 2,
+ SVGA3D_DECLTYPE_FLOAT4 = 3,
+ SVGA3D_DECLTYPE_D3DCOLOR = 4,
+ SVGA3D_DECLTYPE_UBYTE4 = 5,
+ SVGA3D_DECLTYPE_SHORT2 = 6,
+ SVGA3D_DECLTYPE_SHORT4 = 7,
+ SVGA3D_DECLTYPE_UBYTE4N = 8,
+ SVGA3D_DECLTYPE_SHORT2N = 9,
+ SVGA3D_DECLTYPE_SHORT4N = 10,
+ SVGA3D_DECLTYPE_USHORT2N = 11,
+ SVGA3D_DECLTYPE_USHORT4N = 12,
+ SVGA3D_DECLTYPE_UDEC3 = 13,
+ SVGA3D_DECLTYPE_DEC3N = 14,
+ SVGA3D_DECLTYPE_FLOAT16_2 = 15,
+ SVGA3D_DECLTYPE_FLOAT16_4 = 16,
+ SVGA3D_DECLTYPE_MAX,
+} SVGA3dDeclType;
+
+/*
+ * This structure is used for the divisor for geometry instancing;
+ * it's a direct translation of the Direct3D equivalent.
+ */
+typedef union {
+ struct {
+ /*
+ * For index data, this number represents the number of instances to draw.
+ * For instance data, this number represents the number of
+ * instances/vertex in this stream
+ */
+ uint32 count : 30;
+
+ /*
+ * This is 1 if this is supposed to be the data that is repeated for
+ * every instance.
+ */
+ uint32 indexedData : 1;
+
+ /*
+ * This is 1 if this is supposed to be the per-instance data.
+ */
+ uint32 instanceData : 1;
+ };
+
+ uint32 value;
+} SVGA3dVertexDivisor;
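+
+/*
+ * Hedged usage sketch, not part of the device headers: a divisor for a
+ * stream that advances once per instance. Field semantics follow the
+ * comments in the union above.
+ */
+static inline SVGA3dVertexDivisor
+svga3dPerInstanceDivisor(void)
+{
+   SVGA3dVertexDivisor div;
+
+   div.value = 0;
+   div.instanceData = 1; /* this stream carries per-instance data */
+   div.count = 1;        /* advance once per instance */
+   return div;
+}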
+
+typedef enum {
+ /*
+ * SVGA3D_PRIMITIVE_INVALID is a valid primitive type.
+ *
+ * List MIN second so debuggers will think INVALID is
+ * the correct name.
+ */
+ SVGA3D_PRIMITIVE_INVALID = 0,
+ SVGA3D_PRIMITIVE_MIN = 0,
+ SVGA3D_PRIMITIVE_TRIANGLELIST = 1,
+ SVGA3D_PRIMITIVE_POINTLIST = 2,
+ SVGA3D_PRIMITIVE_LINELIST = 3,
+ SVGA3D_PRIMITIVE_LINESTRIP = 4,
+ SVGA3D_PRIMITIVE_TRIANGLESTRIP = 5,
+ SVGA3D_PRIMITIVE_TRIANGLEFAN = 6,
+ SVGA3D_PRIMITIVE_LINELIST_ADJ = 7,
+ SVGA3D_PRIMITIVE_PREDX_MAX = 7,
+ SVGA3D_PRIMITIVE_LINESTRIP_ADJ = 8,
+ SVGA3D_PRIMITIVE_TRIANGLELIST_ADJ = 9,
+ SVGA3D_PRIMITIVE_TRIANGLESTRIP_ADJ = 10,
+ SVGA3D_PRIMITIVE_MAX
+} SVGA3dPrimitiveType;
+
+typedef enum {
+ SVGA3D_COORDINATE_INVALID = 0,
+ SVGA3D_COORDINATE_LEFTHANDED = 1,
+ SVGA3D_COORDINATE_RIGHTHANDED = 2,
+ SVGA3D_COORDINATE_MAX
+} SVGA3dCoordinateType;
+
+typedef enum {
+ SVGA3D_TRANSFORM_INVALID = 0,
+ SVGA3D_TRANSFORM_WORLD = 1,
+ SVGA3D_TRANSFORM_MIN = 1,
+ SVGA3D_TRANSFORM_VIEW = 2,
+ SVGA3D_TRANSFORM_PROJECTION = 3,
+ SVGA3D_TRANSFORM_TEXTURE0 = 4,
+ SVGA3D_TRANSFORM_TEXTURE1 = 5,
+ SVGA3D_TRANSFORM_TEXTURE2 = 6,
+ SVGA3D_TRANSFORM_TEXTURE3 = 7,
+ SVGA3D_TRANSFORM_TEXTURE4 = 8,
+ SVGA3D_TRANSFORM_TEXTURE5 = 9,
+ SVGA3D_TRANSFORM_TEXTURE6 = 10,
+ SVGA3D_TRANSFORM_TEXTURE7 = 11,
+ SVGA3D_TRANSFORM_WORLD1 = 12,
+ SVGA3D_TRANSFORM_WORLD2 = 13,
+ SVGA3D_TRANSFORM_WORLD3 = 14,
+ SVGA3D_TRANSFORM_MAX
+} SVGA3dTransformType;
+
+typedef enum {
+ SVGA3D_LIGHTTYPE_INVALID = 0,
+ SVGA3D_LIGHTTYPE_MIN = 1,
+ SVGA3D_LIGHTTYPE_POINT = 1,
+ SVGA3D_LIGHTTYPE_SPOT1 = 2, /* 1-cone, in degrees */
+ SVGA3D_LIGHTTYPE_SPOT2 = 3, /* 2-cone, in radians */
+ SVGA3D_LIGHTTYPE_DIRECTIONAL = 4,
+ SVGA3D_LIGHTTYPE_MAX
+} SVGA3dLightType;
+
+typedef enum {
+ SVGA3D_CUBEFACE_POSX = 0,
+ SVGA3D_CUBEFACE_NEGX = 1,
+ SVGA3D_CUBEFACE_POSY = 2,
+ SVGA3D_CUBEFACE_NEGY = 3,
+ SVGA3D_CUBEFACE_POSZ = 4,
+ SVGA3D_CUBEFACE_NEGZ = 5,
+} SVGA3dCubeFace;
+
+typedef enum {
+ SVGA3D_SHADERTYPE_INVALID = 0,
+ SVGA3D_SHADERTYPE_MIN = 1,
+ SVGA3D_SHADERTYPE_VS = 1,
+ SVGA3D_SHADERTYPE_PS = 2,
+ SVGA3D_SHADERTYPE_PREDX_MAX = 3,
+ SVGA3D_SHADERTYPE_GS = 3,
+ SVGA3D_SHADERTYPE_DX10_MAX = 4,
+ SVGA3D_SHADERTYPE_HS = 4,
+ SVGA3D_SHADERTYPE_DS = 5,
+ SVGA3D_SHADERTYPE_CS = 6,
+ SVGA3D_SHADERTYPE_MAX = 7
+} SVGA3dShaderType;
+
+#define SVGA3D_NUM_SHADERTYPE_PREDX \
+ (SVGA3D_SHADERTYPE_PREDX_MAX - SVGA3D_SHADERTYPE_MIN)
+
+#define SVGA3D_NUM_SHADERTYPE_DX10 \
+ (SVGA3D_SHADERTYPE_DX10_MAX - SVGA3D_SHADERTYPE_MIN)
+
+#define SVGA3D_NUM_SHADERTYPE \
+ (SVGA3D_SHADERTYPE_MAX - SVGA3D_SHADERTYPE_MIN)
+
+typedef enum {
+ SVGA3D_CONST_TYPE_MIN = 0,
+ SVGA3D_CONST_TYPE_FLOAT = 0,
+ SVGA3D_CONST_TYPE_INT = 1,
+ SVGA3D_CONST_TYPE_BOOL = 2,
+ SVGA3D_CONST_TYPE_MAX = 3,
+} SVGA3dShaderConstType;
+
+/*
+ * Register limits for shader consts.
+ */
+#define SVGA3D_CONSTREG_MAX 256
+#define SVGA3D_CONSTINTREG_MAX 16
+#define SVGA3D_CONSTBOOLREG_MAX 16
+
+typedef enum {
+ SVGA3D_STRETCH_BLT_POINT = 0,
+ SVGA3D_STRETCH_BLT_LINEAR = 1,
+ SVGA3D_STRETCH_BLT_MAX
+} SVGA3dStretchBltMode;
+
+typedef enum {
+ SVGA3D_QUERYTYPE_INVALID = ((uint8)-1),
+ SVGA3D_QUERYTYPE_MIN = 0,
+ SVGA3D_QUERYTYPE_OCCLUSION = 0,
+ SVGA3D_QUERYTYPE_TIMESTAMP = 1,
+ SVGA3D_QUERYTYPE_TIMESTAMPDISJOINT = 2,
+ SVGA3D_QUERYTYPE_PIPELINESTATS = 3,
+ SVGA3D_QUERYTYPE_OCCLUSIONPREDICATE = 4,
+ SVGA3D_QUERYTYPE_STREAMOUTPUTSTATS = 5,
+ SVGA3D_QUERYTYPE_STREAMOVERFLOWPREDICATE = 6,
+ SVGA3D_QUERYTYPE_OCCLUSION64 = 7,
+ SVGA3D_QUERYTYPE_EVENT = 8,
+ SVGA3D_QUERYTYPE_DX10_MAX = 9,
+ SVGA3D_QUERYTYPE_SOSTATS_STREAM0 = 9,
+ SVGA3D_QUERYTYPE_SOSTATS_STREAM1 = 10,
+ SVGA3D_QUERYTYPE_SOSTATS_STREAM2 = 11,
+ SVGA3D_QUERYTYPE_SOSTATS_STREAM3 = 12,
+ SVGA3D_QUERYTYPE_SOP_STREAM0 = 13,
+ SVGA3D_QUERYTYPE_SOP_STREAM1 = 14,
+ SVGA3D_QUERYTYPE_SOP_STREAM2 = 15,
+ SVGA3D_QUERYTYPE_SOP_STREAM3 = 16,
+ SVGA3D_QUERYTYPE_MAX
+} SVGA3dQueryType;
+
+typedef uint8 SVGA3dQueryTypeUint8;
+
+#define SVGA3D_NUM_QUERYTYPE (SVGA3D_QUERYTYPE_MAX - SVGA3D_QUERYTYPE_MIN)
+
+/*
+ * This is the maximum number of queries per context that can be active
+ * simultaneously between a beginQuery and endQuery.
+ */
+#define SVGA3D_MAX_QUERY 64
+
+/*
+ * Query result buffer formats
+ */
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ uint32 samplesRendered;
+}
+#include "vmware_pack_end.h"
+SVGADXOcclusionQueryResult;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ uint32 passed;
+}
+#include "vmware_pack_end.h"
+SVGADXEventQueryResult;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ uint64 timestamp;
+}
+#include "vmware_pack_end.h"
+SVGADXTimestampQueryResult;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ uint64 realFrequency;
+ uint32 disjoint;
+}
+#include "vmware_pack_end.h"
+SVGADXTimestampDisjointQueryResult;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ uint64 inputAssemblyVertices;
+ uint64 inputAssemblyPrimitives;
+ uint64 vertexShaderInvocations;
+ uint64 geometryShaderInvocations;
+ uint64 geometryShaderPrimitives;
+ uint64 clipperInvocations;
+ uint64 clipperPrimitives;
+ uint64 pixelShaderInvocations;
+ uint64 hullShaderInvocations;
+ uint64 domainShaderInvocations;
+ uint64 computeShaderInvocations;
+}
+#include "vmware_pack_end.h"
+SVGADXPipelineStatisticsQueryResult;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ uint32 anySamplesRendered;
+}
+#include "vmware_pack_end.h"
+SVGADXOcclusionPredicateQueryResult;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ uint64 numPrimitivesWritten;
+ uint64 numPrimitivesRequired;
+}
+#include "vmware_pack_end.h"
+SVGADXStreamOutStatisticsQueryResult;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ uint32 overflowed;
+}
+#include "vmware_pack_end.h"
+SVGADXStreamOutPredicateQueryResult;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ uint64 samplesRendered;
+}
+#include "vmware_pack_end.h"
+SVGADXOcclusion64QueryResult;
+
+/*
+ * SVGADXQueryResultUnion is not intended for use in the protocol, but is
+ * very helpful when working with queries generically.
+ */
+typedef
+#include "vmware_pack_begin.h"
+union SVGADXQueryResultUnion {
+ SVGADXOcclusionQueryResult occ;
+ SVGADXEventQueryResult event;
+ SVGADXTimestampQueryResult ts;
+ SVGADXTimestampDisjointQueryResult tsDisjoint;
+ SVGADXPipelineStatisticsQueryResult pipelineStats;
+ SVGADXOcclusionPredicateQueryResult occPred;
+ SVGADXStreamOutStatisticsQueryResult soStats;
+ SVGADXStreamOutPredicateQueryResult soPred;
+ SVGADXOcclusion64QueryResult occ64;
+}
+#include "vmware_pack_end.h"
+SVGADXQueryResultUnion;
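+
+/*
+ * Hedged sketch, not part of the device headers: the union above lets
+ * generic code decode a result buffer by query type. Only the
+ * single-value query types are covered here, for illustration.
+ */
+static inline uint64
+svga3dQueryResultValue(SVGA3dQueryType type,
+                       const SVGADXQueryResultUnion *result)
+{
+   switch (type) {
+   case SVGA3D_QUERYTYPE_OCCLUSION:
+      return result->occ.samplesRendered;
+   case SVGA3D_QUERYTYPE_TIMESTAMP:
+      return result->ts.timestamp;
+   case SVGA3D_QUERYTYPE_OCCLUSION64:
+      return result->occ64.samplesRendered;
+   default:
+      return 0; /* the remaining types carry multi-field results */
+   }
+}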
+
+
+typedef enum {
+ SVGA3D_QUERYSTATE_PENDING = 0, /* Query is not finished yet */
+ SVGA3D_QUERYSTATE_SUCCEEDED = 1, /* Completed successfully */
+ SVGA3D_QUERYSTATE_FAILED = 2, /* Completed unsuccessfully */
+ SVGA3D_QUERYSTATE_NEW = 3, /* Never submitted (guest only) */
+} SVGA3dQueryState;
+
+typedef enum {
+ SVGA3D_WRITE_HOST_VRAM = 1,
+ SVGA3D_READ_HOST_VRAM = 2,
+} SVGA3dTransferType;
+
+typedef enum {
+ SVGA3D_LOGICOP_INVALID = 0,
+ SVGA3D_LOGICOP_MIN = 1,
+ SVGA3D_LOGICOP_COPY = 1,
+ SVGA3D_LOGICOP_NOT = 2,
+ SVGA3D_LOGICOP_AND = 3,
+ SVGA3D_LOGICOP_OR = 4,
+ SVGA3D_LOGICOP_XOR = 5,
+ SVGA3D_LOGICOP_NXOR = 6,
+ SVGA3D_LOGICOP_ROP3MIN = 30, /* 7-29 are reserved for future logic ops. */
+ SVGA3D_LOGICOP_ROP3MAX = (SVGA3D_LOGICOP_ROP3MIN + 255),
+ SVGA3D_LOGICOP_MAX = (SVGA3D_LOGICOP_ROP3MAX + 1),
+} SVGA3dLogicOp;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ union {
+ struct {
+ uint16 function; /* SVGA3dFogFunction */
+ uint8 type; /* SVGA3dFogType */
+ uint8 base; /* SVGA3dFogBase */
+ };
+ uint32 uintValue;
+ };
+}
+#include "vmware_pack_end.h"
+SVGA3dFogMode;
+
+/*
+ * Uniquely identify one image (a 1D/2D/3D array) from a surface. This
+ * is a surface ID as well as face/mipmap indices.
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dSurfaceImageId {
+ uint32 sid;
+ uint32 face;
+ uint32 mipmap;
+}
+#include "vmware_pack_end.h"
+SVGA3dSurfaceImageId;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ uint32 width;
+ uint32 height;
+ uint32 depth;
+}
+#include "vmware_pack_end.h"
+SVGA3dSize;
+
+/*
+ * Guest-backed objects definitions.
+ */
+typedef enum {
+ SVGA_OTABLE_MOB = 0,
+ SVGA_OTABLE_MIN = 0,
+ SVGA_OTABLE_SURFACE = 1,
+ SVGA_OTABLE_CONTEXT = 2,
+ SVGA_OTABLE_SHADER = 3,
+ SVGA_OTABLE_SCREENTARGET = 4,
+
+ SVGA_OTABLE_DX9_MAX = 5,
+
+ SVGA_OTABLE_DXCONTEXT = 5,
+ SVGA_OTABLE_MAX = 6
+} SVGAOTableType;
+
+/*
+ * Deprecated.
+ */
+#define SVGA_OTABLE_COUNT 4
+
+typedef enum {
+ SVGA_COTABLE_MIN = 0,
+ SVGA_COTABLE_RTVIEW = 0,
+ SVGA_COTABLE_DSVIEW = 1,
+ SVGA_COTABLE_SRVIEW = 2,
+ SVGA_COTABLE_ELEMENTLAYOUT = 3,
+ SVGA_COTABLE_BLENDSTATE = 4,
+ SVGA_COTABLE_DEPTHSTENCIL = 5,
+ SVGA_COTABLE_RASTERIZERSTATE = 6,
+ SVGA_COTABLE_SAMPLER = 7,
+ SVGA_COTABLE_STREAMOUTPUT = 8,
+ SVGA_COTABLE_DXQUERY = 9,
+ SVGA_COTABLE_DXSHADER = 10,
+ SVGA_COTABLE_DX10_MAX = 11,
+ SVGA_COTABLE_UAVIEW = 11,
+ SVGA_COTABLE_MAX
+} SVGACOTableType;
+
+/*
+ * The largest size (number of entries) allowed in a COTable.
+ */
+#define SVGA_COTABLE_MAX_IDS (MAX_UINT16 - 2)
+
+typedef enum SVGAMobFormat {
+ SVGA3D_MOBFMT_INVALID = SVGA3D_INVALID_ID,
+ SVGA3D_MOBFMT_PTDEPTH_0 = 0,
+ SVGA3D_MOBFMT_MIN = 0,
+ SVGA3D_MOBFMT_PTDEPTH_1 = 1,
+ SVGA3D_MOBFMT_PTDEPTH_2 = 2,
+ SVGA3D_MOBFMT_RANGE = 3,
+ SVGA3D_MOBFMT_PTDEPTH64_0 = 4,
+ SVGA3D_MOBFMT_PTDEPTH64_1 = 5,
+ SVGA3D_MOBFMT_PTDEPTH64_2 = 6,
+ SVGA3D_MOBFMT_PREDX_MAX = 7,
+ SVGA3D_MOBFMT_EMPTY = 7,
+ SVGA3D_MOBFMT_MAX,
+} SVGAMobFormat;
+
+#define SVGA3D_MOB_EMPTY_BASE 1
+
+#endif /* _SVGA3D_TYPES_H_ */
diff --git a/drivers/gpu/drm/vmwgfx/svga_escape.h b/drivers/gpu/drm/vmwgfx/device_include/svga_escape.h
index 8e8d9682e018..884b1d1fb85f 100644
--- a/drivers/gpu/drm/vmwgfx/svga_escape.h
+++ b/drivers/gpu/drm/vmwgfx/device_include/svga_escape.h
@@ -1,5 +1,5 @@
/**********************************************************
- * Copyright 2007-2009 VMware, Inc. All rights reserved.
+ * Copyright 2007-2015 VMware, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
diff --git a/drivers/gpu/drm/vmwgfx/svga_overlay.h b/drivers/gpu/drm/vmwgfx/device_include/svga_overlay.h
index f38416fcb046..faf6d9b2b891 100644
--- a/drivers/gpu/drm/vmwgfx/svga_overlay.h
+++ b/drivers/gpu/drm/vmwgfx/device_include/svga_overlay.h
@@ -1,5 +1,5 @@
/**********************************************************
- * Copyright 2007-2009 VMware, Inc. All rights reserved.
+ * Copyright 2007-2015 VMware, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
@@ -152,19 +152,17 @@ VMwareVideoGetAttributes(const SVGAOverlayFormat format, /* IN */
switch (format) {
case VMWARE_FOURCC_YV12:
*height = (*height + 1) & ~1;
- *size = (*width + 3) & ~3;
+ *size = (*width) * (*height);
if (pitches) {
- pitches[0] = *size;
+ pitches[0] = *width;
}
- *size *= *height;
-
if (offsets) {
offsets[1] = *size;
}
- tmp = ((*width >> 1) + 3) & ~3;
+ tmp = *width >> 1;
if (pitches) {
pitches[1] = pitches[2] = tmp;
diff --git a/drivers/gpu/drm/vmwgfx/svga_reg.h b/drivers/gpu/drm/vmwgfx/device_include/svga_reg.h
index e4259c2c1acc..6e0ccb70a700 100644
--- a/drivers/gpu/drm/vmwgfx/svga_reg.h
+++ b/drivers/gpu/drm/vmwgfx/device_include/svga_reg.h
@@ -1,5 +1,5 @@
/**********************************************************
- * Copyright 1998-2009 VMware, Inc. All rights reserved.
+ * Copyright 1998-2015 VMware, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
@@ -31,20 +31,38 @@
#ifndef _SVGA_REG_H_
#define _SVGA_REG_H_
+#include <linux/pci_ids.h>
+
+#define INCLUDE_ALLOW_MODULE
+#define INCLUDE_ALLOW_USERLEVEL
+
+#define INCLUDE_ALLOW_VMCORE
+#include "includeCheck.h"
+
+#include "svga_types.h"
/*
- * PCI device IDs.
+ * SVGA_REG_ENABLE bit definitions.
*/
-#define PCI_DEVICE_ID_VMWARE_SVGA2 0x0405
+typedef enum {
+ SVGA_REG_ENABLE_DISABLE = 0,
+ SVGA_REG_ENABLE_ENABLE = (1 << 0),
+ SVGA_REG_ENABLE_HIDE = (1 << 1),
+} SvgaRegEnable;
+
+typedef uint32 SVGAMobId;
/*
- * SVGA_REG_ENABLE bit definitions.
+ * Arbitrary and meaningless limits. Please ignore these when writing
+ * new drivers.
*/
-#define SVGA_REG_ENABLE_DISABLE 0
-#define SVGA_REG_ENABLE_ENABLE 1
-#define SVGA_REG_ENABLE_HIDE 2
-#define SVGA_REG_ENABLE_ENABLE_HIDE (SVGA_REG_ENABLE_ENABLE |\
- SVGA_REG_ENABLE_HIDE)
+#define SVGA_MAX_WIDTH 2560
+#define SVGA_MAX_HEIGHT 1600
+
+
+#define SVGA_MAX_BITS_PER_PIXEL 32
+#define SVGA_MAX_DEPTH 24
+#define SVGA_MAX_DISPLAYS 10
/*
* Legal values for the SVGA_REG_CURSOR_ON register in old-fashioned
@@ -57,14 +75,9 @@
#define SVGA_CURSOR_ON_RESTORE_TO_FB 0x3 /* Put the cursor back in the framebuffer so the user can see it */
/*
- * The maximum framebuffer size that can traced for e.g. guests in VESA mode.
- * The changeMap in the monitor is proportional to this number. Therefore, we'd
- * like to keep it as small as possible to reduce monitor overhead (using
- * SVGA_VRAM_MAX_SIZE for this increases the size of the shared area by over
- * 4k!).
- *
- * NB: For compatibility reasons, this value must be greater than 0xff0000.
- * See bug 335072.
+ * The maximum framebuffer size that can be traced for guests unless
+ * SVGA_CAP_GBOBJECTS is set in SVGA_REG_CAPABILITIES. In that case
+ * the full framebuffer can be traced independent of this limit.
*/
#define SVGA_FB_MAX_TRACEABLE_SIZE 0x1000000
@@ -106,6 +119,8 @@
#define SVGA_IRQFLAG_ANY_FENCE 0x1 /* Any fence was passed */
#define SVGA_IRQFLAG_FIFO_PROGRESS 0x2 /* Made forward progress in the FIFO */
#define SVGA_IRQFLAG_FENCE_GOAL 0x4 /* SVGA_FIFO_FENCE_GOAL reached */
+#define SVGA_IRQFLAG_COMMAND_BUFFER 0x8 /* Command buffer completed */
+#define SVGA_IRQFLAG_ERROR 0x10 /* Error while processing commands */
/*
* Registers
@@ -131,6 +146,7 @@ enum {
SVGA_REG_FB_SIZE = 16,
/* ID 0 implementation only had the above registers, then the palette */
+ SVGA_REG_ID_0_TOP = 17,
SVGA_REG_CAPABILITIES = 17,
SVGA_REG_MEM_START = 18, /* (Deprecated) */
@@ -171,7 +187,7 @@ enum {
SVGA_REG_COMMAND_LOW = 48, /* Lower 32 bits and submits commands */
SVGA_REG_COMMAND_HIGH = 49, /* Upper 32 bits of command buffer PA */
SVGA_REG_MAX_PRIMARY_BOUNDING_BOX_MEM = 50, /* Max primary memory */
- SVGA_REG_SUGGESTED_GBOBJECT_MEM_SIZE_KB = 51, /* Suggested limit on mob mem */
+ SVGA_REG_SUGGESTED_GBOBJECT_MEM_SIZE_KB = 51, /* Suggested limit on mob mem */
SVGA_REG_DEV_CAP = 52, /* Write dev cap index, read value */
SVGA_REG_CMD_PREPEND_LOW = 53,
SVGA_REG_CMD_PREPEND_HIGH = 54,
@@ -182,7 +198,6 @@ enum {
SVGA_PALETTE_BASE = 1024, /* Base of SVGA color map */
/* Next 768 (== 256*3) registers exist for colormap */
-
SVGA_SCRATCH_BASE = SVGA_PALETTE_BASE + SVGA_NUM_PALETTE_REGS
/* Base of scratch registers */
/* Next reg[SVGA_REG_SCRATCH_SIZE] registers exist for scratch usage:
@@ -190,7 +205,6 @@ enum {
the use of the current SVGA driver. */
};
-
/*
* Guest memory regions (GMRs):
*
@@ -288,17 +302,205 @@ enum {
#define SVGA_GMR_FRAMEBUFFER ((uint32) -2) /* Guest Framebuffer (GFB) */
typedef
+#include "vmware_pack_begin.h"
struct SVGAGuestMemDescriptor {
uint32 ppn;
uint32 numPages;
-} SVGAGuestMemDescriptor;
+}
+#include "vmware_pack_end.h"
+SVGAGuestMemDescriptor;
typedef
+#include "vmware_pack_begin.h"
struct SVGAGuestPtr {
uint32 gmrId;
uint32 offset;
-} SVGAGuestPtr;
+}
+#include "vmware_pack_end.h"
+SVGAGuestPtr;
+
+/*
+ * Register based command buffers --
+ *
+ * Provide an SVGA device interface that allows the guest to submit
+ * command buffers to the SVGA device through an SVGA device register.
+ * The metadata for each command buffer is contained in the
+ * SVGACBHeader structure along with the return status codes.
+ *
+ * The SVGA device supports command buffers if
+ * SVGA_CAP_COMMAND_BUFFERS is set in the device caps register. The
+ * fifo must be enabled for command buffers to be submitted.
+ *
+ * Command buffers are submitted when the guest writes the 64 byte
+ * aligned physical address into SVGA_REG_COMMAND_LOW and
+ * SVGA_REG_COMMAND_HIGH. SVGA_REG_COMMAND_HIGH contains the upper 32
+ * bits of the physical address. SVGA_REG_COMMAND_LOW contains the
+ * lower 32 bits of the physical address; since the command buffer
+ * headers are required to be 64 byte aligned, the lower 6 bits are
+ * used for the SVGACBContext value. Writing to SVGA_REG_COMMAND_LOW
+ * submits the command buffer to the device and queues it for
+ * execution. The SVGA device supports at least
+ * SVGA_CB_MAX_QUEUED_PER_CONTEXT queued command buffers per context;
+ * if that limit is reached, the device writes the status
+ * SVGA_CB_STATUS_QUEUE_FULL to the status field of the command
+ * buffer header synchronously and does not raise any IRQs.
+ *
+ * It is invalid to submit a command buffer without a valid physical
+ * address, and results are undefined.
+ *
+ * The device guarantees that command buffers of size SVGA_CB_MAX_SIZE
+ * will be supported. If a larger command buffer is submitted results
+ * are unspecified and the device will either complete the command
+ * buffer or return an error.
+ *
+ * The device guarantees that any individual command in a command
+ * buffer can be up to SVGA_CB_MAX_COMMAND_SIZE in size, which is
+ * enough to fit a 64x64 color-cursor definition. If the command is
+ * too large, the device is allowed to process the command or return
+ * an error.
+ *
+ * The device context is a special SVGACBContext that allows for
+ * synchronous register-like accesses with the flexibility of
+ * commands. It has a different command set, defined by
+ * SVGADeviceContextCmdId. The commands in each command buffer are not
+ * allowed to straddle physical pages.
+ *
+ * The offset field, which is available starting with the
+ * SVGA_CAP_CMD_BUFFERS_2 cap bit, can be set by the guest to bias the
+ * start of command processing into the buffer. If an error is
+ * encountered, the errorOffset will still be relative to the specific
+ * PA, not biased by the offset. When the command buffer is finished
+ * the guest should not read the offset field, as there is no guarantee
+ * what it will be set to.
+ */
+
+#define SVGA_CB_MAX_SIZE (512 * 1024) /* 512 KB */
+#define SVGA_CB_MAX_QUEUED_PER_CONTEXT 32
+#define SVGA_CB_MAX_COMMAND_SIZE (32 * 1024) /* 32 KB */
+
+#define SVGA_CB_CONTEXT_MASK 0x3f
+typedef enum {
+ SVGA_CB_CONTEXT_DEVICE = 0x3f,
+ SVGA_CB_CONTEXT_0 = 0x0,
+ SVGA_CB_CONTEXT_MAX = 0x1,
+} SVGACBContext;
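+
+/*
+ * Hedged submission sketch, not part of the device headers. Assumes a
+ * hypothetical svga_write_reg(index, value) register-write helper; pa
+ * is the 64 byte aligned physical address of an SVGACBHeader, so its
+ * low 6 bits are free to carry the SVGACBContext value.
+ */
+static inline void
+svgaSubmitCommandBuffer(uint64 pa, SVGACBContext context)
+{
+   svga_write_reg(SVGA_REG_COMMAND_HIGH, (uint32)(pa >> 32));
+   /* Writing COMMAND_LOW submits the buffer and queues it. */
+   svga_write_reg(SVGA_REG_COMMAND_LOW, (uint32)pa | (uint32)context);
+}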
+
+
+typedef enum {
+ /*
+ * The guest is supposed to write SVGA_CB_STATUS_NONE to the status
+ * field before submitting the command buffer header, the host will
+ * change the value when it is done with the command buffer.
+ */
+ SVGA_CB_STATUS_NONE = 0,
+
+ /*
+ * Written by the host when a command buffer completes successfully.
+ * The device raises an IRQ with SVGA_IRQFLAG_COMMAND_BUFFER unless
+ * the SVGA_CB_FLAG_NO_IRQ flag is set.
+ */
+ SVGA_CB_STATUS_COMPLETED = 1,
+
+ /*
+ * Written by the host synchronously with the command buffer
+ * submission to indicate the command buffer was not submitted. No
+ * IRQ is raised.
+ */
+ SVGA_CB_STATUS_QUEUE_FULL = 2,
+
+ /*
+ * Written by the host when an error was detected parsing a command
+ * in the command buffer, errorOffset is written to contain the
+ * offset to the first byte of the failing command. The device
+ * raises the IRQ with both SVGA_IRQFLAG_ERROR and
+ * SVGA_IRQFLAG_COMMAND_BUFFER. Some of the commands may have been
+ * processed.
+ */
+ SVGA_CB_STATUS_COMMAND_ERROR = 3,
+
+ /*
+ * Written by the host if there is an error parsing the command
+ * buffer header. The device raises the IRQ with both
+ * SVGA_IRQFLAG_ERROR and SVGA_IRQFLAG_COMMAND_BUFFER. The device
+ * did not process any of the command buffer.
+ */
+ SVGA_CB_STATUS_CB_HEADER_ERROR = 4,
+ /*
+ * Written by the host if the guest requested the host to preempt
+ * the command buffer. The device will not raise any IRQs and the
+ * command buffer was not processed.
+ */
+ SVGA_CB_STATUS_PREEMPTED = 5,
+
+ /*
+ * Written by the host synchronously with the command buffer
+ * submission to indicate that the command buffer was not submitted
+ * due to an error. No IRQ is raised.
+ */
+ SVGA_CB_STATUS_SUBMISSION_ERROR = 6,
+} SVGACBStatus;
+
+typedef enum {
+ SVGA_CB_FLAG_NONE = 0,
+ SVGA_CB_FLAG_NO_IRQ = 1 << 0,
+ SVGA_CB_FLAG_DX_CONTEXT = 1 << 1,
+ SVGA_CB_FLAG_MOB = 1 << 2,
+} SVGACBFlags;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ volatile SVGACBStatus status;
+ volatile uint32 errorOffset;
+ uint64 id;
+ SVGACBFlags flags;
+ uint32 length;
+ union {
+ PA pa;
+ struct {
+ SVGAMobId mobid;
+ uint32 mobOffset;
+ } mob;
+ } ptr;
+ uint32 offset; /* Valid if CMD_BUFFERS_2 cap set, must be zero otherwise */
+ uint32 dxContext; /* Valid if DX_CONTEXT flag set, must be zero otherwise */
+ uint32 mustBeZero[6];
+}
+#include "vmware_pack_end.h"
+SVGACBHeader;
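+
+/*
+ * Hedged initialization sketch, not part of the device headers: the
+ * guest writes SVGA_CB_STATUS_NONE before submission and the host
+ * updates the status field when it is done with the buffer.
+ */
+static inline void
+svgaCBHeaderInit(SVGACBHeader *header, PA pa, uint32 length)
+{
+   SVGACBHeader h = { SVGA_CB_STATUS_NONE }; /* zeroes remaining fields */
+
+   h.flags = SVGA_CB_FLAG_NONE;
+   h.ptr.pa = pa;
+   h.length = length;
+   *header = h;
+}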
+
+typedef enum {
+ SVGA_DC_CMD_NOP = 0,
+ SVGA_DC_CMD_START_STOP_CONTEXT = 1,
+ SVGA_DC_CMD_PREEMPT = 2,
+ SVGA_DC_CMD_MAX = 3,
+ SVGA_DC_CMD_FORCE_UINT = MAX_UINT32,
+} SVGADeviceContextCmdId;
+
+typedef struct {
+ uint32 enable;
+ SVGACBContext context;
+} SVGADCCmdStartStop;
+
+/*
+ * SVGADCCmdPreempt --
+ *
+ * This command allows the guest to request that all command buffers
+ * on the specified context that can be preempted are preempted. After
+ * execution of this command, all command buffers that were preempted
+ * will already have SVGA_CB_STATUS_PREEMPTED written into the status
+ * field. The device might still be processing a command buffer,
+ * assuming execution of it started before the preemption request was
+ * received. Setting the ignoreIDZero flag to TRUE causes the device
+ * to not preempt command buffers whose id field in the command buffer
+ * header is set to zero.
+ */
+
+typedef struct {
+ SVGACBContext context;
+ uint32 ignoreIDZero;
+} SVGADCCmdPreempt;
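+
+/*
+ * Hedged sketch, not part of the device headers: composing a preempt
+ * request. Wrapping it in a device-context command buffer is outside
+ * the scope of this header.
+ */
+static inline SVGADCCmdPreempt
+svgaDCPreemptRequest(SVGACBContext context)
+{
+   SVGADCCmdPreempt cmd;
+
+   cmd.context = context;
+   cmd.ignoreIDZero = 0; /* also preempt buffers whose id is zero */
+   return cmd;
+}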
/*
* SVGAGMRImageFormat --
@@ -320,13 +522,12 @@ struct SVGAGuestPtr {
*
*/
-typedef
-struct SVGAGMRImageFormat {
+typedef struct SVGAGMRImageFormat {
union {
struct {
uint32 bitsPerPixel : 8;
uint32 colorDepth : 8;
- uint32 reserved : 16; /* Must be zero */
+ uint32 reserved : 16; /* Must be zero */
};
uint32 value;
@@ -334,6 +535,7 @@ struct SVGAGMRImageFormat {
} SVGAGMRImageFormat;
typedef
+#include "vmware_pack_begin.h"
struct SVGAGuestImage {
SVGAGuestPtr ptr;
@@ -353,7 +555,9 @@ struct SVGAGuestImage {
* assuming each row of blocks is tightly packed.
*/
uint32 pitch;
-} SVGAGuestImage;
+}
+#include "vmware_pack_end.h"
+SVGAGuestImage;
/*
* SVGAColorBGRX --
@@ -363,14 +567,13 @@ struct SVGAGuestImage {
* GMRFB state.
*/
-typedef
-struct SVGAColorBGRX {
+typedef struct SVGAColorBGRX {
union {
struct {
uint32 b : 8;
uint32 g : 8;
uint32 r : 8;
- uint32 x : 8; /* Unused */
+ uint32 x : 8; /* Unused */
};
uint32 value;
@@ -392,26 +595,49 @@ struct SVGAColorBGRX {
*/
typedef
-struct SVGASignedRect {
+#include "vmware_pack_begin.h"
+struct {
int32 left;
int32 top;
int32 right;
int32 bottom;
-} SVGASignedRect;
+}
+#include "vmware_pack_end.h"
+SVGASignedRect;
typedef
-struct SVGASignedPoint {
+#include "vmware_pack_begin.h"
+struct {
int32 x;
int32 y;
-} SVGASignedPoint;
+}
+#include "vmware_pack_end.h"
+SVGASignedPoint;
/*
- * Capabilities
+ * SVGA Device Capabilities
+ *
+ * Note the holes in the bitfield. Missing bits have been deprecated,
+ * and must not be reused. Those capabilities will never be reported
+ * by new versions of the SVGA device.
+ *
+ * XXX: Add longer descriptions for each capability, including a list
+ * of the new features that each capability provides.
*
- * Note the holes in the bitfield. Missing bits have been deprecated,
- * and must not be reused. Those capabilities will never be reported
- * by new versions of the SVGA device.
+ * SVGA_CAP_IRQMASK --
+ * Provides device interrupts. Adds device register SVGA_REG_IRQMASK
+ * to set interrupt mask and direct I/O port SVGA_IRQSTATUS_PORT to
+ * set/clear pending interrupts.
+ *
+ * SVGA_CAP_GMR --
+ * Provides synchronous mapping of guest memory regions (GMR).
+ * Adds device registers SVGA_REG_GMR_ID, SVGA_REG_GMR_DESCRIPTOR,
+ * SVGA_REG_GMR_MAX_IDS, and SVGA_REG_GMR_MAX_DESCRIPTOR_LENGTH.
+ *
+ * SVGA_CAP_TRACES --
+ * Allows framebuffer trace-based updates even when FIFO is enabled.
+ * Adds device register SVGA_REG_TRACES.
*
* SVGA_CAP_GMR2 --
* Provides asynchronous commands to define and remap guest memory
@@ -421,21 +647,39 @@ struct SVGASignedPoint {
* SVGA_CAP_SCREEN_OBJECT_2 --
* Allow screen object support, and require backing stores from the
* guest for each screen object.
+ *
+ * SVGA_CAP_COMMAND_BUFFERS --
+ * Enable register based command buffer submission.
+ *
+ * SVGA_CAP_DEAD1 --
+ * This cap was incorrectly used by old drivers and should not be
+ * reused.
+ *
+ * SVGA_CAP_CMD_BUFFERS_2 --
+ * Enable support for the prepend command buffer submission
+ * registers, SVGA_REG_CMD_PREPEND_LOW and
+ * SVGA_REG_CMD_PREPEND_HIGH.
+ *
+ * SVGA_CAP_GBOBJECTS --
+ * Enable guest-backed objects and surfaces.
+ *
+ * SVGA_CAP_CMD_BUFFERS_3 --
+ * Enable support for command buffers in a mob.
*/
#define SVGA_CAP_NONE 0x00000000
#define SVGA_CAP_RECT_COPY 0x00000002
#define SVGA_CAP_CURSOR 0x00000020
-#define SVGA_CAP_CURSOR_BYPASS 0x00000040 /* Legacy (Use Cursor Bypass 3 instead) */
-#define SVGA_CAP_CURSOR_BYPASS_2 0x00000080 /* Legacy (Use Cursor Bypass 3 instead) */
+#define SVGA_CAP_CURSOR_BYPASS 0x00000040
+#define SVGA_CAP_CURSOR_BYPASS_2 0x00000080
#define SVGA_CAP_8BIT_EMULATION 0x00000100
#define SVGA_CAP_ALPHA_CURSOR 0x00000200
#define SVGA_CAP_3D 0x00004000
#define SVGA_CAP_EXTENDED_FIFO 0x00008000
-#define SVGA_CAP_MULTIMON 0x00010000 /* Legacy multi-monitor support */
+#define SVGA_CAP_MULTIMON 0x00010000
#define SVGA_CAP_PITCHLOCK 0x00020000
#define SVGA_CAP_IRQMASK 0x00040000
-#define SVGA_CAP_DISPLAY_TOPOLOGY 0x00080000 /* Legacy multi-monitor support */
+#define SVGA_CAP_DISPLAY_TOPOLOGY 0x00080000
#define SVGA_CAP_GMR 0x00100000
#define SVGA_CAP_TRACES 0x00200000
#define SVGA_CAP_GMR2 0x00400000
@@ -444,6 +688,33 @@ struct SVGASignedPoint {
#define SVGA_CAP_DEAD1 0x02000000
#define SVGA_CAP_CMD_BUFFERS_2 0x04000000
#define SVGA_CAP_GBOBJECTS 0x08000000
+#define SVGA_CAP_DX 0x10000000
+
+#define SVGA_CAP_CMD_RESERVED 0x80000000
+
+
+/*
+ * The guest can optionally read some SVGA device capabilities through
+ * the backdoor with command BDOOR_CMD_GET_SVGA_CAPABILITIES before
+ * the SVGA device is initialized. The type of capability the guest
+ * is requesting from the SVGABackdoorCapType enum should be placed in
+ * the upper 16 bits of the backdoor command id (ECX). On success the
+ * value of EBX will be set to BDOOR_MAGIC and EAX will be set to
+ * the requested capability. If the command is not supported, then EBX
+ * will be left unchanged and EAX will be set to -1. Because it is
+ * possible that -1 is the value of the requested cap, the correct way
+ * to check if the command was successful is to check if EBX was changed
+ * to BDOOR_MAGIC, making sure to initialize the register to something
+ * else first.
+ */
+
+typedef enum {
+ SVGABackdoorCapDeviceCaps = 0,
+ SVGABackdoorCapFifoCaps = 1,
+ SVGABackdoorCap3dHWVersion = 2,
+ SVGABackdoorCapMax = 3,
+} SVGABackdoorCapType;
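+
+/*
+ * Hedged probe sketch, not part of the device headers, following the
+ * description above. It assumes the classic x86 VMware backdoor I/O
+ * protocol (magic 0x564D5868 in EAX, port 0x5658 in DX) and that
+ * BDOOR_CMD_GET_SVGA_CAPABILITIES and BDOOR_MAGIC are defined
+ * elsewhere; GCC inline assembly, illustration only.
+ */
+static inline uint32
+svgaBackdoorGetCap(SVGABackdoorCapType type, uint32 *out)
+{
+   uint32 eax = 0x564D5868; /* backdoor magic */
+   uint32 ebx = 0;          /* must not start out equal to BDOOR_MAGIC */
+   uint32 ecx = BDOOR_CMD_GET_SVGA_CAPABILITIES | ((uint32)type << 16);
+   uint32 edx = 0x5658;     /* backdoor I/O port */
+
+   __asm__ __volatile__("inl %%dx, %%eax"
+                        : "+a"(eax), "+b"(ebx), "+c"(ecx), "+d"(edx));
+   if (ebx != BDOOR_MAGIC)
+      return 0;  /* command not supported */
+   *out = eax;   /* requested capability value */
+   return 1;
+}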
+
/*
* FIFO register indices.
@@ -883,7 +1154,8 @@ enum {
SVGA_VIDEO_PITCH_2,
SVGA_VIDEO_PITCH_3,
SVGA_VIDEO_DATA_GMRID, /* Optional, defaults to SVGA_GMR_FRAMEBUFFER */
- SVGA_VIDEO_DST_SCREEN_ID, /* Optional, defaults to virtual coords (SVGA_ID_INVALID) */
+ SVGA_VIDEO_DST_SCREEN_ID, /* Optional, defaults to virtual coords */
+ /* (SVGA_ID_INVALID) */
SVGA_VIDEO_NUM_REGS
};
@@ -896,7 +1168,9 @@ enum {
* video frame to be displayed.
*/
-typedef struct SVGAOverlayUnit {
+typedef
+#include "vmware_pack_begin.h"
+struct SVGAOverlayUnit {
uint32 enabled;
uint32 flags;
uint32 dataOffset;
@@ -916,7 +1190,27 @@ typedef struct SVGAOverlayUnit {
uint32 pitches[3];
uint32 dataGMRId;
uint32 dstScreenId;
-} SVGAOverlayUnit;
+}
+#include "vmware_pack_end.h"
+SVGAOverlayUnit;
+
+
+/*
+ * Guest display topology
+ *
+ * XXX: This structure is not part of the SVGA device's interface, and
+ * doesn't really belong here.
+ */
+#define SVGA_INVALID_DISPLAY_ID ((uint32)-1)
+
+typedef struct SVGADisplayTopology {
+ uint16 displayId;
+ uint16 isPrimary;
+ uint32 width;
+ uint32 height;
+ uint32 positionX;
+ uint32 positionY;
+} SVGADisplayTopology;
/*
@@ -951,10 +1245,10 @@ typedef struct SVGAOverlayUnit {
* value of zero means no cloning should happen.
*/
-#define SVGA_SCREEN_MUST_BE_SET (1 << 0) /* Must be set or results undefined */
+#define SVGA_SCREEN_MUST_BE_SET (1 << 0)
#define SVGA_SCREEN_HAS_ROOT SVGA_SCREEN_MUST_BE_SET /* Deprecated */
-#define SVGA_SCREEN_IS_PRIMARY (1 << 1) /* Guest considers this screen to be 'primary' */
-#define SVGA_SCREEN_FULLSCREEN_HINT (1 << 2) /* Guest is running a fullscreen app here */
+#define SVGA_SCREEN_IS_PRIMARY (1 << 1)
+#define SVGA_SCREEN_FULLSCREEN_HINT (1 << 2)
/*
* Added with SVGA_FIFO_CAP_SCREEN_OBJECT_2. When the screen is
@@ -977,7 +1271,8 @@ typedef struct SVGAOverlayUnit {
#define SVGA_SCREEN_BLANKING (1 << 4)
typedef
-struct SVGAScreenObject {
+#include "vmware_pack_begin.h"
+struct {
uint32 structSize; /* sizeof(SVGAScreenObject) */
uint32 id;
uint32 flags;
@@ -995,8 +1290,17 @@ struct SVGAScreenObject {
* with SVGA_FIFO_CAP_SCREEN_OBJECT.
*/
SVGAGuestImage backingStore;
+
+ /*
+ * The cloneCount field is treated as a hint from the guest that
+ * the user wants this display to be cloned, cloneCount times.
+ *
+ * A value of zero means no cloning should happen.
+ */
uint32 cloneCount;
-} SVGAScreenObject;
+}
+#include "vmware_pack_end.h"
+SVGAScreenObject;
/*
@@ -1009,7 +1313,7 @@ struct SVGAScreenObject {
* Note the holes in the command ID numbers: These commands have been
* deprecated, and the old IDs must not be reused.
*
- * Command IDs from 1000 to 1999 are reserved for use by the SVGA3D
+ * Command IDs from 1000 to 2999 are reserved for use by the SVGA3D
* protocol.
*
* Each command's parameters are described by the comments and
@@ -1020,6 +1324,7 @@ typedef enum {
SVGA_CMD_INVALID_CMD = 0,
SVGA_CMD_UPDATE = 1,
SVGA_CMD_RECT_COPY = 3,
+ SVGA_CMD_RECT_ROP_COPY = 14,
SVGA_CMD_DEFINE_CURSOR = 19,
SVGA_CMD_DEFINE_ALPHA_CURSOR = 22,
SVGA_CMD_UPDATE_VERBOSE = 25,
@@ -1035,9 +1340,14 @@ typedef enum {
SVGA_CMD_ANNOTATION_COPY = 40,
SVGA_CMD_DEFINE_GMR2 = 41,
SVGA_CMD_REMAP_GMR2 = 42,
+ SVGA_CMD_DEAD = 43,
+ SVGA_CMD_DEAD_2 = 44,
+ SVGA_CMD_NOP = 45,
+ SVGA_CMD_NOP_ERROR = 46,
SVGA_CMD_MAX
} SVGAFifoCmdId;
+#define SVGA_CMD_MAX_DATASIZE (256 * 1024)
#define SVGA_CMD_MAX_ARGS 64
@@ -1070,12 +1380,15 @@ typedef enum {
*/
typedef
-struct SVGAFifoCmdUpdate {
+#include "vmware_pack_begin.h"
+struct {
uint32 x;
uint32 y;
uint32 width;
uint32 height;
-} SVGAFifoCmdUpdate;
+}
+#include "vmware_pack_end.h"
+SVGAFifoCmdUpdate;
/*
@@ -1089,14 +1402,44 @@ struct SVGAFifoCmdUpdate {
*/
typedef
-struct SVGAFifoCmdRectCopy {
+#include "vmware_pack_begin.h"
+struct {
+ uint32 srcX;
+ uint32 srcY;
+ uint32 destX;
+ uint32 destY;
+ uint32 width;
+ uint32 height;
+}
+#include "vmware_pack_end.h"
+SVGAFifoCmdRectCopy;
+
+
+/*
+ * SVGA_CMD_RECT_ROP_COPY --
+ *
+ * Perform a rectangular DMA transfer from one area of the GFB to
+ * another, and copy the result to any screens which intersect it.
+ * The value of ROP may only be SVGA_ROP_COPY, and this command is
+ * only supported for backwards compatibility reasons.
+ *
+ * Availability:
+ * SVGA_CAP_RECT_COPY
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
uint32 srcX;
uint32 srcY;
uint32 destX;
uint32 destY;
uint32 width;
uint32 height;
-} SVGAFifoCmdRectCopy;
+ uint32 rop;
+}
+#include "vmware_pack_end.h"
+SVGAFifoCmdRectRopCopy;
/*
@@ -1113,7 +1456,8 @@ struct SVGAFifoCmdRectCopy {
*/
typedef
-struct SVGAFifoCmdDefineCursor {
+#include "vmware_pack_begin.h"
+struct {
uint32 id; /* Reserved, must be zero. */
uint32 hotspotX;
uint32 hotspotY;
@@ -1125,7 +1469,9 @@ struct SVGAFifoCmdDefineCursor {
* Followed by scanline data for AND mask, then XOR mask.
* Each scanline is padded to a 32-bit boundary.
*/
-} SVGAFifoCmdDefineCursor;
+}
+#include "vmware_pack_end.h"
+SVGAFifoCmdDefineCursor;
/*
@@ -1142,14 +1488,17 @@ struct SVGAFifoCmdDefineCursor {
*/
typedef
-struct SVGAFifoCmdDefineAlphaCursor {
+#include "vmware_pack_begin.h"
+struct {
uint32 id; /* Reserved, must be zero. */
uint32 hotspotX;
uint32 hotspotY;
uint32 width;
uint32 height;
/* Followed by scanline data */
-} SVGAFifoCmdDefineAlphaCursor;
+}
+#include "vmware_pack_end.h"
+SVGAFifoCmdDefineAlphaCursor;
/*
@@ -1165,13 +1514,16 @@ struct SVGAFifoCmdDefineAlphaCursor {
*/
typedef
-struct SVGAFifoCmdUpdateVerbose {
+#include "vmware_pack_begin.h"
+struct {
uint32 x;
uint32 y;
uint32 width;
uint32 height;
uint32 reason;
-} SVGAFifoCmdUpdateVerbose;
+}
+#include "vmware_pack_end.h"
+SVGAFifoCmdUpdateVerbose;
/*
@@ -1190,14 +1542,17 @@ struct SVGAFifoCmdUpdateVerbose {
#define SVGA_ROP_COPY 0x03
typedef
-struct SVGAFifoCmdFrontRopFill {
+#include "vmware_pack_begin.h"
+struct {
uint32 color; /* In the same format as the GFB */
uint32 x;
uint32 y;
uint32 width;
uint32 height;
uint32 rop; /* Must be SVGA_ROP_COPY */
-} SVGAFifoCmdFrontRopFill;
+}
+#include "vmware_pack_end.h"
+SVGAFifoCmdFrontRopFill;
/*
@@ -1216,9 +1571,12 @@ struct SVGAFifoCmdFrontRopFill {
*/
typedef
+#include "vmware_pack_begin.h"
struct {
uint32 fence;
-} SVGAFifoCmdFence;
+}
+#include "vmware_pack_end.h"
+SVGAFifoCmdFence;
/*
@@ -1233,11 +1591,14 @@ struct {
*/
typedef
-struct SVGAFifoCmdEscape {
+#include "vmware_pack_begin.h"
+struct {
uint32 nsid;
uint32 size;
/* followed by 'size' bytes of data */
-} SVGAFifoCmdEscape;
+}
+#include "vmware_pack_end.h"
+SVGAFifoCmdEscape;
/*
@@ -1267,9 +1628,12 @@ struct SVGAFifoCmdEscape {
*/
typedef
+#include "vmware_pack_begin.h"
struct {
SVGAScreenObject screen; /* Variable-length according to version */
-} SVGAFifoCmdDefineScreen;
+}
+#include "vmware_pack_end.h"
+SVGAFifoCmdDefineScreen;
/*
@@ -1283,9 +1647,12 @@ struct {
*/
typedef
+#include "vmware_pack_begin.h"
struct {
uint32 screenId;
-} SVGAFifoCmdDestroyScreen;
+}
+#include "vmware_pack_end.h"
+SVGAFifoCmdDestroyScreen;
/*
@@ -1336,11 +1703,14 @@ struct {
*/
typedef
+#include "vmware_pack_begin.h"
struct {
SVGAGuestPtr ptr;
uint32 bytesPerLine;
SVGAGMRImageFormat format;
-} SVGAFifoCmdDefineGMRFB;
+}
+#include "vmware_pack_end.h"
+SVGAFifoCmdDefineGMRFB;
/*
@@ -1348,19 +1718,10 @@ struct {
*
* This is a guest-to-host blit. It performs a DMA operation to
* copy a rectangular region of pixels from the current GMRFB to
- * one or more Screen Objects.
+ * a ScreenObject.
*
* The destination coordinate may be specified relative to a
- * screen's origin (if a screen ID is specified) or relative to the
- * virtual coordinate system's origin (if the screen ID is
- * SVGA_ID_INVALID). The actual destination may span zero or more
- * screens, in the case of a virtual destination rect or a rect
- * which extends off the edge of the specified screen.
- *
- * This command writes to the screen's "base layer": the underlying
- * framebuffer which exists below any cursor or video overlays. No
- * action is necessary to explicitly hide or update any overlays
- * which exist on top of the updated region.
+ * screen's origin. The provided screen ID must be valid.
*
* The SVGA device is guaranteed to finish reading from the GMRFB
* by the time any subsequent FENCE commands are reached.
@@ -1373,46 +1734,27 @@ struct {
*/
typedef
+#include "vmware_pack_begin.h"
struct {
SVGASignedPoint srcOrigin;
SVGASignedRect destRect;
uint32 destScreenId;
-} SVGAFifoCmdBlitGMRFBToScreen;
+}
+#include "vmware_pack_end.h"
+SVGAFifoCmdBlitGMRFBToScreen;
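
A minimal sketch of populating this command. SVGASignedPoint and
SVGASignedRect are mirrored locally (assuming the usual left, top,
right, bottom rectangle layout) so the example is self-contained.

	#include <stdint.h>

	typedef struct { int32_t x, y; } signed_point;
	typedef struct { int32_t left, top, right, bottom; } signed_rect;

	struct blit_gmrfb_to_screen {
		signed_point srcOrigin;
		signed_rect  destRect;
		uint32_t     destScreenId;
	} __attribute__((packed));

	/* Blit a w x h region from GMRFB (sx, sy) to (dx, dy) on screen 0. */
	static struct blit_gmrfb_to_screen make_blit(int32_t sx, int32_t sy,
						     int32_t dx, int32_t dy,
						     int32_t w, int32_t h)
	{
		return (struct blit_gmrfb_to_screen) {
			.srcOrigin    = { sx, sy },
			.destRect     = { dx, dy, dx + w, dy + h },
			.destScreenId = 0,
		};
	}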
/*
* SVGA_CMD_BLIT_SCREEN_TO_GMRFB --
*
* This is a host-to-guest blit. It performs a DMA operation to
- * copy a rectangular region of pixels from a single Screen Object
+ * copy a rectangular region of pixels from a single ScreenObject
* back to the current GMRFB.
*
- * Usage note: This command should be used rarely. It will
- * typically be inefficient, but it is necessary for some types of
- * synchronization between 3D (GPU) and 2D (CPU) rendering into
- * overlapping areas of a screen.
- *
* The source coordinate is specified relative to a screen's
- * origin. The provided screen ID must be valid. If any parameters
+ * origin. The provided screen ID must be valid. If any parameters
* are invalid, the resulting pixel values are undefined.
*
- * This command reads the screen's "base layer". Overlays like
- * video and cursor are not included, but any data which was sent
- * using a blit-to-screen primitive will be available, no matter
- * whether the data's original source was the GMRFB or the 3D
- * acceleration hardware.
- *
- * Note that our guest-to-host blits and host-to-guest blits aren't
- * symmetric in their current implementation. While the parameters
- * are identical, host-to-guest blits are a lot less featureful.
- * They do not support clipping: If the source parameters don't
- * fully fit within a screen, the blit fails. They must originate
- * from exactly one screen. Virtual coordinates are not directly
- * supported.
- *
- * Host-to-guest blits do support the same set of GMRFB formats
- * offered by guest-to-host blits.
- *
* The SVGA device is guaranteed to finish writing to the GMRFB by
* the time any subsequent FENCE commands are reached.
*
@@ -1421,77 +1763,57 @@ struct {
*/
typedef
+#include "vmware_pack_begin.h"
struct {
SVGASignedPoint destOrigin;
SVGASignedRect srcRect;
uint32 srcScreenId;
-} SVGAFifoCmdBlitScreenToGMRFB;
+}
+#include "vmware_pack_end.h"
+SVGAFifoCmdBlitScreenToGMRFB;
/*
* SVGA_CMD_ANNOTATION_FILL --
*
- * This is a blit annotation. This command stores a small piece of
- * device state which is consumed by the next blit-to-screen
- * command. The state is only cleared by commands which are
- * specifically documented as consuming an annotation. Other
- * commands (such as ESCAPEs for debugging) may intervene between
- * the annotation and its associated blit.
- *
- * This annotation is a promise about the contents of the next
- * blit: The video driver is guaranteeing that all pixels in that
- * blit will have the same value, specified here as a color in
- * SVGAColorBGRX format.
- *
- * The SVGA device can still render the blit correctly even if it
- * ignores this annotation, but the annotation may allow it to
- * perform the blit more efficiently, for example by ignoring the
- * source data and performing a fill in hardware.
- *
- * This annotation is most important for performance when the
- * user's display is being remoted over a network connection.
+ * The annotation commands have been deprecated and should not be used
+ * by new drivers. They used to provide performance hints to the SVGA
+ * device about the content of screen updates, but newer SVGA devices
+ * ignore these.
*
* Availability:
* SVGA_FIFO_CAP_SCREEN_OBJECT or SVGA_FIFO_CAP_SCREEN_OBJECT_2
*/
typedef
+#include "vmware_pack_begin.h"
struct {
SVGAColorBGRX color;
-} SVGAFifoCmdAnnotationFill;
+}
+#include "vmware_pack_end.h"
+SVGAFifoCmdAnnotationFill;
/*
* SVGA_CMD_ANNOTATION_COPY --
*
- * This is a blit annotation. See SVGA_CMD_ANNOTATION_FILL for more
- * information about annotations.
- *
- * This annotation is a promise about the contents of the next
- * blit: The video driver is guaranteeing that all pixels in that
- * blit will have the same value as those which already exist at an
- * identically-sized region on the same or a different screen.
- *
- * Note that the source pixels for the COPY in this annotation are
- * sampled before applying the annotation's associated blit. They
- * are allowed to overlap with the blit's destination pixels.
- *
- * The copy source rectangle is specified the same way as the blit
- * destination: it can be a rectangle which spans zero or more
- * screens, specified relative to either a screen or to the virtual
- * coordinate system's origin. If the source rectangle includes
- * pixels which are not from exactly one screen, the results are
- * undefined.
+ * The annotation commands have been deprecated and should not be used
+ * by new drivers. They used to provide performance hints to the SVGA
+ * device about the content of screen updates, but newer SVGA devices
+ * ignore these.
*
* Availability:
* SVGA_FIFO_CAP_SCREEN_OBJECT or SVGA_FIFO_CAP_SCREEN_OBJECT_2
*/
typedef
+#include "vmware_pack_begin.h"
struct {
SVGASignedPoint srcOrigin;
uint32 srcScreenId;
-} SVGAFifoCmdAnnotationCopy;
+}
+#include "vmware_pack_end.h"
+SVGAFifoCmdAnnotationCopy;
/*
@@ -1504,10 +1826,13 @@ struct {
*/
typedef
+#include "vmware_pack_begin.h"
struct {
uint32 gmrId;
uint32 numPages;
-} SVGAFifoCmdDefineGMR2;
+}
+#include "vmware_pack_end.h"
+SVGAFifoCmdDefineGMR2;
/*
@@ -1546,6 +1871,7 @@ typedef enum {
} SVGARemapGMR2Flags;
typedef
+#include "vmware_pack_begin.h"
struct {
uint32 gmrId;
SVGARemapGMR2Flags flags;
@@ -1559,6 +1885,52 @@ struct {
* (according to flag SVGA_REMAP_GMR2_PPN64) follows. If flag
* SVGA_REMAP_GMR2_SINGLE_PPN is set, array contains a single entry.
*/
-} SVGAFifoCmdRemapGMR2;
+}
+#include "vmware_pack_end.h"
+SVGAFifoCmdRemapGMR2;
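
A minimal sketch of the size computation implied by the comment above.
The flag values are assumptions mirroring SVGARemapGMR2Flags; the real
definitions live in that enum.

	#include <stddef.h>
	#include <stdint.h>

	#define REMAP_GMR2_PPN64      (1 << 1)	/* assumed flag value */
	#define REMAP_GMR2_SINGLE_PPN (1 << 2)	/* assumed flag value */

	/* Bytes of PPN data following the command: one 32- or 64-bit
	 * entry per page, collapsed to one entry when SINGLE_PPN is set. */
	static size_t remap_payload_size(uint32_t flags, uint32_t num_pages)
	{
		size_t entry = (flags & REMAP_GMR2_PPN64) ? sizeof(uint64_t)
							  : sizeof(uint32_t);
		size_t count = (flags & REMAP_GMR2_SINGLE_PPN) ? 1 : num_pages;

		return entry * count;
	}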
+
+
+/*
+ * Size of SVGA device memory such as frame buffer and FIFO.
+ */
+#define SVGA_VRAM_MIN_SIZE (4 * 640 * 480) /* bytes */
+#define SVGA_VRAM_MIN_SIZE_3D (16 * 1024 * 1024)
+#define SVGA_VRAM_MAX_SIZE (128 * 1024 * 1024)
+#define SVGA_MEMORY_SIZE_MAX (1024 * 1024 * 1024)
+#define SVGA_FIFO_SIZE_MAX (2 * 1024 * 1024)
+#define SVGA_GRAPHICS_MEMORY_KB_MIN (32 * 1024)
+#define SVGA_GRAPHICS_MEMORY_KB_MAX (2 * 1024 * 1024)
+#define SVGA_GRAPHICS_MEMORY_KB_DEFAULT (256 * 1024)
+
+#define SVGA_VRAM_SIZE_W2K (64 * 1024 * 1024) /* 64 MB */
+
+/*
+ * To simplify autoDetect display configuration, support a minimum of
+ * two 1920x1200 monitors, 32bpp, side-by-side, optionally rotated:
+ * numDisplays = 2
+ * maxWidth = numDisplays * 1920 = 3840
+ * maxHeight = rotated width of single monitor = 1920
+ * vramSize = maxWidth * maxHeight * 4 = 29491200
+ */
+#define SVGA_VRAM_SIZE_AUTODETECT (32 * 1024 * 1024)
+
+#if defined(VMX86_SERVER)
+#define SVGA_VRAM_SIZE (4 * 1024 * 1024)
+#define SVGA_VRAM_SIZE_3D (64 * 1024 * 1024)
+#define SVGA_FIFO_SIZE (256 * 1024)
+#define SVGA_FIFO_SIZE_3D (516 * 1024)
+#define SVGA_MEMORY_SIZE_DEFAULT (160 * 1024 * 1024)
+#define SVGA_AUTODETECT_DEFAULT FALSE
+#else
+#define SVGA_VRAM_SIZE (16 * 1024 * 1024)
+#define SVGA_VRAM_SIZE_3D SVGA_VRAM_MAX_SIZE
+#define SVGA_FIFO_SIZE (2 * 1024 * 1024)
+#define SVGA_FIFO_SIZE_3D SVGA_FIFO_SIZE
+#define SVGA_MEMORY_SIZE_DEFAULT (768 * 1024 * 1024)
+#define SVGA_AUTODETECT_DEFAULT TRUE
+#endif
+
+#define SVGA_FIFO_SIZE_GBOBJECTS (256 * 1024)
+#define SVGA_VRAM_SIZE_GBOBJECTS (4 * 1024 * 1024)
#endif
diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga_types.h b/drivers/gpu/drm/vmwgfx/device_include/svga_types.h
new file mode 100644
index 000000000000..2e8ba4df8de9
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/device_include/svga_types.h
@@ -0,0 +1,46 @@
+/**********************************************************
+ * Copyright 2015 VMware, Inc. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy,
+ * modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ **********************************************************/
+#ifndef _VM_BASIC_TYPES_H_
+#define _VM_BASIC_TYPES_H_
+#include <linux/kernel.h>
+
+typedef u32 uint32;
+typedef s32 int32;
+typedef u64 uint64;
+typedef u16 uint16;
+typedef s16 int16;
+typedef u8 uint8;
+typedef s8 int8;
+
+typedef uint64 PA;
+typedef uint32 PPN;
+typedef uint64 PPN64;
+
+typedef bool Bool;
+
+#define MAX_UINT32 U32_MAX
+#define MAX_UINT16 U16_MAX
+
+#endif
diff --git a/drivers/gpu/drm/vmwgfx/device_include/vm_basic_types.h b/drivers/gpu/drm/vmwgfx/device_include/vm_basic_types.h
new file mode 100644
index 000000000000..120eab830eaf
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/device_include/vm_basic_types.h
@@ -0,0 +1,21 @@
+#ifndef _VM_BASIC_TYPES_H_
+#define _VM_BASIC_TYPES_H_
+#include <linux/kernel.h>
+
+typedef u32 uint32;
+typedef s32 int32;
+typedef u64 uint64;
+typedef u16 uint16;
+typedef s16 int16;
+typedef u8 uint8;
+typedef s8 int8;
+
+typedef uint64 PA;
+typedef uint32 PPN;
+typedef uint64 PPN64;
+
+typedef bool Bool;
+
+#define MAX_UINT32 U32_MAX
+
+#endif
diff --git a/drivers/gpu/drm/vmwgfx/device_include/vmware_pack_begin.h b/drivers/gpu/drm/vmwgfx/device_include/vmware_pack_begin.h
new file mode 100644
index 000000000000..7e7b0ce34aa2
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/device_include/vmware_pack_begin.h
@@ -0,0 +1,25 @@
+/**********************************************************
+ * Copyright 2015 VMware, Inc. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy,
+ * modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ **********************************************************/
+#include <linux/compiler.h>
diff --git a/drivers/gpu/drm/vmwgfx/device_include/vmware_pack_end.h b/drivers/gpu/drm/vmwgfx/device_include/vmware_pack_end.h
new file mode 100644
index 000000000000..e2e440ed3d44
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/device_include/vmware_pack_end.h
@@ -0,0 +1,25 @@
+/**********************************************************
+ * Copyright 2015 VMware, Inc. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy,
+ * modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ **********************************************************/
+__packed
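
Taken together, the two headers are what make every struct in this patch
packed: vmware_pack_begin.h pulls in <linux/compiler.h> (which provides
__packed) and vmware_pack_end.h splices the attribute in front of the
typedef name. A sketch of the expansion under gcc:

	typedef
	#include "vmware_pack_begin.h"
	struct {
		uint32 x;
	}
	#include "vmware_pack_end.h"
	Example;

	/* ...preprocesses to: */

	typedef struct {
		uint32 x;
	} __packed Example;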
diff --git a/drivers/gpu/drm/vmwgfx/svga3d_reg.h b/drivers/gpu/drm/vmwgfx/svga3d_reg.h
deleted file mode 100644
index f58dc7dd15c5..000000000000
--- a/drivers/gpu/drm/vmwgfx/svga3d_reg.h
+++ /dev/null
@@ -1,2627 +0,0 @@
-/**********************************************************
- * Copyright 1998-2009 VMware, Inc. All rights reserved.
- *
- * Permission is hereby granted, free of charge, to any person
- * obtaining a copy of this software and associated documentation
- * files (the "Software"), to deal in the Software without
- * restriction, including without limitation the rights to use, copy,
- * modify, merge, publish, distribute, sublicense, and/or sell copies
- * of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be
- * included in all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- **********************************************************/
-
-/*
- * svga3d_reg.h --
- *
- * SVGA 3D hardware definitions
- */
-
-#ifndef _SVGA3D_REG_H_
-#define _SVGA3D_REG_H_
-
-#include "svga_reg.h"
-
-typedef uint32 PPN;
-typedef __le64 PPN64;
-
-/*
- * 3D Hardware Version
- *
- * The hardware version is stored in the SVGA_FIFO_3D_HWVERSION fifo
- * register. It is set by the host and read by the guest. This lets
- * us make new guest drivers which are backwards-compatible with old
- * SVGA hardware revisions. It does not let us support old guest
- * drivers. Good enough for now.
- *
- */
-
-#define SVGA3D_MAKE_HWVERSION(major, minor) (((major) << 16) | ((minor) & 0xFF))
-#define SVGA3D_MAJOR_HWVERSION(version) ((version) >> 16)
-#define SVGA3D_MINOR_HWVERSION(version) ((version) & 0xFF)
-
-typedef enum {
- SVGA3D_HWVERSION_WS5_RC1 = SVGA3D_MAKE_HWVERSION(0, 1),
- SVGA3D_HWVERSION_WS5_RC2 = SVGA3D_MAKE_HWVERSION(0, 2),
- SVGA3D_HWVERSION_WS51_RC1 = SVGA3D_MAKE_HWVERSION(0, 3),
- SVGA3D_HWVERSION_WS6_B1 = SVGA3D_MAKE_HWVERSION(1, 1),
- SVGA3D_HWVERSION_FUSION_11 = SVGA3D_MAKE_HWVERSION(1, 4),
- SVGA3D_HWVERSION_WS65_B1 = SVGA3D_MAKE_HWVERSION(2, 0),
- SVGA3D_HWVERSION_WS8_B1 = SVGA3D_MAKE_HWVERSION(2, 1),
- SVGA3D_HWVERSION_CURRENT = SVGA3D_HWVERSION_WS8_B1,
-} SVGA3dHardwareVersion;
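
A quick worked check of the version packing above:

	/*
	 * SVGA3D_MAKE_HWVERSION(2, 1)          == (2 << 16) | (1 & 0xFF) == 0x00020001
	 * SVGA3D_MAJOR_HWVERSION(0x00020001)   == 2
	 * SVGA3D_MINOR_HWVERSION(0x00020001)   == 1
	 *
	 * so SVGA3D_HWVERSION_WS8_B1 encodes major version 2, minor version 1.
	 */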
-
-/*
- * Generic Types
- */
-
-typedef uint32 SVGA3dBool; /* 32-bit Bool definition */
-#define SVGA3D_NUM_CLIPPLANES 6
-#define SVGA3D_MAX_SIMULTANEOUS_RENDER_TARGETS 8
-#define SVGA3D_MAX_CONTEXT_IDS 256
-#define SVGA3D_MAX_SURFACE_IDS (32 * 1024)
-
-#define SVGA3D_NUM_TEXTURE_UNITS 32
-#define SVGA3D_NUM_LIGHTS 8
-
-/*
- * Surface formats.
- *
- * If you modify this list, be sure to keep GLUtil.c in sync. It
- * includes the internal format definition of each surface in
- * GLUtil_ConvertSurfaceFormat, and it contains a table of
- * human-readable names in GLUtil_GetFormatName.
- */
-
-typedef enum SVGA3dSurfaceFormat {
- SVGA3D_FORMAT_MIN = 0,
- SVGA3D_FORMAT_INVALID = 0,
-
- SVGA3D_X8R8G8B8 = 1,
- SVGA3D_A8R8G8B8 = 2,
-
- SVGA3D_R5G6B5 = 3,
- SVGA3D_X1R5G5B5 = 4,
- SVGA3D_A1R5G5B5 = 5,
- SVGA3D_A4R4G4B4 = 6,
-
- SVGA3D_Z_D32 = 7,
- SVGA3D_Z_D16 = 8,
- SVGA3D_Z_D24S8 = 9,
- SVGA3D_Z_D15S1 = 10,
-
- SVGA3D_LUMINANCE8 = 11,
- SVGA3D_LUMINANCE4_ALPHA4 = 12,
- SVGA3D_LUMINANCE16 = 13,
- SVGA3D_LUMINANCE8_ALPHA8 = 14,
-
- SVGA3D_DXT1 = 15,
- SVGA3D_DXT2 = 16,
- SVGA3D_DXT3 = 17,
- SVGA3D_DXT4 = 18,
- SVGA3D_DXT5 = 19,
-
- SVGA3D_BUMPU8V8 = 20,
- SVGA3D_BUMPL6V5U5 = 21,
- SVGA3D_BUMPX8L8V8U8 = 22,
- SVGA3D_BUMPL8V8U8 = 23,
-
- SVGA3D_ARGB_S10E5 = 24, /* 16-bit floating-point ARGB */
- SVGA3D_ARGB_S23E8 = 25, /* 32-bit floating-point ARGB */
-
- SVGA3D_A2R10G10B10 = 26,
-
- /* signed formats */
- SVGA3D_V8U8 = 27,
- SVGA3D_Q8W8V8U8 = 28,
- SVGA3D_CxV8U8 = 29,
-
- /* mixed formats */
- SVGA3D_X8L8V8U8 = 30,
- SVGA3D_A2W10V10U10 = 31,
-
- SVGA3D_ALPHA8 = 32,
-
- /* Single- and dual-component floating point formats */
- SVGA3D_R_S10E5 = 33,
- SVGA3D_R_S23E8 = 34,
- SVGA3D_RG_S10E5 = 35,
- SVGA3D_RG_S23E8 = 36,
-
- SVGA3D_BUFFER = 37,
-
- SVGA3D_Z_D24X8 = 38,
-
- SVGA3D_V16U16 = 39,
-
- SVGA3D_G16R16 = 40,
- SVGA3D_A16B16G16R16 = 41,
-
- /* Packed Video formats */
- SVGA3D_UYVY = 42,
- SVGA3D_YUY2 = 43,
-
- /* Planar video formats */
- SVGA3D_NV12 = 44,
-
- /* Video format with alpha */
- SVGA3D_AYUV = 45,
-
- SVGA3D_R32G32B32A32_TYPELESS = 46,
- SVGA3D_R32G32B32A32_FLOAT = 25,
- SVGA3D_R32G32B32A32_UINT = 47,
- SVGA3D_R32G32B32A32_SINT = 48,
- SVGA3D_R32G32B32_TYPELESS = 49,
- SVGA3D_R32G32B32_FLOAT = 50,
- SVGA3D_R32G32B32_UINT = 51,
- SVGA3D_R32G32B32_SINT = 52,
- SVGA3D_R16G16B16A16_TYPELESS = 53,
- SVGA3D_R16G16B16A16_FLOAT = 24,
- SVGA3D_R16G16B16A16_UNORM = 41,
- SVGA3D_R16G16B16A16_UINT = 54,
- SVGA3D_R16G16B16A16_SNORM = 55,
- SVGA3D_R16G16B16A16_SINT = 56,
- SVGA3D_R32G32_TYPELESS = 57,
- SVGA3D_R32G32_FLOAT = 36,
- SVGA3D_R32G32_UINT = 58,
- SVGA3D_R32G32_SINT = 59,
- SVGA3D_R32G8X24_TYPELESS = 60,
- SVGA3D_D32_FLOAT_S8X24_UINT = 61,
- SVGA3D_R32_FLOAT_X8X24_TYPELESS = 62,
- SVGA3D_X32_TYPELESS_G8X24_UINT = 63,
- SVGA3D_R10G10B10A2_TYPELESS = 64,
- SVGA3D_R10G10B10A2_UNORM = 26,
- SVGA3D_R10G10B10A2_UINT = 65,
- SVGA3D_R11G11B10_FLOAT = 66,
- SVGA3D_R8G8B8A8_TYPELESS = 67,
- SVGA3D_R8G8B8A8_UNORM = 68,
- SVGA3D_R8G8B8A8_UNORM_SRGB = 69,
- SVGA3D_R8G8B8A8_UINT = 70,
- SVGA3D_R8G8B8A8_SNORM = 28,
- SVGA3D_R8G8B8A8_SINT = 71,
- SVGA3D_R16G16_TYPELESS = 72,
- SVGA3D_R16G16_FLOAT = 35,
- SVGA3D_R16G16_UNORM = 40,
- SVGA3D_R16G16_UINT = 73,
- SVGA3D_R16G16_SNORM = 39,
- SVGA3D_R16G16_SINT = 74,
- SVGA3D_R32_TYPELESS = 75,
- SVGA3D_D32_FLOAT = 76,
- SVGA3D_R32_FLOAT = 34,
- SVGA3D_R32_UINT = 77,
- SVGA3D_R32_SINT = 78,
- SVGA3D_R24G8_TYPELESS = 79,
- SVGA3D_D24_UNORM_S8_UINT = 80,
- SVGA3D_R24_UNORM_X8_TYPELESS = 81,
- SVGA3D_X24_TYPELESS_G8_UINT = 82,
- SVGA3D_R8G8_TYPELESS = 83,
- SVGA3D_R8G8_UNORM = 84,
- SVGA3D_R8G8_UINT = 85,
- SVGA3D_R8G8_SNORM = 27,
- SVGA3D_R8G8_SINT = 86,
- SVGA3D_R16_TYPELESS = 87,
- SVGA3D_R16_FLOAT = 33,
- SVGA3D_D16_UNORM = 8,
- SVGA3D_R16_UNORM = 88,
- SVGA3D_R16_UINT = 89,
- SVGA3D_R16_SNORM = 90,
- SVGA3D_R16_SINT = 91,
- SVGA3D_R8_TYPELESS = 92,
- SVGA3D_R8_UNORM = 93,
- SVGA3D_R8_UINT = 94,
- SVGA3D_R8_SNORM = 95,
- SVGA3D_R8_SINT = 96,
- SVGA3D_A8_UNORM = 32,
- SVGA3D_R1_UNORM = 97,
- SVGA3D_R9G9B9E5_SHAREDEXP = 98,
- SVGA3D_R8G8_B8G8_UNORM = 99,
- SVGA3D_G8R8_G8B8_UNORM = 100,
- SVGA3D_BC1_TYPELESS = 101,
- SVGA3D_BC1_UNORM = 15,
- SVGA3D_BC1_UNORM_SRGB = 102,
- SVGA3D_BC2_TYPELESS = 103,
- SVGA3D_BC2_UNORM = 17,
- SVGA3D_BC2_UNORM_SRGB = 104,
- SVGA3D_BC3_TYPELESS = 105,
- SVGA3D_BC3_UNORM = 19,
- SVGA3D_BC3_UNORM_SRGB = 106,
- SVGA3D_BC4_TYPELESS = 107,
- SVGA3D_BC4_UNORM = 108,
- SVGA3D_BC4_SNORM = 109,
- SVGA3D_BC5_TYPELESS = 110,
- SVGA3D_BC5_UNORM = 111,
- SVGA3D_BC5_SNORM = 112,
- SVGA3D_B5G6R5_UNORM = 3,
- SVGA3D_B5G5R5A1_UNORM = 5,
- SVGA3D_B8G8R8A8_UNORM = 2,
- SVGA3D_B8G8R8X8_UNORM = 1,
- SVGA3D_R10G10B10_XR_BIAS_A2_UNORM = 113,
- SVGA3D_B8G8R8A8_TYPELESS = 114,
- SVGA3D_B8G8R8A8_UNORM_SRGB = 115,
- SVGA3D_B8G8R8X8_TYPELESS = 116,
- SVGA3D_B8G8R8X8_UNORM_SRGB = 117,
-
- /* Advanced D3D9 depth formats. */
- SVGA3D_Z_DF16 = 118,
- SVGA3D_Z_DF24 = 119,
- SVGA3D_Z_D24S8_INT = 120,
-
- /* Planar video formats. */
- SVGA3D_YV12 = 121,
-
- SVGA3D_FORMAT_MAX = 122,
-} SVGA3dSurfaceFormat;
-
-typedef uint32 SVGA3dColor; /* a, r, g, b */
-
-/*
- * These match the D3DFORMAT_OP definitions used by Direct3D. We need
- * them so that we can query the host for what the supported surface
- * operations are (when we're using the D3D backend, in particular),
- * and so we can send those operations to the guest.
- */
-typedef enum {
- SVGA3DFORMAT_OP_TEXTURE = 0x00000001,
- SVGA3DFORMAT_OP_VOLUMETEXTURE = 0x00000002,
- SVGA3DFORMAT_OP_CUBETEXTURE = 0x00000004,
- SVGA3DFORMAT_OP_OFFSCREEN_RENDERTARGET = 0x00000008,
- SVGA3DFORMAT_OP_SAME_FORMAT_RENDERTARGET = 0x00000010,
- SVGA3DFORMAT_OP_ZSTENCIL = 0x00000040,
- SVGA3DFORMAT_OP_ZSTENCIL_WITH_ARBITRARY_COLOR_DEPTH = 0x00000080,
-
-/*
- * This format can be used as a render target if the current display mode
- * has the same depth, provided the alpha channel is ignored. e.g. if the device
- * can render to A8R8G8B8 when the display mode is X8R8G8B8, then the
- * format op list entry for A8R8G8B8 should have this cap.
- */
- SVGA3DFORMAT_OP_SAME_FORMAT_UP_TO_ALPHA_RENDERTARGET = 0x00000100,
-
-/*
- * This format contains DirectDraw support (including Flip). This flag
- * should not be set on alpha formats.
- */
- SVGA3DFORMAT_OP_DISPLAYMODE = 0x00000400,
-
-/*
- * The rasterizer can support some level of Direct3D in this format, which
- * implies that the driver can create a Context in this mode (for some
- * render target format). When this flag is set, the SVGA3DFORMAT_OP_DISPLAYMODE
- * flag must also be set.
- */
- SVGA3DFORMAT_OP_3DACCELERATION = 0x00000800,
-
-/*
- * This is set for a private format when the driver has put the bpp in
- * the structure.
- */
- SVGA3DFORMAT_OP_PIXELSIZE = 0x00001000,
-
-/*
- * Indicates that this format can be converted to any RGB format for which
- * SVGA3DFORMAT_OP_MEMBEROFGROUP_ARGB is specified
- */
- SVGA3DFORMAT_OP_CONVERT_TO_ARGB = 0x00002000,
-
-/*
- * Indicates that this format can be used to create offscreen plain surfaces.
- */
- SVGA3DFORMAT_OP_OFFSCREENPLAIN = 0x00004000,
-
-/*
- * Indicates that this format can be read as an SRGB texture (meaning that the
- * sampler will linearize the looked up data)
- */
- SVGA3DFORMAT_OP_SRGBREAD = 0x00008000,
-
-/*
- * Indicates that this format can be used in the bumpmap instructions
- */
- SVGA3DFORMAT_OP_BUMPMAP = 0x00010000,
-
-/*
- * Indicates that this format can be sampled by the displacement map sampler
- */
- SVGA3DFORMAT_OP_DMAP = 0x00020000,
-
-/*
- * Indicates that this format cannot be used with texture filtering
- */
- SVGA3DFORMAT_OP_NOFILTER = 0x00040000,
-
-/*
- * Indicates that format conversions are supported to this RGB format if
- * SVGA3DFORMAT_OP_CONVERT_TO_ARGB is specified in the source format.
- */
- SVGA3DFORMAT_OP_MEMBEROFGROUP_ARGB = 0x00080000,
-
-/*
- * Indicates that this format can be written as an SRGB target (meaning that the
- * pixel pipe will DE-linearize data on output to the format)
- */
- SVGA3DFORMAT_OP_SRGBWRITE = 0x00100000,
-
-/*
- * Indicates that this format cannot be used with alpha blending
- */
- SVGA3DFORMAT_OP_NOALPHABLEND = 0x00200000,
-
-/*
- * Indicates that the device can auto-generate sublevels for resources
- * of this format
- */
- SVGA3DFORMAT_OP_AUTOGENMIPMAP = 0x00400000,
-
-/*
- * Indicates that this format can be used by vertex texture sampler
- */
- SVGA3DFORMAT_OP_VERTEXTEXTURE = 0x00800000,
-
-/*
- * Indicates that this format supports neither texture coordinate wrap
- * modes, nor mipmapping
- */
- SVGA3DFORMAT_OP_NOTEXCOORDWRAPNORMIP = 0x01000000
-} SVGA3dFormatOp;
-
-/*
- * This structure is a conversion of SVGA3DFORMAT_OP_*.
- * Entries must be located at the same position.
- */
-typedef union {
- uint32 value;
- struct {
- uint32 texture : 1;
- uint32 volumeTexture : 1;
- uint32 cubeTexture : 1;
- uint32 offscreenRenderTarget : 1;
- uint32 sameFormatRenderTarget : 1;
- uint32 unknown1 : 1;
- uint32 zStencil : 1;
- uint32 zStencilArbitraryDepth : 1;
- uint32 sameFormatUpToAlpha : 1;
- uint32 unknown2 : 1;
- uint32 displayMode : 1;
- uint32 acceleration3d : 1;
- uint32 pixelSize : 1;
- uint32 convertToARGB : 1;
- uint32 offscreenPlain : 1;
- uint32 sRGBRead : 1;
- uint32 bumpMap : 1;
- uint32 dmap : 1;
- uint32 noFilter : 1;
- uint32 memberOfGroupARGB : 1;
- uint32 sRGBWrite : 1;
- uint32 noAlphaBlend : 1;
- uint32 autoGenMipMap : 1;
- uint32 vertexTexture : 1;
- uint32 noTexCoordWrapNorMip : 1;
- };
-} SVGA3dSurfaceFormatCaps;
-
-/*
- * SVGA_3D_CMD_SETRENDERSTATE Types. All value types
- * must fit in a uint32.
- */
-
-typedef enum {
- SVGA3D_RS_INVALID = 0,
- SVGA3D_RS_ZENABLE = 1, /* SVGA3dBool */
- SVGA3D_RS_ZWRITEENABLE = 2, /* SVGA3dBool */
- SVGA3D_RS_ALPHATESTENABLE = 3, /* SVGA3dBool */
- SVGA3D_RS_DITHERENABLE = 4, /* SVGA3dBool */
- SVGA3D_RS_BLENDENABLE = 5, /* SVGA3dBool */
- SVGA3D_RS_FOGENABLE = 6, /* SVGA3dBool */
- SVGA3D_RS_SPECULARENABLE = 7, /* SVGA3dBool */
- SVGA3D_RS_STENCILENABLE = 8, /* SVGA3dBool */
- SVGA3D_RS_LIGHTINGENABLE = 9, /* SVGA3dBool */
- SVGA3D_RS_NORMALIZENORMALS = 10, /* SVGA3dBool */
- SVGA3D_RS_POINTSPRITEENABLE = 11, /* SVGA3dBool */
- SVGA3D_RS_POINTSCALEENABLE = 12, /* SVGA3dBool */
- SVGA3D_RS_STENCILREF = 13, /* uint32 */
- SVGA3D_RS_STENCILMASK = 14, /* uint32 */
- SVGA3D_RS_STENCILWRITEMASK = 15, /* uint32 */
- SVGA3D_RS_FOGSTART = 16, /* float */
- SVGA3D_RS_FOGEND = 17, /* float */
- SVGA3D_RS_FOGDENSITY = 18, /* float */
- SVGA3D_RS_POINTSIZE = 19, /* float */
- SVGA3D_RS_POINTSIZEMIN = 20, /* float */
- SVGA3D_RS_POINTSIZEMAX = 21, /* float */
- SVGA3D_RS_POINTSCALE_A = 22, /* float */
- SVGA3D_RS_POINTSCALE_B = 23, /* float */
- SVGA3D_RS_POINTSCALE_C = 24, /* float */
- SVGA3D_RS_FOGCOLOR = 25, /* SVGA3dColor */
- SVGA3D_RS_AMBIENT = 26, /* SVGA3dColor */
- SVGA3D_RS_CLIPPLANEENABLE = 27, /* SVGA3dClipPlanes */
- SVGA3D_RS_FOGMODE = 28, /* SVGA3dFogMode */
- SVGA3D_RS_FILLMODE = 29, /* SVGA3dFillMode */
- SVGA3D_RS_SHADEMODE = 30, /* SVGA3dShadeMode */
- SVGA3D_RS_LINEPATTERN = 31, /* SVGA3dLinePattern */
- SVGA3D_RS_SRCBLEND = 32, /* SVGA3dBlendOp */
- SVGA3D_RS_DSTBLEND = 33, /* SVGA3dBlendOp */
- SVGA3D_RS_BLENDEQUATION = 34, /* SVGA3dBlendEquation */
- SVGA3D_RS_CULLMODE = 35, /* SVGA3dFace */
- SVGA3D_RS_ZFUNC = 36, /* SVGA3dCmpFunc */
- SVGA3D_RS_ALPHAFUNC = 37, /* SVGA3dCmpFunc */
- SVGA3D_RS_STENCILFUNC = 38, /* SVGA3dCmpFunc */
- SVGA3D_RS_STENCILFAIL = 39, /* SVGA3dStencilOp */
- SVGA3D_RS_STENCILZFAIL = 40, /* SVGA3dStencilOp */
- SVGA3D_RS_STENCILPASS = 41, /* SVGA3dStencilOp */
- SVGA3D_RS_ALPHAREF = 42, /* float (0.0 .. 1.0) */
- SVGA3D_RS_FRONTWINDING = 43, /* SVGA3dFrontWinding */
- SVGA3D_RS_COORDINATETYPE = 44, /* SVGA3dCoordinateType */
- SVGA3D_RS_ZBIAS = 45, /* float */
- SVGA3D_RS_RANGEFOGENABLE = 46, /* SVGA3dBool */
- SVGA3D_RS_COLORWRITEENABLE = 47, /* SVGA3dColorMask */
- SVGA3D_RS_VERTEXMATERIALENABLE = 48, /* SVGA3dBool */
- SVGA3D_RS_DIFFUSEMATERIALSOURCE = 49, /* SVGA3dVertexMaterial */
- SVGA3D_RS_SPECULARMATERIALSOURCE = 50, /* SVGA3dVertexMaterial */
- SVGA3D_RS_AMBIENTMATERIALSOURCE = 51, /* SVGA3dVertexMaterial */
- SVGA3D_RS_EMISSIVEMATERIALSOURCE = 52, /* SVGA3dVertexMaterial */
- SVGA3D_RS_TEXTUREFACTOR = 53, /* SVGA3dColor */
- SVGA3D_RS_LOCALVIEWER = 54, /* SVGA3dBool */
- SVGA3D_RS_SCISSORTESTENABLE = 55, /* SVGA3dBool */
- SVGA3D_RS_BLENDCOLOR = 56, /* SVGA3dColor */
- SVGA3D_RS_STENCILENABLE2SIDED = 57, /* SVGA3dBool */
- SVGA3D_RS_CCWSTENCILFUNC = 58, /* SVGA3dCmpFunc */
- SVGA3D_RS_CCWSTENCILFAIL = 59, /* SVGA3dStencilOp */
- SVGA3D_RS_CCWSTENCILZFAIL = 60, /* SVGA3dStencilOp */
- SVGA3D_RS_CCWSTENCILPASS = 61, /* SVGA3dStencilOp */
- SVGA3D_RS_VERTEXBLEND = 62, /* SVGA3dVertexBlendFlags */
- SVGA3D_RS_SLOPESCALEDEPTHBIAS = 63, /* float */
- SVGA3D_RS_DEPTHBIAS = 64, /* float */
-
-
- /*
- * Output Gamma Level
- *
- * Output gamma affects the gamma curve of colors that are output from the
- * rendering pipeline. A value of 1.0 specifies a linear color space. If the
- * value is <= 0.0, gamma correction is ignored and linear color space is
- * used.
- */
-
- SVGA3D_RS_OUTPUTGAMMA = 65, /* float */
- SVGA3D_RS_ZVISIBLE = 66, /* SVGA3dBool */
- SVGA3D_RS_LASTPIXEL = 67, /* SVGA3dBool */
- SVGA3D_RS_CLIPPING = 68, /* SVGA3dBool */
- SVGA3D_RS_WRAP0 = 69, /* SVGA3dWrapFlags */
- SVGA3D_RS_WRAP1 = 70, /* SVGA3dWrapFlags */
- SVGA3D_RS_WRAP2 = 71, /* SVGA3dWrapFlags */
- SVGA3D_RS_WRAP3 = 72, /* SVGA3dWrapFlags */
- SVGA3D_RS_WRAP4 = 73, /* SVGA3dWrapFlags */
- SVGA3D_RS_WRAP5 = 74, /* SVGA3dWrapFlags */
- SVGA3D_RS_WRAP6 = 75, /* SVGA3dWrapFlags */
- SVGA3D_RS_WRAP7 = 76, /* SVGA3dWrapFlags */
- SVGA3D_RS_WRAP8 = 77, /* SVGA3dWrapFlags */
- SVGA3D_RS_WRAP9 = 78, /* SVGA3dWrapFlags */
- SVGA3D_RS_WRAP10 = 79, /* SVGA3dWrapFlags */
- SVGA3D_RS_WRAP11 = 80, /* SVGA3dWrapFlags */
- SVGA3D_RS_WRAP12 = 81, /* SVGA3dWrapFlags */
- SVGA3D_RS_WRAP13 = 82, /* SVGA3dWrapFlags */
- SVGA3D_RS_WRAP14 = 83, /* SVGA3dWrapFlags */
- SVGA3D_RS_WRAP15 = 84, /* SVGA3dWrapFlags */
- SVGA3D_RS_MULTISAMPLEANTIALIAS = 85, /* SVGA3dBool */
- SVGA3D_RS_MULTISAMPLEMASK = 86, /* uint32 */
- SVGA3D_RS_INDEXEDVERTEXBLENDENABLE = 87, /* SVGA3dBool */
- SVGA3D_RS_TWEENFACTOR = 88, /* float */
- SVGA3D_RS_ANTIALIASEDLINEENABLE = 89, /* SVGA3dBool */
- SVGA3D_RS_COLORWRITEENABLE1 = 90, /* SVGA3dColorMask */
- SVGA3D_RS_COLORWRITEENABLE2 = 91, /* SVGA3dColorMask */
- SVGA3D_RS_COLORWRITEENABLE3 = 92, /* SVGA3dColorMask */
- SVGA3D_RS_SEPARATEALPHABLENDENABLE = 93, /* SVGA3dBool */
- SVGA3D_RS_SRCBLENDALPHA = 94, /* SVGA3dBlendOp */
- SVGA3D_RS_DSTBLENDALPHA = 95, /* SVGA3dBlendOp */
- SVGA3D_RS_BLENDEQUATIONALPHA = 96, /* SVGA3dBlendEquation */
- SVGA3D_RS_TRANSPARENCYANTIALIAS = 97, /* SVGA3dTransparencyAntialiasType */
- SVGA3D_RS_LINEAA = 98, /* SVGA3dBool */
- SVGA3D_RS_LINEWIDTH = 99, /* float */
- SVGA3D_RS_MAX
-} SVGA3dRenderStateName;
-
-typedef enum {
- SVGA3D_TRANSPARENCYANTIALIAS_NORMAL = 0,
- SVGA3D_TRANSPARENCYANTIALIAS_ALPHATOCOVERAGE = 1,
- SVGA3D_TRANSPARENCYANTIALIAS_SUPERSAMPLE = 2,
- SVGA3D_TRANSPARENCYANTIALIAS_MAX
-} SVGA3dTransparencyAntialiasType;
-
-typedef enum {
- SVGA3D_VERTEXMATERIAL_NONE = 0, /* Use the value in the current material */
- SVGA3D_VERTEXMATERIAL_DIFFUSE = 1, /* Use the value in the diffuse component */
- SVGA3D_VERTEXMATERIAL_SPECULAR = 2, /* Use the value in the specular component */
-} SVGA3dVertexMaterial;
-
-typedef enum {
- SVGA3D_FILLMODE_INVALID = 0,
- SVGA3D_FILLMODE_POINT = 1,
- SVGA3D_FILLMODE_LINE = 2,
- SVGA3D_FILLMODE_FILL = 3,
- SVGA3D_FILLMODE_MAX
-} SVGA3dFillModeType;
-
-
-typedef
-union {
- struct {
- uint16 mode; /* SVGA3dFillModeType */
- uint16 face; /* SVGA3dFace */
- };
- uint32 uintValue;
-} SVGA3dFillMode;
-
-typedef enum {
- SVGA3D_SHADEMODE_INVALID = 0,
- SVGA3D_SHADEMODE_FLAT = 1,
- SVGA3D_SHADEMODE_SMOOTH = 2,
- SVGA3D_SHADEMODE_PHONG = 3, /* Not supported */
- SVGA3D_SHADEMODE_MAX
-} SVGA3dShadeMode;
-
-typedef
-union {
- struct {
- uint16 repeat;
- uint16 pattern;
- };
- uint32 uintValue;
-} SVGA3dLinePattern;
-
-typedef enum {
- SVGA3D_BLENDOP_INVALID = 0,
- SVGA3D_BLENDOP_ZERO = 1,
- SVGA3D_BLENDOP_ONE = 2,
- SVGA3D_BLENDOP_SRCCOLOR = 3,
- SVGA3D_BLENDOP_INVSRCCOLOR = 4,
- SVGA3D_BLENDOP_SRCALPHA = 5,
- SVGA3D_BLENDOP_INVSRCALPHA = 6,
- SVGA3D_BLENDOP_DESTALPHA = 7,
- SVGA3D_BLENDOP_INVDESTALPHA = 8,
- SVGA3D_BLENDOP_DESTCOLOR = 9,
- SVGA3D_BLENDOP_INVDESTCOLOR = 10,
- SVGA3D_BLENDOP_SRCALPHASAT = 11,
- SVGA3D_BLENDOP_BLENDFACTOR = 12,
- SVGA3D_BLENDOP_INVBLENDFACTOR = 13,
- SVGA3D_BLENDOP_MAX
-} SVGA3dBlendOp;
-
-typedef enum {
- SVGA3D_BLENDEQ_INVALID = 0,
- SVGA3D_BLENDEQ_ADD = 1,
- SVGA3D_BLENDEQ_SUBTRACT = 2,
- SVGA3D_BLENDEQ_REVSUBTRACT = 3,
- SVGA3D_BLENDEQ_MINIMUM = 4,
- SVGA3D_BLENDEQ_MAXIMUM = 5,
- SVGA3D_BLENDEQ_MAX
-} SVGA3dBlendEquation;
-
-typedef enum {
- SVGA3D_FRONTWINDING_INVALID = 0,
- SVGA3D_FRONTWINDING_CW = 1,
- SVGA3D_FRONTWINDING_CCW = 2,
- SVGA3D_FRONTWINDING_MAX
-} SVGA3dFrontWinding;
-
-typedef enum {
- SVGA3D_FACE_INVALID = 0,
- SVGA3D_FACE_NONE = 1,
- SVGA3D_FACE_FRONT = 2,
- SVGA3D_FACE_BACK = 3,
- SVGA3D_FACE_FRONT_BACK = 4,
- SVGA3D_FACE_MAX
-} SVGA3dFace;
-
-/*
- * The order and the values should not be changed
- */
-
-typedef enum {
- SVGA3D_CMP_INVALID = 0,
- SVGA3D_CMP_NEVER = 1,
- SVGA3D_CMP_LESS = 2,
- SVGA3D_CMP_EQUAL = 3,
- SVGA3D_CMP_LESSEQUAL = 4,
- SVGA3D_CMP_GREATER = 5,
- SVGA3D_CMP_NOTEQUAL = 6,
- SVGA3D_CMP_GREATEREQUAL = 7,
- SVGA3D_CMP_ALWAYS = 8,
- SVGA3D_CMP_MAX
-} SVGA3dCmpFunc;
-
-/*
- * SVGA3D_FOGFUNC_* specifies the fog equation, or PER_VERTEX which allows
- * the fog factor to be specified in the alpha component of the specular
- * (a.k.a. secondary) vertex color.
- */
-typedef enum {
- SVGA3D_FOGFUNC_INVALID = 0,
- SVGA3D_FOGFUNC_EXP = 1,
- SVGA3D_FOGFUNC_EXP2 = 2,
- SVGA3D_FOGFUNC_LINEAR = 3,
- SVGA3D_FOGFUNC_PER_VERTEX = 4
-} SVGA3dFogFunction;
-
-/*
- * SVGA3D_FOGTYPE_* specifies if fog factors are computed on a per-vertex
- * or per-pixel basis.
- */
-typedef enum {
- SVGA3D_FOGTYPE_INVALID = 0,
- SVGA3D_FOGTYPE_VERTEX = 1,
- SVGA3D_FOGTYPE_PIXEL = 2,
- SVGA3D_FOGTYPE_MAX = 3
-} SVGA3dFogType;
-
-/*
- * SVGA3D_FOGBASE_* selects depth or range-based fog. Depth-based fog is
- * computed using the eye Z value of each pixel (or vertex), whereas range-
- * based fog is computed using the actual distance (range) to the eye.
- */
-typedef enum {
- SVGA3D_FOGBASE_INVALID = 0,
- SVGA3D_FOGBASE_DEPTHBASED = 1,
- SVGA3D_FOGBASE_RANGEBASED = 2,
- SVGA3D_FOGBASE_MAX = 3
-} SVGA3dFogBase;
-
-typedef enum {
- SVGA3D_STENCILOP_INVALID = 0,
- SVGA3D_STENCILOP_KEEP = 1,
- SVGA3D_STENCILOP_ZERO = 2,
- SVGA3D_STENCILOP_REPLACE = 3,
- SVGA3D_STENCILOP_INCRSAT = 4,
- SVGA3D_STENCILOP_DECRSAT = 5,
- SVGA3D_STENCILOP_INVERT = 6,
- SVGA3D_STENCILOP_INCR = 7,
- SVGA3D_STENCILOP_DECR = 8,
- SVGA3D_STENCILOP_MAX
-} SVGA3dStencilOp;
-
-typedef enum {
- SVGA3D_CLIPPLANE_0 = (1 << 0),
- SVGA3D_CLIPPLANE_1 = (1 << 1),
- SVGA3D_CLIPPLANE_2 = (1 << 2),
- SVGA3D_CLIPPLANE_3 = (1 << 3),
- SVGA3D_CLIPPLANE_4 = (1 << 4),
- SVGA3D_CLIPPLANE_5 = (1 << 5),
-} SVGA3dClipPlanes;
-
-typedef enum {
- SVGA3D_CLEAR_COLOR = 0x1,
- SVGA3D_CLEAR_DEPTH = 0x2,
- SVGA3D_CLEAR_STENCIL = 0x4
-} SVGA3dClearFlag;
-
-typedef enum {
- SVGA3D_RT_DEPTH = 0,
- SVGA3D_RT_STENCIL = 1,
- SVGA3D_RT_COLOR0 = 2,
- SVGA3D_RT_COLOR1 = 3,
- SVGA3D_RT_COLOR2 = 4,
- SVGA3D_RT_COLOR3 = 5,
- SVGA3D_RT_COLOR4 = 6,
- SVGA3D_RT_COLOR5 = 7,
- SVGA3D_RT_COLOR6 = 8,
- SVGA3D_RT_COLOR7 = 9,
- SVGA3D_RT_MAX,
- SVGA3D_RT_INVALID = ((uint32)-1),
-} SVGA3dRenderTargetType;
-
-#define SVGA3D_MAX_RT_COLOR (SVGA3D_RT_COLOR7 - SVGA3D_RT_COLOR0 + 1)
-
-typedef
-union {
- struct {
- uint32 red : 1;
- uint32 green : 1;
- uint32 blue : 1;
- uint32 alpha : 1;
- };
- uint32 uintValue;
-} SVGA3dColorMask;
-
-typedef enum {
- SVGA3D_VBLEND_DISABLE = 0,
- SVGA3D_VBLEND_1WEIGHT = 1,
- SVGA3D_VBLEND_2WEIGHT = 2,
- SVGA3D_VBLEND_3WEIGHT = 3,
-} SVGA3dVertexBlendFlags;
-
-typedef enum {
- SVGA3D_WRAPCOORD_0 = 1 << 0,
- SVGA3D_WRAPCOORD_1 = 1 << 1,
- SVGA3D_WRAPCOORD_2 = 1 << 2,
- SVGA3D_WRAPCOORD_3 = 1 << 3,
- SVGA3D_WRAPCOORD_ALL = 0xF,
-} SVGA3dWrapFlags;
-
-/*
- * SVGA_3D_CMD_TEXTURESTATE Types. All value types
- * must fit in a uint32.
- */
-
-typedef enum {
- SVGA3D_TS_INVALID = 0,
- SVGA3D_TS_BIND_TEXTURE = 1, /* SVGA3dSurfaceId */
- SVGA3D_TS_COLOROP = 2, /* SVGA3dTextureCombiner */
- SVGA3D_TS_COLORARG1 = 3, /* SVGA3dTextureArgData */
- SVGA3D_TS_COLORARG2 = 4, /* SVGA3dTextureArgData */
- SVGA3D_TS_ALPHAOP = 5, /* SVGA3dTextureCombiner */
- SVGA3D_TS_ALPHAARG1 = 6, /* SVGA3dTextureArgData */
- SVGA3D_TS_ALPHAARG2 = 7, /* SVGA3dTextureArgData */
- SVGA3D_TS_ADDRESSU = 8, /* SVGA3dTextureAddress */
- SVGA3D_TS_ADDRESSV = 9, /* SVGA3dTextureAddress */
- SVGA3D_TS_MIPFILTER = 10, /* SVGA3dTextureFilter */
- SVGA3D_TS_MAGFILTER = 11, /* SVGA3dTextureFilter */
- SVGA3D_TS_MINFILTER = 12, /* SVGA3dTextureFilter */
- SVGA3D_TS_BORDERCOLOR = 13, /* SVGA3dColor */
- SVGA3D_TS_TEXCOORDINDEX = 14, /* uint32 */
- SVGA3D_TS_TEXTURETRANSFORMFLAGS = 15, /* SVGA3dTexTransformFlags */
- SVGA3D_TS_TEXCOORDGEN = 16, /* SVGA3dTextureCoordGen */
- SVGA3D_TS_BUMPENVMAT00 = 17, /* float */
- SVGA3D_TS_BUMPENVMAT01 = 18, /* float */
- SVGA3D_TS_BUMPENVMAT10 = 19, /* float */
- SVGA3D_TS_BUMPENVMAT11 = 20, /* float */
- SVGA3D_TS_TEXTURE_MIPMAP_LEVEL = 21, /* uint32 */
- SVGA3D_TS_TEXTURE_LOD_BIAS = 22, /* float */
- SVGA3D_TS_TEXTURE_ANISOTROPIC_LEVEL = 23, /* uint32 */
- SVGA3D_TS_ADDRESSW = 24, /* SVGA3dTextureAddress */
-
-
- /*
- * Sampler Gamma Level
- *
- * Sampler gamma affects the color of samples taken from the sampler. A
- * value of 1.0 will produce linear samples. If the value is <= 0.0 the
- * gamma value is ignored and a linear space is used.
- */
-
- SVGA3D_TS_GAMMA = 25, /* float */
- SVGA3D_TS_BUMPENVLSCALE = 26, /* float */
- SVGA3D_TS_BUMPENVLOFFSET = 27, /* float */
- SVGA3D_TS_COLORARG0 = 28, /* SVGA3dTextureArgData */
- SVGA3D_TS_ALPHAARG0 = 29, /* SVGA3dTextureArgData */
- SVGA3D_TS_MAX
-} SVGA3dTextureStateName;
-
-typedef enum {
- SVGA3D_TC_INVALID = 0,
- SVGA3D_TC_DISABLE = 1,
- SVGA3D_TC_SELECTARG1 = 2,
- SVGA3D_TC_SELECTARG2 = 3,
- SVGA3D_TC_MODULATE = 4,
- SVGA3D_TC_ADD = 5,
- SVGA3D_TC_ADDSIGNED = 6,
- SVGA3D_TC_SUBTRACT = 7,
- SVGA3D_TC_BLENDTEXTUREALPHA = 8,
- SVGA3D_TC_BLENDDIFFUSEALPHA = 9,
- SVGA3D_TC_BLENDCURRENTALPHA = 10,
- SVGA3D_TC_BLENDFACTORALPHA = 11,
- SVGA3D_TC_MODULATE2X = 12,
- SVGA3D_TC_MODULATE4X = 13,
- SVGA3D_TC_DSDT = 14,
- SVGA3D_TC_DOTPRODUCT3 = 15,
- SVGA3D_TC_BLENDTEXTUREALPHAPM = 16,
- SVGA3D_TC_ADDSIGNED2X = 17,
- SVGA3D_TC_ADDSMOOTH = 18,
- SVGA3D_TC_PREMODULATE = 19,
- SVGA3D_TC_MODULATEALPHA_ADDCOLOR = 20,
- SVGA3D_TC_MODULATECOLOR_ADDALPHA = 21,
- SVGA3D_TC_MODULATEINVALPHA_ADDCOLOR = 22,
- SVGA3D_TC_MODULATEINVCOLOR_ADDALPHA = 23,
- SVGA3D_TC_BUMPENVMAPLUMINANCE = 24,
- SVGA3D_TC_MULTIPLYADD = 25,
- SVGA3D_TC_LERP = 26,
- SVGA3D_TC_MAX
-} SVGA3dTextureCombiner;
-
-#define SVGA3D_TC_CAP_BIT(svga3d_tc_op) (svga3d_tc_op ? (1 << (svga3d_tc_op - 1)) : 0)
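
For example, since SVGA3D_TC_MODULATE is 4:

	/*
	 * SVGA3D_TC_CAP_BIT(SVGA3D_TC_MODULATE) == 1 << (4 - 1) == 0x8
	 * SVGA3D_TC_CAP_BIT(SVGA3D_TC_INVALID)  == 0
	 */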
-
-typedef enum {
- SVGA3D_TEX_ADDRESS_INVALID = 0,
- SVGA3D_TEX_ADDRESS_WRAP = 1,
- SVGA3D_TEX_ADDRESS_MIRROR = 2,
- SVGA3D_TEX_ADDRESS_CLAMP = 3,
- SVGA3D_TEX_ADDRESS_BORDER = 4,
- SVGA3D_TEX_ADDRESS_MIRRORONCE = 5,
- SVGA3D_TEX_ADDRESS_EDGE = 6,
- SVGA3D_TEX_ADDRESS_MAX
-} SVGA3dTextureAddress;
-
-/*
- * SVGA3D_TEX_FILTER_NONE as the minification filter means mipmapping is
- * disabled, and the rasterizer should use the magnification filter instead.
- */
-typedef enum {
- SVGA3D_TEX_FILTER_NONE = 0,
- SVGA3D_TEX_FILTER_NEAREST = 1,
- SVGA3D_TEX_FILTER_LINEAR = 2,
- SVGA3D_TEX_FILTER_ANISOTROPIC = 3,
- SVGA3D_TEX_FILTER_FLATCUBIC = 4, /* Deprecated, not implemented */
- SVGA3D_TEX_FILTER_GAUSSIANCUBIC = 5, /* Deprecated, not implemented */
- SVGA3D_TEX_FILTER_PYRAMIDALQUAD = 6, /* Not currently implemented */
- SVGA3D_TEX_FILTER_GAUSSIANQUAD = 7, /* Not currently implemented */
- SVGA3D_TEX_FILTER_MAX
-} SVGA3dTextureFilter;
-
-typedef enum {
- SVGA3D_TEX_TRANSFORM_OFF = 0,
- SVGA3D_TEX_TRANSFORM_S = (1 << 0),
- SVGA3D_TEX_TRANSFORM_T = (1 << 1),
- SVGA3D_TEX_TRANSFORM_R = (1 << 2),
- SVGA3D_TEX_TRANSFORM_Q = (1 << 3),
- SVGA3D_TEX_PROJECTED = (1 << 15),
-} SVGA3dTexTransformFlags;
-
-typedef enum {
- SVGA3D_TEXCOORD_GEN_OFF = 0,
- SVGA3D_TEXCOORD_GEN_EYE_POSITION = 1,
- SVGA3D_TEXCOORD_GEN_EYE_NORMAL = 2,
- SVGA3D_TEXCOORD_GEN_REFLECTIONVECTOR = 3,
- SVGA3D_TEXCOORD_GEN_SPHERE = 4,
- SVGA3D_TEXCOORD_GEN_MAX
-} SVGA3dTextureCoordGen;
-
-/*
- * Texture argument constants for texture combiner
- */
-typedef enum {
- SVGA3D_TA_INVALID = 0,
- SVGA3D_TA_CONSTANT = 1,
- SVGA3D_TA_PREVIOUS = 2,
- SVGA3D_TA_DIFFUSE = 3,
- SVGA3D_TA_TEXTURE = 4,
- SVGA3D_TA_SPECULAR = 5,
- SVGA3D_TA_MAX
-} SVGA3dTextureArgData;
-
-#define SVGA3D_TM_MASK_LEN 4
-
-/* Modifiers for texture argument constants defined above. */
-typedef enum {
- SVGA3D_TM_NONE = 0,
- SVGA3D_TM_ALPHA = (1 << SVGA3D_TM_MASK_LEN),
- SVGA3D_TM_ONE_MINUS = (2 << SVGA3D_TM_MASK_LEN),
-} SVGA3dTextureArgModifier;
-
-#define SVGA3D_INVALID_ID ((uint32)-1)
-#define SVGA3D_MAX_CLIP_PLANES 6
-
-/*
- * This is the limit to the number of fixed-function texture
- * transforms and texture coordinates we can support. It does *not*
- * correspond to the number of texture image units (samplers) we
- * support!
- */
-#define SVGA3D_MAX_TEXTURE_COORDS 8
-
-/*
- * Vertex declarations
- *
- * Notes:
- *
- * SVGA3D_DECLUSAGE_POSITIONT is for pre-transformed vertices. If you
- * draw with any POSITIONT vertex arrays, the programmable vertex
- * pipeline will be implicitly disabled. Drawing will take place as if
- * no vertex shader was bound.
- */
-
-typedef enum {
- SVGA3D_DECLUSAGE_POSITION = 0,
- SVGA3D_DECLUSAGE_BLENDWEIGHT, /* 1 */
- SVGA3D_DECLUSAGE_BLENDINDICES, /* 2 */
- SVGA3D_DECLUSAGE_NORMAL, /* 3 */
- SVGA3D_DECLUSAGE_PSIZE, /* 4 */
- SVGA3D_DECLUSAGE_TEXCOORD, /* 5 */
- SVGA3D_DECLUSAGE_TANGENT, /* 6 */
- SVGA3D_DECLUSAGE_BINORMAL, /* 7 */
- SVGA3D_DECLUSAGE_TESSFACTOR, /* 8 */
- SVGA3D_DECLUSAGE_POSITIONT, /* 9 */
- SVGA3D_DECLUSAGE_COLOR, /* 10 */
- SVGA3D_DECLUSAGE_FOG, /* 11 */
- SVGA3D_DECLUSAGE_DEPTH, /* 12 */
- SVGA3D_DECLUSAGE_SAMPLE, /* 13 */
- SVGA3D_DECLUSAGE_MAX
-} SVGA3dDeclUsage;
-
-typedef enum {
- SVGA3D_DECLMETHOD_DEFAULT = 0,
- SVGA3D_DECLMETHOD_PARTIALU,
- SVGA3D_DECLMETHOD_PARTIALV,
- SVGA3D_DECLMETHOD_CROSSUV, /* Normal */
- SVGA3D_DECLMETHOD_UV,
- SVGA3D_DECLMETHOD_LOOKUP, /* Lookup a displacement map */
- SVGA3D_DECLMETHOD_LOOKUPPRESAMPLED, /* Lookup a pre-sampled displacement map */
-} SVGA3dDeclMethod;
-
-typedef enum {
- SVGA3D_DECLTYPE_FLOAT1 = 0,
- SVGA3D_DECLTYPE_FLOAT2 = 1,
- SVGA3D_DECLTYPE_FLOAT3 = 2,
- SVGA3D_DECLTYPE_FLOAT4 = 3,
- SVGA3D_DECLTYPE_D3DCOLOR = 4,
- SVGA3D_DECLTYPE_UBYTE4 = 5,
- SVGA3D_DECLTYPE_SHORT2 = 6,
- SVGA3D_DECLTYPE_SHORT4 = 7,
- SVGA3D_DECLTYPE_UBYTE4N = 8,
- SVGA3D_DECLTYPE_SHORT2N = 9,
- SVGA3D_DECLTYPE_SHORT4N = 10,
- SVGA3D_DECLTYPE_USHORT2N = 11,
- SVGA3D_DECLTYPE_USHORT4N = 12,
- SVGA3D_DECLTYPE_UDEC3 = 13,
- SVGA3D_DECLTYPE_DEC3N = 14,
- SVGA3D_DECLTYPE_FLOAT16_2 = 15,
- SVGA3D_DECLTYPE_FLOAT16_4 = 16,
- SVGA3D_DECLTYPE_MAX,
-} SVGA3dDeclType;
-
-/*
- * This structure is used for the divisor for geometry instancing;
- * it's a direct translation of the Direct3D equivalent.
- */
-typedef union {
- struct {
- /*
- * For index data, this number represents the number of instances to draw.
- * For instance data, this number represents the number of
- * instances/vertex in this stream
- */
- uint32 count : 30;
-
- /*
- * This is 1 if this is supposed to be the data that is repeated for
- * every instance.
- */
- uint32 indexedData : 1;
-
- /*
- * This is 1 if this is supposed to be the per-instance data.
- */
- uint32 instanceData : 1;
- };
-
- uint32 value;
-} SVGA3dVertexDivisor;
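
A minimal sketch of building a divisor word for a per-instance stream;
the union is mirrored locally so the example stands alone.

	#include <stdint.h>

	typedef union {
		struct {
			uint32_t count        : 30;
			uint32_t indexedData  : 1;
			uint32_t instanceData : 1;
		};
		uint32_t value;
	} vertex_divisor;

	/* One instance per vertex-stream element, marked as instance data. */
	static uint32_t per_instance_divisor(void)
	{
		vertex_divisor d = { .value = 0 };

		d.count = 1;
		d.instanceData = 1;
		return d.value;
	}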
-
-typedef enum {
- SVGA3D_PRIMITIVE_INVALID = 0,
- SVGA3D_PRIMITIVE_TRIANGLELIST = 1,
- SVGA3D_PRIMITIVE_POINTLIST = 2,
- SVGA3D_PRIMITIVE_LINELIST = 3,
- SVGA3D_PRIMITIVE_LINESTRIP = 4,
- SVGA3D_PRIMITIVE_TRIANGLESTRIP = 5,
- SVGA3D_PRIMITIVE_TRIANGLEFAN = 6,
- SVGA3D_PRIMITIVE_MAX
-} SVGA3dPrimitiveType;
-
-typedef enum {
- SVGA3D_COORDINATE_INVALID = 0,
- SVGA3D_COORDINATE_LEFTHANDED = 1,
- SVGA3D_COORDINATE_RIGHTHANDED = 2,
- SVGA3D_COORDINATE_MAX
-} SVGA3dCoordinateType;
-
-typedef enum {
- SVGA3D_TRANSFORM_INVALID = 0,
- SVGA3D_TRANSFORM_WORLD = 1,
- SVGA3D_TRANSFORM_VIEW = 2,
- SVGA3D_TRANSFORM_PROJECTION = 3,
- SVGA3D_TRANSFORM_TEXTURE0 = 4,
- SVGA3D_TRANSFORM_TEXTURE1 = 5,
- SVGA3D_TRANSFORM_TEXTURE2 = 6,
- SVGA3D_TRANSFORM_TEXTURE3 = 7,
- SVGA3D_TRANSFORM_TEXTURE4 = 8,
- SVGA3D_TRANSFORM_TEXTURE5 = 9,
- SVGA3D_TRANSFORM_TEXTURE6 = 10,
- SVGA3D_TRANSFORM_TEXTURE7 = 11,
- SVGA3D_TRANSFORM_WORLD1 = 12,
- SVGA3D_TRANSFORM_WORLD2 = 13,
- SVGA3D_TRANSFORM_WORLD3 = 14,
- SVGA3D_TRANSFORM_MAX
-} SVGA3dTransformType;
-
-typedef enum {
- SVGA3D_LIGHTTYPE_INVALID = 0,
- SVGA3D_LIGHTTYPE_POINT = 1,
- SVGA3D_LIGHTTYPE_SPOT1 = 2, /* 1-cone, in degrees */
- SVGA3D_LIGHTTYPE_SPOT2 = 3, /* 2-cone, in radians */
- SVGA3D_LIGHTTYPE_DIRECTIONAL = 4,
- SVGA3D_LIGHTTYPE_MAX
-} SVGA3dLightType;
-
-typedef enum {
- SVGA3D_CUBEFACE_POSX = 0,
- SVGA3D_CUBEFACE_NEGX = 1,
- SVGA3D_CUBEFACE_POSY = 2,
- SVGA3D_CUBEFACE_NEGY = 3,
- SVGA3D_CUBEFACE_POSZ = 4,
- SVGA3D_CUBEFACE_NEGZ = 5,
-} SVGA3dCubeFace;
-
-typedef enum {
- SVGA3D_SHADERTYPE_INVALID = 0,
- SVGA3D_SHADERTYPE_MIN = 1,
- SVGA3D_SHADERTYPE_VS = 1,
- SVGA3D_SHADERTYPE_PS = 2,
- SVGA3D_SHADERTYPE_MAX = 3,
- SVGA3D_SHADERTYPE_GS = 3,
-} SVGA3dShaderType;
-
-#define SVGA3D_NUM_SHADERTYPE (SVGA3D_SHADERTYPE_MAX - SVGA3D_SHADERTYPE_MIN)
-
-typedef enum {
- SVGA3D_CONST_TYPE_FLOAT = 0,
- SVGA3D_CONST_TYPE_INT = 1,
- SVGA3D_CONST_TYPE_BOOL = 2,
- SVGA3D_CONST_TYPE_MAX
-} SVGA3dShaderConstType;
-
-#define SVGA3D_MAX_SURFACE_FACES 6
-
-typedef enum {
- SVGA3D_STRETCH_BLT_POINT = 0,
- SVGA3D_STRETCH_BLT_LINEAR = 1,
- SVGA3D_STRETCH_BLT_MAX
-} SVGA3dStretchBltMode;
-
-typedef enum {
- SVGA3D_QUERYTYPE_OCCLUSION = 0,
- SVGA3D_QUERYTYPE_MAX
-} SVGA3dQueryType;
-
-typedef enum {
- SVGA3D_QUERYSTATE_PENDING = 0, /* Waiting on the host (set by guest) */
- SVGA3D_QUERYSTATE_SUCCEEDED = 1, /* Completed successfully (set by host) */
- SVGA3D_QUERYSTATE_FAILED = 2, /* Completed unsuccessfully (set by host) */
- SVGA3D_QUERYSTATE_NEW = 3, /* Never submitted (For guest use only) */
-} SVGA3dQueryState;
-
-typedef enum {
- SVGA3D_WRITE_HOST_VRAM = 1,
- SVGA3D_READ_HOST_VRAM = 2,
-} SVGA3dTransferType;
-
-/*
- * The maximum number of vertex arrays we're guaranteed to support in
- * SVGA_3D_CMD_DRAWPRIMITIVES.
- */
-#define SVGA3D_MAX_VERTEX_ARRAYS 32
-
-/*
- * The maximum number of primitive ranges we're guaranteed to support
- * in SVGA_3D_CMD_DRAWPRIMITIVES.
- */
-#define SVGA3D_MAX_DRAW_PRIMITIVE_RANGES 32
-
-/*
- * Identifiers for commands in the command FIFO.
- *
- * IDs between 1000 and 1039 (inclusive) were used by obsolete versions of
- * the SVGA3D protocol and remain reserved; they should not be used in the
- * future.
- *
- * IDs between 1040 and 1999 (inclusive) are available for use by the
- * current SVGA3D protocol.
- *
- * FIFO clients other than SVGA3D should stay below 1000, or at 2000
- * and up.
- */
-
-#define SVGA_3D_CMD_LEGACY_BASE 1000
-#define SVGA_3D_CMD_BASE 1040
-
-#define SVGA_3D_CMD_SURFACE_DEFINE SVGA_3D_CMD_BASE + 0 /* Deprecated */
-#define SVGA_3D_CMD_SURFACE_DESTROY SVGA_3D_CMD_BASE + 1
-#define SVGA_3D_CMD_SURFACE_COPY SVGA_3D_CMD_BASE + 2
-#define SVGA_3D_CMD_SURFACE_STRETCHBLT SVGA_3D_CMD_BASE + 3
-#define SVGA_3D_CMD_SURFACE_DMA SVGA_3D_CMD_BASE + 4
-#define SVGA_3D_CMD_CONTEXT_DEFINE SVGA_3D_CMD_BASE + 5
-#define SVGA_3D_CMD_CONTEXT_DESTROY SVGA_3D_CMD_BASE + 6
-#define SVGA_3D_CMD_SETTRANSFORM SVGA_3D_CMD_BASE + 7
-#define SVGA_3D_CMD_SETZRANGE SVGA_3D_CMD_BASE + 8
-#define SVGA_3D_CMD_SETRENDERSTATE SVGA_3D_CMD_BASE + 9
-#define SVGA_3D_CMD_SETRENDERTARGET SVGA_3D_CMD_BASE + 10
-#define SVGA_3D_CMD_SETTEXTURESTATE SVGA_3D_CMD_BASE + 11
-#define SVGA_3D_CMD_SETMATERIAL SVGA_3D_CMD_BASE + 12
-#define SVGA_3D_CMD_SETLIGHTDATA SVGA_3D_CMD_BASE + 13
-#define SVGA_3D_CMD_SETLIGHTENABLED SVGA_3D_CMD_BASE + 14
-#define SVGA_3D_CMD_SETVIEWPORT SVGA_3D_CMD_BASE + 15
-#define SVGA_3D_CMD_SETCLIPPLANE SVGA_3D_CMD_BASE + 16
-#define SVGA_3D_CMD_CLEAR SVGA_3D_CMD_BASE + 17
-#define SVGA_3D_CMD_PRESENT SVGA_3D_CMD_BASE + 18 /* Deprecated */
-#define SVGA_3D_CMD_SHADER_DEFINE SVGA_3D_CMD_BASE + 19
-#define SVGA_3D_CMD_SHADER_DESTROY SVGA_3D_CMD_BASE + 20
-#define SVGA_3D_CMD_SET_SHADER SVGA_3D_CMD_BASE + 21
-#define SVGA_3D_CMD_SET_SHADER_CONST SVGA_3D_CMD_BASE + 22
-#define SVGA_3D_CMD_DRAW_PRIMITIVES SVGA_3D_CMD_BASE + 23
-#define SVGA_3D_CMD_SETSCISSORRECT SVGA_3D_CMD_BASE + 24
-#define SVGA_3D_CMD_BEGIN_QUERY SVGA_3D_CMD_BASE + 25
-#define SVGA_3D_CMD_END_QUERY SVGA_3D_CMD_BASE + 26
-#define SVGA_3D_CMD_WAIT_FOR_QUERY SVGA_3D_CMD_BASE + 27
-#define SVGA_3D_CMD_PRESENT_READBACK SVGA_3D_CMD_BASE + 28 /* Deprecated */
-#define SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN SVGA_3D_CMD_BASE + 29
-#define SVGA_3D_CMD_SURFACE_DEFINE_V2 SVGA_3D_CMD_BASE + 30
-#define SVGA_3D_CMD_GENERATE_MIPMAPS SVGA_3D_CMD_BASE + 31
-#define SVGA_3D_CMD_ACTIVATE_SURFACE SVGA_3D_CMD_BASE + 40
-#define SVGA_3D_CMD_DEACTIVATE_SURFACE SVGA_3D_CMD_BASE + 41
-#define SVGA_3D_CMD_SCREEN_DMA 1082
-#define SVGA_3D_CMD_SET_UNITY_SURFACE_COOKIE 1083
-#define SVGA_3D_CMD_OPEN_CONTEXT_SURFACE 1084
-
-#define SVGA_3D_CMD_LOGICOPS_BITBLT 1085
-#define SVGA_3D_CMD_LOGICOPS_TRANSBLT 1086
-#define SVGA_3D_CMD_LOGICOPS_STRETCHBLT 1087
-#define SVGA_3D_CMD_LOGICOPS_COLORFILL 1088
-#define SVGA_3D_CMD_LOGICOPS_ALPHABLEND 1089
-#define SVGA_3D_CMD_LOGICOPS_CLEARTYPEBLEND 1090
-
-#define SVGA_3D_CMD_SET_OTABLE_BASE 1091
-#define SVGA_3D_CMD_READBACK_OTABLE 1092
-
-#define SVGA_3D_CMD_DEFINE_GB_MOB 1093
-#define SVGA_3D_CMD_DESTROY_GB_MOB 1094
-#define SVGA_3D_CMD_REDEFINE_GB_MOB 1095
-#define SVGA_3D_CMD_UPDATE_GB_MOB_MAPPING 1096
-
-#define SVGA_3D_CMD_DEFINE_GB_SURFACE 1097
-#define SVGA_3D_CMD_DESTROY_GB_SURFACE 1098
-#define SVGA_3D_CMD_BIND_GB_SURFACE 1099
-#define SVGA_3D_CMD_COND_BIND_GB_SURFACE 1100
-#define SVGA_3D_CMD_UPDATE_GB_IMAGE 1101
-#define SVGA_3D_CMD_UPDATE_GB_SURFACE 1102
-#define SVGA_3D_CMD_READBACK_GB_IMAGE 1103
-#define SVGA_3D_CMD_READBACK_GB_SURFACE 1104
-#define SVGA_3D_CMD_INVALIDATE_GB_IMAGE 1105
-#define SVGA_3D_CMD_INVALIDATE_GB_SURFACE 1106
-
-#define SVGA_3D_CMD_DEFINE_GB_CONTEXT 1107
-#define SVGA_3D_CMD_DESTROY_GB_CONTEXT 1108
-#define SVGA_3D_CMD_BIND_GB_CONTEXT 1109
-#define SVGA_3D_CMD_READBACK_GB_CONTEXT 1110
-#define SVGA_3D_CMD_INVALIDATE_GB_CONTEXT 1111
-
-#define SVGA_3D_CMD_DEFINE_GB_SHADER 1112
-#define SVGA_3D_CMD_DESTROY_GB_SHADER 1113
-#define SVGA_3D_CMD_BIND_GB_SHADER 1114
-
-#define SVGA_3D_CMD_SET_OTABLE_BASE64 1115
-
-#define SVGA_3D_CMD_BEGIN_GB_QUERY 1116
-#define SVGA_3D_CMD_END_GB_QUERY 1117
-#define SVGA_3D_CMD_WAIT_FOR_GB_QUERY 1118
-
-#define SVGA_3D_CMD_NOP 1119
-
-#define SVGA_3D_CMD_ENABLE_GART 1120
-#define SVGA_3D_CMD_DISABLE_GART 1121
-#define SVGA_3D_CMD_MAP_MOB_INTO_GART 1122
-#define SVGA_3D_CMD_UNMAP_GART_RANGE 1123
-
-#define SVGA_3D_CMD_DEFINE_GB_SCREENTARGET 1124
-#define SVGA_3D_CMD_DESTROY_GB_SCREENTARGET 1125
-#define SVGA_3D_CMD_BIND_GB_SCREENTARGET 1126
-#define SVGA_3D_CMD_UPDATE_GB_SCREENTARGET 1127
-
-#define SVGA_3D_CMD_READBACK_GB_IMAGE_PARTIAL 1128
-#define SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL 1129
-
-#define SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE 1130
-#define SVGA_3D_CMD_GB_SCREEN_DMA 1131
-#define SVGA_3D_CMD_BIND_GB_SURFACE_WITH_PITCH 1132
-#define SVGA_3D_CMD_GB_MOB_FENCE 1133
-#define SVGA_3D_CMD_DEFINE_GB_SURFACE_V2 1134
-#define SVGA_3D_CMD_DEFINE_GB_MOB64 1135
-#define SVGA_3D_CMD_REDEFINE_GB_MOB64 1136
-#define SVGA_3D_CMD_NOP_ERROR 1137
-
-#define SVGA_3D_CMD_RESERVED1 1138
-#define SVGA_3D_CMD_RESERVED2 1139
-#define SVGA_3D_CMD_RESERVED3 1140
-#define SVGA_3D_CMD_RESERVED4 1141
-#define SVGA_3D_CMD_RESERVED5 1142
-
-#define SVGA_3D_CMD_MAX 1142
-#define SVGA_3D_CMD_FUTURE_MAX 3000
-
-/*
- * Common substructures used in multiple FIFO commands:
- */
-
-typedef struct {
- union {
- struct {
- uint16 function; /* SVGA3dFogFunction */
- uint8 type; /* SVGA3dFogType */
- uint8 base; /* SVGA3dFogBase */
- };
- uint32 uintValue;
- };
-} SVGA3dFogMode;
-
-/*
- * Uniquely identify one image (a 1D/2D/3D array) from a surface. This
- * is a surface ID as well as face/mipmap indices.
- */
-
-typedef
-struct SVGA3dSurfaceImageId {
- uint32 sid;
- uint32 face;
- uint32 mipmap;
-} SVGA3dSurfaceImageId;
-
-typedef
-struct SVGA3dGuestImage {
- SVGAGuestPtr ptr;
-
- /*
- * A note on interpretation of pitch: This value of pitch is the
- * number of bytes between vertically adjacent image
- * blocks. Normally this is the number of bytes between the first
- * pixel of two adjacent scanlines. With compressed textures,
- * however, this may represent the number of bytes between
- * compression blocks rather than between rows of pixels.
- *
- * XXX: Compressed textures currently must be tightly packed in guest memory.
- *
- * If the image is 1-dimensional, pitch is ignored.
- *
- * If 'pitch' is zero, the SVGA3D device calculates a pitch value
- * assuming each row of blocks is tightly packed.
- */
- uint32 pitch;
-} SVGA3dGuestImage;
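
A minimal sketch of the pitch rule above for an uncompressed image;
passing 0 instead lets the device compute the tightly packed value.

	#include <stdint.h>

	/* Bytes between the first pixels of two adjacent scanlines. */
	static uint32_t tight_pitch(uint32_t width_px, uint32_t bytes_per_pixel)
	{
		return width_px * bytes_per_pixel;	/* e.g. 640 * 4 == 2560 */
	}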
-
-
-/*
- * FIFO command format definitions:
- */
-
-/*
- * The data size header following cmdNum for every 3d command
- */
-typedef
-struct {
- uint32 id;
- uint32 size;
-} SVGA3dCmdHeader;
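
A minimal sketch of how a 3D command is serialized: header first, then
'size' bytes of body. The helper and buffer handling are illustrative.

	#include <stdint.h>
	#include <string.h>

	struct cmd_header {		/* mirrors SVGA3dCmdHeader */
		uint32_t id;
		uint32_t size;		/* bytes of payload after the header */
	};

	static size_t emit_cmd(uint8_t *buf, uint32_t id,
			       const void *payload, uint32_t size)
	{
		struct cmd_header hdr = { .id = id, .size = size };

		memcpy(buf, &hdr, sizeof(hdr));
		memcpy(buf + sizeof(hdr), payload, size);
		return sizeof(hdr) + size;
	}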
-
-/*
- * A surface is a hierarchy of host VRAM surfaces: 1D, 2D, or 3D, with
- * optional mipmaps and cube faces.
- */
-
-typedef
-struct {
- uint32 width;
- uint32 height;
- uint32 depth;
-} SVGA3dSize;
-
-typedef enum {
- SVGA3D_SURFACE_CUBEMAP = (1 << 0),
- SVGA3D_SURFACE_HINT_STATIC = (1 << 1),
- SVGA3D_SURFACE_HINT_DYNAMIC = (1 << 2),
- SVGA3D_SURFACE_HINT_INDEXBUFFER = (1 << 3),
- SVGA3D_SURFACE_HINT_VERTEXBUFFER = (1 << 4),
- SVGA3D_SURFACE_HINT_TEXTURE = (1 << 5),
- SVGA3D_SURFACE_HINT_RENDERTARGET = (1 << 6),
- SVGA3D_SURFACE_HINT_DEPTHSTENCIL = (1 << 7),
- SVGA3D_SURFACE_HINT_WRITEONLY = (1 << 8),
- SVGA3D_SURFACE_MASKABLE_ANTIALIAS = (1 << 9),
- SVGA3D_SURFACE_AUTOGENMIPMAPS = (1 << 10),
-} SVGA3dSurfaceFlags;
-
-typedef
-struct {
- uint32 numMipLevels;
-} SVGA3dSurfaceFace;
-
-typedef
-struct {
- uint32 sid;
- SVGA3dSurfaceFlags surfaceFlags;
- SVGA3dSurfaceFormat format;
- /*
- * If surfaceFlags has SVGA3D_SURFACE_CUBEMAP bit set, all SVGA3dSurfaceFace
- * structures must have the same value of numMipLevels field.
- * Otherwise, all but the first SVGA3dSurfaceFace structures must have the
- * numMipLevels set to 0.
- */
- SVGA3dSurfaceFace face[SVGA3D_MAX_SURFACE_FACES];
- /*
- * Followed by an SVGA3dSize structure for each mip level in each face.
- *
- * A note on surface sizes: Sizes are always specified in pixels,
- * even if the true surface size is not a multiple of the minimum
- * block size of the surface's format. For example, a 3x3x1 DXT1
- * compressed texture would actually be stored as a 4x4x1 image in
- * memory.
- */
-} SVGA3dCmdDefineSurface; /* SVGA_3D_CMD_SURFACE_DEFINE */
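
A worked check of the block-size note above: DXT1 stores 4x4 pixel blocks
of 8 bytes each, so a 3x3x1 level still occupies one whole block:

	/*
	 * ceil(3 / 4) * ceil(3 / 4) * 8 == 1 * 1 * 8 == 8 bytes in memory,
	 * even though the level's size is declared as 3x3x1 pixels.
	 */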
-
-typedef
-struct {
- uint32 sid;
- SVGA3dSurfaceFlags surfaceFlags;
- SVGA3dSurfaceFormat format;
- /*
- * If surfaceFlags has SVGA3D_SURFACE_CUBEMAP bit set, all SVGA3dSurfaceFace
- * structures must have the same value of numMipLevels field.
- * Otherwise, all but the first SVGA3dSurfaceFace structures must have the
- * numMipLevels set to 0.
- */
- SVGA3dSurfaceFace face[SVGA3D_MAX_SURFACE_FACES];
- uint32 multisampleCount;
- SVGA3dTextureFilter autogenFilter;
- /*
- * Followed by an SVGA3dSize structure for each mip level in each face.
- *
- * A note on surface sizes: Sizes are always specified in pixels,
- * even if the true surface size is not a multiple of the minimum
- * block size of the surface's format. For example, a 3x3x1 DXT1
- * compressed texture would actually be stored as a 4x4x1 image in
- * memory.
- */
-} SVGA3dCmdDefineSurface_v2; /* SVGA_3D_CMD_SURFACE_DEFINE_V2 */
-
-typedef
-struct {
- uint32 sid;
-} SVGA3dCmdDestroySurface; /* SVGA_3D_CMD_SURFACE_DESTROY */
-
-typedef
-struct {
- uint32 cid;
-} SVGA3dCmdDefineContext; /* SVGA_3D_CMD_CONTEXT_DEFINE */
-
-typedef
-struct {
- uint32 cid;
-} SVGA3dCmdDestroyContext; /* SVGA_3D_CMD_CONTEXT_DESTROY */
-
-typedef
-struct {
- uint32 cid;
- SVGA3dClearFlag clearFlag;
- uint32 color;
- float depth;
- uint32 stencil;
- /* Followed by variable number of SVGA3dRect structures */
-} SVGA3dCmdClear; /* SVGA_3D_CMD_CLEAR */
-
-typedef
-struct SVGA3dCopyRect {
- uint32 x;
- uint32 y;
- uint32 w;
- uint32 h;
- uint32 srcx;
- uint32 srcy;
-} SVGA3dCopyRect;
-
-typedef
-struct SVGA3dCopyBox {
- uint32 x;
- uint32 y;
- uint32 z;
- uint32 w;
- uint32 h;
- uint32 d;
- uint32 srcx;
- uint32 srcy;
- uint32 srcz;
-} SVGA3dCopyBox;
-
-typedef
-struct {
- uint32 x;
- uint32 y;
- uint32 w;
- uint32 h;
-} SVGA3dRect;
-
-typedef
-struct {
- uint32 x;
- uint32 y;
- uint32 z;
- uint32 w;
- uint32 h;
- uint32 d;
-} SVGA3dBox;
-
-typedef
-struct {
- uint32 x;
- uint32 y;
- uint32 z;
-} SVGA3dPoint;
-
-typedef
-struct {
- SVGA3dLightType type;
- SVGA3dBool inWorldSpace;
- float diffuse[4];
- float specular[4];
- float ambient[4];
- float position[4];
- float direction[4];
- float range;
- float falloff;
- float attenuation0;
- float attenuation1;
- float attenuation2;
- float theta;
- float phi;
-} SVGA3dLightData;
-
-typedef
-struct {
- uint32 sid;
- /* Followed by variable number of SVGA3dCopyRect structures */
-} SVGA3dCmdPresent; /* SVGA_3D_CMD_PRESENT */
-
-typedef
-struct {
- SVGA3dRenderStateName state;
- union {
- uint32 uintValue;
- float floatValue;
- };
-} SVGA3dRenderState;
-
-typedef
-struct {
- uint32 cid;
- /* Followed by variable number of SVGA3dRenderState structures */
-} SVGA3dCmdSetRenderState; /* SVGA_3D_CMD_SETRENDERSTATE */
-
-typedef
-struct {
- uint32 cid;
- SVGA3dRenderTargetType type;
- SVGA3dSurfaceImageId target;
-} SVGA3dCmdSetRenderTarget; /* SVGA_3D_CMD_SETRENDERTARGET */
-
-typedef
-struct {
- SVGA3dSurfaceImageId src;
- SVGA3dSurfaceImageId dest;
- /* Followed by variable number of SVGA3dCopyBox structures */
-} SVGA3dCmdSurfaceCopy; /* SVGA_3D_CMD_SURFACE_COPY */
-
-typedef
-struct {
- SVGA3dSurfaceImageId src;
- SVGA3dSurfaceImageId dest;
- SVGA3dBox boxSrc;
- SVGA3dBox boxDest;
- SVGA3dStretchBltMode mode;
-} SVGA3dCmdSurfaceStretchBlt; /* SVGA_3D_CMD_SURFACE_STRETCHBLT */
-
-typedef
-struct {
- /*
- * If the discard flag is present in a surface DMA operation, the host may
- * discard the contents of the current mipmap level and face of the target
- * surface before applying the surface DMA contents.
- */
- uint32 discard : 1;
-
- /*
- * If the unsynchronized flag is present, the host may perform this upload
- * without syncing to pending reads on this surface.
- */
- uint32 unsynchronized : 1;
-
- /*
- * Guests *MUST* set the reserved bits to 0 before submitting the command
- * suffix as future flags may occupy these bits.
- */
- uint32 reserved : 30;
-} SVGA3dSurfaceDMAFlags;
-
-typedef
-struct {
- SVGA3dGuestImage guest;
- SVGA3dSurfaceImageId host;
- SVGA3dTransferType transfer;
- /*
- * Followed by variable number of SVGA3dCopyBox structures. For consistency
- * in all clipping logic and coordinate translation, we define the
- * "source" in each copyBox as the guest image and the
- * "destination" as the host image, regardless of transfer
- * direction.
- *
- * For efficiency, the SVGA3D device is free to copy more data than
- * specified. For example, it may round copy boxes outwards such
- * that they lie on particular alignment boundaries.
- */
-} SVGA3dCmdSurfaceDMA; /* SVGA_3D_CMD_SURFACE_DMA */
-
-/*
- * SVGA3dCmdSurfaceDMASuffix --
- *
- * This is a command suffix that will appear after a SurfaceDMA command in
- * the FIFO. It contains some extra information that hosts may use to
- * optimize performance or protect the guest. This suffix exists to preserve
- * backwards compatibility while also allowing for new functionality to be
- * implemented.
- */
-
-typedef
-struct {
- uint32 suffixSize;
-
- /*
- * The maximum offset bounds how far past the guestPtr base address this
- * surfaceDMA may read or write. If the suffix is supported, the host will
- * respect this
- * boundary while performing surface DMAs.
- *
- * Defaults to MAX_UINT32
- */
- uint32 maximumOffset;
-
- /*
- * A set of flags that describes optimizations that the host may perform
- * while performing this surface DMA operation. For correctness, the guest
- * should never rely on behaviour that differs when these flags are set.
- *
- * Defaults to 0
- */
- SVGA3dSurfaceDMAFlags flags;
-} SVGA3dCmdSurfaceDMASuffix;
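
A hedged sketch of laying out a SURFACE_DMA command with one copy box and
this suffix follows. 'buf' is assumed to point at already-reserved FIFO
space, the helper itself is hypothetical, and the usual string helpers
(memcpy/memset) are assumed available; note how the reserved flag bits are
cleared, as required above:

/* Sketch: header, body, one copy box, then the suffix, back to back. */
static inline void example_encode_surface_dma(uint8 *buf,
                                              const SVGA3dCmdSurfaceDMA *body,
                                              const SVGA3dCopyBox *box)
{
        SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf;
        uint8 *payload = (uint8 *) (header + 1);
        SVGA3dCmdSurfaceDMASuffix *suffix;

        header->id = SVGA_3D_CMD_SURFACE_DMA;
        header->size = sizeof(*body) + sizeof(*box) + sizeof(*suffix);

        memcpy(payload, body, sizeof(*body));
        memcpy(payload + sizeof(*body), box, sizeof(*box));

        suffix = (SVGA3dCmdSurfaceDMASuffix *)
                (payload + sizeof(*body) + sizeof(*box));
        suffix->suffixSize = sizeof(*suffix);
        suffix->maximumOffset = 0xffffffffU;  /* the MAX_UINT32 default */
        memset(&suffix->flags, 0, sizeof(suffix->flags)); /* reserved = 0 */
}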
-
-/*
- * SVGA_3D_CMD_DRAW_PRIMITIVES --
- *
- * This command is the SVGA3D device's generic drawing entry point.
- * It can draw multiple ranges of primitives, optionally using an
- * index buffer, using an arbitrary collection of vertex buffers.
- *
- * Each SVGA3dVertexDecl defines a distinct vertex array to bind
- * during this draw call. The declarations specify which surface
- * the vertex data lives in, what that vertex data is used for,
- * and how to interpret it.
- *
- * Each SVGA3dPrimitiveRange defines a collection of primitives
- * to render using the same vertex arrays. An index buffer is
- * optional.
- */
-
-typedef
-struct {
- /*
- * A range hint is an optional specification for the range of indices
- * in an SVGA3dArray that will be used. If 'last' is zero, it is assumed
- * that the entire array will be used.
- *
- * These are only hints. The SVGA3D device may use them for
- * performance optimization if possible, but it's also allowed to
- * ignore these values.
- */
- uint32 first;
- uint32 last;
-} SVGA3dArrayRangeHint;
-
-typedef
-struct {
- /*
- * Define the origin and shape of a vertex or index array. Both
- * 'offset' and 'stride' are in bytes. The provided surface will be
- * reinterpreted as a flat array of bytes in the same format used
- * by surface DMA operations. To avoid unnecessary conversions, the
- * surface should be created with the SVGA3D_BUFFER format.
- *
- * Index 0 in the array starts 'offset' bytes into the surface.
- * Index 1 begins at byte 'offset + stride', etc. Array indices may
- * not be negative.
- */
- uint32 surfaceId;
- uint32 offset;
- uint32 stride;
-} SVGA3dArray;
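
Illustrative arithmetic for the offset/stride indexing rule above (the
helper is not part of the headers):

/* Sketch: byte position of element 'i' within the backing surface. */
static inline uint32 example_array_element_offset(const SVGA3dArray *array,
                                                  uint32 i)
{
        return array->offset + i * array->stride;
}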
-
-typedef
-struct {
- /*
- * Describe a vertex array's data type, and define how it is to be
- * used by the fixed function pipeline or the vertex shader. It
- * isn't useful to have two VertexDecls with the same
- * VertexArrayIdentity in one draw call.
- */
- SVGA3dDeclType type;
- SVGA3dDeclMethod method;
- SVGA3dDeclUsage usage;
- uint32 usageIndex;
-} SVGA3dVertexArrayIdentity;
-
-typedef
-struct {
- SVGA3dVertexArrayIdentity identity;
- SVGA3dArray array;
- SVGA3dArrayRangeHint rangeHint;
-} SVGA3dVertexDecl;
-
-typedef
-struct {
- /*
- * Define a group of primitives to render, from sequential indices.
- *
- * The values of 'primType' and 'primitiveCount' imply the
- * total number of vertices that will be rendered.
- */
- SVGA3dPrimitiveType primType;
- uint32 primitiveCount;
-
- /*
- * Optional index buffer. If indexArray.surfaceId is
- * SVGA3D_INVALID_ID, we render without an index buffer. Rendering
- * without an index buffer is identical to rendering with an index
- * buffer containing the sequence [0, 1, 2, 3, ...].
- *
- * If an index buffer is in use, indexWidth specifies the width in
- * bytes of each index value. It must be less than or equal to
- * indexArray.stride.
- *
- * (Currently, the SVGA3D device requires index buffers to be tightly
- * packed. In other words, indexWidth == indexArray.stride)
- */
- SVGA3dArray indexArray;
- uint32 indexWidth;
-
- /*
- * Optional index bias. This number is added to all indices from
- * indexArray before they are used as vertex array indices. This
- * can be used in multiple ways:
- *
- * - When not using an indexArray, this bias can be used to
- * specify where in the vertex arrays to begin rendering.
- *
- * - A positive number here is equivalent to increasing the
- * offset in each vertex array.
- *
- * - A negative number can be used to render using a small
- * vertex array and an index buffer that contains large
- * values. This may be used by some applications that
- * crop a vertex buffer without modifying their index
- * buffer.
- *
- * Note that rendering with a negative bias value may be slower and
- * use more memory than rendering with a positive or zero bias.
- */
- int32 indexBias;
-} SVGA3dPrimitiveRange;
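
A small sketch of the bias arithmetic described above; the helper is
illustrative and omits range checking:

/*
 * Sketch: the vertex array index actually used for a fetched index.
 * Without an index buffer, 'fetched_index' is simply 0, 1, 2, ...
 */
static inline uint32
example_effective_vertex_index(const SVGA3dPrimitiveRange *range,
                               uint32 fetched_index)
{
        return (uint32) ((int32) fetched_index + range->indexBias);
}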
-
-typedef
-struct {
- uint32 cid;
- uint32 numVertexDecls;
- uint32 numRanges;
-
- /*
- * There are up to three variable size arrays after the
- * SVGA3dCmdDrawPrimitives structure. In order,
- * they are:
- *
- * 1. SVGA3dVertexDecl, quantity 'numVertexDecls', but no more than
- * SVGA3D_MAX_VERTEX_ARRAYS;
- * 2. SVGA3dPrimitiveRange, quantity 'numRanges', but no more than
- * SVGA3D_MAX_DRAW_PRIMITIVE_RANGES;
- * 3. Optionally, SVGA3dVertexDivisor, quantity 'numVertexDecls' (contains
- * the frequency divisor for the corresponding vertex decl).
- */
-} SVGA3dCmdDrawPrimitives; /* SVGA_3D_CMD_DRAWPRIMITIVES */
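
For sizing purposes, here is a sketch of the command body length implied
by the layout above (the optional divisor array is omitted, and the helper
is illustrative):

/* Sketch: body plus the two mandatory trailing arrays, in bytes. */
static inline uint32 example_draw_cmd_body_size(uint32 numVertexDecls,
                                                uint32 numRanges)
{
        return sizeof(SVGA3dCmdDrawPrimitives) +
               numVertexDecls * sizeof(SVGA3dVertexDecl) +
               numRanges * sizeof(SVGA3dPrimitiveRange);
}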
-
-typedef
-struct {
- uint32 stage;
- SVGA3dTextureStateName name;
- union {
- uint32 value;
- float floatValue;
- };
-} SVGA3dTextureState;
-
-typedef
-struct {
- uint32 cid;
- /* Followed by variable number of SVGA3dTextureState structures */
-} SVGA3dCmdSetTextureState; /* SVGA_3D_CMD_SETTEXTURESTATE */
-
-typedef
-struct {
- uint32 cid;
- SVGA3dTransformType type;
- float matrix[16];
-} SVGA3dCmdSetTransform; /* SVGA_3D_CMD_SETTRANSFORM */
-
-typedef
-struct {
- float min;
- float max;
-} SVGA3dZRange;
-
-typedef
-struct {
- uint32 cid;
- SVGA3dZRange zRange;
-} SVGA3dCmdSetZRange; /* SVGA_3D_CMD_SETZRANGE */
-
-typedef
-struct {
- float diffuse[4];
- float ambient[4];
- float specular[4];
- float emissive[4];
- float shininess;
-} SVGA3dMaterial;
-
-typedef
-struct {
- uint32 cid;
- SVGA3dFace face;
- SVGA3dMaterial material;
-} SVGA3dCmdSetMaterial; /* SVGA_3D_CMD_SETMATERIAL */
-
-typedef
-struct {
- uint32 cid;
- uint32 index;
- SVGA3dLightData data;
-} SVGA3dCmdSetLightData; /* SVGA_3D_CMD_SETLIGHTDATA */
-
-typedef
-struct {
- uint32 cid;
- uint32 index;
- uint32 enabled;
-} SVGA3dCmdSetLightEnabled; /* SVGA_3D_CMD_SETLIGHTENABLED */
-
-typedef
-struct {
- uint32 cid;
- SVGA3dRect rect;
-} SVGA3dCmdSetViewport; /* SVGA_3D_CMD_SETVIEWPORT */
-
-typedef
-struct {
- uint32 cid;
- SVGA3dRect rect;
-} SVGA3dCmdSetScissorRect; /* SVGA_3D_CMD_SETSCISSORRECT */
-
-typedef
-struct {
- uint32 cid;
- uint32 index;
- float plane[4];
-} SVGA3dCmdSetClipPlane; /* SVGA_3D_CMD_SETCLIPPLANE */
-
-typedef
-struct {
- uint32 cid;
- uint32 shid;
- SVGA3dShaderType type;
- /* Followed by a variable number of DWORDs of shader bytecode */
-} SVGA3dCmdDefineShader; /* SVGA_3D_CMD_SHADER_DEFINE */
-
-typedef
-struct {
- uint32 cid;
- uint32 shid;
- SVGA3dShaderType type;
-} SVGA3dCmdDestroyShader; /* SVGA_3D_CMD_SHADER_DESTROY */
-
-typedef
-struct {
- uint32 cid;
- uint32 reg; /* register number */
- SVGA3dShaderType type;
- SVGA3dShaderConstType ctype;
- uint32 values[4];
-} SVGA3dCmdSetShaderConst; /* SVGA_3D_CMD_SET_SHADER_CONST */
-
-typedef
-struct {
- uint32 cid;
- SVGA3dShaderType type;
- uint32 shid;
-} SVGA3dCmdSetShader; /* SVGA_3D_CMD_SET_SHADER */
-
-typedef
-struct {
- uint32 cid;
- SVGA3dQueryType type;
-} SVGA3dCmdBeginQuery; /* SVGA_3D_CMD_BEGIN_QUERY */
-
-typedef
-struct {
- uint32 cid;
- SVGA3dQueryType type;
- SVGAGuestPtr guestResult; /* Points to an SVGA3dQueryResult structure */
-} SVGA3dCmdEndQuery; /* SVGA_3D_CMD_END_QUERY */
-
-typedef
-struct {
- uint32 cid; /* Same parameters passed to END_QUERY */
- SVGA3dQueryType type;
- SVGAGuestPtr guestResult;
-} SVGA3dCmdWaitForQuery; /* SVGA_3D_CMD_WAIT_FOR_QUERY */
-
-typedef
-struct {
- uint32 totalSize; /* Set by guest before query is ended. */
- SVGA3dQueryState state; /* Set by host or guest. See SVGA3dQueryState. */
- union { /* Set by host on exit from PENDING state */
- uint32 result32;
- };
-} SVGA3dQueryResult;
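
A hedged sketch of how a guest might poll for completion after END_QUERY;
it assumes the SVGA3D_QUERYSTATE_PENDING enumerator from SVGA3dQueryState:

/* Sketch: the host leaves PENDING once the result has been written. */
static inline bool example_query_done(const volatile SVGA3dQueryResult *result)
{
        return result->state != SVGA3D_QUERYSTATE_PENDING;
}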
-
-/*
- * SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN --
- *
- * This is a blit from an SVGA3D surface to a Screen Object. Just
- * like GMR-to-screen blits, this blit may be directed at a
- * specific screen or to the virtual coordinate space.
- *
- * The blit copies from a rectangular region of an SVGA3D surface
- * image to a rectangular region of a screen or screens.
- *
- * This command takes an optional variable-length list of clipping
- * rectangles after the body of the command. If no rectangles are
- * specified, there is no clipping region. The entire destRect is
- * drawn to. If one or more rectangles are included, they describe
- * a clipping region. The clip rectangle coordinates are measured
- * relative to the top-left corner of destRect.
- *
- * This clipping region serves multiple purposes:
- *
- * - It can be used to perform an irregularly shaped blit more
- * efficiently than by issuing many separate blit commands.
- *
- * - It is equivalent to allowing blits with non-integer
- * source coordinates. You could blit just one half-pixel
- * of a source, for example, by specifying a larger
- * destination rectangle than you need, then removing
- * part of it using a clip rectangle.
- *
- * Availability:
- * SVGA_FIFO_CAP_SCREEN_OBJECT
- *
- * Limitations:
- *
- * - Currently, no backend supports blits from a mipmap or face
- * other than the first one.
- */
-
-typedef
-struct {
- SVGA3dSurfaceImageId srcImage;
- SVGASignedRect srcRect;
- uint32 destScreenId; /* Screen ID or SVGA_ID_INVALID for virt. coords */
- SVGASignedRect destRect; /* Supports scaling if src/dest differ in size */
- /* Clipping: zero or more SVGASignedRects follow */
-} SVGA3dCmdBlitSurfaceToScreen; /* SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN */
-
-typedef
-struct {
- uint32 sid;
- SVGA3dTextureFilter filter;
-} SVGA3dCmdGenerateMipmaps; /* SVGA_3D_CMD_GENERATE_MIPMAPS */
-
-
-/*
- * Guest-backed surface definitions.
- */
-
-typedef uint32 SVGAMobId;
-
-typedef enum SVGAMobFormat {
- SVGA3D_MOBFMT_INVALID = SVGA3D_INVALID_ID,
- SVGA3D_MOBFMT_PTDEPTH_0 = 0,
- SVGA3D_MOBFMT_PTDEPTH_1 = 1,
- SVGA3D_MOBFMT_PTDEPTH_2 = 2,
- SVGA3D_MOBFMT_RANGE = 3,
- SVGA3D_MOBFMT_PTDEPTH64_0 = 4,
- SVGA3D_MOBFMT_PTDEPTH64_1 = 5,
- SVGA3D_MOBFMT_PTDEPTH64_2 = 6,
- SVGA3D_MOBFMT_MAX,
-} SVGAMobFormat;
-
-/*
- * Sizes of opaque types.
- */
-
-#define SVGA3D_OTABLE_MOB_ENTRY_SIZE 16
-#define SVGA3D_OTABLE_CONTEXT_ENTRY_SIZE 8
-#define SVGA3D_OTABLE_SURFACE_ENTRY_SIZE 64
-#define SVGA3D_OTABLE_SHADER_ENTRY_SIZE 16
-#define SVGA3D_OTABLE_SCREEN_TARGET_ENTRY_SIZE 64
-#define SVGA3D_CONTEXT_DATA_SIZE 16384
-
-/*
- * SVGA3dCmdSetOTableBase --
- *
- * This command allows the guest to specify the base PPN of the
- * specified object table.
- */
-
-typedef enum {
- SVGA_OTABLE_MOB = 0,
- SVGA_OTABLE_MIN = 0,
- SVGA_OTABLE_SURFACE = 1,
- SVGA_OTABLE_CONTEXT = 2,
- SVGA_OTABLE_SHADER = 3,
- SVGA_OTABLE_SCREEN_TARGET = 4,
- SVGA_OTABLE_DX9_MAX = 5,
- SVGA_OTABLE_MAX = 8
-} SVGAOTableType;
-
-typedef
-struct {
- SVGAOTableType type;
- PPN baseAddress;
- uint32 sizeInBytes;
- uint32 validSizeInBytes;
- SVGAMobFormat ptDepth;
-} __packed
-SVGA3dCmdSetOTableBase; /* SVGA_3D_CMD_SET_OTABLE_BASE */
-
-typedef
-struct {
- SVGAOTableType type;
- PPN64 baseAddress;
- uint32 sizeInBytes;
- uint32 validSizeInBytes;
- SVGAMobFormat ptDepth;
-} __packed
-SVGA3dCmdSetOTableBase64; /* SVGA_3D_CMD_SET_OTABLE_BASE64 */
-
-typedef
-struct {
- SVGAOTableType type;
-} __packed
-SVGA3dCmdReadbackOTable; /* SVGA_3D_CMD_READBACK_OTABLE */
-
-/*
- * Define a memory object (Mob) in the OTable.
- */
-
-typedef
-struct SVGA3dCmdDefineGBMob {
- SVGAMobId mobid;
- SVGAMobFormat ptDepth;
- PPN base;
- uint32 sizeInBytes;
-} __packed
-SVGA3dCmdDefineGBMob; /* SVGA_3D_CMD_DEFINE_GB_MOB */
-
-
-/*
- * Destroys an object in the OTable.
- */
-
-typedef
-struct SVGA3dCmdDestroyGBMob {
- SVGAMobId mobid;
-} __packed
-SVGA3dCmdDestroyGBMob; /* SVGA_3D_CMD_DESTROY_GB_MOB */
-
-/*
- * Redefine an object in the OTable.
- */
-
-typedef
-struct SVGA3dCmdRedefineGBMob {
- SVGAMobId mobid;
- SVGAMobFormat ptDepth;
- PPN base;
- uint32 sizeInBytes;
-} __packed
-SVGA3dCmdRedefineGBMob; /* SVGA_3D_CMD_REDEFINE_GB_MOB */
-
-/*
- * Define a memory object (Mob) in the OTable with a PPN64 base.
- */
-
-typedef
-struct SVGA3dCmdDefineGBMob64 {
- SVGAMobId mobid;
- SVGAMobFormat ptDepth;
- PPN64 base;
- uint32 sizeInBytes;
-} __packed
-SVGA3dCmdDefineGBMob64; /* SVGA_3D_CMD_DEFINE_GB_MOB64 */
-
-/*
- * Redefine an object in the OTable with PPN64 base.
- */
-
-typedef
-struct SVGA3dCmdRedefineGBMob64 {
- SVGAMobId mobid;
- SVGAMobFormat ptDepth;
- PPN64 base;
- uint32 sizeInBytes;
-} __packed
-SVGA3dCmdRedefineGBMob64; /* SVGA_3D_CMD_REDEFINE_GB_MOB64 */
-
-/*
- * Notification that the page tables have been modified.
- */
-
-typedef
-struct SVGA3dCmdUpdateGBMobMapping {
- SVGAMobId mobid;
-} __packed
-SVGA3dCmdUpdateGBMobMapping; /* SVGA_3D_CMD_UPDATE_GB_MOB_MAPPING */
-
-/*
- * Define a guest-backed surface.
- */
-
-typedef
-struct SVGA3dCmdDefineGBSurface {
- uint32 sid;
- SVGA3dSurfaceFlags surfaceFlags;
- SVGA3dSurfaceFormat format;
- uint32 numMipLevels;
- uint32 multisampleCount;
- SVGA3dTextureFilter autogenFilter;
- SVGA3dSize size;
-} __packed
-SVGA3dCmdDefineGBSurface; /* SVGA_3D_CMD_DEFINE_GB_SURFACE */
-
-/*
- * Destroy a guest-backed surface.
- */
-
-typedef
-struct SVGA3dCmdDestroyGBSurface {
- uint32 sid;
-} __packed
-SVGA3dCmdDestroyGBSurface; /* SVGA_3D_CMD_DESTROY_GB_SURFACE */
-
-/*
- * Bind a guest-backed surface to an object.
- */
-
-typedef
-struct SVGA3dCmdBindGBSurface {
- uint32 sid;
- SVGAMobId mobid;
-} __packed
-SVGA3dCmdBindGBSurface; /* SVGA_3D_CMD_BIND_GB_SURFACE */
-
-/*
- * Conditionally bind a mob to a guest backed surface if testMobid
- * matches the currently bound mob. Optionally issue a readback on
- * the surface while it is still bound to the old mobid if the mobid
- * is changed by this command.
- */
-
-#define SVGA3D_COND_BIND_GB_SURFACE_FLAG_READBACK (1 << 0)
-
-typedef
-struct {
- uint32 sid;
- SVGAMobId testMobid;
- SVGAMobId mobid;
- uint32 flags;
-} __packed
-SVGA3dCmdCondBindGBSurface; /* SVGA_3D_CMD_COND_BIND_GB_SURFACE */
-
-/*
- * Update an image in a guest-backed surface.
- * (Inform the device that the guest contents have been updated.)
- */
-
-typedef
-struct SVGA3dCmdUpdateGBImage {
- SVGA3dSurfaceImageId image;
- SVGA3dBox box;
-} __packed
-SVGA3dCmdUpdateGBImage; /* SVGA_3D_CMD_UPDATE_GB_IMAGE */
-
-/*
- * Update an entire guest-backed surface.
- * (Inform the device that the guest contents have been updated.)
- */
-
-typedef
-struct SVGA3dCmdUpdateGBSurface {
- uint32 sid;
-} __packed
-SVGA3dCmdUpdateGBSurface; /* SVGA_3D_CMD_UPDATE_GB_SURFACE */
-
-/*
- * Readback an image in a guest-backed surface.
- * (Request the device to flush the dirty contents into the guest.)
- */
-
-typedef
-struct SVGA3dCmdReadbackGBImage {
- SVGA3dSurfaceImageId image;
-} __packed
-SVGA3dCmdReadbackGBImage; /* SVGA_3D_CMD_READBACK_GB_IMAGE */
-
-/*
- * Readback an entire guest-backed surface.
- * (Request the device to flush the dirty contents into the guest.)
- */
-
-typedef
-struct SVGA3dCmdReadbackGBSurface {
- uint32 sid;
-} __packed
-SVGA3dCmdReadbackGBSurface; /* SVGA_3D_CMD_READBACK_GB_SURFACE */
-
-/*
- * Readback a sub rect of an image in a guest-backed surface. After
- * issuing this command, the driver is required to issue an update of
- * the same region before issuing any other commands that reference
- * this surface; otherwise, rendering is not guaranteed.
- */
-
-typedef
-struct SVGA3dCmdReadbackGBImagePartial {
- SVGA3dSurfaceImageId image;
- SVGA3dBox box;
- uint32 invertBox;
-} __packed
-SVGA3dCmdReadbackGBImagePartial; /* SVGA_3D_CMD_READBACK_GB_IMAGE_PARTIAL */
-
-/*
- * Invalidate an image in a guest-backed surface.
- * (Notify the device that the contents can be lost.)
- */
-
-typedef
-struct SVGA3dCmdInvalidateGBImage {
- SVGA3dSurfaceImageId image;
-} __packed
-SVGA3dCmdInvalidateGBImage; /* SVGA_3D_CMD_INVALIDATE_GB_IMAGE */
-
-/*
- * Invalidate an entire guest-backed surface.
- * (Notify the device that the contents of all images can be lost.)
- */
-
-typedef
-struct SVGA3dCmdInvalidateGBSurface {
- uint32 sid;
-} __packed
-SVGA3dCmdInvalidateGBSurface; /* SVGA_3D_CMD_INVALIDATE_GB_SURFACE */
-
-/*
- * Invalidate a sub rect of an image in a guest-backed surface. After
- * issuing this command, the driver is required to issue an update of
- * the same region before issuing any other commands that reference
- * this surface; otherwise, rendering is not guaranteed.
- */
-
-typedef
-struct SVGA3dCmdInvalidateGBImagePartial {
- SVGA3dSurfaceImageId image;
- SVGA3dBox box;
- uint32 invertBox;
-} __packed
-SVGA3dCmdInvalidateGBImagePartial; /* SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL */
-
-/*
- * Define a guest-backed context.
- */
-
-typedef
-struct SVGA3dCmdDefineGBContext {
- uint32 cid;
-} __packed
-SVGA3dCmdDefineGBContext; /* SVGA_3D_CMD_DEFINE_GB_CONTEXT */
-
-/*
- * Destroy a guest-backed context.
- */
-
-typedef
-struct SVGA3dCmdDestroyGBContext {
- uint32 cid;
-} __packed
-SVGA3dCmdDestroyGBContext; /* SVGA_3D_CMD_DESTROY_GB_CONTEXT */
-
-/*
- * Bind a guest-backed context.
- *
- * validContents should be set to 0 for new contexts,
- * and 1 if this is an old context that is getting paged
- * back onto the device.
- *
- * For new contexts, it is recommended that the driver
- * issue commands to initialize all interesting state
- * prior to rendering.
- */
-
-typedef
-struct SVGA3dCmdBindGBContext {
- uint32 cid;
- SVGAMobId mobid;
- uint32 validContents;
-} __packed
-SVGA3dCmdBindGBContext; /* SVGA_3D_CMD_BIND_GB_CONTEXT */
-
-/*
- * Readback a guest-backed context.
- * (Request that the device flush the contents back into guest memory.)
- */
-
-typedef
-struct SVGA3dCmdReadbackGBContext {
- uint32 cid;
-} __packed
-SVGA3dCmdReadbackGBContext; /* SVGA_3D_CMD_READBACK_GB_CONTEXT */
-
-/*
- * Invalidate a guest-backed context.
- */
-typedef
-struct SVGA3dCmdInvalidateGBContext {
- uint32 cid;
-} __packed
-SVGA3dCmdInvalidateGBContext; /* SVGA_3D_CMD_INVALIDATE_GB_CONTEXT */
-
-/*
- * Define a guest-backed shader.
- */
-
-typedef
-struct SVGA3dCmdDefineGBShader {
- uint32 shid;
- SVGA3dShaderType type;
- uint32 sizeInBytes;
-} __packed
-SVGA3dCmdDefineGBShader; /* SVGA_3D_CMD_DEFINE_GB_SHADER */
-
-/*
- * Bind a guest-backed shader.
- */
-
-typedef struct SVGA3dCmdBindGBShader {
- uint32 shid;
- SVGAMobId mobid;
- uint32 offsetInBytes;
-} __packed
-SVGA3dCmdBindGBShader; /* SVGA_3D_CMD_BIND_GB_SHADER */
-
-/*
- * Destroy a guest-backed shader.
- */
-
-typedef struct SVGA3dCmdDestroyGBShader {
- uint32 shid;
-} __packed
-SVGA3dCmdDestroyGBShader; /* SVGA_3D_CMD_DESTROY_GB_SHADER */
-
-typedef
-struct {
- uint32 cid;
- uint32 regStart;
- SVGA3dShaderType shaderType;
- SVGA3dShaderConstType constType;
-
- /*
- * Followed by a variable number of shader constants.
- *
- * Note that FLOAT and INT constants are 4-dwords in length, while
- * BOOL constants are 1-dword in length.
- */
-} __packed
-SVGA3dCmdSetGBShaderConstInline;
-/* SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE */
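
A sketch of the payload sizing rule stated in the comment above; it
assumes the SVGA3D_CONST_TYPE_BOOL enumerator from SVGA3dShaderConstType:

/* Sketch: BOOL constants are 1 dword each; FLOAT and INT are 4. */
static inline uint32 example_const_payload_dwords(SVGA3dShaderConstType ctype,
                                                  uint32 num_consts)
{
        return num_consts * (ctype == SVGA3D_CONST_TYPE_BOOL ? 1 : 4);
}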
-
-typedef
-struct {
- uint32 cid;
- SVGA3dQueryType type;
-} __packed
-SVGA3dCmdBeginGBQuery; /* SVGA_3D_CMD_BEGIN_GB_QUERY */
-
-typedef
-struct {
- uint32 cid;
- SVGA3dQueryType type;
- SVGAMobId mobid;
- uint32 offset;
-} __packed
-SVGA3dCmdEndGBQuery; /* SVGA_3D_CMD_END_GB_QUERY */
-
-
-/*
- * SVGA_3D_CMD_WAIT_FOR_GB_QUERY --
- *
- * The semantics of this command are identical to the
- * SVGA_3D_CMD_WAIT_FOR_QUERY except that the results are written
- * to a Mob instead of a GMR.
- */
-
-typedef
-struct {
- uint32 cid;
- SVGA3dQueryType type;
- SVGAMobId mobid;
- uint32 offset;
-} __packed
-SVGA3dCmdWaitForGBQuery; /* SVGA_3D_CMD_WAIT_FOR_GB_QUERY */
-
-typedef
-struct {
- SVGAMobId mobid;
- uint32 fbOffset;
- uint32 initalized;
-} __packed
-SVGA3dCmdEnableGart; /* SVGA_3D_CMD_ENABLE_GART */
-
-typedef
-struct {
- SVGAMobId mobid;
- uint32 gartOffset;
-} __packed
-SVGA3dCmdMapMobIntoGart; /* SVGA_3D_CMD_MAP_MOB_INTO_GART */
-
-
-typedef
-struct {
- uint32 gartOffset;
- uint32 numPages;
-} __packed
-SVGA3dCmdUnmapGartRange; /* SVGA_3D_CMD_UNMAP_GART_RANGE */
-
-
-/*
- * Screen Targets
- */
-#define SVGA_STFLAG_PRIMARY (1 << 0)
-
-typedef
-struct {
- uint32 stid;
- uint32 width;
- uint32 height;
- int32 xRoot;
- int32 yRoot;
- uint32 flags;
-} __packed
-SVGA3dCmdDefineGBScreenTarget; /* SVGA_3D_CMD_DEFINE_GB_SCREENTARGET */
-
-typedef
-struct {
- uint32 stid;
-} __packed
-SVGA3dCmdDestroyGBScreenTarget; /* SVGA_3D_CMD_DESTROY_GB_SCREENTARGET */
-
-typedef
-struct {
- uint32 stid;
- SVGA3dSurfaceImageId image;
-} __packed
-SVGA3dCmdBindGBScreenTarget; /* SVGA_3D_CMD_BIND_GB_SCREENTARGET */
-
-typedef
-struct {
- uint32 stid;
- SVGA3dBox box;
-} __packed
-SVGA3dCmdUpdateGBScreenTarget; /* SVGA_3D_CMD_UPDATE_GB_SCREENTARGET */
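
An illustrative bring-up sequence for a screen target using the commands
above; submit_cmd is a hypothetical FIFO helper, and error handling is
omitted:

/* Sketch: define the target, bind a surface image, then flush it. */
static void example_setup_screen_target(uint32 stid, uint32 w, uint32 h,
                                        SVGA3dSurfaceImageId image)
{
        SVGA3dCmdDefineGBScreenTarget define = {
                .stid = stid, .width = w, .height = h,
                .xRoot = 0, .yRoot = 0, .flags = SVGA_STFLAG_PRIMARY,
        };
        SVGA3dCmdBindGBScreenTarget bind = { .stid = stid, .image = image };
        SVGA3dCmdUpdateGBScreenTarget update = {
                .stid = stid, .box = { 0, 0, 0, w, h, 1 },
        };

        submit_cmd(SVGA_3D_CMD_DEFINE_GB_SCREENTARGET, &define, sizeof(define));
        submit_cmd(SVGA_3D_CMD_BIND_GB_SCREENTARGET, &bind, sizeof(bind));
        submit_cmd(SVGA_3D_CMD_UPDATE_GB_SCREENTARGET, &update, sizeof(update));
}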
-
-/*
- * Capability query index.
- *
- * Notes:
- *
- * 1. SVGA3D_DEVCAP_MAX_TEXTURES reflects the maximum number of
- * fixed-function texture units available. Each of these units
- * works in both FFP and Shader modes, and they support texture
- * transforms and texture coordinates. The host may have additional
- * texture image units that are only usable with shaders.
- *
- * 2. The BUFFER_FORMAT capabilities are deprecated, and they always
- * return TRUE. Even on physical hardware that does not support
- * these formats natively, the SVGA3D device will provide an emulation
- * which should be invisible to the guest OS.
- *
- * In general, the SVGA3D device should support any operation on
- * any surface format; it just may perform some of these
- * operations in software depending on the capabilities of the
- * available physical hardware.
- *
- * XXX: In the future, we will add capabilities that describe in
- * detail what formats are supported in hardware for what kinds
- * of operations.
- */
-
-typedef enum {
- SVGA3D_DEVCAP_3D = 0,
- SVGA3D_DEVCAP_MAX_LIGHTS = 1,
- SVGA3D_DEVCAP_MAX_TEXTURES = 2, /* See note (1) */
- SVGA3D_DEVCAP_MAX_CLIP_PLANES = 3,
- SVGA3D_DEVCAP_VERTEX_SHADER_VERSION = 4,
- SVGA3D_DEVCAP_VERTEX_SHADER = 5,
- SVGA3D_DEVCAP_FRAGMENT_SHADER_VERSION = 6,
- SVGA3D_DEVCAP_FRAGMENT_SHADER = 7,
- SVGA3D_DEVCAP_MAX_RENDER_TARGETS = 8,
- SVGA3D_DEVCAP_S23E8_TEXTURES = 9,
- SVGA3D_DEVCAP_S10E5_TEXTURES = 10,
- SVGA3D_DEVCAP_MAX_FIXED_VERTEXBLEND = 11,
- SVGA3D_DEVCAP_D16_BUFFER_FORMAT = 12, /* See note (2) */
- SVGA3D_DEVCAP_D24S8_BUFFER_FORMAT = 13, /* See note (2) */
- SVGA3D_DEVCAP_D24X8_BUFFER_FORMAT = 14, /* See note (2) */
- SVGA3D_DEVCAP_QUERY_TYPES = 15,
- SVGA3D_DEVCAP_TEXTURE_GRADIENT_SAMPLING = 16,
- SVGA3D_DEVCAP_MAX_POINT_SIZE = 17,
- SVGA3D_DEVCAP_MAX_SHADER_TEXTURES = 18,
- SVGA3D_DEVCAP_MAX_TEXTURE_WIDTH = 19,
- SVGA3D_DEVCAP_MAX_TEXTURE_HEIGHT = 20,
- SVGA3D_DEVCAP_MAX_VOLUME_EXTENT = 21,
- SVGA3D_DEVCAP_MAX_TEXTURE_REPEAT = 22,
- SVGA3D_DEVCAP_MAX_TEXTURE_ASPECT_RATIO = 23,
- SVGA3D_DEVCAP_MAX_TEXTURE_ANISOTROPY = 24,
- SVGA3D_DEVCAP_MAX_PRIMITIVE_COUNT = 25,
- SVGA3D_DEVCAP_MAX_VERTEX_INDEX = 26,
- SVGA3D_DEVCAP_MAX_VERTEX_SHADER_INSTRUCTIONS = 27,
- SVGA3D_DEVCAP_MAX_FRAGMENT_SHADER_INSTRUCTIONS = 28,
- SVGA3D_DEVCAP_MAX_VERTEX_SHADER_TEMPS = 29,
- SVGA3D_DEVCAP_MAX_FRAGMENT_SHADER_TEMPS = 30,
- SVGA3D_DEVCAP_TEXTURE_OPS = 31,
- SVGA3D_DEVCAP_SURFACEFMT_X8R8G8B8 = 32,
- SVGA3D_DEVCAP_SURFACEFMT_A8R8G8B8 = 33,
- SVGA3D_DEVCAP_SURFACEFMT_A2R10G10B10 = 34,
- SVGA3D_DEVCAP_SURFACEFMT_X1R5G5B5 = 35,
- SVGA3D_DEVCAP_SURFACEFMT_A1R5G5B5 = 36,
- SVGA3D_DEVCAP_SURFACEFMT_A4R4G4B4 = 37,
- SVGA3D_DEVCAP_SURFACEFMT_R5G6B5 = 38,
- SVGA3D_DEVCAP_SURFACEFMT_LUMINANCE16 = 39,
- SVGA3D_DEVCAP_SURFACEFMT_LUMINANCE8_ALPHA8 = 40,
- SVGA3D_DEVCAP_SURFACEFMT_ALPHA8 = 41,
- SVGA3D_DEVCAP_SURFACEFMT_LUMINANCE8 = 42,
- SVGA3D_DEVCAP_SURFACEFMT_Z_D16 = 43,
- SVGA3D_DEVCAP_SURFACEFMT_Z_D24S8 = 44,
- SVGA3D_DEVCAP_SURFACEFMT_Z_D24X8 = 45,
- SVGA3D_DEVCAP_SURFACEFMT_DXT1 = 46,
- SVGA3D_DEVCAP_SURFACEFMT_DXT2 = 47,
- SVGA3D_DEVCAP_SURFACEFMT_DXT3 = 48,
- SVGA3D_DEVCAP_SURFACEFMT_DXT4 = 49,
- SVGA3D_DEVCAP_SURFACEFMT_DXT5 = 50,
- SVGA3D_DEVCAP_SURFACEFMT_BUMPX8L8V8U8 = 51,
- SVGA3D_DEVCAP_SURFACEFMT_A2W10V10U10 = 52,
- SVGA3D_DEVCAP_SURFACEFMT_BUMPU8V8 = 53,
- SVGA3D_DEVCAP_SURFACEFMT_Q8W8V8U8 = 54,
- SVGA3D_DEVCAP_SURFACEFMT_CxV8U8 = 55,
- SVGA3D_DEVCAP_SURFACEFMT_R_S10E5 = 56,
- SVGA3D_DEVCAP_SURFACEFMT_R_S23E8 = 57,
- SVGA3D_DEVCAP_SURFACEFMT_RG_S10E5 = 58,
- SVGA3D_DEVCAP_SURFACEFMT_RG_S23E8 = 59,
- SVGA3D_DEVCAP_SURFACEFMT_ARGB_S10E5 = 60,
- SVGA3D_DEVCAP_SURFACEFMT_ARGB_S23E8 = 61,
- SVGA3D_DEVCAP_MAX_VERTEX_SHADER_TEXTURES = 63,
-
- /*
- * Note that MAX_SIMULTANEOUS_RENDER_TARGETS is a maximum count of color
- * render targets. This does not include the depth or stencil targets.
- */
- SVGA3D_DEVCAP_MAX_SIMULTANEOUS_RENDER_TARGETS = 64,
-
- SVGA3D_DEVCAP_SURFACEFMT_V16U16 = 65,
- SVGA3D_DEVCAP_SURFACEFMT_G16R16 = 66,
- SVGA3D_DEVCAP_SURFACEFMT_A16B16G16R16 = 67,
- SVGA3D_DEVCAP_SURFACEFMT_UYVY = 68,
- SVGA3D_DEVCAP_SURFACEFMT_YUY2 = 69,
- SVGA3D_DEVCAP_MULTISAMPLE_NONMASKABLESAMPLES = 70,
- SVGA3D_DEVCAP_MULTISAMPLE_MASKABLESAMPLES = 71,
- SVGA3D_DEVCAP_ALPHATOCOVERAGE = 72,
- SVGA3D_DEVCAP_SUPERSAMPLE = 73,
- SVGA3D_DEVCAP_AUTOGENMIPMAPS = 74,
- SVGA3D_DEVCAP_SURFACEFMT_NV12 = 75,
- SVGA3D_DEVCAP_SURFACEFMT_AYUV = 76,
-
- /*
- * This is the maximum number of SVGA context IDs that the guest
- * can define using SVGA_3D_CMD_CONTEXT_DEFINE.
- */
- SVGA3D_DEVCAP_MAX_CONTEXT_IDS = 77,
-
- /*
- * This is the maximum number of SVGA surface IDs that the guest
- * can define using SVGA_3D_CMD_SURFACE_DEFINE*.
- */
- SVGA3D_DEVCAP_MAX_SURFACE_IDS = 78,
-
- SVGA3D_DEVCAP_SURFACEFMT_Z_DF16 = 79,
- SVGA3D_DEVCAP_SURFACEFMT_Z_DF24 = 80,
- SVGA3D_DEVCAP_SURFACEFMT_Z_D24S8_INT = 81,
-
- SVGA3D_DEVCAP_SURFACEFMT_BC4_UNORM = 82,
- SVGA3D_DEVCAP_SURFACEFMT_BC5_UNORM = 83,
-
- /*
- * Deprecated.
- */
- SVGA3D_DEVCAP_VGPU10 = 84,
-
- /*
- * This contains several SVGA_3D_CAPS_VIDEO_DECODE elements
- * ORed together, one for every type of video decoding supported.
- */
- SVGA3D_DEVCAP_VIDEO_DECODE = 85,
-
- /*
- * This contains several SVGA_3D_CAPS_VIDEO_PROCESS elements
- * ORed together, one for every type of video processing supported.
- */
- SVGA3D_DEVCAP_VIDEO_PROCESS = 86,
-
- SVGA3D_DEVCAP_LINE_AA = 87, /* boolean */
- SVGA3D_DEVCAP_LINE_STIPPLE = 88, /* boolean */
- SVGA3D_DEVCAP_MAX_LINE_WIDTH = 89, /* float */
- SVGA3D_DEVCAP_MAX_AA_LINE_WIDTH = 90, /* float */
-
- SVGA3D_DEVCAP_SURFACEFMT_YV12 = 91,
-
- /*
- * Does the host support the SVGA logic ops commands?
- */
- SVGA3D_DEVCAP_LOGICOPS = 92,
-
- /*
- * What support does the host have for screen targets?
- *
- * See the SVGA3D_SCREENTARGET_CAP bits below.
- */
- SVGA3D_DEVCAP_SCREENTARGETS = 93,
-
- SVGA3D_DEVCAP_MAX /* This must be the last index. */
-} SVGA3dDevCapIndex;
-
-typedef union {
- Bool b;
- uint32 u;
- int32 i;
- float f;
-} SVGA3dDevCapResult;
-
-typedef enum {
- SVGA3DCAPS_RECORD_UNKNOWN = 0,
- SVGA3DCAPS_RECORD_DEVCAPS_MIN = 0x100,
- SVGA3DCAPS_RECORD_DEVCAPS = 0x100,
- SVGA3DCAPS_RECORD_DEVCAPS_MAX = 0x1ff,
-} SVGA3dCapsRecordType;
-
-typedef
-struct SVGA3dCapsRecordHeader {
- uint32 length;
- SVGA3dCapsRecordType type;
-}
-SVGA3dCapsRecordHeader;
-
-typedef
-struct SVGA3dCapsRecord {
- SVGA3dCapsRecordHeader header;
- uint32 data[1];
-}
-SVGA3dCapsRecord;
-
-
-typedef uint32 SVGA3dCapPair[2];
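
A hedged sketch of walking a caps record list; it assumes the common SVGA
convention that 'length' counts 32-bit words including the header and that
a zero word terminates the list:

/* Sketch: locate the DEVCAPS record in a raw caps dword buffer. */
static const SVGA3dCapsRecord *
example_find_devcaps(const uint32 *caps, uint32 max_words)
{
        uint32 pos = 0;

        while (pos < max_words && caps[pos] != 0) {
                const SVGA3dCapsRecord *rec =
                        (const SVGA3dCapsRecord *) &caps[pos];

                if (rec->header.type == SVGA3DCAPS_RECORD_DEVCAPS)
                        return rec;
                pos += rec->header.length;
        }
        return NULL;
}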
-
-#endif /* _SVGA3D_REG_H_ */
diff --git a/drivers/gpu/drm/vmwgfx/svga3d_surfacedefs.h b/drivers/gpu/drm/vmwgfx/svga3d_surfacedefs.h
deleted file mode 100644
index ef3385096145..000000000000
--- a/drivers/gpu/drm/vmwgfx/svga3d_surfacedefs.h
+++ /dev/null
@@ -1,912 +0,0 @@
-/**************************************************************************
- *
- * Copyright © 2008-2012 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- **************************************************************************/
-
-#ifdef __KERNEL__
-
-#include <drm/vmwgfx_drm.h>
-#define surf_size_struct struct drm_vmw_size
-
-#else /* __KERNEL__ */
-
-#ifndef ARRAY_SIZE
-#define ARRAY_SIZE(_A) (sizeof(_A) / sizeof((_A)[0]))
-#endif /* ARRAY_SIZE */
-
-#define DIV_ROUND_UP(x, y) (((x) + (y) - 1) / (y))
-#define max_t(type, x, y) ((x) > (y) ? (x) : (y))
-#define min_t(type, x, y) ((x) < (y) ? (x) : (y))
-#define surf_size_struct SVGA3dSize
-#define u32 uint32
-#define u64 uint64_t
-#define U32_MAX ((u32)~0U)
-
-#endif /* __KERNEL__ */
-
-#include "svga3d_reg.h"
-
-/*
- * enum svga3d_block_desc describes the active data channels in a block.
- *
- * There can be at most four active channels in a block:
- * 1. Blue and bump U are stored in the first channel.
- * 2. Green, bump V and stencil are stored in the second channel.
- * 3. Red, bump W, luminance and depth are stored in the third channel.
- * 4. Alpha and bump Q are stored in the fourth channel.
- *
- * Block channels can be used to store compressed and buffer data:
- * 1. For compressed formats, only the data channel is used and its size
- *    is equal to that of a single block in the compression scheme.
- * 2. For buffer formats, only the data channel is used and its size is
- *    exactly one byte in length.
- * 3. In each case, the bit depth represents the size of a single block.
- *
- * Note: Compressed and IEEE formats do not use the bitMask structure.
- */
-
-enum svga3d_block_desc {
- SVGA3DBLOCKDESC_NONE = 0, /* No channels are active */
- SVGA3DBLOCKDESC_BLUE = 1 << 0, /* Block with blue channel
- data */
- SVGA3DBLOCKDESC_U = 1 << 0, /* Block with bump U channel
- data */
- SVGA3DBLOCKDESC_UV_VIDEO = 1 << 7, /* Block with alternating video
- U and V */
- SVGA3DBLOCKDESC_GREEN = 1 << 1, /* Block with green channel
- data */
- SVGA3DBLOCKDESC_V = 1 << 1, /* Block with bump V channel
- data */
- SVGA3DBLOCKDESC_STENCIL = 1 << 1, /* Block with a stencil
- channel */
- SVGA3DBLOCKDESC_RED = 1 << 2, /* Block with red channel
- data */
- SVGA3DBLOCKDESC_W = 1 << 2, /* Block with bump W channel
- data */
- SVGA3DBLOCKDESC_LUMINANCE = 1 << 2, /* Block with luminance channel
- data */
- SVGA3DBLOCKDESC_Y = 1 << 2, /* Block with video luminance
- data */
- SVGA3DBLOCKDESC_DEPTH = 1 << 2, /* Block with depth channel */
- SVGA3DBLOCKDESC_ALPHA = 1 << 3, /* Block with an alpha
- channel */
- SVGA3DBLOCKDESC_Q = 1 << 3, /* Block with bump Q channel
- data */
- SVGA3DBLOCKDESC_BUFFER = 1 << 4, /* Block stores 1 byte of
- data */
- SVGA3DBLOCKDESC_COMPRESSED = 1 << 5, /* Block stores n bytes of
- data depending on the
- compression method used */
- SVGA3DBLOCKDESC_IEEE_FP = 1 << 6, /* Block stores data in an IEEE
- floating point
- representation in
- all channels */
- SVGA3DBLOCKDESC_PLANAR_YUV = 1 << 8, /* Three separate blocks store
- data. */
- SVGA3DBLOCKDESC_U_VIDEO = 1 << 9, /* Block with U video data */
- SVGA3DBLOCKDESC_V_VIDEO = 1 << 10, /* Block with V video data */
- SVGA3DBLOCKDESC_EXP = 1 << 11, /* Shared exponent */
- SVGA3DBLOCKDESC_SRGB = 1 << 12, /* Data is in sRGB format */
- SVGA3DBLOCKDESC_2PLANAR_YUV = 1 << 13, /* 2 planes of Y, UV,
- e.g., NV12. */
- SVGA3DBLOCKDESC_3PLANAR_YUV = 1 << 14, /* 3 planes of separate
- Y, U, V, e.g., YV12. */
-
- SVGA3DBLOCKDESC_RG = SVGA3DBLOCKDESC_RED |
- SVGA3DBLOCKDESC_GREEN,
- SVGA3DBLOCKDESC_RGB = SVGA3DBLOCKDESC_RG |
- SVGA3DBLOCKDESC_BLUE,
- SVGA3DBLOCKDESC_RGB_SRGB = SVGA3DBLOCKDESC_RGB |
- SVGA3DBLOCKDESC_SRGB,
- SVGA3DBLOCKDESC_RGBA = SVGA3DBLOCKDESC_RGB |
- SVGA3DBLOCKDESC_ALPHA,
- SVGA3DBLOCKDESC_RGBA_SRGB = SVGA3DBLOCKDESC_RGBA |
- SVGA3DBLOCKDESC_SRGB,
- SVGA3DBLOCKDESC_UV = SVGA3DBLOCKDESC_U |
- SVGA3DBLOCKDESC_V,
- SVGA3DBLOCKDESC_UVL = SVGA3DBLOCKDESC_UV |
- SVGA3DBLOCKDESC_LUMINANCE,
- SVGA3DBLOCKDESC_UVW = SVGA3DBLOCKDESC_UV |
- SVGA3DBLOCKDESC_W,
- SVGA3DBLOCKDESC_UVWA = SVGA3DBLOCKDESC_UVW |
- SVGA3DBLOCKDESC_ALPHA,
- SVGA3DBLOCKDESC_UVWQ = SVGA3DBLOCKDESC_U |
- SVGA3DBLOCKDESC_V |
- SVGA3DBLOCKDESC_W |
- SVGA3DBLOCKDESC_Q,
- SVGA3DBLOCKDESC_LA = SVGA3DBLOCKDESC_LUMINANCE |
- SVGA3DBLOCKDESC_ALPHA,
- SVGA3DBLOCKDESC_R_FP = SVGA3DBLOCKDESC_RED |
- SVGA3DBLOCKDESC_IEEE_FP,
- SVGA3DBLOCKDESC_RG_FP = SVGA3DBLOCKDESC_R_FP |
- SVGA3DBLOCKDESC_GREEN,
- SVGA3DBLOCKDESC_RGB_FP = SVGA3DBLOCKDESC_RG_FP |
- SVGA3DBLOCKDESC_BLUE,
- SVGA3DBLOCKDESC_RGBA_FP = SVGA3DBLOCKDESC_RGB_FP |
- SVGA3DBLOCKDESC_ALPHA,
- SVGA3DBLOCKDESC_DS = SVGA3DBLOCKDESC_DEPTH |
- SVGA3DBLOCKDESC_STENCIL,
- SVGA3DBLOCKDESC_YUV = SVGA3DBLOCKDESC_UV_VIDEO |
- SVGA3DBLOCKDESC_Y,
- SVGA3DBLOCKDESC_AYUV = SVGA3DBLOCKDESC_ALPHA |
- SVGA3DBLOCKDESC_Y |
- SVGA3DBLOCKDESC_U_VIDEO |
- SVGA3DBLOCKDESC_V_VIDEO,
- SVGA3DBLOCKDESC_RGBE = SVGA3DBLOCKDESC_RGB |
- SVGA3DBLOCKDESC_EXP,
- SVGA3DBLOCKDESC_COMPRESSED_SRGB = SVGA3DBLOCKDESC_COMPRESSED |
- SVGA3DBLOCKDESC_SRGB,
- SVGA3DBLOCKDESC_NV12 = SVGA3DBLOCKDESC_PLANAR_YUV |
- SVGA3DBLOCKDESC_2PLANAR_YUV,
- SVGA3DBLOCKDESC_YV12 = SVGA3DBLOCKDESC_PLANAR_YUV |
- SVGA3DBLOCKDESC_3PLANAR_YUV,
-};
-
-/*
- * SVGA3dSurfaceDesc describes the actual pixel data.
- *
- * This structure provides the following information:
- * 1. Block description.
- * 2. Dimensions of a block in the surface.
- * 3. Size of block in bytes.
- * 4. Bit depth of the pixel data.
- * 5. Channel bit depths and masks (if applicable).
- */
-#define SVGA3D_CHANNEL_DEF(type) \
- struct { \
- union { \
- type blue; \
- type u; \
- type uv_video; \
- type u_video; \
- }; \
- union { \
- type green; \
- type v; \
- type stencil; \
- type v_video; \
- }; \
- union { \
- type red; \
- type w; \
- type luminance; \
- type y; \
- type depth; \
- type data; \
- }; \
- union { \
- type alpha; \
- type q; \
- type exp; \
- }; \
- }
-
-struct svga3d_surface_desc {
- enum svga3d_block_desc block_desc;
- surf_size_struct block_size;
- u32 bytes_per_block;
- u32 pitch_bytes_per_block;
-
- struct {
- u32 total;
- SVGA3D_CHANNEL_DEF(uint8);
- } bit_depth;
-
- struct {
- SVGA3D_CHANNEL_DEF(uint8);
- } bit_offset;
-};
-
-static const struct svga3d_surface_desc svga3d_surface_descs[] = {
- {SVGA3DBLOCKDESC_NONE,
- {1, 1, 1}, 0, 0, {0, {{0}, {0}, {0}, {0} } },
- {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_FORMAT_INVALID */
-
- {SVGA3DBLOCKDESC_RGB,
- {1, 1, 1}, 4, 4, {24, {{8}, {8}, {8}, {0} } },
- {{{0}, {8}, {16}, {24} } } }, /* SVGA3D_X8R8G8B8 */
-
- {SVGA3DBLOCKDESC_RGBA,
- {1, 1, 1}, 4, 4, {32, {{8}, {8}, {8}, {8} } },
- {{{0}, {8}, {16}, {24} } } }, /* SVGA3D_A8R8G8B8 */
-
- {SVGA3DBLOCKDESC_RGB,
- {1, 1, 1}, 2, 2, {16, {{5}, {6}, {5}, {0} } },
- {{{0}, {5}, {11}, {0} } } }, /* SVGA3D_R5G6B5 */
-
- {SVGA3DBLOCKDESC_RGB,
- {1, 1, 1}, 2, 2, {15, {{5}, {5}, {5}, {0} } },
- {{{0}, {5}, {10}, {0} } } }, /* SVGA3D_X1R5G5B5 */
-
- {SVGA3DBLOCKDESC_RGBA,
- {1, 1, 1}, 2, 2, {16, {{5}, {5}, {5}, {1} } },
- {{{0}, {5}, {10}, {15} } } }, /* SVGA3D_A1R5G5B5 */
-
- {SVGA3DBLOCKDESC_RGBA,
- {1, 1, 1}, 2, 2, {16, {{4}, {4}, {4}, {4} } },
- {{{0}, {4}, {8}, {12} } } }, /* SVGA3D_A4R4G4B4 */
-
- {SVGA3DBLOCKDESC_DEPTH,
- {1, 1, 1}, 4, 4, {32, {{0}, {0}, {32}, {0} } },
- {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_Z_D32 */
-
- {SVGA3DBLOCKDESC_DEPTH,
- {1, 1, 1}, 2, 2, {16, {{0}, {0}, {16}, {0} } },
- {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_Z_D16 */
-
- {SVGA3DBLOCKDESC_DS,
- {1, 1, 1}, 4, 4, {32, {{0}, {8}, {24}, {0} } },
- {{{0}, {24}, {0}, {0} } } }, /* SVGA3D_Z_D24S8 */
-
- {SVGA3DBLOCKDESC_DS,
- {1, 1, 1}, 2, 2, {16, {{0}, {1}, {15}, {0} } },
- {{{0}, {15}, {0}, {0} } } }, /* SVGA3D_Z_D15S1 */
-
- {SVGA3DBLOCKDESC_LUMINANCE,
- {1, 1, 1}, 1, 1, {8, {{0}, {0}, {8}, {0} } },
- {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_LUMINANCE8 */
-
- {SVGA3DBLOCKDESC_LA,
- {1, 1, 1}, 1, 1, {8, {{0}, {0}, {4}, {4} } },
- {{{0}, {0}, {0}, {4} } } }, /* SVGA3D_LUMINANCE4_ALPHA4 */
-
- {SVGA3DBLOCKDESC_LUMINANCE,
- {1, 1, 1}, 2, 2, {16, {{0}, {0}, {16}, {0} } },
- {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_LUMINANCE16 */
-
- {SVGA3DBLOCKDESC_LA,
- {1, 1, 1}, 2, 2, {16, {{0}, {0}, {8}, {8} } },
- {{{0}, {0}, {0}, {8} } } }, /* SVGA3D_LUMINANCE8_ALPHA8 */
-
- {SVGA3DBLOCKDESC_COMPRESSED,
- {4, 4, 1}, 8, 8, {64, {{0}, {0}, {64}, {0} } },
- {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_DXT1 */
-
- {SVGA3DBLOCKDESC_COMPRESSED,
- {4, 4, 1}, 16, 16, {128, {{0}, {0}, {128}, {0} } },
- {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_DXT2 */
-
- {SVGA3DBLOCKDESC_COMPRESSED,
- {4, 4, 1}, 16, 16, {128, {{0}, {0}, {128}, {0} } },
- {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_DXT3 */
-
- {SVGA3DBLOCKDESC_COMPRESSED,
- {4, 4, 1}, 16, 16, {128, {{0}, {0}, {128}, {0} } },
- {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_DXT4 */
-
- {SVGA3DBLOCKDESC_COMPRESSED,
- {4, 4, 1}, 16, 16, {128, {{0}, {0}, {128}, {0} } },
- {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_DXT5 */
-
- {SVGA3DBLOCKDESC_UV,
- {1, 1, 1}, 2, 2, {16, {{0}, {0}, {8}, {8} } },
- {{{0}, {0}, {0}, {8} } } }, /* SVGA3D_BUMPU8V8 */
-
- {SVGA3DBLOCKDESC_UVL,
- {1, 1, 1}, 2, 2, {16, {{5}, {5}, {6}, {0} } },
- {{{11}, {6}, {0}, {0} } } }, /* SVGA3D_BUMPL6V5U5 */
-
- {SVGA3DBLOCKDESC_UVL,
- {1, 1, 1}, 4, 4, {32, {{8}, {8}, {8}, {0} } },
- {{{16}, {8}, {0}, {0} } } }, /* SVGA3D_BUMPX8L8V8U8 */
-
- {SVGA3DBLOCKDESC_UVL,
- {1, 1, 1}, 3, 3, {24, {{8}, {8}, {8}, {0} } },
- {{{16}, {8}, {0}, {0} } } }, /* SVGA3D_BUMPL8V8U8 */
-
- {SVGA3DBLOCKDESC_RGBA_FP,
- {1, 1, 1}, 8, 8, {64, {{16}, {16}, {16}, {16} } },
- {{{32}, {16}, {0}, {48} } } }, /* SVGA3D_ARGB_S10E5 */
-
- {SVGA3DBLOCKDESC_RGBA_FP,
- {1, 1, 1}, 16, 16, {128, {{32}, {32}, {32}, {32} } },
- {{{64}, {32}, {0}, {96} } } }, /* SVGA3D_ARGB_S23E8 */
-
- {SVGA3DBLOCKDESC_RGBA,
- {1, 1, 1}, 4, 4, {32, {{10}, {10}, {10}, {2} } },
- {{{0}, {10}, {20}, {30} } } }, /* SVGA3D_A2R10G10B10 */
-
- {SVGA3DBLOCKDESC_UV,
- {1, 1, 1}, 2, 2, {16, {{8}, {8}, {0}, {0} } },
- {{{8}, {0}, {0}, {0} } } }, /* SVGA3D_V8U8 */
-
- {SVGA3DBLOCKDESC_UVWQ,
- {1, 1, 1}, 4, 4, {32, {{8}, {8}, {8}, {8} } },
- {{{24}, {16}, {8}, {0} } } }, /* SVGA3D_Q8W8V8U8 */
-
- {SVGA3DBLOCKDESC_UV,
- {1, 1, 1}, 2, 2, {16, {{8}, {8}, {0}, {0} } },
- {{{8}, {0}, {0}, {0} } } }, /* SVGA3D_CxV8U8 */
-
- {SVGA3DBLOCKDESC_UVL,
- {1, 1, 1}, 4, 4, {24, {{8}, {8}, {8}, {0} } },
- {{{16}, {8}, {0}, {0} } } }, /* SVGA3D_X8L8V8U8 */
-
- {SVGA3DBLOCKDESC_UVWA,
- {1, 1, 1}, 4, 4, {32, {{10}, {10}, {10}, {2} } },
- {{{0}, {10}, {20}, {30} } } }, /* SVGA3D_A2W10V10U10 */
-
- {SVGA3DBLOCKDESC_ALPHA,
- {1, 1, 1}, 1, 1, {8, {{0}, {0}, {0}, {8} } },
- {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_ALPHA8 */
-
- {SVGA3DBLOCKDESC_R_FP,
- {1, 1, 1}, 2, 2, {16, {{0}, {0}, {16}, {0} } },
- {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R_S10E5 */
-
- {SVGA3DBLOCKDESC_R_FP,
- {1, 1, 1}, 4, 4, {32, {{0}, {0}, {32}, {0} } },
- {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R_S23E8 */
-
- {SVGA3DBLOCKDESC_RG_FP,
- {1, 1, 1}, 4, 4, {32, {{0}, {16}, {16}, {0} } },
- {{{0}, {16}, {0}, {0} } } }, /* SVGA3D_RG_S10E5 */
-
- {SVGA3DBLOCKDESC_RG_FP,
- {1, 1, 1}, 8, 8, {64, {{0}, {32}, {32}, {0} } },
- {{{0}, {32}, {0}, {0} } } }, /* SVGA3D_RG_S23E8 */
-
- {SVGA3DBLOCKDESC_BUFFER,
- {1, 1, 1}, 1, 1, {8, {{0}, {0}, {8}, {0} } },
- {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_BUFFER */
-
- {SVGA3DBLOCKDESC_DEPTH,
- {1, 1, 1}, 4, 4, {32, {{0}, {0}, {24}, {0} } },
- {{{0}, {24}, {0}, {0} } } }, /* SVGA3D_Z_D24X8 */
-
- {SVGA3DBLOCKDESC_UV,
- {1, 1, 1}, 4, 4, {32, {{16}, {16}, {0}, {0} } },
- {{{16}, {0}, {0}, {0} } } }, /* SVGA3D_V16U16 */
-
- {SVGA3DBLOCKDESC_RG,
- {1, 1, 1}, 4, 4, {32, {{0}, {16}, {16}, {0} } },
- {{{0}, {0}, {16}, {0} } } }, /* SVGA3D_G16R16 */
-
- {SVGA3DBLOCKDESC_RGBA,
- {1, 1, 1}, 8, 8, {64, {{16}, {16}, {16}, {16} } },
- {{{32}, {16}, {0}, {48} } } }, /* SVGA3D_A16B16G16R16 */
-
- {SVGA3DBLOCKDESC_YUV,
- {1, 1, 1}, 2, 2, {16, {{8}, {0}, {8}, {0} } },
- {{{0}, {0}, {8}, {0} } } }, /* SVGA3D_UYVY */
-
- {SVGA3DBLOCKDESC_YUV,
- {1, 1, 1}, 2, 2, {16, {{8}, {0}, {8}, {0} } },
- {{{8}, {0}, {0}, {0} } } }, /* SVGA3D_YUY2 */
-
- {SVGA3DBLOCKDESC_NV12,
- {2, 2, 1}, 6, 2, {48, {{0}, {0}, {48}, {0} } },
- {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_NV12 */
-
- {SVGA3DBLOCKDESC_AYUV,
- {1, 1, 1}, 4, 4, {32, {{8}, {8}, {8}, {8} } },
- {{{0}, {8}, {16}, {24} } } }, /* SVGA3D_AYUV */
-
- {SVGA3DBLOCKDESC_RGBA,
- {1, 1, 1}, 16, 16, {128, {{32}, {32}, {32}, {32} } },
- {{{64}, {32}, {0}, {96} } } }, /* SVGA3D_R32G32B32A32_TYPELESS */
-
- {SVGA3DBLOCKDESC_RGBA,
- {1, 1, 1}, 16, 16, {128, {{32}, {32}, {32}, {32} } },
- {{{64}, {32}, {0}, {96} } } }, /* SVGA3D_R32G32B32A32_UINT */
-
- {SVGA3DBLOCKDESC_UVWQ,
- {1, 1, 1}, 16, 16, {128, {{32}, {32}, {32}, {32} } },
- {{{64}, {32}, {0}, {96} } } }, /* SVGA3D_R32G32B32A32_SINT */
-
- {SVGA3DBLOCKDESC_RGB,
- {1, 1, 1}, 12, 12, {96, {{32}, {32}, {32}, {0} } },
- {{{64}, {32}, {0}, {0} } } }, /* SVGA3D_R32G32B32_TYPELESS */
-
- {SVGA3DBLOCKDESC_RGB_FP,
- {1, 1, 1}, 12, 12, {96, {{32}, {32}, {32}, {0} } },
- {{{64}, {32}, {0}, {0} } } }, /* SVGA3D_R32G32B32_FLOAT */
-
- {SVGA3DBLOCKDESC_RGB,
- {1, 1, 1}, 12, 12, {96, {{32}, {32}, {32}, {0} } },
- {{{64}, {32}, {0}, {0} } } }, /* SVGA3D_R32G32B32_UINT */
-
- {SVGA3DBLOCKDESC_UVW,
- {1, 1, 1}, 12, 12, {96, {{32}, {32}, {32}, {0} } },
- {{{64}, {32}, {0}, {0} } } }, /* SVGA3D_R32G32B32_SINT */
-
- {SVGA3DBLOCKDESC_RGBA,
- {1, 1, 1}, 8, 8, {64, {{16}, {16}, {16}, {16} } },
- {{{32}, {16}, {0}, {48} } } }, /* SVGA3D_R16G16B16A16_TYPELESS */
-
- {SVGA3DBLOCKDESC_RGBA,
- {1, 1, 1}, 8, 8, {64, {{16}, {16}, {16}, {16} } },
- {{{32}, {16}, {0}, {48} } } }, /* SVGA3D_R16G16B16A16_UINT */
-
- {SVGA3DBLOCKDESC_UVWQ,
- {1, 1, 1}, 8, 8, {64, {{16}, {16}, {16}, {16} } },
- {{{32}, {16}, {0}, {48} } } }, /* SVGA3D_R16G16B16A16_SNORM */
-
- {SVGA3DBLOCKDESC_UVWQ,
- {1, 1, 1}, 8, 8, {64, {{16}, {16}, {16}, {16} } },
- {{{32}, {16}, {0}, {48} } } }, /* SVGA3D_R16G16B16A16_SINT */
-
- {SVGA3DBLOCKDESC_RG,
- {1, 1, 1}, 8, 8, {64, {{0}, {32}, {32}, {0} } },
- {{{0}, {32}, {0}, {0} } } }, /* SVGA3D_R32G32_TYPELESS */
-
- {SVGA3DBLOCKDESC_RG,
- {1, 1, 1}, 8, 8, {64, {{0}, {32}, {32}, {0} } },
- {{{0}, {32}, {0}, {0} } } }, /* SVGA3D_R32G32_UINT */
-
- {SVGA3DBLOCKDESC_UV,
- {1, 1, 1}, 8, 8, {64, {{0}, {32}, {32}, {0} } },
- {{{0}, {32}, {0}, {0} } } }, /* SVGA3D_R32G32_SINT */
-
- {SVGA3DBLOCKDESC_RG,
- {1, 1, 1}, 8, 8, {64, {{0}, {8}, {32}, {0} } },
- {{{0}, {32}, {0}, {0} } } }, /* SVGA3D_R32G8X24_TYPELESS */
-
- {SVGA3DBLOCKDESC_DS,
- {1, 1, 1}, 8, 8, {64, {{0}, {8}, {32}, {0} } },
- {{{0}, {32}, {0}, {0} } } }, /* SVGA3D_D32_FLOAT_S8X24_UINT */
-
- {SVGA3DBLOCKDESC_R_FP,
- {1, 1, 1}, 8, 8, {64, {{0}, {0}, {32}, {0} } },
- {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R32_FLOAT_X8_X24_TYPELESS */
-
- {SVGA3DBLOCKDESC_GREEN,
- {1, 1, 1}, 8, 8, {64, {{0}, {8}, {0}, {0} } },
- {{{0}, {32}, {0}, {0} } } }, /* SVGA3D_X32_TYPELESS_G8X24_UINT */
-
- {SVGA3DBLOCKDESC_RGBA,
- {1, 1, 1}, 4, 4, {32, {{10}, {10}, {10}, {2} } },
- {{{0}, {10}, {20}, {30} } } }, /* SVGA3D_R10G10B10A2_TYPELESS */
-
- {SVGA3DBLOCKDESC_RGBA,
- {1, 1, 1}, 4, 4, {32, {{10}, {10}, {10}, {2} } },
- {{{0}, {10}, {20}, {30} } } }, /* SVGA3D_R10G10B10A2_UINT */
-
- {SVGA3DBLOCKDESC_RGB_FP,
- {1, 1, 1}, 4, 4, {32, {{10}, {11}, {11}, {0} } },
- {{{0}, {10}, {21}, {0} } } }, /* SVGA3D_R11G11B10_FLOAT */
-
- {SVGA3DBLOCKDESC_RGBA,
- {1, 1, 1}, 4, 4, {32, {{8}, {8}, {8}, {8} } },
- {{{16}, {8}, {0}, {24} } } }, /* SVGA3D_R8G8B8A8_TYPELESS */
-
- {SVGA3DBLOCKDESC_RGBA,
- {1, 1, 1}, 4, 4, {32, {{8}, {8}, {8}, {8} } },
- {{{16}, {8}, {0}, {24} } } }, /* SVGA3D_R8G8B8A8_UNORM */
-
- {SVGA3DBLOCKDESC_RGBA_SRGB,
- {1, 1, 1}, 4, 4, {32, {{8}, {8}, {8}, {8} } },
- {{{16}, {8}, {0}, {24} } } }, /* SVGA3D_R8G8B8A8_UNORM_SRGB */
-
- {SVGA3DBLOCKDESC_RGBA,
- {1, 1, 1}, 4, 4, {32, {{8}, {8}, {8}, {8} } },
- {{{16}, {8}, {0}, {24} } } }, /* SVGA3D_R8G8B8A8_UINT */
-
- {SVGA3DBLOCKDESC_RGBA,
- {1, 1, 1}, 4, 4, {32, {{8}, {8}, {8}, {8} } },
- {{{16}, {8}, {0}, {24} } } }, /* SVGA3D_R8G8B8A8_SINT */
-
- {SVGA3DBLOCKDESC_RG,
- {1, 1, 1}, 4, 4, {32, {{0}, {16}, {16}, {0} } },
- {{{0}, {16}, {0}, {0} } } }, /* SVGA3D_R16G16_TYPELESS */
-
- {SVGA3DBLOCKDESC_RG_FP,
- {1, 1, 1}, 4, 4, {32, {{0}, {16}, {16}, {0} } },
- {{{0}, {16}, {0}, {0} } } }, /* SVGA3D_R16G16_UINT */
-
- {SVGA3DBLOCKDESC_UV,
- {1, 1, 1}, 4, 4, {32, {{0}, {16}, {16}, {0} } },
- {{{0}, {16}, {0}, {0} } } }, /* SVGA3D_R16G16_SINT */
-
- {SVGA3DBLOCKDESC_RED,
- {1, 1, 1}, 4, 4, {32, {{0}, {0}, {32}, {0} } },
- {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R32_TYPELESS */
-
- {SVGA3DBLOCKDESC_DEPTH,
- {1, 1, 1}, 4, 4, {32, {{0}, {0}, {32}, {0} } },
- {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_D32_FLOAT */
-
- {SVGA3DBLOCKDESC_RED,
- {1, 1, 1}, 4, 4, {32, {{0}, {0}, {32}, {0} } },
- {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R32_UINT */
-
- {SVGA3DBLOCKDESC_RED,
- {1, 1, 1}, 4, 4, {32, {{0}, {0}, {32}, {0} } },
- {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R32_SINT */
-
- {SVGA3DBLOCKDESC_RG,
- {1, 1, 1}, 4, 4, {32, {{0}, {8}, {24}, {0} } },
- {{{0}, {24}, {0}, {0} } } }, /* SVGA3D_R24G8_TYPELESS */
-
- {SVGA3DBLOCKDESC_DS,
- {1, 1, 1}, 4, 4, {32, {{0}, {8}, {24}, {0} } },
- {{{0}, {24}, {0}, {0} } } }, /* SVGA3D_D24_UNORM_S8_UINT */
-
- {SVGA3DBLOCKDESC_RED,
- {1, 1, 1}, 4, 4, {32, {{0}, {0}, {24}, {0} } },
- {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R24_UNORM_X8_TYPELESS */
-
- {SVGA3DBLOCKDESC_GREEN,
- {1, 1, 1}, 4, 4, {32, {{0}, {8}, {0}, {0} } },
- {{{0}, {24}, {0}, {0} } } }, /* SVGA3D_X24_TYPELESS_G8_UINT */
-
- {SVGA3DBLOCKDESC_RG,
- {1, 1, 1}, 2, 2, {16, {{0}, {8}, {8}, {0} } },
- {{{0}, {8}, {0}, {0} } } }, /* SVGA3D_R8G8_TYPELESS */
-
- {SVGA3DBLOCKDESC_RG,
- {1, 1, 1}, 2, 2, {16, {{0}, {8}, {8}, {0} } },
- {{{0}, {8}, {0}, {0} } } }, /* SVGA3D_R8G8_UNORM */
-
- {SVGA3DBLOCKDESC_RG,
- {1, 1, 1}, 2, 2, {16, {{0}, {8}, {8}, {0} } },
- {{{0}, {8}, {0}, {0} } } }, /* SVGA3D_R8G8_UINT */
-
- {SVGA3DBLOCKDESC_UV,
- {1, 1, 1}, 2, 2, {16, {{0}, {8}, {8}, {0} } },
- {{{0}, {8}, {0}, {0} } } }, /* SVGA3D_R8G8_SINT */
-
- {SVGA3DBLOCKDESC_RED,
- {1, 1, 1}, 2, 2, {16, {{0}, {0}, {16}, {0} } },
- {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R16_TYPELESS */
-
- {SVGA3DBLOCKDESC_RED,
- {1, 1, 1}, 2, 2, {16, {{0}, {0}, {16}, {0} } },
- {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R16_UNORM */
-
- {SVGA3DBLOCKDESC_RED,
- {1, 1, 1}, 2, 2, {16, {{0}, {0}, {16}, {0} } },
- {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R16_UINT */
-
- {SVGA3DBLOCKDESC_U,
- {1, 1, 1}, 2, 2, {16, {{0}, {0}, {16}, {0} } },
- {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R16_SNORM */
-
- {SVGA3DBLOCKDESC_U,
- {1, 1, 1}, 2, 2, {16, {{0}, {0}, {16}, {0} } },
- {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R16_SINT */
-
- {SVGA3DBLOCKDESC_RED,
- {1, 1, 1}, 1, 1, {8, {{0}, {0}, {8}, {0} } },
- {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R8_TYPELESS */
-
- {SVGA3DBLOCKDESC_RED,
- {1, 1, 1}, 1, 1, {8, {{0}, {0}, {8}, {0} } },
- {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R8_UNORM */
-
- {SVGA3DBLOCKDESC_RED,
- {1, 1, 1}, 1, 1, {8, {{0}, {0}, {8}, {0} } },
- {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R8_UINT */
-
- {SVGA3DBLOCKDESC_U,
- {1, 1, 1}, 1, 1, {8, {{0}, {0}, {8}, {0} } },
- {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R8_SNORM */
-
- {SVGA3DBLOCKDESC_U,
- {1, 1, 1}, 1, 1, {8, {{0}, {0}, {8}, {0} } },
- {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R8_SINT */
-
- {SVGA3DBLOCKDESC_RED,
- {8, 1, 1}, 1, 1, {8, {{0}, {0}, {8}, {0} } },
- {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R1_UNORM */
-
- {SVGA3DBLOCKDESC_RGBE,
- {1, 1, 1}, 4, 4, {32, {{9}, {9}, {9}, {5} } },
- {{{18}, {9}, {0}, {27} } } }, /* SVGA3D_R9G9B9E5_SHAREDEXP */
-
- {SVGA3DBLOCKDESC_RG,
- {1, 1, 1}, 2, 2, {16, {{0}, {8}, {8}, {0} } },
- {{{0}, {8}, {0}, {0} } } }, /* SVGA3D_R8G8_B8G8_UNORM */
-
- {SVGA3DBLOCKDESC_RG,
- {1, 1, 1}, 2, 2, {16, {{0}, {8}, {8}, {0} } },
- {{{0}, {8}, {0}, {0} } } }, /* SVGA3D_G8R8_G8B8_UNORM */
-
- {SVGA3DBLOCKDESC_COMPRESSED,
- {4, 4, 1}, 8, 8, {64, {{0}, {0}, {64}, {0} } },
- {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_BC1_TYPELESS */
-
- {SVGA3DBLOCKDESC_COMPRESSED_SRGB,
- {4, 4, 1}, 8, 8, {64, {{0}, {0}, {64}, {0} } },
- {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_BC1_UNORM_SRGB */
-
- {SVGA3DBLOCKDESC_COMPRESSED,
- {4, 4, 1}, 16, 16, {128, {{0}, {0}, {128}, {0} } },
- {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_BC2_TYPELESS */
-
- {SVGA3DBLOCKDESC_COMPRESSED_SRGB,
- {4, 4, 1}, 16, 16, {128, {{0}, {0}, {128}, {0} } },
- {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_BC2_UNORM_SRGB */
-
- {SVGA3DBLOCKDESC_COMPRESSED,
- {4, 4, 1}, 16, 16, {128, {{0}, {0}, {128}, {0} } },
- {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_BC3_TYPELESS */
-
- {SVGA3DBLOCKDESC_COMPRESSED_SRGB,
- {4, 4, 1}, 16, 16, {128, {{0}, {0}, {128}, {0} } },
- {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_BC3_UNORM_SRGB */
-
- {SVGA3DBLOCKDESC_COMPRESSED,
- {4, 4, 1}, 8, 8, {64, {{0}, {0}, {64}, {0} } },
- {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_BC4_TYPELESS */
-
- {SVGA3DBLOCKDESC_COMPRESSED,
- {4, 4, 1}, 8, 8, {64, {{0}, {0}, {64}, {0} } },
- {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_BC4_UNORM */
-
- {SVGA3DBLOCKDESC_COMPRESSED,
- {4, 4, 1}, 8, 8, {64, {{0}, {0}, {64}, {0} } },
- {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_BC4_SNORM */
-
- {SVGA3DBLOCKDESC_COMPRESSED,
- {4, 4, 1}, 16, 16, {128, {{0}, {0}, {128}, {0} } },
- {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_BC5_TYPELESS */
-
- {SVGA3DBLOCKDESC_COMPRESSED,
- {4, 4, 1}, 16, 16, {128, {{0}, {0}, {128}, {0} } },
- {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_BC5_UNORM */
-
- {SVGA3DBLOCKDESC_COMPRESSED,
- {4, 4, 1}, 16, 16, {128, {{0}, {0}, {128}, {0} } },
- {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_BC5_SNORM */
-
- {SVGA3DBLOCKDESC_RGBA,
- {1, 1, 1}, 4, 4, {32, {{10}, {10}, {10}, {2} } },
- {{{0}, {10}, {20}, {30} } } }, /* SVGA3D_R10G10B10_XR_BIAS_A2_UNORM */
-
- {SVGA3DBLOCKDESC_RGBA,
- {1, 1, 1}, 4, 4, {32, {{8}, {8}, {8}, {8} } },
- {{{0}, {8}, {16}, {24} } } }, /* SVGA3D_B8G8R8A8_TYPELESS */
-
- {SVGA3DBLOCKDESC_RGBA_SRGB,
- {1, 1, 1}, 4, 4, {32, {{8}, {8}, {8}, {8} } },
- {{{0}, {8}, {16}, {24} } } }, /* SVGA3D_B8G8R8A8_UNORM_SRGB */
-
- {SVGA3DBLOCKDESC_RGB,
- {1, 1, 1}, 4, 4, {24, {{8}, {8}, {8}, {0} } },
- {{{0}, {8}, {16}, {24} } } }, /* SVGA3D_B8G8R8X8_TYPELESS */
-
- {SVGA3DBLOCKDESC_RGB_SRGB,
- {1, 1, 1}, 4, 4, {24, {{8}, {8}, {8}, {0} } },
- {{{0}, {8}, {16}, {24} } } }, /* SVGA3D_B8G8R8X8_UNORM_SRGB */
-
- {SVGA3DBLOCKDESC_DEPTH,
- {1, 1, 1}, 2, 2, {16, {{0}, {0}, {16}, {0} } },
- {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_Z_DF16 */
-
- {SVGA3DBLOCKDESC_DS,
- {1, 1, 1}, 4, 4, {32, {{0}, {8}, {24}, {0} } },
- {{{0}, {24}, {0}, {0} } } }, /* SVGA3D_Z_DF24 */
-
- {SVGA3DBLOCKDESC_DS,
- {1, 1, 1}, 4, 4, {32, {{0}, {8}, {24}, {0} } },
- {{{0}, {24}, {0}, {0} } } }, /* SVGA3D_Z_D24S8_INT */
-};
-
-static inline u32 clamped_umul32(u32 a, u32 b)
-{
- u64 tmp = (u64) a*b;
- return (tmp > (u64) U32_MAX) ? U32_MAX : tmp;
-}
-
-static inline const struct svga3d_surface_desc *
-svga3dsurface_get_desc(SVGA3dSurfaceFormat format)
-{
- if (format < ARRAY_SIZE(svga3d_surface_descs))
- return &svga3d_surface_descs[format];
-
- return &svga3d_surface_descs[SVGA3D_FORMAT_INVALID];
-}
-
-/*
- *----------------------------------------------------------------------
- *
- * svga3dsurface_get_mip_size --
- *
- * Given a base level size and the mip level, compute the size of
- * the mip level.
- *
- * Results:
- * See above.
- *
- * Side effects:
- * None.
- *
- *----------------------------------------------------------------------
- */
-
-static inline surf_size_struct
-svga3dsurface_get_mip_size(surf_size_struct base_level, u32 mip_level)
-{
- surf_size_struct size;
-
- size.width = max_t(u32, base_level.width >> mip_level, 1);
- size.height = max_t(u32, base_level.height >> mip_level, 1);
- size.depth = max_t(u32, base_level.depth >> mip_level, 1);
- return size;
-}
-
-static inline void
-svga3dsurface_get_size_in_blocks(const struct svga3d_surface_desc *desc,
- const surf_size_struct *pixel_size,
- surf_size_struct *block_size)
-{
- block_size->width = DIV_ROUND_UP(pixel_size->width,
- desc->block_size.width);
- block_size->height = DIV_ROUND_UP(pixel_size->height,
- desc->block_size.height);
- block_size->depth = DIV_ROUND_UP(pixel_size->depth,
- desc->block_size.depth);
-}
-
-static inline bool
-svga3dsurface_is_planar_surface(const struct svga3d_surface_desc *desc)
-{
- return (desc->block_desc & SVGA3DBLOCKDESC_PLANAR_YUV) != 0;
-}
-
-static inline u32
-svga3dsurface_calculate_pitch(const struct svga3d_surface_desc *desc,
- const surf_size_struct *size)
-{
- u32 pitch;
- surf_size_struct blocks;
-
- svga3dsurface_get_size_in_blocks(desc, size, &blocks);
-
- pitch = blocks.width * desc->pitch_bytes_per_block;
-
- return pitch;
-}
-
-/*
- *-----------------------------------------------------------------------------
- *
- * svga3dsurface_get_image_buffer_size --
- *
- * Return the number of bytes of buffer space required to store
- * one image of a surface, optionally using the specified pitch.
- *
- * If pitch is zero, it is assumed that rows are tightly packed.
- *
- * This function is overflow-safe. If the result would have
- * overflowed, instead we return MAX_UINT32.
- *
- * Results:
- * Byte count.
- *
- * Side effects:
- * None.
- *
- *-----------------------------------------------------------------------------
- */
-
-static inline u32
-svga3dsurface_get_image_buffer_size(const struct svga3d_surface_desc *desc,
- const surf_size_struct *size,
- u32 pitch)
-{
- surf_size_struct image_blocks;
- u32 slice_size, total_size;
-
- svga3dsurface_get_size_in_blocks(desc, size, &image_blocks);
-
- if (svga3dsurface_is_planar_surface(desc)) {
- total_size = clamped_umul32(image_blocks.width,
- image_blocks.height);
- total_size = clamped_umul32(total_size, image_blocks.depth);
- total_size = clamped_umul32(total_size, desc->bytes_per_block);
- return total_size;
- }
-
- if (pitch == 0)
- pitch = svga3dsurface_calculate_pitch(desc, size);
-
- slice_size = clamped_umul32(image_blocks.height, pitch);
- total_size = clamped_umul32(slice_size, image_blocks.depth);
-
- return total_size;
-}
-
-static inline u32
-svga3dsurface_get_serialized_size(SVGA3dSurfaceFormat format,
- surf_size_struct base_level_size,
- u32 num_mip_levels,
- bool cubemap)
-{
- const struct svga3d_surface_desc *desc = svga3dsurface_get_desc(format);
- u64 total_size = 0;
- u32 mip;
-
- for (mip = 0; mip < num_mip_levels; mip++) {
- surf_size_struct size =
- svga3dsurface_get_mip_size(base_level_size, mip);
- total_size += svga3dsurface_get_image_buffer_size(desc,
- &size, 0);
- }
-
- if (cubemap)
- total_size *= SVGA3D_MAX_SURFACE_FACES;
-
- return (u32) min_t(u64, total_size, (u64) U32_MAX);
-}
-
-
-/**
- * svga3dsurface_get_pixel_offset - Compute the offset (in bytes) to a pixel
- * in an image (or volume).
- *
- * @width: The image width in pixels.
- * @height: The image height in pixels
- */
-static inline u32
-svga3dsurface_get_pixel_offset(SVGA3dSurfaceFormat format,
- u32 width, u32 height,
- u32 x, u32 y, u32 z)
-{
- const struct svga3d_surface_desc *desc = svga3dsurface_get_desc(format);
- const u32 bw = desc->block_size.width, bh = desc->block_size.height;
- const u32 bd = desc->block_size.depth;
- const u32 rowstride = DIV_ROUND_UP(width, bw) * desc->bytes_per_block;
- const u32 imgstride = DIV_ROUND_UP(height, bh) * rowstride;
- const u32 offset = (z / bd * imgstride +
- y / bh * rowstride +
- x / bw * desc->bytes_per_block);
- return offset;
-}
-
-
-static inline u32
-svga3dsurface_get_image_offset(SVGA3dSurfaceFormat format,
- surf_size_struct baseLevelSize,
- u32 numMipLevels,
- u32 face,
- u32 mip)
-
-{
- u32 offset;
- u32 mipChainBytes;
- u32 mipChainBytesToLevel;
- u32 i;
- const struct svga3d_surface_desc *desc;
- surf_size_struct mipSize;
- u32 bytes;
-
- desc = svga3dsurface_get_desc(format);
-
- mipChainBytes = 0;
- mipChainBytesToLevel = 0;
- for (i = 0; i < numMipLevels; i++) {
- mipSize = svga3dsurface_get_mip_size(baseLevelSize, i);
- bytes = svga3dsurface_get_image_buffer_size(desc, &mipSize, 0);
- mipChainBytes += bytes;
- if (i < mip)
- mipChainBytesToLevel += bytes;
- }
-
- offset = mipChainBytes * face + mipChainBytesToLevel;
-
- return offset;
-}
diff --git a/drivers/gpu/drm/vmwgfx/svga_types.h b/drivers/gpu/drm/vmwgfx/svga_types.h
deleted file mode 100644
index 55836dedcfc2..000000000000
--- a/drivers/gpu/drm/vmwgfx/svga_types.h
+++ /dev/null
@@ -1,45 +0,0 @@
-/**************************************************************************
- *
- * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- **************************************************************************/
-
-/**
- * Silly typedefs for the svga headers. Currently the headers are shared
- * between all components that talk to svga. And as such the headers
- * are in a completely different style and use weird defines.
- *
- * This file lets all the ugly be prefixed with svga*.
- */
-
-#ifndef _SVGA_TYPES_H_
-#define _SVGA_TYPES_H_
-
-typedef uint16_t uint16;
-typedef uint32_t uint32;
-typedef uint8_t uint8;
-typedef int32_t int32;
-typedef bool Bool;
-
-#endif
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_binding.c b/drivers/gpu/drm/vmwgfx/vmwgfx_binding.c
new file mode 100644
index 000000000000..9c42e96da510
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_binding.c
@@ -0,0 +1,1294 @@
+/**************************************************************************
+ *
+ * Copyright © 2015 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * This file implements the vmwgfx context binding manager.
+ * The sole reason for having to use this code is that vmware guest
+ * backed contexts can be swapped out to their backing mobs by the device
+ * at any time, and swapped back in at any time. At swapin time, the device
+ * validates the context bindings to make sure they point to valid resources.
+ * It's this outside-of-drawcall validation (that can happen at any time)
+ * that makes this code necessary.
+ *
+ * We therefore need to kill any context bindings pointing to a resource
+ * when the resource is swapped out. Furthermore, if the vmwgfx driver has
+ * swapped out the context we can't swap it in again to kill bindings because
+ * of backing mob reservation lockdep violations, so as part of
+ * context swapout we also kill all bindings of the context, so that they are
+ * already killed if a resource to which a binding points
+ * needs to be swapped out.
+ *
+ * Note that a resource can be pointed to by bindings from multiple contexts.
+ * Therefore we can't easily protect this data with a per-context mutex
+ * (unless we use deadlock-safe WW mutexes). So we use a global binding_mutex
+ * to protect all binding manager data.
+ *
+ * Finally, any association between a context and a global resource
+ * (surface, shader or even DX query) is conceptually a context binding that
+ * needs to be tracked by this code.
+ */
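+
+/*
+ * Illustrative lifecycle sketch (not an exhaustive call graph):
+ *
+ *   resource eviction -> vmw_binding_res_list_scrub(&res->binding_head)
+ *   context swapout   -> vmw_binding_state_scrub(cbs)
+ *   context swapin    -> vmw_binding_rebind_all(cbs)
+ *
+ * Scrubbing emits unbind commands to the device while keeping the
+ * tracking entries, so a later rebind can re-emit the bind commands.
+ */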
+
+#include "vmwgfx_drv.h"
+#include "vmwgfx_binding.h"
+#include "device_include/svga3d_reg.h"
+
+#define VMW_BINDING_RT_BIT 0
+#define VMW_BINDING_PS_BIT 1
+#define VMW_BINDING_SO_BIT 2
+#define VMW_BINDING_VB_BIT 3
+#define VMW_BINDING_NUM_BITS 4
+
+#define VMW_BINDING_PS_SR_BIT 0
+
+/**
+ * struct vmw_ctx_binding_state - per context binding state
+ *
+ * @dev_priv: Pointer to device private structure.
+ * @list: linked list of individual active bindings.
+ * @render_targets: Render target bindings.
+ * @texture_units: Texture units bindings.
+ * @ds_view: Depth-stencil view binding.
+ * @so_targets: StreamOutput target bindings.
+ * @vertex_buffers: Vertex buffer bindings.
+ * @index_buffer: Index buffer binding.
+ * @per_shader: Per shader-type bindings.
+ * @dirty: Bitmap tracking per binding-type changes that have not yet
+ * been emitted to the device.
+ * @dirty_vb: Bitmap tracking individual vertex buffer binding changes that
+ * have not yet been emitted to the device.
+ * @bind_cmd_buffer: Scratch space used to construct binding commands.
+ * @bind_cmd_count: Number of binding command data entries in @bind_cmd_buffer
+ * @bind_first_slot: Used together with @bind_cmd_buffer to indicate the
+ * device binding slot of the first command data entry in @bind_cmd_buffer.
+ *
+ * Note that this structure also provides storage space for the individual
+ * struct vmw_ctx_bindinfo objects, so that no dynamic allocation is needed
+ * for individual bindings.
+ *
+ */
+struct vmw_ctx_binding_state {
+ struct vmw_private *dev_priv;
+ struct list_head list;
+ struct vmw_ctx_bindinfo_view render_targets[SVGA3D_RT_MAX];
+ struct vmw_ctx_bindinfo_tex texture_units[SVGA3D_NUM_TEXTURE_UNITS];
+ struct vmw_ctx_bindinfo_view ds_view;
+ struct vmw_ctx_bindinfo_so so_targets[SVGA3D_DX_MAX_SOTARGETS];
+ struct vmw_ctx_bindinfo_vb vertex_buffers[SVGA3D_DX_MAX_VERTEXBUFFERS];
+ struct vmw_ctx_bindinfo_ib index_buffer;
+ struct vmw_dx_shader_bindings per_shader[SVGA3D_NUM_SHADERTYPE_DX10];
+
+ unsigned long dirty;
+ DECLARE_BITMAP(dirty_vb, SVGA3D_DX_MAX_VERTEXBUFFERS);
+
+ u32 bind_cmd_buffer[VMW_MAX_VIEW_BINDINGS];
+ u32 bind_cmd_count;
+ u32 bind_first_slot;
+};
+
+static int vmw_binding_scrub_shader(struct vmw_ctx_bindinfo *bi, bool rebind);
+static int vmw_binding_scrub_render_target(struct vmw_ctx_bindinfo *bi,
+ bool rebind);
+static int vmw_binding_scrub_texture(struct vmw_ctx_bindinfo *bi, bool rebind);
+static int vmw_binding_scrub_cb(struct vmw_ctx_bindinfo *bi, bool rebind);
+static int vmw_binding_scrub_dx_rt(struct vmw_ctx_bindinfo *bi, bool rebind);
+static int vmw_binding_scrub_sr(struct vmw_ctx_bindinfo *bi, bool rebind);
+static int vmw_binding_scrub_so(struct vmw_ctx_bindinfo *bi, bool rebind);
+static int vmw_binding_emit_dirty(struct vmw_ctx_binding_state *cbs);
+static int vmw_binding_scrub_dx_shader(struct vmw_ctx_bindinfo *bi,
+ bool rebind);
+static int vmw_binding_scrub_ib(struct vmw_ctx_bindinfo *bi, bool rebind);
+static int vmw_binding_scrub_vb(struct vmw_ctx_bindinfo *bi, bool rebind);
+static void vmw_binding_build_asserts(void) __attribute__ ((unused));
+
+typedef int (*vmw_scrub_func)(struct vmw_ctx_bindinfo *, bool);
+
+/**
+ * struct vmw_binding_info - Per binding type information for the binding
+ * manager
+ *
+ * @size: The size of the struct binding derived from a struct vmw_ctx_bindinfo.
+ * @offsets: array[shader_slot] of offsets to the array[slot]
+ * of struct bindings for the binding type.
+ * @scrub_func: Pointer to the scrub function for this binding type.
+ *
+ * Holds static information to help optimize the binding manager and avoid
+ * an excessive amount of switch statements.
+ */
+struct vmw_binding_info {
+ size_t size;
+ const size_t *offsets;
+ vmw_scrub_func scrub_func;
+};
+
+/*
+ * A number of static variables that help determine the scrub func and the
+ * location of the struct vmw_ctx_bindinfo slots for each binding type.
+ */
+static const size_t vmw_binding_shader_offsets[] = {
+ offsetof(struct vmw_ctx_binding_state, per_shader[0].shader),
+ offsetof(struct vmw_ctx_binding_state, per_shader[1].shader),
+ offsetof(struct vmw_ctx_binding_state, per_shader[2].shader),
+};
+static const size_t vmw_binding_rt_offsets[] = {
+ offsetof(struct vmw_ctx_binding_state, render_targets),
+};
+static const size_t vmw_binding_tex_offsets[] = {
+ offsetof(struct vmw_ctx_binding_state, texture_units),
+};
+static const size_t vmw_binding_cb_offsets[] = {
+ offsetof(struct vmw_ctx_binding_state, per_shader[0].const_buffers),
+ offsetof(struct vmw_ctx_binding_state, per_shader[1].const_buffers),
+ offsetof(struct vmw_ctx_binding_state, per_shader[2].const_buffers),
+};
+static const size_t vmw_binding_dx_ds_offsets[] = {
+ offsetof(struct vmw_ctx_binding_state, ds_view),
+};
+static const size_t vmw_binding_sr_offsets[] = {
+ offsetof(struct vmw_ctx_binding_state, per_shader[0].shader_res),
+ offsetof(struct vmw_ctx_binding_state, per_shader[1].shader_res),
+ offsetof(struct vmw_ctx_binding_state, per_shader[2].shader_res),
+};
+static const size_t vmw_binding_so_offsets[] = {
+ offsetof(struct vmw_ctx_binding_state, so_targets),
+};
+static const size_t vmw_binding_vb_offsets[] = {
+ offsetof(struct vmw_ctx_binding_state, vertex_buffers),
+};
+static const size_t vmw_binding_ib_offsets[] = {
+ offsetof(struct vmw_ctx_binding_state, index_buffer),
+};
+
+static const struct vmw_binding_info vmw_binding_infos[] = {
+ [vmw_ctx_binding_shader] = {
+ .size = sizeof(struct vmw_ctx_bindinfo_shader),
+ .offsets = vmw_binding_shader_offsets,
+ .scrub_func = vmw_binding_scrub_shader},
+ [vmw_ctx_binding_rt] = {
+ .size = sizeof(struct vmw_ctx_bindinfo_view),
+ .offsets = vmw_binding_rt_offsets,
+ .scrub_func = vmw_binding_scrub_render_target},
+ [vmw_ctx_binding_tex] = {
+ .size = sizeof(struct vmw_ctx_bindinfo_tex),
+ .offsets = vmw_binding_tex_offsets,
+ .scrub_func = vmw_binding_scrub_texture},
+ [vmw_ctx_binding_cb] = {
+ .size = sizeof(struct vmw_ctx_bindinfo_cb),
+ .offsets = vmw_binding_cb_offsets,
+ .scrub_func = vmw_binding_scrub_cb},
+ [vmw_ctx_binding_dx_shader] = {
+ .size = sizeof(struct vmw_ctx_bindinfo_shader),
+ .offsets = vmw_binding_shader_offsets,
+ .scrub_func = vmw_binding_scrub_dx_shader},
+ [vmw_ctx_binding_dx_rt] = {
+ .size = sizeof(struct vmw_ctx_bindinfo_view),
+ .offsets = vmw_binding_rt_offsets,
+ .scrub_func = vmw_binding_scrub_dx_rt},
+ [vmw_ctx_binding_sr] = {
+ .size = sizeof(struct vmw_ctx_bindinfo_view),
+ .offsets = vmw_binding_sr_offsets,
+ .scrub_func = vmw_binding_scrub_sr},
+ [vmw_ctx_binding_ds] = {
+ .size = sizeof(struct vmw_ctx_bindinfo_view),
+ .offsets = vmw_binding_dx_ds_offsets,
+ .scrub_func = vmw_binding_scrub_dx_rt},
+ [vmw_ctx_binding_so] = {
+ .size = sizeof(struct vmw_ctx_bindinfo_so),
+ .offsets = vmw_binding_so_offsets,
+ .scrub_func = vmw_binding_scrub_so},
+ [vmw_ctx_binding_vb] = {
+ .size = sizeof(struct vmw_ctx_bindinfo_vb),
+ .offsets = vmw_binding_vb_offsets,
+ .scrub_func = vmw_binding_scrub_vb},
+ [vmw_ctx_binding_ib] = {
+ .size = sizeof(struct vmw_ctx_bindinfo_ib),
+ .offsets = vmw_binding_ib_offsets,
+ .scrub_func = vmw_binding_scrub_ib},
+};
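+
+/*
+ * Example (illustrative): with the table above, scrubbing a texture
+ * binding reduces to a direct lookup,
+ *
+ *   vmw_binding_infos[vmw_ctx_binding_tex].scrub_func(bi, false);
+ *
+ * with no per-type switch statement on the hot path.
+ */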
+
+/**
+ * vmw_cbs_context - Return a pointer to the context resource of a
+ * context binding state tracker.
+ *
+ * @cbs: The context binding state tracker.
+ *
+ * Provided there are any active bindings, this function will return an
+ * unreferenced pointer to the context resource that owns the context
+ * binding state tracker. If there are no active bindings, this function
+ * will return NULL. Note that the caller must somehow ensure that a reference
+ * is held on the context resource prior to calling this function.
+ */
+static const struct vmw_resource *
+vmw_cbs_context(const struct vmw_ctx_binding_state *cbs)
+{
+ if (list_empty(&cbs->list))
+ return NULL;
+
+ return list_first_entry(&cbs->list, struct vmw_ctx_bindinfo,
+ ctx_list)->ctx;
+}
+
+/**
+ * vmw_binding_loc - determine the struct vmw_ctx_bindinfo slot location.
+ *
+ * @cbs: Pointer to a struct vmw_ctx_binding_state which holds the slot.
+ * @bt: The binding type.
+ * @shader_slot: The shader slot of the binding. If none, then set to 0.
+ * @slot: The slot of the binding.
+ */
+static struct vmw_ctx_bindinfo *
+vmw_binding_loc(struct vmw_ctx_binding_state *cbs,
+ enum vmw_ctx_binding_type bt, u32 shader_slot, u32 slot)
+{
+ const struct vmw_binding_info *b = &vmw_binding_infos[bt];
+ size_t offset = b->offsets[shader_slot] + b->size*slot;
+
+ return (struct vmw_ctx_bindinfo *)((u8 *) cbs + offset);
+}
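+
+/*
+ * For example (illustrative), a constant buffer binding for shader slot 1,
+ * binding slot 3 resolves to
+ *
+ *   offsetof(struct vmw_ctx_binding_state, per_shader[1].const_buffers) +
+ *   3 * sizeof(struct vmw_ctx_bindinfo_cb)
+ *
+ * which points at cbs->per_shader[1].const_buffers[3].
+ */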
+
+/**
+ * vmw_binding_drop: Stop tracking a context binding
+ *
+ * @bi: Pointer to binding tracker storage.
+ *
+ * Stops tracking a context binding, and re-initializes its storage.
+ * Typically used when the context binding is replaced with a binding to
+ * another (or the same, for that matter) resource.
+ */
+static void vmw_binding_drop(struct vmw_ctx_bindinfo *bi)
+{
+ list_del(&bi->ctx_list);
+ if (!list_empty(&bi->res_list))
+ list_del(&bi->res_list);
+ bi->ctx = NULL;
+}
+
+/**
+ * vmw_binding_add: Start tracking a context binding
+ *
+ * @cbs: Pointer to the context binding state tracker.
+ * @bi: Information about the binding to track.
+ *
+ * Starts tracking the binding in the context binding
+ * state structure @cbs.
+ */
+void vmw_binding_add(struct vmw_ctx_binding_state *cbs,
+ const struct vmw_ctx_bindinfo *bi,
+ u32 shader_slot, u32 slot)
+{
+ struct vmw_ctx_bindinfo *loc =
+ vmw_binding_loc(cbs, bi->bt, shader_slot, slot);
+ const struct vmw_binding_info *b = &vmw_binding_infos[bi->bt];
+
+ if (loc->ctx != NULL)
+ vmw_binding_drop(loc);
+
+ memcpy(loc, bi, b->size);
+ loc->scrubbed = false;
+ list_add(&loc->ctx_list, &cbs->list);
+ INIT_LIST_HEAD(&loc->res_list);
+}
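+
+/*
+ * Sketched usage (assumed caller, not part of this file): a command
+ * verifier would fill in a stack-allocated binding info and hand it off,
+ *
+ *   struct vmw_ctx_bindinfo_tex binding;
+ *
+ *   binding.bi.ctx = ctx_res;
+ *   binding.bi.res = surf_res;
+ *   binding.bi.bt = vmw_ctx_binding_tex;
+ *   binding.texture_stage = stage;
+ *   vmw_binding_add(cbs, &binding.bi, 0, stage);
+ *
+ * The info is copied into @cbs storage, so the source may be transient.
+ */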
+
+/**
+ * vmw_binding_transfer: Transfer a context binding tracking entry.
+ *
+ * @cbs: Pointer to the persistent context binding state tracker.
+ * @from: Staged binding state the entry is transferred from.
+ * @bi: Information about the binding to track.
+ *
+ */
+static void vmw_binding_transfer(struct vmw_ctx_binding_state *cbs,
+ const struct vmw_ctx_binding_state *from,
+ const struct vmw_ctx_bindinfo *bi)
+{
+ size_t offset = (unsigned long)bi - (unsigned long)from;
+ struct vmw_ctx_bindinfo *loc = (struct vmw_ctx_bindinfo *)
+ ((unsigned long) cbs + offset);
+
+ if (loc->ctx != NULL) {
+ WARN_ON(bi->scrubbed);
+
+ vmw_binding_drop(loc);
+ }
+
+ if (bi->res != NULL) {
+ memcpy(loc, bi, vmw_binding_infos[bi->bt].size);
+ list_add_tail(&loc->ctx_list, &cbs->list);
+ list_add_tail(&loc->res_list, &loc->res->binding_head);
+ }
+}
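+
+/*
+ * Note that the transfer above relies on @from and @cbs having identical
+ * layout; the byte offset of @bi within @from is reused to locate the
+ * corresponding slot in @cbs.
+ */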
+
+/**
+ * vmw_binding_state_kill - Kill all bindings associated with a
+ * struct vmw_ctx_binding_state structure, and re-initialize the structure.
+ *
+ * @cbs: Pointer to the context binding state tracker.
+ *
+ * Emits commands to scrub all bindings associated with the
+ * context binding state tracker. Then re-initializes the whole structure.
+ */
+void vmw_binding_state_kill(struct vmw_ctx_binding_state *cbs)
+{
+ struct vmw_ctx_bindinfo *entry, *next;
+
+ vmw_binding_state_scrub(cbs);
+ list_for_each_entry_safe(entry, next, &cbs->list, ctx_list)
+ vmw_binding_drop(entry);
+}
+
+/**
+ * vmw_binding_state_scrub - Scrub all bindings associated with a
+ * struct vmw_ctx_binding_state structure.
+ *
+ * @cbs: Pointer to the context binding state tracker.
+ *
+ * Emits commands to scrub all bindings associated with the
+ * context binding state tracker.
+ */
+void vmw_binding_state_scrub(struct vmw_ctx_binding_state *cbs)
+{
+ struct vmw_ctx_bindinfo *entry;
+
+ list_for_each_entry(entry, &cbs->list, ctx_list) {
+ if (!entry->scrubbed) {
+ (void) vmw_binding_infos[entry->bt].scrub_func
+ (entry, false);
+ entry->scrubbed = true;
+ }
+ }
+
+ (void) vmw_binding_emit_dirty(cbs);
+}
+
+/**
+ * vmw_binding_res_list_kill - Kill all bindings on a
+ * resource binding list
+ *
+ * @head: list head of resource binding list
+ *
+ * Kills all bindings associated with a specific resource. Typically
+ * called before the resource is destroyed.
+ */
+void vmw_binding_res_list_kill(struct list_head *head)
+{
+ struct vmw_ctx_bindinfo *entry, *next;
+
+ vmw_binding_res_list_scrub(head);
+ list_for_each_entry_safe(entry, next, head, res_list)
+ vmw_binding_drop(entry);
+}
+
+/**
+ * vmw_binding_res_list_scrub - Scrub all bindings on a
+ * resource binding list
+ *
+ * @head: list head of resource binding list
+ *
+ * Scrub all bindings associated with a specific resource. Typically
+ * called before the resource is evicted.
+ */
+void vmw_binding_res_list_scrub(struct list_head *head)
+{
+ struct vmw_ctx_bindinfo *entry;
+
+ list_for_each_entry(entry, head, res_list) {
+ if (!entry->scrubbed) {
+ (void) vmw_binding_infos[entry->bt].scrub_func
+ (entry, false);
+ entry->scrubbed = true;
+ }
+ }
+
+ list_for_each_entry(entry, head, res_list) {
+ struct vmw_ctx_binding_state *cbs =
+ vmw_context_binding_state(entry->ctx);
+
+ (void) vmw_binding_emit_dirty(cbs);
+ }
+}
+
+
+/**
+ * vmw_binding_state_commit - Commit staged binding info
+ *
+ * @to: Persistent context binding state to commit the staged info to.
+ * @from: Staged binding info built during execbuf.
+ *
+ * Transfers binding info from a temporary structure
+ * (typically used by execbuf) to the persistent
+ * structure in the context. This can be done once commands have been
+ * submitted to hardware.
+ */
+void vmw_binding_state_commit(struct vmw_ctx_binding_state *to,
+ struct vmw_ctx_binding_state *from)
+{
+ struct vmw_ctx_bindinfo *entry, *next;
+
+ list_for_each_entry_safe(entry, next, &from->list, ctx_list) {
+ vmw_binding_transfer(to, from, entry);
+ vmw_binding_drop(entry);
+ }
+}
+
+/**
+ * vmw_binding_rebind_all - Rebind all scrubbed bindings of a context
+ *
+ * @cbs: Pointer to the context binding state tracker.
+ *
+ * Walks through the context binding list and rebinds all scrubbed
+ * resources.
+ */
+int vmw_binding_rebind_all(struct vmw_ctx_binding_state *cbs)
+{
+ struct vmw_ctx_bindinfo *entry;
+ int ret;
+
+ list_for_each_entry(entry, &cbs->list, ctx_list) {
+ if (likely(!entry->scrubbed))
+ continue;
+
+ if (entry->res == NULL || entry->res->id == SVGA3D_INVALID_ID)
+ continue;
+
+ ret = vmw_binding_infos[entry->bt].scrub_func(entry, true);
+ if (unlikely(ret != 0))
+ return ret;
+
+ entry->scrubbed = false;
+ }
+
+ return vmw_binding_emit_dirty(cbs);
+}
+
+/**
+ * vmw_binding_scrub_shader - scrub a shader binding from a context.
+ *
+ * @bi: single binding information.
+ * @rebind: Whether to issue a bind instead of scrub command.
+ */
+static int vmw_binding_scrub_shader(struct vmw_ctx_bindinfo *bi, bool rebind)
+{
+ struct vmw_ctx_bindinfo_shader *binding =
+ container_of(bi, typeof(*binding), bi);
+ struct vmw_private *dev_priv = bi->ctx->dev_priv;
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdSetShader body;
+ } *cmd;
+
+ cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+ if (unlikely(cmd == NULL)) {
+ DRM_ERROR("Failed reserving FIFO space for shader "
+ "unbinding.\n");
+ return -ENOMEM;
+ }
+
+ cmd->header.id = SVGA_3D_CMD_SET_SHADER;
+ cmd->header.size = sizeof(cmd->body);
+ cmd->body.cid = bi->ctx->id;
+ cmd->body.type = binding->shader_slot + SVGA3D_SHADERTYPE_MIN;
+ cmd->body.shid = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
+ vmw_fifo_commit(dev_priv, sizeof(*cmd));
+
+ return 0;
+}
+
+/**
+ * vmw_binding_scrub_render_target - scrub a render target binding
+ * from a context.
+ *
+ * @bi: single binding information.
+ * @rebind: Whether to issue a bind instead of scrub command.
+ */
+static int vmw_binding_scrub_render_target(struct vmw_ctx_bindinfo *bi,
+ bool rebind)
+{
+ struct vmw_ctx_bindinfo_view *binding =
+ container_of(bi, typeof(*binding), bi);
+ struct vmw_private *dev_priv = bi->ctx->dev_priv;
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdSetRenderTarget body;
+ } *cmd;
+
+ cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+ if (unlikely(cmd == NULL)) {
+ DRM_ERROR("Failed reserving FIFO space for render target "
+ "unbinding.\n");
+ return -ENOMEM;
+ }
+
+ cmd->header.id = SVGA_3D_CMD_SETRENDERTARGET;
+ cmd->header.size = sizeof(cmd->body);
+ cmd->body.cid = bi->ctx->id;
+ cmd->body.type = binding->slot;
+ cmd->body.target.sid = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
+ cmd->body.target.face = 0;
+ cmd->body.target.mipmap = 0;
+ vmw_fifo_commit(dev_priv, sizeof(*cmd));
+
+ return 0;
+}
+
+/**
+ * vmw_binding_scrub_texture - scrub a texture binding from a context.
+ *
+ * @bi: single binding information.
+ * @rebind: Whether to issue a bind instead of scrub command.
+ *
+ * TODO: Possibly complement this function with a function that takes
+ * a list of texture bindings and combines them to a single command.
+ */
+static int vmw_binding_scrub_texture(struct vmw_ctx_bindinfo *bi,
+ bool rebind)
+{
+ struct vmw_ctx_bindinfo_tex *binding =
+ container_of(bi, typeof(*binding), bi);
+ struct vmw_private *dev_priv = bi->ctx->dev_priv;
+ struct {
+ SVGA3dCmdHeader header;
+ struct {
+ SVGA3dCmdSetTextureState c;
+ SVGA3dTextureState s1;
+ } body;
+ } *cmd;
+
+ cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+ if (unlikely(cmd == NULL)) {
+ DRM_ERROR("Failed reserving FIFO space for texture "
+ "unbinding.\n");
+ return -ENOMEM;
+ }
+
+ cmd->header.id = SVGA_3D_CMD_SETTEXTURESTATE;
+ cmd->header.size = sizeof(cmd->body);
+ cmd->body.c.cid = bi->ctx->id;
+ cmd->body.s1.stage = binding->texture_stage;
+ cmd->body.s1.name = SVGA3D_TS_BIND_TEXTURE;
+ cmd->body.s1.value = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
+ vmw_fifo_commit(dev_priv, sizeof(*cmd));
+
+ return 0;
+}
+
+/**
+ * vmw_binding_scrub_dx_shader - scrub a dx shader binding from a context.
+ *
+ * @bi: single binding information.
+ * @rebind: Whether to issue a bind instead of scrub command.
+ */
+static int vmw_binding_scrub_dx_shader(struct vmw_ctx_bindinfo *bi, bool rebind)
+{
+ struct vmw_ctx_bindinfo_shader *binding =
+ container_of(bi, typeof(*binding), bi);
+ struct vmw_private *dev_priv = bi->ctx->dev_priv;
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDXSetShader body;
+ } *cmd;
+
+ cmd = vmw_fifo_reserve_dx(dev_priv, sizeof(*cmd), bi->ctx->id);
+ if (unlikely(cmd == NULL)) {
+ DRM_ERROR("Failed reserving FIFO space for DX shader "
+ "unbinding.\n");
+ return -ENOMEM;
+ }
+ cmd->header.id = SVGA_3D_CMD_DX_SET_SHADER;
+ cmd->header.size = sizeof(cmd->body);
+ cmd->body.type = binding->shader_slot + SVGA3D_SHADERTYPE_MIN;
+ cmd->body.shaderId = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
+ vmw_fifo_commit(dev_priv, sizeof(*cmd));
+
+ return 0;
+}
+
+/**
+ * vmw_binding_scrub_cb - scrub a constant buffer binding from a context.
+ *
+ * @bi: single binding information.
+ * @rebind: Whether to issue a bind instead of scrub command.
+ */
+static int vmw_binding_scrub_cb(struct vmw_ctx_bindinfo *bi, bool rebind)
+{
+ struct vmw_ctx_bindinfo_cb *binding =
+ container_of(bi, typeof(*binding), bi);
+ struct vmw_private *dev_priv = bi->ctx->dev_priv;
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDXSetSingleConstantBuffer body;
+ } *cmd;
+
+ cmd = vmw_fifo_reserve_dx(dev_priv, sizeof(*cmd), bi->ctx->id);
+ if (unlikely(cmd == NULL)) {
+ DRM_ERROR("Failed reserving FIFO space for DX shader "
+ "unbinding.\n");
+ return -ENOMEM;
+ }
+
+ cmd->header.id = SVGA_3D_CMD_DX_SET_SINGLE_CONSTANT_BUFFER;
+ cmd->header.size = sizeof(cmd->body);
+ cmd->body.slot = binding->slot;
+ cmd->body.type = binding->shader_slot + SVGA3D_SHADERTYPE_MIN;
+ if (rebind) {
+ cmd->body.offsetInBytes = binding->offset;
+ cmd->body.sizeInBytes = binding->size;
+ cmd->body.sid = bi->res->id;
+ } else {
+ cmd->body.offsetInBytes = 0;
+ cmd->body.sizeInBytes = 0;
+ cmd->body.sid = SVGA3D_INVALID_ID;
+ }
+ vmw_fifo_commit(dev_priv, sizeof(*cmd));
+
+ return 0;
+}
+
+/**
+ * vmw_collect_view_ids - Build view id data for a view binding command
+ * without checking which bindings actually need to be emitted
+ *
+ * @cbs: Pointer to the context's struct vmw_ctx_binding_state
+ * @bi: Pointer to where the binding info array is stored in @cbs
+ * @max_num: Maximum number of entries in the @bi array.
+ *
+ * Scans the @bi array for bindings and builds a buffer of view id data.
+ * Stops at the first non-existing binding in the @bi array.
+ * On output, @cbs->bind_cmd_count contains the number of bindings to be
+ * emitted, @cbs->bind_first_slot is set to zero, and @cbs->bind_cmd_buffer
+ * contains the command data.
+ */
+static void vmw_collect_view_ids(struct vmw_ctx_binding_state *cbs,
+ const struct vmw_ctx_bindinfo *bi,
+ u32 max_num)
+{
+ const struct vmw_ctx_bindinfo_view *biv =
+ container_of(bi, struct vmw_ctx_bindinfo_view, bi);
+ unsigned long i;
+
+ cbs->bind_cmd_count = 0;
+ cbs->bind_first_slot = 0;
+
+ for (i = 0; i < max_num; ++i, ++biv) {
+ if (!biv->bi.ctx)
+ break;
+
+ cbs->bind_cmd_buffer[cbs->bind_cmd_count++] =
+ ((biv->bi.scrubbed) ?
+ SVGA3D_INVALID_ID : biv->bi.res->id);
+ }
+}
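+
+/*
+ * Unlike the dirty-tracking collector below, this variant always starts
+ * at slot zero and stops at the first unbound slot, which suits commands
+ * that set the full binding list in one go (render targets, SO targets).
+ */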
+
+/**
+ * vmw_collect_dirty_view_ids - Build view id data for a view binding command
+ *
+ * @cbs: Pointer to the context's struct vmw_ctx_binding_state
+ * @bi: Pointer to where the binding info array is stored in @cbs
+ * @dirty: Bitmap indicating which bindings need to be emitted.
+ * @max_num: Maximum number of entries in the @bi array.
+ *
+ * Scans the @bi array for bindings that need to be emitted and
+ * builds a buffer of view id data.
+ * On output, @cbs->bind_cmd_count contains the number of bindings to be
+ * emitted, @cbs->bind_first_slot indicates the index of the first emitted
+ * binding, and @cbs->bind_cmd_buffer contains the command data.
+ */
+static void vmw_collect_dirty_view_ids(struct vmw_ctx_binding_state *cbs,
+ const struct vmw_ctx_bindinfo *bi,
+ unsigned long *dirty,
+ u32 max_num)
+{
+ const struct vmw_ctx_bindinfo_view *biv =
+ container_of(bi, struct vmw_ctx_bindinfo_view, bi);
+ unsigned long i, next_bit;
+
+ cbs->bind_cmd_count = 0;
+ i = find_first_bit(dirty, max_num);
+ next_bit = i;
+ cbs->bind_first_slot = i;
+
+ biv += i;
+ for (; i < max_num; ++i, ++biv) {
+ cbs->bind_cmd_buffer[cbs->bind_cmd_count++] =
+ ((!biv->bi.ctx || biv->bi.scrubbed) ?
+ SVGA3D_INVALID_ID : biv->bi.res->id);
+
+ if (next_bit == i) {
+ next_bit = find_next_bit(dirty, max_num, i + 1);
+ if (next_bit >= max_num)
+ break;
+ }
+ }
+}
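+
+/*
+ * Example (illustrative): with dirty bits 2 and 5 set and clean bindings
+ * in between, the loop above collects slots 2..5, leaving
+ * bind_first_slot == 2 and bind_cmd_count == 4, so one command can
+ * update the whole dirty range including the clean slots it spans.
+ */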
+
+/**
+ * vmw_emit_set_sr - Issue delayed DX shader resource binding commands
+ *
+ * @cbs: Pointer to the context's struct vmw_ctx_binding_state
+ */
+static int vmw_emit_set_sr(struct vmw_ctx_binding_state *cbs,
+ int shader_slot)
+{
+ const struct vmw_ctx_bindinfo *loc =
+ &cbs->per_shader[shader_slot].shader_res[0].bi;
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDXSetShaderResources body;
+ } *cmd;
+ size_t cmd_size, view_id_size;
+ const struct vmw_resource *ctx = vmw_cbs_context(cbs);
+
+ vmw_collect_dirty_view_ids(cbs, loc,
+ cbs->per_shader[shader_slot].dirty_sr,
+ SVGA3D_DX_MAX_SRVIEWS);
+ if (cbs->bind_cmd_count == 0)
+ return 0;
+
+ view_id_size = cbs->bind_cmd_count*sizeof(uint32);
+ cmd_size = sizeof(*cmd) + view_id_size;
+ cmd = vmw_fifo_reserve_dx(ctx->dev_priv, cmd_size, ctx->id);
+ if (unlikely(cmd == NULL)) {
+ DRM_ERROR("Failed reserving FIFO space for DX shader"
+ " resource binding.\n");
+ return -ENOMEM;
+ }
+
+ cmd->header.id = SVGA_3D_CMD_DX_SET_SHADER_RESOURCES;
+ cmd->header.size = sizeof(cmd->body) + view_id_size;
+ cmd->body.type = shader_slot + SVGA3D_SHADERTYPE_MIN;
+ cmd->body.startView = cbs->bind_first_slot;
+
+ memcpy(&cmd[1], cbs->bind_cmd_buffer, view_id_size);
+
+ vmw_fifo_commit(ctx->dev_priv, cmd_size);
+ bitmap_clear(cbs->per_shader[shader_slot].dirty_sr,
+ cbs->bind_first_slot, cbs->bind_cmd_count);
+
+ return 0;
+}
+
+/**
+ * vmw_emit_set_rt - Issue delayed DX rendertarget binding commands
+ *
+ * @cbs: Pointer to the context's struct vmw_ctx_binding_state
+ */
+static int vmw_emit_set_rt(struct vmw_ctx_binding_state *cbs)
+{
+ const struct vmw_ctx_bindinfo *loc = &cbs->render_targets[0].bi;
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDXSetRenderTargets body;
+ } *cmd;
+ size_t cmd_size, view_id_size;
+ const struct vmw_resource *ctx = vmw_cbs_context(cbs);
+
+ vmw_collect_view_ids(cbs, loc, SVGA3D_MAX_SIMULTANEOUS_RENDER_TARGETS);
+ view_id_size = cbs->bind_cmd_count*sizeof(uint32);
+ cmd_size = sizeof(*cmd) + view_id_size;
+ cmd = vmw_fifo_reserve_dx(ctx->dev_priv, cmd_size, ctx->id);
+ if (unlikely(cmd == NULL)) {
+ DRM_ERROR("Failed reserving FIFO space for DX render-target"
+ " binding.\n");
+ return -ENOMEM;
+ }
+
+ cmd->header.id = SVGA_3D_CMD_DX_SET_RENDERTARGETS;
+ cmd->header.size = sizeof(cmd->body) + view_id_size;
+
+ if (cbs->ds_view.bi.ctx && !cbs->ds_view.bi.scrubbed)
+ cmd->body.depthStencilViewId = cbs->ds_view.bi.res->id;
+ else
+ cmd->body.depthStencilViewId = SVGA3D_INVALID_ID;
+
+ memcpy(&cmd[1], cbs->bind_cmd_buffer, view_id_size);
+
+ vmw_fifo_commit(ctx->dev_priv, cmd_size);
+
+ return 0;
+}
+
+/**
+ * vmw_collect_so_targets - Build SVGA3dSoTarget data for a binding command
+ * without checking which bindings actually need to be emitted
+ *
+ * @cbs: Pointer to the context's struct vmw_ctx_binding_state
+ * @bi: Pointer to where the binding info array is stored in @cbs
+ * @max_num: Maximum number of entries in the @bi array.
+ *
+ * Scans the @bi array for bindings and builds a buffer of SVGA3dSoTarget data.
+ * Stops at the first non-existing binding in the @bi array.
+ * On output, @cbs->bind_cmd_count contains the number of bindings to be
+ * emitted, @cbs->bind_first_slot is set to zero, and @cbs->bind_cmd_buffer
+ * contains the command data.
+ */
+static void vmw_collect_so_targets(struct vmw_ctx_binding_state *cbs,
+ const struct vmw_ctx_bindinfo *bi,
+ u32 max_num)
+{
+ const struct vmw_ctx_bindinfo_so *biso =
+ container_of(bi, struct vmw_ctx_bindinfo_so, bi);
+ unsigned long i;
+ SVGA3dSoTarget *so_buffer = (SVGA3dSoTarget *) cbs->bind_cmd_buffer;
+
+ cbs->bind_cmd_count = 0;
+ cbs->bind_first_slot = 0;
+
+ for (i = 0; i < max_num; ++i, ++biso, ++so_buffer,
+ ++cbs->bind_cmd_count) {
+ if (!biso->bi.ctx)
+ break;
+
+ if (!biso->bi.scrubbed) {
+ so_buffer->sid = biso->bi.res->id;
+ so_buffer->offset = biso->offset;
+ so_buffer->sizeInBytes = biso->size;
+ } else {
+ so_buffer->sid = SVGA3D_INVALID_ID;
+ so_buffer->offset = 0;
+ so_buffer->sizeInBytes = 0;
+ }
+ }
+}
+
+/**
+ * vmw_emit_set_so - Issue delayed streamout binding commands
+ *
+ * @cbs: Pointer to the context's struct vmw_ctx_binding_state
+ */
+static int vmw_emit_set_so(struct vmw_ctx_binding_state *cbs)
+{
+ const struct vmw_ctx_bindinfo *loc = &cbs->so_targets[0].bi;
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDXSetSOTargets body;
+ } *cmd;
+ size_t cmd_size, so_target_size;
+ const struct vmw_resource *ctx = vmw_cbs_context(cbs);
+
+ vmw_collect_so_targets(cbs, loc, SVGA3D_DX_MAX_SOTARGETS);
+ if (cbs->bind_cmd_count == 0)
+ return 0;
+
+ so_target_size = cbs->bind_cmd_count*sizeof(SVGA3dSoTarget);
+ cmd_size = sizeof(*cmd) + so_target_size;
+ cmd = vmw_fifo_reserve_dx(ctx->dev_priv, cmd_size, ctx->id);
+ if (unlikely(cmd == NULL)) {
+ DRM_ERROR("Failed reserving FIFO space for DX SO target"
+ " binding.\n");
+ return -ENOMEM;
+ }
+
+ cmd->header.id = SVGA_3D_CMD_DX_SET_SOTARGETS;
+ cmd->header.size = sizeof(cmd->body) + so_target_size;
+ memcpy(&cmd[1], cbs->bind_cmd_buffer, so_target_size);
+
+ vmw_fifo_commit(ctx->dev_priv, cmd_size);
+
+ return 0;
+}
+
+/**
+ * vmw_binding_emit_dirty_ps - Issue delayed per shader binding commands
+ *
+ * @cbs: Pointer to the context's struct vmw_ctx_binding_state
+ *
+ */
+static int vmw_binding_emit_dirty_ps(struct vmw_ctx_binding_state *cbs)
+{
+ struct vmw_dx_shader_bindings *sb = &cbs->per_shader[0];
+ u32 i;
+ int ret = 0;
+
+ for (i = 0; i < SVGA3D_NUM_SHADERTYPE_DX10; ++i, ++sb) {
+ if (!test_bit(VMW_BINDING_PS_SR_BIT, &sb->dirty))
+ continue;
+
+ ret = vmw_emit_set_sr(cbs, i);
+ if (ret)
+ break;
+
+ __clear_bit(VMW_BINDING_PS_SR_BIT, &sb->dirty);
+ }
+
+ return ret;
+}
+
+/**
+ * vmw_collect_dirty_vbs - Build SVGA3dVertexBuffer data for a
+ * SVGA3dCmdDXSetVertexBuffers command
+ *
+ * @cbs: Pointer to the context's struct vmw_ctx_binding_state
+ * @bi: Pointer to where the binding info array is stored in @cbs
+ * @dirty: Bitmap indicating which bindings need to be emitted.
+ * @max_num: Maximum number of entries in the @bi array.
+ *
+ * Scans the @bi array for bindings that need to be emitted and
+ * builds a buffer of SVGA3dVertexBuffer data.
+ * On output, @cbs->bind_cmd_count contains the number of bindings to be
+ * emitted, @cbs->bind_first_slot indicates the index of the first emitted
+ * binding, and @cbs->bind_cmd_buffer contains the command data.
+ */
+static void vmw_collect_dirty_vbs(struct vmw_ctx_binding_state *cbs,
+ const struct vmw_ctx_bindinfo *bi,
+ unsigned long *dirty,
+ u32 max_num)
+{
+ const struct vmw_ctx_bindinfo_vb *biv =
+ container_of(bi, struct vmw_ctx_bindinfo_vb, bi);
+ unsigned long i, next_bit;
+ SVGA3dVertexBuffer *vbs = (SVGA3dVertexBuffer *) &cbs->bind_cmd_buffer;
+
+ cbs->bind_cmd_count = 0;
+ i = find_first_bit(dirty, max_num);
+ next_bit = i;
+ cbs->bind_first_slot = i;
+
+ biv += i;
+ for (; i < max_num; ++i, ++biv, ++vbs) {
+ if (!biv->bi.ctx || biv->bi.scrubbed) {
+ vbs->sid = SVGA3D_INVALID_ID;
+ vbs->stride = 0;
+ vbs->offset = 0;
+ } else {
+ vbs->sid = biv->bi.res->id;
+ vbs->stride = biv->stride;
+ vbs->offset = biv->offset;
+ }
+ cbs->bind_cmd_count++;
+ if (next_bit == i) {
+ next_bit = find_next_bit(dirty, max_num, i + 1);
+ if (next_bit >= max_num)
+ break;
+ }
+ }
+}
+
+/**
+ * vmw_emit_set_vb - Issue delayed vertex buffer binding commands
+ *
+ * @cbs: Pointer to the context's struct vmw_ctx_binding_state
+ *
+ */
+static int vmw_emit_set_vb(struct vmw_ctx_binding_state *cbs)
+{
+ const struct vmw_ctx_bindinfo *loc =
+ &cbs->vertex_buffers[0].bi;
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDXSetVertexBuffers body;
+ } *cmd;
+ size_t cmd_size, set_vb_size;
+ const struct vmw_resource *ctx = vmw_cbs_context(cbs);
+
+ vmw_collect_dirty_vbs(cbs, loc, cbs->dirty_vb,
+ SVGA3D_DX_MAX_VERTEXBUFFERS);
+ if (cbs->bind_cmd_count == 0)
+ return 0;
+
+ set_vb_size = cbs->bind_cmd_count*sizeof(SVGA3dVertexBuffer);
+ cmd_size = sizeof(*cmd) + set_vb_size;
+ cmd = vmw_fifo_reserve_dx(ctx->dev_priv, cmd_size, ctx->id);
+ if (unlikely(cmd == NULL)) {
+ DRM_ERROR("Failed reserving FIFO space for DX vertex buffer"
+ " binding.\n");
+ return -ENOMEM;
+ }
+
+ cmd->header.id = SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS;
+ cmd->header.size = sizeof(cmd->body) + set_vb_size;
+ cmd->body.startBuffer = cbs->bind_first_slot;
+
+ memcpy(&cmd[1], cbs->bind_cmd_buffer, set_vb_size);
+
+ vmw_fifo_commit(ctx->dev_priv, cmd_size);
+ bitmap_clear(cbs->dirty_vb,
+ cbs->bind_first_slot, cbs->bind_cmd_count);
+
+ return 0;
+}
+
+/**
+ * vmw_binding_emit_dirty - Issue delayed binding commands
+ *
+ * @cbs: Pointer to the context's struct vmw_ctx_binding_state
+ *
+ * This function issues the delayed binding commands that arise from
+ * previous scrub / unscrub calls. These binding commands are typically
+ * commands that batch a number of bindings and therefore it makes sense
+ * to delay them.
+ */
+static int vmw_binding_emit_dirty(struct vmw_ctx_binding_state *cbs)
+{
+ int ret = 0;
+ unsigned long hit = 0;
+
+ while ((hit = find_next_bit(&cbs->dirty, VMW_BINDING_NUM_BITS, hit))
+ < VMW_BINDING_NUM_BITS) {
+
+ switch (hit) {
+ case VMW_BINDING_RT_BIT:
+ ret = vmw_emit_set_rt(cbs);
+ break;
+ case VMW_BINDING_PS_BIT:
+ ret = vmw_binding_emit_dirty_ps(cbs);
+ break;
+ case VMW_BINDING_SO_BIT:
+ ret = vmw_emit_set_so(cbs);
+ break;
+ case VMW_BINDING_VB_BIT:
+ ret = vmw_emit_set_vb(cbs);
+ break;
+ default:
+ BUG();
+ }
+ if (ret)
+ return ret;
+
+ __clear_bit(hit, &cbs->dirty);
+ hit++;
+ }
+
+ return 0;
+}
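+
+/*
+ * For example, if both VMW_BINDING_RT_BIT and VMW_BINDING_VB_BIT are set,
+ * a single call emits one render-target command followed by one vertex
+ * buffer command, clearing each dirty bit as the commands are emitted.
+ */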
+
+/**
+ * vmw_binding_scrub_sr - Schedule a dx shader resource binding
+ * scrub from a context
+ *
+ * @bi: single binding information.
+ * @rebind: Whether to issue a bind instead of scrub command.
+ */
+static int vmw_binding_scrub_sr(struct vmw_ctx_bindinfo *bi, bool rebind)
+{
+ struct vmw_ctx_bindinfo_view *biv =
+ container_of(bi, struct vmw_ctx_bindinfo_view, bi);
+ struct vmw_ctx_binding_state *cbs =
+ vmw_context_binding_state(bi->ctx);
+
+ __set_bit(biv->slot, cbs->per_shader[biv->shader_slot].dirty_sr);
+ __set_bit(VMW_BINDING_PS_SR_BIT,
+ &cbs->per_shader[biv->shader_slot].dirty);
+ __set_bit(VMW_BINDING_PS_BIT, &cbs->dirty);
+
+ return 0;
+}
+
+/**
+ * vmw_binding_scrub_dx_rt - Schedule a dx rendertarget binding
+ * scrub from a context
+ *
+ * @bi: single binding information.
+ * @rebind: Whether to issue a bind instead of scrub command.
+ */
+static int vmw_binding_scrub_dx_rt(struct vmw_ctx_bindinfo *bi, bool rebind)
+{
+ struct vmw_ctx_binding_state *cbs =
+ vmw_context_binding_state(bi->ctx);
+
+ __set_bit(VMW_BINDING_RT_BIT, &cbs->dirty);
+
+ return 0;
+}
+
+/**
+ * vmw_binding_scrub_so - Schedule a dx streamoutput buffer binding
+ * scrub from a context
+ *
+ * @bi: single binding information.
+ * @rebind: Whether to issue a bind instead of scrub command.
+ */
+static int vmw_binding_scrub_so(struct vmw_ctx_bindinfo *bi, bool rebind)
+{
+ struct vmw_ctx_binding_state *cbs =
+ vmw_context_binding_state(bi->ctx);
+
+ __set_bit(VMW_BINDING_SO_BIT, &cbs->dirty);
+
+ return 0;
+}
+
+/**
+ * vmw_binding_scrub_vb - Schedule a dx vertex buffer binding
+ * scrub from a context
+ *
+ * @bi: single binding information.
+ * @rebind: Whether to issue a bind instead of scrub command.
+ */
+static int vmw_binding_scrub_vb(struct vmw_ctx_bindinfo *bi, bool rebind)
+{
+ struct vmw_ctx_bindinfo_vb *bivb =
+ container_of(bi, struct vmw_ctx_bindinfo_vb, bi);
+ struct vmw_ctx_binding_state *cbs =
+ vmw_context_binding_state(bi->ctx);
+
+ __set_bit(bivb->slot, cbs->dirty_vb);
+ __set_bit(VMW_BINDING_VB_BIT, &cbs->dirty);
+
+ return 0;
+}
+
+/**
+ * vmw_binding_scrub_ib - scrub a dx index buffer binding from a context
+ *
+ * @bi: single binding information.
+ * @rebind: Whether to issue a bind instead of scrub command.
+ */
+static int vmw_binding_scrub_ib(struct vmw_ctx_bindinfo *bi, bool rebind)
+{
+ struct vmw_ctx_bindinfo_ib *binding =
+ container_of(bi, typeof(*binding), bi);
+ struct vmw_private *dev_priv = bi->ctx->dev_priv;
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDXSetIndexBuffer body;
+ } *cmd;
+
+ cmd = vmw_fifo_reserve_dx(dev_priv, sizeof(*cmd), bi->ctx->id);
+ if (unlikely(cmd == NULL)) {
+ DRM_ERROR("Failed reserving FIFO space for DX index buffer "
+ "binding.\n");
+ return -ENOMEM;
+ }
+ cmd->header.id = SVGA_3D_CMD_DX_SET_INDEX_BUFFER;
+ cmd->header.size = sizeof(cmd->body);
+ if (rebind) {
+ cmd->body.sid = bi->res->id;
+ cmd->body.format = binding->format;
+ cmd->body.offset = binding->offset;
+ } else {
+ cmd->body.sid = SVGA3D_INVALID_ID;
+ cmd->body.format = 0;
+ cmd->body.offset = 0;
+ }
+
+ vmw_fifo_commit(dev_priv, sizeof(*cmd));
+
+ return 0;
+}
+
+/**
+ * vmw_binding_state_alloc - Allocate a struct vmw_ctx_binding_state with
+ * memory accounting.
+ *
+ * @dev_priv: Pointer to a device private structure.
+ *
+ * Returns a pointer to a newly allocated struct or an error pointer on error.
+ */
+struct vmw_ctx_binding_state *
+vmw_binding_state_alloc(struct vmw_private *dev_priv)
+{
+ struct vmw_ctx_binding_state *cbs;
+ int ret;
+
+ ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv), sizeof(*cbs),
+ false, false);
+ if (ret)
+ return ERR_PTR(ret);
+
+ cbs = vzalloc(sizeof(*cbs));
+ if (!cbs) {
+ ttm_mem_global_free(vmw_mem_glob(dev_priv), sizeof(*cbs));
+ return ERR_PTR(-ENOMEM);
+ }
+
+ cbs->dev_priv = dev_priv;
+ INIT_LIST_HEAD(&cbs->list);
+
+ return cbs;
+}
+
+/**
+ * vmw_binding_state_free - Free a struct vmw_ctx_binding_state and its
+ * memory accounting info.
+ *
+ * @cbs: Pointer to the struct vmw_ctx_binding_state to be freed.
+ */
+void vmw_binding_state_free(struct vmw_ctx_binding_state *cbs)
+{
+ struct vmw_private *dev_priv = cbs->dev_priv;
+
+ vfree(cbs);
+ ttm_mem_global_free(vmw_mem_glob(dev_priv), sizeof(*cbs));
+}
+
+/**
+ * vmw_binding_state_list - Get the binding list of a
+ * struct vmw_ctx_binding_state
+ *
+ * @cbs: Pointer to the struct vmw_ctx_binding_state
+ *
+ * Returns the binding list which can be used to traverse through the bindings
+ * and access the resource information of all bindings.
+ */
+struct list_head *vmw_binding_state_list(struct vmw_ctx_binding_state *cbs)
+{
+ return &cbs->list;
+}
+
+/**
+ * vmw_binding_state_reset - clear a struct vmw_ctx_binding_state
+ *
+ * @cbs: Pointer to the struct vmw_ctx_binding_state to be cleared
+ *
+ * Drops all bindings registered in @cbs. No device binding actions are
+ * performed.
+ */
+void vmw_binding_state_reset(struct vmw_ctx_binding_state *cbs)
+{
+ struct vmw_ctx_bindinfo *entry, *next;
+
+ list_for_each_entry_safe(entry, next, &cbs->list, ctx_list)
+ vmw_binding_drop(entry);
+}
+
+/*
+ * This function is unused at run-time, and only used to hold various build
+ * asserts important for code optimization assumptions.
+ */
+static void vmw_binding_build_asserts(void)
+{
+ BUILD_BUG_ON(SVGA3D_NUM_SHADERTYPE_DX10 != 3);
+ BUILD_BUG_ON(SVGA3D_MAX_SIMULTANEOUS_RENDER_TARGETS > SVGA3D_RT_MAX);
+ BUILD_BUG_ON(sizeof(uint32) != sizeof(u32));
+
+ /*
+ * struct vmw_ctx_binding_state::bind_cmd_buffer is used for various
+ * view id arrays.
+ */
+ BUILD_BUG_ON(VMW_MAX_VIEW_BINDINGS < SVGA3D_RT_MAX);
+ BUILD_BUG_ON(VMW_MAX_VIEW_BINDINGS < SVGA3D_DX_MAX_SRVIEWS);
+ BUILD_BUG_ON(VMW_MAX_VIEW_BINDINGS < SVGA3D_DX_MAX_CONSTBUFFERS);
+
+ /*
+ * struct vmw_ctx_binding_state::bind_cmd_buffer is used for
+ * u32 view ids, SVGA3dSoTargets and SVGA3dVertexBuffers
+ */
+ BUILD_BUG_ON(SVGA3D_DX_MAX_SOTARGETS*sizeof(SVGA3dSoTarget) >
+ VMW_MAX_VIEW_BINDINGS*sizeof(u32));
+ BUILD_BUG_ON(SVGA3D_DX_MAX_VERTEXBUFFERS*sizeof(SVGA3dVertexBuffer) >
+ VMW_MAX_VIEW_BINDINGS*sizeof(u32));
+}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_binding.h b/drivers/gpu/drm/vmwgfx/vmwgfx_binding.h
new file mode 100644
index 000000000000..bf2e77ad5a20
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_binding.h
@@ -0,0 +1,209 @@
+/**************************************************************************
+ *
+ * Copyright © 2015 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+#ifndef _VMWGFX_BINDING_H_
+#define _VMWGFX_BINDING_H_
+
+#include "device_include/svga3d_reg.h"
+#include <linux/list.h>
+
+#define VMW_MAX_VIEW_BINDINGS 128
+
+struct vmw_private;
+struct vmw_ctx_binding_state;
+
+/*
+ * enum vmw_ctx_binding_type - abstract resource to context binding types
+ */
+enum vmw_ctx_binding_type {
+ vmw_ctx_binding_shader,
+ vmw_ctx_binding_rt,
+ vmw_ctx_binding_tex,
+ vmw_ctx_binding_cb,
+ vmw_ctx_binding_dx_shader,
+ vmw_ctx_binding_dx_rt,
+ vmw_ctx_binding_sr,
+ vmw_ctx_binding_ds,
+ vmw_ctx_binding_so,
+ vmw_ctx_binding_vb,
+ vmw_ctx_binding_ib,
+ vmw_ctx_binding_max
+};
+
+/**
+ * struct vmw_ctx_bindinfo - single binding metadata
+ *
+ * @ctx_list: List head for the context's list of bindings.
+ * @res_list: List head for a resource's list of bindings.
+ * @ctx: Non-refcounted pointer to the context that owns the binding. NULL
+ * indicates no binding present.
+ * @res: Non-refcounted pointer to the resource the binding points to. This
+ * is typically a surface or a view.
+ * @bt: Binding type.
+ * @scrubbed: Whether the binding has been scrubbed from the context.
+ */
+struct vmw_ctx_bindinfo {
+ struct list_head ctx_list;
+ struct list_head res_list;
+ struct vmw_resource *ctx;
+ struct vmw_resource *res;
+ enum vmw_ctx_binding_type bt;
+ bool scrubbed;
+};
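+
+/*
+ * The two embedded list heads let a binding be found both from its
+ * owning context (@ctx_list) and from the resource it points to
+ * (@res_list), which is what keeps resource-side scrub and kill cheap.
+ */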
+
+/**
+ * struct vmw_ctx_bindinfo_tex - texture stage binding metadata
+ *
+ * @bi: struct vmw_ctx_bindinfo we derive from.
+ * @texture_stage: Device data used to reconstruct binding command.
+ */
+struct vmw_ctx_bindinfo_tex {
+ struct vmw_ctx_bindinfo bi;
+ uint32 texture_stage;
+};
+
+/**
+ * struct vmw_ctx_bindinfo_shader - Shader binding metadata
+ *
+ * @bi: struct vmw_ctx_bindinfo we derive from.
+ * @shader_slot: Device data used to reconstruct binding command.
+ */
+struct vmw_ctx_bindinfo_shader {
+ struct vmw_ctx_bindinfo bi;
+ SVGA3dShaderType shader_slot;
+};
+
+/**
+ * struct vmw_ctx_bindinfo_cb - Constant buffer binding metadata
+ *
+ * @bi: struct vmw_ctx_bindinfo we derive from.
+ * @shader_slot: Device data used to reconstruct binding command.
+ * @offset: Device data used to reconstruct binding command.
+ * @size: Device data used to reconstruct binding command.
+ * @slot: Device data used to reconstruct binding command.
+ */
+struct vmw_ctx_bindinfo_cb {
+ struct vmw_ctx_bindinfo bi;
+ SVGA3dShaderType shader_slot;
+ uint32 offset;
+ uint32 size;
+ uint32 slot;
+};
+
+/**
+ * struct vmw_ctx_bindinfo_view - View binding metadata
+ *
+ * @bi: struct vmw_ctx_bindinfo we derive from.
+ * @shader_slot: Device data used to reconstruct binding command.
+ * @slot: Device data used to reconstruct binding command.
+ */
+struct vmw_ctx_bindinfo_view {
+ struct vmw_ctx_bindinfo bi;
+ SVGA3dShaderType shader_slot;
+ uint32 slot;
+};
+
+/**
+ * struct vmw_ctx_bindinfo_so - StreamOutput binding metadata
+ *
+ * @bi: struct vmw_ctx_bindinfo we derive from.
+ * @offset: Device data used to reconstruct binding command.
+ * @size: Device data used to reconstruct binding command.
+ * @slot: Device data used to reconstruct binding command.
+ */
+struct vmw_ctx_bindinfo_so {
+ struct vmw_ctx_bindinfo bi;
+ uint32 offset;
+ uint32 size;
+ uint32 slot;
+};
+
+/**
+ * struct vmw_ctx_bindinfo_vb - Vertex buffer binding metadata
+ *
+ * @bi: struct vmw_ctx_bindinfo we derive from.
+ * @offset: Device data used to reconstruct binding command.
+ * @stride: Device data used to reconstruct binding command.
+ * @slot: Device data used to reconstruct binding command.
+ */
+struct vmw_ctx_bindinfo_vb {
+ struct vmw_ctx_bindinfo bi;
+ uint32 offset;
+ uint32 stride;
+ uint32 slot;
+};
+
+/**
+ * struct vmw_ctx_bindinfo_ib - Index buffer binding metadata
+ *
+ * @bi: struct vmw_ctx_bindinfo we derive from.
+ * @offset: Device data used to reconstruct binding command.
+ * @format: Device data used to reconstruct binding command.
+ */
+struct vmw_ctx_bindinfo_ib {
+ struct vmw_ctx_bindinfo bi;
+ uint32 offset;
+ uint32 format;
+};
+
+/**
+ * struct vmw_dx_shader_bindings - per shader type context binding state
+ *
+ * @shader: The shader binding for this shader type
+ * @const_buffer: Const buffer bindings for this shader type.
+ * @shader_res: Shader resource view bindings for this shader type.
+ * @dirty_sr: Bitmap tracking individual shader resource bindings changes
+ * that have not yet been emitted to the device.
+ * @dirty: Bitmap tracking per-binding type binding changes that have not
+ * yet been emitted to the device.
+ */
+struct vmw_dx_shader_bindings {
+ struct vmw_ctx_bindinfo_shader shader;
+ struct vmw_ctx_bindinfo_cb const_buffers[SVGA3D_DX_MAX_CONSTBUFFERS];
+ struct vmw_ctx_bindinfo_view shader_res[SVGA3D_DX_MAX_SRVIEWS];
+ DECLARE_BITMAP(dirty_sr, SVGA3D_DX_MAX_SRVIEWS);
+ unsigned long dirty;
+};
+
+extern void vmw_binding_add(struct vmw_ctx_binding_state *cbs,
+ const struct vmw_ctx_bindinfo *ci,
+ u32 shader_slot, u32 slot);
+extern void
+vmw_binding_state_commit(struct vmw_ctx_binding_state *to,
+ struct vmw_ctx_binding_state *from);
+extern void vmw_binding_res_list_kill(struct list_head *head);
+extern void vmw_binding_res_list_scrub(struct list_head *head);
+extern int vmw_binding_rebind_all(struct vmw_ctx_binding_state *cbs);
+extern void vmw_binding_state_kill(struct vmw_ctx_binding_state *cbs);
+extern void vmw_binding_state_scrub(struct vmw_ctx_binding_state *cbs);
+extern struct vmw_ctx_binding_state *
+vmw_binding_state_alloc(struct vmw_private *dev_priv);
+extern void vmw_binding_state_free(struct vmw_ctx_binding_state *cbs);
+extern struct list_head *
+vmw_binding_state_list(struct vmw_ctx_binding_state *cbs);
+extern void vmw_binding_state_reset(struct vmw_ctx_binding_state *cbs);
+
+#endif
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
index cff2bf9db9d2..3329f623c8bf 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
@@ -1,6 +1,6 @@
/**************************************************************************
*
- * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
+ * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -72,6 +72,12 @@ static struct ttm_place mob_placement_flags = {
.flags = VMW_PL_FLAG_MOB | TTM_PL_FLAG_CACHED
};
+static struct ttm_place mob_ne_placement_flags = {
+ .fpfn = 0,
+ .lpfn = 0,
+ .flags = VMW_PL_FLAG_MOB | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
+};
+
struct ttm_placement vmw_vram_placement = {
.num_placement = 1,
.placement = &vram_placement_flags,
@@ -200,6 +206,13 @@ struct ttm_placement vmw_mob_placement = {
.busy_placement = &mob_placement_flags
};
+struct ttm_placement vmw_mob_ne_placement = {
+ .num_placement = 1,
+ .num_busy_placement = 1,
+ .placement = &mob_ne_placement_flags,
+ .busy_placement = &mob_ne_placement_flags
+};
+
struct vmw_ttm_tt {
struct ttm_dma_tt dma_ttm;
struct vmw_private *dev_priv;
@@ -804,9 +817,9 @@ static int vmw_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
/**
* vmw_move_notify - TTM move_notify_callback
*
- * @bo: The TTM buffer object about to move.
- * @mem: The truct ttm_mem_reg indicating to what memory
- * region the move is taking place.
+ * @bo: The TTM buffer object about to move.
+ * @mem: The struct ttm_mem_reg indicating to what memory
+ * region the move is taking place.
*
* Calls move_notify for all subsystems needing it.
* (currently only resources).
@@ -815,13 +828,14 @@ static void vmw_move_notify(struct ttm_buffer_object *bo,
struct ttm_mem_reg *mem)
{
vmw_resource_move_notify(bo, mem);
+ vmw_query_move_notify(bo, mem);
}
/**
* vmw_swap_notify - TTM move_notify_callback
*
- * @bo: The TTM buffer object about to be swapped out.
+ * @bo: The TTM buffer object about to be swapped out.
*/
static void vmw_swap_notify(struct ttm_buffer_object *bo)
{
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
new file mode 100644
index 000000000000..5ae8f921da2a
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
@@ -0,0 +1,1303 @@
+/**************************************************************************
+ *
+ * Copyright © 2015 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "vmwgfx_drv.h"
+#include "ttm/ttm_bo_api.h"
+
+/*
+ * Size of inline command buffers. Try to make sure that the page size is a
+ * multiple of the DMA pool allocation size.
+ */
+#define VMW_CMDBUF_INLINE_ALIGN 64
+#define VMW_CMDBUF_INLINE_SIZE \
+ (1024 - ALIGN(sizeof(SVGACBHeader), VMW_CMDBUF_INLINE_ALIGN))
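The 1024-byte figure is chosen so that a device header plus its inline space packs evenly into a page-sized DMA pool. A worked example, assuming sizeof(SVGACBHeader) is no larger than 64 bytes (an assumption, not guaranteed by this patch):

	/*
	 * ALIGN(sizeof(SVGACBHeader), 64)   == 64
	 * VMW_CMDBUF_INLINE_SIZE            == 1024 - 64 == 960
	 * sizeof(struct vmw_cmdbuf_dheader) == 64 + 960  == 1024
	 *
	 * i.e. exactly four inline headers fit in a 4 KiB DMA pool page.
	 */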
+
+/**
+ * struct vmw_cmdbuf_context - Command buffer context queues
+ *
+ * @submitted: List of command buffers that have been submitted to the
+ * manager but not yet submitted to hardware.
+ * @hw_submitted: List of command buffers submitted to hardware.
+ * @preempted: List of preempted command buffers.
+ * @num_hw_submitted: Number of buffers currently being processed by hardware
+ */
+struct vmw_cmdbuf_context {
+ struct list_head submitted;
+ struct list_head hw_submitted;
+ struct list_head preempted;
+ unsigned num_hw_submitted;
+};
+
+/**
+ * struct vmw_cmdbuf_man - Command buffer manager
+ *
+ * @cur_mutex: Mutex protecting the command buffer used for incremental small
+ * kernel command submissions, @cur.
+ * @space_mutex: Mutex to protect against starvation when we allocate
+ * main pool buffer space.
+ * @work: A struct work_struct implementing command buffer error handling.
+ * Immutable.
+ * @dev_priv: Pointer to the device private struct. Immutable.
+ * @ctx: Array of command buffer context queues. The queues and the context
+ * data is protected by @lock.
+ * @error: List of command buffers that have caused device errors.
+ * Protected by @lock.
+ * @mm: Range manager for the command buffer space. Manager allocations and
+ * frees are protected by @lock.
+ * @cmd_space: Buffer object for the command buffer space, unless we were
+ * able to make a contiguous coherent DMA memory allocation, in which case
+ * @handle is used instead. Immutable.
+ * @map_obj: Mapping state for @cmd_space. Immutable.
+ * @map: Pointer to command buffer space. May be a mapped buffer object or
+ * a contiguous coherent DMA memory allocation. Immutable.
+ * @cur: Command buffer for small kernel command submissions. Protected by
+ * the @cur_mutex.
+ * @cur_pos: Space already used in @cur. Protected by @cur_mutex.
+ * @default_size: Default size for the @cur command buffer. Immutable.
+ * @max_hw_submitted: Max number of in-flight command buffers the device can
+ * handle. Immutable.
+ * @lock: Spinlock protecting command submission queues.
+ * @headers: Pool of DMA memory for device command buffer headers.
+ * Internal protection.
+ * @dheaders: Pool of DMA memory for device command buffer headers with trailing
+ * space for inline data. Internal protection.
+ * @tasklet: Tasklet struct for irq processing. Immutable.
+ * @alloc_queue: Wait queue for processes waiting to allocate command buffer
+ * space.
+ * @idle_queue: Wait queue for processes waiting for command buffer idle.
+ * @irq_on: Whether the process function has requested irq to be turned on.
+ * Protected by @lock.
+ * @using_mob: Whether the command buffer space is a MOB or a contiguous DMA
+ * allocation. Immutable.
+ * @has_pool: Has a large pool of DMA memory which allows larger allocations.
+ * Typically this is false only during bootstrap.
+ * @handle: DMA address handle for the command buffer space if @using_mob is
+ * false. Immutable.
+ * @size: The size of the command buffer space. Immutable.
+ */
+struct vmw_cmdbuf_man {
+ struct mutex cur_mutex;
+ struct mutex space_mutex;
+ struct work_struct work;
+ struct vmw_private *dev_priv;
+ struct vmw_cmdbuf_context ctx[SVGA_CB_CONTEXT_MAX];
+ struct list_head error;
+ struct drm_mm mm;
+ struct ttm_buffer_object *cmd_space;
+ struct ttm_bo_kmap_obj map_obj;
+ u8 *map;
+ struct vmw_cmdbuf_header *cur;
+ size_t cur_pos;
+ size_t default_size;
+ unsigned max_hw_submitted;
+ spinlock_t lock;
+ struct dma_pool *headers;
+ struct dma_pool *dheaders;
+ struct tasklet_struct tasklet;
+ wait_queue_head_t alloc_queue;
+ wait_queue_head_t idle_queue;
+ bool irq_on;
+ bool using_mob;
+ bool has_pool;
+ dma_addr_t handle;
+ size_t size;
+};
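The field documentation above implies a lock hierarchy. A sketch of the apparent ordering, as read from the functions below (outermost first; not stated explicitly in the patch):

	/*
	 * man->cur_mutex    - serializes users of the 'current' buffer
	 * man->space_mutex  - serializes waiters for main pool space
	 * man->lock         - protects the queues, the range manager and
	 *                     irq_on; taken with the _bh variants since
	 *                     the tasklet below also acquires it.
	 */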
+
+/**
+ * struct vmw_cmdbuf_header - Command buffer metadata
+ *
+ * @man: The command buffer manager.
+ * @cb_header: Device command buffer header, allocated from a DMA pool.
+ * @cb_context: The device command buffer context.
+ * @list: List head for attaching to the manager lists.
+ * @node: The range manager node.
+ * @handle: The DMA address of @cb_header. Handed to the device on command
+ * buffer submission.
+ * @cmd: Pointer to the command buffer space of this buffer.
+ * @size: Size of the command buffer space of this buffer.
+ * @reserved: Reserved space of this buffer.
+ * @inline_space: Whether inline command buffer space is used.
+ */
+struct vmw_cmdbuf_header {
+ struct vmw_cmdbuf_man *man;
+ SVGACBHeader *cb_header;
+ SVGACBContext cb_context;
+ struct list_head list;
+ struct drm_mm_node node;
+ dma_addr_t handle;
+ u8 *cmd;
+ size_t size;
+ size_t reserved;
+ bool inline_space;
+};
+
+/**
+ * struct vmw_cmdbuf_dheader - Device command buffer header with inline
+ * command buffer space.
+ *
+ * @cb_header: Device command buffer header.
+ * @cmd: Inline command buffer space.
+ */
+struct vmw_cmdbuf_dheader {
+ SVGACBHeader cb_header;
+ u8 cmd[VMW_CMDBUF_INLINE_SIZE] __aligned(VMW_CMDBUF_INLINE_ALIGN);
+};
+
+/**
+ * struct vmw_cmdbuf_alloc_info - Command buffer space allocation metadata
+ *
+ * @page_size: Size of requested command buffer space in pages.
+ * @node: Pointer to the range manager node.
+ * @done: True if this allocation has succeeded.
+ */
+struct vmw_cmdbuf_alloc_info {
+ size_t page_size;
+ struct drm_mm_node *node;
+ bool done;
+};
+
+/* Loop over each context in the command buffer manager. */
+#define for_each_cmdbuf_ctx(_man, _i, _ctx) \
+ for (_i = 0, _ctx = &(_man)->ctx[0]; (_i) < SVGA_CB_CONTEXT_MAX; \
+ ++(_i), ++(_ctx))
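As a usage illustration only, a hypothetical helper counting buffers queued but not yet handed to hardware could look like this; it assumes the caller holds man->lock, which protects the queues:

	static unsigned vmw_cmdbuf_count_queued(struct vmw_cmdbuf_man *man)
	{
		struct vmw_cmdbuf_context *ctx;
		struct vmw_cmdbuf_header *entry;
		unsigned count = 0;
		int i;

		/* Walk all contexts and their not-yet-submitted queues. */
		for_each_cmdbuf_ctx(man, i, ctx)
			list_for_each_entry(entry, &ctx->submitted, list)
				count++;

		return count;
	}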
+
+static int vmw_cmdbuf_startstop(struct vmw_cmdbuf_man *man, bool enable);
+
+
+/**
+ * vmw_cmdbuf_cur_lock - Helper to lock the cur_mutex.
+ *
+ * @man: The range manager.
+ * @interruptible: Whether to wait interruptible when locking.
+ */
+static int vmw_cmdbuf_cur_lock(struct vmw_cmdbuf_man *man, bool interruptible)
+{
+ if (interruptible) {
+ if (mutex_lock_interruptible(&man->cur_mutex))
+ return -ERESTARTSYS;
+ } else {
+ mutex_lock(&man->cur_mutex);
+ }
+
+ return 0;
+}
+
+/**
+ * vmw_cmdbuf_cur_unlock - Helper to unlock the cur_mutex.
+ *
+ * @man: The range manager.
+ */
+static void vmw_cmdbuf_cur_unlock(struct vmw_cmdbuf_man *man)
+{
+ mutex_unlock(&man->cur_mutex);
+}
+
+/**
+ * vmw_cmdbuf_header_inline_free - Free a struct vmw_cmdbuf_header that has
+ * been used for the device context with inline command buffers.
+ * May be called without holding the manager lock.
+ *
+ * @header: Pointer to the header to free.
+ */
+static void vmw_cmdbuf_header_inline_free(struct vmw_cmdbuf_header *header)
+{
+ struct vmw_cmdbuf_dheader *dheader;
+
+ if (WARN_ON_ONCE(!header->inline_space))
+ return;
+
+ dheader = container_of(header->cb_header, struct vmw_cmdbuf_dheader,
+ cb_header);
+ dma_pool_free(header->man->dheaders, dheader, header->handle);
+ kfree(header);
+}
+
+/**
+ * __vmw_cmdbuf_header_free - Free a struct vmw_cmdbuf_header and its
+ * associated structures.
+ *
+ * @header: Pointer to the header to free.
+ *
+ * For internal use. Must be called with man::lock held.
+ */
+static void __vmw_cmdbuf_header_free(struct vmw_cmdbuf_header *header)
+{
+ struct vmw_cmdbuf_man *man = header->man;
+
+ BUG_ON(!spin_is_locked(&man->lock));
+
+ if (header->inline_space) {
+ vmw_cmdbuf_header_inline_free(header);
+ return;
+ }
+
+ drm_mm_remove_node(&header->node);
+ wake_up_all(&man->alloc_queue);
+ if (header->cb_header)
+ dma_pool_free(man->headers, header->cb_header,
+ header->handle);
+ kfree(header);
+}
+
+/**
+ * vmw_cmdbuf_header_free - Free a struct vmw_cmdbuf_header and its
+ * associated structures.
+ *
+ * @header: Pointer to the header to free.
+ */
+void vmw_cmdbuf_header_free(struct vmw_cmdbuf_header *header)
+{
+ struct vmw_cmdbuf_man *man = header->man;
+
+ /* Avoid locking if inline_space */
+ if (header->inline_space) {
+ vmw_cmdbuf_header_inline_free(header);
+ return;
+ }
+ spin_lock_bh(&man->lock);
+ __vmw_cmdbuf_header_free(header);
+ spin_unlock_bh(&man->lock);
+}
+
+
+/**
+ * vmw_cmdbuf_header_submit: Submit a command buffer to hardware.
+ *
+ * @header: The header of the buffer to submit.
+ */
+static int vmw_cmdbuf_header_submit(struct vmw_cmdbuf_header *header)
+{
+ struct vmw_cmdbuf_man *man = header->man;
+ u32 val;
+
+ if (sizeof(header->handle) > 4)
+ val = (header->handle >> 32);
+ else
+ val = 0;
+ vmw_write(man->dev_priv, SVGA_REG_COMMAND_HIGH, val);
+
+ val = (header->handle & 0xFFFFFFFFULL);
+ val |= header->cb_context & SVGA_CB_CONTEXT_MASK;
+ vmw_write(man->dev_priv, SVGA_REG_COMMAND_LOW, val);
+
+ return header->cb_header->status;
+}
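A worked example of the register protocol, with a made-up handle value: for handle == 0x0000000123456040 and cb_context == 1, the two writes become

	SVGA_REG_COMMAND_HIGH <- 0x00000001        /* upper 32 bits */
	SVGA_REG_COMMAND_LOW  <- 0x23456040 | 1    /* lower 32 bits | context */

The 64-byte alignment of the DMA pools (see vmw_cmdbuf_man_create() below) appears to be what keeps the low bits of the handle free for the SVGA_CB_CONTEXT_MASK context id.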
+
+/**
+ * vmw_cmdbuf_ctx_init: Initialize a command buffer context.
+ *
+ * @ctx: The command buffer context to initialize
+ */
+static void vmw_cmdbuf_ctx_init(struct vmw_cmdbuf_context *ctx)
+{
+ INIT_LIST_HEAD(&ctx->hw_submitted);
+ INIT_LIST_HEAD(&ctx->submitted);
+ INIT_LIST_HEAD(&ctx->preempted);
+ ctx->num_hw_submitted = 0;
+}
+
+/**
+ * vmw_cmdbuf_ctx_submit: Submit command buffers from a command buffer
+ * context.
+ *
+ * @man: The command buffer manager.
+ * @ctx: The command buffer context.
+ *
+ * Submits command buffers to hardware until there are no more command
+ * buffers to submit or the hardware can't handle more command buffers.
+ */
+static void vmw_cmdbuf_ctx_submit(struct vmw_cmdbuf_man *man,
+ struct vmw_cmdbuf_context *ctx)
+{
+ while (ctx->num_hw_submitted < man->max_hw_submitted &&
+ !list_empty(&ctx->submitted)) {
+ struct vmw_cmdbuf_header *entry;
+ SVGACBStatus status;
+
+ entry = list_first_entry(&ctx->submitted,
+ struct vmw_cmdbuf_header,
+ list);
+
+ status = vmw_cmdbuf_header_submit(entry);
+
+ /* This should never happen */
+ if (WARN_ON_ONCE(status == SVGA_CB_STATUS_QUEUE_FULL)) {
+ entry->cb_header->status = SVGA_CB_STATUS_NONE;
+ break;
+ }
+
+ list_del(&entry->list);
+ list_add_tail(&entry->list, &ctx->hw_submitted);
+ ctx->num_hw_submitted++;
+ }
+
+}
+
+/**
+ * vmw_cmdbuf_ctx_process: Process a command buffer context.
+ *
+ * @man: The command buffer manager.
+ * @ctx: The command buffer context.
+ * @notempty: Incremented if the context still has command buffers queued
+ * for submission after processing.
+ *
+ * Submit command buffers to hardware if possible, and process finished
+ * buffers. Typically that means freeing them, but on preemption or error,
+ * take appropriate action. Wake up waiters if appropriate.
+ */
+static void vmw_cmdbuf_ctx_process(struct vmw_cmdbuf_man *man,
+ struct vmw_cmdbuf_context *ctx,
+ int *notempty)
+{
+ struct vmw_cmdbuf_header *entry, *next;
+
+ vmw_cmdbuf_ctx_submit(man, ctx);
+
+ list_for_each_entry_safe(entry, next, &ctx->hw_submitted, list) {
+ SVGACBStatus status = entry->cb_header->status;
+
+ if (status == SVGA_CB_STATUS_NONE)
+ break;
+
+ list_del(&entry->list);
+ wake_up_all(&man->idle_queue);
+ ctx->num_hw_submitted--;
+ switch (status) {
+ case SVGA_CB_STATUS_COMPLETED:
+ __vmw_cmdbuf_header_free(entry);
+ break;
+ case SVGA_CB_STATUS_COMMAND_ERROR:
+ case SVGA_CB_STATUS_CB_HEADER_ERROR:
+ list_add_tail(&entry->list, &man->error);
+ schedule_work(&man->work);
+ break;
+ case SVGA_CB_STATUS_PREEMPTED:
+ list_add(&entry->list, &ctx->preempted);
+ break;
+ default:
+ WARN_ONCE(true, "Undefined command buffer status.\n");
+ __vmw_cmdbuf_header_free(entry);
+ break;
+ }
+ }
+
+ vmw_cmdbuf_ctx_submit(man, ctx);
+ if (!list_empty(&ctx->submitted))
+ (*notempty)++;
+}
+
+/**
+ * vmw_cmdbuf_man_process - Process all command buffer contexts and
+ * switch on and off irqs as appropriate.
+ *
+ * @man: The command buffer manager.
+ *
+ * Calls vmw_cmdbuf_ctx_process() on all contexts. If any context has
+ * command buffers left that are not submitted to hardware, make sure
+ * IRQ handling is turned on. Otherwise, make sure it's turned off. This
+ * function may return -EAGAIN to indicate that it should be rerun due to
+ * possibly missed IRQs if IRQs have just been turned on.
+ */
+static int vmw_cmdbuf_man_process(struct vmw_cmdbuf_man *man)
+{
+ int notempty = 0;
+ struct vmw_cmdbuf_context *ctx;
+ int i;
+
+ for_each_cmdbuf_ctx(man, i, ctx)
+ vmw_cmdbuf_ctx_process(man, ctx, &notempty);
+
+ if (man->irq_on && !notempty) {
+ vmw_generic_waiter_remove(man->dev_priv,
+ SVGA_IRQFLAG_COMMAND_BUFFER,
+ &man->dev_priv->cmdbuf_waiters);
+ man->irq_on = false;
+ } else if (!man->irq_on && notempty) {
+ vmw_generic_waiter_add(man->dev_priv,
+ SVGA_IRQFLAG_COMMAND_BUFFER,
+ &man->dev_priv->cmdbuf_waiters);
+ man->irq_on = true;
+
+ /* Rerun in case we just missed an irq. */
+ return -EAGAIN;
+ }
+
+ return 0;
+}
+
+/**
+ * vmw_cmdbuf_ctx_add - Schedule a command buffer for submission on a
+ * command buffer context
+ *
+ * @man: The command buffer manager.
+ * @header: The header of the buffer to submit.
+ * @cb_context: The command buffer context to use.
+ *
+ * This function adds @header to the "submitted" queue of the command
+ * buffer context identified by @cb_context. It then calls the command buffer
+ * manager processing to potentially submit the buffer to hardware.
+ * @man->lock needs to be held when calling this function.
+ */
+static void vmw_cmdbuf_ctx_add(struct vmw_cmdbuf_man *man,
+ struct vmw_cmdbuf_header *header,
+ SVGACBContext cb_context)
+{
+ if (!(header->cb_header->flags & SVGA_CB_FLAG_DX_CONTEXT))
+ header->cb_header->dxContext = 0;
+ header->cb_context = cb_context;
+ list_add_tail(&header->list, &man->ctx[cb_context].submitted);
+
+ if (vmw_cmdbuf_man_process(man) == -EAGAIN)
+ vmw_cmdbuf_man_process(man);
+}
+
+/**
+ * vmw_cmdbuf_man_tasklet - The main part of the command buffer interrupt
+ * handler implemented as a tasklet.
+ *
+ * @data: Tasklet closure. A pointer to the command buffer manager cast to
+ * an unsigned long.
+ *
+ * The bottom half (tasklet) of the interrupt handler simply calls into the
+ * command buffer processor to free finished buffers and submit any
+ * queued buffers to hardware.
+ */
+static void vmw_cmdbuf_man_tasklet(unsigned long data)
+{
+ struct vmw_cmdbuf_man *man = (struct vmw_cmdbuf_man *) data;
+
+ spin_lock(&man->lock);
+ if (vmw_cmdbuf_man_process(man) == -EAGAIN)
+ (void) vmw_cmdbuf_man_process(man);
+ spin_unlock(&man->lock);
+}
+
+/**
+ * vmw_cmdbuf_work_func - The deferred work function that handles
+ * command buffer errors.
+ *
+ * @work: The work func closure argument.
+ *
+ * Restarting the command buffer context after an error requires process
+ * context, so it is deferred to this work function.
+ */
+static void vmw_cmdbuf_work_func(struct work_struct *work)
+{
+ struct vmw_cmdbuf_man *man =
+ container_of(work, struct vmw_cmdbuf_man, work);
+ struct vmw_cmdbuf_header *entry, *next;
+ bool restart = false;
+
+ spin_lock_bh(&man->lock);
+ list_for_each_entry_safe(entry, next, &man->error, list) {
+ restart = true;
+ DRM_ERROR("Command buffer error.\n");
+
+ list_del(&entry->list);
+ __vmw_cmdbuf_header_free(entry);
+ wake_up_all(&man->idle_queue);
+ }
+ spin_unlock_bh(&man->lock);
+
+ if (restart && vmw_cmdbuf_startstop(man, true))
+ DRM_ERROR("Failed restarting command buffer context 0.\n");
+
+}
+
+/**
+ * vmw_cmdbuf_man_idle - Check whether the command buffer manager is idle.
+ *
+ * @man: The command buffer manager.
+ * @check_preempted: Check also the preempted queue for pending command buffers.
+ *
+ * Returns true if no command buffers are pending and the error list is empty.
+ */
+static bool vmw_cmdbuf_man_idle(struct vmw_cmdbuf_man *man,
+ bool check_preempted)
+{
+ struct vmw_cmdbuf_context *ctx;
+ bool idle = false;
+ int i;
+
+ spin_lock_bh(&man->lock);
+ vmw_cmdbuf_man_process(man);
+ for_each_cmdbuf_ctx(man, i, ctx) {
+ if (!list_empty(&ctx->submitted) ||
+ !list_empty(&ctx->hw_submitted) ||
+ (check_preempted && !list_empty(&ctx->preempted)))
+ goto out_unlock;
+ }
+
+ idle = list_empty(&man->error);
+
+out_unlock:
+ spin_unlock_bh(&man->lock);
+
+ return idle;
+}
+
+/**
+ * __vmw_cmdbuf_cur_flush - Flush the current command buffer for small kernel
+ * command submissions
+ *
+ * @man: The command buffer manager.
+ *
+ * Flushes the current command buffer without allocating a new one. A new one
+ * is automatically allocated when needed. Call with @man->cur_mutex held.
+ */
+static void __vmw_cmdbuf_cur_flush(struct vmw_cmdbuf_man *man)
+{
+ struct vmw_cmdbuf_header *cur = man->cur;
+
+ WARN_ON(!mutex_is_locked(&man->cur_mutex));
+
+ if (!cur)
+ return;
+
+ spin_lock_bh(&man->lock);
+ if (man->cur_pos == 0) {
+ __vmw_cmdbuf_header_free(cur);
+ goto out_unlock;
+ }
+
+ man->cur->cb_header->length = man->cur_pos;
+ vmw_cmdbuf_ctx_add(man, man->cur, SVGA_CB_CONTEXT_0);
+out_unlock:
+ spin_unlock_bh(&man->lock);
+ man->cur = NULL;
+ man->cur_pos = 0;
+}
+
+/**
+ * vmw_cmdbuf_cur_flush - Flush the current command buffer for small kernel
+ * command submissions
+ *
+ * @man: The command buffer manager.
+ * @interruptible: Whether to sleep interruptibly while waiting.
+ *
+ * Flushes the current command buffer without allocating a new one. A new one
+ * is automatically allocated when needed.
+ */
+int vmw_cmdbuf_cur_flush(struct vmw_cmdbuf_man *man,
+ bool interruptible)
+{
+ int ret = vmw_cmdbuf_cur_lock(man, interruptible);
+
+ if (ret)
+ return ret;
+
+ __vmw_cmdbuf_cur_flush(man);
+ vmw_cmdbuf_cur_unlock(man);
+
+ return 0;
+}
+
+/**
+ * vmw_cmdbuf_idle - Wait for command buffer manager idle.
+ *
+ * @man: The command buffer manager.
+ * @interruptible: Sleep interruptible while waiting.
+ * @timeout: Time out after this many ticks.
+ *
+ * Wait until the command buffer manager has processed all command buffers,
+ * or until a timeout occurs. If a timeout occurs, the function will return
+ * -EBUSY.
+ */
+int vmw_cmdbuf_idle(struct vmw_cmdbuf_man *man, bool interruptible,
+ unsigned long timeout)
+{
+ int ret;
+
+ ret = vmw_cmdbuf_cur_flush(man, interruptible);
+ vmw_generic_waiter_add(man->dev_priv,
+ SVGA_IRQFLAG_COMMAND_BUFFER,
+ &man->dev_priv->cmdbuf_waiters);
+
+ if (interruptible) {
+ ret = wait_event_interruptible_timeout
+ (man->idle_queue, vmw_cmdbuf_man_idle(man, true),
+ timeout);
+ } else {
+ ret = wait_event_timeout
+ (man->idle_queue, vmw_cmdbuf_man_idle(man, true),
+ timeout);
+ }
+ vmw_generic_waiter_remove(man->dev_priv,
+ SVGA_IRQFLAG_COMMAND_BUFFER,
+ &man->dev_priv->cmdbuf_waiters);
+ if (ret == 0) {
+ if (!vmw_cmdbuf_man_idle(man, true))
+ ret = -EBUSY;
+ else
+ ret = 0;
+ }
+ if (ret > 0)
+ ret = 0;
+
+ return ret;
+}
+
+/**
+ * vmw_cmdbuf_try_alloc - Try to allocate buffer space from the main pool.
+ *
+ * @man: The command buffer manager.
+ * @info: Allocation info. Will hold the size on entry and allocated mm node
+ * on successful return.
+ *
+ * Try to allocate buffer space from the main pool. Returns true and sets
+ * @info->done on success. Returns false if no space is currently available.
+ */
+static bool vmw_cmdbuf_try_alloc(struct vmw_cmdbuf_man *man,
+ struct vmw_cmdbuf_alloc_info *info)
+{
+ int ret;
+
+ if (info->done)
+ return true;
+
+ memset(info->node, 0, sizeof(*info->node));
+ spin_lock_bh(&man->lock);
+ ret = drm_mm_insert_node_generic(&man->mm, info->node, info->page_size,
+ 0, 0,
+ DRM_MM_SEARCH_DEFAULT,
+ DRM_MM_CREATE_DEFAULT);
+ spin_unlock_bh(&man->lock);
+ info->done = !ret;
+
+ return info->done;
+}
+
+/**
+ * vmw_cmdbuf_alloc_space - Allocate buffer space from the main pool.
+ *
+ * @man: The command buffer manager.
+ * @node: Pointer to pre-allocated range-manager node.
+ * @size: The size of the allocation.
+ * @interruptible: Whether to sleep interruptible while waiting for space.
+ *
+ * This function allocates buffer space from the main pool, and if there is
+ * no space available at the moment, it turns on IRQ handling and sleeps
+ * waiting for space to become available.
+ */
+static int vmw_cmdbuf_alloc_space(struct vmw_cmdbuf_man *man,
+ struct drm_mm_node *node,
+ size_t size,
+ bool interruptible)
+{
+ struct vmw_cmdbuf_alloc_info info;
+
+ info.page_size = PAGE_ALIGN(size) >> PAGE_SHIFT;
+ info.node = node;
+ info.done = false;
+
+ /*
+ * To prevent starvation of large requests, only one allocating call
+ * at a time waiting for space.
+ */
+ if (interruptible) {
+ if (mutex_lock_interruptible(&man->space_mutex))
+ return -ERESTARTSYS;
+ } else {
+ mutex_lock(&man->space_mutex);
+ }
+
+ /* Try to allocate space without waiting. */
+ if (vmw_cmdbuf_try_alloc(man, &info))
+ goto out_unlock;
+
+ vmw_generic_waiter_add(man->dev_priv,
+ SVGA_IRQFLAG_COMMAND_BUFFER,
+ &man->dev_priv->cmdbuf_waiters);
+
+ if (interruptible) {
+ int ret;
+
+ ret = wait_event_interruptible
+ (man->alloc_queue, vmw_cmdbuf_try_alloc(man, &info));
+ if (ret) {
+ vmw_generic_waiter_remove
+ (man->dev_priv, SVGA_IRQFLAG_COMMAND_BUFFER,
+ &man->dev_priv->cmdbuf_waiters);
+ mutex_unlock(&man->space_mutex);
+ return ret;
+ }
+ } else {
+ wait_event(man->alloc_queue, vmw_cmdbuf_try_alloc(man, &info));
+ }
+ vmw_generic_waiter_remove(man->dev_priv,
+ SVGA_IRQFLAG_COMMAND_BUFFER,
+ &man->dev_priv->cmdbuf_waiters);
+
+out_unlock:
+ mutex_unlock(&man->space_mutex);
+
+ return 0;
+}
+
+/**
+ * vmw_cmdbuf_space_pool - Set up a command buffer header with command buffer
+ * space from the main pool.
+ *
+ * @man: The command buffer manager.
+ * @header: Pointer to the header to set up.
+ * @size: The requested size of the buffer space.
+ * @interruptible: Whether to sleep interruptible while waiting for space.
+ */
+static int vmw_cmdbuf_space_pool(struct vmw_cmdbuf_man *man,
+ struct vmw_cmdbuf_header *header,
+ size_t size,
+ bool interruptible)
+{
+ SVGACBHeader *cb_hdr;
+ size_t offset;
+ int ret;
+
+ if (!man->has_pool)
+ return -ENOMEM;
+
+ ret = vmw_cmdbuf_alloc_space(man, &header->node, size, interruptible);
+
+ if (ret)
+ return ret;
+
+ header->cb_header = dma_pool_alloc(man->headers, GFP_KERNEL,
+ &header->handle);
+ if (!header->cb_header) {
+ ret = -ENOMEM;
+ goto out_no_cb_header;
+ }
+
+ header->size = header->node.size << PAGE_SHIFT;
+ cb_hdr = header->cb_header;
+ offset = header->node.start << PAGE_SHIFT;
+ header->cmd = man->map + offset;
+ memset(cb_hdr, 0, sizeof(*cb_hdr));
+ if (man->using_mob) {
+ cb_hdr->flags = SVGA_CB_FLAG_MOB;
+ cb_hdr->ptr.mob.mobid = man->cmd_space->mem.start;
+ cb_hdr->ptr.mob.mobOffset = offset;
+ } else {
+ cb_hdr->ptr.pa = (u64)man->handle + (u64)offset;
+ }
+
+ return 0;
+
+out_no_cb_header:
+ spin_lock_bh(&man->lock);
+ drm_mm_remove_node(&header->node);
+ spin_unlock_bh(&man->lock);
+
+ return ret;
+}
+
+/**
+ * vmw_cmdbuf_space_inline - Set up a command buffer header with
+ * inline command buffer space.
+ *
+ * @man: The command buffer manager.
+ * @header: Pointer to the header to set up.
+ * @size: The requested size of the buffer space.
+ */
+static int vmw_cmdbuf_space_inline(struct vmw_cmdbuf_man *man,
+ struct vmw_cmdbuf_header *header,
+ int size)
+{
+ struct vmw_cmdbuf_dheader *dheader;
+ SVGACBHeader *cb_hdr;
+
+ if (WARN_ON_ONCE(size > VMW_CMDBUF_INLINE_SIZE))
+ return -ENOMEM;
+
+ dheader = dma_pool_alloc(man->dheaders, GFP_KERNEL,
+ &header->handle);
+ if (!dheader)
+ return -ENOMEM;
+
+ header->inline_space = true;
+ header->size = VMW_CMDBUF_INLINE_SIZE;
+ cb_hdr = &dheader->cb_header;
+ header->cb_header = cb_hdr;
+ header->cmd = dheader->cmd;
+ memset(dheader, 0, sizeof(*dheader));
+ cb_hdr->status = SVGA_CB_STATUS_NONE;
+ cb_hdr->flags = SVGA_CB_FLAG_NONE;
+ cb_hdr->ptr.pa = (u64)header->handle +
+ (u64)offsetof(struct vmw_cmdbuf_dheader, cmd);
+
+ return 0;
+}
+
+/**
+ * vmw_cmdbuf_alloc - Allocate a command buffer header complete with
+ * command buffer space.
+ *
+ * @man: The command buffer manager.
+ * @size: The requested size of the buffer space.
+ * @interruptible: Whether to sleep interruptible while waiting for space.
+ * @p_header: Points to a header pointer to populate on successful return.
+ *
+ * Returns a pointer to command buffer space if successful. Otherwise
+ * returns an error pointer. The header pointer returned in @p_header should
+ * be used for upcoming calls to vmw_cmdbuf_reserve() and vmw_cmdbuf_commit().
+ */
+void *vmw_cmdbuf_alloc(struct vmw_cmdbuf_man *man,
+ size_t size, bool interruptible,
+ struct vmw_cmdbuf_header **p_header)
+{
+ struct vmw_cmdbuf_header *header;
+ int ret = 0;
+
+ *p_header = NULL;
+
+ header = kzalloc(sizeof(*header), GFP_KERNEL);
+ if (!header)
+ return ERR_PTR(-ENOMEM);
+
+ if (size <= VMW_CMDBUF_INLINE_SIZE)
+ ret = vmw_cmdbuf_space_inline(man, header, size);
+ else
+ ret = vmw_cmdbuf_space_pool(man, header, size, interruptible);
+
+ if (ret) {
+ kfree(header);
+ return ERR_PTR(ret);
+ }
+
+ header->man = man;
+ INIT_LIST_HEAD(&header->list);
+ header->cb_header->status = SVGA_CB_STATUS_NONE;
+ *p_header = header;
+
+ return header->cmd;
+}
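A minimal caller sketch (hypothetical, not from the patch) tying vmw_cmdbuf_alloc() to the reserve/commit pair defined further down; cmd_size and my_command are placeholders:

	struct vmw_cmdbuf_header *header;
	void *cmd;

	cmd = vmw_cmdbuf_alloc(man, cmd_size, true, &header);
	if (IS_ERR(cmd))
		return PTR_ERR(cmd);

	/* Reserve the space and fill in the actual command bytes. */
	cmd = vmw_cmdbuf_reserve(man, cmd_size, SVGA3D_INVALID_ID, true, header);
	memcpy(cmd, my_command, cmd_size);

	/* Commit; the manager now owns and eventually frees the header. */
	vmw_cmdbuf_commit(man, cmd_size, header, true);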
+
+/**
+ * vmw_cmdbuf_reserve_cur - Reserve space for commands in the current
+ * command buffer.
+ *
+ * @man: The command buffer manager.
+ * @size: The requested size of the commands.
+ * @ctx_id: The context id if any. Otherwise set to SVGA3D_INVALID_ID.
+ * @interruptible: Whether to sleep interruptible while waiting for space.
+ *
+ * Returns a pointer to command buffer space if successful. Otherwise
+ * returns an error pointer.
+ */
+static void *vmw_cmdbuf_reserve_cur(struct vmw_cmdbuf_man *man,
+ size_t size,
+ int ctx_id,
+ bool interruptible)
+{
+ struct vmw_cmdbuf_header *cur;
+ void *ret;
+
+ if (vmw_cmdbuf_cur_lock(man, interruptible))
+ return ERR_PTR(-ERESTARTSYS);
+
+ cur = man->cur;
+ if (cur && (size + man->cur_pos > cur->size ||
+ ((cur->cb_header->flags & SVGA_CB_FLAG_DX_CONTEXT) &&
+ ctx_id != cur->cb_header->dxContext)))
+ __vmw_cmdbuf_cur_flush(man);
+
+ if (!man->cur) {
+ ret = vmw_cmdbuf_alloc(man,
+ max_t(size_t, size, man->default_size),
+ interruptible, &man->cur);
+ if (IS_ERR(ret)) {
+ vmw_cmdbuf_cur_unlock(man);
+ return ret;
+ }
+
+ cur = man->cur;
+ }
+
+ if (ctx_id != SVGA3D_INVALID_ID) {
+ cur->cb_header->flags |= SVGA_CB_FLAG_DX_CONTEXT;
+ cur->cb_header->dxContext = ctx_id;
+ }
+
+ cur->reserved = size;
+
+ return (void *) (man->cur->cmd + man->cur_pos);
+}
+
+/**
+ * vmw_cmdbuf_commit_cur - Commit commands in the current command buffer.
+ *
+ * @man: The command buffer manager.
+ * @size: The size of the commands actually written.
+ * @flush: Whether to flush the command buffer immediately.
+ */
+static void vmw_cmdbuf_commit_cur(struct vmw_cmdbuf_man *man,
+ size_t size, bool flush)
+{
+ struct vmw_cmdbuf_header *cur = man->cur;
+
+ WARN_ON(!mutex_is_locked(&man->cur_mutex));
+
+ WARN_ON(size > cur->reserved);
+ man->cur_pos += size;
+ if (!size)
+ cur->cb_header->flags &= ~SVGA_CB_FLAG_DX_CONTEXT;
+ if (flush)
+ __vmw_cmdbuf_cur_flush(man);
+ vmw_cmdbuf_cur_unlock(man);
+}
+
+/**
+ * vmw_cmdbuf_reserve - Reserve space for commands in a command buffer.
+ *
+ * @man: The command buffer manager.
+ * @size: The requested size of the commands.
+ * @ctx_id: The context id if any. Otherwise set to SVGA3D_INVALID_ID.
+ * @interruptible: Whether to sleep interruptible while waiting for space.
+ * @header: Header of the command buffer. NULL if the current command buffer
+ * should be used.
+ *
+ * Returns a pointer to command buffer space if successful. Otherwise
+ * returns an error pointer.
+ */
+void *vmw_cmdbuf_reserve(struct vmw_cmdbuf_man *man, size_t size,
+ int ctx_id, bool interruptible,
+ struct vmw_cmdbuf_header *header)
+{
+ if (!header)
+ return vmw_cmdbuf_reserve_cur(man, size, ctx_id, interruptible);
+
+ if (size > header->size)
+ return ERR_PTR(-EINVAL);
+
+ if (ctx_id != SVGA3D_INVALID_ID) {
+ header->cb_header->flags |= SVGA_CB_FLAG_DX_CONTEXT;
+ header->cb_header->dxContext = ctx_id;
+ }
+
+ header->reserved = size;
+ return header->cmd;
+}
+
+/**
+ * vmw_cmdbuf_commit - Commit commands in a command buffer.
+ *
+ * @man: The command buffer manager.
+ * @size: The size of the commands actually written.
+ * @header: Header of the command buffer. NULL if the current command buffer
+ * should be used.
+ * @flush: Whether to flush the command buffer immediately.
+ */
+void vmw_cmdbuf_commit(struct vmw_cmdbuf_man *man, size_t size,
+ struct vmw_cmdbuf_header *header, bool flush)
+{
+ if (!header) {
+ vmw_cmdbuf_commit_cur(man, size, flush);
+ return;
+ }
+
+ (void) vmw_cmdbuf_cur_lock(man, false);
+ __vmw_cmdbuf_cur_flush(man);
+ WARN_ON(size > header->reserved);
+ man->cur = header;
+ man->cur_pos = size;
+ if (!size)
+ header->cb_header->flags &= ~SVGA_CB_FLAG_DX_CONTEXT;
+ if (flush)
+ __vmw_cmdbuf_cur_flush(man);
+ vmw_cmdbuf_cur_unlock(man);
+}
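For small kernel-internal submissions the same pair can be used with a NULL header, in which case the shared current buffer is used. A sketch (hypothetical caller, cmd_size a placeholder):

	void *cmd = vmw_cmdbuf_reserve(man, cmd_size, SVGA3D_INVALID_ID,
				       true, NULL);
	if (IS_ERR(cmd))
		return PTR_ERR(cmd);

	/* ... write at most cmd_size bytes of commands at cmd ... */

	/* Commit into the current buffer; flushed later or on demand. */
	vmw_cmdbuf_commit(man, cmd_size, NULL, false);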
+
+/**
+ * vmw_cmdbuf_tasklet_schedule - Schedule the interrupt handler bottom half.
+ *
+ * @man: The command buffer manager.
+ */
+void vmw_cmdbuf_tasklet_schedule(struct vmw_cmdbuf_man *man)
+{
+ if (!man)
+ return;
+
+ tasklet_schedule(&man->tasklet);
+}
+
+/**
+ * vmw_cmdbuf_send_device_command - Send a command through the device context.
+ *
+ * @man: The command buffer manager.
+ * @command: Pointer to the command to send.
+ * @size: Size of the command.
+ *
+ * Synchronously sends a device context command.
+ */
+static int vmw_cmdbuf_send_device_command(struct vmw_cmdbuf_man *man,
+ const void *command,
+ size_t size)
+{
+ struct vmw_cmdbuf_header *header;
+ int status;
+ void *cmd = vmw_cmdbuf_alloc(man, size, false, &header);
+
+ if (IS_ERR(cmd))
+ return PTR_ERR(cmd);
+
+ memcpy(cmd, command, size);
+ header->cb_header->length = size;
+ header->cb_context = SVGA_CB_CONTEXT_DEVICE;
+ spin_lock_bh(&man->lock);
+ status = vmw_cmdbuf_header_submit(header);
+ spin_unlock_bh(&man->lock);
+ vmw_cmdbuf_header_free(header);
+
+ if (status != SVGA_CB_STATUS_COMPLETED) {
+ DRM_ERROR("Device context command failed with status %d\n",
+ status);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * vmw_cmdbuf_startstop - Send a start / stop command through the device
+ * context.
+ *
+ * @man: The command buffer manager.
+ * @enable: Whether to enable or disable the context.
+ *
+ * Synchronously sends a device start / stop context command.
+ */
+static int vmw_cmdbuf_startstop(struct vmw_cmdbuf_man *man,
+ bool enable)
+{
+ struct {
+ uint32 id;
+ SVGADCCmdStartStop body;
+ } __packed cmd;
+
+ cmd.id = SVGA_DC_CMD_START_STOP_CONTEXT;
+ cmd.body.enable = (enable) ? 1 : 0;
+ cmd.body.context = SVGA_CB_CONTEXT_0;
+
+ return vmw_cmdbuf_send_device_command(man, &cmd, sizeof(cmd));
+}
+
+/**
+ * vmw_cmdbuf_set_pool_size - Set command buffer manager sizes
+ *
+ * @man: The command buffer manager.
+ * @size: The size of the main space pool.
+ * @default_size: The default size of the command buffer for small kernel
+ * submissions.
+ *
+ * Set the size and allocate the main command buffer space pool,
+ * as well as the default size of the command buffer for
+ * small kernel submissions. If successful, this enables large command
+ * submissions. Note that this function requires that rudimentary command
+ * submission is already available and that the MOB memory manager is alive.
+ * Returns 0 on success. Negative error code on failure.
+ */
+int vmw_cmdbuf_set_pool_size(struct vmw_cmdbuf_man *man,
+ size_t size, size_t default_size)
+{
+ struct vmw_private *dev_priv = man->dev_priv;
+ bool dummy;
+ int ret;
+
+ if (man->has_pool)
+ return -EINVAL;
+
+ /* First, try to allocate a huge chunk of DMA memory */
+ size = PAGE_ALIGN(size);
+ man->map = dma_alloc_coherent(&dev_priv->dev->pdev->dev, size,
+ &man->handle, GFP_KERNEL);
+ if (man->map) {
+ man->using_mob = false;
+ } else {
+ /*
+ * DMA memory failed. If we can have command buffers in a
+ * MOB, try to use that instead. Note that this will
+ * actually call into the already enabled manager, when
+ * binding the MOB.
+ */
+ if (!(dev_priv->capabilities & SVGA_CAP_DX))
+ return -ENOMEM;
+
+ ret = ttm_bo_create(&dev_priv->bdev, size, ttm_bo_type_device,
+ &vmw_mob_ne_placement, 0, false, NULL,
+ &man->cmd_space);
+ if (ret)
+ return ret;
+
+ man->using_mob = true;
+ ret = ttm_bo_kmap(man->cmd_space, 0, size >> PAGE_SHIFT,
+ &man->map_obj);
+ if (ret)
+ goto out_no_map;
+
+ man->map = ttm_kmap_obj_virtual(&man->map_obj, &dummy);
+ }
+
+ man->size = size;
+ drm_mm_init(&man->mm, 0, size >> PAGE_SHIFT);
+
+ man->has_pool = true;
+ man->default_size = default_size;
+ DRM_INFO("Using command buffers with %s pool.\n",
+ (man->using_mob) ? "MOB" : "DMA");
+
+ return 0;
+
+out_no_map:
+ if (man->using_mob)
+ ttm_bo_unref(&man->cmd_space);
+
+ return ret;
+}
+
+/**
+ * vmw_cmdbuf_man_create: Create a command buffer manager and enable it for
+ * inline command buffer submissions only.
+ *
+ * @dev_priv: Pointer to device private structure.
+ *
+ * Returns a pointer to a command buffer manager on success or an error
+ * pointer on failure. The command buffer manager will be enabled for
+ * submissions of size VMW_CMDBUF_INLINE_SIZE only.
+ */
+struct vmw_cmdbuf_man *vmw_cmdbuf_man_create(struct vmw_private *dev_priv)
+{
+ struct vmw_cmdbuf_man *man;
+ struct vmw_cmdbuf_context *ctx;
+ int i;
+ int ret;
+
+ if (!(dev_priv->capabilities & SVGA_CAP_COMMAND_BUFFERS))
+ return ERR_PTR(-ENOSYS);
+
+ man = kzalloc(sizeof(*man), GFP_KERNEL);
+ if (!man)
+ return ERR_PTR(-ENOMEM);
+
+ man->headers = dma_pool_create("vmwgfx cmdbuf",
+ &dev_priv->dev->pdev->dev,
+ sizeof(SVGACBHeader),
+ 64, PAGE_SIZE);
+ if (!man->headers) {
+ ret = -ENOMEM;
+ goto out_no_pool;
+ }
+
+ man->dheaders = dma_pool_create("vmwgfx inline cmdbuf",
+ &dev_priv->dev->pdev->dev,
+ sizeof(struct vmw_cmdbuf_dheader),
+ 64, PAGE_SIZE);
+ if (!man->dheaders) {
+ ret = -ENOMEM;
+ goto out_no_dpool;
+ }
+
+ for_each_cmdbuf_ctx(man, i, ctx)
+ vmw_cmdbuf_ctx_init(ctx);
+
+ INIT_LIST_HEAD(&man->error);
+ spin_lock_init(&man->lock);
+ mutex_init(&man->cur_mutex);
+ mutex_init(&man->space_mutex);
+ tasklet_init(&man->tasklet, vmw_cmdbuf_man_tasklet,
+ (unsigned long) man);
+ man->default_size = VMW_CMDBUF_INLINE_SIZE;
+ init_waitqueue_head(&man->alloc_queue);
+ init_waitqueue_head(&man->idle_queue);
+ man->dev_priv = dev_priv;
+ man->max_hw_submitted = SVGA_CB_MAX_QUEUED_PER_CONTEXT - 1;
+ INIT_WORK(&man->work, &vmw_cmdbuf_work_func);
+ vmw_generic_waiter_add(dev_priv, SVGA_IRQFLAG_ERROR,
+ &dev_priv->error_waiters);
+ ret = vmw_cmdbuf_startstop(man, true);
+ if (ret) {
+ DRM_ERROR("Failed starting command buffer context 0.\n");
+ vmw_cmdbuf_man_destroy(man);
+ return ERR_PTR(ret);
+ }
+
+ return man;
+
+out_no_dpool:
+ dma_pool_destroy(man->headers);
+out_no_pool:
+ kfree(man);
+
+ return ERR_PTR(ret);
+}
+
+/**
+ * vmw_cmdbuf_remove_pool - Take down the main buffer space pool.
+ *
+ * @man: Pointer to a command buffer manager.
+ *
+ * This function removes the main buffer space pool, and should be called
+ * before MOB memory management is removed. When this function has been called,
+ * only small command buffer submissions of size VMW_CMDBUF_INLINE_SIZE or
+ * less are allowed, and the default size of the command buffer for small kernel
+ * submissions is also set to this size.
+ */
+void vmw_cmdbuf_remove_pool(struct vmw_cmdbuf_man *man)
+{
+ if (!man->has_pool)
+ return;
+
+ man->has_pool = false;
+ man->default_size = VMW_CMDBUF_INLINE_SIZE;
+ (void) vmw_cmdbuf_idle(man, false, 10*HZ);
+ if (man->using_mob) {
+ (void) ttm_bo_kunmap(&man->map_obj);
+ ttm_bo_unref(&man->cmd_space);
+ } else {
+ dma_free_coherent(&man->dev_priv->dev->pdev->dev,
+ man->size, man->map, man->handle);
+ }
+}
+
+/**
+ * vmw_cmdbuf_man_destroy - Take down a command buffer manager.
+ *
+ * @man: Pointer to a command buffer manager.
+ *
+ * This function idles and then destroys a command buffer manager.
+ */
+void vmw_cmdbuf_man_destroy(struct vmw_cmdbuf_man *man)
+{
+ WARN_ON_ONCE(man->has_pool);
+ (void) vmw_cmdbuf_idle(man, false, 10*HZ);
+ if (vmw_cmdbuf_startstop(man, false))
+ DRM_ERROR("Failed stopping command buffer context 0.\n");
+
+ vmw_generic_waiter_remove(man->dev_priv, SVGA_IRQFLAG_ERROR,
+ &man->dev_priv->error_waiters);
+ tasklet_kill(&man->tasklet);
+ (void) cancel_work_sync(&man->work);
+ dma_pool_destroy(man->dheaders);
+ dma_pool_destroy(man->headers);
+ mutex_destroy(&man->cur_mutex);
+ mutex_destroy(&man->space_mutex);
+ kfree(man);
+}
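Taken together, the intended manager lifecycle appears to be the following; the pool and default sizes are made-up values for illustration:

	man = vmw_cmdbuf_man_create(dev_priv);
	if (IS_ERR(man))
		return PTR_ERR(man);
	/* Only inline (<= VMW_CMDBUF_INLINE_SIZE) submissions work here. */

	ret = vmw_cmdbuf_set_pool_size(man, 2*1024*1024, 32*1024);
	/* On success, large pool-backed submissions are enabled too. */

	/* Teardown, in reverse order: */
	vmw_cmdbuf_remove_pool(man);
	vmw_cmdbuf_man_destroy(man);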
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c
index 21e9b7f8dad0..13db8a2851ed 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c
@@ -1,6 +1,6 @@
/**************************************************************************
*
- * Copyright © 2014 VMware, Inc., Palo Alto, CA., USA
+ * Copyright © 2014-2015 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -26,15 +26,10 @@
**************************************************************************/
#include "vmwgfx_drv.h"
+#include "vmwgfx_resource_priv.h"
#define VMW_CMDBUF_RES_MAN_HT_ORDER 12
-enum vmw_cmdbuf_res_state {
- VMW_CMDBUF_RES_COMMITED,
- VMW_CMDBUF_RES_ADD,
- VMW_CMDBUF_RES_DEL
-};
-
/**
* struct vmw_cmdbuf_res - Command buffer managed resource entry.
*
@@ -132,9 +127,12 @@ void vmw_cmdbuf_res_commit(struct list_head *list)
list_for_each_entry_safe(entry, next, list, head) {
list_del(&entry->head);
+ if (entry->res->func->commit_notify)
+ entry->res->func->commit_notify(entry->res,
+ entry->state);
switch (entry->state) {
case VMW_CMDBUF_RES_ADD:
- entry->state = VMW_CMDBUF_RES_COMMITED;
+ entry->state = VMW_CMDBUF_RES_COMMITTED;
list_add_tail(&entry->head, &entry->man->list);
break;
case VMW_CMDBUF_RES_DEL:
@@ -175,7 +173,7 @@ void vmw_cmdbuf_res_revert(struct list_head *list)
&entry->hash);
list_del(&entry->head);
list_add_tail(&entry->head, &entry->man->list);
- entry->state = VMW_CMDBUF_RES_COMMITED;
+ entry->state = VMW_CMDBUF_RES_COMMITTED;
break;
default:
BUG();
@@ -231,6 +229,9 @@ out_invalid_key:
* @res_type: The resource type.
* @user_key: The user-space id of the resource.
* @list: The staging list.
+ * @res_p: If the resource is in an already committed state, points to the
+ * struct vmw_resource on successful return. The returned pointer is not
+ * reference-counted.
*
* This function looks up the struct vmw_cmdbuf_res entry from the manager
* hash table and, if it exists, removes it. Depending on its current staging
@@ -240,7 +241,8 @@ out_invalid_key:
int vmw_cmdbuf_res_remove(struct vmw_cmdbuf_res_manager *man,
enum vmw_cmdbuf_res_type res_type,
u32 user_key,
- struct list_head *list)
+ struct list_head *list,
+ struct vmw_resource **res_p)
{
struct vmw_cmdbuf_res *entry;
struct drm_hash_item *hash;
@@ -256,12 +258,14 @@ int vmw_cmdbuf_res_remove(struct vmw_cmdbuf_res_manager *man,
switch (entry->state) {
case VMW_CMDBUF_RES_ADD:
vmw_cmdbuf_res_free(man, entry);
+ *res_p = NULL;
break;
- case VMW_CMDBUF_RES_COMMITED:
+ case VMW_CMDBUF_RES_COMMITTED:
(void) drm_ht_remove_item(&man->resources, &entry->hash);
list_del(&entry->head);
entry->state = VMW_CMDBUF_RES_DEL;
list_add_tail(&entry->head, list);
+ *res_p = entry->res;
break;
default:
BUG();
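With the new @res_p out-parameter a caller can tell whether a committed resource was staged for deletion. A hypothetical use (the resource type value is an assumption, not shown in this hunk):

	struct vmw_resource *res;
	int ret;

	ret = vmw_cmdbuf_res_remove(man, vmw_cmdbuf_res_shader, user_key,
				    list, &res);
	if (ret == 0 && res) {
		/*
		 * The entry was already committed; res points, without
		 * holding a reference, to the resource now staged for
		 * deletion on the list.
		 */
	}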
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
index 5ac92874404d..443d1ed00de7 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
@@ -1,6 +1,6 @@
/**************************************************************************
*
- * Copyright © 2009-2012 VMware, Inc., Palo Alto, CA., USA
+ * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -27,19 +27,19 @@
#include "vmwgfx_drv.h"
#include "vmwgfx_resource_priv.h"
+#include "vmwgfx_binding.h"
#include "ttm/ttm_placement.h"
struct vmw_user_context {
struct ttm_base_object base;
struct vmw_resource res;
- struct vmw_ctx_binding_state cbs;
+ struct vmw_ctx_binding_state *cbs;
struct vmw_cmdbuf_res_manager *man;
+ struct vmw_resource *cotables[SVGA_COTABLE_DX10_MAX];
+ spinlock_t cotable_lock;
+ struct vmw_dma_buffer *dx_query_mob;
};
-
-
-typedef int (*vmw_scrub_func)(struct vmw_ctx_bindinfo *, bool);
-
static void vmw_user_context_free(struct vmw_resource *res);
static struct vmw_resource *
vmw_user_context_base_to_res(struct ttm_base_object *base);
@@ -51,12 +51,14 @@ static int vmw_gb_context_unbind(struct vmw_resource *res,
bool readback,
struct ttm_validate_buffer *val_buf);
static int vmw_gb_context_destroy(struct vmw_resource *res);
-static int vmw_context_scrub_shader(struct vmw_ctx_bindinfo *bi, bool rebind);
-static int vmw_context_scrub_render_target(struct vmw_ctx_bindinfo *bi,
- bool rebind);
-static int vmw_context_scrub_texture(struct vmw_ctx_bindinfo *bi, bool rebind);
-static void vmw_context_binding_state_scrub(struct vmw_ctx_binding_state *cbs);
-static void vmw_context_binding_state_kill(struct vmw_ctx_binding_state *cbs);
+static int vmw_dx_context_create(struct vmw_resource *res);
+static int vmw_dx_context_bind(struct vmw_resource *res,
+ struct ttm_validate_buffer *val_buf);
+static int vmw_dx_context_unbind(struct vmw_resource *res,
+ bool readback,
+ struct ttm_validate_buffer *val_buf);
+static int vmw_dx_context_destroy(struct vmw_resource *res);
+
static uint64_t vmw_user_context_size;
static const struct vmw_user_resource_conv user_context_conv = {
@@ -93,15 +95,38 @@ static const struct vmw_res_func vmw_gb_context_func = {
.unbind = vmw_gb_context_unbind
};
-static const vmw_scrub_func vmw_scrub_funcs[vmw_ctx_binding_max] = {
- [vmw_ctx_binding_shader] = vmw_context_scrub_shader,
- [vmw_ctx_binding_rt] = vmw_context_scrub_render_target,
- [vmw_ctx_binding_tex] = vmw_context_scrub_texture };
+static const struct vmw_res_func vmw_dx_context_func = {
+ .res_type = vmw_res_dx_context,
+ .needs_backup = true,
+ .may_evict = true,
+ .type_name = "dx contexts",
+ .backup_placement = &vmw_mob_placement,
+ .create = vmw_dx_context_create,
+ .destroy = vmw_dx_context_destroy,
+ .bind = vmw_dx_context_bind,
+ .unbind = vmw_dx_context_unbind
+};
/**
* Context management:
*/
+static void vmw_context_cotables_unref(struct vmw_user_context *uctx)
+{
+ struct vmw_resource *res;
+ int i;
+
+ for (i = 0; i < SVGA_COTABLE_DX10_MAX; ++i) {
+ spin_lock(&uctx->cotable_lock);
+ res = uctx->cotables[i];
+ uctx->cotables[i] = NULL;
+ spin_unlock(&uctx->cotable_lock);
+
+ if (res)
+ vmw_resource_unreference(&res);
+ }
+}
+
static void vmw_hw_context_destroy(struct vmw_resource *res)
{
struct vmw_user_context *uctx =
@@ -113,17 +138,19 @@ static void vmw_hw_context_destroy(struct vmw_resource *res)
} *cmd;
- if (res->func->destroy == vmw_gb_context_destroy) {
+ if (res->func->destroy == vmw_gb_context_destroy ||
+ res->func->destroy == vmw_dx_context_destroy) {
mutex_lock(&dev_priv->cmdbuf_mutex);
vmw_cmdbuf_res_man_destroy(uctx->man);
mutex_lock(&dev_priv->binding_mutex);
- (void) vmw_context_binding_state_kill(&uctx->cbs);
- (void) vmw_gb_context_destroy(res);
+ vmw_binding_state_kill(uctx->cbs);
+ (void) res->func->destroy(res);
mutex_unlock(&dev_priv->binding_mutex);
if (dev_priv->pinned_bo != NULL &&
!dev_priv->query_cid_valid)
__vmw_execbuf_release_pinned_bo(dev_priv, NULL);
mutex_unlock(&dev_priv->cmdbuf_mutex);
+ vmw_context_cotables_unref(uctx);
return;
}
@@ -135,43 +162,67 @@ static void vmw_hw_context_destroy(struct vmw_resource *res)
return;
}
- cmd->header.id = cpu_to_le32(SVGA_3D_CMD_CONTEXT_DESTROY);
- cmd->header.size = cpu_to_le32(sizeof(cmd->body));
- cmd->body.cid = cpu_to_le32(res->id);
+ cmd->header.id = SVGA_3D_CMD_CONTEXT_DESTROY;
+ cmd->header.size = sizeof(cmd->body);
+ cmd->body.cid = res->id;
vmw_fifo_commit(dev_priv, sizeof(*cmd));
- vmw_3d_resource_dec(dev_priv, false);
+ vmw_fifo_resource_dec(dev_priv);
}
static int vmw_gb_context_init(struct vmw_private *dev_priv,
+ bool dx,
struct vmw_resource *res,
- void (*res_free) (struct vmw_resource *res))
+ void (*res_free)(struct vmw_resource *res))
{
- int ret;
+ int ret, i;
struct vmw_user_context *uctx =
container_of(res, struct vmw_user_context, res);
+ res->backup_size = (dx ? sizeof(SVGADXContextMobFormat) :
+ SVGA3D_CONTEXT_DATA_SIZE);
ret = vmw_resource_init(dev_priv, res, true,
- res_free, &vmw_gb_context_func);
- res->backup_size = SVGA3D_CONTEXT_DATA_SIZE;
+ res_free,
+ dx ? &vmw_dx_context_func :
+ &vmw_gb_context_func);
if (unlikely(ret != 0))
goto out_err;
if (dev_priv->has_mob) {
uctx->man = vmw_cmdbuf_res_man_create(dev_priv);
- if (unlikely(IS_ERR(uctx->man))) {
+ if (IS_ERR(uctx->man)) {
ret = PTR_ERR(uctx->man);
uctx->man = NULL;
goto out_err;
}
}
- memset(&uctx->cbs, 0, sizeof(uctx->cbs));
- INIT_LIST_HEAD(&uctx->cbs.list);
+ uctx->cbs = vmw_binding_state_alloc(dev_priv);
+ if (IS_ERR(uctx->cbs)) {
+ ret = PTR_ERR(uctx->cbs);
+ goto out_err;
+ }
+
+ spin_lock_init(&uctx->cotable_lock);
+
+ if (dx) {
+ for (i = 0; i < SVGA_COTABLE_DX10_MAX; ++i) {
+ uctx->cotables[i] = vmw_cotable_alloc(dev_priv,
+ &uctx->res, i);
+ if (unlikely(uctx->cotables[i] == NULL)) {
+ ret = -ENOMEM;
+ goto out_cotables;
+ }
+ }
+ }
+
+
vmw_resource_activate(res, vmw_hw_context_destroy);
return 0;
+out_cotables:
+ vmw_context_cotables_unref(uctx);
out_err:
if (res_free)
res_free(res);
@@ -182,7 +233,8 @@ out_err:
static int vmw_context_init(struct vmw_private *dev_priv,
struct vmw_resource *res,
- void (*res_free) (struct vmw_resource *res))
+ void (*res_free)(struct vmw_resource *res),
+ bool dx)
{
int ret;
@@ -192,7 +244,7 @@ static int vmw_context_init(struct vmw_private *dev_priv,
} *cmd;
if (dev_priv->has_mob)
- return vmw_gb_context_init(dev_priv, res, res_free);
+ return vmw_gb_context_init(dev_priv, dx, res, res_free);
ret = vmw_resource_init(dev_priv, res, false,
res_free, &vmw_legacy_context_func);
@@ -215,12 +267,12 @@ static int vmw_context_init(struct vmw_private *dev_priv,
return -ENOMEM;
}
- cmd->header.id = cpu_to_le32(SVGA_3D_CMD_CONTEXT_DEFINE);
- cmd->header.size = cpu_to_le32(sizeof(cmd->body));
- cmd->body.cid = cpu_to_le32(res->id);
+ cmd->header.id = SVGA_3D_CMD_CONTEXT_DEFINE;
+ cmd->header.size = sizeof(cmd->body);
+ cmd->body.cid = res->id;
vmw_fifo_commit(dev_priv, sizeof(*cmd));
- (void) vmw_3d_resource_inc(dev_priv, false);
+ vmw_fifo_resource_inc(dev_priv);
vmw_resource_activate(res, vmw_hw_context_destroy);
return 0;
@@ -232,19 +284,10 @@ out_early:
return ret;
}
-struct vmw_resource *vmw_context_alloc(struct vmw_private *dev_priv)
-{
- struct vmw_resource *res = kmalloc(sizeof(*res), GFP_KERNEL);
- int ret;
-
- if (unlikely(res == NULL))
- return NULL;
-
- ret = vmw_context_init(dev_priv, res, NULL);
-
- return (ret == 0) ? res : NULL;
-}
+/*
+ * GB context.
+ */
static int vmw_gb_context_create(struct vmw_resource *res)
{
@@ -281,7 +324,7 @@ static int vmw_gb_context_create(struct vmw_resource *res)
cmd->header.size = sizeof(cmd->body);
cmd->body.cid = res->id;
vmw_fifo_commit(dev_priv, sizeof(*cmd));
- (void) vmw_3d_resource_inc(dev_priv, false);
+ vmw_fifo_resource_inc(dev_priv);
return 0;
@@ -309,7 +352,6 @@ static int vmw_gb_context_bind(struct vmw_resource *res,
"binding.\n");
return -ENOMEM;
}
-
cmd->header.id = SVGA_3D_CMD_BIND_GB_CONTEXT;
cmd->header.size = sizeof(cmd->body);
cmd->body.cid = res->id;
@@ -346,7 +388,7 @@ static int vmw_gb_context_unbind(struct vmw_resource *res,
BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
mutex_lock(&dev_priv->binding_mutex);
- vmw_context_binding_state_scrub(&uctx->cbs);
+ vmw_binding_state_scrub(uctx->cbs);
submit_size = sizeof(*cmd2) + (readback ? sizeof(*cmd1) : 0);
@@ -414,7 +456,231 @@ static int vmw_gb_context_destroy(struct vmw_resource *res)
if (dev_priv->query_cid == res->id)
dev_priv->query_cid_valid = false;
vmw_resource_release_id(res);
- vmw_3d_resource_dec(dev_priv, false);
+ vmw_fifo_resource_dec(dev_priv);
+
+ return 0;
+}
+
+/*
+ * DX context.
+ */
+
+static int vmw_dx_context_create(struct vmw_resource *res)
+{
+ struct vmw_private *dev_priv = res->dev_priv;
+ int ret;
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDXDefineContext body;
+ } *cmd;
+
+ if (likely(res->id != -1))
+ return 0;
+
+ ret = vmw_resource_alloc_id(res);
+ if (unlikely(ret != 0)) {
+ DRM_ERROR("Failed to allocate a context id.\n");
+ goto out_no_id;
+ }
+
+ if (unlikely(res->id >= VMWGFX_NUM_DXCONTEXT)) {
+ ret = -EBUSY;
+ goto out_no_fifo;
+ }
+
+ cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+ if (unlikely(cmd == NULL)) {
+ DRM_ERROR("Failed reserving FIFO space for context "
+ "creation.\n");
+ ret = -ENOMEM;
+ goto out_no_fifo;
+ }
+
+ cmd->header.id = SVGA_3D_CMD_DX_DEFINE_CONTEXT;
+ cmd->header.size = sizeof(cmd->body);
+ cmd->body.cid = res->id;
+ vmw_fifo_commit(dev_priv, sizeof(*cmd));
+ vmw_fifo_resource_inc(dev_priv);
+
+ return 0;
+
+out_no_fifo:
+ vmw_resource_release_id(res);
+out_no_id:
+ return ret;
+}
+
+static int vmw_dx_context_bind(struct vmw_resource *res,
+ struct ttm_validate_buffer *val_buf)
+{
+ struct vmw_private *dev_priv = res->dev_priv;
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDXBindContext body;
+ } *cmd;
+ struct ttm_buffer_object *bo = val_buf->bo;
+
+ BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
+
+ cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+ if (unlikely(cmd == NULL)) {
+ DRM_ERROR("Failed reserving FIFO space for context "
+ "binding.\n");
+ return -ENOMEM;
+ }
+
+ cmd->header.id = SVGA_3D_CMD_DX_BIND_CONTEXT;
+ cmd->header.size = sizeof(cmd->body);
+ cmd->body.cid = res->id;
+ cmd->body.mobid = bo->mem.start;
+ cmd->body.validContents = res->backup_dirty;
+ res->backup_dirty = false;
+ vmw_fifo_commit(dev_priv, sizeof(*cmd));
+
+
+ return 0;
+}
+
+/**
+ * vmw_dx_context_scrub_cotables - Scrub all bindings and
+ * cotables from a context
+ *
+ * @ctx: Pointer to the context resource
+ * @readback: Whether to save the cotable contents on scrubbing.
+ *
+ * Cotables must be unbound before their context, but unbinding requires
+ * the backup buffer to be reserved, whereas scrubbing does not.
+ * This function scrubs all cotables of a context, potentially reading back
+ * the contents into their backup buffers. However, scrubbing cotables
+ * also makes the device context invalid, so all bindings are scrubbed first,
+ * so that this doesn't have to be done later with an invalid context.
+ */
+void vmw_dx_context_scrub_cotables(struct vmw_resource *ctx,
+ bool readback)
+{
+ struct vmw_user_context *uctx =
+ container_of(ctx, struct vmw_user_context, res);
+ int i;
+
+ vmw_binding_state_scrub(uctx->cbs);
+ for (i = 0; i < SVGA_COTABLE_DX10_MAX; ++i) {
+ struct vmw_resource *res;
+
+ /* Avoid racing with ongoing cotable destruction. */
+ spin_lock(&uctx->cotable_lock);
+ res = uctx->cotables[vmw_cotable_scrub_order[i]];
+ if (res)
+ res = vmw_resource_reference_unless_doomed(res);
+ spin_unlock(&uctx->cotable_lock);
+ if (!res)
+ continue;
+
+ WARN_ON(vmw_cotable_scrub(res, readback));
+ vmw_resource_unreference(&res);
+ }
+}
+
+static int vmw_dx_context_unbind(struct vmw_resource *res,
+ bool readback,
+ struct ttm_validate_buffer *val_buf)
+{
+ struct vmw_private *dev_priv = res->dev_priv;
+ struct ttm_buffer_object *bo = val_buf->bo;
+ struct vmw_fence_obj *fence;
+ struct vmw_user_context *uctx =
+ container_of(res, struct vmw_user_context, res);
+
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDXReadbackContext body;
+ } *cmd1;
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDXBindContext body;
+ } *cmd2;
+ uint32_t submit_size;
+ uint8_t *cmd;
+
+
+ BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
+
+ mutex_lock(&dev_priv->binding_mutex);
+ vmw_dx_context_scrub_cotables(res, readback);
+
+ if (uctx->dx_query_mob && uctx->dx_query_mob->dx_query_ctx &&
+ readback) {
+ WARN_ON(uctx->dx_query_mob->dx_query_ctx != res);
+ if (vmw_query_readback_all(uctx->dx_query_mob))
+ DRM_ERROR("Failed to read back query states\n");
+ }
+
+ submit_size = sizeof(*cmd2) + (readback ? sizeof(*cmd1) : 0);
+
+ cmd = vmw_fifo_reserve(dev_priv, submit_size);
+ if (unlikely(cmd == NULL)) {
+ DRM_ERROR("Failed reserving FIFO space for context "
+ "unbinding.\n");
+ mutex_unlock(&dev_priv->binding_mutex);
+ return -ENOMEM;
+ }
+
+ cmd2 = (void *) cmd;
+ if (readback) {
+ cmd1 = (void *) cmd;
+ cmd1->header.id = SVGA_3D_CMD_DX_READBACK_CONTEXT;
+ cmd1->header.size = sizeof(cmd1->body);
+ cmd1->body.cid = res->id;
+ cmd2 = (void *) (&cmd1[1]);
+ }
+ cmd2->header.id = SVGA_3D_CMD_DX_BIND_CONTEXT;
+ cmd2->header.size = sizeof(cmd2->body);
+ cmd2->body.cid = res->id;
+ cmd2->body.mobid = SVGA3D_INVALID_ID;
+
+ vmw_fifo_commit(dev_priv, submit_size);
+ mutex_unlock(&dev_priv->binding_mutex);
+
+ /*
+ * Create a fence object and fence the backup buffer.
+ */
+
+ (void) vmw_execbuf_fence_commands(NULL, dev_priv,
+ &fence, NULL);
+
+ vmw_fence_single_bo(bo, fence);
+
+ if (likely(fence != NULL))
+ vmw_fence_obj_unreference(&fence);
+
+ return 0;
+}
+
+static int vmw_dx_context_destroy(struct vmw_resource *res)
+{
+ struct vmw_private *dev_priv = res->dev_priv;
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDXDestroyContext body;
+ } *cmd;
+
+ if (likely(res->id == -1))
+ return 0;
+
+ cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+ if (unlikely(cmd == NULL)) {
+ DRM_ERROR("Failed reserving FIFO space for context "
+ "destruction.\n");
+ return -ENOMEM;
+ }
+
+ cmd->header.id = SVGA_3D_CMD_DX_DESTROY_CONTEXT;
+ cmd->header.size = sizeof(cmd->body);
+ cmd->body.cid = res->id;
+ vmw_fifo_commit(dev_priv, sizeof(*cmd));
+ if (dev_priv->query_cid == res->id)
+ dev_priv->query_cid_valid = false;
+ vmw_resource_release_id(res);
+ vmw_fifo_resource_dec(dev_priv);
return 0;
}
@@ -435,6 +701,11 @@ static void vmw_user_context_free(struct vmw_resource *res)
container_of(res, struct vmw_user_context, res);
struct vmw_private *dev_priv = res->dev_priv;
+ if (ctx->cbs)
+ vmw_binding_state_free(ctx->cbs);
+
+ (void) vmw_context_bind_dx_query(res, NULL);
+
ttm_base_object_kfree(ctx, base);
ttm_mem_global_free(vmw_mem_glob(dev_priv),
vmw_user_context_size);
@@ -465,8 +736,8 @@ int vmw_context_destroy_ioctl(struct drm_device *dev, void *data,
return ttm_ref_object_base_unref(tfile, arg->cid, TTM_REF_USAGE);
}
-int vmw_context_define_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+static int vmw_context_define(struct drm_device *dev, void *data,
+ struct drm_file *file_priv, bool dx)
{
struct vmw_private *dev_priv = vmw_priv(dev);
struct vmw_user_context *ctx;
@@ -476,6 +747,10 @@ int vmw_context_define_ioctl(struct drm_device *dev, void *data,
struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
int ret;
+ if (!dev_priv->has_dx && dx) {
+ DRM_ERROR("DX contexts not supported by device.\n");
+ return -EINVAL;
+ }
/*
* Approximate idr memory usage with 128 bytes. It will be limited
@@ -516,7 +791,7 @@ int vmw_context_define_ioctl(struct drm_device *dev, void *data,
* From here on, the destructor takes over resource freeing.
*/
- ret = vmw_context_init(dev_priv, res, vmw_user_context_free);
+ ret = vmw_context_init(dev_priv, res, vmw_user_context_free, dx);
if (unlikely(ret != 0))
goto out_unlock;
@@ -535,387 +810,128 @@ out_err:
out_unlock:
ttm_read_unlock(&dev_priv->reservation_sem);
return ret;
-
-}
-
-/**
- * vmw_context_scrub_shader - scrub a shader binding from a context.
- *
- * @bi: single binding information.
- * @rebind: Whether to issue a bind instead of scrub command.
- */
-static int vmw_context_scrub_shader(struct vmw_ctx_bindinfo *bi, bool rebind)
-{
- struct vmw_private *dev_priv = bi->ctx->dev_priv;
- struct {
- SVGA3dCmdHeader header;
- SVGA3dCmdSetShader body;
- } *cmd;
-
- cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
- if (unlikely(cmd == NULL)) {
- DRM_ERROR("Failed reserving FIFO space for shader "
- "unbinding.\n");
- return -ENOMEM;
- }
-
- cmd->header.id = SVGA_3D_CMD_SET_SHADER;
- cmd->header.size = sizeof(cmd->body);
- cmd->body.cid = bi->ctx->id;
- cmd->body.type = bi->i1.shader_type;
- cmd->body.shid = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
- vmw_fifo_commit(dev_priv, sizeof(*cmd));
-
- return 0;
-}
-
-/**
- * vmw_context_scrub_render_target - scrub a render target binding
- * from a context.
- *
- * @bi: single binding information.
- * @rebind: Whether to issue a bind instead of scrub command.
- */
-static int vmw_context_scrub_render_target(struct vmw_ctx_bindinfo *bi,
- bool rebind)
-{
- struct vmw_private *dev_priv = bi->ctx->dev_priv;
- struct {
- SVGA3dCmdHeader header;
- SVGA3dCmdSetRenderTarget body;
- } *cmd;
-
- cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
- if (unlikely(cmd == NULL)) {
- DRM_ERROR("Failed reserving FIFO space for render target "
- "unbinding.\n");
- return -ENOMEM;
- }
-
- cmd->header.id = SVGA_3D_CMD_SETRENDERTARGET;
- cmd->header.size = sizeof(cmd->body);
- cmd->body.cid = bi->ctx->id;
- cmd->body.type = bi->i1.rt_type;
- cmd->body.target.sid = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
- cmd->body.target.face = 0;
- cmd->body.target.mipmap = 0;
- vmw_fifo_commit(dev_priv, sizeof(*cmd));
-
- return 0;
}
-/**
- * vmw_context_scrub_texture - scrub a texture binding from a context.
- *
- * @bi: single binding information.
- * @rebind: Whether to issue a bind instead of scrub command.
- *
- * TODO: Possibly complement this function with a function that takes
- * a list of texture bindings and combines them to a single command.
- */
-static int vmw_context_scrub_texture(struct vmw_ctx_bindinfo *bi,
- bool rebind)
-{
- struct vmw_private *dev_priv = bi->ctx->dev_priv;
- struct {
- SVGA3dCmdHeader header;
- struct {
- SVGA3dCmdSetTextureState c;
- SVGA3dTextureState s1;
- } body;
- } *cmd;
-
- cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
- if (unlikely(cmd == NULL)) {
- DRM_ERROR("Failed reserving FIFO space for texture "
- "unbinding.\n");
- return -ENOMEM;
- }
-
-
- cmd->header.id = SVGA_3D_CMD_SETTEXTURESTATE;
- cmd->header.size = sizeof(cmd->body);
- cmd->body.c.cid = bi->ctx->id;
- cmd->body.s1.stage = bi->i1.texture_stage;
- cmd->body.s1.name = SVGA3D_TS_BIND_TEXTURE;
- cmd->body.s1.value = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
- vmw_fifo_commit(dev_priv, sizeof(*cmd));
-
- return 0;
-}
-
-/**
- * vmw_context_binding_drop: Stop tracking a context binding
- *
- * @cb: Pointer to binding tracker storage.
- *
- * Stops tracking a context binding, and re-initializes its storage.
- * Typically used when the context binding is replaced with a binding to
- * another (or the same, for that matter) resource.
- */
-static void vmw_context_binding_drop(struct vmw_ctx_binding *cb)
+int vmw_context_define_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
{
- list_del(&cb->ctx_list);
- if (!list_empty(&cb->res_list))
- list_del(&cb->res_list);
- cb->bi.ctx = NULL;
+ return vmw_context_define(dev, data, file_priv, false);
}
-/**
- * vmw_context_binding_add: Start tracking a context binding
- *
- * @cbs: Pointer to the context binding state tracker.
- * @bi: Information about the binding to track.
- *
- * Performs basic checks on the binding to make sure arguments are within
- * bounds and then starts tracking the binding in the context binding
- * state structure @cbs.
- */
-int vmw_context_binding_add(struct vmw_ctx_binding_state *cbs,
- const struct vmw_ctx_bindinfo *bi)
+int vmw_extended_context_define_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
{
- struct vmw_ctx_binding *loc;
-
- switch (bi->bt) {
- case vmw_ctx_binding_rt:
- if (unlikely((unsigned)bi->i1.rt_type >= SVGA3D_RT_MAX)) {
- DRM_ERROR("Illegal render target type %u.\n",
- (unsigned) bi->i1.rt_type);
- return -EINVAL;
- }
- loc = &cbs->render_targets[bi->i1.rt_type];
- break;
- case vmw_ctx_binding_tex:
- if (unlikely((unsigned)bi->i1.texture_stage >=
- SVGA3D_NUM_TEXTURE_UNITS)) {
- DRM_ERROR("Illegal texture/sampler unit %u.\n",
- (unsigned) bi->i1.texture_stage);
- return -EINVAL;
- }
- loc = &cbs->texture_units[bi->i1.texture_stage];
- break;
- case vmw_ctx_binding_shader:
- if (unlikely((unsigned)bi->i1.shader_type >=
- SVGA3D_SHADERTYPE_MAX)) {
- DRM_ERROR("Illegal shader type %u.\n",
- (unsigned) bi->i1.shader_type);
- return -EINVAL;
- }
- loc = &cbs->shaders[bi->i1.shader_type];
- break;
+ union drm_vmw_extended_context_arg *arg = (typeof(arg)) data;
+ struct drm_vmw_context_arg *rep = &arg->rep;
+
+ switch (arg->req) {
+ case drm_vmw_context_legacy:
+ return vmw_context_define(dev, rep, file_priv, false);
+ case drm_vmw_context_dx:
+ return vmw_context_define(dev, rep, file_priv, true);
default:
- BUG();
- }
-
- if (loc->bi.ctx != NULL)
- vmw_context_binding_drop(loc);
-
- loc->bi = *bi;
- loc->bi.scrubbed = false;
- list_add_tail(&loc->ctx_list, &cbs->list);
- INIT_LIST_HEAD(&loc->res_list);
-
- return 0;
-}
-
-/**
- * vmw_context_binding_transfer: Transfer a context binding tracking entry.
- *
- * @cbs: Pointer to the persistent context binding state tracker.
- * @bi: Information about the binding to track.
- *
- */
-static void vmw_context_binding_transfer(struct vmw_ctx_binding_state *cbs,
- const struct vmw_ctx_bindinfo *bi)
-{
- struct vmw_ctx_binding *loc;
-
- switch (bi->bt) {
- case vmw_ctx_binding_rt:
- loc = &cbs->render_targets[bi->i1.rt_type];
break;
- case vmw_ctx_binding_tex:
- loc = &cbs->texture_units[bi->i1.texture_stage];
- break;
- case vmw_ctx_binding_shader:
- loc = &cbs->shaders[bi->i1.shader_type];
- break;
- default:
- BUG();
- }
-
- if (loc->bi.ctx != NULL)
- vmw_context_binding_drop(loc);
-
- if (bi->res != NULL) {
- loc->bi = *bi;
- list_add_tail(&loc->ctx_list, &cbs->list);
- list_add_tail(&loc->res_list, &bi->res->binding_head);
}
+ return -EINVAL;
}
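From userspace, the new request/reply union would presumably be driven through libdrm's command ioctl helper. A minimal, hedged sketch — only the union layout and the request values are taken from the code above, the helper name and error handling are illustrative:

	#include <stdint.h>
	#include <xf86drm.h>
	#include "vmwgfx_drm.h"

	/* Illustrative userspace sketch: request a DX context. */
	static int example_create_dx_context(int fd, uint32_t *cid)
	{
		union drm_vmw_extended_context_arg arg = {
			.req = drm_vmw_context_dx,
		};
		int ret;

		ret = drmCommandWriteRead(fd, DRM_VMW_CREATE_EXTENDED_CONTEXT,
					  &arg, sizeof(arg));
		if (ret)
			return ret;	/* -EINVAL if the device lacks DX */

		*cid = arg.rep.cid;
		return 0;
	}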
/**
- * vmw_context_binding_kill - Kill a binding on the device
- * and stop tracking it.
- *
- * @cb: Pointer to binding tracker storage.
- *
- * Emits FIFO commands to scrub a binding represented by @cb.
- * Then stops tracking the binding and re-initializes its storage.
- */
-static void vmw_context_binding_kill(struct vmw_ctx_binding *cb)
-{
- if (!cb->bi.scrubbed) {
- (void) vmw_scrub_funcs[cb->bi.bt](&cb->bi, false);
- cb->bi.scrubbed = true;
- }
- vmw_context_binding_drop(cb);
-}
-
-/**
- * vmw_context_binding_state_kill - Kill all bindings associated with a
- * struct vmw_ctx_binding state structure, and re-initialize the structure.
+ * vmw_context_binding_list - Return a list of context bindings
*
- * @cbs: Pointer to the context binding state tracker.
+ * @ctx: The context resource
*
- * Emits commands to scrub all bindings associated with the
- * context binding state tracker. Then re-initializes the whole structure.
+ * Returns the current list of bindings of the given context. Note that
+ * this list becomes stale as soon as the dev_priv::binding_mutex is unlocked.
*/
-static void vmw_context_binding_state_kill(struct vmw_ctx_binding_state *cbs)
+struct list_head *vmw_context_binding_list(struct vmw_resource *ctx)
{
- struct vmw_ctx_binding *entry, *next;
+ struct vmw_user_context *uctx =
+ container_of(ctx, struct vmw_user_context, res);
- list_for_each_entry_safe(entry, next, &cbs->list, ctx_list)
- vmw_context_binding_kill(entry);
+ return vmw_binding_state_list(uctx->cbs);
}
-/**
- * vmw_context_binding_state_scrub - Scrub all bindings associated with a
- * struct vmw_ctx_binding state structure.
- *
- * @cbs: Pointer to the context binding state tracker.
- *
- * Emits commands to scrub all bindings associated with the
- * context binding state tracker.
- */
-static void vmw_context_binding_state_scrub(struct vmw_ctx_binding_state *cbs)
+struct vmw_cmdbuf_res_manager *vmw_context_res_man(struct vmw_resource *ctx)
{
- struct vmw_ctx_binding *entry;
-
- list_for_each_entry(entry, &cbs->list, ctx_list) {
- if (!entry->bi.scrubbed) {
- (void) vmw_scrub_funcs[entry->bi.bt](&entry->bi, false);
- entry->bi.scrubbed = true;
- }
- }
+ return container_of(ctx, struct vmw_user_context, res)->man;
}
-/**
- * vmw_context_binding_res_list_kill - Kill all bindings on a
- * resource binding list
- *
- * @head: list head of resource binding list
- *
- * Kills all bindings associated with a specific resource. Typically
- * called before the resource is destroyed.
- */
-void vmw_context_binding_res_list_kill(struct list_head *head)
+struct vmw_resource *vmw_context_cotable(struct vmw_resource *ctx,
+ SVGACOTableType cotable_type)
{
- struct vmw_ctx_binding *entry, *next;
+ if (cotable_type >= SVGA_COTABLE_DX10_MAX)
+ return ERR_PTR(-EINVAL);
- list_for_each_entry_safe(entry, next, head, res_list)
- vmw_context_binding_kill(entry);
+ return vmw_resource_reference
+ (container_of(ctx, struct vmw_user_context, res)->
+ cotables[cotable_type]);
}
/**
- * vmw_context_binding_res_list_scrub - Scrub all bindings on a
- * resource binding list
+ * vmw_context_binding_state -
+ * Return a pointer to a context binding state structure
*
- * @head: list head of resource binding list
+ * @ctx: The context resource
*
- * Scrub all bindings associated with a specific resource. Typically
- * called before the resource is evicted.
+ * Returns the current state of bindings of the given context. Note that
+ * this state becomes stale as soon as the dev_priv::binding_mutex is unlocked.
*/
-void vmw_context_binding_res_list_scrub(struct list_head *head)
+struct vmw_ctx_binding_state *
+vmw_context_binding_state(struct vmw_resource *ctx)
{
- struct vmw_ctx_binding *entry;
-
- list_for_each_entry(entry, head, res_list) {
- if (!entry->bi.scrubbed) {
- (void) vmw_scrub_funcs[entry->bi.bt](&entry->bi, false);
- entry->bi.scrubbed = true;
- }
- }
+ return container_of(ctx, struct vmw_user_context, res)->cbs;
}
/**
- * vmw_context_binding_state_transfer - Commit staged binding info
+ * vmw_context_bind_dx_query -
+ * Sets the query MOB for the context. If @mob is NULL, this function will
+ * remove the association between the MOB and the context. This function
+ * assumes the binding_mutex is held.
*
- * @ctx: Pointer to context to commit the staged binding info to.
- * @from: Staged binding info built during execbuf.
+ * @ctx_res: The context resource
+ * @mob: a reference to the query MOB
*
- * Transfers binding info from a temporary structure to the persistent
- * structure in the context. This can be done once commands
+ * Returns -EINVAL if a MOB has already been set and does not match the one
+ * specified in the parameter. 0 otherwise.
*/
-void vmw_context_binding_state_transfer(struct vmw_resource *ctx,
- struct vmw_ctx_binding_state *from)
+int vmw_context_bind_dx_query(struct vmw_resource *ctx_res,
+ struct vmw_dma_buffer *mob)
{
struct vmw_user_context *uctx =
- container_of(ctx, struct vmw_user_context, res);
- struct vmw_ctx_binding *entry, *next;
-
- list_for_each_entry_safe(entry, next, &from->list, ctx_list)
- vmw_context_binding_transfer(&uctx->cbs, &entry->bi);
-}
+ container_of(ctx_res, struct vmw_user_context, res);
-/**
- * vmw_context_rebind_all - Rebind all scrubbed bindings of a context
- *
- * @ctx: The context resource
- *
- * Walks through the context binding list and rebinds all scrubbed
- * resources.
- */
-int vmw_context_rebind_all(struct vmw_resource *ctx)
-{
- struct vmw_ctx_binding *entry;
- struct vmw_user_context *uctx =
- container_of(ctx, struct vmw_user_context, res);
- struct vmw_ctx_binding_state *cbs = &uctx->cbs;
- int ret;
+ if (mob == NULL) {
+ if (uctx->dx_query_mob) {
+ uctx->dx_query_mob->dx_query_ctx = NULL;
+ vmw_dmabuf_unreference(&uctx->dx_query_mob);
+ uctx->dx_query_mob = NULL;
+ }
- list_for_each_entry(entry, &cbs->list, ctx_list) {
- if (likely(!entry->bi.scrubbed))
- continue;
+ return 0;
+ }
- if (WARN_ON(entry->bi.res == NULL || entry->bi.res->id ==
- SVGA3D_INVALID_ID))
- continue;
+ /* Can only have one MOB per context for queries */
+ if (uctx->dx_query_mob && uctx->dx_query_mob != mob)
+ return -EINVAL;
- ret = vmw_scrub_funcs[entry->bi.bt](&entry->bi, true);
- if (unlikely(ret != 0))
- return ret;
+ mob->dx_query_ctx = ctx_res;
- entry->bi.scrubbed = false;
- }
+ if (!uctx->dx_query_mob)
+ uctx->dx_query_mob = vmw_dmabuf_reference(mob);
return 0;
}
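A minimal caller-side sketch of the pairing described in the kerneldoc above, assuming the caller already holds dev_priv->binding_mutex as required; the helper name is hypothetical:

	/* Hypothetical helper, not part of the patch. */
	static int example_attach_query_mob(struct vmw_resource *ctx_res,
					    struct vmw_dma_buffer *mob)
	{
		int ret;

		/* Associate the MOB; -EINVAL if a different MOB is bound. */
		ret = vmw_context_bind_dx_query(ctx_res, mob);
		if (ret)
			return ret;

		/* ... submit query commands that reference the MOB ... */

		/* Passing NULL drops the association and the reference. */
		return vmw_context_bind_dx_query(ctx_res, NULL);
	}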
/**
- * vmw_context_binding_list - Return a list of context bindings
- *
- * @ctx: The context resource
+ * vmw_context_get_dx_query_mob - Returns a non-counted reference to the
+ * DX query MOB
*
- * Returns the current list of bindings of the given context. Note that
- * this list becomes stale as soon as the dev_priv::binding_mutex is unlocked.
+ * @ctx_res: The context resource
*/
-struct list_head *vmw_context_binding_list(struct vmw_resource *ctx)
+struct vmw_dma_buffer *
+vmw_context_get_dx_query_mob(struct vmw_resource *ctx_res)
{
- return &(container_of(ctx, struct vmw_user_context, res)->cbs.list);
-}
+ struct vmw_user_context *uctx =
+ container_of(ctx_res, struct vmw_user_context, res);
-struct vmw_cmdbuf_res_manager *vmw_context_res_man(struct vmw_resource *ctx)
-{
- return container_of(ctx, struct vmw_user_context, res)->man;
+ return uctx->dx_query_mob;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
new file mode 100644
index 000000000000..ce659a125f2b
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
@@ -0,0 +1,662 @@
+/**************************************************************************
+ *
+ * Copyright © 2014-2015 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * Treat context OTables as resources to make use of the resource
+ * backing MOB eviction mechanism, which is used to read back the COTable
+ * whenever the backing MOB is evicted.
+ */
+
+#include "vmwgfx_drv.h"
+#include "vmwgfx_resource_priv.h"
+#include <ttm/ttm_placement.h>
+#include "vmwgfx_so.h"
+
+/**
+ * struct vmw_cotable - Context Object Table resource
+ *
+ * @res: struct vmw_resource we are deriving from.
+ * @ctx: non-refcounted pointer to the owning context.
+ * @size_read_back: Size of data read back during eviction.
+ * @seen_entries: Seen entries in command stream for this cotable.
+ * @type: The cotable type.
+ * @scrubbed: Whether the cotable has been scrubbed.
+ * @resource_list: List of resources in the cotable.
+ */
+struct vmw_cotable {
+ struct vmw_resource res;
+ struct vmw_resource *ctx;
+ size_t size_read_back;
+ int seen_entries;
+ u32 type;
+ bool scrubbed;
+ struct list_head resource_list;
+};
+
+/**
+ * struct vmw_cotable_info - Static info about cotable types
+ *
+ * @min_initial_entries: Min number of initial entries at cotable allocation
+ * for this cotable type.
+ * @size: Size of each entry.
+ * @unbind_func: Optional function called when the cotable is scrubbed or
+ * unbound, used to scrub or destroy the resources on the cotable's
+ * resource list.
+ */
+struct vmw_cotable_info {
+ u32 min_initial_entries;
+ u32 size;
+ void (*unbind_func)(struct vmw_private *, struct list_head *,
+ bool);
+};
+
+static const struct vmw_cotable_info co_info[] = {
+ {1, sizeof(SVGACOTableDXRTViewEntry), &vmw_view_cotable_list_destroy},
+ {1, sizeof(SVGACOTableDXDSViewEntry), &vmw_view_cotable_list_destroy},
+ {1, sizeof(SVGACOTableDXSRViewEntry), &vmw_view_cotable_list_destroy},
+ {1, sizeof(SVGACOTableDXElementLayoutEntry), NULL},
+ {1, sizeof(SVGACOTableDXBlendStateEntry), NULL},
+ {1, sizeof(SVGACOTableDXDepthStencilEntry), NULL},
+ {1, sizeof(SVGACOTableDXRasterizerStateEntry), NULL},
+ {1, sizeof(SVGACOTableDXSamplerEntry), NULL},
+ {1, sizeof(SVGACOTableDXStreamOutputEntry), NULL},
+ {1, sizeof(SVGACOTableDXQueryEntry), NULL},
+ {1, sizeof(SVGACOTableDXShaderEntry), &vmw_dx_shader_cotable_list_scrub}
+};
+
+/*
+ * Cotables with bindings that we remove must be scrubbed first;
+ * otherwise the device will swap in an invalid context when we remove
+ * the bindings before scrubbing the cotable.
+ */
+const SVGACOTableType vmw_cotable_scrub_order[] = {
+ SVGA_COTABLE_RTVIEW,
+ SVGA_COTABLE_DSVIEW,
+ SVGA_COTABLE_SRVIEW,
+ SVGA_COTABLE_DXSHADER,
+ SVGA_COTABLE_ELEMENTLAYOUT,
+ SVGA_COTABLE_BLENDSTATE,
+ SVGA_COTABLE_DEPTHSTENCIL,
+ SVGA_COTABLE_RASTERIZERSTATE,
+ SVGA_COTABLE_SAMPLER,
+ SVGA_COTABLE_STREAMOUTPUT,
+ SVGA_COTABLE_DXQUERY,
+};
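The consumer of this table is not visible in this hunk; the intended use is presumably an ordered walk when scrubbing a DX context. A sketch, assuming the vmw_context_cotable() lookup added earlier in this patch (which returns a referenced resource):

	/* Illustrative sketch: scrub cotables in the documented order. */
	static void example_scrub_in_order(struct vmw_resource *ctx,
					   bool readback)
	{
		int i;

		for (i = 0; i < ARRAY_SIZE(vmw_cotable_scrub_order); ++i) {
			struct vmw_resource *res =
				vmw_context_cotable(ctx,
						    vmw_cotable_scrub_order[i]);

			if (IS_ERR(res))
				continue;
			(void) vmw_cotable_scrub(res, readback);
			vmw_resource_unreference(&res);
		}
	}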
+
+static int vmw_cotable_bind(struct vmw_resource *res,
+ struct ttm_validate_buffer *val_buf);
+static int vmw_cotable_unbind(struct vmw_resource *res,
+ bool readback,
+ struct ttm_validate_buffer *val_buf);
+static int vmw_cotable_create(struct vmw_resource *res);
+static int vmw_cotable_destroy(struct vmw_resource *res);
+
+static const struct vmw_res_func vmw_cotable_func = {
+ .res_type = vmw_res_cotable,
+ .needs_backup = true,
+ .may_evict = true,
+ .type_name = "context guest backed object tables",
+ .backup_placement = &vmw_mob_placement,
+ .create = vmw_cotable_create,
+ .destroy = vmw_cotable_destroy,
+ .bind = vmw_cotable_bind,
+ .unbind = vmw_cotable_unbind,
+};
+
+/**
+ * vmw_cotable - Convert a struct vmw_resource pointer to a struct
+ * vmw_cotable pointer
+ *
+ * @res: Pointer to the resource.
+ */
+static struct vmw_cotable *vmw_cotable(struct vmw_resource *res)
+{
+ return container_of(res, struct vmw_cotable, res);
+}
+
+/**
+ * vmw_cotable_destroy - Cotable resource destroy callback
+ *
+ * @res: Pointer to the cotable resource.
+ *
+ * There is no device cotable destroy command, so this function only
+ * makes sure that the resource id is set to invalid.
+ */
+static int vmw_cotable_destroy(struct vmw_resource *res)
+{
+ res->id = -1;
+ return 0;
+}
+
+/**
+ * vmw_cotable_unscrub - Undo a cotable scrub operation
+ *
+ * @res: Pointer to the cotable resource
+ *
+ * This function issues commands to (re)bind the cotable to
+ * its backing mob, which needs to be validated and reserved at this point.
+ * This is identical to bind() except the function interface looks different.
+ */
+static int vmw_cotable_unscrub(struct vmw_resource *res)
+{
+ struct vmw_cotable *vcotbl = vmw_cotable(res);
+ struct vmw_private *dev_priv = res->dev_priv;
+ struct ttm_buffer_object *bo = &res->backup->base;
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDXSetCOTable body;
+ } *cmd;
+
+ WARN_ON_ONCE(bo->mem.mem_type != VMW_PL_MOB);
+ lockdep_assert_held(&bo->resv->lock.base);
+
+ cmd = vmw_fifo_reserve_dx(dev_priv, sizeof(*cmd), SVGA3D_INVALID_ID);
+ if (!cmd) {
+ DRM_ERROR("Failed reserving FIFO space for cotable "
+ "binding.\n");
+ return -ENOMEM;
+ }
+
+ WARN_ON(vcotbl->ctx->id == SVGA3D_INVALID_ID);
+ WARN_ON(bo->mem.mem_type != VMW_PL_MOB);
+ cmd->header.id = SVGA_3D_CMD_DX_SET_COTABLE;
+ cmd->header.size = sizeof(cmd->body);
+ cmd->body.cid = vcotbl->ctx->id;
+ cmd->body.type = vcotbl->type;
+ cmd->body.mobid = bo->mem.start;
+ cmd->body.validSizeInBytes = vcotbl->size_read_back;
+
+ vmw_fifo_commit_flush(dev_priv, sizeof(*cmd));
+ vcotbl->scrubbed = false;
+
+ return 0;
+}
+
+/**
+ * vmw_cotable_bind - Cotable resource bind callback
+ *
+ * @res: Pointer to the cotable resource
+ * @val_buf: Pointer to a struct ttm_validate_buffer prepared by the caller
+ * for convenience / fencing.
+ *
+ * This function issues commands to (re)bind the cotable to
+ * its backing mob, which needs to be validated and reserved at this point.
+ */
+static int vmw_cotable_bind(struct vmw_resource *res,
+ struct ttm_validate_buffer *val_buf)
+{
+ /*
+ * The create() callback may have changed @res->backup without
+ * the caller noticing, and with val_buf->bo still pointing to
+ * the old backup buffer. Although hackish, and not used currently,
+ * take the opportunity to correct the value here so that it's not
+ * misused in the future.
+ */
+ val_buf->bo = &res->backup->base;
+
+ return vmw_cotable_unscrub(res);
+}
+
+/**
+ * vmw_cotable_scrub - Scrub the cotable from the device.
+ *
+ * @res: Pointer to the cotable resource.
+ * @readback: Whether to initiate a readback of the cotable data to the backup
+ * buffer.
+ *
+ * In some situations (context swapouts) it might be desirable to make the
+ * device forget about the cotable without performing a full unbind. A full
+ * unbind requires reserved backup buffers and it might not be possible to
+ * reserve them due to locking order violation issues. The vmw_cotable_scrub
+ * function implements a partial unbind() without that requirement but with the
+ * following restrictions.
+ * 1) Before the cotable is again used by the GPU, vmw_cotable_unscrub() must
+ * be called.
+ * 2) Before the cotable backing buffer is used by the CPU, or during the
+ * resource destruction, vmw_cotable_unbind() must be called.
+ */
+int vmw_cotable_scrub(struct vmw_resource *res, bool readback)
+{
+ struct vmw_cotable *vcotbl = vmw_cotable(res);
+ struct vmw_private *dev_priv = res->dev_priv;
+ size_t submit_size;
+
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDXReadbackCOTable body;
+ } *cmd0;
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDXSetCOTable body;
+ } *cmd1;
+
+ if (vcotbl->scrubbed)
+ return 0;
+
+ if (co_info[vcotbl->type].unbind_func)
+ co_info[vcotbl->type].unbind_func(dev_priv,
+ &vcotbl->resource_list,
+ readback);
+ submit_size = sizeof(*cmd1);
+ if (readback)
+ submit_size += sizeof(*cmd0);
+
+ cmd1 = vmw_fifo_reserve_dx(dev_priv, submit_size, SVGA3D_INVALID_ID);
+ if (!cmd1) {
+ DRM_ERROR("Failed reserving FIFO space for cotable "
+ "unbinding.\n");
+ return -ENOMEM;
+ }
+
+ vcotbl->size_read_back = 0;
+ if (readback) {
+ cmd0 = (void *) cmd1;
+ cmd0->header.id = SVGA_3D_CMD_DX_READBACK_COTABLE;
+ cmd0->header.size = sizeof(cmd0->body);
+ cmd0->body.cid = vcotbl->ctx->id;
+ cmd0->body.type = vcotbl->type;
+ cmd1 = (void *) &cmd0[1];
+ vcotbl->size_read_back = res->backup_size;
+ }
+ cmd1->header.id = SVGA_3D_CMD_DX_SET_COTABLE;
+ cmd1->header.size = sizeof(cmd1->body);
+ cmd1->body.cid = vcotbl->ctx->id;
+ cmd1->body.type = vcotbl->type;
+ cmd1->body.mobid = SVGA3D_INVALID_ID;
+ cmd1->body.validSizeInBytes = 0;
+ vmw_fifo_commit_flush(dev_priv, submit_size);
+ vcotbl->scrubbed = true;
+
+ /* Trigger a create() on next validate. */
+ res->id = -1;
+
+ return 0;
+}
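To make restriction 1) above concrete, a hypothetical swapout path would pair scrub and unscrub like this; locking and fencing are elided, and the helper itself is not part of the patch:

	/* Hypothetical context-swapout sketch. */
	static int example_cotable_swap(struct vmw_resource *cotable_res)
	{
		int ret;

		/* Make the device forget the cotable, reading data back. */
		ret = vmw_cotable_scrub(cotable_res, true);
		if (ret)
			return ret;

		/* ... the backing MOB may now be swapped out and in ... */

		/* Restriction 1: rebind before the GPU touches it again. */
		return vmw_cotable_unscrub(cotable_res);
	}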
+
+/**
+ * vmw_cotable_unbind - Cotable resource unbind callback
+ *
+ * @res: Pointer to the cotable resource.
+ * @readback: Whether to read back cotable data to the backup buffer.
+ * @val_buf: Pointer to a struct ttm_validate_buffer prepared by the caller
+ * for convenience / fencing.
+ *
+ * Unbinds the cotable from the device and fences the backup buffer.
+ */
+static int vmw_cotable_unbind(struct vmw_resource *res,
+ bool readback,
+ struct ttm_validate_buffer *val_buf)
+{
+ struct vmw_cotable *vcotbl = vmw_cotable(res);
+ struct vmw_private *dev_priv = res->dev_priv;
+ struct ttm_buffer_object *bo = val_buf->bo;
+ struct vmw_fence_obj *fence;
+
+ if (list_empty(&res->mob_head))
+ return 0;
+
+ WARN_ON_ONCE(bo->mem.mem_type != VMW_PL_MOB);
+ lockdep_assert_held(&bo->resv->lock.base);
+
+ mutex_lock(&dev_priv->binding_mutex);
+ if (!vcotbl->scrubbed)
+ vmw_dx_context_scrub_cotables(vcotbl->ctx, readback);
+ mutex_unlock(&dev_priv->binding_mutex);
+ (void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
+ vmw_fence_single_bo(bo, fence);
+ if (likely(fence != NULL))
+ vmw_fence_obj_unreference(&fence);
+
+	return 0;
+}
+
+/**
+ * vmw_cotable_readback - Read back a cotable without unbinding.
+ *
+ * @res: The cotable resource.
+ *
+ * Reads back a cotable to its backing mob without scrubbing the MOB from
+ * the cotable. The MOB is fenced for subsequent CPU access.
+ */
+static int vmw_cotable_readback(struct vmw_resource *res)
+{
+ struct vmw_cotable *vcotbl = vmw_cotable(res);
+ struct vmw_private *dev_priv = res->dev_priv;
+
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDXReadbackCOTable body;
+ } *cmd;
+ struct vmw_fence_obj *fence;
+
+ if (!vcotbl->scrubbed) {
+ cmd = vmw_fifo_reserve_dx(dev_priv, sizeof(*cmd),
+ SVGA3D_INVALID_ID);
+ if (!cmd) {
+ DRM_ERROR("Failed reserving FIFO space for cotable "
+ "readback.\n");
+ return -ENOMEM;
+ }
+ cmd->header.id = SVGA_3D_CMD_DX_READBACK_COTABLE;
+ cmd->header.size = sizeof(cmd->body);
+ cmd->body.cid = vcotbl->ctx->id;
+ cmd->body.type = vcotbl->type;
+ vcotbl->size_read_back = res->backup_size;
+ vmw_fifo_commit(dev_priv, sizeof(*cmd));
+ }
+
+ (void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
+ vmw_fence_single_bo(&res->backup->base, fence);
+ vmw_fence_obj_unreference(&fence);
+
+ return 0;
+}
+
+/**
+ * vmw_cotable_resize - Resize a cotable.
+ *
+ * @res: The cotable resource.
+ * @new_size: The new size.
+ *
+ * Resizes a cotable and binds the new backup buffer.
+ * On failure the cotable is left intact.
+ * Important! This function may not fail once the MOB switch has been
+ * committed to hardware. That would put the device context in an
+ * invalid state which we can't currently recover from.
+ */
+static int vmw_cotable_resize(struct vmw_resource *res, size_t new_size)
+{
+ struct vmw_private *dev_priv = res->dev_priv;
+ struct vmw_cotable *vcotbl = vmw_cotable(res);
+ struct vmw_dma_buffer *buf, *old_buf = res->backup;
+ struct ttm_buffer_object *bo, *old_bo = &res->backup->base;
+ size_t old_size = res->backup_size;
+ size_t old_size_read_back = vcotbl->size_read_back;
+ size_t cur_size_read_back;
+ struct ttm_bo_kmap_obj old_map, new_map;
+ int ret;
+ size_t i;
+
+ ret = vmw_cotable_readback(res);
+ if (ret)
+ return ret;
+
+ cur_size_read_back = vcotbl->size_read_back;
+ vcotbl->size_read_back = old_size_read_back;
+
+ /*
+	 * While the device is processing, allocate and reserve a buffer object
+ * for the new COTable. Initially pin the buffer object to make sure
+ * we can use tryreserve without failure.
+ */
+ buf = kzalloc(sizeof(*buf), GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ ret = vmw_dmabuf_init(dev_priv, buf, new_size, &vmw_mob_ne_placement,
+ true, vmw_dmabuf_bo_free);
+ if (ret) {
+ DRM_ERROR("Failed initializing new cotable MOB.\n");
+ return ret;
+ }
+
+ bo = &buf->base;
+ WARN_ON_ONCE(ttm_bo_reserve(bo, false, true, false, NULL));
+
+ ret = ttm_bo_wait(old_bo, false, false, false);
+ if (unlikely(ret != 0)) {
+ DRM_ERROR("Failed waiting for cotable unbind.\n");
+ goto out_wait;
+ }
+
+ /*
+ * Do a page by page copy of COTables. This eliminates slow vmap()s.
+ * This should really be a TTM utility.
+ */
+ for (i = 0; i < old_bo->num_pages; ++i) {
+ bool dummy;
+
+ ret = ttm_bo_kmap(old_bo, i, 1, &old_map);
+ if (unlikely(ret != 0)) {
+ DRM_ERROR("Failed mapping old COTable on resize.\n");
+ goto out_wait;
+ }
+ ret = ttm_bo_kmap(bo, i, 1, &new_map);
+ if (unlikely(ret != 0)) {
+ DRM_ERROR("Failed mapping new COTable on resize.\n");
+ goto out_map_new;
+ }
+ memcpy(ttm_kmap_obj_virtual(&new_map, &dummy),
+ ttm_kmap_obj_virtual(&old_map, &dummy),
+ PAGE_SIZE);
+ ttm_bo_kunmap(&new_map);
+ ttm_bo_kunmap(&old_map);
+ }
+
+ /* Unpin new buffer, and switch backup buffers. */
+ ret = ttm_bo_validate(bo, &vmw_mob_placement, false, false);
+ if (unlikely(ret != 0)) {
+ DRM_ERROR("Failed validating new COTable backup buffer.\n");
+ goto out_wait;
+ }
+
+ res->backup = buf;
+ res->backup_size = new_size;
+ vcotbl->size_read_back = cur_size_read_back;
+
+ /*
+ * Now tell the device to switch. If this fails, then we need to
+ * revert the full resize.
+ */
+ ret = vmw_cotable_unscrub(res);
+ if (ret) {
+ DRM_ERROR("Failed switching COTable backup buffer.\n");
+ res->backup = old_buf;
+ res->backup_size = old_size;
+ vcotbl->size_read_back = old_size_read_back;
+ goto out_wait;
+ }
+
+ /* Let go of the old mob. */
+ list_del(&res->mob_head);
+ list_add_tail(&res->mob_head, &buf->res_list);
+ vmw_dmabuf_unreference(&old_buf);
+ res->id = vcotbl->type;
+
+ return 0;
+
+out_map_new:
+ ttm_bo_kunmap(&old_map);
+out_wait:
+ ttm_bo_unreserve(bo);
+ vmw_dmabuf_unreference(&buf);
+
+ return ret;
+}
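The page-by-page copy above is the part the in-code comment says "should really be a TTM utility". A sketch of what such a helper might look like; ttm_bo_copy_pages is a hypothetical name, and both objects are assumed idle, reserved, kmappable and at least @num_pages long:

	/* Hypothetical generic TTM helper; not an existing API. */
	static int ttm_bo_copy_pages(struct ttm_buffer_object *dst,
				     struct ttm_buffer_object *src,
				     unsigned long num_pages)
	{
		struct ttm_bo_kmap_obj src_map, dst_map;
		unsigned long i;
		bool dummy;
		int ret;

		for (i = 0; i < num_pages; ++i) {
			ret = ttm_bo_kmap(src, i, 1, &src_map);
			if (ret)
				return ret;
			ret = ttm_bo_kmap(dst, i, 1, &dst_map);
			if (ret) {
				ttm_bo_kunmap(&src_map);
				return ret;
			}
			memcpy(ttm_kmap_obj_virtual(&dst_map, &dummy),
			       ttm_kmap_obj_virtual(&src_map, &dummy),
			       PAGE_SIZE);
			ttm_bo_kunmap(&dst_map);
			ttm_bo_kunmap(&src_map);
		}

		return 0;
	}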
+
+/**
+ * vmw_cotable_create - Cotable resource create callback
+ *
+ * @res: Pointer to a cotable resource.
+ *
+ * There is no separate create command for cotables, so this callback, which
+ * is called before bind() in the validation sequence, is instead used for
+ * two things:
+ * 1) Unscrub the cotable if it is scrubbed and still attached to a backup
+ * buffer, that is, if @res->mob_head is non-empty.
+ * 2) Resize the cotable if needed.
+ */
+static int vmw_cotable_create(struct vmw_resource *res)
+{
+ struct vmw_cotable *vcotbl = vmw_cotable(res);
+ size_t new_size = res->backup_size;
+ size_t needed_size;
+ int ret;
+
+ /* Check whether we need to resize the cotable */
+ needed_size = (vcotbl->seen_entries + 1) * co_info[vcotbl->type].size;
+ while (needed_size > new_size)
+ new_size *= 2;
+
+ if (likely(new_size <= res->backup_size)) {
+ if (vcotbl->scrubbed && !list_empty(&res->mob_head)) {
+ ret = vmw_cotable_unscrub(res);
+ if (ret)
+ return ret;
+ }
+ res->id = vcotbl->type;
+ return 0;
+ }
+
+ return vmw_cotable_resize(res, new_size);
+}
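To make the growth policy concrete, a worked example with an entry size assumed purely for illustration:

	/*
	 * Doubling illustration, assuming a 64-byte entry:
	 *   seen_entries = 100  ->  needed_size = (100 + 1) * 64 = 6464
	 *   new_size: 4096 -> 8192 (one doubling); 8192 > the current
	 *   4096-byte backup, so vmw_cotable_resize() is called.
	 */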
+
+/**
+ * vmw_hw_cotable_destroy - Cotable hw_destroy callback
+ *
+ * @res: Pointer to a cotable resource.
+ *
+ * The final (part of resource destruction) destroy callback.
+ */
+static void vmw_hw_cotable_destroy(struct vmw_resource *res)
+{
+ (void) vmw_cotable_destroy(res);
+}
+
+static size_t cotable_acc_size;
+
+/**
+ * vmw_cotable_free - Cotable resource destructor
+ *
+ * @res: Pointer to a cotable resource.
+ */
+static void vmw_cotable_free(struct vmw_resource *res)
+{
+ struct vmw_private *dev_priv = res->dev_priv;
+
+ kfree(res);
+ ttm_mem_global_free(vmw_mem_glob(dev_priv), cotable_acc_size);
+}
+
+/**
+ * vmw_cotable_alloc - Create a cotable resource
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @ctx: Pointer to the context resource. The cotable resource will not
+ * take a refcount on the context.
+ * @type: The cotable type.
+ */
+struct vmw_resource *vmw_cotable_alloc(struct vmw_private *dev_priv,
+ struct vmw_resource *ctx,
+ u32 type)
+{
+ struct vmw_cotable *vcotbl;
+ int ret;
+ u32 num_entries;
+
+ if (unlikely(cotable_acc_size == 0))
+ cotable_acc_size = ttm_round_pot(sizeof(struct vmw_cotable));
+
+ ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
+ cotable_acc_size, false, true);
+ if (unlikely(ret))
+ return ERR_PTR(ret);
+
+ vcotbl = kzalloc(sizeof(*vcotbl), GFP_KERNEL);
+ if (unlikely(vcotbl == NULL)) {
+ ret = -ENOMEM;
+ goto out_no_alloc;
+ }
+
+ ret = vmw_resource_init(dev_priv, &vcotbl->res, true,
+ vmw_cotable_free, &vmw_cotable_func);
+ if (unlikely(ret != 0))
+ goto out_no_init;
+
+ INIT_LIST_HEAD(&vcotbl->resource_list);
+ vcotbl->res.id = type;
+ vcotbl->res.backup_size = PAGE_SIZE;
+ num_entries = PAGE_SIZE / co_info[type].size;
+ if (num_entries < co_info[type].min_initial_entries) {
+ vcotbl->res.backup_size = co_info[type].min_initial_entries *
+ co_info[type].size;
+ vcotbl->res.backup_size =
+ (vcotbl->res.backup_size + PAGE_SIZE - 1) & PAGE_MASK;
+ }
+
+ vcotbl->scrubbed = true;
+ vcotbl->seen_entries = -1;
+ vcotbl->type = type;
+ vcotbl->ctx = ctx;
+
+ vmw_resource_activate(&vcotbl->res, vmw_hw_cotable_destroy);
+
+ return &vcotbl->res;
+
+out_no_init:
+ kfree(vcotbl);
+out_no_alloc:
+ ttm_mem_global_free(vmw_mem_glob(dev_priv), cotable_acc_size);
+ return ERR_PTR(ret);
+}
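The initial backup size computation above rounds up to a whole page with the usual power-of-two mask idiom; a small illustration with assumed values:

	/* Round-up illustration with assumed values:
	 * min_initial_entries = 100, entry size = 64, PAGE_SIZE = 4096. */
	size_t bytes = 100 * 64;                              /* 6400 */
	size_t backup = (bytes + PAGE_SIZE - 1) & PAGE_MASK;  /* 8192 */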
+
+/**
+ * vmw_cotable_notify - Notify the cotable about an item creation
+ *
+ * @res: Pointer to a cotable resource.
+ * @id: Item id.
+ */
+int vmw_cotable_notify(struct vmw_resource *res, int id)
+{
+ struct vmw_cotable *vcotbl = vmw_cotable(res);
+
+ if (id < 0 || id >= SVGA_COTABLE_MAX_IDS) {
+ DRM_ERROR("Illegal COTable id. Type is %u. Id is %d\n",
+ (unsigned) vcotbl->type, id);
+ return -EINVAL;
+ }
+
+ if (vcotbl->seen_entries < id) {
+ /* Trigger a call to create() on next validate */
+ res->id = -1;
+ vcotbl->seen_entries = id;
+ }
+
+ return 0;
+}
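A hypothetical call site: when a new item id is defined in the command stream, the cotable is notified so that the next validation can grow the backing MOB. The helper name is illustrative:

	/* Hypothetical call-site sketch for a newly defined item id. */
	static int example_define_item(struct vmw_resource *cotable_res,
				       int id)
	{
		int ret;

		ret = vmw_cotable_notify(cotable_res, id);
		if (ret)
			return ret;	/* id out of range */

		/* If id was unseen, res->id is now -1, forcing create()
		 * on the next validation, which grows the backing MOB. */
		return 0;
	}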
+
+/**
+ * vmw_cotable_add_resource - add a resource to the cotable's list of
+ * active resources.
+ *
+ * @res: pointer to a struct vmw_resource representing the cotable.
+ * @head: pointer to the struct list_head member of the resource, dedicated
+ * to the cotable active resource list.
+ */
+void vmw_cotable_add_resource(struct vmw_resource *res, struct list_head *head)
+{
+ struct vmw_cotable *vcotbl =
+ container_of(res, struct vmw_cotable, res);
+
+ list_add_tail(head, &vcotbl->resource_list);
+}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c
index 914b375763dc..299925a1f6c6 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c
@@ -1,6 +1,6 @@
/**************************************************************************
*
- * Copyright © 2011 VMware, Inc., Palo Alto, CA., USA
+ * Copyright © 2011-2015 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -32,25 +32,20 @@
/**
- * vmw_dmabuf_to_placement - Validate a buffer to placement.
+ * vmw_dmabuf_pin_in_placement - Validate a buffer to placement.
*
* @dev_priv: Driver private.
* @buf: DMA buffer to move.
- * @pin: Pin buffer if true.
+ * @placement: The placement to pin it in.
* @interruptible: Use interruptible wait.
*
- * May only be called by the current master since it assumes that the
- * master lock is the current master's lock.
- * This function takes the master's lock in write mode.
- * Flushes and unpins the query bo to avoid failures.
- *
* Returns
* -ERESTARTSYS if interrupted by a signal.
*/
-int vmw_dmabuf_to_placement(struct vmw_private *dev_priv,
- struct vmw_dma_buffer *buf,
- struct ttm_placement *placement,
- bool interruptible)
+int vmw_dmabuf_pin_in_placement(struct vmw_private *dev_priv,
+ struct vmw_dma_buffer *buf,
+ struct ttm_placement *placement,
+ bool interruptible)
{
struct ttm_buffer_object *bo = &buf->base;
int ret;
@@ -66,6 +61,8 @@ int vmw_dmabuf_to_placement(struct vmw_private *dev_priv,
goto err;
ret = ttm_bo_validate(bo, placement, interruptible, false);
+ if (!ret)
+ vmw_bo_pin_reserved(buf, true);
ttm_bo_unreserve(bo);
@@ -75,12 +72,10 @@ err:
}
/**
- * vmw_dmabuf_to_vram_or_gmr - Move a buffer to vram or gmr.
+ * vmw_dmabuf_pin_in_vram_or_gmr - Move a buffer to vram or gmr.
*
- * May only be called by the current master since it assumes that the
- * master lock is the current master's lock.
- * This function takes the master's lock in write mode.
- * Flushes and unpins the query bo if @pin == true to avoid failures.
+ * This function takes the reservation_sem in write mode.
+ * Flushes and unpins the query bo to avoid failures.
*
* @dev_priv: Driver private.
* @buf: DMA buffer to move.
@@ -90,55 +85,34 @@ err:
* Returns
* -ERESTARTSYS if interrupted by a signal.
*/
-int vmw_dmabuf_to_vram_or_gmr(struct vmw_private *dev_priv,
- struct vmw_dma_buffer *buf,
- bool pin, bool interruptible)
+int vmw_dmabuf_pin_in_vram_or_gmr(struct vmw_private *dev_priv,
+ struct vmw_dma_buffer *buf,
+ bool interruptible)
{
struct ttm_buffer_object *bo = &buf->base;
- struct ttm_placement *placement;
int ret;
ret = ttm_write_lock(&dev_priv->reservation_sem, interruptible);
if (unlikely(ret != 0))
return ret;
- if (pin)
- vmw_execbuf_release_pinned_bo(dev_priv);
+ vmw_execbuf_release_pinned_bo(dev_priv);
ret = ttm_bo_reserve(bo, interruptible, false, false, NULL);
if (unlikely(ret != 0))
goto err;
- /**
- * Put BO in VRAM if there is space, otherwise as a GMR.
- * If there is no space in VRAM and GMR ids are all used up,
- * start evicting GMRs to make room. If the DMA buffer can't be
- * used as a GMR, this will return -ENOMEM.
- */
-
- if (pin)
- placement = &vmw_vram_gmr_ne_placement;
- else
- placement = &vmw_vram_gmr_placement;
-
- ret = ttm_bo_validate(bo, placement, interruptible, false);
+ ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, interruptible,
+ false);
if (likely(ret == 0) || ret == -ERESTARTSYS)
- goto err_unreserve;
-
+ goto out_unreserve;
- /**
- * If that failed, try VRAM again, this time evicting
- * previous contents.
- */
-
- if (pin)
- placement = &vmw_vram_ne_placement;
- else
- placement = &vmw_vram_placement;
+ ret = ttm_bo_validate(bo, &vmw_vram_placement, interruptible, false);
- ret = ttm_bo_validate(bo, placement, interruptible, false);
+out_unreserve:
+ if (!ret)
+ vmw_bo_pin_reserved(buf, true);
-err_unreserve:
ttm_bo_unreserve(bo);
err:
ttm_write_unlock(&dev_priv->reservation_sem);
@@ -146,67 +120,50 @@ err:
}
/**
- * vmw_dmabuf_to_vram - Move a buffer to vram.
+ * vmw_dmabuf_pin_in_vram - Move a buffer to vram.
*
- * May only be called by the current master since it assumes that the
- * master lock is the current master's lock.
- * This function takes the master's lock in write mode.
+ * This function takes the reservation_sem in write mode.
+ * Flushes and unpins the query bo to avoid failures.
*
* @dev_priv: Driver private.
* @buf: DMA buffer to move.
- * @pin: Pin buffer in vram if true.
* @interruptible: Use interruptible wait.
*
* Returns
* -ERESTARTSYS if interrupted by a signal.
*/
-int vmw_dmabuf_to_vram(struct vmw_private *dev_priv,
- struct vmw_dma_buffer *buf,
- bool pin, bool interruptible)
+int vmw_dmabuf_pin_in_vram(struct vmw_private *dev_priv,
+ struct vmw_dma_buffer *buf,
+ bool interruptible)
{
- struct ttm_placement *placement;
-
- if (pin)
- placement = &vmw_vram_ne_placement;
- else
- placement = &vmw_vram_placement;
-
- return vmw_dmabuf_to_placement(dev_priv, buf,
- placement,
- interruptible);
+ return vmw_dmabuf_pin_in_placement(dev_priv, buf, &vmw_vram_placement,
+ interruptible);
}
/**
- * vmw_dmabuf_to_start_of_vram - Move a buffer to start of vram.
+ * vmw_dmabuf_pin_in_start_of_vram - Move a buffer to start of vram.
*
- * May only be called by the current master since it assumes that the
- * master lock is the current master's lock.
- * This function takes the master's lock in write mode.
- * Flushes and unpins the query bo if @pin == true to avoid failures.
+ * This function takes the reservation_sem in write mode.
+ * Flushes and unpins the query bo to avoid failures.
*
* @dev_priv: Driver private.
- * @buf: DMA buffer to move.
- * @pin: Pin buffer in vram if true.
+ * @buf: DMA buffer to pin.
* @interruptible: Use interruptible wait.
*
* Returns
* -ERESTARTSYS if interrupted by a signal.
*/
-int vmw_dmabuf_to_start_of_vram(struct vmw_private *dev_priv,
- struct vmw_dma_buffer *buf,
- bool pin, bool interruptible)
+int vmw_dmabuf_pin_in_start_of_vram(struct vmw_private *dev_priv,
+ struct vmw_dma_buffer *buf,
+ bool interruptible)
{
struct ttm_buffer_object *bo = &buf->base;
struct ttm_placement placement;
struct ttm_place place;
int ret = 0;
- if (pin)
- place = vmw_vram_ne_placement.placement[0];
- else
- place = vmw_vram_placement.placement[0];
+ place = vmw_vram_placement.placement[0];
place.lpfn = bo->num_pages;
-
placement.num_placement = 1;
placement.placement = &place;
placement.num_busy_placement = 1;
@@ -216,13 +173,16 @@ int vmw_dmabuf_to_start_of_vram(struct vmw_private *dev_priv,
if (unlikely(ret != 0))
return ret;
- if (pin)
- vmw_execbuf_release_pinned_bo(dev_priv);
+ vmw_execbuf_release_pinned_bo(dev_priv);
ret = ttm_bo_reserve(bo, interruptible, false, false, NULL);
if (unlikely(ret != 0))
goto err_unlock;
- /* Is this buffer already in vram but not at the start of it? */
+ /*
+ * Is this buffer already in vram but not at the start of it?
+ * In that case, evict it first because TTM isn't good at handling
+ * that situation.
+ */
if (bo->mem.mem_type == TTM_PL_VRAM &&
bo->mem.start < bo->num_pages &&
bo->mem.start > 0)
@@ -230,8 +190,10 @@ int vmw_dmabuf_to_start_of_vram(struct vmw_private *dev_priv,
ret = ttm_bo_validate(bo, &placement, interruptible, false);
- /* For some reason we didn't up at the start of vram */
+ /* For some reason we didn't end up at the start of vram */
WARN_ON(ret == 0 && bo->offset != 0);
+ if (!ret)
+ vmw_bo_pin_reserved(buf, true);
ttm_bo_unreserve(bo);
err_unlock:
@@ -240,13 +202,10 @@ err_unlock:
return ret;
}
-
/**
- * vmw_dmabuf_upin - Unpin the buffer given buffer, does not move the buffer.
+ * vmw_dmabuf_unpin - Unpin the given buffer without moving it.
*
- * May only be called by the current master since it assumes that the
- * master lock is the current master's lock.
- * This function takes the master's lock in write mode.
+ * This function takes the reservation_sem in write mode.
*
* @dev_priv: Driver private.
* @buf: DMA buffer to unpin.
@@ -259,16 +218,25 @@ int vmw_dmabuf_unpin(struct vmw_private *dev_priv,
struct vmw_dma_buffer *buf,
bool interruptible)
{
- /*
- * We could in theory early out if the buffer is
- * unpinned but we need to lock and reserve the buffer
- * anyways so we don't gain much by that.
- */
- return vmw_dmabuf_to_placement(dev_priv, buf,
- &vmw_evictable_placement,
- interruptible);
-}
+ struct ttm_buffer_object *bo = &buf->base;
+ int ret;
+
+ ret = ttm_read_lock(&dev_priv->reservation_sem, interruptible);
+ if (unlikely(ret != 0))
+ return ret;
+ ret = ttm_bo_reserve(bo, interruptible, false, false, NULL);
+ if (unlikely(ret != 0))
+ goto err;
+
+ vmw_bo_pin_reserved(buf, false);
+
+ ttm_bo_unreserve(bo);
+
+err:
+ ttm_read_unlock(&dev_priv->reservation_sem);
+ return ret;
+}
/**
* vmw_bo_get_guest_ptr - Get the guest ptr representing the current placement
@@ -291,21 +259,31 @@ void vmw_bo_get_guest_ptr(const struct ttm_buffer_object *bo,
/**
- * vmw_bo_pin - Pin or unpin a buffer object without moving it.
+ * vmw_bo_pin_reserved - Pin or unpin a buffer object without moving it.
*
- * @bo: The buffer object. Must be reserved.
+ * @vbo: The buffer object. Must be reserved.
* @pin: Whether to pin or unpin.
*
*/
-void vmw_bo_pin(struct ttm_buffer_object *bo, bool pin)
+void vmw_bo_pin_reserved(struct vmw_dma_buffer *vbo, bool pin)
{
struct ttm_place pl;
struct ttm_placement placement;
+ struct ttm_buffer_object *bo = &vbo->base;
uint32_t old_mem_type = bo->mem.mem_type;
int ret;
lockdep_assert_held(&bo->resv->lock.base);
+ if (pin) {
+ if (vbo->pin_count++ > 0)
+ return;
+ } else {
+ WARN_ON(vbo->pin_count <= 0);
+ if (--vbo->pin_count > 0)
+ return;
+ }
+
pl.fpfn = 0;
pl.lpfn = 0;
pl.flags = TTM_PL_FLAG_VRAM | VMW_PL_FLAG_GMR | VMW_PL_FLAG_MOB
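The new pin_count field turns pinning into a refcount, so nested pin/unpin pairs become safe. A sketch of the balanced usage this enables; per the lockdep assertion above, the buffer must be reserved by the caller:

	/* Balanced pin/unpin sketch; @vbo must be reserved. */
	static void example_pin_twice(struct vmw_dma_buffer *vbo)
	{
		vmw_bo_pin_reserved(vbo, true);  /* 0 -> 1: placement pinned */
		vmw_bo_pin_reserved(vbo, true);  /* 1 -> 2: early return */
		vmw_bo_pin_reserved(vbo, false); /* 2 -> 1: still pinned */
		vmw_bo_pin_reserved(vbo, false); /* 1 -> 0: evictable again */
	}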
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 620bb5cf617c..e13b20bd9908 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -1,6 +1,6 @@
/**************************************************************************
*
- * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
+ * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -28,6 +28,7 @@
#include <drm/drmP.h>
#include "vmwgfx_drv.h"
+#include "vmwgfx_binding.h"
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_object.h>
@@ -127,6 +128,9 @@
#define DRM_IOCTL_VMW_SYNCCPU \
DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_SYNCCPU, \
struct drm_vmw_synccpu_arg)
+#define DRM_IOCTL_VMW_CREATE_EXTENDED_CONTEXT \
+ DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_EXTENDED_CONTEXT, \
+ struct drm_vmw_context_arg)
/**
* The core DRM version of this macro doesn't account for
@@ -168,8 +172,8 @@ static const struct drm_ioctl_desc vmw_ioctls[] = {
DRM_UNLOCKED | DRM_RENDER_ALLOW),
VMW_IOCTL_DEF(VMW_REF_SURFACE, vmw_surface_reference_ioctl,
DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
- VMW_IOCTL_DEF(VMW_EXECBUF, vmw_execbuf_ioctl,
- DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
+ VMW_IOCTL_DEF(VMW_EXECBUF, NULL, DRM_AUTH | DRM_UNLOCKED |
+ DRM_RENDER_ALLOW),
VMW_IOCTL_DEF(VMW_FENCE_WAIT, vmw_fence_obj_wait_ioctl,
DRM_UNLOCKED | DRM_RENDER_ALLOW),
VMW_IOCTL_DEF(VMW_FENCE_SIGNALED,
@@ -206,6 +210,9 @@ static const struct drm_ioctl_desc vmw_ioctls[] = {
VMW_IOCTL_DEF(VMW_SYNCCPU,
vmw_user_dmabuf_synccpu_ioctl,
DRM_UNLOCKED | DRM_RENDER_ALLOW),
+ VMW_IOCTL_DEF(VMW_CREATE_EXTENDED_CONTEXT,
+ vmw_extended_context_define_ioctl,
+ DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
};
static struct pci_device_id vmw_pci_id_list[] = {
@@ -278,6 +285,8 @@ static void vmw_print_capabilities(uint32_t capabilities)
DRM_INFO(" Command Buffers 2.\n");
if (capabilities & SVGA_CAP_GBOBJECTS)
DRM_INFO(" Guest Backed Resources.\n");
+ if (capabilities & SVGA_CAP_DX)
+ DRM_INFO(" DX Features.\n");
}
/**
@@ -296,30 +305,31 @@ static void vmw_print_capabilities(uint32_t capabilities)
static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv)
{
int ret;
- struct ttm_buffer_object *bo;
+ struct vmw_dma_buffer *vbo;
struct ttm_bo_kmap_obj map;
volatile SVGA3dQueryResult *result;
bool dummy;
/*
- * Create the bo as pinned, so that a tryreserve will
+ * Create the vbo as pinned, so that a tryreserve will
* immediately succeed. This is because we're the only
* user of the bo currently.
*/
- ret = ttm_bo_create(&dev_priv->bdev,
- PAGE_SIZE,
- ttm_bo_type_device,
- &vmw_sys_ne_placement,
- 0, false, NULL,
- &bo);
+ vbo = kzalloc(sizeof(*vbo), GFP_KERNEL);
+ if (!vbo)
+ return -ENOMEM;
+ ret = vmw_dmabuf_init(dev_priv, vbo, PAGE_SIZE,
+ &vmw_sys_ne_placement, false,
+ &vmw_dmabuf_bo_free);
if (unlikely(ret != 0))
return ret;
- ret = ttm_bo_reserve(bo, false, true, false, NULL);
+ ret = ttm_bo_reserve(&vbo->base, false, true, false, NULL);
BUG_ON(ret != 0);
+ vmw_bo_pin_reserved(vbo, true);
- ret = ttm_bo_kmap(bo, 0, 1, &map);
+ ret = ttm_bo_kmap(&vbo->base, 0, 1, &map);
if (likely(ret == 0)) {
result = ttm_kmap_obj_virtual(&map, &dummy);
result->totalSize = sizeof(*result);
@@ -327,18 +337,55 @@ static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv)
result->result32 = 0xff;
ttm_bo_kunmap(&map);
}
- vmw_bo_pin(bo, false);
- ttm_bo_unreserve(bo);
+ vmw_bo_pin_reserved(vbo, false);
+ ttm_bo_unreserve(&vbo->base);
if (unlikely(ret != 0)) {
DRM_ERROR("Dummy query buffer map failed.\n");
- ttm_bo_unref(&bo);
+ vmw_dmabuf_unreference(&vbo);
} else
- dev_priv->dummy_query_bo = bo;
+ dev_priv->dummy_query_bo = vbo;
return ret;
}
+/**
+ * vmw_request_device_late - Perform late device setup
+ *
+ * @dev_priv: Pointer to device private.
+ *
+ * This function performs setup of otables and enables large command
+ * buffer submission. These tasks are split out to a separate function
+ * because this function reverts vmw_release_device_early() and is intended
+ * to be used by an error path in the hibernation code.
+ */
+static int vmw_request_device_late(struct vmw_private *dev_priv)
+{
+ int ret;
+
+ if (dev_priv->has_mob) {
+ ret = vmw_otables_setup(dev_priv);
+ if (unlikely(ret != 0)) {
+ DRM_ERROR("Unable to initialize "
+ "guest Memory OBjects.\n");
+ return ret;
+ }
+ }
+
+ if (dev_priv->cman) {
+ ret = vmw_cmdbuf_set_pool_size(dev_priv->cman,
+ 256*4096, 2*4096);
+ if (ret) {
+ struct vmw_cmdbuf_man *man = dev_priv->cman;
+
+ dev_priv->cman = NULL;
+ vmw_cmdbuf_man_destroy(man);
+ }
+ }
+
+ return 0;
+}
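A sketch of the hibernation error path the kerneldoc refers to; entirely hypothetical, since that code is not part of this hunk:

	/* Hypothetical hibernation error-path sketch: redo the late setup
	 * that vmw_release_device_early() reverted before the suspend. */
	static int example_resume_after_failed_hibernate(
		struct vmw_private *dev_priv)
	{
		return vmw_request_device_late(dev_priv);
	}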
+
static int vmw_request_device(struct vmw_private *dev_priv)
{
int ret;
@@ -349,14 +396,16 @@ static int vmw_request_device(struct vmw_private *dev_priv)
return ret;
}
vmw_fence_fifo_up(dev_priv->fman);
- if (dev_priv->has_mob) {
- ret = vmw_otables_setup(dev_priv);
- if (unlikely(ret != 0)) {
- DRM_ERROR("Unable to initialize "
- "guest Memory OBjects.\n");
- goto out_no_mob;
- }
+ dev_priv->cman = vmw_cmdbuf_man_create(dev_priv);
+ if (IS_ERR(dev_priv->cman)) {
+ dev_priv->cman = NULL;
+ dev_priv->has_dx = false;
}
+
+ ret = vmw_request_device_late(dev_priv);
+ if (ret)
+ goto out_no_mob;
+
ret = vmw_dummy_query_bo_create(dev_priv);
if (unlikely(ret != 0))
goto out_no_query_bo;
@@ -364,15 +413,29 @@ static int vmw_request_device(struct vmw_private *dev_priv)
return 0;
out_no_query_bo:
- if (dev_priv->has_mob)
+ if (dev_priv->cman)
+ vmw_cmdbuf_remove_pool(dev_priv->cman);
+ if (dev_priv->has_mob) {
+ (void) ttm_bo_evict_mm(&dev_priv->bdev, VMW_PL_MOB);
vmw_otables_takedown(dev_priv);
+ }
+ if (dev_priv->cman)
+ vmw_cmdbuf_man_destroy(dev_priv->cman);
out_no_mob:
vmw_fence_fifo_down(dev_priv->fman);
vmw_fifo_release(dev_priv, &dev_priv->fifo);
return ret;
}
-static void vmw_release_device(struct vmw_private *dev_priv)
+/**
+ * vmw_release_device_early - Early part of fifo takedown.
+ *
+ * @dev_priv: Pointer to device private struct.
+ *
+ * This is the first part of command submission takedown, to be called before
+ * buffer management is taken down.
+ */
+static void vmw_release_device_early(struct vmw_private *dev_priv)
{
/*
* Previous destructions should've released
@@ -381,65 +444,31 @@ static void vmw_release_device(struct vmw_private *dev_priv)
BUG_ON(dev_priv->pinned_bo != NULL);
- ttm_bo_unref(&dev_priv->dummy_query_bo);
- if (dev_priv->has_mob)
- vmw_otables_takedown(dev_priv);
- vmw_fence_fifo_down(dev_priv->fman);
- vmw_fifo_release(dev_priv, &dev_priv->fifo);
-}
-
+ vmw_dmabuf_unreference(&dev_priv->dummy_query_bo);
+ if (dev_priv->cman)
+ vmw_cmdbuf_remove_pool(dev_priv->cman);
-/**
- * Increase the 3d resource refcount.
- * If the count was prevously zero, initialize the fifo, switching to svga
- * mode. Note that the master holds a ref as well, and may request an
- * explicit switch to svga mode if fb is not running, using @unhide_svga.
- */
-int vmw_3d_resource_inc(struct vmw_private *dev_priv,
- bool unhide_svga)
-{
- int ret = 0;
-
- mutex_lock(&dev_priv->release_mutex);
- if (unlikely(dev_priv->num_3d_resources++ == 0)) {
- ret = vmw_request_device(dev_priv);
- if (unlikely(ret != 0))
- --dev_priv->num_3d_resources;
- } else if (unhide_svga) {
- vmw_write(dev_priv, SVGA_REG_ENABLE,
- vmw_read(dev_priv, SVGA_REG_ENABLE) &
- ~SVGA_REG_ENABLE_HIDE);
+ if (dev_priv->has_mob) {
+ ttm_bo_evict_mm(&dev_priv->bdev, VMW_PL_MOB);
+ vmw_otables_takedown(dev_priv);
}
-
- mutex_unlock(&dev_priv->release_mutex);
- return ret;
}
/**
- * Decrease the 3d resource refcount.
- * If the count reaches zero, disable the fifo, switching to vga mode.
- * Note that the master holds a refcount as well, and may request an
- * explicit switch to vga mode when it releases its refcount to account
- * for the situation of an X server vt switch to VGA with 3d resources
- * active.
+ * vmw_release_device_late - Late part of fifo takedown.
+ *
+ * @dev_priv: Pointer to device private struct.
+ *
+ * This is the last part of the command submission takedown, to be called when
+ * command submission is no longer needed. It may wait on pending fences.
*/
-void vmw_3d_resource_dec(struct vmw_private *dev_priv,
- bool hide_svga)
+static void vmw_release_device_late(struct vmw_private *dev_priv)
{
- int32_t n3d;
-
- mutex_lock(&dev_priv->release_mutex);
- if (unlikely(--dev_priv->num_3d_resources == 0))
- vmw_release_device(dev_priv);
- else if (hide_svga)
- vmw_write(dev_priv, SVGA_REG_ENABLE,
- vmw_read(dev_priv, SVGA_REG_ENABLE) |
- SVGA_REG_ENABLE_HIDE);
-
- n3d = (int32_t) dev_priv->num_3d_resources;
- mutex_unlock(&dev_priv->release_mutex);
+ vmw_fence_fifo_down(dev_priv->fman);
+ if (dev_priv->cman)
+ vmw_cmdbuf_man_destroy(dev_priv->cman);
- BUG_ON(n3d < 0);
+ vmw_fifo_release(dev_priv, &dev_priv->fifo);
}
/**
@@ -603,6 +632,7 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
spin_lock_init(&dev_priv->hw_lock);
spin_lock_init(&dev_priv->waiter_lock);
spin_lock_init(&dev_priv->cap_lock);
+ spin_lock_init(&dev_priv->svga_lock);
for (i = vmw_res_context; i < vmw_res_max; ++i) {
idr_init(&dev_priv->res_idr[i]);
@@ -673,22 +703,31 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
SVGA_REG_MAX_PRIMARY_BOUNDING_BOX_MEM);
dev_priv->max_mob_size =
vmw_read(dev_priv, SVGA_REG_MOB_MAX_SIZE);
- } else
+ dev_priv->stdu_max_width =
+ vmw_read(dev_priv, SVGA_REG_SCREENTARGET_MAX_WIDTH);
+ dev_priv->stdu_max_height =
+ vmw_read(dev_priv, SVGA_REG_SCREENTARGET_MAX_HEIGHT);
+
+ vmw_write(dev_priv, SVGA_REG_DEV_CAP,
+ SVGA3D_DEVCAP_MAX_TEXTURE_WIDTH);
+ dev_priv->texture_max_width = vmw_read(dev_priv,
+ SVGA_REG_DEV_CAP);
+ vmw_write(dev_priv, SVGA_REG_DEV_CAP,
+ SVGA3D_DEVCAP_MAX_TEXTURE_HEIGHT);
+ dev_priv->texture_max_height = vmw_read(dev_priv,
+ SVGA_REG_DEV_CAP);
+ } else {
+ dev_priv->texture_max_width = 8192;
+ dev_priv->texture_max_height = 8192;
dev_priv->prim_bb_mem = dev_priv->vram_size;
+ }
+
+ vmw_print_capabilities(dev_priv->capabilities);
ret = vmw_dma_masks(dev_priv);
if (unlikely(ret != 0))
goto out_err0;
- /*
- * Limit back buffer size to VRAM size. Remove this once
- * screen targets are implemented.
- */
- if (dev_priv->prim_bb_mem > dev_priv->vram_size)
- dev_priv->prim_bb_mem = dev_priv->vram_size;
-
- vmw_print_capabilities(dev_priv->capabilities);
-
if (dev_priv->capabilities & SVGA_CAP_GMR2) {
DRM_INFO("Max GMR ids is %u\n",
(unsigned)dev_priv->max_gmr_ids);
@@ -714,17 +753,6 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
dev_priv->active_master = &dev_priv->fbdev_master;
- ret = ttm_bo_device_init(&dev_priv->bdev,
- dev_priv->bo_global_ref.ref.object,
- &vmw_bo_driver,
- dev->anon_inode->i_mapping,
- VMWGFX_FILE_PAGE_OFFSET,
- false);
- if (unlikely(ret != 0)) {
- DRM_ERROR("Failed initializing TTM buffer object driver.\n");
- goto out_err1;
- }
-
dev_priv->mmio_mtrr = arch_phys_wc_add(dev_priv->mmio_start,
dev_priv->mmio_size);
@@ -787,13 +815,28 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
goto out_no_fman;
}
+ ret = ttm_bo_device_init(&dev_priv->bdev,
+ dev_priv->bo_global_ref.ref.object,
+ &vmw_bo_driver,
+ dev->anon_inode->i_mapping,
+ VMWGFX_FILE_PAGE_OFFSET,
+ false);
+ if (unlikely(ret != 0)) {
+ DRM_ERROR("Failed initializing TTM buffer object driver.\n");
+ goto out_no_bdev;
+ }
+ /*
+ * Enable VRAM, but initially don't use it until SVGA is enabled and
+ * unhidden.
+ */
ret = ttm_bo_init_mm(&dev_priv->bdev, TTM_PL_VRAM,
(dev_priv->vram_size >> PAGE_SHIFT));
if (unlikely(ret != 0)) {
DRM_ERROR("Failed initializing memory manager for VRAM.\n");
goto out_no_vram;
}
+ dev_priv->bdev.man[TTM_PL_VRAM].use_type = false;
dev_priv->has_gmr = true;
if (((dev_priv->capabilities & (SVGA_CAP_GMR | SVGA_CAP_GMR2)) == 0) ||
@@ -814,18 +857,28 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
}
}
- vmw_kms_save_vga(dev_priv);
+ if (dev_priv->has_mob) {
+ spin_lock(&dev_priv->cap_lock);
+ vmw_write(dev_priv, SVGA_REG_DEV_CAP, SVGA3D_DEVCAP_DX);
+ dev_priv->has_dx = !!vmw_read(dev_priv, SVGA_REG_DEV_CAP);
+ spin_unlock(&dev_priv->cap_lock);
+ }
+
- /* Start kms and overlay systems, needs fifo. */
ret = vmw_kms_init(dev_priv);
if (unlikely(ret != 0))
goto out_no_kms;
vmw_overlay_init(dev_priv);
+ ret = vmw_request_device(dev_priv);
+ if (ret)
+ goto out_no_fifo;
+
+ DRM_INFO("DX: %s\n", dev_priv->has_dx ? "yes." : "no.");
+
if (dev_priv->enable_fb) {
- ret = vmw_3d_resource_inc(dev_priv, true);
- if (unlikely(ret != 0))
- goto out_no_fifo;
+ vmw_fifo_resource_inc(dev_priv);
+ vmw_svga_enable(dev_priv);
vmw_fb_init(dev_priv);
}
@@ -838,13 +891,14 @@ out_no_fifo:
vmw_overlay_close(dev_priv);
vmw_kms_close(dev_priv);
out_no_kms:
- vmw_kms_restore_vga(dev_priv);
if (dev_priv->has_mob)
(void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB);
if (dev_priv->has_gmr)
(void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
(void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
out_no_vram:
+ (void)ttm_bo_device_release(&dev_priv->bdev);
+out_no_bdev:
vmw_fence_manager_takedown(dev_priv->fman);
out_no_fman:
if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
@@ -860,13 +914,13 @@ out_err4:
iounmap(dev_priv->mmio_virt);
out_err3:
arch_phys_wc_del(dev_priv->mmio_mtrr);
- (void)ttm_bo_device_release(&dev_priv->bdev);
-out_err1:
vmw_ttm_global_release(dev_priv);
out_err0:
for (i = vmw_res_context; i < vmw_res_max; ++i)
idr_destroy(&dev_priv->res_idr[i]);
+ if (dev_priv->ctx.staged_bindings)
+ vmw_binding_state_free(dev_priv->ctx.staged_bindings);
kfree(dev_priv);
return ret;
}
@@ -882,19 +936,24 @@ static int vmw_driver_unload(struct drm_device *dev)
drm_ht_remove(&dev_priv->ctx.res_ht);
vfree(dev_priv->ctx.cmd_bounce);
if (dev_priv->enable_fb) {
+ vmw_fb_off(dev_priv);
vmw_fb_close(dev_priv);
- vmw_kms_restore_vga(dev_priv);
- vmw_3d_resource_dec(dev_priv, false);
+ vmw_fifo_resource_dec(dev_priv);
+ vmw_svga_disable(dev_priv);
}
+
vmw_kms_close(dev_priv);
vmw_overlay_close(dev_priv);
- if (dev_priv->has_mob)
- (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB);
if (dev_priv->has_gmr)
(void)ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
(void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
+ vmw_release_device_early(dev_priv);
+ if (dev_priv->has_mob)
+ (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB);
+ (void) ttm_bo_device_release(&dev_priv->bdev);
+ vmw_release_device_late(dev_priv);
vmw_fence_manager_takedown(dev_priv->fman);
if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
drm_irq_uninstall(dev_priv->dev);
@@ -906,7 +965,8 @@ static int vmw_driver_unload(struct drm_device *dev)
ttm_object_device_release(&dev_priv->tdev);
iounmap(dev_priv->mmio_virt);
arch_phys_wc_del(dev_priv->mmio_mtrr);
- (void)ttm_bo_device_release(&dev_priv->bdev);
+ if (dev_priv->ctx.staged_bindings)
+ vmw_binding_state_free(dev_priv->ctx.staged_bindings);
vmw_ttm_global_release(dev_priv);
for (i = vmw_res_context; i < vmw_res_max; ++i)
@@ -992,10 +1052,15 @@ static struct vmw_master *vmw_master_check(struct drm_device *dev,
}
/*
- * Check if we were previously master, but now dropped.
+ * Check if we were previously master, but now dropped. In that
+ * case, allow at least render node functionality.
*/
if (vmw_fp->locked_master) {
mutex_unlock(&dev->master_mutex);
+
+ if (flags & DRM_RENDER_ALLOW)
+ return NULL;
+
DRM_ERROR("Dropped master trying to access ioctl that "
"requires authentication.\n");
return ERR_PTR(-EACCES);
@@ -1044,17 +1109,27 @@ static long vmw_generic_ioctl(struct file *filp, unsigned int cmd,
const struct drm_ioctl_desc *ioctl =
&vmw_ioctls[nr - DRM_COMMAND_BASE];
- if (unlikely(ioctl->cmd != cmd)) {
- DRM_ERROR("Invalid command format, ioctl %d\n",
- nr - DRM_COMMAND_BASE);
- return -EINVAL;
+ if (nr == DRM_COMMAND_BASE + DRM_VMW_EXECBUF) {
+ ret = (long) drm_ioctl_permit(ioctl->flags, file_priv);
+ if (unlikely(ret != 0))
+ return ret;
+
+ if (unlikely((cmd & (IOC_IN | IOC_OUT)) != IOC_IN))
+ goto out_io_encoding;
+
+ return (long) vmw_execbuf_ioctl(dev, arg, file_priv,
+ _IOC_SIZE(cmd));
}
+
+ if (unlikely(ioctl->cmd != cmd))
+ goto out_io_encoding;
+
flags = ioctl->flags;
} else if (!drm_ioctl_flags(nr, &flags))
return -EINVAL;
vmaster = vmw_master_check(dev, file_priv, flags);
- if (unlikely(IS_ERR(vmaster))) {
+ if (IS_ERR(vmaster)) {
ret = PTR_ERR(vmaster);
if (ret != -ERESTARTSYS)
@@ -1068,6 +1143,12 @@ static long vmw_generic_ioctl(struct file *filp, unsigned int cmd,
ttm_read_unlock(&vmaster->lock);
return ret;
+
+out_io_encoding:
+ DRM_ERROR("Invalid command format, ioctl %d\n",
+ nr - DRM_COMMAND_BASE);
+
+ return -EINVAL;
}
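
/*
 * Illustrative sketch (userspace, not driver code): how the IOC_IN-only
 * check in vmw_generic_ioctl() behaves for different ioctl encodings. The
 * execbuf path only accepts commands whose direction bits say "userspace
 * writes, kernel copies nothing back". FAKE_IOW/FAKE_IOWR are made-up
 * commands; this builds as a plain Linux userspace program.
 */
#include <stdio.h>
#include <linux/ioctl.h>

struct fake_arg { int x; };

#define FAKE_IOW  _IOW('V', 1, struct fake_arg)   /* in only: accepted */
#define FAKE_IOWR _IOWR('V', 1, struct fake_arg)  /* in+out: rejected */

static int encoding_ok(unsigned int cmd)
{
	/* Mirrors: (cmd & (IOC_IN | IOC_OUT)) != IOC_IN -> bad encoding */
	return (cmd & (IOC_IN | IOC_OUT)) == IOC_IN;
}

int main(void)
{
	printf("_IOW:  %s\n", encoding_ok(FAKE_IOW) ? "ok" : "bad encoding");
	printf("_IOWR: %s\n", encoding_ok(FAKE_IOWR) ? "ok" : "bad encoding");
	return 0;
}
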
static long vmw_unlocked_ioctl(struct file *filp, unsigned int cmd,
@@ -1086,30 +1167,11 @@ static long vmw_compat_ioctl(struct file *filp, unsigned int cmd,
static void vmw_lastclose(struct drm_device *dev)
{
- struct drm_crtc *crtc;
- struct drm_mode_set set;
- int ret;
-
- set.x = 0;
- set.y = 0;
- set.fb = NULL;
- set.mode = NULL;
- set.connectors = NULL;
- set.num_connectors = 0;
-
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- set.crtc = crtc;
- ret = drm_mode_set_config_internal(&set);
- WARN_ON(ret != 0);
- }
-
}
static void vmw_master_init(struct vmw_master *vmaster)
{
ttm_lock_init(&vmaster->lock);
- INIT_LIST_HEAD(&vmaster->fb_surf);
- mutex_init(&vmaster->fb_surf_mutex);
}
static int vmw_master_create(struct drm_device *dev,
@@ -1137,7 +1199,6 @@ static void vmw_master_destroy(struct drm_device *dev,
kfree(vmaster);
}
-
static int vmw_master_set(struct drm_device *dev,
struct drm_file *file_priv,
bool from_open)
@@ -1148,27 +1209,13 @@ static int vmw_master_set(struct drm_device *dev,
struct vmw_master *vmaster = vmw_master(file_priv->master);
int ret = 0;
- if (!dev_priv->enable_fb) {
- ret = vmw_3d_resource_inc(dev_priv, true);
- if (unlikely(ret != 0))
- return ret;
- vmw_kms_save_vga(dev_priv);
- vmw_write(dev_priv, SVGA_REG_TRACES, 0);
- }
-
if (active) {
BUG_ON(active != &dev_priv->fbdev_master);
ret = ttm_vt_lock(&active->lock, false, vmw_fp->tfile);
if (unlikely(ret != 0))
- goto out_no_active_lock;
+ return ret;
ttm_lock_set_kill(&active->lock, true, SIGTERM);
- ret = ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM);
- if (unlikely(ret != 0)) {
- DRM_ERROR("Unable to clean VRAM on "
- "master drop.\n");
- }
-
dev_priv->active_master = NULL;
}
@@ -1182,14 +1229,6 @@ static int vmw_master_set(struct drm_device *dev,
dev_priv->active_master = vmaster;
return 0;
-
-out_no_active_lock:
- if (!dev_priv->enable_fb) {
- vmw_kms_restore_vga(dev_priv);
- vmw_3d_resource_dec(dev_priv, true);
- vmw_write(dev_priv, SVGA_REG_TRACES, 1);
- }
- return ret;
}
static void vmw_master_drop(struct drm_device *dev,
@@ -1214,16 +1253,9 @@ static void vmw_master_drop(struct drm_device *dev,
}
ttm_lock_set_kill(&vmaster->lock, false, SIGTERM);
- vmw_execbuf_release_pinned_bo(dev_priv);
- if (!dev_priv->enable_fb) {
- ret = ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM);
- if (unlikely(ret != 0))
- DRM_ERROR("Unable to clean VRAM on master drop.\n");
- vmw_kms_restore_vga(dev_priv);
- vmw_3d_resource_dec(dev_priv, true);
- vmw_write(dev_priv, SVGA_REG_TRACES, 1);
- }
+ if (!dev_priv->enable_fb)
+ vmw_svga_disable(dev_priv);
dev_priv->active_master = &dev_priv->fbdev_master;
ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM);
@@ -1233,6 +1265,76 @@ static void vmw_master_drop(struct drm_device *dev,
vmw_fb_on(dev_priv);
}
+/**
+ * __vmw_svga_enable - Enable SVGA mode, FIFO and use of VRAM.
+ *
+ * @dev_priv: Pointer to device private struct.
+ * Needs the reservation sem to be held in non-exclusive mode.
+ */
+static void __vmw_svga_enable(struct vmw_private *dev_priv)
+{
+ spin_lock(&dev_priv->svga_lock);
+ if (!dev_priv->bdev.man[TTM_PL_VRAM].use_type) {
+ vmw_write(dev_priv, SVGA_REG_ENABLE, SVGA_REG_ENABLE);
+ dev_priv->bdev.man[TTM_PL_VRAM].use_type = true;
+ }
+ spin_unlock(&dev_priv->svga_lock);
+}
+
+/**
+ * vmw_svga_enable - Enable SVGA mode, FIFO and use of VRAM.
+ *
+ * @dev_priv: Pointer to device private struct.
+ */
+void vmw_svga_enable(struct vmw_private *dev_priv)
+{
+ ttm_read_lock(&dev_priv->reservation_sem, false);
+ __vmw_svga_enable(dev_priv);
+ ttm_read_unlock(&dev_priv->reservation_sem);
+}
+
+/**
+ * __vmw_svga_disable - Disable SVGA mode and use of VRAM.
+ *
+ * @dev_priv: Pointer to device private struct.
+ * Needs the reservation sem to be held in exclusive mode.
+ * Will not empty VRAM. VRAM must be emptied by caller.
+ */
+static void __vmw_svga_disable(struct vmw_private *dev_priv)
+{
+ spin_lock(&dev_priv->svga_lock);
+ if (dev_priv->bdev.man[TTM_PL_VRAM].use_type) {
+ dev_priv->bdev.man[TTM_PL_VRAM].use_type = false;
+ vmw_write(dev_priv, SVGA_REG_ENABLE,
+ SVGA_REG_ENABLE_HIDE |
+ SVGA_REG_ENABLE_ENABLE);
+ }
+ spin_unlock(&dev_priv->svga_lock);
+}
+
+/**
+ * vmw_svga_disable - Disable SVGA mode and use of VRAM. Keep the fifo
+ * running.
+ *
+ * @dev_priv: Pointer to device private struct.
+ * Will empty VRAM.
+ */
+void vmw_svga_disable(struct vmw_private *dev_priv)
+{
+ ttm_write_lock(&dev_priv->reservation_sem, false);
+ spin_lock(&dev_priv->svga_lock);
+ if (dev_priv->bdev.man[TTM_PL_VRAM].use_type) {
+ dev_priv->bdev.man[TTM_PL_VRAM].use_type = false;
+ spin_unlock(&dev_priv->svga_lock);
+ if (ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM))
+ DRM_ERROR("Failed evicting VRAM buffers.\n");
+ vmw_write(dev_priv, SVGA_REG_ENABLE,
+ SVGA_REG_ENABLE_HIDE |
+ SVGA_REG_ENABLE_ENABLE);
+ } else
+ spin_unlock(&dev_priv->svga_lock);
+ ttm_write_unlock(&dev_priv->reservation_sem);
+}
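
/*
 * Illustrative sketch (userspace, not driver code): the lock-drop pattern
 * used by vmw_svga_disable() above. The VRAM-in-use flag is cleared under
 * the spinlock, but the lock is released before the eviction call, since
 * ttm_bo_evict_mm() may sleep. All names below are stand-ins; build with
 * -lpthread.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static pthread_spinlock_t svga_lock;
static bool vram_in_use = true;

static void evict_vram(void)		/* stands in for ttm_bo_evict_mm() */
{
	usleep(1000);			/* may block; must not hold the spinlock */
}

static void svga_disable(void)
{
	pthread_spin_lock(&svga_lock);
	if (vram_in_use) {
		vram_in_use = false;	/* block new VRAM placements first */
		pthread_spin_unlock(&svga_lock);
		evict_vram();		/* then empty VRAM, lock dropped */
		puts("SVGA hidden, VRAM emptied");
	} else {
		pthread_spin_unlock(&svga_lock);
	}
}

int main(void)
{
	pthread_spin_init(&svga_lock, PTHREAD_PROCESS_PRIVATE);
	svga_disable();
	svga_disable();			/* second call is a no-op */
	pthread_spin_destroy(&svga_lock);
	return 0;
}
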
static void vmw_remove(struct pci_dev *pdev)
{
@@ -1250,23 +1352,26 @@ static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
switch (val) {
case PM_HIBERNATION_PREPARE:
- case PM_SUSPEND_PREPARE:
+ if (dev_priv->enable_fb)
+ vmw_fb_off(dev_priv);
ttm_suspend_lock(&dev_priv->reservation_sem);
- /**
+ /*
* This empties VRAM and unbinds all GMR bindings.
* Buffer contents are moved to swappable memory.
*/
vmw_execbuf_release_pinned_bo(dev_priv);
vmw_resource_evict_all(dev_priv);
+ vmw_release_device_early(dev_priv);
ttm_bo_swapout_all(&dev_priv->bdev);
-
+ vmw_fence_fifo_down(dev_priv->fman);
break;
case PM_POST_HIBERNATION:
- case PM_POST_SUSPEND:
case PM_POST_RESTORE:
+ vmw_fence_fifo_up(dev_priv->fman);
ttm_suspend_unlock(&dev_priv->reservation_sem);
-
+ if (dev_priv->enable_fb)
+ vmw_fb_on(dev_priv);
break;
case PM_RESTORE_PREPARE:
break;
@@ -1276,20 +1381,13 @@ static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
return 0;
}
-/**
- * These might not be needed with the virtual SVGA device.
- */
-
static int vmw_pci_suspend(struct pci_dev *pdev, pm_message_t state)
{
struct drm_device *dev = pci_get_drvdata(pdev);
struct vmw_private *dev_priv = vmw_priv(dev);
- if (dev_priv->num_3d_resources != 0) {
- DRM_INFO("Can't suspend or hibernate "
- "while 3D resources are active.\n");
+ if (dev_priv->refuse_hibernation)
return -EBUSY;
- }
pci_save_state(pdev);
pci_disable_device(pdev);
@@ -1321,56 +1419,62 @@ static int vmw_pm_resume(struct device *kdev)
return vmw_pci_resume(pdev);
}
-static int vmw_pm_prepare(struct device *kdev)
+static int vmw_pm_freeze(struct device *kdev)
{
struct pci_dev *pdev = to_pci_dev(kdev);
struct drm_device *dev = pci_get_drvdata(pdev);
struct vmw_private *dev_priv = vmw_priv(dev);
- /**
- * Release 3d reference held by fbdev and potentially
- * stop fifo.
- */
dev_priv->suspended = true;
if (dev_priv->enable_fb)
- vmw_3d_resource_dec(dev_priv, true);
-
- if (dev_priv->num_3d_resources != 0) {
-
- DRM_INFO("Can't suspend or hibernate "
- "while 3D resources are active.\n");
+ vmw_fifo_resource_dec(dev_priv);
+ if (atomic_read(&dev_priv->num_fifo_resources) != 0) {
+ DRM_ERROR("Can't hibernate while 3D resources are active.\n");
if (dev_priv->enable_fb)
- vmw_3d_resource_inc(dev_priv, true);
+ vmw_fifo_resource_inc(dev_priv);
+ WARN_ON(vmw_request_device_late(dev_priv));
dev_priv->suspended = false;
return -EBUSY;
}
+ if (dev_priv->enable_fb)
+ __vmw_svga_disable(dev_priv);
+
+ vmw_release_device_late(dev_priv);
+
return 0;
}
-static void vmw_pm_complete(struct device *kdev)
+static int vmw_pm_restore(struct device *kdev)
{
struct pci_dev *pdev = to_pci_dev(kdev);
struct drm_device *dev = pci_get_drvdata(pdev);
struct vmw_private *dev_priv = vmw_priv(dev);
+ int ret;
vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2);
(void) vmw_read(dev_priv, SVGA_REG_ID);
- /**
- * Reclaim 3d reference held by fbdev and potentially
- * start fifo.
- */
if (dev_priv->enable_fb)
- vmw_3d_resource_inc(dev_priv, false);
+ vmw_fifo_resource_inc(dev_priv);
+
+ ret = vmw_request_device(dev_priv);
+ if (ret)
+ return ret;
+
+ if (dev_priv->enable_fb)
+ __vmw_svga_enable(dev_priv);
dev_priv->suspended = false;
+
+ return 0;
}
static const struct dev_pm_ops vmw_pm_ops = {
- .prepare = vmw_pm_prepare,
- .complete = vmw_pm_complete,
+ .freeze = vmw_pm_freeze,
+ .thaw = vmw_pm_restore,
+ .restore = vmw_pm_restore,
.suspend = vmw_pm_suspend,
.resume = vmw_pm_resume,
};
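
/*
 * Illustrative sketch (userspace, not driver code): the freeze-time
 * refusal logic in vmw_pm_freeze() above, modeled with a plain atomic
 * counter. The fbdev reference is dropped first; if any other FIFO
 * resource is still held, the decrement is rolled back and hibernation
 * is refused. Not the driver's types.
 */
#include <errno.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int num_fifo_resources;
static bool enable_fb = true;

static int pm_freeze(void)
{
	if (enable_fb)
		atomic_fetch_sub(&num_fifo_resources, 1);

	if (atomic_load(&num_fifo_resources) != 0) {
		/* 3D clients still active: undo our decrement and refuse */
		if (enable_fb)
			atomic_fetch_add(&num_fifo_resources, 1);
		return -EBUSY;
	}
	return 0;
}

int main(void)
{
	atomic_store(&num_fifo_resources, 2);	/* fbdev + one 3D client */
	printf("freeze: %d\n", pm_freeze());	/* -EBUSY */
	atomic_store(&num_fifo_resources, 1);	/* only fbdev left */
	printf("freeze: %d\n", pm_freeze());	/* 0: hibernation allowed */
	return 0;
}
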
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 0336d49e3d4c..6d02de6dc36c 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -1,6 +1,6 @@
/**************************************************************************
*
- * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
+ * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -40,17 +40,17 @@
#include <drm/ttm/ttm_module.h>
#include "vmwgfx_fence.h"
-#define VMWGFX_DRIVER_DATE "20140704"
+#define VMWGFX_DRIVER_DATE "20150810"
#define VMWGFX_DRIVER_MAJOR 2
-#define VMWGFX_DRIVER_MINOR 6
-#define VMWGFX_DRIVER_PATCHLEVEL 1
+#define VMWGFX_DRIVER_MINOR 9
+#define VMWGFX_DRIVER_PATCHLEVEL 0
#define VMWGFX_FILE_PAGE_OFFSET 0x00100000
#define VMWGFX_FIFO_STATIC_SIZE (1024*1024)
#define VMWGFX_MAX_RELOCATIONS 2048
#define VMWGFX_MAX_VALIDATIONS 2048
#define VMWGFX_MAX_DISPLAYS 16
#define VMWGFX_CMD_BOUNCE_INIT_SIZE 32768
-#define VMWGFX_ENABLE_SCREEN_TARGET_OTABLE 0
+#define VMWGFX_ENABLE_SCREEN_TARGET_OTABLE 1
/*
* Perhaps we should have sysfs entries for these.
@@ -59,6 +59,8 @@
#define VMWGFX_NUM_GB_SHADER 20000
#define VMWGFX_NUM_GB_SURFACE 32768
#define VMWGFX_NUM_GB_SCREEN_TARGET VMWGFX_MAX_DISPLAYS
+#define VMWGFX_NUM_DXCONTEXT 256
+#define VMWGFX_NUM_DXQUERY 512
#define VMWGFX_NUM_MOB (VMWGFX_NUM_GB_CONTEXT +\
VMWGFX_NUM_GB_SHADER +\
VMWGFX_NUM_GB_SURFACE +\
@@ -85,6 +87,9 @@ struct vmw_fpriv {
struct vmw_dma_buffer {
struct ttm_buffer_object base;
struct list_head res_list;
+ s32 pin_count;
+ /* Not ref-counted. Protected by binding_mutex */
+ struct vmw_resource *dx_query_ctx;
};
/**
@@ -113,6 +118,7 @@ struct vmw_resource {
bool backup_dirty; /* Protected by backup buffer reserved */
struct vmw_dma_buffer *backup;
unsigned long backup_offset;
+ unsigned long pin_count; /* Protected by resource reserved */
const struct vmw_res_func *func;
struct list_head lru_head; /* Protected by the resource lock */
struct list_head mob_head; /* Protected by @backup reserved */
@@ -130,6 +136,9 @@ enum vmw_res_type {
vmw_res_surface,
vmw_res_stream,
vmw_res_shader,
+ vmw_res_dx_context,
+ vmw_res_cotable,
+ vmw_res_view,
vmw_res_max
};
@@ -137,7 +146,8 @@ enum vmw_res_type {
* Resources that are managed using command streams.
*/
enum vmw_cmdbuf_res_type {
- vmw_cmdbuf_res_compat_shader
+ vmw_cmdbuf_res_shader,
+ vmw_cmdbuf_res_view
};
struct vmw_cmdbuf_res_manager;
@@ -160,11 +170,13 @@ struct vmw_surface {
struct drm_vmw_size *sizes;
uint32_t num_sizes;
bool scanout;
+ uint32_t array_size;
/* TODO so far just an extra pointer */
struct vmw_cursor_snooper snooper;
struct vmw_surface_offset *offsets;
SVGA3dTextureFilter autogen_filter;
uint32_t multisample_count;
+ struct list_head view_list;
};
struct vmw_marker_queue {
@@ -176,14 +188,15 @@ struct vmw_marker_queue {
struct vmw_fifo_state {
unsigned long reserved_size;
- __le32 *dynamic_buffer;
- __le32 *static_buffer;
+ u32 *dynamic_buffer;
+ u32 *static_buffer;
unsigned long static_buffer_size;
bool using_bounce_buffer;
uint32_t capabilities;
struct mutex fifo_mutex;
struct rw_semaphore rwsem;
struct vmw_marker_queue marker_queue;
+ bool dx;
};
struct vmw_relocation {
@@ -264,70 +277,15 @@ struct vmw_piter {
};
/*
- * enum vmw_ctx_binding_type - abstract resource to context binding types
+ * enum vmw_display_unit_type - Describes the display unit
*/
-enum vmw_ctx_binding_type {
- vmw_ctx_binding_shader,
- vmw_ctx_binding_rt,
- vmw_ctx_binding_tex,
- vmw_ctx_binding_max
+enum vmw_display_unit_type {
+ vmw_du_invalid = 0,
+ vmw_du_legacy,
+ vmw_du_screen_object,
+ vmw_du_screen_target
};
-/**
- * struct vmw_ctx_bindinfo - structure representing a single context binding
- *
- * @ctx: Pointer to the context structure. NULL means the binding is not
- * active.
- * @res: Non ref-counted pointer to the bound resource.
- * @bt: The binding type.
- * @i1: Union of information needed to unbind.
- */
-struct vmw_ctx_bindinfo {
- struct vmw_resource *ctx;
- struct vmw_resource *res;
- enum vmw_ctx_binding_type bt;
- bool scrubbed;
- union {
- SVGA3dShaderType shader_type;
- SVGA3dRenderTargetType rt_type;
- uint32 texture_stage;
- } i1;
-};
-
-/**
- * struct vmw_ctx_binding - structure representing a single context binding
- * - suitable for tracking in a context
- *
- * @ctx_list: List head for context.
- * @res_list: List head for bound resource.
- * @bi: Binding info
- */
-struct vmw_ctx_binding {
- struct list_head ctx_list;
- struct list_head res_list;
- struct vmw_ctx_bindinfo bi;
-};
-
-
-/**
- * struct vmw_ctx_binding_state - context binding state
- *
- * @list: linked list of individual bindings.
- * @render_targets: Render target bindings.
- * @texture_units: Texture units/samplers bindings.
- * @shaders: Shader bindings.
- *
- * Note that this structure also provides storage space for the individual
- * struct vmw_ctx_binding objects, so that no dynamic allocation is needed
- * for individual bindings.
- *
- */
-struct vmw_ctx_binding_state {
- struct list_head list;
- struct vmw_ctx_binding render_targets[SVGA3D_RT_MAX];
- struct vmw_ctx_binding texture_units[SVGA3D_NUM_TEXTURE_UNITS];
- struct vmw_ctx_binding shaders[SVGA3D_SHADERTYPE_MAX];
-};
struct vmw_sw_context{
struct drm_open_hash res_ht;
@@ -342,15 +300,21 @@ struct vmw_sw_context{
uint32_t *cmd_bounce;
uint32_t cmd_bounce_size;
struct list_head resource_list;
- struct ttm_buffer_object *cur_query_bo;
+ struct list_head ctx_resource_list; /* For contexts and cotables */
+ struct vmw_dma_buffer *cur_query_bo;
struct list_head res_relocations;
uint32_t *buf_start;
struct vmw_res_cache_entry res_cache[vmw_res_max];
struct vmw_resource *last_query_ctx;
bool needs_post_query_barrier;
struct vmw_resource *error_resource;
- struct vmw_ctx_binding_state staged_bindings;
+ struct vmw_ctx_binding_state *staged_bindings;
+ bool staged_bindings_inuse;
struct list_head staged_cmd_res;
+ struct vmw_resource_val_node *dx_ctx_node;
+ struct vmw_dma_buffer *dx_query_mob;
+ struct vmw_resource *dx_query_ctx;
+ struct vmw_cmdbuf_res_manager *man;
};
struct vmw_legacy_display;
@@ -358,8 +322,6 @@ struct vmw_overlay;
struct vmw_master {
struct ttm_lock lock;
- struct mutex fb_surf_mutex;
- struct list_head fb_surf;
};
struct vmw_vga_topology_state {
@@ -370,6 +332,26 @@ struct vmw_vga_topology_state {
uint32_t pos_y;
};
+
+/*
+ * struct vmw_otable - Guest Memory OBject table metadata
+ *
+ * @size: Size of the table (page-aligned).
+ * @page_table: Pointer to a struct vmw_mob holding the page table.
+ * @enabled: Whether the otable is currently enabled.
+ */
+struct vmw_otable {
+ unsigned long size;
+ struct vmw_mob *page_table;
+ bool enabled;
+};
+
+struct vmw_otable_batch {
+ unsigned num_otables;
+ struct vmw_otable *otables;
+ struct vmw_resource *context;
+ struct ttm_buffer_object *otable_bo;
+};
+
struct vmw_private {
struct ttm_bo_device bdev;
struct ttm_bo_global_ref bo_global_ref;
@@ -387,9 +369,13 @@ struct vmw_private {
uint32_t mmio_size;
uint32_t fb_max_width;
uint32_t fb_max_height;
+ uint32_t texture_max_width;
+ uint32_t texture_max_height;
+ uint32_t stdu_max_width;
+ uint32_t stdu_max_height;
uint32_t initial_width;
uint32_t initial_height;
- __le32 __iomem *mmio_virt;
+ u32 __iomem *mmio_virt;
int mmio_mtrr;
uint32_t capabilities;
uint32_t max_gmr_ids;
@@ -401,6 +387,7 @@ struct vmw_private {
bool has_mob;
spinlock_t hw_lock;
spinlock_t cap_lock;
+ bool has_dx;
/*
* VGA registers.
@@ -420,6 +407,7 @@ struct vmw_private {
*/
void *fb_info;
+ enum vmw_display_unit_type active_display_unit;
struct vmw_legacy_display *ldu_priv;
struct vmw_screen_object_display *sou_priv;
struct vmw_overlay *overlay_priv;
@@ -453,6 +441,8 @@ struct vmw_private {
spinlock_t waiter_lock;
int fence_queue_waiters; /* Protected by waiter_lock */
int goal_queue_waiters; /* Protected by waiter_lock */
+ int cmdbuf_waiters; /* Protected by irq_lock */
+ int error_waiters; /* Protected by irq_lock */
atomic_t fifo_queue_waiters;
uint32_t last_read_seqno;
spinlock_t irq_lock;
@@ -484,6 +474,7 @@ struct vmw_private {
bool stealth;
bool enable_fb;
+ spinlock_t svga_lock;
/**
* Master management.
@@ -493,9 +484,10 @@ struct vmw_private {
struct vmw_master fbdev_master;
struct notifier_block pm_nb;
bool suspended;
+ bool refuse_hibernation;
struct mutex release_mutex;
- uint32_t num_3d_resources;
+ atomic_t num_fifo_resources;
/*
* Replace this with an rwsem as soon as we have down_xx_interruptible()
@@ -507,8 +499,8 @@ struct vmw_private {
* are protected by the cmdbuf mutex.
*/
- struct ttm_buffer_object *dummy_query_bo;
- struct ttm_buffer_object *pinned_bo;
+ struct vmw_dma_buffer *dummy_query_bo;
+ struct vmw_dma_buffer *pinned_bo;
uint32_t query_cid;
uint32_t query_cid_valid;
bool dummy_query_bo_pinned;
@@ -531,8 +523,9 @@ struct vmw_private {
/*
* Guest Backed stuff
*/
- struct ttm_buffer_object *otable_bo;
- struct vmw_otable *otables;
+ struct vmw_otable_batch otable_batch;
+
+ struct vmw_cmdbuf_man *cman;
};
static inline struct vmw_surface *vmw_res_to_srf(struct vmw_resource *res)
@@ -587,8 +580,9 @@ static inline uint32_t vmw_read(struct vmw_private *dev_priv,
return val;
}
-int vmw_3d_resource_inc(struct vmw_private *dev_priv, bool unhide_svga);
-void vmw_3d_resource_dec(struct vmw_private *dev_priv, bool hide_svga);
+extern void vmw_svga_enable(struct vmw_private *dev_priv);
+extern void vmw_svga_disable(struct vmw_private *dev_priv);
+
/**
* GMR utilities - vmwgfx_gmr.c
@@ -610,7 +604,8 @@ extern struct vmw_resource *vmw_resource_reference(struct vmw_resource *res);
extern struct vmw_resource *
vmw_resource_reference_unless_doomed(struct vmw_resource *res);
extern int vmw_resource_validate(struct vmw_resource *res);
-extern int vmw_resource_reserve(struct vmw_resource *res, bool no_backup);
+extern int vmw_resource_reserve(struct vmw_resource *res, bool interruptible,
+ bool no_backup);
extern bool vmw_resource_needs_backup(const struct vmw_resource *res);
extern int vmw_user_lookup_handle(struct vmw_private *dev_priv,
struct ttm_object_file *tfile,
@@ -660,10 +655,14 @@ extern int vmw_user_stream_lookup(struct vmw_private *dev_priv,
uint32_t *inout_id,
struct vmw_resource **out);
extern void vmw_resource_unreserve(struct vmw_resource *res,
+ bool switch_backup,
struct vmw_dma_buffer *new_backup,
unsigned long new_backup_offset);
extern void vmw_resource_move_notify(struct ttm_buffer_object *bo,
struct ttm_mem_reg *mem);
+extern void vmw_query_move_notify(struct ttm_buffer_object *bo,
+ struct ttm_mem_reg *mem);
+extern int vmw_query_readback_all(struct vmw_dma_buffer *dx_query_mob);
extern void vmw_fence_single_bo(struct ttm_buffer_object *bo,
struct vmw_fence_obj *fence);
extern void vmw_resource_evict_all(struct vmw_private *dev_priv);
@@ -671,25 +670,25 @@ extern void vmw_resource_evict_all(struct vmw_private *dev_priv);
/**
* DMA buffer helper routines - vmwgfx_dmabuf.c
*/
-extern int vmw_dmabuf_to_placement(struct vmw_private *vmw_priv,
- struct vmw_dma_buffer *bo,
- struct ttm_placement *placement,
- bool interruptible);
-extern int vmw_dmabuf_to_vram(struct vmw_private *dev_priv,
- struct vmw_dma_buffer *buf,
- bool pin, bool interruptible);
-extern int vmw_dmabuf_to_vram_or_gmr(struct vmw_private *dev_priv,
- struct vmw_dma_buffer *buf,
- bool pin, bool interruptible);
-extern int vmw_dmabuf_to_start_of_vram(struct vmw_private *vmw_priv,
+extern int vmw_dmabuf_pin_in_placement(struct vmw_private *vmw_priv,
struct vmw_dma_buffer *bo,
- bool pin, bool interruptible);
+ struct ttm_placement *placement,
+ bool interruptible);
+extern int vmw_dmabuf_pin_in_vram(struct vmw_private *dev_priv,
+ struct vmw_dma_buffer *buf,
+ bool interruptible);
+extern int vmw_dmabuf_pin_in_vram_or_gmr(struct vmw_private *dev_priv,
+ struct vmw_dma_buffer *buf,
+ bool interruptible);
+extern int vmw_dmabuf_pin_in_start_of_vram(struct vmw_private *vmw_priv,
+ struct vmw_dma_buffer *bo,
+ bool interruptible);
extern int vmw_dmabuf_unpin(struct vmw_private *vmw_priv,
struct vmw_dma_buffer *bo,
bool interruptible);
extern void vmw_bo_get_guest_ptr(const struct ttm_buffer_object *buf,
SVGAGuestPtr *ptr);
-extern void vmw_bo_pin(struct ttm_buffer_object *bo, bool pin);
+extern void vmw_bo_pin_reserved(struct vmw_dma_buffer *bo, bool pin);
/**
* Misc Ioctl functionality - vmwgfx_ioctl.c
@@ -717,7 +716,10 @@ extern int vmw_fifo_init(struct vmw_private *dev_priv,
extern void vmw_fifo_release(struct vmw_private *dev_priv,
struct vmw_fifo_state *fifo);
extern void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes);
+extern void *
+vmw_fifo_reserve_dx(struct vmw_private *dev_priv, uint32_t bytes, int ctx_id);
extern void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes);
+extern void vmw_fifo_commit_flush(struct vmw_private *dev_priv, uint32_t bytes);
extern int vmw_fifo_send_fence(struct vmw_private *dev_priv,
uint32_t *seqno);
extern void vmw_fifo_ping_host_locked(struct vmw_private *, uint32_t reason);
@@ -726,6 +728,8 @@ extern bool vmw_fifo_have_3d(struct vmw_private *dev_priv);
extern bool vmw_fifo_have_pitchlock(struct vmw_private *dev_priv);
extern int vmw_fifo_emit_dummy_query(struct vmw_private *dev_priv,
uint32_t cid);
+extern int vmw_fifo_flush(struct vmw_private *dev_priv,
+ bool interruptible);
/**
* TTM glue - vmwgfx_ttm_glue.c
@@ -750,6 +754,7 @@ extern struct ttm_placement vmw_sys_ne_placement;
extern struct ttm_placement vmw_evictable_placement;
extern struct ttm_placement vmw_srf_placement;
extern struct ttm_placement vmw_mob_placement;
+extern struct ttm_placement vmw_mob_ne_placement;
extern struct ttm_bo_driver vmw_bo_driver;
extern int vmw_dma_quiescent(struct drm_device *dev);
extern int vmw_bo_map_dma(struct ttm_buffer_object *bo);
@@ -800,14 +805,15 @@ static inline struct page *vmw_piter_page(struct vmw_piter *viter)
* Command submission - vmwgfx_execbuf.c
*/
-extern int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
+extern int vmw_execbuf_ioctl(struct drm_device *dev, unsigned long data,
+ struct drm_file *file_priv, size_t size);
extern int vmw_execbuf_process(struct drm_file *file_priv,
struct vmw_private *dev_priv,
void __user *user_commands,
void *kernel_commands,
uint32_t command_size,
uint64_t throttle_us,
+ uint32_t dx_context_handle,
struct drm_vmw_fence_rep __user
*user_fence_rep,
struct vmw_fence_obj **out_fence);
@@ -826,6 +832,11 @@ extern void vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
*user_fence_rep,
struct vmw_fence_obj *fence,
uint32_t fence_handle);
+extern int vmw_validate_single_buffer(struct vmw_private *dev_priv,
+ struct ttm_buffer_object *bo,
+ bool interruptible,
+ bool validate_as_mob);
+
/**
* IRQs and waiting - vmwgfx_irq.c
@@ -833,8 +844,8 @@ extern void vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
extern irqreturn_t vmw_irq_handler(int irq, void *arg);
extern int vmw_wait_seqno(struct vmw_private *dev_priv, bool lazy,
- uint32_t seqno, bool interruptible,
- unsigned long timeout);
+ uint32_t seqno, bool interruptible,
+ unsigned long timeout);
extern void vmw_irq_preinstall(struct drm_device *dev);
extern int vmw_irq_postinstall(struct drm_device *dev);
extern void vmw_irq_uninstall(struct drm_device *dev);
@@ -852,6 +863,10 @@ extern void vmw_seqno_waiter_add(struct vmw_private *dev_priv);
extern void vmw_seqno_waiter_remove(struct vmw_private *dev_priv);
extern void vmw_goal_waiter_add(struct vmw_private *dev_priv);
extern void vmw_goal_waiter_remove(struct vmw_private *dev_priv);
+extern void vmw_generic_waiter_add(struct vmw_private *dev_priv, u32 flag,
+ int *waiter_count);
+extern void vmw_generic_waiter_remove(struct vmw_private *dev_priv,
+ u32 flag, int *waiter_count);
/**
* Rudimentary fence-like objects currently used only for throttling -
@@ -861,9 +876,9 @@ extern void vmw_goal_waiter_remove(struct vmw_private *dev_priv);
extern void vmw_marker_queue_init(struct vmw_marker_queue *queue);
extern void vmw_marker_queue_takedown(struct vmw_marker_queue *queue);
extern int vmw_marker_push(struct vmw_marker_queue *queue,
- uint32_t seqno);
+ uint32_t seqno);
extern int vmw_marker_pull(struct vmw_marker_queue *queue,
- uint32_t signaled_seqno);
+ uint32_t signaled_seqno);
extern int vmw_wait_lag(struct vmw_private *dev_priv,
struct vmw_marker_queue *queue, uint32_t us);
@@ -908,12 +923,6 @@ int vmw_kms_present(struct vmw_private *dev_priv,
uint32_t sid, int32_t destX, int32_t destY,
struct drm_vmw_rect *clips,
uint32_t num_clips);
-int vmw_kms_readback(struct vmw_private *dev_priv,
- struct drm_file *file_priv,
- struct vmw_framebuffer *vfb,
- struct drm_vmw_fence_rep __user *user_fence_rep,
- struct drm_vmw_rect *clips,
- uint32_t num_clips);
int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
@@ -927,6 +936,10 @@ int vmw_dumb_map_offset(struct drm_file *file_priv,
int vmw_dumb_destroy(struct drm_file *file_priv,
struct drm_device *dev,
uint32_t handle);
+extern int vmw_resource_pin(struct vmw_resource *res, bool interruptible);
+extern void vmw_resource_unpin(struct vmw_resource *res);
+extern enum vmw_res_type vmw_res_type(const struct vmw_resource *res);
+
/**
* Overlay control - vmwgfx_overlay.c
*/
@@ -982,27 +995,33 @@ extern void vmw_otables_takedown(struct vmw_private *dev_priv);
extern const struct vmw_user_resource_conv *user_context_converter;
-extern struct vmw_resource *vmw_context_alloc(struct vmw_private *dev_priv);
-
extern int vmw_context_check(struct vmw_private *dev_priv,
struct ttm_object_file *tfile,
int id,
struct vmw_resource **p_res);
extern int vmw_context_define_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
+extern int vmw_extended_context_define_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
extern int vmw_context_destroy_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
-extern int vmw_context_binding_add(struct vmw_ctx_binding_state *cbs,
- const struct vmw_ctx_bindinfo *ci);
-extern void
-vmw_context_binding_state_transfer(struct vmw_resource *res,
- struct vmw_ctx_binding_state *cbs);
-extern void vmw_context_binding_res_list_kill(struct list_head *head);
-extern void vmw_context_binding_res_list_scrub(struct list_head *head);
-extern int vmw_context_rebind_all(struct vmw_resource *ctx);
extern struct list_head *vmw_context_binding_list(struct vmw_resource *ctx);
extern struct vmw_cmdbuf_res_manager *
vmw_context_res_man(struct vmw_resource *ctx);
+extern struct vmw_resource *vmw_context_cotable(struct vmw_resource *ctx,
+ SVGACOTableType cotable_type);
+extern struct list_head *vmw_context_binding_list(struct vmw_resource *ctx);
+struct vmw_ctx_binding_state;
+extern struct vmw_ctx_binding_state *
+vmw_context_binding_state(struct vmw_resource *ctx);
+extern void vmw_dx_context_scrub_cotables(struct vmw_resource *ctx,
+ bool readback);
+extern int vmw_context_bind_dx_query(struct vmw_resource *ctx_res,
+ struct vmw_dma_buffer *mob);
+extern struct vmw_dma_buffer *
+vmw_context_get_dx_query_mob(struct vmw_resource *ctx_res);
+
+
/*
* Surface management - vmwgfx_surface.c
*/
@@ -1025,6 +1044,16 @@ extern int vmw_surface_check(struct vmw_private *dev_priv,
uint32_t handle, int *id);
extern int vmw_surface_validate(struct vmw_private *dev_priv,
struct vmw_surface *srf);
+int vmw_surface_gb_priv_define(struct drm_device *dev,
+ uint32_t user_accounting_size,
+ uint32_t svga3d_flags,
+ SVGA3dSurfaceFormat format,
+ bool for_scanout,
+ uint32_t num_mip_levels,
+ uint32_t multisample_count,
+ uint32_t array_size,
+ struct drm_vmw_size size,
+ struct vmw_surface **srf_out);
/*
* Shader management - vmwgfx_shader.c
@@ -1042,12 +1071,21 @@ extern int vmw_compat_shader_add(struct vmw_private *dev_priv,
SVGA3dShaderType shader_type,
size_t size,
struct list_head *list);
-extern int vmw_compat_shader_remove(struct vmw_cmdbuf_res_manager *man,
- u32 user_key, SVGA3dShaderType shader_type,
- struct list_head *list);
+extern int vmw_shader_remove(struct vmw_cmdbuf_res_manager *man,
+ u32 user_key, SVGA3dShaderType shader_type,
+ struct list_head *list);
+extern int vmw_dx_shader_add(struct vmw_cmdbuf_res_manager *man,
+ struct vmw_resource *ctx,
+ u32 user_key,
+ SVGA3dShaderType shader_type,
+ struct list_head *list);
+extern void vmw_dx_shader_cotable_list_scrub(struct vmw_private *dev_priv,
+ struct list_head *list,
+ bool readback);
+
extern struct vmw_resource *
-vmw_compat_shader_lookup(struct vmw_cmdbuf_res_manager *man,
- u32 user_key, SVGA3dShaderType shader_type);
+vmw_shader_lookup(struct vmw_cmdbuf_res_manager *man,
+ u32 user_key, SVGA3dShaderType shader_type);
/*
* Command buffer managed resources - vmwgfx_cmdbuf_res.c
@@ -1071,7 +1109,48 @@ extern int vmw_cmdbuf_res_add(struct vmw_cmdbuf_res_manager *man,
extern int vmw_cmdbuf_res_remove(struct vmw_cmdbuf_res_manager *man,
enum vmw_cmdbuf_res_type res_type,
u32 user_key,
- struct list_head *list);
+ struct list_head *list,
+ struct vmw_resource **res);
+
+/*
+ * COTable management - vmwgfx_cotable.c
+ */
+extern const SVGACOTableType vmw_cotable_scrub_order[];
+extern struct vmw_resource *vmw_cotable_alloc(struct vmw_private *dev_priv,
+ struct vmw_resource *ctx,
+ u32 type);
+extern int vmw_cotable_notify(struct vmw_resource *res, int id);
+extern int vmw_cotable_scrub(struct vmw_resource *res, bool readback);
+extern void vmw_cotable_add_resource(struct vmw_resource *ctx,
+ struct list_head *head);
+
+/*
+ * Command buffer management - vmwgfx_cmdbuf.c
+ */
+struct vmw_cmdbuf_man;
+struct vmw_cmdbuf_header;
+
+extern struct vmw_cmdbuf_man *
+vmw_cmdbuf_man_create(struct vmw_private *dev_priv);
+extern int vmw_cmdbuf_set_pool_size(struct vmw_cmdbuf_man *man,
+ size_t size, size_t default_size);
+extern void vmw_cmdbuf_remove_pool(struct vmw_cmdbuf_man *man);
+extern void vmw_cmdbuf_man_destroy(struct vmw_cmdbuf_man *man);
+extern int vmw_cmdbuf_idle(struct vmw_cmdbuf_man *man, bool interruptible,
+ unsigned long timeout);
+extern void *vmw_cmdbuf_reserve(struct vmw_cmdbuf_man *man, size_t size,
+ int ctx_id, bool interruptible,
+ struct vmw_cmdbuf_header *header);
+extern void vmw_cmdbuf_commit(struct vmw_cmdbuf_man *man, size_t size,
+ struct vmw_cmdbuf_header *header,
+ bool flush);
+extern void vmw_cmdbuf_tasklet_schedule(struct vmw_cmdbuf_man *man);
+extern void *vmw_cmdbuf_alloc(struct vmw_cmdbuf_man *man,
+ size_t size, bool interruptible,
+ struct vmw_cmdbuf_header **p_header);
+extern void vmw_cmdbuf_header_free(struct vmw_cmdbuf_header *header);
+extern int vmw_cmdbuf_cur_flush(struct vmw_cmdbuf_man *man,
+ bool interruptible);
/**
@@ -1116,4 +1195,14 @@ static inline struct ttm_mem_global *vmw_mem_glob(struct vmw_private *dev_priv)
{
return (struct ttm_mem_global *) dev_priv->mem_global_ref.object;
}
+
+static inline void vmw_fifo_resource_inc(struct vmw_private *dev_priv)
+{
+ atomic_inc(&dev_priv->num_fifo_resources);
+}
+
+static inline void vmw_fifo_resource_dec(struct vmw_private *dev_priv)
+{
+ atomic_dec(&dev_priv->num_fifo_resources);
+}
#endif
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index 97ad3bcb99a7..b56565457c96 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -1,6 +1,6 @@
/**************************************************************************
*
- * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
+ * Copyright © 2009 - 2015 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -29,6 +29,8 @@
#include "vmwgfx_reg.h"
#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_placement.h>
+#include "vmwgfx_so.h"
+#include "vmwgfx_binding.h"
#define VMW_RES_HT_ORDER 12
@@ -59,8 +61,11 @@ struct vmw_resource_relocation {
* @new_backup_offset: New backup buffer offset if @new_backup is non-NULL.
* @first_usage: Set to true the first time the resource is referenced in
* the command stream.
- * @no_buffer_needed: Resources do not need to allocate buffer backup on
- * reservation. The command stream will provide one.
+ * @switching_backup: The command stream provides a new backup buffer for a
+ * resource.
+ * @no_buffer_needed: This means @switching_backup is true on the first
+ * buffer reference, so resource reservation does not need to allocate a
+ * backup buffer for the resource.
*/
struct vmw_resource_val_node {
struct list_head head;
@@ -69,8 +74,9 @@ struct vmw_resource_val_node {
struct vmw_dma_buffer *new_backup;
struct vmw_ctx_binding_state *staged_bindings;
unsigned long new_backup_offset;
- bool first_usage;
- bool no_buffer_needed;
+ u32 first_usage : 1;
+ u32 switching_backup : 1;
+ u32 no_buffer_needed : 1;
};
/**
@@ -92,22 +98,40 @@ struct vmw_cmd_entry {
[(_cmd) - SVGA_3D_CMD_BASE] = {(_func), (_user_allow),\
(_gb_disable), (_gb_enable)}
+static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ struct vmw_resource *ctx);
+static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ SVGAMobId *id,
+ struct vmw_dma_buffer **vmw_bo_p);
+static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
+ struct vmw_dma_buffer *vbo,
+ bool validate_as_mob,
+ uint32_t *p_val_node);
+
+
/**
- * vmw_resource_unreserve - unreserve resources previously reserved for
+ * vmw_resources_unreserve - unreserve resources previously reserved for
* command submission.
*
- * @list_head: list of resources to unreserve.
+ * @sw_context: pointer to the software context
* @backoff: Whether command submission failed.
*/
-static void vmw_resource_list_unreserve(struct list_head *list,
- bool backoff)
+static void vmw_resources_unreserve(struct vmw_sw_context *sw_context,
+ bool backoff)
{
struct vmw_resource_val_node *val;
+ struct list_head *list = &sw_context->resource_list;
+
+ if (sw_context->dx_query_mob && !backoff)
+ vmw_context_bind_dx_query(sw_context->dx_query_ctx,
+ sw_context->dx_query_mob);
list_for_each_entry(val, list, head) {
struct vmw_resource *res = val->res;
- struct vmw_dma_buffer *new_backup =
- backoff ? NULL : val->new_backup;
+ bool switch_backup =
+ (backoff) ? false : val->switching_backup;
/*
* Transfer staged context bindings to the
@@ -115,18 +139,71 @@ static void vmw_resource_list_unreserve(struct list_head *list,
*/
if (unlikely(val->staged_bindings)) {
if (!backoff) {
- vmw_context_binding_state_transfer
- (val->res, val->staged_bindings);
+ vmw_binding_state_commit
+ (vmw_context_binding_state(val->res),
+ val->staged_bindings);
}
- kfree(val->staged_bindings);
+
+ if (val->staged_bindings != sw_context->staged_bindings)
+ vmw_binding_state_free(val->staged_bindings);
+ else
+ sw_context->staged_bindings_inuse = false;
val->staged_bindings = NULL;
}
- vmw_resource_unreserve(res, new_backup,
- val->new_backup_offset);
+ vmw_resource_unreserve(res, switch_backup, val->new_backup,
+ val->new_backup_offset);
vmw_dmabuf_unreference(&val->new_backup);
}
}
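
/*
 * Illustrative sketch (userspace, not driver code): the commit-or-backoff
 * rule in vmw_resources_unreserve() above. On success the staged
 * per-resource state is committed; on backoff it is simply thrown away.
 * All types below are stand-ins.
 */
#include <stdbool.h>
#include <stdio.h>

struct staged { int new_value; };
struct resource { int value; };

static void unreserve(struct resource *res, struct staged *st, bool backoff)
{
	if (!backoff)
		res->value = st->new_value;	/* commit staged state */
	/* on backoff the staged state is discarded unchanged */
}

int main(void)
{
	struct resource res = { .value = 1 };
	struct staged st = { .new_value = 2 };

	unreserve(&res, &st, true);	/* backoff: value stays 1 */
	printf("after backoff: %d\n", res.value);
	unreserve(&res, &st, false);	/* commit: value becomes 2 */
	printf("after commit: %d\n", res.value);
	return 0;
}
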
+/**
+ * vmw_cmd_ctx_first_setup - Perform the setup needed when a context is
+ * added to the validate list.
+ *
+ * @dev_priv: Pointer to the device private.
+ * @sw_context: The validation context.
+ * @node: The validation node holding this context.
+ */
+static int vmw_cmd_ctx_first_setup(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ struct vmw_resource_val_node *node)
+{
+ int ret;
+
+ ret = vmw_resource_context_res_add(dev_priv, sw_context, node->res);
+ if (unlikely(ret != 0))
+ goto out_err;
+
+ if (!sw_context->staged_bindings) {
+ sw_context->staged_bindings =
+ vmw_binding_state_alloc(dev_priv);
+ if (IS_ERR(sw_context->staged_bindings)) {
+ DRM_ERROR("Failed to allocate context binding "
+ "information.\n");
+ ret = PTR_ERR(sw_context->staged_bindings);
+ sw_context->staged_bindings = NULL;
+ goto out_err;
+ }
+ }
+
+ if (sw_context->staged_bindings_inuse) {
+ node->staged_bindings = vmw_binding_state_alloc(dev_priv);
+ if (IS_ERR(node->staged_bindings)) {
+ DRM_ERROR("Failed to allocate context binding "
+ "information.\n");
+ ret = PTR_ERR(node->staged_bindings);
+ node->staged_bindings = NULL;
+ goto out_err;
+ }
+ } else {
+ node->staged_bindings = sw_context->staged_bindings;
+ sw_context->staged_bindings_inuse = true;
+ }
+
+ return 0;
+out_err:
+ return ret;
+}
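
/*
 * Illustrative sketch (userspace, not driver code): the allocation-reuse
 * scheme in vmw_cmd_ctx_first_setup() above. The first context in a
 * submission borrows the sw_context's preallocated binding state; any
 * further context gets its own allocation. Types below are stand-ins.
 */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct binding_state { int dummy; };

struct sw_ctx {
	struct binding_state *staged;	/* allocated once, then reused */
	bool staged_inuse;
};

static struct binding_state *get_staged(struct sw_ctx *sw)
{
	if (!sw->staged) {
		sw->staged = calloc(1, sizeof(*sw->staged));
		if (!sw->staged)
			return NULL;
	}
	if (!sw->staged_inuse) {
		sw->staged_inuse = true;	/* hand out the shared one */
		return sw->staged;
	}
	/* shared state already taken: this context allocates its own */
	return calloc(1, sizeof(struct binding_state));
}

int main(void)
{
	struct sw_ctx sw = { 0 };
	struct binding_state *a = get_staged(&sw);
	struct binding_state *b = get_staged(&sw);

	printf("first borrows preallocation: %s\n",
	       a == sw.staged ? "yes" : "no");
	printf("second gets its own: %s\n", b != sw.staged ? "yes" : "no");
	if (b && b != sw.staged)
		free(b);
	free(sw.staged);
	return 0;
}
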
/**
* vmw_resource_val_add - Add a resource to the software context's
@@ -141,6 +218,7 @@ static int vmw_resource_val_add(struct vmw_sw_context *sw_context,
struct vmw_resource *res,
struct vmw_resource_val_node **p_node)
{
+ struct vmw_private *dev_priv = res->dev_priv;
struct vmw_resource_val_node *node;
struct drm_hash_item *hash;
int ret;
@@ -169,14 +247,90 @@ static int vmw_resource_val_add(struct vmw_sw_context *sw_context,
kfree(node);
return ret;
}
- list_add_tail(&node->head, &sw_context->resource_list);
node->res = vmw_resource_reference(res);
node->first_usage = true;
-
if (unlikely(p_node != NULL))
*p_node = node;
- return 0;
+ if (!dev_priv->has_mob) {
+ list_add_tail(&node->head, &sw_context->resource_list);
+ return 0;
+ }
+
+ switch (vmw_res_type(res)) {
+ case vmw_res_context:
+ case vmw_res_dx_context:
+ list_add(&node->head, &sw_context->ctx_resource_list);
+ ret = vmw_cmd_ctx_first_setup(dev_priv, sw_context, node);
+ break;
+ case vmw_res_cotable:
+ list_add_tail(&node->head, &sw_context->ctx_resource_list);
+ break;
+ default:
+ list_add_tail(&node->head, &sw_context->resource_list);
+ break;
+ }
+
+ return ret;
+}
+
+/**
+ * vmw_view_res_val_add - Add a view and the surface it's pointing to
+ * to the validation list
+ *
+ * @sw_context: The software context holding the validation list.
+ * @view: Pointer to the view resource.
+ *
+ * Returns 0 if success, negative error code otherwise.
+ */
+static int vmw_view_res_val_add(struct vmw_sw_context *sw_context,
+ struct vmw_resource *view)
+{
+ int ret;
+
+ /*
+ * First add the resource the view is pointing to, otherwise
+ * it may be swapped out when the view is validated.
+ */
+ ret = vmw_resource_val_add(sw_context, vmw_view_srf(view), NULL);
+ if (ret)
+ return ret;
+
+ return vmw_resource_val_add(sw_context, view, NULL);
+}
+
+/**
+ * vmw_view_id_val_add - Look up a view and add it, and the surface it
+ * points to, to the validation list.
+ *
+ * @sw_context: The software context holding the validation list.
+ * @view_type: The view type to look up.
+ * @id: view id of the view.
+ *
+ * The view is represented by a view id and the DX context it's created on,
+ * or scheduled for creation on. If there is no DX context set, the function
+ * will return -EINVAL. Otherwise it returns 0 on success or a negative
+ * error code on failure.
+ */
+static int vmw_view_id_val_add(struct vmw_sw_context *sw_context,
+ enum vmw_view_type view_type, u32 id)
+{
+ struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
+ struct vmw_resource *view;
+ int ret;
+
+ if (!ctx_node) {
+ DRM_ERROR("DX Context not set.\n");
+ return -EINVAL;
+ }
+
+ view = vmw_view_lookup(sw_context->man, view_type, id);
+ if (IS_ERR(view))
+ return PTR_ERR(view);
+
+ ret = vmw_view_res_val_add(sw_context, view);
+ vmw_resource_unreference(&view);
+
+ return ret;
}
/**
@@ -195,24 +349,56 @@ static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
struct vmw_resource *ctx)
{
struct list_head *binding_list;
- struct vmw_ctx_binding *entry;
+ struct vmw_ctx_bindinfo *entry;
int ret = 0;
struct vmw_resource *res;
+ u32 i;
+ /* Add all cotables to the validation list. */
+ if (dev_priv->has_dx && vmw_res_type(ctx) == vmw_res_dx_context) {
+ for (i = 0; i < SVGA_COTABLE_DX10_MAX; ++i) {
+ res = vmw_context_cotable(ctx, i);
+ if (IS_ERR(res))
+ continue;
+
+ ret = vmw_resource_val_add(sw_context, res, NULL);
+ vmw_resource_unreference(&res);
+ if (unlikely(ret != 0))
+ return ret;
+ }
+ }
+
+
+ /* Add all resources bound to the context to the validation list */
mutex_lock(&dev_priv->binding_mutex);
binding_list = vmw_context_binding_list(ctx);
list_for_each_entry(entry, binding_list, ctx_list) {
- res = vmw_resource_reference_unless_doomed(entry->bi.res);
+ /* entry->res is not refcounted */
+ res = vmw_resource_reference_unless_doomed(entry->res);
if (unlikely(res == NULL))
continue;
- ret = vmw_resource_val_add(sw_context, entry->bi.res, NULL);
+ if (vmw_res_type(entry->res) == vmw_res_view)
+ ret = vmw_view_res_val_add(sw_context, entry->res);
+ else
+ ret = vmw_resource_val_add(sw_context, entry->res,
+ NULL);
vmw_resource_unreference(&res);
if (unlikely(ret != 0))
break;
}
+ if (dev_priv->has_dx && vmw_res_type(ctx) == vmw_res_dx_context) {
+ struct vmw_dma_buffer *dx_query_mob;
+
+ dx_query_mob = vmw_context_get_dx_query_mob(ctx);
+ if (dx_query_mob)
+ ret = vmw_bo_to_validate_list(sw_context,
+ dx_query_mob,
+ true, NULL);
+ }
+
mutex_unlock(&dev_priv->binding_mutex);
return ret;
}
@@ -308,7 +494,7 @@ static int vmw_cmd_ok(struct vmw_private *dev_priv,
* submission is reached.
*/
static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
- struct ttm_buffer_object *bo,
+ struct vmw_dma_buffer *vbo,
bool validate_as_mob,
uint32_t *p_val_node)
{
@@ -318,7 +504,7 @@ static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
struct drm_hash_item *hash;
int ret;
- if (likely(drm_ht_find_item(&sw_context->res_ht, (unsigned long) bo,
+ if (likely(drm_ht_find_item(&sw_context->res_ht, (unsigned long) vbo,
&hash) == 0)) {
vval_buf = container_of(hash, struct vmw_validate_buffer,
hash);
@@ -336,7 +522,7 @@ static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
return -EINVAL;
}
vval_buf = &sw_context->val_bufs[val_node];
- vval_buf->hash.key = (unsigned long) bo;
+ vval_buf->hash.key = (unsigned long) vbo;
ret = drm_ht_insert_item(&sw_context->res_ht, &vval_buf->hash);
if (unlikely(ret != 0)) {
DRM_ERROR("Failed to initialize a buffer validation "
@@ -345,7 +531,7 @@ static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
}
++sw_context->cur_val_buf;
val_buf = &vval_buf->base;
- val_buf->bo = ttm_bo_reference(bo);
+ val_buf->bo = ttm_bo_reference(&vbo->base);
val_buf->shared = false;
list_add_tail(&val_buf->head, &sw_context->validate_nodes);
vval_buf->validate_as_mob = validate_as_mob;
@@ -370,27 +556,39 @@ static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
static int vmw_resources_reserve(struct vmw_sw_context *sw_context)
{
struct vmw_resource_val_node *val;
- int ret;
+ int ret = 0;
list_for_each_entry(val, &sw_context->resource_list, head) {
struct vmw_resource *res = val->res;
- ret = vmw_resource_reserve(res, val->no_buffer_needed);
+ ret = vmw_resource_reserve(res, true, val->no_buffer_needed);
if (unlikely(ret != 0))
return ret;
if (res->backup) {
- struct ttm_buffer_object *bo = &res->backup->base;
+ struct vmw_dma_buffer *vbo = res->backup;
ret = vmw_bo_to_validate_list
- (sw_context, bo,
+ (sw_context, vbo,
vmw_resource_needs_backup(res), NULL);
if (unlikely(ret != 0))
return ret;
}
}
- return 0;
+
+ if (sw_context->dx_query_mob) {
+ struct vmw_dma_buffer *expected_dx_query_mob;
+
+ expected_dx_query_mob =
+ vmw_context_get_dx_query_mob(sw_context->dx_query_ctx);
+ if (expected_dx_query_mob &&
+ expected_dx_query_mob != sw_context->dx_query_mob) {
+ ret = -EINVAL;
+ }
+ }
+
+ return ret;
}
/**
@@ -409,6 +607,7 @@ static int vmw_resources_validate(struct vmw_sw_context *sw_context)
list_for_each_entry(val, &sw_context->resource_list, head) {
struct vmw_resource *res = val->res;
+ struct vmw_dma_buffer *backup = res->backup;
ret = vmw_resource_validate(res);
if (unlikely(ret != 0)) {
@@ -416,18 +615,29 @@ static int vmw_resources_validate(struct vmw_sw_context *sw_context)
DRM_ERROR("Failed to validate resource.\n");
return ret;
}
+
+ /* Check if the resource switched backup buffer */
+ if (backup && res->backup && (backup != res->backup)) {
+ struct vmw_dma_buffer *vbo = res->backup;
+
+ ret = vmw_bo_to_validate_list
+ (sw_context, vbo,
+ vmw_resource_needs_backup(res), NULL);
+ if (ret) {
+ ttm_bo_unreserve(&vbo->base);
+ return ret;
+ }
+ }
}
return 0;
}
-
/**
* vmw_cmd_res_reloc_add - Add a resource to a software context's
* relocation- and validation lists.
*
* @dev_priv: Pointer to a struct vmw_private identifying the device.
* @sw_context: Pointer to the software context.
- * @res_type: Resource type.
* @id_loc: Pointer to where the id that needs translation is located.
* @res: Valid pointer to a struct vmw_resource.
* @p_val: If non null, a pointer to the struct vmw_resource_validate_node
@@ -435,7 +645,6 @@ static int vmw_resources_validate(struct vmw_sw_context *sw_context)
*/
static int vmw_cmd_res_reloc_add(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
- enum vmw_res_type res_type,
uint32_t *id_loc,
struct vmw_resource *res,
struct vmw_resource_val_node **p_val)
@@ -454,29 +663,6 @@ static int vmw_cmd_res_reloc_add(struct vmw_private *dev_priv,
if (unlikely(ret != 0))
return ret;
- if (res_type == vmw_res_context && dev_priv->has_mob &&
- node->first_usage) {
-
- /*
- * Put contexts first on the list to be able to exit
- * list traversal for contexts early.
- */
- list_del(&node->head);
- list_add(&node->head, &sw_context->resource_list);
-
- ret = vmw_resource_context_res_add(dev_priv, sw_context, res);
- if (unlikely(ret != 0))
- return ret;
- node->staged_bindings =
- kzalloc(sizeof(*node->staged_bindings), GFP_KERNEL);
- if (node->staged_bindings == NULL) {
- DRM_ERROR("Failed to allocate context binding "
- "information.\n");
- return -ENOMEM;
- }
- INIT_LIST_HEAD(&node->staged_bindings->list);
- }
-
if (p_val)
*p_val = node;
@@ -554,7 +740,7 @@ vmw_cmd_res_check(struct vmw_private *dev_priv,
rcache->res = res;
rcache->handle = *id_loc;
- ret = vmw_cmd_res_reloc_add(dev_priv, sw_context, res_type, id_loc,
+ ret = vmw_cmd_res_reloc_add(dev_priv, sw_context, id_loc,
res, &node);
if (unlikely(ret != 0))
goto out_no_reloc;
@@ -573,6 +759,46 @@ out_no_reloc:
}
/**
+ * vmw_rebind_all_dx_query - Rebind DX query associated with the context
+ *
+ * @ctx_res: context the query belongs to
+ *
+ * This function assumes binding_mutex is held.
+ */
+static int vmw_rebind_all_dx_query(struct vmw_resource *ctx_res)
+{
+ struct vmw_private *dev_priv = ctx_res->dev_priv;
+ struct vmw_dma_buffer *dx_query_mob;
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDXBindAllQuery body;
+ } *cmd;
+
+
+ dx_query_mob = vmw_context_get_dx_query_mob(ctx_res);
+
+ if (!dx_query_mob || dx_query_mob->dx_query_ctx)
+ return 0;
+
+ cmd = vmw_fifo_reserve_dx(dev_priv, sizeof(*cmd), ctx_res->id);
+
+ if (cmd == NULL) {
+ DRM_ERROR("Failed to rebind queries.\n");
+ return -ENOMEM;
+ }
+
+ cmd->header.id = SVGA_3D_CMD_DX_BIND_ALL_QUERY;
+ cmd->header.size = sizeof(cmd->body);
+ cmd->body.cid = ctx_res->id;
+ cmd->body.mobid = dx_query_mob->base.mem.start;
+ vmw_fifo_commit(dev_priv, sizeof(*cmd));
+
+ vmw_context_bind_dx_query(ctx_res, dx_query_mob);
+
+ return 0;
+}
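
/*
 * Illustrative sketch (userspace, not driver code): the reserve/fill/commit
 * FIFO pattern that vmw_rebind_all_dx_query() above follows. Space for a
 * fixed-size command is reserved, header and body are filled in place, and
 * exactly the reserved size is committed. fifo_reserve()/fifo_commit() are
 * mock-ups of the vmw_fifo_* API, not the real functions.
 */
#include <stdint.h>
#include <stdio.h>

struct cmd_header { uint32_t id; uint32_t size; };
struct cmd_bind_all_query { uint32_t cid; uint32_t mobid; };

static uint8_t fifo[256];
static size_t fifo_used;

static void *fifo_reserve(size_t bytes)
{
	if (fifo_used + bytes > sizeof(fifo))
		return NULL;		/* caller must handle failure */
	return fifo + fifo_used;
}

static void fifo_commit(size_t bytes)
{
	fifo_used += bytes;		/* hand the range to the device */
}

int main(void)
{
	struct {
		struct cmd_header header;
		struct cmd_bind_all_query body;
	} *cmd = fifo_reserve(sizeof(*cmd));

	if (!cmd)
		return 1;
	cmd->header.id = 0x1234;	/* stand-in for the SVGA command id */
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = 7;
	cmd->body.mobid = 42;
	fifo_commit(sizeof(*cmd));
	printf("committed %zu bytes\n", fifo_used);
	return 0;
}
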
+
+/**
* vmw_rebind_contexts - Rebind all resources previously bound to
* referenced contexts.
*
@@ -589,12 +815,80 @@ static int vmw_rebind_contexts(struct vmw_sw_context *sw_context)
if (unlikely(!val->staged_bindings))
break;
- ret = vmw_context_rebind_all(val->res);
+ ret = vmw_binding_rebind_all
+ (vmw_context_binding_state(val->res));
if (unlikely(ret != 0)) {
if (ret != -ERESTARTSYS)
DRM_ERROR("Failed to rebind context.\n");
return ret;
}
+
+ ret = vmw_rebind_all_dx_query(val->res);
+ if (ret != 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * vmw_view_bindings_add - Add an array of view bindings to a context
+ * binding state tracker.
+ *
+ * @sw_context: The execbuf state used for this command.
+ * @view_type: View type for the bindings.
+ * @binding_type: Binding type for the bindings.
+ * @shader_slot: The shader slot to use for the bindings.
+ * @view_ids: Array of view ids to be bound.
+ * @num_views: Number of view ids in @view_ids.
+ * @first_slot: The binding slot to be used for the first view id in @view_ids.
+ */
+static int vmw_view_bindings_add(struct vmw_sw_context *sw_context,
+ enum vmw_view_type view_type,
+ enum vmw_ctx_binding_type binding_type,
+ uint32 shader_slot,
+ uint32 view_ids[], u32 num_views,
+ u32 first_slot)
+{
+ struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
+ struct vmw_cmdbuf_res_manager *man;
+ u32 i;
+ int ret;
+
+ if (!ctx_node) {
+ DRM_ERROR("DX Context not set.\n");
+ return -EINVAL;
+ }
+
+ man = sw_context->man;
+ for (i = 0; i < num_views; ++i) {
+ struct vmw_ctx_bindinfo_view binding;
+ struct vmw_resource *view = NULL;
+
+ if (view_ids[i] != SVGA3D_INVALID_ID) {
+ view = vmw_view_lookup(man, view_type, view_ids[i]);
+ if (IS_ERR(view)) {
+ DRM_ERROR("View not found.\n");
+ return PTR_ERR(view);
+ }
+
+ ret = vmw_view_res_val_add(sw_context, view);
+ if (ret) {
+ DRM_ERROR("Could not add view to "
+ "validation list.\n");
+ vmw_resource_unreference(&view);
+ return ret;
+ }
+ }
+ binding.bi.ctx = ctx_node->res;
+ binding.bi.res = view;
+ binding.bi.bt = binding_type;
+ binding.shader_slot = shader_slot;
+ binding.slot = first_slot + i;
+ vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
+ shader_slot, binding.slot);
+ if (view)
+ vmw_resource_unreference(&view);
}
return 0;
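
/*
 * Illustrative sketch (userspace, not driver code): the slot arithmetic in
 * vmw_view_bindings_add() above. View id i is bound to binding slot
 * first_slot + i, and the reserved invalid id leaves the slot bound to
 * nothing (an unbind). INVALID_ID models SVGA3D_INVALID_ID.
 */
#include <stdint.h>
#include <stdio.h>

#define INVALID_ID 0xffffffffu

int main(void)
{
	uint32_t view_ids[] = { 3, INVALID_ID, 9 };
	uint32_t first_slot = 4;

	for (uint32_t i = 0; i < 3; ++i) {
		if (view_ids[i] == INVALID_ID)
			printf("slot %u <- nothing (unbind)\n",
			       first_slot + i);
		else
			printf("slot %u <- view %u\n",
			       first_slot + i, view_ids[i]);
	}
	return 0;
}
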
@@ -638,6 +932,12 @@ static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv,
cmd = container_of(header, struct vmw_sid_cmd, header);
+ if (cmd->body.type >= SVGA3D_RT_MAX) {
+ DRM_ERROR("Illegal render target type %u.\n",
+ (unsigned) cmd->body.type);
+ return -EINVAL;
+ }
+
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
user_context_converter, &cmd->body.cid,
&ctx_node);
@@ -651,13 +951,14 @@ static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv,
return ret;
if (dev_priv->has_mob) {
- struct vmw_ctx_bindinfo bi;
-
- bi.ctx = ctx_node->res;
- bi.res = res_node ? res_node->res : NULL;
- bi.bt = vmw_ctx_binding_rt;
- bi.i1.rt_type = cmd->body.type;
- return vmw_context_binding_add(ctx_node->staged_bindings, &bi);
+ struct vmw_ctx_bindinfo_view binding;
+
+ binding.bi.ctx = ctx_node->res;
+ binding.bi.res = res_node ? res_node->res : NULL;
+ binding.bi.bt = vmw_ctx_binding_rt;
+ binding.slot = cmd->body.type;
+ vmw_binding_add(ctx_node->staged_bindings,
+ &binding.bi, 0, binding.slot);
}
return 0;
@@ -674,16 +975,62 @@ static int vmw_cmd_surface_copy_check(struct vmw_private *dev_priv,
int ret;
cmd = container_of(header, struct vmw_sid_cmd, header);
+
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
user_surface_converter,
&cmd->body.src.sid, NULL);
- if (unlikely(ret != 0))
+ if (ret)
return ret;
+
return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
user_surface_converter,
&cmd->body.dest.sid, NULL);
}
+static int vmw_cmd_buffer_copy_check(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ SVGA3dCmdHeader *header)
+{
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDXBufferCopy body;
+ } *cmd;
+ int ret;
+
+ cmd = container_of(header, typeof(*cmd), header);
+ ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+ user_surface_converter,
+ &cmd->body.src, NULL);
+ if (ret != 0)
+ return ret;
+
+ return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+ user_surface_converter,
+ &cmd->body.dest, NULL);
+}
+
+static int vmw_cmd_pred_copy_check(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ SVGA3dCmdHeader *header)
+{
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDXPredCopyRegion body;
+ } *cmd;
+ int ret;
+
+ cmd = container_of(header, typeof(*cmd), header);
+ ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+ user_surface_converter,
+ &cmd->body.srcSid, NULL);
+ if (ret != 0)
+ return ret;
+
+ return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+ user_surface_converter,
+ &cmd->body.dstSid, NULL);
+}
+
static int vmw_cmd_stretch_blt_check(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
@@ -752,7 +1099,7 @@ static int vmw_cmd_present_check(struct vmw_private *dev_priv,
* command batch.
*/
static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
- struct ttm_buffer_object *new_query_bo,
+ struct vmw_dma_buffer *new_query_bo,
struct vmw_sw_context *sw_context)
{
struct vmw_res_cache_entry *ctx_entry =
@@ -764,7 +1111,7 @@ static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
if (unlikely(new_query_bo != sw_context->cur_query_bo)) {
- if (unlikely(new_query_bo->num_pages > 4)) {
+ if (unlikely(new_query_bo->base.num_pages > 4)) {
DRM_ERROR("Query buffer too large.\n");
return -EINVAL;
}
@@ -833,12 +1180,12 @@ static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv,
if (dev_priv->pinned_bo != sw_context->cur_query_bo) {
if (dev_priv->pinned_bo) {
- vmw_bo_pin(dev_priv->pinned_bo, false);
- ttm_bo_unref(&dev_priv->pinned_bo);
+ vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
+ vmw_dmabuf_unreference(&dev_priv->pinned_bo);
}
if (!sw_context->needs_post_query_barrier) {
- vmw_bo_pin(sw_context->cur_query_bo, true);
+ vmw_bo_pin_reserved(sw_context->cur_query_bo, true);
/*
* We pin also the dummy_query_bo buffer so that we
@@ -846,14 +1193,17 @@ static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv,
* dummy queries in context destroy paths.
*/
- vmw_bo_pin(dev_priv->dummy_query_bo, true);
- dev_priv->dummy_query_bo_pinned = true;
+ if (!dev_priv->dummy_query_bo_pinned) {
+ vmw_bo_pin_reserved(dev_priv->dummy_query_bo,
+ true);
+ dev_priv->dummy_query_bo_pinned = true;
+ }
BUG_ON(sw_context->last_query_ctx == NULL);
dev_priv->query_cid = sw_context->last_query_ctx->id;
dev_priv->query_cid_valid = true;
dev_priv->pinned_bo =
- ttm_bo_reference(sw_context->cur_query_bo);
+ vmw_dmabuf_reference(sw_context->cur_query_bo);
}
}
}
@@ -882,7 +1232,6 @@ static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
struct vmw_dma_buffer **vmw_bo_p)
{
struct vmw_dma_buffer *vmw_bo = NULL;
- struct ttm_buffer_object *bo;
uint32_t handle = *id;
struct vmw_relocation *reloc;
int ret;
@@ -893,7 +1242,6 @@ static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
ret = -EINVAL;
goto out_no_reloc;
}
- bo = &vmw_bo->base;
if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) {
DRM_ERROR("Max number relocations per submission"
@@ -906,7 +1254,7 @@ static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
reloc->mob_loc = id;
reloc->location = NULL;
- ret = vmw_bo_to_validate_list(sw_context, bo, true, &reloc->index);
+ ret = vmw_bo_to_validate_list(sw_context, vmw_bo, true, &reloc->index);
if (unlikely(ret != 0))
goto out_no_reloc;
@@ -944,7 +1292,6 @@ static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
struct vmw_dma_buffer **vmw_bo_p)
{
struct vmw_dma_buffer *vmw_bo = NULL;
- struct ttm_buffer_object *bo;
uint32_t handle = ptr->gmrId;
struct vmw_relocation *reloc;
int ret;
@@ -955,7 +1302,6 @@ static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
ret = -EINVAL;
goto out_no_reloc;
}
- bo = &vmw_bo->base;
if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) {
DRM_ERROR("Max number relocations per submission"
@@ -967,7 +1313,7 @@ static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
reloc = &sw_context->relocs[sw_context->cur_reloc++];
reloc->location = ptr;
- ret = vmw_bo_to_validate_list(sw_context, bo, false, &reloc->index);
+ ret = vmw_bo_to_validate_list(sw_context, vmw_bo, false, &reloc->index);
if (unlikely(ret != 0))
goto out_no_reloc;
@@ -980,6 +1326,98 @@ out_no_reloc:
return ret;
}
+
+
+/**
+ * vmw_cmd_dx_define_query - validate a SVGA_3D_CMD_DX_DEFINE_QUERY command.
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context used for this command submission.
+ * @header: Pointer to the command header in the command stream.
+ *
+ * This function adds the new query into the query COTABLE.
+ */
+static int vmw_cmd_dx_define_query(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ SVGA3dCmdHeader *header)
+{
+ struct vmw_dx_define_query_cmd {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDXDefineQuery q;
+ } *cmd;
+
+ int ret;
+ struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
+ struct vmw_resource *cotable_res;
+
+ if (ctx_node == NULL) {
+ DRM_ERROR("DX Context not set for query.\n");
+ return -EINVAL;
+ }
+
+ cmd = container_of(header, struct vmw_dx_define_query_cmd, header);
+
+ if (cmd->q.type < SVGA3D_QUERYTYPE_MIN ||
+ cmd->q.type >= SVGA3D_QUERYTYPE_MAX)
+ return -EINVAL;
+
+ cotable_res = vmw_context_cotable(ctx_node->res, SVGA_COTABLE_DXQUERY);
+ ret = vmw_cotable_notify(cotable_res, cmd->q.queryId);
+ vmw_resource_unreference(&cotable_res);
+
+ return ret;
+}
+
+/**
+ * vmw_cmd_dx_bind_query - validate a SVGA_3D_CMD_DX_BIND_QUERY command.
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context used for this command submission.
+ * @header: Pointer to the command header in the command stream.
+ *
+ * The query bind operation will eventually associate the query ID
+ * with its backing MOB. In this function, we take the user mode
+ * MOB ID and use vmw_translate_mob_ptr() to translate it to its
+ * kernel mode equivalent.
+ */
+static int vmw_cmd_dx_bind_query(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ SVGA3dCmdHeader *header)
+{
+ struct vmw_dx_bind_query_cmd {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDXBindQuery q;
+ } *cmd;
+
+ struct vmw_dma_buffer *vmw_bo;
+ int ret;
+
+ cmd = container_of(header, struct vmw_dx_bind_query_cmd, header);
+
+ /*
+ * Look up the buffer pointed to by q.mobid, put it on the relocation
+ * list so its kernel mode MOB ID can be filled in later.
+ */
+ ret = vmw_translate_mob_ptr(dev_priv, sw_context, &cmd->q.mobid,
+ &vmw_bo);
+
+ if (ret)
+ return ret;
+
+ sw_context->dx_query_mob = vmw_bo;
+ sw_context->dx_query_ctx = sw_context->dx_ctx_node->res;
+
+ vmw_dmabuf_unreference(&vmw_bo);
+
+ return ret;
+}
+
/**
* vmw_cmd_begin_gb_query - validate a SVGA_3D_CMD_BEGIN_GB_QUERY command.
*
@@ -1074,7 +1512,7 @@ static int vmw_cmd_end_gb_query(struct vmw_private *dev_priv,
if (unlikely(ret != 0))
return ret;
- ret = vmw_query_bo_switch_prepare(dev_priv, &vmw_bo->base, sw_context);
+ ret = vmw_query_bo_switch_prepare(dev_priv, vmw_bo, sw_context);
vmw_dmabuf_unreference(&vmw_bo);
return ret;
@@ -1128,7 +1566,7 @@ static int vmw_cmd_end_query(struct vmw_private *dev_priv,
if (unlikely(ret != 0))
return ret;
- ret = vmw_query_bo_switch_prepare(dev_priv, &vmw_bo->base, sw_context);
+ ret = vmw_query_bo_switch_prepare(dev_priv, vmw_bo, sw_context);
vmw_dmabuf_unreference(&vmw_bo);
return ret;
@@ -1363,6 +1801,12 @@ static int vmw_cmd_tex_state(struct vmw_private *dev_priv,
if (likely(cur_state->name != SVGA3D_TS_BIND_TEXTURE))
continue;
+ if (cur_state->stage >= SVGA3D_NUM_TEXTURE_UNITS) {
+ DRM_ERROR("Illegal texture/sampler unit %u.\n",
+ (unsigned) cur_state->stage);
+ return -EINVAL;
+ }
+
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
user_surface_converter,
&cur_state->value, &res_node);
@@ -1370,14 +1814,14 @@ static int vmw_cmd_tex_state(struct vmw_private *dev_priv,
return ret;
if (dev_priv->has_mob) {
- struct vmw_ctx_bindinfo bi;
-
- bi.ctx = ctx_node->res;
- bi.res = res_node ? res_node->res : NULL;
- bi.bt = vmw_ctx_binding_tex;
- bi.i1.texture_stage = cur_state->stage;
- vmw_context_binding_add(ctx_node->staged_bindings,
- &bi);
+ struct vmw_ctx_bindinfo_tex binding;
+
+ binding.bi.ctx = ctx_node->res;
+ binding.bi.res = res_node ? res_node->res : NULL;
+ binding.bi.bt = vmw_ctx_binding_tex;
+ binding.texture_stage = cur_state->stage;
+ vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
+ 0, binding.texture_stage);
}
}
@@ -1407,6 +1851,47 @@ static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv,
return ret;
}
+
+/**
+ * vmw_cmd_res_switch_backup - Utility function to handle backup buffer
+ * switching
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @val_node: The validation node representing the resource.
+ * @buf_id: Pointer to the user-space backup buffer handle in the command
+ * stream.
+ * @backup_offset: Offset of backup into MOB.
+ *
+ * This function prepares for registering a switch of backup buffers
+ * in the resource metadata just prior to unreserving.
+ */
+static int vmw_cmd_res_switch_backup(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ struct vmw_resource_val_node *val_node,
+ uint32_t *buf_id,
+ unsigned long backup_offset)
+{
+ struct vmw_dma_buffer *dma_buf;
+ int ret;
+
+ ret = vmw_translate_mob_ptr(dev_priv, sw_context, buf_id, &dma_buf);
+ if (ret)
+ return ret;
+
+ val_node->switching_backup = true;
+ if (val_node->first_usage)
+ val_node->no_buffer_needed = true;
+
+ vmw_dmabuf_unreference(&val_node->new_backup);
+ val_node->new_backup = dma_buf;
+ val_node->new_backup_offset = backup_offset;
+
+ return 0;
+}
+
/**
* vmw_cmd_switch_backup - Utility function to handle backup buffer switching
*
@@ -1420,7 +1905,8 @@ static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv,
* @backup_offset: Offset of backup into MOB.
*
* This function prepares for registering a switch of backup buffers
- * in the resource metadata just prior to unreserving.
+ * in the resource metadata just prior to unreserving. It's basically a wrapper
+ * around vmw_cmd_res_switch_backup with a different interface.
*/
static int vmw_cmd_switch_backup(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
@@ -1431,27 +1917,16 @@ static int vmw_cmd_switch_backup(struct vmw_private *dev_priv,
uint32_t *buf_id,
unsigned long backup_offset)
{
- int ret;
- struct vmw_dma_buffer *dma_buf;
struct vmw_resource_val_node *val_node;
+ int ret;
ret = vmw_cmd_res_check(dev_priv, sw_context, res_type,
converter, res_id, &val_node);
- if (unlikely(ret != 0))
- return ret;
-
- ret = vmw_translate_mob_ptr(dev_priv, sw_context, buf_id, &dma_buf);
- if (unlikely(ret != 0))
+ if (ret)
return ret;
- if (val_node->first_usage)
- val_node->no_buffer_needed = true;
-
- vmw_dmabuf_unreference(&val_node->new_backup);
- val_node->new_backup = dma_buf;
- val_node->new_backup_offset = backup_offset;
-
- return 0;
+ return vmw_cmd_res_switch_backup(dev_priv, sw_context, val_node,
+ buf_id, backup_offset);
}
/**
@@ -1703,10 +2178,10 @@ static int vmw_cmd_shader_destroy(struct vmw_private *dev_priv,
if (unlikely(!dev_priv->has_mob))
return 0;
- ret = vmw_compat_shader_remove(vmw_context_res_man(val->res),
- cmd->body.shid,
- cmd->body.type,
- &sw_context->staged_cmd_res);
+ ret = vmw_shader_remove(vmw_context_res_man(val->res),
+ cmd->body.shid,
+ cmd->body.type,
+ &sw_context->staged_cmd_res);
if (unlikely(ret != 0))
return ret;
@@ -1734,13 +2209,19 @@ static int vmw_cmd_set_shader(struct vmw_private *dev_priv,
SVGA3dCmdSetShader body;
} *cmd;
struct vmw_resource_val_node *ctx_node, *res_node = NULL;
- struct vmw_ctx_bindinfo bi;
+ struct vmw_ctx_bindinfo_shader binding;
struct vmw_resource *res = NULL;
int ret;
cmd = container_of(header, struct vmw_set_shader_cmd,
header);
+ if (cmd->body.type >= SVGA3D_SHADERTYPE_PREDX_MAX) {
+ DRM_ERROR("Illegal shader type %u.\n",
+ (unsigned) cmd->body.type);
+ return -EINVAL;
+ }
+
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
user_context_converter, &cmd->body.cid,
&ctx_node);
@@ -1751,14 +2232,12 @@ static int vmw_cmd_set_shader(struct vmw_private *dev_priv,
return 0;
if (cmd->body.shid != SVGA3D_INVALID_ID) {
- res = vmw_compat_shader_lookup
- (vmw_context_res_man(ctx_node->res),
- cmd->body.shid,
- cmd->body.type);
+ res = vmw_shader_lookup(vmw_context_res_man(ctx_node->res),
+ cmd->body.shid,
+ cmd->body.type);
if (!IS_ERR(res)) {
ret = vmw_cmd_res_reloc_add(dev_priv, sw_context,
- vmw_res_shader,
&cmd->body.shid, res,
&res_node);
vmw_resource_unreference(&res);
@@ -1776,11 +2255,13 @@ static int vmw_cmd_set_shader(struct vmw_private *dev_priv,
return ret;
}
- bi.ctx = ctx_node->res;
- bi.res = res_node ? res_node->res : NULL;
- bi.bt = vmw_ctx_binding_shader;
- bi.i1.shader_type = cmd->body.type;
- return vmw_context_binding_add(ctx_node->staged_bindings, &bi);
+ binding.bi.ctx = ctx_node->res;
+ binding.bi.res = res_node ? res_node->res : NULL;
+ binding.bi.bt = vmw_ctx_binding_shader;
+ binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
+ vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
+ binding.shader_slot, 0);
+ return 0;
}
/**
@@ -1842,6 +2323,690 @@ static int vmw_cmd_bind_gb_shader(struct vmw_private *dev_priv,
cmd->body.offsetInBytes);
}
+/**
+ * vmw_cmd_dx_set_single_constant_buffer - Validate an
+ * SVGA_3D_CMD_DX_SET_SINGLE_CONSTANT_BUFFER command.
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int
+vmw_cmd_dx_set_single_constant_buffer(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ SVGA3dCmdHeader *header)
+{
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDXSetSingleConstantBuffer body;
+ } *cmd;
+ struct vmw_resource_val_node *res_node = NULL;
+ struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
+ struct vmw_ctx_bindinfo_cb binding;
+ int ret;
+
+ if (unlikely(ctx_node == NULL)) {
+ DRM_ERROR("DX Context not set.\n");
+ return -EINVAL;
+ }
+
+ cmd = container_of(header, typeof(*cmd), header);
+ ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+ user_surface_converter,
+ &cmd->body.sid, &res_node);
+ if (unlikely(ret != 0))
+ return ret;
+
+ binding.bi.ctx = ctx_node->res;
+ binding.bi.res = res_node ? res_node->res : NULL;
+ binding.bi.bt = vmw_ctx_binding_cb;
+ binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
+ binding.offset = cmd->body.offsetInBytes;
+ binding.size = cmd->body.sizeInBytes;
+ binding.slot = cmd->body.slot;
+
+ if (binding.shader_slot >= SVGA3D_NUM_SHADERTYPE_DX10 ||
+ binding.slot >= SVGA3D_DX_MAX_CONSTBUFFERS) {
+ DRM_ERROR("Illegal const buffer shader %u slot %u.\n",
+ (unsigned) cmd->body.type,
+ (unsigned) binding.slot);
+ return -EINVAL;
+ }
+
+ vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
+ binding.shader_slot, binding.slot);
+
+ return 0;
+}
+
+/**
+ * vmw_cmd_dx_set_shader_res - Validate an
+ * SVGA_3D_CMD_DX_SET_SHADER_RESOURCES command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_dx_set_shader_res(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ SVGA3dCmdHeader *header)
+{
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDXSetShaderResources body;
+ } *cmd = container_of(header, typeof(*cmd), header);
+ u32 num_sr_view = (cmd->header.size - sizeof(cmd->body)) /
+ sizeof(SVGA3dShaderResourceViewId);
+
+ if ((u64) cmd->body.startView + (u64) num_sr_view >
+ (u64) SVGA3D_DX_MAX_SRVIEWS ||
+ cmd->body.type >= SVGA3D_SHADERTYPE_DX10_MAX) {
+ DRM_ERROR("Invalid shader binding.\n");
+ return -EINVAL;
+ }
+
+ return vmw_view_bindings_add(sw_context, vmw_view_sr,
+ vmw_ctx_binding_sr,
+ cmd->body.type - SVGA3D_SHADERTYPE_MIN,
+ (void *) &cmd[1], num_sr_view,
+ cmd->body.startView);
+}
+
+/**
+ * vmw_cmd_dx_set_shader - Validate an SVGA_3D_CMD_DX_SET_SHADER
+ * command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_dx_set_shader(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ SVGA3dCmdHeader *header)
+{
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDXSetShader body;
+ } *cmd;
+ struct vmw_resource *res = NULL;
+ struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
+ struct vmw_ctx_bindinfo_shader binding;
+ int ret = 0;
+
+ if (unlikely(ctx_node == NULL)) {
+ DRM_ERROR("DX Context not set.\n");
+ return -EINVAL;
+ }
+
+ cmd = container_of(header, typeof(*cmd), header);
+
+ if (cmd->body.type >= SVGA3D_SHADERTYPE_DX10_MAX) {
+ DRM_ERROR("Illegal shader type %u.\n",
+ (unsigned) cmd->body.type);
+ return -EINVAL;
+ }
+
+ if (cmd->body.shaderId != SVGA3D_INVALID_ID) {
+ res = vmw_shader_lookup(sw_context->man, cmd->body.shaderId, 0);
+ if (IS_ERR(res)) {
+ DRM_ERROR("Could not find shader for binding.\n");
+ return PTR_ERR(res);
+ }
+
+ ret = vmw_resource_val_add(sw_context, res, NULL);
+ if (ret)
+ goto out_unref;
+ }
+
+ binding.bi.ctx = ctx_node->res;
+ binding.bi.res = res;
+ binding.bi.bt = vmw_ctx_binding_dx_shader;
+ binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
+
+ vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
+ binding.shader_slot, 0);
+out_unref:
+ if (res)
+ vmw_resource_unreference(&res);
+
+ return ret;
+}
+
+/**
+ * vmw_cmd_dx_set_vertex_buffers - Validate an
+ * SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_dx_set_vertex_buffers(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ SVGA3dCmdHeader *header)
+{
+ struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
+ struct vmw_ctx_bindinfo_vb binding;
+ struct vmw_resource_val_node *res_node;
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDXSetVertexBuffers body;
+ SVGA3dVertexBuffer buf[];
+ } *cmd;
+ int i, ret, num;
+
+ if (unlikely(ctx_node == NULL)) {
+ DRM_ERROR("DX Context not set.\n");
+ return -EINVAL;
+ }
+
+ cmd = container_of(header, typeof(*cmd), header);
+ num = (cmd->header.size - sizeof(cmd->body)) /
+ sizeof(SVGA3dVertexBuffer);
+ if ((u64)num + (u64)cmd->body.startBuffer >
+ (u64)SVGA3D_DX_MAX_VERTEXBUFFERS) {
+ DRM_ERROR("Invalid number of vertex buffers.\n");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < num; i++) {
+ ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+ user_surface_converter,
+ &cmd->buf[i].sid, &res_node);
+ if (unlikely(ret != 0))
+ return ret;
+
+ binding.bi.ctx = ctx_node->res;
+ binding.bi.bt = vmw_ctx_binding_vb;
+ binding.bi.res = ((res_node) ? res_node->res : NULL);
+ binding.offset = cmd->buf[i].offset;
+ binding.stride = cmd->buf[i].stride;
+ binding.slot = i + cmd->body.startBuffer;
+
+ vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
+ 0, binding.slot);
+ }
+
+ return 0;
+}
+
+/**
+ * vmw_cmd_dx_set_index_buffer - Validate an
+ * SVGA_3D_CMD_DX_SET_INDEX_BUFFER command.
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_dx_set_index_buffer(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ SVGA3dCmdHeader *header)
+{
+ struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
+ struct vmw_ctx_bindinfo_ib binding;
+ struct vmw_resource_val_node *res_node;
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDXSetIndexBuffer body;
+ } *cmd;
+ int ret;
+
+ if (unlikely(ctx_node == NULL)) {
+ DRM_ERROR("DX Context not set.\n");
+ return -EINVAL;
+ }
+
+ cmd = container_of(header, typeof(*cmd), header);
+ ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+ user_surface_converter,
+ &cmd->body.sid, &res_node);
+ if (unlikely(ret != 0))
+ return ret;
+
+ binding.bi.ctx = ctx_node->res;
+ binding.bi.res = ((res_node) ? res_node->res : NULL);
+ binding.bi.bt = vmw_ctx_binding_ib;
+ binding.offset = cmd->body.offset;
+ binding.format = cmd->body.format;
+
+ vmw_binding_add(ctx_node->staged_bindings, &binding.bi, 0, 0);
+
+ return 0;
+}
+
+/**
+ * vmw_cmd_dx_set_rendertargets - Validate an
+ * SVGA_3D_CMD_DX_SET_RENDERTARGETS command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_dx_set_rendertargets(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ SVGA3dCmdHeader *header)
+{
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDXSetRenderTargets body;
+ } *cmd = container_of(header, typeof(*cmd), header);
+ int ret;
+ u32 num_rt_view = (cmd->header.size - sizeof(cmd->body)) /
+ sizeof(SVGA3dRenderTargetViewId);
+
+ if (num_rt_view > SVGA3D_MAX_SIMULTANEOUS_RENDER_TARGETS) {
+ DRM_ERROR("Invalid DX Rendertarget binding.\n");
+ return -EINVAL;
+ }
+
+ ret = vmw_view_bindings_add(sw_context, vmw_view_ds,
+ vmw_ctx_binding_ds, 0,
+ &cmd->body.depthStencilViewId, 1, 0);
+ if (ret)
+ return ret;
+
+ return vmw_view_bindings_add(sw_context, vmw_view_rt,
+ vmw_ctx_binding_dx_rt, 0,
+ (void *)&cmd[1], num_rt_view, 0);
+}
+
+/**
+ * vmw_cmd_dx_clear_rendertarget_view - Validate an
+ * SVGA_3D_CMD_DX_CLEAR_RENDERTARGET_VIEW command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_dx_clear_rendertarget_view(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ SVGA3dCmdHeader *header)
+{
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDXClearRenderTargetView body;
+ } *cmd = container_of(header, typeof(*cmd), header);
+
+ return vmw_view_id_val_add(sw_context, vmw_view_rt,
+ cmd->body.renderTargetViewId);
+}
+
+/**
+ * vmw_cmd_dx_clear_depthstencil_view - Validate an
+ * SVGA_3D_CMD_DX_CLEAR_DEPTHSTENCIL_VIEW command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_dx_clear_depthstencil_view(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ SVGA3dCmdHeader *header)
+{
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDXClearDepthStencilView body;
+ } *cmd = container_of(header, typeof(*cmd), header);
+
+ return vmw_view_id_val_add(sw_context, vmw_view_ds,
+ cmd->body.depthStencilViewId);
+}
+
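+/**
+ * vmw_cmd_dx_view_define - Validate an SVGA_3D_CMD_DX_DEFINE_[X]_VIEW
+ * command.
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */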
+static int vmw_cmd_dx_view_define(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ SVGA3dCmdHeader *header)
+{
+ struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
+ struct vmw_resource_val_node *srf_node;
+ struct vmw_resource *res;
+ enum vmw_view_type view_type;
+ int ret;
+ /*
+ * This is based on the fact that all affected define commands have
+ * the same initial command body layout.
+ */
+ struct {
+ SVGA3dCmdHeader header;
+ uint32 defined_id;
+ uint32 sid;
+ } *cmd;
+
+ if (unlikely(ctx_node == NULL)) {
+ DRM_ERROR("DX Context not set.\n");
+ return -EINVAL;
+ }
+
+ view_type = vmw_view_cmd_to_type(header->id);
+ cmd = container_of(header, typeof(*cmd), header);
+ ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+ user_surface_converter,
+ &cmd->sid, &srf_node);
+ if (unlikely(ret != 0))
+ return ret;
+
+ res = vmw_context_cotable(ctx_node->res, vmw_view_cotables[view_type]);
+ ret = vmw_cotable_notify(res, cmd->defined_id);
+ vmw_resource_unreference(&res);
+ if (unlikely(ret != 0))
+ return ret;
+
+ return vmw_view_add(sw_context->man,
+ ctx_node->res,
+ srf_node->res,
+ view_type,
+ cmd->defined_id,
+ header,
+ header->size + sizeof(*header),
+ &sw_context->staged_cmd_res);
+}
+
+/**
+ * vmw_cmd_dx_set_so_targets - Validate an
+ * SVGA_3D_CMD_DX_SET_SOTARGETS command.
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_dx_set_so_targets(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ SVGA3dCmdHeader *header)
+{
+ struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
+ struct vmw_ctx_bindinfo_so binding;
+ struct vmw_resource_val_node *res_node;
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDXSetSOTargets body;
+ SVGA3dSoTarget targets[];
+ } *cmd;
+ int i, ret, num;
+
+ if (unlikely(ctx_node == NULL)) {
+ DRM_ERROR("DX Context not set.\n");
+ return -EINVAL;
+ }
+
+ cmd = container_of(header, typeof(*cmd), header);
+ num = (cmd->header.size - sizeof(cmd->body)) /
+ sizeof(SVGA3dSoTarget);
+
+ if (num > SVGA3D_DX_MAX_SOTARGETS) {
+ DRM_ERROR("Invalid DX SO binding.\n");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < num; i++) {
+ ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+ user_surface_converter,
+ &cmd->targets[i].sid, &res_node);
+ if (unlikely(ret != 0))
+ return ret;
+
+ binding.bi.ctx = ctx_node->res;
+ binding.bi.res = ((res_node) ? res_node->res : NULL);
+ binding.bi.bt = vmw_ctx_binding_so;
+ binding.offset = cmd->targets[i].offset;
+ binding.size = cmd->targets[i].sizeInBytes;
+ binding.slot = i;
+
+ vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
+ 0, binding.slot);
+ }
+
+ return 0;
+}
+
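+/**
+ * vmw_cmd_dx_so_define - Validate a state object define command
+ * (SVGA_3D_CMD_DX_DEFINE_[X]).
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */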
+static int vmw_cmd_dx_so_define(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ SVGA3dCmdHeader *header)
+{
+ struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
+ struct vmw_resource *res;
+ /*
+ * This is based on the fact that all affected define commands have
+ * the same initial command body layout.
+ */
+ struct {
+ SVGA3dCmdHeader header;
+ uint32 defined_id;
+ } *cmd;
+ enum vmw_so_type so_type;
+ int ret;
+
+ if (unlikely(ctx_node == NULL)) {
+ DRM_ERROR("DX Context not set.\n");
+ return -EINVAL;
+ }
+
+ so_type = vmw_so_cmd_to_type(header->id);
+ res = vmw_context_cotable(ctx_node->res, vmw_so_cotables[so_type]);
+ cmd = container_of(header, typeof(*cmd), header);
+ ret = vmw_cotable_notify(res, cmd->defined_id);
+ vmw_resource_unreference(&res);
+
+ return ret;
+}
+
+/**
+ * vmw_cmd_dx_check_subresource - Validate an
+ * SVGA_3D_CMD_DX_[X]_SUBRESOURCE command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_dx_check_subresource(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ SVGA3dCmdHeader *header)
+{
+ struct {
+ SVGA3dCmdHeader header;
+ union {
+ SVGA3dCmdDXReadbackSubResource r_body;
+ SVGA3dCmdDXInvalidateSubResource i_body;
+ SVGA3dCmdDXUpdateSubResource u_body;
+ SVGA3dSurfaceId sid;
+ };
+ } *cmd;
+
+ BUILD_BUG_ON(offsetof(typeof(*cmd), r_body.sid) !=
+ offsetof(typeof(*cmd), sid));
+ BUILD_BUG_ON(offsetof(typeof(*cmd), i_body.sid) !=
+ offsetof(typeof(*cmd), sid));
+ BUILD_BUG_ON(offsetof(typeof(*cmd), u_body.sid) !=
+ offsetof(typeof(*cmd), sid));
+
+ cmd = container_of(header, typeof(*cmd), header);
+
+ return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+ user_surface_converter,
+ &cmd->sid, NULL);
+}
+
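+/**
+ * vmw_cmd_dx_cid_check - Validate that a DX context has been set for the
+ * command batch.
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */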
+static int vmw_cmd_dx_cid_check(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ SVGA3dCmdHeader *header)
+{
+ struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
+
+ if (unlikely(ctx_node == NULL)) {
+ DRM_ERROR("DX Context not set.\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * vmw_cmd_dx_view_remove - validate a view remove command and
+ * schedule the view resource for removal.
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ *
+ * Check that the view exists, and if it was not created using this
+ * command batch, make sure it's validated (present in the device) so that
+ * the remove command will not confuse the device.
+ */
+static int vmw_cmd_dx_view_remove(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ SVGA3dCmdHeader *header)
+{
+ struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
+ struct {
+ SVGA3dCmdHeader header;
+ union vmw_view_destroy body;
+ } *cmd = container_of(header, typeof(*cmd), header);
+ enum vmw_view_type view_type = vmw_view_cmd_to_type(header->id);
+ struct vmw_resource *view;
+ int ret;
+
+ if (!ctx_node) {
+ DRM_ERROR("DX Context not set.\n");
+ return -EINVAL;
+ }
+
+ ret = vmw_view_remove(sw_context->man,
+ cmd->body.view_id, view_type,
+ &sw_context->staged_cmd_res,
+ &view);
+ if (ret || !view)
+ return ret;
+
+ /*
+ * Add view to the validate list iff it was not created using this
+ * command batch.
+ */
+ return vmw_view_res_val_add(sw_context, view);
+}
+
+/**
+ * vmw_cmd_dx_define_shader - Validate an SVGA_3D_CMD_DX_DEFINE_SHADER
+ * command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_dx_define_shader(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ SVGA3dCmdHeader *header)
+{
+ struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
+ struct vmw_resource *res;
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDXDefineShader body;
+ } *cmd = container_of(header, typeof(*cmd), header);
+ int ret;
+
+ if (!ctx_node) {
+ DRM_ERROR("DX Context not set.\n");
+ return -EINVAL;
+ }
+
+ res = vmw_context_cotable(ctx_node->res, SVGA_COTABLE_DXSHADER);
+ ret = vmw_cotable_notify(res, cmd->body.shaderId);
+ vmw_resource_unreference(&res);
+ if (ret)
+ return ret;
+
+ return vmw_dx_shader_add(sw_context->man, ctx_node->res,
+ cmd->body.shaderId, cmd->body.type,
+ &sw_context->staged_cmd_res);
+}
+
+/**
+ * vmw_cmd_dx_destroy_shader - Validate an SVGA_3D_CMD_DX_DESTROY_SHADER
+ * command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_dx_destroy_shader(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ SVGA3dCmdHeader *header)
+{
+ struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDXDestroyShader body;
+ } *cmd = container_of(header, typeof(*cmd), header);
+ int ret;
+
+ if (!ctx_node) {
+ DRM_ERROR("DX Context not set.\n");
+ return -EINVAL;
+ }
+
+ ret = vmw_shader_remove(sw_context->man, cmd->body.shaderId, 0,
+ &sw_context->staged_cmd_res);
+ if (ret)
+ DRM_ERROR("Could not find shader to remove.\n");
+
+ return ret;
+}
+
+/**
+ * vmw_cmd_dx_bind_shader - Validate an SVGA_3D_CMD_DX_BIND_SHADER
+ * command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_dx_bind_shader(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ SVGA3dCmdHeader *header)
+{
+ struct vmw_resource_val_node *ctx_node;
+ struct vmw_resource_val_node *res_node;
+ struct vmw_resource *res;
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDXBindShader body;
+ } *cmd = container_of(header, typeof(*cmd), header);
+ int ret;
+
+ if (cmd->body.cid != SVGA3D_INVALID_ID) {
+ ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
+ user_context_converter,
+ &cmd->body.cid, &ctx_node);
+ if (ret)
+ return ret;
+ } else {
+ ctx_node = sw_context->dx_ctx_node;
+ if (!ctx_node) {
+ DRM_ERROR("DX Context not set.\n");
+ return -EINVAL;
+ }
+ }
+
+ res = vmw_shader_lookup(vmw_context_res_man(ctx_node->res),
+ cmd->body.shid, 0);
+ if (IS_ERR(res)) {
+ DRM_ERROR("Could not find shader to bind.\n");
+ return PTR_ERR(res);
+ }
+
+ ret = vmw_resource_val_add(sw_context, res, &res_node);
+ if (ret) {
+ DRM_ERROR("Error creating resource validation node.\n");
+ goto out_unref;
+ }
+
+ ret = vmw_cmd_res_switch_backup(dev_priv, sw_context, res_node,
+ &cmd->body.mobid,
+ cmd->body.offsetInBytes);
+out_unref:
+ vmw_resource_unreference(&res);
+
+ return ret;
+}
+
static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
void *buf, uint32_t *size)
@@ -1849,7 +3014,7 @@ static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv,
uint32_t size_remaining = *size;
uint32_t cmd_id;
- cmd_id = le32_to_cpu(((uint32_t *)buf)[0]);
+ cmd_id = ((uint32_t *)buf)[0];
switch (cmd_id) {
case SVGA_CMD_UPDATE:
*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdUpdate);
@@ -1980,7 +3145,7 @@ static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
false, false, true),
VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_MOB, &vmw_cmd_invalid,
false, false, true),
- VMW_CMD_DEF(SVGA_3D_CMD_REDEFINE_GB_MOB, &vmw_cmd_invalid,
+ VMW_CMD_DEF(SVGA_3D_CMD_REDEFINE_GB_MOB64, &vmw_cmd_invalid,
false, false, true),
VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_MOB_MAPPING, &vmw_cmd_invalid,
false, false, true),
@@ -2051,7 +3216,147 @@ static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL, &vmw_cmd_invalid,
false, false, true),
VMW_CMD_DEF(SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE, &vmw_cmd_cid_check,
- true, false, true)
+ true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_GB_SCREEN_DMA, &vmw_cmd_invalid,
+ false, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SURFACE_WITH_PITCH, &vmw_cmd_invalid,
+ false, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_GB_MOB_FENCE, &vmw_cmd_invalid,
+ false, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SURFACE_V2, &vmw_cmd_invalid,
+ false, false, true),
+
+ /*
+ * DX commands
+ */
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_CONTEXT, &vmw_cmd_invalid,
+ false, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_CONTEXT, &vmw_cmd_invalid,
+ false, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_CONTEXT, &vmw_cmd_invalid,
+ false, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_CONTEXT, &vmw_cmd_invalid,
+ false, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_INVALIDATE_CONTEXT, &vmw_cmd_invalid,
+ false, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SINGLE_CONSTANT_BUFFER,
+ &vmw_cmd_dx_set_single_constant_buffer, true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SHADER_RESOURCES,
+ &vmw_cmd_dx_set_shader_res, true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SHADER, &vmw_cmd_dx_set_shader,
+ true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SAMPLERS, &vmw_cmd_dx_cid_check,
+ true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW, &vmw_cmd_dx_cid_check,
+ true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INDEXED, &vmw_cmd_dx_cid_check,
+ true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INSTANCED, &vmw_cmd_dx_cid_check,
+ true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INDEXED_INSTANCED,
+ &vmw_cmd_dx_cid_check, true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_AUTO, &vmw_cmd_dx_cid_check,
+ true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS,
+ &vmw_cmd_dx_set_vertex_buffers, true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_INDEX_BUFFER,
+ &vmw_cmd_dx_set_index_buffer, true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_RENDERTARGETS,
+ &vmw_cmd_dx_set_rendertargets, true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_BLEND_STATE, &vmw_cmd_dx_cid_check,
+ true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_DEPTHSTENCIL_STATE,
+ &vmw_cmd_dx_cid_check, true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_RASTERIZER_STATE,
+ &vmw_cmd_dx_cid_check, true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_QUERY, &vmw_cmd_dx_define_query,
+ true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_QUERY, &vmw_cmd_ok,
+ true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_QUERY, &vmw_cmd_dx_bind_query,
+ true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_QUERY_OFFSET,
+ &vmw_cmd_ok, true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_BEGIN_QUERY, &vmw_cmd_ok,
+ true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_END_QUERY, &vmw_cmd_ok,
+ true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_QUERY, &vmw_cmd_invalid,
+ true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_PREDICATION, &vmw_cmd_invalid,
+ true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_VIEWPORTS, &vmw_cmd_dx_cid_check,
+ true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SCISSORRECTS, &vmw_cmd_dx_cid_check,
+ true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_RENDERTARGET_VIEW,
+ &vmw_cmd_dx_clear_rendertarget_view, true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_DEPTHSTENCIL_VIEW,
+ &vmw_cmd_dx_clear_depthstencil_view, true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_PRED_COPY, &vmw_cmd_invalid,
+ true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_GENMIPS, &vmw_cmd_invalid,
+ true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_UPDATE_SUBRESOURCE,
+ &vmw_cmd_dx_check_subresource, true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_SUBRESOURCE,
+ &vmw_cmd_dx_check_subresource, true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_INVALIDATE_SUBRESOURCE,
+ &vmw_cmd_dx_check_subresource, true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SHADERRESOURCE_VIEW,
+ &vmw_cmd_dx_view_define, true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SHADERRESOURCE_VIEW,
+ &vmw_cmd_dx_view_remove, true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_RENDERTARGET_VIEW,
+ &vmw_cmd_dx_view_define, true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_RENDERTARGET_VIEW,
+ &vmw_cmd_dx_view_remove, true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_VIEW,
+ &vmw_cmd_dx_view_define, true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_VIEW,
+ &vmw_cmd_dx_view_remove, true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_ELEMENTLAYOUT,
+ &vmw_cmd_dx_so_define, true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_ELEMENTLAYOUT,
+ &vmw_cmd_dx_cid_check, true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_BLEND_STATE,
+ &vmw_cmd_dx_so_define, true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_BLEND_STATE,
+ &vmw_cmd_dx_cid_check, true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_STATE,
+ &vmw_cmd_dx_so_define, true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_STATE,
+ &vmw_cmd_dx_cid_check, true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_RASTERIZER_STATE,
+ &vmw_cmd_dx_so_define, true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_RASTERIZER_STATE,
+ &vmw_cmd_dx_cid_check, true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SAMPLER_STATE,
+ &vmw_cmd_dx_so_define, true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SAMPLER_STATE,
+ &vmw_cmd_dx_cid_check, true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SHADER,
+ &vmw_cmd_dx_define_shader, true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SHADER,
+ &vmw_cmd_dx_destroy_shader, true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_SHADER,
+ &vmw_cmd_dx_bind_shader, true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_STREAMOUTPUT,
+ &vmw_cmd_dx_so_define, true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_STREAMOUTPUT,
+ &vmw_cmd_dx_cid_check, true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_STREAMOUTPUT, &vmw_cmd_dx_cid_check,
+ true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SOTARGETS,
+ &vmw_cmd_dx_set_so_targets, true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_INPUT_LAYOUT,
+ &vmw_cmd_dx_cid_check, true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_TOPOLOGY,
+ &vmw_cmd_dx_cid_check, true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_BUFFER_COPY,
+ &vmw_cmd_buffer_copy_check, true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_PRED_COPY_REGION,
+ &vmw_cmd_pred_copy_check, true, false, true),
};
static int vmw_cmd_check(struct vmw_private *dev_priv,
@@ -2065,14 +3370,14 @@ static int vmw_cmd_check(struct vmw_private *dev_priv,
const struct vmw_cmd_entry *entry;
bool gb = dev_priv->capabilities & SVGA_CAP_GBOBJECTS;
- cmd_id = le32_to_cpu(((uint32_t *)buf)[0]);
+ cmd_id = ((uint32_t *)buf)[0];
/* Handle any none 3D commands */
if (unlikely(cmd_id < SVGA_CMD_MAX))
return vmw_cmd_check_not_3d(dev_priv, sw_context, buf, size);
- cmd_id = le32_to_cpu(header->id);
- *size = le32_to_cpu(header->size) + sizeof(SVGA3dCmdHeader);
+ cmd_id = header->id;
+ *size = header->size + sizeof(SVGA3dCmdHeader);
cmd_id -= SVGA_3D_CMD_BASE;
if (unlikely(*size > size_remaining))
@@ -2184,7 +3489,8 @@ static void vmw_apply_relocations(struct vmw_sw_context *sw_context)
*
* @list: The resource list.
*/
-static void vmw_resource_list_unreference(struct list_head *list)
+static void vmw_resource_list_unreference(struct vmw_sw_context *sw_context,
+ struct list_head *list)
{
struct vmw_resource_val_node *val, *val_next;
@@ -2195,8 +3501,15 @@ static void vmw_resource_list_unreference(struct list_head *list)
list_for_each_entry_safe(val, val_next, list, head) {
list_del_init(&val->head);
vmw_resource_unreference(&val->res);
- if (unlikely(val->staged_bindings))
- kfree(val->staged_bindings);
+
+ if (val->staged_bindings) {
+ if (val->staged_bindings != sw_context->staged_bindings)
+ vmw_binding_state_free(val->staged_bindings);
+ else
+ sw_context->staged_bindings_inuse = false;
+ val->staged_bindings = NULL;
+ }
+
kfree(val);
}
}
@@ -2222,24 +3535,21 @@ static void vmw_clear_validations(struct vmw_sw_context *sw_context)
(void) drm_ht_remove_item(&sw_context->res_ht, &val->hash);
}
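+
+/**
+ * vmw_validate_single_buffer - Validate a single buffer object for command
+ * submission, placing it as a MOB or, failing that, in VRAM or GMR memory.
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @bo: The TTM buffer object to validate.
+ * @interruptible: Whether to perform waits interruptibly.
+ * @validate_as_mob: Whether the buffer must be validated as a MOB.
+ */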
-static int vmw_validate_single_buffer(struct vmw_private *dev_priv,
- struct ttm_buffer_object *bo,
- bool validate_as_mob)
+int vmw_validate_single_buffer(struct vmw_private *dev_priv,
+ struct ttm_buffer_object *bo,
+ bool interruptible,
+ bool validate_as_mob)
{
+ struct vmw_dma_buffer *vbo = container_of(bo, struct vmw_dma_buffer,
+ base);
int ret;
-
- /*
- * Don't validate pinned buffers.
- */
-
- if (bo == dev_priv->pinned_bo ||
- (bo == dev_priv->dummy_query_bo &&
- dev_priv->dummy_query_bo_pinned))
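+ /* Don't validate pinned buffers. */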
+ if (vbo->pin_count > 0)
return 0;
if (validate_as_mob)
- return ttm_bo_validate(bo, &vmw_mob_placement, true, false);
+ return ttm_bo_validate(bo, &vmw_mob_placement, interruptible,
+ false);
/**
* Put BO in VRAM if there is space, otherwise as a GMR.
@@ -2248,7 +3558,8 @@ static int vmw_validate_single_buffer(struct vmw_private *dev_priv,
* used as a GMR, this will return -ENOMEM.
*/
- ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, true, false);
+ ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, interruptible,
+ false);
if (likely(ret == 0 || ret == -ERESTARTSYS))
return ret;
@@ -2257,8 +3568,7 @@ static int vmw_validate_single_buffer(struct vmw_private *dev_priv,
* previous contents.
*/
- DRM_INFO("Falling through to VRAM.\n");
- ret = ttm_bo_validate(bo, &vmw_vram_placement, true, false);
+ ret = ttm_bo_validate(bo, &vmw_vram_placement, interruptible, false);
return ret;
}
@@ -2270,6 +3580,7 @@ static int vmw_validate_buffers(struct vmw_private *dev_priv,
list_for_each_entry(entry, &sw_context->validate_nodes, base.head) {
ret = vmw_validate_single_buffer(dev_priv, entry->base.bo,
+ true,
entry->validate_as_mob);
if (unlikely(ret != 0))
return ret;
@@ -2417,7 +3728,164 @@ vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
}
}
+/**
+ * vmw_execbuf_submit_fifo - Patch a command batch and submit it using
+ * the fifo.
+ *
+ * @dev_priv: Pointer to a device private structure.
+ * @kernel_commands: Pointer to the unpatched command batch.
+ * @command_size: Size of the unpatched command batch.
+ * @sw_context: Structure holding the relocation lists.
+ *
+ * Side effects: If this function returns 0, then the command batch
+ * pointed to by @kernel_commands will have been modified.
+ */
+static int vmw_execbuf_submit_fifo(struct vmw_private *dev_priv,
+ void *kernel_commands,
+ u32 command_size,
+ struct vmw_sw_context *sw_context)
+{
+ void *cmd;
+ if (sw_context->dx_ctx_node)
+ cmd = vmw_fifo_reserve_dx(dev_priv, command_size,
+ sw_context->dx_ctx_node->res->id);
+ else
+ cmd = vmw_fifo_reserve(dev_priv, command_size);
+ if (!cmd) {
+ DRM_ERROR("Failed reserving fifo space for commands.\n");
+ return -ENOMEM;
+ }
+
+ vmw_apply_relocations(sw_context);
+ memcpy(cmd, kernel_commands, command_size);
+ vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
+ vmw_resource_relocations_free(&sw_context->res_relocations);
+ vmw_fifo_commit(dev_priv, command_size);
+
+ return 0;
+}
+
+/**
+ * vmw_execbuf_submit_cmdbuf - Patch a command batch and submit it using
+ * the command buffer manager.
+ *
+ * @dev_priv: Pointer to a device private structure.
+ * @header: Opaque handle to the command buffer allocation.
+ * @command_size: Size of the unpatched command batch.
+ * @sw_context: Structure holding the relocation lists.
+ *
+ * Side effects: If this function returns 0, then the command buffer
+ * represented by @header will have been modified.
+ */
+static int vmw_execbuf_submit_cmdbuf(struct vmw_private *dev_priv,
+ struct vmw_cmdbuf_header *header,
+ u32 command_size,
+ struct vmw_sw_context *sw_context)
+{
+ u32 id = ((sw_context->dx_ctx_node) ? sw_context->dx_ctx_node->res->id :
+ SVGA3D_INVALID_ID);
+ void *cmd = vmw_cmdbuf_reserve(dev_priv->cman, command_size,
+ id, false, header);
+
+ vmw_apply_relocations(sw_context);
+ vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
+ vmw_resource_relocations_free(&sw_context->res_relocations);
+ vmw_cmdbuf_commit(dev_priv->cman, command_size, header, false);
+
+ return 0;
+}
+
+/**
+ * vmw_execbuf_cmdbuf - Prepare, if possible, a user-space command batch for
+ * submission using a command buffer.
+ *
+ * @dev_priv: Pointer to a device private structure.
+ * @user_commands: User-space pointer to the commands to be submitted.
+ * @kernel_commands: Pointer to a kernel-space copy of the command batch,
+ * or NULL if not yet copied in.
+ * @command_size: Size of the unpatched command batch.
+ * @header: Out parameter returning the opaque pointer to the command buffer.
+ *
+ * This function checks whether we can use the command buffer manager for
+ * submission and if so, creates a command buffer of suitable size and
+ * copies the user data into that buffer.
+ *
+ * On successful return, the function returns a pointer to the data in the
+ * command buffer and *@header is set to non-NULL.
+ * If command buffers could not be used, the function will return the value
+ * of @kernel_commands on function call. That value may be NULL. In that case,
+ * the value of *@header will be set to NULL.
+ * If an error is encountered, the function will return a pointer error value.
+ * If the function is interrupted by a signal while sleeping, it will return
+ * -ERESTARTSYS cast to a pointer error value.
+ */
+static void *vmw_execbuf_cmdbuf(struct vmw_private *dev_priv,
+ void __user *user_commands,
+ void *kernel_commands,
+ u32 command_size,
+ struct vmw_cmdbuf_header **header)
+{
+ size_t cmdbuf_size;
+ int ret;
+
+ *header = NULL;
+ if (!dev_priv->cman || kernel_commands)
+ return kernel_commands;
+
+ if (command_size > SVGA_CB_MAX_SIZE) {
+ DRM_ERROR("Command buffer is too large.\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ /* If possible, add a little space for fencing. */
+ cmdbuf_size = command_size + 512;
+ cmdbuf_size = min_t(size_t, cmdbuf_size, SVGA_CB_MAX_SIZE);
+ kernel_commands = vmw_cmdbuf_alloc(dev_priv->cman, cmdbuf_size,
+ true, header);
+ if (IS_ERR(kernel_commands))
+ return kernel_commands;
+
+ ret = copy_from_user(kernel_commands, user_commands,
+ command_size);
+ if (ret) {
+ DRM_ERROR("Failed copying commands.\n");
+ vmw_cmdbuf_header_free(*header);
+ *header = NULL;
+ return ERR_PTR(-EFAULT);
+ }
+
+ return kernel_commands;
+}
+
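+/**
+ * vmw_execbuf_tie_context - Set the DX context for a command batch.
+ *
+ * @dev_priv: Pointer to a device private structure.
+ * @sw_context: Structure holding the relocation lists.
+ * @handle: User-space handle of the DX context, or SVGA3D_INVALID_ID.
+ *
+ * Looks up the context identified by @handle, adds it to the software
+ * context's resource list and makes it the current DX context for the
+ * command batch.
+ */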
+static int vmw_execbuf_tie_context(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ uint32_t handle)
+{
+ struct vmw_resource_val_node *ctx_node;
+ struct vmw_resource *res;
+ int ret;
+
+ if (handle == SVGA3D_INVALID_ID)
+ return 0;
+
+ ret = vmw_user_resource_lookup_handle(dev_priv, sw_context->fp->tfile,
+ handle, user_context_converter,
+ &res);
+ if (unlikely(ret != 0)) {
+ DRM_ERROR("Could not find or user DX context 0x%08x.\n",
+ (unsigned) handle);
+ return ret;
+ }
+
+ ret = vmw_resource_val_add(sw_context, res, &ctx_node);
+ if (unlikely(ret != 0))
+ goto out_err;
+
+ sw_context->dx_ctx_node = ctx_node;
+ sw_context->man = vmw_context_res_man(res);
+out_err:
+ vmw_resource_unreference(&res);
+ return ret;
+}
int vmw_execbuf_process(struct drm_file *file_priv,
struct vmw_private *dev_priv,
@@ -2425,6 +3893,7 @@ int vmw_execbuf_process(struct drm_file *file_priv,
void *kernel_commands,
uint32_t command_size,
uint64_t throttle_us,
+ uint32_t dx_context_handle,
struct drm_vmw_fence_rep __user *user_fence_rep,
struct vmw_fence_obj **out_fence)
{
@@ -2432,18 +3901,33 @@ int vmw_execbuf_process(struct drm_file *file_priv,
struct vmw_fence_obj *fence = NULL;
struct vmw_resource *error_resource;
struct list_head resource_list;
+ struct vmw_cmdbuf_header *header;
struct ww_acquire_ctx ticket;
uint32_t handle;
- void *cmd;
int ret;
+ if (throttle_us) {
+ ret = vmw_wait_lag(dev_priv, &dev_priv->fifo.marker_queue,
+ throttle_us);
+
+ if (ret)
+ return ret;
+ }
+
+ kernel_commands = vmw_execbuf_cmdbuf(dev_priv, user_commands,
+ kernel_commands, command_size,
+ &header);
+ if (IS_ERR(kernel_commands))
+ return PTR_ERR(kernel_commands);
+
ret = mutex_lock_interruptible(&dev_priv->cmdbuf_mutex);
- if (unlikely(ret != 0))
- return -ERESTARTSYS;
+ if (ret) {
+ ret = -ERESTARTSYS;
+ goto out_free_header;
+ }
+ sw_context->kernel = false;
if (kernel_commands == NULL) {
- sw_context->kernel = false;
-
ret = vmw_resize_cmd_bounce(sw_context, command_size);
if (unlikely(ret != 0))
goto out_unlock;
@@ -2458,19 +3942,26 @@ int vmw_execbuf_process(struct drm_file *file_priv,
goto out_unlock;
}
kernel_commands = sw_context->cmd_bounce;
- } else
+ } else if (!header)
sw_context->kernel = true;
sw_context->fp = vmw_fpriv(file_priv);
sw_context->cur_reloc = 0;
sw_context->cur_val_buf = 0;
INIT_LIST_HEAD(&sw_context->resource_list);
+ INIT_LIST_HEAD(&sw_context->ctx_resource_list);
sw_context->cur_query_bo = dev_priv->pinned_bo;
sw_context->last_query_ctx = NULL;
sw_context->needs_post_query_barrier = false;
+ sw_context->dx_ctx_node = NULL;
+ sw_context->dx_query_mob = NULL;
+ sw_context->dx_query_ctx = NULL;
memset(sw_context->res_cache, 0, sizeof(sw_context->res_cache));
INIT_LIST_HEAD(&sw_context->validate_nodes);
INIT_LIST_HEAD(&sw_context->res_relocations);
+ if (sw_context->staged_bindings)
+ vmw_binding_state_reset(sw_context->staged_bindings);
+
if (!sw_context->res_ht_initialized) {
ret = drm_ht_create(&sw_context->res_ht, VMW_RES_HT_ORDER);
if (unlikely(ret != 0))
@@ -2478,10 +3969,24 @@ int vmw_execbuf_process(struct drm_file *file_priv,
sw_context->res_ht_initialized = true;
}
INIT_LIST_HEAD(&sw_context->staged_cmd_res);
-
INIT_LIST_HEAD(&resource_list);
+ ret = vmw_execbuf_tie_context(dev_priv, sw_context, dx_context_handle);
+ if (unlikely(ret != 0)) {
+ list_splice_init(&sw_context->ctx_resource_list,
+ &sw_context->resource_list);
+ goto out_err_nores;
+ }
+
ret = vmw_cmd_check_all(dev_priv, sw_context, kernel_commands,
command_size);
+ /*
+ * Merge the resource lists before checking the return status
+ * from vmw_cmd_check_all so that all the open hashtabs will
+ * be handled properly even if vmw_cmd_check_all fails.
+ */
+ list_splice_init(&sw_context->ctx_resource_list,
+ &sw_context->resource_list);
+
if (unlikely(ret != 0))
goto out_err_nores;
@@ -2502,14 +4007,6 @@ int vmw_execbuf_process(struct drm_file *file_priv,
if (unlikely(ret != 0))
goto out_err;
- if (throttle_us) {
- ret = vmw_wait_lag(dev_priv, &dev_priv->fifo.marker_queue,
- throttle_us);
-
- if (unlikely(ret != 0))
- goto out_err;
- }
-
ret = mutex_lock_interruptible(&dev_priv->binding_mutex);
if (unlikely(ret != 0)) {
ret = -ERESTARTSYS;
@@ -2522,21 +4019,17 @@ int vmw_execbuf_process(struct drm_file *file_priv,
goto out_unlock_binding;
}
- cmd = vmw_fifo_reserve(dev_priv, command_size);
- if (unlikely(cmd == NULL)) {
- DRM_ERROR("Failed reserving fifo space for commands.\n");
- ret = -ENOMEM;
- goto out_unlock_binding;
+ if (!header) {
+ ret = vmw_execbuf_submit_fifo(dev_priv, kernel_commands,
+ command_size, sw_context);
+ } else {
+ ret = vmw_execbuf_submit_cmdbuf(dev_priv, header, command_size,
+ sw_context);
+ header = NULL;
}
-
- vmw_apply_relocations(sw_context);
- memcpy(cmd, kernel_commands, command_size);
-
- vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
- vmw_resource_relocations_free(&sw_context->res_relocations);
-
- vmw_fifo_commit(dev_priv, command_size);
mutex_unlock(&dev_priv->binding_mutex);
+ if (ret)
+ goto out_err;
vmw_query_bo_switch_commit(dev_priv, sw_context);
ret = vmw_execbuf_fence_commands(file_priv, dev_priv,
@@ -2551,7 +4044,7 @@ int vmw_execbuf_process(struct drm_file *file_priv,
if (ret != 0)
DRM_ERROR("Fence submission error. Syncing.\n");
- vmw_resource_list_unreserve(&sw_context->resource_list, false);
+ vmw_resources_unreserve(sw_context, false);
ttm_eu_fence_buffer_objects(&ticket, &sw_context->validate_nodes,
(void *) fence);
@@ -2580,7 +4073,7 @@ int vmw_execbuf_process(struct drm_file *file_priv,
* Unreference resources outside of the cmdbuf_mutex to
* avoid deadlocks in resource destruction paths.
*/
- vmw_resource_list_unreference(&resource_list);
+ vmw_resource_list_unreference(sw_context, &resource_list);
return 0;
@@ -2589,7 +4082,7 @@ out_unlock_binding:
out_err:
ttm_eu_backoff_reservation(&ticket, &sw_context->validate_nodes);
out_err_nores:
- vmw_resource_list_unreserve(&sw_context->resource_list, true);
+ vmw_resources_unreserve(sw_context, true);
vmw_resource_relocations_free(&sw_context->res_relocations);
vmw_free_relocations(sw_context);
vmw_clear_validations(sw_context);
@@ -2607,9 +4100,12 @@ out_unlock:
* Unreference resources outside of the cmdbuf_mutex to
* avoid deadlocks in resource destruction paths.
*/
- vmw_resource_list_unreference(&resource_list);
+ vmw_resource_list_unreference(sw_context, &resource_list);
if (unlikely(error_resource != NULL))
vmw_resource_unreference(&error_resource);
+out_free_header:
+ if (header)
+ vmw_cmdbuf_header_free(header);
return ret;
}
@@ -2628,9 +4124,11 @@ static void vmw_execbuf_unpin_panic(struct vmw_private *dev_priv)
DRM_ERROR("Can't unpin query buffer. Trying to recover.\n");
(void) vmw_fallback_wait(dev_priv, false, true, 0, false, 10*HZ);
- vmw_bo_pin(dev_priv->pinned_bo, false);
- vmw_bo_pin(dev_priv->dummy_query_bo, false);
- dev_priv->dummy_query_bo_pinned = false;
+ vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
+ if (dev_priv->dummy_query_bo_pinned) {
+ vmw_bo_pin_reserved(dev_priv->dummy_query_bo, false);
+ dev_priv->dummy_query_bo_pinned = false;
+ }
}
@@ -2672,11 +4170,11 @@ void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
INIT_LIST_HEAD(&validate_list);
- pinned_val.bo = ttm_bo_reference(dev_priv->pinned_bo);
+ pinned_val.bo = ttm_bo_reference(&dev_priv->pinned_bo->base);
pinned_val.shared = false;
list_add_tail(&pinned_val.head, &validate_list);
- query_val.bo = ttm_bo_reference(dev_priv->dummy_query_bo);
+ query_val.bo = ttm_bo_reference(&dev_priv->dummy_query_bo->base);
query_val.shared = false;
list_add_tail(&query_val.head, &validate_list);
@@ -2697,10 +4195,11 @@ void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
dev_priv->query_cid_valid = false;
}
- vmw_bo_pin(dev_priv->pinned_bo, false);
- vmw_bo_pin(dev_priv->dummy_query_bo, false);
- dev_priv->dummy_query_bo_pinned = false;
-
+ vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
+ if (dev_priv->dummy_query_bo_pinned) {
+ vmw_bo_pin_reserved(dev_priv->dummy_query_bo, false);
+ dev_priv->dummy_query_bo_pinned = false;
+ }
if (fence == NULL) {
(void) vmw_execbuf_fence_commands(NULL, dev_priv, &lfence,
NULL);
@@ -2712,7 +4211,9 @@ void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
ttm_bo_unref(&query_val.bo);
ttm_bo_unref(&pinned_val.bo);
- ttm_bo_unref(&dev_priv->pinned_bo);
+ vmw_dmabuf_unreference(&dev_priv->pinned_bo);
+ DRM_INFO("Dummy query bo pin count: %d\n",
+ dev_priv->dummy_query_bo->pin_count);
out_unlock:
return;
@@ -2722,7 +4223,7 @@ out_no_emit:
out_no_reserve:
ttm_bo_unref(&query_val.bo);
ttm_bo_unref(&pinned_val.bo);
- ttm_bo_unref(&dev_priv->pinned_bo);
+ vmw_dmabuf_unreference(&dev_priv->pinned_bo);
}
/**
@@ -2751,36 +4252,68 @@ void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv)
mutex_unlock(&dev_priv->cmdbuf_mutex);
}
-
-int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+int vmw_execbuf_ioctl(struct drm_device *dev, unsigned long data,
+ struct drm_file *file_priv, size_t size)
{
struct vmw_private *dev_priv = vmw_priv(dev);
- struct drm_vmw_execbuf_arg *arg = (struct drm_vmw_execbuf_arg *)data;
+ struct drm_vmw_execbuf_arg arg;
int ret;
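+ /* Size of the ioctl argument for each supported execbuf version. */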
+ static const size_t copy_offset[] = {
+ offsetof(struct drm_vmw_execbuf_arg, context_handle),
+ sizeof(struct drm_vmw_execbuf_arg)};
+
+ if (unlikely(size < copy_offset[0])) {
+ DRM_ERROR("Invalid command size, ioctl %d\n",
+ DRM_VMW_EXECBUF);
+ return -EINVAL;
+ }
+
+ if (copy_from_user(&arg, (void __user *) data, copy_offset[0]) != 0)
+ return -EFAULT;
/*
- * This will allow us to extend the ioctl argument while
+ * Extend the ioctl argument while
* maintaining backwards compatibility:
* We take different code paths depending on the value of
- * arg->version.
+ * arg.version.
*/
- if (unlikely(arg->version != DRM_VMW_EXECBUF_VERSION)) {
+ if (unlikely(arg.version > DRM_VMW_EXECBUF_VERSION ||
+ arg.version == 0)) {
DRM_ERROR("Incorrect execbuf version.\n");
- DRM_ERROR("You're running outdated experimental "
- "vmwgfx user-space drivers.");
return -EINVAL;
}
+ if (arg.version > 1 &&
+ copy_from_user(&arg.context_handle,
+ (void __user *) (data + copy_offset[0]),
+ copy_offset[arg.version - 1] -
+ copy_offset[0]) != 0)
+ return -EFAULT;
+
+ switch (arg.version) {
+ case 1:
+ arg.context_handle = (uint32_t) -1;
+ break;
+ case 2:
+ if (arg.pad64 != 0) {
+ DRM_ERROR("Unused IOCTL data not set to zero.\n");
+ return -EINVAL;
+ }
+ break;
+ default:
+ break;
+ }
+
ret = ttm_read_lock(&dev_priv->reservation_sem, true);
if (unlikely(ret != 0))
return ret;
ret = vmw_execbuf_process(file_priv, dev_priv,
- (void __user *)(unsigned long)arg->commands,
- NULL, arg->command_size, arg->throttle_us,
- (void __user *)(unsigned long)arg->fence_rep,
+ (void __user *)(unsigned long)arg.commands,
+ NULL, arg.command_size, arg.throttle_us,
+ arg.context_handle,
+ (void __user *)(unsigned long)arg.fence_rep,
NULL);
ttm_read_unlock(&dev_priv->reservation_sem);
if (unlikely(ret != 0))
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
index 0a474f391fad..3b1faf7862a5 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
@@ -1,7 +1,7 @@
/**************************************************************************
*
* Copyright © 2007 David Airlie
- * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
+ * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -30,6 +30,7 @@
#include <drm/drmP.h>
#include "vmwgfx_drv.h"
+#include "vmwgfx_kms.h"
#include <drm/ttm/ttm_placement.h>
@@ -40,21 +41,22 @@ struct vmw_fb_par {
void *vmalloc;
+ struct mutex bo_mutex;
struct vmw_dma_buffer *vmw_bo;
struct ttm_bo_kmap_obj map;
+ void *bo_ptr;
+ unsigned bo_size;
+ struct drm_framebuffer *set_fb;
+ struct drm_display_mode *set_mode;
+ u32 fb_x;
+ u32 fb_y;
+ bool bo_iowrite;
u32 pseudo_palette[17];
- unsigned depth;
- unsigned bpp;
-
unsigned max_width;
unsigned max_height;
- void *bo_ptr;
- unsigned bo_size;
- bool bo_iowrite;
-
struct {
spinlock_t lock;
bool active;
@@ -63,6 +65,10 @@ struct vmw_fb_par {
unsigned x2;
unsigned y2;
} dirty;
+
+ struct drm_crtc *crtc;
+ struct drm_connector *con;
+ struct delayed_work local_work;
};
static int vmw_fb_setcolreg(unsigned regno, unsigned red, unsigned green,
@@ -77,7 +83,7 @@ static int vmw_fb_setcolreg(unsigned regno, unsigned red, unsigned green,
return 1;
}
- switch (par->depth) {
+ switch (par->set_fb->depth) {
case 24:
case 32:
pal[regno] = ((red & 0xff00) << 8) |
@@ -85,7 +91,8 @@ static int vmw_fb_setcolreg(unsigned regno, unsigned red, unsigned green,
((blue & 0xff00) >> 8);
break;
default:
- DRM_ERROR("Bad depth %u, bpp %u.\n", par->depth, par->bpp);
+ DRM_ERROR("Bad depth %u, bpp %u.\n", par->set_fb->depth,
+ par->set_fb->bits_per_pixel);
return 1;
}
@@ -134,12 +141,6 @@ static int vmw_fb_check_var(struct fb_var_screeninfo *var,
return -EINVAL;
}
- if (!(vmw_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY) &&
- (var->xoffset != 0 || var->yoffset != 0)) {
- DRM_ERROR("Can not handle panning without display topology\n");
- return -EINVAL;
- }
-
if ((var->xoffset + var->xres) > par->max_width ||
(var->yoffset + var->yres) > par->max_height) {
DRM_ERROR("Requested geom can not fit in framebuffer\n");
@@ -156,46 +157,6 @@ static int vmw_fb_check_var(struct fb_var_screeninfo *var,
return 0;
}
-static int vmw_fb_set_par(struct fb_info *info)
-{
- struct vmw_fb_par *par = info->par;
- struct vmw_private *vmw_priv = par->vmw_priv;
- int ret;
-
- info->fix.line_length = info->var.xres * info->var.bits_per_pixel/8;
-
- ret = vmw_kms_write_svga(vmw_priv, info->var.xres, info->var.yres,
- info->fix.line_length,
- par->bpp, par->depth);
- if (ret)
- return ret;
-
- if (vmw_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY) {
- /* TODO check if pitch and offset changes */
- vmw_write(vmw_priv, SVGA_REG_NUM_GUEST_DISPLAYS, 1);
- vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, 0);
- vmw_write(vmw_priv, SVGA_REG_DISPLAY_IS_PRIMARY, true);
- vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_X, info->var.xoffset);
- vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_Y, info->var.yoffset);
- vmw_write(vmw_priv, SVGA_REG_DISPLAY_WIDTH, info->var.xres);
- vmw_write(vmw_priv, SVGA_REG_DISPLAY_HEIGHT, info->var.yres);
- vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
- }
-
- /* This is really helpful since if this fails the user
- * can probably not see anything on the screen.
- */
- WARN_ON(vmw_read(vmw_priv, SVGA_REG_FB_OFFSET) != 0);
-
- return 0;
-}
-
-static int vmw_fb_pan_display(struct fb_var_screeninfo *var,
- struct fb_info *info)
-{
- return 0;
-}
-
static int vmw_fb_blank(int blank, struct fb_info *info)
{
return 0;
@@ -205,65 +166,89 @@ static int vmw_fb_blank(int blank, struct fb_info *info)
* Dirty code
*/
-static void vmw_fb_dirty_flush(struct vmw_fb_par *par)
+static void vmw_fb_dirty_flush(struct work_struct *work)
{
+ struct vmw_fb_par *par = container_of(work, struct vmw_fb_par,
+ local_work.work);
struct vmw_private *vmw_priv = par->vmw_priv;
struct fb_info *info = vmw_priv->fb_info;
- int stride = (info->fix.line_length / 4);
- int *src = (int *)info->screen_base;
- __le32 __iomem *vram_mem = par->bo_ptr;
- unsigned long flags;
- unsigned x, y, w, h;
- int i, k;
- struct {
- uint32_t header;
- SVGAFifoCmdUpdate body;
- } *cmd;
+ unsigned long irq_flags;
+ s32 dst_x1, dst_x2, dst_y1, dst_y2, w, h;
+ u32 cpp, max_x, max_y;
+ struct drm_clip_rect clip;
+ struct drm_framebuffer *cur_fb;
+ u8 *src_ptr, *dst_ptr;
if (vmw_priv->suspended)
return;
- spin_lock_irqsave(&par->dirty.lock, flags);
- if (!par->dirty.active) {
- spin_unlock_irqrestore(&par->dirty.lock, flags);
- return;
- }
- x = par->dirty.x1;
- y = par->dirty.y1;
- w = min(par->dirty.x2, info->var.xres) - x;
- h = min(par->dirty.y2, info->var.yres) - y;
- par->dirty.x1 = par->dirty.x2 = 0;
- par->dirty.y1 = par->dirty.y2 = 0;
- spin_unlock_irqrestore(&par->dirty.lock, flags);
+ mutex_lock(&par->bo_mutex);
+ cur_fb = par->set_fb;
+ if (!cur_fb)
+ goto out_unlock;
- for (i = y * stride; i < info->fix.smem_len / 4; i += stride) {
- for (k = i+x; k < i+x+w && k < info->fix.smem_len / 4; k++)
- iowrite32(src[k], vram_mem + k);
+ spin_lock_irqsave(&par->dirty.lock, irq_flags);
+ if (!par->dirty.active) {
+ spin_unlock_irqrestore(&par->dirty.lock, irq_flags);
+ goto out_unlock;
}
-#if 0
- DRM_INFO("%s, (%u, %u) (%ux%u)\n", __func__, x, y, w, h);
-#endif
+ /*
+ * Handle panning when copying from vmalloc to framebuffer.
+ * Clip dirty area to framebuffer.
+ */
+ cpp = (cur_fb->bits_per_pixel + 7) / 8;
+ max_x = par->fb_x + cur_fb->width;
+ max_y = par->fb_y + cur_fb->height;
+
+ dst_x1 = par->dirty.x1 - par->fb_x;
+ dst_y1 = par->dirty.y1 - par->fb_y;
+ dst_x1 = max_t(s32, dst_x1, 0);
+ dst_y1 = max_t(s32, dst_y1, 0);
+
+ dst_x2 = par->dirty.x2 - par->fb_x;
+ dst_y2 = par->dirty.y2 - par->fb_y;
+ dst_x2 = min_t(s32, dst_x2, max_x);
+ dst_y2 = min_t(s32, dst_y2, max_y);
+ w = dst_x2 - dst_x1;
+ h = dst_y2 - dst_y1;
+ w = max_t(s32, 0, w);
+ h = max_t(s32, 0, h);
- cmd = vmw_fifo_reserve(vmw_priv, sizeof(*cmd));
- if (unlikely(cmd == NULL)) {
- DRM_ERROR("Fifo reserve failed.\n");
- return;
+ par->dirty.x1 = par->dirty.x2 = 0;
+ par->dirty.y1 = par->dirty.y2 = 0;
+ spin_unlock_irqrestore(&par->dirty.lock, irq_flags);
+
+ if (w && h) {
+ dst_ptr = (u8 *)par->bo_ptr +
+ (dst_y1 * par->set_fb->pitches[0] + dst_x1 * cpp);
+ src_ptr = (u8 *)par->vmalloc +
+ ((dst_y1 + par->fb_y) * info->fix.line_length +
+ (dst_x1 + par->fb_x) * cpp);
+
+ while (h-- > 0) {
+ memcpy(dst_ptr, src_ptr, w*cpp);
+ dst_ptr += par->set_fb->pitches[0];
+ src_ptr += info->fix.line_length;
+ }
+
+ clip.x1 = dst_x1;
+ clip.x2 = dst_x2;
+ clip.y1 = dst_y1;
+ clip.y2 = dst_y2;
+
+ WARN_ON_ONCE(par->set_fb->funcs->dirty(cur_fb, NULL, 0, 0,
+ &clip, 1));
+ vmw_fifo_flush(vmw_priv, false);
}
-
- cmd->header = cpu_to_le32(SVGA_CMD_UPDATE);
- cmd->body.x = cpu_to_le32(x);
- cmd->body.y = cpu_to_le32(y);
- cmd->body.width = cpu_to_le32(w);
- cmd->body.height = cpu_to_le32(h);
- vmw_fifo_commit(vmw_priv, sizeof(*cmd));
+out_unlock:
+ mutex_unlock(&par->bo_mutex);
}
static void vmw_fb_dirty_mark(struct vmw_fb_par *par,
unsigned x1, unsigned y1,
unsigned width, unsigned height)
{
- struct fb_info *info = par->vmw_priv->fb_info;
unsigned long flags;
unsigned x2 = x1 + width;
unsigned y2 = y1 + height;
@@ -277,7 +262,8 @@ static void vmw_fb_dirty_mark(struct vmw_fb_par *par,
/* If we are active, start the dirty work;
 * we share the work with the defio system. */
if (par->dirty.active)
- schedule_delayed_work(&info->deferred_work, VMW_DIRTY_DELAY);
+ schedule_delayed_work(&par->local_work,
+ VMW_DIRTY_DELAY);
} else {
if (x1 < par->dirty.x1)
par->dirty.x1 = x1;
@@ -291,6 +277,28 @@ static void vmw_fb_dirty_mark(struct vmw_fb_par *par,
spin_unlock_irqrestore(&par->dirty.lock, flags);
}
+static int vmw_fb_pan_display(struct fb_var_screeninfo *var,
+ struct fb_info *info)
+{
+ struct vmw_fb_par *par = info->par;
+
+ if ((var->xoffset + var->xres) > var->xres_virtual ||
+ (var->yoffset + var->yres) > var->yres_virtual) {
+ DRM_ERROR("Requested panning can not fit in framebuffer\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&par->bo_mutex);
+ par->fb_x = var->xoffset;
+ par->fb_y = var->yoffset;
+ if (par->set_fb)
+ vmw_fb_dirty_mark(par, par->fb_x, par->fb_y, par->set_fb->width,
+ par->set_fb->height);
+ mutex_unlock(&par->bo_mutex);
+
+ return 0;
+}
+
static void vmw_deferred_io(struct fb_info *info,
struct list_head *pagelist)
{
@@ -319,12 +327,17 @@ static void vmw_deferred_io(struct fb_info *info,
par->dirty.x2 = info->var.xres;
par->dirty.y2 = y2;
spin_unlock_irqrestore(&par->dirty.lock, flags);
- }
- vmw_fb_dirty_flush(par);
+ /*
+ * Since we've already waited on this work once, try to
+ * execute it as soon as possible.
+ */
+ cancel_delayed_work(&par->local_work);
+ schedule_delayed_work(&par->local_work, 0);
+ }
};
-struct fb_deferred_io vmw_defio = {
+static struct fb_deferred_io vmw_defio = {
.delay = VMW_DIRTY_DELAY,
.deferred_io = vmw_deferred_io,
};
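For reference, the clip arithmetic in the new vmw_fb_dirty_flush() reduces to translating the dirty rectangle by the pan offset and clamping it to the framebuffer; a rectangle that clamps to zero width or height is simply skipped. A standalone sketch of that step (illustrative names):

struct demo_rect { s32 x1, y1, x2, y2; };

/* Translate a dirty rect by the pan offset and clamp it to fb_w x fb_h. */
static struct demo_rect demo_clip_dirty(struct demo_rect d,
					u32 fb_x, u32 fb_y,
					u32 fb_w, u32 fb_h)
{
	struct demo_rect r;

	r.x1 = max_t(s32, d.x1 - (s32)fb_x, 0);
	r.y1 = max_t(s32, d.y1 - (s32)fb_y, 0);
	r.x2 = min_t(s32, d.x2 - (s32)fb_x, (s32)fb_w);
	r.y2 = min_t(s32, d.y2 - (s32)fb_y, (s32)fb_h);
	r.x2 = max_t(s32, r.x2, r.x1);	/* empty rather than negative */
	r.y2 = max_t(s32, r.y2, r.y1);
	return r;
}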
@@ -358,33 +371,12 @@ static void vmw_fb_imageblit(struct fb_info *info, const struct fb_image *image)
* Bring up code
*/
-static struct fb_ops vmw_fb_ops = {
- .owner = THIS_MODULE,
- .fb_check_var = vmw_fb_check_var,
- .fb_set_par = vmw_fb_set_par,
- .fb_setcolreg = vmw_fb_setcolreg,
- .fb_fillrect = vmw_fb_fillrect,
- .fb_copyarea = vmw_fb_copyarea,
- .fb_imageblit = vmw_fb_imageblit,
- .fb_pan_display = vmw_fb_pan_display,
- .fb_blank = vmw_fb_blank,
-};
-
static int vmw_fb_create_bo(struct vmw_private *vmw_priv,
size_t size, struct vmw_dma_buffer **out)
{
struct vmw_dma_buffer *vmw_bo;
- struct ttm_place ne_place = vmw_vram_ne_placement.placement[0];
- struct ttm_placement ne_placement;
int ret;
- ne_placement.num_placement = 1;
- ne_placement.placement = &ne_place;
- ne_placement.num_busy_placement = 1;
- ne_placement.busy_placement = &ne_place;
-
- ne_place.lpfn = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
-
(void) ttm_write_lock(&vmw_priv->reservation_sem, false);
vmw_bo = kmalloc(sizeof(*vmw_bo), GFP_KERNEL);
@@ -394,31 +386,261 @@ static int vmw_fb_create_bo(struct vmw_private *vmw_priv,
}
ret = vmw_dmabuf_init(vmw_priv, vmw_bo, size,
- &ne_placement,
+ &vmw_sys_placement,
false,
&vmw_dmabuf_bo_free);
if (unlikely(ret != 0))
goto err_unlock; /* init frees the buffer on failure */
*out = vmw_bo;
-
- ttm_write_unlock(&vmw_priv->fbdev_master.lock);
+ ttm_write_unlock(&vmw_priv->reservation_sem);
return 0;
err_unlock:
- ttm_write_unlock(&vmw_priv->fbdev_master.lock);
+ ttm_write_unlock(&vmw_priv->reservation_sem);
return ret;
}
+static int vmw_fb_compute_depth(struct fb_var_screeninfo *var,
+ int *depth)
+{
+ switch (var->bits_per_pixel) {
+ case 32:
+ *depth = (var->transp.length > 0) ? 32 : 24;
+ break;
+ default:
+ DRM_ERROR("Bad bpp %u.\n", var->bits_per_pixel);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int vmw_fb_kms_detach(struct vmw_fb_par *par,
+ bool detach_bo,
+ bool unref_bo)
+{
+ struct drm_framebuffer *cur_fb = par->set_fb;
+ int ret;
+
+ /* Detach the KMS framebuffer from crtcs */
+ if (par->set_mode) {
+ struct drm_mode_set set;
+
+ set.crtc = par->crtc;
+ set.x = 0;
+ set.y = 0;
+ set.mode = NULL;
+ set.fb = NULL;
+ set.num_connectors = 1;
+ set.connectors = &par->con;
+ ret = drm_mode_set_config_internal(&set);
+ if (ret) {
+ DRM_ERROR("Could not unset a mode.\n");
+ return ret;
+ }
+ drm_mode_destroy(par->vmw_priv->dev, par->set_mode);
+ par->set_mode = NULL;
+ }
+
+ if (cur_fb) {
+ drm_framebuffer_unreference(cur_fb);
+ par->set_fb = NULL;
+ }
+
+ if (par->vmw_bo && detach_bo) {
+ if (par->bo_ptr) {
+ ttm_bo_kunmap(&par->map);
+ par->bo_ptr = NULL;
+ }
+ if (unref_bo)
+ vmw_dmabuf_unreference(&par->vmw_bo);
+ else
+ vmw_dmabuf_unpin(par->vmw_priv, par->vmw_bo, false);
+ }
+
+ return 0;
+}
+
+static int vmw_fb_kms_framebuffer(struct fb_info *info)
+{
+ struct drm_mode_fb_cmd mode_cmd;
+ struct vmw_fb_par *par = info->par;
+ struct fb_var_screeninfo *var = &info->var;
+ struct drm_framebuffer *cur_fb;
+ struct vmw_framebuffer *vfb;
+ int ret = 0;
+ size_t new_bo_size;
+
+ ret = vmw_fb_compute_depth(var, &mode_cmd.depth);
+ if (ret)
+ return ret;
+
+ mode_cmd.width = var->xres;
+ mode_cmd.height = var->yres;
+ mode_cmd.bpp = var->bits_per_pixel;
+ mode_cmd.pitch = ((mode_cmd.bpp + 7) / 8) * mode_cmd.width;
+
+ cur_fb = par->set_fb;
+ if (cur_fb && cur_fb->width == mode_cmd.width &&
+ cur_fb->height == mode_cmd.height &&
+ cur_fb->bits_per_pixel == mode_cmd.bpp &&
+ cur_fb->depth == mode_cmd.depth &&
+ cur_fb->pitches[0] == mode_cmd.pitch)
+ return 0;
+
+ /* Need a new buffer object? */
+ new_bo_size = (size_t) mode_cmd.pitch * (size_t) mode_cmd.height;
+ ret = vmw_fb_kms_detach(par,
+ par->bo_size < new_bo_size ||
+ par->bo_size > 2*new_bo_size,
+ true);
+ if (ret)
+ return ret;
+
+ if (!par->vmw_bo) {
+ ret = vmw_fb_create_bo(par->vmw_priv, new_bo_size,
+ &par->vmw_bo);
+ if (ret) {
+ DRM_ERROR("Failed creating a buffer object for "
+ "fbdev.\n");
+ return ret;
+ }
+ par->bo_size = new_bo_size;
+ }
+
+ vfb = vmw_kms_new_framebuffer(par->vmw_priv, par->vmw_bo, NULL,
+ true, &mode_cmd);
+ if (IS_ERR(vfb))
+ return PTR_ERR(vfb);
+
+ par->set_fb = &vfb->base;
+
+ if (!par->bo_ptr) {
+ /*
+ * Pin before mapping. Since we don't know in what placement
+ * to pin, call into KMS to do it for us.
+ */
+ ret = vfb->pin(vfb);
+ if (ret) {
+ DRM_ERROR("Could not pin the fbdev framebuffer.\n");
+ return ret;
+ }
+
+ ret = ttm_bo_kmap(&par->vmw_bo->base, 0,
+ par->vmw_bo->base.num_pages, &par->map);
+ if (ret) {
+ vfb->unpin(vfb);
+ DRM_ERROR("Could not map the fbdev framebuffer.\n");
+ return ret;
+ }
+
+ par->bo_ptr = ttm_kmap_obj_virtual(&par->map, &par->bo_iowrite);
+ }
+
+ return 0;
+}
+
+static int vmw_fb_set_par(struct fb_info *info)
+{
+ struct vmw_fb_par *par = info->par;
+ struct vmw_private *vmw_priv = par->vmw_priv;
+ struct drm_mode_set set;
+ struct fb_var_screeninfo *var = &info->var;
+ struct drm_display_mode new_mode = { DRM_MODE("fb_mode",
+ DRM_MODE_TYPE_DRIVER,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC)
+ };
+ struct drm_display_mode *old_mode;
+ struct drm_display_mode *mode;
+ int ret;
+
+ old_mode = par->set_mode;
+ mode = drm_mode_duplicate(vmw_priv->dev, &new_mode);
+ if (!mode) {
+ DRM_ERROR("Could not create new fb mode.\n");
+ return -ENOMEM;
+ }
+
+ mode->hdisplay = var->xres;
+ mode->vdisplay = var->yres;
+ vmw_guess_mode_timing(mode);
+
+ if (old_mode && drm_mode_equal(old_mode, mode)) {
+ drm_mode_destroy(vmw_priv->dev, mode);
+ mode = old_mode;
+ old_mode = NULL;
+ } else if (!vmw_kms_validate_mode_vram(vmw_priv,
+ mode->hdisplay *
+ (var->bits_per_pixel + 7) / 8,
+ mode->vdisplay)) {
+ drm_mode_destroy(vmw_priv->dev, mode);
+ return -EINVAL;
+ }
+
+ mutex_lock(&par->bo_mutex);
+ drm_modeset_lock_all(vmw_priv->dev);
+ ret = vmw_fb_kms_framebuffer(info);
+ if (ret)
+ goto out_unlock;
+
+ par->fb_x = var->xoffset;
+ par->fb_y = var->yoffset;
+
+ set.crtc = par->crtc;
+ set.x = 0;
+ set.y = 0;
+ set.mode = mode;
+ set.fb = par->set_fb;
+ set.num_connectors = 1;
+ set.connectors = &par->con;
+
+ ret = drm_mode_set_config_internal(&set);
+ if (ret)
+ goto out_unlock;
+
+ vmw_fb_dirty_mark(par, par->fb_x, par->fb_y,
+ par->set_fb->width, par->set_fb->height);
+
+ /* If something was already dirty, no new work would have been
+ * scheduled, so schedule it now. */
+
+ schedule_delayed_work(&par->local_work, 0);
+
+out_unlock:
+ if (old_mode)
+ drm_mode_destroy(vmw_priv->dev, old_mode);
+ par->set_mode = mode;
+
+ drm_modeset_unlock_all(vmw_priv->dev);
+ mutex_unlock(&par->bo_mutex);
+
+ return ret;
+}
+
+
+static struct fb_ops vmw_fb_ops = {
+ .owner = THIS_MODULE,
+ .fb_check_var = vmw_fb_check_var,
+ .fb_set_par = vmw_fb_set_par,
+ .fb_setcolreg = vmw_fb_setcolreg,
+ .fb_fillrect = vmw_fb_fillrect,
+ .fb_copyarea = vmw_fb_copyarea,
+ .fb_imageblit = vmw_fb_imageblit,
+ .fb_pan_display = vmw_fb_pan_display,
+ .fb_blank = vmw_fb_blank,
+};
+
int vmw_fb_init(struct vmw_private *vmw_priv)
{
struct device *device = &vmw_priv->dev->pdev->dev;
struct vmw_fb_par *par;
struct fb_info *info;
- unsigned initial_width, initial_height;
unsigned fb_width, fb_height;
unsigned fb_bpp, fb_depth, fb_offset, fb_pitch, fb_size;
+ struct drm_display_mode *init_mode;
int ret;
fb_bpp = 32;
@@ -428,9 +650,6 @@ int vmw_fb_init(struct vmw_private *vmw_priv)
fb_width = min(vmw_priv->fb_max_width, (unsigned)2048);
fb_height = min(vmw_priv->fb_max_height, (unsigned)2048);
- initial_width = min(vmw_priv->initial_width, fb_width);
- initial_height = min(vmw_priv->initial_height, fb_height);
-
fb_pitch = fb_width * fb_bpp / 8;
fb_size = fb_pitch * fb_height;
fb_offset = vmw_read(vmw_priv, SVGA_REG_FB_OFFSET);
@@ -444,35 +663,35 @@ int vmw_fb_init(struct vmw_private *vmw_priv)
*/
vmw_priv->fb_info = info;
par = info->par;
+ memset(par, 0, sizeof(*par));
+ INIT_DELAYED_WORK(&par->local_work, &vmw_fb_dirty_flush);
par->vmw_priv = vmw_priv;
- par->depth = fb_depth;
- par->bpp = fb_bpp;
par->vmalloc = NULL;
par->max_width = fb_width;
par->max_height = fb_height;
+ drm_modeset_lock_all(vmw_priv->dev);
+ ret = vmw_kms_fbdev_init_data(vmw_priv, 0, par->max_width,
+ par->max_height, &par->con,
+ &par->crtc, &init_mode);
+ if (ret) {
+ drm_modeset_unlock_all(vmw_priv->dev);
+ goto err_kms;
+ }
+
+ info->var.xres = init_mode->hdisplay;
+ info->var.yres = init_mode->vdisplay;
+ drm_modeset_unlock_all(vmw_priv->dev);
+
/*
* Create buffers and alloc memory
*/
- par->vmalloc = vmalloc(fb_size);
+ par->vmalloc = vzalloc(fb_size);
if (unlikely(par->vmalloc == NULL)) {
ret = -ENOMEM;
goto err_free;
}
- ret = vmw_fb_create_bo(vmw_priv, fb_size, &par->vmw_bo);
- if (unlikely(ret != 0))
- goto err_free;
-
- ret = ttm_bo_kmap(&par->vmw_bo->base,
- 0,
- par->vmw_bo->base.num_pages,
- &par->map);
- if (unlikely(ret != 0))
- goto err_unref;
- par->bo_ptr = ttm_kmap_obj_virtual(&par->map, &par->bo_iowrite);
- par->bo_size = fb_size;
-
/*
* Fixed and var
*/
@@ -490,7 +709,7 @@ int vmw_fb_init(struct vmw_private *vmw_priv)
info->fix.smem_len = fb_size;
info->pseudo_palette = par->pseudo_palette;
- info->screen_base = par->vmalloc;
+ info->screen_base = (char __iomem *)par->vmalloc;
info->screen_size = fb_size;
info->flags = FBINFO_DEFAULT;
@@ -508,18 +727,14 @@ int vmw_fb_init(struct vmw_private *vmw_priv)
info->var.xres_virtual = fb_width;
info->var.yres_virtual = fb_height;
- info->var.bits_per_pixel = par->bpp;
+ info->var.bits_per_pixel = fb_bpp;
info->var.xoffset = 0;
info->var.yoffset = 0;
info->var.activate = FB_ACTIVATE_NOW;
info->var.height = -1;
info->var.width = -1;
- info->var.xres = initial_width;
- info->var.yres = initial_height;
-
/* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */
-
info->apertures = alloc_apertures(1);
if (!info->apertures) {
ret = -ENOMEM;
@@ -535,6 +750,7 @@ int vmw_fb_init(struct vmw_private *vmw_priv)
par->dirty.y1 = par->dirty.y2 = 0;
par->dirty.active = true;
spin_lock_init(&par->dirty.lock);
+ mutex_init(&par->bo_mutex);
info->fbdefio = &vmw_defio;
fb_deferred_io_init(info);
@@ -542,16 +758,16 @@ int vmw_fb_init(struct vmw_private *vmw_priv)
if (unlikely(ret != 0))
goto err_defio;
+ vmw_fb_set_par(info);
+
return 0;
err_defio:
fb_deferred_io_cleanup(info);
err_aper:
- ttm_bo_kunmap(&par->map);
-err_unref:
- ttm_bo_unref((struct ttm_buffer_object **)&par->vmw_bo);
err_free:
vfree(par->vmalloc);
+err_kms:
framebuffer_release(info);
vmw_priv->fb_info = NULL;
@@ -562,22 +778,19 @@ int vmw_fb_close(struct vmw_private *vmw_priv)
{
struct fb_info *info;
struct vmw_fb_par *par;
- struct ttm_buffer_object *bo;
if (!vmw_priv->fb_info)
return 0;
info = vmw_priv->fb_info;
par = info->par;
- bo = &par->vmw_bo->base;
- par->vmw_bo = NULL;
/* FIXME: correct teardown order? */
fb_deferred_io_cleanup(info);
+ cancel_delayed_work_sync(&par->local_work);
unregister_framebuffer(info);
- ttm_bo_kunmap(&par->map);
- ttm_bo_unref(&bo);
+ (void) vmw_fb_kms_detach(par, true, true);
vfree(par->vmalloc);
framebuffer_release(info);
@@ -602,11 +815,11 @@ int vmw_fb_off(struct vmw_private *vmw_priv)
spin_unlock_irqrestore(&par->dirty.lock, flags);
flush_delayed_work(&info->deferred_work);
+ flush_delayed_work(&par->local_work);
- par->bo_ptr = NULL;
- ttm_bo_kunmap(&par->map);
-
- vmw_dmabuf_unpin(vmw_priv, par->vmw_bo, false);
+ mutex_lock(&par->bo_mutex);
+ (void) vmw_fb_kms_detach(par, true, false);
+ mutex_unlock(&par->bo_mutex);
return 0;
}
@@ -616,8 +829,6 @@ int vmw_fb_on(struct vmw_private *vmw_priv)
struct fb_info *info;
struct vmw_fb_par *par;
unsigned long flags;
- bool dummy;
- int ret;
if (!vmw_priv->fb_info)
return -EINVAL;
@@ -625,38 +836,10 @@ int vmw_fb_on(struct vmw_private *vmw_priv)
info = vmw_priv->fb_info;
par = info->par;
- /* we are already active */
- if (par->bo_ptr != NULL)
- return 0;
-
- /* Make sure that all overlays are stoped when we take over */
- vmw_overlay_stop_all(vmw_priv);
-
- ret = vmw_dmabuf_to_start_of_vram(vmw_priv, par->vmw_bo, true, false);
- if (unlikely(ret != 0)) {
- DRM_ERROR("could not move buffer to start of VRAM\n");
- goto err_no_buffer;
- }
-
- ret = ttm_bo_kmap(&par->vmw_bo->base,
- 0,
- par->vmw_bo->base.num_pages,
- &par->map);
- BUG_ON(ret != 0);
- par->bo_ptr = ttm_kmap_obj_virtual(&par->map, &dummy);
-
+ vmw_fb_set_par(info);
spin_lock_irqsave(&par->dirty.lock, flags);
par->dirty.active = true;
spin_unlock_irqrestore(&par->dirty.lock, flags);
-
-err_no_buffer:
- vmw_fb_set_par(info);
-
- vmw_fb_dirty_mark(par, 0, 0, info->var.xres, info->var.yres);
-
- /* If there already was stuff dirty we wont
- * schedule a new work, so lets do it now */
- schedule_delayed_work(&info->deferred_work, 0);
-
+
return 0;
}
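Two details of the fbdev rework above are easy to miss. First, teardown now funnels through vmw_fb_kms_detach(), which unsets the mode before unmapping and unpinning the buffer. Second, vmw_fb_kms_framebuffer() reallocates the backing buffer only when the current one is too small or more than twice the required size; isolated, the reuse test amounts to this sketch (hypothetical name):

/* Reuse the existing BO iff new_size <= cur_size <= 2 * new_size. */
static bool demo_reuse_fb_bo(size_t cur_size, size_t new_size)
{
	return cur_size >= new_size && cur_size <= 2 * new_size;
}

The factor of two gives mode switches some hysteresis, so toggling between nearby resolutions does not free and reallocate a large buffer every time.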
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
index 945f1e0dad92..567ddede51d1 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
@@ -1,6 +1,6 @@
/**************************************************************************
*
- * Copyright © 2011 VMware, Inc., Palo Alto, CA., USA
+ * Copyright © 2011-2014 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -142,7 +142,7 @@ static bool vmw_fence_enable_signaling(struct fence *f)
struct vmw_fence_manager *fman = fman_from_fence(fence);
struct vmw_private *dev_priv = fman->dev_priv;
- __le32 __iomem *fifo_mem = dev_priv->mmio_virt;
+ u32 __iomem *fifo_mem = dev_priv->mmio_virt;
u32 seqno = ioread32(fifo_mem + SVGA_FIFO_FENCE);
if (seqno - fence->base.seqno < VMW_FENCE_WRAP)
return false;
@@ -386,7 +386,7 @@ static bool vmw_fence_goal_new_locked(struct vmw_fence_manager *fman,
u32 passed_seqno)
{
u32 goal_seqno;
- __le32 __iomem *fifo_mem;
+ u32 __iomem *fifo_mem;
struct vmw_fence_obj *fence;
if (likely(!fman->seqno_valid))
@@ -430,7 +430,7 @@ static bool vmw_fence_goal_check_locked(struct vmw_fence_obj *fence)
{
struct vmw_fence_manager *fman = fman_from_fence(fence);
u32 goal_seqno;
- __le32 __iomem *fifo_mem;
+ u32 __iomem *fifo_mem;
if (fence_is_signaled_locked(&fence->base))
return false;
@@ -453,7 +453,7 @@ static void __vmw_fences_update(struct vmw_fence_manager *fman)
struct list_head action_list;
bool needs_rerun;
uint32_t seqno, new_seqno;
- __le32 __iomem *fifo_mem = fman->dev_priv->mmio_virt;
+ u32 __iomem *fifo_mem = fman->dev_priv->mmio_virt;
seqno = ioread32(fifo_mem + SVGA_FIFO_FENCE);
rerun:
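The fence checks above all compare sequence numbers with an unsigned subtraction of the form seqno - fence->base.seqno < VMW_FENCE_WRAP, which stays correct across 32-bit counter wraparound as long as the distance is within the window. The generic half-range form of the test, as a sketch:

/* True iff 'a' is at or after 'b' modulo 2^32, within half the range. */
static bool demo_seqno_passed(u32 a, u32 b)
{
	return (a - b) < (1u << 31);
}

VMW_FENCE_WRAP narrows that window further, bounding how far ahead a hardware seqno may legitimately run.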
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h
index 26a4add39208..8be6c29f5eb5 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h
@@ -1,6 +1,6 @@
/**************************************************************************
*
- * Copyright © 2011 VMware, Inc., Palo Alto, CA., USA
+ * Copyright © 2011-2012 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
index 39f2b03888e7..80c40c31d4f8 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
@@ -1,6 +1,6 @@
/**************************************************************************
*
- * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
+ * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -29,9 +29,14 @@
#include <drm/drmP.h>
#include <drm/ttm/ttm_placement.h>
+struct vmw_temp_set_context {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDXTempSetContext body;
+};
+
bool vmw_fifo_have_3d(struct vmw_private *dev_priv)
{
- __le32 __iomem *fifo_mem = dev_priv->mmio_virt;
+ u32 __iomem *fifo_mem = dev_priv->mmio_virt;
uint32_t fifo_min, hwversion;
const struct vmw_fifo_state *fifo = &dev_priv->fifo;
@@ -71,8 +76,8 @@ bool vmw_fifo_have_3d(struct vmw_private *dev_priv)
if (hwversion < SVGA3D_HWVERSION_WS8_B1)
return false;
- /* Non-Screen Object path does not support surfaces */
- if (!dev_priv->sou_priv)
+ /* Legacy Display Unit does not support surfaces */
+ if (dev_priv->active_display_unit == vmw_du_legacy)
return false;
return true;
@@ -80,7 +85,7 @@ bool vmw_fifo_have_3d(struct vmw_private *dev_priv)
bool vmw_fifo_have_pitchlock(struct vmw_private *dev_priv)
{
- __le32 __iomem *fifo_mem = dev_priv->mmio_virt;
+ u32 __iomem *fifo_mem = dev_priv->mmio_virt;
uint32_t caps;
if (!(dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO))
@@ -95,11 +100,11 @@ bool vmw_fifo_have_pitchlock(struct vmw_private *dev_priv)
int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
{
- __le32 __iomem *fifo_mem = dev_priv->mmio_virt;
+ u32 __iomem *fifo_mem = dev_priv->mmio_virt;
uint32_t max;
uint32_t min;
- uint32_t dummy;
+ fifo->dx = false;
fifo->static_buffer_size = VMWGFX_FIFO_STATIC_SIZE;
fifo->static_buffer = vmalloc(fifo->static_buffer_size);
if (unlikely(fifo->static_buffer == NULL))
@@ -112,10 +117,6 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
mutex_init(&fifo->fifo_mutex);
init_rwsem(&fifo->rwsem);
- /*
- * Allow mapping the first page read-only to user-space.
- */
-
DRM_INFO("width %d\n", vmw_read(dev_priv, SVGA_REG_WIDTH));
DRM_INFO("height %d\n", vmw_read(dev_priv, SVGA_REG_HEIGHT));
DRM_INFO("bpp %d\n", vmw_read(dev_priv, SVGA_REG_BITS_PER_PIXEL));
@@ -123,7 +124,10 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
dev_priv->enable_state = vmw_read(dev_priv, SVGA_REG_ENABLE);
dev_priv->config_done_state = vmw_read(dev_priv, SVGA_REG_CONFIG_DONE);
dev_priv->traces_state = vmw_read(dev_priv, SVGA_REG_TRACES);
- vmw_write(dev_priv, SVGA_REG_ENABLE, 1);
+
+ vmw_write(dev_priv, SVGA_REG_ENABLE, SVGA_REG_ENABLE_ENABLE |
+ SVGA_REG_ENABLE_HIDE);
+ vmw_write(dev_priv, SVGA_REG_TRACES, 0);
min = 4;
if (dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO)
@@ -155,12 +159,13 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
atomic_set(&dev_priv->marker_seq, dev_priv->last_read_seqno);
iowrite32(dev_priv->last_read_seqno, fifo_mem + SVGA_FIFO_FENCE);
vmw_marker_queue_init(&fifo->marker_queue);
- return vmw_fifo_send_fence(dev_priv, &dummy);
+
+ return 0;
}
void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason)
{
- __le32 __iomem *fifo_mem = dev_priv->mmio_virt;
+ u32 __iomem *fifo_mem = dev_priv->mmio_virt;
static DEFINE_SPINLOCK(ping_lock);
unsigned long irq_flags;
@@ -178,7 +183,7 @@ void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason)
void vmw_fifo_release(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
{
- __le32 __iomem *fifo_mem = dev_priv->mmio_virt;
+ u32 __iomem *fifo_mem = dev_priv->mmio_virt;
vmw_write(dev_priv, SVGA_REG_SYNC, SVGA_SYNC_GENERIC);
while (vmw_read(dev_priv, SVGA_REG_BUSY) != 0)
@@ -208,7 +213,7 @@ void vmw_fifo_release(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
static bool vmw_fifo_is_full(struct vmw_private *dev_priv, uint32_t bytes)
{
- __le32 __iomem *fifo_mem = dev_priv->mmio_virt;
+ u32 __iomem *fifo_mem = dev_priv->mmio_virt;
uint32_t max = ioread32(fifo_mem + SVGA_FIFO_MAX);
uint32_t next_cmd = ioread32(fifo_mem + SVGA_FIFO_NEXT_CMD);
uint32_t min = ioread32(fifo_mem + SVGA_FIFO_MIN);
@@ -312,10 +317,11 @@ static int vmw_fifo_wait(struct vmw_private *dev_priv,
* Returns:
* Pointer to the fifo, or null on error (possible hardware hang).
*/
-void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes)
+static void *vmw_local_fifo_reserve(struct vmw_private *dev_priv,
+ uint32_t bytes)
{
struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
- __le32 __iomem *fifo_mem = dev_priv->mmio_virt;
+ u32 __iomem *fifo_mem = dev_priv->mmio_virt;
uint32_t max;
uint32_t min;
uint32_t next_cmd;
@@ -372,7 +378,8 @@ void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes)
if (reserveable)
iowrite32(bytes, fifo_mem +
SVGA_FIFO_RESERVED);
- return fifo_mem + (next_cmd >> 2);
+ return (void __force *) (fifo_mem +
+ (next_cmd >> 2));
} else {
need_bounce = true;
}
@@ -391,11 +398,36 @@ void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes)
out_err:
fifo_state->reserved_size = 0;
mutex_unlock(&fifo_state->fifo_mutex);
+
return NULL;
}
+void *vmw_fifo_reserve_dx(struct vmw_private *dev_priv, uint32_t bytes,
+ int ctx_id)
+{
+ void *ret;
+
+ if (dev_priv->cman)
+ ret = vmw_cmdbuf_reserve(dev_priv->cman, bytes,
+ ctx_id, false, NULL);
+ else if (ctx_id == SVGA3D_INVALID_ID)
+ ret = vmw_local_fifo_reserve(dev_priv, bytes);
+ else {
+ WARN_ON("Command buffer has not been allocated.\n");
+ ret = NULL;
+ }
+ if (IS_ERR_OR_NULL(ret)) {
+ DRM_ERROR("Fifo reserve failure of %u bytes.\n",
+ (unsigned) bytes);
+ dump_stack();
+ return NULL;
+ }
+
+ return ret;
+}
+
static void vmw_fifo_res_copy(struct vmw_fifo_state *fifo_state,
- __le32 __iomem *fifo_mem,
+ u32 __iomem *fifo_mem,
uint32_t next_cmd,
uint32_t max, uint32_t min, uint32_t bytes)
{
@@ -417,7 +449,7 @@ static void vmw_fifo_res_copy(struct vmw_fifo_state *fifo_state,
}
static void vmw_fifo_slow_copy(struct vmw_fifo_state *fifo_state,
- __le32 __iomem *fifo_mem,
+ u32 __iomem *fifo_mem,
uint32_t next_cmd,
uint32_t max, uint32_t min, uint32_t bytes)
{
@@ -436,15 +468,19 @@ static void vmw_fifo_slow_copy(struct vmw_fifo_state *fifo_state,
}
}
-void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes)
+static void vmw_local_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes)
{
struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
- __le32 __iomem *fifo_mem = dev_priv->mmio_virt;
+ u32 __iomem *fifo_mem = dev_priv->mmio_virt;
uint32_t next_cmd = ioread32(fifo_mem + SVGA_FIFO_NEXT_CMD);
uint32_t max = ioread32(fifo_mem + SVGA_FIFO_MAX);
uint32_t min = ioread32(fifo_mem + SVGA_FIFO_MIN);
bool reserveable = fifo_state->capabilities & SVGA_FIFO_CAP_RESERVE;
+ if (fifo_state->dx)
+ bytes += sizeof(struct vmw_temp_set_context);
+
+ fifo_state->dx = false;
BUG_ON((bytes & 3) != 0);
BUG_ON(bytes > fifo_state->reserved_size);
@@ -482,13 +518,53 @@ void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes)
mutex_unlock(&fifo_state->fifo_mutex);
}
+void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes)
+{
+ if (dev_priv->cman)
+ vmw_cmdbuf_commit(dev_priv->cman, bytes, NULL, false);
+ else
+ vmw_local_fifo_commit(dev_priv, bytes);
+}
+
+
+/**
+ * vmw_fifo_commit_flush - Commit fifo space and flush any buffered commands.
+ *
+ * @dev_priv: Pointer to device private structure.
+ * @bytes: Number of bytes to commit.
+ */
+void vmw_fifo_commit_flush(struct vmw_private *dev_priv, uint32_t bytes)
+{
+ if (dev_priv->cman)
+ vmw_cmdbuf_commit(dev_priv->cman, bytes, NULL, true);
+ else
+ vmw_local_fifo_commit(dev_priv, bytes);
+}
+
+/**
+ * vmw_fifo_flush - Flush any buffered commands and make sure command processing
+ * starts.
+ *
+ * @dev_priv: Pointer to device private structure.
+ * @interruptible: Whether to wait interruptible if function needs to sleep.
+ */
+int vmw_fifo_flush(struct vmw_private *dev_priv, bool interruptible)
+{
+ might_sleep();
+
+ if (dev_priv->cman)
+ return vmw_cmdbuf_cur_flush(dev_priv->cman, interruptible);
+ else
+ return 0;
+}
+
int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
{
struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
struct svga_fifo_cmd_fence *cmd_fence;
- void *fm;
+ u32 *fm;
int ret = 0;
- uint32_t bytes = sizeof(__le32) + sizeof(*cmd_fence);
+ uint32_t bytes = sizeof(u32) + sizeof(*cmd_fence);
fm = vmw_fifo_reserve(dev_priv, bytes);
if (unlikely(fm == NULL)) {
@@ -514,12 +590,10 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
return 0;
}
- *(__le32 *) fm = cpu_to_le32(SVGA_CMD_FENCE);
- cmd_fence = (struct svga_fifo_cmd_fence *)
- ((unsigned long)fm + sizeof(__le32));
-
- iowrite32(*seqno, &cmd_fence->fence);
- vmw_fifo_commit(dev_priv, bytes);
+ *fm++ = SVGA_CMD_FENCE;
+ cmd_fence = (struct svga_fifo_cmd_fence *) fm;
+ cmd_fence->fence = *seqno;
+ vmw_fifo_commit_flush(dev_priv, bytes);
(void) vmw_marker_push(&fifo_state->marker_queue, *seqno);
vmw_update_seqno(dev_priv, fifo_state);
@@ -545,7 +619,7 @@ static int vmw_fifo_emit_dummy_legacy_query(struct vmw_private *dev_priv,
* without writing to the query result structure.
*/
- struct ttm_buffer_object *bo = dev_priv->dummy_query_bo;
+ struct ttm_buffer_object *bo = &dev_priv->dummy_query_bo->base;
struct {
SVGA3dCmdHeader header;
SVGA3dCmdWaitForQuery body;
@@ -594,7 +668,7 @@ static int vmw_fifo_emit_dummy_gb_query(struct vmw_private *dev_priv,
* without writing to the query result structure.
*/
- struct ttm_buffer_object *bo = dev_priv->dummy_query_bo;
+ struct ttm_buffer_object *bo = &dev_priv->dummy_query_bo->base;
struct {
SVGA3dCmdHeader header;
SVGA3dCmdWaitForGBQuery body;
@@ -647,3 +721,8 @@ int vmw_fifo_emit_dummy_query(struct vmw_private *dev_priv,
return vmw_fifo_emit_dummy_legacy_query(dev_priv, cid);
}
+
+void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes)
+{
+ return vmw_fifo_reserve_dx(dev_priv, bytes, SVGA3D_INVALID_ID);
+}
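With the command-buffer manager in place, vmw_fifo_reserve() and vmw_fifo_commit() transparently route through vmw_cmdbuf_reserve() and vmw_cmdbuf_commit() whenever dev_priv->cman is set, so callers keep the usual reserve/fill/commit discipline. A minimal caller sketch, modelled on the removed fbdev flush path (hypothetical function name):

static int demo_send_update(struct vmw_private *dev_priv,
			    u32 x, u32 y, u32 w, u32 h)
{
	struct {
		u32 header;
		SVGAFifoCmdUpdate body;
	} *cmd;

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (!cmd)
		return -ENOMEM;	/* reserve already logged the failure */

	cmd->header = SVGA_CMD_UPDATE;
	cmd->body.x = x;
	cmd->body.y = y;
	cmd->body.width = w;
	cmd->body.height = h;
	vmw_fifo_commit(dev_priv, sizeof(*cmd));
	return 0;
}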
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
index 61d8d803199f..66ffa1d4759c 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
@@ -1,6 +1,6 @@
/**************************************************************************
*
- * Copyright © 2009-2011 VMware, Inc., Palo Alto, CA., USA
+ * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
index 69c8ce23123c..0a970afed93b 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
@@ -1,6 +1,6 @@
/**************************************************************************
*
- * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
+ * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -28,6 +28,7 @@
#include "vmwgfx_drv.h"
#include <drm/vmwgfx_drm.h>
#include "vmwgfx_kms.h"
+#include "device_include/svga3d_caps.h"
struct svga_3d_compat_cap {
SVGA3dCapsRecordHeader header;
@@ -63,7 +64,7 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data,
break;
case DRM_VMW_PARAM_FIFO_HW_VERSION:
{
- __le32 __iomem *fifo_mem = dev_priv->mmio_virt;
+ u32 __iomem *fifo_mem = dev_priv->mmio_virt;
const struct vmw_fifo_state *fifo = &dev_priv->fifo;
if ((dev_priv->capabilities & SVGA_CAP_GBOBJECTS)) {
@@ -105,6 +106,13 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data,
case DRM_VMW_PARAM_MAX_MOB_SIZE:
param->value = dev_priv->max_mob_size;
break;
+ case DRM_VMW_PARAM_SCREEN_TARGET:
+ param->value =
+ (dev_priv->active_display_unit == vmw_du_screen_target);
+ break;
+ case DRM_VMW_PARAM_DX:
+ param->value = dev_priv->has_dx;
+ break;
default:
DRM_ERROR("Illegal vmwgfx get param request: %d\n",
param->param);
@@ -154,7 +162,7 @@ int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data,
(struct drm_vmw_get_3d_cap_arg *) data;
struct vmw_private *dev_priv = vmw_priv(dev);
uint32_t size;
- __le32 __iomem *fifo_mem;
+ u32 __iomem *fifo_mem;
void __user *buffer = (void __user *)((unsigned long)(arg->buffer));
void *bounce;
int ret;
@@ -235,7 +243,7 @@ int vmw_present_ioctl(struct drm_device *dev, void *data,
int ret;
num_clips = arg->num_clips;
- clips_ptr = (struct drm_vmw_rect *)(unsigned long)arg->clips_ptr;
+ clips_ptr = (struct drm_vmw_rect __user *)(unsigned long)arg->clips_ptr;
if (unlikely(num_clips == 0))
return 0;
@@ -318,7 +326,7 @@ int vmw_present_readback_ioctl(struct drm_device *dev, void *data,
int ret;
num_clips = arg->num_clips;
- clips_ptr = (struct drm_vmw_rect *)(unsigned long)arg->clips_ptr;
+ clips_ptr = (struct drm_vmw_rect __user *)(unsigned long)arg->clips_ptr;
if (unlikely(num_clips == 0))
return 0;
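User-space can probe the two parameters added above through the ordinary getparam path; a sketch using libdrm, assuming the stock drm_vmw_getparam_arg layout from vmwgfx_drm.h (this snippet is not part of the patch):

#include <xf86drm.h>
#include "vmwgfx_drm.h"

static int demo_has_dx(int fd)
{
	struct drm_vmw_getparam_arg arg = { .param = DRM_VMW_PARAM_DX };

	if (drmCommandWriteRead(fd, DRM_VMW_GETPARAM, &arg, sizeof(arg)))
		return 0;
	return arg.value != 0;
}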
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
index 9fe9827ee499..9498a5e33c12 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
@@ -1,6 +1,6 @@
/**************************************************************************
*
- * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
+ * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -56,6 +56,9 @@ irqreturn_t vmw_irq_handler(int irq, void *arg)
if (masked_status & SVGA_IRQFLAG_FIFO_PROGRESS)
wake_up_all(&dev_priv->fifo_queue);
+ if (masked_status & (SVGA_IRQFLAG_COMMAND_BUFFER |
+ SVGA_IRQFLAG_ERROR))
+ vmw_cmdbuf_tasklet_schedule(dev_priv->cman);
return IRQ_HANDLED;
}
@@ -69,7 +72,7 @@ static bool vmw_fifo_idle(struct vmw_private *dev_priv, uint32_t seqno)
void vmw_update_seqno(struct vmw_private *dev_priv,
struct vmw_fifo_state *fifo_state)
{
- __le32 __iomem *fifo_mem = dev_priv->mmio_virt;
+ u32 __iomem *fifo_mem = dev_priv->mmio_virt;
uint32_t seqno = ioread32(fifo_mem + SVGA_FIFO_FENCE);
if (dev_priv->last_read_seqno != seqno) {
@@ -131,8 +134,16 @@ int vmw_fallback_wait(struct vmw_private *dev_priv,
* Block command submission while waiting for idle.
*/
- if (fifo_idle)
+ if (fifo_idle) {
down_read(&fifo_state->rwsem);
+ if (dev_priv->cman) {
+ ret = vmw_cmdbuf_idle(dev_priv->cman, interruptible,
+ 10*HZ);
+ if (ret)
+ goto out_err;
+ }
+ }
+
signal_seq = atomic_read(&dev_priv->marker_seq);
ret = 0;
@@ -167,10 +178,11 @@ int vmw_fallback_wait(struct vmw_private *dev_priv,
}
finish_wait(&dev_priv->fence_queue, &__wait);
if (ret == 0 && fifo_idle) {
- __le32 __iomem *fifo_mem = dev_priv->mmio_virt;
+ u32 __iomem *fifo_mem = dev_priv->mmio_virt;
iowrite32(signal_seq, fifo_mem + SVGA_FIFO_FENCE);
}
wake_up_all(&dev_priv->fence_queue);
+out_err:
if (fifo_idle)
up_read(&fifo_state->rwsem);
@@ -315,3 +327,30 @@ void vmw_irq_uninstall(struct drm_device *dev)
status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
outl(status, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
}
+
+void vmw_generic_waiter_add(struct vmw_private *dev_priv,
+ u32 flag, int *waiter_count)
+{
+ unsigned long irq_flags;
+
+ spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
+ if ((*waiter_count)++ == 0) {
+ outl(flag, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
+ dev_priv->irq_mask |= flag;
+ vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
+ }
+ spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
+}
+
+void vmw_generic_waiter_remove(struct vmw_private *dev_priv,
+ u32 flag, int *waiter_count)
+{
+ unsigned long irq_flags;
+
+ spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
+ if (--(*waiter_count) == 0) {
+ dev_priv->irq_mask &= ~flag;
+ vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
+ }
+ spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
+}
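vmw_generic_waiter_add() and vmw_generic_waiter_remove() above are the counted-waiter idiom: the interrupt source is unmasked only on the 0 -> 1 transition of the waiter count and masked again on 1 -> 0, so any number of sleepers can share one IRQ flag. The shape of the pattern, with illustrative callbacks standing in for the register writes:

static void demo_waiter_add(spinlock_t *lock, int *count,
			    void (*unmask)(void))
{
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	if ((*count)++ == 0)
		unmask();	/* first waiter enables the IRQ */
	spin_unlock_irqrestore(lock, flags);
}

static void demo_waiter_remove(spinlock_t *lock, int *count,
			       void (*mask)(void))
{
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	if (--(*count) == 0)
		mask();		/* last waiter disables it again */
	spin_unlock_irqrestore(lock, flags);
}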
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index 07cda8cbbddb..61fb7f3de311 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -1,6 +1,6 @@
/**************************************************************************
*
- * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
+ * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -31,45 +31,7 @@
/* Might need an hrtimer here? */
#define VMWGFX_PRESENT_RATE ((HZ / 60 > 0) ? HZ / 60 : 1)
-
-struct vmw_clip_rect {
- int x1, x2, y1, y2;
-};
-
-/**
- * Clip @num_rects number of @rects against @clip storing the
- * results in @out_rects and the number of passed rects in @out_num.
- */
-static void vmw_clip_cliprects(struct drm_clip_rect *rects,
- int num_rects,
- struct vmw_clip_rect clip,
- SVGASignedRect *out_rects,
- int *out_num)
-{
- int i, k;
-
- for (i = 0, k = 0; i < num_rects; i++) {
- int x1 = max_t(int, clip.x1, rects[i].x1);
- int y1 = max_t(int, clip.y1, rects[i].y1);
- int x2 = min_t(int, clip.x2, rects[i].x2);
- int y2 = min_t(int, clip.y2, rects[i].y2);
-
- if (x1 >= x2)
- continue;
- if (y1 >= y2)
- continue;
-
- out_rects[k].left = x1;
- out_rects[k].top = y1;
- out_rects[k].right = x2;
- out_rects[k].bottom = y2;
- k++;
- }
-
- *out_num = k;
-}
-
-void vmw_display_unit_cleanup(struct vmw_display_unit *du)
+void vmw_du_cleanup(struct vmw_display_unit *du)
{
if (du->cursor_surface)
vmw_surface_unreference(&du->cursor_surface);
@@ -109,12 +71,12 @@ int vmw_cursor_update_image(struct vmw_private *dev_priv,
memcpy(&cmd[1], image, image_size);
- cmd->cmd = cpu_to_le32(SVGA_CMD_DEFINE_ALPHA_CURSOR);
- cmd->cursor.id = cpu_to_le32(0);
- cmd->cursor.width = cpu_to_le32(width);
- cmd->cursor.height = cpu_to_le32(height);
- cmd->cursor.hotspotX = cpu_to_le32(hotspotX);
- cmd->cursor.hotspotY = cpu_to_le32(hotspotY);
+ cmd->cmd = SVGA_CMD_DEFINE_ALPHA_CURSOR;
+ cmd->cursor.id = 0;
+ cmd->cursor.width = width;
+ cmd->cursor.height = height;
+ cmd->cursor.hotspotX = hotspotX;
+ cmd->cursor.hotspotY = hotspotY;
vmw_fifo_commit(dev_priv, cmd_size);
@@ -161,7 +123,7 @@ err_unreserve:
void vmw_cursor_update_position(struct vmw_private *dev_priv,
bool show, int x, int y)
{
- __le32 __iomem *fifo_mem = dev_priv->mmio_virt;
+ u32 __iomem *fifo_mem = dev_priv->mmio_virt;
uint32_t count;
iowrite32(show ? 1 : 0, fifo_mem + SVGA_FIFO_CURSOR_ON);
@@ -367,15 +329,6 @@ void vmw_kms_cursor_snoop(struct vmw_surface *srf,
srf->snooper.age++;
- /* we can't call this function from this function since execbuf has
- * reserved fifo space.
- *
- * if (srf->snooper.crtc)
- * vmw_ldu_crtc_cursor_update_image(dev_priv,
- * srf->snooper.image, 64, 64,
- * du->hotspot_x, du->hotspot_y);
- */
-
ttm_bo_kunmap(&map);
err_unreserve:
ttm_bo_unreserve(bo);
@@ -412,183 +365,19 @@ void vmw_kms_cursor_post_execbuf(struct vmw_private *dev_priv)
* Surface framebuffer code
*/
-#define vmw_framebuffer_to_vfbs(x) \
- container_of(x, struct vmw_framebuffer_surface, base.base)
-
-struct vmw_framebuffer_surface {
- struct vmw_framebuffer base;
- struct vmw_surface *surface;
- struct vmw_dma_buffer *buffer;
- struct list_head head;
- struct drm_master *master;
-};
-
static void vmw_framebuffer_surface_destroy(struct drm_framebuffer *framebuffer)
{
struct vmw_framebuffer_surface *vfbs =
vmw_framebuffer_to_vfbs(framebuffer);
- struct vmw_master *vmaster = vmw_master(vfbs->master);
-
- mutex_lock(&vmaster->fb_surf_mutex);
- list_del(&vfbs->head);
- mutex_unlock(&vmaster->fb_surf_mutex);
-
- drm_master_put(&vfbs->master);
drm_framebuffer_cleanup(framebuffer);
vmw_surface_unreference(&vfbs->surface);
- ttm_base_object_unref(&vfbs->base.user_obj);
+ if (vfbs->base.user_obj)
+ ttm_base_object_unref(&vfbs->base.user_obj);
kfree(vfbs);
}
-static int do_surface_dirty_sou(struct vmw_private *dev_priv,
- struct drm_file *file_priv,
- struct vmw_framebuffer *framebuffer,
- unsigned flags, unsigned color,
- struct drm_clip_rect *clips,
- unsigned num_clips, int inc,
- struct vmw_fence_obj **out_fence)
-{
- struct vmw_display_unit *units[VMWGFX_NUM_DISPLAY_UNITS];
- struct drm_clip_rect *clips_ptr;
- struct drm_clip_rect *tmp;
- struct drm_crtc *crtc;
- size_t fifo_size;
- int i, num_units;
- int ret = 0; /* silence warning */
- int left, right, top, bottom;
-
- struct {
- SVGA3dCmdHeader header;
- SVGA3dCmdBlitSurfaceToScreen body;
- } *cmd;
- SVGASignedRect *blits;
-
- num_units = 0;
- list_for_each_entry(crtc, &dev_priv->dev->mode_config.crtc_list,
- head) {
- if (crtc->primary->fb != &framebuffer->base)
- continue;
- units[num_units++] = vmw_crtc_to_du(crtc);
- }
-
- BUG_ON(!clips || !num_clips);
-
- tmp = kzalloc(sizeof(*tmp) * num_clips, GFP_KERNEL);
- if (unlikely(tmp == NULL)) {
- DRM_ERROR("Temporary cliprect memory alloc failed.\n");
- return -ENOMEM;
- }
-
- fifo_size = sizeof(*cmd) + sizeof(SVGASignedRect) * num_clips;
- cmd = kzalloc(fifo_size, GFP_KERNEL);
- if (unlikely(cmd == NULL)) {
- DRM_ERROR("Temporary fifo memory alloc failed.\n");
- ret = -ENOMEM;
- goto out_free_tmp;
- }
-
- /* setup blits pointer */
- blits = (SVGASignedRect *)&cmd[1];
-
- /* initial clip region */
- left = clips->x1;
- right = clips->x2;
- top = clips->y1;
- bottom = clips->y2;
-
- /* skip the first clip rect */
- for (i = 1, clips_ptr = clips + inc;
- i < num_clips; i++, clips_ptr += inc) {
- left = min_t(int, left, (int)clips_ptr->x1);
- right = max_t(int, right, (int)clips_ptr->x2);
- top = min_t(int, top, (int)clips_ptr->y1);
- bottom = max_t(int, bottom, (int)clips_ptr->y2);
- }
-
- /* only need to do this once */
- cmd->header.id = cpu_to_le32(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN);
- cmd->header.size = cpu_to_le32(fifo_size - sizeof(cmd->header));
-
- cmd->body.srcRect.left = left;
- cmd->body.srcRect.right = right;
- cmd->body.srcRect.top = top;
- cmd->body.srcRect.bottom = bottom;
-
- clips_ptr = clips;
- for (i = 0; i < num_clips; i++, clips_ptr += inc) {
- tmp[i].x1 = clips_ptr->x1 - left;
- tmp[i].x2 = clips_ptr->x2 - left;
- tmp[i].y1 = clips_ptr->y1 - top;
- tmp[i].y2 = clips_ptr->y2 - top;
- }
-
- /* do per unit writing, reuse fifo for each */
- for (i = 0; i < num_units; i++) {
- struct vmw_display_unit *unit = units[i];
- struct vmw_clip_rect clip;
- int num;
-
- clip.x1 = left - unit->crtc.x;
- clip.y1 = top - unit->crtc.y;
- clip.x2 = right - unit->crtc.x;
- clip.y2 = bottom - unit->crtc.y;
-
- /* skip any crtcs that misses the clip region */
- if (clip.x1 >= unit->crtc.mode.hdisplay ||
- clip.y1 >= unit->crtc.mode.vdisplay ||
- clip.x2 <= 0 || clip.y2 <= 0)
- continue;
-
- /*
- * In order for the clip rects to be correctly scaled
- * the src and dest rects needs to be the same size.
- */
- cmd->body.destRect.left = clip.x1;
- cmd->body.destRect.right = clip.x2;
- cmd->body.destRect.top = clip.y1;
- cmd->body.destRect.bottom = clip.y2;
-
- /* create a clip rect of the crtc in dest coords */
- clip.x2 = unit->crtc.mode.hdisplay - clip.x1;
- clip.y2 = unit->crtc.mode.vdisplay - clip.y1;
- clip.x1 = 0 - clip.x1;
- clip.y1 = 0 - clip.y1;
-
- /* need to reset sid as it is changed by execbuf */
- cmd->body.srcImage.sid = cpu_to_le32(framebuffer->user_handle);
- cmd->body.destScreenId = unit->unit;
-
- /* clip and write blits to cmd stream */
- vmw_clip_cliprects(tmp, num_clips, clip, blits, &num);
-
- /* if no cliprects hit skip this */
- if (num == 0)
- continue;
-
- /* only return the last fence */
- if (out_fence && *out_fence)
- vmw_fence_obj_unreference(out_fence);
-
- /* recalculate package length */
- fifo_size = sizeof(*cmd) + sizeof(SVGASignedRect) * num;
- cmd->header.size = cpu_to_le32(fifo_size - sizeof(cmd->header));
- ret = vmw_execbuf_process(file_priv, dev_priv, NULL, cmd,
- fifo_size, 0, NULL, out_fence);
-
- if (unlikely(ret != 0))
- break;
- }
-
-
- kfree(cmd);
-out_free_tmp:
- kfree(tmp);
-
- return ret;
-}
-
static int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer,
struct drm_file *file_priv,
unsigned flags, unsigned color,
@@ -601,11 +390,8 @@ static int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer,
struct drm_clip_rect norect;
int ret, inc = 1;
- if (unlikely(vfbs->master != file_priv->master))
- return -EINVAL;
-
- /* Require ScreenObject support for 3D */
- if (!dev_priv->sou_priv)
+ /* Legacy Display Unit does not support 3D */
+ if (dev_priv->active_display_unit == vmw_du_legacy)
return -EINVAL;
drm_modeset_lock_all(dev_priv->dev);
@@ -627,10 +413,16 @@ static int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer,
inc = 2; /* skip source rects */
}
- ret = do_surface_dirty_sou(dev_priv, file_priv, &vfbs->base,
- flags, color,
- clips, num_clips, inc, NULL);
+ if (dev_priv->active_display_unit == vmw_du_screen_object)
+ ret = vmw_kms_sou_do_surface_dirty(dev_priv, &vfbs->base,
+ clips, NULL, NULL, 0, 0,
+ num_clips, inc, NULL);
+ else
+ ret = vmw_kms_stdu_surface_dirty(dev_priv, &vfbs->base,
+ clips, NULL, NULL, 0, 0,
+ num_clips, inc, NULL);
+ vmw_fifo_flush(dev_priv, false);
ttm_read_unlock(&dev_priv->reservation_sem);
drm_modeset_unlock_all(dev_priv->dev);
@@ -638,27 +430,66 @@ static int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer,
return 0;
}
+/**
+ * vmw_kms_readback - Perform a readback from the screen system to
+ * a dma-buffer backed framebuffer.
+ *
+ * @dev_priv: Pointer to the device private structure.
+ * @file_priv: Pointer to a struct drm_file identifying the caller.
+ * Must be set to NULL if @user_fence_rep is NULL.
+ * @vfb: Pointer to the dma-buffer backed framebuffer.
+ * @user_fence_rep: User-space provided structure for fence information.
+ * Must be set to non-NULL if @file_priv is non-NULL.
+ * @vclips: Array of clip rects.
+ * @num_clips: Number of clip rects in @vclips.
+ *
+ * Returns 0 on success, negative error code on failure. -ERESTARTSYS if
+ * interrupted.
+ */
+int vmw_kms_readback(struct vmw_private *dev_priv,
+ struct drm_file *file_priv,
+ struct vmw_framebuffer *vfb,
+ struct drm_vmw_fence_rep __user *user_fence_rep,
+ struct drm_vmw_rect *vclips,
+ uint32_t num_clips)
+{
+ switch (dev_priv->active_display_unit) {
+ case vmw_du_screen_object:
+ return vmw_kms_sou_readback(dev_priv, file_priv, vfb,
+ user_fence_rep, vclips, num_clips);
+ case vmw_du_screen_target:
+ return vmw_kms_stdu_dma(dev_priv, file_priv, vfb,
+ user_fence_rep, NULL, vclips, num_clips,
+ 1, false, true);
+ default:
+ WARN_ONCE(true,
+ "Readback called with invalid display system.\n");
+ }
+
+ return -ENOSYS;
+}
+
+
static struct drm_framebuffer_funcs vmw_framebuffer_surface_funcs = {
.destroy = vmw_framebuffer_surface_destroy,
.dirty = vmw_framebuffer_surface_dirty,
};
static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
- struct drm_file *file_priv,
struct vmw_surface *surface,
struct vmw_framebuffer **out,
const struct drm_mode_fb_cmd
- *mode_cmd)
+ *mode_cmd,
+ bool is_dmabuf_proxy)
{
struct drm_device *dev = dev_priv->dev;
struct vmw_framebuffer_surface *vfbs;
enum SVGA3dSurfaceFormat format;
- struct vmw_master *vmaster = vmw_master(file_priv->master);
int ret;
- /* 3D is only supported on HWv8 hosts which supports screen objects */
- if (!dev_priv->sou_priv)
+ /* 3D is only supported on HWv8 and newer hosts */
+ if (dev_priv->active_display_unit == vmw_du_legacy)
return -ENOSYS;
/*
@@ -692,15 +523,16 @@ static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
case 15:
format = SVGA3D_A1R5G5B5;
break;
- case 8:
- format = SVGA3D_LUMINANCE8;
- break;
default:
DRM_ERROR("Invalid color depth: %d\n", mode_cmd->depth);
return -EINVAL;
}
- if (unlikely(format != surface->format)) {
+ /*
+ * For DX, surface format validation is done when surface->scanout
+ * is set.
+ */
+ if (!dev_priv->has_dx && format != surface->format) {
DRM_ERROR("Invalid surface format for requested mode.\n");
return -EINVAL;
}
@@ -711,38 +543,27 @@ static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
goto out_err1;
}
- if (!vmw_surface_reference(surface)) {
- DRM_ERROR("failed to reference surface %p\n", surface);
- ret = -EINVAL;
- goto out_err2;
- }
-
/* XXX get the first 3 from the surface info */
vfbs->base.base.bits_per_pixel = mode_cmd->bpp;
vfbs->base.base.pitches[0] = mode_cmd->pitch;
vfbs->base.base.depth = mode_cmd->depth;
vfbs->base.base.width = mode_cmd->width;
vfbs->base.base.height = mode_cmd->height;
- vfbs->surface = surface;
+ vfbs->surface = vmw_surface_reference(surface);
vfbs->base.user_handle = mode_cmd->handle;
- vfbs->master = drm_master_get(file_priv->master);
-
- mutex_lock(&vmaster->fb_surf_mutex);
- list_add_tail(&vfbs->head, &vmaster->fb_surf);
- mutex_unlock(&vmaster->fb_surf_mutex);
+ vfbs->is_dmabuf_proxy = is_dmabuf_proxy;
*out = &vfbs->base;
ret = drm_framebuffer_init(dev, &vfbs->base.base,
&vmw_framebuffer_surface_funcs);
if (ret)
- goto out_err3;
+ goto out_err2;
return 0;
-out_err3:
- vmw_surface_unreference(&surface);
out_err2:
+ vmw_surface_unreference(&surface);
kfree(vfbs);
out_err1:
return ret;
@@ -752,14 +573,6 @@ out_err1:
* Dmabuf framebuffer code
*/
-#define vmw_framebuffer_to_vfbd(x) \
- container_of(x, struct vmw_framebuffer_dmabuf, base.base)
-
-struct vmw_framebuffer_dmabuf {
- struct vmw_framebuffer base;
- struct vmw_dma_buffer *buffer;
-};
-
static void vmw_framebuffer_dmabuf_destroy(struct drm_framebuffer *framebuffer)
{
struct vmw_framebuffer_dmabuf *vfbd =
@@ -767,185 +580,12 @@ static void vmw_framebuffer_dmabuf_destroy(struct drm_framebuffer *framebuffer)
drm_framebuffer_cleanup(framebuffer);
vmw_dmabuf_unreference(&vfbd->buffer);
- ttm_base_object_unref(&vfbd->base.user_obj);
+ if (vfbd->base.user_obj)
+ ttm_base_object_unref(&vfbd->base.user_obj);
kfree(vfbd);
}
-static int do_dmabuf_dirty_ldu(struct vmw_private *dev_priv,
- struct vmw_framebuffer *framebuffer,
- unsigned flags, unsigned color,
- struct drm_clip_rect *clips,
- unsigned num_clips, int increment)
-{
- size_t fifo_size;
- int i;
-
- struct {
- uint32_t header;
- SVGAFifoCmdUpdate body;
- } *cmd;
-
- fifo_size = sizeof(*cmd) * num_clips;
- cmd = vmw_fifo_reserve(dev_priv, fifo_size);
- if (unlikely(cmd == NULL)) {
- DRM_ERROR("Fifo reserve failed.\n");
- return -ENOMEM;
- }
-
- memset(cmd, 0, fifo_size);
- for (i = 0; i < num_clips; i++, clips += increment) {
- cmd[i].header = cpu_to_le32(SVGA_CMD_UPDATE);
- cmd[i].body.x = cpu_to_le32(clips->x1);
- cmd[i].body.y = cpu_to_le32(clips->y1);
- cmd[i].body.width = cpu_to_le32(clips->x2 - clips->x1);
- cmd[i].body.height = cpu_to_le32(clips->y2 - clips->y1);
- }
-
- vmw_fifo_commit(dev_priv, fifo_size);
- return 0;
-}
-
-static int do_dmabuf_define_gmrfb(struct drm_file *file_priv,
- struct vmw_private *dev_priv,
- struct vmw_framebuffer *framebuffer)
-{
- int depth = framebuffer->base.depth;
- size_t fifo_size;
- int ret;
-
- struct {
- uint32_t header;
- SVGAFifoCmdDefineGMRFB body;
- } *cmd;
-
- /* Emulate RGBA support, contrary to svga_reg.h this is not
- * supported by hosts. This is only a problem if we are reading
- * this value later and expecting what we uploaded back.
- */
- if (depth == 32)
- depth = 24;
-
- fifo_size = sizeof(*cmd);
- cmd = kmalloc(fifo_size, GFP_KERNEL);
- if (unlikely(cmd == NULL)) {
- DRM_ERROR("Failed to allocate temporary cmd buffer.\n");
- return -ENOMEM;
- }
-
- memset(cmd, 0, fifo_size);
- cmd->header = SVGA_CMD_DEFINE_GMRFB;
- cmd->body.format.bitsPerPixel = framebuffer->base.bits_per_pixel;
- cmd->body.format.colorDepth = depth;
- cmd->body.format.reserved = 0;
- cmd->body.bytesPerLine = framebuffer->base.pitches[0];
- cmd->body.ptr.gmrId = framebuffer->user_handle;
- cmd->body.ptr.offset = 0;
-
- ret = vmw_execbuf_process(file_priv, dev_priv, NULL, cmd,
- fifo_size, 0, NULL, NULL);
-
- kfree(cmd);
-
- return ret;
-}
-
-static int do_dmabuf_dirty_sou(struct drm_file *file_priv,
- struct vmw_private *dev_priv,
- struct vmw_framebuffer *framebuffer,
- unsigned flags, unsigned color,
- struct drm_clip_rect *clips,
- unsigned num_clips, int increment,
- struct vmw_fence_obj **out_fence)
-{
- struct vmw_display_unit *units[VMWGFX_NUM_DISPLAY_UNITS];
- struct drm_clip_rect *clips_ptr;
- int i, k, num_units, ret;
- struct drm_crtc *crtc;
- size_t fifo_size;
-
- struct {
- uint32_t header;
- SVGAFifoCmdBlitGMRFBToScreen body;
- } *blits;
-
- ret = do_dmabuf_define_gmrfb(file_priv, dev_priv, framebuffer);
- if (unlikely(ret != 0))
- return ret; /* define_gmrfb prints warnings */
-
- fifo_size = sizeof(*blits) * num_clips;
- blits = kmalloc(fifo_size, GFP_KERNEL);
- if (unlikely(blits == NULL)) {
- DRM_ERROR("Failed to allocate temporary cmd buffer.\n");
- return -ENOMEM;
- }
-
- num_units = 0;
- list_for_each_entry(crtc, &dev_priv->dev->mode_config.crtc_list, head) {
- if (crtc->primary->fb != &framebuffer->base)
- continue;
- units[num_units++] = vmw_crtc_to_du(crtc);
- }
-
- for (k = 0; k < num_units; k++) {
- struct vmw_display_unit *unit = units[k];
- int hit_num = 0;
-
- clips_ptr = clips;
- for (i = 0; i < num_clips; i++, clips_ptr += increment) {
- int clip_x1 = clips_ptr->x1 - unit->crtc.x;
- int clip_y1 = clips_ptr->y1 - unit->crtc.y;
- int clip_x2 = clips_ptr->x2 - unit->crtc.x;
- int clip_y2 = clips_ptr->y2 - unit->crtc.y;
- int move_x, move_y;
-
- /* skip any crtcs that misses the clip region */
- if (clip_x1 >= unit->crtc.mode.hdisplay ||
- clip_y1 >= unit->crtc.mode.vdisplay ||
- clip_x2 <= 0 || clip_y2 <= 0)
- continue;
-
- /* clip size to crtc size */
- clip_x2 = min_t(int, clip_x2, unit->crtc.mode.hdisplay);
- clip_y2 = min_t(int, clip_y2, unit->crtc.mode.vdisplay);
-
- /* translate both src and dest to bring clip into screen */
- move_x = min_t(int, clip_x1, 0);
- move_y = min_t(int, clip_y1, 0);
-
- /* actual translate done here */
- blits[hit_num].header = SVGA_CMD_BLIT_GMRFB_TO_SCREEN;
- blits[hit_num].body.destScreenId = unit->unit;
- blits[hit_num].body.srcOrigin.x = clips_ptr->x1 - move_x;
- blits[hit_num].body.srcOrigin.y = clips_ptr->y1 - move_y;
- blits[hit_num].body.destRect.left = clip_x1 - move_x;
- blits[hit_num].body.destRect.top = clip_y1 - move_y;
- blits[hit_num].body.destRect.right = clip_x2;
- blits[hit_num].body.destRect.bottom = clip_y2;
- hit_num++;
- }
-
- /* no clips hit the crtc */
- if (hit_num == 0)
- continue;
-
- /* only return the last fence */
- if (out_fence && *out_fence)
- vmw_fence_obj_unreference(out_fence);
-
- fifo_size = sizeof(*blits) * hit_num;
- ret = vmw_execbuf_process(file_priv, dev_priv, NULL, blits,
- fifo_size, 0, NULL, out_fence);
-
- if (unlikely(ret != 0))
- break;
- }
-
- kfree(blits);
-
- return ret;
-}
-
static int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer *framebuffer,
struct drm_file *file_priv,
unsigned flags, unsigned color,
@@ -977,16 +617,29 @@ static int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer *framebuffer,
increment = 2;
}
- if (dev_priv->ldu_priv) {
- ret = do_dmabuf_dirty_ldu(dev_priv, &vfbd->base,
- flags, color,
- clips, num_clips, increment);
- } else {
- ret = do_dmabuf_dirty_sou(file_priv, dev_priv, &vfbd->base,
- flags, color,
- clips, num_clips, increment, NULL);
+ switch (dev_priv->active_display_unit) {
+ case vmw_du_screen_target:
+ ret = vmw_kms_stdu_dma(dev_priv, NULL, &vfbd->base, NULL,
+ clips, NULL, num_clips, increment,
+ true, true);
+ break;
+ case vmw_du_screen_object:
+ ret = vmw_kms_sou_do_dmabuf_dirty(dev_priv, &vfbd->base,
+ clips, num_clips, increment,
+ true,
+ NULL);
+ break;
+ case vmw_du_legacy:
+ ret = vmw_kms_ldu_do_dmabuf_dirty(dev_priv, &vfbd->base, 0, 0,
+ clips, num_clips, increment);
+ break;
+ default:
+ ret = -EINVAL;
+ WARN_ONCE(true, "Dirty called with invalid display system.\n");
+ break;
}
+ vmw_fifo_flush(dev_priv, false);
ttm_read_unlock(&dev_priv->reservation_sem);
drm_modeset_unlock_all(dev_priv->dev);
@@ -1002,41 +655,133 @@ static struct drm_framebuffer_funcs vmw_framebuffer_dmabuf_funcs = {
/**
* Pin the dmabuffer to the start of vram.
*/
-static int vmw_framebuffer_dmabuf_pin(struct vmw_framebuffer *vfb)
+static int vmw_framebuffer_pin(struct vmw_framebuffer *vfb)
{
struct vmw_private *dev_priv = vmw_priv(vfb->base.dev);
- struct vmw_framebuffer_dmabuf *vfbd =
- vmw_framebuffer_to_vfbd(&vfb->base);
+ struct vmw_dma_buffer *buf;
int ret;
- /* This code should not be used with screen objects */
- BUG_ON(dev_priv->sou_priv);
-
- vmw_overlay_pause_all(dev_priv);
+ buf = vfb->dmabuf ? vmw_framebuffer_to_vfbd(&vfb->base)->buffer :
+ vmw_framebuffer_to_vfbs(&vfb->base)->surface->res.backup;
- ret = vmw_dmabuf_to_start_of_vram(dev_priv, vfbd->buffer, true, false);
-
- vmw_overlay_resume_all(dev_priv);
+ if (!buf)
+ return 0;
- WARN_ON(ret != 0);
+ switch (dev_priv->active_display_unit) {
+ case vmw_du_legacy:
+ vmw_overlay_pause_all(dev_priv);
+ ret = vmw_dmabuf_pin_in_start_of_vram(dev_priv, buf, false);
+ vmw_overlay_resume_all(dev_priv);
+ break;
+ case vmw_du_screen_object:
+ case vmw_du_screen_target:
+ if (vfb->dmabuf)
+ return vmw_dmabuf_pin_in_vram_or_gmr(dev_priv, buf,
+ false);
+
+ return vmw_dmabuf_pin_in_placement(dev_priv, buf,
+ &vmw_mob_placement, false);
+ default:
+ return -EINVAL;
+ }
- return 0;
+ return ret;
}
-static int vmw_framebuffer_dmabuf_unpin(struct vmw_framebuffer *vfb)
+static int vmw_framebuffer_unpin(struct vmw_framebuffer *vfb)
{
struct vmw_private *dev_priv = vmw_priv(vfb->base.dev);
- struct vmw_framebuffer_dmabuf *vfbd =
- vmw_framebuffer_to_vfbd(&vfb->base);
+ struct vmw_dma_buffer *buf;
- if (!vfbd->buffer) {
- WARN_ON(!vfbd->buffer);
+ buf = vfb->dmabuf ? vmw_framebuffer_to_vfbd(&vfb->base)->buffer :
+ vmw_framebuffer_to_vfbs(&vfb->base)->surface->res.backup;
+
+ if (WARN_ON(!buf))
return 0;
+
+ return vmw_dmabuf_unpin(dev_priv, buf, false);
+}
+
+/**
+ * vmw_create_dmabuf_proxy - create a proxy surface for the DMA buf
+ *
+ * @dev: DRM device
+ * @mode_cmd: parameters for the new surface
+ * @dmabuf_mob: MOB backing the DMA buf
+ * @srf_out: newly created surface
+ *
+ * When the content FB is a DMA buf, we create a surface as a proxy to the
+ * same buffer. This way we can do a surface copy rather than a surface DMA.
+ * This is a more efficient approach.
+ *
+ * RETURNS:
+ * 0 on success, error code otherwise
+ */
+static int vmw_create_dmabuf_proxy(struct drm_device *dev,
+ const struct drm_mode_fb_cmd *mode_cmd,
+ struct vmw_dma_buffer *dmabuf_mob,
+ struct vmw_surface **srf_out)
+{
+ uint32_t format;
+ struct drm_vmw_size content_base_size;
+ struct vmw_resource *res;
+ int ret;
+
+ switch (mode_cmd->depth) {
+ case 32:
+ case 24:
+ format = SVGA3D_X8R8G8B8;
+ break;
+
+ case 16:
+ case 15:
+ format = SVGA3D_R5G6B5;
+ break;
+
+ case 8:
+ format = SVGA3D_P8;
+ break;
+
+ default:
+		DRM_ERROR("Invalid framebuffer depth %d\n", mode_cmd->depth);
+ return -EINVAL;
}
- return vmw_dmabuf_unpin(dev_priv, vfbd->buffer, false);
+ content_base_size.width = mode_cmd->width;
+ content_base_size.height = mode_cmd->height;
+ content_base_size.depth = 1;
+
+ ret = vmw_surface_gb_priv_define(dev,
+ 0, /* kernel visible only */
+ 0, /* flags */
+ format,
+ true, /* can be a scanout buffer */
+ 1, /* num of mip levels */
+ 0,
+ 0,
+ content_base_size,
+ srf_out);
+ if (ret) {
+ DRM_ERROR("Failed to allocate proxy content buffer\n");
+ return ret;
+ }
+
+ res = &(*srf_out)->res;
+
+ /* Reserve and switch the backing mob. */
+ mutex_lock(&res->dev_priv->cmdbuf_mutex);
+ (void) vmw_resource_reserve(res, false, true);
+ vmw_dmabuf_unreference(&res->backup);
+ res->backup = vmw_dmabuf_reference(dmabuf_mob);
+ res->backup_offset = 0;
+ vmw_resource_unreserve(res, false, NULL, 0);
+ mutex_unlock(&res->dev_priv->cmdbuf_mutex);
+
+ return 0;
}
+
+
static int vmw_kms_new_framebuffer_dmabuf(struct vmw_private *dev_priv,
struct vmw_dma_buffer *dmabuf,
struct vmw_framebuffer **out,
@@ -1057,7 +802,7 @@ static int vmw_kms_new_framebuffer_dmabuf(struct vmw_private *dev_priv,
}
/* Limited framebuffer color depth support for screen objects */
- if (dev_priv->sou_priv) {
+ if (dev_priv->active_display_unit == vmw_du_screen_object) {
switch (mode_cmd->depth) {
case 32:
case 24:
@@ -1089,41 +834,96 @@ static int vmw_kms_new_framebuffer_dmabuf(struct vmw_private *dev_priv,
goto out_err1;
}
- if (!vmw_dmabuf_reference(dmabuf)) {
- DRM_ERROR("failed to reference dmabuf %p\n", dmabuf);
- ret = -EINVAL;
- goto out_err2;
- }
-
vfbd->base.base.bits_per_pixel = mode_cmd->bpp;
vfbd->base.base.pitches[0] = mode_cmd->pitch;
vfbd->base.base.depth = mode_cmd->depth;
vfbd->base.base.width = mode_cmd->width;
vfbd->base.base.height = mode_cmd->height;
- if (!dev_priv->sou_priv) {
- vfbd->base.pin = vmw_framebuffer_dmabuf_pin;
- vfbd->base.unpin = vmw_framebuffer_dmabuf_unpin;
- }
vfbd->base.dmabuf = true;
- vfbd->buffer = dmabuf;
+ vfbd->buffer = vmw_dmabuf_reference(dmabuf);
vfbd->base.user_handle = mode_cmd->handle;
*out = &vfbd->base;
ret = drm_framebuffer_init(dev, &vfbd->base.base,
&vmw_framebuffer_dmabuf_funcs);
if (ret)
- goto out_err3;
+ goto out_err2;
return 0;
-out_err3:
- vmw_dmabuf_unreference(&dmabuf);
out_err2:
+ vmw_dmabuf_unreference(&dmabuf);
kfree(vfbd);
out_err1:
return ret;
}
+/**
+ * vmw_kms_new_framebuffer - Create a new framebuffer.
+ *
+ * @dev_priv: Pointer to device private struct.
+ * @dmabuf: Pointer to dma buffer to wrap the kms framebuffer around.
+ * Either @dmabuf or @surface must be NULL.
+ * @surface: Pointer to a surface to wrap the kms framebuffer around.
+ * Either @dmabuf or @surface must be NULL.
+ * @only_2d: No presents will occur to this dma buffer based framebuffer. This
+ * helps the code to do some important optimizations.
+ * @mode_cmd: Frame-buffer metadata.
+ */
+struct vmw_framebuffer *
+vmw_kms_new_framebuffer(struct vmw_private *dev_priv,
+ struct vmw_dma_buffer *dmabuf,
+ struct vmw_surface *surface,
+ bool only_2d,
+ const struct drm_mode_fb_cmd *mode_cmd)
+{
+ struct vmw_framebuffer *vfb = NULL;
+ bool is_dmabuf_proxy = false;
+ int ret;
+
+ /*
+	 * We cannot use the SurfaceDMA command in a non-accelerated VM,
+ * therefore, wrap the DMA buf in a surface so we can use the
+ * SurfaceCopy command.
+ */
+ if (dmabuf && only_2d &&
+ dev_priv->active_display_unit == vmw_du_screen_target) {
+ ret = vmw_create_dmabuf_proxy(dev_priv->dev, mode_cmd,
+ dmabuf, &surface);
+ if (ret)
+ return ERR_PTR(ret);
+
+ is_dmabuf_proxy = true;
+ }
+
+	/* Create the new framebuffer depending on what we have */
+ if (surface) {
+ ret = vmw_kms_new_framebuffer_surface(dev_priv, surface, &vfb,
+ mode_cmd,
+ is_dmabuf_proxy);
+
+ /*
+ * vmw_create_dmabuf_proxy() adds a reference that is no longer
+ * needed
+ */
+ if (is_dmabuf_proxy)
+ vmw_surface_unreference(&surface);
+ } else if (dmabuf) {
+ ret = vmw_kms_new_framebuffer_dmabuf(dev_priv, dmabuf, &vfb,
+ mode_cmd);
+ } else {
+ BUG();
+ }
+
+ if (ret)
+ return ERR_PTR(ret);
+
+ vfb->pin = vmw_framebuffer_pin;
+ vfb->unpin = vmw_framebuffer_unpin;
+
+ return vfb;
+}
+
/*
* Generic Kernel modesetting functions
*/
@@ -1157,7 +957,7 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
if (!vmw_kms_validate_mode_vram(dev_priv,
mode_cmd.pitch,
mode_cmd.height)) {
- DRM_ERROR("VRAM size is too small for requested mode.\n");
+		DRM_ERROR("Requested mode exceeds bounding box limit.\n");
return ERR_PTR(-ENOMEM);
}
@@ -1187,15 +987,13 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
if (ret)
goto err_out;
- /* Create the new framebuffer depending one what we got back */
- if (bo)
- ret = vmw_kms_new_framebuffer_dmabuf(dev_priv, bo, &vfb,
- &mode_cmd);
- else if (surface)
- ret = vmw_kms_new_framebuffer_surface(dev_priv, file_priv,
- surface, &vfb, &mode_cmd);
- else
- BUG();
+ vfb = vmw_kms_new_framebuffer(dev_priv, bo, surface,
+ !(dev_priv->capabilities & SVGA_CAP_3D),
+ &mode_cmd);
+ if (IS_ERR(vfb)) {
+ ret = PTR_ERR(vfb);
+ goto err_out;
+ }
err_out:
/* vmw_user_lookup_handle takes one ref so does new_fb */
@@ -1218,6 +1016,21 @@ static const struct drm_mode_config_funcs vmw_kms_funcs = {
.fb_create = vmw_kms_fb_create,
};
+static int vmw_kms_generic_present(struct vmw_private *dev_priv,
+ struct drm_file *file_priv,
+ struct vmw_framebuffer *vfb,
+ struct vmw_surface *surface,
+ uint32_t sid,
+ int32_t destX, int32_t destY,
+ struct drm_vmw_rect *clips,
+ uint32_t num_clips)
+{
+ return vmw_kms_sou_do_surface_dirty(dev_priv, vfb, NULL, clips,
+ &surface->res, destX, destY,
+ num_clips, 1, NULL);
+}
+
+
int vmw_kms_present(struct vmw_private *dev_priv,
struct drm_file *file_priv,
struct vmw_framebuffer *vfb,
@@ -1227,238 +1040,31 @@ int vmw_kms_present(struct vmw_private *dev_priv,
struct drm_vmw_rect *clips,
uint32_t num_clips)
{
- struct vmw_display_unit *units[VMWGFX_NUM_DISPLAY_UNITS];
- struct drm_clip_rect *tmp;
- struct drm_crtc *crtc;
- size_t fifo_size;
- int i, k, num_units;
- int ret = 0; /* silence warning */
- int left, right, top, bottom;
-
- struct {
- SVGA3dCmdHeader header;
- SVGA3dCmdBlitSurfaceToScreen body;
- } *cmd;
- SVGASignedRect *blits;
-
- num_units = 0;
- list_for_each_entry(crtc, &dev_priv->dev->mode_config.crtc_list, head) {
- if (crtc->primary->fb != &vfb->base)
- continue;
- units[num_units++] = vmw_crtc_to_du(crtc);
- }
-
- BUG_ON(surface == NULL);
- BUG_ON(!clips || !num_clips);
-
- tmp = kzalloc(sizeof(*tmp) * num_clips, GFP_KERNEL);
- if (unlikely(tmp == NULL)) {
- DRM_ERROR("Temporary cliprect memory alloc failed.\n");
- return -ENOMEM;
- }
-
- fifo_size = sizeof(*cmd) + sizeof(SVGASignedRect) * num_clips;
- cmd = kmalloc(fifo_size, GFP_KERNEL);
- if (unlikely(cmd == NULL)) {
- DRM_ERROR("Failed to allocate temporary fifo memory.\n");
- ret = -ENOMEM;
- goto out_free_tmp;
- }
-
- left = clips->x;
- right = clips->x + clips->w;
- top = clips->y;
- bottom = clips->y + clips->h;
-
- for (i = 1; i < num_clips; i++) {
- left = min_t(int, left, (int)clips[i].x);
- right = max_t(int, right, (int)clips[i].x + clips[i].w);
- top = min_t(int, top, (int)clips[i].y);
- bottom = max_t(int, bottom, (int)clips[i].y + clips[i].h);
- }
-
- /* only need to do this once */
- memset(cmd, 0, fifo_size);
- cmd->header.id = cpu_to_le32(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN);
-
- blits = (SVGASignedRect *)&cmd[1];
-
- cmd->body.srcRect.left = left;
- cmd->body.srcRect.right = right;
- cmd->body.srcRect.top = top;
- cmd->body.srcRect.bottom = bottom;
-
- for (i = 0; i < num_clips; i++) {
- tmp[i].x1 = clips[i].x - left;
- tmp[i].x2 = clips[i].x + clips[i].w - left;
- tmp[i].y1 = clips[i].y - top;
- tmp[i].y2 = clips[i].y + clips[i].h - top;
- }
-
- for (k = 0; k < num_units; k++) {
- struct vmw_display_unit *unit = units[k];
- struct vmw_clip_rect clip;
- int num;
-
- clip.x1 = left + destX - unit->crtc.x;
- clip.y1 = top + destY - unit->crtc.y;
- clip.x2 = right + destX - unit->crtc.x;
- clip.y2 = bottom + destY - unit->crtc.y;
-
- /* skip any crtcs that misses the clip region */
- if (clip.x1 >= unit->crtc.mode.hdisplay ||
- clip.y1 >= unit->crtc.mode.vdisplay ||
- clip.x2 <= 0 || clip.y2 <= 0)
- continue;
-
- /*
- * In order for the clip rects to be correctly scaled
- * the src and dest rects needs to be the same size.
- */
- cmd->body.destRect.left = clip.x1;
- cmd->body.destRect.right = clip.x2;
- cmd->body.destRect.top = clip.y1;
- cmd->body.destRect.bottom = clip.y2;
-
- /* create a clip rect of the crtc in dest coords */
- clip.x2 = unit->crtc.mode.hdisplay - clip.x1;
- clip.y2 = unit->crtc.mode.vdisplay - clip.y1;
- clip.x1 = 0 - clip.x1;
- clip.y1 = 0 - clip.y1;
-
- /* need to reset sid as it is changed by execbuf */
- cmd->body.srcImage.sid = sid;
- cmd->body.destScreenId = unit->unit;
-
- /* clip and write blits to cmd stream */
- vmw_clip_cliprects(tmp, num_clips, clip, blits, &num);
-
- /* if no cliprects hit skip this */
- if (num == 0)
- continue;
-
- /* recalculate package length */
- fifo_size = sizeof(*cmd) + sizeof(SVGASignedRect) * num;
- cmd->header.size = cpu_to_le32(fifo_size - sizeof(cmd->header));
- ret = vmw_execbuf_process(file_priv, dev_priv, NULL, cmd,
- fifo_size, 0, NULL, NULL);
-
- if (unlikely(ret != 0))
- break;
- }
-
- kfree(cmd);
-out_free_tmp:
- kfree(tmp);
-
- return ret;
-}
-
-int vmw_kms_readback(struct vmw_private *dev_priv,
- struct drm_file *file_priv,
- struct vmw_framebuffer *vfb,
- struct drm_vmw_fence_rep __user *user_fence_rep,
- struct drm_vmw_rect *clips,
- uint32_t num_clips)
-{
- struct vmw_framebuffer_dmabuf *vfbd =
- vmw_framebuffer_to_vfbd(&vfb->base);
- struct vmw_dma_buffer *dmabuf = vfbd->buffer;
- struct vmw_display_unit *units[VMWGFX_NUM_DISPLAY_UNITS];
- struct drm_crtc *crtc;
- size_t fifo_size;
- int i, k, ret, num_units, blits_pos;
-
- struct {
- uint32_t header;
- SVGAFifoCmdDefineGMRFB body;
- } *cmd;
- struct {
- uint32_t header;
- SVGAFifoCmdBlitScreenToGMRFB body;
- } *blits;
-
- num_units = 0;
- list_for_each_entry(crtc, &dev_priv->dev->mode_config.crtc_list, head) {
- if (crtc->primary->fb != &vfb->base)
- continue;
- units[num_units++] = vmw_crtc_to_du(crtc);
- }
-
- BUG_ON(dmabuf == NULL);
- BUG_ON(!clips || !num_clips);
-
- /* take a safe guess at fifo size */
- fifo_size = sizeof(*cmd) + sizeof(*blits) * num_clips * num_units;
- cmd = kmalloc(fifo_size, GFP_KERNEL);
- if (unlikely(cmd == NULL)) {
- DRM_ERROR("Failed to allocate temporary fifo memory.\n");
- return -ENOMEM;
- }
-
- memset(cmd, 0, fifo_size);
- cmd->header = SVGA_CMD_DEFINE_GMRFB;
- cmd->body.format.bitsPerPixel = vfb->base.bits_per_pixel;
- cmd->body.format.colorDepth = vfb->base.depth;
- cmd->body.format.reserved = 0;
- cmd->body.bytesPerLine = vfb->base.pitches[0];
- cmd->body.ptr.gmrId = vfb->user_handle;
- cmd->body.ptr.offset = 0;
-
- blits = (void *)&cmd[1];
- blits_pos = 0;
- for (i = 0; i < num_units; i++) {
- struct drm_vmw_rect *c = clips;
- for (k = 0; k < num_clips; k++, c++) {
- /* transform clip coords to crtc origin based coords */
- int clip_x1 = c->x - units[i]->crtc.x;
- int clip_x2 = c->x - units[i]->crtc.x + c->w;
- int clip_y1 = c->y - units[i]->crtc.y;
- int clip_y2 = c->y - units[i]->crtc.y + c->h;
- int dest_x = c->x;
- int dest_y = c->y;
-
- /* compensate for clipping, we negate
- * a negative number and add that.
- */
- if (clip_x1 < 0)
- dest_x += -clip_x1;
- if (clip_y1 < 0)
- dest_y += -clip_y1;
-
- /* clip */
- clip_x1 = max(clip_x1, 0);
- clip_y1 = max(clip_y1, 0);
- clip_x2 = min(clip_x2, units[i]->crtc.mode.hdisplay);
- clip_y2 = min(clip_y2, units[i]->crtc.mode.vdisplay);
-
- /* and cull any rects that misses the crtc */
- if (clip_x1 >= units[i]->crtc.mode.hdisplay ||
- clip_y1 >= units[i]->crtc.mode.vdisplay ||
- clip_x2 <= 0 || clip_y2 <= 0)
- continue;
-
- blits[blits_pos].header = SVGA_CMD_BLIT_SCREEN_TO_GMRFB;
- blits[blits_pos].body.srcScreenId = units[i]->unit;
- blits[blits_pos].body.destOrigin.x = dest_x;
- blits[blits_pos].body.destOrigin.y = dest_y;
+ int ret;
- blits[blits_pos].body.srcRect.left = clip_x1;
- blits[blits_pos].body.srcRect.top = clip_y1;
- blits[blits_pos].body.srcRect.right = clip_x2;
- blits[blits_pos].body.srcRect.bottom = clip_y2;
- blits_pos++;
- }
+ switch (dev_priv->active_display_unit) {
+ case vmw_du_screen_target:
+ ret = vmw_kms_stdu_surface_dirty(dev_priv, vfb, NULL, clips,
+ &surface->res, destX, destY,
+ num_clips, 1, NULL);
+ break;
+ case vmw_du_screen_object:
+ ret = vmw_kms_generic_present(dev_priv, file_priv, vfb, surface,
+ sid, destX, destY, clips,
+ num_clips);
+ break;
+ default:
+ WARN_ONCE(true,
+ "Present called with invalid display system.\n");
+ ret = -ENOSYS;
+ break;
}
- /* reset size here and use calculated exact size from loops */
- fifo_size = sizeof(*cmd) + sizeof(*blits) * blits_pos;
-
- ret = vmw_execbuf_process(file_priv, dev_priv, NULL, cmd, fifo_size,
- 0, user_fence_rep, NULL);
+ if (ret)
+ return ret;
- kfree(cmd);
+ vmw_fifo_flush(dev_priv, false);
- return ret;
+ return 0;
}
int vmw_kms_init(struct vmw_private *dev_priv)
@@ -1470,30 +1076,37 @@ int vmw_kms_init(struct vmw_private *dev_priv)
dev->mode_config.funcs = &vmw_kms_funcs;
dev->mode_config.min_width = 1;
dev->mode_config.min_height = 1;
- /* assumed largest fb size */
- dev->mode_config.max_width = 8192;
- dev->mode_config.max_height = 8192;
+ dev->mode_config.max_width = dev_priv->texture_max_width;
+ dev->mode_config.max_height = dev_priv->texture_max_height;
- ret = vmw_kms_init_screen_object_display(dev_priv);
- if (ret) /* Fallback */
- (void)vmw_kms_init_legacy_display_system(dev_priv);
+ ret = vmw_kms_stdu_init_display(dev_priv);
+ if (ret) {
+ ret = vmw_kms_sou_init_display(dev_priv);
+ if (ret) /* Fallback */
+ ret = vmw_kms_ldu_init_display(dev_priv);
+ }
- return 0;
+ return ret;
}
int vmw_kms_close(struct vmw_private *dev_priv)
{
+ int ret;
+
/*
* Docs says we should take the lock before calling this function
* but since it destroys encoders and our destructor calls
* drm_encoder_cleanup which takes the lock we deadlock.
*/
drm_mode_config_cleanup(dev_priv->dev);
- if (dev_priv->sou_priv)
- vmw_kms_close_screen_object_display(dev_priv);
+ if (dev_priv->active_display_unit == vmw_du_screen_object)
+ ret = vmw_kms_sou_close_display(dev_priv);
+ else if (dev_priv->active_display_unit == vmw_du_screen_target)
+ ret = vmw_kms_stdu_close_display(dev_priv);
else
- vmw_kms_close_legacy_display_system(dev_priv);
- return 0;
+ ret = vmw_kms_ldu_close_display(dev_priv);
+
+ return ret;
}
int vmw_kms_cursor_bypass_ioctl(struct drm_device *dev, void *data,
@@ -1569,7 +1182,7 @@ int vmw_kms_save_vga(struct vmw_private *vmw_priv)
vmw_read(vmw_priv, SVGA_REG_PITCHLOCK);
else if (vmw_fifo_have_pitchlock(vmw_priv))
vmw_priv->vga_pitchlock = ioread32(vmw_priv->mmio_virt +
- SVGA_FIFO_PITCHLOCK);
+ SVGA_FIFO_PITCHLOCK);
if (!(vmw_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY))
return 0;
@@ -1641,7 +1254,9 @@ bool vmw_kms_validate_mode_vram(struct vmw_private *dev_priv,
uint32_t pitch,
uint32_t height)
{
- return ((u64) pitch * (u64) height) < (u64) dev_priv->prim_bb_mem;
+ return ((u64) pitch * (u64) height) < (u64)
+ ((dev_priv->active_display_unit == vmw_du_screen_target) ?
+ dev_priv->prim_bb_mem : dev_priv->vram_size);
}
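
A minimal usage sketch of the check above, assuming a hypothetical 1920x1080
mode at 32 bpp (the pitch and height are illustrative values, not taken from
this patch):

static bool sketch_mode_fits(struct vmw_private *dev_priv)
{
	/* Hypothetical mode: 1920x1080 at 32 bpp gives a 7680-byte pitch. */
	uint32_t pitch = 1920 * 4;

	/*
	 * pitch * height (~7.9 MiB) is compared against prim_bb_mem for
	 * screen targets, or against vram_size for the other display units.
	 */
	return vmw_kms_validate_mode_vram(dev_priv, pitch, 1080);
}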
@@ -1715,75 +1330,6 @@ static int vmw_du_update_layout(struct vmw_private *dev_priv, unsigned num,
return 0;
}
-int vmw_du_page_flip(struct drm_crtc *crtc,
- struct drm_framebuffer *fb,
- struct drm_pending_vblank_event *event,
- uint32_t page_flip_flags)
-{
- struct vmw_private *dev_priv = vmw_priv(crtc->dev);
- struct drm_framebuffer *old_fb = crtc->primary->fb;
- struct vmw_framebuffer *vfb = vmw_framebuffer_to_vfb(fb);
- struct drm_file *file_priv ;
- struct vmw_fence_obj *fence = NULL;
- struct drm_clip_rect clips;
- int ret;
-
- if (event == NULL)
- return -EINVAL;
-
- /* require ScreenObject support for page flipping */
- if (!dev_priv->sou_priv)
- return -ENOSYS;
-
- file_priv = event->base.file_priv;
- if (!vmw_kms_screen_object_flippable(dev_priv, crtc))
- return -EINVAL;
-
- crtc->primary->fb = fb;
-
- /* do a full screen dirty update */
- clips.x1 = clips.y1 = 0;
- clips.x2 = fb->width;
- clips.y2 = fb->height;
-
- if (vfb->dmabuf)
- ret = do_dmabuf_dirty_sou(file_priv, dev_priv, vfb,
- 0, 0, &clips, 1, 1, &fence);
- else
- ret = do_surface_dirty_sou(dev_priv, file_priv, vfb,
- 0, 0, &clips, 1, 1, &fence);
-
-
- if (ret != 0)
- goto out_no_fence;
- if (!fence) {
- ret = -EINVAL;
- goto out_no_fence;
- }
-
- ret = vmw_event_fence_action_queue(file_priv, fence,
- &event->base,
- &event->event.tv_sec,
- &event->event.tv_usec,
- true);
-
- /*
- * No need to hold on to this now. The only cleanup
- * we need to do if we fail is unref the fence.
- */
- vmw_fence_obj_unreference(&fence);
-
- if (vmw_crtc_to_du(crtc)->is_implicit)
- vmw_kms_screen_object_update_implicit_fb(dev_priv, crtc);
-
- return ret;
-
-out_no_fence:
- crtc->primary->fb = old_fb;
- return ret;
-}
-
-
void vmw_du_crtc_save(struct drm_crtc *crtc)
{
}
@@ -1808,8 +1354,9 @@ void vmw_du_crtc_gamma_set(struct drm_crtc *crtc,
}
}
-void vmw_du_connector_dpms(struct drm_connector *connector, int mode)
+int vmw_du_connector_dpms(struct drm_connector *connector, int mode)
{
+ return 0;
}
void vmw_du_connector_save(struct drm_connector *connector)
@@ -1919,7 +1466,7 @@ static struct drm_display_mode vmw_kms_connector_builtin[] = {
* @mode - Pointer to a struct drm_display_mode with hdisplay and vdisplay
* members filled in.
*/
-static void vmw_guess_mode_timing(struct drm_display_mode *mode)
+void vmw_guess_mode_timing(struct drm_display_mode *mode)
{
mode->hsync_start = mode->hdisplay + 50;
mode->hsync_end = mode->hsync_start + 50;
@@ -1954,36 +1501,39 @@ int vmw_du_connector_fill_modes(struct drm_connector *connector,
* If using screen objects, then assume 32-bpp because that's what the
* SVGA device is assuming
*/
- if (dev_priv->sou_priv)
+ if (dev_priv->active_display_unit == vmw_du_screen_object)
assumed_bpp = 4;
+ if (dev_priv->active_display_unit == vmw_du_screen_target) {
+ max_width = min(max_width, dev_priv->stdu_max_width);
+ max_height = min(max_height, dev_priv->stdu_max_height);
+ }
+
/* Add preferred mode */
- {
- mode = drm_mode_duplicate(dev, &prefmode);
- if (!mode)
- return 0;
- mode->hdisplay = du->pref_width;
- mode->vdisplay = du->pref_height;
- vmw_guess_mode_timing(mode);
-
- if (vmw_kms_validate_mode_vram(dev_priv,
- mode->hdisplay * assumed_bpp,
- mode->vdisplay)) {
- drm_mode_probed_add(connector, mode);
- } else {
- drm_mode_destroy(dev, mode);
- mode = NULL;
- }
+ mode = drm_mode_duplicate(dev, &prefmode);
+ if (!mode)
+ return 0;
+ mode->hdisplay = du->pref_width;
+ mode->vdisplay = du->pref_height;
+ vmw_guess_mode_timing(mode);
- if (du->pref_mode) {
- list_del_init(&du->pref_mode->head);
- drm_mode_destroy(dev, du->pref_mode);
- }
+ if (vmw_kms_validate_mode_vram(dev_priv,
+ mode->hdisplay * assumed_bpp,
+ mode->vdisplay)) {
+ drm_mode_probed_add(connector, mode);
+ } else {
+ drm_mode_destroy(dev, mode);
+ mode = NULL;
+ }
- /* mode might be null here, this is intended */
- du->pref_mode = mode;
+ if (du->pref_mode) {
+ list_del_init(&du->pref_mode->head);
+ drm_mode_destroy(dev, du->pref_mode);
}
+ /* mode might be null here, this is intended */
+ du->pref_mode = mode;
+
for (i = 0; vmw_kms_connector_builtin[i].type != 0; i++) {
bmode = &vmw_kms_connector_builtin[i];
if (bmode->hdisplay > max_width ||
@@ -2003,11 +1553,9 @@ int vmw_du_connector_fill_modes(struct drm_connector *connector,
drm_mode_probed_add(connector, mode);
}
- /* Move the prefered mode first, help apps pick the right mode. */
- if (du->pref_mode)
- list_move(&du->pref_mode->head, &connector->probed_modes);
-
drm_mode_connector_list_update(connector, true);
+	/* Move the preferred mode first, to help apps pick the right mode. */
+ drm_mode_sort(&connector->modes);
return 1;
}
@@ -2031,7 +1579,9 @@ int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
unsigned rects_size;
int ret;
int i;
+ u64 total_pixels = 0;
struct drm_mode_config *mode_config = &dev->mode_config;
+ struct drm_vmw_rect bounding_box = {0};
if (!arg->num_outputs) {
struct drm_vmw_rect def_rect = {0, 0, 800, 600};
@@ -2062,6 +1612,40 @@ int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
ret = -EINVAL;
goto out_free;
}
+
+ /*
+	 * bounding_box.w and bounding_box.h are used as
+ * lower-right coordinates
+ */
+ if (rects[i].x + rects[i].w > bounding_box.w)
+ bounding_box.w = rects[i].x + rects[i].w;
+
+ if (rects[i].y + rects[i].h > bounding_box.h)
+ bounding_box.h = rects[i].y + rects[i].h;
+
+ total_pixels += (u64) rects[i].w * (u64) rects[i].h;
+ }
+
+ if (dev_priv->active_display_unit == vmw_du_screen_target) {
+ /*
+		 * For Screen Targets, the limits for a topology are:
+ * 1. Bounding box (assuming 32bpp) must be < prim_bb_mem
+ * 2. Total pixels (assuming 32bpp) must be < prim_bb_mem
+ */
+		u64 bb_mem = (u64) bounding_box.w * bounding_box.h * 4;
+ u64 pixel_mem = total_pixels * 4;
+
+ if (bb_mem > dev_priv->prim_bb_mem) {
+ DRM_ERROR("Topology is beyond supported limits.\n");
+ ret = -EINVAL;
+ goto out_free;
+ }
+
+ if (pixel_mem > dev_priv->prim_bb_mem) {
+ DRM_ERROR("Combined output size too large\n");
+ ret = -EINVAL;
+ goto out_free;
+ }
}
vmw_du_update_layout(dev_priv, arg->num_outputs, rects);
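
The two Screen Target limits above can be illustrated with a hypothetical
side-by-side dual 1920x1080 layout (a sketch with assumed values, not code
from this patch):

static bool sketch_stdu_topology_fits(struct vmw_private *dev_priv)
{
	/* Hypothetical layout: two 1920x1080 outputs side by side. */
	struct drm_vmw_rect bounding_box = {0, 0, 2 * 1920, 1080};
	u64 total_pixels = 2ULL * 1920 * 1080;
	u64 bb_mem = (u64) bounding_box.w * bounding_box.h * 4; /* ~15.8 MiB */
	u64 pixel_mem = total_pixels * 4;			/* ~15.8 MiB */

	/* Both sums must fit within the primary bounding-box memory. */
	return bb_mem <= dev_priv->prim_bb_mem &&
	       pixel_mem <= dev_priv->prim_bb_mem;
}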
@@ -2070,3 +1654,419 @@ out_free:
kfree(rects);
return ret;
}
+
+/**
+ * vmw_kms_helper_dirty - Helper to build commands and perform actions based
+ * on a set of cliprects and a set of display units.
+ *
+ * @dev_priv: Pointer to a device private structure.
+ * @framebuffer: Pointer to the framebuffer on which to perform the actions.
+ * @clips: A set of struct drm_clip_rect. Either this or @vclips must be NULL.
+ * Cliprects are given in framebuffer coordinates.
+ * @vclips: A set of struct drm_vmw_rect cliprects. Either this or @clips must
+ * be NULL. Cliprects are given in source coordinates.
+ * @dest_x: X coordinate offset for the crtc / destination clip rects.
+ * @dest_y: Y coordinate offset for the crtc / destination clip rects.
+ * @num_clips: Number of cliprects in the @clips or @vclips array.
+ * @increment: Integer with which to increment the clip counter when looping.
+ * Used to skip a predetermined number of clip rects.
+ * @dirty: Closure structure. See the description of struct vmw_kms_dirty.
+ */
+int vmw_kms_helper_dirty(struct vmw_private *dev_priv,
+ struct vmw_framebuffer *framebuffer,
+ const struct drm_clip_rect *clips,
+ const struct drm_vmw_rect *vclips,
+ s32 dest_x, s32 dest_y,
+ int num_clips,
+ int increment,
+ struct vmw_kms_dirty *dirty)
+{
+ struct vmw_display_unit *units[VMWGFX_NUM_DISPLAY_UNITS];
+ struct drm_crtc *crtc;
+ u32 num_units = 0;
+ u32 i, k;
+
+ dirty->dev_priv = dev_priv;
+
+ list_for_each_entry(crtc, &dev_priv->dev->mode_config.crtc_list, head) {
+ if (crtc->primary->fb != &framebuffer->base)
+ continue;
+ units[num_units++] = vmw_crtc_to_du(crtc);
+ }
+
+ for (k = 0; k < num_units; k++) {
+ struct vmw_display_unit *unit = units[k];
+ s32 crtc_x = unit->crtc.x;
+ s32 crtc_y = unit->crtc.y;
+ s32 crtc_width = unit->crtc.mode.hdisplay;
+ s32 crtc_height = unit->crtc.mode.vdisplay;
+ const struct drm_clip_rect *clips_ptr = clips;
+ const struct drm_vmw_rect *vclips_ptr = vclips;
+
+ dirty->unit = unit;
+ if (dirty->fifo_reserve_size > 0) {
+ dirty->cmd = vmw_fifo_reserve(dev_priv,
+ dirty->fifo_reserve_size);
+ if (!dirty->cmd) {
+ DRM_ERROR("Couldn't reserve fifo space "
+ "for dirty blits.\n");
+			return -ENOMEM;
+ }
+ memset(dirty->cmd, 0, dirty->fifo_reserve_size);
+ }
+ dirty->num_hits = 0;
+ for (i = 0; i < num_clips; i++, clips_ptr += increment,
+ vclips_ptr += increment) {
+ s32 clip_left;
+ s32 clip_top;
+
+ /*
+ * Select clip array type. Note that integer type
+ * in @clips is unsigned short, whereas in @vclips
+ * it's 32-bit.
+ */
+ if (clips) {
+ dirty->fb_x = (s32) clips_ptr->x1;
+ dirty->fb_y = (s32) clips_ptr->y1;
+ dirty->unit_x2 = (s32) clips_ptr->x2 + dest_x -
+ crtc_x;
+ dirty->unit_y2 = (s32) clips_ptr->y2 + dest_y -
+ crtc_y;
+ } else {
+ dirty->fb_x = vclips_ptr->x;
+ dirty->fb_y = vclips_ptr->y;
+ dirty->unit_x2 = dirty->fb_x + vclips_ptr->w +
+ dest_x - crtc_x;
+ dirty->unit_y2 = dirty->fb_y + vclips_ptr->h +
+ dest_y - crtc_y;
+ }
+
+ dirty->unit_x1 = dirty->fb_x + dest_x - crtc_x;
+ dirty->unit_y1 = dirty->fb_y + dest_y - crtc_y;
+
+ /* Skip this clip if it's outside the crtc region */
+ if (dirty->unit_x1 >= crtc_width ||
+ dirty->unit_y1 >= crtc_height ||
+ dirty->unit_x2 <= 0 || dirty->unit_y2 <= 0)
+ continue;
+
+ /* Clip right and bottom to crtc limits */
+ dirty->unit_x2 = min_t(s32, dirty->unit_x2,
+ crtc_width);
+ dirty->unit_y2 = min_t(s32, dirty->unit_y2,
+ crtc_height);
+
+ /* Clip left and top to crtc limits */
+ clip_left = min_t(s32, dirty->unit_x1, 0);
+ clip_top = min_t(s32, dirty->unit_y1, 0);
+ dirty->unit_x1 -= clip_left;
+ dirty->unit_y1 -= clip_top;
+ dirty->fb_x -= clip_left;
+ dirty->fb_y -= clip_top;
+
+ dirty->clip(dirty);
+ }
+
+ dirty->fifo_commit(dirty);
+ }
+
+ return 0;
+}
+
+/**
+ * vmw_kms_helper_buffer_prepare - Reserve and validate a buffer object before
+ * command submission.
+ *
+ * @dev_priv: Pointer to a device private structure.
+ * @buf: The buffer object
+ * @interruptible: Whether to perform waits as interruptible.
+ * @validate_as_mob: Whether the buffer should be validated as a MOB. If false,
+ * the buffer will be validated as a GMR. Already pinned buffers will not be
+ * validated.
+ *
+ * Returns 0 on success, negative error code on failure, -ERESTARTSYS if
+ * interrupted by a signal.
+ */
+int vmw_kms_helper_buffer_prepare(struct vmw_private *dev_priv,
+ struct vmw_dma_buffer *buf,
+ bool interruptible,
+ bool validate_as_mob)
+{
+ struct ttm_buffer_object *bo = &buf->base;
+ int ret;
+
+ ttm_bo_reserve(bo, false, false, interruptible, NULL);
+ ret = vmw_validate_single_buffer(dev_priv, bo, interruptible,
+ validate_as_mob);
+ if (ret)
+ ttm_bo_unreserve(bo);
+
+ return ret;
+}
+
+/**
+ * vmw_kms_helper_buffer_revert - Undo the actions of
+ * vmw_kms_helper_buffer_prepare.
+ *
+ * @buf: Pointer to the buffer object.
+ *
+ * Helper to be used if an error forces the caller to undo the actions of
+ * vmw_kms_helper_buffer_prepare.
+ */
+void vmw_kms_helper_buffer_revert(struct vmw_dma_buffer *buf)
+{
+ if (buf)
+ ttm_bo_unreserve(&buf->base);
+}
+
+/**
+ * vmw_kms_helper_buffer_finish - Unreserve and fence a buffer object after
+ * kms command submission.
+ *
+ * @dev_priv: Pointer to a device private structure.
+ * @file_priv: Pointer to a struct drm_file representing the caller's
+ * connection. Must be NULL if @user_fence_rep is NULL; conversely, if
+ * @user_fence_rep is non-NULL, @file_priv must be non-NULL as well.
+ * @buf: The buffer object.
+ * @out_fence: Optional pointer to a fence pointer. If non-NULL, a
+ * ref-counted fence pointer is returned here.
+ * @user_fence_rep: Optional pointer to a user-space provided struct
+ * drm_vmw_fence_rep. If provided, @file_priv must also be provided and the
+ * function copies fence data to user-space in a fail-safe manner.
+ */
+void vmw_kms_helper_buffer_finish(struct vmw_private *dev_priv,
+ struct drm_file *file_priv,
+ struct vmw_dma_buffer *buf,
+ struct vmw_fence_obj **out_fence,
+ struct drm_vmw_fence_rep __user *
+ user_fence_rep)
+{
+ struct vmw_fence_obj *fence;
+ uint32_t handle;
+ int ret;
+
+ ret = vmw_execbuf_fence_commands(file_priv, dev_priv, &fence,
+ file_priv ? &handle : NULL);
+ if (buf)
+ vmw_fence_single_bo(&buf->base, fence);
+ if (file_priv)
+ vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv),
+ ret, user_fence_rep, fence,
+ handle);
+ if (out_fence)
+ *out_fence = fence;
+ else
+ vmw_fence_obj_unreference(&fence);
+
+ vmw_kms_helper_buffer_revert(buf);
+}
+
+
+/**
+ * vmw_kms_helper_resource_revert - Undo the actions of
+ * vmw_kms_helper_resource_prepare.
+ *
+ * @res: Pointer to the resource. Typically a surface.
+ *
+ * Helper to be used if an error forces the caller to undo the actions of
+ * vmw_kms_helper_resource_prepare.
+ */
+void vmw_kms_helper_resource_revert(struct vmw_resource *res)
+{
+ vmw_kms_helper_buffer_revert(res->backup);
+ vmw_resource_unreserve(res, false, NULL, 0);
+ mutex_unlock(&res->dev_priv->cmdbuf_mutex);
+}
+
+/**
+ * vmw_kms_helper_resource_prepare - Reserve and validate a resource before
+ * command submission.
+ *
+ * @res: Pointer to the resource. Typically a surface.
+ * @interruptible: Whether to perform waits as interruptible.
+ *
+ * Also reserves and validates the backup buffer if the resource is
+ * guest-backed. Returns 0 on success, negative error code on failure, or
+ * -ERESTARTSYS if
+ * interrupted by a signal.
+ */
+int vmw_kms_helper_resource_prepare(struct vmw_resource *res,
+ bool interruptible)
+{
+ int ret = 0;
+
+ if (interruptible)
+ ret = mutex_lock_interruptible(&res->dev_priv->cmdbuf_mutex);
+ else
+ mutex_lock(&res->dev_priv->cmdbuf_mutex);
+
+ if (unlikely(ret != 0))
+ return -ERESTARTSYS;
+
+ ret = vmw_resource_reserve(res, interruptible, false);
+ if (ret)
+ goto out_unlock;
+
+ if (res->backup) {
+ ret = vmw_kms_helper_buffer_prepare(res->dev_priv, res->backup,
+ interruptible,
+ res->dev_priv->has_mob);
+ if (ret)
+ goto out_unreserve;
+ }
+ ret = vmw_resource_validate(res);
+ if (ret)
+ goto out_revert;
+ return 0;
+
+out_revert:
+ vmw_kms_helper_buffer_revert(res->backup);
+out_unreserve:
+ vmw_resource_unreserve(res, false, NULL, 0);
+out_unlock:
+ mutex_unlock(&res->dev_priv->cmdbuf_mutex);
+ return ret;
+}
+
+/**
+ * vmw_kms_helper_resource_finish - Unreserve and fence a resource after
+ * kms command submission.
+ *
+ * @res: Pointer to the resource. Typically a surface.
+ * @out_fence: Optional pointer to a fence pointer. If non-NULL, a
+ * ref-counted fence pointer is returned here.
+ */
+void vmw_kms_helper_resource_finish(struct vmw_resource *res,
+ struct vmw_fence_obj **out_fence)
+{
+ if (res->backup || out_fence)
+ vmw_kms_helper_buffer_finish(res->dev_priv, NULL, res->backup,
+ out_fence, NULL);
+
+ vmw_resource_unreserve(res, false, NULL, 0);
+ mutex_unlock(&res->dev_priv->cmdbuf_mutex);
+}
+
+/**
+ * vmw_kms_update_proxy - Helper function to update a proxy surface from
+ * its backing MOB.
+ *
+ * @res: Pointer to the surface resource
+ * @clips: Clip rects in framebuffer (surface) space.
+ * @num_clips: Number of clips in @clips.
+ * @increment: Integer with which to increment the clip counter when looping.
+ * Used to skip a predetermined number of clip rects.
+ *
+ * This function makes sure the proxy surface is updated from its backing MOB
+ * using the region given by @clips. The surface resource @res and its backing
+ * MOB need to be reserved and validated on call.
+ */
+int vmw_kms_update_proxy(struct vmw_resource *res,
+ const struct drm_clip_rect *clips,
+ unsigned num_clips,
+ int increment)
+{
+ struct vmw_private *dev_priv = res->dev_priv;
+ struct drm_vmw_size *size = &vmw_res_to_srf(res)->base_size;
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdUpdateGBImage body;
+ } *cmd;
+ SVGA3dBox *box;
+ size_t copy_size = 0;
+ int i;
+
+ if (!clips)
+ return 0;
+
+ cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd) * num_clips);
+ if (!cmd) {
+ DRM_ERROR("Couldn't reserve fifo space for proxy surface "
+ "update.\n");
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < num_clips; ++i, clips += increment, ++cmd) {
+ box = &cmd->body.box;
+
+ cmd->header.id = SVGA_3D_CMD_UPDATE_GB_IMAGE;
+ cmd->header.size = sizeof(cmd->body);
+ cmd->body.image.sid = res->id;
+ cmd->body.image.face = 0;
+ cmd->body.image.mipmap = 0;
+
+ if (clips->x1 > size->width || clips->x2 > size->width ||
+ clips->y1 > size->height || clips->y2 > size->height) {
+			DRM_ERROR("Invalid clips outside of framebuffer.\n");
+ return -EINVAL;
+ }
+
+ box->x = clips->x1;
+ box->y = clips->y1;
+ box->z = 0;
+ box->w = clips->x2 - clips->x1;
+ box->h = clips->y2 - clips->y1;
+ box->d = 1;
+
+ copy_size += sizeof(*cmd);
+ }
+
+ vmw_fifo_commit(dev_priv, copy_size);
+
+ return 0;
+}
+
+int vmw_kms_fbdev_init_data(struct vmw_private *dev_priv,
+ unsigned unit,
+ u32 max_width,
+ u32 max_height,
+ struct drm_connector **p_con,
+ struct drm_crtc **p_crtc,
+ struct drm_display_mode **p_mode)
+{
+ struct drm_connector *con;
+ struct vmw_display_unit *du;
+ struct drm_display_mode *mode;
+ int i = 0;
+
+ list_for_each_entry(con, &dev_priv->dev->mode_config.connector_list,
+ head) {
+ if (i == unit)
+ break;
+
+ ++i;
+ }
+
+ if (i != unit) {
+ DRM_ERROR("Could not find initial display unit.\n");
+ return -EINVAL;
+ }
+
+ if (list_empty(&con->modes))
+ (void) vmw_du_connector_fill_modes(con, max_width, max_height);
+
+ if (list_empty(&con->modes)) {
+ DRM_ERROR("Could not find initial display mode.\n");
+ return -EINVAL;
+ }
+
+ du = vmw_connector_to_du(con);
+ *p_con = con;
+ *p_crtc = &du->crtc;
+
+ list_for_each_entry(mode, &con->modes, head) {
+ if (mode->type & DRM_MODE_TYPE_PREFERRED)
+ break;
+ }
+
+ if (mode->type & DRM_MODE_TYPE_PREFERRED)
+ *p_mode = mode;
+ else {
+ WARN_ONCE(true, "Could not find initial preferred mode.\n");
+ *p_mode = list_first_entry(&con->modes,
+ struct drm_display_mode,
+ head);
+ }
+
+ return 0;
+}
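
For orientation, a minimal sketch of how a display-unit backend might drive
vmw_kms_helper_dirty() above. The command struct and callbacks are
assumptions modeled on the legacy UPDATE path; the real backends live in
vmwgfx_scrn.c and vmwgfx_stdu.c:

struct sketch_update_cmd {
	uint32_t header;
	SVGAFifoCmdUpdate body;
};

/* Called once per clip rect that intersects the current display unit. */
static void sketch_dirty_clip(struct vmw_kms_dirty *dirty)
{
	struct sketch_update_cmd *cmd = dirty->cmd;

	cmd += dirty->num_hits;
	cmd->header = SVGA_CMD_UPDATE;
	cmd->body.x = dirty->unit_x1;
	cmd->body.y = dirty->unit_y1;
	cmd->body.width = dirty->unit_x2 - dirty->unit_x1;
	cmd->body.height = dirty->unit_y2 - dirty->unit_y1;
	dirty->num_hits++;
}

/* Called once per display unit; commits only what was actually written. */
static void sketch_dirty_commit(struct vmw_kms_dirty *dirty)
{
	vmw_fifo_commit(dirty->dev_priv,
			dirty->num_hits * sizeof(struct sketch_update_cmd));
}

static int sketch_do_dirty(struct vmw_private *dev_priv,
			   struct vmw_framebuffer *framebuffer,
			   struct drm_clip_rect *clips,
			   unsigned num_clips, int increment)
{
	struct vmw_kms_dirty dirty;

	memset(&dirty, 0, sizeof(dirty));
	dirty.clip = sketch_dirty_clip;
	dirty.fifo_commit = sketch_dirty_commit;
	dirty.fifo_reserve_size = sizeof(struct sketch_update_cmd) * num_clips;

	return vmw_kms_helper_dirty(dev_priv, framebuffer, clips, NULL,
				    0, 0, num_clips, increment, &dirty);
}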
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
index 8d038c36bd57..782df7ca9794 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
@@ -1,6 +1,6 @@
/**************************************************************************
*
- * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
+ * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -32,11 +32,60 @@
#include <drm/drm_crtc_helper.h>
#include "vmwgfx_drv.h"
+/**
+ * struct vmw_kms_dirty - closure structure for the vmw_kms_helper_dirty
+ * function.
+ *
+ * @fifo_commit: Callback that is called once for each display unit after
+ * all clip rects. This function must commit the fifo space reserved by the
+ * helper. Set up by the caller.
+ * @clip: Callback that is called for each cliprect on each display unit.
+ * Set up by the caller.
+ * @fifo_reserve_size: Fifo size that the helper should try to allocate for
+ * each display unit. Set up by the caller.
+ * @dev_priv: Pointer to the device private. Set up by the helper.
+ * @unit: The current display unit. Set up by the helper before a call to @clip.
+ * @cmd: The allocated fifo space. Set up by the helper before the first @clip
+ * call.
+ * @num_hits: Number of clip rect commands for this display unit.
+ * Cleared by the helper before the first @clip call. Updated by the @clip
+ * callback.
+ * @fb_x: Clip rect left side in framebuffer coordinates.
+ * @fb_y: Clip rect top side in framebuffer coordinates.
+ * @unit_x1: Clip rect left side in crtc coordinates.
+ * @unit_y1: Clip rect top side in crtc coordinates.
+ * @unit_x2: Clip rect right side in crtc coordinates.
+ * @unit_y2: Clip rect bottom side in crtc coordinates.
+ *
+ * The clip rect coordinates are updated by the helper for each @clip call.
+ * Note that this structure may be derived from (embedded in a larger struct)
+ * if more info needs to be passed between helper caller and helper callbacks.
+ */
+struct vmw_kms_dirty {
+ void (*fifo_commit)(struct vmw_kms_dirty *);
+ void (*clip)(struct vmw_kms_dirty *);
+ size_t fifo_reserve_size;
+ struct vmw_private *dev_priv;
+ struct vmw_display_unit *unit;
+ void *cmd;
+ u32 num_hits;
+ s32 fb_x;
+ s32 fb_y;
+ s32 unit_x1;
+ s32 unit_y1;
+ s32 unit_x2;
+ s32 unit_y2;
+};
+
#define VMWGFX_NUM_DISPLAY_UNITS 8
#define vmw_framebuffer_to_vfb(x) \
container_of(x, struct vmw_framebuffer, base)
+#define vmw_framebuffer_to_vfbs(x) \
+ container_of(x, struct vmw_framebuffer_surface, base.base)
+#define vmw_framebuffer_to_vfbd(x) \
+ container_of(x, struct vmw_framebuffer_dmabuf, base.base)
/**
* Base class for framebuffers
@@ -53,9 +102,27 @@ struct vmw_framebuffer {
uint32_t user_handle;
};
+/*
+ * Clip rectangle
+ */
+struct vmw_clip_rect {
+ int x1, x2, y1, y2;
+};
+
+struct vmw_framebuffer_surface {
+ struct vmw_framebuffer base;
+ struct vmw_surface *surface;
+ struct vmw_dma_buffer *buffer;
+ struct list_head head;
+	bool is_dmabuf_proxy;  /* true if this is a proxy surface for a DMA buf */
+};
+
+
+struct vmw_framebuffer_dmabuf {
+ struct vmw_framebuffer base;
+ struct vmw_dma_buffer *buffer;
+};
-#define vmw_crtc_to_du(x) \
- container_of(x, struct vmw_display_unit, crtc)
/*
* Basic cursor manipulation
@@ -120,11 +187,7 @@ struct vmw_display_unit {
/*
* Shared display unit functions - vmwgfx_kms.c
*/
-void vmw_display_unit_cleanup(struct vmw_display_unit *du);
-int vmw_du_page_flip(struct drm_crtc *crtc,
- struct drm_framebuffer *fb,
- struct drm_pending_vblank_event *event,
- uint32_t page_flip_flags);
+void vmw_du_cleanup(struct vmw_display_unit *du);
void vmw_du_crtc_save(struct drm_crtc *crtc);
void vmw_du_crtc_restore(struct drm_crtc *crtc);
void vmw_du_crtc_gamma_set(struct drm_crtc *crtc,
@@ -133,7 +196,7 @@ void vmw_du_crtc_gamma_set(struct drm_crtc *crtc,
int vmw_du_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
uint32_t handle, uint32_t width, uint32_t height);
int vmw_du_crtc_cursor_move(struct drm_crtc *crtc, int x, int y);
-void vmw_du_connector_dpms(struct drm_connector *connector, int mode);
+int vmw_du_connector_dpms(struct drm_connector *connector, int mode);
void vmw_du_connector_save(struct drm_connector *connector);
void vmw_du_connector_restore(struct drm_connector *connector);
enum drm_connector_status
@@ -143,25 +206,118 @@ int vmw_du_connector_fill_modes(struct drm_connector *connector,
int vmw_du_connector_set_property(struct drm_connector *connector,
struct drm_property *property,
uint64_t val);
+int vmw_kms_helper_dirty(struct vmw_private *dev_priv,
+ struct vmw_framebuffer *framebuffer,
+ const struct drm_clip_rect *clips,
+ const struct drm_vmw_rect *vclips,
+ s32 dest_x, s32 dest_y,
+ int num_clips,
+ int increment,
+ struct vmw_kms_dirty *dirty);
+int vmw_kms_helper_buffer_prepare(struct vmw_private *dev_priv,
+ struct vmw_dma_buffer *buf,
+ bool interruptible,
+ bool validate_as_mob);
+void vmw_kms_helper_buffer_revert(struct vmw_dma_buffer *buf);
+void vmw_kms_helper_buffer_finish(struct vmw_private *dev_priv,
+ struct drm_file *file_priv,
+ struct vmw_dma_buffer *buf,
+ struct vmw_fence_obj **out_fence,
+ struct drm_vmw_fence_rep __user *
+ user_fence_rep);
+int vmw_kms_helper_resource_prepare(struct vmw_resource *res,
+ bool interruptible);
+void vmw_kms_helper_resource_revert(struct vmw_resource *res);
+void vmw_kms_helper_resource_finish(struct vmw_resource *res,
+ struct vmw_fence_obj **out_fence);
+int vmw_kms_readback(struct vmw_private *dev_priv,
+ struct drm_file *file_priv,
+ struct vmw_framebuffer *vfb,
+ struct drm_vmw_fence_rep __user *user_fence_rep,
+ struct drm_vmw_rect *vclips,
+ uint32_t num_clips);
+struct vmw_framebuffer *
+vmw_kms_new_framebuffer(struct vmw_private *dev_priv,
+ struct vmw_dma_buffer *dmabuf,
+ struct vmw_surface *surface,
+ bool only_2d,
+ const struct drm_mode_fb_cmd *mode_cmd);
+int vmw_kms_fbdev_init_data(struct vmw_private *dev_priv,
+ unsigned unit,
+ u32 max_width,
+ u32 max_height,
+ struct drm_connector **p_con,
+ struct drm_crtc **p_crtc,
+ struct drm_display_mode **p_mode);
+void vmw_guess_mode_timing(struct drm_display_mode *mode);
/*
* Legacy display unit functions - vmwgfx_ldu.c
*/
-int vmw_kms_init_legacy_display_system(struct vmw_private *dev_priv);
-int vmw_kms_close_legacy_display_system(struct vmw_private *dev_priv);
+int vmw_kms_ldu_init_display(struct vmw_private *dev_priv);
+int vmw_kms_ldu_close_display(struct vmw_private *dev_priv);
+int vmw_kms_ldu_do_dmabuf_dirty(struct vmw_private *dev_priv,
+ struct vmw_framebuffer *framebuffer,
+ unsigned flags, unsigned color,
+ struct drm_clip_rect *clips,
+ unsigned num_clips, int increment);
+int vmw_kms_update_proxy(struct vmw_resource *res,
+ const struct drm_clip_rect *clips,
+ unsigned num_clips,
+ int increment);
/*
* Screen Objects display functions - vmwgfx_scrn.c
*/
-int vmw_kms_init_screen_object_display(struct vmw_private *dev_priv);
-int vmw_kms_close_screen_object_display(struct vmw_private *dev_priv);
-int vmw_kms_sou_update_layout(struct vmw_private *dev_priv, unsigned num,
- struct drm_vmw_rect *rects);
-bool vmw_kms_screen_object_flippable(struct vmw_private *dev_priv,
- struct drm_crtc *crtc);
-void vmw_kms_screen_object_update_implicit_fb(struct vmw_private *dev_priv,
- struct drm_crtc *crtc);
+int vmw_kms_sou_init_display(struct vmw_private *dev_priv);
+int vmw_kms_sou_close_display(struct vmw_private *dev_priv);
+int vmw_kms_sou_do_surface_dirty(struct vmw_private *dev_priv,
+ struct vmw_framebuffer *framebuffer,
+ struct drm_clip_rect *clips,
+ struct drm_vmw_rect *vclips,
+ struct vmw_resource *srf,
+ s32 dest_x,
+ s32 dest_y,
+ unsigned num_clips, int inc,
+ struct vmw_fence_obj **out_fence);
+int vmw_kms_sou_do_dmabuf_dirty(struct vmw_private *dev_priv,
+ struct vmw_framebuffer *framebuffer,
+ struct drm_clip_rect *clips,
+ unsigned num_clips, int increment,
+ bool interruptible,
+ struct vmw_fence_obj **out_fence);
+int vmw_kms_sou_readback(struct vmw_private *dev_priv,
+ struct drm_file *file_priv,
+ struct vmw_framebuffer *vfb,
+ struct drm_vmw_fence_rep __user *user_fence_rep,
+ struct drm_vmw_rect *vclips,
+ uint32_t num_clips);
+
+/*
+ * Screen Target Display Unit functions - vmwgfx_stdu.c
+ */
+int vmw_kms_stdu_init_display(struct vmw_private *dev_priv);
+int vmw_kms_stdu_close_display(struct vmw_private *dev_priv);
+int vmw_kms_stdu_surface_dirty(struct vmw_private *dev_priv,
+ struct vmw_framebuffer *framebuffer,
+ struct drm_clip_rect *clips,
+ struct drm_vmw_rect *vclips,
+ struct vmw_resource *srf,
+ s32 dest_x,
+ s32 dest_y,
+ unsigned num_clips, int inc,
+ struct vmw_fence_obj **out_fence);
+int vmw_kms_stdu_dma(struct vmw_private *dev_priv,
+ struct drm_file *file_priv,
+ struct vmw_framebuffer *vfb,
+ struct drm_vmw_fence_rep __user *user_fence_rep,
+ struct drm_clip_rect *clips,
+ struct drm_vmw_rect *vclips,
+ uint32_t num_clips,
+ int increment,
+ bool to_surface,
+ bool interruptible);
#endif
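
The prepare/revert/finish helpers declared above are meant to bracket command
submission. A hedged sketch of the intended call sequence for a guest-backed
surface, with the command emission itself elided as a placeholder:

static int sketch_surface_submit(struct vmw_private *dev_priv,
				 struct vmw_resource *srf)
{
	struct vmw_fence_obj *fence = NULL;
	int ret;

	/* Reserve and validate the surface and its backup MOB, if any. */
	ret = vmw_kms_helper_resource_prepare(srf, true);
	if (ret)
		return ret;

	/* Reserve fifo space and emit blit/update commands here. */
	ret = 0;	/* placeholder for the command-emission result */
	if (ret) {
		vmw_kms_helper_resource_revert(srf);
		return ret;
	}

	/* Fence the backup buffer, unreserve, and drop the local fence ref. */
	vmw_kms_helper_resource_finish(srf, &fence);
	if (fence)
		vmw_fence_obj_unreference(&fence);

	return 0;
}

vmw_kms_helper_buffer_prepare()/vmw_kms_helper_buffer_finish() follow the same
shape for plain DMA buffers.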
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
index 5c289f748ab4..bb63e4d795fa 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
@@ -1,6 +1,6 @@
/**************************************************************************
*
- * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
+ * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -57,7 +57,7 @@ struct vmw_legacy_display_unit {
static void vmw_ldu_destroy(struct vmw_legacy_display_unit *ldu)
{
list_del_init(&ldu->active);
- vmw_display_unit_cleanup(&ldu->base);
+ vmw_du_cleanup(&ldu->base);
kfree(ldu);
}
@@ -279,7 +279,7 @@ static int vmw_ldu_crtc_set_config(struct drm_mode_set *set)
return -EINVAL;
}
- vmw_fb_off(dev_priv);
+ vmw_svga_enable(dev_priv);
crtc->primary->fb = fb;
encoder->crtc = crtc;
@@ -385,7 +385,7 @@ static int vmw_ldu_init(struct vmw_private *dev_priv, unsigned unit)
return 0;
}
-int vmw_kms_init_legacy_display_system(struct vmw_private *dev_priv)
+int vmw_kms_ldu_init_display(struct vmw_private *dev_priv)
{
struct drm_device *dev = dev_priv->dev;
int i, ret;
@@ -422,6 +422,10 @@ int vmw_kms_init_legacy_display_system(struct vmw_private *dev_priv)
else
vmw_ldu_init(dev_priv, 0);
+ dev_priv->active_display_unit = vmw_du_legacy;
+
+ DRM_INFO("Legacy Display Unit initialized\n");
+
return 0;
err_vblank_cleanup:
@@ -432,7 +436,7 @@ err_free:
return ret;
}
-int vmw_kms_close_legacy_display_system(struct vmw_private *dev_priv)
+int vmw_kms_ldu_close_display(struct vmw_private *dev_priv)
{
struct drm_device *dev = dev_priv->dev;
@@ -447,3 +451,38 @@ int vmw_kms_close_legacy_display_system(struct vmw_private *dev_priv)
return 0;
}
+
+
+int vmw_kms_ldu_do_dmabuf_dirty(struct vmw_private *dev_priv,
+ struct vmw_framebuffer *framebuffer,
+ unsigned flags, unsigned color,
+ struct drm_clip_rect *clips,
+ unsigned num_clips, int increment)
+{
+ size_t fifo_size;
+ int i;
+
+ struct {
+ uint32_t header;
+ SVGAFifoCmdUpdate body;
+ } *cmd;
+
+ fifo_size = sizeof(*cmd) * num_clips;
+ cmd = vmw_fifo_reserve(dev_priv, fifo_size);
+ if (unlikely(cmd == NULL)) {
+ DRM_ERROR("Fifo reserve failed.\n");
+ return -ENOMEM;
+ }
+
+ memset(cmd, 0, fifo_size);
+ for (i = 0; i < num_clips; i++, clips += increment) {
+ cmd[i].header = SVGA_CMD_UPDATE;
+ cmd[i].body.x = clips->x1;
+ cmd[i].body.y = clips->y1;
+ cmd[i].body.width = clips->x2 - clips->x1;
+ cmd[i].body.height = clips->y2 - clips->y1;
+ }
+
+ vmw_fifo_commit(dev_priv, fifo_size);
+ return 0;
+}
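
A usage sketch for the new helper, assuming the caller state (flags, clips) of
the dirty paths above; when userspace annotates copies, clip rects arrive in
pairs and the increment parameter makes the helper visit every other rect:

static int sketch_ldu_dirty(struct vmw_private *dev_priv,
			    struct vmw_framebuffer *framebuffer,
			    unsigned flags,
			    struct drm_clip_rect *clips,
			    unsigned num_clips)
{
	int increment = 1;

	/* Copy annotations deliver clip rects in pairs. */
	if (flags & DRM_MODE_FB_DIRTY_ANNOTATE_COPY) {
		num_clips /= 2;
		increment = 2;
	}

	return vmw_kms_ldu_do_dmabuf_dirty(dev_priv, framebuffer, 0, 0,
					   clips, num_clips, increment);
}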
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c b/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
index 04a64b8cd3cd..23db16008e39 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
@@ -1,6 +1,6 @@
/**************************************************************************
*
- * Copyright © 2012 VMware, Inc., Palo Alto, CA., USA
+ * Copyright © 2012-2015 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -31,7 +31,7 @@
* If we set up the screen target otable, screen objects stop working.
*/
-#define VMW_OTABLE_SETUP_SUB ((VMWGFX_ENABLE_SCREEN_TARGET_OTABLE) ? 0 : 1)
+#define VMW_OTABLE_SETUP_SUB ((VMWGFX_ENABLE_SCREEN_TARGET_OTABLE ? 0 : 1))
#ifdef CONFIG_64BIT
#define VMW_PPN_SIZE 8
@@ -67,9 +67,23 @@ struct vmw_mob {
* @size: Size of the table (page-aligned).
* @page_table: Pointer to a struct vmw_mob holding the page table.
*/
-struct vmw_otable {
- unsigned long size;
- struct vmw_mob *page_table;
+static const struct vmw_otable pre_dx_tables[] = {
+ {VMWGFX_NUM_MOB * SVGA3D_OTABLE_MOB_ENTRY_SIZE, NULL, true},
+ {VMWGFX_NUM_GB_SURFACE * SVGA3D_OTABLE_SURFACE_ENTRY_SIZE, NULL, true},
+ {VMWGFX_NUM_GB_CONTEXT * SVGA3D_OTABLE_CONTEXT_ENTRY_SIZE, NULL, true},
+ {VMWGFX_NUM_GB_SHADER * SVGA3D_OTABLE_SHADER_ENTRY_SIZE, NULL, true},
+ {VMWGFX_NUM_GB_SCREEN_TARGET * SVGA3D_OTABLE_SCREEN_TARGET_ENTRY_SIZE,
+ NULL, VMWGFX_ENABLE_SCREEN_TARGET_OTABLE}
+};
+
+static const struct vmw_otable dx_tables[] = {
+ {VMWGFX_NUM_MOB * SVGA3D_OTABLE_MOB_ENTRY_SIZE, NULL, true},
+ {VMWGFX_NUM_GB_SURFACE * SVGA3D_OTABLE_SURFACE_ENTRY_SIZE, NULL, true},
+ {VMWGFX_NUM_GB_CONTEXT * SVGA3D_OTABLE_CONTEXT_ENTRY_SIZE, NULL, true},
+ {VMWGFX_NUM_GB_SHADER * SVGA3D_OTABLE_SHADER_ENTRY_SIZE, NULL, true},
+ {VMWGFX_NUM_GB_SCREEN_TARGET * SVGA3D_OTABLE_SCREEN_TARGET_ENTRY_SIZE,
+ NULL, VMWGFX_ENABLE_SCREEN_TARGET_OTABLE},
+ {VMWGFX_NUM_DXCONTEXT * sizeof(SVGAOTableDXContextEntry), NULL, true},
};
static int vmw_mob_pt_populate(struct vmw_private *dev_priv,
@@ -92,6 +106,7 @@ static void vmw_mob_pt_setup(struct vmw_mob *mob,
*/
static int vmw_setup_otable_base(struct vmw_private *dev_priv,
SVGAOTableType type,
+ struct ttm_buffer_object *otable_bo,
unsigned long offset,
struct vmw_otable *otable)
{
@@ -106,7 +121,7 @@ static int vmw_setup_otable_base(struct vmw_private *dev_priv,
BUG_ON(otable->page_table != NULL);
- vsgt = vmw_bo_sg_table(dev_priv->otable_bo);
+ vsgt = vmw_bo_sg_table(otable_bo);
vmw_piter_start(&iter, vsgt, offset >> PAGE_SHIFT);
WARN_ON(!vmw_piter_next(&iter));
@@ -142,7 +157,7 @@ static int vmw_setup_otable_base(struct vmw_private *dev_priv,
cmd->header.id = SVGA_3D_CMD_SET_OTABLE_BASE64;
cmd->header.size = sizeof(cmd->body);
cmd->body.type = type;
- cmd->body.baseAddress = cpu_to_le64(mob->pt_root_page >> PAGE_SHIFT);
+ cmd->body.baseAddress = mob->pt_root_page >> PAGE_SHIFT;
cmd->body.sizeInBytes = otable->size;
cmd->body.validSizeInBytes = 0;
cmd->body.ptDepth = mob->pt_level;
@@ -191,18 +206,19 @@ static void vmw_takedown_otable_base(struct vmw_private *dev_priv,
if (unlikely(cmd == NULL)) {
DRM_ERROR("Failed reserving FIFO space for OTable "
"takedown.\n");
- } else {
- memset(cmd, 0, sizeof(*cmd));
- cmd->header.id = SVGA_3D_CMD_SET_OTABLE_BASE;
- cmd->header.size = sizeof(cmd->body);
- cmd->body.type = type;
- cmd->body.baseAddress = 0;
- cmd->body.sizeInBytes = 0;
- cmd->body.validSizeInBytes = 0;
- cmd->body.ptDepth = SVGA3D_MOBFMT_INVALID;
- vmw_fifo_commit(dev_priv, sizeof(*cmd));
+ return;
}
+ memset(cmd, 0, sizeof(*cmd));
+ cmd->header.id = SVGA_3D_CMD_SET_OTABLE_BASE;
+ cmd->header.size = sizeof(cmd->body);
+ cmd->body.type = type;
+ cmd->body.baseAddress = 0;
+ cmd->body.sizeInBytes = 0;
+ cmd->body.validSizeInBytes = 0;
+ cmd->body.ptDepth = SVGA3D_MOBFMT_INVALID;
+ vmw_fifo_commit(dev_priv, sizeof(*cmd));
+
if (bo) {
int ret;
@@ -217,47 +233,21 @@ static void vmw_takedown_otable_base(struct vmw_private *dev_priv,
otable->page_table = NULL;
}
-/*
- * vmw_otables_setup - Set up guest backed memory object tables
- *
- * @dev_priv: Pointer to a device private structure
- *
- * Takes care of the device guest backed surface
- * initialization, by setting up the guest backed memory object tables.
- * Returns 0 on success and various error codes on failure. A succesful return
- * means the object tables can be taken down using the vmw_otables_takedown
- * function.
- */
-int vmw_otables_setup(struct vmw_private *dev_priv)
+
+static int vmw_otable_batch_setup(struct vmw_private *dev_priv,
+ struct vmw_otable_batch *batch)
{
unsigned long offset;
unsigned long bo_size;
- struct vmw_otable *otables;
+ struct vmw_otable *otables = batch->otables;
SVGAOTableType i;
int ret;
- otables = kzalloc(SVGA_OTABLE_DX9_MAX * sizeof(*otables),
- GFP_KERNEL);
- if (unlikely(otables == NULL)) {
- DRM_ERROR("Failed to allocate space for otable "
- "metadata.\n");
- return -ENOMEM;
- }
-
- otables[SVGA_OTABLE_MOB].size =
- VMWGFX_NUM_MOB * SVGA3D_OTABLE_MOB_ENTRY_SIZE;
- otables[SVGA_OTABLE_SURFACE].size =
- VMWGFX_NUM_GB_SURFACE * SVGA3D_OTABLE_SURFACE_ENTRY_SIZE;
- otables[SVGA_OTABLE_CONTEXT].size =
- VMWGFX_NUM_GB_CONTEXT * SVGA3D_OTABLE_CONTEXT_ENTRY_SIZE;
- otables[SVGA_OTABLE_SHADER].size =
- VMWGFX_NUM_GB_SHADER * SVGA3D_OTABLE_SHADER_ENTRY_SIZE;
- otables[SVGA_OTABLE_SCREEN_TARGET].size =
- VMWGFX_NUM_GB_SCREEN_TARGET *
- SVGA3D_OTABLE_SCREEN_TARGET_ENTRY_SIZE;
-
bo_size = 0;
- for (i = 0; i < SVGA_OTABLE_DX9_MAX; ++i) {
+ for (i = 0; i < batch->num_otables; ++i) {
+ if (!otables[i].enabled)
+ continue;
+
otables[i].size =
(otables[i].size + PAGE_SIZE - 1) & PAGE_MASK;
bo_size += otables[i].size;
@@ -267,63 +257,105 @@ int vmw_otables_setup(struct vmw_private *dev_priv)
ttm_bo_type_device,
&vmw_sys_ne_placement,
0, false, NULL,
- &dev_priv->otable_bo);
+ &batch->otable_bo);
if (unlikely(ret != 0))
goto out_no_bo;
- ret = ttm_bo_reserve(dev_priv->otable_bo, false, true, false, NULL);
+ ret = ttm_bo_reserve(batch->otable_bo, false, true, false, NULL);
BUG_ON(ret != 0);
- ret = vmw_bo_driver.ttm_tt_populate(dev_priv->otable_bo->ttm);
+ ret = vmw_bo_driver.ttm_tt_populate(batch->otable_bo->ttm);
if (unlikely(ret != 0))
goto out_unreserve;
- ret = vmw_bo_map_dma(dev_priv->otable_bo);
+ ret = vmw_bo_map_dma(batch->otable_bo);
if (unlikely(ret != 0))
goto out_unreserve;
- ttm_bo_unreserve(dev_priv->otable_bo);
+ ttm_bo_unreserve(batch->otable_bo);
offset = 0;
- for (i = 0; i < SVGA_OTABLE_DX9_MAX - VMW_OTABLE_SETUP_SUB; ++i) {
- ret = vmw_setup_otable_base(dev_priv, i, offset,
+ for (i = 0; i < batch->num_otables; ++i) {
+ if (!batch->otables[i].enabled)
+ continue;
+
+ ret = vmw_setup_otable_base(dev_priv, i, batch->otable_bo,
+ offset,
&otables[i]);
if (unlikely(ret != 0))
goto out_no_setup;
offset += otables[i].size;
}
- dev_priv->otables = otables;
return 0;
out_unreserve:
- ttm_bo_unreserve(dev_priv->otable_bo);
+ ttm_bo_unreserve(batch->otable_bo);
out_no_setup:
- for (i = 0; i < SVGA_OTABLE_DX9_MAX - VMW_OTABLE_SETUP_SUB; ++i)
- vmw_takedown_otable_base(dev_priv, i, &otables[i]);
+ for (i = 0; i < batch->num_otables; ++i) {
+ if (batch->otables[i].enabled)
+ vmw_takedown_otable_base(dev_priv, i,
+ &batch->otables[i]);
+ }
- ttm_bo_unref(&dev_priv->otable_bo);
+ ttm_bo_unref(&batch->otable_bo);
out_no_bo:
- kfree(otables);
return ret;
}
-
/*
- * vmw_otables_takedown - Take down guest backed memory object tables
+ * vmw_otables_setup - Set up guest backed memory object tables
*
* @dev_priv: Pointer to a device private structure
*
- * Take down the Guest Memory Object tables.
+ * Takes care of the device guest backed surface
+ * initialization, by setting up the guest backed memory object tables.
+ * Returns 0 on success and various error codes on failure. A successful return
+ * means the object tables can be taken down using the vmw_otables_takedown
+ * function.
*/
-void vmw_otables_takedown(struct vmw_private *dev_priv)
+int vmw_otables_setup(struct vmw_private *dev_priv)
+{
+ struct vmw_otable **otables = &dev_priv->otable_batch.otables;
+ int ret;
+
+ if (dev_priv->has_dx) {
+ *otables = kmalloc(sizeof(dx_tables), GFP_KERNEL);
+ if (*otables == NULL)
+ return -ENOMEM;
+
+ memcpy(*otables, dx_tables, sizeof(dx_tables));
+ dev_priv->otable_batch.num_otables = ARRAY_SIZE(dx_tables);
+ } else {
+ *otables = kmalloc(sizeof(pre_dx_tables), GFP_KERNEL);
+ if (*otables == NULL)
+ return -ENOMEM;
+
+ memcpy(*otables, pre_dx_tables, sizeof(pre_dx_tables));
+ dev_priv->otable_batch.num_otables = ARRAY_SIZE(pre_dx_tables);
+ }
+
+ ret = vmw_otable_batch_setup(dev_priv, &dev_priv->otable_batch);
+ if (unlikely(ret != 0))
+ goto out_setup;
+
+ return 0;
+
+out_setup:
+ kfree(*otables);
+ return ret;
+}
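For reference, the two public entry points above are expected to pair up over the device lifetime. A minimal sketch of that pairing, with error handling elided; the example_ wrapper is hypothetical:

static int example_enable_guest_backed(struct vmw_private *dev_priv)
{
	int ret;

	/* Picks dx_tables or pre_dx_tables based on dev_priv->has_dx. */
	ret = vmw_otables_setup(dev_priv);
	if (ret)
		return ret;

	/* ... guest-backed objects (MOBs, surfaces, ...) may now be defined ... */

	/* Unbinds each enabled table, then frees the backing BO and metadata. */
	vmw_otables_takedown(dev_priv);
	return 0;
}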
+
+static void vmw_otable_batch_takedown(struct vmw_private *dev_priv,
+ struct vmw_otable_batch *batch)
{
SVGAOTableType i;
- struct ttm_buffer_object *bo = dev_priv->otable_bo;
+ struct ttm_buffer_object *bo = batch->otable_bo;
int ret;
- for (i = 0; i < SVGA_OTABLE_DX9_MAX - VMW_OTABLE_SETUP_SUB; ++i)
- vmw_takedown_otable_base(dev_priv, i,
- &dev_priv->otables[i]);
+ for (i = 0; i < batch->num_otables; ++i)
+ if (batch->otables[i].enabled)
+ vmw_takedown_otable_base(dev_priv, i,
+ &batch->otables[i]);
ret = ttm_bo_reserve(bo, false, true, false, NULL);
BUG_ON(ret != 0);
@@ -331,11 +363,21 @@ void vmw_otables_takedown(struct vmw_private *dev_priv)
vmw_fence_single_bo(bo, NULL);
ttm_bo_unreserve(bo);
- ttm_bo_unref(&dev_priv->otable_bo);
- kfree(dev_priv->otables);
- dev_priv->otables = NULL;
+ ttm_bo_unref(&batch->otable_bo);
}
+/*
+ * vmw_otables_takedown - Take down guest backed memory object tables
+ *
+ * @dev_priv: Pointer to a device private structure
+ *
+ * Take down the Guest Memory Object tables.
+ */
+void vmw_otables_takedown(struct vmw_private *dev_priv)
+{
+ vmw_otable_batch_takedown(dev_priv, &dev_priv->otable_batch);
+ kfree(dev_priv->otable_batch.otables);
+}
/*
* vmw_mob_calculate_pt_pages - Calculate the number of page table pages
@@ -409,7 +451,7 @@ static int vmw_mob_pt_populate(struct vmw_private *dev_priv,
goto out_unreserve;
ttm_bo_unreserve(mob->pt_bo);
-
+
return 0;
out_unreserve:
@@ -429,15 +471,15 @@ out_unreserve:
* *@addr according to the page table entry size.
*/
#if (VMW_PPN_SIZE == 8)
-static void vmw_mob_assign_ppn(__le32 **addr, dma_addr_t val)
+static void vmw_mob_assign_ppn(u32 **addr, dma_addr_t val)
{
- *((__le64 *) *addr) = cpu_to_le64(val >> PAGE_SHIFT);
+ *((u64 *) *addr) = val >> PAGE_SHIFT;
*addr += 2;
}
#else
-static void vmw_mob_assign_ppn(__le32 **addr, dma_addr_t val)
+static void vmw_mob_assign_ppn(u32 **addr, dma_addr_t val)
{
- *(*addr)++ = cpu_to_le32(val >> PAGE_SHIFT);
+ *(*addr)++ = val >> PAGE_SHIFT;
}
#endif
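With the __le annotations gone, the two vmw_mob_assign_ppn() variants differ only in entry width. A sketch of how one page-table page is filled, assuming the vmw_piter helpers behave as in vmw_mob_build_pt() below; example_fill_pt_page is hypothetical:

static void example_fill_pt_page(struct page *page,
				 struct vmw_piter *data_iter)
{
	u32 *save_addr = (u32 *) kmap_atomic(page);
	u32 *addr = save_addr;
	unsigned long i;

	for (i = 0; i < PAGE_SIZE / VMW_PPN_SIZE; ++i) {
		/* Writes one 32- or 64-bit page number and advances addr. */
		vmw_mob_assign_ppn(&addr, vmw_piter_dma_addr(data_iter));
		if (!vmw_piter_next(data_iter))
			break;
	}
	kunmap_atomic(save_addr);
}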
@@ -459,7 +501,7 @@ static unsigned long vmw_mob_build_pt(struct vmw_piter *data_iter,
unsigned long pt_size = num_data_pages * VMW_PPN_SIZE;
unsigned long num_pt_pages = DIV_ROUND_UP(pt_size, PAGE_SIZE);
unsigned long pt_page;
- __le32 *addr, *save_addr;
+ u32 *addr, *save_addr;
unsigned long i;
struct page *page;
@@ -574,7 +616,7 @@ void vmw_mob_unbind(struct vmw_private *dev_priv,
vmw_fence_single_bo(bo, NULL);
ttm_bo_unreserve(bo);
}
- vmw_3d_resource_dec(dev_priv, false);
+ vmw_fifo_resource_dec(dev_priv);
}
/*
@@ -627,7 +669,7 @@ int vmw_mob_bind(struct vmw_private *dev_priv,
mob->pt_level += VMW_MOBFMT_PTDEPTH_1 - SVGA3D_MOBFMT_PTDEPTH_1;
}
- (void) vmw_3d_resource_inc(dev_priv, false);
+ vmw_fifo_resource_inc(dev_priv);
cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
if (unlikely(cmd == NULL)) {
@@ -640,7 +682,7 @@ int vmw_mob_bind(struct vmw_private *dev_priv,
cmd->header.size = sizeof(cmd->body);
cmd->body.mobid = mob_id;
cmd->body.ptDepth = mob->pt_level;
- cmd->body.base = cpu_to_le64(mob->pt_root_page >> PAGE_SHIFT);
+ cmd->body.base = mob->pt_root_page >> PAGE_SHIFT;
cmd->body.sizeInBytes = num_data_pages * PAGE_SIZE;
vmw_fifo_commit(dev_priv, sizeof(*cmd));
@@ -648,7 +690,7 @@ int vmw_mob_bind(struct vmw_private *dev_priv,
return 0;
out_no_cmd_space:
- vmw_3d_resource_dec(dev_priv, false);
+ vmw_fifo_resource_dec(dev_priv);
if (pt_set_up)
ttm_bo_unref(&mob->pt_bo);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
index 87e39f68e9d0..76069f093ccf 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
@@ -1,6 +1,6 @@
/**************************************************************************
*
- * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
+ * Copyright © 2009-2014 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -31,8 +31,8 @@
#include <drm/ttm/ttm_placement.h>
-#include "svga_overlay.h"
-#include "svga_escape.h"
+#include "device_include/svga_overlay.h"
+#include "device_include/svga_escape.h"
#define VMW_MAX_NUM_STREAMS 1
#define VMW_OVERLAY_CAP_MASK (SVGA_FIFO_CAP_VIDEO | SVGA_FIFO_CAP_ESCAPE)
@@ -100,7 +100,7 @@ static int vmw_overlay_send_put(struct vmw_private *dev_priv,
{
struct vmw_escape_video_flush *flush;
size_t fifo_size;
- bool have_so = dev_priv->sou_priv ? true : false;
+ bool have_so = (dev_priv->active_display_unit == vmw_du_screen_object);
int i, num_items;
SVGAGuestPtr ptr;
@@ -231,10 +231,10 @@ static int vmw_overlay_move_buffer(struct vmw_private *dev_priv,
if (!pin)
return vmw_dmabuf_unpin(dev_priv, buf, inter);
- if (!dev_priv->sou_priv)
- return vmw_dmabuf_to_vram(dev_priv, buf, true, inter);
+ if (dev_priv->active_display_unit == vmw_du_legacy)
+ return vmw_dmabuf_pin_in_vram(dev_priv, buf, inter);
- return vmw_dmabuf_to_vram_or_gmr(dev_priv, buf, true, inter);
+ return vmw_dmabuf_pin_in_vram_or_gmr(dev_priv, buf, inter);
}
/**
@@ -453,7 +453,7 @@ int vmw_overlay_pause_all(struct vmw_private *dev_priv)
static bool vmw_overlay_available(const struct vmw_private *dev_priv)
{
- return (dev_priv->overlay_priv != NULL &&
+ return (dev_priv->overlay_priv != NULL &&
((dev_priv->fifo.capabilities & VMW_OVERLAY_CAP_MASK) ==
VMW_OVERLAY_CAP_MASK));
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_reg.h b/drivers/gpu/drm/vmwgfx/vmwgfx_reg.h
index 9d0dd3a342eb..dce798053a96 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_reg.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_reg.h
@@ -1,6 +1,6 @@
/**************************************************************************
*
- * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
+ * Copyright © 2009-2014 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -39,19 +39,17 @@
#define VMWGFX_IRQSTATUS_PORT 0x8
struct svga_guest_mem_descriptor {
- __le32 ppn;
- __le32 num_pages;
+ u32 ppn;
+ u32 num_pages;
};
struct svga_fifo_cmd_fence {
- __le32 fence;
+ u32 fence;
};
#define SVGA_SYNC_GENERIC 1
#define SVGA_SYNC_FIFOFULL 2
-#include "svga_types.h"
-
-#include "svga3d_reg.h"
+#include "device_include/svga3d_reg.h"
#endif
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index 210ef15b1d09..c1912f852b42 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -1,6 +1,6 @@
/**************************************************************************
*
- * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
+ * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -31,6 +31,7 @@
#include <drm/ttm/ttm_placement.h>
#include <drm/drmP.h>
#include "vmwgfx_resource_priv.h"
+#include "vmwgfx_binding.h"
#define VMW_RES_EVICT_ERR_COUNT 10
@@ -121,6 +122,7 @@ static void vmw_resource_release(struct kref *kref)
int id;
struct idr *idr = &dev_priv->res_idr[res->func->res_type];
+ write_lock(&dev_priv->resource_lock);
res->avail = false;
list_del_init(&res->lru_head);
write_unlock(&dev_priv->resource_lock);
@@ -143,10 +145,10 @@ static void vmw_resource_release(struct kref *kref)
}
if (likely(res->hw_destroy != NULL)) {
- res->hw_destroy(res);
mutex_lock(&dev_priv->binding_mutex);
- vmw_context_binding_res_list_kill(&res->binding_head);
+ vmw_binding_res_list_kill(&res->binding_head);
mutex_unlock(&dev_priv->binding_mutex);
+ res->hw_destroy(res);
}
id = res->id;
@@ -156,20 +158,17 @@ static void vmw_resource_release(struct kref *kref)
kfree(res);
write_lock(&dev_priv->resource_lock);
-
if (id != -1)
idr_remove(idr, id);
+ write_unlock(&dev_priv->resource_lock);
}
void vmw_resource_unreference(struct vmw_resource **p_res)
{
struct vmw_resource *res = *p_res;
- struct vmw_private *dev_priv = res->dev_priv;
*p_res = NULL;
- write_lock(&dev_priv->resource_lock);
kref_put(&res->kref, vmw_resource_release);
- write_unlock(&dev_priv->resource_lock);
}
@@ -260,17 +259,16 @@ void vmw_resource_activate(struct vmw_resource *res,
write_unlock(&dev_priv->resource_lock);
}
-struct vmw_resource *vmw_resource_lookup(struct vmw_private *dev_priv,
- struct idr *idr, int id)
+static struct vmw_resource *vmw_resource_lookup(struct vmw_private *dev_priv,
+ struct idr *idr, int id)
{
struct vmw_resource *res;
read_lock(&dev_priv->resource_lock);
res = idr_find(idr, id);
- if (res && res->avail)
- kref_get(&res->kref);
- else
+ if (!res || !res->avail || !kref_get_unless_zero(&res->kref))
res = NULL;
+
read_unlock(&dev_priv->resource_lock);
if (unlikely(res == NULL))
@@ -900,20 +898,21 @@ int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
vmw_user_stream_size,
false, true);
+ ttm_read_unlock(&dev_priv->reservation_sem);
if (unlikely(ret != 0)) {
if (ret != -ERESTARTSYS)
DRM_ERROR("Out of graphics memory for stream"
" creation.\n");
- goto out_unlock;
- }
+ goto out_ret;
+ }
stream = kmalloc(sizeof(*stream), GFP_KERNEL);
if (unlikely(stream == NULL)) {
ttm_mem_global_free(vmw_mem_glob(dev_priv),
vmw_user_stream_size);
ret = -ENOMEM;
- goto out_unlock;
+ goto out_ret;
}
res = &stream->stream.res;
@@ -926,7 +925,7 @@ int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
ret = vmw_stream_init(dev_priv, &stream->stream, vmw_user_stream_free);
if (unlikely(ret != 0))
- goto out_unlock;
+ goto out_ret;
tmp = vmw_resource_reference(res);
ret = ttm_base_object_init(tfile, &stream->base, false, VMW_RES_STREAM,
@@ -940,8 +939,7 @@ int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
arg->stream_id = res->id;
out_err:
vmw_resource_unreference(&res);
-out_unlock:
- ttm_read_unlock(&dev_priv->reservation_sem);
+out_ret:
return ret;
}
@@ -1152,14 +1150,16 @@ out_bind_failed:
* command submission.
*
* @res: Pointer to the struct vmw_resource to unreserve.
+ * @switch_backup: Backup buffer has been switched.
* @new_backup: Pointer to new backup buffer if command submission
- * switched.
- * @new_backup_offset: New backup offset if @new_backup is !NULL.
+ * switched. May be NULL.
+ * @new_backup_offset: New backup offset if @switch_backup is true.
*
* Currently unreserving a resource means putting it back on the device's
* resource lru list, so that it can be evicted if necessary.
*/
void vmw_resource_unreserve(struct vmw_resource *res,
+ bool switch_backup,
struct vmw_dma_buffer *new_backup,
unsigned long new_backup_offset)
{
@@ -1168,22 +1168,25 @@ void vmw_resource_unreserve(struct vmw_resource *res,
if (!list_empty(&res->lru_head))
return;
- if (new_backup && new_backup != res->backup) {
-
+ if (switch_backup && new_backup != res->backup) {
if (res->backup) {
lockdep_assert_held(&res->backup->base.resv->lock.base);
list_del_init(&res->mob_head);
vmw_dmabuf_unreference(&res->backup);
}
- res->backup = vmw_dmabuf_reference(new_backup);
- lockdep_assert_held(&new_backup->base.resv->lock.base);
- list_add_tail(&res->mob_head, &new_backup->res_list);
+ if (new_backup) {
+ res->backup = vmw_dmabuf_reference(new_backup);
+ lockdep_assert_held(&new_backup->base.resv->lock.base);
+ list_add_tail(&res->mob_head, &new_backup->res_list);
+ } else {
+ res->backup = NULL;
+ }
}
- if (new_backup)
+ if (switch_backup)
res->backup_offset = new_backup_offset;
- if (!res->func->may_evict || res->id == -1)
+ if (!res->func->may_evict || res->id == -1 || res->pin_count)
return;
write_lock(&dev_priv->resource_lock);
@@ -1259,7 +1262,8 @@ out_no_reserve:
* the buffer may not be bound to the resource at this point.
*
*/
-int vmw_resource_reserve(struct vmw_resource *res, bool no_backup)
+int vmw_resource_reserve(struct vmw_resource *res, bool interruptible,
+ bool no_backup)
{
struct vmw_private *dev_priv = res->dev_priv;
int ret;
@@ -1270,9 +1274,13 @@ int vmw_resource_reserve(struct vmw_resource *res, bool no_backup)
if (res->func->needs_backup && res->backup == NULL &&
!no_backup) {
- ret = vmw_resource_buf_alloc(res, true);
- if (unlikely(ret != 0))
+ ret = vmw_resource_buf_alloc(res, interruptible);
+ if (unlikely(ret != 0)) {
+ DRM_ERROR("Failed to allocate a backup buffer "
+ "of size %lu. bytes\n",
+ (unsigned long) res->backup_size);
return ret;
+ }
}
return 0;
@@ -1305,7 +1313,7 @@ vmw_resource_backoff_reservation(struct ttm_validate_buffer *val_buf)
* @res: The resource to evict.
* @interruptible: Whether to wait interruptible.
*/
-int vmw_resource_do_evict(struct vmw_resource *res, bool interruptible)
+static int vmw_resource_do_evict(struct vmw_resource *res, bool interruptible)
{
struct ttm_validate_buffer val_buf;
const struct vmw_res_func *func = res->func;
@@ -1356,7 +1364,7 @@ int vmw_resource_validate(struct vmw_resource *res)
struct ttm_validate_buffer val_buf;
unsigned err_count = 0;
- if (likely(!res->func->may_evict))
+ if (!res->func->create)
return 0;
val_buf.bo = NULL;
@@ -1443,9 +1451,9 @@ void vmw_fence_single_bo(struct ttm_buffer_object *bo,
/**
* vmw_resource_move_notify - TTM move_notify_callback
*
- * @bo: The TTM buffer object about to move.
- * @mem: The truct ttm_mem_reg indicating to what memory
- * region the move is taking place.
+ * @bo: The TTM buffer object about to move.
+ * @mem: The struct ttm_mem_reg indicating to what memory
+ * region the move is taking place.
*
* Evicts the Guest Backed hardware resource if the backup
* buffer is being moved out of MOB memory.
@@ -1495,6 +1503,101 @@ void vmw_resource_move_notify(struct ttm_buffer_object *bo,
}
}
+
+
+/**
+ * vmw_query_readback_all - Read back cached query states
+ *
+ * @dx_query_mob: Buffer containing the DX query MOB
+ *
+ * Read back cached states from the device if they exist. This function
+ * assumes binding_mutex is held.
+ */
+int vmw_query_readback_all(struct vmw_dma_buffer *dx_query_mob)
+{
+ struct vmw_resource *dx_query_ctx;
+ struct vmw_private *dev_priv;
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDXReadbackAllQuery body;
+ } *cmd;
+
+
+ /* No query bound, so do nothing */
+ if (!dx_query_mob || !dx_query_mob->dx_query_ctx)
+ return 0;
+
+ dx_query_ctx = dx_query_mob->dx_query_ctx;
+ dev_priv = dx_query_ctx->dev_priv;
+
+ cmd = vmw_fifo_reserve_dx(dev_priv, sizeof(*cmd), dx_query_ctx->id);
+ if (unlikely(cmd == NULL)) {
+ DRM_ERROR("Failed reserving FIFO space for "
+ "query MOB read back.\n");
+ return -ENOMEM;
+ }
+
+ cmd->header.id = SVGA_3D_CMD_DX_READBACK_ALL_QUERY;
+ cmd->header.size = sizeof(cmd->body);
+ cmd->body.cid = dx_query_ctx->id;
+
+ vmw_fifo_commit(dev_priv, sizeof(*cmd));
+
+ /* Triggers a rebind the next time the affected context is bound */
+ dx_query_mob->dx_query_ctx = NULL;
+
+ return 0;
+}
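vmw_query_move_notify() below is the in-tree caller, but the locking contract is worth spelling out. A minimal sketch, assuming the caller already holds a reference on the MOB; the example_ wrapper is hypothetical:

static int example_query_readback(struct vmw_private *dev_priv,
				  struct vmw_dma_buffer *dx_query_mob)
{
	int ret;

	mutex_lock(&dev_priv->binding_mutex);
	/* Returns 0 immediately if no query context is bound to the MOB. */
	ret = vmw_query_readback_all(dx_query_mob);
	mutex_unlock(&dev_priv->binding_mutex);

	return ret;
}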
+
+
+
+/**
+ * vmw_query_move_notify - Read back cached query states
+ *
+ * @bo: The TTM buffer object about to move.
+ * @mem: The memory region @bo is moving to.
+ *
+ * Called before the query MOB is swapped out to read back cached query
+ * states from the device.
+ */
+void vmw_query_move_notify(struct ttm_buffer_object *bo,
+ struct ttm_mem_reg *mem)
+{
+ struct vmw_dma_buffer *dx_query_mob;
+ struct ttm_bo_device *bdev = bo->bdev;
+ struct vmw_private *dev_priv;
+
+
+ dev_priv = container_of(bdev, struct vmw_private, bdev);
+
+ mutex_lock(&dev_priv->binding_mutex);
+
+ dx_query_mob = container_of(bo, struct vmw_dma_buffer, base);
+ if (mem == NULL || !dx_query_mob || !dx_query_mob->dx_query_ctx) {
+ mutex_unlock(&dev_priv->binding_mutex);
+ return;
+ }
+
+ /* If BO is being moved from MOB to system memory */
+ if (mem->mem_type == TTM_PL_SYSTEM && bo->mem.mem_type == VMW_PL_MOB) {
+ struct vmw_fence_obj *fence;
+
+ (void) vmw_query_readback_all(dx_query_mob);
+ mutex_unlock(&dev_priv->binding_mutex);
+
+ /* Create a fence and attach the BO to it */
+ (void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
+ vmw_fence_single_bo(bo, fence);
+
+ if (fence != NULL)
+ vmw_fence_obj_unreference(&fence);
+
+ (void) ttm_bo_wait(bo, false, false, false);
+ } else
+ mutex_unlock(&dev_priv->binding_mutex);
+
+}
+
/**
* vmw_resource_needs_backup - Return whether a resource needs a backup buffer.
*
@@ -1573,3 +1676,107 @@ void vmw_resource_evict_all(struct vmw_private *dev_priv)
mutex_unlock(&dev_priv->cmdbuf_mutex);
}
+
+/**
+ * vmw_resource_pin - Add a pin reference on a resource
+ *
+ * @res: The resource to add a pin reference on
+ *
+ * This function adds a pin reference, and if needed validates the resource.
+ * Having a pin reference means that the resource can never be evicted, and
+ * its id will never change as long as there is a pin reference.
+ * This function returns 0 on success and a negative error code on failure.
+ */
+int vmw_resource_pin(struct vmw_resource *res, bool interruptible)
+{
+ struct vmw_private *dev_priv = res->dev_priv;
+ int ret;
+
+ ttm_write_lock(&dev_priv->reservation_sem, interruptible);
+ mutex_lock(&dev_priv->cmdbuf_mutex);
+ ret = vmw_resource_reserve(res, interruptible, false);
+ if (ret)
+ goto out_no_reserve;
+
+ if (res->pin_count == 0) {
+ struct vmw_dma_buffer *vbo = NULL;
+
+ if (res->backup) {
+ vbo = res->backup;
+
+ ttm_bo_reserve(&vbo->base, interruptible, false, false,
+ NULL);
+ if (!vbo->pin_count) {
+ ret = ttm_bo_validate
+ (&vbo->base,
+ res->func->backup_placement,
+ interruptible, false);
+ if (ret) {
+ ttm_bo_unreserve(&vbo->base);
+ goto out_no_validate;
+ }
+ }
+
+ /* Do we really need to pin the MOB as well? */
+ vmw_bo_pin_reserved(vbo, true);
+ }
+ ret = vmw_resource_validate(res);
+ if (vbo)
+ ttm_bo_unreserve(&vbo->base);
+ if (ret)
+ goto out_no_validate;
+ }
+ res->pin_count++;
+
+out_no_validate:
+ vmw_resource_unreserve(res, false, NULL, 0UL);
+out_no_reserve:
+ mutex_unlock(&dev_priv->cmdbuf_mutex);
+ ttm_write_unlock(&dev_priv->reservation_sem);
+
+ return ret;
+}
+
+/**
+ * vmw_resource_unpin - Remove a pin reference from a resource
+ *
+ * @res: The resource to remove a pin reference from
+ *
+ * Having a pin reference means that the resource can never be evicted, and
+ * its id will never change as long as there is a pin reference.
+ */
+void vmw_resource_unpin(struct vmw_resource *res)
+{
+ struct vmw_private *dev_priv = res->dev_priv;
+ int ret;
+
+ ttm_read_lock(&dev_priv->reservation_sem, false);
+ mutex_lock(&dev_priv->cmdbuf_mutex);
+
+ ret = vmw_resource_reserve(res, false, true);
+ WARN_ON(ret);
+
+ WARN_ON(res->pin_count == 0);
+ if (--res->pin_count == 0 && res->backup) {
+ struct vmw_dma_buffer *vbo = res->backup;
+
+ ttm_bo_reserve(&vbo->base, false, false, false, NULL);
+ vmw_bo_pin_reserved(vbo, false);
+ ttm_bo_unreserve(&vbo->base);
+ }
+
+ vmw_resource_unreserve(res, false, NULL, 0UL);
+
+ mutex_unlock(&dev_priv->cmdbuf_mutex);
+ ttm_read_unlock(&dev_priv->reservation_sem);
+}
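The pin interface above is what internal users such as scanout setup are expected to call. A sketch of the intended pairing, assuming 'res' is already referenced by the caller; example_scanout_setup is hypothetical:

static int example_scanout_setup(struct vmw_resource *res)
{
	/* Validates the resource and pins its backup MOB if it has one. */
	int ret = vmw_resource_pin(res, true);

	if (ret)
		return ret;

	/* ... res->id is now stable and the resource cannot be evicted ... */

	vmw_resource_unpin(res);
	return 0;
}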
+
+/**
+ * vmw_res_type - Return the resource type
+ *
+ * @res: Pointer to the resource
+ */
+enum vmw_res_type vmw_res_type(const struct vmw_resource *res)
+{
+ return res->func->res_type;
+}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource_priv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_resource_priv.h
index f3adeed2854c..5994ef6265e0 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource_priv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource_priv.h
@@ -1,6 +1,6 @@
/**************************************************************************
*
- * Copyright © 2012 VMware, Inc., Palo Alto, CA., USA
+ * Copyright © 2012-2014 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -30,6 +30,12 @@
#include "vmwgfx_drv.h"
+enum vmw_cmdbuf_res_state {
+ VMW_CMDBUF_RES_COMMITTED,
+ VMW_CMDBUF_RES_ADD,
+ VMW_CMDBUF_RES_DEL
+};
+
/**
* struct vmw_user_resource_conv - Identify a derived user-exported resource
* type and provide a function to convert its ttm_base_object pointer to
@@ -55,8 +61,10 @@ struct vmw_user_resource_conv {
* @bind: Bind a hardware resource to persistent buffer storage.
* @unbind: Unbind a hardware resource from persistent
* buffer storage.
+ * @commit_notify: If the resource is a command buffer managed resource,
+ * callback to notify that a define or remove command
+ * has been committed to the device.
*/
-
struct vmw_res_func {
enum vmw_res_type res_type;
bool needs_backup;
@@ -71,6 +79,8 @@ struct vmw_res_func {
int (*unbind) (struct vmw_resource *res,
bool readback,
struct ttm_validate_buffer *val_buf);
+ void (*commit_notify)(struct vmw_resource *res,
+ enum vmw_cmdbuf_res_state state);
};
int vmw_resource_alloc_id(struct vmw_resource *res);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
index 7dc591d04d9a..b96d1ab610c5 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
@@ -1,6 +1,6 @@
/**************************************************************************
*
- * Copyright © 2011 VMware, Inc., Palo Alto, CA., USA
+ * Copyright © 2011-2015 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -36,10 +36,55 @@
#define vmw_connector_to_sou(x) \
container_of(x, struct vmw_screen_object_unit, base.connector)
+/**
+ * struct vmw_kms_sou_surface_dirty - Closure structure for
+ * blit surface to screen command.
+ * @base: The base type we derive from. Used by vmw_kms_helper_dirty().
+ * @left: Left side of bounding box.
+ * @right: Right side of bounding box.
+ * @top: Top side of bounding box.
+ * @bottom: Bottom side of bounding box.
+ * @dst_x: Difference between source clip rects and framebuffer coordinates.
+ * @dst_y: Difference between source clip rects and framebuffer coordinates.
+ * @sid: Surface id of surface to copy from.
+ */
+struct vmw_kms_sou_surface_dirty {
+ struct vmw_kms_dirty base;
+ s32 left, right, top, bottom;
+ s32 dst_x, dst_y;
+ u32 sid;
+};
+
+/*
+ * SVGA commands that are used by this code. Please see the device headers
+ * for explanation.
+ */
+struct vmw_kms_sou_readback_blit {
+ uint32 header;
+ SVGAFifoCmdBlitScreenToGMRFB body;
+};
+
+struct vmw_kms_sou_dmabuf_blit {
+ uint32 header;
+ SVGAFifoCmdBlitGMRFBToScreen body;
+};
+
+struct vmw_kms_sou_dirty_cmd {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdBlitSurfaceToScreen body;
+};
+
+
+/*
+ * Other structs.
+ */
+
struct vmw_screen_object_display {
unsigned num_implicit;
struct vmw_framebuffer *implicit_fb;
+ SVGAFifoCmdDefineGMRFB cur;
+ struct vmw_dma_buffer *pinned_gmrfb;
};
/**
@@ -57,7 +102,7 @@ struct vmw_screen_object_unit {
static void vmw_sou_destroy(struct vmw_screen_object_unit *sou)
{
- vmw_display_unit_cleanup(&sou->base);
+ vmw_du_cleanup(&sou->base);
kfree(sou);
}
@@ -72,7 +117,7 @@ static void vmw_sou_crtc_destroy(struct drm_crtc *crtc)
}
static void vmw_sou_del_active(struct vmw_private *vmw_priv,
- struct vmw_screen_object_unit *sou)
+ struct vmw_screen_object_unit *sou)
{
struct vmw_screen_object_display *ld = vmw_priv->sou_priv;
@@ -84,8 +129,8 @@ static void vmw_sou_del_active(struct vmw_private *vmw_priv,
}
static void vmw_sou_add_active(struct vmw_private *vmw_priv,
- struct vmw_screen_object_unit *sou,
- struct vmw_framebuffer *vfb)
+ struct vmw_screen_object_unit *sou,
+ struct vmw_framebuffer *vfb)
{
struct vmw_screen_object_display *ld = vmw_priv->sou_priv;
@@ -202,14 +247,7 @@ static int vmw_sou_fifo_destroy(struct vmw_private *dev_priv,
static void vmw_sou_backing_free(struct vmw_private *dev_priv,
struct vmw_screen_object_unit *sou)
{
- struct ttm_buffer_object *bo;
-
- if (unlikely(sou->buffer == NULL))
- return;
-
- bo = &sou->buffer->base;
- ttm_bo_unref(&bo);
- sou->buffer = NULL;
+ vmw_dmabuf_unreference(&sou->buffer);
sou->buffer_size = 0;
}
@@ -274,13 +312,13 @@ static int vmw_sou_crtc_set_config(struct drm_mode_set *set)
dev_priv = vmw_priv(crtc->dev);
if (set->num_connectors > 1) {
- DRM_ERROR("to many connectors\n");
+ DRM_ERROR("Too many connectors\n");
return -EINVAL;
}
if (set->num_connectors == 1 &&
set->connectors[0] != &sou->base.connector) {
- DRM_ERROR("connector doesn't match %p %p\n",
+ DRM_ERROR("Connector doesn't match %p %p\n",
set->connectors[0], &sou->base.connector);
return -EINVAL;
}
@@ -331,7 +369,7 @@ static int vmw_sou_crtc_set_config(struct drm_mode_set *set)
return -EINVAL;
}
- vmw_fb_off(dev_priv);
+ vmw_svga_enable(dev_priv);
if (mode->hdisplay != crtc->mode.hdisplay ||
mode->vdisplay != crtc->mode.vdisplay) {
@@ -390,6 +428,108 @@ static int vmw_sou_crtc_set_config(struct drm_mode_set *set)
return 0;
}
+/**
+ * Returns whether this unit can be page flipped.
+ * Must be called with the mode_config mutex held.
+ */
+static bool vmw_sou_screen_object_flippable(struct vmw_private *dev_priv,
+ struct drm_crtc *crtc)
+{
+ struct vmw_screen_object_unit *sou = vmw_crtc_to_sou(crtc);
+
+ if (!sou->base.is_implicit)
+ return true;
+
+ if (dev_priv->sou_priv->num_implicit != 1)
+ return false;
+
+ return true;
+}
+
+/**
+ * Update the implicit fb to the current fb of this crtc.
+ * Must be called with the mode_config mutex held.
+ */
+static void vmw_sou_update_implicit_fb(struct vmw_private *dev_priv,
+ struct drm_crtc *crtc)
+{
+ struct vmw_screen_object_unit *sou = vmw_crtc_to_sou(crtc);
+
+ BUG_ON(!sou->base.is_implicit);
+
+ dev_priv->sou_priv->implicit_fb =
+ vmw_framebuffer_to_vfb(sou->base.crtc.primary->fb);
+}
+
+static int vmw_sou_crtc_page_flip(struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ struct drm_pending_vblank_event *event,
+ uint32_t flags)
+{
+ struct vmw_private *dev_priv = vmw_priv(crtc->dev);
+ struct drm_framebuffer *old_fb = crtc->primary->fb;
+ struct vmw_framebuffer *vfb = vmw_framebuffer_to_vfb(fb);
+ struct vmw_fence_obj *fence = NULL;
+ struct drm_clip_rect clips;
+ int ret;
+
+ /* require ScreenObject support for page flipping */
+ if (!dev_priv->sou_priv)
+ return -ENOSYS;
+
+ if (!vmw_sou_screen_object_flippable(dev_priv, crtc))
+ return -EINVAL;
+
+ crtc->primary->fb = fb;
+
+ /* do a full screen dirty update */
+ clips.x1 = clips.y1 = 0;
+ clips.x2 = fb->width;
+ clips.y2 = fb->height;
+
+ if (vfb->dmabuf)
+ ret = vmw_kms_sou_do_dmabuf_dirty(dev_priv, vfb,
+ &clips, 1, 1,
+ true, &fence);
+ else
+ ret = vmw_kms_sou_do_surface_dirty(dev_priv, vfb,
+ &clips, NULL, NULL,
+ 0, 0, 1, 1, &fence);
+
+
+ if (ret != 0)
+ goto out_no_fence;
+ if (!fence) {
+ ret = -EINVAL;
+ goto out_no_fence;
+ }
+
+ if (event) {
+ struct drm_file *file_priv = event->base.file_priv;
+
+ ret = vmw_event_fence_action_queue(file_priv, fence,
+ &event->base,
+ &event->event.tv_sec,
+ &event->event.tv_usec,
+ true);
+ }
+
+ /*
+ * No need to hold on to this now. The only cleanup
+ * we need to do if we fail is unref the fence.
+ */
+ vmw_fence_obj_unreference(&fence);
+
+ if (vmw_crtc_to_du(crtc)->is_implicit)
+ vmw_sou_update_implicit_fb(dev_priv, crtc);
+
+ return ret;
+
+out_no_fence:
+ crtc->primary->fb = old_fb;
+ return ret;
+}
+
static struct drm_crtc_funcs vmw_screen_object_crtc_funcs = {
.save = vmw_du_crtc_save,
.restore = vmw_du_crtc_restore,
@@ -398,7 +538,7 @@ static struct drm_crtc_funcs vmw_screen_object_crtc_funcs = {
.gamma_set = vmw_du_crtc_gamma_set,
.destroy = vmw_sou_crtc_destroy,
.set_config = vmw_sou_crtc_set_config,
- .page_flip = vmw_du_page_flip,
+ .page_flip = vmw_sou_crtc_page_flip,
};
/*
@@ -423,7 +563,7 @@ static void vmw_sou_connector_destroy(struct drm_connector *connector)
vmw_sou_destroy(vmw_connector_to_sou(connector));
}
-static struct drm_connector_funcs vmw_legacy_connector_funcs = {
+static struct drm_connector_funcs vmw_sou_connector_funcs = {
.dpms = vmw_du_connector_dpms,
.save = vmw_du_connector_save,
.restore = vmw_du_connector_restore,
@@ -458,7 +598,7 @@ static int vmw_sou_init(struct vmw_private *dev_priv, unsigned unit)
sou->base.pref_mode = NULL;
sou->base.is_implicit = true;
- drm_connector_init(dev, connector, &vmw_legacy_connector_funcs,
+ drm_connector_init(dev, connector, &vmw_sou_connector_funcs,
DRM_MODE_CONNECTOR_VIRTUAL);
connector->status = vmw_du_connector_detect(connector, true);
@@ -481,7 +621,7 @@ static int vmw_sou_init(struct vmw_private *dev_priv, unsigned unit)
return 0;
}
-int vmw_kms_init_screen_object_display(struct vmw_private *dev_priv)
+int vmw_kms_sou_init_display(struct vmw_private *dev_priv)
{
struct drm_device *dev = dev_priv->dev;
int i, ret;
@@ -516,7 +656,9 @@ int vmw_kms_init_screen_object_display(struct vmw_private *dev_priv)
for (i = 0; i < VMWGFX_NUM_DISPLAY_UNITS; ++i)
vmw_sou_init(dev_priv, i);
- DRM_INFO("Screen objects system initialized\n");
+ dev_priv->active_display_unit = vmw_du_screen_object;
+
+ DRM_INFO("Screen Objects Display Unit initialized\n");
return 0;
@@ -529,7 +671,7 @@ err_no_mem:
return ret;
}
-int vmw_kms_close_screen_object_display(struct vmw_private *dev_priv)
+int vmw_kms_sou_close_display(struct vmw_private *dev_priv)
{
struct drm_device *dev = dev_priv->dev;
@@ -543,35 +685,369 @@ int vmw_kms_close_screen_object_display(struct vmw_private *dev_priv)
return 0;
}
+static int do_dmabuf_define_gmrfb(struct vmw_private *dev_priv,
+ struct vmw_framebuffer *framebuffer)
+{
+ struct vmw_dma_buffer *buf =
+ container_of(framebuffer, struct vmw_framebuffer_dmabuf,
+ base)->buffer;
+ int depth = framebuffer->base.depth;
+ struct {
+ uint32_t header;
+ SVGAFifoCmdDefineGMRFB body;
+ } *cmd;
+
+ /* Emulate RGBA support; contrary to svga_reg.h, this is not
+ * supported by hosts. This is only a problem if we are reading
+ * this value later and expecting what we uploaded back.
+ */
+ if (depth == 32)
+ depth = 24;
+
+ cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+ if (!cmd) {
+ DRM_ERROR("Out of fifo space for dirty framebuffer command.\n");
+ return -ENOMEM;
+ }
+
+ cmd->header = SVGA_CMD_DEFINE_GMRFB;
+ cmd->body.format.bitsPerPixel = framebuffer->base.bits_per_pixel;
+ cmd->body.format.colorDepth = depth;
+ cmd->body.format.reserved = 0;
+ cmd->body.bytesPerLine = framebuffer->base.pitches[0];
+ /* Buffer is reserved in vram or GMR */
+ vmw_bo_get_guest_ptr(&buf->base, &cmd->body.ptr);
+ vmw_fifo_commit(dev_priv, sizeof(*cmd));
+
+ return 0;
+}
+
/**
- * Returns if this unit can be page flipped.
- * Must be called with the mode_config mutex held.
+ * vmw_sou_surface_fifo_commit - Callback to fill in and submit a
+ * blit surface to screen command.
+ *
+ * @dirty: The closure structure.
+ *
+ * Fills in the missing fields in the command, and translates the cliprects
+ * to match the destination bounding box encoded.
*/
-bool vmw_kms_screen_object_flippable(struct vmw_private *dev_priv,
- struct drm_crtc *crtc)
+static void vmw_sou_surface_fifo_commit(struct vmw_kms_dirty *dirty)
{
- struct vmw_screen_object_unit *sou = vmw_crtc_to_sou(crtc);
+ struct vmw_kms_sou_surface_dirty *sdirty =
+ container_of(dirty, typeof(*sdirty), base);
+ struct vmw_kms_sou_dirty_cmd *cmd = dirty->cmd;
+ s32 trans_x = dirty->unit->crtc.x - sdirty->dst_x;
+ s32 trans_y = dirty->unit->crtc.y - sdirty->dst_y;
+ size_t region_size = dirty->num_hits * sizeof(SVGASignedRect);
+ SVGASignedRect *blit = (SVGASignedRect *) &cmd[1];
+ int i;
+
+ cmd->header.id = SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN;
+ cmd->header.size = sizeof(cmd->body) + region_size;
+
+ /*
+ * Use the destination bounding box to specify destination - and
+ * source bounding regions.
+ */
+ cmd->body.destRect.left = sdirty->left;
+ cmd->body.destRect.right = sdirty->right;
+ cmd->body.destRect.top = sdirty->top;
+ cmd->body.destRect.bottom = sdirty->bottom;
+
+ cmd->body.srcRect.left = sdirty->left + trans_x;
+ cmd->body.srcRect.right = sdirty->right + trans_x;
+ cmd->body.srcRect.top = sdirty->top + trans_y;
+ cmd->body.srcRect.bottom = sdirty->bottom + trans_y;
+
+ cmd->body.srcImage.sid = sdirty->sid;
+ cmd->body.destScreenId = dirty->unit->unit;
+
+ /* Blits are relative to the destination rect. Translate. */
+ for (i = 0; i < dirty->num_hits; ++i, ++blit) {
+ blit->left -= sdirty->left;
+ blit->right -= sdirty->left;
+ blit->top -= sdirty->top;
+ blit->bottom -= sdirty->top;
+ }
- if (!sou->base.is_implicit)
- return true;
+ vmw_fifo_commit(dirty->dev_priv, region_size + sizeof(*cmd));
- if (dev_priv->sou_priv->num_implicit != 1)
- return false;
+ sdirty->left = sdirty->top = S32_MAX;
+ sdirty->right = sdirty->bottom = S32_MIN;
+}
- return true;
+/**
+ * vmw_sou_surface_clip - Callback to encode a blit surface to screen cliprect.
+ *
+ * @dirty: The closure structure
+ *
+ * Encodes a SVGASignedRect cliprect and updates the bounding box of the
+ * BLIT_SURFACE_TO_SCREEN command.
+ */
+static void vmw_sou_surface_clip(struct vmw_kms_dirty *dirty)
+{
+ struct vmw_kms_sou_surface_dirty *sdirty =
+ container_of(dirty, typeof(*sdirty), base);
+ struct vmw_kms_sou_dirty_cmd *cmd = dirty->cmd;
+ SVGASignedRect *blit = (SVGASignedRect *) &cmd[1];
+
+ /* Destination rect. */
+ blit += dirty->num_hits;
+ blit->left = dirty->unit_x1;
+ blit->top = dirty->unit_y1;
+ blit->right = dirty->unit_x2;
+ blit->bottom = dirty->unit_y2;
+
+ /* Destination bounding box */
+ sdirty->left = min_t(s32, sdirty->left, dirty->unit_x1);
+ sdirty->top = min_t(s32, sdirty->top, dirty->unit_y1);
+ sdirty->right = max_t(s32, sdirty->right, dirty->unit_x2);
+ sdirty->bottom = max_t(s32, sdirty->bottom, dirty->unit_y2);
+
+ dirty->num_hits++;
}
/**
- * Update the implicit fb to the current fb of this crtc.
- * Must be called with the mode_config mutex held.
+ * vmw_kms_sou_do_surface_dirty - Dirty part of a surface backed framebuffer
+ *
+ * @dev_priv: Pointer to the device private structure.
+ * @framebuffer: Pointer to the surface-buffer backed framebuffer.
+ * @clips: Array of clip rects. Either @clips or @vclips must be NULL.
+ * @vclips: Alternate array of clip rects. Either @clips or @vclips must
+ * be NULL.
+ * @srf: Pointer to surface to blit from. If NULL, the surface attached
+ * to @framebuffer will be used.
+ * @dest_x: X coordinate offset to align @srf with framebuffer coordinates.
+ * @dest_y: Y coordinate offset to align @srf with framebuffer coordinates.
+ * @num_clips: Number of clip rects in @clips.
+ * @inc: Increment to use when looping over @clips.
+ * @out_fence: If non-NULL, will return a ref-counted pointer to a
+ * struct vmw_fence_obj. The returned fence pointer may be NULL in which
+ * case the device has already synchronized.
+ *
+ * Returns 0 on success, negative error code on failure. -ERESTARTSYS if
+ * interrupted.
*/
-void vmw_kms_screen_object_update_implicit_fb(struct vmw_private *dev_priv,
- struct drm_crtc *crtc)
+int vmw_kms_sou_do_surface_dirty(struct vmw_private *dev_priv,
+ struct vmw_framebuffer *framebuffer,
+ struct drm_clip_rect *clips,
+ struct drm_vmw_rect *vclips,
+ struct vmw_resource *srf,
+ s32 dest_x,
+ s32 dest_y,
+ unsigned num_clips, int inc,
+ struct vmw_fence_obj **out_fence)
{
- struct vmw_screen_object_unit *sou = vmw_crtc_to_sou(crtc);
+ struct vmw_framebuffer_surface *vfbs =
+ container_of(framebuffer, typeof(*vfbs), base);
+ struct vmw_kms_sou_surface_dirty sdirty;
+ int ret;
- BUG_ON(!sou->base.is_implicit);
+ if (!srf)
+ srf = &vfbs->surface->res;
- dev_priv->sou_priv->implicit_fb =
- vmw_framebuffer_to_vfb(sou->base.crtc.primary->fb);
+ ret = vmw_kms_helper_resource_prepare(srf, true);
+ if (ret)
+ return ret;
+
+ sdirty.base.fifo_commit = vmw_sou_surface_fifo_commit;
+ sdirty.base.clip = vmw_sou_surface_clip;
+ sdirty.base.dev_priv = dev_priv;
+ sdirty.base.fifo_reserve_size = sizeof(struct vmw_kms_sou_dirty_cmd) +
+ sizeof(SVGASignedRect) * num_clips;
+
+ sdirty.sid = srf->id;
+ sdirty.left = sdirty.top = S32_MAX;
+ sdirty.right = sdirty.bottom = S32_MIN;
+ sdirty.dst_x = dest_x;
+ sdirty.dst_y = dest_y;
+
+ ret = vmw_kms_helper_dirty(dev_priv, framebuffer, clips, vclips,
+ dest_x, dest_y, num_clips, inc,
+ &sdirty.base);
+ vmw_kms_helper_resource_finish(srf, out_fence);
+
+ return ret;
+}
+
+/**
+ * vmw_sou_dmabuf_fifo_commit - Callback to submit a set of readback clips.
+ *
+ * @dirty: The closure structure.
+ *
+ * Commits a previously built command buffer of readback clips.
+ */
+static void vmw_sou_dmabuf_fifo_commit(struct vmw_kms_dirty *dirty)
+{
+ vmw_fifo_commit(dirty->dev_priv,
+ sizeof(struct vmw_kms_sou_dmabuf_blit) *
+ dirty->num_hits);
+}
+
+/**
+ * vmw_sou_dmabuf_clip - Callback to encode a readback cliprect.
+ *
+ * @dirty: The closure structure
+ *
+ * Encodes a BLIT_GMRFB_TO_SCREEN cliprect.
+ */
+static void vmw_sou_dmabuf_clip(struct vmw_kms_dirty *dirty)
+{
+ struct vmw_kms_sou_dmabuf_blit *blit = dirty->cmd;
+
+ blit += dirty->num_hits;
+ blit->header = SVGA_CMD_BLIT_GMRFB_TO_SCREEN;
+ blit->body.destScreenId = dirty->unit->unit;
+ blit->body.srcOrigin.x = dirty->fb_x;
+ blit->body.srcOrigin.y = dirty->fb_y;
+ blit->body.destRect.left = dirty->unit_x1;
+ blit->body.destRect.top = dirty->unit_y1;
+ blit->body.destRect.right = dirty->unit_x2;
+ blit->body.destRect.bottom = dirty->unit_y2;
+ dirty->num_hits++;
+}
+
+/**
+ * vmw_kms_do_dmabuf_dirty - Dirty part of a dma-buffer backed framebuffer
+ *
+ * @dev_priv: Pointer to the device private structure.
+ * @framebuffer: Pointer to the dma-buffer backed framebuffer.
+ * @clips: Array of clip rects.
+ * @num_clips: Number of clip rects in @clips.
+ * @increment: Increment to use when looping over @clips.
+ * @interruptible: Whether to perform waits interruptible if possible.
+ * @out_fence: If non-NULL, will return a ref-counted pointer to a
+ * struct vmw_fence_obj. The returned fence pointer may be NULL in which
+ * case the device has already synchronized.
+ *
+ * Returns 0 on success, negative error code on failure. -ERESTARTSYS if
+ * interrupted.
+ */
+int vmw_kms_sou_do_dmabuf_dirty(struct vmw_private *dev_priv,
+ struct vmw_framebuffer *framebuffer,
+ struct drm_clip_rect *clips,
+ unsigned num_clips, int increment,
+ bool interruptible,
+ struct vmw_fence_obj **out_fence)
+{
+ struct vmw_dma_buffer *buf =
+ container_of(framebuffer, struct vmw_framebuffer_dmabuf,
+ base)->buffer;
+ struct vmw_kms_dirty dirty;
+ int ret;
+
+ ret = vmw_kms_helper_buffer_prepare(dev_priv, buf, interruptible,
+ false);
+ if (ret)
+ return ret;
+
+ ret = do_dmabuf_define_gmrfb(dev_priv, framebuffer);
+ if (unlikely(ret != 0))
+ goto out_revert;
+
+ dirty.fifo_commit = vmw_sou_dmabuf_fifo_commit;
+ dirty.clip = vmw_sou_dmabuf_clip;
+ dirty.fifo_reserve_size = sizeof(struct vmw_kms_sou_dmabuf_blit) *
+ num_clips;
+ ret = vmw_kms_helper_dirty(dev_priv, framebuffer, clips, NULL,
+ 0, 0, num_clips, increment, &dirty);
+ vmw_kms_helper_buffer_finish(dev_priv, NULL, buf, out_fence, NULL);
+
+ return ret;
+
+out_revert:
+ vmw_kms_helper_buffer_revert(buf);
+
+ return ret;
+}
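The dmabuf path above illustrates the general vmw_kms_dirty contract: a clip callback encodes one rectangle per hit, and a fifo_commit callback submits num_hits of them. A hypothetical skeleton for a new blit type following that pattern; the example_ names are placeholders:

static void example_clip(struct vmw_kms_dirty *dirty)
{
	struct vmw_kms_sou_dmabuf_blit *blit = dirty->cmd;

	/* Encode one cliprect at the next free slot. */
	blit += dirty->num_hits;
	blit->header = SVGA_CMD_BLIT_GMRFB_TO_SCREEN;
	/* ... fill blit->body from dirty->unit_* and dirty->fb_* ... */
	dirty->num_hits++;
}

static void example_fifo_commit(struct vmw_kms_dirty *dirty)
{
	/* Submit only what was actually encoded. */
	vmw_fifo_commit(dirty->dev_priv,
			sizeof(struct vmw_kms_sou_dmabuf_blit) *
			dirty->num_hits);
}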
+
+
+/**
+ * vmw_sou_readback_fifo_commit - Callback to submit a set of readback clips.
+ *
+ * @dirty: The closure structure.
+ *
+ * Commits a previously built command buffer of readback clips.
+ */
+static void vmw_sou_readback_fifo_commit(struct vmw_kms_dirty *dirty)
+{
+ vmw_fifo_commit(dirty->dev_priv,
+ sizeof(struct vmw_kms_sou_readback_blit) *
+ dirty->num_hits);
+}
+
+/**
+ * vmw_sou_readback_clip - Callback to encode a readback cliprect.
+ *
+ * @dirty: The closure structure
+ *
+ * Encodes a BLIT_SCREEN_TO_GMRFB cliprect.
+ */
+static void vmw_sou_readback_clip(struct vmw_kms_dirty *dirty)
+{
+ struct vmw_kms_sou_readback_blit *blit = dirty->cmd;
+
+ blit += dirty->num_hits;
+ blit->header = SVGA_CMD_BLIT_SCREEN_TO_GMRFB;
+ blit->body.srcScreenId = dirty->unit->unit;
+ blit->body.destOrigin.x = dirty->fb_x;
+ blit->body.destOrigin.y = dirty->fb_y;
+ blit->body.srcRect.left = dirty->unit_x1;
+ blit->body.srcRect.top = dirty->unit_y1;
+ blit->body.srcRect.right = dirty->unit_x2;
+ blit->body.srcRect.bottom = dirty->unit_y2;
+ dirty->num_hits++;
+}
+
+/**
+ * vmw_kms_sou_readback - Perform a readback from the screen object system to
+ * a dma-buffer backed framebuffer.
+ *
+ * @dev_priv: Pointer to the device private structure.
+ * @file_priv: Pointer to a struct drm_file identifying the caller.
+ * Must be set to NULL if @user_fence_rep is NULL.
+ * @vfb: Pointer to the dma-buffer backed framebuffer.
+ * @user_fence_rep: User-space provided structure for fence information.
+ * Must be set to non-NULL if @file_priv is non-NULL.
+ * @vclips: Array of clip rects.
+ * @num_clips: Number of clip rects in @vclips.
+ *
+ * Returns 0 on success, negative error code on failure. -ERESTARTSYS if
+ * interrupted.
+ */
+int vmw_kms_sou_readback(struct vmw_private *dev_priv,
+ struct drm_file *file_priv,
+ struct vmw_framebuffer *vfb,
+ struct drm_vmw_fence_rep __user *user_fence_rep,
+ struct drm_vmw_rect *vclips,
+ uint32_t num_clips)
+{
+ struct vmw_dma_buffer *buf =
+ container_of(vfb, struct vmw_framebuffer_dmabuf, base)->buffer;
+ struct vmw_kms_dirty dirty;
+ int ret;
+
+ ret = vmw_kms_helper_buffer_prepare(dev_priv, buf, true, false);
+ if (ret)
+ return ret;
+
+ ret = do_dmabuf_define_gmrfb(dev_priv, vfb);
+ if (unlikely(ret != 0))
+ goto out_revert;
+
+ dirty.fifo_commit = vmw_sou_readback_fifo_commit;
+ dirty.clip = vmw_sou_readback_clip;
+ dirty.fifo_reserve_size = sizeof(struct vmw_kms_sou_readback_blit) *
+ num_clips;
+ ret = vmw_kms_helper_dirty(dev_priv, vfb, NULL, vclips,
+ 0, 0, num_clips, 1, &dirty);
+ vmw_kms_helper_buffer_finish(dev_priv, file_priv, buf, NULL,
+ user_fence_rep);
+
+ return ret;
+
+out_revert:
+ vmw_kms_helper_buffer_revert(buf);
+
+ return ret;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
index 6a4584a43aa6..bba1ee395478 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
@@ -1,6 +1,6 @@
/**************************************************************************
*
- * Copyright © 2009-2012 VMware, Inc., Palo Alto, CA., USA
+ * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -27,12 +27,15 @@
#include "vmwgfx_drv.h"
#include "vmwgfx_resource_priv.h"
+#include "vmwgfx_binding.h"
#include "ttm/ttm_placement.h"
struct vmw_shader {
struct vmw_resource res;
SVGA3dShaderType type;
uint32_t size;
+ uint8_t num_input_sig;
+ uint8_t num_output_sig;
};
struct vmw_user_shader {
@@ -40,8 +43,18 @@ struct vmw_user_shader {
struct vmw_shader shader;
};
+struct vmw_dx_shader {
+ struct vmw_resource res;
+ struct vmw_resource *ctx;
+ struct vmw_resource *cotable;
+ u32 id;
+ bool committed;
+ struct list_head cotable_head;
+};
+
static uint64_t vmw_user_shader_size;
static uint64_t vmw_shader_size;
+static size_t vmw_shader_dx_size;
static void vmw_user_shader_free(struct vmw_resource *res);
static struct vmw_resource *
@@ -55,6 +68,18 @@ static int vmw_gb_shader_unbind(struct vmw_resource *res,
struct ttm_validate_buffer *val_buf);
static int vmw_gb_shader_destroy(struct vmw_resource *res);
+static int vmw_dx_shader_create(struct vmw_resource *res);
+static int vmw_dx_shader_bind(struct vmw_resource *res,
+ struct ttm_validate_buffer *val_buf);
+static int vmw_dx_shader_unbind(struct vmw_resource *res,
+ bool readback,
+ struct ttm_validate_buffer *val_buf);
+static void vmw_dx_shader_commit_notify(struct vmw_resource *res,
+ enum vmw_cmdbuf_res_state state);
+static bool vmw_shader_id_ok(u32 user_key, SVGA3dShaderType shader_type);
+static u32 vmw_shader_key(u32 user_key, SVGA3dShaderType shader_type);
+static uint64_t vmw_user_shader_size;
+
static const struct vmw_user_resource_conv user_shader_conv = {
.object_type = VMW_RES_SHADER,
.base_obj_to_res = vmw_user_shader_base_to_res,
@@ -77,6 +102,24 @@ static const struct vmw_res_func vmw_gb_shader_func = {
.unbind = vmw_gb_shader_unbind
};
+static const struct vmw_res_func vmw_dx_shader_func = {
+ .res_type = vmw_res_shader,
+ .needs_backup = true,
+ .may_evict = false,
+ .type_name = "dx shaders",
+ .backup_placement = &vmw_mob_placement,
+ .create = vmw_dx_shader_create,
+ /*
+ * The destroy callback is only called with a committed resource on
+ * context destroy, in which case we destroy the cotable anyway,
+ * so there's no need to destroy DX shaders separately.
+ */
+ .destroy = NULL,
+ .bind = vmw_dx_shader_bind,
+ .unbind = vmw_dx_shader_unbind,
+ .commit_notify = vmw_dx_shader_commit_notify,
+};
+
/**
* Shader management:
*/
@@ -87,25 +130,42 @@ vmw_res_to_shader(struct vmw_resource *res)
return container_of(res, struct vmw_shader, res);
}
+/**
+ * vmw_res_to_dx_shader - typecast a struct vmw_resource to a
+ * struct vmw_dx_shader
+ *
+ * @res: Pointer to the struct vmw_resource.
+ */
+static inline struct vmw_dx_shader *
+vmw_res_to_dx_shader(struct vmw_resource *res)
+{
+ return container_of(res, struct vmw_dx_shader, res);
+}
+
static void vmw_hw_shader_destroy(struct vmw_resource *res)
{
- (void) vmw_gb_shader_destroy(res);
+ if (likely(res->func->destroy))
+ (void) res->func->destroy(res);
+ else
+ res->id = -1;
}
+
static int vmw_gb_shader_init(struct vmw_private *dev_priv,
struct vmw_resource *res,
uint32_t size,
uint64_t offset,
SVGA3dShaderType type,
+ uint8_t num_input_sig,
+ uint8_t num_output_sig,
struct vmw_dma_buffer *byte_code,
void (*res_free) (struct vmw_resource *res))
{
struct vmw_shader *shader = vmw_res_to_shader(res);
int ret;
- ret = vmw_resource_init(dev_priv, res, true,
- res_free, &vmw_gb_shader_func);
-
+ ret = vmw_resource_init(dev_priv, res, true, res_free,
+ &vmw_gb_shader_func);
if (unlikely(ret != 0)) {
if (res_free)
@@ -122,11 +182,17 @@ static int vmw_gb_shader_init(struct vmw_private *dev_priv,
}
shader->size = size;
shader->type = type;
+ shader->num_input_sig = num_input_sig;
+ shader->num_output_sig = num_output_sig;
vmw_resource_activate(res, vmw_hw_shader_destroy);
return 0;
}
+/*
+ * GB shader code:
+ */
+
static int vmw_gb_shader_create(struct vmw_resource *res)
{
struct vmw_private *dev_priv = res->dev_priv;
@@ -165,7 +231,7 @@ static int vmw_gb_shader_create(struct vmw_resource *res)
cmd->body.type = shader->type;
cmd->body.sizeInBytes = shader->size;
vmw_fifo_commit(dev_priv, sizeof(*cmd));
- (void) vmw_3d_resource_inc(dev_priv, false);
+ vmw_fifo_resource_inc(dev_priv);
return 0;
@@ -259,7 +325,7 @@ static int vmw_gb_shader_destroy(struct vmw_resource *res)
return 0;
mutex_lock(&dev_priv->binding_mutex);
- vmw_context_binding_res_list_scrub(&res->binding_head);
+ vmw_binding_res_list_scrub(&res->binding_head);
cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
if (unlikely(cmd == NULL)) {
@@ -275,12 +341,327 @@ static int vmw_gb_shader_destroy(struct vmw_resource *res)
vmw_fifo_commit(dev_priv, sizeof(*cmd));
mutex_unlock(&dev_priv->binding_mutex);
vmw_resource_release_id(res);
- vmw_3d_resource_dec(dev_priv, false);
+ vmw_fifo_resource_dec(dev_priv);
+
+ return 0;
+}
+
+/*
+ * DX shader code:
+ */
+
+/**
+ * vmw_dx_shader_commit_notify - Notify that a shader operation has been
+ * committed to hardware from a user-supplied command stream.
+ *
+ * @res: Pointer to the shader resource.
+ * @state: Indicating whether a creation or removal has been committed.
+ *
+ */
+static void vmw_dx_shader_commit_notify(struct vmw_resource *res,
+ enum vmw_cmdbuf_res_state state)
+{
+ struct vmw_dx_shader *shader = vmw_res_to_dx_shader(res);
+ struct vmw_private *dev_priv = res->dev_priv;
+
+ if (state == VMW_CMDBUF_RES_ADD) {
+ mutex_lock(&dev_priv->binding_mutex);
+ vmw_cotable_add_resource(shader->cotable,
+ &shader->cotable_head);
+ shader->committed = true;
+ res->id = shader->id;
+ mutex_unlock(&dev_priv->binding_mutex);
+ } else {
+ mutex_lock(&dev_priv->binding_mutex);
+ list_del_init(&shader->cotable_head);
+ shader->committed = false;
+ res->id = -1;
+ mutex_unlock(&dev_priv->binding_mutex);
+ }
+}
+
+/**
+ * vmw_dx_shader_unscrub - Have the device reattach a MOB to a DX shader.
+ *
+ * @res: The shader resource
+ *
+ * This function reverts a scrub operation.
+ */
+static int vmw_dx_shader_unscrub(struct vmw_resource *res)
+{
+ struct vmw_dx_shader *shader = vmw_res_to_dx_shader(res);
+ struct vmw_private *dev_priv = res->dev_priv;
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDXBindShader body;
+ } *cmd;
+
+ if (!list_empty(&shader->cotable_head) || !shader->committed)
+ return 0;
+
+ cmd = vmw_fifo_reserve_dx(dev_priv, sizeof(*cmd),
+ shader->ctx->id);
+ if (unlikely(cmd == NULL)) {
+ DRM_ERROR("Failed reserving FIFO space for shader "
+ "scrubbing.\n");
+ return -ENOMEM;
+ }
+
+ cmd->header.id = SVGA_3D_CMD_DX_BIND_SHADER;
+ cmd->header.size = sizeof(cmd->body);
+ cmd->body.cid = shader->ctx->id;
+ cmd->body.shid = shader->id;
+ cmd->body.mobid = res->backup->base.mem.start;
+ cmd->body.offsetInBytes = res->backup_offset;
+ vmw_fifo_commit(dev_priv, sizeof(*cmd));
+
+ vmw_cotable_add_resource(shader->cotable, &shader->cotable_head);
+
+ return 0;
+}
+
+/**
+ * vmw_dx_shader_create - The DX shader create callback
+ *
+ * @res: The DX shader resource
+ *
+ * The create callback is called as part of resource validation and
+ * makes sure that we unscrub the shader if it's previously been scrubbed.
+ */
+static int vmw_dx_shader_create(struct vmw_resource *res)
+{
+ struct vmw_private *dev_priv = res->dev_priv;
+ struct vmw_dx_shader *shader = vmw_res_to_dx_shader(res);
+ int ret = 0;
+
+ WARN_ON_ONCE(!shader->committed);
+
+ if (!list_empty(&res->mob_head)) {
+ mutex_lock(&dev_priv->binding_mutex);
+ ret = vmw_dx_shader_unscrub(res);
+ mutex_unlock(&dev_priv->binding_mutex);
+ }
+
+ res->id = shader->id;
+ return ret;
+}
+
+/**
+ * vmw_dx_shader_bind - The DX shader bind callback
+ *
+ * @res: The DX shader resource
+ * @val_buf: Pointer to the validate buffer.
+ *
+ */
+static int vmw_dx_shader_bind(struct vmw_resource *res,
+ struct ttm_validate_buffer *val_buf)
+{
+ struct vmw_private *dev_priv = res->dev_priv;
+ struct ttm_buffer_object *bo = val_buf->bo;
+
+ BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
+ mutex_lock(&dev_priv->binding_mutex);
+ vmw_dx_shader_unscrub(res);
+ mutex_unlock(&dev_priv->binding_mutex);
+
+ return 0;
+}
+
+/**
+ * vmw_dx_shader_scrub - Have the device unbind a MOB from a DX shader.
+ *
+ * @res: The shader resource
+ *
+ * This function unbinds a MOB from the DX shader without requiring the
+ * MOB dma_buffer to be reserved. The driver still considers the MOB bound.
+ * However, once the driver eventually decides to unbind the MOB, it doesn't
+ * need to access the context.
+ */
+static int vmw_dx_shader_scrub(struct vmw_resource *res)
+{
+ struct vmw_dx_shader *shader = vmw_res_to_dx_shader(res);
+ struct vmw_private *dev_priv = res->dev_priv;
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDXBindShader body;
+ } *cmd;
+
+ if (list_empty(&shader->cotable_head))
+ return 0;
+
+ WARN_ON_ONCE(!shader->committed);
+ cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+ if (unlikely(cmd == NULL)) {
+ DRM_ERROR("Failed reserving FIFO space for shader "
+ "scrubbing.\n");
+ return -ENOMEM;
+ }
+
+ cmd->header.id = SVGA_3D_CMD_DX_BIND_SHADER;
+ cmd->header.size = sizeof(cmd->body);
+ cmd->body.cid = shader->ctx->id;
+ cmd->body.shid = res->id;
+ cmd->body.mobid = SVGA3D_INVALID_ID;
+ cmd->body.offsetInBytes = 0;
+ vmw_fifo_commit(dev_priv, sizeof(*cmd));
+ res->id = -1;
+ list_del_init(&shader->cotable_head);
return 0;
}
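Scrub and unscrub are deliberately symmetric. A minimal sketch of a full round trip, illustrative only (in-tree callers reach these through the bind/create callbacks); binding_mutex must be held as in the callers in this file, and example_shader_rebind is hypothetical:

static int example_shader_rebind(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	int ret;

	mutex_lock(&dev_priv->binding_mutex);
	/* Detach the MOB; the device forgets the binding, res->id goes to -1. */
	ret = vmw_dx_shader_scrub(res);
	if (!ret)
		/* Reattach the MOB at res->backup_offset. */
		ret = vmw_dx_shader_unscrub(res);
	mutex_unlock(&dev_priv->binding_mutex);

	return ret;
}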
/**
+ * vmw_dx_shader_unbind - The dx shader unbind callback.
+ *
+ * @res: The shader resource
+ * @readback: Whether this is a readback unbind. Currently unused.
+ * @val_buf: MOB buffer information.
+ */
+static int vmw_dx_shader_unbind(struct vmw_resource *res,
+ bool readback,
+ struct ttm_validate_buffer *val_buf)
+{
+ struct vmw_private *dev_priv = res->dev_priv;
+ struct vmw_fence_obj *fence;
+ int ret;
+
+ BUG_ON(res->backup->base.mem.mem_type != VMW_PL_MOB);
+
+ mutex_lock(&dev_priv->binding_mutex);
+ ret = vmw_dx_shader_scrub(res);
+ mutex_unlock(&dev_priv->binding_mutex);
+
+ if (ret)
+ return ret;
+
+ (void) vmw_execbuf_fence_commands(NULL, dev_priv,
+ &fence, NULL);
+ vmw_fence_single_bo(val_buf->bo, fence);
+
+ if (likely(fence != NULL))
+ vmw_fence_obj_unreference(&fence);
+
+ return 0;
+}
+
+/**
+ * vmw_dx_shader_cotable_list_scrub - The cotable unbind_func callback for
+ * DX shaders.
+ *
+ * @dev_priv: Pointer to device private structure.
+ * @list: The list of cotable resources.
+ * @readback: Whether the call was part of a readback unbind.
+ *
+ * Scrubs all shader MOBs so that any subsequent shader unbind or shader
+ * destroy operation won't need to swap in the context.
+ */
+void vmw_dx_shader_cotable_list_scrub(struct vmw_private *dev_priv,
+ struct list_head *list,
+ bool readback)
+{
+ struct vmw_dx_shader *entry, *next;
+
+ WARN_ON_ONCE(!mutex_is_locked(&dev_priv->binding_mutex));
+
+ list_for_each_entry_safe(entry, next, list, cotable_head) {
+ WARN_ON(vmw_dx_shader_scrub(&entry->res));
+ if (!readback)
+ entry->committed = false;
+ }
+}
+
+/**
+ * vmw_dx_shader_res_free - The DX shader free callback
+ *
+ * @res: The shader resource
+ *
+ * Frees the DX shader resource and updates memory accounting.
+ */
+static void vmw_dx_shader_res_free(struct vmw_resource *res)
+{
+ struct vmw_private *dev_priv = res->dev_priv;
+ struct vmw_dx_shader *shader = vmw_res_to_dx_shader(res);
+
+ vmw_resource_unreference(&shader->cotable);
+ kfree(shader);
+ ttm_mem_global_free(vmw_mem_glob(dev_priv), vmw_shader_dx_size);
+}
+
+/**
+ * vmw_dx_shader_add - Add a shader resource as a command buffer managed
+ * resource.
+ *
+ * @man: The command buffer resource manager.
+ * @ctx: Pointer to the context resource.
+ * @user_key: The id used for this shader.
+ * @shader_type: The shader type.
+ * @list: The list of staged command buffer managed resources.
+ */
+int vmw_dx_shader_add(struct vmw_cmdbuf_res_manager *man,
+ struct vmw_resource *ctx,
+ u32 user_key,
+ SVGA3dShaderType shader_type,
+ struct list_head *list)
+{
+ struct vmw_dx_shader *shader;
+ struct vmw_resource *res;
+ struct vmw_private *dev_priv = ctx->dev_priv;
+ int ret;
+
+ if (!vmw_shader_dx_size)
+ vmw_shader_dx_size = ttm_round_pot(sizeof(*shader));
+
+ if (!vmw_shader_id_ok(user_key, shader_type))
+ return -EINVAL;
+
+ ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv), vmw_shader_dx_size,
+ false, true);
+ if (ret) {
+ if (ret != -ERESTARTSYS)
+ DRM_ERROR("Out of graphics memory for shader "
+ "creation.\n");
+ return ret;
+ }
+
+ shader = kmalloc(sizeof(*shader), GFP_KERNEL);
+ if (!shader) {
+ ttm_mem_global_free(vmw_mem_glob(dev_priv), vmw_shader_dx_size);
+ return -ENOMEM;
+ }
+
+ res = &shader->res;
+ shader->ctx = ctx;
+ shader->cotable = vmw_context_cotable(ctx, SVGA_COTABLE_DXSHADER);
+ shader->id = user_key;
+ shader->committed = false;
+ INIT_LIST_HEAD(&shader->cotable_head);
+ ret = vmw_resource_init(dev_priv, res, true,
+ vmw_dx_shader_res_free, &vmw_dx_shader_func);
+ if (ret)
+ goto out_resource_init;
+
+ /*
+ * The user_key name-space is not per shader type for DX shaders,
+ * so when hashing, use a single zero shader type.
+ */
+ ret = vmw_cmdbuf_res_add(man, vmw_cmdbuf_res_shader,
+ vmw_shader_key(user_key, 0),
+ res, list);
+ if (ret)
+ goto out_resource_init;
+
+ res->id = shader->id;
+ vmw_resource_activate(res, vmw_hw_shader_destroy);
+
+out_resource_init:
+ vmw_resource_unreference(&res);
+
+ return ret;
+}
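+
+/*
+ * Editor's note on the exit path above (a sketch, not authoritative):
+ * out_resource_init is reached on success as well as on failure. The
+ * unreference drops only the local reference taken at init time; on
+ * success the staged cmdbuf resource entry is assumed to hold its own
+ * reference, so the shader stays alive until the staged list is
+ * committed or reverted.
+ */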
+
+
+
+/**
* User-space shader management:
*/
@@ -341,6 +722,8 @@ static int vmw_user_shader_alloc(struct vmw_private *dev_priv,
size_t shader_size,
size_t offset,
SVGA3dShaderType shader_type,
+ uint8_t num_input_sig,
+ uint8_t num_output_sig,
struct ttm_object_file *tfile,
u32 *handle)
{
@@ -383,7 +766,8 @@ static int vmw_user_shader_alloc(struct vmw_private *dev_priv,
*/
ret = vmw_gb_shader_init(dev_priv, res, shader_size,
- offset, shader_type, buffer,
+ offset, shader_type, num_input_sig,
+ num_output_sig, buffer,
vmw_user_shader_free);
if (unlikely(ret != 0))
goto out;
@@ -407,11 +791,11 @@ out:
}
-struct vmw_resource *vmw_shader_alloc(struct vmw_private *dev_priv,
- struct vmw_dma_buffer *buffer,
- size_t shader_size,
- size_t offset,
- SVGA3dShaderType shader_type)
+static struct vmw_resource *vmw_shader_alloc(struct vmw_private *dev_priv,
+ struct vmw_dma_buffer *buffer,
+ size_t shader_size,
+ size_t offset,
+ SVGA3dShaderType shader_type)
{
struct vmw_shader *shader;
struct vmw_resource *res;
@@ -449,7 +833,7 @@ struct vmw_resource *vmw_shader_alloc(struct vmw_private *dev_priv,
* From here on, the destructor takes over resource freeing.
*/
ret = vmw_gb_shader_init(dev_priv, res, shader_size,
- offset, shader_type, buffer,
+ offset, shader_type, 0, 0, buffer,
vmw_shader_free);
out_err:
@@ -457,19 +841,20 @@ out_err:
}
-int vmw_shader_define_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+static int vmw_shader_define(struct drm_device *dev, struct drm_file *file_priv,
+ enum drm_vmw_shader_type shader_type_drm,
+ u32 buffer_handle, size_t size, size_t offset,
+ uint8_t num_input_sig, uint8_t num_output_sig,
+ uint32_t *shader_handle)
{
struct vmw_private *dev_priv = vmw_priv(dev);
- struct drm_vmw_shader_create_arg *arg =
- (struct drm_vmw_shader_create_arg *)data;
struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
struct vmw_dma_buffer *buffer = NULL;
SVGA3dShaderType shader_type;
int ret;
- if (arg->buffer_handle != SVGA3D_INVALID_ID) {
- ret = vmw_user_dmabuf_lookup(tfile, arg->buffer_handle,
+ if (buffer_handle != SVGA3D_INVALID_ID) {
+ ret = vmw_user_dmabuf_lookup(tfile, buffer_handle,
&buffer);
if (unlikely(ret != 0)) {
DRM_ERROR("Could not find buffer for shader "
@@ -478,23 +863,20 @@ int vmw_shader_define_ioctl(struct drm_device *dev, void *data,
}
if ((u64)buffer->base.num_pages * PAGE_SIZE <
- (u64)arg->size + (u64)arg->offset) {
+ (u64)size + (u64)offset) {
DRM_ERROR("Illegal buffer- or shader size.\n");
ret = -EINVAL;
goto out_bad_arg;
}
}
- switch (arg->shader_type) {
+ switch (shader_type_drm) {
case drm_vmw_shader_type_vs:
shader_type = SVGA3D_SHADERTYPE_VS;
break;
case drm_vmw_shader_type_ps:
shader_type = SVGA3D_SHADERTYPE_PS;
break;
- case drm_vmw_shader_type_gs:
- shader_type = SVGA3D_SHADERTYPE_GS;
- break;
default:
DRM_ERROR("Illegal shader type.\n");
ret = -EINVAL;
@@ -505,8 +887,9 @@ int vmw_shader_define_ioctl(struct drm_device *dev, void *data,
if (unlikely(ret != 0))
goto out_bad_arg;
- ret = vmw_user_shader_alloc(dev_priv, buffer, arg->size, arg->offset,
- shader_type, tfile, &arg->shader_handle);
+ ret = vmw_user_shader_alloc(dev_priv, buffer, size, offset,
+ shader_type, num_input_sig,
+ num_output_sig, tfile, shader_handle);
ttm_read_unlock(&dev_priv->reservation_sem);
out_bad_arg:
@@ -515,7 +898,7 @@ out_bad_arg:
}
/**
- * vmw_compat_shader_id_ok - Check whether a compat shader user key and
+ * vmw_shader_id_ok - Check whether a compat shader user key and
* shader type are within valid bounds.
*
* @user_key: User space id of the shader.
@@ -523,13 +906,13 @@ out_bad_arg:
*
 * Returns true if valid, false if not.
*/
-static bool vmw_compat_shader_id_ok(u32 user_key, SVGA3dShaderType shader_type)
+static bool vmw_shader_id_ok(u32 user_key, SVGA3dShaderType shader_type)
{
return user_key <= ((1 << 20) - 1) && (unsigned) shader_type < 16;
}
/**
- * vmw_compat_shader_key - Compute a hash key suitable for a compat shader.
+ * vmw_shader_key - Compute a hash key suitable for a compat shader.
*
* @user_key: User space id of the shader.
* @shader_type: Shader type.
@@ -537,13 +920,13 @@ static bool vmw_compat_shader_id_ok(u32 user_key, SVGA3dShaderType shader_type)
* Returns a hash key suitable for a command buffer managed resource
* manager hash table.
*/
-static u32 vmw_compat_shader_key(u32 user_key, SVGA3dShaderType shader_type)
+static u32 vmw_shader_key(u32 user_key, SVGA3dShaderType shader_type)
{
return user_key | (shader_type << 20);
}
/**
- * vmw_compat_shader_remove - Stage a compat shader for removal.
+ * vmw_shader_remove - Stage a compat shader for removal.
*
* @man: Pointer to the compat shader manager identifying the shader namespace.
* @user_key: The key that is used to identify the shader. The key is
@@ -551,17 +934,18 @@ static u32 vmw_compat_shader_key(u32 user_key, SVGA3dShaderType shader_type)
* @shader_type: Shader type.
* @list: Caller's list of staged command buffer resource actions.
*/
-int vmw_compat_shader_remove(struct vmw_cmdbuf_res_manager *man,
- u32 user_key, SVGA3dShaderType shader_type,
- struct list_head *list)
+int vmw_shader_remove(struct vmw_cmdbuf_res_manager *man,
+ u32 user_key, SVGA3dShaderType shader_type,
+ struct list_head *list)
{
- if (!vmw_compat_shader_id_ok(user_key, shader_type))
+ struct vmw_resource *dummy;
+
+ if (!vmw_shader_id_ok(user_key, shader_type))
return -EINVAL;
- return vmw_cmdbuf_res_remove(man, vmw_cmdbuf_res_compat_shader,
- vmw_compat_shader_key(user_key,
- shader_type),
- list);
+ return vmw_cmdbuf_res_remove(man, vmw_cmdbuf_res_shader,
+ vmw_shader_key(user_key, shader_type),
+ list, &dummy);
}
/**
@@ -591,7 +975,7 @@ int vmw_compat_shader_add(struct vmw_private *dev_priv,
int ret;
struct vmw_resource *res;
- if (!vmw_compat_shader_id_ok(user_key, shader_type))
+ if (!vmw_shader_id_ok(user_key, shader_type))
return -EINVAL;
/* Allocate and pin a DMA buffer */
@@ -628,8 +1012,8 @@ int vmw_compat_shader_add(struct vmw_private *dev_priv,
if (unlikely(ret != 0))
goto no_reserve;
- ret = vmw_cmdbuf_res_add(man, vmw_cmdbuf_res_compat_shader,
- vmw_compat_shader_key(user_key, shader_type),
+ ret = vmw_cmdbuf_res_add(man, vmw_cmdbuf_res_shader,
+ vmw_shader_key(user_key, shader_type),
res, list);
vmw_resource_unreference(&res);
no_reserve:
@@ -639,7 +1023,7 @@ out:
}
/**
- * vmw_compat_shader_lookup - Look up a compat shader
+ * vmw_shader_lookup - Look up a compat shader
*
* @man: Pointer to the command buffer managed resource manager identifying
* the shader namespace.
@@ -650,14 +1034,26 @@ out:
* found. An error pointer otherwise.
*/
struct vmw_resource *
-vmw_compat_shader_lookup(struct vmw_cmdbuf_res_manager *man,
- u32 user_key,
- SVGA3dShaderType shader_type)
+vmw_shader_lookup(struct vmw_cmdbuf_res_manager *man,
+ u32 user_key,
+ SVGA3dShaderType shader_type)
{
- if (!vmw_compat_shader_id_ok(user_key, shader_type))
+ if (!vmw_shader_id_ok(user_key, shader_type))
return ERR_PTR(-EINVAL);
- return vmw_cmdbuf_res_lookup(man, vmw_cmdbuf_res_compat_shader,
- vmw_compat_shader_key(user_key,
- shader_type));
+ return vmw_cmdbuf_res_lookup(man, vmw_cmdbuf_res_shader,
+ vmw_shader_key(user_key, shader_type));
+}
+
+int vmw_shader_define_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_vmw_shader_create_arg *arg =
+ (struct drm_vmw_shader_create_arg *)data;
+
+ return vmw_shader_define(dev, file_priv, arg->shader_type,
+ arg->buffer_handle,
+ arg->size, arg->offset,
+ 0, 0,
+ &arg->shader_handle);
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_so.c b/drivers/gpu/drm/vmwgfx/vmwgfx_so.c
new file mode 100644
index 000000000000..5a73eebd0f35
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_so.c
@@ -0,0 +1,555 @@
+/**************************************************************************
+ * Copyright © 2014-2015 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "vmwgfx_drv.h"
+#include "vmwgfx_resource_priv.h"
+#include "vmwgfx_so.h"
+#include "vmwgfx_binding.h"
+
+/*
+ * Currently, the only reason we need to keep track of views is that if we
+ * destroy a hardware surface, all views pointing to it must also be
+ * destroyed; otherwise the device will raise an error.
+ * So in particular, if a surface is evicted, we must destroy all views
+ * pointing to it, and all context bindings of those views. Similarly, we must
+ * restore the view bindings, views and surfaces pointed to by the views when
+ * a context is referenced in the command stream.
+ */
+
+/**
+ * struct vmw_view - view metadata
+ *
+ * @res: The struct vmw_resource we derive from
+ * @ctx: Non-refcounted pointer to the context this view belongs to.
+ * @srf: Refcounted pointer to the surface pointed to by this view.
+ * @cotable: Refcounted pointer to the cotable holding this view.
+ * @srf_head: List head for the surface-to-view list.
+ * @cotable_head: List head for the cotable-to-view list.
+ * @view_type: View type.
+ * @view_id: User-space per context view id. Currently used also as per
+ * context device view id.
+ * @cmd_size: Size of the SVGA3D define view command that we've copied from the
+ * command stream.
+ * @committed: Whether the view is actually created or pending creation at the
+ * device level.
+ * @cmd: The SVGA3D define view command copied from the command stream.
+ */
+struct vmw_view {
+ struct rcu_head rcu;
+ struct vmw_resource res;
+ struct vmw_resource *ctx; /* Immutable */
+ struct vmw_resource *srf; /* Immutable */
+ struct vmw_resource *cotable; /* Immutable */
+ struct list_head srf_head; /* Protected by binding_mutex */
+ struct list_head cotable_head; /* Protected by binding_mutex */
+ unsigned view_type; /* Immutable */
+ unsigned view_id; /* Immutable */
+ u32 cmd_size; /* Immutable */
+ bool committed; /* Protected by binding_mutex */
+ u32 cmd[1]; /* Immutable */
+};
+
+static int vmw_view_create(struct vmw_resource *res);
+static int vmw_view_destroy(struct vmw_resource *res);
+static void vmw_hw_view_destroy(struct vmw_resource *res);
+static void vmw_view_commit_notify(struct vmw_resource *res,
+ enum vmw_cmdbuf_res_state state);
+
+static const struct vmw_res_func vmw_view_func = {
+ .res_type = vmw_res_view,
+ .needs_backup = false,
+ .may_evict = false,
+ .type_name = "DX view",
+ .backup_placement = NULL,
+ .create = vmw_view_create,
+ .commit_notify = vmw_view_commit_notify,
+};
+
+/**
+ * struct vmw_view_define - view define command body stub
+ *
+ * @view_id: The device id of the view being defined
+ * @sid: The surface id of the view being defined
+ *
+ * This generic struct is used by the code to change @view_id and @sid of a
+ * saved view define command.
+ */
+struct vmw_view_define {
+ uint32 view_id;
+ uint32 sid;
+};
+
+/**
+ * vmw_view - Convert a struct vmw_resource to a struct vmw_view
+ *
+ * @res: Pointer to the resource to convert.
+ *
+ * Returns a pointer to a struct vmw_view.
+ */
+static struct vmw_view *vmw_view(struct vmw_resource *res)
+{
+ return container_of(res, struct vmw_view, res);
+}
+
+/**
+ * vmw_view_commit_notify - Notify that a view operation has been committed to
+ * hardware from a user-supplied command stream.
+ *
+ * @res: Pointer to the view resource.
+ * @state: Indicating whether a creation or removal has been committed.
+ *
+ */
+static void vmw_view_commit_notify(struct vmw_resource *res,
+ enum vmw_cmdbuf_res_state state)
+{
+ struct vmw_view *view = vmw_view(res);
+ struct vmw_private *dev_priv = res->dev_priv;
+
+ mutex_lock(&dev_priv->binding_mutex);
+ if (state == VMW_CMDBUF_RES_ADD) {
+ struct vmw_surface *srf = vmw_res_to_srf(view->srf);
+
+ list_add_tail(&view->srf_head, &srf->view_list);
+ vmw_cotable_add_resource(view->cotable, &view->cotable_head);
+ view->committed = true;
+ res->id = view->view_id;
+
+ } else {
+ list_del_init(&view->cotable_head);
+ list_del_init(&view->srf_head);
+ view->committed = false;
+ res->id = -1;
+ }
+ mutex_unlock(&dev_priv->binding_mutex);
+}
+
+/**
+ * vmw_view_create - Create a hardware view.
+ *
+ * @res: Pointer to the view resource.
+ *
+ * Create a hardware view. Typically used if that view has previously been
+ * destroyed by an eviction operation.
+ */
+static int vmw_view_create(struct vmw_resource *res)
+{
+ struct vmw_view *view = vmw_view(res);
+ struct vmw_surface *srf = vmw_res_to_srf(view->srf);
+ struct vmw_private *dev_priv = res->dev_priv;
+ struct {
+ SVGA3dCmdHeader header;
+ struct vmw_view_define body;
+ } *cmd;
+
+ mutex_lock(&dev_priv->binding_mutex);
+ if (!view->committed) {
+ mutex_unlock(&dev_priv->binding_mutex);
+ return 0;
+ }
+
+ cmd = vmw_fifo_reserve_dx(res->dev_priv, view->cmd_size,
+ view->ctx->id);
+ if (!cmd) {
+ DRM_ERROR("Failed reserving FIFO space for view creation.\n");
+ mutex_unlock(&dev_priv->binding_mutex);
+ return -ENOMEM;
+ }
+ memcpy(cmd, &view->cmd, view->cmd_size);
+ WARN_ON(cmd->body.view_id != view->view_id);
+ /* Sid may have changed due to surface eviction. */
+ WARN_ON(view->srf->id == SVGA3D_INVALID_ID);
+ cmd->body.sid = view->srf->id;
+ vmw_fifo_commit(res->dev_priv, view->cmd_size);
+ res->id = view->view_id;
+ list_add_tail(&view->srf_head, &srf->view_list);
+ vmw_cotable_add_resource(view->cotable, &view->cotable_head);
+ mutex_unlock(&dev_priv->binding_mutex);
+
+ return 0;
+}
+
+/**
+ * vmw_view_destroy - Destroy a hardware view.
+ *
+ * @res: Pointer to the view resource.
+ *
+ * Destroy a hardware view. Typically used on unexpected termination of the
+ * owning process or if the surface the view is pointing to is destroyed.
+ */
+static int vmw_view_destroy(struct vmw_resource *res)
+{
+ struct vmw_private *dev_priv = res->dev_priv;
+ struct vmw_view *view = vmw_view(res);
+ struct {
+ SVGA3dCmdHeader header;
+ union vmw_view_destroy body;
+ } *cmd;
+
+ WARN_ON_ONCE(!mutex_is_locked(&dev_priv->binding_mutex));
+ vmw_binding_res_list_scrub(&res->binding_head);
+
+ if (!view->committed || res->id == -1)
+ return 0;
+
+ cmd = vmw_fifo_reserve_dx(dev_priv, sizeof(*cmd), view->ctx->id);
+ if (!cmd) {
+ DRM_ERROR("Failed reserving FIFO space for view "
+ "destruction.\n");
+ return -ENOMEM;
+ }
+
+ cmd->header.id = vmw_view_destroy_cmds[view->view_type];
+ cmd->header.size = sizeof(cmd->body);
+ cmd->body.view_id = view->view_id;
+ vmw_fifo_commit(dev_priv, sizeof(*cmd));
+ res->id = -1;
+ list_del_init(&view->cotable_head);
+ list_del_init(&view->srf_head);
+
+ return 0;
+}
+
+/**
+ * vmw_hw_view_destroy - Destroy a hardware view as part of resource cleanup.
+ *
+ * @res: Pointer to the view resource.
+ *
+ * Destroy a hardware view if it's still present.
+ */
+static void vmw_hw_view_destroy(struct vmw_resource *res)
+{
+ struct vmw_private *dev_priv = res->dev_priv;
+
+ mutex_lock(&dev_priv->binding_mutex);
+ WARN_ON(vmw_view_destroy(res));
+ res->id = -1;
+ mutex_unlock(&dev_priv->binding_mutex);
+}
+
+/**
+ * vmw_view_key - Compute a view key suitable for the cmdbuf resource manager
+ *
+ * @user_key: The user-space id used for the view.
+ * @view_type: The view type.
+ *
+ * Returns a hash key suitable for the command buffer managed resource
+ * manager hash table.
+ */
+static u32 vmw_view_key(u32 user_key, enum vmw_view_type view_type)
+{
+ return user_key | (view_type << 20);
+}
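+
+/*
+ * Illustrative example (not part of the driver): with the 20-bit user
+ * key space enforced by vmw_view_id_ok() below, a render target view
+ * (view_type == vmw_view_rt == 1) with user_key 0x123 yields
+ *
+ *   vmw_view_key(0x123, vmw_view_rt) == 0x123 | (1 << 20) == 0x100123
+ *
+ * so keys of different view types occupy disjoint ranges of the hash
+ * key space.
+ */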
+
+/**
+ * vmw_view_id_ok - Basic view id and type range checks.
+ *
+ * @user_key: The user-space id used for the view.
+ * @view_type: The view type.
+ *
+ * Checks that the view id and type (typically provided by user-space) is
+ * valid.
+ */
+static bool vmw_view_id_ok(u32 user_key, enum vmw_view_type view_type)
+{
+ return (user_key < SVGA_COTABLE_MAX_IDS &&
+ view_type < vmw_view_max);
+}
+
+/**
+ * vmw_view_res_free - resource res_free callback for view resources
+ *
+ * @res: Pointer to a struct vmw_resource
+ *
+ * Frees memory and memory accounting held by a struct vmw_view.
+ */
+static void vmw_view_res_free(struct vmw_resource *res)
+{
+ struct vmw_view *view = vmw_view(res);
+ size_t size = offsetof(struct vmw_view, cmd) + view->cmd_size;
+ struct vmw_private *dev_priv = res->dev_priv;
+
+ vmw_resource_unreference(&view->cotable);
+ vmw_resource_unreference(&view->srf);
+ kfree_rcu(view, rcu);
+ ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
+}
+
+/**
+ * vmw_view_add - Create a view resource and stage it for addition
+ * as a command buffer managed resource.
+ *
+ * @man: Pointer to the view manager identifying the view namespace.
+ * @ctx: Pointer to a struct vmw_resource identifying the active context.
+ * @srf: Pointer to a struct vmw_resource identifying the surface the view
+ * points to.
+ * @view_type: The view type deduced from the view create command.
+ * @user_key: The key that is used to identify the view. The key is
+ * unique to the view type and to the context.
+ * @cmd: Pointer to the view create command in the command stream.
+ * @cmd_size: Size of the view create command in the command stream.
+ * @list: Caller's list of staged command buffer resource actions.
+ */
+int vmw_view_add(struct vmw_cmdbuf_res_manager *man,
+ struct vmw_resource *ctx,
+ struct vmw_resource *srf,
+ enum vmw_view_type view_type,
+ u32 user_key,
+ const void *cmd,
+ size_t cmd_size,
+ struct list_head *list)
+{
+ static const size_t vmw_view_define_sizes[] = {
+ [vmw_view_sr] = sizeof(SVGA3dCmdDXDefineShaderResourceView),
+ [vmw_view_rt] = sizeof(SVGA3dCmdDXDefineRenderTargetView),
+ [vmw_view_ds] = sizeof(SVGA3dCmdDXDefineDepthStencilView)
+ };
+
+ struct vmw_private *dev_priv = ctx->dev_priv;
+ struct vmw_resource *res;
+ struct vmw_view *view;
+ size_t size;
+ int ret;
+
+ if (cmd_size != vmw_view_define_sizes[view_type] +
+ sizeof(SVGA3dCmdHeader)) {
+ DRM_ERROR("Illegal view create command size.\n");
+ return -EINVAL;
+ }
+
+ if (!vmw_view_id_ok(user_key, view_type)) {
+ DRM_ERROR("Illegal view add view id.\n");
+ return -EINVAL;
+ }
+
+ size = offsetof(struct vmw_view, cmd) + cmd_size;
+
+ ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv), size, false, true);
+ if (ret) {
+ if (ret != -ERESTARTSYS)
+ DRM_ERROR("Out of graphics memory for view"
+ " creation.\n");
+ return ret;
+ }
+
+ view = kmalloc(size, GFP_KERNEL);
+ if (!view) {
+ ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
+ return -ENOMEM;
+ }
+
+ res = &view->res;
+ view->ctx = ctx;
+ view->srf = vmw_resource_reference(srf);
+ view->cotable = vmw_context_cotable(ctx, vmw_view_cotables[view_type]);
+ view->view_type = view_type;
+ view->view_id = user_key;
+ view->cmd_size = cmd_size;
+ view->committed = false;
+ INIT_LIST_HEAD(&view->srf_head);
+ INIT_LIST_HEAD(&view->cotable_head);
+ memcpy(&view->cmd, cmd, cmd_size);
+ ret = vmw_resource_init(dev_priv, res, true,
+ vmw_view_res_free, &vmw_view_func);
+ if (ret)
+ goto out_resource_init;
+
+ ret = vmw_cmdbuf_res_add(man, vmw_cmdbuf_res_view,
+ vmw_view_key(user_key, view_type),
+ res, list);
+ if (ret)
+ goto out_resource_init;
+
+ res->id = view->view_id;
+ vmw_resource_activate(res, vmw_hw_view_destroy);
+
+out_resource_init:
+ vmw_resource_unreference(&res);
+
+ return ret;
+}
+
+/**
+ * vmw_view_remove - Stage a view for removal.
+ *
+ * @man: Pointer to the view manager identifying the view namespace.
+ * @user_key: The key that is used to identify the view. The key is
+ * unique to the view type.
+ * @view_type: View type
+ * @list: Caller's list of staged command buffer resource actions.
+ * @res_p: If the resource is in an already committed state, points to the
+ * struct vmw_resource on successful return. The pointer will be
+ * non ref-counted.
+ */
+int vmw_view_remove(struct vmw_cmdbuf_res_manager *man,
+ u32 user_key, enum vmw_view_type view_type,
+ struct list_head *list,
+ struct vmw_resource **res_p)
+{
+ if (!vmw_view_id_ok(user_key, view_type)) {
+ DRM_ERROR("Illegal view remove view id.\n");
+ return -EINVAL;
+ }
+
+ return vmw_cmdbuf_res_remove(man, vmw_cmdbuf_res_view,
+ vmw_view_key(user_key, view_type),
+ list, res_p);
+}
+
+/**
+ * vmw_view_cotable_list_destroy - Evict all views belonging to a cotable.
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @list: List of views belonging to a cotable.
+ * @readback: Unused. Needed for function interface only.
+ *
+ * This function evicts all views belonging to a cotable.
+ * It must be called with the binding_mutex held, and the caller must hold
+ * a reference to the view resource. This is typically called before the
+ * cotable is paged out.
+ */
+void vmw_view_cotable_list_destroy(struct vmw_private *dev_priv,
+ struct list_head *list,
+ bool readback)
+{
+ struct vmw_view *entry, *next;
+
+ WARN_ON_ONCE(!mutex_is_locked(&dev_priv->binding_mutex));
+
+ list_for_each_entry_safe(entry, next, list, cotable_head)
+ WARN_ON(vmw_view_destroy(&entry->res));
+}
+
+/**
+ * vmw_view_surface_list_destroy - Evict all views pointing to a surface
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @list: List of views pointing to a surface.
+ *
+ * This function evicts all views pointing to a surface. This is typically
+ * called before the surface is evicted.
+ */
+void vmw_view_surface_list_destroy(struct vmw_private *dev_priv,
+ struct list_head *list)
+{
+ struct vmw_view *entry, *next;
+
+ WARN_ON_ONCE(!mutex_is_locked(&dev_priv->binding_mutex));
+
+ list_for_each_entry_safe(entry, next, list, srf_head)
+ WARN_ON(vmw_view_destroy(&entry->res));
+}
+
+/**
+ * vmw_view_srf - Return a non-refcounted pointer to the surface a view is
+ * pointing to.
+ *
+ * @res: pointer to a view resource.
+ *
+ * Note that the view itself is holding a reference, so as long as
+ * the view resource is alive, the surface resource will be.
+ */
+struct vmw_resource *vmw_view_srf(struct vmw_resource *res)
+{
+ return vmw_view(res)->srf;
+}
+
+/**
+ * vmw_view_lookup - Look up a view.
+ *
+ * @man: The context's cmdbuf ref manager.
+ * @view_type: The view type.
+ * @user_key: The view user id.
+ *
+ * returns a refcounted pointer to a view or an error pointer if not found.
+ */
+struct vmw_resource *vmw_view_lookup(struct vmw_cmdbuf_res_manager *man,
+ enum vmw_view_type view_type,
+ u32 user_key)
+{
+ return vmw_cmdbuf_res_lookup(man, vmw_cmdbuf_res_view,
+ vmw_view_key(user_key, view_type));
+}
+
+const u32 vmw_view_destroy_cmds[] = {
+ [vmw_view_sr] = SVGA_3D_CMD_DX_DESTROY_SHADERRESOURCE_VIEW,
+ [vmw_view_rt] = SVGA_3D_CMD_DX_DESTROY_RENDERTARGET_VIEW,
+ [vmw_view_ds] = SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_VIEW,
+};
+
+const SVGACOTableType vmw_view_cotables[] = {
+ [vmw_view_sr] = SVGA_COTABLE_SRVIEW,
+ [vmw_view_rt] = SVGA_COTABLE_RTVIEW,
+ [vmw_view_ds] = SVGA_COTABLE_DSVIEW,
+};
+
+const SVGACOTableType vmw_so_cotables[] = {
+ [vmw_so_el] = SVGA_COTABLE_ELEMENTLAYOUT,
+ [vmw_so_bs] = SVGA_COTABLE_BLENDSTATE,
+ [vmw_so_ds] = SVGA_COTABLE_DEPTHSTENCIL,
+ [vmw_so_rs] = SVGA_COTABLE_RASTERIZERSTATE,
+ [vmw_so_ss] = SVGA_COTABLE_SAMPLER,
+ [vmw_so_so] = SVGA_COTABLE_STREAMOUTPUT
+};
+
+
+/* To remove unused function warning */
+static void vmw_so_build_asserts(void) __attribute__((used));
+
+
+/*
+ * This function is unused at run-time, and only used to dump various build
+ * asserts important for code optimization assumptions.
+ */
+static void vmw_so_build_asserts(void)
+{
+ /* Assert that our vmw_view_cmd_to_type() function is correct. */
+ BUILD_BUG_ON(SVGA_3D_CMD_DX_DESTROY_SHADERRESOURCE_VIEW !=
+ SVGA_3D_CMD_DX_DEFINE_SHADERRESOURCE_VIEW + 1);
+ BUILD_BUG_ON(SVGA_3D_CMD_DX_DEFINE_RENDERTARGET_VIEW !=
+ SVGA_3D_CMD_DX_DEFINE_SHADERRESOURCE_VIEW + 2);
+ BUILD_BUG_ON(SVGA_3D_CMD_DX_DESTROY_RENDERTARGET_VIEW !=
+ SVGA_3D_CMD_DX_DEFINE_SHADERRESOURCE_VIEW + 3);
+ BUILD_BUG_ON(SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_VIEW !=
+ SVGA_3D_CMD_DX_DEFINE_SHADERRESOURCE_VIEW + 4);
+ BUILD_BUG_ON(SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_VIEW !=
+ SVGA_3D_CMD_DX_DEFINE_SHADERRESOURCE_VIEW + 5);
+
+ /* Assert that our "one body fits all" assumption is valid */
+ BUILD_BUG_ON(sizeof(union vmw_view_destroy) != sizeof(u32));
+
+ /* Assert that the view key space can hold all view ids. */
+ BUILD_BUG_ON(SVGA_COTABLE_MAX_IDS >= ((1 << 20) - 1));
+
+ /*
+ * Assert that the offset of sid in all view define commands
+ * is what we assume it to be.
+ */
+ BUILD_BUG_ON(offsetof(struct vmw_view_define, sid) !=
+ offsetof(SVGA3dCmdDXDefineShaderResourceView, sid));
+ BUILD_BUG_ON(offsetof(struct vmw_view_define, sid) !=
+ offsetof(SVGA3dCmdDXDefineRenderTargetView, sid));
+ BUILD_BUG_ON(offsetof(struct vmw_view_define, sid) !=
+ offsetof(SVGA3dCmdDXDefineDepthStencilView, sid));
+}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_so.h b/drivers/gpu/drm/vmwgfx/vmwgfx_so.h
new file mode 100644
index 000000000000..268738387b5e
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_so.h
@@ -0,0 +1,160 @@
+/**************************************************************************
+ * Copyright © 2014-2015 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+#ifndef VMW_SO_H
+#define VMW_SO_H
+
+enum vmw_view_type {
+ vmw_view_sr,
+ vmw_view_rt,
+ vmw_view_ds,
+ vmw_view_max,
+};
+
+enum vmw_so_type {
+ vmw_so_el,
+ vmw_so_bs,
+ vmw_so_ds,
+ vmw_so_rs,
+ vmw_so_ss,
+ vmw_so_so,
+ vmw_so_max,
+};
+
+/**
+ * union vmw_view_destroy - view destruction command body
+ *
+ * @rtv: RenderTarget view destruction command body
+ * @srv: ShaderResource view destruction command body
+ * @dsv: DepthStencil view destruction command body
+ * @view_id: A single u32 view id.
+ *
+ * The assumption here is that all union members are really represented by a
+ * single u32 in the command stream. If that's not the case,
+ * the size of this union will not equal the size of an u32, and the
+ * assumption is invalid, and we detect that at compile time in the
+ * vmw_so_build_asserts() function.
+ */
+union vmw_view_destroy {
+ struct SVGA3dCmdDXDestroyRenderTargetView rtv;
+ struct SVGA3dCmdDXDestroyShaderResourceView srv;
+ struct SVGA3dCmdDXDestroyDepthStencilView dsv;
+ u32 view_id;
+};
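+
+/*
+ * Editor's sketch of the aliasing this union relies on: since every
+ * member is a single u32 id on the wire, vmw_view_destroy() can simply
+ * write body.view_id regardless of the actual view type. The size
+ * assumption is verified at compile time in vmw_so_build_asserts().
+ */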
+
+/* Map enum vmw_view_type to view destroy command ids */
+extern const u32 vmw_view_destroy_cmds[];
+
+/* Map enum vmw_view_type to SVGACOTableType */
+extern const SVGACOTableType vmw_view_cotables[];
+
+/* Map enum vmw_so_type to SVGACOTableType */
+extern const SVGACOTableType vmw_so_cotables[];
+
+/*
+ * vmw_view_cmd_to_type - Return the view type for a create or destroy command
+ *
+ * @id: The SVGA3D command id.
+ *
+ * For a given view create or destroy command id, return the corresponding
+ * enum vmw_view_type. If the command is unknown, return vmw_view_max.
+ * The validity of the simplified calculation is verified in the
+ * vmw_so_build_asserts() function.
+ */
+static inline enum vmw_view_type vmw_view_cmd_to_type(u32 id)
+{
+ u32 tmp = (id - SVGA_3D_CMD_DX_DEFINE_SHADERRESOURCE_VIEW) / 2;
+
+ if (tmp > (u32)vmw_view_max)
+ return vmw_view_max;
+
+ return (enum vmw_view_type) tmp;
+}
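+
+/*
+ * Worked example (illustrative, relying on the command id layout checked
+ * in vmw_so_build_asserts()): define/destroy ids form consecutive pairs
+ * starting at SVGA_3D_CMD_DX_DEFINE_SHADERRESOURCE_VIEW, so both
+ * SVGA_3D_CMD_DX_DEFINE_RENDERTARGET_VIEW (base + 2) and
+ * SVGA_3D_CMD_DX_DESTROY_RENDERTARGET_VIEW (base + 3) divide down to
+ * 1 == vmw_view_rt.
+ */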
+
+/*
+ * vmw_so_cmd_to_type - Return the state object type for a
+ * create or destroy command
+ *
+ * @id: The SVGA3D command id.
+ *
+ * For a given state object create or destroy command id,
+ * return the corresponding enum vmw_so_type. If the command is unknown,
+ * return vmw_so_max. We should perhaps optimize this function using
+ * a similar strategy as vmw_view_cmd_to_type().
+ */
+static inline enum vmw_so_type vmw_so_cmd_to_type(u32 id)
+{
+ switch (id) {
+ case SVGA_3D_CMD_DX_DEFINE_ELEMENTLAYOUT:
+ case SVGA_3D_CMD_DX_DESTROY_ELEMENTLAYOUT:
+ return vmw_so_el;
+ case SVGA_3D_CMD_DX_DEFINE_BLEND_STATE:
+ case SVGA_3D_CMD_DX_DESTROY_BLEND_STATE:
+ return vmw_so_bs;
+ case SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_STATE:
+ case SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_STATE:
+ return vmw_so_ds;
+ case SVGA_3D_CMD_DX_DEFINE_RASTERIZER_STATE:
+ case SVGA_3D_CMD_DX_DESTROY_RASTERIZER_STATE:
+ return vmw_so_rs;
+ case SVGA_3D_CMD_DX_DEFINE_SAMPLER_STATE:
+ case SVGA_3D_CMD_DX_DESTROY_SAMPLER_STATE:
+ return vmw_so_ss;
+ case SVGA_3D_CMD_DX_DEFINE_STREAMOUTPUT:
+ case SVGA_3D_CMD_DX_DESTROY_STREAMOUTPUT:
+ return vmw_so_so;
+ default:
+ break;
+ }
+ return vmw_so_max;
+}
+
+/*
+ * View management - vmwgfx_so.c
+ */
+extern int vmw_view_add(struct vmw_cmdbuf_res_manager *man,
+ struct vmw_resource *ctx,
+ struct vmw_resource *srf,
+ enum vmw_view_type view_type,
+ u32 user_key,
+ const void *cmd,
+ size_t cmd_size,
+ struct list_head *list);
+
+extern int vmw_view_remove(struct vmw_cmdbuf_res_manager *man,
+ u32 user_key, enum vmw_view_type view_type,
+ struct list_head *list,
+ struct vmw_resource **res_p);
+
+extern void vmw_view_surface_list_destroy(struct vmw_private *dev_priv,
+ struct list_head *view_list);
+extern void vmw_view_cotable_list_destroy(struct vmw_private *dev_priv,
+ struct list_head *list,
+ bool readback);
+extern struct vmw_resource *vmw_view_srf(struct vmw_resource *res);
+extern struct vmw_resource *vmw_view_lookup(struct vmw_cmdbuf_res_manager *man,
+ enum vmw_view_type view_type,
+ u32 user_key);
+#endif
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
new file mode 100644
index 000000000000..c22e2df1b336
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
@@ -0,0 +1,1266 @@
+/******************************************************************************
+ *
+ * COPYRIGHT © 2014-2015 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ ******************************************************************************/
+
+#include "vmwgfx_kms.h"
+#include "device_include/svga3d_surfacedefs.h"
+#include <drm/drm_plane_helper.h>
+
+#define vmw_crtc_to_stdu(x) \
+ container_of(x, struct vmw_screen_target_display_unit, base.crtc)
+#define vmw_encoder_to_stdu(x) \
+ container_of(x, struct vmw_screen_target_display_unit, base.encoder)
+#define vmw_connector_to_stdu(x) \
+ container_of(x, struct vmw_screen_target_display_unit, base.connector)
+
+
+
+enum stdu_content_type {
+ SAME_AS_DISPLAY = 0,
+ SEPARATE_SURFACE,
+ SEPARATE_DMA
+};
+
+/**
+ * struct vmw_stdu_dirty - closure structure for the update functions
+ *
+ * @base: The base type we derive from. Used by vmw_kms_helper_dirty().
+ * @transfer: Transfer direction for DMA command.
+ * @left: Left side of bounding box.
+ * @right: Right side of bounding box.
+ * @top: Top side of bounding box.
+ * @bottom: Bottom side of bounding box.
+ * @pitch: Guest buffer pitch used when DMA-ing between buffer and screen
+ * targets.
+ * @buf: DMA buffer when DMA-ing between buffer and screen targets.
+ * @sid: Surface ID when copying between surface and screen targets.
+ */
+struct vmw_stdu_dirty {
+ struct vmw_kms_dirty base;
+ SVGA3dTransferType transfer;
+ s32 left, right, top, bottom;
+ u32 pitch;
+ union {
+ struct vmw_dma_buffer *buf;
+ u32 sid;
+ };
+};
+
+/*
+ * SVGA commands that are used by this code. Please see the device headers
+ * for explanation.
+ */
+struct vmw_stdu_update {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdUpdateGBScreenTarget body;
+};
+
+struct vmw_stdu_dma {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdSurfaceDMA body;
+};
+
+struct vmw_stdu_surface_copy {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdSurfaceCopy body;
+};
+
+
+/**
+ * struct vmw_screen_target_display_unit
+ *
+ * @base: VMW specific DU structure
+ * @display_srf: surface to be displayed. The dimension of this will always
+ * match the display mode. If the display mode matches
+ * content_vfbs dimensions, then this is a pointer into the
+ * corresponding field in content_vfbs. If not, then this
+ * is a separate buffer to which content_vfbs will blit.
+ * @content_fb: holds the rendered content, can be a surface or DMA buffer
+ * @content_fb_type: content_fb type
+ * @defined: true if the current display unit has been initialized
+ */
+struct vmw_screen_target_display_unit {
+ struct vmw_display_unit base;
+
+ struct vmw_surface *display_srf;
+ struct drm_framebuffer *content_fb;
+
+ enum stdu_content_type content_fb_type;
+
+ bool defined;
+};
+
+
+
+static void vmw_stdu_destroy(struct vmw_screen_target_display_unit *stdu);
+
+
+
+/******************************************************************************
+ * Screen Target Display Unit helper Functions
+ *****************************************************************************/
+
+/**
+ * vmw_stdu_pin_display - pins the resource associated with the display surface
+ *
+ * @stdu: contains the display surface
+ *
+ * Since the display surface can either be a private surface allocated by us,
+ * or it can point to the content surface, we use this function to not pin the
+ * same resource twice.
+ */
+static int vmw_stdu_pin_display(struct vmw_screen_target_display_unit *stdu)
+{
+ return vmw_resource_pin(&stdu->display_srf->res, false);
+}
+
+
+
+/**
+ * vmw_stdu_unpin_display - unpins the resource associated with display surface
+ *
+ * @stdu: contains the display surface
+ *
+ * If the display surface was privately allocated by
+ * vmw_surface_gb_priv_define() and not registered as a framebuffer, then it
+ * won't be automatically cleaned up when all the framebuffers are freed. As
+ * such, we have to explicitly call vmw_resource_unreference() to get it freed.
+ */
+static void vmw_stdu_unpin_display(struct vmw_screen_target_display_unit *stdu)
+{
+ if (stdu->display_srf) {
+ struct vmw_resource *res = &stdu->display_srf->res;
+
+ vmw_resource_unpin(res);
+
+ if (stdu->content_fb_type != SAME_AS_DISPLAY) {
+ vmw_resource_unreference(&res);
+ stdu->content_fb_type = SAME_AS_DISPLAY;
+ }
+
+ stdu->display_srf = NULL;
+ }
+}
+
+
+
+/******************************************************************************
+ * Screen Target Display Unit CRTC Functions
+ *****************************************************************************/
+
+
+/**
+ * vmw_stdu_crtc_destroy - cleans up the STDU
+ *
+ * @crtc: used to get a reference to the containing STDU
+ */
+static void vmw_stdu_crtc_destroy(struct drm_crtc *crtc)
+{
+ vmw_stdu_destroy(vmw_crtc_to_stdu(crtc));
+}
+
+/**
+ * vmw_stdu_define_st - Defines a Screen Target
+ *
+ * @dev_priv: VMW DRM device
+ * @stdu: display unit to create a Screen Target for
+ *
+ * Creates a Screen Target that we can use later. This function is called
+ * whenever the framebuffer size changes.
+ *
+ * RETURNS:
+ * 0 on success, error code on failure
+ */
+static int vmw_stdu_define_st(struct vmw_private *dev_priv,
+ struct vmw_screen_target_display_unit *stdu)
+{
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDefineGBScreenTarget body;
+ } *cmd;
+
+ cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+
+ if (unlikely(cmd == NULL)) {
+ DRM_ERROR("Out of FIFO space defining Screen Target\n");
+ return -ENOMEM;
+ }
+
+ cmd->header.id = SVGA_3D_CMD_DEFINE_GB_SCREENTARGET;
+ cmd->header.size = sizeof(cmd->body);
+
+ cmd->body.stid = stdu->base.unit;
+ cmd->body.width = stdu->display_srf->base_size.width;
+ cmd->body.height = stdu->display_srf->base_size.height;
+ cmd->body.flags = (0 == cmd->body.stid) ? SVGA_STFLAG_PRIMARY : 0;
+ cmd->body.dpi = 0;
+ cmd->body.xRoot = stdu->base.crtc.x;
+ cmd->body.yRoot = stdu->base.crtc.y;
+
+ if (!stdu->base.is_implicit) {
+ cmd->body.xRoot = stdu->base.gui_x;
+ cmd->body.yRoot = stdu->base.gui_y;
+ }
+
+ vmw_fifo_commit(dev_priv, sizeof(*cmd));
+
+ stdu->defined = true;
+
+ return 0;
+}
+
+
+
+/**
+ * vmw_stdu_bind_st - Binds a surface to a Screen Target
+ *
+ * @dev_priv: VMW DRM device
+ * @stdu: display unit affected
+ * @res: Buffer to bind to the screen target. Set to NULL to blank screen.
+ *
+ * Binding a surface to a Screen Target the same as flipping
+ */
+static int vmw_stdu_bind_st(struct vmw_private *dev_priv,
+ struct vmw_screen_target_display_unit *stdu,
+ struct vmw_resource *res)
+{
+ SVGA3dSurfaceImageId image;
+
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdBindGBScreenTarget body;
+ } *cmd;
+
+
+ if (!stdu->defined) {
+ DRM_ERROR("No screen target defined\n");
+ return -EINVAL;
+ }
+
+ /* Set up image using information in vfb */
+ memset(&image, 0, sizeof(image));
+ image.sid = res ? res->id : SVGA3D_INVALID_ID;
+
+ cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+
+ if (unlikely(cmd == NULL)) {
+ DRM_ERROR("Out of FIFO space binding a screen target\n");
+ return -ENOMEM;
+ }
+
+ cmd->header.id = SVGA_3D_CMD_BIND_GB_SCREENTARGET;
+ cmd->header.size = sizeof(cmd->body);
+
+ cmd->body.stid = stdu->base.unit;
+ cmd->body.image = image;
+
+ vmw_fifo_commit(dev_priv, sizeof(*cmd));
+
+ return 0;
+}
+
+/**
+ * vmw_stdu_populate_update - populate an UPDATE_GB_SCREENTARGET command with a
+ * bounding box.
+ *
+ * @cmd: Pointer to command stream.
+ * @unit: Screen target unit.
+ * @left: Left side of bounding box.
+ * @right: Right side of bounding box.
+ * @top: Top side of bounding box.
+ * @bottom: Bottom side of bounding box.
+ */
+static void vmw_stdu_populate_update(void *cmd, int unit,
+ s32 left, s32 right, s32 top, s32 bottom)
+{
+ struct vmw_stdu_update *update = cmd;
+
+ update->header.id = SVGA_3D_CMD_UPDATE_GB_SCREENTARGET;
+ update->header.size = sizeof(update->body);
+
+ update->body.stid = unit;
+ update->body.rect.x = left;
+ update->body.rect.y = top;
+ update->body.rect.w = right - left;
+ update->body.rect.h = bottom - top;
+}
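+
+/*
+ * For example (illustrative values only): a bounding box with left == 16,
+ * top == 32, right == 272 and bottom == 232 is encoded as the SVGA rect
+ * x == 16, y == 32, w == 256, h == 200.
+ */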
+
+/**
+ * vmw_stdu_update_st - Full update of a Screen Target
+ *
+ * @dev_priv: VMW DRM device
+ * @stdu: display unit affected
+ *
+ * This function needs to be called whenever the content of a screen
+ * target has changed completely. Typically as a result of a backing
+ * surface change.
+ *
+ * RETURNS:
+ * 0 on success, error code on failure
+ */
+static int vmw_stdu_update_st(struct vmw_private *dev_priv,
+ struct vmw_screen_target_display_unit *stdu)
+{
+ struct vmw_stdu_update *cmd;
+ struct drm_crtc *crtc = &stdu->base.crtc;
+
+ if (!stdu->defined) {
+ DRM_ERROR("No screen target defined");
+ return -EINVAL;
+ }
+
+ cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+
+ if (unlikely(cmd == NULL)) {
+ DRM_ERROR("Out of FIFO space updating a Screen Target\n");
+ return -ENOMEM;
+ }
+
+ vmw_stdu_populate_update(cmd, stdu->base.unit, 0, crtc->mode.hdisplay,
+ 0, crtc->mode.vdisplay);
+
+ vmw_fifo_commit(dev_priv, sizeof(*cmd));
+
+ return 0;
+}
+
+
+
+/**
+ * vmw_stdu_destroy_st - Destroy a Screen Target
+ *
+ * @dev_priv: VMW DRM device
+ * @stdu: display unit to destroy
+ */
+static int vmw_stdu_destroy_st(struct vmw_private *dev_priv,
+ struct vmw_screen_target_display_unit *stdu)
+{
+ int ret;
+
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDestroyGBScreenTarget body;
+ } *cmd;
+
+
+ /* Nothing to do if not successfully defined */
+ if (unlikely(!stdu->defined))
+ return 0;
+
+ cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+
+ if (unlikely(cmd == NULL)) {
+ DRM_ERROR("Out of FIFO space, screen target not destroyed\n");
+ return -ENOMEM;
+ }
+
+ cmd->header.id = SVGA_3D_CMD_DESTROY_GB_SCREENTARGET;
+ cmd->header.size = sizeof(cmd->body);
+
+ cmd->body.stid = stdu->base.unit;
+
+ vmw_fifo_commit(dev_priv, sizeof(*cmd));
+
+ /* Force sync */
+ ret = vmw_fallback_wait(dev_priv, false, true, 0, false, 3*HZ);
+ if (unlikely(ret != 0))
+ DRM_ERROR("Failed to sync with HW");
+
+ stdu->defined = false;
+
+ return ret;
+}
+
+
+
+/**
+ * vmw_stdu_crtc_set_config - Sets a mode
+ *
+ * @set: mode parameters
+ *
+ * This function is the device-specific portion of the DRM CRTC mode set.
+ * For the SVGA device, we do this by defining a Screen Target, binding a
+ * GB Surface to that target, and finally update the screen target.
+ *
+ * RETURNS:
+ * 0 on success, error code otherwise
+ */
+static int vmw_stdu_crtc_set_config(struct drm_mode_set *set)
+{
+ struct vmw_private *dev_priv;
+ struct vmw_screen_target_display_unit *stdu;
+ struct vmw_framebuffer *vfb;
+ struct vmw_framebuffer_surface *new_vfbs;
+ struct drm_display_mode *mode;
+ struct drm_framebuffer *new_fb;
+ struct drm_crtc *crtc;
+ struct drm_encoder *encoder;
+ struct drm_connector *connector;
+ int ret;
+
+
+ if (!set || !set->crtc)
+ return -EINVAL;
+
+ crtc = set->crtc;
+ crtc->x = set->x;
+ crtc->y = set->y;
+ stdu = vmw_crtc_to_stdu(crtc);
+ mode = set->mode;
+ new_fb = set->fb;
+ dev_priv = vmw_priv(crtc->dev);
+
+
+ if (set->num_connectors > 1) {
+ DRM_ERROR("Too many connectors\n");
+ return -EINVAL;
+ }
+
+ if (set->num_connectors == 1 &&
+ set->connectors[0] != &stdu->base.connector) {
+ DRM_ERROR("Connectors don't match %p %p\n",
+ set->connectors[0], &stdu->base.connector);
+ return -EINVAL;
+ }
+
+
+ /* Since they always map one to one these are safe */
+ connector = &stdu->base.connector;
+ encoder = &stdu->base.encoder;
+
+
+ /*
+ * After this point the CRTC will be considered off unless a new fb
+ * is bound
+ */
+ if (stdu->defined) {
+ /* Unbind current surface by binding an invalid one */
+ ret = vmw_stdu_bind_st(dev_priv, stdu, NULL);
+ if (unlikely(ret != 0))
+ return ret;
+
+ /* Update Screen Target, display will now be blank */
+ if (crtc->primary->fb) {
+ ret = vmw_stdu_update_st(dev_priv, stdu);
+ if (unlikely(ret != 0))
+ return ret;
+ }
+
+ crtc->primary->fb = NULL;
+ crtc->enabled = false;
+ encoder->crtc = NULL;
+ connector->encoder = NULL;
+
+ vmw_stdu_unpin_display(stdu);
+ stdu->content_fb = NULL;
+ stdu->content_fb_type = SAME_AS_DISPLAY;
+
+ ret = vmw_stdu_destroy_st(dev_priv, stdu);
+ /* The hardware is hung, give up */
+ if (unlikely(ret != 0))
+ return ret;
+ }
+
+
+ /* Any of these conditions means the caller wants CRTC off */
+ if (set->num_connectors == 0 || !mode || !new_fb)
+ return 0;
+
+
+ if (set->x + mode->hdisplay > new_fb->width ||
+ set->y + mode->vdisplay > new_fb->height) {
+ DRM_ERROR("Set outside of framebuffer\n");
+ return -EINVAL;
+ }
+
+ stdu->content_fb = new_fb;
+ vfb = vmw_framebuffer_to_vfb(stdu->content_fb);
+
+ if (vfb->dmabuf)
+ stdu->content_fb_type = SEPARATE_DMA;
+
+ /*
+ * If the requested mode is different than the width and height
+ * of the FB or if the content buffer is a DMA buf, then allocate
+ * a display FB that matches the dimension of the mode
+ */
+ if (mode->hdisplay != new_fb->width ||
+ mode->vdisplay != new_fb->height ||
+ stdu->content_fb_type != SAME_AS_DISPLAY) {
+ struct vmw_surface content_srf;
+ struct drm_vmw_size display_base_size = {0};
+ struct vmw_surface *display_srf;
+
+
+ display_base_size.width = mode->hdisplay;
+ display_base_size.height = mode->vdisplay;
+ display_base_size.depth = 1;
+
+ /*
+ * If content buffer is a DMA buf, then we have to construct
+ * surface info
+ */
+ if (stdu->content_fb_type == SEPARATE_DMA) {
+
+ switch (new_fb->bits_per_pixel) {
+ case 32:
+ content_srf.format = SVGA3D_X8R8G8B8;
+ break;
+
+ case 16:
+ content_srf.format = SVGA3D_R5G6B5;
+ break;
+
+ case 8:
+ content_srf.format = SVGA3D_P8;
+ break;
+
+ default:
+ DRM_ERROR("Invalid format\n");
+ ret = -EINVAL;
+ goto err_unref_content;
+ }
+
+ content_srf.flags = 0;
+ content_srf.mip_levels[0] = 1;
+ content_srf.multisample_count = 0;
+ } else {
+
+ stdu->content_fb_type = SEPARATE_SURFACE;
+
+ new_vfbs = vmw_framebuffer_to_vfbs(new_fb);
+ content_srf = *new_vfbs->surface;
+ }
+
+
+ ret = vmw_surface_gb_priv_define(crtc->dev,
+ 0, /* because kernel visible only */
+ content_srf.flags,
+ content_srf.format,
+ true, /* a scanout buffer */
+ content_srf.mip_levels[0],
+ content_srf.multisample_count,
+ 0,
+ display_base_size,
+ &display_srf);
+ if (unlikely(ret != 0)) {
+ DRM_ERROR("Cannot allocate a display FB.\n");
+ goto err_unref_content;
+ }
+
+ stdu->display_srf = display_srf;
+ } else {
+ new_vfbs = vmw_framebuffer_to_vfbs(new_fb);
+ stdu->display_srf = new_vfbs->surface;
+ }
+
+
+ ret = vmw_stdu_pin_display(stdu);
+ if (unlikely(ret != 0)) {
+ stdu->display_srf = NULL;
+ goto err_unref_content;
+ }
+
+ vmw_svga_enable(dev_priv);
+
+ /*
+ * Steps to display a surface, assuming the surface is already
+ * bound:
+ * 1. define a screen target
+ * 2. bind a fb to the screen target
+ * 3. update that screen target (this is done later by
+ * vmw_kms_stdu_do_surface_dirty_or_present)
+ */
+ ret = vmw_stdu_define_st(dev_priv, stdu);
+ if (unlikely(ret != 0))
+ goto err_unpin_display_and_content;
+
+ ret = vmw_stdu_bind_st(dev_priv, stdu, &stdu->display_srf->res);
+ if (unlikely(ret != 0))
+ goto err_unpin_destroy_st;
+
+
+ connector->encoder = encoder;
+ encoder->crtc = crtc;
+
+ crtc->mode = *mode;
+ crtc->primary->fb = new_fb;
+ crtc->enabled = true;
+
+ return ret;
+
+err_unpin_destroy_st:
+ vmw_stdu_destroy_st(dev_priv, stdu);
+err_unpin_display_and_content:
+ vmw_stdu_unpin_display(stdu);
+err_unref_content:
+ stdu->content_fb = NULL;
+ return ret;
+}
+
+
+
+/**
+ * vmw_stdu_crtc_page_flip - Binds a buffer to a screen target
+ *
+ * @crtc: CRTC to attach FB to
+ * @new_fb: FB to attach
+ * @event: Event to be posted. This event should've been allocated
+ * using k[mz]alloc, and should've been completely initialized.
+ * @flags: Input flags.
+ *
+ * If the STDU uses the same display and content buffers, i.e. a true flip,
+ * this function will replace the existing display buffer with the new content
+ * buffer.
+ *
+ * If the STDU uses different display and content buffers, i.e. a blit, then
+ * only the content buffer will be updated.
+ *
+ * RETURNS:
+ * 0 on success, error code on failure
+ */
+static int vmw_stdu_crtc_page_flip(struct drm_crtc *crtc,
+ struct drm_framebuffer *new_fb,
+ struct drm_pending_vblank_event *event,
+ uint32_t flags)
+
+{
+ struct vmw_private *dev_priv = vmw_priv(crtc->dev);
+ struct vmw_screen_target_display_unit *stdu;
+ int ret;
+
+ if (crtc == NULL)
+ return -EINVAL;
+
+ dev_priv = vmw_priv(crtc->dev);
+ stdu = vmw_crtc_to_stdu(crtc);
+ crtc->primary->fb = new_fb;
+ stdu->content_fb = new_fb;
+
+ if (stdu->display_srf) {
+ /*
+ * If the display surface is the same as the content surface
+ * then remove the reference
+ */
+ if (stdu->content_fb_type == SAME_AS_DISPLAY) {
+ if (stdu->defined) {
+ /* Unbind the current surface */
+ ret = vmw_stdu_bind_st(dev_priv, stdu, NULL);
+ if (unlikely(ret != 0))
+ goto err_out;
+ }
+ vmw_stdu_unpin_display(stdu);
+ stdu->display_srf = NULL;
+ }
+ }
+
+
+ if (!new_fb) {
+ /* Blanks the display */
+ (void) vmw_stdu_update_st(dev_priv, stdu);
+
+ return 0;
+ }
+
+
+ if (stdu->content_fb_type == SAME_AS_DISPLAY) {
+ stdu->display_srf = vmw_framebuffer_to_vfbs(new_fb)->surface;
+ ret = vmw_stdu_pin_display(stdu);
+ if (ret) {
+ stdu->display_srf = NULL;
+ goto err_out;
+ }
+
+ /* Bind display surface */
+ ret = vmw_stdu_bind_st(dev_priv, stdu, &stdu->display_srf->res);
+ if (unlikely(ret != 0))
+ goto err_unpin_display_and_content;
+ }
+
+ /* Update display surface: after this point everything is bound */
+ ret = vmw_stdu_update_st(dev_priv, stdu);
+ if (unlikely(ret != 0))
+ return ret;
+
+ if (event) {
+ struct vmw_fence_obj *fence = NULL;
+ struct drm_file *file_priv = event->base.file_priv;
+
+ vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
+ if (!fence)
+ return -ENOMEM;
+
+ ret = vmw_event_fence_action_queue(file_priv, fence,
+ &event->base,
+ &event->event.tv_sec,
+ &event->event.tv_usec,
+ true);
+ vmw_fence_obj_unreference(&fence);
+ }
+
+ return ret;
+
+err_unpin_display_and_content:
+ vmw_stdu_unpin_display(stdu);
+err_out:
+ crtc->primary->fb = NULL;
+ stdu->content_fb = NULL;
+ return ret;
+}
+
+
+/**
+ * vmw_stdu_dmabuf_clip - Callback to encode a surface DMA command cliprect
+ *
+ * @dirty: The closure structure.
+ *
+ * Encodes a surface DMA command cliprect and updates the bounding box
+ * for the DMA.
+ */
+static void vmw_stdu_dmabuf_clip(struct vmw_kms_dirty *dirty)
+{
+ struct vmw_stdu_dirty *ddirty =
+ container_of(dirty, struct vmw_stdu_dirty, base);
+ struct vmw_stdu_dma *cmd = dirty->cmd;
+ struct SVGA3dCopyBox *blit = (struct SVGA3dCopyBox *) &cmd[1];
+
+ blit += dirty->num_hits;
+ blit->srcx = dirty->fb_x;
+ blit->srcy = dirty->fb_y;
+ blit->x = dirty->unit_x1;
+ blit->y = dirty->unit_y1;
+ blit->d = 1;
+ blit->w = dirty->unit_x2 - dirty->unit_x1;
+ blit->h = dirty->unit_y2 - dirty->unit_y1;
+ dirty->num_hits++;
+
+ if (ddirty->transfer != SVGA3D_WRITE_HOST_VRAM)
+ return;
+
+ /* Destination bounding box */
+ ddirty->left = min_t(s32, ddirty->left, dirty->unit_x1);
+ ddirty->top = min_t(s32, ddirty->top, dirty->unit_y1);
+ ddirty->right = max_t(s32, ddirty->right, dirty->unit_x2);
+ ddirty->bottom = max_t(s32, ddirty->bottom, dirty->unit_y2);
+}
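+
+/*
+ * A small worked example of the accumulation above (editor's
+ * illustration): starting from the (S32_MAX, S32_MIN) sentinels set up
+ * by vmw_kms_stdu_dma(), write-direction clips (0, 0)-(64, 64) and
+ * (32, 32)-(128, 128) leave the destination bounding box at
+ * (0, 0)-(128, 128), the union of all clips.
+ */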
+
+/**
+ * vmw_stdu_dmabuf_fifo_commit - Callback to fill in and submit a DMA command.
+ *
+ * @dirty: The closure structure.
+ *
+ * Fills in the missing fields in a DMA command, and optionally encodes
+ * a screen target update command, depending on transfer direction.
+ */
+static void vmw_stdu_dmabuf_fifo_commit(struct vmw_kms_dirty *dirty)
+{
+ struct vmw_stdu_dirty *ddirty =
+ container_of(dirty, struct vmw_stdu_dirty, base);
+ struct vmw_screen_target_display_unit *stdu =
+ container_of(dirty->unit, typeof(*stdu), base);
+ struct vmw_stdu_dma *cmd = dirty->cmd;
+ struct SVGA3dCopyBox *blit = (struct SVGA3dCopyBox *) &cmd[1];
+ SVGA3dCmdSurfaceDMASuffix *suffix =
+ (SVGA3dCmdSurfaceDMASuffix *) &blit[dirty->num_hits];
+ size_t blit_size = sizeof(*blit) * dirty->num_hits + sizeof(*suffix);
+
+ if (!dirty->num_hits) {
+ vmw_fifo_commit(dirty->dev_priv, 0);
+ return;
+ }
+
+ cmd->header.id = SVGA_3D_CMD_SURFACE_DMA;
+ cmd->header.size = sizeof(cmd->body) + blit_size;
+ vmw_bo_get_guest_ptr(&ddirty->buf->base, &cmd->body.guest.ptr);
+ cmd->body.guest.pitch = ddirty->pitch;
+ cmd->body.host.sid = stdu->display_srf->res.id;
+ cmd->body.host.face = 0;
+ cmd->body.host.mipmap = 0;
+ cmd->body.transfer = ddirty->transfer;
+ suffix->suffixSize = sizeof(*suffix);
+ suffix->maximumOffset = ddirty->buf->base.num_pages * PAGE_SIZE;
+
+ if (ddirty->transfer == SVGA3D_WRITE_HOST_VRAM) {
+ blit_size += sizeof(struct vmw_stdu_update);
+
+ vmw_stdu_populate_update(&suffix[1], stdu->base.unit,
+ ddirty->left, ddirty->right,
+ ddirty->top, ddirty->bottom);
+ }
+
+ vmw_fifo_commit(dirty->dev_priv, sizeof(*cmd) + blit_size);
+
+ ddirty->left = ddirty->top = S32_MAX;
+ ddirty->right = ddirty->bottom = S32_MIN;
+}
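+
+/*
+ * Sketch of the FIFO command layout assembled above (editor's
+ * illustration):
+ *
+ *   struct vmw_stdu_dma           header + SurfaceDMA body
+ *   SVGA3dCopyBox[num_hits]       filled in by vmw_stdu_dmabuf_clip()
+ *   SVGA3dCmdSurfaceDMASuffix     suffixSize / maximumOffset
+ *   struct vmw_stdu_update        only when writing to host VRAM
+ */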
+
+/**
+ * vmw_kms_stdu_dma - Perform a DMA transfer between a dma-buffer backed
+ * framebuffer and the screen target system.
+ *
+ * @dev_priv: Pointer to the device private structure.
+ * @file_priv: Pointer to a struct drm-file identifying the caller. May be
+ * set to NULL, but then @user_fence_rep must also be set to NULL.
+ * @vfb: Pointer to the dma-buffer backed framebuffer.
+ * @clips: Array of clip rects. Either @clips or @vclips must be NULL.
+ * @vclips: Alternate array of clip rects. Either @clips or @vclips must
+ * be NULL.
+ * @num_clips: Number of clip rects in @clips or @vclips.
+ * @increment: Increment to use when looping over @clips or @vclips.
+ * @to_surface: Whether to DMA to the screen target system as opposed to
+ * from the screen target system.
+ * @interruptible: Whether to perform waits interruptible if possible.
+ *
+ * If DMA-ing to the screen target system, the function will also notify
+ * the screen target system that a bounding box of the cliprects has been
+ * updated.
+ * Returns 0 on success, negative error code on failure. -ERESTARTSYS if
+ * interrupted.
+ */
+int vmw_kms_stdu_dma(struct vmw_private *dev_priv,
+ struct drm_file *file_priv,
+ struct vmw_framebuffer *vfb,
+ struct drm_vmw_fence_rep __user *user_fence_rep,
+ struct drm_clip_rect *clips,
+ struct drm_vmw_rect *vclips,
+ uint32_t num_clips,
+ int increment,
+ bool to_surface,
+ bool interruptible)
+{
+ struct vmw_dma_buffer *buf =
+ container_of(vfb, struct vmw_framebuffer_dmabuf, base)->buffer;
+ struct vmw_stdu_dirty ddirty;
+ int ret;
+
+ ret = vmw_kms_helper_buffer_prepare(dev_priv, buf, interruptible,
+ false);
+ if (ret)
+ return ret;
+
+ ddirty.transfer = (to_surface) ? SVGA3D_WRITE_HOST_VRAM :
+ SVGA3D_READ_HOST_VRAM;
+ ddirty.left = ddirty.top = S32_MAX;
+ ddirty.right = ddirty.bottom = S32_MIN;
+ ddirty.pitch = vfb->base.pitches[0];
+ ddirty.buf = buf;
+ ddirty.base.fifo_commit = vmw_stdu_dmabuf_fifo_commit;
+ ddirty.base.clip = vmw_stdu_dmabuf_clip;
+ ddirty.base.fifo_reserve_size = sizeof(struct vmw_stdu_dma) +
+ num_clips * sizeof(SVGA3dCopyBox) +
+ sizeof(SVGA3dCmdSurfaceDMASuffix);
+ if (to_surface)
+ ddirty.base.fifo_reserve_size += sizeof(struct vmw_stdu_update);
+
+ ret = vmw_kms_helper_dirty(dev_priv, vfb, clips, vclips,
+ 0, 0, num_clips, increment, &ddirty.base);
+ vmw_kms_helper_buffer_finish(dev_priv, file_priv, buf, NULL,
+ user_fence_rep);
+
+ return ret;
+}
+
+/**
+ * vmw_kms_stdu_surface_clip - Callback to encode a surface copy command cliprect
+ *
+ * @dirty: The closure structure.
+ *
+ * Encodes a surface copy command cliprect and updates the bounding box
+ * for the copy.
+ */
+static void vmw_kms_stdu_surface_clip(struct vmw_kms_dirty *dirty)
+{
+ struct vmw_stdu_dirty *sdirty =
+ container_of(dirty, struct vmw_stdu_dirty, base);
+ struct vmw_stdu_surface_copy *cmd = dirty->cmd;
+ struct vmw_screen_target_display_unit *stdu =
+ container_of(dirty->unit, typeof(*stdu), base);
+
+ if (sdirty->sid != stdu->display_srf->res.id) {
+ struct SVGA3dCopyBox *blit = (struct SVGA3dCopyBox *) &cmd[1];
+
+ blit += dirty->num_hits;
+ blit->srcx = dirty->fb_x;
+ blit->srcy = dirty->fb_y;
+ blit->x = dirty->unit_x1;
+ blit->y = dirty->unit_y1;
+ blit->d = 1;
+ blit->w = dirty->unit_x2 - dirty->unit_x1;
+ blit->h = dirty->unit_y2 - dirty->unit_y1;
+ }
+
+ dirty->num_hits++;
+
+ /* Destination bounding box */
+ sdirty->left = min_t(s32, sdirty->left, dirty->unit_x1);
+ sdirty->top = min_t(s32, sdirty->top, dirty->unit_y1);
+ sdirty->right = max_t(s32, sdirty->right, dirty->unit_x2);
+ sdirty->bottom = max_t(s32, sdirty->bottom, dirty->unit_y2);
+}
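
The callback above grows a running bounding box from sentinel values (S32_MAX/S32_MIN, reset again by the commit callback), so that after the last clip the box is exactly the union of all rects, regardless of their order. The same idiom in isolation, as a self-contained sketch (not vmwgfx code):

#include <linux/kernel.h>	/* min_t, max_t, S32_MAX, S32_MIN */

struct example_bbox {
	s32 left, top, right, bottom;
};

/* Start from an "empty" box that any real rect will override. */
static void example_bbox_init(struct example_bbox *b)
{
	b->left = b->top = S32_MAX;
	b->right = b->bottom = S32_MIN;
}

/* Grow the box to cover one more rect. */
static void example_bbox_add(struct example_bbox *b,
			     s32 x1, s32 y1, s32 x2, s32 y2)
{
	b->left = min_t(s32, b->left, x1);
	b->top = min_t(s32, b->top, y1);
	b->right = max_t(s32, b->right, x2);
	b->bottom = max_t(s32, b->bottom, y2);
}
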
+
+/**
+ * vmw_kms_stdu_surface_fifo_commit - Callback to fill in and submit a surface
+ * copy command.
+ *
+ * @dirty: The closure structure.
+ *
+ * Fills in the missing fields in a surface copy command, and encodes a screen
+ * target update command.
+ */
+static void vmw_kms_stdu_surface_fifo_commit(struct vmw_kms_dirty *dirty)
+{
+ struct vmw_stdu_dirty *sdirty =
+ container_of(dirty, struct vmw_stdu_dirty, base);
+ struct vmw_screen_target_display_unit *stdu =
+ container_of(dirty->unit, typeof(*stdu), base);
+ struct vmw_stdu_surface_copy *cmd = dirty->cmd;
+ struct vmw_stdu_update *update;
+ size_t blit_size = sizeof(SVGA3dCopyBox) * dirty->num_hits;
+ size_t commit_size;
+
+ if (!dirty->num_hits) {
+ vmw_fifo_commit(dirty->dev_priv, 0);
+ return;
+ }
+
+ if (sdirty->sid != stdu->display_srf->res.id) {
+ struct SVGA3dCopyBox *blit = (struct SVGA3dCopyBox *) &cmd[1];
+
+ cmd->header.id = SVGA_3D_CMD_SURFACE_COPY;
+ cmd->header.size = sizeof(cmd->body) + blit_size;
+ cmd->body.src.sid = sdirty->sid;
+ cmd->body.dest.sid = stdu->display_srf->res.id;
+ update = (struct vmw_stdu_update *) &blit[dirty->num_hits];
+ commit_size = sizeof(*cmd) + blit_size + sizeof(*update);
+ } else {
+ update = dirty->cmd;
+ commit_size = sizeof(*update);
+ }
+
+ vmw_stdu_populate_update(update, stdu->base.unit, sdirty->left,
+ sdirty->right, sdirty->top, sdirty->bottom);
+
+ vmw_fifo_commit(dirty->dev_priv, commit_size);
+
+ sdirty->left = sdirty->top = S32_MAX;
+ sdirty->right = sdirty->bottom = S32_MIN;
+}
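
Note the FIFO discipline at work here: fifo_reserve_size is always the worst case (copy header, all clip boxes, update command), while the commit size reflects what was actually encoded, and committing zero bytes simply releases an unused reservation. Condensed into a sketch, with a hypothetical encoder standing in for the clip/commit callbacks:

/* Sketch of the reserve-max/commit-actual pattern; the encoder
 * helper is hypothetical, only the vmw_fifo_* calls are real. */
static void example_fifo_submit(struct vmw_private *dev_priv,
				size_t max_size)
{
	size_t used;
	void *cmd = vmw_fifo_reserve(dev_priv, max_size);

	if (!cmd)
		return;	/* reservation failed; nothing to release */

	used = example_encode_commands(cmd, max_size);
	/* Committing less than reserved is fine; 0 releases it all. */
	vmw_fifo_commit(dev_priv, used);
}
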
+
+/**
+ * vmw_kms_stdu_surface_dirty - Dirty part of a surface backed framebuffer
+ *
+ * @dev_priv: Pointer to the device private structure.
+ * @framebuffer: Pointer to the surface-buffer backed framebuffer.
+ * @clips: Array of clip rects. Either @clips or @vclips must be NULL.
+ * @vclips: Alternate array of clip rects. Either @clips or @vclips must
+ * be NULL.
+ * @srf: Pointer to surface to blit from. If NULL, the surface attached
+ * to @framebuffer will be used.
+ * @dest_x: X coordinate offset to align @srf with framebuffer coordinates.
+ * @dest_y: Y coordinate offset to align @srf with framebuffer coordinates.
+ * @num_clips: Number of clip rects in @clips or @vclips.
+ * @inc: Increment to use when looping over @clips or @vclips.
+ * @out_fence: If non-NULL, will return a ref-counted pointer to a
+ * struct vmw_fence_obj. The returned fence pointer may be NULL in which
+ * case the device has already synchronized.
+ *
+ * Returns 0 on success, negative error code on failure. -ERESTARTSYS if
+ * interrupted.
+ */
+int vmw_kms_stdu_surface_dirty(struct vmw_private *dev_priv,
+ struct vmw_framebuffer *framebuffer,
+ struct drm_clip_rect *clips,
+ struct drm_vmw_rect *vclips,
+ struct vmw_resource *srf,
+ s32 dest_x,
+ s32 dest_y,
+ unsigned num_clips, int inc,
+ struct vmw_fence_obj **out_fence)
+{
+ struct vmw_framebuffer_surface *vfbs =
+ container_of(framebuffer, typeof(*vfbs), base);
+ struct vmw_stdu_dirty sdirty;
+ int ret;
+
+ if (!srf)
+ srf = &vfbs->surface->res;
+
+ ret = vmw_kms_helper_resource_prepare(srf, true);
+ if (ret)
+ return ret;
+
+ if (vfbs->is_dmabuf_proxy) {
+ ret = vmw_kms_update_proxy(srf, clips, num_clips, inc);
+ if (ret)
+ goto out_finish;
+ }
+
+ sdirty.base.fifo_commit = vmw_kms_stdu_surface_fifo_commit;
+ sdirty.base.clip = vmw_kms_stdu_surface_clip;
+ sdirty.base.fifo_reserve_size = sizeof(struct vmw_stdu_surface_copy) +
+ sizeof(SVGA3dCopyBox) * num_clips +
+ sizeof(struct vmw_stdu_update);
+ sdirty.sid = srf->id;
+ sdirty.left = sdirty.top = S32_MAX;
+ sdirty.right = sdirty.bottom = S32_MIN;
+
+ ret = vmw_kms_helper_dirty(dev_priv, framebuffer, clips, vclips,
+ dest_x, dest_y, num_clips, inc,
+ &sdirty.base);
+out_finish:
+ vmw_kms_helper_resource_finish(srf, out_fence);
+
+ return ret;
+}
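
As with the DMA path, a hypothetical internal caller would only fill in the clip rects and let vmw_kms_helper_dirty() drive the two callbacks installed above (sketch only; the wrapper name is an assumption):

/* Hypothetical sketch: mark one rect of a surface-backed fb dirty. */
static int example_surface_dirty_one(struct vmw_private *dev_priv,
				     struct vmw_framebuffer *framebuffer,
				     struct drm_clip_rect *rect)
{
	/*
	 * NULL srf selects the surface attached to the framebuffer;
	 * zero dest offsets, one clip, increment 1, no fence wanted.
	 */
	return vmw_kms_stdu_surface_dirty(dev_priv, framebuffer,
					  rect, NULL, NULL, 0, 0,
					  1, 1, NULL);
}
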
+
+
+/*
+ * Screen Target CRTC dispatch table
+ */
+static struct drm_crtc_funcs vmw_stdu_crtc_funcs = {
+ .save = vmw_du_crtc_save,
+ .restore = vmw_du_crtc_restore,
+ .cursor_set = vmw_du_crtc_cursor_set,
+ .cursor_move = vmw_du_crtc_cursor_move,
+ .gamma_set = vmw_du_crtc_gamma_set,
+ .destroy = vmw_stdu_crtc_destroy,
+ .set_config = vmw_stdu_crtc_set_config,
+ .page_flip = vmw_stdu_crtc_page_flip,
+};
+
+
+
+/******************************************************************************
+ * Screen Target Display Unit Encoder Functions
+ *****************************************************************************/
+
+/**
+ * vmw_stdu_encoder_destroy - cleans up the STDU
+ *
+ * @encoder: used to get the containing STDU
+ *
+ * vmwgfx cleans up crtc/encoder/connector all at the same time so technically
+ * this can be a no-op. Nevertheless, it doesn't hurt to have this in case
+ * the common KMS code changes and somehow vmw_stdu_crtc_destroy() doesn't
+ * get called.
+ */
+static void vmw_stdu_encoder_destroy(struct drm_encoder *encoder)
+{
+ vmw_stdu_destroy(vmw_encoder_to_stdu(encoder));
+}
+
+static struct drm_encoder_funcs vmw_stdu_encoder_funcs = {
+ .destroy = vmw_stdu_encoder_destroy,
+};
+
+
+
+/******************************************************************************
+ * Screen Target Display Unit Connector Functions
+ *****************************************************************************/
+
+/**
+ * vmw_stdu_connector_destroy - cleans up the STDU
+ *
+ * @connector: used to get the containing STDU
+ *
+ * vmwgfx cleans up crtc/encoder/connector all at the same time so technically
+ * this can be a no-op. Nevertheless, it doesn't hurt to have this in case
+ * the common KMS code changes and somehow vmw_stdu_crtc_destroy() doesn't
+ * get called.
+ */
+static void vmw_stdu_connector_destroy(struct drm_connector *connector)
+{
+ vmw_stdu_destroy(vmw_connector_to_stdu(connector));
+}
+
+
+
+static struct drm_connector_funcs vmw_stdu_connector_funcs = {
+ .dpms = vmw_du_connector_dpms,
+ .save = vmw_du_connector_save,
+ .restore = vmw_du_connector_restore,
+ .detect = vmw_du_connector_detect,
+ .fill_modes = vmw_du_connector_fill_modes,
+ .set_property = vmw_du_connector_set_property,
+ .destroy = vmw_stdu_connector_destroy,
+};
+
+
+
+/**
+ * vmw_stdu_init - Sets up a Screen Target Display Unit
+ *
+ * @dev_priv: VMW DRM device
+ * @unit: unit number, in the range 0 to VMWGFX_NUM_DISPLAY_UNITS - 1
+ *
+ * This function is called once per CRTC, and allocates one Screen Target
+ * display unit to represent that CRTC. Since the SVGA device does not separate
+ * out encoder and connector, they are represented as part of the STDU as well.
+ */
+static int vmw_stdu_init(struct vmw_private *dev_priv, unsigned unit)
+{
+ struct vmw_screen_target_display_unit *stdu;
+ struct drm_device *dev = dev_priv->dev;
+ struct drm_connector *connector;
+ struct drm_encoder *encoder;
+ struct drm_crtc *crtc;
+
+
+ stdu = kzalloc(sizeof(*stdu), GFP_KERNEL);
+ if (!stdu)
+ return -ENOMEM;
+
+ stdu->base.unit = unit;
+ crtc = &stdu->base.crtc;
+ encoder = &stdu->base.encoder;
+ connector = &stdu->base.connector;
+
+ stdu->base.pref_active = (unit == 0);
+ stdu->base.pref_width = dev_priv->initial_width;
+ stdu->base.pref_height = dev_priv->initial_height;
+ stdu->base.is_implicit = true;
+
+ drm_connector_init(dev, connector, &vmw_stdu_connector_funcs,
+ DRM_MODE_CONNECTOR_VIRTUAL);
+ connector->status = vmw_du_connector_detect(connector, false);
+
+ drm_encoder_init(dev, encoder, &vmw_stdu_encoder_funcs,
+ DRM_MODE_ENCODER_VIRTUAL);
+ drm_mode_connector_attach_encoder(connector, encoder);
+ encoder->possible_crtcs = (1 << unit);
+ encoder->possible_clones = 0;
+
+ (void) drm_connector_register(connector);
+
+ drm_crtc_init(dev, crtc, &vmw_stdu_crtc_funcs);
+
+ drm_mode_crtc_set_gamma_size(crtc, 256);
+
+ drm_object_attach_property(&connector->base,
+ dev->mode_config.dirty_info_property,
+ 1);
+
+ return 0;
+}
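
Since the SVGA device exposes no separate encoder or connector, each STDU embeds all three KMS objects in its base display unit; the relationship initialized above is roughly this (a simplified sketch, not the exact vmwgfx struct definition):

/* Simplified sketch; the real struct vmw_display_unit has more fields. */
struct example_display_unit {
	struct drm_crtc crtc;		/* one CRTC per unit */
	struct drm_encoder encoder;	/* virtual encoder, 1:1 with the CRTC */
	struct drm_connector connector;	/* virtual connector, also 1:1 */
	unsigned unit;			/* 0 .. VMWGFX_NUM_DISPLAY_UNITS - 1 */
};
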
+
+
+
+/**
+ * vmw_stdu_destroy - Cleans up a vmw_screen_target_display_unit
+ *
+ * @stdu: Screen Target Display Unit to be destroyed
+ *
+ * Clean up after vmw_stdu_init
+ */
+static void vmw_stdu_destroy(struct vmw_screen_target_display_unit *stdu)
+{
+ vmw_stdu_unpin_display(stdu);
+
+ vmw_du_cleanup(&stdu->base);
+ kfree(stdu);
+}
+
+
+
+/******************************************************************************
+ * Screen Target Display KMS Functions
+ *
+ * These functions are called by the common KMS code in vmwgfx_kms.c
+ *****************************************************************************/
+
+/**
+ * vmw_kms_stdu_init_display - Initializes a Screen Target based display
+ *
+ * @dev_priv: VMW DRM device
+ *
+ * This function initializes a Screen Target based display device. It checks
+ * the capability bits to make sure the underlying hardware can support
+ * screen targets, and then creates the maximum number of CRTCs, a.k.a. Display
+ * Units, as supported by the display hardware.
+ *
+ * RETURNS:
+ * 0 on success, error code otherwise
+ */
+int vmw_kms_stdu_init_display(struct vmw_private *dev_priv)
+{
+ struct drm_device *dev = dev_priv->dev;
+ int i, ret;
+
+
+ /* Do nothing if Screen Target support is turned off */
+ if (!VMWGFX_ENABLE_SCREEN_TARGET_OTABLE)
+ return -ENOSYS;
+
+ if (!(dev_priv->capabilities & SVGA_CAP_GBOBJECTS))
+ return -ENOSYS;
+
+ ret = drm_vblank_init(dev, VMWGFX_NUM_DISPLAY_UNITS);
+ if (unlikely(ret != 0))
+ return ret;
+
+ ret = drm_mode_create_dirty_info_property(dev);
+ if (unlikely(ret != 0))
+ goto err_vblank_cleanup;
+
+ dev_priv->active_display_unit = vmw_du_screen_target;
+
+ for (i = 0; i < VMWGFX_NUM_DISPLAY_UNITS; ++i) {
+ ret = vmw_stdu_init(dev_priv, i);
+
+ if (unlikely(ret != 0)) {
+ DRM_ERROR("Failed to initialize STDU %d\n", i);
+ goto err_vblank_cleanup;
+ }
+ }
+
+ DRM_INFO("Screen Target Display device initialized\n");
+
+ return 0;
+
+err_vblank_cleanup:
+ drm_vblank_cleanup(dev);
+ return ret;
+}
+
+
+
+/**
+ * vmw_kms_stdu_close_display - Cleans up after vmw_kms_stdu_init_display
+ *
+ * @dev_priv: VMW DRM device
+ *
+ * Frees up any resources allocated by vmw_kms_stdu_init_display
+ *
+ * RETURNS:
+ * 0 on success
+ */
+int vmw_kms_stdu_close_display(struct vmw_private *dev_priv)
+{
+ struct drm_device *dev = dev_priv->dev;
+
+ drm_vblank_cleanup(dev);
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
index 4ecdbf3e59da..3361769842f4 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
@@ -1,6 +1,6 @@
/**************************************************************************
*
- * Copyright © 2009-2012 VMware, Inc., Palo Alto, CA., USA
+ * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -27,8 +27,11 @@
#include "vmwgfx_drv.h"
#include "vmwgfx_resource_priv.h"
+#include "vmwgfx_so.h"
+#include "vmwgfx_binding.h"
#include <ttm/ttm_placement.h>
-#include "svga3d_surfacedefs.h"
+#include "device_include/svga3d_surfacedefs.h"
+
/**
* struct vmw_user_surface - User-space visible surface resource
@@ -36,7 +39,7 @@
* @base: The TTM base object handling user-space visibility.
* @srf: The surface metadata.
* @size: TTM accounting size for the surface.
- * @master: master of the creating client. Used for security check.
+ * @master: master of the creating client. Used for security check.
*/
struct vmw_user_surface {
struct ttm_prime_object prime;
@@ -220,7 +223,7 @@ static void vmw_surface_define_encode(const struct vmw_surface *srf,
cmd->header.size = cmd_len;
cmd->body.sid = srf->res.id;
cmd->body.surfaceFlags = srf->flags;
- cmd->body.format = cpu_to_le32(srf->format);
+ cmd->body.format = srf->format;
for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i)
cmd->body.face[i].numMipLevels = srf->mip_levels[i];
@@ -340,7 +343,7 @@ static void vmw_hw_surface_destroy(struct vmw_resource *res)
dev_priv->used_memory_size -= res->backup_size;
mutex_unlock(&dev_priv->cmdbuf_mutex);
}
- vmw_3d_resource_dec(dev_priv, false);
+ vmw_fifo_resource_dec(dev_priv);
}
/**
@@ -576,14 +579,14 @@ static int vmw_surface_init(struct vmw_private *dev_priv,
BUG_ON(res_free == NULL);
if (!dev_priv->has_mob)
- (void) vmw_3d_resource_inc(dev_priv, false);
+ vmw_fifo_resource_inc(dev_priv);
ret = vmw_resource_init(dev_priv, res, true, res_free,
(dev_priv->has_mob) ? &vmw_gb_surface_func :
&vmw_legacy_surface_func);
if (unlikely(ret != 0)) {
if (!dev_priv->has_mob)
- vmw_3d_resource_dec(dev_priv, false);
+ vmw_fifo_resource_dec(dev_priv);
res_free(res);
return ret;
}
@@ -593,6 +596,7 @@ static int vmw_surface_init(struct vmw_private *dev_priv,
* surface validate.
*/
+ INIT_LIST_HEAD(&srf->view_list);
vmw_resource_activate(res, vmw_hw_surface_destroy);
return ret;
}
@@ -723,6 +727,7 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
desc = svga3dsurface_get_desc(req->format);
if (unlikely(desc->block_desc == SVGA3DBLOCKDESC_NONE)) {
DRM_ERROR("Invalid surface format for surface creation.\n");
+ DRM_ERROR("Format requested is: %d\n", req->format);
return -EINVAL;
}
@@ -906,6 +911,12 @@ vmw_surface_handle_reference(struct vmw_private *dev_priv,
"surface reference.\n");
return -EACCES;
}
+ if (ACCESS_ONCE(vmw_fpriv(file_priv)->locked_master)) {
+ DRM_ERROR("Locked master refused legacy "
+ "surface reference.\n");
+ return -EACCES;
+ }
+
handle = u_handle;
}
@@ -1018,17 +1029,21 @@ static int vmw_gb_surface_create(struct vmw_resource *res)
{
struct vmw_private *dev_priv = res->dev_priv;
struct vmw_surface *srf = vmw_res_to_srf(res);
- uint32_t cmd_len, submit_len;
+ uint32_t cmd_len, cmd_id, submit_len;
int ret;
struct {
SVGA3dCmdHeader header;
SVGA3dCmdDefineGBSurface body;
} *cmd;
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDefineGBSurface_v2 body;
+ } *cmd2;
if (likely(res->id != -1))
return 0;
- (void) vmw_3d_resource_inc(dev_priv, false);
+ vmw_fifo_resource_inc(dev_priv);
ret = vmw_resource_alloc_id(res);
if (unlikely(ret != 0)) {
DRM_ERROR("Failed to allocate a surface id.\n");
@@ -1040,9 +1055,19 @@ static int vmw_gb_surface_create(struct vmw_resource *res)
goto out_no_fifo;
}
- cmd_len = sizeof(cmd->body);
- submit_len = sizeof(*cmd);
+ if (srf->array_size > 0) {
+ /* has_dx checked on creation time. */
+ cmd_id = SVGA_3D_CMD_DEFINE_GB_SURFACE_V2;
+ cmd_len = sizeof(cmd2->body);
+ submit_len = sizeof(*cmd2);
+ } else {
+ cmd_id = SVGA_3D_CMD_DEFINE_GB_SURFACE;
+ cmd_len = sizeof(cmd->body);
+ submit_len = sizeof(*cmd);
+ }
+
cmd = vmw_fifo_reserve(dev_priv, submit_len);
+ cmd2 = (typeof(cmd2))cmd;
if (unlikely(cmd == NULL)) {
DRM_ERROR("Failed reserving FIFO space for surface "
"creation.\n");
@@ -1050,17 +1075,33 @@ static int vmw_gb_surface_create(struct vmw_resource *res)
goto out_no_fifo;
}
- cmd->header.id = SVGA_3D_CMD_DEFINE_GB_SURFACE;
- cmd->header.size = cmd_len;
- cmd->body.sid = srf->res.id;
- cmd->body.surfaceFlags = srf->flags;
- cmd->body.format = cpu_to_le32(srf->format);
- cmd->body.numMipLevels = srf->mip_levels[0];
- cmd->body.multisampleCount = srf->multisample_count;
- cmd->body.autogenFilter = srf->autogen_filter;
- cmd->body.size.width = srf->base_size.width;
- cmd->body.size.height = srf->base_size.height;
- cmd->body.size.depth = srf->base_size.depth;
+ if (srf->array_size > 0) {
+ cmd2->header.id = cmd_id;
+ cmd2->header.size = cmd_len;
+ cmd2->body.sid = srf->res.id;
+ cmd2->body.surfaceFlags = srf->flags;
+ cmd2->body.format = cpu_to_le32(srf->format);
+ cmd2->body.numMipLevels = srf->mip_levels[0];
+ cmd2->body.multisampleCount = srf->multisample_count;
+ cmd2->body.autogenFilter = srf->autogen_filter;
+ cmd2->body.size.width = srf->base_size.width;
+ cmd2->body.size.height = srf->base_size.height;
+ cmd2->body.size.depth = srf->base_size.depth;
+ cmd2->body.arraySize = srf->array_size;
+ } else {
+ cmd->header.id = cmd_id;
+ cmd->header.size = cmd_len;
+ cmd->body.sid = srf->res.id;
+ cmd->body.surfaceFlags = srf->flags;
+ cmd->body.format = cpu_to_le32(srf->format);
+ cmd->body.numMipLevels = srf->mip_levels[0];
+ cmd->body.multisampleCount = srf->multisample_count;
+ cmd->body.autogenFilter = srf->autogen_filter;
+ cmd->body.size.width = srf->base_size.width;
+ cmd->body.size.height = srf->base_size.height;
+ cmd->body.size.depth = srf->base_size.depth;
+ }
+
vmw_fifo_commit(dev_priv, submit_len);
return 0;
@@ -1068,7 +1109,7 @@ static int vmw_gb_surface_create(struct vmw_resource *res)
out_no_fifo:
vmw_resource_release_id(res);
out_no_id:
- vmw_3d_resource_dec(dev_priv, false);
+ vmw_fifo_resource_dec(dev_priv);
return ret;
}
@@ -1188,6 +1229,7 @@ static int vmw_gb_surface_unbind(struct vmw_resource *res,
static int vmw_gb_surface_destroy(struct vmw_resource *res)
{
struct vmw_private *dev_priv = res->dev_priv;
+ struct vmw_surface *srf = vmw_res_to_srf(res);
struct {
SVGA3dCmdHeader header;
SVGA3dCmdDestroyGBSurface body;
@@ -1197,7 +1239,8 @@ static int vmw_gb_surface_destroy(struct vmw_resource *res)
return 0;
mutex_lock(&dev_priv->binding_mutex);
- vmw_context_binding_res_list_scrub(&res->binding_head);
+ vmw_view_surface_list_destroy(dev_priv, &srf->view_list);
+ vmw_binding_res_list_scrub(&res->binding_head);
cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
if (unlikely(cmd == NULL)) {
@@ -1213,11 +1256,12 @@ static int vmw_gb_surface_destroy(struct vmw_resource *res)
vmw_fifo_commit(dev_priv, sizeof(*cmd));
mutex_unlock(&dev_priv->binding_mutex);
vmw_resource_release_id(res);
- vmw_3d_resource_dec(dev_priv, false);
+ vmw_fifo_resource_dec(dev_priv);
return 0;
}
+
/**
* vmw_gb_surface_define_ioctl - Ioctl function implementing
* the user surface define functionality.
@@ -1241,77 +1285,51 @@ int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data,
struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
int ret;
uint32_t size;
- const struct svga3d_surface_desc *desc;
uint32_t backup_handle;
+
if (unlikely(vmw_user_surface_size == 0))
vmw_user_surface_size = ttm_round_pot(sizeof(*user_srf)) +
128;
size = vmw_user_surface_size + 128;
- desc = svga3dsurface_get_desc(req->format);
- if (unlikely(desc->block_desc == SVGA3DBLOCKDESC_NONE)) {
- DRM_ERROR("Invalid surface format for surface creation.\n");
- return -EINVAL;
- }
-
- ret = ttm_read_lock(&dev_priv->reservation_sem, true);
+ /* Define a surface based on the parameters. */
+ ret = vmw_surface_gb_priv_define(dev,
+ size,
+ req->svga3d_flags,
+ req->format,
+ req->drm_surface_flags & drm_vmw_surface_flag_scanout,
+ req->mip_levels,
+ req->multisample_count,
+ req->array_size,
+ req->base_size,
+ &srf);
if (unlikely(ret != 0))
return ret;
- ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
- size, false, true);
- if (unlikely(ret != 0)) {
- if (ret != -ERESTARTSYS)
- DRM_ERROR("Out of graphics memory for surface"
- " creation.\n");
- goto out_unlock;
- }
-
- user_srf = kzalloc(sizeof(*user_srf), GFP_KERNEL);
- if (unlikely(user_srf == NULL)) {
- ret = -ENOMEM;
- goto out_no_user_srf;
- }
-
- srf = &user_srf->srf;
- res = &srf->res;
-
- srf->flags = req->svga3d_flags;
- srf->format = req->format;
- srf->scanout = req->drm_surface_flags & drm_vmw_surface_flag_scanout;
- srf->mip_levels[0] = req->mip_levels;
- srf->num_sizes = 1;
- srf->sizes = NULL;
- srf->offsets = NULL;
- user_srf->size = size;
- srf->base_size = req->base_size;
- srf->autogen_filter = SVGA3D_TEX_FILTER_NONE;
- srf->multisample_count = req->multisample_count;
- res->backup_size = svga3dsurface_get_serialized_size
- (srf->format, srf->base_size, srf->mip_levels[0],
- srf->flags & SVGA3D_SURFACE_CUBEMAP);
-
- user_srf->prime.base.shareable = false;
- user_srf->prime.base.tfile = NULL;
+ user_srf = container_of(srf, struct vmw_user_surface, srf);
if (drm_is_primary_client(file_priv))
user_srf->master = drm_master_get(file_priv->master);
- /**
- * From this point, the generic resource management functions
- * destroy the object on failure.
- */
-
- ret = vmw_surface_init(dev_priv, srf, vmw_user_surface_free);
+ ret = ttm_read_lock(&dev_priv->reservation_sem, true);
if (unlikely(ret != 0))
- goto out_unlock;
+ return ret;
+
+ res = &user_srf->srf.res;
+
if (req->buffer_handle != SVGA3D_INVALID_ID) {
ret = vmw_user_dmabuf_lookup(tfile, req->buffer_handle,
&res->backup);
- } else if (req->drm_surface_flags &
- drm_vmw_surface_flag_create_buffer)
+ if (ret == 0 && res->backup->base.num_pages * PAGE_SIZE <
+ res->backup_size) {
+ DRM_ERROR("Surface backup buffer is too small.\n");
+ vmw_dmabuf_unreference(&res->backup);
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+ } else if (req->drm_surface_flags & drm_vmw_surface_flag_create_buffer)
ret = vmw_user_dmabuf_alloc(dev_priv, tfile,
res->backup_size,
req->drm_surface_flags &
@@ -1324,7 +1342,7 @@ int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data,
goto out_unlock;
}
- tmp = vmw_resource_reference(&srf->res);
+ tmp = vmw_resource_reference(res);
ret = ttm_prime_object_init(tfile, res->backup_size, &user_srf->prime,
req->drm_surface_flags &
drm_vmw_surface_flag_shareable,
@@ -1337,7 +1355,7 @@ int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data,
goto out_unlock;
}
- rep->handle = user_srf->prime.base.hash.key;
+ rep->handle = user_srf->prime.base.hash.key;
rep->backup_size = res->backup_size;
if (res->backup) {
rep->buffer_map_handle =
@@ -1352,10 +1370,6 @@ int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data,
vmw_resource_unreference(&res);
- ttm_read_unlock(&dev_priv->reservation_sem);
- return 0;
-out_no_user_srf:
- ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
out_unlock:
ttm_read_unlock(&dev_priv->reservation_sem);
return ret;
@@ -1415,6 +1429,7 @@ int vmw_gb_surface_reference_ioctl(struct drm_device *dev, void *data,
rep->creq.drm_surface_flags = 0;
rep->creq.multisample_count = srf->multisample_count;
rep->creq.autogen_filter = srf->autogen_filter;
+ rep->creq.array_size = srf->array_size;
rep->creq.buffer_handle = backup_handle;
rep->creq.base_size = srf->base_size;
rep->crep.handle = user_srf->prime.base.hash.key;
@@ -1429,3 +1444,137 @@ out_bad_resource:
return ret;
}
+
+/**
+ * vmw_surface_gb_priv_define - Define a private GB surface
+ *
+ * @dev: Pointer to a struct drm_device
+ * @user_accounting_size: Used to track user-space memory usage; set
+ * to 0 for kernel-mode only memory
+ * @svga3d_flags: SVGA3d surface flags for the device
+ * @format: requested surface format
+ * @for_scanout: true if intended to be used as a scanout buffer
+ * @num_mip_levels: number of MIP levels
+ * @multisample_count: Surface multisample count
+ * @array_size: Surface array size.
+ * @size: width, height, depth of the requested surface
+ * @srf_out: Pointer to the allocated surface. Set to NULL on failure.
+ *
+ * GB surfaces allocated by this function will not have a user mode handle, and
+ * thus will only be visible to vmwgfx. For optimization reasons the
+ * surface may later be given a user mode handle by another function to make
+ * it available to user mode drivers.
+ */
+int vmw_surface_gb_priv_define(struct drm_device *dev,
+ uint32_t user_accounting_size,
+ uint32_t svga3d_flags,
+ SVGA3dSurfaceFormat format,
+ bool for_scanout,
+ uint32_t num_mip_levels,
+ uint32_t multisample_count,
+ uint32_t array_size,
+ struct drm_vmw_size size,
+ struct vmw_surface **srf_out)
+{
+ struct vmw_private *dev_priv = vmw_priv(dev);
+ struct vmw_user_surface *user_srf;
+ struct vmw_surface *srf;
+ int ret;
+ u32 num_layers;
+
+ *srf_out = NULL;
+
+ if (for_scanout) {
+ if (!svga3dsurface_is_screen_target_format(format)) {
+ DRM_ERROR("Invalid Screen Target surface format.\n");
+ return -EINVAL;
+ }
+ } else {
+ const struct svga3d_surface_desc *desc;
+
+ desc = svga3dsurface_get_desc(format);
+ if (unlikely(desc->block_desc == SVGA3DBLOCKDESC_NONE)) {
+ DRM_ERROR("Invalid surface format.\n");
+ return -EINVAL;
+ }
+ }
+
+ /* array_size must be zero for non-DX hosts. */
+ if (array_size > 0 && !dev_priv->has_dx) {
+ DRM_ERROR("Tried to create DX surface on non-DX host.\n");
+ return -EINVAL;
+ }
+
+ ret = ttm_read_lock(&dev_priv->reservation_sem, true);
+ if (unlikely(ret != 0))
+ return ret;
+
+ ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
+ user_accounting_size, false, true);
+ if (unlikely(ret != 0)) {
+ if (ret != -ERESTARTSYS)
+ DRM_ERROR("Out of graphics memory for surface"
+ " creation.\n");
+ goto out_unlock;
+ }
+
+ user_srf = kzalloc(sizeof(*user_srf), GFP_KERNEL);
+ if (unlikely(user_srf == NULL)) {
+ ret = -ENOMEM;
+ goto out_no_user_srf;
+ }
+
+ *srf_out = &user_srf->srf;
+ user_srf->size = user_accounting_size;
+ user_srf->prime.base.shareable = false;
+ user_srf->prime.base.tfile = NULL;
+
+ srf = &user_srf->srf;
+ srf->flags = svga3d_flags;
+ srf->format = format;
+ srf->scanout = for_scanout;
+ srf->mip_levels[0] = num_mip_levels;
+ srf->num_sizes = 1;
+ srf->sizes = NULL;
+ srf->offsets = NULL;
+ srf->base_size = size;
+ srf->autogen_filter = SVGA3D_TEX_FILTER_NONE;
+ srf->array_size = array_size;
+ srf->multisample_count = multisample_count;
+
+ if (array_size)
+ num_layers = array_size;
+ else if (svga3d_flags & SVGA3D_SURFACE_CUBEMAP)
+ num_layers = SVGA3D_MAX_SURFACE_FACES;
+ else
+ num_layers = 1;
+
+ srf->res.backup_size =
+ svga3dsurface_get_serialized_size(srf->format,
+ srf->base_size,
+ srf->mip_levels[0],
+ num_layers);
+
+ if (srf->flags & SVGA3D_SURFACE_BIND_STREAM_OUTPUT)
+ srf->res.backup_size += sizeof(SVGA3dDXSOState);
+
+ if (dev_priv->active_display_unit == vmw_du_screen_target &&
+ for_scanout)
+ srf->flags |= SVGA3D_SURFACE_SCREENTARGET;
+
+ /*
+ * From this point, the generic resource management functions
+ * destroy the object on failure.
+ */
+ ret = vmw_surface_init(dev_priv, srf, vmw_user_surface_free);
+
+ ttm_read_unlock(&dev_priv->reservation_sem);
+ return ret;
+
+out_no_user_srf:
+ ttm_mem_global_free(vmw_mem_glob(dev_priv), user_accounting_size);
+
+out_unlock:
+ ttm_read_unlock(&dev_priv->reservation_sem);
+ return ret;
+}
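
The backup_size computation above works out to one serialized mip chain per layer: array_size layers for array surfaces, SVGA3D_MAX_SURFACE_FACES for cube maps, otherwise one. For illustration, a kernel-internal user could define a throwaway scanout surface as in the sketch below; the wrapper, flag values and dimensions are made up for the example, and only the call into vmw_surface_gb_priv_define() is real.

/* Hypothetical sketch of an in-kernel caller; values are examples. */
static int example_define_scanout_srf(struct drm_device *dev,
				      struct vmw_surface **srf_out)
{
	struct drm_vmw_size size = {
		.width = 1024, .height = 768, .depth = 1,
	};

	/*
	 * Accounting size 0 (kernel-only), no extra SVGA3d flags,
	 * X8R8G8B8 (a valid screen target format), scanout, one mip
	 * level, no multisampling, array_size 0 so this also works
	 * on non-DX hosts.
	 */
	return vmw_surface_gb_priv_define(dev, 0, 0, SVGA3D_X8R8G8B8,
					  true, 1, 0, 0, size, srf_out);
}
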
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c
index 98d6bfb3a997..e771091d2cd3 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c
@@ -1,6 +1,6 @@
/**************************************************************************
*
- * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
+ * Copyright © 2009-2011 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a