Merge remote-tracking branch 'public/master' into vulkan
author Jason Ekstrand <jason.ekstrand@intel.com>
Tue, 15 Mar 2016 21:09:50 +0000 (14:09 -0700)
committer Jason Ekstrand <jason.ekstrand@intel.com>
Tue, 15 Mar 2016 21:09:50 +0000 (14:09 -0700)
172 files changed:
README.intel-vulkan.txt [new file with mode: 0644]
configure.ac
include/vulkan/vk_icd.h [new file with mode: 0644]
include/vulkan/vk_platform.h [new file with mode: 0644]
include/vulkan/vulkan.h [new file with mode: 0644]
include/vulkan/vulkan_intel.h [new file with mode: 0644]
src/Makefile.am
src/compiler/Makefile.am
src/compiler/Makefile.sources
src/compiler/glsl/.gitignore
src/compiler/glsl/Makefile.am
src/compiler/glsl/Makefile.sources
src/compiler/glsl/glsl_parser_extras.cpp
src/compiler/glsl/standalone_scaffolding.cpp
src/compiler/nir/Makefile.sources
src/compiler/nir/glsl_to_nir.cpp
src/compiler/nir/nir.c
src/compiler/nir/nir.h
src/compiler/nir/nir_builder.h
src/compiler/nir/nir_clone.c
src/compiler/nir/nir_control_flow.c
src/compiler/nir/nir_dominance.c
src/compiler/nir/nir_gather_info.c [new file with mode: 0644]
src/compiler/nir/nir_inline_functions.c [new file with mode: 0644]
src/compiler/nir/nir_intrinsics.h
src/compiler/nir/nir_lower_atomics.c
src/compiler/nir/nir_lower_io.c
src/compiler/nir/nir_lower_outputs_to_temporaries.c
src/compiler/nir/nir_lower_returns.c [new file with mode: 0644]
src/compiler/nir/nir_lower_system_values.c
src/compiler/nir/nir_lower_vars_to_ssa.c
src/compiler/nir/nir_opcodes.py
src/compiler/nir/nir_opt_algebraic.py
src/compiler/nir/nir_phi_builder.c [new file with mode: 0644]
src/compiler/nir/nir_phi_builder.h [new file with mode: 0644]
src/compiler/nir/nir_print.c
src/compiler/nir/nir_remove_dead_variables.c
src/compiler/nir/nir_repair_ssa.c [new file with mode: 0644]
src/compiler/nir/nir_sweep.c
src/compiler/nir/nir_validate.c
src/compiler/nir/spirv/GLSL.std.450.h [new file with mode: 0644]
src/compiler/nir/spirv/nir_spirv.h [new file with mode: 0644]
src/compiler/nir/spirv/spirv.h [new file with mode: 0644]
src/compiler/nir/spirv/spirv_to_nir.c [new file with mode: 0644]
src/compiler/nir/spirv/vtn_alu.c [new file with mode: 0644]
src/compiler/nir/spirv/vtn_cfg.c [new file with mode: 0644]
src/compiler/nir/spirv/vtn_glsl450.c [new file with mode: 0644]
src/compiler/nir/spirv/vtn_private.h [new file with mode: 0644]
src/compiler/nir/spirv/vtn_variables.c [new file with mode: 0644]
src/compiler/nir/spirv2nir.c [new file with mode: 0644]
src/compiler/nir_types.cpp
src/compiler/nir_types.h
src/compiler/shader_enums.c
src/compiler/shader_enums.h
src/intel/Makefile.am [new file with mode: 0644]
src/intel/genxml/.gitignore [new file with mode: 0644]
src/intel/genxml/Makefile.am [new file with mode: 0644]
src/intel/genxml/README [new file with mode: 0644]
src/intel/genxml/gen7.xml [new file with mode: 0644]
src/intel/genxml/gen75.xml [new file with mode: 0644]
src/intel/genxml/gen8.xml [new file with mode: 0644]
src/intel/genxml/gen9.xml [new file with mode: 0644]
src/intel/genxml/genX_pack.h [new file with mode: 0644]
src/intel/genxml/gen_macros.h [new file with mode: 0644]
src/intel/genxml/gen_pack_header.py [new file with mode: 0755]
src/intel/isl/.gitignore [new file with mode: 0644]
src/intel/isl/Makefile.am [new file with mode: 0644]
src/intel/isl/README [new file with mode: 0644]
src/intel/isl/isl.c [new file with mode: 0644]
src/intel/isl/isl.h [new file with mode: 0644]
src/intel/isl/isl_format.c [new file with mode: 0644]
src/intel/isl/isl_format_layout.csv [new file with mode: 0644]
src/intel/isl/isl_format_layout_gen.bash [new file with mode: 0755]
src/intel/isl/isl_gen4.c [new file with mode: 0644]
src/intel/isl/isl_gen4.h [new file with mode: 0644]
src/intel/isl/isl_gen6.c [new file with mode: 0644]
src/intel/isl/isl_gen6.h [new file with mode: 0644]
src/intel/isl/isl_gen7.c [new file with mode: 0644]
src/intel/isl/isl_gen7.h [new file with mode: 0644]
src/intel/isl/isl_gen8.c [new file with mode: 0644]
src/intel/isl/isl_gen8.h [new file with mode: 0644]
src/intel/isl/isl_gen9.c [new file with mode: 0644]
src/intel/isl/isl_gen9.h [new file with mode: 0644]
src/intel/isl/isl_priv.h [new file with mode: 0644]
src/intel/isl/isl_storage_image.c [new file with mode: 0644]
src/intel/isl/isl_surface_state.c [new file with mode: 0644]
src/intel/isl/tests/.gitignore [new file with mode: 0644]
src/intel/isl/tests/isl_surf_get_image_offset_test.c [new file with mode: 0644]
src/intel/vulkan/.gitignore [new file with mode: 0644]
src/intel/vulkan/Makefile.am [new file with mode: 0644]
src/intel/vulkan/anv_allocator.c [new file with mode: 0644]
src/intel/vulkan/anv_batch_chain.c [new file with mode: 0644]
src/intel/vulkan/anv_cmd_buffer.c [new file with mode: 0644]
src/intel/vulkan/anv_descriptor_set.c [new file with mode: 0644]
src/intel/vulkan/anv_device.c [new file with mode: 0644]
src/intel/vulkan/anv_dump.c [new file with mode: 0644]
src/intel/vulkan/anv_entrypoints_gen.py [new file with mode: 0644]
src/intel/vulkan/anv_formats.c [new file with mode: 0644]
src/intel/vulkan/anv_gem.c [new file with mode: 0644]
src/intel/vulkan/anv_gem_stubs.c [new file with mode: 0644]
src/intel/vulkan/anv_genX.h [new file with mode: 0644]
src/intel/vulkan/anv_image.c [new file with mode: 0644]
src/intel/vulkan/anv_intel.c [new file with mode: 0644]
src/intel/vulkan/anv_meta.c [new file with mode: 0644]
src/intel/vulkan/anv_meta.h [new file with mode: 0644]
src/intel/vulkan/anv_meta_blit.c [new file with mode: 0644]
src/intel/vulkan/anv_meta_blit2d.c [new file with mode: 0644]
src/intel/vulkan/anv_meta_clear.c [new file with mode: 0644]
src/intel/vulkan/anv_meta_copy.c [new file with mode: 0644]
src/intel/vulkan/anv_meta_resolve.c [new file with mode: 0644]
src/intel/vulkan/anv_nir.h [new file with mode: 0644]
src/intel/vulkan/anv_nir_apply_dynamic_offsets.c [new file with mode: 0644]
src/intel/vulkan/anv_nir_apply_pipeline_layout.c [new file with mode: 0644]
src/intel/vulkan/anv_nir_lower_push_constants.c [new file with mode: 0644]
src/intel/vulkan/anv_pass.c [new file with mode: 0644]
src/intel/vulkan/anv_pipeline.c [new file with mode: 0644]
src/intel/vulkan/anv_pipeline_cache.c [new file with mode: 0644]
src/intel/vulkan/anv_private.h [new file with mode: 0644]
src/intel/vulkan/anv_query.c [new file with mode: 0644]
src/intel/vulkan/anv_util.c [new file with mode: 0644]
src/intel/vulkan/anv_wsi.c [new file with mode: 0644]
src/intel/vulkan/anv_wsi.h [new file with mode: 0644]
src/intel/vulkan/anv_wsi_wayland.c [new file with mode: 0644]
src/intel/vulkan/anv_wsi_x11.c [new file with mode: 0644]
src/intel/vulkan/dev_icd.json.in [new file with mode: 0644]
src/intel/vulkan/gen7_cmd_buffer.c [new file with mode: 0644]
src/intel/vulkan/gen7_pipeline.c [new file with mode: 0644]
src/intel/vulkan/gen8_cmd_buffer.c [new file with mode: 0644]
src/intel/vulkan/gen8_pipeline.c [new file with mode: 0644]
src/intel/vulkan/genX_cmd_buffer.c [new file with mode: 0644]
src/intel/vulkan/genX_pipeline.c [new file with mode: 0644]
src/intel/vulkan/genX_pipeline_util.h [new file with mode: 0644]
src/intel/vulkan/genX_state.c [new file with mode: 0644]
src/intel/vulkan/intel_icd.json.in [new file with mode: 0644]
src/intel/vulkan/tests/.gitignore [new file with mode: 0644]
src/intel/vulkan/tests/Makefile.am [new file with mode: 0644]
src/intel/vulkan/tests/block_pool_no_free.c [new file with mode: 0644]
src/intel/vulkan/tests/state_pool.c [new file with mode: 0644]
src/intel/vulkan/tests/state_pool_free_list_only.c [new file with mode: 0644]
src/intel/vulkan/tests/state_pool_no_free.c [new file with mode: 0644]
src/intel/vulkan/tests/state_pool_test_helper.h [new file with mode: 0644]
src/mesa/drivers/dri/i965/Makefile.sources
src/mesa/drivers/dri/i965/brw_compiler.c
src/mesa/drivers/dri/i965/brw_compiler.h
src/mesa/drivers/dri/i965/brw_defines.h
src/mesa/drivers/dri/i965/brw_device_info.c
src/mesa/drivers/dri/i965/brw_device_info.h
src/mesa/drivers/dri/i965/brw_draw.c
src/mesa/drivers/dri/i965/brw_fs.cpp
src/mesa/drivers/dri/i965/brw_fs.h
src/mesa/drivers/dri/i965/brw_fs_generator.cpp
src/mesa/drivers/dri/i965/brw_fs_nir.cpp
src/mesa/drivers/dri/i965/brw_fs_reg_allocate.cpp
src/mesa/drivers/dri/i965/brw_fs_surface_builder.cpp
src/mesa/drivers/dri/i965/brw_fs_visitor.cpp
src/mesa/drivers/dri/i965/brw_ir_fs.h
src/mesa/drivers/dri/i965/brw_nir.c
src/mesa/drivers/dri/i965/brw_nir.h
src/mesa/drivers/dri/i965/brw_program.c
src/mesa/drivers/dri/i965/brw_surface_formats.c
src/mesa/drivers/dri/i965/brw_surface_formats.h [new file with mode: 0644]
src/mesa/drivers/dri/i965/brw_util.c
src/mesa/drivers/dri/i965/brw_vec4.cpp
src/mesa/drivers/dri/i965/brw_vec4.h
src/mesa/drivers/dri/i965/brw_vec4_generator.cpp
src/mesa/drivers/dri/i965/brw_vec4_nir.cpp
src/mesa/drivers/dri/i965/brw_vec4_tcs.cpp
src/mesa/drivers/dri/i965/brw_vec4_visitor.cpp
src/mesa/drivers/dri/i965/brw_vec4_vs_visitor.cpp
src/mesa/main/mtypes.h
src/util/bitset.h
src/util/list.h

diff --git a/README.intel-vulkan.txt b/README.intel-vulkan.txt
new file mode 100644 (file)
index 0000000..4bd4231
--- /dev/null
@@ -0,0 +1,133 @@
+Intel's Open Source Vulkan Driver
+Vulkan API Version: 1.0.2
+SPIR-V Version: 1.0
+
+Intro
+=====
+The Open Source Technology Center 3D graphics team at Intel has
+been working on a Vulkan implementation based on the Mesa open source
+OpenGL implementation.
+
+The Mesa project source and our driver implementation are under the MIT
+license [1], but are also covered by the Khronos IP framework as it
+pertains to a specification under construction [2].
+
+We welcome all feedback and contributions, as long as the contributions
+are MIT licensed and can be open sourced with the driver.
+
+[1] https://opensource.org/licenses/MIT
+[2] https://www.khronos.org/members/ip-framework
+
+
+Maintainers
+===========
+Kristian Høgsberg Kristensen <kristian.h.kristensen@intel.com>
+Jason Ekstrand <jason.ekstrand@intel.com>
+Chad Versace <chad.versace@intel.com>
+
+
+Supported Hardware
+==================
+- Broadwell & Sky Lake, main development focus
+- Ivybridge
+- Haswell
+- Bay Trail
+- Cherryview
+- Broxton
+
+
+Conformance
+===========
+First-wave conformance has been submitted for Broadwell, Sky Lake, and
+Cherryview.  They all pass 100% of the mustpass tests as of January 30,
+2016.
+
+
+Supported OS Platforms
+======================
+ - Linux, tested on Fedora 22 with kernel >= 4.1
+     - X11 with DRI3
+     - Wayland
+ - Android
+     - TODO
+
+
+Building and Installing
+=======================
+This driver is intended to be used directly from the build tree. Installing the
+driver into a system location is not yet fully supported. If you require support
+for system-wide installation, please contact a maintainer.
+
+Throughout the instructions, MESA_TOP refers to the top of the Mesa repository.
+
+First, install the usual dependencies needed to build Mesa.
+
+        Fedora:
+            $ sudo yum builddep mesa
+        Ubuntu:
+            $ FINISHME
+
+Next, configure and build. The commands below build Mesa in release mode.
+To build Mesa in debug mode instead, add the '--enable-debug' option to the
+configure command.
+
+        $ cd $MESA_TOP
+        $ autoreconf -vfi
+        $ ./configure --with-dri-drivers=i965 --with-gallium-drivers=
+        $ make
+
+To use the driver's libvulkan.so directly, without LunarG's loader, you must set
+an environment variable before running your Vulkan application:
+
+        $ export LD_LIBRARY_PATH="$MESA_TOP/lib"
+        $ your-vk-app
+
+Alternatively, to use the driver with LunarG's loader:
+
+        $ export VK_ICD_FILENAMES="$MESA_TOP/src/vulkan/anv_icd.json"
+        $ your-vk-app
+
+
+File Structure and Naming
+=========================
+The core code of Intel's Mesa Vulkan driver lives in src/intel/vulkan. Files prefixed
+with "gen8" support Broadwell; files prefixed with "gen7" support Ivybridge;
+files prefixed with "anv" are common to all hardware generations.
+
+Mesa is an umbrella open source project containing many drivers for multiple
+APIs. The codename for Intel's Mesa Vulkan driver is "Anvil", hence the filename
+prefix "anv".
+
+
+Feature Status
+==============
+The driver is still a work in progress. We do our best to keep the list of
+features below up to date.
+
+Supported Features:
+  - Index buffers, instanced draw, indirect draw
+  - Nested command buffers
+  - Consumes SPIR-V (no GLSL "backdoor")
+  - Fragment, vertex, geometry, and compute shaders
+  - Uniform buffers, sampled images, dynamic uniform buffers
+  - Shader storage buffers
+  - Push constants
+  - Color, depth and stencil attachments
+  - 1D, 2D, 3D textures, texture arrays
+  - Memory barrier
+  - Optionally integrates with LunarG's loader
+  - WSI extension for X11
+  - Fences
+  - Most copy/blit commands for color and depth buffers,
+    vkCmdCopyImageToBuffer for stencil buffers
+  - Occlusion queries and timestamps
+  - VkSemaphore and VkEvent
+  - Shader specialization
+  - Storage images
+  - MSAA
+
+Unsupported Features:
+   - Tessellation shaders
+   - Push constants in GS and VS on HSW and prior
+   - Sparse resources
+   - Input attachments
diff --git a/configure.ac b/configure.ac
index 31703b58301041f448fce8db983b8c1e48b483b2..384de4dbde62b28cc38e5e7f391412773788d9c3 100644 (file)
@@ -1636,6 +1636,8 @@ GBM_PC_LIB_PRIV="$DLOPEN_LIBS"
 AC_SUBST([GBM_PC_REQ_PRIV])
 AC_SUBST([GBM_PC_LIB_PRIV])
 
+AM_CONDITIONAL(HAVE_VULKAN, true)
+
 dnl
 dnl EGL configuration
 dnl
@@ -2457,6 +2459,13 @@ AC_SUBST([XA_MINOR], $XA_MINOR)
 AC_SUBST([XA_TINY], $XA_TINY)
 AC_SUBST([XA_VERSION], "$XA_MAJOR.$XA_MINOR.$XA_TINY")
 
+PKG_CHECK_MODULES(VALGRIND, [valgrind],
+                  [have_valgrind=yes], [have_valgrind=no])
+if test "x$have_valgrind" = "xyes"; then
+    AC_DEFINE([HAVE_VALGRIND], 1,
+              [Use valgrind intrinsics to suppress false warnings])
+fi
+
 dnl Restore LDFLAGS and CPPFLAGS
 LDFLAGS="$_SAVE_LDFLAGS"
 CPPFLAGS="$_SAVE_CPPFLAGS"
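
The check above only defines HAVE_VALGRIND; how that symbol is consumed is not
part of this hunk. A minimal sketch, assuming a hypothetical wrapper macro
(EXAMPLE_MEM_DEFINED is illustrative, not code from this commit), of guarding a
Memcheck client request behind the new define:

    #ifdef HAVE_VALGRIND
    #include <valgrind/memcheck.h>
    /* Mark a range as defined so Memcheck does not report false
       "uninitialised value" warnings for driver-managed memory. */
    #define EXAMPLE_MEM_DEFINED(ptr, size) VALGRIND_MAKE_MEM_DEFINED((ptr), (size))
    #else
    #define EXAMPLE_MEM_DEFINED(ptr, size) ((void)0)
    #endif
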
@@ -2551,6 +2560,11 @@ AC_CONFIG_FILES([Makefile
                src/glx/apple/Makefile
                src/glx/tests/Makefile
                src/gtest/Makefile
+               src/intel/Makefile
+               src/intel/genxml/Makefile
+               src/intel/isl/Makefile
+               src/intel/vulkan/Makefile
+               src/intel/vulkan/tests/Makefile
                src/loader/Makefile
                src/mapi/Makefile
                src/mapi/es1api/glesv1_cm.pc
diff --git a/include/vulkan/vk_icd.h b/include/vulkan/vk_icd.h
new file mode 100644 (file)
index 0000000..d664f2c
--- /dev/null
@@ -0,0 +1,85 @@
+#ifndef VKICD_H
+#define VKICD_H
+
+#include "vk_platform.h"
+
+/*
+ * The ICD must reserve space for a pointer for the loader's dispatch
+ * table, at the start of <each object>.
+ * The ICD must initialize this variable using the SET_LOADER_MAGIC_VALUE macro.
+ */
+
+#define ICD_LOADER_MAGIC   0x01CDC0DE
+
+typedef union _VK_LOADER_DATA {
+  uintptr_t loaderMagic;
+  void *loaderData;
+} VK_LOADER_DATA;
+
+static inline void set_loader_magic_value(void* pNewObject) {
+    VK_LOADER_DATA *loader_info = (VK_LOADER_DATA *) pNewObject;
+    loader_info->loaderMagic = ICD_LOADER_MAGIC;
+}
+
+static inline bool valid_loader_magic_value(void* pNewObject) {
+    const VK_LOADER_DATA *loader_info = (VK_LOADER_DATA *) pNewObject;
+    return (loader_info->loaderMagic & 0xffffffff) == ICD_LOADER_MAGIC;
+}
+
+/*
+ * Windows and Linux ICDs will treat VkSurfaceKHR as a pointer to a struct that
+ * contains the platform-specific connection and surface information.
+ */
+typedef enum _VkIcdWsiPlatform {
+    VK_ICD_WSI_PLATFORM_MIR,
+    VK_ICD_WSI_PLATFORM_WAYLAND,
+    VK_ICD_WSI_PLATFORM_WIN32,
+    VK_ICD_WSI_PLATFORM_XCB,
+    VK_ICD_WSI_PLATFORM_XLIB,
+} VkIcdWsiPlatform;
+
+typedef struct _VkIcdSurfaceBase {
+    VkIcdWsiPlatform   platform;
+} VkIcdSurfaceBase;
+
+#ifdef VK_USE_PLATFORM_MIR_KHR
+typedef struct _VkIcdSurfaceMir {
+    VkIcdSurfaceBase   base;
+    MirConnection*     connection;
+    MirSurface*        mirSurface;
+} VkIcdSurfaceMir;
+#endif // VK_USE_PLATFORM_MIR_KHR
+
+#ifdef VK_USE_PLATFORM_WAYLAND_KHR
+typedef struct _VkIcdSurfaceWayland {
+    VkIcdSurfaceBase   base;
+    struct wl_display* display;
+    struct wl_surface* surface;
+} VkIcdSurfaceWayland;
+#endif // VK_USE_PLATFORM_WAYLAND_KHR
+
+#ifdef VK_USE_PLATFORM_WIN32_KHR
+typedef struct _VkIcdSurfaceWin32 {
+    VkIcdSurfaceBase   base;
+    HINSTANCE          hinstance;
+    HWND               hwnd;
+} VkIcdSurfaceWin32;
+#endif // VK_USE_PLATFORM_WIN32_KHR
+
+#ifdef VK_USE_PLATFORM_XCB_KHR
+typedef struct _VkIcdSurfaceXcb {
+    VkIcdSurfaceBase   base;
+    xcb_connection_t*  connection;
+    xcb_window_t       window;
+} VkIcdSurfaceXcb;
+#endif // VK_USE_PLATFORM_XCB_KHR
+
+#ifdef VK_USE_PLATFORM_XLIB_KHR
+typedef struct _VkIcdSurfaceXlib {
+    VkIcdSurfaceBase   base;
+    Display*           dpy;
+    Window             window;
+} VkIcdSurfaceXlib;
+#endif // VK_USE_PLATFORM_XLIB_KHR
+
+#endif // VKICD_H
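
The dispatch-table comment at the top of vk_icd.h is easiest to see in a small
sketch. Everything named example_* below is a hypothetical illustration, not
part of this commit; only VK_LOADER_DATA, ICD_LOADER_MAGIC and
set_loader_magic_value come from the header above.

    #include <stdbool.h>   /* vk_icd.h uses bool but does not include this itself */
    #include <stdlib.h>
    #include "vk_icd.h"

    /* Hypothetical dispatchable object: the loader's dispatch slot must be
       the very first member so the loader can locate and overwrite it. */
    struct example_queue {
        VK_LOADER_DATA loader_data;   /* must come first */
        int            queue_index;
    };

    static struct example_queue *example_queue_create(void)
    {
        struct example_queue *queue = calloc(1, sizeof(*queue));
        if (queue == NULL)
            return NULL;
        /* Stamp ICD_LOADER_MAGIC so valid_loader_magic_value() accepts it. */
        set_loader_magic_value(queue);
        return queue;
    }
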
diff --git a/include/vulkan/vk_platform.h b/include/vulkan/vk_platform.h
new file mode 100644 (file)
index 0000000..a53e725
--- /dev/null
@@ -0,0 +1,127 @@
+//
+// File: vk_platform.h
+//
+/*
+** Copyright (c) 2014-2015 The Khronos Group Inc.
+**
+** Permission is hereby granted, free of charge, to any person obtaining a
+** copy of this software and/or associated documentation files (the
+** "Materials"), to deal in the Materials without restriction, including
+** without limitation the rights to use, copy, modify, merge, publish,
+** distribute, sublicense, and/or sell copies of the Materials, and to
+** permit persons to whom the Materials are furnished to do so, subject to
+** the following conditions:
+**
+** The above copyright notice and this permission notice shall be included
+** in all copies or substantial portions of the Materials.
+**
+** THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+** EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+** MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+** IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+** CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+** TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+** MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS.
+*/
+
+
+#ifndef __VK_PLATFORM_H__
+#define __VK_PLATFORM_H__
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif // __cplusplus
+
+/*
+***************************************************************************************************
+*   Platform-specific directives and type declarations
+***************************************************************************************************
+*/
+
+/* Platform-specific calling convention macros.
+ *
+ * Platforms should define these so that Vulkan clients call Vulkan commands
+ * with the same calling conventions that the Vulkan implementation expects.
+ *
+ * VKAPI_ATTR - Placed before the return type in function declarations.
+ *              Useful for C++11 and GCC/Clang-style function attribute syntax.
+ * VKAPI_CALL - Placed after the return type in function declarations.
+ *              Useful for MSVC-style calling convention syntax.
+ * VKAPI_PTR  - Placed between the '(' and '*' in function pointer types.
+ *
+ * Function declaration:  VKAPI_ATTR void VKAPI_CALL vkCommand(void);
+ * Function pointer type: typedef void (VKAPI_PTR *PFN_vkCommand)(void);
+ */
+#if defined(_WIN32)
+    // On Windows, Vulkan commands use the stdcall convention
+    #define VKAPI_ATTR
+    #define VKAPI_CALL __stdcall
+    #define VKAPI_PTR  VKAPI_CALL
+#elif defined(__ANDROID__) && defined(__ARM_EABI__) && !defined(__ARM_ARCH_7A__)
+    // Android does not support Vulkan in native code using the "armeabi" ABI.
+    #error "Vulkan requires the 'armeabi-v7a' or 'armeabi-v7a-hard' ABI on 32-bit ARM CPUs"
+#elif defined(__ANDROID__) && defined(__ARM_ARCH_7A__)
+    // On Android/ARMv7a, Vulkan functions use the armeabi-v7a-hard calling
+    // convention, even if the application's native code is compiled with the
+    // armeabi-v7a calling convention.
+    #define VKAPI_ATTR __attribute__((pcs("aapcs-vfp")))
+    #define VKAPI_CALL
+    #define VKAPI_PTR  VKAPI_ATTR
+#else
+    // On other platforms, use the default calling convention
+    #define VKAPI_ATTR
+    #define VKAPI_CALL
+    #define VKAPI_PTR
+#endif
+
+#include <stddef.h>
+
+#if !defined(VK_NO_STDINT_H)
+    #if defined(_MSC_VER) && (_MSC_VER < 1600)
+        typedef signed   __int8  int8_t;
+        typedef unsigned __int8  uint8_t;
+        typedef signed   __int16 int16_t;
+        typedef unsigned __int16 uint16_t;
+        typedef signed   __int32 int32_t;
+        typedef unsigned __int32 uint32_t;
+        typedef signed   __int64 int64_t;
+        typedef unsigned __int64 uint64_t;
+    #else
+        #include <stdint.h>
+    #endif
+#endif // !defined(VK_NO_STDINT_H)
+
+#ifdef __cplusplus
+} // extern "C"
+#endif // __cplusplus
+
+// Platform-specific headers required by platform window system extensions.
+// These are enabled prior to #including "vulkan.h". The same enable then
+// controls inclusion of the extension interfaces in vulkan.h.
+
+#ifdef VK_USE_PLATFORM_ANDROID_KHR
+#include <android/native_window.h>
+#endif
+
+#ifdef VK_USE_PLATFORM_MIR_KHR
+#include <mir_toolkit/client_types.h>
+#endif
+
+#ifdef VK_USE_PLATFORM_WAYLAND_KHR
+#include <wayland-client.h>
+#endif
+
+#ifdef VK_USE_PLATFORM_WIN32_KHR
+#include <windows.h>
+#endif
+
+#ifdef VK_USE_PLATFORM_XLIB_KHR
+#include <X11/Xlib.h>
+#endif
+
+#ifdef VK_USE_PLATFORM_XCB_KHR
+#include <xcb/xcb.h>
+#endif
+
+#endif // __VK_PLATFORM_H__
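
As the comment above says, a VK_USE_PLATFORM_*_KHR define has to be set before
vulkan.h is included so that both the platform header and the matching surface
interfaces are compiled in. A minimal sketch for an XCB client (an assumption;
nothing here is added by this commit):

    /* The define pulls in <xcb/xcb.h> via vk_platform.h and enables the
       VK_KHR_xcb_surface interfaces in vulkan.h. */
    #define VK_USE_PLATFORM_XCB_KHR
    #include <vulkan/vulkan.h>

    int main(void)
    {
        xcb_connection_t *connection = xcb_connect(NULL, NULL);
        VkStructureType stype = VK_STRUCTURE_TYPE_XCB_SURFACE_CREATE_INFO_KHR;
        (void)stype;
        if (connection != NULL)
            xcb_disconnect(connection);
        return 0;
    }
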
diff --git a/include/vulkan/vulkan.h b/include/vulkan/vulkan.h
new file mode 100644 (file)
index 0000000..f5610c5
--- /dev/null
@@ -0,0 +1,3674 @@
+#ifndef __vulkan_h_
+#define __vulkan_h_ 1
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+** Copyright (c) 2015 The Khronos Group Inc.
+**
+** Permission is hereby granted, free of charge, to any person obtaining a
+** copy of this software and/or associated documentation files (the
+** "Materials"), to deal in the Materials without restriction, including
+** without limitation the rights to use, copy, modify, merge, publish,
+** distribute, sublicense, and/or sell copies of the Materials, and to
+** permit persons to whom the Materials are furnished to do so, subject to
+** the following conditions:
+**
+** The above copyright notice and this permission notice shall be included
+** in all copies or substantial portions of the Materials.
+**
+** THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+** EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+** MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+** IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+** CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+** TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+** MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS.
+*/
+
+/*
+** This header is generated from the Khronos Vulkan XML API Registry.
+**
+*/
+
+
+#define VK_VERSION_1_0 1
+#include "vk_platform.h"
+
+#define VK_MAKE_VERSION(major, minor, patch) \
+    (((major) << 22) | ((minor) << 12) | (patch))
+
+// Vulkan API version supported by this file
+#define VK_API_VERSION VK_MAKE_VERSION(1, 0, 3)
+
+#define VK_VERSION_MAJOR(version) ((uint32_t)(version) >> 22)
+#define VK_VERSION_MINOR(version) (((uint32_t)(version) >> 12) & 0x3ff)
+#define VK_VERSION_PATCH(version) ((uint32_t)(version) & 0xfff)
+
+#define VK_NULL_HANDLE 0
+        
+
+
+#define VK_DEFINE_HANDLE(object) typedef struct object##_T* object;
+
+
+#if defined(__LP64__) || defined(_WIN64) || defined(__x86_64__) || defined(_M_X64) || defined(__ia64) || defined (_M_IA64) || defined(__aarch64__) || defined(__powerpc64__)
+        #define VK_DEFINE_NON_DISPATCHABLE_HANDLE(object) typedef struct object##_T *object;
+#else
+        #define VK_DEFINE_NON_DISPATCHABLE_HANDLE(object) typedef uint64_t object;
+#endif
+        
+
+
+typedef uint32_t VkFlags;
+typedef uint32_t VkBool32;
+typedef uint64_t VkDeviceSize;
+typedef uint32_t VkSampleMask;
+
+VK_DEFINE_HANDLE(VkInstance)
+VK_DEFINE_HANDLE(VkPhysicalDevice)
+VK_DEFINE_HANDLE(VkDevice)
+VK_DEFINE_HANDLE(VkQueue)
+VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkSemaphore)
+VK_DEFINE_HANDLE(VkCommandBuffer)
+VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkFence)
+VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkDeviceMemory)
+VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkBuffer)
+VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkImage)
+VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkEvent)
+VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkQueryPool)
+VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkBufferView)
+VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkImageView)
+VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkShaderModule)
+VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkPipelineCache)
+VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkPipelineLayout)
+VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkRenderPass)
+VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkPipeline)
+VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkDescriptorSetLayout)
+VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkSampler)
+VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkDescriptorPool)
+VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkDescriptorSet)
+VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkFramebuffer)
+VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkCommandPool)
+
+#define VK_LOD_CLAMP_NONE                 1000.0f
+#define VK_REMAINING_MIP_LEVELS           (~0U)
+#define VK_REMAINING_ARRAY_LAYERS         (~0U)
+#define VK_WHOLE_SIZE                     (~0ULL)
+#define VK_ATTACHMENT_UNUSED              (~0U)
+#define VK_TRUE                           1
+#define VK_FALSE                          0
+#define VK_QUEUE_FAMILY_IGNORED           (~0U)
+#define VK_SUBPASS_EXTERNAL               (~0U)
+#define VK_MAX_PHYSICAL_DEVICE_NAME_SIZE  256
+#define VK_UUID_SIZE                      16
+#define VK_MAX_MEMORY_TYPES               32
+#define VK_MAX_MEMORY_HEAPS               16
+#define VK_MAX_EXTENSION_NAME_SIZE        256
+#define VK_MAX_DESCRIPTION_SIZE           256
+
+
+typedef enum VkPipelineCacheHeaderVersion {
+    VK_PIPELINE_CACHE_HEADER_VERSION_ONE = 1,
+    VK_PIPELINE_CACHE_HEADER_VERSION_BEGIN_RANGE = VK_PIPELINE_CACHE_HEADER_VERSION_ONE,
+    VK_PIPELINE_CACHE_HEADER_VERSION_END_RANGE = VK_PIPELINE_CACHE_HEADER_VERSION_ONE,
+    VK_PIPELINE_CACHE_HEADER_VERSION_RANGE_SIZE = (VK_PIPELINE_CACHE_HEADER_VERSION_ONE - VK_PIPELINE_CACHE_HEADER_VERSION_ONE + 1),
+    VK_PIPELINE_CACHE_HEADER_VERSION_MAX_ENUM = 0x7FFFFFFF
+} VkPipelineCacheHeaderVersion;
+
+typedef enum VkResult {
+    VK_SUCCESS = 0,
+    VK_NOT_READY = 1,
+    VK_TIMEOUT = 2,
+    VK_EVENT_SET = 3,
+    VK_EVENT_RESET = 4,
+    VK_INCOMPLETE = 5,
+    VK_ERROR_OUT_OF_HOST_MEMORY = -1,
+    VK_ERROR_OUT_OF_DEVICE_MEMORY = -2,
+    VK_ERROR_INITIALIZATION_FAILED = -3,
+    VK_ERROR_DEVICE_LOST = -4,
+    VK_ERROR_MEMORY_MAP_FAILED = -5,
+    VK_ERROR_LAYER_NOT_PRESENT = -6,
+    VK_ERROR_EXTENSION_NOT_PRESENT = -7,
+    VK_ERROR_FEATURE_NOT_PRESENT = -8,
+    VK_ERROR_INCOMPATIBLE_DRIVER = -9,
+    VK_ERROR_TOO_MANY_OBJECTS = -10,
+    VK_ERROR_FORMAT_NOT_SUPPORTED = -11,
+    VK_ERROR_SURFACE_LOST_KHR = -1000000000,
+    VK_ERROR_NATIVE_WINDOW_IN_USE_KHR = -1000000001,
+    VK_SUBOPTIMAL_KHR = 1000001003,
+    VK_ERROR_OUT_OF_DATE_KHR = -1000001004,
+    VK_ERROR_INCOMPATIBLE_DISPLAY_KHR = -1000003001,
+    VK_ERROR_VALIDATION_FAILED_EXT = -1000011001,
+    VK_RESULT_BEGIN_RANGE = VK_ERROR_FORMAT_NOT_SUPPORTED,
+    VK_RESULT_END_RANGE = VK_INCOMPLETE,
+    VK_RESULT_RANGE_SIZE = (VK_INCOMPLETE - VK_ERROR_FORMAT_NOT_SUPPORTED + 1),
+    VK_RESULT_MAX_ENUM = 0x7FFFFFFF
+} VkResult;
+
+typedef enum VkStructureType {
+    VK_STRUCTURE_TYPE_APPLICATION_INFO = 0,
+    VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO = 1,
+    VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO = 2,
+    VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO = 3,
+    VK_STRUCTURE_TYPE_SUBMIT_INFO = 4,
+    VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO = 5,
+    VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE = 6,
+    VK_STRUCTURE_TYPE_BIND_SPARSE_INFO = 7,
+    VK_STRUCTURE_TYPE_FENCE_CREATE_INFO = 8,
+    VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO = 9,
+    VK_STRUCTURE_TYPE_EVENT_CREATE_INFO = 10,
+    VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO = 11,
+    VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO = 12,
+    VK_STRUCTURE_TYPE_BUFFER_VIEW_CREATE_INFO = 13,
+    VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO = 14,
+    VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO = 15,
+    VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO = 16,
+    VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO = 17,
+    VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO = 18,
+    VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO = 19,
+    VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO = 20,
+    VK_STRUCTURE_TYPE_PIPELINE_TESSELLATION_STATE_CREATE_INFO = 21,
+    VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO = 22,
+    VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO = 23,
+    VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO = 24,
+    VK_STRUCTURE_TYPE_PIPELINE_DEPTH_STENCIL_STATE_CREATE_INFO = 25,
+    VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO = 26,
+    VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO = 27,
+    VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO = 28,
+    VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO = 29,
+    VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO = 30,
+    VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO = 31,
+    VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO = 32,
+    VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO = 33,
+    VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO = 34,
+    VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET = 35,
+    VK_STRUCTURE_TYPE_COPY_DESCRIPTOR_SET = 36,
+    VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO = 37,
+    VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO = 38,
+    VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO = 39,
+    VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO = 40,
+    VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_INFO = 41,
+    VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO = 42,
+    VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO = 43,
+    VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER = 44,
+    VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER = 45,
+    VK_STRUCTURE_TYPE_MEMORY_BARRIER = 46,
+    VK_STRUCTURE_TYPE_LOADER_INSTANCE_CREATE_INFO = 47,
+    VK_STRUCTURE_TYPE_LOADER_DEVICE_CREATE_INFO = 48,
+    VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR = 1000001000,
+    VK_STRUCTURE_TYPE_PRESENT_INFO_KHR = 1000001001,
+    VK_STRUCTURE_TYPE_DISPLAY_MODE_CREATE_INFO_KHR = 1000002000,
+    VK_STRUCTURE_TYPE_DISPLAY_SURFACE_CREATE_INFO_KHR = 1000002001,
+    VK_STRUCTURE_TYPE_DISPLAY_PRESENT_INFO_KHR = 1000003000,
+    VK_STRUCTURE_TYPE_XLIB_SURFACE_CREATE_INFO_KHR = 1000004000,
+    VK_STRUCTURE_TYPE_XCB_SURFACE_CREATE_INFO_KHR = 1000005000,
+    VK_STRUCTURE_TYPE_WAYLAND_SURFACE_CREATE_INFO_KHR = 1000006000,
+    VK_STRUCTURE_TYPE_MIR_SURFACE_CREATE_INFO_KHR = 1000007000,
+    VK_STRUCTURE_TYPE_ANDROID_SURFACE_CREATE_INFO_KHR = 1000008000,
+    VK_STRUCTURE_TYPE_WIN32_SURFACE_CREATE_INFO_KHR = 1000009000,
+    VK_STRUCTURE_TYPE_DEBUG_REPORT_CREATE_INFO_EXT = 1000011000,
+    VK_STRUCTURE_TYPE_BEGIN_RANGE = VK_STRUCTURE_TYPE_APPLICATION_INFO,
+    VK_STRUCTURE_TYPE_END_RANGE = VK_STRUCTURE_TYPE_LOADER_DEVICE_CREATE_INFO,
+    VK_STRUCTURE_TYPE_RANGE_SIZE = (VK_STRUCTURE_TYPE_LOADER_DEVICE_CREATE_INFO - VK_STRUCTURE_TYPE_APPLICATION_INFO + 1),
+    VK_STRUCTURE_TYPE_MAX_ENUM = 0x7FFFFFFF
+} VkStructureType;
+
+typedef enum VkSystemAllocationScope {
+    VK_SYSTEM_ALLOCATION_SCOPE_COMMAND = 0,
+    VK_SYSTEM_ALLOCATION_SCOPE_OBJECT = 1,
+    VK_SYSTEM_ALLOCATION_SCOPE_CACHE = 2,
+    VK_SYSTEM_ALLOCATION_SCOPE_DEVICE = 3,
+    VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE = 4,
+    VK_SYSTEM_ALLOCATION_SCOPE_BEGIN_RANGE = VK_SYSTEM_ALLOCATION_SCOPE_COMMAND,
+    VK_SYSTEM_ALLOCATION_SCOPE_END_RANGE = VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE,
+    VK_SYSTEM_ALLOCATION_SCOPE_RANGE_SIZE = (VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE - VK_SYSTEM_ALLOCATION_SCOPE_COMMAND + 1),
+    VK_SYSTEM_ALLOCATION_SCOPE_MAX_ENUM = 0x7FFFFFFF
+} VkSystemAllocationScope;
+
+typedef enum VkInternalAllocationType {
+    VK_INTERNAL_ALLOCATION_TYPE_EXECUTABLE = 0,
+    VK_INTERNAL_ALLOCATION_TYPE_BEGIN_RANGE = VK_INTERNAL_ALLOCATION_TYPE_EXECUTABLE,
+    VK_INTERNAL_ALLOCATION_TYPE_END_RANGE = VK_INTERNAL_ALLOCATION_TYPE_EXECUTABLE,
+    VK_INTERNAL_ALLOCATION_TYPE_RANGE_SIZE = (VK_INTERNAL_ALLOCATION_TYPE_EXECUTABLE - VK_INTERNAL_ALLOCATION_TYPE_EXECUTABLE + 1),
+    VK_INTERNAL_ALLOCATION_TYPE_MAX_ENUM = 0x7FFFFFFF
+} VkInternalAllocationType;
+
+typedef enum VkFormat {
+    VK_FORMAT_UNDEFINED = 0,
+    VK_FORMAT_R4G4_UNORM_PACK8 = 1,
+    VK_FORMAT_R4G4B4A4_UNORM_PACK16 = 2,
+    VK_FORMAT_B4G4R4A4_UNORM_PACK16 = 3,
+    VK_FORMAT_R5G6B5_UNORM_PACK16 = 4,
+    VK_FORMAT_B5G6R5_UNORM_PACK16 = 5,
+    VK_FORMAT_R5G5B5A1_UNORM_PACK16 = 6,
+    VK_FORMAT_B5G5R5A1_UNORM_PACK16 = 7,
+    VK_FORMAT_A1R5G5B5_UNORM_PACK16 = 8,
+    VK_FORMAT_R8_UNORM = 9,
+    VK_FORMAT_R8_SNORM = 10,
+    VK_FORMAT_R8_USCALED = 11,
+    VK_FORMAT_R8_SSCALED = 12,
+    VK_FORMAT_R8_UINT = 13,
+    VK_FORMAT_R8_SINT = 14,
+    VK_FORMAT_R8_SRGB = 15,
+    VK_FORMAT_R8G8_UNORM = 16,
+    VK_FORMAT_R8G8_SNORM = 17,
+    VK_FORMAT_R8G8_USCALED = 18,
+    VK_FORMAT_R8G8_SSCALED = 19,
+    VK_FORMAT_R8G8_UINT = 20,
+    VK_FORMAT_R8G8_SINT = 21,
+    VK_FORMAT_R8G8_SRGB = 22,
+    VK_FORMAT_R8G8B8_UNORM = 23,
+    VK_FORMAT_R8G8B8_SNORM = 24,
+    VK_FORMAT_R8G8B8_USCALED = 25,
+    VK_FORMAT_R8G8B8_SSCALED = 26,
+    VK_FORMAT_R8G8B8_UINT = 27,
+    VK_FORMAT_R8G8B8_SINT = 28,
+    VK_FORMAT_R8G8B8_SRGB = 29,
+    VK_FORMAT_B8G8R8_UNORM = 30,
+    VK_FORMAT_B8G8R8_SNORM = 31,
+    VK_FORMAT_B8G8R8_USCALED = 32,
+    VK_FORMAT_B8G8R8_SSCALED = 33,
+    VK_FORMAT_B8G8R8_UINT = 34,
+    VK_FORMAT_B8G8R8_SINT = 35,
+    VK_FORMAT_B8G8R8_SRGB = 36,
+    VK_FORMAT_R8G8B8A8_UNORM = 37,
+    VK_FORMAT_R8G8B8A8_SNORM = 38,
+    VK_FORMAT_R8G8B8A8_USCALED = 39,
+    VK_FORMAT_R8G8B8A8_SSCALED = 40,
+    VK_FORMAT_R8G8B8A8_UINT = 41,
+    VK_FORMAT_R8G8B8A8_SINT = 42,
+    VK_FORMAT_R8G8B8A8_SRGB = 43,
+    VK_FORMAT_B8G8R8A8_UNORM = 44,
+    VK_FORMAT_B8G8R8A8_SNORM = 45,
+    VK_FORMAT_B8G8R8A8_USCALED = 46,
+    VK_FORMAT_B8G8R8A8_SSCALED = 47,
+    VK_FORMAT_B8G8R8A8_UINT = 48,
+    VK_FORMAT_B8G8R8A8_SINT = 49,
+    VK_FORMAT_B8G8R8A8_SRGB = 50,
+    VK_FORMAT_A8B8G8R8_UNORM_PACK32 = 51,
+    VK_FORMAT_A8B8G8R8_SNORM_PACK32 = 52,
+    VK_FORMAT_A8B8G8R8_USCALED_PACK32 = 53,
+    VK_FORMAT_A8B8G8R8_SSCALED_PACK32 = 54,
+    VK_FORMAT_A8B8G8R8_UINT_PACK32 = 55,
+    VK_FORMAT_A8B8G8R8_SINT_PACK32 = 56,
+    VK_FORMAT_A8B8G8R8_SRGB_PACK32 = 57,
+    VK_FORMAT_A2R10G10B10_UNORM_PACK32 = 58,
+    VK_FORMAT_A2R10G10B10_SNORM_PACK32 = 59,
+    VK_FORMAT_A2R10G10B10_USCALED_PACK32 = 60,
+    VK_FORMAT_A2R10G10B10_SSCALED_PACK32 = 61,
+    VK_FORMAT_A2R10G10B10_UINT_PACK32 = 62,
+    VK_FORMAT_A2R10G10B10_SINT_PACK32 = 63,
+    VK_FORMAT_A2B10G10R10_UNORM_PACK32 = 64,
+    VK_FORMAT_A2B10G10R10_SNORM_PACK32 = 65,
+    VK_FORMAT_A2B10G10R10_USCALED_PACK32 = 66,
+    VK_FORMAT_A2B10G10R10_SSCALED_PACK32 = 67,
+    VK_FORMAT_A2B10G10R10_UINT_PACK32 = 68,
+    VK_FORMAT_A2B10G10R10_SINT_PACK32 = 69,
+    VK_FORMAT_R16_UNORM = 70,
+    VK_FORMAT_R16_SNORM = 71,
+    VK_FORMAT_R16_USCALED = 72,
+    VK_FORMAT_R16_SSCALED = 73,
+    VK_FORMAT_R16_UINT = 74,
+    VK_FORMAT_R16_SINT = 75,
+    VK_FORMAT_R16_SFLOAT = 76,
+    VK_FORMAT_R16G16_UNORM = 77,
+    VK_FORMAT_R16G16_SNORM = 78,
+    VK_FORMAT_R16G16_USCALED = 79,
+    VK_FORMAT_R16G16_SSCALED = 80,
+    VK_FORMAT_R16G16_UINT = 81,
+    VK_FORMAT_R16G16_SINT = 82,
+    VK_FORMAT_R16G16_SFLOAT = 83,
+    VK_FORMAT_R16G16B16_UNORM = 84,
+    VK_FORMAT_R16G16B16_SNORM = 85,
+    VK_FORMAT_R16G16B16_USCALED = 86,
+    VK_FORMAT_R16G16B16_SSCALED = 87,
+    VK_FORMAT_R16G16B16_UINT = 88,
+    VK_FORMAT_R16G16B16_SINT = 89,
+    VK_FORMAT_R16G16B16_SFLOAT = 90,
+    VK_FORMAT_R16G16B16A16_UNORM = 91,
+    VK_FORMAT_R16G16B16A16_SNORM = 92,
+    VK_FORMAT_R16G16B16A16_USCALED = 93,
+    VK_FORMAT_R16G16B16A16_SSCALED = 94,
+    VK_FORMAT_R16G16B16A16_UINT = 95,
+    VK_FORMAT_R16G16B16A16_SINT = 96,
+    VK_FORMAT_R16G16B16A16_SFLOAT = 97,
+    VK_FORMAT_R32_UINT = 98,
+    VK_FORMAT_R32_SINT = 99,
+    VK_FORMAT_R32_SFLOAT = 100,
+    VK_FORMAT_R32G32_UINT = 101,
+    VK_FORMAT_R32G32_SINT = 102,
+    VK_FORMAT_R32G32_SFLOAT = 103,
+    VK_FORMAT_R32G32B32_UINT = 104,
+    VK_FORMAT_R32G32B32_SINT = 105,
+    VK_FORMAT_R32G32B32_SFLOAT = 106,
+    VK_FORMAT_R32G32B32A32_UINT = 107,
+    VK_FORMAT_R32G32B32A32_SINT = 108,
+    VK_FORMAT_R32G32B32A32_SFLOAT = 109,
+    VK_FORMAT_R64_UINT = 110,
+    VK_FORMAT_R64_SINT = 111,
+    VK_FORMAT_R64_SFLOAT = 112,
+    VK_FORMAT_R64G64_UINT = 113,
+    VK_FORMAT_R64G64_SINT = 114,
+    VK_FORMAT_R64G64_SFLOAT = 115,
+    VK_FORMAT_R64G64B64_UINT = 116,
+    VK_FORMAT_R64G64B64_SINT = 117,
+    VK_FORMAT_R64G64B64_SFLOAT = 118,
+    VK_FORMAT_R64G64B64A64_UINT = 119,
+    VK_FORMAT_R64G64B64A64_SINT = 120,
+    VK_FORMAT_R64G64B64A64_SFLOAT = 121,
+    VK_FORMAT_B10G11R11_UFLOAT_PACK32 = 122,
+    VK_FORMAT_E5B9G9R9_UFLOAT_PACK32 = 123,
+    VK_FORMAT_D16_UNORM = 124,
+    VK_FORMAT_X8_D24_UNORM_PACK32 = 125,
+    VK_FORMAT_D32_SFLOAT = 126,
+    VK_FORMAT_S8_UINT = 127,
+    VK_FORMAT_D16_UNORM_S8_UINT = 128,
+    VK_FORMAT_D24_UNORM_S8_UINT = 129,
+    VK_FORMAT_D32_SFLOAT_S8_UINT = 130,
+    VK_FORMAT_BC1_RGB_UNORM_BLOCK = 131,
+    VK_FORMAT_BC1_RGB_SRGB_BLOCK = 132,
+    VK_FORMAT_BC1_RGBA_UNORM_BLOCK = 133,
+    VK_FORMAT_BC1_RGBA_SRGB_BLOCK = 134,
+    VK_FORMAT_BC2_UNORM_BLOCK = 135,
+    VK_FORMAT_BC2_SRGB_BLOCK = 136,
+    VK_FORMAT_BC3_UNORM_BLOCK = 137,
+    VK_FORMAT_BC3_SRGB_BLOCK = 138,
+    VK_FORMAT_BC4_UNORM_BLOCK = 139,
+    VK_FORMAT_BC4_SNORM_BLOCK = 140,
+    VK_FORMAT_BC5_UNORM_BLOCK = 141,
+    VK_FORMAT_BC5_SNORM_BLOCK = 142,
+    VK_FORMAT_BC6H_UFLOAT_BLOCK = 143,
+    VK_FORMAT_BC6H_SFLOAT_BLOCK = 144,
+    VK_FORMAT_BC7_UNORM_BLOCK = 145,
+    VK_FORMAT_BC7_SRGB_BLOCK = 146,
+    VK_FORMAT_ETC2_R8G8B8_UNORM_BLOCK = 147,
+    VK_FORMAT_ETC2_R8G8B8_SRGB_BLOCK = 148,
+    VK_FORMAT_ETC2_R8G8B8A1_UNORM_BLOCK = 149,
+    VK_FORMAT_ETC2_R8G8B8A1_SRGB_BLOCK = 150,
+    VK_FORMAT_ETC2_R8G8B8A8_UNORM_BLOCK = 151,
+    VK_FORMAT_ETC2_R8G8B8A8_SRGB_BLOCK = 152,
+    VK_FORMAT_EAC_R11_UNORM_BLOCK = 153,
+    VK_FORMAT_EAC_R11_SNORM_BLOCK = 154,
+    VK_FORMAT_EAC_R11G11_UNORM_BLOCK = 155,
+    VK_FORMAT_EAC_R11G11_SNORM_BLOCK = 156,
+    VK_FORMAT_ASTC_4x4_UNORM_BLOCK = 157,
+    VK_FORMAT_ASTC_4x4_SRGB_BLOCK = 158,
+    VK_FORMAT_ASTC_5x4_UNORM_BLOCK = 159,
+    VK_FORMAT_ASTC_5x4_SRGB_BLOCK = 160,
+    VK_FORMAT_ASTC_5x5_UNORM_BLOCK = 161,
+    VK_FORMAT_ASTC_5x5_SRGB_BLOCK = 162,
+    VK_FORMAT_ASTC_6x5_UNORM_BLOCK = 163,
+    VK_FORMAT_ASTC_6x5_SRGB_BLOCK = 164,
+    VK_FORMAT_ASTC_6x6_UNORM_BLOCK = 165,
+    VK_FORMAT_ASTC_6x6_SRGB_BLOCK = 166,
+    VK_FORMAT_ASTC_8x5_UNORM_BLOCK = 167,
+    VK_FORMAT_ASTC_8x5_SRGB_BLOCK = 168,
+    VK_FORMAT_ASTC_8x6_UNORM_BLOCK = 169,
+    VK_FORMAT_ASTC_8x6_SRGB_BLOCK = 170,
+    VK_FORMAT_ASTC_8x8_UNORM_BLOCK = 171,
+    VK_FORMAT_ASTC_8x8_SRGB_BLOCK = 172,
+    VK_FORMAT_ASTC_10x5_UNORM_BLOCK = 173,
+    VK_FORMAT_ASTC_10x5_SRGB_BLOCK = 174,
+    VK_FORMAT_ASTC_10x6_UNORM_BLOCK = 175,
+    VK_FORMAT_ASTC_10x6_SRGB_BLOCK = 176,
+    VK_FORMAT_ASTC_10x8_UNORM_BLOCK = 177,
+    VK_FORMAT_ASTC_10x8_SRGB_BLOCK = 178,
+    VK_FORMAT_ASTC_10x10_UNORM_BLOCK = 179,
+    VK_FORMAT_ASTC_10x10_SRGB_BLOCK = 180,
+    VK_FORMAT_ASTC_12x10_UNORM_BLOCK = 181,
+    VK_FORMAT_ASTC_12x10_SRGB_BLOCK = 182,
+    VK_FORMAT_ASTC_12x12_UNORM_BLOCK = 183,
+    VK_FORMAT_ASTC_12x12_SRGB_BLOCK = 184,
+    VK_FORMAT_BEGIN_RANGE = VK_FORMAT_UNDEFINED,
+    VK_FORMAT_END_RANGE = VK_FORMAT_ASTC_12x12_SRGB_BLOCK,
+    VK_FORMAT_RANGE_SIZE = (VK_FORMAT_ASTC_12x12_SRGB_BLOCK - VK_FORMAT_UNDEFINED + 1),
+    VK_FORMAT_MAX_ENUM = 0x7FFFFFFF
+} VkFormat;
+
+typedef enum VkImageType {
+    VK_IMAGE_TYPE_1D = 0,
+    VK_IMAGE_TYPE_2D = 1,
+    VK_IMAGE_TYPE_3D = 2,
+    VK_IMAGE_TYPE_BEGIN_RANGE = VK_IMAGE_TYPE_1D,
+    VK_IMAGE_TYPE_END_RANGE = VK_IMAGE_TYPE_3D,
+    VK_IMAGE_TYPE_RANGE_SIZE = (VK_IMAGE_TYPE_3D - VK_IMAGE_TYPE_1D + 1),
+    VK_IMAGE_TYPE_MAX_ENUM = 0x7FFFFFFF
+} VkImageType;
+
+typedef enum VkImageTiling {
+    VK_IMAGE_TILING_OPTIMAL = 0,
+    VK_IMAGE_TILING_LINEAR = 1,
+    VK_IMAGE_TILING_BEGIN_RANGE = VK_IMAGE_TILING_OPTIMAL,
+    VK_IMAGE_TILING_END_RANGE = VK_IMAGE_TILING_LINEAR,
+    VK_IMAGE_TILING_RANGE_SIZE = (VK_IMAGE_TILING_LINEAR - VK_IMAGE_TILING_OPTIMAL + 1),
+    VK_IMAGE_TILING_MAX_ENUM = 0x7FFFFFFF
+} VkImageTiling;
+
+typedef enum VkPhysicalDeviceType {
+    VK_PHYSICAL_DEVICE_TYPE_OTHER = 0,
+    VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU = 1,
+    VK_PHYSICAL_DEVICE_TYPE_DISCRETE_GPU = 2,
+    VK_PHYSICAL_DEVICE_TYPE_VIRTUAL_GPU = 3,
+    VK_PHYSICAL_DEVICE_TYPE_CPU = 4,
+    VK_PHYSICAL_DEVICE_TYPE_BEGIN_RANGE = VK_PHYSICAL_DEVICE_TYPE_OTHER,
+    VK_PHYSICAL_DEVICE_TYPE_END_RANGE = VK_PHYSICAL_DEVICE_TYPE_CPU,
+    VK_PHYSICAL_DEVICE_TYPE_RANGE_SIZE = (VK_PHYSICAL_DEVICE_TYPE_CPU - VK_PHYSICAL_DEVICE_TYPE_OTHER + 1),
+    VK_PHYSICAL_DEVICE_TYPE_MAX_ENUM = 0x7FFFFFFF
+} VkPhysicalDeviceType;
+
+typedef enum VkQueryType {
+    VK_QUERY_TYPE_OCCLUSION = 0,
+    VK_QUERY_TYPE_PIPELINE_STATISTICS = 1,
+    VK_QUERY_TYPE_TIMESTAMP = 2,
+    VK_QUERY_TYPE_BEGIN_RANGE = VK_QUERY_TYPE_OCCLUSION,
+    VK_QUERY_TYPE_END_RANGE = VK_QUERY_TYPE_TIMESTAMP,
+    VK_QUERY_TYPE_RANGE_SIZE = (VK_QUERY_TYPE_TIMESTAMP - VK_QUERY_TYPE_OCCLUSION + 1),
+    VK_QUERY_TYPE_MAX_ENUM = 0x7FFFFFFF
+} VkQueryType;
+
+typedef enum VkSharingMode {
+    VK_SHARING_MODE_EXCLUSIVE = 0,
+    VK_SHARING_MODE_CONCURRENT = 1,
+    VK_SHARING_MODE_BEGIN_RANGE = VK_SHARING_MODE_EXCLUSIVE,
+    VK_SHARING_MODE_END_RANGE = VK_SHARING_MODE_CONCURRENT,
+    VK_SHARING_MODE_RANGE_SIZE = (VK_SHARING_MODE_CONCURRENT - VK_SHARING_MODE_EXCLUSIVE + 1),
+    VK_SHARING_MODE_MAX_ENUM = 0x7FFFFFFF
+} VkSharingMode;
+
+typedef enum VkImageLayout {
+    VK_IMAGE_LAYOUT_UNDEFINED = 0,
+    VK_IMAGE_LAYOUT_GENERAL = 1,
+    VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL = 2,
+    VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL = 3,
+    VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL = 4,
+    VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL = 5,
+    VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL = 6,
+    VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL = 7,
+    VK_IMAGE_LAYOUT_PREINITIALIZED = 8,
+    VK_IMAGE_LAYOUT_PRESENT_SRC_KHR = 1000001002,
+    VK_IMAGE_LAYOUT_BEGIN_RANGE = VK_IMAGE_LAYOUT_UNDEFINED,
+    VK_IMAGE_LAYOUT_END_RANGE = VK_IMAGE_LAYOUT_PREINITIALIZED,
+    VK_IMAGE_LAYOUT_RANGE_SIZE = (VK_IMAGE_LAYOUT_PREINITIALIZED - VK_IMAGE_LAYOUT_UNDEFINED + 1),
+    VK_IMAGE_LAYOUT_MAX_ENUM = 0x7FFFFFFF
+} VkImageLayout;
+
+typedef enum VkImageViewType {
+    VK_IMAGE_VIEW_TYPE_1D = 0,
+    VK_IMAGE_VIEW_TYPE_2D = 1,
+    VK_IMAGE_VIEW_TYPE_3D = 2,
+    VK_IMAGE_VIEW_TYPE_CUBE = 3,
+    VK_IMAGE_VIEW_TYPE_1D_ARRAY = 4,
+    VK_IMAGE_VIEW_TYPE_2D_ARRAY = 5,
+    VK_IMAGE_VIEW_TYPE_CUBE_ARRAY = 6,
+    VK_IMAGE_VIEW_TYPE_BEGIN_RANGE = VK_IMAGE_VIEW_TYPE_1D,
+    VK_IMAGE_VIEW_TYPE_END_RANGE = VK_IMAGE_VIEW_TYPE_CUBE_ARRAY,
+    VK_IMAGE_VIEW_TYPE_RANGE_SIZE = (VK_IMAGE_VIEW_TYPE_CUBE_ARRAY - VK_IMAGE_VIEW_TYPE_1D + 1),
+    VK_IMAGE_VIEW_TYPE_MAX_ENUM = 0x7FFFFFFF
+} VkImageViewType;
+
+typedef enum VkComponentSwizzle {
+    VK_COMPONENT_SWIZZLE_IDENTITY = 0,
+    VK_COMPONENT_SWIZZLE_ZERO = 1,
+    VK_COMPONENT_SWIZZLE_ONE = 2,
+    VK_COMPONENT_SWIZZLE_R = 3,
+    VK_COMPONENT_SWIZZLE_G = 4,
+    VK_COMPONENT_SWIZZLE_B = 5,
+    VK_COMPONENT_SWIZZLE_A = 6,
+    VK_COMPONENT_SWIZZLE_BEGIN_RANGE = VK_COMPONENT_SWIZZLE_IDENTITY,
+    VK_COMPONENT_SWIZZLE_END_RANGE = VK_COMPONENT_SWIZZLE_A,
+    VK_COMPONENT_SWIZZLE_RANGE_SIZE = (VK_COMPONENT_SWIZZLE_A - VK_COMPONENT_SWIZZLE_IDENTITY + 1),
+    VK_COMPONENT_SWIZZLE_MAX_ENUM = 0x7FFFFFFF
+} VkComponentSwizzle;
+
+typedef enum VkVertexInputRate {
+    VK_VERTEX_INPUT_RATE_VERTEX = 0,
+    VK_VERTEX_INPUT_RATE_INSTANCE = 1,
+    VK_VERTEX_INPUT_RATE_BEGIN_RANGE = VK_VERTEX_INPUT_RATE_VERTEX,
+    VK_VERTEX_INPUT_RATE_END_RANGE = VK_VERTEX_INPUT_RATE_INSTANCE,
+    VK_VERTEX_INPUT_RATE_RANGE_SIZE = (VK_VERTEX_INPUT_RATE_INSTANCE - VK_VERTEX_INPUT_RATE_VERTEX + 1),
+    VK_VERTEX_INPUT_RATE_MAX_ENUM = 0x7FFFFFFF
+} VkVertexInputRate;
+
+typedef enum VkPrimitiveTopology {
+    VK_PRIMITIVE_TOPOLOGY_POINT_LIST = 0,
+    VK_PRIMITIVE_TOPOLOGY_LINE_LIST = 1,
+    VK_PRIMITIVE_TOPOLOGY_LINE_STRIP = 2,
+    VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST = 3,
+    VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP = 4,
+    VK_PRIMITIVE_TOPOLOGY_TRIANGLE_FAN = 5,
+    VK_PRIMITIVE_TOPOLOGY_LINE_LIST_WITH_ADJACENCY = 6,
+    VK_PRIMITIVE_TOPOLOGY_LINE_STRIP_WITH_ADJACENCY = 7,
+    VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST_WITH_ADJACENCY = 8,
+    VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP_WITH_ADJACENCY = 9,
+    VK_PRIMITIVE_TOPOLOGY_PATCH_LIST = 10,
+    VK_PRIMITIVE_TOPOLOGY_BEGIN_RANGE = VK_PRIMITIVE_TOPOLOGY_POINT_LIST,
+    VK_PRIMITIVE_TOPOLOGY_END_RANGE = VK_PRIMITIVE_TOPOLOGY_PATCH_LIST,
+    VK_PRIMITIVE_TOPOLOGY_RANGE_SIZE = (VK_PRIMITIVE_TOPOLOGY_PATCH_LIST - VK_PRIMITIVE_TOPOLOGY_POINT_LIST + 1),
+    VK_PRIMITIVE_TOPOLOGY_MAX_ENUM = 0x7FFFFFFF
+} VkPrimitiveTopology;
+
+typedef enum VkPolygonMode {
+    VK_POLYGON_MODE_FILL = 0,
+    VK_POLYGON_MODE_LINE = 1,
+    VK_POLYGON_MODE_POINT = 2,
+    VK_POLYGON_MODE_BEGIN_RANGE = VK_POLYGON_MODE_FILL,
+    VK_POLYGON_MODE_END_RANGE = VK_POLYGON_MODE_POINT,
+    VK_POLYGON_MODE_RANGE_SIZE = (VK_POLYGON_MODE_POINT - VK_POLYGON_MODE_FILL + 1),
+    VK_POLYGON_MODE_MAX_ENUM = 0x7FFFFFFF
+} VkPolygonMode;
+
+typedef enum VkFrontFace {
+    VK_FRONT_FACE_COUNTER_CLOCKWISE = 0,
+    VK_FRONT_FACE_CLOCKWISE = 1,
+    VK_FRONT_FACE_BEGIN_RANGE = VK_FRONT_FACE_COUNTER_CLOCKWISE,
+    VK_FRONT_FACE_END_RANGE = VK_FRONT_FACE_CLOCKWISE,
+    VK_FRONT_FACE_RANGE_SIZE = (VK_FRONT_FACE_CLOCKWISE - VK_FRONT_FACE_COUNTER_CLOCKWISE + 1),
+    VK_FRONT_FACE_MAX_ENUM = 0x7FFFFFFF
+} VkFrontFace;
+
+typedef enum VkCompareOp {
+    VK_COMPARE_OP_NEVER = 0,
+    VK_COMPARE_OP_LESS = 1,
+    VK_COMPARE_OP_EQUAL = 2,
+    VK_COMPARE_OP_LESS_OR_EQUAL = 3,
+    VK_COMPARE_OP_GREATER = 4,
+    VK_COMPARE_OP_NOT_EQUAL = 5,
+    VK_COMPARE_OP_GREATER_OR_EQUAL = 6,
+    VK_COMPARE_OP_ALWAYS = 7,
+    VK_COMPARE_OP_BEGIN_RANGE = VK_COMPARE_OP_NEVER,
+    VK_COMPARE_OP_END_RANGE = VK_COMPARE_OP_ALWAYS,
+    VK_COMPARE_OP_RANGE_SIZE = (VK_COMPARE_OP_ALWAYS - VK_COMPARE_OP_NEVER + 1),
+    VK_COMPARE_OP_MAX_ENUM = 0x7FFFFFFF
+} VkCompareOp;
+
+typedef enum VkStencilOp {
+    VK_STENCIL_OP_KEEP = 0,
+    VK_STENCIL_OP_ZERO = 1,
+    VK_STENCIL_OP_REPLACE = 2,
+    VK_STENCIL_OP_INCREMENT_AND_CLAMP = 3,
+    VK_STENCIL_OP_DECREMENT_AND_CLAMP = 4,
+    VK_STENCIL_OP_INVERT = 5,
+    VK_STENCIL_OP_INCREMENT_AND_WRAP = 6,
+    VK_STENCIL_OP_DECREMENT_AND_WRAP = 7,
+    VK_STENCIL_OP_BEGIN_RANGE = VK_STENCIL_OP_KEEP,
+    VK_STENCIL_OP_END_RANGE = VK_STENCIL_OP_DECREMENT_AND_WRAP,
+    VK_STENCIL_OP_RANGE_SIZE = (VK_STENCIL_OP_DECREMENT_AND_WRAP - VK_STENCIL_OP_KEEP + 1),
+    VK_STENCIL_OP_MAX_ENUM = 0x7FFFFFFF
+} VkStencilOp;
+
+typedef enum VkLogicOp {
+    VK_LOGIC_OP_CLEAR = 0,
+    VK_LOGIC_OP_AND = 1,
+    VK_LOGIC_OP_AND_REVERSE = 2,
+    VK_LOGIC_OP_COPY = 3,
+    VK_LOGIC_OP_AND_INVERTED = 4,
+    VK_LOGIC_OP_NO_OP = 5,
+    VK_LOGIC_OP_XOR = 6,
+    VK_LOGIC_OP_OR = 7,
+    VK_LOGIC_OP_NOR = 8,
+    VK_LOGIC_OP_EQUIVALENT = 9,
+    VK_LOGIC_OP_INVERT = 10,
+    VK_LOGIC_OP_OR_REVERSE = 11,
+    VK_LOGIC_OP_COPY_INVERTED = 12,
+    VK_LOGIC_OP_OR_INVERTED = 13,
+    VK_LOGIC_OP_NAND = 14,
+    VK_LOGIC_OP_SET = 15,
+    VK_LOGIC_OP_BEGIN_RANGE = VK_LOGIC_OP_CLEAR,
+    VK_LOGIC_OP_END_RANGE = VK_LOGIC_OP_SET,
+    VK_LOGIC_OP_RANGE_SIZE = (VK_LOGIC_OP_SET - VK_LOGIC_OP_CLEAR + 1),
+    VK_LOGIC_OP_MAX_ENUM = 0x7FFFFFFF
+} VkLogicOp;
+
+typedef enum VkBlendFactor {
+    VK_BLEND_FACTOR_ZERO = 0,
+    VK_BLEND_FACTOR_ONE = 1,
+    VK_BLEND_FACTOR_SRC_COLOR = 2,
+    VK_BLEND_FACTOR_ONE_MINUS_SRC_COLOR = 3,
+    VK_BLEND_FACTOR_DST_COLOR = 4,
+    VK_BLEND_FACTOR_ONE_MINUS_DST_COLOR = 5,
+    VK_BLEND_FACTOR_SRC_ALPHA = 6,
+    VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA = 7,
+    VK_BLEND_FACTOR_DST_ALPHA = 8,
+    VK_BLEND_FACTOR_ONE_MINUS_DST_ALPHA = 9,
+    VK_BLEND_FACTOR_CONSTANT_COLOR = 10,
+    VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_COLOR = 11,
+    VK_BLEND_FACTOR_CONSTANT_ALPHA = 12,
+    VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA = 13,
+    VK_BLEND_FACTOR_SRC_ALPHA_SATURATE = 14,
+    VK_BLEND_FACTOR_SRC1_COLOR = 15,
+    VK_BLEND_FACTOR_ONE_MINUS_SRC1_COLOR = 16,
+    VK_BLEND_FACTOR_SRC1_ALPHA = 17,
+    VK_BLEND_FACTOR_ONE_MINUS_SRC1_ALPHA = 18,
+    VK_BLEND_FACTOR_BEGIN_RANGE = VK_BLEND_FACTOR_ZERO,
+    VK_BLEND_FACTOR_END_RANGE = VK_BLEND_FACTOR_ONE_MINUS_SRC1_ALPHA,
+    VK_BLEND_FACTOR_RANGE_SIZE = (VK_BLEND_FACTOR_ONE_MINUS_SRC1_ALPHA - VK_BLEND_FACTOR_ZERO + 1),
+    VK_BLEND_FACTOR_MAX_ENUM = 0x7FFFFFFF
+} VkBlendFactor;
+
+typedef enum VkBlendOp {
+    VK_BLEND_OP_ADD = 0,
+    VK_BLEND_OP_SUBTRACT = 1,
+    VK_BLEND_OP_REVERSE_SUBTRACT = 2,
+    VK_BLEND_OP_MIN = 3,
+    VK_BLEND_OP_MAX = 4,
+    VK_BLEND_OP_BEGIN_RANGE = VK_BLEND_OP_ADD,
+    VK_BLEND_OP_END_RANGE = VK_BLEND_OP_MAX,
+    VK_BLEND_OP_RANGE_SIZE = (VK_BLEND_OP_MAX - VK_BLEND_OP_ADD + 1),
+    VK_BLEND_OP_MAX_ENUM = 0x7FFFFFFF
+} VkBlendOp;
+
+typedef enum VkDynamicState {
+    VK_DYNAMIC_STATE_VIEWPORT = 0,
+    VK_DYNAMIC_STATE_SCISSOR = 1,
+    VK_DYNAMIC_STATE_LINE_WIDTH = 2,
+    VK_DYNAMIC_STATE_DEPTH_BIAS = 3,
+    VK_DYNAMIC_STATE_BLEND_CONSTANTS = 4,
+    VK_DYNAMIC_STATE_DEPTH_BOUNDS = 5,
+    VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK = 6,
+    VK_DYNAMIC_STATE_STENCIL_WRITE_MASK = 7,
+    VK_DYNAMIC_STATE_STENCIL_REFERENCE = 8,
+    VK_DYNAMIC_STATE_BEGIN_RANGE = VK_DYNAMIC_STATE_VIEWPORT,
+    VK_DYNAMIC_STATE_END_RANGE = VK_DYNAMIC_STATE_STENCIL_REFERENCE,
+    VK_DYNAMIC_STATE_RANGE_SIZE = (VK_DYNAMIC_STATE_STENCIL_REFERENCE - VK_DYNAMIC_STATE_VIEWPORT + 1),
+    VK_DYNAMIC_STATE_MAX_ENUM = 0x7FFFFFFF
+} VkDynamicState;
+
+typedef enum VkFilter {
+    VK_FILTER_NEAREST = 0,
+    VK_FILTER_LINEAR = 1,
+    VK_FILTER_BEGIN_RANGE = VK_FILTER_NEAREST,
+    VK_FILTER_END_RANGE = VK_FILTER_LINEAR,
+    VK_FILTER_RANGE_SIZE = (VK_FILTER_LINEAR - VK_FILTER_NEAREST + 1),
+    VK_FILTER_MAX_ENUM = 0x7FFFFFFF
+} VkFilter;
+
+typedef enum VkSamplerMipmapMode {
+    VK_SAMPLER_MIPMAP_MODE_NEAREST = 0,
+    VK_SAMPLER_MIPMAP_MODE_LINEAR = 1,
+    VK_SAMPLER_MIPMAP_MODE_BEGIN_RANGE = VK_SAMPLER_MIPMAP_MODE_NEAREST,
+    VK_SAMPLER_MIPMAP_MODE_END_RANGE = VK_SAMPLER_MIPMAP_MODE_LINEAR,
+    VK_SAMPLER_MIPMAP_MODE_RANGE_SIZE = (VK_SAMPLER_MIPMAP_MODE_LINEAR - VK_SAMPLER_MIPMAP_MODE_NEAREST + 1),
+    VK_SAMPLER_MIPMAP_MODE_MAX_ENUM = 0x7FFFFFFF
+} VkSamplerMipmapMode;
+
+typedef enum VkSamplerAddressMode {
+    VK_SAMPLER_ADDRESS_MODE_REPEAT = 0,
+    VK_SAMPLER_ADDRESS_MODE_MIRRORED_REPEAT = 1,
+    VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE = 2,
+    VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER = 3,
+    VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE = 4,
+    VK_SAMPLER_ADDRESS_MODE_BEGIN_RANGE = VK_SAMPLER_ADDRESS_MODE_REPEAT,
+    VK_SAMPLER_ADDRESS_MODE_END_RANGE = VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE,
+    VK_SAMPLER_ADDRESS_MODE_RANGE_SIZE = (VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE - VK_SAMPLER_ADDRESS_MODE_REPEAT + 1),
+    VK_SAMPLER_ADDRESS_MODE_MAX_ENUM = 0x7FFFFFFF
+} VkSamplerAddressMode;
+
+typedef enum VkBorderColor {
+    VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK = 0,
+    VK_BORDER_COLOR_INT_TRANSPARENT_BLACK = 1,
+    VK_BORDER_COLOR_FLOAT_OPAQUE_BLACK = 2,
+    VK_BORDER_COLOR_INT_OPAQUE_BLACK = 3,
+    VK_BORDER_COLOR_FLOAT_OPAQUE_WHITE = 4,
+    VK_BORDER_COLOR_INT_OPAQUE_WHITE = 5,
+    VK_BORDER_COLOR_BEGIN_RANGE = VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK,
+    VK_BORDER_COLOR_END_RANGE = VK_BORDER_COLOR_INT_OPAQUE_WHITE,
+    VK_BORDER_COLOR_RANGE_SIZE = (VK_BORDER_COLOR_INT_OPAQUE_WHITE - VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK + 1),
+    VK_BORDER_COLOR_MAX_ENUM = 0x7FFFFFFF
+} VkBorderColor;
+
+typedef enum VkDescriptorType {
+    VK_DESCRIPTOR_TYPE_SAMPLER = 0,
+    VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER = 1,
+    VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE = 2,
+    VK_DESCRIPTOR_TYPE_STORAGE_IMAGE = 3,
+    VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER = 4,
+    VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER = 5,
+    VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER = 6,
+    VK_DESCRIPTOR_TYPE_STORAGE_BUFFER = 7,
+    VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC = 8,
+    VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC = 9,
+    VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT = 10,
+    VK_DESCRIPTOR_TYPE_BEGIN_RANGE = VK_DESCRIPTOR_TYPE_SAMPLER,
+    VK_DESCRIPTOR_TYPE_END_RANGE = VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT,
+    VK_DESCRIPTOR_TYPE_RANGE_SIZE = (VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT - VK_DESCRIPTOR_TYPE_SAMPLER + 1),
+    VK_DESCRIPTOR_TYPE_MAX_ENUM = 0x7FFFFFFF
+} VkDescriptorType;
+
+typedef enum VkAttachmentLoadOp {
+    VK_ATTACHMENT_LOAD_OP_LOAD = 0,
+    VK_ATTACHMENT_LOAD_OP_CLEAR = 1,
+    VK_ATTACHMENT_LOAD_OP_DONT_CARE = 2,
+    VK_ATTACHMENT_LOAD_OP_BEGIN_RANGE = VK_ATTACHMENT_LOAD_OP_LOAD,
+    VK_ATTACHMENT_LOAD_OP_END_RANGE = VK_ATTACHMENT_LOAD_OP_DONT_CARE,
+    VK_ATTACHMENT_LOAD_OP_RANGE_SIZE = (VK_ATTACHMENT_LOAD_OP_DONT_CARE - VK_ATTACHMENT_LOAD_OP_LOAD + 1),
+    VK_ATTACHMENT_LOAD_OP_MAX_ENUM = 0x7FFFFFFF
+} VkAttachmentLoadOp;
+
+typedef enum VkAttachmentStoreOp {
+    VK_ATTACHMENT_STORE_OP_STORE = 0,
+    VK_ATTACHMENT_STORE_OP_DONT_CARE = 1,
+    VK_ATTACHMENT_STORE_OP_BEGIN_RANGE = VK_ATTACHMENT_STORE_OP_STORE,
+    VK_ATTACHMENT_STORE_OP_END_RANGE = VK_ATTACHMENT_STORE_OP_DONT_CARE,
+    VK_ATTACHMENT_STORE_OP_RANGE_SIZE = (VK_ATTACHMENT_STORE_OP_DONT_CARE - VK_ATTACHMENT_STORE_OP_STORE + 1),
+    VK_ATTACHMENT_STORE_OP_MAX_ENUM = 0x7FFFFFFF
+} VkAttachmentStoreOp;
+
+typedef enum VkPipelineBindPoint {
+    VK_PIPELINE_BIND_POINT_GRAPHICS = 0,
+    VK_PIPELINE_BIND_POINT_COMPUTE = 1,
+    VK_PIPELINE_BIND_POINT_BEGIN_RANGE = VK_PIPELINE_BIND_POINT_GRAPHICS,
+    VK_PIPELINE_BIND_POINT_END_RANGE = VK_PIPELINE_BIND_POINT_COMPUTE,
+    VK_PIPELINE_BIND_POINT_RANGE_SIZE = (VK_PIPELINE_BIND_POINT_COMPUTE - VK_PIPELINE_BIND_POINT_GRAPHICS + 1),
+    VK_PIPELINE_BIND_POINT_MAX_ENUM = 0x7FFFFFFF
+} VkPipelineBindPoint;
+
+typedef enum VkCommandBufferLevel {
+    VK_COMMAND_BUFFER_LEVEL_PRIMARY = 0,
+    VK_COMMAND_BUFFER_LEVEL_SECONDARY = 1,
+    VK_COMMAND_BUFFER_LEVEL_BEGIN_RANGE = VK_COMMAND_BUFFER_LEVEL_PRIMARY,
+    VK_COMMAND_BUFFER_LEVEL_END_RANGE = VK_COMMAND_BUFFER_LEVEL_SECONDARY,
+    VK_COMMAND_BUFFER_LEVEL_RANGE_SIZE = (VK_COMMAND_BUFFER_LEVEL_SECONDARY - VK_COMMAND_BUFFER_LEVEL_PRIMARY + 1),
+    VK_COMMAND_BUFFER_LEVEL_MAX_ENUM = 0x7FFFFFFF
+} VkCommandBufferLevel;
+
+typedef enum VkIndexType {
+    VK_INDEX_TYPE_UINT16 = 0,
+    VK_INDEX_TYPE_UINT32 = 1,
+    VK_INDEX_TYPE_BEGIN_RANGE = VK_INDEX_TYPE_UINT16,
+    VK_INDEX_TYPE_END_RANGE = VK_INDEX_TYPE_UINT32,
+    VK_INDEX_TYPE_RANGE_SIZE = (VK_INDEX_TYPE_UINT32 - VK_INDEX_TYPE_UINT16 + 1),
+    VK_INDEX_TYPE_MAX_ENUM = 0x7FFFFFFF
+} VkIndexType;
+
+typedef enum VkSubpassContents {
+    VK_SUBPASS_CONTENTS_INLINE = 0,
+    VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS = 1,
+    VK_SUBPASS_CONTENTS_BEGIN_RANGE = VK_SUBPASS_CONTENTS_INLINE,
+    VK_SUBPASS_CONTENTS_END_RANGE = VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS,
+    VK_SUBPASS_CONTENTS_RANGE_SIZE = (VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS - VK_SUBPASS_CONTENTS_INLINE + 1),
+    VK_SUBPASS_CONTENTS_MAX_ENUM = 0x7FFFFFFF
+} VkSubpassContents;
+
+typedef VkFlags VkInstanceCreateFlags;
+
+typedef enum VkFormatFeatureFlagBits {
+    VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT = 0x00000001,
+    VK_FORMAT_FEATURE_STORAGE_IMAGE_BIT = 0x00000002,
+    VK_FORMAT_FEATURE_STORAGE_IMAGE_ATOMIC_BIT = 0x00000004,
+    VK_FORMAT_FEATURE_UNIFORM_TEXEL_BUFFER_BIT = 0x00000008,
+    VK_FORMAT_FEATURE_STORAGE_TEXEL_BUFFER_BIT = 0x00000010,
+    VK_FORMAT_FEATURE_STORAGE_TEXEL_BUFFER_ATOMIC_BIT = 0x00000020,
+    VK_FORMAT_FEATURE_VERTEX_BUFFER_BIT = 0x00000040,
+    VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT = 0x00000080,
+    VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BLEND_BIT = 0x00000100,
+    VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT = 0x00000200,
+    VK_FORMAT_FEATURE_BLIT_SRC_BIT = 0x00000400,
+    VK_FORMAT_FEATURE_BLIT_DST_BIT = 0x00000800,
+    VK_FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_LINEAR_BIT = 0x00001000,
+} VkFormatFeatureFlagBits;
+typedef VkFlags VkFormatFeatureFlags;
+
+typedef enum VkImageUsageFlagBits {
+    VK_IMAGE_USAGE_TRANSFER_SRC_BIT = 0x00000001,
+    VK_IMAGE_USAGE_TRANSFER_DST_BIT = 0x00000002,
+    VK_IMAGE_USAGE_SAMPLED_BIT = 0x00000004,
+    VK_IMAGE_USAGE_STORAGE_BIT = 0x00000008,
+    VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT = 0x00000010,
+    VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT = 0x00000020,
+    VK_IMAGE_USAGE_TRANSIENT_ATTACHMENT_BIT = 0x00000040,
+    VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT = 0x00000080,
+} VkImageUsageFlagBits;
+typedef VkFlags VkImageUsageFlags;
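+/* Illustrative note (editorial sketch, not part of the upstream Khronos
+ * header): each Vk*FlagBits enum defines individual bits that are OR'd
+ * together into the matching Vk*Flags typedef.  For example, an image
+ * that is rendered to and later sampled might be created with:
+ *
+ *     VkImageUsageFlags usage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT |
+ *                               VK_IMAGE_USAGE_SAMPLED_BIT;
+ */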
+
+typedef enum VkImageCreateFlagBits {
+    VK_IMAGE_CREATE_SPARSE_BINDING_BIT = 0x00000001,
+    VK_IMAGE_CREATE_SPARSE_RESIDENCY_BIT = 0x00000002,
+    VK_IMAGE_CREATE_SPARSE_ALIASED_BIT = 0x00000004,
+    VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT = 0x00000008,
+    VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT = 0x00000010,
+} VkImageCreateFlagBits;
+typedef VkFlags VkImageCreateFlags;
+
+typedef enum VkSampleCountFlagBits {
+    VK_SAMPLE_COUNT_1_BIT = 0x00000001,
+    VK_SAMPLE_COUNT_2_BIT = 0x00000002,
+    VK_SAMPLE_COUNT_4_BIT = 0x00000004,
+    VK_SAMPLE_COUNT_8_BIT = 0x00000008,
+    VK_SAMPLE_COUNT_16_BIT = 0x00000010,
+    VK_SAMPLE_COUNT_32_BIT = 0x00000020,
+    VK_SAMPLE_COUNT_64_BIT = 0x00000040,
+} VkSampleCountFlagBits;
+typedef VkFlags VkSampleCountFlags;
+
+typedef enum VkQueueFlagBits {
+    VK_QUEUE_GRAPHICS_BIT = 0x00000001,
+    VK_QUEUE_COMPUTE_BIT = 0x00000002,
+    VK_QUEUE_TRANSFER_BIT = 0x00000004,
+    VK_QUEUE_SPARSE_BINDING_BIT = 0x00000008,
+} VkQueueFlagBits;
+typedef VkFlags VkQueueFlags;
+
+typedef enum VkMemoryPropertyFlagBits {
+    VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT = 0x00000001,
+    VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT = 0x00000002,
+    VK_MEMORY_PROPERTY_HOST_COHERENT_BIT = 0x00000004,
+    VK_MEMORY_PROPERTY_HOST_CACHED_BIT = 0x00000008,
+    VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT = 0x00000010,
+} VkMemoryPropertyFlagBits;
+typedef VkFlags VkMemoryPropertyFlags;
+
+typedef enum VkMemoryHeapFlagBits {
+    VK_MEMORY_HEAP_DEVICE_LOCAL_BIT = 0x00000001,
+} VkMemoryHeapFlagBits;
+typedef VkFlags VkMemoryHeapFlags;
+typedef VkFlags VkDeviceCreateFlags;
+typedef VkFlags VkDeviceQueueCreateFlags;
+
+typedef enum VkPipelineStageFlagBits {
+    VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT = 0x00000001,
+    VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT = 0x00000002,
+    VK_PIPELINE_STAGE_VERTEX_INPUT_BIT = 0x00000004,
+    VK_PIPELINE_STAGE_VERTEX_SHADER_BIT = 0x00000008,
+    VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT = 0x00000010,
+    VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT = 0x00000020,
+    VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT = 0x00000040,
+    VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT = 0x00000080,
+    VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT = 0x00000100,
+    VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT = 0x00000200,
+    VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT = 0x00000400,
+    VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT = 0x00000800,
+    VK_PIPELINE_STAGE_TRANSFER_BIT = 0x00001000,
+    VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT = 0x00002000,
+    VK_PIPELINE_STAGE_HOST_BIT = 0x00004000,
+    VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT = 0x00008000,
+    VK_PIPELINE_STAGE_ALL_COMMANDS_BIT = 0x00010000,
+} VkPipelineStageFlagBits;
+typedef VkFlags VkPipelineStageFlags;
+typedef VkFlags VkMemoryMapFlags;
+
+typedef enum VkImageAspectFlagBits {
+    VK_IMAGE_ASPECT_COLOR_BIT = 0x00000001,
+    VK_IMAGE_ASPECT_DEPTH_BIT = 0x00000002,
+    VK_IMAGE_ASPECT_STENCIL_BIT = 0x00000004,
+    VK_IMAGE_ASPECT_METADATA_BIT = 0x00000008,
+} VkImageAspectFlagBits;
+typedef VkFlags VkImageAspectFlags;
+
+typedef enum VkSparseImageFormatFlagBits {
+    VK_SPARSE_IMAGE_FORMAT_SINGLE_MIPTAIL_BIT = 0x00000001,
+    VK_SPARSE_IMAGE_FORMAT_ALIGNED_MIP_SIZE_BIT = 0x00000002,
+    VK_SPARSE_IMAGE_FORMAT_NONSTANDARD_BLOCK_SIZE_BIT = 0x00000004,
+} VkSparseImageFormatFlagBits;
+typedef VkFlags VkSparseImageFormatFlags;
+
+typedef enum VkSparseMemoryBindFlagBits {
+    VK_SPARSE_MEMORY_BIND_METADATA_BIT = 0x00000001,
+} VkSparseMemoryBindFlagBits;
+typedef VkFlags VkSparseMemoryBindFlags;
+
+typedef enum VkFenceCreateFlagBits {
+    VK_FENCE_CREATE_SIGNALED_BIT = 0x00000001,
+} VkFenceCreateFlagBits;
+typedef VkFlags VkFenceCreateFlags;
+typedef VkFlags VkSemaphoreCreateFlags;
+typedef VkFlags VkEventCreateFlags;
+typedef VkFlags VkQueryPoolCreateFlags;
+
+typedef enum VkQueryPipelineStatisticFlagBits {
+    VK_QUERY_PIPELINE_STATISTIC_INPUT_ASSEMBLY_VERTICES_BIT = 0x00000001,
+    VK_QUERY_PIPELINE_STATISTIC_INPUT_ASSEMBLY_PRIMITIVES_BIT = 0x00000002,
+    VK_QUERY_PIPELINE_STATISTIC_VERTEX_SHADER_INVOCATIONS_BIT = 0x00000004,
+    VK_QUERY_PIPELINE_STATISTIC_GEOMETRY_SHADER_INVOCATIONS_BIT = 0x00000008,
+    VK_QUERY_PIPELINE_STATISTIC_GEOMETRY_SHADER_PRIMITIVES_BIT = 0x00000010,
+    VK_QUERY_PIPELINE_STATISTIC_CLIPPING_INVOCATIONS_BIT = 0x00000020,
+    VK_QUERY_PIPELINE_STATISTIC_CLIPPING_PRIMITIVES_BIT = 0x00000040,
+    VK_QUERY_PIPELINE_STATISTIC_FRAGMENT_SHADER_INVOCATIONS_BIT = 0x00000080,
+    VK_QUERY_PIPELINE_STATISTIC_TESSELLATION_CONTROL_SHADER_PATCHES_BIT = 0x00000100,
+    VK_QUERY_PIPELINE_STATISTIC_TESSELLATION_EVALUATION_SHADER_INVOCATIONS_BIT = 0x00000200,
+    VK_QUERY_PIPELINE_STATISTIC_COMPUTE_SHADER_INVOCATIONS_BIT = 0x00000400,
+} VkQueryPipelineStatisticFlagBits;
+typedef VkFlags VkQueryPipelineStatisticFlags;
+
+typedef enum VkQueryResultFlagBits {
+    VK_QUERY_RESULT_64_BIT = 0x00000001,
+    VK_QUERY_RESULT_WAIT_BIT = 0x00000002,
+    VK_QUERY_RESULT_WITH_AVAILABILITY_BIT = 0x00000004,
+    VK_QUERY_RESULT_PARTIAL_BIT = 0x00000008,
+} VkQueryResultFlagBits;
+typedef VkFlags VkQueryResultFlags;
+
+typedef enum VkBufferCreateFlagBits {
+    VK_BUFFER_CREATE_SPARSE_BINDING_BIT = 0x00000001,
+    VK_BUFFER_CREATE_SPARSE_RESIDENCY_BIT = 0x00000002,
+    VK_BUFFER_CREATE_SPARSE_ALIASED_BIT = 0x00000004,
+} VkBufferCreateFlagBits;
+typedef VkFlags VkBufferCreateFlags;
+
+typedef enum VkBufferUsageFlagBits {
+    VK_BUFFER_USAGE_TRANSFER_SRC_BIT = 0x00000001,
+    VK_BUFFER_USAGE_TRANSFER_DST_BIT = 0x00000002,
+    VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT = 0x00000004,
+    VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT = 0x00000008,
+    VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT = 0x00000010,
+    VK_BUFFER_USAGE_STORAGE_BUFFER_BIT = 0x00000020,
+    VK_BUFFER_USAGE_INDEX_BUFFER_BIT = 0x00000040,
+    VK_BUFFER_USAGE_VERTEX_BUFFER_BIT = 0x00000080,
+    VK_BUFFER_USAGE_INDIRECT_BUFFER_BIT = 0x00000100,
+} VkBufferUsageFlagBits;
+typedef VkFlags VkBufferUsageFlags;
+typedef VkFlags VkBufferViewCreateFlags;
+typedef VkFlags VkImageViewCreateFlags;
+typedef VkFlags VkShaderModuleCreateFlags;
+typedef VkFlags VkPipelineCacheCreateFlags;
+
+typedef enum VkPipelineCreateFlagBits {
+    VK_PIPELINE_CREATE_DISABLE_OPTIMIZATION_BIT = 0x00000001,
+    VK_PIPELINE_CREATE_ALLOW_DERIVATIVES_BIT = 0x00000002,
+    VK_PIPELINE_CREATE_DERIVATIVE_BIT = 0x00000004,
+} VkPipelineCreateFlagBits;
+typedef VkFlags VkPipelineCreateFlags;
+typedef VkFlags VkPipelineShaderStageCreateFlags;
+
+typedef enum VkShaderStageFlagBits {
+    VK_SHADER_STAGE_VERTEX_BIT = 0x00000001,
+    VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT = 0x00000002,
+    VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT = 0x00000004,
+    VK_SHADER_STAGE_GEOMETRY_BIT = 0x00000008,
+    VK_SHADER_STAGE_FRAGMENT_BIT = 0x00000010,
+    VK_SHADER_STAGE_COMPUTE_BIT = 0x00000020,
+    VK_SHADER_STAGE_ALL_GRAPHICS = 0x1F,
+    VK_SHADER_STAGE_ALL = 0x7FFFFFFF,
+} VkShaderStageFlagBits;
+typedef VkFlags VkPipelineVertexInputStateCreateFlags;
+typedef VkFlags VkPipelineInputAssemblyStateCreateFlags;
+typedef VkFlags VkPipelineTessellationStateCreateFlags;
+typedef VkFlags VkPipelineViewportStateCreateFlags;
+typedef VkFlags VkPipelineRasterizationStateCreateFlags;
+
+typedef enum VkCullModeFlagBits {
+    VK_CULL_MODE_NONE = 0,
+    VK_CULL_MODE_FRONT_BIT = 0x00000001,
+    VK_CULL_MODE_BACK_BIT = 0x00000002,
+    VK_CULL_MODE_FRONT_AND_BACK = 0x3,
+} VkCullModeFlagBits;
+typedef VkFlags VkCullModeFlags;
+typedef VkFlags VkPipelineMultisampleStateCreateFlags;
+typedef VkFlags VkPipelineDepthStencilStateCreateFlags;
+typedef VkFlags VkPipelineColorBlendStateCreateFlags;
+
+typedef enum VkColorComponentFlagBits {
+    VK_COLOR_COMPONENT_R_BIT = 0x00000001,
+    VK_COLOR_COMPONENT_G_BIT = 0x00000002,
+    VK_COLOR_COMPONENT_B_BIT = 0x00000004,
+    VK_COLOR_COMPONENT_A_BIT = 0x00000008,
+} VkColorComponentFlagBits;
+typedef VkFlags VkColorComponentFlags;
+typedef VkFlags VkPipelineDynamicStateCreateFlags;
+typedef VkFlags VkPipelineLayoutCreateFlags;
+typedef VkFlags VkShaderStageFlags;
+typedef VkFlags VkSamplerCreateFlags;
+typedef VkFlags VkDescriptorSetLayoutCreateFlags;
+
+typedef enum VkDescriptorPoolCreateFlagBits {
+    VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT = 0x00000001,
+} VkDescriptorPoolCreateFlagBits;
+typedef VkFlags VkDescriptorPoolCreateFlags;
+typedef VkFlags VkDescriptorPoolResetFlags;
+typedef VkFlags VkFramebufferCreateFlags;
+typedef VkFlags VkRenderPassCreateFlags;
+
+typedef enum VkAttachmentDescriptionFlagBits {
+    VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT = 0x00000001,
+} VkAttachmentDescriptionFlagBits;
+typedef VkFlags VkAttachmentDescriptionFlags;
+typedef VkFlags VkSubpassDescriptionFlags;
+
+typedef enum VkAccessFlagBits {
+    VK_ACCESS_INDIRECT_COMMAND_READ_BIT = 0x00000001,
+    VK_ACCESS_INDEX_READ_BIT = 0x00000002,
+    VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT = 0x00000004,
+    VK_ACCESS_UNIFORM_READ_BIT = 0x00000008,
+    VK_ACCESS_INPUT_ATTACHMENT_READ_BIT = 0x00000010,
+    VK_ACCESS_SHADER_READ_BIT = 0x00000020,
+    VK_ACCESS_SHADER_WRITE_BIT = 0x00000040,
+    VK_ACCESS_COLOR_ATTACHMENT_READ_BIT = 0x00000080,
+    VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT = 0x00000100,
+    VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT = 0x00000200,
+    VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT = 0x00000400,
+    VK_ACCESS_TRANSFER_READ_BIT = 0x00000800,
+    VK_ACCESS_TRANSFER_WRITE_BIT = 0x00001000,
+    VK_ACCESS_HOST_READ_BIT = 0x00002000,
+    VK_ACCESS_HOST_WRITE_BIT = 0x00004000,
+    VK_ACCESS_MEMORY_READ_BIT = 0x00008000,
+    VK_ACCESS_MEMORY_WRITE_BIT = 0x00010000,
+} VkAccessFlagBits;
+typedef VkFlags VkAccessFlags;
+
+typedef enum VkDependencyFlagBits {
+    VK_DEPENDENCY_BY_REGION_BIT = 0x00000001,
+} VkDependencyFlagBits;
+typedef VkFlags VkDependencyFlags;
+
+typedef enum VkCommandPoolCreateFlagBits {
+    VK_COMMAND_POOL_CREATE_TRANSIENT_BIT = 0x00000001,
+    VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT = 0x00000002,
+} VkCommandPoolCreateFlagBits;
+typedef VkFlags VkCommandPoolCreateFlags;
+
+typedef enum VkCommandPoolResetFlagBits {
+    VK_COMMAND_POOL_RESET_RELEASE_RESOURCES_BIT = 0x00000001,
+} VkCommandPoolResetFlagBits;
+typedef VkFlags VkCommandPoolResetFlags;
+
+typedef enum VkCommandBufferUsageFlagBits {
+    VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT = 0x00000001,
+    VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT = 0x00000002,
+    VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT = 0x00000004,
+} VkCommandBufferUsageFlagBits;
+typedef VkFlags VkCommandBufferUsageFlags;
+
+typedef enum VkQueryControlFlagBits {
+    VK_QUERY_CONTROL_PRECISE_BIT = 0x00000001,
+} VkQueryControlFlagBits;
+typedef VkFlags VkQueryControlFlags;
+
+typedef enum VkCommandBufferResetFlagBits {
+    VK_COMMAND_BUFFER_RESET_RELEASE_RESOURCES_BIT = 0x00000001,
+} VkCommandBufferResetFlagBits;
+typedef VkFlags VkCommandBufferResetFlags;
+
+typedef enum VkStencilFaceFlagBits {
+    VK_STENCIL_FACE_FRONT_BIT = 0x00000001,
+    VK_STENCIL_FACE_BACK_BIT = 0x00000002,
+    VK_STENCIL_FRONT_AND_BACK = 0x3,
+} VkStencilFaceFlagBits;
+typedef VkFlags VkStencilFaceFlags;
+
+typedef void* (VKAPI_PTR *PFN_vkAllocationFunction)(
+    void*                                       pUserData,
+    size_t                                      size,
+    size_t                                      alignment,
+    VkSystemAllocationScope                     allocationScope);
+
+typedef void* (VKAPI_PTR *PFN_vkReallocationFunction)(
+    void*                                       pUserData,
+    void*                                       pOriginal,
+    size_t                                      size,
+    size_t                                      alignment,
+    VkSystemAllocationScope                     allocationScope);
+
+typedef void (VKAPI_PTR *PFN_vkFreeFunction)(
+    void*                                       pUserData,
+    void*                                       pMemory);
+
+typedef void (VKAPI_PTR *PFN_vkInternalAllocationNotification)(
+    void*                                       pUserData,
+    size_t                                      size,
+    VkInternalAllocationType                    allocationType,
+    VkSystemAllocationScope                     allocationScope);
+
+typedef void (VKAPI_PTR *PFN_vkInternalFreeNotification)(
+    void*                                       pUserData,
+    size_t                                      size,
+    VkInternalAllocationType                    allocationType,
+    VkSystemAllocationScope                     allocationScope);
+
+typedef void (VKAPI_PTR *PFN_vkVoidFunction)(void);
+
+typedef struct VkApplicationInfo {
+    VkStructureType    sType;
+    const void*        pNext;
+    const char*        pApplicationName;
+    uint32_t           applicationVersion;
+    const char*        pEngineName;
+    uint32_t           engineVersion;
+    uint32_t           apiVersion;
+} VkApplicationInfo;
+
+typedef struct VkInstanceCreateInfo {
+    VkStructureType             sType;
+    const void*                 pNext;
+    VkInstanceCreateFlags       flags;
+    const VkApplicationInfo*    pApplicationInfo;
+    uint32_t                    enabledLayerCount;
+    const char* const*          ppEnabledLayerNames;
+    uint32_t                    enabledExtensionCount;
+    const char* const*          ppEnabledExtensionNames;
+} VkInstanceCreateInfo;
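+/* Illustrative sketch (editorial, not part of the upstream Khronos
+ * header): minimal instance creation.  VK_MAKE_VERSION and the
+ * VK_STRUCTURE_TYPE_* values are defined earlier in this header;
+ * vkCreateInstance is declared further down.
+ *
+ *     VkApplicationInfo app = {
+ *         .sType = VK_STRUCTURE_TYPE_APPLICATION_INFO,
+ *         .pApplicationName = "demo",
+ *         .applicationVersion = 1,
+ *         .apiVersion = VK_MAKE_VERSION(1, 0, 0),
+ *     };
+ *     VkInstanceCreateInfo info = {
+ *         .sType = VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO,
+ *         .pApplicationInfo = &app,
+ *     };
+ *     VkInstance instance;
+ *     VkResult result = vkCreateInstance(&info, NULL, &instance);
+ *
+ * Passing NULL for the VkAllocationCallbacks* selects the
+ * implementation's default allocator.
+ */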
+
+typedef struct VkAllocationCallbacks {
+    void*                                   pUserData;
+    PFN_vkAllocationFunction                pfnAllocation;
+    PFN_vkReallocationFunction              pfnReallocation;
+    PFN_vkFreeFunction                      pfnFree;
+    PFN_vkInternalAllocationNotification    pfnInternalAllocation;
+    PFN_vkInternalFreeNotification          pfnInternalFree;
+} VkAllocationCallbacks;
+
+typedef struct VkPhysicalDeviceFeatures {
+    VkBool32    robustBufferAccess;
+    VkBool32    fullDrawIndexUint32;
+    VkBool32    imageCubeArray;
+    VkBool32    independentBlend;
+    VkBool32    geometryShader;
+    VkBool32    tessellationShader;
+    VkBool32    sampleRateShading;
+    VkBool32    dualSrcBlend;
+    VkBool32    logicOp;
+    VkBool32    multiDrawIndirect;
+    VkBool32    drawIndirectFirstInstance;
+    VkBool32    depthClamp;
+    VkBool32    depthBiasClamp;
+    VkBool32    fillModeNonSolid;
+    VkBool32    depthBounds;
+    VkBool32    wideLines;
+    VkBool32    largePoints;
+    VkBool32    alphaToOne;
+    VkBool32    multiViewport;
+    VkBool32    samplerAnisotropy;
+    VkBool32    textureCompressionETC2;
+    VkBool32    textureCompressionASTC_LDR;
+    VkBool32    textureCompressionBC;
+    VkBool32    occlusionQueryPrecise;
+    VkBool32    pipelineStatisticsQuery;
+    VkBool32    vertexPipelineStoresAndAtomics;
+    VkBool32    fragmentStoresAndAtomics;
+    VkBool32    shaderTessellationAndGeometryPointSize;
+    VkBool32    shaderImageGatherExtended;
+    VkBool32    shaderStorageImageExtendedFormats;
+    VkBool32    shaderStorageImageMultisample;
+    VkBool32    shaderStorageImageReadWithoutFormat;
+    VkBool32    shaderStorageImageWriteWithoutFormat;
+    VkBool32    shaderUniformBufferArrayDynamicIndexing;
+    VkBool32    shaderSampledImageArrayDynamicIndexing;
+    VkBool32    shaderStorageBufferArrayDynamicIndexing;
+    VkBool32    shaderStorageImageArrayDynamicIndexing;
+    VkBool32    shaderClipDistance;
+    VkBool32    shaderCullDistance;
+    VkBool32    shaderFloat64;
+    VkBool32    shaderInt64;
+    VkBool32    shaderInt16;
+    VkBool32    shaderResourceResidency;
+    VkBool32    shaderResourceMinLod;
+    VkBool32    sparseBinding;
+    VkBool32    sparseResidencyBuffer;
+    VkBool32    sparseResidencyImage2D;
+    VkBool32    sparseResidencyImage3D;
+    VkBool32    sparseResidency2Samples;
+    VkBool32    sparseResidency4Samples;
+    VkBool32    sparseResidency8Samples;
+    VkBool32    sparseResidency16Samples;
+    VkBool32    sparseResidencyAliased;
+    VkBool32    variableMultisampleRate;
+    VkBool32    inheritedQueries;
+} VkPhysicalDeviceFeatures;
+
+typedef struct VkFormatProperties {
+    VkFormatFeatureFlags    linearTilingFeatures;
+    VkFormatFeatureFlags    optimalTilingFeatures;
+    VkFormatFeatureFlags    bufferFeatures;
+} VkFormatProperties;
+
+typedef struct VkExtent3D {
+    uint32_t    width;
+    uint32_t    height;
+    uint32_t    depth;
+} VkExtent3D;
+
+typedef struct VkImageFormatProperties {
+    VkExtent3D            maxExtent;
+    uint32_t              maxMipLevels;
+    uint32_t              maxArrayLayers;
+    VkSampleCountFlags    sampleCounts;
+    VkDeviceSize          maxResourceSize;
+} VkImageFormatProperties;
+
+typedef struct VkPhysicalDeviceLimits {
+    uint32_t              maxImageDimension1D;
+    uint32_t              maxImageDimension2D;
+    uint32_t              maxImageDimension3D;
+    uint32_t              maxImageDimensionCube;
+    uint32_t              maxImageArrayLayers;
+    uint32_t              maxTexelBufferElements;
+    uint32_t              maxUniformBufferRange;
+    uint32_t              maxStorageBufferRange;
+    uint32_t              maxPushConstantsSize;
+    uint32_t              maxMemoryAllocationCount;
+    uint32_t              maxSamplerAllocationCount;
+    VkDeviceSize          bufferImageGranularity;
+    VkDeviceSize          sparseAddressSpaceSize;
+    uint32_t              maxBoundDescriptorSets;
+    uint32_t              maxPerStageDescriptorSamplers;
+    uint32_t              maxPerStageDescriptorUniformBuffers;
+    uint32_t              maxPerStageDescriptorStorageBuffers;
+    uint32_t              maxPerStageDescriptorSampledImages;
+    uint32_t              maxPerStageDescriptorStorageImages;
+    uint32_t              maxPerStageDescriptorInputAttachments;
+    uint32_t              maxPerStageResources;
+    uint32_t              maxDescriptorSetSamplers;
+    uint32_t              maxDescriptorSetUniformBuffers;
+    uint32_t              maxDescriptorSetUniformBuffersDynamic;
+    uint32_t              maxDescriptorSetStorageBuffers;
+    uint32_t              maxDescriptorSetStorageBuffersDynamic;
+    uint32_t              maxDescriptorSetSampledImages;
+    uint32_t              maxDescriptorSetStorageImages;
+    uint32_t              maxDescriptorSetInputAttachments;
+    uint32_t              maxVertexInputAttributes;
+    uint32_t              maxVertexInputBindings;
+    uint32_t              maxVertexInputAttributeOffset;
+    uint32_t              maxVertexInputBindingStride;
+    uint32_t              maxVertexOutputComponents;
+    uint32_t              maxTessellationGenerationLevel;
+    uint32_t              maxTessellationPatchSize;
+    uint32_t              maxTessellationControlPerVertexInputComponents;
+    uint32_t              maxTessellationControlPerVertexOutputComponents;
+    uint32_t              maxTessellationControlPerPatchOutputComponents;
+    uint32_t              maxTessellationControlTotalOutputComponents;
+    uint32_t              maxTessellationEvaluationInputComponents;
+    uint32_t              maxTessellationEvaluationOutputComponents;
+    uint32_t              maxGeometryShaderInvocations;
+    uint32_t              maxGeometryInputComponents;
+    uint32_t              maxGeometryOutputComponents;
+    uint32_t              maxGeometryOutputVertices;
+    uint32_t              maxGeometryTotalOutputComponents;
+    uint32_t              maxFragmentInputComponents;
+    uint32_t              maxFragmentOutputAttachments;
+    uint32_t              maxFragmentDualSrcAttachments;
+    uint32_t              maxFragmentCombinedOutputResources;
+    uint32_t              maxComputeSharedMemorySize;
+    uint32_t              maxComputeWorkGroupCount[3];
+    uint32_t              maxComputeWorkGroupInvocations;
+    uint32_t              maxComputeWorkGroupSize[3];
+    uint32_t              subPixelPrecisionBits;
+    uint32_t              subTexelPrecisionBits;
+    uint32_t              mipmapPrecisionBits;
+    uint32_t              maxDrawIndexedIndexValue;
+    uint32_t              maxDrawIndirectCount;
+    float                 maxSamplerLodBias;
+    float                 maxSamplerAnisotropy;
+    uint32_t              maxViewports;
+    uint32_t              maxViewportDimensions[2];
+    float                 viewportBoundsRange[2];
+    uint32_t              viewportSubPixelBits;
+    size_t                minMemoryMapAlignment;
+    VkDeviceSize          minTexelBufferOffsetAlignment;
+    VkDeviceSize          minUniformBufferOffsetAlignment;
+    VkDeviceSize          minStorageBufferOffsetAlignment;
+    int32_t               minTexelOffset;
+    uint32_t              maxTexelOffset;
+    int32_t               minTexelGatherOffset;
+    uint32_t              maxTexelGatherOffset;
+    float                 minInterpolationOffset;
+    float                 maxInterpolationOffset;
+    uint32_t              subPixelInterpolationOffsetBits;
+    uint32_t              maxFramebufferWidth;
+    uint32_t              maxFramebufferHeight;
+    uint32_t              maxFramebufferLayers;
+    VkSampleCountFlags    framebufferColorSampleCounts;
+    VkSampleCountFlags    framebufferDepthSampleCounts;
+    VkSampleCountFlags    framebufferStencilSampleCounts;
+    VkSampleCountFlags    framebufferNoAttachmentsSampleCounts;
+    uint32_t              maxColorAttachments;
+    VkSampleCountFlags    sampledImageColorSampleCounts;
+    VkSampleCountFlags    sampledImageIntegerSampleCounts;
+    VkSampleCountFlags    sampledImageDepthSampleCounts;
+    VkSampleCountFlags    sampledImageStencilSampleCounts;
+    VkSampleCountFlags    storageImageSampleCounts;
+    uint32_t              maxSampleMaskWords;
+    VkBool32              timestampComputeAndGraphics;
+    float                 timestampPeriod;
+    uint32_t              maxClipDistances;
+    uint32_t              maxCullDistances;
+    uint32_t              maxCombinedClipAndCullDistances;
+    uint32_t              discreteQueuePriorities;
+    float                 pointSizeRange[2];
+    float                 lineWidthRange[2];
+    float                 pointSizeGranularity;
+    float                 lineWidthGranularity;
+    VkBool32              strictLines;
+    VkBool32              standardSampleLocations;
+    VkDeviceSize          optimalBufferCopyOffsetAlignment;
+    VkDeviceSize          optimalBufferCopyRowPitchAlignment;
+    VkDeviceSize          nonCoherentAtomSize;
+} VkPhysicalDeviceLimits;
+
+typedef struct VkPhysicalDeviceSparseProperties {
+    VkBool32    residencyStandard2DBlockShape;
+    VkBool32    residencyStandard2DMultisampleBlockShape;
+    VkBool32    residencyStandard3DBlockShape;
+    VkBool32    residencyAlignedMipSize;
+    VkBool32    residencyNonResidentStrict;
+} VkPhysicalDeviceSparseProperties;
+
+typedef struct VkPhysicalDeviceProperties {
+    uint32_t                            apiVersion;
+    uint32_t                            driverVersion;
+    uint32_t                            vendorID;
+    uint32_t                            deviceID;
+    VkPhysicalDeviceType                deviceType;
+    char                                deviceName[VK_MAX_PHYSICAL_DEVICE_NAME_SIZE];
+    uint8_t                             pipelineCacheUUID[VK_UUID_SIZE];
+    VkPhysicalDeviceLimits              limits;
+    VkPhysicalDeviceSparseProperties    sparseProperties;
+} VkPhysicalDeviceProperties;
+
+typedef struct VkQueueFamilyProperties {
+    VkQueueFlags    queueFlags;
+    uint32_t        queueCount;
+    uint32_t        timestampValidBits;
+    VkExtent3D      minImageTransferGranularity;
+} VkQueueFamilyProperties;
+
+typedef struct VkMemoryType {
+    VkMemoryPropertyFlags    propertyFlags;
+    uint32_t                 heapIndex;
+} VkMemoryType;
+
+typedef struct VkMemoryHeap {
+    VkDeviceSize         size;
+    VkMemoryHeapFlags    flags;
+} VkMemoryHeap;
+
+typedef struct VkPhysicalDeviceMemoryProperties {
+    uint32_t        memoryTypeCount;
+    VkMemoryType    memoryTypes[VK_MAX_MEMORY_TYPES];
+    uint32_t        memoryHeapCount;
+    VkMemoryHeap    memoryHeaps[VK_MAX_MEMORY_HEAPS];
+} VkPhysicalDeviceMemoryProperties;
+
+typedef struct VkDeviceQueueCreateInfo {
+    VkStructureType             sType;
+    const void*                 pNext;
+    VkDeviceQueueCreateFlags    flags;
+    uint32_t                    queueFamilyIndex;
+    uint32_t                    queueCount;
+    const float*                pQueuePriorities;
+} VkDeviceQueueCreateInfo;
+
+typedef struct VkDeviceCreateInfo {
+    VkStructureType                    sType;
+    const void*                        pNext;
+    VkDeviceCreateFlags                flags;
+    uint32_t                           queueCreateInfoCount;
+    const VkDeviceQueueCreateInfo*     pQueueCreateInfos;
+    uint32_t                           enabledLayerCount;
+    const char* const*                 ppEnabledLayerNames;
+    uint32_t                           enabledExtensionCount;
+    const char* const*                 ppEnabledExtensionNames;
+    const VkPhysicalDeviceFeatures*    pEnabledFeatures;
+} VkDeviceCreateInfo;
+
+typedef struct VkExtensionProperties {
+    char        extensionName[VK_MAX_EXTENSION_NAME_SIZE];
+    uint32_t    specVersion;
+} VkExtensionProperties;
+
+typedef struct VkLayerProperties {
+    char        layerName[VK_MAX_EXTENSION_NAME_SIZE];
+    uint32_t    specVersion;
+    uint32_t    implementationVersion;
+    char        description[VK_MAX_DESCRIPTION_SIZE];
+} VkLayerProperties;
+
+typedef struct VkSubmitInfo {
+    VkStructureType                sType;
+    const void*                    pNext;
+    uint32_t                       waitSemaphoreCount;
+    const VkSemaphore*             pWaitSemaphores;
+    const VkPipelineStageFlags*    pWaitDstStageMask;
+    uint32_t                       commandBufferCount;
+    const VkCommandBuffer*         pCommandBuffers;
+    uint32_t                       signalSemaphoreCount;
+    const VkSemaphore*             pSignalSemaphores;
+} VkSubmitInfo;
+
+typedef struct VkMemoryAllocateInfo {
+    VkStructureType    sType;
+    const void*        pNext;
+    VkDeviceSize       allocationSize;
+    uint32_t           memoryTypeIndex;
+} VkMemoryAllocateInfo;
+
+typedef struct VkMappedMemoryRange {
+    VkStructureType    sType;
+    const void*        pNext;
+    VkDeviceMemory     memory;
+    VkDeviceSize       offset;
+    VkDeviceSize       size;
+} VkMappedMemoryRange;
+
+typedef struct VkMemoryRequirements {
+    VkDeviceSize    size;
+    VkDeviceSize    alignment;
+    uint32_t        memoryTypeBits;
+} VkMemoryRequirements;
+
+typedef struct VkSparseImageFormatProperties {
+    VkImageAspectFlags          aspectMask;
+    VkExtent3D                  imageGranularity;
+    VkSparseImageFormatFlags    flags;
+} VkSparseImageFormatProperties;
+
+typedef struct VkSparseImageMemoryRequirements {
+    VkSparseImageFormatProperties    formatProperties;
+    uint32_t                         imageMipTailFirstLod;
+    VkDeviceSize                     imageMipTailSize;
+    VkDeviceSize                     imageMipTailOffset;
+    VkDeviceSize                     imageMipTailStride;
+} VkSparseImageMemoryRequirements;
+
+typedef struct VkSparseMemoryBind {
+    VkDeviceSize               resourceOffset;
+    VkDeviceSize               size;
+    VkDeviceMemory             memory;
+    VkDeviceSize               memoryOffset;
+    VkSparseMemoryBindFlags    flags;
+} VkSparseMemoryBind;
+
+typedef struct VkSparseBufferMemoryBindInfo {
+    VkBuffer                     buffer;
+    uint32_t                     bindCount;
+    const VkSparseMemoryBind*    pBinds;
+} VkSparseBufferMemoryBindInfo;
+
+typedef struct VkSparseImageOpaqueMemoryBindInfo {
+    VkImage                      image;
+    uint32_t                     bindCount;
+    const VkSparseMemoryBind*    pBinds;
+} VkSparseImageOpaqueMemoryBindInfo;
+
+typedef struct VkImageSubresource {
+    VkImageAspectFlags    aspectMask;
+    uint32_t              mipLevel;
+    uint32_t              arrayLayer;
+} VkImageSubresource;
+
+typedef struct VkOffset3D {
+    int32_t    x;
+    int32_t    y;
+    int32_t    z;
+} VkOffset3D;
+
+typedef struct VkSparseImageMemoryBind {
+    VkImageSubresource         subresource;
+    VkOffset3D                 offset;
+    VkExtent3D                 extent;
+    VkDeviceMemory             memory;
+    VkDeviceSize               memoryOffset;
+    VkSparseMemoryBindFlags    flags;
+} VkSparseImageMemoryBind;
+
+typedef struct VkSparseImageMemoryBindInfo {
+    VkImage                           image;
+    uint32_t                          bindCount;
+    const VkSparseImageMemoryBind*    pBinds;
+} VkSparseImageMemoryBindInfo;
+
+typedef struct VkBindSparseInfo {
+    VkStructureType                             sType;
+    const void*                                 pNext;
+    uint32_t                                    waitSemaphoreCount;
+    const VkSemaphore*                          pWaitSemaphores;
+    uint32_t                                    bufferBindCount;
+    const VkSparseBufferMemoryBindInfo*         pBufferBinds;
+    uint32_t                                    imageOpaqueBindCount;
+    const VkSparseImageOpaqueMemoryBindInfo*    pImageOpaqueBinds;
+    uint32_t                                    imageBindCount;
+    const VkSparseImageMemoryBindInfo*          pImageBinds;
+    uint32_t                                    signalSemaphoreCount;
+    const VkSemaphore*                          pSignalSemaphores;
+} VkBindSparseInfo;
+
+typedef struct VkFenceCreateInfo {
+    VkStructureType       sType;
+    const void*           pNext;
+    VkFenceCreateFlags    flags;
+} VkFenceCreateInfo;
+
+typedef struct VkSemaphoreCreateInfo {
+    VkStructureType           sType;
+    const void*               pNext;
+    VkSemaphoreCreateFlags    flags;
+} VkSemaphoreCreateInfo;
+
+typedef struct VkEventCreateInfo {
+    VkStructureType       sType;
+    const void*           pNext;
+    VkEventCreateFlags    flags;
+} VkEventCreateInfo;
+
+typedef struct VkQueryPoolCreateInfo {
+    VkStructureType                  sType;
+    const void*                      pNext;
+    VkQueryPoolCreateFlags           flags;
+    VkQueryType                      queryType;
+    uint32_t                         queryCount;
+    VkQueryPipelineStatisticFlags    pipelineStatistics;
+} VkQueryPoolCreateInfo;
+
+typedef struct VkBufferCreateInfo {
+    VkStructureType        sType;
+    const void*            pNext;
+    VkBufferCreateFlags    flags;
+    VkDeviceSize           size;
+    VkBufferUsageFlags     usage;
+    VkSharingMode          sharingMode;
+    uint32_t               queueFamilyIndexCount;
+    const uint32_t*        pQueueFamilyIndices;
+} VkBufferCreateInfo;
+
+typedef struct VkBufferViewCreateInfo {
+    VkStructureType            sType;
+    const void*                pNext;
+    VkBufferViewCreateFlags    flags;
+    VkBuffer                   buffer;
+    VkFormat                   format;
+    VkDeviceSize               offset;
+    VkDeviceSize               range;
+} VkBufferViewCreateInfo;
+
+typedef struct VkImageCreateInfo {
+    VkStructureType          sType;
+    const void*              pNext;
+    VkImageCreateFlags       flags;
+    VkImageType              imageType;
+    VkFormat                 format;
+    VkExtent3D               extent;
+    uint32_t                 mipLevels;
+    uint32_t                 arrayLayers;
+    VkSampleCountFlagBits    samples;
+    VkImageTiling            tiling;
+    VkImageUsageFlags        usage;
+    VkSharingMode            sharingMode;
+    uint32_t                 queueFamilyIndexCount;
+    const uint32_t*          pQueueFamilyIndices;
+    VkImageLayout            initialLayout;
+} VkImageCreateInfo;
+
+typedef struct VkSubresourceLayout {
+    VkDeviceSize    offset;
+    VkDeviceSize    size;
+    VkDeviceSize    rowPitch;
+    VkDeviceSize    arrayPitch;
+    VkDeviceSize    depthPitch;
+} VkSubresourceLayout;
+
+typedef struct VkComponentMapping {
+    VkComponentSwizzle    r;
+    VkComponentSwizzle    g;
+    VkComponentSwizzle    b;
+    VkComponentSwizzle    a;
+} VkComponentMapping;
+
+typedef struct VkImageSubresourceRange {
+    VkImageAspectFlags    aspectMask;
+    uint32_t              baseMipLevel;
+    uint32_t              levelCount;
+    uint32_t              baseArrayLayer;
+    uint32_t              layerCount;
+} VkImageSubresourceRange;
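+/* Illustrative note (editorial sketch, not part of the upstream Khronos
+ * header): a subresource range covering all of a simple 2D color image
+ * with one mip level and one array layer:
+ *
+ *     VkImageSubresourceRange range = {
+ *         .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
+ *         .baseMipLevel = 0,
+ *         .levelCount = 1,
+ *         .baseArrayLayer = 0,
+ *         .layerCount = 1,
+ *     };
+ *
+ * VK_REMAINING_MIP_LEVELS / VK_REMAINING_ARRAY_LAYERS (defined earlier in
+ * this header) may be used for levelCount / layerCount to mean "all
+ * remaining".
+ */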
+
+typedef struct VkImageViewCreateInfo {
+    VkStructureType            sType;
+    const void*                pNext;
+    VkImageViewCreateFlags     flags;
+    VkImage                    image;
+    VkImageViewType            viewType;
+    VkFormat                   format;
+    VkComponentMapping         components;
+    VkImageSubresourceRange    subresourceRange;
+} VkImageViewCreateInfo;
+
+typedef struct VkShaderModuleCreateInfo {
+    VkStructureType              sType;
+    const void*                  pNext;
+    VkShaderModuleCreateFlags    flags;
+    size_t                       codeSize;
+    const uint32_t*              pCode;
+} VkShaderModuleCreateInfo;
+
+typedef struct VkPipelineCacheCreateInfo {
+    VkStructureType               sType;
+    const void*                   pNext;
+    VkPipelineCacheCreateFlags    flags;
+    size_t                        initialDataSize;
+    const void*                   pInitialData;
+} VkPipelineCacheCreateInfo;
+
+typedef struct VkSpecializationMapEntry {
+    uint32_t    constantID;
+    uint32_t    offset;
+    size_t      size;
+} VkSpecializationMapEntry;
+
+typedef struct VkSpecializationInfo {
+    uint32_t                           mapEntryCount;
+    const VkSpecializationMapEntry*    pMapEntries;
+    size_t                             dataSize;
+    const void*                        pData;
+} VkSpecializationInfo;
+
+typedef struct VkPipelineShaderStageCreateInfo {
+    VkStructureType                     sType;
+    const void*                         pNext;
+    VkPipelineShaderStageCreateFlags    flags;
+    VkShaderStageFlagBits               stage;
+    VkShaderModule                      module;
+    const char*                         pName;
+    const VkSpecializationInfo*         pSpecializationInfo;
+} VkPipelineShaderStageCreateInfo;
+
+typedef struct VkVertexInputBindingDescription {
+    uint32_t             binding;
+    uint32_t             stride;
+    VkVertexInputRate    inputRate;
+} VkVertexInputBindingDescription;
+
+typedef struct VkVertexInputAttributeDescription {
+    uint32_t    location;
+    uint32_t    binding;
+    VkFormat    format;
+    uint32_t    offset;
+} VkVertexInputAttributeDescription;
+
+typedef struct VkPipelineVertexInputStateCreateInfo {
+    VkStructureType                             sType;
+    const void*                                 pNext;
+    VkPipelineVertexInputStateCreateFlags       flags;
+    uint32_t                                    vertexBindingDescriptionCount;
+    const VkVertexInputBindingDescription*      pVertexBindingDescriptions;
+    uint32_t                                    vertexAttributeDescriptionCount;
+    const VkVertexInputAttributeDescription*    pVertexAttributeDescriptions;
+} VkPipelineVertexInputStateCreateInfo;
+
+typedef struct VkPipelineInputAssemblyStateCreateInfo {
+    VkStructureType                            sType;
+    const void*                                pNext;
+    VkPipelineInputAssemblyStateCreateFlags    flags;
+    VkPrimitiveTopology                        topology;
+    VkBool32                                   primitiveRestartEnable;
+} VkPipelineInputAssemblyStateCreateInfo;
+
+typedef struct VkPipelineTessellationStateCreateInfo {
+    VkStructureType                           sType;
+    const void*                               pNext;
+    VkPipelineTessellationStateCreateFlags    flags;
+    uint32_t                                  patchControlPoints;
+} VkPipelineTessellationStateCreateInfo;
+
+typedef struct VkViewport {
+    float    x;
+    float    y;
+    float    width;
+    float    height;
+    float    minDepth;
+    float    maxDepth;
+} VkViewport;
+
+typedef struct VkOffset2D {
+    int32_t    x;
+    int32_t    y;
+} VkOffset2D;
+
+typedef struct VkExtent2D {
+    uint32_t    width;
+    uint32_t    height;
+} VkExtent2D;
+
+typedef struct VkRect2D {
+    VkOffset2D    offset;
+    VkExtent2D    extent;
+} VkRect2D;
+
+typedef struct VkPipelineViewportStateCreateInfo {
+    VkStructureType                       sType;
+    const void*                           pNext;
+    VkPipelineViewportStateCreateFlags    flags;
+    uint32_t                              viewportCount;
+    const VkViewport*                     pViewports;
+    uint32_t                              scissorCount;
+    const VkRect2D*                       pScissors;
+} VkPipelineViewportStateCreateInfo;
+
+typedef struct VkPipelineRasterizationStateCreateInfo {
+    VkStructureType                            sType;
+    const void*                                pNext;
+    VkPipelineRasterizationStateCreateFlags    flags;
+    VkBool32                                   depthClampEnable;
+    VkBool32                                   rasterizerDiscardEnable;
+    VkPolygonMode                              polygonMode;
+    VkCullModeFlags                            cullMode;
+    VkFrontFace                                frontFace;
+    VkBool32                                   depthBiasEnable;
+    float                                      depthBiasConstantFactor;
+    float                                      depthBiasClamp;
+    float                                      depthBiasSlopeFactor;
+    float                                      lineWidth;
+} VkPipelineRasterizationStateCreateInfo;
+
+typedef struct VkPipelineMultisampleStateCreateInfo {
+    VkStructureType                          sType;
+    const void*                              pNext;
+    VkPipelineMultisampleStateCreateFlags    flags;
+    VkSampleCountFlagBits                    rasterizationSamples;
+    VkBool32                                 sampleShadingEnable;
+    float                                    minSampleShading;
+    const VkSampleMask*                      pSampleMask;
+    VkBool32                                 alphaToCoverageEnable;
+    VkBool32                                 alphaToOneEnable;
+} VkPipelineMultisampleStateCreateInfo;
+
+typedef struct VkStencilOpState {
+    VkStencilOp    failOp;
+    VkStencilOp    passOp;
+    VkStencilOp    depthFailOp;
+    VkCompareOp    compareOp;
+    uint32_t       compareMask;
+    uint32_t       writeMask;
+    uint32_t       reference;
+} VkStencilOpState;
+
+typedef struct VkPipelineDepthStencilStateCreateInfo {
+    VkStructureType                           sType;
+    const void*                               pNext;
+    VkPipelineDepthStencilStateCreateFlags    flags;
+    VkBool32                                  depthTestEnable;
+    VkBool32                                  depthWriteEnable;
+    VkCompareOp                               depthCompareOp;
+    VkBool32                                  depthBoundsTestEnable;
+    VkBool32                                  stencilTestEnable;
+    VkStencilOpState                          front;
+    VkStencilOpState                          back;
+    float                                     minDepthBounds;
+    float                                     maxDepthBounds;
+} VkPipelineDepthStencilStateCreateInfo;
+
+typedef struct VkPipelineColorBlendAttachmentState {
+    VkBool32                 blendEnable;
+    VkBlendFactor            srcColorBlendFactor;
+    VkBlendFactor            dstColorBlendFactor;
+    VkBlendOp                colorBlendOp;
+    VkBlendFactor            srcAlphaBlendFactor;
+    VkBlendFactor            dstAlphaBlendFactor;
+    VkBlendOp                alphaBlendOp;
+    VkColorComponentFlags    colorWriteMask;
+} VkPipelineColorBlendAttachmentState;
+
+typedef struct VkPipelineColorBlendStateCreateInfo {
+    VkStructureType                               sType;
+    const void*                                   pNext;
+    VkPipelineColorBlendStateCreateFlags          flags;
+    VkBool32                                      logicOpEnable;
+    VkLogicOp                                     logicOp;
+    uint32_t                                      attachmentCount;
+    const VkPipelineColorBlendAttachmentState*    pAttachments;
+    float                                         blendConstants[4];
+} VkPipelineColorBlendStateCreateInfo;
+
+typedef struct VkPipelineDynamicStateCreateInfo {
+    VkStructureType                      sType;
+    const void*                          pNext;
+    VkPipelineDynamicStateCreateFlags    flags;
+    uint32_t                             dynamicStateCount;
+    const VkDynamicState*                pDynamicStates;
+} VkPipelineDynamicStateCreateInfo;
+
+typedef struct VkGraphicsPipelineCreateInfo {
+    VkStructureType                                  sType;
+    const void*                                      pNext;
+    VkPipelineCreateFlags                            flags;
+    uint32_t                                         stageCount;
+    const VkPipelineShaderStageCreateInfo*           pStages;
+    const VkPipelineVertexInputStateCreateInfo*      pVertexInputState;
+    const VkPipelineInputAssemblyStateCreateInfo*    pInputAssemblyState;
+    const VkPipelineTessellationStateCreateInfo*     pTessellationState;
+    const VkPipelineViewportStateCreateInfo*         pViewportState;
+    const VkPipelineRasterizationStateCreateInfo*    pRasterizationState;
+    const VkPipelineMultisampleStateCreateInfo*      pMultisampleState;
+    const VkPipelineDepthStencilStateCreateInfo*     pDepthStencilState;
+    const VkPipelineColorBlendStateCreateInfo*       pColorBlendState;
+    const VkPipelineDynamicStateCreateInfo*          pDynamicState;
+    VkPipelineLayout                                 layout;
+    VkRenderPass                                     renderPass;
+    uint32_t                                         subpass;
+    VkPipeline                                       basePipelineHandle;
+    int32_t                                          basePipelineIndex;
+} VkGraphicsPipelineCreateInfo;
+
+typedef struct VkComputePipelineCreateInfo {
+    VkStructureType                    sType;
+    const void*                        pNext;
+    VkPipelineCreateFlags              flags;
+    VkPipelineShaderStageCreateInfo    stage;
+    VkPipelineLayout                   layout;
+    VkPipeline                         basePipelineHandle;
+    int32_t                            basePipelineIndex;
+} VkComputePipelineCreateInfo;
+
+typedef struct VkPushConstantRange {
+    VkShaderStageFlags    stageFlags;
+    uint32_t              offset;
+    uint32_t              size;
+} VkPushConstantRange;
+
+typedef struct VkPipelineLayoutCreateInfo {
+    VkStructureType                 sType;
+    const void*                     pNext;
+    VkPipelineLayoutCreateFlags     flags;
+    uint32_t                        setLayoutCount;
+    const VkDescriptorSetLayout*    pSetLayouts;
+    uint32_t                        pushConstantRangeCount;
+    const VkPushConstantRange*      pPushConstantRanges;
+} VkPipelineLayoutCreateInfo;
+
+typedef struct VkSamplerCreateInfo {
+    VkStructureType         sType;
+    const void*             pNext;
+    VkSamplerCreateFlags    flags;
+    VkFilter                magFilter;
+    VkFilter                minFilter;
+    VkSamplerMipmapMode     mipmapMode;
+    VkSamplerAddressMode    addressModeU;
+    VkSamplerAddressMode    addressModeV;
+    VkSamplerAddressMode    addressModeW;
+    float                   mipLodBias;
+    VkBool32                anisotropyEnable;
+    float                   maxAnisotropy;
+    VkBool32                compareEnable;
+    VkCompareOp             compareOp;
+    float                   minLod;
+    float                   maxLod;
+    VkBorderColor           borderColor;
+    VkBool32                unnormalizedCoordinates;
+} VkSamplerCreateInfo;
+
+typedef struct VkDescriptorSetLayoutBinding {
+    uint32_t              binding;
+    VkDescriptorType      descriptorType;
+    uint32_t              descriptorCount;
+    VkShaderStageFlags    stageFlags;
+    const VkSampler*      pImmutableSamplers;
+} VkDescriptorSetLayoutBinding;
+
+typedef struct VkDescriptorSetLayoutCreateInfo {
+    VkStructureType                        sType;
+    const void*                            pNext;
+    VkDescriptorSetLayoutCreateFlags       flags;
+    uint32_t                               bindingCount;
+    const VkDescriptorSetLayoutBinding*    pBindings;
+} VkDescriptorSetLayoutCreateInfo;
+
+typedef struct VkDescriptorPoolSize {
+    VkDescriptorType    type;
+    uint32_t            descriptorCount;
+} VkDescriptorPoolSize;
+
+typedef struct VkDescriptorPoolCreateInfo {
+    VkStructureType                sType;
+    const void*                    pNext;
+    VkDescriptorPoolCreateFlags    flags;
+    uint32_t                       maxSets;
+    uint32_t                       poolSizeCount;
+    const VkDescriptorPoolSize*    pPoolSizes;
+} VkDescriptorPoolCreateInfo;
+
+typedef struct VkDescriptorSetAllocateInfo {
+    VkStructureType                 sType;
+    const void*                     pNext;
+    VkDescriptorPool                descriptorPool;
+    uint32_t                        descriptorSetCount;
+    const VkDescriptorSetLayout*    pSetLayouts;
+} VkDescriptorSetAllocateInfo;
+
+typedef struct VkDescriptorImageInfo {
+    VkSampler        sampler;
+    VkImageView      imageView;
+    VkImageLayout    imageLayout;
+} VkDescriptorImageInfo;
+
+typedef struct VkDescriptorBufferInfo {
+    VkBuffer        buffer;
+    VkDeviceSize    offset;
+    VkDeviceSize    range;
+} VkDescriptorBufferInfo;
+
+typedef struct VkWriteDescriptorSet {
+    VkStructureType                  sType;
+    const void*                      pNext;
+    VkDescriptorSet                  dstSet;
+    uint32_t                         dstBinding;
+    uint32_t                         dstArrayElement;
+    uint32_t                         descriptorCount;
+    VkDescriptorType                 descriptorType;
+    const VkDescriptorImageInfo*     pImageInfo;
+    const VkDescriptorBufferInfo*    pBufferInfo;
+    const VkBufferView*              pTexelBufferView;
+} VkWriteDescriptorSet;
+
+typedef struct VkCopyDescriptorSet {
+    VkStructureType    sType;
+    const void*        pNext;
+    VkDescriptorSet    srcSet;
+    uint32_t           srcBinding;
+    uint32_t           srcArrayElement;
+    VkDescriptorSet    dstSet;
+    uint32_t           dstBinding;
+    uint32_t           dstArrayElement;
+    uint32_t           descriptorCount;
+} VkCopyDescriptorSet;
+
+typedef struct VkFramebufferCreateInfo {
+    VkStructureType             sType;
+    const void*                 pNext;
+    VkFramebufferCreateFlags    flags;
+    VkRenderPass                renderPass;
+    uint32_t                    attachmentCount;
+    const VkImageView*          pAttachments;
+    uint32_t                    width;
+    uint32_t                    height;
+    uint32_t                    layers;
+} VkFramebufferCreateInfo;
+
+typedef struct VkAttachmentDescription {
+    VkAttachmentDescriptionFlags    flags;
+    VkFormat                        format;
+    VkSampleCountFlagBits           samples;
+    VkAttachmentLoadOp              loadOp;
+    VkAttachmentStoreOp             storeOp;
+    VkAttachmentLoadOp              stencilLoadOp;
+    VkAttachmentStoreOp             stencilStoreOp;
+    VkImageLayout                   initialLayout;
+    VkImageLayout                   finalLayout;
+} VkAttachmentDescription;
+
+typedef struct VkAttachmentReference {
+    uint32_t         attachment;
+    VkImageLayout    layout;
+} VkAttachmentReference;
+
+typedef struct VkSubpassDescription {
+    VkSubpassDescriptionFlags       flags;
+    VkPipelineBindPoint             pipelineBindPoint;
+    uint32_t                        inputAttachmentCount;
+    const VkAttachmentReference*    pInputAttachments;
+    uint32_t                        colorAttachmentCount;
+    const VkAttachmentReference*    pColorAttachments;
+    const VkAttachmentReference*    pResolveAttachments;
+    const VkAttachmentReference*    pDepthStencilAttachment;
+    uint32_t                        preserveAttachmentCount;
+    const uint32_t*                 pPreserveAttachments;
+} VkSubpassDescription;
+
+typedef struct VkSubpassDependency {
+    uint32_t                srcSubpass;
+    uint32_t                dstSubpass;
+    VkPipelineStageFlags    srcStageMask;
+    VkPipelineStageFlags    dstStageMask;
+    VkAccessFlags           srcAccessMask;
+    VkAccessFlags           dstAccessMask;
+    VkDependencyFlags       dependencyFlags;
+} VkSubpassDependency;
+
+typedef struct VkRenderPassCreateInfo {
+    VkStructureType                   sType;
+    const void*                       pNext;
+    VkRenderPassCreateFlags           flags;
+    uint32_t                          attachmentCount;
+    const VkAttachmentDescription*    pAttachments;
+    uint32_t                          subpassCount;
+    const VkSubpassDescription*       pSubpasses;
+    uint32_t                          dependencyCount;
+    const VkSubpassDependency*        pDependencies;
+} VkRenderPassCreateInfo;
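+/* Illustrative sketch (editorial, not part of the upstream Khronos
+ * header): a minimal render pass with one color attachment, one subpass
+ * and no explicit dependencies.  The VK_FORMAT_*, VK_IMAGE_LAYOUT_* and
+ * VK_STRUCTURE_TYPE_* values are defined earlier in this header;
+ * vkCreateRenderPass, declared further down, consumes the result.
+ *
+ *     VkAttachmentDescription color = {
+ *         .format = VK_FORMAT_B8G8R8A8_UNORM,
+ *         .samples = VK_SAMPLE_COUNT_1_BIT,
+ *         .loadOp = VK_ATTACHMENT_LOAD_OP_CLEAR,
+ *         .storeOp = VK_ATTACHMENT_STORE_OP_STORE,
+ *         .stencilLoadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE,
+ *         .stencilStoreOp = VK_ATTACHMENT_STORE_OP_DONT_CARE,
+ *         .initialLayout = VK_IMAGE_LAYOUT_UNDEFINED,
+ *         .finalLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
+ *     };
+ *     VkAttachmentReference ref = {
+ *         .attachment = 0,
+ *         .layout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
+ *     };
+ *     VkSubpassDescription subpass = {
+ *         .pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS,
+ *         .colorAttachmentCount = 1,
+ *         .pColorAttachments = &ref,
+ *     };
+ *     VkRenderPassCreateInfo info = {
+ *         .sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO,
+ *         .attachmentCount = 1,
+ *         .pAttachments = &color,
+ *         .subpassCount = 1,
+ *         .pSubpasses = &subpass,
+ *     };
+ */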
+
+typedef struct VkCommandPoolCreateInfo {
+    VkStructureType             sType;
+    const void*                 pNext;
+    VkCommandPoolCreateFlags    flags;
+    uint32_t                    queueFamilyIndex;
+} VkCommandPoolCreateInfo;
+
+typedef struct VkCommandBufferAllocateInfo {
+    VkStructureType         sType;
+    const void*             pNext;
+    VkCommandPool           commandPool;
+    VkCommandBufferLevel    level;
+    uint32_t                commandBufferCount;
+} VkCommandBufferAllocateInfo;
+
+typedef struct VkCommandBufferInheritanceInfo {
+    VkStructureType                  sType;
+    const void*                      pNext;
+    VkRenderPass                     renderPass;
+    uint32_t                         subpass;
+    VkFramebuffer                    framebuffer;
+    VkBool32                         occlusionQueryEnable;
+    VkQueryControlFlags              queryFlags;
+    VkQueryPipelineStatisticFlags    pipelineStatistics;
+} VkCommandBufferInheritanceInfo;
+
+typedef struct VkCommandBufferBeginInfo {
+    VkStructureType                          sType;
+    const void*                              pNext;
+    VkCommandBufferUsageFlags                flags;
+    const VkCommandBufferInheritanceInfo*    pInheritanceInfo;
+} VkCommandBufferBeginInfo;
+
+typedef struct VkBufferCopy {
+    VkDeviceSize    srcOffset;
+    VkDeviceSize    dstOffset;
+    VkDeviceSize    size;
+} VkBufferCopy;
+
+typedef struct VkImageSubresourceLayers {
+    VkImageAspectFlags    aspectMask;
+    uint32_t              mipLevel;
+    uint32_t              baseArrayLayer;
+    uint32_t              layerCount;
+} VkImageSubresourceLayers;
+
+typedef struct VkImageCopy {
+    VkImageSubresourceLayers    srcSubresource;
+    VkOffset3D                  srcOffset;
+    VkImageSubresourceLayers    dstSubresource;
+    VkOffset3D                  dstOffset;
+    VkExtent3D                  extent;
+} VkImageCopy;
+
+typedef struct VkImageBlit {
+    VkImageSubresourceLayers    srcSubresource;
+    VkOffset3D                  srcOffsets[2];
+    VkImageSubresourceLayers    dstSubresource;
+    VkOffset3D                  dstOffsets[2];
+} VkImageBlit;
+
+typedef struct VkBufferImageCopy {
+    VkDeviceSize                bufferOffset;
+    uint32_t                    bufferRowLength;
+    uint32_t                    bufferImageHeight;
+    VkImageSubresourceLayers    imageSubresource;
+    VkOffset3D                  imageOffset;
+    VkExtent3D                  imageExtent;
+} VkBufferImageCopy;
+
+typedef union VkClearColorValue {
+    float       float32[4];
+    int32_t     int32[4];
+    uint32_t    uint32[4];
+} VkClearColorValue;
+
+typedef struct VkClearDepthStencilValue {
+    float       depth;
+    uint32_t    stencil;
+} VkClearDepthStencilValue;
+
+typedef union VkClearValue {
+    VkClearColorValue           color;
+    VkClearDepthStencilValue    depthStencil;
+} VkClearValue;
+
+typedef struct VkClearAttachment {
+    VkImageAspectFlags    aspectMask;
+    uint32_t              colorAttachment;
+    VkClearValue          clearValue;
+} VkClearAttachment;
+
+typedef struct VkClearRect {
+    VkRect2D    rect;
+    uint32_t    baseArrayLayer;
+    uint32_t    layerCount;
+} VkClearRect;
+
+typedef struct VkImageResolve {
+    VkImageSubresourceLayers    srcSubresource;
+    VkOffset3D                  srcOffset;
+    VkImageSubresourceLayers    dstSubresource;
+    VkOffset3D                  dstOffset;
+    VkExtent3D                  extent;
+} VkImageResolve;
+
+typedef struct VkMemoryBarrier {
+    VkStructureType    sType;
+    const void*        pNext;
+    VkAccessFlags      srcAccessMask;
+    VkAccessFlags      dstAccessMask;
+} VkMemoryBarrier;
+
+typedef struct VkBufferMemoryBarrier {
+    VkStructureType    sType;
+    const void*        pNext;
+    VkAccessFlags      srcAccessMask;
+    VkAccessFlags      dstAccessMask;
+    uint32_t           srcQueueFamilyIndex;
+    uint32_t           dstQueueFamilyIndex;
+    VkBuffer           buffer;
+    VkDeviceSize       offset;
+    VkDeviceSize       size;
+} VkBufferMemoryBarrier;
+
+typedef struct VkImageMemoryBarrier {
+    VkStructureType            sType;
+    const void*                pNext;
+    VkAccessFlags              srcAccessMask;
+    VkAccessFlags              dstAccessMask;
+    VkImageLayout              oldLayout;
+    VkImageLayout              newLayout;
+    uint32_t                   srcQueueFamilyIndex;
+    uint32_t                   dstQueueFamilyIndex;
+    VkImage                    image;
+    VkImageSubresourceRange    subresourceRange;
+} VkImageMemoryBarrier;
+
+typedef struct VkRenderPassBeginInfo {
+    VkStructureType        sType;
+    const void*            pNext;
+    VkRenderPass           renderPass;
+    VkFramebuffer          framebuffer;
+    VkRect2D               renderArea;
+    uint32_t               clearValueCount;
+    const VkClearValue*    pClearValues;
+} VkRenderPassBeginInfo;
+
+typedef struct VkDispatchIndirectCommand {
+    uint32_t    x;
+    uint32_t    y;
+    uint32_t    z;
+} VkDispatchIndirectCommand;
+
+typedef struct VkDrawIndexedIndirectCommand {
+    uint32_t    indexCount;
+    uint32_t    instanceCount;
+    uint32_t    firstIndex;
+    int32_t     vertexOffset;
+    uint32_t    firstInstance;
+} VkDrawIndexedIndirectCommand;
+
+typedef struct VkDrawIndirectCommand {
+    uint32_t    vertexCount;
+    uint32_t    instanceCount;
+    uint32_t    firstVertex;
+    uint32_t    firstInstance;
+} VkDrawIndirectCommand;
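+/* Illustrative note (editorial, not part of the upstream Khronos header):
+ * the three *IndirectCommand structures above define the per-record
+ * layout that vkCmdDispatchIndirect, vkCmdDrawIndexedIndirect and
+ * vkCmdDrawIndirect (declared further down) read from a buffer created
+ * with VK_BUFFER_USAGE_INDIRECT_BUFFER_BIT.
+ */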
+
+
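+/* Illustrative note (editorial, not part of the upstream Khronos header):
+ * the PFN_vk* typedefs below describe every core entry point so that
+ * applications and layers can load them dynamically, for example:
+ *
+ *     PFN_vkCreateDevice create_device = (PFN_vkCreateDevice)
+ *         vkGetInstanceProcAddr(instance, "vkCreateDevice");
+ *
+ * When VK_NO_PROTOTYPES is not defined, the static prototypes further
+ * down in this header can be called directly instead.
+ */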
+typedef VkResult (VKAPI_PTR *PFN_vkCreateInstance)(const VkInstanceCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkInstance* pInstance);
+typedef void (VKAPI_PTR *PFN_vkDestroyInstance)(VkInstance instance, const VkAllocationCallbacks* pAllocator);
+typedef VkResult (VKAPI_PTR *PFN_vkEnumeratePhysicalDevices)(VkInstance instance, uint32_t* pPhysicalDeviceCount, VkPhysicalDevice* pPhysicalDevices);
+typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceFeatures)(VkPhysicalDevice physicalDevice, VkPhysicalDeviceFeatures* pFeatures);
+typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceFormatProperties)(VkPhysicalDevice physicalDevice, VkFormat format, VkFormatProperties* pFormatProperties);
+typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceImageFormatProperties)(VkPhysicalDevice physicalDevice, VkFormat format, VkImageType type, VkImageTiling tiling, VkImageUsageFlags usage, VkImageCreateFlags flags, VkImageFormatProperties* pImageFormatProperties);
+typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceProperties)(VkPhysicalDevice physicalDevice, VkPhysicalDeviceProperties* pProperties);
+typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceQueueFamilyProperties)(VkPhysicalDevice physicalDevice, uint32_t* pQueueFamilyPropertyCount, VkQueueFamilyProperties* pQueueFamilyProperties);
+typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceMemoryProperties)(VkPhysicalDevice physicalDevice, VkPhysicalDeviceMemoryProperties* pMemoryProperties);
+typedef PFN_vkVoidFunction (VKAPI_PTR *PFN_vkGetInstanceProcAddr)(VkInstance instance, const char* pName);
+typedef PFN_vkVoidFunction (VKAPI_PTR *PFN_vkGetDeviceProcAddr)(VkDevice device, const char* pName);
+typedef VkResult (VKAPI_PTR *PFN_vkCreateDevice)(VkPhysicalDevice physicalDevice, const VkDeviceCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkDevice* pDevice);
+typedef void (VKAPI_PTR *PFN_vkDestroyDevice)(VkDevice device, const VkAllocationCallbacks* pAllocator);
+typedef VkResult (VKAPI_PTR *PFN_vkEnumerateInstanceExtensionProperties)(const char* pLayerName, uint32_t* pPropertyCount, VkExtensionProperties* pProperties);
+typedef VkResult (VKAPI_PTR *PFN_vkEnumerateDeviceExtensionProperties)(VkPhysicalDevice physicalDevice, const char* pLayerName, uint32_t* pPropertyCount, VkExtensionProperties* pProperties);
+typedef VkResult (VKAPI_PTR *PFN_vkEnumerateInstanceLayerProperties)(uint32_t* pPropertyCount, VkLayerProperties* pProperties);
+typedef VkResult (VKAPI_PTR *PFN_vkEnumerateDeviceLayerProperties)(VkPhysicalDevice physicalDevice, uint32_t* pPropertyCount, VkLayerProperties* pProperties);
+typedef void (VKAPI_PTR *PFN_vkGetDeviceQueue)(VkDevice device, uint32_t queueFamilyIndex, uint32_t queueIndex, VkQueue* pQueue);
+typedef VkResult (VKAPI_PTR *PFN_vkQueueSubmit)(VkQueue queue, uint32_t submitCount, const VkSubmitInfo* pSubmits, VkFence fence);
+typedef VkResult (VKAPI_PTR *PFN_vkQueueWaitIdle)(VkQueue queue);
+typedef VkResult (VKAPI_PTR *PFN_vkDeviceWaitIdle)(VkDevice device);
+typedef VkResult (VKAPI_PTR *PFN_vkAllocateMemory)(VkDevice device, const VkMemoryAllocateInfo* pAllocateInfo, const VkAllocationCallbacks* pAllocator, VkDeviceMemory* pMemory);
+typedef void (VKAPI_PTR *PFN_vkFreeMemory)(VkDevice device, VkDeviceMemory memory, const VkAllocationCallbacks* pAllocator);
+typedef VkResult (VKAPI_PTR *PFN_vkMapMemory)(VkDevice device, VkDeviceMemory memory, VkDeviceSize offset, VkDeviceSize size, VkMemoryMapFlags flags, void** ppData);
+typedef void (VKAPI_PTR *PFN_vkUnmapMemory)(VkDevice device, VkDeviceMemory memory);
+typedef VkResult (VKAPI_PTR *PFN_vkFlushMappedMemoryRanges)(VkDevice device, uint32_t memoryRangeCount, const VkMappedMemoryRange* pMemoryRanges);
+typedef VkResult (VKAPI_PTR *PFN_vkInvalidateMappedMemoryRanges)(VkDevice device, uint32_t memoryRangeCount, const VkMappedMemoryRange* pMemoryRanges);
+typedef void (VKAPI_PTR *PFN_vkGetDeviceMemoryCommitment)(VkDevice device, VkDeviceMemory memory, VkDeviceSize* pCommittedMemoryInBytes);
+typedef VkResult (VKAPI_PTR *PFN_vkBindBufferMemory)(VkDevice device, VkBuffer buffer, VkDeviceMemory memory, VkDeviceSize memoryOffset);
+typedef VkResult (VKAPI_PTR *PFN_vkBindImageMemory)(VkDevice device, VkImage image, VkDeviceMemory memory, VkDeviceSize memoryOffset);
+typedef void (VKAPI_PTR *PFN_vkGetBufferMemoryRequirements)(VkDevice device, VkBuffer buffer, VkMemoryRequirements* pMemoryRequirements);
+typedef void (VKAPI_PTR *PFN_vkGetImageMemoryRequirements)(VkDevice device, VkImage image, VkMemoryRequirements* pMemoryRequirements);
+typedef void (VKAPI_PTR *PFN_vkGetImageSparseMemoryRequirements)(VkDevice device, VkImage image, uint32_t* pSparseMemoryRequirementCount, VkSparseImageMemoryRequirements* pSparseMemoryRequirements);
+typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceSparseImageFormatProperties)(VkPhysicalDevice physicalDevice, VkFormat format, VkImageType type, VkSampleCountFlagBits samples, VkImageUsageFlags usage, VkImageTiling tiling, uint32_t* pPropertyCount, VkSparseImageFormatProperties* pProperties);
+typedef VkResult (VKAPI_PTR *PFN_vkQueueBindSparse)(VkQueue queue, uint32_t bindInfoCount, const VkBindSparseInfo* pBindInfo, VkFence fence);
+typedef VkResult (VKAPI_PTR *PFN_vkCreateFence)(VkDevice device, const VkFenceCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkFence* pFence);
+typedef void (VKAPI_PTR *PFN_vkDestroyFence)(VkDevice device, VkFence fence, const VkAllocationCallbacks* pAllocator);
+typedef VkResult (VKAPI_PTR *PFN_vkResetFences)(VkDevice device, uint32_t fenceCount, const VkFence* pFences);
+typedef VkResult (VKAPI_PTR *PFN_vkGetFenceStatus)(VkDevice device, VkFence fence);
+typedef VkResult (VKAPI_PTR *PFN_vkWaitForFences)(VkDevice device, uint32_t fenceCount, const VkFence* pFences, VkBool32 waitAll, uint64_t timeout);
+typedef VkResult (VKAPI_PTR *PFN_vkCreateSemaphore)(VkDevice device, const VkSemaphoreCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSemaphore* pSemaphore);
+typedef void (VKAPI_PTR *PFN_vkDestroySemaphore)(VkDevice device, VkSemaphore semaphore, const VkAllocationCallbacks* pAllocator);
+typedef VkResult (VKAPI_PTR *PFN_vkCreateEvent)(VkDevice device, const VkEventCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkEvent* pEvent);
+typedef void (VKAPI_PTR *PFN_vkDestroyEvent)(VkDevice device, VkEvent event, const VkAllocationCallbacks* pAllocator);
+typedef VkResult (VKAPI_PTR *PFN_vkGetEventStatus)(VkDevice device, VkEvent event);
+typedef VkResult (VKAPI_PTR *PFN_vkSetEvent)(VkDevice device, VkEvent event);
+typedef VkResult (VKAPI_PTR *PFN_vkResetEvent)(VkDevice device, VkEvent event);
+typedef VkResult (VKAPI_PTR *PFN_vkCreateQueryPool)(VkDevice device, const VkQueryPoolCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkQueryPool* pQueryPool);
+typedef void (VKAPI_PTR *PFN_vkDestroyQueryPool)(VkDevice device, VkQueryPool queryPool, const VkAllocationCallbacks* pAllocator);
+typedef VkResult (VKAPI_PTR *PFN_vkGetQueryPoolResults)(VkDevice device, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount, size_t dataSize, void* pData, VkDeviceSize stride, VkQueryResultFlags flags);
+typedef VkResult (VKAPI_PTR *PFN_vkCreateBuffer)(VkDevice device, const VkBufferCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkBuffer* pBuffer);
+typedef void (VKAPI_PTR *PFN_vkDestroyBuffer)(VkDevice device, VkBuffer buffer, const VkAllocationCallbacks* pAllocator);
+typedef VkResult (VKAPI_PTR *PFN_vkCreateBufferView)(VkDevice device, const VkBufferViewCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkBufferView* pView);
+typedef void (VKAPI_PTR *PFN_vkDestroyBufferView)(VkDevice device, VkBufferView bufferView, const VkAllocationCallbacks* pAllocator);
+typedef VkResult (VKAPI_PTR *PFN_vkCreateImage)(VkDevice device, const VkImageCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkImage* pImage);
+typedef void (VKAPI_PTR *PFN_vkDestroyImage)(VkDevice device, VkImage image, const VkAllocationCallbacks* pAllocator);
+typedef void (VKAPI_PTR *PFN_vkGetImageSubresourceLayout)(VkDevice device, VkImage image, const VkImageSubresource* pSubresource, VkSubresourceLayout* pLayout);
+typedef VkResult (VKAPI_PTR *PFN_vkCreateImageView)(VkDevice device, const VkImageViewCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkImageView* pView);
+typedef void (VKAPI_PTR *PFN_vkDestroyImageView)(VkDevice device, VkImageView imageView, const VkAllocationCallbacks* pAllocator);
+typedef VkResult (VKAPI_PTR *PFN_vkCreateShaderModule)(VkDevice device, const VkShaderModuleCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkShaderModule* pShaderModule);
+typedef void (VKAPI_PTR *PFN_vkDestroyShaderModule)(VkDevice device, VkShaderModule shaderModule, const VkAllocationCallbacks* pAllocator);
+typedef VkResult (VKAPI_PTR *PFN_vkCreatePipelineCache)(VkDevice device, const VkPipelineCacheCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkPipelineCache* pPipelineCache);
+typedef void (VKAPI_PTR *PFN_vkDestroyPipelineCache)(VkDevice device, VkPipelineCache pipelineCache, const VkAllocationCallbacks* pAllocator);
+typedef VkResult (VKAPI_PTR *PFN_vkGetPipelineCacheData)(VkDevice device, VkPipelineCache pipelineCache, size_t* pDataSize, void* pData);
+typedef VkResult (VKAPI_PTR *PFN_vkMergePipelineCaches)(VkDevice device, VkPipelineCache dstCache, uint32_t srcCacheCount, const VkPipelineCache* pSrcCaches);
+typedef VkResult (VKAPI_PTR *PFN_vkCreateGraphicsPipelines)(VkDevice device, VkPipelineCache pipelineCache, uint32_t createInfoCount, const VkGraphicsPipelineCreateInfo* pCreateInfos, const VkAllocationCallbacks* pAllocator, VkPipeline* pPipelines);
+typedef VkResult (VKAPI_PTR *PFN_vkCreateComputePipelines)(VkDevice device, VkPipelineCache pipelineCache, uint32_t createInfoCount, const VkComputePipelineCreateInfo* pCreateInfos, const VkAllocationCallbacks* pAllocator, VkPipeline* pPipelines);
+typedef void (VKAPI_PTR *PFN_vkDestroyPipeline)(VkDevice device, VkPipeline pipeline, const VkAllocationCallbacks* pAllocator);
+typedef VkResult (VKAPI_PTR *PFN_vkCreatePipelineLayout)(VkDevice device, const VkPipelineLayoutCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkPipelineLayout* pPipelineLayout);
+typedef void (VKAPI_PTR *PFN_vkDestroyPipelineLayout)(VkDevice device, VkPipelineLayout pipelineLayout, const VkAllocationCallbacks* pAllocator);
+typedef VkResult (VKAPI_PTR *PFN_vkCreateSampler)(VkDevice device, const VkSamplerCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSampler* pSampler);
+typedef void (VKAPI_PTR *PFN_vkDestroySampler)(VkDevice device, VkSampler sampler, const VkAllocationCallbacks* pAllocator);
+typedef VkResult (VKAPI_PTR *PFN_vkCreateDescriptorSetLayout)(VkDevice device, const VkDescriptorSetLayoutCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkDescriptorSetLayout* pSetLayout);
+typedef void (VKAPI_PTR *PFN_vkDestroyDescriptorSetLayout)(VkDevice device, VkDescriptorSetLayout descriptorSetLayout, const VkAllocationCallbacks* pAllocator);
+typedef VkResult (VKAPI_PTR *PFN_vkCreateDescriptorPool)(VkDevice device, const VkDescriptorPoolCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkDescriptorPool* pDescriptorPool);
+typedef void (VKAPI_PTR *PFN_vkDestroyDescriptorPool)(VkDevice device, VkDescriptorPool descriptorPool, const VkAllocationCallbacks* pAllocator);
+typedef VkResult (VKAPI_PTR *PFN_vkResetDescriptorPool)(VkDevice device, VkDescriptorPool descriptorPool, VkDescriptorPoolResetFlags flags);
+typedef VkResult (VKAPI_PTR *PFN_vkAllocateDescriptorSets)(VkDevice device, const VkDescriptorSetAllocateInfo* pAllocateInfo, VkDescriptorSet* pDescriptorSets);
+typedef VkResult (VKAPI_PTR *PFN_vkFreeDescriptorSets)(VkDevice device, VkDescriptorPool descriptorPool, uint32_t descriptorSetCount, const VkDescriptorSet* pDescriptorSets);
+typedef void (VKAPI_PTR *PFN_vkUpdateDescriptorSets)(VkDevice device, uint32_t descriptorWriteCount, const VkWriteDescriptorSet* pDescriptorWrites, uint32_t descriptorCopyCount, const VkCopyDescriptorSet* pDescriptorCopies);
+typedef VkResult (VKAPI_PTR *PFN_vkCreateFramebuffer)(VkDevice device, const VkFramebufferCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkFramebuffer* pFramebuffer);
+typedef void (VKAPI_PTR *PFN_vkDestroyFramebuffer)(VkDevice device, VkFramebuffer framebuffer, const VkAllocationCallbacks* pAllocator);
+typedef VkResult (VKAPI_PTR *PFN_vkCreateRenderPass)(VkDevice device, const VkRenderPassCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkRenderPass* pRenderPass);
+typedef void (VKAPI_PTR *PFN_vkDestroyRenderPass)(VkDevice device, VkRenderPass renderPass, const VkAllocationCallbacks* pAllocator);
+typedef void (VKAPI_PTR *PFN_vkGetRenderAreaGranularity)(VkDevice device, VkRenderPass renderPass, VkExtent2D* pGranularity);
+typedef VkResult (VKAPI_PTR *PFN_vkCreateCommandPool)(VkDevice device, const VkCommandPoolCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkCommandPool* pCommandPool);
+typedef void (VKAPI_PTR *PFN_vkDestroyCommandPool)(VkDevice device, VkCommandPool commandPool, const VkAllocationCallbacks* pAllocator);
+typedef VkResult (VKAPI_PTR *PFN_vkResetCommandPool)(VkDevice device, VkCommandPool commandPool, VkCommandPoolResetFlags flags);
+typedef VkResult (VKAPI_PTR *PFN_vkAllocateCommandBuffers)(VkDevice device, const VkCommandBufferAllocateInfo* pAllocateInfo, VkCommandBuffer* pCommandBuffers);
+typedef void (VKAPI_PTR *PFN_vkFreeCommandBuffers)(VkDevice device, VkCommandPool commandPool, uint32_t commandBufferCount, const VkCommandBuffer* pCommandBuffers);
+typedef VkResult (VKAPI_PTR *PFN_vkBeginCommandBuffer)(VkCommandBuffer commandBuffer, const VkCommandBufferBeginInfo* pBeginInfo);
+typedef VkResult (VKAPI_PTR *PFN_vkEndCommandBuffer)(VkCommandBuffer commandBuffer);
+typedef VkResult (VKAPI_PTR *PFN_vkResetCommandBuffer)(VkCommandBuffer commandBuffer, VkCommandBufferResetFlags flags);
+typedef void (VKAPI_PTR *PFN_vkCmdBindPipeline)(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint, VkPipeline pipeline);
+typedef void (VKAPI_PTR *PFN_vkCmdSetViewport)(VkCommandBuffer commandBuffer, uint32_t firstViewport, uint32_t viewportCount, const VkViewport* pViewports);
+typedef void (VKAPI_PTR *PFN_vkCmdSetScissor)(VkCommandBuffer commandBuffer, uint32_t firstScissor, uint32_t scissorCount, const VkRect2D* pScissors);
+typedef void (VKAPI_PTR *PFN_vkCmdSetLineWidth)(VkCommandBuffer commandBuffer, float lineWidth);
+typedef void (VKAPI_PTR *PFN_vkCmdSetDepthBias)(VkCommandBuffer commandBuffer, float depthBiasConstantFactor, float depthBiasClamp, float depthBiasSlopeFactor);
+typedef void (VKAPI_PTR *PFN_vkCmdSetBlendConstants)(VkCommandBuffer commandBuffer, const float blendConstants[4]);
+typedef void (VKAPI_PTR *PFN_vkCmdSetDepthBounds)(VkCommandBuffer commandBuffer, float minDepthBounds, float maxDepthBounds);
+typedef void (VKAPI_PTR *PFN_vkCmdSetStencilCompareMask)(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t compareMask);
+typedef void (VKAPI_PTR *PFN_vkCmdSetStencilWriteMask)(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t writeMask);
+typedef void (VKAPI_PTR *PFN_vkCmdSetStencilReference)(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t reference);
+typedef void (VKAPI_PTR *PFN_vkCmdBindDescriptorSets)(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint, VkPipelineLayout layout, uint32_t firstSet, uint32_t descriptorSetCount, const VkDescriptorSet* pDescriptorSets, uint32_t dynamicOffsetCount, const uint32_t* pDynamicOffsets);
+typedef void (VKAPI_PTR *PFN_vkCmdBindIndexBuffer)(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, VkIndexType indexType);
+typedef void (VKAPI_PTR *PFN_vkCmdBindVertexBuffers)(VkCommandBuffer commandBuffer, uint32_t firstBinding, uint32_t bindingCount, const VkBuffer* pBuffers, const VkDeviceSize* pOffsets);
+typedef void (VKAPI_PTR *PFN_vkCmdDraw)(VkCommandBuffer commandBuffer, uint32_t vertexCount, uint32_t instanceCount, uint32_t firstVertex, uint32_t firstInstance);
+typedef void (VKAPI_PTR *PFN_vkCmdDrawIndexed)(VkCommandBuffer commandBuffer, uint32_t indexCount, uint32_t instanceCount, uint32_t firstIndex, int32_t vertexOffset, uint32_t firstInstance);
+typedef void (VKAPI_PTR *PFN_vkCmdDrawIndirect)(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, uint32_t drawCount, uint32_t stride);
+typedef void (VKAPI_PTR *PFN_vkCmdDrawIndexedIndirect)(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, uint32_t drawCount, uint32_t stride);
+typedef void (VKAPI_PTR *PFN_vkCmdDispatch)(VkCommandBuffer commandBuffer, uint32_t x, uint32_t y, uint32_t z);
+typedef void (VKAPI_PTR *PFN_vkCmdDispatchIndirect)(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset);
+typedef void (VKAPI_PTR *PFN_vkCmdCopyBuffer)(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkBuffer dstBuffer, uint32_t regionCount, const VkBufferCopy* pRegions);
+typedef void (VKAPI_PTR *PFN_vkCmdCopyImage)(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount, const VkImageCopy* pRegions);
+typedef void (VKAPI_PTR *PFN_vkCmdBlitImage)(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount, const VkImageBlit* pRegions, VkFilter filter);
+typedef void (VKAPI_PTR *PFN_vkCmdCopyBufferToImage)(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount, const VkBufferImageCopy* pRegions);
+typedef void (VKAPI_PTR *PFN_vkCmdCopyImageToBuffer)(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkBuffer dstBuffer, uint32_t regionCount, const VkBufferImageCopy* pRegions);
+typedef void (VKAPI_PTR *PFN_vkCmdUpdateBuffer)(VkCommandBuffer commandBuffer, VkBuffer dstBuffer, VkDeviceSize dstOffset, VkDeviceSize dataSize, const uint32_t* pData);
+typedef void (VKAPI_PTR *PFN_vkCmdFillBuffer)(VkCommandBuffer commandBuffer, VkBuffer dstBuffer, VkDeviceSize dstOffset, VkDeviceSize size, uint32_t data);
+typedef void (VKAPI_PTR *PFN_vkCmdClearColorImage)(VkCommandBuffer commandBuffer, VkImage image, VkImageLayout imageLayout, const VkClearColorValue* pColor, uint32_t rangeCount, const VkImageSubresourceRange* pRanges);
+typedef void (VKAPI_PTR *PFN_vkCmdClearDepthStencilImage)(VkCommandBuffer commandBuffer, VkImage image, VkImageLayout imageLayout, const VkClearDepthStencilValue* pDepthStencil, uint32_t rangeCount, const VkImageSubresourceRange* pRanges);
+typedef void (VKAPI_PTR *PFN_vkCmdClearAttachments)(VkCommandBuffer commandBuffer, uint32_t attachmentCount, const VkClearAttachment* pAttachments, uint32_t rectCount, const VkClearRect* pRects);
+typedef void (VKAPI_PTR *PFN_vkCmdResolveImage)(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount, const VkImageResolve* pRegions);
+typedef void (VKAPI_PTR *PFN_vkCmdSetEvent)(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask);
+typedef void (VKAPI_PTR *PFN_vkCmdResetEvent)(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask);
+typedef void (VKAPI_PTR *PFN_vkCmdWaitEvents)(VkCommandBuffer commandBuffer, uint32_t eventCount, const VkEvent* pEvents, VkPipelineStageFlags srcStageMask, VkPipelineStageFlags dstStageMask, uint32_t memoryBarrierCount, const VkMemoryBarrier* pMemoryBarriers, uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier* pBufferMemoryBarriers, uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier* pImageMemoryBarriers);
+typedef void (VKAPI_PTR *PFN_vkCmdPipelineBarrier)(VkCommandBuffer commandBuffer, VkPipelineStageFlags srcStageMask, VkPipelineStageFlags dstStageMask, VkDependencyFlags dependencyFlags, uint32_t memoryBarrierCount, const VkMemoryBarrier* pMemoryBarriers, uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier* pBufferMemoryBarriers, uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier* pImageMemoryBarriers);
+typedef void (VKAPI_PTR *PFN_vkCmdBeginQuery)(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t query, VkQueryControlFlags flags);
+typedef void (VKAPI_PTR *PFN_vkCmdEndQuery)(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t query);
+typedef void (VKAPI_PTR *PFN_vkCmdResetQueryPool)(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount);
+typedef void (VKAPI_PTR *PFN_vkCmdWriteTimestamp)(VkCommandBuffer commandBuffer, VkPipelineStageFlagBits pipelineStage, VkQueryPool queryPool, uint32_t query);
+typedef void (VKAPI_PTR *PFN_vkCmdCopyQueryPoolResults)(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount, VkBuffer dstBuffer, VkDeviceSize dstOffset, VkDeviceSize stride, VkQueryResultFlags flags);
+typedef void (VKAPI_PTR *PFN_vkCmdPushConstants)(VkCommandBuffer commandBuffer, VkPipelineLayout layout, VkShaderStageFlags stageFlags, uint32_t offset, uint32_t size, const void* pValues);
+typedef void (VKAPI_PTR *PFN_vkCmdBeginRenderPass)(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo* pRenderPassBegin, VkSubpassContents contents);
+typedef void (VKAPI_PTR *PFN_vkCmdNextSubpass)(VkCommandBuffer commandBuffer, VkSubpassContents contents);
+typedef void (VKAPI_PTR *PFN_vkCmdEndRenderPass)(VkCommandBuffer commandBuffer);
+typedef void (VKAPI_PTR *PFN_vkCmdExecuteCommands)(VkCommandBuffer commandBuffer, uint32_t commandBufferCount, const VkCommandBuffer* pCommandBuffers);
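+
+/*
+ * The PFN_vk* typedefs above give the signatures used when entry points are
+ * loaded dynamically: instance-level commands through vkGetInstanceProcAddr
+ * and device-level commands through vkGetDeviceProcAddr, both declared in the
+ * prototype block below.  Minimal sketch, assuming a valid VkDevice `device`:
+ *
+ *     PFN_vkQueueSubmit pfnQueueSubmit =
+ *         (PFN_vkQueueSubmit)vkGetDeviceProcAddr(device, "vkQueueSubmit");
+ *
+ * Applications that compile with VK_NO_PROTOTYPES defined rely entirely on
+ * these typedefs instead of the prototypes that follow.
+ */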
+
+#ifndef VK_NO_PROTOTYPES
+VKAPI_ATTR VkResult VKAPI_CALL vkCreateInstance(
+    const VkInstanceCreateInfo*                 pCreateInfo,
+    const VkAllocationCallbacks*                pAllocator,
+    VkInstance*                                 pInstance);
+
+VKAPI_ATTR void VKAPI_CALL vkDestroyInstance(
+    VkInstance                                  instance,
+    const VkAllocationCallbacks*                pAllocator);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkEnumeratePhysicalDevices(
+    VkInstance                                  instance,
+    uint32_t*                                   pPhysicalDeviceCount,
+    VkPhysicalDevice*                           pPhysicalDevices);
+
+VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceFeatures(
+    VkPhysicalDevice                            physicalDevice,
+    VkPhysicalDeviceFeatures*                   pFeatures);
+
+VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceFormatProperties(
+    VkPhysicalDevice                            physicalDevice,
+    VkFormat                                    format,
+    VkFormatProperties*                         pFormatProperties);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceImageFormatProperties(
+    VkPhysicalDevice                            physicalDevice,
+    VkFormat                                    format,
+    VkImageType                                 type,
+    VkImageTiling                               tiling,
+    VkImageUsageFlags                           usage,
+    VkImageCreateFlags                          flags,
+    VkImageFormatProperties*                    pImageFormatProperties);
+
+VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceProperties(
+    VkPhysicalDevice                            physicalDevice,
+    VkPhysicalDeviceProperties*                 pProperties);
+
+VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceQueueFamilyProperties(
+    VkPhysicalDevice                            physicalDevice,
+    uint32_t*                                   pQueueFamilyPropertyCount,
+    VkQueueFamilyProperties*                    pQueueFamilyProperties);
+
+VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceMemoryProperties(
+    VkPhysicalDevice                            physicalDevice,
+    VkPhysicalDeviceMemoryProperties*           pMemoryProperties);
+
+VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetInstanceProcAddr(
+    VkInstance                                  instance,
+    const char*                                 pName);
+
+VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetDeviceProcAddr(
+    VkDevice                                    device,
+    const char*                                 pName);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkCreateDevice(
+    VkPhysicalDevice                            physicalDevice,
+    const VkDeviceCreateInfo*                   pCreateInfo,
+    const VkAllocationCallbacks*                pAllocator,
+    VkDevice*                                   pDevice);
+
+VKAPI_ATTR void VKAPI_CALL vkDestroyDevice(
+    VkDevice                                    device,
+    const VkAllocationCallbacks*                pAllocator);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateInstanceExtensionProperties(
+    const char*                                 pLayerName,
+    uint32_t*                                   pPropertyCount,
+    VkExtensionProperties*                      pProperties);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateDeviceExtensionProperties(
+    VkPhysicalDevice                            physicalDevice,
+    const char*                                 pLayerName,
+    uint32_t*                                   pPropertyCount,
+    VkExtensionProperties*                      pProperties);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateInstanceLayerProperties(
+    uint32_t*                                   pPropertyCount,
+    VkLayerProperties*                          pProperties);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateDeviceLayerProperties(
+    VkPhysicalDevice                            physicalDevice,
+    uint32_t*                                   pPropertyCount,
+    VkLayerProperties*                          pProperties);
+
+VKAPI_ATTR void VKAPI_CALL vkGetDeviceQueue(
+    VkDevice                                    device,
+    uint32_t                                    queueFamilyIndex,
+    uint32_t                                    queueIndex,
+    VkQueue*                                    pQueue);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkQueueSubmit(
+    VkQueue                                     queue,
+    uint32_t                                    submitCount,
+    const VkSubmitInfo*                         pSubmits,
+    VkFence                                     fence);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkQueueWaitIdle(
+    VkQueue                                     queue);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkDeviceWaitIdle(
+    VkDevice                                    device);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkAllocateMemory(
+    VkDevice                                    device,
+    const VkMemoryAllocateInfo*                 pAllocateInfo,
+    const VkAllocationCallbacks*                pAllocator,
+    VkDeviceMemory*                             pMemory);
+
+VKAPI_ATTR void VKAPI_CALL vkFreeMemory(
+    VkDevice                                    device,
+    VkDeviceMemory                              memory,
+    const VkAllocationCallbacks*                pAllocator);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkMapMemory(
+    VkDevice                                    device,
+    VkDeviceMemory                              memory,
+    VkDeviceSize                                offset,
+    VkDeviceSize                                size,
+    VkMemoryMapFlags                            flags,
+    void**                                      ppData);
+
+VKAPI_ATTR void VKAPI_CALL vkUnmapMemory(
+    VkDevice                                    device,
+    VkDeviceMemory                              memory);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkFlushMappedMemoryRanges(
+    VkDevice                                    device,
+    uint32_t                                    memoryRangeCount,
+    const VkMappedMemoryRange*                  pMemoryRanges);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkInvalidateMappedMemoryRanges(
+    VkDevice                                    device,
+    uint32_t                                    memoryRangeCount,
+    const VkMappedMemoryRange*                  pMemoryRanges);
+
+VKAPI_ATTR void VKAPI_CALL vkGetDeviceMemoryCommitment(
+    VkDevice                                    device,
+    VkDeviceMemory                              memory,
+    VkDeviceSize*                               pCommittedMemoryInBytes);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkBindBufferMemory(
+    VkDevice                                    device,
+    VkBuffer                                    buffer,
+    VkDeviceMemory                              memory,
+    VkDeviceSize                                memoryOffset);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkBindImageMemory(
+    VkDevice                                    device,
+    VkImage                                     image,
+    VkDeviceMemory                              memory,
+    VkDeviceSize                                memoryOffset);
+
+VKAPI_ATTR void VKAPI_CALL vkGetBufferMemoryRequirements(
+    VkDevice                                    device,
+    VkBuffer                                    buffer,
+    VkMemoryRequirements*                       pMemoryRequirements);
+
+VKAPI_ATTR void VKAPI_CALL vkGetImageMemoryRequirements(
+    VkDevice                                    device,
+    VkImage                                     image,
+    VkMemoryRequirements*                       pMemoryRequirements);
+
+VKAPI_ATTR void VKAPI_CALL vkGetImageSparseMemoryRequirements(
+    VkDevice                                    device,
+    VkImage                                     image,
+    uint32_t*                                   pSparseMemoryRequirementCount,
+    VkSparseImageMemoryRequirements*            pSparseMemoryRequirements);
+
+VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceSparseImageFormatProperties(
+    VkPhysicalDevice                            physicalDevice,
+    VkFormat                                    format,
+    VkImageType                                 type,
+    VkSampleCountFlagBits                       samples,
+    VkImageUsageFlags                           usage,
+    VkImageTiling                               tiling,
+    uint32_t*                                   pPropertyCount,
+    VkSparseImageFormatProperties*              pProperties);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkQueueBindSparse(
+    VkQueue                                     queue,
+    uint32_t                                    bindInfoCount,
+    const VkBindSparseInfo*                     pBindInfo,
+    VkFence                                     fence);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkCreateFence(
+    VkDevice                                    device,
+    const VkFenceCreateInfo*                    pCreateInfo,
+    const VkAllocationCallbacks*                pAllocator,
+    VkFence*                                    pFence);
+
+VKAPI_ATTR void VKAPI_CALL vkDestroyFence(
+    VkDevice                                    device,
+    VkFence                                     fence,
+    const VkAllocationCallbacks*                pAllocator);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkResetFences(
+    VkDevice                                    device,
+    uint32_t                                    fenceCount,
+    const VkFence*                              pFences);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkGetFenceStatus(
+    VkDevice                                    device,
+    VkFence                                     fence);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkWaitForFences(
+    VkDevice                                    device,
+    uint32_t                                    fenceCount,
+    const VkFence*                              pFences,
+    VkBool32                                    waitAll,
+    uint64_t                                    timeout);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkCreateSemaphore(
+    VkDevice                                    device,
+    const VkSemaphoreCreateInfo*                pCreateInfo,
+    const VkAllocationCallbacks*                pAllocator,
+    VkSemaphore*                                pSemaphore);
+
+VKAPI_ATTR void VKAPI_CALL vkDestroySemaphore(
+    VkDevice                                    device,
+    VkSemaphore                                 semaphore,
+    const VkAllocationCallbacks*                pAllocator);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkCreateEvent(
+    VkDevice                                    device,
+    const VkEventCreateInfo*                    pCreateInfo,
+    const VkAllocationCallbacks*                pAllocator,
+    VkEvent*                                    pEvent);
+
+VKAPI_ATTR void VKAPI_CALL vkDestroyEvent(
+    VkDevice                                    device,
+    VkEvent                                     event,
+    const VkAllocationCallbacks*                pAllocator);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkGetEventStatus(
+    VkDevice                                    device,
+    VkEvent                                     event);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkSetEvent(
+    VkDevice                                    device,
+    VkEvent                                     event);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkResetEvent(
+    VkDevice                                    device,
+    VkEvent                                     event);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkCreateQueryPool(
+    VkDevice                                    device,
+    const VkQueryPoolCreateInfo*                pCreateInfo,
+    const VkAllocationCallbacks*                pAllocator,
+    VkQueryPool*                                pQueryPool);
+
+VKAPI_ATTR void VKAPI_CALL vkDestroyQueryPool(
+    VkDevice                                    device,
+    VkQueryPool                                 queryPool,
+    const VkAllocationCallbacks*                pAllocator);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkGetQueryPoolResults(
+    VkDevice                                    device,
+    VkQueryPool                                 queryPool,
+    uint32_t                                    firstQuery,
+    uint32_t                                    queryCount,
+    size_t                                      dataSize,
+    void*                                       pData,
+    VkDeviceSize                                stride,
+    VkQueryResultFlags                          flags);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkCreateBuffer(
+    VkDevice                                    device,
+    const VkBufferCreateInfo*                   pCreateInfo,
+    const VkAllocationCallbacks*                pAllocator,
+    VkBuffer*                                   pBuffer);
+
+VKAPI_ATTR void VKAPI_CALL vkDestroyBuffer(
+    VkDevice                                    device,
+    VkBuffer                                    buffer,
+    const VkAllocationCallbacks*                pAllocator);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkCreateBufferView(
+    VkDevice                                    device,
+    const VkBufferViewCreateInfo*               pCreateInfo,
+    const VkAllocationCallbacks*                pAllocator,
+    VkBufferView*                               pView);
+
+VKAPI_ATTR void VKAPI_CALL vkDestroyBufferView(
+    VkDevice                                    device,
+    VkBufferView                                bufferView,
+    const VkAllocationCallbacks*                pAllocator);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkCreateImage(
+    VkDevice                                    device,
+    const VkImageCreateInfo*                    pCreateInfo,
+    const VkAllocationCallbacks*                pAllocator,
+    VkImage*                                    pImage);
+
+VKAPI_ATTR void VKAPI_CALL vkDestroyImage(
+    VkDevice                                    device,
+    VkImage                                     image,
+    const VkAllocationCallbacks*                pAllocator);
+
+VKAPI_ATTR void VKAPI_CALL vkGetImageSubresourceLayout(
+    VkDevice                                    device,
+    VkImage                                     image,
+    const VkImageSubresource*                   pSubresource,
+    VkSubresourceLayout*                        pLayout);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkCreateImageView(
+    VkDevice                                    device,
+    const VkImageViewCreateInfo*                pCreateInfo,
+    const VkAllocationCallbacks*                pAllocator,
+    VkImageView*                                pView);
+
+VKAPI_ATTR void VKAPI_CALL vkDestroyImageView(
+    VkDevice                                    device,
+    VkImageView                                 imageView,
+    const VkAllocationCallbacks*                pAllocator);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkCreateShaderModule(
+    VkDevice                                    device,
+    const VkShaderModuleCreateInfo*             pCreateInfo,
+    const VkAllocationCallbacks*                pAllocator,
+    VkShaderModule*                             pShaderModule);
+
+VKAPI_ATTR void VKAPI_CALL vkDestroyShaderModule(
+    VkDevice                                    device,
+    VkShaderModule                              shaderModule,
+    const VkAllocationCallbacks*                pAllocator);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkCreatePipelineCache(
+    VkDevice                                    device,
+    const VkPipelineCacheCreateInfo*            pCreateInfo,
+    const VkAllocationCallbacks*                pAllocator,
+    VkPipelineCache*                            pPipelineCache);
+
+VKAPI_ATTR void VKAPI_CALL vkDestroyPipelineCache(
+    VkDevice                                    device,
+    VkPipelineCache                             pipelineCache,
+    const VkAllocationCallbacks*                pAllocator);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkGetPipelineCacheData(
+    VkDevice                                    device,
+    VkPipelineCache                             pipelineCache,
+    size_t*                                     pDataSize,
+    void*                                       pData);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkMergePipelineCaches(
+    VkDevice                                    device,
+    VkPipelineCache                             dstCache,
+    uint32_t                                    srcCacheCount,
+    const VkPipelineCache*                      pSrcCaches);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkCreateGraphicsPipelines(
+    VkDevice                                    device,
+    VkPipelineCache                             pipelineCache,
+    uint32_t                                    createInfoCount,
+    const VkGraphicsPipelineCreateInfo*         pCreateInfos,
+    const VkAllocationCallbacks*                pAllocator,
+    VkPipeline*                                 pPipelines);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkCreateComputePipelines(
+    VkDevice                                    device,
+    VkPipelineCache                             pipelineCache,
+    uint32_t                                    createInfoCount,
+    const VkComputePipelineCreateInfo*          pCreateInfos,
+    const VkAllocationCallbacks*                pAllocator,
+    VkPipeline*                                 pPipelines);
+
+VKAPI_ATTR void VKAPI_CALL vkDestroyPipeline(
+    VkDevice                                    device,
+    VkPipeline                                  pipeline,
+    const VkAllocationCallbacks*                pAllocator);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkCreatePipelineLayout(
+    VkDevice                                    device,
+    const VkPipelineLayoutCreateInfo*           pCreateInfo,
+    const VkAllocationCallbacks*                pAllocator,
+    VkPipelineLayout*                           pPipelineLayout);
+
+VKAPI_ATTR void VKAPI_CALL vkDestroyPipelineLayout(
+    VkDevice                                    device,
+    VkPipelineLayout                            pipelineLayout,
+    const VkAllocationCallbacks*                pAllocator);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkCreateSampler(
+    VkDevice                                    device,
+    const VkSamplerCreateInfo*                  pCreateInfo,
+    const VkAllocationCallbacks*                pAllocator,
+    VkSampler*                                  pSampler);
+
+VKAPI_ATTR void VKAPI_CALL vkDestroySampler(
+    VkDevice                                    device,
+    VkSampler                                   sampler,
+    const VkAllocationCallbacks*                pAllocator);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkCreateDescriptorSetLayout(
+    VkDevice                                    device,
+    const VkDescriptorSetLayoutCreateInfo*      pCreateInfo,
+    const VkAllocationCallbacks*                pAllocator,
+    VkDescriptorSetLayout*                      pSetLayout);
+
+VKAPI_ATTR void VKAPI_CALL vkDestroyDescriptorSetLayout(
+    VkDevice                                    device,
+    VkDescriptorSetLayout                       descriptorSetLayout,
+    const VkAllocationCallbacks*                pAllocator);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkCreateDescriptorPool(
+    VkDevice                                    device,
+    const VkDescriptorPoolCreateInfo*           pCreateInfo,
+    const VkAllocationCallbacks*                pAllocator,
+    VkDescriptorPool*                           pDescriptorPool);
+
+VKAPI_ATTR void VKAPI_CALL vkDestroyDescriptorPool(
+    VkDevice                                    device,
+    VkDescriptorPool                            descriptorPool,
+    const VkAllocationCallbacks*                pAllocator);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkResetDescriptorPool(
+    VkDevice                                    device,
+    VkDescriptorPool                            descriptorPool,
+    VkDescriptorPoolResetFlags                  flags);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkAllocateDescriptorSets(
+    VkDevice                                    device,
+    const VkDescriptorSetAllocateInfo*          pAllocateInfo,
+    VkDescriptorSet*                            pDescriptorSets);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkFreeDescriptorSets(
+    VkDevice                                    device,
+    VkDescriptorPool                            descriptorPool,
+    uint32_t                                    descriptorSetCount,
+    const VkDescriptorSet*                      pDescriptorSets);
+
+VKAPI_ATTR void VKAPI_CALL vkUpdateDescriptorSets(
+    VkDevice                                    device,
+    uint32_t                                    descriptorWriteCount,
+    const VkWriteDescriptorSet*                 pDescriptorWrites,
+    uint32_t                                    descriptorCopyCount,
+    const VkCopyDescriptorSet*                  pDescriptorCopies);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkCreateFramebuffer(
+    VkDevice                                    device,
+    const VkFramebufferCreateInfo*              pCreateInfo,
+    const VkAllocationCallbacks*                pAllocator,
+    VkFramebuffer*                              pFramebuffer);
+
+VKAPI_ATTR void VKAPI_CALL vkDestroyFramebuffer(
+    VkDevice                                    device,
+    VkFramebuffer                               framebuffer,
+    const VkAllocationCallbacks*                pAllocator);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkCreateRenderPass(
+    VkDevice                                    device,
+    const VkRenderPassCreateInfo*               pCreateInfo,
+    const VkAllocationCallbacks*                pAllocator,
+    VkRenderPass*                               pRenderPass);
+
+VKAPI_ATTR void VKAPI_CALL vkDestroyRenderPass(
+    VkDevice                                    device,
+    VkRenderPass                                renderPass,
+    const VkAllocationCallbacks*                pAllocator);
+
+VKAPI_ATTR void VKAPI_CALL vkGetRenderAreaGranularity(
+    VkDevice                                    device,
+    VkRenderPass                                renderPass,
+    VkExtent2D*                                 pGranularity);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkCreateCommandPool(
+    VkDevice                                    device,
+    const VkCommandPoolCreateInfo*              pCreateInfo,
+    const VkAllocationCallbacks*                pAllocator,
+    VkCommandPool*                              pCommandPool);
+
+VKAPI_ATTR void VKAPI_CALL vkDestroyCommandPool(
+    VkDevice                                    device,
+    VkCommandPool                               commandPool,
+    const VkAllocationCallbacks*                pAllocator);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkResetCommandPool(
+    VkDevice                                    device,
+    VkCommandPool                               commandPool,
+    VkCommandPoolResetFlags                     flags);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkAllocateCommandBuffers(
+    VkDevice                                    device,
+    const VkCommandBufferAllocateInfo*          pAllocateInfo,
+    VkCommandBuffer*                            pCommandBuffers);
+
+VKAPI_ATTR void VKAPI_CALL vkFreeCommandBuffers(
+    VkDevice                                    device,
+    VkCommandPool                               commandPool,
+    uint32_t                                    commandBufferCount,
+    const VkCommandBuffer*                      pCommandBuffers);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkBeginCommandBuffer(
+    VkCommandBuffer                             commandBuffer,
+    const VkCommandBufferBeginInfo*             pBeginInfo);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkEndCommandBuffer(
+    VkCommandBuffer                             commandBuffer);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkResetCommandBuffer(
+    VkCommandBuffer                             commandBuffer,
+    VkCommandBufferResetFlags                   flags);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdBindPipeline(
+    VkCommandBuffer                             commandBuffer,
+    VkPipelineBindPoint                         pipelineBindPoint,
+    VkPipeline                                  pipeline);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdSetViewport(
+    VkCommandBuffer                             commandBuffer,
+    uint32_t                                    firstViewport,
+    uint32_t                                    viewportCount,
+    const VkViewport*                           pViewports);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdSetScissor(
+    VkCommandBuffer                             commandBuffer,
+    uint32_t                                    firstScissor,
+    uint32_t                                    scissorCount,
+    const VkRect2D*                             pScissors);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdSetLineWidth(
+    VkCommandBuffer                             commandBuffer,
+    float                                       lineWidth);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdSetDepthBias(
+    VkCommandBuffer                             commandBuffer,
+    float                                       depthBiasConstantFactor,
+    float                                       depthBiasClamp,
+    float                                       depthBiasSlopeFactor);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdSetBlendConstants(
+    VkCommandBuffer                             commandBuffer,
+    const float                                 blendConstants[4]);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdSetDepthBounds(
+    VkCommandBuffer                             commandBuffer,
+    float                                       minDepthBounds,
+    float                                       maxDepthBounds);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdSetStencilCompareMask(
+    VkCommandBuffer                             commandBuffer,
+    VkStencilFaceFlags                          faceMask,
+    uint32_t                                    compareMask);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdSetStencilWriteMask(
+    VkCommandBuffer                             commandBuffer,
+    VkStencilFaceFlags                          faceMask,
+    uint32_t                                    writeMask);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdSetStencilReference(
+    VkCommandBuffer                             commandBuffer,
+    VkStencilFaceFlags                          faceMask,
+    uint32_t                                    reference);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdBindDescriptorSets(
+    VkCommandBuffer                             commandBuffer,
+    VkPipelineBindPoint                         pipelineBindPoint,
+    VkPipelineLayout                            layout,
+    uint32_t                                    firstSet,
+    uint32_t                                    descriptorSetCount,
+    const VkDescriptorSet*                      pDescriptorSets,
+    uint32_t                                    dynamicOffsetCount,
+    const uint32_t*                             pDynamicOffsets);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdBindIndexBuffer(
+    VkCommandBuffer                             commandBuffer,
+    VkBuffer                                    buffer,
+    VkDeviceSize                                offset,
+    VkIndexType                                 indexType);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdBindVertexBuffers(
+    VkCommandBuffer                             commandBuffer,
+    uint32_t                                    firstBinding,
+    uint32_t                                    bindingCount,
+    const VkBuffer*                             pBuffers,
+    const VkDeviceSize*                         pOffsets);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdDraw(
+    VkCommandBuffer                             commandBuffer,
+    uint32_t                                    vertexCount,
+    uint32_t                                    instanceCount,
+    uint32_t                                    firstVertex,
+    uint32_t                                    firstInstance);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdDrawIndexed(
+    VkCommandBuffer                             commandBuffer,
+    uint32_t                                    indexCount,
+    uint32_t                                    instanceCount,
+    uint32_t                                    firstIndex,
+    int32_t                                     vertexOffset,
+    uint32_t                                    firstInstance);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdDrawIndirect(
+    VkCommandBuffer                             commandBuffer,
+    VkBuffer                                    buffer,
+    VkDeviceSize                                offset,
+    uint32_t                                    drawCount,
+    uint32_t                                    stride);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdDrawIndexedIndirect(
+    VkCommandBuffer                             commandBuffer,
+    VkBuffer                                    buffer,
+    VkDeviceSize                                offset,
+    uint32_t                                    drawCount,
+    uint32_t                                    stride);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdDispatch(
+    VkCommandBuffer                             commandBuffer,
+    uint32_t                                    x,
+    uint32_t                                    y,
+    uint32_t                                    z);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdDispatchIndirect(
+    VkCommandBuffer                             commandBuffer,
+    VkBuffer                                    buffer,
+    VkDeviceSize                                offset);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdCopyBuffer(
+    VkCommandBuffer                             commandBuffer,
+    VkBuffer                                    srcBuffer,
+    VkBuffer                                    dstBuffer,
+    uint32_t                                    regionCount,
+    const VkBufferCopy*                         pRegions);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdCopyImage(
+    VkCommandBuffer                             commandBuffer,
+    VkImage                                     srcImage,
+    VkImageLayout                               srcImageLayout,
+    VkImage                                     dstImage,
+    VkImageLayout                               dstImageLayout,
+    uint32_t                                    regionCount,
+    const VkImageCopy*                          pRegions);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdBlitImage(
+    VkCommandBuffer                             commandBuffer,
+    VkImage                                     srcImage,
+    VkImageLayout                               srcImageLayout,
+    VkImage                                     dstImage,
+    VkImageLayout                               dstImageLayout,
+    uint32_t                                    regionCount,
+    const VkImageBlit*                          pRegions,
+    VkFilter                                    filter);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdCopyBufferToImage(
+    VkCommandBuffer                             commandBuffer,
+    VkBuffer                                    srcBuffer,
+    VkImage                                     dstImage,
+    VkImageLayout                               dstImageLayout,
+    uint32_t                                    regionCount,
+    const VkBufferImageCopy*                    pRegions);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdCopyImageToBuffer(
+    VkCommandBuffer                             commandBuffer,
+    VkImage                                     srcImage,
+    VkImageLayout                               srcImageLayout,
+    VkBuffer                                    dstBuffer,
+    uint32_t                                    regionCount,
+    const VkBufferImageCopy*                    pRegions);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdUpdateBuffer(
+    VkCommandBuffer                             commandBuffer,
+    VkBuffer                                    dstBuffer,
+    VkDeviceSize                                dstOffset,
+    VkDeviceSize                                dataSize,
+    const uint32_t*                             pData);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdFillBuffer(
+    VkCommandBuffer                             commandBuffer,
+    VkBuffer                                    dstBuffer,
+    VkDeviceSize                                dstOffset,
+    VkDeviceSize                                size,
+    uint32_t                                    data);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdClearColorImage(
+    VkCommandBuffer                             commandBuffer,
+    VkImage                                     image,
+    VkImageLayout                               imageLayout,
+    const VkClearColorValue*                    pColor,
+    uint32_t                                    rangeCount,
+    const VkImageSubresourceRange*              pRanges);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdClearDepthStencilImage(
+    VkCommandBuffer                             commandBuffer,
+    VkImage                                     image,
+    VkImageLayout                               imageLayout,
+    const VkClearDepthStencilValue*             pDepthStencil,
+    uint32_t                                    rangeCount,
+    const VkImageSubresourceRange*              pRanges);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdClearAttachments(
+    VkCommandBuffer                             commandBuffer,
+    uint32_t                                    attachmentCount,
+    const VkClearAttachment*                    pAttachments,
+    uint32_t                                    rectCount,
+    const VkClearRect*                          pRects);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdResolveImage(
+    VkCommandBuffer                             commandBuffer,
+    VkImage                                     srcImage,
+    VkImageLayout                               srcImageLayout,
+    VkImage                                     dstImage,
+    VkImageLayout                               dstImageLayout,
+    uint32_t                                    regionCount,
+    const VkImageResolve*                       pRegions);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdSetEvent(
+    VkCommandBuffer                             commandBuffer,
+    VkEvent                                     event,
+    VkPipelineStageFlags                        stageMask);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdResetEvent(
+    VkCommandBuffer                             commandBuffer,
+    VkEvent                                     event,
+    VkPipelineStageFlags                        stageMask);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdWaitEvents(
+    VkCommandBuffer                             commandBuffer,
+    uint32_t                                    eventCount,
+    const VkEvent*                              pEvents,
+    VkPipelineStageFlags                        srcStageMask,
+    VkPipelineStageFlags                        dstStageMask,
+    uint32_t                                    memoryBarrierCount,
+    const VkMemoryBarrier*                      pMemoryBarriers,
+    uint32_t                                    bufferMemoryBarrierCount,
+    const VkBufferMemoryBarrier*                pBufferMemoryBarriers,
+    uint32_t                                    imageMemoryBarrierCount,
+    const VkImageMemoryBarrier*                 pImageMemoryBarriers);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdPipelineBarrier(
+    VkCommandBuffer                             commandBuffer,
+    VkPipelineStageFlags                        srcStageMask,
+    VkPipelineStageFlags                        dstStageMask,
+    VkDependencyFlags                           dependencyFlags,
+    uint32_t                                    memoryBarrierCount,
+    const VkMemoryBarrier*                      pMemoryBarriers,
+    uint32_t                                    bufferMemoryBarrierCount,
+    const VkBufferMemoryBarrier*                pBufferMemoryBarriers,
+    uint32_t                                    imageMemoryBarrierCount,
+    const VkImageMemoryBarrier*                 pImageMemoryBarriers);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdBeginQuery(
+    VkCommandBuffer                             commandBuffer,
+    VkQueryPool                                 queryPool,
+    uint32_t                                    query,
+    VkQueryControlFlags                         flags);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdEndQuery(
+    VkCommandBuffer                             commandBuffer,
+    VkQueryPool                                 queryPool,
+    uint32_t                                    query);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdResetQueryPool(
+    VkCommandBuffer                             commandBuffer,
+    VkQueryPool                                 queryPool,
+    uint32_t                                    firstQuery,
+    uint32_t                                    queryCount);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdWriteTimestamp(
+    VkCommandBuffer                             commandBuffer,
+    VkPipelineStageFlagBits                     pipelineStage,
+    VkQueryPool                                 queryPool,
+    uint32_t                                    query);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdCopyQueryPoolResults(
+    VkCommandBuffer                             commandBuffer,
+    VkQueryPool                                 queryPool,
+    uint32_t                                    firstQuery,
+    uint32_t                                    queryCount,
+    VkBuffer                                    dstBuffer,
+    VkDeviceSize                                dstOffset,
+    VkDeviceSize                                stride,
+    VkQueryResultFlags                          flags);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdPushConstants(
+    VkCommandBuffer                             commandBuffer,
+    VkPipelineLayout                            layout,
+    VkShaderStageFlags                          stageFlags,
+    uint32_t                                    offset,
+    uint32_t                                    size,
+    const void*                                 pValues);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdBeginRenderPass(
+    VkCommandBuffer                             commandBuffer,
+    const VkRenderPassBeginInfo*                pRenderPassBegin,
+    VkSubpassContents                           contents);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdNextSubpass(
+    VkCommandBuffer                             commandBuffer,
+    VkSubpassContents                           contents);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdEndRenderPass(
+    VkCommandBuffer                             commandBuffer);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdExecuteCommands(
+    VkCommandBuffer                             commandBuffer,
+    uint32_t                                    commandBufferCount,
+    const VkCommandBuffer*                      pCommandBuffers);
+#endif
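The query commands above are typically used in pairs with vkCmdCopyQueryPoolResults to read timing or occlusion data back into a buffer. A minimal GPU-timing sketch with two timestamp queries, assuming cmd, queryPool and resultBuffer are valid handles created elsewhere (not taken from this header):

/* Record two timestamps around some work and copy the 64-bit results. */
vkCmdResetQueryPool(cmd, queryPool, 0, 2);
vkCmdWriteTimestamp(cmd, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, queryPool, 0);
/* ... record the draws or dispatches to be timed ... */
vkCmdWriteTimestamp(cmd, VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, queryPool, 1);
vkCmdCopyQueryPoolResults(cmd, queryPool, 0, 2,
                          resultBuffer, 0, sizeof(uint64_t),
                          VK_QUERY_RESULT_64_BIT | VK_QUERY_RESULT_WAIT_BIT);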
+
+#define VK_KHR_surface 1
+VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkSurfaceKHR)
+
+#define VK_KHR_SURFACE_SPEC_VERSION       25
+#define VK_KHR_SURFACE_EXTENSION_NAME     "VK_KHR_surface"
+
+
+typedef enum VkColorSpaceKHR {
+    VK_COLORSPACE_SRGB_NONLINEAR_KHR = 0,
+    VK_COLORSPACE_BEGIN_RANGE = VK_COLORSPACE_SRGB_NONLINEAR_KHR,
+    VK_COLORSPACE_END_RANGE = VK_COLORSPACE_SRGB_NONLINEAR_KHR,
+    VK_COLORSPACE_RANGE_SIZE = (VK_COLORSPACE_SRGB_NONLINEAR_KHR - VK_COLORSPACE_SRGB_NONLINEAR_KHR + 1),
+    VK_COLORSPACE_MAX_ENUM = 0x7FFFFFFF
+} VkColorSpaceKHR;
+
+typedef enum VkPresentModeKHR {
+    VK_PRESENT_MODE_IMMEDIATE_KHR = 0,
+    VK_PRESENT_MODE_MAILBOX_KHR = 1,
+    VK_PRESENT_MODE_FIFO_KHR = 2,
+    VK_PRESENT_MODE_FIFO_RELAXED_KHR = 3,
+    VK_PRESENT_MODE_BEGIN_RANGE = VK_PRESENT_MODE_IMMEDIATE_KHR,
+    VK_PRESENT_MODE_END_RANGE = VK_PRESENT_MODE_FIFO_RELAXED_KHR,
+    VK_PRESENT_MODE_RANGE_SIZE = (VK_PRESENT_MODE_FIFO_RELAXED_KHR - VK_PRESENT_MODE_IMMEDIATE_KHR + 1),
+    VK_PRESENT_MODE_MAX_ENUM = 0x7FFFFFFF
+} VkPresentModeKHR;
+
+
+typedef enum VkSurfaceTransformFlagBitsKHR {
+    VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR = 0x00000001,
+    VK_SURFACE_TRANSFORM_ROTATE_90_BIT_KHR = 0x00000002,
+    VK_SURFACE_TRANSFORM_ROTATE_180_BIT_KHR = 0x00000004,
+    VK_SURFACE_TRANSFORM_ROTATE_270_BIT_KHR = 0x00000008,
+    VK_SURFACE_TRANSFORM_HORIZONTAL_MIRROR_BIT_KHR = 0x00000010,
+    VK_SURFACE_TRANSFORM_HORIZONTAL_MIRROR_ROTATE_90_BIT_KHR = 0x00000020,
+    VK_SURFACE_TRANSFORM_HORIZONTAL_MIRROR_ROTATE_180_BIT_KHR = 0x00000040,
+    VK_SURFACE_TRANSFORM_HORIZONTAL_MIRROR_ROTATE_270_BIT_KHR = 0x00000080,
+    VK_SURFACE_TRANSFORM_INHERIT_BIT_KHR = 0x00000100,
+} VkSurfaceTransformFlagBitsKHR;
+typedef VkFlags VkSurfaceTransformFlagsKHR;
+
+typedef enum VkCompositeAlphaFlagBitsKHR {
+    VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR = 0x00000001,
+    VK_COMPOSITE_ALPHA_PRE_MULTIPLIED_BIT_KHR = 0x00000002,
+    VK_COMPOSITE_ALPHA_POST_MULTIPLIED_BIT_KHR = 0x00000004,
+    VK_COMPOSITE_ALPHA_INHERIT_BIT_KHR = 0x00000008,
+} VkCompositeAlphaFlagBitsKHR;
+typedef VkFlags VkCompositeAlphaFlagsKHR;
+
+typedef struct VkSurfaceCapabilitiesKHR {
+    uint32_t                         minImageCount;
+    uint32_t                         maxImageCount;
+    VkExtent2D                       currentExtent;
+    VkExtent2D                       minImageExtent;
+    VkExtent2D                       maxImageExtent;
+    uint32_t                         maxImageArrayLayers;
+    VkSurfaceTransformFlagsKHR       supportedTransforms;
+    VkSurfaceTransformFlagBitsKHR    currentTransform;
+    VkCompositeAlphaFlagsKHR         supportedCompositeAlpha;
+    VkImageUsageFlags                supportedUsageFlags;
+} VkSurfaceCapabilitiesKHR;
+
+typedef struct VkSurfaceFormatKHR {
+    VkFormat           format;
+    VkColorSpaceKHR    colorSpace;
+} VkSurfaceFormatKHR;
+
+
+typedef void (VKAPI_PTR *PFN_vkDestroySurfaceKHR)(VkInstance instance, VkSurfaceKHR surface, const VkAllocationCallbacks* pAllocator);
+typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceSurfaceSupportKHR)(VkPhysicalDevice physicalDevice, uint32_t queueFamilyIndex, VkSurfaceKHR surface, VkBool32* pSupported);
+typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceSurfaceCapabilitiesKHR)(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface, VkSurfaceCapabilitiesKHR* pSurfaceCapabilities);
+typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceSurfaceFormatsKHR)(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface, uint32_t* pSurfaceFormatCount, VkSurfaceFormatKHR* pSurfaceFormats);
+typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceSurfacePresentModesKHR)(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface, uint32_t* pPresentModeCount, VkPresentModeKHR* pPresentModes);
+
+#ifndef VK_NO_PROTOTYPES
+VKAPI_ATTR void VKAPI_CALL vkDestroySurfaceKHR(
+    VkInstance                                  instance,
+    VkSurfaceKHR                                surface,
+    const VkAllocationCallbacks*                pAllocator);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceSurfaceSupportKHR(
+    VkPhysicalDevice                            physicalDevice,
+    uint32_t                                    queueFamilyIndex,
+    VkSurfaceKHR                                surface,
+    VkBool32*                                   pSupported);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceSurfaceCapabilitiesKHR(
+    VkPhysicalDevice                            physicalDevice,
+    VkSurfaceKHR                                surface,
+    VkSurfaceCapabilitiesKHR*                   pSurfaceCapabilities);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceSurfaceFormatsKHR(
+    VkPhysicalDevice                            physicalDevice,
+    VkSurfaceKHR                                surface,
+    uint32_t*                                   pSurfaceFormatCount,
+    VkSurfaceFormatKHR*                         pSurfaceFormats);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceSurfacePresentModesKHR(
+    VkPhysicalDevice                            physicalDevice,
+    VkSurfaceKHR                                surface,
+    uint32_t*                                   pPresentModeCount,
+    VkPresentModeKHR*                           pPresentModes);
+#endif
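The surface queries follow Vulkan's usual two-call enumeration pattern: ask for the count with a NULL array, allocate, then fetch. A short sketch, assuming physicalDevice, queueFamilyIndex and surface are valid handles and <stdlib.h> is available for malloc:

VkBool32 supported = VK_FALSE;
vkGetPhysicalDeviceSurfaceSupportKHR(physicalDevice, queueFamilyIndex,
                                     surface, &supported);

uint32_t formatCount = 0;
vkGetPhysicalDeviceSurfaceFormatsKHR(physicalDevice, surface, &formatCount, NULL);
VkSurfaceFormatKHR *formats = malloc(formatCount * sizeof(*formats));
vkGetPhysicalDeviceSurfaceFormatsKHR(physicalDevice, surface, &formatCount, formats);
/* ... choose a format/colorSpace pair for swapchain creation, then free(formats) ... */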
+
+#define VK_KHR_swapchain 1
+VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkSwapchainKHR)
+
+#define VK_KHR_SWAPCHAIN_SPEC_VERSION     67
+#define VK_KHR_SWAPCHAIN_EXTENSION_NAME   "VK_KHR_swapchain"
+
+typedef VkFlags VkSwapchainCreateFlagsKHR;
+
+typedef struct VkSwapchainCreateInfoKHR {
+    VkStructureType                  sType;
+    const void*                      pNext;
+    VkSwapchainCreateFlagsKHR        flags;
+    VkSurfaceKHR                     surface;
+    uint32_t                         minImageCount;
+    VkFormat                         imageFormat;
+    VkColorSpaceKHR                  imageColorSpace;
+    VkExtent2D                       imageExtent;
+    uint32_t                         imageArrayLayers;
+    VkImageUsageFlags                imageUsage;
+    VkSharingMode                    imageSharingMode;
+    uint32_t                         queueFamilyIndexCount;
+    const uint32_t*                  pQueueFamilyIndices;
+    VkSurfaceTransformFlagBitsKHR    preTransform;
+    VkCompositeAlphaFlagBitsKHR      compositeAlpha;
+    VkPresentModeKHR                 presentMode;
+    VkBool32                         clipped;
+    VkSwapchainKHR                   oldSwapchain;
+} VkSwapchainCreateInfoKHR;
+
+typedef struct VkPresentInfoKHR {
+    VkStructureType          sType;
+    const void*              pNext;
+    uint32_t                 waitSemaphoreCount;
+    const VkSemaphore*       pWaitSemaphores;
+    uint32_t                 swapchainCount;
+    const VkSwapchainKHR*    pSwapchains;
+    const uint32_t*          pImageIndices;
+    VkResult*                pResults;
+} VkPresentInfoKHR;
+
+
+typedef VkResult (VKAPI_PTR *PFN_vkCreateSwapchainKHR)(VkDevice device, const VkSwapchainCreateInfoKHR* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSwapchainKHR* pSwapchain);
+typedef void (VKAPI_PTR *PFN_vkDestroySwapchainKHR)(VkDevice device, VkSwapchainKHR swapchain, const VkAllocationCallbacks* pAllocator);
+typedef VkResult (VKAPI_PTR *PFN_vkGetSwapchainImagesKHR)(VkDevice device, VkSwapchainKHR swapchain, uint32_t* pSwapchainImageCount, VkImage* pSwapchainImages);
+typedef VkResult (VKAPI_PTR *PFN_vkAcquireNextImageKHR)(VkDevice device, VkSwapchainKHR swapchain, uint64_t timeout, VkSemaphore semaphore, VkFence fence, uint32_t* pImageIndex);
+typedef VkResult (VKAPI_PTR *PFN_vkQueuePresentKHR)(VkQueue queue, const VkPresentInfoKHR* pPresentInfo);
+
+#ifndef VK_NO_PROTOTYPES
+VKAPI_ATTR VkResult VKAPI_CALL vkCreateSwapchainKHR(
+    VkDevice                                    device,
+    const VkSwapchainCreateInfoKHR*             pCreateInfo,
+    const VkAllocationCallbacks*                pAllocator,
+    VkSwapchainKHR*                             pSwapchain);
+
+VKAPI_ATTR void VKAPI_CALL vkDestroySwapchainKHR(
+    VkDevice                                    device,
+    VkSwapchainKHR                              swapchain,
+    const VkAllocationCallbacks*                pAllocator);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkGetSwapchainImagesKHR(
+    VkDevice                                    device,
+    VkSwapchainKHR                              swapchain,
+    uint32_t*                                   pSwapchainImageCount,
+    VkImage*                                    pSwapchainImages);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkAcquireNextImageKHR(
+    VkDevice                                    device,
+    VkSwapchainKHR                              swapchain,
+    uint64_t                                    timeout,
+    VkSemaphore                                 semaphore,
+    VkFence                                     fence,
+    uint32_t*                                   pImageIndex);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkQueuePresentKHR(
+    VkQueue                                     queue,
+    const VkPresentInfoKHR*                     pPresentInfo);
+#endif
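At runtime these entry points form a per-frame acquire/present loop. A minimal sketch, assuming device, swapchain, queue and the two semaphores were created beforehand; error handling omitted:

uint32_t imageIndex;
vkAcquireNextImageKHR(device, swapchain, UINT64_MAX,
                      imageAvailableSemaphore, VK_NULL_HANDLE, &imageIndex);

/* ... submit a command buffer that renders to swapchain image imageIndex
 *     and signals renderDoneSemaphore ... */

VkPresentInfoKHR presentInfo = {
   .sType = VK_STRUCTURE_TYPE_PRESENT_INFO_KHR,
   .waitSemaphoreCount = 1,
   .pWaitSemaphores = &renderDoneSemaphore,
   .swapchainCount = 1,
   .pSwapchains = &swapchain,
   .pImageIndices = &imageIndex,
};
vkQueuePresentKHR(queue, &presentInfo);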
+
+#define VK_KHR_display 1
+VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkDisplayKHR)
+VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkDisplayModeKHR)
+
+#define VK_KHR_DISPLAY_SPEC_VERSION       21
+#define VK_KHR_DISPLAY_EXTENSION_NAME     "VK_KHR_display"
+
+
+typedef enum VkDisplayPlaneAlphaFlagBitsKHR {
+    VK_DISPLAY_PLANE_ALPHA_OPAQUE_BIT_KHR = 0x00000001,
+    VK_DISPLAY_PLANE_ALPHA_GLOBAL_BIT_KHR = 0x00000002,
+    VK_DISPLAY_PLANE_ALPHA_PER_PIXEL_BIT_KHR = 0x00000004,
+    VK_DISPLAY_PLANE_ALPHA_PER_PIXEL_PREMULTIPLIED_BIT_KHR = 0x00000008,
+} VkDisplayPlaneAlphaFlagBitsKHR;
+typedef VkFlags VkDisplayModeCreateFlagsKHR;
+typedef VkFlags VkDisplayPlaneAlphaFlagsKHR;
+typedef VkFlags VkDisplaySurfaceCreateFlagsKHR;
+
+typedef struct VkDisplayPropertiesKHR {
+    VkDisplayKHR                  display;
+    const char*                   displayName;
+    VkExtent2D                    physicalDimensions;
+    VkExtent2D                    physicalResolution;
+    VkSurfaceTransformFlagsKHR    supportedTransforms;
+    VkBool32                      planeReorderPossible;
+    VkBool32                      persistentContent;
+} VkDisplayPropertiesKHR;
+
+typedef struct VkDisplayModeParametersKHR {
+    VkExtent2D    visibleRegion;
+    uint32_t      refreshRate;
+} VkDisplayModeParametersKHR;
+
+typedef struct VkDisplayModePropertiesKHR {
+    VkDisplayModeKHR              displayMode;
+    VkDisplayModeParametersKHR    parameters;
+} VkDisplayModePropertiesKHR;
+
+typedef struct VkDisplayModeCreateInfoKHR {
+    VkStructureType                sType;
+    const void*                    pNext;
+    VkDisplayModeCreateFlagsKHR    flags;
+    VkDisplayModeParametersKHR     parameters;
+} VkDisplayModeCreateInfoKHR;
+
+typedef struct VkDisplayPlaneCapabilitiesKHR {
+    VkDisplayPlaneAlphaFlagsKHR    supportedAlpha;
+    VkOffset2D                     minSrcPosition;
+    VkOffset2D                     maxSrcPosition;
+    VkExtent2D                     minSrcExtent;
+    VkExtent2D                     maxSrcExtent;
+    VkOffset2D                     minDstPosition;
+    VkOffset2D                     maxDstPosition;
+    VkExtent2D                     minDstExtent;
+    VkExtent2D                     maxDstExtent;
+} VkDisplayPlaneCapabilitiesKHR;
+
+typedef struct VkDisplayPlanePropertiesKHR {
+    VkDisplayKHR    currentDisplay;
+    uint32_t        currentStackIndex;
+} VkDisplayPlanePropertiesKHR;
+
+typedef struct VkDisplaySurfaceCreateInfoKHR {
+    VkStructureType                   sType;
+    const void*                       pNext;
+    VkDisplaySurfaceCreateFlagsKHR    flags;
+    VkDisplayModeKHR                  displayMode;
+    uint32_t                          planeIndex;
+    uint32_t                          planeStackIndex;
+    VkSurfaceTransformFlagBitsKHR     transform;
+    float                             globalAlpha;
+    VkDisplayPlaneAlphaFlagBitsKHR    alphaMode;
+    VkExtent2D                        imageExtent;
+} VkDisplaySurfaceCreateInfoKHR;
+
+
+typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceDisplayPropertiesKHR)(VkPhysicalDevice physicalDevice, uint32_t* pPropertyCount, VkDisplayPropertiesKHR* pProperties);
+typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceDisplayPlanePropertiesKHR)(VkPhysicalDevice physicalDevice, uint32_t* pPropertyCount, VkDisplayPlanePropertiesKHR* pProperties);
+typedef VkResult (VKAPI_PTR *PFN_vkGetDisplayPlaneSupportedDisplaysKHR)(VkPhysicalDevice physicalDevice, uint32_t planeIndex, uint32_t* pDisplayCount, VkDisplayKHR* pDisplays);
+typedef VkResult (VKAPI_PTR *PFN_vkGetDisplayModePropertiesKHR)(VkPhysicalDevice physicalDevice, VkDisplayKHR display, uint32_t* pPropertyCount, VkDisplayModePropertiesKHR* pProperties);
+typedef VkResult (VKAPI_PTR *PFN_vkCreateDisplayModeKHR)(VkPhysicalDevice physicalDevice, VkDisplayKHR display, const VkDisplayModeCreateInfoKHR*pCreateInfo, const VkAllocationCallbacks* pAllocator, VkDisplayModeKHR* pMode);
+typedef VkResult (VKAPI_PTR *PFN_vkGetDisplayPlaneCapabilitiesKHR)(VkPhysicalDevice physicalDevice, VkDisplayModeKHR mode, uint32_t planeIndex, VkDisplayPlaneCapabilitiesKHR* pCapabilities);
+typedef VkResult (VKAPI_PTR *PFN_vkCreateDisplayPlaneSurfaceKHR)(VkInstance instance, const VkDisplaySurfaceCreateInfoKHR* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSurfaceKHR* pSurface);
+
+#ifndef VK_NO_PROTOTYPES
+VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceDisplayPropertiesKHR(
+    VkPhysicalDevice                            physicalDevice,
+    uint32_t*                                   pPropertyCount,
+    VkDisplayPropertiesKHR*                     pProperties);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceDisplayPlanePropertiesKHR(
+    VkPhysicalDevice                            physicalDevice,
+    uint32_t*                                   pPropertyCount,
+    VkDisplayPlanePropertiesKHR*                pProperties);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkGetDisplayPlaneSupportedDisplaysKHR(
+    VkPhysicalDevice                            physicalDevice,
+    uint32_t                                    planeIndex,
+    uint32_t*                                   pDisplayCount,
+    VkDisplayKHR*                               pDisplays);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkGetDisplayModePropertiesKHR(
+    VkPhysicalDevice                            physicalDevice,
+    VkDisplayKHR                                display,
+    uint32_t*                                   pPropertyCount,
+    VkDisplayModePropertiesKHR*                 pProperties);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkCreateDisplayModeKHR(
+    VkPhysicalDevice                            physicalDevice,
+    VkDisplayKHR                                display,
+    const VkDisplayModeCreateInfoKHR*           pCreateInfo,
+    const VkAllocationCallbacks*                pAllocator,
+    VkDisplayModeKHR*                           pMode);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkGetDisplayPlaneCapabilitiesKHR(
+    VkPhysicalDevice                            physicalDevice,
+    VkDisplayModeKHR                            mode,
+    uint32_t                                    planeIndex,
+    VkDisplayPlaneCapabilitiesKHR*              pCapabilities);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkCreateDisplayPlaneSurfaceKHR(
+    VkInstance                                  instance,
+    const VkDisplaySurfaceCreateInfoKHR*        pCreateInfo,
+    const VkAllocationCallbacks*                pAllocator,
+    VkSurfaceKHR*                               pSurface);
+#endif
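Display enumeration uses the same count-then-fetch idiom. A brief sketch listing the connected displays, assuming a valid physicalDevice and the usual C standard headers:

uint32_t displayCount = 0;
vkGetPhysicalDeviceDisplayPropertiesKHR(physicalDevice, &displayCount, NULL);

VkDisplayPropertiesKHR *displays = calloc(displayCount, sizeof(*displays));
vkGetPhysicalDeviceDisplayPropertiesKHR(physicalDevice, &displayCount, displays);

for (uint32_t i = 0; i < displayCount; i++)
   printf("display %u: %s\n", i, displays[i].displayName);
free(displays);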
+
+#define VK_KHR_display_swapchain 1
+#define VK_KHR_DISPLAY_SWAPCHAIN_SPEC_VERSION 9
+#define VK_KHR_DISPLAY_SWAPCHAIN_EXTENSION_NAME "VK_KHR_display_swapchain"
+
+typedef struct VkDisplayPresentInfoKHR {
+    VkStructureType    sType;
+    const void*        pNext;
+    VkRect2D           srcRect;
+    VkRect2D           dstRect;
+    VkBool32           persistent;
+} VkDisplayPresentInfoKHR;
+
+
+typedef VkResult (VKAPI_PTR *PFN_vkCreateSharedSwapchainsKHR)(VkDevice device, uint32_t swapchainCount, const VkSwapchainCreateInfoKHR* pCreateInfos, const VkAllocationCallbacks* pAllocator, VkSwapchainKHR* pSwapchains);
+
+#ifndef VK_NO_PROTOTYPES
+VKAPI_ATTR VkResult VKAPI_CALL vkCreateSharedSwapchainsKHR(
+    VkDevice                                    device,
+    uint32_t                                    swapchainCount,
+    const VkSwapchainCreateInfoKHR*             pCreateInfos,
+    const VkAllocationCallbacks*                pAllocator,
+    VkSwapchainKHR*                             pSwapchains);
+#endif
+
+#ifdef VK_USE_PLATFORM_XLIB_KHR
+#define VK_KHR_xlib_surface 1
+#include <X11/Xlib.h>
+
+#define VK_KHR_XLIB_SURFACE_SPEC_VERSION  6
+#define VK_KHR_XLIB_SURFACE_EXTENSION_NAME "VK_KHR_xlib_surface"
+
+typedef VkFlags VkXlibSurfaceCreateFlagsKHR;
+
+typedef struct VkXlibSurfaceCreateInfoKHR {
+    VkStructureType                sType;
+    const void*                    pNext;
+    VkXlibSurfaceCreateFlagsKHR    flags;
+    Display*                       dpy;
+    Window                         window;
+} VkXlibSurfaceCreateInfoKHR;
+
+
+typedef VkResult (VKAPI_PTR *PFN_vkCreateXlibSurfaceKHR)(VkInstance instance, const VkXlibSurfaceCreateInfoKHR* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSurfaceKHR* pSurface);
+typedef VkBool32 (VKAPI_PTR *PFN_vkGetPhysicalDeviceXlibPresentationSupportKHR)(VkPhysicalDevice physicalDevice, uint32_t queueFamilyIndex, Display* dpy, VisualID visualID);
+
+#ifndef VK_NO_PROTOTYPES
+VKAPI_ATTR VkResult VKAPI_CALL vkCreateXlibSurfaceKHR(
+    VkInstance                                  instance,
+    const VkXlibSurfaceCreateInfoKHR*           pCreateInfo,
+    const VkAllocationCallbacks*                pAllocator,
+    VkSurfaceKHR*                               pSurface);
+
+VKAPI_ATTR VkBool32 VKAPI_CALL vkGetPhysicalDeviceXlibPresentationSupportKHR(
+    VkPhysicalDevice                            physicalDevice,
+    uint32_t                                    queueFamilyIndex,
+    Display*                                    dpy,
+    VisualID                                    visualID);
+#endif
+#endif /* VK_USE_PLATFORM_XLIB_KHR */
+
+#ifdef VK_USE_PLATFORM_XCB_KHR
+#define VK_KHR_xcb_surface 1
+#include <xcb/xcb.h>
+
+#define VK_KHR_XCB_SURFACE_SPEC_VERSION   6
+#define VK_KHR_XCB_SURFACE_EXTENSION_NAME "VK_KHR_xcb_surface"
+
+typedef VkFlags VkXcbSurfaceCreateFlagsKHR;
+
+typedef struct VkXcbSurfaceCreateInfoKHR {
+    VkStructureType               sType;
+    const void*                   pNext;
+    VkXcbSurfaceCreateFlagsKHR    flags;
+    xcb_connection_t*             connection;
+    xcb_window_t                  window;
+} VkXcbSurfaceCreateInfoKHR;
+
+
+typedef VkResult (VKAPI_PTR *PFN_vkCreateXcbSurfaceKHR)(VkInstance instance, const VkXcbSurfaceCreateInfoKHR* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSurfaceKHR* pSurface);
+typedef VkBool32 (VKAPI_PTR *PFN_vkGetPhysicalDeviceXcbPresentationSupportKHR)(VkPhysicalDevice physicalDevice, uint32_t queueFamilyIndex, xcb_connection_t* connection, xcb_visualid_t visual_id);
+
+#ifndef VK_NO_PROTOTYPES
+VKAPI_ATTR VkResult VKAPI_CALL vkCreateXcbSurfaceKHR(
+    VkInstance                                  instance,
+    const VkXcbSurfaceCreateInfoKHR*            pCreateInfo,
+    const VkAllocationCallbacks*                pAllocator,
+    VkSurfaceKHR*                               pSurface);
+
+VKAPI_ATTR VkBool32 VKAPI_CALL vkGetPhysicalDeviceXcbPresentationSupportKHR(
+    VkPhysicalDevice                            physicalDevice,
+    uint32_t                                    queueFamilyIndex,
+    xcb_connection_t*                           connection,
+    xcb_visualid_t                              visual_id);
+#endif
+#endif /* VK_USE_PLATFORM_XCB_KHR */
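Turning an existing XCB window into a presentable surface is a single call. A sketch, assuming VK_USE_PLATFORM_XCB_KHR is defined, the extension was enabled at instance creation, and instance, connection and window already exist:

VkXcbSurfaceCreateInfoKHR surfaceInfo = {
   .sType = VK_STRUCTURE_TYPE_XCB_SURFACE_CREATE_INFO_KHR,
   .connection = connection,
   .window = window,
};
VkSurfaceKHR surface;
vkCreateXcbSurfaceKHR(instance, &surfaceInfo, NULL, &surface);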
+
+#ifdef VK_USE_PLATFORM_WAYLAND_KHR
+#define VK_KHR_wayland_surface 1
+#include <wayland-client.h>
+
+#define VK_KHR_WAYLAND_SURFACE_SPEC_VERSION 5
+#define VK_KHR_WAYLAND_SURFACE_EXTENSION_NAME "VK_KHR_wayland_surface"
+
+typedef VkFlags VkWaylandSurfaceCreateFlagsKHR;
+
+typedef struct VkWaylandSurfaceCreateInfoKHR {
+    VkStructureType                   sType;
+    const void*                       pNext;
+    VkWaylandSurfaceCreateFlagsKHR    flags;
+    struct wl_display*                display;
+    struct wl_surface*                surface;
+} VkWaylandSurfaceCreateInfoKHR;
+
+
+typedef VkResult (VKAPI_PTR *PFN_vkCreateWaylandSurfaceKHR)(VkInstance instance, const VkWaylandSurfaceCreateInfoKHR* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSurfaceKHR* pSurface);
+typedef VkBool32 (VKAPI_PTR *PFN_vkGetPhysicalDeviceWaylandPresentationSupportKHR)(VkPhysicalDevice physicalDevice, uint32_t queueFamilyIndex, struct wl_display* display);
+
+#ifndef VK_NO_PROTOTYPES
+VKAPI_ATTR VkResult VKAPI_CALL vkCreateWaylandSurfaceKHR(
+    VkInstance                                  instance,
+    const VkWaylandSurfaceCreateInfoKHR*        pCreateInfo,
+    const VkAllocationCallbacks*                pAllocator,
+    VkSurfaceKHR*                               pSurface);
+
+VKAPI_ATTR VkBool32 VKAPI_CALL vkGetPhysicalDeviceWaylandPresentationSupportKHR(
+    VkPhysicalDevice                            physicalDevice,
+    uint32_t                                    queueFamilyIndex,
+    struct wl_display*                          display);
+#endif
+#endif /* VK_USE_PLATFORM_WAYLAND_KHR */
+
+#ifdef VK_USE_PLATFORM_MIR_KHR
+#define VK_KHR_mir_surface 1
+#include <mir_toolkit/client_types.h>
+
+#define VK_KHR_MIR_SURFACE_SPEC_VERSION   4
+#define VK_KHR_MIR_SURFACE_EXTENSION_NAME "VK_KHR_mir_surface"
+
+typedef VkFlags VkMirSurfaceCreateFlagsKHR;
+
+typedef struct VkMirSurfaceCreateInfoKHR {
+    VkStructureType               sType;
+    const void*                   pNext;
+    VkMirSurfaceCreateFlagsKHR    flags;
+    MirConnection*                connection;
+    MirSurface*                   mirSurface;
+} VkMirSurfaceCreateInfoKHR;
+
+
+typedef VkResult (VKAPI_PTR *PFN_vkCreateMirSurfaceKHR)(VkInstance instance, const VkMirSurfaceCreateInfoKHR* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSurfaceKHR* pSurface);
+typedef VkBool32 (VKAPI_PTR *PFN_vkGetPhysicalDeviceMirPresentationSupportKHR)(VkPhysicalDevice physicalDevice, uint32_t queueFamilyIndex, MirConnection* connection);
+
+#ifndef VK_NO_PROTOTYPES
+VKAPI_ATTR VkResult VKAPI_CALL vkCreateMirSurfaceKHR(
+    VkInstance                                  instance,
+    const VkMirSurfaceCreateInfoKHR*            pCreateInfo,
+    const VkAllocationCallbacks*                pAllocator,
+    VkSurfaceKHR*                               pSurface);
+
+VKAPI_ATTR VkBool32 VKAPI_CALL vkGetPhysicalDeviceMirPresentationSupportKHR(
+    VkPhysicalDevice                            physicalDevice,
+    uint32_t                                    queueFamilyIndex,
+    MirConnection*                              connection);
+#endif
+#endif /* VK_USE_PLATFORM_MIR_KHR */
+
+#ifdef VK_USE_PLATFORM_ANDROID_KHR
+#define VK_KHR_android_surface 1
+#include <android/native_window.h>
+
+#define VK_KHR_ANDROID_SURFACE_SPEC_VERSION 6
+#define VK_KHR_ANDROID_SURFACE_EXTENSION_NAME "VK_KHR_android_surface"
+
+typedef VkFlags VkAndroidSurfaceCreateFlagsKHR;
+
+typedef struct VkAndroidSurfaceCreateInfoKHR {
+    VkStructureType                   sType;
+    const void*                       pNext;
+    VkAndroidSurfaceCreateFlagsKHR    flags;
+    ANativeWindow*                    window;
+} VkAndroidSurfaceCreateInfoKHR;
+
+
+typedef VkResult (VKAPI_PTR *PFN_vkCreateAndroidSurfaceKHR)(VkInstance instance, const VkAndroidSurfaceCreateInfoKHR* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSurfaceKHR* pSurface);
+
+#ifndef VK_NO_PROTOTYPES
+VKAPI_ATTR VkResult VKAPI_CALL vkCreateAndroidSurfaceKHR(
+    VkInstance                                  instance,
+    const VkAndroidSurfaceCreateInfoKHR*        pCreateInfo,
+    const VkAllocationCallbacks*                pAllocator,
+    VkSurfaceKHR*                               pSurface);
+#endif
+#endif /* VK_USE_PLATFORM_ANDROID_KHR */
+
+#ifdef VK_USE_PLATFORM_WIN32_KHR
+#define VK_KHR_win32_surface 1
+#include <windows.h>
+
+#define VK_KHR_WIN32_SURFACE_SPEC_VERSION 5
+#define VK_KHR_WIN32_SURFACE_EXTENSION_NAME "VK_KHR_win32_surface"
+
+typedef VkFlags VkWin32SurfaceCreateFlagsKHR;
+
+typedef struct VkWin32SurfaceCreateInfoKHR {
+    VkStructureType                 sType;
+    const void*                     pNext;
+    VkWin32SurfaceCreateFlagsKHR    flags;
+    HINSTANCE                       hinstance;
+    HWND                            hwnd;
+} VkWin32SurfaceCreateInfoKHR;
+
+
+typedef VkResult (VKAPI_PTR *PFN_vkCreateWin32SurfaceKHR)(VkInstance instance, const VkWin32SurfaceCreateInfoKHR* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSurfaceKHR* pSurface);
+typedef VkBool32 (VKAPI_PTR *PFN_vkGetPhysicalDeviceWin32PresentationSupportKHR)(VkPhysicalDevice physicalDevice, uint32_t queueFamilyIndex);
+
+#ifndef VK_NO_PROTOTYPES
+VKAPI_ATTR VkResult VKAPI_CALL vkCreateWin32SurfaceKHR(
+    VkInstance                                  instance,
+    const VkWin32SurfaceCreateInfoKHR*          pCreateInfo,
+    const VkAllocationCallbacks*                pAllocator,
+    VkSurfaceKHR*                               pSurface);
+
+VKAPI_ATTR VkBool32 VKAPI_CALL vkGetPhysicalDeviceWin32PresentationSupportKHR(
+    VkPhysicalDevice                            physicalDevice,
+    uint32_t                                    queueFamilyIndex);
+#endif
+#endif /* VK_USE_PLATFORM_WIN32_KHR */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/include/vulkan/vulkan_intel.h b/include/vulkan/vulkan_intel.h
new file mode 100644 (file)
index 0000000..1f77128
--- /dev/null
@@ -0,0 +1,62 @@
+/*
+ * Copyright © 2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#ifndef __VULKAN_INTEL_H__
+#define __VULKAN_INTEL_H__
+
+#include "vulkan.h"
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif // __cplusplus
+
+#define VK_STRUCTURE_TYPE_DMA_BUF_IMAGE_CREATE_INFO_INTEL 1024
+typedef struct VkDmaBufImageCreateInfo_
+{
+    VkStructureType                             sType;                      // Must be VK_STRUCTURE_TYPE_DMA_BUF_IMAGE_CREATE_INFO_INTEL
+    const void*                                 pNext;                      // Pointer to next structure.
+    int                                         fd;
+    VkFormat                                    format;
+    VkExtent3D                                  extent;         // Depth must be 1
+    uint32_t                                    strideInBytes;
+} VkDmaBufImageCreateInfo;
+
+typedef VkResult (VKAPI_PTR *PFN_vkCreateDmaBufImageINTEL)(VkDevice device, const VkDmaBufImageCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkDeviceMemory* pMem, VkImage* pImage);
+
+#ifdef VK_PROTOTYPES
+
+VKAPI_ATTR VkResult VKAPI_CALL vkCreateDmaBufImageINTEL(
+    VkDevice                                    _device,
+    const VkDmaBufImageCreateInfo*              pCreateInfo,
+    const VkAllocationCallbacks*                pAllocator,
+    VkDeviceMemory*                             pMem,
+    VkImage*                                    pImage);
+
+#endif
+
+#ifdef __cplusplus
+} // extern "C"
+#endif // __cplusplus
+
+#endif // __VULKAN_INTEL_H__
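The extension above imports a dma-buf file descriptor as a bound VkDeviceMemory/VkImage pair in one call. A hedged sketch, assuming device is a valid VkDevice, fd came from e.g. GBM or EGL, and width, height and stride describe the underlying buffer:

VkDmaBufImageCreateInfo imageInfo = {
   .sType = VK_STRUCTURE_TYPE_DMA_BUF_IMAGE_CREATE_INFO_INTEL,
   .fd = fd,
   .format = VK_FORMAT_B8G8R8A8_UNORM,   /* must match the buffer contents */
   .extent = { width, height, 1 },       /* depth must be 1 */
   .strideInBytes = stride,
};
VkDeviceMemory memory;
VkImage image;
vkCreateDmaBufImageINTEL(device, &imageInfo, NULL, &memory, &image);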
index 9b23cf58f4ff96b0c555fa0d93b11a5952c459f3..73686a93b3c2384dd6ce6eb8b56c960ab1cc73dc 100644 (file)
@@ -56,6 +56,10 @@ EXTRA_DIST = \
 AM_CFLAGS = $(VISIBILITY_CFLAGS)
 AM_CXXFLAGS = $(VISIBILITY_CXXFLAGS)
 
+if HAVE_VULKAN
+SUBDIRS += intel
+endif
+
 AM_CPPFLAGS = \
        -I$(top_srcdir)/include/ \
        -I$(top_srcdir)/src/mapi/ \
index f218af1b40527453dbc96896793b46f4d7f57d55..8f37448469637868d4bf0e0243a9c70ec52b8f52 100644 (file)
@@ -84,7 +84,7 @@ check_PROGRAMS +=                                     \
        glsl/tests/sampler-types-test                   \
        glsl/tests/uniform-initializer-test
 
-noinst_PROGRAMS = glsl_compiler
+noinst_PROGRAMS = glsl_compiler spirv2nir
 
 glsl_tests_blob_test_SOURCES =                         \
        glsl/tests/blob_test.c
@@ -176,6 +176,20 @@ glsl_glsl_test_LDADD =                                     \
        $(top_builddir)/src/libglsl_util.la             \
        $(PTHREAD_LIBS)
 
+spirv2nir_SOURCES = \
+       nir/spirv2nir.c
+
+spirv2nir_CPPFLAGS =                                   \
+       $(AM_CPPFLAGS)                                  \
+       -I$(top_builddir)/src/compiler/nir              \
+       -I$(top_srcdir)/src/compiler/nir
+
+spirv2nir_LDADD =                                      \
+       nir/libnir.la                                   \
+       $(top_builddir)/src/util/libmesautil.la         \
+       -lm -lstdc++                                    \
+       $(PTHREAD_LIBS)
+
 # We write our own rules for yacc and lex below. We'd rather use automake,
 # but automake makes it especially difficult for a number of reasons:
 #
@@ -264,6 +278,7 @@ nir_libnir_la_LIBADD = \
 
 nir_libnir_la_SOURCES =                                        \
        $(NIR_FILES)                                    \
+       $(SPIRV_FILES)                                  \
        $(NIR_GENERATED_FILES)
 
 PYTHON_GEN = $(AM_V_GEN)$(PYTHON2) $(PYTHON_FLAGS)
index 9f3bcf0255b9af715de10172710e658f44679d56..b0b8281869d43ced298096f25d7c6c743ec6d983 100644 (file)
@@ -175,7 +175,9 @@ NIR_FILES = \
        nir/nir_control_flow_private.h \
        nir/nir_dominance.c \
        nir/nir_from_ssa.c \
+       nir/nir_gather_info.c \
        nir/nir_gs_count_vertices.c \
+       nir/nir_inline_functions.c \
        nir/nir_intrinsics.c \
        nir/nir_intrinsics.h \
        nir/nir_instr_set.c \
@@ -193,6 +195,7 @@ NIR_FILES = \
        nir/nir_lower_io.c \
        nir/nir_lower_outputs_to_temporaries.c \
        nir/nir_lower_phis_to_scalar.c \
+       nir/nir_lower_returns.c \
        nir/nir_lower_samplers.c \
        nir/nir_lower_system_values.c \
        nir/nir_lower_tex.c \
@@ -214,8 +217,11 @@ NIR_FILES = \
        nir/nir_opt_peephole_select.c \
        nir/nir_opt_remove_phis.c \
        nir/nir_opt_undef.c \
+       nir/nir_phi_builder.c \
+       nir/nir_phi_builder.h \
        nir/nir_print.c \
        nir/nir_remove_dead_variables.c \
+       nir/nir_repair_ssa.c \
        nir/nir_search.c \
        nir/nir_search.h \
        nir/nir_split_var_copies.c \
@@ -225,3 +231,12 @@ NIR_FILES = \
        nir/nir_vla.h \
        nir/nir_worklist.c \
        nir/nir_worklist.h
+
+SPIRV_FILES = \
+       nir/spirv/nir_spirv.h \
+       nir/spirv/spirv_to_nir.c \
+       nir/spirv/vtn_alu.c \
+       nir/spirv/vtn_cfg.c \
+       nir/spirv/vtn_glsl450.c \
+       nir/spirv/vtn_private.h \
+       nir/spirv/vtn_variables.c
index 09951bac4455e3e3c9afd6b175ca53bb9a9dfc23..6db4e738f6e1f26ec7af3f5634856b010ba9bb1d 100644 (file)
@@ -3,6 +3,7 @@ glsl_parser.cpp
 glsl_parser.h
 glsl_parser.output
 glsl_test
+spirv2nir
 subtest-cr/
 subtest-lf/
 subtest-cr-lf/
index 9954b81240338659ec881300cf3eec29dcc067c4..d6b1f9ed6958da6d852e88aba95229352c7b0390 100644 (file)
@@ -66,7 +66,7 @@ check_PROGRAMS =                                      \
        tests/sampler-types-test                        \
        tests/uniform-initializer-test
 
-noinst_PROGRAMS = glsl_compiler
+noinst_PROGRAMS = glsl_compiler spirv2nir
 
 tests_blob_test_SOURCES =                              \
        tests/blob_test.c
@@ -135,7 +135,6 @@ libglsl_la_SOURCES =                                        \
        glsl_parser.h                                   \
        $(LIBGLSL_FILES)
 
-
 glsl_compiler_SOURCES = \
        $(GLSL_COMPILER_CXX_FILES)
 
@@ -145,6 +144,16 @@ glsl_compiler_LDADD =                                      \
        $(top_builddir)/src/util/libmesautil.la         \
        $(PTHREAD_LIBS)
 
+spirv2nir_SOURCES = \
+       standalone_scaffolding.cpp \
+       standalone_scaffolding.h \
+       nir/spirv2nir.c
+
+spirv2nir_LDADD =                                      \
+       libglsl.la                                      \
+       $(top_builddir)/src/libglsl_util.la             \
+       $(PTHREAD_LIBS)
+
 glsl_test_SOURCES = \
        standalone_scaffolding.cpp \
        test.cpp \
index 08b40c5cc8fb64006bd7dbfd9b2ca09f0065f18f..3f537d5b37acaf83549cca35132290b5dac82bf4 100644 (file)
@@ -29,7 +29,9 @@ NIR_FILES = \
        nir/nir_control_flow_private.h \
        nir/nir_dominance.c \
        nir/nir_from_ssa.c \
+       nir/nir_gather_info.c \
        nir/nir_gs_count_vertices.c \
+       nir/nir_inline_functions.c \
        nir/nir_intrinsics.c \
        nir/nir_intrinsics.h \
        nir/nir_instr_set.c \
@@ -38,8 +40,10 @@ NIR_FILES = \
        nir/nir_lower_alu_to_scalar.c \
        nir/nir_lower_atomics.c \
        nir/nir_lower_clip.c \
+       nir/nir_lower_returns.c \
        nir/nir_lower_global_vars_to_local.c \
        nir/nir_lower_gs_intrinsics.c \
+       nir/nir_lower_indirect_derefs.c \
        nir/nir_lower_load_const_to_scalar.c \
        nir/nir_lower_locals_to_regs.c \
        nir/nir_lower_idiv.c \
@@ -67,8 +71,11 @@ NIR_FILES = \
        nir/nir_opt_peephole_select.c \
        nir/nir_opt_remove_phis.c \
        nir/nir_opt_undef.c \
+       nir/nir_phi_builder.c \
+       nir/nir_phi_builder.h \
        nir/nir_print.c \
        nir/nir_remove_dead_variables.c \
+       nir/nir_repair_ssa.c \
        nir/nir_search.c \
        nir/nir_search.h \
        nir/nir_split_var_copies.c \
@@ -79,6 +86,15 @@ NIR_FILES = \
        nir/nir_worklist.c \
        nir/nir_worklist.h
 
+SPIRV_FILES = \
+       nir/spirv/nir_spirv.h \
+       nir/spirv/spirv_to_nir.c \
+       nir/spirv/vtn_alu.c \
+       nir/spirv/vtn_cfg.c \
+       nir/spirv/vtn_glsl450.c \
+       nir/spirv/vtn_private.h \
+       nir/spirv/vtn_variables.c
+
 # libglsl
 
 LIBGLSL_FILES = \
index 1c6cd43cd6885a770b4497538a6d62d3aff92ec0..1ac8489b45ac90869c27f01c89861f21e485b35e 100644 (file)
@@ -86,6 +86,8 @@ _mesa_glsl_parse_state::_mesa_glsl_parse_state(struct gl_context *_ctx,
 
    this->extensions = &ctx->Extensions;
 
+   this->ARB_compute_shader_enable = true;
+
    this->Const.MaxLights = ctx->Const.MaxLights;
    this->Const.MaxClipPlanes = ctx->Const.MaxClipPlanes;
    this->Const.MaxTextureUnits = ctx->Const.MaxTextureUnits;
index d5d214b57cc07257fdae4f36e2afbdc9f2d77816..0f7a16a5e6f61bed9792c0707d88bc5349c81c38 100644 (file)
 #include "util/ralloc.h"
 #include "util/strtod.h"
 
+extern "C" void
+_mesa_error_no_memory(const char *caller)
+{
+   fprintf(stderr, "Mesa error: out of memory in %s\n", caller);
+}
+
 void
 _mesa_warning(struct gl_context *ctx, const char *fmt, ...)
 {
index f31547b9aac13014c3e83da343f725162854d1d7..a876eff289af24d60324b0ed219280852c8a93db 100644 (file)
@@ -19,7 +19,9 @@ NIR_FILES = \
        nir_control_flow_private.h \
        nir_dominance.c \
        nir_from_ssa.c \
+       nir_gather_info.c \
        nir_gs_count_vertices.c \
+       nir_inline_functions.c \
        nir_intrinsics.c \
        nir_intrinsics.h \
        nir_instr_set.c \
@@ -37,6 +39,7 @@ NIR_FILES = \
        nir_lower_io.c \
        nir_lower_outputs_to_temporaries.c \
        nir_lower_phis_to_scalar.c \
+       nir_lower_returns.c \
        nir_lower_samplers.c \
        nir_lower_system_values.c \
        nir_lower_tex.c \
@@ -58,8 +61,11 @@ NIR_FILES = \
        nir_opt_peephole_select.c \
        nir_opt_remove_phis.c \
        nir_opt_undef.c \
+       nir_phi_builder.c \
+       nir_phi_builder.h \
        nir_print.c \
        nir_remove_dead_variables.c \
+       nir_repair_ssa.c \
        nir_search.c \
        nir_search.h \
        nir_split_var_copies.c \
@@ -70,3 +76,12 @@ NIR_FILES = \
        nir_worklist.c \
        nir_worklist.h
 
+SPIRV_FILES = \
+       spirv/nir_spirv.h \
+       spirv/spirv_to_nir.c \
+       spirv/vtn_alu.c \
+       spirv/vtn_cfg.c \
+       spirv/vtn_glsl450.c \
+       spirv/vtn_private.h \
+       spirv/vtn_variables.c
+
index 613b138ae59e09e2f8a8192b51f5dadff2e3f9fe..da5d730b49e31f562495bda926dc8f944685a4ff 100644 (file)
@@ -46,7 +46,7 @@ namespace {
 class nir_visitor : public ir_visitor
 {
 public:
-   nir_visitor(nir_shader *shader);
+   nir_visitor(nir_shader *shader, gl_shader *sh);
    ~nir_visitor();
 
    virtual void visit(ir_variable *);
@@ -85,6 +85,8 @@ private:
 
    bool supports_ints;
 
+   struct gl_shader *sh;
+
    nir_shader *shader;
    nir_function_impl *impl;
    nir_builder b;
@@ -138,12 +140,21 @@ glsl_to_nir(const struct gl_shader_program *shader_prog,
 
    nir_shader *shader = nir_shader_create(NULL, stage, options);
 
-   nir_visitor v1(shader);
+   nir_visitor v1(shader, sh);
    nir_function_visitor v2(&v1);
    v2.run(sh->ir);
    visit_exec_list(sh->ir, &v1);
 
-   nir_lower_outputs_to_temporaries(shader);
+   nir_function *main = NULL;
+   nir_foreach_function(shader, func) {
+      if (strcmp(func->name, "main") == 0) {
+         main = func;
+         break;
+      }
+   }
+   assert(main);
+
+   nir_lower_outputs_to_temporaries(shader, main);
 
    shader->info.name = ralloc_asprintf(shader, "GLSL%d", shader_prog->Name);
    if (shader_prog->Label)
@@ -204,10 +215,11 @@ glsl_to_nir(const struct gl_shader_program *shader_prog,
    return shader;
 }
 
-nir_visitor::nir_visitor(nir_shader *shader)
+nir_visitor::nir_visitor(nir_shader *shader, gl_shader *sh)
 {
    this->supports_ints = shader->options->native_integers;
    this->shader = shader;
+   this->sh = sh;
    this->is_global = true;
    this->var_table = _mesa_hash_table_create(NULL, _mesa_hash_pointer,
                                              _mesa_key_pointer_equal);
@@ -386,6 +398,7 @@ nir_visitor::visit(ir_variable *ir)
    }
 
    var->data.index = ir->data.index;
+   var->data.descriptor_set = 0;
    var->data.binding = ir->data.binding;
    var->data.offset = ir->data.offset;
    var->data.image.read_only = ir->data.image_read_only;
index cd78475bdb8f5bda5446afb4f9e0b0f34a033cd6..7e41ed37b0d8e14801f5ad4df3a469e1d27f4c19 100644 (file)
@@ -39,6 +39,7 @@ nir_shader_create(void *mem_ctx,
    exec_list_make_empty(&shader->uniforms);
    exec_list_make_empty(&shader->inputs);
    exec_list_make_empty(&shader->outputs);
+   exec_list_make_empty(&shader->shared);
 
    shader->options = options;
    memset(&shader->info, 0, sizeof(shader->info));
@@ -52,6 +53,7 @@ nir_shader_create(void *mem_ctx,
    shader->num_inputs = 0;
    shader->num_outputs = 0;
    shader->num_uniforms = 0;
+   shader->num_shared = 0;
 
    shader->stage = stage;
 
@@ -136,6 +138,11 @@ nir_shader_add_variable(nir_shader *shader, nir_variable *var)
       exec_list_push_tail(&shader->uniforms, &var->node);
       break;
 
+   case nir_var_shared:
+      assert(shader->stage == MESA_SHADER_COMPUTE);
+      exec_list_push_tail(&shader->shared, &var->node);
+      break;
+
    case nir_var_system_value:
       exec_list_push_tail(&shader->system_values, &var->node);
       break;
@@ -716,6 +723,69 @@ nir_cf_node_get_function(nir_cf_node *node)
    return nir_cf_node_as_function(node);
 }
 
+/* Reduces a cursor by trying to convert everything to after and trying to
+ * go up to block granularity when possible.
+ */
+static nir_cursor
+reduce_cursor(nir_cursor cursor)
+{
+   switch (cursor.option) {
+   case nir_cursor_before_block:
+      if (exec_list_is_empty(&cursor.block->instr_list)) {
+         /* Empty block.  After is as good as before. */
+         cursor.option = nir_cursor_after_block;
+      } else {
+         /* Try to switch to after the previous block if there is one.
+          * (This isn't likely, but it can happen.)
+          */
+         nir_cf_node *prev_node = nir_cf_node_prev(&cursor.block->cf_node);
+         if (prev_node && prev_node->type == nir_cf_node_block) {
+            cursor.block = nir_cf_node_as_block(prev_node);
+            cursor.option = nir_cursor_after_block;
+         }
+      }
+      return cursor;
+
+   case nir_cursor_after_block:
+      return cursor;
+
+   case nir_cursor_before_instr: {
+      nir_instr *prev_instr = nir_instr_prev(cursor.instr);
+      if (prev_instr) {
+         /* Before this instruction is after the previous */
+         cursor.instr = prev_instr;
+         cursor.option = nir_cursor_after_instr;
+      } else {
+         /* No previous instruction.  Switch to before block */
+         cursor.block = cursor.instr->block;
+         cursor.option = nir_cursor_before_block;
+      }
+      return reduce_cursor(cursor);
+   }
+
+   case nir_cursor_after_instr:
+      if (nir_instr_next(cursor.instr) == NULL) {
+         /* This is the last instruction, switch to after block */
+         cursor.option = nir_cursor_after_block;
+         cursor.block = cursor.instr->block;
+      }
+      return cursor;
+
+   default:
+      unreachable("Invalid cursor option");
+   }
+}
+
+bool
+nir_cursors_equal(nir_cursor a, nir_cursor b)
+{
+   /* Reduced cursors should be unique */
+   a = reduce_cursor(a);
+   b = reduce_cursor(b);
+
+   return a.block == b.block && a.option == b.option;
+}
+
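Because reduce_cursor() canonicalizes equivalent positions, cursors built through different helpers can still compare equal. A small illustrative note (instr and block here are hypothetical names):

/* If instr is the last instruction in block, then
 * nir_cursors_equal(nir_after_instr(instr), nir_after_block(block))
 * returns true: reduce_cursor() rewrites the after-instruction form
 * into the after-block form before comparing.
 */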
 static bool
 add_use_cb(nir_src *src, void *state)
 {
index 34f31eb9859846f13878f83c55fcac412073333d..ae37cbf7325cbcfaec490ecf54f1e0cf2c6279e8 100644 (file)
@@ -88,6 +88,7 @@ typedef enum {
    nir_var_local,
    nir_var_uniform,
    nir_var_shader_storage,
+   nir_var_shared,
    nir_var_system_value,
    nir_var_param,
 } nir_variable_mode;
@@ -172,7 +173,7 @@ typedef struct nir_variable {
        *
        * \sa nir_variable_mode
        */
-      nir_variable_mode mode:4;
+      nir_variable_mode mode:5;
 
       /**
        * Interpolation mode for shader inputs / outputs
@@ -264,6 +265,11 @@ typedef struct nir_variable {
        */
       int index;
 
+      /**
+       * Descriptor set for the sampler or UBO binding.
+       */
+      int descriptor_set;
+
       /**
        * Initial binding point for a sampler or UBO.
        *
@@ -332,12 +338,43 @@ typedef struct nir_variable {
 #define nir_foreach_variable(var, var_list) \
    foreach_list_typed(nir_variable, var, node, var_list)
 
+#define nir_foreach_variable_safe(var, var_list) \
+   foreach_list_typed_safe(nir_variable, var, node, var_list)
+
 static inline bool
 nir_variable_is_global(const nir_variable *var)
 {
    return var->data.mode != nir_var_local && var->data.mode != nir_var_param;
 }
 
+/**
+ * Returns the bits in the inputs_read, outputs_written, or
+ * system_values_read bitfield corresponding to this variable.
+ */
+static inline uint64_t
+nir_variable_get_io_mask(nir_variable *var, gl_shader_stage stage)
+{
+   assert(var->data.mode == nir_var_shader_in ||
+          var->data.mode == nir_var_shader_out ||
+          var->data.mode == nir_var_system_value);
+   assert(var->data.location >= 0);
+
+   const struct glsl_type *var_type = var->type;
+   if (stage == MESA_SHADER_GEOMETRY && var->data.mode == nir_var_shader_in) {
+      /* Most geometry shader inputs are per-vertex arrays */
+      if (var->data.location >= VARYING_SLOT_VAR0)
+         assert(glsl_type_is_array(var_type));
+
+      if (glsl_type_is_array(var_type))
+         var_type = glsl_get_array_element(var_type);
+   }
+
+   bool is_vertex_input = (var->data.mode == nir_var_shader_in &&
+                           stage == MESA_SHADER_VERTEX);
+   unsigned slots = glsl_count_attribute_slots(var_type, is_vertex_input);
+   return ((1ull << slots) - 1) << var->data.location;
+}
+
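The returned mask is a contiguous run of bits starting at the variable's location. A worked example of the arithmetic, with values chosen purely for illustration:

/* An output covering 2 attribute slots at var->data.location == 4 gives
 *    slots = 2
 *    ((1ull << 2) - 1) << 4  ==  0x3 << 4  ==  0x30
 * i.e. bits 4 and 5 of outputs_written are set for this variable.
 */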
 typedef struct nir_register {
    struct exec_node node;
 
@@ -891,6 +928,22 @@ typedef enum {
     */
    NIR_INTRINSIC_UCP_ID = 4,
 
+   /**
+    * The range of a load operation.  This specifies the maximum amount of
+    * data starting at the base offset (if any) that can be accessed.
+    */
+   NIR_INTRINSIC_RANGE = 5,
+
+   /**
+    * The Vulkan descriptor set for the vulkan_resource_index intrinsic.
+    */
+   NIR_INTRINSIC_DESC_SET = 6,
+
+   /**
+    * The Vulkan descriptor set binding for the vulkan_resource_index intrinsic.
+    */
+   NIR_INTRINSIC_BINDING = 7,
+
    NIR_INTRINSIC_NUM_INDEX_FLAGS,
 
 } nir_intrinsic_index_flag;
@@ -954,6 +1007,9 @@ INTRINSIC_IDX_ACCESSORS(write_mask, WRMASK, unsigned)
 INTRINSIC_IDX_ACCESSORS(base, BASE, int)
 INTRINSIC_IDX_ACCESSORS(stream_id, STREAM_ID, unsigned)
 INTRINSIC_IDX_ACCESSORS(ucp_id, UCP_ID, unsigned)
+INTRINSIC_IDX_ACCESSORS(range, RANGE, unsigned)
+INTRINSIC_IDX_ACCESSORS(desc_set, DESC_SET, unsigned)
+INTRINSIC_IDX_ACCESSORS(binding, BINDING, unsigned)
 
 /**
  * \group texture information
@@ -1562,6 +1618,9 @@ typedef struct nir_shader_compiler_options {
     * are simulated by floats.)
     */
    bool native_integers;
+
+   /* Indicates that the driver only has zero-based vertex id */
+   bool vertex_id_zero_based;
 } nir_shader_compiler_options;
 
 typedef struct nir_shader_info {
@@ -1660,6 +1719,9 @@ typedef struct nir_shader {
    /** list of outputs (nir_variable) */
    struct exec_list outputs;
 
+   /** list of shared compute variables (nir_variable) */
+   struct exec_list shared;
+
    /** Set of driver-specific options for the shader.
     *
     * The memory for the options is expected to be kept in a single static
@@ -1688,12 +1750,21 @@ typedef struct nir_shader {
     * the highest index a load_input_*, load_uniform_*, etc. intrinsic can
     * access plus one
     */
-   unsigned num_inputs, num_uniforms, num_outputs;
+   unsigned num_inputs, num_uniforms, num_outputs, num_shared;
 
    /** The shader stage, such as MESA_SHADER_VERTEX. */
    gl_shader_stage stage;
 } nir_shader;
 
+static inline nir_function *
+nir_shader_get_entrypoint(nir_shader *shader)
+{
+   assert(exec_list_length(&shader->functions) == 1);
+   struct exec_node *func_node = exec_list_get_head(&shader->functions);
+   nir_function *func = exec_node_data(nir_function, func_node, node);
+   return func;
+}
+
 #define nir_foreach_function(shader, func) \
    foreach_list_typed(nir_function, func, node, &(shader)->functions)
 
@@ -1803,6 +1874,19 @@ typedef struct {
    };
 } nir_cursor;
 
+static inline nir_block *
+nir_cursor_current_block(nir_cursor cursor)
+{
+   if (cursor.option == nir_cursor_before_instr ||
+       cursor.option == nir_cursor_after_instr) {
+      return cursor.instr->block;
+   } else {
+      return cursor.block;
+   }
+}
+
+bool nir_cursors_equal(nir_cursor a, nir_cursor b);
+
 static inline nir_cursor
 nir_before_block(nir_block *block)
 {
@@ -1868,6 +1952,22 @@ nir_after_cf_node(nir_cf_node *node)
    return nir_before_block(nir_cf_node_as_block(nir_cf_node_next(node)));
 }
 
+static inline nir_cursor
+nir_after_cf_node_and_phis(nir_cf_node *node)
+{
+   if (node->type == nir_cf_node_block)
+      return nir_after_block(nir_cf_node_as_block(node));
+
+   nir_block *block = nir_cf_node_as_block(nir_cf_node_next(node));
+   assert(block->cf_node.type == nir_cf_node_block);
+
+   nir_foreach_instr(block, instr) {
+      if (instr->type != nir_instr_type_phi)
+         return nir_before_instr(instr);
+   }
+   return nir_after_block(block);
+}
+
 static inline nir_cursor
 nir_before_cf_list(struct exec_list *cf_list)
 {
@@ -1996,6 +2096,7 @@ void nir_print_instr(const nir_instr *instr, FILE *fp);
 
 nir_shader * nir_shader_clone(void *mem_ctx, const nir_shader *s);
 nir_function_impl *nir_function_impl_clone(const nir_function_impl *fi);
+nir_constant *nir_constant_clone(const nir_constant *c, nir_variable *var);
 
 #ifdef DEBUG
 void nir_validate_shader(nir_shader *shader);
@@ -2060,6 +2161,11 @@ int nir_gs_count_vertices(const nir_shader *shader);
 
 bool nir_split_var_copies(nir_shader *shader);
 
+bool nir_lower_returns_impl(nir_function_impl *impl);
+bool nir_lower_returns(nir_shader *shader);
+
+bool nir_inline_functions(nir_shader *shader);
+
 void nir_lower_var_copy_instr(nir_intrinsic_instr *copy, void *mem_ctx);
 void nir_lower_var_copies(nir_shader *shader);
 
@@ -2069,7 +2175,10 @@ bool nir_lower_indirect_derefs(nir_shader *shader, uint32_t mode_mask);
 
 bool nir_lower_locals_to_regs(nir_shader *shader);
 
-void nir_lower_outputs_to_temporaries(nir_shader *shader);
+void nir_lower_outputs_to_temporaries(nir_shader *shader,
+                                      nir_function *entrypoint);
+
+void nir_shader_gather_info(nir_shader *shader, nir_function_impl *entrypoint);
 
 void nir_assign_var_locations(struct exec_list *var_list,
                               unsigned *size,
@@ -2083,7 +2192,7 @@ nir_src *nir_get_io_vertex_index_src(nir_intrinsic_instr *instr);
 
 void nir_lower_vars_to_ssa(nir_shader *shader);
 
-bool nir_remove_dead_variables(nir_shader *shader);
+bool nir_remove_dead_variables(nir_shader *shader, nir_variable_mode mode);
 
 void nir_move_vec_src_uses_to_dest(nir_shader *shader);
 bool nir_lower_vec_to_movs(nir_shader *shader);
@@ -2167,6 +2276,9 @@ bool nir_ssa_defs_interfere(nir_ssa_def *a, nir_ssa_def *b);
 void nir_convert_to_ssa_impl(nir_function_impl *impl);
 void nir_convert_to_ssa(nir_shader *shader);
 
+bool nir_repair_ssa_impl(nir_function_impl *impl);
+bool nir_repair_ssa(nir_shader *shader);
+
 /* If phi_webs_only is true, only convert SSA values involved in phi nodes to
  * registers.  If false, convert all values (even those not involved in a phi
  * node) to registers.
index d546e41b5febe65758c6778f648cb84713f2856a..b4dde54f7e7a1b30b61a4fbd129d7de8e9dc436d 100644 (file)
@@ -69,6 +69,20 @@ nir_builder_cf_insert(nir_builder *build, nir_cf_node *cf)
    nir_cf_node_insert(build->cursor, cf);
 }
 
+static inline nir_ssa_def *
+nir_ssa_undef(nir_builder *build, unsigned num_components)
+{
+   nir_ssa_undef_instr *undef =
+      nir_ssa_undef_instr_create(build->shader, num_components);
+   if (!undef)
+      return NULL;
+
+   nir_instr_insert(nir_before_block(nir_start_block(build->impl)),
+                    &undef->instr);
+
+   return &undef->def;
+}
+
 static inline nir_ssa_def *
 nir_build_imm(nir_builder *build, unsigned num_components, nir_const_value value)
 {
@@ -274,6 +288,23 @@ nir_swizzle(nir_builder *build, nir_ssa_def *src, unsigned swiz[4],
                      nir_imov_alu(build, alu_src, num_components);
 }
 
+/* Selects the right fdot given the number of components in each source. */
+static inline nir_ssa_def *
+nir_fdot(nir_builder *build, nir_ssa_def *src0, nir_ssa_def *src1)
+{
+   assert(src0->num_components == src1->num_components);
+   switch (src0->num_components) {
+   case 1: return nir_fmul(build, src0, src1);
+   case 2: return nir_fdot2(build, src0, src1);
+   case 3: return nir_fdot3(build, src0, src1);
+   case 4: return nir_fdot4(build, src0, src1);
+   default:
+      unreachable("bad component size");
+   }
+
+   return NULL;
+}
+
 static inline nir_ssa_def *
 nir_channel(nir_builder *b, nir_ssa_def *def, unsigned c)
 {
@@ -349,6 +380,45 @@ nir_store_var(nir_builder *build, nir_variable *var, nir_ssa_def *value,
    nir_builder_instr_insert(build, &store->instr);
 }
 
+static inline void
+nir_store_deref_var(nir_builder *build, nir_deref_var *deref,
+                    nir_ssa_def *value, unsigned writemask)
+{
+   const unsigned num_components =
+      glsl_get_vector_elements(nir_deref_tail(&deref->deref)->type);
+
+   nir_intrinsic_instr *store =
+      nir_intrinsic_instr_create(build->shader, nir_intrinsic_store_var);
+   store->num_components = num_components;
+   store->const_index[0] = writemask & ((1 << num_components) - 1);
+   store->variables[0] = nir_deref_as_var(nir_copy_deref(store, &deref->deref));
+   store->src[0] = nir_src_for_ssa(value);
+   nir_builder_instr_insert(build, &store->instr);
+}
+
+static inline void
+nir_copy_deref_var(nir_builder *build, nir_deref_var *dest, nir_deref_var *src)
+{
+   assert(nir_deref_tail(&dest->deref)->type ==
+          nir_deref_tail(&src->deref)->type);
+
+   nir_intrinsic_instr *copy =
+      nir_intrinsic_instr_create(build->shader, nir_intrinsic_copy_var);
+   copy->variables[0] = nir_deref_as_var(nir_copy_deref(copy, &dest->deref));
+   copy->variables[1] = nir_deref_as_var(nir_copy_deref(copy, &src->deref));
+   nir_builder_instr_insert(build, &copy->instr);
+}
+
+static inline void
+nir_copy_var(nir_builder *build, nir_variable *dest, nir_variable *src)
+{
+   nir_intrinsic_instr *copy =
+      nir_intrinsic_instr_create(build->shader, nir_intrinsic_copy_var);
+   copy->variables[0] = nir_deref_var_create(copy, dest);
+   copy->variables[1] = nir_deref_var_create(copy, src);
+   nir_builder_instr_insert(build, &copy->instr);
+}
+
 static inline nir_ssa_def *
 nir_load_system_value(nir_builder *build, nir_intrinsic_op op, int index)
 {
@@ -361,4 +431,11 @@ nir_load_system_value(nir_builder *build, nir_intrinsic_op op, int index)
    return &load->dest.ssa;
 }
 
+static inline void
+nir_jump(nir_builder *build, nir_jump_type jump_type)
+{
+   nir_jump_instr *jump = nir_jump_instr_create(build->shader, jump_type);
+   nir_builder_instr_insert(build, &jump->instr);
+}
+
 #endif /* NIR_BUILDER_H */
index 198ca8b9b1282ac2abe13ceeaa86ff5058573dc3..3268deb3ee433cb1582b27f00fe7c638f20adff9 100644 (file)
@@ -109,8 +109,8 @@ remap_var(clone_state *state, const nir_variable *var)
    return _lookup_ptr(state, var, nir_variable_is_global(var));
 }
 
-static nir_constant *
-clone_constant(clone_state *state, const nir_constant *c, nir_variable *nvar)
+nir_constant *
+nir_constant_clone(const nir_constant *c, nir_variable *nvar)
 {
    nir_constant *nc = ralloc(nvar, nir_constant);
 
@@ -118,7 +118,7 @@ clone_constant(clone_state *state, const nir_constant *c, nir_variable *nvar)
    nc->num_elements = c->num_elements;
    nc->elements = ralloc_array(nvar, nir_constant *, c->num_elements);
    for (unsigned i = 0; i < c->num_elements; i++) {
-      nc->elements[i] = clone_constant(state, c->elements[i], nvar);
+      nc->elements[i] = nir_constant_clone(c->elements[i], nvar);
    }
 
    return nc;
@@ -142,7 +142,7 @@ clone_variable(clone_state *state, const nir_variable *var)
           var->num_state_slots * sizeof(nir_state_slot));
    if (var->constant_initializer) {
       nvar->constant_initializer =
-         clone_constant(state, var->constant_initializer, nvar);
+         nir_constant_clone(var->constant_initializer, nvar);
    }
    nvar->interface_type = var->interface_type;
 
@@ -675,6 +675,7 @@ nir_shader_clone(void *mem_ctx, const nir_shader *s)
    clone_var_list(&state, &ns->uniforms, &s->uniforms);
    clone_var_list(&state, &ns->inputs,   &s->inputs);
    clone_var_list(&state, &ns->outputs,  &s->outputs);
+   clone_var_list(&state, &ns->shared,   &s->shared);
    clone_var_list(&state, &ns->globals,  &s->globals);
    clone_var_list(&state, &ns->system_values, &s->system_values);
 
@@ -704,6 +705,7 @@ nir_shader_clone(void *mem_ctx, const nir_shader *s)
    ns->num_inputs = s->num_inputs;
    ns->num_uniforms = s->num_uniforms;
    ns->num_outputs = s->num_outputs;
+   ns->num_shared = s->num_shared;
 
    free_clone_state(&state);
 
index 96395a4161564c66cf22a3445e2e2b51d6d01b11..33b06d0cc846d5c41b32233ca02ba1c817a27960 100644 (file)
@@ -336,8 +336,7 @@ block_add_normal_succs(nir_block *block)
          nir_block *next_block = nir_cf_node_as_block(next);
 
          link_blocks(block, next_block, NULL);
-      } else {
-         assert(parent->type == nir_cf_node_loop);
+      } else if (parent->type == nir_cf_node_loop) {
          nir_loop *loop = nir_cf_node_as_loop(parent);
 
          nir_cf_node *head = nir_loop_first_cf_node(loop);
@@ -346,6 +345,10 @@ block_add_normal_succs(nir_block *block)
 
          link_blocks(block, head_block, NULL);
          insert_phi_undef(head_block, block);
+      } else {
+         assert(parent->type == nir_cf_node_function);
+         nir_function_impl *impl = nir_cf_node_as_function(parent);
+         link_blocks(block, impl->end_block, NULL);
       }
    } else {
       nir_cf_node *next = nir_cf_node_next(&block->cf_node);
@@ -746,6 +749,12 @@ nir_cf_extract(nir_cf_list *extracted, nir_cursor begin, nir_cursor end)
 {
    nir_block *block_begin, *block_end, *block_before, *block_after;
 
+   if (nir_cursors_equal(begin, end)) {
+      exec_list_make_empty(&extracted->list);
+      extracted->impl = NULL; /* we shouldn't need this */
+      return;
+   }
+
    /* In the case where begin points to an instruction in some basic block and
     * end points to the end of the same basic block, we rely on the fact that
     * splitting on an instruction moves earlier instructions into a new basic
@@ -785,6 +794,9 @@ nir_cf_reinsert(nir_cf_list *cf_list, nir_cursor cursor)
 {
    nir_block *before, *after;
 
+   if (exec_list_is_empty(&cf_list->list))
+      return;
+
    split_block_cursor(cursor, &before, &after);
 
    foreach_list_typed_safe(nir_cf_node, node, node, &cf_list->list) {
index b345b85e8a0f94ff1b7e33f5457f643e11cfc442..d95f396807458afba64a405067b7a9e35fa1fe2d 100644 (file)
@@ -94,7 +94,6 @@ calc_dominance_cb(nir_block *block, void *_state)
       }
    }
 
-   assert(new_idom);
    if (block->imm_dom != new_idom) {
       block->imm_dom = new_idom;
       state->progress = true;
@@ -112,6 +111,11 @@ calc_dom_frontier_cb(nir_block *block, void *state)
       struct set_entry *entry;
       set_foreach(block->predecessors, entry) {
          nir_block *runner = (nir_block *) entry->key;
+
+         /* Skip unreachable predecessors */
+         if (runner->imm_dom == NULL)
+            continue;
+
          while (runner != block->imm_dom) {
             _mesa_set_add(runner->dom_frontier, block);
             runner = runner->imm_dom;
diff --git a/src/compiler/nir/nir_gather_info.c b/src/compiler/nir/nir_gather_info.c
new file mode 100644 (file)
index 0000000..8f0abd3
--- /dev/null
@@ -0,0 +1,126 @@
+/*
+ * Copyright © 2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "nir.h"
+
+static void
+gather_intrinsic_info(nir_intrinsic_instr *instr, nir_shader *shader)
+{
+   switch (instr->intrinsic) {
+   case nir_intrinsic_discard:
+      assert(shader->stage == MESA_SHADER_FRAGMENT);
+      shader->info.fs.uses_discard = true;
+      break;
+
+   case nir_intrinsic_load_front_face:
+   case nir_intrinsic_load_vertex_id:
+   case nir_intrinsic_load_vertex_id_zero_base:
+   case nir_intrinsic_load_base_vertex:
+   case nir_intrinsic_load_instance_id:
+   case nir_intrinsic_load_sample_id:
+   case nir_intrinsic_load_sample_pos:
+   case nir_intrinsic_load_sample_mask_in:
+   case nir_intrinsic_load_primitive_id:
+   case nir_intrinsic_load_invocation_id:
+   case nir_intrinsic_load_local_invocation_id:
+   case nir_intrinsic_load_work_group_id:
+   case nir_intrinsic_load_num_work_groups:
+      shader->info.system_values_read |=
+         (1 << nir_system_value_from_intrinsic(instr->intrinsic));
+      break;
+
+   case nir_intrinsic_end_primitive:
+   case nir_intrinsic_end_primitive_with_counter:
+      assert(shader->stage == MESA_SHADER_GEOMETRY);
+      shader->info.gs.uses_end_primitive = 1;
+      break;
+
+   default:
+      break;
+   }
+}
+
+static void
+gather_tex_info(nir_tex_instr *instr, nir_shader *shader)
+{
+   if (instr->op == nir_texop_tg4)
+      shader->info.uses_texture_gather = true;
+}
+
+static bool
+gather_info_block(nir_block *block, void *shader)
+{
+   nir_foreach_instr(block, instr) {
+      switch (instr->type) {
+      case nir_instr_type_intrinsic:
+         gather_intrinsic_info(nir_instr_as_intrinsic(instr), shader);
+         break;
+      case nir_instr_type_tex:
+         gather_tex_info(nir_instr_as_tex(instr), shader);
+         break;
+      case nir_instr_type_call:
+         assert(!"nir_shader_gather_info only works if functions are inlined");
+         break;
+      default:
+         break;
+      }
+   }
+
+   return true;
+}
+
+void
+nir_shader_gather_info(nir_shader *shader, nir_function_impl *entrypoint)
+{
+   shader->info.inputs_read = 0;
+   foreach_list_typed(nir_variable, var, node, &shader->inputs)
+      shader->info.inputs_read |= nir_variable_get_io_mask(var, shader->stage);
+
+   /* TODO: Some day we may need to add stream support to NIR */
+   shader->info.outputs_written = 0;
+   foreach_list_typed(nir_variable, var, node, &shader->outputs)
+      shader->info.outputs_written |= nir_variable_get_io_mask(var, shader->stage);
+
+   shader->info.system_values_read = 0;
+   foreach_list_typed(nir_variable, var, node, &shader->system_values)
+      shader->info.system_values_read |= nir_variable_get_io_mask(var, shader->stage);
+
+   shader->info.num_textures = 0;
+   shader->info.num_images = 0;
+   nir_foreach_variable(var, &shader->uniforms) {
+      const struct glsl_type *type = var->type;
+      unsigned count = 1;
+      if (glsl_type_is_array(type)) {
+         count = glsl_get_length(type);
+         type = glsl_get_array_element(type);
+      }
+
+      if (glsl_type_is_image(type)) {
+         shader->info.num_images += count;
+      } else if (glsl_type_is_sampler(type)) {
+         shader->info.num_textures += count;
+      }
+   }
+
+   nir_foreach_block(entrypoint, gather_info_block, shader);
+}
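A minimal usage sketch (hedged; `entry` stands for the shader's entry-point nir_function, and all calls must already be inlined since gather_info_block asserts on nir_instr_type_call):

   nir_shader_gather_info(shader, entry->impl);

   if (shader->stage == MESA_SHADER_FRAGMENT && shader->info.fs.uses_discard) {
      /* e.g. a backend could disable early depth/stencil writes here */
   }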
diff --git a/src/compiler/nir/nir_inline_functions.c b/src/compiler/nir/nir_inline_functions.c
new file mode 100644 (file)
index 0000000..4a08dcc
--- /dev/null
@@ -0,0 +1,270 @@
+/*
+ * Copyright © 2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "nir.h"
+#include "nir_builder.h"
+#include "nir_control_flow.h"
+
+struct inline_functions_state {
+   struct set *inlined;
+   nir_builder builder;
+   bool progress;
+};
+
+static bool inline_function_impl(nir_function_impl *impl, struct set *inlined);
+
+static bool
+rewrite_param_derefs_block(nir_block *block, void *void_state)
+{
+   nir_call_instr *call = void_state;
+
+   nir_foreach_instr_safe(block, instr) {
+      if (instr->type != nir_instr_type_intrinsic)
+         continue;
+
+      nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
+
+      for (unsigned i = 0;
+           i < nir_intrinsic_infos[intrin->intrinsic].num_variables; i++) {
+         if (intrin->variables[i]->var->data.mode != nir_var_param)
+            continue;
+
+         int param_idx = intrin->variables[i]->var->data.location;
+
+         nir_deref_var *call_deref;
+         if (param_idx >= 0) {
+            assert(param_idx < call->callee->num_params);
+            call_deref = call->params[param_idx];
+         } else {
+            call_deref = call->return_deref;
+         }
+         assert(call_deref);
+
+         nir_deref_var *new_deref = nir_deref_as_var(nir_copy_deref(intrin, &call_deref->deref));
+         nir_deref *new_tail = nir_deref_tail(&new_deref->deref);
+         new_tail->child = intrin->variables[i]->deref.child;
+         ralloc_steal(new_tail, new_tail->child);
+         intrin->variables[i] = new_deref;
+      }
+   }
+
+   return true;
+}
+
+static void
+lower_param_to_local(nir_variable *param, nir_function_impl *impl, bool write)
+{
+   if (param->data.mode != nir_var_param)
+      return;
+
+   nir_parameter_type param_type;
+   if (param->data.location >= 0) {
+      assert(param->data.location < impl->num_params);
+      param_type = impl->function->params[param->data.location].param_type;
+   } else {
+      /* Return variable */
+      param_type = nir_parameter_out;
+   }
+
+   if ((write && param_type == nir_parameter_in) ||
+       (!write && param_type == nir_parameter_out)) {
+      /* In this case, we need a shadow copy.  Turn it into a local */
+      param->data.mode = nir_var_local;
+      exec_list_push_tail(&impl->locals, &param->node);
+   }
+}
+
+static bool
+lower_params_to_locals_block(nir_block *block, void *void_state)
+{
+   nir_function_impl *impl = void_state;
+
+   nir_foreach_instr_safe(block, instr) {
+      if (instr->type != nir_instr_type_intrinsic)
+         continue;
+
+      nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
+
+      switch (intrin->intrinsic) {
+      case nir_intrinsic_store_var:
+         lower_param_to_local(intrin->variables[0]->var, impl, true);
+         break;
+
+      case nir_intrinsic_copy_var:
+         lower_param_to_local(intrin->variables[0]->var, impl, true);
+         lower_param_to_local(intrin->variables[1]->var, impl, false);
+         break;
+
+      case nir_intrinsic_load_var:
+         /* All other intrinsics which access variables (image_load_store)
+          * do so in a read-only fashion.
+          */
+         for (unsigned i = 0;
+              i < nir_intrinsic_infos[intrin->intrinsic].num_variables; i++) {
+            lower_param_to_local(intrin->variables[i]->var, impl, false);
+         }
+         break;
+
+      default:
+         continue;
+      }
+   }
+
+   return true;
+}
+
+static bool
+inline_functions_block(nir_block *block, void *void_state)
+{
+   struct inline_functions_state *state = void_state;
+
+   nir_builder *b = &state->builder;
+
+   /* This is tricky.  We're iterating over instructions in a block but, as
+    * we go, the block and its instruction list are being split into
+    * pieces.  However, this *should* be safe since foreach_safe always
+    * stashes the next thing in the iteration.  That next thing will
+    * properly get moved to the next block when it gets split, and we
+    * continue iterating there.
+    */
+   nir_foreach_instr_safe(block, instr) {
+      if (instr->type != nir_instr_type_call)
+         continue;
+
+      state->progress = true;
+
+      nir_call_instr *call = nir_instr_as_call(instr);
+      assert(call->callee->impl);
+
+      inline_function_impl(call->callee->impl, state->inlined);
+
+      nir_function_impl *callee_copy =
+         nir_function_impl_clone(call->callee->impl);
+      callee_copy->function = call->callee;
+
+      /* Add copies of all in parameters */
+      assert(call->num_params == callee_copy->num_params);
+
+      exec_list_append(&b->impl->locals, &callee_copy->locals);
+      exec_list_append(&b->impl->registers, &callee_copy->registers);
+
+      b->cursor = nir_before_instr(&call->instr);
+
+      /* We now need to tie the two functions together using the
+       * parameters.  There are two ways we do this: One is to turn the
+       * parameter into a local variable and do a shadow-copy.  The other
+       * is to treat the parameter as a "proxy" and rewrite derefs to use
+       * the actual variable that comes from the call instruction.  We
+       * implement both schemes.  The first is needed in the case where we
+       * have an in parameter that we write to, or similar.  The second case is
+       * needed for handling things such as images and uniforms properly.
+       */
+
+      /* Figure out when we need to lower to a shadow local */
+      nir_foreach_block(callee_copy, lower_params_to_locals_block, callee_copy);
+      for (unsigned i = 0; i < callee_copy->num_params; i++) {
+         nir_variable *param = callee_copy->params[i];
+
+         if (param->data.mode == nir_var_local &&
+             call->callee->params[i].param_type != nir_parameter_out) {
+            nir_copy_deref_var(b, nir_deref_var_create(b->shader, param),
+                                  call->params[i]);
+         }
+      }
+
+      nir_foreach_block(callee_copy, rewrite_param_derefs_block, call);
+
+      /* Pluck the body out of the function and place it here */
+      nir_cf_list body;
+      nir_cf_list_extract(&body, &callee_copy->body);
+      nir_cf_reinsert(&body, b->cursor);
+
+      b->cursor = nir_before_instr(&call->instr);
+
+      /* Add copies of all out parameters and the return */
+      assert(call->num_params == callee_copy->num_params);
+      for (unsigned i = 0; i < callee_copy->num_params; i++) {
+         nir_variable *param = callee_copy->params[i];
+
+         if (param->data.mode == nir_var_local &&
+             call->callee->params[i].param_type != nir_parameter_in) {
+            nir_copy_deref_var(b, call->params[i],
+                                  nir_deref_var_create(b->shader, param));
+         }
+      }
+      if (!glsl_type_is_void(call->callee->return_type) &&
+          callee_copy->return_var->data.mode == nir_var_local) {
+         nir_copy_deref_var(b, call->return_deref,
+                               nir_deref_var_create(b->shader,
+                                                    callee_copy->return_var));
+      }
+
+      nir_instr_remove(&call->instr);
+   }
+
+   return true;
+}
+
+static bool
+inline_function_impl(nir_function_impl *impl, struct set *inlined)
+{
+   if (_mesa_set_search(inlined, impl))
+      return false; /* Already inlined */
+
+   struct inline_functions_state state;
+
+   state.inlined = inlined;
+   state.progress = false;
+   nir_builder_init(&state.builder, impl);
+
+   nir_foreach_block(impl, inline_functions_block, &state);
+
+   if (state.progress) {
+      /* SSA and register indices are completely messed up now */
+      nir_index_ssa_defs(impl);
+      nir_index_local_regs(impl);
+
+      nir_metadata_preserve(impl, nir_metadata_none);
+   }
+
+   _mesa_set_add(inlined, impl);
+
+   return state.progress;
+}
+
+bool
+nir_inline_functions(nir_shader *shader)
+{
+   struct set *inlined = _mesa_set_create(NULL, _mesa_hash_pointer,
+                                          _mesa_key_pointer_equal);
+   bool progress = false;
+
+   nir_foreach_function(shader, function) {
+      if (function->impl)
+         progress = inline_function_impl(function->impl, inlined) || progress;
+   }
+
+   _mesa_set_destroy(inlined, NULL);
+
+   return progress;
+}
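A rough ordering sketch for callers (hedged, not mandated by this file): lower returns first so every callee simply falls through to its end block, then inline, then let later passes clean up the shadow-copy locals introduced above.

   nir_lower_returns(shader);
   nir_inline_functions(shader);
   /* Only the entry point matters from here on; the parameter copies are now
    * ordinary locals that nir_lower_vars_to_ssa and
    * nir_remove_dead_variables(shader, nir_var_local) can eliminate. */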
index 00725562874583b307ee41d77478cd6aa3c04226..fa162f9d126927f475f7abbeeb5dcfd06d39d00a 100644 (file)
@@ -175,6 +175,53 @@ INTRINSIC(image_size, 0, ARR(), true, 4, 1, 0, xx, xx, xx,
 INTRINSIC(image_samples, 0, ARR(), true, 1, 1, 0, xx, xx, xx,
           NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER)
 
+/*
+ * Vulkan descriptor set intrinsic
+ *
+ * The Vulkan API uses a different binding model from GL.  In the Vulkan
+ * API, all external resources are represented by a triple:
+ *
+ * (descriptor set, binding, array index)
+ *
+ * where the array index is the only thing allowed to be indirect.  The
+ * vulkan_resource_index intrinsic takes the descriptor set and binding as
+ * its first two indices and the array index as its source.  The third
+ * index is a nir_variable_mode in case that's useful to the backend.
+ *
+ * The intended usage is that the shader will call vulkan_resource_index to
+ * get an index and then pass that as the buffer index to UBO/SSBO calls.
+ */
+INTRINSIC(vulkan_resource_index, 1, ARR(1), true, 1, 0, 2,
+          DESC_SET, BINDING, xx,
+          NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER)
+
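A hedged sketch of how a lowering pass might emit this intrinsic; `b`, `set`, `binding` and `array_index` are hypothetical locals:

   nir_intrinsic_instr *res_idx =
      nir_intrinsic_instr_create(b->shader, nir_intrinsic_vulkan_resource_index);
   res_idx->src[0] = nir_src_for_ssa(array_index);
   res_idx->const_index[0] = set;      /* DESC_SET */
   res_idx->const_index[1] = binding;  /* BINDING */
   nir_ssa_dest_init(&res_idx->instr, &res_idx->dest, 1, NULL);
   nir_builder_instr_insert(b, &res_idx->instr);
   /* &res_idx->dest.ssa then feeds the buffer-index source of load_ubo/load_ssbo. */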
+/*
+ * variable atomic intrinsics
+ *
+ * All of these variable atomic memory operations read a value from memory,
+ * compute a new value using one of the operations below, write the new value
+ * to memory, and return the original value read.
+ *
+ * All operations take 1 source except CompSwap, which takes 2. These sources
+ * represent:
+ *
+ * 0: The data parameter to the atomic function (i.e. the value to add
+ *    in var_atomic_add, etc.).
+ * 1: For CompSwap only: the second data parameter.
+ *
+ * All operations take 1 variable deref.
+ */
+INTRINSIC(var_atomic_add, 1, ARR(1), true, 1, 1, 0, xx, xx, xx, 0)
+INTRINSIC(var_atomic_imin, 1, ARR(1), true, 1, 1, 0, xx, xx, xx, 0)
+INTRINSIC(var_atomic_umin, 1, ARR(1), true, 1, 1, 0, xx, xx, xx, 0)
+INTRINSIC(var_atomic_imax, 1, ARR(1), true, 1, 1, 0, xx, xx, xx, 0)
+INTRINSIC(var_atomic_umax, 1, ARR(1), true, 1, 1, 0, xx, xx, xx, 0)
+INTRINSIC(var_atomic_and, 1, ARR(1), true, 1, 1, 0, xx, xx, xx, 0)
+INTRINSIC(var_atomic_or, 1, ARR(1), true, 1, 1, 0, xx, xx, xx, 0)
+INTRINSIC(var_atomic_xor, 1, ARR(1), true, 1, 1, 0, xx, xx, xx, 0)
+INTRINSIC(var_atomic_exchange, 1, ARR(1), true, 1, 1, 0, xx, xx, xx, 0)
+INTRINSIC(var_atomic_comp_swap, 2, ARR(1, 1), true, 1, 1, 0, xx, xx, xx, 0)
+
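A hedged sketch of emitting one of these; `b`, `counter` (a shared nir_variable) and `value` (a single-component nir_ssa_def) are hypothetical:

   nir_intrinsic_instr *atomic =
      nir_intrinsic_instr_create(b->shader, nir_intrinsic_var_atomic_add);
   atomic->variables[0] = nir_deref_var_create(atomic, counter);
   atomic->src[0] = nir_src_for_ssa(value);
   nir_ssa_dest_init(&atomic->instr, &atomic->dest, 1, NULL);
   nir_builder_instr_insert(b, &atomic->instr);
   /* &atomic->dest.ssa is the value that was in memory before the add;
    * nir_lower_io later rewrites this into shared_atomic_add on an offset. */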
 /*
  * SSBO atomic intrinsics
  *
@@ -266,6 +313,9 @@ SYSTEM_VALUE(helper_invocation, 1, 0, xx, xx, xx)
 * of the start of the variable being loaded and the offset source is an
 * offset into that variable.
  *
+ * Uniform load operations have a second index that specifies the size of the
+ * variable being loaded.  If const_index[1] == 0, then the size is unknown.
+ *
  * Some load operations such as UBO/SSBO load and per_vertex loads take an
  * additional source to specify which UBO/SSBO/vertex to load from.
  *
@@ -278,8 +328,9 @@ SYSTEM_VALUE(helper_invocation, 1, 0, xx, xx, xx)
 #define LOAD(name, srcs, num_indices, idx0, idx1, idx2, flags) \
    INTRINSIC(load_##name, srcs, ARR(1, 1, 1, 1), true, 0, 0, num_indices, idx0, idx1, idx2, flags)
 
-/* src[] = { offset }. const_index[] = { base } */
-LOAD(uniform, 1, 1, BASE, xx, xx, NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER)
+/* src[] = { offset }. const_index[] = { base, range } */
+LOAD(uniform, 1, 2, BASE, RANGE, xx,
+     NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER)
 /* src[] = { buffer_index, offset }. No const_index */
 LOAD(ubo, 2, 0, xx, xx, xx, NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER)
 /* src[] = { offset }. const_index[] = { base } */
@@ -294,6 +345,9 @@ LOAD(output, 1, 1, BASE, xx, xx, NIR_INTRINSIC_CAN_ELIMINATE)
 LOAD(per_vertex_output, 2, 1, BASE, xx, xx, NIR_INTRINSIC_CAN_ELIMINATE)
 /* src[] = { offset }. const_index[] = { base } */
 LOAD(shared, 1, 1, BASE, xx, xx, NIR_INTRINSIC_CAN_ELIMINATE)
+/* src[] = { offset }. const_index[] = { base, range } */
+LOAD(push_constant, 1, 2, BASE, RANGE, xx,
+     NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER)
 
 /*
  * Stores work the same way as loads, except now the first source is the value
index 1935a527c6f9ae53cb0a646a8864287dc19495d4..eefcb55a0a6b9157e67e3544358734330d205bef 100644 (file)
@@ -63,7 +63,8 @@ lower_instr(nir_intrinsic_instr *instr,
    }
 
    if (instr->variables[0]->var->data.mode != nir_var_uniform &&
-       instr->variables[0]->var->data.mode != nir_var_shader_storage)
+       instr->variables[0]->var->data.mode != nir_var_shader_storage &&
+       instr->variables[0]->var->data.mode != nir_var_shared)
       return; /* atomics passed as function arguments can't be lowered */
 
    void *mem_ctx = ralloc_parent(instr);
index 11fb973a2377f5134b3c99a2724fac133f972496..84e353775cf2045a3586111ef8ea7a1a10230690 100644 (file)
@@ -160,12 +160,56 @@ load_op(struct lower_io_state *state,
    case nir_var_uniform:
       op = nir_intrinsic_load_uniform;
       break;
+   case nir_var_shared:
+      op = nir_intrinsic_load_shared;
+      break;
    default:
       unreachable("Unknown variable mode");
    }
    return op;
 }
 
+static nir_intrinsic_op
+store_op(struct lower_io_state *state,
+         nir_variable_mode mode, bool per_vertex)
+{
+   nir_intrinsic_op op;
+   switch (mode) {
+   case nir_var_shader_in:
+   case nir_var_shader_out:
+      op = per_vertex ? nir_intrinsic_store_per_vertex_output :
+                        nir_intrinsic_store_output;
+      break;
+   case nir_var_shared:
+      op = nir_intrinsic_store_shared;
+      break;
+   default:
+      unreachable("Unknown variable mode");
+   }
+   return op;
+}
+
+static nir_intrinsic_op
+atomic_op(nir_intrinsic_op opcode)
+{
+   switch (opcode) {
+#define OP(O) case nir_intrinsic_var_##O: return nir_intrinsic_shared_##O;
+   OP(atomic_exchange)
+   OP(atomic_comp_swap)
+   OP(atomic_add)
+   OP(atomic_imin)
+   OP(atomic_umin)
+   OP(atomic_imax)
+   OP(atomic_umax)
+   OP(atomic_and)
+   OP(atomic_or)
+   OP(atomic_xor)
+#undef OP
+   default:
+      unreachable("Invalid atomic");
+   }
+}
+
 static bool
 nir_lower_io_block(nir_block *block, void *void_state)
 {
@@ -179,9 +223,25 @@ nir_lower_io_block(nir_block *block, void *void_state)
 
       nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
 
-      if (intrin->intrinsic != nir_intrinsic_load_var &&
-          intrin->intrinsic != nir_intrinsic_store_var)
+      switch (intrin->intrinsic) {
+      case nir_intrinsic_load_var:
+      case nir_intrinsic_store_var:
+      case nir_intrinsic_var_atomic_add:
+      case nir_intrinsic_var_atomic_imin:
+      case nir_intrinsic_var_atomic_umin:
+      case nir_intrinsic_var_atomic_imax:
+      case nir_intrinsic_var_atomic_umax:
+      case nir_intrinsic_var_atomic_and:
+      case nir_intrinsic_var_atomic_or:
+      case nir_intrinsic_var_atomic_xor:
+      case nir_intrinsic_var_atomic_exchange:
+      case nir_intrinsic_var_atomic_comp_swap:
+         /* We can lower the io for this nir intrinsic */
+         break;
+      default:
+         /* We can't lower the io for this nir intrinsic, so skip it */
          continue;
+      }
 
       nir_variable_mode mode = intrin->variables[0]->var->data.mode;
 
@@ -190,6 +250,7 @@ nir_lower_io_block(nir_block *block, void *void_state)
 
       if (mode != nir_var_shader_in &&
           mode != nir_var_shader_out &&
+          mode != nir_var_shared &&
           mode != nir_var_uniform)
          continue;
 
@@ -216,6 +277,11 @@ nir_lower_io_block(nir_block *block, void *void_state)
          nir_intrinsic_set_base(load,
             intrin->variables[0]->var->data.driver_location);
 
+         if (load->intrinsic == nir_intrinsic_load_uniform) {
+            load->const_index[1] =
+               state->type_size(intrin->variables[0]->var->type);
+         }
+
          if (per_vertex)
             load->src[0] = nir_src_for_ssa(vertex_index);
 
@@ -236,7 +302,7 @@ nir_lower_io_block(nir_block *block, void *void_state)
       }
 
       case nir_intrinsic_store_var: {
-         assert(mode == nir_var_shader_out);
+         assert(mode == nir_var_shader_out || mode == nir_var_shared);
 
          nir_ssa_def *offset;
          nir_ssa_def *vertex_index;
@@ -248,12 +314,9 @@ nir_lower_io_block(nir_block *block, void *void_state)
                                 per_vertex ? &vertex_index : NULL,
                                 state->type_size);
 
-         nir_intrinsic_op store_op =
-            per_vertex ? nir_intrinsic_store_per_vertex_output :
-                         nir_intrinsic_store_output;
-
-         nir_intrinsic_instr *store = nir_intrinsic_instr_create(state->mem_ctx,
-                                                                 store_op);
+         nir_intrinsic_instr *store =
+            nir_intrinsic_instr_create(state->mem_ctx,
+                                       store_op(state, mode, per_vertex));
          store->num_components = intrin->num_components;
 
          nir_src_copy(&store->src[0], &intrin->src[0], store);
@@ -272,6 +335,51 @@ nir_lower_io_block(nir_block *block, void *void_state)
          break;
       }
 
+      case nir_intrinsic_var_atomic_add:
+      case nir_intrinsic_var_atomic_imin:
+      case nir_intrinsic_var_atomic_umin:
+      case nir_intrinsic_var_atomic_imax:
+      case nir_intrinsic_var_atomic_umax:
+      case nir_intrinsic_var_atomic_and:
+      case nir_intrinsic_var_atomic_or:
+      case nir_intrinsic_var_atomic_xor:
+      case nir_intrinsic_var_atomic_exchange:
+      case nir_intrinsic_var_atomic_comp_swap: {
+         assert(mode == nir_var_shared);
+
+         nir_ssa_def *offset;
+
+         offset = get_io_offset(b, intrin->variables[0],
+                                NULL, state->type_size);
+
+         nir_intrinsic_instr *atomic =
+            nir_intrinsic_instr_create(state->mem_ctx,
+                                       atomic_op(intrin->intrinsic));
+
+         atomic->src[0] = nir_src_for_ssa(offset);
+
+         atomic->const_index[0] =
+            intrin->variables[0]->var->data.driver_location;
+
+         nir_src_copy(&atomic->src[1], &intrin->src[0], atomic);
+
+         if (intrin->intrinsic == nir_intrinsic_var_atomic_comp_swap)
+            nir_src_copy(&atomic->src[2], &intrin->src[1], atomic);
+
+         if (intrin->dest.is_ssa) {
+            nir_ssa_dest_init(&atomic->instr, &atomic->dest,
+                              intrin->dest.ssa.num_components, NULL);
+            nir_ssa_def_rewrite_uses(&intrin->dest.ssa,
+                                     nir_src_for_ssa(&atomic->dest.ssa));
+         } else {
+            nir_dest_copy(&atomic->dest, &intrin->dest, state->mem_ctx);
+         }
+
+         nir_instr_insert_before(&intrin->instr, &atomic->instr);
+         nir_instr_remove(&intrin->instr);
+         break;
+      }
+
       default:
          break;
       }
@@ -319,10 +427,13 @@ nir_get_io_offset_src(nir_intrinsic_instr *instr)
    case nir_intrinsic_load_output:
    case nir_intrinsic_load_uniform:
       return &instr->src[0];
+   case nir_intrinsic_load_ubo:
+   case nir_intrinsic_load_ssbo:
    case nir_intrinsic_load_per_vertex_input:
    case nir_intrinsic_load_per_vertex_output:
    case nir_intrinsic_store_output:
       return &instr->src[1];
+   case nir_intrinsic_store_ssbo:
    case nir_intrinsic_store_per_vertex_output:
       return &instr->src[2];
    default:
index 71b06b81fcceace1712108a45f1eb540ef44f47d..00ac09114cf9cddd21766ce05532dc174e1b0815 100644 (file)
@@ -74,7 +74,7 @@ emit_output_copies_block(nir_block *block, void *state)
 }
 
 void
-nir_lower_outputs_to_temporaries(nir_shader *shader)
+nir_lower_outputs_to_temporaries(nir_shader *shader, nir_function *entrypoint)
 {
    struct lower_outputs_state state;
 
@@ -97,6 +97,9 @@ nir_lower_outputs_to_temporaries(nir_shader *shader)
       /* Reparent the name to the new variable */
       ralloc_steal(output, output->name);
 
+      /* Reparent the constant initializer (if any) */
+      ralloc_steal(output, output->constant_initializer);
+
       /* Give the output a new name with @out-temp appended */
       temp->name = ralloc_asprintf(var, "%s@out-temp", output->name);
       temp->data.mode = nir_var_global;
@@ -114,7 +117,7 @@ nir_lower_outputs_to_temporaries(nir_shader *shader)
           * before each EmitVertex call.
           */
          nir_foreach_block(function->impl, emit_output_copies_block, &state);
-      } else if (strcmp(function->name, "main") == 0) {
+      } else if (function == entrypoint) {
          /* For all other shader types, we need to do the copies right before
           * the jumps to the end block.
           */
diff --git a/src/compiler/nir/nir_lower_returns.c b/src/compiler/nir/nir_lower_returns.c
new file mode 100644 (file)
index 0000000..91bb2f7
--- /dev/null
@@ -0,0 +1,246 @@
+/*
+ * Copyright © 2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "nir.h"
+#include "nir_builder.h"
+#include "nir_control_flow.h"
+
+struct lower_returns_state {
+   nir_builder builder;
+   struct exec_list *cf_list;
+   nir_loop *loop;
+   nir_variable *return_flag;
+};
+
+static bool lower_returns_in_cf_list(struct exec_list *cf_list,
+                                     struct lower_returns_state *state);
+
+static void
+predicate_following(nir_cf_node *node, struct lower_returns_state *state)
+{
+   nir_builder *b = &state->builder;
+   b->cursor = nir_after_cf_node_and_phis(node);
+
+   if (nir_cursors_equal(b->cursor, nir_after_cf_list(state->cf_list)))
+      return; /* Nothing to predicate */
+
+   assert(state->return_flag);
+
+   nir_if *if_stmt = nir_if_create(b->shader);
+   if_stmt->condition = nir_src_for_ssa(nir_load_var(b, state->return_flag));
+   nir_cf_node_insert(b->cursor, &if_stmt->cf_node);
+
+   if (state->loop) {
+      /* If we're inside of a loop, then all we need to do is insert a
+       * conditional break.
+       */
+      nir_jump_instr *brk =
+         nir_jump_instr_create(state->builder.shader, nir_jump_break);
+      nir_instr_insert(nir_before_cf_list(&if_stmt->then_list), &brk->instr);
+   } else {
+      /* Otherwise, we need to actually move everything into the else case
+       * of the if statement.
+       */
+      nir_cf_list list;
+      nir_cf_extract(&list, nir_after_cf_node(&if_stmt->cf_node),
+                            nir_after_cf_list(state->cf_list));
+      assert(!exec_list_is_empty(&list.list));
+      nir_cf_reinsert(&list, nir_before_cf_list(&if_stmt->else_list));
+   }
+}
+
+static bool
+lower_returns_in_loop(nir_loop *loop, struct lower_returns_state *state)
+{
+   nir_loop *parent = state->loop;
+   state->loop = loop;
+   bool progress = lower_returns_in_cf_list(&loop->body, state);
+   state->loop = parent;
+
+   /* If the recursive call made progress, then there were returns inside
+    * of the loop.  These would have been lowered to breaks with the return
+    * flag set to true.  We need to predicate everything following the loop
+    * on the return flag.
+    */
+   if (progress)
+      predicate_following(&loop->cf_node, state);
+
+   return progress;
+}
+
+static bool
+lower_returns_in_if(nir_if *if_stmt, struct lower_returns_state *state)
+{
+   bool progress;
+
+   progress = lower_returns_in_cf_list(&if_stmt->then_list, state);
+   progress = lower_returns_in_cf_list(&if_stmt->else_list, state) || progress;
+
+   /* If either of the recursive calls made progress, then there were
+    * returns inside of the body of the if.  If we're in a loop, then these
+    * were lowered to breaks which automatically skip to the end of the
+    * loop so we don't have to do anything.  If we're not in a loop, then
+    * all we know is that the return flag is set appropriately and that the
+    * recursive calls ensured that nothing gets executed *inside* the if
+    * after a return.  In order to ensure nothing outside gets executed
+    * after a return, we need to predicate everything following on the
+    * return flag.
+    */
+   if (progress && !state->loop)
+      predicate_following(&if_stmt->cf_node, state);
+
+   return progress;
+}
+
+static bool
+lower_returns_in_block(nir_block *block, struct lower_returns_state *state)
+{
+   if (block->predecessors->entries == 0 &&
+       block != nir_start_block(state->builder.impl)) {
+      /* This block is unreachable.  Delete it and everything after it. */
+      nir_cf_list list;
+      nir_cf_extract(&list, nir_before_cf_node(&block->cf_node),
+                            nir_after_cf_list(state->cf_list));
+
+      if (exec_list_is_empty(&list.list)) {
+         /* There's nothing here, which also means there's nothing in this
+          * block so we have nothing to do.
+          */
+         return false;
+      } else {
+         nir_cf_delete(&list);
+         return true;
+      }
+   }
+
+   nir_instr *last_instr = nir_block_last_instr(block);
+   if (last_instr == NULL)
+      return false;
+
+   if (last_instr->type != nir_instr_type_jump)
+      return false;
+
+   nir_jump_instr *jump = nir_instr_as_jump(last_instr);
+   if (jump->type != nir_jump_return)
+      return false;
+
+   nir_instr_remove(&jump->instr);
+
+   nir_builder *b = &state->builder;
+   b->cursor = nir_after_block(block);
+
+   /* Set the return flag */
+   if (state->return_flag == NULL) {
+      state->return_flag =
+         nir_local_variable_create(b->impl, glsl_bool_type(), "return");
+
+      /* Set a default value of false */
+      state->return_flag->constant_initializer =
+         rzalloc(state->return_flag, nir_constant);
+   }
+   nir_store_var(b, state->return_flag, nir_imm_int(b, NIR_TRUE), 1);
+
+   if (state->loop) {
+      /* We're in a loop;  we need to break out of it. */
+      nir_jump(b, nir_jump_break);
+   } else {
+      /* Not in a loop;  we'll deal with predicating later */
+      assert(nir_cf_node_next(&block->cf_node) == NULL);
+   }
+
+   return true;
+}
+
+static bool
+lower_returns_in_cf_list(struct exec_list *cf_list,
+                         struct lower_returns_state *state)
+{
+   bool progress = false;
+
+   struct exec_list *parent_list = state->cf_list;
+   state->cf_list = cf_list;
+
+   /* We iterate over the list backwards because any given lower call may
+    * take everything following the given CF node and predicate it.  In
+    * order to avoid recursion/iteration problems, we want everything after
+    * a given node to already be lowered before this happens.
+    */
+   foreach_list_typed_reverse_safe(nir_cf_node, node, node, cf_list) {
+      switch (node->type) {
+      case nir_cf_node_block:
+         if (lower_returns_in_block(nir_cf_node_as_block(node), state))
+            progress = true;
+         break;
+
+      case nir_cf_node_if:
+         if (lower_returns_in_if(nir_cf_node_as_if(node), state))
+            progress = true;
+         break;
+
+      case nir_cf_node_loop:
+         if (lower_returns_in_loop(nir_cf_node_as_loop(node), state))
+            progress = true;
+         break;
+
+      default:
+         unreachable("Invalid inner CF node type");
+      }
+   }
+
+   state->cf_list = parent_list;
+
+   return progress;
+}
+
+bool
+nir_lower_returns_impl(nir_function_impl *impl)
+{
+   struct lower_returns_state state;
+
+   state.cf_list = &impl->body;
+   state.loop = NULL;
+   state.return_flag = NULL;
+   nir_builder_init(&state.builder, impl);
+
+   bool progress = lower_returns_in_cf_list(&impl->body, &state);
+
+   if (progress) {
+      nir_metadata_preserve(impl, nir_metadata_none);
+      nir_repair_ssa_impl(impl);
+   }
+
+   return progress;
+}
+
+bool
+nir_lower_returns(nir_shader *shader)
+{
+   bool progress = false;
+
+   nir_foreach_function(shader, function) {
+      if (function->impl)
+         progress = nir_lower_returns_impl(function->impl) || progress;
+   }
+
+   return progress;
+}
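A hedged illustration of the transformation in GLSL-like pseudocode (for a return that is not inside a loop; inside a loop the flag store is followed by a break, and the code after the loop is predicated instead):

   /* Before:
    *    if (cond) { foo(); return; }
    *    bar();
    *
    * After:
    *    bool return_flag = false;    // local with a false constant initializer
    *    if (cond) { foo(); return_flag = true; }
    *    if (return_flag) {
    *       // empty
    *    } else {
    *       bar();
    *    }
    */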
index 2bd787d3574bada0d20e5bbb426b655aeee46f65..79f6bedc990bc3e5c3c804c0b17a378eb94a9e3e 100644 (file)
@@ -55,9 +55,77 @@ convert_block(nir_block *block, void *void_state)
 
       b->cursor = nir_after_instr(&load_var->instr);
 
-      nir_intrinsic_op sysval_op =
-         nir_intrinsic_from_system_value(var->data.location);
-      nir_ssa_def *sysval = nir_load_system_value(b, sysval_op, 0);
+      nir_ssa_def *sysval;
+      switch (var->data.location) {
+      case SYSTEM_VALUE_GLOBAL_INVOCATION_ID: {
+         /* From the GLSL man page for gl_GlobalInvocationID:
+          *
+          *    "The value of gl_GlobalInvocationID is equal to
+          *    gl_WorkGroupID * gl_WorkGroupSize + gl_LocalInvocationID"
+          */
+
+         nir_const_value local_size;
+         local_size.u[0] = b->shader->info.cs.local_size[0];
+         local_size.u[1] = b->shader->info.cs.local_size[1];
+         local_size.u[2] = b->shader->info.cs.local_size[2];
+
+         nir_ssa_def *group_id =
+            nir_load_system_value(b, nir_intrinsic_load_work_group_id, 0);
+         nir_ssa_def *local_id =
+            nir_load_system_value(b, nir_intrinsic_load_local_invocation_id, 0);
+
+         sysval = nir_iadd(b, nir_imul(b, group_id,
+                                          nir_build_imm(b, 3, local_size)),
+                              local_id);
+         break;
+      }
+
+      case SYSTEM_VALUE_LOCAL_INVOCATION_INDEX: {
+         /* From the GLSL man page for gl_LocalInvocationIndex:
+          *
+          *    "The value of gl_LocalInvocationIndex is equal to
+          *    gl_LocalInvocationID.z * gl_WorkGroupSize.x *
+          *    gl_WorkGroupSize.y + gl_LocalInvocationID.y *
+          *    gl_WorkGroupSize.x + gl_LocalInvocationID.x"
+          */
+         nir_ssa_def *local_id =
+            nir_load_system_value(b, nir_intrinsic_load_local_invocation_id, 0);
+
+         unsigned stride_y = b->shader->info.cs.local_size[0];
+         unsigned stride_z = b->shader->info.cs.local_size[0] *
+                             b->shader->info.cs.local_size[1];
+
+         sysval = nir_iadd(b, nir_imul(b, nir_channel(b, local_id, 2),
+                                          nir_imm_int(b, stride_z)),
+                              nir_iadd(b, nir_imul(b, nir_channel(b, local_id, 1),
+                                                      nir_imm_int(b, stride_y)),
+                                          nir_channel(b, local_id, 0)));
+         break;
+      }
+
+      case SYSTEM_VALUE_VERTEX_ID:
+         if (b->shader->options->vertex_id_zero_based) {
+            sysval = nir_iadd(b,
+               nir_load_system_value(b, nir_intrinsic_load_vertex_id_zero_base, 0),
+               nir_load_system_value(b, nir_intrinsic_load_base_vertex, 0));
+         } else {
+            sysval = nir_load_system_value(b, nir_intrinsic_load_vertex_id, 0);
+         }
+         break;
+
+      case SYSTEM_VALUE_INSTANCE_INDEX:
+         sysval = nir_iadd(b,
+            nir_load_system_value(b, nir_intrinsic_load_instance_id, 0),
+            nir_load_system_value(b, nir_intrinsic_load_base_instance, 0));
+         break;
+
+      default: {
+         nir_intrinsic_op sysval_op =
+            nir_intrinsic_from_system_value(var->data.location);
+         sysval = nir_load_system_value(b, sysval_op, 0);
+         break;
+      } /* default */
+      }
 
       nir_ssa_def_rewrite_uses(&load_var->dest.ssa, nir_src_for_ssa(sysval));
       nir_instr_remove(&load_var->instr);
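As a quick sanity check of the formulas above (purely illustrative numbers): with cs.local_size = (8, 8, 1), the invocation with gl_LocalInvocationID = (3, 2, 0) in gl_WorkGroupID = (1, 0, 0) gets gl_GlobalInvocationID = (1, 0, 0) * (8, 8, 1) + (3, 2, 0) = (11, 2, 0), and gl_LocalInvocationIndex = 0 * (8 * 8) + 2 * 8 + 3 = 19.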
index 5e81f237c1a3d40916eca2e4c8aab1adbe0012cd..a3f3fcfd9b481264efc3065e2191cc2d61c6ef6a 100644 (file)
@@ -27,6 +27,7 @@
 
 #include "nir.h"
 #include "nir_builder.h"
+#include "nir_phi_builder.h"
 #include "nir_vla.h"
 
 
@@ -47,8 +48,7 @@ struct deref_node {
    struct set *stores;
    struct set *copies;
 
-   nir_ssa_def **def_stack;
-   nir_ssa_def **def_stack_tail;
+   struct nir_phi_builder_value *pb_value;
 
    struct deref_node *wildcard;
    struct deref_node *indirect;
@@ -87,8 +87,7 @@ struct lower_variables_state {
     */
    bool add_to_direct_deref_nodes;
 
-   /* A hash table mapping phi nodes to deref_state data */
-   struct hash_table *phi_table;
+   struct nir_phi_builder *phi_builder;
 };
 
 static struct deref_node *
@@ -473,114 +472,6 @@ lower_copies_to_load_store(struct deref_node *node,
    return true;
 }
 
-/** Pushes an SSA def onto the def stack for the given node
- *
- * Each node is potentially associated with a stack of SSA definitions.
- * This stack is used for determining what SSA definition reaches a given
- * point in the program for variable renaming.  The stack is always kept in
- * dominance-order with at most one SSA def per block.  If the SSA
- * definition on the top of the stack is in the same block as the one being
- * pushed, the top element is replaced.
- */
-static void
-def_stack_push(struct deref_node *node, nir_ssa_def *def,
-               struct lower_variables_state *state)
-{
-   if (node->def_stack == NULL) {
-      node->def_stack = ralloc_array(state->dead_ctx, nir_ssa_def *,
-                                     state->impl->num_blocks);
-      node->def_stack_tail = node->def_stack - 1;
-   }
-
-   if (node->def_stack_tail >= node->def_stack) {
-      nir_ssa_def *top_def = *node->def_stack_tail;
-
-      if (def->parent_instr->block == top_def->parent_instr->block) {
-         /* They're in the same block, just replace the top */
-         *node->def_stack_tail = def;
-         return;
-      }
-   }
-
-   *(++node->def_stack_tail) = def;
-}
-
-/* Pop the top of the def stack if it's in the given block */
-static void
-def_stack_pop_if_in_block(struct deref_node *node, nir_block *block)
-{
-   /* If we're popping, then we have presumably pushed at some time in the
-    * past so this should exist.
-    */
-   assert(node->def_stack != NULL);
-
-   /* The stack is already empty.  Do nothing. */
-   if (node->def_stack_tail < node->def_stack)
-      return;
-
-   nir_ssa_def *def = *node->def_stack_tail;
-   if (def->parent_instr->block == block)
-      node->def_stack_tail--;
-}
-
-/** Retrieves the SSA definition on the top of the stack for the given
- * node, if one exists.  If the stack is empty, then we return the constant
- * initializer (if it exists) or an SSA undef.
- */
-static nir_ssa_def *
-get_ssa_def_for_block(struct deref_node *node, nir_block *block,
-                      struct lower_variables_state *state)
-{
-   /* If we have something on the stack, go ahead and return it.  We're
-    * assuming that the top of the stack dominates the given block.
-    */
-   if (node->def_stack && node->def_stack_tail >= node->def_stack)
-      return *node->def_stack_tail;
-
-   /* If we got here then we don't have a definition that dominates the
-    * given block.  This means that we need to add an undef and use that.
-    */
-   nir_ssa_undef_instr *undef =
-      nir_ssa_undef_instr_create(state->shader,
-                                 glsl_get_vector_elements(node->type));
-   nir_instr_insert_before_cf_list(&state->impl->body, &undef->instr);
-   def_stack_push(node, &undef->def, state);
-   return &undef->def;
-}
-
-/* Given a block and one of its predecessors, this function fills in the
- * souces of the phi nodes to take SSA defs from the given predecessor.
- * This function must be called exactly once per block/predecessor pair.
- */
-static void
-add_phi_sources(nir_block *block, nir_block *pred,
-                struct lower_variables_state *state)
-{
-   nir_foreach_instr(block, instr) {
-      if (instr->type != nir_instr_type_phi)
-         break;
-
-      nir_phi_instr *phi = nir_instr_as_phi(instr);
-
-      struct hash_entry *entry =
-            _mesa_hash_table_search(state->phi_table, phi);
-      if (!entry)
-         continue;
-
-      struct deref_node *node = entry->data;
-
-      nir_phi_src *src = ralloc(phi, nir_phi_src);
-      src->pred = pred;
-      src->src.parent_instr = &phi->instr;
-      src->src.is_ssa = true;
-      src->src.ssa = get_ssa_def_for_block(node, pred, state);
-
-      list_addtail(&src->src.use_link, &src->src.ssa->uses);
-
-      exec_list_push_tail(&phi->srcs, &src->node);
-   }
-}
-
 /* Performs variable renaming by doing a DFS of the dominance tree
  *
  * This algorithm is very similar to the one outlined in "Efficiently
@@ -595,266 +486,127 @@ rename_variables_block(nir_block *block, struct lower_variables_state *state)
    nir_builder_init(&b, state->impl);
 
    nir_foreach_instr_safe(block, instr) {
-      if (instr->type == nir_instr_type_phi) {
-         nir_phi_instr *phi = nir_instr_as_phi(instr);
-
-         struct hash_entry *entry =
-            _mesa_hash_table_search(state->phi_table, phi);
-
-         /* This can happen if we already have phi nodes in the program
-          * that were not created in this pass.
-          */
-         if (!entry)
-            continue;
-
-         struct deref_node *node = entry->data;
-
-         def_stack_push(node, &phi->dest.ssa, state);
-      } else if (instr->type == nir_instr_type_intrinsic) {
-         nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
-
-         switch (intrin->intrinsic) {
-         case nir_intrinsic_load_var: {
-            struct deref_node *node =
-               get_deref_node(intrin->variables[0], state);
-
-            if (node == NULL) {
-               /* If we hit this path then we are referencing an invalid
-                * value.  Most likely, we unrolled something and are
-                * reading past the end of some array.  In any case, this
-                * should result in an undefined value.
-                */
-               nir_ssa_undef_instr *undef =
-                  nir_ssa_undef_instr_create(state->shader,
-                                             intrin->num_components);
-
-               nir_instr_insert_before(&intrin->instr, &undef->instr);
-               nir_instr_remove(&intrin->instr);
-
-               nir_ssa_def_rewrite_uses(&intrin->dest.ssa,
-                                        nir_src_for_ssa(&undef->def));
-               continue;
-            }
-
-            if (!node->lower_to_ssa)
-               continue;
-
-            nir_alu_instr *mov = nir_alu_instr_create(state->shader,
-                                                      nir_op_imov);
-            mov->src[0].src.is_ssa = true;
-            mov->src[0].src.ssa = get_ssa_def_for_block(node, block, state);
-            for (unsigned i = intrin->num_components; i < 4; i++)
-               mov->src[0].swizzle[i] = 0;
+      if (instr->type != nir_instr_type_intrinsic)
+         continue;
 
-            assert(intrin->dest.is_ssa);
+      nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
 
-            mov->dest.write_mask = (1 << intrin->num_components) - 1;
-            nir_ssa_dest_init(&mov->instr, &mov->dest.dest,
-                              intrin->num_components, NULL);
+      switch (intrin->intrinsic) {
+      case nir_intrinsic_load_var: {
+         struct deref_node *node =
+            get_deref_node(intrin->variables[0], state);
+
+         if (node == NULL) {
+            /* If we hit this path then we are referencing an invalid
+             * value.  Most likely, we unrolled something and are
+             * reading past the end of some array.  In any case, this
+             * should result in an undefined value.
+             */
+            nir_ssa_undef_instr *undef =
+               nir_ssa_undef_instr_create(state->shader,
+                                          intrin->num_components);
 
-            nir_instr_insert_before(&intrin->instr, &mov->instr);
+            nir_instr_insert_before(&intrin->instr, &undef->instr);
             nir_instr_remove(&intrin->instr);
 
             nir_ssa_def_rewrite_uses(&intrin->dest.ssa,
-                                     nir_src_for_ssa(&mov->dest.dest.ssa));
-            break;
+                                     nir_src_for_ssa(&undef->def));
+            continue;
          }
 
-         case nir_intrinsic_store_var: {
-            struct deref_node *node =
-               get_deref_node(intrin->variables[0], state);
+         if (!node->lower_to_ssa)
+            continue;
 
-            if (node == NULL) {
-               /* Probably an out-of-bounds array store.  That should be a
-                * no-op. */
-               nir_instr_remove(&intrin->instr);
-               continue;
-            }
+         nir_alu_instr *mov = nir_alu_instr_create(state->shader,
+                                                   nir_op_imov);
+         mov->src[0].src = nir_src_for_ssa(
+            nir_phi_builder_value_get_block_def(node->pb_value, block));
+         for (unsigned i = intrin->num_components; i < 4; i++)
+            mov->src[0].swizzle[i] = 0;
 
-            if (!node->lower_to_ssa)
-               continue;
-
-            assert(intrin->num_components ==
-                   glsl_get_vector_elements(node->type));
-
-            assert(intrin->src[0].is_ssa);
-
-            nir_ssa_def *new_def;
-            b.cursor = nir_before_instr(&intrin->instr);
-
-            unsigned wrmask = nir_intrinsic_write_mask(intrin);
-            if (wrmask == (1 << intrin->num_components) - 1) {
-               /* Whole variable store - just copy the source.  Note that
-                * intrin->num_components and intrin->src[0].ssa->num_components
-                * may differ.
-                */
-               unsigned swiz[4];
-               for (unsigned i = 0; i < 4; i++)
-                  swiz[i] = i < intrin->num_components ? i : 0;
-
-               new_def = nir_swizzle(&b, intrin->src[0].ssa, swiz,
-                                     intrin->num_components, false);
-            } else {
-               nir_ssa_def *old_def = get_ssa_def_for_block(node, block, state);
-               /* For writemasked store_var intrinsics, we combine the newly
-                * written values with the existing contents of unwritten
-                * channels, creating a new SSA value for the whole vector.
-                */
-               nir_ssa_def *srcs[4];
-               for (unsigned i = 0; i < intrin->num_components; i++) {
-                  if (wrmask & (1 << i)) {
-                     srcs[i] = nir_channel(&b, intrin->src[0].ssa, i);
-                  } else {
-                     srcs[i] = nir_channel(&b, old_def, i);
-                  }
-               }
-               new_def = nir_vec(&b, srcs, intrin->num_components);
-            }
-
-            assert(new_def->num_components == intrin->num_components);
+         assert(intrin->dest.is_ssa);
 
-            def_stack_push(node, new_def, state);
+         mov->dest.write_mask = (1 << intrin->num_components) - 1;
+         nir_ssa_dest_init(&mov->instr, &mov->dest.dest,
+                           intrin->num_components, NULL);
 
-            /* We'll wait to remove the instruction until the next pass
-             * where we pop the node we just pushed back off the stack.
-             */
-            break;
-         }
+         nir_instr_insert_before(&intrin->instr, &mov->instr);
+         nir_instr_remove(&intrin->instr);
 
-         default:
-            break;
-         }
+         nir_ssa_def_rewrite_uses(&intrin->dest.ssa,
+                                  nir_src_for_ssa(&mov->dest.dest.ssa));
+         break;
       }
-   }
-
-   if (block->successors[0])
-      add_phi_sources(block->successors[0], block, state);
-   if (block->successors[1])
-      add_phi_sources(block->successors[1], block, state);
-
-   for (unsigned i = 0; i < block->num_dom_children; ++i)
-      rename_variables_block(block->dom_children[i], state);
-
-   /* Now we iterate over the instructions and pop off any SSA defs that we
-    * pushed in the first loop.
-    */
-   nir_foreach_instr_safe(block, instr) {
-      if (instr->type == nir_instr_type_phi) {
-         nir_phi_instr *phi = nir_instr_as_phi(instr);
-
-         struct hash_entry *entry =
-            _mesa_hash_table_search(state->phi_table, phi);
-
-         /* This can happen if we already have phi nodes in the program
-          * that were not created in this pass.
-          */
-         if (!entry)
-            continue;
-
-         struct deref_node *node = entry->data;
 
-         def_stack_pop_if_in_block(node, block);
-      } else if (instr->type == nir_instr_type_intrinsic) {
-         nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
+      case nir_intrinsic_store_var: {
+         struct deref_node *node =
+            get_deref_node(intrin->variables[0], state);
 
-         if (intrin->intrinsic != nir_intrinsic_store_var)
-            continue;
-
-         struct deref_node *node = get_deref_node(intrin->variables[0], state);
-         if (!node)
+         if (node == NULL) {
+            /* Probably an out-of-bounds array store.  That should be a
+             * no-op. */
+            nir_instr_remove(&intrin->instr);
             continue;
+         }
 
          if (!node->lower_to_ssa)
             continue;
 
-         def_stack_pop_if_in_block(node, block);
-         nir_instr_remove(&intrin->instr);
-      }
-   }
-
-   return true;
-}
-
-/* Inserts phi nodes for all variables marked lower_to_ssa
- *
- * This is the same algorithm as presented in "Efficiently Computing Static
- * Single Assignment Form and the Control Dependence Graph" by Cytron et.
- * al.
- */
-static void
-insert_phi_nodes(struct lower_variables_state *state)
-{
-   NIR_VLA_ZERO(unsigned, work, state->impl->num_blocks);
-   NIR_VLA_ZERO(unsigned, has_already, state->impl->num_blocks);
-
-   /*
-    * Since the work flags already prevent us from inserting a node that has
-    * ever been inserted into W, we don't need to use a set to represent W.
-    * Also, since no block can ever be inserted into W more than once, we know
-    * that the maximum size of W is the number of basic blocks in the
-    * function. So all we need to handle W is an array and a pointer to the
-    * next element to be inserted and the next element to be removed.
-    */
-   NIR_VLA(nir_block *, W, state->impl->num_blocks);
-
-   unsigned w_start, w_end;
-   unsigned iter_count = 0;
-
-   foreach_list_typed(struct deref_node, node, direct_derefs_link,
-                      &state->direct_deref_nodes) {
-      if (node->stores == NULL)
-         continue;
+         assert(intrin->num_components ==
+                glsl_get_vector_elements(node->type));
 
-      if (!node->lower_to_ssa)
-         continue;
+         assert(intrin->src[0].is_ssa);
 
-      w_start = w_end = 0;
-      iter_count++;
+         nir_ssa_def *new_def;
+         b.cursor = nir_before_instr(&intrin->instr);
 
-      struct set_entry *store_entry;
-      set_foreach(node->stores, store_entry) {
-         nir_intrinsic_instr *store = (nir_intrinsic_instr *)store_entry->key;
-         if (work[store->instr.block->index] < iter_count)
-            W[w_end++] = store->instr.block;
-         work[store->instr.block->index] = iter_count;
-      }
-
-      while (w_start != w_end) {
-         nir_block *cur = W[w_start++];
-         struct set_entry *dom_entry;
-         set_foreach(cur->dom_frontier, dom_entry) {
-            nir_block *next = (nir_block *) dom_entry->key;
-
-            /*
-             * If there's more than one return statement, then the end block
-             * can be a join point for some definitions. However, there are
-             * no instructions in the end block, so nothing would use those
-             * phi nodes. Of course, we couldn't place those phi nodes
-             * anyways due to the restriction of having no instructions in the
-             * end block...
+         unsigned wrmask = nir_intrinsic_write_mask(intrin);
+         if (wrmask == (1 << intrin->num_components) - 1) {
+            /* Whole variable store - just copy the source.  Note that
+             * intrin->num_components and intrin->src[0].ssa->num_components
+             * may differ.
              */
-            if (next == state->impl->end_block)
-               continue;
-
-            if (has_already[next->index] < iter_count) {
-               nir_phi_instr *phi = nir_phi_instr_create(state->shader);
-               nir_ssa_dest_init(&phi->instr, &phi->dest,
-                                 glsl_get_vector_elements(node->type), NULL);
-               nir_instr_insert_before_block(next, &phi->instr);
+            unsigned swiz[4];
+            for (unsigned i = 0; i < 4; i++)
+               swiz[i] = i < intrin->num_components ? i : 0;
 
-               _mesa_hash_table_insert(state->phi_table, phi, node);
-
-               has_already[next->index] = iter_count;
-               if (work[next->index] < iter_count) {
-                  work[next->index] = iter_count;
-                  W[w_end++] = next;
+            new_def = nir_swizzle(&b, intrin->src[0].ssa, swiz,
+                                  intrin->num_components, false);
+         } else {
+            nir_ssa_def *old_def =
+               nir_phi_builder_value_get_block_def(node->pb_value, block);
+            /* For writemasked store_var intrinsics, we combine the newly
+             * written values with the existing contents of unwritten
+             * channels, creating a new SSA value for the whole vector.
+             */
+            nir_ssa_def *srcs[4];
+            for (unsigned i = 0; i < intrin->num_components; i++) {
+               if (wrmask & (1 << i)) {
+                  srcs[i] = nir_channel(&b, intrin->src[0].ssa, i);
+               } else {
+                  srcs[i] = nir_channel(&b, old_def, i);
                }
             }
+            new_def = nir_vec(&b, srcs, intrin->num_components);
          }
+
+         assert(new_def->num_components == intrin->num_components);
+
+         nir_phi_builder_value_set_block_def(node->pb_value, block, new_def);
+         nir_instr_remove(&intrin->instr);
+         break;
+      }
+
+      default:
+         break;
       }
    }
-}
 
+   for (unsigned i = 0; i < block->num_dom_children; ++i)
+      rename_variables_block(block->dom_children[i], state);
+
+   return true;
+}
 
 /** Implements a pass to lower variable uses to SSA values
  *
@@ -896,9 +648,6 @@ nir_lower_vars_to_ssa_impl(nir_function_impl *impl)
                                                    _mesa_hash_pointer,
                                                    _mesa_key_pointer_equal);
    exec_list_make_empty(&state.direct_deref_nodes);
-   state.phi_table = _mesa_hash_table_create(state.dead_ctx,
-                                             _mesa_hash_pointer,
-                                             _mesa_key_pointer_equal);
 
    /* Build the initial deref structures and direct_deref_nodes table */
    state.add_to_direct_deref_nodes = true;
@@ -928,15 +677,6 @@ nir_lower_vars_to_ssa_impl(nir_function_impl *impl)
       node->lower_to_ssa = true;
       progress = true;
 
-      if (deref->var->constant_initializer) {
-         nir_load_const_instr *load =
-            nir_deref_get_const_initializer_load(state.shader, deref);
-         nir_ssa_def_init(&load->instr, &load->def,
-                          glsl_get_vector_elements(node->type), NULL);
-         nir_instr_insert_before_cf_list(&impl->body, &load->instr);
-         def_stack_push(node, &load->def, &state);
-      }
-
       foreach_deref_node_match(deref, lower_copies_to_load_store, &state);
    }
 
@@ -953,9 +693,47 @@ nir_lower_vars_to_ssa_impl(nir_function_impl *impl)
     */
    nir_foreach_block(impl, register_variable_uses_block, &state);
 
-   insert_phi_nodes(&state);
+   state.phi_builder = nir_phi_builder_create(state.impl);
+
+   NIR_VLA(BITSET_WORD, store_blocks, BITSET_WORDS(state.impl->num_blocks));
+   foreach_list_typed(struct deref_node, node, direct_derefs_link,
+                      &state.direct_deref_nodes) {
+      if (!node->lower_to_ssa)
+         continue;
+
+      memset(store_blocks, 0,
+             BITSET_WORDS(state.impl->num_blocks) * sizeof(*store_blocks));
+
+      if (node->stores) {
+         struct set_entry *store_entry;
+         set_foreach(node->stores, store_entry) {
+            nir_intrinsic_instr *store =
+               (nir_intrinsic_instr *)store_entry->key;
+            BITSET_SET(store_blocks, store->instr.block->index);
+         }
+      }
+
+      if (node->deref->var->constant_initializer)
+         BITSET_SET(store_blocks, 0);
+
+      node->pb_value =
+         nir_phi_builder_add_value(state.phi_builder,
+                                   glsl_get_vector_elements(node->type),
+                                   store_blocks);
+
+      if (node->deref->var->constant_initializer) {
+         nir_load_const_instr *load =
+            nir_deref_get_const_initializer_load(state.shader, node->deref);
+         nir_instr_insert_before_cf_list(&impl->body, &load->instr);
+         nir_phi_builder_value_set_block_def(node->pb_value,
+                                             nir_start_block(impl), &load->def);
+      }
+   }
+
    rename_variables_block(nir_start_block(impl), &state);
 
+   nir_phi_builder_finish(state.phi_builder);
+
    nir_metadata_preserve(impl, nir_metadata_block_index |
                                nir_metadata_dominance);
 
index a37fe2dc060b26fb001d2030a8ce69927fa51264..60ade4a80ae9140d36f1c7dd91a22c87579f74a5 100644 (file)
@@ -176,6 +176,7 @@ unop("ffloor", tfloat, "floorf(src0)")
 unop("ffract", tfloat, "src0 - floorf(src0)")
 unop("fround_even", tfloat, "_mesa_roundevenf(src0)")
 
+unop("fquantize2f16", tfloat, "(fabs(src0) < ldexpf(1.0, -14)) ? copysignf(0.0f, src0) : _mesa_half_to_float(_mesa_float_to_half(src0))")
 
 # Trigonometric operations.
 
@@ -378,9 +379,23 @@ binop_convert("uadd_carry", tuint, tuint, commutative, "src0 + src1 < src0")
 
 binop_convert("usub_borrow", tuint, tuint, "", "src0 < src1")
 
-binop("fmod", tfloat, "", "src0 - src1 * floorf(src0 / src1)")
 binop("umod", tuint, "", "src1 == 0 ? 0 : src0 % src1")
 
+# For signed integers, there are several different possible definitions of
+# "modulus" or "remainder".  We follow the conventions used by LLVM and
+# SPIR-V.  The irem opcode implements the standard C/C++ signed "%"
+# operation while the imod opcode implements the more mathematical
+# "modulus" operation.  For details on the difference, see
+#
+# http://mathforum.org/library/drmath/view/52343.html
+
+binop("irem", tint, "", "src1 == 0 ? 0 : src0 % src1")
+binop("imod", tint, "",
+      "src1 == 0 ? 0 : ((src0 % src1 == 0 || (src0 >= 0) == (src1 >= 0)) ?"
+      "                 src0 % src1 : src0 % src1 + src1)")
+binop("fmod", tfloat, "", "src0 - src1 * floorf(src0 / src1)")
+binop("frem", tfloat, "", "src0 - src1 * truncf(src0 / src1)")
+
 #
 # Comparisons
 #
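A side note on the irem/imod comment above: a minimal standalone C sketch of the same two definitions (reference only, not part of the patch; the helper names are made up here, but the expressions mirror the constant-folding strings in the opcodes):

    #include <stdio.h>

    static int irem_ref(int a, int b) { return b == 0 ? 0 : a % b; }

    static int imod_ref(int a, int b)
    {
       if (b == 0)
          return 0;
       int r = a % b;
       /* When the remainder is non-zero and the operand signs differ,
        * shift it so the result takes the sign of the divisor. */
       return (r == 0 || (a >= 0) == (b >= 0)) ? r : r + b;
    }

    int main(void)
    {
       printf("irem(-7, 3) = %d\n", irem_ref(-7, 3));   /* -1 (C-style %) */
       printf("imod(-7, 3) = %d\n", imod_ref(-7, 3));   /*  2 (floored)   */
       return 0;
    }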
index 39be85f639e8372f64c238217dd8f160463661ca..54f7d86843aaafcc2136268a01ae13ed4afd3ffc 100644 (file)
@@ -1,4 +1,5 @@
 #! /usr/bin/env python
+# -*- encoding: utf-8 -*-
 #
 # Copyright (C) 2014 Intel Corporation
 #
@@ -74,6 +75,7 @@ optimizations = [
    (('imul', a, 1), a),
    (('fmul', a, -1.0), ('fneg', a)),
    (('imul', a, -1), ('ineg', a)),
+   (('fdiv', a, b), ('fmul', a, ('frcp', b)), 'options->lower_fdiv'),
    (('ffma', 0.0, a, b), b),
    (('ffma', a, 0.0, b), b),
    (('ffma', a, b, 0.0), ('fmul', a, b)),
@@ -241,8 +243,11 @@ optimizations = [
 
    # Misc. lowering
    (('fmod', a, b), ('fsub', a, ('fmul', b, ('ffloor', ('fdiv', a, b)))), 'options->lower_fmod'),
+   (('frem', a, b), ('fsub', a, ('fmul', b, ('ftrunc', ('fdiv', a, b)))), 'options->lower_fmod'),
    (('uadd_carry', a, b), ('b2i', ('ult', ('iadd', a, b), a)), 'options->lower_uadd_carry'),
    (('usub_borrow', a, b), ('b2i', ('ult', a, b)), 'options->lower_usub_borrow'),
+   (('ldexp', 'x', 'exp'),
+    ('fmul', 'x', ('ishl', ('imin', ('imax', ('iadd', 'exp', 0x7f), 0), 0xff), 23))),
 
    (('bitfield_insert', 'base', 'insert', 'offset', 'bits'),
     ('bcsel', ('ilt', 31, 'bits'), 'insert',
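The ldexp lowering added in this hunk builds 2**exp by clamping the biased exponent (exp + 0x7f) into [0, 255] and shifting it into the exponent field of an IEEE-754 single, then multiplying. A rough standalone C sketch of the same trick (illustrative only, not part of the patch; like the rule, it saturates to zero or infinity when the clamp hits either end):

    #include <stdint.h>
    #include <string.h>
    #include <stdio.h>

    static float lowered_ldexp(float x, int exp)
    {
       int biased = exp + 0x7f;                 /* the iadd in the rule */
       if (biased < 0)    biased = 0;           /* the imax clamp */
       if (biased > 0xff) biased = 0xff;        /* the imin clamp */

       uint32_t bits = (uint32_t)biased << 23;  /* exponent field only */
       float pow2;
       memcpy(&pow2, &bits, sizeof pow2);       /* reinterpret bits as float */

       return x * pow2;                         /* the final fmul */
    }

    int main(void)
    {
       printf("%f\n", lowered_ldexp(3.0f, 4));  /* prints 48.000000 */
       return 0;
    }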
diff --git a/src/compiler/nir/nir_phi_builder.c b/src/compiler/nir/nir_phi_builder.c
new file mode 100644 (file)
index 0000000..5429083
--- /dev/null
@@ -0,0 +1,254 @@
+/*
+ * Copyright © 2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "nir_phi_builder.h"
+#include "nir/nir_vla.h"
+
+struct nir_phi_builder {
+   nir_shader *shader;
+   nir_function_impl *impl;
+
+   /* Copied from the impl for easy access */
+   unsigned num_blocks;
+
+   /* Array of all blocks indexed by block->index. */
+   nir_block **blocks;
+
+   /* Hold on to the values so we can easily iterate over them. */
+   struct exec_list values;
+
+   /* Worklist for phi adding */
+   unsigned iter_count;
+   unsigned *work;
+   nir_block **W;
+};
+
+#define NEEDS_PHI ((nir_ssa_def *)(intptr_t)-1)
+
+struct nir_phi_builder_value {
+   struct exec_node node;
+
+   struct nir_phi_builder *builder;
+
+   /* Needed so we can create phis and undefs */
+   unsigned num_components;
+
+   /* The list of phi nodes associated with this value.  Phi nodes are not
+    * added directly.  Instead, they are created, the instr->block pointer
+    * set, and then added to this list.  Later, in phi_builder_finish, we
+    * set up their sources and add them to the top of their respective
+    * blocks.
+    */
+   struct exec_list phis;
+
+   /* Array of SSA defs, indexed by block.  If a phi needs to be inserted
+    * in a given block, it will have the magic value NEEDS_PHI.
+    */
+   nir_ssa_def *defs[0];
+};
+
+static bool
+fill_block_array(nir_block *block, void *void_data)
+{
+   nir_block **blocks = void_data;
+   blocks[block->index] = block;
+   return true;
+}
+
+struct nir_phi_builder *
+nir_phi_builder_create(nir_function_impl *impl)
+{
+   struct nir_phi_builder *pb = ralloc(NULL, struct nir_phi_builder);
+
+   pb->shader = impl->function->shader;
+   pb->impl = impl;
+
+   assert(impl->valid_metadata & (nir_metadata_block_index |
+                                  nir_metadata_dominance));
+
+   pb->num_blocks = impl->num_blocks;
+   pb->blocks = ralloc_array(pb, nir_block *, pb->num_blocks);
+   nir_foreach_block(impl, fill_block_array, pb->blocks);
+
+   exec_list_make_empty(&pb->values);
+
+   pb->iter_count = 0;
+   pb->work = rzalloc_array(pb, unsigned, pb->num_blocks);
+   pb->W = ralloc_array(pb, nir_block *, pb->num_blocks);
+
+   return pb;
+}
+
+struct nir_phi_builder_value *
+nir_phi_builder_add_value(struct nir_phi_builder *pb, unsigned num_components,
+                          const BITSET_WORD *defs)
+{
+   struct nir_phi_builder_value *val;
+   unsigned i, w_start = 0, w_end = 0;
+
+   val = rzalloc_size(pb, sizeof(*val) + sizeof(val->defs[0]) * pb->num_blocks);
+   val->builder = pb;
+   val->num_components = num_components;
+   exec_list_make_empty(&val->phis);
+   exec_list_push_tail(&pb->values, &val->node);
+
+   pb->iter_count++;
+
+   BITSET_WORD tmp;
+   BITSET_FOREACH_SET(i, tmp, defs, pb->num_blocks) {
+      if (pb->work[i] < pb->iter_count)
+         pb->W[w_end++] = pb->blocks[i];
+      pb->work[i] = pb->iter_count;
+   }
+
+   while (w_start != w_end) {
+      nir_block *cur = pb->W[w_start++];
+      struct set_entry *dom_entry;
+      set_foreach(cur->dom_frontier, dom_entry) {
+         nir_block *next = (nir_block *) dom_entry->key;
+
+         /*
+          * If there's more than one return statement, then the end block
+          * can be a join point for some definitions. However, there are
+          * no instructions in the end block, so nothing would use those
+          * phi nodes. Of course, we couldn't place those phi nodes
+          * anyway due to the restriction of having no instructions in the
+          * end block...
+          */
+         if (next == pb->impl->end_block)
+            continue;
+
+         if (val->defs[next->index] == NULL) {
+            val->defs[next->index] = NEEDS_PHI;
+
+            if (pb->work[next->index] < pb->iter_count) {
+               pb->work[next->index] = pb->iter_count;
+               pb->W[w_end++] = next;
+            }
+         }
+      }
+   }
+
+   return val;
+}
+
+void
+nir_phi_builder_value_set_block_def(struct nir_phi_builder_value *val,
+                                    nir_block *block, nir_ssa_def *def)
+{
+   val->defs[block->index] = def;
+}
+
+nir_ssa_def *
+nir_phi_builder_value_get_block_def(struct nir_phi_builder_value *val,
+                                    nir_block *block)
+{
+   if (val->defs[block->index] == NULL) {
+      if (block->imm_dom) {
+         /* Grab it from our immediate dominator.  We'll stash it here for
+          * easy access later.
+          */
+         val->defs[block->index] =
+            nir_phi_builder_value_get_block_def(val, block->imm_dom);
+         return val->defs[block->index];
+      } else {
+         /* No immediate dominator means that this block is either the
+          * start block or unreachable.  In either case, the value is
+          * undefined so we need an SSA undef.
+          */
+         nir_ssa_undef_instr *undef =
+            nir_ssa_undef_instr_create(val->builder->shader,
+                                       val->num_components);
+         nir_instr_insert(nir_before_cf_list(&val->builder->impl->body),
+                          &undef->instr);
+         val->defs[block->index] = &undef->def;
+         return &undef->def;
+      }
+   } else if (val->defs[block->index] == NEEDS_PHI) {
+      /* If we need a phi instruction, go ahead and create one but don't
+       * add it to the program yet.  Later, we'll go through, set up the phi
+       * sources, and add the instructions to the program at that time.
+       */
+      nir_phi_instr *phi = nir_phi_instr_create(val->builder->shader);
+      nir_ssa_dest_init(&phi->instr, &phi->dest, val->num_components, NULL);
+      phi->instr.block = block;
+      exec_list_push_tail(&val->phis, &phi->instr.node);
+      val->defs[block->index] = &phi->dest.ssa;
+      return &phi->dest.ssa;
+   } else {
+      return val->defs[block->index];
+   }
+}
+
+static int
+compare_blocks(const void *_a, const void *_b)
+{
+   nir_block * const * a = _a;
+   nir_block * const * b = _b;
+
+   return (*a)->index - (*b)->index;
+}
+
+void
+nir_phi_builder_finish(struct nir_phi_builder *pb)
+{
+   const unsigned num_blocks = pb->num_blocks;
+   NIR_VLA(nir_block *, preds, num_blocks);
+
+   foreach_list_typed(struct nir_phi_builder_value, val, node, &pb->values) {
+      /* We can't iterate over the list of phis normally because we are
+       * removing them as we go and, in some cases, adding new phis as we
+       * build the source lists of others.
+       */
+      while (!exec_list_is_empty(&val->phis)) {
+         struct exec_node *head = exec_list_get_head(&val->phis);
+         nir_phi_instr *phi = exec_node_data(nir_phi_instr, head, instr.node);
+         assert(phi->instr.type == nir_instr_type_phi);
+
+         exec_node_remove(&phi->instr.node);
+
+         /* Construct an array of predecessors.  We sort it to ensure
+          * determinism in the phi insertion algorithm.
+          *
+          * XXX: Calling qsort this many times seems expensive.
+          */
+         unsigned num_preds = 0;
+         struct set_entry *entry;
+         set_foreach(phi->instr.block->predecessors, entry)
+            preds[num_preds++] = (nir_block *)entry->key;
+         qsort(preds, num_preds, sizeof(*preds), compare_blocks);
+
+         for (unsigned i = 0; i < num_preds; i++) {
+            nir_phi_src *src = ralloc(phi, nir_phi_src);
+            src->pred = preds[i];
+            src->src = nir_src_for_ssa(
+               nir_phi_builder_value_get_block_def(val, preds[i]));
+            exec_list_push_tail(&phi->srcs, &src->node);
+         }
+
+         nir_instr_insert(nir_before_block(phi->instr.block), &phi->instr);
+      }
+   }
+
+   ralloc_free(pb);
+}
diff --git a/src/compiler/nir/nir_phi_builder.h b/src/compiler/nir/nir_phi_builder.h
new file mode 100644 (file)
index 0000000..50251bf
--- /dev/null
@@ -0,0 +1,84 @@
+/*
+ * Copyright © 2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#pragma once
+
+#include "nir.h"
+
+struct nir_phi_builder;
+struct nir_phi_builder_value;
+
+/* Create a new phi builder.
+ *
+ * While this is fairly cheap, it does allocate some memory and walk the list
+ * of blocks so it's recommended that you only call it once and use it to
+ * build phis for several values.
+ */
+struct nir_phi_builder *nir_phi_builder_create(nir_function_impl *impl);
+
+/* Register a value with the builder.
+ *
+ * The 'defs' parameter specifies a bitset of blocks in which the given value
+ * is defined.  This is used to determine where to place the phi nodes.
+ */
+struct nir_phi_builder_value *
+nir_phi_builder_add_value(struct nir_phi_builder *pb, unsigned num_components,
+                          const BITSET_WORD *defs);
+
+/* Register a definition for the given value and block.
+ *
+ * It is safe to call this function as many times as you wish for any given
+ * block/value pair.  However, it always replaces whatever was there
+ * previously even if that definition is from a phi node.  The phi builder
+ * always uses the latest information it has, so you must be careful about the
+ * order in which you register definitions.  The final value at the end of the
+ * block must be the last value registered.
+ */
+void
+nir_phi_builder_value_set_block_def(struct nir_phi_builder_value *val,
+                                    nir_block *block, nir_ssa_def *def);
+
+/* Get the definition for the given value in the given block.
+ *
+ * This definition will always be the latest definition known for the given
+ * block.  If no definition is immediately available, it will crawl up the
+ * dominance tree and insert phi nodes as needed until it finds one.  In the
+ * case that no suitable definition is found, it will return the result of a
+ * nir_ssa_undef_instr with the correct number of components.
+ *
+ * Because this function only uses the latest available information for any
+ * given block, you must have already finished registering definitions for any
+ * blocks that dominate the current block in order to get the correct result.
+ */
+nir_ssa_def *
+nir_phi_builder_value_get_block_def(struct nir_phi_builder_value *val,
+                                    nir_block *block);
+
+/* Finish building phi nodes and free the builder.
+ *
+ * This function does far more than just free memory.  Prior to calling
+ * nir_phi_builder_finish, no phi nodes have actually been inserted in the
+ * program.  This function is what finishes setting up phi node sources and
+ * adds the phi nodes to the program.
+ */
+void nir_phi_builder_finish(struct nir_phi_builder *pb);
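To tie the API comments above together, here is a minimal usage sketch (not from the patch; the function and parameter names are hypothetical, and only the nir_phi_builder_* calls come from this header). It assumes the impl already has block-index and dominance metadata, registers one definition, and asks for a dominance-correct definition in another block:

    #include "nir.h"
    #include "nir_phi_builder.h"

    /* Hypothetical helper: "def" is written in "def_block", and we want the
     * value visible in "use_block".  "def_blocks" is a bitset with
     * def_block's index set, as described for nir_phi_builder_add_value. */
    static nir_ssa_def *
    get_def_in_block(nir_function_impl *impl, nir_block *def_block,
                     nir_ssa_def *def, nir_block *use_block,
                     const BITSET_WORD *def_blocks)
    {
       struct nir_phi_builder *pb = nir_phi_builder_create(impl);

       struct nir_phi_builder_value *val =
          nir_phi_builder_add_value(pb, def->num_components, def_blocks);

       /* Register the definition, then query it from the other block; the
        * builder walks up the dominance tree, creating phis or undefs. */
       nir_phi_builder_value_set_block_def(val, def_block, def);
       nir_ssa_def *result =
          nir_phi_builder_value_get_block_def(val, use_block);

       /* The phis are only materialized and inserted here. */
       nir_phi_builder_finish(pb);

       return result;
    }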
index bdfbd2600c030f6fc47b1537715c716311e3022d..24d5281ec54aa9a65220dc32c2110b30e7c3537e 100644 (file)
@@ -312,7 +312,8 @@ print_var_decl(nir_variable *var, print_state *state)
    const char *const patch = (var->data.patch) ? "patch " : "";
    const char *const inv = (var->data.invariant) ? "invariant " : "";
    const char *const mode[] = { "shader_in ", "shader_out ", "", "",
-                                "uniform ", "shader_storage", "system " };
+                                "uniform ", "shader_storage ", "shared ",
+                                "system "};
 
    fprintf(fp, "%s%s%s%s%s%s ",
       cent, samp, patch, inv, mode[var->data.mode],
@@ -501,6 +502,9 @@ print_intrinsic_instr(nir_intrinsic_instr *instr, print_state *state)
       [NIR_INTRINSIC_WRMASK] = "wrmask",
       [NIR_INTRINSIC_STREAM_ID] = "stream-id",
       [NIR_INTRINSIC_UCP_ID] = "ucp-id",
+      [NIR_INTRINSIC_RANGE] = "range",
+      [NIR_INTRINSIC_DESC_SET] = "desc-set",
+      [NIR_INTRINSIC_BINDING] = "binding",
    };
    for (unsigned idx = 1; idx < NIR_INTRINSIC_NUM_INDEX_FLAGS; idx++) {
       if (!info->index_map[idx])
@@ -958,6 +962,16 @@ print_function_impl(nir_function_impl *impl, print_state *state)
 
    fprintf(fp, "{\n");
 
+   for (unsigned i = 0; i < impl->num_params; i++) {
+      fprintf(fp, "\t");
+      print_var_decl(impl->params[i], state);
+   }
+
+   if (impl->return_var) {
+      fprintf(fp, "\t");
+      print_var_decl(impl->return_var, state);
+   }
+
    nir_foreach_variable(var, &impl->locals) {
       fprintf(fp, "\t");
       print_var_decl(var, state);
@@ -1056,6 +1070,7 @@ nir_print_shader(nir_shader *shader, FILE *fp)
    fprintf(fp, "inputs: %u\n", shader->num_inputs);
    fprintf(fp, "outputs: %u\n", shader->num_outputs);
    fprintf(fp, "uniforms: %u\n", shader->num_uniforms);
+   fprintf(fp, "shared: %u\n", shader->num_shared);
 
    nir_foreach_variable(var, &shader->uniforms) {
       print_var_decl(var, &state);
@@ -1069,6 +1084,10 @@ nir_print_shader(nir_shader *shader, FILE *fp)
       print_var_decl(var, &state);
    }
 
+   nir_foreach_variable(var, &shader->shared) {
+      print_var_decl(var, &state);
+   }
+
    nir_foreach_variable(var, &shader->globals) {
       print_var_decl(var, &state);
    }
index 65192682d3c8e61954888631c3a1580e4faeca35..ad69de85b97f2b7aa70858b1e636711dc79c368d 100644 (file)
@@ -120,7 +120,7 @@ remove_dead_vars(struct exec_list *var_list, struct set *live)
 }
 
 bool
-nir_remove_dead_variables(nir_shader *shader)
+nir_remove_dead_variables(nir_shader *shader, nir_variable_mode mode)
 {
    bool progress = false;
    struct set *live =
@@ -128,15 +128,30 @@ nir_remove_dead_variables(nir_shader *shader)
 
    add_var_use_shader(shader, live);
 
-   progress = remove_dead_vars(&shader->globals, live) || progress;
+   if (mode == nir_var_uniform || mode == nir_var_all)
+      progress = remove_dead_vars(&shader->uniforms, live) || progress;
 
-   nir_foreach_function(shader, function) {
-      if (function->impl) {
-         if (remove_dead_vars(&function->impl->locals, live)) {
-            nir_metadata_preserve(function->impl, nir_metadata_block_index |
-                                                  nir_metadata_dominance |
-                                                  nir_metadata_live_ssa_defs);
-            progress = true;
+   if (mode == nir_var_shader_in || mode == nir_var_all)
+      progress = remove_dead_vars(&shader->inputs, live) || progress;
+
+   if (mode == nir_var_shader_out || mode == nir_var_all)
+      progress = remove_dead_vars(&shader->outputs, live) || progress;
+
+   if (mode == nir_var_global || mode == nir_var_all)
+      progress = remove_dead_vars(&shader->globals, live) || progress;
+
+   if (mode == nir_var_system_value || mode == nir_var_all)
+      progress = remove_dead_vars(&shader->system_values, live) || progress;
+
+   if (mode == nir_var_local || mode == nir_var_all) {
+      nir_foreach_function(shader, function) {
+         if (function->impl) {
+            if (remove_dead_vars(&function->impl->locals, live)) {
+               nir_metadata_preserve(function->impl, nir_metadata_block_index |
+                                                     nir_metadata_dominance |
+                                                     nir_metadata_live_ssa_defs);
+               progress = true;
+            }
          }
       }
    }
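A hedged example of the new signature (hypothetical caller, not part of this hunk): a driver that only wants to drop unused function-local temporaries, leaving the shader interface untouched, can now pass a single mode instead of nir_var_all:

    static bool
    prune_dead_locals(nir_shader *shader)
    {
       /* nir_var_all would also scan uniforms, inputs, outputs, globals
        * and system values, as the hunk above shows. */
       return nir_remove_dead_variables(shader, nir_var_local);
    }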
diff --git a/src/compiler/nir/nir_repair_ssa.c b/src/compiler/nir/nir_repair_ssa.c
new file mode 100644 (file)
index 0000000..3ab4f0f
--- /dev/null
@@ -0,0 +1,157 @@
+/*
+ * Copyright © 2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "nir.h"
+#include "nir_phi_builder.h"
+
+struct repair_ssa_state {
+   nir_function_impl *impl;
+
+   BITSET_WORD *def_set;
+   struct nir_phi_builder *phi_builder;
+
+   bool progress;
+};
+
+/* Get ready to build a phi and return the builder */
+static struct nir_phi_builder *
+prep_build_phi(struct repair_ssa_state *state)
+{
+   const unsigned num_words = BITSET_WORDS(state->impl->num_blocks);
+
+   /* We create the phi builder on-demand. */
+   if (state->phi_builder == NULL) {
+      state->phi_builder = nir_phi_builder_create(state->impl);
+      state->def_set = ralloc_array(NULL, BITSET_WORD, num_words);
+   }
+
+   /* We're going to build a phi.  That's progress. */
+   state->progress = true;
+
+   /* Set the defs set to empty */
+   memset(state->def_set, 0, num_words * sizeof(*state->def_set));
+
+   return state->phi_builder;
+}
+
+static nir_block *
+get_src_block(nir_src *src)
+{
+   if (src->parent_instr->type == nir_instr_type_phi) {
+      return exec_node_data(nir_phi_src, src, src)->pred;
+   } else {
+      return src->parent_instr->block;
+   }
+}
+
+static bool
+repair_ssa_def(nir_ssa_def *def, void *void_state)
+{
+   struct repair_ssa_state *state = void_state;
+
+   bool is_valid = true;
+   nir_foreach_use(def, src) {
+      if (!nir_block_dominates(def->parent_instr->block, get_src_block(src))) {
+         is_valid = false;
+         break;
+      }
+   }
+
+   if (is_valid)
+      return true;
+
+   struct nir_phi_builder *pb = prep_build_phi(state);
+
+   BITSET_SET(state->def_set, def->parent_instr->block->index);
+
+   struct nir_phi_builder_value *val =
+      nir_phi_builder_add_value(pb, def->num_components, state->def_set);
+
+   nir_phi_builder_value_set_block_def(val, def->parent_instr->block, def);
+
+   nir_foreach_use_safe(def, src) {
+      nir_block *src_block = get_src_block(src);
+      if (!nir_block_dominates(def->parent_instr->block, src_block)) {
+         nir_instr_rewrite_src(src->parent_instr, src, nir_src_for_ssa(
+            nir_phi_builder_value_get_block_def(val, src_block)));
+      }
+   }
+
+   return true;
+}
+
+static bool
+repair_ssa_block(nir_block *block, void *state)
+{
+   nir_foreach_instr_safe(block, instr) {
+      nir_foreach_ssa_def(instr, repair_ssa_def, state);
+   }
+
+   return true;
+}
+
+bool
+nir_repair_ssa_impl(nir_function_impl *impl)
+{
+   struct repair_ssa_state state;
+
+   state.impl = impl;
+   state.phi_builder = NULL;
+   state.progress = false;
+
+   nir_metadata_require(impl, nir_metadata_block_index |
+                              nir_metadata_dominance);
+
+   nir_foreach_block(impl, repair_ssa_block, &state);
+
+   if (state.progress)
+      nir_metadata_preserve(impl, nir_metadata_block_index |
+                                  nir_metadata_dominance);
+
+   if (state.phi_builder) {
+      nir_phi_builder_finish(state.phi_builder);
+      ralloc_free(state.def_set);
+   }
+
+   return state.progress;
+}
+
+/** This pass can be used to repair SSA form in a shader.
+ *
+ * Sometimes a transformation (such as return lowering) will have to make
+ * changes to a shader which, while still correct, break some of NIR's SSA
+ * invariants.  This pass will insert ssa_undefs and phi nodes as needed to
+ * get the shader back into SSA that the validator will like.
+ */
+bool
+nir_repair_ssa(nir_shader *shader)
+{
+   bool progress = false;
+
+   nir_foreach_function(shader, function) {
+      if (function->impl)
+         progress = nir_repair_ssa_impl(function->impl) || progress;
+   }
+
+   return progress;
+}
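As a hedged sketch of where this pass fits (not part of the patch; lower_something() is a stand-in for a transformation such as return lowering that can break dominance of existing defs):

    static void
    lower_then_repair(nir_shader *shader)
    {
       bool progress = lower_something(shader);   /* hypothetical pass */

       /* Re-establish the SSA dominance invariants before validating. */
       if (progress)
          nir_repair_ssa(shader);

       nir_validate_shader(shader);
    }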
index 0710bdba7c7c950aa8346738c3aa1daf92e2cce7..b22f0f5656937fec6bd057776fd1c5e347f70608 100644 (file)
@@ -119,6 +119,8 @@ sweep_impl(nir_shader *nir, nir_function_impl *impl)
    ralloc_steal(nir, impl);
 
    ralloc_steal(nir, impl->params);
+   for (unsigned i = 0; i < impl->num_params; i++)
+      ralloc_steal(nir, impl->params[i]);
    ralloc_steal(nir, impl->return_var);
    steal_list(nir, nir_variable, &impl->locals);
    steal_list(nir, nir_register, &impl->registers);
@@ -159,6 +161,7 @@ nir_sweep(nir_shader *nir)
    steal_list(nir, nir_variable, &nir->uniforms);
    steal_list(nir, nir_variable, &nir->inputs);
    steal_list(nir, nir_variable, &nir->outputs);
+   steal_list(nir, nir_variable, &nir->shared);
    steal_list(nir, nir_variable, &nir->globals);
    steal_list(nir, nir_variable, &nir->system_values);
    steal_list(nir, nir_register, &nir->registers);
index 0c9d816a384d02b29ed32181bfd24e464f0f40e5..0c32d5fe07a5e83a96deaa66f67f813f821770fa 100644 (file)
@@ -1047,6 +1047,11 @@ nir_validate_shader(nir_shader *shader)
      validate_var_decl(var, true, &state);
    }
 
+   exec_list_validate(&shader->shared);
+   nir_foreach_variable(var, &shader->shared) {
+      validate_var_decl(var, true, &state);
+   }
+
    exec_list_validate(&shader->globals);
    nir_foreach_variable(var, &shader->globals) {
      validate_var_decl(var, true, &state);
diff --git a/src/compiler/nir/spirv/GLSL.std.450.h b/src/compiler/nir/spirv/GLSL.std.450.h
new file mode 100644 (file)
index 0000000..d1c9b5c
--- /dev/null
@@ -0,0 +1,127 @@
+/*
+** Copyright (c) 2014-2015 The Khronos Group Inc.
+**
+** Permission is hereby granted, free of charge, to any person obtaining a copy
+** of this software and/or associated documentation files (the "Materials"),
+** to deal in the Materials without restriction, including without limitation
+** the rights to use, copy, modify, merge, publish, distribute, sublicense,
+** and/or sell copies of the Materials, and to permit persons to whom the
+** Materials are furnished to do so, subject to the following conditions:
+**
+** The above copyright notice and this permission notice shall be included in
+** all copies or substantial portions of the Materials.
+**
+** MODIFICATIONS TO THIS FILE MAY MEAN IT NO LONGER ACCURATELY REFLECTS KHRONOS
+** STANDARDS. THE UNMODIFIED, NORMATIVE VERSIONS OF KHRONOS SPECIFICATIONS AND
+** HEADER INFORMATION ARE LOCATED AT https://www.khronos.org/registry/ 
+**
+** THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+** OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+** THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+** FROM,OUT OF OR IN CONNECTION WITH THE MATERIALS OR THE USE OR OTHER DEALINGS
+** IN THE MATERIALS.
+*/
+
+#ifndef GLSLstd450_H
+#define GLSLstd450_H
+
+const int GLSLstd450Version = 99;
+const int GLSLstd450Revision = 3;
+
+enum GLSLstd450 {
+    GLSLstd450Bad = 0,              // Don't use
+
+    GLSLstd450Round = 1,
+    GLSLstd450RoundEven = 2,
+    GLSLstd450Trunc = 3,
+    GLSLstd450FAbs = 4,
+    GLSLstd450SAbs = 5,
+    GLSLstd450FSign = 6,
+    GLSLstd450SSign = 7,
+    GLSLstd450Floor = 8,
+    GLSLstd450Ceil = 9,
+    GLSLstd450Fract = 10,
+
+    GLSLstd450Radians = 11,
+    GLSLstd450Degrees = 12,
+    GLSLstd450Sin = 13,
+    GLSLstd450Cos = 14,
+    GLSLstd450Tan = 15,
+    GLSLstd450Asin = 16,
+    GLSLstd450Acos = 17,
+    GLSLstd450Atan = 18,
+    GLSLstd450Sinh = 19,
+    GLSLstd450Cosh = 20,
+    GLSLstd450Tanh = 21,
+    GLSLstd450Asinh = 22,
+    GLSLstd450Acosh = 23,
+    GLSLstd450Atanh = 24,
+    GLSLstd450Atan2 = 25,
+
+    GLSLstd450Pow = 26,
+    GLSLstd450Exp = 27,
+    GLSLstd450Log = 28,
+    GLSLstd450Exp2 = 29,
+    GLSLstd450Log2 = 30,
+    GLSLstd450Sqrt = 31,
+    GLSLstd450InverseSqrt = 32,
+
+    GLSLstd450Determinant = 33,
+    GLSLstd450MatrixInverse = 34,
+
+    GLSLstd450Modf = 35,            // second operand needs an OpVariable to write to
+    GLSLstd450ModfStruct = 36,      // no OpVariable operand
+    GLSLstd450FMin = 37,
+    GLSLstd450UMin = 38,
+    GLSLstd450SMin = 39,
+    GLSLstd450FMax = 40,
+    GLSLstd450UMax = 41,
+    GLSLstd450SMax = 42,
+    GLSLstd450FClamp = 43,
+    GLSLstd450UClamp = 44,
+    GLSLstd450SClamp = 45,
+    GLSLstd450FMix = 46,
+    GLSLstd450IMix = 47,
+    GLSLstd450Step = 48,
+    GLSLstd450SmoothStep = 49,
+
+    GLSLstd450Fma = 50,
+    GLSLstd450Frexp = 51,            // second operand needs an OpVariable to write to
+    GLSLstd450FrexpStruct = 52,      // no OpVariable operand
+    GLSLstd450Ldexp = 53,
+
+    GLSLstd450PackSnorm4x8 = 54,
+    GLSLstd450PackUnorm4x8 = 55,
+    GLSLstd450PackSnorm2x16 = 56,
+    GLSLstd450PackUnorm2x16 = 57,
+    GLSLstd450PackHalf2x16 = 58,
+    GLSLstd450PackDouble2x32 = 59,
+    GLSLstd450UnpackSnorm2x16 = 60,
+    GLSLstd450UnpackUnorm2x16 = 61,
+    GLSLstd450UnpackHalf2x16 = 62,
+    GLSLstd450UnpackSnorm4x8 = 63,
+    GLSLstd450UnpackUnorm4x8 = 64,
+    GLSLstd450UnpackDouble2x32 = 65,
+
+    GLSLstd450Length = 66,
+    GLSLstd450Distance = 67,
+    GLSLstd450Cross = 68,
+    GLSLstd450Normalize = 69,
+    GLSLstd450FaceForward = 70,
+    GLSLstd450Reflect = 71,
+    GLSLstd450Refract = 72,
+
+    GLSLstd450FindILsb = 73,
+    GLSLstd450FindSMsb = 74,
+    GLSLstd450FindUMsb = 75,
+
+    GLSLstd450InterpolateAtCentroid = 76,
+    GLSLstd450InterpolateAtSample = 77,
+    GLSLstd450InterpolateAtOffset = 78,
+
+    GLSLstd450Count
+};
+
+#endif  // #ifndef GLSLstd450_H
diff --git a/src/compiler/nir/spirv/nir_spirv.h b/src/compiler/nir/spirv/nir_spirv.h
new file mode 100644 (file)
index 0000000..500f2cb
--- /dev/null
@@ -0,0 +1,54 @@
+/*
+ * Copyright © 2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ *    Jason Ekstrand (jason@jlekstrand.net)
+ *
+ */
+
+#pragma once
+
+#ifndef _NIR_SPIRV_H_
+#define _NIR_SPIRV_H_
+
+#include "nir/nir.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+struct nir_spirv_specialization {
+   uint32_t id;
+   uint32_t data;
+};
+
+nir_function *spirv_to_nir(const uint32_t *words, size_t word_count,
+                           struct nir_spirv_specialization *specializations,
+                           unsigned num_specializations,
+                           gl_shader_stage stage, const char *entry_point_name,
+                           const nir_shader_compiler_options *options);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _NIR_SPIRV_H_ */
diff --git a/src/compiler/nir/spirv/spirv.h b/src/compiler/nir/spirv/spirv.h
new file mode 100644 (file)
index 0000000..63bcb2f
--- /dev/null
@@ -0,0 +1,870 @@
+/*
+** Copyright (c) 2014-2015 The Khronos Group Inc.
+** 
+** Permission is hereby granted, free of charge, to any person obtaining a copy
+** of this software and/or associated documentation files (the "Materials"),
+** to deal in the Materials without restriction, including without limitation
+** the rights to use, copy, modify, merge, publish, distribute, sublicense,
+** and/or sell copies of the Materials, and to permit persons to whom the
+** Materials are furnished to do so, subject to the following conditions:
+** 
+** The above copyright notice and this permission notice shall be included in
+** all copies or substantial portions of the Materials.
+** 
+** MODIFICATIONS TO THIS FILE MAY MEAN IT NO LONGER ACCURATELY REFLECTS KHRONOS
+** STANDARDS. THE UNMODIFIED, NORMATIVE VERSIONS OF KHRONOS SPECIFICATIONS AND
+** HEADER INFORMATION ARE LOCATED AT https://www.khronos.org/registry/ 
+** 
+** THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+** OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+** THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+** FROM,OUT OF OR IN CONNECTION WITH THE MATERIALS OR THE USE OR OTHER DEALINGS
+** IN THE MATERIALS.
+*/
+
+/*
+** This header is automatically generated by the same tool that creates
+** the Binary Section of the SPIR-V specification.
+*/
+
+/*
+** Enumeration tokens for SPIR-V, in various styles:
+**   C, C++, C++11, JSON, Lua, Python
+** 
+** - C will have tokens with a "Spv" prefix, e.g.: SpvSourceLanguageGLSL
+** - C++ will have tokens in the "spv" name space, e.g.: spv::SourceLanguageGLSL
+** - C++11 will use enum classes in the spv namespace, e.g.: spv::SourceLanguage::GLSL
+** - Lua will use tables, e.g.: spv.SourceLanguage.GLSL
+** - Python will use dictionaries, e.g.: spv['SourceLanguage']['GLSL']
+** 
+** Some tokens act like mask values, which can be OR'd together,
+** while others are mutually exclusive.  The mask-like ones have
+** "Mask" in their name, and a parallel enum that has the shift
+** amount (1 << x) for each corresponding enumerant.
+*/
+
+#ifndef spirv_H
+#define spirv_H
+
+typedef unsigned int SpvId;
+
+#define SPV_VERSION 0x10000
+#define SPV_REVISION 2
+
+static const unsigned int SpvMagicNumber = 0x07230203;
+static const unsigned int SpvVersion = 0x00010000;
+static const unsigned int SpvRevision = 2;
+static const unsigned int SpvOpCodeMask = 0xffff;
+static const unsigned int SpvWordCountShift = 16;
+
+typedef enum SpvSourceLanguage_ {
+    SpvSourceLanguageUnknown = 0,
+    SpvSourceLanguageESSL = 1,
+    SpvSourceLanguageGLSL = 2,
+    SpvSourceLanguageOpenCL_C = 3,
+    SpvSourceLanguageOpenCL_CPP = 4,
+} SpvSourceLanguage;
+
+typedef enum SpvExecutionModel_ {
+    SpvExecutionModelVertex = 0,
+    SpvExecutionModelTessellationControl = 1,
+    SpvExecutionModelTessellationEvaluation = 2,
+    SpvExecutionModelGeometry = 3,
+    SpvExecutionModelFragment = 4,
+    SpvExecutionModelGLCompute = 5,
+    SpvExecutionModelKernel = 6,
+} SpvExecutionModel;
+
+typedef enum SpvAddressingModel_ {
+    SpvAddressingModelLogical = 0,
+    SpvAddressingModelPhysical32 = 1,
+    SpvAddressingModelPhysical64 = 2,
+} SpvAddressingModel;
+
+typedef enum SpvMemoryModel_ {
+    SpvMemoryModelSimple = 0,
+    SpvMemoryModelGLSL450 = 1,
+    SpvMemoryModelOpenCL = 2,
+} SpvMemoryModel;
+
+typedef enum SpvExecutionMode_ {
+    SpvExecutionModeInvocations = 0,
+    SpvExecutionModeSpacingEqual = 1,
+    SpvExecutionModeSpacingFractionalEven = 2,
+    SpvExecutionModeSpacingFractionalOdd = 3,
+    SpvExecutionModeVertexOrderCw = 4,
+    SpvExecutionModeVertexOrderCcw = 5,
+    SpvExecutionModePixelCenterInteger = 6,
+    SpvExecutionModeOriginUpperLeft = 7,
+    SpvExecutionModeOriginLowerLeft = 8,
+    SpvExecutionModeEarlyFragmentTests = 9,
+    SpvExecutionModePointMode = 10,
+    SpvExecutionModeXfb = 11,
+    SpvExecutionModeDepthReplacing = 12,
+    SpvExecutionModeDepthGreater = 14,
+    SpvExecutionModeDepthLess = 15,
+    SpvExecutionModeDepthUnchanged = 16,
+    SpvExecutionModeLocalSize = 17,
+    SpvExecutionModeLocalSizeHint = 18,
+    SpvExecutionModeInputPoints = 19,
+    SpvExecutionModeInputLines = 20,
+    SpvExecutionModeInputLinesAdjacency = 21,
+    SpvExecutionModeTriangles = 22,
+    SpvExecutionModeInputTrianglesAdjacency = 23,
+    SpvExecutionModeQuads = 24,
+    SpvExecutionModeIsolines = 25,
+    SpvExecutionModeOutputVertices = 26,
+    SpvExecutionModeOutputPoints = 27,
+    SpvExecutionModeOutputLineStrip = 28,
+    SpvExecutionModeOutputTriangleStrip = 29,
+    SpvExecutionModeVecTypeHint = 30,
+    SpvExecutionModeContractionOff = 31,
+} SpvExecutionMode;
+
+typedef enum SpvStorageClass_ {
+    SpvStorageClassUniformConstant = 0,
+    SpvStorageClassInput = 1,
+    SpvStorageClassUniform = 2,
+    SpvStorageClassOutput = 3,
+    SpvStorageClassWorkgroup = 4,
+    SpvStorageClassCrossWorkgroup = 5,
+    SpvStorageClassPrivate = 6,
+    SpvStorageClassFunction = 7,
+    SpvStorageClassGeneric = 8,
+    SpvStorageClassPushConstant = 9,
+    SpvStorageClassAtomicCounter = 10,
+    SpvStorageClassImage = 11,
+} SpvStorageClass;
+
+typedef enum SpvDim_ {
+    SpvDim1D = 0,
+    SpvDim2D = 1,
+    SpvDim3D = 2,
+    SpvDimCube = 3,
+    SpvDimRect = 4,
+    SpvDimBuffer = 5,
+    SpvDimSubpassData = 6,
+} SpvDim;
+
+typedef enum SpvSamplerAddressingMode_ {
+    SpvSamplerAddressingModeNone = 0,
+    SpvSamplerAddressingModeClampToEdge = 1,
+    SpvSamplerAddressingModeClamp = 2,
+    SpvSamplerAddressingModeRepeat = 3,
+    SpvSamplerAddressingModeRepeatMirrored = 4,
+} SpvSamplerAddressingMode;
+
+typedef enum SpvSamplerFilterMode_ {
+    SpvSamplerFilterModeNearest = 0,
+    SpvSamplerFilterModeLinear = 1,
+} SpvSamplerFilterMode;
+
+typedef enum SpvImageFormat_ {
+    SpvImageFormatUnknown = 0,
+    SpvImageFormatRgba32f = 1,
+    SpvImageFormatRgba16f = 2,
+    SpvImageFormatR32f = 3,
+    SpvImageFormatRgba8 = 4,
+    SpvImageFormatRgba8Snorm = 5,
+    SpvImageFormatRg32f = 6,
+    SpvImageFormatRg16f = 7,
+    SpvImageFormatR11fG11fB10f = 8,
+    SpvImageFormatR16f = 9,
+    SpvImageFormatRgba16 = 10,
+    SpvImageFormatRgb10A2 = 11,
+    SpvImageFormatRg16 = 12,
+    SpvImageFormatRg8 = 13,
+    SpvImageFormatR16 = 14,
+    SpvImageFormatR8 = 15,
+    SpvImageFormatRgba16Snorm = 16,
+    SpvImageFormatRg16Snorm = 17,
+    SpvImageFormatRg8Snorm = 18,
+    SpvImageFormatR16Snorm = 19,
+    SpvImageFormatR8Snorm = 20,
+    SpvImageFormatRgba32i = 21,
+    SpvImageFormatRgba16i = 22,
+    SpvImageFormatRgba8i = 23,
+    SpvImageFormatR32i = 24,
+    SpvImageFormatRg32i = 25,
+    SpvImageFormatRg16i = 26,
+    SpvImageFormatRg8i = 27,
+    SpvImageFormatR16i = 28,
+    SpvImageFormatR8i = 29,
+    SpvImageFormatRgba32ui = 30,
+    SpvImageFormatRgba16ui = 31,
+    SpvImageFormatRgba8ui = 32,
+    SpvImageFormatR32ui = 33,
+    SpvImageFormatRgb10a2ui = 34,
+    SpvImageFormatRg32ui = 35,
+    SpvImageFormatRg16ui = 36,
+    SpvImageFormatRg8ui = 37,
+    SpvImageFormatR16ui = 38,
+    SpvImageFormatR8ui = 39,
+} SpvImageFormat;
+
+typedef enum SpvImageChannelOrder_ {
+    SpvImageChannelOrderR = 0,
+    SpvImageChannelOrderA = 1,
+    SpvImageChannelOrderRG = 2,
+    SpvImageChannelOrderRA = 3,
+    SpvImageChannelOrderRGB = 4,
+    SpvImageChannelOrderRGBA = 5,
+    SpvImageChannelOrderBGRA = 6,
+    SpvImageChannelOrderARGB = 7,
+    SpvImageChannelOrderIntensity = 8,
+    SpvImageChannelOrderLuminance = 9,
+    SpvImageChannelOrderRx = 10,
+    SpvImageChannelOrderRGx = 11,
+    SpvImageChannelOrderRGBx = 12,
+    SpvImageChannelOrderDepth = 13,
+    SpvImageChannelOrderDepthStencil = 14,
+    SpvImageChannelOrdersRGB = 15,
+    SpvImageChannelOrdersRGBx = 16,
+    SpvImageChannelOrdersRGBA = 17,
+    SpvImageChannelOrdersBGRA = 18,
+} SpvImageChannelOrder;
+
+typedef enum SpvImageChannelDataType_ {
+    SpvImageChannelDataTypeSnormInt8 = 0,
+    SpvImageChannelDataTypeSnormInt16 = 1,
+    SpvImageChannelDataTypeUnormInt8 = 2,
+    SpvImageChannelDataTypeUnormInt16 = 3,
+    SpvImageChannelDataTypeUnormShort565 = 4,
+    SpvImageChannelDataTypeUnormShort555 = 5,
+    SpvImageChannelDataTypeUnormInt101010 = 6,
+    SpvImageChannelDataTypeSignedInt8 = 7,
+    SpvImageChannelDataTypeSignedInt16 = 8,
+    SpvImageChannelDataTypeSignedInt32 = 9,
+    SpvImageChannelDataTypeUnsignedInt8 = 10,
+    SpvImageChannelDataTypeUnsignedInt16 = 11,
+    SpvImageChannelDataTypeUnsignedInt32 = 12,
+    SpvImageChannelDataTypeHalfFloat = 13,
+    SpvImageChannelDataTypeFloat = 14,
+    SpvImageChannelDataTypeUnormInt24 = 15,
+    SpvImageChannelDataTypeUnormInt101010_2 = 16,
+} SpvImageChannelDataType;
+
+typedef enum SpvImageOperandsShift_ {
+    SpvImageOperandsBiasShift = 0,
+    SpvImageOperandsLodShift = 1,
+    SpvImageOperandsGradShift = 2,
+    SpvImageOperandsConstOffsetShift = 3,
+    SpvImageOperandsOffsetShift = 4,
+    SpvImageOperandsConstOffsetsShift = 5,
+    SpvImageOperandsSampleShift = 6,
+    SpvImageOperandsMinLodShift = 7,
+} SpvImageOperandsShift;
+
+typedef enum SpvImageOperandsMask_ {
+    SpvImageOperandsMaskNone = 0,
+    SpvImageOperandsBiasMask = 0x00000001,
+    SpvImageOperandsLodMask = 0x00000002,
+    SpvImageOperandsGradMask = 0x00000004,
+    SpvImageOperandsConstOffsetMask = 0x00000008,
+    SpvImageOperandsOffsetMask = 0x00000010,
+    SpvImageOperandsConstOffsetsMask = 0x00000020,
+    SpvImageOperandsSampleMask = 0x00000040,
+    SpvImageOperandsMinLodMask = 0x00000080,
+} SpvImageOperandsMask;
+
+typedef enum SpvFPFastMathModeShift_ {
+    SpvFPFastMathModeNotNaNShift = 0,
+    SpvFPFastMathModeNotInfShift = 1,
+    SpvFPFastMathModeNSZShift = 2,
+    SpvFPFastMathModeAllowRecipShift = 3,
+    SpvFPFastMathModeFastShift = 4,
+} SpvFPFastMathModeShift;
+
+typedef enum SpvFPFastMathModeMask_ {
+    SpvFPFastMathModeMaskNone = 0,
+    SpvFPFastMathModeNotNaNMask = 0x00000001,
+    SpvFPFastMathModeNotInfMask = 0x00000002,
+    SpvFPFastMathModeNSZMask = 0x00000004,
+    SpvFPFastMathModeAllowRecipMask = 0x00000008,
+    SpvFPFastMathModeFastMask = 0x00000010,
+} SpvFPFastMathModeMask;
+
+typedef enum SpvFPRoundingMode_ {
+    SpvFPRoundingModeRTE = 0,
+    SpvFPRoundingModeRTZ = 1,
+    SpvFPRoundingModeRTP = 2,
+    SpvFPRoundingModeRTN = 3,
+} SpvFPRoundingMode;
+
+typedef enum SpvLinkageType_ {
+    SpvLinkageTypeExport = 0,
+    SpvLinkageTypeImport = 1,
+} SpvLinkageType;
+
+typedef enum SpvAccessQualifier_ {
+    SpvAccessQualifierReadOnly = 0,
+    SpvAccessQualifierWriteOnly = 1,
+    SpvAccessQualifierReadWrite = 2,
+} SpvAccessQualifier;
+
+typedef enum SpvFunctionParameterAttribute_ {
+    SpvFunctionParameterAttributeZext = 0,
+    SpvFunctionParameterAttributeSext = 1,
+    SpvFunctionParameterAttributeByVal = 2,
+    SpvFunctionParameterAttributeSret = 3,
+    SpvFunctionParameterAttributeNoAlias = 4,
+    SpvFunctionParameterAttributeNoCapture = 5,
+    SpvFunctionParameterAttributeNoWrite = 6,
+    SpvFunctionParameterAttributeNoReadWrite = 7,
+} SpvFunctionParameterAttribute;
+
+typedef enum SpvDecoration_ {
+    SpvDecorationRelaxedPrecision = 0,
+    SpvDecorationSpecId = 1,
+    SpvDecorationBlock = 2,
+    SpvDecorationBufferBlock = 3,
+    SpvDecorationRowMajor = 4,
+    SpvDecorationColMajor = 5,
+    SpvDecorationArrayStride = 6,
+    SpvDecorationMatrixStride = 7,
+    SpvDecorationGLSLShared = 8,
+    SpvDecorationGLSLPacked = 9,
+    SpvDecorationCPacked = 10,
+    SpvDecorationBuiltIn = 11,
+    SpvDecorationNoPerspective = 13,
+    SpvDecorationFlat = 14,
+    SpvDecorationPatch = 15,
+    SpvDecorationCentroid = 16,
+    SpvDecorationSample = 17,
+    SpvDecorationInvariant = 18,
+    SpvDecorationRestrict = 19,
+    SpvDecorationAliased = 20,
+    SpvDecorationVolatile = 21,
+    SpvDecorationConstant = 22,
+    SpvDecorationCoherent = 23,
+    SpvDecorationNonWritable = 24,
+    SpvDecorationNonReadable = 25,
+    SpvDecorationUniform = 26,
+    SpvDecorationSaturatedConversion = 28,
+    SpvDecorationStream = 29,
+    SpvDecorationLocation = 30,
+    SpvDecorationComponent = 31,
+    SpvDecorationIndex = 32,
+    SpvDecorationBinding = 33,
+    SpvDecorationDescriptorSet = 34,
+    SpvDecorationOffset = 35,
+    SpvDecorationXfbBuffer = 36,
+    SpvDecorationXfbStride = 37,
+    SpvDecorationFuncParamAttr = 38,
+    SpvDecorationFPRoundingMode = 39,
+    SpvDecorationFPFastMathMode = 40,
+    SpvDecorationLinkageAttributes = 41,
+    SpvDecorationNoContraction = 42,
+    SpvDecorationInputAttachmentIndex = 43,
+    SpvDecorationAlignment = 44,
+} SpvDecoration;
+
+typedef enum SpvBuiltIn_ {
+    SpvBuiltInPosition = 0,
+    SpvBuiltInPointSize = 1,
+    SpvBuiltInClipDistance = 3,
+    SpvBuiltInCullDistance = 4,
+    SpvBuiltInVertexId = 5,
+    SpvBuiltInInstanceId = 6,
+    SpvBuiltInPrimitiveId = 7,
+    SpvBuiltInInvocationId = 8,
+    SpvBuiltInLayer = 9,
+    SpvBuiltInViewportIndex = 10,
+    SpvBuiltInTessLevelOuter = 11,
+    SpvBuiltInTessLevelInner = 12,
+    SpvBuiltInTessCoord = 13,
+    SpvBuiltInPatchVertices = 14,
+    SpvBuiltInFragCoord = 15,
+    SpvBuiltInPointCoord = 16,
+    SpvBuiltInFrontFacing = 17,
+    SpvBuiltInSampleId = 18,
+    SpvBuiltInSamplePosition = 19,
+    SpvBuiltInSampleMask = 20,
+    SpvBuiltInFragDepth = 22,
+    SpvBuiltInHelperInvocation = 23,
+    SpvBuiltInNumWorkgroups = 24,
+    SpvBuiltInWorkgroupSize = 25,
+    SpvBuiltInWorkgroupId = 26,
+    SpvBuiltInLocalInvocationId = 27,
+    SpvBuiltInGlobalInvocationId = 28,
+    SpvBuiltInLocalInvocationIndex = 29,
+    SpvBuiltInWorkDim = 30,
+    SpvBuiltInGlobalSize = 31,
+    SpvBuiltInEnqueuedWorkgroupSize = 32,
+    SpvBuiltInGlobalOffset = 33,
+    SpvBuiltInGlobalLinearId = 34,
+    SpvBuiltInSubgroupSize = 36,
+    SpvBuiltInSubgroupMaxSize = 37,
+    SpvBuiltInNumSubgroups = 38,
+    SpvBuiltInNumEnqueuedSubgroups = 39,
+    SpvBuiltInSubgroupId = 40,
+    SpvBuiltInSubgroupLocalInvocationId = 41,
+    SpvBuiltInVertexIndex = 42,
+    SpvBuiltInInstanceIndex = 43,
+} SpvBuiltIn;
+
+typedef enum SpvSelectionControlShift_ {
+    SpvSelectionControlFlattenShift = 0,
+    SpvSelectionControlDontFlattenShift = 1,
+} SpvSelectionControlShift;
+
+typedef enum SpvSelectionControlMask_ {
+    SpvSelectionControlMaskNone = 0,
+    SpvSelectionControlFlattenMask = 0x00000001,
+    SpvSelectionControlDontFlattenMask = 0x00000002,
+} SpvSelectionControlMask;
+
+typedef enum SpvLoopControlShift_ {
+    SpvLoopControlUnrollShift = 0,
+    SpvLoopControlDontUnrollShift = 1,
+} SpvLoopControlShift;
+
+typedef enum SpvLoopControlMask_ {
+    SpvLoopControlMaskNone = 0,
+    SpvLoopControlUnrollMask = 0x00000001,
+    SpvLoopControlDontUnrollMask = 0x00000002,
+} SpvLoopControlMask;
+
+typedef enum SpvFunctionControlShift_ {
+    SpvFunctionControlInlineShift = 0,
+    SpvFunctionControlDontInlineShift = 1,
+    SpvFunctionControlPureShift = 2,
+    SpvFunctionControlConstShift = 3,
+} SpvFunctionControlShift;
+
+typedef enum SpvFunctionControlMask_ {
+    SpvFunctionControlMaskNone = 0,
+    SpvFunctionControlInlineMask = 0x00000001,
+    SpvFunctionControlDontInlineMask = 0x00000002,
+    SpvFunctionControlPureMask = 0x00000004,
+    SpvFunctionControlConstMask = 0x00000008,
+} SpvFunctionControlMask;
+
+typedef enum SpvMemorySemanticsShift_ {
+    SpvMemorySemanticsAcquireShift = 1,
+    SpvMemorySemanticsReleaseShift = 2,
+    SpvMemorySemanticsAcquireReleaseShift = 3,
+    SpvMemorySemanticsSequentiallyConsistentShift = 4,
+    SpvMemorySemanticsUniformMemoryShift = 6,
+    SpvMemorySemanticsSubgroupMemoryShift = 7,
+    SpvMemorySemanticsWorkgroupMemoryShift = 8,
+    SpvMemorySemanticsCrossWorkgroupMemoryShift = 9,
+    SpvMemorySemanticsAtomicCounterMemoryShift = 10,
+    SpvMemorySemanticsImageMemoryShift = 11,
+} SpvMemorySemanticsShift;
+
+typedef enum SpvMemorySemanticsMask_ {
+    SpvMemorySemanticsMaskNone = 0,
+    SpvMemorySemanticsAcquireMask = 0x00000002,
+    SpvMemorySemanticsReleaseMask = 0x00000004,
+    SpvMemorySemanticsAcquireReleaseMask = 0x00000008,
+    SpvMemorySemanticsSequentiallyConsistentMask = 0x00000010,
+    SpvMemorySemanticsUniformMemoryMask = 0x00000040,
+    SpvMemorySemanticsSubgroupMemoryMask = 0x00000080,
+    SpvMemorySemanticsWorkgroupMemoryMask = 0x00000100,
+    SpvMemorySemanticsCrossWorkgroupMemoryMask = 0x00000200,
+    SpvMemorySemanticsAtomicCounterMemoryMask = 0x00000400,
+    SpvMemorySemanticsImageMemoryMask = 0x00000800,
+} SpvMemorySemanticsMask;
+
+typedef enum SpvMemoryAccessShift_ {
+    SpvMemoryAccessVolatileShift = 0,
+    SpvMemoryAccessAlignedShift = 1,
+    SpvMemoryAccessNontemporalShift = 2,
+} SpvMemoryAccessShift;
+
+typedef enum SpvMemoryAccessMask_ {
+    SpvMemoryAccessMaskNone = 0,
+    SpvMemoryAccessVolatileMask = 0x00000001,
+    SpvMemoryAccessAlignedMask = 0x00000002,
+    SpvMemoryAccessNontemporalMask = 0x00000004,
+} SpvMemoryAccessMask;
+
+typedef enum SpvScope_ {
+    SpvScopeCrossDevice = 0,
+    SpvScopeDevice = 1,
+    SpvScopeWorkgroup = 2,
+    SpvScopeSubgroup = 3,
+    SpvScopeInvocation = 4,
+} SpvScope;
+
+typedef enum SpvGroupOperation_ {
+    SpvGroupOperationReduce = 0,
+    SpvGroupOperationInclusiveScan = 1,
+    SpvGroupOperationExclusiveScan = 2,
+} SpvGroupOperation;
+
+typedef enum SpvKernelEnqueueFlags_ {
+    SpvKernelEnqueueFlagsNoWait = 0,
+    SpvKernelEnqueueFlagsWaitKernel = 1,
+    SpvKernelEnqueueFlagsWaitWorkGroup = 2,
+} SpvKernelEnqueueFlags;
+
+typedef enum SpvKernelProfilingInfoShift_ {
+    SpvKernelProfilingInfoCmdExecTimeShift = 0,
+} SpvKernelProfilingInfoShift;
+
+typedef enum SpvKernelProfilingInfoMask_ {
+    SpvKernelProfilingInfoMaskNone = 0,
+    SpvKernelProfilingInfoCmdExecTimeMask = 0x00000001,
+} SpvKernelProfilingInfoMask;
+
+typedef enum SpvCapability_ {
+    SpvCapabilityMatrix = 0,
+    SpvCapabilityShader = 1,
+    SpvCapabilityGeometry = 2,
+    SpvCapabilityTessellation = 3,
+    SpvCapabilityAddresses = 4,
+    SpvCapabilityLinkage = 5,
+    SpvCapabilityKernel = 6,
+    SpvCapabilityVector16 = 7,
+    SpvCapabilityFloat16Buffer = 8,
+    SpvCapabilityFloat16 = 9,
+    SpvCapabilityFloat64 = 10,
+    SpvCapabilityInt64 = 11,
+    SpvCapabilityInt64Atomics = 12,
+    SpvCapabilityImageBasic = 13,
+    SpvCapabilityImageReadWrite = 14,
+    SpvCapabilityImageMipmap = 15,
+    SpvCapabilityPipes = 17,
+    SpvCapabilityGroups = 18,
+    SpvCapabilityDeviceEnqueue = 19,
+    SpvCapabilityLiteralSampler = 20,
+    SpvCapabilityAtomicStorage = 21,
+    SpvCapabilityInt16 = 22,
+    SpvCapabilityTessellationPointSize = 23,
+    SpvCapabilityGeometryPointSize = 24,
+    SpvCapabilityImageGatherExtended = 25,
+    SpvCapabilityStorageImageMultisample = 27,
+    SpvCapabilityUniformBufferArrayDynamicIndexing = 28,
+    SpvCapabilitySampledImageArrayDynamicIndexing = 29,
+    SpvCapabilityStorageBufferArrayDynamicIndexing = 30,
+    SpvCapabilityStorageImageArrayDynamicIndexing = 31,
+    SpvCapabilityClipDistance = 32,
+    SpvCapabilityCullDistance = 33,
+    SpvCapabilityImageCubeArray = 34,
+    SpvCapabilitySampleRateShading = 35,
+    SpvCapabilityImageRect = 36,
+    SpvCapabilitySampledRect = 37,
+    SpvCapabilityGenericPointer = 38,
+    SpvCapabilityInt8 = 39,
+    SpvCapabilityInputAttachment = 40,
+    SpvCapabilitySparseResidency = 41,
+    SpvCapabilityMinLod = 42,
+    SpvCapabilitySampled1D = 43,
+    SpvCapabilityImage1D = 44,
+    SpvCapabilitySampledCubeArray = 45,
+    SpvCapabilitySampledBuffer = 46,
+    SpvCapabilityImageBuffer = 47,
+    SpvCapabilityImageMSArray = 48,
+    SpvCapabilityStorageImageExtendedFormats = 49,
+    SpvCapabilityImageQuery = 50,
+    SpvCapabilityDerivativeControl = 51,
+    SpvCapabilityInterpolationFunction = 52,
+    SpvCapabilityTransformFeedback = 53,
+    SpvCapabilityGeometryStreams = 54,
+    SpvCapabilityStorageImageReadWithoutFormat = 55,
+    SpvCapabilityStorageImageWriteWithoutFormat = 56,
+    SpvCapabilityMultiViewport = 57,
+} SpvCapability;
+
+typedef enum SpvOp_ {
+    SpvOpNop = 0,
+    SpvOpUndef = 1,
+    SpvOpSourceContinued = 2,
+    SpvOpSource = 3,
+    SpvOpSourceExtension = 4,
+    SpvOpName = 5,
+    SpvOpMemberName = 6,
+    SpvOpString = 7,
+    SpvOpLine = 8,
+    SpvOpExtension = 10,
+    SpvOpExtInstImport = 11,
+    SpvOpExtInst = 12,
+    SpvOpMemoryModel = 14,
+    SpvOpEntryPoint = 15,
+    SpvOpExecutionMode = 16,
+    SpvOpCapability = 17,
+    SpvOpTypeVoid = 19,
+    SpvOpTypeBool = 20,
+    SpvOpTypeInt = 21,
+    SpvOpTypeFloat = 22,
+    SpvOpTypeVector = 23,
+    SpvOpTypeMatrix = 24,
+    SpvOpTypeImage = 25,
+    SpvOpTypeSampler = 26,
+    SpvOpTypeSampledImage = 27,
+    SpvOpTypeArray = 28,
+    SpvOpTypeRuntimeArray = 29,
+    SpvOpTypeStruct = 30,
+    SpvOpTypeOpaque = 31,
+    SpvOpTypePointer = 32,
+    SpvOpTypeFunction = 33,
+    SpvOpTypeEvent = 34,
+    SpvOpTypeDeviceEvent = 35,
+    SpvOpTypeReserveId = 36,
+    SpvOpTypeQueue = 37,
+    SpvOpTypePipe = 38,
+    SpvOpTypeForwardPointer = 39,
+    SpvOpConstantTrue = 41,
+    SpvOpConstantFalse = 42,
+    SpvOpConstant = 43,
+    SpvOpConstantComposite = 44,
+    SpvOpConstantSampler = 45,
+    SpvOpConstantNull = 46,
+    SpvOpSpecConstantTrue = 48,
+    SpvOpSpecConstantFalse = 49,
+    SpvOpSpecConstant = 50,
+    SpvOpSpecConstantComposite = 51,
+    SpvOpSpecConstantOp = 52,
+    SpvOpFunction = 54,
+    SpvOpFunctionParameter = 55,
+    SpvOpFunctionEnd = 56,
+    SpvOpFunctionCall = 57,
+    SpvOpVariable = 59,
+    SpvOpImageTexelPointer = 60,
+    SpvOpLoad = 61,
+    SpvOpStore = 62,
+    SpvOpCopyMemory = 63,
+    SpvOpCopyMemorySized = 64,
+    SpvOpAccessChain = 65,
+    SpvOpInBoundsAccessChain = 66,
+    SpvOpPtrAccessChain = 67,
+    SpvOpArrayLength = 68,
+    SpvOpGenericPtrMemSemantics = 69,
+    SpvOpInBoundsPtrAccessChain = 70,
+    SpvOpDecorate = 71,
+    SpvOpMemberDecorate = 72,
+    SpvOpDecorationGroup = 73,
+    SpvOpGroupDecorate = 74,
+    SpvOpGroupMemberDecorate = 75,
+    SpvOpVectorExtractDynamic = 77,
+    SpvOpVectorInsertDynamic = 78,
+    SpvOpVectorShuffle = 79,
+    SpvOpCompositeConstruct = 80,
+    SpvOpCompositeExtract = 81,
+    SpvOpCompositeInsert = 82,
+    SpvOpCopyObject = 83,
+    SpvOpTranspose = 84,
+    SpvOpSampledImage = 86,
+    SpvOpImageSampleImplicitLod = 87,
+    SpvOpImageSampleExplicitLod = 88,
+    SpvOpImageSampleDrefImplicitLod = 89,
+    SpvOpImageSampleDrefExplicitLod = 90,
+    SpvOpImageSampleProjImplicitLod = 91,
+    SpvOpImageSampleProjExplicitLod = 92,
+    SpvOpImageSampleProjDrefImplicitLod = 93,
+    SpvOpImageSampleProjDrefExplicitLod = 94,
+    SpvOpImageFetch = 95,
+    SpvOpImageGather = 96,
+    SpvOpImageDrefGather = 97,
+    SpvOpImageRead = 98,
+    SpvOpImageWrite = 99,
+    SpvOpImage = 100,
+    SpvOpImageQueryFormat = 101,
+    SpvOpImageQueryOrder = 102,
+    SpvOpImageQuerySizeLod = 103,
+    SpvOpImageQuerySize = 104,
+    SpvOpImageQueryLod = 105,
+    SpvOpImageQueryLevels = 106,
+    SpvOpImageQuerySamples = 107,
+    SpvOpConvertFToU = 109,
+    SpvOpConvertFToS = 110,
+    SpvOpConvertSToF = 111,
+    SpvOpConvertUToF = 112,
+    SpvOpUConvert = 113,
+    SpvOpSConvert = 114,
+    SpvOpFConvert = 115,
+    SpvOpQuantizeToF16 = 116,
+    SpvOpConvertPtrToU = 117,
+    SpvOpSatConvertSToU = 118,
+    SpvOpSatConvertUToS = 119,
+    SpvOpConvertUToPtr = 120,
+    SpvOpPtrCastToGeneric = 121,
+    SpvOpGenericCastToPtr = 122,
+    SpvOpGenericCastToPtrExplicit = 123,
+    SpvOpBitcast = 124,
+    SpvOpSNegate = 126,
+    SpvOpFNegate = 127,
+    SpvOpIAdd = 128,
+    SpvOpFAdd = 129,
+    SpvOpISub = 130,
+    SpvOpFSub = 131,
+    SpvOpIMul = 132,
+    SpvOpFMul = 133,
+    SpvOpUDiv = 134,
+    SpvOpSDiv = 135,
+    SpvOpFDiv = 136,
+    SpvOpUMod = 137,
+    SpvOpSRem = 138,
+    SpvOpSMod = 139,
+    SpvOpFRem = 140,
+    SpvOpFMod = 141,
+    SpvOpVectorTimesScalar = 142,
+    SpvOpMatrixTimesScalar = 143,
+    SpvOpVectorTimesMatrix = 144,
+    SpvOpMatrixTimesVector = 145,
+    SpvOpMatrixTimesMatrix = 146,
+    SpvOpOuterProduct = 147,
+    SpvOpDot = 148,
+    SpvOpIAddCarry = 149,
+    SpvOpISubBorrow = 150,
+    SpvOpUMulExtended = 151,
+    SpvOpSMulExtended = 152,
+    SpvOpAny = 154,
+    SpvOpAll = 155,
+    SpvOpIsNan = 156,
+    SpvOpIsInf = 157,
+    SpvOpIsFinite = 158,
+    SpvOpIsNormal = 159,
+    SpvOpSignBitSet = 160,
+    SpvOpLessOrGreater = 161,
+    SpvOpOrdered = 162,
+    SpvOpUnordered = 163,
+    SpvOpLogicalEqual = 164,
+    SpvOpLogicalNotEqual = 165,
+    SpvOpLogicalOr = 166,
+    SpvOpLogicalAnd = 167,
+    SpvOpLogicalNot = 168,
+    SpvOpSelect = 169,
+    SpvOpIEqual = 170,
+    SpvOpINotEqual = 171,
+    SpvOpUGreaterThan = 172,
+    SpvOpSGreaterThan = 173,
+    SpvOpUGreaterThanEqual = 174,
+    SpvOpSGreaterThanEqual = 175,
+    SpvOpULessThan = 176,
+    SpvOpSLessThan = 177,
+    SpvOpULessThanEqual = 178,
+    SpvOpSLessThanEqual = 179,
+    SpvOpFOrdEqual = 180,
+    SpvOpFUnordEqual = 181,
+    SpvOpFOrdNotEqual = 182,
+    SpvOpFUnordNotEqual = 183,
+    SpvOpFOrdLessThan = 184,
+    SpvOpFUnordLessThan = 185,
+    SpvOpFOrdGreaterThan = 186,
+    SpvOpFUnordGreaterThan = 187,
+    SpvOpFOrdLessThanEqual = 188,
+    SpvOpFUnordLessThanEqual = 189,
+    SpvOpFOrdGreaterThanEqual = 190,
+    SpvOpFUnordGreaterThanEqual = 191,
+    SpvOpShiftRightLogical = 194,
+    SpvOpShiftRightArithmetic = 195,
+    SpvOpShiftLeftLogical = 196,
+    SpvOpBitwiseOr = 197,
+    SpvOpBitwiseXor = 198,
+    SpvOpBitwiseAnd = 199,
+    SpvOpNot = 200,
+    SpvOpBitFieldInsert = 201,
+    SpvOpBitFieldSExtract = 202,
+    SpvOpBitFieldUExtract = 203,
+    SpvOpBitReverse = 204,
+    SpvOpBitCount = 205,
+    SpvOpDPdx = 207,
+    SpvOpDPdy = 208,
+    SpvOpFwidth = 209,
+    SpvOpDPdxFine = 210,
+    SpvOpDPdyFine = 211,
+    SpvOpFwidthFine = 212,
+    SpvOpDPdxCoarse = 213,
+    SpvOpDPdyCoarse = 214,
+    SpvOpFwidthCoarse = 215,
+    SpvOpEmitVertex = 218,
+    SpvOpEndPrimitive = 219,
+    SpvOpEmitStreamVertex = 220,
+    SpvOpEndStreamPrimitive = 221,
+    SpvOpControlBarrier = 224,
+    SpvOpMemoryBarrier = 225,
+    SpvOpAtomicLoad = 227,
+    SpvOpAtomicStore = 228,
+    SpvOpAtomicExchange = 229,
+    SpvOpAtomicCompareExchange = 230,
+    SpvOpAtomicCompareExchangeWeak = 231,
+    SpvOpAtomicIIncrement = 232,
+    SpvOpAtomicIDecrement = 233,
+    SpvOpAtomicIAdd = 234,
+    SpvOpAtomicISub = 235,
+    SpvOpAtomicSMin = 236,
+    SpvOpAtomicUMin = 237,
+    SpvOpAtomicSMax = 238,
+    SpvOpAtomicUMax = 239,
+    SpvOpAtomicAnd = 240,
+    SpvOpAtomicOr = 241,
+    SpvOpAtomicXor = 242,
+    SpvOpPhi = 245,
+    SpvOpLoopMerge = 246,
+    SpvOpSelectionMerge = 247,
+    SpvOpLabel = 248,
+    SpvOpBranch = 249,
+    SpvOpBranchConditional = 250,
+    SpvOpSwitch = 251,
+    SpvOpKill = 252,
+    SpvOpReturn = 253,
+    SpvOpReturnValue = 254,
+    SpvOpUnreachable = 255,
+    SpvOpLifetimeStart = 256,
+    SpvOpLifetimeStop = 257,
+    SpvOpGroupAsyncCopy = 259,
+    SpvOpGroupWaitEvents = 260,
+    SpvOpGroupAll = 261,
+    SpvOpGroupAny = 262,
+    SpvOpGroupBroadcast = 263,
+    SpvOpGroupIAdd = 264,
+    SpvOpGroupFAdd = 265,
+    SpvOpGroupFMin = 266,
+    SpvOpGroupUMin = 267,
+    SpvOpGroupSMin = 268,
+    SpvOpGroupFMax = 269,
+    SpvOpGroupUMax = 270,
+    SpvOpGroupSMax = 271,
+    SpvOpReadPipe = 274,
+    SpvOpWritePipe = 275,
+    SpvOpReservedReadPipe = 276,
+    SpvOpReservedWritePipe = 277,
+    SpvOpReserveReadPipePackets = 278,
+    SpvOpReserveWritePipePackets = 279,
+    SpvOpCommitReadPipe = 280,
+    SpvOpCommitWritePipe = 281,
+    SpvOpIsValidReserveId = 282,
+    SpvOpGetNumPipePackets = 283,
+    SpvOpGetMaxPipePackets = 284,
+    SpvOpGroupReserveReadPipePackets = 285,
+    SpvOpGroupReserveWritePipePackets = 286,
+    SpvOpGroupCommitReadPipe = 287,
+    SpvOpGroupCommitWritePipe = 288,
+    SpvOpEnqueueMarker = 291,
+    SpvOpEnqueueKernel = 292,
+    SpvOpGetKernelNDrangeSubGroupCount = 293,
+    SpvOpGetKernelNDrangeMaxSubGroupSize = 294,
+    SpvOpGetKernelWorkGroupSize = 295,
+    SpvOpGetKernelPreferredWorkGroupSizeMultiple = 296,
+    SpvOpRetainEvent = 297,
+    SpvOpReleaseEvent = 298,
+    SpvOpCreateUserEvent = 299,
+    SpvOpIsValidEvent = 300,
+    SpvOpSetUserEventStatus = 301,
+    SpvOpCaptureEventProfilingInfo = 302,
+    SpvOpGetDefaultQueue = 303,
+    SpvOpBuildNDRange = 304,
+    SpvOpImageSparseSampleImplicitLod = 305,
+    SpvOpImageSparseSampleExplicitLod = 306,
+    SpvOpImageSparseSampleDrefImplicitLod = 307,
+    SpvOpImageSparseSampleDrefExplicitLod = 308,
+    SpvOpImageSparseSampleProjImplicitLod = 309,
+    SpvOpImageSparseSampleProjExplicitLod = 310,
+    SpvOpImageSparseSampleProjDrefImplicitLod = 311,
+    SpvOpImageSparseSampleProjDrefExplicitLod = 312,
+    SpvOpImageSparseFetch = 313,
+    SpvOpImageSparseGather = 314,
+    SpvOpImageSparseDrefGather = 315,
+    SpvOpImageSparseTexelsResident = 316,
+    SpvOpNoLine = 317,
+    SpvOpAtomicFlagTestAndSet = 318,
+    SpvOpAtomicFlagClear = 319,
+} SpvOp;
+
+#endif  // #ifndef spirv_H
+
diff --git a/src/compiler/nir/spirv/spirv_to_nir.c b/src/compiler/nir/spirv/spirv_to_nir.c
new file mode 100644 (file)
index 0000000..5a7184a
--- /dev/null
@@ -0,0 +1,2704 @@
+/*
+ * Copyright © 2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ *    Jason Ekstrand (jason@jlekstrand.net)
+ *
+ */
+
+#include "vtn_private.h"
+#include "nir/nir_vla.h"
+#include "nir/nir_control_flow.h"
+#include "nir/nir_constant_expressions.h"
+
+static struct vtn_ssa_value *
+vtn_undef_ssa_value(struct vtn_builder *b, const struct glsl_type *type)
+{
+   struct vtn_ssa_value *val = rzalloc(b, struct vtn_ssa_value);
+   val->type = type;
+
+   if (glsl_type_is_vector_or_scalar(type)) {
+      unsigned num_components = glsl_get_vector_elements(val->type);
+      nir_ssa_undef_instr *undef =
+         nir_ssa_undef_instr_create(b->shader, num_components);
+
+      nir_instr_insert_before_cf_list(&b->impl->body, &undef->instr);
+      val->def = &undef->def;
+   } else {
+      unsigned elems = glsl_get_length(val->type);
+      val->elems = ralloc_array(b, struct vtn_ssa_value *, elems);
+      if (glsl_type_is_matrix(type)) {
+         const struct glsl_type *elem_type =
+            glsl_vector_type(glsl_get_base_type(type),
+                             glsl_get_vector_elements(type));
+
+         for (unsigned i = 0; i < elems; i++)
+            val->elems[i] = vtn_undef_ssa_value(b, elem_type);
+      } else if (glsl_type_is_array(type)) {
+         const struct glsl_type *elem_type = glsl_get_array_element(type);
+         for (unsigned i = 0; i < elems; i++)
+            val->elems[i] = vtn_undef_ssa_value(b, elem_type);
+      } else {
+         for (unsigned i = 0; i < elems; i++) {
+            const struct glsl_type *elem_type = glsl_get_struct_field(type, i);
+            val->elems[i] = vtn_undef_ssa_value(b, elem_type);
+         }
+      }
+   }
+
+   return val;
+}
+
+static struct vtn_ssa_value *
+vtn_const_ssa_value(struct vtn_builder *b, nir_constant *constant,
+                    const struct glsl_type *type)
+{
+   struct hash_entry *entry = _mesa_hash_table_search(b->const_table, constant);
+
+   if (entry)
+      return entry->data;
+
+   struct vtn_ssa_value *val = rzalloc(b, struct vtn_ssa_value);
+   val->type = type;
+
+   switch (glsl_get_base_type(type)) {
+   case GLSL_TYPE_INT:
+   case GLSL_TYPE_UINT:
+   case GLSL_TYPE_BOOL:
+   case GLSL_TYPE_FLOAT:
+   case GLSL_TYPE_DOUBLE:
+      if (glsl_type_is_vector_or_scalar(type)) {
+         unsigned num_components = glsl_get_vector_elements(val->type);
+         nir_load_const_instr *load =
+            nir_load_const_instr_create(b->shader, num_components);
+
+         for (unsigned i = 0; i < num_components; i++)
+            load->value.u[i] = constant->value.u[i];
+
+         nir_instr_insert_before_cf_list(&b->impl->body, &load->instr);
+         val->def = &load->def;
+      } else {
+         assert(glsl_type_is_matrix(type));
+         unsigned rows = glsl_get_vector_elements(val->type);
+         unsigned columns = glsl_get_matrix_columns(val->type);
+         val->elems = ralloc_array(b, struct vtn_ssa_value *, columns);
+
+         for (unsigned i = 0; i < columns; i++) {
+            struct vtn_ssa_value *col_val = rzalloc(b, struct vtn_ssa_value);
+            col_val->type = glsl_get_column_type(val->type);
+            nir_load_const_instr *load =
+               nir_load_const_instr_create(b->shader, rows);
+
+            for (unsigned j = 0; j < rows; j++)
+               load->value.u[j] = constant->value.u[rows * i + j];
+
+            nir_instr_insert_before_cf_list(&b->impl->body, &load->instr);
+            col_val->def = &load->def;
+
+            val->elems[i] = col_val;
+         }
+      }
+      break;
+
+   case GLSL_TYPE_ARRAY: {
+      unsigned elems = glsl_get_length(val->type);
+      val->elems = ralloc_array(b, struct vtn_ssa_value *, elems);
+      const struct glsl_type *elem_type = glsl_get_array_element(val->type);
+      for (unsigned i = 0; i < elems; i++)
+         val->elems[i] = vtn_const_ssa_value(b, constant->elements[i],
+                                             elem_type);
+      break;
+   }
+
+   case GLSL_TYPE_STRUCT: {
+      unsigned elems = glsl_get_length(val->type);
+      val->elems = ralloc_array(b, struct vtn_ssa_value *, elems);
+      for (unsigned i = 0; i < elems; i++) {
+         const struct glsl_type *elem_type =
+            glsl_get_struct_field(val->type, i);
+         val->elems[i] = vtn_const_ssa_value(b, constant->elements[i],
+                                             elem_type);
+      }
+      break;
+   }
+
+   default:
+      unreachable("bad constant type");
+   }
+
+   return val;
+}
+
+struct vtn_ssa_value *
+vtn_ssa_value(struct vtn_builder *b, uint32_t value_id)
+{
+   struct vtn_value *val = vtn_untyped_value(b, value_id);
+   switch (val->value_type) {
+   case vtn_value_type_undef:
+      return vtn_undef_ssa_value(b, val->type->type);
+
+   case vtn_value_type_constant:
+      return vtn_const_ssa_value(b, val->constant, val->const_type);
+
+   case vtn_value_type_ssa:
+      return val->ssa;
+
+   case vtn_value_type_access_chain:
+      /* This is needed for function parameters */
+      return vtn_variable_load(b, val->access_chain);
+
+   default:
+      unreachable("Invalid type for an SSA value");
+   }
+}
+
+static char *
+vtn_string_literal(struct vtn_builder *b, const uint32_t *words,
+                   unsigned word_count, unsigned *words_used)
+{
+   char *dup = ralloc_strndup(b, (char *)words, word_count * sizeof(*words));
+   if (words_used) {
+      /* Amount of space taken by the string (including the null) */
+      unsigned len = strlen(dup) + 1;
+      *words_used = DIV_ROUND_UP(len, sizeof(*words));
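+      /* For example, "GLSL.std.450" is 12 characters plus the terminating
+       * null, so len == 13 and DIV_ROUND_UP(13, 4) reports 4 words used.
+       */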
+   }
+   return dup;
+}
+
+const uint32_t *
+vtn_foreach_instruction(struct vtn_builder *b, const uint32_t *start,
+                        const uint32_t *end, vtn_instruction_handler handler)
+{
+   b->file = NULL;
+   b->line = -1;
+   b->col = -1;
+
+   const uint32_t *w = start;
+   while (w < end) {
+      SpvOp opcode = w[0] & SpvOpCodeMask;
+      unsigned count = w[0] >> SpvWordCountShift;
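+      /* The first word of every SPIR-V instruction packs the total word
+       * count in its upper 16 bits and the opcode in its lower 16 bits;
+       * e.g. 0x0004003b decodes to count == 4, opcode == 59 (SpvOpVariable).
+       */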
+      assert(count >= 1 && w + count <= end);
+
+      switch (opcode) {
+      case SpvOpNop:
+         break; /* Do nothing */
+
+      case SpvOpLine:
+         b->file = vtn_value(b, w[1], vtn_value_type_string)->str;
+         b->line = w[2];
+         b->col = w[3];
+         break;
+
+      case SpvOpNoLine:
+         b->file = NULL;
+         b->line = -1;
+         b->col = -1;
+         break;
+
+      default:
+         if (!handler(b, opcode, w, count))
+            return w;
+         break;
+      }
+
+      w += count;
+   }
+   assert(w == end);
+   return w;
+}
+
+static void
+vtn_handle_extension(struct vtn_builder *b, SpvOp opcode,
+                     const uint32_t *w, unsigned count)
+{
+   switch (opcode) {
+   case SpvOpExtInstImport: {
+      struct vtn_value *val = vtn_push_value(b, w[1], vtn_value_type_extension);
+      if (strcmp((const char *)&w[2], "GLSL.std.450") == 0) {
+         val->ext_handler = vtn_handle_glsl450_instruction;
+      } else {
+         assert(!"Unsupported extension");
+      }
+      break;
+   }
+
+   case SpvOpExtInst: {
+      struct vtn_value *val = vtn_value(b, w[3], vtn_value_type_extension);
+      bool handled = val->ext_handler(b, w[4], w, count);
+      (void)handled;
+      assert(handled);
+      break;
+   }
+
+   default:
+      unreachable("Unhandled opcode");
+   }
+}
+
+static void
+_foreach_decoration_helper(struct vtn_builder *b,
+                           struct vtn_value *base_value,
+                           int parent_member,
+                           struct vtn_value *value,
+                           vtn_decoration_foreach_cb cb, void *data)
+{
+   for (struct vtn_decoration *dec = value->decoration; dec; dec = dec->next) {
+      int member;
+      if (dec->scope == VTN_DEC_DECORATION) {
+         member = parent_member;
+      } else if (dec->scope >= VTN_DEC_STRUCT_MEMBER0) {
+         assert(parent_member == -1);
+         member = dec->scope - VTN_DEC_STRUCT_MEMBER0;
+      } else {
+         /* Not a decoration */
+         continue;
+      }
+
+      if (dec->group) {
+         assert(dec->group->value_type == vtn_value_type_decoration_group);
+         _foreach_decoration_helper(b, base_value, member, dec->group,
+                                    cb, data);
+      } else {
+         cb(b, base_value, member, dec, data);
+      }
+   }
+}
+
+/** Iterates (recursively if needed) over all of the decorations on a value
+ *
+ * This function iterates over all of the decorations applied to a given
+ * value.  If it encounters a decoration group, it recurses into the group
+ * and iterates over all of those decorations as well.
+ */
+void
+vtn_foreach_decoration(struct vtn_builder *b, struct vtn_value *value,
+                       vtn_decoration_foreach_cb cb, void *data)
+{
+   _foreach_decoration_helper(b, value, -1, value, cb, data);
+}
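+/* Usage sketch (hypothetical callback, for illustration only): a caller
+ * that wants to count Location decorations on a value could do roughly
+ *
+ *    static void
+ *    count_locations_cb(struct vtn_builder *b, struct vtn_value *val,
+ *                       int member, const struct vtn_decoration *dec,
+ *                       void *data)
+ *    {
+ *       if (dec->decoration == SpvDecorationLocation)
+ *          (*(unsigned *)data)++;
+ *    }
+ *
+ *    unsigned num_locations = 0;
+ *    vtn_foreach_decoration(b, val, count_locations_cb, &num_locations);
+ *
+ * Decoration groups are flattened during iteration, so the callback only
+ * ever sees the decorations themselves, never the group value.
+ */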
+
+void
+vtn_foreach_execution_mode(struct vtn_builder *b, struct vtn_value *value,
+                           vtn_execution_mode_foreach_cb cb, void *data)
+{
+   for (struct vtn_decoration *dec = value->decoration; dec; dec = dec->next) {
+      if (dec->scope != VTN_DEC_EXECUTION_MODE)
+         continue;
+
+      assert(dec->group == NULL);
+      cb(b, value, dec, data);
+   }
+}
+
+static void
+vtn_handle_decoration(struct vtn_builder *b, SpvOp opcode,
+                      const uint32_t *w, unsigned count)
+{
+   const uint32_t *w_end = w + count;
+   const uint32_t target = w[1];
+   w += 2;
+
+   switch (opcode) {
+   case SpvOpDecorationGroup:
+      vtn_push_value(b, target, vtn_value_type_decoration_group);
+      break;
+
+   case SpvOpDecorate:
+   case SpvOpMemberDecorate:
+   case SpvOpExecutionMode: {
+      struct vtn_value *val = &b->values[target];
+
+      struct vtn_decoration *dec = rzalloc(b, struct vtn_decoration);
+      switch (opcode) {
+      case SpvOpDecorate:
+         dec->scope = VTN_DEC_DECORATION;
+         break;
+      case SpvOpMemberDecorate:
+         dec->scope = VTN_DEC_STRUCT_MEMBER0 + *(w++);
+         break;
+      case SpvOpExecutionMode:
+         dec->scope = VTN_DEC_EXECUTION_MODE;
+         break;
+      default:
+         unreachable("Invalid decoration opcode");
+      }
+      dec->decoration = *(w++);
+      dec->literals = w;
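+      /* For example, an "OpDecorate %v Location 3" instruction lands here
+       * with dec->decoration == SpvDecorationLocation and dec->literals[0]
+       * == 3; the target <id> itself was already pulled out of w[1] above.
+       */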
+
+      /* Link into the list */
+      dec->next = val->decoration;
+      val->decoration = dec;
+      break;
+   }
+
+   case SpvOpGroupMemberDecorate:
+   case SpvOpGroupDecorate: {
+      struct vtn_value *group =
+         vtn_value(b, target, vtn_value_type_decoration_group);
+
+      for (; w < w_end; w++) {
+         struct vtn_value *val = vtn_untyped_value(b, *w);
+         struct vtn_decoration *dec = rzalloc(b, struct vtn_decoration);
+
+         dec->group = group;
+         if (opcode == SpvOpGroupDecorate) {
+            dec->scope = VTN_DEC_DECORATION;
+         } else {
+            dec->scope = VTN_DEC_STRUCT_MEMBER0 + *(++w);
+         }
+
+         /* Link into the list */
+         dec->next = val->decoration;
+         val->decoration = dec;
+      }
+      break;
+   }
+
+   default:
+      unreachable("Unhandled opcode");
+   }
+}
+
+struct member_decoration_ctx {
+   unsigned num_fields;
+   struct glsl_struct_field *fields;
+   struct vtn_type *type;
+};
+
+/* does a shallow copy of a vtn_type */
+
+static struct vtn_type *
+vtn_type_copy(struct vtn_builder *b, struct vtn_type *src)
+{
+   struct vtn_type *dest = ralloc(b, struct vtn_type);
+   dest->type = src->type;
+   dest->is_builtin = src->is_builtin;
+   if (src->is_builtin)
+      dest->builtin = src->builtin;
+
+   if (!glsl_type_is_scalar(src->type)) {
+      switch (glsl_get_base_type(src->type)) {
+      case GLSL_TYPE_INT:
+      case GLSL_TYPE_UINT:
+      case GLSL_TYPE_BOOL:
+      case GLSL_TYPE_FLOAT:
+      case GLSL_TYPE_DOUBLE:
+      case GLSL_TYPE_ARRAY:
+         dest->row_major = src->row_major;
+         dest->stride = src->stride;
+         dest->array_element = src->array_element;
+         break;
+
+      case GLSL_TYPE_STRUCT: {
+         unsigned elems = glsl_get_length(src->type);
+
+         dest->members = ralloc_array(b, struct vtn_type *, elems);
+         memcpy(dest->members, src->members, elems * sizeof(struct vtn_type *));
+
+         dest->offsets = ralloc_array(b, unsigned, elems);
+         memcpy(dest->offsets, src->offsets, elems * sizeof(unsigned));
+         break;
+      }
+
+      default:
+         unreachable("unhandled type");
+      }
+   }
+
+   return dest;
+}
+
+static struct vtn_type *
+mutable_matrix_member(struct vtn_builder *b, struct vtn_type *type, int member)
+{
+   type->members[member] = vtn_type_copy(b, type->members[member]);
+   type = type->members[member];
+
+   /* We may have an array of matrices.... Oh, joy! */
+   while (glsl_type_is_array(type->type)) {
+      type->array_element = vtn_type_copy(b, type->array_element);
+      type = type->array_element;
+   }
+
+   assert(glsl_type_is_matrix(type->type));
+
+   return type;
+}
+
+static void
+struct_member_decoration_cb(struct vtn_builder *b,
+                            struct vtn_value *val, int member,
+                            const struct vtn_decoration *dec, void *void_ctx)
+{
+   struct member_decoration_ctx *ctx = void_ctx;
+
+   if (member < 0)
+      return;
+
+   assert(member < ctx->num_fields);
+
+   switch (dec->decoration) {
+   case SpvDecorationRelaxedPrecision:
+      break; /* FIXME: Do nothing with this for now. */
+   case SpvDecorationNoPerspective:
+      ctx->fields[member].interpolation = INTERP_QUALIFIER_NOPERSPECTIVE;
+      break;
+   case SpvDecorationFlat:
+      ctx->fields[member].interpolation = INTERP_QUALIFIER_FLAT;
+      break;
+   case SpvDecorationCentroid:
+      ctx->fields[member].centroid = true;
+      break;
+   case SpvDecorationSample:
+      ctx->fields[member].sample = true;
+      break;
+   case SpvDecorationLocation:
+      ctx->fields[member].location = dec->literals[0];
+      break;
+   case SpvDecorationBuiltIn:
+      ctx->type->members[member] = vtn_type_copy(b, ctx->type->members[member]);
+      ctx->type->members[member]->is_builtin = true;
+      ctx->type->members[member]->builtin = dec->literals[0];
+      ctx->type->builtin_block = true;
+      break;
+   case SpvDecorationOffset:
+      ctx->type->offsets[member] = dec->literals[0];
+      break;
+   case SpvDecorationMatrixStride:
+      mutable_matrix_member(b, ctx->type, member)->stride = dec->literals[0];
+      break;
+   case SpvDecorationColMajor:
+      break; /* Nothing to do here.  Column-major is the default. */
+   case SpvDecorationRowMajor:
+      mutable_matrix_member(b, ctx->type, member)->row_major = true;
+      break;
+   default:
+      unreachable("Unhandled member decoration");
+   }
+}
+
+static void
+type_decoration_cb(struct vtn_builder *b,
+                   struct vtn_value *val, int member,
+                   const struct vtn_decoration *dec, void *ctx)
+{
+   struct vtn_type *type = val->type;
+
+   if (member != -1)
+      return;
+
+   switch (dec->decoration) {
+   case SpvDecorationArrayStride:
+      type->stride = dec->literals[0];
+      break;
+   case SpvDecorationBlock:
+      type->block = true;
+      break;
+   case SpvDecorationBufferBlock:
+      type->buffer_block = true;
+      break;
+   case SpvDecorationGLSLShared:
+   case SpvDecorationGLSLPacked:
+      /* Ignore these, since we get explicit offsets anyway */
+      break;
+
+   case SpvDecorationStream:
+      assert(dec->literals[0] == 0);
+      break;
+
+   default:
+      unreachable("Unhandled type decoration");
+   }
+}
+
+static unsigned
+translate_image_format(SpvImageFormat format)
+{
+   switch (format) {
+   case SpvImageFormatUnknown:      return 0;      /* GL_NONE */
+   case SpvImageFormatRgba32f:      return 0x8814; /* GL_RGBA32F */
+   case SpvImageFormatRgba16f:      return 0x881A; /* GL_RGBA16F */
+   case SpvImageFormatR32f:         return 0x822E; /* GL_R32F */
+   case SpvImageFormatRgba8:        return 0x8058; /* GL_RGBA8 */
+   case SpvImageFormatRgba8Snorm:   return 0x8F97; /* GL_RGBA8_SNORM */
+   case SpvImageFormatRg32f:        return 0x8230; /* GL_RG32F */
+   case SpvImageFormatRg16f:        return 0x822F; /* GL_RG16F */
+   case SpvImageFormatR11fG11fB10f: return 0x8C3A; /* GL_R11F_G11F_B10F */
+   case SpvImageFormatR16f:         return 0x822D; /* GL_R16F */
+   case SpvImageFormatRgba16:       return 0x805B; /* GL_RGBA16 */
+   case SpvImageFormatRgb10A2:      return 0x8059; /* GL_RGB10_A2 */
+   case SpvImageFormatRg16:         return 0x822C; /* GL_RG16 */
+   case SpvImageFormatRg8:          return 0x822B; /* GL_RG8 */
+   case SpvImageFormatR16:          return 0x822A; /* GL_R16 */
+   case SpvImageFormatR8:           return 0x8229; /* GL_R8 */
+   case SpvImageFormatRgba16Snorm:  return 0x8F9B; /* GL_RGBA16_SNORM */
+   case SpvImageFormatRg16Snorm:    return 0x8F99; /* GL_RG16_SNORM */
+   case SpvImageFormatRg8Snorm:     return 0x8F95; /* GL_RG8_SNORM */
+   case SpvImageFormatR16Snorm:     return 0x8F98; /* GL_R16_SNORM */
+   case SpvImageFormatR8Snorm:      return 0x8F94; /* GL_R8_SNORM */
+   case SpvImageFormatRgba32i:      return 0x8D82; /* GL_RGBA32I */
+   case SpvImageFormatRgba16i:      return 0x8D88; /* GL_RGBA16I */
+   case SpvImageFormatRgba8i:       return 0x8D8E; /* GL_RGBA8I */
+   case SpvImageFormatR32i:         return 0x8235; /* GL_R32I */
+   case SpvImageFormatRg32i:        return 0x823B; /* GL_RG32I */
+   case SpvImageFormatRg16i:        return 0x8239; /* GL_RG16I */
+   case SpvImageFormatRg8i:         return 0x8237; /* GL_RG8I */
+   case SpvImageFormatR16i:         return 0x8233; /* GL_R16I */
+   case SpvImageFormatR8i:          return 0x8231; /* GL_R8I */
+   case SpvImageFormatRgba32ui:     return 0x8D70; /* GL_RGBA32UI */
+   case SpvImageFormatRgba16ui:     return 0x8D76; /* GL_RGBA16UI */
+   case SpvImageFormatRgba8ui:      return 0x8D7C; /* GL_RGBA8UI */
+   case SpvImageFormatR32ui:        return 0x8236; /* GL_R32UI */
+   case SpvImageFormatRgb10a2ui:    return 0x906F; /* GL_RGB10_A2UI */
+   case SpvImageFormatRg32ui:       return 0x823C; /* GL_RG32UI */
+   case SpvImageFormatRg16ui:       return 0x823A; /* GL_RG16UI */
+   case SpvImageFormatRg8ui:        return 0x8238; /* GL_RG8UI */
+   case SpvImageFormatR16ui:        return 0x8234; /* GL_R16UI */
+   case SpvImageFormatR8ui:         return 0x8232; /* GL_R8UI */
+   default:
+      assert(!"Invalid image format");
+      return 0;
+   }
+}
+
+static void
+vtn_handle_type(struct vtn_builder *b, SpvOp opcode,
+                const uint32_t *w, unsigned count)
+{
+   struct vtn_value *val = vtn_push_value(b, w[1], vtn_value_type_type);
+
+   val->type = rzalloc(b, struct vtn_type);
+   val->type->is_builtin = false;
+   val->type->val = val;
+
+   switch (opcode) {
+   case SpvOpTypeVoid:
+      val->type->type = glsl_void_type();
+      break;
+   case SpvOpTypeBool:
+      val->type->type = glsl_bool_type();
+      break;
+   case SpvOpTypeInt: {
+      const bool signedness = w[3];
+      val->type->type = (signedness ? glsl_int_type() : glsl_uint_type());
+      break;
+   }
+   case SpvOpTypeFloat:
+      val->type->type = glsl_float_type();
+      break;
+
+   case SpvOpTypeVector: {
+      struct vtn_type *base = vtn_value(b, w[2], vtn_value_type_type)->type;
+      unsigned elems = w[3];
+
+      assert(glsl_type_is_scalar(base->type));
+      val->type->type = glsl_vector_type(glsl_get_base_type(base->type), elems);
+
+      /* Vectors implicitly have sizeof(base_type) stride.  For now, this
+       * is always 4 bytes.  This will have to change if we want to start
+       * supporting doubles or half-floats.
+       */
+      val->type->stride = 4;
+      val->type->array_element = base;
+      break;
+   }
+
+   case SpvOpTypeMatrix: {
+      struct vtn_type *base = vtn_value(b, w[2], vtn_value_type_type)->type;
+      unsigned columns = w[3];
+
+      assert(glsl_type_is_vector(base->type));
+      val->type->type = glsl_matrix_type(glsl_get_base_type(base->type),
+                                         glsl_get_vector_elements(base->type),
+                                         columns);
+      assert(!glsl_type_is_error(val->type->type));
+      val->type->array_element = base;
+      val->type->row_major = false;
+      val->type->stride = 0;
+      break;
+   }
+
+   case SpvOpTypeRuntimeArray:
+   case SpvOpTypeArray: {
+      struct vtn_type *array_element =
+         vtn_value(b, w[2], vtn_value_type_type)->type;
+
+      unsigned length;
+      if (opcode == SpvOpTypeRuntimeArray) {
+         /* A length of 0 is used to denote unsized arrays */
+         length = 0;
+      } else {
+         length =
+            vtn_value(b, w[3], vtn_value_type_constant)->constant->value.u[0];
+      }
+
+      val->type->type = glsl_array_type(array_element->type, length);
+      val->type->array_element = array_element;
+      val->type->stride = 0;
+      break;
+   }
+
+   case SpvOpTypeStruct: {
+      unsigned num_fields = count - 2;
+      val->type->members = ralloc_array(b, struct vtn_type *, num_fields);
+      val->type->offsets = ralloc_array(b, unsigned, num_fields);
+
+      NIR_VLA(struct glsl_struct_field, fields, count);
+      for (unsigned i = 0; i < num_fields; i++) {
+         val->type->members[i] =
+            vtn_value(b, w[i + 2], vtn_value_type_type)->type;
+         fields[i] = (struct glsl_struct_field) {
+            .type = val->type->members[i]->type,
+            .name = ralloc_asprintf(b, "field%d", i),
+            .location = -1,
+         };
+      }
+
+      struct member_decoration_ctx ctx = {
+         .num_fields = num_fields,
+         .fields = fields,
+         .type = val->type
+      };
+
+      vtn_foreach_decoration(b, val, struct_member_decoration_cb, &ctx);
+
+      const char *name = val->name ? val->name : "struct";
+
+      val->type->type = glsl_struct_type(fields, num_fields, name);
+      break;
+   }
+
+   case SpvOpTypeFunction: {
+      const struct glsl_type *return_type =
+         vtn_value(b, w[2], vtn_value_type_type)->type->type;
+      NIR_VLA(struct glsl_function_param, params, count - 3);
+      for (unsigned i = 0; i < count - 3; i++) {
+         params[i].type = vtn_value(b, w[i + 3], vtn_value_type_type)->type->type;
+
+         /* FIXME: */
+         params[i].in = true;
+         params[i].out = true;
+      }
+      val->type->type = glsl_function_type(return_type, params, count - 3);
+      break;
+   }
+
+   case SpvOpTypePointer:
+      /* FIXME:  For now, we'll just do the really lame thing and return
+       * the same type.  The validator should ensure that the proper number
+       * of dereferences happen
+       */
+      val->type = vtn_value(b, w[3], vtn_value_type_type)->type;
+      break;
+
+   case SpvOpTypeImage: {
+      const struct glsl_type *sampled_type =
+         vtn_value(b, w[2], vtn_value_type_type)->type->type;
+
+      assert(glsl_type_is_vector_or_scalar(sampled_type));
+
+      enum glsl_sampler_dim dim;
+      switch ((SpvDim)w[3]) {
+      case SpvDim1D:       dim = GLSL_SAMPLER_DIM_1D;    break;
+      case SpvDim2D:       dim = GLSL_SAMPLER_DIM_2D;    break;
+      case SpvDim3D:       dim = GLSL_SAMPLER_DIM_3D;    break;
+      case SpvDimCube:     dim = GLSL_SAMPLER_DIM_CUBE;  break;
+      case SpvDimRect:     dim = GLSL_SAMPLER_DIM_RECT;  break;
+      case SpvDimBuffer:   dim = GLSL_SAMPLER_DIM_BUF;   break;
+      default:
+         unreachable("Invalid SPIR-V Sampler dimension");
+      }
+
+      bool is_shadow = w[4];
+      bool is_array = w[5];
+      bool multisampled = w[6];
+      unsigned sampled = w[7];
+      SpvImageFormat format = w[8];
+
+      if (count > 9)
+         val->type->access_qualifier = w[9];
+      else
+         val->type->access_qualifier = SpvAccessQualifierReadWrite;
+
+      if (multisampled) {
+         assert(dim == GLSL_SAMPLER_DIM_2D);
+         dim = GLSL_SAMPLER_DIM_MS;
+      }
+
+      val->type->image_format = translate_image_format(format);
+
+      if (sampled == 1) {
+         val->type->type = glsl_sampler_type(dim, is_shadow, is_array,
+                                             glsl_get_base_type(sampled_type));
+      } else if (sampled == 2) {
+         assert(format);
+         assert(!is_shadow);
+         val->type->type = glsl_image_type(dim, is_array,
+                                           glsl_get_base_type(sampled_type));
+      } else {
+         assert(!"We need to know if the image will be sampled");
+      }
+      break;
+   }
+
+   case SpvOpTypeSampledImage:
+      val->type = vtn_value(b, w[2], vtn_value_type_type)->type;
+      break;
+
+   case SpvOpTypeSampler:
+      /* The actual sampler type here doesn't really matter.  It gets
+       * thrown away the moment you combine it with an image.  What really
+       * matters is that it's a sampler type as opposed to an integer type
+       * so the backend knows what to do.
+       */
+      val->type->type = glsl_bare_sampler_type();
+      break;
+
+   case SpvOpTypeOpaque:
+   case SpvOpTypeEvent:
+   case SpvOpTypeDeviceEvent:
+   case SpvOpTypeReserveId:
+   case SpvOpTypeQueue:
+   case SpvOpTypePipe:
+   default:
+      unreachable("Unhandled opcode");
+   }
+
+   vtn_foreach_decoration(b, val, type_decoration_cb, NULL);
+}
+
+static nir_constant *
+vtn_null_constant(struct vtn_builder *b, const struct glsl_type *type)
+{
+   nir_constant *c = rzalloc(b, nir_constant);
+
+   switch (glsl_get_base_type(type)) {
+   case GLSL_TYPE_INT:
+   case GLSL_TYPE_UINT:
+   case GLSL_TYPE_BOOL:
+   case GLSL_TYPE_FLOAT:
+   case GLSL_TYPE_DOUBLE:
+      /* Nothing to do here.  It's already initialized to zero */
+      break;
+
+   case GLSL_TYPE_ARRAY:
+      assert(glsl_get_length(type) > 0);
+      c->num_elements = glsl_get_length(type);
+      c->elements = ralloc_array(b, nir_constant *, c->num_elements);
+
+      c->elements[0] = vtn_null_constant(b, glsl_get_array_element(type));
+      for (unsigned i = 1; i < c->num_elements; i++)
+         c->elements[i] = c->elements[0];
+      break;
+
+   case GLSL_TYPE_STRUCT:
+      c->num_elements = glsl_get_length(type);
+      c->elements = ralloc_array(b, nir_constant *, c->num_elements);
+
+      for (unsigned i = 0; i < c->num_elements; i++) {
+         c->elements[i] = vtn_null_constant(b, glsl_get_struct_field(type, i));
+      }
+      break;
+
+   default:
+      unreachable("Invalid type for null constant");
+   }
+
+   return c;
+}
+
+static void
+spec_constant_decoration_cb(struct vtn_builder *b, struct vtn_value *v,
+                            int member, const struct vtn_decoration *dec,
+                            void *data)
+{
+   assert(member == -1);
+   if (dec->decoration != SpvDecorationSpecId)
+      return;
+
+   uint32_t *const_value = data;
+
+   for (unsigned i = 0; i < b->num_specializations; i++) {
+      if (b->specializations[i].id == dec->literals[0]) {
+         *const_value = b->specializations[i].data;
+         return;
+      }
+   }
+}
+
+static uint32_t
+get_specialization(struct vtn_builder *b, struct vtn_value *val,
+                   uint32_t const_value)
+{
+   vtn_foreach_decoration(b, val, spec_constant_decoration_cb, &const_value);
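+   /* E.g. a value decorated "OpDecorate %x SpecId 7": if the driver supplied
+    * a specialization entry with id == 7, its data is returned; otherwise the
+    * module's default literal (const_value as passed in) is kept.
+    */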
+   return const_value;
+}
+
+static void
+vtn_handle_constant(struct vtn_builder *b, SpvOp opcode,
+                    const uint32_t *w, unsigned count)
+{
+   struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_constant);
+   val->const_type = vtn_value(b, w[1], vtn_value_type_type)->type->type;
+   val->constant = rzalloc(b, nir_constant);
+   switch (opcode) {
+   case SpvOpConstantTrue:
+      assert(val->const_type == glsl_bool_type());
+      val->constant->value.u[0] = NIR_TRUE;
+      break;
+   case SpvOpConstantFalse:
+      assert(val->const_type == glsl_bool_type());
+      val->constant->value.u[0] = NIR_FALSE;
+      break;
+
+   case SpvOpSpecConstantTrue:
+   case SpvOpSpecConstantFalse: {
+      assert(val->const_type == glsl_bool_type());
+      uint32_t int_val =
+         get_specialization(b, val, (opcode == SpvOpSpecConstantTrue));
+      val->constant->value.u[0] = int_val ? NIR_TRUE : NIR_FALSE;
+      break;
+   }
+
+   case SpvOpConstant:
+      assert(glsl_type_is_scalar(val->const_type));
+      val->constant->value.u[0] = w[3];
+      break;
+   case SpvOpSpecConstant:
+      assert(glsl_type_is_scalar(val->const_type));
+      val->constant->value.u[0] = get_specialization(b, val, w[3]);
+      break;
+   case SpvOpSpecConstantComposite:
+   case SpvOpConstantComposite: {
+      unsigned elem_count = count - 3;
+      nir_constant **elems = ralloc_array(b, nir_constant *, elem_count);
+      for (unsigned i = 0; i < elem_count; i++)
+         elems[i] = vtn_value(b, w[i + 3], vtn_value_type_constant)->constant;
+
+      switch (glsl_get_base_type(val->const_type)) {
+      case GLSL_TYPE_UINT:
+      case GLSL_TYPE_INT:
+      case GLSL_TYPE_FLOAT:
+      case GLSL_TYPE_BOOL:
+         if (glsl_type_is_matrix(val->const_type)) {
+            unsigned rows = glsl_get_vector_elements(val->const_type);
+            assert(glsl_get_matrix_columns(val->const_type) == elem_count);
+            for (unsigned i = 0; i < elem_count; i++)
+               for (unsigned j = 0; j < rows; j++)
+                  val->constant->value.u[rows * i + j] = elems[i]->value.u[j];
+         } else {
+            assert(glsl_type_is_vector(val->const_type));
+            assert(glsl_get_vector_elements(val->const_type) == elem_count);
+            for (unsigned i = 0; i < elem_count; i++)
+               val->constant->value.u[i] = elems[i]->value.u[0];
+         }
+         ralloc_free(elems);
+         break;
+
+      case GLSL_TYPE_STRUCT:
+      case GLSL_TYPE_ARRAY:
+         ralloc_steal(val->constant, elems);
+         val->constant->num_elements = elem_count;
+         val->constant->elements = elems;
+         break;
+
+      default:
+         unreachable("Unsupported type for constants");
+      }
+      break;
+   }
+
+   case SpvOpSpecConstantOp: {
+      SpvOp opcode = get_specialization(b, val, w[3]);
+      switch (opcode) {
+      case SpvOpVectorShuffle: {
+         struct vtn_value *v0 = vtn_value(b, w[4], vtn_value_type_constant);
+         struct vtn_value *v1 = vtn_value(b, w[5], vtn_value_type_constant);
+         unsigned len0 = glsl_get_vector_elements(v0->const_type);
+         unsigned len1 = glsl_get_vector_elements(v1->const_type);
+
+         uint32_t u[8];
+         for (unsigned i = 0; i < len0; i++)
+            u[i] = v0->constant->value.u[i];
+         for (unsigned i = 0; i < len1; i++)
+            u[len0 + i] = v1->constant->value.u[i];
+
+         for (unsigned i = 0; i < count - 6; i++) {
+            uint32_t comp = w[i + 6];
+            if (comp == (uint32_t)-1) {
+               val->constant->value.u[i] = 0xdeadbeef;
+            } else {
+               val->constant->value.u[i] = u[comp];
+            }
+         }
+         return;
+      }
+
+      case SpvOpCompositeExtract:
+      case SpvOpCompositeInsert: {
+         struct vtn_value *comp;
+         unsigned deref_start;
+         struct nir_constant **c;
+         if (opcode == SpvOpCompositeExtract) {
+            comp = vtn_value(b, w[4], vtn_value_type_constant);
+            deref_start = 5;
+            c = &comp->constant;
+         } else {
+            comp = vtn_value(b, w[5], vtn_value_type_constant);
+            deref_start = 6;
+            val->constant = nir_constant_clone(comp->constant,
+                                               (nir_variable *)b);
+            c = &val->constant;
+         }
+
+         int elem = -1;
+         const struct glsl_type *type = comp->const_type;
+         for (unsigned i = deref_start; i < count; i++) {
+            switch (glsl_get_base_type(type)) {
+            case GLSL_TYPE_UINT:
+            case GLSL_TYPE_INT:
+            case GLSL_TYPE_FLOAT:
+            case GLSL_TYPE_BOOL:
+               /* If we hit this granularity, we're picking off an element */
+               if (elem < 0)
+                  elem = 0;
+
+               if (glsl_type_is_matrix(type)) {
+                  elem += w[i] * glsl_get_vector_elements(type);
+                  type = glsl_get_column_type(type);
+               } else {
+                  assert(glsl_type_is_vector(type));
+                  elem += w[i];
+                  type = glsl_scalar_type(glsl_get_base_type(type));
+               }
+               continue;
+
+            case GLSL_TYPE_ARRAY:
+               c = &(*c)->elements[w[i]];
+               type = glsl_get_array_element(type);
+               continue;
+
+            case GLSL_TYPE_STRUCT:
+               c = &(*c)->elements[w[i]];
+               type = glsl_get_struct_field(type, w[i]);
+               continue;
+
+            default:
+               unreachable("Invalid constant type");
+            }
+         }
+
+         if (opcode == SpvOpCompositeExtract) {
+            if (elem == -1) {
+               val->constant = *c;
+            } else {
+               unsigned num_components = glsl_get_vector_elements(type);
+               for (unsigned i = 0; i < num_components; i++)
+                  val->constant->value.u[i] = (*c)->value.u[elem + i];
+            }
+         } else {
+            struct vtn_value *insert =
+               vtn_value(b, w[4], vtn_value_type_constant);
+            assert(insert->const_type == type);
+            if (elem == -1) {
+               *c = insert->constant;
+            } else {
+               unsigned num_components = glsl_get_vector_elements(type);
+               for (unsigned i = 0; i < num_components; i++)
+                  (*c)->value.u[elem + i] = insert->constant->value.u[i];
+            }
+         }
+         return;
+      }
+
+      default: {
+         bool swap;
+         nir_op op = vtn_nir_alu_op_for_spirv_opcode(opcode, &swap);
+
+         unsigned num_components = glsl_get_vector_elements(val->const_type);
+
+         nir_const_value src[3];
+         assert(count <= 7);
+         for (unsigned i = 0; i < count - 4; i++) {
+            nir_constant *c =
+               vtn_value(b, w[4 + i], vtn_value_type_constant)->constant;
+
+            unsigned j = swap ? 1 - i : i;
+            for (unsigned k = 0; k < num_components; k++)
+               src[j].u[k] = c->value.u[k];
+         }
+
+         nir_const_value res = nir_eval_const_opcode(op, num_components, src);
+
+         for (unsigned k = 0; k < num_components; k++)
+            val->constant->value.u[k] = res.u[k];
+
+         return;
+      } /* default */
+      }
+   }
+
+   case SpvOpConstantNull:
+      val->constant = vtn_null_constant(b, val->const_type);
+      break;
+
+   case SpvOpConstantSampler:
+      assert(!"OpConstantSampler requires Kernel Capability");
+      break;
+
+   default:
+      unreachable("Unhandled opcode");
+   }
+}
+
+static void
+vtn_handle_function_call(struct vtn_builder *b, SpvOp opcode,
+                         const uint32_t *w, unsigned count)
+{
+   struct nir_function *callee =
+      vtn_value(b, w[3], vtn_value_type_function)->func->impl->function;
+
+   nir_call_instr *call = nir_call_instr_create(b->nb.shader, callee);
+   for (unsigned i = 0; i < call->num_params; i++) {
+      unsigned arg_id = w[4 + i];
+      struct vtn_value *arg = vtn_untyped_value(b, arg_id);
+      if (arg->value_type == vtn_value_type_access_chain) {
+         nir_deref_var *d = vtn_access_chain_to_deref(b, arg->access_chain);
+         call->params[i] = nir_deref_as_var(nir_copy_deref(call, &d->deref));
+      } else {
+         struct vtn_ssa_value *arg_ssa = vtn_ssa_value(b, arg_id);
+
+         /* Make a temporary to store the argument in */
+         nir_variable *tmp =
+            nir_local_variable_create(b->impl, arg_ssa->type, "arg_tmp");
+         call->params[i] = nir_deref_var_create(call, tmp);
+
+         vtn_local_store(b, arg_ssa, call->params[i]);
+      }
+   }
+
+   nir_variable *out_tmp = NULL;
+   if (!glsl_type_is_void(callee->return_type)) {
+      out_tmp = nir_local_variable_create(b->impl, callee->return_type,
+                                          "out_tmp");
+      call->return_deref = nir_deref_var_create(call, out_tmp);
+   }
+
+   nir_builder_instr_insert(&b->nb, &call->instr);
+
+   if (glsl_type_is_void(callee->return_type)) {
+      vtn_push_value(b, w[2], vtn_value_type_undef);
+   } else {
+      struct vtn_value *retval = vtn_push_value(b, w[2], vtn_value_type_ssa);
+      retval->ssa = vtn_local_load(b, call->return_deref);
+   }
+}
+
+struct vtn_ssa_value *
+vtn_create_ssa_value(struct vtn_builder *b, const struct glsl_type *type)
+{
+   struct vtn_ssa_value *val = rzalloc(b, struct vtn_ssa_value);
+   val->type = type;
+
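+   /* For aggregate types this builds a tree mirroring the GLSL type: e.g. a
+    * mat3 gets three elems, one per vec3 column, and a two-field struct gets
+    * elems[0] and elems[1], each filled in recursively below.
+    */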
+   if (!glsl_type_is_vector_or_scalar(type)) {
+      unsigned elems = glsl_get_length(type);
+      val->elems = ralloc_array(b, struct vtn_ssa_value *, elems);
+      for (unsigned i = 0; i < elems; i++) {
+         const struct glsl_type *child_type;
+
+         switch (glsl_get_base_type(type)) {
+         case GLSL_TYPE_INT:
+         case GLSL_TYPE_UINT:
+         case GLSL_TYPE_BOOL:
+         case GLSL_TYPE_FLOAT:
+         case GLSL_TYPE_DOUBLE:
+            child_type = glsl_get_column_type(type);
+            break;
+         case GLSL_TYPE_ARRAY:
+            child_type = glsl_get_array_element(type);
+            break;
+         case GLSL_TYPE_STRUCT:
+            child_type = glsl_get_struct_field(type, i);
+            break;
+         default:
+            unreachable("unknown base type");
+         }
+
+         val->elems[i] = vtn_create_ssa_value(b, child_type);
+      }
+   }
+
+   return val;
+}
+
+static nir_tex_src
+vtn_tex_src(struct vtn_builder *b, unsigned index, nir_tex_src_type type)
+{
+   nir_tex_src src;
+   src.src = nir_src_for_ssa(vtn_ssa_value(b, index)->def);
+   src.src_type = type;
+   return src;
+}
+
+static void
+vtn_handle_texture(struct vtn_builder *b, SpvOp opcode,
+                   const uint32_t *w, unsigned count)
+{
+   if (opcode == SpvOpSampledImage) {
+      struct vtn_value *val =
+         vtn_push_value(b, w[2], vtn_value_type_sampled_image);
+      val->sampled_image = ralloc(b, struct vtn_sampled_image);
+      val->sampled_image->image =
+         vtn_value(b, w[3], vtn_value_type_access_chain)->access_chain;
+      val->sampled_image->sampler =
+         vtn_value(b, w[4], vtn_value_type_access_chain)->access_chain;
+      return;
+   } else if (opcode == SpvOpImage) {
+      struct vtn_value *val =
+         vtn_push_value(b, w[2], vtn_value_type_access_chain);
+      struct vtn_value *src_val = vtn_untyped_value(b, w[3]);
+      if (src_val->value_type == vtn_value_type_sampled_image) {
+         val->access_chain = src_val->sampled_image->image;
+      } else {
+         assert(src_val->value_type == vtn_value_type_access_chain);
+         val->access_chain = src_val->access_chain;
+      }
+      return;
+   }
+
+   struct vtn_type *ret_type = vtn_value(b, w[1], vtn_value_type_type)->type;
+   struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_ssa);
+
+   struct vtn_sampled_image sampled;
+   struct vtn_value *sampled_val = vtn_untyped_value(b, w[3]);
+   if (sampled_val->value_type == vtn_value_type_sampled_image) {
+      sampled = *sampled_val->sampled_image;
+   } else {
+      assert(sampled_val->value_type == vtn_value_type_access_chain);
+      sampled.image = NULL;
+      sampled.sampler = sampled_val->access_chain;
+   }
+
+   const struct glsl_type *image_type;
+   if (sampled.image) {
+      image_type = sampled.image->var->var->interface_type;
+   } else {
+      image_type = sampled.sampler->var->var->interface_type;
+   }
+
+   nir_tex_src srcs[8]; /* 8 should be enough */
+   nir_tex_src *p = srcs;
+
+   unsigned idx = 4;
+
+   bool has_coord = false;
+   switch (opcode) {
+   case SpvOpImageSampleImplicitLod:
+   case SpvOpImageSampleExplicitLod:
+   case SpvOpImageSampleDrefImplicitLod:
+   case SpvOpImageSampleDrefExplicitLod:
+   case SpvOpImageSampleProjImplicitLod:
+   case SpvOpImageSampleProjExplicitLod:
+   case SpvOpImageSampleProjDrefImplicitLod:
+   case SpvOpImageSampleProjDrefExplicitLod:
+   case SpvOpImageFetch:
+   case SpvOpImageGather:
+   case SpvOpImageDrefGather:
+   case SpvOpImageQueryLod: {
+      /* All these opcodes have the coordinate as their first real argument */
+      struct vtn_ssa_value *coord = vtn_ssa_value(b, w[idx++]);
+      has_coord = true;
+      p->src = nir_src_for_ssa(coord->def);
+      p->src_type = nir_tex_src_coord;
+      p++;
+      break;
+   }
+
+   default:
+      break;
+   }
+
+   /* These all have an explicit depth value as their next source */
+   switch (opcode) {
+   case SpvOpImageSampleDrefImplicitLod:
+   case SpvOpImageSampleDrefExplicitLod:
+   case SpvOpImageSampleProjDrefImplicitLod:
+   case SpvOpImageSampleProjDrefExplicitLod:
+      (*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_comparitor);
+      break;
+   default:
+      break;
+   }
+
+   /* For OpImageQuerySizeLod, we always have an LOD */
+   if (opcode == SpvOpImageQuerySizeLod)
+      (*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_lod);
+
+   /* Figure out the base texture operation */
+   nir_texop texop;
+   switch (opcode) {
+   case SpvOpImageSampleImplicitLod:
+   case SpvOpImageSampleDrefImplicitLod:
+   case SpvOpImageSampleProjImplicitLod:
+   case SpvOpImageSampleProjDrefImplicitLod:
+      texop = nir_texop_tex;
+      break;
+
+   case SpvOpImageSampleExplicitLod:
+   case SpvOpImageSampleDrefExplicitLod:
+   case SpvOpImageSampleProjExplicitLod:
+   case SpvOpImageSampleProjDrefExplicitLod:
+      texop = nir_texop_txl;
+      break;
+
+   case SpvOpImageFetch:
+      if (glsl_get_sampler_dim(image_type) == GLSL_SAMPLER_DIM_MS) {
+         texop = nir_texop_txf_ms;
+      } else {
+         texop = nir_texop_txf;
+      }
+      break;
+
+   case SpvOpImageGather:
+   case SpvOpImageDrefGather:
+      texop = nir_texop_tg4;
+      break;
+
+   case SpvOpImageQuerySizeLod:
+   case SpvOpImageQuerySize:
+      texop = nir_texop_txs;
+      break;
+
+   case SpvOpImageQueryLod:
+      texop = nir_texop_lod;
+      break;
+
+   case SpvOpImageQueryLevels:
+      texop = nir_texop_query_levels;
+      break;
+
+   case SpvOpImageQuerySamples:
+   default:
+      unreachable("Unhandled opcode");
+   }
+
+   /* Now we need to handle some number of optional arguments */
+   if (idx < count) {
+      uint32_t operands = w[idx++];
+
+      if (operands & SpvImageOperandsBiasMask) {
+         assert(texop == nir_texop_tex);
+         texop = nir_texop_txb;
+         (*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_bias);
+      }
+
+      if (operands & SpvImageOperandsLodMask) {
+         assert(texop == nir_texop_txl || texop == nir_texop_txf ||
+                texop == nir_texop_txf_ms || texop == nir_texop_txs);
+         (*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_lod);
+      }
+
+      if (operands & SpvImageOperandsGradMask) {
+         assert(texop == nir_texop_tex);
+         texop = nir_texop_txd;
+         (*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_ddx);
+         (*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_ddy);
+      }
+
+      if (operands & SpvImageOperandsOffsetMask ||
+          operands & SpvImageOperandsConstOffsetMask)
+         (*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_offset);
+
+      if (operands & SpvImageOperandsConstOffsetsMask)
+         assert(!"Constant offsets to texture gather not yet implemented");
+
+      if (operands & SpvImageOperandsSampleMask) {
+         assert(texop == nir_texop_txf_ms);
+         texop = nir_texop_txf_ms;
+         (*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_ms_index);
+      }
+   }
+   /* We should have now consumed exactly all of the arguments */
+   assert(idx == count);
+
+   nir_tex_instr *instr = nir_tex_instr_create(b->shader, p - srcs);
+   instr->op = texop;
+
+   memcpy(instr->src, srcs, instr->num_srcs * sizeof(*instr->src));
+
+   instr->sampler_dim = glsl_get_sampler_dim(image_type);
+   instr->is_array = glsl_sampler_type_is_array(image_type);
+   instr->is_shadow = glsl_sampler_type_is_shadow(image_type);
+   instr->is_new_style_shadow = instr->is_shadow;
+
+   if (has_coord) {
+      switch (instr->sampler_dim) {
+      case GLSL_SAMPLER_DIM_1D:
+      case GLSL_SAMPLER_DIM_BUF:
+         instr->coord_components = 1;
+         break;
+      case GLSL_SAMPLER_DIM_2D:
+      case GLSL_SAMPLER_DIM_RECT:
+      case GLSL_SAMPLER_DIM_MS:
+         instr->coord_components = 2;
+         break;
+      case GLSL_SAMPLER_DIM_3D:
+      case GLSL_SAMPLER_DIM_CUBE:
+         instr->coord_components = 3;
+         break;
+      default:
+         assert("Invalid sampler type");
+      }
+
+      if (instr->is_array)
+         instr->coord_components++;
+   } else {
+      instr->coord_components = 0;
+   }
+
+   switch (glsl_get_sampler_result_type(image_type)) {
+   case GLSL_TYPE_FLOAT:   instr->dest_type = nir_type_float;     break;
+   case GLSL_TYPE_INT:     instr->dest_type = nir_type_int;       break;
+   case GLSL_TYPE_UINT:    instr->dest_type = nir_type_uint;      break;
+   case GLSL_TYPE_BOOL:    instr->dest_type = nir_type_bool;      break;
+   default:
+      unreachable("Invalid base type for sampler result");
+   }
+
+   nir_deref_var *sampler = vtn_access_chain_to_deref(b, sampled.sampler);
+   if (sampled.image) {
+      nir_deref_var *image = vtn_access_chain_to_deref(b, sampled.image);
+      instr->texture = nir_deref_as_var(nir_copy_deref(instr, &image->deref));
+   } else {
+      instr->texture = nir_deref_as_var(nir_copy_deref(instr, &sampler->deref));
+   }
+
+   switch (instr->op) {
+   case nir_texop_tex:
+   case nir_texop_txb:
+   case nir_texop_txl:
+   case nir_texop_txd:
+      /* These operations require a sampler */
+      instr->sampler = nir_deref_as_var(nir_copy_deref(instr, &sampler->deref));
+      break;
+   case nir_texop_txf:
+   case nir_texop_txf_ms:
+   case nir_texop_txs:
+   case nir_texop_lod:
+   case nir_texop_tg4:
+   case nir_texop_query_levels:
+   case nir_texop_texture_samples:
+   case nir_texop_samples_identical:
+      /* These don't */
+      instr->sampler = NULL;
+      break;
+   }
+
+   nir_ssa_dest_init(&instr->instr, &instr->dest,
+                     nir_tex_instr_dest_size(instr), NULL);
+
+   assert(glsl_get_vector_elements(ret_type->type) ==
+          nir_tex_instr_dest_size(instr));
+
+   val->ssa = vtn_create_ssa_value(b, ret_type->type);
+   val->ssa->def = &instr->dest.ssa;
+
+   nir_builder_instr_insert(&b->nb, &instr->instr);
+}
+
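+/* Fetches an image coordinate and pads it out to the four components the NIR
+ * image_load_store intrinsics expect, replicating the last valid component
+ * into the unused slots.
+ */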
+static nir_ssa_def *
+get_image_coord(struct vtn_builder *b, uint32_t value)
+{
+   struct vtn_ssa_value *coord = vtn_ssa_value(b, value);
+
+   /* The image_load_store intrinsics assume a 4-dim coordinate */
+   unsigned dim = glsl_get_vector_elements(coord->type);
+   unsigned swizzle[4];
+   for (unsigned i = 0; i < 4; i++)
+      swizzle[i] = MIN2(i, dim - 1);
+
+   return nir_swizzle(&b->nb, coord->def, swizzle, 4, false);
+}
+
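+/* Handles storage-image access.  OpImageTexelPointer just records the image,
+ * coordinate, and sample for a later atomic; OpImageRead/Write,
+ * OpImageQuerySize, and the image atomics become nir_intrinsic_image_*
+ * intrinsics.
+ */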
+static void
+vtn_handle_image(struct vtn_builder *b, SpvOp opcode,
+                 const uint32_t *w, unsigned count)
+{
+   /* Just get this one out of the way */
+   if (opcode == SpvOpImageTexelPointer) {
+      struct vtn_value *val =
+         vtn_push_value(b, w[2], vtn_value_type_image_pointer);
+      val->image = ralloc(b, struct vtn_image_pointer);
+
+      val->image->image =
+         vtn_value(b, w[3], vtn_value_type_access_chain)->access_chain;
+      val->image->coord = get_image_coord(b, w[4]);
+      val->image->sample = vtn_ssa_value(b, w[5])->def;
+      return;
+   }
+
+   struct vtn_image_pointer image;
+
+   switch (opcode) {
+   case SpvOpAtomicExchange:
+   case SpvOpAtomicCompareExchange:
+   case SpvOpAtomicCompareExchangeWeak:
+   case SpvOpAtomicIIncrement:
+   case SpvOpAtomicIDecrement:
+   case SpvOpAtomicIAdd:
+   case SpvOpAtomicISub:
+   case SpvOpAtomicSMin:
+   case SpvOpAtomicUMin:
+   case SpvOpAtomicSMax:
+   case SpvOpAtomicUMax:
+   case SpvOpAtomicAnd:
+   case SpvOpAtomicOr:
+   case SpvOpAtomicXor:
+      image = *vtn_value(b, w[3], vtn_value_type_image_pointer)->image;
+      break;
+
+   case SpvOpImageQuerySize:
+      image.image =
+         vtn_value(b, w[3], vtn_value_type_access_chain)->access_chain;
+      image.coord = NULL;
+      image.sample = NULL;
+      break;
+
+   case SpvOpImageRead:
+      image.image =
+         vtn_value(b, w[3], vtn_value_type_access_chain)->access_chain;
+      image.coord = get_image_coord(b, w[4]);
+
+      if (count > 5 && (w[5] & SpvImageOperandsSampleMask)) {
+         assert(w[5] == SpvImageOperandsSampleMask);
+         image.sample = vtn_ssa_value(b, w[6])->def;
+      } else {
+         image.sample = nir_ssa_undef(&b->nb, 1);
+      }
+      break;
+
+   case SpvOpImageWrite:
+      image.image =
+         vtn_value(b, w[1], vtn_value_type_access_chain)->access_chain;
+      image.coord = get_image_coord(b, w[2]);
+
+      /* texel = w[3] */
+
+      if (count > 4 && (w[4] & SpvImageOperandsSampleMask)) {
+         assert(w[4] == SpvImageOperandsSampleMask);
+         image.sample = vtn_ssa_value(b, w[5])->def;
+      } else {
+         image.sample = nir_ssa_undef(&b->nb, 1);
+      }
+      break;
+
+   default:
+      unreachable("Invalid image opcode");
+   }
+
+   nir_intrinsic_op op;
+   switch (opcode) {
+#define OP(S, N) case SpvOp##S: op = nir_intrinsic_image_##N; break;
+   OP(ImageQuerySize,         size)
+   OP(ImageRead,              load)
+   OP(ImageWrite,             store)
+   OP(AtomicExchange,         atomic_exchange)
+   OP(AtomicCompareExchange,  atomic_comp_swap)
+   OP(AtomicIIncrement,       atomic_add)
+   OP(AtomicIDecrement,       atomic_add)
+   OP(AtomicIAdd,             atomic_add)
+   OP(AtomicISub,             atomic_add)
+   OP(AtomicSMin,             atomic_min)
+   OP(AtomicUMin,             atomic_min)
+   OP(AtomicSMax,             atomic_max)
+   OP(AtomicUMax,             atomic_max)
+   OP(AtomicAnd,              atomic_and)
+   OP(AtomicOr,               atomic_or)
+   OP(AtomicXor,              atomic_xor)
+#undef OP
+   default:
+      unreachable("Invalid image opcode");
+   }
+
+   nir_intrinsic_instr *intrin = nir_intrinsic_instr_create(b->shader, op);
+
+   nir_deref_var *image_deref = vtn_access_chain_to_deref(b, image.image);
+   intrin->variables[0] =
+      nir_deref_as_var(nir_copy_deref(&intrin->instr, &image_deref->deref));
+
+   /* ImageQuerySize doesn't take any extra parameters */
+   if (opcode != SpvOpImageQuerySize) {
+      /* The image coordinate is always 4 components but we may not have that
+       * many.  Swizzle to compensate.
+       */
+      unsigned swiz[4];
+      for (unsigned i = 0; i < 4; i++)
+         swiz[i] = i < image.coord->num_components ? i : 0;
+      intrin->src[0] = nir_src_for_ssa(nir_swizzle(&b->nb, image.coord,
+                                                   swiz, 4, false));
+      intrin->src[1] = nir_src_for_ssa(image.sample);
+   }
+
+   switch (opcode) {
+   case SpvOpImageQuerySize:
+   case SpvOpImageRead:
+      break;
+   case SpvOpImageWrite:
+      intrin->src[2] = nir_src_for_ssa(vtn_ssa_value(b, w[3])->def);
+      break;
+   case SpvOpAtomicIIncrement:
+      intrin->src[2] = nir_src_for_ssa(nir_imm_int(&b->nb, 1));
+      break;
+   case SpvOpAtomicIDecrement:
+      intrin->src[2] = nir_src_for_ssa(nir_imm_int(&b->nb, -1));
+      break;
+
+   case SpvOpAtomicExchange:
+   case SpvOpAtomicIAdd:
+   case SpvOpAtomicSMin:
+   case SpvOpAtomicUMin:
+   case SpvOpAtomicSMax:
+   case SpvOpAtomicUMax:
+   case SpvOpAtomicAnd:
+   case SpvOpAtomicOr:
+   case SpvOpAtomicXor:
+      intrin->src[2] = nir_src_for_ssa(vtn_ssa_value(b, w[6])->def);
+      break;
+
+   case SpvOpAtomicCompareExchange:
+      intrin->src[2] = nir_src_for_ssa(vtn_ssa_value(b, w[7])->def);
+      intrin->src[3] = nir_src_for_ssa(vtn_ssa_value(b, w[6])->def);
+      break;
+
+   case SpvOpAtomicISub:
+      intrin->src[2] = nir_src_for_ssa(nir_ineg(&b->nb, vtn_ssa_value(b, w[6])->def));
+      break;
+
+   default:
+      unreachable("Invalid image opcode");
+   }
+
+   if (opcode != SpvOpImageWrite) {
+      struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_ssa);
+      struct vtn_type *type = vtn_value(b, w[1], vtn_value_type_type)->type;
+      nir_ssa_dest_init(&intrin->instr, &intrin->dest, 4, NULL);
+
+      nir_builder_instr_insert(&b->nb, &intrin->instr);
+
+      /* The image intrinsics always return 4 channels but we may not want
+       * that many.  Emit a mov to trim it down.
+       */
+      unsigned swiz[4] = {0, 1, 2, 3};
+      val->ssa = vtn_create_ssa_value(b, type->type);
+      val->ssa->def = nir_swizzle(&b->nb, &intrin->dest.ssa, swiz,
+                                  glsl_get_vector_elements(type->type), false);
+   } else {
+      nir_builder_instr_insert(&b->nb, &intrin->instr);
+   }
+}
+
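+/* Maps a SPIR-V atomic opcode onto the corresponding SSBO atomic intrinsic.
+ * IIncrement, IDecrement, and ISub all map to atomic_add; the adjusted data
+ * operand is supplied by fill_common_atomic_sources().
+ */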
+static nir_intrinsic_op
+get_ssbo_nir_atomic_op(SpvOp opcode)
+{
+   switch (opcode) {
+#define OP(S, N) case SpvOp##S: return nir_intrinsic_ssbo_##N;
+   OP(AtomicExchange,         atomic_exchange)
+   OP(AtomicCompareExchange,  atomic_comp_swap)
+   OP(AtomicIIncrement,       atomic_add)
+   OP(AtomicIDecrement,       atomic_add)
+   OP(AtomicIAdd,             atomic_add)
+   OP(AtomicISub,             atomic_add)
+   OP(AtomicSMin,             atomic_imin)
+   OP(AtomicUMin,             atomic_umin)
+   OP(AtomicSMax,             atomic_imax)
+   OP(AtomicUMax,             atomic_umax)
+   OP(AtomicAnd,              atomic_and)
+   OP(AtomicOr,               atomic_or)
+   OP(AtomicXor,              atomic_xor)
+#undef OP
+   default:
+      unreachable("Invalid SSBO atomic");
+   }
+}
+
+static nir_intrinsic_op
+get_shared_nir_atomic_op(SpvOp opcode)
+{
+   switch (opcode) {
+#define OP(S, N) case SpvOp##S: return nir_intrinsic_var_##N;
+   OP(AtomicExchange,         atomic_exchange)
+   OP(AtomicCompareExchange,  atomic_comp_swap)
+   OP(AtomicIIncrement,       atomic_add)
+   OP(AtomicIDecrement,       atomic_add)
+   OP(AtomicIAdd,             atomic_add)
+   OP(AtomicISub,             atomic_add)
+   OP(AtomicSMin,             atomic_imin)
+   OP(AtomicUMin,             atomic_umin)
+   OP(AtomicSMax,             atomic_imax)
+   OP(AtomicUMax,             atomic_umax)
+   OP(AtomicAnd,              atomic_and)
+   OP(AtomicOr,               atomic_or)
+   OP(AtomicXor,              atomic_xor)
+#undef OP
+   default:
+      unreachable("Invalid shared atomic");
+   }
+}
+
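+/* Fills in the data source(s) shared by the SSBO and workgroup atomic paths,
+ * synthesizing the +1/-1 operand for increment/decrement and negating the
+ * operand for ISub.
+ */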
+static void
+fill_common_atomic_sources(struct vtn_builder *b, SpvOp opcode,
+                           const uint32_t *w, nir_src *src)
+{
+   switch (opcode) {
+   case SpvOpAtomicIIncrement:
+      src[0] = nir_src_for_ssa(nir_imm_int(&b->nb, 1));
+      break;
+
+   case SpvOpAtomicIDecrement:
+      src[0] = nir_src_for_ssa(nir_imm_int(&b->nb, -1));
+      break;
+
+   case SpvOpAtomicISub:
+      src[0] =
+         nir_src_for_ssa(nir_ineg(&b->nb, vtn_ssa_value(b, w[6])->def));
+      break;
+
+   case SpvOpAtomicCompareExchange:
+      src[0] = nir_src_for_ssa(vtn_ssa_value(b, w[7])->def);
+      src[1] = nir_src_for_ssa(vtn_ssa_value(b, w[8])->def);
+      break;
+
+   case SpvOpAtomicExchange:
+   case SpvOpAtomicIAdd:
+   case SpvOpAtomicSMin:
+   case SpvOpAtomicUMin:
+   case SpvOpAtomicSMax:
+   case SpvOpAtomicUMax:
+   case SpvOpAtomicAnd:
+   case SpvOpAtomicOr:
+   case SpvOpAtomicXor:
+      src[0] = nir_src_for_ssa(vtn_ssa_value(b, w[6])->def);
+      break;
+
+   default:
+      unreachable("Invalid SPIR-V atomic");
+   }
+}
+
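+/* Emits either a var_atomic_* (workgroup storage) or an ssbo_atomic_*
+ * intrinsic depending on the storage mode of the pointer operand.  The scope
+ * and memory-semantics operands are currently ignored.
+ */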
+static void
+vtn_handle_ssbo_or_shared_atomic(struct vtn_builder *b, SpvOp opcode,
+                                 const uint32_t *w, unsigned count)
+{
+   struct vtn_access_chain *chain =
+      vtn_value(b, w[3], vtn_value_type_access_chain)->access_chain;
+   nir_intrinsic_instr *atomic;
+
+   /*
+   SpvScope scope = w[4];
+   SpvMemorySemanticsMask semantics = w[5];
+   */
+
+   if (chain->var->mode == vtn_variable_mode_workgroup) {
+      nir_deref *deref = &vtn_access_chain_to_deref(b, chain)->deref;
+      nir_intrinsic_op op = get_shared_nir_atomic_op(opcode);
+      atomic = nir_intrinsic_instr_create(b->nb.shader, op);
+      atomic->variables[0] = nir_deref_as_var(nir_copy_deref(atomic, deref));
+      fill_common_atomic_sources(b, opcode, w, &atomic->src[0]);
+   } else {
+      assert(chain->var->mode == vtn_variable_mode_ssbo);
+      struct vtn_type *type;
+      nir_ssa_def *offset, *index;
+      offset = vtn_access_chain_to_offset(b, chain, &index, &type, NULL, false);
+
+      nir_intrinsic_op op = get_ssbo_nir_atomic_op(opcode);
+
+      atomic = nir_intrinsic_instr_create(b->nb.shader, op);
+      atomic->src[0] = nir_src_for_ssa(index);
+      atomic->src[1] = nir_src_for_ssa(offset);
+      fill_common_atomic_sources(b, opcode, w, &atomic->src[2]);
+   }
+
+   nir_ssa_dest_init(&atomic->instr, &atomic->dest, 1, NULL);
+
+   struct vtn_type *type = vtn_value(b, w[1], vtn_value_type_type)->type;
+   struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_ssa);
+   val->ssa = rzalloc(b, struct vtn_ssa_value);
+   val->ssa->def = &atomic->dest.ssa;
+   val->ssa->type = type->type;
+
+   nir_builder_instr_insert(&b->nb, &atomic->instr);
+}
+
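+/* Creates an fmov (single component) or vecN ALU instruction with an SSA
+ * destination of the given width; the caller fills in the sources before
+ * inserting it.
+ */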
+static nir_alu_instr *
+create_vec(nir_shader *shader, unsigned num_components)
+{
+   nir_op op;
+   switch (num_components) {
+   case 1: op = nir_op_fmov; break;
+   case 2: op = nir_op_vec2; break;
+   case 3: op = nir_op_vec3; break;
+   case 4: op = nir_op_vec4; break;
+   default: unreachable("bad vector size");
+   }
+
+   nir_alu_instr *vec = nir_alu_instr_create(shader, op);
+   nir_ssa_dest_init(&vec->instr, &vec->dest.dest, num_components, NULL);
+   vec->dest.write_mask = (1 << num_components) - 1;
+
+   return vec;
+}
+
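+/* Transposes a matrix value by emitting one vecN per destination column.  If
+ * the value already carries a cached transpose, that is returned instead.
+ */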
+struct vtn_ssa_value *
+vtn_ssa_transpose(struct vtn_builder *b, struct vtn_ssa_value *src)
+{
+   if (src->transposed)
+      return src->transposed;
+
+   struct vtn_ssa_value *dest =
+      vtn_create_ssa_value(b, glsl_transposed_type(src->type));
+
+   for (unsigned i = 0; i < glsl_get_matrix_columns(dest->type); i++) {
+      nir_alu_instr *vec = create_vec(b->shader,
+                                      glsl_get_matrix_columns(src->type));
+      if (glsl_type_is_vector_or_scalar(src->type)) {
+         vec->src[0].src = nir_src_for_ssa(src->def);
+         vec->src[0].swizzle[0] = i;
+      } else {
+         for (unsigned j = 0; j < glsl_get_matrix_columns(src->type); j++) {
+            vec->src[j].src = nir_src_for_ssa(src->elems[j]->def);
+            vec->src[j].swizzle[0] = i;
+         }
+      }
+      nir_builder_instr_insert(&b->nb, &vec->instr);
+      dest->elems[i]->def = &vec->dest.dest.ssa;
+   }
+
+   dest->transposed = src;
+
+   return dest;
+}
+
+nir_ssa_def *
+vtn_vector_extract(struct vtn_builder *b, nir_ssa_def *src, unsigned index)
+{
+   unsigned swiz[4] = { index };
+   return nir_swizzle(&b->nb, src, swiz, 1, true);
+}
+
+nir_ssa_def *
+vtn_vector_insert(struct vtn_builder *b, nir_ssa_def *src, nir_ssa_def *insert,
+                  unsigned index)
+{
+   nir_alu_instr *vec = create_vec(b->shader, src->num_components);
+
+   for (unsigned i = 0; i < src->num_components; i++) {
+      if (i == index) {
+         vec->src[i].src = nir_src_for_ssa(insert);
+      } else {
+         vec->src[i].src = nir_src_for_ssa(src);
+         vec->src[i].swizzle[0] = i;
+      }
+   }
+
+   nir_builder_instr_insert(&b->nb, &vec->instr);
+
+   return &vec->dest.dest.ssa;
+}
+
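+/* Extracts a dynamically-indexed component by chaining bcsel instructions
+ * over every possible index.
+ */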
+nir_ssa_def *
+vtn_vector_extract_dynamic(struct vtn_builder *b, nir_ssa_def *src,
+                           nir_ssa_def *index)
+{
+   nir_ssa_def *dest = vtn_vector_extract(b, src, 0);
+   for (unsigned i = 1; i < src->num_components; i++)
+      dest = nir_bcsel(&b->nb, nir_ieq(&b->nb, index, nir_imm_int(&b->nb, i)),
+                       vtn_vector_extract(b, src, i), dest);
+
+   return dest;
+}
+
+nir_ssa_def *
+vtn_vector_insert_dynamic(struct vtn_builder *b, nir_ssa_def *src,
+                          nir_ssa_def *insert, nir_ssa_def *index)
+{
+   nir_ssa_def *dest = vtn_vector_insert(b, src, insert, 0);
+   for (unsigned i = 1; i < src->num_components; i++)
+      dest = nir_bcsel(&b->nb, nir_ieq(&b->nb, index, nir_imm_int(&b->nb, i)),
+                       vtn_vector_insert(b, src, insert, i), dest);
+
+   return dest;
+}
+
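+/* Implements OpVectorShuffle: each destination component selects a component
+ * of src0 or src1 by index, with 0xffffffff selecting an undef value.
+ */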
+static nir_ssa_def *
+vtn_vector_shuffle(struct vtn_builder *b, unsigned num_components,
+                   nir_ssa_def *src0, nir_ssa_def *src1,
+                   const uint32_t *indices)
+{
+   nir_alu_instr *vec = create_vec(b->shader, num_components);
+
+   nir_ssa_undef_instr *undef = nir_ssa_undef_instr_create(b->shader, 1);
+   nir_builder_instr_insert(&b->nb, &undef->instr);
+
+   for (unsigned i = 0; i < num_components; i++) {
+      uint32_t index = indices[i];
+      if (index == 0xffffffff) {
+         vec->src[i].src = nir_src_for_ssa(&undef->def);
+      } else if (index < src0->num_components) {
+         vec->src[i].src = nir_src_for_ssa(src0);
+         vec->src[i].swizzle[0] = index;
+      } else {
+         vec->src[i].src = nir_src_for_ssa(src1);
+         vec->src[i].swizzle[0] = index - src0->num_components;
+      }
+   }
+
+   nir_builder_instr_insert(&b->nb, &vec->instr);
+
+   return &vec->dest.dest.ssa;
+}
+
+/*
+ * Concatenates a number of vectors/scalars together to produce a vector
+ */
+static nir_ssa_def *
+vtn_vector_construct(struct vtn_builder *b, unsigned num_components,
+                     unsigned num_srcs, nir_ssa_def **srcs)
+{
+   nir_alu_instr *vec = create_vec(b->shader, num_components);
+
+   unsigned dest_idx = 0;
+   for (unsigned i = 0; i < num_srcs; i++) {
+      nir_ssa_def *src = srcs[i];
+      for (unsigned j = 0; j < src->num_components; j++) {
+         vec->src[dest_idx].src = nir_src_for_ssa(src);
+         vec->src[dest_idx].swizzle[0] = j;
+         dest_idx++;
+      }
+   }
+
+   nir_builder_instr_insert(&b->nb, &vec->instr);
+
+   return &vec->dest.dest.ssa;
+}
+
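+/* Recursively copies a composite value.  Vectors and scalars share the
+ * source's nir_ssa_def directly since SSA definitions are immutable.
+ */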
+static struct vtn_ssa_value *
+vtn_composite_copy(void *mem_ctx, struct vtn_ssa_value *src)
+{
+   struct vtn_ssa_value *dest = rzalloc(mem_ctx, struct vtn_ssa_value);
+   dest->type = src->type;
+
+   if (glsl_type_is_vector_or_scalar(src->type)) {
+      dest->def = src->def;
+   } else {
+      unsigned elems = glsl_get_length(src->type);
+
+      dest->elems = ralloc_array(mem_ctx, struct vtn_ssa_value *, elems);
+      for (unsigned i = 0; i < elems; i++)
+         dest->elems[i] = vtn_composite_copy(mem_ctx, src->elems[i]);
+   }
+
+   return dest;
+}
+
+static struct vtn_ssa_value *
+vtn_composite_insert(struct vtn_builder *b, struct vtn_ssa_value *src,
+                     struct vtn_ssa_value *insert, const uint32_t *indices,
+                     unsigned num_indices)
+{
+   struct vtn_ssa_value *dest = vtn_composite_copy(b, src);
+
+   struct vtn_ssa_value *cur = dest;
+   unsigned i;
+   for (i = 0; i < num_indices - 1; i++) {
+      cur = cur->elems[indices[i]];
+   }
+
+   if (glsl_type_is_vector_or_scalar(cur->type)) {
+      /* According to the SPIR-V spec, OpCompositeInsert may work down to
+       * the component granularity. In that case, the last index will be
+       * the index to insert the scalar into the vector.
+       */
+
+      cur->def = vtn_vector_insert(b, cur->def, insert->def, indices[i]);
+   } else {
+      cur->elems[indices[i]] = insert;
+   }
+
+   return dest;
+}
+
+static struct vtn_ssa_value *
+vtn_composite_extract(struct vtn_builder *b, struct vtn_ssa_value *src,
+                      const uint32_t *indices, unsigned num_indices)
+{
+   struct vtn_ssa_value *cur = src;
+   for (unsigned i = 0; i < num_indices; i++) {
+      if (glsl_type_is_vector_or_scalar(cur->type)) {
+         assert(i == num_indices - 1);
+         /* According to the SPIR-V spec, OpCompositeExtract may work down to
+          * the component granularity. The last index will be the index of the
+          * vector to extract.
+          */
+
+         struct vtn_ssa_value *ret = rzalloc(b, struct vtn_ssa_value);
+         ret->type = glsl_scalar_type(glsl_get_base_type(cur->type));
+         ret->def = vtn_vector_extract(b, cur->def, indices[i]);
+         return ret;
+      } else {
+         cur = cur->elems[indices[i]];
+      }
+   }
+
+   return cur;
+}
+
+static void
+vtn_handle_composite(struct vtn_builder *b, SpvOp opcode,
+                     const uint32_t *w, unsigned count)
+{
+   struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_ssa);
+   const struct glsl_type *type =
+      vtn_value(b, w[1], vtn_value_type_type)->type->type;
+   val->ssa = vtn_create_ssa_value(b, type);
+
+   switch (opcode) {
+   case SpvOpVectorExtractDynamic:
+      val->ssa->def = vtn_vector_extract_dynamic(b, vtn_ssa_value(b, w[3])->def,
+                                                 vtn_ssa_value(b, w[4])->def);
+      break;
+
+   case SpvOpVectorInsertDynamic:
+      val->ssa->def = vtn_vector_insert_dynamic(b, vtn_ssa_value(b, w[3])->def,
+                                                vtn_ssa_value(b, w[4])->def,
+                                                vtn_ssa_value(b, w[5])->def);
+      break;
+
+   case SpvOpVectorShuffle:
+      val->ssa->def = vtn_vector_shuffle(b, glsl_get_vector_elements(type),
+                                         vtn_ssa_value(b, w[3])->def,
+                                         vtn_ssa_value(b, w[4])->def,
+                                         w + 5);
+      break;
+
+   case SpvOpCompositeConstruct: {
+      unsigned elems = count - 3;
+      if (glsl_type_is_vector_or_scalar(type)) {
+         nir_ssa_def *srcs[4];
+         for (unsigned i = 0; i < elems; i++)
+            srcs[i] = vtn_ssa_value(b, w[3 + i])->def;
+         val->ssa->def =
+            vtn_vector_construct(b, glsl_get_vector_elements(type),
+                                 elems, srcs);
+      } else {
+         val->ssa->elems = ralloc_array(b, struct vtn_ssa_value *, elems);
+         for (unsigned i = 0; i < elems; i++)
+            val->ssa->elems[i] = vtn_ssa_value(b, w[3 + i]);
+      }
+      break;
+   }
+   case SpvOpCompositeExtract:
+      val->ssa = vtn_composite_extract(b, vtn_ssa_value(b, w[3]),
+                                       w + 4, count - 4);
+      break;
+
+   case SpvOpCompositeInsert:
+      val->ssa = vtn_composite_insert(b, vtn_ssa_value(b, w[4]),
+                                      vtn_ssa_value(b, w[3]),
+                                      w + 5, count - 5);
+      break;
+
+   case SpvOpCopyObject:
+      val->ssa = vtn_composite_copy(b, vtn_ssa_value(b, w[3]));
+      break;
+
+   default:
+      unreachable("unknown composite operation");
+   }
+}
+
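+/* Translates the geometry (EmitVertex/EndPrimitive) and barrier opcodes into
+ * the matching NIR intrinsics, attaching the stream ID for the stream
+ * variants.
+ */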
+static void
+vtn_handle_barrier(struct vtn_builder *b, SpvOp opcode,
+                   const uint32_t *w, unsigned count)
+{
+   nir_intrinsic_op intrinsic_op;
+   switch (opcode) {
+   case SpvOpEmitVertex:
+   case SpvOpEmitStreamVertex:
+      intrinsic_op = nir_intrinsic_emit_vertex;
+      break;
+   case SpvOpEndPrimitive:
+   case SpvOpEndStreamPrimitive:
+      intrinsic_op = nir_intrinsic_end_primitive;
+      break;
+   case SpvOpMemoryBarrier:
+      intrinsic_op = nir_intrinsic_memory_barrier;
+      break;
+   case SpvOpControlBarrier:
+      intrinsic_op = nir_intrinsic_barrier;
+      break;
+   default:
+      unreachable("unknown barrier instruction");
+   }
+
+   nir_intrinsic_instr *intrin =
+      nir_intrinsic_instr_create(b->shader, intrinsic_op);
+
+   if (opcode == SpvOpEmitStreamVertex || opcode == SpvOpEndStreamPrimitive)
+      nir_intrinsic_set_stream_id(intrin, w[1]);
+
+   nir_builder_instr_insert(&b->nb, &intrin->instr);
+}
+
+static unsigned
+gl_primitive_from_spv_execution_mode(SpvExecutionMode mode)
+{
+   switch (mode) {
+   case SpvExecutionModeInputPoints:
+   case SpvExecutionModeOutputPoints:
+      return 0; /* GL_POINTS */
+   case SpvExecutionModeInputLines:
+      return 1; /* GL_LINES */
+   case SpvExecutionModeInputLinesAdjacency:
+      return 0x000A; /* GL_LINES_ADJACENCY_ARB */
+   case SpvExecutionModeTriangles:
+      return 4; /* GL_TRIANGLES */
+   case SpvExecutionModeInputTrianglesAdjacency:
+      return 0x000C; /* GL_TRIANGLES_ADJACENCY_ARB */
+   case SpvExecutionModeQuads:
+      return 7; /* GL_QUADS */
+   case SpvExecutionModeIsolines:
+      return 0x8E7A; /* GL_ISOLINES */
+   case SpvExecutionModeOutputLineStrip:
+      return 3; /* GL_LINE_STRIP */
+   case SpvExecutionModeOutputTriangleStrip:
+      return 5; /* GL_TRIANGLE_STRIP */
+   default:
+      assert(!"Invalid primitive type");
+      return 4;
+   }
+}
+
+static unsigned
+vertices_in_from_spv_execution_mode(SpvExecutionMode mode)
+{
+   switch (mode) {
+   case SpvExecutionModeInputPoints:
+      return 1;
+   case SpvExecutionModeInputLines:
+      return 2;
+   case SpvExecutionModeInputLinesAdjacency:
+      return 4;
+   case SpvExecutionModeTriangles:
+      return 3;
+   case SpvExecutionModeInputTrianglesAdjacency:
+      return 6;
+   default:
+      assert(!"Invalid GS input mode");
+      return 0;
+   }
+}
+
+static gl_shader_stage
+stage_for_execution_model(SpvExecutionModel model)
+{
+   switch (model) {
+   case SpvExecutionModelVertex:
+      return MESA_SHADER_VERTEX;
+   case SpvExecutionModelTessellationControl:
+      return MESA_SHADER_TESS_CTRL;
+   case SpvExecutionModelTessellationEvaluation:
+      return MESA_SHADER_TESS_EVAL;
+   case SpvExecutionModelGeometry:
+      return MESA_SHADER_GEOMETRY;
+   case SpvExecutionModelFragment:
+      return MESA_SHADER_FRAGMENT;
+   case SpvExecutionModelGLCompute:
+      return MESA_SHADER_COMPUTE;
+   default:
+      unreachable("Unsupported execution model");
+   }
+}
+
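+/* First pass over the module: capabilities, extensions, the memory model,
+ * entry points, names, and decorations.  Returns false once it hits an
+ * opcode that is no longer part of the preamble, which ends the pass.
+ */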
+static bool
+vtn_handle_preamble_instruction(struct vtn_builder *b, SpvOp opcode,
+                                const uint32_t *w, unsigned count)
+{
+   switch (opcode) {
+   case SpvOpSource:
+   case SpvOpSourceExtension:
+   case SpvOpSourceContinued:
+   case SpvOpExtension:
+      /* Unhandled, but these are for debug so that's ok. */
+      break;
+
+   case SpvOpCapability: {
+      SpvCapability cap = w[1];
+      switch (cap) {
+      case SpvCapabilityMatrix:
+      case SpvCapabilityShader:
+      case SpvCapabilityGeometry:
+      case SpvCapabilityTessellationPointSize:
+      case SpvCapabilityGeometryPointSize:
+      case SpvCapabilityUniformBufferArrayDynamicIndexing:
+      case SpvCapabilitySampledImageArrayDynamicIndexing:
+      case SpvCapabilityStorageBufferArrayDynamicIndexing:
+      case SpvCapabilityStorageImageArrayDynamicIndexing:
+      case SpvCapabilityImageRect:
+      case SpvCapabilitySampledRect:
+      case SpvCapabilitySampled1D:
+      case SpvCapabilityImage1D:
+      case SpvCapabilitySampledCubeArray:
+      case SpvCapabilitySampledBuffer:
+      case SpvCapabilityImageBuffer:
+      case SpvCapabilityImageQuery:
+         break;
+      case SpvCapabilityClipDistance:
+      case SpvCapabilityCullDistance:
+      case SpvCapabilityGeometryStreams:
+         fprintf(stderr, "WARNING: Unsupported SPIR-V Capability\n");
+         break;
+      default:
+         assert(!"Unsupported capability");
+      }
+      break;
+   }
+
+   case SpvOpExtInstImport:
+      vtn_handle_extension(b, opcode, w, count);
+      break;
+
+   case SpvOpMemoryModel:
+      assert(w[1] == SpvAddressingModelLogical);
+      assert(w[2] == SpvMemoryModelGLSL450);
+      break;
+
+   case SpvOpEntryPoint: {
+      struct vtn_value *entry_point = &b->values[w[2]];
+      /* Give the value a name regardless of whether it is the entry point we want */
+      unsigned name_words;
+      entry_point->name = vtn_string_literal(b, &w[3], count - 3, &name_words);
+
+      if (strcmp(entry_point->name, b->entry_point_name) != 0 ||
+          stage_for_execution_model(w[1]) != b->entry_point_stage)
+         break;
+
+      assert(b->entry_point == NULL);
+      b->entry_point = entry_point;
+      break;
+   }
+
+   case SpvOpString:
+      vtn_push_value(b, w[1], vtn_value_type_string)->str =
+         vtn_string_literal(b, &w[2], count - 2, NULL);
+      break;
+
+   case SpvOpName:
+      b->values[w[1]].name = vtn_string_literal(b, &w[2], count - 2, NULL);
+      break;
+
+   case SpvOpMemberName:
+      /* TODO */
+      break;
+
+   case SpvOpExecutionMode:
+   case SpvOpDecorationGroup:
+   case SpvOpDecorate:
+   case SpvOpMemberDecorate:
+   case SpvOpGroupDecorate:
+   case SpvOpGroupMemberDecorate:
+      vtn_handle_decoration(b, opcode, w, count);
+      break;
+
+   default:
+      return false; /* End of preamble */
+   }
+
+   return true;
+}
+
+static void
+vtn_handle_execution_mode(struct vtn_builder *b, struct vtn_value *entry_point,
+                          const struct vtn_decoration *mode, void *data)
+{
+   assert(b->entry_point == entry_point);
+
+   switch (mode->exec_mode) {
+   case SpvExecutionModeOriginUpperLeft:
+   case SpvExecutionModeOriginLowerLeft:
+      b->origin_upper_left =
+         (mode->exec_mode == SpvExecutionModeOriginUpperLeft);
+      break;
+
+   case SpvExecutionModeEarlyFragmentTests:
+      assert(b->shader->stage == MESA_SHADER_FRAGMENT);
+      b->shader->info.fs.early_fragment_tests = true;
+      break;
+
+   case SpvExecutionModeInvocations:
+      assert(b->shader->stage == MESA_SHADER_GEOMETRY);
+      b->shader->info.gs.invocations = MAX2(1, mode->literals[0]);
+      break;
+
+   case SpvExecutionModeDepthReplacing:
+      assert(b->shader->stage == MESA_SHADER_FRAGMENT);
+      b->shader->info.fs.depth_layout = FRAG_DEPTH_LAYOUT_ANY;
+      break;
+   case SpvExecutionModeDepthGreater:
+      assert(b->shader->stage == MESA_SHADER_FRAGMENT);
+      b->shader->info.fs.depth_layout = FRAG_DEPTH_LAYOUT_GREATER;
+      break;
+   case SpvExecutionModeDepthLess:
+      assert(b->shader->stage == MESA_SHADER_FRAGMENT);
+      b->shader->info.fs.depth_layout = FRAG_DEPTH_LAYOUT_LESS;
+      break;
+   case SpvExecutionModeDepthUnchanged:
+      assert(b->shader->stage == MESA_SHADER_FRAGMENT);
+      b->shader->info.fs.depth_layout = FRAG_DEPTH_LAYOUT_UNCHANGED;
+      break;
+
+   case SpvExecutionModeLocalSize:
+      assert(b->shader->stage == MESA_SHADER_COMPUTE);
+      b->shader->info.cs.local_size[0] = mode->literals[0];
+      b->shader->info.cs.local_size[1] = mode->literals[1];
+      b->shader->info.cs.local_size[2] = mode->literals[2];
+      break;
+   case SpvExecutionModeLocalSizeHint:
+      break; /* Nothing to do with this */
+
+   case SpvExecutionModeOutputVertices:
+      assert(b->shader->stage == MESA_SHADER_GEOMETRY);
+      b->shader->info.gs.vertices_out = mode->literals[0];
+      break;
+
+   case SpvExecutionModeInputPoints:
+   case SpvExecutionModeInputLines:
+   case SpvExecutionModeInputLinesAdjacency:
+   case SpvExecutionModeTriangles:
+   case SpvExecutionModeInputTrianglesAdjacency:
+   case SpvExecutionModeQuads:
+   case SpvExecutionModeIsolines:
+      if (b->shader->stage == MESA_SHADER_GEOMETRY) {
+         b->shader->info.gs.vertices_in =
+            vertices_in_from_spv_execution_mode(mode->exec_mode);
+      } else {
+         assert(!"Tesselation shaders not yet supported");
+      }
+      break;
+
+   case SpvExecutionModeOutputPoints:
+   case SpvExecutionModeOutputLineStrip:
+   case SpvExecutionModeOutputTriangleStrip:
+      assert(b->shader->stage == MESA_SHADER_GEOMETRY);
+      b->shader->info.gs.output_primitive =
+         gl_primitive_from_spv_execution_mode(mode->exec_mode);
+      break;
+
+   case SpvExecutionModeSpacingEqual:
+   case SpvExecutionModeSpacingFractionalEven:
+   case SpvExecutionModeSpacingFractionalOdd:
+   case SpvExecutionModeVertexOrderCw:
+   case SpvExecutionModeVertexOrderCcw:
+   case SpvExecutionModePointMode:
+      assert(!"TODO: Add tessellation metadata");
+      break;
+
+   case SpvExecutionModePixelCenterInteger:
+   case SpvExecutionModeXfb:
+      assert(!"Unhandled execution mode");
+      break;
+
+   case SpvExecutionModeVecTypeHint:
+   case SpvExecutionModeContractionOff:
+      break; /* OpenCL */
+   }
+}
+
+static bool
+vtn_handle_variable_or_type_instruction(struct vtn_builder *b, SpvOp opcode,
+                                        const uint32_t *w, unsigned count)
+{
+   switch (opcode) {
+   case SpvOpSource:
+   case SpvOpSourceContinued:
+   case SpvOpSourceExtension:
+   case SpvOpExtension:
+   case SpvOpCapability:
+   case SpvOpExtInstImport:
+   case SpvOpMemoryModel:
+   case SpvOpEntryPoint:
+   case SpvOpExecutionMode:
+   case SpvOpString:
+   case SpvOpName:
+   case SpvOpMemberName:
+   case SpvOpDecorationGroup:
+   case SpvOpDecorate:
+   case SpvOpMemberDecorate:
+   case SpvOpGroupDecorate:
+   case SpvOpGroupMemberDecorate:
+      assert(!"Invalid opcode types and variables section");
+      break;
+
+   case SpvOpTypeVoid:
+   case SpvOpTypeBool:
+   case SpvOpTypeInt:
+   case SpvOpTypeFloat:
+   case SpvOpTypeVector:
+   case SpvOpTypeMatrix:
+   case SpvOpTypeImage:
+   case SpvOpTypeSampler:
+   case SpvOpTypeSampledImage:
+   case SpvOpTypeArray:
+   case SpvOpTypeRuntimeArray:
+   case SpvOpTypeStruct:
+   case SpvOpTypeOpaque:
+   case SpvOpTypePointer:
+   case SpvOpTypeFunction:
+   case SpvOpTypeEvent:
+   case SpvOpTypeDeviceEvent:
+   case SpvOpTypeReserveId:
+   case SpvOpTypeQueue:
+   case SpvOpTypePipe:
+      vtn_handle_type(b, opcode, w, count);
+      break;
+
+   case SpvOpConstantTrue:
+   case SpvOpConstantFalse:
+   case SpvOpConstant:
+   case SpvOpConstantComposite:
+   case SpvOpConstantSampler:
+   case SpvOpConstantNull:
+   case SpvOpSpecConstantTrue:
+   case SpvOpSpecConstantFalse:
+   case SpvOpSpecConstant:
+   case SpvOpSpecConstantComposite:
+   case SpvOpSpecConstantOp:
+      vtn_handle_constant(b, opcode, w, count);
+      break;
+
+   case SpvOpVariable:
+      vtn_handle_variables(b, opcode, w, count);
+      break;
+
+   default:
+      return false; /* End of the types and variables section */
+   }
+
+   return true;
+}
+
+static bool
+vtn_handle_body_instruction(struct vtn_builder *b, SpvOp opcode,
+                            const uint32_t *w, unsigned count)
+{
+   switch (opcode) {
+   case SpvOpLabel:
+      break;
+
+   case SpvOpLoopMerge:
+   case SpvOpSelectionMerge:
+      /* This is handled by cfg pre-pass and walk_blocks */
+      break;
+
+   case SpvOpUndef: {
+      struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_undef);
+      val->type = vtn_value(b, w[1], vtn_value_type_type)->type;
+      break;
+   }
+
+   case SpvOpExtInst:
+      vtn_handle_extension(b, opcode, w, count);
+      break;
+
+   case SpvOpVariable:
+   case SpvOpLoad:
+   case SpvOpStore:
+   case SpvOpCopyMemory:
+   case SpvOpCopyMemorySized:
+   case SpvOpAccessChain:
+   case SpvOpInBoundsAccessChain:
+   case SpvOpArrayLength:
+      vtn_handle_variables(b, opcode, w, count);
+      break;
+
+   case SpvOpFunctionCall:
+      vtn_handle_function_call(b, opcode, w, count);
+      break;
+
+   case SpvOpSampledImage:
+   case SpvOpImage:
+   case SpvOpImageSampleImplicitLod:
+   case SpvOpImageSampleExplicitLod:
+   case SpvOpImageSampleDrefImplicitLod:
+   case SpvOpImageSampleDrefExplicitLod:
+   case SpvOpImageSampleProjImplicitLod:
+   case SpvOpImageSampleProjExplicitLod:
+   case SpvOpImageSampleProjDrefImplicitLod:
+   case SpvOpImageSampleProjDrefExplicitLod:
+   case SpvOpImageFetch:
+   case SpvOpImageGather:
+   case SpvOpImageDrefGather:
+   case SpvOpImageQuerySizeLod:
+   case SpvOpImageQueryLod:
+   case SpvOpImageQueryLevels:
+   case SpvOpImageQuerySamples:
+      vtn_handle_texture(b, opcode, w, count);
+      break;
+
+   case SpvOpImageRead:
+   case SpvOpImageWrite:
+   case SpvOpImageTexelPointer:
+      vtn_handle_image(b, opcode, w, count);
+      break;
+
+   case SpvOpImageQuerySize: {
+      struct vtn_access_chain *image =
+         vtn_value(b, w[3], vtn_value_type_access_chain)->access_chain;
+      if (glsl_type_is_image(image->var->var->interface_type)) {
+         vtn_handle_image(b, opcode, w, count);
+      } else {
+         vtn_handle_texture(b, opcode, w, count);
+      }
+      break;
+   }
+
+   case SpvOpAtomicExchange:
+   case SpvOpAtomicCompareExchange:
+   case SpvOpAtomicCompareExchangeWeak:
+   case SpvOpAtomicIIncrement:
+   case SpvOpAtomicIDecrement:
+   case SpvOpAtomicIAdd:
+   case SpvOpAtomicISub:
+   case SpvOpAtomicSMin:
+   case SpvOpAtomicUMin:
+   case SpvOpAtomicSMax:
+   case SpvOpAtomicUMax:
+   case SpvOpAtomicAnd:
+   case SpvOpAtomicOr:
+   case SpvOpAtomicXor: {
+      struct vtn_value *pointer = vtn_untyped_value(b, w[3]);
+      if (pointer->value_type == vtn_value_type_image_pointer) {
+         vtn_handle_image(b, opcode, w, count);
+      } else {
+         assert(pointer->value_type == vtn_value_type_access_chain);
+         vtn_handle_ssbo_or_shared_atomic(b, opcode, w, count);
+      }
+      break;
+   }
+
+   case SpvOpSNegate:
+   case SpvOpFNegate:
+   case SpvOpNot:
+   case SpvOpAny:
+   case SpvOpAll:
+   case SpvOpConvertFToU:
+   case SpvOpConvertFToS:
+   case SpvOpConvertSToF:
+   case SpvOpConvertUToF:
+   case SpvOpUConvert:
+   case SpvOpSConvert:
+   case SpvOpFConvert:
+   case SpvOpQuantizeToF16:
+   case SpvOpConvertPtrToU:
+   case SpvOpConvertUToPtr:
+   case SpvOpPtrCastToGeneric:
+   case SpvOpGenericCastToPtr:
+   case SpvOpBitcast:
+   case SpvOpIsNan:
+   case SpvOpIsInf:
+   case SpvOpIsFinite:
+   case SpvOpIsNormal:
+   case SpvOpSignBitSet:
+   case SpvOpLessOrGreater:
+   case SpvOpOrdered:
+   case SpvOpUnordered:
+   case SpvOpIAdd:
+   case SpvOpFAdd:
+   case SpvOpISub:
+   case SpvOpFSub:
+   case SpvOpIMul:
+   case SpvOpFMul:
+   case SpvOpUDiv:
+   case SpvOpSDiv:
+   case SpvOpFDiv:
+   case SpvOpUMod:
+   case SpvOpSRem:
+   case SpvOpSMod:
+   case SpvOpFRem:
+   case SpvOpFMod:
+   case SpvOpVectorTimesScalar:
+   case SpvOpDot:
+   case SpvOpIAddCarry:
+   case SpvOpISubBorrow:
+   case SpvOpUMulExtended:
+   case SpvOpSMulExtended:
+   case SpvOpShiftRightLogical:
+   case SpvOpShiftRightArithmetic:
+   case SpvOpShiftLeftLogical:
+   case SpvOpLogicalEqual:
+   case SpvOpLogicalNotEqual:
+   case SpvOpLogicalOr:
+   case SpvOpLogicalAnd:
+   case SpvOpLogicalNot:
+   case SpvOpBitwiseOr:
+   case SpvOpBitwiseXor:
+   case SpvOpBitwiseAnd:
+   case SpvOpSelect:
+   case SpvOpIEqual:
+   case SpvOpFOrdEqual:
+   case SpvOpFUnordEqual:
+   case SpvOpINotEqual:
+   case SpvOpFOrdNotEqual:
+   case SpvOpFUnordNotEqual:
+   case SpvOpULessThan:
+   case SpvOpSLessThan:
+   case SpvOpFOrdLessThan:
+   case SpvOpFUnordLessThan:
+   case SpvOpUGreaterThan:
+   case SpvOpSGreaterThan:
+   case SpvOpFOrdGreaterThan:
+   case SpvOpFUnordGreaterThan:
+   case SpvOpULessThanEqual:
+   case SpvOpSLessThanEqual:
+   case SpvOpFOrdLessThanEqual:
+   case SpvOpFUnordLessThanEqual:
+   case SpvOpUGreaterThanEqual:
+   case SpvOpSGreaterThanEqual:
+   case SpvOpFOrdGreaterThanEqual:
+   case SpvOpFUnordGreaterThanEqual:
+   case SpvOpDPdx:
+   case SpvOpDPdy:
+   case SpvOpFwidth:
+   case SpvOpDPdxFine:
+   case SpvOpDPdyFine:
+   case SpvOpFwidthFine:
+   case SpvOpDPdxCoarse:
+   case SpvOpDPdyCoarse:
+   case SpvOpFwidthCoarse:
+   case SpvOpBitFieldInsert:
+   case SpvOpBitFieldSExtract:
+   case SpvOpBitFieldUExtract:
+   case SpvOpBitReverse:
+   case SpvOpBitCount:
+   case SpvOpTranspose:
+   case SpvOpOuterProduct:
+   case SpvOpMatrixTimesScalar:
+   case SpvOpVectorTimesMatrix:
+   case SpvOpMatrixTimesVector:
+   case SpvOpMatrixTimesMatrix:
+      vtn_handle_alu(b, opcode, w, count);
+      break;
+
+   case SpvOpVectorExtractDynamic:
+   case SpvOpVectorInsertDynamic:
+   case SpvOpVectorShuffle:
+   case SpvOpCompositeConstruct:
+   case SpvOpCompositeExtract:
+   case SpvOpCompositeInsert:
+   case SpvOpCopyObject:
+      vtn_handle_composite(b, opcode, w, count);
+      break;
+
+   case SpvOpEmitVertex:
+   case SpvOpEndPrimitive:
+   case SpvOpEmitStreamVertex:
+   case SpvOpEndStreamPrimitive:
+   case SpvOpControlBarrier:
+   case SpvOpMemoryBarrier:
+      vtn_handle_barrier(b, opcode, w, count);
+      break;
+
+   default:
+      unreachable("Unhandled opcode");
+   }
+
+   return true;
+}
+
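+/* Top-level entry point.  Validates the SPIR-V header, runs the preamble and
+ * types/variables/constants passes, applies the entry point's execution
+ * modes, builds the CFG, and then emits NIR for every function, returning
+ * the nir_function for the requested entry point.
+ */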
+nir_function *
+spirv_to_nir(const uint32_t *words, size_t word_count,
+             struct nir_spirv_specialization *spec, unsigned num_spec,
+             gl_shader_stage stage, const char *entry_point_name,
+             const nir_shader_compiler_options *options)
+{
+   const uint32_t *word_end = words + word_count;
+
+   /* Handle the SPIR-V header (first 5 dwords) */
+   assert(word_count > 5);
+
+   assert(words[0] == SpvMagicNumber);
+   assert(words[1] >= 0x10000);
+   /* words[2] == generator magic */
+   unsigned value_id_bound = words[3];
+   assert(words[4] == 0);
+
+   words += 5;
+
+   /* Initialize the vtn_builder object */
+   struct vtn_builder *b = rzalloc(NULL, struct vtn_builder);
+   b->value_id_bound = value_id_bound;
+   b->values = rzalloc_array(b, struct vtn_value, value_id_bound);
+   exec_list_make_empty(&b->functions);
+   b->entry_point_stage = stage;
+   b->entry_point_name = entry_point_name;
+
+   /* Handle all the preamble instructions */
+   words = vtn_foreach_instruction(b, words, word_end,
+                                   vtn_handle_preamble_instruction);
+
+   if (b->entry_point == NULL) {
+      assert(!"Entry point not found");
+      ralloc_free(b);
+      return NULL;
+   }
+
+   b->shader = nir_shader_create(NULL, stage, options);
+
+   /* Parse execution modes */
+   vtn_foreach_execution_mode(b, b->entry_point,
+                              vtn_handle_execution_mode, NULL);
+
+   b->specializations = spec;
+   b->num_specializations = num_spec;
+
+   /* Handle all variable, type, and constant instructions */
+   words = vtn_foreach_instruction(b, words, word_end,
+                                   vtn_handle_variable_or_type_instruction);
+
+   vtn_build_cfg(b, words, word_end);
+
+   foreach_list_typed(struct vtn_function, func, node, &b->functions) {
+      b->impl = func->impl;
+      b->const_table = _mesa_hash_table_create(b, _mesa_hash_pointer,
+                                               _mesa_key_pointer_equal);
+
+      vtn_function_emit(b, func, vtn_handle_body_instruction);
+   }
+
+   assert(b->entry_point->value_type == vtn_value_type_function);
+   nir_function *entry_point = b->entry_point->func->impl->function;
+   assert(entry_point);
+
+   ralloc_free(b);
+
+   return entry_point;
+}
diff --git a/src/compiler/nir/spirv/vtn_alu.c b/src/compiler/nir/spirv/vtn_alu.c
new file mode 100644 (file)
index 0000000..450bc15
--- /dev/null
@@ -0,0 +1,448 @@
+/*
+ * Copyright © 2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "vtn_private.h"
+
+/*
+ * Normally, column vectors in SPIR-V correspond to a single NIR SSA
+ * definition. But for matrix multiplies, we want to do one routine for
+ * multiplying a matrix by a matrix and then pretend that vectors are matrices
+ * with one column. So we "wrap" these things, and unwrap the result before we
+ * send it off.
+ */
+
+static struct vtn_ssa_value *
+wrap_matrix(struct vtn_builder *b, struct vtn_ssa_value *val)
+{
+   if (val == NULL)
+      return NULL;
+
+   if (glsl_type_is_matrix(val->type))
+      return val;
+
+   struct vtn_ssa_value *dest = rzalloc(b, struct vtn_ssa_value);
+   dest->type = val->type;
+   dest->elems = ralloc_array(b, struct vtn_ssa_value *, 1);
+   dest->elems[0] = val;
+
+   return dest;
+}
+
+static struct vtn_ssa_value *
+unwrap_matrix(struct vtn_ssa_value *val)
+{
+   if (glsl_type_is_matrix(val->type))
+      return val;
+
+   return val->elems[0];
+}
+
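+/* Computes src0 * src1 one destination column at a time.  When both operands
+ * are themselves transposes, the product is computed on the untransposed
+ * matrices and transposed at the end; when only src0 has a cached transpose
+ * (and the base type is float), each element is a row/column dot product.
+ */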
+static struct vtn_ssa_value *
+matrix_multiply(struct vtn_builder *b,
+                struct vtn_ssa_value *_src0, struct vtn_ssa_value *_src1)
+{
+   struct vtn_ssa_value *src0 = wrap_matrix(b, _src0);
+   struct vtn_ssa_value *src1 = wrap_matrix(b, _src1);
+   struct vtn_ssa_value *src0_transpose = wrap_matrix(b, _src0->transposed);
+   struct vtn_ssa_value *src1_transpose = wrap_matrix(b, _src1->transposed);
+
+   unsigned src0_rows = glsl_get_vector_elements(src0->type);
+   unsigned src0_columns = glsl_get_matrix_columns(src0->type);
+   unsigned src1_columns = glsl_get_matrix_columns(src1->type);
+
+   const struct glsl_type *dest_type;
+   if (src1_columns > 1) {
+      dest_type = glsl_matrix_type(glsl_get_base_type(src0->type),
+                                   src0_rows, src1_columns);
+   } else {
+      dest_type = glsl_vector_type(glsl_get_base_type(src0->type), src0_rows);
+   }
+   struct vtn_ssa_value *dest = vtn_create_ssa_value(b, dest_type);
+
+   dest = wrap_matrix(b, dest);
+
+   bool transpose_result = false;
+   if (src0_transpose && src1_transpose) {
+      /* transpose(A) * transpose(B) = transpose(B * A) */
+      src1 = src0_transpose;
+      src0 = src1_transpose;
+      src0_transpose = NULL;
+      src1_transpose = NULL;
+      transpose_result = true;
+   }
+
+   if (src0_transpose && !src1_transpose &&
+       glsl_get_base_type(src0->type) == GLSL_TYPE_FLOAT) {
+      /* We already have the rows of src0 and the columns of src1 available,
+       * so we can just take the dot product of each row with each column to
+       * get the result.
+       */
+
+      for (unsigned i = 0; i < src1_columns; i++) {
+         nir_ssa_def *vec_src[4];
+         for (unsigned j = 0; j < src0_rows; j++) {
+            vec_src[j] = nir_fdot(&b->nb, src0_transpose->elems[j]->def,
+                                          src1->elems[i]->def);
+         }
+         dest->elems[i]->def = nir_vec(&b->nb, vec_src, src0_rows);
+      }
+   } else {
+      /* We don't handle the case where src1 is transposed but not src0, since
+       * the general case only uses individual components of src1 so the
+       * optimizer should chew through the transpose we emitted for src1.
+       */
+
+      for (unsigned i = 0; i < src1_columns; i++) {
+         /* dest[i] = sum(src0[j] * src1[i][j] for all j) */
+         dest->elems[i]->def =
+            nir_fmul(&b->nb, src0->elems[0]->def,
+                     nir_channel(&b->nb, src1->elems[i]->def, 0));
+         for (unsigned j = 1; j < src0_columns; j++) {
+            dest->elems[i]->def =
+               nir_fadd(&b->nb, dest->elems[i]->def,
+                        nir_fmul(&b->nb, src0->elems[j]->def,
+                                 nir_channel(&b->nb, src1->elems[i]->def, j)));
+         }
+      }
+   }
+
+   dest = unwrap_matrix(dest);
+
+   if (transpose_result)
+      dest = vtn_ssa_transpose(b, dest);
+
+   return dest;
+}
+
+static struct vtn_ssa_value *
+mat_times_scalar(struct vtn_builder *b,
+                 struct vtn_ssa_value *mat,
+                 nir_ssa_def *scalar)
+{
+   struct vtn_ssa_value *dest = vtn_create_ssa_value(b, mat->type);
+   for (unsigned i = 0; i < glsl_get_matrix_columns(mat->type); i++) {
+      if (glsl_get_base_type(mat->type) == GLSL_TYPE_FLOAT)
+         dest->elems[i]->def = nir_fmul(&b->nb, mat->elems[i]->def, scalar);
+      else
+         dest->elems[i]->def = nir_imul(&b->nb, mat->elems[i]->def, scalar);
+   }
+
+   return dest;
+}
+
+static void
+vtn_handle_matrix_alu(struct vtn_builder *b, SpvOp opcode,
+                      struct vtn_value *dest,
+                      struct vtn_ssa_value *src0, struct vtn_ssa_value *src1)
+{
+   switch (opcode) {
+   case SpvOpFNegate: {
+      dest->ssa = vtn_create_ssa_value(b, src0->type);
+      unsigned cols = glsl_get_matrix_columns(src0->type);
+      for (unsigned i = 0; i < cols; i++)
+         dest->ssa->elems[i]->def = nir_fneg(&b->nb, src0->elems[i]->def);
+      break;
+   }
+
+   case SpvOpFAdd: {
+      dest->ssa = vtn_create_ssa_value(b, src0->type);
+      unsigned cols = glsl_get_matrix_columns(src0->type);
+      for (unsigned i = 0; i < cols; i++)
+         dest->ssa->elems[i]->def =
+            nir_fadd(&b->nb, src0->elems[i]->def, src1->elems[i]->def);
+      break;
+   }
+
+   case SpvOpFSub: {
+      dest->ssa = vtn_create_ssa_value(b, src0->type);
+      unsigned cols = glsl_get_matrix_columns(src0->type);
+      for (unsigned i = 0; i < cols; i++)
+         dest->ssa->elems[i]->def =
+            nir_fsub(&b->nb, src0->elems[i]->def, src1->elems[i]->def);
+      break;
+   }
+
+   case SpvOpTranspose:
+      dest->ssa = vtn_ssa_transpose(b, src0);
+      break;
+
+   case SpvOpMatrixTimesScalar:
+      if (src0->transposed) {
+         dest->ssa = vtn_ssa_transpose(b, mat_times_scalar(b, src0->transposed,
+                                                           src1->def));
+      } else {
+         dest->ssa = mat_times_scalar(b, src0, src1->def);
+      }
+      break;
+
+   case SpvOpVectorTimesMatrix:
+   case SpvOpMatrixTimesVector:
+   case SpvOpMatrixTimesMatrix:
+      if (opcode == SpvOpVectorTimesMatrix) {
+         dest->ssa = matrix_multiply(b, vtn_ssa_transpose(b, src1), src0);
+      } else {
+         dest->ssa = matrix_multiply(b, src0, src1);
+      }
+      break;
+
+   default: unreachable("unknown matrix opcode");
+   }
+}
+
+nir_op
+vtn_nir_alu_op_for_spirv_opcode(SpvOp opcode, bool *swap)
+{
+   /* Indicates that the first two arguments should be swapped.  This is
+    * used for implementing greater-than and less-than-or-equal.
+    */
+   *swap = false;
+
+   switch (opcode) {
+   case SpvOpSNegate:            return nir_op_ineg;
+   case SpvOpFNegate:            return nir_op_fneg;
+   case SpvOpNot:                return nir_op_inot;
+   case SpvOpIAdd:               return nir_op_iadd;
+   case SpvOpFAdd:               return nir_op_fadd;
+   case SpvOpISub:               return nir_op_isub;
+   case SpvOpFSub:               return nir_op_fsub;
+   case SpvOpIMul:               return nir_op_imul;
+   case SpvOpFMul:               return nir_op_fmul;
+   case SpvOpUDiv:               return nir_op_udiv;
+   case SpvOpSDiv:               return nir_op_idiv;
+   case SpvOpFDiv:               return nir_op_fdiv;
+   case SpvOpUMod:               return nir_op_umod;
+   case SpvOpSMod:               return nir_op_imod;
+   case SpvOpFMod:               return nir_op_fmod;
+   case SpvOpSRem:               return nir_op_irem;
+   case SpvOpFRem:               return nir_op_frem;
+
+   case SpvOpShiftRightLogical:     return nir_op_ushr;
+   case SpvOpShiftRightArithmetic:  return nir_op_ishr;
+   case SpvOpShiftLeftLogical:      return nir_op_ishl;
+   case SpvOpLogicalOr:             return nir_op_ior;
+   case SpvOpLogicalEqual:          return nir_op_ieq;
+   case SpvOpLogicalNotEqual:       return nir_op_ine;
+   case SpvOpLogicalAnd:            return nir_op_iand;
+   case SpvOpLogicalNot:            return nir_op_inot;
+   case SpvOpBitwiseOr:             return nir_op_ior;
+   case SpvOpBitwiseXor:            return nir_op_ixor;
+   case SpvOpBitwiseAnd:            return nir_op_iand;
+   case SpvOpSelect:                return nir_op_bcsel;
+   case SpvOpIEqual:                return nir_op_ieq;
+
+   case SpvOpBitFieldInsert:        return nir_op_bitfield_insert;
+   case SpvOpBitFieldSExtract:      return nir_op_ibitfield_extract;
+   case SpvOpBitFieldUExtract:      return nir_op_ubitfield_extract;
+   case SpvOpBitReverse:            return nir_op_bitfield_reverse;
+   case SpvOpBitCount:              return nir_op_bit_count;
+
+   /* Comparisons: (TODO: How do we want to handle ordered/unordered?) */
+   case SpvOpFOrdEqual:                            return nir_op_feq;
+   case SpvOpFUnordEqual:                          return nir_op_feq;
+   case SpvOpINotEqual:                            return nir_op_ine;
+   case SpvOpFOrdNotEqual:                         return nir_op_fne;
+   case SpvOpFUnordNotEqual:                       return nir_op_fne;
+   case SpvOpULessThan:                            return nir_op_ult;
+   case SpvOpSLessThan:                            return nir_op_ilt;
+   case SpvOpFOrdLessThan:                         return nir_op_flt;
+   case SpvOpFUnordLessThan:                       return nir_op_flt;
+   case SpvOpUGreaterThan:          *swap = true;  return nir_op_ult;
+   case SpvOpSGreaterThan:          *swap = true;  return nir_op_ilt;
+   case SpvOpFOrdGreaterThan:       *swap = true;  return nir_op_flt;
+   case SpvOpFUnordGreaterThan:     *swap = true;  return nir_op_flt;
+   case SpvOpULessThanEqual:        *swap = true;  return nir_op_uge;
+   case SpvOpSLessThanEqual:        *swap = true;  return nir_op_ige;
+   case SpvOpFOrdLessThanEqual:     *swap = true;  return nir_op_fge;
+   case SpvOpFUnordLessThanEqual:   *swap = true;  return nir_op_fge;
+   case SpvOpUGreaterThanEqual:                    return nir_op_uge;
+   case SpvOpSGreaterThanEqual:                    return nir_op_ige;
+   case SpvOpFOrdGreaterThanEqual:                 return nir_op_fge;
+   case SpvOpFUnordGreaterThanEqual:               return nir_op_fge;
+
+   /* Conversions: */
+   case SpvOpConvertFToU:           return nir_op_f2u;
+   case SpvOpConvertFToS:           return nir_op_f2i;
+   case SpvOpConvertSToF:           return nir_op_i2f;
+   case SpvOpConvertUToF:           return nir_op_u2f;
+   case SpvOpBitcast:               return nir_op_imov;
+   case SpvOpQuantizeToF16:         return nir_op_fquantize2f16;
+   /* TODO: NIR is 32-bit only, so these width conversions are no-ops. */
+   case SpvOpUConvert:              return nir_op_imov;
+   case SpvOpSConvert:              return nir_op_imov;
+   case SpvOpFConvert:              return nir_op_fmov;
+
+   /* Derivatives: */
+   case SpvOpDPdx:         return nir_op_fddx;
+   case SpvOpDPdy:         return nir_op_fddy;
+   case SpvOpDPdxFine:     return nir_op_fddx_fine;
+   case SpvOpDPdyFine:     return nir_op_fddy_fine;
+   case SpvOpDPdxCoarse:   return nir_op_fddx_coarse;
+   case SpvOpDPdyCoarse:   return nir_op_fddy_coarse;
+
+   default:
+      unreachable("No NIR equivalent");
+   }
+}
+
+void
+vtn_handle_alu(struct vtn_builder *b, SpvOp opcode,
+               const uint32_t *w, unsigned count)
+{
+   struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_ssa);
+   const struct glsl_type *type =
+      vtn_value(b, w[1], vtn_value_type_type)->type->type;
+
+   /* Collect the various SSA sources */
+   const unsigned num_inputs = count - 3;
+   struct vtn_ssa_value *vtn_src[4] = { NULL, };
+   for (unsigned i = 0; i < num_inputs; i++)
+      vtn_src[i] = vtn_ssa_value(b, w[i + 3]);
+
+   if (glsl_type_is_matrix(vtn_src[0]->type) ||
+       (num_inputs >= 2 && glsl_type_is_matrix(vtn_src[1]->type))) {
+      vtn_handle_matrix_alu(b, opcode, val, vtn_src[0], vtn_src[1]);
+      return;
+   }
+
+   val->ssa = vtn_create_ssa_value(b, type);
+   nir_ssa_def *src[4] = { NULL, };
+   for (unsigned i = 0; i < num_inputs; i++) {
+      assert(glsl_type_is_vector_or_scalar(vtn_src[i]->type));
+      src[i] = vtn_src[i]->def;
+   }
+
+   switch (opcode) {
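+   /* OpAny and OpAll have no direct NIR opcode.  A single component is just
+    * moved; wider boolean vectors are reduced against an all-false
+    * (resp. all-true) immediate with bany_inequal / ball_iequal.
+    */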
+   case SpvOpAny:
+      if (src[0]->num_components == 1) {
+         val->ssa->def = nir_imov(&b->nb, src[0]);
+      } else {
+         nir_op op;
+         switch (src[0]->num_components) {
+         case 2:  op = nir_op_bany_inequal2; break;
+         case 3:  op = nir_op_bany_inequal3; break;
+         case 4:  op = nir_op_bany_inequal4; break;
+         default: unreachable("Invalid number of components");
+         }
+         val->ssa->def = nir_build_alu(&b->nb, op, src[0],
+                                       nir_imm_int(&b->nb, NIR_FALSE),
+                                       NULL, NULL);
+      }
+      return;
+
+   case SpvOpAll:
+      if (src[0]->num_components == 1) {
+         val->ssa->def = nir_imov(&b->nb, src[0]);
+      } else {
+         nir_op op;
+         switch (src[0]->num_components) {
+         case 2:  op = nir_op_ball_iequal2;  break;
+         case 3:  op = nir_op_ball_iequal3;  break;
+         case 4:  op = nir_op_ball_iequal4;  break;
+         default: unreachable("Invalid number of components");
+         }
+         val->ssa->def = nir_build_alu(&b->nb, op, src[0],
+                                       nir_imm_int(&b->nb, NIR_TRUE),
+                                       NULL, NULL);
+      }
+      return;
+
+   case SpvOpOuterProduct: {
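+      /* Column i of the outer product is the column vector src[0] scaled by
+       * component i of src[1].
+       */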
+      for (unsigned i = 0; i < src[1]->num_components; i++) {
+         val->ssa->elems[i]->def =
+            nir_fmul(&b->nb, src[0], nir_channel(&b->nb, src[1], i));
+      }
+      return;
+   }
+
+   case SpvOpDot:
+      val->ssa->def = nir_fdot(&b->nb, src[0], src[1]);
+      return;
+
+   case SpvOpIAddCarry:
+      assert(glsl_type_is_struct(val->ssa->type));
+      val->ssa->elems[0]->def = nir_iadd(&b->nb, src[0], src[1]);
+      val->ssa->elems[1]->def = nir_uadd_carry(&b->nb, src[0], src[1]);
+      return;
+
+   case SpvOpISubBorrow:
+      assert(glsl_type_is_struct(val->ssa->type));
+      val->ssa->elems[0]->def = nir_isub(&b->nb, src[0], src[1]);
+      val->ssa->elems[1]->def = nir_usub_borrow(&b->nb, src[0], src[1]);
+      return;
+
+   case SpvOpUMulExtended:
+      assert(glsl_type_is_struct(val->ssa->type));
+      val->ssa->elems[0]->def = nir_imul(&b->nb, src[0], src[1]);
+      val->ssa->elems[1]->def = nir_umul_high(&b->nb, src[0], src[1]);
+      return;
+
+   case SpvOpSMulExtended:
+      assert(glsl_type_is_struct(val->ssa->type));
+      val->ssa->elems[0]->def = nir_imul(&b->nb, src[0], src[1]);
+      val->ssa->elems[1]->def = nir_imul_high(&b->nb, src[0], src[1]);
+      return;
+
+   case SpvOpFwidth:
+      val->ssa->def = nir_fadd(&b->nb,
+                               nir_fabs(&b->nb, nir_fddx(&b->nb, src[0])),
+                               nir_fabs(&b->nb, nir_fddy(&b->nb, src[0])));
+      return;
+   case SpvOpFwidthFine:
+      val->ssa->def = nir_fadd(&b->nb,
+                               nir_fabs(&b->nb, nir_fddx_fine(&b->nb, src[0])),
+                               nir_fabs(&b->nb, nir_fddy_fine(&b->nb, src[0])));
+      return;
+   case SpvOpFwidthCoarse:
+      val->ssa->def = nir_fadd(&b->nb,
+                               nir_fabs(&b->nb, nir_fddx_coarse(&b->nb, src[0])),
+                               nir_fabs(&b->nb, nir_fddy_coarse(&b->nb, src[0])));
+      return;
+
+   case SpvOpVectorTimesScalar:
+      /* The builder will take care of splatting for us. */
+      val->ssa->def = nir_fmul(&b->nb, src[0], src[1]);
+      return;
+
+   case SpvOpIsNan:
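+      /* NaN is the only value that compares unequal to itself. */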
+      val->ssa->def = nir_fne(&b->nb, src[0], src[0]);
+      return;
+
+   case SpvOpIsInf:
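+      /* Both +Inf and -Inf satisfy fabs(x) == INFINITY. */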
+      val->ssa->def = nir_feq(&b->nb, nir_fabs(&b->nb, src[0]),
+                                      nir_imm_float(&b->nb, INFINITY));
+      return;
+
+   default: {
+      bool swap;
+      nir_op op = vtn_nir_alu_op_for_spirv_opcode(opcode, &swap);
+
+      if (swap) {
+         nir_ssa_def *tmp = src[0];
+         src[0] = src[1];
+         src[1] = tmp;
+      }
+
+      val->ssa->def = nir_build_alu(&b->nb, op, src[0], src[1], src[2], src[3]);
+      return;
+   } /* default */
+   }
+}
diff --git a/src/compiler/nir/spirv/vtn_cfg.c b/src/compiler/nir/spirv/vtn_cfg.c
new file mode 100644 (file)
index 0000000..6a43ef8
--- /dev/null
@@ -0,0 +1,778 @@
+/*
+ * Copyright © 2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "vtn_private.h"
+#include "nir/nir_vla.h"
+
+static bool
+vtn_cfg_handle_prepass_instruction(struct vtn_builder *b, SpvOp opcode,
+                                   const uint32_t *w, unsigned count)
+{
+   switch (opcode) {
+   case SpvOpFunction: {
+      assert(b->func == NULL);
+      b->func = rzalloc(b, struct vtn_function);
+
+      list_inithead(&b->func->body);
+      b->func->control = w[3];
+
+      const struct glsl_type *result_type =
+         vtn_value(b, w[1], vtn_value_type_type)->type->type;
+      struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_function);
+      val->func = b->func;
+
+      const struct glsl_type *func_type =
+         vtn_value(b, w[4], vtn_value_type_type)->type->type;
+
+      assert(glsl_get_function_return_type(func_type) == result_type);
+
+      nir_function *func =
+         nir_function_create(b->shader, ralloc_strdup(b->shader, val->name));
+
+      func->num_params = glsl_get_length(func_type);
+      func->params = ralloc_array(b->shader, nir_parameter, func->num_params);
+      for (unsigned i = 0; i < func->num_params; i++) {
+         const struct glsl_function_param *param =
+            glsl_get_function_param(func_type, i);
+         func->params[i].type = param->type;
+         if (param->in) {
+            if (param->out) {
+               func->params[i].param_type = nir_parameter_inout;
+            } else {
+               func->params[i].param_type = nir_parameter_in;
+            }
+         } else {
+            if (param->out) {
+               func->params[i].param_type = nir_parameter_out;
+            } else {
+               assert(!"Parameter is neither in nor out");
+            }
+         }
+      }
+
+      func->return_type = glsl_get_function_return_type(func_type);
+
+      b->func->impl = nir_function_impl_create(func);
+
+      b->func_param_idx = 0;
+      break;
+   }
+
+   case SpvOpFunctionEnd:
+      b->func->end = w;
+      b->func = NULL;
+      break;
+
+   case SpvOpFunctionParameter: {
+      struct vtn_value *val =
+         vtn_push_value(b, w[2], vtn_value_type_access_chain);
+
+      struct vtn_type *type = vtn_value(b, w[1], vtn_value_type_type)->type;
+
+      assert(b->func_param_idx < b->func->impl->num_params);
+      nir_variable *param = b->func->impl->params[b->func_param_idx++];
+
+      assert(param->type == type->type);
+
+      /* Name the parameter so it shows up nicely in NIR */
+      param->name = ralloc_strdup(param, val->name);
+
+      struct vtn_variable *vtn_var = rzalloc(b, struct vtn_variable);
+      vtn_var->type = type;
+      vtn_var->var = param;
+      vtn_var->chain.var = vtn_var;
+      vtn_var->chain.length = 0;
+
+      struct vtn_type *without_array = type;
+      while (glsl_type_is_array(without_array->type))
+         without_array = without_array->array_element;
+
+      if (glsl_type_is_image(without_array->type)) {
+         vtn_var->mode = vtn_variable_mode_image;
+         param->interface_type = without_array->type;
+      } else if (glsl_type_is_sampler(without_array->type)) {
+         vtn_var->mode = vtn_variable_mode_sampler;
+         param->interface_type = without_array->type;
+      } else {
+         vtn_var->mode = vtn_variable_mode_param;
+      }
+
+      val->access_chain = &vtn_var->chain;
+      break;
+   }
+
+   case SpvOpLabel: {
+      assert(b->block == NULL);
+      b->block = rzalloc(b, struct vtn_block);
+      b->block->node.type = vtn_cf_node_type_block;
+      b->block->label = w;
+      vtn_push_value(b, w[1], vtn_value_type_block)->block = b->block;
+
+      if (b->func->start_block == NULL) {
+         /* This is the first block encountered for this function.  In this
+          * case, we set the start block and add it to the list of
+          * implemented functions that we'll walk later.
+          */
+         b->func->start_block = b->block;
+         exec_list_push_tail(&b->functions, &b->func->node);
+      }
+      break;
+   }
+
+   case SpvOpSelectionMerge:
+   case SpvOpLoopMerge:
+      assert(b->block && b->block->merge == NULL);
+      b->block->merge = w;
+      break;
+
+   case SpvOpBranch:
+   case SpvOpBranchConditional:
+   case SpvOpSwitch:
+   case SpvOpKill:
+   case SpvOpReturn:
+   case SpvOpReturnValue:
+   case SpvOpUnreachable:
+      assert(b->block && b->block->branch == NULL);
+      b->block->branch = w;
+      b->block = NULL;
+      break;
+
+   default:
+      /* Continue on as per normal */
+      return true;
+   }
+
+   return true;
+}
+
+static void
+vtn_add_case(struct vtn_builder *b, struct vtn_switch *swtch,
+             struct vtn_block *break_block,
+             uint32_t block_id, uint32_t val, bool is_default)
+{
+   struct vtn_block *case_block =
+      vtn_value(b, block_id, vtn_value_type_block)->block;
+
+   /* Don't create dummy cases that just break */
+   if (case_block == break_block)
+      return;
+
+   if (case_block->switch_case == NULL) {
+      struct vtn_case *c = ralloc(b, struct vtn_case);
+
+      list_inithead(&c->body);
+      c->start_block = case_block;
+      c->fallthrough = NULL;
+      nir_array_init(&c->values, b);
+      c->is_default = false;
+      c->visited = false;
+
+      list_addtail(&c->link, &swtch->cases);
+
+      case_block->switch_case = c;
+   }
+
+   if (is_default) {
+      case_block->switch_case->is_default = true;
+   } else {
+      nir_array_add(&case_block->switch_case->values, uint32_t, val);
+   }
+}
+
+/* This function performs a depth-first search of the cases and puts them
+ * in fall-through order.
+ */
+static void
+vtn_order_case(struct vtn_switch *swtch, struct vtn_case *cse)
+{
+   if (cse->visited)
+      return;
+
+   cse->visited = true;
+
+   list_del(&cse->link);
+
+   if (cse->fallthrough) {
+      vtn_order_case(swtch, cse->fallthrough);
+
+      /* If we have a fall-through, place this case right before the case it
+       * falls through to.  This ensures that fallthroughs come one after
+       * the other.  These two can never get separated because that would
+       * imply something else falling through to the same case.  Also, this
+       * can't break ordering because the DFS ensures that this case is
+       * visited before anything that falls through to it.
+       */
+      list_addtail(&cse->link, &cse->fallthrough->link);
+   } else {
+      list_add(&cse->link, &swtch->cases);
+   }
+}
+
+static enum vtn_branch_type
+vtn_get_branch_type(struct vtn_block *block,
+                    struct vtn_case *swcase, struct vtn_block *switch_break,
+                    struct vtn_block *loop_break, struct vtn_block *loop_cont)
+{
+   if (block->switch_case) {
+      /* This branch is actually a fallthrough */
+      assert(swcase->fallthrough == NULL ||
+             swcase->fallthrough == block->switch_case);
+      swcase->fallthrough = block->switch_case;
+      return vtn_branch_type_switch_fallthrough;
+   } else if (block == switch_break) {
+      return vtn_branch_type_switch_break;
+   } else if (block == loop_break) {
+      return vtn_branch_type_loop_break;
+   } else if (block == loop_cont) {
+      return vtn_branch_type_loop_continue;
+   } else {
+      return vtn_branch_type_none;
+   }
+}
+
+static void
+vtn_cfg_walk_blocks(struct vtn_builder *b, struct list_head *cf_list,
+                    struct vtn_block *start, struct vtn_case *switch_case,
+                    struct vtn_block *switch_break,
+                    struct vtn_block *loop_break, struct vtn_block *loop_cont,
+                    struct vtn_block *end)
+{
+   struct vtn_block *block = start;
+   while (block != end) {
+      if (block->merge && (*block->merge & SpvOpCodeMask) == SpvOpLoopMerge &&
+          !block->loop) {
+         struct vtn_loop *loop = ralloc(b, struct vtn_loop);
+
+         loop->node.type = vtn_cf_node_type_loop;
+         list_inithead(&loop->body);
+         list_inithead(&loop->cont_body);
+         loop->control = block->merge[3];
+
+         list_addtail(&loop->node.link, cf_list);
+         block->loop = loop;
+
+         struct vtn_block *new_loop_break =
+            vtn_value(b, block->merge[1], vtn_value_type_block)->block;
+         struct vtn_block *new_loop_cont =
+            vtn_value(b, block->merge[2], vtn_value_type_block)->block;
+
+         /* Note: This recursive call will start with the current block as
+          * its start block.  If we weren't careful, we would get here
+          * again and end up in infinite recursion.  This is why we set
+          * block->loop above and check for it before creating one.  This
+          * way, we only create the loop once and the second call that
+          * tries to handle this loop goes to the cases below and gets
+          * handled as a regular block.
+          *
+          * Note: When we make the recursive walk calls, we pass NULL for
+          * the switch break since you have to break out of the loop first.
+          * We do, however, still pass the current switch case because it's
+          * possible that the merge block for the loop is the start of
+          * another case.
+          */
+         vtn_cfg_walk_blocks(b, &loop->body, block, switch_case, NULL,
+                             new_loop_break, new_loop_cont, NULL);
+         vtn_cfg_walk_blocks(b, &loop->cont_body, new_loop_cont, NULL, NULL,
+                             new_loop_break, NULL, block);
+
+         block = new_loop_break;
+         continue;
+      }
+
+      assert(block->node.link.next == NULL);
+      list_addtail(&block->node.link, cf_list);
+
+      switch (*block->branch & SpvOpCodeMask) {
+      case SpvOpBranch: {
+         struct vtn_block *branch_block =
+            vtn_value(b, block->branch[1], vtn_value_type_block)->block;
+
+         block->branch_type = vtn_get_branch_type(branch_block,
+                                                  switch_case, switch_break,
+                                                  loop_break, loop_cont);
+
+         if (block->branch_type != vtn_branch_type_none)
+            return;
+
+         block = branch_block;
+         continue;
+      }
+
+      case SpvOpReturn:
+      case SpvOpReturnValue:
+         block->branch_type = vtn_branch_type_return;
+         return;
+
+      case SpvOpKill:
+         block->branch_type = vtn_branch_type_discard;
+         return;
+
+      case SpvOpBranchConditional: {
+         struct vtn_block *then_block =
+            vtn_value(b, block->branch[2], vtn_value_type_block)->block;
+         struct vtn_block *else_block =
+            vtn_value(b, block->branch[3], vtn_value_type_block)->block;
+
+         struct vtn_if *if_stmt = ralloc(b, struct vtn_if);
+
+         if_stmt->node.type = vtn_cf_node_type_if;
+         if_stmt->condition = block->branch[1];
+         list_inithead(&if_stmt->then_body);
+         list_inithead(&if_stmt->else_body);
+
+         list_addtail(&if_stmt->node.link, cf_list);
+
+         if (block->merge &&
+             (*block->merge & SpvOpCodeMask) == SpvOpSelectionMerge) {
+            if_stmt->control = block->merge[2];
+         }
+
+         if_stmt->then_type = vtn_get_branch_type(then_block,
+                                                  switch_case, switch_break,
+                                                  loop_break, loop_cont);
+         if_stmt->else_type = vtn_get_branch_type(else_block,
+                                                  switch_case, switch_break,
+                                                  loop_break, loop_cont);
+
+         if (if_stmt->then_type == vtn_branch_type_none &&
+             if_stmt->else_type == vtn_branch_type_none) {
+            /* Neither side of the if is something we can short-circuit. */
+            assert((*block->merge & SpvOpCodeMask) == SpvOpSelectionMerge);
+            struct vtn_block *merge_block =
+               vtn_value(b, block->merge[1], vtn_value_type_block)->block;
+
+            vtn_cfg_walk_blocks(b, &if_stmt->then_body, then_block,
+                                switch_case, switch_break,
+                                loop_break, loop_cont, merge_block);
+            vtn_cfg_walk_blocks(b, &if_stmt->else_body, else_block,
+                                switch_case, switch_break,
+                                loop_break, loop_cont, merge_block);
+
+            enum vtn_branch_type merge_type =
+               vtn_get_branch_type(merge_block, switch_case, switch_break,
+                                   loop_break, loop_cont);
+            if (merge_type == vtn_branch_type_none) {
+               block = merge_block;
+               continue;
+            } else {
+               return;
+            }
+         } else if (if_stmt->then_type != vtn_branch_type_none &&
+                    if_stmt->else_type != vtn_branch_type_none) {
+            /* Both sides were short-circuited.  We're done here. */
+            return;
+         } else {
+            /* Exactly one side of the branch could be short-circuited.
+             * We set the branch up as a predicated break/continue and we
+             * continue on with the other side as if it were what comes
+             * after the if.
+             */
+            if (if_stmt->then_type == vtn_branch_type_none) {
+               block = then_block;
+            } else {
+               block = else_block;
+            }
+            continue;
+         }
+         unreachable("Should have returned or continued");
+      }
+
+      case SpvOpSwitch: {
+         assert((*block->merge & SpvOpCodeMask) == SpvOpSelectionMerge);
+         struct vtn_block *break_block =
+            vtn_value(b, block->merge[1], vtn_value_type_block)->block;
+
+         struct vtn_switch *swtch = ralloc(b, struct vtn_switch);
+
+         swtch->node.type = vtn_cf_node_type_switch;
+         swtch->selector = block->branch[1];
+         list_inithead(&swtch->cases);
+
+         list_addtail(&swtch->node.link, cf_list);
+
+         /* First, we go through and record all of the cases. */
+         const uint32_t *branch_end =
+            block->branch + (block->branch[0] >> SpvWordCountShift);
+
+         vtn_add_case(b, swtch, break_block, block->branch[2], 0, true);
+         for (const uint32_t *w = block->branch + 3; w < branch_end; w += 2)
+            vtn_add_case(b, swtch, break_block, w[1], w[0], false);
+
+         /* Now, we go through and walk the blocks.  While we walk through
+          * the blocks, we also gather the much-needed fall-through
+          * information.
+          */
+         list_for_each_entry(struct vtn_case, cse, &swtch->cases, link) {
+            assert(cse->start_block != break_block);
+            vtn_cfg_walk_blocks(b, &cse->body, cse->start_block, cse,
+                                break_block, NULL, loop_cont, NULL);
+         }
+
+         /* Finally, we walk over all of the cases one more time and put
+          * them in fall-through order.
+          */
+         for (const uint32_t *w = block->branch + 2; w < branch_end; w += 2) {
+            struct vtn_block *case_block =
+               vtn_value(b, *w, vtn_value_type_block)->block;
+
+            if (case_block == break_block)
+               continue;
+
+            assert(case_block->switch_case);
+
+            vtn_order_case(swtch, case_block->switch_case);
+         }
+
+         block = break_block;
+         continue;
+      }
+
+      case SpvOpUnreachable:
+         return;
+
+      default:
+         unreachable("Unhandled opcode");
+      }
+   }
+}
+
+void
+vtn_build_cfg(struct vtn_builder *b, const uint32_t *words, const uint32_t *end)
+{
+   vtn_foreach_instruction(b, words, end,
+                           vtn_cfg_handle_prepass_instruction);
+
+   foreach_list_typed(struct vtn_function, func, node, &b->functions) {
+      vtn_cfg_walk_blocks(b, &func->body, func->start_block,
+                          NULL, NULL, NULL, NULL, NULL);
+   }
+}
+
+static bool
+vtn_handle_phis_first_pass(struct vtn_builder *b, SpvOp opcode,
+                           const uint32_t *w, unsigned count)
+{
+   if (opcode == SpvOpLabel)
+      return true; /* Nothing to do */
+
+   /* If this isn't a phi node, stop. */
+   if (opcode != SpvOpPhi)
+      return false;
+
+   /* For handling phi nodes, we do a poor-man's out-of-ssa on the spot.
+    * For each phi, we create a variable with the appropriate type and
+    * do a load from that variable.  Then, in a second pass, we add
+    * stores to that variable to each of the predecessor blocks.
+    *
+    * We could do something more intelligent here.  However, in order to
+    * handle loops and things properly, we really need dominance
+    * information.  It would end up basically being the into-SSA
+    * algorithm all over again.  It's easier if we just let
+    * lower_vars_to_ssa do that for us instead of repeating it here.
+    */
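+   /* Roughly, for a phi such as (ids hypothetical):
+    *
+    *    %x = OpPhi %type %a %blockA %b %blockB
+    *
+    * the first pass emits "x = load phi_var" here, and the second pass
+    * emits "store a -> phi_var" at the end of blockA and
+    * "store b -> phi_var" at the end of blockB.
+    */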
+   struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_ssa);
+
+   struct vtn_type *type = vtn_value(b, w[1], vtn_value_type_type)->type;
+   nir_variable *phi_var =
+      nir_local_variable_create(b->nb.impl, type->type, "phi");
+   _mesa_hash_table_insert(b->phi_table, w, phi_var);
+
+   val->ssa = vtn_local_load(b, nir_deref_var_create(b, phi_var));
+
+   return true;
+}
+
+static bool
+vtn_handle_phi_second_pass(struct vtn_builder *b, SpvOp opcode,
+                           const uint32_t *w, unsigned count)
+{
+   if (opcode != SpvOpPhi)
+      return true;
+
+   struct hash_entry *phi_entry = _mesa_hash_table_search(b->phi_table, w);
+   assert(phi_entry);
+   nir_variable *phi_var = phi_entry->data;
+
+   for (unsigned i = 3; i < count; i += 2) {
+      struct vtn_ssa_value *src = vtn_ssa_value(b, w[i]);
+      struct vtn_block *pred =
+         vtn_value(b, w[i + 1], vtn_value_type_block)->block;
+
+      b->nb.cursor = nir_after_block_before_jump(pred->end_block);
+
+      vtn_local_store(b, src, nir_deref_var_create(b, phi_var));
+   }
+
+   return true;
+}
+
+static void
+vtn_emit_branch(struct vtn_builder *b, enum vtn_branch_type branch_type,
+                nir_variable *switch_fall_var, bool *has_switch_break)
+{
+   switch (branch_type) {
+   case vtn_branch_type_switch_break:
+      nir_store_var(&b->nb, switch_fall_var, nir_imm_int(&b->nb, NIR_FALSE), 1);
+      *has_switch_break = true;
+      break;
+   case vtn_branch_type_switch_fallthrough:
+      break; /* Nothing to do */
+   case vtn_branch_type_loop_break:
+      nir_jump(&b->nb, nir_jump_break);
+      break;
+   case vtn_branch_type_loop_continue:
+      nir_jump(&b->nb, nir_jump_continue);
+      break;
+   case vtn_branch_type_return:
+      nir_jump(&b->nb, nir_jump_return);
+      break;
+   case vtn_branch_type_discard: {
+      nir_intrinsic_instr *discard =
+         nir_intrinsic_instr_create(b->nb.shader, nir_intrinsic_discard);
+      nir_builder_instr_insert(&b->nb, &discard->instr);
+      break;
+   }
+   default:
+      unreachable("Invalid branch type");
+   }
+}
+
+static void
+vtn_emit_cf_list(struct vtn_builder *b, struct list_head *cf_list,
+                 nir_variable *switch_fall_var, bool *has_switch_break,
+                 vtn_instruction_handler handler)
+{
+   list_for_each_entry(struct vtn_cf_node, node, cf_list, link) {
+      switch (node->type) {
+      case vtn_cf_node_type_block: {
+         struct vtn_block *block = (struct vtn_block *)node;
+
+         const uint32_t *block_start = block->label;
+         const uint32_t *block_end = block->merge ? block->merge :
+                                                    block->branch;
+
+         block_start = vtn_foreach_instruction(b, block_start, block_end,
+                                               vtn_handle_phis_first_pass);
+
+         vtn_foreach_instruction(b, block_start, block_end, handler);
+
+         block->end_block = nir_cursor_current_block(b->nb.cursor);
+
+         if ((*block->branch & SpvOpCodeMask) == SpvOpReturnValue) {
+            struct vtn_ssa_value *src = vtn_ssa_value(b, block->branch[1]);
+            vtn_local_store(b, src,
+                            nir_deref_var_create(b, b->impl->return_var));
+         }
+
+         if (block->branch_type != vtn_branch_type_none) {
+            vtn_emit_branch(b, block->branch_type,
+                            switch_fall_var, has_switch_break);
+         }
+
+         break;
+      }
+
+      case vtn_cf_node_type_if: {
+         struct vtn_if *vtn_if = (struct vtn_if *)node;
+
+         nir_if *if_stmt = nir_if_create(b->shader);
+         if_stmt->condition =
+            nir_src_for_ssa(vtn_ssa_value(b, vtn_if->condition)->def);
+         nir_cf_node_insert(b->nb.cursor, &if_stmt->cf_node);
+
+         bool sw_break = false;
+
+         b->nb.cursor = nir_after_cf_list(&if_stmt->then_list);
+         if (vtn_if->then_type == vtn_branch_type_none) {
+            vtn_emit_cf_list(b, &vtn_if->then_body,
+                             switch_fall_var, &sw_break, handler);
+         } else {
+            vtn_emit_branch(b, vtn_if->then_type, switch_fall_var, &sw_break);
+         }
+
+         b->nb.cursor = nir_after_cf_list(&if_stmt->else_list);
+         if (vtn_if->else_type == vtn_branch_type_none) {
+            vtn_emit_cf_list(b, &vtn_if->else_body,
+                             switch_fall_var, &sw_break, handler);
+         } else {
+            vtn_emit_branch(b, vtn_if->else_type, switch_fall_var, &sw_break);
+         }
+
+         b->nb.cursor = nir_after_cf_node(&if_stmt->cf_node);
+
+         /* If we encountered a switch break somewhere inside of the if,
+          * then it would have been handled correctly by calling
+          * emit_cf_list or emit_branch for the interior.  However, we
+          * need to predicate everything following on whether or not we're
+          * still going.
+          */
+         if (sw_break) {
+            *has_switch_break = true;
+
+            nir_if *switch_if = nir_if_create(b->shader);
+            switch_if->condition =
+               nir_src_for_ssa(nir_load_var(&b->nb, switch_fall_var));
+            nir_cf_node_insert(b->nb.cursor, &switch_if->cf_node);
+
+            b->nb.cursor = nir_after_cf_list(&if_stmt->then_list);
+         }
+         break;
+      }
+
+      case vtn_cf_node_type_loop: {
+         struct vtn_loop *vtn_loop = (struct vtn_loop *)node;
+
+         nir_loop *loop = nir_loop_create(b->shader);
+         nir_cf_node_insert(b->nb.cursor, &loop->cf_node);
+
+         b->nb.cursor = nir_after_cf_list(&loop->body);
+         vtn_emit_cf_list(b, &vtn_loop->body, NULL, NULL, handler);
+
+         if (!list_empty(&vtn_loop->cont_body)) {
+            /* If we have a non-trivial continue body then we need to put
+             * it at the beginning of the loop with a flag to ensure that
+             * it doesn't get executed in the first iteration.
+             */
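+            /* The emitted structure is roughly:
+             *
+             *    cont = false;
+             *    loop {
+             *       if (cont) { ...continue body... }
+             *       cont = true;
+             *       ...loop body...
+             *    }
+             */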
+            nir_variable *do_cont =
+               nir_local_variable_create(b->nb.impl, glsl_bool_type(), "cont");
+
+            b->nb.cursor = nir_before_cf_node(&loop->cf_node);
+            nir_store_var(&b->nb, do_cont, nir_imm_int(&b->nb, NIR_FALSE), 1);
+
+            b->nb.cursor = nir_before_cf_list(&loop->body);
+            nir_if *cont_if = nir_if_create(b->shader);
+            cont_if->condition = nir_src_for_ssa(nir_load_var(&b->nb, do_cont));
+            nir_cf_node_insert(b->nb.cursor, &cont_if->cf_node);
+
+            b->nb.cursor = nir_after_cf_list(&cont_if->then_list);
+            vtn_emit_cf_list(b, &vtn_loop->cont_body, NULL, NULL, handler);
+
+            b->nb.cursor = nir_after_cf_node(&cont_if->cf_node);
+            nir_store_var(&b->nb, do_cont, nir_imm_int(&b->nb, NIR_TRUE), 1);
+
+            b->has_loop_continue = true;
+         }
+
+         b->nb.cursor = nir_after_cf_node(&loop->cf_node);
+         break;
+      }
+
+      case vtn_cf_node_type_switch: {
+         struct vtn_switch *vtn_switch = (struct vtn_switch *)node;
+
+         /* First, we create a variable to keep track of whether or not the
+          * switch is still going at any given point.  Any switch breaks
+          * will set this variable to false.
+          */
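+         /* The emitted NIR is roughly one if per case, in fall-through
+          * order:
+          *
+          *    fall = false;
+          *    if (fall || case_condition) { fall = true; ...case body... }
+          *    ...
+          *
+          * The default case uses the negation of the OR of all case
+          * conditions, and a switch break inside a case stores fall = false.
+          */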
+         nir_variable *fall_var =
+            nir_local_variable_create(b->nb.impl, glsl_bool_type(), "fall");
+         nir_store_var(&b->nb, fall_var, nir_imm_int(&b->nb, NIR_FALSE), 1);
+
+         /* Next, we gather up all of the conditions.  We have to do this
+          * up-front because we also need to build an "any" condition so
+          * that we can use !any for default.
+          */
+         const int num_cases = list_length(&vtn_switch->cases);
+         NIR_VLA(nir_ssa_def *, conditions, num_cases);
+
+         nir_ssa_def *sel = vtn_ssa_value(b, vtn_switch->selector)->def;
+         /* An accumulation of all conditions.  Used for the default */
+         nir_ssa_def *any = NULL;
+
+         int i = 0;
+         list_for_each_entry(struct vtn_case, cse, &vtn_switch->cases, link) {
+            if (cse->is_default) {
+               conditions[i++] = NULL;
+               continue;
+            }
+
+            nir_ssa_def *cond = NULL;
+            nir_array_foreach(&cse->values, uint32_t, val) {
+               nir_ssa_def *is_val =
+                  nir_ieq(&b->nb, sel, nir_imm_int(&b->nb, *val));
+
+               cond = cond ? nir_ior(&b->nb, cond, is_val) : is_val;
+            }
+
+            any = any ? nir_ior(&b->nb, any, cond) : cond;
+            conditions[i++] = cond;
+         }
+         assert(i == num_cases);
+
+         /* Now we can walk the list of cases and actually emit code */
+         i = 0;
+         list_for_each_entry(struct vtn_case, cse, &vtn_switch->cases, link) {
+            /* Figure out the condition */
+            nir_ssa_def *cond = conditions[i++];
+            if (cse->is_default) {
+               assert(cond == NULL);
+               cond = nir_inot(&b->nb, any);
+            }
+            /* Take fallthrough into account */
+            cond = nir_ior(&b->nb, cond, nir_load_var(&b->nb, fall_var));
+
+            nir_if *case_if = nir_if_create(b->nb.shader);
+            case_if->condition = nir_src_for_ssa(cond);
+            nir_cf_node_insert(b->nb.cursor, &case_if->cf_node);
+
+            bool has_break = false;
+            b->nb.cursor = nir_after_cf_list(&case_if->then_list);
+            nir_store_var(&b->nb, fall_var, nir_imm_int(&b->nb, NIR_TRUE), 1);
+            vtn_emit_cf_list(b, &cse->body, fall_var, &has_break, handler);
+            (void)has_break; /* We don't care */
+
+            b->nb.cursor = nir_after_cf_node(&case_if->cf_node);
+         }
+         assert(i == num_cases);
+
+         break;
+      }
+
+      default:
+         unreachable("Invalid CF node type");
+      }
+   }
+}
+
+void
+vtn_function_emit(struct vtn_builder *b, struct vtn_function *func,
+                  vtn_instruction_handler instruction_handler)
+{
+   nir_builder_init(&b->nb, func->impl);
+   b->nb.cursor = nir_after_cf_list(&func->impl->body);
+   b->has_loop_continue = false;
+   b->phi_table = _mesa_hash_table_create(b, _mesa_hash_pointer,
+                                          _mesa_key_pointer_equal);
+
+   vtn_emit_cf_list(b, &func->body, NULL, NULL, instruction_handler);
+
+   vtn_foreach_instruction(b, func->start_block->label, func->end,
+                           vtn_handle_phi_second_pass);
+
+   /* Continue blocks for loops get inserted before the body of the loop
+    * but instructions in the continue may use SSA defs in the loop body.
+    * Therefore, we need to repair SSA to insert the needed phi nodes.
+    */
+   if (b->has_loop_continue)
+      nir_repair_ssa_impl(func->impl);
+}
diff --git a/src/compiler/nir/spirv/vtn_glsl450.c b/src/compiler/nir/spirv/vtn_glsl450.c
new file mode 100644 (file)
index 0000000..6b649fd
--- /dev/null
@@ -0,0 +1,669 @@
+/*
+ * Copyright © 2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ *    Jason Ekstrand (jason@jlekstrand.net)
+ *
+ */
+
+#include "vtn_private.h"
+#include "GLSL.std.450.h"
+
+#define M_PIf   ((float) M_PI)
+#define M_PI_2f ((float) M_PI_2)
+#define M_PI_4f ((float) M_PI_4)
+
+static nir_ssa_def *
+build_mat2_det(nir_builder *b, nir_ssa_def *col[2])
+{
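+   /* With columns col[0] = (a, b) and col[1] = (c, d), this computes
+    * a*d - b*c.
+    */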
+   unsigned swiz[4] = {1, 0, 0, 0};
+   nir_ssa_def *p = nir_fmul(b, col[0], nir_swizzle(b, col[1], swiz, 2, true));
+   return nir_fsub(b, nir_channel(b, p, 0), nir_channel(b, p, 1));
+}
+
+static nir_ssa_def *
+build_mat3_det(nir_builder *b, nir_ssa_def *col[3])
+{
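+   /* Scalar triple product: det = dot(col[0], cross(col[1], col[2])),
+    * written out with yzx/zxy swizzles.
+    */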
+   unsigned yzx[4] = {1, 2, 0, 0};
+   unsigned zxy[4] = {2, 0, 1, 0};
+
+   nir_ssa_def *prod0 =
+      nir_fmul(b, col[0],
+               nir_fmul(b, nir_swizzle(b, col[1], yzx, 3, true),
+                           nir_swizzle(b, col[2], zxy, 3, true)));
+   nir_ssa_def *prod1 =
+      nir_fmul(b, col[0],
+               nir_fmul(b, nir_swizzle(b, col[1], zxy, 3, true),
+                           nir_swizzle(b, col[2], yzx, 3, true)));
+
+   nir_ssa_def *diff = nir_fsub(b, prod0, prod1);
+
+   return nir_fadd(b, nir_channel(b, diff, 0),
+                      nir_fadd(b, nir_channel(b, diff, 1),
+                                  nir_channel(b, diff, 2)));
+}
+
+static nir_ssa_def *
+build_mat4_det(nir_builder *b, nir_ssa_def **col)
+{
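+   /* Cofactor expansion along the first column: subdet[i] is the 3x3 minor
+    * obtained by dropping row i and column 0, and the signs alternate
+    * +, -, +, -.
+    */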
+   nir_ssa_def *subdet[4];
+   for (unsigned i = 0; i < 4; i++) {
+      unsigned swiz[3];
+      for (unsigned j = 0; j < 3; j++)
+         swiz[j] = j + (j >= i);
+
+      nir_ssa_def *subcol[3];
+      subcol[0] = nir_swizzle(b, col[1], swiz, 3, true);
+      subcol[1] = nir_swizzle(b, col[2], swiz, 3, true);
+      subcol[2] = nir_swizzle(b, col[3], swiz, 3, true);
+
+      subdet[i] = build_mat3_det(b, subcol);
+   }
+
+   nir_ssa_def *prod = nir_fmul(b, col[0], nir_vec(b, subdet, 4));
+
+   return nir_fadd(b, nir_fsub(b, nir_channel(b, prod, 0),
+                                  nir_channel(b, prod, 1)),
+                      nir_fsub(b, nir_channel(b, prod, 2),
+                                  nir_channel(b, prod, 3)));
+}
+
+static nir_ssa_def *
+build_mat_det(struct vtn_builder *b, struct vtn_ssa_value *src)
+{
+   unsigned size = glsl_get_vector_elements(src->type);
+
+   nir_ssa_def *cols[4];
+   for (unsigned i = 0; i < size; i++)
+      cols[i] = src->elems[i]->def;
+
+   switch (size) {
+   case 2: return build_mat2_det(&b->nb, cols);
+   case 3: return build_mat3_det(&b->nb, cols);
+   case 4: return build_mat4_det(&b->nb, cols);
+   default:
+      unreachable("Invalid matrix size");
+   }
+}
+
+/* Computes the determinant of the submatrix given by taking src and
+ * removing the specified row and column.
+ */
+static nir_ssa_def *
+build_mat_subdet(struct nir_builder *b, struct vtn_ssa_value *src,
+                 unsigned size, unsigned row, unsigned col)
+{
+   assert(row < size && col < size);
+   if (size == 2) {
+      return nir_channel(b, src->elems[1 - col]->def, 1 - row);
+   } else {
+      /* Swizzle to get all but the specified row */
+      unsigned swiz[3];
+      for (unsigned j = 0; j < 3; j++)
+         swiz[j] = j + (j >= row);
+
+      /* Grab all but the specified column */
+      nir_ssa_def *subcol[3];
+      for (unsigned j = 0; j < size; j++) {
+         if (j != col) {
+            subcol[j - (j > col)] = nir_swizzle(b, src->elems[j]->def,
+                                                swiz, size - 1, true);
+         }
+      }
+
+      if (size == 3) {
+         return build_mat2_det(b, subcol);
+      } else {
+         assert(size == 4);
+         return build_mat3_det(b, subcol);
+      }
+   }
+}
+
+static struct vtn_ssa_value *
+matrix_inverse(struct vtn_builder *b, struct vtn_ssa_value *src)
+{
+   nir_ssa_def *adj_col[4];
+   unsigned size = glsl_get_vector_elements(src->type);
+
+   /* Build up an adjugate matrix */
+   for (unsigned c = 0; c < size; c++) {
+      nir_ssa_def *elem[4];
+      for (unsigned r = 0; r < size; r++) {
+         elem[r] = build_mat_subdet(&b->nb, src, size, c, r);
+
+         if ((r + c) % 2)
+            elem[r] = nir_fneg(&b->nb, elem[r]);
+      }
+
+      adj_col[c] = nir_vec(&b->nb, elem, size);
+   }
+
+   nir_ssa_def *det_inv = nir_frcp(&b->nb, build_mat_det(b, src));
+
+   struct vtn_ssa_value *val = vtn_create_ssa_value(b, src->type);
+   for (unsigned i = 0; i < size; i++)
+      val->elems[i]->def = nir_fmul(&b->nb, adj_col[i], det_inv);
+
+   return val;
+}
+
+static nir_ssa_def*
+build_length(nir_builder *b, nir_ssa_def *vec)
+{
+   switch (vec->num_components) {
+   case 1: return nir_fsqrt(b, nir_fmul(b, vec, vec));
+   case 2: return nir_fsqrt(b, nir_fdot2(b, vec, vec));
+   case 3: return nir_fsqrt(b, nir_fdot3(b, vec, vec));
+   case 4: return nir_fsqrt(b, nir_fdot4(b, vec, vec));
+   default:
+      unreachable("Invalid number of components");
+   }
+}
+
+static inline nir_ssa_def *
+build_fclamp(nir_builder *b,
+             nir_ssa_def *x, nir_ssa_def *min_val, nir_ssa_def *max_val)
+{
+   return nir_fmin(b, nir_fmax(b, x, min_val), max_val);
+}
+
+/**
+ * Return e^x.
+ */
+static nir_ssa_def *
+build_exp(nir_builder *b, nir_ssa_def *x)
+{
+   return nir_fexp2(b, nir_fmul(b, x, nir_imm_float(b, M_LOG2E)));
+}
+
+/**
+ * Return ln(x) - the natural logarithm of x.
+ */
+static nir_ssa_def *
+build_log(nir_builder *b, nir_ssa_def *x)
+{
+   return nir_fmul(b, nir_flog2(b, x), nir_imm_float(b, 1.0 / M_LOG2E));
+}
+
+/**
+ * Approximate asin(x) by the formula:
+ *    asin~(x) = sign(x) * (pi/2 - sqrt(1 - |x|) * (pi/2 + |x|(pi/4 - 1 + |x|(p0 + |x|p1))))
+ *
+ * which is correct to first order at x=0 and x=±1 regardless of the p
+ * coefficients but can be made second-order correct at both ends by selecting
+ * the fit coefficients appropriately.  Different p coefficients can be used
+ * in the asin and acos implementation to minimize some relative error metric
+ * in each case.
+ */
+static nir_ssa_def *
+build_asin(nir_builder *b, nir_ssa_def *x, float p0, float p1)
+{
+   nir_ssa_def *abs_x = nir_fabs(b, x);
+   return nir_fmul(b, nir_fsign(b, x),
+                   nir_fsub(b, nir_imm_float(b, M_PI_2f),
+                            nir_fmul(b, nir_fsqrt(b, nir_fsub(b, nir_imm_float(b, 1.0f), abs_x)),
+                                     nir_fadd(b, nir_imm_float(b, M_PI_2f),
+                                              nir_fmul(b, abs_x,
+                                                       nir_fadd(b, nir_imm_float(b, M_PI_4f - 1.0f),
+                                                                nir_fmul(b, abs_x,
+                                                                         nir_fadd(b, nir_imm_float(b, p0),
+                                                                                  nir_fmul(b, abs_x,
+                                                                                           nir_imm_float(b, p1))))))))));
+}
+
+/**
+ * Compute xs[0] + xs[1] + xs[2] + ... using fadd.
+ */
+static nir_ssa_def *
+build_fsum(nir_builder *b, nir_ssa_def **xs, int terms)
+{
+   nir_ssa_def *accum = xs[0];
+
+   for (int i = 1; i < terms; i++)
+      accum = nir_fadd(b, accum, xs[i]);
+
+   return accum;
+}
+
+static nir_ssa_def *
+build_atan(nir_builder *b, nir_ssa_def *y_over_x)
+{
+   nir_ssa_def *abs_y_over_x = nir_fabs(b, y_over_x);
+   nir_ssa_def *one = nir_imm_float(b, 1.0f);
+
+   /*
+    * range-reduction, first step:
+    *
+    *      / y_over_x         if |y_over_x| <= 1.0;
+    * x = <
+    *      \ 1.0 / y_over_x   otherwise
+    */
+   nir_ssa_def *x = nir_fdiv(b, nir_fmin(b, abs_y_over_x, one),
+                                nir_fmax(b, abs_y_over_x, one));
+
+   /*
+    * approximate atan by evaluating polynomial:
+    *
+    * x   * 0.9999793128310355 - x^3  * 0.3326756418091246 +
+    * x^5 * 0.1938924977115610 - x^7  * 0.1173503194786851 +
+    * x^9 * 0.0536813784310406 - x^11 * 0.0121323213173444
+    */
+   nir_ssa_def *x_2  = nir_fmul(b, x,   x);
+   nir_ssa_def *x_3  = nir_fmul(b, x_2, x);
+   nir_ssa_def *x_5  = nir_fmul(b, x_3, x_2);
+   nir_ssa_def *x_7  = nir_fmul(b, x_5, x_2);
+   nir_ssa_def *x_9  = nir_fmul(b, x_7, x_2);
+   nir_ssa_def *x_11 = nir_fmul(b, x_9, x_2);
+
+   nir_ssa_def *polynomial_terms[] = {
+      nir_fmul(b, x,    nir_imm_float(b,  0.9999793128310355f)),
+      nir_fmul(b, x_3,  nir_imm_float(b, -0.3326756418091246f)),
+      nir_fmul(b, x_5,  nir_imm_float(b,  0.1938924977115610f)),
+      nir_fmul(b, x_7,  nir_imm_float(b, -0.1173503194786851f)),
+      nir_fmul(b, x_9,  nir_imm_float(b,  0.0536813784310406f)),
+      nir_fmul(b, x_11, nir_imm_float(b, -0.0121323213173444f)),
+   };
+
+   nir_ssa_def *tmp =
+      build_fsum(b, polynomial_terms, ARRAY_SIZE(polynomial_terms));
+
+   /* range-reduction fixup */
+   tmp = nir_fadd(b, tmp,
+                  nir_fmul(b,
+                           nir_b2f(b, nir_flt(b, one, abs_y_over_x)),
+                           nir_fadd(b, nir_fmul(b, tmp,
+                                                nir_imm_float(b, -2.0f)),
+                                       nir_imm_float(b, M_PI_2f))));
+
+   /* sign fixup */
+   return nir_fmul(b, tmp, nir_fsign(b, y_over_x));
+}
+
+static nir_ssa_def *
+build_atan2(nir_builder *b, nir_ssa_def *y, nir_ssa_def *x)
+{
+   nir_ssa_def *zero = nir_imm_float(b, 0.0f);
+
+   /* If |x| >= 1.0e-8 * |y|: */
+   nir_ssa_def *condition =
+      nir_fge(b, nir_fabs(b, x),
+              nir_fmul(b, nir_imm_float(b, 1.0e-8f), nir_fabs(b, y)));
+
+   /* Then...call atan(y/x) and fix it up: */
+   nir_ssa_def *atan1 = build_atan(b, nir_fdiv(b, y, x));
+   nir_ssa_def *r_then =
+      nir_bcsel(b, nir_flt(b, x, zero),
+                   nir_fadd(b, atan1,
+                               nir_bcsel(b, nir_fge(b, y, zero),
+                                            nir_imm_float(b, M_PIf),
+                                            nir_imm_float(b, -M_PIf))),
+                   atan1);
+
+   /* Else... */
+   nir_ssa_def *r_else =
+      nir_fmul(b, nir_fsign(b, y), nir_imm_float(b, M_PI_2f));
+
+   return nir_bcsel(b, condition, r_then, r_else);
+}
+
+static nir_ssa_def *
+build_frexp(nir_builder *b, nir_ssa_def *x, nir_ssa_def **exponent)
+{
+   nir_ssa_def *abs_x = nir_fabs(b, x);
+   nir_ssa_def *zero = nir_imm_float(b, 0.0f);
+
+   /* Single-precision floating-point values are stored as
+    *   1 sign bit;
+    *   8 exponent bits;
+    *   23 mantissa bits.
+    *
+    * An exponent shift of 23 will shift the mantissa out, leaving only the
+    * exponent and sign bit (which itself may be zero, if the absolute value
+    * was taken before the bitcast and shift).
+    */
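+   /* For example, frexp(8.0) yields a mantissa of 0.5 and an exponent of 4,
+    * since 8.0 == 0.5 * 2^4.
+    */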
+   nir_ssa_def *exponent_shift = nir_imm_int(b, 23);
+   nir_ssa_def *exponent_bias = nir_imm_int(b, -126);
+
+   nir_ssa_def *sign_mantissa_mask = nir_imm_int(b, 0x807fffffu);
+
+   /* Exponent of floating-point values in the range [0.5, 1.0). */
+   nir_ssa_def *exponent_value = nir_imm_int(b, 0x3f000000u);
+
+   nir_ssa_def *is_not_zero = nir_fne(b, abs_x, zero);
+
+   *exponent =
+      nir_iadd(b, nir_ushr(b, abs_x, exponent_shift),
+                  nir_bcsel(b, is_not_zero, exponent_bias, zero));
+
+   return nir_ior(b, nir_iand(b, x, sign_mantissa_mask),
+                     nir_bcsel(b, is_not_zero, exponent_value, zero));
+}
+
+static void
+handle_glsl450_alu(struct vtn_builder *b, enum GLSLstd450 entrypoint,
+                   const uint32_t *w, unsigned count)
+{
+   struct nir_builder *nb = &b->nb;
+   const struct glsl_type *dest_type =
+      vtn_value(b, w[1], vtn_value_type_type)->type->type;
+
+   struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_ssa);
+   val->ssa = vtn_create_ssa_value(b, dest_type);
+
+   /* Collect the various SSA sources */
+   unsigned num_inputs = count - 5;
+   nir_ssa_def *src[3];
+   for (unsigned i = 0; i < num_inputs; i++)
+      src[i] = vtn_ssa_value(b, w[i + 5])->def;
+
+   nir_op op;
+   switch (entrypoint) {
+   case GLSLstd450Round:       op = nir_op_fround_even;   break; /* TODO */
+   case GLSLstd450RoundEven:   op = nir_op_fround_even;   break;
+   case GLSLstd450Trunc:       op = nir_op_ftrunc;        break;
+   case GLSLstd450FAbs:        op = nir_op_fabs;          break;
+   case GLSLstd450SAbs:        op = nir_op_iabs;          break;
+   case GLSLstd450FSign:       op = nir_op_fsign;         break;
+   case GLSLstd450SSign:       op = nir_op_isign;         break;
+   case GLSLstd450Floor:       op = nir_op_ffloor;        break;
+   case GLSLstd450Ceil:        op = nir_op_fceil;         break;
+   case GLSLstd450Fract:       op = nir_op_ffract;        break;
+   case GLSLstd450Radians:
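+      /* radians = degrees * (pi / 180) */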
+      val->ssa->def = nir_fmul(nb, src[0], nir_imm_float(nb, 0.01745329251));
+      return;
+   case GLSLstd450Degrees:
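+      /* degrees = radians * (180 / pi) */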
+      val->ssa->def = nir_fmul(nb, src[0], nir_imm_float(nb, 57.2957795131));
+      return;
+   case GLSLstd450Sin:         op = nir_op_fsin;       break;
+   case GLSLstd450Cos:         op = nir_op_fcos;       break;
+   case GLSLstd450Tan:
+      val->ssa->def = nir_fdiv(nb, nir_fsin(nb, src[0]),
+                               nir_fcos(nb, src[0]));
+      return;
+   case GLSLstd450Pow:         op = nir_op_fpow;       break;
+   case GLSLstd450Exp2:        op = nir_op_fexp2;      break;
+   case GLSLstd450Log2:        op = nir_op_flog2;      break;
+   case GLSLstd450Sqrt:        op = nir_op_fsqrt;      break;
+   case GLSLstd450InverseSqrt: op = nir_op_frsq;       break;
+
+   case GLSLstd450Modf: {
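+      /* Split x into a fractional and a whole part, both carrying the sign
+       * of x; e.g. modf(-3.75) yields -0.75 with -3.0 stored to the pointer.
+       */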
+      nir_ssa_def *sign = nir_fsign(nb, src[0]);
+      nir_ssa_def *abs = nir_fabs(nb, src[0]);
+      val->ssa->def = nir_fmul(nb, sign, nir_ffract(nb, abs));
+      nir_store_deref_var(nb, vtn_nir_deref(b, w[6]),
+                          nir_fmul(nb, sign, nir_ffloor(nb, abs)), 0xf);
+      return;
+   }
+
+   case GLSLstd450ModfStruct: {
+      nir_ssa_def *sign = nir_fsign(nb, src[0]);
+      nir_ssa_def *abs = nir_fabs(nb, src[0]);
+      assert(glsl_type_is_struct(val->ssa->type));
+      val->ssa->elems[0]->def = nir_fmul(nb, sign, nir_ffract(nb, abs));
+      val->ssa->elems[1]->def = nir_fmul(nb, sign, nir_ffloor(nb, abs));
+      return;
+   }
+
+   case GLSLstd450FMin:        op = nir_op_fmin;       break;
+   case GLSLstd450UMin:        op = nir_op_umin;       break;
+   case GLSLstd450SMin:        op = nir_op_imin;       break;
+   case GLSLstd450FMax:        op = nir_op_fmax;       break;
+   case GLSLstd450UMax:        op = nir_op_umax;       break;
+   case GLSLstd450SMax:        op = nir_op_imax;       break;
+   case GLSLstd450FMix:        op = nir_op_flrp;       break;
+   case GLSLstd450Step:
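+      /* step(edge, x) is 0.0 if x < edge and 1.0 otherwise. */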
+      val->ssa->def = nir_sge(nb, src[1], src[0]);
+      return;
+
+   case GLSLstd450Fma:         op = nir_op_ffma;       break;
+   case GLSLstd450Ldexp:       op = nir_op_ldexp;      break;
+
+   /* Packing/Unpacking functions */
+   case GLSLstd450PackSnorm4x8:      op = nir_op_pack_snorm_4x8;      break;
+   case GLSLstd450PackUnorm4x8:      op = nir_op_pack_unorm_4x8;      break;
+   case GLSLstd450PackSnorm2x16:     op = nir_op_pack_snorm_2x16;     break;
+   case GLSLstd450PackUnorm2x16:     op = nir_op_pack_unorm_2x16;     break;
+   case GLSLstd450PackHalf2x16:      op = nir_op_pack_half_2x16;      break;
+   case GLSLstd450UnpackSnorm4x8:    op = nir_op_unpack_snorm_4x8;    break;
+   case GLSLstd450UnpackUnorm4x8:    op = nir_op_unpack_unorm_4x8;    break;
+   case GLSLstd450UnpackSnorm2x16:   op = nir_op_unpack_snorm_2x16;   break;
+   case GLSLstd450UnpackUnorm2x16:   op = nir_op_unpack_unorm_2x16;   break;
+   case GLSLstd450UnpackHalf2x16:    op = nir_op_unpack_half_2x16;    break;
+
+   case GLSLstd450Length:
+      val->ssa->def = build_length(nb, src[0]);
+      return;
+   case GLSLstd450Distance:
+      val->ssa->def = build_length(nb, nir_fsub(nb, src[0], src[1]));
+      return;
+   case GLSLstd450Normalize:
+      val->ssa->def = nir_fdiv(nb, src[0], build_length(nb, src[0]));
+      return;
+
+   case GLSLstd450Exp:
+      val->ssa->def = build_exp(nb, src[0]);
+      return;
+
+   case GLSLstd450Log:
+      val->ssa->def = build_log(nb, src[0]);
+      return;
+
+   case GLSLstd450FClamp:
+      val->ssa->def = build_fclamp(nb, src[0], src[1], src[2]);
+      return;
+   case GLSLstd450UClamp:
+      val->ssa->def = nir_umin(nb, nir_umax(nb, src[0], src[1]), src[2]);
+      return;
+   case GLSLstd450SClamp:
+      val->ssa->def = nir_imin(nb, nir_imax(nb, src[0], src[1]), src[2]);
+      return;
+
+   case GLSLstd450Cross: {
+      unsigned yzx[4] = { 1, 2, 0, 0 };
+      unsigned zxy[4] = { 2, 0, 1, 0 };
+      val->ssa->def =
+         nir_fsub(nb, nir_fmul(nb, nir_swizzle(nb, src[0], yzx, 3, true),
+                                   nir_swizzle(nb, src[1], zxy, 3, true)),
+                      nir_fmul(nb, nir_swizzle(nb, src[0], zxy, 3, true),
+                                   nir_swizzle(nb, src[1], yzx, 3, true)));
+      return;
+   }
+
+   case GLSLstd450SmoothStep: {
+      /* t = clamp((x - edge0) / (edge1 - edge0), 0, 1) */
+      nir_ssa_def *t =
+         build_fclamp(nb, nir_fdiv(nb, nir_fsub(nb, src[2], src[0]),
+                                       nir_fsub(nb, src[1], src[0])),
+                          nir_imm_float(nb, 0.0), nir_imm_float(nb, 1.0));
+      /* result = t * t * (3 - 2 * t) */
+      val->ssa->def =
+         nir_fmul(nb, t, nir_fmul(nb, t,
+            nir_fsub(nb, nir_imm_float(nb, 3.0),
+                         nir_fmul(nb, nir_imm_float(nb, 2.0), t))));
+      return;
+   }
+
+   case GLSLstd450FaceForward:
+      val->ssa->def =
+         nir_bcsel(nb, nir_flt(nb, nir_fdot(nb, src[2], src[1]),
+                                   nir_imm_float(nb, 0.0)),
+                       src[0], nir_fneg(nb, src[0]));
+      return;
+
+   case GLSLstd450Reflect:
+      /* I - 2 * dot(N, I) * N */
+      val->ssa->def =
+         nir_fsub(nb, src[0], nir_fmul(nb, nir_imm_float(nb, 2.0),
+                              nir_fmul(nb, nir_fdot(nb, src[0], src[1]),
+                                           src[1])));
+      return;
+
+   case GLSLstd450Refract: {
+      nir_ssa_def *I = src[0];
+      nir_ssa_def *N = src[1];
+      nir_ssa_def *eta = src[2];
+      nir_ssa_def *n_dot_i = nir_fdot(nb, N, I);
+      nir_ssa_def *one = nir_imm_float(nb, 1.0);
+      nir_ssa_def *zero = nir_imm_float(nb, 0.0);
+      /* k = 1.0 - eta * eta * (1.0 - dot(N, I) * dot(N, I)) */
+      nir_ssa_def *k =
+         nir_fsub(nb, one, nir_fmul(nb, eta, nir_fmul(nb, eta,
+                      nir_fsub(nb, one, nir_fmul(nb, n_dot_i, n_dot_i)))));
+      nir_ssa_def *result =
+         nir_fsub(nb, nir_fmul(nb, eta, I),
+                      nir_fmul(nb, nir_fadd(nb, nir_fmul(nb, eta, n_dot_i),
+                                                nir_fsqrt(nb, k)), N));
+      /* XXX: bcsel, or if statement? */
+      val->ssa->def = nir_bcsel(nb, nir_flt(nb, k, zero), zero, result);
+      return;
+   }
+
+   case GLSLstd450Sinh:
+      /* 0.5 * (e^x - e^(-x)) */
+      val->ssa->def =
+         nir_fmul(nb, nir_imm_float(nb, 0.5f),
+                      nir_fsub(nb, build_exp(nb, src[0]),
+                                   build_exp(nb, nir_fneg(nb, src[0]))));
+      return;
+
+   case GLSLstd450Cosh:
+      /* 0.5 * (e^x + e^(-x)) */
+      val->ssa->def =
+         nir_fmul(nb, nir_imm_float(nb, 0.5f),
+                      nir_fadd(nb, build_exp(nb, src[0]),
+                                   build_exp(nb, nir_fneg(nb, src[0]))));
+      return;
+
+   case GLSLstd450Tanh:
+      /* (0.5 * (e^x - e^(-x))) / (0.5 * (e^x + e^(-x))) */
+      val->ssa->def =
+         nir_fdiv(nb, nir_fmul(nb, nir_imm_float(nb, 0.5f),
+                                   nir_fsub(nb, build_exp(nb, src[0]),
+                                                build_exp(nb, nir_fneg(nb, src[0])))),
+                      nir_fmul(nb, nir_imm_float(nb, 0.5f),
+                                   nir_fadd(nb, build_exp(nb, src[0]),
+                                                build_exp(nb, nir_fneg(nb, src[0])))));
+      return;
+
+   case GLSLstd450Asinh:
+      val->ssa->def = nir_fmul(nb, nir_fsign(nb, src[0]),
+         build_log(nb, nir_fadd(nb, nir_fabs(nb, src[0]),
+                       nir_fsqrt(nb, nir_fadd(nb, nir_fmul(nb, src[0], src[0]),
+                                                  nir_imm_float(nb, 1.0f))))));
+      return;
+   case GLSLstd450Acosh:
+      val->ssa->def = build_log(nb, nir_fadd(nb, src[0],
+         nir_fsqrt(nb, nir_fsub(nb, nir_fmul(nb, src[0], src[0]),
+                                    nir_imm_float(nb, 1.0f)))));
+      return;
+   case GLSLstd450Atanh: {
+      nir_ssa_def *one = nir_imm_float(nb, 1.0);
+      val->ssa->def = nir_fmul(nb, nir_imm_float(nb, 0.5f),
+         build_log(nb, nir_fdiv(nb, nir_fadd(nb, one, src[0]),
+                                    nir_fsub(nb, one, src[0]))));
+      return;
+   }
+
+   case GLSLstd450FindILsb:   op = nir_op_find_lsb;   break;
+   case GLSLstd450FindSMsb:   op = nir_op_ifind_msb;  break;
+   case GLSLstd450FindUMsb:   op = nir_op_ufind_msb;  break;
+
+   case GLSLstd450Asin:
+      val->ssa->def = build_asin(nb, src[0], 0.086566724, -0.03102955);
+      return;
+
+   case GLSLstd450Acos:
+      val->ssa->def = nir_fsub(nb, nir_imm_float(nb, M_PI_2f),
+                               build_asin(nb, src[0], 0.08132463, -0.02363318));
+      return;
+
+   case GLSLstd450Atan:
+      val->ssa->def = build_atan(nb, src[0]);
+      return;
+
+   case GLSLstd450Atan2:
+      val->ssa->def = build_atan2(nb, src[0], src[1]);
+      return;
+
+   case GLSLstd450Frexp: {
+      nir_ssa_def *exponent;
+      val->ssa->def = build_frexp(nb, src[0], &exponent);
+      nir_store_deref_var(nb, vtn_nir_deref(b, w[6]), exponent, 0xf);
+      return;
+   }
+
+   case GLSLstd450FrexpStruct: {
+      assert(glsl_type_is_struct(val->ssa->type));
+      val->ssa->elems[0]->def = build_frexp(nb, src[0],
+                                            &val->ssa->elems[1]->def);
+      return;
+   }
+
+   case GLSLstd450PackDouble2x32:
+   case GLSLstd450UnpackDouble2x32:
+   default:
+      unreachable("Unhandled opcode");
+   }
+
+   nir_alu_instr *instr = nir_alu_instr_create(b->shader, op);
+   nir_ssa_dest_init(&instr->instr, &instr->dest.dest,
+                     glsl_get_vector_elements(val->ssa->type), val->name);
+   instr->dest.write_mask = (1 << instr->dest.dest.ssa.num_components) - 1;
+   val->ssa->def = &instr->dest.dest.ssa;
+
+   for (unsigned i = 0; i < nir_op_infos[op].num_inputs; i++)
+      instr->src[i].src = nir_src_for_ssa(src[i]);
+
+   nir_builder_instr_insert(nb, &instr->instr);
+}
+
+bool
+vtn_handle_glsl450_instruction(struct vtn_builder *b, uint32_t ext_opcode,
+                               const uint32_t *w, unsigned count)
+{
+   switch ((enum GLSLstd450)ext_opcode) {
+   case GLSLstd450Determinant: {
+      struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_ssa);
+      val->ssa = rzalloc(b, struct vtn_ssa_value);
+      val->ssa->type = vtn_value(b, w[1], vtn_value_type_type)->type->type;
+      val->ssa->def = build_mat_det(b, vtn_ssa_value(b, w[5]));
+      break;
+   }
+
+   case GLSLstd450MatrixInverse: {
+      struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_ssa);
+      val->ssa = matrix_inverse(b, vtn_ssa_value(b, w[5]));
+      break;
+   }
+
+   case GLSLstd450InterpolateAtCentroid:
+   case GLSLstd450InterpolateAtSample:
+   case GLSLstd450InterpolateAtOffset:
+      unreachable("Unhandled opcode");
+
+   default:
+      handle_glsl450_alu(b, (enum GLSLstd450)ext_opcode, w, count);
+   }
+
+   return true;
+}
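
The lowering above open-codes the GLSL.std.450 identities it emits (SmoothStep, Refract, the hyperbolics) rather than using dedicated NIR opcodes. As a rough host-side sketch of two of those identities, for illustration only and using scalar stand-ins for the vector operations:

   #include <math.h>
   #include <stdio.h>

   /* t = clamp((x - edge0) / (edge1 - edge0), 0, 1); result = t*t*(3 - 2t) */
   static float smoothstep_ref(float edge0, float edge1, float x)
   {
      float t = (x - edge0) / (edge1 - edge0);
      t = fminf(fmaxf(t, 0.0f), 1.0f);
      return t * t * (3.0f - 2.0f * t);
   }

   /* Scalar stand-in for Refract: k = 1 - eta^2 * (1 - dot(N, I)^2) */
   static float refract_ref(float I, float N, float eta)
   {
      float n_dot_i = N * I;
      float k = 1.0f - eta * eta * (1.0f - n_dot_i * n_dot_i);
      return (k < 0.0f) ? 0.0f : eta * I - (eta * n_dot_i + sqrtf(k)) * N;
   }

   int main(void)
   {
      printf("%f %f\n", smoothstep_ref(0.0f, 1.0f, 0.25f),
                        refract_ref(1.0f, -1.0f, 0.9f));
      return 0;
   }
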
diff --git a/src/compiler/nir/spirv/vtn_private.h b/src/compiler/nir/spirv/vtn_private.h
new file mode 100644
index 0000000..3840d8c
--- /dev/null
@@ -0,0 +1,484 @@
+/*
+ * Copyright © 2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ *    Jason Ekstrand (jason@jlekstrand.net)
+ *
+ */
+
+#include "nir/nir.h"
+#include "nir/nir_builder.h"
+#include "nir/nir_array.h"
+#include "nir_spirv.h"
+#include "spirv.h"
+
+struct vtn_builder;
+struct vtn_decoration;
+
+enum vtn_value_type {
+   vtn_value_type_invalid = 0,
+   vtn_value_type_undef,
+   vtn_value_type_string,
+   vtn_value_type_decoration_group,
+   vtn_value_type_type,
+   vtn_value_type_constant,
+   vtn_value_type_access_chain,
+   vtn_value_type_function,
+   vtn_value_type_block,
+   vtn_value_type_ssa,
+   vtn_value_type_extension,
+   vtn_value_type_image_pointer,
+   vtn_value_type_sampled_image,
+};
+
+enum vtn_branch_type {
+   vtn_branch_type_none,
+   vtn_branch_type_switch_break,
+   vtn_branch_type_switch_fallthrough,
+   vtn_branch_type_loop_break,
+   vtn_branch_type_loop_continue,
+   vtn_branch_type_discard,
+   vtn_branch_type_return,
+};
+
+enum vtn_cf_node_type {
+   vtn_cf_node_type_block,
+   vtn_cf_node_type_if,
+   vtn_cf_node_type_loop,
+   vtn_cf_node_type_switch,
+};
+
+struct vtn_cf_node {
+   struct list_head link;
+   enum vtn_cf_node_type type;
+};
+
+struct vtn_loop {
+   struct vtn_cf_node node;
+
+   /* The main body of the loop */
+   struct list_head body;
+
+   /* The "continue" part of the loop.  This gets executed after the body
+    * and is where you go when you hit a continue.
+    */
+   struct list_head cont_body;
+
+   SpvLoopControlMask control;
+};
+
+struct vtn_if {
+   struct vtn_cf_node node;
+
+   uint32_t condition;
+
+   enum vtn_branch_type then_type;
+   struct list_head then_body;
+
+   enum vtn_branch_type else_type;
+   struct list_head else_body;
+
+   SpvSelectionControlMask control;
+};
+
+struct vtn_case {
+   struct list_head link;
+
+   struct list_head body;
+
+   /* The block that starts this case */
+   struct vtn_block *start_block;
+
+   /* The fallthrough case, if any */
+   struct vtn_case *fallthrough;
+
+   /* The uint32_t values that map to this case */
+   nir_array values;
+
+   /* True if this is the default case */
+   bool is_default;
+
+   /* Initialized to false; used when sorting the list of cases */
+   bool visited;
+};
+
+struct vtn_switch {
+   struct vtn_cf_node node;
+
+   uint32_t selector;
+
+   struct list_head cases;
+};
+
+struct vtn_block {
+   struct vtn_cf_node node;
+
+   /** A pointer to the label instruction */
+   const uint32_t *label;
+
+   /** A pointer to the merge instruction (or NULL if none exists) */
+   const uint32_t *merge;
+
+   /** A pointer to the branch instruction that ends this block */
+   const uint32_t *branch;
+
+   enum vtn_branch_type branch_type;
+
+   /** Points to the loop that this block starts (if it starts a loop) */
+   struct vtn_loop *loop;
+
+   /** Points to the switch case started by this block (if any) */
+   struct vtn_case *switch_case;
+
+   /** The last NIR block generated for this SPIR-V block. */
+   nir_block *end_block;
+};
+
+struct vtn_function {
+   struct exec_node node;
+
+   nir_function_impl *impl;
+   struct vtn_block *start_block;
+
+   struct list_head body;
+
+   const uint32_t *end;
+
+   SpvFunctionControlMask control;
+};
+
+typedef bool (*vtn_instruction_handler)(struct vtn_builder *, uint32_t,
+                                        const uint32_t *, unsigned);
+
+void vtn_build_cfg(struct vtn_builder *b, const uint32_t *words,
+                   const uint32_t *end);
+void vtn_function_emit(struct vtn_builder *b, struct vtn_function *func,
+                       vtn_instruction_handler instruction_handler);
+
+const uint32_t *
+vtn_foreach_instruction(struct vtn_builder *b, const uint32_t *start,
+                        const uint32_t *end, vtn_instruction_handler handler);
+
+struct vtn_ssa_value {
+   union {
+      nir_ssa_def *def;
+      struct vtn_ssa_value **elems;
+   };
+
+   /* For matrices, if this is non-NULL, then this value is actually the
+    * transpose of some other value.  The value that `transposed` points to
+    * always dominates this value.
+    */
+   struct vtn_ssa_value *transposed;
+
+   const struct glsl_type *type;
+};
+
+struct vtn_type {
+   const struct glsl_type *type;
+
+   /* The value that declares this type.  Used for finding decorations */
+   struct vtn_value *val;
+
+   /* for matrices, whether the matrix is stored row-major */
+   bool row_major;
+
+   /* for structs, the offset of each member */
+   unsigned *offsets;
+
+   /* for structs, whether it was decorated as a "non-SSBO-like" block */
+   bool block;
+
+   /* for structs, whether it was decorated as an "SSBO-like" block */
+   bool buffer_block;
+
+   /* for structs with block == true, whether this is a builtin block (i.e. a
+    * block that contains only builtins).
+    */
+   bool builtin_block;
+
+   /* Image format for image_load_store type images */
+   unsigned image_format;
+
+   /* Access qualifier for storage images */
+   SpvAccessQualifier access_qualifier;
+
+   /* for arrays and matrices, the array stride */
+   unsigned stride;
+
+   /* for arrays, the vtn_type for the elements of the array */
+   struct vtn_type *array_element;
+
+   /* for structures, the vtn_type for each member */
+   struct vtn_type **members;
+
+   /* Whether this type, or a parent type, has been decorated as a builtin */
+   bool is_builtin;
+
+   SpvBuiltIn builtin;
+};
+
+struct vtn_variable;
+
+enum vtn_access_mode {
+   vtn_access_mode_id,
+   vtn_access_mode_literal,
+};
+
+struct vtn_access_link {
+   enum vtn_access_mode mode;
+   uint32_t id;
+};
+
+struct vtn_access_chain {
+   struct vtn_variable *var;
+
+   uint32_t length;
+
+   /* Struct elements and array offsets */
+   struct vtn_access_link link[0];
+};
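
The trailing link[0] member makes vtn_access_chain a variable-length structure, so a chain with N links lives in a single allocation. A minimal sketch of the sizing, assuming plain malloc for illustration (the code in vtn_variables.c uses ralloc_size on the builder instead):

   #include <stdint.h>
   #include <stdlib.h>

   /* Hypothetical helper, not part of this header: allocate a chain with
    * `length` links in one block, mirroring how vtn_access_chain_extend()
    * sizes its allocation.
    */
   static struct vtn_access_chain *
   alloc_chain_sketch(struct vtn_variable *var, uint32_t length)
   {
      struct vtn_access_chain *chain =
         malloc(sizeof(*chain) + length * sizeof(chain->link[0]));
      chain->var = var;
      chain->length = length;
      return chain;
   }
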
+
+enum vtn_variable_mode {
+   vtn_variable_mode_local,
+   vtn_variable_mode_global,
+   vtn_variable_mode_param,
+   vtn_variable_mode_ubo,
+   vtn_variable_mode_ssbo,
+   vtn_variable_mode_push_constant,
+   vtn_variable_mode_image,
+   vtn_variable_mode_sampler,
+   vtn_variable_mode_workgroup,
+   vtn_variable_mode_input,
+   vtn_variable_mode_output,
+};
+
+struct vtn_variable {
+   enum vtn_variable_mode mode;
+
+   struct vtn_type *type;
+
+   unsigned descriptor_set;
+   unsigned binding;
+
+   nir_variable *var;
+   nir_variable **members;
+
+   struct vtn_access_chain chain;
+};
+
+struct vtn_image_pointer {
+   struct vtn_access_chain *image;
+   nir_ssa_def *coord;
+   nir_ssa_def *sample;
+};
+
+struct vtn_sampled_image {
+   struct vtn_access_chain *image; /* Image or array of images */
+   struct vtn_access_chain *sampler; /* Sampler */
+};
+
+struct vtn_value {
+   enum vtn_value_type value_type;
+   const char *name;
+   struct vtn_decoration *decoration;
+   union {
+      void *ptr;
+      char *str;
+      struct vtn_type *type;
+      struct {
+         nir_constant *constant;
+         const struct glsl_type *const_type;
+      };
+      struct vtn_access_chain *access_chain;
+      struct vtn_image_pointer *image;
+      struct vtn_sampled_image *sampled_image;
+      struct vtn_function *func;
+      struct vtn_block *block;
+      struct vtn_ssa_value *ssa;
+      vtn_instruction_handler ext_handler;
+   };
+};
+
+#define VTN_DEC_DECORATION -1
+#define VTN_DEC_EXECUTION_MODE -2
+#define VTN_DEC_STRUCT_MEMBER0 0
+
+struct vtn_decoration {
+   struct vtn_decoration *next;
+
+   /* Specifies how to apply this decoration.  Negative values mean it
+    * applies to the value as a whole: VTN_DEC_DECORATION for an ordinary
+    * decoration and VTN_DEC_EXECUTION_MODE for an execution mode (see the
+    * #defines above).  Non-negative values give the index of the structure
+    * member it applies to.
+    */
+   int scope;
+
+   const uint32_t *literals;
+   struct vtn_value *group;
+
+   union {
+      SpvDecoration decoration;
+      SpvExecutionMode exec_mode;
+   };
+};
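
A small sketch of how the scope encoding above is typically interpreted; the helper name is hypothetical and not something defined in this header:

   /* Hypothetical helper, for illustration: classify a vtn_decoration by its
    * scope field, following the VTN_DEC_* encoding above.
    */
   static inline const char *
   vtn_dec_scope_desc(int scope)
   {
      if (scope == VTN_DEC_DECORATION)
         return "applies to the value as a whole (decoration)";
      if (scope == VTN_DEC_EXECUTION_MODE)
         return "applies to an entry point (execution mode)";
      /* scope >= VTN_DEC_STRUCT_MEMBER0: applies to struct member `scope` */
      return "applies to a structure member";
   }
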
+
+struct vtn_builder {
+   nir_builder nb;
+
+   nir_shader *shader;
+   nir_function_impl *impl;
+   struct vtn_block *block;
+
+   /* Current file, line, and column.  Useful for debugging.  Set
+    * automatically by vtn_foreach_instruction.
+    */
+   char *file;
+   int line, col;
+
+   /*
+    * In SPIR-V, constants are global, whereas in NIR, the load_const
+    * instruction we use is per-function. So while we parse each function, we
+    * keep a hash table of constants we've resolved to nir_ssa_defs so
+    * far, and we lazily resolve them when we see them used in a function.
+    */
+   struct hash_table *const_table;
+
+   /*
+    * Map from phi instructions (pointer to the start of the instruction)
+    * to the variable corresponding to it.
+    */
+   struct hash_table *phi_table;
+
+   unsigned num_specializations;
+   struct nir_spirv_specialization *specializations;
+
+   unsigned value_id_bound;
+   struct vtn_value *values;
+
+   gl_shader_stage entry_point_stage;
+   const char *entry_point_name;
+   struct vtn_value *entry_point;
+   bool origin_upper_left;
+
+   struct vtn_function *func;
+   struct exec_list functions;
+
+   /* Current function parameter index */
+   unsigned func_param_idx;
+
+   bool has_loop_continue;
+};
+
+static inline struct vtn_value *
+vtn_push_value(struct vtn_builder *b, uint32_t value_id,
+               enum vtn_value_type value_type)
+{
+   assert(value_id < b->value_id_bound);
+   assert(b->values[value_id].value_type == vtn_value_type_invalid);
+
+   b->values[value_id].value_type = value_type;
+
+   return &b->values[value_id];
+}
+
+static inline struct vtn_value *
+vtn_untyped_value(struct vtn_builder *b, uint32_t value_id)
+{
+   assert(value_id < b->value_id_bound);
+   return &b->values[value_id];
+}
+
+static inline struct vtn_value *
+vtn_value(struct vtn_builder *b, uint32_t value_id,
+          enum vtn_value_type value_type)
+{
+   struct vtn_value *val = vtn_untyped_value(b, value_id);
+   assert(val->value_type == value_type);
+   return val;
+}
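
Together these helpers implement the SPIR-V <id> table: a result <id> is pushed exactly once with its value type, and later uses look it up with the type re-asserted. A minimal sketch of the pattern, assuming a hypothetical word layout where w[2] is the result <id> and w[3] an operand <id>:

   /* Sketch of a typical handler, for illustration only. */
   static inline void
   example_copy_ssa(struct vtn_builder *b, const uint32_t *w)
   {
      struct vtn_value *dest = vtn_push_value(b, w[2], vtn_value_type_ssa);
      struct vtn_value *src  = vtn_value(b, w[3], vtn_value_type_ssa);
      dest->ssa = src->ssa;
   }
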
+
+struct vtn_ssa_value *vtn_ssa_value(struct vtn_builder *b, uint32_t value_id);
+
+struct vtn_ssa_value *vtn_create_ssa_value(struct vtn_builder *b,
+                                           const struct glsl_type *type);
+
+struct vtn_ssa_value *vtn_ssa_transpose(struct vtn_builder *b,
+                                        struct vtn_ssa_value *src);
+
+nir_ssa_def *vtn_vector_extract(struct vtn_builder *b, nir_ssa_def *src,
+                                unsigned index);
+nir_ssa_def *vtn_vector_extract_dynamic(struct vtn_builder *b, nir_ssa_def *src,
+                                        nir_ssa_def *index);
+nir_ssa_def *vtn_vector_insert(struct vtn_builder *b, nir_ssa_def *src,
+                               nir_ssa_def *insert, unsigned index);
+nir_ssa_def *vtn_vector_insert_dynamic(struct vtn_builder *b, nir_ssa_def *src,
+                                       nir_ssa_def *insert, nir_ssa_def *index);
+
+nir_deref_var *vtn_nir_deref(struct vtn_builder *b, uint32_t id);
+
+nir_deref_var *vtn_access_chain_to_deref(struct vtn_builder *b,
+                                         struct vtn_access_chain *chain);
+nir_ssa_def *
+vtn_access_chain_to_offset(struct vtn_builder *b,
+                           struct vtn_access_chain *chain,
+                           nir_ssa_def **index_out, struct vtn_type **type_out,
+                           unsigned *end_idx_out, bool stop_at_matrix);
+
+struct vtn_ssa_value *vtn_local_load(struct vtn_builder *b, nir_deref_var *src);
+
+void vtn_local_store(struct vtn_builder *b, struct vtn_ssa_value *src,
+                     nir_deref_var *dest);
+
+struct vtn_ssa_value *
+vtn_variable_load(struct vtn_builder *b, struct vtn_access_chain *src);
+
+void vtn_variable_store(struct vtn_builder *b, struct vtn_ssa_value *src,
+                        struct vtn_access_chain *dest);
+
+void vtn_handle_variables(struct vtn_builder *b, SpvOp opcode,
+                          const uint32_t *w, unsigned count);
+
+
+typedef void (*vtn_decoration_foreach_cb)(struct vtn_builder *,
+                                          struct vtn_value *,
+                                          int member,
+                                          const struct vtn_decoration *,
+                                          void *);
+
+void vtn_foreach_decoration(struct vtn_builder *b, struct vtn_value *value,
+                            vtn_decoration_foreach_cb cb, void *data);
+
+typedef void (*vtn_execution_mode_foreach_cb)(struct vtn_builder *,
+                                              struct vtn_value *,
+                                              const struct vtn_decoration *,
+                                              void *);
+
+void vtn_foreach_execution_mode(struct vtn_builder *b, struct vtn_value *value,
+                                vtn_execution_mode_foreach_cb cb, void *data);
+
+nir_op vtn_nir_alu_op_for_spirv_opcode(SpvOp opcode, bool *swap);
+
+void vtn_handle_alu(struct vtn_builder *b, SpvOp opcode,
+                    const uint32_t *w, unsigned count);
+
+bool vtn_handle_glsl450_instruction(struct vtn_builder *b, uint32_t ext_opcode,
+                                    const uint32_t *words, unsigned count);
diff --git a/src/compiler/nir/spirv/vtn_variables.c b/src/compiler/nir/spirv/vtn_variables.c
new file mode 100644
index 0000000..31bf416
--- /dev/null
@@ -0,0 +1,1412 @@
+/*
+ * Copyright © 2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ *    Jason Ekstrand (jason@jlekstrand.net)
+ *
+ */
+
+#include "vtn_private.h"
+
+static struct vtn_access_chain *
+vtn_access_chain_extend(struct vtn_builder *b, struct vtn_access_chain *old,
+                        unsigned new_ids)
+{
+   struct vtn_access_chain *chain;
+
+   unsigned new_len = old->length + new_ids;
+   chain = ralloc_size(b, sizeof(*chain) + new_len * sizeof(chain->link[0]));
+
+   chain->var = old->var;
+   chain->length = new_len;
+
+   for (unsigned i = 0; i < old->length; i++)
+      chain->link[i] = old->link[i];
+
+   return chain;
+}
+
+static nir_ssa_def *
+vtn_access_link_as_ssa(struct vtn_builder *b, struct vtn_access_link link,
+                       unsigned stride)
+{
+   assert(stride > 0);
+   if (link.mode == vtn_access_mode_literal) {
+      return nir_imm_int(&b->nb, link.id * stride);
+   } else if (stride == 1) {
+      return vtn_ssa_value(b, link.id)->def;
+   } else {
+      return nir_imul(&b->nb, vtn_ssa_value(b, link.id)->def,
+                              nir_imm_int(&b->nb, stride));
+   }
+}
+
+static struct vtn_type *
+vtn_access_chain_tail_type(struct vtn_builder *b,
+                           struct vtn_access_chain *chain)
+{
+   struct vtn_type *type = chain->var->type;
+   for (unsigned i = 0; i < chain->length; i++) {
+      if (glsl_type_is_struct(type->type)) {
+         assert(chain->link[i].mode == vtn_access_mode_literal);
+         type = type->members[chain->link[i].id];
+      } else {
+         type = type->array_element;
+      }
+   }
+   return type;
+}
+
+/* Crawls a chain of array derefs and rewrites the types so that the
+ * lengths stay the same but the terminal type is the one given by the
+ * type parameter.  This is useful for split structures.
+ */
+static void
+rewrite_deref_types(nir_deref *deref, const struct glsl_type *type)
+{
+   deref->type = type;
+   if (deref->child) {
+      assert(deref->child->deref_type == nir_deref_type_array);
+      assert(glsl_type_is_array(deref->type));
+      rewrite_deref_types(deref->child, glsl_get_array_element(type));
+   }
+}
+
+nir_deref_var *
+vtn_access_chain_to_deref(struct vtn_builder *b, struct vtn_access_chain *chain)
+{
+   nir_deref_var *deref_var;
+   if (chain->var->var) {
+      deref_var = nir_deref_var_create(b, chain->var->var);
+   } else {
+      assert(chain->var->members);
+      /* Create the deref_var manually.  It will get filled out later. */
+      deref_var = rzalloc(b, nir_deref_var);
+      deref_var->deref.deref_type = nir_deref_type_var;
+   }
+
+   struct vtn_type *deref_type = chain->var->type;
+   nir_deref *tail = &deref_var->deref;
+   nir_variable **members = chain->var->members;
+
+   for (unsigned i = 0; i < chain->length; i++) {
+      enum glsl_base_type base_type = glsl_get_base_type(deref_type->type);
+      switch (base_type) {
+      case GLSL_TYPE_UINT:
+      case GLSL_TYPE_INT:
+      case GLSL_TYPE_FLOAT:
+      case GLSL_TYPE_DOUBLE:
+      case GLSL_TYPE_BOOL:
+      case GLSL_TYPE_ARRAY: {
+         deref_type = deref_type->array_element;
+
+         nir_deref_array *deref_arr = nir_deref_array_create(b);
+         deref_arr->deref.type = deref_type->type;
+
+         if (chain->link[i].mode == vtn_access_mode_literal) {
+            deref_arr->deref_array_type = nir_deref_array_type_direct;
+            deref_arr->base_offset = chain->link[i].id;
+         } else {
+            assert(chain->link[i].mode == vtn_access_mode_id);
+            deref_arr->deref_array_type = nir_deref_array_type_indirect;
+            deref_arr->base_offset = 0;
+            deref_arr->indirect =
+               nir_src_for_ssa(vtn_ssa_value(b, chain->link[i].id)->def);
+         }
+         tail->child = &deref_arr->deref;
+         tail = tail->child;
+         break;
+      }
+
+      case GLSL_TYPE_STRUCT: {
+         assert(chain->link[i].mode == vtn_access_mode_literal);
+         unsigned idx = chain->link[i].id;
+         deref_type = deref_type->members[idx];
+         if (members) {
+            /* This is a pre-split structure. */
+            deref_var->var = members[idx];
+            rewrite_deref_types(&deref_var->deref, members[idx]->type);
+            assert(tail->type == deref_type->type);
+            members = NULL;
+         } else {
+            nir_deref_struct *deref_struct = nir_deref_struct_create(b, idx);
+            deref_struct->deref.type = deref_type->type;
+            tail->child = &deref_struct->deref;
+            tail = tail->child;
+         }
+         break;
+      }
+      default:
+         unreachable("Invalid type for deref");
+      }
+   }
+
+   assert(members == NULL);
+   return deref_var;
+}
+
+static void
+_vtn_local_load_store(struct vtn_builder *b, bool load, nir_deref_var *deref,
+                      nir_deref *tail, struct vtn_ssa_value *inout)
+{
+   /* The deref tail may contain a deref to select a component of a vector (in
+    * other words, it might not be an actual tail) so we have to save it away
+    * here since we overwrite it later.
+    */
+   nir_deref *old_child = tail->child;
+
+   if (glsl_type_is_vector_or_scalar(tail->type)) {
+      /* Terminate the deref chain in case there is one more link to pick
+       * off a component of the vector.
+       */
+      tail->child = NULL;
+
+      nir_intrinsic_op op = load ? nir_intrinsic_load_var :
+                                   nir_intrinsic_store_var;
+
+      nir_intrinsic_instr *intrin = nir_intrinsic_instr_create(b->shader, op);
+      intrin->variables[0] =
+         nir_deref_as_var(nir_copy_deref(intrin, &deref->deref));
+      intrin->num_components = glsl_get_vector_elements(tail->type);
+
+      if (load) {
+         nir_ssa_dest_init(&intrin->instr, &intrin->dest,
+                           intrin->num_components, NULL);
+         inout->def = &intrin->dest.ssa;
+      } else {
+         nir_intrinsic_set_write_mask(intrin, (1 << intrin->num_components) - 1);
+         intrin->src[0] = nir_src_for_ssa(inout->def);
+      }
+
+      nir_builder_instr_insert(&b->nb, &intrin->instr);
+   } else if (glsl_get_base_type(tail->type) == GLSL_TYPE_ARRAY ||
+              glsl_type_is_matrix(tail->type)) {
+      unsigned elems = glsl_get_length(tail->type);
+      nir_deref_array *deref_arr = nir_deref_array_create(b);
+      deref_arr->deref_array_type = nir_deref_array_type_direct;
+      deref_arr->deref.type = glsl_get_array_element(tail->type);
+      tail->child = &deref_arr->deref;
+      for (unsigned i = 0; i < elems; i++) {
+         deref_arr->base_offset = i;
+         _vtn_local_load_store(b, load, deref, tail->child, inout->elems[i]);
+      }
+   } else {
+      assert(glsl_get_base_type(tail->type) == GLSL_TYPE_STRUCT);
+      unsigned elems = glsl_get_length(tail->type);
+      nir_deref_struct *deref_struct = nir_deref_struct_create(b, 0);
+      tail->child = &deref_struct->deref;
+      for (unsigned i = 0; i < elems; i++) {
+         deref_struct->index = i;
+         deref_struct->deref.type = glsl_get_struct_field(tail->type, i);
+         _vtn_local_load_store(b, load, deref, tail->child, inout->elems[i]);
+      }
+   }
+
+   tail->child = old_child;
+}
+
+nir_deref_var *
+vtn_nir_deref(struct vtn_builder *b, uint32_t id)
+{
+   struct vtn_access_chain *chain =
+      vtn_value(b, id, vtn_value_type_access_chain)->access_chain;
+
+   return vtn_access_chain_to_deref(b, chain);
+}
+
+/*
+ * Gets the NIR-level deref tail.  The tail may still have an array deref as
+ * a child that selects a single vector component, since OpAccessChain in
+ * SPIR-V allows per-component indexing.
+ */
+static nir_deref *
+get_deref_tail(nir_deref_var *deref)
+{
+   nir_deref *cur = &deref->deref;
+   while (!glsl_type_is_vector_or_scalar(cur->type) && cur->child)
+      cur = cur->child;
+
+   return cur;
+}
+
+struct vtn_ssa_value *
+vtn_local_load(struct vtn_builder *b, nir_deref_var *src)
+{
+   nir_deref *src_tail = get_deref_tail(src);
+   struct vtn_ssa_value *val = vtn_create_ssa_value(b, src_tail->type);
+   _vtn_local_load_store(b, true, src, src_tail, val);
+
+   if (src_tail->child) {
+      nir_deref_array *vec_deref = nir_deref_as_array(src_tail->child);
+      assert(vec_deref->deref.child == NULL);
+      val->type = vec_deref->deref.type;
+      if (vec_deref->deref_array_type == nir_deref_array_type_direct)
+         val->def = vtn_vector_extract(b, val->def, vec_deref->base_offset);
+      else
+         val->def = vtn_vector_extract_dynamic(b, val->def,
+                                               vec_deref->indirect.ssa);
+   }
+
+   return val;
+}
+
+void
+vtn_local_store(struct vtn_builder *b, struct vtn_ssa_value *src,
+                nir_deref_var *dest)
+{
+   nir_deref *dest_tail = get_deref_tail(dest);
+
+   if (dest_tail->child) {
+      struct vtn_ssa_value *val = vtn_create_ssa_value(b, dest_tail->type);
+      _vtn_local_load_store(b, true, dest, dest_tail, val);
+      nir_deref_array *deref = nir_deref_as_array(dest_tail->child);
+      assert(deref->deref.child == NULL);
+      if (deref->deref_array_type == nir_deref_array_type_direct)
+         val->def = vtn_vector_insert(b, val->def, src->def,
+                                      deref->base_offset);
+      else
+         val->def = vtn_vector_insert_dynamic(b, val->def, src->def,
+                                              deref->indirect.ssa);
+      _vtn_local_load_store(b, false, dest, dest_tail, val);
+   } else {
+      _vtn_local_load_store(b, false, dest, dest_tail, src);
+   }
+}
+
+static nir_ssa_def *
+get_vulkan_resource_index(struct vtn_builder *b, struct vtn_access_chain *chain,
+                          struct vtn_type **type, unsigned *chain_idx)
+{
+   /* Push constants have no explicit binding */
+   if (chain->var->mode == vtn_variable_mode_push_constant) {
+      *chain_idx = 0;
+      *type = chain->var->type;
+      return NULL;
+   }
+
+   nir_ssa_def *array_index;
+   if (glsl_type_is_array(chain->var->type->type)) {
+      assert(chain->length > 0);
+      array_index = vtn_access_link_as_ssa(b, chain->link[0], 1);
+      *chain_idx = 1;
+      *type = chain->var->type->array_element;
+   } else {
+      array_index = nir_imm_int(&b->nb, 0);
+      *chain_idx = 0;
+      *type = chain->var->type;
+   }
+
+   nir_intrinsic_instr *instr =
+      nir_intrinsic_instr_create(b->nb.shader,
+                                 nir_intrinsic_vulkan_resource_index);
+   instr->src[0] = nir_src_for_ssa(array_index);
+   nir_intrinsic_set_desc_set(instr, chain->var->descriptor_set);
+   nir_intrinsic_set_binding(instr, chain->var->binding);
+
+   nir_ssa_dest_init(&instr->instr, &instr->dest, 1, NULL);
+   nir_builder_instr_insert(&b->nb, &instr->instr);
+
+   return &instr->dest.ssa;
+}
+
+nir_ssa_def *
+vtn_access_chain_to_offset(struct vtn_builder *b,
+                           struct vtn_access_chain *chain,
+                           nir_ssa_def **index_out, struct vtn_type **type_out,
+                           unsigned *end_idx_out, bool stop_at_matrix)
+{
+   unsigned idx = 0;
+   struct vtn_type *type;
+   *index_out = get_vulkan_resource_index(b, chain, &type, &idx);
+
+   nir_ssa_def *offset = nir_imm_int(&b->nb, 0);
+   for (; idx < chain->length; idx++) {
+      enum glsl_base_type base_type = glsl_get_base_type(type->type);
+      switch (base_type) {
+      case GLSL_TYPE_UINT:
+      case GLSL_TYPE_INT:
+      case GLSL_TYPE_FLOAT:
+      case GLSL_TYPE_DOUBLE:
+      case GLSL_TYPE_BOOL:
+         /* Some users may not want matrix or vector derefs */
+         if (stop_at_matrix)
+            goto end;
+         /* Fall through */
+
+      case GLSL_TYPE_ARRAY:
+         offset = nir_iadd(&b->nb, offset,
+                           vtn_access_link_as_ssa(b, chain->link[idx],
+                                                  type->stride));
+
+         type = type->array_element;
+         break;
+
+      case GLSL_TYPE_STRUCT: {
+         assert(chain->link[idx].mode == vtn_access_mode_literal);
+         unsigned member = chain->link[idx].id;
+         offset = nir_iadd(&b->nb, offset,
+                           nir_imm_int(&b->nb, type->offsets[member]));
+         type = type->members[member];
+         break;
+      }
+
+      default:
+         unreachable("Invalid type for deref");
+      }
+   }
+
+end:
+   *type_out = type;
+   if (end_idx_out)
+      *end_idx_out = idx;
+
+   return offset;
+}
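
For a sense of the arithmetic the loop above emits, take a hypothetical block struct { vec4 a; float b[8]; } with member offsets {0, 16} and an array stride of 4 for b: accessing member 1, element 3 yields a byte offset of 16 + 3 * 4 = 28. When the index is a literal the multiply is folded into the immediate by vtn_access_link_as_ssa(); an <id> index emits a nir_imul instead. A scalar sketch of the literal-only case, for illustration only:

   static unsigned
   literal_member_array_offset(unsigned member_offset, unsigned array_stride,
                               unsigned index)
   {
      return member_offset + index * array_stride;   /* e.g. 16 + 3*4 = 28 */
   }
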
+
+static void
+_vtn_load_store_tail(struct vtn_builder *b, nir_intrinsic_op op, bool load,
+                     nir_ssa_def *index, nir_ssa_def *offset,
+                     struct vtn_ssa_value **inout, const struct glsl_type *type)
+{
+   nir_intrinsic_instr *instr = nir_intrinsic_instr_create(b->nb.shader, op);
+   instr->num_components = glsl_get_vector_elements(type);
+
+   int src = 0;
+   if (!load) {
+      nir_intrinsic_set_write_mask(instr, (1 << instr->num_components) - 1);
+      instr->src[src++] = nir_src_for_ssa((*inout)->def);
+   }
+
+   /* We set the base and size for push constant load to the entire push
+    * constant block for now.
+    */
+   if (op == nir_intrinsic_load_push_constant) {
+      nir_intrinsic_set_base(instr, 0);
+      nir_intrinsic_set_range(instr, 128);
+   }
+
+   if (index)
+      instr->src[src++] = nir_src_for_ssa(index);
+
+   instr->src[src++] = nir_src_for_ssa(offset);
+
+   if (load) {
+      nir_ssa_dest_init(&instr->instr, &instr->dest,
+                        instr->num_components, NULL);
+      (*inout)->def = &instr->dest.ssa;
+   }
+
+   nir_builder_instr_insert(&b->nb, &instr->instr);
+
+   if (load && glsl_get_base_type(type) == GLSL_TYPE_BOOL)
+      (*inout)->def = nir_ine(&b->nb, (*inout)->def, nir_imm_int(&b->nb, 0));
+}
+
+static void
+_vtn_block_load_store(struct vtn_builder *b, nir_intrinsic_op op, bool load,
+                      nir_ssa_def *index, nir_ssa_def *offset,
+                      struct vtn_access_chain *chain, unsigned chain_idx,
+                      struct vtn_type *type, struct vtn_ssa_value **inout)
+{
+   if (chain && chain_idx >= chain->length)
+      chain = NULL;
+
+   if (load && chain == NULL && *inout == NULL)
+      *inout = vtn_create_ssa_value(b, type->type);
+
+   enum glsl_base_type base_type = glsl_get_base_type(type->type);
+   switch (base_type) {
+   case GLSL_TYPE_UINT:
+   case GLSL_TYPE_INT:
+   case GLSL_TYPE_FLOAT:
+   case GLSL_TYPE_BOOL:
+      /* This is where things get interesting.  At this point, we've hit
+       * a vector, a scalar, or a matrix.
+       */
+      if (glsl_type_is_matrix(type->type)) {
+         if (chain == NULL) {
+            /* Loading the whole matrix */
+            struct vtn_ssa_value *transpose;
+            unsigned num_ops, vec_width;
+            if (type->row_major) {
+               num_ops = glsl_get_vector_elements(type->type);
+               vec_width = glsl_get_matrix_columns(type->type);
+               if (load) {
+                  const struct glsl_type *transpose_type =
+                     glsl_matrix_type(base_type, vec_width, num_ops);
+                  *inout = vtn_create_ssa_value(b, transpose_type);
+               } else {
+                  transpose = vtn_ssa_transpose(b, *inout);
+                  inout = &transpose;
+               }
+            } else {
+               num_ops = glsl_get_matrix_columns(type->type);
+               vec_width = glsl_get_vector_elements(type->type);
+            }
+
+            for (unsigned i = 0; i < num_ops; i++) {
+               nir_ssa_def *elem_offset =
+                  nir_iadd(&b->nb, offset,
+                           nir_imm_int(&b->nb, i * type->stride));
+               _vtn_load_store_tail(b, op, load, index, elem_offset,
+                                    &(*inout)->elems[i],
+                                    glsl_vector_type(base_type, vec_width));
+            }
+
+            if (load && type->row_major)
+               *inout = vtn_ssa_transpose(b, *inout);
+         } else if (type->row_major) {
+            /* Row-major but with an access chain. */
+            nir_ssa_def *col_offset =
+               vtn_access_link_as_ssa(b, chain->link[chain_idx],
+                                      type->array_element->stride);
+            offset = nir_iadd(&b->nb, offset, col_offset);
+
+            if (chain_idx + 1 < chain->length) {
+               /* Picking off a single element */
+               nir_ssa_def *row_offset =
+                  vtn_access_link_as_ssa(b, chain->link[chain_idx + 1],
+                                         type->stride);
+               offset = nir_iadd(&b->nb, offset, row_offset);
+               if (load)
+                  *inout = vtn_create_ssa_value(b, glsl_scalar_type(base_type));
+               _vtn_load_store_tail(b, op, load, index, offset, inout,
+                                    glsl_scalar_type(base_type));
+            } else {
+               /* Grabbing a column; picking one element off each row */
+               unsigned num_comps = glsl_get_vector_elements(type->type);
+               const struct glsl_type *column_type =
+                  glsl_get_column_type(type->type);
+
+               nir_ssa_def *comps[4];
+               for (unsigned i = 0; i < num_comps; i++) {
+                  nir_ssa_def *elem_offset =
+                     nir_iadd(&b->nb, offset,
+                              nir_imm_int(&b->nb, i * type->stride));
+
+                  struct vtn_ssa_value *comp, temp_val;
+                  if (!load) {
+                     temp_val.def = nir_channel(&b->nb, (*inout)->def, i);
+                     temp_val.type = glsl_scalar_type(base_type);
+                  }
+                  comp = &temp_val;
+                  _vtn_load_store_tail(b, op, load, index, elem_offset,
+                                       &comp, glsl_scalar_type(base_type));
+                  comps[i] = comp->def;
+               }
+
+               if (load) {
+                  if (*inout == NULL)
+                     *inout = vtn_create_ssa_value(b, column_type);
+
+                  (*inout)->def = nir_vec(&b->nb, comps, num_comps);
+               }
+            }
+         } else {
+            /* Column-major with a deref. Fall through to array case. */
+            nir_ssa_def *col_offset =
+               vtn_access_link_as_ssa(b, chain->link[chain_idx], type->stride);
+            offset = nir_iadd(&b->nb, offset, col_offset);
+
+            _vtn_block_load_store(b, op, load, index, offset,
+                                  chain, chain_idx + 1,
+                                  type->array_element, inout);
+         }
+      } else if (chain == NULL) {
+         /* Single whole vector */
+         assert(glsl_type_is_vector_or_scalar(type->type));
+         _vtn_load_store_tail(b, op, load, index, offset, inout, type->type);
+      } else {
+         /* Single component of a vector. Fall through to array case. */
+         nir_ssa_def *elem_offset =
+            vtn_access_link_as_ssa(b, chain->link[chain_idx], type->stride);
+         offset = nir_iadd(&b->nb, offset, elem_offset);
+
+         _vtn_block_load_store(b, op, load, index, offset, NULL, 0,
+                               type->array_element, inout);
+      }
+      return;
+
+   case GLSL_TYPE_ARRAY: {
+      unsigned elems = glsl_get_length(type->type);
+      for (unsigned i = 0; i < elems; i++) {
+         nir_ssa_def *elem_off =
+            nir_iadd(&b->nb, offset, nir_imm_int(&b->nb, i * type->stride));
+         _vtn_block_load_store(b, op, load, index, elem_off, NULL, 0,
+                               type->array_element, &(*inout)->elems[i]);
+      }
+      return;
+   }
+
+   case GLSL_TYPE_STRUCT: {
+      unsigned elems = glsl_get_length(type->type);
+      for (unsigned i = 0; i < elems; i++) {
+         nir_ssa_def *elem_off =
+            nir_iadd(&b->nb, offset, nir_imm_int(&b->nb, type->offsets[i]));
+         _vtn_block_load_store(b, op, load, index, elem_off, NULL, 0,
+                               type->members[i], &(*inout)->elems[i]);
+      }
+      return;
+   }
+
+   default:
+      unreachable("Invalid block member type");
+   }
+}
+
+static struct vtn_ssa_value *
+vtn_block_load(struct vtn_builder *b, struct vtn_access_chain *src)
+{
+   nir_intrinsic_op op;
+   switch (src->var->mode) {
+   case vtn_variable_mode_ubo:
+      op = nir_intrinsic_load_ubo;
+      break;
+   case vtn_variable_mode_ssbo:
+      op = nir_intrinsic_load_ssbo;
+      break;
+   case vtn_variable_mode_push_constant:
+      op = nir_intrinsic_load_push_constant;
+      break;
+   default:
+      assert(!"Invalid block variable mode");
+   }
+
+   nir_ssa_def *offset, *index = NULL;
+   struct vtn_type *type;
+   unsigned chain_idx;
+   offset = vtn_access_chain_to_offset(b, src, &index, &type, &chain_idx, true);
+
+   struct vtn_ssa_value *value = NULL;
+   _vtn_block_load_store(b, op, true, index, offset,
+                         src, chain_idx, type, &value);
+   return value;
+}
+
+static void
+vtn_block_store(struct vtn_builder *b, struct vtn_ssa_value *src,
+                struct vtn_access_chain *dst)
+{
+   nir_ssa_def *offset, *index = NULL;
+   struct vtn_type *type;
+   unsigned chain_idx;
+   offset = vtn_access_chain_to_offset(b, dst, &index, &type, &chain_idx, true);
+
+   _vtn_block_load_store(b, nir_intrinsic_store_ssbo, false, index, offset,
+                         dst, chain_idx, type, &src);
+}
+
+static bool
+vtn_variable_is_external_block(struct vtn_variable *var)
+{
+   return var->mode == vtn_variable_mode_ssbo ||
+          var->mode == vtn_variable_mode_ubo ||
+          var->mode == vtn_variable_mode_push_constant;
+}
+
+static void
+_vtn_variable_load_store(struct vtn_builder *b, bool load,
+                         struct vtn_access_chain *chain,
+                         struct vtn_type *tail_type,
+                         struct vtn_ssa_value **inout)
+{
+   enum glsl_base_type base_type = glsl_get_base_type(tail_type->type);
+   switch (base_type) {
+   case GLSL_TYPE_UINT:
+   case GLSL_TYPE_INT:
+   case GLSL_TYPE_FLOAT:
+   case GLSL_TYPE_BOOL:
+      /* At this point, we have a scalar, vector, or matrix so we know that
+       * there cannot be any structure splitting still in the way.  By
+       * stopping at the matrix level rather than the vector level, we
+       * ensure that matrices get loaded in the optimal way even if they
+       * are stored row-major in a UBO.
+       */
+      if (load) {
+         *inout = vtn_local_load(b, vtn_access_chain_to_deref(b, chain));
+      } else {
+         vtn_local_store(b, *inout, vtn_access_chain_to_deref(b, chain));
+      }
+      return;
+
+   case GLSL_TYPE_ARRAY:
+   case GLSL_TYPE_STRUCT: {
+      struct vtn_access_chain *new_chain =
+         vtn_access_chain_extend(b, chain, 1);
+      new_chain->link[chain->length].mode = vtn_access_mode_literal;
+      unsigned elems = glsl_get_length(tail_type->type);
+      if (load) {
+         assert(*inout == NULL);
+         *inout = rzalloc(b, struct vtn_ssa_value);
+         (*inout)->type = tail_type->type;
+         (*inout)->elems = rzalloc_array(b, struct vtn_ssa_value *, elems);
+      }
+      for (unsigned i = 0; i < elems; i++) {
+         new_chain->link[chain->length].id = i;
+         struct vtn_type *elem_type = base_type == GLSL_TYPE_ARRAY ?
+            tail_type->array_element : tail_type->members[i];
+         _vtn_variable_load_store(b, load, new_chain, elem_type,
+                                  &(*inout)->elems[i]);
+      }
+      return;
+   }
+
+   default:
+      unreachable("Invalid access chain type");
+   }
+}
+
+struct vtn_ssa_value *
+vtn_variable_load(struct vtn_builder *b, struct vtn_access_chain *src)
+{
+   if (vtn_variable_is_external_block(src->var)) {
+      return vtn_block_load(b, src);
+   } else {
+      struct vtn_type *tail_type = vtn_access_chain_tail_type(b, src);
+      struct vtn_ssa_value *val = NULL;
+      _vtn_variable_load_store(b, true, src, tail_type, &val);
+      return val;
+   }
+}
+
+void
+vtn_variable_store(struct vtn_builder *b, struct vtn_ssa_value *src,
+                   struct vtn_access_chain *dest)
+{
+   if (vtn_variable_is_external_block(dest->var)) {
+      assert(dest->var->mode == vtn_variable_mode_ssbo);
+      vtn_block_store(b, src, dest);
+   } else {
+      struct vtn_type *tail_type = vtn_access_chain_tail_type(b, dest);
+      _vtn_variable_load_store(b, false, dest, tail_type, &src);
+   }
+}
+
+static void
+_vtn_variable_copy(struct vtn_builder *b, struct vtn_access_chain *dest,
+                   struct vtn_access_chain *src, struct vtn_type *tail_type)
+{
+   enum glsl_base_type base_type = glsl_get_base_type(tail_type->type);
+   switch (base_type) {
+   case GLSL_TYPE_UINT:
+   case GLSL_TYPE_INT:
+   case GLSL_TYPE_FLOAT:
+   case GLSL_TYPE_BOOL:
+      /* At this point, we have a scalar, vector, or matrix so we know that
+       * there cannot be any structure splitting still in the way.  By
+       * stopping at the matrix level rather than the vector level, we
+       * ensure that matrices get loaded in the optimal way even if they
+       * are stored row-major in a UBO.
+       */
+      vtn_variable_store(b, vtn_variable_load(b, src), dest);
+      return;
+
+   case GLSL_TYPE_ARRAY:
+   case GLSL_TYPE_STRUCT: {
+      struct vtn_access_chain *new_src, *new_dest;
+      new_src = vtn_access_chain_extend(b, src, 1);
+      new_dest = vtn_access_chain_extend(b, dest, 1);
+      new_src->link[src->length].mode = vtn_access_mode_literal;
+      new_dest->link[dest->length].mode = vtn_access_mode_literal;
+      unsigned elems = glsl_get_length(tail_type->type);
+      for (unsigned i = 0; i < elems; i++) {
+         new_src->link[src->length].id = i;
+         new_dest->link[dest->length].id = i;
+         struct vtn_type *elem_type = base_type == GLSL_TYPE_ARRAY ?
+            tail_type->array_element : tail_type->members[i];
+         _vtn_variable_copy(b, new_dest, new_src, elem_type);
+      }
+      return;
+   }
+
+   default:
+      unreachable("Invalid access chain type");
+   }
+}
+
+static void
+vtn_variable_copy(struct vtn_builder *b, struct vtn_access_chain *dest,
+                  struct vtn_access_chain *src)
+{
+   struct vtn_type *tail_type = vtn_access_chain_tail_type(b, src);
+   assert(vtn_access_chain_tail_type(b, dest)->type == tail_type->type);
+
+   /* TODO: At some point, we should add a special-case for when we can
+    * just emit a copy_var intrinsic.
+    */
+   _vtn_variable_copy(b, dest, src, tail_type);
+}
+
+static void
+set_mode_system_value(nir_variable_mode *mode)
+{
+   assert(*mode == nir_var_system_value || *mode == nir_var_shader_in);
+   *mode = nir_var_system_value;
+}
+
+static void
+vtn_get_builtin_location(struct vtn_builder *b,
+                         SpvBuiltIn builtin, int *location,
+                         nir_variable_mode *mode)
+{
+   switch (builtin) {
+   case SpvBuiltInPosition:
+      *location = VARYING_SLOT_POS;
+      break;
+   case SpvBuiltInPointSize:
+      *location = VARYING_SLOT_PSIZ;
+      break;
+   case SpvBuiltInClipDistance:
+      *location = VARYING_SLOT_CLIP_DIST0; /* XXX CLIP_DIST1? */
+      break;
+   case SpvBuiltInCullDistance:
+      /* XXX figure this out */
+      break;
+   case SpvBuiltInVertexIndex:
+      *location = SYSTEM_VALUE_VERTEX_ID;
+      set_mode_system_value(mode);
+      break;
+   case SpvBuiltInVertexId:
+      /* Vulkan defines VertexID to be zero-based and reserves the new
+       * builtin keyword VertexIndex to indicate the non-zero-based value.
+       */
+      *location = SYSTEM_VALUE_VERTEX_ID_ZERO_BASE;
+      set_mode_system_value(mode);
+      break;
+   case SpvBuiltInInstanceIndex:
+      *location = SYSTEM_VALUE_INSTANCE_INDEX;
+      set_mode_system_value(mode);
+      break;
+   case SpvBuiltInInstanceId:
+      *location = SYSTEM_VALUE_INSTANCE_ID;
+      set_mode_system_value(mode);
+      break;
+   case SpvBuiltInPrimitiveId:
+      *location = VARYING_SLOT_PRIMITIVE_ID;
+      *mode = nir_var_shader_out;
+      break;
+   case SpvBuiltInInvocationId:
+      *location = SYSTEM_VALUE_INVOCATION_ID;
+      set_mode_system_value(mode);
+      break;
+   case SpvBuiltInLayer:
+      *location = VARYING_SLOT_LAYER;
+      *mode = nir_var_shader_out;
+      break;
+   case SpvBuiltInViewportIndex:
+      *location = VARYING_SLOT_VIEWPORT;
+      if (b->shader->stage == MESA_SHADER_GEOMETRY)
+         *mode = nir_var_shader_out;
+      else if (b->shader->stage == MESA_SHADER_FRAGMENT)
+         *mode = nir_var_shader_in;
+      else
+         unreachable("invalid stage for SpvBuiltInViewportIndex");
+      break;
+   case SpvBuiltInTessLevelOuter:
+   case SpvBuiltInTessLevelInner:
+   case SpvBuiltInTessCoord:
+   case SpvBuiltInPatchVertices:
+      unreachable("no tessellation support");
+   case SpvBuiltInFragCoord:
+      *location = VARYING_SLOT_POS;
+      assert(*mode == nir_var_shader_in);
+      break;
+   case SpvBuiltInPointCoord:
+      *location = VARYING_SLOT_PNTC;
+      assert(*mode == nir_var_shader_in);
+      break;
+   case SpvBuiltInFrontFacing:
+      *location = VARYING_SLOT_FACE;
+      assert(*mode == nir_var_shader_in);
+      break;
+   case SpvBuiltInSampleId:
+      *location = SYSTEM_VALUE_SAMPLE_ID;
+      set_mode_system_value(mode);
+      break;
+   case SpvBuiltInSamplePosition:
+      *location = SYSTEM_VALUE_SAMPLE_POS;
+      set_mode_system_value(mode);
+      break;
+   case SpvBuiltInSampleMask:
+      *location = SYSTEM_VALUE_SAMPLE_MASK_IN; /* XXX out? */
+      set_mode_system_value(mode);
+      break;
+   case SpvBuiltInFragDepth:
+      *location = FRAG_RESULT_DEPTH;
+      assert(*mode == nir_var_shader_out);
+      break;
+   case SpvBuiltInNumWorkgroups:
+      *location = SYSTEM_VALUE_NUM_WORK_GROUPS;
+      set_mode_system_value(mode);
+      break;
+   case SpvBuiltInWorkgroupSize:
+      /* This should already be handled */
+      unreachable("unsupported builtin");
+      break;
+   case SpvBuiltInWorkgroupId:
+      *location = SYSTEM_VALUE_WORK_GROUP_ID;
+      set_mode_system_value(mode);
+      break;
+   case SpvBuiltInLocalInvocationId:
+      *location = SYSTEM_VALUE_LOCAL_INVOCATION_ID;
+      set_mode_system_value(mode);
+      break;
+   case SpvBuiltInLocalInvocationIndex:
+      *location = SYSTEM_VALUE_LOCAL_INVOCATION_INDEX;
+      set_mode_system_value(mode);
+      break;
+   case SpvBuiltInGlobalInvocationId:
+      *location = SYSTEM_VALUE_GLOBAL_INVOCATION_ID;
+      set_mode_system_value(mode);
+      break;
+   case SpvBuiltInHelperInvocation:
+   default:
+      unreachable("unsupported builtin");
+   }
+}
+
+static void
+var_decoration_cb(struct vtn_builder *b, struct vtn_value *val, int member,
+                  const struct vtn_decoration *dec, void *void_var)
+{
+   struct vtn_variable *vtn_var = void_var;
+
+   /* Handle decorations that apply to a vtn_variable as a whole */
+   switch (dec->decoration) {
+   case SpvDecorationBinding:
+      vtn_var->binding = dec->literals[0];
+      return;
+   case SpvDecorationDescriptorSet:
+      vtn_var->descriptor_set = dec->literals[0];
+      return;
+
+   case SpvDecorationLocation: {
+      unsigned location = dec->literals[0];
+      bool is_vertex_input;
+      if (b->shader->stage == MESA_SHADER_FRAGMENT &&
+          vtn_var->mode == vtn_variable_mode_output) {
+         is_vertex_input = false;
+         location += FRAG_RESULT_DATA0;
+      } else if (b->shader->stage == MESA_SHADER_VERTEX &&
+                 vtn_var->mode == vtn_variable_mode_input) {
+         is_vertex_input = true;
+         location += VERT_ATTRIB_GENERIC0;
+      } else if (vtn_var->mode == vtn_variable_mode_input ||
+                 vtn_var->mode == vtn_variable_mode_output) {
+         is_vertex_input = false;
+         location += VARYING_SLOT_VAR0;
+      } else {
+         assert(!"Location must be on input or output variable");
+      }
+
+      if (vtn_var->var) {
+         vtn_var->var->data.location = location;
+         vtn_var->var->data.explicit_location = true;
+      } else {
+         assert(vtn_var->members);
+         unsigned length = glsl_get_length(vtn_var->type->type);
+         for (unsigned i = 0; i < length; i++) {
+            vtn_var->members[i]->data.location = location;
+            vtn_var->members[i]->data.explicit_location = true;
+            location +=
+               glsl_count_attribute_slots(vtn_var->members[i]->interface_type,
+                                          is_vertex_input);
+         }
+      }
+      return;
+   }
+
+   default:
+      break;
+   }
+
+   /* Now we handle decorations that apply to a particular nir_variable */
+   nir_variable *nir_var = vtn_var->var;
+   if (val->value_type == vtn_value_type_access_chain) {
+      assert(val->access_chain->length == 0);
+      assert(val->access_chain->var == void_var);
+      assert(member == -1);
+   } else {
+      assert(val->value_type == vtn_value_type_type);
+      if (member != -1)
+         nir_var = vtn_var->members[member];
+   }
+
+   if (nir_var == NULL)
+      return;
+
+   switch (dec->decoration) {
+   case SpvDecorationRelaxedPrecision:
+      break; /* FIXME: Do nothing with this for now. */
+   case SpvDecorationNoPerspective:
+      nir_var->data.interpolation = INTERP_QUALIFIER_NOPERSPECTIVE;
+      break;
+   case SpvDecorationFlat:
+      nir_var->data.interpolation = INTERP_QUALIFIER_FLAT;
+      break;
+   case SpvDecorationCentroid:
+      nir_var->data.centroid = true;
+      break;
+   case SpvDecorationSample:
+      nir_var->data.sample = true;
+      break;
+   case SpvDecorationInvariant:
+      nir_var->data.invariant = true;
+      break;
+   case SpvDecorationConstant:
+      assert(nir_var->constant_initializer != NULL);
+      nir_var->data.read_only = true;
+      break;
+   case SpvDecorationNonWritable:
+      nir_var->data.read_only = true;
+      break;
+   case SpvDecorationComponent:
+      nir_var->data.location_frac = dec->literals[0];
+      break;
+   case SpvDecorationIndex:
+      nir_var->data.explicit_index = true;
+      nir_var->data.index = dec->literals[0];
+      break;
+   case SpvDecorationBuiltIn: {
+      SpvBuiltIn builtin = dec->literals[0];
+
+      if (builtin == SpvBuiltInWorkgroupSize) {
+         /* This shouldn't be a builtin.  It's actually a constant. */
+         nir_var->data.mode = nir_var_global;
+         nir_var->data.read_only = true;
+
+         nir_constant *c = rzalloc(nir_var, nir_constant);
+         c->value.u[0] = b->shader->info.cs.local_size[0];
+         c->value.u[1] = b->shader->info.cs.local_size[1];
+         c->value.u[2] = b->shader->info.cs.local_size[2];
+         nir_var->constant_initializer = c;
+         break;
+      }
+
+      nir_variable_mode mode = nir_var->data.mode;
+      vtn_get_builtin_location(b, builtin, &nir_var->data.location, &mode);
+      nir_var->data.explicit_location = true;
+      nir_var->data.mode = mode;
+
+      if (builtin == SpvBuiltInFragCoord || builtin == SpvBuiltInSamplePosition)
+         nir_var->data.origin_upper_left = b->origin_upper_left;
+      break;
+   }
+   case SpvDecorationRowMajor:
+   case SpvDecorationColMajor:
+   case SpvDecorationGLSLShared:
+   case SpvDecorationPatch:
+   case SpvDecorationRestrict:
+   case SpvDecorationAliased:
+   case SpvDecorationVolatile:
+   case SpvDecorationCoherent:
+   case SpvDecorationNonReadable:
+   case SpvDecorationUniform:
+      /* This is really nice but we have no use for it right now. */
+   case SpvDecorationCPacked:
+   case SpvDecorationSaturatedConversion:
+   case SpvDecorationStream:
+   case SpvDecorationOffset:
+   case SpvDecorationXfbBuffer:
+   case SpvDecorationFuncParamAttr:
+   case SpvDecorationFPRoundingMode:
+   case SpvDecorationFPFastMathMode:
+   case SpvDecorationLinkageAttributes:
+   case SpvDecorationSpecId:
+      break;
+   default:
+      unreachable("Unhandled variable decoration");
+   }
+}
+
+/* Tries to compute the size of an interface block based on the strides and
+ * offsets that are provided to us in the SPIR-V source.
+ */
+static unsigned
+vtn_type_block_size(struct vtn_type *type)
+{
+   enum glsl_base_type base_type = glsl_get_base_type(type->type);
+   switch (base_type) {
+   case GLSL_TYPE_UINT:
+   case GLSL_TYPE_INT:
+   case GLSL_TYPE_FLOAT:
+   case GLSL_TYPE_BOOL:
+   case GLSL_TYPE_DOUBLE: {
+      unsigned cols = type->row_major ? glsl_get_vector_elements(type->type) :
+                                        glsl_get_matrix_columns(type->type);
+      if (cols > 1) {
+         assert(type->stride > 0);
+         return type->stride * cols;
+      } else if (base_type == GLSL_TYPE_DOUBLE) {
+         return glsl_get_vector_elements(type->type) * 8;
+      } else {
+         return glsl_get_vector_elements(type->type) * 4;
+      }
+   }
+
+   case GLSL_TYPE_STRUCT:
+   case GLSL_TYPE_INTERFACE: {
+      unsigned size = 0;
+      unsigned num_fields = glsl_get_length(type->type);
+      for (unsigned f = 0; f < num_fields; f++) {
+         unsigned field_end = type->offsets[f] +
+                              vtn_type_block_size(type->members[f]);
+         size = MAX2(size, field_end);
+      }
+      return size;
+   }
+
+   case GLSL_TYPE_ARRAY:
+      assert(type->stride > 0);
+      assert(glsl_get_length(type->type) > 0);
+      return type->stride * glsl_get_length(type->type);
+
+   default:
+      assert(!"Invalid block type");
+      return 0;
+   }
+}
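
As a worked example of the sizing rules above, take a hypothetical std140-style block struct { vec3 a; float b; mat4 m; } with member offsets {0, 12, 16} and a matrix stride of 16 for m (assumed values a SPIR-V producer might emit for that layout). The sketch below spells out the field ends the function would take the maximum of:

   static unsigned
   example_block_size(void)
   {
      unsigned a_end = 0  + 3 * 4;    /* vec3: 3 components * 4 bytes       */
      unsigned b_end = 12 + 1 * 4;    /* float at offset 12                 */
      unsigned m_end = 16 + 4 * 16;   /* mat4: 4 columns * matrix stride 16 */
      unsigned size = a_end;
      if (b_end > size) size = b_end;
      if (m_end > size) size = m_end;
      return size;                    /* 80 bytes */
   }
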
+
+void
+vtn_handle_variables(struct vtn_builder *b, SpvOp opcode,
+                     const uint32_t *w, unsigned count)
+{
+   switch (opcode) {
+   case SpvOpVariable: {
+      struct vtn_variable *var = rzalloc(b, struct vtn_variable);
+      var->type = vtn_value(b, w[1], vtn_value_type_type)->type;
+
+      var->chain.var = var;
+      var->chain.length = 0;
+
+      struct vtn_value *val =
+         vtn_push_value(b, w[2], vtn_value_type_access_chain);
+      val->access_chain = &var->chain;
+
+      struct vtn_type *without_array = var->type;
+      while(glsl_type_is_array(without_array->type))
+         without_array = without_array->array_element;
+
+      nir_variable_mode nir_mode;
+      switch ((SpvStorageClass)w[3]) {
+      case SpvStorageClassUniform:
+      case SpvStorageClassUniformConstant:
+         if (without_array->block) {
+            var->mode = vtn_variable_mode_ubo;
+            b->shader->info.num_ubos++;
+         } else if (without_array->buffer_block) {
+            var->mode = vtn_variable_mode_ssbo;
+            b->shader->info.num_ssbos++;
+         } else if (glsl_type_is_image(without_array->type)) {
+            var->mode = vtn_variable_mode_image;
+            nir_mode = nir_var_uniform;
+            b->shader->info.num_images++;
+         } else if (glsl_type_is_sampler(without_array->type)) {
+            var->mode = vtn_variable_mode_sampler;
+            nir_mode = nir_var_uniform;
+            b->shader->info.num_textures++;
+         } else {
+            assert(!"Invalid uniform variable type");
+         }
+         break;
+      case SpvStorageClassPushConstant:
+         var->mode = vtn_variable_mode_push_constant;
+         assert(b->shader->num_uniforms == 0);
+         b->shader->num_uniforms = vtn_type_block_size(var->type) * 4;
+         break;
+      case SpvStorageClassInput:
+         var->mode = vtn_variable_mode_input;
+         nir_mode = nir_var_shader_in;
+         break;
+      case SpvStorageClassOutput:
+         var->mode = vtn_variable_mode_output;
+         nir_mode = nir_var_shader_out;
+         break;
+      case SpvStorageClassPrivate:
+         var->mode = vtn_variable_mode_global;
+         nir_mode = nir_var_global;
+         break;
+      case SpvStorageClassFunction:
+         var->mode = vtn_variable_mode_local;
+         nir_mode = nir_var_local;
+         break;
+      case SpvStorageClassWorkgroup:
+         var->mode = vtn_variable_mode_workgroup;
+         nir_mode = nir_var_shared;
+         break;
+      case SpvStorageClassCrossWorkgroup:
+      case SpvStorageClassGeneric:
+      case SpvStorageClassAtomicCounter:
+      default:
+         unreachable("Unhandled variable storage class");
+      }
+
+      switch (var->mode) {
+      case vtn_variable_mode_local:
+      case vtn_variable_mode_global:
+      case vtn_variable_mode_image:
+      case vtn_variable_mode_sampler:
+      case vtn_variable_mode_workgroup:
+         /* For these, we create the variable normally */
+         var->var = rzalloc(b->shader, nir_variable);
+         var->var->name = ralloc_strdup(var->var, val->name);
+         var->var->type = var->type->type;
+         var->var->data.mode = nir_mode;
+
+         switch (var->mode) {
+         case vtn_variable_mode_image:
+         case vtn_variable_mode_sampler:
+            var->var->interface_type = without_array->type;
+            break;
+         default:
+            var->var->interface_type = NULL;
+            break;
+         }
+         break;
+
+      case vtn_variable_mode_input:
+      case vtn_variable_mode_output: {
+         /* For inputs and outputs, we immediately split structures.  This
+          * is for a couple of reasons.  For one, builtins may all come in
+          * a struct and we really want those split out into separate
+          * variables.  For another, interpolation qualifiers can be
+          * applied to members of the top-level struct and we need to be
+          * able to preserve that information.
+          */
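+
+         /* For example (purely illustrative, not tied to any particular
+          * shader), a gl_PerVertex output block containing Position,
+          * PointSize and ClipDistance would be split here into three
+          * separate nir_variables, one per member, which the code below
+          * names "<block name>.0", "<block name>.1", and so on.
+          */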
+
+         int array_length = -1;
+         struct vtn_type *interface_type = var->type;
+         if (b->shader->stage == MESA_SHADER_GEOMETRY &&
+             glsl_type_is_array(var->type->type)) {
+            /* In Geometry shaders (and some tessellation), inputs come
+             * in per-vertex arrays.  However, some builtins come in
+             * non-per-vertex, hence the need for the is_array check.  In
+             * any case, there are no non-builtin arrays allowed so this
+             * check should be sufficient.
+             */
+            interface_type = var->type->array_element;
+            array_length = glsl_get_length(var->type->type);
+         }
+
+         if (glsl_type_is_struct(interface_type->type)) {
+            /* It's a struct.  Split it. */
+            unsigned num_members = glsl_get_length(interface_type->type);
+            var->members = ralloc_array(b, nir_variable *, num_members);
+
+            for (unsigned i = 0; i < num_members; i++) {
+               const struct glsl_type *mtype = interface_type->members[i]->type;
+               if (array_length >= 0)
+                  mtype = glsl_array_type(mtype, array_length);
+
+               var->members[i] = rzalloc(b->shader, nir_variable);
+               var->members[i]->name =
+                  ralloc_asprintf(var->members[i], "%s.%d", val->name, i);
+               var->members[i]->type = mtype;
+               var->members[i]->interface_type =
+                  interface_type->members[i]->type;
+               var->members[i]->data.mode = nir_mode;
+            }
+         } else {
+            var->var = rzalloc(b->shader, nir_variable);
+            var->var->name = ralloc_strdup(var->var, val->name);
+            var->var->type = var->type->type;
+            var->var->interface_type = interface_type->type;
+            var->var->data.mode = nir_mode;
+         }
+
+         /* For inputs and outputs, we need to grab locations and builtin
+          * information from the interface type.
+          */
+         vtn_foreach_decoration(b, interface_type->val, var_decoration_cb, var);
+         break;
+      }
+
+      case vtn_variable_mode_param:
+         unreachable("Not created through OpVariable");
+
+      case vtn_variable_mode_ubo:
+      case vtn_variable_mode_ssbo:
+      case vtn_variable_mode_push_constant:
+         /* These don't need actual variables. */
+         break;
+      }
+
+      if (count > 4) {
+         assert(count == 5);
+         nir_constant *constant =
+            vtn_value(b, w[4], vtn_value_type_constant)->constant;
+         var->var->constant_initializer =
+            nir_constant_clone(constant, var->var);
+      }
+
+      vtn_foreach_decoration(b, val, var_decoration_cb, var);
+
+      if (var->mode == vtn_variable_mode_image ||
+          var->mode == vtn_variable_mode_sampler) {
+         /* XXX: We still need the binding information in the nir_variable
+          * for these. We should fix that.
+          */
+         var->var->data.binding = var->binding;
+         var->var->data.descriptor_set = var->descriptor_set;
+
+         if (var->mode == vtn_variable_mode_image)
+            var->var->data.image.format = without_array->image_format;
+      }
+
+      if (var->mode == vtn_variable_mode_local) {
+         assert(var->members == NULL && var->var != NULL);
+         nir_function_impl_add_variable(b->impl, var->var);
+      } else if (var->var) {
+         nir_shader_add_variable(b->shader, var->var);
+      } else if (var->members) {
+         unsigned count = glsl_get_length(without_array->type);
+         for (unsigned i = 0; i < count; i++) {
+            assert(var->members[i]->data.mode != nir_var_local);
+            nir_shader_add_variable(b->shader, var->members[i]);
+         }
+      } else {
+         assert(var->mode == vtn_variable_mode_ubo ||
+                var->mode == vtn_variable_mode_ssbo ||
+                var->mode == vtn_variable_mode_push_constant);
+      }
+      break;
+   }
+
+   case SpvOpAccessChain:
+   case SpvOpInBoundsAccessChain: {
+      struct vtn_access_chain *base, *chain;
+      struct vtn_value *base_val = vtn_untyped_value(b, w[3]);
+      if (base_val->value_type == vtn_value_type_sampled_image) {
+         /* This is rather insane.  SPIR-V allows you to use OpSampledImage
+          * to combine an array of images with a single sampler to get an
+          * array of sampled images that all share the same sampler.
+          * Fortunately, this means that we can more-or-less ignore the
+          * sampler when crawling the access chain, but it does leave us
+          * with this rather awkward little special-case.
+          */
+         base = base_val->sampled_image->image;
+      } else {
+         assert(base_val->value_type == vtn_value_type_access_chain);
+         base = base_val->access_chain;
+      }
+
+      chain = vtn_access_chain_extend(b, base, count - 4);
+
+      unsigned idx = base->length;
+      for (int i = 4; i < count; i++) {
+         struct vtn_value *link_val = vtn_untyped_value(b, w[i]);
+         if (link_val->value_type == vtn_value_type_constant) {
+            chain->link[idx].mode = vtn_access_mode_literal;
+            chain->link[idx].id = link_val->constant->value.u[0];
+         } else {
+            chain->link[idx].mode = vtn_access_mode_id;
+            chain->link[idx].id = w[i];
+         }
+         idx++;
+      }
+
+      if (base_val->value_type == vtn_value_type_sampled_image) {
+         struct vtn_value *val =
+            vtn_push_value(b, w[2], vtn_value_type_sampled_image);
+         val->sampled_image = ralloc(b, struct vtn_sampled_image);
+         val->sampled_image->image = chain;
+         val->sampled_image->sampler = base_val->sampled_image->sampler;
+      } else {
+         struct vtn_value *val =
+            vtn_push_value(b, w[2], vtn_value_type_access_chain);
+         val->access_chain = chain;
+      }
+      break;
+   }
+
+   case SpvOpCopyMemory: {
+      struct vtn_value *dest = vtn_value(b, w[1], vtn_value_type_access_chain);
+      struct vtn_value *src = vtn_value(b, w[2], vtn_value_type_access_chain);
+
+      vtn_variable_copy(b, dest->access_chain, src->access_chain);
+      break;
+   }
+
+   case SpvOpLoad: {
+      struct vtn_access_chain *src =
+         vtn_value(b, w[3], vtn_value_type_access_chain)->access_chain;
+
+      if (src->var->mode == vtn_variable_mode_image ||
+          src->var->mode == vtn_variable_mode_sampler) {
+         vtn_push_value(b, w[2], vtn_value_type_access_chain)->access_chain = src;
+         return;
+      }
+
+      struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_ssa);
+      val->ssa = vtn_variable_load(b, src);
+      break;
+   }
+
+   case SpvOpStore: {
+      struct vtn_access_chain *dest =
+         vtn_value(b, w[1], vtn_value_type_access_chain)->access_chain;
+      struct vtn_ssa_value *src = vtn_ssa_value(b, w[2]);
+      vtn_variable_store(b, src, dest);
+      break;
+   }
+
+   case SpvOpArrayLength: {
+      struct vtn_access_chain *chain =
+         vtn_value(b, w[3], vtn_value_type_access_chain)->access_chain;
+
+      const uint32_t offset = chain->var->type->offsets[w[4]];
+      const uint32_t stride = chain->var->type->members[w[4]]->stride;
+
+      unsigned chain_idx;
+      struct vtn_type *type;
+      nir_ssa_def *index =
+         get_vulkan_resource_index(b, chain, &type, &chain_idx);
+
+      nir_intrinsic_instr *instr =
+         nir_intrinsic_instr_create(b->nb.shader,
+                                    nir_intrinsic_get_buffer_size);
+      instr->src[0] = nir_src_for_ssa(index);
+      nir_ssa_dest_init(&instr->instr, &instr->dest, 1, NULL);
+      nir_builder_instr_insert(&b->nb, &instr->instr);
+      nir_ssa_def *buf_size = &instr->dest.ssa;
+
+      /* array_length = max(buffer_size - offset, 0) / stride */
+      nir_ssa_def *array_length =
+         nir_idiv(&b->nb,
+                  nir_imax(&b->nb,
+                           nir_isub(&b->nb,
+                                    buf_size,
+                                    nir_imm_int(&b->nb, offset)),
+                           nir_imm_int(&b->nb, 0u)),
+                  nir_imm_int(&b->nb, stride));
+
+      struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_ssa);
+      val->ssa = vtn_create_ssa_value(b, glsl_uint_type());
+      val->ssa->def = array_length;
+      break;
+   }
+
+   case SpvOpCopyMemorySized:
+   default:
+      unreachable("Unhandled opcode");
+   }
+}
diff --git a/src/compiler/nir/spirv2nir.c b/src/compiler/nir/spirv2nir.c
new file mode 100644 (file)
index 0000000..c837186
--- /dev/null
@@ -0,0 +1,55 @@
+/*
+ * Copyright © 2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ *    Jason Ekstrand (jason@jlekstrand.net)
+ *
+ */
+
+/*
+ * A simple executable that opens a SPIR-V shader, converts it to NIR, and
+ * dumps out the result.  This should be useful for testing the
+ * spirv_to_nir code.
+ */
+
+#include "spirv/nir_spirv.h"
+
+#include <sys/mman.h>
+#include <sys/types.h>
+#include <fcntl.h>
+#include <unistd.h>
+
+int main(int argc, char **argv)
+{
+   int fd = open(argv[1], O_RDONLY);
+   off_t len = lseek(fd, 0, SEEK_END);
+
+   assert(len % 4 == 0);
+   size_t word_count = len / 4;
+
+   const void *map = mmap(NULL, len, PROT_READ, MAP_PRIVATE, fd, 0);
+   assert(map != MAP_FAILED);
+
+   nir_function *func = spirv_to_nir(map, word_count, NULL, 0,
+                                     MESA_SHADER_FRAGMENT, "main", NULL);
+   nir_print_shader(func->shader, stderr);
+}
index 3669cfed360535c43f208c9e8b4b30f603916ccf..70e9cd397fc3bb042912ef0e831df4382155b195 100644 (file)
@@ -124,6 +124,13 @@ glsl_get_aoa_size(const struct glsl_type *type)
    return type->arrays_of_arrays_size();
 }
 
+unsigned
+glsl_count_attribute_slots(const struct glsl_type *type,
+                           bool vertex_input_slots)
+{
+   return type->count_attribute_slots(vertex_input_slots);
+}
+
 const char *
 glsl_get_struct_elem_name(const struct glsl_type *type, unsigned index)
 {
index 18d64b768d4f967cc7d0e885de2d0e8855ab2d1f..d92605bf4fb99639a76e901f333f1d37925691c2 100644 (file)
@@ -68,6 +68,9 @@ unsigned glsl_get_length(const struct glsl_type *type);
 
 unsigned glsl_get_aoa_size(const struct glsl_type *type);
 
+unsigned glsl_count_attribute_slots(const struct glsl_type *type,
+                                    bool vertex_input_slots);
+
 const char *glsl_get_struct_elem_name(const struct glsl_type *type,
                                       unsigned index);
 
index 942d152b1296ad019f43517b1d6b32a57f03fb31..ff2f564dc986ce5d0dbea78c32dbc17fbcb5b9fc 100644 (file)
@@ -201,6 +201,7 @@ gl_system_value_name(gl_system_value sysval)
    static const char *names[] = {
      ENUM(SYSTEM_VALUE_VERTEX_ID),
      ENUM(SYSTEM_VALUE_INSTANCE_ID),
+     ENUM(SYSTEM_VALUE_INSTANCE_INDEX),
      ENUM(SYSTEM_VALUE_VERTEX_ID_ZERO_BASE),
      ENUM(SYSTEM_VALUE_BASE_VERTEX),
      ENUM(SYSTEM_VALUE_INVOCATION_ID),
@@ -214,6 +215,8 @@ gl_system_value_name(gl_system_value sysval)
      ENUM(SYSTEM_VALUE_TESS_LEVEL_OUTER),
      ENUM(SYSTEM_VALUE_TESS_LEVEL_INNER),
      ENUM(SYSTEM_VALUE_LOCAL_INVOCATION_ID),
+     ENUM(SYSTEM_VALUE_LOCAL_INVOCATION_INDEX),
+     ENUM(SYSTEM_VALUE_GLOBAL_INVOCATION_ID),
      ENUM(SYSTEM_VALUE_WORK_GROUP_ID),
      ENUM(SYSTEM_VALUE_NUM_WORK_GROUPS),
      ENUM(SYSTEM_VALUE_VERTEX_CNT),
index d4326c55a9d14ebe5919fd8b9a1000b6e3a46009..d44aabf8f3c298511db11b2815f253bd6c18b5f6 100644 (file)
@@ -378,6 +378,13 @@ typedef enum
     */
    SYSTEM_VALUE_INSTANCE_ID,
 
+   /**
+    * Vulkan InstanceIndex.
+    *
+    * InstanceIndex = gl_InstanceID + gl_BaseInstance
+    */
+   SYSTEM_VALUE_INSTANCE_INDEX,
+
    /**
     * DirectX-style vertex ID.
     *
@@ -452,6 +459,8 @@ typedef enum
     */
    /*@{*/
    SYSTEM_VALUE_LOCAL_INVOCATION_ID,
+   SYSTEM_VALUE_LOCAL_INVOCATION_INDEX,
+   SYSTEM_VALUE_GLOBAL_INVOCATION_ID,
    SYSTEM_VALUE_WORK_GROUP_ID,
    SYSTEM_VALUE_NUM_WORK_GROUPS,
    /*@}*/
diff --git a/src/intel/Makefile.am b/src/intel/Makefile.am
new file mode 100644 (file)
index 0000000..d5bd0b3
--- /dev/null
@@ -0,0 +1,22 @@
+# Copyright © 2016 Intel Corporation
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice (including the next
+# paragraph) shall be included in all copies or substantial portions of the
+# Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+SUBDIRS = genxml isl vulkan
diff --git a/src/intel/genxml/.gitignore b/src/intel/genxml/.gitignore
new file mode 100644 (file)
index 0000000..dd11495
--- /dev/null
@@ -0,0 +1 @@
+gen*_pack.h
diff --git a/src/intel/genxml/Makefile.am b/src/intel/genxml/Makefile.am
new file mode 100644 (file)
index 0000000..36ba526
--- /dev/null
@@ -0,0 +1,31 @@
+# Copyright © 2016 Intel Corporation
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice (including the next
+# paragraph) shall be included in all copies or substantial portions of the
+# Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+BUILT_SOURCES =                                         \
+       gen7_pack.h                                     \
+       gen75_pack.h                                    \
+       gen8_pack.h                                     \
+       gen9_pack.h
+
+%_pack.h : %.xml gen_pack_header.py
+       $(AM_V_GEN) $(srcdir)/gen_pack_header.py $< > $@
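+
+# The same generator can also be invoked by hand, e.g.:
+#     ./gen_pack_header.py gen7.xml > gen7_pack.h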
+
+CLEANFILES = $(BUILT_SOURCES)
diff --git a/src/intel/genxml/README b/src/intel/genxml/README
new file mode 100644 (file)
index 0000000..bc518c6
--- /dev/null
@@ -0,0 +1,60 @@
+This provides some background on the design of the generated headers.  We
+started out trying to generate bit fields but it evolved into the pack
+functions because of a few limitations:
+
+  1) Bit fields still generate terrible code today. Even with modern
+     optimizing compilers you get multiple load+mask+store operations
+     to the same dword in memory as you set individual bits. The
+     compiler also has to generate code to mask out overflowing values
+     (for example, if you assign 200 to a 2 bit field). Our driver
+     never writes overflowing values so that's not needed. On the
+     other hand, most compilers recognize that the template struct we
+     use is a temporary variable and copy propagate the individual
+     fields and do amazing constant folding.  You should take a look
+     at the code that gets generated when you compile in release mode
+     with optimizations.
+
+  2) For some types we need to have overlapping bit fields. For
+     example, some values are 64 byte aligned 32 bit offsets. The
+     lower 5 bits of the offset are always zero, so the hw packs in a
+     few misc bits in the lower 5 bits there. Other times a field can
+     be either a u32 or a float. I tried to do this with overlapping
+     anonymous unions and it became a big mess. Also, when using
+     initializers, you can only initialize one union member so this
+     just doesn't work with our approach.
+
+     The pack functions on the other hand allows us a great deal of
+     flexibility in how we combine things. In the case of overlapping
+     fields (the u32 and float case), if we only set one of them in
+     the pack function, the compiler will recognize that the other is
+     initialized to 0 and optimize out the code to or it in.
+
+  3) Bit fields (and certainly overlapping anonymous unions of bit
+     fields) aren't generally stable across compilers in how they're
+     laid out and aligned. Our pack functions let us control exactly
+     how things get packed, using only simple and unambiguous bitwise
+     shifting and or'ing that works on any compiler.
+
+Once we have the pack function it allows us to hook in various
+transformations and validation as we go from template struct to dwords
+in memory:
+
+  1) Validation: As I said above, our driver isn't supposed to write
+     overflowing values to the fields, but we've of course had lots of
+     cases where we make mistakes and write overflowing values. With
+     the pack function, we can actually assert on that and catch it at
+     runtime.  Bit fields would just silently truncate.
+
+  2) Type conversions: sometimes it's just a matter of writing a
+     float to a u32, but we also convert from bool to bits and from
+     floats to fixed point integers.
+
+  3) Relocations: whenever we have a pointer from one buffer to
+     another (for example a pointer from the meta data for a texture
+     to the raw texture data), we have to tell the kernel about it so
+     it can adjust the pointer to point to the final location. That
+     means we have to do extra work to record and annotate the dword
+     location that holds the pointer. With bit fields, we'd have to
+     call a function to do this, but with the pack functions we can
+     generate that code for us. That's a lot less error prone and
+     less work.
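+
+As a rough sketch of what the generated code looks like (the struct name,
+fields and bit positions below are made up for illustration and are not
+copied from any real generated header), a template struct and its pack
+function are conceptually:
+
+    #include <stdbool.h>
+    #include <stdint.h>
+
+    struct GENX_EXAMPLE_STATE {
+       bool     Enable;     /* bit 31 */
+       uint32_t Function;   /* bits 26..28 */
+       uint32_t Offset;     /* bits 0..12 */
+    };
+
+    static inline void
+    GENX_EXAMPLE_STATE_pack(uint32_t *dw, const struct GENX_EXAMPLE_STATE *v)
+    {
+       /* Plain shift-and-or packing; the real generated functions also
+        * assert that each value fits in its field and perform the float
+        * and fixed-point conversions the XML asks for.
+        */
+       dw[0] = ((uint32_t)v->Enable << 31) |
+               ((v->Function & 0x7) << 26) |
+               (v->Offset & 0x1fff);
+    }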
diff --git a/src/intel/genxml/gen7.xml b/src/intel/genxml/gen7.xml
new file mode 100644 (file)
index 0000000..268ca3d
--- /dev/null
@@ -0,0 +1,2511 @@
+<genxml name="IVB" gen="7">
+  <struct name="MEMORY_OBJECT_CONTROL_STATE" length="1">
+    <field name="Graphics Data Type (GFDT)" start="2" end="2" type="uint"/>
+    <field name="LLC Cacheability Control (LLCCC)" start="1" end="1" type="uint"/>
+    <field name="L3 Cacheability Control (L3CC)" start="0" end="0" type="uint"/>
+  </struct>
+
+  <struct name="3DSTATE_CONSTANT_BODY" length="6">
+    <field name="Constant Buffer 1 Read Length" start="16" end="31" type="uint"/>
+    <field name="Constant Buffer 0 Read Length" start="0" end="15" type="uint"/>
+    <field name="Constant Buffer 3 Read Length" start="48" end="63" type="uint"/>
+    <field name="Constant Buffer 2 Read Length" start="32" end="47" type="uint"/>
+    <field name="Pointer To Constant Buffer 0" start="69" end="95" type="address"/>
+    <field name="Constant Buffer Object Control State" start="64" end="68" type="MEMORY_OBJECT_CONTROL_STATE"/>
+    <field name="Pointer To Constant Buffer 1" start="101" end="127" type="address"/>
+    <field name="Pointer To Constant Buffer 2" start="133" end="159" type="address"/>
+    <field name="Pointer To Constant Buffer 3" start="165" end="191" type="address"/>
+  </struct>
+
+  <struct name="VERTEX_BUFFER_STATE" length="4">
+    <field name="Vertex Buffer Index" start="26" end="31" type="uint"/>
+    <field name="Buffer Access Type" start="20" end="20" type="uint">
+      <value name="VERTEXDATA" value="0"/>
+      <value name="INSTANCEDATA" value="1"/>
+    </field>
+    <field name="Vertex Buffer Memory Object Control State" start="16" end="19" type="MEMORY_OBJECT_CONTROL_STATE"/>
+    <field name="Address Modify Enable" start="14" end="14" type="uint"/>
+    <field name="Null Vertex Buffer" start="13" end="13" type="bool"/>
+    <field name="Vertex Fetch Invalidate" start="12" end="12" type="uint" default="0"/>
+    <field name="Buffer Pitch" start="0" end="11" type="uint"/>
+    <field name="Buffer Starting Address" start="32" end="63" type="address"/>
+    <field name="End Address" start="64" end="95" type="address"/>
+    <field name="Instance Data Step Rate" start="96" end="127" type="uint"/>
+  </struct>
+
+  <struct name="VERTEX_ELEMENT_STATE" length="2">
+    <field name="Vertex Buffer Index" start="26" end="31" type="uint"/>
+    <field name="Valid" start="25" end="25" type="uint"/>
+    <field name="Source Element Format" start="16" end="24" type="uint"/>
+    <field name="Edge Flag Enable" start="15" end="15" type="bool"/>
+    <field name="Source Element Offset" start="0" end="11" type="uint"/>
+    <field name="Component 0 Control" start="60" end="62" type="uint"/>
+    <field name="Component 1 Control" start="56" end="58" type="uint"/>
+    <field name="Component 2 Control" start="52" end="54" type="uint"/>
+    <field name="Component 3 Control" start="48" end="50" type="uint"/>
+  </struct>
+
+  <struct name="SO_DECL" length="1">
+    <field name="Output Buffer Slot" start="12" end="13" type="uint"/>
+    <field name="Hole Flag" start="11" end="11" type="uint"/>
+    <field name="Register Index" start="4" end="9" type="uint"/>
+    <field name="Component Mask" start="0" end="3" type="uint" default="0"/>
+  </struct>
+
+  <struct name="SO_DECL_ENTRY" length="2">
+    <field name="Stream 3 Decl" start="48" end="63" type="SO_DECL"/>
+    <field name="Stream 2 Decl" start="32" end="47" type="SO_DECL"/>
+    <field name="Stream 1 Decl" start="16" end="31" type="SO_DECL"/>
+    <field name="Stream 0 Decl" start="0" end="15" type="SO_DECL"/>
+  </struct>
+
+  <struct name="SF_OUTPUT_ATTRIBUTE_DETAIL" length="1">
+    <field name="Component Override W" start="15" end="15" type="bool"/>
+    <field name="Component Override Z" start="14" end="14" type="bool"/>
+    <field name="Component Override Y" start="13" end="13" type="bool"/>
+    <field name="Component Override X" start="12" end="12" type="bool"/>
+    <field name="Swizzle Control Mode" start="11" end="11" type="uint"/>
+    <field name="Constant Source" start="9" end="10" type="uint">
+      <value name="CONST_0000" value="0"/>
+      <value name="CONST_0001_FLOAT" value="1"/>
+      <value name="CONST_1111_FLOAT" value="2"/>
+      <value name="PRIM_ID" value="3"/>
+    </field>
+    <field name="Swizzle Select" start="6" end="7" type="uint">
+      <value name="INPUTATTR" value="0"/>
+      <value name="INPUTATTR_FACING" value="1"/>
+      <value name="INPUTATTR_W" value="2"/>
+      <value name="INPUTATTR_FACING_W" value="3"/>
+    </field>
+    <field name="Source Attribute" start="0" end="4" type="uint"/>
+  </struct>
+
+  <struct name="SCISSOR_RECT" length="2">
+    <field name="Scissor Rectangle Y Min" start="16" end="31" type="uint"/>
+    <field name="Scissor Rectangle X Min" start="0" end="15" type="uint"/>
+    <field name="Scissor Rectangle Y Max" start="48" end="63" type="uint"/>
+    <field name="Scissor Rectangle X Max" start="32" end="47" type="uint"/>
+  </struct>
+
+  <struct name="SF_CLIP_VIEWPORT" length="16">
+    <field name="Viewport Matrix Element m00" start="0" end="31" type="float"/>
+    <field name="Viewport Matrix Element m11" start="32" end="63" type="float"/>
+    <field name="Viewport Matrix Element m22" start="64" end="95" type="float"/>
+    <field name="Viewport Matrix Element m30" start="96" end="127" type="float"/>
+    <field name="Viewport Matrix Element m31" start="128" end="159" type="float"/>
+    <field name="Viewport Matrix Element m32" start="160" end="191" type="float"/>
+    <field name="X Min Clip Guardband" start="256" end="287" type="float"/>
+    <field name="X Max Clip Guardband" start="288" end="319" type="float"/>
+    <field name="Y Min Clip Guardband" start="320" end="351" type="float"/>
+    <field name="Y Max Clip Guardband" start="352" end="383" type="float"/>
+    <group count="4" start="384" size="32">
+    </group>
+  </struct>
+
+  <struct name="BLEND_STATE" length="2">
+    <field name="Color Buffer Blend Enable" start="31" end="31" type="bool"/>
+    <field name="Independent Alpha Blend Enable" start="30" end="30" type="bool"/>
+    <field name="Alpha Blend Function" start="26" end="28" type="uint">
+      <value name="BLENDFUNCTION_ADD" value="0"/>
+      <value name="BLENDFUNCTION_SUBTRACT" value="1"/>
+      <value name="BLENDFUNCTION_REVERSE_SUBTRACT" value="2"/>
+      <value name="BLENDFUNCTION_MIN" value="3"/>
+      <value name="BLENDFUNCTION_MAX" value="4"/>
+    </field>
+    <field name="Source Alpha Blend Factor" start="20" end="24" type="uint">
+      <value name="BLENDFACTOR_ONE" value="1"/>
+      <value name="BLENDFACTOR_SRC_COLOR" value="2"/>
+      <value name="BLENDFACTOR_SRC_ALPHA" value="3"/>
+      <value name="BLENDFACTOR_DST_ALPHA" value="4"/>
+      <value name="BLENDFACTOR_DST_COLOR" value="5"/>
+      <value name="BLENDFACTOR_SRC_ALPHA_SATURATE" value="6"/>
+      <value name="BLENDFACTOR_CONST_COLOR" value="7"/>
+      <value name="BLENDFACTOR_CONST_ALPHA" value="8"/>
+      <value name="BLENDFACTOR_SRC1_COLOR" value="9"/>
+      <value name="BLENDFACTOR_SRC1_ALPHA" value="10"/>
+      <value name="BLENDFACTOR_ZERO" value="17"/>
+      <value name="BLENDFACTOR_INV_SRC_COLOR" value="18"/>
+      <value name="BLENDFACTOR_INV_SRC_ALPHA" value="19"/>
+      <value name="BLENDFACTOR_INV_DST_ALPHA" value="20"/>
+      <value name="BLENDFACTOR_INV_DST_COLOR" value="21"/>
+      <value name="BLENDFACTOR_INV_CONST_COLOR" value="23"/>
+      <value name="BLENDFACTOR_INV_CONST_ALPHA" value="24"/>
+      <value name="BLENDFACTOR_INV_SRC1_COLOR" value="25"/>
+      <value name="BLENDFACTOR_INV_SRC1_ALPHA" value="26"/>
+    </field>
+    <field name="Destination Alpha Blend Factor" start="15" end="19" type="uint"/>
+    <field name="Color Blend Function" start="11" end="13" type="uint">
+      <value name="BLENDFUNCTION_ADD" value="0"/>
+      <value name="BLENDFUNCTION_SUBTRACT" value="1"/>
+      <value name="BLENDFUNCTION_REVERSE_SUBTRACT" value="2"/>
+      <value name="BLENDFUNCTION_MIN  " value="3"/>
+      <value name="BLENDFUNCTION_MAX" value="4"/>
+    </field>
+    <field name="Source Blend Factor" start="5" end="9" type="uint"/>
+    <field name="Destination Blend Factor" start="0" end="4" type="uint"/>
+    <field name="AlphaToCoverage Enable" start="63" end="63" type="bool"/>
+    <field name="AlphaToOne Enable" start="62" end="62" type="bool"/>
+    <field name="AlphaToCoverage Dither Enable" start="61" end="61" type="bool"/>
+    <field name="Write Disable Alpha" start="59" end="59" type="bool"/>
+    <field name="Write Disable Red" start="58" end="58" type="bool"/>
+    <field name="Write Disable Green" start="57" end="57" type="bool"/>
+    <field name="Write Disable Blue" start="56" end="56" type="bool"/>
+    <field name="Logic Op Enable" start="54" end="54" type="bool"/>
+    <field name="Logic Op Function" start="50" end="53" type="uint">
+      <value name="LOGICOP_CLEAR" value="0"/>
+      <value name="LOGICOP_NOR" value="1"/>
+      <value name="LOGICOP_AND_INVERTED" value="2"/>
+      <value name="LOGICOP_COPY_INVERTED" value="3"/>
+      <value name="LOGICOP_AND_REVERSE" value="4"/>
+      <value name="LOGICOP_INVERT" value="5"/>
+      <value name="LOGICOP_XOR" value="6"/>
+      <value name="LOGICOP_NAND" value="7"/>
+      <value name="LOGICOP_AND" value="8"/>
+      <value name="LOGICOP_EQUIV" value="9"/>
+      <value name="LOGICOP_NOOP" value="10"/>
+      <value name="LOGICOP_OR_INVERTED" value="11"/>
+      <value name="LOGICOP_COPY" value="12"/>
+      <value name="LOGICOP_OR_REVERSE" value="13"/>
+      <value name="LOGICOP_OR" value="14"/>
+      <value name="LOGICOP_SET" value="15"/>
+    </field>
+    <field name="Alpha Test Enable" start="48" end="48" type="bool"/>
+    <field name="Alpha Test Function" start="45" end="47" type="uint">
+      <value name="COMPAREFUNCTION_ALWAYS" value="0"/>
+      <value name="COMPAREFUNCTION_NEVER" value="1"/>
+      <value name="COMPAREFUNCTION_LESS" value="2"/>
+      <value name="COMPAREFUNCTION_EQUAL" value="3"/>
+      <value name="COMPAREFUNCTION_LEQUAL" value="4"/>
+      <value name="COMPAREFUNCTION_GREATER" value="5"/>
+      <value name="COMPAREFUNCTION_NOTEQUAL" value="6"/>
+      <value name="COMPAREFUNCTION_GEQUAL" value="7"/>
+    </field>
+    <field name="Color Dither Enable" start="44" end="44" type="bool"/>
+    <field name="X Dither Offset" start="42" end="43" type="uint"/>
+    <field name="Y Dither Offset" start="40" end="41" type="uint"/>
+    <field name="Color Clamp Range" start="34" end="35" type="uint">
+      <value name="COLORCLAMP_UNORM" value="0"/>
+      <value name="COLORCLAMP_SNORM" value="1"/>
+      <value name="COLORCLAMP_RTFORMAT" value="2"/>
+    </field>
+    <field name="Pre-Blend Color Clamp Enable" start="33" end="33" type="bool"/>
+    <field name="Post-Blend Color Clamp Enable" start="32" end="32" type="bool"/>
+  </struct>
+
+  <struct name="CC_VIEWPORT" length="2">
+    <field name="Minimum Depth" start="0" end="31" type="float"/>
+    <field name="Maximum Depth" start="32" end="63" type="float"/>
+  </struct>
+
+  <struct name="COLOR_CALC_STATE" length="6">
+    <field name="Stencil Reference Value" start="24" end="31" type="uint"/>
+    <field name="BackFace Stencil Reference Value" start="16" end="23" type="uint"/>
+    <field name="Round Disable Function Disable" start="15" end="15" type="bool"/>
+    <field name="Alpha Test Format" start="0" end="0" type="uint">
+      <value name="ALPHATEST_UNORM8" value="0"/>
+      <value name="ALPHATEST_FLOAT32" value="1"/>
+    </field>
+    <field name="Alpha Reference Value As UNORM8" start="32" end="63" type="uint"/>
+    <field name="Alpha Reference Value As FLOAT32" start="32" end="63" type="float"/>
+    <field name="Blend Constant Color Red" start="64" end="95" type="float"/>
+    <field name="Blend Constant Color Green" start="96" end="127" type="float"/>
+    <field name="Blend Constant Color Blue" start="128" end="159" type="float"/>
+    <field name="Blend Constant Color Alpha" start="160" end="191" type="float"/>
+  </struct>
+
+  <struct name="DEPTH_STENCIL_STATE" length="3">
+    <field name="Stencil Test Enable" start="31" end="31" type="bool"/>
+    <field name="Stencil Test Function" start="28" end="30" type="uint">
+      <value name="COMPAREFUNCTION_ALWAYS" value="0"/>
+      <value name="COMPAREFUNCTION_NEVER" value="1"/>
+      <value name="COMPAREFUNCTION_LESS" value="2"/>
+      <value name="COMPAREFUNCTION_EQUAL" value="3"/>
+      <value name="COMPAREFUNCTION_LEQUAL" value="4"/>
+      <value name="COMPAREFUNCTION_GREATER" value="5"/>
+      <value name="COMPAREFUNCTION_NOTEQUAL" value="6"/>
+      <value name="COMPAREFUNCTION_GEQUAL" value="7"/>
+    </field>
+    <field name="Stencil Fail Op" start="25" end="27" type="uint">
+      <value name="STENCILOP_KEEP" value="0"/>
+      <value name="STENCILOP_ZERO" value="1"/>
+      <value name="STENCILOP_REPLACE" value="2"/>
+      <value name="STENCILOP_INCRSAT" value="3"/>
+      <value name="STENCILOP_DECRSAT" value="4"/>
+      <value name="STENCILOP_INCR" value="5"/>
+      <value name="STENCILOP_DECR" value="6"/>
+      <value name="STENCILOP_INVERT" value="7"/>
+    </field>
+    <field name="Stencil Pass Depth Fail Op" start="22" end="24" type="uint"/>
+    <field name="Stencil Pass Depth Pass Op" start="19" end="21" type="uint"/>
+    <field name="Stencil Buffer Write Enable" start="18" end="18" type="bool"/>
+    <field name="Double Sided Stencil Enable" start="15" end="15" type="bool"/>
+    <field name="BackFace Stencil Test Function" start="12" end="14" type="uint">
+      <value name="COMPAREFUNCTION_ALWAYS" value="0"/>
+      <value name="COMPAREFUNCTION_NEVER" value="1"/>
+      <value name="COMPAREFUNCTION_LESS" value="2"/>
+      <value name="COMPAREFUNCTION_EQUAL" value="3"/>
+      <value name="COMPAREFUNCTION_LEQUAL" value="4"/>
+      <value name="COMPAREFUNCTION_GREATER" value="5"/>
+      <value name="COMPAREFUNCTION_NOTEQUAL" value="6"/>
+      <value name="COMPAREFUNCTION_GEQUAL" value="7"/>
+    </field>
+    <field name="Backface Stencil Fail Op" start="9" end="11" type="uint">
+      <value name="STENCILOP_KEEP" value="0"/>
+      <value name="STENCILOP_ZERO" value="1"/>
+      <value name="STENCILOP_REPLACE" value="2"/>
+      <value name="STENCILOP_INCRSAT" value="3"/>
+      <value name="STENCILOP_DECRSAT" value="4"/>
+      <value name="STENCILOP_INCR" value="5"/>
+      <value name="STENCILOP_DECR" value="6"/>
+      <value name="STENCILOP_INVERT" value="7"/>
+    </field>
+    <field name="Backface Stencil Pass Depth Fail Op" start="6" end="8" type="uint"/>
+    <field name="Backface Stencil Pass Depth Pass Op" start="3" end="5" type="uint"/>
+    <field name="Stencil Test Mask" start="56" end="63" type="uint"/>
+    <field name="Stencil Write Mask" start="48" end="55" type="uint"/>
+    <field name="Backface Stencil Test Mask" start="40" end="47" type="uint"/>
+    <field name="Backface Stencil Write Mask" start="32" end="39" type="uint"/>
+    <field name="Depth Test Enable" start="95" end="95" type="bool"/>
+    <field name="Depth Test Function" start="91" end="93" type="uint">
+      <value name="COMPAREFUNCTION_ALWAYS" value="0"/>
+      <value name="COMPAREFUNCTION_NEVER" value="1"/>
+      <value name="COMPAREFUNCTION_LESS" value="2"/>
+      <value name="COMPAREFUNCTION_EQUAL" value="3"/>
+      <value name="COMPAREFUNCTION_LEQUAL" value="4"/>
+      <value name="COMPAREFUNCTION_GREATER" value="5"/>
+      <value name="COMPAREFUNCTION_NOTEQUAL" value="6"/>
+      <value name="COMPAREFUNCTION_GEQUAL" value="7"/>
+    </field>
+    <field name="Depth Buffer Write Enable" start="90" end="90" type="bool"/>
+  </struct>
+
+  <struct name="INTERFACE_DESCRIPTOR_DATA" length="8">
+    <field name="Kernel Start Pointer" start="6" end="31" type="offset"/>
+    <field name="Single Program Flow" start="50" end="50" type="uint"/>
+    <field name="Thread Priority" start="49" end="49" type="uint">
+      <value name="Normal Priority" value="0"/>
+      <value name="High Priority" value="1"/>
+    </field>
+    <field name="Floating Point Mode" start="48" end="48" type="uint">
+      <value name="IEEE-754" value="0"/>
+      <value name="Alternate" value="1"/>
+    </field>
+    <field name="Illegal Opcode Exception Enable" start="45" end="45" type="bool"/>
+    <field name="Mask Stack Exception Enable" start="43" end="43" type="bool"/>
+    <field name="Software Exception Enable" start="39" end="39" type="bool"/>
+    <field name="Sampler State Pointer" start="69" end="95" type="offset"/>
+    <field name="Sampler Count" start="66" end="68" type="uint">
+      <value name="No samplers used" value="0"/>
+      <value name="Between 1 and 4 samplers used" value="1"/>
+      <value name="Between 5 and 8 samplers used" value="2"/>
+      <value name="Between 9 and 12 samplers used" value="3"/>
+      <value name="Between 13 and 16 samplers used" value="4"/>
+    </field>
+    <field name="Binding Table Pointer" start="101" end="111" type="offset"/>
+    <field name="Binding Table Entry Count" start="96" end="100" type="uint"/>
+    <field name="Constant URB Entry Read Length" start="144" end="159" type="uint"/>
+    <field name="Constant URB Entry Read Offset" start="128" end="143" type="uint"/>
+    <field name="Rounding Mode" start="182" end="183" type="uint">
+      <value name="RTNE" value="0"/>
+      <value name="RU" value="1"/>
+      <value name="RD" value="2"/>
+      <value name="RTZ" value="3"/>
+    </field>
+    <field name="Barrier Enable" start="181" end="181" type="bool"/>
+    <field name="Shared Local Memory Size" start="176" end="180" type="uint"/>
+    <field name="Number of Threads in GPGPU Thread Group" start="160" end="167" type="uint"/>
+  </struct>
+
+  <struct name="PALETTE_ENTRY" length="1">
+    <field name="Alpha" start="24" end="31" type="uint"/>
+    <field name="Red" start="16" end="23" type="uint"/>
+    <field name="Green" start="8" end="15" type="uint"/>
+    <field name="Blue" start="0" end="7" type="uint"/>
+  </struct>
+
+  <struct name="BINDING_TABLE_STATE" length="1">
+    <field name="Surface State Pointer" start="5" end="31" type="offset"/>
+  </struct>
+
+  <struct name="RENDER_SURFACE_STATE" length="8">
+    <field name="Surface Type" start="29" end="31" type="uint">
+      <value name="SURFTYPE_1D" value="0"/>
+      <value name="SURFTYPE_2D" value="1"/>
+      <value name="SURFTYPE_3D" value="2"/>
+      <value name="SURFTYPE_CUBE" value="3"/>
+      <value name="SURFTYPE_BUFFER" value="4"/>
+      <value name="SURFTYPE_STRBUF" value="5"/>
+      <value name="SURFTYPE_NULL" value="7"/>
+    </field>
+    <field name="Surface Array" start="28" end="28" type="bool"/>
+    <field name="Surface Format" start="18" end="26" type="uint"/>
+    <field name="Surface Vertical Alignment" start="16" end="17" type="uint">
+      <value name="VALIGN_2" value="0"/>
+      <value name="VALIGN_4" value="1"/>
+    </field>
+    <field name="Surface Horizontal Alignment" start="15" end="15" type="uint">
+      <value name="HALIGN_4" value="0"/>
+      <value name="HALIGN_8" value="1"/>
+    </field>
+    <field name="Tiled Surface" start="14" end="14" type="uint"/>
+    <field name="Tile Walk" start="13" end="13" type="uint">
+      <value name="TILEWALK_XMAJOR" value="0"/>
+      <value name="TILEWALK_YMAJOR" value="1"/>
+    </field>
+    <field name="Vertical Line Stride" start="12" end="12" type="uint"/>
+    <field name="Vertical Line Stride Offset" start="11" end="11" type="uint"/>
+    <field name="Surface Array Spacing" start="10" end="10" type="uint">
+      <value name="ARYSPC_FULL" value="0"/>
+      <value name="ARYSPC_LOD0" value="1"/>
+    </field>
+    <field name="Render Cache Read Write Mode" start="8" end="8" type="uint"/>
+    <field name="Media Boundary Pixel Mode" start="6" end="7" type="uint">
+      <value name="NORMAL_MODE" value="0"/>
+      <value name="PROGRESSIVE_FRAME" value="2"/>
+      <value name="INTERLACED_FRAME" value="3"/>
+    </field>
+    <field name="Cube Face Enables" start="0" end="5" type="uint"/>
+    <field name="Surface Base Address" start="32" end="63" type="address"/>
+    <field name="Height" start="80" end="93" type="uint"/>
+    <field name="Width" start="64" end="77" type="uint"/>
+    <field name="Depth" start="117" end="127" type="uint"/>
+    <field name="Surface Pitch" start="96" end="113" type="uint"/>
+    <field name="Render Target Rotation" start="157" end="158" type="uint">
+      <value name="RTROTATE_0DEG" value="0"/>
+      <value name="RTROTATE_90DEG" value="1"/>
+      <value name="RTROTATE_270DEG" value="3"/>
+    </field>
+    <field name="Minimum Array Element" start="146" end="156" type="uint"/>
+    <field name="Render Target View Extent" start="135" end="145" type="uint"/>
+    <field name="Multisampled Surface Storage Format" start="134" end="134" type="uint">
+      <value name="MSFMT_MSS" value="0"/>
+      <value name="MSFMT_DEPTH_STENCIL" value="1"/>
+    </field>
+    <field name="Number of Multisamples" start="131" end="133" type="uint">
+      <value name="MULTISAMPLECOUNT_1" value="0"/>
+      <value name="MULTISAMPLECOUNT_4" value="2"/>
+      <value name="MULTISAMPLECOUNT_8" value="3"/>
+    </field>
+    <field name="Multisample Position Palette Index" start="128" end="130" type="uint"/>
+    <field name="Strbuf Minimum Array Element" start="128" end="154" type="uint"/>
+    <field name="X Offset" start="185" end="191" type="offset"/>
+    <field name="Y Offset" start="180" end="183" type="offset"/>
+    <field name="Surface Object Control State" start="176" end="179" type="MEMORY_OBJECT_CONTROL_STATE"/>
+    <field name="MOCS" start="176" end="179" type="uint"/>
+    <field name="Surface Min LOD" start="164" end="167" type="uint"/>
+    <field name="MIP Count / LOD" start="160" end="163" type="uint"/>
+    <field name="MCS Base Address" start="204" end="223" type="address"/>
+    <field name="MCS Surface Pitch" start="195" end="203" type="uint"/>
+    <field name="Append Counter Address" start="198" end="223" type="address"/>
+    <field name="Append Counter Enable" start="193" end="193" type="bool"/>
+    <field name="MCS Enable" start="192" end="192" type="bool"/>
+    <field name="Reserved: MBZ" start="222" end="223" type="uint"/>
+    <field name="X Offset for UV Plane" start="208" end="221" type="uint"/>
+    <field name="Y Offset for UV Plane" start="192" end="205" type="uint"/>
+    <field name="Red Clear Color" start="255" end="255" type="uint">
+      <value name="CC_ZERO" value="0"/>
+      <value name="CC_ONE" value="1"/>
+    </field>
+    <field name="Green Clear Color" start="254" end="254" type="uint">
+      <value name="CC_ZERO" value="0"/>
+      <value name="CC_ONE" value="1"/>
+    </field>
+    <field name="Blue Clear Color" start="253" end="253" type="uint">
+      <value name="CC_ZERO" value="0"/>
+      <value name="CC_ONE" value="1"/>
+    </field>
+    <field name="Alpha Clear Color" start="252" end="252" type="uint">
+      <value name="CC_ZERO" value="0"/>
+      <value name="CC_ONE" value="1"/>
+    </field>
+    <field name="Resource Min LOD" start="224" end="235" type="u4.8"/>
+  </struct>
+
+  <struct name="SAMPLER_STATE" length="4">
+    <field name="Sampler Disable" start="31" end="31" type="bool"/>
+    <field name="Texture Border Color Mode" start="29" end="29" type="uint">
+      <value name="DX10/OGL" value="0"/>
+      <value name="DX9" value="1"/>
+    </field>
+    <field name="LOD PreClamp Enable" start="28" end="28" type="uint" prefix="CLAMP_ENABLE">
+      <value name="OGL" value="1"/>
+    </field>
+    <field name="Base Mip Level" start="22" end="26" type="u4.1"/>
+    <field name="Mip Mode Filter" start="20" end="21" type="uint" prefix="MIPFILTER">
+      <value name="NONE" value="0"/>
+      <value name="NEAREST" value="1"/>
+      <value name="LINEAR" value="3"/>
+    </field>
+    <field name="Mag Mode Filter" start="17" end="19" type="uint" prefix="MAPFILTER">
+      <value name="NEAREST" value="0"/>
+      <value name="LINEAR" value="1"/>
+      <value name="ANISOTROPIC" value="2"/>
+      <value name="MONO" value="6"/>
+    </field>
+    <field name="Min Mode Filter" start="14" end="16" type="uint" prefix="MAPFILTER">
+      <value name="NEAREST" value="0"/>
+      <value name="LINEAR" value="1"/>
+      <value name="ANISOTROPIC" value="2"/>
+      <value name="MONO" value="6"/>
+    </field>
+    <field name="Texture LOD Bias" start="1" end="13" type="s4.8"/>
+    <field name="Anisotropic Algorithm" start="0" end="0" type="uint">
+      <value name="LEGACY" value="0"/>
+      <value name="EWA Approximation" value="1"/>
+    </field>
+    <field name="Min LOD" start="52" end="63" type="u4.8"/>
+    <field name="Max LOD" start="40" end="51" type="u4.8"/>
+    <field name="Shadow Function" start="33" end="35" type="uint">
+      <value name="PREFILTEROP ALWAYS" value="0"/>
+      <value name="PREFILTEROP NEVER" value="1"/>
+      <value name="PREFILTEROP LESS" value="2"/>
+      <value name="PREFILTEROP EQUAL" value="3"/>
+      <value name="PREFILTEROP LEQUAL" value="4"/>
+      <value name="PREFILTEROP GREATER" value="5"/>
+      <value name="PREFILTEROP NOTEQUAL" value="6"/>
+      <value name="PREFILTEROP GEQUAL" value="7"/>
+    </field>
+    <field name="Cube Surface Control Mode" start="32" end="32" type="uint">
+      <value name="PROGRAMMED" value="0"/>
+      <value name="OVERRIDE" value="1"/>
+    </field>
+    <field name="Border Color Pointer" start="69" end="95" type="offset"/>
+    <field name="ChromaKey Enable" start="121" end="121" type="bool"/>
+    <field name="ChromaKey Index" start="119" end="120" type="uint"/>
+    <field name="ChromaKey Mode" start="118" end="118" type="uint">
+      <value name="KEYFILTER_KILL_ON_ANY_MATCH" value="0"/>
+      <value name="KEYFILTER_REPLACE_BLACK" value="1"/>
+    </field>
+    <field name="Maximum Anisotropy" start="115" end="117" type="uint">
+      <value name="RATIO 2:1" value="0"/>
+      <value name="RATIO 4:1" value="1"/>
+      <value name="RATIO 6:1" value="2"/>
+      <value name="RATIO 8:1" value="3"/>
+      <value name="RATIO 10:1" value="4"/>
+      <value name="RATIO 12:1" value="5"/>
+      <value name="RATIO 14:1" value="6"/>
+      <value name="RATIO 16:1" value="7"/>
+    </field>
+    <field name="R Address Min Filter Rounding Enable" start="109" end="109" type="bool"/>
+    <field name="R Address Mag Filter Rounding Enable" start="110" end="110" type="bool"/>
+    <field name="V Address Min Filter Rounding Enable" start="111" end="111" type="bool"/>
+    <field name="V Address Mag Filter Rounding Enable" start="112" end="112" type="bool"/>
+    <field name="U Address Min Filter Rounding Enable" start="113" end="113" type="bool"/>
+    <field name="U Address Mag Filter Rounding Enable" start="114" end="114" type="bool"/>
+    <field name="Trilinear Filter Quality" start="107" end="108" type="uint">
+      <value name="FULL" value="0"/>
+      <value name="MED" value="2"/>
+      <value name="LOW" value="3"/>
+    </field>
+    <field name="Non-normalized Coordinate Enable" start="106" end="106" type="bool"/>
+    <field name="TCX Address Control Mode" start="102" end="104" type="uint"/>
+    <field name="TCY Address Control Mode" start="99" end="101" type="uint"/>
+    <field name="TCZ Address Control Mode" start="96" end="98" type="uint"/>
+  </struct>
+
+  <enum name="3D_Prim_Topo_Type" prefix="3DPRIM">
+    <value name="POINTLIST" value="1"/>
+    <value name="LINELIST" value="2"/>
+    <value name="LINESTRIP" value="3"/>
+    <value name="TRILIST" value="4"/>
+    <value name="TRISTRIP" value="5"/>
+    <value name="TRIFAN" value="6"/>
+    <value name="QUADLIST" value="7"/>
+    <value name="QUADSTRIP" value="8"/>
+    <value name="LINELIST_ADJ" value="9"/>
+    <value name="LINESTRIP_ADJ" value="10"/>
+    <value name="TRILIST_ADJ" value="11"/>
+    <value name="TRISTRIP_ADJ" value="12"/>
+    <value name="TRISTRIP_REVERSE" value="13"/>
+    <value name="POLYGON" value="14"/>
+    <value name="RECTLIST" value="15"/>
+    <value name="LINELOOP" value="16"/>
+    <value name="POINTLIST _BF" value="17"/>
+    <value name="LINESTRIP_CONT" value="18"/>
+    <value name="LINESTRIP_BF" value="19"/>
+    <value name="LINESTRIP_CONT_BF" value="20"/>
+    <value name="TRIFAN_NOSTIPPLE" value="22"/>
+    <value name="PATCHLIST_1" value="32"/>
+    <value name="PATCHLIST_2" value="33"/>
+    <value name="PATCHLIST_3" value="34"/>
+    <value name="PATCHLIST_4" value="35"/>
+    <value name="PATCHLIST_5" value="36"/>
+    <value name="PATCHLIST_6" value="37"/>
+    <value name="PATCHLIST_7" value="38"/>
+    <value name="PATCHLIST_8" value="39"/>
+    <value name="PATCHLIST_9" value="40"/>
+    <value name="PATCHLIST_10" value="41"/>
+    <value name="PATCHLIST_11" value="42"/>
+    <value name="PATCHLIST_12" value="43"/>
+    <value name="PATCHLIST_13" value="44"/>
+    <value name="PATCHLIST_14" value="45"/>
+    <value name="PATCHLIST_15" value="46"/>
+    <value name="PATCHLIST_16" value="47"/>
+    <value name="PATCHLIST_17" value="48"/>
+    <value name="PATCHLIST_18" value="49"/>
+    <value name="PATCHLIST_19" value="50"/>
+    <value name="PATCHLIST_20" value="51"/>
+    <value name="PATCHLIST_21" value="52"/>
+    <value name="PATCHLIST_22" value="53"/>
+    <value name="PATCHLIST_23" value="54"/>
+    <value name="PATCHLIST_24" value="55"/>
+    <value name="PATCHLIST_25" value="56"/>
+    <value name="PATCHLIST_26" value="57"/>
+    <value name="PATCHLIST_27" value="58"/>
+    <value name="PATCHLIST_28" value="59"/>
+    <value name="PATCHLIST_29" value="60"/>
+    <value name="PATCHLIST_30" value="61"/>
+    <value name="PATCHLIST_31" value="62"/>
+    <value name="PATCHLIST_32" value="63"/>
+  </enum>
+
+  <enum name="3D_Vertex_Component_Control" prefix="VFCOMP">
+    <value name="NOSTORE" value="0"/>
+    <value name="STORE_SRC" value="1"/>
+    <value name="STORE_0" value="2"/>
+    <value name="STORE_1_FP" value="3"/>
+    <value name="STORE_1_INT" value="4"/>
+    <value name="STORE_VID" value="5"/>
+    <value name="STORE_IID" value="6"/>
+    <value name="STORE_PID" value="7"/>
+  </enum>
+
+  <enum name="3D_Compare_Function" prefix="COMPAREFUNCTION">
+    <value name="ALWAYS" value="0"/>
+    <value name="NEVER" value="1"/>
+    <value name="LESS" value="2"/>
+    <value name="EQUAL" value="3"/>
+    <value name="LEQUAL" value="4"/>
+    <value name="GREATER" value="5"/>
+    <value name="NOTEQUAL" value="6"/>
+    <value name="GEQUAL" value="7"/>
+  </enum>
+
+  <enum name="SURFACE_FORMAT" prefix="SF">
+    <value name="R32G32B32A32_FLOAT" value="0"/>
+    <value name="R32G32B32A32_SINT" value="1"/>
+    <value name="R32G32B32A32_UINT" value="2"/>
+    <value name="R32G32B32A32_UNORM" value="3"/>
+    <value name="R32G32B32A32_SNORM" value="4"/>
+    <value name="R64G64_FLOAT" value="5"/>
+    <value name="R32G32B32X32_FLOAT" value="6"/>
+    <value name="R32G32B32A32_SSCALED" value="7"/>
+    <value name="R32G32B32A32_USCALED" value="8"/>
+    <value name="R32G32B32A32_SFIXED" value="32"/>
+    <value name="R64G64_PASSTHRU" value="33"/>
+    <value name="R32G32B32_FLOAT" value="64"/>
+    <value name="R32G32B32_SINT" value="65"/>
+    <value name="R32G32B32_UINT" value="66"/>
+    <value name="R32G32B32_UNORM" value="67"/>
+    <value name="R32G32B32_SNORM" value="68"/>
+    <value name="R32G32B32_SSCALED" value="69"/>
+    <value name="R32G32B32_USCALED" value="70"/>
+    <value name="R32G32B32_SFIXED" value="80"/>
+    <value name="R16G16B16A16_UNORM" value="128"/>
+    <value name="R16G16B16A16_SNORM" value="129"/>
+    <value name="R16G16B16A16_SINT" value="130"/>
+    <value name="R16G16B16A16_UINT" value="131"/>
+    <value name="R16G16B16A16_FLOAT" value="132"/>
+    <value name="R32G32_FLOAT" value="133"/>
+    <value name="R32G32_SINT" value="134"/>
+    <value name="R32G32_UINT" value="135"/>
+    <value name="R32_FLOAT_X8X24_TYPELESS" value="136"/>
+    <value name="X32_TYPELESS_G8X24_UINT" value="137"/>
+    <value name="L32A32_FLOAT" value="138"/>
+    <value name="R32G32_UNORM" value="139"/>
+    <value name="R32G32_SNORM" value="140"/>
+    <value name="R64_FLOAT" value="141"/>
+    <value name="R16G16B16X16_UNORM" value="142"/>
+    <value name="R16G16B16X16_FLOAT" value="143"/>
+    <value name="A32X32_FLOAT" value="144"/>
+    <value name="L32X32_FLOAT" value="145"/>
+    <value name="I32X32_FLOAT" value="146"/>
+    <value name="R16G16B16A16_SSCALED" value="147"/>
+    <value name="R16G16B16A16_USCALED" value="148"/>
+    <value name="R32G32_SSCALED" value="149"/>
+    <value name="R32G32_USCALED" value="150"/>
+    <value name="R32G32_SFIXED" value="160"/>
+    <value name="R64_PASSTHRU" value="161"/>
+    <value name="B8G8R8A8_UNORM" value="192"/>
+    <value name="B8G8R8A8_UNORM_SRGB" value="193"/>
+    <value name="R10G10B10A2_UNORM" value="194"/>
+    <value name="R10G10B10A2_UNORM_SRGB" value="195"/>
+    <value name="R10G10B10A2_UINT" value="196"/>
+    <value name="R10G10B10_SNORM_A2_UNORM" value="197"/>
+    <value name="R8G8B8A8_UNORM" value="199"/>
+    <value name="R8G8B8A8_UNORM_SRGB" value="200"/>
+    <value name="R8G8B8A8_SNORM" value="201"/>
+    <value name="R8G8B8A8_SINT" value="202"/>
+    <value name="R8G8B8A8_UINT" value="203"/>
+    <value name="R16G16_UNORM" value="204"/>
+    <value name="R16G16_SNORM" value="205"/>
+    <value name="R16G16_SINT" value="206"/>
+    <value name="R16G16_UINT" value="207"/>
+    <value name="R16G16_FLOAT" value="208"/>
+    <value name="B10G10R10A2_UNORM" value="209"/>
+    <value name="B10G10R10A2_UNORM_SRGB" value="210"/>
+    <value name="R11G11B10_FLOAT" value="211"/>
+    <value name="R32_SINT" value="214"/>
+    <value name="R32_UINT" value="215"/>
+    <value name="R32_FLOAT" value="216"/>
+    <value name="R24_UNORM_X8_TYPELESS" value="217"/>
+    <value name="X24_TYPELESS_G8_UINT" value="218"/>
+    <value name="L32_UNORM" value="221"/>
+    <value name="A32_UNORM" value="222"/>
+    <value name="L16A16_UNORM" value="223"/>
+    <value name="I24X8_UNORM" value="224"/>
+    <value name="L24X8_UNORM" value="225"/>
+    <value name="A24X8_UNORM" value="226"/>
+    <value name="I32_FLOAT" value="227"/>
+    <value name="L32_FLOAT" value="228"/>
+    <value name="A32_FLOAT" value="229"/>
+    <value name="X8B8_UNORM_G8R8_SNORM" value="230"/>
+    <value name="A8X8_UNORM_G8R8_SNORM" value="231"/>
+    <value name="B8X8_UNORM_G8R8_SNORM" value="232"/>
+    <value name="B8G8R8X8_UNORM" value="233"/>
+    <value name="B8G8R8X8_UNORM_SRGB" value="234"/>
+    <value name="R8G8B8X8_UNORM" value="235"/>
+    <value name="R8G8B8X8_UNORM_SRGB" value="236"/>
+    <value name="R9G9B9E5_SHAREDEXP" value="237"/>
+    <value name="B10G10R10X2_UNORM" value="238"/>
+    <value name="L16A16_FLOAT" value="240"/>
+    <value name="R32_UNORM" value="241"/>
+    <value name="R32_SNORM" value="242"/>
+    <value name="R10G10B10X2_USCALED" value="243"/>
+    <value name="R8G8B8A8_SSCALED" value="244"/>
+    <value name="R8G8B8A8_USCALED" value="245"/>
+    <value name="R16G16_SSCALED" value="246"/>
+    <value name="R16G16_USCALED" value="247"/>
+    <value name="R32_SSCALED" value="248"/>
+    <value name="R32_USCALED" value="249"/>
+    <value name="B5G6R5_UNORM" value="256"/>
+    <value name="B5G6R5_UNORM_SRGB" value="257"/>
+    <value name="B5G5R5A1_UNORM" value="258"/>
+    <value name="B5G5R5A1_UNORM_SRGB" value="259"/>
+    <value name="B4G4R4A4_UNORM" value="260"/>
+    <value name="B4G4R4A4_UNORM_SRGB" value="261"/>
+    <value name="R8G8_UNORM" value="262"/>
+    <value name="R8G8_SNORM" value="263"/>
+    <value name="R8G8_SINT" value="264"/>
+    <value name="R8G8_UINT" value="265"/>
+    <value name="R16_UNORM" value="266"/>
+    <value name="R16_SNORM" value="267"/>
+    <value name="R16_SINT" value="268"/>
+    <value name="R16_UINT" value="269"/>
+    <value name="R16_FLOAT" value="270"/>
+    <value name="A8P8_UNORM_PALETTE0" value="271"/>
+    <value name="A8P8_UNORM_PALETTE1" value="272"/>
+    <value name="I16_UNORM" value="273"/>
+    <value name="L16_UNORM" value="274"/>
+    <value name="A16_UNORM" value="275"/>
+    <value name="L8A8_UNORM" value="276"/>
+    <value name="I16_FLOAT" value="277"/>
+    <value name="L16_FLOAT" value="278"/>
+    <value name="A16_FLOAT" value="279"/>
+    <value name="L8A8_UNORM_SRGB" value="280"/>
+    <value name="R5G5_SNORM_B6_UNORM" value="281"/>
+    <value name="B5G5R5X1_UNORM" value="282"/>
+    <value name="B5G5R5X1_UNORM_SRGB" value="283"/>
+    <value name="R8G8_SSCALED" value="284"/>
+    <value name="R8G8_USCALED" value="285"/>
+    <value name="R16_SSCALED" value="286"/>
+    <value name="R16_USCALED" value="287"/>
+    <value name="P8A8_UNORM_PALETTE0" value="290"/>
+    <value name="P8A8_UNORM_PALETTE1" value="291"/>
+    <value name="A1B5G5R5_UNORM" value="292"/>
+    <value name="A4B4G4R4_UNORM" value="293"/>
+    <value name="L8A8_UINT" value="294"/>
+    <value name="L8A8_SINT" value="295"/>
+    <value name="R8_UNORM" value="320"/>
+    <value name="R8_SNORM" value="321"/>
+    <value name="R8_SINT" value="322"/>
+    <value name="R8_UINT" value="323"/>
+    <value name="A8_UNORM" value="324"/>
+    <value name="I8_UNORM" value="325"/>
+    <value name="L8_UNORM" value="326"/>
+    <value name="P4A4_UNORM_PALETTE0" value="327"/>
+    <value name="A4P4_UNORM_PALETTE0" value="328"/>
+    <value name="R8_SSCALED" value="329"/>
+    <value name="R8_USCALED" value="330"/>
+    <value name="P8_UNORM_PALETTE0" value="331"/>
+    <value name="L8_UNORM_SRGB" value="332"/>
+    <value name="P8_UNORM_PALETTE1" value="333"/>
+    <value name="P4A4_UNORM_PALETTE1" value="334"/>
+    <value name="A4P4_UNORM_PALETTE1" value="335"/>
+    <value name="Y8_UNORM" value="336"/>
+    <value name="L8_UINT" value="338"/>
+    <value name="L8_SINT" value="339"/>
+    <value name="I8_UINT" value="340"/>
+    <value name="I8_SINT" value="341"/>
+    <value name="DXT1_RGB_SRGB" value="384"/>
+    <value name="R1_UNORM" value="385"/>
+    <value name="YCRCB_NORMAL" value="386"/>
+    <value name="YCRCB_SWAPUVY" value="387"/>
+    <value name="P2_UNORM_PALETTE0" value="388"/>
+    <value name="P2_UNORM_PALETTE1" value="389"/>
+    <value name="BC1_UNORM" value="390"/>
+    <value name="BC2_UNORM" value="391"/>
+    <value name="BC3_UNORM" value="392"/>
+    <value name="BC4_UNORM" value="393"/>
+    <value name="BC5_UNORM" value="394"/>
+    <value name="BC1_UNORM_SRGB" value="395"/>
+    <value name="BC2_UNORM_SRGB" value="396"/>
+    <value name="BC3_UNORM_SRGB" value="397"/>
+    <value name="MONO8" value="398"/>
+    <value name="YCRCB_SWAPUV" value="399"/>
+    <value name="YCRCB_SWAPY" value="400"/>
+    <value name="DXT1_RGB" value="401"/>
+    <value name="FXT1" value="402"/>
+    <value name="R8G8B8_UNORM" value="403"/>
+    <value name="R8G8B8_SNORM" value="404"/>
+    <value name="R8G8B8_SSCALED" value="405"/>
+    <value name="R8G8B8_USCALED" value="406"/>
+    <value name="R64G64B64A64_FLOAT" value="407"/>
+    <value name="R64G64B64_FLOAT" value="408"/>
+    <value name="BC4_SNORM" value="409"/>
+    <value name="BC5_SNORM" value="410"/>
+    <value name="R16G16B16_FLOAT" value="411"/>
+    <value name="R16G16B16_UNORM" value="412"/>
+    <value name="R16G16B16_SNORM" value="413"/>
+    <value name="R16G16B16_SSCALED" value="414"/>
+    <value name="R16G16B16_USCALED" value="415"/>
+    <value name="BC6H_SF16" value="417"/>
+    <value name="BC7_UNORM" value="418"/>
+    <value name="BC7_UNORM_SRGB" value="419"/>
+    <value name="BC6H_UF16" value="420"/>
+    <value name="PLANAR_420_8" value="421"/>
+    <value name="R8G8B8_UNORM_SRGB" value="424"/>
+    <value name="ETC1_RGB8" value="425"/>
+    <value name="ETC2_RGB8" value="426"/>
+    <value name="EAC_R11" value="427"/>
+    <value name="EAC_RG11" value="428"/>
+    <value name="EAC_SIGNED_R11" value="429"/>
+    <value name="EAC_SIGNED_RG11" value="430"/>
+    <value name="ETC2_SRGB8" value="431"/>
+    <value name="R16G16B16_UINT" value="432"/>
+    <value name="R16G16B16_SINT" value="433"/>
+    <value name="R32_SFIXED" value="434"/>
+    <value name="R10G10B10A2_SNORM" value="435"/>
+    <value name="R10G10B10A2_USCALED" value="436"/>
+    <value name="R10G10B10A2_SSCALED" value="437"/>
+    <value name="R10G10B10A2_SINT" value="438"/>
+    <value name="B10G10R10A2_SNORM" value="439"/>
+    <value name="B10G10R10A2_USCALED" value="440"/>
+    <value name="B10G10R10A2_SSCALED" value="441"/>
+    <value name="B10G10R10A2_UINT" value="442"/>
+    <value name="B10G10R10A2_SINT" value="443"/>
+    <value name="R64G64B64A64_PASSTHRU" value="444"/>
+    <value name="R64G64B64_PASSTHRU" value="445"/>
+    <value name="ETC2_RGB8_PTA" value="448"/>
+    <value name="ETC2_SRGB8_PTA" value="449"/>
+    <value name="ETC2_EAC_RGBA8" value="450"/>
+    <value name="ETC2_EAC_SRGB8_A8" value="451"/>
+    <value name="R8G8B8_UINT" value="456"/>
+    <value name="R8G8B8_SINT" value="457"/>
+    <value name="RAW" value="511"/>
+  </enum>
+
+  <enum name="Texture Coordinate Mode" prefix="TCM">
+    <value name="WRAP" value="0"/>
+    <value name="MIRROR" value="1"/>
+    <value name="CLAMP" value="2"/>
+    <value name="CUBE" value="3"/>
+    <value name="CLAMP_BORDER" value="4"/>
+    <value name="MIRROR_ONCE" value="5"/>
+  </enum>
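+
+  <!-- Sampler address (wrap) modes, informally: WRAP repeats the texture,
+       MIRROR is mirrored repeat, CLAMP clamps to the edge texel,
+       CLAMP_BORDER clamps to the border color, MIRROR_ONCE mirrors once and
+       then clamps, and CUBE is the cube-map addressing used when filtering
+       across adjacent cube faces. -->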
+
+  <instruction name="3DPRIMITIVE" bias="2" length="7">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="3"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="0"/>
+    <field name="Indirect Parameter Enable" start="10" end="10" type="bool"/>
+    <field name="Predicate Enable" start="8" end="8" type="bool"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="5"/>
+    <field name="End Offset Enable" start="41" end="41" type="bool"/>
+    <field name="Vertex Access Type" start="40" end="40" type="uint">
+      <value name="SEQUENTIAL" value="0"/>
+      <value name="RANDOM" value="1"/>
+    </field>
+    <field name="Primitive Topology Type" start="32" end="37" type="uint"/>
+    <field name="Vertex Count Per Instance" start="64" end="95" type="uint"/>
+    <field name="Start Vertex Location" start="96" end="127" type="uint"/>
+    <field name="Instance Count" start="128" end="159" type="uint"/>
+    <field name="Start Instance Location" start="160" end="191" type="uint"/>
+    <field name="Base Vertex Location" start="192" end="223" type="int"/>
+  </instruction>
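+
+  <!-- As the layouts in this file imply, field "start"/"end" positions are
+       absolute bit offsets from the start of the packet: bit b lands in
+       DWord b/32, at bit b%32 within it, and the "DWord Length" default is
+       the total length minus the bias (3DPRIMITIVE above: 7 DWords, bias 2,
+       DWord Length 5).  With only the defaults set, its header DWord 0
+       packs to (3<<29) | (3<<27) | (3<<24) | (0<<16) | 5 = 0x7B000005. -->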
+
+  <instruction name="3DSTATE_AA_LINE_PARAMETERS" bias="2" length="3">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="1"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="10"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="1"/>
+    <field name="AA Coverage Bias" start="48" end="55" type="u0.8"/>
+    <field name="AA Coverage Slope" start="32" end="39" type="u0.8"/>
+    <field name="AA Coverage EndCap Bias" start="80" end="87" type="u0.8"/>
+    <field name="AA Coverage EndCap Slope" start="64" end="71" type="u0.8"/>
+  </instruction>
+
+  <instruction name="3DSTATE_BINDING_TABLE_POINTERS_DS" bias="2" length="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="40"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="0"/>
+    <field name="Pointer to DS Binding Table" start="37" end="47" type="offset"/>
+  </instruction>
+
+  <instruction name="3DSTATE_BINDING_TABLE_POINTERS_GS" bias="2" length="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="41"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="0"/>
+    <field name="Pointer to GS Binding Table" start="37" end="47" type="offset"/>
+  </instruction>
+
+  <instruction name="3DSTATE_BINDING_TABLE_POINTERS_HS" bias="2" length="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="39"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="0"/>
+    <field name="Pointer to HS Binding Table" start="37" end="47" type="offset"/>
+  </instruction>
+
+  <instruction name="3DSTATE_BINDING_TABLE_POINTERS_PS" bias="2" length="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="42"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="0"/>
+    <field name="Pointer to PS Binding Table" start="37" end="47" type="offset"/>
+  </instruction>
+
+  <instruction name="3DSTATE_BINDING_TABLE_POINTERS_VS" bias="2" length="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="38"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="0"/>
+    <field name="Pointer to VS Binding Table" start="37" end="47" type="offset"/>
+  </instruction>
+
+  <instruction name="3DSTATE_BLEND_STATE_POINTERS" bias="2" length="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="36"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="0"/>
+    <field name="Blend State Pointer" start="38" end="63" type="offset"/>
+    <field start="32" end="32" type="mbo"/>
+  </instruction>
+
+  <instruction name="3DSTATE_CC_STATE_POINTERS" bias="2" length="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="14"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="0"/>
+    <field name="Color Calc State Pointer" start="38" end="63" type="offset"/>
+    <field start="32" end="32" type="mbo"/>
+  </instruction>
+
+  <instruction name="3DSTATE_CHROMA_KEY" bias="2" length="4">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="1"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="4"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="2"/>
+    <field name="ChromaKey Table Index" start="62" end="63" type="uint"/>
+    <field name="ChromaKey Low Value" start="64" end="95" type="uint"/>
+    <field name="ChromaKey High Value" start="96" end="127" type="uint"/>
+  </instruction>
+
+  <instruction name="3DSTATE_CLEAR_PARAMS" bias="2" length="3">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="4"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="1"/>
+    <field name="Depth Clear Value" start="32" end="63" type="uint"/>
+    <field name="Depth Clear Value Valid" start="64" end="64" type="bool"/>
+  </instruction>
+
+  <instruction name="3DSTATE_CLIP" bias="2" length="4">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="18"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="2"/>
+    <field name="Front Winding" start="52" end="52" type="uint"/>
+    <field name="Vertex Sub Pixel Precision Select" start="51" end="51" type="uint"/>
+    <field name="EarlyCull Enable" start="50" end="50" type="bool"/>
+    <field name="Cull Mode" start="48" end="49" type="uint" prefix="CULLMODE">
+      <value name="BOTH" value="0"/>
+      <value name="NONE" value="1"/>
+      <value name="FRONT" value="2"/>
+      <value name="BACK" value="3"/>
+    </field>
+    <field name="Clipper Statistics Enable" start="42" end="42" type="bool"/>
+    <field name="User Clip Distance Cull Test Enable Bitmask" start="32" end="39" type="uint"/>
+    <field name="Clip Enable" start="95" end="95" type="bool"/>
+    <field name="API Mode" start="94" end="94" type="uint">
+      <value name="APIMODE_OGL" value="0"/>
+    </field>
+    <field name="Viewport XY ClipTest Enable" start="92" end="92" type="bool"/>
+    <field name="Viewport Z ClipTest Enable" start="91" end="91" type="bool"/>
+    <field name="Guardband ClipTest Enable" start="90" end="90" type="bool"/>
+    <field name="User Clip Distance Clip Test Enable Bitmask" start="80" end="87" type="uint"/>
+    <field name="Clip Mode" start="77" end="79" type="uint">
+      <value name="CLIPMODE_NORMAL" value="0"/>
+      <value name="CLIPMODE_REJECT_ALL" value="3"/>
+      <value name="CLIPMODE_ACCEPT_ALL" value="4"/>
+    </field>
+    <field name="Perspective Divide Disable" start="73" end="73" type="bool"/>
+    <field name="Non-Perspective Barycentric Enable" start="72" end="72" type="bool"/>
+    <field name="Triangle Strip/List Provoking Vertex Select" start="68" end="69" type="uint">
+      <value name="Vertex 0" value="0"/>
+      <value name="Vertex 1" value="1"/>
+      <value name="Vertex 2" value="2"/>
+    </field>
+    <field name="Line Strip/List Provoking Vertex Select" start="66" end="67" type="uint">
+      <value name="Vertex 0" value="0"/>
+      <value name="Vertex 1" value="1"/>
+    </field>
+    <field name="Triangle Fan Provoking Vertex Select" start="64" end="65" type="uint">
+      <value name="Vertex 0" value="0"/>
+      <value name="Vertex 1" value="1"/>
+      <value name="Vertex 2" value="2"/>
+    </field>
+    <field name="Minimum Point Width" start="113" end="123" type="u8.3"/>
+    <field name="Maximum Point Width" start="102" end="112" type="u8.3"/>
+    <field name="Force Zero RTAIndex Enable" start="101" end="101" type="bool"/>
+    <field name="Maximum VPIndex" start="96" end="99" type="uint"/>
+  </instruction>
+
+  <instruction name="3DSTATE_CONSTANT_DS" bias="2" length="7">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="26"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="5"/>
+    <field name="Constant Body" start="32" end="223" type="3DSTATE_CONSTANT_BODY"/>
+  </instruction>
+
+  <instruction name="3DSTATE_CONSTANT_GS" bias="2" length="7">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="22"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="5"/>
+    <field name="Constant Body" start="32" end="223" type="3DSTATE_CONSTANT_BODY"/>
+  </instruction>
+
+  <instruction name="3DSTATE_CONSTANT_HS" bias="2" length="7">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="25"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="5"/>
+    <field name="Constant Body" start="32" end="223" type="3DSTATE_CONSTANT_BODY"/>
+  </instruction>
+
+  <instruction name="3DSTATE_CONSTANT_PS" bias="2" length="7">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="23"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="5"/>
+    <field name="Constant Body" start="32" end="223" type="3DSTATE_CONSTANT_BODY"/>
+  </instruction>
+
+  <instruction name="3DSTATE_CONSTANT_VS" bias="2" length="7">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="21"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="5"/>
+    <field name="Constant Body" start="32" end="223" type="3DSTATE_CONSTANT_BODY"/>
+  </instruction>
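+
+  <!-- The five 3DSTATE_CONSTANT_* packets above are identical apart from
+       their 3D Command Sub Opcode (21 VS, 22 GS, 23 PS, 25 HS, 26 DS); each
+       carries its stage's constant buffer description in a single
+       3DSTATE_CONSTANT_BODY structure starting at DWord 1 (bit 32). -->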
+
+  <instruction name="3DSTATE_DEPTH_BUFFER" bias="2" length="7">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="5"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="5"/>
+    <field name="Surface Type" start="61" end="63" type="uint">
+      <value name="SURFTYPE_1D" value="0"/>
+      <value name="SURFTYPE_2D" value="1"/>
+      <value name="SURFTYPE_3D" value="2"/>
+      <value name="SURFTYPE_CUBE" value="3"/>
+      <value name="SURFTYPE_NULL" value="7"/>
+    </field>
+    <field name="Depth Write Enable" start="60" end="60" type="bool"/>
+    <field name="Stencil Write Enable" start="59" end="59" type="bool"/>
+    <field name="Hierarchical Depth Buffer Enable" start="54" end="54" type="bool"/>
+    <field name="Surface Format" start="50" end="52" type="uint">
+      <value name="D32_FLOAT" value="1"/>
+      <value name="D24_UNORM_X8_UINT" value="3"/>
+      <value name="D16_UNORM" value="5"/>
+    </field>
+    <field name="Surface Pitch" start="32" end="49" type="uint"/>
+    <field name="Surface Base Address" start="64" end="95" type="address"/>
+    <field name="Height" start="114" end="127" type="uint"/>
+    <field name="Width" start="100" end="113" type="uint"/>
+    <field name="LOD" start="96" end="99" type="uint"/>
+    <field name="Depth" start="149" end="159" type="uint">
+      <value name="SURFTYPE_CUBE (must be zero)" value="0"/>
+    </field>
+    <field name="Minimum Array Element" start="138" end="148" type="uint"/>
+    <field name="Depth Buffer Object Control State" start="128" end="131" type="MEMORY_OBJECT_CONTROL_STATE"/>
+    <field name="Depth Coordinate Offset Y" start="176" end="191" type="int"/>
+    <field name="Depth Coordinate Offset X" start="160" end="175" type="int"/>
+    <field name="Render Target View Extent" start="213" end="223" type="uint"/>
+  </instruction>
+
+  <instruction name="3DSTATE_DEPTH_STENCIL_STATE_POINTERS" bias="2" length="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="37"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="0"/>
+    <field name="Pointer to DEPTH_STENCIL_STATE" start="38" end="63" type="offset"/>
+    <field start="32" end="32" type="mbo"/>
+  </instruction>
+
+  <instruction name="3DSTATE_DRAWING_RECTANGLE" bias="2" length="4">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="1"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="0"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="2"/>
+    <field name="Clipped Drawing Rectangle Y Min" start="48" end="63" type="uint"/>
+    <field name="Clipped Drawing Rectangle X Min" start="32" end="47" type="uint"/>
+    <field name="Clipped Drawing Rectangle Y Max" start="80" end="95" type="uint"/>
+    <field name="Clipped Drawing Rectangle X Max" start="64" end="79" type="uint"/>
+    <field name="Drawing Rectangle Origin Y" start="112" end="127" type="int"/>
+    <field name="Drawing Rectangle Origin X" start="96" end="111" type="int"/>
+  </instruction>
+
+  <instruction name="3DSTATE_DS" bias="2" length="6">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="29"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="4"/>
+    <field name="Kernel Start Pointer" start="38" end="63" type="offset"/>
+    <field name="Single Domain Point Dispatch" start="95" end="95" type="uint"/>
+    <field name="Vector Mask Enable" start="94" end="94" type="bool"/>
+    <field name="Sampler Count" start="91" end="93" type="uint">
+      <value name="No Samplers" value="0"/>
+      <value name="1-4 Samplers" value="1"/>
+      <value name="5-8 Samplers" value="2"/>
+      <value name="9-12 Samplers" value="3"/>
+      <value name="13-16 Samplers" value="4"/>
+    </field>
+    <field name="Binding Table Entry Count" start="82" end="89" type="uint"/>
+    <field name="Floating Point Mode" start="80" end="80" type="uint">
+      <value name="IEEE-754" value="0"/>
+      <value name="Alternate" value="1"/>
+    </field>
+    <field name="Illegal Opcode Exception Enable" start="77" end="77" type="bool"/>
+    <field name="Software Exception Enable" start="71" end="71" type="bool"/>
+    <field name="Scratch Space Base Pointer" start="106" end="127" type="offset"/>
+    <field name="Per-Thread Scratch Space" start="96" end="99" type="uint"/>
+    <field name="Dispatch GRF Start Register For URB Data" start="148" end="152" type="uint"/>
+    <field name="Patch URB Entry Read Length" start="139" end="145" type="uint"/>
+    <field name="Patch URB Entry Read Offset" start="132" end="137" type="uint"/>
+    <field name="Maximum Number of Threads" start="185" end="191" type="uint"/>
+    <field name="Statistics Enable" start="170" end="170" type="bool"/>
+    <field name="Compute W Coordinate Enable" start="162" end="162" type="bool"/>
+    <field name="DS Cache Disable" start="161" end="161" type="bool"/>
+    <field name="DS Function Enable" start="160" end="160" type="bool"/>
+  </instruction>
+
+  <instruction name="3DSTATE_GS" bias="2" length="7">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="17"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="5"/>
+    <field name="Kernel Start Pointer" start="38" end="63" type="offset"/>
+    <field name="Single Program Flow (SPF)" start="95" end="95" type="uint"/>
+    <field name="Vector Mask Enable (VME)" start="94" end="94" type="uint"/>
+    <field name="Sampler Count" start="91" end="93" type="uint">
+      <value name="No Samplers" value="0"/>
+      <value name="1-4 Samplers" value="1"/>
+      <value name="5-8 Samplers" value="2"/>
+      <value name="9-12 Samplers" value="3"/>
+      <value name="13-16 Samplers" value="4"/>
+    </field>
+    <field name="Binding Table Entry Count" start="82" end="89" type="uint"/>
+    <field name="Thread Priority" start="81" end="81" type="uint">
+      <value name="Normal Priority" value="0"/>
+      <value name="High Priority" value="1"/>
+    </field>
+    <field name="Floating Point Mode" start="80" end="80" type="uint">
+      <value name="IEEE-754" value="0"/>
+      <value name="alternate" value="1"/>
+    </field>
+    <field name="Illegal Opcode Exception Enable" start="77" end="77" type="bool"/>
+    <field name="Mask Stack Exception Enable" start="75" end="75" type="bool"/>
+    <field name="Software  Exception Enable" start="71" end="71" type="bool"/>
+    <field name="Scratch Space Base Pointer" start="106" end="127" type="offset"/>
+    <field name="Per-Thread Scratch Space" start="96" end="99" type="uint"/>
+    <field name="Output Vertex Size" start="151" end="156" type="uint"/>
+    <field name="Output Topology" start="145" end="150" type="uint" prefix="OUTPUT"/>
+    <field name="Vertex URB Entry Read Length" start="139" end="144" type="uint"/>
+    <field name="Include Vertex Handles" start="138" end="138" type="uint"/>
+    <field name="Vertex URB Entry Read Offset" start="132" end="137" type="uint"/>
+    <field name="Dispatch GRF Start Register for URB Data" start="128" end="131" type="uint"/>
+    <field name="Maximum Number of Threads" start="185" end="191" type="uint"/>
+    <field name="Control Data Format" start="184" end="184" type="uint">
+      <value name="GSCTL_CUT" value="0"/>
+      <value name="GSCTL_SID" value="1"/>
+    </field>
+    <field name="Control Data Header Size" start="180" end="183" type="uint"/>
+    <field name="Instance Control" start="175" end="179" type="uint"/>
+    <field name="Default StreamID" start="173" end="174" type="uint"/>
+    <field name="Dispatch Mode" start="171" end="172" type="uint" prefix="DISPATCH_MODE">
+      <value name="SINGLE" value="0"/>
+      <value name="DUAL_INSTANCE" value="1"/>
+      <value name="DUAL_OBJECT" value="2"/>
+    </field>
+    <field name="GS Statistics Enable" start="170" end="170" type="uint"/>
+    <field name="GS Invocations Increment Value" start="165" end="169" type="uint"/>
+    <field name="Include Primitive ID" start="164" end="164" type="uint"/>
+    <field name="Hint" start="163" end="163" type="uint"/>
+    <field name="Reorder Enable" start="162" end="162" type="bool"/>
+    <field name="Discard Adjacency" start="161" end="161" type="bool"/>
+    <field name="GS Enable" start="160" end="160" type="bool"/>
+    <field name="Semaphore Handle" start="192" end="203" type="offset"/>
+  </instruction>
+
+  <instruction name="3DSTATE_HIER_DEPTH_BUFFER" bias="2" length="3">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="7"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="1"/>
+    <field name="Hierarchical Depth Buffer Object Control State" start="57" end="60" type="MEMORY_OBJECT_CONTROL_STATE"/>
+    <field name="Surface Pitch" start="32" end="48" type="uint"/>
+    <field name="Surface Base Address" start="64" end="95" type="address"/>
+  </instruction>
+
+  <instruction name="3DSTATE_HS" bias="2" length="7">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="27"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="5"/>
+    <field name="Sampler Count" start="59" end="61" type="uint">
+      <value name="No Samplers" value="0"/>
+      <value name="1-4 Samplers" value="1"/>
+      <value name="5-8 Samplers" value="2"/>
+      <value name="9-12 Samplers" value="3"/>
+      <value name="13-16 Samplers" value="4"/>
+    </field>
+    <field name="Binding Table Entry Count" start="50" end="57" type="uint"/>
+    <field name="Floating Point Mode" start="48" end="48" type="uint">
+      <value name="IEEE-754" value="0"/>
+      <value name="alternate" value="1"/>
+    </field>
+    <field name="Illegal Opcode Exception Enable" start="45" end="45" type="bool"/>
+    <field name="Software  Exception Enable" start="39" end="39" type="bool"/>
+    <field name="Maximum Number of Threads" start="32" end="38" type="uint"/>
+    <field name="Enable" start="95" end="95" type="bool"/>
+    <field name="Statistics Enable" start="93" end="93" type="bool"/>
+    <field name="Instance Count" start="64" end="67" type="uint"/>
+    <field name="Kernel Start Pointer" start="102" end="127" type="offset"/>
+    <field name="Scratch Space Base Pointer" start="138" end="159" type="offset"/>
+    <field name="Per-Thread Scratch Space" start="128" end="131" type="uint"/>
+    <field name="Single Program Flow" start="187" end="187" type="uint"/>
+    <field name="Vector Mask Enable" start="186" end="186" type="bool"/>
+    <field name="Include Vertex Handles" start="184" end="184" type="uint"/>
+    <field name="Dispatch GRF Start Register For URB Data" start="179" end="183" type="uint"/>
+    <field name="Vertex URB Entry Read Length" start="171" end="176" type="uint"/>
+    <field name="Vertex URB Entry Read Offset" start="164" end="169" type="uint"/>
+    <field name="Semaphore Handle" start="192" end="203" type="offset"/>
+  </instruction>
+
+  <instruction name="3DSTATE_INDEX_BUFFER" bias="2" length="3">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="10"/>
+    <field name="Memory Object Control State" start="12" end="15" type="MEMORY_OBJECT_CONTROL_STATE"/>
+    <field name="Cut Index Enable" start="10" end="10" type="bool"/>
+    <field name="Index Format" start="8" end="9" type="uint" prefix="INDEX">
+      <value name="BYTE" value="0"/>
+      <value name="WORD" value="1"/>
+      <value name="DWORD" value="2"/>
+    </field>
+    <field name="DWord Length" start="0" end="7" type="uint" default="1"/>
+    <field name="Buffer Starting Address" start="32" end="63" type="address"/>
+    <field name="Buffer Ending Address" start="64" end="95" type="address"/>
+  </instruction>
+
+  <instruction name="3DSTATE_LINE_STIPPLE" bias="2" length="3">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="1"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="8"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="1"/>
+    <field name="Modify Enable (Current Repeat Counter, Current Stipple Index)" start="63" end="63" type="bool"/>
+    <field name="Current Repeat Counter" start="53" end="61" type="uint"/>
+    <field name="Current Stipple Index" start="48" end="51" type="uint"/>
+    <field name="Line Stipple Pattern" start="32" end="47" type="uint"/>
+    <field name="Line Stipple Inverse Repeat Count" start="79" end="95" type="u1.16"/>
+    <field name="Line Stipple Repeat Count" start="64" end="72" type="uint"/>
+  </instruction>
+
+  <instruction name="3DSTATE_MONOFILTER_SIZE" bias="2" length="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="1"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="17"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="0"/>
+    <field name="Monochrome Filter Width" start="35" end="37" type="uint"/>
+    <field name="Monochrome Filter Height" start="32" end="34" type="uint"/>
+  </instruction>
+
+  <instruction name="3DSTATE_MULTISAMPLE" bias="2" length="4">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="1"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="13"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="2"/>
+    <field name="Pixel Location" start="36" end="36" type="uint">
+      <value name="PIXLOC_CENTER" value="0"/>
+      <value name="PIXLOC_UL_CORNER" value="1"/>
+    </field>
+    <field name="Number of Multisamples" start="33" end="35" type="uint">
+      <value name="NUMSAMPLES_1" value="0"/>
+      <value name="NUMSAMPLES_4" value="2"/>
+      <value name="NUMSAMPLES_8" value="3"/>
+    </field>
+    <field name="Sample3 X Offset" start="92" end="95" type="u0.4"/>
+    <field name="Sample3 Y Offset" start="88" end="91" type="u0.4"/>
+    <field name="Sample2 X Offset" start="84" end="87" type="u0.4"/>
+    <field name="Sample2 Y Offset" start="80" end="83" type="u0.4"/>
+    <field name="Sample1 X Offset" start="76" end="79" type="u0.4"/>
+    <field name="Sample1 Y Offset" start="72" end="75" type="u0.4"/>
+    <field name="Sample0 X Offset" start="68" end="71" type="u0.4"/>
+    <field name="Sample0 Y Offset" start="64" end="67" type="u0.4"/>
+    <field name="Sample7 X Offset" start="124" end="127" type="u0.4"/>
+    <field name="Sample7 Y Offset" start="120" end="123" type="u0.4"/>
+    <field name="Sample6 X Offset" start="116" end="119" type="u0.4"/>
+    <field name="Sample6 Y Offset" start="112" end="115" type="u0.4"/>
+    <field name="Sample5 X Offset" start="108" end="111" type="u0.4"/>
+    <field name="Sample5 Y Offset" start="104" end="107" type="u0.4"/>
+    <field name="Sample4 X Offset" start="100" end="103" type="u0.4"/>
+    <field name="Sample4 Y Offset" start="96" end="99" type="u0.4"/>
+  </instruction>
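+
+  <!-- The SampleN X/Y Offset fields are u0.4 fixed point (0 integer bits,
+       4 fraction bits), so an encoded value v represents v/16 of a pixel;
+       0x8 is 0.5, the pixel center. -->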
+
+  <instruction name="3DSTATE_POLY_STIPPLE_OFFSET" bias="2" length="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="1"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="6"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="0"/>
+    <field name="Polygon Stipple X Offset" start="40" end="44" type="uint"/>
+    <field name="Polygon Stipple Y Offset" start="32" end="36" type="uint"/>
+  </instruction>
+
+  <instruction name="3DSTATE_POLY_STIPPLE_PATTERN" bias="2" length="33">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="1"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="7"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="31"/>
+    <group count="32" start="32" size="32">
+      <field name="Pattern Row" start="0" end="31" type="uint"/>
+    </group>
+  </instruction>
+
+  <instruction name="3DSTATE_PS" bias="2" length="8">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="32"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="6"/>
+    <field name="Kernel Start Pointer[0]" start="38" end="63" type="offset"/>
+    <field name="Single Program Flow (SPF)" start="95" end="95" type="uint"/>
+    <field name="Vector Mask Enable (VME)" start="94" end="94" type="uint"/>
+    <field name="Sampler Count" start="91" end="93" type="uint"/>
+    <field name="Denormal Mode" start="90" end="90" type="uint">
+      <value name="FTZ" value="0"/>
+      <value name="RET" value="1"/>
+    </field>
+    <field name="Binding Table Entry Count" start="82" end="89" type="uint"/>
+    <field name="Floating Point Mode" start="80" end="80" type="uint">
+      <value name="IEEE-745" value="0"/>
+      <value name="Alt" value="1"/>
+    </field>
+    <field name="Rounding Mode" start="78" end="79" type="uint">
+      <value name="RTNE" value="0"/>
+      <value name="RU" value="1"/>
+      <value name="RD" value="2"/>
+      <value name="RTZ" value="3"/>
+    </field>
+    <field name="Illegal Opcode Exception Enable" start="77" end="77" type="bool"/>
+    <field name="Mask Stack Exception Enable" start="75" end="75" type="bool"/>
+    <field name="Software  Exception Enable" start="71" end="71" type="bool"/>
+    <field name="Scratch Space Base Pointer" start="106" end="127" type="offset"/>
+    <field name="Per Thread Scratch Space" start="96" end="99" type="uint"/>
+    <field name="Maximum Number of Threads" start="152" end="159" type="uint"/>
+    <field name="Push Constant Enable" start="139" end="139" type="bool"/>
+    <field name="Attribute Enable" start="138" end="138" type="bool"/>
+    <field name="oMask Present to RenderTarget" start="137" end="137" type="bool"/>
+    <field name="Render Target Fast Clear Enable" start="136" end="136" type="bool"/>
+    <field name="Dual Source Blend Enable" start="135" end="135" type="bool"/>
+    <field name="Render Target Resolve Enable" start="134" end="134" type="bool"/>
+    <field name="Position XY Offset Select" start="131" end="132" type="uint">
+      <value name="POSOFFSET_NONE" value="0"/>
+      <value name="POSOFFSET_CENTROID" value="2"/>
+      <value name="POSOFFSET_SAMPLE" value="3"/>
+    </field>
+    <field name="32 Pixel Dispatch Enable" start="130" end="130" type="bool"/>
+    <field name="16 Pixel Dispatch Enable" start="129" end="129" type="bool"/>
+    <field name="8 Pixel Dispatch Enable" start="128" end="128" type="bool"/>
+    <field name="Dispatch GRF Start Register for Constant/Setup Data [0]" start="176" end="182" type="uint"/>
+    <field name="Dispatch GRF Start Register for Constant/Setup Data [1]" start="168" end="174" type="uint"/>
+    <field name="Dispatch GRF Start Register for Constant/Setup Data [2]" start="160" end="166" type="uint"/>
+    <field name="Kernel Start Pointer[1]" start="198" end="223" type="offset"/>
+    <field name="Kernel Start Pointer[2]" start="230" end="255" type="offset"/>
+  </instruction>
+
+  <instruction name="3DSTATE_PUSH_CONSTANT_ALLOC_DS" bias="2" length="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="1"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="20"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="0"/>
+    <field name="Constant Buffer Offset" start="48" end="51" type="uint">
+      <value name="0KB" value="0"/>
+    </field>
+    <field name="Constant Buffer Size" start="32" end="36" type="uint">
+      <value name="0KB" value="0"/>
+    </field>
+  </instruction>
+
+  <instruction name="3DSTATE_PUSH_CONSTANT_ALLOC_GS" bias="2" length="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="1"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="21"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="0"/>
+    <field name="Constant Buffer Offset" start="48" end="51" type="uint">
+      <value name="0KB" value="0"/>
+    </field>
+    <field name="Constant Buffer Size" start="32" end="36" type="uint">
+      <value name="0KB" value="0"/>
+    </field>
+  </instruction>
+
+  <instruction name="3DSTATE_PUSH_CONSTANT_ALLOC_HS" bias="2" length="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="1"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="19"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="0"/>
+    <field name="Constant Buffer Offset" start="48" end="51" type="uint">
+      <value name="0KB" value="0"/>
+    </field>
+    <field name="Constant Buffer Size" start="32" end="36" type="uint">
+      <value name="0KB" value="0"/>
+    </field>
+  </instruction>
+
+  <instruction name="3DSTATE_PUSH_CONSTANT_ALLOC_PS" bias="2" length="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="1"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="22"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="0"/>
+    <field name="Constant Buffer Offset" start="48" end="51" type="uint">
+      <value name="0KB" value="0"/>
+    </field>
+    <field name="Constant Buffer Size" start="32" end="36" type="uint">
+      <value name="0KB" value="0"/>
+    </field>
+  </instruction>
+
+  <instruction name="3DSTATE_PUSH_CONSTANT_ALLOC_VS" bias="2" length="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="1"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="18"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="0"/>
+    <field name="Constant Buffer Offset" start="48" end="51" type="uint">
+      <value name="0KB" value="0"/>
+    </field>
+    <field name="Constant Buffer Size" start="32" end="36" type="uint">
+      <value name="0KB" value="0"/>
+    </field>
+  </instruction>
+
+  <instruction name="3DSTATE_SAMPLER_PALETTE_LOAD0" bias="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="1"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="2"/>
+    <field name="DWord Length" start="0" end="7" type="uint"/>
+    <group count="0" start="32" size="32">
+      <field name="Entry" start="0" end="31" type="PALETTE_ENTRY"/>
+    </group>
+  </instruction>
+
+  <instruction name="3DSTATE_SAMPLER_PALETTE_LOAD1" bias="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="1"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="12"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="0"/>
+    <group count="0" start="32" size="32">
+      <field name="Palette Alpha[0:N-1]" start="24" end="31" type="uint"/>
+      <field name="Palette Red[0:N-1]" start="16" end="23" type="uint"/>
+      <field name="Palette Green[0:N-1]" start="8" end="15" type="uint"/>
+      <field name="Palette Blue[0:N-1]" start="0" end="7" type="uint"/>
+    </group>
+  </instruction>
+
+  <instruction name="3DSTATE_SAMPLER_STATE_POINTERS_DS" bias="2" length="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="45"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="0"/>
+    <field name="Pointer to DS Sampler State" start="37" end="63" type="offset"/>
+  </instruction>
+
+  <instruction name="3DSTATE_SAMPLER_STATE_POINTERS_GS" bias="2" length="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="46"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="0"/>
+    <field name="Pointer to GS Sampler State" start="37" end="63" type="offset"/>
+  </instruction>
+
+  <instruction name="3DSTATE_SAMPLER_STATE_POINTERS_HS" bias="2" length="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="44"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="0"/>
+    <field name="Pointer to HS Sampler State" start="37" end="63" type="offset"/>
+  </instruction>
+
+  <instruction name="3DSTATE_SAMPLER_STATE_POINTERS_PS" bias="2" length="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="47"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="0"/>
+    <field name="Pointer to PS Sampler State" start="37" end="63" type="offset"/>
+  </instruction>
+
+  <instruction name="3DSTATE_SAMPLER_STATE_POINTERS_VS" bias="2" length="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="43"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="0"/>
+    <field name="Pointer to VS Sampler State" start="37" end="63" type="offset"/>
+  </instruction>
+
+  <instruction name="3DSTATE_SAMPLE_MASK" bias="2" length="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="24"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="0"/>
+    <field name="Sample Mask" start="32" end="39" type="uint"/>
+  </instruction>
+
+  <instruction name="3DSTATE_SBE" bias="2" length="14">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="31"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="12"/>
+    <field name="Attribute Swizzle Control Mode" start="60" end="60" type="uint">
+      <value name="SWIZ_0_15" value="0"/>
+      <value name="SWIZ_16_31" value="1"/>
+    </field>
+    <field name="Number of SF Output Attributes" start="54" end="59" type="uint"/>
+    <field name="Attribute Swizzle Enable" start="53" end="53" type="bool"/>
+    <field name="Point Sprite Texture Coordinate Origin" start="52" end="52" type="uint">
+      <value name="UPPERLEFT" value="0"/>
+      <value name="LOWERLEFT" value="1"/>
+    </field>
+    <field name="Vertex URB Entry Read Length" start="43" end="47" type="uint"/>
+    <field name="Vertex URB Entry Read Offset" start="36" end="41" type="uint"/>
+    <group count="16" start="64" size="16">
+      <field name="Attribute" start="0" end="15" type="SF_OUTPUT_ATTRIBUTE_DETAIL"/>
+    </group>
+    <field name="Point Sprite Texture Coordinate Enable" start="320" end="351" type="uint"/>
+    <field name="Constant Interpolation Enable[31:0]" start="352" end="383" type="uint"/>
+    <field name="Attribute 7 WrapShortest Enables" start="412" end="415" type="uint"/>
+    <field name="Attribute 6 WrapShortest Enables" start="408" end="411" type="uint"/>
+    <field name="Attribute 5 WrapShortest Enables" start="404" end="407" type="uint"/>
+    <field name="Attribute 4 WrapShortest Enables" start="400" end="403" type="uint"/>
+    <field name="Attribute 3 WrapShortest Enables" start="396" end="399" type="uint"/>
+    <field name="Attribute 2 WrapShortest Enables" start="392" end="395" type="uint"/>
+    <field name="Attribute 1 WrapShortest Enables" start="388" end="391" type="uint"/>
+    <field name="Attribute 0 WrapShortest Enables" start="384" end="387" type="uint"/>
+    <field name="Attribute 15 WrapShortest Enables" start="444" end="447" type="uint"/>
+    <field name="Attribute 14 WrapShortest Enables" start="440" end="443" type="uint"/>
+    <field name="Attribute 13 WrapShortest Enables" start="436" end="439" type="uint"/>
+    <field name="Attribute 12 WrapShortest Enables" start="432" end="435" type="uint"/>
+    <field name="Attribute 11 WrapShortest Enables" start="428" end="431" type="uint"/>
+    <field name="Attribute 10 WrapShortest Enables" start="424" end="427" type="uint"/>
+    <field name="Attribute 9 WrapShortest Enables" start="420" end="423" type="uint"/>
+    <field name="Attribute 8 WrapShortest Enables" start="416" end="419" type="uint"/>
+  </instruction>
+
+  <instruction name="3DSTATE_SCISSOR_STATE_POINTERS" bias="2" length="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="15"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="0"/>
+    <field name="Scissor Rect Pointer" start="37" end="63" type="offset"/>
+  </instruction>
+
+  <instruction name="3DSTATE_SF" bias="2" length="7">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="19"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="5"/>
+    <field name="Depth Buffer Surface Format" start="44" end="46" type="uint">
+      <value name="D32_FLOAT_S8X24_UINT" value="0"/>
+      <value name="D32_FLOAT" value="1"/>
+      <value name="D24_UNORM_S8_UINT" value="2"/>
+      <value name="D24_UNORM_X8_UINT" value="3"/>
+      <value name="D16_UNORM" value="5"/>
+    </field>
+    <field name="Legacy Global Depth Bias Enable" start="43" end="43" type="bool"/>
+    <field name="Statistics Enable" start="42" end="42" type="bool"/>
+    <field name="Global Depth Offset Enable Solid" start="41" end="41" type="bool"/>
+    <field name="Global Depth Offset Enable Wireframe" start="40" end="40" type="bool"/>
+    <field name="Global Depth Offset Enable Point" start="39" end="39" type="bool"/>
+    <field name="FrontFace Fill Mode" start="37" end="38" type="uint" prefix="FILL_MODE">
+      <value name="SOLID" value="0"/>
+      <value name="WIREFRAME" value="1"/>
+      <value name="POINT" value="2"/>
+    </field>
+    <field name="BackFace Fill Mode" start="35" end="36" type="uint" prefix="FILL_MODE">
+      <value name="SOLID" value="0"/>
+      <value name="WIREFRAME" value="1"/>
+      <value name="POINT" value="2"/>
+    </field>
+    <field name="View Transform Enable" start="33" end="33" type="bool"/>
+    <field name="Front Winding" start="32" end="32" type="uint"/>
+    <field name="Anti-Aliasing Enable" start="95" end="95" type="bool"/>
+    <field name="Cull Mode" start="93" end="94" type="uint" prefix="CULLMODE">
+      <value name="BOTH" value="0"/>
+      <value name="NONE" value="1"/>
+      <value name="FRONT" value="2"/>
+      <value name="BACK" value="3"/>
+    </field>
+    <field name="Line Width" start="82" end="91" type="u3.7"/>
+    <field name="Line End Cap Antialiasing Region Width" start="80" end="81" type="uint"/>
+    <field name="Scissor Rectangle Enable" start="75" end="75" type="bool"/>
+    <field name="Multisample Rasterization  Mode" start="72" end="73" type="uint"/>
+    <field name="Last Pixel Enable" start="127" end="127" type="bool"/>
+    <field name="Triangle Strip/List Provoking Vertex Select" start="125" end="126" type="uint">
+      <value name="Vertex 0" value="0"/>
+      <value name="Vertex 1" value="1"/>
+      <value name="Vertex 2" value="2"/>
+    </field>
+    <field name="Line Strip/List Provoking Vertex Select" start="123" end="124" type="uint"/>
+    <field name="Triangle Fan Provoking Vertex Select" start="121" end="122" type="uint">
+      <value name="Vertex 0" value="0"/>
+      <value name="Vertex 1" value="1"/>
+      <value name="Vertex 2" value="2"/>
+    </field>
+    <field name="AA Line Distance Mode" start="110" end="110" type="uint">
+      <value name="AALINEDISTANCE_TRUE" value="1"/>
+    </field>
+    <field name="Vertex Sub Pixel Precision Select" start="108" end="108" type="uint"/>
+    <field name="Use Point Width State" start="107" end="107" type="uint"/>
+    <field name="Point Width" start="96" end="106" type="u8.3"/>
+    <field name="Global Depth Offset Constant" start="128" end="159" type="float"/>
+    <field name="Global Depth Offset Scale" start="160" end="191" type="float"/>
+    <field name="Global Depth Offset Clamp" start="192" end="223" type="float"/>
+  </instruction>
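
The "u3.7" and "u8.3" field types used above for Line Width and Point Width appear to be unsigned fixed-point encodings with the stated number of integer and fractional bits, i.e. the raw field value is the floating-point value scaled by two to the fractional bit count. A minimal Python sketch of that conversion, assuming plain round-to-nearest and ignoring any hardware-specific clamping rules:

def to_ufixed(value, int_bits, frac_bits):
    """Encode a non-negative float as an unsigned u<int_bits>.<frac_bits> raw field value."""
    max_raw = (1 << (int_bits + frac_bits)) - 1
    raw = int(round(value * (1 << frac_bits)))
    return min(max(raw, 0), max_raw)

line_width_raw = to_ufixed(1.5, 3, 7)   # 192, for the 10-bit u3.7 Line Width field (bits 82..91)
point_width_raw = to_ufixed(4.0, 8, 3)  # 32, for the 11-bit u8.3 Point Width field (bits 96..106)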
+
+  <instruction name="3DSTATE_SO_BUFFER" bias="2" length="4">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="1"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="24"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="2"/>
+    <field name="SO Buffer Index" start="61" end="62" type="uint"/>
+    <field name="SO Buffer Object Control State" start="57" end="60" type="MEMORY_OBJECT_CONTROL_STATE"/>
+    <field name="Surface Pitch" start="32" end="43" type="uint"/>
+    <field name="Surface Base Address" start="66" end="95" type="address"/>
+    <field name="Surface End Address" start="98" end="127" type="address"/>
+  </instruction>
+
+  <instruction name="3DSTATE_SO_DECL_LIST" bias="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="1"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="23"/>
+    <field name="DWord Length" start="0" end="8" type="uint"/>
+    <field name="Stream to Buffer Selects [3]" start="44" end="47" type="uint"/>
+    <field name="Stream to Buffer Selects [2]" start="40" end="43" type="uint"/>
+    <field name="Stream to Buffer Selects [1]" start="36" end="39" type="uint"/>
+    <field name="Stream to Buffer Selects [0]" start="32" end="35" type="uint"/>
+    <field name="Num Entries [3]" start="88" end="95" type="uint"/>
+    <field name="Num Entries [2]" start="80" end="87" type="uint"/>
+    <field name="Num Entries [1]" start="72" end="79" type="uint"/>
+    <field name="Num Entries [0]" start="64" end="71" type="uint"/>
+    <group count="0" start="96" size="64">
+      <field name="Entry" start="0" end="63" type="SO_DECL_ENTRY"/>
+    </group>
+  </instruction>
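
The <group> elements describe repeated sub-structures: "start" is the bit offset of the first element and "size" its stride in bits. A count of "0", as in the Entry group of 3DSTATE_SO_DECL_LIST above, appears to mark a variable-length array whose real element count is implied by the command's DWord Length rather than being fixed. A rough Python sketch of walking such a group under that assumption:

def group_element_offsets(start, size, count):
    """Yield the absolute bit offset of each element of a <group>."""
    for i in range(count):
        yield start + i * size

# SO_DECL_ENTRY elements are 64 bits wide and begin at bit 96 of the command;
# num_entries is hypothetical here and would come from DWord Length in practice.
num_entries = 4
list(group_element_offsets(96, 64, num_entries))  # [96, 160, 224, 288]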
+
+  <instruction name="3DSTATE_STENCIL_BUFFER" bias="2" length="3">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="6"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="1"/>
+    <field name="Stencil Buffer Object Control State" start="57" end="60" type="MEMORY_OBJECT_CONTROL_STATE"/>
+    <field name="Surface Pitch" start="32" end="48" type="uint"/>
+    <field name="Surface Base Address" start="64" end="95" type="address"/>
+  </instruction>
+
+  <instruction name="3DSTATE_STREAMOUT" bias="2" length="3">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="30"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="1"/>
+    <field name="SO Function Enable" start="63" end="63" type="uint"/>
+    <field name="Rendering Disable" start="62" end="62" type="uint"/>
+    <field name="Render Stream Select" start="59" end="60" type="uint"/>
+    <field name="Reorder Mode" start="58" end="58" type="uint">
+      <value name="LEADING" value="0"/>
+      <value name="TRAILING" value="1"/>
+    </field>
+    <field name="SO Statistics Enable" start="57" end="57" type="bool"/>
+    <field name="SO Buffer Enable [3]" start="43" end="43" type="uint"/>
+    <field name="SO Buffer Enable [2]" start="42" end="42" type="uint"/>
+    <field name="SO Buffer Enable [1]" start="41" end="41" type="uint"/>
+    <field name="SO Buffer Enable [0]" start="40" end="40" type="uint"/>
+    <field name="Stream 3 Vertex Read Offset" start="93" end="93" type="uint"/>
+    <field name="Stream 3 Vertex Read Length" start="88" end="92" type="uint"/>
+    <field name="Stream 2 Vertex Read Offset" start="85" end="85" type="uint"/>
+    <field name="Stream 2 Vertex Read Length" start="80" end="84" type="uint"/>
+    <field name="Stream 1 Vertex Read Offset" start="77" end="77" type="uint"/>
+    <field name="Stream 1 Vertex Read Length" start="72" end="76" type="uint"/>
+    <field name="Stream 0 Vertex Read Offset" start="69" end="69" type="uint"/>
+    <field name="Stream 0 Vertex Read Length" start="64" end="68" type="uint"/>
+  </instruction>
+
+  <instruction name="3DSTATE_TE" bias="2" length="4">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="28"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="2"/>
+    <field name="Partitioning" start="44" end="45" type="uint">
+      <value name="INTEGER" value="0"/>
+      <value name="ODD_FRACTIONAL" value="1"/>
+      <value name="EVEN_FRACTIONAL" value="2"/>
+    </field>
+    <field name="Output Topology" start="40" end="41" type="uint" prefix="OUTPUT">
+      <value name="POINT" value="0"/>
+      <value name="LINE" value="1"/>
+      <value name="TRI_CW" value="2"/>
+      <value name="TRI_CCW" value="3"/>
+    </field>
+    <field name="TE Domain" start="36" end="37" type="uint">
+      <value name="QUAD" value="0"/>
+      <value name="TRI" value="1"/>
+      <value name="ISOLINE" value="2"/>
+    </field>
+    <field name="TE Mode" start="33" end="34" type="uint">
+      <value name="HW_TESS" value="0"/>
+      <value name="SW_TESS" value="1"/>
+    </field>
+    <field name="TE Enable" start="32" end="32" type="bool"/>
+    <field name="Maximum Tessellation Factor Odd" start="64" end="95" type="float"/>
+    <field name="Maximum Tessellation Factor Not Odd" start="96" end="127" type="float"/>
+  </instruction>
+
+  <instruction name="3DSTATE_URB_DS" bias="2" length="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="50"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="0"/>
+    <field name="DS URB Starting Address" start="57" end="61" type="uint"/>
+    <field name="DS URB Entry Allocation Size" start="48" end="56" type="uint"/>
+    <field name="DS Number of URB Entries" start="32" end="47" type="uint"/>
+  </instruction>
+
+  <instruction name="3DSTATE_URB_GS" bias="2" length="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="51"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="0"/>
+    <field name="GS URB Starting Address" start="57" end="61" type="uint"/>
+    <field name="GS URB Entry Allocation Size" start="48" end="56" type="uint"/>
+    <field name="GS Number of URB Entries" start="32" end="47" type="uint"/>
+  </instruction>
+
+  <instruction name="3DSTATE_URB_HS" bias="2" length="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="49"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="0"/>
+    <field name="HS URB Starting Address" start="57" end="61" type="uint"/>
+    <field name="HS URB Entry Allocation Size" start="48" end="56" type="uint"/>
+    <field name="HS Number of URB Entries" start="32" end="47" type="uint"/>
+  </instruction>
+
+  <instruction name="3DSTATE_URB_VS" bias="2" length="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="48"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="0"/>
+    <field name="VS URB Starting Address" start="57" end="61" type="uint"/>
+    <field name="VS URB Entry Allocation Size" start="48" end="56" type="uint"/>
+    <field name="VS Number of URB Entries" start="32" end="47" type="uint"/>
+  </instruction>
+
+  <instruction name="3DSTATE_VERTEX_BUFFERS" bias="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="8"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="3"/>
+    <group count="0" start="32" size="128">
+      <field name="Vertex Buffer State" start="0" end="127" type="VERTEX_BUFFER_STATE"/>
+    </group>
+  </instruction>
+
+  <instruction name="3DSTATE_VERTEX_ELEMENTS" bias="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="9"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="1"/>
+    <group count="0" start="32" size="64">
+      <field name="Element" start="0" end="63" type="VERTEX_ELEMENT_STATE"/>
+    </group>
+  </instruction>
+
+  <instruction name="3DSTATE_VF_STATISTICS" bias="1" length="1">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="1"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="11"/>
+    <field name="Statistics Enable" start="0" end="0" type="bool"/>
+  </instruction>
+
+  <instruction name="3DSTATE_VIEWPORT_STATE_POINTERS_CC" bias="2" length="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="35"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="0"/>
+    <field name="CC Viewport Pointer" start="37" end="63" type="offset"/>
+  </instruction>
+
+  <instruction name="3DSTATE_VIEWPORT_STATE_POINTERS_SF_CLIP" bias="2" length="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="33"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="0"/>
+    <field name="SF Clip Viewport Pointer" start="38" end="63" type="offset"/>
+  </instruction>
+
+  <instruction name="3DSTATE_VS" bias="2" length="6">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="16"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="4"/>
+    <field name="Kernel Start Pointer" start="38" end="63" type="offset"/>
+    <field name="Single Vertex Dispatch" start="95" end="95" type="bool"/>
+    <field name="Vector Mask Enable (VME)" start="94" end="94" type="uint"/>
+    <field name="Sampler Count" start="91" end="93" type="uint">
+      <value name="No Samplers" value="0"/>
+      <value name="1-4 Samplers" value="1"/>
+      <value name="5-8 Samplers" value="2"/>
+      <value name="9-12 Samplers" value="3"/>
+      <value name="13-16 Samplers" value="4"/>
+    </field>
+    <field name="Binding Table Entry Count" start="82" end="89" type="uint"/>
+    <field name="Floating Point Mode" start="80" end="80" type="uint">
+      <value name="IEEE-754" value="0"/>
+      <value name="Alternate" value="1"/>
+    </field>
+    <field name="Illegal Opcode Exception Enable" start="77" end="77" type="bool"/>
+    <field name="Software  Exception Enable" start="71" end="71" type="bool"/>
+    <field name="Scratch Space Base Offset" start="106" end="127" type="offset"/>
+    <field name="Per-Thread Scratch Space" start="96" end="99" type="uint"/>
+    <field name="Dispatch GRF Start Register for URB Data" start="148" end="152" type="uint"/>
+    <field name="Vertex URB Entry Read Length" start="139" end="144" type="uint"/>
+    <field name="Vertex URB Entry Read Offset" start="132" end="137" type="uint"/>
+    <field name="Maximum Number of Threads" start="185" end="191" type="uint"/>
+    <field name="Statistics Enable" start="170" end="170" type="bool"/>
+    <field name="Vertex Cache Disable" start="161" end="161" type="bool"/>
+    <field name="VS Function Enable" start="160" end="160" type="bool"/>
+  </instruction>
+
+  <instruction name="3DSTATE_WM" bias="2" length="3">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="20"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="1"/>
+    <field name="Statistics Enable" start="63" end="63" type="bool"/>
+    <field name="Depth Buffer Clear" start="62" end="62" type="bool"/>
+    <field name="Thread Dispatch Enable" start="61" end="61" type="bool"/>
+    <field name="Depth Buffer Resolve Enable" start="60" end="60" type="bool"/>
+    <field name="Hierarchical Depth Buffer Resolve Enable" start="59" end="59" type="bool"/>
+    <field name="Legacy Diamond Line Rasterization" start="58" end="58" type="bool"/>
+    <field name="Pixel Shader Kill Pixel" start="57" end="57" type="bool"/>
+    <field name="Pixel Shader Computed Depth Mode" start="55" end="56" type="uint">
+      <value name="PSCDEPTH_OFF" value="0"/>
+      <value name="PSCDEPTH_ON" value="1"/>
+      <value name="PSCDEPTH_ON_GE" value="2"/>
+      <value name="PSCDEPTH_ON_LE" value="3"/>
+    </field>
+    <field name="Early Depth/Stencil Control" start="53" end="54" type="uint">
+      <value name="EDSC_NORMAL" value="0"/>
+      <value name="EDSC_PSEXEC" value="1"/>
+      <value name="EDSC_PREPS" value="2"/>
+    </field>
+    <field name="Pixel Shader Uses Source Depth" start="52" end="52" type="bool"/>
+    <field name="Pixel Shader Uses Source W" start="51" end="51" type="bool"/>
+    <field name="Position ZW Interpolation Mode" start="49" end="50" type="uint">
+      <value name="INTERP_PIXEL" value="0"/>
+      <value name="INTERP_CENTROID" value="2"/>
+      <value name="INTERP_SAMPLE" value="3"/>
+    </field>
+    <field name="Barycentric Interpolation Mode" start="43" end="48" type="uint"/>
+    <field name="Pixel Shader Uses Input Coverage Mask" start="42" end="42" type="bool"/>
+    <field name="Line End Cap Antialiasing Region Width" start="40" end="41" type="uint"/>
+    <field name="Line Antialiasing Region Width" start="38" end="39" type="uint"/>
+    <field name="Polygon Stipple Enable" start="36" end="36" type="bool"/>
+    <field name="Line Stipple Enable" start="35" end="35" type="bool"/>
+    <field name="Point Rasterization Rule" start="34" end="34" type="uint">
+      <value name="RASTRULE_UPPER_LEFT" value="0"/>
+      <value name="RASTRULE_UPPER_RIGHT" value="1"/>
+    </field>
+    <field name="Multisample Rasterization Mode" start="32" end="33" type="uint">
+      <value name="MSRASTMODE_OFF_PIXEL" value="0"/>
+      <value name="MSRASTMODE_OFF_PATTERN" value="1"/>
+      <value name="MSRASTMODE_ON_PIXEL" value="2"/>
+      <value name="MSRASTMODE_ON_PATTERN" value="3"/>
+    </field>
+    <field name="Multisample Dispatch Mode" start="95" end="95" type="uint">
+      <value name="MSDISPMODE_PERSAMPLE" value="0"/>
+      <value name="MSDISPMODE_PERPIXEL" value="1"/>
+    </field>
+  </instruction>
+
+  <instruction name="GPGPU_OBJECT" bias="2" length="8">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Pipeline" start="27" end="28" type="uint" default="2"/>
+    <field name="Media Command Opcode" start="24" end="26" type="uint" default="1"/>
+    <field name="SubOpcode" start="16" end="23" type="uint" default="4"/>
+    <field name="Predicate Enable" start="8" end="8" type="bool"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="6"/>
+    <field name="Shared Local Memory Fixed Offset" start="39" end="39" type="uint"/>
+    <field name="Interface Descriptor Offset" start="32" end="36" type="uint"/>
+    <field name="Shared Local Memory Offset" start="92" end="95" type="uint"/>
+    <field name="End of Thread Group" start="88" end="88" type="uint"/>
+    <field name="Half-Slice Destination Select" start="81" end="82" type="uint">
+      <value name="Half-Slice 1" value="2"/>
+      <value name="Half-Slice 0" value="1"/>
+      <value name="Either Half-Slice" value="0"/>
+    </field>
+    <field name="Indirect Data Length" start="64" end="80" type="uint"/>
+    <field name="Indirect Data Start Address" start="96" end="127" type="offset"/>
+    <field name="Thread Group ID X" start="128" end="159" type="uint"/>
+    <field name="Thread Group ID Y" start="160" end="191" type="uint"/>
+    <field name="Thread Group ID Z" start="192" end="223" type="uint"/>
+    <field name="Execution Mask" start="224" end="255" type="uint"/>
+  </instruction>
+
+  <instruction name="GPGPU_WALKER" bias="2" length="11">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Pipeline" start="27" end="28" type="uint" default="2"/>
+    <field name="Media Command Opcode" start="24" end="26" type="uint" default="1"/>
+    <field name="SubOpcode A" start="16" end="23" type="uint" default="5"/>
+    <field name="Indirect Parameter Enable" start="10" end="10" type="bool"/>
+    <field name="Predicate Enable" start="8" end="8" type="bool"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="9"/>
+    <field name="Interface Descriptor Offset" start="32" end="36" type="uint"/>
+    <field name="SIMD Size" start="94" end="95" type="uint">
+      <value name="SIMD8" value="0"/>
+      <value name="SIMD16" value="1"/>
+      <value name="SIMD32" value="2"/>
+    </field>
+    <field name="Thread Depth Counter Maximum" start="80" end="85" type="uint"/>
+    <field name="Thread Height Counter Maximum" start="72" end="77" type="uint"/>
+    <field name="Thread Width Counter Maximum" start="64" end="69" type="uint"/>
+    <field name="Thread Group ID Starting X" start="96" end="127" type="uint"/>
+    <field name="Thread Group ID X Dimension" start="128" end="159" type="uint"/>
+    <field name="Thread Group ID Starting Y" start="160" end="191" type="uint"/>
+    <field name="Thread Group ID Y Dimension" start="192" end="223" type="uint"/>
+    <field name="Thread Group ID Starting Z" start="224" end="255" type="uint"/>
+    <field name="Thread Group ID Z Dimension" start="256" end="287" type="uint"/>
+    <field name="Right Execution Mask" start="288" end="319" type="uint"/>
+    <field name="Bottom Execution Mask" start="320" end="351" type="uint"/>
+  </instruction>
+
+  <instruction name="MEDIA_CURBE_LOAD" bias="2" length="4">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Pipeline" start="27" end="28" type="uint" default="2"/>
+    <field name="Media Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="SubOpcode" start="16" end="23" type="uint" default="1"/>
+    <field name="DWord Length" start="0" end="15" type="uint" default="2"/>
+    <field name="CURBE Total Data Length" start="64" end="80" type="uint"/>
+    <field name="CURBE Data Start Address" start="96" end="127" type="uint"/>
+  </instruction>
+
+  <instruction name="MEDIA_INTERFACE_DESCRIPTOR_LOAD" bias="2" length="4">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Pipeline" start="27" end="28" type="uint" default="2"/>
+    <field name="Media Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="SubOpcode" start="16" end="23" type="uint" default="2"/>
+    <field name="DWord Length" start="0" end="15" type="uint" default="2"/>
+    <field name="Interface Descriptor Total Length" start="64" end="80" type="uint"/>
+    <field name="Interface Descriptor Data Start Address" start="96" end="127" type="offset"/>
+  </instruction>
+
+  <instruction name="MEDIA_OBJECT" bias="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Media Command Pipeline" start="27" end="28" type="uint" default="2"/>
+    <field name="Media Command Opcode" start="24" end="26" type="uint" default="1"/>
+    <field name="Media Command Sub-Opcode" start="16" end="23" type="uint" default="0"/>
+    <field name="DWord Length" start="0" end="15" type="uint" default="4"/>
+    <field name="Interface Descriptor Offset" start="32" end="36" type="uint"/>
+    <field name="Children Present" start="95" end="95" type="bool"/>
+    <field name="Thread Synchronization" start="88" end="88" type="uint">
+      <value name="No thread synchronization" value="0"/>
+      <value name="Thread dispatch is synchronized by the 'spawn root thread' message" value="1"/>
+    </field>
+    <field name="Use Scoreboard" start="85" end="85" type="uint">
+      <value name="Not using scoreboard" value="0"/>
+      <value name="Using scoreboard" value="1"/>
+    </field>
+    <field name="Half-Slice Destination Select" start="81" end="82" type="uint">
+      <value name="Half-Slice 1" value="2"/>
+      <value name="Half-Slice 0" value="1"/>
+      <value name="Either half-slice" value="0"/>
+    </field>
+    <field name="Indirect Data Length" start="64" end="80" type="uint"/>
+    <field name="Indirect Data Start Address" start="96" end="127" type="address"/>
+    <field name="Scoredboard Y" start="144" end="152" type="uint"/>
+    <field name="Scoreboard X" start="128" end="136" type="uint"/>
+    <field name="Scoreboard Color" start="176" end="179" type="uint"/>
+    <field name="Scoreboard Mask" start="160" end="167" type="uint"/>
+    <group count="0" start="192" size="32">
+      <field name="Inline Data" start="0" end="31" type="uint"/>
+    </group>
+  </instruction>
+
+  <instruction name="MEDIA_OBJECT_PRT" bias="2" length="16">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Pipeline" start="27" end="28" type="uint" default="2"/>
+    <field name="Media Command Opcode" start="24" end="26" type="uint" default="1"/>
+    <field name="SubOpcode" start="16" end="23" type="uint" default="2"/>
+    <field name="DWord Length" start="0" end="15" type="uint" default="14"/>
+    <field name="Interface Descriptor Offset" start="32" end="36" type="uint"/>
+    <field name="Children Present" start="95" end="95" type="bool"/>
+    <field name="PRT_Fence Needed" start="87" end="87" type="bool"/>
+    <field name="PRT_FenceType" start="86" end="86" type="uint">
+      <value name="Root thread queue" value="0"/>
+      <value name="VFE state flush" value="1"/>
+    </field>
+    <group count="12" start="128" size="32">
+      <field name="Inline Data" start="0" end="31" type="uint"/>
+    </group>
+  </instruction>
+
+  <instruction name="MEDIA_OBJECT_WALKER" bias="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Pipeline" start="27" end="28" type="uint" default="2"/>
+    <field name="Media Command Opcode" start="24" end="26" type="uint" default="1"/>
+    <field name="SubOpcode" start="16" end="23" type="uint" default="3"/>
+    <field name="DWord Length" start="0" end="15" type="uint" default="15"/>
+    <field name="Interface Descriptor Offset" start="32" end="36" type="uint"/>
+    <field name="Children Present" start="95" end="95" type="uint"/>
+    <field name="Thread Synchronization" start="88" end="88" type="uint">
+      <value name="No thread synchronization" value="0"/>
+      <value name="Thread dispatch is synchronized by the 'spawn root thread' message" value="1"/>
+    </field>
+    <field name="Use Scoreboard" start="85" end="85" type="uint">
+      <value name="Not using scoreboard" value="0"/>
+      <value name="Using scoreboard" value="1"/>
+    </field>
+    <field name="Indirect Data Length" start="64" end="80" type="uint"/>
+    <field name="Indirect Data Start Address" start="96" end="127" type="offset"/>
+    <field name="Scoreboard Mask" start="160" end="167" type="uint"/>
+    <field name="Dual Mode" start="223" end="223" type="uint"/>
+    <field name="Repel" start="222" end="222" type="uint"/>
+    <field name="Color Count Minus One" start="216" end="219" type="uint"/>
+    <field name="Middle Loop Extra Steps" start="208" end="212" type="uint"/>
+    <field name="Local Mid-Loop Unit Y" start="204" end="205" type="int"/>
+    <field name="Mid-Loop Unit X" start="200" end="201" type="int"/>
+    <field name="Global Loop Exec Count" start="240" end="249" type="uint"/>
+    <field name="Local Loop Exec Count" start="224" end="233" type="uint"/>
+    <field name="Block Resolution Y" start="272" end="280" type="uint"/>
+    <field name="Block Resolution X" start="256" end="264" type="uint"/>
+    <field name="Local Start Y" start="304" end="312" type="uint"/>
+    <field name="Local Start X" start="288" end="296" type="uint"/>
+    <field name="Local End Y" start="336" end="344" type="uint"/>
+    <field name="Local End X" start="320" end="328" type="uint"/>
+    <field name="Local Outer Loop Stride Y" start="368" end="377" type="int"/>
+    <field name="Local Outer Loop Stride X" start="352" end="361" type="int"/>
+    <field name="Local Inner Loop Unit Y" start="400" end="409" type="int"/>
+    <field name="Local Inner Loop Unit X" start="384" end="393" type="int"/>
+    <field name="Global Resolution Y" start="432" end="440" type="uint"/>
+    <field name="Global Resolution X" start="416" end="424" type="uint"/>
+    <field name="Global Start Y" start="464" end="473" type="int"/>
+    <field name="Global Start X" start="448" end="457" type="int"/>
+    <field name="Global Outer Loop Stride Y" start="496" end="505" type="int"/>
+    <field name="Global Outer Loop Stride X" start="480" end="489" type="int"/>
+    <field name="Global Inner Loop Unit Y" start="528" end="537" type="int"/>
+    <field name="Global Inner Loop Unit X" start="512" end="521" type="int"/>
+    <group count="0" start="544" size="32">
+      <field name="Inline Data" start="0" end="31" type="uint"/>
+    </group>
+  </instruction>
+
+  <instruction name="MEDIA_STATE_FLUSH" bias="2" length="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Pipeline" start="27" end="28" type="uint" default="2"/>
+    <field name="Media Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="SubOpcode" start="16" end="23" type="uint" default="4"/>
+    <field name="DWord Length" start="0" end="15" type="uint" default="0"/>
+    <field name="Watermark Required" start="38" end="38" type="uint"/>
+    <field name="Interface Descriptor Offset" start="32" end="37" type="uint"/>
+  </instruction>
+
+  <instruction name="MEDIA_VFE_STATE" bias="2" length="8">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Pipeline" start="27" end="28" type="uint" default="2"/>
+    <field name="Media Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="SubOpcode" start="16" end="23" type="uint" default="0"/>
+    <field name="DWord Length" start="0" end="15" type="uint" default="6"/>
+    <field name="Scratch Space Base Pointer" start="42" end="63" type="offset"/>
+    <field name="Per Thread Scratch Space" start="32" end="35" type="uint"/>
+    <field name="Maximum Number of Threads" start="80" end="95" type="uint"/>
+    <field name="Number of URB Entries" start="72" end="79" type="uint"/>
+    <field name="Reset Gateway Timer" start="71" end="71" type="uint">
+      <value name="Maintaining the existing timestamp state" value="0"/>
+      <value name="Resetting relative timer and latching the global timestamp" value="1"/>
+    </field>
+    <field name="Bypass Gateway Control" start="70" end="70" type="uint">
+      <value name="Maintaining OpenGateway/ForwardMsg/CloseGateway protocol (legacy mode)" value="0"/>
+      <value name="Bypassing OpenGateway/CloseGateway protocol" value="1"/>
+    </field>
+    <field name="Gateway MMIO Access Control" start="67" end="68" type="uint">
+      <value name="No MMIO read/write allowed" value="0"/>
+      <value name="MMIO read/write to any address" value="2"/>
+    </field>
+    <field name="GPGPU Mode" start="66" end="66" type="uint"/>
+    <field name="URB Entry Allocation Size" start="144" end="159" type="uint"/>
+    <field name="CURBE Allocation Size" start="128" end="143" type="uint"/>
+    <field name="Scoreboard Enable" start="191" end="191" type="uint">
+      <value name="Scoreboard disabled" value="0"/>
+      <value name="Scoreboard enabled" value="1"/>
+    </field>
+    <field name="Scoreboard Type" start="190" end="190" type="uint">
+      <value name="Stalling Scoreboard" value="0"/>
+      <value name="Non-Stalling Scoreboard" value="1"/>
+    </field>
+    <field name="Scoreboard Mask" start="160" end="167" type="uint"/>
+    <field name="Scoreboard 3 Delta Y" start="220" end="223" type="int"/>
+    <field name="Scoreboard 3 Delta X" start="216" end="219" type="int"/>
+    <field name="Scoreboard 2 Delta Y" start="212" end="215" type="int"/>
+    <field name="Scoreboard 2 Delta X" start="208" end="211" type="int"/>
+    <field name="Scoreboard 1 Delta Y" start="204" end="207" type="int"/>
+    <field name="Scoreboard 1 Delta X" start="200" end="203" type="int"/>
+    <field name="Scoreboard 0 Delta Y" start="196" end="199" type="int"/>
+    <field name="Scoreboard 0 Delta X" start="192" end="195" type="int"/>
+    <field name="Scoreboard 7 Delta Y" start="252" end="255" type="int"/>
+    <field name="Scoreboard 7 Delta X" start="248" end="251" type="int"/>
+    <field name="Scoreboard 6 Delta Y" start="244" end="247" type="int"/>
+    <field name="Scoreboard 6 Delta X" start="240" end="243" type="int"/>
+    <field name="Scoreboard 5 Delta Y" start="236" end="239" type="int"/>
+    <field name="Scoreboard 5 Delta X" start="232" end="235" type="int"/>
+    <field name="Scoreboard 4 Delta Y" start="228" end="231" type="int"/>
+    <field name="Scoreboard 4 Delta X" start="224" end="227" type="int"/>
+  </instruction>
+
+  <instruction name="MI_ARB_CHECK" bias="1" length="1">
+    <field name="Command Type" start="29" end="31" type="uint" default="0"/>
+    <field name="MI Command Opcode" start="23" end="28" type="uint" default="5"/>
+  </instruction>
+
+  <instruction name="MI_ARB_ON_OFF" bias="1" length="1">
+    <field name="Command Type" start="29" end="31" type="uint" default="0"/>
+    <field name="MI Command Opcode" start="23" end="28" type="uint" default="8"/>
+    <field name="Arbitration Enable" start="0" end="0" type="bool"/>
+  </instruction>
+
+  <instruction name="MI_BATCH_BUFFER_END" bias="1" length="1">
+    <field name="Command Type" start="29" end="31" type="uint" default="0"/>
+    <field name="MI Command Opcode" start="23" end="28" type="uint" default="10"/>
+  </instruction>
+
+  <instruction name="MI_BATCH_BUFFER_START" bias="2" length="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="0"/>
+    <field name="MI Command Opcode" start="23" end="28" type="uint" default="49"/>
+    <field name="Clear Command Buffer Enable" start="11" end="11" type="bool"/>
+    <field name="Address Space Indicator" start="8" end="8" type="uint" prefix="ASI">
+      <value name="GGTT" value="0"/>
+      <value name="PPGTT" value="1"/>
+    </field>
+    <field name="DWord Length" start="0" end="7" type="uint" default="0"/>
+    <field name="Batch Buffer Start Address" start="34" end="63" type="address"/>
+  </instruction>
+
+  <instruction name="MI_CLFLUSH" bias="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="0"/>
+    <field name="MI Command Opcode" start="23" end="28" type="uint" default="39"/>
+    <field name="Use Global GTT" start="22" end="22" type="uint">
+      <value name="Per Process Graphics Address" value="0"/>
+      <value name="Global Graphics Address" value="1"/>
+    </field>
+    <field name="DWord Length" start="0" end="9" type="uint" default="1"/>
+    <field name="Page Base Address" start="44" end="63" type="address"/>
+    <field name="Starting Cacheline Offset" start="38" end="43" type="uint"/>
+    <field name="Page Base Address High" start="64" end="79" type="address"/>
+    <group count="0" start="96" size="32">
+      <field name="DW Representing a Half Cache Line" start="0" end="31" type="uint"/>
+    </group>
+  </instruction>
+
+  <instruction name="MI_CONDITIONAL_BATCH_BUFFER_END" bias="2" length="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="0"/>
+    <field name="MI Command Opcode" start="23" end="28" type="uint" default="54"/>
+    <field name="Use Global GTT" start="22" end="22" type="uint" default="0"/>
+    <field name="Compare Semaphore" start="21" end="21" type="uint" default="0"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="0"/>
+    <field name="Compare Data Dword" start="32" end="63" type="uint"/>
+    <field name="Compare Address" start="67" end="95" type="address"/>
+  </instruction>
+
+  <instruction name="MI_FLUSH" bias="1" length="1">
+    <field name="Command Type" start="29" end="31" type="uint" default="0"/>
+    <field name="MI Command Opcode" start="23" end="28" type="uint" default="4"/>
+    <field name="Indirect State Pointers Disable" start="5" end="5" type="bool"/>
+    <field name="Generic Media State Clear" start="4" end="4" type="bool"/>
+    <field name="Global Snapshot Count Reset" start="3" end="3" type="uint">
+      <value name="Don't Reset" value="0"/>
+      <value name="Reset" value="1"/>
+    </field>
+    <field name="Render Cache Flush Inhibit" start="2" end="2" type="uint">
+      <value name="Flush" value="0"/>
+      <value name="Don't Flush" value="1"/>
+    </field>
+    <field name="State/Instruction Cache Invalidate" start="1" end="1" type="uint">
+      <value name="Don't Invalidate" value="0"/>
+      <value name="Invalidate" value="1"/>
+    </field>
+  </instruction>
+
+  <instruction name="MI_LOAD_REGISTER_IMM" bias="2" length="3">
+    <field name="Command Type" start="29" end="31" type="uint" default="0"/>
+    <field name="MI Command Opcode" start="23" end="28" type="uint" default="34"/>
+    <field name="Byte Write Disables" start="8" end="11" type="uint"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="1"/>
+    <field name="Register Offset" start="34" end="54" type="offset"/>
+    <field name="Data DWord" start="64" end="95" type="uint"/>
+  </instruction>
+
+  <instruction name="MI_LOAD_REGISTER_MEM" bias="2" length="3">
+    <field name="Command Type" start="29" end="31" type="uint" default="0"/>
+    <field name="MI Command Opcode" start="23" end="28" type="uint" default="41"/>
+    <field name="Use Global GTT" start="22" end="22" type="uint"/>
+    <field name="Async Mode Enable" start="21" end="21" type="bool"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="1"/>
+    <field name="Register Address" start="34" end="54" type="offset"/>
+    <field name="Memory Address" start="66" end="95" type="address"/>
+  </instruction>
+
+  <instruction name="MI_NOOP" bias="1" length="1">
+    <field name="Command Type" start="29" end="31" type="uint" default="0"/>
+    <field name="MI Command Opcode" start="23" end="28" type="uint" default="0"/>
+    <field name="Identification Number Register Write Enable" start="22" end="22" type="bool"/>
+    <field name="Identification Number" start="0" end="21" type="uint"/>
+  </instruction>
+
+  <instruction name="MI_PREDICATE" bias="1" length="1">
+    <field name="Command Type" start="29" end="31" type="uint" default="0"/>
+    <field name="MI Command Opcode" start="23" end="28" type="uint" default="12"/>
+    <field name="Load Operation" start="6" end="7" type="uint" prefix="LOAD">
+      <value name="KEEP" value="0"/>
+      <value name="LOAD" value="2"/>
+      <value name="LOADINV" value="3"/>
+    </field>
+    <field name="Combine Operation" start="3" end="4" type="uint" prefix="COMBINE">
+      <value name="SET" value="0"/>
+      <value name="AND" value="1"/>
+      <value name="OR" value="2"/>
+      <value name="XOR" value="3"/>
+    </field>
+    <field name="Compare Operation" start="0" end="1" type="uint" prefix="COMPARE">
+      <value name="SRCS_EQUAL" value="2"/>
+      <value name="DELTAS_EQUAL" value="3"/>
+    </field>
+  </instruction>
+
+  <instruction name="MI_REPORT_HEAD" bias="1" length="1">
+    <field name="Command Type" start="29" end="31" type="uint" default="0"/>
+    <field name="MI Command Opcode" start="23" end="28" type="uint" default="7"/>
+  </instruction>
+
+  <instruction name="MI_REPORT_PERF_COUNT" bias="2" length="3">
+    <field name="Command Type" start="29" end="31" type="uint" default="0"/>
+    <field name="MI Command Opcode" start="23" end="28" type="uint" default="40"/>
+    <field name="DWord Length" start="0" end="5" type="uint" default="1"/>
+    <field name="Memory Address" start="38" end="63" type="address"/>
+    <field name="Use Global GTT" start="32" end="32" type="uint"/>
+    <field name="Report ID" start="64" end="95" type="uint"/>
+  </instruction>
+
+  <instruction name="MI_SEMAPHORE_MBOX" bias="2" length="3">
+    <field name="Command Type" start="29" end="31" type="uint" default="0"/>
+    <field name="MI Command Opcode" start="23" end="28" type="uint" default="22"/>
+    <field name="Register Select" start="16" end="17" type="uint">
+      <value name="RVSYNC" value="0"/>
+      <value name="RBSYNC" value="2"/>
+      <value name="Use General Register Select" value="3"/>
+    </field>
+    <field name="DWord Length" start="0" end="7" type="uint" default="1"/>
+    <field name="Semaphore Data Dword" start="32" end="63" type="uint"/>
+  </instruction>
+
+  <instruction name="MI_SET_CONTEXT" bias="2" length="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="0"/>
+    <field name="MI Command Opcode" start="23" end="28" type="uint" default="24"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="0"/>
+    <field name="Logical Context Address" start="44" end="63" type="address"/>
+    <field name="Reserved, Must be 1" start="40" end="40" type="uint"/>
+    <field name="Extended State Save Enable" start="35" end="35" type="bool"/>
+    <field name="Extended State Restore Enable" start="34" end="34" type="bool"/>
+    <field name="Force Restore" start="33" end="33" type="uint"/>
+    <field name="Restore Inhibit" start="32" end="32" type="uint"/>
+  </instruction>
+
+  <instruction name="MI_STORE_DATA_IMM" bias="2" length="4">
+    <field name="Command Type" start="29" end="31" type="uint" default="0"/>
+    <field name="MI Command Opcode" start="23" end="28" type="uint" default="32"/>
+    <field name="Use Global GTT" start="22" end="22" type="uint"/>
+    <field name="DWord Length" start="0" end="5" type="uint" default="2"/>
+    <field name="Address" start="66" end="95" type="uint"/>
+    <field name="Core Mode Enable" start="64" end="64" type="uint"/>
+    <field name="Data DWord 0" start="96" end="127" type="uint"/>
+    <field name="Data DWord 1" start="128" end="159" type="uint"/>
+  </instruction>
+
+  <instruction name="MI_STORE_DATA_INDEX" bias="2" length="3">
+    <field name="Command Type" start="29" end="31" type="uint" default="0"/>
+    <field name="MI Command Opcode" start="23" end="28" type="uint" default="33"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="1"/>
+    <field name="Offset" start="34" end="43" type="uint"/>
+    <field name="Data DWord 0" start="64" end="95" type="uint"/>
+    <field name="Data DWord 1" start="96" end="127" type="uint"/>
+  </instruction>
+
+  <instruction name="MI_STORE_REGISTER_MEM" bias="2" length="3">
+    <field name="Command Type" start="29" end="31" type="uint" default="0"/>
+    <field name="MI Command Opcode" start="23" end="28" type="uint" default="36"/>
+    <field name="Use Global GTT" start="22" end="22" type="uint"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="1"/>
+    <field name="Register Address" start="34" end="54" type="offset"/>
+    <field name="Memory Address" start="66" end="95" type="address"/>
+  </instruction>
+
+  <instruction name="MI_SUSPEND_FLUSH" bias="1" length="1">
+    <field name="Command Type" start="29" end="31" type="uint" default="0"/>
+    <field name="MI Command Opcode" start="23" end="28" type="uint" default="11"/>
+    <field name="Suspend Flush" start="0" end="0" type="bool"/>
+  </instruction>
+
+  <instruction name="MI_TOPOLOGY_FILTER" bias="1" length="1">
+    <field name="Command Type" start="29" end="31" type="uint" default="0"/>
+    <field name="MI Command Opcode" start="23" end="28" type="uint" default="13"/>
+    <field name="Topology Filter Value" start="0" end="5" type="uint"/>
+  </instruction>
+
+  <instruction name="MI_URB_CLEAR" bias="2" length="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="0"/>
+    <field name="MI Command Opcode" start="23" end="28" type="uint" default="25"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="0"/>
+    <field name="URB Clear Length" start="48" end="60" type="uint"/>
+    <field name="URB Address" start="32" end="45" type="offset"/>
+  </instruction>
+
+  <instruction name="MI_USER_INTERRUPT" bias="1" length="1">
+    <field name="Command Type" start="29" end="31" type="uint" default="0"/>
+    <field name="MI Command Opcode" start="23" end="28" type="uint" default="2"/>
+  </instruction>
+
+  <instruction name="MI_WAIT_FOR_EVENT" bias="1" length="1">
+    <field name="Command Type" start="29" end="31" type="uint" default="0"/>
+    <field name="MI Command Opcode" start="23" end="28" type="uint" default="3"/>
+    <field name="Display Pipe C Horizontal Blank Wait Enable" start="22" end="22" type="bool"/>
+    <field name="Display Pipe C Vertical Blank Wait Enable" start="21" end="21" type="bool"/>
+    <field name="Display Sprite C Flip Pending Wait Enable" start="20" end="20" type="bool"/>
+    <field name="Condition Code Wait Select" start="16" end="19" type="uint">
+      <value name="Not enabled" value="0"/>
+    </field>
+    <field name="Display Plane C Flip Pending Wait Enable" start="15" end="15" type="bool"/>
+    <field name="Display Pipe C Scan Line Wait Enable" start="14" end="14" type="bool"/>
+    <field name="Display Pipe B Horizontal Blank Wait Enable" start="13" end="13" type="bool"/>
+    <field name="Display Pipe B Vertical Blank Wait Enable" start="11" end="11" type="bool"/>
+    <field name="Display Sprite B Flip Pending Wait Enable" start="10" end="10" type="bool"/>
+    <field name="Display Plane B Flip Pending Wait Enable" start="9" end="9" type="bool"/>
+    <field name="Display Pipe B Scan Line Wait Enable" start="8" end="8" type="bool"/>
+    <field name="Display Pipe A Horizontal Blank Wait Enable" start="5" end="5" type="bool"/>
+    <field name="Display Pipe A Vertical Blank Wait Enable" start="3" end="3" type="bool"/>
+    <field name="Display Sprite A Flip Pending Wait Enable" start="2" end="2" type="bool"/>
+    <field name="Display Plane A Flip Pending Wait Enable" start="1" end="1" type="bool"/>
+    <field name="Display Pipe A Scan Line Wait Enable" start="0" end="0" type="bool"/>
+  </instruction>
+
+  <instruction name="PIPELINE_SELECT" bias="1" length="1">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="1"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="1"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="4"/>
+    <field name="Pipeline Selection" start="0" end="1" type="uint">
+      <value name="3D" value="0"/>
+      <value name="Media" value="1"/>
+      <value name="GPGPU" value="2"/>
+    </field>
+  </instruction>
+
+  <instruction name="PIPE_CONTROL" bias="2" length="5">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="2"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="0"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="3"/>
+    <field name="Destination Address Type" start="56" end="56" type="uint" prefix="DAT">
+      <value name="PPGTT" value="0"/>
+      <value name="GGTT" value="1"/>
+    </field>
+    <field name="LRI Post Sync Operation" start="55" end="55" type="uint">
+      <value name="No LRI Operation" value="0"/>
+      <value name="MMIO Write Immediate Data" value="1"/>
+    </field>
+    <field name="Store Data Index" start="53" end="53" type="uint"/>
+    <field name="Command Streamer Stall Enable" start="52" end="52" type="uint"/>
+    <field name="Global Snapshot Count Reset" start="51" end="51" type="uint">
+      <value name="Don't Reset" value="0"/>
+      <value name="Reset" value="1"/>
+    </field>
+    <field name="TLB Invalidate" start="50" end="50" type="uint"/>
+    <field name="Generic Media State Clear" start="48" end="48" type="bool"/>
+    <field name="Post Sync Operation" start="46" end="47" type="uint">
+      <value name="No Write" value="0"/>
+      <value name="Write Immediate Data" value="1"/>
+      <value name="Write PS Depth Count" value="2"/>
+      <value name="Write Timestamp" value="3"/>
+    </field>
+    <field name="Depth Stall Enable" start="45" end="45" type="bool"/>
+    <field name="Render Target Cache Flush Enable" start="44" end="44" type="bool"/>
+    <field name="Instruction Cache Invalidate Enable" start="43" end="43" type="bool"/>
+    <field name="Texture Cache Invalidation Enable" start="42" end="42" type="bool"/>
+    <field name="Indirect State Pointers Disable" start="41" end="41" type="bool"/>
+    <field name="Notify Enable" start="40" end="40" type="bool"/>
+    <field name="Pipe Control Flush Enable" start="39" end="39" type="bool"/>
+    <field name="DC  Flush Enable" start="37" end="37" type="bool"/>
+    <field name="VF Cache Invalidation Enable" start="36" end="36" type="bool"/>
+    <field name="Constant Cache Invalidation Enable" start="35" end="35" type="bool"/>
+    <field name="State Cache Invalidation Enable" start="34" end="34" type="bool"/>
+    <field name="Stall At Pixel Scoreboard" start="33" end="33" type="bool"/>
+    <field name="Depth Cache Flush Enable" start="32" end="32" type="bool"/>
+    <field name="Address" start="66" end="95" type="address"/>
+    <field name="Immediate Data" start="96" end="159" type="uint"/>
+  </instruction>
+
+  <instruction name="STATE_BASE_ADDRESS" bias="2" length="10">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="0"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="1"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="1"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="8"/>
+    <field name="General State Base Address" start="44" end="63" type="address"/>
+    <field name="General State Memory Object Control State" start="40" end="43" type="MEMORY_OBJECT_CONTROL_STATE"/>
+    <field name="Stateless Data Port Access Memory Object Control State" start="36" end="39" type="MEMORY_OBJECT_CONTROL_STATE"/>
+    <field name="Stateless Data Port Access Force Write Thru" start="35" end="35" type="uint"/>
+    <field name="General State Base Address Modify Enable" start="32" end="32" type="bool"/>
+    <field name="Surface State Base Address" start="76" end="95" type="address"/>
+    <field name="Surface State Memory Object Control State" start="72" end="75" type="MEMORY_OBJECT_CONTROL_STATE"/>
+    <field name="Surface State Base Address Modify Enable" start="64" end="64" type="bool"/>
+    <field name="Dynamic State Base Address" start="108" end="127" type="address"/>
+    <field name="Dynamic State Memory Object Control State" start="104" end="107" type="MEMORY_OBJECT_CONTROL_STATE"/>
+    <field name="Dynamic State Base Address Modify Enable" start="96" end="96" type="bool"/>
+    <field name="Indirect Object Base Address" start="140" end="159" type="address"/>
+    <field name="Indirect Object Memory Object Control State" start="136" end="139" type="MEMORY_OBJECT_CONTROL_STATE"/>
+    <field name="Indirect Object Base Address Modify Enable" start="128" end="128" type="bool"/>
+    <field name="Instruction Base Address" start="172" end="191" type="address"/>
+    <field name="Instruction Memory Object Control State" start="168" end="171" type="MEMORY_OBJECT_CONTROL_STATE"/>
+    <field name="Instruction Base Address Modify Enable" start="160" end="160" type="bool"/>
+    <field name="General State Access Upper Bound" start="204" end="223" type="address"/>
+    <field name="General State Access Upper Bound Modify Enable" start="192" end="192" type="bool"/>
+    <field name="Dynamic State Access Upper Bound" start="236" end="255" type="address"/>
+    <field name="Dynamic State Access Upper Bound Modify Enable" start="224" end="224" type="bool"/>
+    <field name="Indirect Object Access Upper Bound" start="268" end="287" type="address"/>
+    <field name="Indirect Object Access Upper Bound Modify Enable" start="256" end="256" type="bool"/>
+    <field name="Instruction Access Upper Bound" start="300" end="319" type="address"/>
+    <field name="Instruction Access Upper Bound Modify Enable" start="288" end="288" type="bool"/>
+  </instruction>
+
+  <instruction name="STATE_PREFETCH" bias="2" length="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="0"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="3"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="0"/>
+    <field name="Prefetch Pointer" start="38" end="63" type="address"/>
+    <field name="Prefetch Count" start="32" end="34" type="uint"/>
+  </instruction>
+
+  <instruction name="STATE_SIP" bias="2" length="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="0"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="1"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="2"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="0"/>
+    <field name="System Instruction Pointer" start="36" end="63" type="offset"/>
+  </instruction>
+
+  <instruction name="SWTESS_BASE_ADDRESS" bias="2" length="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="0"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="1"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="3"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="0"/>
+    <field name="SW Tessellation Base Address" start="44" end="63" type="address"/>
+    <field name="SW Tessellation Memory Object Control State" start="40" end="43" type="MEMORY_OBJECT_CONTROL_STATE"/>
+  </instruction>
+
+</genxml>
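
Before the next file, a short illustration of how the bit-range descriptions above can be assembled into command DWords. The helper is a hypothetical sketch: it handles only plain unsigned fields and ignores the float, address and fixed-point types, relocations, and groups that real packing code would have to deal with. Note that every "DWord Length" default above equals the instruction's length minus its bias.

def pack_fields(length_dwords, fields):
    """Pack (start_bit, end_bit, value) triples into 32-bit DWords.

    start/end are absolute bit positions as in the XML; a field is assumed
    to span at most two consecutive DWords.
    """
    dwords = [0] * length_dwords
    for start, end, value in fields:
        width = end - start + 1
        assert 0 <= value < (1 << width), "value does not fit in the field"
        dw, bit = divmod(start, 32)
        dwords[dw] |= (value << bit) & 0xffffffff
        if bit + width > 32:              # field spills into the next DWord
            dwords[dw + 1] |= value >> (32 - bit)
    return dwords

# Hypothetical MI_STORE_DATA_INDEX (length 3, bias 2) built from the defaults above:
cmd = pack_fields(3, [
    (29, 31, 0),           # Command Type
    (23, 28, 33),          # MI Command Opcode
    (0, 7, 3 - 2),         # DWord Length = length - bias
    (34, 43, 8),           # Offset
    (64, 95, 0xdeadbeef),  # Data DWord 0
])  # -> [0x10800001, 0x00000020, 0xdeadbeef]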
diff --git a/src/intel/genxml/gen75.xml b/src/intel/genxml/gen75.xml
new file mode 100644 (file)
index 0000000..94bb64e
--- /dev/null
@@ -0,0 +1,2909 @@
+<genxml name="HSW" gen="7.5">
+  <struct name="MEMORY_OBJECT_CONTROL_STATE" length="1">
+    <field name="LLC/eLLC Cacheability Control (LLCCC)" start="1" end="2" type="uint"/>
+    <field name="L3 Cacheability Control (L3CC)" start="0" end="0" type="uint"/>
+  </struct>
+
+  <struct name="3DSTATE_CONSTANT_BODY" length="6">
+    <field name="Constant Buffer 1 Read Length" start="16" end="31" type="uint"/>
+    <field name="Constant Buffer 0 Read Length" start="0" end="15" type="uint"/>
+    <field name="Constant Buffer 3 Read Length" start="48" end="63" type="uint"/>
+    <field name="Constant Buffer 2 Read Length" start="32" end="47" type="uint"/>
+    <field name="Pointer To Constant Buffer 0" start="69" end="95" type="address"/>
+    <field name="Constant Buffer Object Control State" start="64" end="68" type="MEMORY_OBJECT_CONTROL_STATE"/>
+    <field name="Pointer To Constant Buffer 1" start="101" end="127" type="address"/>
+    <field name="Pointer To Constant Buffer 2" start="133" end="159" type="address"/>
+    <field name="Pointer To Constant Buffer 3" start="165" end="191" type="address"/>
+  </struct>
+
+  <struct name="BINDING_TABLE_EDIT_ENTRY" length="1">
+    <field name="Binding Table Index" start="16" end="23" type="uint"/>
+    <field name="Surface State Pointer" start="0" end="15" type="offset"/>
+  </struct>
+
+  <struct name="GATHER_CONSTANT_ENTRY" length="1">
+    <field name="Constant Buffer Offset" start="8" end="15" type="offset"/>
+    <field name="Channel Mask" start="4" end="7" type="uint"/>
+    <field name="Binding Table Index Offset" start="0" end="3" type="uint"/>
+  </struct>
+
+  <struct name="VERTEX_BUFFER_STATE" length="4">
+    <field name="Vertex Buffer Index" start="26" end="31" type="uint"/>
+    <field name="Buffer Access Type" start="20" end="20" type="uint">
+      <value name="VERTEXDATA" value="0"/>
+      <value name="INSTANCEDATA" value="1"/>
+    </field>
+    <field name="Vertex Buffer Memory Object Control State" start="16" end="19" type="MEMORY_OBJECT_CONTROL_STATE"/>
+    <field name="Address Modify Enable" start="14" end="14" type="uint"/>
+    <field name="Null Vertex Buffer" start="13" end="13" type="bool"/>
+    <field name="Vertex Fetch Invalidate" start="12" end="12" type="uint" default="0"/>
+    <field name="Buffer Pitch" start="0" end="11" type="uint"/>
+    <field name="Buffer Starting Address" start="32" end="63" type="address"/>
+    <field name="End Address" start="64" end="95" type="address"/>
+    <field name="Instance Data Step Rate" start="96" end="127" type="uint"/>
+  </struct>
+
+  <struct name="VERTEX_ELEMENT_STATE" length="2">
+    <field name="Vertex Buffer Index" start="26" end="31" type="uint"/>
+    <field name="Valid" start="25" end="25" type="uint"/>
+    <field name="Source Element Format" start="16" end="24" type="uint"/>
+    <field name="Edge Flag Enable" start="15" end="15" type="bool"/>
+    <field name="Source Element Offset" start="0" end="11" type="uint"/>
+    <field name="Component 0 Control" start="60" end="62" type="uint"/>
+    <field name="Component 1 Control" start="56" end="58" type="uint"/>
+    <field name="Component 2 Control" start="52" end="54" type="uint"/>
+    <field name="Component 3 Control" start="48" end="50" type="uint"/>
+  </struct>
+
+  <struct name="SO_DECL" length="1">
+    <field name="Output Buffer Slot" start="12" end="13" type="uint"/>
+    <field name="Hole Flag" start="11" end="11" type="uint"/>
+    <field name="Register Index" start="4" end="9" type="uint"/>
+    <field name="Component Mask" start="0" end="3" type="uint" default="0"/>
+  </struct>
+
+  <struct name="SO_DECL_ENTRY" length="2">
+    <field name="Stream 3 Decl" start="48" end="63" type="SO_DECL"/>
+    <field name="Stream 2 Decl" start="32" end="47" type="SO_DECL"/>
+    <field name="Stream 1 Decl" start="16" end="31" type="SO_DECL"/>
+    <field name="Stream 0 Decl" start="0" end="15" type="SO_DECL"/>
+  </struct>
+
+  <struct name="SF_OUTPUT_ATTRIBUTE_DETAIL" length="1">
+    <field name="Component Override W" start="15" end="15" type="bool"/>
+    <field name="Component Override Z" start="14" end="14" type="bool"/>
+    <field name="Component Override Y" start="13" end="13" type="bool"/>
+    <field name="Component Override X" start="12" end="12" type="bool"/>
+    <field name="Swizzle Control Mode" start="11" end="11" type="uint"/>
+    <field name="Constant Source" start="9" end="10" type="uint">
+      <value name="CONST_0000" value="0"/>
+      <value name="CONST_0001_FLOAT" value="1"/>
+      <value name="CONST_1111_FLOAT" value="2"/>
+      <value name="PRIM_ID" value="3"/>
+    </field>
+    <field name="Swizzle Select" start="6" end="7" type="uint">
+      <value name="INPUTATTR" value="0"/>
+      <value name="INPUTATTR_FACING" value="1"/>
+      <value name="INPUTATTR_W" value="2"/>
+      <value name="INPUTATTR_FACING_W" value="3"/>
+    </field>
+    <field name="Source Attribute" start="0" end="4" type="uint"/>
+  </struct>
+
+  <struct name="SCISSOR_RECT" length="2">
+    <field name="Scissor Rectangle Y Min" start="16" end="31" type="uint"/>
+    <field name="Scissor Rectangle X Min" start="0" end="15" type="uint"/>
+    <field name="Scissor Rectangle Y Max" start="48" end="63" type="uint"/>
+    <field name="Scissor Rectangle X Max" start="32" end="47" type="uint"/>
+  </struct>
+
+  <struct name="SF_CLIP_VIEWPORT" length="16">
+    <field name="Viewport Matrix Element m00" start="0" end="31" type="float"/>
+    <field name="Viewport Matrix Element m11" start="32" end="63" type="float"/>
+    <field name="Viewport Matrix Element m22" start="64" end="95" type="float"/>
+    <field name="Viewport Matrix Element m30" start="96" end="127" type="float"/>
+    <field name="Viewport Matrix Element m31" start="128" end="159" type="float"/>
+    <field name="Viewport Matrix Element m32" start="160" end="191" type="float"/>
+    <field name="X Min Clip Guardband" start="256" end="287" type="float"/>
+    <field name="X Max Clip Guardband" start="288" end="319" type="float"/>
+    <field name="Y Min Clip Guardband" start="320" end="351" type="float"/>
+    <field name="Y Max Clip Guardband" start="352" end="383" type="float"/>
+    <group count="4" start="384" size="32">
+    </group>
+  </struct>
+
+  <struct name="BLEND_STATE" length="2">
+    <field name="Color Buffer Blend Enable" start="31" end="31" type="bool"/>
+    <field name="Independent Alpha Blend Enable" start="30" end="30" type="bool"/>
+    <field name="Alpha Blend Function" start="26" end="28" type="uint">
+      <value name="BLENDFUNCTION_ADD" value="0"/>
+      <value name="BLENDFUNCTION_SUBTRACT" value="1"/>
+      <value name="BLENDFUNCTION_REVERSE_SUBTRACT" value="2"/>
+      <value name="BLENDFUNCTION_MIN" value="3"/>
+      <value name="BLENDFUNCTION_MAX" value="4"/>
+    </field>
+    <field name="Source Alpha Blend Factor" start="20" end="24" type="uint">
+      <value name="BLENDFACTOR_ONE" value="1"/>
+      <value name="BLENDFACTOR_SRC_COLOR" value="2"/>
+      <value name="BLENDFACTOR_SRC_ALPHA" value="3"/>
+      <value name="BLENDFACTOR_DST_ALPHA" value="4"/>
+      <value name="BLENDFACTOR_DST_COLOR" value="5"/>
+      <value name="BLENDFACTOR_SRC_ALPHA_SATURATE" value="6"/>
+      <value name="BLENDFACTOR_CONST_COLOR" value="7"/>
+      <value name="BLENDFACTOR_CONST_ALPHA" value="8"/>
+      <value name="BLENDFACTOR_SRC1_COLOR" value="9"/>
+      <value name="BLENDFACTOR_SRC1_ALPHA" value="10"/>
+      <value name="BLENDFACTOR_ZERO" value="17"/>
+      <value name="BLENDFACTOR_INV_SRC_COLOR" value="18"/>
+      <value name="BLENDFACTOR_INV_SRC_ALPHA" value="19"/>
+      <value name="BLENDFACTOR_INV_DST_ALPHA" value="20"/>
+      <value name="BLENDFACTOR_INV_DST_COLOR" value="21"/>
+      <value name="BLENDFACTOR_INV_CONST_COLOR" value="23"/>
+      <value name="BLENDFACTOR_INV_CONST_ALPHA" value="24"/>
+      <value name="BLENDFACTOR_INV_SRC1_COLOR" value="25"/>
+      <value name="BLENDFACTOR_INV_SRC1_ALPHA" value="26"/>
+    </field>
+    <field name="Destination Alpha Blend Factor" start="15" end="19" type="uint"/>
+    <field name="Color Blend Function" start="11" end="13" type="uint">
+      <value name="BLENDFUNCTION_ADD" value="0"/>
+      <value name="BLENDFUNCTION_SUBTRACT" value="1"/>
+      <value name="BLENDFUNCTION_REVERSE_SUBTRACT" value="2"/>
+      <value name="BLENDFUNCTION_MIN  " value="3"/>
+      <value name="BLENDFUNCTION_MAX" value="4"/>
+    </field>
+    <field name="Source Blend Factor" start="5" end="9" type="uint"/>
+    <field name="Destination Blend Factor" start="0" end="4" type="uint"/>
+    <field name="AlphaToCoverage Enable" start="63" end="63" type="bool"/>
+    <field name="AlphaToOne Enable" start="62" end="62" type="bool"/>
+    <field name="AlphaToCoverage Dither Enable" start="61" end="61" type="bool"/>
+    <field name="Write Disable Alpha" start="59" end="59" type="bool"/>
+    <field name="Write Disable Red" start="58" end="58" type="bool"/>
+    <field name="Write Disable Green" start="57" end="57" type="bool"/>
+    <field name="Write Disable Blue" start="56" end="56" type="bool"/>
+    <field name="Logic Op Enable" start="54" end="54" type="bool"/>
+    <field name="Logic Op Function" start="50" end="53" type="uint">
+      <value name="LOGICOP_CLEAR" value="0"/>
+      <value name="LOGICOP_NOR" value="1"/>
+      <value name="LOGICOP_AND_INVERTED" value="2"/>
+      <value name="LOGICOP_COPY_INVERTED" value="3"/>
+      <value name="LOGICOP_AND_REVERSE" value="4"/>
+      <value name="LOGICOP_INVERT" value="5"/>
+      <value name="LOGICOP_XOR" value="6"/>
+      <value name="LOGICOP_NAND" value="7"/>
+      <value name="LOGICOP_AND" value="8"/>
+      <value name="LOGICOP_EQUIV" value="9"/>
+      <value name="LOGICOP_NOOP" value="10"/>
+      <value name="LOGICOP_OR_INVERTED" value="11"/>
+      <value name="LOGICOP_COPY" value="12"/>
+      <value name="LOGICOP_OR_REVERSE" value="13"/>
+      <value name="LOGICOP_OR" value="14"/>
+      <value name="LOGICOP_SET" value="15"/>
+    </field>
+    <field name="Alpha Test Enable" start="48" end="48" type="bool"/>
+    <field name="Alpha Test Function" start="45" end="47" type="uint">
+      <value name="COMPAREFUNCTION_ALWAYS" value="0"/>
+      <value name="COMPAREFUNCTION_NEVER" value="1"/>
+      <value name="COMPAREFUNCTION_LESS" value="2"/>
+      <value name="COMPAREFUNCTION_EQUAL" value="3"/>
+      <value name="COMPAREFUNCTION_LEQUAL" value="4"/>
+      <value name="COMPAREFUNCTION_GREATER" value="5"/>
+      <value name="COMPAREFUNCTION_NOTEQUAL" value="6"/>
+      <value name="COMPAREFUNCTION_GEQUAL" value="7"/>
+    </field>
+    <field name="Color Dither Enable" start="44" end="44" type="bool"/>
+    <field name="X Dither Offset" start="42" end="43" type="uint"/>
+    <field name="Y Dither Offset" start="40" end="41" type="uint"/>
+    <field name="Color Clamp Range" start="34" end="35" type="uint">
+      <value name="COLORCLAMP_UNORM" value="0"/>
+      <value name="COLORCLAMP_SNORM" value="1"/>
+      <value name="COLORCLAMP_RTFORMAT" value="2"/>
+    </field>
+    <field name="Pre-Blend Color Clamp Enable" start="33" end="33" type="bool"/>
+    <field name="Post-Blend Color Clamp Enable" start="32" end="32" type="bool"/>
+  </struct>
+
+  <struct name="CC_VIEWPORT" length="2">
+    <field name="Minimum Depth" start="0" end="31" type="float"/>
+    <field name="Maximum Depth" start="32" end="63" type="float"/>
+  </struct>
+
+  <struct name="COLOR_CALC_STATE" length="6">
+    <field name="Stencil Reference Value" start="24" end="31" type="uint"/>
+    <field name="BackFace Stencil Reference Value" start="16" end="23" type="uint"/>
+    <field name="Round Disable Function Disable" start="15" end="15" type="bool"/>
+    <field name="Alpha Test Format" start="0" end="0" type="uint">
+      <value name="ALPHATEST_UNORM8" value="0"/>
+      <value name="ALPHATEST_FLOAT32" value="1"/>
+    </field>
+    <field name="Alpha Reference Value As UNORM8" start="32" end="63" type="uint"/>
+    <field name="Alpha Reference Value As FLOAT32" start="32" end="63" type="float"/>
+    <field name="Blend Constant Color Red" start="64" end="95" type="float"/>
+    <field name="Blend Constant Color Green" start="96" end="127" type="float"/>
+    <field name="Blend Constant Color Blue" start="128" end="159" type="float"/>
+    <field name="Blend Constant Color Alpha" start="160" end="191" type="float"/>
+  </struct>
+
+  <struct name="DEPTH_STENCIL_STATE" length="3">
+    <field name="Stencil Test Enable" start="31" end="31" type="bool"/>
+    <field name="Stencil Test Function" start="28" end="30" type="uint">
+      <value name="COMPAREFUNCTION_ALWAYS" value="0"/>
+      <value name="COMPAREFUNCTION_NEVER" value="1"/>
+      <value name="COMPAREFUNCTION_LESS" value="2"/>
+      <value name="COMPAREFUNCTION_EQUAL" value="3"/>
+      <value name="COMPAREFUNCTION_LEQUAL" value="4"/>
+      <value name="COMPAREFUNCTION_GREATER" value="5"/>
+      <value name="COMPAREFUNCTION_NOTEQUAL" value="6"/>
+      <value name="COMPAREFUNCTION_GEQUAL" value="7"/>
+    </field>
+    <field name="Stencil Fail Op" start="25" end="27" type="uint">
+      <value name="STENCILOP_KEEP" value="0"/>
+      <value name="STENCILOP_ZERO" value="1"/>
+      <value name="STENCILOP_REPLACE" value="2"/>
+      <value name="STENCILOP_INCRSAT" value="3"/>
+      <value name="STENCILOP_DECRSAT" value="4"/>
+      <value name="STENCILOP_INCR" value="5"/>
+      <value name="STENCILOP_DECR" value="6"/>
+      <value name="STENCILOP_INVERT" value="7"/>
+    </field>
+    <field name="Stencil Pass Depth Fail Op" start="22" end="24" type="uint"/>
+    <field name="Stencil Pass Depth Pass Op" start="19" end="21" type="uint"/>
+    <field name="Stencil Buffer Write Enable" start="18" end="18" type="bool"/>
+    <field name="Double Sided Stencil Enable" start="15" end="15" type="bool"/>
+    <field name="BackFace Stencil Test Function" start="12" end="14" type="uint">
+      <value name="COMPAREFUNCTION_ALWAYS" value="0"/>
+      <value name="COMPAREFUNCTION_NEVER" value="1"/>
+      <value name="COMPAREFUNCTION_LESS" value="2"/>
+      <value name="COMPAREFUNCTION_EQUAL" value="3"/>
+      <value name="COMPAREFUNCTION_LEQUAL" value="4"/>
+      <value name="COMPAREFUNCTION_GREATER" value="5"/>
+      <value name="COMPAREFUNCTION_NOTEQUAL" value="6"/>
+      <value name="COMPAREFUNCTION_GEQUAL" value="7"/>
+    </field>
+    <field name="Backface Stencil Fail Op" start="9" end="11" type="uint">
+      <value name="STENCILOP_KEEP" value="0"/>
+      <value name="STENCILOP_ZERO" value="1"/>
+      <value name="STENCILOP_REPLACE" value="2"/>
+      <value name="STENCILOP_INCRSAT" value="3"/>
+      <value name="STENCILOP_DECRSAT" value="4"/>
+      <value name="STENCILOP_INCR" value="5"/>
+      <value name="STENCILOP_DECR" value="6"/>
+      <value name="STENCILOP_INVERT" value="7"/>
+    </field>
+    <field name="Backface Stencil Pass Depth Fail Op" start="6" end="8" type="uint"/>
+    <field name="Backface Stencil Pass Depth Pass Op" start="3" end="5" type="uint"/>
+    <field name="Stencil Test Mask" start="56" end="63" type="uint"/>
+    <field name="Stencil Write Mask" start="48" end="55" type="uint"/>
+    <field name="Backface Stencil Test Mask" start="40" end="47" type="uint"/>
+    <field name="Backface Stencil Write Mask" start="32" end="39" type="uint"/>
+    <field name="Depth Test Enable" start="95" end="95" type="bool"/>
+    <field name="Depth Test Function" start="91" end="93" type="uint">
+      <value name="COMPAREFUNCTION_ALWAYS" value="0"/>
+      <value name="COMPAREFUNCTION_NEVER" value="1"/>
+      <value name="COMPAREFUNCTION_LESS" value="2"/>
+      <value name="COMPAREFUNCTION_EQUAL" value="3"/>
+      <value name="COMPAREFUNCTION_LEQUAL" value="4"/>
+      <value name="COMPAREFUNCTION_GREATER" value="5"/>
+      <value name="COMPAREFUNCTION_NOTEQUAL" value="6"/>
+      <value name="COMPAREFUNCTION_GEQUAL" value="7"/>
+    </field>
+    <field name="Depth Buffer Write Enable" start="90" end="90" type="bool"/>
+  </struct>
+
+  <struct name="INTERFACE_DESCRIPTOR_DATA" length="8">
+    <field name="Kernel Start Pointer" start="6" end="31" type="offset"/>
+    <field name="Single Program Flow" start="50" end="50" type="uint"/>
+    <field name="Thread Priority" start="49" end="49" type="uint">
+      <value name="Normal Priority" value="0"/>
+      <value name="High Priority" value="1"/>
+    </field>
+    <field name="Floating Point Mode" start="48" end="48" type="uint">
+      <value name="IEEE-754" value="0"/>
+      <value name="Alternate" value="1"/>
+    </field>
+    <field name="Illegal Opcode Exception Enable" start="45" end="45" type="bool"/>
+    <field name="Mask Stack Exception Enable" start="43" end="43" type="bool"/>
+    <field name="Software Exception Enable" start="39" end="39" type="bool"/>
+    <field name="Sampler State Pointer" start="69" end="95" type="offset"/>
+    <field name="Sampler Count" start="66" end="68" type="uint">
+      <value name="No samplers used" value="0"/>
+      <value name="Between 1 and 4 samplers used" value="1"/>
+      <value name="Between 5 and 8 samplers used" value="2"/>
+      <value name="Between 9 and 12 samplers used" value="3"/>
+      <value name="Between 13 and 16 samplers used" value="4"/>
+    </field>
+    <field name="Binding Table Pointer" start="101" end="111" type="offset"/>
+    <field name="Binding Table Entry Count" start="96" end="100" type="uint"/>
+    <field name="Constant URB Entry Read Length" start="144" end="159" type="uint"/>
+    <field name="Rounding Mode" start="182" end="183" type="uint">
+      <value name="RTNE" value="0"/>
+      <value name="RU" value="1"/>
+      <value name="RD" value="2"/>
+      <value name="RTZ" value="3"/>
+    </field>
+    <field name="Barrier Enable" start="181" end="181" type="bool"/>
+    <field name="Shared Local Memory Size" start="176" end="180" type="uint"/>
+    <field name="Number of Threads in GPGPU Thread Group" start="160" end="167" type="uint"/>
+    <field name="Cross-Thread Constant Data Read Length" start="192" end="199" type="uint"/>
+  </struct>
+
+  <struct name="PALETTE_ENTRY" length="1">
+    <field name="Alpha" start="24" end="31" type="uint"/>
+    <field name="Red" start="16" end="23" type="uint"/>
+    <field name="Green" start="8" end="15" type="uint"/>
+    <field name="Blue" start="0" end="7" type="uint"/>
+  </struct>
+
+  <struct name="BINDING_TABLE_STATE" length="1">
+    <field name="Surface State Pointer" start="5" end="31" type="offset"/>
+  </struct>
+
+  <struct name="RENDER_SURFACE_STATE" length="8">
+    <field name="Surface Type" start="29" end="31" type="uint">
+      <value name="SURFTYPE_1D" value="0"/>
+      <value name="SURFTYPE_2D" value="1"/>
+      <value name="SURFTYPE_3D" value="2"/>
+      <value name="SURFTYPE_CUBE" value="3"/>
+      <value name="SURFTYPE_BUFFER" value="4"/>
+      <value name="SURFTYPE_STRBUF" value="5"/>
+      <value name="SURFTYPE_NULL" value="7"/>
+    </field>
+    <field name="Surface Array" start="28" end="28" type="bool"/>
+    <field name="Surface Format" start="18" end="26" type="uint"/>
+    <field name="Surface Vertical Alignment" start="16" end="17" type="uint">
+      <value name="VALIGN_2" value="0"/>
+      <value name="VALIGN_4" value="1"/>
+    </field>
+    <field name="Surface Horizontal Alignment" start="15" end="15" type="uint">
+      <value name="HALIGN_4" value="0"/>
+      <value name="HALIGN_8" value="1"/>
+    </field>
+    <field name="Tiled Surface" start="14" end="14" type="uint"/>
+    <field name="Tile Walk" start="13" end="13" type="uint">
+      <value name="TILEWALK_XMAJOR" value="0"/>
+      <value name="TILEWALK_YMAJOR" value="1"/>
+    </field>
+    <field name="Vertical Line Stride" start="12" end="12" type="uint"/>
+    <field name="Vertical Line Stride Offset" start="11" end="11" type="uint"/>
+    <field name="Surface Array Spacing" start="10" end="10" type="uint">
+      <value name="ARYSPC_FULL" value="0"/>
+      <value name="ARYSPC_LOD0" value="1"/>
+    </field>
+    <field name="Render Cache Read Write Mode" start="8" end="8" type="uint"/>
+    <field name="Media Boundary Pixel Mode" start="6" end="7" type="uint">
+      <value name="NORMAL_MODE" value="0"/>
+      <value name="PROGRESSIVE_FRAME" value="2"/>
+      <value name="INTERLACED_FRAME" value="3"/>
+    </field>
+    <field name="Cube Face Enables" start="0" end="5" type="uint"/>
+    <field name="Surface Base Address" start="32" end="63" type="address"/>
+    <field name="Height" start="80" end="93" type="uint"/>
+    <field name="Width" start="64" end="77" type="uint"/>
+    <field name="Depth" start="117" end="127" type="uint"/>
+    <field name="Integer Surface Format" start="114" end="116" type="uint"/>
+    <field name="Surface Pitch" start="96" end="113" type="uint"/>
+    <field name="Render Target Rotation" start="157" end="158" type="uint">
+      <value name="RTROTATE_0DEG" value="0"/>
+      <value name="RTROTATE_90DEG" value="1"/>
+      <value name="RTROTATE_270DEG" value="3"/>
+    </field>
+    <field name="Minimum Array Element" start="146" end="156" type="uint"/>
+    <field name="Render Target View Extent" start="135" end="145" type="uint"/>
+    <field name="Multisampled Surface Storage Format" start="134" end="134" type="uint">
+      <value name="MSFMT_MSS" value="0"/>
+      <value name="MSFMT_DEPTH_STENCIL" value="1"/>
+    </field>
+    <field name="Number of Multisamples" start="131" end="133" type="uint">
+      <value name="MULTISAMPLECOUNT_1" value="0"/>
+      <value name="MULTISAMPLECOUNT_4" value="2"/>
+      <value name="MULTISAMPLECOUNT_8" value="3"/>
+    </field>
+    <field name="Multisample Position Palette Index" start="128" end="130" type="uint"/>
+    <field name="Strbuf Minimum Array Element" start="128" end="154" type="uint"/>
+    <field name="X Offset" start="185" end="191" type="offset"/>
+    <field name="Y Offset" start="180" end="183" type="offset"/>
+    <field name="Surface Object Control State" start="176" end="179" type="MEMORY_OBJECT_CONTROL_STATE"/>
+    <field name="MOCS" start="176" end="179" type="uint"/>
+    <field name="Surface Min LOD" start="164" end="167" type="uint"/>
+    <field name="MIP Count / LOD" start="160" end="163" type="uint"/>
+    <field name="MCS Base Address" start="204" end="223" type="address"/>
+    <field name="MCS Surface Pitch" start="195" end="203" type="uint"/>
+    <field name="Append Counter Address" start="198" end="223" type="address"/>
+    <field name="Append Counter Enable" start="193" end="193" type="bool"/>
+    <field name="MCS Enable" start="192" end="192" type="bool"/>
+    <field name="Reserved: MBZ" start="222" end="223" type="uint"/>
+    <field name="X Offset for UV Plane" start="208" end="221" type="uint"/>
+    <field name="Y Offset for UV Plane" start="192" end="205" type="uint"/>
+    <field name="Red Clear Color" start="255" end="255" type="uint" />
+    <field name="Green Clear Color" start="254" end="254" type="uint" />
+    <field name="Blue Clear Color" start="253" end="253" type="uint" />
+    <field name="Alpha Clear Color" start="252" end="252" type="uint" />
+    <field name="Shader Channel Select Red" start="249" end="251" type="uint">
+      <value name="SCS_ZERO" value="0"/>
+      <value name="SCS_ONE" value="1"/>
+      <value name="SCS_RED" value="4"/>
+      <value name="SCS_GREEN" value="5"/>
+      <value name="SCS_BLUE" value="6"/>
+      <value name="SCS_ALPHA" value="7"/>
+    </field>
+    <field name="Shader Channel Select Green" start="246" end="248" type="uint"/>
+    <field name="Shader Channel Select Blue" start="243" end="245" type="uint"/>
+    <field name="Shader Channel Select Alpha" start="240" end="242" type="uint"/>
+    <field name="Resource Min LOD" start="224" end="235" type="u4.8"/>
+  </struct>
+
+  <struct name="SAMPLER_STATE" length="4">
+    <field name="Sampler Disable" start="31" end="31" type="bool"/>
+    <field name="Texture Border Color Mode" start="29" end="29" type="uint">
+      <value name="DX10/OGL" value="0"/>
+      <value name="DX9" value="1"/>
+    </field>
+    <field name="LOD PreClamp Enable" start="28" end="28" type="uint" prefix="CLAMP_ENABLE">
+      <value name="OGL" value="1"/>
+    </field>
+    <field name="Base Mip Level" start="22" end="26" type="u4.1"/>
+    <field name="Mip Mode Filter" start="20" end="21" type="uint" prefix="MIPFILTER">
+      <value name="NONE" value="0"/>
+      <value name="NEAREST" value="1"/>
+      <value name="LINEAR" value="3"/>
+    </field>
+    <field name="Mag Mode Filter" start="17" end="19" type="uint" prefix="MAPFILTER">
+      <value name="NEAREST" value="0"/>
+      <value name="LINEAR" value="1"/>
+      <value name="ANISOTROPIC" value="2"/>
+      <value name="MONO" value="6"/>
+    </field>
+    <field name="Min Mode Filter" start="14" end="16" type="uint" prefix="MAPFILTER">
+      <value name="NEAREST" value="0"/>
+      <value name="LINEAR" value="1"/>
+      <value name="ANISOTROPIC" value="2"/>
+      <value name="MONO" value="6"/>
+    </field>
+    <field name="Texture LOD Bias" start="1" end="13" type="s4.8"/>
+    <field name="Anisotropic Algorithm" start="0" end="0" type="uint">
+      <value name="LEGACY" value="0"/>
+      <value name="EWA Approximation" value="1"/>
+    </field>
+    <field name="Min LOD" start="52" end="63" type="u4.8"/>
+    <field name="Max LOD" start="40" end="51" type="u4.8"/>
+    <field name="Shadow Function" start="33" end="35" type="uint">
+      <value name="PREFILTEROP ALWAYS" value="0"/>
+      <value name="PREFILTEROP NEVER" value="1"/>
+      <value name="PREFILTEROP LESS" value="2"/>
+      <value name="PREFILTEROP EQUAL" value="3"/>
+      <value name="PREFILTEROP LEQUAL" value="4"/>
+      <value name="PREFILTEROP GREATER" value="5"/>
+      <value name="PREFILTEROP NOTEQUAL" value="6"/>
+      <value name="PREFILTEROP GEQUAL" value="7"/>
+    </field>
+    <field name="Cube Surface Control Mode" start="32" end="32" type="uint">
+      <value name="PROGRAMMED" value="0"/>
+      <value name="OVERRIDE" value="1"/>
+    </field>
+    <field name="Border Color Pointer" start="69" end="95" type="offset"/>
+    <field name="ChromaKey Enable" start="121" end="121" type="bool"/>
+    <field name="ChromaKey Index" start="119" end="120" type="uint"/>
+    <field name="ChromaKey Mode" start="118" end="118" type="uint">
+      <value name="KEYFILTER_KILL_ON_ANY_MATCH" value="0"/>
+      <value name="KEYFILTER_REPLACE_BLACK" value="1"/>
+    </field>
+    <field name="Maximum Anisotropy" start="115" end="117" type="uint">
+      <value name="RATIO 2:1" value="0"/>
+      <value name="RATIO 4:1" value="1"/>
+      <value name="RATIO 6:1" value="2"/>
+      <value name="RATIO 8:1" value="3"/>
+      <value name="RATIO 10:1" value="4"/>
+      <value name="RATIO 12:1" value="5"/>
+      <value name="RATIO 14:1" value="6"/>
+      <value name="RATIO 16:1" value="7"/>
+    </field>
+    <field name="R Address Min Filter Rounding Enable" start="109" end="109" type="bool"/>
+    <field name="R Address Mag Filter Rounding Enable" start="110" end="110" type="bool"/>
+    <field name="V Address Min Filter Rounding Enable" start="111" end="111" type="bool"/>
+    <field name="V Address Mag Filter Rounding Enable" start="112" end="112" type="bool"/>
+    <field name="U Address Min Filter Rounding Enable" start="113" end="113" type="bool"/>
+    <field name="U Address Mag Filter Rounding Enable" start="114" end="114" type="bool"/>
+    <field name="Trilinear Filter Quality" start="107" end="108" type="uint">
+      <value name="FULL" value="0"/>
+      <value name="TRIQUAL_HIGH/MAG_CLAMP_MIPFILTER" value="1"/>
+      <value name="MED" value="2"/>
+      <value name="LOW" value="3"/>
+    </field>
+    <field name="Non-normalized Coordinate Enable" start="106" end="106" type="bool"/>
+    <field name="TCX Address Control Mode" start="102" end="104" type="uint"/>
+    <field name="TCY Address Control Mode" start="99" end="101" type="uint"/>
+    <field name="TCZ Address Control Mode" start="96" end="98" type="uint"/>
+  </struct>
+
+  <enum name="3D_Prim_Topo_Type" prefix="3DPRIM">
+    <value name="POINTLIST" value="1"/>
+    <value name="LINELIST" value="2"/>
+    <value name="LINESTRIP" value="3"/>
+    <value name="TRILIST" value="4"/>
+    <value name="TRISTRIP" value="5"/>
+    <value name="TRIFAN" value="6"/>
+    <value name="QUADLIST" value="7"/>
+    <value name="QUADSTRIP" value="8"/>
+    <value name="LINELIST_ADJ" value="9"/>
+    <value name="LINESTRIP_ADJ" value="10"/>
+    <value name="TRILIST_ADJ" value="11"/>
+    <value name="TRISTRIP_ADJ" value="12"/>
+    <value name="TRISTRIP_REVERSE" value="13"/>
+    <value name="POLYGON" value="14"/>
+    <value name="RECTLIST" value="15"/>
+    <value name="LINELOOP" value="16"/>
+    <value name="POINTLIST _BF" value="17"/>
+    <value name="LINESTRIP_CONT" value="18"/>
+    <value name="LINESTRIP_BF" value="19"/>
+    <value name="LINESTRIP_CONT_BF" value="20"/>
+    <value name="TRIFAN_NOSTIPPLE" value="22"/>
+    <value name="PATCHLIST_1" value="32"/>
+    <value name="PATCHLIST_2" value="33"/>
+    <value name="PATCHLIST_3" value="34"/>
+    <value name="PATCHLIST_4" value="35"/>
+    <value name="PATCHLIST_5" value="36"/>
+    <value name="PATCHLIST_6" value="37"/>
+    <value name="PATCHLIST_7" value="38"/>
+    <value name="PATCHLIST_8" value="39"/>
+    <value name="PATCHLIST_9" value="40"/>
+    <value name="PATCHLIST_10" value="41"/>
+    <value name="PATCHLIST_11" value="42"/>
+    <value name="PATCHLIST_12" value="43"/>
+    <value name="PATCHLIST_13" value="44"/>
+    <value name="PATCHLIST_14" value="45"/>
+    <value name="PATCHLIST_15" value="46"/>
+    <value name="PATCHLIST_16" value="47"/>
+    <value name="PATCHLIST_17" value="48"/>
+    <value name="PATCHLIST_18" value="49"/>
+    <value name="PATCHLIST_19" value="50"/>
+    <value name="PATCHLIST_20" value="51"/>
+    <value name="PATCHLIST_21" value="52"/>
+    <value name="PATCHLIST_22" value="53"/>
+    <value name="PATCHLIST_23" value="54"/>
+    <value name="PATCHLIST_24" value="55"/>
+    <value name="PATCHLIST_25" value="56"/>
+    <value name="PATCHLIST_26" value="57"/>
+    <value name="PATCHLIST_27" value="58"/>
+    <value name="PATCHLIST_28" value="59"/>
+    <value name="PATCHLIST_29" value="60"/>
+    <value name="PATCHLIST_30" value="61"/>
+    <value name="PATCHLIST_31" value="62"/>
+    <value name="PATCHLIST_32" value="63"/>
+  </enum>
+
+  <enum name="3D_Vertex_Component_Control" prefix="VFCOMP">
+    <value name="NOSTORE" value="0"/>
+    <value name="STORE_SRC" value="1"/>
+    <value name="STORE_0" value="2"/>
+    <value name="STORE_1_FP" value="3"/>
+    <value name="STORE_1_INT" value="4"/>
+    <value name="STORE_VID" value="5"/>
+    <value name="STORE_IID" value="6"/>
+    <value name="STORE_PID" value="7"/>
+  </enum>
+
+  <enum name="3D_Compare_Function" prefix="COMPAREFUNCTION">
+    <value name="ALWAYS" value="0"/>
+    <value name="NEVER" value="1"/>
+    <value name="LESS" value="2"/>
+    <value name="EQUAL" value="3"/>
+    <value name="LEQUAL" value="4"/>
+    <value name="GREATER" value="5"/>
+    <value name="NOTEQUAL" value="6"/>
+    <value name="GEQUAL" value="7"/>
+  </enum>
+
+  <enum name="SURFACE_FORMAT" prefix="SF">
+    <value name="R32G32B32A32_FLOAT" value="0"/>
+    <value name="R32G32B32A32_SINT" value="1"/>
+    <value name="R32G32B32A32_UINT" value="2"/>
+    <value name="R32G32B32A32_UNORM" value="3"/>
+    <value name="R32G32B32A32_SNORM" value="4"/>
+    <value name="R64G64_FLOAT" value="5"/>
+    <value name="R32G32B32X32_FLOAT" value="6"/>
+    <value name="R32G32B32A32_SSCALED" value="7"/>
+    <value name="R32G32B32A32_USCALED" value="8"/>
+    <value name="R32G32B32A32_SFIXED" value="32"/>
+    <value name="R64G64_PASSTHRU" value="33"/>
+    <value name="R32G32B32_FLOAT" value="64"/>
+    <value name="R32G32B32_SINT" value="65"/>
+    <value name="R32G32B32_UINT" value="66"/>
+    <value name="R32G32B32_UNORM" value="67"/>
+    <value name="R32G32B32_SNORM" value="68"/>
+    <value name="R32G32B32_SSCALED" value="69"/>
+    <value name="R32G32B32_USCALED" value="70"/>
+    <value name="R32G32B32_SFIXED" value="80"/>
+    <value name="R16G16B16A16_UNORM" value="128"/>
+    <value name="R16G16B16A16_SNORM" value="129"/>
+    <value name="R16G16B16A16_SINT" value="130"/>
+    <value name="R16G16B16A16_UINT" value="131"/>
+    <value name="R16G16B16A16_FLOAT" value="132"/>
+    <value name="R32G32_FLOAT" value="133"/>
+    <value name="R32G32_SINT" value="134"/>
+    <value name="R32G32_UINT" value="135"/>
+    <value name="R32_FLOAT_X8X24_TYPELESS" value="136"/>
+    <value name="X32_TYPELESS_G8X24_UINT" value="137"/>
+    <value name="L32A32_FLOAT" value="138"/>
+    <value name="R32G32_UNORM" value="139"/>
+    <value name="R32G32_SNORM" value="140"/>
+    <value name="R64_FLOAT" value="141"/>
+    <value name="R16G16B16X16_UNORM" value="142"/>
+    <value name="R16G16B16X16_FLOAT" value="143"/>
+    <value name="A32X32_FLOAT" value="144"/>
+    <value name="L32X32_FLOAT" value="145"/>
+    <value name="I32X32_FLOAT" value="146"/>
+    <value name="R16G16B16A16_SSCALED" value="147"/>
+    <value name="R16G16B16A16_USCALED" value="148"/>
+    <value name="R32G32_SSCALED" value="149"/>
+    <value name="R32G32_USCALED" value="150"/>
+    <value name="R32G32_SFIXED" value="160"/>
+    <value name="R64_PASSTHRU" value="161"/>
+    <value name="B8G8R8A8_UNORM" value="192"/>
+    <value name="B8G8R8A8_UNORM_SRGB" value="193"/>
+    <value name="R10G10B10A2_UNORM" value="194"/>
+    <value name="R10G10B10A2_UNORM_SRGB" value="195"/>
+    <value name="R10G10B10A2_UINT" value="196"/>
+    <value name="R10G10B10_SNORM_A2_UNORM" value="197"/>
+    <value name="R8G8B8A8_UNORM" value="199"/>
+    <value name="R8G8B8A8_UNORM_SRGB" value="200"/>
+    <value name="R8G8B8A8_SNORM" value="201"/>
+    <value name="R8G8B8A8_SINT" value="202"/>
+    <value name="R8G8B8A8_UINT" value="203"/>
+    <value name="R16G16_UNORM" value="204"/>
+    <value name="R16G16_SNORM" value="205"/>
+    <value name="R16G16_SINT" value="206"/>
+    <value name="R16G16_UINT" value="207"/>
+    <value name="R16G16_FLOAT" value="208"/>
+    <value name="B10G10R10A2_UNORM" value="209"/>
+    <value name="B10G10R10A2_UNORM_SRGB" value="210"/>
+    <value name="R11G11B10_FLOAT" value="211"/>
+    <value name="R32_SINT" value="214"/>
+    <value name="R32_UINT" value="215"/>
+    <value name="R32_FLOAT" value="216"/>
+    <value name="R24_UNORM_X8_TYPELESS" value="217"/>
+    <value name="X24_TYPELESS_G8_UINT" value="218"/>
+    <value name="L32_UNORM" value="221"/>
+    <value name="A32_UNORM" value="222"/>
+    <value name="L16A16_UNORM" value="223"/>
+    <value name="I24X8_UNORM" value="224"/>
+    <value name="L24X8_UNORM" value="225"/>
+    <value name="A24X8_UNORM" value="226"/>
+    <value name="I32_FLOAT" value="227"/>
+    <value name="L32_FLOAT" value="228"/>
+    <value name="A32_FLOAT" value="229"/>
+    <value name="X8B8_UNORM_G8R8_SNORM" value="230"/>
+    <value name="A8X8_UNORM_G8R8_SNORM" value="231"/>
+    <value name="B8X8_UNORM_G8R8_SNORM" value="232"/>
+    <value name="B8G8R8X8_UNORM" value="233"/>
+    <value name="B8G8R8X8_UNORM_SRGB" value="234"/>
+    <value name="R8G8B8X8_UNORM" value="235"/>
+    <value name="R8G8B8X8_UNORM_SRGB" value="236"/>
+    <value name="R9G9B9E5_SHAREDEXP" value="237"/>
+    <value name="B10G10R10X2_UNORM" value="238"/>
+    <value name="L16A16_FLOAT" value="240"/>
+    <value name="R32_UNORM" value="241"/>
+    <value name="R32_SNORM" value="242"/>
+    <value name="R10G10B10X2_USCALED" value="243"/>
+    <value name="R8G8B8A8_SSCALED" value="244"/>
+    <value name="R8G8B8A8_USCALED" value="245"/>
+    <value name="R16G16_SSCALED" value="246"/>
+    <value name="R16G16_USCALED" value="247"/>
+    <value name="R32_SSCALED" value="248"/>
+    <value name="R32_USCALED" value="249"/>
+    <value name="B5G6R5_UNORM" value="256"/>
+    <value name="B5G6R5_UNORM_SRGB" value="257"/>
+    <value name="B5G5R5A1_UNORM" value="258"/>
+    <value name="B5G5R5A1_UNORM_SRGB" value="259"/>
+    <value name="B4G4R4A4_UNORM" value="260"/>
+    <value name="B4G4R4A4_UNORM_SRGB" value="261"/>
+    <value name="R8G8_UNORM" value="262"/>
+    <value name="R8G8_SNORM" value="263"/>
+    <value name="R8G8_SINT" value="264"/>
+    <value name="R8G8_UINT" value="265"/>
+    <value name="R16_UNORM" value="266"/>
+    <value name="R16_SNORM" value="267"/>
+    <value name="R16_SINT" value="268"/>
+    <value name="R16_UINT" value="269"/>
+    <value name="R16_FLOAT" value="270"/>
+    <value name="A8P8_UNORM_PALETTE0" value="271"/>
+    <value name="A8P8_UNORM_PALETTE1" value="272"/>
+    <value name="I16_UNORM" value="273"/>
+    <value name="L16_UNORM" value="274"/>
+    <value name="A16_UNORM" value="275"/>
+    <value name="L8A8_UNORM" value="276"/>
+    <value name="I16_FLOAT" value="277"/>
+    <value name="L16_FLOAT" value="278"/>
+    <value name="A16_FLOAT" value="279"/>
+    <value name="L8A8_UNORM_SRGB" value="280"/>
+    <value name="R5G5_SNORM_B6_UNORM" value="281"/>
+    <value name="B5G5R5X1_UNORM" value="282"/>
+    <value name="B5G5R5X1_UNORM_SRGB" value="283"/>
+    <value name="R8G8_SSCALED" value="284"/>
+    <value name="R8G8_USCALED" value="285"/>
+    <value name="R16_SSCALED" value="286"/>
+    <value name="R16_USCALED" value="287"/>
+    <value name="P8A8_UNORM_PALETTE0" value="290"/>
+    <value name="P8A8_UNORM_PALETTE1" value="291"/>
+    <value name="A1B5G5R5_UNORM" value="292"/>
+    <value name="A4B4G4R4_UNORM" value="293"/>
+    <value name="L8A8_UINT" value="294"/>
+    <value name="L8A8_SINT" value="295"/>
+    <value name="R8_UNORM" value="320"/>
+    <value name="R8_SNORM" value="321"/>
+    <value name="R8_SINT" value="322"/>
+    <value name="R8_UINT" value="323"/>
+    <value name="A8_UNORM" value="324"/>
+    <value name="I8_UNORM" value="325"/>
+    <value name="L8_UNORM" value="326"/>
+    <value name="P4A4_UNORM_PALETTE0" value="327"/>
+    <value name="A4P4_UNORM_PALETTE0" value="328"/>
+    <value name="R8_SSCALED" value="329"/>
+    <value name="R8_USCALED" value="330"/>
+    <value name="P8_UNORM_PALETTE0" value="331"/>
+    <value name="L8_UNORM_SRGB" value="332"/>
+    <value name="P8_UNORM_PALETTE1" value="333"/>
+    <value name="P4A4_UNORM_PALETTE1" value="334"/>
+    <value name="A4P4_UNORM_PALETTE1" value="335"/>
+    <value name="Y8_UNORM" value="336"/>
+    <value name="L8_UINT" value="338"/>
+    <value name="L8_SINT" value="339"/>
+    <value name="I8_UINT" value="340"/>
+    <value name="I8_SINT" value="341"/>
+    <value name="DXT1_RGB_SRGB" value="384"/>
+    <value name="R1_UNORM" value="385"/>
+    <value name="YCRCB_NORMAL" value="386"/>
+    <value name="YCRCB_SWAPUVY" value="387"/>
+    <value name="P2_UNORM_PALETTE0" value="388"/>
+    <value name="P2_UNORM_PALETTE1" value="389"/>
+    <value name="BC1_UNORM" value="390"/>
+    <value name="BC2_UNORM" value="391"/>
+    <value name="BC3_UNORM" value="392"/>
+    <value name="BC4_UNORM" value="393"/>
+    <value name="BC5_UNORM" value="394"/>
+    <value name="BC1_UNORM_SRGB" value="395"/>
+    <value name="BC2_UNORM_SRGB" value="396"/>
+    <value name="BC3_UNORM_SRGB" value="397"/>
+    <value name="MONO8" value="398"/>
+    <value name="YCRCB_SWAPUV" value="399"/>
+    <value name="YCRCB_SWAPY" value="400"/>
+    <value name="DXT1_RGB" value="401"/>
+    <value name="FXT1" value="402"/>
+    <value name="R8G8B8_UNORM" value="403"/>
+    <value name="R8G8B8_SNORM" value="404"/>
+    <value name="R8G8B8_SSCALED" value="405"/>
+    <value name="R8G8B8_USCALED" value="406"/>
+    <value name="R64G64B64A64_FLOAT" value="407"/>
+    <value name="R64G64B64_FLOAT" value="408"/>
+    <value name="BC4_SNORM" value="409"/>
+    <value name="BC5_SNORM" value="410"/>
+    <value name="R16G16B16_FLOAT" value="411"/>
+    <value name="R16G16B16_UNORM" value="412"/>
+    <value name="R16G16B16_SNORM" value="413"/>
+    <value name="R16G16B16_SSCALED" value="414"/>
+    <value name="R16G16B16_USCALED" value="415"/>
+    <value name="BC6H_SF16" value="417"/>
+    <value name="BC7_UNORM" value="418"/>
+    <value name="BC7_UNORM_SRGB" value="419"/>
+    <value name="BC6H_UF16" value="420"/>
+    <value name="PLANAR_420_8" value="421"/>
+    <value name="R8G8B8_UNORM_SRGB" value="424"/>
+    <value name="ETC1_RGB8" value="425"/>
+    <value name="ETC2_RGB8" value="426"/>
+    <value name="EAC_R11" value="427"/>
+    <value name="EAC_RG11" value="428"/>
+    <value name="EAC_SIGNED_R11" value="429"/>
+    <value name="EAC_SIGNED_RG11" value="430"/>
+    <value name="ETC2_SRGB8" value="431"/>
+    <value name="R16G16B16_UINT" value="432"/>
+    <value name="R16G16B16_SINT" value="433"/>
+    <value name="R32_SFIXED" value="434"/>
+    <value name="R10G10B10A2_SNORM" value="435"/>
+    <value name="R10G10B10A2_USCALED" value="436"/>
+    <value name="R10G10B10A2_SSCALED" value="437"/>
+    <value name="R10G10B10A2_SINT" value="438"/>
+    <value name="B10G10R10A2_SNORM" value="439"/>
+    <value name="B10G10R10A2_USCALED" value="440"/>
+    <value name="B10G10R10A2_SSCALED" value="441"/>
+    <value name="B10G10R10A2_UINT" value="442"/>
+    <value name="B10G10R10A2_SINT" value="443"/>
+    <value name="R64G64B64A64_PASSTHRU" value="444"/>
+    <value name="R64G64B64_PASSTHRU" value="445"/>
+    <value name="ETC2_RGB8_PTA" value="448"/>
+    <value name="ETC2_SRGB8_PTA" value="449"/>
+    <value name="ETC2_EAC_RGBA8" value="450"/>
+    <value name="ETC2_EAC_SRGB8_A8" value="451"/>
+    <value name="R8G8B8_UINT" value="456"/>
+    <value name="R8G8B8_SINT" value="457"/>
+    <value name="RAW" value="511"/>
+  </enum>
+
+  <enum name="Texture Coordinate Mode" prefix="TCM">
+    <value name="WRAP" value="0"/>
+    <value name="MIRROR" value="1"/>
+    <value name="CLAMP" value="2"/>
+    <value name="CUBE" value="3"/>
+    <value name="CLAMP_BORDER" value="4"/>
+    <value name="MIRROR_ONCE" value="5"/>
+  </enum>
+
+  <instruction name="3DPRIMITIVE" bias="2" length="7">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="3"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="0"/>
+    <field name="Indirect Parameter Enable" start="10" end="10" type="bool"/>
+    <field name="UAV Coherency Required" start="9" end="9" type="bool"/>
+    <field name="Predicate Enable" start="8" end="8" type="bool"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="5"/>
+    <field name="End Offset Enable" start="41" end="41" type="bool"/>
+    <field name="Vertex Access Type" start="40" end="40" type="uint">
+      <value name="SEQUENTIAL" value="0"/>
+      <value name="RANDOM" value="1"/>
+    </field>
+    <field name="Primitive Topology Type" start="32" end="37" type="uint"/>
+    <field name="Vertex Count Per Instance" start="64" end="95" type="uint"/>
+    <field name="Start Vertex Location" start="96" end="127" type="uint"/>
+    <field name="Instance Count" start="128" end="159" type="uint"/>
+    <field name="Start Instance Location" start="160" end="191" type="uint"/>
+    <field name="Base Vertex Location" start="192" end="223" type="int"/>
+  </instruction>
+
+  <instruction name="3DSTATE_AA_LINE_PARAMETERS" bias="2" length="3">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="1"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="10"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="1"/>
+    <field name="AA Coverage Bias" start="48" end="55" type="u0.8"/>
+    <field name="AA Coverage Slope" start="32" end="39" type="u0.8"/>
+    <field name="AA Coverage EndCap Bias" start="80" end="87" type="u0.8"/>
+    <field name="AA Coverage EndCap Slope" start="64" end="71" type="u0.8"/>
+  </instruction>
+
+  <instruction name="3DSTATE_BINDING_TABLE_EDIT_DS" bias="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="70"/>
+    <field name="DWord Length" start="0" end="8" type="uint" default="0"/>
+    <field name="Binding Table Block Clear" start="48" end="63" type="uint"/>
+    <field name="Binding Table Edit Target" start="32" end="33" type="uint">
+      <value name="All Cores" value="3"/>
+      <value name="Core 1" value="2"/>
+      <value name="Core 0" value="1"/>
+    </field>
+    <group count="0" start="64" size="32">
+      <field name="Entry [n]" start="0" end="31" type="BINDING_TABLE_EDIT_ENTRY"/>
+    </group>
+  </instruction>
+
+  <instruction name="3DSTATE_BINDING_TABLE_EDIT_GS" bias="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="68"/>
+    <field name="DWord Length" start="0" end="8" type="uint" default="0"/>
+    <field name="Binding Table Block Clear" start="48" end="63" type="uint"/>
+    <field name="Binding Table Edit Target" start="32" end="33" type="uint">
+      <value name="All Cores" value="3"/>
+      <value name="Core 1" value="2"/>
+      <value name="Core 0" value="1"/>
+    </field>
+    <group count="0" start="64" size="32">
+      <field name="Entry [n]" start="0" end="31" type="BINDING_TABLE_EDIT_ENTRY"/>
+    </group>
+  </instruction>
+
+  <instruction name="3DSTATE_BINDING_TABLE_EDIT_HS" bias="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="69"/>
+    <field name="DWord Length" start="0" end="8" type="uint" default="0"/>
+    <field name="Binding Table Block Clear" start="48" end="63" type="uint"/>
+    <field name="Binding Table Edit Target" start="32" end="33" type="uint">
+      <value name="All Cores" value="3"/>
+      <value name="Core 1" value="2"/>
+      <value name="Core 0" value="1"/>
+    </field>
+    <group count="0" start="64" size="32">
+      <field name="Entry [n]" start="0" end="31" type="BINDING_TABLE_EDIT_ENTRY"/>
+    </group>
+  </instruction>
+
+  <instruction name="3DSTATE_BINDING_TABLE_EDIT_PS" bias="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="71"/>
+    <field name="DWord Length" start="0" end="8" type="uint" default="0"/>
+    <field name="Binding Table Block Clear" start="48" end="63" type="uint"/>
+    <field name="Binding Table Edit Target" start="32" end="33" type="uint">
+      <value name="All Cores" value="3"/>
+      <value name="Core 1" value="2"/>
+      <value name="Core 0" value="1"/>
+    </field>
+    <group count="0" start="64" size="32">
+      <field name="Entry [n]" start="0" end="31" type="BINDING_TABLE_EDIT_ENTRY"/>
+    </group>
+  </instruction>
+
+  <instruction name="3DSTATE_BINDING_TABLE_EDIT_VS" bias="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="67"/>
+    <field name="DWord Length" start="0" end="8" type="uint" default="0"/>
+    <field name="Binding Table Block Clear" start="48" end="63" type="uint"/>
+    <field name="Binding Table Edit Target" start="32" end="33" type="uint">
+      <value name="All Cores" value="3"/>
+      <value name="Core 1" value="2"/>
+      <value name="Core 0" value="1"/>
+    </field>
+    <group count="0" start="64" size="32">
+      <field name="Entry [n]" start="0" end="31" type="BINDING_TABLE_EDIT_ENTRY"/>
+    </group>
+  </instruction>
+
+  <instruction name="3DSTATE_BINDING_TABLE_POINTERS_DS" bias="2" length="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="40"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="0"/>
+    <field name="Pointer to DS Binding Table" start="37" end="47" type="offset"/>
+  </instruction>
+
+  <instruction name="3DSTATE_BINDING_TABLE_POINTERS_GS" bias="2" length="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="41"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="0"/>
+    <field name="Pointer to GS Binding Table" start="37" end="47" type="offset"/>
+  </instruction>
+
+  <instruction name="3DSTATE_BINDING_TABLE_POINTERS_HS" bias="2" length="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="39"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="0"/>
+    <field name="Pointer to HS Binding Table" start="37" end="47" type="offset"/>
+  </instruction>
+
+  <instruction name="3DSTATE_BINDING_TABLE_POINTERS_PS" bias="2" length="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="42"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="0"/>
+    <field name="Pointer to PS Binding Table" start="37" end="47" type="offset"/>
+  </instruction>
+
+  <instruction name="3DSTATE_BINDING_TABLE_POINTERS_VS" bias="2" length="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="38"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="0"/>
+    <field name="Pointer to VS Binding Table" start="37" end="47" type="offset"/>
+  </instruction>
+
+  <instruction name="3DSTATE_BINDING_TABLE_POOL_ALLOC" bias="2" length="3">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="1"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="25"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="1"/>
+    <field name="Binding Table Pool Base Address" start="44" end="63" type="address"/>
+    <field name="Binding Table Pool Enable" start="43" end="43" type="uint"/>
+    <field name="Surface Object Control State" start="39" end="42" type="MEMORY_OBJECT_CONTROL_STATE"/>
+    <field name="Binding Table Pool Upper Bound" start="76" end="95" type="address"/>
+  </instruction>
+
+  <instruction name="3DSTATE_BLEND_STATE_POINTERS" bias="2" length="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="36"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="0"/>
+    <field name="Blend State Pointer" start="38" end="63" type="offset"/>
+    <field start="32" end="32" type="mbo"/>
+  </instruction>
+
+  <instruction name="3DSTATE_CC_STATE_POINTERS" bias="2" length="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="14"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="0"/>
+    <field name="Color Calc State Pointer" start="38" end="63" type="offset"/>
+    <field start="32" end="32" type="mbo"/>
+  </instruction>
+
+  <instruction name="3DSTATE_CHROMA_KEY" bias="2" length="4">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="1"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="4"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="2"/>
+    <field name="ChromaKey Table Index" start="62" end="63" type="uint"/>
+    <field name="ChromaKey Low Value" start="64" end="95" type="uint"/>
+    <field name="ChromaKey High Value" start="96" end="127" type="uint"/>
+  </instruction>
+
+  <instruction name="3DSTATE_CLEAR_PARAMS" bias="2" length="3">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="4"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="1"/>
+    <field name="Depth Clear Value" start="32" end="63" type="uint"/>
+    <field name="Depth Clear Value Valid" start="64" end="64" type="bool"/>
+  </instruction>
+
+  <instruction name="3DSTATE_CLIP" bias="2" length="4">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="18"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="2"/>
+    <field name="Front Winding" start="52" end="52" type="uint"/>
+    <field name="Vertex Sub Pixel Precision Select" start="51" end="51" type="uint"/>
+    <field name="EarlyCull Enable" start="50" end="50" type="bool"/>
+    <field name="Cull Mode" start="48" end="49" type="uint" prefix="CULLMODE">
+      <value name="BOTH" value="0"/>
+      <value name="NONE" value="1"/>
+      <value name="FRONT" value="2"/>
+      <value name="BACK" value="3"/>
+    </field>
+    <field name="Clipper Statistics Enable" start="42" end="42" type="bool"/>
+    <field name="User Clip Distance Cull Test Enable Bitmask" start="32" end="39" type="uint"/>
+    <field name="Clip Enable" start="95" end="95" type="bool"/>
+    <field name="API Mode" start="94" end="94" type="uint">
+      <value name="APIMODE_OGL" value="0"/>
+    </field>
+    <field name="Viewport XY ClipTest Enable" start="92" end="92" type="bool"/>
+    <field name="Viewport Z ClipTest Enable" start="91" end="91" type="bool"/>
+    <field name="Guardband ClipTest Enable" start="90" end="90" type="bool"/>
+    <field name="User Clip Distance Clip Test Enable Bitmask" start="80" end="87" type="uint"/>
+    <field name="Clip Mode" start="77" end="79" type="uint">
+      <value name="CLIPMODE_NORMAL" value="0"/>
+      <value name="CLIPMODE_REJECT_ALL" value="3"/>
+      <value name="CLIPMODE_ACCEPT_ALL" value="4"/>
+    </field>
+    <field name="Perspective Divide Disable" start="73" end="73" type="bool"/>
+    <field name="Non-Perspective Barycentric Enable" start="72" end="72" type="bool"/>
+    <field name="Triangle Strip/List Provoking Vertex Select" start="68" end="69" type="uint">
+      <value name="Vertex 0" value="0"/>
+      <value name="Vertex 1" value="1"/>
+      <value name="Vertex 2" value="2"/>
+    </field>
+    <field name="Line Strip/List Provoking Vertex Select" start="66" end="67" type="uint">
+      <value name="Vertex 0" value="0"/>
+      <value name="Vertex 1" value="1"/>
+    </field>
+    <field name="Triangle Fan Provoking Vertex Select" start="64" end="65" type="uint">
+      <value name="Vertex 0" value="0"/>
+      <value name="Vertex 1" value="1"/>
+      <value name="Vertex 2" value="2"/>
+    </field>
+    <field name="Minimum Point Width" start="113" end="123" type="u8.3"/>
+    <field name="Maximum Point Width" start="102" end="112" type="u8.3"/>
+    <field name="Force Zero RTAIndex Enable" start="101" end="101" type="bool"/>
+    <field name="Maximum VPIndex" start="96" end="99" type="uint"/>
+  </instruction>
+
+  <instruction name="3DSTATE_CONSTANT_DS" bias="2" length="7">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="26"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="5"/>
+    <field name="Constant Body" start="32" end="223" type="3DSTATE_CONSTANT_BODY"/>
+  </instruction>
+
+  <instruction name="3DSTATE_CONSTANT_GS" bias="2" length="7">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="22"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="5"/>
+    <field name="Constant Body" start="32" end="223" type="3DSTATE_CONSTANT_BODY"/>
+  </instruction>
+
+  <instruction name="3DSTATE_CONSTANT_HS" bias="2" length="7">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="25"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="5"/>
+    <field name="Constant Body" start="32" end="223" type="3DSTATE_CONSTANT_BODY"/>
+  </instruction>
+
+  <instruction name="3DSTATE_CONSTANT_PS" bias="2" length="7">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="23"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="5"/>
+    <field name="Constant Body" start="32" end="223" type="3DSTATE_CONSTANT_BODY"/>
+  </instruction>
+
+  <instruction name="3DSTATE_CONSTANT_VS" bias="2" length="7">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="21"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="5"/>
+    <field name="Constant Body" start="32" end="223" type="3DSTATE_CONSTANT_BODY"/>
+  </instruction>
+
+  <instruction name="3DSTATE_DEPTH_BUFFER" bias="2" length="7">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="5"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="5"/>
+    <field name="Surface Type" start="61" end="63" type="uint">
+      <value name="SURFTYPE_1D" value="0"/>
+      <value name="SURFTYPE_2D" value="1"/>
+      <value name="SURFTYPE_3D" value="2"/>
+      <value name="SURFTYPE_CUBE" value="3"/>
+      <value name="SURFTYPE_NULL" value="7"/>
+    </field>
+    <field name="Depth Write Enable" start="60" end="60" type="bool"/>
+    <field name="Stencil Write Enable" start="59" end="59" type="bool"/>
+    <field name="Hierarchical Depth Buffer Enable" start="54" end="54" type="bool"/>
+    <field name="Surface Format" start="50" end="52" type="uint">
+      <value name="D32_FLOAT" value="1"/>
+      <value name="D24_UNORM_X8_UINT" value="3"/>
+      <value name="D16_UNORM" value="5"/>
+    </field>
+    <field name="Surface Pitch" start="32" end="49" type="uint"/>
+    <field name="Surface Base Address" start="64" end="95" type="address"/>
+    <field name="Height" start="114" end="127" type="uint"/>
+    <field name="Width" start="100" end="113" type="uint"/>
+    <field name="LOD" start="96" end="99" type="uint"/>
+    <field name="Depth" start="149" end="159" type="uint">
+      <value name="SURFTYPE_CUBE (must be zero)" value="0"/>
+    </field>
+    <field name="Minimum Array Element" start="138" end="148" type="uint"/>
+    <field name="Depth Buffer Object Control State" start="128" end="131" type="MEMORY_OBJECT_CONTROL_STATE"/>
+    <field name="Depth Coordinate Offset Y" start="176" end="191" type="int"/>
+    <field name="Depth Coordinate Offset X" start="160" end="175" type="int"/>
+    <field name="Render Target View Extent" start="213" end="223" type="uint"/>
+  </instruction>
+
+  <instruction name="3DSTATE_DEPTH_STENCIL_STATE_POINTERS" bias="2" length="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="37"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="0"/>
+    <field name="Pointer to DEPTH_STENCIL_STATE" start="38" end="63" type="offset"/>
+    <field start="32" end="32" type="mbo"/>
+  </instruction>
+
+  <instruction name="3DSTATE_DRAWING_RECTANGLE" bias="2" length="4">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="1"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="0"/>
+    <field name="Core Mode Select" start="14" end="15" type="uint">
+      <value name="Legacy" value="0"/>
+      <value name="Core 0 Enabled" value="1"/>
+      <value name="Core 1 Enabled" value="2"/>
+    </field>
+    <field name="DWord Length" start="0" end="7" type="uint" default="2"/>
+    <field name="Clipped Drawing Rectangle Y Min" start="48" end="63" type="uint"/>
+    <field name="Clipped Drawing Rectangle X Min" start="32" end="47" type="uint"/>
+    <field name="Clipped Drawing Rectangle Y Max" start="80" end="95" type="uint"/>
+    <field name="Clipped Drawing Rectangle X Max" start="64" end="79" type="uint"/>
+    <field name="Drawing Rectangle Origin Y" start="112" end="127" type="int"/>
+    <field name="Drawing Rectangle Origin X" start="96" end="111" type="int"/>
+  </instruction>
+
+  <instruction name="3DSTATE_DS" bias="2" length="6">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="29"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="4"/>
+    <field name="Kernel Start Pointer" start="38" end="63" type="offset"/>
+    <field name="Single Domain Point Dispatch" start="95" end="95" type="uint"/>
+    <field name="Vector Mask Enable" start="94" end="94" type="bool"/>
+    <field name="Sampler Count" start="91" end="93" type="uint">
+      <value name="No Samplers" value="0"/>
+      <value name="1-4 Samplers" value="1"/>
+      <value name="5-8 Samplers" value="2"/>
+      <value name="9-12 Samplers" value="3"/>
+      <value name="13-16 Samplers" value="4"/>
+    </field>
+    <field name="Binding Table Entry Count" start="82" end="89" type="uint"/>
+    <field name="Thread Dispatch Priority" start="81" end="81" type="uint">
+      <value name="High" value="1"/>
+    </field>
+    <field name="Floating Point Mode" start="80" end="80" type="uint">
+      <value name="IEEE-754" value="0"/>
+      <value name="Alternate" value="1"/>
+    </field>
+    <field name="Accesses UAV" start="78" end="78" type="bool"/>
+    <field name="Illegal Opcode Exception Enable" start="77" end="77" type="bool"/>
+    <field name="Software Exception Enable" start="71" end="71" type="bool"/>
+    <field name="Scratch Space Base Pointer" start="106" end="127" type="offset"/>
+    <field name="Per-Thread Scratch Space" start="96" end="99" type="uint"/>
+    <field name="Dispatch GRF Start Register For URB Data" start="148" end="152" type="uint"/>
+    <field name="Patch URB Entry Read Length" start="139" end="145" type="uint"/>
+    <field name="Patch URB Entry Read Offset" start="132" end="137" type="uint"/>
+    <field name="Maximum Number of Threads" start="181" end="189" type="uint"/>
+    <field name="Statistics Enable" start="170" end="170" type="bool"/>
+    <field name="Compute W Coordinate Enable" start="162" end="162" type="bool"/>
+    <field name="DS Cache Disable" start="161" end="161" type="bool"/>
+    <field name="DS Function Enable" start="160" end="160" type="bool"/>
+  </instruction>
+
+  <instruction name="3DSTATE_GATHER_CONSTANT_DS" bias="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="55"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="1"/>
+    <field name="Constant Buffer Valid" start="48" end="63" type="uint"/>
+    <field name="Constant Buffer Binding Table Block" start="44" end="47" type="uint"/>
+    <field name="Gather Buffer Offset" start="70" end="86" type="offset"/>
+    <group count="0" start="96" size="32">
+      <field name="Entry_0" start="0" end="15" type="GATHER_CONSTANT_ENTRY"/>
+      <field name="Entry_1" start="16" end="31" type="GATHER_CONSTANT_ENTRY"/>
+    </group>
+  </instruction>
+
+  <instruction name="3DSTATE_GATHER_CONSTANT_GS" bias="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="53"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="1"/>
+    <field name="Constant Buffer Valid" start="48" end="63" type="uint"/>
+    <field name="Constant Buffer Binding Table Block" start="44" end="47" type="uint"/>
+    <field name="Gather Buffer Offset" start="70" end="86" type="offset"/>
+    <group count="0" start="96" size="32">
+      <field name="Entry_0" start="0" end="15" type="GATHER_CONSTANT_ENTRY"/>
+      <field name="Entry_1" start="16" end="31" type="GATHER_CONSTANT_ENTRY"/>
+    </group>
+  </instruction>
+
+  <instruction name="3DSTATE_GATHER_CONSTANT_HS" bias="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="54"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="1"/>
+    <field name="Constant Buffer Valid" start="48" end="63" type="uint"/>
+    <field name="Constant Buffer Binding Table Block" start="44" end="47" type="uint"/>
+    <field name="Gather Buffer Offset" start="70" end="86" type="offset"/>
+    <group count="0" start="96" size="32">
+      <field name="Entry_0" start="0" end="15" type="GATHER_CONSTANT_ENTRY"/>
+      <field name="Entry_1" start="16" end="31" type="GATHER_CONSTANT_ENTRY"/>
+    </group>
+  </instruction>
+
+  <instruction name="3DSTATE_GATHER_CONSTANT_PS" bias="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="56"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="1"/>
+    <field name="Constant Buffer Valid" start="48" end="63" type="uint"/>
+    <field name="Constant Buffer Binding Table Block" start="44" end="47" type="uint"/>
+    <field name="Gather Buffer Offset" start="70" end="86" type="offset"/>
+    <field name="Constant Buffer Dx9 Enable" start="68" end="68" type="bool"/>
+    <group count="0" start="96" size="32">
+      <field name="Entry_0" start="0" end="15" type="GATHER_CONSTANT_ENTRY"/>
+      <field name="Entry_1" start="16" end="31" type="GATHER_CONSTANT_ENTRY"/>
+    </group>
+  </instruction>
+
+  <instruction name="3DSTATE_GATHER_CONSTANT_VS" bias="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="52"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="0"/>
+    <field name="Constant Buffer Valid" start="48" end="63" type="uint"/>
+    <field name="Constant Buffer Binding Table Block" start="44" end="47" type="uint"/>
+    <field name="Gather Buffer Offset" start="70" end="86" type="offset"/>
+    <field name="Constant Buffer Dx9 Enable" start="68" end="68" type="bool"/>
+    <group count="0" start="96" size="32">
+      <field name="Entry_0" start="0" end="15" type="GATHER_CONSTANT_ENTRY"/>
+      <field name="Entry_1" start="16" end="31" type="GATHER_CONSTANT_ENTRY"/>
+    </group>
+  </instruction>
+
+  <instruction name="3DSTATE_GATHER_POOL_ALLOC" bias="2" length="3">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="1"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="26"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="1"/>
+    <field name="Gather Pool Base Address" start="44" end="63" type="address"/>
+    <field name="Gather Pool Enable" start="43" end="43" type="bool"/>
+    <field start="36" end="37" type="mbo"/>
+    <field name="Memory Object Control State" start="32" end="35" type="MEMORY_OBJECT_CONTROL_STATE"/>
+    <field name="Gather Pool Upper Bound" start="76" end="95" type="address"/>
+  </instruction>
+
+  <instruction name="3DSTATE_GS" bias="2" length="7">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="17"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="5"/>
+    <field name="Kernel Start Pointer" start="38" end="63" type="offset"/>
+    <field name="Single Program Flow (SPF)" start="95" end="95" type="uint"/>
+    <field name="Vector Mask Enable (VME)" start="94" end="94" type="uint"/>
+    <field name="Sampler Count" start="91" end="93" type="uint">
+      <value name="No Samplers" value="0"/>
+      <value name="1-4 Samplers" value="1"/>
+      <value name="5-8 Samplers" value="2"/>
+      <value name="9-12 Samplers" value="3"/>
+      <value name="13-16 Samplers" value="4"/>
+    </field>
+    <field name="Binding Table Entry Count" start="82" end="89" type="uint"/>
+    <field name="Thread Priority" start="81" end="81" type="uint">
+      <value name="Normal Priority" value="0"/>
+      <value name="High Priority" value="1"/>
+    </field>
+    <field name="Floating Point Mode" start="80" end="80" type="uint">
+      <value name="IEEE-754" value="0"/>
+      <value name="alternate" value="1"/>
+    </field>
+    <field name="Illegal Opcode Exception Enable" start="77" end="77" type="bool"/>
+    <field name="GS accesses UAV" start="76" end="76" type="uint"/>
+    <field name="Mask Stack Exception Enable" start="75" end="75" type="bool"/>
+    <field name="Software  Exception Enable" start="71" end="71" type="bool"/>
+    <field name="Scratch Space Base Pointer" start="106" end="127" type="offset"/>
+    <field name="Per-Thread Scratch Space" start="96" end="99" type="uint"/>
+    <field name="Output Vertex Size" start="151" end="156" type="uint"/>
+    <field name="Output Topology" start="145" end="150" type="uint" prefix="OUTPUT"/>
+    <field name="Vertex URB Entry Read Length" start="139" end="144" type="uint"/>
+    <field name="Include Vertex Handles" start="138" end="138" type="uint"/>
+    <field name="Vertex URB Entry Read Offset" start="132" end="137" type="uint"/>
+    <field name="Dispatch GRF Start Register for URB Data" start="128" end="131" type="uint"/>
+    <field name="Maximum Number of Threads" start="184" end="191" type="uint"/>
+    <field name="Control Data Header Size" start="180" end="183" type="uint"/>
+    <field name="Instance Control" start="175" end="179" type="uint"/>
+    <field name="Default StreamID" start="173" end="174" type="uint"/>
+    <field name="Dispatch Mode" start="171" end="172" type="uint" prefix="DISPATCH_MODE">
+      <value name="SINGLE" value="0"/>
+      <value name="DUAL_INSTANCE" value="1"/>
+      <value name="DUAL_OBJECT" value="2"/>
+    </field>
+    <field name="GS Statistics Enable" start="170" end="170" type="uint"/>
+    <field name="GS Invocations Increment Value" start="165" end="169" type="uint"/>
+    <field name="Include Primitive ID" start="164" end="164" type="uint"/>
+    <field name="Hint" start="163" end="163" type="uint"/>
+    <field name="Reorder Mode" start="162" end="162" type="uint">
+      <value name="REORDER_LEADING" value="0"/>
+      <value name="REORDER_TRAILING" value="1"/>
+    </field>
+    <field name="Discard Adjacency" start="161" end="161" type="bool"/>
+    <field name="GS Enable" start="160" end="160" type="bool"/>
+    <field name="Control Data Format" start="223" end="223" type="uint">
+      <value name="GSCTL_CUT" value="0"/>
+      <value name="GSCTL_SID" value="1"/>
+    </field>
+    <field name="Semaphore Handle" start="192" end="204" type="offset"/>
+  </instruction>
+
+  <instruction name="3DSTATE_HIER_DEPTH_BUFFER" bias="2" length="3">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="7"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="1"/>
+    <field name="Hierarchical Depth Buffer Object Control State" start="57" end="60" type="MEMORY_OBJECT_CONTROL_STATE"/>
+    <field name="Surface Pitch" start="32" end="48" type="uint"/>
+    <field name="Surface Base Address" start="64" end="95" type="address"/>
+  </instruction>
+
+  <instruction name="3DSTATE_HS" bias="2" length="7">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="27"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="5"/>
+    <field name="Sampler Count" start="59" end="61" type="uint">
+      <value name="No Samplers" value="0"/>
+      <value name="1-4 Samplers" value="1"/>
+      <value name="5-8 Samplers" value="2"/>
+      <value name="9-12 Samplers" value="3"/>
+      <value name="13-16 Samplers" value="4"/>
+    </field>
+    <field name="Binding Table Entry Count" start="50" end="57" type="uint"/>
+    <field name="Thread Dispatch Priority" start="49" end="49" type="uint">
+      <value name="High" value="1"/>
+    </field>
+    <field name="Floating Point Mode" start="48" end="48" type="uint">
+      <value name="IEEE-754" value="0"/>
+      <value name="alternate" value="1"/>
+    </field>
+    <field name="Illegal Opcode Exception Enable" start="45" end="45" type="bool"/>
+    <field name="Software Exception Enable" start="44" end="44" type="bool"/>
+    <field name="Maximum Number of Threads" start="32" end="39" type="uint"/>
+    <field name="Enable" start="95" end="95" type="bool"/>
+    <field name="Statistics Enable" start="93" end="93" type="bool"/>
+    <field name="Instance Count" start="64" end="67" type="uint"/>
+    <field name="Kernel Start Pointer" start="102" end="127" type="offset"/>
+    <field name="Scratch Space Base Pointer" start="138" end="159" type="offset"/>
+    <field name="Per-Thread Scratch Space" start="128" end="131" type="uint"/>
+    <field name="Single Program Flow" start="187" end="187" type="uint"/>
+    <field name="Vector Mask Enable" start="186" end="186" type="bool"/>
+    <field name="HS accesses UAV" start="185" end="185" type="bool"/>
+    <field name="Include Vertex Handles" start="184" end="184" type="uint"/>
+    <field name="Dispatch GRF Start Register For URB Data" start="179" end="183" type="uint"/>
+    <field name="Vertex URB Entry Read Length" start="171" end="176" type="uint"/>
+    <field name="Vertex URB Entry Read Offset" start="164" end="169" type="uint"/>
+    <field name="Semaphore Handle" start="192" end="204" type="offset"/>
+  </instruction>
+
+  <instruction name="3DSTATE_INDEX_BUFFER" bias="2" length="3">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="10"/>
+    <field name="Memory Object Control State" start="12" end="15" type="MEMORY_OBJECT_CONTROL_STATE"/>
+    <field name="Index Format" start="8" end="9" type="uint" prefix="INDEX">
+      <value name="BYTE" value="0"/>
+      <value name="WORD" value="1"/>
+      <value name="DWORD" value="2"/>
+    </field>
+    <field name="DWord Length" start="0" end="7" type="uint" default="1"/>
+    <field name="Buffer Starting Address" start="32" end="63" type="address"/>
+    <field name="Buffer Ending Address" start="64" end="95" type="address"/>
+  </instruction>
+
+  <instruction name="3DSTATE_LINE_STIPPLE" bias="2" length="3">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="1"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="8"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="1"/>
+    <field name="Modify Enable (Current Repeat Counter, Current Stipple Index)" start="63" end="63" type="bool"/>
+    <field name="Current Repeat Counter" start="53" end="61" type="uint"/>
+    <field name="Current Stipple Index" start="48" end="51" type="uint"/>
+    <field name="Line Stipple Pattern" start="32" end="47" type="uint"/>
+    <field name="Line Stipple Inverse Repeat Count" start="79" end="95" type="u1.16"/>
+    <field name="Line Stipple Repeat Count" start="64" end="72" type="uint"/>
+  </instruction>
+
+  <instruction name="3DSTATE_MONOFILTER_SIZE" bias="2" length="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="1"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="17"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="0"/>
+    <field name="Monochrome Filter Width" start="35" end="37" type="uint"/>
+    <field name="Monochrome Filter Height" start="32" end="34" type="uint"/>
+  </instruction>
+
+  <instruction name="3DSTATE_MULTISAMPLE" bias="2" length="4">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="1"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="13"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="2"/>
+    <field name="Multi Sample Enable" start="37" end="37" type="bool"/>
+    <field name="Pixel Location" start="36" end="36" type="uint">
+      <value name="PIXLOC_CENTER" value="0"/>
+      <value name="PIXLOC_UL_CORNER" value="1"/>
+    </field>
+    <field name="Number of Multisamples" start="33" end="35" type="uint">
+      <value name="NUMSAMPLES_1" value="0"/>
+      <value name="NUMSAMPLES_4" value="2"/>
+      <value name="NUMSAMPLES_8" value="3"/>
+    </field>
+    <field name="Sample3 X Offset" start="92" end="95" type="u0.4"/>
+    <field name="Sample3 Y Offset" start="88" end="91" type="u0.4"/>
+    <field name="Sample2 X Offset" start="84" end="87" type="u0.4"/>
+    <field name="Sample2 Y Offset" start="80" end="83" type="u0.4"/>
+    <field name="Sample1 X Offset" start="76" end="79" type="u0.4"/>
+    <field name="Sample1 Y Offset" start="72" end="75" type="u0.4"/>
+    <field name="Sample0 X Offset" start="68" end="71" type="u0.4"/>
+    <field name="Sample0 Y Offset" start="64" end="67" type="u0.4"/>
+    <field name="Sample7 X Offset" start="124" end="127" type="u0.4"/>
+    <field name="Sample7 Y Offset" start="120" end="123" type="u0.4"/>
+    <field name="Sample6 X Offset" start="116" end="119" type="u0.4"/>
+    <field name="Sample6 Y Offset" start="112" end="115" type="u0.4"/>
+    <field name="Sample5 X Offset" start="108" end="111" type="u0.4"/>
+    <field name="Sample5 Y Offset" start="104" end="107" type="u0.4"/>
+    <field name="Sample4 X Offset" start="100" end="103" type="u0.4"/>
+    <field name="Sample4 Y Offset" start="96" end="99" type="u0.4"/>
+  </instruction>
+
+  <instruction name="3DSTATE_POLY_STIPPLE_OFFSET" bias="2" length="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="1"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="6"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="0"/>
+    <field name="Polygon Stipple X Offset" start="40" end="44" type="uint"/>
+    <field name="Polygon Stipple Y Offset" start="32" end="36" type="uint"/>
+  </instruction>
+
+  <instruction name="3DSTATE_POLY_STIPPLE_PATTERN" bias="2" length="33">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="1"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="7"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="31"/>
+    <group count="32" start="32" size="32">
+      <field name="Pattern Row" start="0" end="31" type="uint"/>
+    </group>
+  </instruction>
+
+  <instruction name="3DSTATE_PS" bias="2" length="8">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="32"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="6"/>
+    <field name="Kernel Start Pointer[0]" start="38" end="63" type="offset"/>
+    <field name="Single Program Flow (SPF)" start="95" end="95" type="uint"/>
+    <field name="Vector Mask Enable (VME)" start="94" end="94" type="uint"/>
+    <field name="Sampler Count" start="91" end="93" type="uint"/>
+    <field name="Denormal Mode" start="90" end="90" type="uint">
+      <value name="FTZ" value="0"/>
+      <value name="RET" value="1"/>
+    </field>
+    <field name="Binding Table Entry Count" start="82" end="89" type="uint"/>
+    <field name="Thread Priority" start="81" end="81" type="uint">
+      <value name="High" value="1"/>
+    </field>
+    <field name="Floating Point Mode" start="80" end="80" type="uint">
+      <value name="IEEE-745" value="0"/>
+      <value name="Alt" value="1"/>
+    </field>
+    <field name="Rounding Mode" start="78" end="79" type="uint">
+      <value name="RTNE" value="0"/>
+      <value name="RU" value="1"/>
+      <value name="RD" value="2"/>
+      <value name="RTZ" value="3"/>
+    </field>
+    <field name="Illegal Opcode Exception Enable" start="77" end="77" type="bool"/>
+    <field name="Mask Stack Exception Enable" start="75" end="75" type="bool"/>
+    <field name="Software  Exception Enable" start="71" end="71" type="bool"/>
+    <field name="Scratch Space Base Pointer" start="106" end="127" type="offset"/>
+    <field name="Per Thread Scratch Space" start="96" end="99" type="uint"/>
+    <field name="Maximum Number of Threads" start="151" end="159" type="uint"/>
+    <field name="Sample Mask" start="140" end="147" type="uint"/>
+    <field name="Push Constant Enable" start="139" end="139" type="bool"/>
+    <field name="Attribute Enable" start="138" end="138" type="bool"/>
+    <field name="oMask Present to RenderTarget" start="137" end="137" type="bool"/>
+    <field name="Render Target Fast Clear Enable" start="136" end="136" type="bool"/>
+    <field name="Dual Source Blend Enable" start="135" end="135" type="bool"/>
+    <field name="Render Target Resolve Enable" start="134" end="134" type="bool"/>
+    <field name="PS Accesses UAV" start="133" end="133" type="bool"/>
+    <field name="Position XY Offset Select" start="131" end="132" type="uint">
+      <value name="POSOFFSET_NONE" value="0"/>
+      <value name="POSOFFSET_CENTROID" value="2"/>
+      <value name="POSOFFSET_SAMPLE" value="3"/>
+    </field>
+    <field name="32 Pixel Dispatch Enable" start="130" end="130" type="bool"/>
+    <field name="16 Pixel Dispatch Enable" start="129" end="129" type="bool"/>
+    <field name="8 Pixel Dispatch Enable" start="128" end="128" type="bool"/>
+    <field name="Dispatch GRF Start Register for Constant/Setup Data [0]" start="176" end="182" type="uint"/>
+    <field name="Dispatch GRF Start Register for Constant/Setup Data [1]" start="168" end="174" type="uint"/>
+    <field name="Dispatch GRF Start Register for Constant/Setup Data [2]" start="160" end="166" type="uint"/>
+    <field name="Kernel Start Pointer[1]" start="198" end="223" type="offset"/>
+    <field name="Kernel Start Pointer[2]" start="230" end="255" type="offset"/>
+  </instruction>
+
+  <instruction name="3DSTATE_PUSH_CONSTANT_ALLOC_DS" bias="2" length="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="1"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="20"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="0"/>
+    <field name="Constant Buffer Offset" start="48" end="52" type="uint"/>
+    <field name="Constant Buffer Size" start="32" end="37" type="uint"/>
+  </instruction>
+
+  <instruction name="3DSTATE_PUSH_CONSTANT_ALLOC_GS" bias="2" length="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="1"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="21"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="0"/>
+    <field name="Constant Buffer Offset" start="48" end="52" type="uint"/>
+    <field name="Constant Buffer Size" start="32" end="37" type="uint"/>
+  </instruction>
+
+  <instruction name="3DSTATE_PUSH_CONSTANT_ALLOC_HS" bias="2" length="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="1"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="19"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="0"/>
+    <field name="Constant Buffer Offset" start="48" end="52" type="uint"/>
+    <field name="Constant Buffer Size" start="32" end="37" type="uint"/>
+  </instruction>
+
+  <instruction name="3DSTATE_PUSH_CONSTANT_ALLOC_PS" bias="2" length="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="1"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="22"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="0"/>
+    <field name="Constant Buffer Offset" start="48" end="52" type="uint"/>
+    <field name="Constant Buffer Size" start="32" end="37" type="uint"/>
+  </instruction>
+
+  <instruction name="3DSTATE_PUSH_CONSTANT_ALLOC_VS" bias="2" length="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="1"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="18"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="0"/>
+    <field name="Constant Buffer Offset" start="48" end="52" type="uint"/>
+    <field name="Constant Buffer Size" start="32" end="37" type="uint"/>
+  </instruction>
+
+  <instruction name="3DSTATE_RAST_MULTISAMPLE" bias="2" length="6">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="1"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="14"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="4"/>
+    <field name="Number of Rasterization Multisamples" start="33" end="35" type="uint" prefix="NRM">
+      <value name="NUMRASTSAMPLES_1" value="0"/>
+      <value name="NUMRASTSAMPLES_2" value="1"/>
+      <value name="NUMRASTSAMPLES_4" value="2"/>
+      <value name="NUMRASTSAMPLES_8" value="3"/>
+      <value name="NUMRASTSAMPLES_16" value="4"/>
+    </field>
+    <field name="Sample3 X Offset" start="92" end="95" type="u0.4"/>
+    <field name="Sample3 Y Offset" start="88" end="91" type="u0.4"/>
+    <field name="Sample2 X Offset" start="84" end="87" type="u0.4"/>
+    <field name="Sample2 Y Offset" start="80" end="83" type="u0.4"/>
+    <field name="Sample1 X Offset" start="76" end="79" type="u0.4"/>
+    <field name="Sample1 Y Offset" start="72" end="75" type="u0.4"/>
+    <field name="Sample0 X Offset" start="68" end="71" type="u0.4"/>
+    <field name="Sample0 Y Offset" start="64" end="67" type="u0.4"/>
+    <field name="Sample7 X Offset" start="124" end="127" type="u0.4"/>
+    <field name="Sample7 Y Offset" start="120" end="123" type="u0.4"/>
+    <field name="Sample6 X Offset" start="116" end="119" type="u0.4"/>
+    <field name="Sample6 Y Offset" start="112" end="115" type="u0.4"/>
+    <field name="Sample5 X Offset" start="108" end="111" type="u0.4"/>
+    <field name="Sample5 Y Offset" start="104" end="107" type="u0.4"/>
+    <field name="Sample4 X Offset" start="100" end="103" type="u0.4"/>
+    <field name="Sample4 Y Offset" start="96" end="99" type="u0.4"/>
+    <field name="Sample11 X Offset" start="156" end="159" type="u0.4"/>
+    <field name="Sample11 Y Offset" start="152" end="155" type="u0.4"/>
+    <field name="Sample10 X Offset" start="148" end="151" type="u0.4"/>
+    <field name="Sample10 Y Offset" start="144" end="147" type="u0.4"/>
+    <field name="Sample9 X Offset" start="140" end="143" type="u0.4"/>
+    <field name="Sample9 Y Offset" start="136" end="139" type="u0.4"/>
+    <field name="Sample8 X Offset" start="132" end="135" type="u0.4"/>
+    <field name="Sample8 Y Offset" start="128" end="131" type="u0.4"/>
+    <field name="Sample15 X Offset" start="188" end="191" type="u0.4"/>
+    <field name="Sample15 Y Offset" start="184" end="187" type="u0.4"/>
+    <field name="Sample14 X Offset" start="180" end="183" type="u0.4"/>
+    <field name="Sample14 Y Offset" start="176" end="179" type="u0.4"/>
+    <field name="Sample13 X Offset" start="172" end="175" type="u0.4"/>
+    <field name="Sample13 Y Offset" start="168" end="171" type="u0.4"/>
+    <field name="Sample12 X Offset" start="164" end="167" type="u0.4"/>
+    <field name="Sample12 Y Offset" start="160" end="163" type="u0.4"/>
+  </instruction>
+
+  <instruction name="3DSTATE_SAMPLER_PALETTE_LOAD0" bias="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="1"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="2"/>
+    <field name="DWord Length" start="0" end="7" type="uint"/>
+    <group count="0" start="32" size="32">
+      <field name="Entry" start="0" end="31" type="PALETTE_ENTRY"/>
+    </group>
+  </instruction>
+
+  <instruction name="3DSTATE_SAMPLER_PALETTE_LOAD1" bias="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="1"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="12"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="0"/>
+    <group count="0" start="32" size="32">
+      <field name="Palette Alpha[0:N-1]" start="24" end="31" type="uint"/>
+      <field name="Palette Red[0:N-1]" start="16" end="23" type="uint"/>
+      <field name="Palette Green[0:N-1]" start="8" end="15" type="uint"/>
+      <field name="Palette Blue[0:N-1]" start="0" end="7" type="uint"/>
+    </group>
+  </instruction>
+
+  <instruction name="3DSTATE_SAMPLER_STATE_POINTERS_DS" bias="2" length="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="45"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="0"/>
+    <field name="Pointer to DS Sampler State" start="37" end="63" type="offset"/>
+  </instruction>
+
+  <instruction name="3DSTATE_SAMPLER_STATE_POINTERS_GS" bias="2" length="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="46"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="0"/>
+    <field name="Pointer to GS Sampler State" start="37" end="63" type="offset"/>
+  </instruction>
+
+  <instruction name="3DSTATE_SAMPLER_STATE_POINTERS_HS" bias="2" length="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="44"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="0"/>
+    <field name="Pointer to HS Sampler State" start="37" end="63" type="offset"/>
+  </instruction>
+
+  <instruction name="3DSTATE_SAMPLER_STATE_POINTERS_PS" bias="2" length="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="47"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="0"/>
+    <field name="Pointer to PS Sampler State" start="37" end="63" type="offset"/>
+  </instruction>
+
+  <instruction name="3DSTATE_SAMPLER_STATE_POINTERS_VS" bias="2" length="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="43"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="0"/>
+    <field name="Pointer to VS Sampler State" start="37" end="63" type="offset"/>
+  </instruction>
+
+  <instruction name="3DSTATE_SAMPLE_MASK" bias="2" length="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="24"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="0"/>
+    <field name="Sample Mask" start="32" end="39" type="uint"/>
+  </instruction>
+
+  <instruction name="3DSTATE_SBE" bias="2" length="14">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="31"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="12"/>
+    <field name="Attribute Swizzle Control Mode" start="60" end="60" type="uint"/>
+    <field name="Number of SF Output Attributes" start="54" end="59" type="uint"/>
+    <field name="Attribute Swizzle Enable" start="53" end="53" type="bool"/>
+    <field name="Point Sprite Texture Coordinate Origin" start="52" end="52" type="uint">
+      <value name="UPPERLEFT" value="0"/>
+      <value name="LOWERLEFT" value="1"/>
+    </field>
+    <field name="Vertex URB Entry Read Length" start="43" end="47" type="uint"/>
+    <field name="Vertex URB Entry Read Offset" start="36" end="41" type="uint"/>
+    <group count="16" start="64" size="16">
+      <field name="Attribute" start="0" end="15" type="SF_OUTPUT_ATTRIBUTE_DETAIL"/>
+    </group>
+    <field name="Point Sprite Texture Coordinate Enable" start="320" end="351" type="uint"/>
+    <field name="Constant Interpolation Enable[31:0]" start="352" end="383" type="uint"/>
+    <field name="Attribute 7 WrapShortest Enables" start="412" end="415" type="uint"/>
+    <field name="Attribute 6 WrapShortest Enables" start="408" end="411" type="uint"/>
+    <field name="Attribute 5 WrapShortest Enables" start="404" end="407" type="uint"/>
+    <field name="Attribute 4 WrapShortest Enables" start="400" end="403" type="uint"/>
+    <field name="Attribute 3 WrapShortest Enables" start="396" end="399" type="uint"/>
+    <field name="Attribute 2 WrapShortest Enables" start="392" end="395" type="uint"/>
+    <field name="Attribute 1 WrapShortest Enables" start="388" end="391" type="uint"/>
+    <field name="Attribute 0 WrapShortest Enables" start="384" end="387" type="uint"/>
+    <field name="Attribute 15 WrapShortest Enables" start="444" end="447" type="uint"/>
+    <field name="Attribute 14 WrapShortest Enables" start="440" end="443" type="uint"/>
+    <field name="Attribute 13 WrapShortest Enables" start="436" end="439" type="uint"/>
+    <field name="Attribute 12 WrapShortest Enables" start="432" end="435" type="uint"/>
+    <field name="Attribute 11 WrapShortest Enables" start="428" end="431" type="uint"/>
+    <field name="Attribute 10 WrapShortest Enables" start="424" end="427" type="uint"/>
+    <field name="Attribute 9 WrapShortest Enables" start="420" end="423" type="uint"/>
+    <field name="Attribute 8 WrapShortest Enables" start="416" end="419" type="uint"/>
+  </instruction>
+
+  <instruction name="3DSTATE_SCISSOR_STATE_POINTERS" bias="2" length="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="15"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="0"/>
+    <field name="Scissor Rect Pointer" start="37" end="63" type="offset"/>
+  </instruction>
+
+  <instruction name="3DSTATE_SF" bias="2" length="7">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="19"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="5"/>
+    <field name="Depth Buffer Surface Format" start="44" end="46" type="uint">
+      <value name="D32_FLOAT_S8X24_UINT" value="0"/>
+      <value name="D32_FLOAT" value="1"/>
+      <value name="D24_UNORM_S8_UINT" value="2"/>
+      <value name="D24_UNORM_X8_UINT" value="3"/>
+      <value name="D16_UNORM" value="5"/>
+    </field>
+    <field name="Legacy Global Depth Bias Enable" start="43" end="43" type="bool"/>
+    <field name="Statistics Enable" start="42" end="42" type="bool"/>
+    <field name="Global Depth Offset Enable Solid" start="41" end="41" type="bool"/>
+    <field name="Global Depth Offset Enable Wireframe" start="40" end="40" type="bool"/>
+    <field name="Global Depth Offset Enable Point" start="39" end="39" type="bool"/>
+    <field name="FrontFace Fill Mode" start="37" end="38" type="uint" prefix="FILL_MODE">
+      <value name="SOLID" value="0"/>
+      <value name="WIREFRAME" value="1"/>
+      <value name="POINT" value="2"/>
+    </field>
+    <field name="BackFace Fill Mode" start="35" end="36" type="uint" prefix="FILL_MODE">
+      <value name="SOLID" value="0"/>
+      <value name="WIREFRAME" value="1"/>
+      <value name="POINT" value="2"/>
+    </field>
+    <field name="View Transform Enable" start="33" end="33" type="bool"/>
+    <field name="Front Winding" start="32" end="32" type="uint"/>
+    <field name="Anti-Aliasing Enable" start="95" end="95" type="bool"/>
+    <field name="Cull Mode" start="93" end="94" type="uint" prefix="CULLMODE">
+      <value name="BOTH" value="0"/>
+      <value name="NONE" value="1"/>
+      <value name="FRONT" value="2"/>
+      <value name="BACK" value="3"/>
+    </field>
+    <field name="Line Width" start="82" end="91" type="u3.7"/>
+    <field name="Line End Cap Antialiasing Region Width" start="80" end="81" type="uint"/>
+    <field name="Line Stipple Enable" start="78" end="78" type="bool"/>
+    <field name="Scissor Rectangle Enable" start="75" end="75" type="bool"/>
+    <field name="RT Independent Rasterization Enable" start="74" end="74" type="bool"/>
+    <field name="Multisample Rasterization  Mode" start="72" end="73" type="uint"/>
+    <field name="Last Pixel Enable" start="127" end="127" type="bool"/>
+    <field name="Triangle Strip/List Provoking Vertex Select" start="125" end="126" type="uint">
+      <value name="Vertex 0" value="0"/>
+      <value name="Vertex 1" value="1"/>
+      <value name="Vertex 2" value="2"/>
+    </field>
+    <field name="Line Strip/List Provoking Vertex Select" start="123" end="124" type="uint"/>
+    <field name="Triangle Fan Provoking Vertex Select" start="121" end="122" type="uint">
+      <value name="Vertex 0" value="0"/>
+      <value name="Vertex 1" value="1"/>
+      <value name="Vertex 2" value="2"/>
+    </field>
+    <field name="AA Line Distance Mode" start="110" end="110" type="uint">
+      <value name="AALINEDISTANCE_TRUE" value="1"/>
+    </field>
+    <field name="Vertex Sub Pixel Precision Select" start="108" end="108" type="uint"/>
+    <field name="Use Point Width State" start="107" end="107" type="uint"/>
+    <field name="Point Width" start="96" end="106" type="u8.3"/>
+    <field name="Global Depth Offset Constant" start="128" end="159" type="float"/>
+    <field name="Global Depth Offset Scale" start="160" end="191" type="float"/>
+    <field name="Global Depth Offset Clamp" start="192" end="223" type="float"/>
+  </instruction>
+
+  <instruction name="3DSTATE_SO_BUFFER" bias="2" length="4">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="1"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="24"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="2"/>
+    <field name="SO Buffer Index" start="61" end="62" type="uint"/>
+    <field name="SO Buffer Object Control State" start="57" end="60" type="MEMORY_OBJECT_CONTROL_STATE"/>
+    <field name="Surface Pitch" start="32" end="43" type="uint"/>
+    <field name="Surface Base Address" start="66" end="95" type="address"/>
+    <field name="Surface End Address" start="98" end="127" type="address"/>
+  </instruction>
+
+  <instruction name="3DSTATE_SO_DECL_LIST" bias="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="1"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="23"/>
+    <field name="DWord Length" start="0" end="8" type="uint"/>
+    <field name="Stream to Buffer Selects [3]" start="44" end="47" type="uint"/>
+    <field name="Stream to Buffer Selects [2]" start="40" end="43" type="uint"/>
+    <field name="Stream to Buffer Selects [1]" start="36" end="39" type="uint"/>
+    <field name="Stream to Buffer Selects [0]" start="32" end="35" type="uint"/>
+    <field name="Num Entries [3]" start="88" end="95" type="uint"/>
+    <field name="Num Entries [2]" start="80" end="87" type="uint"/>
+    <field name="Num Entries [1]" start="72" end="79" type="uint"/>
+    <field name="Num Entries [0]" start="64" end="71" type="uint"/>
+    <group count="0" start="96" size="64">
+      <field name="Entry" start="0" end="63" type="SO_DECL_ENTRY"/>
+    </group>
+  </instruction>
+
+  <instruction name="3DSTATE_STENCIL_BUFFER" bias="2" length="3">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="6"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="1"/>
+    <field name="Stencil Buffer Enable" start="63" end="63" type="uint"/>
+    <field name="Stencil Buffer Object Control State" start="57" end="60" type="MEMORY_OBJECT_CONTROL_STATE"/>
+    <field name="Surface Pitch" start="32" end="48" type="uint"/>
+    <field name="Surface Base Address" start="64" end="95" type="address"/>
+  </instruction>
+
+  <instruction name="3DSTATE_STREAMOUT" bias="2" length="3">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="30"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="1"/>
+    <field name="SO Function Enable" start="63" end="63" type="uint"/>
+    <field name="Rendering Disable" start="62" end="62" type="uint"/>
+    <field name="Render Stream Select" start="59" end="60" type="uint"/>
+    <field name="Reorder Mode" start="58" end="58" type="uint">
+      <value name="LEADING" value="0"/>
+      <value name="TRAILING" value="1"/>
+    </field>
+    <field name="SO Statistics Enable" start="57" end="57" type="bool"/>
+    <field name="SO Buffer Enable [3]" start="43" end="43" type="uint"/>
+    <field name="SO Buffer Enable [2]" start="42" end="42" type="uint"/>
+    <field name="SO Buffer Enable [1]" start="41" end="41" type="uint"/>
+    <field name="SO Buffer Enable [0]" start="40" end="40" type="uint"/>
+    <field name="Stream 3 Vertex Read Offset" start="93" end="93" type="uint"/>
+    <field name="Stream 3 Vertex Read Length" start="88" end="92" type="uint"/>
+    <field name="Stream 2 Vertex Read Offset" start="85" end="85" type="uint"/>
+    <field name="Stream 2 Vertex Read Length" start="80" end="84" type="uint"/>
+    <field name="Stream 1 Vertex Read Offset" start="77" end="77" type="uint"/>
+    <field name="Stream 1 Vertex Read Length" start="72" end="76" type="uint"/>
+    <field name="Stream 0 Vertex Read Offset" start="69" end="69" type="uint"/>
+    <field name="Stream 0 Vertex Read Length" start="64" end="68" type="uint"/>
+  </instruction>
+
+  <instruction name="3DSTATE_TE" bias="2" length="4">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="28"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="2"/>
+    <field name="Partitioning" start="44" end="45" type="uint">
+      <value name="INTEGER" value="0"/>
+      <value name="ODD_FRACTIONAL" value="1"/>
+      <value name="EVEN_FRACTIONAL" value="2"/>
+    </field>
+    <field name="Output Topology" start="40" end="41" type="uint" prefix="OUTPUT">
+      <value name="POINT" value="0"/>
+      <value name="LINE" value="1"/>
+      <value name="TRI_CW" value="2"/>
+      <value name="TRI_CCW" value="3"/>
+    </field>
+    <field name="TE Domain" start="36" end="37" type="uint">
+      <value name="QUAD" value="0"/>
+      <value name="TRI" value="1"/>
+      <value name="ISOLINE" value="2"/>
+    </field>
+    <field name="TE Mode" start="33" end="34" type="uint">
+      <value name="HW_TESS" value="0"/>
+      <value name="SW_TESS" value="1"/>
+    </field>
+    <field name="TE Enable" start="32" end="32" type="bool"/>
+    <field name="Maximum Tessellation Factor Odd" start="64" end="95" type="float"/>
+    <field name="Maximum Tessellation Factor Not Odd" start="96" end="127" type="float"/>
+  </instruction>
+
+  <instruction name="3DSTATE_URB_DS" bias="2" length="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="50"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="0"/>
+    <field name="DS URB Starting Address" start="57" end="62" type="uint"/>
+    <field name="DS URB Entry Allocation Size" start="48" end="56" type="uint"/>
+    <field name="DS Number of URB Entries" start="32" end="47" type="uint"/>
+  </instruction>
+
+  <instruction name="3DSTATE_URB_GS" bias="2" length="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="51"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="0"/>
+    <field name="GS URB Starting Address" start="57" end="62" type="uint"/>
+    <field name="GS URB Entry Allocation Size" start="48" end="56" type="uint"/>
+    <field name="GS Number of URB Entries" start="32" end="47" type="uint"/>
+  </instruction>
+
+  <instruction name="3DSTATE_URB_HS" bias="2" length="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="49"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="0"/>
+    <field name="HS URB Starting Address" start="57" end="62" type="uint"/>
+    <field name="HS URB Entry Allocation Size" start="48" end="56" type="uint"/>
+    <field name="HS Number of URB Entries" start="32" end="47" type="uint"/>
+  </instruction>
+
+  <instruction name="3DSTATE_URB_VS" bias="2" length="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="48"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="0"/>
+    <field name="VS URB Starting Address" start="57" end="62" type="uint"/>
+    <field name="VS URB Entry Allocation Size" start="48" end="56" type="uint"/>
+    <field name="VS Number of URB Entries" start="32" end="47" type="uint"/>
+  </instruction>
+
+  <instruction name="3DSTATE_VERTEX_BUFFERS" bias="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="8"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="3"/>
+    <group count="0" start="32" size="128">
+      <field name="Vertex Buffer State" start="0" end="127" type="VERTEX_BUFFER_STATE"/>
+    </group>
+  </instruction>
+
+  <instruction name="3DSTATE_VERTEX_ELEMENTS" bias="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="9"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="1"/>
+    <group count="0" start="32" size="64">
+      <field name="Element" start="0" end="63" type="VERTEX_ELEMENT_STATE"/>
+    </group>
+  </instruction>
+
+  <instruction name="3DSTATE_VF" bias="2" length="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="12"/>
+    <field name="Indexed Draw Cut Index Enable" start="8" end="8" type="bool"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="0"/>
+    <field name="Cut Index" start="32" end="63" type="uint"/>
+  </instruction>
+
+  <instruction name="3DSTATE_VF_STATISTICS" bias="1" length="1">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="1"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="11"/>
+    <field name="Statistics Enable" start="0" end="0" type="bool"/>
+  </instruction>
+
+  <instruction name="3DSTATE_VIEWPORT_STATE_POINTERS_CC" bias="2" length="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="35"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="0"/>
+    <field name="CC Viewport Pointer" start="37" end="63" type="offset"/>
+  </instruction>
+
+  <instruction name="3DSTATE_VIEWPORT_STATE_POINTERS_SF_CLIP" bias="2" length="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="33"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="0"/>
+    <field name="SF Clip Viewport Pointer" start="38" end="63" type="offset"/>
+  </instruction>
+
+  <instruction name="3DSTATE_VS" bias="2" length="6">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="16"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="4"/>
+    <field name="Kernel Start Pointer" start="38" end="63" type="offset"/>
+    <field name="Single Vertex Dispatch" start="95" end="95" type="bool"/>
+    <field name="Vector Mask Enable (VME)" start="94" end="94" type="uint"/>
+    <field name="Sampler Count" start="91" end="93" type="uint">
+      <value name="No Samplers" value="0"/>
+      <value name="1-4 Samplers" value="1"/>
+      <value name="5-8 Samplers" value="2"/>
+      <value name="9-12 Samplers" value="3"/>
+      <value name="13-16 Samplers" value="4"/>
+    </field>
+    <field name="Binding Table Entry Count" start="82" end="89" type="uint"/>
+    <field name="Thread Priority" start="81" end="81" type="uint">
+      <value name="Normal Priority" value="0"/>
+      <value name="High Priority" value="1"/>
+    </field>
+    <field name="Floating Point Mode" start="80" end="80" type="uint">
+      <value name="IEEE-754" value="0"/>
+      <value name="Alternate" value="1"/>
+    </field>
+    <field name="Illegal Opcode Exception Enable" start="77" end="77" type="bool"/>
+    <field name="VS accesses UAV" start="76" end="76" type="bool"/>
+    <field name="Software  Exception Enable" start="71" end="71" type="bool"/>
+    <field name="Scratch Space Base Offset" start="106" end="127" type="offset"/>
+    <field name="Per-Thread Scratch Space" start="96" end="99" type="uint"/>
+    <field name="Dispatch GRF Start Register for URB Data" start="148" end="152" type="uint"/>
+    <field name="Vertex URB Entry Read Length" start="139" end="144" type="uint"/>
+    <field name="Vertex URB Entry Read Offset" start="132" end="137" type="uint"/>
+    <field name="Maximum Number of Threads" start="183" end="191" type="uint"/>
+    <field name="Statistics Enable" start="170" end="170" type="bool"/>
+    <field name="Vertex Cache Disable" start="161" end="161" type="bool"/>
+    <field name="VS Function Enable" start="160" end="160" type="bool"/>
+  </instruction>
+
+  <instruction name="3DSTATE_WM" bias="2" length="3">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="20"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="1"/>
+    <field name="Statistics Enable" start="63" end="63" type="bool"/>
+    <field name="Depth Buffer Clear" start="62" end="62" type="bool"/>
+    <field name="Thread Dispatch Enable" start="61" end="61" type="bool"/>
+    <field name="Depth Buffer Resolve Enable" start="60" end="60" type="bool"/>
+    <field name="Hierarchical Depth Buffer Resolve Enable" start="59" end="59" type="bool"/>
+    <field name="Legacy Diamond Line Rasterization" start="58" end="58" type="bool"/>
+    <field name="Pixel Shader Kill Pixel" start="57" end="57" type="bool"/>
+    <field name="Pixel Shader Computed Depth Mode" start="55" end="56" type="uint">
+      <value name="PSCDEPTH_OFF" value="0"/>
+      <value name="PSCDEPTH_ON" value="1"/>
+      <value name="PSCDEPTH_ON_GE" value="2"/>
+      <value name="PSCDEPTH_ON_LE" value="3"/>
+    </field>
+    <field name="Early Depth/Stencil Control" start="53" end="54" type="uint">
+      <value name="EDSC_NORMAL" value="0"/>
+      <value name="EDSC_PSEXEC" value="1"/>
+      <value name="EDSC_PREPS" value="2"/>
+    </field>
+    <field name="Pixel Shader Uses Source Depth" start="52" end="52" type="bool"/>
+    <field name="Pixel Shader Uses Source W" start="51" end="51" type="bool"/>
+    <field name="Position ZW Interpolation Mode" start="49" end="50" type="uint">
+      <value name="INTERP_PIXEL" value="0"/>
+      <value name="INTERP_CENTROID" value="2"/>
+      <value name="INTERP_SAMPLE" value="3"/>
+    </field>
+    <field name="Barycentric Interpolation Mode" start="43" end="48" type="uint"/>
+    <field name="Pixel Shader Uses Input Coverage Mask" start="42" end="42" type="bool"/>
+    <field name="Line End Cap Antialiasing Region Width" start="40" end="41" type="uint"/>
+    <field name="Line Antialiasing Region Width" start="38" end="39" type="uint"/>
+    <field name="RT Independent Rasterization Enable" start="37" end="37" type="bool"/>
+    <field name="Polygon Stipple Enable" start="36" end="36" type="bool"/>
+    <field name="Line Stipple Enable" start="35" end="35" type="bool"/>
+    <field name="Point Rasterization Rule" start="34" end="34" type="uint">
+      <value name="RASTRULE_UPPER_LEFT" value="0"/>
+      <value name="RASTRULE_UPPER_RIGHT" value="1"/>
+    </field>
+    <field name="Multisample Rasterization Mode" start="32" end="33" type="uint">
+      <value name="MSRASTMODE_OFF_PIXEL" value="0"/>
+      <value name="MSRASTMODE_OFF_PATTERN" value="1"/>
+      <value name="MSRASTMODE_ON_PIXEL" value="2"/>
+      <value name="MSRASTMODE_ON_PATTERN" value="3"/>
+    </field>
+    <field name="Multisample Dispatch Mode" start="95" end="95" type="uint">
+      <value name="MSDISPMODE_PERSAMPLE" value="0"/>
+      <value name="MSDISPMODE_PERPIXEL" value="1"/>
+    </field>
+    <field name="PS UAV-only" start="94" end="94" type="uint">
+      <value name="OFF" value="0"/>
+      <value name="ON" value="1"/>
+    </field>
+  </instruction>
+
+  <instruction name="GPGPU_CSR_BASE_ADDRESS" bias="2" length="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="0"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="1"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="4"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="0"/>
+    <field name="GPGPU CSR Base Address" start="44" end="63" type="address"/>
+  </instruction>
+
+  <instruction name="GPGPU_OBJECT" bias="2" length="8">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Pipeline" start="27" end="28" type="uint" default="2"/>
+    <field name="Media Command Opcode" start="24" end="26" type="uint" default="1"/>
+    <field name="SubOpcode" start="16" end="23" type="uint" default="4"/>
+    <field name="Predicate Enable" start="8" end="8" type="bool"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="6"/>
+    <field name="Shared Local Memory Fixed Offset" start="39" end="39" type="uint"/>
+    <field name="Interface Descriptor Offset" start="32" end="37" type="uint"/>
+    <field name="Shared Local Memory Offset" start="92" end="95" type="uint"/>
+    <field name="End of Thread Group" start="88" end="88" type="uint"/>
+    <field name="Slice Destination Select" start="83" end="83" type="uint">
+      <value name="Slice 0" value="0"/>
+      <value name="Slice 1" value="1"/>
+    </field>
+    <field name="Half-Slice Destination Select" start="81" end="82" type="uint">
+      <value name="Half-Slice 1" value="2"/>
+      <value name="Half-Slice 0" value="1"/>
+      <value name="Either Half-Slice" value="0"/>
+    </field>
+    <field name="Indirect Data Length" start="64" end="80" type="uint"/>
+    <field name="Indirect Data Start Address" start="96" end="127" type="offset"/>
+    <field name="Thread Group ID X" start="128" end="159" type="uint"/>
+    <field name="Thread Group ID Y" start="160" end="191" type="uint"/>
+    <field name="Thread Group ID Z" start="192" end="223" type="uint"/>
+    <field name="Execution Mask" start="224" end="255" type="uint"/>
+  </instruction>
+
+  <instruction name="GPGPU_WALKER" bias="2" length="11">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Pipeline" start="27" end="28" type="uint" default="2"/>
+    <field name="Media Command Opcode" start="24" end="26" type="uint" default="1"/>
+    <field name="SubOpcode A" start="16" end="23" type="uint" default="5"/>
+    <field name="Indirect Parameter Enable" start="10" end="10" type="bool"/>
+    <field name="Predicate Enable" start="8" end="8" type="bool"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="9"/>
+    <field name="Interface Descriptor Offset" start="32" end="37" type="uint"/>
+    <field name="SIMD Size" start="94" end="95" type="uint">
+      <value name="SIMD8" value="0"/>
+      <value name="SIMD16" value="1"/>
+      <value name="SIMD32" value="2"/>
+    </field>
+    <field name="Thread Depth Counter Maximum" start="80" end="85" type="uint"/>
+    <field name="Thread Height Counter Maximum" start="72" end="77" type="uint"/>
+    <field name="Thread Width Counter Maximum" start="64" end="69" type="uint"/>
+    <field name="Thread Group ID Starting X" start="96" end="127" type="uint"/>
+    <field name="Thread Group ID X Dimension" start="128" end="159" type="uint"/>
+    <field name="Thread Group ID Starting Y" start="160" end="191" type="uint"/>
+    <field name="Thread Group ID Y Dimension" start="192" end="223" type="uint"/>
+    <field name="Thread Group ID Starting Z" start="224" end="255" type="uint"/>
+    <field name="Thread Group ID Z Dimension" start="256" end="287" type="uint"/>
+    <field name="Right Execution Mask" start="288" end="319" type="uint"/>
+    <field name="Bottom Execution Mask" start="320" end="351" type="uint"/>
+  </instruction>
+
+  <instruction name="MEDIA_CURBE_LOAD" bias="2" length="4">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Pipeline" start="27" end="28" type="uint" default="2"/>
+    <field name="Media Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="SubOpcode" start="16" end="23" type="uint" default="1"/>
+    <field name="DWord Length" start="0" end="15" type="uint" default="2"/>
+    <field name="CURBE Total Data Length" start="64" end="80" type="uint"/>
+    <field name="CURBE Data Start Address" start="96" end="127" type="uint"/>
+  </instruction>
+
+  <instruction name="MEDIA_INTERFACE_DESCRIPTOR_LOAD" bias="2" length="4">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Pipeline" start="27" end="28" type="uint" default="2"/>
+    <field name="Media Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="SubOpcode" start="16" end="23" type="uint" default="2"/>
+    <field name="DWord Length" start="0" end="15" type="uint" default="2"/>
+    <field name="Interface Descriptor Total Length" start="64" end="80" type="uint"/>
+    <field name="Interface Descriptor Data Start Address" start="96" end="127" type="offset"/>
+  </instruction>
+
+  <instruction name="MEDIA_OBJECT" bias="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Media Command Pipeline" start="27" end="28" type="uint" default="2"/>
+    <field name="Media Command Opcode" start="24" end="26" type="uint" default="1"/>
+    <field name="Media Command Sub-Opcode" start="16" end="23" type="uint" default="0"/>
+    <field name="DWord Length" start="0" end="15" type="uint" default="4"/>
+    <field name="Interface Descriptor Offset" start="32" end="37" type="uint"/>
+    <field name="Children Present" start="95" end="95" type="bool"/>
+    <field name="Thread Synchronization" start="88" end="88" type="uint">
+      <value name="No thread synchronization" value="0"/>
+      <value name="Thread dispatch is synchronized by the 'spawn root thread' message" value="1"/>
+    </field>
+    <field name="Use Scoreboard" start="85" end="85" type="uint">
+      <value name="Not using scoreboard" value="0"/>
+      <value name="Using scoreboard" value="1"/>
+    </field>
+    <field name="Slice Destination Select" start="83" end="83" type="uint">
+      <value name="Slice 0" value="0"/>
+      <value name="Slice 1" value="1"/>
+      <value name="Either Slice" value="0"/>
+    </field>
+    <field name="Half-Slice Destination Select" start="81" end="82" type="uint">
+      <value name="Half-Slice 1" value="2"/>
+      <value name="Half-Slice 0" value="1"/>
+      <value name="Either half-slice" value="0"/>
+    </field>
+    <field name="Indirect Data Length" start="64" end="80" type="uint"/>
+    <field name="Indirect Data Start Address" start="96" end="127" type="address"/>
+    <field name="Scoredboard Y" start="144" end="152" type="uint"/>
+    <field name="Scoreboard X" start="128" end="136" type="uint"/>
+    <field name="Scoreboard Color" start="176" end="179" type="uint"/>
+    <field name="Scoreboard Mask" start="160" end="167" type="uint"/>
+    <group count="0" start="192" size="32">
+      <field name="Inline Data" start="0" end="31" type="uint"/>
+    </group>
+  </instruction>
+
+  <instruction name="MEDIA_OBJECT_PRT" bias="2" length="16">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Pipeline" start="27" end="28" type="uint" default="2"/>
+    <field name="Media Command Opcode" start="24" end="26" type="uint" default="1"/>
+    <field name="SubOpcode" start="16" end="23" type="uint" default="2"/>
+    <field name="DWord Length" start="0" end="15" type="uint" default="14"/>
+    <field name="Interface Descriptor Offset" start="32" end="37" type="uint"/>
+    <field name="Children Present" start="95" end="95" type="bool"/>
+    <field name="PRT_Fence Needed" start="87" end="87" type="bool"/>
+    <field name="PRT_FenceType" start="86" end="86" type="uint">
+      <value name="Root thread queue" value="0"/>
+      <value name="VFE state flush" value="1"/>
+    </field>
+    <group count="12" start="128" size="32">
+      <field name="Inline Data" start="0" end="31" type="uint"/>
+    </group>
+  </instruction>
+
+  <instruction name="MEDIA_OBJECT_WALKER" bias="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Pipeline" start="27" end="28" type="uint" default="2"/>
+    <field name="Media Command Opcode" start="24" end="26" type="uint" default="1"/>
+    <field name="SubOpcode" start="16" end="23" type="uint" default="3"/>
+    <field name="DWord Length" start="0" end="15" type="uint" default="15"/>
+    <field name="Interface Descriptor Offset" start="32" end="37" type="uint"/>
+    <field name="Children Present" start="95" end="95" type="uint"/>
+    <field name="Thread Synchronization" start="88" end="88" type="uint">
+      <value name="No thread synchronization" value="0"/>
+      <value name="Thread dispatch is synchronized by the 'spawn root thread' message" value="1"/>
+    </field>
+    <field name="Use Scoreboard" start="85" end="85" type="uint">
+      <value name="Not using scoreboard" value="0"/>
+      <value name="Using scoreboard" value="1"/>
+    </field>
+    <field name="Indirect Data Length" start="64" end="80" type="uint"/>
+    <field name="Indirect Data Start Address" start="96" end="127" type="offset"/>
+    <field name="Scoreboard Mask" start="160" end="167" type="uint"/>
+    <field name="Dual Mode" start="223" end="223" type="uint"/>
+    <field name="Repel" start="222" end="222" type="uint"/>
+    <field name="Quad Mode" start="221" end="221" type="uint"/>
+    <field name="Color Count Minus One" start="216" end="219" type="uint"/>
+    <field name="Middle Loop Extra Steps" start="208" end="212" type="uint"/>
+    <field name="Local Mid-Loop Unit Y" start="204" end="205" type="int"/>
+    <field name="Mid-Loop Unit X" start="200" end="201" type="int"/>
+    <field name="Global Loop Exec Count" start="240" end="249" type="uint"/>
+    <field name="Local Loop Exec Count" start="224" end="233" type="uint"/>
+    <field name="Block Resolution Y" start="272" end="280" type="uint"/>
+    <field name="Block Resolution X" start="256" end="264" type="uint"/>
+    <field name="Local Start Y" start="304" end="312" type="uint"/>
+    <field name="Local Start X" start="288" end="296" type="uint"/>
+    <field name="Local Outer Loop Stride Y" start="368" end="377" type="int"/>
+    <field name="Local Outer Loop Stride X" start="352" end="361" type="int"/>
+    <field name="Local Inner Loop Unit Y" start="400" end="409" type="int"/>
+    <field name="Local Inner Loop Unit X" start="384" end="393" type="int"/>
+    <field name="Global Resolution Y" start="432" end="440" type="uint"/>
+    <field name="Global Resolution X" start="416" end="424" type="uint"/>
+    <field name="Global Start Y" start="464" end="473" type="int"/>
+    <field name="Global Start X" start="448" end="457" type="int"/>
+    <field name="Global Outer Loop Stride Y" start="496" end="505" type="int"/>
+    <field name="Global Outer Loop Stride X" start="480" end="489" type="int"/>
+    <field name="Global Inner Loop Unit Y" start="528" end="537" type="int"/>
+    <field name="Global Inner Loop Unit X" start="512" end="521" type="int"/>
+    <group count="0" start="544" size="32">
+      <field name="Inline Data" start="0" end="31" type="uint"/>
+    </group>
+  </instruction>
+
+  <instruction name="MEDIA_STATE_FLUSH" bias="2" length="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Pipeline" start="27" end="28" type="uint" default="2"/>
+    <field name="Media Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="SubOpcode" start="16" end="23" type="uint" default="4"/>
+    <field name="DWord Length" start="0" end="15" type="uint" default="0"/>
+    <field name="Disable Preemption" start="40" end="40" type="bool"/>
+    <field name="Flush to GO" start="39" end="39" type="bool"/>
+    <field name="Watermark Required" start="38" end="38" type="uint"/>
+    <field name="Interface Descriptor Offset" start="32" end="37" type="uint"/>
+  </instruction>
+
+  <instruction name="MEDIA_VFE_STATE" bias="2" length="8">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Pipeline" start="27" end="28" type="uint" default="2"/>
+    <field name="Media Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="SubOpcode" start="16" end="23" type="uint" default="0"/>
+    <field name="DWord Length" start="0" end="15" type="uint" default="6"/>
+    <field name="Scratch Space Base Pointer" start="42" end="63" type="offset"/>
+    <field name="Stack Size" start="36" end="39" type="uint"/>
+    <field name="Per Thread Scratch Space" start="32" end="35" type="uint"/>
+    <field name="Maximum Number of Threads" start="80" end="95" type="uint"/>
+    <field name="Number of URB Entries" start="72" end="79" type="uint"/>
+    <field name="Reset Gateway Timer" start="71" end="71" type="uint">
+      <value name="Maintaining the existing timestamp state" value="0"/>
+      <value name="Resetting relative timer and latching the global timestamp" value="1"/>
+    </field>
+    <field name="Bypass Gateway Control" start="70" end="70" type="uint">
+      <value name="Maintaining OpenGateway/ForwardMsg/CloseGateway protocol (legacy mode)" value="0"/>
+      <value name="Bypassing OpenGateway/CloseGateway protocol" value="1"/>
+    </field>
+    <field name="GPGPU Mode" start="66" end="66" type="uint"/>
+    <field name="Half-Slice Disable" start="96" end="97" type="uint"/>
+    <field name="URB Entry Allocation Size" start="144" end="159" type="uint"/>
+    <field name="CURBE Allocation Size" start="128" end="143" type="uint"/>
+    <field name="Scoreboard Enable" start="191" end="191" type="uint">
+      <value name="Scoreboard disabled" value="0"/>
+      <value name="Scoreboard enabled" value="1"/>
+    </field>
+    <field name="Scoreboard Type" start="190" end="190" type="uint">
+      <value name="Stalling Scoreboard" value="0"/>
+      <value name="Non-Stalling Scoreboard" value="1"/>
+    </field>
+    <field name="Scoreboard Mask" start="160" end="167" type="uint"/>
+    <field name="Scoreboard 3 Delta Y" start="220" end="223" type="int"/>
+    <field name="Scoreboard 3 Delta X" start="216" end="219" type="int"/>
+    <field name="Scoreboard 2 Delta Y" start="212" end="215" type="int"/>
+    <field name="Scoreboard 2 Delta X" start="208" end="211" type="int"/>
+    <field name="Scoreboard 1 Delta Y" start="204" end="207" type="int"/>
+    <field name="Scoreboard 1 Delta X" start="200" end="203" type="int"/>
+    <field name="Scoreboard 0 Delta Y" start="196" end="199" type="int"/>
+    <field name="Scoreboard 0 Delta X" start="192" end="195" type="int"/>
+    <field name="Scoreboard 7 Delta Y" start="252" end="255" type="int"/>
+    <field name="Scoreboard 7 Delta X" start="248" end="251" type="int"/>
+    <field name="Scoreboard 6 Delta Y" start="244" end="247" type="int"/>
+    <field name="Scoreboard 6 Delta X" start="240" end="243" type="int"/>
+    <field name="Scoreboard 5 Delta Y" start="236" end="239" type="int"/>
+    <field name="Scoreboard 5 Delta X" start="232" end="235" type="int"/>
+    <field name="Scoreboard 4 Delta Y" start="228" end="231" type="int"/>
+    <field name="Scoreboard 4 Delta X" start="224" end="227" type="int"/>
+  </instruction>
+
+  <instruction name="MI_ARB_CHECK" bias="1" length="1">
+    <field name="Command Type" start="29" end="31" type="uint" default="0"/>
+    <field name="MI Command Opcode" start="23" end="28" type="uint" default="5"/>
+  </instruction>
+
+  <instruction name="MI_ARB_ON_OFF" bias="1" length="1">
+    <field name="Command Type" start="29" end="31" type="uint" default="0"/>
+    <field name="MI Command Opcode" start="23" end="28" type="uint" default="8"/>
+    <field name="Arbitration Enable" start="0" end="0" type="bool"/>
+  </instruction>
+
+  <instruction name="MI_BATCH_BUFFER_END" bias="1" length="1">
+    <field name="Command Type" start="29" end="31" type="uint" default="0"/>
+    <field name="MI Command Opcode" start="23" end="28" type="uint" default="10"/>
+  </instruction>
+
+  <instruction name="MI_BATCH_BUFFER_START" bias="2" length="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="0"/>
+    <field name="MI Command Opcode" start="23" end="28" type="uint" default="49"/>
+    <field name="2nd Level Batch Buffer" start="22" end="22" type="uint">
+      <value name="1st level batch" value="0"/>
+      <value name="2nd level batch" value="1"/>
+    </field>
+    <field name="Add Offset Enable" start="16" end="16" type="bool"/>
+    <field name="Predication Enable" start="15" end="15" type="bool"/>
+    <field name="Non-Privileged" start="13" end="13" type="bool"/>
+    <field name="Clear Command Buffer Enable" start="11" end="11" type="bool"/>
+    <field name="Resource Streamer Enable" start="10" end="10" type="bool"/>
+    <field name="Address Space Indicator" start="8" end="8" type="uint" prefix="ASI">
+      <value name="GGTT" value="0"/>
+      <value name="PPGTT" value="1"/>
+    </field>
+    <field name="DWord Length" start="0" end="7" type="uint" default="0"/>
+    <field name="Batch Buffer Start Address" start="34" end="63" type="address"/>
+  </instruction>
+
+  <instruction name="MI_CLFLUSH" bias="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="0"/>
+    <field name="MI Command Opcode" start="23" end="28" type="uint" default="39"/>
+    <field name="Use Global GTT" start="22" end="22" type="uint">
+      <value name="Per Process Graphics Address" value="0"/>
+      <value name="Global Graphics Address" value="1"/>
+    </field>
+    <field name="DWord Length" start="0" end="9" type="uint" default="1"/>
+    <field name="Page Base Address" start="44" end="63" type="address"/>
+    <field name="Starting Cacheline Offset" start="38" end="43" type="uint"/>
+    <field name="Page Base Address High" start="64" end="79" type="address"/>
+    <group count="0" start="96" size="32">
+      <field name="DW Representing a Half Cache Line" start="0" end="31" type="uint"/>
+    </group>
+  </instruction>
+
+  <instruction name="MI_CONDITIONAL_BATCH_BUFFER_END" bias="2" length="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="0"/>
+    <field name="MI Command Opcode" start="23" end="28" type="uint" default="54"/>
+    <field name="Use Global GTT" start="22" end="22" type="uint" default="0"/>
+    <field name="Compare Semaphore" start="21" end="21" type="uint" default="0"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="0"/>
+    <field name="Compare Data Dword" start="32" end="63" type="uint"/>
+    <field name="Compare Address" start="67" end="95" type="address"/>
+  </instruction>
+
+  <instruction name="MI_FLUSH" bias="1" length="1">
+    <field name="Command Type" start="29" end="31" type="uint" default="0"/>
+    <field name="MI Command Opcode" start="23" end="28" type="uint" default="4"/>
+    <field name="Indirect State Pointers Disable" start="5" end="5" type="bool"/>
+    <field name="Generic Media State Clear" start="4" end="4" type="bool"/>
+    <field name="Global Snapshot Count Reset" start="3" end="3" type="uint">
+      <value name="Don't Reset" value="0"/>
+      <value name="Reset" value="1"/>
+    </field>
+    <field name="Render Cache Flush Inhibit" start="2" end="2" type="uint">
+      <value name="Flush" value="0"/>
+      <value name="Don't Flush" value="1"/>
+    </field>
+    <field name="State/Instruction Cache Invalidate" start="1" end="1" type="uint">
+      <value name="Don't Invalidate" value="0"/>
+      <value name="Invalidate" value="1"/>
+    </field>
+  </instruction>
+
+  <instruction name="MI_LOAD_REGISTER_IMM" bias="2" length="3">
+    <field name="Command Type" start="29" end="31" type="uint" default="0"/>
+    <field name="MI Command Opcode" start="23" end="28" type="uint" default="34"/>
+    <field name="Byte Write Disables" start="8" end="11" type="uint"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="1"/>
+    <field name="Register Offset" start="34" end="54" type="offset"/>
+    <field name="Data DWord" start="64" end="95" type="uint"/>
+  </instruction>
+
+  <instruction name="MI_LOAD_REGISTER_MEM" bias="2" length="3">
+    <field name="Command Type" start="29" end="31" type="uint" default="0"/>
+    <field name="MI Command Opcode" start="23" end="28" type="uint" default="41"/>
+    <field name="Use Global GTT" start="22" end="22" type="uint"/>
+    <field name="Async Mode Enable" start="21" end="21" type="bool"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="1"/>
+    <field name="Register Address" start="34" end="54" type="offset"/>
+    <field name="Memory Address" start="66" end="95" type="address"/>
+  </instruction>
+
+  <instruction name="MI_LOAD_REGISTER_REG" bias="2" length="3">
+    <field name="Command Type" start="29" end="31" type="uint" default="0"/>
+    <field name="MI Command Opcode" start="23" end="28" type="uint" default="42"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="1"/>
+    <field name="Source Register Address" start="34" end="54" type="offset"/>
+    <field name="Destination Register Address" start="66" end="86" type="offset"/>
+  </instruction>
+
+  <instruction name="MI_LOAD_SCAN_LINES_EXCL" bias="2" length="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="0"/>
+    <field name="MI Command Opcode" start="23" end="28" type="uint" default="19"/>
+    <field name="Display (Plane) Select" start="19" end="21" type="uint">
+      <value name="Display Plane A" value="0"/>
+      <value name="Display Plane B" value="1"/>
+      <value name="Display Plane C" value="4"/>
+    </field>
+    <field name="DWord Length" start="0" end="5" type="uint" default="0"/>
+    <field name="Start Scan Line Number" start="48" end="60" type="uint"/>
+    <field name="End Scan Line Number" start="32" end="44" type="uint"/>
+  </instruction>
+
+  <instruction name="MI_LOAD_SCAN_LINES_INCL" bias="2" length="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="0"/>
+    <field name="MI Command Opcode" start="23" end="28" type="uint" default="18"/>
+    <field name="Display (Plane) Select" start="19" end="21" type="uint">
+      <value name="Display Plane A" value="0"/>
+      <value name="Display Plane B" value="1"/>
+      <value name="Display Plane C" value="4"/>
+    </field>
+    <field name="DWord Length" start="0" end="5" type="uint" default="0"/>
+    <field name="Start Scan Line Number" start="48" end="60" type="uint"/>
+    <field name="End Scan Line Number" start="32" end="44" type="uint"/>
+  </instruction>
+
+  <instruction name="MI_LOAD_URB_MEM" bias="2" length="3">
+    <field name="Command Type" start="29" end="31" type="uint" default="0"/>
+    <field name="MI Command Opcode" start="23" end="28" type="uint" default="44"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="1"/>
+    <field name="URB Address" start="34" end="46" type="uint"/>
+    <field name="Memory Address" start="70" end="95" type="address"/>
+  </instruction>
+
+  <instruction name="MI_MATH" bias="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="0"/>
+    <field name="MI Command Opcode" start="23" end="28" type="uint" default="26"/>
+    <field name="DWord Length" start="0" end="5" type="uint" default="0"/>
+    <field name="ALU INSTRUCTION 1" start="32" end="63" type="uint"/>
+    <field name="ALU INSTRUCTION 2" start="64" end="95" type="uint"/>
+    <group count="0" start="96" size="32">
+      <field name="ALU INSTRUCTION n" start="0" end="31" type="uint"/>
+    </group>
+  </instruction>
+
+  <instruction name="MI_NOOP" bias="1" length="1">
+    <field name="Command Type" start="29" end="31" type="uint" default="0"/>
+    <field name="MI Command Opcode" start="23" end="28" type="uint" default="0"/>
+    <field name="Identification Number Register Write Enable" start="22" end="22" type="bool"/>
+    <field name="Identification Number" start="0" end="21" type="uint"/>
+  </instruction>
+
+  <instruction name="MI_PREDICATE" bias="1" length="1">
+    <field name="Command Type" start="29" end="31" type="uint" default="0"/>
+    <field name="MI Command Opcode" start="23" end="28" type="uint" default="12"/>
+    <field name="Load Operation" start="6" end="7" type="uint" prefix="LOAD">
+      <value name="KEEP" value="0"/>
+      <value name="LOAD" value="2"/>
+      <value name="LOADINV" value="3"/>
+    </field>
+    <field name="Combine Operation" start="3" end="4" type="uint" prefix="COMBINE">
+      <value name="SET" value="0"/>
+      <value name="AND" value="1"/>
+      <value name="OR" value="2"/>
+      <value name="XOR" value="3"/>
+    </field>
+    <field name="Compare Operation" start="0" end="1" type="uint" prefix="COMPARE">
+      <value name="SRCS_EQUAL" value="2"/>
+      <value name="DELTAS_EQUAL" value="3"/>
+    </field>
+  </instruction>
+
+  <instruction name="MI_REPORT_HEAD" bias="1" length="1">
+    <field name="Command Type" start="29" end="31" type="uint" default="0"/>
+    <field name="MI Command Opcode" start="23" end="28" type="uint" default="7"/>
+  </instruction>
+
+  <instruction name="MI_REPORT_PERF_COUNT" bias="2" length="3">
+    <field name="Command Type" start="29" end="31" type="uint" default="0"/>
+    <field name="MI Command Opcode" start="23" end="28" type="uint" default="40"/>
+    <field name="DWord Length" start="0" end="5" type="uint" default="1"/>
+    <field name="Memory Address" start="38" end="63" type="address"/>
+    <field name="Core Mode Enable" start="36" end="36" type="uint"/>
+    <field name="Use Global GTT" start="32" end="32" type="uint"/>
+    <field name="Report ID" start="64" end="95" type="uint"/>
+  </instruction>
+
+  <instruction name="MI_RS_CONTEXT" bias="1" length="1">
+    <field name="Command Type" start="29" end="31" type="uint" default="0"/>
+    <field name="MI Command Opcode" start="23" end="28" type="uint" default="15"/>
+    <field name="Resource Streamer Save" start="0" end="0" type="uint" prefix="RS">
+      <value name="Restore" value="0"/>
+      <value name="Save" value="1"/>
+    </field>
+  </instruction>
+
+  <instruction name="MI_RS_CONTROL" bias="1" length="1">
+    <field name="Command Type" start="29" end="31" type="uint" default="0"/>
+    <field name="MI Command Opcode" start="23" end="28" type="uint" default="6"/>
+    <field name="Resource Streamer Control" start="0" end="0" type="uint" prefix="RS">
+      <value name="Stop" value="0"/>
+      <value name="Start" value="1"/>
+    </field>
+  </instruction>
+
+  <instruction name="MI_RS_STORE_DATA_IMM" bias="2" length="4">
+    <field name="Command Type" start="29" end="31" type="uint" default="0"/>
+    <field name="MI Command Opcode" start="23" end="28" type="uint" default="43"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="2"/>
+    <field name="Destination Address" start="66" end="95" type="address"/>
+    <field name="Core Mode Enable" start="64" end="64" type="uint"/>
+    <field name="Data DWord 0" start="96" end="127" type="uint"/>
+  </instruction>
+
+  <instruction name="MI_SEMAPHORE_MBOX" bias="2" length="3">
+    <field name="Command Type" start="29" end="31" type="uint" default="0"/>
+    <field name="MI Command Opcode" start="23" end="28" type="uint" default="22"/>
+    <field name="Register Select" start="16" end="17" type="uint">
+      <value name="RVSYNC" value="0"/>
+      <value name="RVESYNC" value="1"/>
+      <value name="RBSYNC" value="2"/>
+      <value name="Use General Register Select" value="3"/>
+    </field>
+    <field name="General Register Select" start="8" end="13" type="uint"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="1"/>
+    <field name="Semaphore Data Dword" start="32" end="63" type="uint"/>
+  </instruction>
+
+  <instruction name="MI_SET_CONTEXT" bias="2" length="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="0"/>
+    <field name="MI Command Opcode" start="23" end="28" type="uint" default="24"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="0"/>
+    <field name="Logical Context Address" start="44" end="63" type="address"/>
+    <field name="Reserved, Must be 1" start="40" end="40" type="uint"/>
+    <field name="Core Mode Enable" start="36" end="36" type="bool"/>
+    <field name="Resource Streamer State Save Enable" start="35" end="35" type="bool"/>
+    <field name="Resource Streamer State Restore Enable" start="34" end="34" type="bool"/>
+    <field name="Force Restore" start="33" end="33" type="uint"/>
+    <field name="Restore Inhibit" start="32" end="32" type="uint"/>
+  </instruction>
+
+  <instruction name="MI_SET_PREDICATE" bias="1" length="1">
+    <field name="Command Type" start="29" end="31" type="uint" default="0"/>
+    <field name="MI Command Opcode" start="23" end="28" type="uint" default="1"/>
+    <field name="PREDICATE ENABLE" start="0" end="1" type="bool" default="6"/>
+  </instruction>
+
+  <instruction name="MI_STORE_DATA_IMM" bias="2" length="4">
+    <field name="Command Type" start="29" end="31" type="uint" default="0"/>
+    <field name="MI Command Opcode" start="23" end="28" type="uint" default="32"/>
+    <field name="Use Global GTT" start="22" end="22" type="uint"/>
+    <field name="DWord Length" start="0" end="5" type="uint" default="2"/>
+    <field name="Address" start="66" end="95" type="uint"/>
+    <field name="Core Mode Enable" start="64" end="64" type="uint"/>
+    <field name="Data DWord 0" start="96" end="127" type="uint"/>
+    <field name="Data DWord 1" start="128" end="159" type="uint"/>
+  </instruction>
+
+  <instruction name="MI_STORE_DATA_INDEX" bias="2" length="3">
+    <field name="Command Type" start="29" end="31" type="uint" default="0"/>
+    <field name="MI Command Opcode" start="23" end="28" type="uint" default="33"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="1"/>
+    <field name="Offset" start="34" end="43" type="uint"/>
+    <field name="Data DWord 0" start="64" end="95" type="uint"/>
+    <field name="Data DWord 1" start="96" end="127" type="uint"/>
+  </instruction>
+
+  <instruction name="MI_STORE_REGISTER_MEM" bias="2" length="3">
+    <field name="Command Type" start="29" end="31" type="uint" default="0"/>
+    <field name="MI Command Opcode" start="23" end="28" type="uint" default="36"/>
+    <field name="Use Global GTT" start="22" end="22" type="uint"/>
+    <field name="Predicate Enable" start="21" end="21" type="bool"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="1"/>
+    <field name="Register Address" start="34" end="54" type="offset"/>
+    <field name="Memory Address" start="66" end="95" type="address"/>
+  </instruction>
+
+  <instruction name="MI_STORE_URB_MEM" bias="2" length="3">
+    <field name="Command Type" start="29" end="31" type="uint" default="0"/>
+    <field name="MI Command Opcode" start="23" end="28" type="uint" default="45"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="1"/>
+    <field name="URB Address" start="34" end="46" type="uint"/>
+    <field name="Memory Address" start="70" end="95" type="address"/>
+  </instruction>
+
+  <instruction name="MI_SUSPEND_FLUSH" bias="1" length="1">
+    <field name="Command Type" start="29" end="31" type="uint" default="0"/>
+    <field name="MI Command Opcode" start="23" end="28" type="uint" default="11"/>
+    <field name="Suspend Flush" start="0" end="0" type="bool"/>
+  </instruction>
+
+  <instruction name="MI_TOPOLOGY_FILTER" bias="1" length="1">
+    <field name="Command Type" start="29" end="31" type="uint" default="0"/>
+    <field name="MI Command Opcode" start="23" end="28" type="uint" default="13"/>
+    <field name="Topology Filter Value" start="0" end="5" type="uint"/>
+  </instruction>
+
+  <instruction name="MI_URB_ATOMIC_ALLOC" bias="1" length="1">
+    <field name="Command Type" start="29" end="31" type="uint" default="0"/>
+    <field name="MI Command Opcode" start="23" end="28" type="uint" default="9"/>
+    <field name="URB Atomic Storage Offset" start="12" end="19" type="uint"/>
+    <field name="URB Atomic Storage Size" start="0" end="8" type="uint"/>
+  </instruction>
+
+  <instruction name="MI_URB_CLEAR" bias="2" length="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="0"/>
+    <field name="MI Command Opcode" start="23" end="28" type="uint" default="25"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="0"/>
+    <field name="URB Clear Length" start="48" end="61" type="uint"/>
+    <field name="URB Address" start="32" end="46" type="offset"/>
+  </instruction>
+
+  <instruction name="MI_USER_INTERRUPT" bias="1" length="1">
+    <field name="Command Type" start="29" end="31" type="uint" default="0"/>
+    <field name="MI Command Opcode" start="23" end="28" type="uint" default="2"/>
+  </instruction>
+
+  <instruction name="MI_WAIT_FOR_EVENT" bias="1" length="1">
+    <field name="Command Type" start="29" end="31" type="uint" default="0"/>
+    <field name="MI Command Opcode" start="23" end="28" type="uint" default="3"/>
+    <field name="Display Pipe C Horizontal Blank Wait Enable" start="22" end="22" type="bool"/>
+    <field name="Display Pipe C Vertical Blank Wait Enable" start="21" end="21" type="bool"/>
+    <field name="Display Sprite C Flip Pending Wait Enable" start="20" end="20" type="bool"/>
+    <field name="Condition Code Wait Select" start="16" end="19" type="uint">
+      <value name="Not enabled" value="0"/>
+    </field>
+    <field name="Display Plane C Flip Pending Wait Enable" start="15" end="15" type="bool"/>
+    <field name="Display Pipe C Scan Line Wait Enable" start="14" end="14" type="bool"/>
+    <field name="Display Pipe B Horizontal Blank Wait Enable" start="13" end="13" type="bool"/>
+    <field name="Display Pipe B Vertical Blank Wait Enable" start="11" end="11" type="bool"/>
+    <field name="Display Sprite B Flip Pending Wait Enable" start="10" end="10" type="bool"/>
+    <field name="Display Plane B Flip Pending Wait Enable" start="9" end="9" type="bool"/>
+    <field name="Display Pipe B Scan Line Wait Enable" start="8" end="8" type="bool"/>
+    <field name="Display Pipe A Horizontal Blank Wait Enable" start="5" end="5" type="bool"/>
+    <field name="Display Pipe A Vertical Blank Wait Enable" start="3" end="3" type="bool"/>
+    <field name="Display Sprite A Flip Pending Wait Enable" start="2" end="2" type="bool"/>
+    <field name="Display Plane A Flip Pending Wait Enable" start="1" end="1" type="bool"/>
+    <field name="Display Pipe A Scan Line Wait Enable" start="0" end="0" type="bool"/>
+  </instruction>
+
+  <instruction name="PIPELINE_SELECT" bias="1" length="1">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="1"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="1"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="4"/>
+    <field name="Pipeline Selection" start="0" end="1" type="uint">
+      <value name="3D" value="0"/>
+      <value name="Media" value="1"/>
+      <value name="GPGPU" value="2"/>
+    </field>
+  </instruction>
+
+  <instruction name="PIPE_CONTROL" bias="2" length="5">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="2"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="0"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="3"/>
+    <field name="Destination Address Type" start="56" end="56" type="uint" prefix="DAT">
+      <value name="PPGTT" value="0"/>
+      <value name="GGTT" value="1"/>
+    </field>
+    <field name="LRI Post Sync Operation" start="55" end="55" type="uint">
+      <value name="No LRI Operation" value="0"/>
+      <value name="MMIO Write Immediate Data" value="1"/>
+    </field>
+    <field name="Store Data Index" start="53" end="53" type="uint"/>
+    <field name="Command Streamer Stall Enable" start="52" end="52" type="uint"/>
+    <field name="Global Snapshot Count Reset" start="51" end="51" type="uint">
+      <value name="Don't Reset" value="0"/>
+      <value name="Reset" value="1"/>
+    </field>
+    <field name="TLB Invalidate" start="50" end="50" type="uint"/>
+    <field name="Generic Media State Clear" start="48" end="48" type="bool"/>
+    <field name="Post Sync Operation" start="46" end="47" type="uint">
+      <value name="No Write" value="0"/>
+      <value name="Write Immediate Data" value="1"/>
+      <value name="Write PS Depth Count" value="2"/>
+      <value name="Write Timestamp" value="3"/>
+    </field>
+    <field name="Depth Stall Enable" start="45" end="45" type="bool"/>
+    <field name="Render Target Cache Flush Enable" start="44" end="44" type="bool"/>
+    <field name="Instruction Cache Invalidate Enable" start="43" end="43" type="bool"/>
+    <field name="Texture Cache Invalidation Enable" start="42" end="42" type="bool"/>
+    <field name="Indirect State Pointers Disable" start="41" end="41" type="bool"/>
+    <field name="Notify Enable" start="40" end="40" type="bool"/>
+    <field name="Pipe Control Flush Enable" start="39" end="39" type="bool"/>
+    <field name="DC  Flush Enable" start="37" end="37" type="bool"/>
+    <field name="VF Cache Invalidation Enable" start="36" end="36" type="bool"/>
+    <field name="Constant Cache Invalidation Enable" start="35" end="35" type="bool"/>
+    <field name="State Cache Invalidation Enable" start="34" end="34" type="bool"/>
+    <field name="Stall At Pixel Scoreboard" start="33" end="33" type="bool"/>
+    <field name="Depth Cache Flush Enable" start="32" end="32" type="bool"/>
+    <field name="Address" start="66" end="95" type="address"/>
+    <field name="Immediate Data" start="96" end="159" type="uint"/>
+  </instruction>
+
+  <instruction name="STATE_BASE_ADDRESS" bias="2" length="10">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="0"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="1"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="1"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="8"/>
+    <field name="General State Base Address" start="44" end="63" type="address"/>
+    <field name="General State Memory Object Control State" start="40" end="43" type="MEMORY_OBJECT_CONTROL_STATE"/>
+    <field name="Stateless Data Port Access Memory Object Control State" start="36" end="39" type="MEMORY_OBJECT_CONTROL_STATE"/>
+    <field name="General State Base Address Modify Enable" start="32" end="32" type="bool"/>
+    <field name="Surface State Base Address" start="76" end="95" type="address"/>
+    <field name="Surface State Memory Object Control State" start="72" end="75" type="MEMORY_OBJECT_CONTROL_STATE"/>
+    <field name="Surface State Base Address Modify Enable" start="64" end="64" type="bool"/>
+    <field name="Dynamic State Base Address" start="108" end="127" type="address"/>
+    <field name="Dynamic State Memory Object Control State" start="104" end="107" type="MEMORY_OBJECT_CONTROL_STATE"/>
+    <field name="Dynamic State Base Address Modify Enable" start="96" end="96" type="bool"/>
+    <field name="Indirect Object Base Address" start="140" end="159" type="address"/>
+    <field name="Indirect Object Memory Object Control State" start="136" end="139" type="MEMORY_OBJECT_CONTROL_STATE"/>
+    <field name="Indirect Object Base Address Modify Enable" start="128" end="128" type="bool"/>
+    <field name="Instruction Base Address" start="172" end="191" type="address"/>
+    <field name="Instruction Memory Object Control State" start="168" end="171" type="MEMORY_OBJECT_CONTROL_STATE"/>
+    <field name="Instruction Base Address Modify Enable" start="160" end="160" type="bool"/>
+    <field name="General State Access Upper Bound" start="204" end="223" type="address"/>
+    <field name="General State Access Upper Bound Modify Enable" start="192" end="192" type="bool"/>
+    <field name="Dynamic State Access Upper Bound" start="236" end="255" type="address"/>
+    <field name="Dynamic State Access Upper Bound Modify Enable" start="224" end="224" type="bool"/>
+    <field name="Indirect Object Access Upper Bound" start="268" end="287" type="address"/>
+    <field name="Indirect Object Access Upper Bound Modify Enable" start="256" end="256" type="bool"/>
+    <field name="Instruction Access Upper Bound" start="300" end="319" type="address"/>
+    <field name="Instruction Access Upper Bound Modify Enable" start="288" end="288" type="bool"/>
+  </instruction>
+
+  <instruction name="STATE_PREFETCH" bias="2" length="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="0"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="3"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="0"/>
+    <field name="Prefetch Pointer" start="38" end="63" type="address"/>
+    <field name="Prefetch Count" start="32" end="34" type="uint"/>
+  </instruction>
+
+  <instruction name="STATE_SIP" bias="2" length="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="0"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="1"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="2"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="0"/>
+    <field name="System Instruction Pointer" start="36" end="63" type="offset"/>
+  </instruction>
+
+  <instruction name="SWTESS_BASE_ADDRESS" bias="2" length="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="0"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="1"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="3"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="0"/>
+    <field name="SW Tessellation Base Address" start="44" end="63" type="address"/>
+    <field name="SW Tessellation Memory Object Control State" start="40" end="43" type="MEMORY_OBJECT_CONTROL_STATE"/>
+  </instruction>
+
+</genxml>
diff --git a/src/intel/genxml/gen8.xml b/src/intel/genxml/gen8.xml
new file mode 100644 (file)
index 0000000..96eda70
--- /dev/null
@@ -0,0 +1,3166 @@
+<genxml name="BDW" gen="8">
+  <struct name="3DSTATE_CONSTANT_BODY" length="10">
+    <field name="Constant Buffer 1 Read Length" start="16" end="31" type="uint"/>
+    <field name="Constant Buffer 0 Read Length" start="0" end="15" type="uint"/>
+    <field name="Constant Buffer 3 Read Length" start="48" end="63" type="uint"/>
+    <field name="Constant Buffer 2 Read Length" start="32" end="47" type="uint"/>
+    <field name="Pointer To Constant Buffer 0" start="69" end="127" type="address"/>
+    <field name="Pointer To Constant Buffer 1" start="133" end="191" type="address"/>
+    <field name="Pointer To Constant Buffer 2" start="197" end="255" type="address"/>
+    <field name="Pointer To Constant Buffer 3" start="261" end="319" type="address"/>
+  </struct>
+
+  <struct name="BINDING_TABLE_EDIT_ENTRY" length="1">
+    <field name="Binding Table Index" start="16" end="23" type="uint"/>
+    <field name="Surface State Pointer" start="0" end="15" type="offset"/>
+  </struct>
+
+  <struct name="GATHER_CONSTANT_ENTRY" length="1">
+    <field name="Constant Buffer Offset" start="8" end="15" type="offset"/>
+    <field name="Channel Mask" start="4" end="7" type="uint"/>
+    <field name="Binding Table Index Offset" start="0" end="3" type="uint"/>
+  </struct>
+
+  <struct name="MEMORY_OBJECT_CONTROL_STATE" length="1">
+    <field name="Memory Type:LLC/eLLC Cacheability Control" start="5" end="6" type="uint">
+      <value name=" UC with Fence (if coherent cycle)" value="0"/>
+      <value name="UC (Uncacheable)" value="1"/>
+      <value name="WT" value="2"/>
+      <value name="WB" value="3"/>
+    </field>
+    <field name="Target Cache" start="3" end="4" type="uint">
+      <value name="eLLC Only (when eDRAM is present, else gets allocated in LLC)" value="0"/>
+      <value name="LLC Only" value="1"/>
+      <value name="LLC/eLLC Allowed" value="2"/>
+      <value name="L3 + Defer to PAT for LLC/eLLC selection" value="3"/>
+    </field>
+    <field name="Age for QUADLRU" start="0" end="1" type="uint"/>
+  </struct>
+
+  <struct name="VERTEX_BUFFER_STATE" length="4">
+    <field name="Vertex Buffer Index" start="26" end="31" type="uint"/>
+    <field name="Memory Object Control State" start="16" end="22" type="MEMORY_OBJECT_CONTROL_STATE"/>
+    <field name="Address Modify Enable" start="14" end="14" type="uint"/>
+    <field name="Null Vertex Buffer" start="13" end="13" type="bool"/>
+    <field name="Buffer Pitch" start="0" end="11" type="uint"/>
+    <field name="Buffer Starting Address" start="32" end="95" type="address"/>
+    <field name="Buffer Size" start="96" end="127" type="uint"/>
+  </struct>
+
+  <struct name="VERTEX_ELEMENT_STATE" length="2">
+    <field name="Vertex Buffer Index" start="26" end="31" type="uint"/>
+    <field name="Valid" start="25" end="25" type="uint"/>
+    <field name="Source Element Format" start="16" end="24" type="uint"/>
+    <field name="Edge Flag Enable" start="15" end="15" type="bool"/>
+    <field name="Source Element Offset" start="0" end="11" type="uint"/>
+    <field name="Component 0 Control" start="60" end="62" type="uint"/>
+    <field name="Component 1 Control" start="56" end="58" type="uint"/>
+    <field name="Component 2 Control" start="52" end="54" type="uint"/>
+    <field name="Component 3 Control" start="48" end="50" type="uint"/>
+  </struct>
+
+  <struct name="SO_DECL" length="1">
+    <field name="Output Buffer Slot" start="12" end="13" type="uint"/>
+    <field name="Hole Flag" start="11" end="11" type="uint"/>
+    <field name="Register Index" start="4" end="9" type="uint"/>
+    <field name="Component Mask" start="0" end="3" type="uint" default="0"/>
+  </struct>
+
+  <struct name="SO_DECL_ENTRY" length="2">
+    <field name="Stream 3 Decl" start="48" end="63" type="SO_DECL"/>
+    <field name="Stream 2 Decl" start="32" end="47" type="SO_DECL"/>
+    <field name="Stream 1 Decl" start="16" end="31" type="SO_DECL"/>
+    <field name="Stream 0 Decl" start="0" end="15" type="SO_DECL"/>
+  </struct>
+
+  <struct name="SF_OUTPUT_ATTRIBUTE_DETAIL" length="1">
+    <field name="Component Override W" start="15" end="15" type="bool"/>
+    <field name="Component Override Z" start="14" end="14" type="bool"/>
+    <field name="Component Override Y" start="13" end="13" type="bool"/>
+    <field name="Component Override X" start="12" end="12" type="bool"/>
+    <field name="Swizzle Control Mode" start="11" end="11" type="uint"/>
+    <field name="Constant Source" start="9" end="10" type="uint">
+      <value name="CONST_0000" value="0"/>
+      <value name="CONST_0001_FLOAT" value="1"/>
+      <value name="CONST_1111_FLOAT" value="2"/>
+      <value name="PRIM_ID" value="3"/>
+    </field>
+    <field name="Swizzle Select" start="6" end="7" type="uint">
+      <value name="INPUTATTR" value="0"/>
+      <value name="INPUTATTR_FACING" value="1"/>
+      <value name="INPUTATTR_W" value="2"/>
+      <value name="INPUTATTR_FACING_W" value="3"/>
+    </field>
+    <field name="Source Attribute" start="0" end="4" type="uint"/>
+  </struct>
+
+  <struct name="SCISSOR_RECT" length="2">
+    <field name="Scissor Rectangle Y Min" start="16" end="31" type="uint"/>
+    <field name="Scissor Rectangle X Min" start="0" end="15" type="uint"/>
+    <field name="Scissor Rectangle Y Max" start="48" end="63" type="uint"/>
+    <field name="Scissor Rectangle X Max" start="32" end="47" type="uint"/>
+  </struct>
+
+  <struct name="SF_CLIP_VIEWPORT" length="16">
+    <field name="Viewport Matrix Element m00" start="0" end="31" type="float"/>
+    <field name="Viewport Matrix Element m11" start="32" end="63" type="float"/>
+    <field name="Viewport Matrix Element m22" start="64" end="95" type="float"/>
+    <field name="Viewport Matrix Element m30" start="96" end="127" type="float"/>
+    <field name="Viewport Matrix Element m31" start="128" end="159" type="float"/>
+    <field name="Viewport Matrix Element m32" start="160" end="191" type="float"/>
+    <field name="X Min Clip Guardband" start="256" end="287" type="float"/>
+    <field name="X Max Clip Guardband" start="288" end="319" type="float"/>
+    <field name="Y Min Clip Guardband" start="320" end="351" type="float"/>
+    <field name="Y Max Clip Guardband" start="352" end="383" type="float"/>
+    <field name="X Min ViewPort" start="384" end="415" type="float"/>
+    <field name="X Max ViewPort" start="416" end="447" type="float"/>
+    <field name="Y Min ViewPort" start="448" end="479" type="float"/>
+    <field name="Y Max ViewPort" start="480" end="511" type="float"/>
+  </struct>
+
+  <struct name="BLEND_STATE_ENTRY" length="2">
+    <field name="Logic Op Enable" start="63" end="63" type="bool"/>
+    <field name="Logic Op Function" start="59" end="62" type="uint"/>
+    <field name="Pre-Blend Source Only Clamp Enable" start="36" end="36" type="bool"/>
+    <field name="Color Clamp Range" start="34" end="35" type="uint">
+      <value name="COLORCLAMP_UNORM" value="0"/>
+      <value name="COLORCLAMP_SNORM" value="1"/>
+      <value name="COLORCLAMP_RTFORMAT" value="2"/>
+    </field>
+    <field name="Pre-Blend Color Clamp Enable" start="33" end="33" type="bool"/>
+    <field name="Post-Blend Color Clamp Enable" start="32" end="32" type="bool"/>
+    <field name="Color Buffer Blend Enable" start="31" end="31" type="bool"/>
+    <field name="Source Blend Factor" start="26" end="30" type="uint"/>
+    <field name="Destination Blend Factor" start="21" end="25" type="uint"/>
+    <field name="Color Blend Function" start="18" end="20" type="uint"/>
+    <field name="Source Alpha Blend Factor" start="13" end="17" type="uint"/>
+    <field name="Destination Alpha Blend Factor" start="8" end="12" type="uint"/>
+    <field name="Alpha Blend Function" start="5" end="7" type="uint"/>
+    <field name="Write Disable Alpha" start="3" end="3" type="bool"/>
+    <field name="Write Disable Red" start="2" end="2" type="bool"/>
+    <field name="Write Disable Green" start="1" end="1" type="bool"/>
+    <field name="Write Disable Blue" start="0" end="0" type="bool"/>
+  </struct>
+
+  <struct name="BLEND_STATE" length="17">
+    <field name="Alpha To Coverage Enable" start="31" end="31" type="bool"/>
+    <field name="Independent Alpha Blend Enable" start="30" end="30" type="bool"/>
+    <field name="Alpha To One Enable" start="29" end="29" type="bool"/>
+    <field name="Alpha To Coverage Dither Enable" start="28" end="28" type="bool"/>
+    <field name="Alpha Test Enable" start="27" end="27" type="bool"/>
+    <field name="Alpha Test Function" start="24" end="26" type="uint"/>
+    <field name="Color Dither Enable" start="23" end="23" type="bool"/>
+    <field name="X Dither Offset" start="21" end="22" type="uint"/>
+    <field name="Y Dither Offset" start="19" end="20" type="uint"/>
+    <group count="8" start="32" size="64">
+      <field name="Entry" start="0" end="63" type="BLEND_STATE_ENTRY"/>
+    </group>
+  </struct>
+
+  <struct name="CC_VIEWPORT" length="2">
+    <field name="Minimum Depth" start="0" end="31" type="float"/>
+    <field name="Maximum Depth" start="32" end="63" type="float"/>
+  </struct>
+
+  <struct name="COLOR_CALC_STATE" length="6">
+    <field name="Stencil Reference Value" start="24" end="31" type="uint"/>
+    <field name="BackFace Stencil Reference Value" start="16" end="23" type="uint"/>
+    <field name="Round Disable Function Disable" start="15" end="15" type="bool"/>
+    <field name="Alpha Test Format" start="0" end="0" type="uint">
+      <value name="ALPHATEST_UNORM8" value="0"/>
+      <value name="ALPHATEST_FLOAT32" value="1"/>
+    </field>
+    <field name="Alpha Reference Value As UNORM8" start="32" end="63" type="uint"/>
+    <field name="Alpha Reference Value As FLOAT32" start="32" end="63" type="float"/>
+    <field name="Blend Constant Color Red" start="64" end="95" type="float"/>
+    <field name="Blend Constant Color Green" start="96" end="127" type="float"/>
+    <field name="Blend Constant Color Blue" start="128" end="159" type="float"/>
+    <field name="Blend Constant Color Alpha" start="160" end="191" type="float"/>
+  </struct>
+
+  <struct name="BLACK_LEVEL_CORRECTION_STATE_-_DW75..76" length="2">
+    <field name="Black Point Offset R" start="0" end="12" type="int" default="0"/>
+    <field name="Black Point Offset G" start="45" end="57" type="int"/>
+    <field name="Black Point Offset B" start="32" end="44" type="int"/>
+  </struct>
+
+  <struct name="INTERFACE_DESCRIPTOR_DATA" length="8">
+    <field name="Kernel Start Pointer" start="6" end="31" type="offset"/>
+    <field name="Kernel Start Pointer High" start="32" end="47" type="offset"/>
+    <field name="Denorm Mode" start="83" end="83" type="uint">
+      <value name="Ftz" value="0"/>
+      <value name="SetByKernel" value="1"/>
+    </field>
+    <field name="Single Program Flow" start="82" end="82" type="uint"/>
+    <field name="Thread Priority" start="81" end="81" type="uint">
+      <value name="Normal Priority" value="0"/>
+      <value name="High Priority" value="1"/>
+    </field>
+    <field name="Floating Point Mode" start="80" end="80" type="uint">
+      <value name="IEEE-754" value="0"/>
+      <value name="Alternate" value="1"/>
+    </field>
+    <field name="Illegal Opcode Exception Enable" start="77" end="77" type="bool"/>
+    <field name="Mask Stack Exception Enable" start="75" end="75" type="bool"/>
+    <field name="Software Exception Enable" start="71" end="71" type="bool"/>
+    <field name="Sampler State Pointer" start="101" end="127" type="offset"/>
+    <field name="Sampler Count" start="98" end="100" type="uint">
+      <value name="No samplers used" value="0"/>
+      <value name="Between 1 and 4 samplers used" value="1"/>
+      <value name="Between 5 and 8 samplers used" value="2"/>
+      <value name="Between 9 and 12 samplers used" value="3"/>
+      <value name="Between 13 and 16 samplers used" value="4"/>
+    </field>
+    <field name="Binding Table Pointer" start="133" end="143" type="offset"/>
+    <field name="Binding Table Entry Count" start="128" end="132" type="uint"/>
+    <field name="Constant/Indirect URB Entry Read Length" start="176" end="191" type="uint"/>
+    <field name="Constant URB Entry Read Offset" start="160" end="175" type="uint"/>
+    <field name="Rounding Mode" start="214" end="215" type="uint">
+      <value name="RTNE" value="0"/>
+      <value name="RU" value="1"/>
+      <value name="RD" value="2"/>
+      <value name="RTZ" value="3"/>
+    </field>
+    <field name="Barrier Enable" start="213" end="213" type="bool"/>
+    <field name="Shared Local Memory Size" start="208" end="212" type="uint">
+      <value name="Encodes 0k" value="0"/>
+      <value name="Encodes 4k" value="1"/>
+      <value name="Encodes 8k" value="2"/>
+      <value name="Encodes 16k" value="4"/>
+      <value name="Encodes 32k" value="8"/>
+      <value name="Encodes 64k" value="16"/>
+    </field>
+    <field name="Number of Threads in GPGPU Thread Group" start="192" end="201" type="uint"/>
+    <field name="Cross-Thread Constant Data Read Length" start="224" end="231" type="uint"/>
+  </struct>
+
+  <struct name="PALETTE_ENTRY" length="1">
+    <field name="Alpha" start="24" end="31" type="uint"/>
+    <field name="Red" start="16" end="23" type="uint"/>
+    <field name="Green" start="8" end="15" type="uint"/>
+    <field name="Blue" start="0" end="7" type="uint"/>
+  </struct>
+
+  <struct name="BINDING_TABLE_STATE" length="1">
+    <field name="Surface State Pointer" start="6" end="31" type="offset"/>
+  </struct>
+
+  <struct name="RENDER_SURFACE_STATE" length="16">
+    <field name="Surface Type" start="29" end="31" type="uint">
+      <value name="SURFTYPE_1D" value="0"/>
+      <value name="SURFTYPE_2D" value="1"/>
+      <value name="SURFTYPE_3D" value="2"/>
+      <value name="SURFTYPE_CUBE" value="3"/>
+      <value name="SURFTYPE_BUFFER" value="4"/>
+      <value name="SURFTYPE_STRBUF" value="5"/>
+      <value name="SURFTYPE_NULL" value="7"/>
+    </field>
+    <field name="Surface Array" start="28" end="28" type="bool"/>
+    <field name="Surface Format" start="18" end="26" type="uint"/>
+    <field name="Surface Vertical Alignment" start="16" end="17" type="uint">
+      <value name="VALIGN 4" value="1"/>
+      <value name="VALIGN 8" value="2"/>
+      <value name="VALIGN 16" value="3"/>
+    </field>
+    <field name="Surface Horizontal Alignment" start="14" end="15" type="uint">
+      <value name="HALIGN 4" value="1"/>
+      <value name="HALIGN 8" value="2"/>
+      <value name="HALIGN 16" value="3"/>
+    </field>
+    <field name="Tile Mode" start="12" end="13" type="uint">
+      <value name="LINEAR" value="0"/>
+      <value name="WMAJOR" value="1"/>
+      <value name="XMAJOR" value="2"/>
+      <value name="YMAJOR" value="3"/>
+    </field>
+    <field name="Vertical Line Stride" start="11" end="11" type="uint"/>
+    <field name="Vertical Line Stride Offset" start="10" end="10" type="uint"/>
+    <field name="Sampler L2 Bypass Mode Disable" start="9" end="9" type="bool"/>
+    <field name="Render Cache Read Write Mode" start="8" end="8" type="uint">
+      <value name="Write-Only Cache" value="0"/>
+      <value name="Read-Write Cache" value="1"/>
+    </field>
+    <field name="Media Boundary Pixel Mode" start="6" end="7" type="uint">
+      <value name="NORMAL_MODE" value="0"/>
+      <value name="PROGRESSIVE_FRAME" value="2"/>
+      <value name="INTERLACED_FRAME" value="3"/>
+    </field>
+    <field name="Cube Face Enable - Positive Z" start="0" end="0" type="bool"/>
+    <field name="Cube Face Enable - Negative Z" start="1" end="1" type="bool"/>
+    <field name="Cube Face Enable - Positive Y" start="2" end="2" type="bool"/>
+    <field name="Cube Face Enable - Negative Y" start="3" end="3" type="bool"/>
+    <field name="Cube Face Enable - Positive X" start="4" end="4" type="bool"/>
+    <field name="Cube Face Enable - Negative X" start="5" end="5" type="bool"/>
+    <field name="Memory Object Control State" start="56" end="62" type="MEMORY_OBJECT_CONTROL_STATE"/>
+    <field name="MOCS" start="56" end="62" type="uint"/>
+    <field name="Base Mip Level" start="51" end="55" type="u4.1"/>
+    <field name="Surface QPitch" start="32" end="46" type="uint"/>
+    <field name="Height" start="80" end="93" type="uint"/>
+    <field name="Width" start="64" end="77" type="uint"/>
+    <field name="Depth" start="117" end="127" type="uint"/>
+    <field name="Surface Pitch" start="96" end="113" type="uint"/>
+    <field name="Render Target And Sample Unorm Rotation" start="157" end="158" type="uint">
+      <value name="0DEG" value="0"/>
+      <value name="90DEG" value="1"/>
+      <value name="270DEG" value="3"/>
+    </field>
+    <field name="Minimum Array Element" start="146" end="156" type="uint"/>
+    <field name="Render Target View Extent" start="135" end="145" type="uint"/>
+    <field name="Multisampled Surface Storage Format" start="134" end="134" type="uint">
+      <value name="MSS" value="0"/>
+      <value name="DEPTH_STENCIL" value="1"/>
+    </field>
+    <field name="Number of Multisamples" start="131" end="133" type="uint">
+      <value name="MULTISAMPLECOUNT_1" value="0"/>
+      <value name="MULTISAMPLECOUNT_2" value="1"/>
+      <value name="MULTISAMPLECOUNT_4" value="2"/>
+      <value name="MULTISAMPLECOUNT_8" value="3"/>
+    </field>
+    <field name="Multisample Position Palette Index" start="128" end="130" type="uint"/>
+    <field name="X Offset" start="185" end="191" type="offset"/>
+    <field name="Y Offset" start="181" end="183" type="offset"/>
+    <field name="EWA Disable For Cube" start="180" end="180" type="bool"/>
+    <field name="Coherency Type" start="174" end="174" type="uint">
+      <value name="GPU coherent" value="0"/>
+      <value name="IA coherent" value="1"/>
+    </field>
+    <field name="Surface Min LOD" start="164" end="167" type="uint"/>
+    <field name="MIP Count / LOD" start="160" end="163" type="uint"/>
+    <field name="Auxiliary Surface QPitch" start="208" end="222" type="uint"/>
+    <field name="Auxiliary Surface Pitch" start="195" end="203" type="uint"/>
+    <field name="Auxiliary Surface Mode" start="192" end="194" type="uint">
+      <value name="AUX_NONE" value="0"/>
+      <value name="AUX_MCS" value="1"/>
+      <value name="AUX_APPEND" value="2"/>
+      <value name="AUX_HIZ" value="3"/>
+    </field>
+    <field name="Separate UV Plane Enable" start="223" end="223" type="bool"/>
+    <field name="X Offset for U or UV Plane" start="208" end="221" type="uint"/>
+    <field name="Y Offset for U or UV Plane" start="192" end="205" type="uint"/>
+    <field name="Red Clear Color" start="255" end="255" type="uint"/>
+    <field name="Green Clear Color" start="254" end="254" type="uint"/>
+    <field name="Blue Clear Color" start="253" end="253" type="uint"/>
+    <field name="Alpha Clear Color" start="252" end="252" type="uint"/>
+    <field name="Shader Channel Select Red" start="249" end="251" type="uint"/>
+    <field name="Shader Channel Select Green" start="246" end="248" type="uint"/>
+    <field name="Shader Channel Select Blue" start="243" end="245" type="uint"/>
+    <field name="Shader Channel Select Alpha" start="240" end="242" type="uint"/>
+    <field name="Resource Min LOD" start="224" end="235" type="u4.8"/>
+    <field name="Surface Base Address" start="256" end="319" type="address"/>
+    <field name="X Offset for V Plane" start="368" end="381" type="uint"/>
+    <field name="Y Offset for V Plane" start="352" end="365" type="uint"/>
+    <field name="Auxiliary Table Index for Media Compressed Surface" start="341" end="351" type="uint"/>
+    <field name="Auxiliary Surface Base Address" start="332" end="383" type="address"/>
+  </struct>
+
+  <struct name="FILTER_COEFFICIENT" length="1">
+    <field name="Filter Coefficient" start="0" end="7" type="s1.6"/>
+  </struct>
+
+  <struct name="SAMPLER_STATE" length="4">
+    <field name="Sampler Disable" start="31" end="31" type="bool"/>
+    <field name="Texture Border Color Mode" start="29" end="29" type="uint">
+      <value name="DX10/OGL" value="0"/>
+      <value name="DX9" value="1"/>
+    </field>
+    <field name="LOD PreClamp Mode" start="27" end="28" type="uint" prefix="CLAMP_MODE">
+      <value name="NONE" value="0"/>
+      <value name="OGL" value="2"/>
+    </field>
+    <field name="Base Mip Level" start="22" end="26" type="u4.1"/>
+    <field name="Mip Mode Filter" start="20" end="21" type="uint" prefix="MIPFILTER">
+      <value name="NONE" value="0"/>
+      <value name="NEAREST" value="1"/>
+      <value name="LINEAR" value="3"/>
+    </field>
+    <field name="Mag Mode Filter" start="17" end="19" type="uint" prefix="MAPFILTER">
+      <value name="NEAREST" value="0"/>
+      <value name="LINEAR" value="1"/>
+      <value name="ANISOTROPIC" value="2"/>
+      <value name="MONO" value="6"/>
+    </field>
+    <field name="Min Mode Filter" start="14" end="16" type="uint" prefix="MAPFILTER">
+      <value name="NEAREST" value="0"/>
+      <value name="LINEAR" value="1"/>
+      <value name="ANISOTROPIC" value="2"/>
+      <value name="MONO" value="6"/>
+    </field>
+    <field name="Texture LOD Bias" start="1" end="13" type="s4.8"/>
+    <field name="Anisotropic Algorithm" start="0" end="0" type="uint">
+      <value name="LEGACY" value="0"/>
+      <value name="EWA Approximation" value="1"/>
+    </field>
+    <field name="Min LOD" start="52" end="63" type="u4.8"/>
+    <field name="Max LOD" start="40" end="51" type="u4.8"/>
+    <field name="ChromaKey Enable" start="39" end="39" type="bool"/>
+    <field name="ChromaKey Index" start="37" end="38" type="uint"/>
+    <field name="ChromaKey Mode" start="36" end="36" type="uint">
+      <value name="KEYFILTER_KILL_ON_ANY_MATCH" value="0"/>
+      <value name="KEYFILTER_REPLACE_BLACK" value="1"/>
+    </field>
+    <field name="Shadow Function" start="33" end="35" type="uint">
+      <value name="PREFILTEROP ALWAYS" value="0"/>
+      <value name="PREFILTEROP NEVER" value="1"/>
+      <value name="PREFILTEROP LESS" value="2"/>
+      <value name="PREFILTEROP EQUAL" value="3"/>
+      <value name="PREFILTEROP LEQUAL" value="4"/>
+      <value name="PREFILTEROP GREATER" value="5"/>
+      <value name="PREFILTEROP NOTEQUAL" value="6"/>
+      <value name="PREFILTEROP GEQUAL" value="7"/>
+    </field>
+    <field name="Cube Surface Control Mode" start="32" end="32" type="uint">
+      <value name="PROGRAMMED" value="0"/>
+      <value name="OVERRIDE" value="1"/>
+    </field>
+    <field name="Border Color Pointer" start="70" end="87" type="offset"/>
+    <field name="LOD Clamp Magnification Mode" start="64" end="64" type="uint">
+      <value name="MIPNONE" value="0"/>
+      <value name="MIPFILTER" value="1"/>
+    </field>
+    <field name="Maximum Anisotropy" start="115" end="117" type="uint">
+      <value name="RATIO 2:1" value="0"/>
+      <value name="RATIO 4:1" value="1"/>
+      <value name="RATIO 6:1" value="2"/>
+      <value name="RATIO 8:1" value="3"/>
+      <value name="RATIO 10:1" value="4"/>
+      <value name="RATIO 12:1" value="5"/>
+      <value name="RATIO 14:1" value="6"/>
+      <value name="RATIO 16:1" value="7"/>
+    </field>
+    <field name="R Address Min Filter Rounding Enable" start="109" end="109" type="bool"/>
+    <field name="R Address Mag Filter Rounding Enable" start="110" end="110" type="bool"/>
+    <field name="V Address Min Filter Rounding Enable" start="111" end="111" type="bool"/>
+    <field name="V Address Mag Filter Rounding Enable" start="112" end="112" type="bool"/>
+    <field name="U Address Min Filter Rounding Enable" start="113" end="113" type="bool"/>
+    <field name="U Address Mag Filter Rounding Enable" start="114" end="114" type="bool"/>
+    <field name="Trilinear Filter Quality" start="107" end="108" type="uint">
+      <value name="FULL" value="0"/>
+      <value name="HIGH" value="1"/>
+      <value name="MED" value="2"/>
+      <value name="LOW" value="3"/>
+    </field>
+    <field name="Non-normalized Coordinate Enable" start="106" end="106" type="bool"/>
+    <field name="TCX Address Control Mode" start="102" end="104" type="uint"/>
+    <field name="TCY Address Control Mode" start="99" end="101" type="uint"/>
+    <field name="TCZ Address Control Mode" start="96" end="98" type="uint"/>
+  </struct>
+
+  <struct name="SAMPLER_STATE_8X8_AVS_COEFFICIENTS" length="8">
+    <field name="Table 0Y Filter Coefficient[n,1]" start="24" end="31" type="s1.6"/>
+    <field name="Table 0X Filter Coefficient[n,1]" start="16" end="23" type="s1.6"/>
+    <field name="Table 0Y Filter Coefficient[n,0]" start="8" end="15" type="s1.6"/>
+    <field name="Table 0X Filter Coefficient[n,0]" start="0" end="7" type="s1.6"/>
+    <field name="Table 0Y Filter Coefficient[n,3]" start="56" end="63" type="s1.6"/>
+    <field name="Table 0X Filter Coefficient[n,3]" start="48" end="55" type="s1.6"/>
+    <field name="Table 0Y Filter Coefficient[n,2]" start="40" end="47" type="s1.6"/>
+    <field name="Table 0X Filter Coefficient[n,2]" start="32" end="39" type="s1.6"/>
+    <field name="Table 0Y Filter Coefficient[n,5]" start="88" end="95" type="s1.6"/>
+    <field name="Table 0X Filter Coefficient[n,5]" start="80" end="87" type="s1.6"/>
+    <field name="Table 0Y Filter Coefficient[n,4]" start="72" end="79" type="s1.6"/>
+    <field name="Table 0X Filter Coefficient[n,4]" start="64" end="71" type="s1.6"/>
+    <field name="Table 0Y Filter Coefficient[n,7]" start="120" end="127" type="s1.6"/>
+    <field name="Table 0X Filter Coefficient[n,7]" start="112" end="119" type="s1.6"/>
+    <field name="Table 0Y Filter Coefficient[n,6]" start="104" end="111" type="s1.6"/>
+    <field name="Table 0X Filter Coefficient[n,6]" start="96" end="103" type="s1.6"/>
+    <field name="Table 1X Filter Coefficient[n,3]" start="152" end="159" type="s1.6"/>
+    <field name="Table 1X Filter Coefficient[n,2]" start="144" end="151" type="s1.6"/>
+    <field name="Table 1X Filter Coefficient[n,5]" start="168" end="175" type="s1.6"/>
+    <field name="Table 1X Filter Coefficient[n,4]" start="160" end="167" type="s1.6"/>
+    <field name="Table 1Y Filter Coefficient[n,3]" start="216" end="223" type="s1.6"/>
+    <field name="Table 1Y Filter Coefficient[n,2]" start="208" end="215" type="s1.6"/>
+    <field name="Table 1Y Filter Coefficient[n,5]" start="232" end="239" type="s1.6"/>
+    <field name="Table 1Y Filter Coefficient[n,4]" start="224" end="231" type="s1.6"/>
+  </struct>
+
+  <enum name="3D_Prim_Topo_Type" prefix="3DPRIM">
+    <value name="POINTLIST" value="1"/>
+    <value name="LINELIST" value="2"/>
+    <value name="LINESTRIP" value="3"/>
+    <value name="TRILIST" value="4"/>
+    <value name="TRISTRIP" value="5"/>
+    <value name="TRIFAN" value="6"/>
+    <value name="QUADLIST" value="7"/>
+    <value name="QUADSTRIP" value="8"/>
+    <value name="LINELIST_ADJ" value="9"/>
+    <value name="LINESTRIP_ADJ" value="10"/>
+    <value name="TRILIST_ADJ" value="11"/>
+    <value name="TRISTRIP_ADJ" value="12"/>
+    <value name="TRISTRIP_REVERSE" value="13"/>
+    <value name="POLYGON" value="14"/>
+    <value name="RECTLIST" value="15"/>
+    <value name="LINELOOP" value="16"/>
+    <value name="POINTLIST _BF" value="17"/>
+    <value name="LINESTRIP_CONT" value="18"/>
+    <value name="LINESTRIP_BF" value="19"/>
+    <value name="LINESTRIP_CONT_BF" value="20"/>
+    <value name="TRIFAN_NOSTIPPLE" value="22"/>
+    <value name="PATCHLIST_1" value="32"/>
+    <value name="PATCHLIST_2" value="33"/>
+    <value name="PATCHLIST_3" value="34"/>
+    <value name="PATCHLIST_4" value="35"/>
+    <value name="PATCHLIST_5" value="36"/>
+    <value name="PATCHLIST_6" value="37"/>
+    <value name="PATCHLIST_7" value="38"/>
+    <value name="PATCHLIST_8" value="39"/>
+    <value name="PATCHLIST_9" value="40"/>
+    <value name="PATCHLIST_10" value="41"/>
+    <value name="PATCHLIST_11" value="42"/>
+    <value name="PATCHLIST_12" value="43"/>
+    <value name="PATCHLIST_13" value="44"/>
+    <value name="PATCHLIST_14" value="45"/>
+    <value name="PATCHLIST_15" value="46"/>
+    <value name="PATCHLIST_16" value="47"/>
+    <value name="PATCHLIST_17" value="48"/>
+    <value name="PATCHLIST_18" value="49"/>
+    <value name="PATCHLIST_19" value="50"/>
+    <value name="PATCHLIST_20" value="51"/>
+    <value name="PATCHLIST_21" value="52"/>
+    <value name="PATCHLIST_22" value="53"/>
+    <value name="PATCHLIST_23" value="54"/>
+    <value name="PATCHLIST_24" value="55"/>
+    <value name="PATCHLIST_25" value="56"/>
+    <value name="PATCHLIST_26" value="57"/>
+    <value name="PATCHLIST_27" value="58"/>
+    <value name="PATCHLIST_28" value="59"/>
+    <value name="PATCHLIST_29" value="60"/>
+    <value name="PATCHLIST_30" value="61"/>
+    <value name="PATCHLIST_31" value="62"/>
+    <value name="PATCHLIST_32" value="63"/>
+  </enum>
+
+  <enum name="3D_Vertex_Component_Control" prefix="VFCOMP">
+    <value name="NOSTORE" value="0"/>
+    <value name="STORE_SRC" value="1"/>
+    <value name="STORE_0" value="2"/>
+    <value name="STORE_1_FP" value="3"/>
+    <value name="STORE_1_INT" value="4"/>
+    <value name="STORE_PID" value="7"/>
+  </enum>
+
+  <enum name="WRAP_SHORTEST_ENABLE" prefix="WSE">
+    <value name="X" value="1"/>
+    <value name="Y" value="2"/>
+    <value name="XY" value="3"/>
+    <value name="Z" value="4"/>
+    <value name="XZ" value="5"/>
+    <value name="YZ" value="6"/>
+    <value name="XYZ" value="7"/>
+    <value name="W" value="8"/>
+    <value name="XW" value="9"/>
+    <value name="YW" value="10"/>
+    <value name="XYW" value="11"/>
+    <value name="ZW" value="12"/>
+    <value name="XZW" value="13"/>
+    <value name="YZW" value="14"/>
+    <value name="XYZW" value="15"/>
+  </enum>
+
+  <enum name="3D_Stencil_Operation" prefix="STENCILOP">
+    <value name="KEEP" value="0"/>
+    <value name="ZERO" value="1"/>
+    <value name="REPLACE" value="2"/>
+    <value name="INCRSAT" value="3"/>
+    <value name="DECRSAT" value="4"/>
+    <value name="INCR" value="5"/>
+    <value name="DECR" value="6"/>
+    <value name="INVERT" value="7"/>
+  </enum>
+
+  <enum name="3D_Color_Buffer_Blend_Factor" prefix="BLENDFACTOR">
+    <value name="ONE" value="1"/>
+    <value name="SRC_COLOR" value="2"/>
+    <value name="SRC_ALPHA" value="3"/>
+    <value name="DST_ALPHA" value="4"/>
+    <value name="DST_COLOR" value="5"/>
+    <value name="SRC_ALPHA_SATURATE" value="6"/>
+    <value name="CONST_COLOR" value="7"/>
+    <value name="CONST_ALPHA" value="8"/>
+    <value name="SRC1_COLOR" value="9"/>
+    <value name="SRC1_ALPHA" value="10"/>
+    <value name="ZERO" value="17"/>
+    <value name="INV_SRC_COLOR" value="18"/>
+    <value name="INV_SRC_ALPHA" value="19"/>
+    <value name="INV_DST_ALPHA" value="20"/>
+    <value name="INV_DST_COLOR" value="21"/>
+    <value name="INV_CONST_COLOR" value="23"/>
+    <value name="INV_CONST_ALPHA" value="24"/>
+    <value name="INV_SRC1_COLOR" value="25"/>
+    <value name="INV_SRC1_ALPHA" value="26"/>
+  </enum>
+
+  <enum name="3D_Color_Buffer_Blend_Function" prefix="BLENDFUNCTION">
+    <value name="ADD" value="0"/>
+    <value name="SUBTRACT" value="1"/>
+    <value name="REVERSE_SUBTRACT" value="2"/>
+    <value name="MIN" value="3"/>
+    <value name="MAX" value="4"/>
+  </enum>
+
+  <enum name="3D_Compare_Function" prefix="COMPAREFUNCTION">
+    <value name="ALWAYS" value="0"/>
+    <value name="NEVER" value="1"/>
+    <value name="LESS" value="2"/>
+    <value name="EQUAL" value="3"/>
+    <value name="LEQUAL" value="4"/>
+    <value name="GREATER" value="5"/>
+    <value name="NOTEQUAL" value="6"/>
+    <value name="GEQUAL" value="7"/>
+  </enum>
+
+  <enum name="3D_Logic_Op_Function" prefix="LOGICOP">
+    <value name="CLEAR" value="0"/>
+    <value name="NOR" value="1"/>
+    <value name="AND_INVERTED" value="2"/>
+    <value name="COPY_INVERTED" value="3"/>
+    <value name="AND_REVERSE" value="4"/>
+    <value name="INVERT" value="5"/>
+    <value name="XOR" value="6"/>
+    <value name="NAND" value="7"/>
+    <value name="AND" value="8"/>
+    <value name="EQUIV" value="9"/>
+    <value name="NOOP" value="10"/>
+    <value name="OR_INVERTED" value="11"/>
+    <value name="COPY" value="12"/>
+    <value name="OR_REVERSE" value="13"/>
+    <value name="OR" value="14"/>
+    <value name="SET" value="15"/>
+  </enum>
+
+  <enum name="SURFACE_FORMAT" prefix="SF">
+    <value name="R32G32B32A32_FLOAT" value="0"/>
+    <value name="R32G32B32A32_SINT" value="1"/>
+    <value name="R32G32B32A32_UINT" value="2"/>
+    <value name="R32G32B32A32_UNORM" value="3"/>
+    <value name="R32G32B32A32_SNORM" value="4"/>
+    <value name="R64G64_FLOAT" value="5"/>
+    <value name="R32G32B32X32_FLOAT" value="6"/>
+    <value name="R32G32B32A32_SSCALED" value="7"/>
+    <value name="R32G32B32A32_USCALED" value="8"/>
+    <value name="R32G32B32A32_SFIXED" value="32"/>
+    <value name="R64G64_PASSTHRU" value="33"/>
+    <value name="R32G32B32_FLOAT" value="64"/>
+    <value name="R32G32B32_SINT" value="65"/>
+    <value name="R32G32B32_UINT" value="66"/>
+    <value name="R32G32B32_UNORM" value="67"/>
+    <value name="R32G32B32_SNORM" value="68"/>
+    <value name="R32G32B32_SSCALED" value="69"/>
+    <value name="R32G32B32_USCALED" value="70"/>
+    <value name="R32G32B32_SFIXED" value="80"/>
+    <value name="R16G16B16A16_UNORM" value="128"/>
+    <value name="R16G16B16A16_SNORM" value="129"/>
+    <value name="R16G16B16A16_SINT" value="130"/>
+    <value name="R16G16B16A16_UINT" value="131"/>
+    <value name="R16G16B16A16_FLOAT" value="132"/>
+    <value name="R32G32_FLOAT" value="133"/>
+    <value name="R32G32_SINT" value="134"/>
+    <value name="R32G32_UINT" value="135"/>
+    <value name="R32_FLOAT_X8X24_TYPELESS" value="136"/>
+    <value name="X32_TYPELESS_G8X24_UINT" value="137"/>
+    <value name="L32A32_FLOAT" value="138"/>
+    <value name="R32G32_UNORM" value="139"/>
+    <value name="R32G32_SNORM" value="140"/>
+    <value name="R64_FLOAT" value="141"/>
+    <value name="R16G16B16X16_UNORM" value="142"/>
+    <value name="R16G16B16X16_FLOAT" value="143"/>
+    <value name="A32X32_FLOAT" value="144"/>
+    <value name="L32X32_FLOAT" value="145"/>
+    <value name="I32X32_FLOAT" value="146"/>
+    <value name="R16G16B16A16_SSCALED" value="147"/>
+    <value name="R16G16B16A16_USCALED" value="148"/>
+    <value name="R32G32_SSCALED" value="149"/>
+    <value name="R32G32_USCALED" value="150"/>
+    <value name="R32G32_SFIXED" value="160"/>
+    <value name="R64_PASSTHRU" value="161"/>
+    <value name="B8G8R8A8_UNORM" value="192"/>
+    <value name="B8G8R8A8_UNORM_SRGB" value="193"/>
+    <value name="R10G10B10A2_UNORM" value="194"/>
+    <value name="R10G10B10A2_UNORM_SRGB" value="195"/>
+    <value name="R10G10B10A2_UINT" value="196"/>
+    <value name="R10G10B10_SNORM_A2_UNORM" value="197"/>
+    <value name="R8G8B8A8_UNORM" value="199"/>
+    <value name="R8G8B8A8_UNORM_SRGB" value="200"/>
+    <value name="R8G8B8A8_SNORM" value="201"/>
+    <value name="R8G8B8A8_SINT" value="202"/>
+    <value name="R8G8B8A8_UINT" value="203"/>
+    <value name="R16G16_UNORM" value="204"/>
+    <value name="R16G16_SNORM" value="205"/>
+    <value name="R16G16_SINT" value="206"/>
+    <value name="R16G16_UINT" value="207"/>
+    <value name="R16G16_FLOAT" value="208"/>
+    <value name="B10G10R10A2_UNORM" value="209"/>
+    <value name="B10G10R10A2_UNORM_SRGB" value="210"/>
+    <value name="R11G11B10_FLOAT" value="211"/>
+    <value name="R32_SINT" value="214"/>
+    <value name="R32_UINT" value="215"/>
+    <value name="R32_FLOAT" value="216"/>
+    <value name="R24_UNORM_X8_TYPELESS" value="217"/>
+    <value name="X24_TYPELESS_G8_UINT" value="218"/>
+    <value name="L32_UNORM" value="221"/>
+    <value name="A32_UNORM" value="222"/>
+    <value name="L16A16_UNORM" value="223"/>
+    <value name="I24X8_UNORM" value="224"/>
+    <value name="L24X8_UNORM" value="225"/>
+    <value name="A24X8_UNORM" value="226"/>
+    <value name="I32_FLOAT" value="227"/>
+    <value name="L32_FLOAT" value="228"/>
+    <value name="A32_FLOAT" value="229"/>
+    <value name="X8B8_UNORM_G8R8_SNORM" value="230"/>
+    <value name="A8X8_UNORM_G8R8_SNORM" value="231"/>
+    <value name="B8X8_UNORM_G8R8_SNORM" value="232"/>
+    <value name="B8G8R8X8_UNORM" value="233"/>
+    <value name="B8G8R8X8_UNORM_SRGB" value="234"/>
+    <value name="R8G8B8X8_UNORM" value="235"/>
+    <value name="R8G8B8X8_UNORM_SRGB" value="236"/>
+    <value name="R9G9B9E5_SHAREDEXP" value="237"/>
+    <value name="B10G10R10X2_UNORM" value="238"/>
+    <value name="L16A16_FLOAT" value="240"/>
+    <value name="R32_UNORM" value="241"/>
+    <value name="R32_SNORM" value="242"/>
+    <value name="R10G10B10X2_USCALED" value="243"/>
+    <value name="R8G8B8A8_SSCALED" value="244"/>
+    <value name="R8G8B8A8_USCALED" value="245"/>
+    <value name="R16G16_SSCALED" value="246"/>
+    <value name="R16G16_USCALED" value="247"/>
+    <value name="R32_SSCALED" value="248"/>
+    <value name="R32_USCALED" value="249"/>
+    <value name="B5G6R5_UNORM" value="256"/>
+    <value name="B5G6R5_UNORM_SRGB" value="257"/>
+    <value name="B5G5R5A1_UNORM" value="258"/>
+    <value name="B5G5R5A1_UNORM_SRGB" value="259"/>
+    <value name="B4G4R4A4_UNORM" value="260"/>
+    <value name="B4G4R4A4_UNORM_SRGB" value="261"/>
+    <value name="R8G8_UNORM" value="262"/>
+    <value name="R8G8_SNORM" value="263"/>
+    <value name="R8G8_SINT" value="264"/>
+    <value name="R8G8_UINT" value="265"/>
+    <value name="R16_UNORM" value="266"/>
+    <value name="R16_SNORM" value="267"/>
+    <value name="R16_SINT" value="268"/>
+    <value name="R16_UINT" value="269"/>
+    <value name="R16_FLOAT" value="270"/>
+    <value name="A8P8_UNORM_PALETTE0" value="271"/>
+    <value name="A8P8_UNORM_PALETTE1" value="272"/>
+    <value name="I16_UNORM" value="273"/>
+    <value name="L16_UNORM" value="274"/>
+    <value name="A16_UNORM" value="275"/>
+    <value name="L8A8_UNORM" value="276"/>
+    <value name="I16_FLOAT" value="277"/>
+    <value name="L16_FLOAT" value="278"/>
+    <value name="A16_FLOAT" value="279"/>
+    <value name="L8A8_UNORM_SRGB" value="280"/>
+    <value name="R5G5_SNORM_B6_UNORM" value="281"/>
+    <value name="B5G5R5X1_UNORM" value="282"/>
+    <value name="B5G5R5X1_UNORM_SRGB" value="283"/>
+    <value name="R8G8_SSCALED" value="284"/>
+    <value name="R8G8_USCALED" value="285"/>
+    <value name="R16_SSCALED" value="286"/>
+    <value name="R16_USCALED" value="287"/>
+    <value name="P8A8_UNORM_PALETTE0" value="290"/>
+    <value name="P8A8_UNORM_PALETTE1" value="291"/>
+    <value name="A1B5G5R5_UNORM" value="292"/>
+    <value name="A4B4G4R4_UNORM" value="293"/>
+    <value name="L8A8_UINT" value="294"/>
+    <value name="L8A8_SINT" value="295"/>
+    <value name="R8_UNORM" value="320"/>
+    <value name="R8_SNORM" value="321"/>
+    <value name="R8_SINT" value="322"/>
+    <value name="R8_UINT" value="323"/>
+    <value name="A8_UNORM" value="324"/>
+    <value name="I8_UNORM" value="325"/>
+    <value name="L8_UNORM" value="326"/>
+    <value name="P4A4_UNORM_PALETTE0" value="327"/>
+    <value name="A4P4_UNORM_PALETTE0" value="328"/>
+    <value name="R8_SSCALED" value="329"/>
+    <value name="R8_USCALED" value="330"/>
+    <value name="P8_UNORM_PALETTE0" value="331"/>
+    <value name="L8_UNORM_SRGB" value="332"/>
+    <value name="P8_UNORM_PALETTE1" value="333"/>
+    <value name="P4A4_UNORM_PALETTE1" value="334"/>
+    <value name="A4P4_UNORM_PALETTE1" value="335"/>
+    <value name="Y8_UNORM" value="336"/>
+    <value name="L8_UINT" value="338"/>
+    <value name="L8_SINT" value="339"/>
+    <value name="I8_UINT" value="340"/>
+    <value name="I8_SINT" value="341"/>
+    <value name="DXT1_RGB_SRGB" value="384"/>
+    <value name="R1_UNORM" value="385"/>
+    <value name="YCRCB_NORMAL" value="386"/>
+    <value name="YCRCB_SWAPUVY" value="387"/>
+    <value name="P2_UNORM_PALETTE0" value="388"/>
+    <value name="P2_UNORM_PALETTE1" value="389"/>
+    <value name="BC1_UNORM" value="390"/>
+    <value name="BC2_UNORM" value="391"/>
+    <value name="BC3_UNORM" value="392"/>
+    <value name="BC4_UNORM" value="393"/>
+    <value name="BC5_UNORM" value="394"/>
+    <value name="BC1_UNORM_SRGB" value="395"/>
+    <value name="BC2_UNORM_SRGB" value="396"/>
+    <value name="BC3_UNORM_SRGB" value="397"/>
+    <value name="MONO8" value="398"/>
+    <value name="YCRCB_SWAPUV" value="399"/>
+    <value name="YCRCB_SWAPY" value="400"/>
+    <value name="DXT1_RGB" value="401"/>
+    <value name="FXT1" value="402"/>
+    <value name="R8G8B8_UNORM" value="403"/>
+    <value name="R8G8B8_SNORM" value="404"/>
+    <value name="R8G8B8_SSCALED" value="405"/>
+    <value name="R8G8B8_USCALED" value="406"/>
+    <value name="R64G64B64A64_FLOAT" value="407"/>
+    <value name="R64G64B64_FLOAT" value="408"/>
+    <value name="BC4_SNORM" value="409"/>
+    <value name="BC5_SNORM" value="410"/>
+    <value name="R16G16B16_FLOAT" value="411"/>
+    <value name="R16G16B16_UNORM" value="412"/>
+    <value name="R16G16B16_SNORM" value="413"/>
+    <value name="R16G16B16_SSCALED" value="414"/>
+    <value name="R16G16B16_USCALED" value="415"/>
+    <value name="BC6H_SF16" value="417"/>
+    <value name="BC7_UNORM" value="418"/>
+    <value name="BC7_UNORM_SRGB" value="419"/>
+    <value name="BC6H_UF16" value="420"/>
+    <value name="PLANAR_420_8" value="421"/>
+    <value name="R8G8B8_UNORM_SRGB" value="424"/>
+    <value name="ETC1_RGB8" value="425"/>
+    <value name="ETC2_RGB8" value="426"/>
+    <value name="EAC_R11" value="427"/>
+    <value name="EAC_RG11" value="428"/>
+    <value name="EAC_SIGNED_R11" value="429"/>
+    <value name="EAC_SIGNED_RG11" value="430"/>
+    <value name="ETC2_SRGB8" value="431"/>
+    <value name="R16G16B16_UINT" value="432"/>
+    <value name="R16G16B16_SINT" value="433"/>
+    <value name="R32_SFIXED" value="434"/>
+    <value name="R10G10B10A2_SNORM" value="435"/>
+    <value name="R10G10B10A2_USCALED" value="436"/>
+    <value name="R10G10B10A2_SSCALED" value="437"/>
+    <value name="R10G10B10A2_SINT" value="438"/>
+    <value name="B10G10R10A2_SNORM" value="439"/>
+    <value name="B10G10R10A2_USCALED" value="440"/>
+    <value name="B10G10R10A2_SSCALED" value="441"/>
+    <value name="B10G10R10A2_UINT" value="442"/>
+    <value name="B10G10R10A2_SINT" value="443"/>
+    <value name="R64G64B64A64_PASSTHRU" value="444"/>
+    <value name="R64G64B64_PASSTHRU" value="445"/>
+    <value name="ETC2_RGB8_PTA" value="448"/>
+    <value name="ETC2_SRGB8_PTA" value="449"/>
+    <value name="ETC2_EAC_RGBA8" value="450"/>
+    <value name="ETC2_EAC_SRGB8_A8" value="451"/>
+    <value name="R8G8B8_UINT" value="456"/>
+    <value name="R8G8B8_SINT" value="457"/>
+    <value name="RAW" value="511"/>
+  </enum>
+
+  <enum name="Shader Channel Select" prefix="SCS">
+    <value name="ZERO" value="0"/>
+    <value name="ONE" value="1"/>
+    <value name="RED" value="4"/>
+    <value name="GREEN" value="5"/>
+    <value name="BLUE" value="6"/>
+    <value name="ALPHA" value="7"/>
+  </enum>
+
+  <enum name="Clear Color">
+    <value name="CC_ZERO" value="0"/>
+    <value name="CC_ONE" value="1"/>
+  </enum>
+
+  <enum name="Texture Coordinate Mode" prefix="TCM">
+    <value name="WRAP" value="0"/>
+    <value name="MIRROR" value="1"/>
+    <value name="CLAMP" value="2"/>
+    <value name="CUBE" value="3"/>
+    <value name="CLAMP_BORDER" value="4"/>
+    <value name="MIRROR_ONCE" value="5"/>
+    <value name="HALF_BORDER" value="6"/>
+  </enum>
+
+  <instruction name="3DPRIMITIVE" bias="2" length="7">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="3"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="0"/>
+    <field name="Indirect Parameter Enable" start="10" end="10" type="bool"/>
+    <field name="UAV Coherency Required" start="9" end="9" type="bool"/>
+    <field name="Predicate Enable" start="8" end="8" type="bool"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="5"/>
+    <field name="End Offset Enable" start="41" end="41" type="bool"/>
+    <field name="Vertex Access Type" start="40" end="40" type="uint">
+      <value name="SEQUENTIAL" value="0"/>
+      <value name="RANDOM" value="1"/>
+    </field>
+    <field name="Primitive Topology Type" start="32" end="37" type="uint"/>
+    <field name="Vertex Count Per Instance" start="64" end="95" type="uint"/>
+    <field name="Start Vertex Location" start="96" end="127" type="uint"/>
+    <field name="Instance Count" start="128" end="159" type="uint"/>
+    <field name="Start Instance Location" start="160" end="191" type="uint"/>
+    <field name="Base Vertex Location" start="192" end="223" type="int"/>
+  </instruction>
+
+  <instruction name="3DSTATE_AA_LINE_PARAMETERS" bias="2" length="3">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="1"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="10"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="1"/>
+    <field name="AA Point Coverage Bias" start="56" end="63" type="u0.8"/>
+    <field name="AA Coverage Bias" start="48" end="55" type="u0.8"/>
+    <field name="AA Point Coverage Slope" start="40" end="47" type="u0.8"/>
+    <field name="AA Coverage Slope" start="32" end="39" type="u0.8"/>
+    <field name="AA Point Coverage EndCap Bias" start="88" end="95" type="u0.8"/>
+    <field name="AA Coverage EndCap Bias" start="80" end="87" type="u0.8"/>
+    <field name="AA Point Coverage EndCap Slope" start="72" end="79" type="u0.8"/>
+    <field name="AA Coverage EndCap Slope" start="64" end="71" type="u0.8"/>
+  </instruction>
+
+  <instruction name="3DSTATE_BINDING_TABLE_EDIT_DS" bias="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="70"/>
+    <field name="DWord Length" start="0" end="8" type="uint" default="0"/>
+    <field name="Binding Table Block Clear" start="48" end="63" type="uint"/>
+    <field name="Binding Table Edit Target" start="32" end="33" type="uint">
+      <value name="All Cores" value="3"/>
+      <value name="Core 1" value="2"/>
+      <value name="Core 0" value="1"/>
+    </field>
+    <group count="0" start="64" size="32">
+      <field name="Entry [n]" start="0" end="31" type="BINDING_TABLE_EDIT_ENTRY"/>
+    </group>
+  </instruction>
+
+  <instruction name="3DSTATE_BINDING_TABLE_EDIT_GS" bias="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="68"/>
+    <field name="DWord Length" start="0" end="8" type="uint" default="0"/>
+    <field name="Binding Table Block Clear" start="48" end="63" type="uint"/>
+    <field name="Binding Table Edit Target" start="32" end="33" type="uint">
+      <value name="All Cores" value="3"/>
+      <value name="Core 1" value="2"/>
+      <value name="Core 0" value="1"/>
+    </field>
+    <group count="0" start="64" size="32">
+      <field name="Entry [n]" start="0" end="31" type="BINDING_TABLE_EDIT_ENTRY"/>
+    </group>
+  </instruction>
+
+  <instruction name="3DSTATE_BINDING_TABLE_EDIT_HS" bias="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="69"/>
+    <field name="DWord Length" start="0" end="8" type="uint" default="0"/>
+    <field name="Binding Table Block Clear" start="48" end="63" type="uint"/>
+    <field name="Binding Table Edit Target" start="32" end="33" type="uint">
+      <value name="All Cores" value="3"/>
+      <value name="Core 1" value="2"/>
+      <value name="Core 0" value="1"/>
+    </field>
+    <group count="0" start="64" size="32">
+      <field name="Entry [n]" start="0" end="31" type="BINDING_TABLE_EDIT_ENTRY"/>
+    </group>
+  </instruction>
+
+  <instruction name="3DSTATE_BINDING_TABLE_EDIT_PS" bias="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="71"/>
+    <field name="DWord Length" start="0" end="8" type="uint" default="0"/>
+    <field name="Binding Table Block Clear" start="48" end="63" type="uint"/>
+    <field name="Binding Table Edit Target" start="32" end="33" type="uint">
+      <value name="All Cores" value="3"/>
+      <value name="Core 1" value="2"/>
+      <value name="Core 0" value="1"/>
+    </field>
+    <group count="0" start="64" size="32">
+      <field name="Entry [n]" start="0" end="31" type="BINDING_TABLE_EDIT_ENTRY"/>
+    </group>
+  </instruction>
+
+  <instruction name="3DSTATE_BINDING_TABLE_EDIT_VS" bias="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="67"/>
+    <field name="DWord Length" start="0" end="8" type="uint" default="0"/>
+    <field name="Binding Table Block Clear" start="48" end="63" type="uint"/>
+    <field name="Binding Table Edit Target" start="32" end="33" type="uint">
+      <value name="All Cores" value="3"/>
+      <value name="Core 1" value="2"/>
+      <value name="Core 0" value="1"/>
+    </field>
+    <group count="0" start="64" size="32">
+      <field name="Entry [n]" start="0" end="31" type="BINDING_TABLE_EDIT_ENTRY"/>
+    </group>
+  </instruction>
+
+  <instruction name="3DSTATE_BINDING_TABLE_POINTERS_DS" bias="2" length="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="40"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="0"/>
+    <field name="Pointer to DS Binding Table" start="37" end="47" type="offset"/>
+  </instruction>
+
+  <instruction name="3DSTATE_BINDING_TABLE_POINTERS_GS" bias="2" length="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="41"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="0"/>
+    <field name="Pointer to GS Binding Table" start="37" end="47" type="offset"/>
+  </instruction>
+
+  <instruction name="3DSTATE_BINDING_TABLE_POINTERS_HS" bias="2" length="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="39"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="0"/>
+    <field name="Pointer to HS Binding Table" start="37" end="47" type="offset"/>
+  </instruction>
+
+  <instruction name="3DSTATE_BINDING_TABLE_POINTERS_PS" bias="2" length="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="42"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="0"/>
+    <field name="Pointer to PS Binding Table" start="37" end="47" type="offset"/>
+  </instruction>
+
+  <instruction name="3DSTATE_BINDING_TABLE_POINTERS_VS" bias="2" length="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="38"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="0"/>
+    <field name="Pointer to VS Binding Table" start="37" end="47" type="offset"/>
+  </instruction>
+
+  <instruction name="3DSTATE_BINDING_TABLE_POOL_ALLOC" bias="2" length="4">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="1"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="25"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="2"/>
+    <field name="Binding Table Pool Base Address" start="44" end="95" type="address"/>
+    <field name="Binding Table Pool Enable" start="43" end="43" type="uint"/>
+    <field name="Surface Object Control State" start="32" end="38" type="MEMORY_OBJECT_CONTROL_STATE"/>
+    <field name="Binding Table Pool Buffer Size" start="108" end="127" type="uint">
+      <value name="No Valid Data" value="0"/>
+    </field>
+  </instruction>
+
+  <instruction name="3DSTATE_BLEND_STATE_POINTERS" bias="2" length="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="36"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="0"/>
+    <field name="Blend State Pointer" start="38" end="63" type="offset"/>
+    <field name="Blend State Pointer Valid" start="32" end="32" type="bool"/>
+  </instruction>
+
+  <instruction name="3DSTATE_CC_STATE_POINTERS" bias="2" length="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="14"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="0"/>
+    <field name="Color Calc State Pointer" start="38" end="63" type="offset"/>
+    <field name="Color Calc State Pointer Valid" start="32" end="32" type="bool"/>
+  </instruction>
+
+  <instruction name="3DSTATE_CHROMA_KEY" bias="2" length="4">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="1"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="4"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="2"/>
+    <field name="ChromaKey Table Index" start="62" end="63" type="uint"/>
+    <field name="ChromaKey Low Value" start="64" end="95" type="uint"/>
+    <field name="ChromaKey High Value" start="96" end="127" type="uint"/>
+  </instruction>
+
+  <instruction name="3DSTATE_CLEAR_PARAMS" bias="2" length="3">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="4"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="1"/>
+    <field name="Depth Clear Value" start="32" end="63" type="float"/>
+    <field name="Depth Clear Value Valid" start="64" end="64" type="bool"/>
+  </instruction>
+
+  <instruction name="3DSTATE_CLIP" bias="2" length="4">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="18"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="2"/>
+    <field name="Force User Clip Distance Cull Test Enable Bitmask" start="52" end="52" type="bool"/>
+    <field name="Vertex Sub Pixel Precision Select" start="51" end="51" type="uint">
+      <value name="8 Bit" value="0"/>
+      <value name="4 Bit" value="1"/>
+    </field>
+    <field name="Early Cull Enable" start="50" end="50" type="bool"/>
+    <field name="Force User Clip Distance Clip Test Enable Bitmask" start="49" end="49" type="bool"/>
+    <field name="Force Clip Mode" start="48" end="48" type="bool"/>
+    <field name="Clipper Statistics Enable" start="42" end="42" type="bool"/>
+    <field name="User Clip Distance Cull Test Enable Bitmask" start="32" end="39" type="uint"/>
+    <field name="Clip Enable" start="95" end="95" type="bool"/>
+    <field name="API Mode" start="94" end="94" type="uint">
+      <value name="OGL" value="0"/>
+    </field>
+    <field name="Viewport XY Clip Test Enable" start="92" end="92" type="bool"/>
+    <field name="Guardband Clip Test Enable" start="90" end="90" type="bool"/>
+    <field name="User Clip Distance Clip Test Enable Bitmask" start="80" end="87" type="uint"/>
+    <field name="Clip Mode" start="77" end="79" type="uint">
+      <value name="NORMAL" value="0"/>
+      <value name="REJECT_ALL" value="3"/>
+      <value name="ACCEPT_ALL" value="4"/>
+    </field>
+    <field name="Perspective Divide Disable" start="73" end="73" type="bool"/>
+    <field name="Non-Perspective Barycentric Enable" start="72" end="72" type="bool"/>
+    <field name="Triangle Strip/List Provoking Vertex Select" start="68" end="69" type="uint"/>
+    <field name="Line Strip/List Provoking Vertex Select" start="66" end="67" type="uint"/>
+    <field name="Triangle Fan Provoking Vertex Select" start="64" end="65" type="uint"/>
+    <field name="Minimum Point Width" start="113" end="123" type="u8.3"/>
+    <field name="Maximum Point Width" start="102" end="112" type="u8.3"/>
+    <field name="Force Zero RTA Index Enable" start="101" end="101" type="bool"/>
+    <field name="Maximum VP Index" start="96" end="99" type="uint"/>
+  </instruction>
+
+  <instruction name="3DSTATE_CONSTANT_DS" bias="2" length="11">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="26"/>
+    <field name="Constant Buffer Object Control State" start="8" end="14" type="MEMORY_OBJECT_CONTROL_STATE"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="9"/>
+    <field name="Constant Body" start="32" end="351" type="3DSTATE_CONSTANT_BODY"/>
+  </instruction>
+
+  <instruction name="3DSTATE_CONSTANT_GS" bias="2" length="11">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="22"/>
+    <field name="Constant Buffer Object Control State" start="8" end="14" type="MEMORY_OBJECT_CONTROL_STATE"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="9"/>
+    <field name="Constant Body" start="32" end="351" type="3DSTATE_CONSTANT_BODY"/>
+  </instruction>
+
+  <instruction name="3DSTATE_CONSTANT_HS" bias="2" length="11">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="25"/>
+    <field name="Constant Buffer Object Control State" start="8" end="14" type="MEMORY_OBJECT_CONTROL_STATE"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="9"/>
+    <field name="Constant Body" start="32" end="351" type="3DSTATE_CONSTANT_BODY"/>
+  </instruction>
+
+  <instruction name="3DSTATE_CONSTANT_PS" bias="2" length="11">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="23"/>
+    <field name="Constant Buffer Object Control State" start="8" end="14" type="MEMORY_OBJECT_CONTROL_STATE"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="9"/>
+    <field name="Constant Body" start="32" end="351" type="3DSTATE_CONSTANT_BODY"/>
+  </instruction>
+
+  <instruction name="3DSTATE_CONSTANT_VS" bias="2" length="11">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="21"/>
+    <field name="Constant Buffer Object Control State" start="8" end="14" type="MEMORY_OBJECT_CONTROL_STATE"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="9"/>
+    <field name="Constant Body" start="32" end="351" type="3DSTATE_CONSTANT_BODY"/>
+  </instruction>
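+
+  <!-- Fields whose type names another packet element (3DSTATE_CONSTANT_BODY,
+       MEMORY_OBJECT_CONTROL_STATE, GATHER_CONSTANT_ENTRY, SO_DECL_ENTRY, and so
+       on) embed a structure defined by its own <struct> element elsewhere in this
+       file; the given bit span is laid out using that structure's fields. -->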
+
+  <instruction name="3DSTATE_DEPTH_BUFFER" bias="2" length="8">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="5"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="6"/>
+    <field name="Surface Type" start="61" end="63" type="uint">
+      <value name="SURFTYPE_1D" value="0"/>
+      <value name="SURFTYPE_2D" value="1"/>
+      <value name="SURFTYPE_3D" value="2"/>
+      <value name="SURFTYPE_CUBE" value="3"/>
+      <value name="SURFTYPE_NULL" value="7"/>
+    </field>
+    <field name="Depth Write Enable" start="60" end="60" type="bool"/>
+    <field name="Stencil Write Enable" start="59" end="59" type="bool"/>
+    <field name="Hierarchical Depth Buffer Enable" start="54" end="54" type="bool"/>
+    <field name="Surface Format" start="50" end="52" type="uint">
+      <value name="D32_FLOAT" value="1"/>
+      <value name="D24_UNORM_X8_UINT" value="3"/>
+      <value name="D16_UNORM" value="5"/>
+    </field>
+    <field name="Surface Pitch" start="32" end="49" type="uint"/>
+    <field name="Surface Base Address" start="64" end="127" type="address"/>
+    <field name="Height" start="146" end="159" type="uint"/>
+    <field name="Width" start="132" end="145" type="uint"/>
+    <field name="LOD" start="128" end="131" type="uint"/>
+    <field name="Depth" start="181" end="191" type="uint"/>
+    <field name="Minimum Array Element" start="170" end="180" type="uint"/>
+    <field name="Depth Buffer Object Control State" start="160" end="166" type="MEMORY_OBJECT_CONTROL_STATE"/>
+    <field name="Render Target View Extent" start="245" end="255" type="uint"/>
+    <field name="Surface QPitch" start="224" end="238" type="uint"/>
+  </instruction>
+
+  <instruction name="3DSTATE_DRAWING_RECTANGLE" bias="2" length="4">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="1"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="0"/>
+    <field name="Core Mode Select" start="14" end="15" type="uint">
+      <value name="Legacy" value="0"/>
+      <value name="Core 0 Enabled" value="1"/>
+      <value name="Core 1 Enabled" value="2"/>
+    </field>
+    <field name="DWord Length" start="0" end="7" type="uint" default="2"/>
+    <field name="Clipped Drawing Rectangle Y Min" start="48" end="63" type="uint"/>
+    <field name="Clipped Drawing Rectangle X Min" start="32" end="47" type="uint"/>
+    <field name="Clipped Drawing Rectangle Y Max" start="80" end="95" type="uint"/>
+    <field name="Clipped Drawing Rectangle X Max" start="64" end="79" type="uint"/>
+    <field name="Drawing Rectangle Origin Y" start="112" end="127" type="int"/>
+    <field name="Drawing Rectangle Origin X" start="96" end="111" type="int"/>
+  </instruction>
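+
+  <!-- Illustrative sketch only, not part of the hardware description: assuming
+       gen_pack_header.py emits, per instruction, a _header macro holding the
+       default DWords, a C struct whose members are the field names with spaces
+       removed, plus _length and _pack() helpers (the usual genX_pack.h
+       convention), the packet above might be emitted from driver code roughly as:
+
+         struct GENX(3DSTATE_DRAWING_RECTANGLE) rect = {
+            GENX(3DSTATE_DRAWING_RECTANGLE_header),
+            .ClippedDrawingRectangleXMax = fb_width - 1,
+            .ClippedDrawingRectangleYMax = fb_height - 1,
+         };
+         uint32_t dw[GENX(3DSTATE_DRAWING_RECTANGLE_length)];
+         GENX(3DSTATE_DRAWING_RECTANGLE_pack)(NULL, dw, &rect);
+
+       The exact names come from gen_pack_header.py and gen_macros.h; treat this
+       as a sketch of the generated interface, not its definition. -->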
+
+  <instruction name="3DSTATE_DS" bias="2" length="9">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="29"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="7"/>
+    <field name="Kernel Start Pointer" start="38" end="95" type="offset"/>
+    <field name="Single Domain Point Dispatch" start="127" end="127" type="uint"/>
+    <field name="Vector Mask Enable" start="126" end="126" type="bool"/>
+    <field name="Sampler Count" start="123" end="125" type="uint">
+      <value name="No Samplers" value="0"/>
+      <value name="1-4 Samplers" value="1"/>
+      <value name="5-8 Samplers" value="2"/>
+      <value name="9-12 Samplers" value="3"/>
+      <value name="13-16 Samplers" value="4"/>
+    </field>
+    <field name="Binding Table Entry Count" start="114" end="121" type="uint"/>
+    <field name="Thread Dispatch Priority" start="113" end="113" type="uint">
+      <value name="High" value="1"/>
+    </field>
+    <field name="Floating Point Mode" start="112" end="112" type="uint">
+      <value name="IEEE-754" value="0"/>
+      <value name="Alternate" value="1"/>
+    </field>
+    <field name="Accesses UAV" start="110" end="110" type="bool"/>
+    <field name="Illegal Opcode Exception Enable" start="109" end="109" type="bool"/>
+    <field name="Software Exception Enable" start="103" end="103" type="bool"/>
+    <field name="Scratch Space Base Pointer" start="138" end="191" type="offset"/>
+    <field name="Per-Thread Scratch Space" start="128" end="131" type="uint"/>
+    <field name="Dispatch GRF Start Register For URB Data" start="212" end="216" type="uint"/>
+    <field name="Patch URB Entry Read Length" start="203" end="209" type="uint"/>
+    <field name="Patch URB Entry Read Offset" start="196" end="201" type="uint"/>
+    <field name="Maximum Number of Threads" start="245" end="253" type="uint"/>
+    <field name="Statistics Enable" start="234" end="234" type="bool"/>
+    <field name="SIMD8 Dispatch Enable" start="227" end="227" type="bool"/>
+    <field name="Compute W Coordinate Enable" start="226" end="226" type="bool"/>
+    <field name="Cache Disable" start="225" end="225" type="bool"/>
+    <field name="Function Enable" start="224" end="224" type="bool"/>
+    <field name="Vertex URB Entry Output Read Offset" start="277" end="282" type="uint"/>
+    <field name="Vertex URB Entry Output Length" start="272" end="276" type="uint"/>
+    <field name="User Clip Distance Clip Test Enable Bitmask" start="264" end="271" type="uint"/>
+    <field name="User Clip Distance Cull Test Enable Bitmask" start="256" end="263" type="uint"/>
+  </instruction>
+
+  <instruction name="3DSTATE_GATHER_CONSTANT_DS" bias="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="55"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="1"/>
+    <field name="Constant Buffer Valid" start="48" end="63" type="uint"/>
+    <field name="Constant Buffer Binding Table Block" start="44" end="47" type="uint"/>
+    <field name="Gather Buffer Offset" start="70" end="86" type="offset"/>
+    <field name="Constant Buffer Dx9 Generate Stall" start="69" end="69" type="bool"/>
+    <group count="0" start="96" size="32">
+      <field name="Entry_0" start="0" end="15" type="GATHER_CONSTANT_ENTRY"/>
+      <field name="Entry_1" start="16" end="31" type="GATHER_CONSTANT_ENTRY"/>
+    </group>
+  </instruction>
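+
+  <!-- Instructions that carry no "length" attribute, such as the
+       3DSTATE_GATHER_CONSTANT_* family and 3DSTATE_SO_DECL_LIST below, are
+       variable length: a <group count="0"> describes an entry layout that may be
+       repeated any number of times after the fixed DWords, and the driver must
+       program DWord Length to match the number of entries it actually emits. -->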
+
+  <instruction name="3DSTATE_GATHER_CONSTANT_GS" bias="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="53"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="1"/>
+    <field name="Constant Buffer Valid" start="48" end="63" type="uint"/>
+    <field name="Constant Buffer Binding Table Block" start="44" end="47" type="uint"/>
+    <field name="Gather Buffer Offset" start="70" end="86" type="offset"/>
+    <field name="Constant Buffer Dx9 Generate Stall" start="69" end="69" type="bool"/>
+    <group count="0" start="96" size="32">
+      <field name="Entry_0" start="0" end="15" type="GATHER_CONSTANT_ENTRY"/>
+      <field name="Entry_1" start="16" end="31" type="GATHER_CONSTANT_ENTRY"/>
+    </group>
+  </instruction>
+
+  <instruction name="3DSTATE_GATHER_CONSTANT_HS" bias="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="54"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="1"/>
+    <field name="Constant Buffer Valid" start="48" end="63" type="uint"/>
+    <field name="Constant Buffer Binding Table Block" start="44" end="47" type="uint"/>
+    <field name="Gather Buffer Offset" start="70" end="86" type="offset"/>
+    <field name="Constant Buffer Dx9 Generate Stall" start="69" end="69" type="bool"/>
+    <group count="0" start="96" size="32">
+      <field name="Entry_0" start="0" end="15" type="GATHER_CONSTANT_ENTRY"/>
+      <field name="Entry_1" start="16" end="31" type="GATHER_CONSTANT_ENTRY"/>
+    </group>
+  </instruction>
+
+  <instruction name="3DSTATE_GATHER_CONSTANT_PS" bias="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="56"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="1"/>
+    <field name="Constant Buffer Valid" start="48" end="63" type="uint"/>
+    <field name="Constant Buffer Binding Table Block" start="44" end="47" type="uint"/>
+    <field name="Gather Buffer Offset" start="70" end="86" type="offset"/>
+    <field name="Constant Buffer Dx9 Generate Stall" start="69" end="69" type="bool"/>
+    <field name="Constant Buffer Dx9 Enable" start="68" end="68" type="bool"/>
+    <group count="0" start="96" size="32">
+      <field name="Entry_0" start="0" end="15" type="GATHER_CONSTANT_ENTRY"/>
+      <field name="Entry_1" start="16" end="31" type="GATHER_CONSTANT_ENTRY"/>
+    </group>
+  </instruction>
+
+  <instruction name="3DSTATE_GATHER_CONSTANT_VS" bias="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="52"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="0"/>
+    <field name="Constant Buffer Valid" start="48" end="63" type="uint"/>
+    <field name="Constant Buffer Binding Table Block" start="44" end="47" type="uint"/>
+    <field name="Gather Buffer Offset" start="70" end="86" type="offset"/>
+    <field name="Constant Buffer Dx9 Generate Stall" start="69" end="69" type="bool"/>
+    <field name="Constant Buffer Dx9 Enable" start="68" end="68" type="bool"/>
+    <group count="0" start="96" size="32">
+      <field name="Entry_0" start="0" end="15" type="GATHER_CONSTANT_ENTRY"/>
+      <field name="Entry_1" start="16" end="31" type="GATHER_CONSTANT_ENTRY"/>
+    </group>
+  </instruction>
+
+  <instruction name="3DSTATE_GATHER_POOL_ALLOC" bias="2" length="4">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="1"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="26"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="2"/>
+    <field name="Gather Pool Base Address" start="44" end="95" type="address"/>
+    <field name="Gather Pool Enable" start="43" end="43" type="bool"/>
+    <field name="Memory Object Control State" start="32" end="38" type="MEMORY_OBJECT_CONTROL_STATE"/>
+    <field name="Gather Pool Buffer Size" start="108" end="127" type="uint"/>
+  </instruction>
+
+  <instruction name="3DSTATE_GS" bias="2" length="10">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="17"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="8"/>
+    <field name="Kernel Start Pointer" start="38" end="95" type="offset"/>
+    <field name="Single Program Flow" start="127" end="127" type="uint"/>
+    <field name="Vector Mask Enable" start="126" end="126" type="bool"/>
+    <field name="Sampler Count" start="123" end="125" type="uint">
+      <value name="No Samplers" value="0"/>
+      <value name="1-4 Samplers" value="1"/>
+      <value name="5-8 Samplers" value="2"/>
+      <value name="9-12 Samplers" value="3"/>
+      <value name="13-16 Samplers" value="4"/>
+    </field>
+    <field name="Binding Table Entry Count" start="114" end="121" type="uint"/>
+    <field name="Thread Dispatch Priority" start="113" end="113" type="uint">
+      <value name="High" value="1"/>
+    </field>
+    <field name="Floating Point Mode" start="112" end="112" type="uint">
+      <value name="IEEE-754" value="0"/>
+      <value name="Alternate" value="1"/>
+    </field>
+    <field name="Illegal Opcode Exception Enable" start="109" end="109" type="bool"/>
+    <field name="Accesses UAV" start="108" end="108" type="bool"/>
+    <field name="Mask Stack Exception Enable" start="107" end="107" type="bool"/>
+    <field name="Software  Exception Enable" start="103" end="103" type="bool"/>
+    <field name="Expected Vertex Count" start="96" end="101" type="uint"/>
+    <field name="Scratch Space Base Pointer" start="138" end="191" type="offset"/>
+    <field name="Per-Thread Scratch Space" start="128" end="131" type="uint"/>
+    <field name="Output Vertex Size" start="215" end="220" type="uint"/>
+    <field name="Output Topology" start="209" end="214" type="uint" prefix="OUTPUT"/>
+    <field name="Vertex URB Entry Read Length" start="203" end="208" type="uint"/>
+    <field name="Include Vertex Handles" start="202" end="202" type="uint"/>
+    <field name="Vertex URB Entry Read Offset" start="196" end="201" type="uint"/>
+    <field name="Dispatch GRF Start Register For URB Data" start="192" end="195" type="uint"/>
+    <field name="Maximum Number of Threads" start="248" end="255" type="uint"/>
+    <field name="Control Data Header Size" start="244" end="247" type="uint"/>
+    <field name="Instance Control" start="239" end="243" type="uint"/>
+    <field name="Default Stream Id" start="237" end="238" type="uint"/>
+    <field name="Dispatch Mode" start="235" end="236" type="uint" prefix="DISPATCH_MODE">
+      <value name="Dual Instance" value="1"/>
+      <value name="Dual Object" value="2"/>
+      <value name="SIMD8" value="3"/>
+    </field>
+    <field name="Statistics Enable" start="234" end="234" type="bool"/>
+    <field name="Invocations Increment Value" start="229" end="233" type="uint"/>
+    <field name="Include Primitive ID" start="228" end="228" type="uint"/>
+    <field name="Hint" start="227" end="227" type="uint"/>
+    <field name="Reorder Mode" start="226" end="226" type="uint">
+      <value name="LEADING" value="0"/>
+      <value name="TRAILING" value="1"/>
+    </field>
+    <field name="Discard Adjacency" start="225" end="225" type="bool"/>
+    <field name="Enable" start="224" end="224" type="bool"/>
+    <field name="Control Data Format" start="287" end="287" type="uint">
+      <value name="CUT" value="0"/>
+      <value name="SID" value="1"/>
+    </field>
+    <field name="Static Output" start="286" end="286" type="bool"/>
+    <field name="Static Output Vertex Count" start="272" end="282" type="uint"/>
+    <field name="Vertex URB Entry Output Read Offset" start="309" end="314" type="uint"/>
+    <field name="Vertex URB Entry Output Length" start="304" end="308" type="uint"/>
+    <field name="User Clip Distance Clip Test Enable Bitmask" start="296" end="303" type="uint"/>
+    <field name="User Clip Distance Cull Test Enable Bitmask" start="288" end="295" type="uint"/>
+  </instruction>
+
+  <instruction name="3DSTATE_HIER_DEPTH_BUFFER" bias="2" length="5">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="7"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="3"/>
+    <field name="Hierarchical Depth Buffer Object Control State" start="57" end="63" type="MEMORY_OBJECT_CONTROL_STATE"/>
+    <field name="Surface Pitch" start="32" end="48" type="uint"/>
+    <field name="Surface Base Address" start="64" end="127" type="address"/>
+    <field name="Surface QPitch" start="128" end="142" type="uint"/>
+  </instruction>
+
+  <instruction name="3DSTATE_HS" bias="2" length="9">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="27"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="7"/>
+    <field name="Sampler Count" start="59" end="61" type="uint">
+      <value name="No Samplers" value="0"/>
+      <value name="1-4 Samplers" value="1"/>
+      <value name="5-8 Samplers" value="2"/>
+      <value name="9-12 Samplers" value="3"/>
+      <value name="13-16 Samplers" value="4"/>
+    </field>
+    <field name="Binding Table Entry Count" start="50" end="57" type="uint"/>
+    <field name="Thread Dispatch Priority" start="49" end="49" type="uint">
+      <value name="High" value="1"/>
+    </field>
+    <field name="Floating Point Mode" start="48" end="48" type="uint">
+      <value name="IEEE-754" value="0"/>
+      <value name="alternate" value="1"/>
+    </field>
+    <field name="Illegal Opcode Exception Enable" start="45" end="45" type="bool"/>
+    <field name="Software Exception Enable" start="44" end="44" type="bool"/>
+    <field name="Enable" start="95" end="95" type="bool"/>
+    <field name="Statistics Enable" start="93" end="93" type="bool"/>
+    <field name="Maximum Number of Threads" start="72" end="80" type="uint"/>
+    <field name="Instance Count" start="64" end="67" type="uint"/>
+    <field name="Kernel Start Pointer" start="102" end="159" type="offset"/>
+    <field name="Scratch Space Base Pointer" start="170" end="223" type="offset"/>
+    <field name="Per-Thread Scratch Space" start="160" end="163" type="uint"/>
+    <field name="Single Program Flow" start="251" end="251" type="bool"/>
+    <field name="Vector Mask Enable" start="250" end="250" type="bool"/>
+    <field name="Accesses UAV" start="249" end="249" type="bool"/>
+    <field name="Include Vertex Handles" start="248" end="248" type="uint"/>
+    <field name="Dispatch GRF Start Register For URB Data" start="243" end="247" type="uint"/>
+    <field name="Vertex URB Entry Read Length" start="235" end="240" type="uint"/>
+    <field name="Vertex URB Entry Read Offset" start="228" end="233" type="uint"/>
+  </instruction>
+
+  <instruction name="3DSTATE_INDEX_BUFFER" bias="2" length="5">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="10"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="3"/>
+    <field name="Index Format" start="40" end="41" type="uint" prefix="INDEX">
+      <value name="BYTE" value="0"/>
+      <value name="WORD" value="1"/>
+      <value name="DWORD" value="2"/>
+    </field>
+    <field name="Memory Object Control State" start="32" end="38" type="MEMORY_OBJECT_CONTROL_STATE"/>
+    <field name="Buffer Starting Address" start="64" end="127" type="address"/>
+    <field name="Buffer Size" start="128" end="159" type="uint"/>
+  </instruction>
+
+  <instruction name="3DSTATE_LINE_STIPPLE" bias="2" length="3">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="1"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="8"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="1"/>
+    <field name="Modify Enable (Current Repeat Counter, Current Stipple Index)" start="63" end="63" type="bool"/>
+    <field name="Current Repeat Counter" start="53" end="61" type="uint"/>
+    <field name="Current Stipple Index" start="48" end="51" type="uint"/>
+    <field name="Line Stipple Pattern" start="32" end="47" type="uint"/>
+    <field name="Line Stipple Inverse Repeat Count" start="79" end="95" type="u1.16"/>
+    <field name="Line Stipple Repeat Count" start="64" end="72" type="uint"/>
+  </instruction>
+
+  <instruction name="3DSTATE_MONOFILTER_SIZE" bias="2" length="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="1"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="17"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="0"/>
+    <field name="Monochrome Filter Width" start="35" end="37" type="uint"/>
+    <field name="Monochrome Filter Height" start="32" end="34" type="uint"/>
+  </instruction>
+
+  <instruction name="3DSTATE_MULTISAMPLE" bias="2" length="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="13"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="0"/>
+    <field name="Pixel Position Offset Enable" start="37" end="37" type="bool"/>
+    <field name="Pixel Location" start="36" end="36" type="uint">
+      <value name="CENTER" value="0"/>
+      <value name="UL_CORNER" value="1"/>
+    </field>
+    <field name="Number of Multisamples" start="33" end="35" type="uint"/>
+  </instruction>
+
+  <instruction name="3DSTATE_POLY_STIPPLE_OFFSET" bias="2" length="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="1"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="6"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="0"/>
+    <field name="Polygon Stipple X Offset" start="40" end="44" type="uint"/>
+    <field name="Polygon Stipple Y Offset" start="32" end="36" type="uint"/>
+  </instruction>
+
+  <instruction name="3DSTATE_POLY_STIPPLE_PATTERN" bias="2" length="33">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="1"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="7"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="31"/>
+    <group count="32" start="32" size="32">
+      <field name="Pattern Row" start="0" end="31" type="uint"/>
+    </group>
+  </instruction>
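+
+  <!-- A non-zero group count is a fixed-size array: count="32" size="32" starting
+       at bit 32 places one 32-bit Pattern Row in each of DWords 1..32, matching
+       the packet length of 33 DWords and the default DWord Length of 31
+       (33 minus the bias of 2). -->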
+
+  <instruction name="3DSTATE_PS" bias="2" length="12">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="32"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="10"/>
+    <field name="Kernel Start Pointer 0" start="38" end="95" type="offset"/>
+    <field name="Single Program Flow" start="127" end="127" type="uint"/>
+    <field name="Vector Mask Enable" start="126" end="126" type="bool"/>
+    <field name="Sampler Count" start="123" end="125" type="uint">
+      <value name="No Samplers" value="0"/>
+      <value name="1-4 Samplers" value="1"/>
+      <value name="5-8 Samplers" value="2"/>
+      <value name="9-12 Samplers" value="3"/>
+      <value name="13-16 Samplers" value="4"/>
+    </field>
+    <field name="Single Precision Denormal Mode" start="122" end="122" type="uint">
+      <value name="Flushed to Zero" value="0"/>
+      <value name="Retained" value="1"/>
+    </field>
+    <field name="Binding Table Entry Count" start="114" end="121" type="uint"/>
+    <field name="Thread Dispatch Priority" start="113" end="113" type="uint">
+      <value name="High" value="1"/>
+    </field>
+    <field name="Floating Point Mode" start="112" end="112" type="uint">
+      <value name="IEEE-754" value="0"/>
+      <value name="Alternate" value="1"/>
+    </field>
+    <field name="Rounding Mode" start="110" end="111" type="uint">
+      <value name="RTNE" value="0"/>
+      <value name="RU" value="1"/>
+      <value name="RD" value="2"/>
+      <value name="RTZ" value="3"/>
+    </field>
+    <field name="Illegal Opcode Exception Enable" start="109" end="109" type="bool"/>
+    <field name="Mask Stack Exception Enable" start="107" end="107" type="bool"/>
+    <field name="Software  Exception Enable" start="103" end="103" type="bool"/>
+    <field name="Scratch Space Base Pointer" start="138" end="191" type="offset"/>
+    <field name="Per Thread Scratch Space" start="128" end="131" type="uint"/>
+    <field name="Maximum Number of Threads Per PSD" start="215" end="223" type="uint"/>
+    <field name="Push Constant Enable" start="203" end="203" type="bool"/>
+    <field name="Render Target Fast Clear Enable" start="200" end="200" type="bool"/>
+    <field name="Render Target Resolve Enable" start="198" end="198" type="bool"/>
+    <field name="Position XY Offset Select" start="195" end="196" type="uint">
+      <value name="POSOFFSET_NONE" value="0"/>
+      <value name="POSOFFSET_CENTROID" value="2"/>
+      <value name="POSOFFSET_SAMPLE" value="3"/>
+    </field>
+    <field name="32 Pixel Dispatch Enable" start="194" end="194" type="bool"/>
+    <field name="16 Pixel Dispatch Enable" start="193" end="193" type="bool"/>
+    <field name="8 Pixel Dispatch Enable" start="192" end="192" type="bool"/>
+    <field name="Dispatch GRF Start Register For Constant/Setup Data 0" start="240" end="246" type="uint"/>
+    <field name="Dispatch GRF Start Register For Constant/Setup Data 1" start="232" end="238" type="uint"/>
+    <field name="Dispatch GRF Start Register For Constant/Setup Data 2" start="224" end="230" type="uint"/>
+    <field name="Kernel Start Pointer 1" start="262" end="319" type="offset"/>
+    <field name="Kernel Start Pointer 2" start="326" end="383" type="offset"/>
+  </instruction>
+
+  <instruction name="3DSTATE_PS_BLEND" bias="2" length="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="77"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="0"/>
+    <field name="Alpha To Coverage Enable" start="63" end="63" type="bool"/>
+    <field name="Has Writeable RT" start="62" end="62" type="bool"/>
+    <field name="Color Buffer Blend Enable" start="61" end="61" type="bool"/>
+    <field name="Source Alpha Blend Factor" start="56" end="60" type="uint"/>
+    <field name="Destination Alpha Blend Factor" start="51" end="55" type="uint"/>
+    <field name="Source Blend Factor" start="46" end="50" type="uint"/>
+    <field name="Destination Blend Factor" start="41" end="45" type="uint"/>
+    <field name="Alpha Test Enable" start="40" end="40" type="bool"/>
+    <field name="Independent Alpha Blend Enable" start="39" end="39" type="bool"/>
+  </instruction>
+
+  <instruction name="3DSTATE_PS_EXTRA" bias="2" length="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="79"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="0"/>
+    <field name="Pixel Shader Valid" start="63" end="63" type="bool"/>
+    <field name="Pixel Shader Does not write to RT" start="62" end="62" type="bool"/>
+    <field name="oMask Present to Render Target" start="61" end="61" type="bool"/>
+    <field name="Pixel Shader Kills Pixel" start="60" end="60" type="bool"/>
+    <field name="Pixel Shader Computed Depth Mode" start="58" end="59" type="uint">
+      <value name="PSCDEPTH_OFF" value="0"/>
+      <value name="PSCDEPTH_ON" value="1"/>
+      <value name="PSCDEPTH_ON_GE" value="2"/>
+      <value name="PSCDEPTH_ON_LE" value="3"/>
+    </field>
+    <field name="Force Computed Depth" start="57" end="57" type="bool"/>
+    <field name="Pixel Shader Uses Source Depth" start="56" end="56" type="bool"/>
+    <field name="Pixel Shader Uses Source W" start="55" end="55" type="bool"/>
+    <field name="Attribute Enable" start="40" end="40" type="bool"/>
+    <field name="Pixel Shader Disables Alpha To Coverage" start="39" end="39" type="bool"/>
+    <field name="Pixel Shader Is Per Sample" start="38" end="38" type="bool"/>
+    <field name="Pixel Shader Has UAV" start="34" end="34" type="bool"/>
+    <field name="Pixel Shader Uses Input Coverage Mask" start="33" end="33" type="bool"/>
+  </instruction>
+
+  <instruction name="3DSTATE_PUSH_CONSTANT_ALLOC_DS" bias="2" length="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="1"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="20"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="0"/>
+    <field name="Constant Buffer Offset" start="48" end="52" type="uint"/>
+    <field name="Constant Buffer Size" start="32" end="37" type="uint"/>
+  </instruction>
+
+  <instruction name="3DSTATE_PUSH_CONSTANT_ALLOC_GS" bias="2" length="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="1"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="21"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="0"/>
+    <field name="Constant Buffer Offset" start="48" end="52" type="uint"/>
+    <field name="Constant Buffer Size" start="32" end="37" type="uint"/>
+  </instruction>
+
+  <instruction name="3DSTATE_PUSH_CONSTANT_ALLOC_HS" bias="2" length="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="1"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="19"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="0"/>
+    <field name="Constant Buffer Offset" start="48" end="52" type="uint"/>
+    <field name="Constant Buffer Size" start="32" end="37" type="uint"/>
+  </instruction>
+
+  <instruction name="3DSTATE_PUSH_CONSTANT_ALLOC_PS" bias="2" length="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="1"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="22"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="0"/>
+    <field name="Constant Buffer Offset" start="48" end="52" type="uint"/>
+    <field name="Constant Buffer Size" start="32" end="37" type="uint"/>
+  </instruction>
+
+  <instruction name="3DSTATE_PUSH_CONSTANT_ALLOC_VS" bias="2" length="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="1"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="18"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="0"/>
+    <field name="Constant Buffer Offset" start="48" end="52" type="uint"/>
+    <field name="Constant Buffer Size" start="32" end="37" type="uint"/>
+  </instruction>
+
+  <instruction name="3DSTATE_RASTER" bias="2" length="5">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="80"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="3"/>
+    <field name="API Mode" start="54" end="55" type="uint">
+      <value name="DX9/OGL" value="0"/>
+      <value name="DX10.0" value="1"/>
+      <value name="DX10.1+" value="2"/>
+    </field>
+    <field name="Front Winding" start="53" end="53" type="uint">
+      <value name="Clockwise" value="0"/>
+      <value name="Counter Clockwise" value="1"/>
+    </field>
+    <field name="Forced Sample Count" start="50" end="52" type="uint" prefix="FSC">
+      <value name="NUMRASTSAMPLES_0" value="0"/>
+      <value name="NUMRASTSAMPLES_1" value="1"/>
+      <value name="NUMRASTSAMPLES_2" value="2"/>
+      <value name="NUMRASTSAMPLES_4" value="3"/>
+      <value name="NUMRASTSAMPLES_8" value="4"/>
+      <value name="NUMRASTSAMPLES_16" value="5"/>
+    </field>
+    <field name="Cull Mode" start="48" end="49" type="uint" prefix="CULLMODE">
+      <value name="BOTH" value="0"/>
+      <value name="NONE" value="1"/>
+      <value name="FRONT" value="2"/>
+      <value name="BACK" value="3"/>
+    </field>
+    <field name="Force Multisampling" start="46" end="46" type="uint"/>
+    <field name="Smooth Point Enable" start="45" end="45" type="bool"/>
+    <field name="DX Multisample Rasterization Enable" start="44" end="44" type="bool"/>
+    <field name="DX Multisample Rasterization Mode" start="42" end="43" type="uint">
+      <value name="MSRASTMODE_ OFF_PIXEL" value="0"/>
+      <value name="MSRASTMODE_ OFF_PATTERN" value="1"/>
+      <value name="MSRASTMODE_ ON_PIXEL" value="2"/>
+      <value name="MSRASTMODE_ ON_PATTERN" value="3"/>
+    </field>
+    <field name="Global Depth Offset Enable Solid" start="41" end="41" type="bool"/>
+    <field name="Global Depth Offset Enable Wireframe" start="40" end="40" type="bool"/>
+    <field name="Global Depth Offset Enable Point" start="39" end="39" type="bool"/>
+    <field name="Front Face Fill Mode" start="37" end="38" type="uint" prefix="FILL_MODE">
+      <value name="SOLID" value="0"/>
+      <value name="WIREFRAME" value="1"/>
+      <value name="POINT" value="2"/>
+    </field>
+    <field name="Back Face Fill Mode" start="35" end="36" type="uint" prefix="FILL_MODE">
+      <value name="SOLID" value="0"/>
+      <value name="WIREFRAME" value="1"/>
+      <value name="POINT" value="2"/>
+    </field>
+    <field name="Antialiasing Enable" start="34" end="34" type="bool"/>
+    <field name="Scissor Rectangle Enable" start="33" end="33" type="bool"/>
+    <field name="Viewport Z Clip Test Enable" start="32" end="32" type="bool"/>
+    <field name="Global Depth Offset Constant" start="64" end="95" type="float"/>
+    <field name="Global Depth Offset Scale" start="96" end="127" type="float"/>
+    <field name="Global Depth Offset Clamp" start="128" end="159" type="float"/>
+  </instruction>
+
+  <instruction name="3DSTATE_SAMPLER_PALETTE_LOAD0" bias="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="1"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="2"/>
+    <field name="DWord Length" start="0" end="7" type="uint"/>
+    <group count="0" start="32" size="32">
+      <field name="Entry" start="0" end="31" type="PALETTE_ENTRY"/>
+    </group>
+  </instruction>
+
+  <instruction name="3DSTATE_SAMPLER_PALETTE_LOAD1" bias="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="1"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="12"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="0"/>
+    <group count="0" start="32" size="32">
+      <field name="Palette Alpha[0:N-1]" start="24" end="31" type="uint"/>
+      <field name="Palette Red[0:N-1]" start="16" end="23" type="uint"/>
+      <field name="Palette Green[0:N-1]" start="8" end="15" type="uint"/>
+      <field name="Palette Blue[0:N-1]" start="0" end="7" type="uint"/>
+    </group>
+  </instruction>
+
+  <instruction name="3DSTATE_SAMPLER_STATE_POINTERS_DS" bias="2" length="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="45"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="0"/>
+    <field name="Pointer to DS Sampler State" start="37" end="63" type="offset"/>
+  </instruction>
+
+  <instruction name="3DSTATE_SAMPLER_STATE_POINTERS_GS" bias="2" length="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="46"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="0"/>
+    <field name="Pointer to GS Sampler State" start="37" end="63" type="offset"/>
+  </instruction>
+
+  <instruction name="3DSTATE_SAMPLER_STATE_POINTERS_HS" bias="2" length="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="44"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="0"/>
+    <field name="Pointer to HS Sampler State" start="37" end="63" type="offset"/>
+  </instruction>
+
+  <instruction name="3DSTATE_SAMPLER_STATE_POINTERS_PS" bias="2" length="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="47"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="0"/>
+    <field name="Pointer to PS Sampler State" start="37" end="63" type="offset"/>
+  </instruction>
+
+  <instruction name="3DSTATE_SAMPLER_STATE_POINTERS_VS" bias="2" length="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="43"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="0"/>
+    <field name="Pointer to VS Sampler State" start="37" end="63" type="offset"/>
+  </instruction>
+
+  <instruction name="3DSTATE_SAMPLE_MASK" bias="2" length="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="24"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="0"/>
+    <field name="Sample Mask" start="32" end="47" type="uint"/>
+  </instruction>
+
+  <instruction name="3DSTATE_SAMPLE_PATTERN" bias="2" length="9">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="1"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="28"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="7"/>
+    <group count="4" start="32" size="32">
+    </group>
+    <field name="8x Sample7 X Offset" start="188" end="191" type="u0.4"/>
+    <field name="8x Sample7 Y Offset" start="184" end="187" type="u0.4"/>
+    <field name="8x Sample6 X Offset" start="180" end="183" type="u0.4"/>
+    <field name="8x Sample6 Y Offset" start="176" end="179" type="u0.4"/>
+    <field name="8x Sample5 X Offset" start="172" end="175" type="u0.4"/>
+    <field name="8x Sample5 Y Offset" start="168" end="171" type="u0.4"/>
+    <field name="8x Sample4 X Offset" start="164" end="167" type="u0.4"/>
+    <field name="8x Sample4 Y Offset" start="160" end="163" type="u0.4"/>
+    <field name="8x Sample3 X Offset" start="220" end="223" type="u0.4"/>
+    <field name="8x Sample3 Y Offset" start="216" end="219" type="u0.4"/>
+    <field name="8x Sample2 X Offset" start="212" end="215" type="u0.4"/>
+    <field name="8x Sample2 Y Offset" start="208" end="211" type="u0.4"/>
+    <field name="8x Sample1 X Offset" start="204" end="207" type="u0.4"/>
+    <field name="8x Sample1 Y Offset" start="200" end="203" type="u0.4"/>
+    <field name="8x Sample0 X Offset" start="196" end="199" type="u0.4"/>
+    <field name="8x Sample0 Y Offset" start="192" end="195" type="u0.4"/>
+    <field name="4x Sample3 X Offset" start="252" end="255" type="u0.4"/>
+    <field name="4x Sample3 Y Offset" start="248" end="251" type="u0.4"/>
+    <field name="4x Sample2 X Offset" start="244" end="247" type="u0.4"/>
+    <field name="4x Sample2 Y Offset" start="240" end="243" type="u0.4"/>
+    <field name="4x Sample1 X Offset" start="236" end="239" type="u0.4"/>
+    <field name="4x Sample1 Y Offset" start="232" end="235" type="u0.4"/>
+    <field name="4x Sample0 X Offset" start="228" end="231" type="u0.4"/>
+    <field name="4x Sample0 Y Offset" start="224" end="227" type="u0.4"/>
+    <field name="1x Sample0 X Offset" start="276" end="279" type="u0.4"/>
+    <field name="1x Sample0 Y Offset" start="272" end="275" type="u0.4"/>
+    <field name="2x Sample1 X Offset" start="268" end="271" type="u0.4"/>
+    <field name="2x Sample1 Y Offset" start="264" end="267" type="u0.4"/>
+    <field name="2x Sample0 X Offset" start="260" end="263" type="u0.4"/>
+    <field name="2x Sample0 Y Offset" start="256" end="259" type="u0.4"/>
+  </instruction>
+
+  <instruction name="3DSTATE_SBE" bias="2" length="4">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="31"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="2"/>
+    <field name="Force Vertex URB Entry Read Length" start="61" end="61" type="bool"/>
+    <field name="Force Vertex URB Entry Read Offset" start="60" end="60" type="bool"/>
+    <field name="Number of SF Output Attributes" start="54" end="59" type="uint"/>
+    <field name="Attribute Swizzle Enable" start="53" end="53" type="bool"/>
+    <field name="Point Sprite Texture Coordinate Origin" start="52" end="52" type="uint">
+      <value name="UPPERLEFT" value="0"/>
+      <value name="LOWERLEFT" value="1"/>
+    </field>
+    <field name="Primitive ID Override Component W" start="51" end="51" type="bool"/>
+    <field name="Primitive ID Override Component Z" start="50" end="50" type="bool"/>
+    <field name="Primitive ID Override Component Y" start="49" end="49" type="bool"/>
+    <field name="Primitive ID Override Component X" start="48" end="48" type="bool"/>
+    <field name="Vertex URB Entry Read Length" start="43" end="47" type="uint"/>
+    <field name="Vertex URB Entry Read Offset" start="37" end="42" type="uint"/>
+    <field name="Primitive ID Override Attribute Select" start="32" end="36" type="uint"/>
+    <field name="Point Sprite Texture Coordinate Enable" start="64" end="95" type="uint"/>
+    <field name="Constant Interpolation Enable" start="96" end="127" type="uint"/>
+  </instruction>
+
+  <instruction name="3DSTATE_SBE_SWIZ" bias="2" length="11">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="81"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="9"/>
+    <group count="16" start="32" size="16">
+      <field name="Attribute" start="0" end="15" type="SF_OUTPUT_ATTRIBUTE_DETAIL"/>
+    </group>
+    <group count="16" start="288" size="4">
+      <field name="Attribute Wrap Shortest Enables" start="0" end="3" type="uint"/>
+    </group>
+  </instruction>
+
+  <instruction name="3DSTATE_SCISSOR_STATE_POINTERS" bias="2" length="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="15"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="0"/>
+    <field name="Scissor Rect Pointer" start="37" end="63" type="offset"/>
+  </instruction>
+
+  <instruction name="3DSTATE_SF" bias="2" length="4">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="19"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="2"/>
+    <field name="Legacy Global Depth Bias Enable" start="43" end="43" type="bool"/>
+    <field name="Statistics Enable" start="42" end="42" type="bool"/>
+    <field name="Viewport Transform Enable" start="33" end="33" type="bool"/>
+    <field name="Line Width" start="82" end="91" type="u3.7"/>
+    <field name="Line End Cap Antialiasing Region Width" start="80" end="81" type="uint">
+      <value name="0.5 pixels" value="0"/>
+      <value name="1.0 pixels" value="1"/>
+      <value name="2.0 pixels" value="2"/>
+      <value name="4.0 pixels" value="3"/>
+    </field>
+    <field name="Last Pixel Enable" start="127" end="127" type="bool"/>
+    <field name="Triangle Strip/List Provoking Vertex Select" start="125" end="126" type="uint"/>
+    <field name="Line Strip/List Provoking Vertex Select" start="123" end="124" type="uint"/>
+    <field name="Triangle Fan Provoking Vertex Select" start="121" end="122" type="uint"/>
+    <field name="AA Line Distance Mode" start="110" end="110" type="uint">
+      <value name="AALINEDISTANCE_TRUE" value="1"/>
+    </field>
+    <field name="Smooth Point Enable" start="109" end="109" type="bool"/>
+    <field name="Vertex Sub Pixel Precision Select" start="108" end="108" type="uint"/>
+    <field name="Point Width Source" start="107" end="107" type="uint">
+      <value name="Vertex" value="0"/>
+      <value name="State" value="1"/>
+    </field>
+    <field name="Point Width" start="96" end="106" type="u8.3"/>
+  </instruction>
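+  <!-- The u3.7 and u8.3 types above are read here as unsigned fixed-point
+       formats with 3 (respectively 8) integer bits and 7 (respectively 3)
+       fractional bits, which matches the 10-bit "Line Width" field
+       (bits 82..91) and the 11-bit "Point Width" field (bits 96..106). -->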
+
+  <instruction name="3DSTATE_SO_BUFFER" bias="2" length="8">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="1"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="24"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="6"/>
+    <field name="SO Buffer Enable" start="63" end="63" type="bool"/>
+    <field name="SO Buffer Index" start="61" end="62" type="uint"/>
+    <field name="SO Buffer Object Control State" start="54" end="60" type="MEMORY_OBJECT_CONTROL_STATE"/>
+    <field name="Stream Offset Write Enable" start="53" end="53" type="bool"/>
+    <field name="Stream Output Buffer Offset Address Enable" start="52" end="52" type="bool"/>
+    <field name="Surface Base Address" start="66" end="111" type="address"/>
+    <field name="Surface Size" start="128" end="157" type="uint"/>
+    <field name="Stream Output Buffer Offset Address" start="162" end="207" type="address"/>
+    <field name="Stream Offset" start="224" end="255" type="uint"/>
+  </instruction>
+
+  <instruction name="3DSTATE_SO_DECL_LIST" bias="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="1"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="23"/>
+    <field name="DWord Length" start="0" end="8" type="uint"/>
+    <field name="Stream to Buffer Selects [3]" start="44" end="47" type="uint"/>
+    <field name="Stream to Buffer Selects [2]" start="40" end="43" type="uint"/>
+    <field name="Stream to Buffer Selects [1]" start="36" end="39" type="uint"/>
+    <field name="Stream to Buffer Selects [0]" start="32" end="35" type="uint"/>
+    <field name="Num Entries [3]" start="88" end="95" type="uint"/>
+    <field name="Num Entries [2]" start="80" end="87" type="uint"/>
+    <field name="Num Entries [1]" start="72" end="79" type="uint"/>
+    <field name="Num Entries [0]" start="64" end="71" type="uint"/>
+    <group count="0" start="96" size="64">
+      <field name="Entry" start="0" end="63" type="SO_DECL_ENTRY"/>
+    </group>
+  </instruction>
+
+  <instruction name="3DSTATE_STENCIL_BUFFER" bias="2" length="5">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="6"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="3"/>
+    <field name="Stencil Buffer Enable" start="63" end="63" type="uint"/>
+    <field name="Stencil Buffer Object Control State" start="54" end="60" type="MEMORY_OBJECT_CONTROL_STATE"/>
+    <field name="Surface Pitch" start="32" end="48" type="uint"/>
+    <field name="Surface Base Address" start="64" end="127" type="address"/>
+    <field name="Surface QPitch" start="128" end="142" type="uint"/>
+  </instruction>
+
+  <instruction name="3DSTATE_STREAMOUT" bias="2" length="5">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="30"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="3"/>
+    <field name="SO Function Enable" start="63" end="63" type="uint"/>
+    <field name="API Rendering Disable" start="62" end="62" type="uint"/>
+    <field name="Render Stream Select" start="59" end="60" type="uint"/>
+    <field name="Reorder Mode" start="58" end="58" type="uint">
+      <value name="LEADING" value="0"/>
+      <value name="TRAILING" value="1"/>
+    </field>
+    <field name="SO Statistics Enable" start="57" end="57" type="bool"/>
+    <field name="Force Rendering" start="55" end="56" type="uint">
+      <value name="Resreved" value="1"/>
+      <value name="Force_Off" value="2"/>
+      <value name="Force_on" value="3"/>
+    </field>
+    <field name="Stream 3 Vertex Read Offset" start="93" end="93" type="uint"/>
+    <field name="Stream 3 Vertex Read Length" start="88" end="92" type="uint"/>
+    <field name="Stream 2 Vertex Read Offset" start="85" end="85" type="uint"/>
+    <field name="Stream 2 Vertex Read Length" start="80" end="84" type="uint"/>
+    <field name="Stream 1 Vertex Read Offset" start="77" end="77" type="uint"/>
+    <field name="Stream 1 Vertex Read Length" start="72" end="76" type="uint"/>
+    <field name="Stream 0 Vertex Read Offset" start="69" end="69" type="uint"/>
+    <field name="Stream 0 Vertex Read Length" start="64" end="68" type="uint"/>
+    <field name="Buffer 1 Surface Pitch" start="112" end="123" type="uint"/>
+    <field name="Buffer 0 Surface Pitch" start="96" end="107" type="uint"/>
+    <field name="Buffer 3 Surface Pitch" start="144" end="155" type="uint"/>
+    <field name="Buffer 2 Surface Pitch" start="128" end="139" type="uint"/>
+  </instruction>
+
+  <instruction name="3DSTATE_TE" bias="2" length="4">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="28"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="2"/>
+    <field name="Partitioning" start="44" end="45" type="uint">
+      <value name="INTEGER" value="0"/>
+      <value name="ODD_FRACTIONAL" value="1"/>
+      <value name="EVEN_FRACTIONAL" value="2"/>
+    </field>
+    <field name="Output Topology" start="40" end="41" type="uint" prefix="OUTPUT">
+      <value name="POINT" value="0"/>
+      <value name="LINE" value="1"/>
+      <value name="TRI_CW" value="2"/>
+      <value name="TRI_CCW" value="3"/>
+    </field>
+    <field name="TE Domain" start="36" end="37" type="uint">
+      <value name="QUAD" value="0"/>
+      <value name="TRI" value="1"/>
+      <value name="ISOLINE" value="2"/>
+    </field>
+    <field name="TE Mode" start="33" end="34" type="uint">
+      <value name="HW_TESS" value="0"/>
+      <value name="SW_TESS" value="1"/>
+    </field>
+    <field name="TE Enable" start="32" end="32" type="bool"/>
+    <field name="Maximum Tessellation Factor Odd" start="64" end="95" type="float"/>
+    <field name="Maximum Tessellation Factor Not Odd" start="96" end="127" type="float"/>
+  </instruction>
+
+  <instruction name="3DSTATE_URB_DS" bias="2" length="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="50"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="0"/>
+    <field name="DS URB Starting Address" start="57" end="63" type="uint"/>
+    <field name="DS URB Entry Allocation Size" start="48" end="56" type="uint"/>
+    <field name="DS Number of URB Entries" start="32" end="47" type="uint"/>
+  </instruction>
+
+  <instruction name="3DSTATE_URB_GS" bias="2" length="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="51"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="0"/>
+    <field name="GS URB Starting Address" start="57" end="63" type="uint"/>
+    <field name="GS URB Entry Allocation Size" start="48" end="56" type="uint"/>
+    <field name="GS Number of URB Entries" start="32" end="47" type="uint"/>
+  </instruction>
+
+  <instruction name="3DSTATE_URB_HS" bias="2" length="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="49"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="0"/>
+    <field name="HS URB Starting Address" start="57" end="63" type="uint"/>
+    <field name="HS URB Entry Allocation Size" start="48" end="56" type="uint"/>
+    <field name="HS Number of URB Entries" start="32" end="47" type="uint"/>
+  </instruction>
+
+  <instruction name="3DSTATE_URB_VS" bias="2" length="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="48"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="0"/>
+    <field name="VS URB Starting Address" start="57" end="63" type="uint"/>
+    <field name="VS URB Entry Allocation Size" start="48" end="56" type="uint"/>
+    <field name="VS Number of URB Entries" start="32" end="47" type="uint"/>
+  </instruction>
+
+  <instruction name="3DSTATE_VERTEX_BUFFERS" bias="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="8"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="3"/>
+    <group count="0" start="32" size="128">
+      <field name="Vertex Buffer State" start="0" end="127" type="VERTEX_BUFFER_STATE"/>
+    </group>
+  </instruction>
+
+  <instruction name="3DSTATE_VERTEX_ELEMENTS" bias="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="9"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="1"/>
+    <group count="0" start="32" size="64">
+      <field name="Element" start="0" end="63" type="VERTEX_ELEMENT_STATE"/>
+    </group>
+  </instruction>
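+  <!-- Variable-length packets: instructions with no "length" attribute, such
+       as 3DSTATE_VERTEX_BUFFERS and 3DSTATE_VERTEX_ELEMENTS above, end with a
+       <group count="0"> whose element simply repeats to fill the remaining
+       DWords.  The defaults are consistent with DWord Length holding the
+       total packet size minus the bias: a single VERTEX_ELEMENT_STATE gives a
+       1 + 2 = 3 DWord packet, matching the DWord Length default of 1 with
+       bias="2". -->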
+
+  <instruction name="3DSTATE_VF" bias="2" length="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="12"/>
+    <field name="Indexed Draw Cut Index Enable" start="8" end="8" type="bool"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="0"/>
+    <field name="Cut Index" start="32" end="63" type="uint"/>
+  </instruction>
+
+  <instruction name="3DSTATE_VF_INSTANCING" bias="2" length="3">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="73"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="1"/>
+    <field name="Instancing Enable" start="40" end="40" type="bool"/>
+    <field name="Vertex Element Index" start="32" end="37" type="uint"/>
+    <field name="Instance Data Step Rate" start="64" end="95" type="uint"/>
+  </instruction>
+
+  <instruction name="3DSTATE_VF_SGVS" bias="2" length="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="74"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="0"/>
+    <field name="InstanceID Enable" start="63" end="63" type="bool"/>
+    <field name="InstanceID Component Number" start="61" end="62" type="uint">
+      <value name="COMP_0" value="0"/>
+      <value name="COMP_1" value="1"/>
+      <value name="COMP_2" value="2"/>
+      <value name="COMP_3" value="3"/>
+    </field>
+    <field name="InstanceID Element Offset" start="48" end="53" type="uint"/>
+    <field name="VertexID Enable" start="47" end="47" type="bool"/>
+    <field name="VertexID Component Number" start="45" end="46" type="uint">
+      <value name="COMP_0" value="0"/>
+      <value name="COMP_1" value="1"/>
+      <value name="COMP_2" value="2"/>
+      <value name="COMP_3" value="3"/>
+    </field>
+    <field name="VertexID Element Offset" start="32" end="37" type="uint"/>
+  </instruction>
+
+  <instruction name="3DSTATE_VF_STATISTICS" bias="1" length="1">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="1"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="11"/>
+    <field name="Statistics Enable" start="0" end="0" type="bool"/>
+  </instruction>
+
+  <instruction name="3DSTATE_VF_TOPOLOGY" bias="2" length="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="75"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="0"/>
+    <field name="Primitive Topology Type" start="32" end="37" type="uint"/>
+  </instruction>
+
+  <instruction name="3DSTATE_VIEWPORT_STATE_POINTERS_CC" bias="2" length="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="35"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="0"/>
+    <field name="CC Viewport Pointer" start="37" end="63" type="offset"/>
+  </instruction>
+
+  <instruction name="3DSTATE_VIEWPORT_STATE_POINTERS_SF_CLIP" bias="2" length="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="33"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="0"/>
+    <field name="SF Clip Viewport Pointer" start="38" end="63" type="offset"/>
+  </instruction>
+
+  <instruction name="3DSTATE_VS" bias="2" length="9">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="16"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="7"/>
+    <field name="Kernel Start Pointer" start="38" end="95" type="offset"/>
+    <field name="Single Vertex Dispatch" start="127" end="127" type="bool"/>
+    <field name="Vector Mask Enable" start="126" end="126" type="bool"/>
+    <field name="Sampler Count" start="123" end="125" type="uint">
+      <value name="No Samplers" value="0"/>
+      <value name="1-4 Samplers" value="1"/>
+      <value name="5-8 Samplers" value="2"/>
+      <value name="9-12 Samplers" value="3"/>
+      <value name="13-16 Samplers" value="4"/>
+    </field>
+    <field name="Binding Table Entry Count" start="114" end="121" type="uint"/>
+    <field name="Thread Dispatch Priority" start="113" end="113" type="uint">
+      <value name="High" value="1"/>
+    </field>
+    <field name="Floating Point Mode" start="112" end="112" type="uint">
+      <value name="IEEE-754" value="0"/>
+      <value name="Alternate" value="1"/>
+    </field>
+    <field name="Illegal Opcode Exception Enable" start="109" end="109" type="bool"/>
+    <field name="Accesses UAV" start="108" end="108" type="bool"/>
+    <field name="Software Exception Enable" start="103" end="103" type="bool"/>
+    <field name="Scratch Space Base Pointer" start="138" end="191" type="offset"/>
+    <field name="Per-Thread Scratch Space " start="128" end="131" type="uint"/>
+    <field name="Dispatch GRF Start Register For URB Data" start="212" end="216" type="uint"/>
+    <field name="Vertex URB Entry Read Length" start="203" end="208" type="uint"/>
+    <field name="Vertex URB Entry Read Offset" start="196" end="201" type="uint"/>
+    <field name="Maximum Number of Threads" start="247" end="255" type="uint"/>
+    <field name="Statistics Enable" start="234" end="234" type="bool"/>
+    <field name="SIMD8 Dispatch Enable" start="226" end="226" type="bool"/>
+    <field name="Vertex Cache Disable" start="225" end="225" type="bool"/>
+    <field name="Function Enable" start="224" end="224" type="bool"/>
+    <field name="Vertex URB Entry Output Read Offset" start="277" end="282" type="uint"/>
+    <field name="Vertex URB Entry Output Length" start="272" end="276" type="uint"/>
+    <field name="User Clip Distance Clip Test Enable Bitmask" start="264" end="271" type="uint"/>
+    <field name="User Clip Distance Cull Test Enable Bitmask" start="256" end="263" type="uint"/>
+  </instruction>
+
+  <instruction name="3DSTATE_WM" bias="2" length="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="20"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="0"/>
+    <field name="Statistics Enable" start="63" end="63" type="bool"/>
+    <field name="Legacy Depth Buffer Clear Enable" start="62" end="62" type="bool"/>
+    <field name="Legacy Depth Buffer Resolve Enable" start="60" end="60" type="bool"/>
+    <field name="Legacy Hierarchical Depth Buffer Resolve Enable" start="59" end="59" type="bool"/>
+    <field name="Legacy Diamond Line Rasterization" start="58" end="58" type="bool"/>
+    <field name="Early Depth/Stencil Control" start="53" end="54" type="uint">
+      <value name="NORMAL" value="0"/>
+      <value name="PSEXEC" value="1"/>
+      <value name="PREPS" value="2"/>
+    </field>
+    <field name="Force Thread Dispatch Enable" start="51" end="52" type="uint">
+      <value name="ForceOff" value="1"/>
+      <value name="ForceON" value="2"/>
+    </field>
+    <field name="Position ZW Interpolation Mode" start="49" end="50" type="uint">
+      <value name="INTERP_PIXEL" value="0"/>
+      <value name="INTERP_CENTROID" value="2"/>
+      <value name="INTERP_SAMPLE" value="3"/>
+    </field>
+    <field name="Barycentric Interpolation Mode" start="43" end="48" type="uint"/>
+    <field name="Line End Cap Antialiasing Region Width" start="40" end="41" type="uint">
+      <value name="0.5 pixels" value="0"/>
+      <value name="1.0 pixels" value="1"/>
+      <value name="2.0 pixels" value="2"/>
+      <value name="4.0 pixels" value="3"/>
+    </field>
+    <field name="Line Antialiasing Region Width" start="38" end="39" type="uint">
+      <value name="0.5 pixels" value="0"/>
+      <value name="1.0 pixels" value="1"/>
+      <value name="2.0 pixels" value="2"/>
+      <value name="4.0 pixels" value="3"/>
+    </field>
+    <field name="Polygon Stipple Enable" start="36" end="36" type="bool"/>
+    <field name="Line Stipple Enable" start="35" end="35" type="bool"/>
+    <field name="Point Rasterization Rule" start="34" end="34" type="uint">
+      <value name="RASTRULE_UPPER_LEFT" value="0"/>
+      <value name="RASTRULE_UPPER_RIGHT" value="1"/>
+    </field>
+    <field name="Force Kill Pixel Enable" start="32" end="33" type="uint">
+      <value name="ForceOff" value="1"/>
+      <value name="ForceON" value="2"/>
+    </field>
+  </instruction>
+
+  <instruction name="3DSTATE_WM_CHROMAKEY" bias="2" length="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="76"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="0"/>
+    <field name="ChromaKey Kill Enable" start="63" end="63" type="bool"/>
+  </instruction>
+
+  <instruction name="3DSTATE_WM_DEPTH_STENCIL" bias="2" length="3">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="78"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="1"/>
+    <field name="Stencil Fail Op" start="61" end="63" type="uint"/>
+    <field name="Stencil Pass Depth Fail Op" start="58" end="60" type="uint"/>
+    <field name="Stencil Pass Depth Pass Op" start="55" end="57" type="uint"/>
+    <field name="Backface Stencil Test Function" start="52" end="54" type="uint"/>
+    <field name="Backface Stencil Fail Op" start="49" end="51" type="uint"/>
+    <field name="Backface Stencil Pass Depth Fail Op" start="46" end="48" type="uint"/>
+    <field name="Backface Stencil Pass Depth Pass Op" start="43" end="45" type="uint"/>
+    <field name="Stencil Test Function" start="40" end="42" type="uint"/>
+    <field name="Depth Test Function" start="37" end="39" type="uint"/>
+    <field name="Double Sided Stencil Enable" start="36" end="36" type="bool"/>
+    <field name="Stencil Test Enable" start="35" end="35" type="bool"/>
+    <field name="Stencil Buffer Write Enable" start="34" end="34" type="bool"/>
+    <field name="Depth Test Enable" start="33" end="33" type="bool"/>
+    <field name="Depth Buffer Write Enable" start="32" end="32" type="bool"/>
+    <field name="Stencil Test Mask" start="88" end="95" type="uint"/>
+    <field name="Stencil Write Mask" start="80" end="87" type="uint"/>
+    <field name="Backface Stencil Test Mask" start="72" end="79" type="uint"/>
+    <field name="Backface Stencil Write Mask" start="64" end="71" type="uint"/>
+  </instruction>
+
+  <instruction name="3DSTATE_WM_HZ_OP" bias="2" length="5">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="82"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="3"/>
+    <field name="Stencil Buffer Clear Enable" start="63" end="63" type="bool"/>
+    <field name="Depth Buffer Clear Enable" start="62" end="62" type="bool"/>
+    <field name="Scissor Rectangle Enable" start="61" end="61" type="bool"/>
+    <field name="Depth Buffer Resolve Enable" start="60" end="60" type="bool"/>
+    <field name="Hierarchical Depth Buffer Resolve Enable" start="59" end="59" type="bool"/>
+    <field name="Pixel Position Offset Enable" start="58" end="58" type="bool"/>
+    <field name="Full Surface Depth and Stencil Clear" start="57" end="57" type="bool"/>
+    <field name="Stencil Clear Value" start="48" end="55" type="uint"/>
+    <field name="Number of Multisamples" start="45" end="47" type="uint"/>
+    <field name="Clear Rectangle Y Min" start="80" end="95" type="uint"/>
+    <field name="Clear Rectangle X Min" start="64" end="79" type="uint"/>
+    <field name="Clear Rectangle Y Max" start="112" end="127" type="uint"/>
+    <field name="Clear Rectangle X Max" start="96" end="111" type="uint"/>
+    <field name="Sample Mask" start="128" end="143" type="uint"/>
+  </instruction>
+
+  <instruction name="GPGPU_CSR_BASE_ADDRESS" bias="2" length="3">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="0"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="1"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="4"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="1"/>
+    <field name="GPGPU CSR Base Address" start="44" end="95" type="address"/>
+  </instruction>
+
+  <instruction name="GPGPU_WALKER" bias="2" length="15">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Pipeline" start="27" end="28" type="uint" default="2"/>
+    <field name="Media Command Opcode" start="24" end="26" type="uint" default="1"/>
+    <field name="SubOpcode" start="16" end="23" type="uint" default="5"/>
+    <field name="Indirect Parameter Enable" start="10" end="10" type="bool"/>
+    <field name="Predicate Enable" start="8" end="8" type="bool"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="13"/>
+    <field name="Interface Descriptor Offset" start="32" end="37" type="uint"/>
+    <field name="Indirect Data Length" start="64" end="80" type="uint"/>
+    <field name="Indirect Data Start Address" start="102" end="127" type="offset"/>
+    <field name="SIMD Size" start="158" end="159" type="uint">
+      <value name="SIMD8" value="0"/>
+      <value name="SIMD16" value="1"/>
+      <value name="SIMD32" value="2"/>
+    </field>
+    <field name="Thread Depth Counter Maximum" start="144" end="149" type="uint"/>
+    <field name="Thread Height Counter Maximum" start="136" end="141" type="uint"/>
+    <field name="Thread Width Counter Maximum" start="128" end="133" type="uint"/>
+    <field name="Thread Group ID Starting X" start="160" end="191" type="uint"/>
+    <field name="Thread Group ID X Dimension" start="224" end="255" type="uint"/>
+    <field name="Thread Group ID Starting Y" start="256" end="287" type="uint"/>
+    <field name="Thread Group ID Y Dimension" start="320" end="351" type="uint"/>
+    <field name="Thread Group ID Starting/Resume Z" start="352" end="383" type="uint"/>
+    <field name="Thread Group ID Z Dimension" start="384" end="415" type="uint"/>
+    <field name="Right Execution Mask" start="416" end="447" type="uint"/>
+    <field name="Bottom Execution Mask" start="448" end="479" type="uint"/>
+  </instruction>
+
+  <instruction name="MEDIA_CURBE_LOAD" bias="2" length="4">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Pipeline" start="27" end="28" type="uint" default="2"/>
+    <field name="Media Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="SubOpcode" start="16" end="23" type="uint" default="1"/>
+    <field name="DWord Length" start="0" end="15" type="uint" default="2"/>
+    <field name="CURBE Total Data Length" start="64" end="80" type="uint"/>
+    <field name="CURBE Data Start Address" start="96" end="127" type="uint"/>
+  </instruction>
+
+  <instruction name="MEDIA_INTERFACE_DESCRIPTOR_LOAD" bias="2" length="4">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Pipeline" start="27" end="28" type="uint" default="2"/>
+    <field name="Media Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="SubOpcode" start="16" end="23" type="uint" default="2"/>
+    <field name="DWord Length" start="0" end="15" type="uint" default="2"/>
+    <field name="Interface Descriptor Total Length" start="64" end="80" type="uint"/>
+    <field name="Interface Descriptor Data Start Address" start="96" end="127" type="offset"/>
+  </instruction>
+
+  <instruction name="MEDIA_OBJECT" bias="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Media Command Pipeline" start="27" end="28" type="uint" default="2"/>
+    <field name="Media Command Opcode" start="24" end="26" type="uint" default="1"/>
+    <field name="Media Command Sub-Opcode" start="16" end="23" type="uint" default="0"/>
+    <field name="DWord Length" start="0" end="15" type="uint" default="4"/>
+    <field name="Interface Descriptor Offset" start="32" end="37" type="uint"/>
+    <field name="Children Present" start="95" end="95" type="bool"/>
+    <field name="Thread Synchronization" start="88" end="88" type="uint">
+      <value name="No thread synchronization" value="0"/>
+      <value name="Thread dispatch is synchronized by the 'spawn root thread' message" value="1"/>
+    </field>
+    <field name="Force Destination" start="86" end="86" type="uint"/>
+    <field name="Use Scoreboard" start="85" end="85" type="uint">
+      <value name="Not using scoreboard" value="0"/>
+      <value name="Using scoreboard" value="1"/>
+    </field>
+    <field name="Slice Destination Select" start="83" end="84" type="uint">
+      <value name="Slice 0" value="0"/>
+      <value name="Slice 1" value="1"/>
+      <value name="Slice 2" value="2"/>
+    </field>
+    <field name="SubSlice Destination Select" start="81" end="82" type="uint">
+      <value name="SubSlice 2" value="2"/>
+      <value name="SubSlice 1" value="1"/>
+      <value name="SubSlice 0" value="0"/>
+    </field>
+    <field name="Indirect Data Length" start="64" end="80" type="uint"/>
+    <field name="Indirect Data Start Address" start="96" end="127" type="address"/>
+    <field name="Scoredboard Y" start="144" end="152" type="uint"/>
+    <field name="Scoreboard X" start="128" end="136" type="uint"/>
+    <field name="Scoreboard Color" start="176" end="179" type="uint"/>
+    <field name="Scoreboard Mask" start="160" end="167" type="uint"/>
+    <group count="0" start="192" size="32">
+      <field name="Inline Data" start="0" end="31" type="uint"/>
+    </group>
+  </instruction>
+
+  <instruction name="MEDIA_OBJECT_GRPID" bias="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Media Command Pipeline" start="27" end="28" type="uint" default="2"/>
+    <field name="Media Command Opcode" start="24" end="26" type="uint" default="1"/>
+    <field name="Media Command Sub-Opcode" start="16" end="23" type="uint" default="6"/>
+    <field name="DWord Length" start="0" end="15" type="uint" default="5"/>
+    <field name="Interface Descriptor Offset" start="32" end="37" type="uint"/>
+    <field name="End of Thread Group" start="87" end="87" type="uint"/>
+    <field name="Force Destination" start="86" end="86" type="uint"/>
+    <field name="Use Scoreboard" start="85" end="85" type="uint">
+      <value name="Not using scoreboard" value="0"/>
+      <value name="Using scoreboard" value="1"/>
+    </field>
+    <field name="Slice Destination Select" start="83" end="84" type="uint">
+      <value name="Slice 0" value="0"/>
+      <value name="Slice 1" value="1"/>
+      <value name="Slice 2" value="2"/>
+    </field>
+    <field name="SubSlice Destination Select" start="81" end="82" type="uint">
+      <value name="SubSlice 2" value="2"/>
+      <value name="SubSlice 1" value="1"/>
+      <value name="SubSlice 0" value="0"/>
+    </field>
+    <field name="Indirect Data Length" start="64" end="80" type="uint"/>
+    <field name="Indirect Data Start Address" start="96" end="127" type="address"/>
+    <field name="Scoreboard Y" start="144" end="152" type="uint"/>
+    <field name="Scoreboard X" start="128" end="136" type="uint"/>
+    <field name="Scoreboard Color" start="176" end="179" type="uint"/>
+    <field name="Scoreboard Mask" start="160" end="167" type="uint"/>
+    <field name="GroupID" start="192" end="223" type="uint"/>
+    <group count="0" start="224" size="32">
+      <field name="Inline Data" start="0" end="31" type="uint"/>
+    </group>
+  </instruction>
+
+  <instruction name="MEDIA_OBJECT_PRT" bias="2" length="16">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Pipeline" start="27" end="28" type="uint" default="2"/>
+    <field name="Media Command Opcode" start="24" end="26" type="uint" default="1"/>
+    <field name="SubOpcode" start="16" end="23" type="uint" default="2"/>
+    <field name="DWord Length" start="0" end="15" type="uint" default="14"/>
+    <field name="Interface Descriptor Offset" start="32" end="37" type="uint"/>
+    <field name="Children Present" start="95" end="95" type="bool"/>
+    <field name="PRT_Fence Needed" start="87" end="87" type="bool"/>
+    <field name="PRT_FenceType" start="86" end="86" type="uint">
+      <value name="Root thread queue" value="0"/>
+      <value name="VFE state flush" value="1"/>
+    </field>
+    <group count="12" start="128" size="32">
+      <field name="Inline Data" start="0" end="31" type="uint"/>
+    </group>
+  </instruction>
+
+  <instruction name="MEDIA_OBJECT_WALKER" bias="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Pipeline" start="27" end="28" type="uint" default="2"/>
+    <field name="Media Command Opcode" start="24" end="26" type="uint" default="1"/>
+    <field name="SubOpcode" start="16" end="23" type="uint" default="3"/>
+    <field name="DWord Length" start="0" end="15" type="uint" default="15"/>
+    <field name="Interface Descriptor Offset" start="32" end="37" type="uint"/>
+    <field name="Children Present" start="95" end="95" type="uint"/>
+    <field name="Thread Synchronization" start="88" end="88" type="uint">
+      <value name="No thread synchronization" value="0"/>
+      <value name="Thread dispatch is synchronized by the 'spawn root thread' message" value="1"/>
+    </field>
+    <field name="Use Scoreboard" start="85" end="85" type="uint">
+      <value name="Not using scoreboard" value="0"/>
+      <value name="Using scoreboard" value="1"/>
+    </field>
+    <field name="Indirect Data Length" start="64" end="80" type="uint"/>
+    <field name="Indirect Data Start Address" start="96" end="127" type="offset"/>
+    <field name="Group ID Loop Select" start="168" end="191" type="uint"/>
+    <field name="Scoreboard Mask" start="160" end="167" type="uint"/>
+    <field name="Color Count Minus One" start="216" end="219" type="uint"/>
+    <field name="Middle Loop Extra Steps" start="208" end="212" type="uint"/>
+    <field name="Local Mid-Loop Unit Y" start="204" end="205" type="int"/>
+    <field name="Mid-Loop Unit X" start="200" end="201" type="int"/>
+    <field name="Global Loop Exec Count" start="240" end="249" type="uint"/>
+    <field name="Local Loop Exec Count" start="224" end="233" type="uint"/>
+    <field name="Block Resolution Y" start="272" end="280" type="uint"/>
+    <field name="Block Resolution X" start="256" end="264" type="uint"/>
+    <field name="Local Start Y" start="304" end="312" type="uint"/>
+    <field name="Local Start X" start="288" end="296" type="uint"/>
+    <field name="Local Outer Loop Stride Y" start="368" end="377" type="int"/>
+    <field name="Local Outer Loop Stride X" start="352" end="361" type="int"/>
+    <field name="Local Inner Loop Unit Y" start="400" end="409" type="int"/>
+    <field name="Local Inner Loop Unit X" start="384" end="393" type="int"/>
+    <field name="Global Resolution Y" start="432" end="440" type="uint"/>
+    <field name="Global Resolution X" start="416" end="424" type="uint"/>
+    <field name="Global Start Y" start="464" end="473" type="int"/>
+    <field name="Global Start X" start="448" end="457" type="int"/>
+    <field name="Global Outer Loop Stride Y" start="496" end="505" type="int"/>
+    <field name="Global Outer Loop Stride X" start="480" end="489" type="int"/>
+    <field name="Global Inner Loop Unit Y" start="528" end="537" type="int"/>
+    <field name="Global Inner Loop Unit X" start="512" end="521" type="int"/>
+    <group count="0" start="544" size="32">
+      <field name="Inline Data" start="0" end="31" type="uint"/>
+    </group>
+  </instruction>
+
+  <instruction name="MEDIA_STATE_FLUSH" bias="2" length="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Pipeline" start="27" end="28" type="uint" default="2"/>
+    <field name="Media Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="SubOpcode" start="16" end="23" type="uint" default="4"/>
+    <field name="DWord Length" start="0" end="15" type="uint" default="0"/>
+    <field name="Flush to GO" start="39" end="39" type="bool"/>
+    <field name="Watermark Required" start="38" end="38" type="uint"/>
+    <field name="Interface Descriptor Offset" start="32" end="37" type="uint"/>
+  </instruction>
+
+  <instruction name="MEDIA_VFE_STATE" bias="2" length="9">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Pipeline" start="27" end="28" type="uint" default="2"/>
+    <field name="Media Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="SubOpcode" start="16" end="23" type="uint" default="0"/>
+    <field name="DWord Length" start="0" end="15" type="uint" default="7"/>
+    <field name="Scratch Space Base Pointer" start="42" end="63" type="offset"/>
+    <field name="Stack Size" start="36" end="39" type="uint"/>
+    <field name="Per Thread Scratch Space" start="32" end="35" type="uint"/>
+    <field name="Scratch Space Base Pointer High" start="64" end="79" type="offset"/>
+    <field name="Maximum Number of Threads" start="112" end="127" type="uint"/>
+    <field name="Number of URB Entries" start="104" end="111" type="uint"/>
+    <field name="Reset Gateway Timer" start="103" end="103" type="uint">
+      <value name="Maintaining the existing timestamp state" value="0"/>
+      <value name="Resetting relative timer and latching the global timestamp" value="1"/>
+    </field>
+    <field name="Bypass Gateway Control" start="102" end="102" type="uint">
+      <value name="Maintaining OpenGateway/ForwardMsg/CloseGateway protocol (legacy mode)" value="0"/>
+      <value name="Bypassing OpenGateway/CloseGateway protocol" value="1"/>
+    </field>
+    <field name="Slice Disable" start="128" end="129" type="uint">
+      <value name="All Subslices Enabled" value="0"/>
+      <value name="Only Slice 0 Enabled" value="1"/>
+      <value name="Only Slice 0 Subslice 0 Enabled" value="3"/>
+    </field>
+    <field name="URB Entry Allocation Size" start="176" end="191" type="uint"/>
+    <field name="CURBE Allocation Size" start="160" end="175" type="uint"/>
+    <field name="Scoreboard Enable" start="223" end="223" type="bool"/>
+    <field name="Scoreboard Type" start="222" end="222" type="uint">
+      <value name="Stalling Scoreboard" value="0"/>
+      <value name="Non-Stalling Scoreboard" value="1"/>
+    </field>
+    <field name="Scoreboard Mask" start="192" end="199" type="uint"/>
+    <field name="Scoreboard 3 Delta Y" start="252" end="255" type="int"/>
+    <field name="Scoreboard 3 Delta X" start="248" end="251" type="int"/>
+    <field name="Scoreboard 2 Delta Y" start="244" end="247" type="int"/>
+    <field name="Scoreboard 2 Delta X" start="240" end="243" type="int"/>
+    <field name="Scoreboard 1 Delta Y" start="236" end="239" type="int"/>
+    <field name="Scoreboard 1 Delta X" start="232" end="235" type="int"/>
+    <field name="Scoreboard 0 Delta Y" start="228" end="231" type="int"/>
+    <field name="Scoreboard 0 Delta X" start="224" end="227" type="int"/>
+    <field name="Scoreboard 7 Delta Y" start="284" end="287" type="int"/>
+    <field name="Scoreboard 7 Delta X" start="280" end="283" type="int"/>
+    <field name="Scoreboard 6 Delta Y" start="276" end="279" type="int"/>
+    <field name="Scoreboard 6 Delta X" start="272" end="275" type="int"/>
+    <field name="Scoreboard 5 Delta Y" start="268" end="271" type="int"/>
+    <field name="Scoreboard 5 Delta X" start="264" end="267" type="int"/>
+    <field name="Scoreboard 4 Delta Y" start="260" end="263" type="int"/>
+    <field name="Scoreboard 4 Delta X" start="256" end="259" type="int"/>
+  </instruction>
+
+  <instruction name="MI_ARB_CHECK" bias="1" length="1">
+    <field name="Command Type" start="29" end="31" type="uint" default="0"/>
+    <field name="MI Command Opcode" start="23" end="28" type="uint" default="5"/>
+  </instruction>
+
+  <instruction name="MI_ATOMIC" bias="2" length="3">
+    <field name="Command Type" start="29" end="31" type="uint" default="0"/>
+    <field name="MI Command Opcode" start="23" end="28" type="uint" default="47"/>
+    <field name="Memory Type" start="22" end="22" type="uint">
+      <value name="Per Process Graphics Address" value="0"/>
+      <value name="Global Graphics Address" value="1"/>
+    </field>
+    <field name="Post-Sync Operation" start="21" end="21" type="bool"/>
+    <field name="Data Size" start="19" end="20" type="uint">
+      <value name="DWORD" value="0"/>
+      <value name="QWORD" value="1"/>
+      <value name="OCTWORD" value="2"/>
+      <value name="RESERVED" value="3"/>
+    </field>
+    <field name="Inline Data" start="18" end="18" type="uint"/>
+    <field name="CS STALL" start="17" end="17" type="uint"/>
+    <field name="Return Data Control" start="16" end="16" type="uint"/>
+    <field name="ATOMIC OPCODE" start="8" end="15" type="uint"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="1"/>
+    <field name="Memory Address" start="34" end="79" type="address"/>
+    <field name="Operand1 Data Dword 0" start="96" end="127" type="uint"/>
+    <field name="Operand2 Data Dword 0" start="128" end="159" type="uint"/>
+    <field name="Operand1 Data Dword 1" start="160" end="191" type="uint"/>
+    <field name="Operand2 Data Dword 1" start="192" end="223" type="uint"/>
+    <field name="Operand1 Data Dword 2" start="224" end="255" type="uint"/>
+    <field name="Operand2 Data Dword 2" start="256" end="287" type="uint"/>
+    <field name="Operand1 Data Dword 3" start="288" end="319" type="uint"/>
+    <field name="Operand2 Data Dword 3" start="320" end="351" type="uint"/>
+  </instruction>
+
+  <instruction name="MI_BATCH_BUFFER_END" bias="1" length="1">
+    <field name="Command Type" start="29" end="31" type="uint" default="0"/>
+    <field name="MI Command Opcode" start="23" end="28" type="uint" default="10"/>
+  </instruction>
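+  <!-- Single-DWord MI commands (MI_NOOP, MI_ARB_CHECK, MI_BATCH_BUFFER_END,
+       MI_REPORT_HEAD, ...) use bias="1" and carry no DWord Length field, so
+       their length is implied by the opcode alone; the bias="2" packets in
+       this file instead encode their total length minus two in DWord Length,
+       e.g. MI_BATCH_BUFFER_START below is 3 DWords with a default of 1. -->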
+
+  <instruction name="MI_BATCH_BUFFER_START" bias="2" length="3">
+    <field name="Command Type" start="29" end="31" type="uint" default="0"/>
+    <field name="MI Command Opcode" start="23" end="28" type="uint" default="49"/>
+    <field name="2nd Level Batch Buffer" start="22" end="22" type="uint">
+      <value name="1st level batch" value="0"/>
+      <value name="2nd level batch" value="1"/>
+    </field>
+    <field name="Add Offset Enable" start="16" end="16" type="bool"/>
+    <field name="Predication Enable" start="15" end="15" type="uint"/>
+    <field name="Resource Streamer Enable" start="10" end="10" type="bool"/>
+    <field name="Address Space Indicator" start="8" end="8" type="uint" prefix="ASI">
+      <value name="GGTT" value="0"/>
+      <value name="PPGTT" value="1"/>
+    </field>
+    <field name="DWord Length" start="0" end="7" type="uint" default="1"/>
+    <field name="Batch Buffer Start Address" start="34" end="79" type="address"/>
+  </instruction>
+
+  <instruction name="MI_CLFLUSH" bias="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="0"/>
+    <field name="MI Command Opcode" start="23" end="28" type="uint" default="39"/>
+    <field name="Use Global GTT" start="22" end="22" type="uint">
+      <value name="Per Process Graphics Address" value="0"/>
+      <value name="Global Graphics Address" value="1"/>
+    </field>
+    <field name="DWord Length" start="0" end="9" type="uint" default="1"/>
+    <field name="Page Base Address" start="44" end="79" type="address"/>
+    <field name="Starting Cacheline Offset" start="38" end="43" type="uint"/>
+    <group count="0" start="96" size="32">
+      <field name="DW Representing a Half Cache Line" start="0" end="31" type="uint"/>
+    </group>
+  </instruction>
+
+  <instruction name="MI_CONDITIONAL_BATCH_BUFFER_END" bias="2" length="3">
+    <field name="Command Type" start="29" end="31" type="uint" default="0"/>
+    <field name="MI Command Opcode" start="23" end="28" type="uint" default="54"/>
+    <field name="Use Global GTT" start="22" end="22" type="uint" default="0"/>
+    <field name="Compare Semaphore" start="21" end="21" type="uint" default="0"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="1"/>
+    <field name="Compare Data Dword" start="32" end="63" type="uint"/>
+    <field name="Compare Address" start="67" end="111" type="address"/>
+  </instruction>
+
+  <instruction name="MI_COPY_MEM_MEM" bias="2" length="5">
+    <field name="Command Type" start="29" end="31" type="uint" default="0"/>
+    <field name="MI Command Opcode" start="23" end="28" type="uint" default="46"/>
+    <field name="Use Global GTT Source" start="22" end="22" type="uint">
+      <value name="Per Process Graphics Address" value="0"/>
+      <value name="Global Graphics Address" value="1"/>
+    </field>
+    <field name="Use Global GTT Destination" start="21" end="21" type="uint">
+      <value name="Per Process Graphics Address" value="0"/>
+      <value name="Global Graphics Address" value="1"/>
+    </field>
+    <field name="DWord Length" start="0" end="7" type="uint" default="3"/>
+    <field name="Destination Memory Address" start="34" end="95" type="address"/>
+    <field name="Source Memory Address" start="98" end="159" type="address"/>
+  </instruction>
+
+  <instruction name="MI_LOAD_REGISTER_IMM" bias="2" length="3">
+    <field name="Command Type" start="29" end="31" type="uint" default="0"/>
+    <field name="MI Command Opcode" start="23" end="28" type="uint" default="34"/>
+    <field name="Byte Write Disables" start="8" end="11" type="uint"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="1"/>
+    <field name="Register Offset" start="34" end="54" type="offset"/>
+    <field name="Data DWord" start="64" end="95" type="uint"/>
+  </instruction>
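+  <!-- Bit numbering: "start"/"end" count bits from bit 0 of the packet, so
+       bits 0..31 are DWord 0, 32..63 are DWord 1, and so on.  As a worked
+       example, "Register Offset" in MI_LOAD_REGISTER_IMM above (bits 34..54)
+       sits in DWord 1 at bits 2..22, and "Data DWord" occupies all of
+       DWord 2 (bits 64..95). -->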
+
+  <instruction name="MI_LOAD_REGISTER_MEM" bias="2" length="4">
+    <field name="Command Type" start="29" end="31" type="uint" default="0"/>
+    <field name="MI Command Opcode" start="23" end="28" type="uint" default="41"/>
+    <field name="Use Global GTT" start="22" end="22" type="uint"/>
+    <field name="Async Mode Enable" start="21" end="21" type="bool"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="2"/>
+    <field name="Register Address" start="34" end="54" type="offset"/>
+    <field name="Memory Address" start="66" end="127" type="address"/>
+  </instruction>
+
+  <instruction name="MI_LOAD_REGISTER_REG" bias="2" length="3">
+    <field name="Command Type" start="29" end="31" type="uint" default="0"/>
+    <field name="MI Command Opcode" start="23" end="28" type="uint" default="42"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="1"/>
+    <field name="Source Register Address" start="34" end="54" type="offset"/>
+    <field name="Destination Register Address" start="66" end="86" type="offset"/>
+  </instruction>
+
+  <instruction name="MI_LOAD_SCAN_LINES_EXCL" bias="2" length="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="0"/>
+    <field name="MI Command Opcode" start="23" end="28" type="uint" default="19"/>
+    <field name="Display (Plane) Select" start="19" end="21" type="uint">
+      <value name="Display Plane A" value="0"/>
+      <value name="Display Plane B" value="1"/>
+      <value name="Display Plane C" value="4"/>
+    </field>
+    <field name="DWord Length" start="0" end="5" type="uint" default="0"/>
+    <field name="Start Scan Line Number" start="48" end="60" type="uint"/>
+    <field name="End Scan Line Number" start="32" end="44" type="uint"/>
+  </instruction>
+
+  <instruction name="MI_LOAD_SCAN_LINES_INCL" bias="2" length="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="0"/>
+    <field name="MI Command Opcode" start="23" end="28" type="uint" default="18"/>
+    <field name="Display (Plane) Select" start="19" end="21" type="uint">
+      <value name="Display Plane A" value="0"/>
+      <value name="Display Plane B" value="1"/>
+      <value name="Display Plane C" value="4"/>
+    </field>
+    <field name="Scan Line Event Done Forward" start="17" end="18" type="bool"/>
+    <field name="DWord Length" start="0" end="5" type="uint" default="0"/>
+    <field name="Start Scan Line Number" start="48" end="60" type="uint"/>
+    <field name="End Scan Line Number" start="32" end="44" type="uint"/>
+  </instruction>
+
+  <instruction name="MI_LOAD_URB_MEM" bias="2" length="4">
+    <field name="Command Type" start="29" end="31" type="uint" default="0"/>
+    <field name="MI Command Opcode" start="23" end="28" type="uint" default="44"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="2"/>
+    <field name="URB Address" start="34" end="46" type="uint"/>
+    <field name="Memory Address" start="70" end="127" type="address"/>
+  </instruction>
+
+  <instruction name="MI_MATH" bias="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="0"/>
+    <field name="MI Command Opcode" start="23" end="28" type="uint" default="26"/>
+    <field name="DWord Length" start="0" end="5" type="uint" default="0"/>
+    <field name="ALU INSTRUCTION 1" start="32" end="63" type="uint"/>
+    <field name="ALU INSTRUCTION 2" start="64" end="95" type="uint"/>
+    <group count="0" start="96" size="32">
+      <field name="ALU INSTRUCTION n" start="0" end="31" type="uint"/>
+    </group>
+  </instruction>
+
+  <instruction name="MI_NOOP" bias="1" length="1">
+    <field name="Command Type" start="29" end="31" type="uint" default="0"/>
+    <field name="MI Command Opcode" start="23" end="28" type="uint" default="0"/>
+    <field name="Identification Number Register Write Enable" start="22" end="22" type="bool"/>
+    <field name="Identification Number" start="0" end="21" type="uint"/>
+  </instruction>
+
+  <instruction name="MI_PREDICATE" bias="1" length="1">
+    <field name="Command Type" start="29" end="31" type="uint" default="0"/>
+    <field name="MI Command Opcode" start="23" end="28" type="uint" default="12"/>
+    <field name="Load Operation" start="6" end="7" type="uint" prefix="LOAD">
+      <value name="KEEP" value="0"/>
+      <value name="LOAD" value="2"/>
+      <value name="LOADINV" value="3"/>
+    </field>
+    <field name="Combine Operation" start="3" end="4" type="uint" prefix="COMBINE">
+      <value name="SET" value="0"/>
+      <value name="AND" value="1"/>
+      <value name="OR" value="2"/>
+      <value name="XOR" value="3"/>
+    </field>
+    <field name="Compare Operation" start="0" end="1" type="uint" prefix="COMPARE">
+      <value name="SRCS_EQUAL" value="2"/>
+      <value name="DELTAS_EQUAL" value="3"/>
+    </field>
+  </instruction>
+
+  <instruction name="MI_REPORT_HEAD" bias="1" length="1">
+    <field name="Command Type" start="29" end="31" type="uint" default="0"/>
+    <field name="MI Command Opcode" start="23" end="28" type="uint" default="7"/>
+  </instruction>
+
+  <instruction name="MI_REPORT_PERF_COUNT" bias="2" length="4">
+    <field name="Command Type" start="29" end="31" type="uint" default="0"/>
+    <field name="MI Command Opcode" start="23" end="28" type="uint" default="40"/>
+    <field name="DWord Length" start="0" end="5" type="uint" default="2"/>
+    <field name="Memory Address" start="38" end="95" type="address"/>
+    <field name="Core Mode Enable" start="36" end="36" type="uint"/>
+    <field name="Use Global GTT" start="32" end="32" type="uint"/>
+    <field name="Report ID" start="96" end="127" type="uint"/>
+  </instruction>
+
+  <instruction name="MI_RS_CONTEXT" bias="1" length="1">
+    <field name="Command Type" start="29" end="31" type="uint" default="0"/>
+    <field name="MI Command Opcode" start="23" end="28" type="uint" default="15"/>
+    <field name="Resource Streamer Save" start="0" end="0" type="uint" prefix="RS">
+      <value name="Restore" value="0"/>
+      <value name="Save" value="1"/>
+    </field>
+  </instruction>
+
+  <instruction name="MI_RS_CONTROL" bias="1" length="1">
+    <field name="Command Type" start="29" end="31" type="uint" default="0"/>
+    <field name="MI Command Opcode" start="23" end="28" type="uint" default="6"/>
+    <field name="Resource Streamer Control" start="0" end="0" type="uint" prefix="RS">
+      <value name="Stop" value="0"/>
+      <value name="Start" value="1"/>
+    </field>
+  </instruction>
+
+  <instruction name="MI_RS_STORE_DATA_IMM" bias="2" length="4">
+    <field name="Command Type" start="29" end="31" type="uint" default="0"/>
+    <field name="MI Command Opcode" start="23" end="28" type="uint" default="43"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="2"/>
+    <field name="Destination Address" start="34" end="95" type="address"/>
+    <field name="Core Mode Enable" start="32" end="32" type="uint"/>
+    <field name="Data DWord 0" start="96" end="127" type="uint"/>
+  </instruction>
+
+  <instruction name="MI_SEMAPHORE_SIGNAL" bias="2" length="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="0"/>
+    <field name="MI Command Opcode" start="23" end="28" type="uint" default="27"/>
+    <field name="Post-Sync Operation" start="21" end="21" type="bool"/>
+    <field name="Target Engine Select" start="15" end="17" type="uint">
+      <value name="RCS" value="0"/>
+      <value name="VCS0" value="1"/>
+      <value name="BCS" value="2"/>
+      <value name="VECS" value="3"/>
+      <value name="VCS1" value="4"/>
+    </field>
+    <field name="DWord Length" start="0" end="7" type="uint" default="0"/>
+    <field name="Target Context ID" start="32" end="63" type="uint"/>
+  </instruction>
+
+  <instruction name="MI_SEMAPHORE_WAIT" bias="2" length="4">
+    <field name="Command Type" start="29" end="31" type="uint" default="0"/>
+    <field name="MI Command Opcode" start="23" end="28" type="uint" default="28"/>
+    <field name="Memory Type" start="22" end="22" type="uint">
+      <value name="Per Process Graphics Address" value="0"/>
+      <value name="Global Graphics Address" value="1"/>
+    </field>
+    <field name="Wait Mode" start="15" end="15" type="uint">
+      <value name="Polling Mode" value="1"/>
+      <value name="Signal Mode" value="0"/>
+    </field>
+    <field name="Compare Operation" start="12" end="14" type="uint" prefix="COMPARE">
+      <value name="SAD_GREATER_THAN_SDD" value="0"/>
+      <value name="SAD_GREATER_THAN_OR_EQUAL_SDD" value="1"/>
+      <value name="SAD_LESS_THAN_SDD" value="2"/>
+      <value name="SAD_LESS_THAN_OR_EQUAL_SDD" value="3"/>
+      <value name="SAD_EQUAL_SDD" value="4"/>
+      <value name="SAD_NOT_EQUAL_SDD" value="5"/>
+    </field>
+    <field name="DWord Length" start="0" end="7" type="uint" default="2"/>
+    <field name="Semaphore Data Dword" start="32" end="63" type="uint"/>
+    <field name="Semaphore Address" start="66" end="95" type="address"/>
+    <field name="Semaphore Address High" start="96" end="111" type="address"/>
+  </instruction>
+
+  <instruction name="MI_SET_CONTEXT" bias="2" length="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="0"/>
+    <field name="MI Command Opcode" start="23" end="28" type="uint" default="24"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="0"/>
+    <field name="Logical Context Address" start="44" end="63" type="address"/>
+    <field name="Reserved, Must be 1" start="40" end="40" type="uint"/>
+    <field name="Core Mode Enable" start="36" end="36" type="bool"/>
+    <field name="Resource Streamer State Save Enable" start="35" end="35" type="bool"/>
+    <field name="Resource Streamer State Restore Enable" start="34" end="34" type="bool"/>
+    <field name="Force Restore" start="33" end="33" type="uint"/>
+    <field name="Restore Inhibit" start="32" end="32" type="uint"/>
+  </instruction>
+
+  <instruction name="MI_SET_PREDICATE" bias="1" length="1">
+    <field name="Command Type" start="29" end="31" type="uint" default="0"/>
+    <field name="MI Command Opcode" start="23" end="28" type="uint" default="1"/>
+    <field name="PREDICATE ENABLE" start="0" end="3" type="uint">
+      <value name="NOOP Never" value="0"/>
+      <value name="NOOP on Result2 clear" value="1"/>
+      <value name="NOOP on Result2 set" value="2"/>
+      <value name="NOOP on Result clear" value="3"/>
+      <value name="NOOP on Result set" value="4"/>
+      <value name="Execute when one slice enabled." value="5"/>
+      <value name="Execute when two slices are enabled." value="6"/>
+      <value name="Execute when three slices are enabled." value="7"/>
+      <value name="NOOP Always" value="15"/>
+    </field>
+  </instruction>
+
+  <instruction name="MI_STORE_DATA_IMM" bias="2" length="4">
+    <field name="Command Type" start="29" end="31" type="uint" default="0"/>
+    <field name="MI Command Opcode" start="23" end="28" type="uint" default="32"/>
+    <field name="Use Global GTT" start="22" end="22" type="uint"/>
+    <field name="Store Qword" start="21" end="21" type="uint"/>
+    <field name="DWord Length" start="0" end="9" type="uint" default="2"/>
+    <field name="Address" start="34" end="79" type="address"/>
+    <field name="Core Mode Enable" start="32" end="32" type="uint"/>
+    <field name="Data DWord 0" start="96" end="127" type="uint"/>
+    <field name="Data DWord 1" start="128" end="159" type="uint"/>
+  </instruction>
+
+  <instruction name="MI_STORE_DATA_INDEX" bias="2" length="3">
+    <field name="Command Type" start="29" end="31" type="uint" default="0"/>
+    <field name="MI Command Opcode" start="23" end="28" type="uint" default="33"/>
+    <field name="Use Per-Process Hardware Status Page" start="21" end="21" type="uint"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="1"/>
+    <field name="Offset" start="34" end="43" type="uint"/>
+    <field name="Data DWord 0" start="64" end="95" type="uint"/>
+    <field name="Data DWord 1" start="96" end="127" type="uint"/>
+  </instruction>
+
+  <instruction name="MI_STORE_REGISTER_MEM" bias="2" length="4">
+    <field name="Command Type" start="29" end="31" type="uint" default="0"/>
+    <field name="MI Command Opcode" start="23" end="28" type="uint" default="36"/>
+    <field name="Use Global GTT" start="22" end="22" type="uint"/>
+    <field name="Predicate Enable" start="21" end="21" type="bool"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="2"/>
+    <field name="Register Address" start="34" end="54" type="offset"/>
+    <field name="Memory Address" start="66" end="127" type="address"/>
+  </instruction>
+
+  <instruction name="MI_STORE_URB_MEM" bias="2" length="4">
+    <field name="Command Type" start="29" end="31" type="uint" default="0"/>
+    <field name="MI Command Opcode" start="23" end="28" type="uint" default="45"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="2"/>
+    <field name="URB Address" start="34" end="46" type="uint"/>
+    <field name="Memory Address" start="70" end="127" type="address"/>
+  </instruction>
+
+  <instruction name="MI_SUSPEND_FLUSH" bias="1" length="1">
+    <field name="Command Type" start="29" end="31" type="uint" default="0"/>
+    <field name="MI Command Opcode" start="23" end="28" type="uint" default="11"/>
+    <field name="Suspend Flush" start="0" end="0" type="bool"/>
+  </instruction>
+
+  <instruction name="MI_TOPOLOGY_FILTER" bias="1" length="1">
+    <field name="Command Type" start="29" end="31" type="uint" default="0"/>
+    <field name="MI Command Opcode" start="23" end="28" type="uint" default="13"/>
+    <field name="Topology Filter Value" start="0" end="5" type="uint"/>
+  </instruction>
+
+  <instruction name="MI_URB_ATOMIC_ALLOC" bias="1" length="1">
+    <field name="Command Type" start="29" end="31" type="uint" default="0"/>
+    <field name="MI Command Opcode" start="23" end="28" type="uint" default="9"/>
+    <field name="URB Atomic Storage Offset" start="12" end="19" type="uint"/>
+    <field name="URB Atomic Storage Size" start="0" end="8" type="uint"/>
+  </instruction>
+
+  <instruction name="MI_URB_CLEAR" bias="2" length="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="0"/>
+    <field name="MI Command Opcode" start="23" end="28" type="uint" default="25"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="0"/>
+    <field name="URB Clear Length" start="48" end="61" type="uint"/>
+    <field name="URB Address" start="32" end="46" type="offset"/>
+  </instruction>
+
+  <instruction name="MI_USER_INTERRUPT" bias="1" length="1">
+    <field name="Command Type" start="29" end="31" type="uint" default="0"/>
+    <field name="MI Command Opcode" start="23" end="28" type="uint" default="2"/>
+  </instruction>
+
+  <instruction name="MI_WAIT_FOR_EVENT" bias="1" length="1">
+    <field name="Command Type" start="29" end="31" type="uint" default="0"/>
+    <field name="MI Command Opcode" start="23" end="28" type="uint" default="3"/>
+    <field name="Display Pipe C Vertical Blank Wait Enable" start="21" end="21" type="bool"/>
+    <field name="Display Sprite C Flip Pending Wait Enable" start="20" end="20" type="bool"/>
+    <field name="Display Plane C Flip Pending Wait Enable" start="15" end="15" type="bool"/>
+    <field name="Display Pipe C Scan Line Wait Enable" start="14" end="14" type="bool"/>
+    <field name="Display Pipe B Vertical Blank Wait Enable" start="11" end="11" type="bool"/>
+    <field name="Display Sprite B Flip Pending Wait Enable" start="10" end="10" type="bool"/>
+    <field name="Display Plane B Flip Pending Wait Enable" start="9" end="9" type="bool"/>
+    <field name="Display Pipe B Scan Line Wait Enable" start="8" end="8" type="bool"/>
+    <field name="Display Pipe A Vertical Blank Wait Enable" start="3" end="3" type="bool"/>
+    <field name="Display Sprite A Flip Pending Wait Enable" start="2" end="2" type="bool"/>
+    <field name="Display Plane A Flip Pending Wait Enable" start="1" end="1" type="bool"/>
+    <field name="Display Pipe A Scan Line Wait Enable" start="0" end="0" type="bool"/>
+  </instruction>
+
+  <instruction name="PIPELINE_SELECT" bias="1" length="1">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="1"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="1"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="4"/>
+    <field name="Pipeline Selection" start="0" end="1" type="uint">
+      <value name="3D" value="0"/>
+      <value name="Media" value="1"/>
+      <value name="GPGPU" value="2"/>
+    </field>
+  </instruction>
+
+  <instruction name="PIPE_CONTROL" bias="2" length="6">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="2"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="0"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="4"/>
+    <field name="Destination Address Type" start="56" end="56" type="uint" prefix="DAT">
+      <value name="PPGTT" value="0"/>
+      <value name="GGTT" value="1"/>
+    </field>
+    <field name="LRI Post Sync Operation" start="55" end="55" type="uint">
+      <value name="No LRI Operation" value="0"/>
+      <value name="MMIO Write Immediate Data" value="1"/>
+    </field>
+    <field name="Store Data Index" start="53" end="53" type="uint"/>
+    <field name="Command Streamer Stall Enable" start="52" end="52" type="uint"/>
+    <field name="Global Snapshot Count Reset" start="51" end="51" type="uint">
+      <value name="Don't Reset" value="0"/>
+      <value name="Reset" value="1"/>
+    </field>
+    <field name="TLB Invalidate" start="50" end="50" type="uint"/>
+    <field name="Generic Media State Clear" start="48" end="48" type="bool"/>
+    <field name="Post Sync Operation" start="46" end="47" type="uint">
+      <value name="No Write" value="0"/>
+      <value name="Write Immediate Data" value="1"/>
+      <value name="Write PS Depth Count" value="2"/>
+      <value name="Write Timestamp" value="3"/>
+    </field>
+    <field name="Depth Stall Enable" start="45" end="45" type="bool"/>
+    <field name="Render Target Cache Flush Enable" start="44" end="44" type="bool"/>
+    <field name="Instruction Cache Invalidate Enable" start="43" end="43" type="bool"/>
+    <field name="Texture Cache Invalidation Enable" start="42" end="42" type="bool"/>
+    <field name="Indirect State Pointers Disable" start="41" end="41" type="bool"/>
+    <field name="Notify Enable" start="40" end="40" type="bool"/>
+    <field name="Pipe Control Flush Enable" start="39" end="39" type="bool"/>
+    <field name="DC  Flush Enable" start="37" end="37" type="bool"/>
+    <field name="VF Cache Invalidation Enable" start="36" end="36" type="bool"/>
+    <field name="Constant Cache Invalidation Enable" start="35" end="35" type="bool"/>
+    <field name="State Cache Invalidation Enable" start="34" end="34" type="bool"/>
+    <field name="Stall At Pixel Scoreboard" start="33" end="33" type="bool"/>
+    <field name="Depth Cache Flush Enable" start="32" end="32" type="bool"/>
+    <field name="Address" start="66" end="111" type="address"/>
+    <field name="Immediate Data" start="128" end="191" type="uint"/>
+  </instruction>
+
+  <instruction name="STATE_BASE_ADDRESS" bias="2" length="16">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="0"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="1"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="1"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="14"/>
+    <field name="General State Base Address" start="44" end="95" type="address"/>
+    <field name="General State Memory Object Control State" start="36" end="42" type="MEMORY_OBJECT_CONTROL_STATE"/>
+    <field name="General State Base Address Modify Enable" start="32" end="32" type="bool"/>
+    <field name="Stateless Data Port Access Memory Object Control State" start="112" end="118" type="MEMORY_OBJECT_CONTROL_STATE"/>
+    <field name="Surface State Base Address" start="140" end="191" type="address"/>
+    <field name="Surface State Memory Object Control State" start="132" end="138" type="MEMORY_OBJECT_CONTROL_STATE"/>
+    <field name="Surface State Base Address Modify Enable" start="128" end="128" type="bool"/>
+    <field name="Dynamic State Base Address" start="204" end="255" type="address"/>
+    <field name="Dynamic State Memory Object Control State" start="196" end="202" type="MEMORY_OBJECT_CONTROL_STATE"/>
+    <field name="Dynamic State Base Address Modify Enable" start="192" end="192" type="bool"/>
+    <field name="Indirect Object Base Address" start="268" end="319" type="address"/>
+    <field name="Indirect Object Memory Object Control State" start="260" end="266" type="MEMORY_OBJECT_CONTROL_STATE"/>
+    <field name="Indirect Object Base Address Modify Enable" start="256" end="256" type="bool"/>
+    <field name="Instruction Base Address" start="332" end="383" type="address"/>
+    <field name="Instruction Memory Object Control State" start="324" end="330" type="MEMORY_OBJECT_CONTROL_STATE"/>
+    <field name="Instruction Base Address Modify Enable" start="320" end="320" type="bool"/>
+    <field name="General State Buffer Size" start="396" end="415" type="uint"/>
+    <field name="General State Buffer Size Modify Enable" start="384" end="384" type="bool"/>
+    <field name="Dynamic State Buffer Size" start="428" end="447" type="uint"/>
+    <field name="Dynamic State Buffer Size Modify Enable" start="416" end="416" type="bool"/>
+    <field name="Indirect Object Buffer Size" start="460" end="479" type="uint"/>
+    <field name="Indirect Object Buffer Size Modify Enable" start="448" end="448" type="bool"/>
+    <field name="Instruction Buffer Size" start="492" end="511" type="uint"/>
+    <field name="Instruction Buffer size Modify Enable" start="480" end="480" type="bool"/>
+  </instruction>
+
+  <instruction name="STATE_PREFETCH" bias="2" length="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="0"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="3"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="0"/>
+    <field name="Prefetch Pointer" start="38" end="63" type="address"/>
+    <field name="Prefetch Count" start="32" end="34" type="uint"/>
+  </instruction>
+
+  <instruction name="STATE_SIP" bias="2" length="3">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="0"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="1"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="2"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="1"/>
+    <field name="System Instruction Pointer" start="36" end="95" type="offset"/>
+  </instruction>
+
+  <instruction name="SWTESS_BASE_ADDRESS" bias="2" length="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="0"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="1"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="3"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="0"/>
+    <field name="SW Tessellation Base Address" start="44" end="79" type="address"/>
+    <field name="SW Tessellation Memory Object Control State" start="40" end="43" type="MEMORY_OBJECT_CONTROL_STATE"/>
+  </instruction>
+
+</genxml>
diff --git a/src/intel/genxml/gen9.xml b/src/intel/genxml/gen9.xml
new file mode 100644 (file)
index 0000000..79d3006
--- /dev/null
@@ -0,0 +1,3470 @@
+<genxml name="SKL" gen="9">
+  <struct name="3DSTATE_CONSTANT_BODY" length="10">
+    <field name="Constant Buffer 1 Read Length" start="16" end="31" type="uint"/>
+    <field name="Constant Buffer 0 Read Length" start="0" end="15" type="uint"/>
+    <field name="Constant Buffer 3 Read Length" start="48" end="63" type="uint"/>
+    <field name="Constant Buffer 2 Read Length" start="32" end="47" type="uint"/>
+    <field name="Pointer To Constant Buffer 0" start="69" end="127" type="address"/>
+    <field name="Pointer To Constant Buffer 1" start="133" end="191" type="address"/>
+    <field name="Pointer To Constant Buffer 2" start="197" end="255" type="address"/>
+    <field name="Pointer To Constant Buffer 3" start="261" end="319" type="address"/>
+  </struct>
+
+  <struct name="BINDING_TABLE_EDIT_ENTRY" length="1">
+    <field name="Binding Table Index" start="16" end="23" type="uint"/>
+    <field name="Surface State Pointer" start="0" end="15" type="offset"/>
+  </struct>
+
+  <struct name="GATHER_CONSTANT_ENTRY" length="1">
+    <field name="Constant Buffer Offset" start="8" end="15" type="offset"/>
+    <field name="Channel Mask" start="4" end="7" type="uint"/>
+    <field name="Binding Table Index Offset" start="0" end="3" type="uint"/>
+  </struct>
+
+  <struct name="MEMORY_OBJECT_CONTROL_STATE" length="1">
+    <field name="Index to MOCS Tables" start="1" end="6" type="uint"/>
+  </struct>
+
+  <struct name="VERTEX_BUFFER_STATE" length="4">
+    <field name="Vertex Buffer Index" start="26" end="31" type="uint"/>
+    <field name="Memory Object Control State" start="16" end="22" type="MEMORY_OBJECT_CONTROL_STATE"/>
+    <field name="Address Modify Enable" start="14" end="14" type="uint"/>
+    <field name="Null Vertex Buffer" start="13" end="13" type="bool"/>
+    <field name="Buffer Pitch" start="0" end="11" type="uint"/>
+    <field name="Buffer Starting Address" start="32" end="95" type="address"/>
+    <field name="Buffer Size" start="96" end="127" type="uint"/>
+  </struct>
+
+  <struct name="VERTEX_ELEMENT_STATE" length="2">
+    <field name="Vertex Buffer Index" start="26" end="31" type="uint"/>
+    <field name="Valid" start="25" end="25" type="uint"/>
+    <field name="Source Element Format" start="16" end="24" type="uint"/>
+    <field name="Edge Flag Enable" start="15" end="15" type="bool"/>
+    <field name="Source Element Offset" start="0" end="11" type="uint"/>
+    <field name="Component 0 Control" start="60" end="62" type="uint"/>
+    <field name="Component 1 Control" start="56" end="58" type="uint"/>
+    <field name="Component 2 Control" start="52" end="54" type="uint"/>
+    <field name="Component 3 Control" start="48" end="50" type="uint"/>
+  </struct>
+
+  <struct name="SO_DECL" length="1">
+    <field name="Output Buffer Slot" start="12" end="13" type="uint"/>
+    <field name="Hole Flag" start="11" end="11" type="uint"/>
+    <field name="Register Index" start="4" end="9" type="uint"/>
+    <field name="Component Mask" start="0" end="3" type="uint" default="0"/>
+  </struct>
+
+  <struct name="SO_DECL_ENTRY" length="2">
+    <field name="Stream 3 Decl" start="48" end="63" type="SO_DECL"/>
+    <field name="Stream 2 Decl" start="32" end="47" type="SO_DECL"/>
+    <field name="Stream 1 Decl" start="16" end="31" type="SO_DECL"/>
+    <field name="Stream 0 Decl" start="0" end="15" type="SO_DECL"/>
+  </struct>
+
+  <struct name="SF_OUTPUT_ATTRIBUTE_DETAIL" length="1">
+    <field name="Component Override W" start="15" end="15" type="bool"/>
+    <field name="Component Override Z" start="14" end="14" type="bool"/>
+    <field name="Component Override Y" start="13" end="13" type="bool"/>
+    <field name="Component Override X" start="12" end="12" type="bool"/>
+    <field name="Swizzle Control Mode" start="11" end="11" type="uint"/>
+    <field name="Constant Source" start="9" end="10" type="uint">
+      <value name="CONST_0000" value="0"/>
+      <value name="CONST_0001_FLOAT" value="1"/>
+      <value name="CONST_1111_FLOAT" value="2"/>
+      <value name="PRIM_ID" value="3"/>
+    </field>
+    <field name="Swizzle Select" start="6" end="7" type="uint">
+      <value name="INPUTATTR" value="0"/>
+      <value name="INPUTATTR_FACING" value="1"/>
+      <value name="INPUTATTR_W" value="2"/>
+      <value name="INPUTATTR_FACING_W" value="3"/>
+    </field>
+    <field name="Source Attribute" start="0" end="4" type="uint"/>
+  </struct>
+
+  <struct name="SCISSOR_RECT" length="2">
+    <field name="Scissor Rectangle Y Min" start="16" end="31" type="uint"/>
+    <field name="Scissor Rectangle X Min" start="0" end="15" type="uint"/>
+    <field name="Scissor Rectangle Y Max" start="48" end="63" type="uint"/>
+    <field name="Scissor Rectangle X Max" start="32" end="47" type="uint"/>
+  </struct>
+
+  <struct name="SF_CLIP_VIEWPORT" length="16">
+    <field name="Viewport Matrix Element m00" start="0" end="31" type="float"/>
+    <field name="Viewport Matrix Element m11" start="32" end="63" type="float"/>
+    <field name="Viewport Matrix Element m22" start="64" end="95" type="float"/>
+    <field name="Viewport Matrix Element m30" start="96" end="127" type="float"/>
+    <field name="Viewport Matrix Element m31" start="128" end="159" type="float"/>
+    <field name="Viewport Matrix Element m32" start="160" end="191" type="float"/>
+    <field name="X Min Clip Guardband" start="256" end="287" type="float"/>
+    <field name="X Max Clip Guardband" start="288" end="319" type="float"/>
+    <field name="Y Min Clip Guardband" start="320" end="351" type="float"/>
+    <field name="Y Max Clip Guardband" start="352" end="383" type="float"/>
+    <field name="X Min ViewPort" start="384" end="415" type="float"/>
+    <field name="X Max ViewPort" start="416" end="447" type="float"/>
+    <field name="Y Min ViewPort" start="448" end="479" type="float"/>
+    <field name="Y Max ViewPort" start="480" end="511" type="float"/>
+  </struct>
+
+  <struct name="BLEND_STATE_ENTRY" length="2">
+    <field name="Logic Op Enable" start="63" end="63" type="bool"/>
+    <field name="Logic Op Function" start="59" end="62" type="uint"/>
+    <field name="Pre-Blend Source Only Clamp Enable" start="36" end="36" type="bool"/>
+    <field name="Color Clamp Range" start="34" end="35" type="uint">
+      <value name="COLORCLAMP_UNORM" value="0"/>
+      <value name="COLORCLAMP_SNORM" value="1"/>
+      <value name="COLORCLAMP_RTFORMAT" value="2"/>
+    </field>
+    <field name="Pre-Blend Color Clamp Enable" start="33" end="33" type="bool"/>
+    <field name="Post-Blend Color Clamp Enable" start="32" end="32" type="bool"/>
+    <field name="Color Buffer Blend Enable" start="31" end="31" type="bool"/>
+    <field name="Source Blend Factor" start="26" end="30" type="uint"/>
+    <field name="Destination Blend Factor" start="21" end="25" type="uint"/>
+    <field name="Color Blend Function" start="18" end="20" type="uint"/>
+    <field name="Source Alpha Blend Factor" start="13" end="17" type="uint"/>
+    <field name="Destination Alpha Blend Factor" start="8" end="12" type="uint"/>
+    <field name="Alpha Blend Function" start="5" end="7" type="uint"/>
+    <field name="Write Disable Alpha" start="3" end="3" type="bool"/>
+    <field name="Write Disable Red" start="2" end="2" type="bool"/>
+    <field name="Write Disable Green" start="1" end="1" type="bool"/>
+    <field name="Write Disable Blue" start="0" end="0" type="bool"/>
+  </struct>
+
+  <struct name="BLEND_STATE" length="17">
+    <field name="Alpha To Coverage Enable" start="31" end="31" type="bool"/>
+    <field name="Independent Alpha Blend Enable" start="30" end="30" type="bool"/>
+    <field name="Alpha To One Enable" start="29" end="29" type="bool"/>
+    <field name="Alpha To Coverage Dither Enable" start="28" end="28" type="bool"/>
+    <field name="Alpha Test Enable" start="27" end="27" type="bool"/>
+    <field name="Alpha Test Function" start="24" end="26" type="uint"/>
+    <field name="Color Dither Enable" start="23" end="23" type="bool"/>
+    <field name="X Dither Offset" start="21" end="22" type="uint"/>
+    <field name="Y Dither Offset" start="19" end="20" type="uint"/>
+    <group count="8" start="32" size="64">
+      <field name="Entry" start="0" end="63" type="BLEND_STATE_ENTRY"/>
+    </group>
+  </struct>
+
+  <struct name="CC_VIEWPORT" length="2">
+    <field name="Minimum Depth" start="0" end="31" type="float"/>
+    <field name="Maximum Depth" start="32" end="63" type="float"/>
+  </struct>
+
+  <struct name="COLOR_CALC_STATE" length="6">
+    <field name="Round Disable Function Disable" start="15" end="15" type="bool"/>
+    <field name="Alpha Test Format" start="0" end="0" type="uint">
+      <value name="ALPHATEST_UNORM8" value="0"/>
+      <value name="ALPHATEST_FLOAT32" value="1"/>
+    </field>
+    <field name="Alpha Reference Value As UNORM8" start="32" end="63" type="uint"/>
+    <field name="Alpha Reference Value As FLOAT32" start="32" end="63" type="float"/>
+    <field name="Blend Constant Color Red" start="64" end="95" type="float"/>
+    <field name="Blend Constant Color Green" start="96" end="127" type="float"/>
+    <field name="Blend Constant Color Blue" start="128" end="159" type="float"/>
+    <field name="Blend Constant Color Alpha" start="160" end="191" type="float"/>
+  </struct>
+
+  <struct name="EXECUTION_UNIT_EXTENDED_MESSAGE_DESCRIPTOR" length="1">
+    <field name="Extended Message Length" start="6" end="9" type="uint"/>
+    <field name="End Of Thread" start="5" end="5" type="uint">
+      <value name="No Termination" value="0"/>
+      <value name="EOT" value="1"/>
+    </field>
+    <field name="Target Function ID" start="0" end="3" type="uint"/>
+  </struct>
+
+  <struct name="INTERFACE_DESCRIPTOR_DATA" length="8">
+    <field name="Kernel Start Pointer" start="6" end="31" type="offset"/>
+    <field name="Kernel Start Pointer High" start="32" end="47" type="offset"/>
+    <field name="Denorm Mode" start="83" end="83" type="uint">
+      <value name="Ftz" value="0"/>
+      <value name="SetByKernel" value="1"/>
+    </field>
+    <field name="Single Program Flow" start="82" end="82" type="uint"/>
+    <field name="Thread Priority" start="81" end="81" type="uint">
+      <value name="Normal Priority" value="0"/>
+      <value name="High Priority" value="1"/>
+    </field>
+    <field name="Floating Point Mode" start="80" end="80" type="uint">
+      <value name="IEEE-754" value="0"/>
+      <value name="Alternate" value="1"/>
+    </field>
+    <field name="Illegal Opcode Exception Enable" start="77" end="77" type="bool"/>
+    <field name="Mask Stack Exception Enable" start="75" end="75" type="bool"/>
+    <field name="Software Exception Enable" start="71" end="71" type="bool"/>
+    <field name="Sampler State Pointer" start="101" end="127" type="offset"/>
+    <field name="Sampler Count" start="98" end="100" type="uint">
+      <value name="No samplers used" value="0"/>
+      <value name="Between 1 and 4 samplers used" value="1"/>
+      <value name="Between 5 and 8 samplers used" value="2"/>
+      <value name="Between 9 and 12 samplers used" value="3"/>
+      <value name="Between 13 and 16 samplers used" value="4"/>
+    </field>
+    <field name="Binding Table Pointer" start="133" end="143" type="offset"/>
+    <field name="Binding Table Entry Count" start="128" end="132" type="uint"/>
+    <field name="Constant/Indirect URB Entry Read Length" start="176" end="191" type="uint"/>
+    <field name="Constant URB Entry Read Offset" start="160" end="175" type="uint"/>
+    <field name="Rounding Mode" start="214" end="215" type="uint">
+      <value name="RTNE" value="0"/>
+      <value name="RU" value="1"/>
+      <value name="RD" value="2"/>
+      <value name="RTZ" value="3"/>
+    </field>
+    <field name="Barrier Enable" start="213" end="213" type="bool"/>
+    <field name="Shared Local Memory Size" start="208" end="212" type="uint">
+      <value name="Encodes 0K" value="0"/>
+      <value name="Encodes 1K" value="1"/>
+      <value name="Encodes 2K" value="2"/>
+      <value name="Encodes 4K" value="3"/>
+      <value name="Encodes 8K" value="4"/>
+      <value name="Encodes 16K" value="5"/>
+      <value name="Encodes 32K" value="6"/>
+      <value name="Encodes 64K" value="7"/>
+    </field>
+    <field name="Global Barrier Enable" start="207" end="207" type="bool"/>
+    <field name="Number of Threads in GPGPU Thread Group" start="192" end="201" type="uint"/>
+    <field name="Cross-Thread Constant Data Read Length" start="224" end="231" type="uint"/>
+  </struct>
+
+  <struct name="ROUNDINGPRECISIONTABLE_3_BITS" length="1">
+    <field name="Rounding Precision" start="0" end="2" type="uint">
+      <value name="+1/16" value="0"/>
+      <value name="+2/16" value="1"/>
+      <value name="+3/16" value="2"/>
+      <value name="+4/16" value="3"/>
+      <value name="+5/16" value="4"/>
+      <value name="+6/16" value="5"/>
+      <value name="+7/16" value="6"/>
+      <value name="+8/16" value="7"/>
+    </field>
+  </struct>
+
+  <struct name="PALETTE_ENTRY" length="1">
+    <field name="Alpha" start="24" end="31" type="uint"/>
+    <field name="Red" start="16" end="23" type="uint"/>
+    <field name="Green" start="8" end="15" type="uint"/>
+    <field name="Blue" start="0" end="7" type="uint"/>
+  </struct>
+
+  <struct name="BINDING_TABLE_STATE" length="1">
+    <field name="Surface State Pointer" start="6" end="31" type="offset"/>
+  </struct>
+
+  <struct name="RENDER_SURFACE_STATE" length="16">
+    <field name="Surface Type" start="29" end="31" type="uint">
+      <value name="SURFTYPE_1D" value="0"/>
+      <value name="SURFTYPE_2D" value="1"/>
+      <value name="SURFTYPE_3D" value="2"/>
+      <value name="SURFTYPE_CUBE" value="3"/>
+      <value name="SURFTYPE_BUFFER" value="4"/>
+      <value name="SURFTYPE_STRBUF" value="5"/>
+      <value name="SURFTYPE_NULL" value="7"/>
+    </field>
+    <field name="Surface Array" start="28" end="28" type="bool"/>
+    <field name="ASTC_Enable" start="27" end="27" type="bool"/>
+    <field name="Surface Format" start="18" end="26" type="uint"/>
+    <field name="Surface Vertical Alignment" start="16" end="17" type="uint">
+      <value name="VALIGN 4" value="1"/>
+      <value name="VALIGN 8" value="2"/>
+      <value name="VALIGN 16" value="3"/>
+    </field>
+    <field name="Surface Horizontal Alignment" start="14" end="15" type="uint">
+      <value name="HALIGN 4" value="1"/>
+      <value name="HALIGN 8" value="2"/>
+      <value name="HALIGN 16" value="3"/>
+    </field>
+    <field name="Tile Mode" start="12" end="13" type="uint">
+      <value name="LINEAR" value="0"/>
+      <value name="WMAJOR" value="1"/>
+      <value name="XMAJOR" value="2"/>
+      <value name="YMAJOR" value="3"/>
+    </field>
+    <field name="Vertical Line Stride" start="11" end="11" type="uint"/>
+    <field name="Vertical Line Stride Offset" start="10" end="10" type="uint"/>
+    <field name="Sampler L2 Bypass Mode Disable" start="9" end="9" type="bool"/>
+    <field name="Render Cache Read Write Mode" start="8" end="8" type="uint">
+      <value name="Write-Only Cache" value="0"/>
+      <value name="Read-Write Cache" value="1"/>
+    </field>
+    <field name="Media Boundary Pixel Mode" start="6" end="7" type="uint">
+      <value name="NORMAL_MODE" value="0"/>
+      <value name="PROGRESSIVE_FRAME" value="2"/>
+      <value name="INTERLACED_FRAME" value="3"/>
+    </field>
+    <field name="Cube Face Enable - Positive Z" start="0" end="0" type="bool"/>
+    <field name="Cube Face Enable - Negative Z" start="1" end="1" type="bool"/>
+    <field name="Cube Face Enable - Positive Y" start="2" end="2" type="bool"/>
+    <field name="Cube Face Enable - Negative Y" start="3" end="3" type="bool"/>
+    <field name="Cube Face Enable - Positive X" start="4" end="4" type="bool"/>
+    <field name="Cube Face Enable - Negative X" start="5" end="5" type="bool"/>
+    <field name="Memory Object Control State" start="56" end="62" type="MEMORY_OBJECT_CONTROL_STATE"/>
+    <field name="MOCS" start="56" end="62" type="uint"/>
+    <field name="Base Mip Level" start="51" end="55" type="u4.1"/>
+    <field name="Surface QPitch" start="32" end="46" type="uint"/>
+    <field name="Height" start="80" end="93" type="uint"/>
+    <field name="Width" start="64" end="77" type="uint"/>
+    <field name="Depth" start="117" end="127" type="uint"/>
+    <field name="Surface Pitch" start="96" end="113" type="uint"/>
+    <field name="Render Target And Sample Unorm Rotation" start="157" end="158" type="uint">
+      <value name="0DEG" value="0"/>
+      <value name="90DEG" value="1"/>
+      <value name="180DEG" value="2"/>
+      <value name="270DEG" value="3"/>
+    </field>
+    <field name="Minimum Array Element" start="146" end="156" type="uint"/>
+    <field name="Render Target View Extent" start="135" end="145" type="uint"/>
+    <field name="Multisampled Surface Storage Format" start="134" end="134" type="uint">
+      <value name="MSS" value="0"/>
+      <value name="DEPTH_STENCIL" value="1"/>
+    </field>
+    <field name="Number of Multisamples" start="131" end="133" type="uint">
+      <value name="MULTISAMPLECOUNT_1" value="0"/>
+      <value name="MULTISAMPLECOUNT_2" value="1"/>
+      <value name="MULTISAMPLECOUNT_4" value="2"/>
+      <value name="MULTISAMPLECOUNT_8" value="3"/>
+      <value name="MULTISAMPLECOUNT_16" value="4"/>
+    </field>
+    <field name="Multisample Position Palette Index" start="128" end="130" type="uint"/>
+    <field name="X Offset" start="185" end="191" type="offset"/>
+    <field name="Y Offset" start="181" end="183" type="offset"/>
+    <field name="EWA Disable For Cube" start="180" end="180" type="bool"/>
+    <field name="Tiled Resource Mode" start="178" end="179" type="uint">
+      <value name="NONE" value="0"/>
+      <value name="4KB" value="1"/>
+      <value name="64KB" value="2"/>
+      <value name="TILEYF" value="1"/>
+      <value name="TILEYS" value="2"/>
+    </field>
+    <field name="Coherency Type" start="174" end="174" type="uint">
+      <value name="GPU coherent" value="0"/>
+      <value name="IA coherent" value="1"/>
+    </field>
+    <field name="Mip Tail Start LOD" start="168" end="171" type="uint"/>
+    <field name="Surface Min LOD" start="164" end="167" type="uint"/>
+    <field name="MIP Count / LOD" start="160" end="163" type="uint"/>
+    <field name="Auxiliary Surface QPitch" start="208" end="222" type="uint"/>
+    <field name="Auxiliary Surface Pitch" start="195" end="203" type="uint"/>
+    <field name="Auxiliary Surface Mode" start="192" end="194" type="uint">
+      <value name="AUX_NONE" value="0"/>
+      <value name="AUX_CCS_D" value="1"/>
+      <value name="AUX_APPEND" value="2"/>
+      <value name="AUX_HIZ" value="3"/>
+      <value name="AUX_CCS_E" value="5"/>
+    </field>
+    <field name="Separate UV Plane Enable" start="223" end="223" type="bool"/>
+    <field name="X Offset for U or UV Plane" start="208" end="221" type="uint"/>
+    <field name="Y Offset for U or UV Plane" start="192" end="205" type="uint"/>
+    <field name="Memory Compression Mode" start="255" end="255" type="uint">
+      <value name="Horizontal" value="0"/>
+      <value name="Vertical" value="1"/>
+    </field>
+    <field name="Memory Compression Enable" start="254" end="254" type="bool"/>
+    <field name="Shader Channel Select Red" start="249" end="251" type="uint"/>
+    <field name="Shader Channel Select Green" start="246" end="248" type="uint"/>
+    <field name="Shader Channel Select Blue" start="243" end="245" type="uint"/>
+    <field name="Shader Channel Select Alpha" start="240" end="242" type="uint"/>
+    <field name="Resource Min LOD" start="224" end="235" type="u4.8"/>
+    <field name="Surface Base Address" start="256" end="319" type="address"/>
+    <field name="X Offset for V Plane" start="368" end="381" type="uint"/>
+    <field name="Y Offset for V Plane" start="352" end="365" type="uint"/>
+    <field name="Auxiliary Table Index for Media Compressed Surface" start="341" end="351" type="uint"/>
+    <field name="Auxiliary Surface Base Address" start="332" end="383" type="address"/>
+    <field name="Quilt Height" start="325" end="329" type="uint"/>
+    <field name="Quilt Width" start="320" end="324" type="uint"/>
+    <field name="Hierarchical Depth Clear Value" start="384" end="415" type="float"/>
+    <field name="Red Clear Color" start="384" end="415" type="int"/>
+    <field name="Green Clear Color" start="416" end="447" type="int"/>
+    <field name="Blue Clear Color" start="448" end="479" type="int"/>
+    <field name="Alpha Clear Color" start="480" end="511" type="int"/>
+  </struct>
+
+  <struct name="FILTER_COEFFICIENT" length="1">
+    <field name="Filter Coefficient" start="0" end="7" type="s1.6"/>
+  </struct>
+
+  <struct name="SAMPLER_STATE" length="4">
+    <field name="Sampler Disable" start="31" end="31" type="bool"/>
+    <field name="Texture Border Color Mode" start="29" end="29" type="uint">
+      <value name="DX10/OGL" value="0"/>
+      <value name="DX9" value="1"/>
+    </field>
+    <field name="LOD PreClamp Mode" start="27" end="28" type="uint" prefix="CLAMP_MODE">
+      <value name="NONE" value="0"/>
+      <value name="OGL" value="2"/>
+    </field>
+    <field name="Coarse LOD Quality Mode" start="22" end="26" type="uint"/>
+    <field name="Mip Mode Filter" start="20" end="21" type="uint" prefix="MIPFILTER">
+      <value name="NONE" value="0"/>
+      <value name="NEAREST" value="1"/>
+      <value name="LINEAR" value="3"/>
+    </field>
+    <field name="Mag Mode Filter" start="17" end="19" type="uint" prefix="MAPFILTER">
+      <value name="NEAREST" value="0"/>
+      <value name="LINEAR" value="1"/>
+      <value name="ANISOTROPIC" value="2"/>
+      <value name="MONO" value="6"/>
+    </field>
+    <field name="Min Mode Filter" start="14" end="16" type="uint" prefix="MAPFILTER">
+      <value name="NEAREST" value="0"/>
+      <value name="LINEAR" value="1"/>
+      <value name="ANISOTROPIC" value="2"/>
+      <value name="MONO" value="6"/>
+    </field>
+    <field name="Texture LOD Bias" start="1" end="13" type="s4.8"/>
+    <field name="Anisotropic Algorithm" start="0" end="0" type="uint">
+      <value name="LEGACY" value="0"/>
+      <value name="EWA Approximation" value="1"/>
+    </field>
+    <field name="Min LOD" start="52" end="63" type="u4.8"/>
+    <field name="Max LOD" start="40" end="51" type="u4.8"/>
+    <field name="ChromaKey Enable" start="39" end="39" type="bool"/>
+    <field name="ChromaKey Index" start="37" end="38" type="uint"/>
+    <field name="ChromaKey Mode" start="36" end="36" type="uint">
+      <value name="KEYFILTER_KILL_ON_ANY_MATCH" value="0"/>
+      <value name="KEYFILTER_REPLACE_BLACK" value="1"/>
+    </field>
+    <field name="Shadow Function" start="33" end="35" type="uint">
+      <value name="PREFILTEROP ALWAYS" value="0"/>
+      <value name="PREFILTEROP NEVER" value="1"/>
+      <value name="PREFILTEROP LESS" value="2"/>
+      <value name="PREFILTEROP EQUAL" value="3"/>
+      <value name="PREFILTEROP LEQUAL" value="4"/>
+      <value name="PREFILTEROP GREATER" value="5"/>
+      <value name="PREFILTEROP NOTEQUAL" value="6"/>
+      <value name="PREFILTEROP GEQUAL" value="7"/>
+    </field>
+    <field name="Cube Surface Control Mode" start="32" end="32" type="uint">
+      <value name="PROGRAMMED" value="0"/>
+      <value name="OVERRIDE" value="1"/>
+    </field>
+    <field name="Border Color Pointer" start="70" end="87" type="offset"/>
+    <field name="LOD Clamp Magnification Mode" start="64" end="64" type="uint">
+      <value name="MIPNONE" value="0"/>
+      <value name="MIPFILTER" value="1"/>
+    </field>
+    <field name="Reduction Type" start="118" end="119" type="uint">
+      <value name="STD_FILTER" value="0"/>
+      <value name="COMPARISON" value="1"/>
+      <value name="MINIMUM" value="2"/>
+      <value name="MAXIMUM" value="3"/>
+    </field>
+    <field name="Maximum Anisotropy" start="115" end="117" type="uint">
+      <value name="RATIO 2:1" value="0"/>
+      <value name="RATIO 4:1" value="1"/>
+      <value name="RATIO 6:1" value="2"/>
+      <value name="RATIO 8:1" value="3"/>
+      <value name="RATIO 10:1" value="4"/>
+      <value name="RATIO 12:1" value="5"/>
+      <value name="RATIO 14:1" value="6"/>
+      <value name="RATIO 16:1" value="7"/>
+    </field>
+    <field name="R Address Min Filter Rounding Enable" start="109" end="109" type="bool"/>
+    <field name="R Address Mag Filter Rounding Enable" start="110" end="110" type="bool"/>
+    <field name="V Address Min Filter Rounding Enable" start="111" end="111" type="bool"/>
+    <field name="V Address Mag Filter Rounding Enable" start="112" end="112" type="bool"/>
+    <field name="U Address Min Filter Rounding Enable" start="113" end="113" type="bool"/>
+    <field name="U Address Mag Filter Rounding Enable" start="114" end="114" type="bool"/>
+    <field name="Trilinear Filter Quality" start="107" end="108" type="uint">
+      <value name="FULL" value="0"/>
+      <value name="HIGH" value="1"/>
+      <value name="MED" value="2"/>
+      <value name="LOW" value="3"/>
+    </field>
+    <field name="Non-normalized Coordinate Enable" start="106" end="106" type="bool"/>
+    <field name="Reduction Type Enable" start="105" end="105" type="bool"/>
+    <field name="TCX Address Control Mode" start="102" end="104" type="uint"/>
+    <field name="TCY Address Control Mode" start="99" end="101" type="uint"/>
+    <field name="TCZ Address Control Mode" start="96" end="98" type="uint"/>
+  </struct>
+
+  <struct name="SAMPLER_STATE_8X8_AVS_COEFFICIENTS" length="8">
+    <field name="Table 0Y Filter Coefficient[n,1]" start="24" end="31" type="s1.6"/>
+    <field name="Table 0X Filter Coefficient[n,1]" start="16" end="23" type="s1.6"/>
+    <field name="Table 0Y Filter Coefficient[n,0]" start="8" end="15" type="s1.6"/>
+    <field name="Table 0X Filter Coefficient[n,0]" start="0" end="7" type="s1.6"/>
+    <field name="Table 0Y Filter Coefficient[n,3]" start="56" end="63" type="s1.6"/>
+    <field name="Table 0X Filter Coefficient[n,3]" start="48" end="55" type="s1.6"/>
+    <field name="Table 0Y Filter Coefficient[n,2]" start="40" end="47" type="s1.6"/>
+    <field name="Table 0X Filter Coefficient[n,2]" start="32" end="39" type="s1.6"/>
+    <field name="Table 0Y Filter Coefficient[n,5]" start="88" end="95" type="s1.6"/>
+    <field name="Table 0X Filter Coefficient[n,5]" start="80" end="87" type="s1.6"/>
+    <field name="Table 0Y Filter Coefficient[n,4]" start="72" end="79" type="s1.6"/>
+    <field name="Table 0X Filter Coefficient[n,4]" start="64" end="71" type="s1.6"/>
+    <field name="Table 0Y Filter Coefficient[n,7]" start="120" end="127" type="s1.6"/>
+    <field name="Table 0X Filter Coefficient[n,7]" start="112" end="119" type="s1.6"/>
+    <field name="Table 0Y Filter Coefficient[n,6]" start="104" end="111" type="s1.6"/>
+    <field name="Table 0X Filter Coefficient[n,6]" start="96" end="103" type="s1.6"/>
+    <field name="Table 1X Filter Coefficient[n,3]" start="152" end="159" type="s1.6"/>
+    <field name="Table 1X Filter Coefficient[n,2]" start="144" end="151" type="s1.6"/>
+    <field name="Table 1X Filter Coefficient[n,5]" start="168" end="175" type="s1.6"/>
+    <field name="Table 1X Filter Coefficient[n,4]" start="160" end="167" type="s1.6"/>
+    <field name="Table 1Y Filter Coefficient[n,3]" start="216" end="223" type="s1.6"/>
+    <field name="Table 1Y Filter Coefficient[n,2]" start="208" end="215" type="s1.6"/>
+    <field name="Table 1Y Filter Coefficient[n,5]" start="232" end="239" type="s1.6"/>
+    <field name="Table 1Y Filter Coefficient[n,4]" start="224" end="231" type="s1.6"/>
+  </struct>
+
+  <enum name="3D_Prim_Topo_Type" prefix="3DPRIM">
+    <value name="POINTLIST" value="1"/>
+    <value name="LINELIST" value="2"/>
+    <value name="LINESTRIP" value="3"/>
+    <value name="TRILIST" value="4"/>
+    <value name="TRISTRIP" value="5"/>
+    <value name="TRIFAN" value="6"/>
+    <value name="QUADLIST" value="7"/>
+    <value name="QUADSTRIP" value="8"/>
+    <value name="LINELIST_ADJ" value="9"/>
+    <value name="LINESTRIP_ADJ" value="10"/>
+    <value name="TRILIST_ADJ" value="11"/>
+    <value name="TRISTRIP_ADJ" value="12"/>
+    <value name="TRISTRIP_REVERSE" value="13"/>
+    <value name="POLYGON" value="14"/>
+    <value name="RECTLIST" value="15"/>
+    <value name="LINELOOP" value="16"/>
+    <value name="POINTLIST _BF" value="17"/>
+    <value name="LINESTRIP_CONT" value="18"/>
+    <value name="LINESTRIP_BF" value="19"/>
+    <value name="LINESTRIP_CONT_BF" value="20"/>
+    <value name="TRIFAN_NOSTIPPLE" value="22"/>
+    <value name="PATCHLIST_1" value="32"/>
+    <value name="PATCHLIST_2" value="33"/>
+    <value name="PATCHLIST_3" value="34"/>
+    <value name="PATCHLIST_4" value="35"/>
+    <value name="PATCHLIST_5" value="36"/>
+    <value name="PATCHLIST_6" value="37"/>
+    <value name="PATCHLIST_7" value="38"/>
+    <value name="PATCHLIST_8" value="39"/>
+    <value name="PATCHLIST_9" value="40"/>
+    <value name="PATCHLIST_10" value="41"/>
+    <value name="PATCHLIST_11" value="42"/>
+    <value name="PATCHLIST_12" value="43"/>
+    <value name="PATCHLIST_13" value="44"/>
+    <value name="PATCHLIST_14" value="45"/>
+    <value name="PATCHLIST_15" value="46"/>
+    <value name="PATCHLIST_16" value="47"/>
+    <value name="PATCHLIST_17" value="48"/>
+    <value name="PATCHLIST_18" value="49"/>
+    <value name="PATCHLIST_19" value="50"/>
+    <value name="PATCHLIST_20" value="51"/>
+    <value name="PATCHLIST_21" value="52"/>
+    <value name="PATCHLIST_22" value="53"/>
+    <value name="PATCHLIST_23" value="54"/>
+    <value name="PATCHLIST_24" value="55"/>
+    <value name="PATCHLIST_25" value="56"/>
+    <value name="PATCHLIST_26" value="57"/>
+    <value name="PATCHLIST_27" value="58"/>
+    <value name="PATCHLIST_28" value="59"/>
+    <value name="PATCHLIST_29" value="60"/>
+    <value name="PATCHLIST_30" value="61"/>
+    <value name="PATCHLIST_31" value="62"/>
+    <value name="PATCHLIST_32" value="63"/>
+  </enum>
+
+  <enum name="3D_Vertex_Component_Control" prefix="VFCOMP">
+    <value name="NOSTORE" value="0"/>
+    <value name="STORE_SRC" value="1"/>
+    <value name="STORE_0" value="2"/>
+    <value name="STORE_1_FP" value="3"/>
+    <value name="STORE_1_INT" value="4"/>
+    <value name="STORE_PID" value="7"/>
+  </enum>
+
+  <enum name="COMPONENT_ENABLES" prefix="CE">
+    <value name="NONE" value="0"/>
+    <value name="X" value="1"/>
+    <value name="Y" value="2"/>
+    <value name="XY" value="3"/>
+    <value name="Z" value="4"/>
+    <value name="XZ" value="5"/>
+    <value name="YZ" value="6"/>
+    <value name="XYZ" value="7"/>
+    <value name="W" value="8"/>
+    <value name="XW" value="9"/>
+    <value name="YW" value="10"/>
+    <value name="XYW" value="11"/>
+    <value name="ZW" value="12"/>
+    <value name="XZW" value="13"/>
+    <value name="YZW" value="14"/>
+    <value name="XYZW" value="15"/>
+  </enum>
+
+  <enum name="Attribute_Component_Format" prefix="ACF">
+    <value name="disabled" value="0"/>
+    <value name=".xy" value="1"/>
+    <value name=".xyz" value="2"/>
+    <value name=".xyzw" value="3"/>
+  </enum>
+
+  <enum name="WRAP_SHORTEST_ENABLE" prefix="WSE">
+    <value name="X" value="1"/>
+    <value name="Y" value="2"/>
+    <value name="XY" value="3"/>
+    <value name="Z" value="4"/>
+    <value name="XZ" value="5"/>
+    <value name="YZ" value="6"/>
+    <value name="XYZ" value="7"/>
+    <value name="W" value="8"/>
+    <value name="XW" value="9"/>
+    <value name="YW" value="10"/>
+    <value name="XYW" value="11"/>
+    <value name="ZW" value="12"/>
+    <value name="XZW" value="13"/>
+    <value name="YZW" value="14"/>
+    <value name="XYZW" value="15"/>
+  </enum>
+
+  <enum name="3D_Stencil_Operation" prefix="STENCILOP">
+    <value name="KEEP" value="0"/>
+    <value name="ZERO" value="1"/>
+    <value name="REPLACE" value="2"/>
+    <value name="INCRSAT" value="3"/>
+    <value name="DECRSAT" value="4"/>
+    <value name="INCR" value="5"/>
+    <value name="DECR" value="6"/>
+    <value name="INVERT" value="7"/>
+  </enum>
+
+  <enum name="3D_Color_Buffer_Blend_Factor" prefix="BLENDFACTOR">
+    <value name="ONE" value="1"/>
+    <value name="SRC_COLOR" value="2"/>
+    <value name="SRC_ALPHA" value="3"/>
+    <value name="DST_ALPHA" value="4"/>
+    <value name="DST_COLOR" value="5"/>
+    <value name="SRC_ALPHA_SATURATE" value="6"/>
+    <value name="CONST_COLOR" value="7"/>
+    <value name="CONST_ALPHA" value="8"/>
+    <value name="SRC1_COLOR" value="9"/>
+    <value name="SRC1_ALPHA" value="10"/>
+    <value name="ZERO" value="17"/>
+    <value name="INV_SRC_COLOR" value="18"/>
+    <value name="INV_SRC_ALPHA" value="19"/>
+    <value name="INV_DST_ALPHA" value="20"/>
+    <value name="INV_DST_COLOR" value="21"/>
+    <value name="INV_CONST_COLOR" value="23"/>
+    <value name="INV_CONST_ALPHA" value="24"/>
+    <value name="INV_SRC1_COLOR" value="25"/>
+    <value name="INV_SRC1_ALPHA" value="26"/>
+  </enum>
+
+  <enum name="3D_Color_Buffer_Blend_Function" prefix="BLENDFUNCTION">
+    <value name="ADD" value="0"/>
+    <value name="SUBTRACT" value="1"/>
+    <value name="REVERSE_SUBTRACT" value="2"/>
+    <value name="MIN" value="3"/>
+    <value name="MAX" value="4"/>
+  </enum>
+
+  <enum name="3D_Compare_Function" prefix="COMPAREFUNCTION">
+    <value name="ALWAYS" value="0"/>
+    <value name="NEVER" value="1"/>
+    <value name="LESS" value="2"/>
+    <value name="EQUAL" value="3"/>
+    <value name="LEQUAL" value="4"/>
+    <value name="GREATER" value="5"/>
+    <value name="NOTEQUAL" value="6"/>
+    <value name="GEQUAL" value="7"/>
+  </enum>
+
+  <enum name="3D_Logic_Op_Function" prefix="LOGICOP">
+    <value name="CLEAR" value="0"/>
+    <value name="NOR" value="1"/>
+    <value name="AND_INVERTED" value="2"/>
+    <value name="COPY_INVERTED" value="3"/>
+    <value name="AND_REVERSE" value="4"/>
+    <value name="INVERT" value="5"/>
+    <value name="XOR" value="6"/>
+    <value name="NAND" value="7"/>
+    <value name="AND" value="8"/>
+    <value name="EQUIV" value="9"/>
+    <value name="NOOP" value="10"/>
+    <value name="OR_INVERTED" value="11"/>
+    <value name="COPY" value="12"/>
+    <value name="OR_REVERSE" value="13"/>
+    <value name="OR" value="14"/>
+    <value name="SET" value="15"/>
+  </enum>
+
+  <enum name="SURFACE_FORMAT" prefix="SF">
+    <value name="R32G32B32A32_FLOAT" value="0"/>
+    <value name="R32G32B32A32_SINT" value="1"/>
+    <value name="R32G32B32A32_UINT" value="2"/>
+    <value name="R32G32B32A32_UNORM" value="3"/>
+    <value name="R32G32B32A32_SNORM" value="4"/>
+    <value name="R64G64_FLOAT" value="5"/>
+    <value name="R32G32B32X32_FLOAT" value="6"/>
+    <value name="R32G32B32A32_SSCALED" value="7"/>
+    <value name="R32G32B32A32_USCALED" value="8"/>
+    <value name="R32G32B32A32_SFIXED" value="32"/>
+    <value name="R64G64_PASSTHRU" value="33"/>
+    <value name="R32G32B32_FLOAT" value="64"/>
+    <value name="R32G32B32_SINT" value="65"/>
+    <value name="R32G32B32_UINT" value="66"/>
+    <value name="R32G32B32_UNORM" value="67"/>
+    <value name="R32G32B32_SNORM" value="68"/>
+    <value name="R32G32B32_SSCALED" value="69"/>
+    <value name="R32G32B32_USCALED" value="70"/>
+    <value name="R32G32B32_SFIXED" value="80"/>
+    <value name="R16G16B16A16_UNORM" value="128"/>
+    <value name="R16G16B16A16_SNORM" value="129"/>
+    <value name="R16G16B16A16_SINT" value="130"/>
+    <value name="R16G16B16A16_UINT" value="131"/>
+    <value name="R16G16B16A16_FLOAT" value="132"/>
+    <value name="R32G32_FLOAT" value="133"/>
+    <value name="R32G32_SINT" value="134"/>
+    <value name="R32G32_UINT" value="135"/>
+    <value name="R32_FLOAT_X8X24_TYPELESS" value="136"/>
+    <value name="X32_TYPELESS_G8X24_UINT" value="137"/>
+    <value name="L32A32_FLOAT" value="138"/>
+    <value name="R32G32_UNORM" value="139"/>
+    <value name="R32G32_SNORM" value="140"/>
+    <value name="R64_FLOAT" value="141"/>
+    <value name="R16G16B16X16_UNORM" value="142"/>
+    <value name="R16G16B16X16_FLOAT" value="143"/>
+    <value name="A32X32_FLOAT" value="144"/>
+    <value name="L32X32_FLOAT" value="145"/>
+    <value name="I32X32_FLOAT" value="146"/>
+    <value name="R16G16B16A16_SSCALED" value="147"/>
+    <value name="R16G16B16A16_USCALED" value="148"/>
+    <value name="R32G32_SSCALED" value="149"/>
+    <value name="R32G32_USCALED" value="150"/>
+    <value name="R32G32_SFIXED" value="160"/>
+    <value name="R64_PASSTHRU" value="161"/>
+    <value name="B8G8R8A8_UNORM" value="192"/>
+    <value name="B8G8R8A8_UNORM_SRGB" value="193"/>
+    <value name="R10G10B10A2_UNORM" value="194"/>
+    <value name="R10G10B10A2_UNORM_SRGB" value="195"/>
+    <value name="R10G10B10A2_UINT" value="196"/>
+    <value name="R10G10B10_SNORM_A2_UNORM" value="197"/>
+    <value name="R8G8B8A8_UNORM" value="199"/>
+    <value name="R8G8B8A8_UNORM_SRGB" value="200"/>
+    <value name="R8G8B8A8_SNORM" value="201"/>
+    <value name="R8G8B8A8_SINT" value="202"/>
+    <value name="R8G8B8A8_UINT" value="203"/>
+    <value name="R16G16_UNORM" value="204"/>
+    <value name="R16G16_SNORM" value="205"/>
+    <value name="R16G16_SINT" value="206"/>
+    <value name="R16G16_UINT" value="207"/>
+    <value name="R16G16_FLOAT" value="208"/>
+    <value name="B10G10R10A2_UNORM" value="209"/>
+    <value name="B10G10R10A2_UNORM_SRGB" value="210"/>
+    <value name="R11G11B10_FLOAT" value="211"/>
+    <value name="R32_SINT" value="214"/>
+    <value name="R32_UINT" value="215"/>
+    <value name="R32_FLOAT" value="216"/>
+    <value name="R24_UNORM_X8_TYPELESS" value="217"/>
+    <value name="X24_TYPELESS_G8_UINT" value="218"/>
+    <value name="L32_UNORM" value="221"/>
+    <value name="A32_UNORM" value="222"/>
+    <value name="L16A16_UNORM" value="223"/>
+    <value name="I24X8_UNORM" value="224"/>
+    <value name="L24X8_UNORM" value="225"/>
+    <value name="A24X8_UNORM" value="226"/>
+    <value name="I32_FLOAT" value="227"/>
+    <value name="L32_FLOAT" value="228"/>
+    <value name="A32_FLOAT" value="229"/>
+    <value name="X8B8_UNORM_G8R8_SNORM" value="230"/>
+    <value name="A8X8_UNORM_G8R8_SNORM" value="231"/>
+    <value name="B8X8_UNORM_G8R8_SNORM" value="232"/>
+    <value name="B8G8R8X8_UNORM" value="233"/>
+    <value name="B8G8R8X8_UNORM_SRGB" value="234"/>
+    <value name="R8G8B8X8_UNORM" value="235"/>
+    <value name="R8G8B8X8_UNORM_SRGB" value="236"/>
+    <value name="R9G9B9E5_SHAREDEXP" value="237"/>
+    <value name="B10G10R10X2_UNORM" value="238"/>
+    <value name="L16A16_FLOAT" value="240"/>
+    <value name="R32_UNORM" value="241"/>
+    <value name="R32_SNORM" value="242"/>
+    <value name="R10G10B10X2_USCALED" value="243"/>
+    <value name="R8G8B8A8_SSCALED" value="244"/>
+    <value name="R8G8B8A8_USCALED" value="245"/>
+    <value name="R16G16_SSCALED" value="246"/>
+    <value name="R16G16_USCALED" value="247"/>
+    <value name="R32_SSCALED" value="248"/>
+    <value name="R32_USCALED" value="249"/>
+    <value name="B5G6R5_UNORM" value="256"/>
+    <value name="B5G6R5_UNORM_SRGB" value="257"/>
+    <value name="B5G5R5A1_UNORM" value="258"/>
+    <value name="B5G5R5A1_UNORM_SRGB" value="259"/>
+    <value name="B4G4R4A4_UNORM" value="260"/>
+    <value name="B4G4R4A4_UNORM_SRGB" value="261"/>
+    <value name="R8G8_UNORM" value="262"/>
+    <value name="R8G8_SNORM" value="263"/>
+    <value name="R8G8_SINT" value="264"/>
+    <value name="R8G8_UINT" value="265"/>
+    <value name="R16_UNORM" value="266"/>
+    <value name="R16_SNORM" value="267"/>
+    <value name="R16_SINT" value="268"/>
+    <value name="R16_UINT" value="269"/>
+    <value name="R16_FLOAT" value="270"/>
+    <value name="A8P8_UNORM_PALETTE0" value="271"/>
+    <value name="A8P8_UNORM_PALETTE1" value="272"/>
+    <value name="I16_UNORM" value="273"/>
+    <value name="L16_UNORM" value="274"/>
+    <value name="A16_UNORM" value="275"/>
+    <value name="L8A8_UNORM" value="276"/>
+    <value name="I16_FLOAT" value="277"/>
+    <value name="L16_FLOAT" value="278"/>
+    <value name="A16_FLOAT" value="279"/>
+    <value name="L8A8_UNORM_SRGB" value="280"/>
+    <value name="R5G5_SNORM_B6_UNORM" value="281"/>
+    <value name="B5G5R5X1_UNORM" value="282"/>
+    <value name="B5G5R5X1_UNORM_SRGB" value="283"/>
+    <value name="R8G8_SSCALED" value="284"/>
+    <value name="R8G8_USCALED" value="285"/>
+    <value name="R16_SSCALED" value="286"/>
+    <value name="R16_USCALED" value="287"/>
+    <value name="P8A8_UNORM_PALETTE0" value="290"/>
+    <value name="P8A8_UNORM_PALETTE1" value="291"/>
+    <value name="A1B5G5R5_UNORM" value="292"/>
+    <value name="A4B4G4R4_UNORM" value="293"/>
+    <value name="L8A8_UINT" value="294"/>
+    <value name="L8A8_SINT" value="295"/>
+    <value name="R8_UNORM" value="320"/>
+    <value name="R8_SNORM" value="321"/>
+    <value name="R8_SINT" value="322"/>
+    <value name="R8_UINT" value="323"/>
+    <value name="A8_UNORM" value="324"/>
+    <value name="I8_UNORM" value="325"/>
+    <value name="L8_UNORM" value="326"/>
+    <value name="P4A4_UNORM_PALETTE0" value="327"/>
+    <value name="A4P4_UNORM_PALETTE0" value="328"/>
+    <value name="R8_SSCALED" value="329"/>
+    <value name="R8_USCALED" value="330"/>
+    <value name="P8_UNORM_PALETTE0" value="331"/>
+    <value name="L8_UNORM_SRGB" value="332"/>
+    <value name="P8_UNORM_PALETTE1" value="333"/>
+    <value name="P4A4_UNORM_PALETTE1" value="334"/>
+    <value name="A4P4_UNORM_PALETTE1" value="335"/>
+    <value name="Y8_UNORM" value="336"/>
+    <value name="L8_UINT" value="338"/>
+    <value name="L8_SINT" value="339"/>
+    <value name="I8_UINT" value="340"/>
+    <value name="I8_SINT" value="341"/>
+    <value name="DXT1_RGB_SRGB" value="384"/>
+    <value name="R1_UNORM" value="385"/>
+    <value name="YCRCB_NORMAL" value="386"/>
+    <value name="YCRCB_SWAPUVY" value="387"/>
+    <value name="P2_UNORM_PALETTE0" value="388"/>
+    <value name="P2_UNORM_PALETTE1" value="389"/>
+    <value name="BC1_UNORM" value="390"/>
+    <value name="BC2_UNORM" value="391"/>
+    <value name="BC3_UNORM" value="392"/>
+    <value name="BC4_UNORM" value="393"/>
+    <value name="BC5_UNORM" value="394"/>
+    <value name="BC1_UNORM_SRGB" value="395"/>
+    <value name="BC2_UNORM_SRGB" value="396"/>
+    <value name="BC3_UNORM_SRGB" value="397"/>
+    <value name="MONO8" value="398"/>
+    <value name="YCRCB_SWAPUV" value="399"/>
+    <value name="YCRCB_SWAPY" value="400"/>
+    <value name="DXT1_RGB" value="401"/>
+    <value name="FXT1" value="402"/>
+    <value name="R8G8B8_UNORM" value="403"/>
+    <value name="R8G8B8_SNORM" value="404"/>
+    <value name="R8G8B8_SSCALED" value="405"/>
+    <value name="R8G8B8_USCALED" value="406"/>
+    <value name="R64G64B64A64_FLOAT" value="407"/>
+    <value name="R64G64B64_FLOAT" value="408"/>
+    <value name="BC4_SNORM" value="409"/>
+    <value name="BC5_SNORM" value="410"/>
+    <value name="R16G16B16_FLOAT" value="411"/>
+    <value name="R16G16B16_UNORM" value="412"/>
+    <value name="R16G16B16_SNORM" value="413"/>
+    <value name="R16G16B16_SSCALED" value="414"/>
+    <value name="R16G16B16_USCALED" value="415"/>
+    <value name="BC6H_SF16" value="417"/>
+    <value name="BC7_UNORM" value="418"/>
+    <value name="BC7_UNORM_SRGB" value="419"/>
+    <value name="BC6H_UF16" value="420"/>
+    <value name="PLANAR_420_8" value="421"/>
+    <value name="R8G8B8_UNORM_SRGB" value="424"/>
+    <value name="ETC1_RGB8" value="425"/>
+    <value name="ETC2_RGB8" value="426"/>
+    <value name="EAC_R11" value="427"/>
+    <value name="EAC_RG11" value="428"/>
+    <value name="EAC_SIGNED_R11" value="429"/>
+    <value name="EAC_SIGNED_RG11" value="430"/>
+    <value name="ETC2_SRGB8" value="431"/>
+    <value name="R16G16B16_UINT" value="432"/>
+    <value name="R16G16B16_SINT" value="433"/>
+    <value name="R32_SFIXED" value="434"/>
+    <value name="R10G10B10A2_SNORM" value="435"/>
+    <value name="R10G10B10A2_USCALED" value="436"/>
+    <value name="R10G10B10A2_SSCALED" value="437"/>
+    <value name="R10G10B10A2_SINT" value="438"/>
+    <value name="B10G10R10A2_SNORM" value="439"/>
+    <value name="B10G10R10A2_USCALED" value="440"/>
+    <value name="B10G10R10A2_SSCALED" value="441"/>
+    <value name="B10G10R10A2_UINT" value="442"/>
+    <value name="B10G10R10A2_SINT" value="443"/>
+    <value name="R64G64B64A64_PASSTHRU" value="444"/>
+    <value name="R64G64B64_PASSTHRU" value="445"/>
+    <value name="ETC2_RGB8_PTA" value="448"/>
+    <value name="ETC2_SRGB8_PTA" value="449"/>
+    <value name="ETC2_EAC_RGBA8" value="450"/>
+    <value name="ETC2_EAC_SRGB8_A8" value="451"/>
+    <value name="R8G8B8_UINT" value="456"/>
+    <value name="R8G8B8_SINT" value="457"/>
+    <value name="RAW" value="511"/>
+  </enum>
+
+  <enum name="Shader Channel Select" prefix="SCS">
+    <value name="ZERO" value="0"/>
+    <value name="ONE" value="1"/>
+    <value name="RED" value="4"/>
+    <value name="GREEN" value="5"/>
+    <value name="BLUE" value="6"/>
+    <value name="ALPHA" value="7"/>
+  </enum>
+
+  <enum name="Texture Coordinate Mode" prefix="TCM">
+    <value name="WRAP" value="0"/>
+    <value name="MIRROR" value="1"/>
+    <value name="CLAMP" value="2"/>
+    <value name="CUBE" value="3"/>
+    <value name="CLAMP_BORDER" value="4"/>
+    <value name="MIRROR_ONCE" value="5"/>
+    <value name="HALF_BORDER" value="6"/>
+  </enum>
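+
+  <!-- Conventions observed in the packet definitions below (summarized here;
+       not stated explicitly by the source data): field "start"/"end" give bit
+       positions counted from bit 0 of the packet's first DWord and continuing
+       across DWords, so bit 32 is bit 0 of DWord 1.  The "DWord Length" field
+       encodes the total packet length minus the "bias" attribute; for
+       example, 3DPRIMITIVE below has length="7" and bias="2", matching its
+       DWord Length default of 5.  Types written as u0.8, u8.3, or u1.16
+       appear to denote unsigned fixed-point numbers with the given integer
+       and fractional bit widths. -->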
+
+  <instruction name="3DPRIMITIVE" bias="2" length="7">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="3"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="0"/>
+    <field name="Indirect Parameter Enable" start="10" end="10" type="bool"/>
+    <field name="UAV Coherency Required" start="9" end="9" type="bool"/>
+    <field name="Predicate Enable" start="8" end="8" type="bool"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="5"/>
+    <field name="End Offset Enable" start="41" end="41" type="bool"/>
+    <field name="Vertex Access Type" start="40" end="40" type="uint">
+      <value name="SEQUENTIAL" value="0"/>
+      <value name="RANDOM" value="1"/>
+    </field>
+    <field name="Primitive Topology Type" start="32" end="37" type="uint"/>
+    <field name="Vertex Count Per Instance" start="64" end="95" type="uint"/>
+    <field name="Start Vertex Location" start="96" end="127" type="uint"/>
+    <field name="Instance Count" start="128" end="159" type="uint"/>
+    <field name="Start Instance Location" start="160" end="191" type="uint"/>
+    <field name="Base Vertex Location" start="192" end="223" type="int"/>
+  </instruction>
+
+  <instruction name="3DSTATE_AA_LINE_PARAMETERS" bias="2" length="3">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="1"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="10"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="1"/>
+    <field name="AA Point Coverage Bias" start="56" end="63" type="u0.8"/>
+    <field name="AA Coverage Bias" start="48" end="55" type="u0.8"/>
+    <field name="AA Point Coverage Slope" start="40" end="47" type="u0.8"/>
+    <field name="AA Coverage Slope" start="32" end="39" type="u0.8"/>
+    <field name="AA Point Coverage EndCap Bias" start="88" end="95" type="u0.8"/>
+    <field name="AA Coverage EndCap Bias" start="80" end="87" type="u0.8"/>
+    <field name="AA Point Coverage EndCap Slope" start="72" end="79" type="u0.8"/>
+    <field name="AA Coverage EndCap Slope" start="64" end="71" type="u0.8"/>
+  </instruction>
+
+  <instruction name="3DSTATE_BINDING_TABLE_EDIT_DS" bias="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="70"/>
+    <field name="DWord Length" start="0" end="8" type="uint" default="0"/>
+    <field name="Binding Table Block Clear" start="48" end="63" type="uint"/>
+    <field name="Binding Table Edit Target" start="32" end="33" type="uint">
+      <value name="All Cores" value="3"/>
+      <value name="Core 1" value="2"/>
+      <value name="Core 0" value="1"/>
+    </field>
+    <group count="0" start="64" size="32">
+      <field name="Entry [n]" start="0" end="31" type="BINDING_TABLE_EDIT_ENTRY"/>
+    </group>
+  </instruction>
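+
+  <!-- The 3DSTATE_BINDING_TABLE_EDIT_* packets in this section carry no fixed
+       "length" attribute; their <group count="0"> element appears to describe
+       a variable-length array of 32-bit BINDING_TABLE_EDIT_ENTRY values that
+       repeats from bit 64 to the end of the packet as given by its DWord
+       Length. -->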
+
+  <instruction name="3DSTATE_BINDING_TABLE_EDIT_GS" bias="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="68"/>
+    <field name="DWord Length" start="0" end="8" type="uint" default="0"/>
+    <field name="Binding Table Block Clear" start="48" end="63" type="uint"/>
+    <field name="Binding Table Edit Target" start="32" end="33" type="uint">
+      <value name="All Cores" value="3"/>
+      <value name="Core 1" value="2"/>
+      <value name="Core 0" value="1"/>
+    </field>
+    <group count="0" start="64" size="32">
+      <field name="Entry [n]" start="0" end="31" type="BINDING_TABLE_EDIT_ENTRY"/>
+    </group>
+  </instruction>
+
+  <instruction name="3DSTATE_BINDING_TABLE_EDIT_HS" bias="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="69"/>
+    <field name="DWord Length" start="0" end="8" type="uint" default="0"/>
+    <field name="Binding Table Block Clear" start="48" end="63" type="uint"/>
+    <field name="Binding Table Edit Target" start="32" end="33" type="uint">
+      <value name="All Cores" value="3"/>
+      <value name="Core 1" value="2"/>
+      <value name="Core 0" value="1"/>
+    </field>
+    <group count="0" start="64" size="32">
+      <field name="Entry [n]" start="0" end="31" type="BINDING_TABLE_EDIT_ENTRY"/>
+    </group>
+  </instruction>
+
+  <instruction name="3DSTATE_BINDING_TABLE_EDIT_PS" bias="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="71"/>
+    <field name="DWord Length" start="0" end="8" type="uint" default="0"/>
+    <field name="Binding Table Block Clear" start="48" end="63" type="uint"/>
+    <field name="Binding Table Edit Target" start="32" end="33" type="uint">
+      <value name="All Cores" value="3"/>
+      <value name="Core 1" value="2"/>
+      <value name="Core 0" value="1"/>
+    </field>
+    <group count="0" start="64" size="32">
+      <field name="Entry [n]" start="0" end="31" type="BINDING_TABLE_EDIT_ENTRY"/>
+    </group>
+  </instruction>
+
+  <instruction name="3DSTATE_BINDING_TABLE_EDIT_VS" bias="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="67"/>
+    <field name="DWord Length" start="0" end="8" type="uint" default="0"/>
+    <field name="Binding Table Block Clear" start="48" end="63" type="uint"/>
+    <field name="Binding Table Edit Target" start="32" end="33" type="uint">
+      <value name="All Cores" value="3"/>
+      <value name="Core 1" value="2"/>
+      <value name="Core 0" value="1"/>
+    </field>
+    <group count="0" start="64" size="32">
+      <field name="Entry [n]" start="0" end="31" type="BINDING_TABLE_EDIT_ENTRY"/>
+    </group>
+  </instruction>
+
+  <instruction name="3DSTATE_BINDING_TABLE_POINTERS_DS" bias="2" length="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="40"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="0"/>
+    <field name="Pointer to DS Binding Table" start="37" end="47" type="offset"/>
+  </instruction>
+
+  <instruction name="3DSTATE_BINDING_TABLE_POINTERS_GS" bias="2" length="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="41"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="0"/>
+    <field name="Pointer to GS Binding Table" start="37" end="47" type="offset"/>
+  </instruction>
+
+  <instruction name="3DSTATE_BINDING_TABLE_POINTERS_HS" bias="2" length="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="39"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="0"/>
+    <field name="Pointer to HS Binding Table" start="37" end="47" type="offset"/>
+  </instruction>
+
+  <instruction name="3DSTATE_BINDING_TABLE_POINTERS_PS" bias="2" length="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="42"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="0"/>
+    <field name="Pointer to PS Binding Table" start="37" end="47" type="offset"/>
+  </instruction>
+
+  <instruction name="3DSTATE_BINDING_TABLE_POINTERS_VS" bias="2" length="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="38"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="0"/>
+    <field name="Pointer to VS Binding Table" start="37" end="47" type="offset"/>
+  </instruction>
+
+  <instruction name="3DSTATE_BINDING_TABLE_POOL_ALLOC" bias="2" length="4">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="1"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="25"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="2"/>
+    <field name="Binding Table Pool Base Address" start="44" end="95" type="address"/>
+    <field name="Binding Table Pool Enable" start="43" end="43" type="uint"/>
+    <field name="Surface Object Control State" start="32" end="38" type="MEMORY_OBJECT_CONTROL_STATE"/>
+    <field name="Binding Table Pool Buffer Size" start="108" end="127" type="uint">
+      <value name="No Valid Data" value="0"/>
+    </field>
+  </instruction>
+
+  <instruction name="3DSTATE_BLEND_STATE_POINTERS" bias="2" length="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="36"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="0"/>
+    <field name="Blend State Pointer" start="38" end="63" type="offset"/>
+    <field name="Blend State Pointer Valid" start="32" end="32" type="bool"/>
+  </instruction>
+
+  <instruction name="3DSTATE_CC_STATE_POINTERS" bias="2" length="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="14"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="0"/>
+    <field name="Color Calc State Pointer" start="38" end="63" type="offset"/>
+    <field name="Color Calc State Pointer Valid" start="32" end="32" type="bool"/>
+  </instruction>
+
+  <instruction name="3DSTATE_CHROMA_KEY" bias="2" length="4">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="1"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="4"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="2"/>
+    <field name="ChromaKey Table Index" start="62" end="63" type="uint"/>
+    <field name="ChromaKey Low Value" start="64" end="95" type="uint"/>
+    <field name="ChromaKey High Value" start="96" end="127" type="uint"/>
+  </instruction>
+
+  <instruction name="3DSTATE_CLEAR_PARAMS" bias="2" length="3">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="4"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="1"/>
+    <field name="Depth Clear Value" start="32" end="63" type="float"/>
+    <field name="Depth Clear Value Valid" start="64" end="64" type="bool"/>
+  </instruction>
+
+  <instruction name="3DSTATE_CLIP" bias="2" length="4">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="18"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="2"/>
+    <field name="Force User Clip Distance Cull Test Enable Bitmask" start="52" end="52" type="bool"/>
+    <field name="Vertex Sub Pixel Precision Select" start="51" end="51" type="uint">
+      <value name="8 Bit" value="0"/>
+      <value name="4 Bit" value="1"/>
+    </field>
+    <field name="Early Cull Enable" start="50" end="50" type="bool"/>
+    <field name="Force User Clip Distance Clip Test Enable Bitmask" start="49" end="49" type="bool"/>
+    <field name="Force Clip Mode" start="48" end="48" type="bool"/>
+    <field name="Clipper Statistics Enable" start="42" end="42" type="bool"/>
+    <field name="User Clip Distance Cull Test Enable Bitmask" start="32" end="39" type="uint"/>
+    <field name="Clip Enable" start="95" end="95" type="bool"/>
+    <field name="API Mode" start="94" end="94" type="uint">
+      <value name="OGL" value="0"/>
+    </field>
+    <field name="Viewport XY Clip Test Enable" start="92" end="92" type="bool"/>
+    <field name="Guardband Clip Test Enable" start="90" end="90" type="bool"/>
+    <field name="User Clip Distance Clip Test Enable Bitmask" start="80" end="87" type="uint"/>
+    <field name="Clip Mode" start="77" end="79" type="uint">
+      <value name="NORMAL" value="0"/>
+      <value name="REJECT_ALL" value="3"/>
+      <value name="ACCEPT_ALL" value="4"/>
+    </field>
+    <field name="Perspective Divide Disable" start="73" end="73" type="bool"/>
+    <field name="Non-Perspective Barycentric Enable" start="72" end="72" type="bool"/>
+    <field name="Triangle Strip/List Provoking Vertex Select" start="68" end="69" type="uint"/>
+    <field name="Line Strip/List Provoking Vertex Select" start="66" end="67" type="uint"/>
+    <field name="Triangle Fan Provoking Vertex Select" start="64" end="65" type="uint"/>
+    <field name="Minimum Point Width" start="113" end="123" type="u8.3"/>
+    <field name="Maximum Point Width" start="102" end="112" type="u8.3"/>
+    <field name="Force Zero RTA Index Enable" start="101" end="101" type="bool"/>
+    <field name="Maximum VP Index" start="96" end="99" type="uint"/>
+  </instruction>
+
+  <instruction name="3DSTATE_CONSTANT_DS" bias="2" length="11">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="26"/>
+    <field name="Constant Buffer Object Control State" start="8" end="14" type="MEMORY_OBJECT_CONTROL_STATE"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="9"/>
+    <field name="Constant Body" start="32" end="351" type="3DSTATE_CONSTANT_BODY"/>
+  </instruction>
+
+  <instruction name="3DSTATE_CONSTANT_GS" bias="2" length="11">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="22"/>
+    <field name="Constant Buffer Object Control State" start="8" end="14" type="MEMORY_OBJECT_CONTROL_STATE"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="9"/>
+    <field name="Constant Body" start="32" end="351" type="3DSTATE_CONSTANT_BODY"/>
+  </instruction>
+
+  <instruction name="3DSTATE_CONSTANT_HS" bias="2" length="11">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="25"/>
+    <field name="Constant Buffer Object Control State" start="8" end="14" type="MEMORY_OBJECT_CONTROL_STATE"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="9"/>
+    <field name="Constant Body" start="32" end="351" type="3DSTATE_CONSTANT_BODY"/>
+  </instruction>
+
+  <instruction name="3DSTATE_CONSTANT_PS" bias="2" length="11">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="23"/>
+    <field name="Constant Buffer Object Control State" start="8" end="14" type="MEMORY_OBJECT_CONTROL_STATE"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="9"/>
+    <field name="Constant Body" start="32" end="351" type="3DSTATE_CONSTANT_BODY"/>
+  </instruction>
+
+  <instruction name="3DSTATE_CONSTANT_VS" bias="2" length="11">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="21"/>
+    <field name="Constant Buffer Object Control State" start="8" end="14" type="MEMORY_OBJECT_CONTROL_STATE"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="9"/>
+    <field name="Constant Body" start="32" end="351" type="3DSTATE_CONSTANT_BODY"/>
+  </instruction>
+
+  <instruction name="3DSTATE_DEPTH_BUFFER" bias="2" length="8">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="5"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="6"/>
+    <field name="Surface Type" start="61" end="63" type="uint">
+      <value name="SURFTYPE_2D" value="1"/>
+      <value name="SURFTYPE_CUBE" value="3"/>
+      <value name="SURFTYPE_NULL" value="7"/>
+    </field>
+    <field name="Depth Write Enable" start="60" end="60" type="bool"/>
+    <field name="Stencil Write Enable" start="59" end="59" type="bool"/>
+    <field name="Hierarchical Depth Buffer Enable" start="54" end="54" type="bool"/>
+    <field name="Surface Format" start="50" end="52" type="uint">
+      <value name="D32_FLOAT" value="1"/>
+      <value name="D24_UNORM_X8_UINT" value="3"/>
+      <value name="D16_UNORM" value="5"/>
+    </field>
+    <field name="Surface Pitch" start="32" end="49" type="uint"/>
+    <field name="Surface Base Address" start="64" end="127" type="address"/>
+    <field name="Height" start="146" end="159" type="uint"/>
+    <field name="Width" start="132" end="145" type="uint"/>
+    <field name="LOD" start="128" end="131" type="uint"/>
+    <field name="Depth" start="181" end="191" type="uint"/>
+    <field name="Minimum Array Element" start="170" end="180" type="uint"/>
+    <field name="Depth Buffer Object Control State" start="160" end="166" type="MEMORY_OBJECT_CONTROL_STATE"/>
+    <field name="Tiled Resource Mode" start="222" end="223" type="uint">
+      <value name="NONE" value="0"/>
+      <value name="TILEYF" value="1"/>
+      <value name="TILEYS" value="2"/>
+    </field>
+    <field name="Mip Tail Start LOD" start="218" end="221" type="uint"/>
+    <field name="Render Target View Extent" start="245" end="255" type="uint"/>
+    <field name="Surface QPitch" start="224" end="238" type="uint"/>
+  </instruction>
+
+  <instruction name="3DSTATE_DRAWING_RECTANGLE" bias="2" length="4">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="1"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="0"/>
+    <field name="Core Mode Select" start="14" end="15" type="uint">
+      <value name="Legacy" value="0"/>
+      <value name="Core 0 Enabled" value="1"/>
+      <value name="Core 1 Enabled" value="2"/>
+    </field>
+    <field name="DWord Length" start="0" end="7" type="uint" default="2"/>
+    <field name="Clipped Drawing Rectangle Y Min" start="48" end="63" type="uint"/>
+    <field name="Clipped Drawing Rectangle X Min" start="32" end="47" type="uint"/>
+    <field name="Clipped Drawing Rectangle Y Max" start="80" end="95" type="uint"/>
+    <field name="Clipped Drawing Rectangle X Max" start="64" end="79" type="uint"/>
+    <field name="Drawing Rectangle Origin Y" start="112" end="127" type="int"/>
+    <field name="Drawing Rectangle Origin X" start="96" end="111" type="int"/>
+  </instruction>
+
+  <instruction name="3DSTATE_DS" bias="2" length="11">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="29"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="9"/>
+    <field name="Kernel Start Pointer" start="38" end="95" type="offset"/>
+    <field name="Vector Mask Enable" start="126" end="126" type="bool"/>
+    <field name="Sampler Count" start="123" end="125" type="uint">
+      <value name="No Samplers" value="0"/>
+      <value name="1-4 Samplers" value="1"/>
+      <value name="5-8 Samplers" value="2"/>
+      <value name="9-12 Samplers" value="3"/>
+      <value name="13-16 Samplers" value="4"/>
+    </field>
+    <field name="Binding Table Entry Count" start="114" end="121" type="uint"/>
+    <field name="Thread Dispatch Priority" start="113" end="113" type="uint">
+      <value name="High" value="1"/>
+    </field>
+    <field name="Floating Point Mode" start="112" end="112" type="uint">
+      <value name="IEEE-754" value="0"/>
+      <value name="Alternate" value="1"/>
+    </field>
+    <field name="Accesses UAV" start="110" end="110" type="bool"/>
+    <field name="Illegal Opcode Exception Enable" start="109" end="109" type="bool"/>
+    <field name="Software Exception Enable" start="103" end="103" type="bool"/>
+    <field name="Scratch Space Base Pointer" start="138" end="191" type="offset"/>
+    <field name="Per-Thread Scratch Space" start="128" end="131" type="uint"/>
+    <field name="Dispatch GRF Start Register For URB Data" start="212" end="216" type="uint"/>
+    <field name="Patch URB Entry Read Length" start="203" end="209" type="uint"/>
+    <field name="Patch URB Entry Read Offset" start="196" end="201" type="uint"/>
+    <field name="Maximum Number of Threads" start="245" end="253" type="uint"/>
+    <field name="Statistics Enable" start="234" end="234" type="bool"/>
+    <field name="Dispatch Mode" start="227" end="228" type="uint" prefix="DISPATCH_MODE">
+      <value name="SIMD4X2" value="0"/>
+      <value name="SIMD8_SINGLE_PATCH" value="1"/>
+      <value name="SIMD8_SINGLE_OR_DUAL_PATCH" value="2"/>
+    </field>
+    <field name="Compute W Coordinate Enable" start="226" end="226" type="bool"/>
+    <field name="Cache Disable" start="225" end="225" type="bool"/>
+    <field name="Function Enable" start="224" end="224" type="bool"/>
+    <field name="Vertex URB Entry Output Read Offset" start="277" end="282" type="uint"/>
+    <field name="Vertex URB Entry Output Length" start="272" end="276" type="uint"/>
+    <field name="User Clip Distance Clip Test Enable Bitmask" start="264" end="271" type="uint"/>
+    <field name="User Clip Distance Cull Test Enable Bitmask" start="256" end="263" type="uint"/>
+    <field name="DUAL_PATCH Kernel Start Pointer" start="294" end="351" type="offset"/>
+  </instruction>
+
+  <instruction name="3DSTATE_GATHER_CONSTANT_DS" bias="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="55"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="1"/>
+    <field name="Constant Buffer Valid" start="48" end="63" type="uint"/>
+    <field name="Constant Buffer Binding Table Block" start="44" end="47" type="uint"/>
+    <field name="Update Gather Table Only" start="33" end="33" type="uint">
+      <value name="Commit Gather" value="0"/>
+      <value name="Non-Commit Gather" value="1"/>
+    </field>
+    <field name="Gather Buffer Offset" start="70" end="86" type="offset"/>
+    <field name="Constant Buffer Dx9 Generate Stall" start="69" end="69" type="bool"/>
+    <field name="On-Die Table" start="67" end="67" type="uint">
+      <value name="Load" value="0"/>
+      <value name="Read" value="1"/>
+    </field>
+    <group count="0" start="96" size="32">
+      <field name="Entry_0" start="0" end="15" type="GATHER_CONSTANT_ENTRY"/>
+      <field name="Entry_1" start="16" end="31" type="GATHER_CONSTANT_ENTRY"/>
+    </group>
+  </instruction>
+
+  <instruction name="3DSTATE_GATHER_CONSTANT_GS" bias="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="53"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="1"/>
+    <field name="Constant Buffer Valid" start="48" end="63" type="uint"/>
+    <field name="Constant Buffer Binding Table Block" start="44" end="47" type="uint"/>
+    <field name="Update Gather Table Only" start="33" end="33" type="uint">
+      <value name="Commit Gather" value="0"/>
+      <value name="Non-Commit Gather" value="1"/>
+    </field>
+    <field name="Gather Buffer Offset" start="70" end="86" type="offset"/>
+    <field name="Constant Buffer Dx9 Generate Stall" start="69" end="69" type="bool"/>
+    <field name="On-Die Table" start="67" end="67" type="uint">
+      <value name="Load" value="0"/>
+      <value name="Read" value="1"/>
+    </field>
+    <group count="0" start="96" size="32">
+      <field name="Entry_0" start="0" end="15" type="GATHER_CONSTANT_ENTRY"/>
+      <field name="Entry_1" start="16" end="31" type="GATHER_CONSTANT_ENTRY"/>
+    </group>
+  </instruction>
+
+  <instruction name="3DSTATE_GATHER_CONSTANT_HS" bias="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="54"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="1"/>
+    <field name="Constant Buffer Valid" start="48" end="63" type="uint"/>
+    <field name="Constant Buffer Binding Table Block" start="44" end="47" type="uint"/>
+    <field name="Update Gather Table Only" start="33" end="33" type="uint">
+      <value name="Commit Gather" value="0"/>
+      <value name="Non-Commit Gather" value="1"/>
+    </field>
+    <field name="Gather Buffer Offset" start="70" end="86" type="offset"/>
+    <field name="Constant Buffer Dx9 Generate Stall" start="69" end="69" type="bool"/>
+    <field name="On-Die Table" start="67" end="67" type="uint">
+      <value name="Load" value="0"/>
+      <value name="Read" value="1"/>
+    </field>
+    <group count="0" start="96" size="32">
+      <field name="Entry_0" start="0" end="15" type="GATHER_CONSTANT_ENTRY"/>
+      <field name="Entry_1" start="16" end="31" type="GATHER_CONSTANT_ENTRY"/>
+    </group>
+  </instruction>
+
+  <instruction name="3DSTATE_GATHER_CONSTANT_PS" bias="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="56"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="1"/>
+    <field name="Constant Buffer Valid" start="48" end="63" type="uint"/>
+    <field name="Constant Buffer Binding Table Block" start="44" end="47" type="uint"/>
+    <field name="Update Gather Table Only" start="33" end="33" type="uint">
+      <value name="Commit Gather" value="0"/>
+      <value name="Non-Commit Gather" value="1"/>
+    </field>
+    <field name="DX9 On-Die Register Read Enable" start="32" end="32" type="bool"/>
+    <field name="Gather Buffer Offset" start="70" end="86" type="offset"/>
+    <field name="Constant Buffer Dx9 Generate Stall" start="69" end="69" type="bool"/>
+    <field name="Constant Buffer Dx9 Enable" start="68" end="68" type="bool"/>
+    <field name="On-Die Table" start="67" end="67" type="uint">
+      <value name="Load" value="0"/>
+      <value name="Read" value="1"/>
+    </field>
+    <group count="0" start="96" size="32">
+      <field name="Entry_0" start="0" end="15" type="GATHER_CONSTANT_ENTRY"/>
+      <field name="Entry_1" start="16" end="31" type="GATHER_CONSTANT_ENTRY"/>
+    </group>
+  </instruction>
+
+  <instruction name="3DSTATE_GATHER_CONSTANT_VS" bias="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="52"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="0"/>
+    <field name="Constant Buffer Valid" start="48" end="63" type="uint"/>
+    <field name="Constant Buffer Binding Table Block" start="44" end="47" type="uint"/>
+    <field name="Update Gather Table Only" start="33" end="33" type="uint">
+      <value name="Commit Gather" value="0"/>
+      <value name="Non-Commit Gather" value="1"/>
+    </field>
+    <field name="DX9 On-Die Register Read Enable" start="32" end="32" type="bool"/>
+    <field name="Gather Buffer Offset" start="70" end="86" type="offset"/>
+    <field name="Constant Buffer Dx9 Generate Stall" start="69" end="69" type="bool"/>
+    <field name="Constant Buffer Dx9 Enable" start="68" end="68" type="bool"/>
+    <field name="On-Die Table" start="67" end="67" type="uint">
+      <value name="Load" value="0"/>
+      <value name="Read" value="1"/>
+    </field>
+    <group count="0" start="96" size="32">
+      <field name="Entry_0" start="0" end="15" type="GATHER_CONSTANT_ENTRY"/>
+      <field name="Entry_1" start="16" end="31" type="GATHER_CONSTANT_ENTRY"/>
+    </group>
+  </instruction>
+
+  <instruction name="3DSTATE_GATHER_POOL_ALLOC" bias="2" length="4">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="1"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="26"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="2"/>
+    <field name="Gather Pool Base Address" start="44" end="95" type="address"/>
+    <field name="Gather Pool Enable" start="43" end="43" type="bool"/>
+    <field name="Memory Object Control State" start="32" end="38" type="MEMORY_OBJECT_CONTROL_STATE"/>
+    <field name="Gather Pool Buffer Size" start="108" end="127" type="uint"/>
+  </instruction>
+
+  <instruction name="3DSTATE_GS" bias="2" length="10">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="17"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="8"/>
+    <field name="Kernel Start Pointer" start="38" end="95" type="offset"/>
+    <field name="Single Program Flow" start="127" end="127" type="uint"/>
+    <field name="Vector Mask Enable" start="126" end="126" type="bool"/>
+    <field name="Sampler Count" start="123" end="125" type="uint">
+      <value name="No Samplers" value="0"/>
+      <value name="1-4 Samplers" value="1"/>
+      <value name="5-8 Samplers" value="2"/>
+      <value name="9-12 Samplers" value="3"/>
+      <value name="13-16 Samplers" value="4"/>
+    </field>
+    <field name="Binding Table Entry Count" start="114" end="121" type="uint"/>
+    <field name="Thread Dispatch Priority" start="113" end="113" type="uint">
+      <value name="High" value="1"/>
+    </field>
+    <field name="Floating Point Mode" start="112" end="112" type="uint">
+      <value name="IEEE-754" value="0"/>
+      <value name="Alternate" value="1"/>
+    </field>
+    <field name="Illegal Opcode Exception Enable" start="109" end="109" type="bool"/>
+    <field name="Accesses UAV" start="108" end="108" type="bool"/>
+    <field name="Mask Stack Exception Enable" start="107" end="107" type="bool"/>
+    <field name="Software  Exception Enable" start="103" end="103" type="bool"/>
+    <field name="Expected Vertex Count" start="96" end="101" type="uint"/>
+    <field name="Scratch Space Base Pointer" start="138" end="191" type="offset"/>
+    <field name="Per-Thread Scratch Space" start="128" end="131" type="uint"/>
+    <field name="Dispatch GRF Start Register For URB Data [5:4]" start="221" end="222" type="uint"/>
+    <field name="Output Vertex Size" start="215" end="220" type="uint"/>
+    <field name="Output Topology" start="209" end="214" type="uint" prefix="OUTPUT"/>
+    <field name="Vertex URB Entry Read Length" start="203" end="208" type="uint"/>
+    <field name="Include Vertex Handles" start="202" end="202" type="uint"/>
+    <field name="Vertex URB Entry Read Offset" start="196" end="201" type="uint"/>
+    <field name="Dispatch GRF Start Register For URB Data" start="192" end="195" type="uint"/>
+    <field name="Control Data Header Size" start="244" end="247" type="uint"/>
+    <field name="Instance Control" start="239" end="243" type="uint"/>
+    <field name="Default Stream Id" start="237" end="238" type="uint"/>
+    <field name="Dispatch Mode" start="235" end="236" type="uint" prefix="DISPATCH_MODE">
+      <value name="Dual Instance" value="1"/>
+      <value name="Dual Object" value="2"/>
+      <value name="SIMD8" value="3"/>
+    </field>
+    <field name="Statistics Enable" start="234" end="234" type="bool"/>
+    <field name="Invocations Increment Value" start="229" end="233" type="uint"/>
+    <field name="Include Primitive ID" start="228" end="228" type="uint"/>
+    <field name="Hint" start="227" end="227" type="uint"/>
+    <field name="Reorder Mode" start="226" end="226" type="uint">
+      <value name="LEADING" value="0"/>
+      <value name="TRAILING" value="1"/>
+    </field>
+    <field name="Discard Adjacency" start="225" end="225" type="bool"/>
+    <field name="Enable" start="224" end="224" type="bool"/>
+    <field name="Control Data Format" start="287" end="287" type="uint">
+      <value name="CUT" value="0"/>
+      <value name="SID" value="1"/>
+    </field>
+    <field name="Static Output" start="286" end="286" type="bool"/>
+    <field name="Static Output Vertex Count" start="272" end="282" type="uint"/>
+    <field name="Maximum Number of Threads" start="256" end="264" type="uint"/>
+    <field name="Vertex URB Entry Output Read Offset" start="309" end="314" type="uint"/>
+    <field name="Vertex URB Entry Output Length" start="304" end="308" type="uint"/>
+    <field name="User Clip Distance Clip Test Enable Bitmask" start="296" end="303" type="uint"/>
+    <field name="User Clip Distance Cull Test Enable Bitmask" start="288" end="295" type="uint"/>
+  </instruction>
+
+  <instruction name="3DSTATE_HIER_DEPTH_BUFFER" bias="2" length="5">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="7"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="3"/>
+    <field name="Hierarchical Depth Buffer Object Control State" start="57" end="63" type="MEMORY_OBJECT_CONTROL_STATE"/>
+    <field name="Surface Pitch" start="32" end="48" type="uint"/>
+    <field name="Surface Base Address" start="64" end="127" type="address"/>
+    <field name="Surface QPitch" start="128" end="142" type="uint"/>
+  </instruction>
+
+  <instruction name="3DSTATE_HS" bias="2" length="9">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="27"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="7"/>
+    <field name="Sampler Count" start="59" end="61" type="uint">
+      <value name="No Samplers" value="0"/>
+      <value name="1-4 Samplers" value="1"/>
+      <value name="5-8 Samplers" value="2"/>
+      <value name="9-12 Samplers" value="3"/>
+      <value name="13-16 Samplers" value="4"/>
+    </field>
+    <field name="Binding Table Entry Count" start="50" end="57" type="uint"/>
+    <field name="Thread Dispatch Priority" start="49" end="49" type="uint">
+      <value name="High" value="1"/>
+    </field>
+    <field name="Floating Point Mode" start="48" end="48" type="uint">
+      <value name="IEEE-754" value="0"/>
+      <value name="alternate" value="1"/>
+    </field>
+    <field name="Illegal Opcode Exception Enable" start="45" end="45" type="bool"/>
+    <field name="Software Exception Enable" start="44" end="44" type="bool"/>
+    <field name="Enable" start="95" end="95" type="bool"/>
+    <field name="Statistics Enable" start="93" end="93" type="bool"/>
+    <field name="Maximum Number of Threads" start="72" end="80" type="uint"/>
+    <field name="Instance Count" start="64" end="67" type="uint"/>
+    <field name="Kernel Start Pointer" start="102" end="159" type="offset"/>
+    <field name="Scratch Space Base Pointer" start="170" end="223" type="offset"/>
+    <field name="Per-Thread Scratch Space" start="160" end="163" type="uint"/>
+    <field name="Dispatch GRF Start Register For URB Data [5]" start="252" end="252" type="uint"/>
+    <field name="Single Program Flow" start="251" end="251" type="bool"/>
+    <field name="Vector Mask Enable" start="250" end="250" type="bool"/>
+    <field name="Accesses UAV" start="249" end="249" type="bool"/>
+    <field name="Include Vertex Handles" start="248" end="248" type="uint"/>
+    <field name="Dispatch GRF Start Register For URB Data" start="243" end="247" type="uint"/>
+    <field name="Dispatch Mode" start="241" end="242" type="uint" prefix="DISPATCH_MODE">
+      <value name="SINGLE_PATCH" value="0"/>
+      <value name="DUAL_PATCH" value="1"/>
+      <value name="8_PATCH" value="2"/>
+    </field>
+    <field name="Vertex URB Entry Read Length" start="235" end="240" type="uint"/>
+    <field name="Vertex URB Entry Read Offset" start="228" end="233" type="uint"/>
+    <field name="Include Primitive ID" start="224" end="224" type="bool"/>
+  </instruction>
+
+  <instruction name="3DSTATE_INDEX_BUFFER" bias="2" length="5">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="10"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="3"/>
+    <field name="Index Format" start="40" end="41" type="uint" prefix="INDEX">
+      <value name="BYTE" value="0"/>
+      <value name="WORD" value="1"/>
+      <value name="DWORD" value="2"/>
+    </field>
+    <field name="Memory Object Control State" start="32" end="38" type="MEMORY_OBJECT_CONTROL_STATE"/>
+    <field name="Buffer Starting Address" start="64" end="127" type="address"/>
+    <field name="Buffer Size" start="128" end="159" type="uint"/>
+  </instruction>
+
+  <instruction name="3DSTATE_LINE_STIPPLE" bias="2" length="3">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="1"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="8"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="1"/>
+    <field name="Modify Enable (Current Repeat Counter, Current Stipple Index)" start="63" end="63" type="bool"/>
+    <field name="Current Repeat Counter" start="53" end="61" type="uint"/>
+    <field name="Current Stipple Index" start="48" end="51" type="uint"/>
+    <field name="Line Stipple Pattern" start="32" end="47" type="uint"/>
+    <field name="Line Stipple Inverse Repeat Count" start="79" end="95" type="u1.16"/>
+    <field name="Line Stipple Repeat Count" start="64" end="72" type="uint"/>
+  </instruction>
+
+  <instruction name="3DSTATE_MONOFILTER_SIZE" bias="2" length="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="1"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="17"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="0"/>
+    <field name="Monochrome Filter Width" start="35" end="37" type="uint"/>
+    <field name="Monochrome Filter Height" start="32" end="34" type="uint"/>
+  </instruction>
+
+  <instruction name="3DSTATE_MULTISAMPLE" bias="2" length="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="13"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="0"/>
+    <field name="Pixel Position Offset Enable" start="37" end="37" type="bool"/>
+    <field name="Pixel Location" start="36" end="36" type="uint">
+      <value name="CENTER" value="0"/>
+      <value name="UL_CORNER" value="1"/>
+    </field>
+    <field name="Number of Multisamples" start="33" end="35" type="uint"/>
+  </instruction>
+
+  <instruction name="3DSTATE_POLY_STIPPLE_OFFSET" bias="2" length="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="1"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="6"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="0"/>
+    <field name="Polygon Stipple X Offset" start="40" end="44" type="uint"/>
+    <field name="Polygon Stipple Y Offset" start="32" end="36" type="uint"/>
+  </instruction>
+
+  <instruction name="3DSTATE_POLY_STIPPLE_PATTERN" bias="2" length="33">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="1"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="7"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="31"/>
+    <group count="32" start="32" size="32">
+      <field name="Pattern Row" start="0" end="31" type="uint"/>
+    </group>
+  </instruction>
+
+  <instruction name="3DSTATE_PS" bias="2" length="12">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="32"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="10"/>
+    <field name="Kernel Start Pointer 0" start="38" end="95" type="offset"/>
+    <field name="Single Program Flow" start="127" end="127" type="uint"/>
+    <field name="Vector Mask Enable" start="126" end="126" type="bool"/>
+    <field name="Sampler Count" start="123" end="125" type="uint">
+      <value name="No Samplers" value="0"/>
+      <value name="1-4 Samplers" value="1"/>
+      <value name="5-8 Samplers" value="2"/>
+      <value name="9-12 Samplers" value="3"/>
+      <value name="13-16 Samplers" value="4"/>
+    </field>
+    <field name="Single Precision Denormal Mode" start="122" end="122" type="uint">
+      <value name="Flushed to Zero" value="0"/>
+      <value name="Retained" value="1"/>
+    </field>
+    <field name="Binding Table Entry Count" start="114" end="121" type="uint"/>
+    <field name="Thread Dispatch Priority" start="113" end="113" type="uint">
+      <value name="High" value="1"/>
+    </field>
+    <field name="Floating Point Mode" start="112" end="112" type="uint">
+      <value name="IEEE-754" value="0"/>
+      <value name="Alternate" value="1"/>
+    </field>
+    <field name="Rounding Mode" start="110" end="111" type="uint">
+      <value name="RTNE" value="0"/>
+      <value name="RU" value="1"/>
+      <value name="RD" value="2"/>
+      <value name="RTZ" value="3"/>
+    </field>
+    <field name="Illegal Opcode Exception Enable" start="109" end="109" type="bool"/>
+    <field name="Mask Stack Exception Enable" start="107" end="107" type="bool"/>
+    <field name="Software  Exception Enable" start="103" end="103" type="bool"/>
+    <field name="Scratch Space Base Pointer" start="138" end="191" type="offset"/>
+    <field name="Per Thread Scratch Space" start="128" end="131" type="uint"/>
+    <field name="Maximum Number of Threads Per PSD" start="215" end="223" type="uint"/>
+    <field name="Push Constant Enable" start="203" end="203" type="bool"/>
+    <field name="Render Target Fast Clear Enable" start="200" end="200" type="bool"/>
+    <field name="Render Target Resolve Type" start="198" end="199" type="uint">
+      <value name="RESOLVE_DISABLED" value="0"/>
+      <value name="RESOLVE_PARTIAL" value="1"/>
+      <value name="RESOLVE_FULL" value="3"/>
+    </field>
+    <field name="Position XY Offset Select" start="195" end="196" type="uint">
+      <value name="POSOFFSET_NONE" value="0"/>
+      <value name="POSOFFSET_CENTROID" value="2"/>
+      <value name="POSOFFSET_SAMPLE" value="3"/>
+    </field>
+    <field name="32 Pixel Dispatch Enable" start="194" end="194" type="bool"/>
+    <field name="16 Pixel Dispatch Enable" start="193" end="193" type="bool"/>
+    <field name="8 Pixel Dispatch Enable" start="192" end="192" type="bool"/>
+    <field name="Dispatch GRF Start Register For Constant/Setup Data 0" start="240" end="246" type="uint"/>
+    <field name="Dispatch GRF Start Register For Constant/Setup Data 1" start="232" end="238" type="uint"/>
+    <field name="Dispatch GRF Start Register For Constant/Setup Data 2" start="224" end="230" type="uint"/>
+    <field name="Kernel Start Pointer 1" start="262" end="319" type="offset"/>
+    <field name="Kernel Start Pointer 2" start="326" end="383" type="offset"/>
+  </instruction>
+
+  <instruction name="3DSTATE_PS_BLEND" bias="2" length="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="77"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="0"/>
+    <field name="Alpha To Coverage Enable" start="63" end="63" type="bool"/>
+    <field name="Has Writeable RT" start="62" end="62" type="bool"/>
+    <field name="Color Buffer Blend Enable" start="61" end="61" type="bool"/>
+    <field name="Source Alpha Blend Factor" start="56" end="60" type="uint"/>
+    <field name="Destination Alpha Blend Factor" start="51" end="55" type="uint"/>
+    <field name="Source Blend Factor" start="46" end="50" type="uint"/>
+    <field name="Destination Blend Factor" start="41" end="45" type="uint"/>
+    <field name="Alpha Test Enable" start="40" end="40" type="bool"/>
+    <field name="Independent Alpha Blend Enable" start="39" end="39" type="bool"/>
+  </instruction>
+
+  <instruction name="3DSTATE_PS_EXTRA" bias="2" length="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="79"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="0"/>
+    <field name="Pixel Shader Valid" start="63" end="63" type="bool"/>
+    <field name="Pixel Shader Does not write to RT" start="62" end="62" type="bool"/>
+    <field name="oMask Present to Render Target" start="61" end="61" type="bool"/>
+    <field name="Pixel Shader Kills Pixel" start="60" end="60" type="bool"/>
+    <field name="Pixel Shader Computed Depth Mode" start="58" end="59" type="uint">
+      <value name="PSCDEPTH_OFF" value="0"/>
+      <value name="PSCDEPTH_ON" value="1"/>
+      <value name="PSCDEPTH_ON_GE" value="2"/>
+      <value name="PSCDEPTH_ON_LE" value="3"/>
+    </field>
+    <field name="Force Computed Depth" start="57" end="57" type="bool"/>
+    <field name="Pixel Shader Uses Source Depth" start="56" end="56" type="bool"/>
+    <field name="Pixel Shader Uses Source W" start="55" end="55" type="bool"/>
+    <field name="Attribute Enable" start="40" end="40" type="bool"/>
+    <field name="Pixel Shader Disables Alpha To Coverage" start="39" end="39" type="bool"/>
+    <field name="Pixel Shader Is Per Sample" start="38" end="38" type="bool"/>
+    <field name="Pixel Shader Computes Stencil" start="37" end="37" type="bool"/>
+    <field name="Pixel Shader Pulls Bary" start="35" end="35" type="bool"/>
+    <field name="Pixel Shader Has UAV" start="34" end="34" type="bool"/>
+    <field name="Input Coverage Mask State" start="32" end="33" type="uint" prefix="ICMS">
+      <value name="NONE" value="0"/>
+      <value name="NORMAL" value="1"/>
+      <value name="INNER_CONSERVATIVE" value="2"/>
+      <value name="DEPTH_COVERAGE" value="3"/>
+    </field>
+  </instruction>
+
+  <instruction name="3DSTATE_PUSH_CONSTANT_ALLOC_DS" bias="2" length="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="1"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="20"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="0"/>
+    <field name="Constant Buffer Offset" start="48" end="52" type="uint"/>
+    <field name="Constant Buffer Size" start="32" end="37" type="uint"/>
+  </instruction>
+
+  <instruction name="3DSTATE_PUSH_CONSTANT_ALLOC_GS" bias="2" length="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="1"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="21"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="0"/>
+    <field name="Constant Buffer Offset" start="48" end="52" type="uint"/>
+    <field name="Constant Buffer Size" start="32" end="37" type="uint"/>
+  </instruction>
+
+  <instruction name="3DSTATE_PUSH_CONSTANT_ALLOC_HS" bias="2" length="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="1"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="19"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="0"/>
+    <field name="Constant Buffer Offset" start="48" end="52" type="uint"/>
+    <field name="Constant Buffer Size" start="32" end="37" type="uint"/>
+  </instruction>
+
+  <instruction name="3DSTATE_PUSH_CONSTANT_ALLOC_PS" bias="2" length="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="1"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="22"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="0"/>
+    <field name="Constant Buffer Offset" start="48" end="52" type="uint"/>
+    <field name="Constant Buffer Size" start="32" end="37" type="uint"/>
+  </instruction>
+
+  <instruction name="3DSTATE_PUSH_CONSTANT_ALLOC_VS" bias="2" length="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="1"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="18"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="0"/>
+    <field name="Constant Buffer Offset" start="48" end="52" type="uint"/>
+    <field name="Constant Buffer Size" start="32" end="37" type="uint"/>
+  </instruction>
+
+  <instruction name="3DSTATE_RASTER" bias="2" length="5">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="80"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="3"/>
+    <field name="Viewport Z Far Clip Test Enable" start="58" end="58" type="bool"/>
+    <field name="Conservative Rasterization Enable" start="56" end="56" type="bool"/>
+    <field name="API Mode" start="54" end="55" type="uint">
+      <value name="DX9/OGL" value="0"/>
+      <value name="DX10.0" value="1"/>
+      <value name="DX10.1+" value="2"/>
+    </field>
+    <field name="Front Winding" start="53" end="53" type="uint">
+      <value name="Clockwise" value="0"/>
+      <value name="Counter Clockwise" value="1"/>
+    </field>
+    <field name="Forced Sample Count" start="50" end="52" type="uint" prefix="FSC">
+      <value name="NUMRASTSAMPLES_0" value="0"/>
+      <value name="NUMRASTSAMPLES_1" value="1"/>
+      <value name="NUMRASTSAMPLES_2" value="2"/>
+      <value name="NUMRASTSAMPLES_4" value="3"/>
+      <value name="NUMRASTSAMPLES_8" value="4"/>
+      <value name="NUMRASTSAMPLES_16" value="5"/>
+    </field>
+    <field name="Cull Mode" start="48" end="49" type="uint" prefix="CULLMODE">
+      <value name="BOTH" value="0"/>
+      <value name="NONE" value="1"/>
+      <value name="FRONT" value="2"/>
+      <value name="BACK" value="3"/>
+    </field>
+    <field name="Force Multisampling" start="46" end="46" type="uint"/>
+    <field name="Smooth Point Enable" start="45" end="45" type="bool"/>
+    <field name="DX Multisample Rasterization Enable" start="44" end="44" type="bool"/>
+    <field name="DX Multisample Rasterization Mode" start="42" end="43" type="uint">
+      <value name="MSRASTMODE_ OFF_PIXEL" value="0"/>
+      <value name="MSRASTMODE_ OFF_PATTERN" value="1"/>
+      <value name="MSRASTMODE_ ON_PIXEL" value="2"/>
+      <value name="MSRASTMODE_ ON_PATTERN" value="3"/>
+    </field>
+    <field name="Global Depth Offset Enable Solid" start="41" end="41" type="bool"/>
+    <field name="Global Depth Offset Enable Wireframe" start="40" end="40" type="bool"/>
+    <field name="Global Depth Offset Enable Point" start="39" end="39" type="bool"/>
+    <field name="Front Face Fill Mode" start="37" end="38" type="uint" prefix="FILL_MODE">
+      <value name="SOLID" value="0"/>
+      <value name="WIREFRAME" value="1"/>
+      <value name="POINT" value="2"/>
+    </field>
+    <field name="Back Face Fill Mode" start="35" end="36" type="uint" prefix="FILL_MODE">
+      <value name="SOLID" value="0"/>
+      <value name="WIREFRAME" value="1"/>
+      <value name="POINT" value="2"/>
+    </field>
+    <field name="Antialiasing Enable" start="34" end="34" type="bool"/>
+    <field name="Scissor Rectangle Enable" start="33" end="33" type="bool"/>
+    <field name="Viewport Z Near Clip Test Enable" start="32" end="32" type="bool"/>
+    <field name="Global Depth Offset Constant" start="64" end="95" type="float"/>
+    <field name="Global Depth Offset Scale" start="96" end="127" type="float"/>
+    <field name="Global Depth Offset Clamp" start="128" end="159" type="float"/>
+  </instruction>
+
+  <instruction name="3DSTATE_RS_CONSTANT_POINTER" bias="2" length="4">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="84"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="2"/>
+    <field name="Shader Select" start="60" end="62" type="uint">
+      <value name="VS" value="0"/>
+      <value name="PS" value="4"/>
+    </field>
+    <field name="Operation Load or Store" start="44" end="44" type="uint" prefix="RS">
+      <value name="Store" value="0"/>
+      <value name="Load" value="1"/>
+    </field>
+    <field name="Global Constant Buffer Address" start="70" end="95" type="address"/>
+    <field name="Global Constant Buffer Address High" start="96" end="127" type="address"/>
+  </instruction>
+
+  <instruction name="3DSTATE_SAMPLER_PALETTE_LOAD0" bias="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="1"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="2"/>
+    <field name="DWord Length" start="0" end="7" type="uint"/>
+    <group count="0" start="32" size="32">
+      <field name="Entry" start="0" end="31" type="PALETTE_ENTRY"/>
+    </group>
+  </instruction>
+
+  <instruction name="3DSTATE_SAMPLER_PALETTE_LOAD1" bias="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="1"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="12"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="0"/>
+    <group count="0" start="32" size="32">
+      <field name="Palette Alpha[0:N-1]" start="24" end="31" type="uint"/>
+      <field name="Palette Red[0:N-1]" start="16" end="23" type="uint"/>
+      <field name="Palette Green[0:N-1]" start="8" end="15" type="uint"/>
+      <field name="Palette Blue[0:N-1]" start="0" end="7" type="uint"/>
+    </group>
+  </instruction>
+
+  <instruction name="3DSTATE_SAMPLER_STATE_POINTERS_DS" bias="2" length="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="45"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="0"/>
+    <field name="Pointer to DS Sampler State" start="37" end="63" type="offset"/>
+  </instruction>
+
+  <instruction name="3DSTATE_SAMPLER_STATE_POINTERS_GS" bias="2" length="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="46"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="0"/>
+    <field name="Pointer to GS Sampler State" start="37" end="63" type="offset"/>
+  </instruction>
+
+  <instruction name="3DSTATE_SAMPLER_STATE_POINTERS_HS" bias="2" length="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="44"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="0"/>
+    <field name="Pointer to HS Sampler State" start="37" end="63" type="offset"/>
+  </instruction>
+
+  <instruction name="3DSTATE_SAMPLER_STATE_POINTERS_PS" bias="2" length="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="47"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="0"/>
+    <field name="Pointer to PS Sampler State" start="37" end="63" type="offset"/>
+  </instruction>
+
+  <instruction name="3DSTATE_SAMPLER_STATE_POINTERS_VS" bias="2" length="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="43"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="0"/>
+    <field name="Pointer to VS Sampler State" start="37" end="63" type="offset"/>
+  </instruction>
+
+  <instruction name="3DSTATE_SAMPLE_MASK" bias="2" length="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="24"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="0"/>
+    <field name="Sample Mask" start="32" end="47" type="uint"/>
+  </instruction>
+
+  <instruction name="3DSTATE_SAMPLE_PATTERN" bias="2" length="9">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="1"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="28"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="7"/>
+    <field name="16x Sample3 X Offset" start="60" end="63" type="u0.4"/>
+    <field name="16x Sample3 Y Offset" start="56" end="59" type="u0.4"/>
+    <field name="16x Sample2 X Offset" start="52" end="55" type="u0.4"/>
+    <field name="16x Sample2 Y Offset" start="48" end="51" type="u0.4"/>
+    <field name="16x Sample1 X Offset" start="44" end="47" type="u0.4"/>
+    <field name="16x Sample1 Y Offset" start="40" end="43" type="u0.4"/>
+    <field name="16x Sample0 X Offset" start="36" end="39" type="u0.4"/>
+    <field name="16x Sample0 Y Offset" start="32" end="35" type="u0.4"/>
+    <field name="16x Sample7 X Offset" start="92" end="95" type="u0.4"/>
+    <field name="16x Sample7 Y Offset" start="88" end="91" type="u0.4"/>
+    <field name="16x Sample6 X Offset" start="84" end="87" type="u0.4"/>
+    <field name="16x Sample6 Y Offset" start="80" end="83" type="u0.4"/>
+    <field name="16x Sample5 X Offset" start="76" end="79" type="u0.4"/>
+    <field name="16x Sample5 Y Offset" start="72" end="75" type="u0.4"/>
+    <field name="16x Sample4 X Offset" start="68" end="71" type="u0.4"/>
+    <field name="16x Sample4 Y Offset" start="64" end="67" type="u0.4"/>
+    <field name="16x Sample11 X Offset" start="124" end="127" type="u0.4"/>
+    <field name="16x Sample11 Y Offset" start="120" end="123" type="u0.4"/>
+    <field name="16x Sample10 X Offset" start="116" end="119" type="u0.4"/>
+    <field name="16x Sample10 Y Offset" start="112" end="115" type="u0.4"/>
+    <field name="16x Sample9 X Offset" start="108" end="111" type="u0.4"/>
+    <field name="16x Sample9 Y Offset" start="104" end="107" type="u0.4"/>
+    <field name="16x Sample8 X Offset" start="100" end="103" type="u0.4"/>
+    <field name="16x Sample8 Y Offset" start="96" end="99" type="u0.4"/>
+    <field name="16x Sample15 X Offset" start="156" end="159" type="u0.4"/>
+    <field name="16x Sample15 Y Offset" start="152" end="155" type="u0.4"/>
+    <field name="16x Sample14 X Offset" start="148" end="151" type="u0.4"/>
+    <field name="16x Sample14 Y Offset" start="144" end="147" type="u0.4"/>
+    <field name="16x Sample13 X Offset" start="140" end="143" type="u0.4"/>
+    <field name="16x Sample13 Y Offset" start="136" end="139" type="u0.4"/>
+    <field name="16x Sample12 X Offset" start="132" end="135" type="u0.4"/>
+    <field name="16x Sample12 Y Offset" start="128" end="131" type="u0.4"/>
+    <field name="8x Sample7 X Offset" start="188" end="191" type="u0.4"/>
+    <field name="8x Sample7 Y Offset" start="184" end="187" type="u0.4"/>
+    <field name="8x Sample6 X Offset" start="180" end="183" type="u0.4"/>
+    <field name="8x Sample6 Y Offset" start="176" end="179" type="u0.4"/>
+    <field name="8x Sample5 X Offset" start="172" end="175" type="u0.4"/>
+    <field name="8x Sample5 Y Offset" start="168" end="171" type="u0.4"/>
+    <field name="8x Sample4 X Offset" start="164" end="167" type="u0.4"/>
+    <field name="8x Sample4 Y Offset" start="160" end="163" type="u0.4"/>
+    <field name="8x Sample3 X Offset" start="220" end="223" type="u0.4"/>
+    <field name="8x Sample3 Y Offset" start="216" end="219" type="u0.4"/>
+    <field name="8x Sample2 X Offset" start="212" end="215" type="u0.4"/>
+    <field name="8x Sample2 Y Offset" start="208" end="211" type="u0.4"/>
+    <field name="8x Sample1 X Offset" start="204" end="207" type="u0.4"/>
+    <field name="8x Sample1 Y Offset" start="200" end="203" type="u0.4"/>
+    <field name="8x Sample0 X Offset" start="196" end="199" type="u0.4"/>
+    <field name="8x Sample0 Y Offset" start="192" end="195" type="u0.4"/>
+    <field name="4x Sample3 X Offset" start="252" end="255" type="u0.4"/>
+    <field name="4x Sample3 Y Offset" start="248" end="251" type="u0.4"/>
+    <field name="4x Sample2 X Offset" start="244" end="247" type="u0.4"/>
+    <field name="4x Sample2 Y Offset" start="240" end="243" type="u0.4"/>
+    <field name="4x Sample1 X Offset" start="236" end="239" type="u0.4"/>
+    <field name="4x Sample1 Y Offset" start="232" end="235" type="u0.4"/>
+    <field name="4x Sample0 X Offset" start="228" end="231" type="u0.4"/>
+    <field name="4x Sample0 Y Offset" start="224" end="227" type="u0.4"/>
+    <field name="1x Sample0 X Offset" start="276" end="279" type="u0.4"/>
+    <field name="1x Sample0 Y Offset" start="272" end="275" type="u0.4"/>
+    <field name="2x Sample1 X Offset" start="268" end="271" type="u0.4"/>
+    <field name="2x Sample1 Y Offset" start="264" end="267" type="u0.4"/>
+    <field name="2x Sample0 X Offset" start="260" end="263" type="u0.4"/>
+    <field name="2x Sample0 Y Offset" start="256" end="259" type="u0.4"/>
+  </instruction>
+
+  <instruction name="3DSTATE_SBE" bias="2" length="6">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="31"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="4"/>
+    <field name="Force Vertex URB Entry Read Length" start="61" end="61" type="bool"/>
+    <field name="Force Vertex URB Entry Read Offset" start="60" end="60" type="bool"/>
+    <field name="Number of SF Output Attributes" start="54" end="59" type="uint"/>
+    <field name="Attribute Swizzle Enable" start="53" end="53" type="bool"/>
+    <field name="Point Sprite Texture Coordinate Origin" start="52" end="52" type="uint">
+      <value name="UPPERLEFT" value="0"/>
+      <value name="LOWERLEFT" value="1"/>
+    </field>
+    <field name="Primitive ID Override Component W" start="51" end="51" type="bool"/>
+    <field name="Primitive ID Override Component Z" start="50" end="50" type="bool"/>
+    <field name="Primitive ID Override Component Y" start="49" end="49" type="bool"/>
+    <field name="Primitive ID Override Component X" start="48" end="48" type="bool"/>
+    <field name="Vertex URB Entry Read Length" start="43" end="47" type="uint"/>
+    <field name="Vertex URB Entry Read Offset" start="37" end="42" type="uint"/>
+    <field name="Primitive ID Override Attribute Select" start="32" end="36" type="uint"/>
+    <field name="Point Sprite Texture Coordinate Enable" start="64" end="95" type="uint"/>
+    <field name="Constant Interpolation Enable" start="96" end="127" type="uint"/>
+    <field name="Attribute 15 Active Component Format" start="158" end="159" type="uint"/>
+    <field name="Attribute 14 Active Component Format" start="156" end="157" type="uint"/>
+    <field name="Attribute 13 Active Component Format" start="154" end="155" type="uint"/>
+    <field name="Attribute 12 Active Component Format" start="152" end="153" type="uint"/>
+    <field name="Attribute 11 Active Component Format" start="150" end="151" type="uint"/>
+    <field name="Attribute 10 Active Component Format" start="148" end="149" type="uint"/>
+    <field name="Attribute 9 Active Component Format" start="146" end="147" type="uint"/>
+    <field name="Attribute 8 Active Component Format" start="144" end="145" type="uint"/>
+    <field name="Attribute 7 Active Component Format" start="142" end="143" type="uint"/>
+    <field name="Attribute 6 Active Component Format" start="140" end="141" type="uint"/>
+    <field name="Attribute 5 Active Component Format" start="138" end="139" type="uint"/>
+    <field name="Attribute 4 Active Component Format" start="136" end="137" type="uint"/>
+    <field name="Attribute 3 Active Component Format" start="134" end="135" type="uint"/>
+    <field name="Attribute 2 Active Component Format" start="132" end="133" type="uint"/>
+    <field name="Attribute 1 Active Component Format" start="130" end="131" type="uint"/>
+    <field name="Attribute 0 Active Component Format" start="128" end="129" type="uint"/>
+    <field name="Attribute 31 Active Component Format" start="190" end="191" type="uint"/>
+    <field name="Attribute 30 Active Component Format" start="188" end="189" type="uint"/>
+    <field name="Attribute 29 Active Component Format" start="186" end="187" type="uint"/>
+    <field name="Attribute 28 Active Component Format" start="184" end="185" type="uint"/>
+    <field name="Attribute 27 Active Component Format" start="182" end="183" type="uint"/>
+    <field name="Attribute 26 Active Component Format" start="180" end="181" type="uint"/>
+    <field name="Attribute 25 Active Component Format" start="178" end="179" type="uint"/>
+    <field name="Attribute 24 Active Component Format" start="176" end="177" type="uint"/>
+    <field name="Attribute 23 Active Component Format" start="174" end="175" type="uint"/>
+    <field name="Attribute 22 Active Component Format" start="172" end="173" type="uint"/>
+    <field name="Attribute 21 Active Component Format" start="170" end="171" type="uint"/>
+    <field name="Attribute 20 Active Component Format" start="168" end="169" type="uint"/>
+    <field name="Attribute 19 Active Component Format" start="166" end="167" type="uint"/>
+    <field name="Attribute 18 Active Component Format" start="164" end="165" type="uint"/>
+    <field name="Attribute 17 Active Component Format" start="162" end="163" type="uint"/>
+    <field name="Attribute 16 Active Component Format" start="160" end="161" type="uint"/>
+  </instruction>
+
+  <instruction name="3DSTATE_SBE_SWIZ" bias="2" length="11">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="81"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="9"/>
+    <group count="16" start="32" size="16">
+      <field name="Attribute" start="0" end="15" type="SF_OUTPUT_ATTRIBUTE_DETAIL"/>
+    </group>
+    <group count="16" start="288" size="4">
+      <field name="Attribute Wrap Shortest Enables" start="0" end="3" type="uint"/>
+    </group>
+  </instruction>
+
+  <instruction name="3DSTATE_SCISSOR_STATE_POINTERS" bias="2" length="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="15"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="0"/>
+    <field name="Scissor Rect Pointer" start="37" end="63" type="offset"/>
+  </instruction>
+
+  <instruction name="3DSTATE_SF" bias="2" length="4">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="19"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="2"/>
+    <field name="Line Width" start="44" end="61" type="u11.7"/>
+    <field name="Legacy Global Depth Bias Enable" start="43" end="43" type="bool"/>
+    <field name="Statistics Enable" start="42" end="42" type="bool"/>
+    <field name="Viewport Transform Enable" start="33" end="33" type="bool"/>
+    <field name="Line End Cap Antialiasing Region Width" start="80" end="81" type="uint">
+      <value name="0.5 pixels" value="0"/>
+      <value name="1.0 pixels" value="1"/>
+      <value name="2.0 pixels" value="2"/>
+      <value name="4.0 pixels" value="3"/>
+    </field>
+    <field name="Last Pixel Enable" start="127" end="127" type="bool"/>
+    <field name="Triangle Strip/List Provoking Vertex Select" start="125" end="126" type="uint"/>
+    <field name="Line Strip/List Provoking Vertex Select" start="123" end="124" type="uint"/>
+    <field name="Triangle Fan Provoking Vertex Select" start="121" end="122" type="uint"/>
+    <field name="AA Line Distance Mode" start="110" end="110" type="uint">
+      <value name="AALINEDISTANCE_TRUE" value="1"/>
+    </field>
+    <field name="Smooth Point Enable" start="109" end="109" type="bool"/>
+    <field name="Vertex Sub Pixel Precision Select" start="108" end="108" type="uint"/>
+    <field name="Point Width Source" start="107" end="107" type="uint">
+      <value name="Vertex" value="0"/>
+      <value name="State" value="1"/>
+    </field>
+    <field name="Point Width" start="96" end="106" type="u8.3"/>
+  </instruction>
+
+  <instruction name="3DSTATE_SO_BUFFER" bias="2" length="8">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="1"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="24"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="6"/>
+    <field name="SO Buffer Enable" start="63" end="63" type="bool"/>
+    <field name="SO Buffer Index" start="61" end="62" type="uint"/>
+    <field name="SO Buffer Object Control State" start="54" end="60" type="MEMORY_OBJECT_CONTROL_STATE"/>
+    <field name="Stream Offset Write Enable" start="53" end="53" type="bool"/>
+    <field name="Stream Output Buffer Offset Address Enable" start="52" end="52" type="bool"/>
+    <field name="Surface Base Address" start="66" end="111" type="address"/>
+    <field name="Surface Size" start="128" end="157" type="uint"/>
+    <field name="Stream Output Buffer Offset Address" start="162" end="207" type="address"/>
+    <field name="Stream Offset" start="224" end="255" type="uint"/>
+  </instruction>
+
+  <instruction name="3DSTATE_SO_DECL_LIST" bias="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="1"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="23"/>
+    <field name="DWord Length" start="0" end="8" type="uint"/>
+    <field name="Stream to Buffer Selects [3]" start="44" end="47" type="uint"/>
+    <field name="Stream to Buffer Selects [2]" start="40" end="43" type="uint"/>
+    <field name="Stream to Buffer Selects [1]" start="36" end="39" type="uint"/>
+    <field name="Stream to Buffer Selects [0]" start="32" end="35" type="uint"/>
+    <field name="Num Entries [3]" start="88" end="95" type="uint"/>
+    <field name="Num Entries [2]" start="80" end="87" type="uint"/>
+    <field name="Num Entries [1]" start="72" end="79" type="uint"/>
+    <field name="Num Entries [0]" start="64" end="71" type="uint"/>
+    <group count="0" start="96" size="64">
+      <field name="Entry" start="0" end="63" type="SO_DECL_ENTRY"/>
+    </group>
+  </instruction>
+
+  <instruction name="3DSTATE_STENCIL_BUFFER" bias="2" length="5">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="6"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="3"/>
+    <field name="Stencil Buffer Enable" start="63" end="63" type="uint"/>
+    <field name="Stencil Buffer Object Control State" start="54" end="60" type="MEMORY_OBJECT_CONTROL_STATE"/>
+    <field name="Surface Pitch" start="32" end="48" type="uint"/>
+    <field name="Surface Base Address" start="64" end="127" type="address"/>
+    <field name="Surface QPitch" start="128" end="142" type="uint"/>
+  </instruction>
+
+  <instruction name="3DSTATE_STREAMOUT" bias="2" length="5">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="30"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="3"/>
+    <field name="SO Function Enable" start="63" end="63" type="uint"/>
+    <field name="API Rendering Disable" start="62" end="62" type="uint"/>
+    <field name="Render Stream Select" start="59" end="60" type="uint"/>
+    <field name="Reorder Mode" start="58" end="58" type="uint">
+      <value name="LEADING" value="0"/>
+      <value name="TRAILING" value="1"/>
+    </field>
+    <field name="SO Statistics Enable" start="57" end="57" type="bool"/>
+    <field name="Force Rendering" start="55" end="56" type="uint">
+      <value name="Resreved" value="1"/>
+      <value name="Force_Off" value="2"/>
+      <value name="Force_on" value="3"/>
+    </field>
+    <field name="Stream 3 Vertex Read Offset" start="93" end="93" type="uint"/>
+    <field name="Stream 3 Vertex Read Length" start="88" end="92" type="uint"/>
+    <field name="Stream 2 Vertex Read Offset" start="85" end="85" type="uint"/>
+    <field name="Stream 2 Vertex Read Length" start="80" end="84" type="uint"/>
+    <field name="Stream 1 Vertex Read Offset" start="77" end="77" type="uint"/>
+    <field name="Stream 1 Vertex Read Length" start="72" end="76" type="uint"/>
+    <field name="Stream 0 Vertex Read Offset" start="69" end="69" type="uint"/>
+    <field name="Stream 0 Vertex Read Length" start="64" end="68" type="uint"/>
+    <field name="Buffer 1 Surface Pitch" start="112" end="123" type="uint"/>
+    <field name="Buffer 0 Surface Pitch" start="96" end="107" type="uint"/>
+    <field name="Buffer 3 Surface Pitch" start="144" end="155" type="uint"/>
+    <field name="Buffer 2 Surface Pitch" start="128" end="139" type="uint"/>
+  </instruction>
+
+  <instruction name="3DSTATE_TE" bias="2" length="4">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="28"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="2"/>
+    <field name="Partitioning" start="44" end="45" type="uint">
+      <value name="INTEGER" value="0"/>
+      <value name="ODD_FRACTIONAL" value="1"/>
+      <value name="EVEN_FRACTIONAL" value="2"/>
+    </field>
+    <field name="Output Topology" start="40" end="41" type="uint" prefix="OUTPUT">
+      <value name="POINT" value="0"/>
+      <value name="LINE" value="1"/>
+      <value name="TRI_CW" value="2"/>
+      <value name="TRI_CCW" value="3"/>
+    </field>
+    <field name="TE Domain" start="36" end="37" type="uint">
+      <value name="QUAD" value="0"/>
+      <value name="TRI" value="1"/>
+      <value name="ISOLINE" value="2"/>
+    </field>
+    <field name="TE Mode" start="33" end="34" type="uint">
+      <value name="HW_TESS" value="0"/>
+    </field>
+    <field name="TE Enable" start="32" end="32" type="bool"/>
+    <field name="Maximum Tessellation Factor Odd" start="64" end="95" type="float"/>
+    <field name="Maximum Tessellation Factor Not Odd" start="96" end="127" type="float"/>
+  </instruction>
+
+  <instruction name="3DSTATE_URB_CLEAR" bias="2" length="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="1"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="29"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="0"/>
+    <field name="URB Clear Length" start="48" end="61" type="uint"/>
+    <field name="URB Address" start="32" end="46" type="offset"/>
+  </instruction>
+
+  <instruction name="3DSTATE_URB_DS" bias="2" length="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="50"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="0"/>
+    <field name="DS URB Starting Address" start="57" end="63" type="uint"/>
+    <field name="DS URB Entry Allocation Size" start="48" end="56" type="uint"/>
+    <field name="DS Number of URB Entries" start="32" end="47" type="uint"/>
+  </instruction>
+
+  <instruction name="3DSTATE_URB_GS" bias="2" length="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="51"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="0"/>
+    <field name="GS URB Starting Address" start="57" end="63" type="uint"/>
+    <field name="GS URB Entry Allocation Size" start="48" end="56" type="uint"/>
+    <field name="GS Number of URB Entries" start="32" end="47" type="uint"/>
+  </instruction>
+
+  <instruction name="3DSTATE_URB_HS" bias="2" length="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="49"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="0"/>
+    <field name="HS URB Starting Address" start="57" end="63" type="uint"/>
+    <field name="HS URB Entry Allocation Size" start="48" end="56" type="uint"/>
+    <field name="HS Number of URB Entries" start="32" end="47" type="uint"/>
+  </instruction>
+
+  <instruction name="3DSTATE_URB_VS" bias="2" length="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="48"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="0"/>
+    <field name="VS URB Starting Address" start="57" end="63" type="uint"/>
+    <field name="VS URB Entry Allocation Size" start="48" end="56" type="uint"/>
+    <field name="VS Number of URB Entries" start="32" end="47" type="uint"/>
+  </instruction>
+
+  <instruction name="3DSTATE_VERTEX_BUFFERS" bias="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="8"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="3"/>
+    <group count="0" start="32" size="128">
+      <field name="Vertex Buffer State" start="0" end="127" type="VERTEX_BUFFER_STATE"/>
+    </group>
+  </instruction>
+
+  <instruction name="3DSTATE_VERTEX_ELEMENTS" bias="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="9"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="1"/>
+    <group count="0" start="32" size="64">
+      <field name="Element" start="0" end="63" type="VERTEX_ELEMENT_STATE"/>
+    </group>
+  </instruction>
+
+  <instruction name="3DSTATE_VF" bias="2" length="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="12"/>
+    <field name="Sequential Draw Cut Index Enable" start="10" end="10" type="bool"/>
+    <field name="Component Packing Enable" start="9" end="9" type="bool"/>
+    <field name="Indexed Draw Cut Index Enable" start="8" end="8" type="bool"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="0"/>
+    <field name="Cut Index" start="32" end="63" type="uint"/>
+  </instruction>
+
+  <instruction name="3DSTATE_VF_COMPONENT_PACKING" bias="2" length="5">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="85"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="3"/>
+    <field name="Vertex Element 07 Enables" start="60" end="63" type="uint"/>
+    <field name="Vertex Element 06 Enables" start="56" end="59" type="uint"/>
+    <field name="Vertex Element 05 Enables" start="52" end="55" type="uint"/>
+    <field name="Vertex Element 04 Enables" start="48" end="51" type="uint"/>
+    <field name="Vertex Element 03 Enables" start="44" end="47" type="uint"/>
+    <field name="Vertex Element 02 Enables" start="40" end="43" type="uint"/>
+    <field name="Vertex Element 01 Enables" start="36" end="39" type="uint"/>
+    <field name="Vertex Element 00 Enables" start="32" end="35" type="uint"/>
+    <field name="Vertex Element 15 Enables" start="92" end="95" type="uint"/>
+    <field name="Vertex Element 14 Enables" start="88" end="91" type="uint"/>
+    <field name="Vertex Element 13 Enables" start="84" end="87" type="uint"/>
+    <field name="Vertex Element 12 Enables" start="80" end="83" type="uint"/>
+    <field name="Vertex Element 11 Enables" start="76" end="79" type="uint"/>
+    <field name="Vertex Element 10 Enables" start="72" end="75" type="uint"/>
+    <field name="Vertex Element 09 Enables" start="68" end="71" type="uint"/>
+    <field name="Vertex Element 08 Enables" start="64" end="67" type="uint"/>
+    <field name="Vertex Element 23 Enables" start="124" end="127" type="uint"/>
+    <field name="Vertex Element 22 Enables" start="120" end="123" type="uint"/>
+    <field name="Vertex Element 21 Enables" start="116" end="119" type="uint"/>
+    <field name="Vertex Element 20 Enables" start="112" end="115" type="uint"/>
+    <field name="Vertex Element 19 Enables" start="108" end="111" type="uint"/>
+    <field name="Vertex Element 18 Enables" start="104" end="107" type="uint"/>
+    <field name="Vertex Element 17 Enables" start="100" end="103" type="uint"/>
+    <field name="Vertex Element 16 Enables" start="96" end="99" type="uint"/>
+    <field name="Vertex Element 31 Enables" start="156" end="159" type="uint"/>
+    <field name="Vertex Element 30 Enables" start="152" end="155" type="uint"/>
+    <field name="Vertex Element 29 Enables" start="148" end="151" type="uint"/>
+    <field name="Vertex Element 28 Enables" start="144" end="147" type="uint"/>
+    <field name="Vertex Element 27 Enables" start="140" end="143" type="uint"/>
+    <field name="Vertex Element 26 Enables" start="136" end="139" type="uint"/>
+    <field name="Vertex Element 25 Enables" start="132" end="135" type="uint"/>
+    <field name="Vertex Element 24 Enables" start="128" end="131" type="uint"/>
+  </instruction>
+
+  <instruction name="3DSTATE_VF_INSTANCING" bias="2" length="3">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="73"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="1"/>
+    <field name="Instancing Enable" start="40" end="40" type="bool"/>
+    <field name="Vertex Element Index" start="32" end="37" type="uint"/>
+    <field name="Instance Data Step Rate" start="64" end="95" type="uint"/>
+  </instruction>
+
+  <instruction name="3DSTATE_VF_SGVS" bias="2" length="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="74"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="0"/>
+    <field name="InstanceID Enable" start="63" end="63" type="bool"/>
+    <field name="InstanceID Component Number" start="61" end="62" type="uint">
+      <value name="COMP_0" value="0"/>
+      <value name="COMP_1" value="1"/>
+      <value name="COMP_2" value="2"/>
+      <value name="COMP_3" value="3"/>
+    </field>
+    <field name="InstanceID Element Offset" start="48" end="53" type="uint"/>
+    <field name="VertexID Enable" start="47" end="47" type="bool"/>
+    <field name="VertexID Component Number" start="45" end="46" type="uint">
+      <value name="COMP_0" value="0"/>
+      <value name="COMP_1" value="1"/>
+      <value name="COMP_2" value="2"/>
+      <value name="COMP_3" value="3"/>
+    </field>
+    <field name="VertexID Element Offset" start="32" end="37" type="uint"/>
+  </instruction>
+
+  <instruction name="3DSTATE_VF_STATISTICS" bias="1" length="1">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="1"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="11"/>
+    <field name="Statistics Enable" start="0" end="0" type="bool"/>
+  </instruction>
+
+  <instruction name="3DSTATE_VF_TOPOLOGY" bias="2" length="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="75"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="0"/>
+    <field name="Primitive Topology Type" start="32" end="37" type="uint"/>
+  </instruction>
+
+  <instruction name="3DSTATE_VIEWPORT_STATE_POINTERS_CC" bias="2" length="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="35"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="0"/>
+    <field name="CC Viewport Pointer" start="37" end="63" type="offset"/>
+  </instruction>
+
+  <instruction name="3DSTATE_VIEWPORT_STATE_POINTERS_SF_CLIP" bias="2" length="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="33"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="0"/>
+    <field name="SF Clip Viewport Pointer" start="38" end="63" type="offset"/>
+  </instruction>
+
+  <instruction name="3DSTATE_VS" bias="2" length="9">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="16"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="7"/>
+    <field name="Kernel Start Pointer" start="38" end="95" type="offset"/>
+    <field name="Single Vertex Dispatch" start="127" end="127" type="bool"/>
+    <field name="Vector Mask Enable" start="126" end="126" type="bool"/>
+    <field name="Sampler Count" start="123" end="125" type="uint">
+      <value name="No Samplers" value="0"/>
+      <value name="1-4 Samplers" value="1"/>
+      <value name="5-8 Samplers" value="2"/>
+      <value name="9-12 Samplers" value="3"/>
+      <value name="13-16 Samplers" value="4"/>
+    </field>
+    <field name="Binding Table Entry Count" start="114" end="121" type="uint"/>
+    <field name="Thread Dispatch Priority" start="113" end="113" type="uint">
+      <value name="High" value="1"/>
+    </field>
+    <field name="Floating Point Mode" start="112" end="112" type="uint">
+      <value name="IEEE-754" value="0"/>
+      <value name="Alternate" value="1"/>
+    </field>
+    <field name="Illegal Opcode Exception Enable" start="109" end="109" type="bool"/>
+    <field name="Accesses UAV" start="108" end="108" type="bool"/>
+    <field name="Software Exception Enable" start="103" end="103" type="bool"/>
+    <field name="Scratch Space Base Pointer" start="138" end="191" type="offset"/>
+    <field name="Per-Thread Scratch Space " start="128" end="131" type="uint"/>
+    <field name="Dispatch GRF Start Register For URB Data" start="212" end="216" type="uint"/>
+    <field name="Vertex URB Entry Read Length" start="203" end="208" type="uint"/>
+    <field name="Vertex URB Entry Read Offset" start="196" end="201" type="uint"/>
+    <field name="Maximum Number of Threads" start="247" end="255" type="uint"/>
+    <field name="Statistics Enable" start="234" end="234" type="bool"/>
+    <field name="SIMD8 Dispatch Enable" start="226" end="226" type="bool"/>
+    <field name="Vertex Cache Disable" start="225" end="225" type="bool"/>
+    <field name="Function Enable" start="224" end="224" type="bool"/>
+    <field name="Vertex URB Entry Output Read Offset" start="277" end="282" type="uint"/>
+    <field name="Vertex URB Entry Output Length" start="272" end="276" type="uint"/>
+    <field name="User Clip Distance Clip Test Enable Bitmask" start="264" end="271" type="uint"/>
+    <field name="User Clip Distance Cull Test Enable Bitmask" start="256" end="263" type="uint"/>
+  </instruction>
+
+  <instruction name="3DSTATE_WM" bias="2" length="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="20"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="0"/>
+    <field name="Statistics Enable" start="63" end="63" type="bool"/>
+    <field name="Legacy Depth Buffer Clear Enable" start="62" end="62" type="bool"/>
+    <field name="Legacy Depth Buffer Resolve Enable" start="60" end="60" type="bool"/>
+    <field name="Legacy Hierarchical Depth Buffer Resolve Enable" start="59" end="59" type="bool"/>
+    <field name="Legacy Diamond Line Rasterization" start="58" end="58" type="bool"/>
+    <field name="Early Depth/Stencil Control" start="53" end="54" type="uint">
+      <value name="NORMAL" value="0"/>
+      <value name="PSEXEC" value="1"/>
+      <value name="PREPS" value="2"/>
+    </field>
+    <field name="Force Thread Dispatch Enable" start="51" end="52" type="uint">
+      <value name="ForceOff" value="1"/>
+      <value name="ForceON" value="2"/>
+    </field>
+    <field name="Position ZW Interpolation Mode" start="49" end="50" type="uint">
+      <value name="INTERP_PIXEL" value="0"/>
+      <value name="INTERP_CENTROID" value="2"/>
+      <value name="INTERP_SAMPLE" value="3"/>
+    </field>
+    <field name="Barycentric Interpolation Mode" start="43" end="48" type="uint"/>
+    <field name="Line End Cap Antialiasing Region Width" start="40" end="41" type="uint">
+      <value name="0.5 pixels" value="0"/>
+      <value name="1.0 pixels" value="1"/>
+      <value name="2.0 pixels" value="2"/>
+      <value name="4.0 pixels" value="3"/>
+    </field>
+    <field name="Line Antialiasing Region Width" start="38" end="39" type="uint">
+      <value name="0.5 pixels" value="0"/>
+      <value name="1.0 pixels" value="1"/>
+      <value name="2.0 pixels" value="2"/>
+      <value name="4.0 pixels" value="3"/>
+    </field>
+    <field name="Polygon Stipple Enable" start="36" end="36" type="bool"/>
+    <field name="Line Stipple Enable" start="35" end="35" type="bool"/>
+    <field name="Point Rasterization Rule" start="34" end="34" type="uint">
+      <value name="RASTRULE_UPPER_LEFT" value="0"/>
+      <value name="RASTRULE_UPPER_RIGHT" value="1"/>
+    </field>
+    <field name="Force Kill Pixel Enable" start="32" end="33" type="uint">
+      <value name="ForceOff" value="1"/>
+      <value name="ForceON" value="2"/>
+    </field>
+  </instruction>
+
+  <instruction name="3DSTATE_WM_CHROMAKEY" bias="2" length="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="76"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="0"/>
+    <field name="ChromaKey Kill Enable" start="63" end="63" type="bool"/>
+  </instruction>
+
+  <instruction name="3DSTATE_WM_DEPTH_STENCIL" bias="2" length="4">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="78"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="2"/>
+    <field name="Stencil Fail Op" start="61" end="63" type="uint"/>
+    <field name="Stencil Pass Depth Fail Op" start="58" end="60" type="uint"/>
+    <field name="Stencil Pass Depth Pass Op" start="55" end="57" type="uint"/>
+    <field name="Backface Stencil Test Function" start="52" end="54" type="uint"/>
+    <field name="Backface Stencil Fail Op" start="49" end="51" type="uint"/>
+    <field name="Backface Stencil Pass Depth Fail Op" start="46" end="48" type="uint"/>
+    <field name="Backface Stencil Pass Depth Pass Op" start="43" end="45" type="uint"/>
+    <field name="Stencil Test Function" start="40" end="42" type="uint"/>
+    <field name="Depth Test Function" start="37" end="39" type="uint"/>
+    <field name="Double Sided Stencil Enable" start="36" end="36" type="bool"/>
+    <field name="Stencil Test Enable" start="35" end="35" type="bool"/>
+    <field name="Stencil Buffer Write Enable" start="34" end="34" type="bool"/>
+    <field name="Depth Test Enable" start="33" end="33" type="bool"/>
+    <field name="Depth Buffer Write Enable" start="32" end="32" type="bool"/>
+    <field name="Stencil Test Mask" start="88" end="95" type="uint"/>
+    <field name="Stencil Write Mask" start="80" end="87" type="uint"/>
+    <field name="Backface Stencil Test Mask" start="72" end="79" type="uint"/>
+    <field name="Backface Stencil Write Mask" start="64" end="71" type="uint"/>
+    <field name="Stencil Reference Value" start="104" end="111" type="uint"/>
+    <field name="Backface Stencil Reference Value" start="96" end="103" type="uint"/>
+  </instruction>
+
+  <instruction name="3DSTATE_WM_HZ_OP" bias="2" length="5">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="82"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="3"/>
+    <field name="Stencil Buffer Clear Enable" start="63" end="63" type="bool"/>
+    <field name="Depth Buffer Clear Enable" start="62" end="62" type="bool"/>
+    <field name="Scissor Rectangle Enable" start="61" end="61" type="bool"/>
+    <field name="Depth Buffer Resolve Enable" start="60" end="60" type="bool"/>
+    <field name="Hierarchical Depth Buffer Resolve Enable" start="59" end="59" type="bool"/>
+    <field name="Pixel Position Offset Enable" start="58" end="58" type="bool"/>
+    <field name="Full Surface Depth and Stencil Clear" start="57" end="57" type="bool"/>
+    <field name="Stencil Clear Value" start="48" end="55" type="uint"/>
+    <field name="Number of Multisamples" start="45" end="47" type="uint"/>
+    <field name="Clear Rectangle Y Min" start="80" end="95" type="uint"/>
+    <field name="Clear Rectangle X Min" start="64" end="79" type="uint"/>
+    <field name="Clear Rectangle Y Max" start="112" end="127" type="uint"/>
+    <field name="Clear Rectangle X Max" start="96" end="111" type="uint"/>
+    <field name="Sample Mask" start="128" end="143" type="uint"/>
+  </instruction>
+
+  <instruction name="GPGPU_CSR_BASE_ADDRESS" bias="2" length="3">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="0"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="1"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="4"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="1"/>
+    <field name="GPGPU CSR Base Address" start="44" end="95" type="address"/>
+  </instruction>
+
+  <instruction name="GPGPU_WALKER" bias="2" length="15">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Pipeline" start="27" end="28" type="uint" default="2"/>
+    <field name="Media Command Opcode" start="24" end="26" type="uint" default="1"/>
+    <field name="SubOpcode" start="16" end="23" type="uint" default="5"/>
+    <field name="Indirect Parameter Enable" start="10" end="10" type="bool"/>
+    <field name="Predicate Enable" start="8" end="8" type="bool"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="13"/>
+    <field name="Interface Descriptor Offset" start="32" end="37" type="uint"/>
+    <field name="Indirect Data Length" start="64" end="80" type="uint"/>
+    <field name="Indirect Data Start Address" start="102" end="127" type="offset"/>
+    <field name="SIMD Size" start="158" end="159" type="uint">
+      <value name="SIMD8" value="0"/>
+      <value name="SIMD16" value="1"/>
+      <value name="SIMD32" value="2"/>
+    </field>
+    <field name="Thread Depth Counter Maximum" start="144" end="149" type="uint"/>
+    <field name="Thread Height Counter Maximum" start="136" end="141" type="uint"/>
+    <field name="Thread Width Counter Maximum" start="128" end="133" type="uint"/>
+    <field name="Thread Group ID Starting X" start="160" end="191" type="uint"/>
+    <field name="Thread Group ID X Dimension" start="224" end="255" type="uint"/>
+    <field name="Thread Group ID Starting Y" start="256" end="287" type="uint"/>
+    <field name="Thread Group ID Y Dimension" start="320" end="351" type="uint"/>
+    <field name="Thread Group ID Starting/Resume Z" start="352" end="383" type="uint"/>
+    <field name="Thread Group ID Z Dimension" start="384" end="415" type="uint"/>
+    <field name="Right Execution Mask" start="416" end="447" type="uint"/>
+    <field name="Bottom Execution Mask" start="448" end="479" type="uint"/>
+  </instruction>
+
+  <instruction name="MEDIA_CURBE_LOAD" bias="2" length="4">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Pipeline" start="27" end="28" type="uint" default="2"/>
+    <field name="Media Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="SubOpcode" start="16" end="23" type="uint" default="1"/>
+    <field name="DWord Length" start="0" end="15" type="uint" default="2"/>
+    <field name="CURBE Total Data Length" start="64" end="80" type="uint"/>
+    <field name="CURBE Data Start Address" start="96" end="127" type="uint"/>
+  </instruction>
+
+  <instruction name="MEDIA_INTERFACE_DESCRIPTOR_LOAD" bias="2" length="4">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Pipeline" start="27" end="28" type="uint" default="2"/>
+    <field name="Media Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="SubOpcode" start="16" end="23" type="uint" default="2"/>
+    <field name="DWord Length" start="0" end="15" type="uint" default="2"/>
+    <field name="Interface Descriptor Total Length" start="64" end="80" type="uint"/>
+    <field name="Interface Descriptor Data Start Address" start="96" end="127" type="offset"/>
+  </instruction>
+
+  <instruction name="MEDIA_OBJECT" bias="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Media Command Pipeline" start="27" end="28" type="uint" default="2"/>
+    <field name="Media Command Opcode" start="24" end="26" type="uint" default="1"/>
+    <field name="Media Command Sub-Opcode" start="16" end="23" type="uint" default="0"/>
+    <field name="DWord Length" start="0" end="15" type="uint" default="4"/>
+    <field name="Interface Descriptor Offset" start="32" end="37" type="uint"/>
+    <field name="Children Present" start="95" end="95" type="bool"/>
+    <field name="Slice Destination Select MSBs" start="89" end="90" type="uint"/>
+    <field name="Thread Synchronization" start="88" end="88" type="uint">
+      <value name="No thread synchronization" value="0"/>
+      <value name="Thread dispatch is synchronized by the 'spawn root thread' message" value="1"/>
+    </field>
+    <field name="Force Destination" start="86" end="86" type="uint"/>
+    <field name="Use Scoreboard" start="85" end="85" type="uint">
+      <value name="Not using scoreboard" value="0"/>
+      <value name="Using scoreboard" value="1"/>
+    </field>
+    <field name="Slice Destination Select" start="83" end="84" type="uint">
+      <value name="Slice 0" value="0"/>
+      <value name="Slice 1" value="1"/>
+      <value name="Slice 2" value="2"/>
+    </field>
+    <field name="SubSlice Destination Select" start="81" end="82" type="uint">
+      <value name="Subslice 3" value="3"/>
+      <value name="SubSlice 2" value="2"/>
+      <value name="SubSlice 1" value="1"/>
+      <value name="SubSlice 0" value="0"/>
+    </field>
+    <field name="Indirect Data Length" start="64" end="80" type="uint"/>
+    <field name="Indirect Data Start Address" start="96" end="127" type="address"/>
+    <field name="Scoredboard Y" start="144" end="152" type="uint"/>
+    <field name="Scoreboard X" start="128" end="136" type="uint"/>
+    <field name="Scoreboard Color" start="176" end="179" type="uint"/>
+    <field name="Scoreboard Mask" start="160" end="167" type="uint"/>
+    <group count="0" start="192" size="32">
+      <field name="Inline Data" start="0" end="31" type="uint"/>
+    </group>
+  </instruction>
+
+  <instruction name="MEDIA_OBJECT_GRPID" bias="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Media Command Pipeline" start="27" end="28" type="uint" default="2"/>
+    <field name="Media Command Opcode" start="24" end="26" type="uint" default="1"/>
+    <field name="Media Command Sub-Opcode" start="16" end="23" type="uint" default="6"/>
+    <field name="DWord Length" start="0" end="15" type="uint" default="5"/>
+    <field name="Interface Descriptor Offset" start="32" end="37" type="uint"/>
+    <field name="Slice Destination Select MSB" start="88" end="88" type="uint"/>
+    <field name="End of Thread Group" start="87" end="87" type="uint"/>
+    <field name="Force Destination" start="86" end="86" type="uint"/>
+    <field name="Use Scoreboard" start="85" end="85" type="uint">
+      <value name="Not using scoreboard" value="0"/>
+      <value name="Using scoreboard" value="1"/>
+    </field>
+    <field name="Slice Destination Select" start="83" end="84" type="uint">
+      <value name="Slice 0" value="0"/>
+      <value name="Slice 1" value="1"/>
+      <value name="Slice 2" value="2"/>
+    </field>
+    <field name="SubSlice Destination Select" start="81" end="82" type="uint">
+      <value name="Subslice3" value="3"/>
+      <value name="SubSlice 2" value="2"/>
+      <value name="SubSlice 1" value="1"/>
+      <value name="SubSlice 0" value="0"/>
+    </field>
+    <field name="Indirect Data Length" start="64" end="80" type="uint"/>
+    <field name="Indirect Data Start Address" start="96" end="127" type="address"/>
+    <field name="Scoreboard Y" start="144" end="152" type="uint"/>
+    <field name="Scoreboard X" start="128" end="136" type="uint"/>
+    <field name="Scoreboard Color" start="176" end="179" type="uint"/>
+    <field name="Scoreboard Mask" start="160" end="167" type="uint"/>
+    <field name="GroupID" start="192" end="223" type="uint"/>
+    <group count="0" start="224" size="32">
+      <field name="Inline Data" start="0" end="31" type="uint"/>
+    </group>
+  </instruction>
+
+  <instruction name="MEDIA_OBJECT_PRT" bias="2" length="16">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Pipeline" start="27" end="28" type="uint" default="2"/>
+    <field name="Media Command Opcode" start="24" end="26" type="uint" default="1"/>
+    <field name="SubOpcode" start="16" end="23" type="uint" default="2"/>
+    <field name="DWord Length" start="0" end="15" type="uint" default="14"/>
+    <field name="Interface Descriptor Offset" start="32" end="37" type="uint"/>
+    <field name="Children Present" start="95" end="95" type="bool"/>
+    <field name="PRT_Fence Needed" start="87" end="87" type="bool"/>
+    <field name="PRT_FenceType" start="86" end="86" type="uint">
+      <value name="Root thread queue" value="0"/>
+      <value name="VFE state flush" value="1"/>
+    </field>
+    <group count="12" start="128" size="32">
+      <field name="Inline Data" start="0" end="31" type="uint"/>
+    </group>
+  </instruction>
+
+  <instruction name="MEDIA_OBJECT_WALKER" bias="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Pipeline" start="27" end="28" type="uint" default="2"/>
+    <field name="Media Command Opcode" start="24" end="26" type="uint" default="1"/>
+    <field name="SubOpcode" start="16" end="23" type="uint" default="3"/>
+    <field name="DWord Length" start="0" end="15" type="uint" default="15"/>
+    <field name="Interface Descriptor Offset" start="32" end="37" type="uint"/>
+    <field name="Thread Synchronization" start="88" end="88" type="uint">
+      <value name="No thread synchronization" value="0"/>
+      <value name="Thread dispatch is synchronized by the 'spawn root thread' message" value="1"/>
+    </field>
+    <field name="Masked Dispatch" start="86" end="87" type="uint"/>
+    <field name="Use Scoreboard" start="85" end="85" type="uint">
+      <value name="Not using scoreboard" value="0"/>
+      <value name="Using scoreboard" value="1"/>
+    </field>
+    <field name="Indirect Data Length" start="64" end="80" type="uint"/>
+    <field name="Indirect Data Start Address" start="96" end="127" type="uint"/>
+    <field name="Group ID Loop Select" start="168" end="191" type="uint">
+      <value name="No_Groups" value="0"/>
+      <value name="Color_Groups" value="1"/>
+      <value name="InnerLocal_Groups" value="2"/>
+      <value name="MidLocal_Groups" value="3"/>
+      <value name="OuterLocal_Groups" value="4"/>
+      <value name="InnerGlobal_Groups" value="5"/>
+    </field>
+    <field name="Scoreboard Mask" start="160" end="167" type="uint"/>
+    <field name="Color Count Minus One" start="216" end="219" type="uint"/>
+    <field name="Middle Loop Extra Steps" start="208" end="212" type="uint"/>
+    <field name="Local Mid-Loop Unit Y" start="204" end="205" type="int"/>
+    <field name="Mid-Loop Unit X" start="200" end="201" type="int"/>
+    <field name="Global Loop Exec Count" start="240" end="251" type="uint"/>
+    <field name="Local Loop Exec Count" start="224" end="235" type="uint"/>
+    <field name="Block Resolution Y" start="272" end="282" type="uint"/>
+    <field name="Block Resolution X" start="256" end="266" type="uint"/>
+    <field name="Local Start Y" start="304" end="314" type="uint"/>
+    <field name="Local Start X" start="288" end="298" type="uint"/>
+    <field name="Local Outer Loop Stride Y" start="368" end="379" type="int"/>
+    <field name="Local Outer Loop Stride X" start="352" end="363" type="int"/>
+    <field name="Local Inner Loop Unit Y" start="400" end="411" type="int"/>
+    <field name="Local Inner Loop Unit X" start="384" end="395" type="int"/>
+    <field name="Global Resolution Y" start="432" end="442" type="uint"/>
+    <field name="Global Resolution X" start="416" end="426" type="uint"/>
+    <field name="Global Start Y" start="464" end="475" type="int"/>
+    <field name="Global Start X" start="448" end="459" type="int"/>
+    <field name="Global Outer Loop Stride Y" start="496" end="507" type="int"/>
+    <field name="Global Outer Loop Stride X" start="480" end="491" type="int"/>
+    <field name="Global Inner Loop Unit Y" start="528" end="539" type="int"/>
+    <field name="Global Inner Loop Unit X" start="512" end="523" type="int"/>
+    <group count="0" start="544" size="32">
+      <field name="Inline Data" start="0" end="31" type="uint"/>
+    </group>
+  </instruction>
+
+  <instruction name="MEDIA_STATE_FLUSH" bias="2" length="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Pipeline" start="27" end="28" type="uint" default="2"/>
+    <field name="Media Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="SubOpcode" start="16" end="23" type="uint" default="4"/>
+    <field name="DWord Length" start="0" end="15" type="uint" default="0"/>
+    <field name="Flush to GO" start="39" end="39" type="bool"/>
+    <field name="Watermark Required" start="38" end="38" type="uint"/>
+    <field name="Interface Descriptor Offset" start="32" end="37" type="uint"/>
+  </instruction>
+
+  <instruction name="MEDIA_VFE_STATE" bias="2" length="9">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Pipeline" start="27" end="28" type="uint" default="2"/>
+    <field name="Media Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="SubOpcode" start="16" end="23" type="uint" default="0"/>
+    <field name="DWord Length" start="0" end="15" type="uint" default="7"/>
+    <field name="Scratch Space Base Pointer" start="42" end="63" type="offset"/>
+    <field name="Stack Size" start="36" end="39" type="uint"/>
+    <field name="Per Thread Scratch Space" start="32" end="35" type="uint"/>
+    <field name="Scratch Space Base Pointer High" start="64" end="79" type="offset"/>
+    <field name="Maximum Number of Threads" start="112" end="127" type="uint"/>
+    <field name="Number of URB Entries" start="104" end="111" type="uint"/>
+    <field name="Reset Gateway Timer" start="103" end="103" type="uint">
+      <value name="Maintaining the existing timestamp state" value="0"/>
+      <value name="Resetting relative timer and latching the global timestamp" value="1"/>
+    </field>
+    <field name="Slice Disable" start="128" end="129" type="uint">
+      <value name="All Subslices Enabled" value="0"/>
+      <value name="Only Slice 0 Enabled" value="1"/>
+      <value name="Only Slice 0 Subslice 0 Enabled" value="3"/>
+    </field>
+    <field name="URB Entry Allocation Size" start="176" end="191" type="uint"/>
+    <field name="CURBE Allocation Size" start="160" end="175" type="uint"/>
+    <field name="Scoreboard Enable" start="223" end="223" type="bool"/>
+    <field name="Scoreboard Type" start="222" end="222" type="uint">
+      <value name="Stalling Scoreboard" value="0"/>
+      <value name="Non-Stalling Scoreboard" value="1"/>
+    </field>
+    <field name="Scoreboard Mask" start="192" end="199" type="uint"/>
+    <field name="Scoreboard 3 Delta Y" start="252" end="255" type="int"/>
+    <field name="Scoreboard 3 Delta X" start="248" end="251" type="int"/>
+    <field name="Scoreboard 2 Delta Y" start="244" end="247" type="int"/>
+    <field name="Scoreboard 2 Delta X" start="240" end="243" type="int"/>
+    <field name="Scoreboard 1 Delta Y" start="236" end="239" type="int"/>
+    <field name="Scoreboard 1 Delta X" start="232" end="235" type="int"/>
+    <field name="Scoreboard 0 Delta Y" start="228" end="231" type="int"/>
+    <field name="Scoreboard 0 Delta X" start="224" end="227" type="int"/>
+    <field name="Scoreboard 7 Delta Y" start="284" end="287" type="int"/>
+    <field name="Scoreboard 7 Delta X" start="280" end="283" type="int"/>
+    <field name="Scoreboard 6 Delta Y" start="276" end="279" type="int"/>
+    <field name="Scoreboard 6 Delta X" start="272" end="275" type="int"/>
+    <field name="Scoreboard 5 Delta Y" start="268" end="271" type="int"/>
+    <field name="Scoreboard 5 Delta X" start="264" end="267" type="int"/>
+    <field name="Scoreboard 4 Delta Y" start="260" end="263" type="int"/>
+    <field name="Scoreboard 4 Delta X" start="256" end="259" type="int"/>
+  </instruction>
+
+  <instruction name="MI_ARB_CHECK" bias="1" length="1">
+    <field name="Command Type" start="29" end="31" type="uint" default="0"/>
+    <field name="MI Command Opcode" start="23" end="28" type="uint" default="5"/>
+  </instruction>
+
+  <instruction name="MI_ATOMIC" bias="2" length="3">
+    <field name="Command Type" start="29" end="31" type="uint" default="0"/>
+    <field name="MI Command Opcode" start="23" end="28" type="uint" default="47"/>
+    <field name="Memory Type" start="22" end="22" type="uint">
+      <value name="Per Process Graphics Address" value="0"/>
+      <value name="Global Graphics Address" value="1"/>
+    </field>
+    <field name="Post-Sync Operation" start="21" end="21" type="bool"/>
+    <field name="Data Size" start="19" end="20" type="uint">
+      <value name="DWORD" value="0"/>
+      <value name="QWORD" value="1"/>
+      <value name="OCTWORD" value="2"/>
+      <value name="RESERVED" value="3"/>
+    </field>
+    <field name="Inline Data" start="18" end="18" type="uint"/>
+    <field name="CS STALL" start="17" end="17" type="uint"/>
+    <field name="Return Data Control" start="16" end="16" type="uint"/>
+    <field name="ATOMIC OPCODE" start="8" end="15" type="uint"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="1"/>
+    <field name="Memory Address" start="34" end="79" type="address"/>
+    <field name="Operand1 Data Dword 0" start="96" end="127" type="uint"/>
+    <field name="Operand2 Data Dword 0" start="128" end="159" type="uint"/>
+    <field name="Operand1 Data Dword 1" start="160" end="191" type="uint"/>
+    <field name="Operand2 Data Dword 1" start="192" end="223" type="uint"/>
+    <field name="Operand1 Data Dword 2" start="224" end="255" type="uint"/>
+    <field name="Operand2 Data Dword 2" start="256" end="287" type="uint"/>
+    <field name="Operand1 Data Dword 3" start="288" end="319" type="uint"/>
+    <field name="Operand2 Data Dword 3" start="320" end="351" type="uint"/>
+  </instruction>
+
+  <instruction name="MI_BATCH_BUFFER_END" bias="1" length="1">
+    <field name="Command Type" start="29" end="31" type="uint" default="0"/>
+    <field name="MI Command Opcode" start="23" end="28" type="uint" default="10"/>
+  </instruction>
+
+  <instruction name="MI_BATCH_BUFFER_START" bias="2" length="3">
+    <field name="Command Type" start="29" end="31" type="uint" default="0"/>
+    <field name="MI Command Opcode" start="23" end="28" type="uint" default="49"/>
+    <field name="Second Level Batch Buffer" start="22" end="22" type="uint">
+      <value name="First level batch" value="0"/>
+      <value name="Second level batch" value="1"/>
+    </field>
+    <field name="Add Offset Enable" start="16" end="16" type="bool"/>
+    <field name="Predication Enable" start="15" end="15" type="uint"/>
+    <field name="Resource Streamer Enable" start="10" end="10" type="bool"/>
+    <field name="Address Space Indicator" start="8" end="8" type="uint" prefix="ASI">
+      <value name="GGTT" value="0"/>
+      <value name="PPGTT" value="1"/>
+    </field>
+    <field name="DWord Length" start="0" end="7" type="uint" default="1"/>
+    <field name="Batch Buffer Start Address" start="34" end="95" type="address"/>
+  </instruction>
+
+  <instruction name="MI_CLFLUSH" bias="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="0"/>
+    <field name="MI Command Opcode" start="23" end="28" type="uint" default="39"/>
+    <field name="Use Global GTT" start="22" end="22" type="uint">
+      <value name="Per Process Graphics Address" value="0"/>
+      <value name="Global Graphics Address" value="1"/>
+    </field>
+    <field name="DWord Length" start="0" end="9" type="uint" default="1"/>
+    <field name="Page Base Address" start="44" end="79" type="address"/>
+    <field name="Starting Cacheline Offset" start="38" end="43" type="uint"/>
+    <group count="0" start="96" size="32">
+      <field name="DW Representing a Half Cache Line" start="0" end="31" type="uint"/>
+    </group>
+  </instruction>
+
+  <instruction name="MI_CONDITIONAL_BATCH_BUFFER_END" bias="2" length="4">
+    <field name="Command Type" start="29" end="31" type="uint" default="0"/>
+    <field name="MI Command Opcode" start="23" end="28" type="uint" default="54"/>
+    <field name="Use Global GTT" start="22" end="22" type="uint" default="0"/>
+    <field name="Compare Semaphore" start="21" end="21" type="uint" default="0"/>
+    <field name="Compare Mask Mode" start="19" end="19" type="uint">
+      <value name="Compare Mask Mode Disabled" value="0"/>
+      <value name="Compare Mask Mode Enabled" value="1"/>
+    </field>
+    <field name="DWord Length" start="0" end="7" type="uint" default="2"/>
+    <field name="Compare Data Dword" start="32" end="63" type="uint"/>
+    <field name="Compare Address" start="67" end="127" type="address"/>
+  </instruction>
+
+  <instruction name="MI_COPY_MEM_MEM" bias="2" length="5">
+    <field name="Command Type" start="29" end="31" type="uint" default="0"/>
+    <field name="MI Command Opcode" start="23" end="28" type="uint" default="46"/>
+    <field name="Use Global GTT Source" start="22" end="22" type="uint">
+      <value name="Per Process Graphics Address" value="0"/>
+      <value name="Global Graphics Address" value="1"/>
+    </field>
+    <field name="Use Global GTT Destination" start="21" end="21" type="uint">
+      <value name="Per Process Graphics Address" value="0"/>
+      <value name="Global Graphics Address" value="1"/>
+    </field>
+    <field name="DWord Length" start="0" end="7" type="uint" default="3"/>
+    <field name="Destination Memory Address" start="34" end="95" type="address"/>
+    <field name="Source Memory Address" start="98" end="159" type="address"/>
+  </instruction>
+
+  <instruction name="MI_DISPLAY_FLIP" bias="2" length="3">
+    <field name="Command Type" start="29" end="31" type="uint" default="0"/>
+    <field name="MI Command Opcode" start="23" end="28" type="uint" default="20"/>
+    <field name="Async Flip Indicator" start="22" end="22" type="bool"/>
+    <field name="Display Plane Select" start="8" end="12" type="uint">
+      <value name="Display Plane 1" value="0"/>
+      <value name="Display Plane 2" value="1"/>
+      <value name="Display Plane 3" value="2"/>
+      <value name="Display Plane 4" value="4"/>
+      <value name="Display Plane 5" value="5"/>
+      <value name="Display Plane 6" value="6"/>
+      <value name="Display Plane 7" value="7"/>
+      <value name="Display Plane 8" value="8"/>
+      <value name="Display Plane 9" value="9"/>
+      <value name="Display Plane 10" value="10"/>
+      <value name="Display Plane 11" value="11"/>
+      <value name="Display Plane 12" value="12"/>
+    </field>
+    <field name="DWord Length" start="0" end="7" type="uint" default="1"/>
+    <field name="Stereoscopic 3D Mode" start="63" end="63" type="bool"/>
+    <field name="Display Buffer Pitch" start="38" end="47" type="uint"/>
+    <field name="Tile Parameter" start="32" end="34" type="bool"/>
+    <field name="Display Buffer Base Address" start="76" end="95" type="address"/>
+    <field name="Flip Type" start="64" end="65" type="uint">
+      <value name="Sync Flip" value="0"/>
+      <value name="Async Flip" value="1"/>
+      <value name="Stereo 3D Flip" value="2"/>
+    </field>
+    <field name="Left Eye Display Buffer Base Address" start="108" end="127" type="address"/>
+  </instruction>
+
+  <instruction name="MI_FORCE_WAKEUP" bias="2" length="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="0"/>
+    <field name="MI Command Opcode" start="23" end="28" type="uint" default="29"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="0"/>
+    <field name="Mask Bits" start="48" end="63" type="uint"/>
+    <field name="Force Render Awake" start="33" end="33" type="uint"/>
+    <field name="Force Media Awake" start="32" end="32" type="uint"/>
+  </instruction>
+
+  <instruction name="MI_LOAD_REGISTER_IMM" bias="2" length="3">
+    <field name="Command Type" start="29" end="31" type="uint" default="0"/>
+    <field name="MI Command Opcode" start="23" end="28" type="uint" default="34"/>
+    <field name="Byte Write Disables" start="8" end="11" type="uint"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="1"/>
+    <field name="Register Offset" start="34" end="54" type="offset"/>
+    <field name="Data DWord" start="64" end="95" type="uint"/>
+  </instruction>
+
+  <instruction name="MI_LOAD_REGISTER_MEM" bias="2" length="4">
+    <field name="Command Type" start="29" end="31" type="uint" default="0"/>
+    <field name="MI Command Opcode" start="23" end="28" type="uint" default="41"/>
+    <field name="Use Global GTT" start="22" end="22" type="uint"/>
+    <field name="Async Mode Enable" start="21" end="21" type="bool"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="2"/>
+    <field name="Register Address" start="34" end="54" type="offset"/>
+    <field name="Memory Address" start="66" end="127" type="address"/>
+  </instruction>
+
+  <instruction name="MI_LOAD_REGISTER_REG" bias="2" length="3">
+    <field name="Command Type" start="29" end="31" type="uint" default="0"/>
+    <field name="MI Command Opcode" start="23" end="28" type="uint" default="42"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="1"/>
+    <field name="Source Register Address" start="34" end="54" type="offset"/>
+    <field name="Destination Register Address" start="66" end="86" type="offset"/>
+  </instruction>
+
+  <instruction name="MI_LOAD_SCAN_LINES_EXCL" bias="2" length="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="0"/>
+    <field name="MI Command Opcode" start="23" end="28" type="uint" default="19"/>
+    <field name="Display (Plane) Select" start="19" end="21" type="uint">
+      <value name="Display Plane A" value="0"/>
+      <value name="Display Plane B" value="1"/>
+      <value name="Display Plane C" value="4"/>
+    </field>
+    <field name="DWord Length" start="0" end="5" type="uint" default="0"/>
+    <field name="Start Scan Line Number" start="48" end="60" type="uint"/>
+    <field name="End Scan Line Number" start="32" end="44" type="uint"/>
+  </instruction>
+
+  <instruction name="MI_LOAD_SCAN_LINES_INCL" bias="2" length="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="0"/>
+    <field name="MI Command Opcode" start="23" end="28" type="uint" default="18"/>
+    <field name="Display (Plane) Select" start="19" end="21" type="uint">
+      <value name="Display Plane 1 A" value="0"/>
+      <value name="Display Plane 1 B" value="1"/>
+      <value name="Display Plane 1 C" value="4"/>
+    </field>
+    <field name="Scan Line Event Done Forward" start="17" end="18" type="bool"/>
+    <field name="DWord Length" start="0" end="5" type="uint" default="0"/>
+    <field name="Start Scan Line Number" start="48" end="60" type="uint"/>
+    <field name="End Scan Line Number" start="32" end="44" type="uint"/>
+  </instruction>
+
+  <instruction name="MI_LOAD_URB_MEM" bias="2" length="4">
+    <field name="Command Type" start="29" end="31" type="uint" default="0"/>
+    <field name="MI Command Opcode" start="23" end="28" type="uint" default="44"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="2"/>
+    <field name="URB Address" start="34" end="46" type="uint"/>
+    <field name="Memory Address" start="70" end="127" type="address"/>
+  </instruction>
+
+  <instruction name="MI_MATH" bias="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="0"/>
+    <field name="MI Command Opcode" start="23" end="28" type="uint" default="26"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="0"/>
+    <field name="ALU INSTRUCTION 1" start="32" end="63" type="uint"/>
+    <field name="ALU INSTRUCTION 2" start="64" end="95" type="uint"/>
+    <group count="0" start="96" size="32">
+      <field name="ALU INSTRUCTION n" start="0" end="31" type="uint"/>
+    </group>
+  </instruction>
+
+  <instruction name="MI_NOOP" bias="1" length="1">
+    <field name="Command Type" start="29" end="31" type="uint" default="0"/>
+    <field name="MI Command Opcode" start="23" end="28" type="uint" default="0"/>
+    <field name="Identification Number Register Write Enable" start="22" end="22" type="bool"/>
+    <field name="Identification Number" start="0" end="21" type="uint"/>
+  </instruction>
+
+  <instruction name="MI_PREDICATE" bias="1" length="1">
+    <field name="Command Type" start="29" end="31" type="uint" default="0"/>
+    <field name="MI Command Opcode" start="23" end="28" type="uint" default="12"/>
+    <field name="Load Operation" start="6" end="7" type="uint" prefix="LOAD">
+      <value name="KEEP" value="0"/>
+      <value name="LOAD" value="2"/>
+      <value name="LOADINV" value="3"/>
+    </field>
+    <field name="Combine Operation" start="3" end="4" type="uint" prefix="COMBINE">
+      <value name="SET" value="0"/>
+      <value name="AND" value="1"/>
+      <value name="OR" value="2"/>
+      <value name="XOR" value="3"/>
+    </field>
+    <field name="Compare Operation" start="0" end="1" type="uint" prefix="COMPARE">
+      <value name="SRCS_EQUAL" value="2"/>
+      <value name="DELTAS_EQUAL" value="3"/>
+    </field>
+  </instruction>
+
+  <instruction name="MI_REPORT_HEAD" bias="1" length="1">
+    <field name="Command Type" start="29" end="31" type="uint" default="0"/>
+    <field name="MI Command Opcode" start="23" end="28" type="uint" default="7"/>
+  </instruction>
+
+  <instruction name="MI_REPORT_PERF_COUNT" bias="2" length="4">
+    <field name="Command Type" start="29" end="31" type="uint" default="0"/>
+    <field name="MI Command Opcode" start="23" end="28" type="uint" default="40"/>
+    <field name="DWord Length" start="0" end="5" type="uint" default="2"/>
+    <field name="Memory Address" start="38" end="95" type="address"/>
+    <field name="Core Mode Enable" start="36" end="36" type="uint"/>
+    <field name="Use Global GTT" start="32" end="32" type="uint"/>
+    <field name="Report ID" start="96" end="127" type="uint"/>
+  </instruction>
+
+  <instruction name="MI_RS_CONTEXT" bias="1" length="1">
+    <field name="Command Type" start="29" end="31" type="uint" default="0"/>
+    <field name="MI Command Opcode" start="23" end="28" type="uint" default="15"/>
+    <field name="Resource Streamer Save" start="0" end="0" type="uint" prefix="RS">
+      <value name="Restore" value="0"/>
+      <value name="Save" value="1"/>
+    </field>
+  </instruction>
+
+  <instruction name="MI_RS_CONTROL" bias="1" length="1">
+    <field name="Command Type" start="29" end="31" type="uint" default="0"/>
+    <field name="MI Command Opcode" start="23" end="28" type="uint" default="6"/>
+    <field name="Resource Streamer Control" start="0" end="0" type="uint" prefix="RS">
+      <value name="Stop" value="0"/>
+      <value name="Start" value="1"/>
+    </field>
+  </instruction>
+
+  <instruction name="MI_RS_STORE_DATA_IMM" bias="2" length="4">
+    <field name="Command Type" start="29" end="31" type="uint" default="0"/>
+    <field name="MI Command Opcode" start="23" end="28" type="uint" default="43"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="2"/>
+    <field name="Destination Address" start="34" end="95" type="address"/>
+    <field name="Core Mode Enable" start="32" end="32" type="uint"/>
+    <field name="Data DWord 0" start="96" end="127" type="uint"/>
+  </instruction>
+
+  <instruction name="MI_SEMAPHORE_SIGNAL" bias="2" length="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="0"/>
+    <field name="MI Command Opcode" start="23" end="28" type="uint" default="27"/>
+    <field name="Post-Sync Operation" start="21" end="21" type="bool"/>
+    <field name="Target Engine Select" start="15" end="17" type="uint">
+      <value name="RCS" value="0"/>
+      <value name="VCS0" value="1"/>
+      <value name="BCS" value="2"/>
+      <value name="VECS" value="3"/>
+      <value name="VCS1" value="4"/>
+    </field>
+    <field name="DWord Length" start="0" end="7" type="uint" default="0"/>
+    <field name="Target Context ID" start="32" end="63" type="uint"/>
+  </instruction>
+
+  <instruction name="MI_SEMAPHORE_WAIT" bias="2" length="4">
+    <field name="Command Type" start="29" end="31" type="uint" default="0"/>
+    <field name="MI Command Opcode" start="23" end="28" type="uint" default="28"/>
+    <field name="Memory Type" start="22" end="22" type="uint">
+      <value name="Per Process Graphics Address" value="0"/>
+      <value name="Global Graphics Address" value="1"/>
+    </field>
+    <field name="Register Poll Mode" start="16" end="16" type="uint" default="1"/>
+    <field name="Wait Mode" start="15" end="15" type="uint">
+      <value name="Polling Mode" value="1"/>
+      <value name="Signal Mode" value="0"/>
+    </field>
+    <field name="Compare Operation" start="12" end="14" type="uint" prefix="COMPARE">
+      <value name="SAD_GREATER_THAN_SDD" value="0"/>
+      <value name="SAD_GREATER_THAN_OR_EQUAL_SDD" value="1"/>
+      <value name="SAD_LESS_THAN_SDD" value="2"/>
+      <value name="SAD_LESS_THAN_OR_EQUAL_SDD" value="3"/>
+      <value name="SAD_EQUAL_SDD" value="4"/>
+      <value name="SAD_NOT_EQUAL_SDD" value="5"/>
+    </field>
+    <field name="DWord Length" start="0" end="7" type="uint" default="2"/>
+    <field name="Semaphore Data Dword" start="32" end="63" type="uint"/>
+    <field name="Semaphore Address" start="66" end="127" type="address"/>
+  </instruction>
+
+  <instruction name="MI_SET_CONTEXT" bias="2" length="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="0"/>
+    <field name="MI Command Opcode" start="23" end="28" type="uint" default="24"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="0"/>
+    <field name="Logical Context Address" start="44" end="63" type="address"/>
+    <field name="Reserved, Must be 1" start="40" end="40" type="uint"/>
+    <field name="Core Mode Enable" start="36" end="36" type="bool"/>
+    <field name="Resource Streamer State Save Enable" start="35" end="35" type="bool"/>
+    <field name="Resource Streamer State Restore Enable" start="34" end="34" type="bool"/>
+    <field name="Force Restore" start="33" end="33" type="uint"/>
+    <field name="Restore Inhibit" start="32" end="32" type="uint"/>
+  </instruction>
+
+  <instruction name="MI_SET_PREDICATE" bias="1" length="1">
+    <field name="Command Type" start="29" end="31" type="uint" default="0"/>
+    <field name="MI Command Opcode" start="23" end="28" type="uint" default="1"/>
+    <field name="PREDICATE ENABLE" start="0" end="3" type="uint">
+      <value name="NOOP Never" value="0"/>
+      <value name="NOOP on Result2 clear" value="1"/>
+      <value name="NOOP on Result2 set" value="2"/>
+      <value name="NOOP on Result clear" value="3"/>
+      <value name="NOOP on Result set" value="4"/>
+      <value name="Execute when one slice enabled." value="5"/>
+      <value name="Execute when two slices are enabled." value="6"/>
+      <value name="Execute when three slices are enabled." value="7"/>
+      <value name="NOOP Always" value="15"/>
+    </field>
+  </instruction>
+
+  <instruction name="MI_STORE_DATA_IMM" bias="2" length="4">
+    <field name="Command Type" start="29" end="31" type="uint" default="0"/>
+    <field name="MI Command Opcode" start="23" end="28" type="uint" default="32"/>
+    <field name="Use Global GTT" start="22" end="22" type="uint"/>
+    <field name="Store Qword" start="21" end="21" type="uint"/>
+    <field name="DWord Length" start="0" end="9" type="uint" default="2"/>
+    <field name="Address" start="34" end="79" type="address"/>
+    <field name="Core Mode Enable" start="32" end="32" type="uint"/>
+    <field name="Data DWord 0" start="96" end="127" type="uint"/>
+    <field name="Data DWord 1" start="128" end="159" type="uint"/>
+  </instruction>
+
+  <instruction name="MI_STORE_DATA_INDEX" bias="2" length="3">
+    <field name="Command Type" start="29" end="31" type="uint" default="0"/>
+    <field name="MI Command Opcode" start="23" end="28" type="uint" default="33"/>
+    <field name="Use Per-Process Hardware Status Page" start="21" end="21" type="uint"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="1"/>
+    <field name="Offset" start="34" end="43" type="uint"/>
+    <field name="Data DWord 0" start="64" end="95" type="uint"/>
+    <field name="Data DWord 1" start="96" end="127" type="uint"/>
+  </instruction>
+
+  <instruction name="MI_STORE_REGISTER_MEM" bias="2" length="4">
+    <field name="Command Type" start="29" end="31" type="uint" default="0"/>
+    <field name="MI Command Opcode" start="23" end="28" type="uint" default="36"/>
+    <field name="Use Global GTT" start="22" end="22" type="uint"/>
+    <field name="Predicate Enable" start="21" end="21" type="bool"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="2"/>
+    <field name="Register Address" start="34" end="54" type="offset"/>
+    <field name="Memory Address" start="66" end="127" type="address"/>
+  </instruction>
+
+  <instruction name="MI_STORE_URB_MEM" bias="2" length="4">
+    <field name="Command Type" start="29" end="31" type="uint" default="0"/>
+    <field name="MI Command Opcode" start="23" end="28" type="uint" default="45"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="2"/>
+    <field name="URB Address" start="34" end="46" type="uint"/>
+    <field name="Memory Address" start="70" end="127" type="address"/>
+  </instruction>
+
+  <instruction name="MI_SUSPEND_FLUSH" bias="1" length="1">
+    <field name="Command Type" start="29" end="31" type="uint" default="0"/>
+    <field name="MI Command Opcode" start="23" end="28" type="uint" default="11"/>
+    <field name="Suspend Flush" start="0" end="0" type="bool"/>
+  </instruction>
+
+  <instruction name="MI_TOPOLOGY_FILTER" bias="1" length="1">
+    <field name="Command Type" start="29" end="31" type="uint" default="0"/>
+    <field name="MI Command Opcode" start="23" end="28" type="uint" default="13"/>
+    <field name="Topology Filter Value" start="0" end="5" type="uint"/>
+  </instruction>
+
+  <instruction name="MI_URB_ATOMIC_ALLOC" bias="1" length="1">
+    <field name="Command Type" start="29" end="31" type="uint" default="0"/>
+    <field name="MI Command Opcode" start="23" end="28" type="uint" default="9"/>
+    <field name="URB Atomic Storage Offset" start="12" end="19" type="uint"/>
+    <field name="URB Atomic Storage Size" start="0" end="8" type="uint"/>
+  </instruction>
+
+  <instruction name="MI_USER_INTERRUPT" bias="1" length="1">
+    <field name="Command Type" start="29" end="31" type="uint" default="0"/>
+    <field name="MI Command Opcode" start="23" end="28" type="uint" default="2"/>
+  </instruction>
+
+  <instruction name="MI_WAIT_FOR_EVENT" bias="1" length="1">
+    <field name="Command Type" start="29" end="31" type="uint" default="0"/>
+    <field name="MI Command Opcode" start="23" end="28" type="uint" default="3"/>
+    <field name="Display Plane 1 C Vertical Blank Wait Enable" start="21" end="21" type="bool"/>
+    <field name="Display Plane 6 Flip Pending Wait Enable" start="20" end="20" type="bool"/>
+    <field name="Display Plane 12 Flip Pending Wait Enable" start="19" end="19" type="bool"/>
+    <field name="Display Plane 11 Flip Pending Wait Enable" start="18" end="18" type="bool"/>
+    <field name="Display Plane 10 Flip Pending Wait Enable" start="17" end="17" type="bool"/>
+    <field name="Display Plane 9 Flip Pending Wait Enable" start="16" end="16" type="bool"/>
+    <field name="Display Plane 3 Flip Pending Wait Enable" start="15" end="15" type="bool"/>
+    <field name="Display Plane 1 C Scan Line Wait Enable" start="14" end="14" type="bool"/>
+    <field name="Display Plane 1 B Vertical Blank Wait Enable" start="11" end="11" type="bool"/>
+    <field name="Display Plane 5 Flip Pending Wait Enable" start="10" end="10" type="bool"/>
+    <field name="Display Plane 2 Flip Pending Wait Enable" start="9" end="9" type="bool"/>
+    <field name="Display Plane 1 B Scan Line Wait Enable" start="8" end="8" type="bool"/>
+    <field name="Display Plane 8 Flip Pending Wait Enable" start="7" end="7" type="bool"/>
+    <field name="Display Plane 7 Flip Pending Wait Enable" start="6" end="6" type="bool"/>
+    <field name="Display Plane 1 A Vertical Blank Wait Enable" start="3" end="3" type="bool"/>
+    <field name="Display Plane 4 Flip Pending Wait Enable" start="2" end="2" type="bool"/>
+    <field name="Display Plane 1 Flip Pending Wait Enable" start="1" end="1" type="bool"/>
+    <field name="Display Plnae 1 A Scan Line Wait Enable" start="0" end="0" type="bool"/>
+  </instruction>
+
+  <instruction name="PIPELINE_SELECT" bias="1" length="1">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="1"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="1"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="4"/>
+    <field name="Mask Bits" start="8" end="15" type="uint"/>
+    <field name="Force Media Awake" start="5" end="5" type="bool"/>
+    <field name="Media Sampler DOP Clock Gate Enable" start="4" end="4" type="bool"/>
+    <field name="Pipeline Selection" start="0" end="1" type="uint">
+      <value name="3D" value="0"/>
+      <value name="Media" value="1"/>
+      <value name="GPGPU" value="2"/>
+    </field>
+  </instruction>
+
+  <instruction name="PIPE_CONTROL" bias="2" length="6">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="3"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="2"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="0"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="4"/>
+    <field name="Flush LLC" start="58" end="58" type="bool"/>
+    <field name="Destination Address Type" start="56" end="56" type="uint" prefix="DAT">
+      <value name="PPGTT" value="0"/>
+      <value name="GGTT" value="1"/>
+    </field>
+    <field name="LRI Post Sync Operation" start="55" end="55" type="uint">
+      <value name="No LRI Operation" value="0"/>
+      <value name="MMIO Write Immediate Data" value="1"/>
+    </field>
+    <field name="Store Data Index" start="53" end="53" type="uint"/>
+    <field name="Command Streamer Stall Enable" start="52" end="52" type="uint"/>
+    <field name="Global Snapshot Count Reset" start="51" end="51" type="uint">
+      <value name="Don't Reset" value="0"/>
+      <value name="Reset" value="1"/>
+    </field>
+    <field name="TLB Invalidate" start="50" end="50" type="uint"/>
+    <field name="Generic Media State Clear" start="48" end="48" type="bool"/>
+    <field name="Post Sync Operation" start="46" end="47" type="uint">
+      <value name="No Write" value="0"/>
+      <value name="Write Immediate Data" value="1"/>
+      <value name="Write PS Depth Count" value="2"/>
+      <value name="Write Timestamp" value="3"/>
+    </field>
+    <field name="Depth Stall Enable" start="45" end="45" type="bool"/>
+    <field name="Render Target Cache Flush Enable" start="44" end="44" type="bool"/>
+    <field name="Instruction Cache Invalidate Enable" start="43" end="43" type="bool"/>
+    <field name="Texture Cache Invalidation Enable" start="42" end="42" type="bool"/>
+    <field name="Indirect State Pointers Disable" start="41" end="41" type="bool"/>
+    <field name="Notify Enable" start="40" end="40" type="bool"/>
+    <field name="Pipe Control Flush Enable" start="39" end="39" type="bool"/>
+    <field name="DC  Flush Enable" start="37" end="37" type="bool"/>
+    <field name="VF Cache Invalidation Enable" start="36" end="36" type="bool"/>
+    <field name="Constant Cache Invalidation Enable" start="35" end="35" type="bool"/>
+    <field name="State Cache Invalidation Enable" start="34" end="34" type="bool"/>
+    <field name="Stall At Pixel Scoreboard" start="33" end="33" type="bool"/>
+    <field name="Depth Cache Flush Enable" start="32" end="32" type="bool"/>
+    <field name="Address" start="66" end="111" type="address"/>
+    <field name="Immediate Data" start="128" end="191" type="uint"/>
+  </instruction>
+
+  <instruction name="STATE_BASE_ADDRESS" bias="2" length="19">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="0"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="1"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="1"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="17"/>
+    <field name="General State Base Address" start="44" end="95" type="address"/>
+    <field name="General State Memory Object Control State" start="36" end="42" type="MEMORY_OBJECT_CONTROL_STATE"/>
+    <field name="General State Base Address Modify Enable" start="32" end="32" type="bool"/>
+    <field name="Stateless Data Port Access Memory Object Control State" start="112" end="118" type="MEMORY_OBJECT_CONTROL_STATE"/>
+    <field name="Surface State Base Address" start="140" end="191" type="address"/>
+    <field name="Surface State Memory Object Control State" start="132" end="138" type="MEMORY_OBJECT_CONTROL_STATE"/>
+    <field name="Surface State Base Address Modify Enable" start="128" end="128" type="bool"/>
+    <field name="Dynamic State Base Address" start="204" end="255" type="address"/>
+    <field name="Dynamic State Memory Object Control State" start="196" end="202" type="MEMORY_OBJECT_CONTROL_STATE"/>
+    <field name="Dynamic State Base Address Modify Enable" start="192" end="192" type="bool"/>
+    <field name="Indirect Object Base Address" start="268" end="319" type="address"/>
+    <field name="Indirect Object Memory Object Control State" start="260" end="266" type="MEMORY_OBJECT_CONTROL_STATE"/>
+    <field name="Indirect Object Base Address Modify Enable" start="256" end="256" type="bool"/>
+    <field name="Instruction Base Address" start="332" end="383" type="address"/>
+    <field name="Instruction Memory Object Control State" start="324" end="330" type="MEMORY_OBJECT_CONTROL_STATE"/>
+    <field name="Instruction Base Address Modify Enable" start="320" end="320" type="bool"/>
+    <field name="General State Buffer Size" start="396" end="415" type="uint"/>
+    <field name="General State Buffer Size Modify Enable" start="384" end="384" type="bool"/>
+    <field name="Dynamic State Buffer Size" start="428" end="447" type="uint"/>
+    <field name="Dynamic State Buffer Size Modify Enable" start="416" end="416" type="bool"/>
+    <field name="Indirect Object Buffer Size" start="460" end="479" type="uint"/>
+    <field name="Indirect Object Buffer Size Modify Enable" start="448" end="448" type="bool"/>
+    <field name="Instruction Buffer Size" start="492" end="511" type="uint"/>
+    <field name="Instruction Buffer size Modify Enable" start="480" end="480" type="bool"/>
+    <field name="Bindless Surface State Base Address" start="524" end="575" type="address"/>
+    <field name="Bindless Surface State Memory Object Control State" start="516" end="522" type="MEMORY_OBJECT_CONTROL_STATE"/>
+    <field name="Bindless Surface State Base Address Modify Enable" start="512" end="512" type="bool"/>
+    <field name="Bindless Surface State Size" start="588" end="607" type="uint"/>
+  </instruction>
+
+  <instruction name="STATE_PREFETCH" bias="2" length="2">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="0"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="0"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="3"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="0"/>
+    <field name="Prefetch Pointer" start="38" end="63" type="address"/>
+    <field name="Prefetch Count" start="32" end="34" type="uint"/>
+  </instruction>
+
+  <instruction name="STATE_SIP" bias="2" length="3">
+    <field name="Command Type" start="29" end="31" type="uint" default="3"/>
+    <field name="Command SubType" start="27" end="28" type="uint" default="0"/>
+    <field name="3D Command Opcode" start="24" end="26" type="uint" default="1"/>
+    <field name="3D Command Sub Opcode" start="16" end="23" type="uint" default="2"/>
+    <field name="DWord Length" start="0" end="7" type="uint" default="1"/>
+    <field name="System Instruction Pointer" start="36" end="95" type="offset"/>
+  </instruction>
+
+</genxml>
diff --git a/src/intel/genxml/genX_pack.h b/src/intel/genxml/genX_pack.h
new file mode 100644 (file)
index 0000000..7967c29
--- /dev/null
@@ -0,0 +1,40 @@
+/*
+ * Copyright © 2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#pragma once
+
+#ifndef GEN_VERSIONx10
+#  error "The GEN_VERSIONx10 macro must be defined"
+#endif
+
+#if (GEN_VERSIONx10 == 70)
+#  include "genxml/gen7_pack.h"
+#elif (GEN_VERSIONx10 == 75)
+#  include "genxml/gen75_pack.h"
+#elif (GEN_VERSIONx10 == 80)
+#  include "genxml/gen8_pack.h"
+#elif (GEN_VERSIONx10 == 90)
+#  include "genxml/gen9_pack.h"
+#else
+#  error "Need to add a pack header include for this gen"
+#endif
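For orientation, here is a minimal sketch of how this dispatch header is meant to be consumed. Only the -DGEN_VERSIONx10 define comes from the #error check above; the translation-unit name and build line are hypothetical.

/* Hypothetical per-gen translation unit, compiled once per generation, e.g.
 *    cc -DGEN_VERSIONx10=75 -c my_gen75_code.c
 * With that define, the #if chain above resolves the include below to
 * "genxml/gen75_pack.h".
 */
#include "genxml/genX_pack.h"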
diff --git a/src/intel/genxml/gen_macros.h b/src/intel/genxml/gen_macros.h
new file mode 100644 (file)
index 0000000..052c57f
--- /dev/null
@@ -0,0 +1,90 @@
+/*
+ * Copyright © 2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#pragma once
+
+/* Macros for handling per-gen compilation.
+ *
+ * The prefixing macros GENX() and genX() automatically prefix whatever you
+ * give them with GENX_ or genX_, where X is the gen number.
+ *
+ * You can declare a function to be used on some range of gens like this:
+ *
+ * GENX_FUNC(GEN7, GEN75) void
+ * genX(my_function_name)(args...)
+ * {
+ *    // Do stuff
+ * }
+ *
+ * If the file is compiled for any set of gens containing gen7 and gen75,
+ * the function will effectively get compiled only twice, as
+ * gen7_my_function_name and gen75_my_function_name.  The function has to
+ * be compilable on all gens, but it will become a static inline that gets
+ * discarded by the compiler on all gens not in range.
+ *
+ * You can do pseudo-runtime checks in your function such as
+ *
+ * if (GEN_GEN > 8 || GEN_IS_HASWELL) {
+ *    // Do something
+ * }
+ *
+ * The contents of the if statement must be valid regardless of gen, but
+ * the branch is compiled away on every gen where the condition is false at
+ * compile time (here, everything other than Haswell and gens above 8).
+ *
+ * For places where you really do have a compile-time conflict, you can
+ * use preprocessor logic:
+ *
+ * #if (GEN_GEN > 8 || GEN_IS_HASWELL)
+ *    // Do something
+ * #endif
+ *
+ * However, it is strongly recommended that the pseudo-runtime check be used
+ * whenever possible.
+ */
+
+/* Base macro defined on the command line.  If we don't have this, we can't
+ * do anything.
+ */
+#ifndef GEN_VERSIONx10
+#  error "The GEN_VERSIONx10 macro must be defined"
+#endif
+
+#define GEN_GEN ((GEN_VERSIONx10) / 10)
+#define GEN_IS_HASWELL ((GEN_VERSIONx10) == 75)
+
+/* Prefixing macros */
+#if (GEN_VERSIONx10 == 70)
+#  define GENX(X) GEN7_##X
+#  define genX(x) gen7_##x
+#elif (GEN_VERSIONx10 == 75)
+#  define GENX(X) GEN75_##X
+#  define genX(x) gen75_##x
+#elif (GEN_VERSIONx10 == 80)
+#  define GENX(X) GEN8_##X
+#  define genX(x) gen8_##x
+#elif (GEN_VERSIONx10 == 90)
+#  define GENX(X) GEN9_##X
+#  define genX(x) gen9_##x
+#else
+#  error "Need to add prefixing macros for this gen"
+#endif
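A quick illustration of the prefixing macros just defined; the function name below is made up for the example.

/* With -DGEN_VERSIONx10=90 on the compiler command line, genX(emit_example)
 * expands to gen9_emit_example and GENX(PIPE_CONTROL) expands to
 * GEN9_PIPE_CONTROL, so the same source line yields a differently named
 * symbol per gen.  "emit_example" is a hypothetical name used only here.
 */
void genX(emit_example)(void);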
diff --git a/src/intel/genxml/gen_pack_header.py b/src/intel/genxml/gen_pack_header.py
new file mode 100755 (executable)
index 0000000..5bc18c7
--- /dev/null
@@ -0,0 +1,616 @@
+#!/usr/bin/env python3
+
+import xml.parsers.expat
+import re
+import sys
+import copy
+
+license = """/*
+ * Copyright (C) 2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+"""
+
+pack_header = """%(license)s
+
+/* Instructions, enums and structures for %(platform)s.
+ *
+ * This file has been generated, do not hand edit.
+ */
+
+#pragma once
+
+#include <stdio.h>
+#include <stdint.h>
+#include <stdbool.h>
+#include <assert.h>
+#include <math.h>
+
+#ifndef __gen_validate_value
+#define __gen_validate_value(x)
+#endif
+
+#ifndef __gen_field_functions
+#define __gen_field_functions
+
+union __gen_value {
+   float f;
+   uint32_t dw;
+};
+
+static inline uint64_t
+__gen_mbo(uint32_t start, uint32_t end)
+{
+   return (~0ull >> (64 - (end - start + 1))) << start;
+}
+
+static inline uint64_t
+__gen_uint(uint64_t v, uint32_t start, uint32_t end)
+{
+   __gen_validate_value(v);
+
+#if DEBUG
+   const int width = end - start + 1;
+   if (width < 64) {
+      const uint64_t max = (1ull << width) - 1;
+      assert(v <= max);
+   }
+#endif
+
+   return v << start;
+}
+
+static inline uint64_t
+__gen_sint(int64_t v, uint32_t start, uint32_t end)
+{
+   const int width = end - start + 1;
+
+   __gen_validate_value(v);
+
+#if DEBUG
+   if (width < 64) {
+      const int64_t max = (1ll << (width - 1)) - 1;
+      const int64_t min = -(1ll << (width - 1));
+      assert(min <= v && v <= max);
+   }
+#endif
+
+   const uint64_t mask = ~0ull >> (64 - width);
+
+   return (v & mask) << start;
+}
+
+static inline uint64_t
+__gen_offset(uint64_t v, uint32_t start, uint32_t end)
+{
+   __gen_validate_value(v);
+#if DEBUG
+   uint64_t mask = (~0ull >> (64 - (end - start + 1))) << start;
+
+   assert((v & ~mask) == 0);
+#endif
+
+   return v;
+}
+
+static inline uint32_t
+__gen_float(float v)
+{
+   __gen_validate_value(v);
+   return ((union __gen_value) { .f = (v) }).dw;
+}
+
+static inline uint64_t
+__gen_sfixed(float v, uint32_t start, uint32_t end, uint32_t fract_bits)
+{
+   __gen_validate_value(v);
+
+   const float factor = (1 << fract_bits);
+
+#if DEBUG
+   const float max = ((1 << (end - start)) - 1) / factor;
+   const float min = -(1 << (end - start)) / factor;
+   assert(min <= v && v <= max);
+#endif
+
+   const int32_t int_val = roundf(v * factor);
+   const uint64_t mask = ~0ull >> (64 - (end - start + 1));
+
+   return (int_val & mask) << start;
+}
+
+static inline uint64_t
+__gen_ufixed(float v, uint32_t start, uint32_t end, uint32_t fract_bits)
+{
+   __gen_validate_value(v);
+
+   const float factor = (1 << fract_bits);
+
+#if DEBUG
+   const float max = ((1 << (end - start + 1)) - 1) / factor;
+   const float min = 0.0f;
+   assert(min <= v && v <= max);
+#endif
+
+   const uint32_t uint_val = roundf(v * factor);
+
+   return uint_val << start;
+}
+
+#ifndef __gen_address_type
+#error #define __gen_address_type before including this file
+#endif
+
+#ifndef __gen_user_data
+#error #define __gen_combine_address before including this file
+#endif
+
+#endif
+
+"""
+
+def to_alphanum(name):
+    substitutions = {
+        ' ': '',
+        '/': '',
+        '[': '',
+        ']': '',
+        '(': '',
+        ')': '',
+        '-': '',
+        ':': '',
+        '.': '',
+        ',': '',
+        '=': '',
+        '>': '',
+        '#': '',
+        'α': 'alpha',
+        '&': '',
+        '*': '',
+        '"': '',
+        '+': '',
+        '\'': '',
+    }
+
+    for i, j in substitutions.items():
+        name = name.replace(i, j)
+
+    return name
+
+def safe_name(name):
+    name = to_alphanum(name)
+    if not str.isalpha(name[0]):
+        name = '_' + name
+
+    return name
+
+class Field:
+    ufixed_pattern = re.compile(r"u(\d+)\.(\d+)")
+    sfixed_pattern = re.compile(r"s(\d+)\.(\d+)")
+
+    def __init__(self, parser, attrs):
+        self.parser = parser
+        if "name" in attrs:
+            self.name = safe_name(attrs["name"])
+        self.start = int(attrs["start"])
+        self.end = int(attrs["end"])
+        self.type = attrs["type"]
+
+        if "prefix" in attrs:
+            self.prefix = attrs["prefix"]
+        else:
+            self.prefix = None
+
+        if "default" in attrs:
+            self.default = int(attrs["default"])
+        else:
+            self.default = None
+
+        ufixed_match = Field.ufixed_pattern.match(self.type)
+        if ufixed_match:
+            self.type = 'ufixed'
+            self.fractional_size = int(ufixed_match.group(2))
+
+        sfixed_match = Field.sfixed_pattern.match(self.type)
+        if sfixed_match:
+            self.type = 'sfixed'
+            self.fractional_size = int(sfixed_match.group(2))
+
+    def emit_template_struct(self, dim):
+        if self.type == 'address':
+            type = '__gen_address_type'
+        elif self.type == 'bool':
+            type = 'bool'
+        elif self.type == 'float':
+            type = 'float'
+        elif self.type == 'ufixed':
+            type = 'float'
+        elif self.type == 'sfixed':
+            type = 'float'
+        elif self.type == 'uint' and self.end - self.start > 32:
+            type = 'uint64_t'
+        elif self.type == 'offset':
+            type = 'uint64_t'
+        elif self.type == 'int':
+            type = 'int32_t'
+        elif self.type == 'uint':
+            type = 'uint32_t'
+        elif self.type in self.parser.structs:
+            type = 'struct ' + self.parser.gen_prefix(safe_name(self.type))
+        elif self.type == 'mbo':
+            return
+        else:
+            print("#error unhandled type: %s" % self.type)
+
+        print("   %-36s %s%s;" % (type, self.name, dim))
+
+        if len(self.values) > 0 and self.default == None:
+            if self.prefix:
+                prefix = self.prefix + "_"
+            else:
+                prefix = ""
+
+            for value in self.values:
+                print("#define %-40s %d" % (prefix + value.name, value.value))
+
+class Group:
+    def __init__(self, parser, parent, start, count, size):
+        self.parser = parser
+        self.parent = parent
+        self.start = start
+        self.count = count
+        self.size = size
+        self.fields = []
+
+    def emit_template_struct(self, dim):
+        if self.count == 0:
+            print("   /* variable length fields follow */")
+        else:
+            if self.count > 1:
+                dim = "%s[%d]" % (dim, self.count)
+
+            for field in self.fields:
+                field.emit_template_struct(dim)
+
+    class DWord:
+        def __init__(self):
+            self.size = 32
+            self.fields = []
+            self.address = None
+
+    def collect_dwords(self, dwords, start, dim):
+        for field in self.fields:
+            if type(field) is Group:
+                if field.count == 1:
+                    field.collect_dwords(dwords, start + field.start, dim)
+                else:
+                    for i in range(field.count):
+                        field.collect_dwords(dwords,
+                                             start + field.start + i * field.size,
+                                             "%s[%d]" % (dim, i))
+                continue
+
+            index = (start + field.start) // 32
+            if not index in dwords:
+                dwords[index] = self.DWord()
+
+            clone = copy.copy(field)
+            clone.start = clone.start + start
+            clone.end = clone.end + start
+            clone.dim = dim
+            dwords[index].fields.append(clone)
+
+            if field.type == "address":
+                # assert dwords[index].address == None
+                dwords[index].address = field
+
+            # Coalesce all the dwords covered by this field. The two cases we
+            # handle are where multiple fields are in a 64 bit word (typically
+            # an address and a few bits) or where a single struct field
+            # completely covers multiple dwords.
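+            # For example, an address field spanning bits 32..95 covers
+            # dwords 1 and 2; both dict entries end up aliasing one DWord
+            # object with size = 64, which emit_pack_function later writes
+            # as a single 64-bit value split across dw[1] and dw[2].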
+            while index < (start + field.end) // 32:
+                if index + 1 in dwords and not dwords[index] == dwords[index + 1]:
+                    dwords[index].fields.extend(dwords[index + 1].fields)
+                dwords[index].size = 64
+                dwords[index + 1] = dwords[index]
+                index = index + 1
+
+    def emit_pack_function(self, start):
+        dwords = {}
+        self.collect_dwords(dwords, 0, "")
+
+        # Determine number of dwords in this group. If we have a size, use
+        # that, since that'll account for MBZ dwords at the end of a group
+        # (like dword 8 on BDW+ 3DSTATE_HS). Otherwise, use the largest dword
+        # index we've seen plus one.
+        if self.size > 0:
+            length = self.size // 32
+        else:
+            length = max(dwords.keys()) + 1
+
+        for index in range(length):
+            # Handle MBZ dwords
+            if not index in dwords:
+                print("")
+                print("   dw[%d] = 0;" % index)
+                continue
+
+            # For 64 bit dwords, we aliased the two dword entries in the dword
+            # dict it occupies. Now that we're emitting the pack function,
+            # skip the duplicate entries.
+            dw = dwords[index]
+            if index > 0 and index - 1 in dwords and dw == dwords[index - 1]:
+                continue
+
+            # Special case: only one field and it's a struct at the beginning
+            # of the dword. In this case we pack directly into the
+            # destination. This is the only way we handle embedded structs
+            # larger than 32 bits.
+            if len(dw.fields) == 1:
+                field = dw.fields[0]
+                name = field.name + field.dim
+                if field.type in self.parser.structs and field.start % 32 == 0:
+                    print("")
+                    print("   %s_pack(data, &dw[%d], &values->%s);" %
+                          (self.parser.gen_prefix(safe_name(field.type)), index, name))
+                    continue
+
+            # Pack any fields of struct type first so we have integer values
+            # to OR into the dword for those fields.
+            field_index = 0
+            for field in dw.fields:
+                if type(field) is Field and field.type in self.parser.structs:
+                    name = field.name + field.dim
+                    print("")
+                    print("   uint32_t v%d_%d;" % (index, field_index))
+                    print("   %s_pack(data, &v%d_%d, &values->%s);" %
+                          (self.parser.gen_prefix(safe_name(field.type)), index, field_index, name))
+                    field_index = field_index + 1
+
+            print("")
+            dword_start = index * 32
+            if dw.address == None:
+                address_count = 0
+            else:
+                address_count = 1
+
+            if dw.size == 32 and dw.address == None:
+                v = None
+                print("   dw[%d] =" % index)
+            elif len(dw.fields) > address_count:
+                v = "v%d" % index
+                print("   const uint%d_t %s =" % (dw.size, v))
+            else:
+                v = "0"
+
+            field_index = 0
+            for field in dw.fields:
+                if field.type != "mbo":
+                    name = field.name + field.dim
+
+                if field.type == "mbo":
+                    s = "__gen_mbo(%d, %d)" % \
+                        (field.start - dword_start, field.end - dword_start)
+                elif field.type == "address":
+                    s = None
+                elif field.type == "uint":
+                    s = "__gen_uint(values->%s, %d, %d)" % \
+                        (name, field.start - dword_start, field.end - dword_start)
+                elif field.type == "int":
+                    s = "__gen_sint(values->%s, %d, %d)" % \
+                        (name, field.start - dword_start, field.end - dword_start)
+                elif field.type == "bool":
+                    s = "__gen_uint(values->%s, %d, %d)" % \
+                        (name, field.start - dword_start, field.end - dword_start)
+                elif field.type == "float":
+                    s = "__gen_float(values->%s)" % name
+                elif field.type == "offset":
+                    s = "__gen_offset(values->%s, %d, %d)" % \
+                        (name, field.start - dword_start, field.end - dword_start)
+                elif field.type == 'ufixed':
+                    s = "__gen_ufixed(values->%s, %d, %d, %d)" % \
+                        (name, field.start - dword_start, field.end - dword_start, field.fractional_size)
+                elif field.type == 'sfixed':
+                    s = "__gen_sfixed(values->%s, %d, %d, %d)" % \
+                        (name, field.start - dword_start, field.end - dword_start, field.fractional_size)
+                elif field.type in self.parser.structs:
+                    s = "__gen_uint(v%d_%d, %d, %d)" % \
+                        (index, field_index, field.start - dword_start, field.end - dword_start)
+                    field_index = field_index + 1
+                else:
+                    print("/* unhandled field %s, type %s */\n" % (name, field.type))
+                    s = None
+
+                if not s == None:
+                    if field == dw.fields[-1]:
+                        print("      %s;" % s)
+                    else:
+                        print("      %s |" % s)
+
+            if dw.size == 32:
+                if dw.address:
+                    print("   dw[%d] = __gen_combine_address(data, &dw[%d], values->%s, %s);" % (index, index, dw.address.name, v))
+                continue
+
+            if dw.address:
+                v_address = "v%d_address" % index
+                print("   const uint64_t %s =\n      __gen_combine_address(data, &dw[%d], values->%s, %s);" %
+                      (v_address, index, dw.address.name, v))
+                v = v_address
+
+            print("   dw[%d] = %s;" % (index, v))
+            print("   dw[%d] = %s >> 32;" % (index + 1, v))
+
+class Value:
+    def __init__(self, attrs):
+        self.name = safe_name(attrs["name"])
+        self.value = int(attrs["value"])
+
+class Parser:
+    def __init__(self):
+        self.parser = xml.parsers.expat.ParserCreate()
+        self.parser.StartElementHandler = self.start_element
+        self.parser.EndElementHandler = self.end_element
+
+        self.instruction = None
+        self.structs = {}
+
+    def start_element(self, name, attrs):
+        if name == "genxml":
+            self.platform = attrs["name"]
+            self.gen = attrs["gen"].replace('.', '')
+            print(pack_header % {'license': license, 'platform': self.platform})
+        elif name == "instruction":
+            self.instruction = safe_name(attrs["name"])
+            self.length_bias = int(attrs["bias"])
+            if "length" in attrs:
+                self.length = int(attrs["length"])
+                size = self.length * 32
+            else:
+                self.length = None
+                size = 0
+            self.group = Group(self, None, 0, 1, size)
+        elif name == "struct":
+            self.struct = safe_name(attrs["name"])
+            self.structs[attrs["name"]] = 1
+            if "length" in attrs:
+                self.length = int(attrs["length"])
+                size = self.length * 32
+            else:
+                self.length = None
+                size = 0
+            self.group = Group(self, None, 0, 1, size)
+
+        elif name == "group":
+            group = Group(self, self.group,
+                          int(attrs["start"]), int(attrs["count"]), int(attrs["size"]))
+            self.group.fields.append(group)
+            self.group = group
+        elif name == "field":
+            self.group.fields.append(Field(self, attrs))
+            self.values = []
+        elif name == "enum":
+            self.values = []
+            self.enum = safe_name(attrs["name"])
+            if "prefix" in attrs:
+                self.prefix = safe_name(attrs["prefix"])
+            else:
+                self.prefix = None
+        elif name == "value":
+            self.values.append(Value(attrs))
+
+    def end_element(self, name):
+        if name  == "instruction":
+            self.emit_instruction()
+            self.instruction = None
+            self.group = None
+        elif name  == "struct":
+            self.emit_struct()
+            self.struct = None
+            self.group = None
+        elif name == "group":
+            self.group = self.group.parent
+        elif name  == "field":
+            self.group.fields[-1].values = self.values
+        elif name  == "enum":
+            self.emit_enum()
+            self.enum = None
+
+    def gen_prefix(self, name):
+        if name[0] == "_":
+            return 'GEN%s%s' % (self.gen, name)
+        else:
+            return 'GEN%s_%s' % (self.gen, name)
+
+    def emit_template_struct(self, name, group):
+        print("struct %s {" % self.gen_prefix(name))
+        group.emit_template_struct("")
+        print("};\n")
+
+    def emit_pack_function(self, name, group):
+        name = self.gen_prefix(name)
+        print("static inline void\n%s_pack(__gen_user_data *data, void * restrict dst,\n%sconst struct %s * restrict values)\n{" %
+              (name, ' ' * (len(name) + 6), name))
+
+        # Cast dst to make header C++ friendly
+        print("   uint32_t * restrict dw = (uint32_t * restrict) dst;")
+
+        group.emit_pack_function(0)
+
+        print("}\n")
+
+    def emit_instruction(self):
+        name = self.instruction
+        if not self.length == None:
+            print('#define %-33s %4d' %
+                  (self.gen_prefix(name + "_length"), self.length))
+        print('#define %-33s %4d' %
+              (self.gen_prefix(name + "_length_bias"), self.length_bias))
+
+        default_fields = []
+        for field in self.group.fields:
+            if not type(field) is Field:
+                continue
+            if field.default == None:
+                continue
+            default_fields.append("   .%-35s = %4d" % (field.name, field.default))
+
+        if default_fields:
+            print('#define %-40s\\' % (self.gen_prefix(name + '_header')))
+            print(",  \\\n".join(default_fields))
+            print('')
+
+        self.emit_template_struct(self.instruction, self.group)
+
+        self.emit_pack_function(self.instruction, self.group)
+
+    def emit_struct(self):
+        name = self.struct
+        if not self.length == None:
+            print('#define %-33s %4d' %
+                  (self.gen_prefix(name + "_length"), self.length))
+
+        self.emit_template_struct(self.struct, self.group)
+        self.emit_pack_function(self.struct, self.group)
+
+    def emit_enum(self):
+        print('/* enum %s */' % self.gen_prefix(self.enum))
+        for value in self.values:
+            if self.prefix:
+                name = self.prefix + "_" + value.name
+            else:
+                name = value.name
+            print('#define %-36s %4d' % (name.upper(), value.value))
+        print('')
+
+    def parse(self, filename):
+        file = open(filename, "rb")
+        self.parser.ParseFile(file)
+        file.close()
+
+if len(sys.argv) < 2:
+    print("No input xml file specified")
+    sys.exit(1)
+
+input_file = sys.argv[1]
+
+p = Parser()
+p.parse(input_file)
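+
+# Typical invocation (illustrative; the build system normally drives this):
+# generate a per-gen pack header from one of the genxml files, e.g.
+#
+#    python3 gen_pack_header.py gen8.xml > gen8_pack.h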
diff --git a/src/intel/isl/.gitignore b/src/intel/isl/.gitignore
new file mode 100644 (file)
index 0000000..e9cfd67
--- /dev/null
@@ -0,0 +1 @@
+/isl_format_layout.c
diff --git a/src/intel/isl/Makefile.am b/src/intel/isl/Makefile.am
new file mode 100644 (file)
index 0000000..806934e
--- /dev/null
@@ -0,0 +1,123 @@
+# Copyright 2015 Intel Corporation
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice (including the next
+# paragraph) shall be included in all copies or substantial portions of the
+# Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+SUBDIRS = .
+
+
+ISL_GEN_LIBS =                                           \
+       libisl-gen7.la                                   \
+       libisl-gen75.la                                  \
+       libisl-gen8.la                                   \
+       libisl-gen9.la                                   \
+       $(NULL)
+
+noinst_LTLIBRARIES = $(ISL_GEN_LIBS) libisl.la
+
+EXTRA_DIST = tests
+
+# The gallium includes are for the util/u_math.h include from main/macros.h
+AM_CPPFLAGS = \
+       $(INTEL_CFLAGS) \
+       $(VALGRIND_CFLAGS) \
+       $(DEFINES) \
+       -I$(top_srcdir)/include \
+       -I$(top_srcdir)/src \
+       -I$(top_srcdir)/src/intel \
+       -I$(top_srcdir)/src/mapi \
+       -I$(top_srcdir)/src/mesa \
+       -I$(top_srcdir)/src/mesa/drivers/dri/common \
+       -I$(top_srcdir)/src/mesa/drivers/dri/i965 \
+       -I$(top_srcdir)/src/gallium/auxiliary \
+       -I$(top_srcdir)/src/gallium/include \
+       -I$(top_builddir)/src \
+       -I$(top_builddir)/src/intel
+
+libisl_la_CFLAGS = $(CFLAGS) -Wno-override-init
+
+libisl_la_LIBADD = $(ISL_GEN_LIBS)
+
+libisl_la_SOURCES =                                     \
+       isl.c                                           \
+       isl.h                                           \
+       isl_format.c                                    \
+       isl_format_layout.c                             \
+       isl_gen4.c                                      \
+       isl_gen4.h                                      \
+       isl_gen6.c                                      \
+       isl_gen6.h                                      \
+       isl_storage_image.c                             \
+       $(NULL)
+
+libisl_gen7_la_SOURCES =                                \
+       isl_gen7.c                                      \
+       isl_gen7.h                                      \
+        isl_surface_state.c                             \
+       $(NULL)
+libisl_gen7_la_CFLAGS = $(libisl_la_CFLAGS) -DGEN_VERSIONx10=70
+
+libisl_gen75_la_SOURCES =                               \
+        isl_surface_state.c                             \
+       $(NULL)
+libisl_gen75_la_CFLAGS = $(libisl_la_CFLAGS) -DGEN_VERSIONx10=75
+
+libisl_gen8_la_SOURCES =                                \
+       isl_gen8.c                                      \
+       isl_gen8.h                                      \
+        isl_surface_state.c                             \
+       $(NULL)
+libisl_gen8_la_CFLAGS = $(libisl_la_CFLAGS) -DGEN_VERSIONx10=80
+
+libisl_gen9_la_SOURCES =                                \
+       isl_gen9.c                                      \
+       isl_gen9.h                                      \
+        isl_surface_state.c                             \
+       $(NULL)
+libisl_gen9_la_CFLAGS = $(libisl_la_CFLAGS) -DGEN_VERSIONx10=90
+
+BUILT_SOURCES =                                         \
+       isl_format_layout.c
+
+isl_format_layout.c: isl_format_layout_gen.bash \
+                     isl_format_layout.csv
+       $(AM_V_GEN)$(srcdir)/isl_format_layout_gen.bash \
+           <$(srcdir)/isl_format_layout.csv >$@
+
+# ----------------------------------------------------------------------------
+#  Tests
+# ----------------------------------------------------------------------------
+
+TESTS = tests/isl_surf_get_image_offset_test
+
+check_PROGRAMS = $(TESTS)
+
+# Link tests to libi965_compiler.la for brw_get_device_info().
+tests_ldadd =                                          \
+       -lm                                             \
+       libisl.la                                       \
+       $(top_builddir)/src/mesa/drivers/dri/i965/libi965_compiler.la
+
+tests_isl_surf_get_image_offset_test_SOURCES =         \
+       tests/isl_surf_get_image_offset_test.c
+tests_isl_surf_get_image_offset_test_LDADD = $(tests_ldadd)
+
+# ----------------------------------------------------------------------------
+
+include $(top_srcdir)/install-lib-links.mk
diff --git a/src/intel/isl/README b/src/intel/isl/README
new file mode 100644 (file)
index 0000000..1ab4313
--- /dev/null
@@ -0,0 +1,113 @@
+Intel Surface Layout
+
+Introduction
+============
+isl is a small library that calculates the layout of Intel GPU surfaces, queries
+those layouts, and queries the properties of surface formats.
+
+
+Independence from User APIs
+===========================
+isl's API is independent of any user-facing graphics API, such as OpenGL and
+Vulkan. This independence allows isl to be used as a shared component by
+multiple Intel drivers.
+
+Rather than mimic the user-facing APIs, the isl API attempts to reflect Intel
+hardware: the actual memory layout of Intel GPU surfaces and how one programs
+the GPU to use those surfaces. For example:
+
+  - The tokens of `enum isl_format` (such as `ISL_FORMAT_R8G8B8A8_UNORM`)
+    match those of the hardware enum `SURFACE_FORMAT` rather than the OpenGL
+    or Vulkan format tokens.  And the values of `isl_format` and
+    `SURFACE_FORMAT` are identical.
+
+  - The OpenGL and Vulkan APIs contain depth and stencil formats. However the
+    hardware enum `SURFACE_FORMAT` does not, and therefore neither does `enum
+    isl_format`. Rather than define new pixel formats that have no hardware
+    counterpart, isl records the intent to use a surface as a depth or stencil
+    buffer with the usage flags `ISL_SURF_USAGE_DEPTH_BIT` and
+    `ISL_SURF_USAGE_STENCIL_BIT`.
+
+  - `struct isl_surf` distinguishes between the surface's logical dimension
+    from the user API's perspective (`enum isl_surf_dim`, which may be 1D, 2D,
+    or 3D) and the layout of those dimensions in memory (`enum isl_dim_layout`).
+
+
+Surface Units
+=============
+
+Intro
+-----
+ISL takes care in its equations to correctly handle conversion among surface
+units (such as pixels and compression blocks) and to carefully distinguish
+between a surface's logical layout in the client API and its physical layout
+in memory.
+
+Symbol names often explicitly declare their unit with a suffix:
+
+   - px: logical pixels
+   - sa: physical surface samples
+   - el: physical surface elements
+   - sa_rows: rows of physical surface samples
+   - el_rows: rows of physical surface elements
+
+Logical units are independent of hardware generation and are closely related
+to the user-facing API (OpenGL and Vulkan). Physical units are dependent on
+hardware generation and reflect the surface's layout in memory.
+
+Definitions
+-----------
+- Logical Pixels (px):
+
+  The surface's layout from the perspective of the client API (OpenGL and
+  Vulkan) is in units of logical pixels. Logical pixels are independent of the
+  surface's layout in memory.
+
+  A surface's width and height, in units of logical pixels, are not affected
+  by the surface's sample count. For example, consider a VkImage created with
+  VkImageCreateInfo{width=w0, height=h0, samples=s0}. The surface's width and
+  height at level 0 is, in units of logical pixels, w0 and h0 regardless of
+  the value of s0.
+
+  For example, the logical array length of a 3D surface is always 1, even on
+  Gen9 where the surface's memory layout is that of an array surface
+  (ISL_DIM_LAYOUT_GEN4_2D).
+
+- Physical Surface Samples (sa):
+
+  For a multisampled surface, this unit has the obvious meaning.
+  A singlesampled surface, from ISL's perspective, is simply a multisampled
+  surface whose sample count is 1.
+
+  For example, consider a 2D single-level non-array surface with samples=4,
+  width_px=64, and height_px=64 (note that the suffix 'px' indicates logical
+  pixels). If the surface's multisample layout is ISL_MSAA_LAYOUT_INTERLEAVED,
+  then the extent of level 0 is, in units of physical surface samples,
+  width_sa=128, height_sa=128, depth_sa=1, array_length_sa=1. If
+  ISL_MSAA_LAYOUT_ARRAY, then width_sa=64, height_sa=64, depth_sa=1,
+  array_length_sa=4.
+
+- Physical Surface Elements (el):
+
+  This unit allows ISL to treat compressed and uncompressed formats
+  identically in many calculations.
+
+  If the surface's pixel format is compressed, such as ETC2, then a surface
+  element is equivalent to a compression block. If uncompressed, then
+  a surface element is equivalent to a surface sample. As a corollary, for
+  a given surface a surface element is at least as large as a surface sample.
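+
+  For example (illustrative numbers): a 256x256 single-sampled surface in an
+  ETC2 format, whose compression block is 4x4 pixels, has
+  width_el = height_el = 64, while the same 256x256 surface in
+  R8G8B8A8_UNORM has width_el = height_el = 256.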
+
+Errata
+------
+ISL acquired the term 'surface element' from the Broadwell PRM [1], which
+defines it as follows:
+
+   An element is defined as a pixel in uncompressed surface formats, and as
+   a compression block in compressed surface formats. For MSFMT_DEPTH_STENCIL
+   type multisampled surfaces, an element is a sample.
+
+
+References
+==========
+[1]: Broadwell PRM >> Volume 2d: Command Reference: Structures >>
+     RENDER_SURFACE_STATE Surface Vertical Alignment (p325)
diff --git a/src/intel/isl/isl.c b/src/intel/isl/isl.c
new file mode 100644 (file)
index 0000000..a366380
--- /dev/null
@@ -0,0 +1,1517 @@
+/*
+ * Copyright 2015 Intel Corporation
+ *
+ *  Permission is hereby granted, free of charge, to any person obtaining a
+ *  copy of this software and associated documentation files (the "Software"),
+ *  to deal in the Software without restriction, including without limitation
+ *  the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ *  and/or sell copies of the Software, and to permit persons to whom the
+ *  Software is furnished to do so, subject to the following conditions:
+ *
+ *  The above copyright notice and this permission notice (including the next
+ *  paragraph) shall be included in all copies or substantial portions of the
+ *  Software.
+ *
+ *  THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ *  IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ *  FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ *  THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ *  LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ *  FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ *  IN THE SOFTWARE.
+ */
+
+#include <assert.h>
+#include <stdarg.h>
+#include <stdio.h>
+
+#include "isl.h"
+#include "isl_gen4.h"
+#include "isl_gen6.h"
+#include "isl_gen7.h"
+#include "isl_gen8.h"
+#include "isl_gen9.h"
+#include "isl_priv.h"
+
+void PRINTFLIKE(3, 4) UNUSED
+__isl_finishme(const char *file, int line, const char *fmt, ...)
+{
+   va_list ap;
+   char buf[512];
+
+   va_start(ap, fmt);
+   vsnprintf(buf, sizeof(buf), fmt, ap);
+   va_end(ap);
+
+   fprintf(stderr, "%s:%d: FINISHME: %s\n", file, line, buf);
+}
+
+void
+isl_device_init(struct isl_device *dev,
+                const struct brw_device_info *info,
+                bool has_bit6_swizzling)
+{
+   dev->info = info;
+   dev->use_separate_stencil = ISL_DEV_GEN(dev) >= 6;
+   dev->has_bit6_swizzling = has_bit6_swizzling;
+
+   /* The ISL_DEV macros may be defined in the CFLAGS, thus hardcoding some
+    * device properties at build time. Verify that the macros agree with the
+    * device properties chosen at runtime.
+    */
+   assert(ISL_DEV_GEN(dev) == dev->info->gen);
+   assert(ISL_DEV_USE_SEPARATE_STENCIL(dev) == dev->use_separate_stencil);
+
+   /* Did we break hiz or stencil? */
+   if (ISL_DEV_USE_SEPARATE_STENCIL(dev))
+      assert(info->has_hiz_and_separate_stencil);
+   if (info->must_use_separate_stencil)
+      assert(ISL_DEV_USE_SEPARATE_STENCIL(dev));
+}
+
+/**
+ * @brief Query the set of multisamples supported by the device.
+ *
+ * This function always returns non-zero, as ISL_SAMPLE_COUNT_1_BIT is always
+ * supported.
+ */
+isl_sample_count_mask_t ATTRIBUTE_CONST
+isl_device_get_sample_counts(struct isl_device *dev)
+{
+   if (ISL_DEV_GEN(dev) >= 9) {
+      return ISL_SAMPLE_COUNT_1_BIT |
+             ISL_SAMPLE_COUNT_2_BIT |
+             ISL_SAMPLE_COUNT_4_BIT |
+             ISL_SAMPLE_COUNT_8_BIT |
+             ISL_SAMPLE_COUNT_16_BIT;
+   } else if (ISL_DEV_GEN(dev) >= 8) {
+      return ISL_SAMPLE_COUNT_1_BIT |
+             ISL_SAMPLE_COUNT_2_BIT |
+             ISL_SAMPLE_COUNT_4_BIT |
+             ISL_SAMPLE_COUNT_8_BIT;
+   } else if (ISL_DEV_GEN(dev) >= 7) {
+      return ISL_SAMPLE_COUNT_1_BIT |
+             ISL_SAMPLE_COUNT_4_BIT |
+             ISL_SAMPLE_COUNT_8_BIT;
+   } else if (ISL_DEV_GEN(dev) >= 6) {
+      return ISL_SAMPLE_COUNT_1_BIT |
+             ISL_SAMPLE_COUNT_4_BIT;
+   } else {
+      return ISL_SAMPLE_COUNT_1_BIT;
+   }
+}
+
+/**
+ * @param[out] tile_info is written only on success
+ */
+bool
+isl_tiling_get_info(const struct isl_device *dev,
+                    enum isl_tiling tiling,
+                    uint32_t format_block_size,
+                    struct isl_tile_info *tile_info)
+{
+   const uint32_t bs = format_block_size;
+   uint32_t width, height;
+
+   assert(bs > 0);
+
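+   /* The widths and heights below are in bytes and rows respectively and
+    * match the usual 4KB tile geometry: X tiles are 512B x 8 rows and
+    * Y tiles are 128B x 32 rows.
+    */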
+   switch (tiling) {
+   case ISL_TILING_LINEAR:
+      width = 1;
+      height = 1;
+      break;
+
+   case ISL_TILING_X:
+      width = 1 << 9;
+      height = 1 << 3;
+      break;
+
+   case ISL_TILING_Y0:
+      width = 1 << 7;
+      height = 1 << 5;
+      break;
+
+   case ISL_TILING_W:
+      /* XXX: Should W tile be same as Y? */
+      width = 1 << 6;
+      height = 1 << 6;
+      break;
+
+   case ISL_TILING_Yf:
+   case ISL_TILING_Ys: {
+      if (ISL_DEV_GEN(dev) < 9)
+         return false;
+
+      if (!isl_is_pow2(bs))
+         return false;
+
+      bool is_Ys = tiling == ISL_TILING_Ys;
+
+      width = 1 << (6 + (ffs(bs) / 2) + (2 * is_Ys));
+      height = 1 << (6 - (ffs(bs) / 2) + (2 * is_Ys));
+      break;
+   }
+   } /* end switch */
+
+   *tile_info = (struct isl_tile_info) {
+      .tiling = tiling,
+      .width = width,
+      .height = height,
+      .size = width * height,
+   };
+
+   return true;
+}
+
+void
+isl_tiling_get_extent(const struct isl_device *dev,
+                      enum isl_tiling tiling,
+                      uint32_t format_block_size,
+                      struct isl_extent2d *e)
+{
+   struct isl_tile_info tile_info;
+   isl_tiling_get_info(dev, tiling, format_block_size, &tile_info);
+   *e = isl_extent2d(tile_info.width, tile_info.height);
+}
+
+/**
+ * @param[out] tiling is set only on success
+ */
+bool
+isl_surf_choose_tiling(const struct isl_device *dev,
+                       const struct isl_surf_init_info *restrict info,
+                       enum isl_tiling *tiling)
+{
+   isl_tiling_flags_t tiling_flags = info->tiling_flags;
+
+   /* Filter if multiple tiling options are given */
+   if (!isl_is_pow2(tiling_flags)) {
+      if (ISL_DEV_GEN(dev) >= 7) {
+         gen7_filter_tiling(dev, info, &tiling_flags);
+      } else {
+         isl_finishme("%s: gen%u", __func__, ISL_DEV_GEN(dev));
+         gen7_filter_tiling(dev, info, &tiling_flags);
+      }
+   }
+
+   #define CHOOSE(__tiling) \
+      do { \
+         if (tiling_flags & (1u << (__tiling))) { \
+            *tiling = (__tiling); \
+            return true; \
+          } \
+      } while (0)
+
+   /* Of the tiling modes remaining, choose the one that offers the best
+    * performance.
+    */
+
+   if (info->dim == ISL_SURF_DIM_1D) {
+      /* Prefer linear for 1D surfaces because they do not benefit from
+       * tiling. To the contrary, tiling leads to wasted memory and poor
+       * memory locality due to the swizzling and alignment restrictions
+       * required in tiled surfaces.
+       */
+      CHOOSE(ISL_TILING_LINEAR);
+   }
+
+   CHOOSE(ISL_TILING_Ys);
+   CHOOSE(ISL_TILING_Yf);
+   CHOOSE(ISL_TILING_Y0);
+   CHOOSE(ISL_TILING_X);
+   CHOOSE(ISL_TILING_W);
+   CHOOSE(ISL_TILING_LINEAR);
+
+   #undef CHOOSE
+
+   /* No tiling mode accommodates the inputs. */
+   return false;
+}
+
+static bool
+isl_choose_msaa_layout(const struct isl_device *dev,
+                 const struct isl_surf_init_info *info,
+                 enum isl_tiling tiling,
+                 enum isl_msaa_layout *msaa_layout)
+{
+   if (ISL_DEV_GEN(dev) >= 8) {
+      return gen8_choose_msaa_layout(dev, info, tiling, msaa_layout);
+   } else if (ISL_DEV_GEN(dev) >= 7) {
+      return gen7_choose_msaa_layout(dev, info, tiling, msaa_layout);
+   } else if (ISL_DEV_GEN(dev) >= 6) {
+      return gen6_choose_msaa_layout(dev, info, tiling, msaa_layout);
+   } else {
+      return gen4_choose_msaa_layout(dev, info, tiling, msaa_layout);
+   }
+}
+
+static void
+isl_msaa_interleaved_scale_px_to_sa(uint32_t samples,
+                                    uint32_t *width, uint32_t *height)
+{
+   assert(isl_is_pow2(samples));
+
+   /* From the Broadwell PRM >> Volume 5: Memory Views >> Computing Mip Level
+    * Sizes (p133):
+    *
+    *    If the surface is multisampled and it is a depth or stencil surface
+    *    or Multisampled Surface StorageFormat in SURFACE_STATE is
+    *    MSFMT_DEPTH_STENCIL, W_L and H_L must be adjusted as follows before
+    *    proceeding: [...]
+    */
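+   /* Concretely, given the shifts below: samples=2 doubles only the width,
+    * samples=4 doubles both width and height, and samples=8 quadruples the
+    * width and doubles the height.
+    */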
+   if (width)
+      *width = isl_align(*width, 2) << ((ffs(samples) - 0) / 2);
+   if (height)
+      *height = isl_align(*height, 2) << ((ffs(samples) - 1) / 2);
+}
+
+static enum isl_array_pitch_span
+isl_choose_array_pitch_span(const struct isl_device *dev,
+                            const struct isl_surf_init_info *restrict info,
+                            enum isl_dim_layout dim_layout,
+                            const struct isl_extent4d *phys_level0_sa)
+{
+   switch (dim_layout) {
+   case ISL_DIM_LAYOUT_GEN9_1D:
+   case ISL_DIM_LAYOUT_GEN4_2D:
+      if (ISL_DEV_GEN(dev) >= 8) {
+         /* QPitch becomes programmable in Broadwell. So choose the
+          * most compact QPitch possible in order to conserve memory.
+          *
+          * From the Broadwell PRM >> Volume 2d: Command Reference: Structures
+          * >> RENDER_SURFACE_STATE Surface QPitch (p325):
+          *
+          *    - Software must ensure that this field is set to a value
+          *      sufficiently large such that the array slices in the surface
+          *      do not overlap. Refer to the Memory Data Formats section for
+          *      information on how surfaces are stored in memory.
+          *
+          *    - This field specifies the distance in rows between array
+          *      slices.  It is used only in the following cases:
+          *
+          *          - Surface Array is enabled OR
+          *          - Number of Mulitsamples is not NUMSAMPLES_1 and
+          *            Multisampled Surface Storage Format set to MSFMT_MSS OR
+          *          - Surface Type is SURFTYPE_CUBE
+          */
+         return ISL_ARRAY_PITCH_SPAN_COMPACT;
+      } else if (ISL_DEV_GEN(dev) >= 7) {
+         /* Note that Ivybridge introduces
+          * RENDER_SURFACE_STATE.SurfaceArraySpacing, which provides the
+          * driver more control over the QPitch.
+          */
+
+         if (phys_level0_sa->array_len == 1) {
+            /* The hardware will never use the QPitch. So choose the most
+             * compact QPitch possible in order to conserve memory.
+             */
+            return ISL_ARRAY_PITCH_SPAN_COMPACT;
+         }
+
+         if (isl_surf_usage_is_depth_or_stencil(info->usage)) {
+            /* From the Ivybridge PRM >> Volume 1 Part 1: Graphics Core >>
+             * Section 6.18.4.7: Surface Arrays (p112):
+             *
+             *    If Surface Array Spacing is set to ARYSPC_FULL (note that
+             *    the depth buffer and stencil buffer have an implied value of
+             *    ARYSPC_FULL):
+             */
+            return ISL_ARRAY_PITCH_SPAN_COMPACT;
+         }
+
+         if (info->levels == 1) {
+            /* We are able to set RENDER_SURFACE_STATE.SurfaceArraySpacing
+             * to ARYSPC_LOD0.
+             */
+            return ISL_ARRAY_PITCH_SPAN_COMPACT;
+         }
+
+         return ISL_ARRAY_PITCH_SPAN_FULL;
+      } else if ((ISL_DEV_GEN(dev) == 5 || ISL_DEV_GEN(dev) == 6) &&
+                 ISL_DEV_USE_SEPARATE_STENCIL(dev) &&
+                 isl_surf_usage_is_stencil(info->usage)) {
+         /* [ILK-SNB] Errata from the Sandy Bridge PRM >> Volume 4 Part 1:
+          * Graphics Core >> Section 7.18.3.7: Surface Arrays:
+          *
+          *    The separate stencil buffer does not support mip mapping, thus
+          *    the storage for LODs other than LOD 0 is not needed.
+          */
+         assert(info->levels == 1);
+         assert(phys_level0_sa->array_len == 1);
+         return ISL_ARRAY_PITCH_SPAN_COMPACT;
+      } else {
+         if ((ISL_DEV_GEN(dev) == 5 || ISL_DEV_GEN(dev) == 6) &&
+             ISL_DEV_USE_SEPARATE_STENCIL(dev) &&
+             isl_surf_usage_is_stencil(info->usage)) {
+            /* [ILK-SNB] Errata from the Sandy Bridge PRM >> Volume 4 Part 1:
+             * Graphics Core >> Section 7.18.3.7: Surface Arrays:
+             *
+             *    The separate stencil buffer does not support mip mapping,
+             *    thus the storage for LODs other than LOD 0 is not needed.
+             */
+            assert(info->levels == 1);
+            assert(phys_level0_sa->array_len == 1);
+            return ISL_ARRAY_PITCH_SPAN_COMPACT;
+         }
+
+         if (phys_level0_sa->array_len == 1) {
+            /* The hardware will never use the QPitch. So choose the most
+             * compact QPitch possible in order to conserve memory.
+             */
+            return ISL_ARRAY_PITCH_SPAN_COMPACT;
+         }
+
+         return ISL_ARRAY_PITCH_SPAN_FULL;
+      }
+
+   case ISL_DIM_LAYOUT_GEN4_3D:
+      /* The hardware will never use the QPitch. So choose the most
+       * compact QPitch possible in order to conserve memory.
+       */
+      return ISL_ARRAY_PITCH_SPAN_COMPACT;
+   }
+
+   unreachable("bad isl_dim_layout");
+   return ISL_ARRAY_PITCH_SPAN_FULL;
+}
+
+static void
+isl_choose_image_alignment_el(const struct isl_device *dev,
+                              const struct isl_surf_init_info *restrict info,
+                              enum isl_tiling tiling,
+                              enum isl_msaa_layout msaa_layout,
+                              struct isl_extent3d *image_align_el)
+{
+   if (ISL_DEV_GEN(dev) >= 9) {
+      gen9_choose_image_alignment_el(dev, info, tiling, msaa_layout,
+                                     image_align_el);
+   } else if (ISL_DEV_GEN(dev) >= 8) {
+      gen8_choose_image_alignment_el(dev, info, tiling, msaa_layout,
+                                     image_align_el);
+   } else if (ISL_DEV_GEN(dev) >= 7) {
+      gen7_choose_image_alignment_el(dev, info, tiling, msaa_layout,
+                                     image_align_el);
+   } else if (ISL_DEV_GEN(dev) >= 6) {
+      gen6_choose_image_alignment_el(dev, info, tiling, msaa_layout,
+                                     image_align_el);
+   } else {
+      gen4_choose_image_alignment_el(dev, info, tiling, msaa_layout,
+                                     image_align_el);
+   }
+}
+
+static enum isl_dim_layout
+isl_surf_choose_dim_layout(const struct isl_device *dev,
+                           enum isl_surf_dim logical_dim)
+{
+   if (ISL_DEV_GEN(dev) >= 9) {
+      switch (logical_dim) {
+      case ISL_SURF_DIM_1D:
+         return ISL_DIM_LAYOUT_GEN9_1D;
+      case ISL_SURF_DIM_2D:
+      case ISL_SURF_DIM_3D:
+         return ISL_DIM_LAYOUT_GEN4_2D;
+      }
+   } else {
+      switch (logical_dim) {
+      case ISL_SURF_DIM_1D:
+      case ISL_SURF_DIM_2D:
+         return ISL_DIM_LAYOUT_GEN4_2D;
+      case ISL_SURF_DIM_3D:
+         return ISL_DIM_LAYOUT_GEN4_3D;
+      }
+   }
+
+   unreachable("bad isl_surf_dim");
+   return ISL_DIM_LAYOUT_GEN4_2D;
+}
+
+/**
+ * Calculate the physical extent of the surface's first level, in units of
+ * surface samples. The result is aligned to the format's compression block.
+ */
+static void
+isl_calc_phys_level0_extent_sa(const struct isl_device *dev,
+                               const struct isl_surf_init_info *restrict info,
+                               enum isl_dim_layout dim_layout,
+                               enum isl_tiling tiling,
+                               enum isl_msaa_layout msaa_layout,
+                               struct isl_extent4d *phys_level0_sa)
+{
+   const struct isl_format_layout *fmtl = isl_format_get_layout(info->format);
+
+   if (isl_format_is_yuv(info->format))
+      isl_finishme("%s:%s: YUV format", __FILE__, __func__);
+
+   switch (info->dim) {
+   case ISL_SURF_DIM_1D:
+      assert(info->height == 1);
+      assert(info->depth == 1);
+      assert(info->samples == 1);
+      assert(!isl_format_is_compressed(info->format));
+
+      switch (dim_layout) {
+      case ISL_DIM_LAYOUT_GEN4_3D:
+         unreachable("bad isl_dim_layout");
+
+      case ISL_DIM_LAYOUT_GEN9_1D:
+      case ISL_DIM_LAYOUT_GEN4_2D:
+         *phys_level0_sa = (struct isl_extent4d) {
+            .w = info->width,
+            .h = 1,
+            .d = 1,
+            .a = info->array_len,
+         };
+         break;
+      }
+      break;
+
+   case ISL_SURF_DIM_2D:
+      assert(dim_layout == ISL_DIM_LAYOUT_GEN4_2D);
+
+      if (tiling == ISL_TILING_Ys && info->samples > 1)
+         isl_finishme("%s:%s: multisample TileYs layout", __FILE__, __func__);
+
+      switch (msaa_layout) {
+      case ISL_MSAA_LAYOUT_NONE:
+         assert(info->depth == 1);
+         assert(info->samples == 1);
+
+         *phys_level0_sa = (struct isl_extent4d) {
+            .w = isl_align(info->width, fmtl->bw),
+            .h = isl_align(info->height, fmtl->bh),
+            .d = 1,
+            .a = info->array_len,
+         };
+         break;
+
+      case ISL_MSAA_LAYOUT_ARRAY:
+         assert(info->depth == 1);
+         assert(info->array_len == 1);
+         assert(!isl_format_is_compressed(info->format));
+
+         *phys_level0_sa = (struct isl_extent4d) {
+            .w = info->width,
+            .h = info->height,
+            .d = 1,
+            .a = info->samples,
+         };
+         break;
+
+      case ISL_MSAA_LAYOUT_INTERLEAVED:
+         assert(info->depth == 1);
+         assert(info->array_len == 1);
+         assert(!isl_format_is_compressed(info->format));
+
+         *phys_level0_sa = (struct isl_extent4d) {
+            .w = info->width,
+            .h = info->height,
+            .d = 1,
+            .a = 1,
+         };
+
+         isl_msaa_interleaved_scale_px_to_sa(info->samples,
+                                             &phys_level0_sa->w,
+                                             &phys_level0_sa->h);
+         break;
+      }
+      break;
+
+   case ISL_SURF_DIM_3D:
+      assert(info->array_len == 1);
+      assert(info->samples == 1);
+
+      if (fmtl->bd > 1) {
+         isl_finishme("%s:%s: compression block with depth > 1",
+                      __FILE__, __func__);
+      }
+
+      switch (dim_layout) {
+      case ISL_DIM_LAYOUT_GEN9_1D:
+         unreachable("bad isl_dim_layout");
+
+      case ISL_DIM_LAYOUT_GEN4_2D:
+         assert(ISL_DEV_GEN(dev) >= 9);
+
+         *phys_level0_sa = (struct isl_extent4d) {
+            .w = isl_align(info->width, fmtl->bw),
+            .h = isl_align(info->height, fmtl->bh),
+            .d = 1,
+            .a = info->depth,
+         };
+         break;
+
+      case ISL_DIM_LAYOUT_GEN4_3D:
+         assert(ISL_DEV_GEN(dev) < 9);
+         *phys_level0_sa = (struct isl_extent4d) {
+            .w = isl_align(info->width, fmtl->bw),
+            .h = isl_align(info->height, fmtl->bh),
+            .d = info->depth,
+            .a = 1,
+         };
+         break;
+      }
+      break;
+   }
+}
+
+/**
+ * A variant of isl_calc_phys_slice0_extent_sa() specific to
+ * ISL_DIM_LAYOUT_GEN4_2D.
+ */
+static void
+isl_calc_phys_slice0_extent_sa_gen4_2d(
+      const struct isl_device *dev,
+      const struct isl_surf_init_info *restrict info,
+      enum isl_msaa_layout msaa_layout,
+      const struct isl_extent3d *image_align_sa,
+      const struct isl_extent4d *phys_level0_sa,
+      struct isl_extent2d *phys_slice0_sa)
+{
+   const struct isl_format_layout *fmtl = isl_format_get_layout(info->format);
+
+   assert(phys_level0_sa->depth == 1);
+
+   if (info->levels == 1 && msaa_layout != ISL_MSAA_LAYOUT_INTERLEAVED) {
+      /* Do not pad the surface to the image alignment. Instead, pad it only
+       * to the pixel format's block alignment.
+       *
+       * For tiled surfaces, using a reduced alignment here avoids wasting CPU
+       * cycles on the below mipmap layout calculations. Reducing the
+       * alignment here is safe because we later align the row pitch and array
+       * pitch to the tile boundary. It is safe even for
+       * ISL_MSAA_LAYOUT_INTERLEAVED, because phys_level0_sa is already scaled
+       * to accommodate the interleaved samples.
+       *
+       * For linear surfaces, reducing the alignment here permits us to later
+       * choose an arbitrary, non-aligned row pitch. If the surface backs
+       * a VkBuffer, then an arbitrary pitch may be needed to accommodate
+       * VkBufferImageCopy::bufferRowLength.
+       */
+      *phys_slice0_sa = (struct isl_extent2d) {
+         .w = isl_align_npot(phys_level0_sa->w, fmtl->bw),
+         .h = isl_align_npot(phys_level0_sa->h, fmtl->bh),
+      };
+      return;
+   }
+
+   uint32_t slice_top_w = 0;
+   uint32_t slice_bottom_w = 0;
+   uint32_t slice_left_h = 0;
+   uint32_t slice_right_h = 0;
+
+   uint32_t W0 = phys_level0_sa->w;
+   uint32_t H0 = phys_level0_sa->h;
+
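+   /* Levels follow the classic gen4 2D arrangement: level 0 spans the top of
+    * the slice, level 1 sits below it on the left, level 2 is placed to the
+    * right of level 1, and levels 3 and up stack vertically under level 2.
+    * The running sums below track the widths of the top and bottom rows and
+    * the heights of the left and right columns of that arrangement.
+    */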
+   for (uint32_t l = 0; l < info->levels; ++l) {
+      uint32_t W = isl_minify(W0, l);
+      uint32_t H = isl_minify(H0, l);
+
+      if (msaa_layout == ISL_MSAA_LAYOUT_INTERLEAVED) {
+         /* From the Broadwell PRM >> Volume 5: Memory Views >> Computing Mip Level
+          * Sizes (p133):
+          *
+          *    If the surface is multisampled and it is a depth or stencil
+          *    surface or Multisampled Surface StorageFormat in
+          *    SURFACE_STATE is MSFMT_DEPTH_STENCIL, W_L and H_L must be
+          *    adjusted as follows before proceeding: [...]
+          */
+         isl_msaa_interleaved_scale_px_to_sa(info->samples, &W, &H);
+      }
+
+      uint32_t w = isl_align_npot(W, image_align_sa->w);
+      uint32_t h = isl_align_npot(H, image_align_sa->h);
+
+      if (l == 0) {
+         slice_top_w = w;
+         slice_left_h = h;
+         slice_right_h = h;
+      } else if (l == 1) {
+         slice_bottom_w = w;
+         slice_left_h += h;
+      } else if (l == 2) {
+         slice_bottom_w += w;
+         slice_right_h += h;
+      } else {
+         slice_right_h += h;
+      }
+   }
+
+   *phys_slice0_sa = (struct isl_extent2d) {
+      .w = MAX(slice_top_w, slice_bottom_w),
+      .h = MAX(slice_left_h, slice_right_h),
+   };
+}
+
+/**
+ * A variant of isl_calc_phys_slice0_extent_sa() specific to
+ * ISL_DIM_LAYOUT_GEN4_3D.
+ */
+static void
+isl_calc_phys_slice0_extent_sa_gen4_3d(
+      const struct isl_device *dev,
+      const struct isl_surf_init_info *restrict info,
+      const struct isl_extent3d *image_align_sa,
+      const struct isl_extent4d *phys_level0_sa,
+      struct isl_extent2d *phys_slice0_sa)
+{
+   assert(info->samples == 1);
+   assert(phys_level0_sa->array_len == 1);
+
+   uint32_t slice_w = 0;
+   uint32_t slice_h = 0;
+
+   uint32_t W0 = phys_level0_sa->w;
+   uint32_t H0 = phys_level0_sa->h;
+   uint32_t D0 = phys_level0_sa->d;
+
+   for (uint32_t l = 0; l < info->levels; ++l) {
+      uint32_t level_w = isl_align_npot(isl_minify(W0, l), image_align_sa->w);
+      uint32_t level_h = isl_align_npot(isl_minify(H0, l), image_align_sa->h);
+      uint32_t level_d = isl_align_npot(isl_minify(D0, l), image_align_sa->d);
+
+      uint32_t max_layers_horiz = MIN(level_d, 1u << l);
+      uint32_t max_layers_vert = isl_align(level_d, 1u << l) / (1u << l);
+
+      slice_w = MAX(slice_w, level_w * max_layers_horiz);
+      slice_h += level_h * max_layers_vert;
+   }
+
+   *phys_slice0_sa = (struct isl_extent2d) {
+      .w = slice_w,
+      .h = slice_h,
+   };
+}
+
+/**
+ * A variant of isl_calc_phys_slice0_extent_sa() specific to
+ * ISL_DIM_LAYOUT_GEN9_1D.
+ */
+static void
+isl_calc_phys_slice0_extent_sa_gen9_1d(
+      const struct isl_device *dev,
+      const struct isl_surf_init_info *restrict info,
+      const struct isl_extent3d *image_align_sa,
+      const struct isl_extent4d *phys_level0_sa,
+      struct isl_extent2d *phys_slice0_sa)
+{
+   const struct isl_format_layout *fmtl = isl_format_get_layout(info->format);
+
+   assert(phys_level0_sa->height == 1);
+   assert(phys_level0_sa->depth == 1);
+   assert(info->samples == 1);
+   assert(image_align_sa->w >= fmtl->bw);
+
+   uint32_t slice_w = 0;
+   const uint32_t W0 = phys_level0_sa->w;
+
+   for (uint32_t l = 0; l < info->levels; ++l) {
+      uint32_t W = isl_minify(W0, l);
+      uint32_t w = isl_align_npot(W, image_align_sa->w);
+
+      slice_w += w;
+   }
+
+   *phys_slice0_sa = isl_extent2d(slice_w, 1);
+}
+
+/**
+ * Calculate the physical extent of the surface's first array slice, in units
+ * of surface samples. If the surface is multi-leveled, then the result will
+ * be aligned to \a image_align_sa.
+ */
+static void
+isl_calc_phys_slice0_extent_sa(const struct isl_device *dev,
+                               const struct isl_surf_init_info *restrict info,
+                               enum isl_dim_layout dim_layout,
+                               enum isl_msaa_layout msaa_layout,
+                               const struct isl_extent3d *image_align_sa,
+                               const struct isl_extent4d *phys_level0_sa,
+                               struct isl_extent2d *phys_slice0_sa)
+{
+   switch (dim_layout) {
+   case ISL_DIM_LAYOUT_GEN9_1D:
+      isl_calc_phys_slice0_extent_sa_gen9_1d(dev, info,
+                                             image_align_sa, phys_level0_sa,
+                                             phys_slice0_sa);
+      return;
+   case ISL_DIM_LAYOUT_GEN4_2D:
+      isl_calc_phys_slice0_extent_sa_gen4_2d(dev, info, msaa_layout,
+                                             image_align_sa, phys_level0_sa,
+                                             phys_slice0_sa);
+      return;
+   case ISL_DIM_LAYOUT_GEN4_3D:
+      isl_calc_phys_slice0_extent_sa_gen4_3d(dev, info, image_align_sa,
+                                             phys_level0_sa, phys_slice0_sa);
+      return;
+   }
+}
+
+/**
+ * Calculate the pitch between physical array slices, in units of rows of
+ * surface elements.
+ */
+static uint32_t
+isl_calc_array_pitch_el_rows(const struct isl_device *dev,
+                             const struct isl_surf_init_info *restrict info,
+                             const struct isl_tile_info *tile_info,
+                             enum isl_dim_layout dim_layout,
+                             enum isl_array_pitch_span array_pitch_span,
+                             const struct isl_extent3d *image_align_sa,
+                             const struct isl_extent4d *phys_level0_sa,
+                             const struct isl_extent2d *phys_slice0_sa)
+{
+   const struct isl_format_layout *fmtl = isl_format_get_layout(info->format);
+   uint32_t pitch_sa_rows = 0;
+
+   switch (dim_layout) {
+   case ISL_DIM_LAYOUT_GEN9_1D:
+      /* Each row is an array slice */
+      pitch_sa_rows = 1;
+      break;
+   case ISL_DIM_LAYOUT_GEN4_2D:
+      switch (array_pitch_span) {
+      case ISL_ARRAY_PITCH_SPAN_COMPACT:
+         pitch_sa_rows = isl_align_npot(phys_slice0_sa->h, image_align_sa->h);
+         break;
+      case ISL_ARRAY_PITCH_SPAN_FULL: {
+         /* The QPitch equation is found in the Broadwell PRM >> Volume 5:
+          * Memory Views >> Common Surface Formats >> Surface Layout >> 2D
+          * Surfaces >> Surface Arrays.
+          */
+         uint32_t H0_sa = phys_level0_sa->h;
+         uint32_t H1_sa = isl_minify(H0_sa, 1);
+
+         uint32_t h0_sa = isl_align_npot(H0_sa, image_align_sa->h);
+         uint32_t h1_sa = isl_align_npot(H1_sa, image_align_sa->h);
+
+         uint32_t m;
+         if (ISL_DEV_GEN(dev) >= 7) {
+            /* The QPitch equation changed slightly in Ivybridge. */
+            m = 12;
+         } else {
+            m = 11;
+         }
+
+         pitch_sa_rows = h0_sa + h1_sa + (m * image_align_sa->h);
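+         /* Worked example (illustrative numbers): H0_sa = 100 with
+          * image_align_sa->h = 4 gives h0_sa = 100 and h1_sa = 52, so on
+          * gen7+ (m = 12) the pitch is 100 + 52 + 48 = 200 rows of samples,
+          * before the gen6 errata and block-height alignment below.
+          */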
+
+         if (ISL_DEV_GEN(dev) == 6 && info->samples > 1 &&
+             (info->height % 4 == 1)) {
+            /* [SNB] Errata from the Sandy Bridge PRM >> Volume 4 Part 1:
+             * Graphics Core >> Section 7.18.3.7: Surface Arrays:
+             *
+             *    [SNB] Errata: Sampler MSAA Qpitch will be 4 greater than
+             *    the value calculated in the equation above , for every
+             *    other odd Surface Height starting from 1 i.e. 1,5,9,13.
+             *
+             * XXX(chadv): Is the errata natural corollary of the physical
+             * layout of interleaved samples?
+             */
+            pitch_sa_rows += 4;
+         }
+
+         pitch_sa_rows = isl_align_npot(pitch_sa_rows, fmtl->bh);
+         break;
+      } /* end case */
+      }
+      break;
+   case ISL_DIM_LAYOUT_GEN4_3D:
+      assert(array_pitch_span == ISL_ARRAY_PITCH_SPAN_COMPACT);
+      pitch_sa_rows = isl_align_npot(phys_slice0_sa->h, image_align_sa->h);
+      break;
+   default:
+      unreachable("bad isl_dim_layout");
+      break;
+   }
+
+   assert(pitch_sa_rows % fmtl->bh == 0);
+   uint32_t pitch_el_rows = pitch_sa_rows / fmtl->bh;
+
+   if (ISL_DEV_GEN(dev) >= 9 &&
+       info->dim == ISL_SURF_DIM_3D &&
+       tile_info->tiling != ISL_TILING_LINEAR) {
+      /* From the Skylake BSpec >> RENDER_SURFACE_STATE >> Surface QPitch:
+       *
+       *    Tile Mode != Linear: This field must be set to an integer multiple
+       *    of the tile height
+       */
+      pitch_el_rows = isl_align(pitch_el_rows, tile_info->height);
+   }
+
+   return pitch_el_rows;
+}
+
+/**
+ * Calculate the pitch of each surface row, in bytes.
+ */
+static uint32_t
+isl_calc_row_pitch(const struct isl_device *dev,
+                   const struct isl_surf_init_info *restrict info,
+                   const struct isl_tile_info *tile_info,
+                   const struct isl_extent3d *image_align_sa,
+                   const struct isl_extent2d *phys_slice0_sa)
+{
+   const struct isl_format_layout *fmtl = isl_format_get_layout(info->format);
+
+   uint32_t row_pitch = info->min_pitch;
+
+   /* First, align the surface to a cache line boundary, as the PRM explains
+    * below.
+    *
+    * From the Broadwell PRM >> Volume 5: Memory Views >> Common Surface
+    * Formats >> Surface Padding Requirements >> Render Target and Media
+    * Surfaces:
+    *
+    *    The data port accesses data (pixels) outside of the surface if they
+    *    are contained in the same cache request as pixels that are within the
+    *    surface. These pixels will not be returned by the requesting message,
+    *    however if these pixels lie outside of defined pages in the GTT,
+    *    a GTT error will result when the cache request is processed. In order
+    *    to avoid these GTT errors, “padding” at the bottom of the surface is
+    *    sometimes necessary.
+    *
+    * From the Broadwell PRM >> Volume 5: Memory Views >> Common Surface
+    * Formats >> Surface Padding Requirements >> Sampling Engine Surfaces:
+    *
+    *    The sampling engine accesses texels outside of the surface if they
+    *    are contained in the same cache line as texels that are within the
+    *    surface.  These texels will not participate in any calculation
+    *    performed by the sampling engine and will not affect the result of
+    *    any sampling engine operation, however if these texels lie outside of
+    *    defined pages in the GTT, a GTT error will result when the cache line
+    *    is accessed. In order to avoid these GTT errors, “padding” at the
+    *    bottom and right side of a sampling engine surface is sometimes
+    *    necessary.
+    *
+    *    It is possible that a cache line will straddle a page boundary if the
+    *    base address or pitch is not aligned. All pages included in the cache
+    *    lines that are part of the surface must map to valid GTT entries to
+    *    avoid errors. To determine the necessary padding on the bottom and
+    *    right side of the surface, refer to the table in  Alignment Unit Size
+    *    section for the i and j parameters for the surface format in use. The
+    *    surface must then be extended to the next multiple of the alignment
+    *    unit size in each dimension, and all texels contained in this
+    *    extended surface must have valid GTT entries.
+    *
+    *    For example, suppose the surface size is 15 texels by 10 texels and
+    *    the alignment parameters are i=4 and j=2. In this case, the extended
+    *    surface would be 16 by 10. Note that these calculations are done in
+    *    texels, and must be converted to bytes based on the surface format
+    *    being used to determine whether additional pages need to be defined.
+    */
+   assert(phys_slice0_sa->w % fmtl->bw == 0);
+   row_pitch = MAX(row_pitch, fmtl->bs * (phys_slice0_sa->w / fmtl->bw));
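+   /* For example (hypothetical numbers): a slice that is 16 samples wide in
+    * an R8G8B8A8_UNORM surface (bs = 4, bw = 1) needs at least
+    * 4 * 16 = 64 bytes of pitch here; the tiling requirements below may then
+    * round that up further (e.g. to 128 bytes for legacy Y tiling, whose
+    * tiles are 128 bytes wide).
+    */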
+
+   switch (tile_info->tiling) {
+   case ISL_TILING_LINEAR:
+      /* From the Broadwell PRM >> Volume 2d: Command Reference: Structures >>
+       * RENDER_SURFACE_STATE Surface Pitch (p349):
+       *
+       *    - For linear render target surfaces and surfaces accessed with the
+       *      typed data port messages, the pitch must be a multiple of the
+       *      element size for non-YUV surface formats.  Pitch must be
+       *      a multiple of 2 * element size for YUV surface formats.
+       *
+       *    - [Requirements for SURFTYPE_BUFFER and SURFTYPE_STRBUF, which we
+       *      ignore because isl doesn't do buffers.]
+       *
+       *    - For other linear surfaces, the pitch can be any multiple of
+       *      bytes.
+       */
+      if (info->usage & ISL_SURF_USAGE_RENDER_TARGET_BIT) {
+         if (isl_format_is_yuv(info->format)) {
+            row_pitch = isl_align_npot(row_pitch, 2 * fmtl->bs);
+         } else {
+            row_pitch = isl_align_npot(row_pitch, fmtl->bs);
+         }
+      }
+      break;
+   default:
+      /* From the Broadwell PRM >> Volume 2d: Command Reference: Structures >>
+       * RENDER_SURFACE_STATE Surface Pitch (p349):
+       *
+       *    - For tiled surfaces, the pitch must be a multiple of the tile
+       *      width.
+       */
+      row_pitch = isl_align(row_pitch, tile_info->width);
+      break;
+   }
+
+   return row_pitch;
+}
+
+/**
+ * Calculate the surface's total height, including padding, in units of
+ * surface elements.
+ */
+static uint32_t
+isl_calc_total_height_el(const struct isl_device *dev,
+                         const struct isl_surf_init_info *restrict info,
+                         const struct isl_tile_info *tile_info,
+                         uint32_t phys_array_len,
+                         uint32_t row_pitch,
+                         uint32_t array_pitch_el_rows)
+{
+   const struct isl_format_layout *fmtl = isl_format_get_layout(info->format);
+
+   uint32_t total_h_el = phys_array_len * array_pitch_el_rows;
+   uint32_t pad_bytes = 0;
+
+   /* From the Broadwell PRM >> Volume 5: Memory Views >> Common Surface
+    * Formats >> Surface Padding Requirements >> Render Target and Media
+    * Surfaces:
+    *
+    *   The data port accesses data (pixels) outside of the surface if they
+    *   are contained in the same cache request as pixels that are within the
+    *   surface. These pixels will not be returned by the requesting message,
+    *   however if these pixels lie outside of defined pages in the GTT,
+    *   a GTT error will result when the cache request is processed. In
+    *   order to avoid these GTT errors, “padding” at the bottom of the
+    *   surface is sometimes necessary.
+    *
+    * From the Broadwell PRM >> Volume 5: Memory Views >> Common Surface
+    * Formats >> Surface Padding Requirements >> Sampling Engine Surfaces:
+    *
+    *    ... Lots of padding requirements, all listed separately below.
+    */
+
+   /* We can safely ignore the first padding requirement, quoted below,
+    * because isl doesn't do buffers.
+    *
+    *    - [pre-BDW] For buffers, which have no inherent “height,” padding
+    *      requirements are different. A buffer must be padded to the next
+    *      multiple of 256 array elements, with an additional 16 bytes added
+    *      beyond that to account for the L1 cache line.
+    */
+
+   /*
+    *    - For compressed textures [...], padding at the bottom of the surface
+    *      is to an even compressed row.
+    */
+   if (isl_format_is_compressed(info->format))
+      total_h_el = isl_align(total_h_el, 2);
+
+   /*
+    *    - For cube surfaces, an additional two rows of padding are required
+    *      at the bottom of the surface.
+    */
+   if (info->usage & ISL_SURF_USAGE_CUBE_BIT)
+      total_h_el += 2;
+
+   /*
+    *    - For packed YUV, 96 bpt, 48 bpt, and 24 bpt surface formats,
+    *      additional padding is required. These surfaces require an extra row
+    *      plus 16 bytes of padding at the bottom in addition to the general
+    *      padding requirements.
+    */
+   if (isl_format_is_yuv(info->format) &&
+       (fmtl->bs == 96 || fmtl->bs == 48 || fmtl->bs == 24)) {
+      total_h_el += 1;
+      pad_bytes += 16;
+   }
+
+   /*
+    *    - For linear surfaces, additional padding of 64 bytes is required at
+    *      the bottom of the surface. This is in addition to the padding
+    *      required above.
+    */
+   if (tile_info->tiling == ISL_TILING_LINEAR)
+      pad_bytes += 64;
+
+   /* The below text weakens, not strengthens, the padding requirements for
+    * linear surfaces. Therefore we can safely ignore it.
+    *
+    *    - [BDW+] For SURFTYPE_BUFFER, SURFTYPE_1D, and SURFTYPE_2D non-array,
+    *      non-MSAA, non-mip-mapped surfaces in linear memory, the only
+    *      padding requirement is to the next aligned 64-byte boundary beyond
+    *      the end of the surface. The rest of the padding requirements
+    *      documented above do not apply to these surfaces.
+    */
+
+   /*
+    *    - [SKL+] For SURFTYPE_2D and SURFTYPE_3D with linear mode and
+    *      height % 4 != 0, the surface must be padded with
+    *      4-(height % 4)*Surface Pitch # of bytes.
+    */
+   if (ISL_DEV_GEN(dev) >= 9 &&
+       tile_info->tiling == ISL_TILING_LINEAR &&
+       (info->dim == ISL_SURF_DIM_2D || info->dim == ISL_SURF_DIM_3D)) {
+      total_h_el = isl_align(total_h_el, 4);
+   }
+
+   /*
+    *    - [SKL+] For SURFTYPE_1D with linear mode, the surface must be padded
+    *      to 4 times the Surface Pitch # of bytes
+    */
+   if (ISL_DEV_GEN(dev) >= 9 &&
+       tile_info->tiling == ISL_TILING_LINEAR &&
+       info->dim == ISL_SURF_DIM_1D) {
+      total_h_el += 4;
+   }
+
+   /* Be sloppy. Align any leftover padding to a row boundary. */
+   total_h_el += isl_align_div_npot(pad_bytes, row_pitch);
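+   /* E.g. (hypothetical numbers): 64 bytes of linear padding with a 256-byte
+    * row pitch rounds up to isl_align_div_npot(64, 256) = 1 extra row.
+    */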
+
+   return total_h_el;
+}
+
+bool
+isl_surf_init_s(const struct isl_device *dev,
+                struct isl_surf *surf,
+                const struct isl_surf_init_info *restrict info)
+{
+   const struct isl_format_layout *fmtl = isl_format_get_layout(info->format);
+
+   const struct isl_extent4d logical_level0_px = {
+      .w = info->width,
+      .h = info->height,
+      .d = info->depth,
+      .a = info->array_len,
+   };
+
+   enum isl_dim_layout dim_layout =
+      isl_surf_choose_dim_layout(dev, info->dim);
+
+   enum isl_tiling tiling;
+   if (!isl_surf_choose_tiling(dev, info, &tiling))
+      return false;
+
+   struct isl_tile_info tile_info;
+   if (!isl_tiling_get_info(dev, tiling, fmtl->bs, &tile_info))
+      return false;
+
+   enum isl_msaa_layout msaa_layout;
+   if (!isl_choose_msaa_layout(dev, info, tiling, &msaa_layout))
+       return false;
+
+   struct isl_extent3d image_align_el;
+   isl_choose_image_alignment_el(dev, info, tiling, msaa_layout,
+                                 &image_align_el);
+
+   struct isl_extent3d image_align_sa =
+      isl_extent3d_el_to_sa(info->format, image_align_el);
+
+   struct isl_extent4d phys_level0_sa;
+   isl_calc_phys_level0_extent_sa(dev, info, dim_layout, tiling, msaa_layout,
+                                  &phys_level0_sa);
+   assert(phys_level0_sa.w % fmtl->bw == 0);
+   assert(phys_level0_sa.h % fmtl->bh == 0);
+
+   enum isl_array_pitch_span array_pitch_span =
+      isl_choose_array_pitch_span(dev, info, dim_layout, &phys_level0_sa);
+
+   struct isl_extent2d phys_slice0_sa;
+   isl_calc_phys_slice0_extent_sa(dev, info, dim_layout, msaa_layout,
+                                  &image_align_sa, &phys_level0_sa,
+                                  &phys_slice0_sa);
+   assert(phys_slice0_sa.w % fmtl->bw == 0);
+   assert(phys_slice0_sa.h % fmtl->bh == 0);
+
+   const uint32_t row_pitch = isl_calc_row_pitch(dev, info, &tile_info,
+                                                 &image_align_sa,
+                                                 &phys_slice0_sa);
+
+   const uint32_t array_pitch_el_rows =
+      isl_calc_array_pitch_el_rows(dev, info, &tile_info, dim_layout,
+                                   array_pitch_span, &image_align_sa,
+                                   &phys_level0_sa, &phys_slice0_sa);
+
+   const uint32_t total_h_el =
+      isl_calc_total_height_el(dev, info, &tile_info,
+                               phys_level0_sa.array_len, row_pitch,
+                               array_pitch_el_rows);
+
+   const uint32_t total_h_sa = total_h_el * fmtl->bh;
+   const uint32_t size = row_pitch * isl_align(total_h_sa, tile_info.height);
+
+   /* Alignment of surface base address, in bytes */
+   uint32_t base_alignment = MAX(1, info->min_alignment);
+   assert(isl_is_pow2(base_alignment) && isl_is_pow2(tile_info.size));
+   base_alignment = MAX(base_alignment, tile_info.size);
+
+   *surf = (struct isl_surf) {
+      .dim = info->dim,
+      .dim_layout = dim_layout,
+      .msaa_layout = msaa_layout,
+      .tiling = tiling,
+      .format = info->format,
+
+      .levels = info->levels,
+      .samples = info->samples,
+
+      .image_alignment_el = image_align_el,
+      .logical_level0_px = logical_level0_px,
+      .phys_level0_sa = phys_level0_sa,
+
+      .size = size,
+      .alignment = base_alignment,
+      .row_pitch = row_pitch,
+      .array_pitch_el_rows = array_pitch_el_rows,
+      .array_pitch_span = array_pitch_span,
+
+      .usage = info->usage,
+   };
+
+   return true;
+}
+
+void
+isl_surf_get_tile_info(const struct isl_device *dev,
+                       const struct isl_surf *surf,
+                       struct isl_tile_info *tile_info)
+{
+   const struct isl_format_layout *fmtl = isl_format_get_layout(surf->format);
+   isl_tiling_get_info(dev, surf->tiling, fmtl->bs, tile_info);
+}
+
+void
+isl_surf_fill_state_s(const struct isl_device *dev, void *state,
+                      const struct isl_surf_fill_state_info *restrict info)
+{
+#ifndef NDEBUG
+   isl_surf_usage_flags_t _base_usage =
+      info->view->usage & (ISL_SURF_USAGE_RENDER_TARGET_BIT |
+                           ISL_SURF_USAGE_TEXTURE_BIT |
+                           ISL_SURF_USAGE_STORAGE_BIT);
+   /* They may only specify one of the above bits at a time */
+   assert(__builtin_popcount(_base_usage) == 1);
+   /* The only other allowed bit is ISL_SURF_USAGE_CUBE_BIT */
+   assert((info->view->usage & ~ISL_SURF_USAGE_CUBE_BIT) == _base_usage);
+#endif
+
+   if (info->surf->dim == ISL_SURF_DIM_3D) {
+      assert(info->view->base_array_layer + info->view->array_len <=
+             info->surf->logical_level0_px.depth);
+   } else {
+      assert(info->view->base_array_layer + info->view->array_len <=
+             info->surf->logical_level0_px.array_len);
+   }
+
+   switch (ISL_DEV_GEN(dev)) {
+   case 7:
+      if (ISL_DEV_IS_HASWELL(dev)) {
+         isl_gen75_surf_fill_state_s(dev, state, info);
+      } else {
+         isl_gen7_surf_fill_state_s(dev, state, info);
+      }
+      break;
+   case 8:
+      isl_gen8_surf_fill_state_s(dev, state, info);
+      break;
+   case 9:
+      isl_gen9_surf_fill_state_s(dev, state, info);
+      break;
+   default:
+      assert(!"Cannot fill surface state for this gen");
+   }
+}
+
+void
+isl_buffer_fill_state_s(const struct isl_device *dev, void *state,
+                        const struct isl_buffer_fill_state_info *restrict info)
+{
+   switch (ISL_DEV_GEN(dev)) {
+   case 7:
+      if (ISL_DEV_IS_HASWELL(dev)) {
+         isl_gen75_buffer_fill_state_s(state, info);
+      } else {
+         isl_gen7_buffer_fill_state_s(state, info);
+      }
+      break;
+   case 8:
+      isl_gen8_buffer_fill_state_s(state, info);
+      break;
+   case 9:
+      isl_gen9_buffer_fill_state_s(state, info);
+      break;
+   default:
+      assert(!"Cannot fill surface state for this gen");
+   }
+}
+
+/**
+ * A variant of isl_surf_get_image_offset_sa() specific to
+ * ISL_DIM_LAYOUT_GEN4_2D.
+ */
+static void
+get_image_offset_sa_gen4_2d(const struct isl_surf *surf,
+                            uint32_t level, uint32_t layer,
+                            uint32_t *x_offset_sa,
+                            uint32_t *y_offset_sa)
+{
+   assert(level < surf->levels);
+   assert(layer < surf->phys_level0_sa.array_len);
+   assert(surf->phys_level0_sa.depth == 1);
+
+   const struct isl_extent3d image_align_sa =
+      isl_surf_get_image_alignment_sa(surf);
+
+   const uint32_t W0 = surf->phys_level0_sa.width;
+   const uint32_t H0 = surf->phys_level0_sa.height;
+
+   uint32_t x = 0;
+   uint32_t y = layer * isl_surf_get_array_pitch_sa_rows(surf);
+
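+   /* Descriptive note on this layout: level 0 occupies the top-left of the
+    * slice, level 1 sits directly below it at x = 0, and levels 2 and up
+    * form a column to the right of level 1 (at x = align(W1)), each placed
+    * below the previous one. That is why only l == 1 advances x below;
+    * every other prior level advances y.
+    */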
+   for (uint32_t l = 0; l < level; ++l) {
+      if (l == 1) {
+         uint32_t W = isl_minify(W0, l);
+
+         if (surf->msaa_layout == ISL_MSAA_LAYOUT_INTERLEAVED)
+            isl_msaa_interleaved_scale_px_to_sa(surf->samples, &W, NULL);
+
+         x += isl_align_npot(W, image_align_sa.w);
+      } else {
+         uint32_t H = isl_minify(H0, l);
+
+         if (surf->msaa_layout == ISL_MSAA_LAYOUT_INTERLEAVED)
+            isl_msaa_interleaved_scale_px_to_sa(surf->samples, NULL, &H);
+
+         y += isl_align_npot(H, image_align_sa.h);
+      }
+   }
+
+   *x_offset_sa = x;
+   *y_offset_sa = y;
+}
+
+/**
+ * A variant of isl_surf_get_image_offset_sa() specific to
+ * ISL_DIM_LAYOUT_GEN4_3D.
+ */
+static void
+get_image_offset_sa_gen4_3d(const struct isl_surf *surf,
+                            uint32_t level, uint32_t logical_z_offset_px,
+                            uint32_t *x_offset_sa,
+                            uint32_t *y_offset_sa)
+{
+   assert(level < surf->levels);
+   assert(logical_z_offset_px < isl_minify(surf->phys_level0_sa.depth, level));
+   assert(surf->phys_level0_sa.array_len == 1);
+
+   const struct isl_extent3d image_align_sa =
+      isl_surf_get_image_alignment_sa(surf);
+
+   const uint32_t W0 = surf->phys_level0_sa.width;
+   const uint32_t H0 = surf->phys_level0_sa.height;
+   const uint32_t D0 = surf->phys_level0_sa.depth;
+
+   uint32_t x = 0;
+   uint32_t y = 0;
+
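+   /* Descriptive note: in the GEN4 3D layout, level l packs its depth
+    * slices into rows of up to 1 << l slices each. The loop below skips
+    * the rows of every prior level; the code after it then locates the
+    * requested slice within this level's grid of slices.
+    */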
+   for (uint32_t l = 0; l < level; ++l) {
+      const uint32_t level_h = isl_align_npot(isl_minify(H0, l), image_align_sa.h);
+      const uint32_t level_d = isl_align_npot(isl_minify(D0, l), image_align_sa.d);
+      const uint32_t max_layers_vert = isl_align(level_d, 1u << l) / (1u << l);
+
+      y += level_h * max_layers_vert;
+   }
+
+   const uint32_t level_w = isl_align_npot(isl_minify(W0, level), image_align_sa.w);
+   const uint32_t level_h = isl_align_npot(isl_minify(H0, level), image_align_sa.h);
+   const uint32_t level_d = isl_align_npot(isl_minify(D0, level), image_align_sa.d);
+
+   const uint32_t max_layers_horiz = MIN(level_d, 1u << level);
+
+   x += level_w * (logical_z_offset_px % max_layers_horiz);
+   y += level_h * (logical_z_offset_px / max_layers_horiz);
+
+   *x_offset_sa = x;
+   *y_offset_sa = y;
+}
+
+/**
+ * A variant of isl_surf_get_image_offset_sa() specific to
+ * ISL_DIM_LAYOUT_GEN9_1D.
+ */
+static void
+get_image_offset_sa_gen9_1d(const struct isl_surf *surf,
+                            uint32_t level, uint32_t layer,
+                            uint32_t *x_offset_sa,
+                            uint32_t *y_offset_sa)
+{
+   assert(level < surf->levels);
+   assert(layer < surf->phys_level0_sa.array_len);
+   assert(surf->phys_level0_sa.height == 1);
+   assert(surf->phys_level0_sa.depth == 1);
+   assert(surf->samples == 1);
+
+   const uint32_t W0 = surf->phys_level0_sa.width;
+   const struct isl_extent3d image_align_sa =
+      isl_surf_get_image_alignment_sa(surf);
+
+   uint32_t x = 0;
+
+   for (uint32_t l = 0; l < level; ++l) {
+      uint32_t W = isl_minify(W0, l);
+      uint32_t w = isl_align_npot(W, image_align_sa.w);
+
+      x += w;
+   }
+
+   *x_offset_sa = x;
+   *y_offset_sa = layer * isl_surf_get_array_pitch_sa_rows(surf);
+}
+
+/**
+ * Calculate the offset, in units of surface samples, to a subimage in the
+ * surface.
+ *
+ * @invariant level < surface levels
+ * @invariant logical_array_layer < logical array length of surface
+ * @invariant logical_z_offset_px < logical depth of surface at level
+ */
+static void
+get_image_offset_sa(const struct isl_surf *surf,
+                    uint32_t level,
+                    uint32_t logical_array_layer,
+                    uint32_t logical_z_offset_px,
+                    uint32_t *x_offset_sa,
+                    uint32_t *y_offset_sa)
+{
+   assert(level < surf->levels);
+   assert(logical_array_layer < surf->logical_level0_px.array_len);
+   assert(logical_z_offset_px
+          < isl_minify(surf->logical_level0_px.depth, level));
+
+   switch (surf->dim_layout) {
+   case ISL_DIM_LAYOUT_GEN9_1D:
+      get_image_offset_sa_gen9_1d(surf, level, logical_array_layer,
+                                  x_offset_sa, y_offset_sa);
+      break;
+   case ISL_DIM_LAYOUT_GEN4_2D:
+      get_image_offset_sa_gen4_2d(surf, level, logical_array_layer
+                                  + logical_z_offset_px,
+                                  x_offset_sa, y_offset_sa);
+      break;
+   case ISL_DIM_LAYOUT_GEN4_3D:
+      get_image_offset_sa_gen4_3d(surf, level, logical_z_offset_px,
+                                  x_offset_sa, y_offset_sa);
+      break;
+   }
+}
+
+void
+isl_surf_get_image_offset_el(const struct isl_surf *surf,
+                             uint32_t level,
+                             uint32_t logical_array_layer,
+                             uint32_t logical_z_offset_px,
+                             uint32_t *x_offset_el,
+                             uint32_t *y_offset_el)
+{
+   const struct isl_format_layout *fmtl = isl_format_get_layout(surf->format);
+
+   assert(level < surf->levels);
+   assert(logical_array_layer < surf->logical_level0_px.array_len);
+   assert(logical_z_offset_px
+          < isl_minify(surf->logical_level0_px.depth, level));
+
+   uint32_t x_offset_sa, y_offset_sa;
+   get_image_offset_sa(surf, level,
+                       logical_array_layer,
+                       logical_z_offset_px,
+                       &x_offset_sa,
+                       &y_offset_sa);
+
+   *x_offset_el = x_offset_sa / fmtl->bw;
+   *y_offset_el = y_offset_sa / fmtl->bh;
+}
+
+void
+isl_surf_get_image_intratile_offset_el_xy(const struct isl_device *dev,
+                                       const struct isl_surf *surf,
+                                       uint32_t total_x_offset_el,
+                                       uint32_t total_y_offset_el,
+                                       uint32_t *base_address_offset,
+                                       uint32_t *x_offset_el,
+                                       uint32_t *y_offset_el)
+{
+   const struct isl_format_layout *fmtl = isl_format_get_layout(surf->format);
+
+   struct isl_tile_info tile_info;
+   isl_surf_get_tile_info(dev, surf, &tile_info);
+
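+   /* Split the total element offset into a tile-aligned byte offset from
+    * the base address plus a small (x, y) element offset inside one tile.
+    * Worked example (hypothetical numbers): with legacy Y tiling
+    * (128B x 32 rows, 4KB tiles), bs = 4, and a total offset of
+    * (40 el, 70 rows): small_y = 70 % 32 = 6, big_y_B = 64 * row_pitch,
+    * total_x_B = 160, small_x = (160 % 128) / 4 = 8 el, and
+    * big_x_B = (160 / 128) * 4096 = 4096.
+    */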
+   uint32_t small_y_offset_el = total_y_offset_el % tile_info.height;
+   uint32_t big_y_offset_el = total_y_offset_el - small_y_offset_el;
+   uint32_t big_y_offset_B = big_y_offset_el * surf->row_pitch;
+
+   uint32_t total_x_offset_B = total_x_offset_el * fmtl->bs;
+   uint32_t small_x_offset_B = total_x_offset_B % tile_info.width;
+   uint32_t small_x_offset_el = small_x_offset_B / fmtl->bs;
+   uint32_t big_x_offset_B = (total_x_offset_B / tile_info.width) * tile_info.size;
+
+   *base_address_offset = big_y_offset_B + big_x_offset_B;
+   *x_offset_el = small_x_offset_el;
+   *y_offset_el = small_y_offset_el;
+}
+
+void
+isl_surf_get_image_intratile_offset_el(const struct isl_device *dev,
+                                       const struct isl_surf *surf,
+                                       uint32_t level,
+                                       uint32_t logical_array_layer,
+                                       uint32_t logical_z_offset,
+                                       uint32_t *base_address_offset,
+                                       uint32_t *x_offset_el,
+                                       uint32_t *y_offset_el)
+{
+   uint32_t total_x_offset_el;
+   uint32_t total_y_offset_el;
+   isl_surf_get_image_offset_el(surf, level,
+                                logical_array_layer,
+                                logical_z_offset,
+                                &total_x_offset_el,
+                                &total_y_offset_el);
+
+   isl_surf_get_image_intratile_offset_el_xy(dev, surf,
+                                total_x_offset_el,
+                                total_y_offset_el,
+                                base_address_offset,
+                                x_offset_el,
+                                y_offset_el);
+}
+
+uint32_t
+isl_surf_get_depth_format(const struct isl_device *dev,
+                          const struct isl_surf *surf)
+{
+   /* Support for separate stencil buffers began in gen5. Support for
+    * interleaved depthstencil buffers ceased in gen7. The intermediate gens,
+    * those that supported separate and interleaved stencil, were gen5 and
+    * gen6.
+    *
+    * For a list of all available formats, see the Sandybridge PRM >> Volume
+    * 2 Part 1: 3D/Media - 3D Pipeline >> 3DSTATE_DEPTH_BUFFER >> Surface
+    * Format (p321).
+    */
+
+   bool has_stencil = surf->usage & ISL_SURF_USAGE_STENCIL_BIT;
+
+   assert(surf->usage & ISL_SURF_USAGE_DEPTH_BIT);
+
+   if (has_stencil)
+      assert(ISL_DEV_GEN(dev) < 7);
+
+   switch (surf->format) {
+   default:
+      unreachable("bad isl depth format");
+   case ISL_FORMAT_R32_FLOAT_X8X24_TYPELESS:
+      assert(ISL_DEV_GEN(dev) < 7);
+      return 0; /* D32_FLOAT_S8X24_UINT */
+   case ISL_FORMAT_R32_FLOAT:
+      assert(!has_stencil);
+      return 1; /* D32_FLOAT */
+   case ISL_FORMAT_R24_UNORM_X8_TYPELESS:
+      if (has_stencil) {
+         assert(ISL_DEV_GEN(dev) < 7);
+         return 2; /* D24_UNORM_S8_UINT */
+      } else {
+         assert(ISL_DEV_GEN(dev) >= 5);
+         return 3; /* D24_UNORM_X8_UINT */
+      }
+   case ISL_FORMAT_R16_UNORM:
+      assert(!has_stencil);
+      return 5; /* D16_UNORM */
+   }
+}
diff --git a/src/intel/isl/isl.h b/src/intel/isl/isl.h
new file mode 100644 (file)
index 0000000..90193ca
--- /dev/null
@@ -0,0 +1,1189 @@
+/*
+ * Copyright 2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+/**
+ * @file
+ * @brief Intel Surface Layout
+ *
+ * Header Layout
+ * -------------
+ * The header is ordered as:
+ *    - forward declarations
+ *    - macros that may be overridden at compile-time for specific gens
+ *    - enums and constants
+ *    - structs and unions
+ *    - functions
+ */
+
+#pragma once
+
+#include <assert.h>
+#include <stdbool.h>
+#include <stdint.h>
+
+#include "util/macros.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+struct brw_device_info;
+struct brw_image_param;
+
+#ifndef ISL_DEV_GEN
+/**
+ * @brief Get the hardware generation of isl_device.
+ *
+ * You can define this as a compile-time constant in the CFLAGS. For example,
+ * `gcc -DISL_DEV_GEN(dev)=9 ...`.
+ */
+#define ISL_DEV_GEN(__dev) ((__dev)->info->gen)
+#endif
+
+#ifndef ISL_DEV_IS_HASWELL
+/**
+ * @brief Query whether the isl_device is Haswell.
+ *
+ * You can define this as a compile-time constant in the CFLAGS. For example,
+ * `gcc -DISL_DEV_IS_HASWELL(dev)=0 ...`.
+ */
+#define ISL_DEV_IS_HASWELL(__dev) ((__dev)->info->is_haswell)
+#endif
+
+#ifndef ISL_DEV_USE_SEPARATE_STENCIL
+/**
+ * You can define this as a compile-time constant in the CFLAGS. For example,
+ * `gcc -DISL_DEV_USE_SEPARATE_STENCIL(dev)=1 ...`.
+ */
+#define ISL_DEV_USE_SEPARATE_STENCIL(__dev) ((__dev)->use_separate_stencil)
+#endif
+
+/**
+ * Hardware enumeration SURFACE_FORMAT.
+ *
+ * For the official list, see Broadwell PRM: Volume 2b: Command Reference:
+ * Enumerations: SURFACE_FORMAT.
+ */
+enum isl_format {
+   ISL_FORMAT_R32G32B32A32_FLOAT =                               0,
+   ISL_FORMAT_R32G32B32A32_SINT =                                1,
+   ISL_FORMAT_R32G32B32A32_UINT =                                2,
+   ISL_FORMAT_R32G32B32A32_UNORM =                               3,
+   ISL_FORMAT_R32G32B32A32_SNORM =                               4,
+   ISL_FORMAT_R64G64_FLOAT =                                     5,
+   ISL_FORMAT_R32G32B32X32_FLOAT =                               6,
+   ISL_FORMAT_R32G32B32A32_SSCALED =                             7,
+   ISL_FORMAT_R32G32B32A32_USCALED =                             8,
+   ISL_FORMAT_R32G32B32A32_SFIXED =                             32,
+   ISL_FORMAT_R64G64_PASSTHRU =                                 33,
+   ISL_FORMAT_R32G32B32_FLOAT =                                 64,
+   ISL_FORMAT_R32G32B32_SINT =                                  65,
+   ISL_FORMAT_R32G32B32_UINT =                                  66,
+   ISL_FORMAT_R32G32B32_UNORM =                                 67,
+   ISL_FORMAT_R32G32B32_SNORM =                                 68,
+   ISL_FORMAT_R32G32B32_SSCALED =                               69,
+   ISL_FORMAT_R32G32B32_USCALED =                               70,
+   ISL_FORMAT_R32G32B32_SFIXED =                                80,
+   ISL_FORMAT_R16G16B16A16_UNORM =                             128,
+   ISL_FORMAT_R16G16B16A16_SNORM =                             129,
+   ISL_FORMAT_R16G16B16A16_SINT =                              130,
+   ISL_FORMAT_R16G16B16A16_UINT =                              131,
+   ISL_FORMAT_R16G16B16A16_FLOAT =                             132,
+   ISL_FORMAT_R32G32_FLOAT =                                   133,
+   ISL_FORMAT_R32G32_SINT =                                    134,
+   ISL_FORMAT_R32G32_UINT =                                    135,
+   ISL_FORMAT_R32_FLOAT_X8X24_TYPELESS =                       136,
+   ISL_FORMAT_X32_TYPELESS_G8X24_UINT =                        137,
+   ISL_FORMAT_L32A32_FLOAT =                                   138,
+   ISL_FORMAT_R32G32_UNORM =                                   139,
+   ISL_FORMAT_R32G32_SNORM =                                   140,
+   ISL_FORMAT_R64_FLOAT =                                      141,
+   ISL_FORMAT_R16G16B16X16_UNORM =                             142,
+   ISL_FORMAT_R16G16B16X16_FLOAT =                             143,
+   ISL_FORMAT_A32X32_FLOAT =                                   144,
+   ISL_FORMAT_L32X32_FLOAT =                                   145,
+   ISL_FORMAT_I32X32_FLOAT =                                   146,
+   ISL_FORMAT_R16G16B16A16_SSCALED =                           147,
+   ISL_FORMAT_R16G16B16A16_USCALED =                           148,
+   ISL_FORMAT_R32G32_SSCALED =                                 149,
+   ISL_FORMAT_R32G32_USCALED =                                 150,
+   ISL_FORMAT_R32G32_SFIXED =                                  160,
+   ISL_FORMAT_R64_PASSTHRU =                                   161,
+   ISL_FORMAT_B8G8R8A8_UNORM =                                 192,
+   ISL_FORMAT_B8G8R8A8_UNORM_SRGB =                            193,
+   ISL_FORMAT_R10G10B10A2_UNORM =                              194,
+   ISL_FORMAT_R10G10B10A2_UNORM_SRGB =                         195,
+   ISL_FORMAT_R10G10B10A2_UINT =                               196,
+   ISL_FORMAT_R10G10B10_SNORM_A2_UNORM =                       197,
+   ISL_FORMAT_R8G8B8A8_UNORM =                                 199,
+   ISL_FORMAT_R8G8B8A8_UNORM_SRGB =                            200,
+   ISL_FORMAT_R8G8B8A8_SNORM =                                 201,
+   ISL_FORMAT_R8G8B8A8_SINT =                                  202,
+   ISL_FORMAT_R8G8B8A8_UINT =                                  203,
+   ISL_FORMAT_R16G16_UNORM =                                   204,
+   ISL_FORMAT_R16G16_SNORM =                                   205,
+   ISL_FORMAT_R16G16_SINT =                                    206,
+   ISL_FORMAT_R16G16_UINT =                                    207,
+   ISL_FORMAT_R16G16_FLOAT =                                   208,
+   ISL_FORMAT_B10G10R10A2_UNORM =                              209,
+   ISL_FORMAT_B10G10R10A2_UNORM_SRGB =                         210,
+   ISL_FORMAT_R11G11B10_FLOAT =                                211,
+   ISL_FORMAT_R32_SINT =                                       214,
+   ISL_FORMAT_R32_UINT =                                       215,
+   ISL_FORMAT_R32_FLOAT =                                      216,
+   ISL_FORMAT_R24_UNORM_X8_TYPELESS =                          217,
+   ISL_FORMAT_X24_TYPELESS_G8_UINT =                           218,
+   ISL_FORMAT_L32_UNORM =                                      221,
+   ISL_FORMAT_A32_UNORM =                                      222,
+   ISL_FORMAT_L16A16_UNORM =                                   223,
+   ISL_FORMAT_I24X8_UNORM =                                    224,
+   ISL_FORMAT_L24X8_UNORM =                                    225,
+   ISL_FORMAT_A24X8_UNORM =                                    226,
+   ISL_FORMAT_I32_FLOAT =                                      227,
+   ISL_FORMAT_L32_FLOAT =                                      228,
+   ISL_FORMAT_A32_FLOAT =                                      229,
+   ISL_FORMAT_X8B8_UNORM_G8R8_SNORM =                          230,
+   ISL_FORMAT_A8X8_UNORM_G8R8_SNORM =                          231,
+   ISL_FORMAT_B8X8_UNORM_G8R8_SNORM =                          232,
+   ISL_FORMAT_B8G8R8X8_UNORM =                                 233,
+   ISL_FORMAT_B8G8R8X8_UNORM_SRGB =                            234,
+   ISL_FORMAT_R8G8B8X8_UNORM =                                 235,
+   ISL_FORMAT_R8G8B8X8_UNORM_SRGB =                            236,
+   ISL_FORMAT_R9G9B9E5_SHAREDEXP =                             237,
+   ISL_FORMAT_B10G10R10X2_UNORM =                              238,
+   ISL_FORMAT_L16A16_FLOAT =                                   240,
+   ISL_FORMAT_R32_UNORM =                                      241,
+   ISL_FORMAT_R32_SNORM =                                      242,
+   ISL_FORMAT_R10G10B10X2_USCALED =                            243,
+   ISL_FORMAT_R8G8B8A8_SSCALED =                               244,
+   ISL_FORMAT_R8G8B8A8_USCALED =                               245,
+   ISL_FORMAT_R16G16_SSCALED =                                 246,
+   ISL_FORMAT_R16G16_USCALED =                                 247,
+   ISL_FORMAT_R32_SSCALED =                                    248,
+   ISL_FORMAT_R32_USCALED =                                    249,
+   ISL_FORMAT_B5G6R5_UNORM =                                   256,
+   ISL_FORMAT_B5G6R5_UNORM_SRGB =                              257,
+   ISL_FORMAT_B5G5R5A1_UNORM =                                 258,
+   ISL_FORMAT_B5G5R5A1_UNORM_SRGB =                            259,
+   ISL_FORMAT_B4G4R4A4_UNORM =                                 260,
+   ISL_FORMAT_B4G4R4A4_UNORM_SRGB =                            261,
+   ISL_FORMAT_R8G8_UNORM =                                     262,
+   ISL_FORMAT_R8G8_SNORM =                                     263,
+   ISL_FORMAT_R8G8_SINT =                                      264,
+   ISL_FORMAT_R8G8_UINT =                                      265,
+   ISL_FORMAT_R16_UNORM =                                      266,
+   ISL_FORMAT_R16_SNORM =                                      267,
+   ISL_FORMAT_R16_SINT =                                       268,
+   ISL_FORMAT_R16_UINT =                                       269,
+   ISL_FORMAT_R16_FLOAT =                                      270,
+   ISL_FORMAT_A8P8_UNORM_PALETTE0 =                            271,
+   ISL_FORMAT_A8P8_UNORM_PALETTE1 =                            272,
+   ISL_FORMAT_I16_UNORM =                                      273,
+   ISL_FORMAT_L16_UNORM =                                      274,
+   ISL_FORMAT_A16_UNORM =                                      275,
+   ISL_FORMAT_L8A8_UNORM =                                     276,
+   ISL_FORMAT_I16_FLOAT =                                      277,
+   ISL_FORMAT_L16_FLOAT =                                      278,
+   ISL_FORMAT_A16_FLOAT =                                      279,
+   ISL_FORMAT_L8A8_UNORM_SRGB =                                280,
+   ISL_FORMAT_R5G5_SNORM_B6_UNORM =                            281,
+   ISL_FORMAT_B5G5R5X1_UNORM =                                 282,
+   ISL_FORMAT_B5G5R5X1_UNORM_SRGB =                            283,
+   ISL_FORMAT_R8G8_SSCALED =                                   284,
+   ISL_FORMAT_R8G8_USCALED =                                   285,
+   ISL_FORMAT_R16_SSCALED =                                    286,
+   ISL_FORMAT_R16_USCALED =                                    287,
+   ISL_FORMAT_P8A8_UNORM_PALETTE0 =                            290,
+   ISL_FORMAT_P8A8_UNORM_PALETTE1 =                            291,
+   ISL_FORMAT_A1B5G5R5_UNORM =                                 292,
+   ISL_FORMAT_A4B4G4R4_UNORM =                                 293,
+   ISL_FORMAT_L8A8_UINT =                                      294,
+   ISL_FORMAT_L8A8_SINT =                                      295,
+   ISL_FORMAT_R8_UNORM =                                       320,
+   ISL_FORMAT_R8_SNORM =                                       321,
+   ISL_FORMAT_R8_SINT =                                        322,
+   ISL_FORMAT_R8_UINT =                                        323,
+   ISL_FORMAT_A8_UNORM =                                       324,
+   ISL_FORMAT_I8_UNORM =                                       325,
+   ISL_FORMAT_L8_UNORM =                                       326,
+   ISL_FORMAT_P4A4_UNORM_PALETTE0 =                            327,
+   ISL_FORMAT_A4P4_UNORM_PALETTE0 =                            328,
+   ISL_FORMAT_R8_SSCALED =                                     329,
+   ISL_FORMAT_R8_USCALED =                                     330,
+   ISL_FORMAT_P8_UNORM_PALETTE0 =                              331,
+   ISL_FORMAT_L8_UNORM_SRGB =                                  332,
+   ISL_FORMAT_P8_UNORM_PALETTE1 =                              333,
+   ISL_FORMAT_P4A4_UNORM_PALETTE1 =                            334,
+   ISL_FORMAT_A4P4_UNORM_PALETTE1 =                            335,
+   ISL_FORMAT_Y8_UNORM =                                       336,
+   ISL_FORMAT_L8_UINT =                                        338,
+   ISL_FORMAT_L8_SINT =                                        339,
+   ISL_FORMAT_I8_UINT =                                        340,
+   ISL_FORMAT_I8_SINT =                                        341,
+   ISL_FORMAT_DXT1_RGB_SRGB =                                  384,
+   ISL_FORMAT_R1_UNORM =                                       385,
+   ISL_FORMAT_YCRCB_NORMAL =                                   386,
+   ISL_FORMAT_YCRCB_SWAPUVY =                                  387,
+   ISL_FORMAT_P2_UNORM_PALETTE0 =                              388,
+   ISL_FORMAT_P2_UNORM_PALETTE1 =                              389,
+   ISL_FORMAT_BC1_UNORM =                                      390,
+   ISL_FORMAT_BC2_UNORM =                                      391,
+   ISL_FORMAT_BC3_UNORM =                                      392,
+   ISL_FORMAT_BC4_UNORM =                                      393,
+   ISL_FORMAT_BC5_UNORM =                                      394,
+   ISL_FORMAT_BC1_UNORM_SRGB =                                 395,
+   ISL_FORMAT_BC2_UNORM_SRGB =                                 396,
+   ISL_FORMAT_BC3_UNORM_SRGB =                                 397,
+   ISL_FORMAT_MONO8 =                                          398,
+   ISL_FORMAT_YCRCB_SWAPUV =                                   399,
+   ISL_FORMAT_YCRCB_SWAPY =                                    400,
+   ISL_FORMAT_DXT1_RGB =                                       401,
+   ISL_FORMAT_FXT1 =                                           402,
+   ISL_FORMAT_R8G8B8_UNORM =                                   403,
+   ISL_FORMAT_R8G8B8_SNORM =                                   404,
+   ISL_FORMAT_R8G8B8_SSCALED =                                 405,
+   ISL_FORMAT_R8G8B8_USCALED =                                 406,
+   ISL_FORMAT_R64G64B64A64_FLOAT =                             407,
+   ISL_FORMAT_R64G64B64_FLOAT =                                408,
+   ISL_FORMAT_BC4_SNORM =                                      409,
+   ISL_FORMAT_BC5_SNORM =                                      410,
+   ISL_FORMAT_R16G16B16_FLOAT =                                411,
+   ISL_FORMAT_R16G16B16_UNORM =                                412,
+   ISL_FORMAT_R16G16B16_SNORM =                                413,
+   ISL_FORMAT_R16G16B16_SSCALED =                              414,
+   ISL_FORMAT_R16G16B16_USCALED =                              415,
+   ISL_FORMAT_BC6H_SF16 =                                      417,
+   ISL_FORMAT_BC7_UNORM =                                      418,
+   ISL_FORMAT_BC7_UNORM_SRGB =                                 419,
+   ISL_FORMAT_BC6H_UF16 =                                      420,
+   ISL_FORMAT_PLANAR_420_8 =                                   421,
+   ISL_FORMAT_R8G8B8_UNORM_SRGB =                              424,
+   ISL_FORMAT_ETC1_RGB8 =                                      425,
+   ISL_FORMAT_ETC2_RGB8 =                                      426,
+   ISL_FORMAT_EAC_R11 =                                        427,
+   ISL_FORMAT_EAC_RG11 =                                       428,
+   ISL_FORMAT_EAC_SIGNED_R11 =                                 429,
+   ISL_FORMAT_EAC_SIGNED_RG11 =                                430,
+   ISL_FORMAT_ETC2_SRGB8 =                                     431,
+   ISL_FORMAT_R16G16B16_UINT =                                 432,
+   ISL_FORMAT_R16G16B16_SINT =                                 433,
+   ISL_FORMAT_R32_SFIXED =                                     434,
+   ISL_FORMAT_R10G10B10A2_SNORM =                              435,
+   ISL_FORMAT_R10G10B10A2_USCALED =                            436,
+   ISL_FORMAT_R10G10B10A2_SSCALED =                            437,
+   ISL_FORMAT_R10G10B10A2_SINT =                               438,
+   ISL_FORMAT_B10G10R10A2_SNORM =                              439,
+   ISL_FORMAT_B10G10R10A2_USCALED =                            440,
+   ISL_FORMAT_B10G10R10A2_SSCALED =                            441,
+   ISL_FORMAT_B10G10R10A2_UINT =                               442,
+   ISL_FORMAT_B10G10R10A2_SINT =                               443,
+   ISL_FORMAT_R64G64B64A64_PASSTHRU =                          444,
+   ISL_FORMAT_R64G64B64_PASSTHRU =                             445,
+   ISL_FORMAT_ETC2_RGB8_PTA =                                  448,
+   ISL_FORMAT_ETC2_SRGB8_PTA =                                 449,
+   ISL_FORMAT_ETC2_EAC_RGBA8 =                                 450,
+   ISL_FORMAT_ETC2_EAC_SRGB8_A8 =                              451,
+   ISL_FORMAT_R8G8B8_UINT =                                    456,
+   ISL_FORMAT_R8G8B8_SINT =                                    457,
+   ISL_FORMAT_RAW =                                            511,
+
+   /* Hardware doesn't understand this out-of-band value */
+   ISL_FORMAT_UNSUPPORTED =                             UINT16_MAX,
+};
+
+/**
+ * Numerical base type for channels of isl_format.
+ */
+enum isl_base_type {
+   ISL_VOID,
+   ISL_RAW,
+   ISL_UNORM,
+   ISL_SNORM,
+   ISL_UFLOAT,
+   ISL_SFLOAT,
+   ISL_UFIXED,
+   ISL_SFIXED,
+   ISL_UINT,
+   ISL_SINT,
+   ISL_USCALED,
+   ISL_SSCALED,
+};
+
+/**
+ * Colorspace of isl_format.
+ */
+enum isl_colorspace {
+   ISL_COLORSPACE_NONE = 0,
+   ISL_COLORSPACE_LINEAR,
+   ISL_COLORSPACE_SRGB,
+   ISL_COLORSPACE_YUV,
+};
+
+/**
+ * Texture compression mode of isl_format.
+ */
+enum isl_txc {
+   ISL_TXC_NONE = 0,
+   ISL_TXC_DXT1,
+   ISL_TXC_DXT3,
+   ISL_TXC_DXT5,
+   ISL_TXC_FXT1,
+   ISL_TXC_RGTC1,
+   ISL_TXC_RGTC2,
+   ISL_TXC_BPTC,
+   ISL_TXC_ETC1,
+   ISL_TXC_ETC2,
+};
+
+/**
+ * @brief Hardware tile mode
+ *
+ * WARNING: These values differ from the hardware enum values, which are
+ * unstable across hardware generations.
+ *
+ * Note that legacy Y tiling is ISL_TILING_Y0 instead of ISL_TILING_Y, to
+ * clearly distinguish it from Yf and Ys.
+ */
+enum isl_tiling {
+   ISL_TILING_LINEAR = 0,
+   ISL_TILING_W,
+   ISL_TILING_X,
+   ISL_TILING_Y0, /**< Legacy Y tiling */
+   ISL_TILING_Yf, /**< Standard 4K tiling. The 'f' means "four". */
+   ISL_TILING_Ys, /**< Standard 64K tiling. The 's' means "sixty-four". */
+};
+
+/**
+ * @defgroup Tiling Flags
+ * @{
+ */
+typedef uint32_t isl_tiling_flags_t;
+#define ISL_TILING_LINEAR_BIT             (1u << ISL_TILING_LINEAR)
+#define ISL_TILING_W_BIT                  (1u << ISL_TILING_W)
+#define ISL_TILING_X_BIT                  (1u << ISL_TILING_X)
+#define ISL_TILING_Y0_BIT                 (1u << ISL_TILING_Y0)
+#define ISL_TILING_Yf_BIT                 (1u << ISL_TILING_Yf)
+#define ISL_TILING_Ys_BIT                 (1u << ISL_TILING_Ys)
+#define ISL_TILING_ANY_MASK               (~0u)
+#define ISL_TILING_NON_LINEAR_MASK        (~ISL_TILING_LINEAR_BIT)
+
+/** Any Y tiling, including legacy Y tiling. */
+#define ISL_TILING_ANY_Y_MASK             (ISL_TILING_Y0_BIT | \
+                                           ISL_TILING_Yf_BIT | \
+                                           ISL_TILING_Ys_BIT)
+
+/** The Skylake BSpec refers to Yf and Ys as "standard tiling formats". */
+#define ISL_TILING_STD_Y_MASK             (ISL_TILING_Yf_BIT | \
+                                           ISL_TILING_Ys_BIT)
+/** @} */
+
+/**
+ * @brief Logical dimension of surface.
+ *
+ * Note: There is no dimension for cube map surfaces. ISL interprets cube maps
+ * as 2D array surfaces.
+ */
+enum isl_surf_dim {
+   ISL_SURF_DIM_1D,
+   ISL_SURF_DIM_2D,
+   ISL_SURF_DIM_3D,
+};
+
+/**
+ * @brief Physical layout of the surface's dimensions.
+ */
+enum isl_dim_layout {
+   /**
+    * For details, see the G35 PRM >> Volume 1: Graphics Core >> Section
+    * 6.17.3: 2D Surfaces.
+    *
+    * On many gens, 1D surfaces share the same layout as 2D surfaces.  From
+    * the G35 PRM >> Volume 1: Graphics Core >> Section 6.17.2: 1D Surfaces:
+    *
+    *    One-dimensional surfaces are identical to 2D surfaces with height of
+    *    one.
+    *
+    * @invariant isl_surf::phys_level0_sa::depth == 1
+    */
+   ISL_DIM_LAYOUT_GEN4_2D,
+
+   /**
+    * For details, see the G35 PRM >> Volume 1: Graphics Core >> Section
+    * 6.17.5: 3D Surfaces.
+    *
+    * @invariant isl_surf::phys_level0_sa::array_len == 1
+    */
+   ISL_DIM_LAYOUT_GEN4_3D,
+
+   /**
+    * For details, see the Skylake BSpec >> Memory Views >> Common Surface
+    * Formats >> Surface Layout and Tiling >> 1D Surfaces.
+    */
+   ISL_DIM_LAYOUT_GEN9_1D,
+};
+
+/* TODO(chadv): Explain */
+enum isl_array_pitch_span {
+   ISL_ARRAY_PITCH_SPAN_FULL,
+   ISL_ARRAY_PITCH_SPAN_COMPACT,
+};
+
+/**
+ * @defgroup Surface Usage
+ * @{
+ */
+typedef uint64_t isl_surf_usage_flags_t;
+#define ISL_SURF_USAGE_RENDER_TARGET_BIT       (1u << 0)
+#define ISL_SURF_USAGE_DEPTH_BIT               (1u << 1)
+#define ISL_SURF_USAGE_STENCIL_BIT             (1u << 2)
+#define ISL_SURF_USAGE_TEXTURE_BIT             (1u << 3)
+#define ISL_SURF_USAGE_CUBE_BIT                (1u << 4)
+#define ISL_SURF_USAGE_DISABLE_AUX_BIT         (1u << 5)
+#define ISL_SURF_USAGE_DISPLAY_BIT             (1u << 6)
+#define ISL_SURF_USAGE_DISPLAY_ROTATE_90_BIT   (1u << 7)
+#define ISL_SURF_USAGE_DISPLAY_ROTATE_180_BIT  (1u << 8)
+#define ISL_SURF_USAGE_DISPLAY_ROTATE_270_BIT  (1u << 9)
+#define ISL_SURF_USAGE_DISPLAY_FLIP_X_BIT      (1u << 10)
+#define ISL_SURF_USAGE_DISPLAY_FLIP_Y_BIT      (1u << 11)
+#define ISL_SURF_USAGE_STORAGE_BIT             (1u << 12)
+/** @} */
+
+/**
+ * @brief A channel select (also known as texture swizzle) value
+ */
+enum isl_channel_select {
+   ISL_CHANNEL_SELECT_ZERO = 0,
+   ISL_CHANNEL_SELECT_ONE = 1,
+   ISL_CHANNEL_SELECT_RED = 4,
+   ISL_CHANNEL_SELECT_GREEN = 5,
+   ISL_CHANNEL_SELECT_BLUE = 6,
+   ISL_CHANNEL_SELECT_ALPHA = 7,
+};
+
+/**
+ * Identical to VkSampleCountFlagBits.
+ */
+enum isl_sample_count {
+   ISL_SAMPLE_COUNT_1_BIT     = 1u,
+   ISL_SAMPLE_COUNT_2_BIT     = 2u,
+   ISL_SAMPLE_COUNT_4_BIT     = 4u,
+   ISL_SAMPLE_COUNT_8_BIT     = 8u,
+   ISL_SAMPLE_COUNT_16_BIT    = 16u,
+};
+typedef uint32_t isl_sample_count_mask_t;
+
+/**
+ * @brief Multisample Format
+ */
+enum isl_msaa_layout {
+   /**
+    * @brief Surface is single-sampled.
+    */
+   ISL_MSAA_LAYOUT_NONE,
+
+   /**
+    * @brief [SNB+] Interleaved Multisample Format
+    *
+    * In this format, multiple samples are interleaved into each cacheline.
+    * In other words, the sample index is swizzled into the low 6 bits of the
+    * surface's virtual address space.
+    *
+    * For example, suppose the surface is legacy Y tiled, is 4x multisampled,
+    * and its pixel format is 32bpp. Then the first cacheline is arranged
+    * thus:
+    *
+    *    (0,0,0) (0,1,0)   (0,0,1) (1,0,1)
+    *    (1,0,0) (1,1,0)   (0,1,1) (1,1,1)
+    *
+    *    (0,0,2) (1,0,2)   (0,0,3) (1,0,3)
+    *    (0,1,2) (1,1,2)   (0,1,3) (1,1,3)
+    *
+    * The hardware docs refer to this format with multiple terms.  In
+    * Sandybridge, this is the only multisample format; so no term is used.
+    * The Ivybridge docs refer to surfaces in this format as IMS (Interleaved
+    * Multisample Surface). Later hardware docs additionally refer to this
+    * format as MSFMT_DEPTH_STENCIL (because the format is deprecated for
+    * color surfaces).
+    *
+    * See the Sandybridge PRM, Volume 4, Part 1, Section 2.7 "Multisampled
+    * Surface Behavior".
+    *
+    * See the Ivybridge PRM, Volume 1, Part 1, Section 6.18.4.1 "Interleaved
+    * Multisampled Surfaces".
+    */
+   ISL_MSAA_LAYOUT_INTERLEAVED,
+
+   /**
+    * @brief [IVB+] Array Multisample Format
+    *
+    * In this format, the surface's physical layout resembles that of a
+    * 2D array surface.
+    *
+    * Suppose the multisample surface's logical extent is (w, h) and its
+    * sample count is N. Then surface's physical extent is the same as
+    * a singlesample 2D surface whose logical extent is (w, h) and array
+    * length is N.  Array slice `i` contains the pixel values for sample
+    * index `i`.
+    *
+    * The Ivybridge docs refer to surfaces in this format as UMS
+    * (Uncompressed Multisample Layout) and CMS (Compressed Multisample
+    * Surface). The Broadwell docs additionally refer to this format as
+    * MSFMT_MSS (MSS=Multisample Surface Storage).
+    *
+    * See the Broadwell PRM, Volume 5 "Memory Views", Section "Uncompressed
+    * Multisample Surfaces".
+    *
+    * See the Broadwell PRM, Volume 5 "Memory Views", Section "Compressed
+    * Multisample Surfaces".
+    */
+   ISL_MSAA_LAYOUT_ARRAY,
+};
+
+
+struct isl_device {
+   const struct brw_device_info *info;
+   bool use_separate_stencil;
+   bool has_bit6_swizzling;
+};
+
+struct isl_extent2d {
+   union { uint32_t w, width; };
+   union { uint32_t h, height; };
+};
+
+struct isl_extent3d {
+   union { uint32_t w, width; };
+   union { uint32_t h, height; };
+   union { uint32_t d, depth; };
+};
+
+struct isl_extent4d {
+   union { uint32_t w, width; };
+   union { uint32_t h, height; };
+   union { uint32_t d, depth; };
+   union { uint32_t a, array_len; };
+};
+
+struct isl_channel_layout {
+   enum isl_base_type type;
+   uint8_t bits; /**< Size in bits */
+};
+
+/**
+ * Each format has 3D block extent (width, height, depth). The block extent of
+ * compressed formats is that of the format's compression block. For example,
+ * the block extent of ISL_FORMAT_ETC2_RGB8 is (w=4, h=4, d=1).  The block
+ * extent of uncompressed pixel formats, such as ISL_FORMAT_R8G8B8A8_UNORM, is
+ * (w=1, h=1, d=1).
+ */
+struct isl_format_layout {
+   enum isl_format format;
+
+   uint8_t bs; /**< Block size, in bytes, rounded towards 0 */
+   uint8_t bw; /**< Block width, in pixels */
+   uint8_t bh; /**< Block height, in pixels */
+   uint8_t bd; /**< Block depth, in pixels */
+
+   struct {
+      struct isl_channel_layout r; /**< Red channel */
+      struct isl_channel_layout g; /**< Green channel */
+      struct isl_channel_layout b; /**< Blue channel */
+      struct isl_channel_layout a; /**< Alpha channel */
+      struct isl_channel_layout l; /**< Luminance channel */
+      struct isl_channel_layout i; /**< Intensity channel */
+      struct isl_channel_layout p; /**< Palette channel */
+   } channels;
+
+   enum isl_colorspace colorspace;
+   enum isl_txc txc;
+};
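+
+/* For example, isl_format_layouts[ISL_FORMAT_ETC2_RGB8] describes 4x4-pixel,
+ * 8-byte blocks (bs = 8, bw = bh = 4, bd = 1); an 8x8-pixel image therefore
+ * spans 2x2 blocks, i.e. 32 bytes before any row-pitch alignment.
+ */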
+
+struct isl_tile_info {
+   enum isl_tiling tiling;
+   uint32_t width; /**< in bytes */
+   uint32_t height; /**< in rows of memory */
+   uint32_t size; /**< in bytes */
+};
+
+/**
+ * @brief Input to surface initialization
+ *
+ * @invariant width >= 1
+ * @invariant height >= 1
+ * @invariant depth >= 1
+ * @invariant levels >= 1
+ * @invariant samples >= 1
+ * @invariant array_len >= 1
+ *
+ * @invariant if 1D then height == 1 and depth == 1 and samples == 1
+ * @invariant if 2D then depth == 1
+ * @invariant if 3D then array_len == 1 and samples == 1
+ */
+struct isl_surf_init_info {
+   enum isl_surf_dim dim;
+   enum isl_format format;
+
+   uint32_t width;
+   uint32_t height;
+   uint32_t depth;
+   uint32_t levels;
+   uint32_t array_len;
+   uint32_t samples;
+
+   /** Lower bound for isl_surf::alignment, in bytes. */
+   uint32_t min_alignment;
+
+   /** Lower bound for isl_surf::pitch, in bytes. */
+   uint32_t min_pitch;
+
+   isl_surf_usage_flags_t usage;
+
+   /** Flags that alter how ISL selects isl_surf::tiling.  */
+   isl_tiling_flags_t tiling_flags;
+};
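+
+/* A minimal usage sketch (hypothetical values, not taken from any driver),
+ * assuming `dev` is an isl_device set up with isl_device_init(): describe a
+ * mipmapped 256x256 RGBA8 texture and let ISL choose the tiling.
+ *
+ *    struct isl_surf surf;
+ *    bool ok = isl_surf_init_s(dev, &surf,
+ *       &(struct isl_surf_init_info) {
+ *          .dim = ISL_SURF_DIM_2D,
+ *          .format = ISL_FORMAT_R8G8B8A8_UNORM,
+ *          .width = 256, .height = 256, .depth = 1,
+ *          .levels = 9, .array_len = 1, .samples = 1,
+ *          .usage = ISL_SURF_USAGE_TEXTURE_BIT,
+ *          .tiling_flags = ISL_TILING_ANY_MASK,
+ *       });
+ */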
+
+struct isl_surf {
+   enum isl_surf_dim dim;
+   enum isl_dim_layout dim_layout;
+   enum isl_msaa_layout msaa_layout;
+   enum isl_tiling tiling;
+   enum isl_format format;
+
+   /**
+    * Alignment of the upper-left sample of each subimage, in units of surface
+    * elements.
+    */
+   struct isl_extent3d image_alignment_el;
+
+   /**
+    * Logical extent of the surface's base level, in units of pixels.  This is
+    * identical to the extent defined in isl_surf_init_info.
+    */
+   struct isl_extent4d logical_level0_px;
+
+   /**
+    * Physical extent of the surface's base level, in units of physical
+    * surface samples and aligned to the format's compression block.
+    *
+    * Consider isl_dim_layout as an operator that transforms a logical surface
+    * layout to a physical surface layout. Then
+    *
+    *    logical_layout := (isl_surf::dim, isl_surf::logical_level0_px)
+    *    isl_surf::phys_level0_sa := isl_surf::dim_layout * logical_layout
+    */
+   struct isl_extent4d phys_level0_sa;
+
+   uint32_t levels;
+   uint32_t samples;
+
+   /** Total size of the surface, in bytes. */
+   uint32_t size;
+
+   /** Required alignment for the surface's base address. */
+   uint32_t alignment;
+
+   /**
+    * Pitch between vertically adjacent surface elements, in bytes.
+    */
+   uint32_t row_pitch;
+
+   /**
+    * Pitch between physical array slices, in rows of surface elements.
+    */
+   uint32_t array_pitch_el_rows;
+
+   enum isl_array_pitch_span array_pitch_span;
+
+   /** Copy of isl_surf_init_info::usage. */
+   isl_surf_usage_flags_t usage;
+};
+
+struct isl_view {
+   /**
+    * Indicates the usage of the particular view
+    *
+    * Normally, this is one bit.  However, for a cube map texture, it
+    * should be ISL_SURF_USAGE_TEXTURE_BIT | ISL_SURF_USAGE_CUBE_BIT.
+    */
+   isl_surf_usage_flags_t usage;
+
+   /**
+    * The format to use in the view
+    *
+    * This may differ from the format of the actual isl_surf but must have
+    * the same block size.
+    */
+   enum isl_format format;
+
+   uint32_t base_level;
+   uint32_t levels;
+
+   /**
+    * Base array layer
+    *
+    * For cube maps, both base_array_layer and array_len should be
+    * specified in terms of 2-D layers and must be a multiple of 6.
+    */
+   uint32_t base_array_layer;
+   uint32_t array_len;
+
+   enum isl_channel_select channel_select[4];
+};
+
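+/*
+ * Illustrative sketch: a view of a complete, single-level cube map might be
+ * filled in as follows. Values are examples only; array_len counts 2-D faces
+ * and must be a multiple of 6, and channel_select is left out of this sketch.
+ *
+ *    struct isl_view view = {
+ *       .usage = ISL_SURF_USAGE_TEXTURE_BIT | ISL_SURF_USAGE_CUBE_BIT,
+ *       .format = ISL_FORMAT_R8G8B8A8_UNORM,
+ *       .base_level = 0,
+ *       .levels = 1,
+ *       .base_array_layer = 0,
+ *       .array_len = 6,
+ *    };
+ */
+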
+union isl_color_value {
+   float f32[4];
+   uint32_t u32[4];
+   int32_t i32[4];
+};
+
+struct isl_surf_fill_state_info {
+   const struct isl_surf *surf;
+   const struct isl_view *view;
+
+   /**
+    * The address of the surface in GPU memory.
+    */
+   uint64_t address;
+
+   /**
+    * The Memory Object Control state for the filled surface state.
+    *
+    * The exact format of this value depends on hardware generation.
+    */
+   uint32_t mocs;
+
+   /**
+    * The clear color for this surface
+    *
+    * Valid values depend on hardware generation.
+    */
+   union isl_color_value clear_color;
+};
+
+struct isl_buffer_fill_state_info {
+   /**
+    * The address of the surface in GPU memory.
+    */
+   uint64_t address;
+
+   /**
+    * The size of the buffer
+    */
+   uint64_t size;
+
+   /**
+    * The Memory Object Control state for the filled surface state.
+    *
+    * The exact format of this value depends on hardware generation.
+    */
+   uint32_t mocs;
+
+   /**
+    * The format to use in the surface state
+    *
+    * This may differ from the format of the actual isl_surf but must have
+    * the same block size.
+    */
+   enum isl_format format;
+
+   uint32_t stride;
+};
+
+extern const struct isl_format_layout isl_format_layouts[];
+
+void
+isl_device_init(struct isl_device *dev,
+                const struct brw_device_info *info,
+                bool has_bit6_swizzling);
+
+isl_sample_count_mask_t ATTRIBUTE_CONST
+isl_device_get_sample_counts(struct isl_device *dev);
+
+static inline const struct isl_format_layout * ATTRIBUTE_CONST
+isl_format_get_layout(enum isl_format fmt)
+{
+   return &isl_format_layouts[fmt];
+}
+
+bool
+isl_format_has_uint_channel(enum isl_format fmt) ATTRIBUTE_CONST;
+
+bool
+isl_format_has_sint_channel(enum isl_format fmt) ATTRIBUTE_CONST;
+
+static inline bool
+isl_format_has_int_channel(enum isl_format fmt)
+{
+   return isl_format_has_uint_channel(fmt) ||
+          isl_format_has_sint_channel(fmt);
+}
+
+static inline bool
+isl_format_is_compressed(enum isl_format fmt)
+{
+   const struct isl_format_layout *fmtl = isl_format_get_layout(fmt);
+
+   return fmtl->txc != ISL_TXC_NONE;
+}
+
+static inline bool
+isl_format_has_bc_compression(enum isl_format fmt)
+{
+   switch (isl_format_get_layout(fmt)->txc) {
+   case ISL_TXC_DXT1:
+   case ISL_TXC_DXT3:
+   case ISL_TXC_DXT5:
+      return true;
+   case ISL_TXC_NONE:
+   case ISL_TXC_FXT1:
+   case ISL_TXC_RGTC1:
+   case ISL_TXC_RGTC2:
+   case ISL_TXC_BPTC:
+   case ISL_TXC_ETC1:
+   case ISL_TXC_ETC2:
+      return false;
+   }
+
+   unreachable("bad texture compression mode");
+   return false;
+}
+
+static inline bool
+isl_format_is_yuv(enum isl_format fmt)
+{
+   const struct isl_format_layout *fmtl = isl_format_get_layout(fmt);
+
+   return fmtl->colorspace == ISL_COLORSPACE_YUV;
+}
+
+static inline bool
+isl_format_block_is_1x1x1(enum isl_format fmt)
+{
+   const struct isl_format_layout *fmtl = isl_format_get_layout(fmt);
+
+   return fmtl->bw == 1 && fmtl->bh == 1 && fmtl->bd == 1;
+}
+
+static inline bool
+isl_format_is_rgb(enum isl_format fmt)
+{
+   return isl_format_layouts[fmt].channels.r.bits > 0 &&
+          isl_format_layouts[fmt].channels.g.bits > 0 &&
+          isl_format_layouts[fmt].channels.b.bits > 0 &&
+          isl_format_layouts[fmt].channels.a.bits == 0;
+}
+
+enum isl_format isl_format_rgb_to_rgba(enum isl_format rgb) ATTRIBUTE_CONST;
+enum isl_format isl_format_rgb_to_rgbx(enum isl_format rgb) ATTRIBUTE_CONST;
+
+bool isl_is_storage_image_format(enum isl_format fmt);
+
+enum isl_format
+isl_lower_storage_image_format(const struct isl_device *dev,
+                               enum isl_format fmt);
+
+static inline bool
+isl_tiling_is_any_y(enum isl_tiling tiling)
+{
+   return (1u << tiling) & ISL_TILING_ANY_MASK;
+}
+
+static inline bool
+isl_tiling_is_std_y(enum isl_tiling tiling)
+{
+   return (1u << tiling) & ISL_TILING_STD_Y_MASK;
+}
+
+bool
+isl_tiling_get_info(const struct isl_device *dev,
+                    enum isl_tiling tiling,
+                    uint32_t format_block_size,
+                    struct isl_tile_info *info);
+
+void
+isl_tiling_get_extent(const struct isl_device *dev,
+                      enum isl_tiling tiling,
+                      uint32_t format_block_size,
+                      struct isl_extent2d *e);
+bool
+isl_surf_choose_tiling(const struct isl_device *dev,
+                       const struct isl_surf_init_info *restrict info,
+                       enum isl_tiling *tiling);
+
+static inline bool
+isl_surf_usage_is_display(isl_surf_usage_flags_t usage)
+{
+   return usage & ISL_SURF_USAGE_DISPLAY_BIT;
+}
+
+static inline bool
+isl_surf_usage_is_depth(isl_surf_usage_flags_t usage)
+{
+   return usage & ISL_SURF_USAGE_DEPTH_BIT;
+}
+
+static inline bool
+isl_surf_usage_is_stencil(isl_surf_usage_flags_t usage)
+{
+   return usage & ISL_SURF_USAGE_STENCIL_BIT;
+}
+
+static inline bool
+isl_surf_usage_is_depth_and_stencil(isl_surf_usage_flags_t usage)
+{
+   return (usage & ISL_SURF_USAGE_DEPTH_BIT) &&
+          (usage & ISL_SURF_USAGE_STENCIL_BIT);
+}
+
+static inline bool
+isl_surf_usage_is_depth_or_stencil(isl_surf_usage_flags_t usage)
+{
+   return usage & (ISL_SURF_USAGE_DEPTH_BIT | ISL_SURF_USAGE_STENCIL_BIT);
+}
+
+static inline bool
+isl_surf_info_is_z16(const struct isl_surf_init_info *info)
+{
+   return (info->usage & ISL_SURF_USAGE_DEPTH_BIT) &&
+          (info->format == ISL_FORMAT_R16_UNORM);
+}
+
+static inline bool
+isl_surf_info_is_z32_float(const struct isl_surf_init_info *info)
+{
+   return (info->usage & ISL_SURF_USAGE_DEPTH_BIT) &&
+          (info->format == ISL_FORMAT_R32_FLOAT);
+}
+
+static inline struct isl_extent2d
+isl_extent2d(uint32_t width, uint32_t height)
+{
+   return (struct isl_extent2d) { .w = width, .h = height };
+}
+
+static inline struct isl_extent3d
+isl_extent3d(uint32_t width, uint32_t height, uint32_t depth)
+{
+   return (struct isl_extent3d) { .w = width, .h = height, .d = depth };
+}
+
+static inline struct isl_extent4d
+isl_extent4d(uint32_t width, uint32_t height, uint32_t depth,
+             uint32_t array_len)
+{
+   return (struct isl_extent4d) {
+      .w = width,
+      .h = height,
+      .d = depth,
+      .a = array_len,
+   };
+}
+
+#define isl_surf_init(dev, surf, ...) \
+   isl_surf_init_s((dev), (surf), \
+                   &(struct isl_surf_init_info) {  __VA_ARGS__ });
+
+bool
+isl_surf_init_s(const struct isl_device *dev,
+                struct isl_surf *surf,
+                const struct isl_surf_init_info *restrict info);
+
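+/*
+ * Usage sketch for the convenience macro above (values are illustrative
+ * only; 'dev' is assumed to be a previously initialized isl_device):
+ *
+ *    struct isl_surf surf;
+ *    bool ok = isl_surf_init(dev, &surf,
+ *                            .dim = ISL_SURF_DIM_2D,
+ *                            .format = ISL_FORMAT_R8G8B8A8_UNORM,
+ *                            .width = 256, .height = 256, .depth = 1,
+ *                            .levels = 1, .array_len = 1, .samples = 1,
+ *                            .usage = ISL_SURF_USAGE_TEXTURE_BIT,
+ *                            .tiling_flags = ISL_TILING_ANY_MASK);
+ */
+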
+void
+isl_surf_get_tile_info(const struct isl_device *dev,
+                       const struct isl_surf *surf,
+                       struct isl_tile_info *tile_info);
+
+#define isl_surf_fill_state(dev, state, ...) \
+   isl_surf_fill_state_s((dev), (state), \
+                         &(struct isl_surf_fill_state_info) {  __VA_ARGS__ });
+
+void
+isl_surf_fill_state_s(const struct isl_device *dev, void *state,
+                      const struct isl_surf_fill_state_info *restrict info);
+
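+/*
+ * Usage sketch for the macro above (illustrative only; 'ss' must be large
+ * enough to hold the hardware SURFACE_STATE for the target gen, and 'surf',
+ * 'view', 'surf_address' and 'mocs' are assumed to come from the caller):
+ *
+ *    uint32_t ss[16];
+ *    isl_surf_fill_state(dev, ss,
+ *                        .surf = &surf,
+ *                        .view = &view,
+ *                        .address = surf_address,
+ *                        .mocs = mocs);
+ */
+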
+#define isl_buffer_fill_state(dev, state, ...) \
+   isl_buffer_fill_state_s((dev), (state), \
+                           &(struct isl_buffer_fill_state_info) {  __VA_ARGS__ });
+
+void
+isl_buffer_fill_state_s(const struct isl_device *dev, void *state,
+                        const struct isl_buffer_fill_state_info *restrict info);
+
+void
+isl_surf_fill_image_param(const struct isl_device *dev,
+                          struct brw_image_param *param,
+                          const struct isl_surf *surf,
+                          const struct isl_view *view);
+
+void
+isl_buffer_fill_image_param(const struct isl_device *dev,
+                            struct brw_image_param *param,
+                            enum isl_format format,
+                            uint64_t size);
+
+/**
+ * Alignment of the upper-left sample of each subimage, in units of surface
+ * elements.
+ */
+static inline struct isl_extent3d
+isl_surf_get_image_alignment_el(const struct isl_surf *surf)
+{
+   return surf->image_alignment_el;
+}
+
+/**
+ * Alignment of the upper-left sample of each subimage, in units of surface
+ * samples.
+ */
+static inline struct isl_extent3d
+isl_surf_get_image_alignment_sa(const struct isl_surf *surf)
+{
+   const struct isl_format_layout *fmtl = isl_format_get_layout(surf->format);
+
+   return (struct isl_extent3d) {
+      .w = fmtl->bw * surf->image_alignment_el.w,
+      .h = fmtl->bh * surf->image_alignment_el.h,
+      .d = fmtl->bd * surf->image_alignment_el.d,
+   };
+}
+
+/**
+ * Pitch between vertically adjacent surface elements, in bytes.
+ */
+static inline uint32_t
+isl_surf_get_row_pitch(const struct isl_surf *surf)
+{
+   return surf->row_pitch;
+}
+
+/**
+ * Pitch between vertically adjacent surface elements, in units of surface elements.
+ */
+static inline uint32_t
+isl_surf_get_row_pitch_el(const struct isl_surf *surf)
+{
+   const struct isl_format_layout *fmtl = isl_format_get_layout(surf->format);
+
+   assert(surf->row_pitch % fmtl->bs == 0);
+   return surf->row_pitch / fmtl->bs;
+}
+
+/**
+ * Pitch between physical array slices, in rows of surface elements.
+ */
+static inline uint32_t
+isl_surf_get_array_pitch_el_rows(const struct isl_surf *surf)
+{
+   return surf->array_pitch_el_rows;
+}
+
+/**
+ * Pitch between physical array slices, in units of surface elements.
+ */
+static inline uint32_t
+isl_surf_get_array_pitch_el(const struct isl_surf *surf)
+{
+   return isl_surf_get_array_pitch_el_rows(surf) *
+          isl_surf_get_row_pitch_el(surf);
+}
+
+/**
+ * Pitch between physical array slices, in rows of surface samples.
+ */
+static inline uint32_t
+isl_surf_get_array_pitch_sa_rows(const struct isl_surf *surf)
+{
+   const struct isl_format_layout *fmtl = isl_format_get_layout(surf->format);
+   return fmtl->bh * isl_surf_get_array_pitch_el_rows(surf);
+}
+
+/**
+ * Pitch between physical array slices, in bytes.
+ */
+static inline uint32_t
+isl_surf_get_array_pitch(const struct isl_surf *surf)
+{
+   return isl_surf_get_array_pitch_sa_rows(surf) * surf->row_pitch;
+}
+
+/**
+ * Calculate the offset, in units of surface elements, to a subimage in the
+ * surface.
+ *
+ * @invariant level < surface levels
+ * @invariant logical_array_layer < logical array length of surface
+ * @invariant logical_z_offset_px < logical depth of surface at level
+ */
+void
+isl_surf_get_image_offset_el(const struct isl_surf *surf,
+                             uint32_t level,
+                             uint32_t logical_array_layer,
+                             uint32_t logical_z_offset_px,
+                             uint32_t *x_offset_el,
+                             uint32_t *y_offset_el);
+
+/**
+ * @brief Calculate the intratile offsets to a subimage in the surface.
+ *
+ * In @a base_address_offset return the offset from the base of the surface to
+ * the base address of the first tile of the subimage. In @a x_offset_el and
+ * @a y_offset_el, return the offset, in units of surface elements, from the
+ * tile's base to the subimage's first surface element. The x and y offsets
+ * are intratile offsets; that is, they do not exceed the boundary of the
+ * surface's tiling format.
+ */
+void
+isl_surf_get_image_intratile_offset_el(const struct isl_device *dev,
+                                       const struct isl_surf *surf,
+                                       uint32_t level,
+                                       uint32_t logical_array_layer,
+                                       uint32_t logical_z_offset,
+                                       uint32_t *base_address_offset,
+                                       uint32_t *x_offset_el,
+                                       uint32_t *y_offset_el);
+
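+/*
+ * Illustrative sketch of how a caller might combine the outputs of the
+ * function above. 'dev', 'surf', 'level', 'layer' and 'z' are placeholders,
+ * and 'surf_base_address' stands for wherever the surface lives in memory.
+ *
+ *    uint32_t base_offset_B, x_el, y_el;
+ *    isl_surf_get_image_intratile_offset_el(dev, surf, level, layer, z,
+ *                                           &base_offset_B, &x_el, &y_el);
+ *    uint64_t tile_address = surf_base_address + base_offset_B;
+ *
+ * The pair (x_el, y_el) then locates the subimage's first element within
+ * the tile that starts at tile_address.
+ */
+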
+/**
+ * Variant of isl_surf_get_image_intratile_offset_el() that takes the
+ * subimage's total x/y offsets, in units of surface elements, instead of
+ * a (level, layer, z) location.  See the comment above for the meaning of
+ * the returned offsets.
+ */
+void
+isl_surf_get_image_intratile_offset_el_xy(const struct isl_device *dev,
+                                           const struct isl_surf *surf,
+                                           uint32_t total_x_offset_el,
+                                           uint32_t total_y_offset_el,
+                                           uint32_t *base_address_offset,
+                                           uint32_t *x_offset_el,
+                                           uint32_t *y_offset_el);
+
+/**
+ * @brief Get value of 3DSTATE_DEPTH_BUFFER.SurfaceFormat
+ *
+ * @pre surf->usage has ISL_SURF_USAGE_DEPTH_BIT
+ * @pre surf->format must be a valid format for depth surfaces
+ */
+uint32_t
+isl_surf_get_depth_format(const struct isl_device *dev,
+                          const struct isl_surf *surf);
+
+#ifdef __cplusplus
+}
+#endif
diff --git a/src/intel/isl/isl_format.c b/src/intel/isl/isl_format.c
new file mode 100644 (file)
index 0000000..32bd701
--- /dev/null
@@ -0,0 +1,108 @@
+/*
+ * Copyright 2015 Intel Corporation
+ *
+ *  Permission is hereby granted, free of charge, to any person obtaining a
+ *  copy of this software and associated documentation files (the "Software"),
+ *  to deal in the Software without restriction, including without limitation
+ *  the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ *  and/or sell copies of the Software, and to permit persons to whom the
+ *  Software is furnished to do so, subject to the following conditions:
+ *
+ *  The above copyright notice and this permission notice (including the next
+ *  paragraph) shall be included in all copies or substantial portions of the
+ *  Software.
+ *
+ *  THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ *  IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ *  FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ *  THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ *  LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ *  FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ *  IN THE SOFTWARE.
+ */
+
+#include <assert.h>
+
+#include "isl.h"
+
+bool
+isl_format_has_uint_channel(enum isl_format fmt)
+{
+   const struct isl_format_layout *fmtl = isl_format_get_layout(fmt);
+
+   return fmtl->channels.r.type == ISL_UINT ||
+          fmtl->channels.g.type == ISL_UINT ||
+          fmtl->channels.b.type == ISL_UINT ||
+          fmtl->channels.a.type == ISL_UINT ||
+          fmtl->channels.l.type == ISL_UINT ||
+          fmtl->channels.i.type == ISL_UINT ||
+          fmtl->channels.p.type == ISL_UINT;
+}
+
+bool
+isl_format_has_sint_channel(enum isl_format fmt)
+{
+   const struct isl_format_layout *fmtl = isl_format_get_layout(fmt);
+
+   return fmtl->channels.r.type == ISL_SINT ||
+          fmtl->channels.g.type == ISL_SINT ||
+          fmtl->channels.b.type == ISL_SINT ||
+          fmtl->channels.a.type == ISL_SINT ||
+          fmtl->channels.l.type == ISL_SINT ||
+          fmtl->channels.i.type == ISL_SINT ||
+          fmtl->channels.p.type == ISL_SINT;
+}
+
+enum isl_format
+isl_format_rgb_to_rgba(enum isl_format rgb)
+{
+   assert(isl_format_is_rgb(rgb));
+
+   switch (rgb) {
+   case ISL_FORMAT_R32G32B32_FLOAT:    return ISL_FORMAT_R32G32B32A32_FLOAT;
+   case ISL_FORMAT_R32G32B32_SINT:     return ISL_FORMAT_R32G32B32A32_SINT;
+   case ISL_FORMAT_R32G32B32_UINT:     return ISL_FORMAT_R32G32B32A32_UINT;
+   case ISL_FORMAT_R32G32B32_UNORM:    return ISL_FORMAT_R32G32B32A32_UNORM;
+   case ISL_FORMAT_R32G32B32_SNORM:    return ISL_FORMAT_R32G32B32A32_SNORM;
+   case ISL_FORMAT_R32G32B32_SSCALED:  return ISL_FORMAT_R32G32B32A32_SSCALED;
+   case ISL_FORMAT_R32G32B32_USCALED:  return ISL_FORMAT_R32G32B32A32_USCALED;
+   case ISL_FORMAT_R32G32B32_SFIXED:   return ISL_FORMAT_R32G32B32A32_SFIXED;
+   case ISL_FORMAT_R8G8B8_UNORM:       return ISL_FORMAT_R8G8B8A8_UNORM;
+   case ISL_FORMAT_R8G8B8_SNORM:       return ISL_FORMAT_R8G8B8A8_SNORM;
+   case ISL_FORMAT_R8G8B8_SSCALED:     return ISL_FORMAT_R8G8B8A8_SSCALED;
+   case ISL_FORMAT_R8G8B8_USCALED:     return ISL_FORMAT_R8G8B8A8_USCALED;
+   case ISL_FORMAT_R16G16B16_FLOAT:    return ISL_FORMAT_R16G16B16A16_FLOAT;
+   case ISL_FORMAT_R16G16B16_UNORM:    return ISL_FORMAT_R16G16B16A16_UNORM;
+   case ISL_FORMAT_R16G16B16_SNORM:    return ISL_FORMAT_R16G16B16A16_SNORM;
+   case ISL_FORMAT_R16G16B16_SSCALED:  return ISL_FORMAT_R16G16B16A16_SSCALED;
+   case ISL_FORMAT_R16G16B16_USCALED:  return ISL_FORMAT_R16G16B16A16_USCALED;
+   case ISL_FORMAT_R8G8B8_UNORM_SRGB:  return ISL_FORMAT_R8G8B8A8_UNORM_SRGB;
+   case ISL_FORMAT_R16G16B16_UINT:     return ISL_FORMAT_R16G16B16A16_UINT;
+   case ISL_FORMAT_R16G16B16_SINT:     return ISL_FORMAT_R16G16B16A16_SINT;
+   case ISL_FORMAT_R8G8B8_UINT:        return ISL_FORMAT_R8G8B8A8_UINT;
+   case ISL_FORMAT_R8G8B8_SINT:        return ISL_FORMAT_R8G8B8A8_SINT;
+   default:
+      return ISL_FORMAT_UNSUPPORTED;
+   }
+}
+
+enum isl_format
+isl_format_rgb_to_rgbx(enum isl_format rgb)
+{
+   assert(isl_format_is_rgb(rgb));
+
+   switch (rgb) {
+   case ISL_FORMAT_R32G32B32_FLOAT:
+      return ISL_FORMAT_R32G32B32X32_FLOAT;
+   case ISL_FORMAT_R16G16B16_UNORM:
+      return ISL_FORMAT_R16G16B16X16_UNORM;
+   case ISL_FORMAT_R16G16B16_FLOAT:
+      return ISL_FORMAT_R16G16B16X16_FLOAT;
+   case ISL_FORMAT_R8G8B8_UNORM:
+      return ISL_FORMAT_R8G8B8X8_UNORM;
+   case ISL_FORMAT_R8G8B8_UNORM_SRGB:
+      return ISL_FORMAT_R8G8B8X8_UNORM_SRGB;
+   default:
+      return ISL_FORMAT_UNSUPPORTED;
+   }
+}
diff --git a/src/intel/isl/isl_format_layout.csv b/src/intel/isl/isl_format_layout.csv
new file mode 100644 (file)
index 0000000..af2786a
--- /dev/null
@@ -0,0 +1,287 @@
+# Copyright 2015 Intel Corporation
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice (including the next
+# paragraph) shall be included in all copies or substantial portions of the
+# Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+#
+# @file
+# @brief Layout of all hardware surface formats
+#
+# For the official list, see Broadwell PRM: Volume 2b: Command Reference:
+# Enumerations: SURFACE_FORMAT.
+#
+
+
+# Columns:
+#    name: format name in PRM
+#    bpb: bits per block
+#    bw: block width, in pixels
+#    bh: block height, in pixels
+#    bd: block depth, in pixels
+#    r: red channel, data type and bitwidth
+#    g: green channel
+#    b: blue channel
+#    a: alpha channel
+#    l: luminance channel
+#    i: intensity channel
+#    p: palette channel
+#    space: colorspace
+#    txc: texture compression
+#
+# Data Types:
+#     x: void
+#     r: raw
+#    un: unorm
+#    sn: snorm
+#    uf: ufloat
+#    sf: sfloat
+#    ux: ufixed
+#    sx: sfixed
+#    ui: uint
+#    si: sint
+#    us: uscaled
+#    ss: sscaled
+
+
+# Table is aligned with the Vim commands below, using the Align plugin:
+#     :AlignCtrl lr+ p8000000000000P1
+#     /^# name/,$ Align,
+
+# name                      , bpb, bw, bh, bd,    r,    g,    b,    a,    l,    i,   p,  space,   txc
+R32G32B32A32_FLOAT          , 128,  1,  1,  1, sf32, sf32, sf32, sf32,     ,     ,    , linear,
+R32G32B32A32_SINT           , 128,  1,  1,  1, si32, si32, si32, si32,     ,     ,    , linear,
+R32G32B32A32_UINT           , 128,  1,  1,  1, ui32, ui32, ui32, ui32,     ,     ,    , linear,
+R32G32B32A32_UNORM          , 128,  1,  1,  1, un32, un32, un32, un32,     ,     ,    , linear,
+R32G32B32A32_SNORM          , 128,  1,  1,  1, sn32, sn32, sn32, sn32,     ,     ,    , linear,
+R64G64_FLOAT                , 128,  1,  1,  1, sf64, sf64,     ,     ,     ,     ,    , linear,
+R32G32B32X32_FLOAT          , 128,  1,  1,  1, sf32, sf32, sf32,  x32,     ,     ,    , linear,
+R32G32B32A32_SSCALED        , 128,  1,  1,  1, ss32, ss32, ss32, ss32,     ,     ,    , linear,
+R32G32B32A32_USCALED        , 128,  1,  1,  1, us32, us32, us32, us32,     ,     ,    , linear,
+R32G32B32A32_SFIXED         , 128,  1,  1,  1, sx32, sx32, sx32, sx32,     ,     ,    , linear,
+R64G64_PASSTHRU             , 128,  1,  1,  1,  r64,  r64,     ,     ,     ,     ,    ,       ,
+R32G32B32_FLOAT             ,  96,  1,  1,  1, sf32, sf32, sf32,     ,     ,     ,    , linear,
+R32G32B32_SINT              ,  96,  1,  1,  1, si32, si32, si32,     ,     ,     ,    , linear,
+R32G32B32_UINT              ,  96,  1,  1,  1, ui32, ui32, ui32,     ,     ,     ,    , linear,
+R32G32B32_UNORM             ,  96,  1,  1,  1, un32, un32, un32,     ,     ,     ,    , linear,
+R32G32B32_SNORM             ,  96,  1,  1,  1, sn32, sn32, sn32,     ,     ,     ,    , linear,
+R32G32B32_SSCALED           ,  96,  1,  1,  1, ss32, ss32, ss32,     ,     ,     ,    , linear,
+R32G32B32_USCALED           ,  96,  1,  1,  1, us32, us32, us32,     ,     ,     ,    , linear,
+R32G32B32_SFIXED            ,  96,  1,  1,  1, sx32, sx32, sx32,     ,     ,     ,    , linear,
+R16G16B16A16_UNORM          ,  64,  1,  1,  1, un16, un16, un16, un16,     ,     ,    , linear,
+R16G16B16A16_SNORM          ,  64,  1,  1,  1, sn16, sn16, sn16, sn16,     ,     ,    , linear,
+R16G16B16A16_SINT           ,  64,  1,  1,  1, si16, si16, si16, si16,     ,     ,    , linear,
+R16G16B16A16_UINT           ,  64,  1,  1,  1, ui16, ui16, ui16, ui16,     ,     ,    , linear,
+R16G16B16A16_FLOAT          ,  64,  1,  1,  1, sf16, sf16, sf16, sf16,     ,     ,    , linear,
+R32G32_FLOAT                ,  64,  1,  1,  1, sf32, sf32,     ,     ,     ,     ,    , linear,
+R32G32_SINT                 ,  64,  1,  1,  1, si32, si32,     ,     ,     ,     ,    , linear,
+R32G32_UINT                 ,  64,  1,  1,  1, ui32, ui32,     ,     ,     ,     ,    , linear,
+R32_FLOAT_X8X24_TYPELESS    ,  64,  1,  1,  1, sf32,   x8,  x24,     ,     ,     ,    , linear,
+X32_TYPELESS_G8X24_UINT     ,  64,  1,  1,  1,  x32,  ui8,  x24,     ,     ,     ,    , linear,
+L32A32_FLOAT                ,  64,  1,  1,  1,     ,     ,     , sf32, sf32,     ,    , linear,
+R32G32_UNORM                ,  64,  1,  1,  1, un32, un32,     ,     ,     ,     ,    , linear,
+R32G32_SNORM                ,  64,  1,  1,  1, sn32, sn32,     ,     ,     ,     ,    , linear,
+R64_FLOAT                   ,  64,  1,  1,  1, sf64,     ,     ,     ,     ,     ,    , linear,
+R16G16B16X16_UNORM          ,  64,  1,  1,  1, un16, un16, un16,  x16,     ,     ,    , linear,
+R16G16B16X16_FLOAT          ,  64,  1,  1,  1, sf16, sf16, sf16,  x16,     ,     ,    , linear,
+A32X32_FLOAT                ,  64,  1,  1,  1,     ,     ,     , sf32,  x32,     ,    ,  alpha,
+L32X32_FLOAT                ,  64,  1,  1,  1,     ,     ,     ,  x32, sf32,     ,    , linear,
+I32X32_FLOAT                ,  64,  1,  1,  1,     ,     ,     ,  x32,     , sf32,    , linear,
+R16G16B16A16_SSCALED        ,  64,  1,  1,  1, ss16, ss16, ss16, ss16,     ,     ,    , linear,
+R16G16B16A16_USCALED        ,  64,  1,  1,  1, us16, us16, us16, us16,     ,     ,    , linear,
+R32G32_SSCALED              ,  64,  1,  1,  1, ss32, ss32,     ,     ,     ,     ,    , linear,
+R32G32_USCALED              ,  64,  1,  1,  1, us32, us32,     ,     ,     ,     ,    , linear,
+R32G32_SFIXED               ,  64,  1,  1,  1, sx32, sx32,     ,     ,     ,     ,    , linear,
+R64_PASSTHRU                ,  64,  1,  1,  1,  r64,     ,     ,     ,     ,     ,    ,       ,
+B8G8R8A8_UNORM              ,  32,  1,  1,  1,  un8,  un8,  un8,  un8,     ,     ,    , linear,
+B8G8R8A8_UNORM_SRGB         ,  32,  1,  1,  1,  un8,  un8,  un8,  un8,     ,     ,    ,   srgb,
+R10G10B10A2_UNORM           ,  32,  1,  1,  1, un10, un10, un10,  un2,     ,     ,    , linear,
+R10G10B10A2_UNORM_SRGB      ,  32,  1,  1,  1, un10, un10, un10,  un2,     ,     ,    ,   srgb,
+R10G10B10A2_UINT            ,  32,  1,  1,  1, ui10, ui10, ui10,  ui2,     ,     ,    , linear,
+R10G10B10_SNORM_A2_UNORM    ,  32,  1,  1,  1, sn10, sn10, sn10,  un2,     ,     ,    , linear,
+R8G8B8A8_UNORM              ,  32,  1,  1,  1,  un8,  un8,  un8,  un8,     ,     ,    , linear,
+R8G8B8A8_UNORM_SRGB         ,  32,  1,  1,  1,  un8,  un8,  un8,  un8,     ,     ,    ,   srgb,
+R8G8B8A8_SNORM              ,  32,  1,  1,  1,  sn8,  sn8,  sn8,  sn8,     ,     ,    , linear,
+R8G8B8A8_SINT               ,  32,  1,  1,  1,  si8,  si8,  si8,  si8,     ,     ,    , linear,
+R8G8B8A8_UINT               ,  32,  1,  1,  1,  ui8,  ui8,  ui8,  ui8,     ,     ,    , linear,
+R16G16_UNORM                ,  32,  1,  1,  1, un16, un16,     ,     ,     ,     ,    , linear,
+R16G16_SNORM                ,  32,  1,  1,  1, sn16, sn16,     ,     ,     ,     ,    , linear,
+R16G16_SINT                 ,  32,  1,  1,  1, si16, si16,     ,     ,     ,     ,    , linear,
+R16G16_UINT                 ,  32,  1,  1,  1, ui16, ui16,     ,     ,     ,     ,    , linear,
+R16G16_FLOAT                ,  32,  1,  1,  1, sf16, sf16,     ,     ,     ,     ,    , linear,
+B10G10R10A2_UNORM           ,  32,  1,  1,  1, un10, un10, un10,  un2,     ,     ,    , linear,
+B10G10R10A2_UNORM_SRGB      ,  32,  1,  1,  1, un10, un10, un10,  un2,     ,     ,    ,   srgb,
+R11G11B10_FLOAT             ,  32,  1,  1,  1, uf11, uf11, uf10,     ,     ,     ,    , linear,
+R32_SINT                    ,  32,  1,  1,  1, si32,     ,     ,     ,     ,     ,    , linear,
+R32_UINT                    ,  32,  1,  1,  1, ui32,     ,     ,     ,     ,     ,    , linear,
+R32_FLOAT                   ,  32,  1,  1,  1, sf32,     ,     ,     ,     ,     ,    , linear,
+R24_UNORM_X8_TYPELESS       ,  32,  1,  1,  1, un24,   x8,     ,     ,     ,     ,    , linear,
+X24_TYPELESS_G8_UINT        ,  32,  1,  1,  1,  x24,  ui8,     ,     ,     ,     ,    , linear,
+L32_UNORM                   ,  32,  1,  1,  1,     ,     ,     ,     , un32,     ,    , linear,
+A32_UNORM                   ,  32,  1,  1,  1,     ,     ,     , un32,     ,     ,    ,  alpha,
+L16A16_UNORM                ,  32,  1,  1,  1,     ,     ,     , un16, un16,     ,    , linear,
+I24X8_UNORM                 ,  32,  1,  1,  1,     ,     ,     ,   x8,     , un24,    , linear,
+L24X8_UNORM                 ,  32,  1,  1,  1,     ,     ,     ,   x8, un24,     ,    , linear,
+A24X8_UNORM                 ,  32,  1,  1,  1,     ,     ,     , un24,   x8,     ,    ,  alpha,
+I32_FLOAT                   ,  32,  1,  1,  1,     ,     ,     ,     ,     , sf32,    , linear,
+L32_FLOAT                   ,  32,  1,  1,  1,     ,     ,     ,     , sf32,     ,    , linear,
+A32_FLOAT                   ,  32,  1,  1,  1,     ,     ,     , sf32,     ,     ,    ,  alpha,
+X8B8_UNORM_G8R8_SNORM       ,  32,  1,  1,  1,  sn8,  sn8,  un8,   x8,     ,     ,    , linear,
+A8X8_UNORM_G8R8_SNORM       ,  32,  1,  1,  1,  sn8,  sn8,   x8,  un8,     ,     ,    , linear,
+B8X8_UNORM_G8R8_SNORM       ,  32,  1,  1,  1,  sn8,  sn8,  un8,   x8,     ,     ,    , linear,
+B8G8R8X8_UNORM              ,  32,  1,  1,  1,  un8,  un8,  un8,   x8,     ,     ,    , linear,
+B8G8R8X8_UNORM_SRGB         ,  32,  1,  1,  1,  un8,  un8,  un8,   x8,     ,     ,    ,   srgb,
+R8G8B8X8_UNORM              ,  32,  1,  1,  1,  un8,  un8,  un8,   x8,     ,     ,    , linear,
+R8G8B8X8_UNORM_SRGB         ,  32,  1,  1,  1,  un8,  un8,  un8,   x8,     ,     ,    ,   srgb,
+R9G9B9E5_SHAREDEXP          ,  32,  1,  1,  1,  ui9,  ui9,  ui9,     ,     ,     ,    , linear,
+B10G10R10X2_UNORM           ,  32,  1,  1,  1, un10, un10, un10,   x2,     ,     ,    , linear,
+L16A16_FLOAT                ,  32,  1,  1,  1,     ,     ,     , sf16, sf16,     ,    , linear,
+R32_UNORM                   ,  32,  1,  1,  1, un32,     ,     ,     ,     ,     ,    , linear,
+R32_SNORM                   ,  32,  1,  1,  1, sn32,     ,     ,     ,     ,     ,    , linear,
+R10G10B10X2_USCALED         ,  32,  1,  1,  1, us10, us10, us10,   x2,     ,     ,    , linear,
+R8G8B8A8_SSCALED            ,  32,  1,  1,  1,  ss8,  ss8,  ss8,  ss8,     ,     ,    , linear,
+R8G8B8A8_USCALED            ,  32,  1,  1,  1,  us8,  us8,  us8,  us8,     ,     ,    , linear,
+R16G16_SSCALED              ,  32,  1,  1,  1, ss16, ss16,     ,     ,     ,     ,    , linear,
+R16G16_USCALED              ,  32,  1,  1,  1, us16, us16,     ,     ,     ,     ,    , linear,
+R32_SSCALED                 ,  32,  1,  1,  1, ss32,     ,     ,     ,     ,     ,    , linear,
+R32_USCALED                 ,  32,  1,  1,  1, us32,     ,     ,     ,     ,     ,    , linear,
+B5G6R5_UNORM                ,  16,  1,  1,  1,  un5,  un6,  un5,     ,     ,     ,    , linear,
+B5G6R5_UNORM_SRGB           ,  16,  1,  1,  1,  un5,  un6,  un5,     ,     ,     ,    ,   srgb,
+B5G5R5A1_UNORM              ,  16,  1,  1,  1,  un5,  un5,  un5,  un1,     ,     ,    , linear,
+B5G5R5A1_UNORM_SRGB         ,  16,  1,  1,  1,  un5,  un5,  un5,  un1,     ,     ,    ,   srgb,
+B4G4R4A4_UNORM              ,  16,  1,  1,  1,  un4,  un4,  un4,  un4,     ,     ,    , linear,
+B4G4R4A4_UNORM_SRGB         ,  16,  1,  1,  1,  un4,  un4,  un4,  un4,     ,     ,    ,   srgb,
+R8G8_UNORM                  ,  16,  1,  1,  1,  un8,  un8,     ,     ,     ,     ,    , linear,
+R8G8_SNORM                  ,  16,  1,  1,  1,  sn8,  sn8,     ,     ,     ,     ,    , linear,
+R8G8_SINT                   ,  16,  1,  1,  1,  si8,  si8,     ,     ,     ,     ,    , linear,
+R8G8_UINT                   ,  16,  1,  1,  1,  ui8,  ui8,     ,     ,     ,     ,    , linear,
+R16_UNORM                   ,  16,  1,  1,  1, un16,     ,     ,     ,     ,     ,    , linear,
+R16_SNORM                   ,  16,  1,  1,  1, sn16,     ,     ,     ,     ,     ,    , linear,
+R16_SINT                    ,  16,  1,  1,  1, si16,     ,     ,     ,     ,     ,    , linear,
+R16_UINT                    ,  16,  1,  1,  1, ui16,     ,     ,     ,     ,     ,    , linear,
+R16_FLOAT                   ,  16,  1,  1,  1, sf16,     ,     ,     ,     ,     ,    , linear,
+A8P8_UNORM_PALETTE0         ,  16,  1,  1,  1,     ,     ,     ,  un8,     ,     , un8, linear,
+A8P8_UNORM_PALETTE1         ,  16,  1,  1,  1,     ,     ,     ,  un8,     ,     , un8, linear,
+I16_UNORM                   ,  16,  1,  1,  1,     ,     ,     ,     ,     , un16,    , linear,
+L16_UNORM                   ,  16,  1,  1,  1,     ,     ,     ,     , un16,     ,    , linear,
+A16_UNORM                   ,  16,  1,  1,  1,     ,     ,     , un16,     ,     ,    ,  alpha,
+L8A8_UNORM                  ,  16,  1,  1,  1,     ,     ,     ,  un8,  un8,     ,    , linear,
+I16_FLOAT                   ,  16,  1,  1,  1,     ,     ,     ,     ,     , sf16,    , linear,
+L16_FLOAT                   ,  16,  1,  1,  1,     ,     ,     ,     , sf16,     ,    , linear,
+A16_FLOAT                   ,  16,  1,  1,  1,     ,     ,     , sf16,     ,     ,    ,  alpha,
+L8A8_UNORM_SRGB             ,  16,  1,  1,  1,     ,     ,     ,  un8,  un8,     ,    ,   srgb,
+R5G5_SNORM_B6_UNORM         ,  16,  1,  1,  1,  sn5,  sn5,  un6,     ,     ,     ,    , linear,
+B5G5R5X1_UNORM              ,  16,  1,  1,  1,  un5,  un5,  un5,   x1,     ,     ,    , linear,
+B5G5R5X1_UNORM_SRGB         ,  16,  1,  1,  1,  un5,  un5,  un5,   x1,     ,     ,    ,   srgb,
+R8G8_SSCALED                ,  16,  1,  1,  1,  ss8,  ss8,     ,     ,     ,     ,    , linear,
+R8G8_USCALED                ,  16,  1,  1,  1,  us8,  us8,     ,     ,     ,     ,    , linear,
+R16_SSCALED                 ,  16,  1,  1,  1, ss16,     ,     ,     ,     ,     ,    , linear,
+R16_USCALED                 ,  16,  1,  1,  1, us16,     ,     ,     ,     ,     ,    , linear,
+P8A8_UNORM_PALETTE0         ,  16,  1,  1,  1,     ,     ,     ,  un8,     ,     , un8, linear,
+P8A8_UNORM_PALETTE1         ,  16,  1,  1,  1,     ,     ,     ,  un8,     ,     , un8, linear,
+A1B5G5R5_UNORM              ,  16,  1,  1,  1,  un5,  un5,  un5,  un1,     ,     ,    , linear,
+A4B4G4R4_UNORM              ,  16,  1,  1,  1,  un4,  un4,  un4,  un4,     ,     ,    , linear,
+L8A8_UINT                   ,  16,  1,  1,  1,     ,     ,     ,  ui8,  ui8,     ,    , linear,
+L8A8_SINT                   ,  16,  1,  1,  1,     ,     ,     ,  si8,  si8,     ,    , linear,
+R8_UNORM                    ,   8,  1,  1,  1,  un8,     ,     ,     ,     ,     ,    , linear,
+R8_SNORM                    ,   8,  1,  1,  1,  sn8,     ,     ,     ,     ,     ,    , linear,
+R8_SINT                     ,   8,  1,  1,  1,  si8,     ,     ,     ,     ,     ,    , linear,
+R8_UINT                     ,   8,  1,  1,  1,  ui8,     ,     ,     ,     ,     ,    , linear,
+A8_UNORM                    ,   8,  1,  1,  1,     ,     ,     ,  un8,     ,     ,    ,  alpha,
+I8_UNORM                    ,   8,  1,  1,  1,     ,     ,     ,     ,     ,  un8,    , linear,
+L8_UNORM                    ,   8,  1,  1,  1,     ,     ,     ,     ,  un8,     ,    , linear,
+P4A4_UNORM_PALETTE0         ,   8,  1,  1,  1,     ,     ,     ,  un4,     ,     , un4, linear,
+A4P4_UNORM_PALETTE0         ,   8,  1,  1,  1,     ,     ,     ,  un4,     ,     , un4, linear,
+R8_SSCALED                  ,   8,  1,  1,  1,  ss8,     ,     ,     ,     ,     ,    , linear,
+R8_USCALED                  ,   8,  1,  1,  1,  us8,     ,     ,     ,     ,     ,    , linear,
+P8_UNORM_PALETTE0           ,   8,  1,  1,  1,     ,     ,     ,     ,     ,     , un8, linear,
+L8_UNORM_SRGB               ,   8,  1,  1,  1,     ,     ,     ,     ,  un8,     ,    , linear,
+P8_UNORM_PALETTE1           ,   8,  1,  1,  1,     ,     ,     ,     ,     ,     , un8, linear,
+P4A4_UNORM_PALETTE1         ,   8,  1,  1,  1,     ,     ,     ,  un4,     ,     , un4, linear,
+A4P4_UNORM_PALETTE1         ,   8,  1,  1,  1,     ,     ,     ,  un4,     ,     , un4, linear,
+Y8_UNORM                    ,   0,  0,  0,  0,     ,     ,     ,     ,     ,     ,    ,    yuv,
+L8_UINT                     ,   8,  1,  1,  1,     ,     ,     ,     ,  ui8,     ,    , linear,
+L8_SINT                     ,   8,  1,  1,  1,     ,     ,     ,     ,  si8,     ,    , linear,
+I8_UINT                     ,   8,  1,  1,  1,     ,     ,     ,     ,     ,  ui8,    , linear,
+I8_SINT                     ,   8,  1,  1,  1,     ,     ,     ,     ,     ,  si8,    , linear,
+DXT1_RGB_SRGB               ,  64,  4,  4,  1,  un4,  un4,  un4,     ,     ,     ,    ,   srgb,  dxt1
+R1_UNORM                    ,   1,  1,  1,  1,  un1,     ,     ,     ,     ,     ,    , linear,
+YCRCB_NORMAL                ,   0,  0,  0,  0,     ,     ,     ,     ,     ,     ,    ,    yuv,
+YCRCB_SWAPUVY               ,   0,  0,  0,  0,     ,     ,     ,     ,     ,     ,    ,    yuv,
+P2_UNORM_PALETTE0           ,   2,  1,  1,  1,     ,     ,     ,     ,     ,     , un2, linear,
+P2_UNORM_PALETTE1           ,   2,  1,  1,  1,     ,     ,     ,     ,     ,     , un2, linear,
+BC1_UNORM                   ,  64,  4,  4,  1,  un4,  un4,  un4,  un4,     ,     ,    , linear,  dxt1
+BC2_UNORM                   , 128,  4,  4,  1,  un4,  un4,  un4,  un4,     ,     ,    , linear,  dxt3
+BC3_UNORM                   , 128,  4,  4,  1,  un4,  un4,  un4,  un4,     ,     ,    , linear,  dxt5
+BC4_UNORM                   ,  64,  4,  4,  1,  un8,     ,     ,     ,     ,     ,    , linear, rgtc1
+BC5_UNORM                   , 128,  4,  4,  1,  un8,  un8,     ,     ,     ,     ,    , linear, rgtc2
+BC1_UNORM_SRGB              ,  64,  4,  4,  1,  un4,  un4,  un4,  un4,     ,     ,    ,   srgb,  dxt1
+BC2_UNORM_SRGB              , 128,  4,  4,  1,  un4,  un4,  un4,  un4,     ,     ,    ,   srgb,  dxt3
+BC3_UNORM_SRGB              , 128,  4,  4,  1,  un4,  un4,  un4,  un4,     ,     ,    ,   srgb,  dxt5
+MONO8                       ,   1,  1,  1,  1,     ,     ,     ,     ,     ,     ,    ,       ,
+YCRCB_SWAPUV                ,   0,  0,  0,  0,     ,     ,     ,     ,     ,     ,    ,    yuv,
+YCRCB_SWAPY                 ,   0,  0,  0,  0,     ,     ,     ,     ,     ,     ,    ,    yuv,
+DXT1_RGB                    ,  64,  4,  4,  1,  un4,  un4,  un4,     ,     ,     ,    , linear,  dxt1
+FXT1                        , 128,  8,  4,  1,  un4,  un4,  un4,     ,     ,     ,    , linear,  fxt1
+R8G8B8_UNORM                ,  24,  1,  1,  1,  un8,  un8,  un8,     ,     ,     ,    , linear,
+R8G8B8_SNORM                ,  24,  1,  1,  1,  sn8,  sn8,  sn8,     ,     ,     ,    , linear,
+R8G8B8_SSCALED              ,  24,  1,  1,  1,  ss8,  ss8,  ss8,     ,     ,     ,    , linear,
+R8G8B8_USCALED              ,  24,  1,  1,  1,  us8,  us8,  us8,     ,     ,     ,    , linear,
+R64G64B64A64_FLOAT          , 256,  1,  1,  1, sf64, sf64, sf64, sf64,     ,     ,    , linear,
+R64G64B64_FLOAT             , 192,  1,  1,  1, sf64, sf64, sf64,     ,     ,     ,    , linear,
+BC4_SNORM                   ,  64,  4,  4,  1,  sn8,     ,     ,     ,     ,     ,    , linear, rgtc1
+BC5_SNORM                   , 128,  4,  4,  1,  sn8,  sn8,     ,     ,     ,     ,    , linear, rgtc2
+R16G16B16_FLOAT             ,  48,  1,  1,  1, sf16, sf16, sf16,     ,     ,     ,    , linear,
+R16G16B16_UNORM             ,  48,  1,  1,  1, un16, un16, un16,     ,     ,     ,    , linear,
+R16G16B16_SNORM             ,  48,  1,  1,  1, sn16, sn16, sn16,     ,     ,     ,    , linear,
+R16G16B16_SSCALED           ,  48,  1,  1,  1, ss16, ss16, ss16,     ,     ,     ,    , linear,
+R16G16B16_USCALED           ,  48,  1,  1,  1, us16, us16, us16,     ,     ,     ,    , linear,
+BC6H_SF16                   , 128,  4,  4,  1, sf16, sf16, sf16,     ,     ,     ,    , linear,  bptc
+BC7_UNORM                   , 128,  4,  4,  1,  un8,  un8,  un8,  un8,     ,     ,    , linear,  bptc
+BC7_UNORM_SRGB              , 128,  4,  4,  1,  un8,  un8,  un8,  un8,     ,     ,    ,   srgb,  bptc
+BC6H_UF16                   , 128,  4,  4,  1, uf16, uf16, uf16,     ,     ,     ,    , linear,  bptc
+PLANAR_420_8                ,   0,  0,  0,  0,     ,     ,     ,     ,     ,     ,    ,    yuv,
+R8G8B8_UNORM_SRGB           ,  24,  1,  1,  1,  un8,  un8,  un8,     ,     ,     ,    ,   srgb,
+ETC1_RGB8                   ,  64,  4,  4,  1,  un8,  un8,  un8,     ,     ,     ,    , linear,  etc1
+ETC2_RGB8                   ,  64,  4,  4,  1,  un8,  un8,  un8,     ,     ,     ,    , linear,  etc2
+EAC_R11                     ,  64,  4,  4,  1, un11,     ,     ,     ,     ,     ,    , linear,  etc2
+EAC_RG11                    , 128,  4,  4,  1, un11, un11,     ,     ,     ,     ,    , linear,  etc2
+EAC_SIGNED_R11              ,  64,  4,  4,  1, sn11,     ,     ,     ,     ,     ,    , linear,  etc2
+EAC_SIGNED_RG11             , 128,  4,  4,  1, sn11, sn11,     ,     ,     ,     ,    , linear,  etc2
+ETC2_SRGB8                  ,  64,  4,  4,  1,  un8,  un8,  un8,     ,     ,     ,    ,   srgb,  etc2
+R16G16B16_UINT              ,  48,  1,  1,  1, ui16, ui16, ui16,     ,     ,     ,    , linear,
+R16G16B16_SINT              ,  48,  1,  1,  1, si16, si16, si16,     ,     ,     ,    , linear,
+R32_SFIXED                  ,  32,  1,  1,  1, sx32,     ,     ,     ,     ,     ,    , linear,
+R10G10B10A2_SNORM           ,  32,  1,  1,  1, sn10, sn10, sn10,  sn2,     ,     ,    , linear,
+R10G10B10A2_USCALED         ,  32,  1,  1,  1, us10, us10, us10,  us2,     ,     ,    , linear,
+R10G10B10A2_SSCALED         ,  32,  1,  1,  1, ss10, ss10, ss10,  ss2,     ,     ,    , linear,
+R10G10B10A2_SINT            ,  32,  1,  1,  1, si10, si10, si10,  si2,     ,     ,    , linear,
+B10G10R10A2_SNORM           ,  32,  1,  1,  1, sn10, sn10, sn10,  sn2,     ,     ,    , linear,
+B10G10R10A2_USCALED         ,  32,  1,  1,  1, us10, us10, us10,  us2,     ,     ,    , linear,
+B10G10R10A2_SSCALED         ,  32,  1,  1,  1, ss10, ss10, ss10,  ss2,     ,     ,    , linear,
+B10G10R10A2_UINT            ,  32,  1,  1,  1, ui10, ui10, ui10,  ui2,     ,     ,    , linear,
+B10G10R10A2_SINT            ,  32,  1,  1,  1, si10, si10, si10,  si2,     ,     ,    , linear,
+R64G64B64A64_PASSTHRU       , 256,  1,  1,  1,  r64,  r64,  r64,  r64,     ,     ,    ,       ,
+R64G64B64_PASSTHRU          , 192,  1,  1,  1,  r64,  r64,  r64,     ,     ,     ,    ,       ,
+ETC2_RGB8_PTA               ,  64,  4,  4,  1,  un8,  un8,  un8,  un1,     ,     ,    , linear,  etc2
+ETC2_SRGB8_PTA              ,  64,  4,  4,  1,  un8,  un8,  un8,  un1,     ,     ,    ,   srgb,  etc2
+ETC2_EAC_RGBA8              , 128,  4,  4,  1,  un8,  un8,  un8,  un8,     ,     ,    , linear,  etc2
+ETC2_EAC_SRGB8_A8           , 128,  4,  4,  1,  un8,  un8,  un8,  un8,     ,     ,    ,   srgb,  etc2
+R8G8B8_UINT                 ,  24,  1,  1,  1,  ui8,  ui8,  ui8,     ,     ,     ,    , linear,
+R8G8B8_SINT                 ,  24,  1,  1,  1,  si8,  si8,  si8,     ,     ,     ,    , linear,
+RAW                         ,   0,  0,  0,  0,     ,     ,     ,     ,     ,     ,    ,       ,
diff --git a/src/intel/isl/isl_format_layout_gen.bash b/src/intel/isl/isl_format_layout_gen.bash
new file mode 100755 (executable)
index 0000000..db88382
--- /dev/null
@@ -0,0 +1,128 @@
+#!/usr/bin/env bash
+#
+# Copyright 2015 Intel Corporation
+#
+#  Permission is hereby granted, free of charge, to any person obtaining a
+#  copy of this software and associated documentation files (the "Software"),
+#  to deal in the Software without restriction, including without limitation
+#  the rights to use, copy, modify, merge, publish, distribute, sublicense,
+#  and/or sell copies of the Software, and to permit persons to whom the
+#  Software is furnished to do so, subject to the following conditions:
+#
+#  The above copyright notice and this permission notice (including the next
+#  paragraph) shall be included in all copies or substantial portions of the
+#  Software.
+#
+#  THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+#  IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+#  FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+#  THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+#  LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+#  FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+#  IN THE SOFTWARE.
+
+set -eu
+set -o pipefail
+
+cat <<'EOF'
+/*
+ * Copyright 2015 Intel Corporation
+ *
+ *  Permission is hereby granted, free of charge, to any person obtaining a
+ *  copy of this software and associated documentation files (the "Software"),
+ *  to deal in the Software without restriction, including without limitation
+ *  the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ *  and/or sell copies of the Software, and to permit persons to whom the
+ *  Software is furnished to do so, subject to the following conditions:
+ *
+ *  The above copyright notice and this permission notice (including the next
+ *  paragraph) shall be included in all copies or substantial portions of the
+ *  Software.
+ *
+ *  THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ *  IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ *  FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ *  THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ *  LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ *  FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ *  IN THE SOFTWARE.
+ */
+
+#include "isl.h"
+
+const struct isl_format_layout
+isl_format_layouts[] = {
+EOF
+
+sed -r '
+# Delete comment lines and empty lines
+/^[[:space:]]*#/d
+/^[[:space:]]*$/d
+
+# Delete spaces
+s/[[:space:]]//g
+
+# Translate formats
+s/^([A-Za-z0-9_]+),*/ISL_FORMAT_\1,/
+
+# Translate data type of channels
+s/\<x([0-9]+),/ISL_VOID@\1,/g
+s/\<r([0-9]+),/ISL_RAW@\1,/g
+s/\<un([0-9]+),/ISL_UNORM@\1,/g
+s/\<sn([0-9]+),/ISL_SNORM@\1,/g
+s/\<uf([0-9]+),/ISL_UFLOAT@\1,/g
+s/\<sf([0-9]+),/ISL_SFLOAT@\1,/g
+s/\<ux([0-9]+),/ISL_UFIXED@\1,/g
+s/\<sx([0-9]+),/ISL_SFIXED@\1,/g
+s/\<ui([0-9]+),/ISL_UINT@\1,/g
+s/\<si([0-9]+),/ISL_SINT@\1,/g
+s/\<us([0-9]+),/ISL_USCALED@\1,/g
+s/\<ss([0-9]+),/ISL_SSCALED@\1,/g
+
+# Translate colorspaces
+# Interpret alpha-only formats as having no colorspace.
+s/\<(linear|srgb|yuv)\>/ISL_COLORSPACE_\1/
+s/\<alpha\>//
+
+# Translate texture compression
+s/\<(dxt|fxt|rgtc|bptc|etc)([0-9]*)\>/ISL_TXC_\1\2/
+' |
+tr 'a-z' 'A-Z' | # Convert to uppercase
+while IFS=, read -r format bpb bw bh bd \
+                    red green blue alpha \
+                    luminance intensity palette \
+                    colorspace txc
+do
+    : ${colorspace:=ISL_COLORSPACE_NONE}
+    : ${txc:=ISL_TXC_NONE}
+
+    cat <<EOF
+   [$format] = {
+      $format,
+      .bs = $((bpb/8)),
+      .bw = $bw, .bh = $bh, .bd = $bd,
+      .channels = {
+          .r = { $red },
+          .g = { $green },
+          .b = { $blue },
+          .a = { $alpha },
+          .l = { $luminance },
+          .i = { $intensity },
+          .p = { $palette },
+      },
+      .colorspace = $colorspace,
+      .txc = $txc,
+   },
+
+EOF
+done |
+sed -r '
+# Collapse empty channels
+s/\{  \}/{}/
+
+# Split non-empty channels into two members: base type and bit size
+s/@/, /
+'
+
+# Terminate the table
+printf '};\n'
diff --git a/src/intel/isl/isl_gen4.c b/src/intel/isl/isl_gen4.c
new file mode 100644 (file)
index 0000000..52aa565
--- /dev/null
@@ -0,0 +1,74 @@
+/*
+ * Copyright 2015 Intel Corporation
+ *
+ *  Permission is hereby granted, free of charge, to any person obtaining a
+ *  copy of this software and associated documentation files (the "Software"),
+ *  to deal in the Software without restriction, including without limitation
+ *  the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ *  and/or sell copies of the Software, and to permit persons to whom the
+ *  Software is furnished to do so, subject to the following conditions:
+ *
+ *  The above copyright notice and this permission notice (including the next
+ *  paragraph) shall be included in all copies or substantial portions of the
+ *  Software.
+ *
+ *  THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ *  IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ *  FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ *  THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ *  LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ *  FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ *  IN THE SOFTWARE.
+ */
+
+#include "isl_gen4.h"
+#include "isl_priv.h"
+
+bool
+gen4_choose_msaa_layout(const struct isl_device *dev,
+                        const struct isl_surf_init_info *info,
+                        enum isl_tiling tiling,
+                        enum isl_msaa_layout *msaa_layout)
+{
+   /* Gen4 and Gen5 do not support MSAA */
+   assert(info->samples >= 1);
+
+   *msaa_layout = ISL_MSAA_LAYOUT_NONE;
+   return true;
+}
+
+void
+gen4_choose_image_alignment_el(const struct isl_device *dev,
+                               const struct isl_surf_init_info *restrict info,
+                               enum isl_tiling tiling,
+                               enum isl_msaa_layout msaa_layout,
+                               struct isl_extent3d *image_align_el)
+{
+   assert(info->samples == 1);
+   assert(msaa_layout == ISL_MSAA_LAYOUT_NONE);
+   assert(!isl_tiling_is_std_y(tiling));
+
+   /* Note that neither the surface's horizontal nor vertical image alignment
+    * is programmable on gen4 or gen5.
+    *
+    * From the G35 PRM (2008-01), Volume 1 Graphics Core, Section 6.17.3.4
+    * Alignment Unit Size:
+    *
+    *    Note that the compressed formats are padded to a full compression
+    *    cell.
+    *
+    *    +------------------------+--------+--------+
+    *    | format                 | halign | valign |
+    *    +------------------------+--------+--------+
+    *    | YUV 4:2:2 formats      |      4 |      2 |
+    *    | uncompressed formats   |      4 |      2 |
+    *    +------------------------+--------+--------+
+    */
+
+   if (isl_format_is_compressed(info->format)) {
+      *image_align_el = isl_extent3d(1, 1, 1);
+      return;
+   }
+
+   *image_align_el = isl_extent3d(4, 2, 1);
+}
diff --git a/src/intel/isl/isl_gen4.h b/src/intel/isl/isl_gen4.h
new file mode 100644 (file)
index 0000000..06cd70b
--- /dev/null
@@ -0,0 +1,47 @@
+/*
+ * Copyright 2015 Intel Corporation
+ *
+ *  Permission is hereby granted, free of charge, to any person obtaining a
+ *  copy of this software and associated documentation files (the "Software"),
+ *  to deal in the Software without restriction, including without limitation
+ *  the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ *  and/or sell copies of the Software, and to permit persons to whom the
+ *  Software is furnished to do so, subject to the following conditions:
+ *
+ *  The above copyright notice and this permission notice (including the next
+ *  paragraph) shall be included in all copies or substantial portions of the
+ *  Software.
+ *
+ *  THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ *  IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ *  FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ *  THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ *  LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ *  FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ *  IN THE SOFTWARE.
+ */
+
+#pragma once
+
+#include "isl_priv.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+bool
+gen4_choose_msaa_layout(const struct isl_device *dev,
+                        const struct isl_surf_init_info *info,
+                        enum isl_tiling tiling,
+                        enum isl_msaa_layout *msaa_layout);
+
+void
+gen4_choose_image_alignment_el(const struct isl_device *dev,
+                               const struct isl_surf_init_info *restrict info,
+                               enum isl_tiling tiling,
+                               enum isl_msaa_layout msaa_layout,
+                               struct isl_extent3d *image_align_el);
+
+#ifdef __cplusplus
+}
+#endif
diff --git a/src/intel/isl/isl_gen6.c b/src/intel/isl/isl_gen6.c
new file mode 100644 (file)
index 0000000..24c3939
--- /dev/null
@@ -0,0 +1,160 @@
+/*
+ * Copyright 2015 Intel Corporation
+ *
+ *  Permission is hereby granted, free of charge, to any person obtaining a
+ *  copy of this software and associated documentation files (the "Software"),
+ *  to deal in the Software without restriction, including without limitation
+ *  the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ *  and/or sell copies of the Software, and to permit persons to whom the
+ *  Software is furnished to do so, subject to the following conditions:
+ *
+ *  The above copyright notice and this permission notice (including the next
+ *  paragraph) shall be included in all copies or substantial portions of the
+ *  Software.
+ *
+ *  THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ *  IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ *  FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ *  THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ *  LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ *  FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ *  IN THE SOFTWARE.
+ */
+
+#include "isl_gen6.h"
+#include "isl_priv.h"
+
+bool
+gen6_choose_msaa_layout(const struct isl_device *dev,
+                        const struct isl_surf_init_info *info,
+                        enum isl_tiling tiling,
+                        enum isl_msaa_layout *msaa_layout)
+{
+   const struct isl_format_layout *fmtl = isl_format_get_layout(info->format);
+
+   assert(ISL_DEV_GEN(dev) == 6);
+   assert(info->samples >= 1);
+
+   if (info->samples == 1) {
+      *msaa_layout = ISL_MSAA_LAYOUT_NONE;
+      return true;
+   }
+
+   /* From the Sandybridge PRM, Volume 4 Part 1 p72, SURFACE_STATE, Surface
+    * Format:
+    *
+    *    If Number of Multisamples is set to a value other than
+    *    MULTISAMPLECOUNT_1, this field cannot be set to the following
+    *    formats:
+    *
+    *       - any format with greater than 64 bits per element
+    *       - any compressed texture format (BC*)
+    *       - any YCRCB* format
+    */
+   if (fmtl->bs > 8)
+      return false;
+   if (isl_format_is_compressed(info->format))
+      return false;
+   if (isl_format_is_yuv(info->format))
+      return false;
+
+   /* From the Sandybridge PRM, Volume 4 Part 1 p85, SURFACE_STATE, Number of
+    * Multisamples:
+    *
+    *    If this field is any value other than MULTISAMPLECOUNT_1 the
+    *    following restrictions apply:
+    *
+    *       - the Surface Type must be SURFTYPE_2D
+    *       - [...]
+    */
+   if (info->dim != ISL_SURF_DIM_2D)
+      return false;
+
+   /* More obvious restrictions */
+   if (isl_surf_usage_is_display(info->usage))
+      return false;
+   if (tiling == ISL_TILING_LINEAR)
+      return false;
+   if (info->levels > 1)
+      return false;
+
+   *msaa_layout = ISL_MSAA_LAYOUT_INTERLEAVED;
+   return true;
+}
+
+void
+gen6_choose_image_alignment_el(const struct isl_device *dev,
+                               const struct isl_surf_init_info *restrict info,
+                               enum isl_tiling tiling,
+                               enum isl_msaa_layout msaa_layout,
+                               struct isl_extent3d *image_align_el)
+{
+   /* Note that the surface's horizontal image alignment is not programmable
+    * on Sandybridge.
+    *
+    * From the Sandybridge PRM (2011-05), Volume 1, Part 1, Section 7.18.3.4
+    * Alignment Unit Size:
+    *
+    *    Note that the compressed formats are padded to a full compression cell.
+    *
+    *    +------------------------+--------+--------+
+    *    | format                 | halign | valign |
+    *    +------------------------+--------+--------+
+    *    | YUV 4:2:2 formats      |      4 |      * |
+    *    | uncompressed formats   |      4 |      * |
+    *    +------------------------+--------+--------+
+    *
+    *    * For these formats, the vertical alignment factor “j” is determined
+    *      as follows:
+    *       - j = 4 for any depth buffer
+    *       - j = 2 for separate stencil buffer
+    *       - j = 4 for any render target surface that is multisampled (4x)
+    *       - j = 2 for all other render target surfaces
+    *
+    * From the Sandybridge PRM (2011-05), Volume 4, Part 1, Section 2.11.2
+    * SURFACE_STATE, Surface Vertical Alignment:
+    *
+    *    - This field must be set to VALIGN_2 if the Surface Format is 96 bits
+    *      per element (BPE).
+    *
+    *    - Value of 1 [VALIGN_4] is not supported for format YCRCB_NORMAL
+    *      (0x182), YCRCB_SWAPUVY (0x183), YCRCB_SWAPUV (0x18f), YCRCB_SWAPY
+    *      (0x190)
+    */
+
+   if (isl_format_is_compressed(info->format)) {
+      *image_align_el = isl_extent3d(1, 1, 1);
+      return;
+   }
+
+   if (isl_format_is_yuv(info->format)) {
+      *image_align_el = isl_extent3d(4, 2, 1);
+      return;
+   }
+
+   if (info->samples > 1) {
+      *image_align_el = isl_extent3d(4, 4, 1);
+      return;
+   }
+
+   if (isl_surf_usage_is_depth_or_stencil(info->usage) &&
+       !ISL_DEV_USE_SEPARATE_STENCIL(dev)) {
+      /* interleaved depthstencil buffer */
+      *image_align_el = isl_extent3d(4, 4, 1);
+      return;
+   }
+
+   if (isl_surf_usage_is_depth(info->usage)) {
+      /* separate depth buffer */
+      *image_align_el = isl_extent3d(4, 4, 1);
+      return;
+   }
+
+   if (isl_surf_usage_is_stencil(info->usage)) {
+      /* separate stencil buffer */
+      *image_align_el = isl_extent3d(4, 2, 1);
+      return;
+   }
+
+   *image_align_el = isl_extent3d(4, 2, 1);
+}
diff --git a/src/intel/isl/isl_gen6.h b/src/intel/isl/isl_gen6.h
new file mode 100644 (file)
index 0000000..0779c67
--- /dev/null
@@ -0,0 +1,47 @@
+/*
+ * Copyright 2015 Intel Corporation
+ *
+ *  Permission is hereby granted, free of charge, to any person obtaining a
+ *  copy of this software and associated documentation files (the "Software"),
+ *  to deal in the Software without restriction, including without limitation
+ *  the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ *  and/or sell copies of the Software, and to permit persons to whom the
+ *  Software is furnished to do so, subject to the following conditions:
+ *
+ *  The above copyright notice and this permission notice (including the next
+ *  paragraph) shall be included in all copies or substantial portions of the
+ *  Software.
+ *
+ *  THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ *  IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ *  FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ *  THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ *  LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ *  FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ *  IN THE SOFTWARE.
+ */
+
+#pragma once
+
+#include "isl_priv.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+bool
+gen6_choose_msaa_layout(const struct isl_device *dev,
+                        const struct isl_surf_init_info *info,
+                        enum isl_tiling tiling,
+                        enum isl_msaa_layout *msaa_layout);
+
+void
+gen6_choose_image_alignment_el(const struct isl_device *dev,
+                               const struct isl_surf_init_info *restrict info,
+                               enum isl_tiling tiling,
+                               enum isl_msaa_layout msaa_layout,
+                               struct isl_extent3d *image_align_el);
+
+#ifdef __cplusplus
+}
+#endif
diff --git a/src/intel/isl/isl_gen7.c b/src/intel/isl/isl_gen7.c
new file mode 100644 (file)
index 0000000..7064e85
--- /dev/null
@@ -0,0 +1,395 @@
+/*
+ * Copyright 2015 Intel Corporation
+ *
+ *  Permission is hereby granted, free of charge, to any person obtaining a
+ *  copy of this software and associated documentation files (the "Software"),
+ *  to deal in the Software without restriction, including without limitation
+ *  the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ *  and/or sell copies of the Software, and to permit persons to whom the
+ *  Software is furnished to do so, subject to the following conditions:
+ *
+ *  The above copyright notice and this permission notice (including the next
+ *  paragraph) shall be included in all copies or substantial portions of the
+ *  Software.
+ *
+ *  THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ *  IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ *  FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ *  THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ *  LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ *  FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ *  IN THE SOFTWARE.
+ */
+
+#include "isl_gen7.h"
+#include "isl_priv.h"
+
+bool
+gen7_choose_msaa_layout(const struct isl_device *dev,
+                        const struct isl_surf_init_info *info,
+                        enum isl_tiling tiling,
+                        enum isl_msaa_layout *msaa_layout)
+{
+   const struct isl_format_layout *fmtl = isl_format_get_layout(info->format);
+
+   bool require_array = false;
+   bool require_interleaved = false;
+
+   assert(ISL_DEV_GEN(dev) == 7);
+   assert(info->samples >= 1);
+
+   if (info->samples == 1) {
+      *msaa_layout = ISL_MSAA_LAYOUT_NONE;
+      return true;
+   }
+
+   /* From the Ivybridge PRM, Volume 4 Part 1 p63, SURFACE_STATE, Surface
+    * Format:
+    *
+    *    If Number of Multisamples is set to a value other than
+    *    MULTISAMPLECOUNT_1, this field cannot be set to the following
+    *    formats: any format with greater than 64 bits per element, any
+    *    compressed texture format (BC*), and any YCRCB* format.
+    */
+   if (fmtl->bs > 8)
+      return false;
+   if (isl_format_is_compressed(info->format))
+      return false;
+   if (isl_format_is_yuv(info->format))
+      return false;
+
+   /* From the Ivybridge PRM, Volume 4 Part 1 p73, SURFACE_STATE, Number of
+    * Multisamples:
+    *
+    *    - If this field is any value other than MULTISAMPLECOUNT_1, the
+    *      Surface Type must be SURFTYPE_2D.
+    *
+    *    - If this field is any value other than MULTISAMPLECOUNT_1, Surface
+    *      Min LOD, Mip Count / LOD, and Resource Min LOD must be set to zero
+    */
+   if (info->dim != ISL_SURF_DIM_2D)
+      return false;
+   if (info->levels > 1)
+      return false;
+
+   /* The Ivybridge PRM insists twice that signed integer formats cannot be
+    * multisampled.
+    *
+    * From the Ivybridge PRM, Volume 4 Part 1 p73, SURFACE_STATE, Number of
+    * Multisamples:
+    *
+    *    - This field must be set to MULTISAMPLECOUNT_1 for SINT MSRTs when
+    *      all RT channels are not written.
+    *
+    * And errata from the Ivybridge PRM, Volume 4 Part 1 p77,
+    * RENDER_SURFACE_STATE, MCS Enable:
+    *
+    *   This field must be set to 0 [MULTISAMPLECOUNT_1] for all SINT MSRTs
+    *   when all RT channels are not written.
+    *
+    * Note that the above SINT restrictions apply only to *MSRTs* (that is,
+    * *multisampled* render targets). The restrictions seem to permit an MCS
+    * if the render target is singlesampled.
+    */
+   if (isl_format_has_sint_channel(info->format))
+      return false;
+
+   /* More obvious restrictions */
+   if (isl_surf_usage_is_display(info->usage))
+      return false;
+   if (tiling == ISL_TILING_LINEAR)
+      return false;
+
+   /* From the Ivybridge PRM, Volume 4 Part 1 p72, SURFACE_STATE, Multisampled
+    * Surface Storage Format:
+    *
+    *    +---------------------+----------------------------------------------------------------+
+    *    | MSFMT_MSS           | Multisampled surface was/is rendered as a render target        |
+    *    | MSFMT_DEPTH_STENCIL | Multisampled surface was rendered as a depth or stencil buffer |
+    *    +---------------------+----------------------------------------------------------------+
+    *
+    * In the table above, MSFMT_MSS refers to ISL_MSAA_LAYOUT_ARRAY, and
+    * MSFMT_DEPTH_STENCIL refers to ISL_MSAA_LAYOUT_INTERLEAVED.
+    */
+   if (isl_surf_usage_is_depth_or_stencil(info->usage))
+      require_interleaved = true;
+
+   /* From the Ivybridge PRM, Volume 4 Part 1 p72, SURFACE_STATE, Multisampled
+    * Surface Storage Format:
+    *
+    *    If the surface’s Number of Multisamples is MULTISAMPLECOUNT_8, Width
+    *    is >= 8192 (meaning the actual surface width is >= 8193 pixels), this
+    *    field must be set to MSFMT_MSS.
+    */
+   if (info->samples == 8 && info->width == 8192)
+      require_array = true;
+
+   /* From the Ivybridge PRM, Volume 4 Part 1 p72, SURFACE_STATE, Multisampled
+    * Surface Storage Format:
+    *
+    *    If the surface’s Number of Multisamples is MULTISAMPLECOUNT_8,
+    *    ((Depth+1) * (Height+1)) is > 4,194,304, OR if the surface’s Number
+    *    of Multisamples is MULTISAMPLECOUNT_4, ((Depth+1) * (Height+1)) is
+    *    > 8,388,608, this field must be set to MSFMT_DEPTH_STENCIL.
+    */
+   if ((info->samples == 8 && info->height > 4194304u) ||
+       (info->samples == 4 && info->height > 8388608u))
+      require_interleaved = true;
+
+   /* From the Ivybridge PRM, Volume 4 Part 1 p72, SURFACE_STATE, Multisampled
+    * Surface Storage Format:
+    *
+    *    This field must be set to MSFMT_DEPTH_STENCIL if Surface Format is
+    *    one of the following: I24X8_UNORM, L24X8_UNORM, A24X8_UNORM, or
+    *    R24_UNORM_X8_TYPELESS.
+    */
+   if (info->format == ISL_FORMAT_I24X8_UNORM ||
+       info->format == ISL_FORMAT_L24X8_UNORM ||
+       info->format == ISL_FORMAT_A24X8_UNORM ||
+       info->format == ISL_FORMAT_R24_UNORM_X8_TYPELESS)
+      require_interleaved = true;
+
+   if (require_array && require_interleaved)
+      return false;
+
+   if (require_interleaved) {
+      *msaa_layout = ISL_MSAA_LAYOUT_INTERLEAVED;
+      return true;
+   }
+
+   /* Default to the array layout because it permits multisample
+    * compression.
+    */
+   *msaa_layout = ISL_MSAA_LAYOUT_ARRAY;
+   return true;
+}
+
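+/* Summary of how gen7_choose_msaa_layout() resolves the two "require" flags
+ * it collects above:
+ *
+ *    require_array | require_interleaved | result
+ *    --------------+---------------------+---------------------------------
+ *    false         | false               | ISL_MSAA_LAYOUT_ARRAY (default)
+ *    false         | true                | ISL_MSAA_LAYOUT_INTERLEAVED
+ *    true          | false               | ISL_MSAA_LAYOUT_ARRAY
+ *    true          | true                | unsatisfiable, return false
+ */
+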
+static bool
+gen7_format_needs_valign2(const struct isl_device *dev,
+                          enum isl_format format)
+{
+   /* This workaround applies only to gen7 */
+   if (ISL_DEV_GEN(dev) > 7)
+      return false;
+
+   /* From the Ivybridge PRM (2012-05-31), Volume 4, Part 1, Section 2.12.1,
+    * RENDER_SURFACE_STATE Surface Vertical Alignment:
+    *
+    *    - Value of 1 [VALIGN_4] is not supported for format YCRCB_NORMAL
+    *      (0x182), YCRCB_SWAPUVY (0x183), YCRCB_SWAPUV (0x18f), YCRCB_SWAPY
+    *      (0x190)
+    *
+    *    - VALIGN_4 is not supported for surface format R32G32B32_FLOAT.
+    */
+   return isl_format_is_yuv(format) ||
+          format == ISL_FORMAT_R32G32B32_FLOAT;
+}
+
+/**
+ * @brief Filter out tiling flags that are incompatible with the surface.
+ *
+ * The resultant outgoing @a flags is a subset of the incoming @a flags. The
+ * outgoing flags may be empty (0x0) if the incoming flags were too
+ * restrictive.
+ *
+ * For example, if the surface will be used for a display
+ * (ISL_SURF_USAGE_DISPLAY_BIT), then this function filters out all tiling
+ * flags except ISL_TILING_X_BIT and ISL_TILING_LINEAR_BIT.
+ */
+void
+gen7_filter_tiling(const struct isl_device *dev,
+                   const struct isl_surf_init_info *restrict info,
+                   isl_tiling_flags_t *flags)
+{
+   /* IVB+ requires separate stencil */
+   assert(ISL_DEV_USE_SEPARATE_STENCIL(dev));
+
+   /* Clear flags unsupported on this hardware */
+   if (ISL_DEV_GEN(dev) < 9) {
+      *flags &= ~ISL_TILING_Yf_BIT;
+      *flags &= ~ISL_TILING_Ys_BIT;
+   }
+
+   /* And... clear the Yf and Ys bits anyway because Anvil doesn't support
+    * them yet.
+    */
+   *flags &= ~ISL_TILING_Yf_BIT; /* FINISHME[SKL]: Support Yf */
+   *flags &= ~ISL_TILING_Ys_BIT; /* FINISHME[SKL]: Support Ys */
+
+   if (isl_surf_usage_is_depth(info->usage)) {
+      /* Depth requires Y. */
+      *flags &= ISL_TILING_ANY_Y_MASK;
+   }
+
+   /* Separate stencil requires W tiling, and W tiling requires separate
+    * stencil.
+    */
+   if (isl_surf_usage_is_stencil(info->usage)) {
+      *flags &= ISL_TILING_W_BIT;
+   } else {
+      *flags &= ~ISL_TILING_W_BIT;
+   }
+
+   if (info->usage & (ISL_SURF_USAGE_DISPLAY_ROTATE_90_BIT |
+                      ISL_SURF_USAGE_DISPLAY_ROTATE_180_BIT |
+                      ISL_SURF_USAGE_DISPLAY_ROTATE_270_BIT)) {
+      assert(info->usage & ISL_SURF_USAGE_DISPLAY_BIT);
+      isl_finishme("%s:%s: handle rotated display surfaces",
+                   __FILE__, __func__);
+   }
+
+   if (info->usage & (ISL_SURF_USAGE_DISPLAY_FLIP_X_BIT |
+                      ISL_SURF_USAGE_DISPLAY_FLIP_Y_BIT)) {
+      assert(info->usage & ISL_SURF_USAGE_DISPLAY_BIT);
+      isl_finishme("%s:%s: handle flipped display surfaces",
+                   __FILE__, __func__);
+   }
+
+   if (info->usage & ISL_SURF_USAGE_DISPLAY_BIT) {
+      /* Before Skylake, the display engine does not accept Y */
+      /* FINISHME[SKL]: Y tiling for display surfaces */
+      *flags &= (ISL_TILING_LINEAR_BIT | ISL_TILING_X_BIT);
+   }
+
+   if (info->samples > 1) {
+      /* From the Sandybridge PRM, Volume 4 Part 1, SURFACE_STATE Tiled
+       * Surface:
+       *
+       *   For multisample render targets, this field must be 1 (true). MSRTs
+       *   can only be tiled.
+       *
+       * Multisample surfaces never require X tiling, and Y tiling generally
+       * performs better than X. So choose Y. (Unless it's stencil, then it
+       * must be W).
+       */
+      *flags &= (ISL_TILING_ANY_Y_MASK | ISL_TILING_W_BIT);
+   }
+
+   /* workaround */
+   if (ISL_DEV_GEN(dev) == 7 &&
+       gen7_format_needs_valign2(dev, info->format) &&
+       (info->usage & ISL_SURF_USAGE_RENDER_TARGET_BIT) &&
+       info->samples == 1) {
+      /* Y tiling is illegal. From the Ivybridge PRM, Vol4 Part1 2.12.2.1,
+       * SURFACE_STATE Surface Vertical Alignment:
+       *
+       *     This field must be set to VALIGN_4 for all tiled Y Render Target
+       *     surfaces.
+       */
+      *flags &= ~ISL_TILING_Y0_BIT;
+   }
+}
+
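+/* Illustrative sketch: typical use of gen7_filter_tiling().  The caller
+ * starts from every tiling it is willing to accept and lets the filter
+ * narrow the set; an empty result means the request cannot be satisfied.
+ * The mask is built from tiling bits referenced in this file; the wrapper
+ * function itself is hypothetical.
+ */
+#if 0
+static bool
+example_gen7_any_tiling_possible(const struct isl_device *dev,
+                                 const struct isl_surf_init_info *info)
+{
+   isl_tiling_flags_t flags = ISL_TILING_LINEAR_BIT | ISL_TILING_X_BIT |
+                              ISL_TILING_Y0_BIT | ISL_TILING_W_BIT |
+                              ISL_TILING_Yf_BIT | ISL_TILING_Ys_BIT;
+
+   gen7_filter_tiling(dev, info, &flags);
+   return flags != 0;
+}
+#endif
+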
+/**
+ * Choose horizontal subimage alignment, in units of surface elements.
+ */
+static uint32_t
+gen7_choose_halign_el(const struct isl_device *dev,
+                      const struct isl_surf_init_info *restrict info)
+{
+   if (isl_format_is_compressed(info->format))
+      return 1;
+
+   /* From the Ivybridge PRM (2012-05-31), Volume 4, Part 1, Section 2.12.1,
+    * RENDER_SURFACE_STATE Surface Horizontal Alignment:
+    *
+    *    - This field is intended to be set to HALIGN_8 only if the surface
+    *      was rendered as a depth buffer with Z16 format or a stencil buffer,
+    *      since these surfaces support only alignment of 8. Use of HALIGN_8
+    *      for other surfaces is supported, but uses more memory.
+    */
+   if (isl_surf_info_is_z16(info) ||
+       isl_surf_usage_is_stencil(info->usage))
+      return 8;
+
+   return 4;
+}
+
+/**
+ * Choose vertical subimage alignment, in units of surface elements.
+ */
+static uint32_t
+gen7_choose_valign_el(const struct isl_device *dev,
+                      const struct isl_surf_init_info *restrict info,
+                      enum isl_tiling tiling)
+{
+   bool require_valign2 = false;
+   bool require_valign4 = false;
+
+   if (isl_format_is_compressed(info->format))
+      return 1;
+
+   if (gen7_format_needs_valign2(dev, info->format))
+      require_valign2 = true;
+
+   /* From the Ivybridge PRM, Volume 4, Part 1, Section 2.12.1:
+    * RENDER_SURFACE_STATE Surface Vertical Alignment:
+    *
+    *    - This field is intended to be set to VALIGN_4 if the surface was
+    *      rendered as a depth buffer, for a multisampled (4x) render target,
+    *      or for a multisampled (8x) render target, since these surfaces
+    *      support only alignment of 4.  Use of VALIGN_4 for other surfaces is
+    *      supported, but uses more memory.  This field must be set to
+    *      VALIGN_4 for all tiled Y Render Target surfaces.
+    *
+    */
+   if (isl_surf_usage_is_depth(info->usage) ||
+       info->samples > 1 ||
+       tiling == ISL_TILING_Y0) {
+      require_valign4 = true;
+   }
+
+   if (isl_surf_usage_is_stencil(info->usage)) {
+      /* The Ivybridge PRM states that the stencil buffer's vertical alignment
+       * is 8 [Ivybridge PRM, Volume 1, Part 1, Section 6.18.4.4 Alignment
+       * Unit Size]. However, valign=8 is outside the set of valid values of
+       * RENDER_SURFACE_STATE.SurfaceVerticalAlignment, which is VALIGN_2
+       * (0x0) and VALIGN_4 (0x1).
+       *
+       * The PRM is generally confused about the width, height, and alignment
+       * of the stencil buffer; and this confusion appears elsewhere. For
+       * example, the following PRM text effectively converts the stencil
+       * buffer's 8-pixel alignment to a 4-pixel alignment [Ivybridge PRM,
+       * Volume 1, Part 1, Section
+       * 6.18.4.2 Base Address and LOD Calculation]:
+       *
+       *    For separate stencil buffer, the width must be multiplied by 2 and
+       *    height divided by 2 as follows:
+       *
+       *       w_L = 2*i*ceil(W_L/i)
+       *       h_L = 1/2*j*ceil(H_L/j)
+       *
+       * The root of the confusion is that, in W tiling, each pair of rows is
+       * interleaved into one.
+       *
+       * FINISHME(chadv): Decide to set valign=4 or valign=8 after isl's API
+       * is more polished.
+       */
+      require_valign4 = true;
+   }
+
+   assert(!require_valign2 || !require_valign4);
+
+   if (require_valign4)
+      return 4;
+
+   /* Prefer VALIGN_2 because it conserves memory. */
+   return 2;
+}
+
+void
+gen7_choose_image_alignment_el(const struct isl_device *dev,
+                               const struct isl_surf_init_info *restrict info,
+                               enum isl_tiling tiling,
+                               enum isl_msaa_layout msaa_layout,
+                               struct isl_extent3d *image_align_el)
+{
+   /* IVB+ does not support combined depthstencil. */
+   assert(!isl_surf_usage_is_depth_and_stencil(info->usage));
+
+   *image_align_el = (struct isl_extent3d) {
+      .w = gen7_choose_halign_el(dev, info),
+      .h = gen7_choose_valign_el(dev, info, tiling),
+      .d = 1,
+   };
+}
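+
+/* Worked examples (illustrative) for gen7_choose_image_alignment_el():
+ *
+ *    - A single-sampled, Y0-tiled RGBA8 render target: halign = 4 (neither
+ *      Z16 nor stencil) and valign = 4 (tiled Y), so image_align_el is
+ *      (4, 4, 1).
+ *
+ *    - A W-tiled separate stencil buffer: halign = 8 and valign = 4 (see the
+ *      FINISHME above about valign 8), so image_align_el is (8, 4, 1).
+ */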
diff --git a/src/intel/isl/isl_gen7.h b/src/intel/isl/isl_gen7.h
new file mode 100644 (file)
index 0000000..2a95b68
--- /dev/null
@@ -0,0 +1,52 @@
+/*
+ * Copyright 2015 Intel Corporation
+ *
+ *  Permission is hereby granted, free of charge, to any person obtaining a
+ *  copy of this software and associated documentation files (the "Software"),
+ *  to deal in the Software without restriction, including without limitation
+ *  the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ *  and/or sell copies of the Software, and to permit persons to whom the
+ *  Software is furnished to do so, subject to the following conditions:
+ *
+ *  The above copyright notice and this permission notice (including the next
+ *  paragraph) shall be included in all copies or substantial portions of the
+ *  Software.
+ *
+ *  THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ *  IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ *  FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ *  THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ *  LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ *  FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ *  IN THE SOFTWARE.
+ */
+
+#pragma once
+
+#include "isl_priv.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+void
+gen7_filter_tiling(const struct isl_device *dev,
+                   const struct isl_surf_init_info *restrict info,
+                   isl_tiling_flags_t *flags);
+
+bool
+gen7_choose_msaa_layout(const struct isl_device *dev,
+                        const struct isl_surf_init_info *info,
+                        enum isl_tiling tiling,
+                        enum isl_msaa_layout *msaa_layout);
+
+void
+gen7_choose_image_alignment_el(const struct isl_device *dev,
+                               const struct isl_surf_init_info *restrict info,
+                               enum isl_tiling tiling,
+                               enum isl_msaa_layout msaa_layout,
+                               struct isl_extent3d *image_align_el);
+
+#ifdef __cplusplus
+}
+#endif
diff --git a/src/intel/isl/isl_gen8.c b/src/intel/isl/isl_gen8.c
new file mode 100644 (file)
index 0000000..a46427a
--- /dev/null
@@ -0,0 +1,229 @@
+/*
+ * Copyright 2015 Intel Corporation
+ *
+ *  Permission is hereby granted, free of charge, to any person obtaining a
+ *  copy of this software and associated documentation files (the "Software"),
+ *  to deal in the Software without restriction, including without limitation
+ *  the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ *  and/or sell copies of the Software, and to permit persons to whom the
+ *  Software is furnished to do so, subject to the following conditions:
+ *
+ *  The above copyright notice and this permission notice (including the next
+ *  paragraph) shall be included in all copies or substantial portions of the
+ *  Software.
+ *
+ *  THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ *  IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ *  FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ *  THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ *  LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ *  FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ *  IN THE SOFTWARE.
+ */
+
+#include "isl_gen8.h"
+#include "isl_priv.h"
+
+bool
+gen8_choose_msaa_layout(const struct isl_device *dev,
+                        const struct isl_surf_init_info *info,
+                        enum isl_tiling tiling,
+                        enum isl_msaa_layout *msaa_layout)
+{
+   bool require_array = false;
+   bool require_interleaved = false;
+
+   assert(info->samples >= 1);
+
+   if (info->samples == 1) {
+      *msaa_layout = ISL_MSAA_LAYOUT_NONE;
+      return true;
+   }
+
+   /* From the Broadwell PRM >> Volume2d: Command Structures >>
+    * RENDER_SURFACE_STATE Tile Mode:
+    *
+    *    - If Number of Multisamples is not MULTISAMPLECOUNT_1, this field
+    *      must be YMAJOR.
+    *
+    * As usual, though, stencil is special.
+    */
+   if (!isl_tiling_is_any_y(tiling) && !isl_surf_usage_is_stencil(info->usage))
+      return false;
+
+   /* From the Broadwell PRM >> Volume2d: Command Structures >>
+    * RENDER_SURFACE_STATE Multisampled Surface Storage Format:
+    *
+    *    All multisampled render target surfaces must have this field set to
+    *    MSFMT_MSS
+    */
+   if (info->usage & ISL_SURF_USAGE_RENDER_TARGET_BIT)
+      require_array = true;
+
+   /* From the Broadwell PRM >> Volume2d: Command Structures >>
+    * RENDER_SURFACE_STATE Number of Multisamples:
+    *
+    *    - If this field is any value other than MULTISAMPLECOUNT_1, the
+    *      Surface Type must be SURFTYPE_2D. This field must be set to
+    *      MULTISAMPLECOUNT_1 unless the surface is a Sampling Engine surface
+    *      or Render Target surface.
+    *
+    *    - If this field is any value other than MULTISAMPLECOUNT_1, Surface
+    *      Min LOD, Mip Count / LOD, and Resource Min LOD must be set to zero.
+    */
+   if (info->dim != ISL_SURF_DIM_2D)
+      return false;
+   if (info->levels > 1)
+      return false;
+
+   /* More obvious restrictions */
+   if (isl_surf_usage_is_display(info->usage))
+      return false;
+   if (isl_format_is_compressed(info->format))
+      return false;
+   if (isl_format_is_yuv(info->format))
+      return false;
+
+   if (isl_surf_usage_is_depth_or_stencil(info->usage))
+      require_interleaved = true;
+
+   if (require_array && require_interleaved)
+      return false;
+
+   if (require_interleaved) {
+      *msaa_layout = ISL_MSAA_LAYOUT_INTERLEAVED;
+      return true;
+   }
+
+   *msaa_layout = ISL_MSAA_LAYOUT_ARRAY;
+   return true;
+}
+
+/**
+ * Choose horizontal subimage alignment, in units of surface elements.
+ */
+static uint32_t
+gen8_choose_halign_el(const struct isl_device *dev,
+                      const struct isl_surf_init_info *restrict info)
+{
+   if (isl_format_is_compressed(info->format))
+      return 1;
+
+   /* From the Broadwell PRM, Volume 2d "Command Reference: Structures",
+    * RENDER_SURFACE_STATE Surface Horizontal Alignment, p326:
+    *
+    *    - This field is intended to be set to HALIGN_8 only if the surface
+    *      was rendered as a depth buffer with Z16 format or a stencil buffer.
+    *      In this case it must be set to HALIGN_8 since these surfaces
+    *      support only alignment of 8. [...]
+    */
+   if (isl_surf_info_is_z16(info))
+      return 8;
+   if (isl_surf_usage_is_stencil(info->usage))
+      return 8;
+
+   /* From the Broadwell PRM, Volume 2d "Command Reference: Structures",
+    * RENDER_SURFACE_STATE Surface Horizontal Alignment, p326:
+    *
+    *      [...] For Z32 formats it must be set to HALIGN_4.
+    */
+   if (isl_surf_usage_is_depth(info->usage))
+      return 4;
+
+   if (!(info->usage & ISL_SURF_USAGE_DISABLE_AUX_BIT)) {
+      /* From the Broadwell PRM, Volume 2d "Command Reference: Structures",
+       * RENDER_SURFACE_STATE Surface Horizontal Alignment, p326:
+       *
+       *    - When Auxiliary Surface Mode is set to AUX_CCS_D or AUX_CCS_E,
+       *      HALIGN 16 must be used.
+       *
+       * This case handles color surfaces that may own an auxiliary MCS, CCS_D,
+       * or CCS_E. Depth buffers, including those that own an auxiliary HiZ
+       * surface, are handled above and do not require HALIGN_16.
+       */
+      assert(!isl_surf_usage_is_depth(info->usage));
+      return 16;
+   }
+
+   /* XXX(chadv): I believe the hardware requires each image to be
+    * cache-aligned. If that's true, then defaulting to halign=4 is wrong for
+    * many formats. Depending on the format's block size, we may need to
+    * increase halign to 8.
+    */
+   return 4;
+}
+
+/**
+ * Choose vertical subimage alignment, in units of surface elements.
+ */
+static uint32_t
+gen8_choose_valign_el(const struct isl_device *dev,
+                      const struct isl_surf_init_info *restrict info)
+{
+   /* From the Broadwell PRM > Volume 2d: Command Reference: Structures
+    * > RENDER_SURFACE_STATE Surface Vertical Alignment (p325):
+    *
+    *    - For Sampling Engine and Render Target Surfaces: This field
+    *      specifies the vertical alignment requirement in elements for the
+    *      surface. [...] An element is defined as a pixel in uncompressed
+    *      surface formats, and as a compression block in compressed surface
+    *      formats. For MSFMT_DEPTH_STENCIL type multisampled surfaces, an
+    *      element is a sample.
+    *
+    *    - This field is intended to be set to VALIGN_4 if the surface was
+    *      rendered as a depth buffer, for a multisampled (4x) render target,
+    *      or for a multisampled (8x) render target, since these surfaces
+    *      support only alignment of 4. Use of VALIGN_4 for other surfaces is
+    *      supported, but increases memory usage.
+    *
+    *    - This field is intended to be set to VALIGN_8 only if the surface
+    *       was rendered as a stencil buffer, since stencil buffer surfaces
+    *       support only alignment of 8. If set to VALIGN_8, Surface Format
+    *       must be R8_UINT.
+    */
+
+   if (isl_format_is_compressed(info->format))
+      return 1;
+
+   if (isl_surf_usage_is_stencil(info->usage))
+      return 8;
+
+   return 4;
+}
+
+void
+gen8_choose_image_alignment_el(const struct isl_device *dev,
+                               const struct isl_surf_init_info *restrict info,
+                               enum isl_tiling tiling,
+                               enum isl_msaa_layout msaa_layout,
+                               struct isl_extent3d *image_align_el)
+{
+   assert(!isl_tiling_is_std_y(tiling));
+
+   /* The below text from the Broadwell PRM provides some insight into the
+    * hardware's requirements for LOD alignment.  From the Broadwell PRM >>
+    * Volume 5: Memory Views >> Surface Layout >> 2D Surfaces:
+    *
+    *    These [2D surfaces] must adhere to the following memory organization
+    *    rules:
+    *
+    *       - For non-compressed texture formats, each mipmap must start on an
+    *         even row within the monolithic rectangular area. For
+    *         1-texel-high mipmaps, this may require a row of padding below
+    *         the previous mipmap. This restriction does not apply to any
+    *         compressed texture formats; each subsequent (lower-res)
+    *         compressed mipmap is positioned directly below the previous
+    *         mipmap.
+    *
+    *       - Vertical alignment restrictions vary with memory tiling type:
+    *         1 DWord for linear, 16-byte (DQWord) for tiled. (Note that tiled
+    *         mipmaps are not required to start at the left edge of a tile
+    *         row.)
+    */
+
+   *image_align_el = (struct isl_extent3d) {
+      .w = gen8_choose_halign_el(dev, info),
+      .h = gen8_choose_valign_el(dev, info),
+      .d = 1,
+   };
+}
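+
+/* Worked examples (illustrative) for gen8_choose_image_alignment_el():
+ *
+ *    - An RGBA8 color surface without ISL_SURF_USAGE_DISABLE_AUX_BIT:
+ *      halign = 16 (it may own a CCS or MCS) and valign = 4, so (16, 4, 1).
+ *
+ *    - A separate stencil buffer: halign = 8 and valign = 8, so (8, 8, 1).
+ *
+ *    - A Z32 depth buffer: halign = 4 and valign = 4, so (4, 4, 1).
+ */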
diff --git a/src/intel/isl/isl_gen8.h b/src/intel/isl/isl_gen8.h
new file mode 100644 (file)
index 0000000..2017ea8
--- /dev/null
@@ -0,0 +1,47 @@
+/*
+ * Copyright 2015 Intel Corporation
+ *
+ *  Permission is hereby granted, free of charge, to any person obtaining a
+ *  copy of this software and associated documentation files (the "Software"),
+ *  to deal in the Software without restriction, including without limitation
+ *  the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ *  and/or sell copies of the Software, and to permit persons to whom the
+ *  Software is furnished to do so, subject to the following conditions:
+ *
+ *  The above copyright notice and this permission notice (including the next
+ *  paragraph) shall be included in all copies or substantial portions of the
+ *  Software.
+ *
+ *  THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ *  IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ *  FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ *  THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ *  LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ *  FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ *  IN THE SOFTWARE.
+ */
+
+#pragma once
+
+#include "isl_priv.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+bool
+gen8_choose_msaa_layout(const struct isl_device *dev,
+                        const struct isl_surf_init_info *info,
+                        enum isl_tiling tiling,
+                        enum isl_msaa_layout *msaa_layout);
+
+void
+gen8_choose_image_alignment_el(const struct isl_device *dev,
+                               const struct isl_surf_init_info *restrict info,
+                               enum isl_tiling tiling,
+                               enum isl_msaa_layout msaa_layout,
+                               struct isl_extent3d *image_align_el);
+
+#ifdef __cplusplus
+}
+#endif
diff --git a/src/intel/isl/isl_gen9.c b/src/intel/isl/isl_gen9.c
new file mode 100644 (file)
index 0000000..aa290aa
--- /dev/null
@@ -0,0 +1,185 @@
+/*
+ * Copyright 2015 Intel Corporation
+ *
+ *  Permission is hereby granted, free of charge, to any person obtaining a
+ *  copy of this software and associated documentation files (the "Software"),
+ *  to deal in the Software without restriction, including without limitation
+ *  the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ *  and/or sell copies of the Software, and to permit persons to whom the
+ *  Software is furnished to do so, subject to the following conditions:
+ *
+ *  The above copyright notice and this permission notice (including the next
+ *  paragraph) shall be included in all copies or substantial portions of the
+ *  Software.
+ *
+ *  THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ *  IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ *  FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ *  THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ *  LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ *  FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ *  IN THE SOFTWARE.
+ */
+
+#include "isl_gen8.h"
+#include "isl_gen9.h"
+#include "isl_priv.h"
+
+/**
+ * Calculate the surface's subimage alignment, in units of surface samples,
+ * for the standard tiling formats Yf and Ys.
+ */
+static void
+gen9_calc_std_image_alignment_sa(const struct isl_device *dev,
+                                 const struct isl_surf_init_info *restrict info,
+                                 enum isl_tiling tiling,
+                                 enum isl_msaa_layout msaa_layout,
+                                 struct isl_extent3d *align_sa)
+{
+   const struct isl_format_layout *fmtl = isl_format_get_layout(info->format);
+
+   assert(isl_tiling_is_std_y(tiling));
+
+   const uint32_t bs = fmtl->bs;
+   const uint32_t is_Ys = tiling == ISL_TILING_Ys;
+
+   switch (info->dim) {
+   case ISL_SURF_DIM_1D:
+      /* See the Skylake BSpec > Memory Views > Common Surface Formats > Surface
+       * Layout and Tiling > 1D Surfaces > 1D Alignment Requirements.
+       */
+      *align_sa = (struct isl_extent3d) {
+         .w = 1 << (12 - (ffs(bs) - 1) + (4 * is_Ys)),
+         .h = 1,
+         .d = 1,
+      };
+      return;
+   case ISL_SURF_DIM_2D:
+      /* See the Skylake BSpec > Memory Views > Common Surface Formats >
+       * Surface Layout and Tiling > 2D Surfaces > 2D/CUBE Alignment
+       * Requirements.
+       */
+      *align_sa = (struct isl_extent3d) {
+         .w = 1 << (6 - ((ffs(bs) - 1) / 2) + (4 * is_Ys)),
+         .h = 1 << (6 - ((ffs(bs) - 0) / 2) + (4 * is_Ys)),
+         .d = 1,
+      };
+
+      if (is_Ys) {
+         /* FINISHME(chadv): I don't trust this code. Untested. */
+         isl_finishme("%s:%s: [SKL+] multisample TileYs", __FILE__, __func__);
+
+         switch (msaa_layout) {
+         case ISL_MSAA_LAYOUT_NONE:
+         case ISL_MSAA_LAYOUT_INTERLEAVED:
+            break;
+         case ISL_MSAA_LAYOUT_ARRAY:
+            align_sa->w >>= (ffs(info->samples) - 0) / 2;
+            align_sa->h >>= (ffs(info->samples) - 1) / 2;
+            break;
+         }
+      }
+      return;
+
+   case ISL_SURF_DIM_3D:
+      /* See the Skylake BSpec > Memory Views > Common Surface Formats > Surface
+       * Layout and Tiling > 3D Surfaces > 3D Alignment Requirements.
+       */
+      *align_sa = (struct isl_extent3d) {
+         .w = 1 << (4 - ((ffs(bs) + 1) / 3) + (4 * is_Ys)),
+         .h = 1 << (4 - ((ffs(bs) - 1) / 3) + (2 * is_Ys)),
+         .d = 1 << (4 - ((ffs(bs) - 0) / 3) + (2 * is_Ys)),
+      };
+      return;
+   }
+
+   unreachable("bad isl_surface_type");
+}
+
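+/* Worked example (illustrative) for the 2D TileYf case above: for a 32bpp
+ * format, bs = 4 and ffs(bs) = 3, so
+ *
+ *    w = 1 << (6 - (3 - 1) / 2) = 1 << 5 = 32 samples
+ *    h = 1 << (6 - (3 - 0) / 2) = 1 << 5 = 32 samples
+ *
+ * and 32 x 32 x 4 B = 4 KB, i.e. exactly one Yf tile.
+ */
+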
+void
+gen9_choose_image_alignment_el(const struct isl_device *dev,
+                               const struct isl_surf_init_info *restrict info,
+                               enum isl_tiling tiling,
+                               enum isl_msaa_layout msaa_layout,
+                               struct isl_extent3d *image_align_el)
+{
+   /* This BSpec text provides some insight into the hardware's alignment
+    * requirements [Skylake BSpec > Memory Views > Common Surface Formats >
+    * Surface Layout and Tiling > 2D Surfaces]:
+    *
+    *    An LOD must be aligned to a cache-line except for some special cases
+    *    related to Planar YUV surfaces.  In general, the cache-alignment
+    *    restriction implies there is a minimum height for an LOD of 4 texels.
+    *    So, LODs which are smaller than 4 high are padded.
+    *
+    * From the Skylake BSpec, RENDER_SURFACE_STATE Surface Vertical Alignment:
+    *
+    *    - For Sampling Engine and Render Target Surfaces: This field
+    *      specifies the vertical alignment requirement in elements for the
+    *      surface. [...] An element is defined as a pixel in uncompressed
+    *      surface formats, and as a compression block in compressed surface
+    *      formats. For MSFMT_DEPTH_STENCIL type multisampled surfaces, an
+    *      element is a sample.
+    *
+    *    - This field is used for 2D, CUBE, and 3D surface alignment when Tiled
+    *      Resource Mode is TRMODE_NONE (Tiled Resource Mode is disabled).
+    *      This field is ignored for 1D surfaces and also when Tiled Resource
+    *      Mode is not TRMODE_NONE (e.g. Tiled Resource Mode is enabled).
+    *
+    *      See the appropriate Alignment table in the "Surface Layout and
+    *      Tiling" section under Common Surface Formats for the table of
+    *      alignment values for Tiled Resources.
+    *
+    *    - For uncompressed surfaces, the units of "j" are rows of pixels on
+    *      the physical surface. For compressed texture formats, the units of
+    *      "j" are in compression blocks, thus each increment in "j" is equal
+    *      to h pixels, where h is the height of the compression block in
+    *      pixels.
+    *
+    *    - Valid Values: VALIGN_4, VALIGN_8, VALIGN_16
+    *
+    * From the Skylake BSpec, RENDER_SURFACE_STATE Surface Horizontal
+    * Alignment:
+    *
+    *    -  For uncompressed surfaces, the units of "i" are pixels on the
+    *       physical surface. For compressed texture formats, the units of "i"
+    *       are in compression blocks, thus each increment in "i" is equal to
+    *       w pixels, where w is the width of the compression block in pixels.
+    *
+    *    - Valid Values: HALIGN_4, HALIGN_8, HALIGN_16
+    */
+
+   if (isl_tiling_is_std_y(tiling)) {
+      struct isl_extent3d image_align_sa;
+      gen9_calc_std_image_alignment_sa(dev, info, tiling, msaa_layout,
+                                       &image_align_sa);
+
+      *image_align_el = isl_extent3d_sa_to_el(info->format, image_align_sa);
+      return;
+   }
+
+   if (info->dim == ISL_SURF_DIM_1D) {
+      /* See the Skylake BSpec > Memory Views > Common Surface Formats > Surface
+       * Layout and Tiling > 1D Surfaces > 1D Alignment Requirements.
+       */
+      *image_align_el = isl_extent3d(64, 1, 1);
+      return;
+   }
+
+   if (isl_format_is_compressed(info->format)) {
+      /* On Gen9, the meaning of RENDER_SURFACE_STATE's
+       * SurfaceHorizontalAlignment and SurfaceVerticalAlignment changed for
+       * compressed formats. They now indicate a multiple of the compression
+       * block.  For example, if the compression mode is ETC2 then HALIGN_4
+       * indicates a horizontal alignment of 16 pixels.
+       *
+       * To avoid wasting memory, choose the smallest alignment possible:
+       * HALIGN_4 and VALIGN_4.
+       */
+      *image_align_el = isl_extent3d(4, 4, 1);
+      return;
+   }
+
+   gen8_choose_image_alignment_el(dev, info, tiling, msaa_layout,
+                                  image_align_el);
+}
diff --git a/src/intel/isl/isl_gen9.h b/src/intel/isl/isl_gen9.h
new file mode 100644 (file)
index 0000000..64ed0aa
--- /dev/null
@@ -0,0 +1,41 @@
+/*
+ * Copyright 2015 Intel Corporation
+ *
+ *  Permission is hereby granted, free of charge, to any person obtaining a
+ *  copy of this software and associated documentation files (the "Software"),
+ *  to deal in the Software without restriction, including without limitation
+ *  the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ *  and/or sell copies of the Software, and to permit persons to whom the
+ *  Software is furnished to do so, subject to the following conditions:
+ *
+ *  The above copyright notice and this permission notice (including the next
+ *  paragraph) shall be included in all copies or substantial portions of the
+ *  Software.
+ *
+ *  THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ *  IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ *  FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ *  THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ *  LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ *  FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ *  IN THE SOFTWARE.
+ */
+
+#pragma once
+
+#include "isl_priv.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+void
+gen9_choose_image_alignment_el(const struct isl_device *dev,
+                               const struct isl_surf_init_info *restrict info,
+                               enum isl_tiling tiling,
+                               enum isl_msaa_layout msaa_layout,
+                               struct isl_extent3d *image_align_el);
+
+#ifdef __cplusplus
+}
+#endif
diff --git a/src/intel/isl/isl_priv.h b/src/intel/isl/isl_priv.h
new file mode 100644 (file)
index 0000000..7b22259
--- /dev/null
@@ -0,0 +1,170 @@
+/*
+ * Copyright 2015 Intel Corporation
+ *
+ *  Permission is hereby granted, free of charge, to any person obtaining a
+ *  copy of this software and associated documentation files (the "Software"),
+ *  to deal in the Software without restriction, including without limitation
+ *  the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ *  and/or sell copies of the Software, and to permit persons to whom the
+ *  Software is furnished to do so, subject to the following conditions:
+ *
+ *  The above copyright notice and this permission notice (including the next
+ *  paragraph) shall be included in all copies or substantial portions of the
+ *  Software.
+ *
+ *  THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ *  IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ *  FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ *  THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ *  LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ *  FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ *  IN THE SOFTWARE.
+ */
+
+#pragma once
+
+#include <assert.h>
+
+#include "brw_device_info.h"
+#include "util/macros.h"
+
+#include "isl.h"
+
+#define isl_finishme(format, ...) \
+   __isl_finishme(__FILE__, __LINE__, format, ##__VA_ARGS__)
+
+void PRINTFLIKE(3, 4) UNUSED
+__isl_finishme(const char *file, int line, const char *fmt, ...);
+
+#define MIN(a, b) ((a) < (b) ? (a) : (b))
+#define MAX(a, b) ((a) > (b) ? (a) : (b))
+
+static inline uint32_t
+ffs(uint32_t n)
+{
+   return __builtin_ffs(n);
+}
+
+static inline bool
+isl_is_pow2(uintmax_t n)
+{
+   return !(n & (n - 1));
+}
+
+/**
+ * Alignment must be a power of 2.
+ */
+static inline bool
+isl_is_aligned(uintmax_t n, uintmax_t a)
+{
+   assert(isl_is_pow2(a));
+   return (n & (a - 1)) == 0;
+}
+
+/**
+ * Alignment must be a power of 2.
+ */
+static inline uintmax_t
+isl_align(uintmax_t n, uintmax_t a)
+{
+   assert(a != 0 && isl_is_pow2(a));
+   return (n + a - 1) & ~(a - 1);
+}
+
+static inline uintmax_t
+isl_align_npot(uintmax_t n, uintmax_t a)
+{
+   assert(a > 0);
+   return ((n + a - 1) / a) * a;
+}
+
+/**
+ * Alignment must be a power of 2.
+ */
+static inline uintmax_t
+isl_align_div(uintmax_t n, uintmax_t a)
+{
+   return isl_align(n, a) / a;
+}
+
+static inline uintmax_t
+isl_align_div_npot(uintmax_t n, uintmax_t a)
+{
+   return isl_align_npot(n, a) / a;
+}
+
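+/* Worked examples (illustrative) for the alignment helpers above:
+ *
+ *    isl_align(13, 8)      = 16
+ *    isl_align_div(13, 8)  = 2
+ *    isl_align_npot(10, 6) = 12
+ */
+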
+/**
+ * Log base 2, rounding towards zero.
+ */
+static inline uint32_t
+isl_log2u(uint32_t n)
+{
+   assert(n != 0);
+   return 31 - __builtin_clz(n);
+}
+
+static inline uint32_t
+isl_minify(uint32_t n, uint32_t levels)
+{
+   if (unlikely(n == 0))
+      return 0;
+   else
+      return MAX(n >> levels, 1);
+}
+
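+/* Worked examples (illustrative): isl_log2u(13) = 3 (log2 rounded down), and
+ * isl_minify(13, 2) = MAX(13 >> 2, 1) = 3 while isl_minify(13, 5) = 1.
+ */
+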
+static inline struct isl_extent3d
+isl_extent3d_sa_to_el(enum isl_format fmt, struct isl_extent3d extent_sa)
+{
+   const struct isl_format_layout *fmtl = isl_format_get_layout(fmt);
+
+   assert(extent_sa.w % fmtl->bw == 0);
+   assert(extent_sa.h % fmtl->bh == 0);
+   assert(extent_sa.d % fmtl->bd == 0);
+
+   return (struct isl_extent3d) {
+      .w = extent_sa.w / fmtl->bw,
+      .h = extent_sa.h / fmtl->bh,
+      .d = extent_sa.d / fmtl->bd,
+   };
+}
+
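+/* Worked example (illustrative): for a compressed format with a 4x4x1 block
+ * (bw = 4, bh = 4, bd = 1, e.g. BC1), an extent of 64x64x1 samples converts
+ * to 16x16x1 elements; isl_extent3d_el_to_sa() below performs the inverse.
+ */
+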
+static inline struct isl_extent3d
+isl_extent3d_el_to_sa(enum isl_format fmt, struct isl_extent3d extent_el)
+{
+   const struct isl_format_layout *fmtl = isl_format_get_layout(fmt);
+
+   return (struct isl_extent3d) {
+      .w = extent_el.w * fmtl->bw,
+      .h = extent_el.h * fmtl->bh,
+      .d = extent_el.d * fmtl->bd,
+   };
+}
+
+void
+isl_gen7_surf_fill_state_s(const struct isl_device *dev, void *state,
+                           const struct isl_surf_fill_state_info *restrict info);
+
+void
+isl_gen75_surf_fill_state_s(const struct isl_device *dev, void *state,
+                            const struct isl_surf_fill_state_info *restrict info);
+void
+isl_gen8_surf_fill_state_s(const struct isl_device *dev, void *state,
+                           const struct isl_surf_fill_state_info *restrict info);
+void
+isl_gen9_surf_fill_state_s(const struct isl_device *dev, void *state,
+                           const struct isl_surf_fill_state_info *restrict info);
+
+void
+isl_gen7_buffer_fill_state_s(void *state,
+                             const struct isl_buffer_fill_state_info *restrict info);
+
+void
+isl_gen75_buffer_fill_state_s(void *state,
+                              const struct isl_buffer_fill_state_info *restrict info);
+
+void
+isl_gen8_buffer_fill_state_s(void *state,
+                             const struct isl_buffer_fill_state_info *restrict info);
+
+void
+isl_gen9_buffer_fill_state_s(void *state,
+                             const struct isl_buffer_fill_state_info *restrict info);
diff --git a/src/intel/isl/isl_storage_image.c b/src/intel/isl/isl_storage_image.c
new file mode 100644 (file)
index 0000000..2b5b5cd
--- /dev/null
@@ -0,0 +1,293 @@
+/*
+ * Copyright 2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "isl_priv.h"
+#include "brw_compiler.h"
+
+bool
+isl_is_storage_image_format(enum isl_format format)
+{
+   /* XXX: Maybe we should put this in the CSV? */
+
+   switch (format) {
+   case ISL_FORMAT_R32G32B32A32_UINT:
+   case ISL_FORMAT_R32G32B32A32_SINT:
+   case ISL_FORMAT_R32G32B32A32_FLOAT:
+   case ISL_FORMAT_R32_UINT:
+   case ISL_FORMAT_R32_SINT:
+   case ISL_FORMAT_R32_FLOAT:
+   case ISL_FORMAT_R16G16B16A16_UINT:
+   case ISL_FORMAT_R16G16B16A16_SINT:
+   case ISL_FORMAT_R16G16B16A16_FLOAT:
+   case ISL_FORMAT_R32G32_UINT:
+   case ISL_FORMAT_R32G32_SINT:
+   case ISL_FORMAT_R32G32_FLOAT:
+   case ISL_FORMAT_R8G8B8A8_UINT:
+   case ISL_FORMAT_R8G8B8A8_SINT:
+   case ISL_FORMAT_R16G16_UINT:
+   case ISL_FORMAT_R16G16_SINT:
+   case ISL_FORMAT_R16G16_FLOAT:
+   case ISL_FORMAT_R8G8_UINT:
+   case ISL_FORMAT_R8G8_SINT:
+   case ISL_FORMAT_R16_UINT:
+   case ISL_FORMAT_R16_FLOAT:
+   case ISL_FORMAT_R16_SINT:
+   case ISL_FORMAT_R8_UINT:
+   case ISL_FORMAT_R8_SINT:
+   case ISL_FORMAT_R10G10B10A2_UINT:
+   case ISL_FORMAT_R10G10B10A2_UNORM:
+   case ISL_FORMAT_R11G11B10_FLOAT:
+   case ISL_FORMAT_R16G16B16A16_UNORM:
+   case ISL_FORMAT_R16G16B16A16_SNORM:
+   case ISL_FORMAT_R8G8B8A8_UNORM:
+   case ISL_FORMAT_R8G8B8A8_SNORM:
+   case ISL_FORMAT_R16G16_UNORM:
+   case ISL_FORMAT_R16G16_SNORM:
+   case ISL_FORMAT_R8G8_UNORM:
+   case ISL_FORMAT_R8G8_SNORM:
+   case ISL_FORMAT_R16_UNORM:
+   case ISL_FORMAT_R16_SNORM:
+   case ISL_FORMAT_R8_UNORM:
+   case ISL_FORMAT_R8_SNORM:
+      return true;
+   default:
+      return false;
+   }
+}
+
+enum isl_format
+isl_lower_storage_image_format(const struct isl_device *dev,
+                               enum isl_format format)
+{
+   switch (format) {
+   /* These are never lowered.  Up to BDW we'll have to fall back to untyped
+    * surface access for 128bpp formats.
+    */
+   case ISL_FORMAT_R32G32B32A32_UINT:
+   case ISL_FORMAT_R32G32B32A32_SINT:
+   case ISL_FORMAT_R32G32B32A32_FLOAT:
+   case ISL_FORMAT_R32_UINT:
+   case ISL_FORMAT_R32_SINT:
+   case ISL_FORMAT_R32_FLOAT:
+      return format;
+
+   /* From HSW to BDW the only 64bpp format supported for typed access is
+    * RGBA_UINT16.  IVB falls back to untyped.
+    */
+   case ISL_FORMAT_R16G16B16A16_UINT:
+   case ISL_FORMAT_R16G16B16A16_SINT:
+   case ISL_FORMAT_R16G16B16A16_FLOAT:
+   case ISL_FORMAT_R32G32_UINT:
+   case ISL_FORMAT_R32G32_SINT:
+   case ISL_FORMAT_R32G32_FLOAT:
+      return (ISL_DEV_GEN(dev) >= 9 ? format :
+              ISL_DEV_GEN(dev) >= 8 || dev->info->is_haswell ?
+              ISL_FORMAT_R16G16B16A16_UINT :
+              ISL_FORMAT_R32G32_UINT);
+
+   /* Up to BDW no SINT or FLOAT formats of less than 32 bits per component
+    * are supported.  IVB doesn't support formats with more than one component
+    * for typed access.  For 8 and 16 bpp formats IVB relies on the
+    * undocumented behavior that typed reads from R_UINT8 and R_UINT16
+    * surfaces actually do a 32-bit misaligned read.  The alternative would be
+    * to use two surface state entries with different formats for each image,
+    * one for reading (using R_UINT32) and another one for writing (using
+    * R_UINT8 or R_UINT16), but that would complicate the shaders we generate
+    * even more.
+    */
+   case ISL_FORMAT_R8G8B8A8_UINT:
+   case ISL_FORMAT_R8G8B8A8_SINT:
+      return (ISL_DEV_GEN(dev) >= 9 ? format :
+              ISL_DEV_GEN(dev) >= 8 || dev->info->is_haswell ?
+              ISL_FORMAT_R8G8B8A8_UINT : ISL_FORMAT_R32_UINT);
+
+   case ISL_FORMAT_R16G16_UINT:
+   case ISL_FORMAT_R16G16_SINT:
+   case ISL_FORMAT_R16G16_FLOAT:
+      return (ISL_DEV_GEN(dev) >= 9 ? format :
+              ISL_DEV_GEN(dev) >= 8 || dev->info->is_haswell ?
+              ISL_FORMAT_R16G16_UINT : ISL_FORMAT_R32_UINT);
+
+   case ISL_FORMAT_R8G8_UINT:
+   case ISL_FORMAT_R8G8_SINT:
+      return (ISL_DEV_GEN(dev) >= 9 ? format :
+              ISL_DEV_GEN(dev) >= 8 || dev->info->is_haswell ?
+              ISL_FORMAT_R8G8_UINT : ISL_FORMAT_R16_UINT);
+
+   case ISL_FORMAT_R16_UINT:
+   case ISL_FORMAT_R16_FLOAT:
+   case ISL_FORMAT_R16_SINT:
+      return (ISL_DEV_GEN(dev) >= 9 ? format : ISL_FORMAT_R16_UINT);
+
+   case ISL_FORMAT_R8_UINT:
+   case ISL_FORMAT_R8_SINT:
+      return (ISL_DEV_GEN(dev) >= 9 ? format : ISL_FORMAT_R8_UINT);
+
+   /* Neither the 2/10/10/10 nor the 11/11/10 packed formats are supported
+    * by the hardware.
+    */
+   case ISL_FORMAT_R10G10B10A2_UINT:
+   case ISL_FORMAT_R10G10B10A2_UNORM:
+   case ISL_FORMAT_R11G11B10_FLOAT:
+      return ISL_FORMAT_R32_UINT;
+
+   /* No normalized fixed-point formats are supported by the hardware. */
+   case ISL_FORMAT_R16G16B16A16_UNORM:
+   case ISL_FORMAT_R16G16B16A16_SNORM:
+      return (ISL_DEV_GEN(dev) >= 8 || dev->info->is_haswell ?
+              ISL_FORMAT_R16G16B16A16_UINT :
+              ISL_FORMAT_R32G32_UINT);
+
+   case ISL_FORMAT_R8G8B8A8_UNORM:
+   case ISL_FORMAT_R8G8B8A8_SNORM:
+      return (ISL_DEV_GEN(dev) >= 8 || dev->info->is_haswell ?
+              ISL_FORMAT_R8G8B8A8_UINT : ISL_FORMAT_R32_UINT);
+
+   case ISL_FORMAT_R16G16_UNORM:
+   case ISL_FORMAT_R16G16_SNORM:
+      return (ISL_DEV_GEN(dev) >= 8 || dev->info->is_haswell ?
+              ISL_FORMAT_R16G16_UINT : ISL_FORMAT_R32_UINT);
+
+   case ISL_FORMAT_R8G8_UNORM:
+   case ISL_FORMAT_R8G8_SNORM:
+      return (ISL_DEV_GEN(dev) >= 8 || dev->info->is_haswell ?
+              ISL_FORMAT_R8G8_UINT : ISL_FORMAT_R16_UINT);
+
+   case ISL_FORMAT_R16_UNORM:
+   case ISL_FORMAT_R16_SNORM:
+      return ISL_FORMAT_R16_UINT;
+
+   case ISL_FORMAT_R8_UNORM:
+   case ISL_FORMAT_R8_SNORM:
+      return ISL_FORMAT_R8_UINT;
+
+   default:
+      assert(!"Unknown image format");
+      return ISL_FORMAT_UNSUPPORTED;
+   }
+}
+
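+/* Worked examples (illustrative) of the lowering above for
+ * ISL_FORMAT_R8G8B8A8_UNORM used as a storage image:
+ *
+ *    - IVB (gen7, not Haswell): lowered to ISL_FORMAT_R32_UINT
+ *    - HSW / BDW / SKL:         lowered to ISL_FORMAT_R8G8B8A8_UINT
+ *
+ * ISL_FORMAT_R8G8B8A8_UINT itself is lowered the same way on IVB and passes
+ * through unchanged on SKL (gen9+).
+ */
+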
+static const struct brw_image_param image_param_defaults = {
+   /* Set the swizzling shifts to all-ones to effectively disable
+    * swizzling -- See emit_address_calculation() in
+    * brw_fs_surface_builder.cpp for a more detailed explanation of
+    * these parameters.
+    */
+   .swizzling = { 0xff, 0xff },
+};
+
+void
+isl_surf_fill_image_param(const struct isl_device *dev,
+                          struct brw_image_param *param,
+                          const struct isl_surf *surf,
+                          const struct isl_view *view)
+{
+   *param = image_param_defaults;
+
+   param->size[0] = isl_minify(surf->logical_level0_px.w, view->base_level);
+   param->size[1] = isl_minify(surf->logical_level0_px.h, view->base_level);
+   if (surf->dim == ISL_SURF_DIM_3D) {
+      param->size[2] = isl_minify(surf->logical_level0_px.d, view->base_level);
+   } else {
+      param->size[2] = surf->logical_level0_px.array_len -
+                       view->base_array_layer;
+   }
+
+   isl_surf_get_image_offset_el(surf, view->base_level, view->base_array_layer,
+                                0, &param->offset[0],  &param->offset[1]);
+
+   const int cpp = isl_format_get_layout(surf->format)->bs;
+   param->stride[0] = cpp;
+   param->stride[1] = surf->row_pitch / cpp;
+
+   const struct isl_extent3d image_align_sa =
+      isl_surf_get_image_alignment_sa(surf);
+   if (ISL_DEV_GEN(dev) < 9 && surf->dim == ISL_SURF_DIM_3D) {
+      param->stride[2] = isl_align_npot(param->size[0], image_align_sa.w);
+      param->stride[3] = isl_align_npot(param->size[1], image_align_sa.h);
+   } else {
+      param->stride[2] = 0;
+      param->stride[3] = isl_surf_get_array_pitch_el_rows(surf);
+   }
+
+   switch (surf->tiling) {
+   case ISL_TILING_LINEAR:
+      /* image_param_defaults is good enough */
+      break;
+
+   case ISL_TILING_X:
+      /* An X tile is a rectangular block of 512x8 bytes. */
+      param->tiling[0] = isl_log2u(512 / cpp);
+      param->tiling[1] = isl_log2u(8);
+
+      if (dev->has_bit6_swizzling) {
+         /* Right shifts required to swizzle bits 9 and 10 of the memory
+          * address with bit 6.
+          */
+         param->swizzling[0] = 3;
+         param->swizzling[1] = 4;
+      }
+      break;
+
+   case ISL_TILING_Y0:
+      /* The layout of a Y-tiled surface in memory isn't fundamentally
+       * different from the layout of an X-tiled surface; we simply pretend
+       * that the surface is broken up into a number of smaller 16Bx32 tiles,
+       * each one arranged in X-major order just as in the X-tiling case.
+       */
+      param->tiling[0] = isl_log2u(16 / cpp);
+      param->tiling[1] = isl_log2u(32);
+
+      if (dev->has_bit6_swizzling) {
+         /* Right shift required to swizzle bit 9 of the memory address with
+          * bit 6.
+          */
+         param->swizzling[0] = 3;
+         param->swizzling[1] = 0xff;
+      }
+      break;
+
+   default:
+      assert(!"Unhandled storage image tiling");
+   }
+
+   /* 3D textures are arranged in 2D in memory with 2^lod slices per row.  The
+    * address calculation algorithm (emit_address_calculation() in
+    * brw_fs_surface_builder.cpp) handles this as a sort of tiling with
+    * modulus equal to the LOD.
+    */
+   param->tiling[2] = (ISL_DEV_GEN(dev) < 9 && surf->dim == ISL_SURF_DIM_3D ?
+                       view->base_level : 0);
+}
+
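+/* Worked example (illustrative) for the X-tiling parameters above: with
+ * cpp = 4, tiling[0] = isl_log2u(512 / 4) = 7 and tiling[1] = isl_log2u(8) = 3,
+ * i.e. the shader address calculation sees a tile of 2^7 = 128 pixels by
+ * 2^3 = 8 rows.
+ */
+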
+void
+isl_buffer_fill_image_param(const struct isl_device *dev,
+                            struct brw_image_param *param,
+                            enum isl_format format,
+                            uint64_t size)
+{
+   *param = image_param_defaults;
+
+   param->stride[0] = isl_format_layouts[format].bs;
+   param->size[0] = size / param->stride[0];
+}
diff --git a/src/intel/isl/isl_surface_state.c b/src/intel/isl/isl_surface_state.c
new file mode 100644 (file)
index 0000000..6afe45d
--- /dev/null
@@ -0,0 +1,468 @@
+/*
+ * Copyright 2016 Intel Corporation
+ *
+ *  Permission is hereby granted, free of charge, to any person obtaining a
+ *  copy of this software and associated documentation files (the "Software"),
+ *  to deal in the Software without restriction, including without limitation
+ *  the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ *  and/or sell copies of the Software, and to permit persons to whom the
+ *  Software is furnished to do so, subject to the following conditions:
+ *
+ *  The above copyright notice and this permission notice (including the next
+ *  paragraph) shall be included in all copies or substantial portions of the
+ *  Software.
+ *
+ *  THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ *  IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ *  FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ *  THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ *  LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ *  FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ *  IN THE SOFTWARE.
+ */
+
+#include <stdint.h>
+
+#define __gen_address_type uint64_t
+#define __gen_user_data void
+
+static inline uint64_t
+__gen_combine_address(void *data, void *loc, uint64_t addr, uint32_t delta)
+{
+   return addr + delta;
+}
+
+#include "genxml/gen_macros.h"
+#include "genxml/genX_pack.h"
+
+#include "isl_priv.h"
+
+#define __PASTE2(x, y) x ## y
+#define __PASTE(x, y) __PASTE2(x, y)
+#define isl_genX(x) __PASTE(isl_, genX(x))
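+
+/* For example (assuming the usual genX() macro from gen_macros.h), in a gen8
+ * build isl_genX(surf_fill_state_s) is expected to expand to
+ * isl_gen8_surf_fill_state_s.
+ */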
+
+#if GEN_GEN >= 8
+static const uint8_t isl_to_gen_halign[] = {
+    [4] = HALIGN4,
+    [8] = HALIGN8,
+    [16] = HALIGN16,
+};
+
+static const uint8_t isl_to_gen_valign[] = {
+    [4] = VALIGN4,
+    [8] = VALIGN8,
+    [16] = VALIGN16,
+};
+#else
+static const uint8_t isl_to_gen_halign[] = {
+    [4] = HALIGN_4,
+    [8] = HALIGN_8,
+};
+
+static const uint8_t isl_to_gen_valign[] = {
+    [2] = VALIGN_2,
+    [4] = VALIGN_4,
+};
+#endif
+
+#if GEN_GEN >= 8
+static const uint8_t isl_to_gen_tiling[] = {
+   [ISL_TILING_LINEAR]  = LINEAR,
+   [ISL_TILING_X]       = XMAJOR,
+   [ISL_TILING_Y0]      = YMAJOR,
+   [ISL_TILING_Yf]      = YMAJOR,
+   [ISL_TILING_Ys]      = YMAJOR,
+   [ISL_TILING_W]       = WMAJOR,
+};
+#endif
+
+#if GEN_GEN >= 8
+static const uint32_t isl_to_gen_multisample_layout[] = {
+   [ISL_MSAA_LAYOUT_NONE]           = MSS,
+   [ISL_MSAA_LAYOUT_INTERLEAVED]    = DEPTH_STENCIL,
+   [ISL_MSAA_LAYOUT_ARRAY]          = MSS,
+};
+#else
+static const uint32_t isl_to_gen_multisample_layout[] = {
+   [ISL_MSAA_LAYOUT_NONE]           = MSFMT_MSS,
+   [ISL_MSAA_LAYOUT_INTERLEAVED]    = MSFMT_DEPTH_STENCIL,
+   [ISL_MSAA_LAYOUT_ARRAY]          = MSFMT_MSS,
+};
+#endif
+
+static uint8_t
+get_surftype(enum isl_surf_dim dim, isl_surf_usage_flags_t usage)
+{
+   switch (dim) {
+   default:
+      unreachable("bad isl_surf_dim");
+   case ISL_SURF_DIM_1D:
+      assert(!(usage & ISL_SURF_USAGE_CUBE_BIT));
+      return SURFTYPE_1D;
+   case ISL_SURF_DIM_2D:
+      if (usage & ISL_SURF_USAGE_STORAGE_BIT) {
+         /* Storage images are always plain 2-D, not cube */
+         return SURFTYPE_2D;
+      } else if (usage & ISL_SURF_USAGE_CUBE_BIT) {
+         return SURFTYPE_CUBE;
+      } else {
+         return SURFTYPE_2D;
+      }
+   case ISL_SURF_DIM_3D:
+      assert(!(usage & ISL_SURF_USAGE_CUBE_BIT));
+      return SURFTYPE_3D;
+   }
+}
+
+/**
+ * Get the values to pack into RENDER_SURFACE_STATE.SurfaceHorizontalAlignment
+ * and SurfaceVerticalAlignment.
+ */
+static void
+get_halign_valign(const struct isl_surf *surf,
+                  uint32_t *halign, uint32_t *valign)
+{
+   if (GEN_GEN >= 9) {
+      if (isl_tiling_is_std_y(surf->tiling) ||
+          surf->dim_layout == ISL_DIM_LAYOUT_GEN9_1D) {
+         /* The hardware ignores the alignment values. Anyway, the surface's
+          * true alignment is likely outside the enum range of HALIGN* and
+          * VALIGN*.
+          */
+         *halign = 0;
+         *valign = 0;
+      } else {
+         /* On Skylake, RENDER_SURFACE_STATE.SurfaceVerticalAlignment is in units
+          * of surface elements (not pixels nor samples). For compressed formats,
+          * a "surface element" is defined as a compression block.  For example,
+          * if SurfaceVerticalAlignment is VALIGN_4 and SurfaceFormat is an ETC2
+          * format (ETC2 has a block height of 4), then the vertical alignment is
+          * 4 compression blocks or, equivalently, 16 pixels.
+          */
+         struct isl_extent3d image_align_el
+            = isl_surf_get_image_alignment_el(surf);
+
+         *halign = isl_to_gen_halign[image_align_el.width];
+         *valign = isl_to_gen_valign[image_align_el.height];
+      }
+   } else {
+      /* Pre-Skylake, RENDER_SURFACE_STATE.SurfaceVerticalAlignment is in
+       * units of surface samples.  For example, if SurfaceVerticalAlignment
+       * is VALIGN_4 and the surface is single-sampled, then for any surface
+       * format (compressed or not) the vertical alignment is
+       * 4 pixels.
+       */
+      struct isl_extent3d image_align_sa
+         = isl_surf_get_image_alignment_sa(surf);
+
+      *halign = isl_to_gen_halign[image_align_sa.width];
+      *valign = isl_to_gen_valign[image_align_sa.height];
+   }
+}
+
+#if GEN_GEN >= 8
+static uint32_t
+get_qpitch(const struct isl_surf *surf)
+{
+   switch (surf->dim) {
+   default:
+      assert(!"Bad isl_surf_dim");
+   case ISL_SURF_DIM_1D:
+      if (GEN_GEN >= 9) {
+         /* QPitch is usually expressed as rows of surface elements (where
+          * a surface element is a compression block or a single surface
+          * sample). Skylake 1D is an outlier.
+          *
+          * From the Skylake BSpec >> Memory Views >> Common Surface
+          * Formats >> Surface Layout and Tiling >> 1D Surfaces:
+          *
+          *    Surface QPitch specifies the distance in pixels between array
+          *    slices.
+          */
+         return isl_surf_get_array_pitch_el(surf);
+      } else {
+         return isl_surf_get_array_pitch_el_rows(surf);
+      }
+   case ISL_SURF_DIM_2D:
+   case ISL_SURF_DIM_3D:
+      if (GEN_GEN >= 9) {
+         return isl_surf_get_array_pitch_el_rows(surf);
+      } else {
+         /* From the Broadwell PRM for RENDER_SURFACE_STATE.QPitch
+          *
+          *    "This field must be set to an integer multiple of the Surface
+          *    Vertical Alignment. For compressed textures (BC*, FXT1,
+          *    ETC*, and EAC* Surface Formats), this field is in units of
+          *    rows in the uncompressed surface, and must be set to an
+          *    integer multiple of the vertical alignment parameter "j"
+          *    defined in the Common Surface Formats section."
+          */
+         return isl_surf_get_array_pitch_sa_rows(surf);
+      }
+   }
+}
+#endif /* GEN_GEN >= 8 */
+
+void
+isl_genX(surf_fill_state_s)(const struct isl_device *dev, void *state,
+                            const struct isl_surf_fill_state_info *restrict info)
+{
+   uint32_t halign, valign;
+   get_halign_valign(info->surf, &halign, &valign);
+
+   struct GENX(RENDER_SURFACE_STATE) s = {
+      .SurfaceType = get_surftype(info->surf->dim, info->view->usage),
+      .SurfaceArray = info->surf->phys_level0_sa.array_len > 1,
+      .SurfaceVerticalAlignment = valign,
+      .SurfaceHorizontalAlignment = halign,
+
+#if GEN_GEN >= 8
+      .TileMode = isl_to_gen_tiling[info->surf->tiling],
+#else
+      .TiledSurface = info->surf->tiling != ISL_TILING_LINEAR,
+      .TileWalk = info->surf->tiling == ISL_TILING_X ? TILEWALK_XMAJOR :
+                                                       TILEWALK_YMAJOR,
+#endif
+
+      .VerticalLineStride = 0,
+      .VerticalLineStrideOffset = 0,
+
+#if (GEN_GEN == 7)
+      .SurfaceArraySpacing = info->surf->array_pitch_span ==
+                             ISL_ARRAY_PITCH_SPAN_COMPACT,
+#endif
+
+#if GEN_GEN >= 8
+      .SamplerL2BypassModeDisable = true,
+#endif
+
+#if GEN_GEN >= 8
+      .RenderCacheReadWriteMode = WriteOnlyCache,
+#else
+      .RenderCacheReadWriteMode = 0,
+#endif
+
+#if GEN_GEN >= 8
+      .CubeFaceEnablePositiveZ = 1,
+      .CubeFaceEnableNegativeZ = 1,
+      .CubeFaceEnablePositiveY = 1,
+      .CubeFaceEnableNegativeY = 1,
+      .CubeFaceEnablePositiveX = 1,
+      .CubeFaceEnableNegativeX = 1,
+#else
+      .CubeFaceEnables = 0x3f,
+#endif
+
+#if GEN_GEN >= 8
+      .SurfaceQPitch = get_qpitch(info->surf) >> 2,
+#endif
+
+      .Width = info->surf->logical_level0_px.width - 1,
+      .Height = info->surf->logical_level0_px.height - 1,
+      .Depth = 0, /* TEMPLATE */
+
+      .SurfacePitch = info->surf->row_pitch - 1,
+      .RenderTargetViewExtent = 0, /* TEMPLATE */
+      .MinimumArrayElement = 0, /* TEMPLATE */
+
+      .MultisampledSurfaceStorageFormat =
+         isl_to_gen_multisample_layout[info->surf->msaa_layout],
+      .NumberofMultisamples = ffs(info->surf->samples) - 1,
+      .MultisamplePositionPaletteIndex = 0, /* UNUSED */
+
+      .XOffset = 0,
+      .YOffset = 0,
+
+      .ResourceMinLOD = 0.0,
+
+      .MIPCountLOD = 0, /* TEMPLATE */
+      .SurfaceMinLOD = 0, /* TEMPLATE */
+
+#if (GEN_GEN >= 8 || GEN_IS_HASWELL)
+      .ShaderChannelSelectRed = info->view->channel_select[0],
+      .ShaderChannelSelectGreen = info->view->channel_select[1],
+      .ShaderChannelSelectBlue = info->view->channel_select[2],
+      .ShaderChannelSelectAlpha = info->view->channel_select[3],
+#endif
+
+      .SurfaceBaseAddress = info->address,
+      .MOCS = info->mocs,
+
+#if GEN_GEN >= 8
+      .AuxiliarySurfaceMode = AUX_NONE,
+#else
+      .MCSEnable = false,
+#endif
+   };
+
+   if (info->view->usage & ISL_SURF_USAGE_STORAGE_BIT) {
+      s.SurfaceFormat = isl_lower_storage_image_format(dev, info->view->format);
+   } else {
+      s.SurfaceFormat = info->view->format;
+   }
+
+   switch (s.SurfaceType) {
+   case SURFTYPE_1D:
+   case SURFTYPE_2D:
+      s.MinimumArrayElement = info->view->base_array_layer;
+
+      /* From the Broadwell PRM >> RENDER_SURFACE_STATE::Depth:
+       *
+       *    For SURFTYPE_1D, 2D, and CUBE: The range of this field is reduced
+       *    by one for each increase from zero of Minimum Array Element. For
+       *    example, if Minimum Array Element is set to 1024 on a 2D surface,
+       *    the range of this field is reduced to [0,1023].
+       *
+       * In other words, 'Depth' is the number of array layers.
+       */
+      s.Depth = info->view->array_len - 1;
+
+      /* From the Broadwell PRM >> RENDER_SURFACE_STATE::RenderTargetViewExtent:
+       *
+       *    For Render Target and Typed Dataport 1D and 2D Surfaces:
+       *    This field must be set to the same value as the Depth field.
+       */
+      s.RenderTargetViewExtent = s.Depth;
+      break;
+   case SURFTYPE_CUBE:
+      s.MinimumArrayElement = info->view->base_array_layer;
+      /* Same as SURFTYPE_2D, but divided by 6 */
+      s.Depth = info->view->array_len / 6 - 1;
+      s.RenderTargetViewExtent = s.Depth;
+      break;
+   case SURFTYPE_3D:
+      s.MinimumArrayElement = info->view->base_array_layer;
+
+      /* From the Broadwell PRM >> RENDER_SURFACE_STATE::Depth:
+       *
+       *    If the volume texture is MIP-mapped, this field specifies the
+       *    depth of the base MIP level.
+       */
+      s.Depth = info->surf->logical_level0_px.depth - 1;
+
+      /* From the Broadwell PRM >> RENDER_SURFACE_STATE::RenderTargetViewExtent:
+       *
+       *    For Render Target and Typed Dataport 3D Surfaces: This field
+       *    indicates the extent of the accessible 'R' coordinates minus 1 on
+       *    the LOD currently being rendered to.
+       */
+      s.RenderTargetViewExtent = isl_minify(info->surf->logical_level0_px.depth,
+                                            info->view->base_level) - 1;
+      break;
+   default:
+      unreachable("bad SurfaceType");
+   }
+
+   if (info->view->usage & ISL_SURF_USAGE_RENDER_TARGET_BIT) {
+      /* For render target surfaces, the hardware interprets field
+       * MIPCount/LOD as LOD. The Broadwell PRM says:
+       *
+       *    MIPCountLOD defines the LOD that will be rendered into.
+       *    SurfaceMinLOD is ignored.
+       */
+      s.MIPCountLOD = info->view->base_level;
+      s.SurfaceMinLOD = 0;
+   } else {
+      /* For non render target surfaces, the hardware interprets field
+       * MIPCount/LOD as MIPCount.  The range of levels accessible by the
+       * sampler engine is [SurfaceMinLOD, SurfaceMinLOD + MIPCountLOD].
+       */
+      s.SurfaceMinLOD = info->view->base_level;
+      s.MIPCountLOD = MAX(info->view->levels, 1) - 1;
+   }
+
+#if GEN_GEN >= 8
+   /* From the CHV PRM, Volume 2d, page 321 (RENDER_SURFACE_STATE dword 0
+    * bit 9 "Sampler L2 Bypass Mode Disable" Programming Notes):
+    *
+    *    This bit must be set for the following surface types: BC2_UNORM
+    *    BC3_UNORM BC5_UNORM BC5_SNORM BC7_UNORM
+    */
+   if (GEN_GEN >= 9 || dev->info->is_cherryview) {
+      switch (info->view->format) {
+      case ISL_FORMAT_BC2_UNORM:
+      case ISL_FORMAT_BC3_UNORM:
+      case ISL_FORMAT_BC5_UNORM:
+      case ISL_FORMAT_BC5_SNORM:
+      case ISL_FORMAT_BC7_UNORM:
+         s.SamplerL2BypassModeDisable = true;
+         break;
+      default:
+         break;
+      }
+   }
+#endif
+
+   if (GEN_GEN <= 8) {
+      /* Prior to Skylake, we only have one bit per channel for the clear
+       * color, which gives us 0 or 1 in whatever the surface's format
+       * happens to be.
+       */
+      if (isl_format_has_int_channel(info->view->format)) {
+         for (unsigned i = 0; i < 4; i++) {
+            assert(info->clear_color.u32[i] == 0 ||
+                   info->clear_color.u32[i] == 1);
+         }
+      } else {
+         for (unsigned i = 0; i < 4; i++) {
+            assert(info->clear_color.f32[i] == 0.0f ||
+                   info->clear_color.f32[i] == 1.0f);
+         }
+      }
+      s.RedClearColor = info->clear_color.u32[0] != 0;
+      s.GreenClearColor = info->clear_color.u32[1] != 0;
+      s.BlueClearColor = info->clear_color.u32[2] != 0;
+      s.AlphaClearColor = info->clear_color.u32[3] != 0;
+   } else {
+      s.RedClearColor = info->clear_color.u32[0];
+      s.GreenClearColor = info->clear_color.u32[1];
+      s.BlueClearColor = info->clear_color.u32[2];
+      s.AlphaClearColor = info->clear_color.u32[3];
+   }
+
+   GENX(RENDER_SURFACE_STATE_pack)(NULL, state, &s);
+}
+
+void
+isl_genX(buffer_fill_state_s)(void *state,
+                              const struct isl_buffer_fill_state_info *restrict info)
+{
+   uint32_t num_elements = info->size / info->stride;
+
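+   /* The element count minus one is split across the Width, Height, and
+    * Depth fields below: bits 6:0, 20:7, and 26:21 respectively.  As an
+    * illustrative example, a 1 MB buffer with a 16-byte stride gives
+    * num_elements == 65536, so Width == 127, Height == 511, and Depth == 0.
+    */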
+   struct GENX(RENDER_SURFACE_STATE) surface_state = {
+      .SurfaceType = SURFTYPE_BUFFER,
+      .SurfaceArray = false,
+      .SurfaceFormat = info->format,
+      .SurfaceVerticalAlignment = isl_to_gen_valign[4],
+      .SurfaceHorizontalAlignment = isl_to_gen_halign[4],
+      .Height = ((num_elements - 1) >> 7) & 0x3fff,
+      .Width = (num_elements - 1) & 0x7f,
+      .Depth = ((num_elements - 1) >> 21) & 0x3f,
+      .SurfacePitch = info->stride - 1,
+      .NumberofMultisamples = MULTISAMPLECOUNT_1,
+
+#if (GEN_GEN >= 8)
+      .TileMode = LINEAR,
+#else
+      .TiledSurface = false,
+#endif
+
+#if (GEN_GEN >= 8)
+      .SamplerL2BypassModeDisable = true,
+      .RenderCacheReadWriteMode = WriteOnlyCache,
+#else
+      .RenderCacheReadWriteMode = 0,
+#endif
+
+      .MOCS = info->mocs,
+
+#if (GEN_GEN >= 8 || GEN_IS_HASWELL)
+      .ShaderChannelSelectRed = SCS_RED,
+      .ShaderChannelSelectGreen = SCS_GREEN,
+      .ShaderChannelSelectBlue = SCS_BLUE,
+      .ShaderChannelSelectAlpha = SCS_ALPHA,
+#endif
+      .SurfaceBaseAddress = info->address,
+   };
+
+   GENX(RENDER_SURFACE_STATE_pack)(NULL, state, &surface_state);
+}
diff --git a/src/intel/isl/tests/.gitignore b/src/intel/isl/tests/.gitignore
new file mode 100644 (file)
index 0000000..ba70ecf
--- /dev/null
@@ -0,0 +1 @@
+/isl_surf_get_image_offset_test
diff --git a/src/intel/isl/tests/isl_surf_get_image_offset_test.c b/src/intel/isl/tests/isl_surf_get_image_offset_test.c
new file mode 100644 (file)
index 0000000..34b336e
--- /dev/null
@@ -0,0 +1,353 @@
+/*
+ * Copyright 2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include <assert.h>
+#include <stdbool.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+#include "brw_device_info.h"
+#include "isl.h"
+#include "isl_priv.h"
+
+#define BDW_GT2_DEVID 0x161a
+
+// An assert that works regardless of NDEBUG.
+#define t_assert(cond) \
+   do { \
+      if (!(cond)) { \
+         fprintf(stderr, "%s:%d: assertion failed\n", __FILE__, __LINE__); \
+         abort(); \
+      } \
+   } while (0)
+
+static void
+t_assert_extent4d(const struct isl_extent4d *e, uint32_t width,
+                  uint32_t height, uint32_t depth, uint32_t array_len)
+{
+   t_assert(e->width == width);
+   t_assert(e->height == height);
+   t_assert(e->depth == depth);
+   t_assert(e->array_len == array_len);
+}
+
+static void
+t_assert_image_alignment_el(const struct isl_surf *surf,
+                            uint32_t w, uint32_t h, uint32_t d)
+{
+   struct isl_extent3d align_el;
+
+   align_el = isl_surf_get_image_alignment_el(surf);
+   t_assert(align_el.w == w);
+   t_assert(align_el.h == h);
+   t_assert(align_el.d == d);
+
+}
+
+static void
+t_assert_image_alignment_sa(const struct isl_surf *surf,
+                            uint32_t w, uint32_t h, uint32_t d)
+{
+   struct isl_extent3d align_sa;
+
+   align_sa = isl_surf_get_image_alignment_sa(surf);
+   t_assert(align_sa.w == w);
+   t_assert(align_sa.h == h);
+   t_assert(align_sa.d == d);
+
+}
+
+static void
+t_assert_offset_el(const struct isl_surf *surf,
+                   uint32_t level,
+                   uint32_t logical_array_layer,
+                   uint32_t logical_z_offset_px,
+                   uint32_t expected_x_offset_el,
+                   uint32_t expected_y_offset_el)
+{
+   uint32_t x, y;
+   isl_surf_get_image_offset_el(surf, level, logical_array_layer,
+                                logical_z_offset_px, &x, &y);
+
+   t_assert(x == expected_x_offset_el);
+   t_assert(y == expected_y_offset_el);
+}
+
+static void
+t_assert_intratile_offset_el(const struct isl_device *dev,
+                             const struct isl_surf *surf,
+                             uint32_t level,
+                             uint32_t logical_array_layer,
+                             uint32_t logical_z_offset_px,
+                             uint32_t expected_base_address_offset,
+                             uint32_t expected_x_offset_el,
+                             uint32_t expected_y_offset_el)
+{
+   uint32_t base_address_offset;
+   uint32_t x_offset_el, y_offset_el;
+   isl_surf_get_image_intratile_offset_el(dev, surf,
+                                          level,
+                                          logical_array_layer,
+                                          logical_z_offset_px,
+                                          &base_address_offset,
+                                          &x_offset_el,
+                                          &y_offset_el);
+
+   t_assert(base_address_offset == expected_base_address_offset);
+   t_assert(x_offset_el == expected_x_offset_el);
+   t_assert(y_offset_el == expected_y_offset_el);
+}
+
+static void
+t_assert_phys_level0_sa(const struct isl_surf *surf, uint32_t width,
+                        uint32_t height, uint32_t depth, uint32_t array_len)
+{
+   t_assert_extent4d(&surf->phys_level0_sa, width, height, depth, array_len);
+}
+
+static void
+t_assert_gen4_3d_layer(const struct isl_surf *surf,
+                       uint32_t level,
+                       uint32_t aligned_width,
+                       uint32_t aligned_height,
+                       uint32_t depth,
+                       uint32_t horiz_layers,
+                       uint32_t vert_layers,
+                       uint32_t *base_y)
+{
+   for (uint32_t z = 0; z < depth; ++z) {
+      t_assert_offset_el(surf, level, 0, z,
+                        aligned_width * (z % horiz_layers),
+                        *base_y + aligned_height * (z / horiz_layers));
+   }
+
+   *base_y += aligned_height * vert_layers;
+}
+
+static void
+test_bdw_2d_r8g8b8a8_unorm_512x512_array01_samples01_noaux_tiley0(void)
+{
+   bool ok;
+
+   struct isl_device dev;
+   isl_device_init(&dev, brw_get_device_info(BDW_GT2_DEVID),
+                   /*bit6_swizzle*/ false);
+
+   struct isl_surf surf;
+   ok = isl_surf_init(&dev, &surf,
+                      .dim = ISL_SURF_DIM_2D,
+                      .format = ISL_FORMAT_R8G8B8A8_UNORM,
+                      .width = 512,
+                      .height = 512,
+                      .depth = 1,
+                      .levels = 10,
+                      .array_len = 1,
+                      .samples = 1,
+                      .usage = ISL_SURF_USAGE_TEXTURE_BIT |
+                               ISL_SURF_USAGE_DISABLE_AUX_BIT,
+                      .tiling_flags = ISL_TILING_Y0_BIT);
+   t_assert(ok);
+
+   t_assert_image_alignment_el(&surf, 4, 4, 1);
+   t_assert_image_alignment_sa(&surf, 4, 4, 1);
+   t_assert_phys_level0_sa(&surf, 512, 512, 1, 1);
+   t_assert(isl_surf_get_array_pitch_el_rows(&surf) >= 772);
+   t_assert(isl_surf_get_array_pitch_el_rows(&surf) ==
+            isl_surf_get_array_pitch_sa_rows(&surf));
+
+   /* Row pitch should be minimal possible */
+   t_assert(surf.row_pitch == 2048);
+
+   t_assert_offset_el(&surf, 0, 0, 0, 0, 0); // +0, +0
+   t_assert_offset_el(&surf, 1, 0, 0, 0, 512); // +0, +512
+   t_assert_offset_el(&surf, 2, 0, 0, 256, 512); // +256, +0
+   t_assert_offset_el(&surf, 3, 0, 0, 256, 640); // +0, +128
+   t_assert_offset_el(&surf, 4, 0, 0, 256, 704); // +0, +64
+   t_assert_offset_el(&surf, 5, 0, 0, 256, 736); // +0, +32
+   t_assert_offset_el(&surf, 6, 0, 0, 256, 752); // +0, +16
+   t_assert_offset_el(&surf, 7, 0, 0, 256, 760); // +0, +8
+   t_assert_offset_el(&surf, 8, 0, 0, 256, 764); // +0, +4
+   t_assert_offset_el(&surf, 9, 0, 0, 256, 768); // +0, +4
+
+   t_assert_intratile_offset_el(&dev, &surf, 0, 0, 0,      0x0, 0,  0);
+   t_assert_intratile_offset_el(&dev, &surf, 1, 0, 0, 0x100000, 0,  0);
+   t_assert_intratile_offset_el(&dev, &surf, 2, 0, 0, 0x108000, 0,  0);
+   t_assert_intratile_offset_el(&dev, &surf, 3, 0, 0, 0x148000, 0,  0);
+   t_assert_intratile_offset_el(&dev, &surf, 4, 0, 0, 0x168000, 0,  0);
+   t_assert_intratile_offset_el(&dev, &surf, 5, 0, 0, 0x178000, 0,  0);
+   t_assert_intratile_offset_el(&dev, &surf, 6, 0, 0, 0x178000, 0, 16);
+   t_assert_intratile_offset_el(&dev, &surf, 7, 0, 0, 0x178000, 0, 24);
+   t_assert_intratile_offset_el(&dev, &surf, 8, 0, 0, 0x178000, 0, 28);
+   t_assert_intratile_offset_el(&dev, &surf, 9, 0, 0, 0x188000, 0,  0);
+}
+
+static void
+test_bdw_2d_r8g8b8a8_unorm_1024x1024_array06_samples01_noaux_tiley0(void)
+{
+   bool ok;
+
+   struct isl_device dev;
+   isl_device_init(&dev, brw_get_device_info(BDW_GT2_DEVID),
+                   /*bit6_swizzle*/ false);
+
+   struct isl_surf surf;
+   ok = isl_surf_init(&dev, &surf,
+                      .dim = ISL_SURF_DIM_2D,
+                      .format = ISL_FORMAT_R8G8B8A8_UNORM,
+                      .width = 1024,
+                      .height = 1024,
+                      .depth = 1,
+                      .levels = 11,
+                      .array_len = 6,
+                      .samples = 1,
+                      .usage = ISL_SURF_USAGE_TEXTURE_BIT |
+                               ISL_SURF_USAGE_DISABLE_AUX_BIT,
+                      .tiling_flags = ISL_TILING_Y0_BIT);
+   t_assert(ok);
+
+   t_assert_image_alignment_el(&surf, 4, 4, 1);
+   t_assert_image_alignment_sa(&surf, 4, 4, 1);
+
+   t_assert(isl_surf_get_array_pitch_el_rows(&surf) >= 1540);
+   t_assert(isl_surf_get_array_pitch_el_rows(&surf) ==
+            isl_surf_get_array_pitch_sa_rows(&surf));
+
+   /* Row pitch should be minimal possible */
+   t_assert(surf.row_pitch == 4096);
+
+   for (uint32_t a = 0; a < 6; ++a) {
+      uint32_t b = a * isl_surf_get_array_pitch_sa_rows(&surf);
+
+      t_assert_offset_el(&surf, 0, a, 0, 0, b + 0); // +0, +0
+      t_assert_offset_el(&surf, 1, a, 0, 0, b + 1024); // +0, +1024
+      t_assert_offset_el(&surf, 2, a, 0, 512, b + 1024); // +512, +0
+      t_assert_offset_el(&surf, 3, a, 0, 512, b + 1280); // +0, +256
+      t_assert_offset_el(&surf, 4, a, 0, 512, b + 1408); // +0, +128
+      t_assert_offset_el(&surf, 5, a, 0, 512, b + 1472); // +0, +64
+      t_assert_offset_el(&surf, 6, a, 0, 512, b + 1504); // +0, +32
+      t_assert_offset_el(&surf, 7, a, 0, 512, b + 1520); // +0, +16
+      t_assert_offset_el(&surf, 8, a, 0, 512, b + 1528); // +0, +8
+      t_assert_offset_el(&surf, 9, a, 0, 512, b + 1532); // +0, +4
+      t_assert_offset_el(&surf, 10, a, 0, 512, b + 1536); // +0, +4
+
+   }
+
+   /* The layout below assumes a specific array pitch. It will need updating
+    * if isl's array pitch calculations ever change.
+    */
+   t_assert(isl_surf_get_array_pitch_el_rows(&surf) == 1540);
+
+   /* array layer 0 */
+   t_assert_intratile_offset_el(&dev, &surf,  0, 0, 0,         0x0, 0,  0);
+   t_assert_intratile_offset_el(&dev, &surf,  1, 0, 0,    0x400000, 0,  0);
+   t_assert_intratile_offset_el(&dev, &surf,  2, 0, 0,    0x410000, 0,  0);
+   t_assert_intratile_offset_el(&dev, &surf,  3, 0, 0,    0x510000, 0,  0);
+   t_assert_intratile_offset_el(&dev, &surf,  4, 0, 0,    0x590000, 0,  0);
+   t_assert_intratile_offset_el(&dev, &surf,  5, 0, 0,    0x5d0000, 0,  0);
+   t_assert_intratile_offset_el(&dev, &surf,  6, 0, 0,    0x5f0000, 0,  0);
+   t_assert_intratile_offset_el(&dev, &surf,  7, 0, 0,    0x5f0000, 0, 16);
+   t_assert_intratile_offset_el(&dev, &surf,  8, 0, 0,    0x5f0000, 0, 24);
+   t_assert_intratile_offset_el(&dev, &surf,  9, 0, 0,    0x5f0000, 0, 28);
+   t_assert_intratile_offset_el(&dev, &surf, 10, 0, 0,    0x610000, 0,  0);
+
+   /* array layer 1 */
+   t_assert_intratile_offset_el(&dev, &surf,  0, 1, 0,    0x600000, 0,  4);
+   t_assert_intratile_offset_el(&dev, &surf,  1, 1, 0,    0xa00000, 0,  4);
+   t_assert_intratile_offset_el(&dev, &surf,  2, 1, 0,    0xa10000, 0,  4);
+   t_assert_intratile_offset_el(&dev, &surf,  3, 1, 0,    0xb10000, 0,  4);
+   t_assert_intratile_offset_el(&dev, &surf,  4, 1, 0,    0xb90000, 0,  4);
+   t_assert_intratile_offset_el(&dev, &surf,  5, 1, 0,    0xbd0000, 0,  4);
+   t_assert_intratile_offset_el(&dev, &surf,  6, 1, 0,    0xbf0000, 0,  4);
+   t_assert_intratile_offset_el(&dev, &surf,  7, 1, 0,    0xbf0000, 0, 20);
+   t_assert_intratile_offset_el(&dev, &surf,  8, 1, 0,    0xbf0000, 0, 28);
+   t_assert_intratile_offset_el(&dev, &surf,  9, 1, 0,    0xc10000, 0,  0);
+   t_assert_intratile_offset_el(&dev, &surf, 10, 1, 0,    0xc10000, 0,  4);
+
+   /* array layer 2 */
+   t_assert_intratile_offset_el(&dev, &surf,  0, 2, 0,    0xc00000, 0,  8);
+   t_assert_intratile_offset_el(&dev, &surf,  1, 2, 0,   0x1000000, 0,  8);
+   t_assert_intratile_offset_el(&dev, &surf,  2, 2, 0,   0x1010000, 0,  8);
+   t_assert_intratile_offset_el(&dev, &surf,  3, 2, 0,   0x1110000, 0,  8);
+   t_assert_intratile_offset_el(&dev, &surf,  4, 2, 0,   0x1190000, 0,  8);
+   t_assert_intratile_offset_el(&dev, &surf,  5, 2, 0,   0x11d0000, 0,  8);
+   t_assert_intratile_offset_el(&dev, &surf,  6, 2, 0,   0x11f0000, 0,  8);
+   t_assert_intratile_offset_el(&dev, &surf,  7, 2, 0,   0x11f0000, 0, 24);
+   t_assert_intratile_offset_el(&dev, &surf,  8, 2, 0,   0x1210000, 0,  0);
+   t_assert_intratile_offset_el(&dev, &surf,  9, 2, 0,   0x1210000, 0,  4);
+   t_assert_intratile_offset_el(&dev, &surf, 10, 2, 0,   0x1210000, 0,  8);
+
+   /* skip the remaining array layers */
+}
+
+static void
+test_bdw_3d_r8g8b8a8_unorm_256x256x256_levels09_tiley0(void)
+{
+   bool ok;
+
+   struct isl_device dev;
+   isl_device_init(&dev, brw_get_device_info(BDW_GT2_DEVID),
+                   /*bit6_swizzle*/ false);
+
+   struct isl_surf surf;
+   ok = isl_surf_init(&dev, &surf,
+                      .dim = ISL_SURF_DIM_3D,
+                      .format = ISL_FORMAT_R8G8B8A8_UNORM,
+                      .width = 256,
+                      .height = 256,
+                      .depth = 256,
+                      .levels = 9,
+                      .array_len = 1,
+                      .samples = 1,
+                      .usage = ISL_SURF_USAGE_TEXTURE_BIT |
+                               ISL_SURF_USAGE_DISABLE_AUX_BIT,
+                      .tiling_flags = ISL_TILING_Y0_BIT);
+   t_assert(ok);
+
+   t_assert_image_alignment_el(&surf, 4, 4, 1);
+   t_assert_image_alignment_sa(&surf, 4, 4, 1);
+   t_assert(isl_surf_get_array_pitch_el_rows(&surf) == 74916);
+   t_assert(isl_surf_get_array_pitch_sa_rows(&surf) ==
+            isl_surf_get_array_pitch_el_rows(&surf));
+
+   uint32_t base_y = 0;
+
+   t_assert_gen4_3d_layer(&surf, 0, 256, 256, 256,   1, 256, &base_y);
+   t_assert_gen4_3d_layer(&surf, 1, 128, 128, 128,   2,  64, &base_y);
+   t_assert_gen4_3d_layer(&surf, 2,  64,  64,  64,   4,  16, &base_y);
+   t_assert_gen4_3d_layer(&surf, 3,  32,  32,  32,   8,   4, &base_y);
+   t_assert_gen4_3d_layer(&surf, 4,  16,  16,  16,  16,   1, &base_y);
+   t_assert_gen4_3d_layer(&surf, 5,   8,   8,   8,  32,   1, &base_y);
+   t_assert_gen4_3d_layer(&surf, 6,   4,   4,   4,  64,   1, &base_y);
+   t_assert_gen4_3d_layer(&surf, 7,   4,   4,   2, 128,   1, &base_y);
+   t_assert_gen4_3d_layer(&surf, 8,   4,   4,   1, 256,   1, &base_y);
+}
+
+int main(void)
+{
+   /* FINISHME: Add tests for npot sizes */
+   /* FINISHME: Add tests for 1D surfaces */
+
+   test_bdw_2d_r8g8b8a8_unorm_512x512_array01_samples01_noaux_tiley0();
+   test_bdw_2d_r8g8b8a8_unorm_1024x1024_array06_samples01_noaux_tiley0();
+   test_bdw_3d_r8g8b8a8_unorm_256x256x256_levels09_tiley0();
+}
diff --git a/src/intel/vulkan/.gitignore b/src/intel/vulkan/.gitignore
new file mode 100644 (file)
index 0000000..40afc2e
--- /dev/null
@@ -0,0 +1,9 @@
+# Generated source files
+/*_spirv_autogen.h
+/anv_entrypoints.c
+/anv_entrypoints.h
+/wayland-drm-protocol.c
+/wayland-drm-client-protocol.h
+/dev_icd.json
+/intel_icd.json
+/gen*_pack.h
\ No newline at end of file
diff --git a/src/intel/vulkan/Makefile.am b/src/intel/vulkan/Makefile.am
new file mode 100644 (file)
index 0000000..acf84e5
--- /dev/null
@@ -0,0 +1,209 @@
+# Copyright © 2015 Intel Corporation
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice (including the next
+# paragraph) shall be included in all copies or substantial portions of the
+# Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+SUBDIRS = . tests
+
+vulkan_includedir = $(includedir)/vulkan
+
+vulkan_include_HEADERS =                               \
+       $(top_srcdir)/include/vulkan/vk_platform.h      \
+       $(top_srcdir)/include/vulkan/vulkan.h           \
+       $(top_srcdir)/include/vulkan/vulkan_intel.h
+
+# Used when generating entrypoints to filter out unwanted extensions
+VULKAN_ENTRYPOINT_CPPFLAGS = \
+   -I$(top_srcdir)/include/vulkan \
+   -DVK_USE_PLATFORM_XCB_KHR \
+   -DVK_USE_PLATFORM_WAYLAND_KHR
+
+lib_LTLIBRARIES = libvulkan_intel.la
+
+check_LTLIBRARIES = libvulkan-test.la
+
+PER_GEN_LIBS = \
+   libanv-gen7.la \
+   libanv-gen75.la \
+   libanv-gen8.la \
+   libanv-gen9.la
+
+noinst_LTLIBRARIES = $(PER_GEN_LIBS)
+
+# The gallium includes are for the util/u_math.h include from main/macros.h
+
+AM_CPPFLAGS = \
+       $(INTEL_CFLAGS) \
+       $(VALGRIND_CFLAGS) \
+       $(DEFINES) \
+       -I$(top_srcdir)/include \
+       -I$(top_srcdir)/src \
+       -I$(top_srcdir)/src/compiler \
+       -I$(top_srcdir)/src/mapi \
+       -I$(top_srcdir)/src/mesa \
+       -I$(top_srcdir)/src/mesa/drivers/dri/common \
+       -I$(top_srcdir)/src/mesa/drivers/dri/i965 \
+       -I$(top_srcdir)/src/gallium/auxiliary \
+       -I$(top_srcdir)/src/gallium/include \
+       -I$(top_srcdir)/src/intel/ \
+       -I$(top_builddir)/src \
+       -I$(top_builddir)/src/compiler \
+       -I$(top_builddir)/src/compiler/nir \
+       -I$(top_builddir)/src/intel
+
+libvulkan_intel_la_CFLAGS = $(CFLAGS) -Wno-override-init
+
+VULKAN_SOURCES =                                        \
+       anv_allocator.c                                 \
+       anv_cmd_buffer.c                                \
+       anv_batch_chain.c                               \
+       anv_descriptor_set.c                            \
+       anv_device.c                                    \
+        anv_dump.c                                      \
+       anv_entrypoints.c                               \
+       anv_entrypoints.h                               \
+       anv_formats.c                                   \
+       anv_image.c                                     \
+       anv_intel.c                                     \
+       anv_meta.c                                      \
+       anv_meta_blit.c                                 \
+       anv_meta_blit2d.c                               \
+       anv_meta_clear.c                                \
+       anv_meta_copy.c                                 \
+       anv_meta_resolve.c                              \
+       anv_nir_apply_dynamic_offsets.c                 \
+       anv_nir_apply_pipeline_layout.c                 \
+       anv_nir_lower_push_constants.c                  \
+       anv_pass.c                                      \
+       anv_pipeline.c                                  \
+       anv_pipeline_cache.c                            \
+       anv_private.h                                   \
+       anv_query.c                                     \
+       anv_util.c                                      \
+       anv_wsi.c                                       \
+       anv_wsi_x11.c
+
+BUILT_SOURCES =                                         \
+       anv_entrypoints.h                               \
+       anv_entrypoints.c
+
+libanv_gen7_la_SOURCES =                                \
+       genX_cmd_buffer.c                               \
+       genX_pipeline.c                                 \
+       gen7_cmd_buffer.c                               \
+       gen7_pipeline.c                                 \
+       genX_state.c
+libanv_gen7_la_CFLAGS = $(libvulkan_intel_la_CFLAGS) -DGEN_VERSIONx10=70
+
+libanv_gen75_la_SOURCES =                               \
+       genX_cmd_buffer.c                               \
+       genX_pipeline.c                                 \
+       gen7_cmd_buffer.c                               \
+       gen7_pipeline.c                                 \
+       genX_state.c
+libanv_gen75_la_CFLAGS = $(libvulkan_intel_la_CFLAGS) -DGEN_VERSIONx10=75
+
+libanv_gen8_la_SOURCES =                                       \
+       genX_cmd_buffer.c                               \
+       genX_pipeline.c                                 \
+       gen8_cmd_buffer.c                               \
+       gen8_pipeline.c                                 \
+       genX_state.c
+libanv_gen8_la_CFLAGS = $(libvulkan_intel_la_CFLAGS) -DGEN_VERSIONx10=80
+
+libanv_gen9_la_SOURCES =                                       \
+       genX_cmd_buffer.c                               \
+       genX_pipeline.c                                 \
+       gen8_cmd_buffer.c                               \
+       gen8_pipeline.c                                 \
+       genX_state.c
+libanv_gen9_la_CFLAGS = $(libvulkan_intel_la_CFLAGS) -DGEN_VERSIONx10=90
+
+if HAVE_EGL_PLATFORM_WAYLAND
+BUILT_SOURCES += \
+       wayland-drm-protocol.c \
+       wayland-drm-client-protocol.h
+
+%-protocol.c : $(top_srcdir)/src/egl/wayland/wayland-drm/%.xml
+       $(AM_V_GEN)$(WAYLAND_SCANNER) code < $< > $@
+
+%-client-protocol.h : $(top_srcdir)/src/egl/wayland/wayland-drm/%.xml
+       $(AM_V_GEN)$(WAYLAND_SCANNER) client-header < $< > $@
+
+AM_CPPFLAGS += -I$(top_srcdir)/src/egl/wayland/wayland-drm
+VULKAN_SOURCES += \
+       wayland-drm-protocol.c \
+       anv_wsi_wayland.c
+libvulkan_intel_la_CFLAGS += -DHAVE_WAYLAND_PLATFORM
+endif
+
+libvulkan_intel_la_SOURCES =                            \
+       $(VULKAN_SOURCES)                               \
+       anv_gem.c
+
+anv_entrypoints.h : anv_entrypoints_gen.py $(vulkan_include_HEADERS)
+       $(AM_V_GEN) cat $(vulkan_include_HEADERS) | $(CPP) $(VULKAN_ENTRYPOINT_CPPFLAGS) - | $(PYTHON2) $< header > $@
+
+anv_entrypoints.c : anv_entrypoints_gen.py $(vulkan_include_HEADERS)
+       $(AM_V_GEN) cat $(vulkan_include_HEADERS) | $(CPP) $(VULKAN_ENTRYPOINT_CPPFLAGS) - | $(PYTHON2) $< code > $@
+
+CLEANFILES = $(BUILT_SOURCES)
+
+libvulkan_intel_la_LIBADD = $(WAYLAND_LIBS) \
+       -lxcb -lxcb-dri3 -lxcb-present -lxcb-sync -lxshmfence \
+       $(top_builddir)/src/intel/isl/libisl.la \
+       $(top_builddir)/src/mesa/drivers/dri/i965/libi965_compiler.la \
+       $(top_builddir)/src/mesa/libmesa.la \
+       $(top_builddir)/src/mesa/drivers/dri/common/libdri_test_stubs.la \
+       -lpthread -ldl -lstdc++ \
+        $(PER_GEN_LIBS)
+
+libvulkan_intel_la_LDFLAGS = \
+        -module -avoid-version -shared -shrext .so
+
+
+# Generate icd files. It would be nice to just be able to add these to
+# AC_CONFIG_FILES, but @libdir@ typically expands to '${exec_prefix}/lib64',
+# which we can't put in the icd file. When running sed from the Makefile we
+# can use ${libdir}, which expands completely and we avoid putting Makefile
+# variables in the icd file.
+
+icdconfdir=$(sysconfdir)/vulkan/icd.d
+icdconf_DATA = intel_icd.json
+noinst_DATA = dev_icd.json
+
+%.json : %.json.in
+       $(AM_V_GEN) $(SED) \
+               -e "s#@build_libdir@#${abs_top_builddir}/${LIB_DIR}#" \
+               -e "s#@install_libdir@#${libdir}#" < $< > $@
+
+
+# Libvulkan with dummy gem. Used for unit tests.
+
+libvulkan_test_la_SOURCES =                             \
+       $(VULKAN_SOURCES)                               \
+       anv_gem_stubs.c
+
+libvulkan_test_la_CFLAGS =                              \
+       -I$(top_srcdir)/src/intel/vulkan                \
+       $(libvulkan_intel_la_CFLAGS)
+
+libvulkan_test_la_LIBADD = $(libvulkan_intel_la_LIBADD)
+
+include $(top_srcdir)/install-lib-links.mk
diff --git a/src/intel/vulkan/anv_allocator.c b/src/intel/vulkan/anv_allocator.c
new file mode 100644 (file)
index 0000000..4fc8338
--- /dev/null
@@ -0,0 +1,870 @@
+/*
+ * Copyright © 2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#define _DEFAULT_SOURCE
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <values.h>
+#include <assert.h>
+#include <linux/futex.h>
+#include <linux/memfd.h>
+#include <sys/time.h>
+#include <sys/mman.h>
+#include <sys/syscall.h>
+
+#include "anv_private.h"
+
+#ifdef HAVE_VALGRIND
+#define VG_NOACCESS_READ(__ptr) ({                       \
+   VALGRIND_MAKE_MEM_DEFINED((__ptr), sizeof(*(__ptr))); \
+   __typeof(*(__ptr)) __val = *(__ptr);                  \
+   VALGRIND_MAKE_MEM_NOACCESS((__ptr), sizeof(*(__ptr)));\
+   __val;                                                \
+})
+#define VG_NOACCESS_WRITE(__ptr, __val) ({                  \
+   VALGRIND_MAKE_MEM_UNDEFINED((__ptr), sizeof(*(__ptr)));  \
+   *(__ptr) = (__val);                                      \
+   VALGRIND_MAKE_MEM_NOACCESS((__ptr), sizeof(*(__ptr)));   \
+})
+#else
+#define VG_NOACCESS_READ(__ptr) (*(__ptr))
+#define VG_NOACCESS_WRITE(__ptr, __val) (*(__ptr) = (__val))
+#endif
+
+/* Design goals:
+ *
+ *  - Lock free (except when resizing underlying bos)
+ *
+ *  - Constant time allocation with typically only one atomic
+ *
+ *  - Multiple allocation sizes without fragmentation
+ *
+ *  - Can grow while keeping addresses and offset of contents stable
+ *
+ *  - All allocations within one bo so we can point one of the
+ *    STATE_BASE_ADDRESS pointers at it.
+ *
+ * The overall design is a two-level allocator: top level is a fixed size, big
+ * block (8k) allocator, which operates out of a bo.  Allocation is done by
+ * either pulling a block from the free list or growing the used range of the
+ * bo.  Growing the range may run out of space in the bo which we then need to
+ * grow.  Growing the bo is tricky in a multi-threaded, lockless environment:
+ * we need to keep all pointers and contents in the old map valid.  GEM bos in
+ * general can't grow, but we use a trick: we create a memfd and use ftruncate
+ * to grow it as necessary.  We mmap the new size and then create a gem bo for
+ * it using the new gem userptr ioctl.  Without heavy-handed locking around
+ * our allocation fast-path, there isn't really a way to munmap the old mmap,
+ * so we just keep it around until garbage collection time.  While the block
+ * allocator is lockless for normal operations, we block other threads trying
+ * to allocate while we're growing the map.  It shouldn't happen often, and
+ * growing is fast anyway.
+ *
+ * At the next level we can use various sub-allocators.  The state pool is a
+ * pool of smaller, fixed size objects, which operates much like the block
+ * pool.  It uses a free list for freeing objects, but when it runs out of
+ * space it just allocates a new block from the block pool.  This allocator is
+ * intended for longer lived state objects such as SURFACE_STATE and most
+ * other persistent state objects in the API.  We may need to track more info
+ * with these object and a pointer back to the CPU object (eg VkImage).  In
+ * those cases we just allocate a slightly bigger object and put the extra
+ * state after the GPU state object.
+ *
+ * The state stream allocator works similarly to how the i965 DRI driver streams
+ * all its state.  Even with Vulkan, we need to emit transient state (whether
+ * surface state base or dynamic state base), and for that we can just get a
+ * block and fill it up.  These cases are local to a command buffer and the
+ * sub-allocator need not be thread safe.  The streaming allocator gets a new
+ * block when it runs out of space and chains them together so they can be
+ * easily freed.
+ */
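+
+/* A minimal usage sketch of the block-pool layer alone (illustrative only;
+ * the state pool and state stream allocators described above are layered on
+ * top and not shown here):
+ *
+ *    struct anv_block_pool pool;
+ *    anv_block_pool_init(&pool, device, 8192);
+ *
+ *    int32_t offset = anv_block_pool_alloc(&pool);       // front allocation
+ *    int32_t back = anv_block_pool_alloc_back(&pool);    // negative offset
+ *    void *front_block = pool.map + offset;              // CPU pointer
+ *    void *back_block = pool.map + back;
+ *
+ *    anv_block_pool_finish(&pool);
+ */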
+
+/* Allocations are always at least 64 byte aligned, so 1 is an invalid value.
+ * We use it to indicate the free list is empty. */
+#define EMPTY 1
+
+struct anv_mmap_cleanup {
+   void *map;
+   size_t size;
+   uint32_t gem_handle;
+};
+
+#define ANV_MMAP_CLEANUP_INIT ((struct anv_mmap_cleanup){0})
+
+static inline long
+sys_futex(void *addr1, int op, int val1,
+          struct timespec *timeout, void *addr2, int val3)
+{
+   return syscall(SYS_futex, addr1, op, val1, timeout, addr2, val3);
+}
+
+static inline int
+futex_wake(uint32_t *addr, int count)
+{
+   return sys_futex(addr, FUTEX_WAKE, count, NULL, NULL, 0);
+}
+
+static inline int
+futex_wait(uint32_t *addr, int32_t value)
+{
+   return sys_futex(addr, FUTEX_WAIT, value, NULL, NULL, 0);
+}
+
+static inline int
+memfd_create(const char *name, unsigned int flags)
+{
+   return syscall(SYS_memfd_create, name, flags);
+}
+
+static inline uint32_t
+ilog2_round_up(uint32_t value)
+{
+   assert(value != 0);
+   return 32 - __builtin_clz(value - 1);
+}
+
+static inline uint32_t
+round_to_power_of_two(uint32_t value)
+{
+   return 1 << ilog2_round_up(value);
+}
+
+static bool
+anv_free_list_pop(union anv_free_list *list, void **map, int32_t *offset)
+{
+   union anv_free_list current, new, old;
+
+   current.u64 = list->u64;
+   while (current.offset != EMPTY) {
+      /* We have to add a memory barrier here so that the list head (and
+       * offset) gets read before we read the map pointer.  This way we
+       * know that the map pointer is valid for the given offset at the
+       * point where we read it.
+       */
+      __sync_synchronize();
+
+      int32_t *next_ptr = *map + current.offset;
+      new.offset = VG_NOACCESS_READ(next_ptr);
+      new.count = current.count + 1;
+      old.u64 = __sync_val_compare_and_swap(&list->u64, current.u64, new.u64);
+      if (old.u64 == current.u64) {
+         *offset = current.offset;
+         return true;
+      }
+      current = old;
+   }
+
+   return false;
+}
+
+static void
+anv_free_list_push(union anv_free_list *list, void *map, int32_t offset)
+{
+   union anv_free_list current, old, new;
+   int32_t *next_ptr = map + offset;
+
+   old = *list;
+   do {
+      current = old;
+      VG_NOACCESS_WRITE(next_ptr, current.offset);
+      new.offset = offset;
+      new.count = current.count + 1;
+      old.u64 = __sync_val_compare_and_swap(&list->u64, current.u64, new.u64);
+   } while (old.u64 != current.u64);
+}
+
+/* All pointers in the ptr_free_list are assumed to be page-aligned.  This
+ * means that the bottom 12 bits should all be zero.
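+ *
+ * For example (illustrative values only): packing the pointer 0x7f0000001000
+ * with a count of 5 gives 0x7f0000001005; PFL_PTR() then recovers the
+ * original pointer and PFL_COUNT() recovers 5.  The 12-bit counter exists
+ * only to defeat the ABA problem on the compare-and-swap operations below.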
+ */
+#define PFL_COUNT(x) ((uintptr_t)(x) & 0xfff)
+#define PFL_PTR(x) ((void *)((uintptr_t)(x) & ~(uintptr_t)0xfff))
+#define PFL_PACK(ptr, count) ({           \
+   (void *)(((uintptr_t)(ptr) & ~(uintptr_t)0xfff) | ((count) & 0xfff)); \
+})
+
+static bool
+anv_ptr_free_list_pop(void **list, void **elem)
+{
+   void *current = *list;
+   while (PFL_PTR(current) != NULL) {
+      void **next_ptr = PFL_PTR(current);
+      void *new_ptr = VG_NOACCESS_READ(next_ptr);
+      unsigned new_count = PFL_COUNT(current) + 1;
+      void *new = PFL_PACK(new_ptr, new_count);
+      void *old = __sync_val_compare_and_swap(list, current, new);
+      if (old == current) {
+         *elem = PFL_PTR(current);
+         return true;
+      }
+      current = old;
+   }
+
+   return false;
+}
+
+static void
+anv_ptr_free_list_push(void **list, void *elem)
+{
+   void *old, *current;
+   void **next_ptr = elem;
+
+   /* The pointer-based free list requires that the pointer be
+    * page-aligned.  This is because we use the bottom 12 bits of the
+    * pointer to store a counter to solve the ABA concurrency problem.
+    */
+   assert(((uintptr_t)elem & 0xfff) == 0);
+
+   old = *list;
+   do {
+      current = old;
+      VG_NOACCESS_WRITE(next_ptr, PFL_PTR(current));
+      unsigned new_count = PFL_COUNT(current) + 1;
+      void *new = PFL_PACK(elem, new_count);
+      old = __sync_val_compare_and_swap(list, current, new);
+   } while (old != current);
+}
+
+static uint32_t
+anv_block_pool_grow(struct anv_block_pool *pool, struct anv_block_state *state);
+
+void
+anv_block_pool_init(struct anv_block_pool *pool,
+                    struct anv_device *device, uint32_t block_size)
+{
+   assert(util_is_power_of_two(block_size));
+
+   pool->device = device;
+   pool->bo.gem_handle = 0;
+   pool->bo.offset = 0;
+   pool->bo.size = 0;
+   pool->bo.is_winsys_bo = false;
+   pool->block_size = block_size;
+   pool->free_list = ANV_FREE_LIST_EMPTY;
+   pool->back_free_list = ANV_FREE_LIST_EMPTY;
+
+   pool->fd = memfd_create("block pool", MFD_CLOEXEC);
+   if (pool->fd == -1)
+      return;
+
+   /* Just make it 2GB up-front.  The Linux kernel won't actually back it
+    * with pages until we either map and fault on one of them or we use
+    * userptr and send a chunk of it off to the GPU.
+    */
+   if (ftruncate(pool->fd, BLOCK_POOL_MEMFD_SIZE) == -1)
+      return;
+
+   anv_vector_init(&pool->mmap_cleanups,
+                   round_to_power_of_two(sizeof(struct anv_mmap_cleanup)), 128);
+
+   pool->state.next = 0;
+   pool->state.end = 0;
+   pool->back_state.next = 0;
+   pool->back_state.end = 0;
+
+   /* Immediately grow the pool so we'll have a backing bo. */
+   pool->state.end = anv_block_pool_grow(pool, &pool->state);
+}
+
+void
+anv_block_pool_finish(struct anv_block_pool *pool)
+{
+   struct anv_mmap_cleanup *cleanup;
+
+   anv_vector_foreach(cleanup, &pool->mmap_cleanups) {
+      if (cleanup->map)
+         munmap(cleanup->map, cleanup->size);
+      if (cleanup->gem_handle)
+         anv_gem_close(pool->device, cleanup->gem_handle);
+   }
+
+   anv_vector_finish(&pool->mmap_cleanups);
+
+   close(pool->fd);
+}
+
+#define PAGE_SIZE 4096
+
+/** Grows and re-centers the block pool.
+ *
+ * We grow the block pool in one or both directions in such a way that the
+ * following conditions are met:
+ *
+ *  1) The size of the entire pool is always a power of two.
+ *
+ *  2) The pool only ever grows; it can grow at either end, but neither end
+ *     can get shortened.
+ *
+ *  3) At the end of the allocation, we have about twice as much space
+ *     allocated for each end as we have used.  This way the pool doesn't
+ *     grow too far in one direction or the other.
+ *
+ *  4) If _alloc_back() has never been called, then the back portion of
+ *     the pool retains a size of zero.  (This makes it easier for users of
+ *     the block pool that only want a one-sided pool.)
+ *
+ *  5) We have enough space allocated for at least one more block in
+ *     whichever side `state` points to.
+ *
+ *  6) The center of the pool is always aligned to both the block_size of
+ *     the pool and a 4K CPU page.
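+ *
+ * As a rough worked example (illustrative numbers, assuming an 8 KB block
+ * size): if the old pool is 64 KB with back_used = 16 KB and
+ * front_used = 32 KB, the new size is 128 KB and the raw center_bo_offset is
+ * 128 KB * 16 / 48, roughly 43690 bytes, which is then aligned down to the
+ * 8 KB granularity to give 40960 (40 KB), subject to the "never shrink
+ * either end" clamps below.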
+ */
+static uint32_t
+anv_block_pool_grow(struct anv_block_pool *pool, struct anv_block_state *state)
+{
+   size_t size;
+   void *map;
+   uint32_t gem_handle;
+   struct anv_mmap_cleanup *cleanup;
+
+   pthread_mutex_lock(&pool->device->mutex);
+
+   assert(state == &pool->state || state == &pool->back_state);
+
+   /* Gather a little usage information on the pool.  Since we may have
+    * threads waiting in queue to get some storage while we resize, it's
+    * actually possible that total_used will be larger than old_size.  In
+    * particular, block_pool_alloc() increments state->next prior to
+    * calling block_pool_grow, so this ensures that we get enough space for
+    * whichever side tries to grow the pool.
+    *
+    * We align to a page size because it makes it easier to do our
+    * calculations later in such a way that we stay page-aligned.
+    */
+   uint32_t back_used = align_u32(pool->back_state.next, PAGE_SIZE);
+   uint32_t front_used = align_u32(pool->state.next, PAGE_SIZE);
+   uint32_t total_used = front_used + back_used;
+
+   assert(state == &pool->state || back_used > 0);
+
+   size_t old_size = pool->bo.size;
+
+   if (old_size != 0 &&
+       back_used * 2 <= pool->center_bo_offset &&
+       front_used * 2 <= (old_size - pool->center_bo_offset)) {
+      /* If we're in this case then this isn't the first allocation and we
+       * already have enough space on both sides to hold double what we
+       * have allocated.  There's nothing for us to do.
+       */
+      goto done;
+   }
+
+   if (old_size == 0) {
+      /* This is the first allocation */
+      size = MAX2(32 * pool->block_size, PAGE_SIZE);
+   } else {
+      size = old_size * 2;
+   }
+
+   /* We can't have a block pool bigger than 1GB because we use signed
+    * 32-bit offsets in the free list and we don't want overflow.  We
+    * should never need a block pool bigger than 1GB anyway.
+    */
+   assert(size <= (1u << 31));
+
+   /* We compute a new center_bo_offset such that, when we double the size
+    * of the pool, we maintain the ratio of how much is used by each side.
+    * This way things should remain more-or-less balanced.
+    */
+   uint32_t center_bo_offset;
+   if (back_used == 0) {
+      /* If we're in this case then we have never called alloc_back().  In
+       * this case, we want to keep the offset at 0 to make things as simple
+       * as possible for users that don't care about back allocations.
+       */
+      center_bo_offset = 0;
+   } else {
+      /* Try to "center" the allocation based on how much is currently in
+       * use on each side of the center line.
+       */
+      center_bo_offset = ((uint64_t)size * back_used) / total_used;
+
+      /* Align down to a multiple of both the block size and page size */
+      uint32_t granularity = MAX2(pool->block_size, PAGE_SIZE);
+      assert(util_is_power_of_two(granularity));
+      center_bo_offset &= ~(granularity - 1);
+
+      assert(center_bo_offset >= back_used);
+
+      /* Make sure we don't shrink the back end of the pool */
+      if (center_bo_offset < pool->back_state.end)
+         center_bo_offset = pool->back_state.end;
+
+      /* Make sure that we don't shrink the front end of the pool */
+      if (size - center_bo_offset < pool->state.end)
+         center_bo_offset = size - pool->state.end;
+   }
+
+   assert(center_bo_offset % pool->block_size == 0);
+   assert(center_bo_offset % PAGE_SIZE == 0);
+
+   /* Assert that we only ever grow the pool */
+   assert(center_bo_offset >= pool->back_state.end);
+   assert(size - center_bo_offset >= pool->state.end);
+
+   cleanup = anv_vector_add(&pool->mmap_cleanups);
+   if (!cleanup)
+      goto fail;
+   *cleanup = ANV_MMAP_CLEANUP_INIT;
+
+   /* Just leak the old map until we destroy the pool.  We can't munmap it
+    * without races or imposing locking on the block allocate fast path. On
+    * the whole, the leaked maps add up to less than the size of the
+    * current map.  MAP_POPULATE seems like the right thing to do, but we
+    * should try to get some numbers.
+    */
+   map = mmap(NULL, size, PROT_READ | PROT_WRITE,
+              MAP_SHARED | MAP_POPULATE, pool->fd,
+              BLOCK_POOL_MEMFD_CENTER - center_bo_offset);
+   cleanup->map = map;
+   cleanup->size = size;
+
+   if (map == MAP_FAILED)
+      goto fail;
+
+   gem_handle = anv_gem_userptr(pool->device, map, size);
+   if (gem_handle == 0)
+      goto fail;
+   cleanup->gem_handle = gem_handle;
+
+#if 0
+   /* Regular objects are created I915_CACHING_CACHED on LLC platforms and
+    * I915_CACHING_NONE on non-LLC platforms. However, userptr objects are
+    * always created as I915_CACHING_CACHED, which on non-LLC means
+    * snooped. That can be useful but comes with a bit of overhead.  Since
+    * we're explicitly clflushing and don't want the overhead, we need to turn
+    * it off. */
+   if (!pool->device->info.has_llc) {
+      anv_gem_set_caching(pool->device, gem_handle, I915_CACHING_NONE);
+      anv_gem_set_domain(pool->device, gem_handle,
+                         I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT);
+   }
+#endif
+
+   /* Now that we have successfully allocated everything, we can write the
+    * new values back into the pool. */
+   pool->map = map + center_bo_offset;
+   pool->center_bo_offset = center_bo_offset;
+   pool->bo.gem_handle = gem_handle;
+   pool->bo.size = size;
+   pool->bo.map = map;
+   pool->bo.index = 0;
+
+done:
+   pthread_mutex_unlock(&pool->device->mutex);
+
+   /* Return the appropriate new size.  This function never actually
+    * updates state->next.  Instead, we let the caller do that because it
+    * needs to do so in order to maintain its concurrency model.
+    */
+   if (state == &pool->state) {
+      return pool->bo.size - pool->center_bo_offset;
+   } else {
+      assert(pool->center_bo_offset > 0);
+      return pool->center_bo_offset;
+   }
+
+fail:
+   pthread_mutex_unlock(&pool->device->mutex);
+
+   return 0;
+}
+
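+/* A minimal sketch (not compiled into the driver) of the proportional
+ * re-centering performed in anv_block_pool_grow() above.  For example, with
+ * size = 16384, back_used = 4096 and front_used = 12288, the new center
+ * lands at 4096 and preserves the 1:3 back/front split.  The helper name is
+ * purely illustrative.
+ */
+#if 0
+static uint32_t
+example_center_bo_offset(uint32_t size, uint32_t back_used,
+                         uint32_t front_used, uint32_t granularity)
+{
+   /* Keep the back/front usage ratio when the pool is resized. */
+   uint32_t center = ((uint64_t)size * back_used) / (back_used + front_used);
+
+   /* Align down to the block/page granularity (assumed to be a power of
+    * two, as asserted above).
+    */
+   return center & ~(granularity - 1);
+}
+#endif
+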
+static uint32_t
+anv_block_pool_alloc_new(struct anv_block_pool *pool,
+                         struct anv_block_state *pool_state)
+{
+   struct anv_block_state state, old, new;
+
+   while (1) {
+      state.u64 = __sync_fetch_and_add(&pool_state->u64, pool->block_size);
+      if (state.next < state.end) {
+         assert(pool->map);
+         return state.next;
+      } else if (state.next == state.end) {
+         /* We allocated the first block outside the pool, so we have to grow
+          * it.  pool_state->next acts as a mutex: threads that try to
+          * allocate now will get block indexes above the current limit and
+          * hit futex_wait below. */
+         new.next = state.next + pool->block_size;
+         new.end = anv_block_pool_grow(pool, pool_state);
+         assert(new.end >= new.next && new.end % pool->block_size == 0);
+         old.u64 = __sync_lock_test_and_set(&pool_state->u64, new.u64);
+         if (old.next != state.next)
+            futex_wake(&pool_state->end, INT_MAX);
+         return state.next;
+      } else {
+         futex_wait(&pool_state->end, state.end);
+         continue;
+      }
+   }
+}
+
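+/* A minimal sketch (not compiled into the driver) of why the 64-bit
+ * fetch-and-add above is enough: anv_block_state packs `next` and `end`
+ * into one 64-bit word (the layout below is an assumption based on the
+ * usage here; the real definition lives in anv_private.h), so one atomic
+ * add both reserves a block and snapshots the current limit.
+ */
+#if 0
+union example_block_state {
+   struct {
+      uint32_t next;   /* offset of the next free block */
+      uint32_t end;    /* current end of the usable range */
+   };
+   uint64_t u64;
+};
+
+static uint32_t
+example_reserve_block(union example_block_state *state, uint32_t block_size)
+{
+   union example_block_state old;
+
+   /* Atomically bump `next` and read back next/end as one snapshot. */
+   old.u64 = __sync_fetch_and_add(&state->u64, block_size);
+   return old.next;
+}
+#endif
+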
+int32_t
+anv_block_pool_alloc(struct anv_block_pool *pool)
+{
+   int32_t offset;
+
+   /* Try free list first. */
+   if (anv_free_list_pop(&pool->free_list, &pool->map, &offset)) {
+      assert(offset >= 0);
+      assert(pool->map);
+      return offset;
+   }
+
+   return anv_block_pool_alloc_new(pool, &pool->state);
+}
+
+/* Allocates a block out of the back of the block pool.
+ *
+ * This will allocate a block earlier than the "start" of the block pool.
+ * The offsets returned from this function will be negative but will still
+ * be correct relative to the block pool's map pointer.
+ *
+ * If you ever use anv_block_pool_alloc_back, then you will have to do
+ * gymnastics with the block pool's BO when doing relocations.
+ */
+int32_t
+anv_block_pool_alloc_back(struct anv_block_pool *pool)
+{
+   int32_t offset;
+
+   /* Try free list first. */
+   if (anv_free_list_pop(&pool->back_free_list, &pool->map, &offset)) {
+      assert(offset < 0);
+      assert(pool->map);
+      return offset;
+   }
+
+   offset = anv_block_pool_alloc_new(pool, &pool->back_state);
+
+   /* The offset we get out of anv_block_pool_alloc_new() is actually the
+    * number of bytes downwards from the middle to the end of the block.
+    * We need to turn it into a (negative) offset from the middle to the
+    * start of the block.
+    */
+   assert(offset >= 0);
+   return -(offset + pool->block_size);
+}
+
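+/* A minimal sketch (not compiled into the driver) of the sign flip above.
+ * With a 4096-byte block size, the first back allocation gets raw offset 0
+ * from back_state and is returned as -4096, i.e. the block occupies
+ * map[-4096] through map[-1], immediately below the pool's center line.
+ */
+#if 0
+static int32_t
+example_back_block_offset(uint32_t raw_offset, uint32_t block_size)
+{
+   /* raw_offset counts bytes downward from the center to the *end* of the
+    * block; flip it into a negative offset to the block's start.
+    */
+   return -(int32_t)(raw_offset + block_size);
+}
+#endif
+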
+void
+anv_block_pool_free(struct anv_block_pool *pool, int32_t offset)
+{
+   if (offset < 0) {
+      anv_free_list_push(&pool->back_free_list, pool->map, offset);
+   } else {
+      anv_free_list_push(&pool->free_list, pool->map, offset);
+   }
+}
+
+static void
+anv_fixed_size_state_pool_init(struct anv_fixed_size_state_pool *pool,
+                               size_t state_size)
+{
+   /* At least a cache line and must divide the block size. */
+   assert(state_size >= 64 && util_is_power_of_two(state_size));
+
+   pool->state_size = state_size;
+   pool->free_list = ANV_FREE_LIST_EMPTY;
+   pool->block.next = 0;
+   pool->block.end = 0;
+}
+
+static uint32_t
+anv_fixed_size_state_pool_alloc(struct anv_fixed_size_state_pool *pool,
+                                struct anv_block_pool *block_pool)
+{
+   int32_t offset;
+   struct anv_block_state block, old, new;
+
+   /* Try free list first. */
+   if (anv_free_list_pop(&pool->free_list, &block_pool->map, &offset)) {
+      assert(offset >= 0);
+      return offset;
+   }
+
+   /* If the free list was empty (or somebody raced us and took the items) we
+    * allocate a new item from the end of the block */
+ restart:
+   block.u64 = __sync_fetch_and_add(&pool->block.u64, pool->state_size);
+
+   if (block.next < block.end) {
+      return block.next;
+   } else if (block.next == block.end) {
+      offset = anv_block_pool_alloc(block_pool);
+      new.next = offset + pool->state_size;
+      new.end = offset + block_pool->block_size;
+      old.u64 = __sync_lock_test_and_set(&pool->block.u64, new.u64);
+      if (old.next != block.next)
+         futex_wake(&pool->block.end, INT_MAX);
+      return offset;
+   } else {
+      futex_wait(&pool->block.end, block.end);
+      goto restart;
+   }
+}
+
+static void
+anv_fixed_size_state_pool_free(struct anv_fixed_size_state_pool *pool,
+                               struct anv_block_pool *block_pool,
+                               uint32_t offset)
+{
+   anv_free_list_push(&pool->free_list, block_pool->map, offset);
+}
+
+void
+anv_state_pool_init(struct anv_state_pool *pool,
+                    struct anv_block_pool *block_pool)
+{
+   pool->block_pool = block_pool;
+   for (unsigned i = 0; i < ANV_STATE_BUCKETS; i++) {
+      size_t size = 1 << (ANV_MIN_STATE_SIZE_LOG2 + i);
+      anv_fixed_size_state_pool_init(&pool->buckets[i], size);
+   }
+   VG(VALGRIND_CREATE_MEMPOOL(pool, 0, false));
+}
+
+void
+anv_state_pool_finish(struct anv_state_pool *pool)
+{
+   VG(VALGRIND_DESTROY_MEMPOOL(pool));
+}
+
+struct anv_state
+anv_state_pool_alloc(struct anv_state_pool *pool, size_t size, size_t align)
+{
+   unsigned size_log2 = ilog2_round_up(size < align ? align : size);
+   assert(size_log2 <= ANV_MAX_STATE_SIZE_LOG2);
+   if (size_log2 < ANV_MIN_STATE_SIZE_LOG2)
+      size_log2 = ANV_MIN_STATE_SIZE_LOG2;
+   unsigned bucket = size_log2 - ANV_MIN_STATE_SIZE_LOG2;
+
+   struct anv_state state;
+   state.alloc_size = 1 << size_log2;
+   state.offset = anv_fixed_size_state_pool_alloc(&pool->buckets[bucket],
+                                                  pool->block_pool);
+   state.map = pool->block_pool->map + state.offset;
+   VG(VALGRIND_MEMPOOL_ALLOC(pool, state.map, size));
+   return state;
+}
+
+void
+anv_state_pool_free(struct anv_state_pool *pool, struct anv_state state)
+{
+   assert(util_is_power_of_two(state.alloc_size));
+   unsigned size_log2 = ilog2_round_up(state.alloc_size);
+   assert(size_log2 >= ANV_MIN_STATE_SIZE_LOG2 &&
+          size_log2 <= ANV_MAX_STATE_SIZE_LOG2);
+   unsigned bucket = size_log2 - ANV_MIN_STATE_SIZE_LOG2;
+
+   VG(VALGRIND_MEMPOOL_FREE(pool, state.map));
+   anv_fixed_size_state_pool_free(&pool->buckets[bucket],
+                                  pool->block_pool, state.offset);
+}
+
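+/* A minimal sketch (not compiled into the driver) of a state pool round
+ * trip.  The pool hands out power-of-two sized states, so a 48-byte request
+ * with 32-byte alignment comes back as a 64-byte state, assuming the pool's
+ * minimum state size is 64 bytes.  The block_pool argument is assumed to be
+ * an already-initialized block pool.
+ */
+#if 0
+static void
+example_state_pool_usage(struct anv_block_pool *block_pool)
+{
+   struct anv_state_pool pool;
+   anv_state_pool_init(&pool, block_pool);
+
+   /* max(48, 32) rounded up to a power of two is 64. */
+   struct anv_state state = anv_state_pool_alloc(&pool, 48, 32);
+   assert(state.alloc_size == 64);
+   memset(state.map, 0, 48);
+
+   anv_state_pool_free(&pool, state);
+   anv_state_pool_finish(&pool);
+}
+#endif
+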
+#define NULL_BLOCK 1
+struct anv_state_stream_block {
+   /* The next block */
+   struct anv_state_stream_block *next;
+
+   /* The offset into the block pool at which this block starts */
+   uint32_t offset;
+
+#ifdef HAVE_VALGRIND
+   /* A pointer to the first user-allocated thing in this block.  This is
+    * what valgrind sees as the start of the block.
+    */
+   void *_vg_ptr;
+#endif
+};
+
+/* The state stream allocator is a one-shot, single-threaded allocator for
+ * variable sized blocks.  We use it for allocating dynamic state.
+ */
+void
+anv_state_stream_init(struct anv_state_stream *stream,
+                      struct anv_block_pool *block_pool)
+{
+   stream->block_pool = block_pool;
+   stream->block = NULL;
+
+   /* Ensure that next + whatever > end.  This way the first call to
+    * state_stream_alloc fetches a new block.
+    */
+   stream->next = 1;
+   stream->end = 0;
+
+   VG(VALGRIND_CREATE_MEMPOOL(stream, 0, false));
+}
+
+void
+anv_state_stream_finish(struct anv_state_stream *stream)
+{
+   VG(const uint32_t block_size = stream->block_pool->block_size);
+
+   struct anv_state_stream_block *next = stream->block;
+   while (next != NULL) {
+      VG(VALGRIND_MAKE_MEM_DEFINED(next, sizeof(*next)));
+      struct anv_state_stream_block sb = VG_NOACCESS_READ(next);
+      VG(VALGRIND_MEMPOOL_FREE(stream, sb._vg_ptr));
+      VG(VALGRIND_MAKE_MEM_UNDEFINED(next, block_size));
+      anv_block_pool_free(stream->block_pool, sb.offset);
+      next = sb.next;
+   }
+
+   VG(VALGRIND_DESTROY_MEMPOOL(stream));
+}
+
+struct anv_state
+anv_state_stream_alloc(struct anv_state_stream *stream,
+                       uint32_t size, uint32_t alignment)
+{
+   struct anv_state_stream_block *sb = stream->block;
+
+   struct anv_state state;
+
+   state.offset = align_u32(stream->next, alignment);
+   if (state.offset + size > stream->end) {
+      uint32_t block = anv_block_pool_alloc(stream->block_pool);
+      sb = stream->block_pool->map + block;
+
+      VG(VALGRIND_MAKE_MEM_UNDEFINED(sb, sizeof(*sb)));
+      sb->next = stream->block;
+      sb->offset = block;
+      VG(sb->_vg_ptr = NULL);
+      VG(VALGRIND_MAKE_MEM_NOACCESS(sb, stream->block_pool->block_size));
+
+      stream->block = sb;
+      stream->start = block;
+      stream->next = block + sizeof(*sb);
+      stream->end = block + stream->block_pool->block_size;
+
+      state.offset = align_u32(stream->next, alignment);
+      assert(state.offset + size <= stream->end);
+   }
+
+   assert(state.offset > stream->start);
+   state.map = (void *)sb + (state.offset - stream->start);
+   state.alloc_size = size;
+
+#ifdef HAVE_VALGRIND
+   void *vg_ptr = VG_NOACCESS_READ(&sb->_vg_ptr);
+   if (vg_ptr == NULL) {
+      vg_ptr = state.map;
+      VG_NOACCESS_WRITE(&sb->_vg_ptr, vg_ptr);
+      VALGRIND_MEMPOOL_ALLOC(stream, vg_ptr, size);
+   } else {
+      void *state_end = state.map + state.alloc_size;
+      /* This only updates the mempool.  The newly allocated chunk is still
+       * marked as NOACCESS. */
+      VALGRIND_MEMPOOL_CHANGE(stream, vg_ptr, vg_ptr, state_end - vg_ptr);
+      /* Mark the newly allocated chunk as undefined */
+      VALGRIND_MAKE_MEM_UNDEFINED(state.map, state.alloc_size);
+   }
+#endif
+
+   stream->next = state.offset + size;
+
+   return state;
+}
+
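+/* A minimal sketch (not compiled into the driver) of typical state stream
+ * usage: many small allocations, no per-state free, and a single
+ * anv_state_stream_finish() that returns every underlying block to the
+ * block pool.  The block_pool argument is assumed to be already
+ * initialized.
+ */
+#if 0
+static void
+example_state_stream_usage(struct anv_block_pool *block_pool)
+{
+   struct anv_state_stream stream;
+   anv_state_stream_init(&stream, block_pool);
+
+   for (unsigned i = 0; i < 16; i++) {
+      struct anv_state s = anv_state_stream_alloc(&stream, 64, 64);
+      memset(s.map, 0, s.alloc_size);
+   }
+
+   anv_state_stream_finish(&stream);
+}
+#endif
+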
+struct bo_pool_bo_link {
+   struct bo_pool_bo_link *next;
+   struct anv_bo bo;
+};
+
+void
+anv_bo_pool_init(struct anv_bo_pool *pool,
+                 struct anv_device *device, uint32_t bo_size)
+{
+   pool->device = device;
+   pool->bo_size = bo_size;
+   pool->free_list = NULL;
+
+   VG(VALGRIND_CREATE_MEMPOOL(pool, 0, false));
+}
+
+void
+anv_bo_pool_finish(struct anv_bo_pool *pool)
+{
+   struct bo_pool_bo_link *link = PFL_PTR(pool->free_list);
+   while (link != NULL) {
+      struct bo_pool_bo_link link_copy = VG_NOACCESS_READ(link);
+
+      anv_gem_munmap(link_copy.bo.map, pool->bo_size);
+      anv_gem_close(pool->device, link_copy.bo.gem_handle);
+      link = link_copy.next;
+   }
+
+   VG(VALGRIND_DESTROY_MEMPOOL(pool));
+}
+
+VkResult
+anv_bo_pool_alloc(struct anv_bo_pool *pool, struct anv_bo *bo)
+{
+   VkResult result;
+
+   void *next_free_void;
+   if (anv_ptr_free_list_pop(&pool->free_list, &next_free_void)) {
+      struct bo_pool_bo_link *next_free = next_free_void;
+      *bo = VG_NOACCESS_READ(&next_free->bo);
+      assert(bo->map == next_free);
+      assert(bo->size == pool->bo_size);
+
+      VG(VALGRIND_MEMPOOL_ALLOC(pool, bo->map, pool->bo_size));
+
+      return VK_SUCCESS;
+   }
+
+   struct anv_bo new_bo;
+
+   result = anv_bo_init_new(&new_bo, pool->device, pool->bo_size);
+   if (result != VK_SUCCESS)
+      return result;
+
+   assert(new_bo.size == pool->bo_size);
+
+   new_bo.map = anv_gem_mmap(pool->device, new_bo.gem_handle, 0, pool->bo_size, 0);
+   if (new_bo.map == NULL) {
+      anv_gem_close(pool->device, new_bo.gem_handle);
+      return vk_error(VK_ERROR_MEMORY_MAP_FAILED);
+   }
+
+   *bo = new_bo;
+
+   VG(VALGRIND_MEMPOOL_ALLOC(pool, bo->map, pool->bo_size));
+
+   return VK_SUCCESS;
+}
+
+void
+anv_bo_pool_free(struct anv_bo_pool *pool, const struct anv_bo *bo_in)
+{
+   /* Make a copy in case the anv_bo happens to be stored in the BO itself */
+   struct anv_bo bo = *bo_in;
+   struct bo_pool_bo_link *link = bo.map;
+   link->bo = bo;
+
+   VG(VALGRIND_MEMPOOL_FREE(pool, bo.map));
+   anv_ptr_free_list_push(&pool->free_list, link);
+}
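+
+/* A minimal sketch (not compiled into the driver) of BO pool usage: the
+ * pool hands out fixed-size, already-mapped BOs and recycles them through
+ * the lock-free pointer free list above.  The device argument is assumed
+ * to be a live anv_device.
+ */
+#if 0
+static VkResult
+example_bo_pool_usage(struct anv_device *device)
+{
+   struct anv_bo_pool pool;
+   struct anv_bo bo;
+   VkResult result;
+
+   anv_bo_pool_init(&pool, device, 16 * 1024);
+
+   result = anv_bo_pool_alloc(&pool, &bo);
+   if (result != VK_SUCCESS)
+      return result;
+
+   memset(bo.map, 0, bo.size);
+   anv_bo_pool_free(&pool, &bo);
+
+   anv_bo_pool_finish(&pool);
+   return VK_SUCCESS;
+}
+#endif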
diff --git a/src/intel/vulkan/anv_batch_chain.c b/src/intel/vulkan/anv_batch_chain.c
new file mode 100644 (file)
index 0000000..d24dd06
--- /dev/null
@@ -0,0 +1,1077 @@
+/*
+ * Copyright © 2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include <assert.h>
+#include <stdbool.h>
+#include <string.h>
+#include <unistd.h>
+#include <fcntl.h>
+
+#include "anv_private.h"
+
+#include "genxml/gen7_pack.h"
+#include "genxml/gen8_pack.h"
+
+/** \file anv_batch_chain.c
+ *
+ * This file contains functions related to anv_cmd_buffer as a data
+ * structure.  This involves everything required to create and destroy
+ * the actual batch buffers as well as link them together and handle
+ * relocations and surface state.  It specifically does *not* contain any
+ * handling of actual vkCmd calls beyond vkCmdExecuteCommands.
+ */
+
+/*-----------------------------------------------------------------------*
+ * Functions related to anv_reloc_list
+ *-----------------------------------------------------------------------*/
+
+static VkResult
+anv_reloc_list_init_clone(struct anv_reloc_list *list,
+                          const VkAllocationCallbacks *alloc,
+                          const struct anv_reloc_list *other_list)
+{
+   if (other_list) {
+      list->num_relocs = other_list->num_relocs;
+      list->array_length = other_list->array_length;
+   } else {
+      list->num_relocs = 0;
+      list->array_length = 256;
+   }
+
+   list->relocs =
+      anv_alloc(alloc, list->array_length * sizeof(*list->relocs), 8,
+                VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+
+   if (list->relocs == NULL)
+      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+
+   list->reloc_bos =
+      anv_alloc(alloc, list->array_length * sizeof(*list->reloc_bos), 8,
+                VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+
+   if (list->reloc_bos == NULL) {
+      anv_free(alloc, list->relocs);
+      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+   }
+
+   if (other_list) {
+      memcpy(list->relocs, other_list->relocs,
+             list->array_length * sizeof(*list->relocs));
+      memcpy(list->reloc_bos, other_list->reloc_bos,
+             list->array_length * sizeof(*list->reloc_bos));
+   }
+
+   return VK_SUCCESS;
+}
+
+VkResult
+anv_reloc_list_init(struct anv_reloc_list *list,
+                    const VkAllocationCallbacks *alloc)
+{
+   return anv_reloc_list_init_clone(list, alloc, NULL);
+}
+
+void
+anv_reloc_list_finish(struct anv_reloc_list *list,
+                      const VkAllocationCallbacks *alloc)
+{
+   anv_free(alloc, list->relocs);
+   anv_free(alloc, list->reloc_bos);
+}
+
+static VkResult
+anv_reloc_list_grow(struct anv_reloc_list *list,
+                    const VkAllocationCallbacks *alloc,
+                    size_t num_additional_relocs)
+{
+   if (list->num_relocs + num_additional_relocs <= list->array_length)
+      return VK_SUCCESS;
+
+   size_t new_length = list->array_length * 2;
+   while (new_length < list->num_relocs + num_additional_relocs)
+      new_length *= 2;
+
+   struct drm_i915_gem_relocation_entry *new_relocs =
+      anv_alloc(alloc, new_length * sizeof(*list->relocs), 8,
+                VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+   if (new_relocs == NULL)
+      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+
+   struct anv_bo **new_reloc_bos =
+      anv_alloc(alloc, new_length * sizeof(*list->reloc_bos), 8,
+                VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+   if (new_reloc_bos == NULL) {
+      anv_free(alloc, new_relocs);
+      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+   }
+
+   memcpy(new_relocs, list->relocs, list->num_relocs * sizeof(*list->relocs));
+   memcpy(new_reloc_bos, list->reloc_bos,
+          list->num_relocs * sizeof(*list->reloc_bos));
+
+   anv_free(alloc, list->relocs);
+   anv_free(alloc, list->reloc_bos);
+
+   list->array_length = new_length;
+   list->relocs = new_relocs;
+   list->reloc_bos = new_reloc_bos;
+
+   return VK_SUCCESS;
+}
+
+uint64_t
+anv_reloc_list_add(struct anv_reloc_list *list,
+                   const VkAllocationCallbacks *alloc,
+                   uint32_t offset, struct anv_bo *target_bo, uint32_t delta)
+{
+   struct drm_i915_gem_relocation_entry *entry;
+   int index;
+
+   const uint32_t domain =
+      target_bo->is_winsys_bo ? I915_GEM_DOMAIN_RENDER : 0;
+
+   anv_reloc_list_grow(list, alloc, 1);
+   /* TODO: Handle failure */
+
+   /* XXX: Can we use I915_EXEC_HANDLE_LUT? */
+   index = list->num_relocs++;
+   list->reloc_bos[index] = target_bo;
+   entry = &list->relocs[index];
+   entry->target_handle = target_bo->gem_handle;
+   entry->delta = delta;
+   entry->offset = offset;
+   entry->presumed_offset = target_bo->offset;
+   entry->read_domains = domain;
+   entry->write_domain = domain;
+   VG(VALGRIND_CHECK_MEM_IS_DEFINED(entry, sizeof(*entry)));
+
+   return target_bo->offset + delta;
+}
+
+static void
+anv_reloc_list_append(struct anv_reloc_list *list,
+                      const VkAllocationCallbacks *alloc,
+                      struct anv_reloc_list *other, uint32_t offset)
+{
+   anv_reloc_list_grow(list, alloc, other->num_relocs);
+   /* TODO: Handle failure */
+
+   memcpy(&list->relocs[list->num_relocs], &other->relocs[0],
+          other->num_relocs * sizeof(other->relocs[0]));
+   memcpy(&list->reloc_bos[list->num_relocs], &other->reloc_bos[0],
+          other->num_relocs * sizeof(other->reloc_bos[0]));
+
+   for (uint32_t i = 0; i < other->num_relocs; i++)
+      list->relocs[i + list->num_relocs].offset += offset;
+
+   list->num_relocs += other->num_relocs;
+}
+
+/*-----------------------------------------------------------------------*
+ * Functions related to anv_batch
+ *-----------------------------------------------------------------------*/
+
+void *
+anv_batch_emit_dwords(struct anv_batch *batch, int num_dwords)
+{
+   if (batch->next + num_dwords * 4 > batch->end)
+      batch->extend_cb(batch, batch->user_data);
+
+   void *p = batch->next;
+
+   batch->next += num_dwords * 4;
+   assert(batch->next <= batch->end);
+
+   return p;
+}
+
+uint64_t
+anv_batch_emit_reloc(struct anv_batch *batch,
+                     void *location, struct anv_bo *bo, uint32_t delta)
+{
+   return anv_reloc_list_add(batch->relocs, batch->alloc,
+                             location - batch->start, bo, delta);
+}
+
+void
+anv_batch_emit_batch(struct anv_batch *batch, struct anv_batch *other)
+{
+   uint32_t size, offset;
+
+   size = other->next - other->start;
+   assert(size % 4 == 0);
+
+   if (batch->next + size > batch->end)
+      batch->extend_cb(batch, batch->user_data);
+
+   assert(batch->next + size <= batch->end);
+
+   VG(VALGRIND_CHECK_MEM_IS_DEFINED(other->start, size));
+   memcpy(batch->next, other->start, size);
+
+   offset = batch->next - batch->start;
+   anv_reloc_list_append(batch->relocs, batch->alloc,
+                         other->relocs, offset);
+
+   batch->next += size;
+}
+
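+/* A minimal sketch (not compiled into the driver) of emitting raw dwords
+ * through anv_batch_emit_dwords().  Real code goes through the genxml pack
+ * helpers; the only hardware detail assumed here is that an all-zero dword
+ * is MI_NOOP.
+ */
+#if 0
+static void
+example_emit_noop_pair(struct anv_batch *batch)
+{
+   uint32_t *dw = anv_batch_emit_dwords(batch, 2);
+
+   dw[0] = 0;   /* MI_NOOP */
+   dw[1] = 0;   /* MI_NOOP */
+}
+#endif
+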
+/*-----------------------------------------------------------------------*
+ * Functions related to anv_batch_bo
+ *-----------------------------------------------------------------------*/
+
+static VkResult
+anv_batch_bo_create(struct anv_cmd_buffer *cmd_buffer,
+                    struct anv_batch_bo **bbo_out)
+{
+   VkResult result;
+
+   struct anv_batch_bo *bbo = anv_alloc(&cmd_buffer->pool->alloc, sizeof(*bbo),
+                                        8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+   if (bbo == NULL)
+      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+
+   result = anv_bo_pool_alloc(&cmd_buffer->device->batch_bo_pool, &bbo->bo);
+   if (result != VK_SUCCESS)
+      goto fail_alloc;
+
+   result = anv_reloc_list_init(&bbo->relocs, &cmd_buffer->pool->alloc);
+   if (result != VK_SUCCESS)
+      goto fail_bo_alloc;
+
+   *bbo_out = bbo;
+
+   return VK_SUCCESS;
+
+ fail_bo_alloc:
+   anv_bo_pool_free(&cmd_buffer->device->batch_bo_pool, &bbo->bo);
+ fail_alloc:
+   anv_free(&cmd_buffer->pool->alloc, bbo);
+
+   return result;
+}
+
+static VkResult
+anv_batch_bo_clone(struct anv_cmd_buffer *cmd_buffer,
+                   const struct anv_batch_bo *other_bbo,
+                   struct anv_batch_bo **bbo_out)
+{
+   VkResult result;
+
+   struct anv_batch_bo *bbo = anv_alloc(&cmd_buffer->pool->alloc, sizeof(*bbo),
+                                        8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+   if (bbo == NULL)
+      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+
+   result = anv_bo_pool_alloc(&cmd_buffer->device->batch_bo_pool, &bbo->bo);
+   if (result != VK_SUCCESS)
+      goto fail_alloc;
+
+   result = anv_reloc_list_init_clone(&bbo->relocs, &cmd_buffer->pool->alloc,
+                                      &other_bbo->relocs);
+   if (result != VK_SUCCESS)
+      goto fail_bo_alloc;
+
+   bbo->length = other_bbo->length;
+   memcpy(bbo->bo.map, other_bbo->bo.map, other_bbo->length);
+
+   bbo->last_ss_pool_bo_offset = other_bbo->last_ss_pool_bo_offset;
+
+   *bbo_out = bbo;
+
+   return VK_SUCCESS;
+
+ fail_bo_alloc:
+   anv_bo_pool_free(&cmd_buffer->device->batch_bo_pool, &bbo->bo);
+ fail_alloc:
+   anv_free(&cmd_buffer->pool->alloc, bbo);
+
+   return result;
+}
+
+static void
+anv_batch_bo_start(struct anv_batch_bo *bbo, struct anv_batch *batch,
+                   size_t batch_padding)
+{
+   batch->next = batch->start = bbo->bo.map;
+   batch->end = bbo->bo.map + bbo->bo.size - batch_padding;
+   batch->relocs = &bbo->relocs;
+   bbo->last_ss_pool_bo_offset = 0;
+   bbo->relocs.num_relocs = 0;
+}
+
+static void
+anv_batch_bo_continue(struct anv_batch_bo *bbo, struct anv_batch *batch,
+                      size_t batch_padding)
+{
+   batch->start = bbo->bo.map;
+   batch->next = bbo->bo.map + bbo->length;
+   batch->end = bbo->bo.map + bbo->bo.size - batch_padding;
+   batch->relocs = &bbo->relocs;
+}
+
+static void
+anv_batch_bo_finish(struct anv_batch_bo *bbo, struct anv_batch *batch)
+{
+   assert(batch->start == bbo->bo.map);
+   bbo->length = batch->next - batch->start;
+   VG(VALGRIND_CHECK_MEM_IS_DEFINED(batch->start, bbo->length));
+}
+
+static void
+anv_batch_bo_destroy(struct anv_batch_bo *bbo,
+                     struct anv_cmd_buffer *cmd_buffer)
+{
+   anv_reloc_list_finish(&bbo->relocs, &cmd_buffer->pool->alloc);
+   anv_bo_pool_free(&cmd_buffer->device->batch_bo_pool, &bbo->bo);
+   anv_free(&cmd_buffer->pool->alloc, bbo);
+}
+
+static VkResult
+anv_batch_bo_list_clone(const struct list_head *list,
+                        struct anv_cmd_buffer *cmd_buffer,
+                        struct list_head *new_list)
+{
+   VkResult result = VK_SUCCESS;
+
+   list_inithead(new_list);
+
+   struct anv_batch_bo *prev_bbo = NULL;
+   list_for_each_entry(struct anv_batch_bo, bbo, list, link) {
+      struct anv_batch_bo *new_bbo = NULL;
+      result = anv_batch_bo_clone(cmd_buffer, bbo, &new_bbo);
+      if (result != VK_SUCCESS)
+         break;
+      list_addtail(&new_bbo->link, new_list);
+
+      if (prev_bbo) {
+         /* As we clone this list of batch_bo's, they chain one to the
+          * other using MI_BATCH_BUFFER_START commands.  We need to fix up
+          * those relocations as we go.  Fortunately, this is pretty easy
+          * as it will always be the last relocation in the list.
+          */
+         uint32_t last_idx = prev_bbo->relocs.num_relocs - 1;
+         assert(prev_bbo->relocs.reloc_bos[last_idx] == &bbo->bo);
+         prev_bbo->relocs.reloc_bos[last_idx] = &new_bbo->bo;
+      }
+
+      prev_bbo = new_bbo;
+   }
+
+   if (result != VK_SUCCESS) {
+      list_for_each_entry_safe(struct anv_batch_bo, bbo, new_list, link)
+         anv_batch_bo_destroy(bbo, cmd_buffer);
+   }
+
+   return result;
+}
+
+/*-----------------------------------------------------------------------*
+ * Functions related to anv_batch_bo
+ *-----------------------------------------------------------------------*/
+
+static inline struct anv_batch_bo *
+anv_cmd_buffer_current_batch_bo(struct anv_cmd_buffer *cmd_buffer)
+{
+   return LIST_ENTRY(struct anv_batch_bo, cmd_buffer->batch_bos.prev, link);
+}
+
+struct anv_address
+anv_cmd_buffer_surface_base_address(struct anv_cmd_buffer *cmd_buffer)
+{
+   return (struct anv_address) {
+      .bo = &cmd_buffer->device->surface_state_block_pool.bo,
+      .offset = *(int32_t *)anv_vector_head(&cmd_buffer->bt_blocks),
+   };
+}
+
+static void
+emit_batch_buffer_start(struct anv_cmd_buffer *cmd_buffer,
+                        struct anv_bo *bo, uint32_t offset)
+{
+   /* In gen8+ the address field grew to two dwords to accommodate 48-bit
+    * offsets. The high 16 bits are in the last dword, so we can use the gen8
+    * version in either case, as long as we set the instruction length in the
+    * header accordingly.  This means that we always emit three dwords here
+    * and all the padding and adjustment we do in this file works for all
+    * gens.
+    */
+
+   const uint32_t gen7_length =
+      GEN7_MI_BATCH_BUFFER_START_length - GEN7_MI_BATCH_BUFFER_START_length_bias;
+   const uint32_t gen8_length =
+      GEN8_MI_BATCH_BUFFER_START_length - GEN8_MI_BATCH_BUFFER_START_length_bias;
+
+   anv_batch_emit(&cmd_buffer->batch, GEN8_MI_BATCH_BUFFER_START,
+      .DWordLength = cmd_buffer->device->info.gen < 8 ?
+                     gen7_length : gen8_length,
+      ._2ndLevelBatchBuffer = _1stlevelbatch,
+      .AddressSpaceIndicator = ASI_PPGTT,
+      .BatchBufferStartAddress = { bo, offset });
+}
+
+static void
+cmd_buffer_chain_to_batch_bo(struct anv_cmd_buffer *cmd_buffer,
+                             struct anv_batch_bo *bbo)
+{
+   struct anv_batch *batch = &cmd_buffer->batch;
+   struct anv_batch_bo *current_bbo =
+      anv_cmd_buffer_current_batch_bo(cmd_buffer);
+
+   /* We set the end of the batch a little short so that we can be sure we
+    * have room for the chaining command.  Since we're about to emit the
+    * chaining command, let's set it back where it should go.
+    */
+   batch->end += GEN8_MI_BATCH_BUFFER_START_length * 4;
+   assert(batch->end == current_bbo->bo.map + current_bbo->bo.size);
+
+   emit_batch_buffer_start(cmd_buffer, &bbo->bo, 0);
+
+   anv_batch_bo_finish(current_bbo, batch);
+}
+
+static VkResult
+anv_cmd_buffer_chain_batch(struct anv_batch *batch, void *_data)
+{
+   struct anv_cmd_buffer *cmd_buffer = _data;
+   struct anv_batch_bo *new_bbo;
+
+   VkResult result = anv_batch_bo_create(cmd_buffer, &new_bbo);
+   if (result != VK_SUCCESS)
+      return result;
+
+   struct anv_batch_bo **seen_bbo = anv_vector_add(&cmd_buffer->seen_bbos);
+   if (seen_bbo == NULL) {
+      anv_batch_bo_destroy(new_bbo, cmd_buffer);
+      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+   }
+   *seen_bbo = new_bbo;
+
+   cmd_buffer_chain_to_batch_bo(cmd_buffer, new_bbo);
+
+   list_addtail(&new_bbo->link, &cmd_buffer->batch_bos);
+
+   anv_batch_bo_start(new_bbo, batch, GEN8_MI_BATCH_BUFFER_START_length * 4);
+
+   return VK_SUCCESS;
+}
+
+struct anv_state
+anv_cmd_buffer_alloc_binding_table(struct anv_cmd_buffer *cmd_buffer,
+                                   uint32_t entries, uint32_t *state_offset)
+{
+   struct anv_block_pool *block_pool =
+       &cmd_buffer->device->surface_state_block_pool;
+   int32_t *bt_block = anv_vector_head(&cmd_buffer->bt_blocks);
+   struct anv_state state;
+
+   state.alloc_size = align_u32(entries * 4, 32);
+
+   if (cmd_buffer->bt_next + state.alloc_size > block_pool->block_size)
+      return (struct anv_state) { 0 };
+
+   state.offset = cmd_buffer->bt_next;
+   state.map = block_pool->map + *bt_block + state.offset;
+
+   cmd_buffer->bt_next += state.alloc_size;
+
+   assert(*bt_block < 0);
+   *state_offset = -(*bt_block);
+
+   return state;
+}
+
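+/* A minimal sketch (not compiled into the driver) of the offset bookkeeping
+ * above with concrete numbers.  The binding table block comes from
+ * anv_block_pool_alloc_back(), so *bt_block is negative; *state_offset is
+ * its distance back up to the pool's center line.
+ */
+#if 0
+static void
+example_bt_offsets(void)
+{
+   const int32_t bt_block = -4096;   /* from anv_block_pool_alloc_back() */
+   const uint32_t bt_next = 64;      /* next free slot within the block */
+
+   /* The new table starts 4032 bytes below the pool center... */
+   int32_t map_delta = bt_block + (int32_t)bt_next;
+
+   /* ...and the caller-visible state_offset is the distance from the block
+    * back up to the center.
+    */
+   uint32_t state_offset = (uint32_t)(-bt_block);
+
+   assert(map_delta == -4032);
+   assert(state_offset == 4096);
+}
+#endif
+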
+struct anv_state
+anv_cmd_buffer_alloc_surface_state(struct anv_cmd_buffer *cmd_buffer)
+{
+   return anv_state_stream_alloc(&cmd_buffer->surface_state_stream, 64, 64);
+}
+
+struct anv_state
+anv_cmd_buffer_alloc_dynamic_state(struct anv_cmd_buffer *cmd_buffer,
+                                   uint32_t size, uint32_t alignment)
+{
+   return anv_state_stream_alloc(&cmd_buffer->dynamic_state_stream,
+                                 size, alignment);
+}
+
+VkResult
+anv_cmd_buffer_new_binding_table_block(struct anv_cmd_buffer *cmd_buffer)
+{
+   struct anv_block_pool *block_pool =
+       &cmd_buffer->device->surface_state_block_pool;
+
+   int32_t *offset = anv_vector_add(&cmd_buffer->bt_blocks);
+   if (offset == NULL)
+      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+
+   *offset = anv_block_pool_alloc_back(block_pool);
+   cmd_buffer->bt_next = 0;
+
+   return VK_SUCCESS;
+}
+
+VkResult
+anv_cmd_buffer_init_batch_bo_chain(struct anv_cmd_buffer *cmd_buffer)
+{
+   struct anv_batch_bo *batch_bo;
+   VkResult result;
+
+   list_inithead(&cmd_buffer->batch_bos);
+
+   result = anv_batch_bo_create(cmd_buffer, &batch_bo);
+   if (result != VK_SUCCESS)
+      return result;
+
+   list_addtail(&batch_bo->link, &cmd_buffer->batch_bos);
+
+   cmd_buffer->batch.alloc = &cmd_buffer->pool->alloc;
+   cmd_buffer->batch.extend_cb = anv_cmd_buffer_chain_batch;
+   cmd_buffer->batch.user_data = cmd_buffer;
+
+   anv_batch_bo_start(batch_bo, &cmd_buffer->batch,
+                      GEN8_MI_BATCH_BUFFER_START_length * 4);
+
+   int success = anv_vector_init(&cmd_buffer->seen_bbos,
+                                 sizeof(struct anv_bo *),
+                                 8 * sizeof(struct anv_bo *));
+   if (!success)
+      goto fail_batch_bo;
+
+   *(struct anv_batch_bo **)anv_vector_add(&cmd_buffer->seen_bbos) = batch_bo;
+
+   success = anv_vector_init(&cmd_buffer->bt_blocks, sizeof(int32_t),
+                             8 * sizeof(int32_t));
+   if (!success)
+      goto fail_seen_bbos;
+
+   result = anv_reloc_list_init(&cmd_buffer->surface_relocs,
+                                &cmd_buffer->pool->alloc);
+   if (result != VK_SUCCESS)
+      goto fail_bt_blocks;
+
+   anv_cmd_buffer_new_binding_table_block(cmd_buffer);
+
+   cmd_buffer->execbuf2.objects = NULL;
+   cmd_buffer->execbuf2.bos = NULL;
+   cmd_buffer->execbuf2.array_length = 0;
+
+   return VK_SUCCESS;
+
+ fail_bt_blocks:
+   anv_vector_finish(&cmd_buffer->bt_blocks);
+ fail_seen_bbos:
+   anv_vector_finish(&cmd_buffer->seen_bbos);
+ fail_batch_bo:
+   anv_batch_bo_destroy(batch_bo, cmd_buffer);
+
+   return result;
+}
+
+void
+anv_cmd_buffer_fini_batch_bo_chain(struct anv_cmd_buffer *cmd_buffer)
+{
+   int32_t *bt_block;
+   anv_vector_foreach(bt_block, &cmd_buffer->bt_blocks) {
+      anv_block_pool_free(&cmd_buffer->device->surface_state_block_pool,
+                          *bt_block);
+   }
+   anv_vector_finish(&cmd_buffer->bt_blocks);
+
+   anv_reloc_list_finish(&cmd_buffer->surface_relocs, &cmd_buffer->pool->alloc);
+
+   anv_vector_finish(&cmd_buffer->seen_bbos);
+
+   /* Destroy all of the batch buffers */
+   list_for_each_entry_safe(struct anv_batch_bo, bbo,
+                            &cmd_buffer->batch_bos, link) {
+      anv_batch_bo_destroy(bbo, cmd_buffer);
+   }
+
+   anv_free(&cmd_buffer->pool->alloc, cmd_buffer->execbuf2.objects);
+   anv_free(&cmd_buffer->pool->alloc, cmd_buffer->execbuf2.bos);
+}
+
+void
+anv_cmd_buffer_reset_batch_bo_chain(struct anv_cmd_buffer *cmd_buffer)
+{
+   /* Delete all but the first batch bo */
+   assert(!list_empty(&cmd_buffer->batch_bos));
+   while (cmd_buffer->batch_bos.next != cmd_buffer->batch_bos.prev) {
+      struct anv_batch_bo *bbo = anv_cmd_buffer_current_batch_bo(cmd_buffer);
+      list_del(&bbo->link);
+      anv_batch_bo_destroy(bbo, cmd_buffer);
+   }
+   assert(!list_empty(&cmd_buffer->batch_bos));
+
+   anv_batch_bo_start(anv_cmd_buffer_current_batch_bo(cmd_buffer),
+                      &cmd_buffer->batch,
+                      GEN8_MI_BATCH_BUFFER_START_length * 4);
+
+   while (anv_vector_length(&cmd_buffer->bt_blocks) > 1) {
+      int32_t *bt_block = anv_vector_remove(&cmd_buffer->bt_blocks);
+      anv_block_pool_free(&cmd_buffer->device->surface_state_block_pool,
+                          *bt_block);
+   }
+   assert(anv_vector_length(&cmd_buffer->bt_blocks) == 1);
+   cmd_buffer->bt_next = 0;
+
+   cmd_buffer->surface_relocs.num_relocs = 0;
+
+   /* Reset the list of seen buffers */
+   cmd_buffer->seen_bbos.head = 0;
+   cmd_buffer->seen_bbos.tail = 0;
+
+   *(struct anv_batch_bo **)anv_vector_add(&cmd_buffer->seen_bbos) =
+      anv_cmd_buffer_current_batch_bo(cmd_buffer);
+}
+
+void
+anv_cmd_buffer_end_batch_buffer(struct anv_cmd_buffer *cmd_buffer)
+{
+   struct anv_batch_bo *batch_bo = anv_cmd_buffer_current_batch_bo(cmd_buffer);
+
+   if (cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
+      /* When we start a batch buffer, we subtract a certain amount of
+       * padding from the end to ensure that we always have room to emit a
+       * BATCH_BUFFER_START to chain to the next BO.  We need to remove
+       * that padding before we end the batch; otherwise, we may end up
+       * with our BATCH_BUFFER_END in another BO.
+       */
+      cmd_buffer->batch.end += GEN8_MI_BATCH_BUFFER_START_length * 4;
+      assert(cmd_buffer->batch.end == batch_bo->bo.map + batch_bo->bo.size);
+
+      anv_batch_emit(&cmd_buffer->batch, GEN7_MI_BATCH_BUFFER_END);
+
+      /* Round batch up to an even number of dwords. */
+      if ((cmd_buffer->batch.next - cmd_buffer->batch.start) & 4)
+         anv_batch_emit(&cmd_buffer->batch, GEN7_MI_NOOP);
+
+      cmd_buffer->exec_mode = ANV_CMD_BUFFER_EXEC_MODE_PRIMARY;
+   }
+
+   anv_batch_bo_finish(batch_bo, &cmd_buffer->batch);
+
+   if (cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_SECONDARY) {
+      /* If this is a secondary command buffer, we need to determine the
+       * mode in which it will be executed with vkExecuteCommands.  We
+       * determine this statically here so that this stays in sync with the
+       * actual ExecuteCommands implementation.
+       */
+      if ((cmd_buffer->batch_bos.next == cmd_buffer->batch_bos.prev) &&
+          (batch_bo->length < ANV_CMD_BUFFER_BATCH_SIZE / 2)) {
+         /* If the secondary has exactly one batch buffer in its list *and*
+          * that batch buffer is less than half of the maximum size, we're
+          * probably better off simply copying it into our batch.
+          */
+         cmd_buffer->exec_mode = ANV_CMD_BUFFER_EXEC_MODE_EMIT;
+      } else if (!(cmd_buffer->usage_flags &
+                   VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT)) {
+         cmd_buffer->exec_mode = ANV_CMD_BUFFER_EXEC_MODE_CHAIN;
+
+         /* When we chain, we need to add an MI_BATCH_BUFFER_START command
+          * with its relocation.  In order to handle this we'll increment here
+          * so we can unconditionally decrement right before adding the
+          * MI_BATCH_BUFFER_START command.
+          */
+         batch_bo->relocs.num_relocs++;
+         cmd_buffer->batch.next += GEN8_MI_BATCH_BUFFER_START_length * 4;
+      } else {
+         cmd_buffer->exec_mode = ANV_CMD_BUFFER_EXEC_MODE_COPY_AND_CHAIN;
+      }
+   }
+}
+
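+/* A minimal sketch (not compiled into the driver) of the secondary
+ * execution-mode choice made at the end of the function above, written out
+ * as a standalone predicate.  The enum type name is an assumption based on
+ * the constants used in this file.
+ */
+#if 0
+static enum anv_cmd_buffer_exec_mode
+example_choose_exec_mode(bool single_batch_bo, uint32_t batch_length,
+                         VkCommandBufferUsageFlags usage_flags)
+{
+   if (single_batch_bo && batch_length < ANV_CMD_BUFFER_BATCH_SIZE / 2)
+      return ANV_CMD_BUFFER_EXEC_MODE_EMIT;            /* copy into primary */
+   else if (!(usage_flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT))
+      return ANV_CMD_BUFFER_EXEC_MODE_CHAIN;           /* patch BB_START   */
+   else
+      return ANV_CMD_BUFFER_EXEC_MODE_COPY_AND_CHAIN;  /* clone then chain */
+}
+#endif
+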
+static inline VkResult
+anv_cmd_buffer_add_seen_bbos(struct anv_cmd_buffer *cmd_buffer,
+                             struct list_head *list)
+{
+   list_for_each_entry(struct anv_batch_bo, bbo, list, link) {
+      struct anv_batch_bo **bbo_ptr = anv_vector_add(&cmd_buffer->seen_bbos);
+      if (bbo_ptr == NULL)
+         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+
+      *bbo_ptr = bbo;
+   }
+
+   return VK_SUCCESS;
+}
+
+void
+anv_cmd_buffer_add_secondary(struct anv_cmd_buffer *primary,
+                             struct anv_cmd_buffer *secondary)
+{
+   switch (secondary->exec_mode) {
+   case ANV_CMD_BUFFER_EXEC_MODE_EMIT:
+      anv_batch_emit_batch(&primary->batch, &secondary->batch);
+      anv_cmd_buffer_emit_state_base_address(primary);
+      break;
+   case ANV_CMD_BUFFER_EXEC_MODE_CHAIN: {
+      struct anv_batch_bo *first_bbo =
+         list_first_entry(&secondary->batch_bos, struct anv_batch_bo, link);
+      struct anv_batch_bo *last_bbo =
+         list_last_entry(&secondary->batch_bos, struct anv_batch_bo, link);
+
+      emit_batch_buffer_start(primary, &first_bbo->bo, 0);
+
+      struct anv_batch_bo *this_bbo = anv_cmd_buffer_current_batch_bo(primary);
+      assert(primary->batch.start == this_bbo->bo.map);
+      uint32_t offset = primary->batch.next - primary->batch.start;
+      const uint32_t inst_size = GEN8_MI_BATCH_BUFFER_START_length * 4;
+
+      /* Roll back the previous MI_BATCH_BUFFER_START and its relocation so we
+       * can emit a new command and relocation for the current splice.  In
+       * order to handle the initial-use case, we incremented next and
+       * num_relocs in end_batch_buffer() so we can always just subtract
+       * here.
+       */
+      last_bbo->relocs.num_relocs--;
+      secondary->batch.next -= inst_size;
+      emit_batch_buffer_start(secondary, &this_bbo->bo, offset);
+      anv_cmd_buffer_add_seen_bbos(primary, &secondary->batch_bos);
+
+      /* After patching up the secondary buffer, we need to clflush the
+       * modified instruction in case we're on a !llc platform. We use a
+       * little loop to handle the case where the instruction crosses a cache
+       * line boundary.
+       */
+      if (!primary->device->info.has_llc) {
+         void *inst = secondary->batch.next - inst_size;
+         void *p = (void *) (((uintptr_t) inst) & ~CACHELINE_MASK);
+         __builtin_ia32_mfence();
+         while (p < secondary->batch.next) {
+            __builtin_ia32_clflush(p);
+            p += CACHELINE_SIZE;
+         }
+      }
+
+      anv_cmd_buffer_emit_state_base_address(primary);
+      break;
+   }
+   case ANV_CMD_BUFFER_EXEC_MODE_COPY_AND_CHAIN: {
+      struct list_head copy_list;
+      VkResult result = anv_batch_bo_list_clone(&secondary->batch_bos,
+                                                secondary,
+                                                &copy_list);
+      if (result != VK_SUCCESS)
+         return; /* FIXME */
+
+      anv_cmd_buffer_add_seen_bbos(primary, &copy_list);
+
+      struct anv_batch_bo *first_bbo =
+         list_first_entry(&copy_list, struct anv_batch_bo, link);
+      struct anv_batch_bo *last_bbo =
+         list_last_entry(&copy_list, struct anv_batch_bo, link);
+
+      cmd_buffer_chain_to_batch_bo(primary, first_bbo);
+
+      list_splicetail(&copy_list, &primary->batch_bos);
+
+      anv_batch_bo_continue(last_bbo, &primary->batch,
+                            GEN8_MI_BATCH_BUFFER_START_length * 4);
+
+      anv_cmd_buffer_emit_state_base_address(primary);
+      break;
+   }
+   default:
+      assert(!"Invalid execution mode");
+   }
+
+   anv_reloc_list_append(&primary->surface_relocs, &primary->pool->alloc,
+                         &secondary->surface_relocs, 0);
+}
+
+static VkResult
+anv_cmd_buffer_add_bo(struct anv_cmd_buffer *cmd_buffer,
+                      struct anv_bo *bo,
+                      struct anv_reloc_list *relocs)
+{
+   struct drm_i915_gem_exec_object2 *obj = NULL;
+
+   if (bo->index < cmd_buffer->execbuf2.bo_count &&
+       cmd_buffer->execbuf2.bos[bo->index] == bo)
+      obj = &cmd_buffer->execbuf2.objects[bo->index];
+
+   if (obj == NULL) {
+      /* We've never seen this one before.  Add it to the list and assign
+       * an id that we can use later.
+       */
+      if (cmd_buffer->execbuf2.bo_count >= cmd_buffer->execbuf2.array_length) {
+         uint32_t new_len = cmd_buffer->execbuf2.objects ?
+                            cmd_buffer->execbuf2.array_length * 2 : 64;
+
+         struct drm_i915_gem_exec_object2 *new_objects =
+            anv_alloc(&cmd_buffer->pool->alloc, new_len * sizeof(*new_objects),
+                      8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+         if (new_objects == NULL)
+            return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+
+         struct anv_bo **new_bos =
+            anv_alloc(&cmd_buffer->pool->alloc, new_len * sizeof(*new_bos),
+                      8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+         if (new_bos == NULL) {
+            anv_free(&cmd_buffer->pool->alloc, new_objects);
+            return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+         }
+
+         if (cmd_buffer->execbuf2.objects) {
+            memcpy(new_objects, cmd_buffer->execbuf2.objects,
+                   cmd_buffer->execbuf2.bo_count * sizeof(*new_objects));
+            memcpy(new_bos, cmd_buffer->execbuf2.bos,
+                   cmd_buffer->execbuf2.bo_count * sizeof(*new_bos));
+         }
+
+         cmd_buffer->execbuf2.objects = new_objects;
+         cmd_buffer->execbuf2.bos = new_bos;
+         cmd_buffer->execbuf2.array_length = new_len;
+      }
+
+      assert(cmd_buffer->execbuf2.bo_count < cmd_buffer->execbuf2.array_length);
+
+      bo->index = cmd_buffer->execbuf2.bo_count++;
+      obj = &cmd_buffer->execbuf2.objects[bo->index];
+      cmd_buffer->execbuf2.bos[bo->index] = bo;
+
+      obj->handle = bo->gem_handle;
+      obj->relocation_count = 0;
+      obj->relocs_ptr = 0;
+      obj->alignment = 0;
+      obj->offset = bo->offset;
+      obj->flags = bo->is_winsys_bo ? EXEC_OBJECT_WRITE : 0;
+      obj->rsvd1 = 0;
+      obj->rsvd2 = 0;
+   }
+
+   if (relocs != NULL && obj->relocation_count == 0) {
+      /* This is the first time we've ever seen a list of relocations for
+       * this BO.  Go ahead and set the relocations and then walk the list
+       * of relocations and add them all.
+       */
+      obj->relocation_count = relocs->num_relocs;
+      obj->relocs_ptr = (uintptr_t) relocs->relocs;
+
+      for (size_t i = 0; i < relocs->num_relocs; i++) {
+         /* A quick sanity check on relocations */
+         assert(relocs->relocs[i].offset < bo->size);
+         anv_cmd_buffer_add_bo(cmd_buffer, relocs->reloc_bos[i], NULL);
+      }
+   }
+
+   return VK_SUCCESS;
+}
+
+static void
+anv_cmd_buffer_process_relocs(struct anv_cmd_buffer *cmd_buffer,
+                              struct anv_reloc_list *list)
+{
+   struct anv_bo *bo;
+
+   /* If the kernel supports I915_EXEC_NO_RELOC, it will compare offset in
+    * struct drm_i915_gem_exec_object2 against the bo's current offset and, if
+    * none of the bos have moved, it will skip relocation processing altogether.
+    * If I915_EXEC_NO_RELOC is not supported, the kernel ignores the incoming
+    * value of offset so we can set it either way.  For that to work we need
+    * to make sure all relocs use the same presumed offset.
+    */
+
+   for (size_t i = 0; i < list->num_relocs; i++) {
+      bo = list->reloc_bos[i];
+      if (bo->offset != list->relocs[i].presumed_offset)
+         cmd_buffer->execbuf2.need_reloc = true;
+
+      list->relocs[i].target_handle = bo->index;
+   }
+}
+
+static uint64_t
+read_reloc(const struct anv_device *device, const void *p)
+{
+   if (device->info.gen >= 8)
+      return *(uint64_t *)p;
+   else
+      return *(uint32_t *)p;
+}
+
+static void
+write_reloc(const struct anv_device *device, void *p, uint64_t v)
+{
+   if (device->info.gen >= 8)
+      *(uint64_t *)p = v;
+   else
+      *(uint32_t *)p = v;
+}
+
+static void
+adjust_relocations_from_block_pool(struct anv_block_pool *pool,
+                                   struct anv_reloc_list *relocs)
+{
+   for (size_t i = 0; i < relocs->num_relocs; i++) {
+      /* In general, we don't know how stale the relocated value is.  It
+       * may have been used last time or it may not.  Since we don't want
+       * to stomp it while the GPU may be accessing it, we haven't updated
+       * it anywhere else in the code.  Instead, we just set the presumed
+       * offset to what it is now based on the delta and the data in the
+       * block pool.  Then the kernel will update it for us if needed.
+       */
+      assert(relocs->relocs[i].offset < pool->state.end);
+      const void *p = pool->map + relocs->relocs[i].offset;
+
+      /* We're reading back the relocated value from potentially incoherent
+       * memory here. However, any change to the value will be from the kernel
+       * writing out relocations, which will keep the CPU cache up to date.
+       */
+      relocs->relocs[i].presumed_offset =
+         read_reloc(pool->device, p) - relocs->relocs[i].delta;
+
+      /* All of the relocations from this block pool to other BO's should
+       * have been emitted relative to the surface block pool center.  We
+       * need to add the center offset to make them relative to the
+       * beginning of the actual GEM bo.
+       */
+      relocs->relocs[i].offset += pool->center_bo_offset;
+   }
+}
+
+static void
+adjust_relocations_to_block_pool(struct anv_block_pool *pool,
+                                 struct anv_bo *from_bo,
+                                 struct anv_reloc_list *relocs,
+                                 uint32_t *last_pool_center_bo_offset)
+{
+   assert(*last_pool_center_bo_offset <= pool->center_bo_offset);
+   uint32_t delta = pool->center_bo_offset - *last_pool_center_bo_offset;
+
+   /* When we initially emit relocations into a block pool, we don't
+    * actually know what the final center_bo_offset will be so we just emit
+    * it as if center_bo_offset == 0.  Now that we know what the center
+    * offset is, we need to walk the list of relocations and adjust any
+    * relocations that point to the pool bo with the correct offset.
+    */
+   for (size_t i = 0; i < relocs->num_relocs; i++) {
+      if (relocs->reloc_bos[i] == &pool->bo) {
+         /* Adjust the delta value in the relocation to correctly
+          * correspond to the new delta.  Initially, this value may have
+          * been negative (if treated as unsigned), but we trust in
+          * uint32_t roll-over to fix that for us at this point.
+          */
+         relocs->relocs[i].delta += delta;
+
+         /* Since the delta has changed, we need to update the actual
+          * relocated value with the new presumed value.  This function
+          * should only be called on batch buffers, so we know it isn't in
+          * use by the GPU at the moment.
+          */
+         assert(relocs->relocs[i].offset < from_bo->size);
+         write_reloc(pool->device, from_bo->map + relocs->relocs[i].offset,
+                     relocs->relocs[i].presumed_offset +
+                     relocs->relocs[i].delta);
+      }
+   }
+
+   *last_pool_center_bo_offset = pool->center_bo_offset;
+}
+
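+/* A minimal sketch (not compiled into the driver) of why the unsigned
+ * roll-over above works.  A relocation originally emitted against offset
+ * -8192 from the pool center (stored as 0xffffe000 in the uint32_t delta)
+ * becomes 4096 once center_bo_offset grows from 0 to 12288, which is
+ * exactly the target's distance from the start of the GEM BO.
+ */
+#if 0
+static uint32_t
+example_adjust_delta(uint32_t emitted_delta, uint32_t center_bo_offset,
+                     uint32_t last_center_bo_offset)
+{
+   /* Modular uint32_t arithmetic turns the initially "negative" delta into
+    * the correct BO-relative value as the center offset grows.
+    */
+   return emitted_delta + (center_bo_offset - last_center_bo_offset);
+}
+#endif
+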
+void
+anv_cmd_buffer_prepare_execbuf(struct anv_cmd_buffer *cmd_buffer)
+{
+   struct anv_batch *batch = &cmd_buffer->batch;
+   struct anv_block_pool *ss_pool =
+      &cmd_buffer->device->surface_state_block_pool;
+
+   cmd_buffer->execbuf2.bo_count = 0;
+   cmd_buffer->execbuf2.need_reloc = false;
+
+   adjust_relocations_from_block_pool(ss_pool, &cmd_buffer->surface_relocs);
+   anv_cmd_buffer_add_bo(cmd_buffer, &ss_pool->bo, &cmd_buffer->surface_relocs);
+
+   /* First, we walk over all of the bos we've seen and add them and their
+    * relocations to the validate list.
+    */
+   struct anv_batch_bo **bbo;
+   anv_vector_foreach(bbo, &cmd_buffer->seen_bbos) {
+      adjust_relocations_to_block_pool(ss_pool, &(*bbo)->bo, &(*bbo)->relocs,
+                                       &(*bbo)->last_ss_pool_bo_offset);
+
+      anv_cmd_buffer_add_bo(cmd_buffer, &(*bbo)->bo, &(*bbo)->relocs);
+   }
+
+   struct anv_batch_bo *first_batch_bo =
+      list_first_entry(&cmd_buffer->batch_bos, struct anv_batch_bo, link);
+
+   /* The kernel requires that the last entry in the validation list be the
+    * batch buffer to execute.  We can simply swap the element
+    * corresponding to the first batch_bo in the chain with the last
+    * element in the list.
+    */
+   if (first_batch_bo->bo.index != cmd_buffer->execbuf2.bo_count - 1) {
+      uint32_t idx = first_batch_bo->bo.index;
+      uint32_t last_idx = cmd_buffer->execbuf2.bo_count - 1;
+
+      struct drm_i915_gem_exec_object2 tmp_obj =
+         cmd_buffer->execbuf2.objects[idx];
+      assert(cmd_buffer->execbuf2.bos[idx] == &first_batch_bo->bo);
+
+      cmd_buffer->execbuf2.objects[idx] = cmd_buffer->execbuf2.objects[last_idx];
+      cmd_buffer->execbuf2.bos[idx] = cmd_buffer->execbuf2.bos[last_idx];
+      cmd_buffer->execbuf2.bos[idx]->index = idx;
+
+      cmd_buffer->execbuf2.objects[last_idx] = tmp_obj;
+      cmd_buffer->execbuf2.bos[last_idx] = &first_batch_bo->bo;
+      first_batch_bo->bo.index = last_idx;
+   }
+
+   /* Now we go through and fixup all of the relocation lists to point to
+    * the correct indices in the object array.  We have to do this after we
+    * reorder the list above as some of the indices may have changed.
+    */
+   anv_vector_foreach(bbo, &cmd_buffer->seen_bbos)
+      anv_cmd_buffer_process_relocs(cmd_buffer, &(*bbo)->relocs);
+
+   anv_cmd_buffer_process_relocs(cmd_buffer, &cmd_buffer->surface_relocs);
+
+   if (!cmd_buffer->device->info.has_llc) {
+      __builtin_ia32_mfence();
+      anv_vector_foreach(bbo, &cmd_buffer->seen_bbos) {
+         for (uint32_t i = 0; i < (*bbo)->length; i += CACHELINE_SIZE)
+            __builtin_ia32_clflush((*bbo)->bo.map + i);
+      }
+   }
+
+   cmd_buffer->execbuf2.execbuf = (struct drm_i915_gem_execbuffer2) {
+      .buffers_ptr = (uintptr_t) cmd_buffer->execbuf2.objects,
+      .buffer_count = cmd_buffer->execbuf2.bo_count,
+      .batch_start_offset = 0,
+      .batch_len = batch->next - batch->start,
+      .cliprects_ptr = 0,
+      .num_cliprects = 0,
+      .DR1 = 0,
+      .DR4 = 0,
+      .flags = I915_EXEC_HANDLE_LUT | I915_EXEC_RENDER |
+               I915_EXEC_CONSTANTS_REL_GENERAL,
+      .rsvd1 = cmd_buffer->device->context_id,
+      .rsvd2 = 0,
+   };
+
+   if (!cmd_buffer->execbuf2.need_reloc)
+      cmd_buffer->execbuf2.execbuf.flags |= I915_EXEC_NO_RELOC;
+}
diff --git a/src/intel/vulkan/anv_cmd_buffer.c b/src/intel/vulkan/anv_cmd_buffer.c
new file mode 100644 (file)
index 0000000..ac8bf5f
--- /dev/null
@@ -0,0 +1,1227 @@
+/*
+ * Copyright © 2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include <assert.h>
+#include <stdbool.h>
+#include <string.h>
+#include <unistd.h>
+#include <fcntl.h>
+
+#include "anv_private.h"
+
+/** \file anv_cmd_buffer.c
+ *
+ * This file contains all of the stuff for emitting commands into a command
+ * buffer.  This includes implementations of most of the vkCmd*
+ * entrypoints.  This file is concerned entirely with state emission and
+ * not with the command buffer data structure itself.  As far as this file
+ * is concerned, most of anv_cmd_buffer is magic.
+ */
+
+/* TODO: These are taken from GLES.  We should check the Vulkan spec */
+const struct anv_dynamic_state default_dynamic_state = {
+   .viewport = {
+      .count = 0,
+   },
+   .scissor = {
+      .count = 0,
+   },
+   .line_width = 1.0f,
+   .depth_bias = {
+      .bias = 0.0f,
+      .clamp = 0.0f,
+      .slope = 0.0f,
+   },
+   .blend_constants = { 0.0f, 0.0f, 0.0f, 0.0f },
+   .depth_bounds = {
+      .min = 0.0f,
+      .max = 1.0f,
+   },
+   .stencil_compare_mask = {
+      .front = ~0u,
+      .back = ~0u,
+   },
+   .stencil_write_mask = {
+      .front = ~0u,
+      .back = ~0u,
+   },
+   .stencil_reference = {
+      .front = 0u,
+      .back = 0u,
+   },
+};
+
+void
+anv_dynamic_state_copy(struct anv_dynamic_state *dest,
+                       const struct anv_dynamic_state *src,
+                       uint32_t copy_mask)
+{
+   if (copy_mask & (1 << VK_DYNAMIC_STATE_VIEWPORT)) {
+      dest->viewport.count = src->viewport.count;
+      typed_memcpy(dest->viewport.viewports, src->viewport.viewports,
+                   src->viewport.count);
+   }
+
+   if (copy_mask & (1 << VK_DYNAMIC_STATE_SCISSOR)) {
+      dest->scissor.count = src->scissor.count;
+      typed_memcpy(dest->scissor.scissors, src->scissor.scissors,
+                   src->scissor.count);
+   }
+
+   if (copy_mask & (1 << VK_DYNAMIC_STATE_LINE_WIDTH))
+      dest->line_width = src->line_width;
+
+   if (copy_mask & (1 << VK_DYNAMIC_STATE_DEPTH_BIAS))
+      dest->depth_bias = src->depth_bias;
+
+   if (copy_mask & (1 << VK_DYNAMIC_STATE_BLEND_CONSTANTS))
+      typed_memcpy(dest->blend_constants, src->blend_constants, 4);
+
+   if (copy_mask & (1 << VK_DYNAMIC_STATE_DEPTH_BOUNDS))
+      dest->depth_bounds = src->depth_bounds;
+
+   if (copy_mask & (1 << VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK))
+      dest->stencil_compare_mask = src->stencil_compare_mask;
+
+   if (copy_mask & (1 << VK_DYNAMIC_STATE_STENCIL_WRITE_MASK))
+      dest->stencil_write_mask = src->stencil_write_mask;
+
+   if (copy_mask & (1 << VK_DYNAMIC_STATE_STENCIL_REFERENCE))
+      dest->stencil_reference = src->stencil_reference;
+}
+
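+/* A minimal sketch (not compiled into the driver) of using the copy mask
+ * above to copy only a subset of the dynamic state, here the line width and
+ * the blend constants.
+ */
+#if 0
+static void
+example_copy_line_width_and_blend(struct anv_dynamic_state *dest,
+                                  const struct anv_dynamic_state *src)
+{
+   anv_dynamic_state_copy(dest, src,
+                          (1 << VK_DYNAMIC_STATE_LINE_WIDTH) |
+                          (1 << VK_DYNAMIC_STATE_BLEND_CONSTANTS));
+}
+#endif
+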
+static void
+anv_cmd_state_reset(struct anv_cmd_buffer *cmd_buffer)
+{
+   struct anv_cmd_state *state = &cmd_buffer->state;
+
+   memset(&state->descriptors, 0, sizeof(state->descriptors));
+   memset(&state->push_constants, 0, sizeof(state->push_constants));
+   memset(state->binding_tables, 0, sizeof(state->binding_tables));
+   memset(state->samplers, 0, sizeof(state->samplers));
+
+   /* 0 isn't a valid config.  This ensures that we always configure L3$. */
+   cmd_buffer->state.current_l3_config = 0;
+
+   state->dirty = ~0;
+   state->vb_dirty = 0;
+   state->descriptors_dirty = 0;
+   state->push_constants_dirty = 0;
+   state->pipeline = NULL;
+   state->restart_index = UINT32_MAX;
+   state->dynamic = default_dynamic_state;
+   state->need_query_wa = true;
+
+   if (state->attachments != NULL) {
+      anv_free(&cmd_buffer->pool->alloc, state->attachments);
+      state->attachments = NULL;
+   }
+
+   state->gen7.index_buffer = NULL;
+}
+
+/**
+ * Set up anv_cmd_state::attachments for vkCmdBeginRenderPass.
+ */
+void
+anv_cmd_state_setup_attachments(struct anv_cmd_buffer *cmd_buffer,
+                                const VkRenderPassBeginInfo *info)
+{
+   struct anv_cmd_state *state = &cmd_buffer->state;
+   ANV_FROM_HANDLE(anv_render_pass, pass, info->renderPass);
+
+   anv_free(&cmd_buffer->pool->alloc, state->attachments);
+
+   if (pass->attachment_count == 0) {
+      state->attachments = NULL;
+      return;
+   }
+
+   state->attachments = anv_alloc(&cmd_buffer->pool->alloc,
+                                  pass->attachment_count *
+                                       sizeof(state->attachments[0]),
+                                  8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+   if (state->attachments == NULL) {
+      /* FIXME: Propagate VK_ERROR_OUT_OF_HOST_MEMORY to vkEndCommandBuffer */
+      abort();
+   }
+
+   for (uint32_t i = 0; i < pass->attachment_count; ++i) {
+      struct anv_render_pass_attachment *att = &pass->attachments[i];
+      VkImageAspectFlags clear_aspects = 0;
+
+      if (anv_format_is_color(att->format)) {
+         /* color attachment */
+         if (att->load_op == VK_ATTACHMENT_LOAD_OP_CLEAR) {
+            clear_aspects |= VK_IMAGE_ASPECT_COLOR_BIT;
+         }
+      } else {
+         /* depthstencil attachment */
+         if (att->format->has_depth &&
+             att->load_op == VK_ATTACHMENT_LOAD_OP_CLEAR) {
+            clear_aspects |= VK_IMAGE_ASPECT_DEPTH_BIT;
+         }
+         if (att->format->has_stencil &&
+             att->stencil_load_op == VK_ATTACHMENT_LOAD_OP_CLEAR) {
+            clear_aspects |= VK_IMAGE_ASPECT_STENCIL_BIT;
+         }
+      }
+
+      state->attachments[i].pending_clear_aspects = clear_aspects;
+      if (clear_aspects) {
+         assert(info->clearValueCount > i);
+         state->attachments[i].clear_value = info->pClearValues[i];
+      }
+   }
+}
+
+static VkResult
+anv_cmd_buffer_ensure_push_constants_size(struct anv_cmd_buffer *cmd_buffer,
+                                          gl_shader_stage stage, uint32_t size)
+{
+   struct anv_push_constants **ptr = &cmd_buffer->state.push_constants[stage];
+
+   if (*ptr == NULL) {
+      *ptr = anv_alloc(&cmd_buffer->pool->alloc, size, 8,
+                       VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+      if (*ptr == NULL)
+         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+   } else if ((*ptr)->size < size) {
+      *ptr = anv_realloc(&cmd_buffer->pool->alloc, *ptr, size, 8,
+                         VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+      if (*ptr == NULL)
+         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+   }
+   (*ptr)->size = size;
+
+   return VK_SUCCESS;
+}
+
+#define anv_cmd_buffer_ensure_push_constant_field(cmd_buffer, stage, field) \
+   anv_cmd_buffer_ensure_push_constants_size(cmd_buffer, stage, \
+      (offsetof(struct anv_push_constants, field) + \
+       sizeof(cmd_buffer->state.push_constants[0]->field)))
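+/* For example,
+ *
+ *    anv_cmd_buffer_ensure_push_constant_field(cmd_buffer, stage, dynamic);
+ *
+ * guarantees that the stage's anv_push_constants allocation covers at least
+ * offsetof(struct anv_push_constants, dynamic) plus the size of the dynamic
+ * member, growing the allocation with anv_realloc if it is currently
+ * smaller.
+ */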
+
+static VkResult anv_create_cmd_buffer(
+    struct anv_device *                         device,
+    struct anv_cmd_pool *                       pool,
+    VkCommandBufferLevel                        level,
+    VkCommandBuffer*                            pCommandBuffer)
+{
+   struct anv_cmd_buffer *cmd_buffer;
+   VkResult result;
+
+   cmd_buffer = anv_alloc(&pool->alloc, sizeof(*cmd_buffer), 8,
+                          VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+   if (cmd_buffer == NULL)
+      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+
+   cmd_buffer->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
+   cmd_buffer->device = device;
+   cmd_buffer->pool = pool;
+   cmd_buffer->level = level;
+   cmd_buffer->state.attachments = NULL;
+
+   result = anv_cmd_buffer_init_batch_bo_chain(cmd_buffer);
+   if (result != VK_SUCCESS)
+      goto fail;
+
+   anv_state_stream_init(&cmd_buffer->surface_state_stream,
+                         &device->surface_state_block_pool);
+   anv_state_stream_init(&cmd_buffer->dynamic_state_stream,
+                         &device->dynamic_state_block_pool);
+
+   if (pool) {
+      list_addtail(&cmd_buffer->pool_link, &pool->cmd_buffers);
+   } else {
+      /* Init the pool_link so we can safely call list_del when we destroy
+       * the command buffer
+       */
+      list_inithead(&cmd_buffer->pool_link);
+   }
+
+   *pCommandBuffer = anv_cmd_buffer_to_handle(cmd_buffer);
+
+   return VK_SUCCESS;
+
+ fail:
+   anv_free(&cmd_buffer->pool->alloc, cmd_buffer);
+
+   return result;
+}
+
+VkResult anv_AllocateCommandBuffers(
+    VkDevice                                    _device,
+    const VkCommandBufferAllocateInfo*          pAllocateInfo,
+    VkCommandBuffer*                            pCommandBuffers)
+{
+   ANV_FROM_HANDLE(anv_device, device, _device);
+   ANV_FROM_HANDLE(anv_cmd_pool, pool, pAllocateInfo->commandPool);
+
+   VkResult result = VK_SUCCESS;
+   uint32_t i;
+
+   for (i = 0; i < pAllocateInfo->commandBufferCount; i++) {
+      result = anv_create_cmd_buffer(device, pool, pAllocateInfo->level,
+                                     &pCommandBuffers[i]);
+      if (result != VK_SUCCESS)
+         break;
+   }
+
+   if (result != VK_SUCCESS)
+      anv_FreeCommandBuffers(_device, pAllocateInfo->commandPool,
+                             i, pCommandBuffers);
+
+   return result;
+}
+
+static void
+anv_cmd_buffer_destroy(struct anv_cmd_buffer *cmd_buffer)
+{
+   list_del(&cmd_buffer->pool_link);
+
+   anv_cmd_buffer_fini_batch_bo_chain(cmd_buffer);
+
+   anv_state_stream_finish(&cmd_buffer->surface_state_stream);
+   anv_state_stream_finish(&cmd_buffer->dynamic_state_stream);
+
+   anv_free(&cmd_buffer->pool->alloc, cmd_buffer->state.attachments);
+   anv_free(&cmd_buffer->pool->alloc, cmd_buffer);
+}
+
+void anv_FreeCommandBuffers(
+    VkDevice                                    device,
+    VkCommandPool                               commandPool,
+    uint32_t                                    commandBufferCount,
+    const VkCommandBuffer*                      pCommandBuffers)
+{
+   for (uint32_t i = 0; i < commandBufferCount; i++) {
+      ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, pCommandBuffers[i]);
+
+      anv_cmd_buffer_destroy(cmd_buffer);
+   }
+}
+
+VkResult anv_ResetCommandBuffer(
+    VkCommandBuffer                             commandBuffer,
+    VkCommandBufferResetFlags                   flags)
+{
+   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
+
+   cmd_buffer->usage_flags = 0;
+   cmd_buffer->state.current_pipeline = UINT32_MAX;
+   anv_cmd_buffer_reset_batch_bo_chain(cmd_buffer);
+   anv_cmd_state_reset(cmd_buffer);
+
+   anv_state_stream_finish(&cmd_buffer->surface_state_stream);
+   anv_state_stream_init(&cmd_buffer->surface_state_stream,
+                         &cmd_buffer->device->surface_state_block_pool);
+
+   anv_state_stream_finish(&cmd_buffer->dynamic_state_stream);
+   anv_state_stream_init(&cmd_buffer->dynamic_state_stream,
+                         &cmd_buffer->device->dynamic_state_block_pool);
+
+   return VK_SUCCESS;
+}
+
+void
+anv_cmd_buffer_emit_state_base_address(struct anv_cmd_buffer *cmd_buffer)
+{
+   switch (cmd_buffer->device->info.gen) {
+   case 7:
+      if (cmd_buffer->device->info.is_haswell)
+         return gen75_cmd_buffer_emit_state_base_address(cmd_buffer);
+      else
+         return gen7_cmd_buffer_emit_state_base_address(cmd_buffer);
+   case 8:
+      return gen8_cmd_buffer_emit_state_base_address(cmd_buffer);
+   case 9:
+      return gen9_cmd_buffer_emit_state_base_address(cmd_buffer);
+   default:
+      unreachable("unsupported gen\n");
+   }
+}
+
+VkResult anv_BeginCommandBuffer(
+    VkCommandBuffer                             commandBuffer,
+    const VkCommandBufferBeginInfo*             pBeginInfo)
+{
+   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
+
+   /* If this is the first vkBeginCommandBuffer, we must *initialize* the
+    * command buffer's state; otherwise, we must *reset* its state. Resetting
+    * covers both cases, since it leaves the state freshly initialized.
+    *
+    * From the Vulkan 1.0 spec:
+    *
+    *    If a command buffer is in the executable state and the command buffer
+    *    was allocated from a command pool with the
+    *    VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT flag set, then
+    *    vkBeginCommandBuffer implicitly resets the command buffer, behaving
+    *    as if vkResetCommandBuffer had been called with
+    *    VK_COMMAND_BUFFER_RESET_RELEASE_RESOURCES_BIT not set. It then puts
+    *    the command buffer in the recording state.
+    */
+   anv_ResetCommandBuffer(commandBuffer, /*flags*/ 0);
+
+   cmd_buffer->usage_flags = pBeginInfo->flags;
+
+   assert(cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_SECONDARY ||
+          !(cmd_buffer->usage_flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT));
+
+   anv_cmd_buffer_emit_state_base_address(cmd_buffer);
+
+   if (cmd_buffer->usage_flags &
+       VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT) {
+      cmd_buffer->state.framebuffer =
+         anv_framebuffer_from_handle(pBeginInfo->pInheritanceInfo->framebuffer);
+      cmd_buffer->state.pass =
+         anv_render_pass_from_handle(pBeginInfo->pInheritanceInfo->renderPass);
+
+      struct anv_subpass *subpass =
+         &cmd_buffer->state.pass->subpasses[pBeginInfo->pInheritanceInfo->subpass];
+
+      anv_cmd_buffer_set_subpass(cmd_buffer, subpass);
+   }
+
+   return VK_SUCCESS;
+}
+
+VkResult anv_EndCommandBuffer(
+    VkCommandBuffer                             commandBuffer)
+{
+   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
+   struct anv_device *device = cmd_buffer->device;
+
+   anv_cmd_buffer_end_batch_buffer(cmd_buffer);
+
+   if (cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
+      /* The algorithm used to compute the validate list is not threadsafe as
+       * it uses the bo->index field.  We have to lock the device around it.
+       * Fortunately, the chances for contention here are probably very low.
+       */
+      pthread_mutex_lock(&device->mutex);
+      anv_cmd_buffer_prepare_execbuf(cmd_buffer);
+      pthread_mutex_unlock(&device->mutex);
+   }
+
+   return VK_SUCCESS;
+}
+
+void anv_CmdBindPipeline(
+    VkCommandBuffer                             commandBuffer,
+    VkPipelineBindPoint                         pipelineBindPoint,
+    VkPipeline                                  _pipeline)
+{
+   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
+   ANV_FROM_HANDLE(anv_pipeline, pipeline, _pipeline);
+
+   switch (pipelineBindPoint) {
+   case VK_PIPELINE_BIND_POINT_COMPUTE:
+      cmd_buffer->state.compute_pipeline = pipeline;
+      cmd_buffer->state.compute_dirty |= ANV_CMD_DIRTY_PIPELINE;
+      cmd_buffer->state.push_constants_dirty |= VK_SHADER_STAGE_COMPUTE_BIT;
+      cmd_buffer->state.descriptors_dirty |= VK_SHADER_STAGE_COMPUTE_BIT;
+      break;
+
+   case VK_PIPELINE_BIND_POINT_GRAPHICS:
+      cmd_buffer->state.pipeline = pipeline;
+      cmd_buffer->state.vb_dirty |= pipeline->vb_used;
+      cmd_buffer->state.dirty |= ANV_CMD_DIRTY_PIPELINE;
+      cmd_buffer->state.push_constants_dirty |= pipeline->active_stages;
+      cmd_buffer->state.descriptors_dirty |= pipeline->active_stages;
+
+      /* Apply the dynamic state from the pipeline */
+      cmd_buffer->state.dirty |= pipeline->dynamic_state_mask;
+      anv_dynamic_state_copy(&cmd_buffer->state.dynamic,
+                             &pipeline->dynamic_state,
+                             pipeline->dynamic_state_mask);
+      break;
+
+   default:
+      assert(!"invalid bind point");
+      break;
+   }
+}
+
+void anv_CmdSetViewport(
+    VkCommandBuffer                             commandBuffer,
+    uint32_t                                    firstViewport,
+    uint32_t                                    viewportCount,
+    const VkViewport*                           pViewports)
+{
+   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
+
+   const uint32_t total_count = firstViewport + viewportCount;
+   if (cmd_buffer->state.dynamic.viewport.count < total_count)
+      cmd_buffer->state.dynamic.viewport.count = total_count;
+
+   memcpy(cmd_buffer->state.dynamic.viewport.viewports + firstViewport,
+          pViewports, viewportCount * sizeof(*pViewports));
+
+   cmd_buffer->state.dirty |= ANV_CMD_DIRTY_DYNAMIC_VIEWPORT;
+}
+
+void anv_CmdSetScissor(
+    VkCommandBuffer                             commandBuffer,
+    uint32_t                                    firstScissor,
+    uint32_t                                    scissorCount,
+    const VkRect2D*                             pScissors)
+{
+   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
+
+   const uint32_t total_count = firstScissor + scissorCount;
+   if (cmd_buffer->state.dynamic.scissor.count < total_count)
+      cmd_buffer->state.dynamic.scissor.count = total_count;
+
+   memcpy(cmd_buffer->state.dynamic.scissor.scissors + firstScissor,
+          pScissors, scissorCount * sizeof(*pScissors));
+
+   cmd_buffer->state.dirty |= ANV_CMD_DIRTY_DYNAMIC_SCISSOR;
+}
+
+void anv_CmdSetLineWidth(
+    VkCommandBuffer                             commandBuffer,
+    float                                       lineWidth)
+{
+   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
+
+   cmd_buffer->state.dynamic.line_width = lineWidth;
+   cmd_buffer->state.dirty |= ANV_CMD_DIRTY_DYNAMIC_LINE_WIDTH;
+}
+
+void anv_CmdSetDepthBias(
+    VkCommandBuffer                             commandBuffer,
+    float                                       depthBiasConstantFactor,
+    float                                       depthBiasClamp,
+    float                                       depthBiasSlopeFactor)
+{
+   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
+
+   cmd_buffer->state.dynamic.depth_bias.bias = depthBiasConstantFactor;
+   cmd_buffer->state.dynamic.depth_bias.clamp = depthBiasClamp;
+   cmd_buffer->state.dynamic.depth_bias.slope = depthBiasSlopeFactor;
+
+   cmd_buffer->state.dirty |= ANV_CMD_DIRTY_DYNAMIC_DEPTH_BIAS;
+}
+
+void anv_CmdSetBlendConstants(
+    VkCommandBuffer                             commandBuffer,
+    const float                                 blendConstants[4])
+{
+   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
+
+   memcpy(cmd_buffer->state.dynamic.blend_constants,
+          blendConstants, sizeof(float) * 4);
+
+   cmd_buffer->state.dirty |= ANV_CMD_DIRTY_DYNAMIC_BLEND_CONSTANTS;
+}
+
+void anv_CmdSetDepthBounds(
+    VkCommandBuffer                             commandBuffer,
+    float                                       minDepthBounds,
+    float                                       maxDepthBounds)
+{
+   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
+
+   cmd_buffer->state.dynamic.depth_bounds.min = minDepthBounds;
+   cmd_buffer->state.dynamic.depth_bounds.max = maxDepthBounds;
+
+   cmd_buffer->state.dirty |= ANV_CMD_DIRTY_DYNAMIC_DEPTH_BOUNDS;
+}
+
+void anv_CmdSetStencilCompareMask(
+    VkCommandBuffer                             commandBuffer,
+    VkStencilFaceFlags                          faceMask,
+    uint32_t                                    compareMask)
+{
+   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
+
+   if (faceMask & VK_STENCIL_FACE_FRONT_BIT)
+      cmd_buffer->state.dynamic.stencil_compare_mask.front = compareMask;
+   if (faceMask & VK_STENCIL_FACE_BACK_BIT)
+      cmd_buffer->state.dynamic.stencil_compare_mask.back = compareMask;
+
+   cmd_buffer->state.dirty |= ANV_CMD_DIRTY_DYNAMIC_STENCIL_COMPARE_MASK;
+}
+
+void anv_CmdSetStencilWriteMask(
+    VkCommandBuffer                             commandBuffer,
+    VkStencilFaceFlags                          faceMask,
+    uint32_t                                    writeMask)
+{
+   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
+
+   if (faceMask & VK_STENCIL_FACE_FRONT_BIT)
+      cmd_buffer->state.dynamic.stencil_write_mask.front = writeMask;
+   if (faceMask & VK_STENCIL_FACE_BACK_BIT)
+      cmd_buffer->state.dynamic.stencil_write_mask.back = writeMask;
+
+   cmd_buffer->state.dirty |= ANV_CMD_DIRTY_DYNAMIC_STENCIL_WRITE_MASK;
+}
+
+void anv_CmdSetStencilReference(
+    VkCommandBuffer                             commandBuffer,
+    VkStencilFaceFlags                          faceMask,
+    uint32_t                                    reference)
+{
+   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
+
+   if (faceMask & VK_STENCIL_FACE_FRONT_BIT)
+      cmd_buffer->state.dynamic.stencil_reference.front = reference;
+   if (faceMask & VK_STENCIL_FACE_BACK_BIT)
+      cmd_buffer->state.dynamic.stencil_reference.back = reference;
+
+   cmd_buffer->state.dirty |= ANV_CMD_DIRTY_DYNAMIC_STENCIL_REFERENCE;
+}
+
+void anv_CmdBindDescriptorSets(
+    VkCommandBuffer                             commandBuffer,
+    VkPipelineBindPoint                         pipelineBindPoint,
+    VkPipelineLayout                            _layout,
+    uint32_t                                    firstSet,
+    uint32_t                                    descriptorSetCount,
+    const VkDescriptorSet*                      pDescriptorSets,
+    uint32_t                                    dynamicOffsetCount,
+    const uint32_t*                             pDynamicOffsets)
+{
+   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
+   ANV_FROM_HANDLE(anv_pipeline_layout, layout, _layout);
+   struct anv_descriptor_set_layout *set_layout;
+
+   assert(firstSet + descriptorSetCount < MAX_SETS);
+
+   uint32_t dynamic_slot = 0;
+   for (uint32_t i = 0; i < descriptorSetCount; i++) {
+      ANV_FROM_HANDLE(anv_descriptor_set, set, pDescriptorSets[i]);
+      set_layout = layout->set[firstSet + i].layout;
+
+      if (cmd_buffer->state.descriptors[firstSet + i] != set) {
+         cmd_buffer->state.descriptors[firstSet + i] = set;
+         cmd_buffer->state.descriptors_dirty |= set_layout->shader_stages;
+      }
+
+      if (set_layout->dynamic_offset_count > 0) {
+         anv_foreach_stage(s, set_layout->shader_stages) {
+            anv_cmd_buffer_ensure_push_constant_field(cmd_buffer, s, dynamic);
+
+            struct anv_push_constants *push =
+               cmd_buffer->state.push_constants[s];
+
+            unsigned d = layout->set[firstSet + i].dynamic_offset_start;
+            const uint32_t *offsets = pDynamicOffsets + dynamic_slot;
+            struct anv_descriptor *desc = set->descriptors;
+
+            for (unsigned b = 0; b < set_layout->binding_count; b++) {
+               if (set_layout->binding[b].dynamic_offset_index < 0)
+                  continue;
+
+               unsigned array_size = set_layout->binding[b].array_size;
+               for (unsigned j = 0; j < array_size; j++) {
+                  uint32_t range = 0;
+                  if (desc->buffer_view)
+                     range = desc->buffer_view->range;
+                  push->dynamic[d].offset = *(offsets++);
+                  push->dynamic[d].range = range;
+                  desc++;
+                  d++;
+               }
+            }
+         }
+         cmd_buffer->state.push_constants_dirty |= set_layout->shader_stages;
+      }
+   }
+}
+
+void anv_CmdBindVertexBuffers(
+    VkCommandBuffer                             commandBuffer,
+    uint32_t                                    firstBinding,
+    uint32_t                                    bindingCount,
+    const VkBuffer*                             pBuffers,
+    const VkDeviceSize*                         pOffsets)
+{
+   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
+   struct anv_vertex_binding *vb = cmd_buffer->state.vertex_bindings;
+
+   /* We have to defer setting up the vertex buffers since we need the buffer
+    * stride from the pipeline. */
+
+   assert(firstBinding + bindingCount < MAX_VBS);
+   for (uint32_t i = 0; i < bindingCount; i++) {
+      vb[firstBinding + i].buffer = anv_buffer_from_handle(pBuffers[i]);
+      vb[firstBinding + i].offset = pOffsets[i];
+      cmd_buffer->state.vb_dirty |= 1 << (firstBinding + i);
+   }
+}
+
+static void
+add_surface_state_reloc(struct anv_cmd_buffer *cmd_buffer,
+                        struct anv_state state, struct anv_bo *bo, uint32_t offset)
+{
+   /* The address goes in SURFACE_STATE dword 1 for gens < 8 and dwords 8 and
+    * 9 for gen8+.  We only write the first dword for gen8+ here and rely on
+    * the initial state to set the high bits to 0. */
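+   /* Concretely, the relocation recorded below lands at state.offset + 4
+    * bytes (dword 1) on gens before 8 and at state.offset + 32 bytes
+    * (dword 8) on gen8+.
+    */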
+
+   const uint32_t dword = cmd_buffer->device->info.gen < 8 ? 1 : 8;
+
+   anv_reloc_list_add(&cmd_buffer->surface_relocs, &cmd_buffer->pool->alloc,
+                      state.offset + dword * 4, bo, offset);
+}
+
+const struct anv_format *
+anv_format_for_descriptor_type(VkDescriptorType type)
+{
+   switch (type) {
+   case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
+   case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
+      return anv_format_for_vk_format(VK_FORMAT_R32G32B32A32_SFLOAT);
+
+   case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
+   case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
+      return anv_format_for_vk_format(VK_FORMAT_UNDEFINED);
+
+   default:
+      unreachable("Invalid descriptor type");
+   }
+}
+
+static struct anv_state
+anv_cmd_buffer_alloc_null_surface_state(struct anv_cmd_buffer *cmd_buffer,
+                                        struct anv_framebuffer *fb)
+{
+   switch (cmd_buffer->device->info.gen) {
+   case 7:
+      if (cmd_buffer->device->info.is_haswell) {
+         return gen75_cmd_buffer_alloc_null_surface_state(cmd_buffer, fb);
+      } else {
+         return gen7_cmd_buffer_alloc_null_surface_state(cmd_buffer, fb);
+      }
+   case 8:
+      return gen8_cmd_buffer_alloc_null_surface_state(cmd_buffer, fb);
+   case 9:
+      return gen9_cmd_buffer_alloc_null_surface_state(cmd_buffer, fb);
+   default:
+      unreachable("Invalid hardware generation");
+   }
+}
+
+VkResult
+anv_cmd_buffer_emit_binding_table(struct anv_cmd_buffer *cmd_buffer,
+                                  gl_shader_stage stage,
+                                  struct anv_state *bt_state)
+{
+   struct anv_framebuffer *fb = cmd_buffer->state.framebuffer;
+   struct anv_subpass *subpass = cmd_buffer->state.subpass;
+   struct anv_pipeline_bind_map *map;
+   uint32_t bias, state_offset;
+
+   switch (stage) {
+   case MESA_SHADER_COMPUTE:
+      map = &cmd_buffer->state.compute_pipeline->bindings[stage];
+      bias = 1;
+      break;
+   default:
+      map = &cmd_buffer->state.pipeline->bindings[stage];
+      bias = 0;
+      break;
+   }
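+   /* With bias == 1, binding table entry 0 is reserved for the compute
+    * shader's num_workgroups surface, filled in below when the kernel
+    * actually uses it; all other surfaces land at bt_map[bias + s].
+    */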
+
+   if (bias + map->surface_count == 0) {
+      *bt_state = (struct anv_state) { 0, };
+      return VK_SUCCESS;
+   }
+
+   *bt_state = anv_cmd_buffer_alloc_binding_table(cmd_buffer,
+                                                  bias + map->surface_count,
+                                                  &state_offset);
+   uint32_t *bt_map = bt_state->map;
+
+   if (bt_state->map == NULL)
+      return VK_ERROR_OUT_OF_DEVICE_MEMORY;
+
+   if (stage == MESA_SHADER_COMPUTE &&
+       get_cs_prog_data(cmd_buffer->state.compute_pipeline)->uses_num_work_groups) {
+      struct anv_bo *bo = cmd_buffer->state.num_workgroups_bo;
+      uint32_t bo_offset = cmd_buffer->state.num_workgroups_offset;
+
+      struct anv_state surface_state;
+      surface_state =
+         anv_cmd_buffer_alloc_surface_state(cmd_buffer);
+
+      const struct anv_format *format =
+         anv_format_for_descriptor_type(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER);
+      anv_fill_buffer_surface_state(cmd_buffer->device, surface_state,
+                                    format->isl_format, bo_offset, 12, 1);
+
+      bt_map[0] = surface_state.offset + state_offset;
+      add_surface_state_reloc(cmd_buffer, surface_state, bo, bo_offset);
+   }
+
+   if (map->surface_count == 0)
+      goto out;
+
+   if (map->image_count > 0) {
+      VkResult result =
+         anv_cmd_buffer_ensure_push_constant_field(cmd_buffer, stage, images);
+      if (result != VK_SUCCESS)
+         return result;
+
+      cmd_buffer->state.push_constants_dirty |= 1 << stage;
+   }
+
+   uint32_t image = 0;
+   for (uint32_t s = 0; s < map->surface_count; s++) {
+      struct anv_pipeline_binding *binding = &map->surface_to_descriptor[s];
+
+      struct anv_state surface_state;
+      struct anv_bo *bo;
+      uint32_t bo_offset;
+
+      if (binding->set == ANV_DESCRIPTOR_SET_COLOR_ATTACHMENTS) {
+         /* Color attachment binding */
+         assert(stage == MESA_SHADER_FRAGMENT);
+         if (binding->offset < subpass->color_count) {
+            const struct anv_image_view *iview =
+               fb->attachments[subpass->color_attachments[binding->offset]];
+
+            assert(iview->color_rt_surface_state.alloc_size);
+            surface_state = iview->color_rt_surface_state;
+            add_surface_state_reloc(cmd_buffer, iview->color_rt_surface_state,
+                                    iview->bo, iview->offset);
+         } else {
+            /* Null render target */
+            struct anv_framebuffer *fb = cmd_buffer->state.framebuffer;
+            surface_state =
+               anv_cmd_buffer_alloc_null_surface_state(cmd_buffer, fb);
+         }
+
+         bt_map[bias + s] = surface_state.offset + state_offset;
+         continue;
+      }
+
+      struct anv_descriptor_set *set =
+         cmd_buffer->state.descriptors[binding->set];
+      struct anv_descriptor *desc = &set->descriptors[binding->offset];
+
+      switch (desc->type) {
+      case VK_DESCRIPTOR_TYPE_SAMPLER:
+         /* Nothing for us to do here */
+         continue;
+
+      case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
+      case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
+      case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
+         surface_state = desc->image_view->sampler_surface_state;
+         assert(surface_state.alloc_size);
+         bo = desc->image_view->bo;
+         bo_offset = desc->image_view->offset;
+         break;
+
+      case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE: {
+         surface_state = desc->image_view->storage_surface_state;
+         assert(surface_state.alloc_size);
+         bo = desc->image_view->bo;
+         bo_offset = desc->image_view->offset;
+
+         struct brw_image_param *image_param =
+            &cmd_buffer->state.push_constants[stage]->images[image++];
+
+         *image_param = desc->image_view->storage_image_param;
+         image_param->surface_idx = bias + s;
+         break;
+      }
+
+      case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
+      case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
+      case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
+      case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
+      case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
+         surface_state = desc->buffer_view->surface_state;
+         assert(surface_state.alloc_size);
+         bo = desc->buffer_view->bo;
+         bo_offset = desc->buffer_view->offset;
+         break;
+
+      case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
+         surface_state = desc->buffer_view->storage_surface_state;
+         assert(surface_state.alloc_size);
+         bo = desc->buffer_view->bo;
+         bo_offset = desc->buffer_view->offset;
+
+         struct brw_image_param *image_param =
+            &cmd_buffer->state.push_constants[stage]->images[image++];
+
+         *image_param = desc->buffer_view->storage_image_param;
+         image_param->surface_idx = bias + s;
+         break;
+
+      default:
+         assert(!"Invalid descriptor type");
+         continue;
+      }
+
+      bt_map[bias + s] = surface_state.offset + state_offset;
+      add_surface_state_reloc(cmd_buffer, surface_state, bo, bo_offset);
+   }
+   assert(image == map->image_count);
+
+ out:
+   if (!cmd_buffer->device->info.has_llc)
+      anv_state_clflush(*bt_state);
+
+   return VK_SUCCESS;
+}
+
+VkResult
+anv_cmd_buffer_emit_samplers(struct anv_cmd_buffer *cmd_buffer,
+                             gl_shader_stage stage, struct anv_state *state)
+{
+   struct anv_pipeline_bind_map *map;
+
+   if (stage == MESA_SHADER_COMPUTE)
+      map = &cmd_buffer->state.compute_pipeline->bindings[stage];
+   else
+      map = &cmd_buffer->state.pipeline->bindings[stage];
+
+   if (map->sampler_count == 0) {
+      *state = (struct anv_state) { 0, };
+      return VK_SUCCESS;
+   }
+
+   uint32_t size = map->sampler_count * 16;
+   *state = anv_cmd_buffer_alloc_dynamic_state(cmd_buffer, size, 32);
+
+   if (state->map == NULL)
+      return VK_ERROR_OUT_OF_DEVICE_MEMORY;
+
+   for (uint32_t s = 0; s < map->sampler_count; s++) {
+      struct anv_pipeline_binding *binding = &map->sampler_to_descriptor[s];
+      struct anv_descriptor_set *set =
+         cmd_buffer->state.descriptors[binding->set];
+      struct anv_descriptor *desc = &set->descriptors[binding->offset];
+
+      if (desc->type != VK_DESCRIPTOR_TYPE_SAMPLER &&
+          desc->type != VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER)
+         continue;
+
+      struct anv_sampler *sampler = desc->sampler;
+
+      /* This can happen if the slot was never written: an all-zero
+       * descriptor passes the type check above because
+       * VK_DESCRIPTOR_TYPE_SAMPLER happens to be zero.
+       */
+      if (sampler == NULL)
+         continue;
+
+      memcpy(state->map + (s * 16),
+             sampler->state, sizeof(sampler->state));
+   }
+
+   if (!cmd_buffer->device->info.has_llc)
+      anv_state_clflush(*state);
+
+   return VK_SUCCESS;
+}
+
+struct anv_state
+anv_cmd_buffer_emit_dynamic(struct anv_cmd_buffer *cmd_buffer,
+                            const void *data, uint32_t size, uint32_t alignment)
+{
+   struct anv_state state;
+
+   state = anv_cmd_buffer_alloc_dynamic_state(cmd_buffer, size, alignment);
+   memcpy(state.map, data, size);
+
+   if (!cmd_buffer->device->info.has_llc)
+      anv_state_clflush(state);
+
+   VG(VALGRIND_CHECK_MEM_IS_DEFINED(state.map, size));
+
+   return state;
+}
+
+struct anv_state
+anv_cmd_buffer_merge_dynamic(struct anv_cmd_buffer *cmd_buffer,
+                             uint32_t *a, uint32_t *b,
+                             uint32_t dwords, uint32_t alignment)
+{
+   struct anv_state state;
+   uint32_t *p;
+
+   state = anv_cmd_buffer_alloc_dynamic_state(cmd_buffer,
+                                              dwords * 4, alignment);
+   p = state.map;
+   for (uint32_t i = 0; i < dwords; i++)
+      p[i] = a[i] | b[i];
+
+   if (!cmd_buffer->device->info.has_llc)
+      anv_state_clflush(state);
+
+   VG(VALGRIND_CHECK_MEM_IS_DEFINED(p, dwords * 4));
+
+   return state;
+}
+
+/**
+ * @brief Set up the command buffer for recording commands inside the given
+ * subpass.
+ *
+ * This does not record all commands needed for starting the subpass.
+ * Starting the subpass may require additional commands.
+ *
+ * Note that vkCmdBeginRenderPass, vkCmdNextSubpass, and vkBeginCommandBuffer
+ * with VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT all set up the
+ * command buffer for recording commands for some subpass.  But only the first
+ * two, vkCmdBeginRenderPass and vkCmdNextSubpass, can start a subpass.
+ */
+void
+anv_cmd_buffer_set_subpass(struct anv_cmd_buffer *cmd_buffer,
+                           struct anv_subpass *subpass)
+{
+   switch (cmd_buffer->device->info.gen) {
+   case 7:
+      if (cmd_buffer->device->info.is_haswell) {
+         gen75_cmd_buffer_set_subpass(cmd_buffer, subpass);
+      } else {
+         gen7_cmd_buffer_set_subpass(cmd_buffer, subpass);
+      }
+      break;
+   case 8:
+      gen8_cmd_buffer_set_subpass(cmd_buffer, subpass);
+      break;
+   case 9:
+      gen9_cmd_buffer_set_subpass(cmd_buffer, subpass);
+      break;
+   default:
+      unreachable("unsupported gen\n");
+   }
+}
+
+struct anv_state
+anv_cmd_buffer_push_constants(struct anv_cmd_buffer *cmd_buffer,
+                              gl_shader_stage stage)
+{
+   struct anv_push_constants *data =
+      cmd_buffer->state.push_constants[stage];
+   const struct brw_stage_prog_data *prog_data =
+      cmd_buffer->state.pipeline->prog_data[stage];
+
+   /* If we don't actually have any push constants, bail. */
+   if (data == NULL || prog_data->nr_params == 0)
+      return (struct anv_state) { .offset = 0 };
+
+   struct anv_state state =
+      anv_cmd_buffer_alloc_dynamic_state(cmd_buffer,
+                                         prog_data->nr_params * sizeof(float),
+                                         32 /* bottom 5 bits MBZ */);
+
+   /* Walk through the param array and fill the buffer with data */
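+   /* Each prog_data->param[i] is (re)used here as a byte offset into the
+    * anv_push_constants block, so the i-th uniform dword is read from
+    * data + offset.
+    */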
+   uint32_t *u32_map = state.map;
+   for (unsigned i = 0; i < prog_data->nr_params; i++) {
+      uint32_t offset = (uintptr_t)prog_data->param[i];
+      u32_map[i] = *(uint32_t *)((uint8_t *)data + offset);
+   }
+
+   if (!cmd_buffer->device->info.has_llc)
+      anv_state_clflush(state);
+
+   return state;
+}
+
+struct anv_state
+anv_cmd_buffer_cs_push_constants(struct anv_cmd_buffer *cmd_buffer)
+{
+   struct anv_push_constants *data =
+      cmd_buffer->state.push_constants[MESA_SHADER_COMPUTE];
+   struct anv_pipeline *pipeline = cmd_buffer->state.compute_pipeline;
+   const struct brw_cs_prog_data *cs_prog_data = get_cs_prog_data(pipeline);
+   const struct brw_stage_prog_data *prog_data = &cs_prog_data->base;
+
+   const unsigned local_id_dwords = cs_prog_data->local_invocation_id_regs * 8;
+   const unsigned push_constant_data_size =
+      (local_id_dwords + prog_data->nr_params) * 4;
+   const unsigned reg_aligned_constant_size = ALIGN(push_constant_data_size, 32);
+   const unsigned param_aligned_count =
+      reg_aligned_constant_size / sizeof(uint32_t);
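+   /* Worked example (illustrative numbers only): with
+    * local_invocation_id_regs == 3 and nr_params == 2, local_id_dwords is
+    * 24, push_constant_data_size is (24 + 2) * 4 = 104 bytes, and
+    * reg_aligned_constant_size rounds that up to 128 bytes, i.e. a
+    * param_aligned_count of 32 dwords per thread.
+    */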
+
+   /* If we don't actually have any push constants, bail. */
+   if (reg_aligned_constant_size == 0)
+      return (struct anv_state) { .offset = 0 };
+
+   const unsigned threads = pipeline->cs_thread_width_max;
+   const unsigned total_push_constants_size =
+      reg_aligned_constant_size * threads;
+   const unsigned push_constant_alignment =
+      cmd_buffer->device->info.gen < 8 ? 32 : 64;
+   const unsigned aligned_total_push_constants_size =
+      ALIGN(total_push_constants_size, push_constant_alignment);
+   struct anv_state state =
+      anv_cmd_buffer_alloc_dynamic_state(cmd_buffer,
+                                         aligned_total_push_constants_size,
+                                         push_constant_alignment);
+
+   /* Walk through the param array and fill the buffer with data */
+   uint32_t *u32_map = state.map;
+
+   brw_cs_fill_local_id_payload(cs_prog_data, u32_map, threads,
+                                reg_aligned_constant_size);
+
+   /* Set up uniform data for the first thread */
+   for (unsigned i = 0; i < prog_data->nr_params; i++) {
+      uint32_t offset = (uintptr_t)prog_data->param[i];
+      u32_map[local_id_dwords + i] = *(uint32_t *)((uint8_t *)data + offset);
+   }
+
+   /* Copy uniform data from the first thread to every other thread */
+   const size_t uniform_data_size = prog_data->nr_params * sizeof(uint32_t);
+   for (unsigned t = 1; t < threads; t++) {
+      memcpy(&u32_map[t * param_aligned_count + local_id_dwords],
+             &u32_map[local_id_dwords],
+             uniform_data_size);
+   }
+
+   if (!cmd_buffer->device->info.has_llc)
+      anv_state_clflush(state);
+
+   return state;
+}
+
+void anv_CmdPushConstants(
+    VkCommandBuffer                             commandBuffer,
+    VkPipelineLayout                            layout,
+    VkShaderStageFlags                          stageFlags,
+    uint32_t                                    offset,
+    uint32_t                                    size,
+    const void*                                 pValues)
+{
+   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
+
+   anv_foreach_stage(stage, stageFlags) {
+      anv_cmd_buffer_ensure_push_constant_field(cmd_buffer, stage, client_data);
+
+      memcpy(cmd_buffer->state.push_constants[stage]->client_data + offset,
+             pValues, size);
+   }
+
+   cmd_buffer->state.push_constants_dirty |= stageFlags;
+}
+
+void anv_CmdExecuteCommands(
+    VkCommandBuffer                             commandBuffer,
+    uint32_t                                    commandBufferCount,
+    const VkCommandBuffer*                      pCmdBuffers)
+{
+   ANV_FROM_HANDLE(anv_cmd_buffer, primary, commandBuffer);
+
+   assert(primary->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY);
+
+   for (uint32_t i = 0; i < commandBufferCount; i++) {
+      ANV_FROM_HANDLE(anv_cmd_buffer, secondary, pCmdBuffers[i]);
+
+      assert(secondary->level == VK_COMMAND_BUFFER_LEVEL_SECONDARY);
+
+      anv_cmd_buffer_add_secondary(primary, secondary);
+   }
+}
+
+VkResult anv_CreateCommandPool(
+    VkDevice                                    _device,
+    const VkCommandPoolCreateInfo*              pCreateInfo,
+    const VkAllocationCallbacks*                pAllocator,
+    VkCommandPool*                              pCmdPool)
+{
+   ANV_FROM_HANDLE(anv_device, device, _device);
+   struct anv_cmd_pool *pool;
+
+   pool = anv_alloc2(&device->alloc, pAllocator, sizeof(*pool), 8,
+                     VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+   if (pool == NULL)
+      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+
+   if (pAllocator)
+      pool->alloc = *pAllocator;
+   else
+      pool->alloc = device->alloc;
+
+   list_inithead(&pool->cmd_buffers);
+
+   *pCmdPool = anv_cmd_pool_to_handle(pool);
+
+   return VK_SUCCESS;
+}
+
+void anv_DestroyCommandPool(
+    VkDevice                                    _device,
+    VkCommandPool                               commandPool,
+    const VkAllocationCallbacks*                pAllocator)
+{
+   ANV_FROM_HANDLE(anv_device, device, _device);
+   ANV_FROM_HANDLE(anv_cmd_pool, pool, commandPool);
+
+   anv_ResetCommandPool(_device, commandPool, 0);
+
+   anv_free2(&device->alloc, pAllocator, pool);
+}
+
+VkResult anv_ResetCommandPool(
+    VkDevice                                    device,
+    VkCommandPool                               commandPool,
+    VkCommandPoolResetFlags                     flags)
+{
+   ANV_FROM_HANDLE(anv_cmd_pool, pool, commandPool);
+
+   /* FIXME: vkResetCommandPool must not destroy its command buffers. The
+    * Vulkan 1.0 spec requires that it only reset them:
+    *
+    *    Resetting a command pool recycles all of the resources from all of
+    *    the command buffers allocated from the command pool back to the
+    *    command pool. All command buffers that have been allocated from the
+    *    command pool are put in the initial state.
+    */
+   list_for_each_entry_safe(struct anv_cmd_buffer, cmd_buffer,
+                            &pool->cmd_buffers, pool_link) {
+      anv_cmd_buffer_destroy(cmd_buffer);
+   }
+
+   return VK_SUCCESS;
+}
+
+/**
+ * Return NULL if the current subpass has no depthstencil attachment.
+ */
+const struct anv_image_view *
+anv_cmd_buffer_get_depth_stencil_view(const struct anv_cmd_buffer *cmd_buffer)
+{
+   const struct anv_subpass *subpass = cmd_buffer->state.subpass;
+   const struct anv_framebuffer *fb = cmd_buffer->state.framebuffer;
+
+   if (subpass->depth_stencil_attachment == VK_ATTACHMENT_UNUSED)
+      return NULL;
+
+   const struct anv_image_view *iview =
+      fb->attachments[subpass->depth_stencil_attachment];
+
+   assert(iview->aspect_mask & (VK_IMAGE_ASPECT_DEPTH_BIT |
+                                VK_IMAGE_ASPECT_STENCIL_BIT));
+
+   return iview;
+}
diff --git a/src/intel/vulkan/anv_descriptor_set.c b/src/intel/vulkan/anv_descriptor_set.c
new file mode 100644 (file)
index 0000000..dd645c3
--- /dev/null
@@ -0,0 +1,655 @@
+/*
+ * Copyright © 2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include <assert.h>
+#include <stdbool.h>
+#include <string.h>
+#include <unistd.h>
+#include <fcntl.h>
+
+#include "anv_private.h"
+
+/*
+ * Descriptor set layouts.
+ */
+
+VkResult anv_CreateDescriptorSetLayout(
+    VkDevice                                    _device,
+    const VkDescriptorSetLayoutCreateInfo*      pCreateInfo,
+    const VkAllocationCallbacks*                pAllocator,
+    VkDescriptorSetLayout*                      pSetLayout)
+{
+   ANV_FROM_HANDLE(anv_device, device, _device);
+   struct anv_descriptor_set_layout *set_layout;
+
+   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO);
+
+   uint32_t max_binding = 0;
+   uint32_t immutable_sampler_count = 0;
+   for (uint32_t j = 0; j < pCreateInfo->bindingCount; j++) {
+      max_binding = MAX2(max_binding, pCreateInfo->pBindings[j].binding);
+      if (pCreateInfo->pBindings[j].pImmutableSamplers)
+         immutable_sampler_count += pCreateInfo->pBindings[j].descriptorCount;
+   }
+
+   size_t size = sizeof(struct anv_descriptor_set_layout) +
+                 (max_binding + 1) * sizeof(set_layout->binding[0]) +
+                 immutable_sampler_count * sizeof(struct anv_sampler *);
+
+   set_layout = anv_alloc2(&device->alloc, pAllocator, size, 8,
+                           VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+   if (!set_layout)
+      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+
+   /* We just allocate all the samplers at the end of the struct */
+   struct anv_sampler **samplers =
+      (struct anv_sampler **)&set_layout->binding[max_binding + 1];
+
+   set_layout->binding_count = max_binding + 1;
+   set_layout->shader_stages = 0;
+   set_layout->size = 0;
+
+   for (uint32_t b = 0; b <= max_binding; b++) {
+      /* Initialize all binding_layout entries to -1 */
+      memset(&set_layout->binding[b], -1, sizeof(set_layout->binding[b]));
+
+      set_layout->binding[b].immutable_samplers = NULL;
+   }
+
+   /* Initialize all samplers to 0 */
+   memset(samplers, 0, immutable_sampler_count * sizeof(*samplers));
+
+   uint32_t sampler_count[MESA_SHADER_STAGES] = { 0, };
+   uint32_t surface_count[MESA_SHADER_STAGES] = { 0, };
+   uint32_t image_count[MESA_SHADER_STAGES] = { 0, };
+   uint32_t buffer_count = 0;
+   uint32_t dynamic_offset_count = 0;
+
+   for (uint32_t j = 0; j < pCreateInfo->bindingCount; j++) {
+      const VkDescriptorSetLayoutBinding *binding = &pCreateInfo->pBindings[j];
+      uint32_t b = binding->binding;
+
+      assert(binding->descriptorCount > 0);
+      set_layout->binding[b].array_size = binding->descriptorCount;
+      set_layout->binding[b].descriptor_index = set_layout->size;
+      set_layout->size += binding->descriptorCount;
+
+      switch (binding->descriptorType) {
+      case VK_DESCRIPTOR_TYPE_SAMPLER:
+      case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
+         anv_foreach_stage(s, binding->stageFlags) {
+            set_layout->binding[b].stage[s].sampler_index = sampler_count[s];
+            sampler_count[s] += binding->descriptorCount;
+         }
+         break;
+      default:
+         break;
+      }
+
+      switch (binding->descriptorType) {
+      case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
+      case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
+      case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
+      case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
+         set_layout->binding[b].buffer_index = buffer_count;
+         buffer_count += binding->descriptorCount;
+         /* fall through */
+
+      case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
+      case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
+      case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
+      case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
+      case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
+      case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
+         anv_foreach_stage(s, binding->stageFlags) {
+            set_layout->binding[b].stage[s].surface_index = surface_count[s];
+            surface_count[s] += binding->descriptorCount;
+         }
+         break;
+      default:
+         break;
+      }
+
+      switch (binding->descriptorType) {
+      case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
+      case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
+         set_layout->binding[b].dynamic_offset_index = dynamic_offset_count;
+         dynamic_offset_count += binding->descriptorCount;
+         break;
+      default:
+         break;
+      }
+
+      switch (binding->descriptorType) {
+      case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
+      case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
+         anv_foreach_stage(s, binding->stageFlags) {
+            set_layout->binding[b].stage[s].image_index = image_count[s];
+            image_count[s] += binding->descriptorCount;
+         }
+         break;
+      default:
+         break;
+      }
+
+      if (binding->pImmutableSamplers) {
+         set_layout->binding[b].immutable_samplers = samplers;
+         samplers += binding->descriptorCount;
+
+         for (uint32_t i = 0; i < binding->descriptorCount; i++)
+            set_layout->binding[b].immutable_samplers[i] =
+               anv_sampler_from_handle(binding->pImmutableSamplers[i]);
+      } else {
+         set_layout->binding[b].immutable_samplers = NULL;
+      }
+
+      set_layout->shader_stages |= binding->stageFlags;
+   }
+
+   set_layout->buffer_count = buffer_count;
+   set_layout->dynamic_offset_count = dynamic_offset_count;
+
+   *pSetLayout = anv_descriptor_set_layout_to_handle(set_layout);
+
+   return VK_SUCCESS;
+}
+
+void anv_DestroyDescriptorSetLayout(
+    VkDevice                                    _device,
+    VkDescriptorSetLayout                       _set_layout,
+    const VkAllocationCallbacks*                pAllocator)
+{
+   ANV_FROM_HANDLE(anv_device, device, _device);
+   ANV_FROM_HANDLE(anv_descriptor_set_layout, set_layout, _set_layout);
+
+   anv_free2(&device->alloc, pAllocator, set_layout);
+}
+
+/*
+ * Pipeline layouts.  These have nothing to do with the pipeline.  They are
+ * just multiple descriptor set layouts pasted together.
+ */
+
+VkResult anv_CreatePipelineLayout(
+    VkDevice                                    _device,
+    const VkPipelineLayoutCreateInfo*           pCreateInfo,
+    const VkAllocationCallbacks*                pAllocator,
+    VkPipelineLayout*                           pPipelineLayout)
+{
+   ANV_FROM_HANDLE(anv_device, device, _device);
+   struct anv_pipeline_layout *layout;
+
+   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO);
+
+   layout = anv_alloc2(&device->alloc, pAllocator, sizeof(*layout), 8,
+                       VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+   if (layout == NULL)
+      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+
+   layout->num_sets = pCreateInfo->setLayoutCount;
+
+   unsigned dynamic_offset_count = 0;
+
+   memset(layout->stage, 0, sizeof(layout->stage));
+   for (uint32_t set = 0; set < pCreateInfo->setLayoutCount; set++) {
+      ANV_FROM_HANDLE(anv_descriptor_set_layout, set_layout,
+                      pCreateInfo->pSetLayouts[set]);
+      layout->set[set].layout = set_layout;
+
+      layout->set[set].dynamic_offset_start = dynamic_offset_count;
+      for (uint32_t b = 0; b < set_layout->binding_count; b++) {
+         if (set_layout->binding[b].dynamic_offset_index < 0)
+            continue;
+
+         dynamic_offset_count += set_layout->binding[b].array_size;
+         for (gl_shader_stage s = 0; s < MESA_SHADER_STAGES; s++) {
+            if (set_layout->binding[b].stage[s].surface_index >= 0)
+               layout->stage[s].has_dynamic_offsets = true;
+         }
+      }
+   }
+
+   *pPipelineLayout = anv_pipeline_layout_to_handle(layout);
+
+   return VK_SUCCESS;
+}
+
+void anv_DestroyPipelineLayout(
+    VkDevice                                    _device,
+    VkPipelineLayout                            _pipelineLayout,
+    const VkAllocationCallbacks*                pAllocator)
+{
+   ANV_FROM_HANDLE(anv_device, device, _device);
+   ANV_FROM_HANDLE(anv_pipeline_layout, pipeline_layout, _pipelineLayout);
+
+   anv_free2(&device->alloc, pAllocator, pipeline_layout);
+}
+
+/*
+ * Descriptor pools.
+ *
+ * These are implemented using a big pool of memory and a free-list for the
+ * host memory allocations and a state_stream and a free list for the buffer
+ * view surface state. The spec allows us to fail to allocate due to
+ * fragmentation in all cases but two: 1) after pool reset, allocating up
+ * until the pool size with no freeing must succeed and 2) allocating and
+ * freeing only descriptor sets with the same layout. Case 1) is easy enough,
+ * and the free lists let us recycle blocks for case 2).
+ */
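+/*
+ * Roughly: sets are carved out of pool->data linearly via pool->next; freed
+ * sets are threaded onto pool->free_list as pool_free_list_entry records
+ * (next offset plus size), and freed buffer view surface states are threaded
+ * onto pool->surface_state_free_list.  See anv_descriptor_set_create() and
+ * anv_descriptor_set_destroy() below.
+ */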
+
+#define EMPTY 1
+
+VkResult anv_CreateDescriptorPool(
+    VkDevice                                    _device,
+    const VkDescriptorPoolCreateInfo*           pCreateInfo,
+    const VkAllocationCallbacks*                pAllocator,
+    VkDescriptorPool*                           pDescriptorPool)
+{
+   ANV_FROM_HANDLE(anv_device, device, _device);
+   struct anv_descriptor_pool *pool;
+
+   uint32_t descriptor_count = 0;
+   uint32_t buffer_count = 0;
+   for (uint32_t i = 0; i < pCreateInfo->poolSizeCount; i++) {
+      switch (pCreateInfo->pPoolSizes[i].type) {
+      case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
+      case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
+      case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
+      case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
+         buffer_count += pCreateInfo->pPoolSizes[i].descriptorCount;
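+         /* fall through: buffer descriptors are counted as descriptors too */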
+      default:
+         descriptor_count += pCreateInfo->pPoolSizes[i].descriptorCount;
+         break;
+      }
+   }
+
+   const size_t size =
+      sizeof(*pool) +
+      pCreateInfo->maxSets * sizeof(struct anv_descriptor_set) +
+      descriptor_count * sizeof(struct anv_descriptor) +
+      buffer_count * sizeof(struct anv_buffer_view);
+
+   pool = anv_alloc2(&device->alloc, pAllocator, size, 8,
+                     VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+   if (!pool)
+      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+
+   pool->size = size;
+   pool->next = 0;
+   pool->free_list = EMPTY;
+
+   anv_state_stream_init(&pool->surface_state_stream,
+                         &device->surface_state_block_pool);
+   pool->surface_state_free_list = NULL;
+
+   *pDescriptorPool = anv_descriptor_pool_to_handle(pool);
+
+   return VK_SUCCESS;
+}
+
+void anv_DestroyDescriptorPool(
+    VkDevice                                    _device,
+    VkDescriptorPool                            _pool,
+    const VkAllocationCallbacks*                pAllocator)
+{
+   ANV_FROM_HANDLE(anv_device, device, _device);
+   ANV_FROM_HANDLE(anv_descriptor_pool, pool, _pool);
+
+   anv_state_stream_finish(&pool->surface_state_stream);
+   anv_free2(&device->alloc, pAllocator, pool);
+}
+
+VkResult anv_ResetDescriptorPool(
+    VkDevice                                    _device,
+    VkDescriptorPool                            descriptorPool,
+    VkDescriptorPoolResetFlags                  flags)
+{
+   ANV_FROM_HANDLE(anv_device, device, _device);
+   ANV_FROM_HANDLE(anv_descriptor_pool, pool, descriptorPool);
+
+   pool->next = 0;
+   pool->free_list = EMPTY;
+   anv_state_stream_finish(&pool->surface_state_stream);
+   anv_state_stream_init(&pool->surface_state_stream,
+                         &device->surface_state_block_pool);
+   pool->surface_state_free_list = NULL;
+
+   return VK_SUCCESS;
+}
+
+struct pool_free_list_entry {
+   uint32_t next;
+   uint32_t size;
+};
+
+static size_t
+layout_size(const struct anv_descriptor_set_layout *layout)
+{
+   return
+      sizeof(struct anv_descriptor_set) +
+      layout->size * sizeof(struct anv_descriptor) +
+      layout->buffer_count * sizeof(struct anv_buffer_view);
+}
+
+struct surface_state_free_list_entry {
+   void *next;
+   uint32_t offset;
+};
+
+VkResult
+anv_descriptor_set_create(struct anv_device *device,
+                          struct anv_descriptor_pool *pool,
+                          const struct anv_descriptor_set_layout *layout,
+                          struct anv_descriptor_set **out_set)
+{
+   struct anv_descriptor_set *set;
+   const size_t size = layout_size(layout);
+
+   set = NULL;
+   if (size <= pool->size - pool->next) {
+      set = (struct anv_descriptor_set *) (pool->data + pool->next);
+      pool->next += size;
+   } else {
+      struct pool_free_list_entry *entry;
+      uint32_t *link = &pool->free_list;
+      for (uint32_t f = pool->free_list; f != EMPTY; f = entry->next) {
+         entry = (struct pool_free_list_entry *) (pool->data + f);
+         if (size <= entry->size) {
+            *link = entry->next;
+            set = (struct anv_descriptor_set *) entry;
+            break;
+         }
+         link = &entry->next;
+      }
+   }
+
+   if (set == NULL)
+      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+
+   set->size = size;
+   set->layout = layout;
+   set->buffer_views =
+      (struct anv_buffer_view *) &set->descriptors[layout->size];
+   set->buffer_count = layout->buffer_count;
+
+   /* Go through and fill out immutable samplers if we have any */
+   struct anv_descriptor *desc = set->descriptors;
+   for (uint32_t b = 0; b < layout->binding_count; b++) {
+      if (layout->binding[b].immutable_samplers) {
+         for (uint32_t i = 0; i < layout->binding[b].array_size; i++) {
+            /* The type will get changed to COMBINED_IMAGE_SAMPLER in
+             * UpdateDescriptorSets if needed.  However, if the descriptor
+             * set has an immutable sampler, UpdateDescriptorSets may never
+             * touch it, so we need to make sure it's 100% valid now.
+             */
+            desc[i] = (struct anv_descriptor) {
+               .type = VK_DESCRIPTOR_TYPE_SAMPLER,
+               .sampler = layout->binding[b].immutable_samplers[i],
+            };
+         }
+      }
+      desc += layout->binding[b].array_size;
+   }
+
+   /* Allocate surface state for the buffer views. */
+   for (uint32_t b = 0; b < layout->buffer_count; b++) {
+      struct surface_state_free_list_entry *entry =
+         pool->surface_state_free_list;
+      struct anv_state state;
+
+      if (entry) {
+         state.map = entry;
+         state.offset = entry->offset;
+         state.alloc_size = 64;
+         pool->surface_state_free_list = entry->next;
+      } else {
+         state = anv_state_stream_alloc(&pool->surface_state_stream, 64, 64);
+      }
+
+      set->buffer_views[b].surface_state = state;
+   }
+
+   *out_set = set;
+
+   return VK_SUCCESS;
+}
+
+void
+anv_descriptor_set_destroy(struct anv_device *device,
+                           struct anv_descriptor_pool *pool,
+                           struct anv_descriptor_set *set)
+{
+   /* Put the buffer view surface state back on the free list. */
+   for (uint32_t b = 0; b < set->buffer_count; b++) {
+      struct surface_state_free_list_entry *entry =
+         set->buffer_views[b].surface_state.map;
+      entry->next = pool->surface_state_free_list;
+      pool->surface_state_free_list = entry;
+   }
+
+   /* Put the descriptor set allocation back on the free list. */
+   const uint32_t index = (char *) set - pool->data;
+   if (index + set->size == pool->next) {
+      pool->next = index;
+   } else {
+      struct pool_free_list_entry *entry = (struct pool_free_list_entry *) set;
+      entry->next = pool->free_list;
+      entry->size = set->size;
+      pool->free_list = (char *) entry - pool->data;
+   }
+}
+
+VkResult anv_AllocateDescriptorSets(
+    VkDevice                                    _device,
+    const VkDescriptorSetAllocateInfo*          pAllocateInfo,
+    VkDescriptorSet*                            pDescriptorSets)
+{
+   ANV_FROM_HANDLE(anv_device, device, _device);
+   ANV_FROM_HANDLE(anv_descriptor_pool, pool, pAllocateInfo->descriptorPool);
+
+   VkResult result = VK_SUCCESS;
+   struct anv_descriptor_set *set;
+   uint32_t i;
+
+   for (i = 0; i < pAllocateInfo->descriptorSetCount; i++) {
+      ANV_FROM_HANDLE(anv_descriptor_set_layout, layout,
+                      pAllocateInfo->pSetLayouts[i]);
+
+      result = anv_descriptor_set_create(device, pool, layout, &set);
+      if (result != VK_SUCCESS)
+         break;
+
+      pDescriptorSets[i] = anv_descriptor_set_to_handle(set);
+   }
+
+   if (result != VK_SUCCESS)
+      anv_FreeDescriptorSets(_device, pAllocateInfo->descriptorPool,
+                             i, pDescriptorSets);
+
+   return result;
+}
+
+VkResult anv_FreeDescriptorSets(
+    VkDevice                                    _device,
+    VkDescriptorPool                            descriptorPool,
+    uint32_t                                    count,
+    const VkDescriptorSet*                      pDescriptorSets)
+{
+   ANV_FROM_HANDLE(anv_device, device, _device);
+   ANV_FROM_HANDLE(anv_descriptor_pool, pool, descriptorPool);
+
+   for (uint32_t i = 0; i < count; i++) {
+      ANV_FROM_HANDLE(anv_descriptor_set, set, pDescriptorSets[i]);
+
+      anv_descriptor_set_destroy(device, pool, set);
+   }
+
+   return VK_SUCCESS;
+}
+
+void anv_UpdateDescriptorSets(
+    VkDevice                                    _device,
+    uint32_t                                    descriptorWriteCount,
+    const VkWriteDescriptorSet*                 pDescriptorWrites,
+    uint32_t                                    descriptorCopyCount,
+    const VkCopyDescriptorSet*                  pDescriptorCopies)
+{
+   ANV_FROM_HANDLE(anv_device, device, _device);
+
+   for (uint32_t i = 0; i < descriptorWriteCount; i++) {
+      const VkWriteDescriptorSet *write = &pDescriptorWrites[i];
+      ANV_FROM_HANDLE(anv_descriptor_set, set, write->dstSet);
+      const struct anv_descriptor_set_binding_layout *bind_layout =
+         &set->layout->binding[write->dstBinding];
+      struct anv_descriptor *desc =
+         &set->descriptors[bind_layout->descriptor_index];
+      desc += write->dstArrayElement;
+
+      switch (write->descriptorType) {
+      case VK_DESCRIPTOR_TYPE_SAMPLER:
+         for (uint32_t j = 0; j < write->descriptorCount; j++) {
+            ANV_FROM_HANDLE(anv_sampler, sampler,
+                            write->pImageInfo[j].sampler);
+
+            desc[j] = (struct anv_descriptor) {
+               .type = VK_DESCRIPTOR_TYPE_SAMPLER,
+               .sampler = sampler,
+            };
+         }
+         break;
+
+      case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
+         for (uint32_t j = 0; j < write->descriptorCount; j++) {
+            ANV_FROM_HANDLE(anv_image_view, iview,
+                            write->pImageInfo[j].imageView);
+            ANV_FROM_HANDLE(anv_sampler, sampler,
+                            write->pImageInfo[j].sampler);
+
+            desc[j].type = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
+            desc[j].image_view = iview;
+
+            /* If this descriptor has an immutable sampler, we don't want
+             * to stomp on it.
+             */
+            if (sampler)
+               desc[j].sampler = sampler;
+         }
+         break;
+
+      case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
+      case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
+         for (uint32_t j = 0; j < write->descriptorCount; j++) {
+            ANV_FROM_HANDLE(anv_image_view, iview,
+                            write->pImageInfo[j].imageView);
+
+            desc[j] = (struct anv_descriptor) {
+               .type = write->descriptorType,
+               .image_view = iview,
+            };
+         }
+         break;
+
+      case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
+      case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
+         for (uint32_t j = 0; j < write->descriptorCount; j++) {
+            ANV_FROM_HANDLE(anv_buffer_view, bview,
+                            write->pTexelBufferView[j]);
+
+            desc[j] = (struct anv_descriptor) {
+               .type = write->descriptorType,
+               .buffer_view = bview,
+            };
+         }
+         break;
+
+      case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
+         anv_finishme("input attachments not implemented");
+         break;
+
+      case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
+      case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
+      case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
+      case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
+         for (uint32_t j = 0; j < write->descriptorCount; j++) {
+            assert(write->pBufferInfo[j].buffer);
+            ANV_FROM_HANDLE(anv_buffer, buffer, write->pBufferInfo[j].buffer);
+            assert(buffer);
+
+            struct anv_buffer_view *view =
+               &set->buffer_views[bind_layout->buffer_index];
+            view += write->dstArrayElement + j;
+
+            const struct anv_format *format =
+               anv_format_for_descriptor_type(write->descriptorType);
+
+            view->format = format->isl_format;
+            view->bo = buffer->bo;
+            view->offset = buffer->offset + write->pBufferInfo[j].offset;
+
+            /* For buffers with dynamic offsets, we use the full possible
+             * range in the surface state and do the actual range-checking
+             * in the shader.
+             */
+            if (bind_layout->dynamic_offset_index >= 0 ||
+                write->pBufferInfo[j].range == VK_WHOLE_SIZE)
+               view->range = buffer->size - write->pBufferInfo[j].offset;
+            else
+               view->range = write->pBufferInfo[j].range;
+
+            anv_fill_buffer_surface_state(device, view->surface_state,
+                                          view->format,
+                                          view->offset, view->range, 1);
+
+            desc[j] = (struct anv_descriptor) {
+               .type = write->descriptorType,
+               .buffer_view = view,
+            };
+         }
+         break;
+
+      default:
+         break;
+      }
+   }
+
+   for (uint32_t i = 0; i < descriptorCopyCount; i++) {
+      const VkCopyDescriptorSet *copy = &pDescriptorCopies[i];
+      ANV_FROM_HANDLE(anv_descriptor_set, src, copy->srcSet);
+      ANV_FROM_HANDLE(anv_descriptor_set, dst, copy->dstSet);
+
+      const struct anv_descriptor_set_binding_layout *src_layout =
+         &src->layout->binding[copy->srcBinding];
+      struct anv_descriptor *src_desc =
+         &src->descriptors[src_layout->descriptor_index];
+      src_desc += copy->srcArrayElement;
+
+      const struct anv_descriptor_set_binding_layout *dst_layout =
+         &dst->layout->binding[copy->dstBinding];
+      struct anv_descriptor *dst_desc =
+         &dst->descriptors[dst_layout->descriptor_index];
+      dst_desc += copy->dstArrayElement;
+
+      for (uint32_t j = 0; j < copy->descriptorCount; j++)
+         dst_desc[j] = src_desc[j];
+   }
+}
diff --git a/src/intel/vulkan/anv_device.c b/src/intel/vulkan/anv_device.c
new file mode 100644 (file)
index 0000000..768e2eb
--- /dev/null
@@ -0,0 +1,1776 @@
+/*
+ * Copyright © 2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include <assert.h>
+#include <stdbool.h>
+#include <string.h>
+#include <unistd.h>
+#include <fcntl.h>
+
+#include "anv_private.h"
+#include "mesa/main/git_sha1.h"
+#include "util/strtod.h"
+#include "util/debug.h"
+
+#include "genxml/gen7_pack.h"
+
+struct anv_dispatch_table dtable;
+
+static void
+compiler_debug_log(void *data, const char *fmt, ...)
+{ }
+
+static void
+compiler_perf_log(void *data, const char *fmt, ...)
+{
+   va_list args;
+   va_start(args, fmt);
+
+   if (unlikely(INTEL_DEBUG & DEBUG_PERF))
+      vfprintf(stderr, fmt, args);
+
+   va_end(args);
+}
+
+static VkResult
+anv_physical_device_init(struct anv_physical_device *device,
+                         struct anv_instance *instance,
+                         const char *path)
+{
+   VkResult result;
+   int fd;
+
+   fd = open(path, O_RDWR | O_CLOEXEC);
+   if (fd < 0)
+      return vk_errorf(VK_ERROR_INITIALIZATION_FAILED,
+                       "failed to open %s: %m", path);
+
+   device->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
+   device->instance = instance;
+   device->path = path;
+
+   device->chipset_id = anv_gem_get_param(fd, I915_PARAM_CHIPSET_ID);
+   if (!device->chipset_id) {
+      result = vk_errorf(VK_ERROR_INITIALIZATION_FAILED,
+                         "failed to get chipset id: %m");
+      goto fail;
+   }
+
+   device->name = brw_get_device_name(device->chipset_id);
+   device->info = brw_get_device_info(device->chipset_id);
+   if (!device->info) {
+      result = vk_errorf(VK_ERROR_INITIALIZATION_FAILED,
+                         "failed to get device info");
+      goto fail;
+   }
+
+   if (device->info->is_haswell) {
+      fprintf(stderr, "WARNING: Haswell Vulkan support is incomplete\n");
+   } else if (device->info->gen == 7 && !device->info->is_baytrail) {
+      fprintf(stderr, "WARNING: Ivy Bridge Vulkan support is incomplete\n");
+   } else if (device->info->gen == 7 && device->info->is_baytrail) {
+      fprintf(stderr, "WARNING: Bay Trail Vulkan support is incomplete\n");
+   } else if (device->info->gen >= 8) {
+      /* Broadwell, Cherryview, Skylake, Broxton, and Kabylake are as fully
+       * supported as anything. */
+   } else {
+      result = vk_errorf(VK_ERROR_INCOMPATIBLE_DRIVER,
+                         "Vulkan not yet supported on %s", device->name);
+      goto fail;
+   }
+
+   if (anv_gem_get_aperture(fd, &device->aperture_size) == -1) {
+      result = vk_errorf(VK_ERROR_INITIALIZATION_FAILED,
+                         "failed to get aperture size: %m");
+      goto fail;
+   }
+
+   if (!anv_gem_get_param(fd, I915_PARAM_HAS_WAIT_TIMEOUT)) {
+      result = vk_errorf(VK_ERROR_INITIALIZATION_FAILED,
+                         "kernel missing gem wait");
+      goto fail;
+   }
+
+   if (!anv_gem_get_param(fd, I915_PARAM_HAS_EXECBUF2)) {
+      result = vk_errorf(VK_ERROR_INITIALIZATION_FAILED,
+                         "kernel missing execbuf2");
+      goto fail;
+   }
+
+   if (!device->info->has_llc &&
+       anv_gem_get_param(fd, I915_PARAM_MMAP_VERSION) < 1) {
+      result = vk_errorf(VK_ERROR_INITIALIZATION_FAILED,
+                         "kernel missing wc mmap");
+      goto fail;
+   }
+
+   bool swizzled = anv_gem_get_bit6_swizzle(fd, I915_TILING_X);
+
+   close(fd);
+
+   brw_process_intel_debug_variable();
+
+   device->compiler = brw_compiler_create(NULL, device->info);
+   if (device->compiler == NULL) {
+      result = vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+      goto fail;
+   }
+   device->compiler->shader_debug_log = compiler_debug_log;
+   device->compiler->shader_perf_log = compiler_perf_log;
+
+   /* XXX: Actually detect bit6 swizzling */
+   isl_device_init(&device->isl_dev, device->info, swizzled);
+
+   return VK_SUCCESS;
+
+fail:
+   close(fd);
+   return result;
+}
+
+static void
+anv_physical_device_finish(struct anv_physical_device *device)
+{
+   ralloc_free(device->compiler);
+}
+
+static const VkExtensionProperties global_extensions[] = {
+   {
+      .extensionName = VK_KHR_SURFACE_EXTENSION_NAME,
+      .specVersion = 25,
+   },
+   {
+      .extensionName = VK_KHR_XCB_SURFACE_EXTENSION_NAME,
+      .specVersion = 5,
+   },
+#ifdef HAVE_WAYLAND_PLATFORM
+   {
+      .extensionName = VK_KHR_WAYLAND_SURFACE_EXTENSION_NAME,
+      .specVersion = 4,
+   },
+#endif
+};
+
+static const VkExtensionProperties device_extensions[] = {
+   {
+      .extensionName = VK_KHR_SWAPCHAIN_EXTENSION_NAME,
+      .specVersion = 67,
+   },
+};
+
+static void *
+default_alloc_func(void *pUserData, size_t size, size_t align,
+                   VkSystemAllocationScope allocationScope)
+{
+   return malloc(size);
+}
+
+static void *
+default_realloc_func(void *pUserData, void *pOriginal, size_t size,
+                     size_t align, VkSystemAllocationScope allocationScope)
+{
+   return realloc(pOriginal, size);
+}
+
+static void
+default_free_func(void *pUserData, void *pMemory)
+{
+   free(pMemory);
+}
+
+static const VkAllocationCallbacks default_alloc = {
+   .pUserData = NULL,
+   .pfnAllocation = default_alloc_func,
+   .pfnReallocation = default_realloc_func,
+   .pfnFree = default_free_func,
+};
+
+VkResult anv_CreateInstance(
+    const VkInstanceCreateInfo*                 pCreateInfo,
+    const VkAllocationCallbacks*                pAllocator,
+    VkInstance*                                 pInstance)
+{
+   struct anv_instance *instance;
+
+   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO);
+
+   uint32_t client_version;
+   if (pCreateInfo->pApplicationInfo &&
+       pCreateInfo->pApplicationInfo->apiVersion != 0) {
+      client_version = pCreateInfo->pApplicationInfo->apiVersion;
+   } else {
+      client_version = VK_MAKE_VERSION(1, 0, 0);
+   }
+
+   if (VK_MAKE_VERSION(1, 0, 0) > client_version ||
+       client_version > VK_MAKE_VERSION(1, 0, 3)) {
+      return vk_errorf(VK_ERROR_INCOMPATIBLE_DRIVER,
+                       "Client requested version %d.%d.%d",
+                       VK_VERSION_MAJOR(client_version),
+                       VK_VERSION_MINOR(client_version),
+                       VK_VERSION_PATCH(client_version));
+   }
+
+   for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
+      bool found = false;
+      for (uint32_t j = 0; j < ARRAY_SIZE(global_extensions); j++) {
+         if (strcmp(pCreateInfo->ppEnabledExtensionNames[i],
+                    global_extensions[j].extensionName) == 0) {
+            found = true;
+            break;
+         }
+      }
+      if (!found)
+         return vk_error(VK_ERROR_EXTENSION_NOT_PRESENT);
+   }
+
+   instance = anv_alloc2(&default_alloc, pAllocator, sizeof(*instance), 8,
+                         VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
+   if (!instance)
+      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+
+   instance->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
+
+   if (pAllocator)
+      instance->alloc = *pAllocator;
+   else
+      instance->alloc = default_alloc;
+
+   instance->apiVersion = client_version;
+   instance->physicalDeviceCount = -1;
+
+   memset(instance->wsi, 0, sizeof(instance->wsi));
+
+   _mesa_locale_init();
+
+   VG(VALGRIND_CREATE_MEMPOOL(instance, 0, false));
+
+   anv_init_wsi(instance);
+
+   *pInstance = anv_instance_to_handle(instance);
+
+   return VK_SUCCESS;
+}
+
+void anv_DestroyInstance(
+    VkInstance                                  _instance,
+    const VkAllocationCallbacks*                pAllocator)
+{
+   ANV_FROM_HANDLE(anv_instance, instance, _instance);
+
+   if (instance->physicalDeviceCount > 0) {
+      /* We support at most one physical device. */
+      assert(instance->physicalDeviceCount == 1);
+      anv_physical_device_finish(&instance->physicalDevice);
+   }
+
+   anv_finish_wsi(instance);
+
+   VG(VALGRIND_DESTROY_MEMPOOL(instance));
+
+   _mesa_locale_fini();
+
+   anv_free(&instance->alloc, instance);
+}
+
+VkResult anv_EnumeratePhysicalDevices(
+    VkInstance                                  _instance,
+    uint32_t*                                   pPhysicalDeviceCount,
+    VkPhysicalDevice*                           pPhysicalDevices)
+{
+   ANV_FROM_HANDLE(anv_instance, instance, _instance);
+   VkResult result;
+
+   if (instance->physicalDeviceCount < 0) {
+      result = anv_physical_device_init(&instance->physicalDevice,
+                                        instance, "/dev/dri/renderD128");
+      if (result == VK_ERROR_INCOMPATIBLE_DRIVER) {
+         instance->physicalDeviceCount = 0;
+      } else if (result == VK_SUCCESS) {
+         instance->physicalDeviceCount = 1;
+      } else {
+         return result;
+      }
+   }
+
+   /* pPhysicalDeviceCount is an out parameter if pPhysicalDevices is NULL;
+    * otherwise it's an inout parameter.
+    *
+    * The Vulkan spec (git aaed022) says:
+    *
+    *    pPhysicalDeviceCount is a pointer to an unsigned integer variable
+    *    that is initialized with the number of devices the application is
+    *    prepared to receive handles to. pPhysicalDevices is a pointer to
+    *    an array of at least this many VkPhysicalDevice handles [...].
+    *
+    *    Upon success, if pPhysicalDevices is NULL, vkEnumeratePhysicalDevices
+    *    overwrites the contents of the variable pointed to by
+    *    pPhysicalDeviceCount with the number of physical devices in the
+    *    instance; otherwise, vkEnumeratePhysicalDevices overwrites
+    *    pPhysicalDeviceCount with the number of physical handles written to
+    *    pPhysicalDevices.
+    */
+   if (!pPhysicalDevices) {
+      *pPhysicalDeviceCount = instance->physicalDeviceCount;
+   } else if (*pPhysicalDeviceCount >= 1) {
+      pPhysicalDevices[0] = anv_physical_device_to_handle(&instance->physicalDevice);
+      *pPhysicalDeviceCount = 1;
+   } else {
+      *pPhysicalDeviceCount = 0;
+   }
+
+   return VK_SUCCESS;
+}
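+
+/* Illustrative sketch (not part of the driver): the usual two-call idiom an
+ * application uses against the enumeration above.  "app_instance" is a
+ * hypothetical VkInstance handle.
+ *
+ *    uint32_t count = 0;
+ *    vkEnumeratePhysicalDevices(app_instance, &count, NULL);
+ *    VkPhysicalDevice *devices = malloc(count * sizeof(*devices));
+ *    vkEnumeratePhysicalDevices(app_instance, &count, devices);
+ *    // count now holds the number of handles actually written (at most 1 here)
+ */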
+
+void anv_GetPhysicalDeviceFeatures(
+    VkPhysicalDevice                            physicalDevice,
+    VkPhysicalDeviceFeatures*                   pFeatures)
+{
+   ANV_FROM_HANDLE(anv_physical_device, pdevice, physicalDevice);
+
+   *pFeatures = (VkPhysicalDeviceFeatures) {
+      .robustBufferAccess                       = true,
+      .fullDrawIndexUint32                      = true,
+      .imageCubeArray                           = false,
+      .independentBlend                         = pdevice->info->gen >= 8,
+      .geometryShader                           = true,
+      .tessellationShader                       = false,
+      .sampleRateShading                        = false,
+      .dualSrcBlend                             = true,
+      .logicOp                                  = true,
+      .multiDrawIndirect                        = false,
+      .drawIndirectFirstInstance                = false,
+      .depthClamp                               = false,
+      .depthBiasClamp                           = false,
+      .fillModeNonSolid                         = true,
+      .depthBounds                              = false,
+      .wideLines                                = true,
+      .largePoints                              = true,
+      .alphaToOne                               = true,
+      .multiViewport                            = true,
+      .samplerAnisotropy                        = false, /* FINISHME */
+      .textureCompressionETC2                   = true,
+      .textureCompressionASTC_LDR               = true,
+      .textureCompressionBC                     = true,
+      .occlusionQueryPrecise                    = true,
+      .pipelineStatisticsQuery                  = false,
+      .vertexPipelineStoresAndAtomics           = pdevice->info->gen >= 8,
+      .fragmentStoresAndAtomics                 = true,
+      .shaderTessellationAndGeometryPointSize   = true,
+      .shaderImageGatherExtended                = true,
+      .shaderStorageImageExtendedFormats        = false,
+      .shaderStorageImageMultisample            = false,
+      .shaderUniformBufferArrayDynamicIndexing  = true,
+      .shaderSampledImageArrayDynamicIndexing   = true,
+      .shaderStorageBufferArrayDynamicIndexing  = true,
+      .shaderStorageImageArrayDynamicIndexing   = true,
+      .shaderStorageImageReadWithoutFormat      = false,
+      .shaderStorageImageWriteWithoutFormat     = true,
+      .shaderClipDistance                       = false,
+      .shaderCullDistance                       = false,
+      .shaderFloat64                            = false,
+      .shaderInt64                              = false,
+      .shaderInt16                              = false,
+      .variableMultisampleRate                  = false,
+      .inheritedQueries                         = false,
+   };
+}
+
+void
+anv_device_get_cache_uuid(void *uuid)
+{
+   memset(uuid, 0, VK_UUID_SIZE);
+   snprintf(uuid, VK_UUID_SIZE, "anv-%s", MESA_GIT_SHA1 + 4);
+}
+
+void anv_GetPhysicalDeviceProperties(
+    VkPhysicalDevice                            physicalDevice,
+    VkPhysicalDeviceProperties*                 pProperties)
+{
+   ANV_FROM_HANDLE(anv_physical_device, pdevice, physicalDevice);
+   const struct brw_device_info *devinfo = pdevice->info;
+
+   anv_finishme("Get correct values for VkPhysicalDeviceLimits");
+
+   const float time_stamp_base = devinfo->gen >= 9 ? 83.333 : 80.0;
+
+   VkSampleCountFlags sample_counts =
+      isl_device_get_sample_counts(&pdevice->isl_dev);
+
+   VkPhysicalDeviceLimits limits = {
+      .maxImageDimension1D                      = (1 << 14),
+      .maxImageDimension2D                      = (1 << 14),
+      .maxImageDimension3D                      = (1 << 11),
+      .maxImageDimensionCube                    = (1 << 14),
+      .maxImageArrayLayers                      = (1 << 11),
+      .maxTexelBufferElements                   = 128 * 1024 * 1024,
+      .maxUniformBufferRange                    = UINT32_MAX,
+      .maxStorageBufferRange                    = UINT32_MAX,
+      .maxPushConstantsSize                     = MAX_PUSH_CONSTANTS_SIZE,
+      .maxMemoryAllocationCount                 = UINT32_MAX,
+      .maxSamplerAllocationCount                = 64 * 1024,
+      .bufferImageGranularity                   = 64, /* A cache line */
+      .sparseAddressSpaceSize                   = 0,
+      .maxBoundDescriptorSets                   = MAX_SETS,
+      .maxPerStageDescriptorSamplers            = 64,
+      .maxPerStageDescriptorUniformBuffers      = 64,
+      .maxPerStageDescriptorStorageBuffers      = 64,
+      .maxPerStageDescriptorSampledImages       = 64,
+      .maxPerStageDescriptorStorageImages       = 64,
+      .maxPerStageDescriptorInputAttachments    = 64,
+      .maxPerStageResources                     = 128,
+      .maxDescriptorSetSamplers                 = 256,
+      .maxDescriptorSetUniformBuffers           = 256,
+      .maxDescriptorSetUniformBuffersDynamic    = 256,
+      .maxDescriptorSetStorageBuffers           = 256,
+      .maxDescriptorSetStorageBuffersDynamic    = 256,
+      .maxDescriptorSetSampledImages            = 256,
+      .maxDescriptorSetStorageImages            = 256,
+      .maxDescriptorSetInputAttachments         = 256,
+      .maxVertexInputAttributes                 = 32,
+      .maxVertexInputBindings                   = 32,
+      .maxVertexInputAttributeOffset            = 2047,
+      .maxVertexInputBindingStride              = 2048,
+      .maxVertexOutputComponents                = 128,
+      .maxTessellationGenerationLevel           = 0,
+      .maxTessellationPatchSize                 = 0,
+      .maxTessellationControlPerVertexInputComponents = 0,
+      .maxTessellationControlPerVertexOutputComponents = 0,
+      .maxTessellationControlPerPatchOutputComponents = 0,
+      .maxTessellationControlTotalOutputComponents = 0,
+      .maxTessellationEvaluationInputComponents = 0,
+      .maxTessellationEvaluationOutputComponents = 0,
+      .maxGeometryShaderInvocations             = 32,
+      .maxGeometryInputComponents               = 64,
+      .maxGeometryOutputComponents              = 128,
+      .maxGeometryOutputVertices                = 256,
+      .maxGeometryTotalOutputComponents         = 1024,
+      .maxFragmentInputComponents               = 128,
+      .maxFragmentOutputAttachments             = 8,
+      .maxFragmentDualSrcAttachments            = 2,
+      .maxFragmentCombinedOutputResources       = 8,
+      .maxComputeSharedMemorySize               = 32768,
+      .maxComputeWorkGroupCount                 = { 65535, 65535, 65535 },
+      .maxComputeWorkGroupInvocations           = 16 * devinfo->max_cs_threads,
+      .maxComputeWorkGroupSize = {
+         16 * devinfo->max_cs_threads,
+         16 * devinfo->max_cs_threads,
+         16 * devinfo->max_cs_threads,
+      },
+      .subPixelPrecisionBits                    = 4 /* FIXME */,
+      .subTexelPrecisionBits                    = 4 /* FIXME */,
+      .mipmapPrecisionBits                      = 4 /* FIXME */,
+      .maxDrawIndexedIndexValue                 = UINT32_MAX,
+      .maxDrawIndirectCount                     = UINT32_MAX,
+      .maxSamplerLodBias                        = 16,
+      .maxSamplerAnisotropy                     = 16,
+      .maxViewports                             = MAX_VIEWPORTS,
+      .maxViewportDimensions                    = { (1 << 14), (1 << 14) },
+      .viewportBoundsRange                      = { -16384.0, 16384.0 },
+      .viewportSubPixelBits                     = 13, /* We take a float? */
+      .minMemoryMapAlignment                    = 4096, /* A page */
+      .minTexelBufferOffsetAlignment            = 1,
+      .minUniformBufferOffsetAlignment          = 1,
+      .minStorageBufferOffsetAlignment          = 1,
+      .minTexelOffset                           = -8,
+      .maxTexelOffset                           = 7,
+      .minTexelGatherOffset                     = -8,
+      .maxTexelGatherOffset                     = 7,
+      .minInterpolationOffset                   = 0, /* FIXME */
+      .maxInterpolationOffset                   = 0, /* FIXME */
+      .subPixelInterpolationOffsetBits          = 0, /* FIXME */
+      .maxFramebufferWidth                      = (1 << 14),
+      .maxFramebufferHeight                     = (1 << 14),
+      .maxFramebufferLayers                     = (1 << 10),
+      .framebufferColorSampleCounts             = sample_counts,
+      .framebufferDepthSampleCounts             = sample_counts,
+      .framebufferStencilSampleCounts           = sample_counts,
+      .framebufferNoAttachmentsSampleCounts     = sample_counts,
+      .maxColorAttachments                      = MAX_RTS,
+      .sampledImageColorSampleCounts            = sample_counts,
+      .sampledImageIntegerSampleCounts          = VK_SAMPLE_COUNT_1_BIT,
+      .sampledImageDepthSampleCounts            = sample_counts,
+      .sampledImageStencilSampleCounts          = sample_counts,
+      .storageImageSampleCounts                 = VK_SAMPLE_COUNT_1_BIT,
+      .maxSampleMaskWords                       = 1,
+      .timestampComputeAndGraphics              = false,
+      .timestampPeriod                          = time_stamp_base / (1000 * 1000 * 1000),
+      .maxClipDistances                         = 0 /* FIXME */,
+      .maxCullDistances                         = 0 /* FIXME */,
+      .maxCombinedClipAndCullDistances          = 0 /* FIXME */,
+      .discreteQueuePriorities                  = 1,
+      .pointSizeRange                           = { 0.125, 255.875 },
+      .lineWidthRange                           = { 0.0, 7.9921875 },
+      .pointSizeGranularity                     = (1.0 / 8.0),
+      .lineWidthGranularity                     = (1.0 / 128.0),
+      .strictLines                              = false, /* FINISHME */
+      .standardSampleLocations                  = true,
+      .optimalBufferCopyOffsetAlignment         = 128,
+      .optimalBufferCopyRowPitchAlignment       = 128,
+      .nonCoherentAtomSize                      = 64,
+   };
+
+   *pProperties = (VkPhysicalDeviceProperties) {
+      .apiVersion = VK_MAKE_VERSION(1, 0, 2),
+      .driverVersion = 1,
+      .vendorID = 0x8086,
+      .deviceID = pdevice->chipset_id,
+      .deviceType = VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU,
+      .limits = limits,
+      .sparseProperties = {0}, /* Broadwell doesn't do sparse. */
+   };
+
+   strcpy(pProperties->deviceName, pdevice->name);
+   anv_device_get_cache_uuid(pProperties->pipelineCacheUUID);
+}
+
+void anv_GetPhysicalDeviceQueueFamilyProperties(
+    VkPhysicalDevice                            physicalDevice,
+    uint32_t*                                   pCount,
+    VkQueueFamilyProperties*                    pQueueFamilyProperties)
+{
+   if (pQueueFamilyProperties == NULL) {
+      *pCount = 1;
+      return;
+   }
+
+   assert(*pCount >= 1);
+
+   *pQueueFamilyProperties = (VkQueueFamilyProperties) {
+      .queueFlags = VK_QUEUE_GRAPHICS_BIT |
+                    VK_QUEUE_COMPUTE_BIT |
+                    VK_QUEUE_TRANSFER_BIT,
+      .queueCount = 1,
+      .timestampValidBits = 36, /* XXX: Real value here */
+      .minImageTransferGranularity = (VkExtent3D) { 1, 1, 1 },
+   };
+}
+
+void anv_GetPhysicalDeviceMemoryProperties(
+    VkPhysicalDevice                            physicalDevice,
+    VkPhysicalDeviceMemoryProperties*           pMemoryProperties)
+{
+   ANV_FROM_HANDLE(anv_physical_device, physical_device, physicalDevice);
+   VkDeviceSize heap_size;
+
+   /* Reserve some wiggle room for the driver by exposing only 75% of the
+    * aperture to the heap.
+    */
+   heap_size = 3 * physical_device->aperture_size / 4;
+
+   if (physical_device->info->has_llc) {
+      /* Big core GPUs share LLC with the CPU and thus one memory type can be
+       * both cached and coherent at the same time.
+       */
+      pMemoryProperties->memoryTypeCount = 1;
+      pMemoryProperties->memoryTypes[0] = (VkMemoryType) {
+         .propertyFlags = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT |
+                          VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
+                          VK_MEMORY_PROPERTY_HOST_COHERENT_BIT |
+                          VK_MEMORY_PROPERTY_HOST_CACHED_BIT,
+         .heapIndex = 0,
+      };
+   } else {
+      /* The spec requires that we expose a host-visible, coherent memory
+       * type, but Atom GPUs don't share LLC. Thus we offer two memory types
+       * to give the application a choice between one that is coherent but
+       * uncached (write-combined) and one that is cached but not coherent.
+       */
+      pMemoryProperties->memoryTypeCount = 2;
+      pMemoryProperties->memoryTypes[0] = (VkMemoryType) {
+         .propertyFlags = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT |
+                          VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
+                          VK_MEMORY_PROPERTY_HOST_COHERENT_BIT,
+         .heapIndex = 0,
+      };
+      pMemoryProperties->memoryTypes[1] = (VkMemoryType) {
+         .propertyFlags = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT |
+                          VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
+                          VK_MEMORY_PROPERTY_HOST_CACHED_BIT,
+         .heapIndex = 0,
+      };
+   }
+
+   pMemoryProperties->memoryHeapCount = 1;
+   pMemoryProperties->memoryHeaps[0] = (VkMemoryHeap) {
+      .size = heap_size,
+      .flags = VK_MEMORY_HEAP_DEVICE_LOCAL_BIT,
+   };
+}
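+
+/* Illustrative sketch (not part of the driver): how an application typically
+ * combines VkMemoryRequirements::memoryTypeBits with the memory types exposed
+ * above to pick a memoryTypeIndex.  "app_find_memory_type" is a hypothetical
+ * helper, not an anv entrypoint.
+ *
+ *    static int32_t
+ *    app_find_memory_type(const VkPhysicalDeviceMemoryProperties *props,
+ *                         uint32_t type_bits, VkMemoryPropertyFlags required)
+ *    {
+ *       for (uint32_t i = 0; i < props->memoryTypeCount; i++) {
+ *          if ((type_bits & (1u << i)) &&
+ *              (props->memoryTypes[i].propertyFlags & required) == required)
+ *             return i;
+ *       }
+ *       return -1;
+ *    }
+ */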
+
+PFN_vkVoidFunction anv_GetInstanceProcAddr(
+    VkInstance                                  instance,
+    const char*                                 pName)
+{
+   return anv_lookup_entrypoint(pName);
+}
+
+/* The loader wants us to expose a second GetInstanceProcAddr function
+ * to work around certain LD_PRELOAD issues seen in apps.
+ */
+VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vk_icdGetInstanceProcAddr(
+    VkInstance                                  instance,
+    const char*                                 pName);
+
+VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vk_icdGetInstanceProcAddr(
+    VkInstance                                  instance,
+    const char*                                 pName)
+{
+   return anv_GetInstanceProcAddr(instance, pName);
+}
+
+PFN_vkVoidFunction anv_GetDeviceProcAddr(
+    VkDevice                                    device,
+    const char*                                 pName)
+{
+   return anv_lookup_entrypoint(pName);
+}
+
+static VkResult
+anv_queue_init(struct anv_device *device, struct anv_queue *queue)
+{
+   queue->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
+   queue->device = device;
+   queue->pool = &device->surface_state_pool;
+
+   return VK_SUCCESS;
+}
+
+static void
+anv_queue_finish(struct anv_queue *queue)
+{
+}
+
+static struct anv_state
+anv_state_pool_emit_data(struct anv_state_pool *pool, size_t size, size_t align, const void *p)
+{
+   struct anv_state state;
+
+   state = anv_state_pool_alloc(pool, size, align);
+   memcpy(state.map, p, size);
+
+   if (!pool->block_pool->device->info.has_llc)
+      anv_state_clflush(state);
+
+   return state;
+}
+
+struct gen8_border_color {
+   union {
+      float float32[4];
+      uint32_t uint32[4];
+   };
+   /* Pad out to 64 bytes */
+   uint32_t _pad[12];
+};
+
+static void
+anv_device_init_border_colors(struct anv_device *device)
+{
+   static const struct gen8_border_color border_colors[] = {
+      [VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK] =  { .float32 = { 0.0, 0.0, 0.0, 0.0 } },
+      [VK_BORDER_COLOR_FLOAT_OPAQUE_BLACK] =       { .float32 = { 0.0, 0.0, 0.0, 1.0 } },
+      [VK_BORDER_COLOR_FLOAT_OPAQUE_WHITE] =       { .float32 = { 1.0, 1.0, 1.0, 1.0 } },
+      [VK_BORDER_COLOR_INT_TRANSPARENT_BLACK] =    { .uint32 = { 0, 0, 0, 0 } },
+      [VK_BORDER_COLOR_INT_OPAQUE_BLACK] =         { .uint32 = { 0, 0, 0, 1 } },
+      [VK_BORDER_COLOR_INT_OPAQUE_WHITE] =         { .uint32 = { 1, 1, 1, 1 } },
+   };
+
+   device->border_colors = anv_state_pool_emit_data(&device->dynamic_state_pool,
+                                                    sizeof(border_colors), 64,
+                                                    border_colors);
+}
+
+VkResult
+anv_device_submit_simple_batch(struct anv_device *device,
+                               struct anv_batch *batch)
+{
+   struct drm_i915_gem_execbuffer2 execbuf;
+   struct drm_i915_gem_exec_object2 exec2_objects[1];
+   struct anv_bo bo;
+   VkResult result = VK_SUCCESS;
+   uint32_t size;
+   int64_t timeout;
+   int ret;
+
+   /* Kernel driver requires 8 byte aligned batch length */
+   size = align_u32(batch->next - batch->start, 8);
+   assert(size < device->batch_bo_pool.bo_size);
+   result = anv_bo_pool_alloc(&device->batch_bo_pool, &bo);
+   if (result != VK_SUCCESS)
+      return result;
+
+   memcpy(bo.map, batch->start, size);
+   if (!device->info.has_llc)
+      anv_clflush_range(bo.map, size);
+
+   exec2_objects[0].handle = bo.gem_handle;
+   exec2_objects[0].relocation_count = 0;
+   exec2_objects[0].relocs_ptr = 0;
+   exec2_objects[0].alignment = 0;
+   exec2_objects[0].offset = bo.offset;
+   exec2_objects[0].flags = 0;
+   exec2_objects[0].rsvd1 = 0;
+   exec2_objects[0].rsvd2 = 0;
+
+   execbuf.buffers_ptr = (uintptr_t) exec2_objects;
+   execbuf.buffer_count = 1;
+   execbuf.batch_start_offset = 0;
+   execbuf.batch_len = size;
+   execbuf.cliprects_ptr = 0;
+   execbuf.num_cliprects = 0;
+   execbuf.DR1 = 0;
+   execbuf.DR4 = 0;
+
+   execbuf.flags =
+      I915_EXEC_HANDLE_LUT | I915_EXEC_NO_RELOC | I915_EXEC_RENDER;
+   execbuf.rsvd1 = device->context_id;
+   execbuf.rsvd2 = 0;
+
+   ret = anv_gem_execbuffer(device, &execbuf);
+   if (ret != 0) {
+      /* We don't know the real error. */
+      result = vk_errorf(VK_ERROR_OUT_OF_DEVICE_MEMORY, "execbuf2 failed: %m");
+      goto fail;
+   }
+
+   timeout = INT64_MAX;
+   ret = anv_gem_wait(device, bo.gem_handle, &timeout);
+   if (ret != 0) {
+      /* We don't know the real error. */
+      result = vk_errorf(VK_ERROR_OUT_OF_DEVICE_MEMORY, "execbuf2 failed: %m");
+      goto fail;
+   }
+
+ fail:
+   anv_bo_pool_free(&device->batch_bo_pool, &bo);
+
+   return result;
+}
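+
+/* Illustrative sketch (not part of the driver): how a caller uses the helper
+ * above.  A tiny batch is built in CPU memory, then copied into a pool BO,
+ * submitted with execbuf2 and waited on.  anv_DeviceWaitIdle() below is the
+ * in-tree user of this pattern:
+ *
+ *    uint32_t cmds[8];
+ *    struct anv_batch batch;
+ *    batch.start = batch.next = cmds;
+ *    batch.end = (void *) cmds + sizeof(cmds);
+ *    anv_batch_emit(&batch, GEN7_MI_BATCH_BUFFER_END);
+ *    anv_batch_emit(&batch, GEN7_MI_NOOP);
+ *    anv_device_submit_simple_batch(device, &batch);
+ */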
+
+VkResult anv_CreateDevice(
+    VkPhysicalDevice                            physicalDevice,
+    const VkDeviceCreateInfo*                   pCreateInfo,
+    const VkAllocationCallbacks*                pAllocator,
+    VkDevice*                                   pDevice)
+{
+   ANV_FROM_HANDLE(anv_physical_device, physical_device, physicalDevice);
+   VkResult result;
+   struct anv_device *device;
+
+   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO);
+
+   for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
+      bool found = false;
+      for (uint32_t j = 0; j < ARRAY_SIZE(device_extensions); j++) {
+         if (strcmp(pCreateInfo->ppEnabledExtensionNames[i],
+                    device_extensions[j].extensionName) == 0) {
+            found = true;
+            break;
+         }
+      }
+      if (!found)
+         return vk_error(VK_ERROR_EXTENSION_NOT_PRESENT);
+   }
+
+   anv_set_dispatch_devinfo(physical_device->info);
+
+   device = anv_alloc2(&physical_device->instance->alloc, pAllocator,
+                       sizeof(*device), 8,
+                       VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
+   if (!device)
+      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+
+   device->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
+   device->instance = physical_device->instance;
+   device->chipset_id = physical_device->chipset_id;
+
+   if (pAllocator)
+      device->alloc = *pAllocator;
+   else
+      device->alloc = physical_device->instance->alloc;
+
+   /* XXX(chadv): Can we dup() physicalDevice->fd here? */
+   device->fd = open(physical_device->path, O_RDWR | O_CLOEXEC);
+   if (device->fd == -1) {
+      result = vk_error(VK_ERROR_INITIALIZATION_FAILED);
+      goto fail_device;
+   }
+
+   device->context_id = anv_gem_create_context(device);
+   if (device->context_id == -1) {
+      result = vk_error(VK_ERROR_INITIALIZATION_FAILED);
+      goto fail_fd;
+   }
+
+   device->info = *physical_device->info;
+   device->isl_dev = physical_device->isl_dev;
+
+   pthread_mutex_init(&device->mutex, NULL);
+
+   anv_bo_pool_init(&device->batch_bo_pool, device, ANV_CMD_BUFFER_BATCH_SIZE);
+
+   anv_block_pool_init(&device->dynamic_state_block_pool, device, 16384);
+
+   anv_state_pool_init(&device->dynamic_state_pool,
+                       &device->dynamic_state_block_pool);
+
+   anv_block_pool_init(&device->instruction_block_pool, device, 128 * 1024);
+   anv_pipeline_cache_init(&device->default_pipeline_cache, device);
+
+   anv_block_pool_init(&device->surface_state_block_pool, device, 4096);
+
+   anv_state_pool_init(&device->surface_state_pool,
+                       &device->surface_state_block_pool);
+
+   anv_bo_init_new(&device->workaround_bo, device, 1024);
+
+   anv_block_pool_init(&device->scratch_block_pool, device, 0x10000);
+
+   anv_queue_init(device, &device->queue);
+
+   switch (device->info.gen) {
+   case 7:
+      if (!device->info.is_haswell)
+         result = gen7_init_device_state(device);
+      else
+         result = gen75_init_device_state(device);
+      break;
+   case 8:
+      result = gen8_init_device_state(device);
+      break;
+   case 9:
+      result = gen9_init_device_state(device);
+      break;
+   default:
+      /* Shouldn't get here as we don't create physical devices for any other
+       * gens. */
+      unreachable("unhandled gen");
+   }
+   if (result != VK_SUCCESS)
+      goto fail_fd;
+
+   result = anv_device_init_meta(device);
+   if (result != VK_SUCCESS)
+      goto fail_fd;
+
+   anv_device_init_border_colors(device);
+
+   *pDevice = anv_device_to_handle(device);
+
+   return VK_SUCCESS;
+
+ fail_fd:
+   close(device->fd);
+ fail_device:
+   anv_free(&device->alloc, device);
+
+   return result;
+}
+
+void anv_DestroyDevice(
+    VkDevice                                    _device,
+    const VkAllocationCallbacks*                pAllocator)
+{
+   ANV_FROM_HANDLE(anv_device, device, _device);
+
+   anv_queue_finish(&device->queue);
+
+   anv_device_finish_meta(device);
+
+#ifdef HAVE_VALGRIND
+   /* We only need to free these to prevent valgrind errors.  The backing
+    * BO will go away in a couple of lines so we don't actually leak.
+    */
+   anv_state_pool_free(&device->dynamic_state_pool, device->border_colors);
+#endif
+
+   anv_gem_munmap(device->workaround_bo.map, device->workaround_bo.size);
+   anv_gem_close(device, device->workaround_bo.gem_handle);
+
+   anv_bo_pool_finish(&device->batch_bo_pool);
+   anv_state_pool_finish(&device->dynamic_state_pool);
+   anv_block_pool_finish(&device->dynamic_state_block_pool);
+   anv_block_pool_finish(&device->instruction_block_pool);
+   anv_state_pool_finish(&device->surface_state_pool);
+   anv_block_pool_finish(&device->surface_state_block_pool);
+   anv_block_pool_finish(&device->scratch_block_pool);
+
+   close(device->fd);
+
+   pthread_mutex_destroy(&device->mutex);
+
+   anv_free(&device->alloc, device);
+}
+
+VkResult anv_EnumerateInstanceExtensionProperties(
+    const char*                                 pLayerName,
+    uint32_t*                                   pPropertyCount,
+    VkExtensionProperties*                      pProperties)
+{
+   if (pProperties == NULL) {
+      *pPropertyCount = ARRAY_SIZE(global_extensions);
+      return VK_SUCCESS;
+   }
+
+   assert(*pPropertyCount >= ARRAY_SIZE(global_extensions));
+
+   *pPropertyCount = ARRAY_SIZE(global_extensions);
+   memcpy(pProperties, global_extensions, sizeof(global_extensions));
+
+   return VK_SUCCESS;
+}
+
+VkResult anv_EnumerateDeviceExtensionProperties(
+    VkPhysicalDevice                            physicalDevice,
+    const char*                                 pLayerName,
+    uint32_t*                                   pPropertyCount,
+    VkExtensionProperties*                      pProperties)
+{
+   if (pProperties == NULL) {
+      *pPropertyCount = ARRAY_SIZE(device_extensions);
+      return VK_SUCCESS;
+   }
+
+   assert(*pPropertyCount >= ARRAY_SIZE(device_extensions));
+
+   *pPropertyCount = ARRAY_SIZE(device_extensions);
+   memcpy(pProperties, device_extensions, sizeof(device_extensions));
+
+   return VK_SUCCESS;
+}
+
+VkResult anv_EnumerateInstanceLayerProperties(
+    uint32_t*                                   pPropertyCount,
+    VkLayerProperties*                          pProperties)
+{
+   if (pProperties == NULL) {
+      *pPropertyCount = 0;
+      return VK_SUCCESS;
+   }
+
+   /* None supported at this time */
+   return vk_error(VK_ERROR_LAYER_NOT_PRESENT);
+}
+
+VkResult anv_EnumerateDeviceLayerProperties(
+    VkPhysicalDevice                            physicalDevice,
+    uint32_t*                                   pPropertyCount,
+    VkLayerProperties*                          pProperties)
+{
+   if (pProperties == NULL) {
+      *pPropertyCount = 0;
+      return VK_SUCCESS;
+   }
+
+   /* None supported at this time */
+   return vk_error(VK_ERROR_LAYER_NOT_PRESENT);
+}
+
+void anv_GetDeviceQueue(
+    VkDevice                                    _device,
+    uint32_t                                    queueNodeIndex,
+    uint32_t                                    queueIndex,
+    VkQueue*                                    pQueue)
+{
+   ANV_FROM_HANDLE(anv_device, device, _device);
+
+   assert(queueIndex == 0);
+
+   *pQueue = anv_queue_to_handle(&device->queue);
+}
+
+VkResult anv_QueueSubmit(
+    VkQueue                                     _queue,
+    uint32_t                                    submitCount,
+    const VkSubmitInfo*                         pSubmits,
+    VkFence                                     _fence)
+{
+   ANV_FROM_HANDLE(anv_queue, queue, _queue);
+   ANV_FROM_HANDLE(anv_fence, fence, _fence);
+   struct anv_device *device = queue->device;
+   int ret;
+
+   for (uint32_t i = 0; i < submitCount; i++) {
+      for (uint32_t j = 0; j < pSubmits[i].commandBufferCount; j++) {
+         ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer,
+                         pSubmits[i].pCommandBuffers[j]);
+         assert(cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY);
+
+         ret = anv_gem_execbuffer(device, &cmd_buffer->execbuf2.execbuf);
+         if (ret != 0) {
+            /* We don't know the real error. */
+            return vk_errorf(VK_ERROR_OUT_OF_DEVICE_MEMORY,
+                             "execbuf2 failed: %m");
+         }
+
+         for (uint32_t k = 0; k < cmd_buffer->execbuf2.bo_count; k++)
+            cmd_buffer->execbuf2.bos[k]->offset = cmd_buffer->execbuf2.objects[k].offset;
+      }
+   }
+
+   if (fence) {
+      ret = anv_gem_execbuffer(device, &fence->execbuf);
+      if (ret != 0) {
+         /* We don't know the real error. */
+         return vk_errorf(VK_ERROR_OUT_OF_DEVICE_MEMORY,
+                          "execbuf2 failed: %m");
+      }
+   }
+
+   return VK_SUCCESS;
+}
+
+VkResult anv_QueueWaitIdle(
+    VkQueue                                     _queue)
+{
+   ANV_FROM_HANDLE(anv_queue, queue, _queue);
+
+   return ANV_CALL(DeviceWaitIdle)(anv_device_to_handle(queue->device));
+}
+
+VkResult anv_DeviceWaitIdle(
+    VkDevice                                    _device)
+{
+   ANV_FROM_HANDLE(anv_device, device, _device);
+   struct anv_batch batch;
+
+   uint32_t cmds[8];
+   batch.start = batch.next = cmds;
+   batch.end = (void *) cmds + sizeof(cmds);
+
+   anv_batch_emit(&batch, GEN7_MI_BATCH_BUFFER_END);
+   anv_batch_emit(&batch, GEN7_MI_NOOP);
+
+   return anv_device_submit_simple_batch(device, &batch);
+}
+
+VkResult
+anv_bo_init_new(struct anv_bo *bo, struct anv_device *device, uint64_t size)
+{
+   bo->gem_handle = anv_gem_create(device, size);
+   if (!bo->gem_handle)
+      return vk_error(VK_ERROR_OUT_OF_DEVICE_MEMORY);
+
+   bo->map = NULL;
+   bo->index = 0;
+   bo->offset = 0;
+   bo->size = size;
+   bo->is_winsys_bo = false;
+
+   return VK_SUCCESS;
+}
+
+VkResult anv_AllocateMemory(
+    VkDevice                                    _device,
+    const VkMemoryAllocateInfo*                 pAllocateInfo,
+    const VkAllocationCallbacks*                pAllocator,
+    VkDeviceMemory*                             pMem)
+{
+   ANV_FROM_HANDLE(anv_device, device, _device);
+   struct anv_device_memory *mem;
+   VkResult result;
+
+   assert(pAllocateInfo->sType == VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO);
+
+   if (pAllocateInfo->allocationSize == 0) {
+      /* Apparently, this is allowed */
+      *pMem = VK_NULL_HANDLE;
+      return VK_SUCCESS;
+   }
+
+   /* We support exactly one memory heap. */
+   assert(pAllocateInfo->memoryTypeIndex == 0 ||
+          (!device->info.has_llc && pAllocateInfo->memoryTypeIndex < 2));
+
+   /* FINISHME: Fail if allocation request exceeds heap size. */
+
+   mem = anv_alloc2(&device->alloc, pAllocator, sizeof(*mem), 8,
+                    VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+   if (mem == NULL)
+      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+
+   /* The kernel is going to give us whole pages anyway */
+   uint64_t alloc_size = align_u64(pAllocateInfo->allocationSize, 4096);
+
+   result = anv_bo_init_new(&mem->bo, device, alloc_size);
+   if (result != VK_SUCCESS)
+      goto fail;
+
+   mem->type_index = pAllocateInfo->memoryTypeIndex;
+
+   *pMem = anv_device_memory_to_handle(mem);
+
+   return VK_SUCCESS;
+
+ fail:
+   anv_free2(&device->alloc, pAllocator, mem);
+
+   return result;
+}
+
+void anv_FreeMemory(
+    VkDevice                                    _device,
+    VkDeviceMemory                              _mem,
+    const VkAllocationCallbacks*                pAllocator)
+{
+   ANV_FROM_HANDLE(anv_device, device, _device);
+   ANV_FROM_HANDLE(anv_device_memory, mem, _mem);
+
+   if (mem == NULL)
+      return;
+
+   if (mem->bo.map)
+      anv_gem_munmap(mem->bo.map, mem->bo.size);
+
+   if (mem->bo.gem_handle != 0)
+      anv_gem_close(device, mem->bo.gem_handle);
+
+   anv_free2(&device->alloc, pAllocator, mem);
+}
+
+VkResult anv_MapMemory(
+    VkDevice                                    _device,
+    VkDeviceMemory                              _memory,
+    VkDeviceSize                                offset,
+    VkDeviceSize                                size,
+    VkMemoryMapFlags                            flags,
+    void**                                      ppData)
+{
+   ANV_FROM_HANDLE(anv_device, device, _device);
+   ANV_FROM_HANDLE(anv_device_memory, mem, _memory);
+
+   if (mem == NULL) {
+      *ppData = NULL;
+      return VK_SUCCESS;
+   }
+
+   if (size == VK_WHOLE_SIZE)
+      size = mem->bo.size - offset;
+
+   /* FIXME: Is this supposed to be thread safe? Since vkUnmapMemory() only
+    * takes a VkDeviceMemory pointer, it seems like only one map of the memory
+    * at a time is valid. We could just mmap up front and return an offset
+    * pointer here, but that may exhaust virtual memory on 32 bit
+    * userspace. */
+
+   uint32_t gem_flags = 0;
+   if (!device->info.has_llc && mem->type_index == 0)
+      gem_flags |= I915_MMAP_WC;
+
+   /* GEM will fail to map if the offset isn't 4k-aligned.  Round down. */
+   uint64_t map_offset = offset & ~4095ull;
+   assert(offset >= map_offset);
+   uint64_t map_size = (offset + size) - map_offset;
+
+   /* Let's map whole pages */
+   map_size = align_u64(map_size, 4096);
+
+   mem->map = anv_gem_mmap(device, mem->bo.gem_handle,
+                           map_offset, map_size, gem_flags);
+   mem->map_size = map_size;
+
+   *ppData = mem->map + (offset - map_offset);
+
+   return VK_SUCCESS;
+}
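+
+/* Illustrative worked example (not part of the driver) of the page-rounding
+ * arithmetic above, with made-up numbers:
+ *
+ *    offset = 5000, size = 100
+ *    map_offset = 5000 & ~4095 = 4096
+ *    map_size   = (5000 + 100) - 4096 = 1004, rounded up to 4096
+ *    *ppData    = mem->map + (5000 - 4096) = mem->map + 904
+ */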
+
+void anv_UnmapMemory(
+    VkDevice                                    _device,
+    VkDeviceMemory                              _memory)
+{
+   ANV_FROM_HANDLE(anv_device_memory, mem, _memory);
+
+   if (mem == NULL)
+      return;
+
+   anv_gem_munmap(mem->map, mem->map_size);
+}
+
+static void
+clflush_mapped_ranges(struct anv_device         *device,
+                      uint32_t                   count,
+                      const VkMappedMemoryRange *ranges)
+{
+   for (uint32_t i = 0; i < count; i++) {
+      ANV_FROM_HANDLE(anv_device_memory, mem, ranges[i].memory);
+      void *p = mem->map + (ranges[i].offset & ~CACHELINE_MASK);
+      void *end;
+
+      if (ranges[i].offset + ranges[i].size > mem->map_size)
+         end = mem->map + mem->map_size;
+      else
+         end = mem->map + ranges[i].offset + ranges[i].size;
+
+      while (p < end) {
+         __builtin_ia32_clflush(p);
+         p += CACHELINE_SIZE;
+      }
+   }
+}
+
+VkResult anv_FlushMappedMemoryRanges(
+    VkDevice                                    _device,
+    uint32_t                                    memoryRangeCount,
+    const VkMappedMemoryRange*                  pMemoryRanges)
+{
+   ANV_FROM_HANDLE(anv_device, device, _device);
+
+   if (device->info.has_llc)
+      return VK_SUCCESS;
+
+   /* Make sure the writes we're flushing have landed. */
+   __builtin_ia32_mfence();
+
+   clflush_mapped_ranges(device, memoryRangeCount, pMemoryRanges);
+
+   return VK_SUCCESS;
+}
+
+VkResult anv_InvalidateMappedMemoryRanges(
+    VkDevice                                    _device,
+    uint32_t                                    memoryRangeCount,
+    const VkMappedMemoryRange*                  pMemoryRanges)
+{
+   ANV_FROM_HANDLE(anv_device, device, _device);
+
+   if (device->info.has_llc)
+      return VK_SUCCESS;
+
+   clflush_mapped_ranges(device, memoryRangeCount, pMemoryRanges);
+
+   /* Make sure no reads get moved up above the invalidate. */
+   __builtin_ia32_mfence();
+
+   return VK_SUCCESS;
+}
+
+void anv_GetBufferMemoryRequirements(
+    VkDevice                                    device,
+    VkBuffer                                    _buffer,
+    VkMemoryRequirements*                       pMemoryRequirements)
+{
+   ANV_FROM_HANDLE(anv_buffer, buffer, _buffer);
+
+   /* The Vulkan spec (git aaed022) says:
+    *
+    *    memoryTypeBits is a bitfield and contains one bit set for every
+    *    supported memory type for the resource. The bit `1<<i` is set if and
+    *    only if the memory type `i` in the VkPhysicalDeviceMemoryProperties
+    *    structure for the physical device is supported.
+    *
+    * We support exactly one memory type.
+    */
+   pMemoryRequirements->memoryTypeBits = 1;
+
+   pMemoryRequirements->size = buffer->size;
+   pMemoryRequirements->alignment = 16;
+}
+
+void anv_GetImageMemoryRequirements(
+    VkDevice                                    device,
+    VkImage                                     _image,
+    VkMemoryRequirements*                       pMemoryRequirements)
+{
+   ANV_FROM_HANDLE(anv_image, image, _image);
+
+   /* The Vulkan spec (git aaed022) says:
+    *
+    *    memoryTypeBits is a bitfield and contains one bit set for every
+    *    supported memory type for the resource. The bit `1<<i` is set if and
+    *    only if the memory type `i` in the VkPhysicalDeviceMemoryProperties
+    *    structure for the physical device is supported.
+    *
+    * We support exactly one memory type.
+    */
+   pMemoryRequirements->memoryTypeBits = 1;
+
+   pMemoryRequirements->size = image->size;
+   pMemoryRequirements->alignment = image->alignment;
+}
+
+void anv_GetImageSparseMemoryRequirements(
+    VkDevice                                    device,
+    VkImage                                     image,
+    uint32_t*                                   pSparseMemoryRequirementCount,
+    VkSparseImageMemoryRequirements*            pSparseMemoryRequirements)
+{
+   stub();
+}
+
+void anv_GetDeviceMemoryCommitment(
+    VkDevice                                    device,
+    VkDeviceMemory                              memory,
+    VkDeviceSize*                               pCommittedMemoryInBytes)
+{
+   *pCommittedMemoryInBytes = 0;
+}
+
+VkResult anv_BindBufferMemory(
+    VkDevice                                    device,
+    VkBuffer                                    _buffer,
+    VkDeviceMemory                              _memory,
+    VkDeviceSize                                memoryOffset)
+{
+   ANV_FROM_HANDLE(anv_device_memory, mem, _memory);
+   ANV_FROM_HANDLE(anv_buffer, buffer, _buffer);
+
+   if (mem) {
+      buffer->bo = &mem->bo;
+      buffer->offset = memoryOffset;
+   } else {
+      buffer->bo = NULL;
+      buffer->offset = 0;
+   }
+
+   return VK_SUCCESS;
+}
+
+VkResult anv_BindImageMemory(
+    VkDevice                                    device,
+    VkImage                                     _image,
+    VkDeviceMemory                              _memory,
+    VkDeviceSize                                memoryOffset)
+{
+   ANV_FROM_HANDLE(anv_device_memory, mem, _memory);
+   ANV_FROM_HANDLE(anv_image, image, _image);
+
+   if (mem) {
+      image->bo = &mem->bo;
+      image->offset = memoryOffset;
+   } else {
+      image->bo = NULL;
+      image->offset = 0;
+   }
+
+   return VK_SUCCESS;
+}
+
+VkResult anv_QueueBindSparse(
+    VkQueue                                     queue,
+    uint32_t                                    bindInfoCount,
+    const VkBindSparseInfo*                     pBindInfo,
+    VkFence                                     fence)
+{
+   stub_return(VK_ERROR_INCOMPATIBLE_DRIVER);
+}
+
+VkResult anv_CreateFence(
+    VkDevice                                    _device,
+    const VkFenceCreateInfo*                    pCreateInfo,
+    const VkAllocationCallbacks*                pAllocator,
+    VkFence*                                    pFence)
+{
+   ANV_FROM_HANDLE(anv_device, device, _device);
+   struct anv_bo fence_bo;
+   struct anv_fence *fence;
+   struct anv_batch batch;
+   VkResult result;
+
+   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_FENCE_CREATE_INFO);
+
+   result = anv_bo_pool_alloc(&device->batch_bo_pool, &fence_bo);
+   if (result != VK_SUCCESS)
+      return result;
+
+   /* Fences are small.  Just store the CPU data structure in the BO. */
+   fence = fence_bo.map;
+   fence->bo = fence_bo;
+
+   /* Place the batch after the CPU data but on its own cache line. */
+   const uint32_t batch_offset = align_u32(sizeof(*fence), CACHELINE_SIZE);
+   batch.next = batch.start = fence->bo.map + batch_offset;
+   batch.end = fence->bo.map + fence->bo.size;
+   anv_batch_emit(&batch, GEN7_MI_BATCH_BUFFER_END);
+   anv_batch_emit(&batch, GEN7_MI_NOOP);
+
+   if (!device->info.has_llc) {
+      assert(((uintptr_t) batch.start & CACHELINE_MASK) == 0);
+      assert(batch.next - batch.start <= CACHELINE_SIZE);
+      __builtin_ia32_mfence();
+      __builtin_ia32_clflush(fence->bo.map);
+   }
+
+   fence->exec2_objects[0].handle = fence->bo.gem_handle;
+   fence->exec2_objects[0].relocation_count = 0;
+   fence->exec2_objects[0].relocs_ptr = 0;
+   fence->exec2_objects[0].alignment = 0;
+   fence->exec2_objects[0].offset = fence->bo.offset;
+   fence->exec2_objects[0].flags = 0;
+   fence->exec2_objects[0].rsvd1 = 0;
+   fence->exec2_objects[0].rsvd2 = 0;
+
+   fence->execbuf.buffers_ptr = (uintptr_t) fence->exec2_objects;
+   fence->execbuf.buffer_count = 1;
+   fence->execbuf.batch_start_offset = batch.start - fence->bo.map;
+   fence->execbuf.batch_len = batch.next - batch.start;
+   fence->execbuf.cliprects_ptr = 0;
+   fence->execbuf.num_cliprects = 0;
+   fence->execbuf.DR1 = 0;
+   fence->execbuf.DR4 = 0;
+
+   fence->execbuf.flags =
+      I915_EXEC_HANDLE_LUT | I915_EXEC_NO_RELOC | I915_EXEC_RENDER;
+   fence->execbuf.rsvd1 = device->context_id;
+   fence->execbuf.rsvd2 = 0;
+
+   fence->ready = false;
+
+   *pFence = anv_fence_to_handle(fence);
+
+   return VK_SUCCESS;
+}
+
+void anv_DestroyFence(
+    VkDevice                                    _device,
+    VkFence                                     _fence,
+    const VkAllocationCallbacks*                pAllocator)
+{
+   ANV_FROM_HANDLE(anv_device, device, _device);
+   ANV_FROM_HANDLE(anv_fence, fence, _fence);
+
+   assert(fence->bo.map == fence);
+   anv_bo_pool_free(&device->batch_bo_pool, &fence->bo);
+}
+
+VkResult anv_ResetFences(
+    VkDevice                                    _device,
+    uint32_t                                    fenceCount,
+    const VkFence*                              pFences)
+{
+   for (uint32_t i = 0; i < fenceCount; i++) {
+      ANV_FROM_HANDLE(anv_fence, fence, pFences[i]);
+      fence->ready = false;
+   }
+
+   return VK_SUCCESS;
+}
+
+VkResult anv_GetFenceStatus(
+    VkDevice                                    _device,
+    VkFence                                     _fence)
+{
+   ANV_FROM_HANDLE(anv_device, device, _device);
+   ANV_FROM_HANDLE(anv_fence, fence, _fence);
+   int64_t t = 0;
+   int ret;
+
+   if (fence->ready)
+      return VK_SUCCESS;
+
+   ret = anv_gem_wait(device, fence->bo.gem_handle, &t);
+   if (ret == 0) {
+      fence->ready = true;
+      return VK_SUCCESS;
+   }
+
+   return VK_NOT_READY;
+}
+
+VkResult anv_WaitForFences(
+    VkDevice                                    _device,
+    uint32_t                                    fenceCount,
+    const VkFence*                              pFences,
+    VkBool32                                    waitAll,
+    uint64_t                                    timeout)
+{
+   ANV_FROM_HANDLE(anv_device, device, _device);
+
+   /* DRM_IOCTL_I915_GEM_WAIT uses a signed 64 bit timeout and is supposed
+    * to block indefinitely for timeouts <= 0.  Unfortunately, this was broken
+    * for a couple of kernel releases.  Since there's no way to know
+    * whether or not the kernel we're using is one of the broken ones, the
+    * best we can do is to clamp the timeout to INT64_MAX.  This limits the
+    * maximum timeout from 584 years to 292 years - likely not a big deal.
+    */
+   if (timeout > INT64_MAX)
+      timeout = INT64_MAX;
+
+   int64_t t = timeout;
+
+   /* FIXME: handle !waitAll */
+
+   for (uint32_t i = 0; i < fenceCount; i++) {
+      ANV_FROM_HANDLE(anv_fence, fence, pFences[i]);
+      int ret = anv_gem_wait(device, fence->bo.gem_handle, &t);
+      if (ret == -1 && errno == ETIME) {
+         return VK_TIMEOUT;
+      } else if (ret == -1) {
+         /* We don't know the real error. */
+         return vk_errorf(VK_ERROR_OUT_OF_DEVICE_MEMORY,
+                          "gem wait failed: %m");
+      }
+   }
+
+   return VK_SUCCESS;
+}
+
+// Queue semaphore functions
+
+VkResult anv_CreateSemaphore(
+    VkDevice                                    device,
+    const VkSemaphoreCreateInfo*                pCreateInfo,
+    const VkAllocationCallbacks*                pAllocator,
+    VkSemaphore*                                pSemaphore)
+{
+   /* The DRM execbuffer ioctl always executes in-order, even between different
+    * rings. As such, there's nothing to do for the user space semaphore.
+    */
+
+   *pSemaphore = (VkSemaphore)1;
+
+   return VK_SUCCESS;
+}
+
+void anv_DestroySemaphore(
+    VkDevice                                    device,
+    VkSemaphore                                 semaphore,
+    const VkAllocationCallbacks*                pAllocator)
+{
+}
+
+// Event functions
+
+VkResult anv_CreateEvent(
+    VkDevice                                    _device,
+    const VkEventCreateInfo*                    pCreateInfo,
+    const VkAllocationCallbacks*                pAllocator,
+    VkEvent*                                    pEvent)
+{
+   ANV_FROM_HANDLE(anv_device, device, _device);
+   struct anv_state state;
+   struct anv_event *event;
+
+   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_EVENT_CREATE_INFO);
+
+   state = anv_state_pool_alloc(&device->dynamic_state_pool,
+                                sizeof(*event), 8);
+   event = state.map;
+   event->state = state;
+   event->semaphore = VK_EVENT_RESET;
+
+   if (!device->info.has_llc) {
+      /* Make sure the writes we're flushing have landed. */
+      __builtin_ia32_mfence();
+      __builtin_ia32_clflush(event);
+   }
+
+   *pEvent = anv_event_to_handle(event);
+
+   return VK_SUCCESS;
+}
+
+void anv_DestroyEvent(
+    VkDevice                                    _device,
+    VkEvent                                     _event,
+    const VkAllocationCallbacks*                pAllocator)
+{
+   ANV_FROM_HANDLE(anv_device, device, _device);
+   ANV_FROM_HANDLE(anv_event, event, _event);
+
+   anv_state_pool_free(&device->dynamic_state_pool, event->state);
+}
+
+VkResult anv_GetEventStatus(
+    VkDevice                                    _device,
+    VkEvent                                     _event)
+{
+   ANV_FROM_HANDLE(anv_device, device, _device);
+   ANV_FROM_HANDLE(anv_event, event, _event);
+
+   if (!device->info.has_llc) {
+      /* Invalidate read cache before reading event written by GPU. */
+      __builtin_ia32_clflush(event);
+      __builtin_ia32_mfence();
+   }
+
+   return event->semaphore;
+}
+
+VkResult anv_SetEvent(
+    VkDevice                                    _device,
+    VkEvent                                     _event)
+{
+   ANV_FROM_HANDLE(anv_device, device, _device);
+   ANV_FROM_HANDLE(anv_event, event, _event);
+
+   event->semaphore = VK_EVENT_SET;
+
+   if (!device->info.has_llc) {
+      /* Make sure the writes we're flushing have landed. */
+      __builtin_ia32_mfence();
+      __builtin_ia32_clflush(event);
+   }
+
+   return VK_SUCCESS;
+}
+
+VkResult anv_ResetEvent(
+    VkDevice                                    _device,
+    VkEvent                                     _event)
+{
+   ANV_FROM_HANDLE(anv_device, device, _device);
+   ANV_FROM_HANDLE(anv_event, event, _event);
+
+   event->semaphore = VK_EVENT_RESET;
+
+   if (!device->info.has_llc) {
+      /* Make sure the writes we're flushing have landed. */
+      __builtin_ia32_mfence();
+      __builtin_ia32_clflush(event);
+   }
+
+   return VK_SUCCESS;
+}
+
+// Buffer functions
+
+VkResult anv_CreateBuffer(
+    VkDevice                                    _device,
+    const VkBufferCreateInfo*                   pCreateInfo,
+    const VkAllocationCallbacks*                pAllocator,
+    VkBuffer*                                   pBuffer)
+{
+   ANV_FROM_HANDLE(anv_device, device, _device);
+   struct anv_buffer *buffer;
+
+   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO);
+
+   buffer = anv_alloc2(&device->alloc, pAllocator, sizeof(*buffer), 8,
+                       VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+   if (buffer == NULL)
+      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+
+   buffer->size = pCreateInfo->size;
+   buffer->usage = pCreateInfo->usage;
+   buffer->bo = NULL;
+   buffer->offset = 0;
+
+   *pBuffer = anv_buffer_to_handle(buffer);
+
+   return VK_SUCCESS;
+}
+
+void anv_DestroyBuffer(
+    VkDevice                                    _device,
+    VkBuffer                                    _buffer,
+    const VkAllocationCallbacks*                pAllocator)
+{
+   ANV_FROM_HANDLE(anv_device, device, _device);
+   ANV_FROM_HANDLE(anv_buffer, buffer, _buffer);
+
+   anv_free2(&device->alloc, pAllocator, buffer);
+}
+
+void
+anv_fill_buffer_surface_state(struct anv_device *device, struct anv_state state,
+                              enum isl_format format,
+                              uint32_t offset, uint32_t range, uint32_t stride)
+{
+   isl_buffer_fill_state(&device->isl_dev, state.map,
+                         .address = offset,
+                         .mocs = device->default_mocs,
+                         .size = range,
+                         .format = format,
+                         .stride = stride);
+
+   if (!device->info.has_llc)
+      anv_state_clflush(state);
+}
+
+void anv_DestroySampler(
+    VkDevice                                    _device,
+    VkSampler                                   _sampler,
+    const VkAllocationCallbacks*                pAllocator)
+{
+   ANV_FROM_HANDLE(anv_device, device, _device);
+   ANV_FROM_HANDLE(anv_sampler, sampler, _sampler);
+
+   anv_free2(&device->alloc, pAllocator, sampler);
+}
+
+VkResult anv_CreateFramebuffer(
+    VkDevice                                    _device,
+    const VkFramebufferCreateInfo*              pCreateInfo,
+    const VkAllocationCallbacks*                pAllocator,
+    VkFramebuffer*                              pFramebuffer)
+{
+   ANV_FROM_HANDLE(anv_device, device, _device);
+   struct anv_framebuffer *framebuffer;
+
+   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO);
+
+   size_t size = sizeof(*framebuffer) +
+                 sizeof(struct anv_image_view *) * pCreateInfo->attachmentCount;
+   framebuffer = anv_alloc2(&device->alloc, pAllocator, size, 8,
+                            VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+   if (framebuffer == NULL)
+      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+
+   framebuffer->attachment_count = pCreateInfo->attachmentCount;
+   for (uint32_t i = 0; i < pCreateInfo->attachmentCount; i++) {
+      VkImageView _iview = pCreateInfo->pAttachments[i];
+      framebuffer->attachments[i] = anv_image_view_from_handle(_iview);
+   }
+
+   framebuffer->width = pCreateInfo->width;
+   framebuffer->height = pCreateInfo->height;
+   framebuffer->layers = pCreateInfo->layers;
+
+   *pFramebuffer = anv_framebuffer_to_handle(framebuffer);
+
+   return VK_SUCCESS;
+}
+
+void anv_DestroyFramebuffer(
+    VkDevice                                    _device,
+    VkFramebuffer                               _fb,
+    const VkAllocationCallbacks*                pAllocator)
+{
+   ANV_FROM_HANDLE(anv_device, device, _device);
+   ANV_FROM_HANDLE(anv_framebuffer, fb, _fb);
+
+   anv_free2(&device->alloc, pAllocator, fb);
+}
+
+void vkCmdDbgMarkerBegin(
+    VkCommandBuffer                              commandBuffer,
+    const char*                                 pMarker)
+   __attribute__ ((visibility ("default")));
+
+void vkCmdDbgMarkerEnd(
+   VkCommandBuffer                              commandBuffer)
+   __attribute__ ((visibility ("default")));
+
+void vkCmdDbgMarkerBegin(
+    VkCommandBuffer                              commandBuffer,
+    const char*                                 pMarker)
+{
+}
+
+void vkCmdDbgMarkerEnd(
+    VkCommandBuffer                              commandBuffer)
+{
+}
diff --git a/src/intel/vulkan/anv_dump.c b/src/intel/vulkan/anv_dump.c
new file mode 100644 (file)
index 0000000..b7fa28b
--- /dev/null
@@ -0,0 +1,209 @@
+/*
+ * Copyright © 2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "anv_private.h"
+
+/* This file contains utility functions to help with debugging.  They can be
+ * called from GDB or similar to help inspect images and buffers.
+ */
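+
+/* For example, from GDB (pointer names and path are illustrative):
+ *
+ *    (gdb) call anv_dump_image_to_ppm(device, image, 0, 0, "/tmp/image.ppm")
+ */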
+
+void
+anv_dump_image_to_ppm(struct anv_device *device,
+                      struct anv_image *image, unsigned miplevel,
+                      unsigned array_layer, const char *filename)
+{
+   VkDevice vk_device = anv_device_to_handle(device);
+   VkResult result;
+
+   VkExtent2D extent = { image->extent.width, image->extent.height };
+   for (unsigned i = 0; i < miplevel; i++) {
+      extent.width = MAX2(1, extent.width / 2);
+      extent.height = MAX2(1, extent.height / 2);
+   }
+
+   VkImage copy_image;
+   result = anv_CreateImage(vk_device,
+      &(VkImageCreateInfo) {
+         .sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
+         .imageType = VK_IMAGE_TYPE_2D,
+         .format = VK_FORMAT_R8G8B8A8_UNORM,
+         .extent = (VkExtent3D) { extent.width, extent.height, 1 },
+         .mipLevels = 1,
+         .arrayLayers = 1,
+         .samples = 1,
+         .tiling = VK_IMAGE_TILING_LINEAR,
+         .usage = VK_IMAGE_USAGE_TRANSFER_DST_BIT,
+         .flags = 0,
+      }, NULL, &copy_image);
+   assert(result == VK_SUCCESS);
+
+   VkMemoryRequirements reqs;
+   anv_GetImageMemoryRequirements(vk_device, copy_image, &reqs);
+
+   VkDeviceMemory memory;
+   result = anv_AllocateMemory(vk_device,
+      &(VkMemoryAllocateInfo) {
+         .sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,
+         .allocationSize = reqs.size,
+         .memoryTypeIndex = 0,
+      }, NULL, &memory);
+   assert(result == VK_SUCCESS);
+
+   result = anv_BindImageMemory(vk_device, copy_image, memory, 0);
+   assert(result == VK_SUCCESS);
+
+   VkCommandPool commandPool;
+   result = anv_CreateCommandPool(vk_device,
+      &(VkCommandPoolCreateInfo) {
+         .sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO,
+         .queueFamilyIndex = 0,
+         .flags = 0,
+      }, NULL, &commandPool);
+   assert(result == VK_SUCCESS);
+
+   VkCommandBuffer cmd;
+   result = anv_AllocateCommandBuffers(vk_device,
+      &(VkCommandBufferAllocateInfo) {
+         .sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO,
+         .commandPool = commandPool,
+         .level = VK_COMMAND_BUFFER_LEVEL_PRIMARY,
+         .commandBufferCount = 1,
+      }, &cmd);
+   assert(result == VK_SUCCESS);
+
+   result = anv_BeginCommandBuffer(cmd,
+      &(VkCommandBufferBeginInfo) {
+         .sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO,
+         .flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT,
+      });
+   assert(result == VK_SUCCESS);
+
+   anv_CmdBlitImage(cmd,
+      anv_image_to_handle(image), VK_IMAGE_LAYOUT_GENERAL,
+      copy_image, VK_IMAGE_LAYOUT_GENERAL, 1,
+      &(VkImageBlit) {
+         .srcSubresource = {
+            .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
+            .mipLevel = miplevel,
+            .baseArrayLayer = array_layer,
+            .layerCount = 1,
+         },
+         .srcOffsets = {
+            { 0, 0, 0 },
+            { extent.width, extent.height, 1 },
+         },
+         .dstSubresource = {
+            .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
+            .mipLevel = 0,
+            .baseArrayLayer = 0,
+            .layerCount = 1,
+         },
+         .dstOffsets = {
+            { 0, 0, 0 },
+            { extent.width, extent.height, 1 },
+         },
+      }, VK_FILTER_NEAREST);
+
+   ANV_CALL(CmdPipelineBarrier)(cmd,
+      VK_PIPELINE_STAGE_TRANSFER_BIT,
+      VK_PIPELINE_STAGE_TRANSFER_BIT,
+      true, 0, NULL, 0, NULL, 1,
+      &(VkImageMemoryBarrier) {
+         .sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
+         .srcAccessMask = VK_ACCESS_HOST_READ_BIT,
+         .dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT,
+         .oldLayout = VK_IMAGE_LAYOUT_GENERAL,
+         .newLayout = VK_IMAGE_LAYOUT_GENERAL,
+         .srcQueueFamilyIndex = 0,
+         .dstQueueFamilyIndex = 0,
+         .image = copy_image,
+         .subresourceRange = (VkImageSubresourceRange) {
+            .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
+            .baseMipLevel = 0,
+            .levelCount = 1,
+            .baseArrayLayer = 0,
+            .layerCount = 1,
+         },
+      });
+
+   result = anv_EndCommandBuffer(cmd);
+   assert(result == VK_SUCCESS);
+
+   VkFence fence;
+   result = anv_CreateFence(vk_device,
+      &(VkFenceCreateInfo) {
+         .sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO,
+         .flags = 0,
+      }, NULL, &fence);
+   assert(result == VK_SUCCESS);
+
+   result = anv_QueueSubmit(anv_queue_to_handle(&device->queue), 1,
+      &(VkSubmitInfo) {
+         .sType = VK_STRUCTURE_TYPE_SUBMIT_INFO,
+         .commandBufferCount = 1,
+         .pCommandBuffers = &cmd,
+      }, fence);
+   assert(result == VK_SUCCESS);
+
+   result = anv_WaitForFences(vk_device, 1, &fence, true, UINT64_MAX);
+   assert(result == VK_SUCCESS);
+
+   anv_DestroyFence(vk_device, fence, NULL);
+   anv_DestroyCommandPool(vk_device, commandPool, NULL);
+
+   uint8_t *map;
+   result = anv_MapMemory(vk_device, memory, 0, reqs.size, 0, (void **)&map);
+   assert(result == VK_SUCCESS);
+
+   VkSubresourceLayout layout;
+   anv_GetImageSubresourceLayout(vk_device, copy_image,
+      &(VkImageSubresource) {
+         .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
+         .mipLevel = 0,
+         .arrayLayer = 0,
+      }, &layout);
+
+   map += layout.offset;
+
+   /* Now we can finally write the PPM file */
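+   /* A binary PPM ("P6") file is just a text header (magic number, image
+    * dimensions, and maximum channel value) followed by raw 8-bit RGB
+    * triples, so we can emit the header with fprintf() and then stream the
+    * rows out directly.
+    */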
+   FILE *file = fopen(filename, "wb");
+   assert(file);
+
+   fprintf(file, "P6\n%d %d\n255\n", extent.width, extent.height);
+   for (unsigned y = 0; y < extent.height; y++) {
+      uint8_t row[extent.width * 3];
+      for (unsigned x = 0; x < extent.width; x++) {
+         row[x * 3 + 0] = map[x * 4 + 0];
+         row[x * 3 + 1] = map[x * 4 + 1];
+         row[x * 3 + 2] = map[x * 4 + 2];
+      }
+      fwrite(row, 3, extent.width, file);
+
+      map += layout.rowPitch;
+   }
+   fclose(file);
+
+   anv_UnmapMemory(vk_device, memory);
+   anv_DestroyImage(vk_device, copy_image, NULL);
+   anv_FreeMemory(vk_device, memory, NULL);
+}
diff --git a/src/intel/vulkan/anv_entrypoints_gen.py b/src/intel/vulkan/anv_entrypoints_gen.py
new file mode 100644 (file)
index 0000000..1e4cfcb
--- /dev/null
@@ -0,0 +1,324 @@
+# coding=utf-8
+#
+# Copyright © 2015 Intel Corporation
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice (including the next
+# paragraph) shall be included in all copies or substantial portions of the
+# Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+
+import fileinput, re, sys
+
+# Each function typedef in the vulkan.h header is all on one line and matches
+# this regexp. We hope that won't change.
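+#
+# For instance, a PFN_ typedef in vulkan.h looks roughly like the following
+# (the exact parameter list is illustrative):
+#
+#   typedef VkResult (VKAPI_PTR *PFN_vkCreateInstance)(const VkInstanceCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkInstance* pInstance);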
+
+p = re.compile('typedef ([^ ]*) *\((?:VKAPI_PTR)? *\*PFN_vk([^(]*)\)(.*);')
+
+entrypoints = []
+
+# We generate a static hash table for entry point lookup
+# (vkGetProcAddress). We use a linear congruential generator for our hash
+# function and a power-of-two size table. The prime numbers are determined
+# experimentally.
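+#
+# Lookup (see anv_lookup_entrypoint emitted near the bottom of this script)
+# starts at hash(name) & hash_mask and, on a collision, keeps adding
+# prime_step until it finds a matching hash or an empty (0xffff) slot.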
+
+none = 0xffff
+hash_size = 256
+u32_mask = 2**32 - 1
+hash_mask = hash_size - 1
+
+prime_factor = 5024183
+prime_step = 19
+
+def hash(name):
+    h = 0
+    for c in name:
+        h = (h * prime_factor + ord(c)) & u32_mask
+
+    return h
+
+opt_header = False
+opt_code = False
+
+if (sys.argv[1] == "header"):
+    opt_header = True
+    sys.argv.pop()
+elif (sys.argv[1] == "code"):
+    opt_code = True
+    sys.argv.pop()
+
+# Parse the entry points in the header
+
+i = 0
+for line in fileinput.input():
+    m  = p.match(line)
+    if (m):
+        if m.group(2) == 'VoidFunction':
+            continue
+        fullname = "vk" + m.group(2)
+        h = hash(fullname)
+        entrypoints.append((m.group(1), m.group(2), m.group(3), i, h))
+        i = i + 1
+
+# For outputting entrypoints.h we generate an anv_EntryPoint() prototype
+# per entry point.
+
+if opt_header:
+    print "/* This file generated from vk_gen.py, don't edit directly. */\n"
+
+    print "struct anv_dispatch_table {"
+    print "   union {"
+    print "      void *entrypoints[%d];" % len(entrypoints)
+    print "      struct {"
+
+    for type, name, args, num, h in entrypoints:
+        print "         %s (*%s)%s;" % (type, name, args)
+    print "      };\n"
+    print "   };\n"
+    print "};\n"
+
+    print "void anv_set_dispatch_devinfo(const struct brw_device_info *info);\n"
+
+    for type, name, args, num, h in entrypoints:
+        print "%s anv_%s%s;" % (type, name, args)
+        print "%s gen7_%s%s;" % (type, name, args)
+        print "%s gen75_%s%s;" % (type, name, args)
+        print "%s gen8_%s%s;" % (type, name, args)
+        print "%s gen9_%s%s;" % (type, name, args)
+        print "%s anv_validate_%s%s;" % (type, name, args)
+    exit()
+
+
+
+print """/*
+ * Copyright © 2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+/* DO NOT EDIT! This is a generated file. */
+
+#include "anv_private.h"
+
+struct anv_entrypoint {
+   uint32_t name;
+   uint32_t hash;
+};
+
+/* We use a big string constant to avoid lots of relocations from the entry
+ * point table to lots of little strings. The entries in the entry point table
+ * store the index into this big string.
+ */
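+
+/* The emitted constant is simply a run of NUL-terminated entry point names
+ * (e.g. "vkCreateInstance" followed by a NUL, then the next name, and so on),
+ * so strings + offset yields an ordinary C string.
+ */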
+
+static const char strings[] ="""
+
+offsets = []
+i = 0
+for type, name, args, num, h in entrypoints:
+    print "   \"vk%s\\0\"" % name
+    offsets.append(i)
+    i += 2 + len(name) + 1
+print """   ;
+
+/* Weak aliases for all potential validate functions. These will resolve to
+ * NULL if they're not defined, which lets the resolve_entrypoint() function
+ * either pick a validate wrapper if available or just plug in the actual
+ * entry point.
+ */
+"""
+
+# Now generate the table of all entry points and their validation functions
+
+print "\nstatic const struct anv_entrypoint entrypoints[] = {"
+for type, name, args, num, h in entrypoints:
+    print "   { %5d, 0x%08x }," % (offsets[num], h)
+print "};\n"
+
+for layer in [ "anv", "validate", "gen7", "gen75", "gen8", "gen9" ]:
+    for type, name, args, num, h in entrypoints:
+        print "%s %s_%s%s __attribute__ ((weak));" % (type, layer, name, args)
+    print "\nconst struct anv_dispatch_table %s_layer = {" % layer
+    for type, name, args, num, h in entrypoints:
+        print "   .%s = %s_%s," % (name, layer, name)
+    print "};\n"
+
+print """
+#ifdef DEBUG
+static bool enable_validate = true;
+#else
+static bool enable_validate = false;
+#endif
+
+/* We can't use symbols that need resolving (like, oh, getenv) in the resolve
+ * function. This means that we have to determine whether or not to use the
+ * validation layer sometime before that. The constructor function attribute
+ * asks the dynamic linker to invoke determine_validate() at dlopen() time,
+ * which works.
+ */
+static void __attribute__ ((constructor))
+determine_validate(void)
+{
+   const char *s = getenv("ANV_VALIDATE");
+
+   if (s)
+      enable_validate = atoi(s);
+}
+
+static const struct brw_device_info *dispatch_devinfo;
+
+void
+anv_set_dispatch_devinfo(const struct brw_device_info *devinfo)
+{
+   dispatch_devinfo = devinfo;
+}
+
+void * __attribute__ ((noinline))
+anv_resolve_entrypoint(uint32_t index)
+{
+   if (enable_validate && validate_layer.entrypoints[index])
+      return validate_layer.entrypoints[index];
+
+   if (dispatch_devinfo == NULL) {
+      assert(anv_layer.entrypoints[index]);
+      return anv_layer.entrypoints[index];
+   }
+
+   switch (dispatch_devinfo->gen) {
+   case 9:
+      if (gen9_layer.entrypoints[index])
+         return gen9_layer.entrypoints[index];
+      /* fall through */
+   case 8:
+      if (gen8_layer.entrypoints[index])
+         return gen8_layer.entrypoints[index];
+      /* fall through */
+   case 7:
+      if (dispatch_devinfo->is_haswell && gen75_layer.entrypoints[index])
+         return gen75_layer.entrypoints[index];
+
+      if (gen7_layer.entrypoints[index])
+         return gen7_layer.entrypoints[index];
+      /* fall through */
+   case 0:
+      return anv_layer.entrypoints[index];
+   default:
+      unreachable("unsupported gen\\n");
+   }
+}
+"""
+
+# Now output ifuncs and their resolve helpers for all entry points. The
+# resolve helper calls resolve_entrypoint() with the entry point index, which
+# lets the resolver look it up in the table.
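+#
+# Per entry point, the emitted C looks roughly like this (name, parameter
+# list and index are illustrative):
+#
+#   static void *resolve_CreateInstance(void) { return anv_resolve_entrypoint(0); }
+#   VkResult vkCreateInstance(const VkInstanceCreateInfo*, const VkAllocationCallbacks*, VkInstance*)
+#      __attribute__ ((ifunc ("resolve_CreateInstance"), visibility ("default")));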
+
+for type, name, args, num, h in entrypoints:
+    print "static void *resolve_%s(void) { return anv_resolve_entrypoint(%d); }" % (name, num)
+    print "%s vk%s%s\n   __attribute__ ((ifunc (\"resolve_%s\"), visibility (\"default\")));\n" % (type, name, args, name)
+
+
+# Now generate the hash table used for entry point look up.  This is a
+# uint16_t table of entry point indices. We use 0xffff to indicate an entry
+# in the hash table is empty.
+
+map = [none for f in xrange(hash_size)]
+collisions = [0 for f in xrange(10)]
+for type, name, args, num, h in entrypoints:
+    level = 0
+    while map[h & hash_mask] != none:
+        h = h + prime_step
+        level = level + 1
+    if level > 9:
+        collisions[9] += 1
+    else:
+        collisions[level] += 1
+    map[h & hash_mask] = num
+
+print "/* Hash table stats:"
+print " * size %d entries" % hash_size
+print " * collisions  entries"
+for i in xrange(10):
+    if (i == 9):
+        plus = "+"
+    else:
+        plus = " "
+
+    print " *     %2d%s     %4d" % (i, plus, collisions[i])
+print " */\n"
+
+print "#define none 0x%04x\n" % none
+
+print "static const uint16_t map[] = {"
+for i in xrange(0, hash_size, 8):
+    print "   ",
+    for j in xrange(i, i + 8):
+        if map[j] & 0xffff == 0xffff:
+            print "  none,",
+        else:
+            print "0x%04x," % (map[j] & 0xffff),
+    print
+
+print "};"    
+
+# Finally we generate the hash table lookup function.  The hash function and
+# linear probing algorithm matches the hash table generated above.
+
+print """
+void *
+anv_lookup_entrypoint(const char *name)
+{
+   static const uint32_t prime_factor = %d;
+   static const uint32_t prime_step = %d;
+   const struct anv_entrypoint *e;
+   uint32_t hash, h, i;
+   const char *p;
+
+   hash = 0;
+   for (p = name; *p; p++)
+      hash = hash * prime_factor + *p;
+
+   h = hash;
+   do {
+      i = map[h & %d];
+      if (i == none)
+         return NULL;
+      e = &entrypoints[i];
+      h += prime_step;
+   } while (e->hash != hash);
+
+   if (strcmp(name, strings + e->name) != 0)
+      return NULL;
+
+   return anv_resolve_entrypoint(i);
+}
+""" % (prime_factor, prime_step, hash_mask)
diff --git a/src/intel/vulkan/anv_formats.c b/src/intel/vulkan/anv_formats.c
new file mode 100644 (file)
index 0000000..4d279a8
--- /dev/null
@@ -0,0 +1,603 @@
+/*
+ * Copyright © 2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "anv_private.h"
+#include "brw_surface_formats.h"
+
+#define RGBA { 0, 1, 2, 3 }
+#define BGRA { 2, 1, 0, 3 }
+
+#define swiz_fmt(__vk_fmt, __hw_fmt, __swizzle, ...)     \
+   [__vk_fmt] = { \
+      .vk_format = __vk_fmt, \
+      .name = #__vk_fmt, \
+      .isl_format = __hw_fmt, \
+      .isl_layout = &isl_format_layouts[__hw_fmt], \
+      .swizzle = __swizzle, \
+      __VA_ARGS__ \
+   }
+
+#define fmt(__vk_fmt, __hw_fmt, ...) \
+   swiz_fmt(__vk_fmt, __hw_fmt, RGBA, __VA_ARGS__)
+
+/* HINT: For array formats, the ISL name should match the VK name.  For
+ * packed formats, they should have the channels in reverse order from each
+ * other.  The reason for this is that, for packed formats, the ISL (and
+ * bspec) names are in LSB -> MSB order while VK formats are MSB -> LSB.
+ */
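+
+/* For example (both entries appear in the table below): the array format
+ * VK_FORMAT_R8G8B8A8_UNORM maps to ISL_FORMAT_R8G8B8A8_UNORM, while the
+ * packed format VK_FORMAT_A8B8G8R8_UNORM_PACK32 maps to the same
+ * ISL_FORMAT_R8G8B8A8_UNORM with the channel names reversed.
+ */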
+static const struct anv_format anv_formats[] = {
+   fmt(VK_FORMAT_UNDEFINED,               ISL_FORMAT_RAW),
+   fmt(VK_FORMAT_R4G4_UNORM_PACK8,        ISL_FORMAT_UNSUPPORTED),
+   fmt(VK_FORMAT_R4G4B4A4_UNORM_PACK16,   ISL_FORMAT_A4B4G4R4_UNORM),
+   swiz_fmt(VK_FORMAT_B4G4R4A4_UNORM_PACK16,   ISL_FORMAT_A4B4G4R4_UNORM,  BGRA),
+   fmt(VK_FORMAT_R5G6B5_UNORM_PACK16,     ISL_FORMAT_B5G6R5_UNORM),
+   swiz_fmt(VK_FORMAT_B5G6R5_UNORM_PACK16,     ISL_FORMAT_B5G6R5_UNORM, BGRA),
+   fmt(VK_FORMAT_R5G5B5A1_UNORM_PACK16,   ISL_FORMAT_A1B5G5R5_UNORM),
+   fmt(VK_FORMAT_B5G5R5A1_UNORM_PACK16,   ISL_FORMAT_UNSUPPORTED),
+   fmt(VK_FORMAT_A1R5G5B5_UNORM_PACK16,   ISL_FORMAT_B5G5R5A1_UNORM),
+   fmt(VK_FORMAT_R8_UNORM,                ISL_FORMAT_R8_UNORM),
+   fmt(VK_FORMAT_R8_SNORM,                ISL_FORMAT_R8_SNORM),
+   fmt(VK_FORMAT_R8_USCALED,              ISL_FORMAT_R8_USCALED),
+   fmt(VK_FORMAT_R8_SSCALED,              ISL_FORMAT_R8_SSCALED),
+   fmt(VK_FORMAT_R8_UINT,                 ISL_FORMAT_R8_UINT),
+   fmt(VK_FORMAT_R8_SINT,                 ISL_FORMAT_R8_SINT),
+   fmt(VK_FORMAT_R8_SRGB,                 ISL_FORMAT_UNSUPPORTED),
+   fmt(VK_FORMAT_R8G8_UNORM,              ISL_FORMAT_R8G8_UNORM),
+   fmt(VK_FORMAT_R8G8_SNORM,              ISL_FORMAT_R8G8_SNORM),
+   fmt(VK_FORMAT_R8G8_USCALED,            ISL_FORMAT_R8G8_USCALED),
+   fmt(VK_FORMAT_R8G8_SSCALED,            ISL_FORMAT_R8G8_SSCALED),
+   fmt(VK_FORMAT_R8G8_UINT,               ISL_FORMAT_R8G8_UINT),
+   fmt(VK_FORMAT_R8G8_SINT,               ISL_FORMAT_R8G8_SINT),
+   fmt(VK_FORMAT_R8G8_SRGB,               ISL_FORMAT_UNSUPPORTED), /* L8A8_UNORM_SRGB */
+   fmt(VK_FORMAT_R8G8B8_UNORM,            ISL_FORMAT_R8G8B8_UNORM),
+   fmt(VK_FORMAT_R8G8B8_SNORM,            ISL_FORMAT_R8G8B8_SNORM),
+   fmt(VK_FORMAT_R8G8B8_USCALED,          ISL_FORMAT_R8G8B8_USCALED),
+   fmt(VK_FORMAT_R8G8B8_SSCALED,          ISL_FORMAT_R8G8B8_SSCALED),
+   fmt(VK_FORMAT_R8G8B8_UINT,             ISL_FORMAT_R8G8B8_UINT),
+   fmt(VK_FORMAT_R8G8B8_SINT,             ISL_FORMAT_R8G8B8_SINT),
+   fmt(VK_FORMAT_R8G8B8_SRGB,             ISL_FORMAT_UNSUPPORTED), /* B8G8R8A8_UNORM_SRGB */
+   fmt(VK_FORMAT_R8G8B8A8_UNORM,          ISL_FORMAT_R8G8B8A8_UNORM),
+   fmt(VK_FORMAT_R8G8B8A8_SNORM,          ISL_FORMAT_R8G8B8A8_SNORM),
+   fmt(VK_FORMAT_R8G8B8A8_USCALED,        ISL_FORMAT_R8G8B8A8_USCALED),
+   fmt(VK_FORMAT_R8G8B8A8_SSCALED,        ISL_FORMAT_R8G8B8A8_SSCALED),
+   fmt(VK_FORMAT_R8G8B8A8_UINT,           ISL_FORMAT_R8G8B8A8_UINT),
+   fmt(VK_FORMAT_R8G8B8A8_SINT,           ISL_FORMAT_R8G8B8A8_SINT),
+   fmt(VK_FORMAT_R8G8B8A8_SRGB,           ISL_FORMAT_R8G8B8A8_UNORM_SRGB),
+   fmt(VK_FORMAT_A8B8G8R8_UNORM_PACK32,   ISL_FORMAT_R8G8B8A8_UNORM),
+   fmt(VK_FORMAT_A8B8G8R8_SNORM_PACK32,   ISL_FORMAT_R8G8B8A8_SNORM),
+   fmt(VK_FORMAT_A8B8G8R8_USCALED_PACK32, ISL_FORMAT_R8G8B8A8_USCALED),
+   fmt(VK_FORMAT_A8B8G8R8_SSCALED_PACK32, ISL_FORMAT_R8G8B8A8_SSCALED),
+   fmt(VK_FORMAT_A8B8G8R8_UINT_PACK32,    ISL_FORMAT_R8G8B8A8_UINT),
+   fmt(VK_FORMAT_A8B8G8R8_SINT_PACK32,    ISL_FORMAT_R8G8B8A8_SINT),
+   fmt(VK_FORMAT_A8B8G8R8_SRGB_PACK32,    ISL_FORMAT_R8G8B8A8_UNORM_SRGB),
+   fmt(VK_FORMAT_A2R10G10B10_UNORM_PACK32, ISL_FORMAT_B10G10R10A2_UNORM),
+   fmt(VK_FORMAT_A2R10G10B10_SNORM_PACK32, ISL_FORMAT_B10G10R10A2_SNORM),
+   fmt(VK_FORMAT_A2R10G10B10_USCALED_PACK32, ISL_FORMAT_B10G10R10A2_USCALED),
+   fmt(VK_FORMAT_A2R10G10B10_SSCALED_PACK32, ISL_FORMAT_B10G10R10A2_SSCALED),
+   fmt(VK_FORMAT_A2R10G10B10_UINT_PACK32, ISL_FORMAT_B10G10R10A2_UINT),
+   fmt(VK_FORMAT_A2R10G10B10_SINT_PACK32, ISL_FORMAT_B10G10R10A2_SINT),
+   fmt(VK_FORMAT_A2B10G10R10_UNORM_PACK32, ISL_FORMAT_R10G10B10A2_UNORM),
+   fmt(VK_FORMAT_A2B10G10R10_SNORM_PACK32, ISL_FORMAT_R10G10B10A2_SNORM),
+   fmt(VK_FORMAT_A2B10G10R10_USCALED_PACK32, ISL_FORMAT_R10G10B10A2_USCALED),
+   fmt(VK_FORMAT_A2B10G10R10_SSCALED_PACK32, ISL_FORMAT_R10G10B10A2_SSCALED),
+   fmt(VK_FORMAT_A2B10G10R10_UINT_PACK32, ISL_FORMAT_R10G10B10A2_UINT),
+   fmt(VK_FORMAT_A2B10G10R10_SINT_PACK32, ISL_FORMAT_R10G10B10A2_SINT),
+   fmt(VK_FORMAT_R16_UNORM,               ISL_FORMAT_R16_UNORM),
+   fmt(VK_FORMAT_R16_SNORM,               ISL_FORMAT_R16_SNORM),
+   fmt(VK_FORMAT_R16_USCALED,             ISL_FORMAT_R16_USCALED),
+   fmt(VK_FORMAT_R16_SSCALED,             ISL_FORMAT_R16_SSCALED),
+   fmt(VK_FORMAT_R16_UINT,                ISL_FORMAT_R16_UINT),
+   fmt(VK_FORMAT_R16_SINT,                ISL_FORMAT_R16_SINT),
+   fmt(VK_FORMAT_R16_SFLOAT,              ISL_FORMAT_R16_FLOAT),
+   fmt(VK_FORMAT_R16G16_UNORM,            ISL_FORMAT_R16G16_UNORM),
+   fmt(VK_FORMAT_R16G16_SNORM,            ISL_FORMAT_R16G16_SNORM),
+   fmt(VK_FORMAT_R16G16_USCALED,          ISL_FORMAT_R16G16_USCALED),
+   fmt(VK_FORMAT_R16G16_SSCALED,          ISL_FORMAT_R16G16_SSCALED),
+   fmt(VK_FORMAT_R16G16_UINT,             ISL_FORMAT_R16G16_UINT),
+   fmt(VK_FORMAT_R16G16_SINT,             ISL_FORMAT_R16G16_SINT),
+   fmt(VK_FORMAT_R16G16_SFLOAT,           ISL_FORMAT_R16G16_FLOAT),
+   fmt(VK_FORMAT_R16G16B16_UNORM,         ISL_FORMAT_R16G16B16_UNORM),
+   fmt(VK_FORMAT_R16G16B16_SNORM,         ISL_FORMAT_R16G16B16_SNORM),
+   fmt(VK_FORMAT_R16G16B16_USCALED,       ISL_FORMAT_R16G16B16_USCALED),
+   fmt(VK_FORMAT_R16G16B16_SSCALED,       ISL_FORMAT_R16G16B16_SSCALED),
+   fmt(VK_FORMAT_R16G16B16_UINT,          ISL_FORMAT_R16G16B16_UINT),
+   fmt(VK_FORMAT_R16G16B16_SINT,          ISL_FORMAT_R16G16B16_SINT),
+   fmt(VK_FORMAT_R16G16B16_SFLOAT,        ISL_FORMAT_R16G16B16_FLOAT),
+   fmt(VK_FORMAT_R16G16B16A16_UNORM,      ISL_FORMAT_R16G16B16A16_UNORM),
+   fmt(VK_FORMAT_R16G16B16A16_SNORM,      ISL_FORMAT_R16G16B16A16_SNORM),
+   fmt(VK_FORMAT_R16G16B16A16_USCALED,    ISL_FORMAT_R16G16B16A16_USCALED),
+   fmt(VK_FORMAT_R16G16B16A16_SSCALED,    ISL_FORMAT_R16G16B16A16_SSCALED),
+   fmt(VK_FORMAT_R16G16B16A16_UINT,       ISL_FORMAT_R16G16B16A16_UINT),
+   fmt(VK_FORMAT_R16G16B16A16_SINT,       ISL_FORMAT_R16G16B16A16_SINT),
+   fmt(VK_FORMAT_R16G16B16A16_SFLOAT,     ISL_FORMAT_R16G16B16A16_FLOAT),
+   fmt(VK_FORMAT_R32_UINT,                ISL_FORMAT_R32_UINT),
+   fmt(VK_FORMAT_R32_SINT,                ISL_FORMAT_R32_SINT),
+   fmt(VK_FORMAT_R32_SFLOAT,              ISL_FORMAT_R32_FLOAT),
+   fmt(VK_FORMAT_R32G32_UINT,             ISL_FORMAT_R32G32_UINT),
+   fmt(VK_FORMAT_R32G32_SINT,             ISL_FORMAT_R32G32_SINT),
+   fmt(VK_FORMAT_R32G32_SFLOAT,           ISL_FORMAT_R32G32_FLOAT),
+   fmt(VK_FORMAT_R32G32B32_UINT,          ISL_FORMAT_R32G32B32_UINT),
+   fmt(VK_FORMAT_R32G32B32_SINT,          ISL_FORMAT_R32G32B32_SINT),
+   fmt(VK_FORMAT_R32G32B32_SFLOAT,        ISL_FORMAT_R32G32B32_FLOAT),
+   fmt(VK_FORMAT_R32G32B32A32_UINT,       ISL_FORMAT_R32G32B32A32_UINT),
+   fmt(VK_FORMAT_R32G32B32A32_SINT,       ISL_FORMAT_R32G32B32A32_SINT),
+   fmt(VK_FORMAT_R32G32B32A32_SFLOAT,     ISL_FORMAT_R32G32B32A32_FLOAT),
+   fmt(VK_FORMAT_R64_UINT,                ISL_FORMAT_R64_PASSTHRU),
+   fmt(VK_FORMAT_R64_SINT,                ISL_FORMAT_R64_PASSTHRU),
+   fmt(VK_FORMAT_R64_SFLOAT,              ISL_FORMAT_R64_FLOAT),
+   fmt(VK_FORMAT_R64G64_UINT,             ISL_FORMAT_R64G64_PASSTHRU),
+   fmt(VK_FORMAT_R64G64_SINT,             ISL_FORMAT_R64G64_PASSTHRU),
+   fmt(VK_FORMAT_R64G64_SFLOAT,           ISL_FORMAT_R64G64_FLOAT),
+   fmt(VK_FORMAT_R64G64B64_UINT,          ISL_FORMAT_R64G64B64_PASSTHRU),
+   fmt(VK_FORMAT_R64G64B64_SINT,          ISL_FORMAT_R64G64B64_PASSTHRU),
+   fmt(VK_FORMAT_R64G64B64_SFLOAT,        ISL_FORMAT_R64G64B64_FLOAT),
+   fmt(VK_FORMAT_R64G64B64A64_UINT,       ISL_FORMAT_R64G64B64A64_PASSTHRU),
+   fmt(VK_FORMAT_R64G64B64A64_SINT,       ISL_FORMAT_R64G64B64A64_PASSTHRU),
+   fmt(VK_FORMAT_R64G64B64A64_SFLOAT,     ISL_FORMAT_R64G64B64A64_FLOAT),
+   fmt(VK_FORMAT_B10G11R11_UFLOAT_PACK32, ISL_FORMAT_R11G11B10_FLOAT),
+   fmt(VK_FORMAT_E5B9G9R9_UFLOAT_PACK32,  ISL_FORMAT_R9G9B9E5_SHAREDEXP),
+
+   fmt(VK_FORMAT_D16_UNORM,               ISL_FORMAT_R16_UNORM,               .has_depth = true),
+   fmt(VK_FORMAT_X8_D24_UNORM_PACK32,     ISL_FORMAT_R24_UNORM_X8_TYPELESS,   .has_depth = true),
+   fmt(VK_FORMAT_D32_SFLOAT,              ISL_FORMAT_R32_FLOAT,               .has_depth = true),
+   fmt(VK_FORMAT_S8_UINT,                 ISL_FORMAT_R8_UINT,                                      .has_stencil = true),
+   fmt(VK_FORMAT_D16_UNORM_S8_UINT,       ISL_FORMAT_UNSUPPORTED),
+   fmt(VK_FORMAT_D24_UNORM_S8_UINT,       ISL_FORMAT_R24_UNORM_X8_TYPELESS,   .has_depth = true,   .has_stencil = true),
+   fmt(VK_FORMAT_D32_SFLOAT_S8_UINT,      ISL_FORMAT_R32_FLOAT,               .has_depth = true,   .has_stencil = true),
+
+   fmt(VK_FORMAT_BC1_RGB_UNORM_BLOCK,     ISL_FORMAT_DXT1_RGB),
+   fmt(VK_FORMAT_BC1_RGB_SRGB_BLOCK,      ISL_FORMAT_DXT1_RGB_SRGB),
+   fmt(VK_FORMAT_BC1_RGBA_UNORM_BLOCK,    ISL_FORMAT_BC1_UNORM),
+   fmt(VK_FORMAT_BC1_RGBA_SRGB_BLOCK,     ISL_FORMAT_BC1_UNORM_SRGB),
+   fmt(VK_FORMAT_BC2_UNORM_BLOCK,         ISL_FORMAT_BC2_UNORM),
+   fmt(VK_FORMAT_BC2_SRGB_BLOCK,          ISL_FORMAT_BC2_UNORM_SRGB),
+   fmt(VK_FORMAT_BC3_UNORM_BLOCK,         ISL_FORMAT_BC3_UNORM),
+   fmt(VK_FORMAT_BC3_SRGB_BLOCK,          ISL_FORMAT_BC3_UNORM_SRGB),
+   fmt(VK_FORMAT_BC4_UNORM_BLOCK,         ISL_FORMAT_BC4_UNORM),
+   fmt(VK_FORMAT_BC4_SNORM_BLOCK,         ISL_FORMAT_BC4_SNORM),
+   fmt(VK_FORMAT_BC5_UNORM_BLOCK,         ISL_FORMAT_BC5_UNORM),
+   fmt(VK_FORMAT_BC5_SNORM_BLOCK,         ISL_FORMAT_BC5_SNORM),
+   fmt(VK_FORMAT_BC6H_UFLOAT_BLOCK,       ISL_FORMAT_BC6H_UF16),
+   fmt(VK_FORMAT_BC6H_SFLOAT_BLOCK,       ISL_FORMAT_BC6H_SF16),
+   fmt(VK_FORMAT_BC7_UNORM_BLOCK,         ISL_FORMAT_BC7_UNORM),
+   fmt(VK_FORMAT_BC7_SRGB_BLOCK,          ISL_FORMAT_BC7_UNORM_SRGB),
+   fmt(VK_FORMAT_ETC2_R8G8B8_UNORM_BLOCK, ISL_FORMAT_ETC2_RGB8),
+   fmt(VK_FORMAT_ETC2_R8G8B8_SRGB_BLOCK,  ISL_FORMAT_ETC2_SRGB8),
+   fmt(VK_FORMAT_ETC2_R8G8B8A1_UNORM_BLOCK, ISL_FORMAT_ETC2_RGB8_PTA),
+   fmt(VK_FORMAT_ETC2_R8G8B8A1_SRGB_BLOCK, ISL_FORMAT_ETC2_SRGB8_PTA),
+   fmt(VK_FORMAT_ETC2_R8G8B8A8_UNORM_BLOCK, ISL_FORMAT_ETC2_EAC_RGBA8),
+   fmt(VK_FORMAT_ETC2_R8G8B8A8_SRGB_BLOCK, ISL_FORMAT_ETC2_EAC_SRGB8_A8),
+   fmt(VK_FORMAT_EAC_R11_UNORM_BLOCK,     ISL_FORMAT_EAC_R11),
+   fmt(VK_FORMAT_EAC_R11_SNORM_BLOCK,     ISL_FORMAT_EAC_SIGNED_R11),
+   fmt(VK_FORMAT_EAC_R11G11_UNORM_BLOCK,  ISL_FORMAT_EAC_RG11),
+   fmt(VK_FORMAT_EAC_R11G11_SNORM_BLOCK,  ISL_FORMAT_EAC_SIGNED_RG11),
+   fmt(VK_FORMAT_ASTC_4x4_UNORM_BLOCK,    ISL_FORMAT_UNSUPPORTED),
+   fmt(VK_FORMAT_ASTC_4x4_SRGB_BLOCK,     ISL_FORMAT_UNSUPPORTED),
+   fmt(VK_FORMAT_ASTC_5x4_UNORM_BLOCK,    ISL_FORMAT_UNSUPPORTED),
+   fmt(VK_FORMAT_ASTC_5x4_SRGB_BLOCK,     ISL_FORMAT_UNSUPPORTED),
+   fmt(VK_FORMAT_ASTC_5x5_UNORM_BLOCK,    ISL_FORMAT_UNSUPPORTED),
+   fmt(VK_FORMAT_ASTC_5x5_SRGB_BLOCK,     ISL_FORMAT_UNSUPPORTED),
+   fmt(VK_FORMAT_ASTC_6x5_UNORM_BLOCK,    ISL_FORMAT_UNSUPPORTED),
+   fmt(VK_FORMAT_ASTC_6x5_SRGB_BLOCK,     ISL_FORMAT_UNSUPPORTED),
+   fmt(VK_FORMAT_ASTC_6x6_UNORM_BLOCK,    ISL_FORMAT_UNSUPPORTED),
+   fmt(VK_FORMAT_ASTC_6x6_SRGB_BLOCK,     ISL_FORMAT_UNSUPPORTED),
+   fmt(VK_FORMAT_ASTC_8x5_UNORM_BLOCK,    ISL_FORMAT_UNSUPPORTED),
+   fmt(VK_FORMAT_ASTC_8x5_SRGB_BLOCK,     ISL_FORMAT_UNSUPPORTED),
+   fmt(VK_FORMAT_ASTC_8x6_UNORM_BLOCK,    ISL_FORMAT_UNSUPPORTED),
+   fmt(VK_FORMAT_ASTC_8x6_SRGB_BLOCK,     ISL_FORMAT_UNSUPPORTED),
+   fmt(VK_FORMAT_ASTC_8x8_UNORM_BLOCK,    ISL_FORMAT_UNSUPPORTED),
+   fmt(VK_FORMAT_ASTC_8x8_SRGB_BLOCK,     ISL_FORMAT_UNSUPPORTED),
+   fmt(VK_FORMAT_ASTC_10x5_UNORM_BLOCK,   ISL_FORMAT_UNSUPPORTED),
+   fmt(VK_FORMAT_ASTC_10x5_SRGB_BLOCK,    ISL_FORMAT_UNSUPPORTED),
+   fmt(VK_FORMAT_ASTC_10x6_UNORM_BLOCK,   ISL_FORMAT_UNSUPPORTED),
+   fmt(VK_FORMAT_ASTC_10x6_SRGB_BLOCK,    ISL_FORMAT_UNSUPPORTED),
+   fmt(VK_FORMAT_ASTC_10x8_UNORM_BLOCK,   ISL_FORMAT_UNSUPPORTED),
+   fmt(VK_FORMAT_ASTC_10x8_SRGB_BLOCK,    ISL_FORMAT_UNSUPPORTED),
+   fmt(VK_FORMAT_ASTC_10x10_UNORM_BLOCK,  ISL_FORMAT_UNSUPPORTED),
+   fmt(VK_FORMAT_ASTC_10x10_SRGB_BLOCK,   ISL_FORMAT_UNSUPPORTED),
+   fmt(VK_FORMAT_ASTC_12x10_UNORM_BLOCK,  ISL_FORMAT_UNSUPPORTED),
+   fmt(VK_FORMAT_ASTC_12x10_SRGB_BLOCK,   ISL_FORMAT_UNSUPPORTED),
+   fmt(VK_FORMAT_ASTC_12x12_UNORM_BLOCK,  ISL_FORMAT_UNSUPPORTED),
+   fmt(VK_FORMAT_ASTC_12x12_SRGB_BLOCK,   ISL_FORMAT_UNSUPPORTED),
+   fmt(VK_FORMAT_B8G8R8_UNORM,            ISL_FORMAT_UNSUPPORTED),
+   fmt(VK_FORMAT_B8G8R8_SNORM,            ISL_FORMAT_UNSUPPORTED),
+   fmt(VK_FORMAT_B8G8R8_USCALED,          ISL_FORMAT_UNSUPPORTED),
+   fmt(VK_FORMAT_B8G8R8_SSCALED,          ISL_FORMAT_UNSUPPORTED),
+   fmt(VK_FORMAT_B8G8R8_UINT,             ISL_FORMAT_UNSUPPORTED),
+   fmt(VK_FORMAT_B8G8R8_SINT,             ISL_FORMAT_UNSUPPORTED),
+   fmt(VK_FORMAT_B8G8R8_SRGB,             ISL_FORMAT_UNSUPPORTED),
+   fmt(VK_FORMAT_B8G8R8A8_UNORM,          ISL_FORMAT_B8G8R8A8_UNORM),
+   fmt(VK_FORMAT_B8G8R8A8_SNORM,          ISL_FORMAT_UNSUPPORTED),
+   fmt(VK_FORMAT_B8G8R8A8_USCALED,        ISL_FORMAT_UNSUPPORTED),
+   fmt(VK_FORMAT_B8G8R8A8_SSCALED,        ISL_FORMAT_UNSUPPORTED),
+   fmt(VK_FORMAT_B8G8R8A8_UINT,           ISL_FORMAT_UNSUPPORTED),
+   fmt(VK_FORMAT_B8G8R8A8_SINT,           ISL_FORMAT_UNSUPPORTED),
+   fmt(VK_FORMAT_B8G8R8A8_SRGB,           ISL_FORMAT_B8G8R8A8_UNORM_SRGB),
+};
+
+#undef fmt
+
+const struct anv_format *
+anv_format_for_vk_format(VkFormat format)
+{
+   return &anv_formats[format];
+}
+
+/**
+ * Exactly one bit must be set in \a aspect.
+ */
+enum isl_format
+anv_get_isl_format(VkFormat format, VkImageAspectFlags aspect,
+                   VkImageTiling tiling, struct anv_format_swizzle *swizzle)
+{
+   const struct anv_format *anv_fmt = &anv_formats[format];
+
+   if (swizzle)
+      *swizzle = anv_fmt->swizzle;
+
+   switch (aspect) {
+   case VK_IMAGE_ASPECT_COLOR_BIT:
+      if (anv_fmt->isl_format == ISL_FORMAT_UNSUPPORTED) {
+         return ISL_FORMAT_UNSUPPORTED;
+      } else if (tiling == VK_IMAGE_TILING_OPTIMAL &&
+                 !util_is_power_of_two(anv_fmt->isl_layout->bs)) {
+         /* Tiled formats *must* be power-of-two because we need to upload
+          * them with the render pipeline.  For 3-channel formats, we fix
+          * this by switching them over to RGBX or RGBA formats under the
+          * hood.
+          */
+         enum isl_format rgbx = isl_format_rgb_to_rgbx(anv_fmt->isl_format);
+         if (rgbx != ISL_FORMAT_UNSUPPORTED)
+            return rgbx;
+         else
+            return isl_format_rgb_to_rgba(anv_fmt->isl_format);
+      } else {
+         return anv_fmt->isl_format;
+      }
+
+   case VK_IMAGE_ASPECT_DEPTH_BIT:
+   case (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT):
+      assert(anv_fmt->has_depth);
+      return anv_fmt->isl_format;
+
+   case VK_IMAGE_ASPECT_STENCIL_BIT:
+      assert(anv_fmt->has_stencil);
+      return ISL_FORMAT_R8_UINT;
+
+   default:
+      unreachable("bad VkImageAspect");
+      return ISL_FORMAT_UNSUPPORTED;
+   }
+}
+
+// Format capabilities
+
+void anv_validate_GetPhysicalDeviceFormatProperties(
+    VkPhysicalDevice                            physicalDevice,
+    VkFormat                                    _format,
+    VkFormatProperties*                         pFormatProperties)
+{
+   const struct anv_format *format = anv_format_for_vk_format(_format);
+   fprintf(stderr, "vkGetFormatProperties(%s)\n", format->name);
+   anv_GetPhysicalDeviceFormatProperties(physicalDevice, _format, pFormatProperties);
+}
+
+static VkFormatFeatureFlags
+get_image_format_properties(int gen, enum isl_format base,
+                            enum isl_format actual,
+                            struct anv_format_swizzle swizzle)
+{
+   const struct brw_surface_format_info *info = &surface_formats[actual];
+
+   if (actual == ISL_FORMAT_UNSUPPORTED || !info->exists)
+      return 0;
+
+   VkFormatFeatureFlags flags = 0;
+   if (info->sampling <= gen) {
+      flags |= VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT |
+               VK_FORMAT_FEATURE_BLIT_SRC_BIT;
+
+      if (info->filtering <= gen)
+         flags |= VK_FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_LINEAR_BIT;
+   }
+
+   /* We can render to swizzled formats.  However, if the alpha channel is
+    * moved, then blending won't work correctly.  The PRM tells us
+    * straight-up not to render to such a surface.
+    */
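+   /* For example, the BGRA swizzle used in the format table is { 2, 1, 0, 3 },
+    * which leaves alpha in place (swizzle.a == 3), so those formats stay
+    * renderable.
+    */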
+   if (info->render_target <= gen && swizzle.a == 3) {
+      flags |= VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT |
+               VK_FORMAT_FEATURE_BLIT_DST_BIT;
+   }
+
+   if (info->alpha_blend <= gen && swizzle.a == 3)
+      flags |= VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BLEND_BIT;
+
+   /* Load/store is determined based on base format.  This prevents RGB
+    * formats from showing up as load/store capable.
+    */
+   if (isl_is_storage_image_format(base))
+      flags |= VK_FORMAT_FEATURE_STORAGE_IMAGE_BIT;
+
+   if (base == ISL_FORMAT_R32_SINT || base == ISL_FORMAT_R32_UINT)
+      flags |= VK_FORMAT_FEATURE_STORAGE_IMAGE_ATOMIC_BIT;
+
+   return flags;
+}
+
+static VkFormatFeatureFlags
+get_buffer_format_properties(int gen, enum isl_format format)
+{
+   const struct brw_surface_format_info *info = &surface_formats[format];
+
+   if (format == ISL_FORMAT_UNSUPPORTED || !info->exists)
+      return 0;
+
+   VkFormatFeatureFlags flags = 0;
+   if (info->sampling <= gen && !isl_format_is_compressed(format))
+      flags |= VK_FORMAT_FEATURE_UNIFORM_TEXEL_BUFFER_BIT;
+
+   if (info->input_vb <= gen)
+      flags |= VK_FORMAT_FEATURE_VERTEX_BUFFER_BIT;
+
+   if (isl_is_storage_image_format(format))
+      flags |= VK_FORMAT_FEATURE_STORAGE_TEXEL_BUFFER_BIT;
+
+   if (format == ISL_FORMAT_R32_SINT || format == ISL_FORMAT_R32_UINT)
+      flags |= VK_FORMAT_FEATURE_STORAGE_TEXEL_BUFFER_ATOMIC_BIT;
+
+   return flags;
+}
+
+static void
+anv_physical_device_get_format_properties(struct anv_physical_device *physical_device,
+                                          VkFormat format,
+                                          VkFormatProperties *out_properties)
+{
+   int gen = physical_device->info->gen * 10;
+   if (physical_device->info->is_haswell)
+      gen += 5;
+
+   VkFormatFeatureFlags linear = 0, tiled = 0, buffer = 0;
+   if (anv_format_is_depth_or_stencil(&anv_formats[format])) {
+      tiled |= VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT;
+      if (physical_device->info->gen >= 8) {
+         tiled |= VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT;
+         tiled |= VK_FORMAT_FEATURE_BLIT_SRC_BIT;
+      }
+      if (anv_formats[format].has_depth) {
+         tiled |= VK_FORMAT_FEATURE_BLIT_DST_BIT;
+      }
+   } else {
+      enum isl_format linear_fmt, tiled_fmt;
+      struct anv_format_swizzle linear_swizzle, tiled_swizzle;
+      linear_fmt = anv_get_isl_format(format, VK_IMAGE_ASPECT_COLOR_BIT,
+                                      VK_IMAGE_TILING_LINEAR, &linear_swizzle);
+      tiled_fmt = anv_get_isl_format(format, VK_IMAGE_ASPECT_COLOR_BIT,
+                                     VK_IMAGE_TILING_OPTIMAL, &tiled_swizzle);
+
+      linear = get_image_format_properties(gen, linear_fmt, linear_fmt,
+                                           linear_swizzle);
+      tiled = get_image_format_properties(gen, linear_fmt, tiled_fmt,
+                                          tiled_swizzle);
+      buffer = get_buffer_format_properties(gen, linear_fmt);
+
+      /* XXX: We handle 3-channel formats by switching them out for RGBX or
+       * RGBA formats behind the scenes.  This works fine for textures
+       * because the upload process will fill in the extra channel.
+       * We could also support it for render targets, but it will take
+       * substantially more work and we have enough RGBX formats to handle
+       * what most clients will want.
+       */
+      if (linear_fmt != ISL_FORMAT_UNSUPPORTED &&
+          !util_is_power_of_two(isl_format_layouts[linear_fmt].bs) &&
+          isl_format_rgb_to_rgbx(linear_fmt) == ISL_FORMAT_UNSUPPORTED) {
+         tiled &= ~VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT &
+                  ~VK_FORMAT_FEATURE_BLIT_DST_BIT;
+      }
+   }
+
+   out_properties->linearTilingFeatures = linear;
+   out_properties->optimalTilingFeatures = tiled;
+   out_properties->bufferFeatures = buffer;
+
+   return;
+}
+
+
+void anv_GetPhysicalDeviceFormatProperties(
+    VkPhysicalDevice                            physicalDevice,
+    VkFormat                                    format,
+    VkFormatProperties*                         pFormatProperties)
+{
+   ANV_FROM_HANDLE(anv_physical_device, physical_device, physicalDevice);
+
+   anv_physical_device_get_format_properties(
+               physical_device,
+               format,
+               pFormatProperties);
+}
+
+VkResult anv_GetPhysicalDeviceImageFormatProperties(
+    VkPhysicalDevice                            physicalDevice,
+    VkFormat                                    format,
+    VkImageType                                 type,
+    VkImageTiling                               tiling,
+    VkImageUsageFlags                           usage,
+    VkImageCreateFlags                          createFlags,
+    VkImageFormatProperties*                    pImageFormatProperties)
+{
+   ANV_FROM_HANDLE(anv_physical_device, physical_device, physicalDevice);
+   VkFormatProperties format_props;
+   VkFormatFeatureFlags format_feature_flags;
+   VkExtent3D maxExtent;
+   uint32_t maxMipLevels;
+   uint32_t maxArraySize;
+   VkSampleCountFlags sampleCounts = VK_SAMPLE_COUNT_1_BIT;
+
+   anv_physical_device_get_format_properties(physical_device, format,
+                                             &format_props);
+
+   /* Extract the VkFormatFeatureFlags that are relevant for the queried
+    * tiling.
+    */
+   if (tiling == VK_IMAGE_TILING_LINEAR) {
+      format_feature_flags = format_props.linearTilingFeatures;
+   } else if (tiling == VK_IMAGE_TILING_OPTIMAL) {
+      format_feature_flags = format_props.optimalTilingFeatures;
+   } else {
+      unreachable("bad VkImageTiling");
+   }
+
+   switch (type) {
+   default:
+      unreachable("bad VkImageType");
+   case VK_IMAGE_TYPE_1D:
+      maxExtent.width = 16384;
+      maxExtent.height = 1;
+      maxExtent.depth = 1;
+      maxMipLevels = 15; /* log2(maxWidth) + 1 */
+      maxArraySize = 2048;
+      sampleCounts = VK_SAMPLE_COUNT_1_BIT;
+      break;
+   case VK_IMAGE_TYPE_2D:
+      /* FINISHME: Does this really differ for cube maps? The documentation
+       * for RENDER_SURFACE_STATE suggests so.
+       */
+      maxExtent.width = 16384;
+      maxExtent.height = 16384;
+      maxExtent.depth = 1;
+      maxMipLevels = 15; /* log2(maxWidth) + 1 */
+      maxArraySize = 2048;
+      break;
+   case VK_IMAGE_TYPE_3D:
+      maxExtent.width = 2048;
+      maxExtent.height = 2048;
+      maxExtent.depth = 2048;
+      maxMipLevels = 12; /* log2(maxWidth) + 1 */
+      maxArraySize = 1;
+      break;
+   }
+
+   if (tiling == VK_IMAGE_TILING_OPTIMAL &&
+       type == VK_IMAGE_TYPE_2D &&
+       (format_feature_flags & (VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT |
+                                VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT)) &&
+       !(createFlags & VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT) &&
+       !(usage & VK_IMAGE_USAGE_STORAGE_BIT)) {
+      sampleCounts = isl_device_get_sample_counts(&physical_device->isl_dev);
+   }
+
+   if (usage & VK_IMAGE_USAGE_TRANSFER_SRC_BIT) {
+      /* Meta implements transfers by sampling from the source image. */
+      if (!(format_feature_flags & VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT)) {
+         goto unsupported;
+      }
+   }
+
+#if 0
+   if (usage & VK_IMAGE_USAGE_TRANSFER_DST_BIT) {
+      if (anv_format_for_vk_format(format)->has_stencil) {
+         /* Not yet implemented because copying to a W-tiled surface is crazy
+          * hard.
+          */
+         anv_finishme("support VK_IMAGE_USAGE_TRANSFER_DST_BIT for "
+                      "stencil format");
+         goto unsupported;
+      }
+   }
+#endif
+
+   if (usage & VK_IMAGE_USAGE_SAMPLED_BIT) {
+      if (!(format_feature_flags & VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT)) {
+         goto unsupported;
+      }
+   }
+
+   if (usage & VK_IMAGE_USAGE_STORAGE_BIT) {
+      if (!(format_feature_flags & VK_FORMAT_FEATURE_STORAGE_IMAGE_BIT)) {
+         goto unsupported;
+      }
+   }
+
+   if (usage & VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT) {
+      if (!(format_feature_flags & VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT)) {
+         goto unsupported;
+      }
+   }
+
+   if (usage & VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT) {
+      if (!(format_feature_flags & VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT)) {
+         goto unsupported;
+      }
+   }
+
+   if (usage & VK_IMAGE_USAGE_TRANSIENT_ATTACHMENT_BIT) {
+      /* Nothing to check. */
+   }
+
+   if (usage & VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT) {
+      /* Ignore this flag because it was removed from the
+       * provisional_I_20150910 header.
+       */
+   }
+
+   *pImageFormatProperties = (VkImageFormatProperties) {
+      .maxExtent = maxExtent,
+      .maxMipLevels = maxMipLevels,
+      .maxArrayLayers = maxArraySize,
+      .sampleCounts = sampleCounts,
+
+      /* FINISHME: Accurately calculate
+       * VkImageFormatProperties::maxResourceSize.
+       */
+      .maxResourceSize = UINT32_MAX,
+   };
+
+   return VK_SUCCESS;
+
+unsupported:
+   *pImageFormatProperties = (VkImageFormatProperties) {
+      .maxExtent = { 0, 0, 0 },
+      .maxMipLevels = 0,
+      .maxArrayLayers = 0,
+      .sampleCounts = 0,
+      .maxResourceSize = 0,
+   };
+
+   return VK_SUCCESS;
+}
+
+void anv_GetPhysicalDeviceSparseImageFormatProperties(
+    VkPhysicalDevice                            physicalDevice,
+    VkFormat                                    format,
+    VkImageType                                 type,
+    uint32_t                                    samples,
+    VkImageUsageFlags                           usage,
+    VkImageTiling                               tiling,
+    uint32_t*                                   pNumProperties,
+    VkSparseImageFormatProperties*              pProperties)
+{
+   /* Sparse images are not yet supported. */
+   *pNumProperties = 0;
+}
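For reference, the limits filled in above are what an application reads back through the standard Vulkan 1.0 query. A minimal application-side sketch (illustrative only, not part of this patch; `physical_device` is assumed to be a valid VkPhysicalDevice obtained elsewhere):

   VkImageFormatProperties props;
   VkResult result = vkGetPhysicalDeviceImageFormatProperties(
      physical_device,
      VK_FORMAT_R8G8B8A8_UNORM,
      VK_IMAGE_TYPE_2D,
      VK_IMAGE_TILING_OPTIMAL,
      VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT,
      0 /* VkImageCreateFlags */,
      &props);
   /* On success, props.maxExtent, props.maxMipLevels, props.maxArrayLayers
    * and props.sampleCounts hold the values computed above; a combination
    * the driver rejects comes back as the zeroed block built at the
    * "unsupported" label.
    */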
diff --git a/src/intel/vulkan/anv_gem.c b/src/intel/vulkan/anv_gem.c
new file mode 100644 (file)
index 0000000..a886f7c
--- /dev/null
@@ -0,0 +1,335 @@
+/*
+ * Copyright © 2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#define _DEFAULT_SOURCE
+
+#include <sys/ioctl.h>
+#include <sys/mman.h>
+#include <string.h>
+#include <errno.h>
+#include <unistd.h>
+#include <fcntl.h>
+
+#include "anv_private.h"
+
+static int
+anv_ioctl(int fd, unsigned long request, void *arg)
+{
+   int ret;
+
+   do {
+      ret = ioctl(fd, request, arg);
+   } while (ret == -1 && (errno == EINTR || errno == EAGAIN));
+
+   return ret;
+}
+
+/**
+ * Wrapper around DRM_IOCTL_I915_GEM_CREATE.
+ *
+ * Return gem handle, or 0 on failure. Gem handles are never 0.
+ */
+uint32_t
+anv_gem_create(struct anv_device *device, size_t size)
+{
+   struct drm_i915_gem_create gem_create = {
+      .size = size,
+   };
+
+   int ret = anv_ioctl(device->fd, DRM_IOCTL_I915_GEM_CREATE, &gem_create);
+   if (ret != 0) {
+      /* FIXME: What do we do if this fails? */
+      return 0;
+   }
+
+   return gem_create.handle;
+}
+
+void
+anv_gem_close(struct anv_device *device, uint32_t gem_handle)
+{
+   struct drm_gem_close close = {
+      .handle = gem_handle,
+   };
+
+   anv_ioctl(device->fd, DRM_IOCTL_GEM_CLOSE, &close);
+}
+
+/**
+ * Wrapper around DRM_IOCTL_I915_GEM_MMAP.
+ */
+void*
+anv_gem_mmap(struct anv_device *device, uint32_t gem_handle,
+             uint64_t offset, uint64_t size, uint32_t flags)
+{
+   struct drm_i915_gem_mmap gem_mmap = {
+      .handle = gem_handle,
+      .offset = offset,
+      .size = size,
+      .flags = flags,
+   };
+
+   int ret = anv_ioctl(device->fd, DRM_IOCTL_I915_GEM_MMAP, &gem_mmap);
+   if (ret != 0) {
+      /* FIXME: Is NULL the right error return? Cf. MAP_FAILED. */
+      return NULL;
+   }
+
+   VG(VALGRIND_MALLOCLIKE_BLOCK(gem_mmap.addr_ptr, gem_mmap.size, 0, 1));
+   return (void *)(uintptr_t) gem_mmap.addr_ptr;
+}
+
+/* This is just a wrapper around munmap, but it also notifies valgrind that
+ * this map is no longer valid.  Pair this with anv_gem_mmap().
+ */
+void
+anv_gem_munmap(void *p, uint64_t size)
+{
+   VG(VALGRIND_FREELIKE_BLOCK(p, 0));
+   munmap(p, size);
+}
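The four wrappers above cover the basic lifetime of a GEM buffer object. A minimal sketch of how a caller might string them together (illustrative only, not part of this patch; the caller and its error handling are assumed):

   static void *
   example_create_and_map_bo(struct anv_device *device, size_t size,
                             uint32_t *out_handle)
   {
      uint32_t handle = anv_gem_create(device, size);
      if (handle == 0)
         return NULL;                     /* GEM handles are never 0 */

      void *map = anv_gem_mmap(device, handle, 0, size, 0);
      if (map == NULL) {
         anv_gem_close(device, handle);
         return NULL;
      }

      *out_handle = handle;
      return map;
      /* Teardown later: anv_gem_munmap(map, size);
       *                 anv_gem_close(device, handle);
       */
   }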
+
+uint32_t
+anv_gem_userptr(struct anv_device *device, void *mem, size_t size)
+{
+   struct drm_i915_gem_userptr userptr = {
+      .user_ptr = (__u64)((unsigned long) mem),
+      .user_size = size,
+      .flags = 0,
+   };
+
+   int ret = anv_ioctl(device->fd, DRM_IOCTL_I915_GEM_USERPTR, &userptr);
+   if (ret == -1)
+      return 0;
+
+   return userptr.handle;
+}
+
+int
+anv_gem_set_caching(struct anv_device *device,
+                    uint32_t gem_handle, uint32_t caching)
+{
+   struct drm_i915_gem_caching gem_caching = {
+      .handle = gem_handle,
+      .caching = caching,
+   };
+
+   return anv_ioctl(device->fd, DRM_IOCTL_I915_GEM_SET_CACHING, &gem_caching);
+}
+
+int
+anv_gem_set_domain(struct anv_device *device, uint32_t gem_handle,
+                   uint32_t read_domains, uint32_t write_domain)
+{
+   struct drm_i915_gem_set_domain gem_set_domain = {
+      .handle = gem_handle,
+      .read_domains = read_domains,
+      .write_domain = write_domain,
+   };
+
+   return anv_ioctl(device->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &gem_set_domain);
+}
+
+/**
+ * On error, \a timeout_ns holds the remaining time.
+ */
+int
+anv_gem_wait(struct anv_device *device, uint32_t gem_handle, int64_t *timeout_ns)
+{
+   struct drm_i915_gem_wait wait = {
+      .bo_handle = gem_handle,
+      .timeout_ns = *timeout_ns,
+      .flags = 0,
+   };
+
+   int ret = anv_ioctl(device->fd, DRM_IOCTL_I915_GEM_WAIT, &wait);
+   *timeout_ns = wait.timeout_ns;
+
+   return ret;
+}
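A sketch of how a caller might use the write-back behaviour documented above (illustrative only; `device` and `handle` are assumed to come from the caller):

   static bool
   example_bo_idle_within_1ms(struct anv_device *device, uint32_t handle)
   {
      int64_t timeout_ns = 1000 * 1000;   /* 1 ms budget */

      /* Returns 0 once the BO is idle; otherwise the kernel has written the
       * remaining budget back into timeout_ns, so a retry can pass the same
       * variable straight back in.
       */
      return anv_gem_wait(device, handle, &timeout_ns) == 0;
   }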
+
+int
+anv_gem_execbuffer(struct anv_device *device,
+                   struct drm_i915_gem_execbuffer2 *execbuf)
+{
+   return anv_ioctl(device->fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, execbuf);
+}
+
+int
+anv_gem_set_tiling(struct anv_device *device,
+                   uint32_t gem_handle, uint32_t stride, uint32_t tiling)
+{
+   int ret;
+
+   /* set_tiling overwrites the input on the error path, so we have to open
+    * code anv_ioctl.
+    */
+   do {
+      struct drm_i915_gem_set_tiling set_tiling = {
+         .handle = gem_handle,
+         .tiling_mode = tiling,
+         .stride = stride,
+      };
+
+      ret = ioctl(device->fd, DRM_IOCTL_I915_GEM_SET_TILING, &set_tiling);
+   } while (ret == -1 && (errno == EINTR || errno == EAGAIN));
+
+   return ret;
+}
+
+int
+anv_gem_get_param(int fd, uint32_t param)
+{
+   int tmp;
+
+   drm_i915_getparam_t gp = {
+      .param = param,
+      .value = &tmp,
+   };
+
+   int ret = anv_ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp);
+   if (ret == 0)
+      return tmp;
+
+   return 0;
+}
+
+bool
+anv_gem_get_bit6_swizzle(int fd, uint32_t tiling)
+{
+   struct drm_gem_close close;
+   int ret;
+
+   struct drm_i915_gem_create gem_create = {
+      .size = 4096,
+   };
+
+   if (anv_ioctl(fd, DRM_IOCTL_I915_GEM_CREATE, &gem_create)) {
+      assert(!"Failed to create GEM BO");
+      return false;
+   }
+
+   bool swizzled = false;
+
+   /* set_tiling overwrites the input on the error path, so we have to open
+    * code anv_ioctl.
+    */
+   do {
+      struct drm_i915_gem_set_tiling set_tiling = {
+         .handle = gem_create.handle,
+         .tiling_mode = tiling,
+         .stride = tiling == I915_TILING_X ? 512 : 128,
+      };
+
+      ret = ioctl(fd, DRM_IOCTL_I915_GEM_SET_TILING, &set_tiling);
+   } while (ret == -1 && (errno == EINTR || errno == EAGAIN));
+
+   if (ret != 0) {
+      assert(!"Failed to set BO tiling");
+      goto close_and_return;
+   }
+
+   struct drm_i915_gem_get_tiling get_tiling = {
+      .handle = gem_create.handle,
+   };
+
+   if (anv_ioctl(fd, DRM_IOCTL_I915_GEM_GET_TILING, &get_tiling)) {
+      assert(!"Failed to get BO tiling");
+      goto close_and_return;
+   }
+
+   swizzled = get_tiling.swizzle_mode != I915_BIT_6_SWIZZLE_NONE;
+
+close_and_return:
+
+   memset(&close, 0, sizeof(close));
+   close.handle = gem_create.handle;
+   anv_ioctl(fd, DRM_IOCTL_GEM_CLOSE, &close);
+
+   return swizzled;
+}
+
+int
+anv_gem_create_context(struct anv_device *device)
+{
+   struct drm_i915_gem_context_create create = { 0 };
+
+   int ret = anv_ioctl(device->fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE, &create);
+   if (ret == -1)
+      return -1;
+
+   return create.ctx_id;
+}
+
+int
+anv_gem_destroy_context(struct anv_device *device, int context)
+{
+   struct drm_i915_gem_context_destroy destroy = {
+      .ctx_id = context,
+   };
+
+   return anv_ioctl(device->fd, DRM_IOCTL_I915_GEM_CONTEXT_DESTROY, &destroy);
+}
+
+int
+anv_gem_get_aperture(int fd, uint64_t *size)
+{
+   struct drm_i915_gem_get_aperture aperture = { 0 };
+
+   int ret = anv_ioctl(fd, DRM_IOCTL_I915_GEM_GET_APERTURE, &aperture);
+   if (ret == -1)
+      return -1;
+
+   *size = aperture.aper_available_size;
+
+   return 0;
+}
+
+int
+anv_gem_handle_to_fd(struct anv_device *device, uint32_t gem_handle)
+{
+   struct drm_prime_handle args = {
+      .handle = gem_handle,
+      .flags = DRM_CLOEXEC,
+   };
+
+   int ret = anv_ioctl(device->fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &args);
+   if (ret == -1)
+      return -1;
+
+   return args.fd;
+}
+
+uint32_t
+anv_gem_fd_to_handle(struct anv_device *device, int fd)
+{
+   struct drm_prime_handle args = {
+      .fd = fd,
+   };
+
+   int ret = anv_ioctl(device->fd, DRM_IOCTL_PRIME_FD_TO_HANDLE, &args);
+   if (ret == -1)
+      return 0;
+
+   return args.handle;
+}
diff --git a/src/intel/vulkan/anv_gem_stubs.c b/src/intel/vulkan/anv_gem_stubs.c
new file mode 100644 (file)
index 0000000..3204fef
--- /dev/null
@@ -0,0 +1,159 @@
+/*
+ * Copyright © 2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#define _DEFAULT_SOURCE
+
+#include <linux/memfd.h>
+#include <sys/mman.h>
+#include <sys/syscall.h>
+
+#include "anv_private.h"
+
+static inline int
+memfd_create(const char *name, unsigned int flags)
+{
+   return syscall(SYS_memfd_create, name, flags);
+}
+
+uint32_t
+anv_gem_create(struct anv_device *device, size_t size)
+{
+   int fd = memfd_create("fake bo", MFD_CLOEXEC);
+   if (fd == -1)
+      return 0;
+
+   assert(fd != 0);
+
+   if (ftruncate(fd, size) == -1) {
+      close(fd);
+      return 0;
+   }
+
+   return fd;
+}
+
+void
+anv_gem_close(struct anv_device *device, uint32_t gem_handle)
+{
+   close(gem_handle);
+}
+
+void*
+anv_gem_mmap(struct anv_device *device, uint32_t gem_handle,
+             uint64_t offset, uint64_t size, uint32_t flags)
+{
+   /* Ignore flags, as they're specific to I915_GEM_MMAP. */
+   (void) flags;
+
+   return mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
+               gem_handle, offset);
+}
+
+/* This is just a wrapper around munmap, but it also notifies valgrind that
+ * this map is no longer valid.  Pair this with anv_gem_mmap().
+ */
+void
+anv_gem_munmap(void *p, uint64_t size)
+{
+   munmap(p, size);
+}
+
+uint32_t
+anv_gem_userptr(struct anv_device *device, void *mem, size_t size)
+{
+   return -1;
+}
+
+int
+anv_gem_wait(struct anv_device *device, uint32_t gem_handle, int64_t *timeout_ns)
+{
+   return 0;
+}
+
+int
+anv_gem_execbuffer(struct anv_device *device,
+                   struct drm_i915_gem_execbuffer2 *execbuf)
+{
+   return 0;
+}
+
+int
+anv_gem_set_tiling(struct anv_device *device,
+                   uint32_t gem_handle, uint32_t stride, uint32_t tiling)
+{
+   return 0;
+}
+
+int
+anv_gem_set_caching(struct anv_device *device, uint32_t gem_handle,
+                    uint32_t caching)
+{
+   return 0;
+}
+
+int
+anv_gem_set_domain(struct anv_device *device, uint32_t gem_handle,
+                   uint32_t read_domains, uint32_t write_domain)
+{
+   return 0;
+}
+
+int
+anv_gem_get_param(int fd, uint32_t param)
+{
+   unreachable("Unused");
+}
+
+bool
+anv_gem_get_bit6_swizzle(int fd, uint32_t tiling)
+{
+   unreachable("Unused");
+}
+
+int
+anv_gem_create_context(struct anv_device *device)
+{
+   unreachable("Unused");
+}
+
+int
+anv_gem_destroy_context(struct anv_device *device, int context)
+{
+   unreachable("Unused");
+}
+
+int
+anv_gem_get_aperture(int fd, uint64_t *size)
+{
+   unreachable("Unused");
+}
+
+int
+anv_gem_handle_to_fd(struct anv_device *device, uint32_t gem_handle)
+{
+   unreachable("Unused");
+}
+
+uint32_t
+anv_gem_fd_to_handle(struct anv_device *device, int fd)
+{
+   unreachable("Unused");
+}
diff --git a/src/intel/vulkan/anv_genX.h b/src/intel/vulkan/anv_genX.h
new file mode 100644 (file)
index 0000000..908a9e0
--- /dev/null
@@ -0,0 +1,66 @@
+/*
+ * Copyright © 2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+/*
+ * Gen-specific function declarations.  This header must *not* be included
+ * directly.  Instead, it is included multiple times by anv_private.h, once
+ * for each supported hardware generation.
+ *
+ * In this header file, the usual genx() macro is available.
+ */
+
+VkResult genX(init_device_state)(struct anv_device *device);
+
+void genX(cmd_buffer_emit_state_base_address)(struct anv_cmd_buffer *cmd_buffer);
+
+struct anv_state
+genX(cmd_buffer_alloc_null_surface_state)(struct anv_cmd_buffer *cmd_buffer,
+                                          struct anv_framebuffer *fb);
+
+void genX(cmd_buffer_set_subpass)(struct anv_cmd_buffer *cmd_buffer,
+                                  struct anv_subpass *subpass);
+
+void genX(flush_pipeline_select_3d)(struct anv_cmd_buffer *cmd_buffer);
+void genX(flush_pipeline_select_gpgpu)(struct anv_cmd_buffer *cmd_buffer);
+
+void genX(cmd_buffer_config_l3)(struct anv_cmd_buffer *cmd_buffer,
+                                bool enable_slm);
+
+void genX(cmd_buffer_flush_state)(struct anv_cmd_buffer *cmd_buffer);
+void genX(cmd_buffer_flush_dynamic_state)(struct anv_cmd_buffer *cmd_buffer);
+
+void genX(cmd_buffer_flush_compute_state)(struct anv_cmd_buffer *cmd_buffer);
+
+VkResult
+genX(graphics_pipeline_create)(VkDevice _device,
+                               struct anv_pipeline_cache *cache,
+                               const VkGraphicsPipelineCreateInfo *pCreateInfo,
+                               const struct anv_graphics_pipeline_create_info *extra,
+                               const VkAllocationCallbacks *alloc,
+                               VkPipeline *pPipeline);
+
+VkResult
+genX(compute_pipeline_create)(VkDevice _device,
+                              struct anv_pipeline_cache *cache,
+                              const VkComputePipelineCreateInfo *pCreateInfo,
+                              const VkAllocationCallbacks *alloc,
+                              VkPipeline *pPipeline);
diff --git a/src/intel/vulkan/anv_image.c b/src/intel/vulkan/anv_image.c
new file mode 100644 (file)
index 0000000..143a084
--- /dev/null
@@ -0,0 +1,794 @@
+/*
+ * Copyright © 2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include <assert.h>
+#include <stdbool.h>
+#include <string.h>
+#include <unistd.h>
+#include <fcntl.h>
+
+#include "anv_private.h"
+
+/**
+ * Exactly one bit must be set in \a aspect.
+ */
+static isl_surf_usage_flags_t
+choose_isl_surf_usage(VkImageUsageFlags vk_usage,
+                      VkImageAspectFlags aspect)
+{
+   isl_surf_usage_flags_t isl_usage = 0;
+
+   /* FINISHME: Support aux surfaces */
+   isl_usage |= ISL_SURF_USAGE_DISABLE_AUX_BIT;
+
+   if (vk_usage & VK_IMAGE_USAGE_SAMPLED_BIT)
+      isl_usage |= ISL_SURF_USAGE_TEXTURE_BIT;
+
+   if (vk_usage & VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT)
+      isl_usage |= ISL_SURF_USAGE_TEXTURE_BIT;
+
+   if (vk_usage & VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT)
+      isl_usage |= ISL_SURF_USAGE_RENDER_TARGET_BIT;
+
+   if (vk_usage & VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT)
+      isl_usage |= ISL_SURF_USAGE_CUBE_BIT;
+
+   if (vk_usage & VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT) {
+      switch (aspect) {
+      default:
+         unreachable("bad VkImageAspect");
+      case VK_IMAGE_ASPECT_DEPTH_BIT:
+         isl_usage |= ISL_SURF_USAGE_DEPTH_BIT;
+         break;
+      case VK_IMAGE_ASPECT_STENCIL_BIT:
+         isl_usage |= ISL_SURF_USAGE_STENCIL_BIT;
+         break;
+      }
+   }
+
+   if (vk_usage & VK_IMAGE_USAGE_TRANSFER_SRC_BIT) {
+      /* Meta implements transfers by sampling from the source image. */
+      isl_usage |= ISL_SURF_USAGE_TEXTURE_BIT;
+   }
+
+   if (vk_usage & VK_IMAGE_USAGE_TRANSFER_DST_BIT) {
+      /* Meta implements transfers by rendering into the destination image. */
+      isl_usage |= ISL_SURF_USAGE_RENDER_TARGET_BIT;
+   }
+
+   return isl_usage;
+}
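A worked example of the mapping above, for a hypothetical color image created for sampling and as a transfer destination (inputs chosen for illustration, not from this patch):

   /* choose_isl_surf_usage(VK_IMAGE_USAGE_SAMPLED_BIT |
    *                       VK_IMAGE_USAGE_TRANSFER_DST_BIT,
    *                       VK_IMAGE_ASPECT_COLOR_BIT)
    *    == ISL_SURF_USAGE_DISABLE_AUX_BIT     (always set; no aux support yet)
    *     | ISL_SURF_USAGE_TEXTURE_BIT         (from SAMPLED)
    *     | ISL_SURF_USAGE_RENDER_TARGET_BIT   (TRANSFER_DST: meta renders
    *                                           into the destination image)
    */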
+
+/**
+ * Exactly one bit must be set in \a aspect.
+ */
+static struct anv_surface *
+get_surface(struct anv_image *image, VkImageAspectFlags aspect)
+{
+   switch (aspect) {
+   default:
+      unreachable("bad VkImageAspect");
+   case VK_IMAGE_ASPECT_COLOR_BIT:
+      return &image->color_surface;
+   case VK_IMAGE_ASPECT_DEPTH_BIT:
+      return &image->depth_surface;
+   case VK_IMAGE_ASPECT_STENCIL_BIT:
+      return &image->stencil_surface;
+   }
+}
+
+/**
+ * Initialize the anv_image::*_surface selected by \a aspect. Then update the
+ * image's memory requirements (that is, the image's size and alignment).
+ *
+ * Exactly one bit must be set in \a aspect.
+ */
+static VkResult
+make_surface(const struct anv_device *dev,
+             struct anv_image *image,
+             const struct anv_image_create_info *anv_info,
+             VkImageAspectFlags aspect)
+{
+   const VkImageCreateInfo *vk_info = anv_info->vk_info;
+   bool ok UNUSED;
+
+   static const enum isl_surf_dim vk_to_isl_surf_dim[] = {
+      [VK_IMAGE_TYPE_1D] = ISL_SURF_DIM_1D,
+      [VK_IMAGE_TYPE_2D] = ISL_SURF_DIM_2D,
+      [VK_IMAGE_TYPE_3D] = ISL_SURF_DIM_3D,
+   };
+
+   isl_tiling_flags_t tiling_flags = anv_info->isl_tiling_flags;
+   if (vk_info->tiling == VK_IMAGE_TILING_LINEAR)
+      tiling_flags = ISL_TILING_LINEAR_BIT;
+
+   struct anv_surface *anv_surf = get_surface(image, aspect);
+
+   VkExtent3D extent;
+   switch (vk_info->imageType) {
+   case VK_IMAGE_TYPE_1D:
+      extent = (VkExtent3D) { vk_info->extent.width, 1, 1 };
+      break;
+   case VK_IMAGE_TYPE_2D:
+      extent = (VkExtent3D) { vk_info->extent.width, vk_info->extent.height, 1 };
+      break;
+   case VK_IMAGE_TYPE_3D:
+      extent = vk_info->extent;
+      break;
+   default:
+      unreachable("invalid image type");
+   }
+
+   image->extent = extent;
+
+   ok = isl_surf_init(&dev->isl_dev, &anv_surf->isl,
+      .dim = vk_to_isl_surf_dim[vk_info->imageType],
+      .format = anv_get_isl_format(vk_info->format, aspect,
+                                   vk_info->tiling, NULL),
+      .width = extent.width,
+      .height = extent.height,
+      .depth = extent.depth,
+      .levels = vk_info->mipLevels,
+      .array_len = vk_info->arrayLayers,
+      .samples = vk_info->samples,
+      .min_alignment = 0,
+      .min_pitch = 0,
+      .usage = choose_isl_surf_usage(image->usage, aspect),
+      .tiling_flags = tiling_flags);
+
+   /* isl_surf_init() will fail only if provided invalid input. Invalid input
+    * is illegal in Vulkan.
+    */
+   assert(ok);
+
+   anv_surf->offset = align_u32(image->size, anv_surf->isl.alignment);
+   image->size = anv_surf->offset + anv_surf->isl.size;
+   image->alignment = MAX(image->alignment, anv_surf->isl.alignment);
+
+   return VK_SUCCESS;
+}
+
+/**
+ * Parameter @a format is required; this function consults it rather than
+ * re-deriving the format from VkImageCreateInfo::format.
+ */
+static VkImageUsageFlags
+anv_image_get_full_usage(const VkImageCreateInfo *info,
+                         const struct anv_format *format)
+{
+   VkImageUsageFlags usage = info->usage;
+
+   if (info->samples > 1 &&
+       (usage & VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT)) {
+      /* Meta will resolve the image by binding it as a texture. */
+      usage |= VK_IMAGE_USAGE_SAMPLED_BIT;
+   }
+
+   if (usage & VK_IMAGE_USAGE_TRANSFER_SRC_BIT) {
+      /* Meta will transfer from the image by binding it as a texture. */
+      usage |= VK_IMAGE_USAGE_SAMPLED_BIT;
+   }
+
+   if (usage & VK_IMAGE_USAGE_TRANSFER_DST_BIT) {
+      /* For non-clear transfer operations, meta will transfer to the image by
+       * binding it as a color attachment, even if the image format is not
+       * a color format.
+       */
+      usage |= VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
+
+      if (anv_format_is_depth_or_stencil(format)) {
+         /* vkCmdClearDepthStencilImage() only requires that
+          * VK_IMAGE_USAGE_TRANSFER_DST_BIT be set. In particular, it does
+          * not require VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT. Meta
+          * clears the image, though, by binding it as a depthstencil
+          * attachment.
+          */
+         usage |= VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT;
+      }
+   }
+
+   return usage;
+}
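A worked example of the expansion above for a hypothetical depth/stencil image created only as a transfer destination:

   /* With info->usage == VK_IMAGE_USAGE_TRANSFER_DST_BIT, info->samples == 1
    * and a D24_UNORM_S8_UINT format, anv_image_get_full_usage() returns
    *
    *    VK_IMAGE_USAGE_TRANSFER_DST_BIT
    *  | VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT          (meta copies)
    *  | VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT  (meta clears)
    */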
+
+VkResult
+anv_image_create(VkDevice _device,
+                 const struct anv_image_create_info *create_info,
+                 const VkAllocationCallbacks* alloc,
+                 VkImage *pImage)
+{
+   ANV_FROM_HANDLE(anv_device, device, _device);
+   const VkImageCreateInfo *pCreateInfo = create_info->vk_info;
+   struct anv_image *image = NULL;
+   const struct anv_format *format = anv_format_for_vk_format(pCreateInfo->format);
+   VkResult r;
+
+   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO);
+
+   anv_assert(pCreateInfo->mipLevels > 0);
+   anv_assert(pCreateInfo->arrayLayers > 0);
+   anv_assert(pCreateInfo->samples > 0);
+   anv_assert(pCreateInfo->extent.width > 0);
+   anv_assert(pCreateInfo->extent.height > 0);
+   anv_assert(pCreateInfo->extent.depth > 0);
+
+   image = anv_alloc2(&device->alloc, alloc, sizeof(*image), 8,
+                      VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+   if (!image)
+      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+
+   memset(image, 0, sizeof(*image));
+   image->type = pCreateInfo->imageType;
+   image->extent = pCreateInfo->extent;
+   image->vk_format = pCreateInfo->format;
+   image->format = format;
+   image->levels = pCreateInfo->mipLevels;
+   image->array_size = pCreateInfo->arrayLayers;
+   image->samples = pCreateInfo->samples;
+   image->usage = anv_image_get_full_usage(pCreateInfo, format);
+   image->tiling = pCreateInfo->tiling;
+
+   if (likely(anv_format_is_color(format))) {
+      r = make_surface(device, image, create_info,
+                       VK_IMAGE_ASPECT_COLOR_BIT);
+      if (r != VK_SUCCESS)
+         goto fail;
+   } else {
+      if (image->format->has_depth) {
+         r = make_surface(device, image, create_info,
+                          VK_IMAGE_ASPECT_DEPTH_BIT);
+         if (r != VK_SUCCESS)
+            goto fail;
+      }
+
+      if (image->format->has_stencil) {
+         r = make_surface(device, image, create_info,
+                          VK_IMAGE_ASPECT_STENCIL_BIT);
+         if (r != VK_SUCCESS)
+            goto fail;
+      }
+   }
+
+   *pImage = anv_image_to_handle(image);
+
+   return VK_SUCCESS;
+
+fail:
+   if (image)
+      anv_free2(&device->alloc, alloc, image);
+
+   return r;
+}
+
+VkResult
+anv_CreateImage(VkDevice device,
+                const VkImageCreateInfo *pCreateInfo,
+                const VkAllocationCallbacks *pAllocator,
+                VkImage *pImage)
+{
+   return anv_image_create(device,
+      &(struct anv_image_create_info) {
+         .vk_info = pCreateInfo,
+         .isl_tiling_flags = ISL_TILING_ANY_MASK,
+      },
+      pAllocator,
+      pImage);
+}
+
+void
+anv_DestroyImage(VkDevice _device, VkImage _image,
+                 const VkAllocationCallbacks *pAllocator)
+{
+   ANV_FROM_HANDLE(anv_device, device, _device);
+
+   anv_free2(&device->alloc, pAllocator, anv_image_from_handle(_image));
+}
+
+static void
+anv_surface_get_subresource_layout(struct anv_image *image,
+                                   struct anv_surface *surface,
+                                   const VkImageSubresource *subresource,
+                                   VkSubresourceLayout *layout)
+{
+   /* Calculating a real offset for a non-zero mip level or array slice is
+    * not yet implemented, so insist that the caller asks for neither.
+    */
+   anv_assert(subresource->mipLevel == 0);
+   anv_assert(subresource->arrayLayer == 0);
+
+   layout->offset = surface->offset;
+   layout->rowPitch = surface->isl.row_pitch;
+   layout->depthPitch = isl_surf_get_array_pitch(&surface->isl);
+   layout->arrayPitch = isl_surf_get_array_pitch(&surface->isl);
+   layout->size = surface->isl.size;
+}
+
+void anv_GetImageSubresourceLayout(
+    VkDevice                                    device,
+    VkImage                                     _image,
+    const VkImageSubresource*                   pSubresource,
+    VkSubresourceLayout*                        pLayout)
+{
+   ANV_FROM_HANDLE(anv_image, image, _image);
+
+   assert(__builtin_popcount(pSubresource->aspectMask) == 1);
+
+   switch (pSubresource->aspectMask) {
+   case VK_IMAGE_ASPECT_COLOR_BIT:
+      anv_surface_get_subresource_layout(image, &image->color_surface,
+                                         pSubresource, pLayout);
+      break;
+   case VK_IMAGE_ASPECT_DEPTH_BIT:
+      anv_surface_get_subresource_layout(image, &image->depth_surface,
+                                         pSubresource, pLayout);
+      break;
+   case VK_IMAGE_ASPECT_STENCIL_BIT:
+      anv_surface_get_subresource_layout(image, &image->stencil_surface,
+                                         pSubresource, pLayout);
+      break;
+   default:
+      assert(!"Invalid image aspect");
+   }
+}
+
+VkResult
+anv_validate_CreateImageView(VkDevice _device,
+                             const VkImageViewCreateInfo *pCreateInfo,
+                             const VkAllocationCallbacks *pAllocator,
+                             VkImageView *pView)
+{
+   ANV_FROM_HANDLE(anv_image, image, pCreateInfo->image);
+   const VkImageSubresourceRange *subresource;
+   const struct anv_format *view_format_info;
+
+   /* Validate structure type before dereferencing it. */
+   assert(pCreateInfo);
+   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO);
+   subresource = &pCreateInfo->subresourceRange;
+
+   /* Validate viewType is in range before using it. */
+   assert(pCreateInfo->viewType >= VK_IMAGE_VIEW_TYPE_BEGIN_RANGE);
+   assert(pCreateInfo->viewType <= VK_IMAGE_VIEW_TYPE_END_RANGE);
+
+   /* Validate format is in range before using it. */
+   assert(pCreateInfo->format >= VK_FORMAT_BEGIN_RANGE);
+   assert(pCreateInfo->format <= VK_FORMAT_END_RANGE);
+   view_format_info = anv_format_for_vk_format(pCreateInfo->format);
+
+   /* Validate channel swizzles. */
+   assert(pCreateInfo->components.r >= VK_COMPONENT_SWIZZLE_BEGIN_RANGE);
+   assert(pCreateInfo->components.r <= VK_COMPONENT_SWIZZLE_END_RANGE);
+   assert(pCreateInfo->components.g >= VK_COMPONENT_SWIZZLE_BEGIN_RANGE);
+   assert(pCreateInfo->components.g <= VK_COMPONENT_SWIZZLE_END_RANGE);
+   assert(pCreateInfo->components.b >= VK_COMPONENT_SWIZZLE_BEGIN_RANGE);
+   assert(pCreateInfo->components.b <= VK_COMPONENT_SWIZZLE_END_RANGE);
+   assert(pCreateInfo->components.a >= VK_COMPONENT_SWIZZLE_BEGIN_RANGE);
+   assert(pCreateInfo->components.a <= VK_COMPONENT_SWIZZLE_END_RANGE);
+
+   /* Validate subresource. */
+   assert(subresource->aspectMask != 0);
+   assert(subresource->levelCount > 0);
+   assert(subresource->layerCount > 0);
+   assert(subresource->baseMipLevel < image->levels);
+   assert(subresource->baseMipLevel + anv_get_levelCount(image, subresource) <= image->levels);
+   assert(subresource->baseArrayLayer < image->array_size);
+   assert(subresource->baseArrayLayer + anv_get_layerCount(image, subresource) <= image->array_size);
+   assert(pView);
+
+   const VkImageAspectFlags ds_flags = VK_IMAGE_ASPECT_DEPTH_BIT
+                                     | VK_IMAGE_ASPECT_STENCIL_BIT;
+
+   /* Validate format. */
+   if (subresource->aspectMask & VK_IMAGE_ASPECT_COLOR_BIT) {
+      assert(subresource->aspectMask == VK_IMAGE_ASPECT_COLOR_BIT);
+      assert(!image->format->has_depth);
+      assert(!image->format->has_stencil);
+      assert(!view_format_info->has_depth);
+      assert(!view_format_info->has_stencil);
+      assert(view_format_info->isl_layout->bs ==
+             image->format->isl_layout->bs);
+   } else if (subresource->aspectMask & ds_flags) {
+      assert((subresource->aspectMask & ~ds_flags) == 0);
+
+      if (subresource->aspectMask & VK_IMAGE_ASPECT_DEPTH_BIT) {
+         assert(image->format->has_depth);
+         assert(view_format_info->has_depth);
+         assert(view_format_info->isl_layout->bs ==
+                image->format->isl_layout->bs);
+      }
+
+      if (subresource->aspectMask & VK_IMAGE_ASPECT_STENCIL_BIT) {
+         /* FINISHME: Is it legal to have an R8 view of S8? */
+         assert(image->format->has_stencil);
+         assert(view_format_info->has_stencil);
+      }
+   } else {
+      assert(!"bad VkImageSubresourceRange::aspectFlags");
+   }
+
+   return anv_CreateImageView(_device, pCreateInfo, pAllocator, pView);
+}
+
+static struct anv_state
+alloc_surface_state(struct anv_device *device,
+                    struct anv_cmd_buffer *cmd_buffer)
+{
+   if (cmd_buffer) {
+      return anv_cmd_buffer_alloc_surface_state(cmd_buffer);
+   } else {
+      return anv_state_pool_alloc(&device->surface_state_pool, 64, 64);
+   }
+}
+
+static bool
+has_matching_storage_typed_format(const struct anv_device *device,
+                                  enum isl_format format)
+{
+   return (isl_format_get_layout(format)->bs <= 4 ||
+           (isl_format_get_layout(format)->bs <= 8 &&
+            (device->info.gen >= 8 || device->info.is_haswell)) ||
+           device->info.gen >= 9);
+}
+
+static enum isl_channel_select
+remap_swizzle(VkComponentSwizzle swizzle, VkComponentSwizzle component,
+              struct anv_format_swizzle format_swizzle)
+{
+   if (swizzle == VK_COMPONENT_SWIZZLE_IDENTITY)
+      swizzle = component;
+
+   switch (swizzle) {
+   case VK_COMPONENT_SWIZZLE_ZERO:
+      return ISL_CHANNEL_SELECT_ZERO;
+   case VK_COMPONENT_SWIZZLE_ONE:
+      return ISL_CHANNEL_SELECT_ONE;
+   case VK_COMPONENT_SWIZZLE_R:
+      return ISL_CHANNEL_SELECT_RED + format_swizzle.r;
+   case VK_COMPONENT_SWIZZLE_G:
+      return ISL_CHANNEL_SELECT_RED + format_swizzle.g;
+   case VK_COMPONENT_SWIZZLE_B:
+      return ISL_CHANNEL_SELECT_RED + format_swizzle.b;
+   case VK_COMPONENT_SWIZZLE_A:
+      return ISL_CHANNEL_SELECT_RED + format_swizzle.a;
+   default:
+      unreachable("Invalid swizzle");
+   }
+}
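A worked example of the remapping above, with a hypothetical format swizzle (not taken from this patch):

   /* Suppose anv_get_isl_format() reported format_swizzle = {.r = 2, .g = 1,
    * .b = 0, .a = 3}, i.e. the view's red data lives in the surface's third
    * channel.  An identity component mapping then resolves as
    *
    *    remap_swizzle(VK_COMPONENT_SWIZZLE_IDENTITY, VK_COMPONENT_SWIZZLE_R,
    *                  format_swizzle)
    *       == ISL_CHANNEL_SELECT_RED + 2
    *       == ISL_CHANNEL_SELECT_BLUE
    */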
+
+void
+anv_image_view_init(struct anv_image_view *iview,
+                    struct anv_device *device,
+                    const VkImageViewCreateInfo* pCreateInfo,
+                    struct anv_cmd_buffer *cmd_buffer,
+                    uint32_t offset,
+                    VkImageUsageFlags usage_mask)
+{
+   ANV_FROM_HANDLE(anv_image, image, pCreateInfo->image);
+   const VkImageSubresourceRange *range = &pCreateInfo->subresourceRange;
+
+   assert(range->layerCount > 0);
+   assert(range->baseMipLevel < image->levels);
+   assert(image->usage & (VK_IMAGE_USAGE_SAMPLED_BIT |
+                          VK_IMAGE_USAGE_STORAGE_BIT |
+                          VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT |
+                          VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT));
+
+   switch (image->type) {
+   default:
+      unreachable("bad VkImageType");
+   case VK_IMAGE_TYPE_1D:
+   case VK_IMAGE_TYPE_2D:
+      assert(range->baseArrayLayer + anv_get_layerCount(image, range) - 1 <= image->array_size);
+      break;
+   case VK_IMAGE_TYPE_3D:
+      assert(range->baseArrayLayer + anv_get_layerCount(image, range) - 1
+             <= anv_minify(image->extent.depth, range->baseMipLevel));
+      break;
+   }
+
+   struct anv_surface *surface =
+      anv_image_get_surface_for_aspect_mask(image, range->aspectMask);
+
+   iview->image = image;
+   iview->bo = image->bo;
+   iview->offset = image->offset + surface->offset + offset;
+
+   iview->aspect_mask = pCreateInfo->subresourceRange.aspectMask;
+   iview->vk_format = pCreateInfo->format;
+
+   struct anv_format_swizzle swizzle;
+   enum isl_format format = anv_get_isl_format(pCreateInfo->format,
+                                               range->aspectMask,
+                                               image->tiling, &swizzle);
+
+   iview->base_layer = range->baseArrayLayer;
+   iview->base_mip = range->baseMipLevel;
+
+   struct isl_view isl_view = {
+      .format = format,
+      .base_level = range->baseMipLevel,
+      .levels = anv_get_levelCount(image, range),
+      .base_array_layer = range->baseArrayLayer,
+      .array_len = anv_get_layerCount(image, range),
+      .channel_select = {
+         remap_swizzle(pCreateInfo->components.r,
+                       VK_COMPONENT_SWIZZLE_R, swizzle),
+         remap_swizzle(pCreateInfo->components.g,
+                       VK_COMPONENT_SWIZZLE_G, swizzle),
+         remap_swizzle(pCreateInfo->components.b,
+                       VK_COMPONENT_SWIZZLE_B, swizzle),
+         remap_swizzle(pCreateInfo->components.a,
+                       VK_COMPONENT_SWIZZLE_A, swizzle),
+      },
+   };
+
+   iview->extent = (VkExtent3D) {
+      .width  = anv_minify(image->extent.width , range->baseMipLevel),
+      .height = anv_minify(image->extent.height, range->baseMipLevel),
+      .depth  = anv_minify(image->extent.depth , range->baseMipLevel),
+   };
+
+   isl_surf_usage_flags_t cube_usage;
+   if (pCreateInfo->viewType == VK_IMAGE_VIEW_TYPE_CUBE ||
+       pCreateInfo->viewType == VK_IMAGE_VIEW_TYPE_CUBE_ARRAY) {
+      cube_usage = ISL_SURF_USAGE_CUBE_BIT;
+   } else {
+      cube_usage = 0;
+   }
+
+   if (image->usage & usage_mask & VK_IMAGE_USAGE_SAMPLED_BIT) {
+      iview->sampler_surface_state = alloc_surface_state(device, cmd_buffer);
+
+      isl_view.usage = cube_usage | ISL_SURF_USAGE_TEXTURE_BIT;
+      isl_surf_fill_state(&device->isl_dev,
+                          iview->sampler_surface_state.map,
+                          .surf = &surface->isl,
+                          .view = &isl_view,
+                          .mocs = device->default_mocs);
+
+      if (!device->info.has_llc)
+         anv_state_clflush(iview->sampler_surface_state);
+   } else {
+      iview->sampler_surface_state.alloc_size = 0;
+   }
+
+   if (image->usage & usage_mask & VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT) {
+      iview->color_rt_surface_state = alloc_surface_state(device, cmd_buffer);
+
+      isl_view.usage = cube_usage | ISL_SURF_USAGE_RENDER_TARGET_BIT;
+      isl_surf_fill_state(&device->isl_dev,
+                          iview->color_rt_surface_state.map,
+                          .surf = &surface->isl,
+                          .view = &isl_view,
+                          .mocs = device->default_mocs);
+
+      if (!device->info.has_llc)
+         anv_state_clflush(iview->color_rt_surface_state);
+   } else {
+      iview->color_rt_surface_state.alloc_size = 0;
+   }
+
+   if (image->usage & usage_mask & VK_IMAGE_USAGE_STORAGE_BIT) {
+      iview->storage_surface_state = alloc_surface_state(device, cmd_buffer);
+
+      if (has_matching_storage_typed_format(device, format)) {
+         isl_view.usage = cube_usage | ISL_SURF_USAGE_STORAGE_BIT;
+         isl_surf_fill_state(&device->isl_dev,
+                             iview->storage_surface_state.map,
+                             .surf = &surface->isl,
+                             .view = &isl_view,
+                             .mocs = device->default_mocs);
+      } else {
+         anv_fill_buffer_surface_state(device, iview->storage_surface_state,
+                                       ISL_FORMAT_RAW,
+                                       iview->offset,
+                                       iview->bo->size - iview->offset, 1);
+      }
+
+      isl_surf_fill_image_param(&device->isl_dev,
+                                &iview->storage_image_param,
+                                &surface->isl, &isl_view);
+
+      if (!device->info.has_llc)
+         anv_state_clflush(iview->storage_surface_state);
+   } else {
+      iview->storage_surface_state.alloc_size = 0;
+   }
+}
+
+VkResult
+anv_CreateImageView(VkDevice _device,
+                    const VkImageViewCreateInfo *pCreateInfo,
+                    const VkAllocationCallbacks *pAllocator,
+                    VkImageView *pView)
+{
+   ANV_FROM_HANDLE(anv_device, device, _device);
+   struct anv_image_view *view;
+
+   view = anv_alloc2(&device->alloc, pAllocator, sizeof(*view), 8,
+                     VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+   if (view == NULL)
+      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+
+   anv_image_view_init(view, device, pCreateInfo, NULL, 0, ~0);
+
+   *pView = anv_image_view_to_handle(view);
+
+   return VK_SUCCESS;
+}
+
+void
+anv_DestroyImageView(VkDevice _device, VkImageView _iview,
+                     const VkAllocationCallbacks *pAllocator)
+{
+   ANV_FROM_HANDLE(anv_device, device, _device);
+   ANV_FROM_HANDLE(anv_image_view, iview, _iview);
+
+   if (iview->color_rt_surface_state.alloc_size > 0) {
+      anv_state_pool_free(&device->surface_state_pool,
+                          iview->color_rt_surface_state);
+   }
+
+   if (iview->sampler_surface_state.alloc_size > 0) {
+      anv_state_pool_free(&device->surface_state_pool,
+                          iview->sampler_surface_state);
+   }
+
+   if (iview->storage_surface_state.alloc_size > 0) {
+      anv_state_pool_free(&device->surface_state_pool,
+                          iview->storage_surface_state);
+   }
+
+   anv_free2(&device->alloc, pAllocator, iview);
+}
+
+VkResult
+anv_CreateBufferView(VkDevice _device,
+                     const VkBufferViewCreateInfo *pCreateInfo,
+                     const VkAllocationCallbacks *pAllocator,
+                     VkBufferView *pView)
+{
+   ANV_FROM_HANDLE(anv_device, device, _device);
+   ANV_FROM_HANDLE(anv_buffer, buffer, pCreateInfo->buffer);
+   struct anv_buffer_view *view;
+
+   view = anv_alloc2(&device->alloc, pAllocator, sizeof(*view), 8,
+                     VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+   if (!view)
+      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+
+   const struct anv_format *format =
+      anv_format_for_vk_format(pCreateInfo->format);
+
+   view->format = format->isl_format;
+   view->bo = buffer->bo;
+   view->offset = buffer->offset + pCreateInfo->offset;
+   view->range = pCreateInfo->range == VK_WHOLE_SIZE ?
+                 buffer->size - view->offset : pCreateInfo->range;
+
+   if (buffer->usage & VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT) {
+      view->surface_state =
+         anv_state_pool_alloc(&device->surface_state_pool, 64, 64);
+
+      anv_fill_buffer_surface_state(device, view->surface_state,
+                                    view->format,
+                                    view->offset, view->range,
+                                    format->isl_layout->bs);
+   } else {
+      view->surface_state = (struct anv_state){ 0 };
+   }
+
+   if (buffer->usage & VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT) {
+      view->storage_surface_state =
+         anv_state_pool_alloc(&device->surface_state_pool, 64, 64);
+
+      enum isl_format storage_format =
+         has_matching_storage_typed_format(device, view->format) ?
+         isl_lower_storage_image_format(&device->isl_dev, view->format) :
+         ISL_FORMAT_RAW;
+
+      anv_fill_buffer_surface_state(device, view->storage_surface_state,
+                                    storage_format,
+                                    view->offset, view->range,
+                                    (storage_format == ISL_FORMAT_RAW ? 1 :
+                                     format->isl_layout->bs));
+
+      isl_buffer_fill_image_param(&device->isl_dev,
+                                  &view->storage_image_param,
+                                  view->format, view->range);
+   } else {
+      view->storage_surface_state = (struct anv_state){ 0 };
+   }
+
+   *pView = anv_buffer_view_to_handle(view);
+
+   return VK_SUCCESS;
+}
+
+void
+anv_DestroyBufferView(VkDevice _device, VkBufferView bufferView,
+                      const VkAllocationCallbacks *pAllocator)
+{
+   ANV_FROM_HANDLE(anv_device, device, _device);
+   ANV_FROM_HANDLE(anv_buffer_view, view, bufferView);
+
+   if (view->surface_state.alloc_size > 0)
+      anv_state_pool_free(&device->surface_state_pool,
+                          view->surface_state);
+
+   if (view->storage_surface_state.alloc_size > 0)
+      anv_state_pool_free(&device->surface_state_pool,
+                          view->storage_surface_state);
+
+   anv_free2(&device->alloc, pAllocator, view);
+}
+
+struct anv_surface *
+anv_image_get_surface_for_aspect_mask(struct anv_image *image, VkImageAspectFlags aspect_mask)
+{
+   switch (aspect_mask) {
+   case VK_IMAGE_ASPECT_COLOR_BIT:
+      /* Dragons will eat you.
+       *
+       * Meta attaches all destination surfaces as color render targets. Guess
+       * what surface the Meta Dragons really want.
+       */
+      if (image->format->has_depth && image->format->has_stencil) {
+         return &image->depth_surface;
+      } else if (image->format->has_depth) {
+         return &image->depth_surface;
+      } else if (image->format->has_stencil) {
+         return &image->stencil_surface;
+      } else {
+         return &image->color_surface;
+      }
+      break;
+   case VK_IMAGE_ASPECT_DEPTH_BIT:
+      assert(image->format->has_depth);
+      return &image->depth_surface;
+   case VK_IMAGE_ASPECT_STENCIL_BIT:
+      assert(image->format->has_stencil);
+      return &image->stencil_surface;
+   case VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT:
+      if (image->format->has_depth && image->format->has_stencil) {
+         /* FINISHME: The Vulkan spec (git a511ba2) requires support for
+          * combined depth stencil formats. Specifically, it states:
+          *
+          *    At least one of ename:VK_FORMAT_D24_UNORM_S8_UINT or
+          *    ename:VK_FORMAT_D32_SFLOAT_S8_UINT must be supported.
+          *
+          * Image views with both depth and stencil aspects are only valid for
+          * render target attachments, in which case
+          * cmd_buffer_emit_depth_stencil() will pick out both the depth and
+          * stencil surfaces from the underlying surface.
+          */
+         return &image->depth_surface;
+      } else if (image->format->has_depth) {
+         return &image->depth_surface;
+      } else if (image->format->has_stencil) {
+         return &image->stencil_surface;
+      }
+      /* fallthrough */
+   default:
+      unreachable("image does not have aspect");
+      return NULL;
+   }
+}
diff --git a/src/intel/vulkan/anv_intel.c b/src/intel/vulkan/anv_intel.c
new file mode 100644 (file)
index 0000000..d95d9af
--- /dev/null
@@ -0,0 +1,100 @@
+/*
+ * Copyright © 2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include <assert.h>
+#include <stdbool.h>
+#include <string.h>
+#include <unistd.h>
+#include <fcntl.h>
+
+#include "anv_private.h"
+
+VkResult anv_CreateDmaBufImageINTEL(
+    VkDevice                                    _device,
+    const VkDmaBufImageCreateInfo*              pCreateInfo,
+    const VkAllocationCallbacks*                pAllocator,
+    VkDeviceMemory*                             pMem,
+    VkImage*                                    pImage)
+{
+   ANV_FROM_HANDLE(anv_device, device, _device);
+   struct anv_device_memory *mem;
+   struct anv_image *image;
+   VkResult result;
+   VkImage image_h;
+
+   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_DMA_BUF_IMAGE_CREATE_INFO_INTEL);
+
+   mem = anv_alloc2(&device->alloc, pAllocator, sizeof(*mem), 8,
+                    VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+   if (mem == NULL)
+      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+
+   mem->bo.gem_handle = anv_gem_fd_to_handle(device, pCreateInfo->fd);
+   if (!mem->bo.gem_handle) {
+      result = vk_error(VK_ERROR_OUT_OF_DEVICE_MEMORY);
+      goto fail;
+   }
+
+   mem->bo.map = NULL;
+   mem->bo.index = 0;
+   mem->bo.offset = 0;
+   mem->bo.size = pCreateInfo->strideInBytes * pCreateInfo->extent.height;
+
+   anv_image_create(_device,
+      &(struct anv_image_create_info) {
+         .isl_tiling_flags = ISL_TILING_X_BIT,
+         .stride = pCreateInfo->strideInBytes,
+         .vk_info =
+      &(VkImageCreateInfo) {
+         .sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
+         .imageType = VK_IMAGE_TYPE_2D,
+         .format = pCreateInfo->format,
+         .extent = pCreateInfo->extent,
+         .mipLevels = 1,
+         .arrayLayers = 1,
+         .samples = 1,
+         /* FIXME: Need a way to use X tiling to allow scanout */
+         .tiling = VK_IMAGE_TILING_OPTIMAL,
+         .usage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT,
+         .flags = 0,
+      }},
+      pAllocator, &image_h);
+
+   image = anv_image_from_handle(image_h);
+   image->bo = &mem->bo;
+   image->offset = 0;
+
+   assert(image->extent.width > 0);
+   assert(image->extent.height > 0);
+   assert(image->extent.depth == 1);
+
+   *pMem = anv_device_memory_to_handle(mem);
+   *pImage = anv_image_to_handle(image);
+
+   return VK_SUCCESS;
+
+ fail:
+   anv_free2(&device->alloc, pAllocator, mem);
+
+   return result;
+}
diff --git a/src/intel/vulkan/anv_meta.c b/src/intel/vulkan/anv_meta.c
new file mode 100644 (file)
index 0000000..82944ea
--- /dev/null
@@ -0,0 +1,169 @@
+/*
+ * Copyright © 2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "anv_meta.h"
+
+struct anv_render_pass anv_meta_dummy_renderpass = {0};
+
+void
+anv_meta_save(struct anv_meta_saved_state *state,
+              const struct anv_cmd_buffer *cmd_buffer,
+              uint32_t dynamic_mask)
+{
+   state->old_pipeline = cmd_buffer->state.pipeline;
+   state->old_descriptor_set0 = cmd_buffer->state.descriptors[0];
+   memcpy(state->old_vertex_bindings, cmd_buffer->state.vertex_bindings,
+          sizeof(state->old_vertex_bindings));
+
+   state->dynamic_mask = dynamic_mask;
+   anv_dynamic_state_copy(&state->dynamic, &cmd_buffer->state.dynamic,
+                          dynamic_mask);
+}
+
+void
+anv_meta_restore(const struct anv_meta_saved_state *state,
+                 struct anv_cmd_buffer *cmd_buffer)
+{
+   cmd_buffer->state.pipeline = state->old_pipeline;
+   cmd_buffer->state.descriptors[0] = state->old_descriptor_set0;
+   memcpy(cmd_buffer->state.vertex_bindings, state->old_vertex_bindings,
+          sizeof(state->old_vertex_bindings));
+
+   cmd_buffer->state.vb_dirty |= (1 << ANV_META_VERTEX_BINDING_COUNT) - 1;
+   cmd_buffer->state.dirty |= ANV_CMD_DIRTY_PIPELINE;
+   cmd_buffer->state.descriptors_dirty |= VK_SHADER_STAGE_FRAGMENT_BIT;
+
+   anv_dynamic_state_copy(&cmd_buffer->state.dynamic, &state->dynamic,
+                          state->dynamic_mask);
+   cmd_buffer->state.dirty |= state->dynamic_mask;
+
+   /* Since we've used the pipeline with the VS disabled, set
+    * need_query_wa. See CmdBeginQuery.
+    */
+   cmd_buffer->state.need_query_wa = true;
+}
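The save/restore pair above brackets every meta operation. A minimal sketch of that pattern (illustrative only; the dynamic-state mask shown is just an example of the (1 << VK_DYNAMIC_STATE_*) encoding):

   static void
   example_meta_operation(struct anv_cmd_buffer *cmd_buffer)
   {
      struct anv_meta_saved_state saved;

      /* Save what the meta pass will clobber: pipeline, descriptor set 0 and
       * vertex bindings are always saved, plus the listed dynamic state.
       */
      anv_meta_save(&saved, cmd_buffer,
                    (1 << VK_DYNAMIC_STATE_VIEWPORT) |
                    (1 << VK_DYNAMIC_STATE_SCISSOR));

      /* ... bind the meta pipeline and emit the meta draw here ... */

      anv_meta_restore(&saved, cmd_buffer);
   }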
+
+VkImageViewType
+anv_meta_get_view_type(const struct anv_image *image)
+{
+   switch (image->type) {
+   case VK_IMAGE_TYPE_1D: return VK_IMAGE_VIEW_TYPE_1D;
+   case VK_IMAGE_TYPE_2D: return VK_IMAGE_VIEW_TYPE_2D;
+   case VK_IMAGE_TYPE_3D: return VK_IMAGE_VIEW_TYPE_3D;
+   default:
+      unreachable("bad VkImageViewType");
+   }
+}
+
+/**
+ * When creating a destination VkImageView, this function provides the needed
+ * VkImageViewCreateInfo::subresourceRange::baseArrayLayer.
+ */
+uint32_t
+anv_meta_get_iview_layer(const struct anv_image *dest_image,
+                         const VkImageSubresourceLayers *dest_subresource,
+                         const VkOffset3D *dest_offset)
+{
+   switch (dest_image->type) {
+   case VK_IMAGE_TYPE_1D:
+   case VK_IMAGE_TYPE_2D:
+      return dest_subresource->baseArrayLayer;
+   case VK_IMAGE_TYPE_3D:
+      /* HACK: Vulkan does not allow attaching a 3D image to a framebuffer,
+       * but meta does it anyway. When doing so, we translate the
+       * destination's z offset into an array offset.
+       */
+      return dest_offset->z;
+   default:
+      assert(!"bad VkImageType");
+      return 0;
+   }
+}
+
+static void *
+meta_alloc(void* _device, size_t size, size_t alignment,
+           VkSystemAllocationScope allocationScope)
+{
+   struct anv_device *device = _device;
+   return device->alloc.pfnAllocation(device->alloc.pUserData, size, alignment,
+                                      VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
+}
+
+static void *
+meta_realloc(void* _device, void *original, size_t size, size_t alignment,
+             VkSystemAllocationScope allocationScope)
+{
+   struct anv_device *device = _device;
+   return device->alloc.pfnReallocation(device->alloc.pUserData, original,
+                                        size, alignment,
+                                        VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
+}
+
+static void
+meta_free(void* _device, void *data)
+{
+   struct anv_device *device = _device;
+   return device->alloc.pfnFree(device->alloc.pUserData, data);
+}
+
+VkResult
+anv_device_init_meta(struct anv_device *device)
+{
+   VkResult result;
+
+   device->meta_state.alloc = (VkAllocationCallbacks) {
+      .pUserData = device,
+      .pfnAllocation = meta_alloc,
+      .pfnReallocation = meta_realloc,
+      .pfnFree = meta_free,
+   };
+
+   result = anv_device_init_meta_clear_state(device);
+   if (result != VK_SUCCESS)
+      goto fail_clear;
+
+   result = anv_device_init_meta_resolve_state(device);
+   if (result != VK_SUCCESS)
+      goto fail_resolve;
+
+   result = anv_device_init_meta_blit_state(device);
+   if (result != VK_SUCCESS)
+      goto fail_blit;
+
+   return VK_SUCCESS;
+
+fail_blit:
+   anv_device_finish_meta_resolve_state(device);
+fail_resolve:
+   anv_device_finish_meta_clear_state(device);
+fail_clear:
+   return result;
+}
+
+void
+anv_device_finish_meta(struct anv_device *device)
+{
+   anv_device_finish_meta_resolve_state(device);
+   anv_device_finish_meta_clear_state(device);
+   anv_device_finish_meta_blit_state(device);
+}
diff --git a/src/intel/vulkan/anv_meta.h b/src/intel/vulkan/anv_meta.h
new file mode 100644 (file)
index 0000000..e2e0043
--- /dev/null
@@ -0,0 +1,121 @@
+/*
+ * Copyright © 2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#pragma once
+
+#include "anv_private.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define ANV_META_VERTEX_BINDING_COUNT 2
+
+struct anv_meta_saved_state {
+   struct anv_vertex_binding old_vertex_bindings[ANV_META_VERTEX_BINDING_COUNT];
+   struct anv_descriptor_set *old_descriptor_set0;
+   struct anv_pipeline *old_pipeline;
+
+   /**
+    * Bitmask of (1 << VK_DYNAMIC_STATE_*). Defines the set of saved dynamic
+    * state.
+    */
+   uint32_t dynamic_mask;
+   struct anv_dynamic_state dynamic;
+};
+
+VkResult anv_device_init_meta_clear_state(struct anv_device *device);
+void anv_device_finish_meta_clear_state(struct anv_device *device);
+
+VkResult anv_device_init_meta_resolve_state(struct anv_device *device);
+void anv_device_finish_meta_resolve_state(struct anv_device *device);
+
+VkResult anv_device_init_meta_blit_state(struct anv_device *device);
+void anv_device_finish_meta_blit_state(struct anv_device *device);
+
+void
+anv_meta_save(struct anv_meta_saved_state *state,
+              const struct anv_cmd_buffer *cmd_buffer,
+              uint32_t dynamic_mask);
+
+void
+anv_meta_restore(const struct anv_meta_saved_state *state,
+                 struct anv_cmd_buffer *cmd_buffer);
+
+VkImageViewType
+anv_meta_get_view_type(const struct anv_image *image);
+
+uint32_t
+anv_meta_get_iview_layer(const struct anv_image *dest_image,
+                         const VkImageSubresourceLayers *dest_subresource,
+                         const VkOffset3D *dest_offset);
+
+struct anv_meta_blit2d_surf {
+   struct anv_bo *bo;
+   enum isl_tiling tiling;
+
+   /** Base offset to the start of the image */
+   uint64_t base_offset;
+
+   /** The size of an element in bytes. */
+   uint8_t bs;
+
+   /** Pitch between rows in bytes. */
+   uint32_t pitch;
+};
+
+struct anv_meta_blit2d_rect {
+   uint32_t src_x, src_y;
+   uint32_t dst_x, dst_y;
+   uint32_t width, height;
+};
+
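+/* Typical usage: anv_meta_begin_blit2d() saves the command buffer state,
+ * one or more anv_meta_blit2d() calls copy rectangles between the two
+ * surfaces, and anv_meta_end_blit2d() restores the saved state.
+ */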
+void
+anv_meta_begin_blit2d(struct anv_cmd_buffer *cmd_buffer,
+                      struct anv_meta_saved_state *save);
+
+void
+anv_meta_blit2d(struct anv_cmd_buffer *cmd_buffer,
+                struct anv_meta_blit2d_surf *src,
+                struct anv_meta_blit2d_surf *dst,
+                unsigned num_rects,
+                struct anv_meta_blit2d_rect *rects);
+
+void
+anv_meta_end_blit2d(struct anv_cmd_buffer *cmd_buffer,
+                    struct anv_meta_saved_state *save);
+
+void
+anv_meta_emit_blit(struct anv_cmd_buffer *cmd_buffer,
+               struct anv_image *src_image,
+               struct anv_image_view *src_iview,
+               VkOffset3D src_offset,
+               VkExtent3D src_extent,
+               struct anv_image *dest_image,
+               struct anv_image_view *dest_iview,
+               VkOffset3D dest_offset,
+               VkExtent3D dest_extent,
+               VkFilter blit_filter);
+#ifdef __cplusplus
+}
+#endif
diff --git a/src/intel/vulkan/anv_meta_blit.c b/src/intel/vulkan/anv_meta_blit.c
new file mode 100644 (file)
index 0000000..2c3c917
--- /dev/null
@@ -0,0 +1,750 @@
+/*
+ * Copyright © 2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "anv_meta.h"
+#include "nir/nir_builder.h"
+
+struct blit_region {
+   VkOffset3D src_offset;
+   VkExtent3D src_extent;
+   VkOffset3D dest_offset;
+   VkExtent3D dest_extent;
+};
+
+static nir_shader *
+build_nir_vertex_shader(void)
+{
+   const struct glsl_type *vec4 = glsl_vec4_type();
+   nir_builder b;
+
+   nir_builder_init_simple_shader(&b, NULL, MESA_SHADER_VERTEX, NULL);
+   b.shader->info.name = ralloc_strdup(b.shader, "meta_blit_vs");
+
+   nir_variable *pos_in = nir_variable_create(b.shader, nir_var_shader_in,
+                                              vec4, "a_pos");
+   pos_in->data.location = VERT_ATTRIB_GENERIC0;
+   nir_variable *pos_out = nir_variable_create(b.shader, nir_var_shader_out,
+                                               vec4, "gl_Position");
+   pos_out->data.location = VARYING_SLOT_POS;
+   nir_copy_var(&b, pos_out, pos_in);
+
+   nir_variable *tex_pos_in = nir_variable_create(b.shader, nir_var_shader_in,
+                                                  vec4, "a_tex_pos");
+   tex_pos_in->data.location = VERT_ATTRIB_GENERIC1;
+   nir_variable *tex_pos_out = nir_variable_create(b.shader, nir_var_shader_out,
+                                                   vec4, "v_tex_pos");
+   tex_pos_out->data.location = VARYING_SLOT_VAR0;
+   tex_pos_out->data.interpolation = INTERP_QUALIFIER_SMOOTH;
+   nir_copy_var(&b, tex_pos_out, tex_pos_in);
+
+   return b.shader;
+}
+
+static nir_shader *
+build_nir_copy_fragment_shader(enum glsl_sampler_dim tex_dim)
+{
+   const struct glsl_type *vec4 = glsl_vec4_type();
+   nir_builder b;
+
+   nir_builder_init_simple_shader(&b, NULL, MESA_SHADER_FRAGMENT, NULL);
+   b.shader->info.name = ralloc_strdup(b.shader, "meta_blit_fs");
+
+   nir_variable *tex_pos_in = nir_variable_create(b.shader, nir_var_shader_in,
+                                                  vec4, "v_tex_pos");
+   tex_pos_in->data.location = VARYING_SLOT_VAR0;
+
+   /* Swizzle the array index which comes in as Z coordinate into the right
+    * position.
+    */
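+   /* For a 1D texture the layer index is moved into component 1, giving
+    * (x, layer); for 2D and 3D the coordinate keeps its (x, y, z) layout.
+    */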
+   unsigned swz[] = { 0, (tex_dim == GLSL_SAMPLER_DIM_1D ? 2 : 1), 2 };
+   nir_ssa_def *const tex_pos =
+      nir_swizzle(&b, nir_load_var(&b, tex_pos_in), swz,
+                  (tex_dim == GLSL_SAMPLER_DIM_1D ? 2 : 3), false);
+
+   const struct glsl_type *sampler_type =
+      glsl_sampler_type(tex_dim, false, tex_dim != GLSL_SAMPLER_DIM_3D,
+                        glsl_get_base_type(vec4));
+   nir_variable *sampler = nir_variable_create(b.shader, nir_var_uniform,
+                                               sampler_type, "s_tex");
+   sampler->data.descriptor_set = 0;
+   sampler->data.binding = 0;
+
+   nir_tex_instr *tex = nir_tex_instr_create(b.shader, 1);
+   tex->sampler_dim = tex_dim;
+   tex->op = nir_texop_tex;
+   tex->src[0].src_type = nir_tex_src_coord;
+   tex->src[0].src = nir_src_for_ssa(tex_pos);
+   tex->dest_type = nir_type_float; /* TODO */
+   tex->is_array = glsl_sampler_type_is_array(sampler_type);
+   tex->coord_components = tex_pos->num_components;
+   tex->texture = nir_deref_var_create(tex, sampler);
+   tex->sampler = nir_deref_var_create(tex, sampler);
+
+   nir_ssa_dest_init(&tex->instr, &tex->dest, 4, "tex");
+   nir_builder_instr_insert(&b, &tex->instr);
+
+   nir_variable *color_out = nir_variable_create(b.shader, nir_var_shader_out,
+                                                 vec4, "f_color");
+   color_out->data.location = FRAG_RESULT_DATA0;
+   nir_store_var(&b, color_out, &tex->dest.ssa, 4);
+
+   return b.shader;
+}
+
+static void
+meta_prepare_blit(struct anv_cmd_buffer *cmd_buffer,
+                  struct anv_meta_saved_state *saved_state)
+{
+   anv_meta_save(saved_state, cmd_buffer,
+                 (1 << VK_DYNAMIC_STATE_VIEWPORT));
+}
+
+void
+anv_meta_emit_blit(struct anv_cmd_buffer *cmd_buffer,
+               struct anv_image *src_image,
+               struct anv_image_view *src_iview,
+               VkOffset3D src_offset,
+               VkExtent3D src_extent,
+               struct anv_image *dest_image,
+               struct anv_image_view *dest_iview,
+               VkOffset3D dest_offset,
+               VkExtent3D dest_extent,
+               VkFilter blit_filter)
+{
+   struct anv_device *device = cmd_buffer->device;
+
+   struct blit_vb_data {
+      float pos[2];
+      float tex_coord[3];
+   } *vb_data;
+
+   assert(src_image->samples == dest_image->samples);
+
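+   /* The vertex buffer holds a VUE header (fed through binding 0) followed
+    * by three blit_vb_data vertices that describe the destination rectangle
+    * as a RECTLIST primitive.
+    */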
+   unsigned vb_size = sizeof(struct anv_vue_header) + 3 * sizeof(*vb_data);
+
+   struct anv_state vb_state =
+      anv_cmd_buffer_alloc_dynamic_state(cmd_buffer, vb_size, 16);
+   memset(vb_state.map, 0, sizeof(struct anv_vue_header));
+   vb_data = vb_state.map + sizeof(struct anv_vue_header);
+
+   vb_data[0] = (struct blit_vb_data) {
+      .pos = {
+         dest_offset.x + dest_extent.width,
+         dest_offset.y + dest_extent.height,
+      },
+      .tex_coord = {
+         (float)(src_offset.x + src_extent.width)
+            / (float)src_iview->extent.width,
+         (float)(src_offset.y + src_extent.height)
+            / (float)src_iview->extent.height,
+         (float)src_offset.z / (float)src_iview->extent.depth,
+      },
+   };
+
+   vb_data[1] = (struct blit_vb_data) {
+      .pos = {
+         dest_offset.x,
+         dest_offset.y + dest_extent.height,
+      },
+      .tex_coord = {
+         (float)src_offset.x / (float)src_iview->extent.width,
+         (float)(src_offset.y + src_extent.height) /
+            (float)src_iview->extent.height,
+         (float)src_offset.z / (float)src_iview->extent.depth,
+      },
+   };
+
+   vb_data[2] = (struct blit_vb_data) {
+      .pos = {
+         dest_offset.x,
+         dest_offset.y,
+      },
+      .tex_coord = {
+         (float)src_offset.x / (float)src_iview->extent.width,
+         (float)src_offset.y / (float)src_iview->extent.height,
+         (float)src_offset.z / (float)src_iview->extent.depth,
+      },
+   };
+
+   anv_state_clflush(vb_state);
+
+   struct anv_buffer vertex_buffer = {
+      .device = device,
+      .size = vb_size,
+      .bo = &device->dynamic_state_block_pool.bo,
+      .offset = vb_state.offset,
+   };
+
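+   /* Both bindings point at the same buffer: binding 0 covers the VUE header
+    * at offset 0 and binding 1 covers the per-vertex data that follows it.
+    */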
+   anv_CmdBindVertexBuffers(anv_cmd_buffer_to_handle(cmd_buffer), 0, 2,
+      (VkBuffer[]) {
+         anv_buffer_to_handle(&vertex_buffer),
+         anv_buffer_to_handle(&vertex_buffer)
+      },
+      (VkDeviceSize[]) {
+         0,
+         sizeof(struct anv_vue_header),
+      });
+
+   VkSampler sampler;
+   ANV_CALL(CreateSampler)(anv_device_to_handle(device),
+      &(VkSamplerCreateInfo) {
+         .sType = VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO,
+         .magFilter = blit_filter,
+         .minFilter = blit_filter,
+      }, &cmd_buffer->pool->alloc, &sampler);
+
+   VkDescriptorPool desc_pool;
+   anv_CreateDescriptorPool(anv_device_to_handle(device),
+      &(const VkDescriptorPoolCreateInfo) {
+         .sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO,
+         .pNext = NULL,
+         .flags = 0,
+         .maxSets = 1,
+         .poolSizeCount = 1,
+         .pPoolSizes = (VkDescriptorPoolSize[]) {
+            {
+               .type = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,
+               .descriptorCount = 1
+            },
+         }
+      }, &cmd_buffer->pool->alloc, &desc_pool);
+
+   VkDescriptorSet set;
+   anv_AllocateDescriptorSets(anv_device_to_handle(device),
+      &(VkDescriptorSetAllocateInfo) {
+         .sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO,
+         .descriptorPool = desc_pool,
+         .descriptorSetCount = 1,
+         .pSetLayouts = &device->meta_state.blit.ds_layout
+      }, &set);
+
+   anv_UpdateDescriptorSets(anv_device_to_handle(device),
+      1, /* writeCount */
+      (VkWriteDescriptorSet[]) {
+         {
+            .sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
+            .dstSet = set,
+            .dstBinding = 0,
+            .dstArrayElement = 0,
+            .descriptorCount = 1,
+            .descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,
+            .pImageInfo = (VkDescriptorImageInfo[]) {
+               {
+                  .sampler = sampler,
+                  .imageView = anv_image_view_to_handle(src_iview),
+                  .imageLayout = VK_IMAGE_LAYOUT_GENERAL,
+               },
+            }
+         }
+      }, 0, NULL);
+
+   VkFramebuffer fb;
+   anv_CreateFramebuffer(anv_device_to_handle(device),
+      &(VkFramebufferCreateInfo) {
+         .sType = VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO,
+         .attachmentCount = 1,
+         .pAttachments = (VkImageView[]) {
+            anv_image_view_to_handle(dest_iview),
+         },
+         .width = dest_iview->extent.width,
+         .height = dest_iview->extent.height,
+         .layers = 1
+      }, &cmd_buffer->pool->alloc, &fb);
+
+   ANV_CALL(CmdBeginRenderPass)(anv_cmd_buffer_to_handle(cmd_buffer),
+      &(VkRenderPassBeginInfo) {
+         .sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO,
+         .renderPass = device->meta_state.blit.render_pass,
+         .framebuffer = fb,
+         .renderArea = {
+            .offset = { dest_offset.x, dest_offset.y },
+            .extent = { dest_extent.width, dest_extent.height },
+         },
+         .clearValueCount = 0,
+         .pClearValues = NULL,
+      }, VK_SUBPASS_CONTENTS_INLINE);
+
+   VkPipeline pipeline;
+
+   switch (src_image->type) {
+   case VK_IMAGE_TYPE_1D:
+      pipeline = device->meta_state.blit.pipeline_1d_src;
+      break;
+   case VK_IMAGE_TYPE_2D:
+      pipeline = device->meta_state.blit.pipeline_2d_src;
+      break;
+   case VK_IMAGE_TYPE_3D:
+      pipeline = device->meta_state.blit.pipeline_3d_src;
+      break;
+   default:
+      unreachable("bad VkImageType");
+   }
+
+   if (cmd_buffer->state.pipeline != anv_pipeline_from_handle(pipeline)) {
+      anv_CmdBindPipeline(anv_cmd_buffer_to_handle(cmd_buffer),
+                          VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline);
+   }
+
+   anv_CmdSetViewport(anv_cmd_buffer_to_handle(cmd_buffer), 0, 1,
+                      &(VkViewport) {
+                        .x = 0.0f,
+                        .y = 0.0f,
+                        .width = dest_iview->extent.width,
+                        .height = dest_iview->extent.height,
+                        .minDepth = 0.0f,
+                        .maxDepth = 1.0f,
+                      });
+
+   anv_CmdBindDescriptorSets(anv_cmd_buffer_to_handle(cmd_buffer),
+                             VK_PIPELINE_BIND_POINT_GRAPHICS,
+                             device->meta_state.blit.pipeline_layout, 0, 1,
+                             &set, 0, NULL);
+
+   ANV_CALL(CmdDraw)(anv_cmd_buffer_to_handle(cmd_buffer), 3, 1, 0, 0);
+
+   ANV_CALL(CmdEndRenderPass)(anv_cmd_buffer_to_handle(cmd_buffer));
+
+   /* At the point where we emit the draw call, all data from the
+    * descriptor sets, etc. has been used.  We are free to delete it.
+    */
+   anv_DestroyDescriptorPool(anv_device_to_handle(device),
+                             desc_pool, &cmd_buffer->pool->alloc);
+   anv_DestroySampler(anv_device_to_handle(device), sampler,
+                      &cmd_buffer->pool->alloc);
+   anv_DestroyFramebuffer(anv_device_to_handle(device), fb,
+                          &cmd_buffer->pool->alloc);
+}
+
+static void
+meta_finish_blit(struct anv_cmd_buffer *cmd_buffer,
+                 const struct anv_meta_saved_state *saved_state)
+{
+   anv_meta_restore(saved_state, cmd_buffer);
+}
+
+void anv_CmdBlitImage(
+    VkCommandBuffer                             commandBuffer,
+    VkImage                                     srcImage,
+    VkImageLayout                               srcImageLayout,
+    VkImage                                     destImage,
+    VkImageLayout                               destImageLayout,
+    uint32_t                                    regionCount,
+    const VkImageBlit*                          pRegions,
+    VkFilter                                    filter)
+
+{
+   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
+   ANV_FROM_HANDLE(anv_image, src_image, srcImage);
+   ANV_FROM_HANDLE(anv_image, dest_image, destImage);
+   struct anv_meta_saved_state saved_state;
+
+   /* From the Vulkan 1.0 spec:
+    *
+    *    vkCmdBlitImage must not be used for multisampled source or
+    *    destination images. Use vkCmdResolveImage for this purpose.
+    */
+   assert(src_image->samples == 1);
+   assert(dest_image->samples == 1);
+
+   anv_finishme("respect VkFilter");
+
+   meta_prepare_blit(cmd_buffer, &saved_state);
+
+   for (unsigned r = 0; r < regionCount; r++) {
+      struct anv_image_view src_iview;
+      anv_image_view_init(&src_iview, cmd_buffer->device,
+         &(VkImageViewCreateInfo) {
+            .sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,
+            .image = srcImage,
+            .viewType = anv_meta_get_view_type(src_image),
+            .format = src_image->vk_format,
+            .subresourceRange = {
+               .aspectMask = pRegions[r].srcSubresource.aspectMask,
+               .baseMipLevel = pRegions[r].srcSubresource.mipLevel,
+               .levelCount = 1,
+               .baseArrayLayer = pRegions[r].srcSubresource.baseArrayLayer,
+               .layerCount = 1
+            },
+         },
+         cmd_buffer, 0, VK_IMAGE_USAGE_SAMPLED_BIT);
+
+      const VkOffset3D dest_offset = {
+         .x = pRegions[r].dstOffsets[0].x,
+         .y = pRegions[r].dstOffsets[0].y,
+         .z = 0,
+      };
+
+      if (pRegions[r].dstOffsets[1].x < pRegions[r].dstOffsets[0].x ||
+          pRegions[r].dstOffsets[1].y < pRegions[r].dstOffsets[0].y ||
+          pRegions[r].srcOffsets[1].x < pRegions[r].srcOffsets[0].x ||
+          pRegions[r].srcOffsets[1].y < pRegions[r].srcOffsets[0].y)
+         anv_finishme("FINISHME: Allow flipping in blits");
+
+      const VkExtent3D dest_extent = {
+         .width = pRegions[r].dstOffsets[1].x - pRegions[r].dstOffsets[0].x,
+         .height = pRegions[r].dstOffsets[1].y - pRegions[r].dstOffsets[0].y,
+      };
+
+      const VkExtent3D src_extent = {
+         .width = pRegions[r].srcOffsets[1].x - pRegions[r].srcOffsets[0].x,
+         .height = pRegions[r].srcOffsets[1].y - pRegions[r].srcOffsets[0].y,
+      };
+
+      const uint32_t dest_array_slice =
+         anv_meta_get_iview_layer(dest_image, &pRegions[r].dstSubresource,
+                                  &pRegions[r].dstOffsets[0]);
+
+      if (pRegions[r].srcSubresource.layerCount > 1)
+         anv_finishme("FINISHME: copy multiple array layers");
+
+      if (pRegions[r].srcOffsets[0].z + 1 != pRegions[r].srcOffsets[1].z ||
+          pRegions[r].dstOffsets[0].z + 1 != pRegions[r].dstOffsets[1].z)
+         anv_finishme("FINISHME: copy multiple depth layers");
+
+      struct anv_image_view dest_iview;
+      anv_image_view_init(&dest_iview, cmd_buffer->device,
+         &(VkImageViewCreateInfo) {
+            .sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,
+            .image = destImage,
+            .viewType = anv_meta_get_view_type(dest_image),
+            .format = dest_image->vk_format,
+            .subresourceRange = {
+               .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
+               .baseMipLevel = pRegions[r].dstSubresource.mipLevel,
+               .levelCount = 1,
+               .baseArrayLayer = dest_array_slice,
+               .layerCount = 1
+            },
+         },
+         cmd_buffer, 0, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT);
+
+      anv_meta_emit_blit(cmd_buffer,
+                     src_image, &src_iview,
+                     pRegions[r].srcOffsets[0], src_extent,
+                     dest_image, &dest_iview,
+                     dest_offset, dest_extent,
+                     filter);
+   }
+
+   meta_finish_blit(cmd_buffer, &saved_state);
+}
+
+void
+anv_device_finish_meta_blit_state(struct anv_device *device)
+{
+   anv_DestroyRenderPass(anv_device_to_handle(device),
+                         device->meta_state.blit.render_pass,
+                         &device->meta_state.alloc);
+   anv_DestroyPipeline(anv_device_to_handle(device),
+                       device->meta_state.blit.pipeline_1d_src,
+                       &device->meta_state.alloc);
+   anv_DestroyPipeline(anv_device_to_handle(device),
+                       device->meta_state.blit.pipeline_2d_src,
+                       &device->meta_state.alloc);
+   anv_DestroyPipeline(anv_device_to_handle(device),
+                       device->meta_state.blit.pipeline_3d_src,
+                       &device->meta_state.alloc);
+   anv_DestroyPipelineLayout(anv_device_to_handle(device),
+                             device->meta_state.blit.pipeline_layout,
+                             &device->meta_state.alloc);
+   anv_DestroyDescriptorSetLayout(anv_device_to_handle(device),
+                                  device->meta_state.blit.ds_layout,
+                                  &device->meta_state.alloc);
+}
+
+VkResult
+anv_device_init_meta_blit_state(struct anv_device *device)
+{
+   VkResult result;
+
+   result = anv_CreateRenderPass(anv_device_to_handle(device),
+      &(VkRenderPassCreateInfo) {
+         .sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO,
+         .attachmentCount = 1,
+         .pAttachments = &(VkAttachmentDescription) {
+            .format = VK_FORMAT_UNDEFINED, /* Our shaders don't care */
+            .loadOp = VK_ATTACHMENT_LOAD_OP_LOAD,
+            .storeOp = VK_ATTACHMENT_STORE_OP_STORE,
+            .initialLayout = VK_IMAGE_LAYOUT_GENERAL,
+            .finalLayout = VK_IMAGE_LAYOUT_GENERAL,
+         },
+         .subpassCount = 1,
+         .pSubpasses = &(VkSubpassDescription) {
+            .pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS,
+            .inputAttachmentCount = 0,
+            .colorAttachmentCount = 1,
+            .pColorAttachments = &(VkAttachmentReference) {
+               .attachment = 0,
+               .layout = VK_IMAGE_LAYOUT_GENERAL,
+            },
+            .pResolveAttachments = NULL,
+            .pDepthStencilAttachment = &(VkAttachmentReference) {
+               .attachment = VK_ATTACHMENT_UNUSED,
+               .layout = VK_IMAGE_LAYOUT_GENERAL,
+            },
+            .preserveAttachmentCount = 1,
+            .pPreserveAttachments = (uint32_t[]) { 0 },
+         },
+         .dependencyCount = 0,
+      }, &device->meta_state.alloc, &device->meta_state.blit.render_pass);
+   if (result != VK_SUCCESS)
+      goto fail;
+
+   /* We don't use a vertex shader for blitting, but instead build and pass
+    * the VUEs directly to the rasterization backend.  However, we do need
+    * to provide a vertex shader (built directly in NIR here) so that the
+    * compiler does not dead-code our inputs.
+    */
+   struct anv_shader_module vs = {
+      .nir = build_nir_vertex_shader(),
+   };
+
+   struct anv_shader_module fs_1d = {
+      .nir = build_nir_copy_fragment_shader(GLSL_SAMPLER_DIM_1D),
+   };
+
+   struct anv_shader_module fs_2d = {
+      .nir = build_nir_copy_fragment_shader(GLSL_SAMPLER_DIM_2D),
+   };
+
+   struct anv_shader_module fs_3d = {
+      .nir = build_nir_copy_fragment_shader(GLSL_SAMPLER_DIM_3D),
+   };
+
+   VkPipelineVertexInputStateCreateInfo vi_create_info = {
+      .sType = VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO,
+      .vertexBindingDescriptionCount = 2,
+      .pVertexBindingDescriptions = (VkVertexInputBindingDescription[]) {
+         {
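+            /* Binding 0 carries the VUE header.  A zero stride makes the
+             * same header get fetched for every vertex.
+             */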
+            .binding = 0,
+            .stride = 0,
+            .inputRate = VK_VERTEX_INPUT_RATE_VERTEX
+         },
+         {
+            .binding = 1,
+            .stride = 5 * sizeof(float),
+            .inputRate = VK_VERTEX_INPUT_RATE_VERTEX
+         },
+      },
+      .vertexAttributeDescriptionCount = 3,
+      .pVertexAttributeDescriptions = (VkVertexInputAttributeDescription[]) {
+         {
+            /* VUE Header */
+            .location = 0,
+            .binding = 0,
+            .format = VK_FORMAT_R32G32B32A32_UINT,
+            .offset = 0
+         },
+         {
+            /* Position */
+            .location = 1,
+            .binding = 1,
+            .format = VK_FORMAT_R32G32_SFLOAT,
+            .offset = 0
+         },
+         {
+            /* Texture Coordinate */
+            .location = 2,
+            .binding = 1,
+            .format = VK_FORMAT_R32G32B32_SFLOAT,
+            .offset = 8
+         }
+      }
+   };
+
+   VkDescriptorSetLayoutCreateInfo ds_layout_info = {
+      .sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO,
+      .bindingCount = 1,
+      .pBindings = (VkDescriptorSetLayoutBinding[]) {
+         {
+            .binding = 0,
+            .descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,
+            .descriptorCount = 1,
+            .stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT,
+            .pImmutableSamplers = NULL
+         },
+      }
+   };
+   result = anv_CreateDescriptorSetLayout(anv_device_to_handle(device),
+                                          &ds_layout_info,
+                                          &device->meta_state.alloc,
+                                          &device->meta_state.blit.ds_layout);
+   if (result != VK_SUCCESS)
+      goto fail_render_pass;
+
+   result = anv_CreatePipelineLayout(anv_device_to_handle(device),
+      &(VkPipelineLayoutCreateInfo) {
+         .sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO,
+         .setLayoutCount = 1,
+         .pSetLayouts = &device->meta_state.blit.ds_layout,
+      },
+      &device->meta_state.alloc, &device->meta_state.blit.pipeline_layout);
+   if (result != VK_SUCCESS)
+      goto fail_descriptor_set_layout;
+
+   VkPipelineShaderStageCreateInfo pipeline_shader_stages[] = {
+      {
+         .sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
+         .stage = VK_SHADER_STAGE_VERTEX_BIT,
+         .module = anv_shader_module_to_handle(&vs),
+         .pName = "main",
+         .pSpecializationInfo = NULL
+      }, {
+         .sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
+         .stage = VK_SHADER_STAGE_FRAGMENT_BIT,
+         .module = VK_NULL_HANDLE, /* TEMPLATE VALUE! FILL ME IN! */
+         .pName = "main",
+         .pSpecializationInfo = NULL
+      },
+   };
+
+   const VkGraphicsPipelineCreateInfo vk_pipeline_info = {
+      .sType = VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO,
+      .stageCount = ARRAY_SIZE(pipeline_shader_stages),
+      .pStages = pipeline_shader_stages,
+      .pVertexInputState = &vi_create_info,
+      .pInputAssemblyState = &(VkPipelineInputAssemblyStateCreateInfo) {
+         .sType = VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO,
+         .topology = VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP,
+         .primitiveRestartEnable = false,
+      },
+      .pViewportState = &(VkPipelineViewportStateCreateInfo) {
+         .sType = VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO,
+         .viewportCount = 1,
+         .scissorCount = 1,
+      },
+      .pRasterizationState = &(VkPipelineRasterizationStateCreateInfo) {
+         .sType = VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO,
+         .rasterizerDiscardEnable = false,
+         .polygonMode = VK_POLYGON_MODE_FILL,
+         .cullMode = VK_CULL_MODE_NONE,
+         .frontFace = VK_FRONT_FACE_COUNTER_CLOCKWISE
+      },
+      .pMultisampleState = &(VkPipelineMultisampleStateCreateInfo) {
+         .sType = VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO,
+         .rasterizationSamples = 1,
+         .sampleShadingEnable = false,
+         .pSampleMask = (VkSampleMask[]) { UINT32_MAX },
+      },
+      .pColorBlendState = &(VkPipelineColorBlendStateCreateInfo) {
+         .sType = VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO,
+         .attachmentCount = 1,
+         .pAttachments = (VkPipelineColorBlendAttachmentState []) {
+            { .colorWriteMask =
+                 VK_COLOR_COMPONENT_A_BIT |
+                 VK_COLOR_COMPONENT_R_BIT |
+                 VK_COLOR_COMPONENT_G_BIT |
+                 VK_COLOR_COMPONENT_B_BIT },
+         }
+      },
+      .pDynamicState = &(VkPipelineDynamicStateCreateInfo) {
+         .sType = VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO,
+         .dynamicStateCount = 9,
+         .pDynamicStates = (VkDynamicState[]) {
+            VK_DYNAMIC_STATE_VIEWPORT,
+            VK_DYNAMIC_STATE_SCISSOR,
+            VK_DYNAMIC_STATE_LINE_WIDTH,
+            VK_DYNAMIC_STATE_DEPTH_BIAS,
+            VK_DYNAMIC_STATE_BLEND_CONSTANTS,
+            VK_DYNAMIC_STATE_DEPTH_BOUNDS,
+            VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK,
+            VK_DYNAMIC_STATE_STENCIL_WRITE_MASK,
+            VK_DYNAMIC_STATE_STENCIL_REFERENCE,
+         },
+      },
+      .flags = 0,
+      .layout = device->meta_state.blit.pipeline_layout,
+      .renderPass = device->meta_state.blit.render_pass,
+      .subpass = 0,
+   };
+
+   const struct anv_graphics_pipeline_create_info anv_pipeline_info = {
+      .color_attachment_count = -1,
+      .use_repclear = false,
+      .disable_viewport = true,
+      .disable_scissor = true,
+      .disable_vs = true,
+      .use_rectlist = true
+   };
+
+   pipeline_shader_stages[1].module = anv_shader_module_to_handle(&fs_1d);
+   result = anv_graphics_pipeline_create(anv_device_to_handle(device),
+      VK_NULL_HANDLE,
+      &vk_pipeline_info, &anv_pipeline_info,
+      &device->meta_state.alloc, &device->meta_state.blit.pipeline_1d_src);
+   if (result != VK_SUCCESS)
+      goto fail_pipeline_layout;
+
+   pipeline_shader_stages[1].module = anv_shader_module_to_handle(&fs_2d);
+   result = anv_graphics_pipeline_create(anv_device_to_handle(device),
+      VK_NULL_HANDLE,
+      &vk_pipeline_info, &anv_pipeline_info,
+      &device->meta_state.alloc, &device->meta_state.blit.pipeline_2d_src);
+   if (result != VK_SUCCESS)
+      goto fail_pipeline_1d;
+
+   pipeline_shader_stages[1].module = anv_shader_module_to_handle(&fs_3d);
+   result = anv_graphics_pipeline_create(anv_device_to_handle(device),
+      VK_NULL_HANDLE,
+      &vk_pipeline_info, &anv_pipeline_info,
+      &device->meta_state.alloc, &device->meta_state.blit.pipeline_3d_src);
+   if (result != VK_SUCCESS)
+      goto fail_pipeline_2d;
+
+   ralloc_free(vs.nir);
+   ralloc_free(fs_1d.nir);
+   ralloc_free(fs_2d.nir);
+   ralloc_free(fs_3d.nir);
+
+   return VK_SUCCESS;
+
+ fail_pipeline_2d:
+   anv_DestroyPipeline(anv_device_to_handle(device),
+                       device->meta_state.blit.pipeline_2d_src,
+                       &device->meta_state.alloc);
+
+ fail_pipeline_1d:
+   anv_DestroyPipeline(anv_device_to_handle(device),
+                       device->meta_state.blit.pipeline_1d_src,
+                       &device->meta_state.alloc);
+
+ fail_pipeline_layout:
+   anv_DestroyPipelineLayout(anv_device_to_handle(device),
+                             device->meta_state.blit.pipeline_layout,
+                             &device->meta_state.alloc);
+ fail_descriptor_set_layout:
+   anv_DestroyDescriptorSetLayout(anv_device_to_handle(device),
+                                  device->meta_state.blit.ds_layout,
+                                  &device->meta_state.alloc);
+ fail_render_pass:
+   anv_DestroyRenderPass(anv_device_to_handle(device),
+                         device->meta_state.blit.render_pass,
+                         &device->meta_state.alloc);
+
+   ralloc_free(vs.nir);
+   ralloc_free(fs_1d.nir);
+   ralloc_free(fs_2d.nir);
+   ralloc_free(fs_3d.nir);
+ fail:
+   return result;
+}
diff --git a/src/intel/vulkan/anv_meta_blit2d.c b/src/intel/vulkan/anv_meta_blit2d.c
new file mode 100644 (file)
index 0000000..6f07342
--- /dev/null
@@ -0,0 +1,211 @@
+/*
+ * Copyright © 2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "anv_meta.h"
+
+static VkFormat
+vk_format_for_size(int bs)
+{
+   /* The choice of UNORM and UINT formats is very intentional here.  Most of
+    * the time, we want to use a UINT format to avoid any rounding error in
+    * the blit.  For stencil blits, R8_UINT is required by the hardware.
+    * (It's the only format allowed in conjunction with W-tiling.)  Also we
+    * intentionally use the 4-channel formats whenever we can.  This is so
+    * that, when we do a RGB <-> RGBX copy, the two formats will line up even
+    * though one of them is 3/4 the size of the other.  The choice of UNORM
+    * vs. UINT is also very intentional because Haswell doesn't handle 8 or
+    * 16-bit RGB UINT formats at all so we have to use UNORM there.
+    * Fortunately, the only time we should ever use two different formats in
+    * the table below is for RGB -> RGBA blits and so we will never have any
+    * UNORM/UINT mismatch.
+    */
+   switch (bs) {
+   case 1: return VK_FORMAT_R8_UINT;
+   case 2: return VK_FORMAT_R8G8_UINT;
+   case 3: return VK_FORMAT_R8G8B8_UNORM;
+   case 4: return VK_FORMAT_R8G8B8A8_UNORM;
+   case 6: return VK_FORMAT_R16G16B16_UNORM;
+   case 8: return VK_FORMAT_R16G16B16A16_UNORM;
+   case 12: return VK_FORMAT_R32G32B32_UINT;
+   case 16: return VK_FORMAT_R32G32B32A32_UINT;
+   default:
+      unreachable("Invalid format block size");
+   }
+}
+
+void
+anv_meta_end_blit2d(struct anv_cmd_buffer *cmd_buffer,
+                    struct anv_meta_saved_state *save)
+{
+   anv_meta_restore(save, cmd_buffer);
+}
+
+void
+anv_meta_begin_blit2d(struct anv_cmd_buffer *cmd_buffer,
+                      struct anv_meta_saved_state *save)
+{
+   anv_meta_save(save, cmd_buffer,
+                 (1 << VK_DYNAMIC_STATE_VIEWPORT));
+}
+
+void
+anv_meta_blit2d(struct anv_cmd_buffer *cmd_buffer,
+                struct anv_meta_blit2d_surf *src,
+                struct anv_meta_blit2d_surf *dst,
+                unsigned num_rects,
+                struct anv_meta_blit2d_rect *rects)
+{
+   VkDevice vk_device = anv_device_to_handle(cmd_buffer->device);
+   VkFormat src_format = vk_format_for_size(src->bs);
+   VkFormat dst_format = vk_format_for_size(dst->bs);
+   VkImageUsageFlags src_usage = VK_IMAGE_USAGE_SAMPLED_BIT;
+   VkImageUsageFlags dst_usage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
+
+   for (unsigned r = 0; r < num_rects; ++r) {
+
+      /* Create VkImages */
+      VkImageCreateInfo image_info = {
+         .sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
+         .imageType = VK_IMAGE_TYPE_2D,
+         .format = 0, /* TEMPLATE */
+         .extent = {
+            .width = 0, /* TEMPLATE */
+            .height = 0, /* TEMPLATE */
+            .depth = 1,
+         },
+         .mipLevels = 1,
+         .arrayLayers = 1,
+         .samples = 1,
+         .tiling = 0, /* TEMPLATE */
+         .usage = 0, /* TEMPLATE */
+      };
+      struct anv_image_create_info anv_image_info = {
+         .vk_info = &image_info,
+         .isl_tiling_flags = 0, /* TEMPLATE */
+      };
+
+      /* The image height is the rect height + src/dst y-offset from the
+       * tile-aligned base address.
+       */
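+      /* The width is the surface pitch in elements so that the temporary
+       * image's row stride matches the underlying buffer layout.
+       */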
+      struct isl_tile_info tile_info;
+
+      anv_image_info.isl_tiling_flags = 1 << src->tiling;
+      image_info.tiling = src->tiling == ISL_TILING_LINEAR ?
+                          VK_IMAGE_TILING_LINEAR : VK_IMAGE_TILING_OPTIMAL;
+      image_info.usage = src_usage;
+      image_info.format = src_format;
+      isl_tiling_get_info(&cmd_buffer->device->isl_dev, src->tiling, src->bs,
+                          &tile_info);
+      image_info.extent.height = rects[r].height +
+                                 rects[r].src_y % tile_info.height;
+      image_info.extent.width = src->pitch / src->bs;
+      VkImage src_image;
+      anv_image_create(vk_device, &anv_image_info,
+                       &cmd_buffer->pool->alloc, &src_image);
+
+      anv_image_info.isl_tiling_flags = 1 << dst->tiling;
+      image_info.tiling = dst->tiling == ISL_TILING_LINEAR ?
+                          VK_IMAGE_TILING_LINEAR : VK_IMAGE_TILING_OPTIMAL;
+      image_info.usage = dst_usage;
+      image_info.format = dst_format;
+      isl_tiling_get_info(&cmd_buffer->device->isl_dev, dst->tiling, dst->bs,
+                          &tile_info);
+      image_info.extent.height = rects[r].height +
+                                 rects[r].dst_y % tile_info.height;
+      image_info.extent.width = dst->pitch / dst->bs;
+      VkImage dst_image;
+      anv_image_create(vk_device, &anv_image_info,
+                       &cmd_buffer->pool->alloc, &dst_image);
+
+      /* We could use a vk call to bind memory, but that would require
+       * creating a dummy memory object etc. so there's really no point.
+       */
+      anv_image_from_handle(src_image)->bo = src->bo;
+      anv_image_from_handle(src_image)->offset = src->base_offset;
+      anv_image_from_handle(dst_image)->bo = dst->bo;
+      anv_image_from_handle(dst_image)->offset = dst->base_offset;
+
+      /* Create VkImageViews */
+      VkImageViewCreateInfo iview_info = {
+         .sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,
+         .image = 0, /* TEMPLATE */
+         .viewType = VK_IMAGE_VIEW_TYPE_2D,
+         .format = 0, /* TEMPLATE */
+         .subresourceRange = {
+            .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
+            .baseMipLevel = 0,
+            .levelCount = 1,
+            .baseArrayLayer = 0,
+            .layerCount = 1
+         },
+      };
+      uint32_t img_o = 0;
+
+      iview_info.image = src_image;
+      iview_info.format = src_format;
+      VkOffset3D src_offset_el = {0};
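+      /* Split the requested x/y into a tile-aligned byte offset (img_o) and
+       * an element offset within that tile; the view is created at img_o and
+       * the blit then reads/renders at the element offset.
+       */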
+      isl_surf_get_image_intratile_offset_el_xy(&cmd_buffer->device->isl_dev,
+                                                &anv_image_from_handle(src_image)->
+                                                   color_surface.isl,
+                                                rects[r].src_x,
+                                                rects[r].src_y,
+                                                &img_o,
+                                                (uint32_t*)&src_offset_el.x,
+                                                (uint32_t*)&src_offset_el.y);
+
+      struct anv_image_view src_iview;
+      anv_image_view_init(&src_iview, cmd_buffer->device,
+         &iview_info, cmd_buffer, img_o, src_usage);
+
+      iview_info.image = dst_image;
+      iview_info.format = dst_format;
+      VkOffset3D dst_offset_el = {0};
+      isl_surf_get_image_intratile_offset_el_xy(&cmd_buffer->device->isl_dev,
+                                                &anv_image_from_handle(dst_image)->
+                                                   color_surface.isl,
+                                                rects[r].dst_x,
+                                                rects[r].dst_y,
+                                                &img_o,
+                                                (uint32_t*)&dst_offset_el.x,
+                                                (uint32_t*)&dst_offset_el.y);
+      struct anv_image_view dst_iview;
+      anv_image_view_init(&dst_iview, cmd_buffer->device,
+         &iview_info, cmd_buffer, img_o, dst_usage);
+
+      /* Perform blit */
+      anv_meta_emit_blit(cmd_buffer,
+                     anv_image_from_handle(src_image),
+                     &src_iview,
+                     src_offset_el,
+                     (VkExtent3D){rects[r].width, rects[r].height, 1},
+                     anv_image_from_handle(dst_image),
+                     &dst_iview,
+                     dst_offset_el,
+                     (VkExtent3D){rects[r].width, rects[r].height, 1},
+                     VK_FILTER_NEAREST);
+
+      anv_DestroyImage(vk_device, src_image, &cmd_buffer->pool->alloc);
+      anv_DestroyImage(vk_device, dst_image, &cmd_buffer->pool->alloc);
+   }
+}
+
diff --git a/src/intel/vulkan/anv_meta_clear.c b/src/intel/vulkan/anv_meta_clear.c
new file mode 100644 (file)
index 0000000..a24e599
--- /dev/null
@@ -0,0 +1,1100 @@
+/*
+ * Copyright © 2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "anv_meta.h"
+#include "anv_private.h"
+#include "nir/nir_builder.h"
+
+/** Vertex attributes for color clears.  */
+struct color_clear_vattrs {
+   struct anv_vue_header vue_header;
+   float position[2]; /**< 3DPRIM_RECTLIST */
+   VkClearColorValue color;
+};
+
+/** Vertex attributes for depthstencil clears.  */
+struct depthstencil_clear_vattrs {
+   struct anv_vue_header vue_header;
+   float position[2]; /**< 3DPRIM_RECTLIST */
+};
+
+static void
+meta_clear_begin(struct anv_meta_saved_state *saved_state,
+                 struct anv_cmd_buffer *cmd_buffer)
+{
+   anv_meta_save(saved_state, cmd_buffer,
+                 (1 << VK_DYNAMIC_STATE_VIEWPORT) |
+                 (1 << VK_DYNAMIC_STATE_SCISSOR) |
+                 (1 << VK_DYNAMIC_STATE_STENCIL_REFERENCE) |
+                 (1 << VK_DYNAMIC_STATE_STENCIL_WRITE_MASK));
+
+   cmd_buffer->state.dynamic.viewport.count = 0;
+   cmd_buffer->state.dynamic.scissor.count = 0;
+}
+
+static void
+meta_clear_end(struct anv_meta_saved_state *saved_state,
+               struct anv_cmd_buffer *cmd_buffer)
+{
+   anv_meta_restore(saved_state, cmd_buffer);
+}
+
+static void
+build_color_shaders(struct nir_shader **out_vs,
+                    struct nir_shader **out_fs,
+                    uint32_t frag_output)
+{
+   nir_builder vs_b;
+   nir_builder fs_b;
+
+   nir_builder_init_simple_shader(&vs_b, NULL, MESA_SHADER_VERTEX, NULL);
+   nir_builder_init_simple_shader(&fs_b, NULL, MESA_SHADER_FRAGMENT, NULL);
+
+   vs_b.shader->info.name = ralloc_strdup(vs_b.shader, "meta_clear_color_vs");
+   fs_b.shader->info.name = ralloc_strdup(fs_b.shader, "meta_clear_color_fs");
+
+   const struct glsl_type *position_type = glsl_vec4_type();
+   const struct glsl_type *color_type = glsl_vec4_type();
+
+   nir_variable *vs_in_pos =
+      nir_variable_create(vs_b.shader, nir_var_shader_in, position_type,
+                          "a_position");
+   vs_in_pos->data.location = VERT_ATTRIB_GENERIC0;
+
+   nir_variable *vs_out_pos =
+      nir_variable_create(vs_b.shader, nir_var_shader_out, position_type,
+                          "gl_Position");
+   vs_out_pos->data.location = VARYING_SLOT_POS;
+
+   nir_variable *vs_in_color =
+      nir_variable_create(vs_b.shader, nir_var_shader_in, color_type,
+                          "a_color");
+   vs_in_color->data.location = VERT_ATTRIB_GENERIC1;
+
+   nir_variable *vs_out_color =
+      nir_variable_create(vs_b.shader, nir_var_shader_out, color_type,
+                          "v_color");
+   vs_out_color->data.location = VARYING_SLOT_VAR0;
+   vs_out_color->data.interpolation = INTERP_QUALIFIER_FLAT;
+
+   nir_variable *fs_in_color =
+      nir_variable_create(fs_b.shader, nir_var_shader_in, color_type,
+                          "v_color");
+   fs_in_color->data.location = vs_out_color->data.location;
+   fs_in_color->data.interpolation = vs_out_color->data.interpolation;
+
+   nir_variable *fs_out_color =
+      nir_variable_create(fs_b.shader, nir_var_shader_out, color_type,
+                          "f_color");
+   fs_out_color->data.location = FRAG_RESULT_DATA0 + frag_output;
+
+   nir_copy_var(&vs_b, vs_out_pos, vs_in_pos);
+   nir_copy_var(&vs_b, vs_out_color, vs_in_color);
+   nir_copy_var(&fs_b, fs_out_color, fs_in_color);
+
+   *out_vs = vs_b.shader;
+   *out_fs = fs_b.shader;
+}
+
+static VkResult
+create_pipeline(struct anv_device *device,
+                uint32_t samples,
+                struct nir_shader *vs_nir,
+                struct nir_shader *fs_nir,
+                const VkPipelineVertexInputStateCreateInfo *vi_state,
+                const VkPipelineDepthStencilStateCreateInfo *ds_state,
+                const VkPipelineColorBlendStateCreateInfo *cb_state,
+                const VkAllocationCallbacks *alloc,
+                bool use_repclear,
+                struct anv_pipeline **pipeline)
+{
+   VkDevice device_h = anv_device_to_handle(device);
+   VkResult result;
+
+   struct anv_shader_module vs_m = { .nir = vs_nir };
+   struct anv_shader_module fs_m = { .nir = fs_nir };
+
+   VkPipeline pipeline_h = VK_NULL_HANDLE;
+   result = anv_graphics_pipeline_create(device_h,
+      VK_NULL_HANDLE,
+      &(VkGraphicsPipelineCreateInfo) {
+         .sType = VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO,
+         .stageCount = fs_nir ? 2 : 1,
+         .pStages = (VkPipelineShaderStageCreateInfo[]) {
+            {
+               .sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
+               .stage = VK_SHADER_STAGE_VERTEX_BIT,
+               .module = anv_shader_module_to_handle(&vs_m),
+               .pName = "main",
+            },
+            {
+               .sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
+               .stage = VK_SHADER_STAGE_FRAGMENT_BIT,
+               .module = anv_shader_module_to_handle(&fs_m),
+               .pName = "main",
+            },
+         },
+         .pVertexInputState = vi_state,
+         .pInputAssemblyState = &(VkPipelineInputAssemblyStateCreateInfo) {
+            .sType = VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO,
+            .topology = VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP,
+            .primitiveRestartEnable = false,
+         },
+         .pViewportState = &(VkPipelineViewportStateCreateInfo) {
+            .sType = VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO,
+            .viewportCount = 1,
+            .pViewports = NULL, /* dynamic */
+            .scissorCount = 1,
+            .pScissors = NULL, /* dynamic */
+         },
+         .pRasterizationState = &(VkPipelineRasterizationStateCreateInfo) {
+            .sType = VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO,
+            .rasterizerDiscardEnable = false,
+            .polygonMode = VK_POLYGON_MODE_FILL,
+            .cullMode = VK_CULL_MODE_NONE,
+            .frontFace = VK_FRONT_FACE_COUNTER_CLOCKWISE,
+            .depthBiasEnable = false,
+         },
+         .pMultisampleState = &(VkPipelineMultisampleStateCreateInfo) {
+            .sType = VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO,
+            .rasterizationSamples = samples,
+            .sampleShadingEnable = false,
+            .pSampleMask = (VkSampleMask[]) { ~0 },
+            .alphaToCoverageEnable = false,
+            .alphaToOneEnable = false,
+         },
+         .pDepthStencilState = ds_state,
+         .pColorBlendState = cb_state,
+         .pDynamicState = &(VkPipelineDynamicStateCreateInfo) {
+            /* The meta clear pipeline declares all state as dynamic.
+             * As a consequence, vkCmdBindPipeline writes no dynamic state
+             * to the cmd buffer. Therefore, at the end of the meta clear,
+             * we need only restore the dynamic state that was set via
+             * vkCmdSet*.
+             */
+            .sType = VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO,
+            .dynamicStateCount = 8,
+            .pDynamicStates = (VkDynamicState[]) {
+               /* Everything except stencil write mask */
+               VK_DYNAMIC_STATE_VIEWPORT,
+               VK_DYNAMIC_STATE_SCISSOR,
+               VK_DYNAMIC_STATE_LINE_WIDTH,
+               VK_DYNAMIC_STATE_DEPTH_BIAS,
+               VK_DYNAMIC_STATE_BLEND_CONSTANTS,
+               VK_DYNAMIC_STATE_DEPTH_BOUNDS,
+               VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK,
+               VK_DYNAMIC_STATE_STENCIL_REFERENCE,
+            },
+         },
+         .flags = 0,
+         .renderPass = anv_render_pass_to_handle(&anv_meta_dummy_renderpass),
+         .subpass = 0,
+      },
+      &(struct anv_graphics_pipeline_create_info) {
+         .color_attachment_count = MAX_RTS,
+         .use_repclear = use_repclear,
+         .disable_viewport = true,
+         .disable_vs = true,
+         .use_rectlist = true
+      },
+      alloc,
+      &pipeline_h);
+
+   ralloc_free(vs_nir);
+   ralloc_free(fs_nir);
+
+   *pipeline = anv_pipeline_from_handle(pipeline_h);
+
+   return result;
+}
+
+static VkResult
+create_color_pipeline(struct anv_device *device,
+                      uint32_t samples,
+                      uint32_t frag_output,
+                      struct anv_pipeline **pipeline)
+{
+   struct nir_shader *vs_nir;
+   struct nir_shader *fs_nir;
+   build_color_shaders(&vs_nir, &fs_nir, frag_output);
+
+   const VkPipelineVertexInputStateCreateInfo vi_state = {
+      .sType = VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO,
+      .vertexBindingDescriptionCount = 1,
+      .pVertexBindingDescriptions = (VkVertexInputBindingDescription[]) {
+         {
+            .binding = 0,
+            .stride = sizeof(struct color_clear_vattrs),
+            .inputRate = VK_VERTEX_INPUT_RATE_VERTEX
+         },
+      },
+      .vertexAttributeDescriptionCount = 3,
+      .pVertexAttributeDescriptions = (VkVertexInputAttributeDescription[]) {
+         {
+            /* VUE Header */
+            .location = 0,
+            .binding = 0,
+            .format = VK_FORMAT_R32G32B32A32_UINT,
+            .offset = offsetof(struct color_clear_vattrs, vue_header),
+         },
+         {
+            /* Position */
+            .location = 1,
+            .binding = 0,
+            .format = VK_FORMAT_R32G32_SFLOAT,
+            .offset = offsetof(struct color_clear_vattrs, position),
+         },
+         {
+            /* Color */
+            .location = 2,
+            .binding = 0,
+            .format = VK_FORMAT_R32G32B32A32_SFLOAT,
+            .offset = offsetof(struct color_clear_vattrs, color),
+         },
+      },
+   };
+
+   const VkPipelineDepthStencilStateCreateInfo ds_state = {
+      .sType = VK_STRUCTURE_TYPE_PIPELINE_DEPTH_STENCIL_STATE_CREATE_INFO,
+      .depthTestEnable = false,
+      .depthWriteEnable = false,
+      .depthBoundsTestEnable = false,
+      .stencilTestEnable = false,
+   };
+
+   VkPipelineColorBlendAttachmentState blend_attachment_state[MAX_RTS] = { 0 };
+   blend_attachment_state[frag_output] = (VkPipelineColorBlendAttachmentState) {
+      .blendEnable = false,
+      .colorWriteMask = VK_COLOR_COMPONENT_A_BIT |
+                        VK_COLOR_COMPONENT_R_BIT |
+                        VK_COLOR_COMPONENT_G_BIT |
+                        VK_COLOR_COMPONENT_B_BIT,
+   };
+
+   const VkPipelineColorBlendStateCreateInfo cb_state = {
+      .sType = VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO,
+      .logicOpEnable = false,
+      .attachmentCount = MAX_RTS,
+      .pAttachments = blend_attachment_state
+   };
+
+   /* Use the repclear shader.  Since the NIR shader we are providing has
+    * exactly one output, that output will get compacted down to binding
+    * table entry 0.  The hard-coded repclear shader is then exactly what
+    * we want regardless of what attachment we are actually clearing.
+    */
+   return
+      create_pipeline(device, samples, vs_nir, fs_nir, &vi_state, &ds_state,
+                      &cb_state, &device->meta_state.alloc,
+                      /*use_repclear*/ true, pipeline);
+}
+
+static void
+destroy_pipeline(struct anv_device *device, struct anv_pipeline *pipeline)
+{
+   if (!pipeline)
+      return;
+
+   ANV_CALL(DestroyPipeline)(anv_device_to_handle(device),
+                             anv_pipeline_to_handle(pipeline),
+                             &device->meta_state.alloc);
+}
+
+void
+anv_device_finish_meta_clear_state(struct anv_device *device)
+{
+   struct anv_meta_state *state = &device->meta_state;
+
+   for (uint32_t i = 0; i < ARRAY_SIZE(state->clear); ++i) {
+      for (uint32_t j = 0; j < ARRAY_SIZE(state->clear[i].color_pipelines); ++j) {
+         destroy_pipeline(device, state->clear[i].color_pipelines[j]);
+      }
+
+      destroy_pipeline(device, state->clear[i].depth_only_pipeline);
+      destroy_pipeline(device, state->clear[i].stencil_only_pipeline);
+      destroy_pipeline(device, state->clear[i].depthstencil_pipeline);
+   }
+}
+
+static void
+emit_color_clear(struct anv_cmd_buffer *cmd_buffer,
+                 const VkClearAttachment *clear_att,
+                 const VkClearRect *clear_rect)
+{
+   struct anv_device *device = cmd_buffer->device;
+   const struct anv_subpass *subpass = cmd_buffer->state.subpass;
+   const struct anv_framebuffer *fb = cmd_buffer->state.framebuffer;
+   const uint32_t subpass_att = clear_att->colorAttachment;
+   const uint32_t pass_att = subpass->color_attachments[subpass_att];
+   const struct anv_image_view *iview = fb->attachments[pass_att];
+   const uint32_t samples = iview->image->samples;
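+   /* samples is a power of two, so ffs(samples) - 1 is log2(samples), which
+    * indexes the per-sample-count pipeline arrays in meta_state.clear.
+    */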
+   const uint32_t samples_log2 = ffs(samples) - 1;
+   struct anv_pipeline *pipeline =
+      device->meta_state.clear[samples_log2].color_pipelines[subpass_att];
+   VkClearColorValue clear_value = clear_att->clearValue.color;
+
+   VkCommandBuffer cmd_buffer_h = anv_cmd_buffer_to_handle(cmd_buffer);
+   VkPipeline pipeline_h = anv_pipeline_to_handle(pipeline);
+
+   assert(samples_log2 < ARRAY_SIZE(device->meta_state.clear));
+   assert(clear_att->aspectMask == VK_IMAGE_ASPECT_COLOR_BIT);
+   assert(clear_att->colorAttachment < subpass->color_count);
+
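+   /* Only three vertices are needed: with a RECTLIST primitive the hardware
+    * derives the fourth corner of the rectangle from the first three.
+    */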
+   const struct color_clear_vattrs vertex_data[3] = {
+      {
+         .vue_header = { 0 },
+         .position = {
+            clear_rect->rect.offset.x,
+            clear_rect->rect.offset.y,
+         },
+         .color = clear_value,
+      },
+      {
+         .vue_header = { 0 },
+         .position = {
+            clear_rect->rect.offset.x + clear_rect->rect.extent.width,
+            clear_rect->rect.offset.y,
+         },
+         .color = clear_value,
+      },
+      {
+         .vue_header = { 0 },
+         .position = {
+            clear_rect->rect.offset.x + clear_rect->rect.extent.width,
+            clear_rect->rect.offset.y + clear_rect->rect.extent.height,
+         },
+         .color = clear_value,
+      },
+   };
+
+   struct anv_state state =
+      anv_cmd_buffer_emit_dynamic(cmd_buffer, vertex_data, sizeof(vertex_data), 16);
+
+   struct anv_buffer vertex_buffer = {
+      .device = device,
+      .size = sizeof(vertex_data),
+      .bo = &device->dynamic_state_block_pool.bo,
+      .offset = state.offset,
+   };
+
+   ANV_CALL(CmdSetViewport)(cmd_buffer_h, 0, 1,
+      (VkViewport[]) {
+         {
+            .x = 0,
+            .y = 0,
+            .width = fb->width,
+            .height = fb->height,
+            .minDepth = 0.0,
+            .maxDepth = 1.0,
+         },
+      });
+
+   ANV_CALL(CmdSetScissor)(cmd_buffer_h, 0, 1,
+      (VkRect2D[]) {
+         {
+            .offset = { 0, 0 },
+            .extent = { fb->width, fb->height },
+         }
+      });
+
+   ANV_CALL(CmdBindVertexBuffers)(cmd_buffer_h, 0, 1,
+      (VkBuffer[]) { anv_buffer_to_handle(&vertex_buffer) },
+      (VkDeviceSize[]) { 0 });
+
+   if (cmd_buffer->state.pipeline != pipeline) {
+      ANV_CALL(CmdBindPipeline)(cmd_buffer_h, VK_PIPELINE_BIND_POINT_GRAPHICS,
+                                pipeline_h);
+   }
+
+   ANV_CALL(CmdDraw)(cmd_buffer_h, 3, 1, 0, 0);
+}
+
+
+static void
+build_depthstencil_shader(struct nir_shader **out_vs)
+{
+   nir_builder vs_b;
+
+   nir_builder_init_simple_shader(&vs_b, NULL, MESA_SHADER_VERTEX, NULL);
+
+   vs_b.shader->info.name = ralloc_strdup(vs_b.shader, "meta_clear_depthstencil_vs");
+
+   const struct glsl_type *position_type = glsl_vec4_type();
+
+   nir_variable *vs_in_pos =
+      nir_variable_create(vs_b.shader, nir_var_shader_in, position_type,
+                          "a_position");
+   vs_in_pos->data.location = VERT_ATTRIB_GENERIC0;
+
+   nir_variable *vs_out_pos =
+      nir_variable_create(vs_b.shader, nir_var_shader_out, position_type,
+                          "gl_Position");
+   vs_out_pos->data.location = VARYING_SLOT_POS;
+
+   nir_copy_var(&vs_b, vs_out_pos, vs_in_pos);
+
+   *out_vs = vs_b.shader;
+}
+
+static VkResult
+create_depthstencil_pipeline(struct anv_device *device,
+                             VkImageAspectFlags aspects,
+                             uint32_t samples,
+                             struct anv_pipeline **pipeline)
+{
+   struct nir_shader *vs_nir;
+
+   build_depthstencil_shader(&vs_nir);
+
+   const VkPipelineVertexInputStateCreateInfo vi_state = {
+      .sType = VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO,
+      .vertexBindingDescriptionCount = 1,
+      .pVertexBindingDescriptions = (VkVertexInputBindingDescription[]) {
+         {
+            .binding = 0,
+            .stride = sizeof(struct depthstencil_clear_vattrs),
+            .inputRate = VK_VERTEX_INPUT_RATE_VERTEX
+         },
+      },
+      .vertexAttributeDescriptionCount = 2,
+      .pVertexAttributeDescriptions = (VkVertexInputAttributeDescription[]) {
+         {
+            /* VUE Header */
+            .location = 0,
+            .binding = 0,
+            .format = VK_FORMAT_R32G32B32A32_UINT,
+            .offset = offsetof(struct depthstencil_clear_vattrs, vue_header),
+         },
+         {
+            /* Position */
+            .location = 1,
+            .binding = 0,
+            .format = VK_FORMAT_R32G32_SFLOAT,
+            .offset = offsetof(struct depthstencil_clear_vattrs, position),
+         },
+      },
+   };
+
+   const VkPipelineDepthStencilStateCreateInfo ds_state = {
+      .sType = VK_STRUCTURE_TYPE_PIPELINE_DEPTH_STENCIL_STATE_CREATE_INFO,
+      .depthTestEnable = (aspects & VK_IMAGE_ASPECT_DEPTH_BIT),
+      .depthCompareOp = VK_COMPARE_OP_ALWAYS,
+      .depthWriteEnable = (aspects & VK_IMAGE_ASPECT_DEPTH_BIT),
+      .depthBoundsTestEnable = false,
+      .stencilTestEnable = (aspects & VK_IMAGE_ASPECT_STENCIL_BIT),
+      .front = {
+         .passOp = VK_STENCIL_OP_REPLACE,
+         .compareOp = VK_COMPARE_OP_ALWAYS,
+         .writeMask = UINT32_MAX,
+         .reference = 0, /* dynamic */
+      },
+      .back = { 0 /* don't care */ },
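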
+   };
+
+   const VkPipelineColorBlendStateCreateInfo cb_state = {
+      .sType = VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO,
+      .logicOpEnable = false,
+      .attachmentCount = 0,
+      .pAttachments = NULL,
+   };
+
+   return create_pipeline(device, samples, vs_nir, NULL, &vi_state, &ds_state,
+                          &cb_state, &device->meta_state.alloc,
+                          /*use_repclear*/ true, pipeline);
+}
+
+static void
+emit_depthstencil_clear(struct anv_cmd_buffer *cmd_buffer,
+                        const VkClearAttachment *clear_att,
+                        const VkClearRect *clear_rect)
+{
+   struct anv_device *device = cmd_buffer->device;
+   struct anv_meta_state *meta_state = &device->meta_state;
+   const struct anv_subpass *subpass = cmd_buffer->state.subpass;
+   const struct anv_framebuffer *fb = cmd_buffer->state.framebuffer;
+   const uint32_t pass_att = subpass->depth_stencil_attachment;
+   const struct anv_image_view *iview = fb->attachments[pass_att];
+   const uint32_t samples = iview->image->samples;
+   const uint32_t samples_log2 = ffs(samples) - 1;
+   VkClearDepthStencilValue clear_value = clear_att->clearValue.depthStencil;
+   VkImageAspectFlags aspects = clear_att->aspectMask;
+
+   VkCommandBuffer cmd_buffer_h = anv_cmd_buffer_to_handle(cmd_buffer);
+
+   assert(samples_log2 < ARRAY_SIZE(meta_state->clear));
+   assert(aspects == VK_IMAGE_ASPECT_DEPTH_BIT ||
+          aspects == VK_IMAGE_ASPECT_STENCIL_BIT ||
+          aspects == (VK_IMAGE_ASPECT_DEPTH_BIT |
+                      VK_IMAGE_ASPECT_STENCIL_BIT));
+   assert(pass_att != VK_ATTACHMENT_UNUSED);
+
+   const struct depthstencil_clear_vattrs vertex_data[3] = {
+      {
+         .vue_header = { 0 },
+         .position = {
+            clear_rect->rect.offset.x,
+            clear_rect->rect.offset.y,
+         },
+      },
+      {
+         .vue_header = { 0 },
+         .position = {
+            clear_rect->rect.offset.x + clear_rect->rect.extent.width,
+            clear_rect->rect.offset.y,
+         },
+      },
+      {
+         .vue_header = { 0 },
+         .position = {
+            clear_rect->rect.offset.x + clear_rect->rect.extent.width,
+            clear_rect->rect.offset.y + clear_rect->rect.extent.height,
+         },
+      },
+   };
+
+   struct anv_state state =
+      anv_cmd_buffer_emit_dynamic(cmd_buffer, vertex_data, sizeof(vertex_data), 16);
+
+   struct anv_buffer vertex_buffer = {
+      .device = device,
+      .size = sizeof(vertex_data),
+      .bo = &device->dynamic_state_block_pool.bo,
+      .offset = state.offset,
+   };
+
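+   /* Setting minDepth == maxDepth == clear_value.depth makes the viewport
+    * transform collapse every fragment's depth to the clear value, no matter
+    * what z the vertex shader emits.
+    */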
+   ANV_CALL(CmdSetViewport)(cmd_buffer_h, 0, 1,
+      (VkViewport[]) {
+         {
+            .x = 0,
+            .y = 0,
+            .width = fb->width,
+            .height = fb->height,
+
+            /* Ignored when clearing only stencil. */
+            .minDepth = clear_value.depth,
+            .maxDepth = clear_value.depth,
+         },
+      });
+
+   ANV_CALL(CmdSetScissor)(cmd_buffer_h, 0, 1,
+      (VkRect2D[]) {
+         {
+            .offset = { 0, 0 },
+            .extent = { fb->width, fb->height },
+         }
+      });
+
+   if (aspects & VK_IMAGE_ASPECT_STENCIL_BIT) {
+      ANV_CALL(CmdSetStencilReference)(cmd_buffer_h, VK_STENCIL_FACE_FRONT_BIT,
+                                       clear_value.stencil);
+   }
+
+   ANV_CALL(CmdBindVertexBuffers)(cmd_buffer_h, 0, 1,
+      (VkBuffer[]) { anv_buffer_to_handle(&vertex_buffer) },
+      (VkDeviceSize[]) { 0 });
+
+   struct anv_pipeline *pipeline;
+   switch (aspects) {
+   case VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT:
+      pipeline = meta_state->clear[samples_log2].depthstencil_pipeline;
+      break;
+   case VK_IMAGE_ASPECT_DEPTH_BIT:
+      pipeline = meta_state->clear[samples_log2].depth_only_pipeline;
+      break;
+   case VK_IMAGE_ASPECT_STENCIL_BIT:
+      pipeline = meta_state->clear[samples_log2].stencil_only_pipeline;
+      break;
+   default:
+      unreachable("expected depth or stencil aspect");
+   }
+
+   if (cmd_buffer->state.pipeline != pipeline) {
+      ANV_CALL(CmdBindPipeline)(cmd_buffer_h, VK_PIPELINE_BIND_POINT_GRAPHICS,
+                                anv_pipeline_to_handle(pipeline));
+   }
+
+   ANV_CALL(CmdDraw)(cmd_buffer_h, 3, 1, 0, 0);
+}
+
+VkResult
+anv_device_init_meta_clear_state(struct anv_device *device)
+{
+   VkResult res;
+   struct anv_meta_state *state = &device->meta_state;
+
+   zero(device->meta_state.clear);
+
+   for (uint32_t i = 0; i < ARRAY_SIZE(state->clear); ++i) {
+      uint32_t samples = 1 << i;
+
+      for (uint32_t j = 0; j < ARRAY_SIZE(state->clear[i].color_pipelines); ++j) {
+         res = create_color_pipeline(device, samples, /* frag_output */ j,
+                                     &state->clear[i].color_pipelines[j]);
+         if (res != VK_SUCCESS)
+            goto fail;
+      }
+
+      res = create_depthstencil_pipeline(device,
+                                         VK_IMAGE_ASPECT_DEPTH_BIT, samples,
+                                         &state->clear[i].depth_only_pipeline);
+      if (res != VK_SUCCESS)
+         goto fail;
+
+      res = create_depthstencil_pipeline(device,
+                                         VK_IMAGE_ASPECT_STENCIL_BIT, samples,
+                                         &state->clear[i].stencil_only_pipeline);
+      if (res != VK_SUCCESS)
+         goto fail;
+
+      res = create_depthstencil_pipeline(device,
+                                         VK_IMAGE_ASPECT_DEPTH_BIT |
+                                         VK_IMAGE_ASPECT_STENCIL_BIT, samples,
+                                         &state->clear[i].depthstencil_pipeline);
+      if (res != VK_SUCCESS)
+         goto fail;
+   }
+
+   return VK_SUCCESS;
+
+fail:
+   anv_device_finish_meta_clear_state(device);
+   return res;
+}
+
+/**
+ * The parameters mean the same as those in vkCmdClearAttachments.
+ */
+static void
+emit_clear(struct anv_cmd_buffer *cmd_buffer,
+           const VkClearAttachment *clear_att,
+           const VkClearRect *clear_rect)
+{
+   if (clear_att->aspectMask & VK_IMAGE_ASPECT_COLOR_BIT) {
+      emit_color_clear(cmd_buffer, clear_att, clear_rect);
+   } else {
+      assert(clear_att->aspectMask & (VK_IMAGE_ASPECT_DEPTH_BIT |
+                                      VK_IMAGE_ASPECT_STENCIL_BIT));
+      emit_depthstencil_clear(cmd_buffer, clear_att, clear_rect);
+   }
+}
+
+static bool
+subpass_needs_clear(const struct anv_cmd_buffer *cmd_buffer)
+{
+   const struct anv_cmd_state *cmd_state = &cmd_buffer->state;
+   uint32_t ds = cmd_state->subpass->depth_stencil_attachment;
+
+   for (uint32_t i = 0; i < cmd_state->subpass->color_count; ++i) {
+      uint32_t a = cmd_state->subpass->color_attachments[i];
+      if (cmd_state->attachments[a].pending_clear_aspects) {
+         return true;
+      }
+   }
+
+   if (ds != VK_ATTACHMENT_UNUSED &&
+       cmd_state->attachments[ds].pending_clear_aspects) {
+      return true;
+   }
+
+   return false;
+}
+
+/**
+ * Emit any pending attachment clears for the current subpass.
+ *
+ * @see anv_attachment_state::pending_clear_aspects
+ */
+void
+anv_cmd_buffer_clear_subpass(struct anv_cmd_buffer *cmd_buffer)
+{
+   struct anv_cmd_state *cmd_state = &cmd_buffer->state;
+   struct anv_framebuffer *fb = cmd_buffer->state.framebuffer;
+   struct anv_meta_saved_state saved_state;
+
+   if (!subpass_needs_clear(cmd_buffer))
+      return;
+
+   meta_clear_begin(&saved_state, cmd_buffer);
+
+   if (cmd_state->framebuffer->layers > 1)
+      anv_finishme("clearing multi-layer framebuffer");
+
+   VkClearRect clear_rect = {
+      .rect = {
+         .offset = { 0, 0 },
+         .extent = { fb->width, fb->height },
+      },
+      .baseArrayLayer = 0,
+      .layerCount = 1, /* FINISHME: clear multi-layer framebuffer */
+   };
+
+   for (uint32_t i = 0; i < cmd_state->subpass->color_count; ++i) {
+      uint32_t a = cmd_state->subpass->color_attachments[i];
+
+      if (!cmd_state->attachments[a].pending_clear_aspects)
+         continue;
+
+      assert(cmd_state->attachments[a].pending_clear_aspects ==
+             VK_IMAGE_ASPECT_COLOR_BIT);
+
+      VkClearAttachment clear_att = {
+         .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
+         .colorAttachment = i, /* Use attachment index relative to subpass */
+         .clearValue = cmd_state->attachments[a].clear_value,
+      };
+
+      emit_clear(cmd_buffer, &clear_att, &clear_rect);
+      cmd_state->attachments[a].pending_clear_aspects = 0;
+   }
+
+   uint32_t ds = cmd_state->subpass->depth_stencil_attachment;
+
+   if (ds != VK_ATTACHMENT_UNUSED &&
+       cmd_state->attachments[ds].pending_clear_aspects) {
+
+      VkClearAttachment clear_att = {
+         .aspectMask = cmd_state->attachments[ds].pending_clear_aspects,
+         .clearValue = cmd_state->attachments[ds].clear_value,
+      };
+
+      emit_clear(cmd_buffer, &clear_att, &clear_rect);
+      cmd_state->attachments[ds].pending_clear_aspects = 0;
+   }
+
+   meta_clear_end(&saved_state, cmd_buffer);
+}
+
+static void
+anv_cmd_clear_image(struct anv_cmd_buffer *cmd_buffer,
+                    struct anv_image *image,
+                    VkImageLayout image_layout,
+                    const VkClearValue *clear_value,
+                    uint32_t range_count,
+                    const VkImageSubresourceRange *ranges)
+{
+   VkDevice device_h = anv_device_to_handle(cmd_buffer->device);
+
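+   /* Clear one miplevel/layer at a time: each iteration wraps the
+    * subresource in a single-attachment framebuffer and render pass, then
+    * reuses the attachment-clear path inside that pass.
+    */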
+   for (uint32_t r = 0; r < range_count; r++) {
+      const VkImageSubresourceRange *range = &ranges[r];
+
+      for (uint32_t l = 0; l < anv_get_levelCount(image, range); ++l) {
+         for (uint32_t s = 0; s < anv_get_layerCount(image, range); ++s) {
+            struct anv_image_view iview;
+            anv_image_view_init(&iview, cmd_buffer->device,
+               &(VkImageViewCreateInfo) {
+                  .sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,
+                  .image = anv_image_to_handle(image),
+                  .viewType = anv_meta_get_view_type(image),
+                  .format = image->vk_format,
+                  .subresourceRange = {
+                     .aspectMask = range->aspectMask,
+                     .baseMipLevel = range->baseMipLevel + l,
+                     .levelCount = 1,
+                     .baseArrayLayer = range->baseArrayLayer + s,
+                     .layerCount = 1
+                  },
+               },
+               cmd_buffer, 0, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT);
+
+            VkFramebuffer fb;
+            anv_CreateFramebuffer(device_h,
+               &(VkFramebufferCreateInfo) {
+                  .sType = VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO,
+                  .attachmentCount = 1,
+                  .pAttachments = (VkImageView[]) {
+                     anv_image_view_to_handle(&iview),
+                  },
+                  .width = iview.extent.width,
+                  .height = iview.extent.height,
+                  .layers = 1
+               },
+               &cmd_buffer->pool->alloc,
+               &fb);
+
+            VkAttachmentDescription att_desc = {
+               .format = iview.vk_format,
+               .loadOp = VK_ATTACHMENT_LOAD_OP_LOAD,
+               .storeOp = VK_ATTACHMENT_STORE_OP_STORE,
+               .stencilLoadOp = VK_ATTACHMENT_LOAD_OP_LOAD,
+               .stencilStoreOp = VK_ATTACHMENT_STORE_OP_STORE,
+               .initialLayout = image_layout,
+               .finalLayout = image_layout,
+            };
+
+            VkSubpassDescription subpass_desc = {
+               .pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS,
+               .inputAttachmentCount = 0,
+               .colorAttachmentCount = 0,
+               .pColorAttachments = NULL,
+               .pResolveAttachments = NULL,
+               .pDepthStencilAttachment = NULL,
+               .preserveAttachmentCount = 0,
+               .pPreserveAttachments = NULL,
+            };
+
+            const VkAttachmentReference att_ref = {
+               .attachment = 0,
+               .layout = image_layout,
+            };
+
+            if (range->aspectMask & VK_IMAGE_ASPECT_COLOR_BIT) {
+               subpass_desc.colorAttachmentCount = 1;
+               subpass_desc.pColorAttachments = &att_ref;
+            } else {
+               subpass_desc.pDepthStencilAttachment = &att_ref;
+            }
+
+            VkRenderPass pass;
+            anv_CreateRenderPass(device_h,
+               &(VkRenderPassCreateInfo) {
+                  .sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO,
+                  .attachmentCount = 1,
+                  .pAttachments = &att_desc,
+                  .subpassCount = 1,
+                  .pSubpasses = &subpass_desc,
+               },
+               &cmd_buffer->pool->alloc,
+               &pass);
+
+            ANV_CALL(CmdBeginRenderPass)(anv_cmd_buffer_to_handle(cmd_buffer),
+               &(VkRenderPassBeginInfo) {
+                  .sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO,
+                  .renderArea = {
+                     .offset = { 0, 0, },
+                     .extent = {
+                        .width = iview.extent.width,
+                        .height = iview.extent.height,
+                     },
+                  },
+                  .renderPass = pass,
+                  .framebuffer = fb,
+                  .clearValueCount = 0,
+                  .pClearValues = NULL,
+               },
+               VK_SUBPASS_CONTENTS_INLINE);
+
+            VkClearAttachment clear_att = {
+               .aspectMask = range->aspectMask,
+               .colorAttachment = 0,
+               .clearValue = *clear_value,
+            };
+
+            VkClearRect clear_rect = {
+               .rect = {
+                  .offset = { 0, 0 },
+                  .extent = { iview.extent.width, iview.extent.height },
+               },
+               .baseArrayLayer = range->baseArrayLayer,
+               .layerCount = 1, /* FINISHME: clear multi-layer framebuffer */
+            };
+
+            emit_clear(cmd_buffer, &clear_att, &clear_rect);
+
+            ANV_CALL(CmdEndRenderPass)(anv_cmd_buffer_to_handle(cmd_buffer));
+            ANV_CALL(DestroyRenderPass)(device_h, pass,
+                                        &cmd_buffer->pool->alloc);
+            ANV_CALL(DestroyFramebuffer)(device_h, fb,
+                                         &cmd_buffer->pool->alloc);
+         }
+      }
+   }
+}
+
+void anv_CmdClearColorImage(
+    VkCommandBuffer                             commandBuffer,
+    VkImage                                     image_h,
+    VkImageLayout                               imageLayout,
+    const VkClearColorValue*                    pColor,
+    uint32_t                                    rangeCount,
+    const VkImageSubresourceRange*              pRanges)
+{
+   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
+   ANV_FROM_HANDLE(anv_image, image, image_h);
+   struct anv_meta_saved_state saved_state;
+
+   meta_clear_begin(&saved_state, cmd_buffer);
+
+   anv_cmd_clear_image(cmd_buffer, image, imageLayout,
+                       (const VkClearValue *) pColor,
+                       rangeCount, pRanges);
+
+   meta_clear_end(&saved_state, cmd_buffer);
+}
+
+void anv_CmdClearDepthStencilImage(
+    VkCommandBuffer                             commandBuffer,
+    VkImage                                     image_h,
+    VkImageLayout                               imageLayout,
+    const VkClearDepthStencilValue*             pDepthStencil,
+    uint32_t                                    rangeCount,
+    const VkImageSubresourceRange*              pRanges)
+{
+   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
+   ANV_FROM_HANDLE(anv_image, image, image_h);
+   struct anv_meta_saved_state saved_state;
+
+   meta_clear_begin(&saved_state, cmd_buffer);
+
+   anv_cmd_clear_image(cmd_buffer, image, imageLayout,
+                       (const VkClearValue *) pDepthStencil,
+                       rangeCount, pRanges);
+
+   meta_clear_end(&saved_state, cmd_buffer);
+}
+
+void anv_CmdClearAttachments(
+    VkCommandBuffer                             commandBuffer,
+    uint32_t                                    attachmentCount,
+    const VkClearAttachment*                    pAttachments,
+    uint32_t                                    rectCount,
+    const VkClearRect*                          pRects)
+{
+   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
+   struct anv_meta_saved_state saved_state;
+
+   meta_clear_begin(&saved_state, cmd_buffer);
+
+   /* FINISHME: We can do better than this dumb loop. It thrashes too much
+    * state.
+    */
+   for (uint32_t a = 0; a < attachmentCount; ++a) {
+      for (uint32_t r = 0; r < rectCount; ++r) {
+         emit_clear(cmd_buffer, &pAttachments[a], &pRects[r]);
+      }
+   }
+
+   meta_clear_end(&saved_state, cmd_buffer);
+}
+
+static void
+do_buffer_fill(struct anv_cmd_buffer *cmd_buffer,
+               struct anv_bo *dest, uint64_t dest_offset,
+               int width, int height, VkFormat fill_format, uint32_t data)
+{
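+   /* Fill by aliasing the destination BO with a temporary linear color
+    * image and clearing that image.
+    */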
+   VkDevice vk_device = anv_device_to_handle(cmd_buffer->device);
+
+   VkImageCreateInfo image_info = {
+      .sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
+      .imageType = VK_IMAGE_TYPE_2D,
+      .format = fill_format,
+      .extent = {
+         .width = width,
+         .height = height,
+         .depth = 1,
+      },
+      .mipLevels = 1,
+      .arrayLayers = 1,
+      .samples = 1,
+      .tiling = VK_IMAGE_TILING_LINEAR,
+      .usage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT,
+      .flags = 0,
+   };
+
+   VkImage dest_image;
+   image_info.usage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
+   anv_CreateImage(vk_device, &image_info,
+                   &cmd_buffer->pool->alloc, &dest_image);
+
+   /* We could use a vk call to bind memory, but that would require
+    * creating a dummy memory object, etc., so there's really no point.
+    */
+   anv_image_from_handle(dest_image)->bo = dest;
+   anv_image_from_handle(dest_image)->offset = dest_offset;
+
+   const VkClearValue clear_value = {
+      .color = {
+         .uint32 = { data, data, data, data }
+      }
+   };
+
+   const VkImageSubresourceRange range = {
+      .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
+      .baseMipLevel = 0,
+      .levelCount = 1,
+      .baseArrayLayer = 0,
+      .layerCount = 1,
+   };
+
+   anv_cmd_clear_image(cmd_buffer, anv_image_from_handle(dest_image),
+                       VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
+                       &clear_value, 1, &range);
+}
+
+void anv_CmdFillBuffer(
+    VkCommandBuffer                             commandBuffer,
+    VkBuffer                                    dstBuffer,
+    VkDeviceSize                                dstOffset,
+    VkDeviceSize                                fillSize,
+    uint32_t                                    data)
+{
+   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
+   ANV_FROM_HANDLE(anv_buffer, dst_buffer, dstBuffer);
+   struct anv_meta_saved_state saved_state;
+
+   meta_clear_begin(&saved_state, cmd_buffer);
+
+   VkFormat format;
+   int bs;
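+   /* Pick the widest texel format the fill size and destination offset
+    * alignment allow.
+    */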
+   if ((fillSize & 15) == 0 && (dstOffset & 15) == 0) {
+      format = VK_FORMAT_R32G32B32A32_UINT;
+      bs = 16;
+   } else if ((fillSize & 7) == 0 && (dstOffset & 15) == 0) {
+      format = VK_FORMAT_R32G32_UINT;
+      bs = 8;
+   } else {
+      assert((fillSize & 3) == 0 && (dstOffset & 3) == 0);
+      format = VK_FORMAT_R32_UINT;
+      bs = 4;
+   }
+
+   /* This is the maximum possible width/height our HW can handle */
+   const uint64_t max_surface_dim = 1 << 14;
+
+   /* First, we do a bunch of max-sized fills */
+   const uint64_t max_fill_size = max_surface_dim * max_surface_dim * bs;
+   while (fillSize > max_fill_size) {
+      do_buffer_fill(cmd_buffer, dst_buffer->bo,
+                     dst_buffer->offset + dstOffset,
+                     max_surface_dim, max_surface_dim, format, data);
+      fillSize -= max_fill_size;
+      dstOffset += max_fill_size;
+   }
+
+   uint64_t height = fillSize / (max_surface_dim * bs);
+   assert(height < max_surface_dim);
+   if (height != 0) {
+      const uint64_t rect_fill_size = height * max_surface_dim * bs;
+      do_buffer_fill(cmd_buffer, dst_buffer->bo,
+                     dst_buffer->offset + dstOffset,
+                     max_surface_dim, height, format, data);
+      fillSize -= rect_fill_size;
+      dstOffset += rect_fill_size;
+   }
+
+   if (fillSize != 0) {
+      do_buffer_fill(cmd_buffer, dst_buffer->bo,
+                     dst_buffer->offset + dstOffset,
+                     fillSize / bs, 1, format, data);
+   }
+
+   meta_clear_end(&saved_state, cmd_buffer);
+}
diff --git a/src/intel/vulkan/anv_meta_copy.c b/src/intel/vulkan/anv_meta_copy.c
new file mode 100644 (file)
index 0000000..1a2bfd6
--- /dev/null
@@ -0,0 +1,441 @@
+/*
+ * Copyright © 2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "anv_meta.h"
+
+/* Returns the user-provided VkBufferImageCopy::imageExtent in units of
+ * elements rather than texels. One element equals one texel or one block
+ * if the image is uncompressed or compressed, respectively.
+ */
+static struct VkExtent3D
+meta_region_extent_el(const VkFormat format,
+                      const struct VkExtent3D *extent)
+{
+   const struct isl_format_layout *isl_layout =
+      anv_format_for_vk_format(format)->isl_layout;
+   return (VkExtent3D) {
+      .width  = DIV_ROUND_UP(extent->width , isl_layout->bw),
+      .height = DIV_ROUND_UP(extent->height, isl_layout->bh),
+      .depth  = DIV_ROUND_UP(extent->depth , isl_layout->bd),
+   };
+}
+
+/* Returns the user-provided VkBufferImageCopy::imageOffset in units of
+ * elements rather than texels. One element equals one texel or one block
+ * if the image is uncompressed or compressed, respectively.
+ */
+static struct VkOffset3D
+meta_region_offset_el(const struct anv_image *image,
+                      const struct VkOffset3D *offset)
+{
+   const struct isl_format_layout *isl_layout = image->format->isl_layout;
+   return (VkOffset3D) {
+      .x = offset->x / isl_layout->bw,
+      .y = offset->y / isl_layout->bh,
+      .z = offset->z / isl_layout->bd,
+   };
+}
+
+static struct anv_meta_blit2d_surf
+blit_surf_for_image(const struct anv_image* image,
+                    const struct isl_surf *img_isl_surf)
+{
+   return (struct anv_meta_blit2d_surf) {
+      .bo = image->bo,
+      .tiling = img_isl_surf->tiling,
+      .base_offset = image->offset,
+      .bs = isl_format_get_layout(img_isl_surf->format)->bs,
+      .pitch = isl_surf_get_row_pitch(img_isl_surf),
+   };
+}
+
+static void
+do_buffer_copy(struct anv_cmd_buffer *cmd_buffer,
+               struct anv_bo *src, uint64_t src_offset,
+               struct anv_bo *dest, uint64_t dest_offset,
+               int width, int height, int bs)
+{
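+   /* Copy a width x height grid of bs-byte blocks between two linear
+    * buffer ranges using the 2D blit meta path.
+    */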
+   struct anv_meta_blit2d_surf b_src = {
+      .bo = src,
+      .tiling = ISL_TILING_LINEAR,
+      .base_offset = src_offset,
+      .bs = bs,
+      .pitch = width * bs,
+   };
+   struct anv_meta_blit2d_surf b_dst = {
+      .bo = dest,
+      .tiling = ISL_TILING_LINEAR,
+      .base_offset = dest_offset,
+      .bs = bs,
+      .pitch = width * bs,
+   };
+   struct anv_meta_blit2d_rect rect = {
+      .width = width,
+      .height = height,
+   };
+   anv_meta_blit2d(cmd_buffer, &b_src, &b_dst, 1, &rect);
+}
+
+static void
+meta_copy_buffer_to_image(struct anv_cmd_buffer *cmd_buffer,
+                          struct anv_buffer* buffer,
+                          struct anv_image* image,
+                          uint32_t regionCount,
+                          const VkBufferImageCopy* pRegions,
+                          bool forward)
+{
+   struct anv_meta_saved_state saved_state;
+
+   /* The Vulkan 1.0 spec says "dstImage must have a sample count equal to
+    * VK_SAMPLE_COUNT_1_BIT."
+    */
+   assert(image->samples == 1);
+
+   anv_meta_begin_blit2d(cmd_buffer, &saved_state);
+
+   for (unsigned r = 0; r < regionCount; r++) {
+
+      /* Start creating blit rect */
+      const VkOffset3D img_offset_el =
+         meta_region_offset_el(image, &pRegions[r].imageOffset);
+      const VkExtent3D bufferExtent = {
+         .width = pRegions[r].bufferRowLength,
+         .height = pRegions[r].bufferImageHeight,
+      };
+      const VkExtent3D buf_extent_el =
+         meta_region_extent_el(image->vk_format, &bufferExtent);
+      const VkExtent3D img_extent_el =
+         meta_region_extent_el(image->vk_format, &pRegions[r].imageExtent);
+      struct anv_meta_blit2d_rect rect = {
+         .width = MAX2(buf_extent_el.width, img_extent_el.width),
+         .height = MAX2(buf_extent_el.height, img_extent_el.height),
+      };
+
+      /* Create blit surfaces */
+      VkImageAspectFlags aspect = pRegions[r].imageSubresource.aspectMask;
+      const struct isl_surf *img_isl_surf =
+         &anv_image_get_surface_for_aspect_mask(image, aspect)->isl;
+      struct anv_meta_blit2d_surf img_bsurf =
+         blit_surf_for_image(image, img_isl_surf);
+      struct anv_meta_blit2d_surf buf_bsurf = {
+         .bo = buffer->bo,
+         .tiling = ISL_TILING_LINEAR,
+         .base_offset = buffer->offset + pRegions[r].bufferOffset,
+         .bs = forward ? image->format->isl_layout->bs : img_bsurf.bs,
+         .pitch = rect.width * buf_bsurf.bs,
+      };
+
+      /* Set direction-dependent variables */
+      struct anv_meta_blit2d_surf *dst_bsurf = forward ? &img_bsurf : &buf_bsurf;
+      struct anv_meta_blit2d_surf *src_bsurf = forward ? &buf_bsurf : &img_bsurf;
+      uint32_t *x_offset = forward ? &rect.dst_x : &rect.src_x;
+      uint32_t *y_offset = forward ? &rect.dst_y : &rect.src_y;
+
+      /* Loop through each 3D or array slice */
+      unsigned num_slices_3d = pRegions[r].imageExtent.depth;
+      unsigned num_slices_array = pRegions[r].imageSubresource.layerCount;
+      unsigned slice_3d = 0;
+      unsigned slice_array = 0;
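+      /* Only one of the two counters advances per iteration: 3D images step
+       * through depth slices, array images through layers.
+       */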
+      while (slice_3d < num_slices_3d && slice_array < num_slices_array) {
+
+         /* Finish creating blit rect */
+         isl_surf_get_image_offset_el(img_isl_surf,
+                                    pRegions[r].imageSubresource.mipLevel,
+                                    pRegions[r].imageSubresource.baseArrayLayer
+                                       + slice_array,
+                                    pRegions[r].imageOffset.z + slice_3d,
+                                    x_offset,
+                                    y_offset);
+         *x_offset += img_offset_el.x;
+         *y_offset += img_offset_el.y;
+
+         /* Perform Blit */
+         anv_meta_blit2d(cmd_buffer, src_bsurf, dst_bsurf, 1, &rect);
+
+         /* Once we've done the blit, all of the actual information about
+          * the image is embedded in the command buffer, so we can simply
+          * advance the buffer surface's base offset to the next slice,
+          * effectively re-binding it to different backing memory.
+          */
+         buf_bsurf.base_offset += rect.width * rect.height * buf_bsurf.bs;
+
+         if (image->type == VK_IMAGE_TYPE_3D)
+            slice_3d++;
+         else
+            slice_array++;
+      }
+   }
+   anv_meta_end_blit2d(cmd_buffer, &saved_state);
+}
+
+void anv_CmdCopyBufferToImage(
+    VkCommandBuffer                             commandBuffer,
+    VkBuffer                                    srcBuffer,
+    VkImage                                     destImage,
+    VkImageLayout                               destImageLayout,
+    uint32_t                                    regionCount,
+    const VkBufferImageCopy*                    pRegions)
+{
+   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
+   ANV_FROM_HANDLE(anv_image, dest_image, destImage);
+   ANV_FROM_HANDLE(anv_buffer, src_buffer, srcBuffer);
+
+   meta_copy_buffer_to_image(cmd_buffer, src_buffer, dest_image,
+                             regionCount, pRegions, true);
+}
+
+void anv_CmdCopyImageToBuffer(
+    VkCommandBuffer                             commandBuffer,
+    VkImage                                     srcImage,
+    VkImageLayout                               srcImageLayout,
+    VkBuffer                                    destBuffer,
+    uint32_t                                    regionCount,
+    const VkBufferImageCopy*                    pRegions)
+{
+   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
+   ANV_FROM_HANDLE(anv_image, src_image, srcImage);
+   ANV_FROM_HANDLE(anv_buffer, dst_buffer, destBuffer);
+
+   meta_copy_buffer_to_image(cmd_buffer, dst_buffer, src_image,
+                             regionCount, pRegions, false);
+}
+
+void anv_CmdCopyImage(
+    VkCommandBuffer                             commandBuffer,
+    VkImage                                     srcImage,
+    VkImageLayout                               srcImageLayout,
+    VkImage                                     destImage,
+    VkImageLayout                               destImageLayout,
+    uint32_t                                    regionCount,
+    const VkImageCopy*                          pRegions)
+{
+   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
+   ANV_FROM_HANDLE(anv_image, src_image, srcImage);
+   ANV_FROM_HANDLE(anv_image, dest_image, destImage);
+   struct anv_meta_saved_state saved_state;
+
+   /* From the Vulkan 1.0 spec:
+    *
+    *    vkCmdCopyImage can be used to copy image data between multisample
+    *    images, but both images must have the same number of samples.
+    */
+   assert(src_image->samples == dest_image->samples);
+
+   anv_meta_begin_blit2d(cmd_buffer, &saved_state);
+
+   for (unsigned r = 0; r < regionCount; r++) {
+      assert(pRegions[r].srcSubresource.aspectMask ==
+             pRegions[r].dstSubresource.aspectMask);
+
+      VkImageAspectFlags aspect = pRegions[r].srcSubresource.aspectMask;
+
+      /* Create blit surfaces */
+      struct isl_surf *src_isl_surf =
+         &anv_image_get_surface_for_aspect_mask(src_image, aspect)->isl;
+      struct isl_surf *dst_isl_surf =
+         &anv_image_get_surface_for_aspect_mask(dest_image, aspect)->isl;
+      struct anv_meta_blit2d_surf b_src =
+         blit_surf_for_image(src_image, src_isl_surf);
+      struct anv_meta_blit2d_surf b_dst =
+         blit_surf_for_image(dest_image, dst_isl_surf);
+
+      /* Start creating blit rect */
+      const VkOffset3D dst_offset_el =
+         meta_region_offset_el(dest_image, &pRegions[r].dstOffset);
+      const VkOffset3D src_offset_el =
+         meta_region_offset_el(src_image, &pRegions[r].srcOffset);
+      const VkExtent3D img_extent_el =
+         meta_region_extent_el(src_image->vk_format, &pRegions[r].extent);
+      struct anv_meta_blit2d_rect rect = {
+         .width = img_extent_el.width,
+         .height = img_extent_el.height,
+      };
+
+      /* Loop through each 3D or array slice */
+      unsigned num_slices_3d = pRegions[r].extent.depth;
+      unsigned num_slices_array = pRegions[r].dstSubresource.layerCount;
+      unsigned slice_3d = 0;
+      unsigned slice_array = 0;
+      while (slice_3d < num_slices_3d && slice_array < num_slices_array) {
+
+         /* Finish creating blit rect */
+         isl_surf_get_image_offset_el(dst_isl_surf,
+                                    pRegions[r].dstSubresource.mipLevel,
+                                    pRegions[r].dstSubresource.baseArrayLayer
+                                       + slice_array,
+                                    pRegions[r].dstOffset.z + slice_3d,
+                                    &rect.dst_x,
+                                    &rect.dst_y);
+         isl_surf_get_image_offset_el(src_isl_surf,
+                                    pRegions[r].srcSubresource.mipLevel,
+                                    pRegions[r].srcSubresource.baseArrayLayer
+                                       + slice_array,
+                                    pRegions[r].srcOffset.z + slice_3d,
+                                    &rect.src_x,
+                                    &rect.src_y);
+         rect.dst_x += dst_offset_el.x;
+         rect.dst_y += dst_offset_el.y;
+         rect.src_x += src_offset_el.x;
+         rect.src_y += src_offset_el.y;
+
+         /* Perform Blit */
+         anv_meta_blit2d(cmd_buffer, &b_src, &b_dst, 1, &rect);
+
+         if (dest_image->type == VK_IMAGE_TYPE_3D)
+            slice_3d++;
+         else
+            slice_array++;
+      }
+   }
+
+   anv_meta_end_blit2d(cmd_buffer, &saved_state);
+}
+
+void anv_CmdCopyBuffer(
+    VkCommandBuffer                             commandBuffer,
+    VkBuffer                                    srcBuffer,
+    VkBuffer                                    destBuffer,
+    uint32_t                                    regionCount,
+    const VkBufferCopy*                         pRegions)
+{
+   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
+   ANV_FROM_HANDLE(anv_buffer, src_buffer, srcBuffer);
+   ANV_FROM_HANDLE(anv_buffer, dest_buffer, destBuffer);
+
+   struct anv_meta_saved_state saved_state;
+
+   anv_meta_begin_blit2d(cmd_buffer, &saved_state);
+
+   for (unsigned r = 0; r < regionCount; r++) {
+      uint64_t src_offset = src_buffer->offset + pRegions[r].srcOffset;
+      uint64_t dest_offset = dest_buffer->offset + pRegions[r].dstOffset;
+      uint64_t copy_size = pRegions[r].size;
+
+      /* First, we compute the largest block size that can be used with the
+       * given offsets and size.
+       */
+      int bs = 16;
+
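+      /* ffs() - 1 gives the index of the lowest set bit, so 1 << fs is the
+       * largest power of two dividing the value; clamp bs down to it.
+       */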
+      int fs = ffs(src_offset) - 1;
+      if (fs != -1)
+         bs = MIN2(bs, 1 << fs);
+      assert(src_offset % bs == 0);
+
+      fs = ffs(dest_offset) - 1;
+      if (fs != -1)
+         bs = MIN2(bs, 1 << fs);
+      assert(dest_offset % bs == 0);
+
+      fs = ffs(pRegions[r].size) - 1;
+      if (fs != -1)
+         bs = MIN2(bs, 1 << fs);
+      assert(pRegions[r].size % bs == 0);
+
+      /* This is the maximum possible width/height our HW can handle */
+      uint64_t max_surface_dim = 1 << 14;
+
+      /* First, we make a bunch of max-sized copies */
+      uint64_t max_copy_size = max_surface_dim * max_surface_dim * bs;
+      while (copy_size >= max_copy_size) {
+         do_buffer_copy(cmd_buffer, src_buffer->bo, src_offset,
+                        dest_buffer->bo, dest_offset,
+                        max_surface_dim, max_surface_dim, bs);
+         copy_size -= max_copy_size;
+         src_offset += max_copy_size;
+         dest_offset += max_copy_size;
+      }
+
+      uint64_t height = copy_size / (max_surface_dim * bs);
+      assert(height < max_surface_dim);
+      if (height != 0) {
+         uint64_t rect_copy_size = height * max_surface_dim * bs;
+         do_buffer_copy(cmd_buffer, src_buffer->bo, src_offset,
+                        dest_buffer->bo, dest_offset,
+                        max_surface_dim, height, bs);
+         copy_size -= rect_copy_size;
+         src_offset += rect_copy_size;
+         dest_offset += rect_copy_size;
+      }
+
+      if (copy_size != 0) {
+         do_buffer_copy(cmd_buffer, src_buffer->bo, src_offset,
+                        dest_buffer->bo, dest_offset,
+                        copy_size / bs, 1, bs);
+      }
+   }
+
+   anv_meta_end_blit2d(cmd_buffer, &saved_state);
+}
+
+void anv_CmdUpdateBuffer(
+    VkCommandBuffer                             commandBuffer,
+    VkBuffer                                    dstBuffer,
+    VkDeviceSize                                dstOffset,
+    VkDeviceSize                                dataSize,
+    const uint32_t*                             pData)
+{
+   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
+   ANV_FROM_HANDLE(anv_buffer, dst_buffer, dstBuffer);
+   struct anv_meta_saved_state saved_state;
+
+   anv_meta_begin_blit2d(cmd_buffer, &saved_state);
+
+   /* We can't quite grab a full block because the state stream needs a
+    * little data at the top to build its linked list.
+    */
+   const uint32_t max_update_size =
+      cmd_buffer->device->dynamic_state_block_pool.block_size - 64;
+
+   assert(max_update_size < (1 << 14) * 4);
+
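+   /* Stream the data through dynamic state memory in chunks, blitting each
+    * chunk from the state buffer into the destination buffer.
+    */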
+   while (dataSize) {
+      const uint32_t copy_size = MIN2(dataSize, max_update_size);
+
+      struct anv_state tmp_data =
+         anv_cmd_buffer_alloc_dynamic_state(cmd_buffer, copy_size, 64);
+
+      memcpy(tmp_data.map, pData, copy_size);
+
+      int bs;
+      if ((copy_size & 15) == 0 && (dstOffset & 15) == 0) {
+         bs = 16;
+      } else if ((copy_size & 7) == 0 && (dstOffset & 7) == 0) {
+         bs = 8;
+      } else {
+         assert((copy_size & 3) == 0 && (dstOffset & 3) == 0);
+         bs = 4;
+      }
+
+      do_buffer_copy(cmd_buffer,
+                     &cmd_buffer->device->dynamic_state_block_pool.bo,
+                     tmp_data.offset,
+                     dst_buffer->bo, dst_buffer->offset + dstOffset,
+                     copy_size / bs, 1, bs);
+
+      dataSize -= copy_size;
+      dstOffset += copy_size;
+      pData = (void *)pData + copy_size;
+   }
+
+   anv_meta_end_blit2d(cmd_buffer, &saved_state);
+}
diff --git a/src/intel/vulkan/anv_meta_resolve.c b/src/intel/vulkan/anv_meta_resolve.c
new file mode 100644 (file)
index 0000000..19fb3ad
--- /dev/null
@@ -0,0 +1,881 @@
+/*
+ * Copyright © 2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include <assert.h>
+#include <stdbool.h>
+
+#include "anv_meta.h"
+#include "anv_private.h"
+#include "nir/nir_builder.h"
+
+/**
+ * Vertex attributes used by all pipelines.
+ */
+struct vertex_attrs {
+   struct anv_vue_header vue_header;
+   float position[2]; /**< 3DPRIM_RECTLIST */
+   float tex_position[2];
+};
+
+static void
+meta_resolve_save(struct anv_meta_saved_state *saved_state,
+                  struct anv_cmd_buffer *cmd_buffer)
+{
+   anv_meta_save(saved_state, cmd_buffer,
+                 (1 << VK_DYNAMIC_STATE_VIEWPORT) |
+                 (1 << VK_DYNAMIC_STATE_SCISSOR));
+
+   cmd_buffer->state.dynamic.viewport.count = 0;
+   cmd_buffer->state.dynamic.scissor.count = 0;
+}
+
+static void
+meta_resolve_restore(struct anv_meta_saved_state *saved_state,
+                     struct anv_cmd_buffer *cmd_buffer)
+{
+   anv_meta_restore(saved_state, cmd_buffer);
+}
+
+static VkPipeline *
+get_pipeline_h(struct anv_device *device, uint32_t samples)
+{
+   uint32_t i = ffs(samples) - 2; /* log2(samples) - 1 */
+
+   assert(samples >= 2);
+   assert(i < ARRAY_SIZE(device->meta_state.resolve.pipelines));
+
+   return &device->meta_state.resolve.pipelines[i];
+}
+
+static nir_shader *
+build_nir_vs(void)
+{
+   const struct glsl_type *vec4 = glsl_vec4_type();
+
+   nir_builder b;
+   nir_variable *a_position;
+   nir_variable *v_position;
+   nir_variable *a_tex_position;
+   nir_variable *v_tex_position;
+
+   nir_builder_init_simple_shader(&b, NULL, MESA_SHADER_VERTEX, NULL);
+   b.shader->info.name = ralloc_strdup(b.shader, "meta_resolve_vs");
+
+   a_position = nir_variable_create(b.shader, nir_var_shader_in, vec4,
+                                    "a_position");
+   a_position->data.location = VERT_ATTRIB_GENERIC0;
+
+   v_position = nir_variable_create(b.shader, nir_var_shader_out, vec4,
+                                    "gl_Position");
+   v_position->data.location = VARYING_SLOT_POS;
+
+   a_tex_position = nir_variable_create(b.shader, nir_var_shader_in, vec4,
+                                    "a_tex_position");
+   a_tex_position->data.location = VERT_ATTRIB_GENERIC1;
+
+   v_tex_position = nir_variable_create(b.shader, nir_var_shader_out, vec4,
+                                    "v_tex_position");
+   v_tex_position->data.location = VARYING_SLOT_VAR0;
+
+   nir_copy_var(&b, v_position, a_position);
+   nir_copy_var(&b, v_tex_position, a_tex_position);
+
+   return b.shader;
+}
+
+static nir_shader *
+build_nir_fs(uint32_t num_samples)
+{
+   const struct glsl_type *vec4 = glsl_vec4_type();
+
+   const struct glsl_type *sampler2DMS =
+         glsl_sampler_type(GLSL_SAMPLER_DIM_MS,
+                           /*is_shadow*/ false,
+                           /*is_array*/ false,
+                           GLSL_TYPE_FLOAT);
+
+   nir_builder b;
+   nir_variable *u_tex; /* uniform sampler */
+   nir_variable *v_position; /* vec4, varying fragment position */
+   nir_variable *v_tex_position; /* vec4, varying texture coordinate */
+   nir_variable *f_color; /* vec4, fragment output color */
+   nir_ssa_def *accum; /* vec4, accumulation of sample values */
+
+   nir_builder_init_simple_shader(&b, NULL, MESA_SHADER_FRAGMENT, NULL);
+   b.shader->info.name = ralloc_asprintf(b.shader,
+                                         "meta_resolve_fs_samples%02d",
+                                         num_samples);
+
+   u_tex = nir_variable_create(b.shader, nir_var_uniform, sampler2DMS,
+                                   "u_tex");
+   u_tex->data.descriptor_set = 0;
+   u_tex->data.binding = 0;
+
+   v_position = nir_variable_create(b.shader, nir_var_shader_in, vec4,
+                                     "v_position");
+   v_position->data.location = VARYING_SLOT_POS;
+   v_position->data.origin_upper_left = true;
+
+   v_tex_position = nir_variable_create(b.shader, nir_var_shader_in, vec4,
+                                    "v_tex_position");
+   v_tex_position->data.location = VARYING_SLOT_VAR0;
+
+   f_color = nir_variable_create(b.shader, nir_var_shader_out, vec4,
+                                 "f_color");
+   f_color->data.location = FRAG_RESULT_DATA0;
+
+   accum = nir_imm_vec4(&b, 0, 0, 0, 0);
+
+   nir_ssa_def *tex_position_ivec =
+      nir_f2i(&b, nir_load_var(&b, v_tex_position));
+
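+   /* Fetch every sample with txf_ms and accumulate; the sum is divided by
+    * the sample count after the loop to produce the resolved color.
+    */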
+   for (uint32_t i = 0; i < num_samples; ++i) {
+      nir_tex_instr *tex;
+
+      tex = nir_tex_instr_create(b.shader, /*num_srcs*/ 2);
+      tex->texture = nir_deref_var_create(tex, u_tex);
+      tex->sampler = nir_deref_var_create(tex, u_tex);
+      tex->sampler_dim = GLSL_SAMPLER_DIM_MS;
+      tex->op = nir_texop_txf_ms;
+      tex->src[0].src = nir_src_for_ssa(tex_position_ivec);
+      tex->src[0].src_type = nir_tex_src_coord;
+      tex->src[1].src = nir_src_for_ssa(nir_imm_int(&b, i));
+      tex->src[1].src_type = nir_tex_src_ms_index;
+      tex->dest_type = nir_type_float;
+      tex->is_array = false;
+      tex->coord_components = 3;
+      nir_ssa_dest_init(&tex->instr, &tex->dest, /*num_components*/ 4, "tex");
+      nir_builder_instr_insert(&b, &tex->instr);
+
+      accum = nir_fadd(&b, accum, &tex->dest.ssa);
+   }
+
+   accum = nir_fdiv(&b, accum, nir_imm_float(&b, num_samples));
+   nir_store_var(&b, f_color, accum, /*writemask*/ 4);
+
+   return b.shader;
+}
+
+static VkResult
+create_pass(struct anv_device *device)
+{
+   VkResult result;
+   VkDevice device_h = anv_device_to_handle(device);
+   const VkAllocationCallbacks *alloc = &device->meta_state.alloc;
+
+   result = anv_CreateRenderPass(device_h,
+      &(VkRenderPassCreateInfo) {
+         .sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO,
+         .attachmentCount = 1,
+         .pAttachments = &(VkAttachmentDescription) {
+            .format = VK_FORMAT_UNDEFINED, /* Our shaders don't care */
+            .samples = 1,
+            .loadOp = VK_ATTACHMENT_LOAD_OP_LOAD,
+            .storeOp = VK_ATTACHMENT_STORE_OP_STORE,
+            .initialLayout = VK_IMAGE_LAYOUT_GENERAL,
+            .finalLayout = VK_IMAGE_LAYOUT_GENERAL,
+         },
+         .subpassCount = 1,
+         .pSubpasses = &(VkSubpassDescription) {
+            .pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS,
+            .inputAttachmentCount = 0,
+            .colorAttachmentCount = 1,
+            .pColorAttachments = &(VkAttachmentReference) {
+               .attachment = 0,
+               .layout = VK_IMAGE_LAYOUT_GENERAL,
+            },
+            .pResolveAttachments = NULL,
+            .pDepthStencilAttachment = &(VkAttachmentReference) {
+               .attachment = VK_ATTACHMENT_UNUSED,
+            },
+            .preserveAttachmentCount = 0,
+            .pPreserveAttachments = NULL,
+         },
+         .dependencyCount = 0,
+      },
+      alloc,
+      &device->meta_state.resolve.pass);
+
+   return result;
+}
+
+static VkResult
+create_pipeline(struct anv_device *device,
+                uint32_t num_samples,
+                VkShaderModule vs_module_h)
+{
+   VkResult result;
+   VkDevice device_h = anv_device_to_handle(device);
+
+   struct anv_shader_module fs_module = {
+      .nir = build_nir_fs(num_samples),
+   };
+
+   if (!fs_module.nir) {
+      /* XXX: Need more accurate error */
+      result = VK_ERROR_OUT_OF_HOST_MEMORY;
+      goto cleanup;
+   }
+
+   result = anv_graphics_pipeline_create(device_h,
+      VK_NULL_HANDLE,
+      &(VkGraphicsPipelineCreateInfo) {
+         .sType = VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO,
+         .stageCount = 2,
+         .pStages = (VkPipelineShaderStageCreateInfo[]) {
+            {
+               .sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
+               .stage = VK_SHADER_STAGE_VERTEX_BIT,
+               .module = vs_module_h,
+               .pName = "main",
+            },
+            {
+               .sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
+               .stage = VK_SHADER_STAGE_FRAGMENT_BIT,
+               .module = anv_shader_module_to_handle(&fs_module),
+               .pName = "main",
+            },
+         },
+         .pVertexInputState = &(VkPipelineVertexInputStateCreateInfo) {
+            .sType = VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO,
+            .vertexBindingDescriptionCount = 1,
+            .pVertexBindingDescriptions = (VkVertexInputBindingDescription[]) {
+               {
+                  .binding = 0,
+                  .stride = sizeof(struct vertex_attrs),
+                  .inputRate = VK_VERTEX_INPUT_RATE_VERTEX
+               },
+            },
+            .vertexAttributeDescriptionCount = 3,
+            .pVertexAttributeDescriptions = (VkVertexInputAttributeDescription[]) {
+               {
+                  /* VUE Header */
+                  .location = 0,
+                  .binding = 0,
+                  .format = VK_FORMAT_R32G32B32A32_UINT,
+                  .offset = offsetof(struct vertex_attrs, vue_header),
+               },
+               {
+                  /* Position */
+                  .location = 1,
+                  .binding = 0,
+                  .format = VK_FORMAT_R32G32_SFLOAT,
+                  .offset = offsetof(struct vertex_attrs, position),
+               },
+               {
+                  /* Texture Coordinate */
+                  .location = 2,
+                  .binding = 0,
+                  .format = VK_FORMAT_R32G32_SFLOAT,
+                  .offset = offsetof(struct vertex_attrs, tex_position),
+               },
+            },
+         },
+         .pInputAssemblyState = &(VkPipelineInputAssemblyStateCreateInfo) {
+            .sType = VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO,
+            .topology = VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP,
+            .primitiveRestartEnable = false,
+         },
+         .pViewportState = &(VkPipelineViewportStateCreateInfo) {
+            .sType = VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO,
+            .viewportCount = 1,
+            .scissorCount = 1,
+         },
+         .pRasterizationState = &(VkPipelineRasterizationStateCreateInfo) {
+            .sType = VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO,
+            .depthClampEnable = false,
+            .rasterizerDiscardEnable = false,
+            .polygonMode = VK_POLYGON_MODE_FILL,
+            .cullMode = VK_CULL_MODE_NONE,
+            .frontFace = VK_FRONT_FACE_COUNTER_CLOCKWISE,
+         },
+         .pMultisampleState = &(VkPipelineMultisampleStateCreateInfo) {
+            .sType = VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO,
+            .rasterizationSamples = 1,
+            .sampleShadingEnable = false,
+            .pSampleMask = (VkSampleMask[]) { 0x1 },
+            .alphaToCoverageEnable = false,
+            .alphaToOneEnable = false,
+         },
+         .pColorBlendState = &(VkPipelineColorBlendStateCreateInfo) {
+            .sType = VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO,
+            .logicOpEnable = false,
+            .attachmentCount = 1,
+            .pAttachments = (VkPipelineColorBlendAttachmentState []) {
+               {
+                  .colorWriteMask = VK_COLOR_COMPONENT_R_BIT |
+                                    VK_COLOR_COMPONENT_G_BIT |
+                                    VK_COLOR_COMPONENT_B_BIT |
+                                    VK_COLOR_COMPONENT_A_BIT,
+               },
+            },
+         },
+         .pDynamicState = &(VkPipelineDynamicStateCreateInfo) {
+            .sType = VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO,
+            .dynamicStateCount = 2,
+            .pDynamicStates = (VkDynamicState[]) {
+               VK_DYNAMIC_STATE_VIEWPORT,
+               VK_DYNAMIC_STATE_SCISSOR,
+            },
+         },
+         .layout = device->meta_state.resolve.pipeline_layout,
+         .renderPass = device->meta_state.resolve.pass,
+         .subpass = 0,
+      },
+      &(struct anv_graphics_pipeline_create_info) {
+         .color_attachment_count = -1,
+         .use_repclear = false,
+         .disable_viewport = true,
+         .disable_scissor = true,
+         .disable_vs = true,
+         .use_rectlist = true
+      },
+      &device->meta_state.alloc,
+      get_pipeline_h(device, num_samples));
+   if (result != VK_SUCCESS)
+      goto cleanup;
+
+   goto cleanup;
+
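+   /* Success or failure, the temporary fragment shader NIR is no longer
+    * needed once pipeline creation has returned.
+    */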
+cleanup:
+   ralloc_free(fs_module.nir);
+   return result;
+}
+
+void
+anv_device_finish_meta_resolve_state(struct anv_device *device)
+{
+   struct anv_meta_state *state = &device->meta_state;
+   VkDevice device_h = anv_device_to_handle(device);
+   VkRenderPass pass_h = device->meta_state.resolve.pass;
+   VkPipelineLayout pipeline_layout_h = device->meta_state.resolve.pipeline_layout;
+   VkDescriptorSetLayout ds_layout_h = device->meta_state.resolve.ds_layout;
+   const VkAllocationCallbacks *alloc = &device->meta_state.alloc;
+
+   if (pass_h)
+      ANV_CALL(DestroyRenderPass)(device_h, pass_h,
+                                  &device->meta_state.alloc);
+
+   if (pipeline_layout_h)
+      ANV_CALL(DestroyPipelineLayout)(device_h, pipeline_layout_h, alloc);
+
+   if (ds_layout_h)
+      ANV_CALL(DestroyDescriptorSetLayout)(device_h, ds_layout_h, alloc);
+
+   for (uint32_t i = 0; i < ARRAY_SIZE(state->resolve.pipelines); ++i) {
+      VkPipeline pipeline_h = state->resolve.pipelines[i];
+
+      if (pipeline_h) {
+         ANV_CALL(DestroyPipeline)(device_h, pipeline_h, alloc);
+      }
+   }
+}
+
+VkResult
+anv_device_init_meta_resolve_state(struct anv_device *device)
+{
+   VkResult res = VK_SUCCESS;
+   VkDevice device_h = anv_device_to_handle(device);
+   const VkAllocationCallbacks *alloc = &device->meta_state.alloc;
+
+   const isl_sample_count_mask_t sample_count_mask =
+      isl_device_get_sample_counts(&device->isl_dev);
+
+   zero(device->meta_state.resolve);
+
+   struct anv_shader_module vs_module = { .nir = build_nir_vs() };
+   if (!vs_module.nir) {
+      /* XXX: Need more accurate error */
+      res = VK_ERROR_OUT_OF_HOST_MEMORY;
+      goto fail;
+   }
+
+   VkShaderModule vs_module_h = anv_shader_module_to_handle(&vs_module);
+
+   res = anv_CreateDescriptorSetLayout(device_h,
+      &(VkDescriptorSetLayoutCreateInfo) {
+         .sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO,
+         .bindingCount = 1,
+         .pBindings = (VkDescriptorSetLayoutBinding[]) {
+            {
+               .binding = 0,
+               .descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,
+               .descriptorCount = 1,
+               .stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT,
+            },
+         },
+      },
+      alloc,
+      &device->meta_state.resolve.ds_layout);
+   if (res != VK_SUCCESS)
+      goto fail;
+
+   res = anv_CreatePipelineLayout(device_h,
+      &(VkPipelineLayoutCreateInfo) {
+         .sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO,
+         .setLayoutCount = 1,
+         .pSetLayouts = (VkDescriptorSetLayout[]) {
+            device->meta_state.resolve.ds_layout,
+         },
+      },
+      alloc,
+      &device->meta_state.resolve.pipeline_layout);
+   if (res != VK_SUCCESS)
+      goto fail;
+
+   res = create_pass(device);
+   if (res != VK_SUCCESS)
+      goto fail;
+
+   for (uint32_t i = 0;
+        i < ARRAY_SIZE(device->meta_state.resolve.pipelines); ++i) {
+
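+      /* Pipeline slot i handles a sample count of 1 << (i + 1): 2, 4, 8, ...
+       * Sample counts the device does not support are simply skipped.
+       */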
+      uint32_t sample_count = 1 << (1 + i);
+      if (!(sample_count_mask & sample_count))
+         continue;
+
+      res = create_pipeline(device, sample_count, vs_module_h);
+      if (res != VK_SUCCESS)
+         goto fail;
+   }
+
+   goto cleanup;
+
+fail:
+   anv_device_finish_meta_resolve_state(device);
+
+cleanup:
+   ralloc_free(vs_module.nir);
+
+   return res;
+}
+
+static void
+emit_resolve(struct anv_cmd_buffer *cmd_buffer,
+             struct anv_image_view *src_iview,
+             const VkOffset2D *src_offset,
+             struct anv_image_view *dest_iview,
+             const VkOffset2D *dest_offset,
+             const VkExtent2D *resolve_extent)
+{
+   struct anv_device *device = cmd_buffer->device;
+   VkDevice device_h = anv_device_to_handle(device);
+   VkCommandBuffer cmd_buffer_h = anv_cmd_buffer_to_handle(cmd_buffer);
+   const struct anv_framebuffer *fb = cmd_buffer->state.framebuffer;
+   const struct anv_image *src_image = src_iview->image;
+
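+   /* Three vertices for the RECTLIST draw below: position covers the
+    * destination region and tex_position carries the matching source
+    * coordinates.
+    */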
+   const struct vertex_attrs vertex_data[3] = {
+      {
+         .vue_header = {0},
+         .position = {
+            dest_offset->x + resolve_extent->width,
+            dest_offset->y + resolve_extent->height,
+         },
+         .tex_position = {
+            src_offset->x + resolve_extent->width,
+            src_offset->y + resolve_extent->height,
+         },
+      },
+      {
+         .vue_header = {0},
+         .position = {
+            dest_offset->x,
+            dest_offset->y + resolve_extent->height,
+         },
+         .tex_position = {
+            src_offset->x,
+            src_offset->y + resolve_extent->height,
+         },
+      },
+      {
+         .vue_header = {0},
+         .position = {
+            dest_offset->x,
+            dest_offset->y,
+         },
+         .tex_position = {
+            src_offset->x,
+            src_offset->y,
+         },
+      },
+   };
+
+   struct anv_state vertex_mem =
+      anv_cmd_buffer_emit_dynamic(cmd_buffer, vertex_data,
+                                  sizeof(vertex_data), 16);
+
+   struct anv_buffer vertex_buffer = {
+      .device = device,
+      .size = sizeof(vertex_data),
+      .bo = &cmd_buffer->dynamic_state_stream.block_pool->bo,
+      .offset = vertex_mem.offset,
+   };
+
+   VkBuffer vertex_buffer_h = anv_buffer_to_handle(&vertex_buffer);
+
+   anv_CmdBindVertexBuffers(cmd_buffer_h,
+      /*firstBinding*/ 0,
+      /*bindingCount*/ 1,
+      (VkBuffer[]) { vertex_buffer_h },
+      (VkDeviceSize[]) { 0 });
+
+   VkSampler sampler_h;
+   ANV_CALL(CreateSampler)(device_h,
+      &(VkSamplerCreateInfo) {
+         .sType = VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO,
+         .magFilter = VK_FILTER_NEAREST,
+         .minFilter = VK_FILTER_NEAREST,
+         .mipmapMode = VK_SAMPLER_MIPMAP_MODE_NEAREST,
+         .addressModeU = VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE,
+         .addressModeV = VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE,
+         .addressModeW = VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE,
+         .mipLodBias = 0.0,
+         .anisotropyEnable = false,
+         .compareEnable = false,
+         .minLod = 0.0,
+         .maxLod = 0.0,
+         .unnormalizedCoordinates = false,
+      },
+      &cmd_buffer->pool->alloc,
+      &sampler_h);
+
+   VkDescriptorPool desc_pool;
+   anv_CreateDescriptorPool(anv_device_to_handle(device),
+      &(const VkDescriptorPoolCreateInfo) {
+         .sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO,
+         .pNext = NULL,
+         .flags = 0,
+         .maxSets = 1,
+         .poolSizeCount = 1,
+         .pPoolSizes = (VkDescriptorPoolSize[]) {
+            {
+               .type = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,
+               .descriptorCount = 1
+            },
+         }
+      }, &cmd_buffer->pool->alloc, &desc_pool);
+
+   VkDescriptorSet desc_set_h;
+   anv_AllocateDescriptorSets(device_h,
+      &(VkDescriptorSetAllocateInfo) {
+         .sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO,
+         .descriptorPool = desc_pool,
+         .descriptorSetCount = 1,
+         .pSetLayouts = (VkDescriptorSetLayout[]) {
+            device->meta_state.resolve.ds_layout,
+         },
+      },
+      &desc_set_h);
+
+   anv_UpdateDescriptorSets(device_h,
+      /*writeCount*/ 1,
+      (VkWriteDescriptorSet[]) {
+         {
+            .sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
+            .dstSet = desc_set_h,
+            .dstBinding = 0,
+            .dstArrayElement = 0,
+            .descriptorCount = 1,
+            .descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,
+            .pImageInfo = (VkDescriptorImageInfo[]) {
+               {
+                  .sampler = sampler_h,
+                  .imageView = anv_image_view_to_handle(src_iview),
+                  .imageLayout = VK_IMAGE_LAYOUT_GENERAL,
+               },
+            },
+         },
+      },
+      /*copyCount*/ 0,
+      /*copies*/ NULL);
+
+   ANV_CALL(CmdSetViewport)(cmd_buffer_h,
+      /*firstViewport*/ 0,
+      /*viewportCount*/ 1,
+      (VkViewport[]) {
+         {
+            .x = 0,
+            .y = 0,
+            .width = fb->width,
+            .height = fb->height,
+            .minDepth = 0.0,
+            .maxDepth = 1.0,
+         },
+      });
+
+   ANV_CALL(CmdSetScissor)(cmd_buffer_h,
+      /*firstScissor*/ 0,
+      /*scissorCount*/ 1,
+      (VkRect2D[]) {
+         {
+            .offset = { 0, 0 },
+            .extent = (VkExtent2D) { fb->width, fb->height },
+         },
+      });
+
+   VkPipeline pipeline_h = *get_pipeline_h(device, src_image->samples);
+   ANV_FROM_HANDLE(anv_pipeline, pipeline, pipeline_h);
+
+   if (cmd_buffer->state.pipeline != pipeline) {
+      anv_CmdBindPipeline(cmd_buffer_h, VK_PIPELINE_BIND_POINT_GRAPHICS,
+                          pipeline_h);
+   }
+
+   anv_CmdBindDescriptorSets(cmd_buffer_h,
+      VK_PIPELINE_BIND_POINT_GRAPHICS,
+      device->meta_state.resolve.pipeline_layout,
+      /*firstSet*/ 0,
+      /* setCount */ 1,
+      (VkDescriptorSet[]) {
+         desc_set_h,
+      },
+      /*copyCount*/ 0,
+      /*copies*/ NULL);
+
+   ANV_CALL(CmdDraw)(cmd_buffer_h, 3, 1, 0, 0);
+
+   /* All objects created above have been consumed by the draw call, so we
+    * may safely destroy them now.
+    */
+   anv_DestroyDescriptorPool(anv_device_to_handle(device),
+                             desc_pool, &cmd_buffer->pool->alloc);
+   anv_DestroySampler(device_h, sampler_h,
+                      &cmd_buffer->pool->alloc);
+}
+
+void anv_CmdResolveImage(
+    VkCommandBuffer                             cmd_buffer_h,
+    VkImage                                     src_image_h,
+    VkImageLayout                               src_image_layout,
+    VkImage                                     dest_image_h,
+    VkImageLayout                               dest_image_layout,
+    uint32_t                                    region_count,
+    const VkImageResolve*                       regions)
+{
+   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, cmd_buffer_h);
+   ANV_FROM_HANDLE(anv_image, src_image, src_image_h);
+   ANV_FROM_HANDLE(anv_image, dest_image, dest_image_h);
+   struct anv_device *device = cmd_buffer->device;
+   struct anv_meta_saved_state state;
+   VkDevice device_h = anv_device_to_handle(device);
+
+   meta_resolve_save(&state, cmd_buffer);
+
+   assert(src_image->samples > 1);
+   assert(dest_image->samples == 1);
+
+   if (src_image->samples >= 16) {
+      /* See commit aa3f9aaf31e9056a255f9e0472ebdfdaa60abe54 for the
+       * glBlitFramebuffer workaround for samples >= 16.
+       */
+      anv_finishme("vkCmdResolveImage: need interpolation workaround when "
+                   "samples >= 16");
+   }
+
+   if (src_image->array_size > 1)
+      anv_finishme("vkCmdResolveImage: multisample array images");
+
+   for (uint32_t r = 0; r < region_count; ++r) {
+      const VkImageResolve *region = &regions[r];
+
+      /* From the Vulkan 1.0 spec:
+       *
+       *    - The aspectMask member of srcSubresource and dstSubresource must
+       *      only contain VK_IMAGE_ASPECT_COLOR_BIT
+       *
+       *    - The layerCount member of srcSubresource and dstSubresource must
+       *      match
+       */
+      assert(region->srcSubresource.aspectMask == VK_IMAGE_ASPECT_COLOR_BIT);
+      assert(region->dstSubresource.aspectMask == VK_IMAGE_ASPECT_COLOR_BIT);
+      assert(region->srcSubresource.layerCount ==
+             region->dstSubresource.layerCount);
+
+      const uint32_t src_base_layer =
+         anv_meta_get_iview_layer(src_image, &region->srcSubresource,
+                                  &region->srcOffset);
+
+      const uint32_t dest_base_layer =
+         anv_meta_get_iview_layer(dest_image, &region->dstSubresource,
+                                  &region->dstOffset);
+
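+      /* Resolve one array layer at a time: each iteration builds single-layer
+       * source and destination image views, wraps the destination in a
+       * temporary framebuffer, and records a one-subpass render pass around
+       * the resolve draw.
+       */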
+      for (uint32_t layer = 0; layer < region->srcSubresource.layerCount;
+           ++layer) {
+
+         struct anv_image_view src_iview;
+         anv_image_view_init(&src_iview, cmd_buffer->device,
+            &(VkImageViewCreateInfo) {
+               .sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,
+               .image = src_image_h,
+               .viewType = anv_meta_get_view_type(src_image),
+               .format = src_image->format->vk_format,
+               .subresourceRange = {
+                  .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
+                  .baseMipLevel = region->srcSubresource.mipLevel,
+                  .levelCount = 1,
+                  .baseArrayLayer = src_base_layer + layer,
+                  .layerCount = 1,
+               },
+            },
+            cmd_buffer, 0, VK_IMAGE_USAGE_SAMPLED_BIT);
+
+         struct anv_image_view dest_iview;
+         anv_image_view_init(&dest_iview, cmd_buffer->device,
+            &(VkImageViewCreateInfo) {
+               .sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,
+               .image = dest_image_h,
+               .viewType = anv_meta_get_view_type(dest_image),
+               .format = dest_image->format->vk_format,
+               .subresourceRange = {
+                  .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
+                  .baseMipLevel = region->dstSubresource.mipLevel,
+                  .levelCount = 1,
+                  .baseArrayLayer = dest_base_layer + layer,
+                  .layerCount = 1,
+               },
+            },
+            cmd_buffer, 0, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT);
+
+         VkFramebuffer fb_h;
+         anv_CreateFramebuffer(device_h,
+            &(VkFramebufferCreateInfo) {
+               .sType = VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO,
+               .attachmentCount = 1,
+               .pAttachments = (VkImageView[]) {
+                  anv_image_view_to_handle(&dest_iview),
+               },
+               .width = anv_minify(dest_image->extent.width,
+                                   region->dstSubresource.mipLevel),
+               .height = anv_minify(dest_image->extent.height,
+                                    region->dstSubresource.mipLevel),
+               .layers = 1
+            },
+            &cmd_buffer->pool->alloc,
+            &fb_h);
+
+         ANV_CALL(CmdBeginRenderPass)(cmd_buffer_h,
+            &(VkRenderPassBeginInfo) {
+               .sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO,
+               .renderPass = device->meta_state.resolve.pass,
+               .framebuffer = fb_h,
+               .renderArea = {
+                  .offset = {
+                     region->dstOffset.x,
+                     region->dstOffset.y,
+                  },
+                  .extent = {
+                     region->extent.width,
+                     region->extent.height,
+                  }
+               },
+               .clearValueCount = 0,
+               .pClearValues = NULL,
+            },
+            VK_SUBPASS_CONTENTS_INLINE);
+
+         emit_resolve(cmd_buffer,
+             &src_iview,
+             &(VkOffset2D) {
+               .x = region->srcOffset.x,
+               .y = region->srcOffset.y,
+             },
+             &dest_iview,
+             &(VkOffset2D) {
+               .x = region->dstOffset.x,
+               .y = region->dstOffset.y,
+             },
+             &(VkExtent2D) {
+               .width = region->extent.width,
+               .height = region->extent.height,
+             });
+
+         ANV_CALL(CmdEndRenderPass)(cmd_buffer_h);
+
+         anv_DestroyFramebuffer(device_h, fb_h,
+                                &cmd_buffer->pool->alloc);
+      }
+   }
+
+   meta_resolve_restore(&state, cmd_buffer);
+}
+
+/**
+ * Emit any needed resolves for the current subpass.
+ */
+void
+anv_cmd_buffer_resolve_subpass(struct anv_cmd_buffer *cmd_buffer)
+{
+   struct anv_framebuffer *fb = cmd_buffer->state.framebuffer;
+   struct anv_subpass *subpass = cmd_buffer->state.subpass;
+   struct anv_meta_saved_state saved_state;
+
+   /* FINISHME(perf): Skip clears for resolve attachments.
+    *
+    * From the Vulkan 1.0 spec:
+    *
+    *    If the first use of an attachment in a render pass is as a resolve
+    *    attachment, then the loadOp is effectively ignored as the resolve is
+    *    guaranteed to overwrite all pixels in the render area.
+    */
+
+   if (!subpass->has_resolve)
+      return;
+
+   meta_resolve_save(&saved_state, cmd_buffer);
+
+   for (uint32_t i = 0; i < subpass->color_count; ++i) {
+      uint32_t src_att = subpass->color_attachments[i];
+      uint32_t dest_att = subpass->resolve_attachments[i];
+
+      if (dest_att == VK_ATTACHMENT_UNUSED)
+         continue;
+
+      struct anv_image_view *src_iview = fb->attachments[src_att];
+      struct anv_image_view *dest_iview = fb->attachments[dest_att];
+
+      struct anv_subpass resolve_subpass = {
+         .color_count = 1,
+         .color_attachments = (uint32_t[]) { dest_att },
+         .depth_stencil_attachment = VK_ATTACHMENT_UNUSED,
+      };
+
+      anv_cmd_buffer_set_subpass(cmd_buffer, &resolve_subpass);
+
+      /* Subpass resolves must respect the render area. We can ignore the
+       * render area here because vkCmdBeginRenderPass set the render area
+       * with 3DSTATE_DRAWING_RECTANGLE.
+       *
+       * XXX(chadv): Does the hardware really respect
+       * 3DSTATE_DRAWING_RECTANGLE when drawing a 3DPRIM_RECTLIST?
+       */
+      emit_resolve(cmd_buffer,
+          src_iview,
+          &(VkOffset2D) { 0, 0 },
+          dest_iview,
+          &(VkOffset2D) { 0, 0 },
+          &(VkExtent2D) { fb->width, fb->height });
+   }
+
+   cmd_buffer->state.subpass = subpass;
+   meta_resolve_restore(&saved_state, cmd_buffer);
+}
diff --git a/src/intel/vulkan/anv_nir.h b/src/intel/vulkan/anv_nir.h
new file mode 100644 (file)
index 0000000..606fd1c
--- /dev/null
@@ -0,0 +1,45 @@
+/*
+ * Copyright © 2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#pragma once
+
+#include "nir/nir.h"
+#include "anv_private.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+void anv_nir_lower_push_constants(nir_shader *shader, bool is_scalar);
+
+void anv_nir_apply_dynamic_offsets(struct anv_pipeline *pipeline,
+                                   nir_shader *shader,
+                                   struct brw_stage_prog_data *prog_data);
+void anv_nir_apply_pipeline_layout(struct anv_pipeline *pipeline,
+                                   nir_shader *shader,
+                                   struct brw_stage_prog_data *prog_data,
+                                   struct anv_pipeline_bind_map *map);
+
+#ifdef __cplusplus
+}
+#endif
diff --git a/src/intel/vulkan/anv_nir_apply_dynamic_offsets.c b/src/intel/vulkan/anv_nir_apply_dynamic_offsets.c
new file mode 100644 (file)
index 0000000..46bc5d2
--- /dev/null
@@ -0,0 +1,171 @@
+/*
+ * Copyright © 2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "anv_nir.h"
+#include "nir/nir_builder.h"
+
+struct apply_dynamic_offsets_state {
+   nir_shader *shader;
+   nir_builder builder;
+
+   const struct anv_pipeline_layout *layout;
+
+   uint32_t indices_start;
+};
+
+static bool
+apply_dynamic_offsets_block(nir_block *block, void *void_state)
+{
+   struct apply_dynamic_offsets_state *state = void_state;
+   struct anv_descriptor_set_layout *set_layout;
+
+   nir_builder *b = &state->builder;
+
+   nir_foreach_instr_safe(block, instr) {
+      if (instr->type != nir_instr_type_intrinsic)
+         continue;
+
+      nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
+
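+      /* The source slot that holds the resource (block) index differs
+       * between loads and stores.
+       */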
+      unsigned block_idx_src;
+      switch (intrin->intrinsic) {
+      case nir_intrinsic_load_ubo:
+      case nir_intrinsic_load_ssbo:
+         block_idx_src = 0;
+         break;
+      case nir_intrinsic_store_ssbo:
+         block_idx_src = 1;
+         break;
+      default:
+         continue; /* the loop */
+      }
+
+      nir_instr *res_instr = intrin->src[block_idx_src].ssa->parent_instr;
+      assert(res_instr->type == nir_instr_type_intrinsic);
+      nir_intrinsic_instr *res_intrin = nir_instr_as_intrinsic(res_instr);
+      assert(res_intrin->intrinsic == nir_intrinsic_vulkan_resource_index);
+
+      unsigned set = res_intrin->const_index[0];
+      unsigned binding = res_intrin->const_index[1];
+
+      set_layout = state->layout->set[set].layout;
+      if (set_layout->binding[binding].dynamic_offset_index < 0)
+         continue;
+
+      b->cursor = nir_before_instr(&intrin->instr);
+
+      /* First, we need to generate the uniform load for the buffer offset */
+      uint32_t index = state->layout->set[set].dynamic_offset_start +
+                       set_layout->binding[binding].dynamic_offset_index;
+
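+      /* Each dynamic buffer contributes two 32-bit values to the push
+       * constants, the offset and the range, so indices are scaled by 8
+       * bytes here (see the param setup in anv_nir_apply_dynamic_offsets).
+       */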
+      nir_intrinsic_instr *offset_load =
+         nir_intrinsic_instr_create(state->shader, nir_intrinsic_load_uniform);
+      offset_load->num_components = 2;
+      offset_load->const_index[0] = state->indices_start + index * 8;
+      offset_load->src[0] = nir_src_for_ssa(nir_imul(b, res_intrin->src[0].ssa,
+                                                     nir_imm_int(b, 8)));
+
+      nir_ssa_dest_init(&offset_load->instr, &offset_load->dest, 2, NULL);
+      nir_builder_instr_insert(b, &offset_load->instr);
+
+      nir_src *offset_src = nir_get_io_offset_src(intrin);
+      nir_ssa_def *new_offset = nir_iadd(b, offset_src->ssa,
+                                         &offset_load->dest.ssa);
+
+      /* In order to avoid out-of-bounds access, we predicate the load/store
+       * on the offset being within the dynamic buffer's range.
+       */
+      nir_ssa_def *pred = nir_uge(b, nir_channel(b, &offset_load->dest.ssa, 1),
+                                  offset_src->ssa);
+      nir_if *if_stmt = nir_if_create(b->shader);
+      if_stmt->condition = nir_src_for_ssa(pred);
+      nir_cf_node_insert(b->cursor, &if_stmt->cf_node);
+
+      nir_instr_remove(&intrin->instr);
+      *offset_src = nir_src_for_ssa(new_offset);
+      nir_instr_insert_after_cf_list(&if_stmt->then_list, &intrin->instr);
+
+      if (intrin->intrinsic != nir_intrinsic_store_ssbo) {
+         /* It's a load, we need a phi node */
+         nir_phi_instr *phi = nir_phi_instr_create(b->shader);
+         nir_ssa_dest_init(&phi->instr, &phi->dest,
+                           intrin->num_components, NULL);
+
+         nir_phi_src *src1 = ralloc(phi, nir_phi_src);
+         struct exec_node *tnode = exec_list_get_tail(&if_stmt->then_list);
+         src1->pred = exec_node_data(nir_block, tnode, cf_node.node);
+         src1->src = nir_src_for_ssa(&intrin->dest.ssa);
+         exec_list_push_tail(&phi->srcs, &src1->node);
+
+         b->cursor = nir_after_cf_list(&if_stmt->else_list);
+         nir_ssa_def *zero = nir_build_imm(b, intrin->num_components,
+            (nir_const_value) { .u = { 0, 0, 0, 0 } });
+
+         nir_phi_src *src2 = ralloc(phi, nir_phi_src);
+         struct exec_node *enode = exec_list_get_tail(&if_stmt->else_list);
+         src2->pred = exec_node_data(nir_block, enode, cf_node.node);
+         src2->src = nir_src_for_ssa(zero);
+         exec_list_push_tail(&phi->srcs, &src2->node);
+
+         assert(intrin->dest.is_ssa);
+         nir_ssa_def_rewrite_uses(&intrin->dest.ssa,
+                                  nir_src_for_ssa(&phi->dest.ssa));
+
+         nir_instr_insert_after_cf(&if_stmt->cf_node, &phi->instr);
+      }
+   }
+
+   return true;
+}
+
+void
+anv_nir_apply_dynamic_offsets(struct anv_pipeline *pipeline,
+                              nir_shader *shader,
+                              struct brw_stage_prog_data *prog_data)
+{
+   struct apply_dynamic_offsets_state state = {
+      .shader = shader,
+      .layout = pipeline->layout,
+      .indices_start = shader->num_uniforms,
+   };
+
+   if (!state.layout || !state.layout->stage[shader->stage].has_dynamic_offsets)
+      return;
+
+   nir_foreach_function(shader, function) {
+      if (function->impl) {
+         nir_builder_init(&state.builder, function->impl);
+         nir_foreach_block(function->impl, apply_dynamic_offsets_block, &state);
+         nir_metadata_preserve(function->impl, nir_metadata_block_index |
+                                               nir_metadata_dominance);
+      }
+   }
+
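+   /* The NULL anv_push_constants pointer is only used to compute member
+    * offsets; the param entries end up holding offsets into the push
+    * constant data rather than real addresses.
+    */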
+   struct anv_push_constants *null_data = NULL;
+   for (unsigned i = 0; i < MAX_DYNAMIC_BUFFERS; i++) {
+      prog_data->param[i * 2 + shader->num_uniforms / 4] =
+         (const union gl_constant_value *)&null_data->dynamic[i].offset;
+      prog_data->param[i * 2 + 1 + shader->num_uniforms / 4] =
+         (const union gl_constant_value *)&null_data->dynamic[i].range;
+   }
+
+   shader->num_uniforms += MAX_DYNAMIC_BUFFERS * 8;
+}
diff --git a/src/intel/vulkan/anv_nir_apply_pipeline_layout.c b/src/intel/vulkan/anv_nir_apply_pipeline_layout.c
new file mode 100644 (file)
index 0000000..eeb9b97
--- /dev/null
@@ -0,0 +1,387 @@
+/*
+ * Copyright © 2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "anv_nir.h"
+#include "program/prog_parameter.h"
+#include "nir/nir_builder.h"
+
+struct apply_pipeline_layout_state {
+   nir_shader *shader;
+   nir_builder builder;
+
+   struct {
+      BITSET_WORD *used;
+      uint8_t *surface_offsets;
+      uint8_t *sampler_offsets;
+      uint8_t *image_offsets;
+   } set[MAX_SETS];
+};
+
+static void
+add_binding(struct apply_pipeline_layout_state *state,
+            uint32_t set, uint32_t binding)
+{
+   BITSET_SET(state->set[set].used, binding);
+}
+
+static void
+add_var_binding(struct apply_pipeline_layout_state *state, nir_variable *var)
+{
+   add_binding(state, var->data.descriptor_set, var->data.binding);
+}
+
+static bool
+get_used_bindings_block(nir_block *block, void *void_state)
+{
+   struct apply_pipeline_layout_state *state = void_state;
+
+   nir_foreach_instr_safe(block, instr) {
+      switch (instr->type) {
+      case nir_instr_type_intrinsic: {
+         nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
+         switch (intrin->intrinsic) {
+         case nir_intrinsic_vulkan_resource_index:
+            add_binding(state, nir_intrinsic_desc_set(intrin),
+                        nir_intrinsic_binding(intrin));
+            break;
+
+         case nir_intrinsic_image_load:
+         case nir_intrinsic_image_store:
+         case nir_intrinsic_image_atomic_add:
+         case nir_intrinsic_image_atomic_min:
+         case nir_intrinsic_image_atomic_max:
+         case nir_intrinsic_image_atomic_and:
+         case nir_intrinsic_image_atomic_or:
+         case nir_intrinsic_image_atomic_xor:
+         case nir_intrinsic_image_atomic_exchange:
+         case nir_intrinsic_image_atomic_comp_swap:
+         case nir_intrinsic_image_size:
+         case nir_intrinsic_image_samples:
+            add_var_binding(state, intrin->variables[0]->var);
+            break;
+
+         default:
+            break;
+         }
+         break;
+      }
+      case nir_instr_type_tex: {
+         nir_tex_instr *tex = nir_instr_as_tex(instr);
+         assert(tex->texture);
+         add_var_binding(state, tex->texture->var);
+         if (tex->sampler)
+            add_var_binding(state, tex->sampler->var);
+         break;
+      }
+      default:
+         continue;
+      }
+   }
+
+   return true;
+}
+
+static void
+lower_res_index_intrinsic(nir_intrinsic_instr *intrin,
+                          struct apply_pipeline_layout_state *state)
+{
+   nir_builder *b = &state->builder;
+
+   b->cursor = nir_before_instr(&intrin->instr);
+
+   uint32_t set = nir_intrinsic_desc_set(intrin);
+   uint32_t binding = nir_intrinsic_binding(intrin);
+
+   uint32_t surface_index = state->set[set].surface_offsets[binding];
+
+   nir_const_value *const_block_idx =
+      nir_src_as_const_value(intrin->src[0]);
+
+   nir_ssa_def *block_index;
+   if (const_block_idx) {
+      block_index = nir_imm_int(b, surface_index + const_block_idx->u[0]);
+   } else {
+      block_index = nir_iadd(b, nir_imm_int(b, surface_index),
+                             nir_ssa_for_src(b, intrin->src[0], 1));
+   }
+
+   assert(intrin->dest.is_ssa);
+   nir_ssa_def_rewrite_uses(&intrin->dest.ssa, nir_src_for_ssa(block_index));
+   nir_instr_remove(&intrin->instr);
+}
+
+static void
+lower_tex_deref(nir_tex_instr *tex, nir_deref_var *deref,
+                unsigned *const_index, nir_tex_src_type src_type,
+                struct apply_pipeline_layout_state *state)
+{
+   if (deref->deref.child) {
+      assert(deref->deref.child->deref_type == nir_deref_type_array);
+      nir_deref_array *deref_array = nir_deref_as_array(deref->deref.child);
+
+      *const_index += deref_array->base_offset;
+
+      if (deref_array->deref_array_type == nir_deref_array_type_indirect) {
+         nir_tex_src *new_srcs = rzalloc_array(tex, nir_tex_src,
+                                               tex->num_srcs + 1);
+
+         for (unsigned i = 0; i < tex->num_srcs; i++) {
+            new_srcs[i].src_type = tex->src[i].src_type;
+            nir_instr_move_src(&tex->instr, &new_srcs[i].src, &tex->src[i].src);
+         }
+
+         ralloc_free(tex->src);
+         tex->src = new_srcs;
+
+         /* Now we can go ahead and move the source over to being a
+          * first-class texture source.
+          */
+         tex->src[tex->num_srcs].src_type = src_type;
+         tex->num_srcs++;
+         assert(deref_array->indirect.is_ssa);
+         nir_instr_rewrite_src(&tex->instr, &tex->src[tex->num_srcs - 1].src,
+                               deref_array->indirect);
+      }
+   }
+}
+
+static void
+cleanup_tex_deref(nir_tex_instr *tex, nir_deref_var *deref)
+{
+   if (deref->deref.child == NULL)
+      return;
+
+   nir_deref_array *deref_array = nir_deref_as_array(deref->deref.child);
+
+   if (deref_array->deref_array_type != nir_deref_array_type_indirect)
+      return;
+
+   nir_instr_rewrite_src(&tex->instr, &deref_array->indirect, NIR_SRC_INIT);
+}
+
+static void
+lower_tex(nir_tex_instr *tex, struct apply_pipeline_layout_state *state)
+{
+   /* No one should have come by and lowered it already */
+   assert(tex->texture);
+
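+   /* Replace the texture (and, below, sampler) variable derefs with flat
+    * binding table indices; any array indirection becomes an explicit
+    * texture/sampler offset source on the instruction.
+    */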
+   unsigned set = tex->texture->var->data.descriptor_set;
+   unsigned binding = tex->texture->var->data.binding;
+   tex->texture_index = state->set[set].surface_offsets[binding];
+   lower_tex_deref(tex, tex->texture, &tex->texture_index,
+                   nir_tex_src_texture_offset, state);
+
+   if (tex->sampler) {
+      unsigned set = tex->sampler->var->data.descriptor_set;
+      unsigned binding = tex->sampler->var->data.binding;
+      tex->sampler_index = state->set[set].sampler_offsets[binding];
+      lower_tex_deref(tex, tex->sampler, &tex->sampler_index,
+                      nir_tex_src_sampler_offset, state);
+   }
+
+   /* The backend only ever uses this to mark used surfaces.  We don't care
+    * about that little optimization so it just needs to be non-zero.
+    */
+   tex->texture_array_size = 1;
+
+   cleanup_tex_deref(tex, tex->texture);
+   if (tex->sampler)
+      cleanup_tex_deref(tex, tex->sampler);
+   tex->texture = NULL;
+   tex->sampler = NULL;
+}
+
+static bool
+apply_pipeline_layout_block(nir_block *block, void *void_state)
+{
+   struct apply_pipeline_layout_state *state = void_state;
+
+   nir_foreach_instr_safe(block, instr) {
+      switch (instr->type) {
+      case nir_instr_type_intrinsic: {
+         nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
+         if (intrin->intrinsic == nir_intrinsic_vulkan_resource_index) {
+            lower_res_index_intrinsic(intrin, state);
+         }
+         break;
+      }
+      case nir_instr_type_tex:
+         lower_tex(nir_instr_as_tex(instr), state);
+         break;
+      default:
+         continue;
+      }
+   }
+
+   return true;
+}
+
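+/* Point the first n param slots at the given values and pad the rest of the
+ * vec4 slot with a constant zero.
+ */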
+static void
+setup_vec4_uniform_value(const union gl_constant_value **params,
+                         const union gl_constant_value *values,
+                         unsigned n)
+{
+   static const gl_constant_value zero = { 0 };
+
+   for (unsigned i = 0; i < n; ++i)
+      params[i] = &values[i];
+
+   for (unsigned i = n; i < 4; ++i)
+      params[i] = &zero;
+}
+
+void
+anv_nir_apply_pipeline_layout(struct anv_pipeline *pipeline,
+                              nir_shader *shader,
+                              struct brw_stage_prog_data *prog_data,
+                              struct anv_pipeline_bind_map *map)
+{
+   struct anv_pipeline_layout *layout = pipeline->layout;
+
+   struct apply_pipeline_layout_state state = {
+      .shader = shader,
+   };
+
+   void *mem_ctx = ralloc_context(NULL);
+
+   for (unsigned s = 0; s < layout->num_sets; s++) {
+      const unsigned count = layout->set[s].layout->binding_count;
+      const unsigned words = BITSET_WORDS(count);
+      state.set[s].used = rzalloc_array(mem_ctx, BITSET_WORD, words);
+      state.set[s].surface_offsets = rzalloc_array(mem_ctx, uint8_t, count);
+      state.set[s].sampler_offsets = rzalloc_array(mem_ctx, uint8_t, count);
+      state.set[s].image_offsets = rzalloc_array(mem_ctx, uint8_t, count);
+   }
+
+   nir_foreach_function(shader, function) {
+      if (function->impl)
+         nir_foreach_block(function->impl, get_used_bindings_block, &state);
+   }
+
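+   /* First pass: count how many surface, sampler, and image binding table
+    * entries this shader's used bindings require.
+    */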
+   for (uint32_t set = 0; set < layout->num_sets; set++) {
+      struct anv_descriptor_set_layout *set_layout = layout->set[set].layout;
+
+      BITSET_WORD b, _tmp;
+      BITSET_FOREACH_SET(b, _tmp, state.set[set].used,
+                         set_layout->binding_count) {
+         if (set_layout->binding[b].stage[shader->stage].surface_index >= 0)
+            map->surface_count += set_layout->binding[b].array_size;
+         if (set_layout->binding[b].stage[shader->stage].sampler_index >= 0)
+            map->sampler_count += set_layout->binding[b].array_size;
+         if (set_layout->binding[b].stage[shader->stage].image_index >= 0)
+            map->image_count += set_layout->binding[b].array_size;
+      }
+   }
+
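+   /* Second pass: hand out consecutive binding table slots to each used
+    * binding and record the reverse (set, descriptor) mapping for surfaces
+    * and samplers.
+    */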
+   unsigned surface = 0;
+   unsigned sampler = 0;
+   unsigned image = 0;
+   for (uint32_t set = 0; set < layout->num_sets; set++) {
+      struct anv_descriptor_set_layout *set_layout = layout->set[set].layout;
+
+      BITSET_WORD b, _tmp;
+      BITSET_FOREACH_SET(b, _tmp, state.set[set].used,
+                         set_layout->binding_count) {
+         unsigned array_size = set_layout->binding[b].array_size;
+         unsigned set_offset = set_layout->binding[b].descriptor_index;
+
+         if (set_layout->binding[b].stage[shader->stage].surface_index >= 0) {
+            state.set[set].surface_offsets[b] = surface;
+            for (unsigned i = 0; i < array_size; i++) {
+               map->surface_to_descriptor[surface + i].set = set;
+               map->surface_to_descriptor[surface + i].offset = set_offset + i;
+            }
+            surface += array_size;
+         }
+
+         if (set_layout->binding[b].stage[shader->stage].sampler_index >= 0) {
+            state.set[set].sampler_offsets[b] = sampler;
+            for (unsigned i = 0; i < array_size; i++) {
+               map->sampler_to_descriptor[sampler + i].set = set;
+               map->sampler_to_descriptor[sampler + i].offset = set_offset + i;
+            }
+            sampler += array_size;
+         }
+
+         if (set_layout->binding[b].stage[shader->stage].image_index >= 0) {
+            state.set[set].image_offsets[b] = image;
+            image += array_size;
+         }
+      }
+   }
+
+   nir_foreach_function(shader, function) {
+      if (function->impl) {
+         nir_builder_init(&state.builder, function->impl);
+         nir_foreach_block(function->impl, apply_pipeline_layout_block, &state);
+         nir_metadata_preserve(function->impl, nir_metadata_block_index |
+                                               nir_metadata_dominance);
+      }
+   }
+
+   if (map->image_count > 0) {
+      assert(map->image_count <= MAX_IMAGES);
+      nir_foreach_variable(var, &shader->uniforms) {
+         if (glsl_type_is_image(var->type) ||
+             (glsl_type_is_array(var->type) &&
+              glsl_type_is_image(glsl_get_array_element(var->type)))) {
+            /* Images are represented as uniform push constants and the actual
+             * information required for reading/writing to/from the image is
+             * stored in the uniform.
+             */
+            unsigned set = var->data.descriptor_set;
+            unsigned binding = var->data.binding;
+            unsigned image_index = state.set[set].image_offsets[binding];
+
+            var->data.driver_location = shader->num_uniforms +
+                                        image_index * BRW_IMAGE_PARAM_SIZE * 4;
+         }
+      }
+
+      struct anv_push_constants *null_data = NULL;
+      const gl_constant_value **param =
+         prog_data->param + (shader->num_uniforms / 4);
+      const struct brw_image_param *image_param = null_data->images;
+      for (uint32_t i = 0; i < map->image_count; i++) {
+         setup_vec4_uniform_value(param + BRW_IMAGE_PARAM_SURFACE_IDX_OFFSET,
+            (const union gl_constant_value *)&image_param->surface_idx, 1);
+         setup_vec4_uniform_value(param + BRW_IMAGE_PARAM_OFFSET_OFFSET,
+            (const union gl_constant_value *)image_param->offset, 2);
+         setup_vec4_uniform_value(param + BRW_IMAGE_PARAM_SIZE_OFFSET,
+            (const union gl_constant_value *)image_param->size, 3);
+         setup_vec4_uniform_value(param + BRW_IMAGE_PARAM_STRIDE_OFFSET,
+            (const union gl_constant_value *)image_param->stride, 4);
+         setup_vec4_uniform_value(param + BRW_IMAGE_PARAM_TILING_OFFSET,
+            (const union gl_constant_value *)image_param->tiling, 3);
+         setup_vec4_uniform_value(param + BRW_IMAGE_PARAM_SWIZZLING_OFFSET,
+            (const union gl_constant_value *)image_param->swizzling, 2);
+
+         param += BRW_IMAGE_PARAM_SIZE;
+         image_param++;
+      }
+
+      shader->num_uniforms += map->image_count * BRW_IMAGE_PARAM_SIZE * 4;
+   }
+
+   ralloc_free(mem_ctx);
+}
diff --git a/src/intel/vulkan/anv_nir_lower_push_constants.c b/src/intel/vulkan/anv_nir_lower_push_constants.c
new file mode 100644 (file)
index 0000000..53cd3d7
--- /dev/null
@@ -0,0 +1,77 @@
+/*
+ * Copyright © 2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "anv_nir.h"
+
+struct lower_push_constants_state {
+   nir_shader *shader;
+   bool is_scalar;
+};
+
+static bool
+lower_push_constants_block(nir_block *block, void *void_state)
+{
+   struct lower_push_constants_state *state = void_state;
+
+   nir_foreach_instr(block, instr) {
+      if (instr->type != nir_instr_type_intrinsic)
+         continue;
+
+      nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
+
+      /* TODO: Handle indirect push constants */
+      if (intrin->intrinsic != nir_intrinsic_load_push_constant)
+         continue;
+
+      /* This won't work for vec4 stages. */
+      assert(state->is_scalar);
+
+      assert(intrin->const_index[0] % 4 == 0);
+      assert(intrin->const_index[1] == 128);
+
+      /* We just turn them into uniform loads with the appropriate offset. */
+      intrin->intrinsic = nir_intrinsic_load_uniform;
+   }
+
+   return true;
+}
+
+void
+anv_nir_lower_push_constants(nir_shader *shader, bool is_scalar)
+{
+   struct lower_push_constants_state state = {
+      .shader = shader,
+      .is_scalar = is_scalar,
+   };
+
+   nir_foreach_function(shader, function) {
+      if (function->impl)
+         nir_foreach_block(function->impl, lower_push_constants_block, &state);
+   }
+
+   assert(shader->num_uniforms % 4 == 0);
+   if (is_scalar)
+      shader->num_uniforms /= 4;
+   else
+      shader->num_uniforms = DIV_ROUND_UP(shader->num_uniforms, 16);
+}
diff --git a/src/intel/vulkan/anv_pass.c b/src/intel/vulkan/anv_pass.c
new file mode 100644 (file)
index 0000000..d07e9fe
--- /dev/null
@@ -0,0 +1,160 @@
+/*
+ * Copyright © 2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "anv_private.h"
+
+VkResult anv_CreateRenderPass(
+    VkDevice                                    _device,
+    const VkRenderPassCreateInfo*               pCreateInfo,
+    const VkAllocationCallbacks*                pAllocator,
+    VkRenderPass*                               pRenderPass)
+{
+   ANV_FROM_HANDLE(anv_device, device, _device);
+   struct anv_render_pass *pass;
+   size_t size;
+   size_t attachments_offset;
+
+   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO);
+
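+   /* The pass, its subpasses, and its attachment descriptions live in one
+    * allocation: the subpasses immediately follow the pass and the
+    * attachments follow the subpasses at attachments_offset.
+    */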
+   size = sizeof(*pass);
+   size += pCreateInfo->subpassCount * sizeof(pass->subpasses[0]);
+   attachments_offset = size;
+   size += pCreateInfo->attachmentCount * sizeof(pass->attachments[0]);
+
+   pass = anv_alloc2(&device->alloc, pAllocator, size, 8,
+                     VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+   if (pass == NULL)
+      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+
+   /* Clear the subpasses along with the parent pass. This is required because
+    * each array member of anv_subpass must be a valid pointer if not NULL.
+    */
+   memset(pass, 0, size);
+   pass->attachment_count = pCreateInfo->attachmentCount;
+   pass->subpass_count = pCreateInfo->subpassCount;
+   pass->attachments = (void *) pass + attachments_offset;
+
+   for (uint32_t i = 0; i < pCreateInfo->attachmentCount; i++) {
+      struct anv_render_pass_attachment *att = &pass->attachments[i];
+
+      att->format = anv_format_for_vk_format(pCreateInfo->pAttachments[i].format);
+      att->samples = pCreateInfo->pAttachments[i].samples;
+      att->load_op = pCreateInfo->pAttachments[i].loadOp;
+      att->stencil_load_op = pCreateInfo->pAttachments[i].stencilLoadOp;
+      // att->store_op = pCreateInfo->pAttachments[i].storeOp;
+      // att->stencil_store_op = pCreateInfo->pAttachments[i].stencilStoreOp;
+   }
+
+   uint32_t subpass_attachment_count = 0, *p;
+   for (uint32_t i = 0; i < pCreateInfo->subpassCount; i++) {
+      const VkSubpassDescription *desc = &pCreateInfo->pSubpasses[i];
+
+      subpass_attachment_count +=
+         desc->inputAttachmentCount +
+         desc->colorAttachmentCount +
+         /* Count colorAttachmentCount again for resolve_attachments */
+         desc->colorAttachmentCount;
+   }
+
+   pass->subpass_attachments =
+      anv_alloc2(&device->alloc, pAllocator,
+                 subpass_attachment_count * sizeof(uint32_t), 8,
+                 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+   if (pass->subpass_attachments == NULL) {
+      anv_free2(&device->alloc, pAllocator, pass);
+      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+   }
+
+   p = pass->subpass_attachments;
+   for (uint32_t i = 0; i < pCreateInfo->subpassCount; i++) {
+      const VkSubpassDescription *desc = &pCreateInfo->pSubpasses[i];
+      struct anv_subpass *subpass = &pass->subpasses[i];
+
+      subpass->input_count = desc->inputAttachmentCount;
+      subpass->color_count = desc->colorAttachmentCount;
+
+      if (desc->inputAttachmentCount > 0) {
+         subpass->input_attachments = p;
+         p += desc->inputAttachmentCount;
+
+         for (uint32_t j = 0; j < desc->inputAttachmentCount; j++) {
+            subpass->input_attachments[j]
+               = desc->pInputAttachments[j].attachment;
+         }
+      }
+
+      if (desc->colorAttachmentCount > 0) {
+         subpass->color_attachments = p;
+         p += desc->colorAttachmentCount;
+
+         for (uint32_t j = 0; j < desc->colorAttachmentCount; j++) {
+            subpass->color_attachments[j]
+               = desc->pColorAttachments[j].attachment;
+         }
+      }
+
+      subpass->has_resolve = false;
+      if (desc->pResolveAttachments) {
+         subpass->resolve_attachments = p;
+         p += desc->colorAttachmentCount;
+
+         for (uint32_t j = 0; j < desc->colorAttachmentCount; j++) {
+            uint32_t a = desc->pResolveAttachments[j].attachment;
+            subpass->resolve_attachments[j] = a;
+            if (a != VK_ATTACHMENT_UNUSED)
+               subpass->has_resolve = true;
+         }
+      }
+
+      if (desc->pDepthStencilAttachment) {
+         subpass->depth_stencil_attachment =
+            desc->pDepthStencilAttachment->attachment;
+      } else {
+         subpass->depth_stencil_attachment = VK_ATTACHMENT_UNUSED;
+      }
+   }
+
+   *pRenderPass = anv_render_pass_to_handle(pass);
+
+   return VK_SUCCESS;
+}
+
+void anv_DestroyRenderPass(
+    VkDevice                                    _device,
+    VkRenderPass                                _pass,
+    const VkAllocationCallbacks*                pAllocator)
+{
+   ANV_FROM_HANDLE(anv_device, device, _device);
+   ANV_FROM_HANDLE(anv_render_pass, pass, _pass);
+
+   anv_free2(&device->alloc, pAllocator, pass->subpass_attachments);
+   anv_free2(&device->alloc, pAllocator, pass);
+}
+
+void anv_GetRenderAreaGranularity(
+    VkDevice                                    device,
+    VkRenderPass                                renderPass,
+    VkExtent2D*                                 pGranularity)
+{
+   *pGranularity = (VkExtent2D) { 1, 1 };
+}
diff --git a/src/intel/vulkan/anv_pipeline.c b/src/intel/vulkan/anv_pipeline.c
new file mode 100644 (file)
index 0000000..abe93a5
--- /dev/null
@@ -0,0 +1,1376 @@
+/*
+ * Copyright © 2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include <assert.h>
+#include <stdbool.h>
+#include <string.h>
+#include <unistd.h>
+#include <fcntl.h>
+
+#include "util/mesa-sha1.h"
+#include "anv_private.h"
+#include "brw_nir.h"
+#include "anv_nir.h"
+#include "nir/spirv/nir_spirv.h"
+
+/* Needed for SWIZZLE macros */
+#include "program/prog_instruction.h"
+
+// Shader functions
+
+VkResult anv_CreateShaderModule(
+    VkDevice                                    _device,
+    const VkShaderModuleCreateInfo*             pCreateInfo,
+    const VkAllocationCallbacks*                pAllocator,
+    VkShaderModule*                             pShaderModule)
+{
+   ANV_FROM_HANDLE(anv_device, device, _device);
+   struct anv_shader_module *module;
+
+   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO);
+   assert(pCreateInfo->flags == 0);
+
+   module = anv_alloc2(&device->alloc, pAllocator,
+                       sizeof(*module) + pCreateInfo->codeSize, 8,
+                       VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+   if (module == NULL)
+      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+
+   module->nir = NULL;
+   module->size = pCreateInfo->codeSize;
+   memcpy(module->data, pCreateInfo->pCode, module->size);
+
+   _mesa_sha1_compute(module->data, module->size, module->sha1);
+
+   *pShaderModule = anv_shader_module_to_handle(module);
+
+   return VK_SUCCESS;
+}
+
+void anv_DestroyShaderModule(
+    VkDevice                                    _device,
+    VkShaderModule                              _module,
+    const VkAllocationCallbacks*                pAllocator)
+{
+   ANV_FROM_HANDLE(anv_device, device, _device);
+   ANV_FROM_HANDLE(anv_shader_module, module, _module);
+
+   anv_free2(&device->alloc, pAllocator, module);
+}
+
+#define SPIR_V_MAGIC_NUMBER 0x07230203
+
+/* Eventually, this will become part of anv_CreateShader.  Unfortunately,
+ * we can't do that yet because we don't have the ability to copy nir.
+ */
+static nir_shader *
+anv_shader_compile_to_nir(struct anv_device *device,
+                          struct anv_shader_module *module,
+                          const char *entrypoint_name,
+                          gl_shader_stage stage,
+                          const VkSpecializationInfo *spec_info)
+{
+   if (strcmp(entrypoint_name, "main") != 0) {
+      anv_finishme("Multiple shaders per module not really supported");
+   }
+
+   const struct brw_compiler *compiler =
+      device->instance->physicalDevice.compiler;
+   const nir_shader_compiler_options *nir_options =
+      compiler->glsl_compiler_options[stage].NirOptions;
+
+   nir_shader *nir;
+   nir_function *entry_point;
+   if (module->nir) {
+      /* Some things, such as our meta clear/blit code, will give us a NIR
+       * shader directly.  In that case, we ignore the SPIR-V entirely and
+       * use the NIR shader as-is.
+       */
+      nir = module->nir;
+      nir->options = nir_options;
+      nir_validate_shader(nir);
+
+      assert(exec_list_length(&nir->functions) == 1);
+      struct exec_node *node = exec_list_get_head(&nir->functions);
+      entry_point = exec_node_data(nir_function, node, node);
+   } else {
+      uint32_t *spirv = (uint32_t *) module->data;
+      assert(spirv[0] == SPIR_V_MAGIC_NUMBER);
+      assert(module->size % 4 == 0);
+
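+      /* Translate the Vulkan specialization info into the flat array of
+       * (constant ID, 32-bit value) pairs that spirv_to_nir consumes.
+       */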
+      uint32_t num_spec_entries = 0;
+      struct nir_spirv_specialization *spec_entries = NULL;
+      if (spec_info && spec_info->mapEntryCount > 0) {
+         num_spec_entries = spec_info->mapEntryCount;
+         spec_entries = malloc(num_spec_entries * sizeof(*spec_entries));
+         for (uint32_t i = 0; i < num_spec_entries; i++) {
+            const uint32_t *data =
+               spec_info->pData + spec_info->pMapEntries[i].offset;
+            assert((const void *)(data + 1) <=
+                   spec_info->pData + spec_info->dataSize);
+
+            spec_entries[i].id = spec_info->pMapEntries[i].constantID;
+            spec_entries[i].data = *data;
+         }
+      }
+
+      entry_point = spirv_to_nir(spirv, module->size / 4,
+                                 spec_entries, num_spec_entries,
+                                 stage, entrypoint_name, nir_options);
+      nir = entry_point->shader;
+      assert(nir->stage == stage);
+      nir_validate_shader(nir);
+
+      free(spec_entries);
+
+      nir_lower_returns(nir);
+      nir_validate_shader(nir);
+
+      nir_inline_functions(nir);
+      nir_validate_shader(nir);
+
+      /* Pick off the single entrypoint that we want */
+      foreach_list_typed_safe(nir_function, func, node, &nir->functions) {
+         if (func != entry_point)
+            exec_node_remove(&func->node);
+      }
+      assert(exec_list_length(&nir->functions) == 1);
+      entry_point->name = ralloc_strdup(entry_point, "main");
+
+      nir_remove_dead_variables(nir, nir_var_shader_in);
+      nir_remove_dead_variables(nir, nir_var_shader_out);
+      nir_remove_dead_variables(nir, nir_var_system_value);
+      nir_validate_shader(nir);
+
+      nir_lower_outputs_to_temporaries(entry_point->shader, entry_point);
+
+      nir_lower_system_values(nir);
+      nir_validate_shader(nir);
+   }
+
+   /* Vulkan uses the separate-shader linking model */
+   nir->info.separate_shader = true;
+
+   nir = brw_preprocess_nir(nir, compiler->scalar_stage[stage]);
+
+   nir_shader_gather_info(nir, entry_point->impl);
+
+   uint32_t indirect_mask = 0;
+   if (compiler->glsl_compiler_options[stage].EmitNoIndirectInput)
+      indirect_mask |= (1 << nir_var_shader_in);
+   if (compiler->glsl_compiler_options[stage].EmitNoIndirectTemp)
+      indirect_mask |= 1 << nir_var_local;
+
+   nir_lower_indirect_derefs(nir, indirect_mask);
+
+   return nir;
+}
+
+void anv_DestroyPipeline(
+    VkDevice                                    _device,
+    VkPipeline                                  _pipeline,
+    const VkAllocationCallbacks*                pAllocator)
+{
+   ANV_FROM_HANDLE(anv_device, device, _device);
+   ANV_FROM_HANDLE(anv_pipeline, pipeline, _pipeline);
+
+   anv_reloc_list_finish(&pipeline->batch_relocs,
+                         pAllocator ? pAllocator : &device->alloc);
+   if (pipeline->blend_state.map)
+      anv_state_pool_free(&device->dynamic_state_pool, pipeline->blend_state);
+   anv_free2(&device->alloc, pAllocator, pipeline);
+}
+
+static const uint32_t vk_to_gen_primitive_type[] = {
+   [VK_PRIMITIVE_TOPOLOGY_POINT_LIST]                    = _3DPRIM_POINTLIST,
+   [VK_PRIMITIVE_TOPOLOGY_LINE_LIST]                     = _3DPRIM_LINELIST,
+   [VK_PRIMITIVE_TOPOLOGY_LINE_STRIP]                    = _3DPRIM_LINESTRIP,
+   [VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST]                 = _3DPRIM_TRILIST,
+   [VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP]                = _3DPRIM_TRISTRIP,
+   [VK_PRIMITIVE_TOPOLOGY_TRIANGLE_FAN]                  = _3DPRIM_TRIFAN,
+   [VK_PRIMITIVE_TOPOLOGY_LINE_LIST_WITH_ADJACENCY]      = _3DPRIM_LINELIST_ADJ,
+   [VK_PRIMITIVE_TOPOLOGY_LINE_STRIP_WITH_ADJACENCY]     = _3DPRIM_LINESTRIP_ADJ,
+   [VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST_WITH_ADJACENCY]  = _3DPRIM_TRILIST_ADJ,
+   [VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP_WITH_ADJACENCY] = _3DPRIM_TRISTRIP_ADJ,
+/*   [VK_PRIMITIVE_TOPOLOGY_PATCH_LIST]                = _3DPRIM_PATCHLIST_1 */
+};
+
+static void
+populate_sampler_prog_key(const struct brw_device_info *devinfo,
+                          struct brw_sampler_prog_key_data *key)
+{
+   /* XXX: Handle texture swizzle on HSW- */
+   for (int i = 0; i < MAX_SAMPLERS; i++) {
+      /* Assume color sampler, no swizzling. (Works for BDW+) */
+      key->swizzles[i] = SWIZZLE_XYZW;
+   }
+}
+
+static void
+populate_vs_prog_key(const struct brw_device_info *devinfo,
+                     struct brw_vs_prog_key *key)
+{
+   memset(key, 0, sizeof(*key));
+
+   populate_sampler_prog_key(devinfo, &key->tex);
+
+   /* XXX: Handle vertex input work-arounds */
+
+   /* XXX: Handle sampler_prog_key */
+}
+
+static void
+populate_gs_prog_key(const struct brw_device_info *devinfo,
+                     struct brw_gs_prog_key *key)
+{
+   memset(key, 0, sizeof(*key));
+
+   populate_sampler_prog_key(devinfo, &key->tex);
+}
+
+static void
+populate_wm_prog_key(const struct brw_device_info *devinfo,
+                     const VkGraphicsPipelineCreateInfo *info,
+                     const struct anv_graphics_pipeline_create_info *extra,
+                     struct brw_wm_prog_key *key)
+{
+   ANV_FROM_HANDLE(anv_render_pass, render_pass, info->renderPass);
+
+   memset(key, 0, sizeof(*key));
+
+   populate_sampler_prog_key(devinfo, &key->tex);
+
+   /* TODO: Fill out key->input_slots_valid */
+
+   /* Vulkan doesn't specify a default */
+   key->high_quality_derivatives = false;
+
+   /* XXX Vulkan doesn't appear to specify */
+   key->clamp_fragment_color = false;
+
+   /* Vulkan always specifies upper-left coordinates */
+   key->drawable_height = 0;
+   key->render_to_fbo = false;
+
+   if (extra && extra->color_attachment_count >= 0) {
+      key->nr_color_regions = extra->color_attachment_count;
+   } else {
+      key->nr_color_regions =
+         render_pass->subpasses[info->subpass].color_count;
+   }
+
+   key->replicate_alpha = key->nr_color_regions > 1 &&
+                          info->pMultisampleState &&
+                          info->pMultisampleState->alphaToCoverageEnable;
+
+   if (info->pMultisampleState && info->pMultisampleState->rasterizationSamples > 1) {
+      /* We should probably pull this out of the shader, but it's fairly
+       * harmless to compute it and then let dead-code take care of it.
+       */
+      key->persample_shading = info->pMultisampleState->sampleShadingEnable;
+      if (key->persample_shading)
+         key->persample_2x = info->pMultisampleState->rasterizationSamples == 2;
+
+      key->compute_pos_offset = info->pMultisampleState->sampleShadingEnable;
+      key->compute_sample_id = info->pMultisampleState->sampleShadingEnable;
+   }
+}
+
+static void
+populate_cs_prog_key(const struct brw_device_info *devinfo,
+                     struct brw_cs_prog_key *key)
+{
+   memset(key, 0, sizeof(*key));
+
+   populate_sampler_prog_key(devinfo, &key->tex);
+}
+
+static nir_shader *
+anv_pipeline_compile(struct anv_pipeline *pipeline,
+                     struct anv_shader_module *module,
+                     const char *entrypoint,
+                     gl_shader_stage stage,
+                     const VkSpecializationInfo *spec_info,
+                     struct brw_stage_prog_data *prog_data,
+                     struct anv_pipeline_bind_map *map)
+{
+   const struct brw_compiler *compiler =
+      pipeline->device->instance->physicalDevice.compiler;
+
+   nir_shader *nir = anv_shader_compile_to_nir(pipeline->device,
+                                               module, entrypoint, stage,
+                                               spec_info);
+   if (nir == NULL)
+      return NULL;
+
+   anv_nir_lower_push_constants(nir, compiler->scalar_stage[stage]);
+
+   /* Figure out the number of parameters */
+   prog_data->nr_params = 0;
+
+   if (nir->num_uniforms > 0) {
+      /* If the shader uses any push constants at all, we'll just give
+       * them the maximum possible number of parameters.
+       */
+      prog_data->nr_params += MAX_PUSH_CONSTANTS_SIZE / sizeof(float);
+   }
+
+   if (pipeline->layout && pipeline->layout->stage[stage].has_dynamic_offsets)
+      prog_data->nr_params += MAX_DYNAMIC_BUFFERS * 2;
+
+   if (nir->info.num_images > 0)
+      prog_data->nr_params += nir->info.num_images * BRW_IMAGE_PARAM_SIZE;
+
+   if (prog_data->nr_params > 0) {
+      /* XXX: I think we're leaking this */
+      prog_data->param = (const union gl_constant_value **)
+         malloc(prog_data->nr_params * sizeof(union gl_constant_value *));
+
+      /* We now set the param values to be offsets into an
+       * anv_push_constants structure.  Since the compiler doesn't
+       * actually dereference any of the gl_constant_value pointers in the
+       * params array, it doesn't really matter what we put here.
+       */
+      struct anv_push_constants *null_data = NULL;
+      if (nir->num_uniforms > 0) {
+         /* Fill out the push constants section of the param array */
+         for (unsigned i = 0; i < MAX_PUSH_CONSTANTS_SIZE / sizeof(float); i++)
+            prog_data->param[i] = (const union gl_constant_value *)
+               &null_data->client_data[i * sizeof(float)];
+      }
+   }
+
+   /* Set up dynamic offsets */
+   anv_nir_apply_dynamic_offsets(pipeline, nir, prog_data);
+
+   /* Apply the actual pipeline layout to UBOs, SSBOs, and textures */
+   if (pipeline->layout)
+      anv_nir_apply_pipeline_layout(pipeline, nir, prog_data, map);
+
+   /* Finish the optimization and compilation process */
+   if (nir->stage == MESA_SHADER_COMPUTE)
+      brw_nir_lower_shared(nir);
+
+   /* nir_lower_io will only handle the push constants; we need to set this
+    * to the full number of possible uniforms.
+    */
+   nir->num_uniforms = prog_data->nr_params * 4;
+
+   return nir;
+}
+
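The comment in anv_pipeline_compile() above describes encoding byte offsets as pointers computed from a NULL anv_push_constants base. As a standalone illustration (not part of this change; the struct and field names below are hypothetical stand-ins), a minimal sketch of the same trick:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for the push-constant data block. */
struct push_data {
   uint8_t client_data[128];
};

int main(void)
{
   struct push_data *null_data = NULL;

   /* Taking the address of a member of a NULL base never dereferences
    * anything; the resulting "pointer" is just the member's byte offset.
    * (A strictly conforming alternative is offsetof().)
    */
   const void *param2 = &null_data->client_data[2 * sizeof(float)];

   printf("param[2] encodes byte offset %zu\n", (size_t)(uintptr_t)param2);
   return 0;
}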
+static void
+anv_fill_binding_table(struct brw_stage_prog_data *prog_data, unsigned bias)
+{
+   prog_data->binding_table.size_bytes = 0;
+   prog_data->binding_table.texture_start = bias;
+   prog_data->binding_table.ubo_start = bias;
+   prog_data->binding_table.ssbo_start = bias;
+   prog_data->binding_table.image_start = bias;
+}
+
+static void
+anv_pipeline_add_compiled_stage(struct anv_pipeline *pipeline,
+                                gl_shader_stage stage,
+                                const struct brw_stage_prog_data *prog_data,
+                                struct anv_pipeline_bind_map *map)
+{
+   struct brw_device_info *devinfo = &pipeline->device->info;
+   uint32_t max_threads[] = {
+      [MESA_SHADER_VERTEX]                  = devinfo->max_vs_threads,
+      [MESA_SHADER_TESS_CTRL]               = devinfo->max_hs_threads,
+      [MESA_SHADER_TESS_EVAL]               = devinfo->max_ds_threads,
+      [MESA_SHADER_GEOMETRY]                = devinfo->max_gs_threads,
+      [MESA_SHADER_FRAGMENT]                = devinfo->max_wm_threads,
+      [MESA_SHADER_COMPUTE]                 = devinfo->max_cs_threads,
+   };
+
+   pipeline->prog_data[stage] = prog_data;
+   pipeline->active_stages |= mesa_to_vk_shader_stage(stage);
+   pipeline->scratch_start[stage] = pipeline->total_scratch;
+   pipeline->total_scratch =
+      align_u32(pipeline->total_scratch, 1024) +
+      prog_data->total_scratch * max_threads[stage];
+   pipeline->bindings[stage] = *map;
+}
+
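A worked example of the scratch-space bookkeeping in anv_pipeline_add_compiled_stage() above, with made-up per-thread scratch sizes and thread counts (the real values come from prog_data and devinfo): each stage records the running total as its start, then the total is aligned up to 1 KB and grows by per-thread scratch times the stage's maximum thread count.

#include <stdint.h>
#include <stdio.h>

static uint32_t
align_u32(uint32_t v, uint32_t a)
{
   return (v + a - 1) & ~(a - 1);
}

int main(void)
{
   uint32_t total_scratch = 0;

   /* Hypothetical VS: 2 KB of scratch per thread, 504 max threads. */
   uint32_t vs_start = total_scratch;
   total_scratch = align_u32(total_scratch, 1024) + 2048 * 504;

   /* Hypothetical FS: 1 KB of scratch per thread, 768 max threads. */
   uint32_t fs_start = total_scratch;
   total_scratch = align_u32(total_scratch, 1024) + 1024 * 768;

   printf("vs_start=%u fs_start=%u total_scratch=%u\n",
          vs_start, fs_start, total_scratch);
   return 0;
}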
+static VkResult
+anv_pipeline_compile_vs(struct anv_pipeline *pipeline,
+                        struct anv_pipeline_cache *cache,
+                        const VkGraphicsPipelineCreateInfo *info,
+                        struct anv_shader_module *module,
+                        const char *entrypoint,
+                        const VkSpecializationInfo *spec_info)
+{
+   const struct brw_compiler *compiler =
+      pipeline->device->instance->physicalDevice.compiler;
+   const struct brw_stage_prog_data *stage_prog_data;
+   struct anv_pipeline_bind_map map;
+   struct brw_vs_prog_key key;
+   uint32_t kernel = NO_KERNEL;
+   unsigned char sha1[20];
+
+   populate_vs_prog_key(&pipeline->device->info, &key);
+
+   if (module->size > 0) {
+      anv_hash_shader(sha1, &key, sizeof(key), module, entrypoint, spec_info);
+      kernel = anv_pipeline_cache_search(cache, sha1, &stage_prog_data, &map);
+   }
+
+   if (kernel == NO_KERNEL) {
+      struct brw_vs_prog_data prog_data = { 0, };
+      struct anv_pipeline_binding surface_to_descriptor[256];
+      struct anv_pipeline_binding sampler_to_descriptor[256];
+
+      map = (struct anv_pipeline_bind_map) {
+         .surface_to_descriptor = surface_to_descriptor,
+         .sampler_to_descriptor = sampler_to_descriptor
+      };
+
+      nir_shader *nir = anv_pipeline_compile(pipeline, module, entrypoint,
+                                             MESA_SHADER_VERTEX, spec_info,
+                                             &prog_data.base.base, &map);
+      if (nir == NULL)
+         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+
+      anv_fill_binding_table(&prog_data.base.base, 0);
+
+      void *mem_ctx = ralloc_context(NULL);
+
+      if (module->nir == NULL)
+         ralloc_steal(mem_ctx, nir);
+
+      prog_data.inputs_read = nir->info.inputs_read;
+
+      brw_compute_vue_map(&pipeline->device->info,
+                          &prog_data.base.vue_map,
+                          nir->info.outputs_written,
+                          nir->info.separate_shader);
+
+      unsigned code_size;
+      const unsigned *shader_code =
+         brw_compile_vs(compiler, NULL, mem_ctx, &key, &prog_data, nir,
+                        NULL, false, -1, &code_size, NULL);
+      if (shader_code == NULL) {
+         ralloc_free(mem_ctx);
+         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+      }
+
+      stage_prog_data = &prog_data.base.base;
+      kernel = anv_pipeline_cache_upload_kernel(cache,
+                                                module->size > 0 ? sha1 : NULL,
+                                                shader_code, code_size,
+                                                &stage_prog_data, sizeof(prog_data),
+                                                &map);
+      ralloc_free(mem_ctx);
+   }
+
+   const struct brw_vs_prog_data *vs_prog_data =
+      (const struct brw_vs_prog_data *) stage_prog_data;
+
+   if (vs_prog_data->base.dispatch_mode == DISPATCH_MODE_SIMD8) {
+      pipeline->vs_simd8 = kernel;
+      pipeline->vs_vec4 = NO_KERNEL;
+   } else {
+      pipeline->vs_simd8 = NO_KERNEL;
+      pipeline->vs_vec4 = kernel;
+   }
+
+   anv_pipeline_add_compiled_stage(pipeline, MESA_SHADER_VERTEX,
+                                   stage_prog_data, &map);
+
+   return VK_SUCCESS;
+}
+
+static VkResult
+anv_pipeline_compile_gs(struct anv_pipeline *pipeline,
+                        struct anv_pipeline_cache *cache,
+                        const VkGraphicsPipelineCreateInfo *info,
+                        struct anv_shader_module *module,
+                        const char *entrypoint,
+                        const VkSpecializationInfo *spec_info)
+{
+   const struct brw_compiler *compiler =
+      pipeline->device->instance->physicalDevice.compiler;
+   const struct brw_stage_prog_data *stage_prog_data;
+   struct anv_pipeline_bind_map map;
+   struct brw_gs_prog_key key;
+   uint32_t kernel = NO_KERNEL;
+   unsigned char sha1[20];
+
+   populate_gs_prog_key(&pipeline->device->info, &key);
+
+   if (module->size > 0) {
+      anv_hash_shader(sha1, &key, sizeof(key), module, entrypoint, spec_info);
+      kernel = anv_pipeline_cache_search(cache, sha1, &stage_prog_data, &map);
+   }
+
+   if (kernel == NO_KERNEL) {
+      struct brw_gs_prog_data prog_data = { 0, };
+      struct anv_pipeline_binding surface_to_descriptor[256];
+      struct anv_pipeline_binding sampler_to_descriptor[256];
+
+      map = (struct anv_pipeline_bind_map) {
+         .surface_to_descriptor = surface_to_descriptor,
+         .sampler_to_descriptor = sampler_to_descriptor
+      };
+
+      nir_shader *nir = anv_pipeline_compile(pipeline, module, entrypoint,
+                                             MESA_SHADER_GEOMETRY, spec_info,
+                                             &prog_data.base.base, &map);
+      if (nir == NULL)
+         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+
+      anv_fill_binding_table(&prog_data.base.base, 0);
+
+      void *mem_ctx = ralloc_context(NULL);
+
+      if (module->nir == NULL)
+         ralloc_steal(mem_ctx, nir);
+
+      brw_compute_vue_map(&pipeline->device->info,
+                          &prog_data.base.vue_map,
+                          nir->info.outputs_written,
+                          nir->info.separate_shader);
+
+      unsigned code_size;
+      const unsigned *shader_code =
+         brw_compile_gs(compiler, NULL, mem_ctx, &key, &prog_data, nir,
+                        NULL, -1, &code_size, NULL);
+      if (shader_code == NULL) {
+         ralloc_free(mem_ctx);
+         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+      }
+
+      /* TODO: SIMD8 GS */
+      stage_prog_data = &prog_data.base.base;
+      kernel = anv_pipeline_cache_upload_kernel(cache,
+                                                module->size > 0 ? sha1 : NULL,
+                                                shader_code, code_size,
+                                                &stage_prog_data, sizeof(prog_data),
+                                                &map);
+
+      ralloc_free(mem_ctx);
+   }
+
+   pipeline->gs_kernel = kernel;
+
+   anv_pipeline_add_compiled_stage(pipeline, MESA_SHADER_GEOMETRY,
+                                   stage_prog_data, &map);
+
+   return VK_SUCCESS;
+}
+
+static VkResult
+anv_pipeline_compile_fs(struct anv_pipeline *pipeline,
+                        struct anv_pipeline_cache *cache,
+                        const VkGraphicsPipelineCreateInfo *info,
+                        const struct anv_graphics_pipeline_create_info *extra,
+                        struct anv_shader_module *module,
+                        const char *entrypoint,
+                        const VkSpecializationInfo *spec_info)
+{
+   const struct brw_compiler *compiler =
+      pipeline->device->instance->physicalDevice.compiler;
+   const struct brw_stage_prog_data *stage_prog_data;
+   struct anv_pipeline_bind_map map;
+   struct brw_wm_prog_key key;
+   uint32_t kernel = NO_KERNEL;
+   unsigned char sha1[20];
+
+   populate_wm_prog_key(&pipeline->device->info, info, extra, &key);
+
+   if (module->size > 0) {
+      anv_hash_shader(sha1, &key, sizeof(key), module, entrypoint, spec_info);
+      kernel = anv_pipeline_cache_search(cache, sha1, &stage_prog_data, &map);
+   }
+
+   if (kernel == NO_KERNEL) {
+      struct brw_wm_prog_data prog_data = { 0, };
+      struct anv_pipeline_binding surface_to_descriptor[256];
+      struct anv_pipeline_binding sampler_to_descriptor[256];
+
+      map = (struct anv_pipeline_bind_map) {
+         .surface_to_descriptor = surface_to_descriptor + 8,
+         .sampler_to_descriptor = sampler_to_descriptor
+      };
+
+      nir_shader *nir = anv_pipeline_compile(pipeline, module, entrypoint,
+                                             MESA_SHADER_FRAGMENT, spec_info,
+                                             &prog_data.base, &map);
+      if (nir == NULL)
+         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+
+      unsigned num_rts = 0;
+      struct anv_pipeline_binding rt_bindings[8];
+      nir_function_impl *impl = nir_shader_get_entrypoint(nir)->impl;
+      nir_foreach_variable_safe(var, &nir->outputs) {
+         if (var->data.location < FRAG_RESULT_DATA0)
+            continue;
+
+         unsigned rt = var->data.location - FRAG_RESULT_DATA0;
+         if (rt >= key.nr_color_regions) {
+            /* Out-of-bounds, throw it away */
+            var->data.mode = nir_var_local;
+            exec_node_remove(&var->node);
+            exec_list_push_tail(&impl->locals, &var->node);
+            continue;
+         }
+
+         /* Give it a new, compacted, location */
+         var->data.location = FRAG_RESULT_DATA0 + num_rts;
+
+         unsigned array_len =
+            glsl_type_is_array(var->type) ? glsl_get_length(var->type) : 1;
+         assert(num_rts + array_len <= 8);
+
+         for (unsigned i = 0; i < array_len; i++) {
+            rt_bindings[num_rts + i] = (struct anv_pipeline_binding) {
+               .set = ANV_DESCRIPTOR_SET_COLOR_ATTACHMENTS,
+               .offset = rt + i,
+            };
+         }
+
+         num_rts += array_len;
+      }
+
+      if (pipeline->use_repclear) {
+         assert(num_rts == 1);
+         key.nr_color_regions = 1;
+      }
+
+      if (num_rts == 0) {
+         /* If we have no render targets, we need a null render target */
+         rt_bindings[0] = (struct anv_pipeline_binding) {
+            .set = ANV_DESCRIPTOR_SET_COLOR_ATTACHMENTS,
+            .offset = UINT16_MAX,
+         };
+         num_rts = 1;
+      }
+
+      assert(num_rts <= 8);
+      map.surface_to_descriptor -= num_rts;
+      map.surface_count += num_rts;
+      assert(map.surface_count <= 256);
+      memcpy(map.surface_to_descriptor, rt_bindings,
+             num_rts * sizeof(*rt_bindings));
+
+      anv_fill_binding_table(&prog_data.base, num_rts);
+
+      void *mem_ctx = ralloc_context(NULL);
+
+      if (module->nir == NULL)
+         ralloc_steal(mem_ctx, nir);
+
+      unsigned code_size;
+      const unsigned *shader_code =
+         brw_compile_fs(compiler, NULL, mem_ctx, &key, &prog_data, nir,
+                        NULL, -1, -1, pipeline->use_repclear, &code_size, NULL);
+      if (shader_code == NULL) {
+         ralloc_free(mem_ctx);
+         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+      }
+
+      stage_prog_data = &prog_data.base;
+      kernel = anv_pipeline_cache_upload_kernel(cache,
+                                                module->size > 0 ? sha1 : NULL,
+                                                shader_code, code_size,
+                                                &stage_prog_data, sizeof(prog_data),
+                                                &map);
+
+      ralloc_free(mem_ctx);
+   }
+
+   const struct brw_wm_prog_data *wm_prog_data =
+      (const struct brw_wm_prog_data *) stage_prog_data;
+
+   if (wm_prog_data->no_8)
+      pipeline->ps_simd8 = NO_KERNEL;
+   else
+      pipeline->ps_simd8 = kernel;
+
+   if (wm_prog_data->no_8 || wm_prog_data->prog_offset_16) {
+      pipeline->ps_simd16 = kernel + wm_prog_data->prog_offset_16;
+   } else {
+      pipeline->ps_simd16 = NO_KERNEL;
+   }
+
+   pipeline->ps_ksp2 = 0;
+   pipeline->ps_grf_start2 = 0;
+   if (pipeline->ps_simd8 != NO_KERNEL) {
+      pipeline->ps_ksp0 = pipeline->ps_simd8;
+      pipeline->ps_grf_start0 = wm_prog_data->base.dispatch_grf_start_reg;
+      if (pipeline->ps_simd16 != NO_KERNEL) {
+         pipeline->ps_ksp2 = pipeline->ps_simd16;
+         pipeline->ps_grf_start2 = wm_prog_data->dispatch_grf_start_reg_16;
+      }
+   } else if (pipeline->ps_simd16 != NO_KERNEL) {
+      pipeline->ps_ksp0 = pipeline->ps_simd16;
+      pipeline->ps_grf_start0 = wm_prog_data->dispatch_grf_start_reg_16;
+   }
+
+   anv_pipeline_add_compiled_stage(pipeline, MESA_SHADER_FRAGMENT,
+                                   stage_prog_data, &map);
+
+   return VK_SUCCESS;
+}
+
+VkResult
+anv_pipeline_compile_cs(struct anv_pipeline *pipeline,
+                        struct anv_pipeline_cache *cache,
+                        const VkComputePipelineCreateInfo *info,
+                        struct anv_shader_module *module,
+                        const char *entrypoint,
+                        const VkSpecializationInfo *spec_info)
+{
+   const struct brw_compiler *compiler =
+      pipeline->device->instance->physicalDevice.compiler;
+   const struct brw_stage_prog_data *stage_prog_data;
+   struct anv_pipeline_bind_map map;
+   struct brw_cs_prog_key key;
+   uint32_t kernel = NO_KERNEL;
+   unsigned char sha1[20];
+
+   populate_cs_prog_key(&pipeline->device->info, &key);
+
+   if (module->size > 0) {
+      anv_hash_shader(sha1, &key, sizeof(key), module, entrypoint, spec_info);
+      kernel = anv_pipeline_cache_search(cache, sha1, &stage_prog_data, &map);
+   }
+
+   if (module->size == 0 || kernel == NO_KERNEL) {
+      struct brw_cs_prog_data prog_data = { 0, };
+      struct anv_pipeline_binding surface_to_descriptor[256];
+      struct anv_pipeline_binding sampler_to_descriptor[256];
+
+      map = (struct anv_pipeline_bind_map) {
+         .surface_to_descriptor = surface_to_descriptor,
+         .sampler_to_descriptor = sampler_to_descriptor
+      };
+
+      nir_shader *nir = anv_pipeline_compile(pipeline, module, entrypoint,
+                                             MESA_SHADER_COMPUTE, spec_info,
+                                             &prog_data.base, &map);
+      if (nir == NULL)
+         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+
+      anv_fill_binding_table(&prog_data.base, 1);
+
+      prog_data.base.total_shared = nir->num_shared;
+
+      void *mem_ctx = ralloc_context(NULL);
+
+      if (module->nir == NULL)
+         ralloc_steal(mem_ctx, nir);
+
+      unsigned code_size;
+      const unsigned *shader_code =
+         brw_compile_cs(compiler, NULL, mem_ctx, &key, &prog_data, nir,
+                        -1, &code_size, NULL);
+      if (shader_code == NULL) {
+         ralloc_free(mem_ctx);
+         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+      }
+
+      stage_prog_data = &prog_data.base;
+      kernel = anv_pipeline_cache_upload_kernel(cache,
+                                                module->size > 0 ? sha1 : NULL,
+                                                shader_code, code_size,
+                                                &stage_prog_data, sizeof(prog_data),
+                                                &map);
+
+      ralloc_free(mem_ctx);
+   }
+
+   pipeline->cs_simd = kernel;
+
+   anv_pipeline_add_compiled_stage(pipeline, MESA_SHADER_COMPUTE,
+                                   stage_prog_data, &map);
+
+   return VK_SUCCESS;
+}
+
+static void
+gen7_compute_urb_partition(struct anv_pipeline *pipeline)
+{
+   const struct brw_device_info *devinfo = &pipeline->device->info;
+   bool vs_present = pipeline->active_stages & VK_SHADER_STAGE_VERTEX_BIT;
+   unsigned vs_size = vs_present ?
+      get_vs_prog_data(pipeline)->base.urb_entry_size : 1;
+   unsigned vs_entry_size_bytes = vs_size * 64;
+   bool gs_present = pipeline->active_stages & VK_SHADER_STAGE_GEOMETRY_BIT;
+   unsigned gs_size = gs_present ?
+      get_gs_prog_data(pipeline)->base.urb_entry_size : 1;
+   unsigned gs_entry_size_bytes = gs_size * 64;
+
+   /* From p35 of the Ivy Bridge PRM (section 1.7.1: 3DSTATE_URB_GS):
+    *
+    *     VS Number of URB Entries must be divisible by 8 if the VS URB Entry
+    *     Allocation Size is less than 9 512-bit URB entries.
+    *
+    * Similar text exists for GS.
+    */
+   unsigned vs_granularity = (vs_size < 9) ? 8 : 1;
+   unsigned gs_granularity = (gs_size < 9) ? 8 : 1;
+
+   /* URB allocations must be done in 8k chunks. */
+   unsigned chunk_size_bytes = 8192;
+
+   /* Determine the size of the URB in chunks. */
+   unsigned urb_chunks = devinfo->urb.size * 1024 / chunk_size_bytes;
+
+   /* Reserve space for push constants */
+   unsigned push_constant_kb;
+   if (pipeline->device->info.gen >= 8)
+      push_constant_kb = 32;
+   else if (pipeline->device->info.is_haswell)
+      push_constant_kb = pipeline->device->info.gt == 3 ? 32 : 16;
+   else
+      push_constant_kb = 16;
+
+   unsigned push_constant_bytes = push_constant_kb * 1024;
+   unsigned push_constant_chunks =
+      push_constant_bytes / chunk_size_bytes;
+
+   /* Initially, assign each stage the minimum amount of URB space it needs,
+    * and make a note of how much additional space it "wants" (the amount of
+    * additional space it could actually make use of).
+    */
+
+   /* VS has a lower limit on the number of URB entries */
+   unsigned vs_chunks =
+      ALIGN(devinfo->urb.min_vs_entries * vs_entry_size_bytes,
+            chunk_size_bytes) / chunk_size_bytes;
+   unsigned vs_wants =
+      ALIGN(devinfo->urb.max_vs_entries * vs_entry_size_bytes,
+            chunk_size_bytes) / chunk_size_bytes - vs_chunks;
+
+   unsigned gs_chunks = 0;
+   unsigned gs_wants = 0;
+   if (gs_present) {
+      /* There are two constraints on the minimum amount of URB space we can
+       * allocate:
+       *
+       * (1) We need room for at least 2 URB entries, since we always operate
+       * the GS in DUAL_OBJECT mode.
+       *
+       * (2) We can't allocate less than nr_gs_entries_granularity.
+       */
+      gs_chunks = ALIGN(MAX2(gs_granularity, 2) * gs_entry_size_bytes,
+                        chunk_size_bytes) / chunk_size_bytes;
+      gs_wants =
+         ALIGN(devinfo->urb.max_gs_entries * gs_entry_size_bytes,
+               chunk_size_bytes) / chunk_size_bytes - gs_chunks;
+   }
+
+   /* There should always be enough URB space to satisfy the minimum
+    * requirements of each stage.
+    */
+   unsigned total_needs = push_constant_chunks + vs_chunks + gs_chunks;
+   assert(total_needs <= urb_chunks);
+
+   /* Mete out remaining space (if any) in proportion to "wants". */
+   unsigned total_wants = vs_wants + gs_wants;
+   unsigned remaining_space = urb_chunks - total_needs;
+   if (remaining_space > total_wants)
+      remaining_space = total_wants;
+   if (remaining_space > 0) {
+      unsigned vs_additional = (unsigned)
+         round(vs_wants * (((double) remaining_space) / total_wants));
+      vs_chunks += vs_additional;
+      remaining_space -= vs_additional;
+      gs_chunks += remaining_space;
+   }
+
+   /* Sanity check that we haven't over-allocated. */
+   assert(push_constant_chunks + vs_chunks + gs_chunks <= urb_chunks);
+
+   /* Finally, compute the number of entries that can fit in the space
+    * allocated to each stage.
+    */
+   unsigned nr_vs_entries = vs_chunks * chunk_size_bytes / vs_entry_size_bytes;
+   unsigned nr_gs_entries = gs_chunks * chunk_size_bytes / gs_entry_size_bytes;
+
+   /* Since we rounded up when computing *_wants, this may be slightly more
+    * than the maximum allowed amount, so correct for that.
+    */
+   nr_vs_entries = MIN2(nr_vs_entries, devinfo->urb.max_vs_entries);
+   nr_gs_entries = MIN2(nr_gs_entries, devinfo->urb.max_gs_entries);
+
+   /* Ensure that we program a multiple of the granularity. */
+   nr_vs_entries = ROUND_DOWN_TO(nr_vs_entries, vs_granularity);
+   nr_gs_entries = ROUND_DOWN_TO(nr_gs_entries, gs_granularity);
+
+   /* Finally, sanity check to make sure we have at least the minimum number
+    * of entries needed for each stage.
+    */
+   assert(nr_vs_entries >= devinfo->urb.min_vs_entries);
+   if (gs_present)
+      assert(nr_gs_entries >= 2);
+
+   /* Lay out the URB in the following order:
+    * - push constants
+    * - VS
+    * - GS
+    */
+   pipeline->urb.start[MESA_SHADER_VERTEX] = push_constant_chunks;
+   pipeline->urb.size[MESA_SHADER_VERTEX] = vs_size;
+   pipeline->urb.entries[MESA_SHADER_VERTEX] = nr_vs_entries;
+
+   pipeline->urb.start[MESA_SHADER_GEOMETRY] = push_constant_chunks + vs_chunks;
+   pipeline->urb.size[MESA_SHADER_GEOMETRY] = gs_size;
+   pipeline->urb.entries[MESA_SHADER_GEOMETRY] = nr_gs_entries;
+
+   pipeline->urb.start[MESA_SHADER_TESS_CTRL] = push_constant_chunks;
+   pipeline->urb.size[MESA_SHADER_TESS_CTRL] = 1;
+   pipeline->urb.entries[MESA_SHADER_TESS_CTRL] = 0;
+
+   pipeline->urb.start[MESA_SHADER_TESS_EVAL] = push_constant_chunks;
+   pipeline->urb.size[MESA_SHADER_TESS_EVAL] = 1;
+   pipeline->urb.entries[MESA_SHADER_TESS_EVAL] = 0;
+
+   const unsigned stages =
+      _mesa_bitcount(pipeline->active_stages & VK_SHADER_STAGE_ALL_GRAPHICS);
+   unsigned size_per_stage = stages ? (push_constant_kb / stages) : 0;
+   unsigned used_kb = 0;
+
+   /* Broadwell+ and Haswell gt3 require that the push constant sizes be in
+    * units of 2KB.  Incidentally, these are the same platforms that have
+    * 32KB worth of push constant space.
+    */
+   if (push_constant_kb == 32)
+      size_per_stage &= ~1u;
+
+   for (int i = MESA_SHADER_VERTEX; i < MESA_SHADER_FRAGMENT; i++) {
+      pipeline->urb.push_size[i] =
+         (pipeline->active_stages & (1 << i)) ? size_per_stage : 0;
+      used_kb += pipeline->urb.push_size[i];
+      assert(used_kb <= push_constant_kb);
+   }
+
+   pipeline->urb.push_size[MESA_SHADER_FRAGMENT] =
+      push_constant_kb - used_kb;
+}
+
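The needs/wants scheme in gen7_compute_urb_partition() above can be summarized with made-up numbers in a standalone sketch (the URB size, entry sizes, and limits below are hypothetical; link with -lm for round()):

#include <math.h>
#include <stdio.h>

int main(void)
{
   const unsigned chunk_size_bytes = 8192;            /* URB allocation granule */
   const unsigned urb_chunks = 256 * 1024 / chunk_size_bytes;      /* e.g. 256 KB URB */
   const unsigned push_constant_chunks = 16 * 1024 / chunk_size_bytes;

   /* Minimum space each stage needs and the extra it "wants", in chunks. */
   unsigned vs_chunks = 4, vs_wants = 20;
   unsigned gs_chunks = 2, gs_wants = 6;

   unsigned total_needs = push_constant_chunks + vs_chunks + gs_chunks;
   unsigned total_wants = vs_wants + gs_wants;
   unsigned remaining = urb_chunks - total_needs;
   if (remaining > total_wants)
      remaining = total_wants;

   if (remaining > 0) {
      /* Mete out the surplus in proportion to wants; GS takes the rest. */
      unsigned vs_extra =
         (unsigned) round(vs_wants * ((double) remaining / total_wants));
      vs_chunks += vs_extra;
      gs_chunks += remaining - vs_extra;
   }

   printf("push=%u VS=%u GS=%u chunks (URB has %u)\n",
          push_constant_chunks, vs_chunks, gs_chunks, urb_chunks);
   return 0;
}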
+static void
+anv_pipeline_init_dynamic_state(struct anv_pipeline *pipeline,
+                                const VkGraphicsPipelineCreateInfo *pCreateInfo)
+{
+   anv_cmd_dirty_mask_t states = ANV_CMD_DIRTY_DYNAMIC_ALL;
+   ANV_FROM_HANDLE(anv_render_pass, pass, pCreateInfo->renderPass);
+   struct anv_subpass *subpass = &pass->subpasses[pCreateInfo->subpass];
+
+   pipeline->dynamic_state = default_dynamic_state;
+
+   if (pCreateInfo->pDynamicState) {
+      /* Remove all of the states that are marked as dynamic */
+      uint32_t count = pCreateInfo->pDynamicState->dynamicStateCount;
+      for (uint32_t s = 0; s < count; s++)
+         states &= ~(1 << pCreateInfo->pDynamicState->pDynamicStates[s]);
+   }
+
+   struct anv_dynamic_state *dynamic = &pipeline->dynamic_state;
+
+   dynamic->viewport.count = pCreateInfo->pViewportState->viewportCount;
+   if (states & (1 << VK_DYNAMIC_STATE_VIEWPORT)) {
+      typed_memcpy(dynamic->viewport.viewports,
+                   pCreateInfo->pViewportState->pViewports,
+                   pCreateInfo->pViewportState->viewportCount);
+   }
+
+   dynamic->scissor.count = pCreateInfo->pViewportState->scissorCount;
+   if (states & (1 << VK_DYNAMIC_STATE_SCISSOR)) {
+      typed_memcpy(dynamic->scissor.scissors,
+                   pCreateInfo->pViewportState->pScissors,
+                   pCreateInfo->pViewportState->scissorCount);
+   }
+
+   if (states & (1 << VK_DYNAMIC_STATE_LINE_WIDTH)) {
+      assert(pCreateInfo->pRasterizationState);
+      dynamic->line_width = pCreateInfo->pRasterizationState->lineWidth;
+   }
+
+   if (states & (1 << VK_DYNAMIC_STATE_DEPTH_BIAS)) {
+      assert(pCreateInfo->pRasterizationState);
+      dynamic->depth_bias.bias =
+         pCreateInfo->pRasterizationState->depthBiasConstantFactor;
+      dynamic->depth_bias.clamp =
+         pCreateInfo->pRasterizationState->depthBiasClamp;
+      dynamic->depth_bias.slope =
+         pCreateInfo->pRasterizationState->depthBiasSlopeFactor;
+   }
+
+   if (states & (1 << VK_DYNAMIC_STATE_BLEND_CONSTANTS)) {
+      assert(pCreateInfo->pColorBlendState);
+      typed_memcpy(dynamic->blend_constants,
+                   pCreateInfo->pColorBlendState->blendConstants, 4);
+   }
+
+   /* If there is no depthstencil attachment, then don't read
+    * pDepthStencilState. The Vulkan spec states that pDepthStencilState may
+    * be NULL in this case. Even if pDepthStencilState is non-NULL, there is
+    * no need to override the depthstencil defaults in
+    * anv_pipeline::dynamic_state when there is no depthstencil attachment.
+    *
+    * From the Vulkan spec (20 Oct 2015, git-aa308cb):
+    *
+    *    pDepthStencilState [...] may only be NULL if renderPass and subpass
+    *    specify a subpass that has no depth/stencil attachment.
+    */
+   if (subpass->depth_stencil_attachment != VK_ATTACHMENT_UNUSED) {
+      if (states & (1 << VK_DYNAMIC_STATE_DEPTH_BOUNDS)) {
+         assert(pCreateInfo->pDepthStencilState);
+         dynamic->depth_bounds.min =
+            pCreateInfo->pDepthStencilState->minDepthBounds;
+         dynamic->depth_bounds.max =
+            pCreateInfo->pDepthStencilState->maxDepthBounds;
+      }
+
+      if (states & (1 << VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK)) {
+         assert(pCreateInfo->pDepthStencilState);
+         dynamic->stencil_compare_mask.front =
+            pCreateInfo->pDepthStencilState->front.compareMask;
+         dynamic->stencil_compare_mask.back =
+            pCreateInfo->pDepthStencilState->back.compareMask;
+      }
+
+      if (states & (1 << VK_DYNAMIC_STATE_STENCIL_WRITE_MASK)) {
+         assert(pCreateInfo->pDepthStencilState);
+         dynamic->stencil_write_mask.front =
+            pCreateInfo->pDepthStencilState->front.writeMask;
+         dynamic->stencil_write_mask.back =
+            pCreateInfo->pDepthStencilState->back.writeMask;
+      }
+
+      if (states & (1 << VK_DYNAMIC_STATE_STENCIL_REFERENCE)) {
+         assert(pCreateInfo->pDepthStencilState);
+         dynamic->stencil_reference.front =
+            pCreateInfo->pDepthStencilState->front.reference;
+         dynamic->stencil_reference.back =
+            pCreateInfo->pDepthStencilState->back.reference;
+      }
+   }
+
+   pipeline->dynamic_state_mask = states;
+}
+
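The dynamic-state handling in anv_pipeline_init_dynamic_state() above starts from an all-dirty mask and clears one bit per state the application declared dynamic; only states still set are baked from pCreateInfo. A minimal standalone sketch of that bookkeeping (the enum values here are hypothetical stand-ins for VK_DYNAMIC_STATE_*):

#include <stdint.h>
#include <stdio.h>

enum { DYN_VIEWPORT, DYN_SCISSOR, DYN_LINE_WIDTH, DYN_COUNT };

int main(void)
{
   uint32_t states = (1u << DYN_COUNT) - 1;   /* start with every state "baked" */

   /* The application declared viewport and scissor dynamic. */
   const uint32_t app_dynamic[] = { DYN_VIEWPORT, DYN_SCISSOR };
   for (unsigned i = 0; i < 2; i++)
      states &= ~(1u << app_dynamic[i]);

   if (states & (1u << DYN_LINE_WIDTH))
      printf("line width is baked into the pipeline state\n");
   if (!(states & (1u << DYN_VIEWPORT)))
      printf("viewport comes from vkCmdSetViewport at record time\n");

   return 0;
}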
+static void
+anv_pipeline_validate_create_info(const VkGraphicsPipelineCreateInfo *info)
+{
+   struct anv_render_pass *renderpass = NULL;
+   struct anv_subpass *subpass = NULL;
+
+   /* Assert that all required members of VkGraphicsPipelineCreateInfo are
+    * present, as explained by the Vulkan (20 Oct 2015, git-aa308cb), Section
+    * 4.2 Graphics Pipeline.
+    */
+   assert(info->sType == VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO);
+
+   renderpass = anv_render_pass_from_handle(info->renderPass);
+   assert(renderpass);
+
+   if (renderpass != &anv_meta_dummy_renderpass) {
+      assert(info->subpass < renderpass->subpass_count);
+      subpass = &renderpass->subpasses[info->subpass];
+   }
+
+   assert(info->stageCount >= 1);
+   assert(info->pVertexInputState);
+   assert(info->pInputAssemblyState);
+   assert(info->pViewportState);
+   assert(info->pRasterizationState);
+
+   if (subpass && subpass->depth_stencil_attachment != VK_ATTACHMENT_UNUSED)
+      assert(info->pDepthStencilState);
+
+   if (subpass && subpass->color_count > 0)
+      assert(info->pColorBlendState);
+
+   for (uint32_t i = 0; i < info->stageCount; ++i) {
+      switch (info->pStages[i].stage) {
+      case VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT:
+      case VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT:
+         assert(info->pTessellationState);
+         break;
+      default:
+         break;
+      }
+   }
+}
+
+VkResult
+anv_pipeline_init(struct anv_pipeline *pipeline,
+                  struct anv_device *device,
+                  struct anv_pipeline_cache *cache,
+                  const VkGraphicsPipelineCreateInfo *pCreateInfo,
+                  const struct anv_graphics_pipeline_create_info *extra,
+                  const VkAllocationCallbacks *alloc)
+{
+   VkResult result;
+
+   anv_validate {
+      anv_pipeline_validate_create_info(pCreateInfo);
+   }
+
+   if (alloc == NULL)
+      alloc = &device->alloc;
+
+   pipeline->device = device;
+   pipeline->layout = anv_pipeline_layout_from_handle(pCreateInfo->layout);
+
+   result = anv_reloc_list_init(&pipeline->batch_relocs, alloc);
+   if (result != VK_SUCCESS)
+      return result;
+
+   pipeline->batch.alloc = alloc;
+   pipeline->batch.next = pipeline->batch.start = pipeline->batch_data;
+   pipeline->batch.end = pipeline->batch.start + sizeof(pipeline->batch_data);
+   pipeline->batch.relocs = &pipeline->batch_relocs;
+
+   anv_pipeline_init_dynamic_state(pipeline, pCreateInfo);
+
+   pipeline->use_repclear = extra && extra->use_repclear;
+
+   /* When we free the pipeline, we detect stages based on the NULL status
+    * of various prog_data pointers.  Make them NULL by default.
+    */
+   memset(pipeline->prog_data, 0, sizeof(pipeline->prog_data));
+   memset(pipeline->scratch_start, 0, sizeof(pipeline->scratch_start));
+   memset(pipeline->bindings, 0, sizeof(pipeline->bindings));
+
+   pipeline->vs_simd8 = NO_KERNEL;
+   pipeline->vs_vec4 = NO_KERNEL;
+   pipeline->gs_kernel = NO_KERNEL;
+   pipeline->ps_ksp0 = NO_KERNEL;
+
+   pipeline->active_stages = 0;
+   pipeline->total_scratch = 0;
+
+   const VkPipelineShaderStageCreateInfo *pStages[MESA_SHADER_STAGES] = { 0, };
+   struct anv_shader_module *modules[MESA_SHADER_STAGES] = { 0, };
+   for (uint32_t i = 0; i < pCreateInfo->stageCount; i++) {
+      gl_shader_stage stage = ffs(pCreateInfo->pStages[i].stage) - 1;
+      pStages[stage] = &pCreateInfo->pStages[i];
+      modules[stage] = anv_shader_module_from_handle(pStages[stage]->module);
+   }
+
+   if (modules[MESA_SHADER_VERTEX]) {
+      anv_pipeline_compile_vs(pipeline, cache, pCreateInfo,
+                              modules[MESA_SHADER_VERTEX],
+                              pStages[MESA_SHADER_VERTEX]->pName,
+                              pStages[MESA_SHADER_VERTEX]->pSpecializationInfo);
+   }
+
+   if (modules[MESA_SHADER_TESS_CTRL] || modules[MESA_SHADER_TESS_EVAL])
+      anv_finishme("no tessellation support");
+
+   if (modules[MESA_SHADER_GEOMETRY]) {
+      anv_pipeline_compile_gs(pipeline, cache, pCreateInfo,
+                              modules[MESA_SHADER_GEOMETRY],
+                              pStages[MESA_SHADER_GEOMETRY]->pName,
+                              pStages[MESA_SHADER_GEOMETRY]->pSpecializationInfo);
+   }
+
+   if (modules[MESA_SHADER_FRAGMENT]) {
+      anv_pipeline_compile_fs(pipeline, cache, pCreateInfo, extra,
+                              modules[MESA_SHADER_FRAGMENT],
+                              pStages[MESA_SHADER_FRAGMENT]->pName,
+                              pStages[MESA_SHADER_FRAGMENT]->pSpecializationInfo);
+   }
+
+   if (!(pipeline->active_stages & VK_SHADER_STAGE_VERTEX_BIT)) {
+      /* Vertex is only optional if disable_vs is set */
+      assert(extra->disable_vs);
+   }
+
+   gen7_compute_urb_partition(pipeline);
+
+   const VkPipelineVertexInputStateCreateInfo *vi_info =
+      pCreateInfo->pVertexInputState;
+
+   uint64_t inputs_read;
+   if (extra && extra->disable_vs) {
+      /* If the VS is disabled, just assume the user knows what they're
+       * doing and apply the layout blindly.  This can only come from
+       * meta, so this *should* be safe.
+       */
+      inputs_read = ~0ull;
+   } else {
+      inputs_read = get_vs_prog_data(pipeline)->inputs_read;
+   }
+
+   pipeline->vb_used = 0;
+   for (uint32_t i = 0; i < vi_info->vertexAttributeDescriptionCount; i++) {
+      const VkVertexInputAttributeDescription *desc =
+         &vi_info->pVertexAttributeDescriptions[i];
+
+      if (inputs_read & (1 << (VERT_ATTRIB_GENERIC0 + desc->location)))
+         pipeline->vb_used |= 1 << desc->binding;
+   }
+
+   for (uint32_t i = 0; i < vi_info->vertexBindingDescriptionCount; i++) {
+      const VkVertexInputBindingDescription *desc =
+         &vi_info->pVertexBindingDescriptions[i];
+
+      pipeline->binding_stride[desc->binding] = desc->stride;
+
+      /* Step rate is programmed per vertex element (attribute), not
+       * binding.  Set up a map of which bindings step per instance, for
+       * reference by vertex element setup.
+       */
+      switch (desc->inputRate) {
+      default:
+      case VK_VERTEX_INPUT_RATE_VERTEX:
+         pipeline->instancing_enable[desc->binding] = false;
+         break;
+      case VK_VERTEX_INPUT_RATE_INSTANCE:
+         pipeline->instancing_enable[desc->binding] = true;
+         break;
+      }
+   }
+
+   const VkPipelineInputAssemblyStateCreateInfo *ia_info =
+      pCreateInfo->pInputAssemblyState;
+   pipeline->primitive_restart = ia_info->primitiveRestartEnable;
+   pipeline->topology = vk_to_gen_primitive_type[ia_info->topology];
+
+   if (extra && extra->use_rectlist)
+      pipeline->topology = _3DPRIM_RECTLIST;
+
+   while (anv_block_pool_size(&device->scratch_block_pool) <
+          pipeline->total_scratch)
+      anv_block_pool_alloc(&device->scratch_block_pool);
+
+   return VK_SUCCESS;
+}
+
+VkResult
+anv_graphics_pipeline_create(
+   VkDevice _device,
+   VkPipelineCache _cache,
+   const VkGraphicsPipelineCreateInfo *pCreateInfo,
+   const struct anv_graphics_pipeline_create_info *extra,
+   const VkAllocationCallbacks *pAllocator,
+   VkPipeline *pPipeline)
+{
+   ANV_FROM_HANDLE(anv_device, device, _device);
+   ANV_FROM_HANDLE(anv_pipeline_cache, cache, _cache);
+
+   if (cache == NULL)
+      cache = &device->default_pipeline_cache;
+
+   switch (device->info.gen) {
+   case 7:
+      if (device->info.is_haswell)
+         return gen75_graphics_pipeline_create(_device, cache, pCreateInfo, extra, pAllocator, pPipeline);
+      else
+         return gen7_graphics_pipeline_create(_device, cache, pCreateInfo, extra, pAllocator, pPipeline);
+   case 8:
+      return gen8_graphics_pipeline_create(_device, cache, pCreateInfo, extra, pAllocator, pPipeline);
+   case 9:
+      return gen9_graphics_pipeline_create(_device, cache, pCreateInfo, extra, pAllocator, pPipeline);
+   default:
+      unreachable("unsupported gen\n");
+   }
+}
+
+VkResult anv_CreateGraphicsPipelines(
+    VkDevice                                    _device,
+    VkPipelineCache                             pipelineCache,
+    uint32_t                                    count,
+    const VkGraphicsPipelineCreateInfo*         pCreateInfos,
+    const VkAllocationCallbacks*                pAllocator,
+    VkPipeline*                                 pPipelines)
+{
+   VkResult result = VK_SUCCESS;
+
+   unsigned i = 0;
+   for (; i < count; i++) {
+      result = anv_graphics_pipeline_create(_device,
+                                            pipelineCache,
+                                            &pCreateInfos[i],
+                                            NULL, pAllocator, &pPipelines[i]);
+      if (result != VK_SUCCESS) {
+         for (unsigned j = 0; j < i; j++) {
+            anv_DestroyPipeline(_device, pPipelines[j], pAllocator);
+         }
+
+         return result;
+      }
+   }
+
+   return VK_SUCCESS;
+}
+
+static VkResult anv_compute_pipeline_create(
+    VkDevice                                    _device,
+    VkPipelineCache                             _cache,
+    const VkComputePipelineCreateInfo*          pCreateInfo,
+    const VkAllocationCallbacks*                pAllocator,
+    VkPipeline*                                 pPipeline)
+{
+   ANV_FROM_HANDLE(anv_device, device, _device);
+   ANV_FROM_HANDLE(anv_pipeline_cache, cache, _cache);
+
+   if (cache == NULL)
+      cache = &device->default_pipeline_cache;
+
+   switch (device->info.gen) {
+   case 7:
+      if (device->info.is_haswell)
+         return gen75_compute_pipeline_create(_device, cache, pCreateInfo, pAllocator, pPipeline);
+      else
+         return gen7_compute_pipeline_create(_device, cache, pCreateInfo, pAllocator, pPipeline);
+   case 8:
+      return gen8_compute_pipeline_create(_device, cache, pCreateInfo, pAllocator, pPipeline);
+   case 9:
+      return gen9_compute_pipeline_create(_device, cache, pCreateInfo, pAllocator, pPipeline);
+   default:
+      unreachable("unsupported gen\n");
+   }
+}
+
+VkResult anv_CreateComputePipelines(
+    VkDevice                                    _device,
+    VkPipelineCache                             pipelineCache,
+    uint32_t                                    count,
+    const VkComputePipelineCreateInfo*          pCreateInfos,
+    const VkAllocationCallbacks*                pAllocator,
+    VkPipeline*                                 pPipelines)
+{
+   VkResult result = VK_SUCCESS;
+
+   unsigned i = 0;
+   for (; i < count; i++) {
+      result = anv_compute_pipeline_create(_device, pipelineCache,
+                                           &pCreateInfos[i],
+                                           pAllocator, &pPipelines[i]);
+      if (result != VK_SUCCESS) {
+         for (unsigned j = 0; j < i; j++) {
+            anv_DestroyPipeline(_device, pPipelines[j], pAllocator);
+         }
+
+         return result;
+      }
+   }
+
+   return VK_SUCCESS;
+}
diff --git a/src/intel/vulkan/anv_pipeline_cache.c b/src/intel/vulkan/anv_pipeline_cache.c
new file mode 100644 (file)
index 0000000..62dbe3e
--- /dev/null
@@ -0,0 +1,518 @@
+/*
+ * Copyright © 2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "util/mesa-sha1.h"
+#include "util/debug.h"
+#include "anv_private.h"
+
+/* Remaining work:
+ *
+ * - Compact binding table layout so it's tight and not dependent on
+ *   descriptor set layout.
+ *
+ * - Review prog_data struct for size and cacheability: struct
+ *   brw_stage_prog_data has binding_table, which uses a lot of uint32_t for
+ *   8-bit quantities, etc.; param, pull_param, and image_params are pointers,
+ *   but we just need the compaction map.  Use bit fields for all bools,
+ *   e.g. dual_src_blend.
+ */
+
+void
+anv_pipeline_cache_init(struct anv_pipeline_cache *cache,
+                        struct anv_device *device)
+{
+   cache->device = device;
+   anv_state_stream_init(&cache->program_stream,
+                         &device->instruction_block_pool);
+   pthread_mutex_init(&cache->mutex, NULL);
+
+   cache->kernel_count = 0;
+   cache->total_size = 0;
+   cache->table_size = 1024;
+   const size_t byte_size = cache->table_size * sizeof(cache->hash_table[0]);
+   cache->hash_table = malloc(byte_size);
+
+   /* We don't consider allocation failure fatal; we just start with a
+    * 0-sized cache.
+    */
+   if (cache->hash_table == NULL ||
+       !env_var_as_boolean("ANV_ENABLE_PIPELINE_CACHE", true))
+      cache->table_size = 0;
+   else
+      memset(cache->hash_table, 0xff, byte_size);
+}
+
+void
+anv_pipeline_cache_finish(struct anv_pipeline_cache *cache)
+{
+   anv_state_stream_finish(&cache->program_stream);
+   pthread_mutex_destroy(&cache->mutex);
+   free(cache->hash_table);
+}
+
+struct cache_entry {
+   unsigned char sha1[20];
+   uint32_t prog_data_size;
+   uint32_t kernel_size;
+   uint32_t surface_count;
+   uint32_t sampler_count;
+   uint32_t image_count;
+
+   char prog_data[0];
+
+   /* The kernel follows prog_data at the next 64-byte-aligned address. */
+};
+
+static uint32_t
+entry_size(struct cache_entry *entry)
+{
+   /* This returns the number of bytes needed to serialize an entry, which
+    * doesn't include the alignment padding bytes.
+    */
+
+   const uint32_t map_size =
+      entry->surface_count * sizeof(struct anv_pipeline_binding) +
+      entry->sampler_count * sizeof(struct anv_pipeline_binding);
+
+   return sizeof(*entry) + entry->prog_data_size + map_size;
+}
+
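Given struct cache_entry and entry_size() above, a serialized entry is the fixed header, then prog_data, then the surface and sampler binding maps, with the kernel starting at the next 64-byte boundary. A small standalone sketch of that layout arithmetic (the sizes below are made up, and sizeof(struct anv_pipeline_binding) is assumed to be 8 bytes purely for illustration):

#include <stdint.h>
#include <stdio.h>

static uint32_t
align_u32(uint32_t v, uint32_t a)
{
   return (v + a - 1) & ~(a - 1);
}

int main(void)
{
   const uint32_t header_size    = 20 + 5 * sizeof(uint32_t); /* sha1 + 5 counters */
   const uint32_t prog_data_size = 200;                       /* hypothetical */
   const uint32_t binding_size   = 8;     /* assumed sizeof(anv_pipeline_binding) */
   const uint32_t surface_count  = 4, sampler_count = 2;      /* hypothetical */

   uint32_t size = header_size + prog_data_size +
                   (surface_count + sampler_count) * binding_size;
   uint32_t kernel_offset = align_u32(size, 64);

   printf("entry_size=%u bytes, kernel at offset %u\n", size, kernel_offset);
   return 0;
}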
+void
+anv_hash_shader(unsigned char *hash, const void *key, size_t key_size,
+                struct anv_shader_module *module,
+                const char *entrypoint,
+                const VkSpecializationInfo *spec_info)
+{
+   struct mesa_sha1 *ctx;
+
+   ctx = _mesa_sha1_init();
+   _mesa_sha1_update(ctx, key, key_size);
+   _mesa_sha1_update(ctx, module->sha1, sizeof(module->sha1));
+   _mesa_sha1_update(ctx, entrypoint, strlen(entrypoint));
+   /* hash in shader stage, pipeline layout? */
+   if (spec_info) {
+      _mesa_sha1_update(ctx, spec_info->pMapEntries,
+                        spec_info->mapEntryCount * sizeof spec_info->pMapEntries[0]);
+      _mesa_sha1_update(ctx, spec_info->pData, spec_info->dataSize);
+   }
+   _mesa_sha1_final(ctx, hash);
+}
+
+static uint32_t
+anv_pipeline_cache_search_unlocked(struct anv_pipeline_cache *cache,
+                                   const unsigned char *sha1,
+                                   const struct brw_stage_prog_data **prog_data,
+                                   struct anv_pipeline_bind_map *map)
+{
+   const uint32_t mask = cache->table_size - 1;
+   const uint32_t start = (*(uint32_t *) sha1);
+
+   for (uint32_t i = 0; i < cache->table_size; i++) {
+      const uint32_t index = (start + i) & mask;
+      const uint32_t offset = cache->hash_table[index];
+
+      if (offset == ~0)
+         return NO_KERNEL;
+
+      struct cache_entry *entry =
+         cache->program_stream.block_pool->map + offset;
+      if (memcmp(entry->sha1, sha1, sizeof(entry->sha1)) == 0) {
+         if (prog_data) {
+            assert(map);
+            void *p = entry->prog_data;
+            *prog_data = p;
+            p += entry->prog_data_size;
+            map->surface_count = entry->surface_count;
+            map->sampler_count = entry->sampler_count;
+            map->image_count = entry->image_count;
+            map->surface_to_descriptor = p;
+            p += map->surface_count * sizeof(struct anv_pipeline_binding);
+            map->sampler_to_descriptor = p;
+         }
+
+         return offset + align_u32(entry_size(entry), 64);
+      }
+   }
+
+   unreachable("hash table should never be full");
+}
+
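The lookup above is a linear probe over an open-addressed table whose empty slots hold ~0, keyed by the first 32 bits of the SHA-1. A minimal standalone version of that probe (with uint32_t keys standing in for SHA-1s, and the keys kept in a separate array rather than inside the cached entries) might look like this:

#include <stdint.h>
#include <stdio.h>

#define EMPTY (~0u)

static uint32_t
probe_find(const uint32_t *keys, const uint32_t *values,
           uint32_t table_size /* power of two */, uint32_t key)
{
   const uint32_t mask = table_size - 1;
   for (uint32_t i = 0; i < table_size; i++) {
      const uint32_t index = (key + i) & mask;
      if (values[index] == EMPTY)
         return EMPTY;             /* hit an empty slot: not present */
      if (keys[index] == key)
         return values[index];     /* found it */
   }
   return EMPTY;                   /* table full (the driver asserts instead) */
}

int main(void)
{
   uint32_t keys[8]   = { 0 };
   uint32_t values[8] = { EMPTY, EMPTY, EMPTY, EMPTY,
                          EMPTY, EMPTY, EMPTY, EMPTY };

   /* Insert key 0x1234 at its probe slot, mapping it to a kernel offset. */
   keys[0x1234 & 7]   = 0x1234;
   values[0x1234 & 7] = 4096;

   printf("lookup(0x1234) = %u\n", probe_find(keys, values, 8, 0x1234));
   printf("lookup(0x9999) = %u\n", probe_find(keys, values, 8, 0x9999));
   return 0;
}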
+uint32_t
+anv_pipeline_cache_search(struct anv_pipeline_cache *cache,
+                          const unsigned char *sha1,
+                          const struct brw_stage_prog_data **prog_data,
+                          struct anv_pipeline_bind_map *map)
+{
+   uint32_t kernel;
+
+   pthread_mutex_lock(&cache->mutex);
+
+   kernel = anv_pipeline_cache_search_unlocked(cache, sha1, prog_data, map);
+
+   pthread_mutex_unlock(&cache->mutex);
+
+   return kernel;
+}
+
+static void
+anv_pipeline_cache_set_entry(struct anv_pipeline_cache *cache,
+                             struct cache_entry *entry, uint32_t entry_offset)
+{
+   const uint32_t mask = cache->table_size - 1;
+   const uint32_t start = (*(uint32_t *) entry->sha1);
+
+   /* We'll always be able to insert when we get here. */
+   assert(cache->kernel_count < cache->table_size / 2);
+
+   for (uint32_t i = 0; i < cache->table_size; i++) {
+      const uint32_t index = (start + i) & mask;
+      if (cache->hash_table[index] == ~0) {
+         cache->hash_table[index] = entry_offset;
+         break;
+      }
+   }
+
+   cache->total_size += entry_size(entry) + entry->kernel_size;
+   cache->kernel_count++;
+}
+
+static VkResult
+anv_pipeline_cache_grow(struct anv_pipeline_cache *cache)
+{
+   const uint32_t table_size = cache->table_size * 2;
+   const uint32_t old_table_size = cache->table_size;
+   const size_t byte_size = table_size * sizeof(cache->hash_table[0]);
+   uint32_t *table;
+   uint32_t *old_table = cache->hash_table;
+
+   table = malloc(byte_size);
+   if (table == NULL)
+      return VK_ERROR_OUT_OF_HOST_MEMORY;
+
+   cache->hash_table = table;
+   cache->table_size = table_size;
+   cache->kernel_count = 0;
+   cache->total_size = 0;
+
+   memset(cache->hash_table, 0xff, byte_size);
+   for (uint32_t i = 0; i < old_table_size; i++) {
+      const uint32_t offset = old_table[i];
+      if (offset == ~0)
+         continue;
+
+      struct cache_entry *entry =
+         cache->program_stream.block_pool->map + offset;
+      anv_pipeline_cache_set_entry(cache, entry, offset);
+   }
+
+   free(old_table);
+
+   return VK_SUCCESS;
+}
+
+static void
+anv_pipeline_cache_add_entry(struct anv_pipeline_cache *cache,
+                             struct cache_entry *entry, uint32_t entry_offset)
+{
+   if (cache->kernel_count == cache->table_size / 2)
+      anv_pipeline_cache_grow(cache);
+
+   /* Failing to grow the hash table isn't fatal, but it may mean we don't
+    * have enough space to add this new kernel.  Only add it if there's room.
+    */
+   if (cache->kernel_count < cache->table_size / 2)
+      anv_pipeline_cache_set_entry(cache, entry, entry_offset);
+}
+
+uint32_t
+anv_pipeline_cache_upload_kernel(struct anv_pipeline_cache *cache,
+                                 const unsigned char *sha1,
+                                 const void *kernel, size_t kernel_size,
+                                 const struct brw_stage_prog_data **prog_data,
+                                 size_t prog_data_size,
+                                 struct anv_pipeline_bind_map *map)
+{
+   pthread_mutex_lock(&cache->mutex);
+
+   /* Before uploading, check again that another thread didn't upload this
+    * shader while we were compiling it.
+    */
+   if (sha1) {
+      uint32_t cached_kernel =
+         anv_pipeline_cache_search_unlocked(cache, sha1, prog_data, map);
+      if (cached_kernel != NO_KERNEL) {
+         pthread_mutex_unlock(&cache->mutex);
+         return cached_kernel;
+      }
+   }
+
+   struct cache_entry *entry;
+
+   const uint32_t map_size =
+      map->surface_count * sizeof(struct anv_pipeline_binding) +
+      map->sampler_count * sizeof(struct anv_pipeline_binding);
+
+   const uint32_t preamble_size =
+      align_u32(sizeof(*entry) + prog_data_size + map_size, 64);
+
+   const uint32_t size = preamble_size + kernel_size;
+
+   assert(size < cache->program_stream.block_pool->block_size);
+   const struct anv_state state =
+      anv_state_stream_alloc(&cache->program_stream, size, 64);
+
+   entry = state.map;
+   entry->prog_data_size = prog_data_size;
+   entry->surface_count = map->surface_count;
+   entry->sampler_count = map->sampler_count;
+   entry->image_count = map->image_count;
+   entry->kernel_size = kernel_size;
+
+   void *p = entry->prog_data;
+   memcpy(p, *prog_data, prog_data_size);
+   p += prog_data_size;
+
+   memcpy(p, map->surface_to_descriptor,
+          map->surface_count * sizeof(struct anv_pipeline_binding));
+   map->surface_to_descriptor = p;
+   p += map->surface_count * sizeof(struct anv_pipeline_binding);
+
+   memcpy(p, map->sampler_to_descriptor,
+          map->sampler_count * sizeof(struct anv_pipeline_binding));
+   map->sampler_to_descriptor = p;
+
+   if (sha1) {
+      assert(anv_pipeline_cache_search_unlocked(cache, sha1,
+                                                NULL, NULL) == NO_KERNEL);
+
+      memcpy(entry->sha1, sha1, sizeof(entry->sha1));
+      anv_pipeline_cache_add_entry(cache, entry, state.offset);
+   }
+
+   pthread_mutex_unlock(&cache->mutex);
+
+   memcpy(state.map + preamble_size, kernel, kernel_size);
+
+   if (!cache->device->info.has_llc)
+      anv_state_clflush(state);
+
+   *prog_data = (const struct brw_stage_prog_data *) entry->prog_data;
+
+   return state.offset + preamble_size;
+}
+
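The upload path above is a double-checked insert: the expensive compile happens outside the mutex, and the cache is re-queried under the mutex before publishing, so a racing thread's result wins if it got there first. A minimal standalone shape of that pattern (the cache here is a single slot; names are hypothetical; link with -pthread):

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

static pthread_mutex_t cache_mutex = PTHREAD_MUTEX_INITIALIZER;
static uint32_t cached_offset = ~0u;   /* ~0 means "not in the cache" */

static uint32_t
upload(uint32_t freshly_compiled_offset)
{
   pthread_mutex_lock(&cache_mutex);
   if (cached_offset != ~0u) {
      /* Another thread won the race while we were compiling: keep theirs. */
      uint32_t winner = cached_offset;
      pthread_mutex_unlock(&cache_mutex);
      return winner;
   }
   cached_offset = freshly_compiled_offset;
   pthread_mutex_unlock(&cache_mutex);
   return freshly_compiled_offset;
}

int main(void)
{
   printf("first upload  -> %u\n", upload(4096));
   printf("racing upload -> %u\n", upload(8192));   /* loses; returns 4096 */
   return 0;
}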
+struct cache_header {
+   uint32_t header_size;
+   uint32_t header_version;
+   uint32_t vendor_id;
+   uint32_t device_id;
+   uint8_t  uuid[VK_UUID_SIZE];
+};
+
+static void
+anv_pipeline_cache_load(struct anv_pipeline_cache *cache,
+                        const void *data, size_t size)
+{
+   struct anv_device *device = cache->device;
+   struct cache_header header;
+   uint8_t uuid[VK_UUID_SIZE];
+
+   if (size < sizeof(header))
+      return;
+   memcpy(&header, data, sizeof(header));
+   if (header.header_size < sizeof(header))
+      return;
+   if (header.header_version != VK_PIPELINE_CACHE_HEADER_VERSION_ONE)
+      return;
+   if (header.vendor_id != 0x8086)
+      return;
+   if (header.device_id != device->chipset_id)
+      return;
+   anv_device_get_cache_uuid(uuid);
+   if (memcmp(header.uuid, uuid, VK_UUID_SIZE) != 0)
+      return;
+
+   void *end = (void *) data + size;
+   void *p = (void *) data + header.header_size;
+
+   while (p < end) {
+      struct cache_entry *entry = p;
+
+      void *data = entry->prog_data;
+      const struct brw_stage_prog_data *prog_data = data;
+      data += entry->prog_data_size;
+
+      struct anv_pipeline_binding *surface_to_descriptor = data;
+      data += entry->surface_count * sizeof(struct anv_pipeline_binding);
+      struct anv_pipeline_binding *sampler_to_descriptor = data;
+      data += entry->sampler_count * sizeof(struct anv_pipeline_binding);
+      void *kernel = data;
+
+      struct anv_pipeline_bind_map map = {
+         .surface_count = entry->surface_count,
+         .sampler_count = entry->sampler_count,
+         .image_count = entry->image_count,
+         .surface_to_descriptor = surface_to_descriptor,
+         .sampler_to_descriptor = sampler_to_descriptor
+      };
+
+      anv_pipeline_cache_upload_kernel(cache, entry->sha1,
+                                       kernel, entry->kernel_size,
+                                       &prog_data,
+                                       entry->prog_data_size, &map);
+      p = kernel + entry->kernel_size;
+   }
+}
+
+VkResult anv_CreatePipelineCache(
+    VkDevice                                    _device,
+    const VkPipelineCacheCreateInfo*            pCreateInfo,
+    const VkAllocationCallbacks*                pAllocator,
+    VkPipelineCache*                            pPipelineCache)
+{
+   ANV_FROM_HANDLE(anv_device, device, _device);
+   struct anv_pipeline_cache *cache;
+
+   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO);
+   assert(pCreateInfo->flags == 0);
+
+   cache = anv_alloc2(&device->alloc, pAllocator,
+                       sizeof(*cache), 8,
+                       VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+   if (cache == NULL)
+      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+
+   anv_pipeline_cache_init(cache, device);
+
+   if (pCreateInfo->initialDataSize > 0)
+      anv_pipeline_cache_load(cache,
+                              pCreateInfo->pInitialData,
+                              pCreateInfo->initialDataSize);
+
+   *pPipelineCache = anv_pipeline_cache_to_handle(cache);
+
+   return VK_SUCCESS;
+}
+
+void anv_DestroyPipelineCache(
+    VkDevice                                    _device,
+    VkPipelineCache                             _cache,
+    const VkAllocationCallbacks*                pAllocator)
+{
+   ANV_FROM_HANDLE(anv_device, device, _device);
+   ANV_FROM_HANDLE(anv_pipeline_cache, cache, _cache);
+
+   anv_pipeline_cache_finish(cache);
+
+   anv_free2(&device->alloc, pAllocator, cache);
+}
+
+VkResult anv_GetPipelineCacheData(
+    VkDevice                                    _device,
+    VkPipelineCache                             _cache,
+    size_t*                                     pDataSize,
+    void*                                       pData)
+{
+   ANV_FROM_HANDLE(anv_device, device, _device);
+   ANV_FROM_HANDLE(anv_pipeline_cache, cache, _cache);
+   struct cache_header *header;
+
+   const size_t size = sizeof(*header) + cache->total_size;
+
+   if (pData == NULL) {
+      *pDataSize = size;
+      return VK_SUCCESS;
+   }
+
+   if (*pDataSize < sizeof(*header)) {
+      *pDataSize = 0;
+      return VK_INCOMPLETE;
+   }
+
+   void *p = pData, *end = pData + *pDataSize;
+   header = p;
+   header->header_size = sizeof(*header);
+   header->header_version = VK_PIPELINE_CACHE_HEADER_VERSION_ONE;
+   header->vendor_id = 0x8086;
+   header->device_id = device->chipset_id;
+   anv_device_get_cache_uuid(header->uuid);
+   p += header->header_size;
+
+   struct cache_entry *entry;
+   for (uint32_t i = 0; i < cache->table_size; i++) {
+      if (cache->hash_table[i] == ~0)
+         continue;
+
+      entry = cache->program_stream.block_pool->map + cache->hash_table[i];
+      const uint32_t size = entry_size(entry);
+      if (end < p + size + entry->kernel_size)
+         break;
+
+      memcpy(p, entry, size);
+      p += size;
+
+      void *kernel = (void *) entry + align_u32(size, 64);
+
+      memcpy(p, kernel, entry->kernel_size);
+      p += entry->kernel_size;
+   }
+
+   *pDataSize = p - pData;
+
+   return VK_SUCCESS;
+}
+
+static void
+anv_pipeline_cache_merge(struct anv_pipeline_cache *dst,
+                         struct anv_pipeline_cache *src)
+{
+   for (uint32_t i = 0; i < src->table_size; i++) {
+      const uint32_t offset = src->hash_table[i];
+      if (offset == ~0)
+         continue;
+
+      struct cache_entry *entry =
+         src->program_stream.block_pool->map + offset;
+
+      if (anv_pipeline_cache_search(dst, entry->sha1, NULL, NULL) != NO_KERNEL)
+         continue;
+
+      anv_pipeline_cache_add_entry(dst, entry, offset);
+   }
+}
+
+VkResult anv_MergePipelineCaches(
+    VkDevice                                    _device,
+    VkPipelineCache                             destCache,
+    uint32_t                                    srcCacheCount,
+    const VkPipelineCache*                      pSrcCaches)
+{
+   ANV_FROM_HANDLE(anv_pipeline_cache, dst, destCache);
+
+   for (uint32_t i = 0; i < srcCacheCount; i++) {
+      ANV_FROM_HANDLE(anv_pipeline_cache, src, pSrcCaches[i]);
+
+      anv_pipeline_cache_merge(dst, src);
+   }
+
+   return VK_SUCCESS;
+}
diff --git a/src/intel/vulkan/anv_private.h b/src/intel/vulkan/anv_private.h
new file mode 100644 (file)
index 0000000..0ef840d
--- /dev/null
@@ -0,0 +1,1831 @@
+/*
+ * Copyright © 2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#pragma once
+
+#include <stdlib.h>
+#include <stdio.h>
+#include <stdbool.h>
+#include <pthread.h>
+#include <assert.h>
+#include <stdint.h>
+#include <i915_drm.h>
+
+#ifdef HAVE_VALGRIND
+#include <valgrind.h>
+#include <memcheck.h>
+#define VG(x) x
+#define __gen_validate_value(x) VALGRIND_CHECK_MEM_IS_DEFINED(&(x), sizeof(x))
+#else
+#define VG(x)
+#endif
+
+#include "brw_device_info.h"
+#include "brw_compiler.h"
+#include "util/macros.h"
+#include "util/list.h"
+
+/* Pre-declarations needed for WSI entrypoints */
+struct wl_surface;
+struct wl_display;
+typedef struct xcb_connection_t xcb_connection_t;
+typedef uint32_t xcb_visualid_t;
+typedef uint32_t xcb_window_t;
+
+#define VK_USE_PLATFORM_XCB_KHR
+#define VK_USE_PLATFORM_WAYLAND_KHR
+
+#define VK_PROTOTYPES
+#include <vulkan/vulkan.h>
+#include <vulkan/vulkan_intel.h>
+#include <vulkan/vk_icd.h>
+
+#include "anv_entrypoints.h"
+#include "brw_context.h"
+#include "isl/isl.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define MAX_VBS         32
+#define MAX_SETS         8
+#define MAX_RTS          8
+#define MAX_VIEWPORTS   16
+#define MAX_SCISSORS    16
+#define MAX_PUSH_CONSTANTS_SIZE 128
+#define MAX_DYNAMIC_BUFFERS 16
+#define MAX_IMAGES 8
+#define MAX_SAMPLES_LOG2 4 /* SKL supports 16 samples */
+
+#define anv_noreturn __attribute__((__noreturn__))
+#define anv_printflike(a, b) __attribute__((__format__(__printf__, a, b)))
+
+#define MIN(a, b) ((a) < (b) ? (a) : (b))
+#define MAX(a, b) ((a) > (b) ? (a) : (b))
+
+static inline uint32_t
+align_u32(uint32_t v, uint32_t a)
+{
+   assert(a != 0 && a == (a & -a));
+   return (v + a - 1) & ~(a - 1);
+}
+
+static inline uint64_t
+align_u64(uint64_t v, uint64_t a)
+{
+   assert(a != 0 && a == (a & -a));
+   return (v + a - 1) & ~(a - 1);
+}
+
+static inline int32_t
+align_i32(int32_t v, int32_t a)
+{
+   assert(a != 0 && a == (a & -a));
+   return (v + a - 1) & ~(a - 1);
+}
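+
+/* Examples: align_u32(13, 8) == 16 and align_u32(16, 8) == 16.  The asserts
+ * above require a non-zero power-of-two alignment, which is what makes the
+ * (v + a - 1) & ~(a - 1) mask trick valid.
+ */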
+
+/** Alignment must be a power of 2. */
+static inline bool
+anv_is_aligned(uintmax_t n, uintmax_t a)
+{
+   assert(a == (a & -a));
+   return (n & (a - 1)) == 0;
+}
+
+static inline uint32_t
+anv_minify(uint32_t n, uint32_t levels)
+{
+   if (unlikely(n == 0))
+      return 0;
+   else
+      return MAX(n >> levels, 1);
+}
+
+static inline float
+anv_clamp_f(float f, float min, float max)
+{
+   assert(min < max);
+
+   if (f > max)
+      return max;
+   else if (f < min)
+      return min;
+   else
+      return f;
+}
+
+static inline bool
+anv_clear_mask(uint32_t *inout_mask, uint32_t clear_mask)
+{
+   if (*inout_mask & clear_mask) {
+      *inout_mask &= ~clear_mask;
+      return true;
+   } else {
+      return false;
+   }
+}
+
+#define for_each_bit(b, dword)                          \
+   for (uint32_t __dword = (dword);                     \
+        (b) = __builtin_ffs(__dword) - 1, __dword;      \
+        __dword &= ~(1 << (b)))
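+
+/* For example, for_each_bit(b, 0x0a) visits b = 1, then b = 3; the macro
+ * peels off the lowest set bit on each iteration via __builtin_ffs().
+ */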
+
+#define typed_memcpy(dest, src, count) ({ \
+   static_assert(sizeof(*src) == sizeof(*dest), ""); \
+   memcpy((dest), (src), (count) * sizeof(*(src))); \
+})
+
+#define zero(x) (memset(&(x), 0, sizeof(x)))
+
+/* Define no kernel as 1, since that's an illegal offset for a kernel:
+ * kernel allocations are 64-byte aligned, so a real kernel offset is always
+ * a multiple of 64.
+ */
+#define NO_KERNEL 1
+
+struct anv_common {
+    VkStructureType                             sType;
+    const void*                                 pNext;
+};
+
+/* Whenever we generate an error, pass it through this function. Useful for
+ * debugging, where we can break on it. Only call at error site, not when
+ * propagating errors. Might be useful to plug in a stack trace here.
+ */
+
+VkResult __vk_errorf(VkResult error, const char *file, int line, const char *format, ...);
+
+#ifdef DEBUG
+#define vk_error(error) __vk_errorf(error, __FILE__, __LINE__, NULL);
+#define vk_errorf(error, format, ...) __vk_errorf(error, __FILE__, __LINE__, format, ## __VA_ARGS__);
+#else
+#define vk_error(error) error
+#define vk_errorf(error, format, ...) error
+#endif
+
+void __anv_finishme(const char *file, int line, const char *format, ...)
+   anv_printflike(3, 4);
+void anv_loge(const char *format, ...) anv_printflike(1, 2);
+void anv_loge_v(const char *format, va_list va);
+
+/**
+ * Print a FINISHME message, including its source location.
+ */
+#define anv_finishme(format, ...) \
+   __anv_finishme(__FILE__, __LINE__, format, ##__VA_ARGS__);
+
+/* A non-fatal assert.  Useful for debugging. */
+#ifdef DEBUG
+#define anv_assert(x) ({ \
+   if (unlikely(!(x))) \
+      fprintf(stderr, "%s:%d ASSERT: %s\n", __FILE__, __LINE__, #x); \
+})
+#else
+#define anv_assert(x)
+#endif
+
+/**
+ * If a block of code is annotated with anv_validate, then the block runs only
+ * in debug builds.
+ */
+#ifdef DEBUG
+#define anv_validate if (1)
+#else
+#define anv_validate if (0)
+#endif
+
+void anv_abortf(const char *format, ...) anv_noreturn anv_printflike(1, 2);
+void anv_abortfv(const char *format, va_list va) anv_noreturn;
+
+#define stub_return(v) \
+   do { \
+      anv_finishme("stub %s", __func__); \
+      return (v); \
+   } while (0)
+
+#define stub() \
+   do { \
+      anv_finishme("stub %s", __func__); \
+      return; \
+   } while (0)
+
+/**
+ * A dynamically growable, circular buffer.  Elements are added at head and
+ * removed from tail. head and tail are free-running uint32_t indices and we
+ * only compute the modulo with size when accessing the array.  This way,
+ * the number of bytes in the queue is always head - tail, even in the case
+ * of wraparound.
+ */
+
+struct anv_vector {
+   uint32_t head;
+   uint32_t tail;
+   uint32_t element_size;
+   uint32_t size;
+   void *data;
+};
+
+int anv_vector_init(struct anv_vector *queue, uint32_t element_size, uint32_t size);
+void *anv_vector_add(struct anv_vector *queue);
+void *anv_vector_remove(struct anv_vector *queue);
+
+static inline int
+anv_vector_length(struct anv_vector *queue)
+{
+   return (queue->head - queue->tail) / queue->element_size;
+}
+
+static inline void *
+anv_vector_head(struct anv_vector *vector)
+{
+   assert(vector->tail < vector->head);
+   return (void *)((char *)vector->data +
+                   ((vector->head - vector->element_size) &
+                    (vector->size - 1)));
+}
+
+static inline void *
+anv_vector_tail(struct anv_vector *vector)
+{
+   return (void *)((char *)vector->data + (vector->tail & (vector->size - 1)));
+}
+
+static inline void
+anv_vector_finish(struct anv_vector *queue)
+{
+   free(queue->data);
+}
+
+#define anv_vector_foreach(elem, queue)                                  \
+   static_assert(__builtin_types_compatible_p(__typeof__(queue), struct anv_vector *), ""); \
+   for (uint32_t __anv_vector_offset = (queue)->tail;                                \
+        elem = (queue)->data + (__anv_vector_offset & ((queue)->size - 1)), __anv_vector_offset < (queue)->head; \
+        __anv_vector_offset += (queue)->element_size)
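+
+/* Illustrative arithmetic (head and tail advance by element_size on add and
+ * remove): with element_size == 8, after 10 adds and 4 removes, head == 80
+ * and tail == 32, so anv_vector_length() == 6.  Because the indices are
+ * free-running, head - tail stays correct even once they wrap past
+ * UINT32_MAX.
+ */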
+
+struct anv_bo {
+   uint32_t gem_handle;
+
+   /* Index into the current validation list.  This is used by the
+    * validation list building algorithm to track which buffers are already
+    * in the validation list so that we can ensure uniqueness.
+    */
+   uint32_t index;
+
+   /* Last known offset.  This value is provided by the kernel when we
+    * execbuf and is used as the presumed offset for the next bunch of
+    * relocations.
+    */
+   uint64_t offset;
+
+   uint64_t size;
+   void *map;
+
+   /* We need to set the WRITE flag on winsys bos so GEM will know we're
+    * writing to them and synchronize uses on other rings (e.g. if the display
+    * server uses the blitter ring).
+    */
+   bool is_winsys_bo;
+};
+
+/* Represents a lock-free linked list of "free" things.  This is used by
+ * both the block pool and the state pools.  Unfortunately, in order to
+ * solve the ABA problem, we can't use a single uint32_t head.
+ */
+union anv_free_list {
+   struct {
+      int32_t offset;
+
+      /* A simple count that is incremented every time the head changes. */
+      uint32_t count;
+   };
+   uint64_t u64;
+};
+
+#define ANV_FREE_LIST_EMPTY ((union anv_free_list) { { 1, 0 } })
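+
+/* Why the count matters (illustrative sketch, not the allocator code):
+ * a lock-free pop does roughly
+ *
+ *    old.u64 = list->u64;
+ *    new.offset = next_offset_after(old.offset);   /- hypothetical helper -/
+ *    new.count = old.count + 1;
+ *    done = __sync_bool_compare_and_swap(&list->u64, old.u64, new.u64);
+ *
+ * If another thread pops and re-pushes the same offset in between, the
+ * offset alone would still compare equal (the ABA case), but count has
+ * changed, so the compare-and-swap fails and the pop retries.
+ */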
+
+struct anv_block_state {
+   union {
+      struct {
+         uint32_t next;
+         uint32_t end;
+      };
+      uint64_t u64;
+   };
+};
+
+struct anv_block_pool {
+   struct anv_device *device;
+
+   struct anv_bo bo;
+
+   /* The offset from the start of the bo to the "center" of the block
+    * pool.  Pointers to allocated blocks are given by
+    * bo.map + center_bo_offset + offsets.
+    */
+   uint32_t center_bo_offset;
+
+   /* Current memory map of the block pool.  This pointer may or may not
+    * point to the actual beginning of the block pool memory.  If
+    * anv_block_pool_alloc_back has ever been called, then this pointer
+    * will point to the "center" position of the buffer and all offsets
+    * (negative or positive) given out by the block pool alloc functions
+    * will be valid relative to this pointer.
+    *
+    * In particular, map == bo.map + center_bo_offset
+    */
+   void *map;
+   int fd;
+
+   /**
+    * Array of mmaps and gem handles owned by the block pool, reclaimed when
+    * the block pool is destroyed.
+    */
+   struct anv_vector mmap_cleanups;
+
+   uint32_t block_size;
+
+   union anv_free_list free_list;
+   struct anv_block_state state;
+
+   union anv_free_list back_free_list;
+   struct anv_block_state back_state;
+};
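+
+/* Worked example with illustrative numbers: if center_bo_offset is 1 MB, a
+ * back allocation that returned offset -4096 lives at map - 4096 (i.e.
+ * bo.map + 1 MB - 4096), while a front allocation at offset 8192 lives at
+ * map + 8192, per the map == bo.map + center_bo_offset relation above.
+ */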
+
+/* Block pools are backed by a fixed-size 4GB memfd */
+#define BLOCK_POOL_MEMFD_SIZE (1ull << 32)
+
+/* The center of the block pool is also the middle of the memfd.  This may
+ * change in the future if we decide differently for some reason.
+ */
+#define BLOCK_POOL_MEMFD_CENTER (BLOCK_POOL_MEMFD_SIZE / 2)
+
+static inline uint32_t
+anv_block_pool_size(struct anv_block_pool *pool)
+{
+   return pool->state.end + pool->back_state.end;
+}
+
+struct anv_state {
+   int32_t offset;
+   uint32_t alloc_size;
+   void *map;
+};
+
+struct anv_fixed_size_state_pool {
+   size_t state_size;
+   union anv_free_list free_list;
+   struct anv_block_state block;
+};
+
+#define ANV_MIN_STATE_SIZE_LOG2 6
+#define ANV_MAX_STATE_SIZE_LOG2 10
+
+#define ANV_STATE_BUCKETS (ANV_MAX_STATE_SIZE_LOG2 - ANV_MIN_STATE_SIZE_LOG2)
+
+struct anv_state_pool {
+   struct anv_block_pool *block_pool;
+   struct anv_fixed_size_state_pool buckets[ANV_STATE_BUCKETS];
+};
+
+struct anv_state_stream_block;
+
+struct anv_state_stream {
+   struct anv_block_pool *block_pool;
+
+   /* The current working block */
+   struct anv_state_stream_block *block;
+
+   /* Offset at which the current block starts */
+   uint32_t start;
+   /* Offset at which to allocate the next state */
+   uint32_t next;
+   /* Offset at which the current block ends */
+   uint32_t end;
+};
+
+#define CACHELINE_SIZE 64
+#define CACHELINE_MASK 63
+
+static inline void
+anv_clflush_range(void *start, size_t size)
+{
+   void *p = (void *) (((uintptr_t) start) & ~CACHELINE_MASK);
+   void *end = start + size;
+
+   __builtin_ia32_mfence();
+   while (p < end) {
+      __builtin_ia32_clflush(p);
+      p += CACHELINE_SIZE;
+   }
+}
+
+static inline void
+anv_state_clflush(struct anv_state state)
+{
+   anv_clflush_range(state.map, state.alloc_size);
+}
+
+void anv_block_pool_init(struct anv_block_pool *pool,
+                         struct anv_device *device, uint32_t block_size);
+void anv_block_pool_finish(struct anv_block_pool *pool);
+int32_t anv_block_pool_alloc(struct anv_block_pool *pool);
+int32_t anv_block_pool_alloc_back(struct anv_block_pool *pool);
+void anv_block_pool_free(struct anv_block_pool *pool, int32_t offset);
+void anv_state_pool_init(struct anv_state_pool *pool,
+                         struct anv_block_pool *block_pool);
+void anv_state_pool_finish(struct anv_state_pool *pool);
+struct anv_state anv_state_pool_alloc(struct anv_state_pool *pool,
+                                      size_t state_size, size_t alignment);
+void anv_state_pool_free(struct anv_state_pool *pool, struct anv_state state);
+void anv_state_stream_init(struct anv_state_stream *stream,
+                           struct anv_block_pool *block_pool);
+void anv_state_stream_finish(struct anv_state_stream *stream);
+struct anv_state anv_state_stream_alloc(struct anv_state_stream *stream,
+                                        uint32_t size, uint32_t alignment);
+
+/**
+ * Implements a pool of re-usable BOs.  The interface is identical to that
+ * of block_pool except that each block is its own BO.
+ */
+struct anv_bo_pool {
+   struct anv_device *device;
+
+   uint32_t bo_size;
+
+   void *free_list;
+};
+
+void anv_bo_pool_init(struct anv_bo_pool *pool,
+                      struct anv_device *device, uint32_t block_size);
+void anv_bo_pool_finish(struct anv_bo_pool *pool);
+VkResult anv_bo_pool_alloc(struct anv_bo_pool *pool, struct anv_bo *bo);
+void anv_bo_pool_free(struct anv_bo_pool *pool, const struct anv_bo *bo);
+
+
+void *anv_resolve_entrypoint(uint32_t index);
+
+extern struct anv_dispatch_table dtable;
+
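+/* ANV_CALL(func) lazily fills in the corresponding dispatch table slot on
+ * first use and then evaluates to the function pointer, so a call looks
+ * like, e.g., ANV_CALL(CmdPipelineBarrier)(commandBuffer, ...).
+ */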
+#define ANV_CALL(func) ({ \
+   if (dtable.func == NULL) { \
+      size_t idx = offsetof(struct anv_dispatch_table, func) / sizeof(void *); \
+      dtable.entrypoints[idx] = anv_resolve_entrypoint(idx); \
+   } \
+   dtable.func; \
+})
+
+static inline void *
+anv_alloc(const VkAllocationCallbacks *alloc,
+          size_t size, size_t align,
+          VkSystemAllocationScope scope)
+{
+   return alloc->pfnAllocation(alloc->pUserData, size, align, scope);
+}
+
+static inline void *
+anv_realloc(const VkAllocationCallbacks *alloc,
+            void *ptr, size_t size, size_t align,
+            VkSystemAllocationScope scope)
+{
+   return alloc->pfnReallocation(alloc->pUserData, ptr, size, align, scope);
+}
+
+static inline void
+anv_free(const VkAllocationCallbacks *alloc, void *data)
+{
+   alloc->pfnFree(alloc->pUserData, data);
+}
+
+static inline void *
+anv_alloc2(const VkAllocationCallbacks *parent_alloc,
+           const VkAllocationCallbacks *alloc,
+           size_t size, size_t align,
+           VkSystemAllocationScope scope)
+{
+   if (alloc)
+      return anv_alloc(alloc, size, align, scope);
+   else
+      return anv_alloc(parent_alloc, size, align, scope);
+}
+
+static inline void
+anv_free2(const VkAllocationCallbacks *parent_alloc,
+          const VkAllocationCallbacks *alloc,
+          void *data)
+{
+   if (alloc)
+      anv_free(alloc, data);
+   else
+      anv_free(parent_alloc, data);
+}
+
+struct anv_physical_device {
+    VK_LOADER_DATA                              _loader_data;
+
+    struct anv_instance *                       instance;
+    uint32_t                                    chipset_id;
+    const char *                                path;
+    const char *                                name;
+    const struct brw_device_info *              info;
+    uint64_t                                    aperture_size;
+    struct brw_compiler *                       compiler;
+    struct isl_device                           isl_dev;
+};
+
+struct anv_wsi_interface;
+
+#define VK_ICD_WSI_PLATFORM_MAX 5
+
+struct anv_instance {
+    VK_LOADER_DATA                              _loader_data;
+
+    VkAllocationCallbacks                       alloc;
+
+    uint32_t                                    apiVersion;
+    int                                         physicalDeviceCount;
+    struct anv_physical_device                  physicalDevice;
+
+    struct anv_wsi_interface *                  wsi[VK_ICD_WSI_PLATFORM_MAX];
+};
+
+VkResult anv_init_wsi(struct anv_instance *instance);
+void anv_finish_wsi(struct anv_instance *instance);
+
+struct anv_meta_state {
+   VkAllocationCallbacks alloc;
+
+   /**
+    * Use array element `i` for images with `2^i` samples.
+    */
+   struct {
+      /**
+       * Pipeline N is used to clear color attachment N of the current
+       * subpass.
+       *
+       * HACK: We use one pipeline per color attachment to work around the
+       * compiler's inability to dynamically set the render target index of
+       * the render target write message.
+       */
+      struct anv_pipeline *color_pipelines[MAX_RTS];
+
+      struct anv_pipeline *depth_only_pipeline;
+      struct anv_pipeline *stencil_only_pipeline;
+      struct anv_pipeline *depthstencil_pipeline;
+   } clear[1 + MAX_SAMPLES_LOG2];
+
+   struct {
+      VkRenderPass render_pass;
+
+      /** Pipeline that blits from a 1D image. */
+      VkPipeline pipeline_1d_src;
+
+      /** Pipeline that blits from a 2D image. */
+      VkPipeline pipeline_2d_src;
+
+      /** Pipeline that blits from a 3D image. */
+      VkPipeline pipeline_3d_src;
+
+      VkPipelineLayout                          pipeline_layout;
+      VkDescriptorSetLayout                     ds_layout;
+   } blit;
+
+   struct {
+      /** Pipeline [i] resolves an image with 2^(i+1) samples.  */
+      VkPipeline                                pipelines[MAX_SAMPLES_LOG2];
+
+      VkRenderPass                              pass;
+      VkPipelineLayout                          pipeline_layout;
+      VkDescriptorSetLayout                     ds_layout;
+   } resolve;
+};
+
+struct anv_queue {
+    VK_LOADER_DATA                              _loader_data;
+
+    struct anv_device *                         device;
+
+    struct anv_state_pool *                     pool;
+};
+
+struct anv_pipeline_cache {
+   struct anv_device *                          device;
+   struct anv_state_stream                      program_stream;
+   pthread_mutex_t                              mutex;
+
+   uint32_t                                     total_size;
+   uint32_t                                     table_size;
+   uint32_t                                     kernel_count;
+   uint32_t *                                   hash_table;
+};
+
+struct anv_pipeline_bind_map;
+
+void anv_pipeline_cache_init(struct anv_pipeline_cache *cache,
+                             struct anv_device *device);
+void anv_pipeline_cache_finish(struct anv_pipeline_cache *cache);
+uint32_t anv_pipeline_cache_search(struct anv_pipeline_cache *cache,
+                                   const unsigned char *sha1,
+                                   const struct brw_stage_prog_data **prog_data,
+                                   struct anv_pipeline_bind_map *map);
+uint32_t anv_pipeline_cache_upload_kernel(struct anv_pipeline_cache *cache,
+                                          const unsigned char *sha1,
+                                          const void *kernel,
+                                          size_t kernel_size,
+                                          const struct brw_stage_prog_data **prog_data,
+                                          size_t prog_data_size,
+                                          struct anv_pipeline_bind_map *map);
+
+struct anv_device {
+    VK_LOADER_DATA                              _loader_data;
+
+    VkAllocationCallbacks                       alloc;
+
+    struct anv_instance *                       instance;
+    uint32_t                                    chipset_id;
+    struct brw_device_info                      info;
+    struct isl_device                           isl_dev;
+    int                                         context_id;
+    int                                         fd;
+
+    struct anv_bo_pool                          batch_bo_pool;
+
+    struct anv_block_pool                       dynamic_state_block_pool;
+    struct anv_state_pool                       dynamic_state_pool;
+
+    struct anv_block_pool                       instruction_block_pool;
+    struct anv_pipeline_cache                   default_pipeline_cache;
+
+    struct anv_block_pool                       surface_state_block_pool;
+    struct anv_state_pool                       surface_state_pool;
+
+    struct anv_bo                               workaround_bo;
+
+    struct anv_meta_state                       meta_state;
+
+    struct anv_state                            border_colors;
+
+    struct anv_queue                            queue;
+
+    struct anv_block_pool                       scratch_block_pool;
+
+    uint32_t                                    default_mocs;
+
+    pthread_mutex_t                             mutex;
+};
+
+void anv_device_get_cache_uuid(void *uuid);
+
+
+void* anv_gem_mmap(struct anv_device *device,
+                   uint32_t gem_handle, uint64_t offset, uint64_t size, uint32_t flags);
+void anv_gem_munmap(void *p, uint64_t size);
+uint32_t anv_gem_create(struct anv_device *device, size_t size);
+void anv_gem_close(struct anv_device *device, uint32_t gem_handle);
+uint32_t anv_gem_userptr(struct anv_device *device, void *mem, size_t size);
+int anv_gem_wait(struct anv_device *device, uint32_t gem_handle, int64_t *timeout_ns);
+int anv_gem_execbuffer(struct anv_device *device,
+                       struct drm_i915_gem_execbuffer2 *execbuf);
+int anv_gem_set_tiling(struct anv_device *device, uint32_t gem_handle,
+                       uint32_t stride, uint32_t tiling);
+int anv_gem_create_context(struct anv_device *device);
+int anv_gem_destroy_context(struct anv_device *device, int context);
+int anv_gem_get_param(int fd, uint32_t param);
+bool anv_gem_get_bit6_swizzle(int fd, uint32_t tiling);
+int anv_gem_get_aperture(int fd, uint64_t *size);
+int anv_gem_handle_to_fd(struct anv_device *device, uint32_t gem_handle);
+uint32_t anv_gem_fd_to_handle(struct anv_device *device, int fd);
+int anv_gem_set_caching(struct anv_device *device, uint32_t gem_handle, uint32_t caching);
+int anv_gem_set_domain(struct anv_device *device, uint32_t gem_handle,
+                       uint32_t read_domains, uint32_t write_domain);
+
+VkResult anv_bo_init_new(struct anv_bo *bo, struct anv_device *device, uint64_t size);
+
+struct anv_reloc_list {
+   size_t                                       num_relocs;
+   size_t                                       array_length;
+   struct drm_i915_gem_relocation_entry *       relocs;
+   struct anv_bo **                             reloc_bos;
+};
+
+VkResult anv_reloc_list_init(struct anv_reloc_list *list,
+                             const VkAllocationCallbacks *alloc);
+void anv_reloc_list_finish(struct anv_reloc_list *list,
+                           const VkAllocationCallbacks *alloc);
+
+uint64_t anv_reloc_list_add(struct anv_reloc_list *list,
+                            const VkAllocationCallbacks *alloc,
+                            uint32_t offset, struct anv_bo *target_bo,
+                            uint32_t delta);
+
+struct anv_batch_bo {
+   /* Link in the anv_cmd_buffer.owned_batch_bos list */
+   struct list_head                             link;
+
+   struct anv_bo                                bo;
+
+   /* Bytes actually consumed in this batch BO */
+   size_t                                       length;
+
+   /* Last seen surface state block pool bo offset */
+   uint32_t                                     last_ss_pool_bo_offset;
+
+   struct anv_reloc_list                        relocs;
+};
+
+struct anv_batch {
+   const VkAllocationCallbacks *                alloc;
+
+   void *                                       start;
+   void *                                       end;
+   void *                                       next;
+
+   struct anv_reloc_list *                      relocs;
+
+   /* This callback is called (with the associated user data) in the event
+    * that the batch runs out of space.
+    */
+   VkResult (*extend_cb)(struct anv_batch *, void *);
+   void *                                       user_data;
+};
+
+void *anv_batch_emit_dwords(struct anv_batch *batch, int num_dwords);
+void anv_batch_emit_batch(struct anv_batch *batch, struct anv_batch *other);
+uint64_t anv_batch_emit_reloc(struct anv_batch *batch,
+                              void *location, struct anv_bo *bo, uint32_t offset);
+VkResult anv_device_submit_simple_batch(struct anv_device *device,
+                                        struct anv_batch *batch);
+
+struct anv_address {
+   struct anv_bo *bo;
+   uint32_t offset;
+};
+
+#define __gen_address_type struct anv_address
+#define __gen_user_data struct anv_batch
+
+static inline uint64_t
+__gen_combine_address(struct anv_batch *batch, void *location,
+                      const struct anv_address address, uint32_t delta)
+{
+   if (address.bo == NULL) {
+      return address.offset + delta;
+   } else {
+      assert(batch->start <= location && location < batch->end);
+
+      return anv_batch_emit_reloc(batch, location, address.bo, address.offset + delta);
+   }
+}
+
+/* Wrapper macros needed to work around preprocessor argument issues.  In
+ * particular, arguments don't get pre-evaluated if they are concatenated.
+ * This means that, if you pass GENX(3DSTATE_PS) into the emit macro, the
+ * GENX macro won't get evaluated if the emit macro contains "cmd ## foo".
+ * We can work around this easily enough with these helpers.
+ */
+#define __anv_cmd_length(cmd) cmd ## _length
+#define __anv_cmd_length_bias(cmd) cmd ## _length_bias
+#define __anv_cmd_header(cmd) cmd ## _header
+#define __anv_cmd_pack(cmd) cmd ## _pack
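+
+/* For instance, if anv_batch_emit() below used "cmd ## _length" directly,
+ * the GENX(3DSTATE_PS) argument would be token-pasted before GENX() gets a
+ * chance to expand, producing an invalid token.  Routing it through
+ * __anv_cmd_length(cmd) lets the argument expand first (e.g. to
+ * GEN8_3DSTATE_PS on a gen8 build), so the paste yields
+ * GEN8_3DSTATE_PS_length as intended.
+ */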
+
+#define anv_batch_emit(batch, cmd, ...) do {                               \
+      void *__dst = anv_batch_emit_dwords(batch, __anv_cmd_length(cmd));   \
+      struct cmd __template = {                                            \
+         __anv_cmd_header(cmd),                                            \
+         __VA_ARGS__                                                       \
+      };                                                                   \
+      __anv_cmd_pack(cmd)(batch, __dst, &__template);                      \
+      VG(VALGRIND_CHECK_MEM_IS_DEFINED(__dst, __anv_cmd_length(cmd) * 4)); \
+   } while (0)
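+
+/* A typical call site looks like the following (field names come from the
+ * generated genxml headers):
+ *
+ *    anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL),
+ *                   .CommandStreamerStallEnable = true);
+ */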
+
+#define anv_batch_emitn(batch, n, cmd, ...) ({          \
+      void *__dst = anv_batch_emit_dwords(batch, n);    \
+      struct cmd __template = {                         \
+         __anv_cmd_header(cmd),                         \
+        .DWordLength = n - __anv_cmd_length_bias(cmd),  \
+         __VA_ARGS__                                    \
+      };                                                \
+      __anv_cmd_pack(cmd)(batch, __dst, &__template);   \
+      __dst;                                            \
+   })
+
+#define anv_batch_emit_merge(batch, dwords0, dwords1)                   \
+   do {                                                                 \
+      uint32_t *dw;                                                     \
+                                                                        \
+      static_assert(ARRAY_SIZE(dwords0) == ARRAY_SIZE(dwords1), "mismatch merge"); \
+      dw = anv_batch_emit_dwords((batch), ARRAY_SIZE(dwords0));         \
+      for (uint32_t i = 0; i < ARRAY_SIZE(dwords0); i++)                \
+         dw[i] = (dwords0)[i] | (dwords1)[i];                           \
+      VG(VALGRIND_CHECK_MEM_IS_DEFINED(dw, ARRAY_SIZE(dwords0) * 4));\
+   } while (0)
+
+#define anv_state_pool_emit(pool, cmd, align, ...) ({                   \
+      const uint32_t __size = __anv_cmd_length(cmd) * 4;                \
+      struct anv_state __state =                                        \
+         anv_state_pool_alloc((pool), __size, align);                   \
+      struct cmd __template = {                                         \
+         __VA_ARGS__                                                    \
+      };                                                                \
+      __anv_cmd_pack(cmd)(NULL, __state.map, &__template);              \
+      VG(VALGRIND_CHECK_MEM_IS_DEFINED(__state.map, __anv_cmd_length(cmd) * 4)); \
+      if (!(pool)->block_pool->device->info.has_llc)                    \
+         anv_state_clflush(__state);                                    \
+      __state;                                                          \
+   })
+
+#define GEN7_MOCS (struct GEN7_MEMORY_OBJECT_CONTROL_STATE) {  \
+   .GraphicsDataTypeGFDT                        = 0,           \
+   .LLCCacheabilityControlLLCCC                 = 0,           \
+   .L3CacheabilityControlL3CC                   = 1,           \
+}
+
+#define GEN75_MOCS (struct GEN75_MEMORY_OBJECT_CONTROL_STATE) {  \
+   .LLCeLLCCacheabilityControlLLCCC             = 0,           \
+   .L3CacheabilityControlL3CC                   = 1,           \
+}
+
+#define GEN8_MOCS (struct GEN8_MEMORY_OBJECT_CONTROL_STATE) {  \
+      .MemoryTypeLLCeLLCCacheabilityControl = WB,              \
+      .TargetCache = L3DefertoPATforLLCeLLCselection,          \
+      .AgeforQUADLRU = 0                                       \
+   }
+
+/* Skylake: MOCS is now an index into an array of 62 different caching
+ * configurations programmed by the kernel.
+ */
+
+#define GEN9_MOCS (struct GEN9_MEMORY_OBJECT_CONTROL_STATE) {  \
+      /* TC=LLC/eLLC, LeCC=WB, LRUM=3, L3CC=WB */              \
+      .IndextoMOCSTables                           = 2         \
+   }
+
+#define GEN9_MOCS_PTE {                                 \
+      /* TC=LLC/eLLC, LeCC=WB, LRUM=3, L3CC=WB */       \
+      .IndextoMOCSTables                           = 1  \
+   }
+
+struct anv_device_memory {
+   struct anv_bo                                bo;
+   uint32_t                                     type_index;
+   VkDeviceSize                                 map_size;
+   void *                                       map;
+};
+
+/**
+ * Header for Vertex URB Entry (VUE)
+ */
+struct anv_vue_header {
+   uint32_t Reserved;
+   uint32_t RTAIndex; /* RenderTargetArrayIndex */
+   uint32_t ViewportIndex;
+   float PointWidth;
+};
+
+struct anv_descriptor_set_binding_layout {
+   /* Number of array elements in this binding */
+   uint16_t array_size;
+
+   /* Index into the flattened descriptor set */
+   uint16_t descriptor_index;
+
+   /* Index into the dynamic state array for a dynamic buffer */
+   int16_t dynamic_offset_index;
+
+   /* Index into the descriptor set buffer views */
+   int16_t buffer_index;
+
+   struct {
+      /* Index into the binding table for the associated surface */
+      int16_t surface_index;
+
+      /* Index into the sampler table for the associated sampler */
+      int16_t sampler_index;
+
+      /* Index into the image table for the associated image */
+      int16_t image_index;
+   } stage[MESA_SHADER_STAGES];
+
+   /* Immutable samplers (or NULL if no immutable samplers) */
+   struct anv_sampler **immutable_samplers;
+};
+
+struct anv_descriptor_set_layout {
+   /* Number of bindings in this descriptor set */
+   uint16_t binding_count;
+
+   /* Total size of the descriptor set with room for all array entries */
+   uint16_t size;
+
+   /* Shader stages affected by this descriptor set */
+   uint16_t shader_stages;
+
+   /* Number of buffers in this descriptor set */
+   uint16_t buffer_count;
+
+   /* Number of dynamic offsets used by this descriptor set */
+   uint16_t dynamic_offset_count;
+
+   /* Bindings in this descriptor set */
+   struct anv_descriptor_set_binding_layout binding[0];
+};
+
+struct anv_descriptor {
+   VkDescriptorType type;
+
+   union {
+      struct {
+         struct anv_image_view *image_view;
+         struct anv_sampler *sampler;
+      };
+
+      struct anv_buffer_view *buffer_view;
+   };
+};
+
+struct anv_descriptor_set {
+   const struct anv_descriptor_set_layout *layout;
+   uint32_t size;
+   uint32_t buffer_count;
+   struct anv_buffer_view *buffer_views;
+   struct anv_descriptor descriptors[0];
+};
+
+struct anv_descriptor_pool {
+   uint32_t size;
+   uint32_t next;
+   uint32_t free_list;
+
+   struct anv_state_stream surface_state_stream;
+   void *surface_state_free_list;
+
+   char data[0];
+};
+
+VkResult
+anv_descriptor_set_create(struct anv_device *device,
+                          struct anv_descriptor_pool *pool,
+                          const struct anv_descriptor_set_layout *layout,
+                          struct anv_descriptor_set **out_set);
+
+void
+anv_descriptor_set_destroy(struct anv_device *device,
+                           struct anv_descriptor_pool *pool,
+                           struct anv_descriptor_set *set);
+
+#define ANV_DESCRIPTOR_SET_COLOR_ATTACHMENTS UINT16_MAX
+
+struct anv_pipeline_binding {
+   /* The descriptor set this surface corresponds to.  The special value of
+    * ANV_DESCRIPTOR_SET_COLOR_ATTACHMENTS indicates that the offset refers
+    * to a color attachment and not a regular descriptor.
+    */
+   uint16_t set;
+
+   /* Offset into the descriptor set or attachment list. */
+   uint16_t offset;
+};
+
+struct anv_pipeline_layout {
+   struct {
+      struct anv_descriptor_set_layout *layout;
+      uint32_t dynamic_offset_start;
+   } set[MAX_SETS];
+
+   uint32_t num_sets;
+
+   struct {
+      bool has_dynamic_offsets;
+   } stage[MESA_SHADER_STAGES];
+};
+
+struct anv_buffer {
+   struct anv_device *                          device;
+   VkDeviceSize                                 size;
+
+   VkBufferUsageFlags                           usage;
+
+   /* Set when bound */
+   struct anv_bo *                              bo;
+   VkDeviceSize                                 offset;
+};
+
+enum anv_cmd_dirty_bits {
+   ANV_CMD_DIRTY_DYNAMIC_VIEWPORT                  = 1 << 0, /* VK_DYNAMIC_STATE_VIEWPORT */
+   ANV_CMD_DIRTY_DYNAMIC_SCISSOR                   = 1 << 1, /* VK_DYNAMIC_STATE_SCISSOR */
+   ANV_CMD_DIRTY_DYNAMIC_LINE_WIDTH                = 1 << 2, /* VK_DYNAMIC_STATE_LINE_WIDTH */
+   ANV_CMD_DIRTY_DYNAMIC_DEPTH_BIAS                = 1 << 3, /* VK_DYNAMIC_STATE_DEPTH_BIAS */
+   ANV_CMD_DIRTY_DYNAMIC_BLEND_CONSTANTS           = 1 << 4, /* VK_DYNAMIC_STATE_BLEND_CONSTANTS */
+   ANV_CMD_DIRTY_DYNAMIC_DEPTH_BOUNDS              = 1 << 5, /* VK_DYNAMIC_STATE_DEPTH_BOUNDS */
+   ANV_CMD_DIRTY_DYNAMIC_STENCIL_COMPARE_MASK      = 1 << 6, /* VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK */
+   ANV_CMD_DIRTY_DYNAMIC_STENCIL_WRITE_MASK        = 1 << 7, /* VK_DYNAMIC_STATE_STENCIL_WRITE_MASK */
+   ANV_CMD_DIRTY_DYNAMIC_STENCIL_REFERENCE         = 1 << 8, /* VK_DYNAMIC_STATE_STENCIL_REFERENCE */
+   ANV_CMD_DIRTY_DYNAMIC_ALL                       = (1 << 9) - 1,
+   ANV_CMD_DIRTY_PIPELINE                          = 1 << 9,
+   ANV_CMD_DIRTY_INDEX_BUFFER                      = 1 << 10,
+   ANV_CMD_DIRTY_RENDER_TARGETS                    = 1 << 11,
+};
+typedef uint32_t anv_cmd_dirty_mask_t;
+
+struct anv_vertex_binding {
+   struct anv_buffer *                          buffer;
+   VkDeviceSize                                 offset;
+};
+
+struct anv_push_constants {
+   /* Current allocated size of this push constants data structure.
+    * Because a decent chunk of it may not be used (images on SKL, for
+    * instance), we won't actually allocate the entire structure up-front.
+    */
+   uint32_t size;
+
+   /* Push constant data provided by the client through vkPushConstants */
+   uint8_t client_data[MAX_PUSH_CONSTANTS_SIZE];
+
+   /* Our hardware only provides zero-based vertex and instance IDs so, in
+    * order to satisfy the Vulkan requirements, we may have to push one or
+    * both of these into the shader.
+    */
+   uint32_t base_vertex;
+   uint32_t base_instance;
+
+   /* Offsets and ranges for dynamically bound buffers */
+   struct {
+      uint32_t offset;
+      uint32_t range;
+   } dynamic[MAX_DYNAMIC_BUFFERS];
+
+   /* Image data for image_load_store on pre-SKL */
+   struct brw_image_param images[MAX_IMAGES];
+};
+
+struct anv_dynamic_state {
+   struct {
+      uint32_t                                  count;
+      VkViewport                                viewports[MAX_VIEWPORTS];
+   } viewport;
+
+   struct {
+      uint32_t                                  count;
+      VkRect2D                                  scissors[MAX_SCISSORS];
+   } scissor;
+
+   float                                        line_width;
+
+   struct {
+      float                                     bias;
+      float                                     clamp;
+      float                                     slope;
+   } depth_bias;
+
+   float                                        blend_constants[4];
+
+   struct {
+      float                                     min;
+      float                                     max;
+   } depth_bounds;
+
+   struct {
+      uint32_t                                  front;
+      uint32_t                                  back;
+   } stencil_compare_mask;
+
+   struct {
+      uint32_t                                  front;
+      uint32_t                                  back;
+   } stencil_write_mask;
+
+   struct {
+      uint32_t                                  front;
+      uint32_t                                  back;
+   } stencil_reference;
+};
+
+extern const struct anv_dynamic_state default_dynamic_state;
+
+void anv_dynamic_state_copy(struct anv_dynamic_state *dest,
+                            const struct anv_dynamic_state *src,
+                            uint32_t copy_mask);
+
+/**
+ * Attachment state when recording a renderpass instance.
+ *
+ * The clear value is valid only if there exists a pending clear.
+ */
+struct anv_attachment_state {
+   VkImageAspectFlags                           pending_clear_aspects;
+   VkClearValue                                 clear_value;
+};
+
+/** State required while building cmd buffer */
+struct anv_cmd_state {
+   /* PIPELINE_SELECT.PipelineSelection */
+   uint32_t                                     current_pipeline;
+   uint32_t                                     current_l3_config;
+   uint32_t                                     vb_dirty;
+   anv_cmd_dirty_mask_t                         dirty;
+   anv_cmd_dirty_mask_t                         compute_dirty;
+   uint32_t                                     num_workgroups_offset;
+   struct anv_bo                                *num_workgroups_bo;
+   VkShaderStageFlags                           descriptors_dirty;
+   VkShaderStageFlags                           push_constants_dirty;
+   uint32_t                                     scratch_size;
+   struct anv_pipeline *                        pipeline;
+   struct anv_pipeline *                        compute_pipeline;
+   struct anv_framebuffer *                     framebuffer;
+   struct anv_render_pass *                     pass;
+   struct anv_subpass *                         subpass;
+   uint32_t                                     restart_index;
+   struct anv_vertex_binding                    vertex_bindings[MAX_VBS];
+   struct anv_descriptor_set *                  descriptors[MAX_SETS];
+   struct anv_push_constants *                  push_constants[MESA_SHADER_STAGES];
+   struct anv_state                             binding_tables[MESA_SHADER_STAGES];
+   struct anv_state                             samplers[MESA_SHADER_STAGES];
+   struct anv_dynamic_state                     dynamic;
+   bool                                         need_query_wa;
+
+   /**
+    * Array length is anv_cmd_state::pass::attachment_count. Array content is
+    * valid only when recording a render pass instance.
+    */
+   struct anv_attachment_state *                attachments;
+
+   struct {
+      struct anv_buffer *                       index_buffer;
+      uint32_t                                  index_type; /**< 3DSTATE_INDEX_BUFFER.IndexFormat */
+      uint32_t                                  index_offset;
+   } gen7;
+};
+
+struct anv_cmd_pool {
+   VkAllocationCallbacks                        alloc;
+   struct list_head                             cmd_buffers;
+};
+
+#define ANV_CMD_BUFFER_BATCH_SIZE 8192
+
+enum anv_cmd_buffer_exec_mode {
+   ANV_CMD_BUFFER_EXEC_MODE_PRIMARY,
+   ANV_CMD_BUFFER_EXEC_MODE_EMIT,
+   ANV_CMD_BUFFER_EXEC_MODE_CHAIN,
+   ANV_CMD_BUFFER_EXEC_MODE_COPY_AND_CHAIN,
+};
+
+struct anv_cmd_buffer {
+   VK_LOADER_DATA                               _loader_data;
+
+   struct anv_device *                          device;
+
+   struct anv_cmd_pool *                        pool;
+   struct list_head                             pool_link;
+
+   struct anv_batch                             batch;
+
+   /* Fields required for the actual chain of anv_batch_bo's.
+    *
+    * These fields are initialized by anv_cmd_buffer_init_batch_bo_chain().
+    */
+   struct list_head                             batch_bos;
+   enum anv_cmd_buffer_exec_mode                exec_mode;
+
+   /* A vector of anv_batch_bo pointers for every batch or surface buffer
+    * referenced by this command buffer
+    *
+    * initialized by anv_cmd_buffer_init_batch_bo_chain()
+    */
+   struct anv_vector                            seen_bbos;
+
+   /* A vector of int32_t's for every block of binding tables.
+    *
+    * initialized by anv_cmd_buffer_init_batch_bo_chain()
+    */
+   struct anv_vector                            bt_blocks;
+   uint32_t                                     bt_next;
+   struct anv_reloc_list                        surface_relocs;
+
+   /* Information needed for execbuf
+    *
+    * These fields are generated by anv_cmd_buffer_prepare_execbuf().
+    */
+   struct {
+      struct drm_i915_gem_execbuffer2           execbuf;
+
+      struct drm_i915_gem_exec_object2 *        objects;
+      uint32_t                                  bo_count;
+      struct anv_bo **                          bos;
+
+      /* Allocated length of the 'objects' and 'bos' arrays */
+      uint32_t                                  array_length;
+
+      bool                                      need_reloc;
+   } execbuf2;
+
+   /* Serial for tracking buffer completion */
+   uint32_t                                     serial;
+
+   /* Stream objects for storing temporary data */
+   struct anv_state_stream                      surface_state_stream;
+   struct anv_state_stream                      dynamic_state_stream;
+
+   VkCommandBufferUsageFlags                    usage_flags;
+   VkCommandBufferLevel                         level;
+
+   struct anv_cmd_state                         state;
+};
+
+VkResult anv_cmd_buffer_init_batch_bo_chain(struct anv_cmd_buffer *cmd_buffer);
+void anv_cmd_buffer_fini_batch_bo_chain(struct anv_cmd_buffer *cmd_buffer);
+void anv_cmd_buffer_reset_batch_bo_chain(struct anv_cmd_buffer *cmd_buffer);
+void anv_cmd_buffer_end_batch_buffer(struct anv_cmd_buffer *cmd_buffer);
+void anv_cmd_buffer_add_secondary(struct anv_cmd_buffer *primary,
+                                  struct anv_cmd_buffer *secondary);
+void anv_cmd_buffer_prepare_execbuf(struct anv_cmd_buffer *cmd_buffer);
+
+VkResult anv_cmd_buffer_emit_binding_table(struct anv_cmd_buffer *cmd_buffer,
+                                           unsigned stage, struct anv_state *bt_state);
+VkResult anv_cmd_buffer_emit_samplers(struct anv_cmd_buffer *cmd_buffer,
+                                      unsigned stage, struct anv_state *state);
+uint32_t gen7_cmd_buffer_flush_descriptor_sets(struct anv_cmd_buffer *cmd_buffer);
+void gen7_cmd_buffer_emit_descriptor_pointers(struct anv_cmd_buffer *cmd_buffer,
+                                              uint32_t stages);
+
+struct anv_state anv_cmd_buffer_emit_dynamic(struct anv_cmd_buffer *cmd_buffer,
+                                             const void *data, uint32_t size, uint32_t alignment);
+struct anv_state anv_cmd_buffer_merge_dynamic(struct anv_cmd_buffer *cmd_buffer,
+                                              uint32_t *a, uint32_t *b,
+                                              uint32_t dwords, uint32_t alignment);
+
+struct anv_address
+anv_cmd_buffer_surface_base_address(struct anv_cmd_buffer *cmd_buffer);
+struct anv_state
+anv_cmd_buffer_alloc_binding_table(struct anv_cmd_buffer *cmd_buffer,
+                                   uint32_t entries, uint32_t *state_offset);
+struct anv_state
+anv_cmd_buffer_alloc_surface_state(struct anv_cmd_buffer *cmd_buffer);
+struct anv_state
+anv_cmd_buffer_alloc_dynamic_state(struct anv_cmd_buffer *cmd_buffer,
+                                   uint32_t size, uint32_t alignment);
+
+VkResult
+anv_cmd_buffer_new_binding_table_block(struct anv_cmd_buffer *cmd_buffer);
+
+void gen8_cmd_buffer_emit_viewport(struct anv_cmd_buffer *cmd_buffer);
+void gen7_cmd_buffer_emit_scissor(struct anv_cmd_buffer *cmd_buffer);
+
+void anv_cmd_buffer_emit_state_base_address(struct anv_cmd_buffer *cmd_buffer);
+
+void anv_cmd_state_setup_attachments(struct anv_cmd_buffer *cmd_buffer,
+                                     const VkRenderPassBeginInfo *info);
+
+void anv_cmd_buffer_set_subpass(struct anv_cmd_buffer *cmd_buffer,
+                                  struct anv_subpass *subpass);
+
+struct anv_state
+anv_cmd_buffer_push_constants(struct anv_cmd_buffer *cmd_buffer,
+                              gl_shader_stage stage);
+struct anv_state
+anv_cmd_buffer_cs_push_constants(struct anv_cmd_buffer *cmd_buffer);
+
+void anv_cmd_buffer_clear_subpass(struct anv_cmd_buffer *cmd_buffer);
+void anv_cmd_buffer_resolve_subpass(struct anv_cmd_buffer *cmd_buffer);
+
+const struct anv_image_view *
+anv_cmd_buffer_get_depth_stencil_view(const struct anv_cmd_buffer *cmd_buffer);
+
+void anv_cmd_buffer_dump(struct anv_cmd_buffer *cmd_buffer);
+
+struct anv_fence {
+   struct anv_bo bo;
+   struct drm_i915_gem_execbuffer2 execbuf;
+   struct drm_i915_gem_exec_object2 exec2_objects[1];
+   bool ready;
+};
+
+struct anv_event {
+   uint64_t                                     semaphore;
+   struct anv_state                             state;
+};
+
+struct nir_shader;
+
+struct anv_shader_module {
+   struct nir_shader *                          nir;
+
+   unsigned char                                sha1[20];
+   uint32_t                                     size;
+   char                                         data[0];
+};
+
+void anv_hash_shader(unsigned char *hash, const void *key, size_t key_size,
+                     struct anv_shader_module *module,
+                     const char *entrypoint,
+                     const VkSpecializationInfo *spec_info);
+
+static inline gl_shader_stage
+vk_to_mesa_shader_stage(VkShaderStageFlagBits vk_stage)
+{
+   assert(__builtin_popcount(vk_stage) == 1);
+   return ffs(vk_stage) - 1;
+}
+
+static inline VkShaderStageFlagBits
+mesa_to_vk_shader_stage(gl_shader_stage mesa_stage)
+{
+   return (1 << mesa_stage);
+}
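+
+/* These two helpers rely on both enums listing the stages in the same
+ * order: e.g. VK_SHADER_STAGE_FRAGMENT_BIT (0x10) maps to
+ * ffs(0x10) - 1 == 4 == MESA_SHADER_FRAGMENT, and back.
+ */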
+
+#define ANV_STAGE_MASK ((1 << MESA_SHADER_STAGES) - 1)
+
+#define anv_foreach_stage(stage, stage_bits)                         \
+   for (gl_shader_stage stage,                                       \
+        __tmp = (gl_shader_stage)((stage_bits) & ANV_STAGE_MASK);    \
+        stage = __builtin_ffs(__tmp) - 1, __tmp;                     \
+        __tmp &= ~(1 << (stage)))
+
+struct anv_pipeline_bind_map {
+   uint32_t surface_count;
+   uint32_t sampler_count;
+   uint32_t image_count;
+   uint32_t attachment_count;
+
+   struct anv_pipeline_binding *                surface_to_descriptor;
+   struct anv_pipeline_binding *                sampler_to_descriptor;
+   uint32_t *                                   surface_to_attachment;
+};
+
+struct anv_pipeline {
+   struct anv_device *                          device;
+   struct anv_batch                             batch;
+   uint32_t                                     batch_data[512];
+   struct anv_reloc_list                        batch_relocs;
+   uint32_t                                     dynamic_state_mask;
+   struct anv_dynamic_state                     dynamic_state;
+
+   struct anv_pipeline_layout *                 layout;
+   struct anv_pipeline_bind_map                 bindings[MESA_SHADER_STAGES];
+
+   bool                                         use_repclear;
+
+   const struct brw_stage_prog_data *           prog_data[MESA_SHADER_STAGES];
+   uint32_t                                     scratch_start[MESA_SHADER_STAGES];
+   uint32_t                                     total_scratch;
+   struct {
+      uint8_t                                   push_size[MESA_SHADER_FRAGMENT + 1];
+      uint32_t                                  start[MESA_SHADER_GEOMETRY + 1];
+      uint32_t                                  size[MESA_SHADER_GEOMETRY + 1];
+      uint32_t                                  entries[MESA_SHADER_GEOMETRY + 1];
+   } urb;
+
+   VkShaderStageFlags                           active_stages;
+   struct anv_state                             blend_state;
+   uint32_t                                     vs_simd8;
+   uint32_t                                     vs_vec4;
+   uint32_t                                     ps_simd8;
+   uint32_t                                     ps_simd16;
+   uint32_t                                     ps_ksp0;
+   uint32_t                                     ps_ksp2;
+   uint32_t                                     ps_grf_start0;
+   uint32_t                                     ps_grf_start2;
+   uint32_t                                     gs_kernel;
+   uint32_t                                     cs_simd;
+
+   uint32_t                                     vb_used;
+   uint32_t                                     binding_stride[MAX_VBS];
+   bool                                         instancing_enable[MAX_VBS];
+   bool                                         primitive_restart;
+   uint32_t                                     topology;
+
+   uint32_t                                     cs_thread_width_max;
+   uint32_t                                     cs_right_mask;
+
+   struct {
+      uint32_t                                  sf[7];
+      uint32_t                                  depth_stencil_state[3];
+   } gen7;
+
+   struct {
+      uint32_t                                  sf[4];
+      uint32_t                                  raster[5];
+      uint32_t                                  wm_depth_stencil[3];
+   } gen8;
+
+   struct {
+      uint32_t                                  wm_depth_stencil[4];
+   } gen9;
+};
+
+static inline const struct brw_vs_prog_data *
+get_vs_prog_data(struct anv_pipeline *pipeline)
+{
+   return (const struct brw_vs_prog_data *) pipeline->prog_data[MESA_SHADER_VERTEX];
+}
+
+static inline const struct brw_gs_prog_data *
+get_gs_prog_data(struct anv_pipeline *pipeline)
+{
+   return (const struct brw_gs_prog_data *) pipeline->prog_data[MESA_SHADER_GEOMETRY];
+}
+
+static inline const struct brw_wm_prog_data *
+get_wm_prog_data(struct anv_pipeline *pipeline)
+{
+   return (const struct brw_wm_prog_data *) pipeline->prog_data[MESA_SHADER_FRAGMENT];
+}
+
+static inline const struct brw_cs_prog_data *
+get_cs_prog_data(struct anv_pipeline *pipeline)
+{
+   return (const struct brw_cs_prog_data *) pipeline->prog_data[MESA_SHADER_COMPUTE];
+}
+
+struct anv_graphics_pipeline_create_info {
+   /**
+    * If non-negative, overrides the color attachment count of the pipeline's
+    * subpass.
+    */
+   int8_t color_attachment_count;
+
+   bool                                         use_repclear;
+   bool                                         disable_viewport;
+   bool                                         disable_scissor;
+   bool                                         disable_vs;
+   bool                                         use_rectlist;
+};
+
+VkResult
+anv_pipeline_init(struct anv_pipeline *pipeline, struct anv_device *device,
+                  struct anv_pipeline_cache *cache,
+                  const VkGraphicsPipelineCreateInfo *pCreateInfo,
+                  const struct anv_graphics_pipeline_create_info *extra,
+                  const VkAllocationCallbacks *alloc);
+
+VkResult
+anv_pipeline_compile_cs(struct anv_pipeline *pipeline,
+                        struct anv_pipeline_cache *cache,
+                        const VkComputePipelineCreateInfo *info,
+                        struct anv_shader_module *module,
+                        const char *entrypoint,
+                        const VkSpecializationInfo *spec_info);
+
+VkResult
+anv_graphics_pipeline_create(VkDevice device,
+                             VkPipelineCache cache,
+                             const VkGraphicsPipelineCreateInfo *pCreateInfo,
+                             const struct anv_graphics_pipeline_create_info *extra,
+                             const VkAllocationCallbacks *alloc,
+                             VkPipeline *pPipeline);
+
+struct anv_format_swizzle {
+   unsigned r:2;
+   unsigned g:2;
+   unsigned b:2;
+   unsigned a:2;
+};
+
+struct anv_format {
+   const VkFormat vk_format;
+   const char *name;
+   enum isl_format isl_format; /**< RENDER_SURFACE_STATE.SurfaceFormat */
+   const struct isl_format_layout *isl_layout;
+   struct anv_format_swizzle swizzle;
+   bool has_depth;
+   bool has_stencil;
+};
+
+const struct anv_format *
+anv_format_for_vk_format(VkFormat format);
+
+enum isl_format
+anv_get_isl_format(VkFormat format, VkImageAspectFlags aspect,
+                   VkImageTiling tiling, struct anv_format_swizzle *swizzle);
+
+static inline bool
+anv_format_is_color(const struct anv_format *format)
+{
+   return !format->has_depth && !format->has_stencil;
+}
+
+static inline bool
+anv_format_is_depth_or_stencil(const struct anv_format *format)
+{
+   return format->has_depth || format->has_stencil;
+}
+
+/**
+ * Subsurface of an anv_image.
+ */
+struct anv_surface {
+   struct isl_surf isl;
+
+   /**
+    * Offset from VkImage's base address, as bound by vkBindImageMemory().
+    */
+   uint32_t offset;
+};
+
+struct anv_image {
+   VkImageType type;
+   /* The original VkFormat provided by the client.  This may not match any
+    * of the actual surface formats.
+    */
+   VkFormat vk_format;
+   const struct anv_format *format;
+   VkExtent3D extent;
+   uint32_t levels;
+   uint32_t array_size;
+   uint32_t samples; /**< VkImageCreateInfo::samples */
+   VkImageUsageFlags usage; /**< Superset of VkImageCreateInfo::usage. */
+   VkImageTiling tiling; /**< VkImageCreateInfo::tiling */
+
+   VkDeviceSize size;
+   uint32_t alignment;
+
+   /* Set when bound */
+   struct anv_bo *bo;
+   VkDeviceSize offset;
+
+   /**
+    * Image subsurfaces
+    *
+    * For each foo, anv_image::foo_surface is valid if and only if
+    * anv_image::format has a foo aspect.
+    *
+    * The hardware requires that the depth buffer and stencil buffer be
+    * separate surfaces.  From Vulkan's perspective, though, depth and stencil
+    * reside in the same VkImage.  To satisfy both the hardware and Vulkan, we
+    * allocate the depth and stencil buffers as separate surfaces in the same
+    * bo.
+    */
+   union {
+      struct anv_surface color_surface;
+
+      struct {
+         struct anv_surface depth_surface;
+         struct anv_surface stencil_surface;
+      };
+   };
+};
+
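The union above relies on the rule stated in the comment: a subsurface may only be touched when anv_image::format actually has that aspect. Below is a minimal sketch of aspect-to-subsurface selection under that rule (illustrative only; the driver's real helper is anv_image_get_surface_for_aspect_mask(), declared further down in this header):

static inline const struct anv_surface *
example_surface_for_aspect(const struct anv_image *image,
                           VkImageAspectFlagBits aspect)
{
   switch (aspect) {
   case VK_IMAGE_ASPECT_COLOR_BIT:
      assert(anv_format_is_color(image->format));
      return &image->color_surface;
   case VK_IMAGE_ASPECT_DEPTH_BIT:
      assert(image->format->has_depth);
      return &image->depth_surface;
   case VK_IMAGE_ASPECT_STENCIL_BIT:
      assert(image->format->has_stencil);
      return &image->stencil_surface;
   default:
      assert(!"unsupported image aspect");
      return NULL;
   }
}
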
+static inline uint32_t
+anv_get_layerCount(const struct anv_image *image,
+                   const VkImageSubresourceRange *range)
+{
+   return range->layerCount == VK_REMAINING_ARRAY_LAYERS ?
+          image->array_size - range->baseArrayLayer : range->layerCount;
+}
+
+static inline uint32_t
+anv_get_levelCount(const struct anv_image *image,
+                   const VkImageSubresourceRange *range)
+{
+   return range->levelCount == VK_REMAINING_MIP_LEVELS ?
+          image->levels - range->baseMipLevel : range->levelCount;
+}
+
+
+struct anv_image_view {
+   const struct anv_image *image; /**< VkImageViewCreateInfo::image */
+   struct anv_bo *bo;
+   uint32_t offset; /**< Offset into bo. */
+
+   VkImageAspectFlags aspect_mask;
+   VkFormat vk_format;
+   uint32_t base_layer;
+   uint32_t base_mip;
+   VkExtent3D extent; /**< Extent of VkImageViewCreateInfo::baseMipLevel. */
+
+   /** RENDER_SURFACE_STATE when using image as a color render target. */
+   struct anv_state color_rt_surface_state;
+
+   /** RENDER_SURFACE_STATE when using image as a sampler surface. */
+   struct anv_state sampler_surface_state;
+
+   /** RENDER_SURFACE_STATE when using image as a storage image. */
+   struct anv_state storage_surface_state;
+
+   struct brw_image_param storage_image_param;
+};
+
+struct anv_image_create_info {
+   const VkImageCreateInfo *vk_info;
+   isl_tiling_flags_t isl_tiling_flags;
+   uint32_t stride;
+};
+
+VkResult anv_image_create(VkDevice _device,
+                          const struct anv_image_create_info *info,
+                          const VkAllocationCallbacks* alloc,
+                          VkImage *pImage);
+
+struct anv_surface *
+anv_image_get_surface_for_aspect_mask(struct anv_image *image,
+                                      VkImageAspectFlags aspect_mask);
+
+void anv_image_view_init(struct anv_image_view *view,
+                         struct anv_device *device,
+                         const VkImageViewCreateInfo* pCreateInfo,
+                         struct anv_cmd_buffer *cmd_buffer,
+                         uint32_t offset,
+                         VkImageUsageFlags usage_mask);
+
+struct anv_buffer_view {
+   enum isl_format format; /**< VkBufferViewCreateInfo::format */
+   struct anv_bo *bo;
+   uint32_t offset; /**< Offset into bo. */
+   uint64_t range; /**< VkBufferViewCreateInfo::range */
+
+   struct anv_state surface_state;
+   struct anv_state storage_surface_state;
+
+   struct brw_image_param storage_image_param;
+};
+
+const struct anv_format *
+anv_format_for_descriptor_type(VkDescriptorType type);
+
+void anv_fill_buffer_surface_state(struct anv_device *device,
+                                   struct anv_state state,
+                                   enum isl_format format,
+                                   uint32_t offset, uint32_t range,
+                                   uint32_t stride);
+
+void anv_image_view_fill_image_param(struct anv_device *device,
+                                     struct anv_image_view *view,
+                                     struct brw_image_param *param);
+void anv_buffer_view_fill_image_param(struct anv_device *device,
+                                      struct anv_buffer_view *view,
+                                      struct brw_image_param *param);
+
+struct anv_sampler {
+   uint32_t state[4];
+};
+
+struct anv_framebuffer {
+   uint32_t                                     width;
+   uint32_t                                     height;
+   uint32_t                                     layers;
+
+   uint32_t                                     attachment_count;
+   struct anv_image_view *                      attachments[0];
+};
+
+struct anv_subpass {
+   uint32_t                                     input_count;
+   uint32_t *                                   input_attachments;
+   uint32_t                                     color_count;
+   uint32_t *                                   color_attachments;
+   uint32_t *                                   resolve_attachments;
+   uint32_t                                     depth_stencil_attachment;
+
+   /** Subpass has at least one resolve attachment */
+   bool                                         has_resolve;
+};
+
+struct anv_render_pass_attachment {
+   const struct anv_format                      *format;
+   uint32_t                                     samples;
+   VkAttachmentLoadOp                           load_op;
+   VkAttachmentLoadOp                           stencil_load_op;
+};
+
+struct anv_render_pass {
+   uint32_t                                     attachment_count;
+   uint32_t                                     subpass_count;
+   uint32_t *                                   subpass_attachments;
+   struct anv_render_pass_attachment *          attachments;
+   struct anv_subpass                           subpasses[0];
+};
+
+extern struct anv_render_pass anv_meta_dummy_renderpass;
+
+struct anv_query_pool_slot {
+   uint64_t begin;
+   uint64_t end;
+   uint64_t available;
+};
+
+struct anv_query_pool {
+   VkQueryType                                  type;
+   uint32_t                                     slots;
+   struct anv_bo                                bo;
+};
+
+VkResult anv_device_init_meta(struct anv_device *device);
+void anv_device_finish_meta(struct anv_device *device);
+
+void *anv_lookup_entrypoint(const char *name);
+
+void anv_dump_image_to_ppm(struct anv_device *device,
+                           struct anv_image *image, unsigned miplevel,
+                           unsigned array_layer, const char *filename);
+
+#define ANV_DEFINE_HANDLE_CASTS(__anv_type, __VkType)                      \
+                                                                           \
+   static inline struct __anv_type *                                       \
+   __anv_type ## _from_handle(__VkType _handle)                            \
+   {                                                                       \
+      return (struct __anv_type *) _handle;                                \
+   }                                                                       \
+                                                                           \
+   static inline __VkType                                                  \
+   __anv_type ## _to_handle(struct __anv_type *_obj)                       \
+   {                                                                       \
+      return (__VkType) _obj;                                              \
+   }
+
+#define ANV_DEFINE_NONDISP_HANDLE_CASTS(__anv_type, __VkType)              \
+                                                                           \
+   static inline struct __anv_type *                                       \
+   __anv_type ## _from_handle(__VkType _handle)                            \
+   {                                                                       \
+      return (struct __anv_type *)(uintptr_t) _handle;                     \
+   }                                                                       \
+                                                                           \
+   static inline __VkType                                                  \
+   __anv_type ## _to_handle(struct __anv_type *_obj)                       \
+   {                                                                       \
+      return (__VkType)(uintptr_t) _obj;                                   \
+   }
+
+#define ANV_FROM_HANDLE(__anv_type, __name, __handle) \
+   struct __anv_type *__name = __anv_type ## _from_handle(__handle)
+
+ANV_DEFINE_HANDLE_CASTS(anv_cmd_buffer, VkCommandBuffer)
+ANV_DEFINE_HANDLE_CASTS(anv_device, VkDevice)
+ANV_DEFINE_HANDLE_CASTS(anv_instance, VkInstance)
+ANV_DEFINE_HANDLE_CASTS(anv_physical_device, VkPhysicalDevice)
+ANV_DEFINE_HANDLE_CASTS(anv_queue, VkQueue)
+
+ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_cmd_pool, VkCommandPool)
+ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_buffer, VkBuffer)
+ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_buffer_view, VkBufferView)
+ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_descriptor_pool, VkDescriptorPool)
+ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_descriptor_set, VkDescriptorSet)
+ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_descriptor_set_layout, VkDescriptorSetLayout)
+ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_device_memory, VkDeviceMemory)
+ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_fence, VkFence)
+ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_event, VkEvent)
+ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_framebuffer, VkFramebuffer)
+ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_image, VkImage)
+ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_image_view, VkImageView)
+ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_pipeline_cache, VkPipelineCache)
+ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_pipeline, VkPipeline)
+ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_pipeline_layout, VkPipelineLayout)
+ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_query_pool, VkQueryPool)
+ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_render_pass, VkRenderPass)
+ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_sampler, VkSampler)
+ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_shader_module, VkShaderModule)
+
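For reference, each ANV_DEFINE_NONDISP_HANDLE_CASTS() invocation above expands to a pair of trivial inline casts; written out for anv_buffer/VkBuffer it is roughly the following (expansion shown for illustration only). The dispatchable ANV_DEFINE_HANDLE_CASTS() variant differs only in casting the pointer directly instead of going through uintptr_t, since non-dispatchable handles may be defined as 64-bit integers rather than pointers.

static inline struct anv_buffer *
anv_buffer_from_handle(VkBuffer _handle)
{
   return (struct anv_buffer *)(uintptr_t) _handle;
}

static inline VkBuffer
anv_buffer_to_handle(struct anv_buffer *_obj)
{
   return (VkBuffer)(uintptr_t) _obj;
}
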
+#define ANV_DEFINE_STRUCT_CASTS(__anv_type, __VkType) \
+   \
+   static inline const __VkType * \
+   __anv_type ## _to_ ## __VkType(const struct __anv_type *__anv_obj) \
+   { \
+      return (const __VkType *) __anv_obj; \
+   }
+
+#define ANV_COMMON_TO_STRUCT(__VkType, __vk_name, __common_name) \
+   const __VkType *__vk_name = anv_common_to_ ## __VkType(__common_name)
+
+ANV_DEFINE_STRUCT_CASTS(anv_common, VkMemoryBarrier)
+ANV_DEFINE_STRUCT_CASTS(anv_common, VkBufferMemoryBarrier)
+ANV_DEFINE_STRUCT_CASTS(anv_common, VkImageMemoryBarrier)
+
+/* Gen-specific function declarations */
+#ifdef genX
+#  include "anv_genX.h"
+#else
+#  define genX(x) gen7_##x
+#  include "anv_genX.h"
+#  undef genX
+#  define genX(x) gen75_##x
+#  include "anv_genX.h"
+#  undef genX
+#  define genX(x) gen8_##x
+#  include "anv_genX.h"
+#  undef genX
+#  define genX(x) gen9_##x
+#  include "anv_genX.h"
+#  undef genX
+#endif
+
+#ifdef __cplusplus
+}
+#endif
diff --git a/src/intel/vulkan/anv_query.c b/src/intel/vulkan/anv_query.c
new file mode 100644 (file)
index 0000000..e45b519
--- /dev/null
@@ -0,0 +1,187 @@
+/*
+ * Copyright © 2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include <assert.h>
+#include <stdbool.h>
+#include <string.h>
+#include <unistd.h>
+#include <fcntl.h>
+
+#include "anv_private.h"
+
+VkResult anv_CreateQueryPool(
+    VkDevice                                    _device,
+    const VkQueryPoolCreateInfo*                pCreateInfo,
+    const VkAllocationCallbacks*                pAllocator,
+    VkQueryPool*                                pQueryPool)
+{
+   ANV_FROM_HANDLE(anv_device, device, _device);
+   struct anv_query_pool *pool;
+   VkResult result;
+   uint32_t slot_size;
+   uint64_t size;
+
+   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO);
+
+   switch (pCreateInfo->queryType) {
+   case VK_QUERY_TYPE_OCCLUSION:
+   case VK_QUERY_TYPE_TIMESTAMP:
+      break;
+   case VK_QUERY_TYPE_PIPELINE_STATISTICS:
+      return VK_ERROR_INCOMPATIBLE_DRIVER;
+   default:
+      assert(!"Invalid query type");
+   }
+
+   slot_size = sizeof(struct anv_query_pool_slot);
+   pool = anv_alloc2(&device->alloc, pAllocator, sizeof(*pool), 8,
+                     VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+   if (pool == NULL)
+      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+
+   pool->type = pCreateInfo->queryType;
+   pool->slots = pCreateInfo->queryCount;
+
+   size = pCreateInfo->queryCount * slot_size;
+   result = anv_bo_init_new(&pool->bo, device, size);
+   if (result != VK_SUCCESS)
+      goto fail;
+
+   pool->bo.map = anv_gem_mmap(device, pool->bo.gem_handle, 0, size, 0);
+
+   *pQueryPool = anv_query_pool_to_handle(pool);
+
+   return VK_SUCCESS;
+
+ fail:
+   anv_free2(&device->alloc, pAllocator, pool);
+
+   return result;
+}
+
+void anv_DestroyQueryPool(
+    VkDevice                                    _device,
+    VkQueryPool                                 _pool,
+    const VkAllocationCallbacks*                pAllocator)
+{
+   ANV_FROM_HANDLE(anv_device, device, _device);
+   ANV_FROM_HANDLE(anv_query_pool, pool, _pool);
+
+   anv_gem_munmap(pool->bo.map, pool->bo.size);
+   anv_gem_close(device, pool->bo.gem_handle);
+   anv_free2(&device->alloc, pAllocator, pool);
+}
+
+VkResult anv_GetQueryPoolResults(
+    VkDevice                                    _device,
+    VkQueryPool                                 queryPool,
+    uint32_t                                    firstQuery,
+    uint32_t                                    queryCount,
+    size_t                                      dataSize,
+    void*                                       pData,
+    VkDeviceSize                                stride,
+    VkQueryResultFlags                          flags)
+{
+   ANV_FROM_HANDLE(anv_device, device, _device);
+   ANV_FROM_HANDLE(anv_query_pool, pool, queryPool);
+   int64_t timeout = INT64_MAX;
+   uint64_t result;
+   int ret;
+
+   assert(pool->type == VK_QUERY_TYPE_OCCLUSION ||
+          pool->type == VK_QUERY_TYPE_TIMESTAMP);
+
+   if (pData == NULL)
+      return VK_SUCCESS;
+
+   if (flags & VK_QUERY_RESULT_WAIT_BIT) {
+      ret = anv_gem_wait(device, pool->bo.gem_handle, &timeout);
+      if (ret == -1) {
+         /* We don't know the real error. */
+         return vk_errorf(VK_ERROR_OUT_OF_DEVICE_MEMORY,
+                          "gem_wait failed %m");
+      }
+   }
+
+   void *data_end = pData + dataSize;
+   struct anv_query_pool_slot *slot = pool->bo.map;
+
+   for (uint32_t i = 0; i < queryCount; i++) {
+      switch (pool->type) {
+      case VK_QUERY_TYPE_OCCLUSION: {
+         result = slot[firstQuery + i].end - slot[firstQuery + i].begin;
+         break;
+      }
+      case VK_QUERY_TYPE_PIPELINE_STATISTICS:
+         unreachable("pipeline stats not supported");
+      case VK_QUERY_TYPE_TIMESTAMP: {
+         result = slot[firstQuery + i].begin;
+         break;
+      }
+      default:
+         unreachable("invalid pool type");
+      }
+
+      if (flags & VK_QUERY_RESULT_64_BIT) {
+         uint64_t *dst = pData;
+         dst[0] = result;
+         if (flags & VK_QUERY_RESULT_WITH_AVAILABILITY_BIT)
+            dst[1] = slot[firstQuery + i].available;
+      } else {
+         uint32_t *dst = pData;
+         if (result > UINT32_MAX)
+            result = UINT32_MAX;
+         dst[0] = result;
+         if (flags & VK_QUERY_RESULT_WITH_AVAILABILITY_BIT)
+            dst[1] = slot[firstQuery + i].available;
+      }
+
+      pData += stride;
+      if (pData >= data_end)
+         break;
+   }
+
+   return VK_SUCCESS;
+}
+
+void anv_CmdResetQueryPool(
+    VkCommandBuffer                             commandBuffer,
+    VkQueryPool                                 queryPool,
+    uint32_t                                    firstQuery,
+    uint32_t                                    queryCount)
+{
+   ANV_FROM_HANDLE(anv_query_pool, pool, queryPool);
+
+   for (uint32_t i = 0; i < queryCount; i++) {
+      switch (pool->type) {
+      case VK_QUERY_TYPE_OCCLUSION:
+      case VK_QUERY_TYPE_TIMESTAMP: {
+         struct anv_query_pool_slot *slot = pool->bo.map;
+         slot[firstQuery + i].available = 0;
+         break;
+      }
+      default:
+         assert(!"Invalid query type");
+      }
+   }
+}
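The result-copy loop in anv_GetQueryPoolResults() above writes one row per query into pData, advancing by the caller's stride: dst[0] is the result (end - begin for occlusion, the raw begin value for timestamps, clamped to UINT32_MAX in the 32-bit case) and, when VK_QUERY_RESULT_WITH_AVAILABILITY_BIT is set, dst[1] is the availability word at the same element width. A sketch of the row layouts it produces (illustrative only; the struct names are made up and the caller's stride must be large enough to hold a full row):

struct example_row64 {   /* with VK_QUERY_RESULT_64_BIT */
   uint64_t result;
   uint64_t available;   /* only written with ..._WITH_AVAILABILITY_BIT */
};

struct example_row32 {   /* default 32-bit results */
   uint32_t result;      /* clamped to UINT32_MAX */
   uint32_t available;   /* only written with ..._WITH_AVAILABILITY_BIT */
};
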
diff --git a/src/intel/vulkan/anv_util.c b/src/intel/vulkan/anv_util.c
new file mode 100644 (file)
index 0000000..62f4705
--- /dev/null
@@ -0,0 +1,202 @@
+/*
+ * Copyright © 2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include <stdarg.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <errno.h>
+#include <assert.h>
+
+#include "anv_private.h"
+
+/** Log an error message.  */
+void anv_printflike(1, 2)
+anv_loge(const char *format, ...)
+{
+   va_list va;
+
+   va_start(va, format);
+   anv_loge_v(format, va);
+   va_end(va);
+}
+
+/** \see anv_loge() */
+void
+anv_loge_v(const char *format, va_list va)
+{
+   fprintf(stderr, "vk: error: ");
+   vfprintf(stderr, format, va);
+   fprintf(stderr, "\n");
+}
+
+void anv_printflike(3, 4)
+__anv_finishme(const char *file, int line, const char *format, ...)
+{
+   va_list ap;
+   char buffer[256];
+
+   va_start(ap, format);
+   vsnprintf(buffer, sizeof(buffer), format, ap);
+   va_end(ap);
+
+   fprintf(stderr, "%s:%d: FINISHME: %s\n", file, line, buffer);
+}
+
+void anv_noreturn anv_printflike(1, 2)
+anv_abortf(const char *format, ...)
+{
+   va_list va;
+
+   va_start(va, format);
+   anv_abortfv(format, va);
+   va_end(va);
+}
+
+void anv_noreturn
+anv_abortfv(const char *format, va_list va)
+{
+   fprintf(stderr, "vk: error: ");
+   vfprintf(stderr, format, va);
+   fprintf(stderr, "\n");
+   abort();
+}
+
+VkResult
+__vk_errorf(VkResult error, const char *file, int line, const char *format, ...)
+{
+   va_list ap;
+   char buffer[256];
+
+#define ERROR_CASE(error) case error: error_str = #error; break;
+
+   const char *error_str;
+   switch ((int32_t)error) {
+
+   /* Core errors */
+   ERROR_CASE(VK_ERROR_OUT_OF_HOST_MEMORY)
+   ERROR_CASE(VK_ERROR_OUT_OF_DEVICE_MEMORY)
+   ERROR_CASE(VK_ERROR_INITIALIZATION_FAILED)
+   ERROR_CASE(VK_ERROR_DEVICE_LOST)
+   ERROR_CASE(VK_ERROR_MEMORY_MAP_FAILED)
+   ERROR_CASE(VK_ERROR_LAYER_NOT_PRESENT)
+   ERROR_CASE(VK_ERROR_EXTENSION_NOT_PRESENT)
+   ERROR_CASE(VK_ERROR_INCOMPATIBLE_DRIVER)
+
+   /* Extension errors */
+   ERROR_CASE(VK_ERROR_OUT_OF_DATE_KHR)
+
+   default:
+      assert(!"Unknown error");
+      error_str = "unknown error";
+   }
+
+#undef ERROR_CASE
+
+   if (format) {
+      va_start(ap, format);
+      vsnprintf(buffer, sizeof(buffer), format, ap);
+      va_end(ap);
+
+      fprintf(stderr, "%s:%d: %s (%s)\n", file, line, buffer, error_str);
+   } else {
+      fprintf(stderr, "%s:%d: %s\n", file, line, error_str);
+   }
+
+   return error;
+}
+
+int
+anv_vector_init(struct anv_vector *vector, uint32_t element_size, uint32_t size)
+{
+   assert(util_is_power_of_two(size));
+   assert(element_size < size && util_is_power_of_two(element_size));
+
+   vector->head = 0;
+   vector->tail = 0;
+   vector->element_size = element_size;
+   vector->size = size;
+   vector->data = malloc(size);
+
+   return vector->data != NULL;
+}
+
+void *
+anv_vector_add(struct anv_vector *vector)
+{
+   uint32_t offset, size, split, src_tail, dst_tail;
+   void *data;
+
+   if (vector->head - vector->tail == vector->size) {
+      size = vector->size * 2;
+      data = malloc(size);
+      if (data == NULL)
+         return NULL;
+      src_tail = vector->tail & (vector->size - 1);
+      dst_tail = vector->tail & (size - 1);
+      if (src_tail == 0) {
+         /* Since we know that the vector is full, this means that it's
+          * linear from start to end so we can do one copy.
+          */
+         memcpy(data + dst_tail, vector->data, vector->size);
+      } else {
+         /* In this case, the vector is split into two pieces and we have
+          * to do two copies.  We have to be careful to make sure each
+          * piece goes to the right locations.  Thanks to the change in
+          * size, it may or may not still wrap around.
+          */
+         split = align_u32(vector->tail, vector->size);
+         assert(vector->tail <= split && split < vector->head);
+         memcpy(data + dst_tail, vector->data + src_tail,
+                split - vector->tail);
+         memcpy(data + (split & (size - 1)), vector->data,
+                vector->head - split);
+      }
+      free(vector->data);
+      vector->data = data;
+      vector->size = size;
+   }
+
+   assert(vector->head - vector->tail < vector->size);
+
+   offset = vector->head & (vector->size - 1);
+   vector->head += vector->element_size;
+
+   return vector->data + offset;
+}
+
+void *
+anv_vector_remove(struct anv_vector *vector)
+{
+   uint32_t offset;
+
+   if (vector->head == vector->tail)
+      return NULL;
+
+   assert(vector->head - vector->tail <= vector->size);
+
+   offset = vector->tail & (vector->size - 1);
+   vector->tail += vector->element_size;
+
+   return vector->data + offset;
+}
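anv_vector is a growable power-of-two ring buffer: head and tail are byte offsets, anv_vector_add() reserves element_size bytes at the head (doubling and re-linearizing the storage when full, which is what the one-copy/two-copy split above handles), and anv_vector_remove() pops from the tail. A minimal usage sketch (illustrative only; the VkFormat element type mirrors the format list the Wayland WSI code keeps later in this patch):

static bool
example_format_fifo(void)
{
   struct anv_vector fifo;

   /* Sizes are in bytes; both must be powers of two, with
    * element_size < size, as asserted by anv_vector_init().
    */
   if (!anv_vector_init(&fifo, sizeof(VkFormat), 8 * sizeof(VkFormat)))
      return false;

   VkFormat *slot = anv_vector_add(&fifo);      /* reserve one element */
   if (slot != NULL)
      *slot = VK_FORMAT_B8G8R8A8_SRGB;

   VkFormat *oldest = anv_vector_remove(&fifo); /* NULL when empty */
   (void)oldest;

   anv_vector_finish(&fifo);
   return true;
}
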
diff --git a/src/intel/vulkan/anv_wsi.c b/src/intel/vulkan/anv_wsi.c
new file mode 100644 (file)
index 0000000..c2938f3
--- /dev/null
@@ -0,0 +1,234 @@
+/*
+ * Copyright © 2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "anv_wsi.h"
+
+VkResult
+anv_init_wsi(struct anv_instance *instance)
+{
+   VkResult result;
+
+   result = anv_x11_init_wsi(instance);
+   if (result != VK_SUCCESS)
+      return result;
+
+#ifdef HAVE_WAYLAND_PLATFORM
+   result = anv_wl_init_wsi(instance);
+   if (result != VK_SUCCESS) {
+      anv_x11_finish_wsi(instance);
+      return result;
+   }
+#endif
+
+   return VK_SUCCESS;
+}
+
+void
+anv_finish_wsi(struct anv_instance *instance)
+{
+#ifdef HAVE_WAYLAND_PLATFORM
+   anv_wl_finish_wsi(instance);
+#endif
+   anv_x11_finish_wsi(instance);
+}
+
+void anv_DestroySurfaceKHR(
+    VkInstance                                   _instance,
+    VkSurfaceKHR                                 _surface,
+    const VkAllocationCallbacks*                 pAllocator)
+{
+   ANV_FROM_HANDLE(anv_instance, instance, _instance);
+   ANV_FROM_HANDLE(_VkIcdSurfaceBase, surface, _surface);
+
+   anv_free2(&instance->alloc, pAllocator, surface);
+}
+
+VkResult anv_GetPhysicalDeviceSurfaceSupportKHR(
+    VkPhysicalDevice                            physicalDevice,
+    uint32_t                                    queueFamilyIndex,
+    VkSurfaceKHR                                _surface,
+    VkBool32*                                   pSupported)
+{
+   ANV_FROM_HANDLE(anv_physical_device, device, physicalDevice);
+   ANV_FROM_HANDLE(_VkIcdSurfaceBase, surface, _surface);
+   struct anv_wsi_interface *iface = device->instance->wsi[surface->platform];
+
+   return iface->get_support(surface, device, queueFamilyIndex, pSupported);
+}
+
+VkResult anv_GetPhysicalDeviceSurfaceCapabilitiesKHR(
+    VkPhysicalDevice                            physicalDevice,
+    VkSurfaceKHR                                _surface,
+    VkSurfaceCapabilitiesKHR*                   pSurfaceCapabilities)
+{
+   ANV_FROM_HANDLE(anv_physical_device, device, physicalDevice);
+   ANV_FROM_HANDLE(_VkIcdSurfaceBase, surface, _surface);
+   struct anv_wsi_interface *iface = device->instance->wsi[surface->platform];
+
+   return iface->get_capabilities(surface, device, pSurfaceCapabilities);
+}
+
+VkResult anv_GetPhysicalDeviceSurfaceFormatsKHR(
+    VkPhysicalDevice                            physicalDevice,
+    VkSurfaceKHR                                _surface,
+    uint32_t*                                   pSurfaceFormatCount,
+    VkSurfaceFormatKHR*                         pSurfaceFormats)
+{
+   ANV_FROM_HANDLE(anv_physical_device, device, physicalDevice);
+   ANV_FROM_HANDLE(_VkIcdSurfaceBase, surface, _surface);
+   struct anv_wsi_interface *iface = device->instance->wsi[surface->platform];
+
+   return iface->get_formats(surface, device, pSurfaceFormatCount,
+                             pSurfaceFormats);
+}
+
+VkResult anv_GetPhysicalDeviceSurfacePresentModesKHR(
+    VkPhysicalDevice                            physicalDevice,
+    VkSurfaceKHR                                _surface,
+    uint32_t*                                   pPresentModeCount,
+    VkPresentModeKHR*                           pPresentModes)
+{
+   ANV_FROM_HANDLE(anv_physical_device, device, physicalDevice);
+   ANV_FROM_HANDLE(_VkIcdSurfaceBase, surface, _surface);
+   struct anv_wsi_interface *iface = device->instance->wsi[surface->platform];
+
+   return iface->get_present_modes(surface, device, pPresentModeCount,
+                                   pPresentModes);
+}
+
+VkResult anv_CreateSwapchainKHR(
+    VkDevice                                     _device,
+    const VkSwapchainCreateInfoKHR*              pCreateInfo,
+    const VkAllocationCallbacks*                 pAllocator,
+    VkSwapchainKHR*                              pSwapchain)
+{
+   ANV_FROM_HANDLE(anv_device, device, _device);
+   ANV_FROM_HANDLE(_VkIcdSurfaceBase, surface, pCreateInfo->surface);
+   struct anv_wsi_interface *iface = device->instance->wsi[surface->platform];
+   struct anv_swapchain *swapchain;
+
+   VkResult result = iface->create_swapchain(surface, device, pCreateInfo,
+                                             pAllocator, &swapchain);
+   if (result != VK_SUCCESS)
+      return result;
+
+   if (pAllocator)
+      swapchain->alloc = *pAllocator;
+   else
+      swapchain->alloc = device->alloc;
+
+   for (unsigned i = 0; i < ARRAY_SIZE(swapchain->fences); i++)
+      swapchain->fences[i] = VK_NULL_HANDLE;
+
+   *pSwapchain = anv_swapchain_to_handle(swapchain);
+
+   return VK_SUCCESS;
+}
+
+void anv_DestroySwapchainKHR(
+    VkDevice                                     device,
+    VkSwapchainKHR                               _swapchain,
+    const VkAllocationCallbacks*                 pAllocator)
+{
+   ANV_FROM_HANDLE(anv_swapchain, swapchain, _swapchain);
+
+   for (unsigned i = 0; i < ARRAY_SIZE(swapchain->fences); i++) {
+      if (swapchain->fences[i] != VK_NULL_HANDLE)
+         anv_DestroyFence(device, swapchain->fences[i], pAllocator);
+   }
+
+   swapchain->destroy(swapchain, pAllocator);
+}
+
+VkResult anv_GetSwapchainImagesKHR(
+    VkDevice                                     device,
+    VkSwapchainKHR                               _swapchain,
+    uint32_t*                                    pSwapchainImageCount,
+    VkImage*                                     pSwapchainImages)
+{
+   ANV_FROM_HANDLE(anv_swapchain, swapchain, _swapchain);
+
+   return swapchain->get_images(swapchain, pSwapchainImageCount,
+                                pSwapchainImages);
+}
+
+VkResult anv_AcquireNextImageKHR(
+    VkDevice                                     device,
+    VkSwapchainKHR                               _swapchain,
+    uint64_t                                     timeout,
+    VkSemaphore                                  semaphore,
+    VkFence                                      fence,
+    uint32_t*                                    pImageIndex)
+{
+   ANV_FROM_HANDLE(anv_swapchain, swapchain, _swapchain);
+
+   return swapchain->acquire_next_image(swapchain, timeout, semaphore,
+                                        pImageIndex);
+}
+
+VkResult anv_QueuePresentKHR(
+    VkQueue                                  _queue,
+    const VkPresentInfoKHR*                  pPresentInfo)
+{
+   ANV_FROM_HANDLE(anv_queue, queue, _queue);
+   VkResult result;
+
+   for (uint32_t i = 0; i < pPresentInfo->swapchainCount; i++) {
+      ANV_FROM_HANDLE(anv_swapchain, swapchain, pPresentInfo->pSwapchains[i]);
+
+      assert(swapchain->device == queue->device);
+
+      if (swapchain->fences[0] == VK_NULL_HANDLE) {
+         result = anv_CreateFence(anv_device_to_handle(queue->device),
+            &(VkFenceCreateInfo) {
+               .sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO,
+               .flags = 0,
+            }, &swapchain->alloc, &swapchain->fences[0]);
+         if (result != VK_SUCCESS)
+            return result;
+      } else {
+         anv_ResetFences(anv_device_to_handle(queue->device),
+                         1, &swapchain->fences[0]);
+      }
+
+      anv_QueueSubmit(_queue, 0, NULL, swapchain->fences[0]);
+
+      result = swapchain->queue_present(swapchain, queue,
+                                        pPresentInfo->pImageIndices[i]);
+      /* TODO: What if one of them returns OUT_OF_DATE? */
+      if (result != VK_SUCCESS)
+         return result;
+
+      VkFence last = swapchain->fences[2];
+      swapchain->fences[2] = swapchain->fences[1];
+      swapchain->fences[1] = swapchain->fences[0];
+      swapchain->fences[0] = last;
+
+      if (last != VK_NULL_HANDLE) {
+         anv_WaitForFences(anv_device_to_handle(queue->device),
+                           1, &last, true, 1);
+      }
+   }
+
+   return VK_SUCCESS;
+}
diff --git a/src/intel/vulkan/anv_wsi.h b/src/intel/vulkan/anv_wsi.h
new file mode 100644 (file)
index 0000000..bf17f03
--- /dev/null
@@ -0,0 +1,78 @@
+/*
+ * Copyright © 2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#pragma once
+
+#include "anv_private.h"
+
+struct anv_swapchain;
+
+struct anv_wsi_interface {
+   VkResult (*get_support)(VkIcdSurfaceBase *surface,
+                           struct anv_physical_device *device,
+                           uint32_t queueFamilyIndex,
+                           VkBool32* pSupported);
+   VkResult (*get_capabilities)(VkIcdSurfaceBase *surface,
+                                struct anv_physical_device *device,
+                                VkSurfaceCapabilitiesKHR* pSurfaceCapabilities);
+   VkResult (*get_formats)(VkIcdSurfaceBase *surface,
+                           struct anv_physical_device *device,
+                           uint32_t* pSurfaceFormatCount,
+                           VkSurfaceFormatKHR* pSurfaceFormats);
+   VkResult (*get_present_modes)(VkIcdSurfaceBase *surface,
+                                 struct anv_physical_device *device,
+                                 uint32_t* pPresentModeCount,
+                                 VkPresentModeKHR* pPresentModes);
+   VkResult (*create_swapchain)(VkIcdSurfaceBase *surface,
+                                struct anv_device *device,
+                                const VkSwapchainCreateInfoKHR* pCreateInfo,
+                                const VkAllocationCallbacks* pAllocator,
+                                struct anv_swapchain **swapchain);
+};
+
+struct anv_swapchain {
+   struct anv_device *device;
+
+   VkAllocationCallbacks alloc;
+
+   VkFence fences[3];
+
+   VkResult (*destroy)(struct anv_swapchain *swapchain,
+                       const VkAllocationCallbacks *pAllocator);
+   VkResult (*get_images)(struct anv_swapchain *swapchain,
+                          uint32_t *pCount, VkImage *pSwapchainImages);
+   VkResult (*acquire_next_image)(struct anv_swapchain *swap_chain,
+                                  uint64_t timeout, VkSemaphore semaphore,
+                                  uint32_t *image_index);
+   VkResult (*queue_present)(struct anv_swapchain *swap_chain,
+                             struct anv_queue *queue,
+                             uint32_t image_index);
+};
+
+ANV_DEFINE_NONDISP_HANDLE_CASTS(_VkIcdSurfaceBase, VkSurfaceKHR)
+ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_swapchain, VkSwapchainKHR)
+
+VkResult anv_x11_init_wsi(struct anv_instance *instance);
+void anv_x11_finish_wsi(struct anv_instance *instance);
+VkResult anv_wl_init_wsi(struct anv_instance *instance);
+void anv_wl_finish_wsi(struct anv_instance *instance);
diff --git a/src/intel/vulkan/anv_wsi_wayland.c b/src/intel/vulkan/anv_wsi_wayland.c
new file mode 100644 (file)
index 0000000..6f25eaf
--- /dev/null
@@ -0,0 +1,871 @@
+/*
+ * Copyright © 2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include <wayland-client.h>
+#include <wayland-drm-client-protocol.h>
+
+#include "anv_wsi.h"
+
+#include <util/hash_table.h>
+
+#define MIN_NUM_IMAGES 2
+
+struct wsi_wl_display {
+   struct wl_display *                          display;
+   struct wl_drm *                              drm;
+
+   /* Vector of VkFormats supported */
+   struct anv_vector                            formats;
+
+   uint32_t                                     capabilities;
+};
+
+struct wsi_wayland {
+   struct anv_wsi_interface                     base;
+
+   struct anv_instance *                        instance;
+
+   pthread_mutex_t                              mutex;
+   /* Hash table of wl_display -> wsi_wl_display mappings */
+   struct hash_table *                          displays;
+};
+
+static void
+wsi_wl_display_add_vk_format(struct wsi_wl_display *display, VkFormat format)
+{
+   /* Don't add a format that's already in the list */
+   VkFormat *f;
+   anv_vector_foreach(f, &display->formats)
+      if (*f == format)
+         return;
+
+   /* Don't add formats which aren't supported by the driver */
+   if (anv_format_for_vk_format(format)->isl_format ==
+       ISL_FORMAT_UNSUPPORTED) {
+      return;
+   }
+
+   f = anv_vector_add(&display->formats);
+   if (f)
+      *f = format;
+}
+
+static void
+drm_handle_device(void *data, struct wl_drm *drm, const char *name)
+{
+   fprintf(stderr, "wl_drm.device(%s)\n", name);
+}
+
+static uint32_t
+wl_drm_format_for_vk_format(VkFormat vk_format, bool alpha)
+{
+   switch (vk_format) {
+   /* TODO: Figure out what all the formats mean and make this table
+    * correct.
+    */
+#if 0
+   case VK_FORMAT_R4G4B4A4_UNORM:
+      return alpha ? WL_DRM_FORMAT_ABGR4444 : WL_DRM_FORMAT_XBGR4444;
+   case VK_FORMAT_R5G6B5_UNORM:
+      return WL_DRM_FORMAT_BGR565;
+   case VK_FORMAT_R5G5B5A1_UNORM:
+      return alpha ? WL_DRM_FORMAT_ABGR1555 : WL_DRM_FORMAT_XBGR1555;
+   case VK_FORMAT_R8G8B8_UNORM:
+      return WL_DRM_FORMAT_XBGR8888;
+   case VK_FORMAT_R8G8B8A8_UNORM:
+      return alpha ? WL_DRM_FORMAT_ABGR8888 : WL_DRM_FORMAT_XBGR8888;
+   case VK_FORMAT_R10G10B10A2_UNORM:
+      return alpha ? WL_DRM_FORMAT_ABGR2101010 : WL_DRM_FORMAT_XBGR2101010;
+   case VK_FORMAT_B4G4R4A4_UNORM:
+      return alpha ? WL_DRM_FORMAT_ARGB4444 : WL_DRM_FORMAT_XRGB4444;
+   case VK_FORMAT_B5G6R5_UNORM:
+      return WL_DRM_FORMAT_RGB565;
+   case VK_FORMAT_B5G5R5A1_UNORM:
+      return alpha ? WL_DRM_FORMAT_ARGB1555 : WL_DRM_FORMAT_XRGB1555;
+#endif
+   case VK_FORMAT_B8G8R8_SRGB:
+      return WL_DRM_FORMAT_BGRX8888;
+   case VK_FORMAT_B8G8R8A8_SRGB:
+      return alpha ? WL_DRM_FORMAT_ARGB8888 : WL_DRM_FORMAT_XRGB8888;
+#if 0
+   case VK_FORMAT_B10G10R10A2_UNORM:
+      return alpha ? WL_DRM_FORMAT_ARGB2101010 : WL_DRM_FORMAT_XRGB2101010;
+#endif
+
+   default:
+      assert(!"Unsupported Vulkan format");
+      return 0;
+   }
+}
+
+static void
+drm_handle_format(void *data, struct wl_drm *drm, uint32_t wl_format)
+{
+   struct wsi_wl_display *display = data;
+
+   switch (wl_format) {
+#if 0
+   case WL_DRM_FORMAT_ABGR4444:
+   case WL_DRM_FORMAT_XBGR4444:
+      wsi_wl_display_add_vk_format(display, VK_FORMAT_R4G4B4A4_UNORM);
+      break;
+   case WL_DRM_FORMAT_BGR565:
+      wsi_wl_display_add_vk_format(display, VK_FORMAT_R5G6B5_UNORM);
+      break;
+   case WL_DRM_FORMAT_ABGR1555:
+   case WL_DRM_FORMAT_XBGR1555:
+      wsi_wl_display_add_vk_format(display, VK_FORMAT_R5G5B5A1_UNORM);
+      break;
+   case WL_DRM_FORMAT_XBGR8888:
+      wsi_wl_display_add_vk_format(display, VK_FORMAT_R8G8B8_UNORM);
+      /* fallthrough */
+   case WL_DRM_FORMAT_ABGR8888:
+      wsi_wl_display_add_vk_format(display, VK_FORMAT_R8G8B8A8_UNORM);
+      break;
+   case WL_DRM_FORMAT_ABGR2101010:
+   case WL_DRM_FORMAT_XBGR2101010:
+      wsi_wl_display_add_vk_format(display, VK_FORMAT_R10G10B10A2_UNORM);
+      break;
+   case WL_DRM_FORMAT_ARGB4444:
+   case WL_DRM_FORMAT_XRGB4444:
+      wsi_wl_display_add_vk_format(display, VK_FORMAT_B4G4R4A4_UNORM);
+      break;
+   case WL_DRM_FORMAT_RGB565:
+      wsi_wl_display_add_vk_format(display, VK_FORMAT_B5G6R5_UNORM);
+      break;
+   case WL_DRM_FORMAT_ARGB1555:
+   case WL_DRM_FORMAT_XRGB1555:
+      wsi_wl_display_add_vk_format(display, VK_FORMAT_B5G5R5A1_UNORM);
+      break;
+#endif
+   case WL_DRM_FORMAT_XRGB8888:
+      wsi_wl_display_add_vk_format(display, VK_FORMAT_B8G8R8_SRGB);
+      /* fallthrough */
+   case WL_DRM_FORMAT_ARGB8888:
+      wsi_wl_display_add_vk_format(display, VK_FORMAT_B8G8R8A8_SRGB);
+      break;
+#if 0
+   case WL_DRM_FORMAT_ARGB2101010:
+   case WL_DRM_FORMAT_XRGB2101010:
+      wsi_wl_display_add_vk_format(display, VK_FORMAT_B10G10R10A2_UNORM);
+      break;
+#endif
+   }
+}
+
+static void
+drm_handle_authenticated(void *data, struct wl_drm *drm)
+{
+}
+
+static void
+drm_handle_capabilities(void *data, struct wl_drm *drm, uint32_t capabilities)
+{
+   struct wsi_wl_display *display = data;
+
+   display->capabilities = capabilities;
+}
+
+static const struct wl_drm_listener drm_listener = {
+   drm_handle_device,
+   drm_handle_format,
+   drm_handle_authenticated,
+   drm_handle_capabilities,
+};
+
+static void
+registry_handle_global(void *data, struct wl_registry *registry,
+                       uint32_t name, const char *interface, uint32_t version)
+{
+   struct wsi_wl_display *display = data;
+
+   if (strcmp(interface, "wl_drm") == 0) {
+      assert(display->drm == NULL);
+
+      assert(version >= 2);
+      display->drm = wl_registry_bind(registry, name, &wl_drm_interface, 2);
+
+      if (display->drm)
+         wl_drm_add_listener(display->drm, &drm_listener, display);
+   }
+}
+
+static void
+registry_handle_global_remove(void *data, struct wl_registry *registry,
+                              uint32_t name)
+{ /* No-op */ }
+
+static const struct wl_registry_listener registry_listener = {
+   registry_handle_global,
+   registry_handle_global_remove
+};
+
+static void
+wsi_wl_display_destroy(struct wsi_wayland *wsi, struct wsi_wl_display *display)
+{
+   anv_vector_finish(&display->formats);
+   if (display->drm)
+      wl_drm_destroy(display->drm);
+   anv_free(&wsi->instance->alloc, display);
+}
+
+static struct wsi_wl_display *
+wsi_wl_display_create(struct wsi_wayland *wsi, struct wl_display *wl_display)
+{
+   struct wsi_wl_display *display =
+      anv_alloc(&wsi->instance->alloc, sizeof(*display), 8,
+                VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
+   if (!display)
+      return NULL;
+
+   memset(display, 0, sizeof(*display));
+
+   display->display = wl_display;
+
+   struct wl_registry *registry = NULL;
+
+   if (!anv_vector_init(&display->formats, sizeof(VkFormat), 8))
+      goto fail;
+
+   registry = wl_display_get_registry(wl_display);
+   if (!registry)
+      goto fail;
+
+   wl_registry_add_listener(registry, &registry_listener, display);
+
+   /* Round-trip to get the wl_drm global */
+   wl_display_roundtrip(wl_display);
+
+   if (!display->drm)
+      goto fail;
+
+   /* Round-trip to get wl_drm formats and capabilities */
+   wl_display_roundtrip(wl_display);
+
+   /* We need prime support */
+   if (!(display->capabilities & WL_DRM_CAPABILITY_PRIME))
+      goto fail;
+
+   /* We don't need this anymore */
+   wl_registry_destroy(registry);
+
+   return display;
+
+fail:
+   if (registry)
+      wl_registry_destroy(registry);
+
+   wsi_wl_display_destroy(wsi, display);
+   return NULL;
+}
+
+static struct wsi_wl_display *
+wsi_wl_get_display(struct anv_instance *instance, struct wl_display *wl_display)
+{
+   struct wsi_wayland *wsi =
+      (struct wsi_wayland *)instance->wsi[VK_ICD_WSI_PLATFORM_WAYLAND];
+
+   pthread_mutex_lock(&wsi->mutex);
+
+   struct hash_entry *entry = _mesa_hash_table_search(wsi->displays,
+                                                      wl_display);
+   if (!entry) {
+      /* We're about to make a bunch of blocking calls.  Let's drop the
+       * mutex for now so we don't block up too badly.
+       */
+      pthread_mutex_unlock(&wsi->mutex);
+
+      struct wsi_wl_display *display = wsi_wl_display_create(wsi, wl_display);
+
+      pthread_mutex_lock(&wsi->mutex);
+
+      entry = _mesa_hash_table_search(wsi->displays, wl_display);
+      if (entry) {
+         /* Oops, someone raced us to it */
+         wsi_wl_display_destroy(wsi, display);
+      } else {
+         entry = _mesa_hash_table_insert(wsi->displays, wl_display, display);
+      }
+   }
+
+   pthread_mutex_unlock(&wsi->mutex);
+
+   return entry->data;
+}
+
+VkBool32 anv_GetPhysicalDeviceWaylandPresentationSupportKHR(
+    VkPhysicalDevice                            physicalDevice,
+    uint32_t                                    queueFamilyIndex,
+    struct wl_display*                          display)
+{
+   ANV_FROM_HANDLE(anv_physical_device, physical_device, physicalDevice);
+
+   return wsi_wl_get_display(physical_device->instance, display) != NULL;
+}
+
+static VkResult
+wsi_wl_surface_get_support(VkIcdSurfaceBase *surface,
+                           struct anv_physical_device *device,
+                           uint32_t queueFamilyIndex,
+                           VkBool32* pSupported)
+{
+   *pSupported = true;
+
+   return VK_SUCCESS;
+}
+
+static const VkPresentModeKHR present_modes[] = {
+   VK_PRESENT_MODE_MAILBOX_KHR,
+   VK_PRESENT_MODE_FIFO_KHR,
+};
+
+static VkResult
+wsi_wl_surface_get_capabilities(VkIcdSurfaceBase *surface,
+                                struct anv_physical_device *device,
+                                VkSurfaceCapabilitiesKHR* caps)
+{
+   caps->minImageCount = MIN_NUM_IMAGES;
+   caps->maxImageCount = 4;
+   caps->currentExtent = (VkExtent2D) { -1, -1 };
+   caps->minImageExtent = (VkExtent2D) { 1, 1 };
+   caps->maxImageExtent = (VkExtent2D) { INT16_MAX, INT16_MAX };
+   caps->supportedTransforms = VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR;
+   caps->currentTransform = VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR;
+   caps->maxImageArrayLayers = 1;
+
+   caps->supportedCompositeAlpha =
+      VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR |
+      VK_COMPOSITE_ALPHA_PRE_MULTIPLIED_BIT_KHR;
+
+   caps->supportedUsageFlags =
+      VK_IMAGE_USAGE_TRANSFER_SRC_BIT |
+      VK_IMAGE_USAGE_SAMPLED_BIT |
+      VK_IMAGE_USAGE_TRANSFER_DST_BIT |
+      VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
+
+   return VK_SUCCESS;
+}
+
+static VkResult
+wsi_wl_surface_get_formats(VkIcdSurfaceBase *icd_surface,
+                           struct anv_physical_device *device,
+                           uint32_t* pSurfaceFormatCount,
+                           VkSurfaceFormatKHR* pSurfaceFormats)
+{
+   VkIcdSurfaceWayland *surface = (VkIcdSurfaceWayland *)icd_surface;
+   struct wsi_wl_display *display =
+      wsi_wl_get_display(device->instance, surface->display);
+
+   uint32_t count = anv_vector_length(&display->formats);
+
+   if (pSurfaceFormats == NULL) {
+      *pSurfaceFormatCount = count;
+      return VK_SUCCESS;
+   }
+
+   assert(*pSurfaceFormatCount >= count);
+   *pSurfaceFormatCount = count;
+
+   VkFormat *f;
+   anv_vector_foreach(f, &display->formats) {
+      *(pSurfaceFormats++) = (VkSurfaceFormatKHR) {
+         .format = *f,
+         /* TODO: We should get this from the compositor somehow */
+         .colorSpace = VK_COLORSPACE_SRGB_NONLINEAR_KHR,
+      };
+   }
+
+   return VK_SUCCESS;
+}
+
+static VkResult
+wsi_wl_surface_get_present_modes(VkIcdSurfaceBase *surface,
+                                 struct anv_physical_device *device,
+                                 uint32_t* pPresentModeCount,
+                                 VkPresentModeKHR* pPresentModes)
+{
+   if (pPresentModes == NULL) {
+      *pPresentModeCount = ARRAY_SIZE(present_modes);
+      return VK_SUCCESS;
+   }
+
+   assert(*pPresentModeCount >= ARRAY_SIZE(present_modes));
+   typed_memcpy(pPresentModes, present_modes, ARRAY_SIZE(present_modes));
+   *pPresentModeCount = ARRAY_SIZE(present_modes);
+
+   return VK_SUCCESS;
+}
+
+static VkResult
+wsi_wl_surface_create_swapchain(VkIcdSurfaceBase *surface,
+                                struct anv_device *device,
+                                const VkSwapchainCreateInfoKHR* pCreateInfo,
+                                const VkAllocationCallbacks* pAllocator,
+                                struct anv_swapchain **swapchain);
+
+VkResult anv_CreateWaylandSurfaceKHR(
+    VkInstance                                  _instance,
+    const VkWaylandSurfaceCreateInfoKHR*        pCreateInfo,
+    const VkAllocationCallbacks*                pAllocator,
+    VkSurfaceKHR*                               pSurface)
+{
+   ANV_FROM_HANDLE(anv_instance, instance, _instance);
+
+   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_WAYLAND_SURFACE_CREATE_INFO_KHR);
+
+   VkIcdSurfaceWayland *surface;
+
+   surface = anv_alloc2(&instance->alloc, pAllocator, sizeof *surface, 8,
+                        VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+   if (surface == NULL)
+      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+
+   surface->base.platform = VK_ICD_WSI_PLATFORM_WAYLAND;
+   surface->display = pCreateInfo->display;
+   surface->surface = pCreateInfo->surface;
+
+   *pSurface = _VkIcdSurfaceBase_to_handle(&surface->base);
+
+   return VK_SUCCESS;
+}
+
+struct wsi_wl_image {
+   struct anv_image *                           image;
+   struct anv_device_memory *                   memory;
+   struct wl_buffer *                           buffer;
+   bool                                         busy;
+};
+
+struct wsi_wl_swapchain {
+   struct anv_swapchain                        base;
+
+   struct wsi_wl_display *                      display;
+   struct wl_event_queue *                      queue;
+   struct wl_surface *                          surface;
+
+   VkExtent2D                                   extent;
+   VkFormat                                     vk_format;
+   uint32_t                                     drm_format;
+
+   VkPresentModeKHR                             present_mode;
+   bool                                         fifo_ready;
+
+   uint32_t                                     image_count;
+   struct wsi_wl_image                          images[0];
+};
+
+static VkResult
+wsi_wl_swapchain_get_images(struct anv_swapchain *anv_chain,
+                            uint32_t *pCount, VkImage *pSwapchainImages)
+{
+   struct wsi_wl_swapchain *chain = (struct wsi_wl_swapchain *)anv_chain;
+
+   if (pSwapchainImages == NULL) {
+      *pCount = chain->image_count;
+      return VK_SUCCESS;
+   }
+
+   assert(chain->image_count <= *pCount);
+   for (uint32_t i = 0; i < chain->image_count; i++)
+      pSwapchainImages[i] = anv_image_to_handle(chain->images[i].image);
+
+   *pCount = chain->image_count;
+
+   return VK_SUCCESS;
+}
+
+static VkResult
+wsi_wl_swapchain_acquire_next_image(struct anv_swapchain *anv_chain,
+                                    uint64_t timeout,
+                                    VkSemaphore semaphore,
+                                    uint32_t *image_index)
+{
+   struct wsi_wl_swapchain *chain = (struct wsi_wl_swapchain *)anv_chain;
+
+   int ret = wl_display_dispatch_queue_pending(chain->display->display,
+                                               chain->queue);
+   /* XXX: I'm not sure if out-of-date is the right error here.  If
+    * wl_display_dispatch_queue_pending fails it most likely means we got
+    * kicked by the server so this seems more-or-less correct.
+    */
+   if (ret < 0)
+      return vk_error(VK_ERROR_OUT_OF_DATE_KHR);
+
+   while (1) {
+      for (uint32_t i = 0; i < chain->image_count; i++) {
+         if (!chain->images[i].busy) {
+            /* We found a non-busy image */
+            *image_index = i;
+            return VK_SUCCESS;
+         }
+      }
+
+      /* This time we do a blocking dispatch because we can't go
+       * anywhere until we get an event.
+       */
+      int ret = wl_display_roundtrip_queue(chain->display->display,
+                                           chain->queue);
+      if (ret < 0)
+         return vk_error(VK_ERROR_OUT_OF_DATE_KHR);
+   }
+}
+
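+/* Wayland frame callback handler: the compositor has fired the frame
+ * callback for the last commit, so FIFO presentation may proceed with the
+ * next frame.
+ */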
+static void
+frame_handle_done(void *data, struct wl_callback *callback, uint32_t serial)
+{
+   struct wsi_wl_swapchain *chain = data;
+
+   chain->fifo_ready = true;
+
+   wl_callback_destroy(callback);
+}
+
+static const struct wl_callback_listener frame_listener = {
+   frame_handle_done,
+};
+
+static VkResult
+wsi_wl_swapchain_queue_present(struct anv_swapchain *anv_chain,
+                               struct anv_queue *queue,
+                               uint32_t image_index)
+{
+   struct wsi_wl_swapchain *chain = (struct wsi_wl_swapchain *)anv_chain;
+
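+   /* In FIFO mode, throttle on the compositor's frame callback: block until
+    * the callback for the previously committed frame has fired.
+    */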
+   if (chain->present_mode == VK_PRESENT_MODE_FIFO_KHR) {
+      while (!chain->fifo_ready) {
+         int ret = wl_display_dispatch_queue(chain->display->display,
+                                             chain->queue);
+         if (ret < 0)
+            return vk_error(VK_ERROR_OUT_OF_DATE_KHR);
+      }
+   }
+
+   assert(image_index < chain->image_count);
+   wl_surface_attach(chain->surface, chain->images[image_index].buffer, 0, 0);
+   wl_surface_damage(chain->surface, 0, 0, INT32_MAX, INT32_MAX);
+
+   if (chain->present_mode == VK_PRESENT_MODE_FIFO_KHR) {
+      struct wl_callback *frame = wl_surface_frame(chain->surface);
+      wl_proxy_set_queue((struct wl_proxy *)frame, chain->queue);
+      wl_callback_add_listener(frame, &frame_listener, chain);
+      chain->fifo_ready = false;
+   }
+
+   chain->images[image_index].busy = true;
+   wl_surface_commit(chain->surface);
+   wl_display_flush(chain->display->display);
+
+   return VK_SUCCESS;
+}
+
+static void
+wsi_wl_image_finish(struct wsi_wl_swapchain *chain, struct wsi_wl_image *image,
+                    const VkAllocationCallbacks* pAllocator)
+{
+   VkDevice vk_device = anv_device_to_handle(chain->base.device);
+   anv_FreeMemory(vk_device, anv_device_memory_to_handle(image->memory),
+                  pAllocator);
+   anv_DestroyImage(vk_device, anv_image_to_handle(image->image),
+                    pAllocator);
+}
+
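+/* wl_buffer release handler: the compositor is done reading from this
+ * buffer, so the image can be handed out again by acquire_next_image.
+ */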
+static void
+buffer_handle_release(void *data, struct wl_buffer *buffer)
+{
+   struct wsi_wl_image *image = data;
+
+   assert(image->buffer == buffer);
+
+   image->busy = false;
+}
+
+static const struct wl_buffer_listener buffer_listener = {
+   buffer_handle_release,
+};
+
+static VkResult
+wsi_wl_image_init(struct wsi_wl_swapchain *chain, struct wsi_wl_image *image,
+                  const VkAllocationCallbacks* pAllocator)
+{
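+   /* Create a VkImage backed by a GEM BO, force X tiling on the BO, export
+    * it as a prime fd, and wrap that fd in a wl_buffer via wl_drm.
+    */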
+   VkDevice vk_device = anv_device_to_handle(chain->base.device);
+   VkResult result;
+
+   VkImage vk_image;
+   result = anv_image_create(vk_device,
+      &(struct anv_image_create_info) {
+         .isl_tiling_flags = ISL_TILING_X_BIT,
+         .stride = 0,
+         .vk_info =
+      &(VkImageCreateInfo) {
+         .sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
+         .imageType = VK_IMAGE_TYPE_2D,
+         .format = chain->vk_format,
+         .extent = {
+            .width = chain->extent.width,
+            .height = chain->extent.height,
+            .depth = 1
+         },
+         .mipLevels = 1,
+         .arrayLayers = 1,
+         .samples = 1,
+         /* FIXME: Need a way to use X tiling to allow scanout */
+         .tiling = VK_IMAGE_TILING_OPTIMAL,
+         .usage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT,
+         .flags = 0,
+      }},
+      pAllocator,
+      &vk_image);
+
+   if (result != VK_SUCCESS)
+      return result;
+
+   image->image = anv_image_from_handle(vk_image);
+   assert(anv_format_is_color(image->image->format));
+
+   struct anv_surface *surface = &image->image->color_surface;
+
+   VkDeviceMemory vk_memory;
+   result = anv_AllocateMemory(vk_device,
+      &(VkMemoryAllocateInfo) {
+         .sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,
+         .allocationSize = image->image->size,
+         .memoryTypeIndex = 0,
+      },
+      pAllocator,
+      &vk_memory);
+
+   if (result != VK_SUCCESS)
+      goto fail_image;
+
+   image->memory = anv_device_memory_from_handle(vk_memory);
+   image->memory->bo.is_winsys_bo = true;
+
+   result = anv_BindImageMemory(vk_device, vk_image, vk_memory, 0);
+
+   if (result != VK_SUCCESS)
+      goto fail_mem;
+
+   int ret = anv_gem_set_tiling(chain->base.device,
+                                image->memory->bo.gem_handle,
+                                surface->isl.row_pitch, I915_TILING_X);
+   if (ret) {
+      /* FINISHME: Choose a better error. */
+      result = vk_error(VK_ERROR_OUT_OF_DEVICE_MEMORY);
+      goto fail_mem;
+   }
+
+   int fd = anv_gem_handle_to_fd(chain->base.device,
+                                 image->memory->bo.gem_handle);
+   if (fd == -1) {
+      /* FINISHME: Choose a better error. */
+      result = vk_error(VK_ERROR_OUT_OF_DEVICE_MEMORY);
+      goto fail_mem;
+   }
+
+   image->buffer = wl_drm_create_prime_buffer(chain->display->drm,
+                                              fd, /* prime fd */
+                                              chain->extent.width,
+                                              chain->extent.height,
+                                              chain->drm_format,
+                                              surface->offset,
+                                              surface->isl.row_pitch,
+                                              0, 0, 0, 0 /* unused */);
+   wl_display_roundtrip(chain->display->display);
+   close(fd);
+
+   wl_proxy_set_queue((struct wl_proxy *)image->buffer, chain->queue);
+   wl_buffer_add_listener(image->buffer, &buffer_listener, image);
+
+   return VK_SUCCESS;
+
+fail_mem:
+   anv_FreeMemory(vk_device, vk_memory, pAllocator);
+fail_image:
+   anv_DestroyImage(vk_device, vk_image, pAllocator);
+
+   return result;
+}
+
+static VkResult
+wsi_wl_swapchain_destroy(struct anv_swapchain *anv_chain,
+                         const VkAllocationCallbacks *pAllocator)
+{
+   struct wsi_wl_swapchain *chain = (struct wsi_wl_swapchain *)anv_chain;
+
+   for (uint32_t i = 0; i < chain->image_count; i++) {
+      if (chain->images[i].buffer)
+         wsi_wl_image_finish(chain, &chain->images[i], pAllocator);
+   }
+
+   anv_free2(&chain->base.device->alloc, pAllocator, chain);
+
+   return VK_SUCCESS;
+}
+
+static VkResult
+wsi_wl_surface_create_swapchain(VkIcdSurfaceBase *icd_surface,
+                                struct anv_device *device,
+                                const VkSwapchainCreateInfoKHR* pCreateInfo,
+                                const VkAllocationCallbacks* pAllocator,
+                                struct anv_swapchain **swapchain_out)
+{
+   VkIcdSurfaceWayland *surface = (VkIcdSurfaceWayland *)icd_surface;
+   struct wsi_wl_swapchain *chain;
+   VkResult result;
+
+   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR);
+
+   int num_images = pCreateInfo->minImageCount;
+
+   assert(num_images >= MIN_NUM_IMAGES);
+
+   /* For true mailbox mode, we need at least 4 images:
+    *  1) One to scan out from
+    *  2) One to have queued for scan-out
+    *  3) One to be currently held by the Wayland compositor
+    *  4) One to render to
+    */
+   if (pCreateInfo->presentMode == VK_PRESENT_MODE_MAILBOX_KHR)
+      num_images = MAX2(num_images, 4);
+
+   size_t size = sizeof(*chain) + num_images * sizeof(chain->images[0]);
+   chain = anv_alloc2(&device->alloc, pAllocator, size, 8,
+                      VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+   if (chain == NULL)
+      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+
+   chain->base.device = device;
+   chain->base.destroy = wsi_wl_swapchain_destroy;
+   chain->base.get_images = wsi_wl_swapchain_get_images;
+   chain->base.acquire_next_image = wsi_wl_swapchain_acquire_next_image;
+   chain->base.queue_present = wsi_wl_swapchain_queue_present;
+
+   chain->surface = surface->surface;
+   chain->extent = pCreateInfo->imageExtent;
+   chain->vk_format = pCreateInfo->imageFormat;
+   chain->drm_format = wl_drm_format_for_vk_format(chain->vk_format, false);
+
+   chain->present_mode = pCreateInfo->presentMode;
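+   /* Start out "ready" so the first FIFO present does not wait for a frame
+    * callback that was never requested.
+    */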
+   chain->fifo_ready = true;
+
+   chain->image_count = num_images;
+
+   /* Mark a bunch of stuff as NULL.  This way we can just call
+    * destroy_swapchain for cleanup.
+    */
+   for (uint32_t i = 0; i < chain->image_count; i++)
+      chain->images[i].buffer = NULL;
+   chain->queue = NULL;
+
+   chain->display = wsi_wl_get_display(device->instance, surface->display);
+   if (!chain->display) {
+      result = vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+      goto fail;
+   }
+
+   chain->queue = wl_display_create_queue(chain->display->display);
+   if (!chain->queue) {
+      result = vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+      goto fail;
+   }
+
+   for (uint32_t i = 0; i < chain->image_count; i++) {
+      result = wsi_wl_image_init(chain, &chain->images[i], pAllocator);
+      if (result != VK_SUCCESS)
+         goto fail;
+      chain->images[i].busy = false;
+   }
+
+   *swapchain_out = &chain->base;
+
+   return VK_SUCCESS;
+
+fail:
+   wsi_wl_swapchain_destroy(&chain->base, pAllocator);
+
+   return result;
+}
+
+VkResult
+anv_wl_init_wsi(struct anv_instance *instance)
+{
+   struct wsi_wayland *wsi;
+   VkResult result;
+
+   wsi = anv_alloc(&instance->alloc, sizeof(*wsi), 8,
+                   VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
+   if (!wsi) {
+      result = vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+      goto fail;
+   }
+
+   wsi->instance = instance;
+
+   int ret = pthread_mutex_init(&wsi->mutex, NULL);
+   if (ret != 0) {
+      if (ret == ENOMEM) {
+         result = vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+      } else {
+         /* FINISHME: Choose a better error. */
+         result = vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+      }
+
+      goto fail_alloc;
+   }
+
+   wsi->displays = _mesa_hash_table_create(NULL, _mesa_hash_pointer,
+                                           _mesa_key_pointer_equal);
+   if (!wsi->displays) {
+      result = vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+      goto fail_mutex;
+   }
+
+   wsi->base.get_support = wsi_wl_surface_get_support;
+   wsi->base.get_capabilities = wsi_wl_surface_get_capabilities;
+   wsi->base.get_formats = wsi_wl_surface_get_formats;
+   wsi->base.get_present_modes = wsi_wl_surface_get_present_modes;
+   wsi->base.create_swapchain = wsi_wl_surface_create_swapchain;
+
+   instance->wsi[VK_ICD_WSI_PLATFORM_WAYLAND] = &wsi->base;
+
+   return VK_SUCCESS;
+
+fail_mutex:
+   pthread_mutex_destroy(&wsi->mutex);
+
+fail_alloc:
+   anv_free(&instance->alloc, wsi);
+fail:
+   instance->wsi[VK_ICD_WSI_PLATFORM_WAYLAND] = NULL;
+
+   return result;
+}
+
+void
+anv_wl_finish_wsi(struct anv_instance *instance)
+{
+   struct wsi_wayland *wsi =
+      (struct wsi_wayland *)instance->wsi[VK_ICD_WSI_PLATFORM_WAYLAND];
+
+   if (wsi) {
+      _mesa_hash_table_destroy(wsi->displays, NULL);
+
+      pthread_mutex_destroy(&wsi->mutex);
+
+      anv_free(&instance->alloc, wsi);
+   }
+}
diff --git a/src/intel/vulkan/anv_wsi_x11.c b/src/intel/vulkan/anv_wsi_x11.c
new file mode 100644 (file)
index 0000000..9ef0296
--- /dev/null
@@ -0,0 +1,902 @@
+/*
+ * Copyright © 2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include <X11/xshmfence.h>
+#include <xcb/xcb.h>
+#include <xcb/dri3.h>
+#include <xcb/present.h>
+
+#include "anv_wsi.h"
+
+#include "util/hash_table.h"
+
+struct wsi_x11_connection {
+   bool has_dri3;
+   bool has_present;
+};
+
+struct wsi_x11 {
+   struct anv_wsi_interface base;
+
+   pthread_mutex_t                              mutex;
+   /* Hash table of xcb_connection -> wsi_x11_connection mappings */
+   struct hash_table *connections;
+};
+
+static struct wsi_x11_connection *
+wsi_x11_connection_create(struct anv_instance *instance, xcb_connection_t *conn)
+{
+   xcb_query_extension_cookie_t dri3_cookie, pres_cookie;
+   xcb_query_extension_reply_t *dri3_reply, *pres_reply;
+
+   struct wsi_x11_connection *wsi_conn =
+      anv_alloc(&instance->alloc, sizeof(*wsi_conn), 8,
+                VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
+   if (!wsi_conn)
+      return NULL;
+
+   dri3_cookie = xcb_query_extension(conn, 4, "DRI3");
+   pres_cookie = xcb_query_extension(conn, 7, "PRESENT");
+
+   dri3_reply = xcb_query_extension_reply(conn, dri3_cookie, NULL);
+   pres_reply = xcb_query_extension_reply(conn, pres_cookie, NULL);
+   if (dri3_reply == NULL || pres_reply == NULL) {
+      free(dri3_reply);
+      free(pres_reply);
+      anv_free(&instance->alloc, wsi_conn);
+      return NULL;
+   }
+
+   wsi_conn->has_dri3 = dri3_reply->present != 0;
+   wsi_conn->has_present = pres_reply->present != 0;
+
+   free(dri3_reply);
+   free(pres_reply);
+
+   return wsi_conn;
+}
+
+static void
+wsi_x11_connection_destroy(struct anv_instance *instance,
+                           struct wsi_x11_connection *conn)
+{
+   anv_free(&instance->alloc, conn);
+}
+
+static struct wsi_x11_connection *
+wsi_x11_get_connection(struct anv_instance *instance, xcb_connection_t *conn)
+{
+   struct wsi_x11 *wsi =
+      (struct wsi_x11 *)instance->wsi[VK_ICD_WSI_PLATFORM_XCB];
+
+   pthread_mutex_lock(&wsi->mutex);
+
+   struct hash_entry *entry = _mesa_hash_table_search(wsi->connections, conn);
+   if (!entry) {
+      /* We're about to make a bunch of blocking calls.  Let's drop the
+       * mutex for now so we don't block up too badly.
+       */
+      pthread_mutex_unlock(&wsi->mutex);
+
+      struct wsi_x11_connection *wsi_conn =
+         wsi_x11_connection_create(instance, conn);
+
+      pthread_mutex_lock(&wsi->mutex);
+
+      entry = _mesa_hash_table_search(wsi->connections, conn);
+      if (entry) {
+         /* Oops, someone raced us to it */
+         wsi_x11_connection_destroy(instance, wsi_conn);
+      } else {
+         entry = _mesa_hash_table_insert(wsi->connections, conn, wsi_conn);
+      }
+   }
+
+   pthread_mutex_unlock(&wsi->mutex);
+
+   return entry->data;
+}
+
+static const VkSurfaceFormatKHR formats[] = {
+   { .format = VK_FORMAT_B8G8R8A8_SRGB, },
+};
+
+static const VkPresentModeKHR present_modes[] = {
+   VK_PRESENT_MODE_MAILBOX_KHR,
+};
+
+static xcb_screen_t *
+get_screen_for_root(xcb_connection_t *conn, xcb_window_t root)
+{
+   xcb_screen_iterator_t screen_iter =
+      xcb_setup_roots_iterator(xcb_get_setup(conn));
+
+   for (; screen_iter.rem; xcb_screen_next (&screen_iter)) {
+      if (screen_iter.data->root == root)
+         return screen_iter.data;
+   }
+
+   return NULL;
+}
+
+static xcb_visualtype_t *
+screen_get_visualtype(xcb_screen_t *screen, xcb_visualid_t visual_id,
+                      unsigned *depth)
+{
+   xcb_depth_iterator_t depth_iter =
+      xcb_screen_allowed_depths_iterator(screen);
+
+   for (; depth_iter.rem; xcb_depth_next (&depth_iter)) {
+      xcb_visualtype_iterator_t visual_iter =
+         xcb_depth_visuals_iterator (depth_iter.data);
+
+      for (; visual_iter.rem; xcb_visualtype_next (&visual_iter)) {
+         if (visual_iter.data->visual_id == visual_id) {
+            if (depth)
+               *depth = depth_iter.data->depth;
+            return visual_iter.data;
+         }
+      }
+   }
+
+   return NULL;
+}
+
+static xcb_visualtype_t *
+connection_get_visualtype(xcb_connection_t *conn, xcb_visualid_t visual_id,
+                          unsigned *depth)
+{
+   xcb_screen_iterator_t screen_iter =
+      xcb_setup_roots_iterator(xcb_get_setup(conn));
+
+   /* For this we have to iterate over all of the screens which is rather
+    * annoying.  Fortunately, there is probably only 1.
+    */
+   for (; screen_iter.rem; xcb_screen_next (&screen_iter)) {
+      xcb_visualtype_t *visual = screen_get_visualtype(screen_iter.data,
+                                                       visual_id, depth);
+      if (visual)
+         return visual;
+   }
+
+   return NULL;
+}
+
+static xcb_visualtype_t *
+get_visualtype_for_window(xcb_connection_t *conn, xcb_window_t window,
+                          unsigned *depth)
+{
+   xcb_query_tree_cookie_t tree_cookie;
+   xcb_get_window_attributes_cookie_t attrib_cookie;
+   xcb_query_tree_reply_t *tree;
+   xcb_get_window_attributes_reply_t *attrib;
+
+   tree_cookie = xcb_query_tree(conn, window);
+   attrib_cookie = xcb_get_window_attributes(conn, window);
+
+   tree = xcb_query_tree_reply(conn, tree_cookie, NULL);
+   attrib = xcb_get_window_attributes_reply(conn, attrib_cookie, NULL);
+   if (attrib == NULL || tree == NULL) {
+      free(attrib);
+      free(tree);
+      return NULL;
+   }
+
+   xcb_window_t root = tree->root;
+   xcb_visualid_t visual_id = attrib->visual;
+   free(attrib);
+   free(tree);
+
+   xcb_screen_t *screen = get_screen_for_root(conn, root);
+   if (screen == NULL)
+      return NULL;
+
+   return screen_get_visualtype(screen, visual_id, depth);
+}
+
+static bool
+visual_has_alpha(xcb_visualtype_t *visual, unsigned depth)
+{
+   uint32_t rgb_mask = visual->red_mask |
+                       visual->green_mask |
+                       visual->blue_mask;
+
+   uint32_t all_mask = 0xffffffff >> (32 - depth);
+
+   /* Do we have bits left over after RGB? */
+   return (all_mask & ~rgb_mask) != 0;
+}
+
+VkBool32 anv_GetPhysicalDeviceXcbPresentationSupportKHR(
+    VkPhysicalDevice                            physicalDevice,
+    uint32_t                                    queueFamilyIndex,
+    xcb_connection_t*                           connection,
+    xcb_visualid_t                              visual_id)
+{
+   ANV_FROM_HANDLE(anv_physical_device, device, physicalDevice);
+
+   struct wsi_x11_connection *wsi_conn =
+      wsi_x11_get_connection(device->instance, connection);
+
+   if (!wsi_conn)
+      return false;
+
+   if (!wsi_conn->has_dri3) {
+      fprintf(stderr, "vulkan: No DRI3 support\n");
+      return false;
+   }
+
+   unsigned visual_depth;
+   if (!connection_get_visualtype(connection, visual_id, &visual_depth))
+      return false;
+
+   if (visual_depth != 24 && visual_depth != 32)
+      return false;
+
+   return true;
+}
+
+static VkResult
+x11_surface_get_support(VkIcdSurfaceBase *icd_surface,
+                        struct anv_physical_device *device,
+                        uint32_t queueFamilyIndex,
+                        VkBool32* pSupported)
+{
+   VkIcdSurfaceXcb *surface = (VkIcdSurfaceXcb *)icd_surface;
+
+   struct wsi_x11_connection *wsi_conn =
+      wsi_x11_get_connection(device->instance, surface->connection);
+   if (!wsi_conn)
+      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+
+   if (!wsi_conn->has_dri3) {
+      fprintf(stderr, "vulkan: No DRI3 support\n");
+      *pSupported = false;
+      return VK_SUCCESS;
+   }
+
+   unsigned visual_depth;
+   if (!get_visualtype_for_window(surface->connection, surface->window,
+                                  &visual_depth)) {
+      *pSupported = false;
+      return VK_SUCCESS;
+   }
+
+   if (visual_depth != 24 && visual_depth != 32) {
+      *pSupported = false;
+      return VK_SUCCESS;
+   }
+
+   *pSupported = true;
+   return VK_SUCCESS;
+}
+
+static VkResult
+x11_surface_get_capabilities(VkIcdSurfaceBase *icd_surface,
+                             struct anv_physical_device *device,
+                             VkSurfaceCapabilitiesKHR *caps)
+{
+   VkIcdSurfaceXcb *surface = (VkIcdSurfaceXcb *)icd_surface;
+   xcb_get_geometry_cookie_t geom_cookie;
+   xcb_generic_error_t *err;
+   xcb_get_geometry_reply_t *geom;
+   unsigned visual_depth;
+
+   geom_cookie = xcb_get_geometry(surface->connection, surface->window);
+
+   /* This does a round-trip.  This is why we do get_geometry first and
+    * wait to read the reply until after we have a visual.
+    */
+   xcb_visualtype_t *visual =
+      get_visualtype_for_window(surface->connection, surface->window,
+                                &visual_depth);
+
+   geom = xcb_get_geometry_reply(surface->connection, geom_cookie, &err);
+   if (geom) {
+      VkExtent2D extent = { geom->width, geom->height };
+      caps->currentExtent = extent;
+      caps->minImageExtent = extent;
+      caps->maxImageExtent = extent;
+   } else {
+      /* This can happen if the client didn't wait for the configure event
+       * to come back from the compositor.  In that case, we don't know the
+       * size of the window so we just return valid "I don't know" stuff.
+       */
+      caps->currentExtent = (VkExtent2D) { -1, -1 };
+      caps->minImageExtent = (VkExtent2D) { 1, 1 };
+      caps->maxImageExtent = (VkExtent2D) { INT16_MAX, INT16_MAX };
+   }
+   free(err);
+   free(geom);
+
+   if (visual_has_alpha(visual, visual_depth)) {
+      caps->supportedCompositeAlpha = VK_COMPOSITE_ALPHA_INHERIT_BIT_KHR |
+                                      VK_COMPOSITE_ALPHA_PRE_MULTIPLIED_BIT_KHR;
+   } else {
+      caps->supportedCompositeAlpha = VK_COMPOSITE_ALPHA_INHERIT_BIT_KHR |
+                                      VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR;
+   }
+
+   caps->minImageCount = 2;
+   caps->maxImageCount = 4;
+   caps->supportedTransforms = VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR;
+   caps->currentTransform = VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR;
+   caps->maxImageArrayLayers = 1;
+   caps->supportedUsageFlags =
+      VK_IMAGE_USAGE_TRANSFER_SRC_BIT |
+      VK_IMAGE_USAGE_SAMPLED_BIT |
+      VK_IMAGE_USAGE_TRANSFER_DST_BIT |
+      VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
+
+   return VK_SUCCESS;
+}
+
+static VkResult
+x11_surface_get_formats(VkIcdSurfaceBase *surface,
+                        struct anv_physical_device *device,
+                        uint32_t *pSurfaceFormatCount,
+                        VkSurfaceFormatKHR *pSurfaceFormats)
+{
+   if (pSurfaceFormats == NULL) {
+      *pSurfaceFormatCount = ARRAY_SIZE(formats);
+      return VK_SUCCESS;
+   }
+
+   assert(*pSurfaceFormatCount >= ARRAY_SIZE(formats));
+   typed_memcpy(pSurfaceFormats, formats, ARRAY_SIZE(formats));
+   *pSurfaceFormatCount = ARRAY_SIZE(formats);
+
+   return VK_SUCCESS;
+}
+
+static VkResult
+x11_surface_get_present_modes(VkIcdSurfaceBase *surface,
+                              struct anv_physical_device *device,
+                              uint32_t *pPresentModeCount,
+                              VkPresentModeKHR *pPresentModes)
+{
+   if (pPresentModes == NULL) {
+      *pPresentModeCount = ARRAY_SIZE(present_modes);
+      return VK_SUCCESS;
+   }
+
+   assert(*pPresentModeCount >= ARRAY_SIZE(present_modes));
+   typed_memcpy(pPresentModes, present_modes, ARRAY_SIZE(present_modes));
+   *pPresentModeCount = ARRAY_SIZE(present_modes);
+
+   return VK_SUCCESS;
+}
+
+static VkResult
+x11_surface_create_swapchain(VkIcdSurfaceBase *surface,
+                             struct anv_device *device,
+                             const VkSwapchainCreateInfoKHR* pCreateInfo,
+                             const VkAllocationCallbacks* pAllocator,
+                             struct anv_swapchain **swapchain);
+
+VkResult anv_CreateXcbSurfaceKHR(
+    VkInstance                                  _instance,
+    const VkXcbSurfaceCreateInfoKHR*            pCreateInfo,
+    const VkAllocationCallbacks*                pAllocator,
+    VkSurfaceKHR*                               pSurface)
+{
+   ANV_FROM_HANDLE(anv_instance, instance, _instance);
+
+   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_XCB_SURFACE_CREATE_INFO_KHR);
+
+   VkIcdSurfaceXcb *surface;
+
+   surface = anv_alloc2(&instance->alloc, pAllocator, sizeof *surface, 8,
+                        VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+   if (surface == NULL)
+      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+
+   surface->base.platform = VK_ICD_WSI_PLATFORM_XCB;
+   surface->connection = pCreateInfo->connection;
+   surface->window = pCreateInfo->window;
+
+   *pSurface = _VkIcdSurfaceBase_to_handle(&surface->base);
+
+   return VK_SUCCESS;
+}
+
+struct x11_image {
+   struct anv_image *                        image;
+   struct anv_device_memory *                memory;
+   xcb_pixmap_t                              pixmap;
+   bool                                      busy;
+   struct xshmfence *                        shm_fence;
+   uint32_t                                  sync_fence;
+};
+
+struct x11_swapchain {
+   struct anv_swapchain                        base;
+
+   xcb_connection_t *                           conn;
+   xcb_window_t                                 window;
+   xcb_gc_t                                     gc;
+   VkExtent2D                                   extent;
+   uint32_t                                     image_count;
+
+   xcb_present_event_t                          event_id;
+   xcb_special_event_t *                        special_event;
+   uint64_t                                     send_sbc;
+   uint32_t                                     stamp;
+
+   struct x11_image                             images[0];
+};
+
+static VkResult
+x11_get_images(struct anv_swapchain *anv_chain,
+               uint32_t* pCount, VkImage *pSwapchainImages)
+{
+   struct x11_swapchain *chain = (struct x11_swapchain *)anv_chain;
+
+   if (pSwapchainImages == NULL) {
+      *pCount = chain->image_count;
+      return VK_SUCCESS;
+   }
+
+   assert(chain->image_count <= *pCount);
+   for (uint32_t i = 0; i < chain->image_count; i++)
+      pSwapchainImages[i] = anv_image_to_handle(chain->images[i].image);
+
+   *pCount = chain->image_count;
+
+   return VK_SUCCESS;
+}
+
+static VkResult
+x11_handle_dri3_present_event(struct x11_swapchain *chain,
+                              xcb_present_generic_event_t *event)
+{
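+   /* Translate Present extension events into swapchain state: a configure
+    * (resize) makes the swapchain out of date, an idle notify marks the
+    * matching pixmap's image as reusable.
+    */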
+   switch (event->evtype) {
+   case XCB_PRESENT_CONFIGURE_NOTIFY: {
+      xcb_present_configure_notify_event_t *config = (void *) event;
+
+      if (config->width != chain->extent.width ||
+          config->height != chain->extent.height)
+         return vk_error(VK_ERROR_OUT_OF_DATE_KHR);
+
+      break;
+   }
+
+   case XCB_PRESENT_EVENT_IDLE_NOTIFY: {
+      xcb_present_idle_notify_event_t *idle = (void *) event;
+
+      for (unsigned i = 0; i < chain->image_count; i++) {
+         if (chain->images[i].pixmap == idle->pixmap) {
+            chain->images[i].busy = false;
+            break;
+         }
+      }
+
+      break;
+   }
+
+   case XCB_PRESENT_COMPLETE_NOTIFY:
+   default:
+      break;
+   }
+
+   return VK_SUCCESS;
+}
+
+static VkResult
+x11_acquire_next_image(struct anv_swapchain *anv_chain,
+                       uint64_t timeout,
+                       VkSemaphore semaphore,
+                       uint32_t *image_index)
+{
+   struct x11_swapchain *chain = (struct x11_swapchain *)anv_chain;
+
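+   /* Look for an idle image; if none is available, block on the Present
+    * special-event queue until the server reports one as idle.
+    */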
+   while (1) {
+      for (uint32_t i = 0; i < chain->image_count; i++) {
+         if (!chain->images[i].busy) {
+            /* We found a non-busy image */
+            xshmfence_await(chain->images[i].shm_fence);
+            *image_index = i;
+            return VK_SUCCESS;
+         }
+      }
+
+      xcb_flush(chain->conn);
+      xcb_generic_event_t *event =
+         xcb_wait_for_special_event(chain->conn, chain->special_event);
+      if (!event)
+         return vk_error(VK_ERROR_OUT_OF_DATE_KHR);
+
+      VkResult result = x11_handle_dri3_present_event(chain, (void *)event);
+      free(event);
+      if (result != VK_SUCCESS)
+         return result;
+   }
+}
+
+static VkResult
+x11_queue_present(struct anv_swapchain *anv_chain,
+                  struct anv_queue *queue,
+                  uint32_t image_index)
+{
+   struct x11_swapchain *chain = (struct x11_swapchain *)anv_chain;
+   struct x11_image *image = &chain->images[image_index];
+
+   assert(image_index < chain->image_count);
+
+   uint32_t options = XCB_PRESENT_OPTION_NONE;
+
+   int64_t target_msc = 0;
+   int64_t divisor = 0;
+   int64_t remainder = 0;
+
+   options |= XCB_PRESENT_OPTION_ASYNC;
+
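+   /* Reset the shm fence before presenting: image->sync_fence is passed as
+    * the idle fence below, the server triggers it once it is done with the
+    * pixmap, and acquire_next_image waits on it before reusing the image.
+    */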
+   xshmfence_reset(image->shm_fence);
+
+   xcb_void_cookie_t cookie =
+      xcb_present_pixmap(chain->conn,
+                         chain->window,
+                         image->pixmap,
+                         (uint32_t) chain->send_sbc,
+                         0,                                    /* valid */
+                         0,                                    /* update */
+                         0,                                    /* x_off */
+                         0,                                    /* y_off */
+                         XCB_NONE,                             /* target_crtc */
+                         XCB_NONE,
+                         image->sync_fence,
+                         options,
+                         target_msc,
+                         divisor,
+                         remainder, 0, NULL);
+   xcb_discard_reply(chain->conn, cookie.sequence);
+   image->busy = true;
+
+   xcb_flush(chain->conn);
+
+   return VK_SUCCESS;
+}
+
+static VkResult
+x11_image_init(struct anv_device *device, struct x11_swapchain *chain,
+               const VkSwapchainCreateInfoKHR *pCreateInfo,
+               const VkAllocationCallbacks* pAllocator,
+               struct x11_image *image)
+{
+   xcb_void_cookie_t cookie;
+   VkResult result;
+
+   VkImage image_h;
+   result = anv_image_create(anv_device_to_handle(device),
+      &(struct anv_image_create_info) {
+         .isl_tiling_flags = ISL_TILING_X_BIT,
+         .stride = 0,
+         .vk_info =
+      &(VkImageCreateInfo) {
+         .sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
+         .imageType = VK_IMAGE_TYPE_2D,
+         .format = pCreateInfo->imageFormat,
+         .extent = {
+            .width = pCreateInfo->imageExtent.width,
+            .height = pCreateInfo->imageExtent.height,
+            .depth = 1
+         },
+         .mipLevels = 1,
+         .arrayLayers = 1,
+         .samples = 1,
+         /* FIXME: Need a way to use X tiling to allow scanout */
+         .tiling = VK_IMAGE_TILING_OPTIMAL,
+         .usage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT,
+         .flags = 0,
+      }},
+      NULL,
+      &image_h);
+   if (result != VK_SUCCESS)
+      return result;
+
+   image->image = anv_image_from_handle(image_h);
+   assert(anv_format_is_color(image->image->format));
+
+   VkDeviceMemory memory_h;
+   result = anv_AllocateMemory(anv_device_to_handle(device),
+      &(VkMemoryAllocateInfo) {
+         .sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,
+         .allocationSize = image->image->size,
+         .memoryTypeIndex = 0,
+      },
+      NULL /* XXX: pAllocator */,
+      &memory_h);
+   if (result != VK_SUCCESS)
+      goto fail_create_image;
+
+   image->memory = anv_device_memory_from_handle(memory_h);
+   image->memory->bo.is_winsys_bo = true;
+
+   anv_BindImageMemory(anv_device_to_handle(device), image_h, memory_h, 0);
+
+   struct anv_surface *surface = &image->image->color_surface;
+   assert(surface->isl.tiling == ISL_TILING_X);
+
+   int ret = anv_gem_set_tiling(device, image->memory->bo.gem_handle,
+                                surface->isl.row_pitch, I915_TILING_X);
+   if (ret) {
+      /* FINISHME: Choose a better error. */
+      result = vk_errorf(VK_ERROR_OUT_OF_DEVICE_MEMORY,
+                         "set_tiling failed: %m");
+      goto fail_alloc_memory;
+   }
+
+   int fd = anv_gem_handle_to_fd(device, image->memory->bo.gem_handle);
+   if (fd == -1) {
+      /* FINISHME: Choose a better error. */
+      result = vk_errorf(VK_ERROR_OUT_OF_DEVICE_MEMORY,
+                         "handle_to_fd failed: %m");
+      goto fail_alloc_memory;
+   }
+
+   uint32_t bpp = 32;
+   uint32_t depth = 24;
+   image->pixmap = xcb_generate_id(chain->conn);
+
+   cookie =
+      xcb_dri3_pixmap_from_buffer_checked(chain->conn,
+                                          image->pixmap,
+                                          chain->window,
+                                          image->image->size,
+                                          pCreateInfo->imageExtent.width,
+                                          pCreateInfo->imageExtent.height,
+                                          surface->isl.row_pitch,
+                                          depth, bpp, fd);
+   xcb_discard_reply(chain->conn, cookie.sequence);
+
+   int fence_fd = xshmfence_alloc_shm();
+   if (fence_fd < 0) {
+      result = vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+      goto fail_pixmap;
+   }
+
+   image->shm_fence = xshmfence_map_shm(fence_fd);
+   if (image->shm_fence == NULL) {
+      result = vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+      goto fail_shmfence_alloc;
+   }
+
+   image->sync_fence = xcb_generate_id(chain->conn);
+   xcb_dri3_fence_from_fd(chain->conn,
+                          image->pixmap,
+                          image->sync_fence,
+                          false,
+                          fence_fd);
+
+   image->busy = false;
+   xshmfence_trigger(image->shm_fence);
+
+   return VK_SUCCESS;
+
+fail_shmfence_alloc:
+   close(fence_fd);
+
+fail_pixmap:
+   cookie = xcb_free_pixmap(chain->conn, image->pixmap);
+   xcb_discard_reply(chain->conn, cookie.sequence);
+
+fail_alloc_memory:
+   anv_FreeMemory(anv_device_to_handle(chain->base.device),
+                  anv_device_memory_to_handle(image->memory), pAllocator);
+
+fail_create_image:
+   anv_DestroyImage(anv_device_to_handle(chain->base.device),
+                    anv_image_to_handle(image->image), pAllocator);
+
+   return result;
+}
+
+static void
+x11_image_finish(struct x11_swapchain *chain,
+                 const VkAllocationCallbacks* pAllocator,
+                 struct x11_image *image)
+{
+   xcb_void_cookie_t cookie;
+
+   cookie = xcb_sync_destroy_fence(chain->conn, image->sync_fence);
+   xcb_discard_reply(chain->conn, cookie.sequence);
+   xshmfence_unmap_shm(image->shm_fence);
+
+   cookie = xcb_free_pixmap(chain->conn, image->pixmap);
+   xcb_discard_reply(chain->conn, cookie.sequence);
+
+   anv_DestroyImage(anv_device_to_handle(chain->base.device),
+                    anv_image_to_handle(image->image), pAllocator);
+
+   anv_FreeMemory(anv_device_to_handle(chain->base.device),
+                  anv_device_memory_to_handle(image->memory), pAllocator);
+}
+
+static VkResult
+x11_swapchain_destroy(struct anv_swapchain *anv_chain,
+                      const VkAllocationCallbacks *pAllocator)
+{
+   struct x11_swapchain *chain = (struct x11_swapchain *)anv_chain;
+
+   for (uint32_t i = 0; i < chain->image_count; i++)
+      x11_image_finish(chain, pAllocator, &chain->images[i]);
+
+   xcb_unregister_for_special_event(chain->conn, chain->special_event);
+
+   anv_free2(&chain->base.device->alloc, pAllocator, chain);
+
+   return VK_SUCCESS;
+}
+
+static VkResult
+x11_surface_create_swapchain(VkIcdSurfaceBase *icd_surface,
+                             struct anv_device *device,
+                             const VkSwapchainCreateInfoKHR *pCreateInfo,
+                             const VkAllocationCallbacks* pAllocator,
+                             struct anv_swapchain **swapchain_out)
+{
+   VkIcdSurfaceXcb *surface = (VkIcdSurfaceXcb *)icd_surface;
+   struct x11_swapchain *chain;
+   xcb_void_cookie_t cookie;
+   VkResult result;
+
+   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR);
+
+   int num_images = pCreateInfo->minImageCount;
+
+   /* For true mailbox mode, we need at least 4 images:
+    *  1) One to scan out from
+    *  2) One to have queued for scan-out
+    *  3) One to be currently held by the X server
+    *  4) One to render to
+    */
+   if (pCreateInfo->presentMode == VK_PRESENT_MODE_MAILBOX_KHR)
+      num_images = MAX2(num_images, 4);
+
+   size_t size = sizeof(*chain) + num_images * sizeof(chain->images[0]);
+   chain = anv_alloc2(&device->alloc, pAllocator, size, 8,
+                      VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+   if (chain == NULL)
+      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+
+   chain->base.device = device;
+   chain->base.destroy = x11_swapchain_destroy;
+   chain->base.get_images = x11_get_images;
+   chain->base.acquire_next_image = x11_acquire_next_image;
+   chain->base.queue_present = x11_queue_present;
+
+   chain->conn = surface->connection;
+   chain->window = surface->window;
+   chain->extent = pCreateInfo->imageExtent;
+   chain->image_count = num_images;
+
+   chain->event_id = xcb_generate_id(chain->conn);
+   xcb_present_select_input(chain->conn, chain->event_id, chain->window,
+                            XCB_PRESENT_EVENT_MASK_CONFIGURE_NOTIFY |
+                            XCB_PRESENT_EVENT_MASK_COMPLETE_NOTIFY |
+                            XCB_PRESENT_EVENT_MASK_IDLE_NOTIFY);
+
+   /* Create an XCB event queue to hold present events outside of the usual
+    * application event queue
+    */
+   chain->special_event =
+      xcb_register_for_special_xge(chain->conn, &xcb_present_id,
+                                   chain->event_id, NULL);
+
+   chain->gc = xcb_generate_id(chain->conn);
+   if (!chain->gc) {
+      /* FINISHME: Choose a better error. */
+      result = vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+      goto fail_register;
+   }
+
+   cookie = xcb_create_gc(chain->conn,
+                          chain->gc,
+                          chain->window,
+                          XCB_GC_GRAPHICS_EXPOSURES,
+                          (uint32_t []) { 0 });
+   xcb_discard_reply(chain->conn, cookie.sequence);
+
+   uint32_t image = 0;
+   for (; image < chain->image_count; image++) {
+      result = x11_image_init(device, chain, pCreateInfo, pAllocator,
+                              &chain->images[image]);
+      if (result != VK_SUCCESS)
+         goto fail_init_images;
+   }
+
+   *swapchain_out = &chain->base;
+
+   return VK_SUCCESS;
+
+fail_init_images:
+   for (uint32_t j = 0; j < image; j++)
+      x11_image_finish(chain, pAllocator, &chain->images[j]);
+
+fail_register:
+   xcb_unregister_for_special_event(chain->conn, chain->special_event);
+
+   anv_free2(&device->alloc, pAllocator, chain);
+
+   return result;
+}
+
+VkResult
+anv_x11_init_wsi(struct anv_instance *instance)
+{
+   struct wsi_x11 *wsi;
+   VkResult result;
+
+   wsi = anv_alloc(&instance->alloc, sizeof(*wsi), 8,
+                   VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
+   if (!wsi) {
+      result = vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+      goto fail;
+   }
+
+   int ret = pthread_mutex_init(&wsi->mutex, NULL);
+   if (ret != 0) {
+      if (ret == ENOMEM) {
+         result = vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+      } else {
+         /* FINISHME: Choose a better error. */
+         result = vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+      }
+
+      goto fail_alloc;
+   }
+
+   wsi->connections = _mesa_hash_table_create(NULL, _mesa_hash_pointer,
+                                              _mesa_key_pointer_equal);
+   if (!wsi->connections) {
+      result = vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+      goto fail_mutex;
+   }
+
+   wsi->base.get_support = x11_surface_get_support;
+   wsi->base.get_capabilities = x11_surface_get_capabilities;
+   wsi->base.get_formats = x11_surface_get_formats;
+   wsi->base.get_present_modes = x11_surface_get_present_modes;
+   wsi->base.create_swapchain = x11_surface_create_swapchain;
+
+   instance->wsi[VK_ICD_WSI_PLATFORM_XCB] = &wsi->base;
+
+   return VK_SUCCESS;
+
+fail_mutex:
+   pthread_mutex_destroy(&wsi->mutex);
+fail_alloc:
+   anv_free(&instance->alloc, wsi);
+fail:
+   instance->wsi[VK_ICD_WSI_PLATFORM_XCB] = NULL;
+
+   return result;
+}
+
+void
+anv_x11_finish_wsi(struct anv_instance *instance)
+{
+   struct wsi_x11 *wsi =
+      (struct wsi_x11 *)instance->wsi[VK_ICD_WSI_PLATFORM_XCB];
+
+   if (wsi) {
+      _mesa_hash_table_destroy(wsi->connections, NULL);
+
+      pthread_mutex_destroy(&wsi->mutex);
+
+      anv_free(&instance->alloc, wsi);
+   }
+}
diff --git a/src/intel/vulkan/dev_icd.json.in b/src/intel/vulkan/dev_icd.json.in
new file mode 100644 (file)
index 0000000..8492036
--- /dev/null
@@ -0,0 +1,7 @@
+{
+    "file_format_version": "1.0.0",
+    "ICD": {
+        "library_path": "@build_libdir@/libvulkan_intel.so",
+        "abi_versions": "1.0.3"
+    }
+}
diff --git a/src/intel/vulkan/gen7_cmd_buffer.c b/src/intel/vulkan/gen7_cmd_buffer.c
new file mode 100644 (file)
index 0000000..dbf05d0
--- /dev/null
@@ -0,0 +1,523 @@
+/*
+ * Copyright © 2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include <assert.h>
+#include <stdbool.h>
+#include <string.h>
+#include <unistd.h>
+#include <fcntl.h>
+
+#include "anv_private.h"
+
+#include "genxml/gen_macros.h"
+#include "genxml/genX_pack.h"
+
+#if GEN_GEN == 7 && !GEN_IS_HASWELL
+void
+gen7_cmd_buffer_emit_descriptor_pointers(struct anv_cmd_buffer *cmd_buffer,
+                                         uint32_t stages)
+{
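+   /* The per-stage 3DSTATE_SAMPLER_STATE_POINTERS_* and
+    * 3DSTATE_BINDING_TABLE_POINTERS_* commands share the VS layout; only the
+    * command sub-opcode differs, so emit the VS variant and patch
+    * _3DCommandSubOpcode per stage.
+    */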
+   static const uint32_t sampler_state_opcodes[] = {
+      [MESA_SHADER_VERTEX]                      = 43,
+      [MESA_SHADER_TESS_CTRL]                   = 44, /* HS */
+      [MESA_SHADER_TESS_EVAL]                   = 45, /* DS */
+      [MESA_SHADER_GEOMETRY]                    = 46,
+      [MESA_SHADER_FRAGMENT]                    = 47,
+      [MESA_SHADER_COMPUTE]                     = 0,
+   };
+
+   static const uint32_t binding_table_opcodes[] = {
+      [MESA_SHADER_VERTEX]                      = 38,
+      [MESA_SHADER_TESS_CTRL]                   = 39,
+      [MESA_SHADER_TESS_EVAL]                   = 40,
+      [MESA_SHADER_GEOMETRY]                    = 41,
+      [MESA_SHADER_FRAGMENT]                    = 42,
+      [MESA_SHADER_COMPUTE]                     = 0,
+   };
+
+   anv_foreach_stage(s, stages) {
+      if (cmd_buffer->state.samplers[s].alloc_size > 0) {
+         anv_batch_emit(&cmd_buffer->batch,
+                        GENX(3DSTATE_SAMPLER_STATE_POINTERS_VS),
+                        ._3DCommandSubOpcode  = sampler_state_opcodes[s],
+                        .PointertoVSSamplerState = cmd_buffer->state.samplers[s].offset);
+      }
+
+      /* Always emit binding table pointers if we're asked to, since on SKL
+       * this is what flushes push constants. */
+      anv_batch_emit(&cmd_buffer->batch,
+                     GENX(3DSTATE_BINDING_TABLE_POINTERS_VS),
+                     ._3DCommandSubOpcode  = binding_table_opcodes[s],
+                     .PointertoVSBindingTable = cmd_buffer->state.binding_tables[s].offset);
+   }
+}
+
+uint32_t
+gen7_cmd_buffer_flush_descriptor_sets(struct anv_cmd_buffer *cmd_buffer)
+{
+   VkShaderStageFlags dirty = cmd_buffer->state.descriptors_dirty &
+                              cmd_buffer->state.pipeline->active_stages;
+
+   VkResult result = VK_SUCCESS;
+   anv_foreach_stage(s, dirty) {
+      result = anv_cmd_buffer_emit_samplers(cmd_buffer, s,
+                                            &cmd_buffer->state.samplers[s]);
+      if (result != VK_SUCCESS)
+         break;
+      result = anv_cmd_buffer_emit_binding_table(cmd_buffer, s,
+                                                 &cmd_buffer->state.binding_tables[s]);
+      if (result != VK_SUCCESS)
+         break;
+   }
+
+   if (result != VK_SUCCESS) {
+      assert(result == VK_ERROR_OUT_OF_DEVICE_MEMORY);
+
+      result = anv_cmd_buffer_new_binding_table_block(cmd_buffer);
+      assert(result == VK_SUCCESS);
+
+      /* Re-emit state base addresses so we get the new surface state base
+       * address before we start emitting binding tables etc.
+       */
+      anv_cmd_buffer_emit_state_base_address(cmd_buffer);
+
+      /* Re-emit all active binding tables */
+      dirty |= cmd_buffer->state.pipeline->active_stages;
+      anv_foreach_stage(s, dirty) {
+         result = anv_cmd_buffer_emit_samplers(cmd_buffer, s,
+                                               &cmd_buffer->state.samplers[s]);
+         if (result != VK_SUCCESS)
+            return result;
+         result = anv_cmd_buffer_emit_binding_table(cmd_buffer, s,
+                                                    &cmd_buffer->state.binding_tables[s]);
+         if (result != VK_SUCCESS)
+            return result;
+      }
+   }
+
+   cmd_buffer->state.descriptors_dirty &= ~dirty;
+
+   return dirty;
+}
+#endif /* GEN_GEN == 7 && !GEN_IS_HASWELL */
+
+static inline int64_t
+clamp_int64(int64_t x, int64_t min, int64_t max)
+{
+   if (x < min)
+      return min;
+   else if (x < max)
+      return x;
+   else
+      return max;
+}
+
+#if GEN_GEN == 7 && !GEN_IS_HASWELL
+static void
+emit_scissor_state(struct anv_cmd_buffer *cmd_buffer,
+                   uint32_t count, const VkRect2D *scissors)
+{
+   struct anv_state scissor_state =
+      anv_cmd_buffer_alloc_dynamic_state(cmd_buffer, count * 8, 32);
+
+   for (uint32_t i = 0; i < count; i++) {
+      const VkRect2D *s = &scissors[i];
+
+      /* Since xmax and ymax are inclusive, we have to have xmax < xmin or
+       * ymax < ymin for empty clips.  In case clip x, y, width height are all
+       * 0, the clamps below produce 0 for xmin, ymin, xmax, ymax, which isn't
+       * what we want. Just special case empty clips and produce a canonical
+       * empty clip. */
+      static const struct GEN7_SCISSOR_RECT empty_scissor = {
+         .ScissorRectangleYMin = 1,
+         .ScissorRectangleXMin = 1,
+         .ScissorRectangleYMax = 0,
+         .ScissorRectangleXMax = 0
+      };
+
+      const int max = 0xffff;
+      struct GEN7_SCISSOR_RECT scissor = {
+         /* Do this math using int64_t so overflow gets clamped correctly. */
+         .ScissorRectangleYMin = clamp_int64(s->offset.y, 0, max),
+         .ScissorRectangleXMin = clamp_int64(s->offset.x, 0, max),
+         .ScissorRectangleYMax = clamp_int64((uint64_t) s->offset.y + s->extent.height - 1, 0, max),
+         .ScissorRectangleXMax = clamp_int64((uint64_t) s->offset.x + s->extent.width - 1, 0, max)
+      };
+
+      if (s->extent.width <= 0 || s->extent.height <= 0) {
+         GEN7_SCISSOR_RECT_pack(NULL, scissor_state.map + i * 8,
+                                &empty_scissor);
+      } else {
+         GEN7_SCISSOR_RECT_pack(NULL, scissor_state.map + i * 8, &scissor);
+      }
+   }
+
+   anv_batch_emit(&cmd_buffer->batch, GEN7_3DSTATE_SCISSOR_STATE_POINTERS,
+                  .ScissorRectPointer = scissor_state.offset);
+
+   if (!cmd_buffer->device->info.has_llc)
+      anv_state_clflush(scissor_state);
+}
+
+void
+gen7_cmd_buffer_emit_scissor(struct anv_cmd_buffer *cmd_buffer)
+{
+   if (cmd_buffer->state.dynamic.scissor.count > 0) {
+      emit_scissor_state(cmd_buffer, cmd_buffer->state.dynamic.scissor.count,
+                         cmd_buffer->state.dynamic.scissor.scissors);
+   } else {
+      /* Emit a default scissor based on the currently bound framebuffer */
+      emit_scissor_state(cmd_buffer, 1,
+                         &(VkRect2D) {
+                            .offset = { .x = 0, .y = 0, },
+                            .extent = {
+                               .width = cmd_buffer->state.framebuffer->width,
+                               .height = cmd_buffer->state.framebuffer->height,
+                            },
+                         });
+   }
+}
+#endif
+
+static const uint32_t vk_to_gen_index_type[] = {
+   [VK_INDEX_TYPE_UINT16]                       = INDEX_WORD,
+   [VK_INDEX_TYPE_UINT32]                       = INDEX_DWORD,
+};
+
+static const uint32_t restart_index_for_type[] = {
+   [VK_INDEX_TYPE_UINT16]                    = UINT16_MAX,
+   [VK_INDEX_TYPE_UINT32]                    = UINT32_MAX,
+};
+
+void genX(CmdBindIndexBuffer)(
+    VkCommandBuffer                             commandBuffer,
+    VkBuffer                                    _buffer,
+    VkDeviceSize                                offset,
+    VkIndexType                                 indexType)
+{
+   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
+   ANV_FROM_HANDLE(anv_buffer, buffer, _buffer);
+
+   cmd_buffer->state.dirty |= ANV_CMD_DIRTY_INDEX_BUFFER;
+   if (GEN_IS_HASWELL)
+      cmd_buffer->state.restart_index = restart_index_for_type[indexType];
+   cmd_buffer->state.gen7.index_buffer = buffer;
+   cmd_buffer->state.gen7.index_type = vk_to_gen_index_type[indexType];
+   cmd_buffer->state.gen7.index_offset = offset;
+}
+
+static VkResult
+flush_compute_descriptor_set(struct anv_cmd_buffer *cmd_buffer)
+{
+   struct anv_device *device = cmd_buffer->device;
+   struct anv_pipeline *pipeline = cmd_buffer->state.compute_pipeline;
+   struct anv_state surfaces = { 0, }, samplers = { 0, };
+   VkResult result;
+
+   result = anv_cmd_buffer_emit_samplers(cmd_buffer,
+                                         MESA_SHADER_COMPUTE, &samplers);
+   if (result != VK_SUCCESS)
+      return result;
+   result = anv_cmd_buffer_emit_binding_table(cmd_buffer,
+                                              MESA_SHADER_COMPUTE, &surfaces);
+   if (result != VK_SUCCESS)
+      return result;
+
+   struct anv_state push_state = anv_cmd_buffer_cs_push_constants(cmd_buffer);
+
+   const struct brw_cs_prog_data *cs_prog_data = get_cs_prog_data(pipeline);
+   const struct brw_stage_prog_data *prog_data = &cs_prog_data->base;
+
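+   /* Push constants are loaded in 32-byte registers, so round the uniform
+    * params plus the per-thread local invocation ID payload up to whole
+    * registers for the CURBE read length.
+    */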
+   unsigned local_id_dwords = cs_prog_data->local_invocation_id_regs * 8;
+   unsigned push_constant_data_size =
+      (prog_data->nr_params + local_id_dwords) * 4;
+   unsigned reg_aligned_constant_size = ALIGN(push_constant_data_size, 32);
+   unsigned push_constant_regs = reg_aligned_constant_size / 32;
+
+   if (push_state.alloc_size) {
+      anv_batch_emit(&cmd_buffer->batch, GENX(MEDIA_CURBE_LOAD),
+                     .CURBETotalDataLength = push_state.alloc_size,
+                     .CURBEDataStartAddress = push_state.offset);
+   }
+
+   assert(prog_data->total_shared <= 64 * 1024);
+   uint32_t slm_size = 0;
+   if (prog_data->total_shared > 0) {
+      /* slm_size is in 4k increments, but must be a power of 2. */
+      slm_size = 4 * 1024;
+      while (slm_size < prog_data->total_shared)
+         slm_size <<= 1;
+      slm_size /= 4 * 1024;
+   }
+
+   struct anv_state state =
+      anv_state_pool_emit(&device->dynamic_state_pool,
+                          GENX(INTERFACE_DESCRIPTOR_DATA), 64,
+                          .KernelStartPointer = pipeline->cs_simd,
+                          .BindingTablePointer = surfaces.offset,
+                          .SamplerStatePointer = samplers.offset,
+                          .ConstantURBEntryReadLength =
+                             push_constant_regs,
+#if !GEN_IS_HASWELL
+                          .ConstantURBEntryReadOffset = 0,
+#endif
+                          .BarrierEnable = cs_prog_data->uses_barrier,
+                          .SharedLocalMemorySize = slm_size,
+                          .NumberofThreadsinGPGPUThreadGroup =
+                             pipeline->cs_thread_width_max);
+
+   const uint32_t size = GENX(INTERFACE_DESCRIPTOR_DATA_length) * sizeof(uint32_t);
+   anv_batch_emit(&cmd_buffer->batch, GENX(MEDIA_INTERFACE_DESCRIPTOR_LOAD),
+                  .InterfaceDescriptorTotalLength = size,
+                  .InterfaceDescriptorDataStartAddress = state.offset);
+
+   return VK_SUCCESS;
+}
+
+static void
+emit_lri(struct anv_batch *batch, uint32_t reg, uint32_t imm)
+{
+   anv_batch_emit(batch, GENX(MI_LOAD_REGISTER_IMM),
+                  .RegisterOffset = reg,
+                  .DataDWord = imm);
+}
+
+#define GEN7_L3SQCREG1                     0xb010
+#define GEN7_L3CNTLREG2                    0xb020
+#define GEN7_L3CNTLREG3                    0xb024
+
+void
+genX(cmd_buffer_config_l3)(struct anv_cmd_buffer *cmd_buffer, bool enable_slm)
+{
+   /* References for GL state:
+    *
+    * - commits e307cfa..228d5a3
+    * - src/mesa/drivers/dri/i965/gen7_l3_state.c
+    */
+
+   uint32_t l3c2_val = enable_slm ?
+      /* All = 0 ways; URB = 16 ways; DC and RO = 16; SLM = 1 */
+      /*0x02040021*/0x010000a1 :
+      /* All = 0 ways; URB = 32 ways; DC = 0; RO = 32; SLM = 0 */
+      /*0x04080040*/0x02000030;
+   bool changed = cmd_buffer->state.current_l3_config != l3c2_val;
+
+   if (changed) {
+      /* According to the hardware docs, the L3 partitioning can only be changed
+       * while the pipeline is completely drained and the caches are flushed,
+       * which involves a first PIPE_CONTROL flush which stalls the pipeline and
+       * initiates invalidation of the relevant caches...
+       */
+      anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL),
+                     .TextureCacheInvalidationEnable = true,
+                     .ConstantCacheInvalidationEnable = true,
+                     .InstructionCacheInvalidateEnable = true,
+                     .DCFlushEnable = true,
+                     .PostSyncOperation = NoWrite,
+                     .CommandStreamerStallEnable = true);
+
+      /* ...followed by a second stalling flush which guarantees that
+       * invalidation is complete when the L3 configuration registers are
+       * modified.
+       */
+      anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL),
+                     .DCFlushEnable = true,
+                     .PostSyncOperation = NoWrite,
+                     .CommandStreamerStallEnable = true);
+
+      anv_finishme("write GEN7_L3SQCREG1");
+      emit_lri(&cmd_buffer->batch, GEN7_L3CNTLREG2, l3c2_val);
+      emit_lri(&cmd_buffer->batch, GEN7_L3CNTLREG3,
+               enable_slm ? 0x00040810 : 0x00040410);
+      cmd_buffer->state.current_l3_config = l3c2_val;
+   }
+}
+
+void
+genX(cmd_buffer_flush_compute_state)(struct anv_cmd_buffer *cmd_buffer)
+{
+   struct anv_pipeline *pipeline = cmd_buffer->state.compute_pipeline;
+   const struct brw_cs_prog_data *cs_prog_data = get_cs_prog_data(pipeline);
+   VkResult result;
+
+   assert(pipeline->active_stages == VK_SHADER_STAGE_COMPUTE_BIT);
+
+   bool needs_slm = cs_prog_data->base.total_shared > 0;
+   genX(cmd_buffer_config_l3)(cmd_buffer, needs_slm);
+
+   genX(flush_pipeline_select_gpgpu)(cmd_buffer);
+
+   if (cmd_buffer->state.compute_dirty & ANV_CMD_DIRTY_PIPELINE)
+      anv_batch_emit_batch(&cmd_buffer->batch, &pipeline->batch);
+
+   if ((cmd_buffer->state.descriptors_dirty & VK_SHADER_STAGE_COMPUTE_BIT) ||
+       (cmd_buffer->state.compute_dirty & ANV_CMD_DIRTY_PIPELINE)) {
+      /* FIXME: figure out descriptors for gen7 */
+      result = flush_compute_descriptor_set(cmd_buffer);
+      assert(result == VK_SUCCESS);
+      cmd_buffer->state.descriptors_dirty &= ~VK_SHADER_STAGE_COMPUTE_BIT;
+   }
+
+   cmd_buffer->state.compute_dirty = 0;
+}
+
+void
+genX(cmd_buffer_flush_dynamic_state)(struct anv_cmd_buffer *cmd_buffer)
+{
+   struct anv_pipeline *pipeline = cmd_buffer->state.pipeline;
+
+   if (cmd_buffer->state.dirty & (ANV_CMD_DIRTY_PIPELINE |
+                                  ANV_CMD_DIRTY_RENDER_TARGETS |
+                                  ANV_CMD_DIRTY_DYNAMIC_LINE_WIDTH |
+                                  ANV_CMD_DIRTY_DYNAMIC_DEPTH_BIAS)) {
+
+      const struct anv_image_view *iview =
+         anv_cmd_buffer_get_depth_stencil_view(cmd_buffer);
+      const struct anv_image *image = iview ? iview->image : NULL;
+      const struct anv_format *anv_format =
+         iview ? anv_format_for_vk_format(iview->vk_format) : NULL;
+      const bool has_depth = iview && anv_format->has_depth;
+      const uint32_t depth_format = has_depth ?
+         isl_surf_get_depth_format(&cmd_buffer->device->isl_dev,
+                                   &image->depth_surface.isl) : D16_UNORM;
+
+      uint32_t sf_dw[GENX(3DSTATE_SF_length)];
+      struct GENX(3DSTATE_SF) sf = {
+         GENX(3DSTATE_SF_header),
+         .DepthBufferSurfaceFormat = depth_format,
+         .LineWidth = cmd_buffer->state.dynamic.line_width,
+         .GlobalDepthOffsetConstant = cmd_buffer->state.dynamic.depth_bias.bias,
+         .GlobalDepthOffsetScale = cmd_buffer->state.dynamic.depth_bias.slope,
+         .GlobalDepthOffsetClamp = cmd_buffer->state.dynamic.depth_bias.clamp
+      };
+      GENX(3DSTATE_SF_pack)(NULL, sf_dw, &sf);
+
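+      /* Only the dynamic fields are packed above; anv_batch_emit_merge ORs
+       * them dword-by-dword with the non-dynamic half that was packed into
+       * pipeline->gen7.sf at pipeline creation and emits the result.
+       */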
+      anv_batch_emit_merge(&cmd_buffer->batch, sf_dw, pipeline->gen7.sf);
+   }
+
+   if (cmd_buffer->state.dirty & (ANV_CMD_DIRTY_DYNAMIC_BLEND_CONSTANTS |
+                                  ANV_CMD_DIRTY_DYNAMIC_STENCIL_REFERENCE)) {
+      struct anv_dynamic_state *d = &cmd_buffer->state.dynamic;
+      struct anv_state cc_state =
+         anv_cmd_buffer_alloc_dynamic_state(cmd_buffer,
+                                            GENX(COLOR_CALC_STATE_length) * 4,
+                                            64);
+      struct GENX(COLOR_CALC_STATE) cc = {
+         .BlendConstantColorRed = cmd_buffer->state.dynamic.blend_constants[0],
+         .BlendConstantColorGreen = cmd_buffer->state.dynamic.blend_constants[1],
+         .BlendConstantColorBlue = cmd_buffer->state.dynamic.blend_constants[2],
+         .BlendConstantColorAlpha = cmd_buffer->state.dynamic.blend_constants[3],
+         .StencilReferenceValue = d->stencil_reference.front & 0xff,
+         .BackFaceStencilReferenceValue = d->stencil_reference.back & 0xff,
+      };
+      GENX(COLOR_CALC_STATE_pack)(NULL, cc_state.map, &cc);
+      if (!cmd_buffer->device->info.has_llc)
+         anv_state_clflush(cc_state);
+
+      anv_batch_emit(&cmd_buffer->batch,
+                     GENX(3DSTATE_CC_STATE_POINTERS),
+                     .ColorCalcStatePointer = cc_state.offset);
+   }
+
+   if (cmd_buffer->state.dirty & (ANV_CMD_DIRTY_PIPELINE |
+                                  ANV_CMD_DIRTY_RENDER_TARGETS |
+                                  ANV_CMD_DIRTY_DYNAMIC_STENCIL_COMPARE_MASK |
+                                  ANV_CMD_DIRTY_DYNAMIC_STENCIL_WRITE_MASK)) {
+      uint32_t depth_stencil_dw[GENX(DEPTH_STENCIL_STATE_length)];
+      struct anv_dynamic_state *d = &cmd_buffer->state.dynamic;
+
+      struct GENX(DEPTH_STENCIL_STATE) depth_stencil = {
+         .StencilTestMask = d->stencil_compare_mask.front & 0xff,
+         .StencilWriteMask = d->stencil_write_mask.front & 0xff,
+
+         .BackfaceStencilTestMask = d->stencil_compare_mask.back & 0xff,
+         .BackfaceStencilWriteMask = d->stencil_write_mask.back & 0xff,
+      };
+      GENX(DEPTH_STENCIL_STATE_pack)(NULL, depth_stencil_dw, &depth_stencil);
+
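+      /* anv_cmd_buffer_merge_dynamic allocates dynamic state and ORs the
+       * dwords above with the DEPTH_STENCIL_STATE packed at pipeline
+       * creation, so the pointer emitted below covers both halves.
+       */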
+      struct anv_state ds_state =
+         anv_cmd_buffer_merge_dynamic(cmd_buffer, depth_stencil_dw,
+                                      pipeline->gen7.depth_stencil_state,
+                                      GENX(DEPTH_STENCIL_STATE_length), 64);
+
+      anv_batch_emit(&cmd_buffer->batch,
+                     GENX(3DSTATE_DEPTH_STENCIL_STATE_POINTERS),
+                     .PointertoDEPTH_STENCIL_STATE = ds_state.offset);
+   }
+
+   if (cmd_buffer->state.gen7.index_buffer &&
+       cmd_buffer->state.dirty & (ANV_CMD_DIRTY_PIPELINE |
+                                  ANV_CMD_DIRTY_INDEX_BUFFER)) {
+      struct anv_buffer *buffer = cmd_buffer->state.gen7.index_buffer;
+      uint32_t offset = cmd_buffer->state.gen7.index_offset;
+
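+      /* On Haswell the cut index for primitive restart lives in 3DSTATE_VF;
+       * on Ivybridge it is a field of 3DSTATE_INDEX_BUFFER below.
+       */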
+#if GEN_IS_HASWELL
+      anv_batch_emit(&cmd_buffer->batch, GEN75_3DSTATE_VF,
+                     .IndexedDrawCutIndexEnable = pipeline->primitive_restart,
+                     .CutIndex = cmd_buffer->state.restart_index);
+#endif
+
+      anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_INDEX_BUFFER),
+#if !GEN_IS_HASWELL
+                     .CutIndexEnable = pipeline->primitive_restart,
+#endif
+                     .IndexFormat = cmd_buffer->state.gen7.index_type,
+                     .MemoryObjectControlState = GENX(MOCS),
+                     .BufferStartingAddress = { buffer->bo, buffer->offset + offset },
+                     .BufferEndingAddress = { buffer->bo, buffer->offset + buffer->size });
+   }
+
+   cmd_buffer->state.dirty = 0;
+}
+
+void genX(CmdSetEvent)(
+    VkCommandBuffer                             commandBuffer,
+    VkEvent                                     event,
+    VkPipelineStageFlags                        stageMask)
+{
+   stub();
+}
+
+void genX(CmdResetEvent)(
+    VkCommandBuffer                             commandBuffer,
+    VkEvent                                     event,
+    VkPipelineStageFlags                        stageMask)
+{
+   stub();
+}
+
+void genX(CmdWaitEvents)(
+    VkCommandBuffer                             commandBuffer,
+    uint32_t                                    eventCount,
+    const VkEvent*                              pEvents,
+    VkPipelineStageFlags                        srcStageMask,
+    VkPipelineStageFlags                        destStageMask,
+    uint32_t                                    memoryBarrierCount,
+    const VkMemoryBarrier*                      pMemoryBarriers,
+    uint32_t                                    bufferMemoryBarrierCount,
+    const VkBufferMemoryBarrier*                pBufferMemoryBarriers,
+    uint32_t                                    imageMemoryBarrierCount,
+    const VkImageMemoryBarrier*                 pImageMemoryBarriers)
+{
+   stub();
+}
diff --git a/src/intel/vulkan/gen7_pipeline.c b/src/intel/vulkan/gen7_pipeline.c
new file mode 100644 (file)
index 0000000..37e4639
--- /dev/null
@@ -0,0 +1,402 @@
+/*
+ * Copyright © 2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include <assert.h>
+#include <stdbool.h>
+#include <string.h>
+#include <unistd.h>
+#include <fcntl.h>
+
+#include "anv_private.h"
+
+#include "genxml/gen_macros.h"
+#include "genxml/genX_pack.h"
+
+#include "genX_pipeline_util.h"
+
+static void
+gen7_emit_rs_state(struct anv_pipeline *pipeline,
+                   const VkPipelineRasterizationStateCreateInfo *info,
+                   const struct anv_graphics_pipeline_create_info *extra)
+{
+   struct GENX(3DSTATE_SF) sf = {
+      GENX(3DSTATE_SF_header),
+
+      /* LegacyGlobalDepthBiasEnable */
+
+      .StatisticsEnable                         = true,
+      .FrontFaceFillMode                        = vk_to_gen_fillmode[info->polygonMode],
+      .BackFaceFillMode                         = vk_to_gen_fillmode[info->polygonMode],
+      .ViewTransformEnable                      = !(extra && extra->disable_viewport),
+      .FrontWinding                             = vk_to_gen_front_face[info->frontFace],
+      /* bool                                         AntiAliasingEnable; */
+
+      .CullMode                                 = vk_to_gen_cullmode[info->cullMode],
+
+      /* uint32_t                                     LineEndCapAntialiasingRegionWidth; */
+      .ScissorRectangleEnable                   =  !(extra && extra->disable_scissor),
+
+      /* uint32_t                                     MultisampleRasterizationMode; */
+      /* bool                                         LastPixelEnable; */
+
+      .TriangleStripListProvokingVertexSelect   = 0,
+      .LineStripListProvokingVertexSelect       = 0,
+      .TriangleFanProvokingVertexSelect         = 1,
+
+      /* uint32_t                                     AALineDistanceMode; */
+      /* uint32_t                                     VertexSubPixelPrecisionSelect; */
+      .UsePointWidthState                       = false,
+      .PointWidth                               = 1.0,
+      .GlobalDepthOffsetEnableSolid             = info->depthBiasEnable,
+      .GlobalDepthOffsetEnableWireframe         = info->depthBiasEnable,
+      .GlobalDepthOffsetEnablePoint             = info->depthBiasEnable,
+   };
+
+   GENX(3DSTATE_SF_pack)(NULL, &pipeline->gen7.sf, &sf);
+}
+
+static void
+gen7_emit_ds_state(struct anv_pipeline *pipeline,
+                   const VkPipelineDepthStencilStateCreateInfo *info)
+{
+   if (info == NULL) {
+      /* We're going to OR this together with the dynamic state.  We need
+       * to make sure it's initialized to something useful.
+       */
+      memset(pipeline->gen7.depth_stencil_state, 0,
+             sizeof(pipeline->gen7.depth_stencil_state));
+      return;
+   }
+
+   struct GENX(DEPTH_STENCIL_STATE) state = {
+      .DepthTestEnable = info->depthTestEnable,
+      .DepthBufferWriteEnable = info->depthWriteEnable,
+      .DepthTestFunction = vk_to_gen_compare_op[info->depthCompareOp],
+      .DoubleSidedStencilEnable = true,
+
+      .StencilTestEnable = info->stencilTestEnable,
+      .StencilBufferWriteEnable = info->stencilTestEnable,
+      .StencilFailOp = vk_to_gen_stencil_op[info->front.failOp],
+      .StencilPassDepthPassOp = vk_to_gen_stencil_op[info->front.passOp],
+      .StencilPassDepthFailOp = vk_to_gen_stencil_op[info->front.depthFailOp],
+      .StencilTestFunction = vk_to_gen_compare_op[info->front.compareOp],
+
+      .BackfaceStencilFailOp = vk_to_gen_stencil_op[info->back.failOp],
+      .BackfaceStencilPassDepthPassOp = vk_to_gen_stencil_op[info->back.passOp],
+      .BackfaceStencilPassDepthFailOp = vk_to_gen_stencil_op[info->back.depthFailOp],
+      .BackFaceStencilTestFunction = vk_to_gen_compare_op[info->back.compareOp],
+   };
+
+   GENX(DEPTH_STENCIL_STATE_pack)(NULL, &pipeline->gen7.depth_stencil_state, &state);
+}
+
+static void
+gen7_emit_cb_state(struct anv_pipeline *pipeline,
+                   const VkPipelineColorBlendStateCreateInfo *info,
+                   const VkPipelineMultisampleStateCreateInfo *ms_info)
+{
+   struct anv_device *device = pipeline->device;
+
+   if (info == NULL || info->attachmentCount == 0) {
+      pipeline->blend_state =
+         anv_state_pool_emit(&device->dynamic_state_pool,
+            GENX(BLEND_STATE), 64,
+            .ColorBufferBlendEnable = false,
+            .WriteDisableAlpha = true,
+            .WriteDisableRed = true,
+            .WriteDisableGreen = true,
+            .WriteDisableBlue = true);
+   } else {
+      const VkPipelineColorBlendAttachmentState *a = &info->pAttachments[0];
+      struct GENX(BLEND_STATE) blend = {
+         .AlphaToCoverageEnable = ms_info && ms_info->alphaToCoverageEnable,
+         .AlphaToOneEnable = ms_info && ms_info->alphaToOneEnable,
+
+         .LogicOpEnable = info->logicOpEnable,
+         .LogicOpFunction = vk_to_gen_logic_op[info->logicOp],
+         .ColorBufferBlendEnable = a->blendEnable,
+         .ColorClampRange = COLORCLAMP_RTFORMAT,
+         .PreBlendColorClampEnable = true,
+         .PostBlendColorClampEnable = true,
+         .SourceBlendFactor = vk_to_gen_blend[a->srcColorBlendFactor],
+         .DestinationBlendFactor = vk_to_gen_blend[a->dstColorBlendFactor],
+         .ColorBlendFunction = vk_to_gen_blend_op[a->colorBlendOp],
+         .SourceAlphaBlendFactor = vk_to_gen_blend[a->srcAlphaBlendFactor],
+         .DestinationAlphaBlendFactor = vk_to_gen_blend[a->dstAlphaBlendFactor],
+         .AlphaBlendFunction = vk_to_gen_blend_op[a->alphaBlendOp],
+         .WriteDisableAlpha = !(a->colorWriteMask & VK_COLOR_COMPONENT_A_BIT),
+         .WriteDisableRed = !(a->colorWriteMask & VK_COLOR_COMPONENT_R_BIT),
+         .WriteDisableGreen = !(a->colorWriteMask & VK_COLOR_COMPONENT_G_BIT),
+         .WriteDisableBlue = !(a->colorWriteMask & VK_COLOR_COMPONENT_B_BIT),
+      };
+
+      /* Our hardware applies the blend factor prior to the blend function
+       * regardless of what function is used.  Technically, this means the
+       * hardware can do MORE than GL or Vulkan specify.  However, it also
+       * means that, for MIN and MAX, we have to stomp the blend factor to
+       * ONE to make it a no-op.
+       */
+      if (a->colorBlendOp == VK_BLEND_OP_MIN ||
+          a->colorBlendOp == VK_BLEND_OP_MAX) {
+         blend.SourceBlendFactor = BLENDFACTOR_ONE;
+         blend.DestinationBlendFactor = BLENDFACTOR_ONE;
+      }
+      if (a->alphaBlendOp == VK_BLEND_OP_MIN ||
+          a->alphaBlendOp == VK_BLEND_OP_MAX) {
+         blend.SourceAlphaBlendFactor = BLENDFACTOR_ONE;
+         blend.DestinationAlphaBlendFactor = BLENDFACTOR_ONE;
+      }
+
+      pipeline->blend_state = anv_state_pool_alloc(&device->dynamic_state_pool,
+                                                   GENX(BLEND_STATE_length) * 4,
+                                                   64);
+      GENX(BLEND_STATE_pack)(NULL, pipeline->blend_state.map, &blend);
+      if (!pipeline->device->info.has_llc)
+         anv_state_clflush(pipeline->blend_state);
+   }
+
+   anv_batch_emit(&pipeline->batch, GENX(3DSTATE_BLEND_STATE_POINTERS),
+                  .BlendStatePointer = pipeline->blend_state.offset);
+}
+
+VkResult
+genX(graphics_pipeline_create)(
+    VkDevice                                    _device,
+    struct anv_pipeline_cache *                 cache,
+    const VkGraphicsPipelineCreateInfo*         pCreateInfo,
+    const struct anv_graphics_pipeline_create_info *extra,
+    const VkAllocationCallbacks*                pAllocator,
+    VkPipeline*                                 pPipeline)
+{
+   ANV_FROM_HANDLE(anv_device, device, _device);
+   struct anv_pipeline *pipeline;
+   VkResult result;
+
+   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO);
+
+   pipeline = anv_alloc2(&device->alloc, pAllocator, sizeof(*pipeline), 8,
+                         VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+   if (pipeline == NULL)
+      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+
+   result = anv_pipeline_init(pipeline, device, cache,
+                              pCreateInfo, extra, pAllocator);
+   if (result != VK_SUCCESS) {
+      anv_free2(&device->alloc, pAllocator, pipeline);
+      return result;
+   }
+
+   assert(pCreateInfo->pVertexInputState);
+   emit_vertex_input(pipeline, pCreateInfo->pVertexInputState, extra);
+
+   assert(pCreateInfo->pRasterizationState);
+   gen7_emit_rs_state(pipeline, pCreateInfo->pRasterizationState, extra);
+
+   gen7_emit_ds_state(pipeline, pCreateInfo->pDepthStencilState);
+
+   gen7_emit_cb_state(pipeline, pCreateInfo->pColorBlendState,
+                                pCreateInfo->pMultisampleState);
+
+   emit_urb_setup(pipeline);
+
+   const VkPipelineRasterizationStateCreateInfo *rs_info =
+      pCreateInfo->pRasterizationState;
+
+   anv_batch_emit(&pipeline->batch, GENX(3DSTATE_CLIP),
+      .FrontWinding                             = vk_to_gen_front_face[rs_info->frontFace],
+      .CullMode                                 = vk_to_gen_cullmode[rs_info->cullMode],
+      .ClipEnable                               = true,
+      .APIMode                                  = APIMODE_OGL,
+      .ViewportXYClipTestEnable                 = !(extra && extra->disable_viewport),
+      .ClipMode                                 = CLIPMODE_NORMAL,
+      .TriangleStripListProvokingVertexSelect   = 0,
+      .LineStripListProvokingVertexSelect       = 0,
+      .TriangleFanProvokingVertexSelect         = 1,
+      .MinimumPointWidth                        = 0.125,
+      .MaximumPointWidth                        = 255.875,
+      .MaximumVPIndex = pCreateInfo->pViewportState->viewportCount - 1);
+
+   if (pCreateInfo->pMultisampleState &&
+       pCreateInfo->pMultisampleState->rasterizationSamples > 1)
+      anv_finishme("VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO");
+
+   uint32_t samples = 1;
+   uint32_t log2_samples = __builtin_ffs(samples) - 1;
+
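+   /* Multisampling is not wired up yet (see the finishme above), so samples
+    * stays at 1 and __builtin_ffs(1) - 1 programs NumberofMultisamples as 0,
+    * i.e. 1x.
+    */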
+   anv_batch_emit(&pipeline->batch, GENX(3DSTATE_MULTISAMPLE),
+      .PixelLocation                            = PIXLOC_CENTER,
+      .NumberofMultisamples                     = log2_samples);
+
+   anv_batch_emit(&pipeline->batch, GENX(3DSTATE_SAMPLE_MASK),
+      .SampleMask                               = 0xff);
+
+   const struct brw_vs_prog_data *vs_prog_data = get_vs_prog_data(pipeline);
+
+#if 0
+   /* From gen7_vs_state.c */
+
+   /**
+    * From Graphics BSpec: 3D-Media-GPGPU Engine > 3D Pipeline Stages >
+    * Geometry > Geometry Shader > State:
+    *
+    *     "Note: Because of corruption in IVB:GT2, software needs to flush the
+    *     whole fixed function pipeline when the GS enable changes value in
+    *     the 3DSTATE_GS."
+    *
+    * The hardware architects have clarified that in this context "flush the
+    * whole fixed function pipeline" means to emit a PIPE_CONTROL with the "CS
+    * Stall" bit set.
+    */
+   if (!brw->is_haswell && !brw->is_baytrail)
+      gen7_emit_vs_workaround_flush(brw);
+#endif
+
+   if (pipeline->vs_vec4 == NO_KERNEL || (extra && extra->disable_vs))
+      anv_batch_emit(&pipeline->batch, GENX(3DSTATE_VS), .VSFunctionEnable = false);
+   else
+      anv_batch_emit(&pipeline->batch, GENX(3DSTATE_VS),
+         .KernelStartPointer                    = pipeline->vs_vec4,
+         .ScratchSpaceBaseOffset                = pipeline->scratch_start[MESA_SHADER_VERTEX],
+         .PerThreadScratchSpace                 = scratch_space(&vs_prog_data->base.base),
+
+         .DispatchGRFStartRegisterforURBData    =
+            vs_prog_data->base.base.dispatch_grf_start_reg,
+         .VertexURBEntryReadLength              = vs_prog_data->base.urb_read_length,
+         .VertexURBEntryReadOffset              = 0,
+
+         .MaximumNumberofThreads                = device->info.max_vs_threads - 1,
+         .StatisticsEnable                      = true,
+         .VSFunctionEnable                      = true);
+
+   const struct brw_gs_prog_data *gs_prog_data = get_gs_prog_data(pipeline);
+
+   if (pipeline->gs_kernel == NO_KERNEL || (extra && extra->disable_vs)) {
+      anv_batch_emit(&pipeline->batch, GENX(3DSTATE_GS), .GSEnable = false);
+   } else {
+      anv_batch_emit(&pipeline->batch, GENX(3DSTATE_GS),
+         .KernelStartPointer                    = pipeline->gs_kernel,
+         .ScratchSpaceBasePointer               = pipeline->scratch_start[MESA_SHADER_GEOMETRY],
+         .PerThreadScratchSpace                 = scratch_space(&gs_prog_data->base.base),
+
+         .OutputVertexSize                      = gs_prog_data->output_vertex_size_hwords * 2 - 1,
+         .OutputTopology                        = gs_prog_data->output_topology,
+         .VertexURBEntryReadLength              = gs_prog_data->base.urb_read_length,
+         .IncludeVertexHandles                  = gs_prog_data->base.include_vue_handles,
+         .DispatchGRFStartRegisterforURBData    =
+            gs_prog_data->base.base.dispatch_grf_start_reg,
+
+         .MaximumNumberofThreads                = device->info.max_gs_threads - 1,
+         /* This is in the next dword on HSW. */
+         .ControlDataFormat                     = gs_prog_data->control_data_format,
+         .ControlDataHeaderSize                 = gs_prog_data->control_data_header_size_hwords,
+         .InstanceControl                       = MAX2(gs_prog_data->invocations, 1) - 1,
+         .DispatchMode                          = gs_prog_data->base.dispatch_mode,
+         .GSStatisticsEnable                    = true,
+         .IncludePrimitiveID                    = gs_prog_data->include_primitive_id,
+#     if (GEN_IS_HASWELL)
+         .ReorderMode                           = REORDER_TRAILING,
+#     else
+         .ReorderEnable                         = true,
+#     endif
+         .GSEnable                              = true);
+   }
+
+   if (pipeline->ps_ksp0 == NO_KERNEL) {
+      anv_batch_emit(&pipeline->batch, GENX(3DSTATE_SBE));
+
+      anv_batch_emit(&pipeline->batch, GENX(3DSTATE_WM),
+                     .StatisticsEnable                         = true,
+                     .ThreadDispatchEnable                     = false,
+                     .LineEndCapAntialiasingRegionWidth        = 0, /* 0.5 pixels */
+                     .LineAntialiasingRegionWidth              = 1, /* 1.0 pixels */
+                     .EarlyDepthStencilControl                 = EDSC_NORMAL,
+                     .PointRasterizationRule                   = RASTRULE_UPPER_RIGHT);
+
+      /* Even if no fragments are ever dispatched, the hardware hangs if we
+       * don't at least set the maximum number of threads.
+       */
+      anv_batch_emit(&pipeline->batch, GENX(3DSTATE_PS),
+                     .MaximumNumberofThreads                   = device->info.max_wm_threads - 1);
+   } else {
+      const struct brw_wm_prog_data *wm_prog_data = get_wm_prog_data(pipeline);
+      if (wm_prog_data->urb_setup[VARYING_SLOT_BFC0] != -1 ||
+          wm_prog_data->urb_setup[VARYING_SLOT_BFC1] != -1)
+         anv_finishme("two-sided color needs sbe swizzling setup");
+      if (wm_prog_data->urb_setup[VARYING_SLOT_PRIMITIVE_ID] != -1)
+         anv_finishme("primitive_id needs sbe swizzling setup");
+
+      emit_3dstate_sbe(pipeline);
+
+      anv_batch_emit(&pipeline->batch, GENX(3DSTATE_PS),
+                     .KernelStartPointer0                      = pipeline->ps_ksp0,
+                     .ScratchSpaceBasePointer                  = pipeline->scratch_start[MESA_SHADER_FRAGMENT],
+                     .PerThreadScratchSpace                    = scratch_space(&wm_prog_data->base),
+
+                     .MaximumNumberofThreads                   = device->info.max_wm_threads - 1,
+                     .PushConstantEnable                       = wm_prog_data->base.nr_params > 0,
+                     .AttributeEnable                          = wm_prog_data->num_varying_inputs > 0,
+                     .oMaskPresenttoRenderTarget               = wm_prog_data->uses_omask,
+
+                     .RenderTargetFastClearEnable              = false,
+                     .DualSourceBlendEnable                    = false,
+                     .RenderTargetResolveEnable                = false,
+
+                     .PositionXYOffsetSelect                   = wm_prog_data->uses_pos_offset ?
+                     POSOFFSET_SAMPLE : POSOFFSET_NONE,
+
+                     ._32PixelDispatchEnable                   = false,
+                     ._16PixelDispatchEnable                   = pipeline->ps_simd16 != NO_KERNEL,
+                     ._8PixelDispatchEnable                    = pipeline->ps_simd8 != NO_KERNEL,
+
+                     .DispatchGRFStartRegisterforConstantSetupData0 = pipeline->ps_grf_start0,
+                     .DispatchGRFStartRegisterforConstantSetupData1 = 0,
+                     .DispatchGRFStartRegisterforConstantSetupData2 = pipeline->ps_grf_start2,
+
+#if 0
+                     /* Haswell requires the sample mask to be set in this packet as well as
+                      * in 3DSTATE_SAMPLE_MASK; the values should match. */
+                     /* _NEW_BUFFERS, _NEW_MULTISAMPLE */
+#endif
+
+                     .KernelStartPointer1                      = 0,
+                     .KernelStartPointer2                      = pipeline->ps_ksp2);
+
+      /* FIXME-GEN7: This needs a lot more work, cf gen7 upload_wm_state(). */
+      anv_batch_emit(&pipeline->batch, GENX(3DSTATE_WM),
+                     .StatisticsEnable                         = true,
+                     .ThreadDispatchEnable                     = true,
+                     .LineEndCapAntialiasingRegionWidth        = 0, /* 0.5 pixels */
+                     .LineAntialiasingRegionWidth              = 1, /* 1.0 pixels */
+                     .EarlyDepthStencilControl                 = EDSC_NORMAL,
+                     .PointRasterizationRule                   = RASTRULE_UPPER_RIGHT,
+                     .PixelShaderComputedDepthMode             = wm_prog_data->computed_depth_mode,
+                     .PixelShaderUsesSourceDepth               = wm_prog_data->uses_src_depth,
+                     .PixelShaderUsesSourceW                   = wm_prog_data->uses_src_w,
+                     .PixelShaderUsesInputCoverageMask         = wm_prog_data->uses_sample_mask,
+                     .BarycentricInterpolationMode             = wm_prog_data->barycentric_interp_modes);
+   }
+
+   *pPipeline = anv_pipeline_to_handle(pipeline);
+
+   return VK_SUCCESS;
+}
diff --git a/src/intel/vulkan/gen8_cmd_buffer.c b/src/intel/vulkan/gen8_cmd_buffer.c
new file mode 100644 (file)
index 0000000..87b5e34
--- /dev/null
@@ -0,0 +1,532 @@
+/*
+ * Copyright © 2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include <assert.h>
+#include <stdbool.h>
+#include <string.h>
+#include <unistd.h>
+#include <fcntl.h>
+
+#include "anv_private.h"
+
+#include "genxml/gen_macros.h"
+#include "genxml/genX_pack.h"
+
+#if GEN_GEN == 8
+static void
+emit_viewport_state(struct anv_cmd_buffer *cmd_buffer,
+                    uint32_t count, const VkViewport *viewports)
+{
+   struct anv_state sf_clip_state =
+      anv_cmd_buffer_alloc_dynamic_state(cmd_buffer, count * 64, 64);
+   struct anv_state cc_state =
+      anv_cmd_buffer_alloc_dynamic_state(cmd_buffer, count * 8, 32);
+
+   for (uint32_t i = 0; i < count; i++) {
+      const VkViewport *vp = &viewports[i];
+
+      /* The gen7 state struct has just the matrix and guardband fields;
+       * the gen8 struct adds the min/max viewport fields.
+       */
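+      /* The viewport matrix maps NDC x/y in [-1,1] onto the viewport
+       * rectangle; with an illustrative 1920x1080 viewport at the origin,
+       * m00 = m30 = 960 and m11 = m31 = 540.
+       */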
+      struct GENX(SF_CLIP_VIEWPORT) sf_clip_viewport = {
+         .ViewportMatrixElementm00 = vp->width / 2,
+         .ViewportMatrixElementm11 = vp->height / 2,
+         .ViewportMatrixElementm22 = 1.0,
+         .ViewportMatrixElementm30 = vp->x + vp->width / 2,
+         .ViewportMatrixElementm31 = vp->y + vp->height / 2,
+         .ViewportMatrixElementm32 = 0.0,
+         .XMinClipGuardband = -1.0f,
+         .XMaxClipGuardband = 1.0f,
+         .YMinClipGuardband = -1.0f,
+         .YMaxClipGuardband = 1.0f,
+         .XMinViewPort = vp->x,
+         .XMaxViewPort = vp->x + vp->width - 1,
+         .YMinViewPort = vp->y,
+         .YMaxViewPort = vp->y + vp->height - 1,
+      };
+
+      struct GENX(CC_VIEWPORT) cc_viewport = {
+         .MinimumDepth = vp->minDepth,
+         .MaximumDepth = vp->maxDepth
+      };
+
+      GENX(SF_CLIP_VIEWPORT_pack)(NULL, sf_clip_state.map + i * 64,
+                                 &sf_clip_viewport);
+      GENX(CC_VIEWPORT_pack)(NULL, cc_state.map + i * 8, &cc_viewport);
+   }
+
+   if (!cmd_buffer->device->info.has_llc) {
+      anv_state_clflush(sf_clip_state);
+      anv_state_clflush(cc_state);
+   }
+
+   anv_batch_emit(&cmd_buffer->batch,
+                  GENX(3DSTATE_VIEWPORT_STATE_POINTERS_CC),
+                  .CCViewportPointer = cc_state.offset);
+   anv_batch_emit(&cmd_buffer->batch,
+                  GENX(3DSTATE_VIEWPORT_STATE_POINTERS_SF_CLIP),
+                  .SFClipViewportPointer = sf_clip_state.offset);
+}
+
+void
+gen8_cmd_buffer_emit_viewport(struct anv_cmd_buffer *cmd_buffer)
+{
+   if (cmd_buffer->state.dynamic.viewport.count > 0) {
+      emit_viewport_state(cmd_buffer, cmd_buffer->state.dynamic.viewport.count,
+                          cmd_buffer->state.dynamic.viewport.viewports);
+   } else {
+      /* If viewport count is 0, this is taken to mean "use the default" */
+      emit_viewport_state(cmd_buffer, 1,
+                          &(VkViewport) {
+                             .x = 0.0f,
+                             .y = 0.0f,
+                             .width = cmd_buffer->state.framebuffer->width,
+                             .height = cmd_buffer->state.framebuffer->height,
+                             .minDepth = 0.0f,
+                             .maxDepth = 1.0f,
+                          });
+   }
+}
+#endif
+
+static void
+emit_lri(struct anv_batch *batch, uint32_t reg, uint32_t imm)
+{
+   anv_batch_emit(batch, GENX(MI_LOAD_REGISTER_IMM),
+                  .RegisterOffset = reg,
+                  .DataDWord = imm);
+}
+
+#define GEN8_L3CNTLREG                  0x7034
+
+void
+genX(cmd_buffer_config_l3)(struct anv_cmd_buffer *cmd_buffer, bool enable_slm)
+{
+   /* References for GL state:
+    *
+    * - commits e307cfa..228d5a3
+    * - src/mesa/drivers/dri/i965/gen7_l3_state.c
+    */
+
+   uint32_t val = enable_slm ?
+      /* All = 48 ways; URB = 16 ways; DC and RO = 0, SLM = 1 */
+      0x60000021 :
+      /* All = 48 ways; URB = 48 ways; DC, RO and SLM = 0 */
+      0x60000060;
+   bool changed = cmd_buffer->state.current_l3_config != val;
+
+   if (changed) {
+      /* According to the hardware docs, the L3 partitioning can only be changed
+       * while the pipeline is completely drained and the caches are flushed,
+       * which involves a first PIPE_CONTROL flush which stalls the pipeline and
+       * initiates invalidation of the relevant caches...
+       */
+      anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL),
+                     .TextureCacheInvalidationEnable = true,
+                     .ConstantCacheInvalidationEnable = true,
+                     .InstructionCacheInvalidateEnable = true,
+                     .DCFlushEnable = true,
+                     .PostSyncOperation = NoWrite,
+                     .CommandStreamerStallEnable = true);
+
+      /* ...followed by a second stalling flush which guarantees that
+       * invalidation is complete when the L3 configuration registers are
+       * modified.
+       */
+      anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL),
+                     .DCFlushEnable = true,
+                     .PostSyncOperation = NoWrite,
+                     .CommandStreamerStallEnable = true);
+
+      emit_lri(&cmd_buffer->batch, GEN8_L3CNTLREG, val);
+      cmd_buffer->state.current_l3_config = val;
+   }
+}
+
+static void
+__emit_genx_sf_state(struct anv_cmd_buffer *cmd_buffer)
+{
+   uint32_t sf_dw[GENX(3DSTATE_SF_length)];
+   struct GENX(3DSTATE_SF) sf = {
+      GENX(3DSTATE_SF_header),
+      .LineWidth = cmd_buffer->state.dynamic.line_width,
+   };
+   GENX(3DSTATE_SF_pack)(NULL, sf_dw, &sf);
+   /* FIXME: gen9.sf */
+   anv_batch_emit_merge(&cmd_buffer->batch, sf_dw,
+                        cmd_buffer->state.pipeline->gen8.sf);
+}
+
+#include "genxml/gen9_pack.h"
+static void
+__emit_gen9_sf_state(struct anv_cmd_buffer *cmd_buffer)
+{
+   uint32_t sf_dw[GEN9_3DSTATE_SF_length];
+   struct GEN9_3DSTATE_SF sf = {
+      GEN9_3DSTATE_SF_header,
+      .LineWidth = cmd_buffer->state.dynamic.line_width,
+   };
+   GEN9_3DSTATE_SF_pack(NULL, sf_dw, &sf);
+   /* FIXME: gen9.sf */
+   anv_batch_emit_merge(&cmd_buffer->batch, sf_dw,
+                        cmd_buffer->state.pipeline->gen8.sf);
+}
+
+static void
+__emit_sf_state(struct anv_cmd_buffer *cmd_buffer)
+{
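+   /* Cherryview is a gen8 part, but its 3DSTATE_SF line-width field
+    * presumably follows the gen9 packing, hence the gen9 variant above.
+    */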
+   if (cmd_buffer->device->info.is_cherryview)
+      __emit_gen9_sf_state(cmd_buffer);
+   else
+      __emit_genx_sf_state(cmd_buffer);
+}
+
+void
+genX(cmd_buffer_flush_dynamic_state)(struct anv_cmd_buffer *cmd_buffer)
+{
+   struct anv_pipeline *pipeline = cmd_buffer->state.pipeline;
+
+   if (cmd_buffer->state.dirty & (ANV_CMD_DIRTY_PIPELINE |
+                                  ANV_CMD_DIRTY_DYNAMIC_LINE_WIDTH)) {
+      __emit_sf_state(cmd_buffer);
+   }
+
+   if (cmd_buffer->state.dirty & (ANV_CMD_DIRTY_PIPELINE |
+                                  ANV_CMD_DIRTY_DYNAMIC_DEPTH_BIAS)){
+      uint32_t raster_dw[GENX(3DSTATE_RASTER_length)];
+      struct GENX(3DSTATE_RASTER) raster = {
+         GENX(3DSTATE_RASTER_header),
+         .GlobalDepthOffsetConstant = cmd_buffer->state.dynamic.depth_bias.bias,
+         .GlobalDepthOffsetScale = cmd_buffer->state.dynamic.depth_bias.slope,
+         .GlobalDepthOffsetClamp = cmd_buffer->state.dynamic.depth_bias.clamp
+      };
+      GENX(3DSTATE_RASTER_pack)(NULL, raster_dw, &raster);
+      anv_batch_emit_merge(&cmd_buffer->batch, raster_dw,
+                           pipeline->gen8.raster);
+   }
+
+   /* Stencil reference values moved from COLOR_CALC_STATE in gen8 to
+    * 3DSTATE_WM_DEPTH_STENCIL in gen9. That means the dirty bits get split
+    * across different state packets for gen8 and gen9. We handle that by
+    * using a big old #if switch here.
+    */
+#if GEN_GEN == 8
+   if (cmd_buffer->state.dirty & (ANV_CMD_DIRTY_DYNAMIC_BLEND_CONSTANTS |
+                                  ANV_CMD_DIRTY_DYNAMIC_STENCIL_REFERENCE)) {
+      struct anv_dynamic_state *d = &cmd_buffer->state.dynamic;
+      struct anv_state cc_state =
+         anv_cmd_buffer_alloc_dynamic_state(cmd_buffer,
+                                            GENX(COLOR_CALC_STATE_length) * 4,
+                                            64);
+      struct GENX(COLOR_CALC_STATE) cc = {
+         .BlendConstantColorRed = cmd_buffer->state.dynamic.blend_constants[0],
+         .BlendConstantColorGreen = cmd_buffer->state.dynamic.blend_constants[1],
+         .BlendConstantColorBlue = cmd_buffer->state.dynamic.blend_constants[2],
+         .BlendConstantColorAlpha = cmd_buffer->state.dynamic.blend_constants[3],
+         .StencilReferenceValue = d->stencil_reference.front & 0xff,
+         .BackFaceStencilReferenceValue = d->stencil_reference.back & 0xff,
+      };
+      GENX(COLOR_CALC_STATE_pack)(NULL, cc_state.map, &cc);
+
+      if (!cmd_buffer->device->info.has_llc)
+         anv_state_clflush(cc_state);
+
+      anv_batch_emit(&cmd_buffer->batch,
+                     GENX(3DSTATE_CC_STATE_POINTERS),
+                     .ColorCalcStatePointer = cc_state.offset,
+                     .ColorCalcStatePointerValid = true);
+   }
+
+   if (cmd_buffer->state.dirty & (ANV_CMD_DIRTY_PIPELINE |
+                                  ANV_CMD_DIRTY_DYNAMIC_STENCIL_COMPARE_MASK |
+                                  ANV_CMD_DIRTY_DYNAMIC_STENCIL_WRITE_MASK)) {
+      uint32_t wm_depth_stencil_dw[GENX(3DSTATE_WM_DEPTH_STENCIL_length)];
+      struct anv_dynamic_state *d = &cmd_buffer->state.dynamic;
+
+      struct GENX(3DSTATE_WM_DEPTH_STENCIL) wm_depth_stencil = {
+         GENX(3DSTATE_WM_DEPTH_STENCIL_header),
+
+         .StencilTestMask = d->stencil_compare_mask.front & 0xff,
+         .StencilWriteMask = d->stencil_write_mask.front & 0xff,
+
+         .BackfaceStencilTestMask = d->stencil_compare_mask.back & 0xff,
+         .BackfaceStencilWriteMask = d->stencil_write_mask.back & 0xff,
+      };
+      GENX(3DSTATE_WM_DEPTH_STENCIL_pack)(NULL, wm_depth_stencil_dw,
+                                          &wm_depth_stencil);
+
+      anv_batch_emit_merge(&cmd_buffer->batch, wm_depth_stencil_dw,
+                           pipeline->gen8.wm_depth_stencil);
+   }
+#else
+   if (cmd_buffer->state.dirty & ANV_CMD_DIRTY_DYNAMIC_BLEND_CONSTANTS) {
+      struct anv_state cc_state =
+         anv_cmd_buffer_alloc_dynamic_state(cmd_buffer,
+                                            GEN9_COLOR_CALC_STATE_length * 4,
+                                            64);
+      struct GEN9_COLOR_CALC_STATE cc = {
+         .BlendConstantColorRed = cmd_buffer->state.dynamic.blend_constants[0],
+         .BlendConstantColorGreen = cmd_buffer->state.dynamic.blend_constants[1],
+         .BlendConstantColorBlue = cmd_buffer->state.dynamic.blend_constants[2],
+         .BlendConstantColorAlpha = cmd_buffer->state.dynamic.blend_constants[3],
+      };
+      GEN9_COLOR_CALC_STATE_pack(NULL, cc_state.map, &cc);
+
+      if (!cmd_buffer->device->info.has_llc)
+         anv_state_clflush(cc_state);
+
+      anv_batch_emit(&cmd_buffer->batch,
+                     GEN9_3DSTATE_CC_STATE_POINTERS,
+                     .ColorCalcStatePointer = cc_state.offset,
+                     .ColorCalcStatePointerValid = true);
+   }
+
+   if (cmd_buffer->state.dirty & (ANV_CMD_DIRTY_PIPELINE |
+                                  ANV_CMD_DIRTY_DYNAMIC_STENCIL_COMPARE_MASK |
+                                  ANV_CMD_DIRTY_DYNAMIC_STENCIL_WRITE_MASK |
+                                  ANV_CMD_DIRTY_DYNAMIC_STENCIL_REFERENCE)) {
+      uint32_t dwords[GEN9_3DSTATE_WM_DEPTH_STENCIL_length];
+      struct anv_dynamic_state *d = &cmd_buffer->state.dynamic;
+      struct GEN9_3DSTATE_WM_DEPTH_STENCIL wm_depth_stencil = {
+         GEN9_3DSTATE_WM_DEPTH_STENCIL_header,
+
+         .StencilTestMask = d->stencil_compare_mask.front & 0xff,
+         .StencilWriteMask = d->stencil_write_mask.front & 0xff,
+
+         .BackfaceStencilTestMask = d->stencil_compare_mask.back & 0xff,
+         .BackfaceStencilWriteMask = d->stencil_write_mask.back & 0xff,
+
+         .StencilReferenceValue = d->stencil_reference.front & 0xff,
+         .BackfaceStencilReferenceValue = d->stencil_reference.back & 0xff,
+      };
+      GEN9_3DSTATE_WM_DEPTH_STENCIL_pack(NULL, dwords, &wm_depth_stencil);
+
+      anv_batch_emit_merge(&cmd_buffer->batch, dwords,
+                           pipeline->gen9.wm_depth_stencil);
+   }
+#endif
+
+   if (cmd_buffer->state.dirty & (ANV_CMD_DIRTY_PIPELINE |
+                                  ANV_CMD_DIRTY_INDEX_BUFFER)) {
+      anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_VF),
+         .IndexedDrawCutIndexEnable = pipeline->primitive_restart,
+         .CutIndex = cmd_buffer->state.restart_index);
+   }
+
+   cmd_buffer->state.dirty = 0;
+}
+
+void genX(CmdBindIndexBuffer)(
+    VkCommandBuffer                             commandBuffer,
+    VkBuffer                                    _buffer,
+    VkDeviceSize                                offset,
+    VkIndexType                                 indexType)
+{
+   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
+   ANV_FROM_HANDLE(anv_buffer, buffer, _buffer);
+
+   static const uint32_t vk_to_gen_index_type[] = {
+      [VK_INDEX_TYPE_UINT16]                    = INDEX_WORD,
+      [VK_INDEX_TYPE_UINT32]                    = INDEX_DWORD,
+   };
+
+   static const uint32_t restart_index_for_type[] = {
+      [VK_INDEX_TYPE_UINT16]                    = UINT16_MAX,
+      [VK_INDEX_TYPE_UINT32]                    = UINT32_MAX,
+   };
+
+   cmd_buffer->state.restart_index = restart_index_for_type[indexType];
+
+   anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_INDEX_BUFFER),
+                  .IndexFormat = vk_to_gen_index_type[indexType],
+                  .MemoryObjectControlState = GENX(MOCS),
+                  .BufferStartingAddress = { buffer->bo, buffer->offset + offset },
+                  .BufferSize = buffer->size - offset);
+
+   cmd_buffer->state.dirty |= ANV_CMD_DIRTY_INDEX_BUFFER;
+}
+
+static VkResult
+flush_compute_descriptor_set(struct anv_cmd_buffer *cmd_buffer)
+{
+   struct anv_device *device = cmd_buffer->device;
+   struct anv_pipeline *pipeline = cmd_buffer->state.compute_pipeline;
+   struct anv_state surfaces = { 0, }, samplers = { 0, };
+   VkResult result;
+
+   result = anv_cmd_buffer_emit_samplers(cmd_buffer,
+                                         MESA_SHADER_COMPUTE, &samplers);
+   if (result != VK_SUCCESS)
+      return result;
+   result = anv_cmd_buffer_emit_binding_table(cmd_buffer,
+                                              MESA_SHADER_COMPUTE, &surfaces);
+   if (result != VK_SUCCESS)
+      return result;
+
+   struct anv_state push_state = anv_cmd_buffer_cs_push_constants(cmd_buffer);
+
+   const struct brw_cs_prog_data *cs_prog_data = get_cs_prog_data(pipeline);
+   const struct brw_stage_prog_data *prog_data = &cs_prog_data->base;
+
+   unsigned local_id_dwords = cs_prog_data->local_invocation_id_regs * 8;
+   unsigned push_constant_data_size =
+      (prog_data->nr_params + local_id_dwords) * 4;
+   unsigned reg_aligned_constant_size = ALIGN(push_constant_data_size, 32);
+   unsigned push_constant_regs = reg_aligned_constant_size / 32;
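+   /* Worked example with illustrative numbers: 10 uniform params plus 3
+    * local-invocation-ID registers (24 dwords) give (10 + 24) * 4 = 136
+    * bytes, which aligns up to 160, i.e. 5 32-byte registers for
+    * ConstantIndirectURBEntryReadLength below.
+    */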
+
+   if (push_state.alloc_size) {
+      anv_batch_emit(&cmd_buffer->batch, GENX(MEDIA_CURBE_LOAD),
+                     .CURBETotalDataLength = push_state.alloc_size,
+                     .CURBEDataStartAddress = push_state.offset);
+   }
+
+   assert(prog_data->total_shared <= 64 * 1024);
+   uint32_t slm_size = 0;
+   if (prog_data->total_shared > 0) {
+      /* The hardware takes the SLM size as a power-of-two number of 4k
+       * chunks, so round total_shared up to the next power of two
+       * (minimum 4k) before converting.
+       */
+      slm_size = 4 * 1024;
+      while (slm_size < prog_data->total_shared)
+         slm_size <<= 1;
+      slm_size /= 4 * 1024;
+   }
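+   /* With illustrative numbers: total_shared of 9 KB rounds up to 16 KB,
+    * which is programmed below as 16 KB / 4 KB = 4.
+    */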
+
+   struct anv_state state =
+      anv_state_pool_emit(&device->dynamic_state_pool,
+                          GENX(INTERFACE_DESCRIPTOR_DATA), 64,
+                          .KernelStartPointer = pipeline->cs_simd,
+                          .KernelStartPointerHigh = 0,
+                          .BindingTablePointer = surfaces.offset,
+                          .BindingTableEntryCount = 0,
+                          .SamplerStatePointer = samplers.offset,
+                          .SamplerCount = 0,
+                          .ConstantIndirectURBEntryReadLength = push_constant_regs,
+                          .ConstantURBEntryReadOffset = 0,
+                          .BarrierEnable = cs_prog_data->uses_barrier,
+                          .SharedLocalMemorySize = slm_size,
+                          .NumberofThreadsinGPGPUThreadGroup =
+                             pipeline->cs_thread_width_max);
+
+   uint32_t size = GENX(INTERFACE_DESCRIPTOR_DATA_length) * sizeof(uint32_t);
+   anv_batch_emit(&cmd_buffer->batch, GENX(MEDIA_INTERFACE_DESCRIPTOR_LOAD),
+                  .InterfaceDescriptorTotalLength = size,
+                  .InterfaceDescriptorDataStartAddress = state.offset);
+
+   return VK_SUCCESS;
+}
+
+void
+genX(cmd_buffer_flush_compute_state)(struct anv_cmd_buffer *cmd_buffer)
+{
+   struct anv_pipeline *pipeline = cmd_buffer->state.compute_pipeline;
+   const struct brw_cs_prog_data *cs_prog_data = get_cs_prog_data(pipeline);
+   VkResult result;
+
+   assert(pipeline->active_stages == VK_SHADER_STAGE_COMPUTE_BIT);
+
+   bool needs_slm = cs_prog_data->base.total_shared > 0;
+   genX(cmd_buffer_config_l3)(cmd_buffer, needs_slm);
+
+   genX(flush_pipeline_select_gpgpu)(cmd_buffer);
+
+   if (cmd_buffer->state.compute_dirty & ANV_CMD_DIRTY_PIPELINE)
+      anv_batch_emit_batch(&cmd_buffer->batch, &pipeline->batch);
+
+   if ((cmd_buffer->state.descriptors_dirty & VK_SHADER_STAGE_COMPUTE_BIT) ||
+       (cmd_buffer->state.compute_dirty & ANV_CMD_DIRTY_PIPELINE)) {
+      result = flush_compute_descriptor_set(cmd_buffer);
+      assert(result == VK_SUCCESS);
+      cmd_buffer->state.descriptors_dirty &= ~VK_SHADER_STAGE_COMPUTE_BIT;
+   }
+
+   cmd_buffer->state.compute_dirty = 0;
+}
+
+void genX(CmdSetEvent)(
+    VkCommandBuffer                             commandBuffer,
+    VkEvent                                     _event,
+    VkPipelineStageFlags                        stageMask)
+{
+   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
+   ANV_FROM_HANDLE(anv_event, event, _event);
+
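+   /* Set the event by having PIPE_CONTROL's post-sync operation write
+    * VK_EVENT_SET into the event's dword in the dynamic state block pool.
+    */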
+   anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL),
+                  .DestinationAddressType = DAT_PPGTT,
+                  .PostSyncOperation = WriteImmediateData,
+                  .Address = {
+                     &cmd_buffer->device->dynamic_state_block_pool.bo,
+                     event->state.offset
+                   },
+                  .ImmediateData = VK_EVENT_SET);
+}
+
+void genX(CmdResetEvent)(
+    VkCommandBuffer                             commandBuffer,
+    VkEvent                                     _event,
+    VkPipelineStageFlags                        stageMask)
+{
+   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
+   ANV_FROM_HANDLE(anv_event, event, _event);
+
+   anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL),
+                  .DestinationAddressType = DAT_PPGTT,
+                  .PostSyncOperation = WriteImmediateData,
+                  .Address = {
+                     &cmd_buffer->device->dynamic_state_block_pool.bo,
+                     event->state.offset
+                   },
+                  .ImmediateData = VK_EVENT_RESET);
+}
+
+void genX(CmdWaitEvents)(
+    VkCommandBuffer                             commandBuffer,
+    uint32_t                                    eventCount,
+    const VkEvent*                              pEvents,
+    VkPipelineStageFlags                        srcStageMask,
+    VkPipelineStageFlags                        destStageMask,
+    uint32_t                                    memoryBarrierCount,
+    const VkMemoryBarrier*                      pMemoryBarriers,
+    uint32_t                                    bufferMemoryBarrierCount,
+    const VkBufferMemoryBarrier*                pBufferMemoryBarriers,
+    uint32_t                                    imageMemoryBarrierCount,
+    const VkImageMemoryBarrier*                 pImageMemoryBarriers)
+{
+   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
+   for (uint32_t i = 0; i < eventCount; i++) {
+      ANV_FROM_HANDLE(anv_event, event, pEvents[i]);
+
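+      /* Stall the command streamer, polling the event's dword until it
+       * reads VK_EVENT_SET; the pipeline barrier emitted afterwards handles
+       * the requested memory barriers.
+       */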
+      anv_batch_emit(&cmd_buffer->batch, GENX(MI_SEMAPHORE_WAIT),
+                     .WaitMode = PollingMode,
+                     .CompareOperation = COMPARE_SAD_EQUAL_SDD,
+                     .SemaphoreDataDword = VK_EVENT_SET,
+                     .SemaphoreAddress = {
+                        &cmd_buffer->device->dynamic_state_block_pool.bo,
+                        event->state.offset
+                     });
+   }
+
+   genX(CmdPipelineBarrier)(commandBuffer, srcStageMask, destStageMask,
+                            false, /* byRegion */
+                            memoryBarrierCount, pMemoryBarriers,
+                            bufferMemoryBarrierCount, pBufferMemoryBarriers,
+                            imageMemoryBarrierCount, pImageMemoryBarriers);
+}
diff --git a/src/intel/vulkan/gen8_pipeline.c b/src/intel/vulkan/gen8_pipeline.c
new file mode 100644 (file)
index 0000000..b8b29d4
--- /dev/null
@@ -0,0 +1,538 @@
+/*
+ * Copyright © 2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include <assert.h>
+#include <stdbool.h>
+#include <string.h>
+#include <unistd.h>
+#include <fcntl.h>
+
+#include "anv_private.h"
+
+#include "genxml/gen_macros.h"
+#include "genxml/genX_pack.h"
+
+#include "genX_pipeline_util.h"
+
+static void
+emit_ia_state(struct anv_pipeline *pipeline,
+              const VkPipelineInputAssemblyStateCreateInfo *info,
+              const struct anv_graphics_pipeline_create_info *extra)
+{
+   anv_batch_emit(&pipeline->batch, GENX(3DSTATE_VF_TOPOLOGY),
+                  .PrimitiveTopologyType = pipeline->topology);
+}
+
+static void
+emit_rs_state(struct anv_pipeline *pipeline,
+              const VkPipelineRasterizationStateCreateInfo *info,
+              const VkPipelineMultisampleStateCreateInfo *ms_info,
+              const struct anv_graphics_pipeline_create_info *extra)
+{
+   uint32_t samples = 1;
+
+   if (ms_info)
+      samples = ms_info->rasterizationSamples;
+
+   struct GENX(3DSTATE_SF) sf = {
+      GENX(3DSTATE_SF_header),
+      .ViewportTransformEnable = !(extra && extra->disable_viewport),
+      .TriangleStripListProvokingVertexSelect = 0,
+      .LineStripListProvokingVertexSelect = 0,
+      .TriangleFanProvokingVertexSelect = 1,
+      .PointWidthSource = Vertex,
+      .PointWidth = 1.0,
+   };
+
+   /* FINISHME: VkBool32 rasterizerDiscardEnable; */
+
+   GENX(3DSTATE_SF_pack)(NULL, pipeline->gen8.sf, &sf);
+
+   struct GENX(3DSTATE_RASTER) raster = {
+      GENX(3DSTATE_RASTER_header),
+
+      /* For details on 3DSTATE_RASTER multisample state, see the BSpec table
+       * "Multisample Modes State".
+       */
+      .DXMultisampleRasterizationEnable = samples > 1,
+      .ForcedSampleCount = FSC_NUMRASTSAMPLES_0,
+      .ForceMultisampling = false,
+
+      .FrontWinding = vk_to_gen_front_face[info->frontFace],
+      .CullMode = vk_to_gen_cullmode[info->cullMode],
+      .FrontFaceFillMode = vk_to_gen_fillmode[info->polygonMode],
+      .BackFaceFillMode = vk_to_gen_fillmode[info->polygonMode],
+      .ScissorRectangleEnable = !(extra && extra->disable_scissor),
+#if GEN_GEN == 8
+      .ViewportZClipTestEnable = true,
+#else
+      /* GEN9+ splits ViewportZClipTestEnable into near and far enable bits */
+      .ViewportZFarClipTestEnable = true,
+      .ViewportZNearClipTestEnable = true,
+#endif
+      .GlobalDepthOffsetEnableSolid = info->depthBiasEnable,
+      .GlobalDepthOffsetEnableWireframe = info->depthBiasEnable,
+      .GlobalDepthOffsetEnablePoint = info->depthBiasEnable,
+   };
+
+   GENX(3DSTATE_RASTER_pack)(NULL, pipeline->gen8.raster, &raster);
+}
+
+static void
+emit_cb_state(struct anv_pipeline *pipeline,
+              const VkPipelineColorBlendStateCreateInfo *info,
+              const VkPipelineMultisampleStateCreateInfo *ms_info)
+{
+   struct anv_device *device = pipeline->device;
+
+   uint32_t num_dwords = GENX(BLEND_STATE_length);
+   pipeline->blend_state =
+      anv_state_pool_alloc(&device->dynamic_state_pool, num_dwords * 4, 64);
+
+   struct GENX(BLEND_STATE) blend_state = {
+      .AlphaToCoverageEnable = ms_info && ms_info->alphaToCoverageEnable,
+      .AlphaToOneEnable = ms_info && ms_info->alphaToOneEnable,
+   };
+
+   /* Default everything to disabled */
+   for (uint32_t i = 0; i < 8; i++) {
+      blend_state.Entry[i].WriteDisableAlpha = true;
+      blend_state.Entry[i].WriteDisableRed = true;
+      blend_state.Entry[i].WriteDisableGreen = true;
+      blend_state.Entry[i].WriteDisableBlue = true;
+   }
+
+   struct anv_pipeline_bind_map *map =
+      &pipeline->bindings[MESA_SHADER_FRAGMENT];
+
+   bool has_writeable_rt = false;
+   for (unsigned i = 0; i < map->surface_count; i++) {
+      struct anv_pipeline_binding *binding = &map->surface_to_descriptor[i];
+
+      /* All color attachments are at the beginning of the binding table */
+      if (binding->set != ANV_DESCRIPTOR_SET_COLOR_ATTACHMENTS)
+         break;
+
+      /* We can have at most 8 attachments */
+      assert(i < 8);
+
+      if (binding->offset >= info->attachmentCount)
+         continue;
+
+      const VkPipelineColorBlendAttachmentState *a =
+         &info->pAttachments[binding->offset];
+
+      if (a->srcColorBlendFactor != a->srcAlphaBlendFactor ||
+          a->dstColorBlendFactor != a->dstAlphaBlendFactor ||
+          a->colorBlendOp != a->alphaBlendOp) {
+         blend_state.IndependentAlphaBlendEnable = true;
+      }
+
+      blend_state.Entry[i] = (struct GENX(BLEND_STATE_ENTRY)) {
+         .LogicOpEnable = info->logicOpEnable,
+         .LogicOpFunction = vk_to_gen_logic_op[info->logicOp],
+         .ColorBufferBlendEnable = a->blendEnable,
+         .PreBlendSourceOnlyClampEnable = false,
+         .ColorClampRange = COLORCLAMP_RTFORMAT,
+         .PreBlendColorClampEnable = true,
+         .PostBlendColorClampEnable = true,
+         .SourceBlendFactor = vk_to_gen_blend[a->srcColorBlendFactor],
+         .DestinationBlendFactor = vk_to_gen_blend[a->dstColorBlendFactor],
+         .ColorBlendFunction = vk_to_gen_blend_op[a->colorBlendOp],
+         .SourceAlphaBlendFactor = vk_to_gen_blend[a->srcAlphaBlendFactor],
+         .DestinationAlphaBlendFactor = vk_to_gen_blend[a->dstAlphaBlendFactor],
+         .AlphaBlendFunction = vk_to_gen_blend_op[a->alphaBlendOp],
+         .WriteDisableAlpha = !(a->colorWriteMask & VK_COLOR_COMPONENT_A_BIT),
+         .WriteDisableRed = !(a->colorWriteMask & VK_COLOR_COMPONENT_R_BIT),
+         .WriteDisableGreen = !(a->colorWriteMask & VK_COLOR_COMPONENT_G_BIT),
+         .WriteDisableBlue = !(a->colorWriteMask & VK_COLOR_COMPONENT_B_BIT),
+      };
+
+      if (a->colorWriteMask != 0)
+         has_writeable_rt = true;
+
+      /* Our hardware applies the blend factor prior to the blend function
+       * regardless of what function is used.  Technically, this means the
+       * hardware can do MORE than GL or Vulkan specify.  However, it also
+       * means that, for MIN and MAX, we have to stomp the blend factor to
+       * ONE to make it a no-op.
+       */
+      if (a->colorBlendOp == VK_BLEND_OP_MIN ||
+          a->colorBlendOp == VK_BLEND_OP_MAX) {
+         blend_state.Entry[i].SourceBlendFactor = BLENDFACTOR_ONE;
+         blend_state.Entry[i].DestinationBlendFactor = BLENDFACTOR_ONE;
+      }
+      if (a->alphaBlendOp == VK_BLEND_OP_MIN ||
+          a->alphaBlendOp == VK_BLEND_OP_MAX) {
+         blend_state.Entry[i].SourceAlphaBlendFactor = BLENDFACTOR_ONE;
+         blend_state.Entry[i].DestinationAlphaBlendFactor = BLENDFACTOR_ONE;
+      }
+   }
+
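+   /* 3DSTATE_PS_BLEND below mirrors render target 0's blend state; the full
+    * per-RT configuration goes out via the BLEND_STATE table packed further
+    * down and referenced by 3DSTATE_BLEND_STATE_POINTERS.
+    */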
+   struct GENX(BLEND_STATE_ENTRY) *bs0 = &blend_state.Entry[0];
+
+   anv_batch_emit(&pipeline->batch, GENX(3DSTATE_PS_BLEND),
+                  .AlphaToCoverageEnable = blend_state.AlphaToCoverageEnable,
+                  .HasWriteableRT = has_writeable_rt,
+                  .ColorBufferBlendEnable = bs0->ColorBufferBlendEnable,
+                  .SourceAlphaBlendFactor = bs0->SourceAlphaBlendFactor,
+                  .DestinationAlphaBlendFactor =
+                     bs0->DestinationAlphaBlendFactor,
+                  .SourceBlendFactor = bs0->SourceBlendFactor,
+                  .DestinationBlendFactor = bs0->DestinationBlendFactor,
+                  .AlphaTestEnable = false,
+                  .IndependentAlphaBlendEnable =
+                     blend_state.IndependentAlphaBlendEnable);
+
+   GENX(BLEND_STATE_pack)(NULL, pipeline->blend_state.map, &blend_state);
+   if (!device->info.has_llc)
+      anv_state_clflush(pipeline->blend_state);
+
+   anv_batch_emit(&pipeline->batch, GENX(3DSTATE_BLEND_STATE_POINTERS),
+                  .BlendStatePointer = pipeline->blend_state.offset,
+                  .BlendStatePointerValid = true);
+}
+
+static void
+emit_ds_state(struct anv_pipeline *pipeline,
+              const VkPipelineDepthStencilStateCreateInfo *info)
+{
+   uint32_t *dw = GEN_GEN == 8 ?
+      pipeline->gen8.wm_depth_stencil : pipeline->gen9.wm_depth_stencil;
+
+   if (info == NULL) {
+      /* We're going to OR this together with the dynamic state.  We need
+       * to make sure it's initialized to something useful.
+       */
+      memset(pipeline->gen8.wm_depth_stencil, 0,
+             sizeof(pipeline->gen8.wm_depth_stencil));
+      memset(pipeline->gen9.wm_depth_stencil, 0,
+             sizeof(pipeline->gen9.wm_depth_stencil));
+      return;
+   }
+
+   /* VkBool32 depthBoundsTestEnable; // optional (depth_bounds_test) */
+
+   struct GENX(3DSTATE_WM_DEPTH_STENCIL) wm_depth_stencil = {
+      .DepthTestEnable = info->depthTestEnable,
+      .DepthBufferWriteEnable = info->depthWriteEnable,
+      .DepthTestFunction = vk_to_gen_compare_op[info->depthCompareOp],
+      .DoubleSidedStencilEnable = true,
+
+      .StencilTestEnable = info->stencilTestEnable,
+      .StencilBufferWriteEnable = info->stencilTestEnable,
+      .StencilFailOp = vk_to_gen_stencil_op[info->front.failOp],
+      .StencilPassDepthPassOp = vk_to_gen_stencil_op[info->front.passOp],
+      .StencilPassDepthFailOp = vk_to_gen_stencil_op[info->front.depthFailOp],
+      .StencilTestFunction = vk_to_gen_compare_op[info->front.compareOp],
+      .BackfaceStencilFailOp = vk_to_gen_stencil_op[info->back.failOp],
+      .BackfaceStencilPassDepthPassOp = vk_to_gen_stencil_op[info->back.passOp],
+      .BackfaceStencilPassDepthFailOp = vk_to_gen_stencil_op[info->back.depthFailOp],
+      .BackfaceStencilTestFunction = vk_to_gen_compare_op[info->back.compareOp],
+   };
+
+   /* From the Broadwell PRM:
+    *
+    *    "If Depth_Test_Enable = 1 AND Depth_Test_func = EQUAL, the
+    *    Depth_Write_Enable must be set to 0."
+    */
+   if (info->depthTestEnable && info->depthCompareOp == VK_COMPARE_OP_EQUAL)
+      wm_depth_stencil.DepthBufferWriteEnable = false;
+
+   GENX(3DSTATE_WM_DEPTH_STENCIL_pack)(NULL, dw, &wm_depth_stencil);
+}
+
+static void
+emit_ms_state(struct anv_pipeline *pipeline,
+              const VkPipelineMultisampleStateCreateInfo *info)
+{
+   uint32_t samples = 1;
+   uint32_t log2_samples = 0;
+
+   /* From the Vulkan 1.0 spec:
+    *    If pSampleMask is NULL, it is treated as if the mask has all bits
+    *    enabled, i.e. no coverage is removed from fragments.
+    *
+    * 3DSTATE_SAMPLE_MASK.SampleMask is 16 bits.
+    */
+   uint32_t sample_mask = 0xffff;
+
+   if (info) {
+      samples = info->rasterizationSamples;
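+      /* rasterizationSamples is a power of two, so ffs() gives log2 + 1
+       * (e.g. 8 samples -> ffs(8) == 4 -> log2_samples == 3).
+       */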
+      log2_samples = __builtin_ffs(samples) - 1;
+   }
+
+   if (info && info->pSampleMask)
+      sample_mask &= info->pSampleMask[0];
+
+   if (info && info->sampleShadingEnable)
+      anv_finishme("VkPipelineMultisampleStateCreateInfo::sampleShadingEnable");
+
+   anv_batch_emit(&pipeline->batch, GENX(3DSTATE_MULTISAMPLE),
+
+      /* The PRM says that this bit is valid only for DX9:
+       *
+       *    SW can choose to set this bit only for DX9 API. DX10/OGL API's
+       *    should not have any effect by setting or not setting this bit.
+       */
+      .PixelPositionOffsetEnable = false,
+
+      .PixelLocation = CENTER,
+      .NumberofMultisamples = log2_samples);
+
+   anv_batch_emit(&pipeline->batch, GENX(3DSTATE_SAMPLE_MASK),
+      .SampleMask = sample_mask);
+}
+
+VkResult
+genX(graphics_pipeline_create)(
+    VkDevice                                    _device,
+    struct anv_pipeline_cache *                 cache,
+    const VkGraphicsPipelineCreateInfo*         pCreateInfo,
+    const struct anv_graphics_pipeline_create_info *extra,
+    const VkAllocationCallbacks*                pAllocator,
+    VkPipeline*                                 pPipeline)
+{
+   ANV_FROM_HANDLE(anv_device, device, _device);
+   struct anv_pipeline *pipeline;
+   VkResult result;
+   uint32_t offset, length;
+
+   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO);
+
+   pipeline = anv_alloc2(&device->alloc, pAllocator, sizeof(*pipeline), 8,
+                         VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+   if (pipeline == NULL)
+      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+
+   result = anv_pipeline_init(pipeline, device, cache,
+                              pCreateInfo, extra, pAllocator);
+   if (result != VK_SUCCESS) {
+      anv_free2(&device->alloc, pAllocator, pipeline);
+      return result;
+   }
+
+   assert(pCreateInfo->pVertexInputState);
+   emit_vertex_input(pipeline, pCreateInfo->pVertexInputState, extra);
+   assert(pCreateInfo->pInputAssemblyState);
+   emit_ia_state(pipeline, pCreateInfo->pInputAssemblyState, extra);
+   assert(pCreateInfo->pRasterizationState);
+   emit_rs_state(pipeline, pCreateInfo->pRasterizationState,
+                 pCreateInfo->pMultisampleState, extra);
+   emit_ms_state(pipeline, pCreateInfo->pMultisampleState);
+   emit_ds_state(pipeline, pCreateInfo->pDepthStencilState);
+   emit_cb_state(pipeline, pCreateInfo->pColorBlendState,
+                           pCreateInfo->pMultisampleState);
+
+   emit_urb_setup(pipeline);
+
+   const struct brw_wm_prog_data *wm_prog_data = get_wm_prog_data(pipeline);
+   anv_batch_emit(&pipeline->batch, GENX(3DSTATE_CLIP),
+                  .ClipEnable = true,
+                  .EarlyCullEnable = true,
+                  .APIMode = 1, /* D3D */
+                  .ViewportXYClipTestEnable = !(extra && extra->disable_viewport),
+
+                  .ClipMode =
+                     pCreateInfo->pRasterizationState->rasterizerDiscardEnable ?
+                     REJECT_ALL : NORMAL,
+
+                  .NonPerspectiveBarycentricEnable = wm_prog_data ?
+                     (wm_prog_data->barycentric_interp_modes & 0x38) != 0 : 0,
+
+                  .TriangleStripListProvokingVertexSelect = 0,
+                  .LineStripListProvokingVertexSelect = 0,
+                  .TriangleFanProvokingVertexSelect = 1,
+
+                  .MinimumPointWidth = 0.125,
+                  .MaximumPointWidth = 255.875,
+                  .MaximumVPIndex = pCreateInfo->pViewportState->viewportCount - 1);
+
+   anv_batch_emit(&pipeline->batch, GENX(3DSTATE_WM),
+                  .StatisticsEnable = true,
+                  .LineEndCapAntialiasingRegionWidth = _05pixels,
+                  .LineAntialiasingRegionWidth = _10pixels,
+                  .EarlyDepthStencilControl = NORMAL,
+                  .ForceThreadDispatchEnable = NORMAL,
+                  .PointRasterizationRule = RASTRULE_UPPER_RIGHT,
+                  .BarycentricInterpolationMode =
+                     pipeline->ps_ksp0 == NO_KERNEL ?
+                     0 : wm_prog_data->barycentric_interp_modes);
+
+   if (pipeline->gs_kernel == NO_KERNEL) {
+      anv_batch_emit(&pipeline->batch, GENX(3DSTATE_GS), .Enable = false);
+   } else {
+      const struct brw_gs_prog_data *gs_prog_data = get_gs_prog_data(pipeline);
+      offset = 1;
+      length = (gs_prog_data->base.vue_map.num_slots + 1) / 2 - offset;
+
+      anv_batch_emit(&pipeline->batch, GENX(3DSTATE_GS),
+                     .SingleProgramFlow = false,
+                     .KernelStartPointer = pipeline->gs_kernel,
+                     .VectorMaskEnable = false,
+                     .SamplerCount = 0,
+                     .BindingTableEntryCount = 0,
+                     .ExpectedVertexCount = gs_prog_data->vertices_in,
+
+                     .ScratchSpaceBasePointer = pipeline->scratch_start[MESA_SHADER_GEOMETRY],
+                     .PerThreadScratchSpace = scratch_space(&gs_prog_data->base.base),
+
+                     .OutputVertexSize = gs_prog_data->output_vertex_size_hwords * 2 - 1,
+                     .OutputTopology = gs_prog_data->output_topology,
+                     .VertexURBEntryReadLength = gs_prog_data->base.urb_read_length,
+                     .IncludeVertexHandles = gs_prog_data->base.include_vue_handles,
+                     .DispatchGRFStartRegisterForURBData =
+                        gs_prog_data->base.base.dispatch_grf_start_reg,
+
+                     .MaximumNumberofThreads = device->info.max_gs_threads / 2 - 1,
+                     .ControlDataHeaderSize = gs_prog_data->control_data_header_size_hwords,
+                     .DispatchMode = gs_prog_data->base.dispatch_mode,
+                     .StatisticsEnable = true,
+                     .IncludePrimitiveID = gs_prog_data->include_primitive_id,
+                     .ReorderMode = TRAILING,
+                     .Enable = true,
+
+                     .ControlDataFormat = gs_prog_data->control_data_format,
+
+                     .StaticOutput = gs_prog_data->static_vertex_count >= 0,
+                     .StaticOutputVertexCount =
+                        gs_prog_data->static_vertex_count >= 0 ?
+                        gs_prog_data->static_vertex_count : 0,
+
+                     /* FIXME: mesa sets this based on ctx->Transform.ClipPlanesEnabled:
+                      * UserClipDistanceClipTestEnableBitmask_3DSTATE_GS(v)
+                      * UserClipDistanceCullTestEnableBitmask(v)
+                      */
+
+                     .VertexURBEntryOutputReadOffset = offset,
+                     .VertexURBEntryOutputLength = length);
+   }
+
+   const struct brw_vs_prog_data *vs_prog_data = get_vs_prog_data(pipeline);
+   /* Skip the VUE header and position slots */
+   offset = 1;
+   length = (vs_prog_data->base.vue_map.num_slots + 1) / 2 - offset;
+
+   uint32_t vs_start = pipeline->vs_simd8 != NO_KERNEL ? pipeline->vs_simd8 :
+                                                         pipeline->vs_vec4;
+
+   if (vs_start == NO_KERNEL || (extra && extra->disable_vs))
+      anv_batch_emit(&pipeline->batch, GENX(3DSTATE_VS),
+                     .FunctionEnable = false,
+                     /* Even if VS is disabled, SBE still gets the amount of
+                      * vertex data to read from this field. */
+                     .VertexURBEntryOutputReadOffset = offset,
+                     .VertexURBEntryOutputLength = length);
+   else
+      anv_batch_emit(&pipeline->batch, GENX(3DSTATE_VS),
+                     .KernelStartPointer = vs_start,
+                     .SingleVertexDispatch = false,
+                     .VectorMaskEnable = false,
+                     .SamplerCount = 0,
+                     .BindingTableEntryCount =
+                        vs_prog_data->base.base.binding_table.size_bytes / 4,
+                     .ThreadDispatchPriority = false,
+                     .FloatingPointMode = IEEE754,
+                     .IllegalOpcodeExceptionEnable = false,
+                     .AccessesUAV = false,
+                     .SoftwareExceptionEnable = false,
+
+                     .ScratchSpaceBasePointer = pipeline->scratch_start[MESA_SHADER_VERTEX],
+                     .PerThreadScratchSpace = scratch_space(&vs_prog_data->base.base),
+
+                     .DispatchGRFStartRegisterForURBData =
+                        vs_prog_data->base.base.dispatch_grf_start_reg,
+                     .VertexURBEntryReadLength = vs_prog_data->base.urb_read_length,
+                     .VertexURBEntryReadOffset = 0,
+
+                     .MaximumNumberofThreads = device->info.max_vs_threads - 1,
+                     .StatisticsEnable = false,
+                     .SIMD8DispatchEnable = pipeline->vs_simd8 != NO_KERNEL,
+                     .VertexCacheDisable = false,
+                     .FunctionEnable = true,
+
+                     .VertexURBEntryOutputReadOffset = offset,
+                     .VertexURBEntryOutputLength = length,
+                     .UserClipDistanceClipTestEnableBitmask = 0,
+                     .UserClipDistanceCullTestEnableBitmask = 0);
+
+   const int num_thread_bias = GEN_GEN == 8 ? 2 : 1;
+   if (pipeline->ps_ksp0 == NO_KERNEL) {
+      anv_batch_emit(&pipeline->batch, GENX(3DSTATE_PS));
+      anv_batch_emit(&pipeline->batch, GENX(3DSTATE_PS_EXTRA),
+                     .PixelShaderValid = false);
+   } else {
+      emit_3dstate_sbe(pipeline);
+
+      anv_batch_emit(&pipeline->batch, GENX(3DSTATE_PS),
+                     .KernelStartPointer0 = pipeline->ps_ksp0,
+
+                     .SingleProgramFlow = false,
+                     .VectorMaskEnable = true,
+                     .SamplerCount = 1,
+
+                     .ScratchSpaceBasePointer = pipeline->scratch_start[MESA_SHADER_FRAGMENT],
+                     .PerThreadScratchSpace = scratch_space(&wm_prog_data->base),
+
+                     .MaximumNumberofThreadsPerPSD = 64 - num_thread_bias,
+                     .PositionXYOffsetSelect = wm_prog_data->uses_pos_offset ?
+                        POSOFFSET_SAMPLE: POSOFFSET_NONE,
+                     .PushConstantEnable = wm_prog_data->base.nr_params > 0,
+                     ._8PixelDispatchEnable = pipeline->ps_simd8 != NO_KERNEL,
+                     ._16PixelDispatchEnable = pipeline->ps_simd16 != NO_KERNEL,
+                     ._32PixelDispatchEnable = false,
+
+                     .DispatchGRFStartRegisterForConstantSetupData0 = pipeline->ps_grf_start0,
+                     .DispatchGRFStartRegisterForConstantSetupData1 = 0,
+                     .DispatchGRFStartRegisterForConstantSetupData2 = pipeline->ps_grf_start2,
+
+                     .KernelStartPointer1 = 0,
+                     .KernelStartPointer2 = pipeline->ps_ksp2);
+
+      bool per_sample_ps = pCreateInfo->pMultisampleState &&
+                           pCreateInfo->pMultisampleState->sampleShadingEnable;
+
+      anv_batch_emit(&pipeline->batch, GENX(3DSTATE_PS_EXTRA),
+                     .PixelShaderValid = true,
+                     .PixelShaderKillsPixel = wm_prog_data->uses_kill,
+                     .PixelShaderComputedDepthMode = wm_prog_data->computed_depth_mode,
+                     .AttributeEnable = wm_prog_data->num_varying_inputs > 0,
+                     .oMaskPresenttoRenderTarget = wm_prog_data->uses_omask,
+                     .PixelShaderIsPerSample = per_sample_ps,
+                     .PixelShaderUsesSourceDepth = wm_prog_data->uses_src_depth,
+                     .PixelShaderUsesSourceW = wm_prog_data->uses_src_w,
+#if GEN_GEN >= 9
+                     .PixelShaderPullsBary = wm_prog_data->pulls_bary,
+                     .InputCoverageMaskState = wm_prog_data->uses_sample_mask ?
+                        ICMS_INNER_CONSERVATIVE : ICMS_NONE,
+#else
+                     .PixelShaderUsesInputCoverageMask =
+                        wm_prog_data->uses_sample_mask,
+#endif
+         );
+   }
+
+   *pPipeline = anv_pipeline_to_handle(pipeline);
+
+   return VK_SUCCESS;
+}
diff --git a/src/intel/vulkan/genX_cmd_buffer.c b/src/intel/vulkan/genX_cmd_buffer.c
new file mode 100644 (file)
index 0000000..1b53f85
--- /dev/null
@@ -0,0 +1,1278 @@
+/*
+ * Copyright © 2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include <assert.h>
+#include <stdbool.h>
+
+#include "anv_private.h"
+
+#include "genxml/gen_macros.h"
+#include "genxml/genX_pack.h"
+
+void
+genX(cmd_buffer_emit_state_base_address)(struct anv_cmd_buffer *cmd_buffer)
+{
+   struct anv_device *device = cmd_buffer->device;
+   struct anv_bo *scratch_bo = NULL;
+
+   cmd_buffer->state.scratch_size =
+      anv_block_pool_size(&device->scratch_block_pool);
+   if (cmd_buffer->state.scratch_size > 0)
+      scratch_bo = &device->scratch_block_pool.bo;
+
+/* XXX: Do we need this on more than just BDW? */
+#if (GEN_GEN >= 8)
+   /* Emit a render target cache flush.
+    *
+    * This isn't documented anywhere in the PRM.  However, it seems to be
+    * necessary prior to changing the surface state base address.  Without
+    * this, we get GPU hangs when using multi-level command buffers which
+    * clear depth, reset state base address, and then go render stuff.
+    */
+   anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL),
+                  .RenderTargetCacheFlushEnable = true);
+#endif
+
+   anv_batch_emit(&cmd_buffer->batch, GENX(STATE_BASE_ADDRESS),
+      .GeneralStateBaseAddress = { scratch_bo, 0 },
+      .GeneralStateMemoryObjectControlState = GENX(MOCS),
+      .GeneralStateBaseAddressModifyEnable = true,
+
+      .SurfaceStateBaseAddress = anv_cmd_buffer_surface_base_address(cmd_buffer),
+      .SurfaceStateMemoryObjectControlState = GENX(MOCS),
+      .SurfaceStateBaseAddressModifyEnable = true,
+
+      .DynamicStateBaseAddress = { &device->dynamic_state_block_pool.bo, 0 },
+      .DynamicStateMemoryObjectControlState = GENX(MOCS),
+      .DynamicStateBaseAddressModifyEnable = true,
+
+      .IndirectObjectBaseAddress = { NULL, 0 },
+      .IndirectObjectMemoryObjectControlState = GENX(MOCS),
+      .IndirectObjectBaseAddressModifyEnable = true,
+
+      .InstructionBaseAddress = { &device->instruction_block_pool.bo, 0 },
+      .InstructionMemoryObjectControlState = GENX(MOCS),
+      .InstructionBaseAddressModifyEnable = true,
+
+#  if (GEN_GEN >= 8)
+      /* Broadwell requires that we specify a buffer size for a bunch of
+       * these fields.  However, since we will be growing the BOs live, we
+       * just set them all to the maximum.
+       */
+      .GeneralStateBufferSize = 0xfffff,
+      .GeneralStateBufferSizeModifyEnable = true,
+      .DynamicStateBufferSize = 0xfffff,
+      .DynamicStateBufferSizeModifyEnable = true,
+      .IndirectObjectBufferSize = 0xfffff,
+      .IndirectObjectBufferSizeModifyEnable = true,
+      .InstructionBufferSize = 0xfffff,
+      .InstructionBuffersizeModifyEnable = true,
+#  endif
+   );
+
+   /* After re-setting the surface state base address, we have to do some
+    * cache flushing so that the sampler engine will pick up the new
+    * SURFACE_STATE objects and binding tables. From the Broadwell PRM,
+    * Shared Function > 3D Sampler > State > State Caching (page 96):
+    *
+    *    Coherency with system memory in the state cache, like the texture
+    *    cache is handled partially by software. It is expected that the
+    *    command stream or shader will issue Cache Flush operation or
+    *    Cache_Flush sampler message to ensure that the L1 cache remains
+    *    coherent with system memory.
+    *
+    *    [...]
+    *
+    *    Whenever the value of the Dynamic_State_Base_Addr,
+    *    Surface_State_Base_Addr are altered, the L1 state cache must be
+    *    invalidated to ensure the new surface or sampler state is fetched
+    *    from system memory.
+    *
+    * The PIPE_CONTROL command has a "State Cache Invalidation Enable" bit
+    * which, according to the PIPE_CONTROL instruction documentation in the
+    * Broadwell PRM:
+    *
+    *    Setting this bit is independent of any other bit in this packet.
+    *    This bit controls the invalidation of the L1 and L2 state caches
+    *    at the top of the pipe i.e. at the parsing time.
+    *
+    * Unfortunately, experimentation seems to indicate that state cache
+    * invalidation through a PIPE_CONTROL does nothing whatsoever with
+    * regard to surface state and binding tables.  Instead, it seems that
+    * invalidating the texture cache is what is actually needed.
+    *
+    * XXX:  As far as we have been able to determine through
+    * experimentation, flushing the texture cache appears to be
+    * sufficient.  The theory here is that all of the sampling/rendering
+    * units cache the binding table in the texture cache.  However, we have
+    * yet to be able to actually confirm this.
+    */
+   anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL),
+                  .TextureCacheInvalidationEnable = true);
+}
+
+void genX(CmdPipelineBarrier)(
+    VkCommandBuffer                             commandBuffer,
+    VkPipelineStageFlags                        srcStageMask,
+    VkPipelineStageFlags                        destStageMask,
+    VkBool32                                    byRegion,
+    uint32_t                                    memoryBarrierCount,
+    const VkMemoryBarrier*                      pMemoryBarriers,
+    uint32_t                                    bufferMemoryBarrierCount,
+    const VkBufferMemoryBarrier*                pBufferMemoryBarriers,
+    uint32_t                                    imageMemoryBarrierCount,
+    const VkImageMemoryBarrier*                 pImageMemoryBarriers)
+{
+   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
+   uint32_t b, *dw;
+
+   /* XXX: Right now, we're really dumb and just flush whatever categories
+    * the app asks for.  One of these days we may make this a bit better
+    * but right now that's all the hardware allows for in most areas.
+    */
+   VkAccessFlags src_flags = 0;
+   VkAccessFlags dst_flags = 0;
+
+   for (uint32_t i = 0; i < memoryBarrierCount; i++) {
+      src_flags |= pMemoryBarriers[i].srcAccessMask;
+      dst_flags |= pMemoryBarriers[i].dstAccessMask;
+   }
+
+   for (uint32_t i = 0; i < bufferMemoryBarrierCount; i++) {
+      src_flags |= pBufferMemoryBarriers[i].srcAccessMask;
+      dst_flags |= pBufferMemoryBarriers[i].dstAccessMask;
+   }
+
+   for (uint32_t i = 0; i < imageMemoryBarrierCount; i++) {
+      src_flags |= pImageMemoryBarriers[i].srcAccessMask;
+      dst_flags |= pImageMemoryBarriers[i].dstAccessMask;
+   }
+
+   /* Mask out the Source access flags we care about */
+   const uint32_t src_mask =
+      VK_ACCESS_SHADER_WRITE_BIT |
+      VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT |
+      VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT |
+      VK_ACCESS_TRANSFER_WRITE_BIT;
+
+   src_flags = src_flags & src_mask;
+
+   /* Mask out the destination access flags we care about */
+   const uint32_t dst_mask =
+      VK_ACCESS_INDIRECT_COMMAND_READ_BIT |
+      VK_ACCESS_INDEX_READ_BIT |
+      VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT |
+      VK_ACCESS_UNIFORM_READ_BIT |
+      VK_ACCESS_SHADER_READ_BIT |
+      VK_ACCESS_COLOR_ATTACHMENT_READ_BIT |
+      VK_ACCESS_TRANSFER_READ_BIT;
+
+   dst_flags = dst_flags & dst_mask;
+
+   /* The src flags represent how things were used previously.  This is
+    * what we use for doing flushes.
+    */
+   struct GENX(PIPE_CONTROL) flush_cmd = {
+      GENX(PIPE_CONTROL_header),
+      .PostSyncOperation = NoWrite,
+   };
+
+   for_each_bit(b, src_flags) {
+      switch ((VkAccessFlagBits)(1 << b)) {
+      case VK_ACCESS_SHADER_WRITE_BIT:
+         flush_cmd.DCFlushEnable = true;
+         break;
+      case VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT:
+         flush_cmd.RenderTargetCacheFlushEnable = true;
+         break;
+      case VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT:
+         flush_cmd.DepthCacheFlushEnable = true;
+         break;
+      case VK_ACCESS_TRANSFER_WRITE_BIT:
+         flush_cmd.RenderTargetCacheFlushEnable = true;
+         flush_cmd.DepthCacheFlushEnable = true;
+         break;
+      default:
+         unreachable("should've masked this out by now");
+      }
+   }
+
+   /* If we end up doing two PIPE_CONTROLs, the first, flushing one also has to
+    * stall and wait for the flushing to finish, so we don't re-dirty the
+    * caches with in-flight rendering after the second PIPE_CONTROL
+    * invalidates.
+    */
+
+   if (dst_flags)
+      flush_cmd.CommandStreamerStallEnable = true;
+
+   if (src_flags && dst_flags) {
+      dw = anv_batch_emit_dwords(&cmd_buffer->batch, GENX(PIPE_CONTROL_length));
+      GENX(PIPE_CONTROL_pack)(&cmd_buffer->batch, dw, &flush_cmd);
+   }
+
+   /* The dst flags represent how things will be used in the future.  This
+    * is what we use for doing cache invalidations.
+    */
+   struct GENX(PIPE_CONTROL) invalidate_cmd = {
+      GENX(PIPE_CONTROL_header),
+      .PostSyncOperation = NoWrite,
+   };
+
+   for_each_bit(b, dst_flags) {
+      switch ((VkAccessFlagBits)(1 << b)) {
+      case VK_ACCESS_INDIRECT_COMMAND_READ_BIT:
+      case VK_ACCESS_INDEX_READ_BIT:
+      case VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT:
+         invalidate_cmd.VFCacheInvalidationEnable = true;
+         break;
+      case VK_ACCESS_UNIFORM_READ_BIT:
+         invalidate_cmd.ConstantCacheInvalidationEnable = true;
+         /* fallthrough */
+      case VK_ACCESS_SHADER_READ_BIT:
+         invalidate_cmd.TextureCacheInvalidationEnable = true;
+         break;
+      case VK_ACCESS_COLOR_ATTACHMENT_READ_BIT:
+         invalidate_cmd.TextureCacheInvalidationEnable = true;
+         break;
+      case VK_ACCESS_TRANSFER_READ_BIT:
+         invalidate_cmd.TextureCacheInvalidationEnable = true;
+         break;
+      default:
+         unreachable("should've masked this out by now");
+      }
+   }
+
+   if (dst_flags) {
+      dw = anv_batch_emit_dwords(&cmd_buffer->batch, GENX(PIPE_CONTROL_length));
+      GENX(PIPE_CONTROL_pack)(&cmd_buffer->batch, dw, &invalidate_cmd);
+   }
+}
+
+static uint32_t
+cmd_buffer_flush_push_constants(struct anv_cmd_buffer *cmd_buffer)
+{
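+   /* _3DCommandSubOpcode values selecting each stage's 3DSTATE_CONSTANT_*
+    * packet; we reuse the 3DSTATE_CONSTANT_VS pack function below and just
+    * patch in the per-stage sub-opcode.
+    */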
+   static const uint32_t push_constant_opcodes[] = {
+      [MESA_SHADER_VERTEX]                      = 21,
+      [MESA_SHADER_TESS_CTRL]                   = 25, /* HS */
+      [MESA_SHADER_TESS_EVAL]                   = 26, /* DS */
+      [MESA_SHADER_GEOMETRY]                    = 22,
+      [MESA_SHADER_FRAGMENT]                    = 23,
+      [MESA_SHADER_COMPUTE]                     = 0,
+   };
+
+   VkShaderStageFlags flushed = 0;
+
+   anv_foreach_stage(stage, cmd_buffer->state.push_constants_dirty) {
+      if (stage == MESA_SHADER_COMPUTE)
+         continue;
+
+      struct anv_state state = anv_cmd_buffer_push_constants(cmd_buffer, stage);
+
+      if (state.offset == 0) {
+         anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_CONSTANT_VS),
+                        ._3DCommandSubOpcode = push_constant_opcodes[stage]);
+      } else {
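+         /* The constant buffer read lengths below are in 256-bit (32-byte)
+          * units, hence the DIV_ROUND_UP(state.alloc_size, 32).
+          */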
+         anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_CONSTANT_VS),
+                        ._3DCommandSubOpcode = push_constant_opcodes[stage],
+                        .ConstantBody = {
+#if GEN_GEN >= 9
+                           .PointerToConstantBuffer2 = { &cmd_buffer->device->dynamic_state_block_pool.bo, state.offset },
+                           .ConstantBuffer2ReadLength = DIV_ROUND_UP(state.alloc_size, 32),
+#else
+                           .PointerToConstantBuffer0 = { .offset = state.offset },
+                           .ConstantBuffer0ReadLength = DIV_ROUND_UP(state.alloc_size, 32),
+#endif
+                        });
+      }
+
+      flushed |= mesa_to_vk_shader_stage(stage);
+   }
+
+   cmd_buffer->state.push_constants_dirty &= ~VK_SHADER_STAGE_ALL_GRAPHICS;
+
+   return flushed;
+}
+
+void
+genX(cmd_buffer_flush_state)(struct anv_cmd_buffer *cmd_buffer)
+{
+   struct anv_pipeline *pipeline = cmd_buffer->state.pipeline;
+   uint32_t *p;
+
+   uint32_t vb_emit = cmd_buffer->state.vb_dirty & pipeline->vb_used;
+
+   assert((pipeline->active_stages & VK_SHADER_STAGE_COMPUTE_BIT) == 0);
+
+#if GEN_GEN >= 8
+   /* FIXME (jason): Currently, the config_l3 function causes problems on
+    * Haswell and prior if you have a kernel older than 4.4.  In order to
+    * work, it requires that a couple of registers be whitelisted in the
+    * command parser, and they weren't added until 4.4.  What we should do
+    * is check the command parser version and make this a no-op if the
+    * command parser is either off or too old.  Compute won't work 100%,
+    * but at least 3D will.  In the meantime, I'm going to make this
+    * gen8+ only so that we can get Haswell working again.
+    */
+   genX(cmd_buffer_config_l3)(cmd_buffer, false);
+#endif
+
+   genX(flush_pipeline_select_3d)(cmd_buffer);
+
+   if (vb_emit) {
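+      /* One header dword plus four dwords of VERTEX_BUFFER_STATE per
+       * buffer being emitted.
+       */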
+      const uint32_t num_buffers = __builtin_popcount(vb_emit);
+      const uint32_t num_dwords = 1 + num_buffers * 4;
+
+      p = anv_batch_emitn(&cmd_buffer->batch, num_dwords,
+                          GENX(3DSTATE_VERTEX_BUFFERS));
+      uint32_t vb, i = 0;
+      for_each_bit(vb, vb_emit) {
+         struct anv_buffer *buffer = cmd_buffer->state.vertex_bindings[vb].buffer;
+         uint32_t offset = cmd_buffer->state.vertex_bindings[vb].offset;
+
+         struct GENX(VERTEX_BUFFER_STATE) state = {
+            .VertexBufferIndex = vb,
+
+#if GEN_GEN >= 8
+            .MemoryObjectControlState = GENX(MOCS),
+#else
+            .BufferAccessType = pipeline->instancing_enable[vb] ? INSTANCEDATA : VERTEXDATA,
+            .InstanceDataStepRate = 1,
+            .VertexBufferMemoryObjectControlState = GENX(MOCS),
+#endif
+
+            .AddressModifyEnable = true,
+            .BufferPitch = pipeline->binding_stride[vb],
+            .BufferStartingAddress = { buffer->bo, buffer->offset + offset },
+
+#if GEN_GEN >= 8
+            .BufferSize = buffer->size - offset
+#else
+            .EndAddress = { buffer->bo, buffer->offset + buffer->size - 1},
+#endif
+         };
+
+         GENX(VERTEX_BUFFER_STATE_pack)(&cmd_buffer->batch, &p[1 + i * 4], &state);
+         i++;
+      }
+   }
+
+   cmd_buffer->state.vb_dirty &= ~vb_emit;
+
+   if (cmd_buffer->state.dirty & ANV_CMD_DIRTY_PIPELINE) {
+      /* If somebody compiled a pipeline after starting a command buffer the
+       * scratch bo may have grown since we started this cmd buffer (and
+       * emitted STATE_BASE_ADDRESS).  If we're binding that pipeline now,
+       * reemit STATE_BASE_ADDRESS so that we use the bigger scratch bo. */
+      if (cmd_buffer->state.scratch_size < pipeline->total_scratch)
+         anv_cmd_buffer_emit_state_base_address(cmd_buffer);
+
+      anv_batch_emit_batch(&cmd_buffer->batch, &pipeline->batch);
+
+      /* From the BDW PRM for 3DSTATE_PUSH_CONSTANT_ALLOC_VS:
+       *
+       *    "The 3DSTATE_CONSTANT_VS must be reprogrammed prior to
+       *    the next 3DPRIMITIVE command after programming the
+       *    3DSTATE_PUSH_CONSTANT_ALLOC_VS"
+       *
+       * Since 3DSTATE_PUSH_CONSTANT_ALLOC_VS is programmed as part of
+       * pipeline setup, we need to dirty push constants.
+       */
+      cmd_buffer->state.push_constants_dirty |= VK_SHADER_STAGE_ALL_GRAPHICS;
+   }
+
+#if GEN_GEN <= 7
+   if (cmd_buffer->state.descriptors_dirty & VK_SHADER_STAGE_VERTEX_BIT ||
+       cmd_buffer->state.push_constants_dirty & VK_SHADER_STAGE_VERTEX_BIT) {
+      /* From the IVB PRM Vol. 2, Part 1, Section 3.2.1:
+       *
+       *    "A PIPE_CONTROL with Post-Sync Operation set to 1h and a depth
+       *    stall needs to be sent just prior to any 3DSTATE_VS,
+       *    3DSTATE_URB_VS, 3DSTATE_CONSTANT_VS,
+       *    3DSTATE_BINDING_TABLE_POINTER_VS,
+       *    3DSTATE_SAMPLER_STATE_POINTER_VS command.  Only one
+       *    PIPE_CONTROL needs to be sent before any combination of VS
+       *    associated 3DSTATE."
+       */
+      anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL),
+                     .DepthStallEnable = true,
+                     .PostSyncOperation = WriteImmediateData,
+                     .Address = { &cmd_buffer->device->workaround_bo, 0 });
+   }
+#endif
+
+   /* We emit the binding tables and sampler tables first, then emit push
+    * constants and then finally emit binding table and sampler table
+    * pointers.  It has to happen in this order, since emitting the binding
+    * tables may change the push constants (in case of storage images). After
+    * emitting push constants, on SKL+ we have to emit the corresponding
+    * 3DSTATE_BINDING_TABLE_POINTER_* for the push constants to take effect.
+    */
+   uint32_t dirty = 0;
+   if (cmd_buffer->state.descriptors_dirty)
+      dirty = gen7_cmd_buffer_flush_descriptor_sets(cmd_buffer);
+
+   if (cmd_buffer->state.push_constants_dirty) {
+#if GEN_GEN >= 9
+      /* On Skylake and later, the binding table pointers commands are
+       * what actually flush the changes to push constant state so we need
+       * to dirty them so they get re-emitted below.
+       */
+      dirty |= cmd_buffer_flush_push_constants(cmd_buffer);
+#else
+      cmd_buffer_flush_push_constants(cmd_buffer);
+#endif
+   }
+
+   if (dirty)
+      gen7_cmd_buffer_emit_descriptor_pointers(cmd_buffer, dirty);
+
+   if (cmd_buffer->state.dirty & ANV_CMD_DIRTY_DYNAMIC_VIEWPORT)
+      gen8_cmd_buffer_emit_viewport(cmd_buffer);
+
+   if (cmd_buffer->state.dirty & ANV_CMD_DIRTY_DYNAMIC_SCISSOR)
+      gen7_cmd_buffer_emit_scissor(cmd_buffer);
+
+   genX(cmd_buffer_flush_dynamic_state)(cmd_buffer);
+}
+
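+/* The base vertex and base instance values that the vertex shader may read
+ * are fed in through a small side-band vertex buffer bound at the reserved
+ * index 32: either freshly written dynamic state (direct draws) or a slice
+ * of the indirect draw buffer (indirect draws).
+ */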
+static void
+emit_base_vertex_instance_bo(struct anv_cmd_buffer *cmd_buffer,
+                             struct anv_bo *bo, uint32_t offset)
+{
+   uint32_t *p = anv_batch_emitn(&cmd_buffer->batch, 5,
+                                 GENX(3DSTATE_VERTEX_BUFFERS));
+
+   GENX(VERTEX_BUFFER_STATE_pack)(&cmd_buffer->batch, p + 1,
+      &(struct GENX(VERTEX_BUFFER_STATE)) {
+         .VertexBufferIndex = 32, /* Reserved for this */
+         .AddressModifyEnable = true,
+         .BufferPitch = 0,
+#if (GEN_GEN >= 8)
+         .MemoryObjectControlState = GENX(MOCS),
+         .BufferStartingAddress = { bo, offset },
+         .BufferSize = 8
+#else
+         .VertexBufferMemoryObjectControlState = GENX(MOCS),
+         .BufferStartingAddress = { bo, offset },
+         .EndAddress = { bo, offset + 8 },
+#endif
+      });
+}
+
+static void
+emit_base_vertex_instance(struct anv_cmd_buffer *cmd_buffer,
+                          uint32_t base_vertex, uint32_t base_instance)
+{
+   struct anv_state id_state =
+      anv_cmd_buffer_alloc_dynamic_state(cmd_buffer, 8, 4);
+
+   ((uint32_t *)id_state.map)[0] = base_vertex;
+   ((uint32_t *)id_state.map)[1] = base_instance;
+
+   if (!cmd_buffer->device->info.has_llc)
+      anv_state_clflush(id_state);
+
+   emit_base_vertex_instance_bo(cmd_buffer,
+      &cmd_buffer->device->dynamic_state_block_pool.bo, id_state.offset);
+}
+
+void genX(CmdDraw)(
+    VkCommandBuffer                             commandBuffer,
+    uint32_t                                    vertexCount,
+    uint32_t                                    instanceCount,
+    uint32_t                                    firstVertex,
+    uint32_t                                    firstInstance)
+{
+   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
+   struct anv_pipeline *pipeline = cmd_buffer->state.pipeline;
+   const struct brw_vs_prog_data *vs_prog_data = get_vs_prog_data(pipeline);
+
+   genX(cmd_buffer_flush_state)(cmd_buffer);
+
+   if (vs_prog_data->uses_basevertex || vs_prog_data->uses_baseinstance)
+      emit_base_vertex_instance(cmd_buffer, firstVertex, firstInstance);
+
+   anv_batch_emit(&cmd_buffer->batch, GENX(3DPRIMITIVE),
+      .VertexAccessType                         = SEQUENTIAL,
+      .PrimitiveTopologyType                    = pipeline->topology,
+      .VertexCountPerInstance                   = vertexCount,
+      .StartVertexLocation                      = firstVertex,
+      .InstanceCount                            = instanceCount,
+      .StartInstanceLocation                    = firstInstance,
+      .BaseVertexLocation                       = 0);
+}
+
+void genX(CmdDrawIndexed)(
+    VkCommandBuffer                             commandBuffer,
+    uint32_t                                    indexCount,
+    uint32_t                                    instanceCount,
+    uint32_t                                    firstIndex,
+    int32_t                                     vertexOffset,
+    uint32_t                                    firstInstance)
+{
+   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
+   struct anv_pipeline *pipeline = cmd_buffer->state.pipeline;
+   const struct brw_vs_prog_data *vs_prog_data = get_vs_prog_data(pipeline);
+
+   genX(cmd_buffer_flush_state)(cmd_buffer);
+
+   if (vs_prog_data->uses_basevertex || vs_prog_data->uses_baseinstance)
+      emit_base_vertex_instance(cmd_buffer, vertexOffset, firstInstance);
+
+   anv_batch_emit(&cmd_buffer->batch, GENX(3DPRIMITIVE),
+      .VertexAccessType                         = RANDOM,
+      .PrimitiveTopologyType                    = pipeline->topology,
+      .VertexCountPerInstance                   = indexCount,
+      .StartVertexLocation                      = firstIndex,
+      .InstanceCount                            = instanceCount,
+      .StartInstanceLocation                    = firstInstance,
+      .BaseVertexLocation                       = vertexOffset);
+}
+
+/* Auto-Draw / Indirect Registers */
+#define GEN7_3DPRIM_END_OFFSET          0x2420
+#define GEN7_3DPRIM_START_VERTEX        0x2430
+#define GEN7_3DPRIM_VERTEX_COUNT        0x2434
+#define GEN7_3DPRIM_INSTANCE_COUNT      0x2438
+#define GEN7_3DPRIM_START_INSTANCE      0x243C
+#define GEN7_3DPRIM_BASE_VERTEX         0x2440
+
+static void
+emit_lrm(struct anv_batch *batch,
+         uint32_t reg, struct anv_bo *bo, uint32_t offset)
+{
+   anv_batch_emit(batch, GENX(MI_LOAD_REGISTER_MEM),
+                  .RegisterAddress = reg,
+                  .MemoryAddress = { bo, offset });
+}
+
+static void
+emit_lri(struct anv_batch *batch, uint32_t reg, uint32_t imm)
+{
+   anv_batch_emit(batch, GENX(MI_LOAD_REGISTER_IMM),
+                  .RegisterOffset = reg,
+                  .DataDWord = imm);
+}
+
+void genX(CmdDrawIndirect)(
+    VkCommandBuffer                             commandBuffer,
+    VkBuffer                                    _buffer,
+    VkDeviceSize                                offset,
+    uint32_t                                    drawCount,
+    uint32_t                                    stride)
+{
+   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
+   ANV_FROM_HANDLE(anv_buffer, buffer, _buffer);
+   struct anv_pipeline *pipeline = cmd_buffer->state.pipeline;
+   const struct brw_vs_prog_data *vs_prog_data = get_vs_prog_data(pipeline);
+   struct anv_bo *bo = buffer->bo;
+   uint32_t bo_offset = buffer->offset + offset;
+
+   genX(cmd_buffer_flush_state)(cmd_buffer);
+
+   if (vs_prog_data->uses_basevertex || vs_prog_data->uses_baseinstance)
+      emit_base_vertex_instance_bo(cmd_buffer, bo, bo_offset + 8);
+
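+   /* VkDrawIndirectCommand is { vertexCount, instanceCount, firstVertex,
+    * firstInstance }, so the four dwords map directly onto the 3DPRIM
+    * registers loaded below; the base vertex is forced to zero.
+    */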
+   emit_lrm(&cmd_buffer->batch, GEN7_3DPRIM_VERTEX_COUNT, bo, bo_offset);
+   emit_lrm(&cmd_buffer->batch, GEN7_3DPRIM_INSTANCE_COUNT, bo, bo_offset + 4);
+   emit_lrm(&cmd_buffer->batch, GEN7_3DPRIM_START_VERTEX, bo, bo_offset + 8);
+   emit_lrm(&cmd_buffer->batch, GEN7_3DPRIM_START_INSTANCE, bo, bo_offset + 12);
+   emit_lri(&cmd_buffer->batch, GEN7_3DPRIM_BASE_VERTEX, 0);
+
+   anv_batch_emit(&cmd_buffer->batch, GENX(3DPRIMITIVE),
+      .IndirectParameterEnable                  = true,
+      .VertexAccessType                         = SEQUENTIAL,
+      .PrimitiveTopologyType                    = pipeline->topology);
+}
+
+void genX(CmdDrawIndexedIndirect)(
+    VkCommandBuffer                             commandBuffer,
+    VkBuffer                                    _buffer,
+    VkDeviceSize                                offset,
+    uint32_t                                    drawCount,
+    uint32_t                                    stride)
+{
+   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
+   ANV_FROM_HANDLE(anv_buffer, buffer, _buffer);
+   struct anv_pipeline *pipeline = cmd_buffer->state.pipeline;
+   const struct brw_vs_prog_data *vs_prog_data = get_vs_prog_data(pipeline);
+   struct anv_bo *bo = buffer->bo;
+   uint32_t bo_offset = buffer->offset + offset;
+
+   genX(cmd_buffer_flush_state)(cmd_buffer);
+
+   /* TODO: We need to stomp base vertex to 0 somehow */
+   if (vs_prog_data->uses_basevertex || vs_prog_data->uses_baseinstance)
+      emit_base_vertex_instance_bo(cmd_buffer, bo, bo_offset + 12);
+
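+   /* VkDrawIndexedIndirectCommand is { indexCount, instanceCount, firstIndex,
+    * vertexOffset, firstInstance }; the five dwords are loaded onto the
+    * corresponding 3DPRIM registers below.
+    */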
+   emit_lrm(&cmd_buffer->batch, GEN7_3DPRIM_VERTEX_COUNT, bo, bo_offset);
+   emit_lrm(&cmd_buffer->batch, GEN7_3DPRIM_INSTANCE_COUNT, bo, bo_offset + 4);
+   emit_lrm(&cmd_buffer->batch, GEN7_3DPRIM_START_VERTEX, bo, bo_offset + 8);
+   emit_lrm(&cmd_buffer->batch, GEN7_3DPRIM_BASE_VERTEX, bo, bo_offset + 12);
+   emit_lrm(&cmd_buffer->batch, GEN7_3DPRIM_START_INSTANCE, bo, bo_offset + 16);
+
+   anv_batch_emit(&cmd_buffer->batch, GENX(3DPRIMITIVE),
+      .IndirectParameterEnable                  = true,
+      .VertexAccessType                         = RANDOM,
+      .PrimitiveTopologyType                    = pipeline->topology);
+}
+
+
+void genX(CmdDispatch)(
+    VkCommandBuffer                             commandBuffer,
+    uint32_t                                    x,
+    uint32_t                                    y,
+    uint32_t                                    z)
+{
+   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
+   struct anv_pipeline *pipeline = cmd_buffer->state.compute_pipeline;
+   const struct brw_cs_prog_data *prog_data = get_cs_prog_data(pipeline);
+
+   if (prog_data->uses_num_work_groups) {
+      struct anv_state state =
+         anv_cmd_buffer_alloc_dynamic_state(cmd_buffer, 12, 4);
+      uint32_t *sizes = state.map;
+      sizes[0] = x;
+      sizes[1] = y;
+      sizes[2] = z;
+      if (!cmd_buffer->device->info.has_llc)
+         anv_state_clflush(state);
+      cmd_buffer->state.num_workgroups_offset = state.offset;
+      cmd_buffer->state.num_workgroups_bo =
+         &cmd_buffer->device->dynamic_state_block_pool.bo;
+   }
+
+   genX(cmd_buffer_flush_compute_state)(cmd_buffer);
+
+   anv_batch_emit(&cmd_buffer->batch, GENX(GPGPU_WALKER),
+                  .SIMDSize = prog_data->simd_size / 16,
+                  .ThreadDepthCounterMaximum = 0,
+                  .ThreadHeightCounterMaximum = 0,
+                  .ThreadWidthCounterMaximum = pipeline->cs_thread_width_max - 1,
+                  .ThreadGroupIDXDimension = x,
+                  .ThreadGroupIDYDimension = y,
+                  .ThreadGroupIDZDimension = z,
+                  .RightExecutionMask = pipeline->cs_right_mask,
+                  .BottomExecutionMask = 0xffffffff);
+
+   anv_batch_emit(&cmd_buffer->batch, GENX(MEDIA_STATE_FLUSH));
+}
+
+#define GPGPU_DISPATCHDIMX 0x2500
+#define GPGPU_DISPATCHDIMY 0x2504
+#define GPGPU_DISPATCHDIMZ 0x2508
+
+#define MI_PREDICATE_SRC0  0x2400
+#define MI_PREDICATE_SRC1  0x2408
+
+void genX(CmdDispatchIndirect)(
+    VkCommandBuffer                             commandBuffer,
+    VkBuffer                                    _buffer,
+    VkDeviceSize                                offset)
+{
+   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
+   ANV_FROM_HANDLE(anv_buffer, buffer, _buffer);
+   struct anv_pipeline *pipeline = cmd_buffer->state.compute_pipeline;
+   const struct brw_cs_prog_data *prog_data = get_cs_prog_data(pipeline);
+   struct anv_bo *bo = buffer->bo;
+   uint32_t bo_offset = buffer->offset + offset;
+   struct anv_batch *batch = &cmd_buffer->batch;
+
+   if (prog_data->uses_num_work_groups) {
+      cmd_buffer->state.num_workgroups_offset = bo_offset;
+      cmd_buffer->state.num_workgroups_bo = bo;
+   }
+
+   genX(cmd_buffer_flush_compute_state)(cmd_buffer);
+
+   emit_lrm(batch, GPGPU_DISPATCHDIMX, bo, bo_offset);
+   emit_lrm(batch, GPGPU_DISPATCHDIMY, bo, bo_offset + 4);
+   emit_lrm(batch, GPGPU_DISPATCHDIMZ, bo, bo_offset + 8);
+
+#if GEN_GEN <= 7
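+   /* Build an MI_PREDICATE from the three indirect dimensions and predicate
+    * the GPGPU_WALKER on it, so that a dispatch where any dimension is zero
+    * is effectively a no-op.  See the step-by-step comments below.
+    */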
+   /* Clear upper 32-bits of SRC0 and all 64-bits of SRC1 */
+   emit_lri(batch, MI_PREDICATE_SRC0 + 4, 0);
+   emit_lri(batch, MI_PREDICATE_SRC1 + 0, 0);
+   emit_lri(batch, MI_PREDICATE_SRC1 + 4, 0);
+
+   /* Load compute_dispatch_indirect_x_size into SRC0 */
+   emit_lrm(batch, MI_PREDICATE_SRC0, bo, bo_offset + 0);
+
+   /* predicate = (compute_dispatch_indirect_x_size == 0); */
+   anv_batch_emit(batch, GENX(MI_PREDICATE),
+                  .LoadOperation = LOAD_LOAD,
+                  .CombineOperation = COMBINE_SET,
+                  .CompareOperation = COMPARE_SRCS_EQUAL);
+
+   /* Load compute_dispatch_indirect_y_size into SRC0 */
+   emit_lrm(batch, MI_PREDICATE_SRC0, bo, bo_offset + 4);
+
+   /* predicate |= (compute_dispatch_indirect_y_size == 0); */
+   anv_batch_emit(batch, GENX(MI_PREDICATE),
+                  .LoadOperation = LOAD_LOAD,
+                  .CombineOperation = COMBINE_OR,
+                  .CompareOperation = COMPARE_SRCS_EQUAL);
+
+   /* Load compute_dispatch_indirect_z_size into SRC0 */
+   emit_lrm(batch, MI_PREDICATE_SRC0, bo, bo_offset + 8);
+
+   /* predicate |= (compute_dispatch_indirect_z_size == 0); */
+   anv_batch_emit(batch, GENX(MI_PREDICATE),
+                  .LoadOperation = LOAD_LOAD,
+                  .CombineOperation = COMBINE_OR,
+                  .CompareOperation = COMPARE_SRCS_EQUAL);
+
+   /* predicate = !predicate; */
+#define COMPARE_FALSE                           1
+   anv_batch_emit(batch, GENX(MI_PREDICATE),
+                  .LoadOperation = LOAD_LOADINV,
+                  .CombineOperation = COMBINE_OR,
+                  .CompareOperation = COMPARE_FALSE);
+#endif
+
+   anv_batch_emit(batch, GENX(GPGPU_WALKER),
+                  .IndirectParameterEnable = true,
+                  .PredicateEnable = GEN_GEN <= 7,
+                  .SIMDSize = prog_data->simd_size / 16,
+                  .ThreadDepthCounterMaximum = 0,
+                  .ThreadHeightCounterMaximum = 0,
+                  .ThreadWidthCounterMaximum = pipeline->cs_thread_width_max - 1,
+                  .RightExecutionMask = pipeline->cs_right_mask,
+                  .BottomExecutionMask = 0xffffffff);
+
+   anv_batch_emit(batch, GENX(MEDIA_STATE_FLUSH));
+}
+
+static void
+flush_pipeline_before_pipeline_select(struct anv_cmd_buffer *cmd_buffer,
+                                      uint32_t pipeline)
+{
+#if GEN_GEN >= 8 && GEN_GEN < 10
+   /* From the Broadwell PRM, Volume 2a: Instructions, PIPELINE_SELECT:
+    *
+    *   Software must clear the COLOR_CALC_STATE Valid field in
+    *   3DSTATE_CC_STATE_POINTERS command prior to send a PIPELINE_SELECT
+    *   with Pipeline Select set to GPGPU.
+    *
+    * The internal hardware docs recommend the same workaround for Gen9
+    * hardware too.
+    */
+   if (pipeline == GPGPU)
+      anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_CC_STATE_POINTERS));
+#elif GEN_GEN <= 7
+      /* From "BXML » GT » MI » vol1a GPU Overview » [Instruction]
+       * PIPELINE_SELECT [DevBWR+]":
+       *
+       *   Project: DEVSNB+
+       *
+       *   Software must ensure all the write caches are flushed through a
+       *   stalling PIPE_CONTROL command followed by another PIPE_CONTROL
+       *   command to invalidate read only caches prior to programming
+       *   MI_PIPELINE_SELECT command to change the Pipeline Select Mode.
+       */
+      anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL),
+                     .RenderTargetCacheFlushEnable = true,
+                     .DepthCacheFlushEnable = true,
+                     .DCFlushEnable = true,
+                     .PostSyncOperation = NoWrite,
+                     .CommandStreamerStallEnable = true);
+
+      anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL),
+                     .TextureCacheInvalidationEnable = true,
+                     .ConstantCacheInvalidationEnable = true,
+                     .StateCacheInvalidationEnable = true,
+                     .InstructionCacheInvalidateEnable = true,
+                     .PostSyncOperation = NoWrite);
+#endif
+}
+
+void
+genX(flush_pipeline_select_3d)(struct anv_cmd_buffer *cmd_buffer)
+{
+   if (cmd_buffer->state.current_pipeline != _3D) {
+      flush_pipeline_before_pipeline_select(cmd_buffer, _3D);
+
+      anv_batch_emit(&cmd_buffer->batch, GENX(PIPELINE_SELECT),
+#if GEN_GEN >= 9
+                     .MaskBits = 3,
+#endif
+                     .PipelineSelection = _3D);
+      cmd_buffer->state.current_pipeline = _3D;
+   }
+}
+
+void
+genX(flush_pipeline_select_gpgpu)(struct anv_cmd_buffer *cmd_buffer)
+{
+   if (cmd_buffer->state.current_pipeline != GPGPU) {
+      flush_pipeline_before_pipeline_select(cmd_buffer, GPGPU);
+
+      anv_batch_emit(&cmd_buffer->batch, GENX(PIPELINE_SELECT),
+#if GEN_GEN >= 9
+                     .MaskBits = 3,
+#endif
+                     .PipelineSelection = GPGPU);
+      cmd_buffer->state.current_pipeline = GPGPU;
+   }
+}
+
+struct anv_state
+genX(cmd_buffer_alloc_null_surface_state)(struct anv_cmd_buffer *cmd_buffer,
+                                          struct anv_framebuffer *fb)
+{
+   struct anv_state state =
+      anv_state_stream_alloc(&cmd_buffer->surface_state_stream, 64, 64);
+
+   struct GENX(RENDER_SURFACE_STATE) null_ss = {
+      .SurfaceType = SURFTYPE_NULL,
+      .SurfaceArray = fb->layers > 0,
+      .SurfaceFormat = ISL_FORMAT_R8G8B8A8_UNORM,
+#if GEN_GEN >= 8
+      .TileMode = YMAJOR,
+#else
+      .TiledSurface = true,
+#endif
+      .Width = fb->width - 1,
+      .Height = fb->height - 1,
+      .Depth = fb->layers - 1,
+      .RenderTargetViewExtent = fb->layers - 1,
+   };
+
+   GENX(RENDER_SURFACE_STATE_pack)(NULL, state.map, &null_ss);
+
+   if (!cmd_buffer->device->info.has_llc)
+      anv_state_clflush(state);
+
+   return state;
+}
+
+static void
+cmd_buffer_emit_depth_stencil(struct anv_cmd_buffer *cmd_buffer)
+{
+   struct anv_device *device = cmd_buffer->device;
+   const struct anv_framebuffer *fb = cmd_buffer->state.framebuffer;
+   const struct anv_image_view *iview =
+      anv_cmd_buffer_get_depth_stencil_view(cmd_buffer);
+   const struct anv_image *image = iview ? iview->image : NULL;
+   const struct anv_format *anv_format =
+      iview ? anv_format_for_vk_format(iview->vk_format) : NULL;
+   const bool has_depth = iview && anv_format->has_depth;
+   const bool has_stencil = iview && anv_format->has_stencil;
+
+   /* FIXME: Implement the PMA stall W/A */
+   /* FIXME: Width and Height are wrong */
+
+   /* Emit 3DSTATE_DEPTH_BUFFER */
+   if (has_depth) {
+      anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_DEPTH_BUFFER),
+         .SurfaceType = SURFTYPE_2D,
+         .DepthWriteEnable = true,
+         .StencilWriteEnable = has_stencil,
+         .HierarchicalDepthBufferEnable = false,
+         .SurfaceFormat = isl_surf_get_depth_format(&device->isl_dev,
+                                                    &image->depth_surface.isl),
+         .SurfacePitch = image->depth_surface.isl.row_pitch - 1,
+         .SurfaceBaseAddress = {
+            .bo = image->bo,
+            .offset = image->offset + image->depth_surface.offset,
+         },
+         .Height = fb->height - 1,
+         .Width = fb->width - 1,
+         .LOD = 0,
+         .Depth = 1 - 1,
+         .MinimumArrayElement = 0,
+         .DepthBufferObjectControlState = GENX(MOCS),
+#if GEN_GEN >= 8
+         .SurfaceQPitch = isl_surf_get_array_pitch_el_rows(&image->depth_surface.isl) >> 2,
+#endif
+         .RenderTargetViewExtent = 1 - 1);
+   } else {
+      /* Even when no depth buffer is present, the hardware requires that
+       * 3DSTATE_DEPTH_BUFFER be programmed correctly. The Broadwell PRM says:
+       *
+       *    If a null depth buffer is bound, the driver must instead bind depth as:
+       *       3DSTATE_DEPTH.SurfaceType = SURFTYPE_2D
+       *       3DSTATE_DEPTH.Width = 1
+       *       3DSTATE_DEPTH.Height = 1
+       *       3DSTATE_DEPTH.SurfaceFormat = D16_UNORM
+       *       3DSTATE_DEPTH.SurfaceBaseAddress = 0
+       *       3DSTATE_DEPTH.HierarchicalDepthBufferEnable = 0
+       *       3DSTATE_WM_DEPTH_STENCIL.DepthTestEnable = 0
+       *       3DSTATE_WM_DEPTH_STENCIL.DepthBufferWriteEnable = 0
+       *
+       * The PRM is wrong, though. The width and height must be programmed
+       * to the actual framebuffer's width and height, even when neither depth
+       * nor stencil buffer is present.  Also, D16_UNORM is not allowed to
+       * be combined with a stencil buffer so we use D32_FLOAT instead.
+       */
+      anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_DEPTH_BUFFER),
+         .SurfaceType = SURFTYPE_2D,
+         .SurfaceFormat = D32_FLOAT,
+         .Width = fb->width - 1,
+         .Height = fb->height - 1,
+         .StencilWriteEnable = has_stencil);
+   }
+
+   /* Emit 3DSTATE_STENCIL_BUFFER */
+   if (has_stencil) {
+      anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_STENCIL_BUFFER),
+#if GEN_GEN >= 8 || GEN_IS_HASWELL
+         .StencilBufferEnable = true,
+#endif
+         .StencilBufferObjectControlState = GENX(MOCS),
+
+         /* Stencil buffers have strange pitch. The PRM says:
+          *
+          *    The pitch must be set to 2x the value computed based on width,
+          *    as the stencil buffer is stored with two rows interleaved.
+          */
+         .SurfacePitch = 2 * image->stencil_surface.isl.row_pitch - 1,
+
+#if GEN_GEN >= 8
+         .SurfaceQPitch = isl_surf_get_array_pitch_el_rows(&image->stencil_surface.isl) >> 2,
+#endif
+         .SurfaceBaseAddress = {
+            .bo = image->bo,
+            .offset = image->offset + image->stencil_surface.offset,
+         });
+   } else {
+      anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_STENCIL_BUFFER));
+   }
+
+   /* Disable hierarchical depth buffers. */
+   anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_HIER_DEPTH_BUFFER));
+
+   /* Clear the clear params. */
+   anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_CLEAR_PARAMS));
+}
+
+/**
+ * @see anv_cmd_buffer_set_subpass()
+ */
+void
+genX(cmd_buffer_set_subpass)(struct anv_cmd_buffer *cmd_buffer,
+                             struct anv_subpass *subpass)
+{
+   cmd_buffer->state.subpass = subpass;
+
+   cmd_buffer->state.descriptors_dirty |= VK_SHADER_STAGE_FRAGMENT_BIT;
+
+   cmd_buffer_emit_depth_stencil(cmd_buffer);
+}
+
+void genX(CmdBeginRenderPass)(
+    VkCommandBuffer                             commandBuffer,
+    const VkRenderPassBeginInfo*                pRenderPassBegin,
+    VkSubpassContents                           contents)
+{
+   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
+   ANV_FROM_HANDLE(anv_render_pass, pass, pRenderPassBegin->renderPass);
+   ANV_FROM_HANDLE(anv_framebuffer, framebuffer, pRenderPassBegin->framebuffer);
+
+   cmd_buffer->state.framebuffer = framebuffer;
+   cmd_buffer->state.pass = pass;
+   anv_cmd_state_setup_attachments(cmd_buffer, pRenderPassBegin);
+
+   genX(flush_pipeline_select_3d)(cmd_buffer);
+
+   const VkRect2D *render_area = &pRenderPassBegin->renderArea;
+
+   anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_DRAWING_RECTANGLE),
+                  .ClippedDrawingRectangleYMin = MAX2(render_area->offset.y, 0),
+                  .ClippedDrawingRectangleXMin = MAX2(render_area->offset.x, 0),
+                  .ClippedDrawingRectangleYMax =
+                     render_area->offset.y + render_area->extent.height - 1,
+                  .ClippedDrawingRectangleXMax =
+                     render_area->offset.x + render_area->extent.width - 1,
+                  .DrawingRectangleOriginY = 0,
+                  .DrawingRectangleOriginX = 0);
+
+   genX(cmd_buffer_set_subpass)(cmd_buffer, pass->subpasses);
+   anv_cmd_buffer_clear_subpass(cmd_buffer);
+}
+
+void genX(CmdNextSubpass)(
+    VkCommandBuffer                             commandBuffer,
+    VkSubpassContents                           contents)
+{
+   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
+
+   assert(cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY);
+
+   anv_cmd_buffer_resolve_subpass(cmd_buffer);
+   genX(cmd_buffer_set_subpass)(cmd_buffer, cmd_buffer->state.subpass + 1);
+   anv_cmd_buffer_clear_subpass(cmd_buffer);
+}
+
+void genX(CmdEndRenderPass)(
+    VkCommandBuffer                             commandBuffer)
+{
+   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
+
+   anv_cmd_buffer_resolve_subpass(cmd_buffer);
+}
+
+static void
+emit_ps_depth_count(struct anv_batch *batch,
+                    struct anv_bo *bo, uint32_t offset)
+{
+   anv_batch_emit(batch, GENX(PIPE_CONTROL),
+                  .DestinationAddressType = DAT_PPGTT,
+                  .PostSyncOperation = WritePSDepthCount,
+                  .DepthStallEnable = true,
+                  .Address = { bo, offset });
+}
+
+static void
+emit_query_availability(struct anv_batch *batch,
+                        struct anv_bo *bo, uint32_t offset)
+{
+   anv_batch_emit(batch, GENX(PIPE_CONTROL),
+                  .DestinationAddressType = DAT_PPGTT,
+                  .PostSyncOperation = WriteImmediateData,
+                  .Address = { bo, offset },
+                  .ImmediateData = 1);
+}
+
+void genX(CmdBeginQuery)(
+    VkCommandBuffer                             commandBuffer,
+    VkQueryPool                                 queryPool,
+    uint32_t                                    query,
+    VkQueryControlFlags                         flags)
+{
+   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
+   ANV_FROM_HANDLE(anv_query_pool, pool, queryPool);
+
+   /* Workaround: When meta uses the pipeline with the VS disabled, it seems
+    * that the pipelining of the depth write breaks. What we see is that
+    * samples from the render pass clear leak into the first query
+    * immediately after the clear. Doing a PIPE_CONTROL with a post-sync
+    * operation and DepthStallEnable seems to work around the issue.
+    */
+   if (cmd_buffer->state.need_query_wa) {
+      cmd_buffer->state.need_query_wa = false;
+      anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL),
+                     .DepthCacheFlushEnable = true,
+                     .DepthStallEnable = true);
+   }
+
+   switch (pool->type) {
+   case VK_QUERY_TYPE_OCCLUSION:
+      emit_ps_depth_count(&cmd_buffer->batch, &pool->bo,
+                          query * sizeof(struct anv_query_pool_slot));
+      break;
+
+   case VK_QUERY_TYPE_PIPELINE_STATISTICS:
+   default:
+      unreachable("unhandled query type");
+   }
+}
+
+void genX(CmdEndQuery)(
+    VkCommandBuffer                             commandBuffer,
+    VkQueryPool                                 queryPool,
+    uint32_t                                    query)
+{
+   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
+   ANV_FROM_HANDLE(anv_query_pool, pool, queryPool);
+
+   switch (pool->type) {
+   case VK_QUERY_TYPE_OCCLUSION:
+      emit_ps_depth_count(&cmd_buffer->batch, &pool->bo,
+                          query * sizeof(struct anv_query_pool_slot) + 8);
+
+      emit_query_availability(&cmd_buffer->batch, &pool->bo,
+                              query * sizeof(struct anv_query_pool_slot) + 16);
+      break;
+
+   case VK_QUERY_TYPE_PIPELINE_STATISTICS:
+   default:
+      unreachable("unhandled query type");
+   }
+}
+
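+/* MMIO offset of the command streamer's 64-bit TIMESTAMP register; the low
+ * dword sits at this offset and the high dword at +4, matching the pair of
+ * MI_STORE_REGISTER_MEM writes in CmdWriteTimestamp below.
+ */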
+#define TIMESTAMP 0x2358
+
+void genX(CmdWriteTimestamp)(
+    VkCommandBuffer                             commandBuffer,
+    VkPipelineStageFlagBits                     pipelineStage,
+    VkQueryPool                                 queryPool,
+    uint32_t                                    query)
+{
+   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
+   ANV_FROM_HANDLE(anv_query_pool, pool, queryPool);
+   uint32_t offset = query * sizeof(struct anv_query_pool_slot);
+
+   assert(pool->type == VK_QUERY_TYPE_TIMESTAMP);
+
+   switch (pipelineStage) {
+   case VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT:
+      anv_batch_emit(&cmd_buffer->batch, GENX(MI_STORE_REGISTER_MEM),
+                     .RegisterAddress = TIMESTAMP,
+                     .MemoryAddress = { &pool->bo, offset });
+      anv_batch_emit(&cmd_buffer->batch, GENX(MI_STORE_REGISTER_MEM),
+                     .RegisterAddress = TIMESTAMP + 4,
+                     .MemoryAddress = { &pool->bo, offset + 4 });
+      break;
+
+   default:
+      /* Everything else is bottom-of-pipe */
+      anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL),
+                     .DestinationAddressType = DAT_PPGTT,
+                     .PostSyncOperation = WriteTimestamp,
+                     .Address = { &pool->bo, offset });
+      break;
+   }
+
+   emit_query_availability(&cmd_buffer->batch, &pool->bo, offset + 16);
+}
+
+#if GEN_GEN > 7 || GEN_IS_HASWELL
+
+#define alu_opcode(v)   __gen_uint((v),  20, 31)
+#define alu_operand1(v) __gen_uint((v),  10, 19)
+#define alu_operand2(v) __gen_uint((v),   0,  9)
+#define alu(opcode, operand1, operand2) \
+   alu_opcode(opcode) | alu_operand1(operand1) | alu_operand2(operand2)
+
+#define OPCODE_NOOP      0x000
+#define OPCODE_LOAD      0x080
+#define OPCODE_LOADINV   0x480
+#define OPCODE_LOAD0     0x081
+#define OPCODE_LOAD1     0x481
+#define OPCODE_ADD       0x100
+#define OPCODE_SUB       0x101
+#define OPCODE_AND       0x102
+#define OPCODE_OR        0x103
+#define OPCODE_XOR       0x104
+#define OPCODE_STORE     0x180
+#define OPCODE_STOREINV  0x580
+
+#define OPERAND_R0   0x00
+#define OPERAND_R1   0x01
+#define OPERAND_R2   0x02
+#define OPERAND_R3   0x03
+#define OPERAND_R4   0x04
+#define OPERAND_SRCA 0x20
+#define OPERAND_SRCB 0x21
+#define OPERAND_ACCU 0x31
+#define OPERAND_ZF   0x32
+#define OPERAND_CF   0x33
+
+#define CS_GPR(n) (0x2600 + (n) * 8)
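+
+/* The alu() helper above packs a single MI_MATH instruction dword: the
+ * opcode goes in bits 31:20, operand1 in bits 19:10 and operand2 in bits
+ * 9:0.  For example, alu(OPCODE_LOAD, OPERAND_SRCA, OPERAND_R1) evaluates
+ * to 0x08008001, which loads CS GPR1 into the ALU's SRCA input.
+ */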
+
+static void
+emit_load_alu_reg_u64(struct anv_batch *batch, uint32_t reg,
+                      struct anv_bo *bo, uint32_t offset)
+{
+   anv_batch_emit(batch, GENX(MI_LOAD_REGISTER_MEM),
+                  .RegisterAddress = reg,
+                  .MemoryAddress = { bo, offset });
+   anv_batch_emit(batch, GENX(MI_LOAD_REGISTER_MEM),
+                  .RegisterAddress = reg + 4,
+                  .MemoryAddress = { bo, offset + 4 });
+}
+
+static void
+store_query_result(struct anv_batch *batch, uint32_t reg,
+                   struct anv_bo *bo, uint32_t offset, VkQueryResultFlags flags)
+{
+   anv_batch_emit(batch, GENX(MI_STORE_REGISTER_MEM),
+                  .RegisterAddress = reg,
+                  .MemoryAddress = { bo, offset });
+
+   if (flags & VK_QUERY_RESULT_64_BIT)
+      anv_batch_emit(batch, GENX(MI_STORE_REGISTER_MEM),
+                     .RegisterAddress = reg + 4,
+                     .MemoryAddress = { bo, offset + 4 });
+}
+
+void genX(CmdCopyQueryPoolResults)(
+    VkCommandBuffer                             commandBuffer,
+    VkQueryPool                                 queryPool,
+    uint32_t                                    firstQuery,
+    uint32_t                                    queryCount,
+    VkBuffer                                    destBuffer,
+    VkDeviceSize                                destOffset,
+    VkDeviceSize                                destStride,
+    VkQueryResultFlags                          flags)
+{
+   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
+   ANV_FROM_HANDLE(anv_query_pool, pool, queryPool);
+   ANV_FROM_HANDLE(anv_buffer, buffer, destBuffer);
+   uint32_t slot_offset, dst_offset;
+
+   if (flags & VK_QUERY_RESULT_WAIT_BIT)
+      anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL),
+                     .CommandStreamerStallEnable = true,
+                     .StallAtPixelScoreboard = true);
+
+   dst_offset = buffer->offset + destOffset;
+   for (uint32_t i = 0; i < queryCount; i++) {
+
+      slot_offset = (firstQuery + i) * sizeof(struct anv_query_pool_slot);
+      switch (pool->type) {
+      case VK_QUERY_TYPE_OCCLUSION:
+         emit_load_alu_reg_u64(&cmd_buffer->batch,
+                               CS_GPR(0), &pool->bo, slot_offset);
+         emit_load_alu_reg_u64(&cmd_buffer->batch,
+                               CS_GPR(1), &pool->bo, slot_offset + 8);
+
+         /* FIXME: We need to clamp the result for 32 bit. */
+
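+         /* The four ALU dwords below compute GPR2 = GPR1 - GPR0, i.e. the
+          * end-of-query depth count minus the begin-of-query depth count
+          * written by emit_ps_depth_count().
+          */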
+         uint32_t *dw = anv_batch_emitn(&cmd_buffer->batch, 5, GENX(MI_MATH));
+         dw[1] = alu(OPCODE_LOAD, OPERAND_SRCA, OPERAND_R1);
+         dw[2] = alu(OPCODE_LOAD, OPERAND_SRCB, OPERAND_R0);
+         dw[3] = alu(OPCODE_SUB, 0, 0);
+         dw[4] = alu(OPCODE_STORE, OPERAND_R2, OPERAND_ACCU);
+         break;
+
+      case VK_QUERY_TYPE_TIMESTAMP:
+         emit_load_alu_reg_u64(&cmd_buffer->batch,
+                               CS_GPR(2), &pool->bo, slot_offset);
+         break;
+
+      default:
+         unreachable("unhandled query type");
+      }
+
+      store_query_result(&cmd_buffer->batch,
+                         CS_GPR(2), buffer->bo, dst_offset, flags);
+
+      if (flags & VK_QUERY_RESULT_WITH_AVAILABILITY_BIT) {
+         emit_load_alu_reg_u64(&cmd_buffer->batch, CS_GPR(0),
+                               &pool->bo, slot_offset + 16);
+         if (flags & VK_QUERY_RESULT_64_BIT)
+            store_query_result(&cmd_buffer->batch,
+                               CS_GPR(0), buffer->bo, dst_offset + 8, flags);
+         else
+            store_query_result(&cmd_buffer->batch,
+                               CS_GPR(0), buffer->bo, dst_offset + 4, flags);
+      }
+
+      dst_offset += destStride;
+   }
+}
+
+#endif
diff --git a/src/intel/vulkan/genX_pipeline.c b/src/intel/vulkan/genX_pipeline.c
new file mode 100644 (file)
index 0000000..cc8841e
--- /dev/null
@@ -0,0 +1,129 @@
+/*
+ * Copyright © 2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "anv_private.h"
+
+#include "genxml/gen_macros.h"
+#include "genxml/genX_pack.h"
+
+VkResult
+genX(compute_pipeline_create)(
+    VkDevice                                    _device,
+    struct anv_pipeline_cache *                 cache,
+    const VkComputePipelineCreateInfo*          pCreateInfo,
+    const VkAllocationCallbacks*                pAllocator,
+    VkPipeline*                                 pPipeline)
+{
+   ANV_FROM_HANDLE(anv_device, device, _device);
+   struct anv_pipeline *pipeline;
+   VkResult result;
+
+   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO);
+
+   pipeline = anv_alloc2(&device->alloc, pAllocator, sizeof(*pipeline), 8,
+                         VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+   if (pipeline == NULL)
+      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+
+   pipeline->device = device;
+   pipeline->layout = anv_pipeline_layout_from_handle(pCreateInfo->layout);
+
+   pipeline->blend_state.map = NULL;
+
+   result = anv_reloc_list_init(&pipeline->batch_relocs,
+                                pAllocator ? pAllocator : &device->alloc);
+   if (result != VK_SUCCESS) {
+      anv_free2(&device->alloc, pAllocator, pipeline);
+      return result;
+   }
+   pipeline->batch.next = pipeline->batch.start = pipeline->batch_data;
+   pipeline->batch.end = pipeline->batch.start + sizeof(pipeline->batch_data);
+   pipeline->batch.relocs = &pipeline->batch_relocs;
+
+   /* When we free the pipeline, we detect stages based on the NULL status
+    * of various prog_data pointers.  Make them NULL by default.
+    */
+   memset(pipeline->prog_data, 0, sizeof(pipeline->prog_data));
+   memset(pipeline->scratch_start, 0, sizeof(pipeline->scratch_start));
+   memset(pipeline->bindings, 0, sizeof(pipeline->bindings));
+
+   pipeline->vs_simd8 = NO_KERNEL;
+   pipeline->vs_vec4 = NO_KERNEL;
+   pipeline->gs_kernel = NO_KERNEL;
+
+   pipeline->active_stages = 0;
+   pipeline->total_scratch = 0;
+
+   assert(pCreateInfo->stage.stage == VK_SHADER_STAGE_COMPUTE_BIT);
+   ANV_FROM_HANDLE(anv_shader_module, module,  pCreateInfo->stage.module);
+   anv_pipeline_compile_cs(pipeline, cache, pCreateInfo, module,
+                           pCreateInfo->stage.pName,
+                           pCreateInfo->stage.pSpecializationInfo);
+
+   pipeline->use_repclear = false;
+
+   const struct brw_cs_prog_data *cs_prog_data = get_cs_prog_data(pipeline);
+   const struct brw_stage_prog_data *prog_data = &cs_prog_data->base;
+
+   unsigned local_id_dwords = cs_prog_data->local_invocation_id_regs * 8;
+   unsigned push_constant_data_size =
+      (prog_data->nr_params + local_id_dwords) * 4;
+   unsigned reg_aligned_constant_size = ALIGN(push_constant_data_size, 32);
+   unsigned push_constant_regs = reg_aligned_constant_size / 32;
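+   /* Worked example (hypothetical numbers): 6 uniform params plus 3
+    * local-invocation-ID registers gives (6 + 3 * 8) * 4 = 120 bytes of push
+    * data, which rounds up to 128 bytes, i.e. 4 push constant registers of
+    * 32 bytes each.
+    */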
+
+   uint32_t group_size = cs_prog_data->local_size[0] *
+      cs_prog_data->local_size[1] * cs_prog_data->local_size[2];
+   pipeline->cs_thread_width_max =
+      DIV_ROUND_UP(group_size, cs_prog_data->simd_size);
+   uint32_t remainder = group_size & (cs_prog_data->simd_size - 1);
+
+   if (remainder > 0)
+      pipeline->cs_right_mask = ~0u >> (32 - remainder);
+   else
+      pipeline->cs_right_mask = ~0u >> (32 - cs_prog_data->simd_size);
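+
+   /* For instance (illustrative numbers), a 40-invocation workgroup compiled
+    * SIMD16 needs DIV_ROUND_UP(40, 16) = 3 threads, and the remainder of 8
+    * yields a right mask of 0xff so only 8 channels of the last thread
+    * execute.
+    */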
+
+   const uint32_t vfe_curbe_allocation =
+      push_constant_regs * pipeline->cs_thread_width_max;
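+
+   /* Continuing the hypothetical numbers above: 4 push constant registers
+    * times 3 threads would give a CURBE allocation of 12 registers.
+    */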
+
+   anv_batch_emit(&pipeline->batch, GENX(MEDIA_VFE_STATE),
+                  .ScratchSpaceBasePointer = pipeline->scratch_start[MESA_SHADER_COMPUTE],
+                  .PerThreadScratchSpace = ffs(cs_prog_data->base.total_scratch / 2048),
+#if GEN_GEN > 7
+                  .ScratchSpaceBasePointerHigh = 0,
+                  .StackSize = 0,
+#else
+                  .GPGPUMode = true,
+#endif
+                  .MaximumNumberofThreads = device->info.max_cs_threads - 1,
+                  .NumberofURBEntries = GEN_GEN <= 7 ? 0 : 2,
+                  .ResetGatewayTimer = true,
+#if GEN_GEN <= 8
+                  .BypassGatewayControl = true,
+#endif
+                  .URBEntryAllocationSize = GEN_GEN <= 7 ? 0 : 2,
+                  .CURBEAllocationSize = vfe_curbe_allocation);
+
+   *pPipeline = anv_pipeline_to_handle(pipeline);
+
+   return VK_SUCCESS;
+}
diff --git a/src/intel/vulkan/genX_pipeline_util.h b/src/intel/vulkan/genX_pipeline_util.h
new file mode 100644 (file)
index 0000000..cd138df
--- /dev/null
@@ -0,0 +1,432 @@
+/*
+ * Copyright © 2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
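+/* Choose the VERTEX_ELEMENT_STATE component control for component 'comp' of
+ * a vertex format: components the format actually provides are fetched from
+ * the vertex buffer, missing X/Y/Z components read back as 0, and a missing
+ * W reads back as 1 (integer or float flavor depending on the format).  For
+ * example, a two-channel 32-bit float format yields STORE_SRC, STORE_SRC,
+ * STORE_0, STORE_1_FP for components 0..3.
+ */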
+static uint32_t
+vertex_element_comp_control(enum isl_format format, unsigned comp)
+{
+   uint8_t bits;
+   switch (comp) {
+   case 0: bits = isl_format_layouts[format].channels.r.bits; break;
+   case 1: bits = isl_format_layouts[format].channels.g.bits; break;
+   case 2: bits = isl_format_layouts[format].channels.b.bits; break;
+   case 3: bits = isl_format_layouts[format].channels.a.bits; break;
+   default: unreachable("Invalid component");
+   }
+
+   if (bits) {
+      return VFCOMP_STORE_SRC;
+   } else if (comp < 3) {
+      return VFCOMP_STORE_0;
+   } else if (isl_format_layouts[format].channels.r.type == ISL_UINT ||
+            isl_format_layouts[format].channels.r.type == ISL_SINT) {
+      assert(comp == 3);
+      return VFCOMP_STORE_1_INT;
+   } else {
+      assert(comp == 3);
+      return VFCOMP_STORE_1_FP;
+   }
+}
+
+static void
+emit_vertex_input(struct anv_pipeline *pipeline,
+                  const VkPipelineVertexInputStateCreateInfo *info,
+                  const struct anv_graphics_pipeline_create_info *extra)
+{
+   const struct brw_vs_prog_data *vs_prog_data = get_vs_prog_data(pipeline);
+
+   uint32_t elements;
+   if (extra && extra->disable_vs) {
+      /* If the VS is disabled, just assume the user knows what they're
+       * doing and apply the layout blindly.  This can only come from
+       * meta, so this *should* be safe.
+       */
+      elements = 0;
+      for (uint32_t i = 0; i < info->vertexAttributeDescriptionCount; i++)
+         elements |= (1 << info->pVertexAttributeDescriptions[i].location);
+   } else {
+      /* Pull inputs_read out of the VS prog data */
+      uint64_t inputs_read = vs_prog_data->inputs_read;
+      assert((inputs_read & ((1 << VERT_ATTRIB_GENERIC0) - 1)) == 0);
+      elements = inputs_read >> VERT_ATTRIB_GENERIC0;
+   }
+
+#if GEN_GEN >= 8
+   /* On BDW+, we only need to allocate space for base ids.  Setting up
+    * the actual vertex and instance id is a separate packet.
+    */
+   const bool needs_svgs_elem = vs_prog_data->uses_basevertex ||
+                                vs_prog_data->uses_baseinstance;
+#else
+   /* On Haswell and prior, vertex and instance id are created by using the
+    * ComponentControl fields, so we need an element for any of them.
+    */
+   const bool needs_svgs_elem = vs_prog_data->uses_vertexid ||
+                                vs_prog_data->uses_instanceid ||
+                                vs_prog_data->uses_basevertex ||
+                                vs_prog_data->uses_baseinstance;
+#endif
+
+   uint32_t elem_count = __builtin_popcount(elements) + needs_svgs_elem;
+   if (elem_count == 0)
+      return;
+
+   uint32_t *p;
+
+   const uint32_t num_dwords = 1 + elem_count * 2;
+   p = anv_batch_emitn(&pipeline->batch, num_dwords,
+                       GENX(3DSTATE_VERTEX_ELEMENTS));
+   memset(p + 1, 0, (num_dwords - 1) * 4);
+
+   for (uint32_t i = 0; i < info->vertexAttributeDescriptionCount; i++) {
+      const VkVertexInputAttributeDescription *desc =
+         &info->pVertexAttributeDescriptions[i];
+      enum isl_format format = anv_get_isl_format(desc->format,
+                                                  VK_IMAGE_ASPECT_COLOR_BIT,
+                                                  VK_IMAGE_TILING_LINEAR,
+                                                  NULL);
+
+      assert(desc->binding < 32);
+
+      if ((elements & (1 << desc->location)) == 0)
+         continue; /* Binding unused */
+
+      uint32_t slot = __builtin_popcount(elements & ((1 << desc->location) - 1));
+
+      struct GENX(VERTEX_ELEMENT_STATE) element = {
+         .VertexBufferIndex = desc->binding,
+         .Valid = true,
+         .SourceElementFormat = format,
+         .EdgeFlagEnable = false,
+         .SourceElementOffset = desc->offset,
+         .Component0Control = vertex_element_comp_control(format, 0),
+         .Component1Control = vertex_element_comp_control(format, 1),
+         .Component2Control = vertex_element_comp_control(format, 2),
+         .Component3Control = vertex_element_comp_control(format, 3),
+      };
+      GENX(VERTEX_ELEMENT_STATE_pack)(NULL, &p[1 + slot * 2], &element);
+
+#if GEN_GEN >= 8
+      /* On Broadwell and later, we have a separate VF_INSTANCING packet
+       * that controls instancing.  On Haswell and prior, that's part of
+       * VERTEX_BUFFER_STATE which we emit later.
+       */
+      anv_batch_emit(&pipeline->batch, GENX(3DSTATE_VF_INSTANCING),
+                     .InstancingEnable = pipeline->instancing_enable[desc->binding],
+                     .VertexElementIndex = slot,
+                     /* Vulkan so far doesn't have an instance divisor, so
+                      * this is always 1 (ignored if not instancing). */
+                     .InstanceDataStepRate = 1);
+#endif
+   }
+
+   const uint32_t id_slot = __builtin_popcount(elements);
+   if (needs_svgs_elem) {
+      /* From the Broadwell PRM for the 3D_Vertex_Component_Control enum:
+       *    "Within a VERTEX_ELEMENT_STATE structure, if a Component
+       *    Control field is set to something other than VFCOMP_STORE_SRC,
+       *    no higher-numbered Component Control fields may be set to
+       *    VFCOMP_STORE_SRC"
+       *
+       * This means, that if we have BaseInstance, we need BaseVertex as
+       * well.  Just do all or nothing.
+       */
+      uint32_t base_ctrl = (vs_prog_data->uses_basevertex ||
+                            vs_prog_data->uses_baseinstance) ?
+                           VFCOMP_STORE_SRC : VFCOMP_STORE_0;
+
+      struct GENX(VERTEX_ELEMENT_STATE) element = {
+         .VertexBufferIndex = 32, /* Reserved for this */
+         .Valid = true,
+         .SourceElementFormat = ISL_FORMAT_R32G32_UINT,
+         .Component0Control = base_ctrl,
+         .Component1Control = base_ctrl,
+#if GEN_GEN >= 8
+         .Component2Control = VFCOMP_STORE_0,
+         .Component3Control = VFCOMP_STORE_0,
+#else
+         .Component2Control = VFCOMP_STORE_VID,
+         .Component3Control = VFCOMP_STORE_IID,
+#endif
+      };
+      GENX(VERTEX_ELEMENT_STATE_pack)(NULL, &p[1 + id_slot * 2], &element);
+   }
+
+#if GEN_GEN >= 8
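+   /* When vertex/instance IDs are in use, point the SGVS writes at
+    * components 2 and 3 of the SVGS element reserved above, i.e. the ones
+    * left as VFCOMP_STORE_0 on gen8+.
+    */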
+   anv_batch_emit(&pipeline->batch, GENX(3DSTATE_VF_SGVS),
+                  .VertexIDEnable = vs_prog_data->uses_vertexid,
+                  .VertexIDComponentNumber = 2,
+                  .VertexIDElementOffset = id_slot,
+                  .InstanceIDEnable = vs_prog_data->uses_instanceid,
+                  .InstanceIDComponentNumber = 3,
+                  .InstanceIDElementOffset = id_slot);
+#endif
+}
+
+static inline void
+emit_urb_setup(struct anv_pipeline *pipeline)
+{
+#if GEN_GEN == 7 && !GEN_IS_HASWELL
+   struct anv_device *device = pipeline->device;
+
+   /* From the IVB PRM Vol. 2, Part 1, Section 3.2.1:
+    *
+    *    "A PIPE_CONTROL with Post-Sync Operation set to 1h and a depth stall
+    *    needs to be sent just prior to any 3DSTATE_VS, 3DSTATE_URB_VS,
+    *    3DSTATE_CONSTANT_VS, 3DSTATE_BINDING_TABLE_POINTER_VS,
+    *    3DSTATE_SAMPLER_STATE_POINTER_VS command.  Only one PIPE_CONTROL
+    *    needs to be sent before any combination of VS associated 3DSTATE."
+    */
+   anv_batch_emit(&pipeline->batch, GEN7_PIPE_CONTROL,
+                  .DepthStallEnable = true,
+                  .PostSyncOperation = WriteImmediateData,
+                  .Address = { &device->workaround_bo, 0 });
+#endif
+
+   unsigned push_start = 0;
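+
+   /* Emit one push constant allocation per stage by reusing the _VS packet
+    * layout and patching its sub-opcode; the hardware numbers the
+    * 3DSTATE_PUSH_CONSTANT_ALLOC_* packets consecutively from VS through PS,
+    * and the 3DSTATE_URB_* loop below relies on the same trick.
+    */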
+   for (int i = MESA_SHADER_VERTEX; i <= MESA_SHADER_FRAGMENT; i++) {
+      unsigned push_size = pipeline->urb.push_size[i];
+      anv_batch_emit(&pipeline->batch, GENX(3DSTATE_PUSH_CONSTANT_ALLOC_VS),
+         ._3DCommandSubOpcode                   = 18 + i,
+         .ConstantBufferOffset                  = (push_size > 0) ? push_start : 0,
+         .ConstantBufferSize                    = push_size);
+      push_start += pipeline->urb.push_size[i];
+   }
+
+   for (int i = MESA_SHADER_VERTEX; i <= MESA_SHADER_GEOMETRY; i++) {
+      anv_batch_emit(&pipeline->batch, GENX(3DSTATE_URB_VS),
+         ._3DCommandSubOpcode                   = 48 + i,
+         .VSURBStartingAddress                  = pipeline->urb.start[i],
+         .VSURBEntryAllocationSize              = pipeline->urb.size[i] - 1,
+         .VSNumberofURBEntries                  = pipeline->urb.entries[i]);
+   }
+}
+
+static void
+emit_3dstate_sbe(struct anv_pipeline *pipeline)
+{
+   const struct brw_vs_prog_data *vs_prog_data = get_vs_prog_data(pipeline);
+   const struct brw_gs_prog_data *gs_prog_data = get_gs_prog_data(pipeline);
+   const struct brw_wm_prog_data *wm_prog_data = get_wm_prog_data(pipeline);
+   const struct brw_vue_map *fs_input_map;
+
+   if (pipeline->gs_kernel == NO_KERNEL)
+      fs_input_map = &vs_prog_data->base.vue_map;
+   else
+      fs_input_map = &gs_prog_data->base.vue_map;
+
+   struct GENX(3DSTATE_SBE) sbe = {
+      GENX(3DSTATE_SBE_header),
+      .AttributeSwizzleEnable = true,
+      .PointSpriteTextureCoordinateOrigin = UPPERLEFT,
+      .NumberofSFOutputAttributes = wm_prog_data->num_varying_inputs,
+
+#if GEN_GEN >= 9
+      .Attribute0ActiveComponentFormat = ACF_XYZW,
+      .Attribute1ActiveComponentFormat = ACF_XYZW,
+      .Attribute2ActiveComponentFormat = ACF_XYZW,
+      .Attribute3ActiveComponentFormat = ACF_XYZW,
+      .Attribute4ActiveComponentFormat = ACF_XYZW,
+      .Attribute5ActiveComponentFormat = ACF_XYZW,
+      .Attribute6ActiveComponentFormat = ACF_XYZW,
+      .Attribute7ActiveComponentFormat = ACF_XYZW,
+      .Attribute8ActiveComponentFormat = ACF_XYZW,
+      .Attribute9ActiveComponentFormat = ACF_XYZW,
+      .Attribute10ActiveComponentFormat = ACF_XYZW,
+      .Attribute11ActiveComponentFormat = ACF_XYZW,
+      .Attribute12ActiveComponentFormat = ACF_XYZW,
+      .Attribute13ActiveComponentFormat = ACF_XYZW,
+      .Attribute14ActiveComponentFormat = ACF_XYZW,
+      .Attribute15ActiveComponentFormat = ACF_XYZW,
+      /* wow, much field, very attribute */
+      .Attribute16ActiveComponentFormat = ACF_XYZW,
+      .Attribute17ActiveComponentFormat = ACF_XYZW,
+      .Attribute18ActiveComponentFormat = ACF_XYZW,
+      .Attribute19ActiveComponentFormat = ACF_XYZW,
+      .Attribute20ActiveComponentFormat = ACF_XYZW,
+      .Attribute21ActiveComponentFormat = ACF_XYZW,
+      .Attribute22ActiveComponentFormat = ACF_XYZW,
+      .Attribute23ActiveComponentFormat = ACF_XYZW,
+      .Attribute24ActiveComponentFormat = ACF_XYZW,
+      .Attribute25ActiveComponentFormat = ACF_XYZW,
+      .Attribute26ActiveComponentFormat = ACF_XYZW,
+      .Attribute27ActiveComponentFormat = ACF_XYZW,
+      .Attribute28ActiveComponentFormat = ACF_XYZW,
+      .Attribute29ActiveComponentFormat = ACF_XYZW,
+      .Attribute30ActiveComponentFormat = ACF_XYZW,
+#endif
+   };
+
+#if GEN_GEN >= 8
+   /* On Broadwell, they broke 3DSTATE_SBE into two packets */
+   struct GENX(3DSTATE_SBE_SWIZ) swiz = {
+      GENX(3DSTATE_SBE_SWIZ_header),
+   };
+#else
+#  define swiz sbe
+#endif
+
+   int max_source_attr = 0;
+   for (int attr = 0; attr < VARYING_SLOT_MAX; attr++) {
+      int input_index = wm_prog_data->urb_setup[attr];
+
+      if (input_index < 0)
+         continue;
+
+      const int slot = fs_input_map->varying_to_slot[attr];
+
+      if (input_index >= 16)
+         continue;
+
+      if (slot == -1) {
+         /* This attribute does not exist in the VUE--that means that the
+          * vertex shader did not write to it.  It could be that it's a
+          * regular varying read by the fragment shader but not written by
+          * the vertex shader or it's gl_PrimitiveID. In the first case the
+          * value is undefined, in the second it needs to be
+          * gl_PrimitiveID.
+          */
+         swiz.Attribute[input_index].ConstantSource = PRIM_ID;
+         swiz.Attribute[input_index].ComponentOverrideX = true;
+         swiz.Attribute[input_index].ComponentOverrideY = true;
+         swiz.Attribute[input_index].ComponentOverrideZ = true;
+         swiz.Attribute[input_index].ComponentOverrideW = true;
+      } else {
+         assert(slot >= 2);
+         const int source_attr = slot - 2;
+         max_source_attr = MAX2(max_source_attr, source_attr);
+         /* We have to subtract two slots to account for the URB entry output
+          * read offset in the VS and GS stages.
+          */
+         swiz.Attribute[input_index].SourceAttribute = source_attr;
+      }
+   }
+
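+   /* The URB read length is counted in pairs of attributes, hence the divide
+    * by two; e.g. a highest source attribute of 5 programs a read length of
+    * DIV_ROUND_UP(6, 2) = 3.
+    */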
+   sbe.VertexURBEntryReadOffset = 1; /* Skip the VUE header and position slots */
+   sbe.VertexURBEntryReadLength = DIV_ROUND_UP(max_source_attr + 1, 2);
+
+   uint32_t *dw = anv_batch_emit_dwords(&pipeline->batch,
+                                        GENX(3DSTATE_SBE_length));
+   GENX(3DSTATE_SBE_pack)(&pipeline->batch, dw, &sbe);
+
+#if GEN_GEN >= 8
+   dw = anv_batch_emit_dwords(&pipeline->batch, GENX(3DSTATE_SBE_SWIZ_length));
+   GENX(3DSTATE_SBE_SWIZ_pack)(&pipeline->batch, dw, &swiz);
+#endif
+}
+
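+/* Encode a stage's scratch requirement for the PerThreadScratchSpace fields:
+ * ffs() of the size in 2 kB chunks, so e.g. 4 kB of scratch encodes as
+ * ffs(4096 / 2048) = 2 and no scratch at all encodes as 0.
+ */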
+static inline uint32_t
+scratch_space(const struct brw_stage_prog_data *prog_data)
+{
+   return ffs(prog_data->total_scratch / 2048);
+}
+
+static const uint32_t vk_to_gen_cullmode[] = {
+   [VK_CULL_MODE_NONE]                       = CULLMODE_NONE,
+   [VK_CULL_MODE_FRONT_BIT]                  = CULLMODE_FRONT,
+   [VK_CULL_MODE_BACK_BIT]                   = CULLMODE_BACK,
+   [VK_CULL_MODE_FRONT_AND_BACK]             = CULLMODE_BOTH
+};
+
+static const uint32_t vk_to_gen_fillmode[] = {
+   [VK_POLYGON_MODE_FILL]                    = FILL_MODE_SOLID,
+   [VK_POLYGON_MODE_LINE]                    = FILL_MODE_WIREFRAME,
+   [VK_POLYGON_MODE_POINT]                   = FILL_MODE_POINT,
+};
+
+static const uint32_t vk_to_gen_front_face[] = {
+   [VK_FRONT_FACE_COUNTER_CLOCKWISE]         = 1,
+   [VK_FRONT_FACE_CLOCKWISE]                 = 0
+};
+
+static const uint32_t vk_to_gen_logic_op[] = {
+   [VK_LOGIC_OP_COPY]                        = LOGICOP_COPY,
+   [VK_LOGIC_OP_CLEAR]                       = LOGICOP_CLEAR,
+   [VK_LOGIC_OP_AND]                         = LOGICOP_AND,
+   [VK_LOGIC_OP_AND_REVERSE]                 = LOGICOP_AND_REVERSE,
+   [VK_LOGIC_OP_AND_INVERTED]                = LOGICOP_AND_INVERTED,
+   [VK_LOGIC_OP_NO_OP]                       = LOGICOP_NOOP,
+   [VK_LOGIC_OP_XOR]                         = LOGICOP_XOR,
+   [VK_LOGIC_OP_OR]                          = LOGICOP_OR,
+   [VK_LOGIC_OP_NOR]                         = LOGICOP_NOR,
+   [VK_LOGIC_OP_EQUIVALENT]                  = LOGICOP_EQUIV,
+   [VK_LOGIC_OP_INVERT]                      = LOGICOP_INVERT,
+   [VK_LOGIC_OP_OR_REVERSE]                  = LOGICOP_OR_REVERSE,
+   [VK_LOGIC_OP_COPY_INVERTED]               = LOGICOP_COPY_INVERTED,
+   [VK_LOGIC_OP_OR_INVERTED]                 = LOGICOP_OR_INVERTED,
+   [VK_LOGIC_OP_NAND]                        = LOGICOP_NAND,
+   [VK_LOGIC_OP_SET]                         = LOGICOP_SET,
+};
+
+static const uint32_t vk_to_gen_blend[] = {
+   [VK_BLEND_FACTOR_ZERO]                    = BLENDFACTOR_ZERO,
+   [VK_BLEND_FACTOR_ONE]                     = BLENDFACTOR_ONE,
+   [VK_BLEND_FACTOR_SRC_COLOR]               = BLENDFACTOR_SRC_COLOR,
+   [VK_BLEND_FACTOR_ONE_MINUS_SRC_COLOR]     = BLENDFACTOR_INV_SRC_COLOR,
+   [VK_BLEND_FACTOR_DST_COLOR]               = BLENDFACTOR_DST_COLOR,
+   [VK_BLEND_FACTOR_ONE_MINUS_DST_COLOR]     = BLENDFACTOR_INV_DST_COLOR,
+   [VK_BLEND_FACTOR_SRC_ALPHA]               = BLENDFACTOR_SRC_ALPHA,
+   [VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA]     = BLENDFACTOR_INV_SRC_ALPHA,
+   [VK_BLEND_FACTOR_DST_ALPHA]               = BLENDFACTOR_DST_ALPHA,
+   [VK_BLEND_FACTOR_ONE_MINUS_DST_ALPHA]     = BLENDFACTOR_INV_DST_ALPHA,
+   [VK_BLEND_FACTOR_CONSTANT_COLOR]          = BLENDFACTOR_CONST_COLOR,
+   [VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_COLOR]= BLENDFACTOR_INV_CONST_COLOR,
+   [VK_BLEND_FACTOR_CONSTANT_ALPHA]          = BLENDFACTOR_CONST_ALPHA,
+   [VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA]= BLENDFACTOR_INV_CONST_ALPHA,
+   [VK_BLEND_FACTOR_SRC_ALPHA_SATURATE]      = BLENDFACTOR_SRC_ALPHA_SATURATE,
+   [VK_BLEND_FACTOR_SRC1_COLOR]              = BLENDFACTOR_SRC1_COLOR,
+   [VK_BLEND_FACTOR_ONE_MINUS_SRC1_COLOR]    = BLENDFACTOR_INV_SRC1_COLOR,
+   [VK_BLEND_FACTOR_SRC1_ALPHA]              = BLENDFACTOR_SRC1_ALPHA,
+   [VK_BLEND_FACTOR_ONE_MINUS_SRC1_ALPHA]    = BLENDFACTOR_INV_SRC1_ALPHA,
+};
+
+static const uint32_t vk_to_gen_blend_op[] = {
+   [VK_BLEND_OP_ADD]                         = BLENDFUNCTION_ADD,
+   [VK_BLEND_OP_SUBTRACT]                    = BLENDFUNCTION_SUBTRACT,
+   [VK_BLEND_OP_REVERSE_SUBTRACT]            = BLENDFUNCTION_REVERSE_SUBTRACT,
+   [VK_BLEND_OP_MIN]                         = BLENDFUNCTION_MIN,
+   [VK_BLEND_OP_MAX]                         = BLENDFUNCTION_MAX,
+};
+
+static const uint32_t vk_to_gen_compare_op[] = {
+   [VK_COMPARE_OP_NEVER]                        = PREFILTEROPNEVER,
+   [VK_COMPARE_OP_LESS]                         = PREFILTEROPLESS,
+   [VK_COMPARE_OP_EQUAL]                        = PREFILTEROPEQUAL,
+   [VK_COMPARE_OP_LESS_OR_EQUAL]                = PREFILTEROPLEQUAL,
+   [VK_COMPARE_OP_GREATER]                      = PREFILTEROPGREATER,
+   [VK_COMPARE_OP_NOT_EQUAL]                    = PREFILTEROPNOTEQUAL,
+   [VK_COMPARE_OP_GREATER_OR_EQUAL]             = PREFILTEROPGEQUAL,
+   [VK_COMPARE_OP_ALWAYS]                       = PREFILTEROPALWAYS,
+};
+
+static const uint32_t vk_to_gen_stencil_op[] = {
+   [VK_STENCIL_OP_KEEP]                         = STENCILOP_KEEP,
+   [VK_STENCIL_OP_ZERO]                         = STENCILOP_ZERO,
+   [VK_STENCIL_OP_REPLACE]                      = STENCILOP_REPLACE,
+   [VK_STENCIL_OP_INCREMENT_AND_CLAMP]          = STENCILOP_INCRSAT,
+   [VK_STENCIL_OP_DECREMENT_AND_CLAMP]          = STENCILOP_DECRSAT,
+   [VK_STENCIL_OP_INVERT]                       = STENCILOP_INVERT,
+   [VK_STENCIL_OP_INCREMENT_AND_WRAP]           = STENCILOP_INCR,
+   [VK_STENCIL_OP_DECREMENT_AND_WRAP]           = STENCILOP_DECR,
+};
diff --git a/src/intel/vulkan/genX_state.c b/src/intel/vulkan/genX_state.c
new file mode 100644 (file)
index 0000000..900f6dc
--- /dev/null
@@ -0,0 +1,270 @@
+/*
+ * Copyright © 2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include <assert.h>
+#include <stdbool.h>
+#include <string.h>
+#include <unistd.h>
+#include <fcntl.h>
+
+#include "anv_private.h"
+
+#include "genxml/gen_macros.h"
+#include "genxml/genX_pack.h"
+
+VkResult
+genX(init_device_state)(struct anv_device *device)
+{
+   GENX(MEMORY_OBJECT_CONTROL_STATE_pack)(NULL, &device->default_mocs,
+                                          &GENX(MOCS));
+
+   struct anv_batch batch;
+
+   uint32_t cmds[64];
+   batch.start = batch.next = cmds;
+   batch.end = (void *) cmds + sizeof(cmds);
+
+   anv_batch_emit(&batch, GENX(PIPELINE_SELECT),
+#if GEN_GEN >= 9
+                  .MaskBits = 3,
+#endif
+                  .PipelineSelection = _3D);
+
+   anv_batch_emit(&batch, GENX(3DSTATE_VF_STATISTICS),
+                  .StatisticsEnable = true);
+   anv_batch_emit(&batch, GENX(3DSTATE_HS));
+   anv_batch_emit(&batch, GENX(3DSTATE_TE));
+   anv_batch_emit(&batch, GENX(3DSTATE_DS));
+
+   anv_batch_emit(&batch, GENX(3DSTATE_STREAMOUT), .SOFunctionEnable = false);
+   anv_batch_emit(&batch, GENX(3DSTATE_AA_LINE_PARAMETERS));
+
+#if GEN_GEN >= 8
+   anv_batch_emit(&batch, GENX(3DSTATE_WM_CHROMAKEY),
+                  .ChromaKeyKillEnable = false);
+
+   /* See the Vulkan 1.0 spec Table 24.1 "Standard sample locations" and
+    * VkPhysicalDeviceFeatures::standardSampleLocations.
+    */
+   anv_batch_emit(&batch, GENX(3DSTATE_SAMPLE_PATTERN),
+      ._1xSample0XOffset      = 0.5,
+      ._1xSample0YOffset      = 0.5,
+      ._2xSample0XOffset      = 0.25,
+      ._2xSample0YOffset      = 0.25,
+      ._2xSample1XOffset      = 0.75,
+      ._2xSample1YOffset      = 0.75,
+      ._4xSample0XOffset      = 0.375,
+      ._4xSample0YOffset      = 0.125,
+      ._4xSample1XOffset      = 0.875,
+      ._4xSample1YOffset      = 0.375,
+      ._4xSample2XOffset      = 0.125,
+      ._4xSample2YOffset      = 0.625,
+      ._4xSample3XOffset      = 0.625,
+      ._4xSample3YOffset      = 0.875,
+      ._8xSample0XOffset      = 0.5625,
+      ._8xSample0YOffset      = 0.3125,
+      ._8xSample1XOffset      = 0.4375,
+      ._8xSample1YOffset      = 0.6875,
+      ._8xSample2XOffset      = 0.8125,
+      ._8xSample2YOffset      = 0.5625,
+      ._8xSample3XOffset      = 0.3125,
+      ._8xSample3YOffset      = 0.1875,
+      ._8xSample4XOffset      = 0.1875,
+      ._8xSample4YOffset      = 0.8125,
+      ._8xSample5XOffset      = 0.0625,
+      ._8xSample5YOffset      = 0.4375,
+      ._8xSample6XOffset      = 0.6875,
+      ._8xSample6YOffset      = 0.9375,
+      ._8xSample7XOffset      = 0.9375,
+      ._8xSample7YOffset      = 0.0625,
+#if GEN_GEN >= 9
+      ._16xSample0XOffset     = 0.5625,
+      ._16xSample0YOffset     = 0.5625,
+      ._16xSample1XOffset     = 0.4375,
+      ._16xSample1YOffset     = 0.3125,
+      ._16xSample2XOffset     = 0.3125,
+      ._16xSample2YOffset     = 0.6250,
+      ._16xSample3XOffset     = 0.7500,
+      ._16xSample3YOffset     = 0.4375,
+      ._16xSample4XOffset     = 0.1875,
+      ._16xSample4YOffset     = 0.3750,
+      ._16xSample5XOffset     = 0.6250,
+      ._16xSample5YOffset     = 0.8125,
+      ._16xSample6XOffset     = 0.8125,
+      ._16xSample6YOffset     = 0.6875,
+      ._16xSample7XOffset     = 0.6875,
+      ._16xSample7YOffset     = 0.1875,
+      ._16xSample8XOffset     = 0.3750,
+      ._16xSample8YOffset     = 0.8750,
+      ._16xSample9XOffset     = 0.5000,
+      ._16xSample9YOffset     = 0.0625,
+      ._16xSample10XOffset    = 0.2500,
+      ._16xSample10YOffset    = 0.1250,
+      ._16xSample11XOffset    = 0.1250,
+      ._16xSample11YOffset    = 0.7500,
+      ._16xSample12XOffset    = 0.0000,
+      ._16xSample12YOffset    = 0.5000,
+      ._16xSample13XOffset    = 0.9375,
+      ._16xSample13YOffset    = 0.2500,
+      ._16xSample14XOffset    = 0.8750,
+      ._16xSample14YOffset    = 0.9375,
+      ._16xSample15XOffset    = 0.0625,
+      ._16xSample15YOffset    = 0.0000,
+#endif
+   );
+#endif
+
+   anv_batch_emit(&batch, GENX(MI_BATCH_BUFFER_END));
+
+   assert(batch.next <= batch.end);
+
+   return anv_device_submit_simple_batch(device, &batch);
+}
+
+static inline uint32_t
+vk_to_gen_tex_filter(VkFilter filter, bool anisotropyEnable)
+{
+   switch (filter) {
+   default:
+      assert(!"Invalid filter");
+   case VK_FILTER_NEAREST:
+      return MAPFILTER_NEAREST;
+   case VK_FILTER_LINEAR:
+      return anisotropyEnable ? MAPFILTER_ANISOTROPIC : MAPFILTER_LINEAR;
+   }
+}
+
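+/* The hardware anisotropy field counts in steps of two starting at 2:1, so a
+ * Vulkan maxAnisotropy of 16.0 maps to (16 - 2) / 2 = 7 and anything at or
+ * below 2.0 clamps to 0 (2:1).
+ */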
+static inline uint32_t
+vk_to_gen_max_anisotropy(float ratio)
+{
+   return (anv_clamp_f(ratio, 2, 16) - 2) / 2;
+}
+
+static const uint32_t vk_to_gen_mipmap_mode[] = {
+   [VK_SAMPLER_MIPMAP_MODE_NEAREST]          = MIPFILTER_NEAREST,
+   [VK_SAMPLER_MIPMAP_MODE_LINEAR]           = MIPFILTER_LINEAR
+};
+
+static const uint32_t vk_to_gen_tex_address[] = {
+   [VK_SAMPLER_ADDRESS_MODE_REPEAT]          = TCM_WRAP,
+   [VK_SAMPLER_ADDRESS_MODE_MIRRORED_REPEAT] = TCM_MIRROR,
+   [VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE]   = TCM_CLAMP,
+   [VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE] = TCM_MIRROR_ONCE,
+   [VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER] = TCM_CLAMP_BORDER,
+};
+
+/* Vulkan specifies the result of shadow comparisons as:
+ *     1     if   ref <op> texel,
+ *     0     otherwise.
+ *
+ * The hardware does:
+ *     0     if texel <op> ref,
+ *     1     otherwise.
+ *
+ * So, these look a bit strange because there's both a negation
+ * and swapping of the arguments involved.
+ */
+static const uint32_t vk_to_gen_shadow_compare_op[] = {
+   [VK_COMPARE_OP_NEVER]                        = PREFILTEROPALWAYS,
+   [VK_COMPARE_OP_LESS]                         = PREFILTEROPLEQUAL,
+   [VK_COMPARE_OP_EQUAL]                        = PREFILTEROPNOTEQUAL,
+   [VK_COMPARE_OP_LESS_OR_EQUAL]                = PREFILTEROPLESS,
+   [VK_COMPARE_OP_GREATER]                      = PREFILTEROPGEQUAL,
+   [VK_COMPARE_OP_NOT_EQUAL]                    = PREFILTEROPEQUAL,
+   [VK_COMPARE_OP_GREATER_OR_EQUAL]             = PREFILTEROPGREATER,
+   [VK_COMPARE_OP_ALWAYS]                       = PREFILTEROPNEVER,
+};
+
+VkResult genX(CreateSampler)(
+    VkDevice                                    _device,
+    const VkSamplerCreateInfo*                  pCreateInfo,
+    const VkAllocationCallbacks*                pAllocator,
+    VkSampler*                                  pSampler)
+{
+   ANV_FROM_HANDLE(anv_device, device, _device);
+   struct anv_sampler *sampler;
+
+   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO);
+
+   sampler = anv_alloc2(&device->alloc, pAllocator, sizeof(*sampler), 8,
+                        VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+   if (!sampler)
+      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+
+   uint32_t border_color_offset = device->border_colors.offset +
+                                  pCreateInfo->borderColor * 64;
+
+   struct GENX(SAMPLER_STATE) sampler_state = {
+      .SamplerDisable = false,
+      .TextureBorderColorMode = DX10OGL,
+
+#if GEN_GEN >= 8
+      .LODPreClampMode = CLAMP_MODE_OGL,
+#else
+      .LODPreClampEnable = CLAMP_ENABLE_OGL,
+#endif
+
+#if GEN_GEN == 8
+      .BaseMipLevel = 0.0,
+#endif
+      .MipModeFilter = vk_to_gen_mipmap_mode[pCreateInfo->mipmapMode],
+      .MagModeFilter = vk_to_gen_tex_filter(pCreateInfo->magFilter,
+                                            pCreateInfo->anisotropyEnable),
+      .MinModeFilter = vk_to_gen_tex_filter(pCreateInfo->minFilter,
+                                            pCreateInfo->anisotropyEnable),
+      .TextureLODBias = anv_clamp_f(pCreateInfo->mipLodBias, -16, 15.996),
+      .AnisotropicAlgorithm = EWAApproximation,
+      .MinLOD = anv_clamp_f(pCreateInfo->minLod, 0, 14),
+      .MaxLOD = anv_clamp_f(pCreateInfo->maxLod, 0, 14),
+      .ChromaKeyEnable = 0,
+      .ChromaKeyIndex = 0,
+      .ChromaKeyMode = 0,
+      .ShadowFunction = vk_to_gen_shadow_compare_op[pCreateInfo->compareOp],
+      .CubeSurfaceControlMode = OVERRIDE,
+
+      .BorderColorPointer = border_color_offset,
+
+#if GEN_GEN >= 8
+      .LODClampMagnificationMode = MIPNONE,
+#endif
+
+      .MaximumAnisotropy = vk_to_gen_max_anisotropy(pCreateInfo->maxAnisotropy),
+      .RAddressMinFilterRoundingEnable = 0,
+      .RAddressMagFilterRoundingEnable = 0,
+      .VAddressMinFilterRoundingEnable = 0,
+      .VAddressMagFilterRoundingEnable = 0,
+      .UAddressMinFilterRoundingEnable = 0,
+      .UAddressMagFilterRoundingEnable = 0,
+      .TrilinearFilterQuality = 0,
+      .NonnormalizedCoordinateEnable = pCreateInfo->unnormalizedCoordinates,
+      .TCXAddressControlMode = vk_to_gen_tex_address[pCreateInfo->addressModeU],
+      .TCYAddressControlMode = vk_to_gen_tex_address[pCreateInfo->addressModeV],
+      .TCZAddressControlMode = vk_to_gen_tex_address[pCreateInfo->addressModeW],
+   };
+
+   GENX(SAMPLER_STATE_pack)(NULL, sampler->state, &sampler_state);
+
+   *pSampler = anv_sampler_to_handle(sampler);
+
+   return VK_SUCCESS;
+}
diff --git a/src/intel/vulkan/intel_icd.json.in b/src/intel/vulkan/intel_icd.json.in
new file mode 100644 (file)
index 0000000..d9b363a
--- /dev/null
@@ -0,0 +1,7 @@
+{
+    "file_format_version": "1.0.0",
+    "ICD": {
+        "library_path": "@install_libdir@/libvulkan_intel.so",
+        "abi_versions": "1.0.3"
+    }
+}
diff --git a/src/intel/vulkan/tests/.gitignore b/src/intel/vulkan/tests/.gitignore
new file mode 100644 (file)
index 0000000..5d05405
--- /dev/null
@@ -0,0 +1,5 @@
+block_pool
+block_pool_no_free
+state_pool
+state_pool_free_list_only
+state_pool_no_free
diff --git a/src/intel/vulkan/tests/Makefile.am b/src/intel/vulkan/tests/Makefile.am
new file mode 100644 (file)
index 0000000..ddff73c
--- /dev/null
@@ -0,0 +1,47 @@
+# Copyright © 2009 Intel Corporation
+#
+#  Permission is hereby granted, free of charge, to any person obtaining a
+#  copy of this software and associated documentation files (the "Software"),
+#  to deal in the Software without restriction, including without limitation
+#  on the rights to use, copy, modify, merge, publish, distribute, sub
+#  license, and/or sell copies of the Software, and to permit persons to whom
+#  the Software is furnished to do so, subject to the following conditions:
+#
+#  The above copyright notice and this permission notice (including the next
+#  paragraph) shall be included in all copies or substantial portions of the
+#  Software.
+#
+#  THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+#  IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+#  FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.  IN NO EVENT SHALL
+#  ADAM JACKSON BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+#  IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+#  CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+AM_CPPFLAGS = \
+       $(INTEL_CFLAGS) \
+       $(VALGRIND_CFLAGS) \
+       $(DEFINES) \
+       -I$(top_srcdir)/include \
+       -I$(top_srcdir)/src \
+       -I$(top_srcdir)/src/mapi \
+       -I$(top_srcdir)/src/mesa \
+       -I$(top_srcdir)/src/mesa/drivers/dri/common \
+       -I$(top_srcdir)/src/mesa/drivers/dri/i965 \
+       -I$(top_srcdir)/src/gallium/auxiliary \
+       -I$(top_srcdir)/src/gallium/include \
+       -I$(top_srcdir)/src/intel \
+       -I$(top_srcdir)/src/intel/vulkan \
+       -I$(top_builddir)/src/intel/vulkan
+
+LDADD = \
+       $(top_builddir)/src/intel/vulkan/libvulkan-test.la \
+       $(PTHREAD_LIBS) -lm -lstdc++
+
+check_PROGRAMS = \
+       block_pool_no_free \
+       state_pool_no_free \
+       state_pool_free_list_only \
+       state_pool
+
+TESTS = $(check_PROGRAMS)
diff --git a/src/intel/vulkan/tests/block_pool_no_free.c b/src/intel/vulkan/tests/block_pool_no_free.c
new file mode 100644 (file)
index 0000000..86d1a76
--- /dev/null
@@ -0,0 +1,144 @@
+/*
+ * Copyright © 2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include <pthread.h>
+
+#include "anv_private.h"
+
+#define NUM_THREADS 16
+#define BLOCKS_PER_THREAD 1024
+#define NUM_RUNS 64
+
+struct job {
+   pthread_t thread;
+   unsigned id;
+   struct anv_block_pool *pool;
+   uint32_t blocks[BLOCKS_PER_THREAD];
+   uint32_t back_blocks[BLOCKS_PER_THREAD];
+} jobs[NUM_THREADS];
+
+
+static void *alloc_blocks(void *_job)
+{
+   struct job *job = _job;
+   int32_t block, *data;
+
+   for (unsigned i = 0; i < BLOCKS_PER_THREAD; i++) {
+      block = anv_block_pool_alloc(job->pool);
+      data = job->pool->map + block;
+      *data = block;
+      assert(block >= 0);
+      job->blocks[i] = block;
+
+      block = anv_block_pool_alloc_back(job->pool);
+      data = job->pool->map + block;
+      *data = block;
+      assert(block < 0);
+      job->back_blocks[i] = -block;
+   }
+
+   for (unsigned i = 0; i < BLOCKS_PER_THREAD; i++) {
+      block = job->blocks[i];
+      data = job->pool->map + block;
+      assert(*data == block);
+
+      block = -job->back_blocks[i];
+      data = job->pool->map + block;
+      assert(*data == block);
+   }
+
+   return NULL;
+}
+
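+/* Merge-walk the per-thread offset lists, always consuming the thread whose
+ * next entry is largest, and assert that each consumed offset is strictly
+ * greater than everything consumed before it.  This fails whenever any
+ * single thread saw the pool return a smaller offset after a larger one.
+ */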
+static void validate_monotonic(uint32_t **blocks)
+{
+   /* A list of indices, one per thread */
+   unsigned next[NUM_THREADS];
+   memset(next, 0, sizeof(next));
+
+   int highest = -1;
+   while (true) {
+      /* First, we find which thread has the highest next element */
+      int thread_max = -1;
+      int max_thread_idx = -1;
+      for (unsigned i = 0; i < NUM_THREADS; i++) {
+         if (next[i] >= BLOCKS_PER_THREAD)
+            continue;
+
+         if (thread_max < blocks[i][next[i]]) {
+            thread_max = blocks[i][next[i]];
+            max_thread_idx = i;
+         }
+      }
+
+      /* The only way this can happen is if all of the next[] values are at
+       * BLOCKS_PER_THREAD, in which case, we're done.
+       */
+      if (thread_max == -1)
+         break;
+
+      /* That next element had better be higher than the previous highest */
+      assert(blocks[max_thread_idx][next[max_thread_idx]] > highest);
+
+      highest = blocks[max_thread_idx][next[max_thread_idx]];
+      next[max_thread_idx]++;
+   }
+}
+
+static void run_test()
+{
+   struct anv_device device;
+   struct anv_block_pool pool;
+
+   pthread_mutex_init(&device.mutex, NULL);
+   anv_block_pool_init(&pool, &device, 16);
+
+   for (unsigned i = 0; i < NUM_THREADS; i++) {
+      jobs[i].pool = &pool;
+      jobs[i].id = i;
+      pthread_create(&jobs[i].thread, NULL, alloc_blocks, &jobs[i]);
+   }
+
+   for (unsigned i = 0; i < NUM_THREADS; i++)
+      pthread_join(jobs[i].thread, NULL);
+
+   /* Validate that the block allocations were monotonic */
+   uint32_t *block_ptrs[NUM_THREADS];
+   for (unsigned i = 0; i < NUM_THREADS; i++)
+      block_ptrs[i] = jobs[i].blocks;
+   validate_monotonic(block_ptrs);
+
+   /* Validate that the back block allocations were monotonic */
+   for (unsigned i = 0; i < NUM_THREADS; i++)
+      block_ptrs[i] = jobs[i].back_blocks;
+   validate_monotonic(block_ptrs);
+
+   anv_block_pool_finish(&pool);
+   pthread_mutex_destroy(&device.mutex);
+}
+
+int main(int argc, char **argv)
+{
+   for (unsigned i = 0; i < NUM_RUNS; i++)
+      run_test();
+}
diff --git a/src/intel/vulkan/tests/state_pool.c b/src/intel/vulkan/tests/state_pool.c
new file mode 100644 (file)
index 0000000..878ec19
--- /dev/null
@@ -0,0 +1,57 @@
+/*
+ * Copyright © 2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include <pthread.h>
+
+#include "anv_private.h"
+
+#define NUM_THREADS 8
+#define STATES_PER_THREAD_LOG2 10
+#define STATES_PER_THREAD (1 << STATES_PER_THREAD_LOG2)
+#define NUM_RUNS 64
+
+#include "state_pool_test_helper.h"
+
+int main(int argc, char **argv)
+{
+   struct anv_device device;
+   struct anv_block_pool block_pool;
+   struct anv_state_pool state_pool;
+
+   pthread_mutex_init(&device.mutex, NULL);
+
+   for (unsigned i = 0; i < NUM_RUNS; i++) {
+      anv_block_pool_init(&block_pool, &device, 256);
+      anv_state_pool_init(&state_pool, &block_pool);
+
+      /* Grab one so a zero offset is impossible */
+      anv_state_pool_alloc(&state_pool, 16, 16);
+
+      run_state_pool_test(&state_pool);
+
+      anv_state_pool_finish(&state_pool);
+      anv_block_pool_finish(&block_pool);
+   }
+
+   pthread_mutex_destroy(&device.mutex);
+}
diff --git a/src/intel/vulkan/tests/state_pool_free_list_only.c b/src/intel/vulkan/tests/state_pool_free_list_only.c
new file mode 100644 (file)
index 0000000..2f4eb47
--- /dev/null
@@ -0,0 +1,66 @@
+/*
+ * Copyright © 2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include <pthread.h>
+
+#include "anv_private.h"
+
+#define NUM_THREADS 8
+#define STATES_PER_THREAD_LOG2 12
+#define STATES_PER_THREAD (1 << STATES_PER_THREAD_LOG2)
+
+#include "state_pool_test_helper.h"
+
+int main(int argc, char **argv)
+{
+   struct anv_device device;
+   struct anv_block_pool block_pool;
+   struct anv_state_pool state_pool;
+
+   pthread_mutex_init(&device.mutex, NULL);
+   anv_block_pool_init(&block_pool, &device, 4096);
+   anv_state_pool_init(&state_pool, &block_pool);
+
+   /* Grab one so a zero offset is impossible */
+   anv_state_pool_alloc(&state_pool, 16, 16);
+
+   /* Grab and return enough states that the state pool test below won't
+    * actually ever resize anything.
+    */
+   {
+      struct anv_state states[NUM_THREADS * STATES_PER_THREAD];
+      for (unsigned i = 0; i < NUM_THREADS * STATES_PER_THREAD; i++) {
+         states[i] = anv_state_pool_alloc(&state_pool, 16, 16);
+         assert(states[i].offset != 0);
+      }
+
+      for (unsigned i = 0; i < NUM_THREADS * STATES_PER_THREAD; i++)
+         anv_state_pool_free(&state_pool, states[i]);
+   }
+
+   run_state_pool_test(&state_pool);
+
+   anv_state_pool_finish(&state_pool);
+   anv_block_pool_finish(&block_pool);
+   pthread_mutex_destroy(&device.mutex);
+}
diff --git a/src/intel/vulkan/tests/state_pool_no_free.c b/src/intel/vulkan/tests/state_pool_no_free.c
new file mode 100644 (file)
index 0000000..4b248c2
--- /dev/null
@@ -0,0 +1,117 @@
+/*
+ * Copyright © 2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include <pthread.h>
+
+#include "anv_private.h"
+
+#define NUM_THREADS 16
+#define STATES_PER_THREAD 1024
+#define NUM_RUNS 64
+
+struct job {
+   pthread_t thread;
+   unsigned id;
+   struct anv_state_pool *pool;
+   uint32_t offsets[STATES_PER_THREAD];
+} jobs[NUM_THREADS];
+
+pthread_barrier_t barrier;
+
+static void *alloc_states(void *_job)
+{
+   struct job *job = _job;
+
+   pthread_barrier_wait(&barrier);
+
+   for (unsigned i = 0; i < STATES_PER_THREAD; i++) {
+      struct anv_state state = anv_state_pool_alloc(job->pool, 16, 16);
+      job->offsets[i] = state.offset;
+   }
+
+   return NULL;
+}
+
+static void run_test()
+{
+   struct anv_device device;
+   struct anv_block_pool block_pool;
+   struct anv_state_pool state_pool;
+
+   pthread_mutex_init(&device.mutex, NULL);
+   anv_block_pool_init(&block_pool, &device, 64);
+   anv_state_pool_init(&state_pool, &block_pool);
+
+   pthread_barrier_init(&barrier, NULL, NUM_THREADS);
+
+   for (unsigned i = 0; i < NUM_THREADS; i++) {
+      jobs[i].pool = &state_pool;
+      jobs[i].id = i;
+      pthread_create(&jobs[i].thread, NULL, alloc_states, &jobs[i]);
+   }
+
+   for (unsigned i = 0; i < NUM_THREADS; i++)
+      pthread_join(jobs[i].thread, NULL);
+
+   /* A list of indices, one per thread */
+   unsigned next[NUM_THREADS];
+   memset(next, 0, sizeof(next));
+
+   int highest = -1;
+   while (true) {
+      /* First, we find which thread has the highest next element */
+      int thread_max = -1;
+      int max_thread_idx = -1;
+      for (unsigned i = 0; i < NUM_THREADS; i++) {
+         if (next[i] >= STATES_PER_THREAD)
+            continue;
+
+         if (thread_max < jobs[i].offsets[next[i]]) {
+            thread_max = jobs[i].offsets[next[i]];
+            max_thread_idx = i;
+         }
+      }
+
+      /* The only way this can happen is if all of the next[] values are at
+       * STATES_PER_THREAD, in which case we're done.
+       */
+      if (thread_max == -1)
+         break;
+
+      /* That next element had better be higher than the previous highest */
+      assert(jobs[max_thread_idx].offsets[next[max_thread_idx]] > highest);
+
+      highest = jobs[max_thread_idx].offsets[next[max_thread_idx]];
+      next[max_thread_idx]++;
+   }
+
+   anv_state_pool_finish(&state_pool);
+   anv_block_pool_finish(&block_pool);
+   pthread_mutex_destroy(&device.mutex);
+}
+
+int main(int argc, char **argv)
+{
+   for (unsigned i = 0; i < NUM_RUNS; i++)
+      run_test();
+}
diff --git a/src/intel/vulkan/tests/state_pool_test_helper.h b/src/intel/vulkan/tests/state_pool_test_helper.h
new file mode 100644 (file)
index 0000000..0e56431
--- /dev/null
@@ -0,0 +1,71 @@
+/*
+ * Copyright © 2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include <pthread.h>
+
+struct job {
+   struct anv_state_pool *pool;
+   unsigned id;
+   pthread_t thread;
+} jobs[NUM_THREADS];
+
+pthread_barrier_t barrier;
+
+static void *alloc_states(void *void_job)
+{
+   struct job *job = void_job;
+
+   const unsigned chunk_size = 1 << (job->id % STATES_PER_THREAD_LOG2);
+   const unsigned num_chunks = STATES_PER_THREAD / chunk_size;
+
+   struct anv_state states[chunk_size];
+
+   pthread_barrier_wait(&barrier);
+
+   for (unsigned c = 0; c < num_chunks; c++) {
+      for (unsigned i = 0; i < chunk_size; i++) {
+         states[i] = anv_state_pool_alloc(job->pool, 16, 16);
+         memset(states[i].map, 139, 16);
+         assert(states[i].offset != 0);
+      }
+
+      for (unsigned i = 0; i < chunk_size; i++)
+         anv_state_pool_free(job->pool, states[i]);
+   }
+
+   return NULL;
+}
+
+static void run_state_pool_test(struct anv_state_pool *state_pool)
+{
+   pthread_barrier_init(&barrier, NULL, NUM_THREADS);
+
+   for (unsigned i = 0; i < NUM_THREADS; i++) {
+      jobs[i].pool = state_pool;
+      jobs[i].id = i;
+      pthread_create(&jobs[i].thread, NULL, alloc_states, &jobs[i]);
+   }
+
+   for (unsigned i = 0; i < NUM_THREADS; i++)
+      pthread_join(jobs[i].thread, NULL);
+}
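The helper intentionally varies the allocation pattern per thread: thread 0 allocates and frees one state at a time, while higher thread ids work in progressively larger chunks, exercising both the free list and the growth path. A minimal standalone sketch of that arithmetic, assuming the macro values used by state_pool.c above:

   #include <stdio.h>

   #define NUM_THREADS 8
   #define STATES_PER_THREAD_LOG2 10
   #define STATES_PER_THREAD (1 << STATES_PER_THREAD_LOG2)

   int main(void)
   {
      /* Mirrors chunk_size/num_chunks in alloc_states(): 1, 2, 4, ... 128
       * states per chunk across the eight threads.
       */
      for (unsigned id = 0; id < NUM_THREADS; id++) {
         unsigned chunk_size = 1u << (id % STATES_PER_THREAD_LOG2);
         unsigned num_chunks = STATES_PER_THREAD / chunk_size;
         printf("thread %u: %u chunks of %u states\n", id, num_chunks, chunk_size);
      }
      return 0;
   }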
index 468958824140a6f40c8c919f1bc1868701fdf973..2802ec9887c07c54517eea64884251a49528b906 100644 (file)
@@ -53,6 +53,7 @@ i965_compiler_FILES = \
        brw_shader.cpp \
        brw_shader.h \
        brw_surface_formats.c \
+       brw_surface_formats.h \
        brw_util.c \
        brw_util.h \
        brw_vec4_builder.h \
index 2f05a26e0e03103d87aec4cfdbb40bab0dab8d55..a95f51bfa4a9d08261adee37436c61978dae5652 100644 (file)
@@ -82,7 +82,8 @@ shader_perf_log_mesa(void *data, const char *fmt, ...)
    .lower_uadd_carry = true,                                                  \
    .lower_usub_borrow = true,                                                 \
    .lower_fdiv = true,                                                        \
-   .native_integers = true
+   .native_integers = true,                                                   \
+   .vertex_id_zero_based = true
 
 static const struct nir_shader_compiler_options scalar_nir_options = {
    COMMON_OPTIONS,
@@ -133,7 +134,7 @@ brw_compiler_create(void *mem_ctx, const struct brw_device_info *devinfo)
    compiler->scalar_stage[MESA_SHADER_TESS_EVAL] =
       devinfo->gen >= 8 && env_var_as_boolean("INTEL_SCALAR_TES", true);
    compiler->scalar_stage[MESA_SHADER_GEOMETRY] =
-      devinfo->gen >= 8 && env_var_as_boolean("INTEL_SCALAR_GS", false);
+      devinfo->gen >= 8 && env_var_as_boolean("INTEL_SCALAR_GS", true);
    compiler->scalar_stage[MESA_SHADER_FRAGMENT] = true;
    compiler->scalar_stage[MESA_SHADER_COMPUTE] = true;
 
index 27a95a3c6612cb1701883a7d6b13842f070131c0..fb5740114dc73c779419becfbdda7cb1529c6bd5 100644 (file)
@@ -94,6 +94,9 @@ struct brw_compiler {
    struct gl_shader_compiler_options glsl_compiler_options[MESA_SHADER_STAGES];
 };
 
+struct brw_compiler *
+brw_compiler_create(void *mem_ctx, const struct brw_device_info *devinfo);
+
 
 /**
  * Program key structures.
index 60b696cfb98de3b578c7a577b0d12e5476ce1557..8ef5afea149ec82b3eda5c4feebf5a69b168c635 100644 (file)
@@ -63,6 +63,7 @@
 # define GEN7_3DPRIM_VERTEXBUFFER_ACCESS_SEQUENTIAL (0 << 8)
 # define GEN7_3DPRIM_VERTEXBUFFER_ACCESS_RANDOM     (1 << 8)
 
+#ifndef _3DPRIM_POINTLIST /* FIXME: Avoid clashing with defines from bdw_pack.h */
 #define _3DPRIM_POINTLIST         0x01
 #define _3DPRIM_LINELIST          0x02
 #define _3DPRIM_LINESTRIP         0x03
@@ -86,6 +87,7 @@
 #define _3DPRIM_TRIFAN_NOSTIPPLE  0x16
 #define _3DPRIM_PATCHLIST(n) ({ assert(n > 0 && n <= 32); 0x20 + (n - 1); })
 
+#endif /* bdw_pack.h */
 
 /* We use this offset to be able to pass native primitive types in struct
  * _mesa_prim::mode.  Native primitive types are BRW_PRIM_OFFSET +
index c703fb5d4cf4f5f9c9aa3722bdcffd56e966a434..3666190fc363aecf3d7b1a4092afdf32ac573001 100644 (file)
@@ -482,3 +482,15 @@ brw_get_device_info(int devid)
 
    return devinfo;
 }
+
+const char *
+brw_get_device_name(int devid)
+{
+   switch (devid) {
+#undef CHIPSET
+#define CHIPSET(id, family, name) case id: return name;
+#include "pci_ids/i965_pci_ids.h"
+   default:
+      return NULL;
+   }
+}
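brw_get_device_name() relies on the same CHIPSET X-macro trick as brw_get_device_info(): each table entry in pci_ids/i965_pci_ids.h expands into a switch case. A hypothetical, self-contained illustration of the pattern (the id and name below are made up, not taken from the real table):

   /* In the real code the CHIPSET() lines come from #include
    * "pci_ids/i965_pci_ids.h"; here one made-up entry stands in for them.
    */
   #define CHIPSET(id, family, name) case id: return name;
   static const char *
   lookup_name_example(int devid)
   {
      switch (devid) {
      CHIPSET(0x1234, example, "Example Graphics 1000")
      default:
         return NULL;
      }
   }
   #undef CHIPSET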
index c641ffc281e921ecc0b53729b838f312600ca0c3..4e7f3135960c912683203247963d420c3e2af32c 100644 (file)
@@ -144,3 +144,4 @@ struct brw_device_info
 };
 
 const struct brw_device_info *brw_get_device_info(int devid);
+const char *brw_get_device_name(int devid);
index c295d91223c13fce633f0a22186f30295fedf927..afa8a4e9eaea36086b5023532a713f19a638c238 100644 (file)
 
 #define FILE_DEBUG_FLAG DEBUG_PRIMS
 
-static const GLuint prim_to_hw_prim[GL_TRIANGLE_STRIP_ADJACENCY+1] = {
-   [GL_POINTS] =_3DPRIM_POINTLIST,
-   [GL_LINES] = _3DPRIM_LINELIST,
-   [GL_LINE_LOOP] = _3DPRIM_LINELOOP,
-   [GL_LINE_STRIP] = _3DPRIM_LINESTRIP,
-   [GL_TRIANGLES] = _3DPRIM_TRILIST,
-   [GL_TRIANGLE_STRIP] = _3DPRIM_TRISTRIP,
-   [GL_TRIANGLE_FAN] = _3DPRIM_TRIFAN,
-   [GL_QUADS] = _3DPRIM_QUADLIST,
-   [GL_QUAD_STRIP] = _3DPRIM_QUADSTRIP,
-   [GL_POLYGON] = _3DPRIM_POLYGON,
-   [GL_LINES_ADJACENCY] = _3DPRIM_LINELIST_ADJ,
-   [GL_LINE_STRIP_ADJACENCY] = _3DPRIM_LINESTRIP_ADJ,
-   [GL_TRIANGLES_ADJACENCY] = _3DPRIM_TRILIST_ADJ,
-   [GL_TRIANGLE_STRIP_ADJACENCY] = _3DPRIM_TRISTRIP_ADJ,
-};
-
 
 static const GLenum reduced_prim[GL_POLYGON+1] = {
    [GL_POINTS] = GL_POINTS,
@@ -85,18 +68,6 @@ static const GLenum reduced_prim[GL_POLYGON+1] = {
    [GL_POLYGON] = GL_TRIANGLES
 };
 
-uint32_t
-get_hw_prim_for_gl_prim(int mode)
-{
-   if (mode >= BRW_PRIM_OFFSET)
-      return mode - BRW_PRIM_OFFSET;
-   else {
-      assert(mode < ARRAY_SIZE(prim_to_hw_prim));
-      return prim_to_hw_prim[mode];
-   }
-}
-
-
 /* When the primitive changes, set a state bit and re-validate.  Not
  * the nicest and would rather deal with this by having all the
  * programs be immune to the active primitive (ie. cope with all
index 86d2bd92726e68ee15648e20a9d0a574a31c5ac6..b5f1a8743681bae919074fead15873c4a75d64c7 100644 (file)
@@ -174,7 +174,7 @@ fs_visitor::VARYING_PULL_CONSTANT_LOAD(const fs_builder &bld,
     * CSE can later notice that those loads are all the same and eliminate
     * the redundant ones.
     */
-   fs_reg vec4_offset = vgrf(glsl_type::int_type);
+   fs_reg vec4_offset = vgrf(glsl_type::uint_type);
    bld.ADD(vec4_offset, varying_offset, brw_imm_ud(const_offset & ~0xf));
 
    int scale = 1;
@@ -433,7 +433,6 @@ fs_reg::fs_reg(struct ::brw_reg reg) :
 {
    this->reg_offset = 0;
    this->subreg_offset = 0;
-   this->reladdr = NULL;
    this->stride = 1;
    if (this->file == IMM &&
        (this->type != BRW_REGISTER_TYPE_V &&
@@ -448,7 +447,6 @@ fs_reg::equals(const fs_reg &r) const
 {
    return (this->backend_reg::equals(r) &&
            subreg_offset == r.subreg_offset &&
-           !reladdr && !r.reladdr &&
            stride == r.stride);
 }
 
@@ -853,7 +851,10 @@ fs_inst::regs_read(int arg) const
          assert(src[2].file == IMM);
          unsigned region_length = src[2].ud;
 
-         if (src[0].file == FIXED_GRF) {
+         if (src[0].file == UNIFORM) {
+            assert(region_length % 4 == 0);
+            return region_length / 4;
+         } else if (src[0].file == FIXED_GRF) {
             /* If the start of the region is not register aligned, then
              * there's some portion of the register that's technically
              * unread at the beginning.
@@ -867,7 +868,7 @@ fs_inst::regs_read(int arg) const
              * unread portion at the beginning.
              */
             if (src[0].subnr)
-               region_length += src[0].subnr * type_sz(src[0].type);
+               region_length += src[0].subnr;
 
             return DIV_ROUND_UP(region_length, REG_SIZE);
          } else {
@@ -1023,7 +1024,6 @@ fs_visitor::import_uniforms(fs_visitor *v)
    this->push_constant_loc = v->push_constant_loc;
    this->pull_constant_loc = v->pull_constant_loc;
    this->uniforms = v->uniforms;
-   this->param_size = v->param_size;
 }
 
 fs_reg *
@@ -1926,9 +1926,7 @@ fs_visitor::compact_virtual_grfs()
  * maximum number of fragment shader uniform components (64).  If
  * there are too many of these, they'd fill up all of register space.
  * So, this will push some of them out to the pull constant buffer and
- * update the program to load them.  We also use pull constants for all
- * indirect constant loads because we don't support indirect accesses in
- * registers yet.
+ * update the program to load them.
  */
 void
 fs_visitor::assign_constant_locations()
@@ -1937,20 +1935,21 @@ fs_visitor::assign_constant_locations()
    if (dispatch_width != min_dispatch_width)
       return;
 
-   unsigned int num_pull_constants = 0;
-
-   pull_constant_loc = ralloc_array(mem_ctx, int, uniforms);
-   memset(pull_constant_loc, -1, sizeof(pull_constant_loc[0]) * uniforms);
-
    bool is_live[uniforms];
    memset(is_live, 0, sizeof(is_live));
 
+   /* For each uniform slot, a value of true indicates that the given slot and
+    * the next slot must remain contiguous.  This is used to keep us from
+    * splitting arrays apart.
+    */
+   bool contiguous[uniforms];
+   memset(contiguous, 0, sizeof(contiguous));
+
    /* First, we walk through the instructions and do two things:
     *
     *  1) Figure out which uniforms are live.
     *
-    *  2) Find all indirect access of uniform arrays and flag them as needing
-    *     to go into the pull constant buffer.
+    *  2) Mark any indirectly used ranges of registers as contiguous.
     *
     * Note that we don't move constant-indexed accesses to arrays.  No
     * testing has been done of the performance impact of this choice.
@@ -1960,20 +1959,19 @@ fs_visitor::assign_constant_locations()
          if (inst->src[i].file != UNIFORM)
             continue;
 
-         if (inst->src[i].reladdr) {
-            int uniform = inst->src[i].nr;
+         int constant_nr = inst->src[i].nr + inst->src[i].reg_offset;
 
-            /* If this array isn't already present in the pull constant buffer,
-             * add it.
-             */
-            if (pull_constant_loc[uniform] == -1) {
-               assert(param_size[uniform]);
-               for (int j = 0; j < param_size[uniform]; j++)
-                  pull_constant_loc[uniform + j] = num_pull_constants++;
+         if (inst->opcode == SHADER_OPCODE_MOV_INDIRECT && i == 0) {
+            assert(inst->src[2].ud % 4 == 0);
+            unsigned last = constant_nr + (inst->src[2].ud / 4) - 1;
+            assert(last < uniforms);
+
+            for (unsigned j = constant_nr; j < last; j++) {
+               is_live[j] = true;
+               contiguous[j] = true;
             }
+            is_live[last] = true;
          } else {
-            /* Mark the the one accessed uniform as live */
-            int constant_nr = inst->src[i].nr + inst->src[i].reg_offset;
             if (constant_nr >= 0 && constant_nr < (int) uniforms)
                is_live[constant_nr] = true;
          }
@@ -1988,29 +1986,48 @@ fs_visitor::assign_constant_locations()
     * If changing this value, note the limitation about total_regs in
     * brw_curbe.c.
     */
-   unsigned int max_push_components = 16 * 8;
+   const unsigned int max_push_components = 16 * 8;
+
+   /* Cap the chunk size at 32 floats = 128 bytes, which is the maximum
+    * Vulkan push constant size.
+    */
+   const unsigned int max_chunk_size = 32;
+
    unsigned int num_push_constants = 0;
+   unsigned int num_pull_constants = 0;
 
    push_constant_loc = ralloc_array(mem_ctx, int, uniforms);
+   pull_constant_loc = ralloc_array(mem_ctx, int, uniforms);
 
-   for (unsigned int i = 0; i < uniforms; i++) {
-      if (!is_live[i] || pull_constant_loc[i] != -1) {
-         /* This UNIFORM register is either dead, or has already been demoted
-          * to a pull const.  Mark it as no longer living in the param[] array.
-          */
-         push_constant_loc[i] = -1;
+   int chunk_start = -1;
+   for (unsigned u = 0; u < uniforms; u++) {
+      push_constant_loc[u] = -1;
+      pull_constant_loc[u] = -1;
+
+      if (!is_live[u])
          continue;
-      }
 
-      if (num_push_constants < max_push_components) {
-         /* Retain as a push constant.  Record the location in the params[]
-          * array.
-          */
-         push_constant_loc[i] = num_push_constants++;
-      } else {
-         /* Demote to a pull constant. */
-         push_constant_loc[i] = -1;
-         pull_constant_loc[i] = num_pull_constants++;
+      /* This is the first live uniform in the chunk */
+      if (chunk_start < 0)
+         chunk_start = u;
+
+      /* If this element does not need to be contiguous with the next, we
+       * split at this point and everything between chunk_start and u forms a
+       * single chunk.
+       */
+      if (!contiguous[u]) {
+         unsigned chunk_size = u - chunk_start + 1;
+
+         if (num_push_constants + chunk_size <= max_push_components &&
+             chunk_size <= max_chunk_size) {
+            for (unsigned j = chunk_start; j <= u; j++)
+               push_constant_loc[j] = num_push_constants++;
+         } else {
+            for (unsigned j = chunk_start; j <= u; j++)
+               pull_constant_loc[j] = num_pull_constants++;
+         }
+
+         chunk_start = -1;
       }
    }
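The effect of the splitting above is easiest to see with numbers: with max_push_components at 128 and max_chunk_size at 32, a 40-slot indirectly accessed chunk is always demoted to pull constants, while smaller chunks stay pushed as long as the push budget holds. A sketch of the per-chunk decision, using hypothetical counters rather than the visitor's state:

   static void
   place_chunk(unsigned chunk_size, unsigned *num_push, unsigned *num_pull)
   {
      const unsigned max_push_components = 16 * 8;  /* 128 floats of push space */
      const unsigned max_chunk_size = 32;           /* 128 bytes */

      if (*num_push + chunk_size <= max_push_components &&
          chunk_size <= max_chunk_size) {
         *num_push += chunk_size;   /* the whole chunk becomes push constants */
      } else {
         *num_pull += chunk_size;   /* the whole chunk becomes pull constants */
      }
   }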
 
@@ -2041,51 +2058,67 @@ fs_visitor::assign_constant_locations()
  * or VARYING_PULL_CONSTANT_LOAD instructions which load values into VGRFs.
  */
 void
-fs_visitor::demote_pull_constants()
+fs_visitor::lower_constant_loads()
 {
-   foreach_block_and_inst (block, fs_inst, inst, cfg) {
+   const unsigned index = stage_prog_data->binding_table.pull_constants_start;
+
+   foreach_block_and_inst_safe (block, fs_inst, inst, cfg) {
+      /* Set up the annotation tracking for newly generated instructions. */
+      const fs_builder ibld(this, block, inst);
+
       for (int i = 0; i < inst->sources; i++) {
         if (inst->src[i].file != UNIFORM)
            continue;
 
-         int pull_index;
+         /* We'll handle this case later */
+         if (inst->opcode == SHADER_OPCODE_MOV_INDIRECT && i == 0)
+            continue;
+
          unsigned location = inst->src[i].nr + inst->src[i].reg_offset;
-         if (location >= uniforms) /* Out of bounds access */
-            pull_index = -1;
-         else
-            pull_index = pull_constant_loc[location];
+         if (location >= uniforms)
+            continue; /* Out of bounds access */
+
+         int pull_index = pull_constant_loc[location];
 
          if (pull_index == -1)
            continue;
 
-         /* Set up the annotation tracking for new generated instructions. */
-         const fs_builder ibld(this, block, inst);
-         const unsigned index = stage_prog_data->binding_table.pull_constants_start;
-         fs_reg dst = vgrf(glsl_type::float_type);
-
          assert(inst->src[i].stride == 0);
 
-         /* Generate a pull load into dst. */
-         if (inst->src[i].reladdr) {
-            VARYING_PULL_CONSTANT_LOAD(ibld, dst,
-                                       brw_imm_ud(index),
-                                       *inst->src[i].reladdr,
-                                       pull_index * 4);
-            inst->src[i].reladdr = NULL;
-            inst->src[i].stride = 1;
-         } else {
-            const fs_builder ubld = ibld.exec_all().group(8, 0);
-            struct brw_reg offset = brw_imm_ud((unsigned)(pull_index * 4) & ~15);
-            ubld.emit(FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD,
-                      dst, brw_imm_ud(index), offset);
-            inst->src[i].set_smear(pull_index & 3);
-         }
-         brw_mark_surface_used(prog_data, index);
+         fs_reg dst = vgrf(glsl_type::float_type);
+         const fs_builder ubld = ibld.exec_all().group(8, 0);
+         struct brw_reg offset = brw_imm_ud((unsigned)(pull_index * 4) & ~15);
+         ubld.emit(FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD,
+                   dst, brw_imm_ud(index), offset);
 
          /* Rewrite the instruction to use the temporary VGRF. */
          inst->src[i].file = VGRF;
          inst->src[i].nr = dst.nr;
          inst->src[i].reg_offset = 0;
+         inst->src[i].set_smear(pull_index & 3);
+
+         brw_mark_surface_used(prog_data, index);
+      }
+
+      if (inst->opcode == SHADER_OPCODE_MOV_INDIRECT &&
+          inst->src[0].file == UNIFORM) {
+
+         unsigned location = inst->src[0].nr + inst->src[0].reg_offset;
+         if (location >= uniforms)
+            continue; /* Out of bounds access */
+
+         int pull_index = pull_constant_loc[location];
+
+         if (pull_index == -1)
+           continue;
+
+         VARYING_PULL_CONSTANT_LOAD(ibld, inst->dst,
+                                    brw_imm_ud(index),
+                                    inst->src[1],
+                                    pull_index * 4);
+         inst->remove(block);
+
+         brw_mark_surface_used(prog_data, index);
       }
    }
    invalidate_live_intervals();
@@ -2798,10 +2831,23 @@ fs_visitor::emit_repclear_shader()
    brw_wm_prog_key *key = (brw_wm_prog_key*) this->key;
    int base_mrf = 1;
    int color_mrf = base_mrf + 2;
+   fs_inst *mov;
+
+   if (uniforms == 1) {
+      mov = bld.exec_all().group(4, 0)
+               .MOV(brw_message_reg(color_mrf),
+                    fs_reg(UNIFORM, 0, BRW_REGISTER_TYPE_F));
+   } else {
+      struct brw_reg reg =
+         brw_reg(BRW_GENERAL_REGISTER_FILE,
+                 2, 3, 0, 0, BRW_REGISTER_TYPE_F,
+                 BRW_VERTICAL_STRIDE_8,
+                 BRW_WIDTH_2,
+                 BRW_HORIZONTAL_STRIDE_4, BRW_SWIZZLE_XYZW, WRITEMASK_XYZW);
 
-   fs_inst *mov = bld.exec_all().group(4, 0)
-                     .MOV(brw_message_reg(color_mrf),
-                          fs_reg(UNIFORM, 0, BRW_REGISTER_TYPE_F));
+      mov = bld.exec_all().group(4, 0)
+               .MOV(vec4(brw_message_reg(color_mrf)), fs_reg(reg));
+   }
 
    fs_inst *write;
    if (key->nr_color_regions == 1) {
@@ -2830,8 +2876,10 @@ fs_visitor::emit_repclear_shader()
    assign_curb_setup();
 
    /* Now that we have the uniform assigned, go ahead and force it to a vec4. */
-   assert(mov->src[0].file == FIXED_GRF);
-   mov->src[0] = brw_vec4_grf(mov->src[0].nr, 0);
+   if (uniforms == 1) {
+      assert(mov->src[0].file == FIXED_GRF);
+      mov->src[0] = brw_vec4_grf(mov->src[0].nr, 0);
+   }
 }
 
 /**
@@ -4478,6 +4526,10 @@ get_lowered_simd_width(const struct brw_device_info *devinfo,
    case SHADER_OPCODE_TYPED_SURFACE_WRITE_LOGICAL:
       return 8;
 
+   case SHADER_OPCODE_MOV_INDIRECT:
+      /* Prior to Broadwell, we only have 8 address subregisters */
+      return devinfo->gen < 8 ? 8 : inst->exec_size;
+
    default:
       return inst->exec_size;
    }
@@ -4760,9 +4812,7 @@ fs_visitor::dump_instruction(backend_instruction *be_inst, FILE *file)
          break;
       case UNIFORM:
          fprintf(file, "u%d", inst->src[i].nr + inst->src[i].reg_offset);
-         if (inst->src[i].reladdr) {
-            fprintf(file, "+reladdr");
-         } else if (inst->src[i].subreg_offset) {
+         if (inst->src[i].subreg_offset) {
             fprintf(file, "+%d.%d", inst->src[i].reg_offset,
                     inst->src[i].subreg_offset);
          }
@@ -4873,7 +4923,6 @@ fs_visitor::get_instruction_generating_reg(fs_inst *start,
 {
    if (end == start ||
        end->is_partial_write() ||
-       reg.reladdr ||
        !reg.equals(end->dst)) {
       return NULL;
    } else {
@@ -5091,7 +5140,7 @@ fs_visitor::optimize()
    bld = fs_builder(this, 64);
 
    assign_constant_locations();
-   demote_pull_constants();
+   lower_constant_loads();
 
    validate();
 
index d4acc8798be3e754eae75cdadc6c7b7b7422be92..2b00129b4ba79fd1efab375cb4d9af49032c4ae1 100644 (file)
@@ -139,7 +139,7 @@ public:
    void split_virtual_grfs();
    bool compact_virtual_grfs();
    void assign_constant_locations();
-   void demote_pull_constants();
+   void lower_constant_loads();
    void invalidate_live_intervals();
    void calculate_live_intervals();
    void calculate_register_pressure();
@@ -225,7 +225,7 @@ public:
    void emit_unspill(bblock_t *block, fs_inst *inst, fs_reg reg,
                      uint32_t spill_offset, int count);
    void emit_spill(bblock_t *block, fs_inst *inst, fs_reg reg,
-                   uint32_t spill_offset, int count);
+                   uint32_t spill_offset, int count, bool we_all);
 
    void emit_nir_code();
    void nir_setup_inputs();
@@ -326,8 +326,6 @@ public:
 
    const struct brw_vue_map *input_vue_map;
 
-   int *param_size;
-
    int *virtual_grf_start;
    int *virtual_grf_end;
    brw::fs_live_variables *live_intervals;
index b58c938c53cd8338bd0fd8e021f53bff1c3cbda1..75c29c597f5d3d2481a1cd3ddcee97c9d2345bdb 100644 (file)
@@ -351,23 +351,47 @@ fs_generator::generate_mov_indirect(fs_inst *inst,
 
    unsigned imm_byte_offset = reg.nr * REG_SIZE + reg.subnr;
 
-   /* We use VxH indirect addressing, clobbering a0.0 through a0.7. */
-   struct brw_reg addr = vec8(brw_address_reg(0));
+   if (indirect_byte_offset.file == BRW_IMMEDIATE_VALUE) {
+      imm_byte_offset += indirect_byte_offset.ud;
 
-   /* The destination stride of an instruction (in bytes) must be greater
-    * than or equal to the size of the rest of the instruction.  Since the
-    * address register is of type UW, we can't use a D-type instruction.
-    * In order to get around this, re re-type to UW and use a stride.
-    */
-   indirect_byte_offset =
-      retype(spread(indirect_byte_offset, 2), BRW_REGISTER_TYPE_UW);
+      reg.nr = imm_byte_offset / REG_SIZE;
+      reg.subnr = imm_byte_offset % REG_SIZE;
+      brw_MOV(p, dst, reg);
+   } else {
+      /* Prior to Broadwell, there are only 8 address registers. */
+      assert(inst->exec_size == 8 || devinfo->gen >= 8);
 
-   /* Prior to Broadwell, there are only 8 address registers. */
-   assert(inst->exec_size == 8 || devinfo->gen >= 8);
+      /* We use VxH indirect addressing, clobbering a0.0 through a0.7. */
+      struct brw_reg addr = vec8(brw_address_reg(0));
 
-   brw_MOV(p, addr, indirect_byte_offset);
-   brw_inst_set_mask_control(devinfo, brw_last_inst, BRW_MASK_DISABLE);
-   brw_MOV(p, dst, retype(brw_VxH_indirect(0, imm_byte_offset), dst.type));
+      /* The destination stride of an instruction (in bytes) must be greater
+       * than or equal to the size of the rest of the instruction.  Since the
+       * address register is of type UW, we can't use a D-type instruction.
+       * In order to get around this, we re-type to UW and use a stride.
+       */
+      indirect_byte_offset =
+         retype(spread(indirect_byte_offset, 2), BRW_REGISTER_TYPE_UW);
+
+      if (devinfo->gen < 8) {
+         /* Prior to Broadwell, we have a restriction that the bottom 5 bits
+          * of the base offset and the bottom 5 bits of the indirect must add
+          * to less than 32.  In other words, the hardware needs to be able to
+          * add the bottom five bits of the two to get the subnumber and add
+          * the next 7 bits of each to get the actual register number.  Since
+          * the indirect may cause us to cross a register boundary, this makes
+          * it almost useless.  We could try to do something clever where we
+          * use an actual base offset if base_offset % 32 == 0, but that would
+          * mean we were generating different code depending on the base
+          * offset.  Instead, for the sake of consistency, we'll just do the
+          * add ourselves.
+          */
+         brw_ADD(p, addr, indirect_byte_offset, brw_imm_uw(imm_byte_offset));
+         brw_MOV(p, dst, retype(brw_VxH_indirect(0, 0), dst.type));
+      } else {
+         brw_MOV(p, addr, indirect_byte_offset);
+         brw_MOV(p, dst, retype(brw_VxH_indirect(0, imm_byte_offset), dst.type));
+      }
+   }
 }
 
 void
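The pre-gen8 restriction described in the comment above is purely arithmetic: the hardware adds the low 5 bits of the base offset to the low 5 bits of the indirect value to form the subregister number, and that sum must stay below 32. A small sketch with hypothetical byte offsets:

   #include <stdbool.h>

   static bool
   fits_pre_gen8(unsigned base_offset, unsigned indirect_offset)
   {
      return ((base_offset & 0x1f) + (indirect_offset & 0x1f)) < 32;
   }
   /* fits_pre_gen8(0x24, 0x1c) is false (4 + 28 == 32), which is why the code
    * above always emits the explicit ADD instead of folding imm_byte_offset
    * into the VxH access on gen7.
    */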
index cde8f0b6381bb1d939632d6760fd7455f059f503..29ef609fce38e8d210dbfbf9e7227b819d158d1c 100644 (file)
@@ -179,15 +179,6 @@ fs_visitor::nir_setup_uniforms()
       return;
 
    uniforms = nir->num_uniforms / 4;
-
-   nir_foreach_variable(var, &nir->uniforms) {
-      /* UBO's and atomics don't take up space in the uniform file */
-      if (var->interface_type != NULL || var->type->contains_atomic())
-         continue;
-
-      if (type_size_scalar(var->type) > 0)
-         param_size[var->data.driver_location / 4] = type_size_scalar(var->type);
-   }
 }
 
 static bool
@@ -774,15 +765,29 @@ fs_visitor::nir_emit_alu(const fs_builder &bld, nir_alu_instr *instr)
       inst->saturate = instr->dest.saturate;
       break;
 
-   case nir_op_fsin:
-      inst = bld.emit(SHADER_OPCODE_SIN, result, op[0]);
-      inst->saturate = instr->dest.saturate;
+   case nir_op_fsin: {
+      fs_reg tmp = bld.vgrf(BRW_REGISTER_TYPE_F);
+      inst = bld.emit(SHADER_OPCODE_SIN, tmp, op[0]);
+      if (instr->dest.saturate) {
+         inst->dst = result;
+         inst->saturate = true;
+      } else {
+         bld.MUL(result, tmp, brw_imm_f(0.99997));
+      }
       break;
+   }
 
-   case nir_op_fcos:
-      inst = bld.emit(SHADER_OPCODE_COS, result, op[0]);
-      inst->saturate = instr->dest.saturate;
+   case nir_op_fcos: {
+      fs_reg tmp = bld.vgrf(BRW_REGISTER_TYPE_F);
+      inst = bld.emit(SHADER_OPCODE_COS, tmp, op[0]);
+      if (instr->dest.saturate) {
+         inst->dst = result;
+         inst->saturate = true;
+      } else {
+         bld.MUL(result, tmp, brw_imm_f(0.99997));
+      }
       break;
+   }
 
    case nir_op_fddx:
       if (fs_key->high_quality_derivatives) {
@@ -853,9 +858,41 @@ fs_visitor::nir_emit_alu(const fs_builder &bld, nir_alu_instr *instr)
       unreachable("Should have been lowered by borrow_to_arith().");
 
    case nir_op_umod:
+   case nir_op_irem:
+      /* According to the sign table for INT DIV in the Ivy Bridge PRM, it
+       * appears that our hardware just does the right thing for signed
+       * remainder.
+       */
       bld.emit(SHADER_OPCODE_INT_REMAINDER, result, op[0], op[1]);
       break;
 
+   case nir_op_imod: {
+      /* Get a regular C-style remainder; the flag is then set below if a % b != 0. */
+      bld.emit(SHADER_OPCODE_INT_REMAINDER, result, op[0], op[1]);
+
+      /* Math instructions don't support conditional mod */
+      inst = bld.MOV(bld.null_reg_d(), result);
+      inst->conditional_mod = BRW_CONDITIONAL_NZ;
+
+      /* Now we need to determine whether the signs of the two sources differ.
+       * When we XOR the sources, the top bit of the result is 0 if the signs
+       * match and 1 if they differ, and a conditional modifier turns that
+       * into a predicate.  This leads us to an XOR.l instruction.
+       */
+      fs_reg tmp = bld.vgrf(BRW_REGISTER_TYPE_D);
+      inst = bld.XOR(tmp, op[0], op[1]);
+      inst->predicate = BRW_PREDICATE_NORMAL;
+      inst->conditional_mod = BRW_CONDITIONAL_L;
+
+      /* If the result of the initial remainder operation is non-zero and the
+       * two sources have different signs, add in a copy of op[1] to get the
+       * final integer modulus value.
+       */
+      inst = bld.ADD(result, result, op[1]);
+      inst->predicate = BRW_PREDICATE_NORMAL;
+      break;
+   }
+
    case nir_op_flt:
    case nir_op_ilt:
    case nir_op_ult:
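For reference, the predicated sequence above implements a sign-follows-divisor modulus, while nir_op_irem keeps the C remainder semantics. A plain C sketch of the two behaviours being distinguished (reference only, not driver code):

   static int
   ref_irem(int a, int b)
   {
      return a % b;                    /* result takes the sign of a */
   }

   static int
   ref_imod(int a, int b)
   {
      int r = a % b;
      if (r != 0 && ((r < 0) != (b < 0)))
         r += b;                       /* adjust so the result takes the sign of b */
      return r;
   }

For example, ref_imod(-7, 3) is 2 while ref_irem(-7, 3) is -1, matching what the predicated ADD of op[1] produces.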
@@ -993,6 +1030,34 @@ fs_visitor::nir_emit_alu(const fs_builder &bld, nir_alu_instr *instr)
       inst->saturate = instr->dest.saturate;
       break;
 
+   case nir_op_fquantize2f16: {
+      fs_reg tmp16 = bld.vgrf(BRW_REGISTER_TYPE_D);
+      fs_reg tmp32 = bld.vgrf(BRW_REGISTER_TYPE_F);
+      fs_reg zero = bld.vgrf(BRW_REGISTER_TYPE_F);
+
+      /* The destination stride must be at least as big as the source stride. */
+      tmp16.type = BRW_REGISTER_TYPE_W;
+      tmp16.stride = 2;
+
+      /* Check for denormal */
+      fs_reg abs_src0 = op[0];
+      abs_src0.abs = true;
+      bld.CMP(bld.null_reg_f(), abs_src0, brw_imm_f(ldexpf(1.0, -14)),
+              BRW_CONDITIONAL_L);
+      /* Get the appropriately signed zero */
+      bld.AND(retype(zero, BRW_REGISTER_TYPE_UD),
+              retype(op[0], BRW_REGISTER_TYPE_UD),
+              brw_imm_ud(0x80000000));
+      /* Do the actual F32 -> F16 -> F32 conversion */
+      bld.emit(BRW_OPCODE_F32TO16, tmp16, op[0]);
+      bld.emit(BRW_OPCODE_F16TO32, tmp32, tmp16);
+      /* Select that or zero based on normal status */
+      inst = bld.SEL(result, zero, tmp32);
+      inst->predicate = BRW_PREDICATE_NORMAL;
+      inst->saturate = instr->dest.saturate;
+      break;
+   }
+
    case nir_op_fmin:
    case nir_op_imin:
    case nir_op_umin:
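A scalar sketch of what the fquantize2f16 sequence above computes, assuming a compiler that provides _Float16 for the half-precision round trip (the F32TO16/F16TO32 pair in the generated code):

   #include <math.h>

   static float
   ref_fquantize2f16(float x)
   {
      if (fabsf(x) < ldexpf(1.0f, -14))   /* below the smallest normal fp16 value */
         return copysignf(0.0f, x);       /* flush to an appropriately signed zero */
      return (float)(_Float16)x;          /* round through half precision */
   }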
@@ -1202,6 +1267,8 @@ fs_visitor::get_nir_image_deref(const nir_deref_var *deref)
 {
    fs_reg image(UNIFORM, deref->var->data.driver_location / 4,
                 BRW_REGISTER_TYPE_UD);
+   fs_reg indirect;
+   unsigned indirect_max = 0;
 
    for (const nir_deref *tail = &deref->deref; tail->child;
         tail = tail->child) {
@@ -1213,7 +1280,7 @@ fs_visitor::get_nir_image_deref(const nir_deref_var *deref)
       image = offset(image, bld, base * element_size);
 
       if (deref_array->deref_array_type == nir_deref_array_type_indirect) {
-         fs_reg tmp = vgrf(glsl_type::int_type);
+         fs_reg tmp = vgrf(glsl_type::uint_type);
 
          if (devinfo->gen == 7 && !devinfo->is_haswell) {
             /* IVB hangs when trying to access an invalid surface index with
@@ -1231,15 +1298,31 @@ fs_visitor::get_nir_image_deref(const nir_deref_var *deref)
             bld.MOV(tmp, get_nir_src(deref_array->indirect));
          }
 
+         indirect_max += element_size * (tail->type->length - 1);
+
          bld.MUL(tmp, tmp, brw_imm_ud(element_size * 4));
-         if (image.reladdr)
-            bld.ADD(*image.reladdr, *image.reladdr, tmp);
-         else
-            image.reladdr = new(mem_ctx) fs_reg(tmp);
+         if (indirect.file == BAD_FILE) {
+            indirect = tmp;
+         } else {
+            bld.ADD(indirect, indirect, tmp);
+         }
       }
    }
 
-   return image;
+   if (indirect.file == BAD_FILE) {
+      return image;
+   } else {
+      /* Emit a pile of MOVs to load the uniform into a temporary.  The
+       * dead-code elimination pass will get rid of what we don't use.
+       */
+      fs_reg tmp = bld.vgrf(BRW_REGISTER_TYPE_UD, BRW_IMAGE_PARAM_SIZE);
+      for (unsigned j = 0; j < BRW_IMAGE_PARAM_SIZE; j++) {
+         bld.emit(SHADER_OPCODE_MOV_INDIRECT,
+                  offset(tmp, bld, j), offset(image, bld, j),
+                  indirect, brw_imm_ud((indirect_max + 1) * 4));
+      }
+      return tmp;
+   }
 }
 
 void
@@ -2328,6 +2411,82 @@ fs_visitor::nir_emit_cs_intrinsic(const fs_builder &bld,
       nir_emit_shared_atomic(bld, BRW_AOP_CMPWR, instr);
       break;
 
+   case nir_intrinsic_load_shared: {
+      assert(devinfo->gen >= 7);
+
+      fs_reg surf_index = brw_imm_ud(GEN7_BTI_SLM);
+
+      /* Get the offset to read from */
+      fs_reg offset_reg;
+      nir_const_value *const_offset = nir_src_as_const_value(instr->src[0]);
+      if (const_offset) {
+         offset_reg = brw_imm_ud(instr->const_index[0] + const_offset->u[0]);
+      } else {
+         offset_reg = vgrf(glsl_type::uint_type);
+         bld.ADD(offset_reg,
+                 retype(get_nir_src(instr->src[0]), BRW_REGISTER_TYPE_UD),
+                 brw_imm_ud(instr->const_index[0]));
+      }
+
+      /* Read the vector */
+      fs_reg read_result = emit_untyped_read(bld, surf_index, offset_reg,
+                                             1 /* dims */,
+                                             instr->num_components,
+                                             BRW_PREDICATE_NONE);
+      read_result.type = dest.type;
+      for (int i = 0; i < instr->num_components; i++)
+         bld.MOV(offset(dest, bld, i), offset(read_result, bld, i));
+
+      break;
+   }
+
+   case nir_intrinsic_store_shared: {
+      assert(devinfo->gen >= 7);
+
+      /* Block index */
+      fs_reg surf_index = brw_imm_ud(GEN7_BTI_SLM);
+
+      /* Value */
+      fs_reg val_reg = get_nir_src(instr->src[0]);
+
+      /* Writemask */
+      unsigned writemask = instr->const_index[1];
+
+      /* Combine groups of consecutive enabled channels in one write
+       * message. We use ffs to find the first enabled channel and then ffs on
+       * the bit-inverse, down-shifted writemask to determine the length of
+       * the block of enabled bits.
+       */
+      while (writemask) {
+         unsigned first_component = ffs(writemask) - 1;
+         unsigned length = ffs(~(writemask >> first_component)) - 1;
+         fs_reg offset_reg;
+
+         nir_const_value *const_offset = nir_src_as_const_value(instr->src[1]);
+         if (const_offset) {
+            offset_reg = brw_imm_ud(instr->const_index[0] + const_offset->u[0] +
+                                    4 * first_component);
+         } else {
+            offset_reg = vgrf(glsl_type::uint_type);
+            bld.ADD(offset_reg,
+                    retype(get_nir_src(instr->src[1]), BRW_REGISTER_TYPE_UD),
+                    brw_imm_ud(instr->const_index[0] + 4 * first_component));
+         }
+
+         emit_untyped_write(bld, surf_index, offset_reg,
+                            offset(val_reg, bld, first_component),
+                            1 /* dims */, length,
+                            BRW_PREDICATE_NONE);
+
+         /* Clear the bits in the writemask that we just wrote, then try
+          * again to see if more channels are left.
+          */
+         writemask &= (15 << (first_component + length));
+      }
+
+      break;
+   }
+
    default:
       nir_emit_intrinsic(bld, instr);
       break;
@@ -2538,12 +2697,28 @@ fs_visitor::nir_emit_intrinsic(const fs_builder &bld, nir_intrinsic_instr *instr
          /* Offsets are in bytes but they should always be multiples of 4 */
          assert(const_offset->u[0] % 4 == 0);
          src.reg_offset = const_offset->u[0] / 4;
+
+         for (unsigned j = 0; j < instr->num_components; j++) {
+            bld.MOV(offset(dest, bld, j), offset(src, bld, j));
+         }
       } else {
-         src.reladdr = new(mem_ctx) fs_reg(get_nir_src(instr->src[0]));
-      }
+         fs_reg indirect = retype(get_nir_src(instr->src[0]),
+                                  BRW_REGISTER_TYPE_UD);
 
-      for (unsigned j = 0; j < instr->num_components; j++) {
-         bld.MOV(offset(dest, bld, j), offset(src, bld, j));
+         /* We need to pass a size to the MOV_INDIRECT but we don't want it to
+          * go past the end of the uniform.  In order to keep the n'th
+          * component from running past, we subtract off the size of all but
+          * one component of the vector.
+          */
+         assert(instr->const_index[1] >= instr->num_components * 4);
+         unsigned read_size = instr->const_index[1] -
+                              (instr->num_components - 1) * 4;
+
+         for (unsigned j = 0; j < instr->num_components; j++) {
+            bld.emit(SHADER_OPCODE_MOV_INDIRECT,
+                     offset(dest, bld, j), offset(src, bld, j),
+                     indirect, brw_imm_ud(read_size));
+         }
       }
       break;
    }
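The bound passed to MOV_INDIRECT above is clearest with concrete numbers. Taking a hypothetical vec4 uniform, instr->const_index[1] == 16 bytes and num_components == 4, so:

   read_size = 16 - (4 - 1) * 4 = 4

Component j is then read at byte offset j * 4 with a 4-byte bound, so the furthest byte touched is (4 - 1) * 4 + 4 == 16, exactly the end of the uniform and never past it.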
@@ -2651,82 +2826,6 @@ fs_visitor::nir_emit_intrinsic(const fs_builder &bld, nir_intrinsic_instr *instr
       break;
    }
 
-   case nir_intrinsic_load_shared: {
-      assert(devinfo->gen >= 7);
-
-      fs_reg surf_index = brw_imm_ud(GEN7_BTI_SLM);
-
-      /* Get the offset to read from */
-      fs_reg offset_reg;
-      nir_const_value *const_offset = nir_src_as_const_value(instr->src[0]);
-      if (const_offset) {
-         offset_reg = brw_imm_ud(instr->const_index[0] + const_offset->u[0]);
-      } else {
-         offset_reg = vgrf(glsl_type::uint_type);
-         bld.ADD(offset_reg,
-                 retype(get_nir_src(instr->src[0]), BRW_REGISTER_TYPE_UD),
-                 brw_imm_ud(instr->const_index[0]));
-      }
-
-      /* Read the vector */
-      fs_reg read_result = emit_untyped_read(bld, surf_index, offset_reg,
-                                             1 /* dims */,
-                                             instr->num_components,
-                                             BRW_PREDICATE_NONE);
-      read_result.type = dest.type;
-      for (int i = 0; i < instr->num_components; i++)
-         bld.MOV(offset(dest, bld, i), offset(read_result, bld, i));
-
-      break;
-   }
-
-   case nir_intrinsic_store_shared: {
-      assert(devinfo->gen >= 7);
-
-      /* Block index */
-      fs_reg surf_index = brw_imm_ud(GEN7_BTI_SLM);
-
-      /* Value */
-      fs_reg val_reg = get_nir_src(instr->src[0]);
-
-      /* Writemask */
-      unsigned writemask = instr->const_index[1];
-
-      /* Combine groups of consecutive enabled channels in one write
-       * message. We use ffs to find the first enabled channel and then ffs on
-       * the bit-inverse, down-shifted writemask to determine the length of
-       * the block of enabled bits.
-       */
-      while (writemask) {
-         unsigned first_component = ffs(writemask) - 1;
-         unsigned length = ffs(~(writemask >> first_component)) - 1;
-         fs_reg offset_reg;
-
-         nir_const_value *const_offset = nir_src_as_const_value(instr->src[1]);
-         if (const_offset) {
-            offset_reg = brw_imm_ud(instr->const_index[0] + const_offset->u[0] +
-                                    4 * first_component);
-         } else {
-            offset_reg = vgrf(glsl_type::uint_type);
-            bld.ADD(offset_reg,
-                    retype(get_nir_src(instr->src[1]), BRW_REGISTER_TYPE_UD),
-                    brw_imm_ud(instr->const_index[0] + 4 * first_component));
-         }
-
-         emit_untyped_write(bld, surf_index, offset_reg,
-                            offset(val_reg, bld, first_component),
-                            1 /* dims */, length,
-                            BRW_PREDICATE_NONE);
-
-         /* Clear the bits in the writemask that we just wrote, then try
-          * again to see if more channels are left.
-          */
-         writemask &= (15 << (first_component + length));
-      }
-
-      break;
-   }
-
    case nir_intrinsic_load_input: {
       fs_reg src;
       if (stage == MESA_SHADER_VERTEX) {
@@ -2982,6 +3081,10 @@ fs_visitor::nir_emit_texture(const fs_builder &bld, nir_tex_instr *instr)
 
    fs_reg coordinate, shadow_comparitor, lod, lod2, sample_index, mcs, tex_offset;
 
+   /* Our hardware requires a LOD for buffer textures */
+   if (instr->sampler_dim == GLSL_SAMPLER_DIM_BUF)
+      lod = brw_imm_d(0);
+
    for (unsigned i = 0; i < instr->num_srcs; i++) {
       fs_reg src = get_nir_src(instr->src[i].src);
       switch (instr->src[i].src_type) {
index 2347cd5d33f726233b1f04d29c337623584594c0..8396854fcb165c08035023900126f25bed14ea59 100644 (file)
@@ -751,6 +751,7 @@ fs_visitor::emit_unspill(bblock_t *block, fs_inst *inst, fs_reg dst,
                                         dst);
       unspill_inst->offset = spill_offset;
       unspill_inst->regs_written = reg_size;
+      unspill_inst->force_writemask_all = true;
 
       if (!gen7_read) {
          unspill_inst->base_mrf = FIRST_SPILL_MRF(devinfo->gen) + 1;
@@ -764,11 +765,11 @@ fs_visitor::emit_unspill(bblock_t *block, fs_inst *inst, fs_reg dst,
 
 void
 fs_visitor::emit_spill(bblock_t *block, fs_inst *inst, fs_reg src,
-                       uint32_t spill_offset, int count)
+                       uint32_t spill_offset, int count, bool we_all)
 {
    int reg_size = 1;
    int spill_base_mrf = FIRST_SPILL_MRF(devinfo->gen) + 1;
-   if (dispatch_width == 16 && count % 2 == 0) {
+   if (inst->exec_size == 16 && count % 2 == 0) {
       spill_base_mrf = FIRST_SPILL_MRF(devinfo->gen);
       reg_size = 2;
    }
@@ -784,6 +785,8 @@ fs_visitor::emit_spill(bblock_t *block, fs_inst *inst, fs_reg src,
       spill_inst->offset = spill_offset + i * reg_size * REG_SIZE;
       spill_inst->mlen = 1 + reg_size; /* header, value */
       spill_inst->base_mrf = spill_base_mrf;
+      spill_inst->force_writemask_all = we_all;
+      spill_inst->force_sechalf = inst->force_sechalf;
    }
 }
 
@@ -805,30 +808,13 @@ fs_visitor::choose_spill_reg(struct ra_graph *g)
     */
    foreach_block_and_inst(block, fs_inst, inst, cfg) {
       for (unsigned int i = 0; i < inst->sources; i++) {
-        if (inst->src[i].file == VGRF) {
+        if (inst->src[i].file == VGRF)
             spill_costs[inst->src[i].nr] += loop_scale;
-
-            /* Register spilling logic assumes full-width registers; smeared
-             * registers have a width of 1 so if we try to spill them we'll
-             * generate invalid assembly.  This shouldn't be a problem because
-             * smeared registers are only used as short-term temporaries when
-             * loading pull constants, so spilling them is unlikely to reduce
-             * register pressure anyhow.
-             */
-            if (!inst->src[i].is_contiguous()) {
-               no_spill[inst->src[i].nr] = true;
-            }
-        }
       }
 
-      if (inst->dst.file == VGRF) {
+      if (inst->dst.file == VGRF)
          spill_costs[inst->dst.nr] += inst->regs_written * loop_scale;
 
-         if (!inst->dst.is_contiguous()) {
-            no_spill[inst->dst.nr] = true;
-         }
-      }
-
       switch (inst->opcode) {
 
       case BRW_OPCODE_DO:
@@ -938,12 +924,15 @@ fs_visitor::spill_reg(int spill_reg)
           * inst->regs_written(), then we need to unspill the destination
           * since we write back out all of the regs_written().
          */
-        if (inst->is_partial_write())
+         bool need_unspill = inst->is_partial_write() ||
+                             type_sz(inst->dst.type) != 4;
+         if (need_unspill)
             emit_unspill(block, inst, spill_src, subset_spill_offset,
                          inst->regs_written);
 
          emit_spill(block, inst, spill_src, subset_spill_offset,
-                    inst->regs_written);
+                    inst->regs_written,
+                    need_unspill || inst->force_writemask_all);
       }
    }
 
index 75734d2cfa049c05b46ae857bec42e330574a629..4adffdd75fba532ab06163d803e76ac175c8050e 100644 (file)
@@ -717,6 +717,15 @@ namespace {
                   bld.emit_minmax(offset(dst, bld, c), offset(dst, bld, c),
                                   brw_imm_d(-(int)scale(widths[c] - s) - 1),
                                   BRW_CONDITIONAL_GE);
+
+               /* Mask off all but the bits we actually want.  Otherwise, if
+                * we pass a negative number into the hardware when it's
+                * expecting something like UINT8, it will happily clamp it to
+                * +255 for us.
+                */
+               if (is_signed && widths[c] < 32)
+                  bld.AND(offset(dst, bld, c), offset(dst, bld, c),
+                          brw_imm_d((1 << widths[c]) - 1));
             }
          }
 
@@ -787,6 +796,15 @@ namespace {
                /* Convert to integer. */
                bld.RNDE(offset(fdst, bld, c), offset(fdst, bld, c));
                bld.MOV(offset(dst, bld, c), offset(fdst, bld, c));
+
+               /* Mask off all but the bits we actually want.  Otherwise, if
+                * we pass a negative number into the hardware when it's
+                * expecting something like UINT8, it will happily clamp it to
+                * +255 for us.
+                */
+               if (is_signed && widths[c] < 32)
+                  bld.AND(offset(dst, bld, c), offset(dst, bld, c),
+                          brw_imm_d((1 << widths[c]) - 1));
             }
          }
 
index dc61d096efc21981a865a994c10eedf71b5978a2..f1da218ba63c139faaf6481f47f57b6a771c3e04 100644 (file)
@@ -1063,9 +1063,6 @@ fs_visitor::init()
 
    this->spilled_any_registers = false;
    this->do_dual_src = false;
-
-   if (dispatch_width == 8)
-      this->param_size = rzalloc_array(mem_ctx, int, stage_prog_data->nr_params);
 }
 
 fs_visitor::~fs_visitor()
index c3eec2efb42fa9b43f676cefb840ee18bfc3aac6..e4f20f4ffc9d8abadd740a83f5d1c4c782fb7d5f 100644 (file)
@@ -58,8 +58,6 @@ public:
     */
    int subreg_offset;
 
-   fs_reg *reladdr;
-
    /** Register region horizontal stride */
    uint8_t stride;
 };
@@ -136,8 +134,7 @@ component(fs_reg reg, unsigned idx)
 static inline bool
 is_uniform(const fs_reg &reg)
 {
-   return (reg.stride == 0 || reg.is_null()) &&
-          (!reg.reladdr || is_uniform(*reg.reladdr));
+   return (reg.stride == 0 || reg.is_null());
 }
 
 /**
index a5949d5d6eb65d13907c1cb0a369f95af32c6233..ba9cb3f608cd472625b09d15c3f47d7f6c114887 100644 (file)
@@ -377,6 +377,14 @@ brw_nir_lower_uniforms(nir_shader *nir, bool is_scalar)
    }
 }
 
+void
+brw_nir_lower_shared(nir_shader *nir)
+{
+   nir_assign_var_locations(&nir->shared, &nir->num_shared,
+                            type_size_scalar_bytes);
+   nir_lower_io(nir, nir_var_shared, type_size_scalar_bytes);
+}
+
 #define OPT(pass, ...) ({                                  \
    bool this_progress = false;                             \
    NIR_PASS(this_progress, nir, pass, ##__VA_ARGS__);      \
@@ -460,7 +468,7 @@ brw_preprocess_nir(nir_shader *nir, bool is_scalar)
    /* Get rid of split copies */
    nir = nir_optimize(nir, is_scalar);
 
-   OPT(nir_remove_dead_variables);
+   OPT(nir_remove_dead_variables, nir_var_local);
 
    return nir;
 }
@@ -487,7 +495,7 @@ brw_postprocess_nir(nir_shader *nir,
 
    if (devinfo->gen >= 6) {
       /* Try and fuse multiply-adds */
-      OPT(brw_nir_opt_peephole_ffma);
+//      OPT(brw_nir_opt_peephole_ffma);
    }
 
    OPT(nir_opt_algebraic_late);
@@ -570,6 +578,9 @@ brw_create_nir(struct brw_context *brw,
       OPT_V(nir_lower_atomics, shader_prog);
    }
 
+   if (nir->stage == MESA_SHADER_COMPUTE)
+      OPT_V(brw_nir_lower_shared);
+
    return nir;
 }
 
index 2d8341fd40e175f45235e7819f89062fb975b986..0ef3473556159f96084fcdfdd208992e12d4bcb9 100644 (file)
@@ -83,6 +83,7 @@ nir_shader *brw_create_nir(struct brw_context *brw,
 
 nir_shader *brw_preprocess_nir(nir_shader *nir, bool is_scalar);
 
+void brw_nir_lower_shared(nir_shader *nir);
 void brw_nir_lower_vs_inputs(nir_shader *nir,
                              const struct brw_device_info *devinfo,
                              bool is_scalar,
index 3112c0c40143bc1214f719767fca48791fe63059..b093a87bb82da8d6456ff419a449f4019efd5696 100644 (file)
@@ -279,7 +279,7 @@ brw_get_scratch_bo(struct brw_context *brw,
 
 void brwInitFragProgFuncs( struct dd_function_table *functions )
 {
-   assert(functions->ProgramStringNotify == _tnl_program_string);
+   /* assert(functions->ProgramStringNotify == _tnl_program_string); */
 
    functions->NewProgram = brwNewProgram;
    functions->DeleteProgram = brwDeleteProgram;
index 3c0b23b4a42639c207789ef94ad9f7eae629f3c0..b216dc9d074f98014ed93efb5a3551a74aa3bd02 100644 (file)
 #include "brw_context.h"
 #include "brw_state.h"
 #include "brw_defines.h"
-
-struct surface_format_info {
-   bool exists;
-   int sampling;
-   int filtering;
-   int shadow_compare;
-   int chroma_key;
-   int render_target;
-   int alpha_blend;
-   int input_vb;
-   int streamed_output_vb;
-   int color_processing;
-   int lossless_compression;
-   const char *name;
-};
+#include "brw_wm.h"
+#include "brw_surface_formats.h"
 
 /* This macro allows us to write the table almost as it appears in the PRM,
  * while restructuring it to turn it into the C code we want.
@@ -86,7 +73,7 @@ struct surface_format_info {
  * - VOL4_Part1 section 3.9.11 Render Target Write.
  * - Render Target Surface Types [SKL+]
  */
-const struct surface_format_info surface_formats[] = {
+const struct brw_surface_format_info surface_formats[] = {
 /* smpl filt shad CK  RT  AB  VB  SO  color ccs_e */
    SF( Y, 50,  x,  x,  Y,  Y,  Y,  Y,  x,   90,   R32G32B32A32_FLOAT)
    SF( Y,  x,  x,  x,  Y,  x,  Y,  Y,  x,   90,   R32G32B32A32_SINT)
@@ -218,7 +205,7 @@ const struct surface_format_info surface_formats[] = {
    SF(50, 50,  x,  x,  x,  x,  x,  x,  x,    x,   P8A8_UNORM_PALETTE0)
    SF(50, 50,  x,  x,  x,  x,  x,  x,  x,    x,   P8A8_UNORM_PALETTE1)
    SF( x,  x,  x,  x,  x,  x,  x,  x,  x,    x,   A1B5G5R5_UNORM)
-   SF( x,  x,  x,  x,  x,  x,  x,  x,  x,    x,   A4B4G4R4_UNORM)
+   SF( Y,  Y,  x,  Y, 90,  x,  x,  x,  x,    x,   A4B4G4R4_UNORM)
    SF( x,  x,  x,  x,  x,  x,  x,  x,  x,    x,   L8A8_UINT)
    SF( x,  x,  x,  x,  x,  x,  x,  x,  x,    x,   L8A8_SINT)
    SF( Y,  Y,  x, 45,  Y,  Y,  Y,  x,  x,    x,   R8_UNORM)
@@ -281,13 +268,13 @@ const struct surface_format_info surface_formats[] = {
    SF(70, 70,  x,  x,  x,  x,  x,  x,  x,    x,   BC6H_UF16)
    SF( x,  x,  x,  x,  x,  x,  x,  x,  x,    x,   PLANAR_420_8)
    SF( x,  x,  x,  x,  x,  x,  x,  x,  x,    x,   R8G8B8_UNORM_SRGB)
-   SF( x,  x,  x,  x,  x,  x,  x,  x,  x,    x,   ETC1_RGB8)
-   SF( x,  x,  x,  x,  x,  x,  x,  x,  x,    x,   ETC2_RGB8)
-   SF( x,  x,  x,  x,  x,  x,  x,  x,  x,    x,   EAC_R11)
-   SF( x,  x,  x,  x,  x,  x,  x,  x,  x,    x,   EAC_RG11)
-   SF( x,  x,  x,  x,  x,  x,  x,  x,  x,    x,   EAC_SIGNED_R11)
-   SF( x,  x,  x,  x,  x,  x,  x,  x,  x,    x,   EAC_SIGNED_RG11)
-   SF( x,  x,  x,  x,  x,  x,  x,  x,  x,    x,   ETC2_SRGB8)
+   SF(80, 80,  x,  x,  x,  x,  x,  x,  x,    x,   ETC1_RGB8)
+   SF(80, 80,  x,  x,  x,  x,  x,  x,  x,    x,   ETC2_RGB8)
+   SF(80, 80,  x,  x,  x,  x,  x,  x,  x,    x,   EAC_R11)
+   SF(80, 80,  x,  x,  x,  x,  x,  x,  x,    x,   EAC_RG11)
+   SF(80, 80,  x,  x,  x,  x,  x,  x,  x,    x,   EAC_SIGNED_R11)
+   SF(80, 80,  x,  x,  x,  x,  x,  x,  x,    x,   EAC_SIGNED_RG11)
+   SF(80, 80,  x,  x,  x,  x,  x,  x,  x,    x,   ETC2_SRGB8)
    SF( x,  x,  x,  x,  x,  x,  x,  x,  x,    x,   R16G16B16_UINT)
    SF( x,  x,  x,  x,  x,  x,  x,  x,  x,    x,   R16G16B16_SINT)
    SF( x,  x,  x,  x,  x,  x,  x,  x,  x,    x,   R32_SFIXED)
@@ -302,10 +289,10 @@ const struct surface_format_info surface_formats[] = {
    SF( x,  x,  x,  x,  x,  x,  x,  x,  x,    x,   B10G10R10A2_SINT)
    SF( x,  x,  x,  x,  x,  x,  x,  x,  x,    x,   R64G64B64A64_PASSTHRU)
    SF( x,  x,  x,  x,  x,  x,  x,  x,  x,    x,   R64G64B64_PASSTHRU)
-   SF( x,  x,  x,  x,  x,  x,  x,  x,  x,    x,   ETC2_RGB8_PTA)
-   SF( x,  x,  x,  x,  x,  x,  x,  x,  x,    x,   ETC2_SRGB8_PTA)
-   SF( x,  x,  x,  x,  x,  x,  x,  x,  x,    x,   ETC2_EAC_RGBA8)
-   SF( x,  x,  x,  x,  x,  x,  x,  x,  x,    x,   ETC2_EAC_SRGB8_A8)
+   SF(80, 80,  x,  x,  x,  x,  x,  x,  x,    x,   ETC2_RGB8_PTA)
+   SF(80, 80,  x,  x,  x,  x,  x,  x,  x,    x,   ETC2_SRGB8_PTA)
+   SF(80, 80,  x,  x,  x,  x,  x,  x,  x,    x,   ETC2_EAC_RGBA8)
+   SF(80, 80,  x,  x,  x,  x,  x,  x,  x,    x,   ETC2_EAC_SRGB8_A8)
    SF( x,  x,  x,  x,  x,  x,  x,  x,  x,    x,   R8G8B8_UINT)
    SF( x,  x,  x,  x,  x,  x,  x,  x,  x,    x,   R8G8B8_SINT)
    SF(80, 80,  x,  x,  x,  x,  x,  x,  x,    x,   ASTC_LDR_2D_4x4_FLT16)
@@ -618,7 +605,7 @@ brw_init_surface_formats(struct brw_context *brw)
 
    for (format = MESA_FORMAT_NONE + 1; format < MESA_FORMAT_COUNT; format++) {
       uint32_t texture, render;
-      const struct surface_format_info *rinfo, *tinfo;
+      const struct brw_surface_format_info *rinfo, *tinfo;
       bool is_integer = _mesa_is_format_integer_color(format);
 
       render = texture = brw_format_for_mesa_format(format);
@@ -827,7 +814,7 @@ bool
 brw_losslessly_compressible_format(const struct brw_context *brw,
                                    uint32_t brw_format)
 {
-   const struct surface_format_info * const sinfo =
+   const struct brw_surface_format_info * const sinfo =
       &surface_formats[brw_format];
    const int gen = brw->gen * 10;
 
diff --git a/src/mesa/drivers/dri/i965/brw_surface_formats.h b/src/mesa/drivers/dri/i965/brw_surface_formats.h
new file mode 100644 (file)
index 0000000..a5cd49f
--- /dev/null
@@ -0,0 +1,41 @@
+/*
+ * Copyright © 2011 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#pragma once
+
+struct brw_surface_format_info {
+   bool exists;
+   int sampling;
+   int filtering;
+   int shadow_compare;
+   int chroma_key;
+   int render_target;
+   int alpha_blend;
+   int input_vb;
+   int streamed_output_vb;
+   int color_processing;
+   int lossless_compression;
+   const char *name;
+};
+
+extern const struct brw_surface_format_info surface_formats[];
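
The SF() rows in the table above expand, via an X-macro, into one struct brw_surface_format_info initializer per hardware format: "Y" marks a capability supported on every gen, a number such as 45 or 90 marks the first gen (times ten) that supports it, and "x" marks a capability that is never supported. Below is a self-contained miniature of the idiom, for illustration only; the real macro lives in brw_surface_formats.c, and the sentinel values (0/999) and names here are assumptions.

    #include <stdbool.h>

    struct fmt_info { bool exists; int sampling; int render_target; const char *name; };

    enum fmt { FMT_EXAMPLE_A, FMT_EXAMPLE_B, FMT_COUNT };

    #define Y 0      /* supported on every gen     */
    #define x 999    /* never supported on any gen */
    #define SF(smpl, rt, sym) [FMT_##sym] = { true, smpl, rt, #sym },

    static const struct fmt_info fmt_table[FMT_COUNT] = {
    /* smpl  RT */
       SF( Y,  45, EXAMPLE_A)   /* 45 == Gen4.5 in the gen*10 scheme        */
       SF(90,   x, EXAMPLE_B)   /* sampled from Gen9, never a render target */
    };

    #undef SF
    #undef x
    #undef Y
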
index bf7f9c61c84959fc43ce7860a48b019690e3d0bd..934b6b8d6273313418717c53fef616033b736e4e 100644 (file)
@@ -98,3 +98,31 @@ GLuint brw_translate_blend_factor( GLenum factor )
       unreachable("not reached");
    }
 }
+
+static const GLuint prim_to_hw_prim[GL_TRIANGLE_STRIP_ADJACENCY+1] = {
+   [GL_POINTS] = _3DPRIM_POINTLIST,
+   [GL_LINES] = _3DPRIM_LINELIST,
+   [GL_LINE_LOOP] = _3DPRIM_LINELOOP,
+   [GL_LINE_STRIP] = _3DPRIM_LINESTRIP,
+   [GL_TRIANGLES] = _3DPRIM_TRILIST,
+   [GL_TRIANGLE_STRIP] = _3DPRIM_TRISTRIP,
+   [GL_TRIANGLE_FAN] = _3DPRIM_TRIFAN,
+   [GL_QUADS] = _3DPRIM_QUADLIST,
+   [GL_QUAD_STRIP] = _3DPRIM_QUADSTRIP,
+   [GL_POLYGON] = _3DPRIM_POLYGON,
+   [GL_LINES_ADJACENCY] = _3DPRIM_LINELIST_ADJ,
+   [GL_LINE_STRIP_ADJACENCY] = _3DPRIM_LINESTRIP_ADJ,
+   [GL_TRIANGLES_ADJACENCY] = _3DPRIM_TRILIST_ADJ,
+   [GL_TRIANGLE_STRIP_ADJACENCY] = _3DPRIM_TRISTRIP_ADJ,
+};
+
+uint32_t
+get_hw_prim_for_gl_prim(int mode)
+{
+   if (mode >= BRW_PRIM_OFFSET) {
+      return mode - BRW_PRIM_OFFSET;
+   } else {
+      assert(mode < ARRAY_SIZE(prim_to_hw_prim));
+      return prim_to_hw_prim[mode];
+   }
+}
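
A hypothetical caller of the new helper, only to show its two paths (assumes the usual brw headers are in scope): GL primitive enums below BRW_PRIM_OFFSET are mapped through the table, while values at or above the offset are hardware primitives passed through unchanged.

    static uint32_t example_hw_prim(void)
    {
       /* Table path: a core GL enum. */
       return get_hw_prim_for_gl_prim(GL_TRIANGLE_STRIP);   /* -> _3DPRIM_TRISTRIP */
    }
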
index baf72a25c420487156746c6e49a834922eab618b..65e57ba5e624fde1383f63f9daf1b482fd6a4e70 100644 (file)
@@ -496,11 +496,6 @@ vec4_visitor::split_uniform_registers()
         inst->src[i].reg_offset = 0;
       }
    }
-
-   /* Update that everything is now vector-sized. */
-   for (int i = 0; i < this->uniforms; i++) {
-      this->uniform_size[i] = 1;
-   }
 }
 
 void
@@ -558,7 +553,6 @@ vec4_visitor::pack_uniform_registers()
     * push constants.
     */
    for (int src = 0; src < uniforms; src++) {
-      assert(src < uniform_array_size);
       int size = chans_used[src];
 
       if (size == 0)
@@ -805,7 +799,7 @@ vec4_visitor::move_push_constants_to_pull_constants()
         dst_reg temp = dst_reg(this, glsl_type::vec4_type);
 
         emit_pull_constant_load(block, inst, temp, inst->src[i],
-                                pull_constant_loc[uniform]);
+                                pull_constant_loc[uniform], src_reg());
 
         inst->src[i].file = temp.file;
          inst->src[i].nr = temp.nr;
@@ -1610,8 +1604,6 @@ vec4_visitor::setup_uniforms(int reg)
     * matter what, or the GPU would hang.
     */
    if (devinfo->gen < 6 && this->uniforms == 0) {
-      assert(this->uniforms < this->uniform_array_size);
-
       stage_prog_data->param =
          reralloc(NULL, stage_prog_data->param, const gl_constant_value *, 4);
       for (unsigned int i = 0; i < 4; i++) {
index d43a5a82052cbe1465e15e353e70102de66c7c52..6143f65efa139b7d52c4c28369ed33ff6ce0b8ba 100644 (file)
@@ -115,8 +115,6 @@ public:
     */
    dst_reg output_reg[BRW_VARYING_SLOT_COUNT];
    const char *output_reg_annotation[BRW_VARYING_SLOT_COUNT];
-   int *uniform_size;
-   int uniform_array_size; /*< Size of the uniform_size array */
    int uniforms;
 
    src_reg shader_start_time;
@@ -278,8 +276,6 @@ public:
 
    src_reg get_scratch_offset(bblock_t *block, vec4_instruction *inst,
                              src_reg *reladdr, int reg_offset);
-   src_reg get_pull_constant_offset(bblock_t *block, vec4_instruction *inst,
-                                   src_reg *reladdr, int reg_offset);
    void emit_scratch_read(bblock_t *block, vec4_instruction *inst,
                          dst_reg dst,
                          src_reg orig_src,
@@ -289,7 +285,8 @@ public:
    void emit_pull_constant_load(bblock_t *block, vec4_instruction *inst,
                                dst_reg dst,
                                src_reg orig_src,
-                               int base_offset);
+                               int base_offset,
+                                src_reg indirect);
    void emit_pull_constant_load_reg(dst_reg dst,
                                     src_reg surf_index,
                                     src_reg offset,
index 549b707203fe7575e8d9a6b8d6f73887a2ebee71..695c4df258b313f320eb10a7bda6feef1848789e 100644 (file)
@@ -1393,6 +1393,48 @@ generate_set_simd4x2_header_gen9(struct brw_codegen *p,
    brw_pop_insn_state(p);
 }
 
+static void
+generate_mov_indirect(struct brw_codegen *p,
+                      vec4_instruction *inst,
+                      struct brw_reg dst, struct brw_reg reg,
+                      struct brw_reg indirect, struct brw_reg length)
+{
+   assert(indirect.type == BRW_REGISTER_TYPE_UD);
+
+   unsigned imm_byte_offset = reg.nr * REG_SIZE + reg.subnr * (REG_SIZE / 2);
+
+   /* This instruction acts in align1 mode */
+   assert(inst->force_writemask_all || reg.writemask == 0xf);
+
+   brw_push_insn_state(p);
+   brw_set_default_access_mode(p, BRW_ALIGN_1);
+   brw_set_default_mask_control(p, BRW_MASK_DISABLE);
+
+   struct brw_reg addr = vec2(brw_address_reg(0));
+
+   /* We need to move the indirect value into the address register.  To keep
+    * the result sensible, we want to respect at least the X component of the
+    * swizzle.  To do that, we need to convert the subnr (probably 0) to an
+    * align1 subnr and add in the swizzle.  We then use a region of <8,4,0>:uw
+    * to pick off the first 2 bytes of the indirect value and splat them out
+    * to all four channels of the given half of a0.
+    */
+   assert(brw_is_single_value_swizzle(indirect.swizzle));
+   indirect.subnr = (indirect.subnr * 4 + BRW_GET_SWZ(indirect.swizzle, 0)) * 2;
+   indirect = stride(retype(indirect, BRW_REGISTER_TYPE_UW), 8, 4, 0);
+
+   brw_ADD(p, addr, indirect, brw_imm_uw(imm_byte_offset));
+
+   /* Use a <4,1> Vx1 region */
+   struct brw_reg src = brw_VxH_indirect(0, 0);
+   src.width = BRW_WIDTH_4;
+   src.hstride = BRW_HORIZONTAL_STRIDE_1;
+
+   brw_MOV(p, dst, retype(src, reg.type));
+
+   brw_pop_insn_state(p);
+}
+
 static void
 generate_code(struct brw_codegen *p,
               const struct brw_compiler *compiler,
@@ -1939,6 +1981,10 @@ generate_code(struct brw_codegen *p,
          brw_WAIT(p);
          break;
 
+      case SHADER_OPCODE_MOV_INDIRECT:
+         generate_mov_indirect(p, inst, dst, src[0], src[1], src[2]);
+         break;
+
       default:
          unreachable("Unsupported opcode");
       }
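
Two bits of arithmetic in generate_mov_indirect() are easy to misread, so here is a minimal stand-alone sketch of them, assuming the usual 32-byte GRF size; the helper names are illustrative only.

    #include <stdint.h>

    #define GRF_SIZE 32u   /* one general register, in bytes (assumption) */

    /* Constant part of the indirect offset: which GRF the source lives in,
     * plus which 16-byte (align16) half of it the source starts at. */
    static inline uint32_t mov_indirect_imm_offset(uint32_t nr, uint32_t subnr)
    {
       return nr * GRF_SIZE + subnr * (GRF_SIZE / 2);
    }

    /* Align16 subnr plus the swizzle's X component, converted to an align1
     * :uw sub-register number: (subnr * 4 + swz_x) is a dword index, and
     * every dword holds two :uw elements. */
    static inline unsigned align1_uw_subnr(unsigned align16_subnr, unsigned swz_x)
    {
       return (align16_subnr * 4 + swz_x) * 2;
    }
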
index 52977f1c12b959d97d1ddb1f2e30307d3e8f9581..4686f2014c614ee48e72a362befd2f838a424a16 100644 (file)
@@ -132,15 +132,6 @@ void
 vec4_visitor::nir_setup_uniforms()
 {
    uniforms = nir->num_uniforms / 16;
-
-   nir_foreach_variable(var, &nir->uniforms) {
-      /* UBO's and atomics don't take up space in the uniform file */
-      if (var->interface_type != NULL || var->type->contains_atomic())
-         continue;
-
-      if (type_size_vec4(var->type) > 0)
-         uniform_size[var->data.driver_location / 16] = type_size_vec4(var->type);
-   }
 }
 
 void
@@ -708,12 +699,14 @@ vec4_visitor::nir_emit_intrinsic(nir_intrinsic_instr *instr)
          /* Offsets are in bytes but they should always be multiples of 16 */
          assert(const_offset->u[0] % 16 == 0);
          src.reg_offset = const_offset->u[0] / 16;
+
+         emit(MOV(dest, src));
       } else {
-         src_reg tmp = get_nir_src(instr->src[0], BRW_REGISTER_TYPE_D, 1);
-         src.reladdr = new(mem_ctx) src_reg(tmp);
-      }
+         src_reg indirect = get_nir_src(instr->src[0], BRW_REGISTER_TYPE_UD, 1);
 
-      emit(MOV(dest, src));
+         emit(SHADER_OPCODE_MOV_INDIRECT, dest, src,
+              indirect, brw_imm_ud(instr->const_index[1]));
+      }
       break;
    }
 
@@ -1100,15 +1093,29 @@ vec4_visitor::nir_emit_alu(nir_alu_instr *instr)
       inst->saturate = instr->dest.saturate;
       break;
 
-   case nir_op_fsin:
-      inst = emit_math(SHADER_OPCODE_SIN, dst, op[0]);
-      inst->saturate = instr->dest.saturate;
+   case nir_op_fsin: {
+      src_reg tmp = src_reg(this, glsl_type::vec4_type);
+      inst = emit_math(SHADER_OPCODE_SIN, dst_reg(tmp), op[0]);
+      if (instr->dest.saturate) {
+         inst->dst = dst;
+         inst->saturate = true;
+      } else {
+         emit(MUL(dst, tmp, brw_imm_f(0.99997)));
+      }
       break;
+   }
 
-   case nir_op_fcos:
-      inst = emit_math(SHADER_OPCODE_COS, dst, op[0]);
-      inst->saturate = instr->dest.saturate;
+   case nir_op_fcos: {
+      src_reg tmp = src_reg(this, glsl_type::vec4_type);
+      inst = emit_math(SHADER_OPCODE_COS, dst_reg(tmp), op[0]);
+      if (instr->dest.saturate) {
+         inst->dst = dst;
+         inst->saturate = true;
+      } else {
+         emit(MUL(dst, tmp, brw_imm_f(0.99997)));
+      }
       break;
+   }
 
    case nir_op_idiv:
    case nir_op_udiv:
@@ -1116,9 +1123,41 @@ vec4_visitor::nir_emit_alu(nir_alu_instr *instr)
       break;
 
    case nir_op_umod:
+   case nir_op_irem:
+      /* According to the sign table for INT DIV in the Ivy Bridge PRM, it
+       * appears that our hardware just does the right thing for signed
+       * remainder.
+       */
       emit_math(SHADER_OPCODE_INT_REMAINDER, dst, op[0], op[1]);
       break;
 
+   case nir_op_imod: {
+      /* Get a regular C-style remainder.  If a % b != 0, set the predicate. */
+      inst = emit_math(SHADER_OPCODE_INT_REMAINDER, dst, op[0], op[1]);
+
+      /* Math instructions don't support conditional mod */
+      inst = emit(MOV(dst_null_d(), src_reg(dst)));
+      inst->conditional_mod = BRW_CONDITIONAL_NZ;
+
+      /* Now, we need to determine if signs of the sources are different.
+       * When we XOR the sources, the top bit is 0 if they are the same and 1
+       * if they are different.  We can then use a conditional modifier to
+       * turn that into a predicate.  This leads us to an XOR.l instruction.
+       */
+      src_reg tmp = src_reg(this, glsl_type::ivec4_type);
+      inst = emit(XOR(dst_reg(tmp), op[0], op[1]));
+      inst->predicate = BRW_PREDICATE_NORMAL;
+      inst->conditional_mod = BRW_CONDITIONAL_L;
+
+      /* If the result of the initial remainder operation is non-zero and the
+       * two sources have different signs, add in a copy of op[1] to get the
+       * final integer modulus value.
+       */
+      inst = emit(ADD(dst, src_reg(dst), op[1]));
+      inst->predicate = BRW_PREDICATE_NORMAL;
+      break;
+   }
+
    case nir_op_ldexp:
       unreachable("not reached: should be handled by ldexp_to_arith()");
 
@@ -1188,6 +1227,32 @@ vec4_visitor::nir_emit_alu(nir_alu_instr *instr)
       inst->saturate = instr->dest.saturate;
       break;
 
+   case nir_op_fquantize2f16: {
+      /* See also vec4_visitor::emit_pack_half_2x16() */
+      src_reg tmp16 = src_reg(this, glsl_type::uvec4_type);
+      src_reg tmp32 = src_reg(this, glsl_type::vec4_type);
+      src_reg zero = src_reg(this, glsl_type::vec4_type);
+
+      /* Check for denormal */
+      src_reg abs_src0 = op[0];
+      abs_src0.abs = true;
+      emit(CMP(dst_null_f(), abs_src0, brw_imm_f(ldexpf(1.0, -14)),
+               BRW_CONDITIONAL_L));
+      /* Get the appropriately signed zero */
+      emit(AND(retype(dst_reg(zero), BRW_REGISTER_TYPE_UD),
+               retype(op[0], BRW_REGISTER_TYPE_UD),
+               brw_imm_ud(0x80000000)));
+      /* Do the actual F32 -> F16 -> F32 conversion */
+      emit(F32TO16(dst_reg(tmp16), op[0]));
+      emit(F16TO32(dst_reg(tmp32), tmp16));
+      /* Select that or zero based on normal status */
+      inst = emit(BRW_OPCODE_SEL, dst, zero, tmp32);
+      inst->predicate = BRW_PREDICATE_NORMAL;
+      inst->predicate_inverse = true;
+      inst->saturate = instr->dest.saturate;
+      break;
+   }
+
    case nir_op_fmin:
    case nir_op_imin:
    case nir_op_umin:
@@ -1593,7 +1658,6 @@ vec4_visitor::nir_emit_jump(nir_jump_instr *instr)
       break;
 
    case nir_jump_return:
-      /* fall through */
    default:
       unreachable("unknown jump");
    }
@@ -1663,6 +1727,10 @@ vec4_visitor::nir_emit_texture(nir_tex_instr *instr)
                                  nir_tex_instr_dest_size(instr));
    dst_reg dest = get_nir_dest(instr->dest, instr->dest_type);
 
+   /* Our hardware requires a LOD for buffer textures */
+   if (instr->sampler_dim == GLSL_SAMPLER_DIM_BUF)
+      lod = brw_imm_d(0);
+
    /* Load the texture operation sources */
    uint32_t constant_offset = 0;
    for (unsigned i = 0; i < instr->num_srcs; i++) {
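
The nir_op_imod case above rebuilds GLSL-style modulus from the hardware's C-style remainder: take the remainder and, only when it is non-zero and the operands' signs differ, add the divisor back in. The same logic in plain C, as a reference sketch (assumes b != 0):

    static int imod_reference(int a, int b)
    {
       int r = a % b;                  /* SHADER_OPCODE_INT_REMAINDER          */
       if (r != 0 && ((a ^ b) < 0))    /* flag: non-zero remainder AND the     */
          r += b;                      /* signs differ -> predicated ADD op[1] */
       return r;
    }

For example, imod_reference(-7, 3) yields 2, whereas C's -7 % 3 is -1; the result takes the sign of the divisor.
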
index 8f77b59ea0323fd151360fe1a22413053f90dfac..f344eaad66436dd4ca6b3b4fb9040916398ad62d 100644 (file)
@@ -59,8 +59,6 @@ vec4_tcs_visitor::emit_nir_code()
        * copies VS outputs to TES inputs.
        */
       uniforms = 2;
-      uniform_size[0] = 1;
-      uniform_size[1] = 1;
 
       uint64_t varyings = key->outputs_written;
 
index d30330a379f4d40de01c487d15248480b4315fcd..4cfbc143d5a605e670d9fa355049df952b4440fc 100644 (file)
@@ -1404,27 +1404,6 @@ vec4_visitor::get_scratch_offset(bblock_t *block, vec4_instruction *inst,
    }
 }
 
-src_reg
-vec4_visitor::get_pull_constant_offset(bblock_t * block, vec4_instruction *inst,
-                                      src_reg *reladdr, int reg_offset)
-{
-   if (reladdr) {
-      src_reg index = src_reg(this, glsl_type::int_type);
-
-      emit_before(block, inst, ADD(dst_reg(index), *reladdr,
-                                   brw_imm_d(reg_offset * 16)));
-
-      return index;
-   } else if (devinfo->gen >= 8) {
-      /* Store the offset in a GRF so we can send-from-GRF. */
-      src_reg offset = src_reg(this, glsl_type::int_type);
-      emit_before(block, inst, MOV(dst_reg(offset), brw_imm_d(reg_offset * 16)));
-      return offset;
-   } else {
-      return brw_imm_d(reg_offset * 16);
-   }
-}
-
 /**
  * Emits an instruction before @inst to load the value named by @orig_src
  * from scratch space at @base_offset to @temp.
@@ -1602,12 +1581,24 @@ vec4_visitor::move_grf_array_access_to_scratch()
 void
 vec4_visitor::emit_pull_constant_load(bblock_t *block, vec4_instruction *inst,
                                      dst_reg temp, src_reg orig_src,
-                                     int base_offset)
+                                     int base_offset, src_reg indirect)
 {
    int reg_offset = base_offset + orig_src.reg_offset;
    const unsigned index = prog_data->base.binding_table.pull_constants_start;
-   src_reg offset = get_pull_constant_offset(block, inst, orig_src.reladdr,
-                                             reg_offset);
+
+   src_reg offset;
+   if (indirect.file != BAD_FILE) {
+      offset = src_reg(this, glsl_type::int_type);
+
+      emit_before(block, inst, ADD(dst_reg(offset), indirect,
+                                   brw_imm_d(reg_offset * 16)));
+   } else if (devinfo->gen >= 8) {
+      /* Store the offset in a GRF so we can send-from-GRF. */
+      offset = src_reg(this, glsl_type::int_type);
+      emit_before(block, inst, MOV(dst_reg(offset), brw_imm_d(reg_offset * 16)));
+   } else {
+      offset = brw_imm_d(reg_offset * 16);
+   }
 
    emit_pull_constant_load_reg(temp,
                                brw_imm_ud(index),
@@ -1634,59 +1625,55 @@ vec4_visitor::move_uniform_array_access_to_pull_constants()
 {
    int pull_constant_loc[this->uniforms];
    memset(pull_constant_loc, -1, sizeof(pull_constant_loc));
-   bool nested_reladdr;
 
-   /* Walk through and find array access of uniforms.  Put a copy of that
-    * uniform in the pull constant buffer.
-    *
-    * Note that we don't move constant-indexed accesses to arrays.  No
-    * testing has been done of the performance impact of this choice.
+   /* First, walk through the instructions and determine which things need to
+    * be pulled.  We mark something as needing to be pulled by setting
+    * pull_constant_loc to 0.
     */
-   do {
-      nested_reladdr = false;
-
-      foreach_block_and_inst_safe(block, vec4_instruction, inst, cfg) {
-         for (int i = 0 ; i < 3; i++) {
-            if (inst->src[i].file != UNIFORM || !inst->src[i].reladdr)
-               continue;
+   foreach_block_and_inst(block, vec4_instruction, inst, cfg) {
+      /* We only care about MOV_INDIRECT of a uniform */
+      if (inst->opcode != SHADER_OPCODE_MOV_INDIRECT ||
+          inst->src[0].file != UNIFORM)
+         continue;
 
-            int uniform = inst->src[i].nr;
+      int uniform_nr = inst->src[0].nr + inst->src[0].reg_offset;
 
-            if (inst->src[i].reladdr->reladdr)
-               nested_reladdr = true;  /* will need another pass */
+      for (unsigned j = 0; j < DIV_ROUND_UP(inst->src[2].ud, 16); j++)
+         pull_constant_loc[uniform_nr + j] = 0;
+   }
 
-            /* If this array isn't already present in the pull constant buffer,
-             * add it.
-             */
-            if (pull_constant_loc[uniform] == -1) {
-               const gl_constant_value **values =
-                  &stage_prog_data->param[uniform * 4];
+   /* Next, we walk the list of uniforms and assign real pull constant
+    * locations and set their corresponding entries in pull_param.
+    */
+   for (int j = 0; j < this->uniforms; j++) {
+      if (pull_constant_loc[j] < 0)
+         continue;
 
-               pull_constant_loc[uniform] = stage_prog_data->nr_pull_params / 4;
+      pull_constant_loc[j] = stage_prog_data->nr_pull_params / 4;
 
-               assert(uniform < uniform_array_size);
-               for (int j = 0; j < uniform_size[uniform] * 4; j++) {
-                  stage_prog_data->pull_param[stage_prog_data->nr_pull_params++]
-                     = values[j];
-               }
-            }
+      for (int i = 0; i < 4; i++) {
+         stage_prog_data->pull_param[stage_prog_data->nr_pull_params++]
+            = stage_prog_data->param[j * 4 + i];
+      }
+   }
 
-            /* Set up the annotation tracking for new generated instructions. */
-            base_ir = inst->ir;
-            current_annotation = inst->annotation;
+   /* Finally, we can walk through the instructions and lower MOV_INDIRECT
+    * instructions to actual uniform pulls.
+    */
+   foreach_block_and_inst_safe(block, vec4_instruction, inst, cfg) {
+      /* We only care about MOV_INDIRECT of a uniform */
+      if (inst->opcode != SHADER_OPCODE_MOV_INDIRECT ||
+          inst->src[0].file != UNIFORM)
+         continue;
 
-            dst_reg temp = dst_reg(this, glsl_type::vec4_type);
+      int uniform_nr = inst->src[0].nr + inst->src[0].reg_offset;
 
-            emit_pull_constant_load(block, inst, temp, inst->src[i],
-                                    pull_constant_loc[uniform]);
+      assert(inst->src[0].swizzle == BRW_SWIZZLE_NOOP);
 
-            inst->src[i].file = temp.file;
-            inst->src[i].nr = temp.nr;
-            inst->src[i].reg_offset = temp.reg_offset;
-            inst->src[i].reladdr = NULL;
-         }
-      }
-   } while (nested_reladdr);
+      emit_pull_constant_load(block, inst, inst->dst, inst->src[0],
+                              pull_constant_loc[uniform_nr], inst->src[1]);
+      inst->remove(block);
+   }
 
    /* Now there are no accesses of the UNIFORM file with a reladdr, so
     * no need to track them as larger-than-vec4 objects.  This will be
@@ -1739,17 +1726,6 @@ vec4_visitor::vec4_visitor(const struct brw_compiler *compiler,
    this->max_grf = devinfo->gen >= 7 ? GEN7_MRF_HACK_START : BRW_MAX_GRF;
 
    this->uniforms = 0;
-
-   /* Initialize uniform_array_size to at least 1 because pre-gen6 VS requires
-    * at least one. See setup_uniforms() in brw_vec4.cpp.
-    */
-   this->uniform_array_size = 1;
-   if (prog_data) {
-      this->uniform_array_size =
-         MAX2(DIV_ROUND_UP(stage_prog_data->nr_params, 4), 1);
-   }
-
-   this->uniform_size = rzalloc_array(mem_ctx, int, this->uniform_array_size);
 }
 
 vec4_visitor::~vec4_visitor()
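
move_uniform_array_access_to_pull_constants() now runs in three passes: mark every uniform touched by a MOV_INDIRECT, assign each marked uniform a pull-constant slot (copying its four components into pull_param), then lower each MOV_INDIRECT to a pull-constant load. A compilable miniature of the assignment pass, with simplified types and without the pull_param copy:

    #include <stdbool.h>

    /* Returns the number of vec4 slots moved to the pull-constant buffer.
     * 'marked' stands in for pull_constant_loc[j] == 0 in the real pass;
     * unmarked uniforms keep -1 and stay push constants. */
    static int assign_pull_locations(const bool *marked, int *pull_loc,
                                     int num_uniforms)
    {
       int nr_pull_vec4s = 0;
       for (int j = 0; j < num_uniforms; j++)
          pull_loc[j] = marked[j] ? nr_pull_vec4s++ : -1;
       return nr_pull_vec4s;
    }
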
index f3cfc8892d3fb4deaef6bb17129d0fad095620b5..39f0c0b932d3d73e4b6140ccc54fe520af05a72a 100644 (file)
@@ -161,7 +161,6 @@ void
 vec4_vs_visitor::setup_uniform_clipplane_values()
 {
    for (int i = 0; i < key->nr_userclip_plane_consts; ++i) {
-      assert(this->uniforms < uniform_array_size);
       this->userplane[i] = dst_reg(UNIFORM, this->uniforms);
       this->userplane[i].type = BRW_REGISTER_TYPE_F;
       for (int j = 0; j < 4; ++j) {
index 5d8bfe4bb09e0abe1a293cc6c34e1c72aa32ec51..2e43996f23ad5afdff1a3a1f72611c91bb7473db 100644 (file)
@@ -2503,6 +2503,11 @@ struct gl_uniform_block
     */
    GLuint Binding;
 
+   /**
+    * Vulkan descriptor set qualifier for this block.
+    */
+   GLuint Set;
+
    /**
     * Minimum size (in bytes) of a buffer object to back this uniform buffer
     * (GL_UNIFORM_BLOCK_DATA_SIZE).
index c452819414fae1839358c3c210dbbf810103a52c..2404ce7f630296b1c7622201e443f8b58e619406 100644 (file)
@@ -98,7 +98,7 @@ __bitset_ffs(const BITSET_WORD *x, int n)
 
 static inline unsigned
 __bitset_next_set(unsigned i, BITSET_WORD *tmp,
-                  BITSET_WORD *set, unsigned size)
+                  const BITSET_WORD *set, unsigned size)
 {
    unsigned bit, word;
 
index 066f9b8dfe50bb5852e9b31d781e602cb6b55ce4..f0dec5da60874c7cf9e6028589e9d1f03697816b 100644 (file)
@@ -116,6 +116,28 @@ static inline unsigned list_length(struct list_head *list)
    return length;
 }
 
+static inline void list_splice(struct list_head *src, struct list_head *dst)
+{
+   if (list_empty(src))
+      return;
+
+   src->next->prev = dst;
+   src->prev->next = dst->next;
+   dst->next->prev = src->prev;
+   dst->next = src->next;
+}
+
+static inline void list_splicetail(struct list_head *src, struct list_head *dst)
+{
+   if (list_empty(src))
+      return;
+
+   src->prev->next = dst;
+   src->next->prev = dst->prev;
+   dst->prev->next = src->next;
+   dst->prev = src->prev;
+}
+
 static inline void list_validate(struct list_head *list)
 {
    struct list_head *node;
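
The new list_splice()/list_splicetail() helpers move every node of src into dst, at the head or at the tail respectively, but they leave src's own head still pointing at the moved nodes. A usage sketch, assuming the list_inithead() and list_addtail() helpers already present in this header:

    static inline void drain_pending(struct list_head *done,
                                     struct list_head *pending)
    {
       list_splicetail(pending, done);   /* append pending's nodes to done  */
       list_inithead(pending);           /* reset pending before reusing it */
    }
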